content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# First decide the clustering of CSIs. Read decisions from table I_MOD_CSI_AC.
# Run PCA analysis.
#
# Author: E620927
###############################################################################
# MULTIPLIER accumulates one row per CSI (eigenvector loading and multiplier)
# across every model-id column; it is saved once at the very end.
MULTIPLIER <- data.frame()
# Repeat the full PCA pass for each alternative clustering column.
# NOTE(review): modelid_columns, Output_root, dbc, current_version and
# CSI_CURVES are globals, and fetchTable/selectCurves/combineCurves/
# as.yearperiod/determineTsStartDate/determineTsFrequency/cleanString/
# formatPercentages/writeVectorToTex/versionDataFrame/saveTable are project
# helpers assumed to be in scope from earlier scripts — confirm load order.
for (modelid_column in modelid_columns) {
# modelid_column <- modelid_columns[1]
# Output directory for this clustering's plots, CSVs and TeX fragments.
Output_PCA <- file.path(Output_root,paste0("PCA_MONTHLY_",modelid_column))
if (!dir.exists(Output_PCA)) {
dir.create(Output_PCA)
}
# Clustering decisions: which CSI_IDs belong to which model / asset class.
M_CSI <- fetchTable(dbc,"I_MOD_CSI_AC",current_version)
# Keep only CSIs that are assigned to a cluster under this column.
M_CSI <- M_CSI[!is.na(M_CSI[,modelid_column]),]
MODEL_ID <- na.omit(unique(M_CSI[,modelid_column]))
# Per-column accumulators, rebuilt for every modelid_column.
CSI_Eigenvectors <- data.frame()
PC1TS <- data.frame()
PC1LEVEL <- data.frame()
model_ids <- c()
for (i in 1:length(MODEL_ID)) {
# i = 2
print(i)
model <- MODEL_ID[i]
# CSIs in this cluster; AC_ID is taken from the cluster's first row.
csi <- M_CSI[M_CSI[,modelid_column]==model,]$CSI_ID
ac_id <- M_CSI[M_CSI[,modelid_column]==model,]$AC_ID[1]
# Combine the cluster's curves into one multivariate series, then take
# first differences so the PCA runs on spread changes rather than levels.
curves_mod <- combineCurves(selectCurves(CSI_CURVES, csi))
first_diff_ts <- diff(curves_mod)
# PCA without centering or scaling, keeping loadings in spread units.
subgroup_no_pca <- prcomp(na.omit(first_diff_ts),center = FALSE,scale = FALSE)
total = sum(subgroup_no_pca$sdev^2) # total variance
var_pc1 = (subgroup_no_pca$sdev[1])^2
percent = var_pc1/total # share of total variance explained by PC1
# First principal component: loadings (rotation) and scores (x).
PCA_rotation <- subgroup_no_pca$rotation[,1,drop=FALSE]
PC1_no_series <- subgroup_no_pca$x[,1,drop=FALSE]
# PCA sign is arbitrary: flip when every loading is negative so PC1 moves
# in the same direction as the underlying spreads. (Mixed-sign loadings
# are left untouched.)
if (all(PCA_rotation<0)) {
PCA_rotation <- -PCA_rotation
PC1_no_series <- -PC1_no_series
}
# Re-wrap the PC1 scores as a ts aligned with the differenced series' dates.
PC1_no_series <- ts(PC1_no_series,start=determineTsStartDate(as.yearperiod(index(na.omit(first_diff_ts)))),frequency=determineTsFrequency(as.yearperiod(index(na.omit(first_diff_ts)))))
# Normalise by the sum of loadings so PC1 is on the scale of the spreads.
pc1_ts <- PC1_no_series/sum(PCA_rotation)
pc1_frame <- data.frame(MONTH=as.yearperiod(index(na.omit(first_diff_ts))),AC_ID=ac_id,MOD_ID=model,PC1=pc1_ts,row.names=NULL)
PC1TS <- rbind(PC1TS, pc1_frame)
# Multiplier maps PC1 back to each CSI: loading * sum(loadings).
multiplier <- PCA_rotation*sum(PCA_rotation)
rotation <- data.frame(PCA_rotation, multiplier)
rownames(rotation) <- csi
colnames(rotation) <- c("EIGENVECTOR","MULTIPLIER")
CSI_Eigenvectors <- rbind(CSI_Eigenvectors,rotation)
# PC1 in levels: project the level curves onto the normalised loadings.
pca_level <- ts(curves_mod%*%PCA_rotation/sum(PCA_rotation),start=determineTsStartDate(as.Date(as.yearperiod(index(curves_mod)))),frequency=determineTsFrequency(as.Date(as.yearperiod(index(curves_mod)))))
pc1level <- data.frame(MONTH=as.yearperiod(index(curves_mod)),AC_ID=ac_id,MOD_ID=model,PC1=pca_level,row.names=NULL)
PC1LEVEL <- rbind(PC1LEVEL, pc1level)
# Sanitise the model id for use in file names.
model <- cleanString(model)
model_ids <- c(model_ids,model)
# --- Diagnostics: level plot of all CSI curves in the cluster ---
file_name <- paste(Output_PCA,"/",model,"_leveldata.png",sep="")
png(file_name,width=600,height=400)
ts.plot(curves_mod,ylab="spread",main=paste0("MOD_ID: ",model, " - CSI level"), col=1:length(csi),type="b")
legend("topleft",legend=csi,col=1:length(csi),pch=1)
dev.off()
# --- First-differenced data: CSV plus plot ---
file_name <- paste(Output_PCA,"/",model,"_diffdata.csv",sep="")
write.csv(first_diff_ts,file_name)
file_name <- paste(Output_PCA,"/",model,"_diffdata.png",sep="")
png(file_name,width=600,height=400)
ts.plot(first_diff_ts,ylab="differenced spread",main=paste0("MOD_ID: ",model," - CSI first differenced"), col=1:length(csi),type="b")
legend("topleft",legend=csi,col=1:length(csi),pch=1)
dev.off()
# --- PC1 in differences: CSV plus plot with variance-explained subtitle ---
file_name <- paste(Output_PCA,"/",model,"_pc1.csv",sep="")
write.csv(pc1_frame,file_name)
file_name <- paste(Output_PCA,"/",model,"_pc1data.png",sep="")
png(file_name,width=600,height=400)
ts.plot(pc1_ts,ylab="differenced spread",main=paste0("MOD_ID: ",model," - PC1 first differenced"),type="b",sub=paste0("Total Variance Explained By PC1 is ",formatPercentages(round(percent,3))))
dev.off()
# --- PC1 in levels: CSV plus plot ---
file_name <- paste(Output_PCA,"/",model,"_pc1level.csv",sep="")
write.csv(pc1level,file_name)
file_name <- paste(Output_PCA,"/",model,"_pc1level.png",sep="")
png(file_name,width=600,height=400)
ts.plot(pca_level,ylab="spread",main=paste0("MOD_ID: ",model," - PC1 level"),type="b")
dev.off()
# --- Eigenvector/multiplier table: CSV and TeX ---
file_name <- paste(Output_PCA,"/",model,"_RotationAndMultplier.csv",sep="")
write.csv(rotation,file_name)
file_name <- paste(Output_PCA,"/",model,"_PCAOutput.tex",sep="")
print(xtable(rotation,caption=paste0("PCA Output: ",model)),file=file_name, floating=FALSE, include.rownames=TRUE)
# --- Rank correlations between each CSI's differences and PC1 ---
# NOTE(review): the variable `cor` shadows stats::cor below; the second
# cor(...) call still dispatches to the function (R looks up a function in
# call position), but renaming the variable would be clearer.
ts_tmp <- na.omit(ts.union(first_diff_ts,pc1_ts))
colnames(ts_tmp) <- c(as.character(csi),"pc1")
cor <- round(cor(ts_tmp,method="kendall"),2)
file_name <- paste(Output_PCA,"/",model,"_RetainedCorrelation_kendall.png",sep="")
png(file_name,width=900)
grid.arrange(tableGrob(cor),top=textGrob("Kendall Correlation"))
dev.off()
file_name <- paste(Output_PCA,"/",model,"_RetainedCorrelation_spearman.png",sep="")
cor <- round(cor(ts_tmp,method="spearman"),2)
png(file_name,width=900)
grid.arrange(tableGrob(cor),top=textGrob("Spearman Correlation"))
dev.off()
}
# Assemble this clustering's eigenvector table and join it back onto the
# CSI-to-model mapping.
colnames(CSI_Eigenvectors) <- c("EIGENVECTOR","MULTIPLIER")
#CSI_Eigenvectors = data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),CSI_ID=rownames(CSI_Eigenvectors),CSI_Eigenvectors,row.names=NULL)
CSI_Eigenvectors = data.frame(CSI_ID=rownames(CSI_Eigenvectors),CSI_Eigenvectors,row.names=NULL)
dat = merge(M_CSI,CSI_Eigenvectors,by="CSI_ID")
dat <- dat[,c("CSI_ID",modelid_column,"AC_ID","EIGENVECTOR","MULTIPLIER")]
# Rename the cluster column to the first column's name so rows from all
# modelid_columns can be stacked into one MULTIPLIER frame.
colnames(dat)[colnames(dat) == modelid_column] <- modelid_columns[1]
MULTIPLIER = rbind(MULTIPLIER,dat)
# Persist PC1 (first differences) for this clustering, stamped with version
# and timestamp; MONTH is converted to character for the DB write.
table_name <- paste0("O_PC1TS_MONTHLY_FINAL_",modelid_column)
#try(sqlDrop(dbc, table_name))
PC1TS[,1] <- as.character(PC1TS[,1])
data <- data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),PC1TS)
#sqlSave(dbc, PC1TS, tablename = table_name, addPK=FALSE, safer = FALSE)
saveTable(dbc,table_name,data)
# Persist PC1 (levels) for this clustering.
table_name <- paste0("O_PC1LEVEL_MONTHLY_FINAL_",modelid_column)
#try(sqlDrop(dbc, table_name))
PC1LEVEL[,1] <- as.character(PC1LEVEL[,1])
data <- data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),PC1LEVEL)
#sqlSave(dbc, PC1LEVEL, tablename = table_name, addPK=FALSE, safer = FALSE)
saveTable(dbc,table_name,data)
# file_name = paste(Output_PCA,"/EigenvectorAndMultiplier.tex",sep="")
# print(xtable(dat[,4:ncol(dat)],caption="Eigenvector and Multiplier of PCA Analysis"),file=file_name, tabular.environment = 'longtable', floating=FALSE, include.rownames=FALSE)
# Write the list of cluster names for inclusion in the LaTeX report.
file_name = paste(Output_PCA,"/ClustersAll.tex",sep="")
writeVectorToTex(file_name,"CLUSTERS",model_ids,command="renewcommand")
}
# De-duplicate (CSIs can repeat across columns), version-stamp and save the
# combined eigenvector/multiplier table.
MULTIPLIER <- unique(MULTIPLIER)
MULTIPLIER <- versionDataFrame(MULTIPLIER,current_version)
MULTIPLIER <- MULTIPLIER[order(MULTIPLIER[,"AC_ID"]),]
#write.csv(MULTIPLIER[order(MULTIPLIER$MOD_ID),],"./test.csv")
table_name <- "O_MULTIPLIER_MONTHLY_ALL"
saveTable(dbc,table_name,MULTIPLIER)
| /3_GeneratePC_Alternative_Monthly.R | no_license | charleshjw/CISM_Enhanced | R | false | false | 6,861 | r | # First decide the clustering of CSIs. Read decisions from table I_MOD_CSI_AC.
# Run PCA analysis.
#
# Author: E620927
###############################################################################
# MULTIPLIER accumulates one row per CSI (eigenvector loading and multiplier)
# across every model-id column; it is saved once at the very end.
MULTIPLIER <- data.frame()
# Repeat the full PCA pass for each alternative clustering column.
# NOTE(review): modelid_columns, Output_root, dbc, current_version and
# CSI_CURVES are globals, and fetchTable/selectCurves/combineCurves/
# as.yearperiod/determineTsStartDate/determineTsFrequency/cleanString/
# formatPercentages/writeVectorToTex/versionDataFrame/saveTable are project
# helpers assumed to be in scope from earlier scripts — confirm load order.
for (modelid_column in modelid_columns) {
# modelid_column <- modelid_columns[1]
# Output directory for this clustering's plots, CSVs and TeX fragments.
Output_PCA <- file.path(Output_root,paste0("PCA_MONTHLY_",modelid_column))
if (!dir.exists(Output_PCA)) {
dir.create(Output_PCA)
}
# Clustering decisions: which CSI_IDs belong to which model / asset class.
M_CSI <- fetchTable(dbc,"I_MOD_CSI_AC",current_version)
# Keep only CSIs that are assigned to a cluster under this column.
M_CSI <- M_CSI[!is.na(M_CSI[,modelid_column]),]
MODEL_ID <- na.omit(unique(M_CSI[,modelid_column]))
# Per-column accumulators, rebuilt for every modelid_column.
CSI_Eigenvectors <- data.frame()
PC1TS <- data.frame()
PC1LEVEL <- data.frame()
model_ids <- c()
for (i in 1:length(MODEL_ID)) {
# i = 2
print(i)
model <- MODEL_ID[i]
# CSIs in this cluster; AC_ID is taken from the cluster's first row.
csi <- M_CSI[M_CSI[,modelid_column]==model,]$CSI_ID
ac_id <- M_CSI[M_CSI[,modelid_column]==model,]$AC_ID[1]
# Combine the cluster's curves into one multivariate series, then take
# first differences so the PCA runs on spread changes rather than levels.
curves_mod <- combineCurves(selectCurves(CSI_CURVES, csi))
first_diff_ts <- diff(curves_mod)
# PCA without centering or scaling, keeping loadings in spread units.
subgroup_no_pca <- prcomp(na.omit(first_diff_ts),center = FALSE,scale = FALSE)
total = sum(subgroup_no_pca$sdev^2) # total variance
var_pc1 = (subgroup_no_pca$sdev[1])^2
percent = var_pc1/total # share of total variance explained by PC1
# First principal component: loadings (rotation) and scores (x).
PCA_rotation <- subgroup_no_pca$rotation[,1,drop=FALSE]
PC1_no_series <- subgroup_no_pca$x[,1,drop=FALSE]
# PCA sign is arbitrary: flip when every loading is negative so PC1 moves
# in the same direction as the underlying spreads. (Mixed-sign loadings
# are left untouched.)
if (all(PCA_rotation<0)) {
PCA_rotation <- -PCA_rotation
PC1_no_series <- -PC1_no_series
}
# Re-wrap the PC1 scores as a ts aligned with the differenced series' dates.
PC1_no_series <- ts(PC1_no_series,start=determineTsStartDate(as.yearperiod(index(na.omit(first_diff_ts)))),frequency=determineTsFrequency(as.yearperiod(index(na.omit(first_diff_ts)))))
# Normalise by the sum of loadings so PC1 is on the scale of the spreads.
pc1_ts <- PC1_no_series/sum(PCA_rotation)
pc1_frame <- data.frame(MONTH=as.yearperiod(index(na.omit(first_diff_ts))),AC_ID=ac_id,MOD_ID=model,PC1=pc1_ts,row.names=NULL)
PC1TS <- rbind(PC1TS, pc1_frame)
# Multiplier maps PC1 back to each CSI: loading * sum(loadings).
multiplier <- PCA_rotation*sum(PCA_rotation)
rotation <- data.frame(PCA_rotation, multiplier)
rownames(rotation) <- csi
colnames(rotation) <- c("EIGENVECTOR","MULTIPLIER")
CSI_Eigenvectors <- rbind(CSI_Eigenvectors,rotation)
# PC1 in levels: project the level curves onto the normalised loadings.
pca_level <- ts(curves_mod%*%PCA_rotation/sum(PCA_rotation),start=determineTsStartDate(as.Date(as.yearperiod(index(curves_mod)))),frequency=determineTsFrequency(as.Date(as.yearperiod(index(curves_mod)))))
pc1level <- data.frame(MONTH=as.yearperiod(index(curves_mod)),AC_ID=ac_id,MOD_ID=model,PC1=pca_level,row.names=NULL)
PC1LEVEL <- rbind(PC1LEVEL, pc1level)
# Sanitise the model id for use in file names.
model <- cleanString(model)
model_ids <- c(model_ids,model)
# --- Diagnostics: level plot of all CSI curves in the cluster ---
file_name <- paste(Output_PCA,"/",model,"_leveldata.png",sep="")
png(file_name,width=600,height=400)
ts.plot(curves_mod,ylab="spread",main=paste0("MOD_ID: ",model, " - CSI level"), col=1:length(csi),type="b")
legend("topleft",legend=csi,col=1:length(csi),pch=1)
dev.off()
# --- First-differenced data: CSV plus plot ---
file_name <- paste(Output_PCA,"/",model,"_diffdata.csv",sep="")
write.csv(first_diff_ts,file_name)
file_name <- paste(Output_PCA,"/",model,"_diffdata.png",sep="")
png(file_name,width=600,height=400)
ts.plot(first_diff_ts,ylab="differenced spread",main=paste0("MOD_ID: ",model," - CSI first differenced"), col=1:length(csi),type="b")
legend("topleft",legend=csi,col=1:length(csi),pch=1)
dev.off()
# --- PC1 in differences: CSV plus plot with variance-explained subtitle ---
file_name <- paste(Output_PCA,"/",model,"_pc1.csv",sep="")
write.csv(pc1_frame,file_name)
file_name <- paste(Output_PCA,"/",model,"_pc1data.png",sep="")
png(file_name,width=600,height=400)
ts.plot(pc1_ts,ylab="differenced spread",main=paste0("MOD_ID: ",model," - PC1 first differenced"),type="b",sub=paste0("Total Variance Explained By PC1 is ",formatPercentages(round(percent,3))))
dev.off()
# --- PC1 in levels: CSV plus plot ---
file_name <- paste(Output_PCA,"/",model,"_pc1level.csv",sep="")
write.csv(pc1level,file_name)
file_name <- paste(Output_PCA,"/",model,"_pc1level.png",sep="")
png(file_name,width=600,height=400)
ts.plot(pca_level,ylab="spread",main=paste0("MOD_ID: ",model," - PC1 level"),type="b")
dev.off()
# --- Eigenvector/multiplier table: CSV and TeX ---
file_name <- paste(Output_PCA,"/",model,"_RotationAndMultplier.csv",sep="")
write.csv(rotation,file_name)
file_name <- paste(Output_PCA,"/",model,"_PCAOutput.tex",sep="")
print(xtable(rotation,caption=paste0("PCA Output: ",model)),file=file_name, floating=FALSE, include.rownames=TRUE)
# --- Rank correlations between each CSI's differences and PC1 ---
# NOTE(review): the variable `cor` shadows stats::cor below; the second
# cor(...) call still dispatches to the function (R looks up a function in
# call position), but renaming the variable would be clearer.
ts_tmp <- na.omit(ts.union(first_diff_ts,pc1_ts))
colnames(ts_tmp) <- c(as.character(csi),"pc1")
cor <- round(cor(ts_tmp,method="kendall"),2)
file_name <- paste(Output_PCA,"/",model,"_RetainedCorrelation_kendall.png",sep="")
png(file_name,width=900)
grid.arrange(tableGrob(cor),top=textGrob("Kendall Correlation"))
dev.off()
file_name <- paste(Output_PCA,"/",model,"_RetainedCorrelation_spearman.png",sep="")
cor <- round(cor(ts_tmp,method="spearman"),2)
png(file_name,width=900)
grid.arrange(tableGrob(cor),top=textGrob("Spearman Correlation"))
dev.off()
}
# Assemble this clustering's eigenvector table and join it back onto the
# CSI-to-model mapping.
colnames(CSI_Eigenvectors) <- c("EIGENVECTOR","MULTIPLIER")
#CSI_Eigenvectors = data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),CSI_ID=rownames(CSI_Eigenvectors),CSI_Eigenvectors,row.names=NULL)
CSI_Eigenvectors = data.frame(CSI_ID=rownames(CSI_Eigenvectors),CSI_Eigenvectors,row.names=NULL)
dat = merge(M_CSI,CSI_Eigenvectors,by="CSI_ID")
dat <- dat[,c("CSI_ID",modelid_column,"AC_ID","EIGENVECTOR","MULTIPLIER")]
# Rename the cluster column to the first column's name so rows from all
# modelid_columns can be stacked into one MULTIPLIER frame.
colnames(dat)[colnames(dat) == modelid_column] <- modelid_columns[1]
MULTIPLIER = rbind(MULTIPLIER,dat)
# Persist PC1 (first differences) for this clustering, stamped with version
# and timestamp; MONTH is converted to character for the DB write.
table_name <- paste0("O_PC1TS_MONTHLY_FINAL_",modelid_column)
#try(sqlDrop(dbc, table_name))
PC1TS[,1] <- as.character(PC1TS[,1])
data <- data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),PC1TS)
#sqlSave(dbc, PC1TS, tablename = table_name, addPK=FALSE, safer = FALSE)
saveTable(dbc,table_name,data)
# Persist PC1 (levels) for this clustering.
table_name <- paste0("O_PC1LEVEL_MONTHLY_FINAL_",modelid_column)
#try(sqlDrop(dbc, table_name))
PC1LEVEL[,1] <- as.character(PC1LEVEL[,1])
data <- data.frame(VERSION=current_version,TimeStamp = gsub(":","-",Sys.time()),PC1LEVEL)
#sqlSave(dbc, PC1LEVEL, tablename = table_name, addPK=FALSE, safer = FALSE)
saveTable(dbc,table_name,data)
# file_name = paste(Output_PCA,"/EigenvectorAndMultiplier.tex",sep="")
# print(xtable(dat[,4:ncol(dat)],caption="Eigenvector and Multiplier of PCA Analysis"),file=file_name, tabular.environment = 'longtable', floating=FALSE, include.rownames=FALSE)
# Write the list of cluster names for inclusion in the LaTeX report.
file_name = paste(Output_PCA,"/ClustersAll.tex",sep="")
writeVectorToTex(file_name,"CLUSTERS",model_ids,command="renewcommand")
}
# De-duplicate (CSIs can repeat across columns), version-stamp and save the
# combined eigenvector/multiplier table.
MULTIPLIER <- unique(MULTIPLIER)
MULTIPLIER <- versionDataFrame(MULTIPLIER,current_version)
MULTIPLIER <- MULTIPLIER[order(MULTIPLIER[,"AC_ID"]),]
#write.csv(MULTIPLIER[order(MULTIPLIER$MOD_ID),],"./test.csv")
table_name <- "O_MULTIPLIER_MONTHLY_ALL"
saveTable(dbc,table_name,MULTIPLIER)
|
# Using R version 3.5.2
#' Return a dataframe of biomass values aggregated over a specified timestep interval
#' @param inputs file path for an individual, processed dataframe
#' @param outputs file path where you would like to store your outputs
#' @param simulation_number string that denotes the simulation number the data came from (eg "001")
#' @param replicate_number string that denotes what number replicate within the simulation the data is
#' @param burnin integer, specifies how many burnin timesteps to remove
#' @param interval integer, specifies how many monthly timesteps you want to
#' aggregate by. Should usually be 12 if you want to convert monthly to annual
#' @param func function to use when converting monthly timesteps (eg mean, min,
#' max, sample etc)
#' @return a dataframe (or list of dataframes, see TODO section) where the columns
#' are mean monthly biomass for that year (or whatever specified interval is)
#' TODO: At this stage the inputs and outputs of this function are for a single
#' replicate. But I have set it up as a list in case later on it makes more
#' sense to average over the replicates before calculating the indicator
#'
#' TODO: Add default values to variable, interval and function (adult_biomass,
#' 12 and mean respectively) unless specified.
#'
# indicator <- "proportion_total_biomass"
# variable <- "biomass"
# interval <- 3
# burnin <- 0
# func <- mean
# simulation_number <- "ae"
# replicate_numbers <- 0:1
# replicate_number <- as.character(replicate_numbers[1])
# inputs <- "N:/Quantitative-Ecology/Indicators-Project/Serengeti/Outputs_from_adaptor_code/map_of_life/Test_runs/ae_BuildModel/MassBinsOutputs_NI_0_Cell0_biomass.rds"
# outputs <- "N:\\Quantitative-Ecology\\Indicators-Project\\Serengeti\\Outputs_from_indicator_code\\Indicator_inputs\\proportion_total_biomass\\Test_runs\\"
#
# x <- prepare_proportion_total_biomass_inputs(test_input, test_output,simulation_number,
# replicate_number,burnin, interval, func )
prepare_proportion_total_biomass_inputs <- function(inputs, outputs, simulation_number,
                                                    replicate_number, burnin, interval, func){
  # Convert one replicate's processed output (rows = groups/mass bins,
  # columns = monthly timesteps) into interval-aggregated inputs for the
  # proportion-of-total-biomass indicator, saving the result as an RDS file.
  #
  # inputs            - path to the replicate's RDS file
  # outputs           - output folder; its basename is used as the scenario label
  # simulation_number - simulation identifier used in the output file name
  # replicate_number  - replicate identifier used in the output file name
  # burnin            - number of leading timesteps (columns) to discard
  # interval          - timesteps to aggregate per block (12 = monthly -> annual)
  # func              - aggregation function applied per block (e.g. mean, min, max)
  # Returns the aggregated matrix (interval > 1) or the burned-in data
  # unchanged (interval == 1).
  #
  # NOTE: the previous version require()d stringr/tidyverse/reshape2 but used
  # no function from them; only base R is needed, so those calls were removed.
  scenario <- basename(outputs)

  # Read the replicate, drop the trailing column and recode the model's
  # -9999 missing-value sentinel as NA.
  replicate <- readRDS(inputs)
  replicate <- replicate[, -ncol(replicate)]
  replicate[replicate == -9999] <- NA

  # Create the output folder if needed.
  output_folder <- outputs
  if (!dir.exists(file.path(output_folder))) {
    dir.create(file.path(output_folder), recursive = TRUE)
  }

  # Discard the first `burnin` timesteps (columns).
  remove_burn_in <- function(data, burnin) {
    data[, (burnin + 1):ncol(data)]
  }
  replicate_no_burnin <- remove_burn_in(replicate, burnin)

  # Aggregate consecutive timesteps in blocks of `interval` columns with `func`
  # (e.g. interval = 12 with mean converts monthly data to annual means).
  convert_timesteps <- function(dataframe, interval, func) {
    monthly_matrix <- t(dataframe)
    n <- interval
    time_converted_matrix <- t(aggregate(monthly_matrix,
                                         list(rep(1:(nrow(monthly_matrix) %/% n + 1),
                                                  each = n, len = nrow(monthly_matrix))),
                                         func, na.rm = TRUE))
    time_converted_matrix <- time_converted_matrix[-1, ]  # drop the group-id row
    time_converted_matrix[is.nan(time_converted_matrix)] <- NA  # all-NA blocks yield NaN
    return(time_converted_matrix)
  }

  if (interval > 1) {
    proportion_total_biomass_inputs <- convert_timesteps(replicate_no_burnin, interval, func)
  } else if (interval == 1) {
    # No aggregation required; pass the burned-in data through unchanged.
    proportion_total_biomass_inputs <- replicate_no_burnin
  } else {
    # Previously interval < 1 fell through and silently returned NULL.
    stop("`interval` must be a positive integer, got: ", interval, call. = FALSE)
  }

  # Save as <scenario>_<simulation>_<replicate>_proportion_total_biomass_inputs.
  # Bug fix: the interval == 1 branch previously used the undefined variable
  # `cell_number` in the file name, so that branch always errored; both paths
  # now use `replicate_number`.
  saveRDS(proportion_total_biomass_inputs,
          file = file.path(output_folder,
                           paste(scenario, simulation_number, replicate_number,
                                 "proportion_total_biomass_inputs", sep = "_")))
  return(proportion_total_biomass_inputs)
}
| /2_prepare_inputs/prepare_proportion_total_biomass_inputs.R | no_license | conservationscience/model_outputs_to_indicator_inputs | R | false | false | 4,431 | r | # Using R version 3.5.2
#' Return a dataframe that ...
#' @param inputs file path for an individual, processed dataframe
#' @param outputs file path where you would like to store your outputs
#' @param simulation_number string that denotes the simulation number the data came from (eg "001")
#' @param replicate_number string that denotes what number replicate within the simulation the data is
#' @param burnin integer, specifies how many burnin timesteps to remove
#' @param interval integer, specifies how many monthly timesteps you want to
#' aggregate by. Should usually be 12 if you want to convert monthly to annual
#' @param func function to use when converting monthly timesteps (eg mean, min,
#' max, sample etc)
#' @return a dataframe (or list of dataframes, see TODO section) where the columns
#' are mean monthly biomass for that year (or whatever specified interval is)
#' TODO: At this stage the inputs and outputs of this function are for a single
#' replicate. But I have set it up as a list in case later on it makes more
#' sense to average over the replicates before calculating the indicator
#'
#' TODO: Add default values to variable, interval and function (adult_biomass,
#' 12 and mean respectively) unless specified.
#'
# indicator <- "proportion_total_biomass"
# variable <- "biomass"
# interval <- 3
# burnin <- 0
# func <- mean
# simulation_number <- "ae"
# replicate_numbers <- 0:1
# replicate_number <- as.character(replicate_numbers[1])
# inputs <- "N:/Quantitative-Ecology/Indicators-Project/Serengeti/Outputs_from_adaptor_code/map_of_life/Test_runs/ae_BuildModel/MassBinsOutputs_NI_0_Cell0_biomass.rds"
# outputs <- "N:\\Quantitative-Ecology\\Indicators-Project\\Serengeti\\Outputs_from_indicator_code\\Indicator_inputs\\proportion_total_biomass\\Test_runs\\"
#
# x <- prepare_proportion_total_biomass_inputs(test_input, test_output,simulation_number,
# replicate_number,burnin, interval, func )
prepare_proportion_total_biomass_inputs <- function(inputs, outputs, simulation_number,
                                                    replicate_number, burnin, interval, func){
  # Convert one replicate's processed output (rows = groups/mass bins,
  # columns = monthly timesteps) into interval-aggregated inputs for the
  # proportion-of-total-biomass indicator, saving the result as an RDS file.
  #
  # inputs            - path to the replicate's RDS file
  # outputs           - output folder; its basename is used as the scenario label
  # simulation_number - simulation identifier used in the output file name
  # replicate_number  - replicate identifier used in the output file name
  # burnin            - number of leading timesteps (columns) to discard
  # interval          - timesteps to aggregate per block (12 = monthly -> annual)
  # func              - aggregation function applied per block (e.g. mean, min, max)
  # Returns the aggregated matrix (interval > 1) or the burned-in data
  # unchanged (interval == 1).
  #
  # NOTE: the previous version require()d stringr/tidyverse/reshape2 but used
  # no function from them; only base R is needed, so those calls were removed.
  scenario <- basename(outputs)

  # Read the replicate, drop the trailing column and recode the model's
  # -9999 missing-value sentinel as NA.
  replicate <- readRDS(inputs)
  replicate <- replicate[, -ncol(replicate)]
  replicate[replicate == -9999] <- NA

  # Create the output folder if needed.
  output_folder <- outputs
  if (!dir.exists(file.path(output_folder))) {
    dir.create(file.path(output_folder), recursive = TRUE)
  }

  # Discard the first `burnin` timesteps (columns).
  remove_burn_in <- function(data, burnin) {
    data[, (burnin + 1):ncol(data)]
  }
  replicate_no_burnin <- remove_burn_in(replicate, burnin)

  # Aggregate consecutive timesteps in blocks of `interval` columns with `func`
  # (e.g. interval = 12 with mean converts monthly data to annual means).
  convert_timesteps <- function(dataframe, interval, func) {
    monthly_matrix <- t(dataframe)
    n <- interval
    time_converted_matrix <- t(aggregate(monthly_matrix,
                                         list(rep(1:(nrow(monthly_matrix) %/% n + 1),
                                                  each = n, len = nrow(monthly_matrix))),
                                         func, na.rm = TRUE))
    time_converted_matrix <- time_converted_matrix[-1, ]  # drop the group-id row
    time_converted_matrix[is.nan(time_converted_matrix)] <- NA  # all-NA blocks yield NaN
    return(time_converted_matrix)
  }

  if (interval > 1) {
    proportion_total_biomass_inputs <- convert_timesteps(replicate_no_burnin, interval, func)
  } else if (interval == 1) {
    # No aggregation required; pass the burned-in data through unchanged.
    proportion_total_biomass_inputs <- replicate_no_burnin
  } else {
    # Previously interval < 1 fell through and silently returned NULL.
    stop("`interval` must be a positive integer, got: ", interval, call. = FALSE)
  }

  # Save as <scenario>_<simulation>_<replicate>_proportion_total_biomass_inputs.
  # Bug fix: the interval == 1 branch previously used the undefined variable
  # `cell_number` in the file name, so that branch always errored; both paths
  # now use `replicate_number`.
  saveRDS(proportion_total_biomass_inputs,
          file = file.path(output_folder,
                           paste(scenario, simulation_number, replicate_number,
                                 "proportion_total_biomass_inputs", sep = "_")))
  return(proportion_total_biomass_inputs)
}
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include glue_service.R
NULL
# Request marshaller for Glue BatchCreatePartition: builds the tagged shape
# template and fills it from the call's arguments via paws.common::populate().
.glue$batch_create_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionInputList = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchCreatePartition: shape for the Errors
# list returned by the service.
.glue$batch_create_partition_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Errors = structure(list(structure(list(PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchDeleteConnection (catalog id + list of
# connection names to delete).
.glue$batch_delete_connection_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionNameList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchDeleteConnection: Succeeded name list
# plus a map of per-connection errors.
.glue$batch_delete_connection_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Succeeded = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Errors = structure(list(structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "map"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchDeletePartition: identifies the table and
# the list of partition value-tuples to delete.
.glue$batch_delete_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionsToDelete = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchDeletePartition: per-partition errors.
.glue$batch_delete_partition_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Errors = structure(list(structure(list(PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchDeleteTable: database plus list of table
# names to delete.
.glue$batch_delete_table_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TablesToDelete = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchDeleteTable: per-table errors.
.glue$batch_delete_table_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Errors = structure(list(structure(list(TableName = structure(logical(0), tags = list(type = "string")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchDeleteTableVersion: table identity plus the
# version ids to delete.
.glue$batch_delete_table_version_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), VersionIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchDeleteTableVersion: per-version errors.
.glue$batch_delete_table_version_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Errors = structure(list(structure(list(TableName = structure(logical(0), tags = list(type = "string")), VersionId = structure(logical(0), tags = list(type = "string")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetCrawlers: list of crawler names to fetch.
.glue$batch_get_crawlers_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CrawlerNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchGetCrawlers: full Crawler structures
# (targets, schedule, last crawl info, ...) plus the names not found.
.glue$batch_get_crawlers_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Crawlers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = 
"string")), TablePrefix = structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CrawlersNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetDevEndpoints: list of endpoint names.
.glue$batch_get_dev_endpoints_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(DevEndpointNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchGetDevEndpoints: full DevEndpoint
# structures plus the names not found.
.glue$batch_get_dev_endpoints_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(DevEndpoints = structure(list(structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = 
"structure"))), tags = list(type = "list")), DevEndpointsNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetJobs: list of job names to fetch.
.glue$batch_get_jobs_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(JobNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchGetJobs: full Job structures plus the
# names not found. Note AllocatedCapacity is tagged deprecated in the model.
.glue$batch_get_jobs_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Jobs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", 
box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), JobsNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetPartition: table identity plus the
# partition value-tuples to retrieve.
.glue$batch_get_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionsToGet = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchGetPartition: full Partition structures
# (storage descriptor, parameters, ...) plus the keys left unprocessed.
.glue$batch_get_partition_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Partitions = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), UnprocessedKeys = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetTriggers: list of trigger names.
.glue$batch_get_triggers_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(TriggerNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response unmarshaller for Glue BatchGetTriggers: full Trigger structures
# (actions, predicate conditions, ...) plus the names not found.
.glue$batch_get_triggers_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Triggers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), TriggersNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request marshaller for Glue BatchGetWorkflows: workflow names plus the
# boxed IncludeGraph flag.
.glue$batch_get_workflows_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Names = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), IncludeGraph = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$batch_get_workflows_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Workflows = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), LastRun = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags 
= list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = 
structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = 
list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = 
structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = 
TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), MissingWorkflows = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_stop_job_run_input <- function(...) {
  # Request serialization shape for the Glue BatchStopJobRun operation:
  # a job name plus the list of run ids to stop.
  call_args <- c(as.list(environment()), list(...))
  string_shape <- structure(logical(0), tags = list(type = "string"))
  request_shape <- structure(
    list(
      JobName = string_shape,
      JobRunIds = structure(list(string_shape), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$batch_stop_job_run_output <- function(...) {
# Response deserialization shape for Glue BatchStopJobRun: a list of
# successful submissions and a list of per-run error details.
args <- c(as.list(environment()), list(...))
shape <- structure(list(SuccessfulSubmissions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Errors = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$cancel_ml_task_run_input <- function(...) {
  # Request serialization shape for the Glue CancelMLTaskRun operation:
  # the transform id and the task run id to cancel.
  call_args <- c(as.list(environment()), list(...))
  string_shape <- structure(logical(0), tags = list(type = "string"))
  request_shape <- structure(
    list(TransformId = string_shape, TaskRunId = string_shape),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$cancel_ml_task_run_output <- function(...) {
  # Response deserialization shape for Glue CancelMLTaskRun: the transform
  # id, the task run id, and the run's status string.
  call_args <- c(as.list(environment()), list(...))
  string_shape <- structure(logical(0), tags = list(type = "string"))
  response_shape <- structure(
    list(
      TransformId = string_shape,
      TaskRunId = string_shape,
      Status = string_shape
    ),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$create_classifier_input <- function(...) {
# Request serialization shape for Glue CreateClassifier; carries the
# Grok, XML, Json, and Csv classifier definition sub-structures (the
# service presumably expects only one of them per call -- not enforced here).
args <- c(as.list(environment()), list(...))
shape <- structure(list(GrokClassifier = structure(list(Classification = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Classification = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_classifier_output <- function(...) {
  # Response deserialization shape for Glue CreateClassifier: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_connection_input <- function(...) {
# Request serialization shape for Glue CreateConnection: an optional
# catalog id plus the ConnectionInput definition (type, match criteria,
# properties, and physical VPC requirements).
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_connection_output <- function(...) {
  # Response deserialization shape for Glue CreateConnection: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_crawler_input <- function(...) {
# Request serialization shape for Glue CreateCrawler: the crawler
# definition including S3/Jdbc/DynamoDB/Catalog targets, schedule,
# classifiers, schema-change policy, configuration, and tags.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Schedule = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), TablePrefix = structure(logical(0), tags = list(type = "string")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), 
tags = list(type = "structure")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_crawler_output <- function(...) {
  # Response deserialization shape for Glue CreateCrawler: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_database_input <- function(...) {
# Request serialization shape for Glue CreateDatabase: an optional
# catalog id plus the DatabaseInput definition (location, parameters,
# default table permissions, and optional target database link).
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_database_output <- function(...) {
  # Response deserialization shape for Glue CreateDatabase: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_dev_endpoint_input <- function(...) {
# Request serialization shape for Glue CreateDevEndpoint: endpoint name,
# IAM role, networking (security groups/subnet), public keys, worker
# sizing, extra library paths, security configuration, tags, and arguments.
args <- c(as.list(environment()), list(...))
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_dev_endpoint_output <- function(...) {
# Response deserialization shape for Glue CreateDevEndpoint: the created
# endpoint's status, networking details, worker sizing, failure reason,
# and creation timestamp.
args <- c(as.list(environment()), list(...))
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_job_input <- function(...) {
# Request serialization shape for Glue CreateJob: the job definition
# (command, arguments, connections, retries, capacity/worker settings,
# security configuration, tags, notifications). Note AllocatedCapacity
# is tagged deprecated in favor of MaxCapacity.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), WorkerType = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_job_output <- function(...) {
  # Response deserialization shape for Glue CreateJob: the created job's
  # name.
  call_args <- c(as.list(environment()), list(...))
  response_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$create_ml_transform_input <- function(...) {
# Request serialization shape for Glue CreateMLTransform: input record
# tables, transform parameters (including FindMatches tuning), role,
# capacity/worker settings, timeout, retries, and tags.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE)), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_ml_transform_output <- function(...) {
  # Response deserialization shape for Glue CreateMLTransform: the new
  # transform's id.
  call_args <- c(as.list(environment()), list(...))
  response_shape <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$create_partition_input <- function(...) {
# Request serialization shape for Glue CreatePartition: catalog/database/
# table identifiers plus the PartitionInput definition (values, storage
# descriptor with columns/serde/skew info, and parameters).
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionInput = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = 
structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_partition_output <- function(...) {
  # Response deserialization shape for Glue CreatePartition: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_script_input <- function(...) {
# Request serialization shape for Glue CreateScript: a DAG of code-gen
# nodes and edges plus the target language.
args <- c(as.list(environment()), list(...))
shape <- structure(list(DagNodes = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), NodeType = structure(logical(0), tags = list(type = "string")), Args = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LineNumber = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), DagEdges = structure(list(structure(list(Source = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string")), TargetParameter = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Language = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_script_output <- function(...) {
  # Response deserialization shape for Glue CreateScript: the generated
  # Python script and/or Scala code.
  call_args <- c(as.list(environment()), list(...))
  string_shape <- structure(logical(0), tags = list(type = "string"))
  response_shape <- structure(
    list(PythonScript = string_shape, ScalaCode = string_shape),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$create_security_configuration_input <- function(...) {
# Request serialization shape for Glue CreateSecurityConfiguration: a
# name plus S3, CloudWatch, and job-bookmark encryption settings.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EncryptionConfiguration = structure(list(S3Encryption = structure(list(structure(list(S3EncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CloudWatchEncryption = structure(list(CloudWatchEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JobBookmarksEncryption = structure(list(JobBookmarksEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_security_configuration_output <- function(...) {
  # Response deserialization shape for Glue CreateSecurityConfiguration:
  # the configuration's name and creation timestamp.
  call_args <- c(as.list(environment()), list(...))
  response_shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$create_table_input <- function(...) {
# Request serialization shape for Glue CreateTable: catalog/database
# identifiers plus the TableInput definition (storage descriptor with
# columns/serde/skew info, partition keys, view text, type, parameters,
# and optional target table link).
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = 
list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_table_output <- function(...) {
  # Response deserialization shape for Glue CreateTable: an empty
  # structure (the operation returns no fields).
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$create_trigger_input <- function(...) {
# Request serialization shape for Glue CreateTrigger: trigger name/type,
# optional workflow and schedule, firing predicate conditions, the
# actions to run, start-on-creation flag, and tags.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Description = structure(logical(0), tags = list(type = "string")), StartOnCreation = structure(logical(0), tags = list(type = "boolean")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_trigger_output <- function(...) {
  # Response template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Tagged serialization template for the Glue CreateUserDefinedFunction
# request (CatalogId/DatabaseName/FunctionInput). Handed to populate()
# together with the collected arguments. Auto-generated -- do not edit.
.glue$create_user_defined_function_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionInput = structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$create_user_defined_function_output <- function(...) {
  # CreateUserDefinedFunction returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue CreateWorkflow request
# (Name/Description/DefaultRunProperties/Tags); handed to populate()
# with the collected arguments. Auto-generated -- do not edit.
.glue$create_workflow_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$create_workflow_output <- function(...) {
  # Response template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.glue$delete_classifier_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_classifier_output <- function(...) {
  # DeleteClassifier returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteColumnStatisticsForPartition
# request (CatalogId/DatabaseName/TableName/PartitionValues/ColumnName);
# handed to populate() with the collected arguments. Auto-generated.
.glue$delete_column_statistics_for_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_column_statistics_for_partition_output <- function(...) {
  # DeleteColumnStatisticsForPartition returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteColumnStatisticsForTable
# request (CatalogId/DatabaseName/TableName/ColumnName); handed to
# populate() with the collected arguments. Auto-generated.
.glue$delete_column_statistics_for_table_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), ColumnName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_column_statistics_for_table_output <- function(...) {
  # DeleteColumnStatisticsForTable returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteConnection request
# (CatalogId/ConnectionName); handed to populate() with the collected
# arguments. Auto-generated.
.glue$delete_connection_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_connection_output <- function(...) {
  # DeleteConnection returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_crawler_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_crawler_output <- function(...) {
  # DeleteCrawler returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteDatabase request
# (CatalogId/Name); handed to populate() with the collected arguments.
.glue$delete_database_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_database_output <- function(...) {
  # DeleteDatabase returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_dev_endpoint_input <- function(...) {
  # Request template: a single string member `EndpointName`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(EndpointName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_dev_endpoint_output <- function(...) {
  # DeleteDevEndpoint returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_job_input <- function(...) {
  # Request template: a single string member `JobName`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(JobName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_job_output <- function(...) {
  # Response template: a single string member `JobName`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(JobName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.glue$delete_ml_transform_input <- function(...) {
  # Request template: a single string member `TransformId`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_ml_transform_output <- function(...) {
  # Response template: a single string member `TransformId`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Tagged serialization template for the Glue DeletePartition request
# (CatalogId/DatabaseName/TableName/PartitionValues); handed to populate()
# with the collected arguments. Auto-generated.
.glue$delete_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_partition_output <- function(...) {
  # DeletePartition returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteResourcePolicy request
# (PolicyHashCondition/ResourceArn); handed to populate() with the
# collected arguments.
.glue$delete_resource_policy_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(PolicyHashCondition = structure(logical(0), tags = list(type = "string")), ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_resource_policy_output <- function(...) {
  # DeleteResourcePolicy returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_security_configuration_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_security_configuration_output <- function(...) {
  # DeleteSecurityConfiguration returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteTable request
# (CatalogId/DatabaseName/Name); handed to populate() with the collected
# arguments.
.glue$delete_table_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_table_output <- function(...) {
  # DeleteTable returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
# Tagged serialization template for the Glue DeleteTableVersion request
# (CatalogId/DatabaseName/TableName/VersionId); handed to populate() with
# the collected arguments.
.glue$delete_table_version_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_table_version_output <- function(...) {
  # DeleteTableVersion returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_trigger_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_trigger_output <- function(...) {
  # Response template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Tagged serialization template for the Glue DeleteUserDefinedFunction
# request (CatalogId/DatabaseName/FunctionName); handed to populate() with
# the collected arguments.
.glue$delete_user_defined_function_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$delete_user_defined_function_output <- function(...) {
  # DeleteUserDefinedFunction returns an empty payload.
  call_args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, empty_shape)
}
.glue$delete_workflow_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$delete_workflow_output <- function(...) {
  # Response template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.glue$get_catalog_import_status_input <- function(...) {
  # Request template: a single string member `CatalogId`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(CatalogId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
# Tagged serialization template for the Glue GetCatalogImportStatus
# response: an ImportStatus structure (ImportCompleted/ImportTime/
# ImportedBy); handed to populate() with the collected arguments.
.glue$get_catalog_import_status_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ImportStatus = structure(list(ImportCompleted = structure(logical(0), tags = list(type = "boolean")), ImportTime = structure(logical(0), tags = list(type = "timestamp")), ImportedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$get_classifier_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
# Tagged serialization template for the Glue GetClassifier response: one
# Classifier member with Grok/XML/Json/Csv classifier variants. Handed to
# populate() (defined elsewhere in this package) together with the
# collected arguments. Auto-generated -- do not edit by hand.
.glue$get_classifier_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Classifier = structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type 
= "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetClassifiers (paginated)
# request: MaxResults and NextToken.
.glue$get_classifiers_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetClassifiers response: a
# list of classifier structures plus a pagination NextToken.
.glue$get_classifiers_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Classifiers = structure(list(structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), 
tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetColumnStatisticsForPartition
# request (CatalogId/DatabaseName/TableName/PartitionValues/ColumnNames);
# handed to populate() (defined elsewhere) with the collected arguments.
# Auto-generated -- do not edit by hand.
.glue$get_column_statistics_for_partition_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the GetColumnStatisticsForPartition
# response: a ColumnStatisticsList (per-type statistics variants) plus a
# per-column Errors list.
.glue$get_column_statistics_for_partition_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), 
LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Errors = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetColumnStatisticsForTable
# request (CatalogId/DatabaseName/TableName/ColumnNames).
.glue$get_column_statistics_for_table_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), ColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the GetColumnStatisticsForTable
# response: same ColumnStatisticsList/Errors layout as the partition
# variant above in the Glue API.
.glue$get_column_statistics_for_table_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), 
LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Errors = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetConnection request
# (CatalogId/Name/HidePassword).
.glue$get_connection_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), HidePassword = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the GetConnection response: a single
# Connection structure.
.glue$get_connection_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Connection = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the Glue GetConnections (paginated)
# request: CatalogId, a Filter structure, HidePassword, NextToken,
# MaxResults.
.glue$get_connections_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Filter = structure(list(MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), HidePassword = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Tagged serialization template for the GetConnections response: a
# ConnectionList plus a pagination NextToken.
.glue$get_connections_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ConnectionList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$get_crawler_input <- function(...) {
  # Request template: a single string member `Name`.
  call_args <- c(as.list(environment()), list(...))
  req_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.glue$get_crawler_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Crawler = structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = "string")), TablePrefix 
= structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_crawler_metrics_input <- function(...) {
  # Request marshalling shape for the Glue GetCrawlerMetrics operation.
  # Leaves are typed placeholders (logical(0) + serialization tags); the
  # shape and the call arguments are handed to populate().
  args <- c(as.list(environment()), list(...))
  crawler_names <- structure(
    list(structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "list")
  )
  shape <- structure(
    list(
      CrawlerNameList = crawler_names,
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_crawler_metrics_output <- function(...) {
  # Response unmarshalling shape for the Glue GetCrawlerMetrics operation.
  args <- c(as.list(environment()), list(...))
  # Typed leaf placeholder, as used throughout these generated shapes.
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  metrics_entry <- structure(
    list(
      CrawlerName = scalar("string"),
      TimeLeftSeconds = scalar("double"),
      StillEstimating = scalar("boolean"),
      LastRuntimeSeconds = scalar("double"),
      MedianRuntimeSeconds = scalar("double"),
      TablesCreated = scalar("integer"),
      TablesUpdated = scalar("integer"),
      TablesDeleted = scalar("integer")
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      CrawlerMetricsList = structure(list(metrics_entry), tags = list(type = "list")),
      NextToken = scalar("string")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_crawlers_input <- function(...) {
  # Request marshalling shape for the Glue GetCrawlers operation.
  args <- c(as.list(environment()), list(...))
  max_results <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  next_token <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(MaxResults = max_results, NextToken = next_token),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_crawlers_output <- function(...) {
# Response unmarshalling shape for the Glue GetCrawlers operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package.
args <- c(as.list(environment()), list(...))
# Top level: Crawlers (a list of Crawler structures) and a NextToken string.
shape <- structure(list(Crawlers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = 
"string")), TablePrefix = structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_data_catalog_encryption_settings_input <- function(...) {
  # Request marshalling shape for the Glue GetDataCatalogEncryptionSettings
  # operation (single optional CatalogId string).
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(CatalogId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_data_catalog_encryption_settings_output <- function(...) {
  # Response unmarshalling shape for the Glue GetDataCatalogEncryptionSettings
  # operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  at_rest <- structure(
    list(
      CatalogEncryptionMode = scalar("string"),
      SseAwsKmsKeyId = scalar("string")
    ),
    tags = list(type = "structure")
  )
  password_encryption <- structure(
    list(
      ReturnConnectionPasswordEncrypted = scalar("boolean"),
      AwsKmsKeyId = scalar("string")
    ),
    tags = list(type = "structure")
  )
  settings <- structure(
    list(
      EncryptionAtRest = at_rest,
      ConnectionPasswordEncryption = password_encryption
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(DataCatalogEncryptionSettings = settings),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_database_input <- function(...) {
  # Request marshalling shape for the Glue GetDatabase operation.
  args <- c(as.list(environment()), list(...))
  string_leaf <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(CatalogId = string_leaf, Name = string_leaf),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_database_output <- function(...) {
  # Response unmarshalling shape for the Glue GetDatabase operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  # One PrincipalPermissions entry of CreateTableDefaultPermissions.
  permission_entry <- structure(
    list(
      Principal = structure(
        list(DataLakePrincipalIdentifier = scalar("string")),
        tags = list(type = "structure")
      ),
      Permissions = structure(list(scalar("string")), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  database <- structure(
    list(
      Name = scalar("string"),
      Description = scalar("string"),
      LocationUri = scalar("string"),
      Parameters = structure(list(scalar("string")), tags = list(type = "map")),
      CreateTime = scalar("timestamp"),
      CreateTableDefaultPermissions = structure(
        list(permission_entry),
        tags = list(type = "list")
      ),
      TargetDatabase = structure(
        list(CatalogId = scalar("string"), DatabaseName = scalar("string")),
        tags = list(type = "structure")
      ),
      CatalogId = scalar("string")
    ),
    tags = list(type = "structure")
  )
  shape <- structure(list(Database = database), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$get_databases_input <- function(...) {
  # Request marshalling shape for the Glue GetDatabases operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  shape <- structure(
    list(
      CatalogId = scalar("string"),
      NextToken = scalar("string"),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      ResourceShareType = scalar("string")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_databases_output <- function(...) {
  # Response unmarshalling shape for the Glue GetDatabases operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  # One PrincipalPermissions entry of CreateTableDefaultPermissions.
  permission_entry <- structure(
    list(
      Principal = structure(
        list(DataLakePrincipalIdentifier = scalar("string")),
        tags = list(type = "structure")
      ),
      Permissions = structure(list(scalar("string")), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  database_entry <- structure(
    list(
      Name = scalar("string"),
      Description = scalar("string"),
      LocationUri = scalar("string"),
      Parameters = structure(list(scalar("string")), tags = list(type = "map")),
      CreateTime = scalar("timestamp"),
      CreateTableDefaultPermissions = structure(
        list(permission_entry),
        tags = list(type = "list")
      ),
      TargetDatabase = structure(
        list(CatalogId = scalar("string"), DatabaseName = scalar("string")),
        tags = list(type = "structure")
      ),
      CatalogId = scalar("string")
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      DatabaseList = structure(list(database_entry), tags = list(type = "list")),
      NextToken = scalar("string")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_dataflow_graph_input <- function(...) {
  # Request marshalling shape for the Glue GetDataflowGraph operation.
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(PythonScript = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_dataflow_graph_output <- function(...) {
  # Response unmarshalling shape for the Glue GetDataflowGraph operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  code_gen_arg <- structure(
    list(
      Name = scalar("string"),
      Value = scalar("string"),
      Param = scalar("boolean")
    ),
    tags = list(type = "structure")
  )
  dag_node <- structure(
    list(
      Id = scalar("string"),
      NodeType = scalar("string"),
      Args = structure(list(code_gen_arg), tags = list(type = "list")),
      LineNumber = scalar("integer")
    ),
    tags = list(type = "structure")
  )
  dag_edge <- structure(
    list(
      Source = scalar("string"),
      Target = scalar("string"),
      TargetParameter = scalar("string")
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      DagNodes = structure(list(dag_node), tags = list(type = "list")),
      DagEdges = structure(list(dag_edge), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_dev_endpoint_input <- function(...) {
  # Request marshalling shape for the Glue GetDevEndpoint operation.
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(EndpointName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_dev_endpoint_output <- function(...) {
# Response unmarshalling shape for the Glue GetDevEndpoint operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package.
args <- c(as.list(environment()), list(...))
# Single DevEndpoint structure with endpoint, network, and status fields.
shape <- structure(list(DevEndpoint = structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags 
= list(type = "structure"))
return(populate(args, shape))
}
.glue$get_dev_endpoints_input <- function(...) {
  # Request marshalling shape for the Glue GetDevEndpoints operation.
  args <- c(as.list(environment()), list(...))
  max_results <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  next_token <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(MaxResults = max_results, NextToken = next_token),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_dev_endpoints_output <- function(...) {
# Response unmarshalling shape for the Glue GetDevEndpoints operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package.
args <- c(as.list(environment()), list(...))
# Top level: DevEndpoints (list of DevEndpoint structures) and a NextToken.
shape <- structure(list(DevEndpoints = structure(list(structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = 
"structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_input <- function(...) {
  # Request marshalling shape for the Glue GetJob operation.
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(JobName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_job_output <- function(...) {
# Response unmarshalling shape for the Glue GetJob operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package. Note
# AllocatedCapacity carries a deprecation tag pointing at MaxCapacity.
args <- c(as.list(environment()), list(...))
# Single Job structure: command, arguments, retry/timeout, and worker fields.
shape <- structure(list(Job = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), 
tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_bookmark_input <- function(...) {
  # Request marshalling shape for the Glue GetJobBookmark operation.
  args <- c(as.list(environment()), list(...))
  string_leaf <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(JobName = string_leaf, RunId = string_leaf),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_job_bookmark_output <- function(...) {
  # Response unmarshalling shape for the Glue GetJobBookmark operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  bookmark_entry <- structure(
    list(
      JobName = scalar("string"),
      Version = scalar("integer"),
      Run = scalar("integer"),
      Attempt = scalar("integer"),
      PreviousRunId = scalar("string"),
      RunId = scalar("string"),
      JobBookmark = scalar("string")
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(JobBookmarkEntry = bookmark_entry),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_job_run_input <- function(...) {
  # Request marshalling shape for the Glue GetJobRun operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  shape <- structure(
    list(
      JobName = scalar("string"),
      RunId = scalar("string"),
      PredecessorsIncluded = scalar("boolean")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_job_run_output <- function(...) {
# Response unmarshalling shape for the Glue GetJobRun operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package. Note
# AllocatedCapacity carries a deprecation tag pointing at MaxCapacity.
args <- c(as.list(environment()), list(...))
# Single JobRun structure: run identity, timing, state, and capacity fields.
shape <- structure(list(JobRun = structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_runs_input <- function(...) {
  # Request marshalling shape for the Glue GetJobRuns operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  shape <- structure(
    list(
      JobName = scalar("string"),
      NextToken = scalar("string"),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_job_runs_output <- function(...) {
# Response unmarshalling shape for the Glue GetJobRuns operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package. Note
# AllocatedCapacity carries a deprecation tag pointing at MaxCapacity.
args <- c(as.list(environment()), list(...))
# Top level: JobRuns (list of JobRun structures) and a NextToken string.
shape <- structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_jobs_input <- function(...) {
  # Request marshalling shape for the Glue GetJobs operation.
  args <- c(as.list(environment()), list(...))
  next_token <- structure(logical(0), tags = list(type = "string"))
  max_results <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  shape <- structure(
    list(NextToken = next_token, MaxResults = max_results),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_jobs_output <- function(...) {
# Response unmarshalling shape for the Glue GetJobs operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package. Note
# AllocatedCapacity carries a deprecation tag pointing at MaxCapacity.
args <- c(as.list(environment()), list(...))
# Top level: Jobs (list of Job structures) and a NextToken string.
shape <- structure(list(Jobs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", 
box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_ml_task_run_input <- function(...) {
  # Request marshalling shape for the Glue GetMLTaskRun operation.
  args <- c(as.list(environment()), list(...))
  string_leaf <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(TransformId = string_leaf, TaskRunId = string_leaf),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_ml_task_run_output <- function(...) {
  # Response unmarshalling shape for the Glue GetMLTaskRun operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  # TaskRunProperties: the task type plus one property block per task kind.
  properties <- structure(
    list(
      TaskType = scalar("string"),
      ImportLabelsTaskRunProperties = structure(
        list(InputS3Path = scalar("string"), Replace = scalar("boolean")),
        tags = list(type = "structure")
      ),
      ExportLabelsTaskRunProperties = structure(
        list(OutputS3Path = scalar("string")),
        tags = list(type = "structure")
      ),
      LabelingSetGenerationTaskRunProperties = structure(
        list(OutputS3Path = scalar("string")),
        tags = list(type = "structure")
      ),
      FindMatchesTaskRunProperties = structure(
        list(
          JobId = scalar("string"),
          JobName = scalar("string"),
          JobRunId = scalar("string")
        ),
        tags = list(type = "structure")
      )
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      TransformId = scalar("string"),
      TaskRunId = scalar("string"),
      Status = scalar("string"),
      LogGroupName = scalar("string"),
      Properties = properties,
      ErrorString = scalar("string"),
      StartedOn = scalar("timestamp"),
      LastModifiedOn = scalar("timestamp"),
      CompletedOn = scalar("timestamp"),
      ExecutionTime = scalar("integer")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_ml_task_runs_input <- function(...) {
  # Request marshalling shape for the Glue GetMLTaskRuns operation.
  args <- c(as.list(environment()), list(...))
  scalar <- function(type) structure(logical(0), tags = list(type = type))
  task_run_filter <- structure(
    list(
      TaskRunType = scalar("string"),
      Status = scalar("string"),
      StartedBefore = scalar("timestamp"),
      StartedAfter = scalar("timestamp")
    ),
    tags = list(type = "structure")
  )
  sort_criteria <- structure(
    list(Column = scalar("string"), SortDirection = scalar("string")),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      TransformId = scalar("string"),
      NextToken = scalar("string"),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Filter = task_run_filter,
      Sort = sort_criteria
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$get_ml_task_runs_output <- function(...) {
# Response unmarshalling shape for the Glue GetMLTaskRuns operation.
# Auto-generated: each leaf is a typed placeholder (logical(0) plus
# serialization tags); the shape and the call arguments are handed to
# populate(), which is defined elsewhere in this package.
args <- c(as.list(environment()), list(...))
# Top level: TaskRuns (list of TaskRun structures) and a NextToken string.
shape <- structure(list(TaskRuns = structure(list(structure(list(TransformId = structure(logical(0), tags = list(type = "string")), TaskRunId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), Properties = structure(list(TaskType = structure(logical(0), tags = list(type = "string")), ImportLabelsTaskRunProperties = structure(list(InputS3Path = structure(logical(0), tags = list(type = "string")), Replace = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), ExportLabelsTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), LabelingSetGenerationTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), FindMatchesTaskRunProperties = structure(list(JobId = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ErrorString = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionTime = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetMLTransform operation: a single
# TransformId string field. The zero-length placeholder carries the wire
# type in its "tags" attribute and is filled in by populate() from the
# caller's arguments.
.glue$get_ml_transform_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  transform_id <- structure(logical(0), tags = list(type = "string"))
  req <- structure(
    list(TransformId = transform_id),
    tags = list(type = "structure")
  )
  populate(params, req)
}
# Response shape for the AWS Glue GetMLTransform operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_ml_transform_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Flat transform record: id/name/description/status, timestamps, input
# tables, FindMatches parameters and evaluation metrics (incl. confusion
# matrix), schema, and run-sizing fields (Role, MaxCapacity, WorkerType,
# NumberOfWorkers, Timeout, MaxRetries).
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), EvaluationMetrics = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesMetrics = structure(list(AreaUnderPRCurve = structure(logical(0), tags = list(type = "double", box = TRUE)), Precision = structure(logical(0), tags = list(type = "double", box = TRUE)), Recall = structure(logical(0), tags = list(type = "double", box = TRUE)), F1 = structure(logical(0), tags = list(type = "double", box = TRUE)), ConfusionMatrix = structure(list(NumTruePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalsePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), 
NumTrueNegatives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalseNegatives = structure(logical(0), tags = list(type = "long", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), LabelCount = structure(logical(0), tags = list(type = "integer")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetMLTransforms operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_ml_transforms_input <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Pagination (NextToken/MaxResults), a Filter sub-structure (name, type,
# status, version, created/modified time bounds, schema columns), and a
# Sort sub-structure (column + direction).
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), Filter = structure(list(Name = structure(logical(0), tags = list(type = "string")), TransformType = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), CreatedBefore = structure(logical(0), tags = list(type = "timestamp")), CreatedAfter = structure(logical(0), tags = list(type = "timestamp")), LastModifiedBefore = structure(logical(0), tags = list(type = "timestamp")), LastModifiedAfter = structure(logical(0), tags = list(type = "timestamp")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Sort = structure(list(Column = structure(logical(0), tags = list(type = "string")), SortDirection = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the AWS Glue GetMLTransforms operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_ml_transforms_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Transforms: list of transform records (same per-item layout as the
# GetMLTransform response); NextToken: pagination cursor.
shape <- structure(list(Transforms = structure(list(structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), EvaluationMetrics = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesMetrics = structure(list(AreaUnderPRCurve = structure(logical(0), tags = list(type = "double", box = TRUE)), Precision = structure(logical(0), tags = list(type = "double", box = TRUE)), Recall = structure(logical(0), tags = list(type = "double", box = TRUE)), F1 = structure(logical(0), tags = list(type = "double", box = TRUE)), ConfusionMatrix = structure(list(NumTruePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalsePositives = structure(logical(0), 
tags = list(type = "long", box = TRUE)), NumTrueNegatives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalseNegatives = structure(logical(0), tags = list(type = "long", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), LabelCount = structure(logical(0), tags = list(type = "integer")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetMapping operation.
# Built from three reusable sub-shapes: a catalog table reference (Source
# and each Sink) and a list of code-gen node arguments (one per location
# kind: Jdbc, S3, DynamoDB). populate() fills the placeholders from the
# caller's arguments.
.glue$get_mapping_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  boolean_field <- structure(logical(0), tags = list(type = "boolean"))
  # A catalog table reference: database + table name.
  table_ref <- structure(
    list(DatabaseName = string_field, TableName = string_field),
    tags = list(type = "structure")
  )
  # A list of Name/Value/Param argument triples.
  code_gen_args <- structure(
    list(structure(
      list(Name = string_field, Value = string_field, Param = boolean_field),
      tags = list(type = "structure")
    )),
    tags = list(type = "list")
  )
  shape <- structure(
    list(
      Source = table_ref,
      Sinks = structure(list(table_ref), tags = list(type = "list")),
      Location = structure(
        list(Jdbc = code_gen_args, S3 = code_gen_args, DynamoDB = code_gen_args),
        tags = list(type = "structure")
      )
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetMapping operation: a Mapping list
# whose entries map a source table/path/type to a target table/path/type
# (all string fields). populate() fills the placeholders from the caller's
# arguments.
.glue$get_mapping_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  mapping_entry <- structure(
    list(
      SourceTable = string_field,
      SourcePath = string_field,
      SourceType = string_field,
      TargetTable = string_field,
      TargetPath = string_field,
      TargetType = string_field
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(Mapping = structure(list(mapping_entry), tags = list(type = "list"))),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetPartition operation: catalog id,
# database and table names, plus the list of partition key values that
# identify the partition. populate() fills the placeholders from the
# caller's arguments.
.glue$get_partition_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(
      CatalogId = string_field,
      DatabaseName = string_field,
      TableName = string_field,
      PartitionValues = structure(list(string_field), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetPartition operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_partition_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Partition record: key Values, database/table names, timestamps, a full
# StorageDescriptor (columns, location, formats, serde, bucketing/sort
# columns, skew info), free-form Parameters map, and catalog id.
shape <- structure(list(Partition = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = 
structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetPartitions operation: catalog/database/
# table identifiers, a filter Expression, pagination fields, and an optional
# Segment (number + total) for parallel scans. populate() fills the
# placeholders from the caller's arguments.
.glue$get_partitions_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  plain_int <- structure(logical(0), tags = list(type = "integer"))
  segment <- structure(
    list(SegmentNumber = plain_int, TotalSegments = plain_int),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      CatalogId = string_field,
      DatabaseName = string_field,
      TableName = string_field,
      Expression = string_field,
      NextToken = string_field,
      Segment = segment,
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetPartitions operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_partitions_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Partitions: list of partition records (same per-item layout as the
# GetPartition response); NextToken: pagination cursor.
shape <- structure(list(Partitions = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetPlan operation.
# Built from reusable sub-shapes: a column Mapping entry, a catalog table
# reference (Source and each Sink), a code-gen argument list (per location
# kind), plus the target script Language. populate() fills the placeholders
# from the caller's arguments.
.glue$get_plan_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  boolean_field <- structure(logical(0), tags = list(type = "boolean"))
  # One source-to-target column mapping entry (all string fields).
  mapping_entry <- structure(
    list(
      SourceTable = string_field,
      SourcePath = string_field,
      SourceType = string_field,
      TargetTable = string_field,
      TargetPath = string_field,
      TargetType = string_field
    ),
    tags = list(type = "structure")
  )
  # A catalog table reference: database + table name.
  table_ref <- structure(
    list(DatabaseName = string_field, TableName = string_field),
    tags = list(type = "structure")
  )
  # A list of Name/Value/Param argument triples.
  code_gen_args <- structure(
    list(structure(
      list(Name = string_field, Value = string_field, Param = boolean_field),
      tags = list(type = "structure")
    )),
    tags = list(type = "list")
  )
  shape <- structure(
    list(
      Mapping = structure(list(mapping_entry), tags = list(type = "list")),
      Source = table_ref,
      Sinks = structure(list(table_ref), tags = list(type = "list")),
      Location = structure(
        list(Jdbc = code_gen_args, S3 = code_gen_args, DynamoDB = code_gen_args),
        tags = list(type = "structure")
      ),
      Language = string_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetPlan operation: the generated script
# as either PythonScript or ScalaCode (both strings). populate() fills the
# placeholders from the caller's arguments.
.glue$get_plan_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(PythonScript = string_field, ScalaCode = string_field),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetResourcePolicies operation: just the
# pagination pair NextToken (string) and MaxResults (boxed integer).
# populate() fills the placeholders from the caller's arguments.
.glue$get_resource_policies_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  shape <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetResourcePolicies operation: a list of
# policy records (policy JSON, hash, create/update timestamps) plus a
# pagination NextToken. populate() fills the placeholders from the caller's
# arguments.
.glue$get_resource_policies_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  timestamp_field <- structure(logical(0), tags = list(type = "timestamp"))
  policy_record <- structure(
    list(
      PolicyInJson = string_field,
      PolicyHash = string_field,
      CreateTime = timestamp_field,
      UpdateTime = timestamp_field
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      GetResourcePoliciesResponseList = structure(
        list(policy_record),
        tags = list(type = "list")
      ),
      NextToken = string_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetResourcePolicy operation: a single
# ResourceArn string field. populate() fills the placeholder from the
# caller's arguments.
.glue$get_resource_policy_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  resource_arn <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(ResourceArn = resource_arn),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetResourcePolicy operation: the policy
# JSON and its hash (strings) plus creation/update timestamps. populate()
# fills the placeholders from the caller's arguments.
.glue$get_resource_policy_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  timestamp_field <- structure(logical(0), tags = list(type = "timestamp"))
  shape <- structure(
    list(
      PolicyInJson = string_field,
      PolicyHash = string_field,
      CreateTime = timestamp_field,
      UpdateTime = timestamp_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetSecurityConfiguration operation: a
# single Name string field. populate() fills the placeholder from the
# caller's arguments.
.glue$get_security_configuration_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(Name = name_field),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetSecurityConfiguration operation: one
# SecurityConfiguration record (name, creation timestamp, and an
# EncryptionConfiguration with S3 / CloudWatch / job-bookmarks settings).
# populate() fills the placeholders from the caller's arguments.
.glue$get_security_configuration_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  # S3Encryption is a list of mode + KMS key entries; the other two are
  # single structures of mode + KMS key.
  s3_encryption <- structure(
    list(structure(
      list(S3EncryptionMode = string_field, KmsKeyArn = string_field),
      tags = list(type = "structure")
    )),
    tags = list(type = "list")
  )
  cloudwatch_encryption <- structure(
    list(CloudWatchEncryptionMode = string_field, KmsKeyArn = string_field),
    tags = list(type = "structure")
  )
  job_bookmarks_encryption <- structure(
    list(JobBookmarksEncryptionMode = string_field, KmsKeyArn = string_field),
    tags = list(type = "structure")
  )
  encryption_configuration <- structure(
    list(
      S3Encryption = s3_encryption,
      CloudWatchEncryption = cloudwatch_encryption,
      JobBookmarksEncryption = job_bookmarks_encryption
    ),
    tags = list(type = "structure")
  )
  security_configuration <- structure(
    list(
      Name = string_field,
      CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")),
      EncryptionConfiguration = encryption_configuration
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(SecurityConfiguration = security_configuration),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetSecurityConfigurations operation: just
# the pagination pair MaxResults (boxed integer) and NextToken (string).
# populate() fills the placeholders from the caller's arguments.
.glue$get_security_configurations_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  shape <- structure(
    list(
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetSecurityConfigurations operation: a
# list of security-configuration records (same per-item layout as the
# GetSecurityConfiguration response) plus a pagination NextToken.
# populate() fills the placeholders from the caller's arguments.
.glue$get_security_configurations_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  # S3Encryption is a list of mode + KMS key entries; the other two are
  # single structures of mode + KMS key.
  s3_encryption <- structure(
    list(structure(
      list(S3EncryptionMode = string_field, KmsKeyArn = string_field),
      tags = list(type = "structure")
    )),
    tags = list(type = "list")
  )
  cloudwatch_encryption <- structure(
    list(CloudWatchEncryptionMode = string_field, KmsKeyArn = string_field),
    tags = list(type = "structure")
  )
  job_bookmarks_encryption <- structure(
    list(JobBookmarksEncryptionMode = string_field, KmsKeyArn = string_field),
    tags = list(type = "structure")
  )
  encryption_configuration <- structure(
    list(
      S3Encryption = s3_encryption,
      CloudWatchEncryption = cloudwatch_encryption,
      JobBookmarksEncryption = job_bookmarks_encryption
    ),
    tags = list(type = "structure")
  )
  security_configuration <- structure(
    list(
      Name = string_field,
      CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")),
      EncryptionConfiguration = encryption_configuration
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      SecurityConfigurations = structure(
        list(security_configuration),
        tags = list(type = "list")
      ),
      NextToken = string_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Request shape for the AWS Glue GetTable operation: catalog id, database
# name, and table Name (all strings). populate() fills the placeholders
# from the caller's arguments.
.glue$get_table_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(
      CatalogId = string_field,
      DatabaseName = string_field,
      Name = string_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetTable operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_table_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# Table record: identity/description fields, timestamps, Retention, a full
# StorageDescriptor (columns, location, formats, serde, bucketing/sort
# columns, skew info), PartitionKeys, view texts, TableType, Parameters
# map, CreatedBy, Lake Formation flag, TargetTable reference, catalog id.
shape <- structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = 
list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = 
"structure"))
return(populate(args, shape))
}
# Request shape for the AWS Glue GetTableVersion operation: catalog id,
# database/table names, and the VersionId to fetch (all strings).
# populate() fills the placeholders from the caller's arguments.
.glue$get_table_version_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  string_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(
      CatalogId = string_field,
      DatabaseName = string_field,
      TableName = string_field,
      VersionId = string_field
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the AWS Glue GetTableVersion operation.
# Auto-generated service-model boilerplate: zero-length placeholders tagged
# with wire types, handed to populate() together with the caller's arguments.
.glue$get_table_version_output <- function(...) {
# Gather all caller-supplied arguments (named and positional) into one list.
args <- c(as.list(environment()), list(...))
# TableVersion: a full Table record (same layout as the GetTable response)
# paired with the VersionId string it corresponds to.
shape <- structure(list(TableVersion = structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = 
structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_table_versions_input <- function(...) {
  # Gather all caller-supplied arguments (named and positional) into one list.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders; populate() replaces them with the caller's values.
  str_field <- structure(logical(0), tags = list(type = "string"))
  boxed_int <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  # Request template for the get_table_versions operation.
  shape <- structure(
    list(
      CatalogId = str_field,
      DatabaseName = str_field,
      TableName = str_field,
      NextToken = str_field,
      MaxResults = boxed_int
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
# Response template for the get_table_versions operation: TableVersions is a
# list of records, each carrying a full Table description plus a VersionId;
# NextToken is the pagination cursor. Every leaf is a typed placeholder
# (logical(0) with type tags) that populate() fills from `args`.
# NOTE(review): this looks like machine-generated paws/AWS SDK shape code —
# changes should normally be made in the generator, not by hand.
.glue$get_table_versions_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TableVersions = structure(list(structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), 
SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_tables_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  boxed_int <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  # Request template for the get_tables operation.
  shape <- structure(
    list(
      CatalogId = str_field,
      DatabaseName = str_field,
      Expression = str_field,
      NextToken = str_field,
      MaxResults = boxed_int
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
# Response template for the get_tables operation: TableList is a list of full
# Table descriptions (storage descriptor, partition keys, view text, etc.);
# NextToken is the pagination cursor. Leaves are typed placeholders that
# populate() fills from `args`.
# NOTE(review): machine-generated paws/AWS SDK shape code — prefer editing the
# generator over hand-editing this literal.
.glue$get_tables_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TableList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = 
structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_tags_input <- function(...) {
  # Capture every supplied argument into a single named list.
  args <- c(as.list(environment()), list(...))
  # Request template: a single string field naming the tagged resource.
  shape <- structure(
    list(
      ResourceArn = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_tags_output <- function(...) {
  # Capture every supplied argument into a single named list.
  args <- c(as.list(environment()), list(...))
  # Response template: Tags is a string-valued map placeholder.
  tag_map <- structure(
    list(structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "map")
  )
  shape <- structure(list(Tags = tag_map), tags = list(type = "structure"))
  return(populate(args, shape))
}
.glue$get_trigger_input <- function(...) {
  # Capture every supplied argument into a single named list.
  args <- c(as.list(environment()), list(...))
  # Request template: a single string field, the trigger name.
  shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
# Response template for the get_trigger operation: a single Trigger structure
# (name, workflow, type/state, schedule, a list of Actions and an optional
# Predicate with Conditions). Leaves are typed placeholders filled from
# `args` by populate().
# NOTE(review): machine-generated paws/AWS SDK shape code.
.glue$get_trigger_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_triggers_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  boxed_int <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  # Request template for the get_triggers operation.
  shape <- structure(
    list(
      NextToken = str_field,
      DependentJobName = str_field,
      MaxResults = boxed_int
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
# Response template for the get_triggers operation: Triggers is a list of the
# same Trigger structure returned by get_trigger_output, plus a NextToken
# pagination cursor. Leaves are typed placeholders filled from `args` by
# populate().
# NOTE(review): machine-generated paws/AWS SDK shape code.
.glue$get_triggers_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Triggers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_user_defined_function_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed string placeholder filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  # Request template for the get_user_defined_function operation.
  shape <- structure(
    list(
      CatalogId = str_field,
      DatabaseName = str_field,
      FunctionName = str_field
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_user_defined_function_output <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  # A single UserDefinedFunction record: identifying strings, a creation
  # timestamp, and a list of ResourceUri sub-structures.
  udf <- structure(
    list(
      FunctionName = str_field,
      DatabaseName = str_field,
      ClassName = str_field,
      OwnerName = str_field,
      OwnerType = str_field,
      CreateTime = structure(logical(0), tags = list(type = "timestamp")),
      ResourceUris = structure(
        list(structure(
          list(ResourceType = str_field, Uri = str_field),
          tags = list(type = "structure")
        )),
        tags = list(type = "list")
      ),
      CatalogId = str_field
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(UserDefinedFunction = udf),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_user_defined_functions_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  boxed_int <- structure(logical(0), tags = list(type = "integer", box = TRUE))
  # Request template for the get_user_defined_functions operation.
  shape <- structure(
    list(
      CatalogId = str_field,
      DatabaseName = str_field,
      Pattern = str_field,
      NextToken = str_field,
      MaxResults = boxed_int
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_user_defined_functions_output <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed placeholders filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  # One UserDefinedFunction record, as in get_user_defined_function_output.
  udf <- structure(
    list(
      FunctionName = str_field,
      DatabaseName = str_field,
      ClassName = str_field,
      OwnerName = str_field,
      OwnerType = str_field,
      CreateTime = structure(logical(0), tags = list(type = "timestamp")),
      ResourceUris = structure(
        list(structure(
          list(ResourceType = str_field, Uri = str_field),
          tags = list(type = "structure")
        )),
        tags = list(type = "list")
      ),
      CatalogId = str_field
    ),
    tags = list(type = "structure")
  )
  # Response template: a list of UDF records plus a pagination NextToken.
  shape <- structure(
    list(
      UserDefinedFunctions = structure(list(udf), tags = list(type = "list")),
      NextToken = str_field
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_workflow_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Request template: workflow name plus a boxed boolean flag.
  shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      IncludeGraph = structure(
        logical(0),
        tags = list(type = "boolean", box = TRUE)
      )
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
# Response template for the get_workflow operation: a Workflow structure with
# run properties, a LastRun record (statistics plus a Graph of trigger/job/
# crawler Nodes and Edges), and a second top-level Graph of the same node
# shape. Leaves are typed placeholders filled from `args` by populate().
# NOTE(review): machine-generated paws/AWS SDK shape code — the deeply nested
# literal is kept byte-identical; edit the generator instead of this text.
.glue$get_workflow_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Workflow = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), LastRun = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = 
"string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags 
= list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = 
structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = 
TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_workflow_run_input <- function(...) {
  # Collect the caller's arguments before any locals are introduced.
  args <- c(as.list(environment()), list(...))
  # Typed string placeholder filled in by populate().
  str_field <- structure(logical(0), tags = list(type = "string"))
  # Request template: workflow name, run id, and a boxed boolean flag.
  shape <- structure(
    list(
      Name = str_field,
      RunId = str_field,
      IncludeGraph = structure(
        logical(0),
        tags = list(type = "boolean", box = TRUE)
      )
    ),
    tags = list(type = "structure")
  )
  return(populate(args, shape))
}
.glue$get_workflow_run_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Run = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", 
box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), 
tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_workflow_run_properties_input <- function(...) {
  # Request shape for Glue GetWorkflowRunProperties; caller arguments are
  # merged into the empty tagged template via populate().
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$get_workflow_run_properties_output <- function(...) {
  # Response shape for Glue GetWorkflowRunProperties (a string map).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      RunProperties = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$get_workflow_runs_input <- function(...) {
  # Request shape for Glue GetWorkflowRuns (paginated; optional graph).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      IncludeGraph = structure(logical(0), tags = list(type = "boolean", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response shape for the AWS Glue GetWorkflowRuns operation.
# NOTE(review): this looks machine-generated (paws-style tagged shape
# template mirroring the Glue WorkflowRun response model, including the
# deprecated AllocatedCapacity field) — regenerate rather than hand-edit.
.glue$get_workflow_runs_output <- function(...) {
# Capture caller-supplied values first (environment is still empty here),
# then merge them into the empty template via populate().
args <- c(as.list(environment()), list(...))
shape <- structure(list(Runs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type 
= "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = 
"list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$import_catalog_to_glue_input <- function(...) {
  # Request shape for Glue ImportCatalogToGlue.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(CatalogId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$import_catalog_to_glue_output <- function(...) {
  # Empty response shape for Glue ImportCatalogToGlue.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
.glue$list_crawlers_input <- function(...) {
  # Request shape for Glue ListCrawlers (paginated, tag-filterable).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string")),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_crawlers_output <- function(...) {
  # Response shape for Glue ListCrawlers.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      CrawlerNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_dev_endpoints_input <- function(...) {
  # Request shape for Glue ListDevEndpoints (paginated, tag-filterable).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_dev_endpoints_output <- function(...) {
  # Response shape for Glue ListDevEndpoints.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      DevEndpointNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_jobs_input <- function(...) {
  # Request shape for Glue ListJobs (paginated, tag-filterable).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_jobs_output <- function(...) {
  # Response shape for Glue ListJobs.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      JobNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request shape for the AWS Glue ListMLTransforms operation (pagination,
# a Filter sub-structure, sort criteria and a tag map).
# NOTE(review): appears machine-generated (paws-style shape template) —
# regenerate rather than hand-edit the nested literal below.
.glue$list_ml_transforms_input <- function(...) {
# Capture caller-supplied values first (environment is still empty here),
# then merge them into the empty template via populate().
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), Filter = structure(list(Name = structure(logical(0), tags = list(type = "string")), TransformType = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), CreatedBefore = structure(logical(0), tags = list(type = "timestamp")), CreatedAfter = structure(logical(0), tags = list(type = "timestamp")), LastModifiedBefore = structure(logical(0), tags = list(type = "timestamp")), LastModifiedAfter = structure(logical(0), tags = list(type = "timestamp")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Sort = structure(list(Column = structure(logical(0), tags = list(type = "string")), SortDirection = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$list_ml_transforms_output <- function(...) {
  # Response shape for Glue ListMLTransforms.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TransformIds = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_triggers_input <- function(...) {
  # Request shape for Glue ListTriggers (paginated, tag-filterable).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      DependentJobName = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_triggers_output <- function(...) {
  # Response shape for Glue ListTriggers.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TriggerNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_workflows_input <- function(...) {
  # Request shape for Glue ListWorkflows (paginated).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$list_workflows_output <- function(...) {
  # Response shape for Glue ListWorkflows.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Workflows = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_data_catalog_encryption_settings_input <- function(...) {
  # Request shape for Glue PutDataCatalogEncryptionSettings.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DataCatalogEncryptionSettings = structure(
        list(
          EncryptionAtRest = structure(
            list(
              CatalogEncryptionMode = structure(logical(0), tags = list(type = "string")),
              SseAwsKmsKeyId = structure(logical(0), tags = list(type = "string"))
            ),
            tags = list(type = "structure")
          ),
          ConnectionPasswordEncryption = structure(
            list(
              ReturnConnectionPasswordEncrypted = structure(logical(0), tags = list(type = "boolean")),
              AwsKmsKeyId = structure(logical(0), tags = list(type = "string"))
            ),
            tags = list(type = "structure")
          )
        ),
        tags = list(type = "structure")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_data_catalog_encryption_settings_output <- function(...) {
  # Empty response shape for Glue PutDataCatalogEncryptionSettings.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
.glue$put_resource_policy_input <- function(...) {
  # Request shape for Glue PutResourcePolicy.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      PolicyInJson = structure(logical(0), tags = list(type = "string")),
      ResourceArn = structure(logical(0), tags = list(type = "string")),
      PolicyHashCondition = structure(logical(0), tags = list(type = "string")),
      PolicyExistsCondition = structure(logical(0), tags = list(type = "string")),
      EnableHybrid = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_resource_policy_output <- function(...) {
  # Response shape for Glue PutResourcePolicy.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(PolicyHash = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_workflow_run_properties_input <- function(...) {
  # Request shape for Glue PutWorkflowRunProperties.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string")),
      RunProperties = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_workflow_run_properties_output <- function(...) {
  # Empty response shape for Glue PutWorkflowRunProperties.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
.glue$reset_job_bookmark_input <- function(...) {
  # Request shape for Glue ResetJobBookmark.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      JobName = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$reset_job_bookmark_output <- function(...) {
  # Response shape for Glue ResetJobBookmark (returns the bookmark entry).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      JobBookmarkEntry = structure(
        list(
          JobName = structure(logical(0), tags = list(type = "string")),
          Version = structure(logical(0), tags = list(type = "integer")),
          Run = structure(logical(0), tags = list(type = "integer")),
          Attempt = structure(logical(0), tags = list(type = "integer")),
          PreviousRunId = structure(logical(0), tags = list(type = "string")),
          RunId = structure(logical(0), tags = list(type = "string")),
          JobBookmark = structure(logical(0), tags = list(type = "string"))
        ),
        tags = list(type = "structure")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$search_tables_input <- function(...) {
  # Request shape for Glue SearchTables (filters, sort criteria, pagination).
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      Filters = structure(
        list(
          structure(
            list(
              Key = structure(logical(0), tags = list(type = "string")),
              Value = structure(logical(0), tags = list(type = "string")),
              Comparator = structure(logical(0), tags = list(type = "string"))
            ),
            tags = list(type = "structure")
          )
        ),
        tags = list(type = "list")
      ),
      SearchText = structure(logical(0), tags = list(type = "string")),
      SortCriteria = structure(
        list(
          structure(
            list(
              FieldName = structure(logical(0), tags = list(type = "string")),
              Sort = structure(logical(0), tags = list(type = "string"))
            ),
            tags = list(type = "structure")
          )
        ),
        tags = list(type = "list")
      ),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      ResourceShareType = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response shape for the AWS Glue SearchTables operation (a page of full
# Table models, including StorageDescriptor / SerdeInfo / SkewedInfo).
# NOTE(review): appears machine-generated (paws-style shape template) —
# regenerate rather than hand-edit the nested literal below.
.glue$search_tables_output <- function(...) {
# Capture caller-supplied values first (environment is still empty here),
# then merge them into the empty template via populate().
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), TableList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = 
structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$start_crawler_input <- function(...) {
  # Request shape for Glue StartCrawler.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_crawler_output <- function(...) {
  # Empty response shape for Glue StartCrawler.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
.glue$start_crawler_schedule_input <- function(...) {
  # Request shape for Glue StartCrawlerSchedule.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(CrawlerName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_crawler_schedule_output <- function(...) {
  # Empty response shape for Glue StartCrawlerSchedule.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
.glue$start_export_labels_task_run_input <- function(...) {
  # Request shape for Glue StartExportLabelsTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TransformId = structure(logical(0), tags = list(type = "string")),
      OutputS3Path = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_export_labels_task_run_output <- function(...) {
  # Response shape for Glue StartExportLabelsTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(TaskRunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_import_labels_task_run_input <- function(...) {
  # Request shape for Glue StartImportLabelsTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TransformId = structure(logical(0), tags = list(type = "string")),
      InputS3Path = structure(logical(0), tags = list(type = "string")),
      ReplaceAllLabels = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_import_labels_task_run_output <- function(...) {
  # Response shape for Glue StartImportLabelsTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(TaskRunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request shape for the AWS Glue StartJobRun operation (includes the
# deprecated AllocatedCapacity field alongside MaxCapacity).
# NOTE(review): appears machine-generated (paws-style shape template) —
# regenerate rather than hand-edit the nested literal below.
.glue$start_job_run_input <- function(...) {
# Capture caller-supplied values first (environment is still empty here),
# then merge them into the empty template via populate().
args <- c(as.list(environment()), list(...))
shape <- structure(list(JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$start_job_run_output <- function(...) {
  # Response shape for Glue StartJobRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(JobRunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_ml_evaluation_task_run_input <- function(...) {
  # Request shape for Glue StartMLEvaluationTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_ml_evaluation_task_run_output <- function(...) {
  # Response shape for Glue StartMLEvaluationTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(TaskRunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_ml_labeling_set_generation_task_run_input <- function(...) {
  # Request shape for Glue StartMLLabelingSetGenerationTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TransformId = structure(logical(0), tags = list(type = "string")),
      OutputS3Path = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_ml_labeling_set_generation_task_run_output <- function(...) {
  # Response shape for Glue StartMLLabelingSetGenerationTaskRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(TaskRunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_trigger_input <- function(...) {
  # Request shape for Glue StartTrigger.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_trigger_output <- function(...) {
  # Response shape for Glue StartTrigger.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_workflow_run_input <- function(...) {
  # Request shape for Glue StartWorkflowRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$start_workflow_run_output <- function(...) {
  # Response shape for Glue StartWorkflowRun.
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(RunId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# ---- Generated AWS Glue shape constructors (crawler stop operations) ----

# Request shape for StopCrawler: the crawler Name.
.glue$stop_crawler_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(Name = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}

# Response shape for StopCrawler: empty structure (no payload fields).
.glue$stop_crawler_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}

# Request shape for StopCrawlerSchedule: the CrawlerName.
.glue$stop_crawler_schedule_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(CrawlerName = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}

# Response shape for StopCrawlerSchedule: empty structure.
.glue$stop_crawler_schedule_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# ---- Generated AWS Glue shape constructors (trigger / workflow stops) ----

# Request shape for StopTrigger: the trigger Name.
.glue$stop_trigger_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(Name = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}

# Response shape for StopTrigger: echoes the trigger Name.
.glue$stop_trigger_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(Name = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}

# Request shape for StopWorkflowRun: workflow Name plus the RunId to stop.
.glue$stop_workflow_run_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(
        Name = structure(logical(0), tags = list(type = "string")),
        RunId = structure(logical(0), tags = list(type = "string"))
      ),
      tags = list(type = "structure")
    )
  )
}

# Response shape for StopWorkflowRun: empty structure.
.glue$stop_workflow_run_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# ---- Generated AWS Glue shape constructors (resource tagging) ----

# Request shape for TagResource: resource ARN plus a string->string map of
# tags to add.
.glue$tag_resource_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(
        ResourceArn = structure(logical(0), tags = list(type = "string")),
        TagsToAdd = structure(
          list(structure(logical(0), tags = list(type = "string"))),
          tags = list(type = "map")
        )
      ),
      tags = list(type = "structure")
    )
  )
}

# Response shape for TagResource: empty structure.
.glue$tag_resource_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}

# Request shape for UntagResource: resource ARN plus a list of tag keys to
# remove.
.glue$untag_resource_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(
        ResourceArn = structure(logical(0), tags = list(type = "string")),
        TagsToRemove = structure(
          list(structure(logical(0), tags = list(type = "string"))),
          tags = list(type = "list")
        )
      ),
      tags = list(type = "structure")
    )
  )
}

# Response shape for UntagResource: empty structure.
.glue$untag_resource_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateClassifier API (auto-generated).
# Collects the caller's arguments and fills the declared wire shape (one of
# Grok/XML/Json/Csv classifier sub-structures) via populate(), a helper
# defined elsewhere in this package.
.glue$update_classifier_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateClassifier API: empty structure
# (generated; filled by populate() from the caller's arguments).
.glue$update_classifier_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateColumnStatisticsForPartition API
# (auto-generated). The shape nests per-type statistics blocks
# (boolean/date/decimal/double/long/string/binary) under each column entry;
# populate() (defined elsewhere in the package) merges the caller's
# arguments into it.
.glue$update_column_statistics_for_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = 
structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateColumnStatisticsForPartition API
# (auto-generated). Carries a list of per-column Errors, each pairing the
# rejected ColumnStatistics entry with an ErrorCode/ErrorMessage detail.
.glue$update_column_statistics_for_partition_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(ColumnStatistics = structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = 
"structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the Glue UpdateColumnStatisticsForTable API
# (auto-generated). Same per-type statistics blocks as the partition
# variant, minus PartitionValues; populate() merges caller arguments in.
.glue$update_column_statistics_for_table_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = 
"double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateColumnStatisticsForTable API
# (auto-generated). As with the partition variant, returns a list of Errors
# pairing each failed ColumnStatistics entry with an ErrorDetail.
.glue$update_column_statistics_for_table_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(ColumnStatistics = structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = 
"structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the Glue UpdateConnection API (auto-generated).
# ConnectionInput carries the new definition: type, match criteria,
# properties map, and physical (VPC) connection requirements.
.glue$update_connection_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), ConnectionInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateConnection API: empty structure
# (generated; filled by populate() from the caller's arguments).
.glue$update_connection_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateCrawler API (auto-generated).
# Targets groups the S3/JDBC/DynamoDB/Catalog target lists; the remaining
# fields mirror the CreateCrawler request. populate() (package helper)
# merges caller arguments into the shape.
.glue$update_crawler_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Schedule = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), TablePrefix = structure(logical(0), tags = list(type = "string")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), 
tags = list(type = "structure")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateCrawler API: empty structure
# (generated; filled by populate() from the caller's arguments).
.glue$update_crawler_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# ---- Generated AWS Glue shape constructors (crawler schedule update) ----

# Request shape for UpdateCrawlerSchedule: crawler name plus the new cron
# Schedule expression.
.glue$update_crawler_schedule_input <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(
        CrawlerName = structure(logical(0), tags = list(type = "string")),
        Schedule = structure(logical(0), tags = list(type = "string"))
      ),
      tags = list(type = "structure")
    )
  )
}

# Response shape for UpdateCrawlerSchedule: empty structure.
.glue$update_crawler_schedule_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateDatabase API (auto-generated).
# DatabaseInput holds the replacement definition, including default table
# permissions and an optional cross-account TargetDatabase link.
.glue$update_database_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), DatabaseInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateDatabase API: empty structure
# (generated; filled by populate() from the caller's arguments).
.glue$update_database_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateDevEndpoint API (auto-generated).
# Supports adding/deleting public keys, swapping custom libraries, and
# add/delete argument maps for the endpoint.
.glue$update_dev_endpoint_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), PublicKey = structure(logical(0), tags = list(type = "string")), AddPublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DeletePublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), CustomLibraries = structure(list(ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), UpdateEtlLibraries = structure(logical(0), tags = list(type = "boolean")), DeleteArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AddArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateDevEndpoint API: empty structure
# (generated; filled by populate() from the caller's arguments).
.glue$update_dev_endpoint_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(list(), tags = list(type = "structure"))
  )
}
# Request shape for the Glue UpdateJob API (auto-generated).
# JobUpdate carries the new job definition; note AllocatedCapacity is
# tagged deprecated in favour of MaxCapacity.
.glue$update_job_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(JobName = structure(logical(0), tags = list(type = "string")), JobUpdate = structure(list(Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateJob API: echoes the JobName
# (generated; filled by populate() from the caller's arguments).
.glue$update_job_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(JobName = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}
# Request shape for the Glue UpdateMLTransform API (auto-generated).
# Parameters nests the FindMatches tuning values; the remaining fields set
# the transform's execution resources and retry/timeout policy.
.glue$update_ml_transform_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for the Glue UpdateMLTransform API: echoes the TransformId
# (generated; filled by populate() from the caller's arguments).
.glue$update_ml_transform_output <- function(...) {
  populate(
    c(as.list(environment()), list(...)),
    structure(
      list(TransformId = structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "structure")
    )
  )
}
# Request shape for the Glue UpdatePartition API (auto-generated).
# PartitionValueList identifies the partition to replace; PartitionInput
# carries the new definition including its full StorageDescriptor.
.glue$update_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValueList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PartitionInput = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = 
structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_partition_output <- function(...) {
  # Generated interface for the UpdatePartition response: the reply carries no
  # fields, so the shape template is an empty structure that populate()
  # (paws.common) fills from the supplied arguments.
  provided <- c(as.list(environment()), list(...))
  reply_shape <- structure(list(), tags = list(type = "structure"))
  populate(provided, reply_shape)
}
.glue$update_table_input <- function(...) {
# Generated shape template for the UpdateTable request: CatalogId/DatabaseName
# plus a full TableInput (storage descriptor, partition keys, view text, etc.).
# populate() (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = 
list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), SkipArchive = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_table_output <- function(...) {
  # Generated interface for the UpdateTable response: an empty structure
  # (the API returns no fields); populate() merges the supplied arguments.
  provided <- c(as.list(environment()), list(...))
  reply_shape <- structure(list(), tags = list(type = "structure"))
  populate(provided, reply_shape)
}
.glue$update_trigger_input <- function(...) {
# Generated shape template for the UpdateTrigger request: trigger Name plus a
# TriggerUpdate (schedule, actions, predicate conditions). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), TriggerUpdate = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_trigger_output <- function(...) {
# Generated shape template for the UpdateTrigger response: the full Trigger
# (name, workflow, state, schedule, actions, predicate). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_user_defined_function_input <- function(...) {
# Generated shape template for the UpdateUserDefinedFunction request:
# catalog/database/function identifiers plus a FunctionInput (class, owner,
# resource URIs). populate() (paws.common) merges caller arguments into it.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionName = structure(logical(0), tags = list(type = "string")), FunctionInput = structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_user_defined_function_output <- function(...) {
  # Generated interface for the UpdateUserDefinedFunction response: the reply
  # has no fields, so populate() is applied to an empty structure shape.
  provided <- c(as.list(environment()), list(...))
  reply_shape <- structure(list(), tags = list(type = "structure"))
  populate(provided, reply_shape)
}
.glue$update_workflow_input <- function(...) {
  # Generated shape template for the UpdateWorkflow request: Name, Description,
  # and a string-to-string map of DefaultRunProperties. Caller arguments are
  # merged in by populate() (paws.common).
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(
      Name = str_slot,
      Description = str_slot,
      DefaultRunProperties = structure(list(str_slot), tags = list(type = "map"))
    ),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$update_workflow_output <- function(...) {
  # Generated shape template for the UpdateWorkflow response: a single Name
  # string field, filled from the supplied arguments by populate().
  provided <- c(as.list(environment()), list(...))
  reply_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(provided, reply_shape)
}
| /paws/R/glue_interfaces.R | permissive | jcheng5/paws | R | false | false | 271,887 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include glue_service.R
NULL
.glue$batch_create_partition_input <- function(...) {
# Generated shape template for the BatchCreatePartition request: catalog/
# database/table identifiers plus a PartitionInputList (values, storage
# descriptor, parameters). populate() (paws.common) merges caller arguments.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionInputList = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_create_partition_output <- function(...) {
# Generated shape template for the BatchCreatePartition response: a list of
# per-partition Errors (partition values + error code/message). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_delete_connection_input <- function(...) {
  # Generated shape template for the BatchDeleteConnection request: a CatalogId
  # string plus a list of connection names; populate() merges caller arguments.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(
      CatalogId = str_slot,
      ConnectionNameList = structure(list(str_slot), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_delete_connection_output <- function(...) {
# Generated shape template for the BatchDeleteConnection response: a list of
# Succeeded names and a map of per-connection Errors. populate() (paws.common)
# merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Succeeded = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Errors = structure(list(structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_delete_partition_input <- function(...) {
# Generated shape template for the BatchDeletePartition request: catalog/
# database/table identifiers plus PartitionsToDelete (lists of partition
# values). populate() (paws.common) merges caller arguments into it.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionsToDelete = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_delete_partition_output <- function(...) {
# Generated shape template for the BatchDeletePartition response: a list of
# per-partition Errors (partition values + error code/message). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_delete_table_input <- function(...) {
  # Generated shape template for the BatchDeleteTable request: CatalogId,
  # DatabaseName, and a list of table names to delete; populate() merges
  # the supplied arguments into the template.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(
      CatalogId = str_slot,
      DatabaseName = str_slot,
      TablesToDelete = structure(list(str_slot), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_delete_table_output <- function(...) {
# Generated shape template for the BatchDeleteTable response: a list of
# per-table Errors (table name + error code/message). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(TableName = structure(logical(0), tags = list(type = "string")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_delete_table_version_input <- function(...) {
  # Generated shape template for the BatchDeleteTableVersion request:
  # catalog/database/table identifiers plus a list of VersionIds to delete;
  # populate() merges the supplied arguments into the template.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(
      CatalogId = str_slot,
      DatabaseName = str_slot,
      TableName = str_slot,
      VersionIds = structure(list(str_slot), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_delete_table_version_output <- function(...) {
# Generated shape template for the BatchDeleteTableVersion response: a list of
# per-version Errors (table name, version id, error detail). populate()
# (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(TableName = structure(logical(0), tags = list(type = "string")), VersionId = structure(logical(0), tags = list(type = "string")), ErrorDetail = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_crawlers_input <- function(...) {
  # Generated shape template for the BatchGetCrawlers request: a single list
  # of crawler names; populate() merges the supplied arguments into it.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(CrawlerNames = structure(list(str_slot), tags = list(type = "list"))),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_get_crawlers_output <- function(...) {
# Generated shape template for the BatchGetCrawlers response: the full Crawlers
# list (targets, schedule, last crawl info, etc.) plus CrawlersNotFound.
# populate() (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Crawlers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = 
"string")), TablePrefix = structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CrawlersNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_dev_endpoints_input <- function(...) {
  # Generated shape template for the BatchGetDevEndpoints request: a single
  # list of dev endpoint names; populate() merges the supplied arguments.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(DevEndpointNames = structure(list(str_slot), tags = list(type = "list"))),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_get_dev_endpoints_output <- function(...) {
# Generated shape template for the BatchGetDevEndpoints response: the full
# DevEndpoints list (network, status, libraries, keys) plus
# DevEndpointsNotFound. populate() (paws.common) merges caller arguments.
args <- c(as.list(environment()), list(...))
shape <- structure(list(DevEndpoints = structure(list(structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = 
"structure"))), tags = list(type = "list")), DevEndpointsNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_jobs_input <- function(...) {
  # Generated shape template for the BatchGetJobs request: a single list of
  # job names; populate() merges the supplied arguments into it.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(JobNames = structure(list(str_slot), tags = list(type = "list"))),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
.glue$batch_get_jobs_output <- function(...) {
# Generated shape template for the BatchGetJobs response: the full Jobs list
# (command, arguments, capacity, worker settings) plus JobsNotFound.
# populate() (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Jobs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", 
box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), JobsNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_partition_input <- function(...) {
# Generated shape template for the BatchGetPartition request: catalog/
# database/table identifiers plus PartitionsToGet (lists of partition values).
# populate() (paws.common) merges caller arguments into this template.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionsToGet = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_partition_output <- function(...) {
# Generated shape template for the BatchGetPartition response: the full
# Partitions list (values, storage descriptor, parameters) plus
# UnprocessedKeys. populate() (paws.common) merges caller arguments into it.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Partitions = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), UnprocessedKeys = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$batch_get_triggers_input <- function(...) {
  # Generated shape template for the BatchGetTriggers request: a single list
  # of trigger names; populate() merges the supplied arguments into it.
  # NOTE: the argument capture stays first so later locals are not swept up.
  provided <- c(as.list(environment()), list(...))
  str_slot <- structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(TriggerNames = structure(list(str_slot), tags = list(type = "list"))),
    tags = list(type = "structure")
  )
  populate(provided, req_shape)
}
# Response shape for BatchGetTriggers: the resolved Trigger definitions
# (name, type, state, schedule, actions, predicate conditions) and the
# list of requested trigger names that could not be found.
.glue$batch_get_triggers_output <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(Triggers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), TriggersNotFound = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for BatchGetWorkflows: workflow names plus the optional
# IncludeGraph flag.
.glue$batch_get_workflows_input <- function(...) {
  # Environment snapshot must happen before any locals are defined.
  call_args <- c(as.list(environment()), list(...))
  workflow_names <- structure(
    list(structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "list")
  )
  include_graph <- structure(logical(0), tags = list(type = "boolean", box = TRUE))
  req_shape <- structure(
    list(Names = workflow_names, IncludeGraph = include_graph),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
# Response shape for BatchGetWorkflows: full workflow definitions, including
# last-run status/statistics and the trigger/job/crawler node graph, plus any
# requested workflow names that were missing.
.glue$batch_get_workflows_output <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(Workflows = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), LastRun = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags 
= list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = 
structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = 
list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = 
structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = 
TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), MissingWorkflows = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for BatchStopJobRun: one job name and the job-run ids to stop.
.glue$batch_stop_job_run_input <- function(...) {
  # Snapshot the (empty) local environment plus the caller's dots first.
  call_args <- c(as.list(environment()), list(...))
  run_ids <- structure(
    list(structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "list")
  )
  req_shape <- structure(
    list(
      JobName = structure(logical(0), tags = list(type = "string")),
      JobRunIds = run_ids
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
# Response shape for BatchStopJobRun: the runs stopped successfully and the
# per-run error details for those that were not.
.glue$batch_stop_job_run_output <- function(...) {
  # Capture caller arguments before any helper locals exist.
  call_args <- c(as.list(environment()), list(...))
  str_tag <- function() structure(logical(0), tags = list(type = "string"))
  success_entry <- structure(
    list(JobName = str_tag(), JobRunId = str_tag()),
    tags = list(type = "structure")
  )
  error_detail <- structure(
    list(ErrorCode = str_tag(), ErrorMessage = str_tag()),
    tags = list(type = "structure")
  )
  error_entry <- structure(
    list(JobName = str_tag(), JobRunId = str_tag(), ErrorDetail = error_detail),
    tags = list(type = "structure")
  )
  resp_shape <- structure(
    list(
      SuccessfulSubmissions = structure(list(success_entry), tags = list(type = "list")),
      Errors = structure(list(error_entry), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Request shape for CancelMLTaskRun: the transform id and the task-run id.
.glue$cancel_ml_task_run_input <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  str_tag <- function() structure(logical(0), tags = list(type = "string"))
  req_shape <- structure(
    list(TransformId = str_tag(), TaskRunId = str_tag()),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
# Response shape for CancelMLTaskRun: transform id, task-run id and status.
.glue$cancel_ml_task_run_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  str_tag <- function() structure(logical(0), tags = list(type = "string"))
  resp_shape <- structure(
    list(TransformId = str_tag(), TaskRunId = str_tag(), Status = str_tag()),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Request shape for CreateClassifier: exactly one of the Grok, XML, JSON or
# CSV classifier definitions.
.glue$create_classifier_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(GrokClassifier = structure(list(Classification = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Classification = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateClassifier returns no fields; its response shape is an empty structure.
.glue$create_classifier_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  populate(call_args, structure(list(), tags = list(type = "structure")))
}
# Request shape for CreateConnection: catalog id plus the connection
# definition (type, match criteria, properties, VPC requirements).
.glue$create_connection_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateConnection returns no fields; its response shape is an empty structure.
.glue$create_connection_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  populate(call_args, structure(list(), tags = list(type = "structure")))
}
# Request shape for CreateCrawler: crawler name, IAM role, target data stores
# (S3/JDBC/DynamoDB/catalog), schedule, classifiers, schema-change policy and
# security configuration.
.glue$create_crawler_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Schedule = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), TablePrefix = structure(logical(0), tags = list(type = "string")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), 
tags = list(type = "structure")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateCrawler returns no fields; its response shape is an empty structure.
.glue$create_crawler_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  populate(call_args, structure(list(), tags = list(type = "structure")))
}
# Request shape for CreateDatabase: catalog id plus the database definition
# (location, parameters, default table permissions, linked target database).
.glue$create_database_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# CreateDatabase returns no fields; its response shape is an empty structure.
.glue$create_database_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  populate(call_args, structure(list(), tags = list(type = "structure")))
}
# Request shape for CreateDevEndpoint: endpoint name, role, network settings
# (security groups, subnet, public keys), worker configuration and extra
# Python/JAR library paths.
.glue$create_dev_endpoint_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateDevEndpoint: provisioning status and the resolved
# endpoint configuration (network, YARN/Zeppelin addresses, workers, failure
# reason, creation timestamp).
.glue$create_dev_endpoint_output <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for CreateJob: job name, role, command, default/non-overridable
# arguments, connections, retry/timeout/capacity settings and worker
# configuration. AllocatedCapacity is tagged deprecated in favor of MaxCapacity.
.glue$create_job_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), WorkerType = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateJob: the name of the job that was created.
.glue$create_job_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(Name = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Request shape for CreateMLTransform: input record tables, transform
# parameters (FindMatches tradeoffs), role, and resource/worker configuration.
.glue$create_ml_transform_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE)), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateMLTransform: the id of the transform created.
.glue$create_ml_transform_output <- function(...) {
  call_args <- c(as.list(environment()), list(...))
  resp_shape <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
# Request shape for CreatePartition: catalog/database/table identifiers plus
# the partition definition (values, storage descriptor, parameters).
.glue$create_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
# Generated API shape template; populate() fills it from the caller's arguments.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionInput = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = 
structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_partition_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_script_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DagNodes = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), NodeType = structure(logical(0), tags = list(type = "string")), Args = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LineNumber = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), DagEdges = structure(list(structure(list(Source = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string")), TargetParameter = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Language = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_script_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PythonScript = structure(logical(0), tags = list(type = "string")), ScalaCode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_security_configuration_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), EncryptionConfiguration = structure(list(S3Encryption = structure(list(structure(list(S3EncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CloudWatchEncryption = structure(list(CloudWatchEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JobBookmarksEncryption = structure(list(JobBookmarksEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_security_configuration_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_table_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = 
list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_table_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_trigger_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Description = structure(logical(0), tags = list(type = "string")), StartOnCreation = structure(logical(0), tags = list(type = "boolean")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_trigger_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_user_defined_function_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionInput = structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_user_defined_function_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_workflow_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$create_workflow_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_classifier_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_classifier_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_column_statistics_for_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_column_statistics_for_partition_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_column_statistics_for_table_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), ColumnName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_column_statistics_for_table_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_connection_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_connection_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_crawler_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_crawler_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_database_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_database_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_dev_endpoint_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_dev_endpoint_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_job_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(JobName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_job_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(JobName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_ml_transform_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_ml_transform_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_partition_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_resource_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyHashCondition = structure(logical(0), tags = list(type = "string")), ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_resource_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_security_configuration_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_security_configuration_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_table_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_table_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_table_version_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_table_version_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_trigger_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_trigger_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_user_defined_function_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_user_defined_function_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_workflow_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$delete_workflow_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_catalog_import_status_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_catalog_import_status_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ImportStatus = structure(list(ImportCompleted = structure(logical(0), tags = list(type = "boolean")), ImportTime = structure(logical(0), tags = list(type = "timestamp")), ImportedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_classifier_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_classifier_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Classifier = structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type 
= "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_classifiers_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_classifiers_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Classifiers = structure(list(structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), Version = structure(logical(0), tags = list(type = "long")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), 
tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_column_statistics_for_partition_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_column_statistics_for_partition_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), 
LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Errors = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_column_statistics_for_table_input <- function(...) {
# Request-shape builder for the Glue GetColumnStatisticsForTable operation.
# Gathers any caller-supplied values and merges them into the tagged
# request structure via populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(
    CatalogId = string_leaf,
    DatabaseName = string_leaf,
    TableName = string_leaf,
    ColumnNames = structure(list(string_leaf), tags = list(type = "list"))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_column_statistics_for_table_output <- function(...) {
# Response-shape builder for the Glue GetColumnStatisticsForTable operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), 
LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Errors = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_connection_input <- function(...) {
# Request-shape builder for the Glue GetConnection operation.
# Any values passed by the caller are folded into the tagged request
# structure via populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(
    CatalogId = structure(logical(0), tags = list(type = "string")),
    Name = structure(logical(0), tags = list(type = "string")),
    HidePassword = structure(logical(0), tags = list(type = "boolean"))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_connection_output <- function(...) {
# Response-shape builder for the Glue GetConnection operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(Connection = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_connections_input <- function(...) {
# Request-shape builder for the Glue GetConnections operation.
# Caller-supplied values are folded into the tagged request structure
# via populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(
    CatalogId = string_leaf,
    # Optional filter narrowing results by match criteria / connection type.
    Filter = structure(
      list(
        MatchCriteria = structure(list(string_leaf), tags = list(type = "list")),
        ConnectionType = string_leaf
      ),
      tags = list(type = "structure")
    ),
    HidePassword = structure(logical(0), tags = list(type = "boolean")),
    NextToken = string_leaf,
    MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_connections_output <- function(...) {
# Response-shape builder for the Glue GetConnections operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(ConnectionList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdatedBy = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_crawler_input <- function(...) {
# Request-shape builder for the Glue GetCrawler operation (one field: Name).
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(Name = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_crawler_output <- function(...) {
# Response-shape builder for the Glue GetCrawler operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(Crawler = structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = "string")), TablePrefix 
= structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_crawler_metrics_input <- function(...) {
# Request-shape builder for the Glue GetCrawlerMetrics operation.
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(
    CrawlerNameList = structure(list(string_leaf), tags = list(type = "list")),
    MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
    NextToken = string_leaf
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_crawler_metrics_output <- function(...) {
# Response-shape builder for the Glue GetCrawlerMetrics operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(CrawlerMetricsList = structure(list(structure(list(CrawlerName = structure(logical(0), tags = list(type = "string")), TimeLeftSeconds = structure(logical(0), tags = list(type = "double")), StillEstimating = structure(logical(0), tags = list(type = "boolean")), LastRuntimeSeconds = structure(logical(0), tags = list(type = "double")), MedianRuntimeSeconds = structure(logical(0), tags = list(type = "double")), TablesCreated = structure(logical(0), tags = list(type = "integer")), TablesUpdated = structure(logical(0), tags = list(type = "integer")), TablesDeleted = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_crawlers_input <- function(...) {
# Request-shape builder for the Glue GetCrawlers operation (paginated list).
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(
    MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
    NextToken = structure(logical(0), tags = list(type = "string"))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_crawlers_output <- function(...) {
# Response-shape builder for the Glue GetCrawlers operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(Crawlers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), State = structure(logical(0), tags = list(type = 
"string")), TablePrefix = structure(logical(0), tags = list(type = "string")), Schedule = structure(list(ScheduleExpression = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CrawlElapsedTime = structure(logical(0), tags = list(type = "long")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), LastCrawl = structure(list(Status = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string")), MessagePrefix = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Version = structure(logical(0), tags = list(type = "long")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_data_catalog_encryption_settings_input <- function(...) {
# Request-shape builder for the Glue GetDataCatalogEncryptionSettings
# operation (single field: CatalogId). Supplied values are folded into
# the tagged request structure via populate() (defined elsewhere).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(CatalogId = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_data_catalog_encryption_settings_output <- function(...) {
# Response-shape builder for the Glue GetDataCatalogEncryptionSettings
# operation. Collects any supplied values and merges them into the
# expected response structure via populate() (defined elsewhere).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(DataCatalogEncryptionSettings = structure(list(EncryptionAtRest = structure(list(CatalogEncryptionMode = structure(logical(0), tags = list(type = "string")), SseAwsKmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ConnectionPasswordEncryption = structure(list(ReturnConnectionPasswordEncrypted = structure(logical(0), tags = list(type = "boolean")), AwsKmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_database_input <- function(...) {
# Request-shape builder for the Glue GetDatabase operation.
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(CatalogId = string_leaf, Name = string_leaf),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_database_output <- function(...) {
# Response-shape builder for the Glue GetDatabase operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(Database = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_databases_input <- function(...) {
# Request-shape builder for the Glue GetDatabases operation (paginated).
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(
    CatalogId = string_leaf,
    NextToken = string_leaf,
    MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
    ResourceShareType = string_leaf
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_databases_output <- function(...) {
# Response-shape builder for the Glue GetDatabases operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(DatabaseList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_dataflow_graph_input <- function(...) {
# Request-shape builder for the Glue GetDataflowGraph operation
# (single field: PythonScript). Supplied values are folded into the
# tagged request structure via populate() (defined elsewhere).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(PythonScript = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_dataflow_graph_output <- function(...) {
# Response-shape builder for the Glue GetDataflowGraph operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(DagNodes = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), NodeType = structure(logical(0), tags = list(type = "string")), Args = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LineNumber = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), DagEdges = structure(list(structure(list(Source = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string")), TargetParameter = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_dev_endpoint_input <- function(...) {
# Request-shape builder for the Glue GetDevEndpoint operation
# (single field: EndpointName). Supplied values are folded into the
# tagged request structure via populate() (defined elsewhere).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(EndpointName = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_dev_endpoint_output <- function(...) {
# Response-shape builder for the Glue GetDevEndpoint operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(DevEndpoint = structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags 
= list(type = "structure"))
return(populate(args, shape))
}
.glue$get_dev_endpoints_input <- function(...) {
# Request-shape builder for the Glue GetDevEndpoints operation (paginated).
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(
    MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
    NextToken = structure(logical(0), tags = list(type = "string"))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_dev_endpoints_output <- function(...) {
# Response-shape builder for the Glue GetDevEndpoints operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(DevEndpoints = structure(list(structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), RoleArn = structure(logical(0), tags = list(type = "string")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SubnetId = structure(logical(0), tags = list(type = "string")), YarnEndpointAddress = structure(logical(0), tags = list(type = "string")), PrivateAddress = structure(logical(0), tags = list(type = "string")), ZeppelinRemoteSparkInterpreterPort = structure(logical(0), tags = list(type = "integer")), PublicAddress = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), WorkerType = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), NumberOfNodes = structure(logical(0), tags = list(type = "integer")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string")), FailureReason = structure(logical(0), tags = list(type = "string")), LastUpdateStatus = structure(logical(0), tags = list(type = "string")), CreatedTimestamp = structure(logical(0), tags = list(type = "timestamp")), LastModifiedTimestamp = structure(logical(0), tags = list(type = "timestamp")), PublicKey = structure(logical(0), tags = list(type = "string")), PublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = 
"structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_input <- function(...) {
# Request-shape builder for the Glue GetJob operation (one field: JobName).
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
shape <- structure(
  list(JobName = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_job_output <- function(...) {
# Response-shape builder for the Glue GetJob operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(Job = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), 
tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_bookmark_input <- function(...) {
# Request-shape builder for the Glue GetJobBookmark operation.
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(JobName = string_leaf, RunId = string_leaf),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_job_bookmark_output <- function(...) {
# Response-shape builder for the Glue GetJobBookmark operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(JobBookmarkEntry = structure(list(JobName = structure(logical(0), tags = list(type = "string")), Version = structure(logical(0), tags = list(type = "integer")), Run = structure(logical(0), tags = list(type = "integer")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string")), JobBookmark = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_job_run_input <- function(...) {
# Request-shape builder for the Glue GetJobRun operation.
# Supplied values are folded into the tagged request structure via
# populate() (defined elsewhere in this package).
supplied <- c(as.list(environment()), list(...))
string_leaf <- structure(logical(0), tags = list(type = "string"))
shape <- structure(
  list(
    JobName = string_leaf,
    RunId = string_leaf,
    PredecessorsIncluded = structure(logical(0), tags = list(type = "boolean"))
  ),
  tags = list(type = "structure")
)
populate(supplied, shape)
}
.glue$get_job_run_output <- function(...) {
# Response-shape builder for the Glue GetJobRun operation.
# Collects any supplied values and merges them into the expected response
# structure via populate() (defined elsewhere in this package).
args <- c(as.list(environment()), list(...))
# Generated wire-shape literal: leaves default to logical(0) (unset) and the
# `tags` attributes carry the AWS type metadata used during (de)serialization.
shape <- structure(list(JobRun = structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for the Glue GetJobRuns operation: job name plus the
# standard pagination pair (NextToken / MaxResults). logical(0)
# placeholders carry the AWS type tags consumed by populate().
.glue$get_job_runs_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  string_slot <- structure(logical(0), tags = list(type = "string"))
  request_shape <- structure(
    list(
      JobName = string_slot,
      NextToken = string_slot,
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, request_shape)
}
# Response shape for the Glue GetJobRuns operation: a list of JobRun
# records plus a pagination NextToken. The logical(0) placeholders carry
# AWS type tags that populate() uses to fill the shape skeleton.
.glue$get_job_runs_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetJobs operation: just the pagination
# pair (NextToken / MaxResults).
.glue$get_jobs_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetJobs operation: a list of Job records
# (command, arguments, connections, capacity settings, ...) plus a
# pagination NextToken. Placeholders carry AWS type tags for populate().
.glue$get_jobs_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Jobs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", 
box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetMLTaskRun operation: identifies one
# task run by its transform id and task run id.
.glue$get_ml_task_run_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  string_slot <- structure(logical(0), tags = list(type = "string"))
  request_shape <- structure(
    list(
      TransformId = string_slot,
      TaskRunId = string_slot
    ),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetMLTaskRun operation: status, timing,
# and per-task-type property structures for a single ML task run.
.glue$get_ml_task_run_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), TaskRunId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), Properties = structure(list(TaskType = structure(logical(0), tags = list(type = "string")), ImportLabelsTaskRunProperties = structure(list(InputS3Path = structure(logical(0), tags = list(type = "string")), Replace = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), ExportLabelsTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), LabelingSetGenerationTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), FindMatchesTaskRunProperties = structure(list(JobId = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ErrorString = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionTime = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetMLTaskRuns operation: transform id,
# pagination pair, plus optional Filter and Sort sub-structures.
.glue$get_ml_task_runs_input <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), Filter = structure(list(TaskRunType = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), StartedBefore = structure(logical(0), tags = list(type = "timestamp")), StartedAfter = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure")), Sort = structure(list(Column = structure(logical(0), tags = list(type = "string")), SortDirection = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Response shape for the Glue GetMLTaskRuns operation: a list of
# TaskRun records plus a pagination NextToken.
.glue$get_ml_task_runs_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(TaskRuns = structure(list(structure(list(TransformId = structure(logical(0), tags = list(type = "string")), TaskRunId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), Properties = structure(list(TaskType = structure(logical(0), tags = list(type = "string")), ImportLabelsTaskRunProperties = structure(list(InputS3Path = structure(logical(0), tags = list(type = "string")), Replace = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), ExportLabelsTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), LabelingSetGenerationTaskRunProperties = structure(list(OutputS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), FindMatchesTaskRunProperties = structure(list(JobId = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), JobRunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ErrorString = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ExecutionTime = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetMLTransform operation: a single
# TransformId string identifies the transform to fetch.
.glue$get_ml_transform_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(TransformId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetMLTransform operation: transform
# metadata, input tables, find-matches parameters, evaluation metrics
# (including a confusion matrix), schema, and capacity settings.
.glue$get_ml_transform_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), EvaluationMetrics = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesMetrics = structure(list(AreaUnderPRCurve = structure(logical(0), tags = list(type = "double", box = TRUE)), Precision = structure(logical(0), tags = list(type = "double", box = TRUE)), Recall = structure(logical(0), tags = list(type = "double", box = TRUE)), F1 = structure(logical(0), tags = list(type = "double", box = TRUE)), ConfusionMatrix = structure(list(NumTruePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalsePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), 
NumTrueNegatives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalseNegatives = structure(logical(0), tags = list(type = "long", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), LabelCount = structure(logical(0), tags = list(type = "integer")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetMLTransforms operation: pagination pair
# plus optional Filter (name/type/status/version/date/schema criteria)
# and Sort sub-structures.
.glue$get_ml_transforms_input <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), Filter = structure(list(Name = structure(logical(0), tags = list(type = "string")), TransformType = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), CreatedBefore = structure(logical(0), tags = list(type = "timestamp")), CreatedAfter = structure(logical(0), tags = list(type = "timestamp")), LastModifiedBefore = structure(logical(0), tags = list(type = "timestamp")), LastModifiedAfter = structure(logical(0), tags = list(type = "timestamp")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Sort = structure(list(Column = structure(logical(0), tags = list(type = "string")), SortDirection = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Response shape for the Glue GetMLTransforms operation: a list of
# MLTransform records (same per-item layout as GetMLTransform's
# response) plus a pagination NextToken.
.glue$get_ml_transforms_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Transforms = structure(list(structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), InputRecordTables = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CatalogId = structure(logical(0), tags = list(type = "string")), ConnectionName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), EvaluationMetrics = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesMetrics = structure(list(AreaUnderPRCurve = structure(logical(0), tags = list(type = "double", box = TRUE)), Precision = structure(logical(0), tags = list(type = "double", box = TRUE)), Recall = structure(logical(0), tags = list(type = "double", box = TRUE)), F1 = structure(logical(0), tags = list(type = "double", box = TRUE)), ConfusionMatrix = structure(list(NumTruePositives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalsePositives = structure(logical(0), 
tags = list(type = "long", box = TRUE)), NumTrueNegatives = structure(logical(0), tags = list(type = "long", box = TRUE)), NumFalseNegatives = structure(logical(0), tags = list(type = "long", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), LabelCount = structure(logical(0), tags = list(type = "integer")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetMapping operation: a Source table, a
# list of Sinks, and a Location with Jdbc/S3/DynamoDB code-gen node
# argument lists.
.glue$get_mapping_input <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Source = structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Sinks = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(list(Jdbc = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), S3 = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDB = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Response shape for the Glue GetMapping operation: a list of
# source-to-target field mapping entries.
.glue$get_mapping_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Mapping = structure(list(structure(list(SourceTable = structure(logical(0), tags = list(type = "string")), SourcePath = structure(logical(0), tags = list(type = "string")), SourceType = structure(logical(0), tags = list(type = "string")), TargetTable = structure(logical(0), tags = list(type = "string")), TargetPath = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetPartition operation: identifies one
# partition by catalog, database, table, and the ordered list of
# partition key values.
.glue$get_partition_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  string_slot <- structure(logical(0), tags = list(type = "string"))
  request_shape <- structure(
    list(
      CatalogId = string_slot,
      DatabaseName = string_slot,
      TableName = string_slot,
      PartitionValues = structure(list(string_slot), tags = list(type = "list"))
    ),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetPartition operation: a single Partition
# record including its StorageDescriptor (columns, formats, serde,
# bucketing, skew info) and parameters.
.glue$get_partition_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Partition = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = 
structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetPartitions operation: table identity,
# an optional filter Expression, pagination pair, and a Segment
# sub-structure for parallel scans.
.glue$get_partitions_input <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), Expression = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), Segment = structure(list(SegmentNumber = structure(logical(0), tags = list(type = "integer")), TotalSegments = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Response shape for the Glue GetPartitions operation: a list of
# Partition records (same per-item layout as GetPartition's response)
# plus a pagination NextToken.
.glue$get_partitions_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Partitions = structure(list(structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), CreationTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), 
SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetPlan operation: field Mapping entries,
# Source/Sinks table references, a Location with Jdbc/S3/DynamoDB
# code-gen node arguments, and the target script Language.
.glue$get_plan_input <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Mapping = structure(list(structure(list(SourceTable = structure(logical(0), tags = list(type = "string")), SourcePath = structure(logical(0), tags = list(type = "string")), SourceType = structure(logical(0), tags = list(type = "string")), TargetTable = structure(logical(0), tags = list(type = "string")), TargetPath = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Source = structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Sinks = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(list(Jdbc = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), S3 = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDB = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string")), Param = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Language = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Response shape for the Glue GetPlan operation: the generated script
# as either Python or Scala source text.
.glue$get_plan_output <- function(...) {
  provided <- c(as.list(environment()), list(...))
  string_slot <- structure(logical(0), tags = list(type = "string"))
  response_shape <- structure(
    list(
      PythonScript = string_slot,
      ScalaCode = string_slot
    ),
    tags = list(type = "structure")
  )
  populate(provided, response_shape)
}
# Request shape for the Glue GetResourcePolicies operation: only the
# pagination pair (NextToken / MaxResults).
.glue$get_resource_policies_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetResourcePolicies operation: a list of
# resource policy records (policy JSON, hash, create/update times) plus
# a pagination NextToken.
.glue$get_resource_policies_output <- function(...) {
# Collect caller-supplied values (named arguments and `...`).
args <- c(as.list(environment()), list(...))
shape <- structure(list(GetResourcePoliciesResponseList = structure(list(structure(list(PolicyInJson = structure(logical(0), tags = list(type = "string")), PolicyHash = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
# Merge the collected values into the declared shape.
return(populate(args, shape))
}
# Request shape for the Glue GetResourcePolicy operation: a single
# ResourceArn string names the resource whose policy is requested.
.glue$get_resource_policy_input <- function(...) {
  provided <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(ResourceArn = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(provided, request_shape)
}
# Response shape for the Glue GetResourcePolicy operation: the policy
# document, its hash, and creation/update timestamps.
.glue$get_resource_policy_output <- function(...) {
  provided <- c(as.list(environment()), list(...))
  string_slot <- structure(logical(0), tags = list(type = "string"))
  time_slot <- structure(logical(0), tags = list(type = "timestamp"))
  response_shape <- structure(
    list(
      PolicyInJson = string_slot,
      PolicyHash = string_slot,
      CreateTime = time_slot,
      UpdateTime = time_slot
    ),
    tags = list(type = "structure")
  )
  populate(provided, response_shape)
}
.glue$get_security_configuration_input <- function(...) {
  # Request shape for the AWS Glue GetSecurityConfiguration operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_security_configuration_output <- function(...) {
# Response shape for the AWS Glue GetSecurityConfiguration operation:
# a SecurityConfiguration with nested S3/CloudWatch/JobBookmarks encryption
# settings. `...` values are merged into the template by populate()
# (defined elsewhere in the package).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(SecurityConfiguration = structure(list(Name = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), EncryptionConfiguration = structure(list(S3Encryption = structure(list(structure(list(S3EncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CloudWatchEncryption = structure(list(CloudWatchEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JobBookmarksEncryption = structure(list(JobBookmarksEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_security_configurations_input <- function(...) {
  # Paginated request shape for the AWS Glue GetSecurityConfigurations operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_security_configurations_output <- function(...) {
# Response shape for the AWS Glue GetSecurityConfigurations operation:
# a list of SecurityConfiguration structures plus a pagination NextToken.
# `...` values are merged into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(SecurityConfigurations = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), CreatedTimeStamp = structure(logical(0), tags = list(type = "timestamp")), EncryptionConfiguration = structure(list(S3Encryption = structure(list(structure(list(S3EncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CloudWatchEncryption = structure(list(CloudWatchEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JobBookmarksEncryption = structure(list(JobBookmarksEncryptionMode = structure(logical(0), tags = list(type = "string")), KmsKeyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_table_input <- function(...) {
  # Request shape for the AWS Glue GetTable operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      Name = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_table_output <- function(...) {
# Response shape for the AWS Glue GetTable operation: a deeply nested Table
# structure (storage descriptor, partition keys, skew info, target table, etc.).
# `...` values are merged into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated shape template; one typed placeholder per response field.
# The literal is wrapped across lines by the generator — do not reflow.
shape <- structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = 
list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = 
"structure"))
return(populate(args, shape))
}
.glue$get_table_version_input <- function(...) {
  # Request shape for the AWS Glue GetTableVersion operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      TableName = structure(logical(0), tags = list(type = "string")),
      VersionId = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_table_version_output <- function(...) {
# Response shape for the AWS Glue GetTableVersion operation: a TableVersion
# wrapping a full Table structure plus its VersionId. `...` values are merged
# into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated shape template; the literal is wrapped across lines by the
# generator — do not reflow.
shape <- structure(list(TableVersion = structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = 
structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_table_versions_input <- function(...) {
  # Paginated request shape for the AWS Glue GetTableVersions operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      TableName = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_table_versions_output <- function(...) {
# Response shape for the AWS Glue GetTableVersions operation: a list of
# TableVersion structures plus a pagination NextToken. `...` values are
# merged into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated shape template; the literal is wrapped across lines by the
# generator — do not reflow.
shape <- structure(list(TableVersions = structure(list(structure(list(Table = structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), 
SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), VersionId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_tables_input <- function(...) {
  # Paginated request shape for the AWS Glue GetTables operation
  # (optionally filtered by a catalog expression).
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      Expression = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_tables_output <- function(...) {
# Response shape for the AWS Glue GetTables operation: a TableList of full
# Table structures plus a pagination NextToken. `...` values are merged into
# the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated shape template; the literal is wrapped across lines by the
# generator — do not reflow.
shape <- structure(list(TableList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = 
structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_tags_input <- function(...) {
  # Request shape for the AWS Glue GetTags operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      ResourceArn = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_tags_output <- function(...) {
  # Response shape for the AWS Glue GetTags operation: a string-to-string
  # tag map.
  call_args <- c(as.list(environment()), list(...))
  response_shape <- structure(
    list(
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(call_args, response_shape)
}
.glue$get_trigger_input <- function(...) {
  # Request shape for the AWS Glue GetTrigger operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_trigger_output <- function(...) {
# Response shape for the AWS Glue GetTrigger operation: a Trigger with its
# actions and predicate conditions. `...` values are merged into the template
# by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_triggers_input <- function(...) {
  # Paginated request shape for the AWS Glue GetTriggers operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      DependentJobName = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_triggers_output <- function(...) {
# Response shape for the AWS Glue GetTriggers operation: a list of Trigger
# structures plus a pagination NextToken. `...` values are merged into the
# template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(Triggers = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_user_defined_function_input <- function(...) {
  # Request shape for the AWS Glue GetUserDefinedFunction operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      FunctionName = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_user_defined_function_output <- function(...) {
# Response shape for the AWS Glue GetUserDefinedFunction operation: a
# UserDefinedFunction with its class name, owner, and resource URIs.
# `...` values are merged into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(UserDefinedFunction = structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_user_defined_functions_input <- function(...) {
  # Paginated request shape for the AWS Glue GetUserDefinedFunctions operation.
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      CatalogId = structure(logical(0), tags = list(type = "string")),
      DatabaseName = structure(logical(0), tags = list(type = "string")),
      Pattern = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_user_defined_functions_output <- function(...) {
# Response shape for the AWS Glue GetUserDefinedFunctions operation: a list
# of UserDefinedFunction structures plus a pagination NextToken. `...` values
# are merged into the template by populate() (package-internal).
args <- c(as.list(environment()), list(...))
# Auto-generated nested shape template; each leaf is a typed placeholder.
shape <- structure(list(UserDefinedFunctions = structure(list(structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$get_workflow_input <- function(...) {
  # Request shape for the AWS Glue GetWorkflow operation (optionally
  # including the workflow graph in the response).
  call_args <- c(as.list(environment()), list(...))
  request_shape <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      IncludeGraph = structure(logical(0), tags = list(type = "boolean", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, request_shape)
}
.glue$get_workflow_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Workflow = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), DefaultRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), LastRun = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = 
"string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags 
= list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = 
structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = 
TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request serialization template for Glue GetWorkflowRun.
# Caller-supplied arguments in `...` are merged into the template by
# populate() (defined elsewhere in this package).
.glue$get_workflow_run_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string")),
      IncludeGraph = structure(logical(0), tags = list(type = "boolean", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue GetWorkflowRun (auto-generated
# from the AWS API model). The nested structure() literal mirrors the
# WorkflowRun shape — its Statistics block and its Graph of trigger/job/crawler
# Nodes plus Edges — and populate() copies matching supplied values into it.
.glue$get_workflow_run_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Run = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", 
box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = structure(logical(0), 
tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request serialization template for Glue GetWorkflowRunProperties.
# Arguments in `...` are merged into the template by populate().
.glue$get_workflow_run_properties_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue GetWorkflowRunProperties:
# a single RunProperties map of string values.
.glue$get_workflow_run_properties_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      RunProperties = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue GetWorkflowRuns (paginated:
# NextToken/MaxResults). Arguments in `...` are merged by populate().
.glue$get_workflow_runs_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      IncludeGraph = structure(logical(0), tags = list(type = "boolean", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue GetWorkflowRuns (auto-generated
# from the AWS API model). Runs is a list of WorkflowRun shapes — each with
# Statistics and a Graph of trigger/job/crawler Nodes plus Edges — followed by
# a NextToken pagination cursor; populate() fills in matching supplied values.
.glue$get_workflow_runs_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Runs = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowRunId = structure(logical(0), tags = list(type = "string")), WorkflowRunProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), Status = structure(logical(0), tags = list(type = "string")), Statistics = structure(list(TotalActions = structure(logical(0), tags = list(type = "integer")), TimeoutActions = structure(logical(0), tags = list(type = "integer")), FailedActions = structure(logical(0), tags = list(type = "integer")), StoppedActions = structure(logical(0), tags = list(type = "integer")), SucceededActions = structure(logical(0), tags = list(type = "integer")), RunningActions = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Graph = structure(list(Nodes = structure(list(structure(list(Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), UniqueId = structure(logical(0), tags = list(type = "string")), TriggerDetails = structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type 
= "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), JobDetails = structure(list(JobRuns = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Attempt = structure(logical(0), tags = list(type = "integer")), PreviousRunId = structure(logical(0), tags = list(type = "string")), TriggerName = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), LastModifiedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), JobRunState = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), ErrorMessage = structure(logical(0), tags = list(type = "string")), PredecessorRuns = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), RunId = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), ExecutionTime = structure(logical(0), tags = list(type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), LogGroupName = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), CrawlerDetails = structure(list(Crawls = structure(list(structure(list(State = structure(logical(0), tags = list(type = "string")), StartedOn = structure(logical(0), tags = list(type = "timestamp")), CompletedOn = structure(logical(0), tags = list(type = "timestamp")), ErrorMessage = structure(logical(0), tags = list(type = "string")), LogGroup = structure(logical(0), tags = list(type = "string")), LogStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), Edges = structure(list(structure(list(SourceId = structure(logical(0), tags = list(type = "string")), DestinationId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = 
"list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request serialization template for Glue ImportCatalogToGlue: just the
# optional CatalogId. Arguments in `...` are merged by populate().
.glue$import_catalog_to_glue_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(CatalogId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ImportCatalogToGlue.
# The operation returns no fields, so the shape is an empty structure.
.glue$import_catalog_to_glue_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
# Request serialization template for Glue ListCrawlers (paginated, with an
# optional Tags filter map). Arguments in `...` are merged by populate().
.glue$list_crawlers_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      NextToken = structure(logical(0), tags = list(type = "string")),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ListCrawlers: a list of
# crawler names plus a NextToken pagination cursor.
.glue$list_crawlers_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      CrawlerNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue ListDevEndpoints (paginated, with
# an optional Tags filter map). Arguments in `...` are merged by populate().
.glue$list_dev_endpoints_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ListDevEndpoints: a list of
# dev endpoint names plus a NextToken pagination cursor.
.glue$list_dev_endpoints_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      DevEndpointNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue ListJobs (paginated, with an
# optional Tags filter map). Arguments in `...` are merged by populate().
.glue$list_jobs_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ListJobs: a list of job names
# plus a NextToken pagination cursor.
.glue$list_jobs_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      JobNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue ListMLTransforms (auto-generated
# from the AWS API model): pagination (NextToken/MaxResults), a Filter
# criteria structure (name/type/status/version, created/modified time bounds,
# schema column list), a Sort specification, and a Tags filter map.
# populate() merges matching supplied arguments into the template.
.glue$list_ml_transforms_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)), Filter = structure(list(Name = structure(logical(0), tags = list(type = "string")), TransformType = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), CreatedBefore = structure(logical(0), tags = list(type = "timestamp")), CreatedAfter = structure(logical(0), tags = list(type = "timestamp")), LastModifiedBefore = structure(logical(0), tags = list(type = "timestamp")), LastModifiedAfter = structure(logical(0), tags = list(type = "timestamp")), Schema = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Sort = structure(list(Column = structure(logical(0), tags = list(type = "string")), SortDirection = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Tags = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response deserialization template for Glue ListMLTransforms: a list of
# transform IDs plus a NextToken pagination cursor.
.glue$list_ml_transforms_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TransformIds = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue ListTriggers (paginated, with an
# optional DependentJobName filter and Tags map). Arguments in `...` are
# merged by populate().
.glue$list_triggers_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      DependentJobName = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      Tags = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ListTriggers: a list of
# trigger names plus a NextToken pagination cursor.
.glue$list_triggers_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      TriggerNames = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue ListWorkflows (paginated).
# Arguments in `...` are merged by populate().
.glue$list_workflows_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue ListWorkflows: a list of
# workflow names plus a NextToken pagination cursor.
.glue$list_workflows_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Workflows = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      ),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue PutDataCatalogEncryptionSettings
# (auto-generated from the AWS API model): CatalogId plus nested
# EncryptionAtRest and ConnectionPasswordEncryption settings structures.
# populate() merges matching supplied arguments into the template.
.glue$put_data_catalog_encryption_settings_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DataCatalogEncryptionSettings = structure(list(EncryptionAtRest = structure(list(CatalogEncryptionMode = structure(logical(0), tags = list(type = "string")), SseAwsKmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ConnectionPasswordEncryption = structure(list(ReturnConnectionPasswordEncrypted = structure(logical(0), tags = list(type = "boolean")), AwsKmsKeyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response deserialization template for Glue PutDataCatalogEncryptionSettings.
# The operation returns no fields, so the shape is an empty structure.
.glue$put_data_catalog_encryption_settings_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, template)
}
# Request serialization template for Glue PutResourcePolicy: the policy
# document plus optional ARN and conditional-update fields (all strings).
# Arguments in `...` are merged by populate().
.glue$put_resource_policy_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      PolicyInJson = structure(logical(0), tags = list(type = "string")),
      ResourceArn = structure(logical(0), tags = list(type = "string")),
      PolicyHashCondition = structure(logical(0), tags = list(type = "string")),
      PolicyExistsCondition = structure(logical(0), tags = list(type = "string")),
      EnableHybrid = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response deserialization template for Glue PutResourcePolicy: the hash
# of the stored policy.
.glue$put_resource_policy_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(PolicyHash = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Request serialization template for Glue PutWorkflowRunProperties:
# workflow name, run ID, and a RunProperties map of string values.
# Arguments in `...` are merged by populate().
.glue$put_workflow_run_properties_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      Name = structure(logical(0), tags = list(type = "string")),
      RunId = structure(logical(0), tags = list(type = "string")),
      RunProperties = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
.glue$put_workflow_run_properties_output <- function(...) {
  # Response template for Glue PutWorkflowRunProperties; the API returns
  # no fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$reset_job_bookmark_input <- function(...) {
  # Request template for Glue ResetJobBookmark: JobName and RunId.
  args <- c(as.list(environment()), list(...))
  string_field <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(JobName = string_field(), RunId = string_field()),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$reset_job_bookmark_output <- function(...) {
  # Response template for Glue ResetJobBookmark: a JobBookmarkEntry
  # structure describing the reset bookmark.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  int_ <- function() structure(logical(0), tags = list(type = "integer"))
  entry <- structure(
    list(
      JobName = str_(),
      Version = int_(),
      Run = int_(),
      Attempt = int_(),
      PreviousRunId = str_(),
      RunId = str_(),
      JobBookmark = str_()
    ),
    tags = list(type = "structure")
  )
  shape <- structure(list(JobBookmarkEntry = entry), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$search_tables_input <- function(...) {
  # Request template for Glue SearchTables: catalog/pagination fields,
  # property filters, free-text search, sort criteria and page size.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  filter_entry <- structure(
    list(Key = str_(), Value = str_(), Comparator = str_()),
    tags = list(type = "structure")
  )
  sort_entry <- structure(
    list(FieldName = str_(), Sort = str_()),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      CatalogId = str_(),
      NextToken = str_(),
      Filters = structure(list(filter_entry), tags = list(type = "list")),
      SearchText = str_(),
      SortCriteria = structure(list(sort_entry), tags = list(type = "list")),
      MaxResults = structure(logical(0), tags = list(type = "integer", box = TRUE)),
      ResourceShareType = str_()
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$search_tables_output <- function(...) {
# Response template for Glue SearchTables: NextToken plus a list of
# Table structures (storage descriptor, partition keys, parameters,
# target-table reference, ...). populate() (defined elsewhere in the
# package) presumably fills the typed placeholders from `args` --
# TODO confirm against populate()'s definition.
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), TableList = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), UpdateTime = structure(logical(0), tags = list(type = "timestamp")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = 
structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreatedBy = structure(logical(0), tags = list(type = "string")), IsRegisteredWithLakeFormation = structure(logical(0), tags = list(type = "boolean")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CatalogId = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$start_crawler_input <- function(...) {
  # Request template for Glue StartCrawler: a single string field, Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_crawler_output <- function(...) {
  # Response template for Glue StartCrawler; the API returns no fields,
  # so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$start_crawler_schedule_input <- function(...) {
  # Request template for Glue StartCrawlerSchedule: a single string
  # field, CrawlerName.
  args <- c(as.list(environment()), list(...))
  crawler_name <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(CrawlerName = crawler_name), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_crawler_schedule_output <- function(...) {
  # Response template for Glue StartCrawlerSchedule; the API returns no
  # fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$start_export_labels_task_run_input <- function(...) {
  # Request template for Glue StartExportLabelsTaskRun: TransformId and
  # the destination OutputS3Path.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(TransformId = str_(), OutputS3Path = str_()),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$start_export_labels_task_run_output <- function(...) {
  # Response template for Glue StartExportLabelsTaskRun: a single string
  # field, TaskRunId.
  args <- c(as.list(environment()), list(...))
  task_run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(TaskRunId = task_run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_import_labels_task_run_input <- function(...) {
  # Request template for Glue StartImportLabelsTaskRun: TransformId, the
  # source InputS3Path and the ReplaceAllLabels flag.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(
      TransformId = str_(),
      InputS3Path = str_(),
      ReplaceAllLabels = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$start_import_labels_task_run_output <- function(...) {
  # Response template for Glue StartImportLabelsTaskRun: a single string
  # field, TaskRunId.
  args <- c(as.list(environment()), list(...))
  task_run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(TaskRunId = task_run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_job_run_input <- function(...) {
  # Request template for Glue StartJobRun: job identity, override
  # arguments, capacity/worker settings and notification options.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  boxed_int <- function() structure(logical(0), tags = list(type = "integer", box = TRUE))
  shape <- structure(
    list(
      JobName = str_(),
      JobRunId = str_(),
      Arguments = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      ),
      # AllocatedCapacity is kept for wire compatibility but carries a
      # deprecation tag pointing callers at MaxCapacity.
      AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")),
      Timeout = boxed_int(),
      MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)),
      SecurityConfiguration = str_(),
      NotificationProperty = structure(
        list(NotifyDelayAfter = boxed_int()),
        tags = list(type = "structure")
      ),
      WorkerType = str_(),
      NumberOfWorkers = boxed_int()
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$start_job_run_output <- function(...) {
  # Response template for Glue StartJobRun: a single string field,
  # JobRunId.
  args <- c(as.list(environment()), list(...))
  job_run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(JobRunId = job_run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_ml_evaluation_task_run_input <- function(...) {
  # Request template for Glue StartMLEvaluationTaskRun: a single string
  # field, TransformId.
  args <- c(as.list(environment()), list(...))
  transform_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(TransformId = transform_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_ml_evaluation_task_run_output <- function(...) {
  # Response template for Glue StartMLEvaluationTaskRun: a single string
  # field, TaskRunId.
  args <- c(as.list(environment()), list(...))
  task_run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(TaskRunId = task_run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_ml_labeling_set_generation_task_run_input <- function(...) {
  # Request template for Glue StartMLLabelingSetGenerationTaskRun:
  # TransformId and the destination OutputS3Path.
  args <- c(as.list(environment()), list(...))
  str_ <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(TransformId = str_(), OutputS3Path = str_()),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$start_ml_labeling_set_generation_task_run_output <- function(...) {
  # Response template for Glue StartMLLabelingSetGenerationTaskRun: a
  # single string field, TaskRunId.
  args <- c(as.list(environment()), list(...))
  task_run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(TaskRunId = task_run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_trigger_input <- function(...) {
  # Request template for Glue StartTrigger: a single string field, Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_trigger_output <- function(...) {
  # Response template for Glue StartTrigger: a single string field,
  # Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_workflow_run_input <- function(...) {
  # Request template for Glue StartWorkflowRun: a single string field,
  # Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$start_workflow_run_output <- function(...) {
  # Response template for Glue StartWorkflowRun: a single string field,
  # RunId.
  args <- c(as.list(environment()), list(...))
  run_id <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(RunId = run_id), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$stop_crawler_input <- function(...) {
  # Request template for Glue StopCrawler: a single string field, Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$stop_crawler_output <- function(...) {
  # Response template for Glue StopCrawler; the API returns no fields,
  # so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$stop_crawler_schedule_input <- function(...) {
  # Request template for Glue StopCrawlerSchedule: a single string
  # field, CrawlerName.
  args <- c(as.list(environment()), list(...))
  crawler_name <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(CrawlerName = crawler_name), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$stop_crawler_schedule_output <- function(...) {
  # Response template for Glue StopCrawlerSchedule; the API returns no
  # fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$stop_trigger_input <- function(...) {
  # Request template for Glue StopTrigger: a single string field, Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$stop_trigger_output <- function(...) {
  # Response template for Glue StopTrigger: a single string field, Name.
  args <- c(as.list(environment()), list(...))
  name_field <- structure(logical(0), tags = list(type = "string"))
  shape <- structure(list(Name = name_field), tags = list(type = "structure"))
  populate(args, shape)
}
.glue$stop_workflow_run_input <- function(...) {
  # Request template for Glue StopWorkflowRun: workflow Name and RunId.
  args <- c(as.list(environment()), list(...))
  string_field <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(Name = string_field(), RunId = string_field()),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$stop_workflow_run_output <- function(...) {
  # Response template for Glue StopWorkflowRun; the API returns no
  # fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$tag_resource_input <- function(...) {
  # Request template for Glue TagResource: the target ResourceArn and a
  # TagsToAdd map of tag keys to values.
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(
      ResourceArn = structure(logical(0), tags = list(type = "string")),
      TagsToAdd = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "map")
      )
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$tag_resource_output <- function(...) {
  # Response template for Glue TagResource; the API returns no fields,
  # so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$untag_resource_input <- function(...) {
  # Request template for Glue UntagResource: the target ResourceArn and
  # a TagsToRemove list of tag keys.
  args <- c(as.list(environment()), list(...))
  shape <- structure(
    list(
      ResourceArn = structure(logical(0), tags = list(type = "string")),
      TagsToRemove = structure(
        list(structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "list")
      )
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
.glue$untag_resource_output <- function(...) {
  # Response template for Glue UntagResource; the API returns no fields,
  # so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$update_classifier_input <- function(...) {
# Request template for Glue UpdateClassifier: one of four classifier
# variants (Grok, XML, JSON or CSV), each with its own settings.
# populate() (defined elsewhere in the package) presumably copies the
# caller-supplied values into the typed placeholders -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(GrokClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), GrokPattern = structure(logical(0), tags = list(type = "string")), CustomPatterns = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), XMLClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Classification = structure(logical(0), tags = list(type = "string")), RowTag = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), JsonClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), JsonPath = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CsvClassifier = structure(list(Name = structure(logical(0), tags = list(type = "string")), Delimiter = structure(logical(0), tags = list(type = "string")), QuoteSymbol = structure(logical(0), tags = list(type = "string")), ContainsHeader = structure(logical(0), tags = list(type = "string")), Header = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DisableValueTrimming = structure(logical(0), tags = list(type = "boolean", box = TRUE)), AllowSingleColumn = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_classifier_output <- function(...) {
  # Response template for Glue UpdateClassifier; the API returns no
  # fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$update_column_statistics_for_partition_input <- function(...) {
# Request template for Glue UpdateColumnStatisticsForPartition: catalog/
# table identity, partition values, and a ColumnStatisticsList whose
# StatisticsData variants cover boolean/date/decimal/double/long/string/
# binary column types. populate() (defined elsewhere in the package)
# presumably fills the typed placeholders from `args` -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = 
structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_column_statistics_for_partition_output <- function(...) {
# Response template for Glue UpdateColumnStatisticsForPartition: a list
# of per-column errors, each pairing the offending ColumnStatistics
# entry with an ErrorCode/ErrorMessage detail. populate() (defined
# elsewhere in the package) presumably fills the typed placeholders
# from `args` -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(ColumnStatistics = structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = 
"structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_column_statistics_for_table_input <- function(...) {
# Request template for Glue UpdateColumnStatisticsForTable: catalog/
# table identity plus a ColumnStatisticsList whose StatisticsData
# variants cover boolean/date/decimal/double/long/string/binary column
# types. populate() (defined elsewhere in the package) presumably fills
# the typed placeholders from `args` -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), ColumnStatisticsList = structure(list(structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = 
"double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_column_statistics_for_table_output <- function(...) {
# Response template for Glue UpdateColumnStatisticsForTable: a list of
# per-column errors, each pairing the offending ColumnStatistics entry
# with an ErrorCode/ErrorMessage detail. populate() (defined elsewhere
# in the package) presumably fills the typed placeholders from `args`
# -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Errors = structure(list(structure(list(ColumnStatistics = structure(list(ColumnName = structure(logical(0), tags = list(type = "string")), ColumnType = structure(logical(0), tags = list(type = "string")), AnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), StatisticsData = structure(list(Type = structure(logical(0), tags = list(type = "string")), BooleanColumnStatisticsData = structure(list(NumberOfTrues = structure(logical(0), tags = list(type = "long")), NumberOfFalses = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DateColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "timestamp")), MaximumValue = structure(logical(0), tags = list(type = "timestamp")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DecimalColumnStatisticsData = structure(list(MinimumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), MaximumValue = structure(list(UnscaledValue = structure(logical(0), tags = list(type = "blob")), Scale = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), DoubleColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "double")), MaximumValue = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = 
"structure")), LongColumnStatisticsData = structure(list(MinimumValue = structure(logical(0), tags = list(type = "long")), MaximumValue = structure(logical(0), tags = list(type = "long")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), StringColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long")), NumberOfDistinctValues = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), BinaryColumnStatisticsData = structure(list(MaximumLength = structure(logical(0), tags = list(type = "long")), AverageLength = structure(logical(0), tags = list(type = "double")), NumberOfNulls = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), Error = structure(list(ErrorCode = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_connection_input <- function(...) {
# Request template for Glue UpdateConnection: catalog id, connection
# name and a ConnectionInput structure (type, match criteria, property
# map and physical connection requirements). populate() (defined
# elsewhere in the package) presumably fills the typed placeholders
# from `args` -- TODO confirm.
args <- c(as.list(environment()), list(...))
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), ConnectionInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), ConnectionType = structure(logical(0), tags = list(type = "string")), MatchCriteria = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ConnectionProperties = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), PhysicalConnectionRequirements = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SecurityGroupIdList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AvailabilityZone = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_connection_output <- function(...) {
  # Response template for Glue UpdateConnection; the API returns no
  # fields, so the wire shape is an empty structure.
  args <- c(as.list(environment()), list(...))
  empty_shape <- structure(list(), tags = list(type = "structure"))
  populate(args, empty_shape)
}
.glue$update_crawler_input <- function(...) {
# Build the request shape for the Glue update_crawler operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (crawler name, role,
# targets, schedule, schema-change policy, ...); populate() -- defined
# elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Targets = structure(list(S3Targets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), JdbcTargets = structure(list(structure(list(ConnectionName = structure(logical(0), tags = list(type = "string")), Path = structure(logical(0), tags = list(type = "string")), Exclusions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), DynamoDBTargets = structure(list(structure(list(Path = structure(logical(0), tags = list(type = "string")), scanAll = structure(logical(0), tags = list(type = "boolean", box = TRUE)), scanRate = structure(logical(0), tags = list(type = "double", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "list")), CatalogTargets = structure(list(structure(list(DatabaseName = structure(logical(0), tags = list(type = "string")), Tables = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), Schedule = structure(logical(0), tags = list(type = "string")), Classifiers = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), TablePrefix = structure(logical(0), tags = list(type = "string")), SchemaChangePolicy = structure(list(UpdateBehavior = structure(logical(0), tags = list(type = "string")), DeleteBehavior = structure(logical(0), tags = list(type = "string"))), 
tags = list(type = "structure")), Configuration = structure(logical(0), tags = list(type = "string")), CrawlerSecurityConfiguration = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_crawler_output <- function(...) {
# Response shape for update_crawler: an empty structure with no fields.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_crawler_schedule_input <- function(...) {
# Request shape for update_crawler_schedule: two optional string fields,
# the crawler's name and its new schedule expression.
supplied <- c(as.list(environment()), list(...))
req_shape <- structure(
  list(
    CrawlerName = structure(logical(0), tags = list(type = "string")),
    Schedule = structure(logical(0), tags = list(type = "string"))
  ),
  tags = list(type = "structure")
)
populate(supplied, req_shape)
}
.glue$update_crawler_schedule_output <- function(...) {
# Response shape for update_crawler_schedule: an empty structure.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_database_input <- function(...) {
# Build the request shape for the Glue update_database operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (catalog id, name,
# and the replacement DatabaseInput definition); populate() -- defined
# elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), DatabaseInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LocationUri = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), CreateTableDefaultPermissions = structure(list(structure(list(Principal = structure(list(DataLakePrincipalIdentifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Permissions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), TargetDatabase = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_database_output <- function(...) {
# Response shape for update_database: an empty structure with no fields.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_dev_endpoint_input <- function(...) {
# Build the request shape for the Glue update_dev_endpoint operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (public keys to
# add/delete, custom libraries, argument maps); populate() -- defined
# elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(EndpointName = structure(logical(0), tags = list(type = "string")), PublicKey = structure(logical(0), tags = list(type = "string")), AddPublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), DeletePublicKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), CustomLibraries = structure(list(ExtraPythonLibsS3Path = structure(logical(0), tags = list(type = "string")), ExtraJarsS3Path = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), UpdateEtlLibraries = structure(logical(0), tags = list(type = "boolean")), DeleteArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AddArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_dev_endpoint_output <- function(...) {
# Response shape for update_dev_endpoint: an empty structure.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_job_input <- function(...) {
# Build the request shape for the Glue update_job operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (job name plus the
# JobUpdate definition: command, arguments, capacity, retries, timeouts, ...);
# populate() -- defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
# AllocatedCapacity carries a deprecation tag pointing callers to MaxCapacity.
shape <- structure(list(JobName = structure(logical(0), tags = list(type = "string")), JobUpdate = structure(list(Description = structure(logical(0), tags = list(type = "string")), LogUri = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), ExecutionProperty = structure(list(MaxConcurrentRuns = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Command = structure(list(Name = structure(logical(0), tags = list(type = "string")), ScriptLocation = structure(logical(0), tags = list(type = "string")), PythonVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DefaultArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), NonOverridableArguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Connections = structure(list(Connections = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), MaxRetries = structure(logical(0), tags = list(type = "integer")), AllocatedCapacity = structure(logical(0), tags = list(deprecated = TRUE, deprecatedMessage = "This property is deprecated, use MaxCapacity instead.", type = "integer")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), GlueVersion = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_job_output <- function(...) {
# Response shape for update_job: a single optional JobName string field.
supplied <- c(as.list(environment()), list(...))
out_shape <- structure(
  list(JobName = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, out_shape)
}
.glue$update_ml_transform_input <- function(...) {
# Build the request shape for the Glue update_ml_transform operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (transform id/name,
# find-matches parameters, capacity and retry settings); populate() --
# defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(TransformId = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(TransformType = structure(logical(0), tags = list(type = "string")), FindMatchesParameters = structure(list(PrimaryKeyColumnName = structure(logical(0), tags = list(type = "string")), PrecisionRecallTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), AccuracyCostTradeoff = structure(logical(0), tags = list(type = "double", box = TRUE)), EnforceProvidedLabels = structure(logical(0), tags = list(type = "boolean", box = TRUE))), tags = list(type = "structure"))), tags = list(type = "structure")), Role = structure(logical(0), tags = list(type = "string")), GlueVersion = structure(logical(0), tags = list(type = "string")), MaxCapacity = structure(logical(0), tags = list(type = "double", box = TRUE)), WorkerType = structure(logical(0), tags = list(type = "string")), NumberOfWorkers = structure(logical(0), tags = list(type = "integer", box = TRUE)), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), MaxRetries = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_ml_transform_output <- function(...) {
# Response shape for update_ml_transform: a single optional TransformId string.
supplied <- c(as.list(environment()), list(...))
out_shape <- structure(
  list(TransformId = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, out_shape)
}
.glue$update_partition_input <- function(...) {
# Build the request shape for the Glue update_partition operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (partition value
# list plus the replacement PartitionInput with its StorageDescriptor);
# populate() -- defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableName = structure(logical(0), tags = list(type = "string")), PartitionValueList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PartitionInput = structure(list(Values = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), Parameters = 
structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_partition_output <- function(...) {
# Response shape for update_partition: an empty structure with no fields.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_table_input <- function(...) {
# Build the request shape for the Glue update_table operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (the replacement
# TableInput with storage descriptor, partition keys, view text, parameters,
# target table, plus the SkipArchive flag); populate() -- defined elsewhere --
# presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), TableInput = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Owner = structure(logical(0), tags = list(type = "string")), LastAccessTime = structure(logical(0), tags = list(type = "timestamp")), LastAnalyzedTime = structure(logical(0), tags = list(type = "timestamp")), Retention = structure(logical(0), tags = list(type = "integer")), StorageDescriptor = structure(list(Columns = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), Location = structure(logical(0), tags = list(type = "string")), InputFormat = structure(logical(0), tags = list(type = "string")), OutputFormat = structure(logical(0), tags = list(type = "string")), Compressed = structure(logical(0), tags = list(type = "boolean")), NumberOfBuckets = structure(logical(0), tags = list(type = "integer")), SerdeInfo = structure(list(Name = structure(logical(0), tags = list(type = "string")), SerializationLibrary = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), BucketColumns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SortColumns = structure(list(structure(list(Column = structure(logical(0), tags = list(type = "string")), SortOrder = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = 
list(type = "list")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), SkewedInfo = structure(list(SkewedColumnNames = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValues = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), SkewedColumnValueLocationMaps = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure")), StoredAsSubDirectories = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PartitionKeys = structure(list(structure(list(Name = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Comment = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), ViewOriginalText = structure(logical(0), tags = list(type = "string")), ViewExpandedText = structure(logical(0), tags = list(type = "string")), TableType = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), TargetTable = structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), SkipArchive = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_table_output <- function(...) {
# Response shape for update_table: an empty structure with no fields.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_trigger_input <- function(...) {
# Build the request shape for the Glue update_trigger operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (trigger name plus
# the TriggerUpdate definition: schedule, actions, predicate conditions);
# populate() -- defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(Name = structure(logical(0), tags = list(type = "string")), TriggerUpdate = structure(list(Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_trigger_output <- function(...) {
# Build the response shape for the Glue update_trigger operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service response (the full updated
# Trigger: name, workflow, type, state, schedule, actions, predicate);
# populate() -- defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(Trigger = structure(list(Name = structure(logical(0), tags = list(type = "string")), WorkflowName = structure(logical(0), tags = list(type = "string")), Id = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Schedule = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(JobName = structure(logical(0), tags = list(type = "string")), Arguments = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map")), Timeout = structure(logical(0), tags = list(type = "integer", box = TRUE)), SecurityConfiguration = structure(logical(0), tags = list(type = "string")), NotificationProperty = structure(list(NotifyDelayAfter = structure(logical(0), tags = list(type = "integer", box = TRUE))), tags = list(type = "structure")), CrawlerName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Predicate = structure(list(Logical = structure(logical(0), tags = list(type = "string")), Conditions = structure(list(structure(list(LogicalOperator = structure(logical(0), tags = list(type = "string")), JobName = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), CrawlerName = structure(logical(0), tags = list(type = "string")), CrawlState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_user_defined_function_input <- function(...) {
# Build the request shape for the Glue update_user_defined_function operation.
args <- c(as.list(environment()), list(...))
# Empty typed placeholders mirroring the service request (function name and
# the replacement FunctionInput with class, owner and resource URIs);
# populate() -- defined elsewhere -- presumably fills them from `args`.
# NOTE(review): looks like generated paws/AWS-SDK shape code; avoid hand edits.
shape <- structure(list(CatalogId = structure(logical(0), tags = list(type = "string")), DatabaseName = structure(logical(0), tags = list(type = "string")), FunctionName = structure(logical(0), tags = list(type = "string")), FunctionInput = structure(list(FunctionName = structure(logical(0), tags = list(type = "string")), ClassName = structure(logical(0), tags = list(type = "string")), OwnerName = structure(logical(0), tags = list(type = "string")), OwnerType = structure(logical(0), tags = list(type = "string")), ResourceUris = structure(list(structure(list(ResourceType = structure(logical(0), tags = list(type = "string")), Uri = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.glue$update_user_defined_function_output <- function(...) {
# Response shape for update_user_defined_function: an empty structure.
supplied <- c(as.list(environment()), list(...))
empty_struct <- structure(list(), tags = list(type = "structure"))
populate(supplied, empty_struct)
}
.glue$update_workflow_input <- function(...) {
# Request shape for update_workflow: workflow name, description, and a
# string-to-string map of default run properties.
supplied <- c(as.list(environment()), list(...))
req_shape <- structure(
  list(
    Name = structure(logical(0), tags = list(type = "string")),
    Description = structure(logical(0), tags = list(type = "string")),
    DefaultRunProperties = structure(
      list(structure(logical(0), tags = list(type = "string"))),
      tags = list(type = "map")
    )
  ),
  tags = list(type = "structure")
)
populate(supplied, req_shape)
}
.glue$update_workflow_output <- function(...) {
# Response shape for update_workflow: a single optional Name string field.
supplied <- c(as.list(environment()), list(...))
out_shape <- structure(
  list(Name = structure(logical(0), tags = list(type = "string"))),
  tags = list(type = "structure")
)
populate(supplied, out_shape)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R, R/mice_extentions.R
\name{WideToLong}
\alias{WideToLong}
\alias{WideToLong.data.frame}
\alias{WideToLong.mids}
\title{WideToLong: Converting from wide to long formats}
\usage{
WideToLong(data, id.name, response.base, time.varying.bases, sep)
\method{WideToLong}{data.frame}(data, id.name, response.base,
time.varying.bases = NULL, sep = ".")
\method{WideToLong}{mids}(data, id.name, response.base,
time.varying.bases = NULL, sep = ".")
}
\arguments{
\item{data}{A data frame or mids object in "wide" format. Specifically, both
the response and any time varying covariates should be specified
as multiple columns with the same base name, but a different
suffix. The suffix values will be the future period labels.}
\item{id.name}{The name of the identifying variable, a character string.}
\item{response.base}{The common prefix for the response variable, a character
string.}
\item{time.varying.bases}{A character vector of name prefixes for
time-varying covariates.}
\item{sep}{The character delimiter separating the variable name base from
the period identifier.}
}
\description{
In longitudinal or other multiple response studies, data presented in a long
format will often feature dependence between rows. While this is the
preferred format for lme4, such a format would hide important information
from multiple imputation models and make the MAR assumption less plausible.
Hence, the suggestion is to impute data in a wide format, where rows are
again independent, and then return the mids object to a long format for use
with FitModel, ForwardSelect, or BackwardEliminate.
}
\examples{
wide.df <- data.frame(pid = 1:100,
my.response.1 = rnorm(100),
my.response.2 = rnorm(100),
x.1 = rnorm(100),
x.2 = rnorm(100))
# add missingness
wide.df[25:50, "my.response.2"] <- NA
wide.df[45:55, "x.1"] <- NA
wide.mids <- ImputeData(wide.df, droplist = c("pid"))
long.mids <- WideToLong(wide.mids, "pid", "my.response", c("x"), sep = ".")
my.model <- FitModel(my.response ~ (1 | pid) + x, data = long.mids)
summary(my.model)
}
\references{
Stef van Buuren, Karin Groothuis-Oudshoorn (2011).
mice: Multivariate Imputation by Chained Equations in R. Journal of
Statistical Software, 45(3), 1-67. URL
http://www.jstatsoft.org/v45/i03/.
}
\seealso{
\code{\link{LongToWide}}
}
| /man/WideToLong.Rd | permissive | baogorek/glmmplus | R | false | true | 2,574 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R, R/mice_extentions.R
\name{WideToLong}
\alias{WideToLong}
\alias{WideToLong.data.frame}
\alias{WideToLong.mids}
\title{WideToLong: Converting from wide to long formats}
\usage{
WideToLong(data, id.name, response.base, time.varying.bases, sep)
\method{WideToLong}{data.frame}(data, id.name, response.base,
time.varying.bases = NULL, sep = ".")
\method{WideToLong}{mids}(data, id.name, response.base,
time.varying.bases = NULL, sep = ".")
}
\arguments{
\item{data}{A data frame or mids object in "wide" format. Specifically, both
the response and any time varying covariates should be specified
as multiple columns with the same base name, but a different
suffix. The suffix values will be the future period labels.}
\item{id.name}{The name of the identifying variable, a character string.}
\item{response.base}{The common prefix for the response variable, a character
string.}
\item{time.varying.bases}{A character vector of name prefixes for
time-varying covariates.}
\item{sep}{The character delimiter separating the variable name base from
the period identifier.}
}
\description{
In longitudinal or other multiple response studies, data presented in a long
format will often feature dependence between rows. While this is the
preferred format for lme4, such a format would hide important information
from multiple imputation models and make the MAR assumption less plausible.
Hence, the suggestion is to impute data in a wide format, where rows are
again independent, and then return the mids object to a long format for use
with FitModel, ForwardSelect, or BackwardEliminate.
}
\examples{
wide.df <- data.frame(pid = 1:100,
my.response.1 = rnorm(100),
my.response.2 = rnorm(100),
x.1 = rnorm(100),
x.2 = rnorm(100))
# add missingness
wide.df[25:50, "my.response.2"] <- NA
wide.df[45:55, "x.1"] <- NA
wide.mids <- ImputeData(wide.df, droplist = c("pid"))
long.mids <- WideToLong(wide.mids, "pid", "my.response", c("x"), sep = ".")
my.model <- FitModel(my.response ~ (1 | pid) + x, data = long.mids)
summary(my.model)
}
\references{
Stef van Buuren, Karin Groothuis-Oudshoorn (2011).
mice: Multivariate Imputation by Chained Equations in R. Journal of
Statistical Software, 45(3), 1-67. URL
http://www.jstatsoft.org/v45/i03/.
}
\seealso{
\code{\link{LongToWide}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npfixedcompR.R
\name{computemixdist}
\alias{computemixdist}
\title{Computing non-parametric mixing distribution}
\usage{
computemixdist(x, ...)
}
\arguments{
\item{x}{a object from implemented family generated by \code{\link{makeobject}}.}
\item{...}{parameters above passed to the specific method}
}
\description{
Computing non-parametric mixing distribution
}
\details{
The full list of implemented family is in \code{\link{makeobject}}.
The available parameters are listed as follows:
\itemize{
\item mix: The initial proper mixing distribution
\item tol: tolerance to stop the code
\item maxiter: maximum iterations allowed.
\item verbose: logical; whether to print the intermediate results.
}
This function essentially calls the class method in the object.
}
\examples{
data = rnorm(500, c(0, 2))
pi0 = 0.5
x = makeobject(data, pi0 = pi0, method = "npnormll")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormllw")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormcvm")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormcvmw")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormad")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "nptll")
computemixdist(x)
}
| /man/computemixdist.Rd | no_license | xiangjiexue/npfixedcompR | R | false | true | 1,322 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npfixedcompR.R
\name{computemixdist}
\alias{computemixdist}
\title{Computing non-parametric mixing distribution}
\usage{
computemixdist(x, ...)
}
\arguments{
\item{x}{a object from implemented family generated by \code{\link{makeobject}}.}
\item{...}{parameters above passed to the specific method}
}
\description{
Computing non-parametric mixing distribution
}
\details{
The full list of implemented family is in \code{\link{makeobject}}.
The available parameters are listed as follows:
\itemize{
\item mix: The initial proper mixing distribution
\item tol: tolerance to stop the code
\item maxiter: maximum iterations allowed.
\item verbose: logical; whether to print the intermediate results.
}
This function essentially calls the class method in the object.
}
\examples{
data = rnorm(500, c(0, 2))
pi0 = 0.5
x = makeobject(data, pi0 = pi0, method = "npnormll")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormllw")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormcvm")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormcvmw")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "npnormad")
computemixdist(x)
x = makeobject(data, pi0 = pi0, method = "nptll")
computemixdist(x)
}
|
\name{knn.predict}
\alias{knn.predict}
\title{ KNN prediction routine using pre-calculated distances }
\description{
K-Nearest Neighbor prediction method which uses the distances calculated by
\code{\link{knn.dist}}.
}
\usage{
knn.predict(train, test, y, dist.matrix, k = 1, agg.meth = if (is.factor(y)) "majority" else "mean", ties.meth = "min")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{train}{ indexes which specify the rows of \emph{x} provided to \code{\link{knn.dist}}
to be used in making the predictions }
\item{test}{ indexes which specify the rows of \emph{x} provided to \code{\link{knn.dist}}
to make predictions for }
\item{y}{ responses, see details below }
\item{dist.matrix}{ the output from a call to \code{\link{knn.dist}} }
\item{k}{ the number of nearest neighbors to consider }
\item{agg.meth}{ method to combine responses of the nearest neighbors, defaults to
"majority" for classification and "mean" for continuous responses }
\item{ties.meth}{ method to handle ties for the kth neighbor, the default is "min" which
uses all ties, alternatives include "max" which uses none if there are ties
for the k-th nearest neighbor, "random" which
selects among the ties randomly and "first" which uses the ties in their
order in the data }
}
\details{
Predictions are calculated for each test case by aggregating the responses of the
k-nearest neighbors among the training cases. \code{k} may be specified to be any positive
integer less than the number of training cases, but is generally between 1 and 10.
The indexes for the training and test cases are in reference to the order of the entire
data set as it was passed to \code{\link{knn.dist}}.
Only responses for the training cases are used. The responses provided in y may be those
for the entire data set (test and training cases), or just for the training cases.
The aggregation may be any named function. By default, classification (factored responses)
will use the "majority" class function and non-factored responses will use "mean". Other
options to consider include "min", "max" and "median".
The ties are handled using the \code{\link{rank}} function. Further information may be found
by examining the \code{ties.method} there.
}
\value{
a vector of predictions whose length is the number of test cases.
}
\author{ Atina Dunlap Brooks }
\note{
For the traditional scenario, classification using the Euclidean distance on a fixed set
of training cases and a fixed set of test cases, the method \code{\link[class]{knn}} is ideal.
The functions \code{\link{knn.dist}} and \code{\link{knn.predict}} are intended to be
used when something beyond the traditional case is desired. For example, prediction on
a continuous y (non-classification), cross-validation for the selection of k,
or the use of an alternate distance method are well handled.
}
\seealso{ \code{\link{knn.dist}}, \code{\link{dist}}, \code{\link[class]{knn}} }
\examples{
# a quick classification example
x1 <- c(rnorm(20,mean=1),rnorm(20,mean=5))
x2 <- c(rnorm(20,mean=5),rnorm(20,mean=1))
x <- cbind(x1,x2)
y <- c(rep(1,20),rep(0,20))
train <- sample(1:40,30)
# plot the training cases
plot(x1[train],x2[train],col=y[train]+1,xlab="x1",ylab="x2")
# predict the other cases
test <- (1:40)[-train]
kdist <- knn.dist(x)
preds <- knn.predict(train,test,y,kdist,k=3,agg.meth="majority")
# add the predictions to the plot
points(x1[test],x2[test],col=as.integer(preds)+1,pch="+")
# display the confusion matrix
table(y[test],preds)
# the iris example used by knn(class)
library(class)
data(iris3)
train <- rbind(iris3[1:25,,1], iris3[1:25,,2], iris3[1:25,,3])
test <- rbind(iris3[26:50,,1], iris3[26:50,,2], iris3[26:50,,3])
cl <- factor(c(rep("s",25), rep("c",25), rep("v",25)))
# how to get predictions from knn(class)
pred<-knn(train, test, cl, k = 3)
# display the confusion matrix
table(pred,cl)
# how to get predictions with knn.dist and knn.predict
x <- rbind(train,test)
kdist <- knn.dist(x)
pred <- knn.predict(1:75, 76:150, cl, kdist, k=3)
# display the confusion matrix
table(pred,cl)
# note any small differences are a result of both methods
# breaking ties in majority class randomly
# 5-fold cross-validation to select k for above example
fold <- sample(1:5,75,replace=TRUE)
cvpred <- matrix(NA,nrow=75,ncol=10)
for (k in 1:10)
for (i in 1:5)
cvpred[which(fold==i),k] <- knn.predict(train=which(fold!=i),test=which(fold==i),cl,kdist,k=k)
# display misclassification rates for k=1:10
apply(cvpred,2,function(x) sum(cl!=x))
}
\keyword{ methods }
| /hw2data/knnflex/man/knn.predict.Rd | no_license | xiyuxiexxy/MachineLearningWSU | R | false | false | 4,812 | rd | \name{knn.predict}
\alias{knn.predict}
\title{ KNN prediction routine using pre-calculated distances }
\description{
K-Nearest Neighbor prediction method which uses the distances calculated by
\code{\link{knn.dist}}.
}
\usage{
knn.predict(train, test, y, dist.matrix, k = 1, agg.meth = if (is.factor(y)) "majority" else "mean", ties.meth = "min")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{train}{ indexes which specify the rows of \emph{x} provided to \code{\link{knn.dist}}
to be used in making the predictions }
\item{test}{ indexes which specify the rows of \emph{x} provided to \code{\link{knn.dist}}
to make predictions for }
\item{y}{ responses, see details below }
\item{dist.matrix}{ the output from a call to \code{\link{knn.dist}} }
\item{k}{ the number of nearest neighbors to consider }
\item{agg.meth}{ method to combine responses of the nearest neighbors, defaults to
"majority" for classification and "mean" for continuous responses }
\item{ties.meth}{ method to handle ties for the kth neighbor, the default is "min" which
uses all ties, alternatives include "max" which uses none if there are ties
for the k-th nearest neighbor, "random" which
selects among the ties randomly and "first" which uses the ties in their
order in the data }
}
\details{
Predictions are calculated for each test case by aggregating the responses of the
k-nearest neighbors among the training cases. \code{k} may be specified to be any positive
integer less than the number of training cases, but is generally between 1 and 10.
The indexes for the training and test cases are in reference to the order of the entire
data set as it was passed to \code{\link{knn.dist}}.
Only responses for the training cases are used. The responses provided in y may be those
for the entire data set (test and training cases), or just for the training cases.
The aggregation may be any named function. By default, classification (factored responses)
will use the "majority" class function and non-factored responses will use "mean". Other
options to consider include "min", "max" and "median".
The ties are handled using the \code{\link{rank}} function. Further information may be found
by examining the \code{ties.method} there.
}
\value{
a vector of predictions whose length is the number of test cases.
}
\author{ Atina Dunlap Brooks }
\note{
For the traditional scenario, classification using the Euclidean distance on a fixed set
of training cases and a fixed set of test cases, the method \code{\link[class]{knn}} is ideal.
The functions \code{\link{knn.dist}} and \code{\link{knn.predict}} are intended to be
used when something beyond the traditional case is desired. For example, prediction on
a continuous y (non-classification), cross-validation for the selection of k,
or the use of an alternate distance method are well handled.
}
\seealso{ \code{\link{knn.dist}}, \code{\link{dist}}, \code{\link[class]{knn}} }
\examples{
# a quick classification example
x1 <- c(rnorm(20,mean=1),rnorm(20,mean=5))
x2 <- c(rnorm(20,mean=5),rnorm(20,mean=1))
x <- cbind(x1,x2)
y <- c(rep(1,20),rep(0,20))
train <- sample(1:40,30)
# plot the training cases
plot(x1[train],x2[train],col=y[train]+1,xlab="x1",ylab="x2")
# predict the other cases
test <- (1:40)[-train]
kdist <- knn.dist(x)
preds <- knn.predict(train,test,y,kdist,k=3,agg.meth="majority")
# add the predictions to the plot
points(x1[test],x2[test],col=as.integer(preds)+1,pch="+")
# display the confusion matrix
table(y[test],preds)
# the iris example used by knn(class)
library(class)
data(iris3)
train <- rbind(iris3[1:25,,1], iris3[1:25,,2], iris3[1:25,,3])
test <- rbind(iris3[26:50,,1], iris3[26:50,,2], iris3[26:50,,3])
cl <- factor(c(rep("s",25), rep("c",25), rep("v",25)))
# how to get predictions from knn(class)
pred<-knn(train, test, cl, k = 3)
# display the confusion matrix
table(pred,cl)
# how to get predictions with knn.dist and knn.predict
x <- rbind(train,test)
kdist <- knn.dist(x)
pred <- knn.predict(1:75, 76:150, cl, kdist, k=3)
# display the confusion matrix
table(pred,cl)
# note any small differences are a result of both methods
# breaking ties in majority class randomly
# 5-fold cross-validation to select k for above example
fold <- sample(1:5,75,replace=TRUE)
cvpred <- matrix(NA,nrow=75,ncol=10)
for (k in 1:10)
for (i in 1:5)
cvpred[which(fold==i),k] <- knn.predict(train=which(fold!=i),test=which(fold==i),cl,kdist,k=k)
# display misclassification rates for k=1:10
apply(cvpred,2,function(x) sum(cl!=x))
}
\keyword{ methods }
|
#' Blood pressure data from a clinical study
#'
#' Data from 200 subjects
#'
#' @format A data frame with 2438 rows and 13 variables:
#' \describe{
#' \item{ID}{Subject identification number}
#' \item{BIRTH_WT}{birth weight (lbs)}
#' \item{WEIGHT}{current weight (lbs)}
#' \item{HEIGHT}{current height (cm)}
#' \item{BMI}{current body mass index}
#' \item{age}{current age (yrs)}
#' \item{dias}{diastolic blood pressure}
#' \item{sys}{systolic blood pressure}
#' \item{SexM}{indicator of sex male}
#' \item{RaceB}{indicator of race black}
#' \item{RaceW}{indicator of race white}
#' \item{PHIGHBP}{indicator that either parent had high blood pressure}
#' \item{PDIABET}{indicator that either parent had diabetes}
#' }
#' @source Data provided by Wanzhu Tu, Indiana University School of Medicine
#' @references Tu, W., Eckert, G. J., DiMeglio, L. A., Yu, Z., Jung, J., and Pratt, J. H. (2011). \emph{Intensified effect of adiposity on blood pressure in overweight and obese children}. Hypertension, 58(5), 818-824.
"bloodpressure"
#' Coral reef data from survey data on 6 sites
#'
#' Data from 68 subjects
#'
#' @format A data frame with 269 rows and 14 variables:
#' \describe{
#' \item{ZONE}{Management zone}
#' \item{site}{Name of the habitat site}
#' \item{complexity}{habitat benthic complexity}
#' \item{rugosity}{a measurement related to terrain complexity}
#' \item{LC}{cover of low complexity}
#' \item{HC}{cover of high complexity}
#' \item{SCORE1}{PCA score 1 from Wilson, Graham, Polunin}
#' \item{SCORE2}{PCA score 2 from Wilson, Graham, Polunin}
#'  \item{macro}{macroalgae measure (NOTE: the original description, "indicator of race white", appears to be a copy-paste error from the bloodpressure codebook; verify the actual definition against the source data)}
#' \item{species}{fish species}
#' \item{abundance}{fish abundance}
#' \item{biomass}{fish biomass}
#' }
#' @source Data from supplementary material provided for Fisher, R., Wilson, S. K., Sin, T. M., Lee, A. C., and Langlois, T. J. (2018). \emph{A simple function for full-subsets multiple regression in ecology with R}. Ecology and evolution, 8(12), 6104-6113.
#' @references Wilson, S. K., Graham, N. A. J., and Polunin, N. V. (2007). \emph{Appraisal of visual assessments of habitat complexity and benthic composition on coral reefs}. Marine Biology, 151(3), 1069-1076.
"reef"
| /fuzzedpackages/bayesGAM/R/data.r | no_license | akhikolla/testpackages | R | false | false | 2,237 | r |
#' Blood pressure data from a clinical study
#'
#' Data from 200 subjects
#'
#' @format A data frame with 2438 rows and 13 variables:
#' \describe{
#' \item{ID}{Subject identification number}
#' \item{BIRTH_WT}{birth weight (lbs)}
#' \item{WEIGHT}{current weight (lbs)}
#' \item{HEIGHT}{current height (cm)}
#' \item{BMI}{current body mass index}
#' \item{age}{current age (yrs)}
#' \item{dias}{diastolic blood pressure}
#' \item{sys}{systolic blood pressure}
#' \item{SexM}{indicator of sex male}
#' \item{RaceB}{indicator of race black}
#' \item{RaceW}{indicator of race white}
#' \item{PHIGHBP}{indicator that either parent had high blood pressure}
#' \item{PDIABET}{indicator that either parent had diabetes}
#' }
#' @source Data provided by Wanzhu Tu, Indiana University School of Medicine
#' @references Tu, W., Eckert, G. J., DiMeglio, L. A., Yu, Z., Jung, J., and Pratt, J. H. (2011). \emph{Intensified effect of adiposity on blood pressure in overweight and obese children}. Hypertension, 58(5), 818-824.
"bloodpressure"
#' Coral reef data from survey data on 6 sites
#'
#' Data from 68 subjects
#'
#' @format A data frame with 269 rows and 14 variables:
#' \describe{
#' \item{ZONE}{Management zone}
#' \item{site}{Name of the habitat site}
#' \item{complexity}{habitat benthic complexity}
#' \item{rugosity}{a measurement related to terrain complexity}
#' \item{LC}{cover of low complexity}
#' \item{HC}{cover of high complexity}
#' \item{SCORE1}{PCA score 1 from Wilson, Graham, Polunin}
#' \item{SCORE2}{PCA score 2 from Wilson, Graham, Polunin}
#' \item{macro}{indicator of race white}
#' \item{species}{fish species}
#' \item{abundance}{fish abundance}
#' \item{biomass}{fish biomass}
#' }
#' @source Data from supplementary material provided for Fisher, R., Wilson, S. K., Sin, T. M., Lee, A. C., and Langlois, T. J. (2018). \emph{A simple function for full-subsets multiple regression in ecology with R}. Ecology and evolution, 8(12), 6104-6113.
#' @references Wilson, S. K., Graham, N. A. J., and Polunin, N. V. (2007). \emph{Appraisal of visual assessments of habitat complexity and benthic composition on coral reefs}. Marine Biology, 151(3), 1069-1076.
"reef"
|
#ui.R
library(shiny)
library(shinythemes)
shinyUI(fluidPage(
theme=shinytheme("flatly"),
titlePanel(h1("Word Guesser", align="center"),
windowTitle = "Data Science Capstone Project"),
h4("(reading in your thoughts)", align="center"),
br(),
fluidRow(
column(6, offset=3,
tabsetPanel(type = "tabs",
tabPanel("Standard",
textInput("sentence1", label = "", value = ""),
tags$head(tags$style(type="text/css", "#sentence1 {width: 600px;}")),
fluidRow(
column(6,
actionButton("goButton", "Guess!"),
br(), br(),br()
),
column(6,
p(textOutput("info1")),
h3(textOutput("pred1"))
)
)
),
tabPanel("Dynamic",
textInput("sentence2", label = "", value = ""),
tags$head(tags$style(type="text/css", "#sentence2 {width: 600px;}")),
fluidRow(
column(6,
br(),br(),br()
),
column(6,
p(textOutput("info2")),
h3(textOutput("pred2"))
)
)
)
)
)
),
br(),br(),
fluidRow(
column(5, offset=1,
wellPanel(
h4("Instructions"),
p("Just write something in the text box."),
p("In 'standard' mode, click on the button. In 'dynamic' mode,
the suggestion appears automatically."),
p("At this stage, english (US) is the only available language. Spanish, french,
and german will be proposed soon.")
)
),
column(5,
selectInput("lang",
label = "Language",
choices = list("English (US)" = "en_us",
"German" = "german",
"Italian" = "italian",
"French"= "french"),
selected = "en_us")
)
)
)) | /app1/ui.R | no_license | yonidahan/SwiftKey_capstone | R | false | false | 4,010 | r |
#ui.R
library(shiny)
library(shinythemes)
shinyUI(fluidPage(
theme=shinytheme("flatly"),
titlePanel(h1("Word Guesser", align="center"),
windowTitle = "Data Science Capstone Project"),
h4("(reading in your thoughts)", align="center"),
br(),
fluidRow(
column(6, offset=3,
tabsetPanel(type = "tabs",
tabPanel("Standard",
textInput("sentence1", label = "", value = ""),
tags$head(tags$style(type="text/css", "#sentence1 {width: 600px;}")),
fluidRow(
column(6,
actionButton("goButton", "Guess!"),
br(), br(),br()
),
column(6,
p(textOutput("info1")),
h3(textOutput("pred1"))
)
)
),
tabPanel("Dynamic",
textInput("sentence2", label = "", value = ""),
tags$head(tags$style(type="text/css", "#sentence2 {width: 600px;}")),
fluidRow(
column(6,
br(),br(),br()
),
column(6,
p(textOutput("info2")),
h3(textOutput("pred2"))
)
)
)
)
)
),
br(),br(),
fluidRow(
column(5, offset=1,
wellPanel(
h4("Instructions"),
p("Just write something in the text box."),
p("In 'standard' mode, click on the button. In 'dynamic' mode,
the suggestion appears automatically."),
p("At this stage, english (US) is the only available language. Spanish, french,
and german will be proposed soon.")
)
),
column(5,
selectInput("lang",
label = "Language",
choices = list("English (US)" = "en_us",
"German" = "german",
"Italian" = "italian",
"French"= "french"),
selected = "en_us")
)
)
)) |
library(readxl)
library(readr)
library(dplyr)
library(tidyr)
library(tidyverse)
library(ncdf4)
library(ncdf.tools)
library(raster) # package for raster manipulation
library(rgdal) # package for geospatial analysis
library(ggplot2) # package for plotting
library(maptools)
# Installing
#install.packages("readr")
# Loading
library("readr")
#rm(list=ls())
THV_ActivePower_2011 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2011.xlsx")
THV_ActivePower_2012 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2012.xlsx")
THV_ActivePower_2013 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2013.xlsx")
THV_ActivePower_2014 <- read_excel( "/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power-2014.xlsx")
THV_ActivePower_2015 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2015.xlsx")
THV_ActivePower_2016 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2016.xlsx")
THV_ActivePower_2017 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2017.xlsx")
THV_ActivePower_2015$PCTimeStamp<-as.POSIXct(THV_ActivePower_2015$PCTimeStamp)
THV_ActivePower_2016$PCTimeStamp<-as.POSIXct(THV_ActivePower_2016$PCTimeStamp)
THV_ActivePower_2017$PCTimeStamp<-as.POSIXct(THV_ActivePower_2017$PCTimeStamp)
THV_2011_time<-as.POSIXct(THV_ActivePower_2011$X__1[3:52562])
THV_2012_time<-as.POSIXct(THV_ActivePower_2012$X__1[3:52706])
THV_2013_time<-as.POSIXct(THV_ActivePower_2013$X__1[3:52562])
THV_2014_time<-as.POSIXct(THV_ActivePower_2014$X__1[3:52562])
THV_2015_time<-THV_ActivePower_2015$PCTimeStamp
THV_2016_time<-THV_ActivePower_2016$PCTimeStamp
THV_2017_time<-THV_ActivePower_2017$PCTimeStamp
ActivePower_2011_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2011=data.frame(THV_ActivePower_2011[3:52562,c(2:31)])
ActivePower_2012_data=matrix(0,nrow=52704,ncol=30)
ActivePower_2012=data.frame(THV_ActivePower_2012[3:52706,c(2:31)])
ActivePower_2013_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2013=data.frame(THV_ActivePower_2013[3:52562,c(2:31)])
ActivePower_2014_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2014=data.frame(THV_ActivePower_2014[3:52562,c(2:31)])
## ActivePower_2012_data 52707 obs. of 30 variables
for (i in 1:52704)
{
for (j in 1:30)
{
ActivePower_2012_data[i,j]<- as.numeric(ActivePower_2012[i,j])
}
}
for (ii in 1:52560)
{
for (j in 1:30)
{
ActivePower_2011_data[ii,j]<- as.numeric(ActivePower_2011[ii,j])
ActivePower_2013_data[ii,j]<- as.numeric(ActivePower_2013[ii,j])
ActivePower_2014_data[ii,j]<- as.numeric(ActivePower_2014[ii,j])
}
}
ActivePower_2015_data=data.frame(THV_ActivePower_2015[,2:31])
ActivePower_2016_data=data.frame(THV_ActivePower_2016[,2:31])
ActivePower_2017_data=data.frame(THV_ActivePower_2017[,2:31])
ActivePower_2011_data=as_data_frame(ActivePower_2011_data)
ActivePower_2012_data=as_data_frame(ActivePower_2012_data)
ActivePower_2013_data=as_data_frame(ActivePower_2013_data)
ActivePower_2014_data=as_data_frame(ActivePower_2014_data)
names(ActivePower_2011_data)=names(ActivePower_2015_data)
names(ActivePower_2012_data)=names(ActivePower_2015_data)
names(ActivePower_2013_data)=names(ActivePower_2015_data)
names(ActivePower_2014_data)=names(ActivePower_2015_data)
names(ActivePower_2015_data)=names(ActivePower_2015_data)
power_data=rbind(ActivePower_2011_data,ActivePower_2012_data,ActivePower_2013_data,ActivePower_2014_data,ActivePower_2015_data,ActivePower_2016_data,ActivePower_2017_data)
power_data[is.na(power_data)] <- -0.009999
# 368208st
tt_power=data.frame(PCTimeStamp=c(THV_2011_time,THV_2012_time,THV_2013_time,THV_2014_time,THV_2015_time,THV_2016_time,THV_2017_time))#THV_ActivePower_2011_2014_time=matrix(0,nrow=210380,ncol=1)
power_data_thv_theni=data.frame(tt_power,power_data)
power_data_thv_theni[is.na(power_data_thv_theni)] <- -0.009999
# 368208st
write.table(power_data_thv_theni, file = "powerdata_THV_tehni_misssing0.009999.txt", sep = "\t",
row.names = FALSE)
| /Wind farms analysis/create_file_POW_CLP_THENI.R | no_license | yasminezakari/projects | R | false | false | 4,044 | r | library(readxl)
library(readr)
library(dplyr)
library(tidyr)
library(tidyverse)
library(ncdf4)
library(ncdf.tools)
library(raster) # package for raster manipulation
library(rgdal) # package for geospatial analysis
library(ggplot2) # package for plotting
library(maptools)
# Installing
#install.packages("readr")
# Loading
library("readr")
#rm(list=ls())
THV_ActivePower_2011 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2011.xlsx")
THV_ActivePower_2012 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2012.xlsx")
THV_ActivePower_2013 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power 2013.xlsx")
THV_ActivePower_2014 <- read_excel( "/Users/zakari/Desktop/CLP\ data/Theni/THV-Active Power-2014.xlsx")
THV_ActivePower_2015 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2015.xlsx")
THV_ActivePower_2016 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2016.xlsx")
THV_ActivePower_2017 <- read_excel("/Users/zakari/Desktop/CLP\ data/Theni/THV - Active Power - 2017.xlsx")
THV_ActivePower_2015$PCTimeStamp<-as.POSIXct(THV_ActivePower_2015$PCTimeStamp)
THV_ActivePower_2016$PCTimeStamp<-as.POSIXct(THV_ActivePower_2016$PCTimeStamp)
THV_ActivePower_2017$PCTimeStamp<-as.POSIXct(THV_ActivePower_2017$PCTimeStamp)
THV_2011_time<-as.POSIXct(THV_ActivePower_2011$X__1[3:52562])
THV_2012_time<-as.POSIXct(THV_ActivePower_2012$X__1[3:52706])
THV_2013_time<-as.POSIXct(THV_ActivePower_2013$X__1[3:52562])
THV_2014_time<-as.POSIXct(THV_ActivePower_2014$X__1[3:52562])
THV_2015_time<-THV_ActivePower_2015$PCTimeStamp
THV_2016_time<-THV_ActivePower_2016$PCTimeStamp
THV_2017_time<-THV_ActivePower_2017$PCTimeStamp
ActivePower_2011_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2011=data.frame(THV_ActivePower_2011[3:52562,c(2:31)])
ActivePower_2012_data=matrix(0,nrow=52704,ncol=30)
ActivePower_2012=data.frame(THV_ActivePower_2012[3:52706,c(2:31)])
ActivePower_2013_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2013=data.frame(THV_ActivePower_2013[3:52562,c(2:31)])
ActivePower_2014_data=matrix(0,nrow=52560,ncol=30)
ActivePower_2014=data.frame(THV_ActivePower_2014[3:52562,c(2:31)])
## ActivePower_2012_data 52707 obs. of 30 variables
for (i in 1:52704)
{
for (j in 1:30)
{
ActivePower_2012_data[i,j]<- as.numeric(ActivePower_2012[i,j])
}
}
for (ii in 1:52560)
{
for (j in 1:30)
{
ActivePower_2011_data[ii,j]<- as.numeric(ActivePower_2011[ii,j])
ActivePower_2013_data[ii,j]<- as.numeric(ActivePower_2013[ii,j])
ActivePower_2014_data[ii,j]<- as.numeric(ActivePower_2014[ii,j])
}
}
ActivePower_2015_data=data.frame(THV_ActivePower_2015[,2:31])
ActivePower_2016_data=data.frame(THV_ActivePower_2016[,2:31])
ActivePower_2017_data=data.frame(THV_ActivePower_2017[,2:31])
ActivePower_2011_data=as_data_frame(ActivePower_2011_data)
ActivePower_2012_data=as_data_frame(ActivePower_2012_data)
ActivePower_2013_data=as_data_frame(ActivePower_2013_data)
ActivePower_2014_data=as_data_frame(ActivePower_2014_data)
names(ActivePower_2011_data)=names(ActivePower_2015_data)
names(ActivePower_2012_data)=names(ActivePower_2015_data)
names(ActivePower_2013_data)=names(ActivePower_2015_data)
names(ActivePower_2014_data)=names(ActivePower_2015_data)
names(ActivePower_2015_data)=names(ActivePower_2015_data)
power_data=rbind(ActivePower_2011_data,ActivePower_2012_data,ActivePower_2013_data,ActivePower_2014_data,ActivePower_2015_data,ActivePower_2016_data,ActivePower_2017_data)
power_data[is.na(power_data)] <- -0.009999
# 368208st
tt_power=data.frame(PCTimeStamp=c(THV_2011_time,THV_2012_time,THV_2013_time,THV_2014_time,THV_2015_time,THV_2016_time,THV_2017_time))#THV_ActivePower_2011_2014_time=matrix(0,nrow=210380,ncol=1)
power_data_thv_theni=data.frame(tt_power,power_data)
power_data_thv_theni[is.na(power_data_thv_theni)] <- -0.009999
# 368208st
write.table(power_data_thv_theni, file = "powerdata_THV_tehni_misssing0.009999.txt", sep = "\t",
row.names = FALSE)
|
setwd("~/GitHub/MMSS_311_2")
qog <- read.csv("http://www.qogdata.pol.gu.se/data/qog_std_cs_jan19.csv")
print(dim(qog))
| /lab1 4-5.R | no_license | kevinhyx1220/MMSS_311_2 | R | false | false | 119 | r | setwd("~/GitHub/MMSS_311_2")
qog <- read.csv("http://www.qogdata.pol.gu.se/data/qog_std_cs_jan19.csv")
print(dim(qog))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostics.R
\name{reportDACC}
\alias{reportDACC}
\title{Summarise diagnostic accuracy data in a convenient format}
\usage{
reportDACC(df)
}
\arguments{
\item{df}{A data frame of diagnostic accuracy data as returned by
\code{extractDACC}}
}
\value{
A data frame summarising the diagnostic accuracy information for each
study
}
\description{
Summarise diagnostic accuracy data in a convenient format
}
\seealso{
\code{extractDACC}, \code{\link[mada]{madad}}
}
| /man/reportDACC.Rd | no_license | RichardBirnie/mautils | R | false | true | 540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostics.R
\name{reportDACC}
\alias{reportDACC}
\title{Summarise diagnostic accuracy data in a convenient format}
\usage{
reportDACC(df)
}
\arguments{
\item{df}{A data frame of diagnostic accuracy data as returned by
\code{extractDACC}}
}
\value{
A data frame summarising the diagnostic accuracy information for each
study
}
\description{
Summarise diagnostic accuracy data in a convenient format
}
\seealso{
\code{extractDACC}, \code{\link[mada]{madad}}
}
|
modelInfo <- list(label = "Nearest Shrunken Centroids",
library = "pamr",
type = "Classification",
parameters = data.frame(parameter = 'threshold',
class = "numeric",
label = 'Shrinkage Threshold'),
grid = function(x, y, len = NULL, search = "grid") {
cc <- complete.cases(x) & complete.cases(y)
x <- x[cc,,drop = FALSE]
y <- y[cc]
initialThresh <- pamr.train(list(x=t(x), y=y))$threshold
initialThresh <- initialThresh[-c(1, length(initialThresh))]
if(search == "grid") {
out <- data.frame(threshold = seq(from = min(initialThresh),
to = max(initialThresh),
length = len))
} else {
out <- data.frame(threshold = runif(len, min = min(initialThresh),max = max(initialThresh)))
}
out
},
loop = function(grid) {
grid <- grid[order(grid$threshold, decreasing = TRUE),, drop = FALSE]
loop <- grid[1,,drop = FALSE]
submodels <- list(grid[-1,,drop = FALSE])
list(loop = loop, submodels = submodels)
},
fit = function(x, y, wts, param, lev, last, classProbs, ...)
pamr.train(list(x = t(x), y = y), threshold = param$threshold, ...),
predict = function(modelFit, newdata, submodels = NULL) {
out <- pamr.predict(modelFit,
t(newdata),
threshold = modelFit$tuneValue$threshold)
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
for(j in seq(along = submodels$threshold))
{
tmp[[j+1]] <- pamr.predict(modelFit,
t(newdata),
threshold = submodels$threshold[j])
}
out <- tmp
}
out
},
prob = function(modelFit, newdata, submodels = NULL) {
out <- pamr.predict(modelFit, t(newdata),
threshold = modelFit$tuneValue$threshold,
type= "posterior")
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
for(j in seq(along = submodels$threshold))
{
tmpProb <- pamr.predict(modelFit, t(newdata),
threshold = submodels$threshold[j],
type= "posterior")
tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels,drop = FALSE])
}
out <- tmp
}
out
},
predictors = function(x, newdata = NULL, threshold = NULL, ...) {
if(is.null(newdata))
{
if(!is.null(x$xData)) newdata <- x$xData else stop("must supply newdata")
}
if(is.null(threshold))
{
if(!is.null(x$threshold)) threshold <- x$threshold else stop("must supply threshold")
}
varIndex <- pamr.predict(x, newx = newdata, threshold = threshold, type = "nonzero")
colnames(newdata)[varIndex]
},
varImp = function (object, threshold = NULL, data = NULL, ...) {
if(is.null(data)) data <- object$xData
if(is.null(threshold)) threshold <- object$tuneValue$threshold
if( dim(object$centroids)[1] != dim(data)[2])
stop("the number of columns (=variables) is not consistent with the pamr object")
if(is.null(dimnames(data))) {
featureNames <- paste("Feature", seq(along = data[1,]), sep = "")
colnames(data) <- featureNames
} else featureNames <- dimnames(data)[[2]]
x <- t(data)
retainedX <- x[object$gene.subset, object$sample.subset, drop = F]
centroids <- pamr.predict(object, x, threshold = threshold, type = "cent")
standCentroids <- (centroids - object$centroid.overall)/object$sd
rownames(standCentroids) <- featureNames
colnames(standCentroids) <- names(object$prior)
as.data.frame(standCentroids)
},
levels = function(x) names(x$prior),
tags = c("Prototype Models", "Implicit Feature Selection", "Linear Classifier"),
sort = function(x) x[order(x[,1]),])
| /models/files/pam.R | no_license | JackStat/caret | R | false | false | 5,663 | r | modelInfo <- list(label = "Nearest Shrunken Centroids",
library = "pamr",
type = "Classification",
parameters = data.frame(parameter = 'threshold',
class = "numeric",
label = 'Shrinkage Threshold'),
grid = function(x, y, len = NULL, search = "grid") {
cc <- complete.cases(x) & complete.cases(y)
x <- x[cc,,drop = FALSE]
y <- y[cc]
initialThresh <- pamr.train(list(x=t(x), y=y))$threshold
initialThresh <- initialThresh[-c(1, length(initialThresh))]
if(search == "grid") {
out <- data.frame(threshold = seq(from = min(initialThresh),
to = max(initialThresh),
length = len))
} else {
out <- data.frame(threshold = runif(len, min = min(initialThresh),max = max(initialThresh)))
}
out
},
loop = function(grid) {
grid <- grid[order(grid$threshold, decreasing = TRUE),, drop = FALSE]
loop <- grid[1,,drop = FALSE]
submodels <- list(grid[-1,,drop = FALSE])
list(loop = loop, submodels = submodels)
},
fit = function(x, y, wts, param, lev, last, classProbs, ...)
pamr.train(list(x = t(x), y = y), threshold = param$threshold, ...),
predict = function(modelFit, newdata, submodels = NULL) {
out <- pamr.predict(modelFit,
t(newdata),
threshold = modelFit$tuneValue$threshold)
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
for(j in seq(along = submodels$threshold))
{
tmp[[j+1]] <- pamr.predict(modelFit,
t(newdata),
threshold = submodels$threshold[j])
}
out <- tmp
}
out
},
prob = function(modelFit, newdata, submodels = NULL) {
out <- pamr.predict(modelFit, t(newdata),
threshold = modelFit$tuneValue$threshold,
type= "posterior")
if(!is.null(submodels))
{
tmp <- vector(mode = "list", length = nrow(submodels) + 1)
tmp[[1]] <- out
for(j in seq(along = submodels$threshold))
{
tmpProb <- pamr.predict(modelFit, t(newdata),
threshold = submodels$threshold[j],
type= "posterior")
tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels,drop = FALSE])
}
out <- tmp
}
out
},
predictors = function(x, newdata = NULL, threshold = NULL, ...) {
if(is.null(newdata))
{
if(!is.null(x$xData)) newdata <- x$xData else stop("must supply newdata")
}
if(is.null(threshold))
{
if(!is.null(x$threshold)) threshold <- x$threshold else stop("must supply threshold")
}
varIndex <- pamr.predict(x, newx = newdata, threshold = threshold, type = "nonzero")
colnames(newdata)[varIndex]
},
varImp = function (object, threshold = NULL, data = NULL, ...) {
if(is.null(data)) data <- object$xData
if(is.null(threshold)) threshold <- object$tuneValue$threshold
if( dim(object$centroids)[1] != dim(data)[2])
stop("the number of columns (=variables) is not consistent with the pamr object")
if(is.null(dimnames(data))) {
featureNames <- paste("Feature", seq(along = data[1,]), sep = "")
colnames(data) <- featureNames
} else featureNames <- dimnames(data)[[2]]
x <- t(data)
retainedX <- x[object$gene.subset, object$sample.subset, drop = F]
centroids <- pamr.predict(object, x, threshold = threshold, type = "cent")
standCentroids <- (centroids - object$centroid.overall)/object$sd
rownames(standCentroids) <- featureNames
colnames(standCentroids) <- names(object$prior)
as.data.frame(standCentroids)
},
levels = function(x) names(x$prior),
tags = c("Prototype Models", "Implicit Feature Selection", "Linear Classifier"),
sort = function(x) x[order(x[,1]),])
|
\name{optrees-package}
\alias{optrees-package}
\alias{optrees}
\docType{package}
\title{Optimal Trees in Weighted Graphs}
\description{Finds optimal trees in weighted graphs. In particular, this package provides solving tools for minimum cost spanning tree problems, minimum cost arborescence problems, shortest path tree problems and minimum cut tree problems.
}
\details{
\tabular{ll}{
Package: \tab optrees\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-09-01\cr
License: \tab GPL-3 \cr
}
The most important functions are \link{getMinimumSpanningTree}, \link{getMinimumArborescence}, \link{getShortestPathTree} and \link{getMinimumCutTree}. The other functions included in the package are auxiliary functions that can be used independently.
}
\author{
Manuel Fontenla <manu.fontenla@gmail.com>
} | /man/optrees-package.Rd | no_license | Ayoub-Idrissi/optrees | R | false | false | 832 | rd | \name{optrees-package}
\alias{optrees-package}
\alias{optrees}
\docType{package}
\title{Optimal Trees in Weighted Graphs}
\description{Finds optimal trees in weighted graphs. In particular, this package provides solving tools for minimum cost spanning tree problems, minimum cost arborescence problems, shortest path tree problems and minimum cut tree problems.
}
\details{
\tabular{ll}{
Package: \tab optrees\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-09-01\cr
License: \tab GPL-3 \cr
}
The most important functions are \link{getMinimumSpanningTree}, \link{getMinimumArborescence}, \link{getShortestPathTree} and \link{getMinimumCutTree}. The other functions included in the package are auxiliary functions that can be used independently.
}
\author{
Manuel Fontenla <manu.fontenla@gmail.com>
} |
# UI fragment for the shinydashboard "column" tab: one 12-wide tabBox holding
# four structurally identical sheets, each pairing an operation area (width 4)
# with a report area (width 8). The quoted labels 'sheetN' / 'rptN' are
# placeholder content -- presumably swapped for real widgets elsewhere; verify.
# NOTE(review): the four tabPanel blocks differ only in those two labels;
# generating them with lapply() would remove the copy/paste duplication.
menu_column <- tabItem(tabName = "column",
                       fluidRow(
                         column(width = 12,
                                tabBox(title ="column工作台",width = 12,
                                       id='tabSet_column',height = '300px',
                                       tabPanel('sheet1',tagList(
                                         fluidRow(column(4,box(
                                           title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'sheet1'
                                         )),
                                         column(8, box(
                                           title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'rpt1'
                                         )
                                         ))
                                       )),
                                       tabPanel('sheet2',tagList(
                                         fluidRow(column(4,box(
                                           title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'sheet2'
                                         )),
                                         column(8, box(
                                           title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'rpt2'
                                         )
                                         ))
                                       )),
                                       tabPanel('sheet3',tagList(
                                         fluidRow(column(4,box(
                                           title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'sheet3'
                                         )),
                                         column(8, box(
                                           title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'rpt3'
                                         )
                                         ))
                                       )),
                                       tabPanel('sheet4',tagList(
                                         fluidRow(column(4,box(
                                           title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'sheet4'
                                         )),
                                         column(8, box(
                                           title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
                                           'rpt4'
                                         )
                                         ))
                                       ))
                                )
                         )
                       )
) | /02_column_body.R | no_license | takewiki/bes | R | false | false | 3,550 | r | menu_column <- tabItem(tabName = "column",
fluidRow(
column(width = 12,
tabBox(title ="column工作台",width = 12,
id='tabSet_column',height = '300px',
tabPanel('sheet1',tagList(
fluidRow(column(4,box(
title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
'sheet1'
)),
column(8, box(
title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
'rpt1'
)
))
)),
tabPanel('sheet2',tagList(
fluidRow(column(4,box(
title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
'sheet2'
)),
column(8, box(
title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
'rpt2'
)
))
)),
tabPanel('sheet3',tagList(
fluidRow(column(4,box(
title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
'sheet3'
)),
column(8, box(
title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
'rpt3'
)
))
)),
tabPanel('sheet4',tagList(
fluidRow(column(4,box(
title = "操作区域", width = NULL, solidHeader = TRUE, status = "primary",
'sheet4'
)),
column(8, box(
title = "报表区域", width = NULL, solidHeader = TRUE, status = "primary",
'rpt4'
)
))
))
)
)
)
) |
### -------------------------------------------------------- ###
# -------------------- Combine CQO Results --------------------#
### -------------------------------------------------------- ###
#Jonathan Jupke
#22.01.19
#Paper: Should ecologists prefer model- over algorithm-based multivariate methods?
#Combine the results of the single CQOs into one table.
## -- OVERVIEW -- ##
# 1.Setup
# 2.Build Table
# 3.Work on Table
# 4.Save to File
## -------------- ##
# 01 Setup ----------------------------------------------------------------
pacman::p_load(dplyr, data.table)
# other required packages: here, fs, readr, stringr
setwd(here::here("result_data/02_cqo/"))
# One result file per simulation setting; each file load()s a CQO_result list
# with one entry per random seed.
output.files = fs::dir_ls()
# create empty list to hold results (one per result file, combined at the end)
list.of.analysis.data <- vector(mode = "list")
# start points to fill analysis data tables. Start and endpoints for different seeds:
# seed k occupies rows fill.ends[k] .. k*4 of each 20-row table (4 variables x 5 seeds).
fill.ends <- c(1, 5, 9, 13, 17)
# 02. Build Table ---------------------------------------------------------
## FOR LOOP: READ RESULT FILES AND FORMAT INTO TABLE.
for (i in 1:length(output.files)) {
  # BEGIN FOR LOOP 1
  load(output.files[i])
  # Number of rows: 4 per seed times 5 seeds
  # NOTE(review): columns are created on an empty data.table via `:=`; this
  # relies on the installed data.table version accepting column creation on a
  # null table -- verify, newer versions may error here.
  analysis.data = data.table()
  analysis.data[,
                c("variable",
                  "samples",
                  "response",
                  "method"
                ) :=
                  list(
                    rep(c("env1", "env2", "rand1", "rand2"), 5),
                    rep(CQO_result[[1]]$Samples, 20),
                    rep(CQO_result[[1]]$Response, 20),
                    rep("CQO", 20)
                  )
                ]
  # FOR 5 SEEDS
  for (k in 1:5) {
    # BEGIN FOR LOOP 2
    # Per-variable test statistic = row sums of the absolute C coefficients;
    # runtime (elapsed seconds, Time[3]) is repeated over the seed's 4 rows.
    analysis.data[fill.ends[k]:(k * 4),
                  c("test statistic",
                    "runtime") :=
                    list(
                      as.numeric(apply(abs(CQO_result[[k]]$Summary@post$Coef@C), 1, sum)),
                      rep(as.numeric(CQO_result[[k]]$Time[3]), 4)
                    )
                  ]
    analysis.data[fill.ends[k]:(k * 4), "p.value" := CQO_result[[k]][7]]
  } # END FOR LOOP 2
  list.of.analysis.data[[i]] <- analysis.data
} # END FOR LOOP 1
cqo_combine = rbindlist(list.of.analysis.data)
# 03. Work on table --------------------------------------------------------
# replace rand1 and rand2 with noise
cqo_combine[variable %like% "rand", variable := "Noise"]
# Pre-create one false-negative and one false-positive indicator column per
# significance level, initialised to 0 and flipped to 1 below.
cqo_combine[, c("false.negative.01",
                "false.negative.03",
                "false.negative.05",
                "false.negative.07",
                "false.negative.1",
                "false.positive.01",
                "false.positive.03",
                "false.positive.05",
                "false.positive.07",
                "false.positive.1") := 0]
# FPR and FNR: an informative ("env") variable with p > alpha is a false
# negative; a Noise variable with p < alpha is a false positive.
signivalue = c(0.01, 0.03, 0.05, 0.07, 0.1)
for (sv in 1:5) {
  sigv = signivalue[sv]
  # Derive the column suffix (".01", ".05", ...) from the alpha level.
  n.variable = paste0("false.negative",
                      stringr::str_extract(as.character(sigv), "\\.+.*")
  )
  p.variable = paste0("false.positive",
                      stringr::str_extract(as.character(sigv), "\\.+.*")
  )
  # NOTE(review): these scalar if() conditions error if p.value is NA --
  # confirm upstream CQO results never contain missing p-values.
  for (i in 1:nrow(cqo_combine)) {
    if (cqo_combine[i, variable %like% "env" & p.value > sigv])
      cqo_combine[i, (n.variable) := 1]
    if (cqo_combine[i, variable == "Noise" & p.value < sigv])
      cqo_combine[i, (p.variable) := 1]
  }
}
# 04. Save to File --------------------------------------------------------
# NOTE(review): `path` is deprecated in newer readr releases (use `file`);
# kept as-is for compatibility with the readr version used by this project.
readr::write_csv(
  x = cqo_combine,
  path = here::here("result_data/05_collected_results/cqo_results.csv")
)
| /r_scripts/03_analyse_results/combine_cqo_results.R | no_license | JonJup/Should-ecologists-prefer-model-over-distance-based-multivariate-methods | R | false | false | 3,853 | r | ### -------------------------------------------------------- ###
# -------------------- Combine CQO Results --------------------#
### -------------------------------------------------------- ###
#Jonathan Jupke
#22.01.19
#Paper: Should ecologists prefer model- over algorithm-based multivariate methods?
#Combine the results of the single CQOs into one table.
## -- OVERVIEW -- ##
# 1.Setup
# 2.Build Table
# 3.Work on Table
# 4.Save to File
## -------------- ##
# 01 Setup ----------------------------------------------------------------
pacman::p_load(dplyr, data.table)
# other required packages: here, fs, readr, stringr
setwd(here::here("result_data/02_cqo/"))
output.files = fs::dir_ls()
# create empty list to hold results
list.of.analysis.data <- vector(mode = "list")
# start points to fill analysis data tables. Start and endpoints for different seeds
fill.ends <- c(1, 5, 9, 13, 17)
# 02. Build Table ---------------------------------------------------------
## FOR LOOP: READ RESULT FILES AND FORMAT INTO TABLE.
for (i in 1:length(output.files)) {
# BEGIN FOR LOOP 1
load(output.files[i])
# Number of rows: 4 per seed times 5 seeds
analysis.data = data.table()
analysis.data[,
c("variable",
"samples",
"response",
"method"
) :=
list(
rep(c("env1", "env2", "rand1", "rand2"), 5),
rep(CQO_result[[1]]$Samples, 20),
rep(CQO_result[[1]]$Response, 20),
rep("CQO", 20)
)
]
# FOR 5 SEEDS
for (k in 1:5) {
# BEGIN FOR LOOP 2
analysis.data[fill.ends[k]:(k * 4),
c("test statistic",
"runtime") :=
list(
as.numeric(apply(abs(CQO_result[[k]]$Summary@post$Coef@C), 1, sum)),
rep(as.numeric(CQO_result[[k]]$Time[3]), 4)
)
]
analysis.data[fill.ends[k]:(k * 4), "p.value" := CQO_result[[k]][7]]
} # END FOR LOOP 2
list.of.analysis.data[[i]] <- analysis.data
} # END FOR LOOP 1
cqo_combine = rbindlist(list.of.analysis.data)
# 03. Work on table --------------------------------------------------------
# replace rand1 and rand2 with noise
cqo_combine[variable %like% "rand", variable := "Noise"]
cqo_combine[, c("false.negative.01",
"false.negative.03",
"false.negative.05",
"false.negative.07",
"false.negative.1",
"false.positive.01",
"false.positive.03",
"false.positive.05",
"false.positive.07",
"false.positive.1") := 0]
# FPR and FNR
signivalue = c(0.01, 0.03, 0.05, 0.07, 0.1)
for (sv in 1:5) {
sigv = signivalue[sv]
n.variable = paste0("false.negative",
stringr::str_extract(as.character(sigv), "\\.+.*")
)
p.variable = paste0("false.positive",
stringr::str_extract(as.character(sigv), "\\.+.*")
)
for (i in 1:nrow(cqo_combine)) {
if (cqo_combine[i, variable %like% "env" & p.value > sigv])
cqo_combine[i, (n.variable) := 1]
if (cqo_combine[i, variable == "Noise" & p.value < sigv])
cqo_combine[i, (p.variable) := 1]
}
}
# 04. Save to File --------------------------------------------------------
readr::write_csv(
x = cqo_combine,
path = here::here("result_data/05_collected_results/cqo_results.csv")
)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(tidyr)
library(ggmap)
library(ggplot2)
library(stringr) #str_detect
source("getLatLng.R")
Mykey = "YOUR_KEY"
register_google(key = Mykey)
# Define UI for application that draws a histogram
# Application UI: a navbar with three tabs.
#  * 地址分析 -- geocode addresses, plot them on a map, run k-means.
#  * 訂單分析 -- filter the order table by price and payment type.
#  * 會員分析 -- look up members by (partial) phone number.
ui <- local({
  # Tab 1: address analysis (controls on the left, result tabs on the right).
  address_tab <- tabPanel(
    "地址分析",
    column(
      3,
      h4("API抓取經緯度"),
      actionButton("request", "取資料"),
      hr(),
      numericInput("seed", h3("種子碼"), value = 20180112),
      numericInput("k_value", h3("k值"), value = 10),
      actionButton("do", "執行")
    ),
    column(
      9,
      tabsetPanel(
        tabPanel("地址表單", tableOutput("Rout1")),
        tabPanel("地址分布", plotOutput("Rout2", height = "800px")),
        tabPanel("k-mean結果", plotOutput("Rout3", height = "800px"))
      )
    )
  )
  # Tab 2: order analysis (price slider + payment-type selector).
  orders_tab <- tabPanel(
    "訂單分析",
    fluidRow(
      column(
        3,
        h4("過濾"),
        sliderInput("price", "價格多少以上",
                    min = 0, max = 12000, value = 300,
                    step = 100, round = 0),
        selectInput("payment", "付款方式", c("信用卡",
                                         "ATM轉帳",
                                         "貨到付款",
                                         "現金",
                                         "無",
                                         "其他"))
      ),
      column(9, tableOutput("ordersTable"))
    )
  )
  # Tab 3: member analysis (phone-number lookup).
  members_tab <- tabPanel(
    "會員分析",
    fluidRow(
      column(
        3,
        h4("user資料"),
        textInput("phone_number", h3("手機號碼"), value = "")
      ),
      column(9, tableOutput("userTable"))
    )
  )
  fluidPage(navbarPage("首頁", address_tab, orders_tab, members_tab))
})
# Define server logic required to draw a histogram
# Server logic: reads the address/order/user CSVs once at startup and wires up
# the three tabs defined in the UI.
server <- function(input, output) {
  # Unlike plain R scripts, mutable Shiny state must live in reactiveValues
  # (used much like a list).
  v <- reactiveValues(address = read.csv("address.csv",stringsAsFactors = FALSE,header = FALSE, fileEncoding = "UTF-8"))
  orders <- read.csv("orders.csv", stringsAsFactors = FALSE)
  user <- read.csv("user.csv", stringsAsFactors = FALSE)
  #----------- tab panel 1 begins
  # Wrapper around getLatLng() that also advances the progress bar.
  # (NB: "Procress" is an existing typo, kept so callers keep working.)
  getLatLngWithProcress = function(address, total){
    incProgress(1/total,detail = "解析地址中")
    return(getLatLng(address))
  }
  # Geocode every address (column V1) via the Google API; split the returned
  # "Lat,Lng" string into numeric columns and drop rows that failed ("error").
  observeEvent(input$request,{
    withProgress(message = "擷取經緯度", value = 0,{
      v$addresswithLatLng <- v$address %>%
        rowwise() %>%
        mutate(LatLng = getLatLngWithProcress(V1,nrow(v$address))) %>%
        filter(LatLng != "error") %>%
        separate(LatLng,c("Lat","Lng"),sep = ",") %>%
        mutate(Lat = as.numeric(Lat), Lng = as.numeric(Lng))
    })
  })
  # Address table; falls back to the raw address list until geocoding has run.
  output$Rout1 <- renderTable({
    if (is.null(v$addresswithLatLng))
      return(v$address)
    v$addresswithLatLng
  })
  #-------------
  # Scatter of the geocoded points over a Google map of Taipei.
  # NOTE(review): aes(x = Lat, y = Lng) looks swapped -- on a map x should be
  # longitude (~121) and y latitude (~25). Verify the order getLatLng()
  # actually encodes into its "Lat,Lng" string.
  output$Rout2 <- renderPlot({
    if(is.null(v$addresswithLatLng))
      return()
    ggmap(get_googlemap(center=c(121.52311,25.04126), zoom=12, maptype='satellite'), extent='device') +
      geom_point(data = v$addresswithLatLng, aes(x = Lat, y= Lng), colour = "red")
  })
  #-------------
  # Run k-means on the geocoded coordinates with the user-chosen seed and k,
  # storing the cluster id per address in v$addressWithKmean.
  observeEvent(input$do,{
    if(is.null(v$addresswithLatLng))
      return()
    set.seed(input$seed)
    k <- kmeans(x = v$addresswithLatLng[,c("Lat","Lng")], centers = input$k_value)
    v$addressWithKmean <- v$addresswithLatLng %>%
      ungroup() %>%
      mutate(category = k$cluster)
    print("Kmean success")
  })
  # Same map, points coloured by cluster membership.
  output$Rout3 <- renderPlot({
    if(is.null(v$addressWithKmean))
      return()
    map <- get_googlemap(center=c(121.52311,25.04126), zoom=12, maptype='satellite')
    ggmap(map,extent='device') + geom_point(data = v$addressWithKmean, aes(x = Lat, y= Lng), colour = factor(v$addressWithKmean$category)) # factor() turns cluster ids into discrete labels
  })
  #---------------tab panel 1 ends
  # Orders priced above the slider value with the selected payment type.
  output$ordersTable <- renderTable({
    orders %>%
      filter(input$price < PRICE, input$payment == PAYMENTTYPE)
  })
  #--------------tab panel 2 ends
  # Member lookup by partial phone-number match; empty input shows everyone.
  output$userTable <- renderTable({
    if(input$phone_number=="") return(user)
    user %>%
      filter(str_detect(MOBILE, input$phone_number))
  })
  #--------------tab panel 3 ends
}
# Run the application
shinyApp(ui = ui, server = server)
| /main.R | no_license | encoreg34979/shiny_practice | R | false | false | 4,697 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(tidyr)
library(ggmap)
library(ggplot2)
library(stringr) #str_detect
source("getLatLng.R")
Mykey = "YOUR_KEY"
register_google(key = Mykey)
# Define UI for application that draws a histogram
ui <- fluidPage(
navbarPage("首頁",
tabPanel("地址分析",column(3,
h4("API抓取經緯度"),
actionButton("request", "取資料"),
hr(),
numericInput("seed", h3("種子碼"), value = 20180112),
numericInput("k_value", h3("k值"), value = 10),
actionButton("do", "執行")
),
column(9,
tabsetPanel(
tabPanel("地址表單",tableOutput("Rout1")),
tabPanel("地址分布",plotOutput("Rout2",height = "800px")),
tabPanel("k-mean結果",plotOutput("Rout3",height = "800px"))
))),
tabPanel("訂單分析",fluidRow(
column(3,
h4("過濾"),
sliderInput('price', '價格多少以上',
min=0, max=12000, value=300,
step=100, round=0),
selectInput('payment', '付款方式', c("信用卡",
"ATM轉帳",
"貨到付款",
"現金",
"無",
"其他"))
),
column(9,
tableOutput('ordersTable')
)
)),
tabPanel("會員分析",fluidRow(
column(3,
h4("user資料"),
textInput("phone_number", h3("手機號碼"), value = "")
),
column(9,
tableOutput('userTable')
)
))
))
# Define server logic required to draw a histogram
server <- function(input, output) {
v <- reactiveValues(address = read.csv("address.csv",stringsAsFactors = FALSE,header = FALSE, fileEncoding = "UTF-8")) #shiny跟一般R不同的地方,暫存要存在reactiveValue,用法類似於list
orders <- read.csv("orders.csv", stringsAsFactors = FALSE)
user <- read.csv("user.csv", stringsAsFactors = FALSE)
#----------- tabpanel 1開始
getLatLngWithProcress = function(address, total){
incProgress(1/total,detail = "解析地址中")
return(getLatLng(address))
}
observeEvent(input$request,{
withProgress(message = "擷取經緯度", value = 0,{
v$addresswithLatLng <- v$address %>%
rowwise() %>%
mutate(LatLng = getLatLngWithProcress(V1,nrow(v$address))) %>%
filter(LatLng != "error") %>%
separate(LatLng,c("Lat","Lng"),sep = ",") %>%
mutate(Lat = as.numeric(Lat), Lng = as.numeric(Lng))
})
})
output$Rout1 <- renderTable({
if (is.null(v$addresswithLatLng))
return(v$address)
v$addresswithLatLng
})
#-------------
output$Rout2 <- renderPlot({
if(is.null(v$addresswithLatLng))
return()
ggmap(get_googlemap(center=c(121.52311,25.04126), zoom=12, maptype='satellite'), extent='device') +
geom_point(data = v$addresswithLatLng, aes(x = Lat, y= Lng), colour = "red")
})
#-------------
observeEvent(input$do,{
if(is.null(v$addresswithLatLng))
return()
set.seed(input$seed)
k <- kmeans(x = v$addresswithLatLng[,c("Lat","Lng")], centers = input$k_value)
v$addressWithKmean <- v$addresswithLatLng %>%
ungroup() %>%
mutate(category = k$cluster)
print("Kmean success")
})
output$Rout3 <- renderPlot({
if(is.null(v$addressWithKmean))
return()
map <- get_googlemap(center=c(121.52311,25.04126), zoom=12, maptype='satellite')
ggmap(map,extent='device') + geom_point(data = v$addressWithKmean, aes(x = Lat, y= Lng), colour = factor(v$addressWithKmean$category)) #factor 把向量取唯一後作標籤
})
#---------------tabpanel 1結束
output$ordersTable <- renderTable({
orders %>%
filter(input$price < PRICE, input$payment == PAYMENTTYPE)
})
#--------------tabpanel 2結束
output$userTable <- renderTable({
if(input$phone_number=="") return(user)
user %>%
filter(str_detect(MOBILE, input$phone_number))
})
#--------------tabpanel 3結束
}
# Run the application
shinyApp(ui = ui, server = server)
|
# This file contains various control functions.
# Basic response handler, only really useful in nonblocking cases
# all function argument is left in for backward compatibility,
# it is not used.
# Collect pending server replies for the current connection. The 'all'
# argument is kept only for backward compatibility and is ignored.
`redisGetResponse` <- function(all=TRUE)
{
# Connections created before the pending-reply counter existed: single read.
if(!exists('count',where=.redisEnv$current)) return(.getResponse())
# Nothing outstanding in non-blocking mode.
if(.redisEnv$current$count < 1) return(NULL)
# One reply per outstanding command; always returned as a list.
replicate(.redisEnv$current$count, .getResponse(), simplify=FALSE)
}
# Switch the current connection context between blocking and non-blocking
# communication. 'value' is coerced to logical; anything that does not
# coerce cleanly is rejected.
`redisSetBlocking` <- function(value=TRUE)
{
  flag <- as.logical(value)
  if(is.na(flag)) stop("logical value required")
  assign('block', flag, envir=.redisEnv$current)
}
# Open a TCP connection to a Redis server and make it the current context.
# 'timeout' defaults to 2678399 seconds (just under 31 days). With
# returnRef=TRUE the connection environment is returned so callers can keep
# references to several servers and switch via redisSetContext().
`redisConnect` <-
function(host='localhost', port=6379, returnRef=FALSE, timeout=2678399L)
{
.redisEnv$current <- new.env()
# R nonblocking connections are flaky, especially on Windows, see
# for example:
# http://www.mail-archive.com/r-devel@r-project.org/msg16420.html.
# So, we use blocking connections now.
con <- socketConnection(host, port, open='a+b', blocking=TRUE, timeout=timeout)
# Stash state in the redis environment describing this connection:
assign('con',con,envir=.redisEnv$current)
assign('host',host,envir=.redisEnv$current)
assign('port',port,envir=.redisEnv$current)
assign('block',TRUE,envir=.redisEnv$current)
assign('timeout',timeout,envir=.redisEnv$current)
# Count is for nonblocking communication, it keeps track of the number of
# getResponse calls that are pending.
assign('count',0,envir=.redisEnv$current)
# Ping the server to verify the link; on failure close and discard the socket.
# NOTE(review): the error is only printed -- the function still returns
# normally, leaving a context without a 'con'. Verify callers expect that
# rather than a hard stop().
tryCatch(.redisPP(),
error=function(e) {
cat(paste('Error: ',e,'\n'))
close(con);
rm(list='con',envir=.redisEnv$current)
})
if(returnRef) return(.redisEnv$current)
invisible()
}
# Close the socket of the current connection context and drop its 'con'
# binding so later commands fail fast instead of using a dead socket.
`redisClose` <-
function()
{
con <- .redis()
close(con)
remove(list='con',envir=.redisEnv$current)
}
# AUTH: authenticate against a password-protected server.
`redisAuth` <-
function(pwd)
{
.redisCmd(.raw('AUTH'), .raw(pwd))
}
# SAVE: synchronous dump of the dataset to disk (blocks the server).
`redisSave` <-
function()
{
.redisCmd(.raw('SAVE'))
}
# BGSAVE: fork a background dump of the dataset to disk.
`redisBgSave` <-
function()
{
.redisCmd(.raw('BGSAVE'))
}
# BGREWRITEAOF: rewrite the append-only file in the background.
`redisBgRewriteAOF` <-
function()
{
.redisCmd(.raw('BGREWRITEAOF'))
}
# SHUTDOWN: stop the server, then drop the now-dead 'con' binding from the
# current context.
`redisShutdown` <-
function()
{
.redisCmd(.raw('SHUTDOWN'))
remove(list='con',envir=.redisEnv$current)
}
# Query the server with INFO and return its report as a named list: section
# headers ("# ...") and blank lines are dropped, the remaining "key:value"
# rows are split on ':' and paired up positionally.
`redisInfo` <-
function()
{
  reply <- .redisCmd(.raw('INFO'))
  fields <- strsplit(reply,'\r\n')[[1]]
  junk <- c(grep("^$",fields), grep("^#",fields))
  if(length(junk) > 0) fields <- fields[-junk]
  # Flatten all split tokens, then take even positions as values and odd
  # positions as their names.
  tokens <- unlist(strsplit(fields,':'))
  ntok <- length(tokens)
  info <- as.list(tokens[seq(from=2,to=ntok,by=2)])
  names(info) <- tokens[seq(from=1,to=ntok,by=2)]
  info
}
# SLAVEOF: make this server replicate the given master.
`redisSlaveOf` <-
function(host,port)
{
# Use host="no" port="one" to disable slave replication
# (sends the literal "SLAVEOF NO ONE" command).
.redisCmd(.raw('SLAVEOF'),.raw(as.character(host)), .raw(as.character(port)))
}
# FLUSHDB: delete every key in the currently selected database.
redisFlushDB <- function() {
.redisCmd(.raw('FLUSHDB'))
}
# FLUSHALL: delete every key in every database.
redisFlushAll <- function() {
.redisCmd(.raw('FLUSHALL'))
}
# SELECT: switch the connection to the database with the given index.
redisSelect <- function(index) {
.redisCmd(.raw('SELECT'),.raw(as.character(index)))
}
# DBSIZE: number of keys in the selected database.
redisDBSize <- function() {
.redisCmd(.raw('DBSIZE'))
}
# Return the environment describing the currently active connection.
redisGetContext <- function() {
.redisEnv$current
}
# Make 'e' the active connection context; passing NULL restores the default
# (package-level) context. Rejects anything that is not an environment.
redisSetContext <- function(e=NULL)
{
  if (is.null(e)) {
    .redisEnv$current <- .redisEnv
  } else {
    if (!is.environment(e)) stop("Invalid context")
    .redisEnv$current <- e
  }
}
| /R/controlCMD.R | permissive | kennyhelsens/rredis | R | false | false | 3,173 | r | # This file contains various control functions.
# Basic response handler, only really useful in nonblocking cases
# all function argument is left in for backward compatibility,
# it is not used.
`redisGetResponse` <- function(all=TRUE)
{
if(!exists('count',where=.redisEnv$current)) return(.getResponse())
if(.redisEnv$current$count < 1) return(NULL)
replicate(.redisEnv$current$count, .getResponse(), simplify=FALSE)
}
`redisSetBlocking` <- function(value=TRUE)
{
value <- as.logical(value)
if(is.na(value)) stop("logical value required")
assign('block',value,envir=.redisEnv$current)
}
`redisConnect` <-
function(host='localhost', port=6379, returnRef=FALSE, timeout=2678399L)
{
.redisEnv$current <- new.env()
# R nonblocking connections are flaky, especially on Windows, see
# for example:
# http://www.mail-archive.com/r-devel@r-project.org/msg16420.html.
# So, we use blocking connections now.
con <- socketConnection(host, port, open='a+b', blocking=TRUE, timeout=timeout)
# Stash state in the redis enivronment describing this connection:
assign('con',con,envir=.redisEnv$current)
assign('host',host,envir=.redisEnv$current)
assign('port',port,envir=.redisEnv$current)
assign('block',TRUE,envir=.redisEnv$current)
assign('timeout',timeout,envir=.redisEnv$current)
# Count is for nonblocking communication, it keeps track of the number of
# getResponse calls that are pending.
assign('count',0,envir=.redisEnv$current)
tryCatch(.redisPP(),
error=function(e) {
cat(paste('Error: ',e,'\n'))
close(con);
rm(list='con',envir=.redisEnv$current)
})
if(returnRef) return(.redisEnv$current)
invisible()
}
`redisClose` <-
function()
{
con <- .redis()
close(con)
remove(list='con',envir=.redisEnv$current)
}
`redisAuth` <-
function(pwd)
{
.redisCmd(.raw('AUTH'), .raw(pwd))
}
`redisSave` <-
function()
{
.redisCmd(.raw('SAVE'))
}
`redisBgSave` <-
function()
{
.redisCmd(.raw('BGSAVE'))
}
`redisBgRewriteAOF` <-
function()
{
.redisCmd(.raw('BGREWRITEAOF'))
}
`redisShutdown` <-
function()
{
.redisCmd(.raw('SHUTDOWN'))
remove(list='con',envir=.redisEnv$current)
}
`redisInfo` <-
function()
{
x <- .redisCmd(.raw('INFO'))
z <- strsplit(x,'\r\n')[[1]]
rj <- c(grep("^$",z), grep("^#",z))
if(length(rj)>0) z <- z[-rj]
w <- unlist(lapply(z,strsplit,':'))
n <- length(w)
e <- seq(from=2,to=n,by=2)
o <- seq(from=1,to=n,by=2)
z <- as.list(w[e])
names(z) <- w[o]
z
}
`redisSlaveOf` <-
function(host,port)
{
# Use host="no" port="one" to disable slave replication
.redisCmd(.raw('SLAVEOF'),.raw(as.character(host)), .raw(as.character(port)))
}
redisFlushDB <- function() {
.redisCmd(.raw('FLUSHDB'))
}
redisFlushAll <- function() {
.redisCmd(.raw('FLUSHALL'))
}
redisSelect <- function(index) {
.redisCmd(.raw('SELECT'),.raw(as.character(index)))
}
redisDBSize <- function() {
.redisCmd(.raw('DBSIZE'))
}
redisGetContext <- function() {
.redisEnv$current
}
redisSetContext <- function(e=NULL)
{
if(is.null(e)) .redisEnv$current <- .redisEnv
else {
if(!is.environment(e)) stop("Invalid context")
.redisEnv$current <- e
}
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Week 3 assignment to cache the inverse of a matrix
makeCacheMatrix <- function(x = matrix()) { # create list of functions
m <- NULL # clears matrix "m"
set <- function(y) { # set value of the vector
x <<- y
m <<- NULL
}
get <- function() x # get value of the vector
setinverse <- function(inverse) m <<- inverse # set value of INVERSE vector, not MEAN
getinverse <- function() m # set value of inverse vector
list(set = set, # return a list of the variables
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## Week 3 assignment to cache the inverse of a matrix
cacheSolve <- function(x, ...) { # define a new function
m <- x$getinverse() # check if this has been calc'd before
if (!is.null(m)) {
message("getting cached data") # return a message if it has been calc'd
return(m) # return cached value
}
data <- x$get() # put matrix ino "data"
m <- solve(data, ...) # invert "data" put into "m"
x$setinverse(m) # put "m" into cache
m # print "m"
} | /cachematrix.R | no_license | bobersk/ProgrammingAssignment2 | R | false | false | 2,078 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Week 3 assignment to cache the inverse of a matrix
## Create a cache-aware wrapper around a matrix: a list of four closures
## (set/get/setinverse/getinverse) that share the matrix 'x' and its cached
## inverse through their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL                                     # cached inverse; NULL until computed
        set <- function(y) {
                # Replacing the matrix invalidates any cached inverse.
                x <<- y
                inv <<- NULL
        }
        get <- function() x                             # return the stored matrix
        setinverse <- function(solved) inv <<- solved   # stash a computed inverse
        getinverse <- function() inv                    # cached inverse, or NULL
        list(set = set, get = get,
             setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## Week 3 assignment to cache the inverse of a matrix
## Return the inverse of the special "matrix" produced by makeCacheMatrix(),
## computing it at most once: a cached inverse is reused (with a message),
## otherwise solve() runs and the result is stored back into the cache.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (!is.null(cached)) {
                message("getting cached data")          # cache hit
                return(cached)
        }
        inv <- solve(x$get(), ...)                      # invert the matrix
        x$setinverse(inv)                               # remember for next call
        inv
}
# Render an image wrapped in a new-tab hyperlink and return it as a plain
# HTML string (the shiny tag objects are serialized via as.character()).
buildLink <- function(link.url, image.url, image.height=32, tooltip = "") {
  img.tag <- shiny::tags$img(src = image.url,
                             style = paste0("height: ", image.height, "px;"),
                             title = tooltip)
  link.tag <- shiny::tags$a(href = link.url, target = "_blank", img.tag)
  as.character(link.tag)
}
# Recover the package name from its rendered "<span ...>name</span>" cell:
# grab the text between the first ">" and the last "<", then strip the two
# surrounding angle brackets.
getPackageNameFromHTML <- function(html) {
  between <- stringr::str_extract(html, ">(.*)<")
  without.open <- stringr::str_replace(between, "<", "")
  stringr::str_replace(without.open, ">", "")
}
# Render the detail pane for one package as an HTML fragment.
#
# df: a one-row data frame of CRAN package metadata (column names as in the
#     CRAN package database, e.g. "Package", "Authors@R", "Reverse depends").
# For each entry of `fields` the matching `headers` caption is emitted; where
# `placeholders` holds pre-rendered HTML, that is shown instead of the raw
# field value. Returns a single HTML string.
getPackageDetailsHTML <- function(df) {
  fields <- c(
    "Package",
    "Title",
    "Description",
    "Version",
    "License",
    "License", # Actually downloads but needs to be an existing field name
    "Author",
    "Authors@R",
    "Maintainer",
    "BugReports",
    "URL",
    "Depends",
    "Imports",
    "Suggests",
    "Reverse depends",
    "Reverse imports",
    "Reverse suggests"
  )
  headers <- c(
    "Name",
    "Short description",
    "Long description",
    "Version",
    "License",
    "Total Downloads",
    "Authors",
    "Authors", # Actually, the Authors@R field
    "Maintainer",
    "BugReports",
    "URLs",
    "Depends",
    "Imports",
    "Suggests",
    "Reverse depends",
    "Reverse imports",
    "Reverse suggests"
  )
  # Pre-rendered HTML, aligned index-by-index with `fields`; NA means
  # "print the raw field value", "" means "skip this row".
  placeholders <- c(
    NA,
    NA,
    NA,
    NA,
    NA,
    {
      # Download badge served by cranlogs.
      paste0("<img src='https://cranlogs.r-pkg.org/badges/grand-total/", df[1, "Package"], "'/>")
    },
    { if(!is.na(df[1, "Authors@R"])) {
      ""
    }
    else {
      paste0(unlist(strsplit(df[1, "Author"], ",")), collapse="<br>")
    }
    },
    {
      if(!is.na(df[1, "Authors@R"])) {
        # NOTE(review): eval(parse(...)) executes the Authors@R metadata
        # field as R code; malicious or malformed CRAN metadata would run
        # here. Consider a tryCatch guard or a dedicated parser.
        pers <- eval(parse(text = df[1, "Authors@R"]))
        stringr::str_replace_all(stringr::str_replace_all(stringr::str_replace_all(paste0(pers, collapse = "#"), ">", "&gt;"), "<", "&lt;"), "#", "<br>")
      }
      else {
        ""
      }
    },
    { stringr::str_replace_all(stringr::str_replace_all(df[1,"Maintainer"], ">", "&gt;"), "<", "&lt;") },
    { if(!is.na(df[1, "BugReports"])) {
      paste0("<a href='", df[1, "BugReports"], "'>", df[1, "BugReports"], "</a>")
    }
    else {
      NA
    }
    },
    {
      # Turn the comma-separated URL field into one anchor per line.
      if(!is.na(df[1, "URL"])) {
        urls <- unlist(strsplit(df[1, "URL"], ","))
        html.urls<-""
        first <- TRUE
        for(i in 1:NROW(urls)) {
          if(first) {
            first <- FALSE
          }
          else {
            html.urls <- paste0(html.urls, "<br>")
          }
          html.urls <- paste0(html.urls, "<a href='", urls[i], "'>", urls[i],"</a>")
        }
        html.urls
      }
      else {
        NA
      }
    },
    NA,
    NA,
    NA,
    NA,
    NA,
    NA
  )
  html <- "<div style=\"background-color: #FCFAFA; padding-left: 20px\">"
  # Emit caption + value for every field that is present; skip fields whose
  # placeholder evaluated to "" (e.g. Author when Authors@R is available).
  for(i in 1:NROW(fields)) {
    if(!is.na(df[1, fields[i]])) {
      if(is.na(placeholders[i])) {
        html <- paste0(html,"<span style=\"color:#CDC9C9; font-size:90%; font-style:bold\">", headers[i], "</span><br>")
        html <- paste0(html,"<p>", df[1, fields[i]], "</p>")
        html <- paste0(html,"<p></p>")
      }
      else {
        if(placeholders[i] != "") {
          html <- paste0(html,"<span style=\"color:#CDC9C9; font-size:90%; font-style:bold\">", headers[i], "</span><br>")
          html <- paste0(html, "<p>", eval(placeholders[i]), "</p>")
          html <- paste0(html,"<p></p>")
        }
      }
    }
  }
  # Close the wrapper div (was erroneously "<div>", leaving it unclosed).
  html <- paste0(html, "</div>")
  return(html)
}
# JavaScript injected into the page: pressing <Enter> while the search box
# (#txt_search) or the always-case-sensitive box (#txt_alwayscase) has focus
# clicks the search button (#btn_search).
js <- "
$(document).keyup(function(event) {
if ($(\"#txt_search\").is(\":focus\") && (event.keyCode == 13)) {
$(\"#btn_search\").click();
}
if ($(\"#txt_alwayscase\").is(\":focus\") && (event.keyCode == 13)) {
$(\"#btn_search\").click();
}
});
"
optwidth <- function() return("60%")
# Lay out a text label and a Shiny widget side by side on one line by
# wrapping both in vertically-centred inline-block divs.
inline <- function(widget, label) {
  return(
    shiny::div(style = "display: inline-block; vertical-align: middle; margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px",
               shiny::HTML(paste0("<span style = 'middle; margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px'>", label, "&nbsp;&nbsp;</span>")),
               shiny::div(style = "display: inline-block; vertical-align: middle; margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px",
                          widget
               )
    )
  )
}
# Run a package search (search = TRUE, via findPackage()) or build the
# "new on CRAN" listing (search = FALSE), then decorate the result table
# with install/PDF/CRAN/GitHub action icons for the addin UI.
#
# input        : Shiny input object (search terms, mode and case options).
# package.list : data frame of CRAN package metadata.
# Returns list(df = decorated results, df_ext = de-duplicated metadata with
# a GitHub column, num.results = number of result rows).
# NOTE(review): if search = TRUE but no usable search term is given, `res`
# (and `df_ext`) are never assigned and the function errors -- verify the
# callers guarantee a non-empty term.
processSearch <- function(search = TRUE, input, package.list) {
  if(search) {
    if(!is.null(input$txt_search)) {
      if(stringr::str_replace_all(input$txt_search, " ", "") !="") {
        # Remember the term in the session-wide search history option.
        options("packagefinder.lst_searchterms" = unique(append(getOption("packagefinder.lst_searchterms", c()), shiny::isolate(input$txt_search))))
        if(!is.null(input$chk_case) & input$chk_case != FALSE) {
          case.sensitive <- TRUE
        }
        else {
          case.sensitive <- FALSE
        }
        if(input$txt_alwayscase != "") {
          always.sensitive <- stringr::str_replace_all(unlist(strsplit(input$txt_alwayscase,",")), " ", "")
        }
        else {
          always.sensitive = NULL
        }
        mode <- tolower(input$rad_mode)
        terms <- scan(text = input$txt_search, what = "character")
        # Plain terms vs. regular-expression query.
        if(!input$chk_regex) res <- findPackage(terms, silent = TRUE, return.df = TRUE, mode = mode, case.sensitive = case.sensitive, always.sensitive = always.sensitive, index = getOption("packagefinder.index", NULL))
        else res <- findPackage(query=terms, silent = TRUE, return.df = TRUE, mode = mode, case.sensitive = case.sensitive, always.sensitive = always.sensitive, index = getOption("packagefinder.index", NULL))
      }
    }
  }
  else {
    # "What's new": packages published within the configured look-back window,
    # newest first, shaped like a search result with a dummy score of 100.
    newoncran <- package.list
    newoncran <- newoncran[lubridate::ymd(newoncran$Published) >= lubridate::today()-getOption("packagefinder.num_optcrandays", 3),]
    newoncran <- newoncran[order(lubridate::ymd(newoncran$Published), decreasing = TRUE),]
    newoncran <- newoncran[, c("Package", "Title", "Description")]
    names(newoncran) <- c("Name", "Short Description", "Long Description")
    res <- cbind(Score = rep(100, NROW(newoncran)), newoncran)
    res <- cbind(res, GO = rep(NA, NROW(newoncran)))
    newoncran <- newoncran[sapply(unique(newoncran$Name), function(x) {min(which(newoncran$Name == x))}),]
  }
  if(!is.null(res)) {
    num.results <- NROW(res)
    # Strip columns not shown in the table and add the action columns.
    res[,"Long Description"] <- NULL
    orig.name <- res$Name
    res$GO <- NULL
    res$Installed <- rep("", NROW(res))
    res$ActionPDF <- rep("", NROW(res))
    res$ActionWeb <- rep("", NROW(res))
    res$ActionGitHub <- rep("", NROW(res))
    res$Favorite <- rep("", NROW(res))
    inst <- utils::installed.packages()
    datetime <- as.Date  # NOTE(review): dead assignment (never used) -- candidate for removal
    # De-duplicate the metadata (first occurrence per package) and derive a
    # GitHub repository URL from the URL/BugReports fields where possible.
    df_ext <- package.list
    df_ext <- df_ext[sapply(unique(df_ext$Package), function(x) {min(which(df_ext$Package == x))}),]
    df_ext$AllURLs <- tolower(stringr::str_replace(paste0(df_ext$URL, ",", df_ext$BugReports), " ", ","))
    df_ext$GitHub <- NA
    for(i in 1:NROW(res)) {
      urls.split <- unlist(strsplit(df_ext$AllURLs[df_ext$Package == orig.name[i]], ","))
      if(NROW(urls.split)>0) {
        match.git <- stringr::str_detect(urls.split, "github.com")
        if(sum(match.git, na.rm=TRUE) > 0) {
          df_ext$GitHub[df_ext$Package == orig.name[i]] <- urls.split[which(match.git == TRUE)][1]
        }
      }
    }
    # Build the per-row action icons/links.
    for(i in 1:NROW(res)) {
      if(orig.name[i] %in% inst[,"Package"]) {
        res$Installed[i] = "Installed"
      }
      else {
        res$Installed[i] <- paste0("<img src=\"https://www.zuckarelli.de/files/download-col.png\" style=\"height:32px\"
                                   title = \"Install package '", orig.name[i] , "' (with dependencies)\"/>")
      }
      # NOTE(review): the PDF link joins path segments with "\\" (a literal
      # backslash inside a URL) -- verify this resolves; "/" looks intended.
      res$ActionPDF[i] <- buildLink(
        link.url = paste0("https://cran.r-project.org/web/packages/", res$Name[i], "\\", res$Name[i], ".PDF"),
        image.url = "https://www.zuckarelli.de/files/PDF-col.png",
        tooltip = paste0("PDF manual of package '", res$Name[i], "'")
      )
      # NOTE(review): this URL lacks the "packages/" segment used above --
      # verify it reaches the package's CRAN page.
      res$ActionWeb[i] <- buildLink(
        link.url = paste0("https://cran.r-project.org/web/", res$Name[i]),
        image.url = "https://www.zuckarelli.de/files/r-col.png",
        tooltip = paste0("CRAN website of package '", res$Name[i], "'")
      )
      github.url <- df_ext$GitHub[which(df_ext$Package == res$Name[i])]
      if(!is.na(github.url)) {
        res$ActionGitHub[i] <- buildLink(
          link.url = github.url,
          image.url = "https://www.zuckarelli.de/files/social-github-col.png",
          tooltip = paste0("GitHub repository of package '", res$Name[i], "'")
        )
      }
      else {
        res$ActionGitHub[i] <- ""
      }
    }
    # Bold package names for the rendered table.
    res$Name = paste0("<span style=\"font-weight:bold\">", res$Name, "</span>")
  }
  else {
    num.results <- 0
  }
  return(list(df = res, df_ext = df_ext, num.results = num.results))
}
# Build the findPackage()/whatsNew() console command equivalent to the current
# GUI state, so the user can reproduce the search from the R prompt.
#
# input     : Shiny input object; rad_mode, chk_case, txt_alwayscase,
#             txt_search and chk_regex are read when search = TRUE.
# search    : TRUE -> emit a findPackage() call; FALSE -> emit whatsNew().
# cran.days : look-back window for the whatsNew() variant.
# Returns a single character string of R code.
getPackageFinderCode <- function(input, search = TRUE, cran.days = 3) {
  if(!search) {
    return(paste0("whatsNew(last.days = ", cran.days, ")"))
  }
  # Optional arguments, rendered as ready-to-paste code fragments ("" = omit).
  mode <- if(tolower(input$rad_mode) == "and") ", mode = \"and\"" else ""
  # isTRUE() copes with an absent/NULL checkbox (the previous
  # `!is.null(x) & x != FALSE` produced a zero-length condition on NULL).
  case.sensitive <- if(isTRUE(input$chk_case)) ", case.sensitive = TRUE" else ""
  if(input$txt_alwayscase != "") {
    sens.terms <- gsub(" ", "", unlist(strsplit(input$txt_alwayscase, ",")), fixed = TRUE)
    if(NROW(sens.terms) > 1) {
      # Quote every term: previously multiple terms were emitted unquoted,
      # producing invalid generated code like c(GO, Shiny).
      always.sensitive <- paste0(", always.sensitive = c(",
                                 paste0("\"", sens.terms, "\"", collapse = ", "), ")")
    } else {
      always.sensitive <- paste0(", always.sensitive = \"", sens.terms, "\"")
    }
  } else {
    always.sensitive <- ""
  }
  # Whitespace-separated search terms; quiet = TRUE suppresses scan()'s
  # "Read n items" console noise.
  terms <- scan(text = input$txt_search, what = "character", quiet = TRUE)
  if(NROW(terms) > 1) terms <- paste0("c(", paste0("\"", terms, "\"", collapse = ", "), ")")
  else terms <- paste0("\"", terms, "\"")
  if(isTRUE(input$chk_regex)) terms <- paste0("query = ", terms)
  paste0("findPackage(", terms, mode, case.sensitive, always.sensitive, ")")
}
waitUI <- function(code) {
  # HTML shown while a search is running: a tip that the same search can be
  # run from the R console (the generated `code` string), a button to copy
  # that code, and the packagefinder hex sticker linking to GitHub.
  # `code` is interpolated into the page verbatim.
  return(
    as.list(shiny::tagList(shiny::HTML(paste0("<table id='msg' style='width:100%'>
<tr>
<td>
<p><span style='font-weight: bold'>While we are searching ... Did you know?</span><span> You can also search from the R console:</span></p>
<span style='font-family:Courier; font-size:120%'>", code, "</span> ",
shiny::actionButton("copy", "Copy R code"), "
</td>
<td>
<a href= \"https://github.com/jsugarelli/packagefinder\"><img src='https://www.zuckarelli.de/files/hexagon-packagefinder.png' style='width:120px'></a>
</td>
</tr>
</table><p id='p1'> </p><p id='p2'> </p>"))
    ),
  ))
} | /R/addintools.r | no_license | cran/packagefinder | R | false | false | 11,584 | r | buildLink <- function(link.url, image.url, image.height=32, tooltip = "") {
return(as.character(shiny::tags$a(href = link.url,
target = "_blank",
shiny::tags$img(src = image.url,
style = paste0("height: ", image.height, "px;"),
title = tooltip
))
)
)
}
getPackageNameFromHTML <- function(html) {
  # Recover the link text from an HTML anchor string: grab the span between
  # the first ">" and the last "<" (greedy match, delimiters included), then
  # strip the first occurrence of each delimiter character.
  between_tags <- stringr::str_extract(html, ">(.*)<")
  without_lt <- stringr::str_replace(between_tags, "<", "")
  stringr::str_replace(without_lt, ">", "")
}
getPackageDetailsHTML <- function(df) {
  # Renders the details of a single package (one-row data frame of CRAN
  # metadata) as an HTML fragment for the details panel.
  #
  # `fields`, `headers` and `placeholders` are parallel vectors:
  #   fields[i]       -- column of `df` whose availability (non-NA) decides
  #                      whether the entry is shown,
  #   headers[i]      -- caption printed above the value,
  #   placeholders[i] -- NA  : show df[1, fields[i]] verbatim,
  #                      ""  : suppress this entry,
  #                      else: show this pre-rendered HTML instead.
  fields <- c(
    "Package",
    "Title",
    "Description",
    "Version",
    "License",
    "License", # Actually downloads but needs to be an existing field name
    "Author",
    "Authors@R",
    "Maintainer",
    "BugReports",
    "URL",
    "Depends",
    "Imports",
    "Suggests",
    "Reverse depends",
    "Reverse imports",
    "Reverse suggests"
  )
  headers <- c(
    "Name",
    "Short description",
    "Long description",
    "Version",
    "License",
    "Total Downloads",
    "Authors",
    "Authors", # Actually, the Authors@R field
    "Maintainer",
    "BugReports",
    "URLs",
    "Depends",
    "Imports",
    "Suggests",
    "Reverse depends",
    "Reverse imports",
    "Reverse suggests"
  )
  # Note: the { ... } blocks below are ordinary expressions inside c() and are
  # evaluated eagerly while `placeholders` is built; each yields a character
  # scalar, so the eval() in the loop below is effectively a no-op.
  placeholders <- c(
    NA,
    NA,
    NA,
    NA,
    NA,
    { # Downloads badge, shown in place of the duplicated "License" field.
      paste0("<img src='https://cranlogs.r-pkg.org/badges/grand-total/", df[1, "Package"], "'/>")
    },
    { # Plain "Author" field: only rendered when Authors@R is absent.
      if(!is.na(df[1, "Authors@R"])) {
        ""
      }
      else {
        paste0(unlist(strsplit(df[1, "Author"], ",")), collapse="<br>")
      }
    },
    { # Authors@R field: parse the person() specification and format it.
      if(!is.na(df[1, "Authors@R"])) {
        pers <- eval(parse(text = df[1, "Authors@R"]))
        stringr::str_replace_all(stringr::str_replace_all(stringr::str_replace_all(paste0(pers, collapse = "#"), ">", ">"), "<", "<"), "#", "<br>")
      }
      else {
        ""
      }
    },
    { stringr::str_replace_all(stringr::str_replace_all(df[1,"Maintainer"], ">", ">"), "<", "<") },
    { # Bug tracker as a clickable link.
      if(!is.na(df[1, "BugReports"])) {
        paste0("<a href='", df[1, "BugReports"], "'>", df[1, "BugReports"], "</a>")
      }
      else {
        NA
      }
    },
    { # Each URL as a clickable link, separated by <br>.
      if(!is.na(df[1, "URL"])) {
        urls <- unlist(strsplit(df[1, "URL"], ","))
        html.urls<-""
        first <- TRUE
        for(i in 1:NROW(urls)) {
          if(first) {
            first <- FALSE
          }
          else {
            html.urls <- paste0(html.urls, "<br>")
          }
          html.urls <- paste0(html.urls, "<a href='", urls[i], "'>", urls[i],"</a>")
        }
        html.urls
      }
      else {
        NA
      }
    },
    NA,
    NA,
    NA,
    NA,
    NA,
    NA
  )
  html <- "<div style=\"background-color: #FCFAFA; padding-left: 20px\">"
  for(i in 1:NROW(fields)) {
    if(!is.na(df[1, fields[i]])) {
      if(is.na(placeholders[i])) {
        # No placeholder: header caption followed by the raw field value.
        # (NB: "font-style:bold" is not valid CSS -- bold never applies;
        # kept as-is to avoid changing the rendered look elsewhere.)
        html <- paste0(html,"<span style=\"color:#CDC9C9; font-size:90%; font-style:bold\">", headers[i], "</span><br>")
        html <- paste0(html,"<p>", df[1, fields[i]], "</p>")
        html <- paste0(html,"<p></p>")
      }
      else {
        if(placeholders[i] != "") {
          html <- paste0(html,"<span style=\"color:#CDC9C9; font-size:90%; font-style:bold\">", headers[i], "</span><br>")
          html <- paste0(html, "<p>", eval(placeholders[i]), "</p>")
          html <- paste0(html,"<p></p>")
        }
      }
    }
  }
  # Bug fix: close the <div> opened above -- the original appended "<div>",
  # which opened a second, never-closed div instead of closing the first.
  html <- paste0(html, "</div>")
  return(html)
}
# jQuery handler injected into the app's page: pressing Enter (keyCode 13)
# while the search box (#txt_search) or the always-case-sensitive box
# (#txt_alwayscase) has focus triggers a click on the search button
# (#btn_search), so users can search without reaching for the mouse.
js <- "
$(document).keyup(function(event) {
if ($(\"#txt_search\").is(\":focus\") && (event.keyCode == 13)) {
$(\"#btn_search\").click();
}
if ($(\"#txt_alwayscase\").is(\":focus\") && (event.keyCode == 13)) {
$(\"#btn_search\").click();
}
});
"
# Fixed CSS width used for the option widgets in the UI.
optwidth <- function() {
  "60%"
}
inline <- function(widget, label) {
  # Lays out `label` and `widget` side by side on one vertically-centred
  # line with all vertical margins/padding removed.
  box_css <- "display: inline-block; vertical-align: middle; margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px"
  label_html <- shiny::HTML(paste0("<span style = 'middle; margin-top:0px; margin-bottom:0px; padding-top:0px; padding-bottom:0px'>", label, " </span>"))
  shiny::div(
    style = box_css,
    label_html,
    shiny::div(style = box_css, widget)
  )
}
processSearch <- function(search = TRUE, input, package.list) {
  # Executes either a keyword search (search = TRUE) or a "new on CRAN"
  # lookup (search = FALSE) and decorates the result table with the HTML
  # action icons (install / PDF / CRAN page / GitHub) used by the UI.
  #
  # Args:
  #   search:       which of the two modes to run.
  #   input:        Shiny input object (search box, mode, case options).
  #   package.list: data frame of CRAN package metadata (can contain several
  #                 rows per package; Package/Title/Description/URL/... ).
  # Returns: list(df = decorated results or NULL, df_ext = per-package
  #   metadata incl. derived GitHub URL (NULL if no results),
  #   num.results = number of result rows).
  res <- NULL     # stays NULL when nothing was searched (e.g. empty box)
  df_ext <- NULL  # only built when there are results
  if(search) {
    if(!is.null(input$txt_search)) {
      if(stringr::str_replace_all(input$txt_search, " ", "") !="") {
        # Remember the term in the session-wide search history option.
        options("packagefinder.lst_searchterms" = unique(append(getOption("packagefinder.lst_searchterms", c()), shiny::isolate(input$txt_search))))
        # `&&` (not `&`): chk_case can be NULL before the widget renders,
        # and `NULL != FALSE` is logical(0), which breaks `if()` with `&`.
        if(!is.null(input$chk_case) && input$chk_case != FALSE) {
          case.sensitive <- TRUE
        }
        else {
          case.sensitive <- FALSE
        }
        if(input$txt_alwayscase != "") {
          always.sensitive <- stringr::str_replace_all(unlist(strsplit(input$txt_alwayscase,",")), " ", "")
        }
        else {
          always.sensitive <- NULL
        }
        mode <- tolower(input$rad_mode)
        terms <- scan(text = input$txt_search, what = "character")
        if(!input$chk_regex) res <- findPackage(terms, silent = TRUE, return.df = TRUE, mode = mode, case.sensitive = case.sensitive, always.sensitive = always.sensitive, index = getOption("packagefinder.index", NULL))
        else res <- findPackage(query=terms, silent = TRUE, return.df = TRUE, mode = mode, case.sensitive = case.sensitive, always.sensitive = always.sensitive, index = getOption("packagefinder.index", NULL))
      }
    }
  }
  else {
    # "What's new on CRAN": packages published within the last N days.
    newoncran <- package.list
    newoncran <- newoncran[lubridate::ymd(newoncran$Published) >= lubridate::today()-getOption("packagefinder.num_optcrandays", 3),]
    newoncran <- newoncran[order(lubridate::ymd(newoncran$Published), decreasing = TRUE),]
    newoncran <- newoncran[, c("Package", "Title", "Description")]
    names(newoncran) <- c("Name", "Short Description", "Long Description")
    res <- cbind(Score = rep(100, NROW(newoncran)), newoncran)
    res <- cbind(res, GO = rep(NA, NROW(newoncran)))
    # NOTE(review): this de-duplication runs only after `res` was built, so
    # `res` can still contain one row per version of the same package;
    # kept as-is to preserve the existing output.
    newoncran <- newoncran[sapply(unique(newoncran$Name), function(x) {min(which(newoncran$Name == x))}),]
  }
  if(!is.null(res)) {
    num.results <- NROW(res)
    res[,"Long Description"] <- NULL
    orig.name <- res$Name   # plain names, kept before Name is HTML-wrapped
    res$GO <- NULL
    # Placeholder columns for the per-row action icons filled in below.
    res$Installed <- rep("", NROW(res))
    res$ActionPDF <- rep("", NROW(res))
    res$ActionWeb <- rep("", NROW(res))
    res$ActionGitHub <- rep("", NROW(res))
    res$Favorite <- rep("", NROW(res))
    inst <- utils::installed.packages()
    # One row per package; derive a GitHub link from URL/BugReports.
    df_ext <- package.list
    df_ext <- df_ext[sapply(unique(df_ext$Package), function(x) {min(which(df_ext$Package == x))}),]
    df_ext$AllURLs <- tolower(stringr::str_replace(paste0(df_ext$URL, ",", df_ext$BugReports), " ", ","))
    df_ext$GitHub <- NA
    for(i in seq_len(NROW(res))) {
      urls.split <- unlist(strsplit(df_ext$AllURLs[df_ext$Package == orig.name[i]], ","))
      if(NROW(urls.split)>0) {
        match.git <- stringr::str_detect(urls.split, "github.com")
        if(sum(match.git, na.rm=TRUE) > 0) {
          df_ext$GitHub[df_ext$Package == orig.name[i]] <- urls.split[which(match.git == TRUE)][1]
        }
      }
    }
    for(i in seq_len(NROW(res))) {
      if(orig.name[i] %in% inst[,"Package"]) {
        res$Installed[i] <- "Installed"
      }
      else {
        res$Installed[i] <- paste0("<img src=\"https://www.zuckarelli.de/files/download-col.png\" style=\"height:32px\"
                                   title = \"Install package '", orig.name[i] , "' (with dependencies)\"/>")
      }
      # Bug fix: CRAN manuals live at web/packages/<pkg>/<pkg>.pdf; the
      # original built ".../<pkg>\<pkg>.PDF", a broken link.
      res$ActionPDF[i] <- buildLink(
        link.url = paste0("https://cran.r-project.org/web/packages/", res$Name[i], "/", res$Name[i], ".pdf"),
        image.url = "https://www.zuckarelli.de/files/PDF-col.png",
        tooltip = paste0("PDF manual of package '", res$Name[i], "'")
      )
      # Bug fix: package pages live under web/packages/<pkg>/ (the original
      # "web/<pkg>" URL does not exist on CRAN).
      res$ActionWeb[i] <- buildLink(
        link.url = paste0("https://cran.r-project.org/web/packages/", res$Name[i], "/"),
        image.url = "https://www.zuckarelli.de/files/r-col.png",
        tooltip = paste0("CRAN website of package '", res$Name[i], "'")
      )
      github.url <- df_ext$GitHub[which(df_ext$Package == res$Name[i])]
      # length() guard: the lookup yields character(0) when the package is
      # missing from package.list, and is.na(character(0)) breaks if().
      if(length(github.url) == 1 && !is.na(github.url)) {
        res$ActionGitHub[i] <- buildLink(
          link.url = github.url,
          image.url = "https://www.zuckarelli.de/files/social-github-col.png",
          tooltip = paste0("GitHub repository of package '", res$Name[i], "'")
        )
      }
      else {
        res$ActionGitHub[i] <- ""
      }
    }
    res$Name <- paste0("<span style=\"font-weight:bold\">", res$Name, "</span>")
  }
  else {
    num.results <- 0
  }
  return(list(df = res, df_ext = df_ext, num.results = num.results))
}
getPackageFinderCode <- function(input, search = TRUE, cran.days = 3) {
  # Builds the console command equivalent to the current Shiny inputs, shown
  # to the user so the search can be reproduced outside the app.
  #
  # Args:
  #   input:     Shiny input object (rad_mode, chk_case, txt_alwayscase,
  #              txt_search, chk_regex); only read when search = TRUE.
  #   search:    TRUE -> build a findPackage() call, FALSE -> whatsNew().
  #   cran.days: look-back window (days) for the whatsNew() call.
  # Returns: the generated call as a single character string.
  if(search) {
    if(tolower(input$rad_mode) == "and") mode <- ", mode = \"and\""
    else mode <- ""
    # `&&` (not `&`): chk_case can be NULL before the widget renders, and
    # `NULL != FALSE` is logical(0), which makes `if()` fail with a plain `&`.
    if(!is.null(input$chk_case) && input$chk_case != FALSE) case.sensitive <- ", case.sensitive = TRUE"
    else case.sensitive <- ""
    if(input$txt_alwayscase != "") {
      always.sensitive <- stringr::str_replace_all(unlist(strsplit(input$txt_alwayscase,",")), " ", "")
      # Quote each term so the generated call is valid R (mirrors the
      # handling of `terms` below; unquoted bare names would not evaluate).
      if(NROW(always.sensitive) > 1) always.sensitive <- paste0(", always.sensitive = c(", paste0(paste0("\"", always.sensitive, "\""), collapse = ", "), ")")
      else always.sensitive <- paste0(", always.sensitive = \"", always.sensitive, "\"")
    }
    else always.sensitive <- ""
    terms <- scan(text = input$txt_search, what = "character")
    if(NROW(terms) > 1) terms <- paste0("c(", paste0(paste0("\"", terms, "\""), collapse = ", "), ")")
    else terms <- paste0("\"", terms, "\"")
    if(input$chk_regex) terms <- paste0("query = ", terms)
    code <- paste0("findPackage(", terms, mode, case.sensitive, always.sensitive, ")")
  }
  else {
    code <- paste0("whatsNew(last.days = ", cran.days, ")")
  }
  return(code)
}
waitUI <- function(code) {
  # HTML shown while a search is running: a tip that the same search can be
  # run from the R console (the generated `code` string), a button to copy
  # that code, and the packagefinder hex sticker linking to GitHub.
  # `code` is interpolated into the page verbatim.
  return(
    as.list(shiny::tagList(shiny::HTML(paste0("<table id='msg' style='width:100%'>
<tr>
<td>
<p><span style='font-weight: bold'>While we are searching ... Did you know?</span><span> You can also search from the R console:</span></p>
<span style='font-family:Courier; font-size:120%'>", code, "</span> ",
shiny::actionButton("copy", "Copy R code"), "
</td>
<td>
<a href= \"https://github.com/jsugarelli/packagefinder\"><img src='https://www.zuckarelli.de/files/hexagon-packagefinder.png' style='width:120px'></a>
</td>
</tr>
</table><p id='p1'> </p><p id='p2'> </p>"))
    ),
  ))
}
#' @title Ages, lengths, and sexes of Troutperch.
#'
#' @description The assigned ages (by scales), total lengths (mm), and sexes of Troutperch (\emph{Percopsis omiscomaycus}) captured in southeastern Lake Michigan.
#'
#' @name TroutperchLM1
#'
#' @docType data
#'
#' @format A data frame with 431 observations on the following 3 variables:
#' \describe{
#' \item{age}{Assigned ages (by scales).}
#' \item{tl}{Measured total length (mm).}
#' \item{sex}{Sex (\code{f}=female and \code{m}=male).}
#' }
#'
#' @section Topic(s):
#' \itemize{
#' \item Growth
#' \item von Bertalanffy
#' }
#'
#' @concept Growth 'von Bertalanffy'
#'
#' @source Simulated from the age-length data provided in Table 1 of House, R., and L. Wells. 1973. Age, growth, spawning season, and fecundity of the trout-perch (\emph{Percopsis omiscomaycus}) in southeastern Lake Michigan. Journal of the Fisheries Research Board of Canada. 30:1221-1225.
#'
#' @keywords datasets
#'
#' @examples
#' data(TroutperchLM1)
#' str(TroutperchLM1)
#' head(TroutperchLM1)
#' op <- par(mfrow=c(1,2),pch=19)
#' plot(tl~age,data=TroutperchLM1,subset=sex=="f",main="female")
#' plot(tl~age,data=TroutperchLM1,subset=sex=="m",main="male")
#' par(op)
#'
NULL | /FSAdata/R/TroutperchLM1.R | no_license | ingted/R-Examples | R | false | false | 1,280 | r | #' @title Ages, lengths, and sexes of Troutperch.
#'
#' @description The assigned ages (by scales), total lengths (mm), and sexes of Troutperch (\emph{Percopsis omiscomaycus}) captured in southeastern Lake Michigan.
#'
#' @name TroutperchLM1
#'
#' @docType data
#'
#' @format A data frame with 431 observations on the following 3 variables:
#' \describe{
#' \item{age}{Assigned ages (by scales).}
#' \item{tl}{Measured total length (mm).}
#' \item{sex}{Sex (\code{f}=female and \code{m}=male).}
#' }
#'
#' @section Topic(s):
#' \itemize{
#' \item Growth
#' \item von Bertalanffy
#' }
#'
#' @concept Growth 'von Bertalanffy'
#'
#' @source Simulated from the age-length data provided in Table 1 of House, R., and L. Wells. 1973. Age, growth, spawning season, and fecundity of the trout-perch (\emph{Percopsis omiscomaycus}) in southeastern Lake Michigan. Journal of the Fisheries Research Board of Canada. 30:1221-1225.
#'
#' @keywords datasets
#'
#' @examples
#' data(TroutperchLM1)
#' str(TroutperchLM1)
#' head(TroutperchLM1)
#' op <- par(mfrow=c(1,2),pch=19)
#' plot(tl~age,data=TroutperchLM1,subset=sex=="f",main="female")
#' plot(tl~age,data=TroutperchLM1,subset=sex=="m",main="male")
#' par(op)
#'
NULL |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(shinydashboard)
library(leaflet)
dashboardPage(
dashboardHeader(title = 'BIT'),
dashboardSidebar(sidebarMenu(
menuItem(
"Fish Response Index", tabName = "fri", icon = icon("dashboard")
),
menuItem(
"Some Other Tool", tabName = "ot", icon = icon("dashboard")
),
menuItem(
"About", tabName = 'about', icon = icon('question')
)
)),
dashboardBody(tabItems(
tabItem(tabName = 'fri',
fluidRow(column(
width = 12,
tabsetPanel(
tabPanel(
'Map',
leafletOutput('map', width = '100%', height = '600px'),
absolutePanel(
top = 5, right = 15,
bsButton('startBut', 'Get Started!', style = 'success')
),
bsModal(
'modalWelcome', 'Load Data', 'startBut', size = 'large',
fluidRow(
fluidRow(column(
3, fileInput("splist_fn", h4("Fish species list (e.g., FRI.csv):"), accept = ".csv")
),
column(7, h4(
textOutput("splist_fn_txt")
))),
fluidRow(column(
3, fileInput("trwlstn_fn", h4("Trawl stations:"), accept = ".csv")
),
column(7, h4(
textOutput("trwlstn_fn_txt")
))),
fluidRow(column(
3, fileInput("abun_fn", h4("Fish abundance:"), accept = ".csv")
),
column(7, h4(
textOutput("abun_fn_txt")
)))
)
)
),
tabPanel('Data Summary'),
tabPanel('Results')
)
))),
tabItem(tabName = 'ot',
box(
status = "warning", width = NULL,
"Box content"
))
))
)
| /ui.R | no_license | jgrew/BIT | R | false | false | 2,350 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(shinydashboard)
library(leaflet)
dashboardPage(
dashboardHeader(title = 'BIT'),
dashboardSidebar(sidebarMenu(
menuItem(
"Fish Response Index", tabName = "fri", icon = icon("dashboard")
),
menuItem(
"Some Other Tool", tabName = "ot", icon = icon("dashboard")
),
menuItem(
"About", tabName = 'about', icon = icon('question')
)
)),
dashboardBody(tabItems(
tabItem(tabName = 'fri',
fluidRow(column(
width = 12,
tabsetPanel(
tabPanel(
'Map',
leafletOutput('map', width = '100%', height = '600px'),
absolutePanel(
top = 5, right = 15,
bsButton('startBut', 'Get Started!', style = 'success')
),
bsModal(
'modalWelcome', 'Load Data', 'startBut', size = 'large',
fluidRow(
fluidRow(column(
3, fileInput("splist_fn", h4("Fish species list (e.g., FRI.csv):"), accept = ".csv")
),
column(7, h4(
textOutput("splist_fn_txt")
))),
fluidRow(column(
3, fileInput("trwlstn_fn", h4("Trawl stations:"), accept = ".csv")
),
column(7, h4(
textOutput("trwlstn_fn_txt")
))),
fluidRow(column(
3, fileInput("abun_fn", h4("Fish abundance:"), accept = ".csv")
),
column(7, h4(
textOutput("abun_fn_txt")
)))
)
)
),
tabPanel('Data Summary'),
tabPanel('Results')
)
))),
tabItem(tabName = 'ot',
box(
status = "warning", width = NULL,
"Box content"
))
))
)
|
\name{mongo.oid.time}
\alias{mongo.oid.time}
\title{Get an Object ID's time}
\usage{
mongo.oid.time(oid)
}
\arguments{
\item{oid}{(\link{mongo.oid}) The OID to be examined.}
}
\value{
(integer) ("POSIXct") The time portion of the given
\code{oid}.
}
\description{
Get the 32-bit UTC time portion of an OID (Object ID).
}
\details{
See \url{http://www.mongodb.org/display/DOCS/Object+IDs}
}
\examples{
oid <- mongo.oid.create()
print(mongo.oid.time(oid))
}
\seealso{
\link{mongo.oid},\cr \code{\link{mongo.oid.create}},\cr
\code{\link{as.character.mongo.oid}},\cr
\code{\link{mongo.oid.to.string}},\cr
\code{\link{mongo.oid.from.string}},\cr
\code{\link{mongo.bson.buffer.append}},\cr
\code{\link{mongo.bson.buffer.append.oid}},\cr
\link{mongo.bson.buffer},\cr \link{mongo.bson}.
}
| /man/mongo.oid.time.Rd | no_license | StefanoSpada/rmongodb | R | false | false | 784 | rd | \name{mongo.oid.time}
\alias{mongo.oid.time}
\title{Get an Object ID's time}
\usage{
mongo.oid.time(oid)
}
\arguments{
\item{oid}{(\link{mongo.oid}) The OID to be examined.}
}
\value{
(integer) ("POSIXct") The time portion of the given
\code{oid}.
}
\description{
Get the 32-bit UTC time portion of an OID (Object ID).
}
\details{
See \url{http://www.mongodb.org/display/DOCS/Object+IDs}
}
\examples{
oid <- mongo.oid.create()
print(mongo.oid.time(oid))
}
\seealso{
\link{mongo.oid},\cr \code{\link{mongo.oid.create}},\cr
\code{\link{as.character.mongo.oid}},\cr
\code{\link{mongo.oid.to.string}},\cr
\code{\link{mongo.oid.from.string}},\cr
\code{\link{mongo.bson.buffer.append}},\cr
\code{\link{mongo.bson.buffer.append.oid}},\cr
\link{mongo.bson.buffer},\cr \link{mongo.bson}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a.R
\name{randTree}
\alias{randTree}
\title{Generate a random tree}
\usage{
randTree(n, wndmtrx = FALSE, parallel = FALSE)
}
\arguments{
\item{n}{number of tips, integer, must be 3 or greater}
\item{wndmtrx}{T/F add node matrix? Default FALSE.}
\item{parallel}{T/F run in parallel? Default FALSE.}
}
\description{
Returns a random \code{TreeMan} tree with \code{n}
tips.
}
\details{
Equivalent to \code{ape}'s \code{rtree()} but returns a
\code{TreeMan} tree. Tree is always rooted and bifurcating.
}
\examples{
tree <- randTree(5)
}
\seealso{
\code{\link{TreeMan-class}}, \code{\link{blncdTree}},
\code{\link{unblncdTree}}
}
| /man/randTree.Rd | permissive | ropensci/phylotaR | R | false | true | 707 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a.R
\name{randTree}
\alias{randTree}
\title{Generate a random tree}
\usage{
randTree(n, wndmtrx = FALSE, parallel = FALSE)
}
\arguments{
\item{n}{number of tips, integer, must be 3 or greater}
\item{wndmtrx}{T/F add node matrix? Default FALSE.}
\item{parallel}{T/F run in parallel? Default FALSE.}
}
\description{
Returns a random \code{TreeMan} tree with \code{n}
tips.
}
\details{
Equivalent to \code{ape}'s \code{rtree()} but returns a
\code{TreeMan} tree. Tree is always rooted and bifurcating.
}
\examples{
tree <- randTree(5)
}
\seealso{
\code{\link{TreeMan-class}}, \code{\link{blncdTree}},
\code{\link{unblncdTree}}
}
|
## Create the base tables.
## NOTE(review): all paths below are absolute paths on the author's machine;
## consider making them relative to the project root.
info_contrato <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_contrato.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character","nr_documento_contratado"="character"))
item_contrato <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_item_contrato.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character"))
orgaos <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_orgaos.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character"))
empenho <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_empenhos.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character","cnpj_cpf"="character"))
licitacoes <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_licitacao.csv",encoding = "UTF-8")
## DAP source tables.
DAP_ativa <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/cafdapativa.csv", encoding = "UTF-8")
DAP_pessoa_fisica <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/DAP_Pessoa_Fisica.csv", encoding = "UTF-8")
DAP_cooperativas <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/cooperativasDAP.csv", encoding = "UTF-8",colClasses = c("CNPJEspecifico"="character"))
# Normalise municipality names: lower-case, accents transliterated to ASCII.
DAP_ativa <- DAP_ativa %>%
  mutate(MUNICIPIO= tolower(iconv(MUNICIPIO, from="UTF-8", to="ASCII//TRANSLIT")))
# Keep only rows whose UF contains "RS" (Rio Grande do Sul).
DAP_ativa_RS <- DAP_ativa %>%
  filter(stringr::str_detect(UF,"RS"))
# Normalise the name columns: upper-case, accents transliterated to ASCII.
DAP_ativa_RS <- DAP_ativa_RS %>%
  mutate(NOME_T1 = toupper(iconv(NOME_T1,from="UTF-8", to="ASCII//TRANSLIT"))) %>%
  mutate(NOME_T2 = toupper(iconv(NOME_T2,from="UTF-8", to="ASCII//TRANSLIT")))
| /arqsAnalise/main.R | no_license | ricardoadley/Analises-agro-familiar | R | false | false | 1,616 | r | ## Criacao das tabelas base
info_contrato <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_contrato.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character","nr_documento_contratado"="character"))
item_contrato <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_item_contrato.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character"))
orgaos <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_orgaos.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character"))
empenho <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_empenhos.csv",encoding = "UTF-8",colClasses = c("id_orgao"="character","cnpj_cpf"="character"))
licitacoes <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/bd/info_licitacao.csv",encoding = "UTF-8")
##
DAP_ativa <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/cafdapativa.csv", encoding = "UTF-8")
DAP_pessoa_fisica <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/DAP_Pessoa_Fisica.csv", encoding = "UTF-8")
DAP_cooperativas <- data.table::fread("/home/ricardo/Documentos/ta_na_mesa/data/cooperativasDAP.csv", encoding = "UTF-8",colClasses = c("CNPJEspecifico"="character"))
DAP_ativa <- DAP_ativa %>%
mutate(MUNICIPIO= tolower(iconv(MUNICIPIO, from="UTF-8", to="ASCII//TRANSLIT")))
DAP_ativa_RS <- DAP_ativa %>%
filter(stringr::str_detect(UF,"RS"))
DAP_ativa_RS <- DAP_ativa_RS %>%
mutate(NOME_T1 = toupper(iconv(NOME_T1,from="UTF-8", to="ASCII//TRANSLIT"))) %>%
mutate(NOME_T2 = toupper(iconv(NOME_T2,from="UTF-8", to="ASCII//TRANSLIT")))
|
#INPUT: 1) Lookup table of mature miR names and accession #s (hsa_miR_accessionTOname.txt)
# 2) a directpry of individual miRNA "isofom" level TCGA data matrices downloaded using TCGA-Assembler... i.e:
#################################################################################
#OUTPUT: an udpated data matrix with full miRNA names.
#################################################################################
#change directory to a directory containing files to update and accessionTOname file i.e.: > setwd("Desktop/miRdata/")
#setwd("~/Desktop/tumor-origin/data")
library(splitstackshape)
#library(qdap)
library(plyr)
library(reshape)
# filenames = dir(pattern="*isoforms.quantification.txt")
filenames = dir(pattern="*.isoforms.quantification.txt$")
# Convert one TCGA miRNA isoform quantification file: attach full mature-miR
# names via the accession->name lookup table and write three derived files
# under temp/: *.names.txt, *.counts.txt and the per-name summed *.sumSort.txt.
update_miRname = function(infile)
{
  tempFile = read.table(infile, header=TRUE, stringsAsFactors=FALSE)
  # Split the comma-separated miRNA_region column into separate columns;
  # miRNA_region_2 is the part matched against the lookup table below.
  tempFile =cSplit(tempFile, "miRNA_region", sep=",")
  full_list = read.table("hsa_miR_accessionTOname.txt", header=TRUE, stringsAsFactors=FALSE)
  # change Alias to match column title in tempFile
  full_list = setNames(full_list,c('miRNA_region_2','fullName'))
  mergedFile = merge(tempFile, full_list, by.x="miRNA_region_2", by.y="miRNA_region_2")
  #tempFile$fullName = lookup(tempFile$miRNA_region_2, full_list$Alias, full_list$Name)
  # Reduce to the two columns we need: full miR name and its read count.
  temp2 = data.frame(mergedFile$fullName, mergedFile$read_count)
  colnames(temp2) = c("miRNA", "Count")
  # NOTE(review): this writes the pre-merge table (which has no fullName
  # column) to *.names.txt; presumably mergedFile was intended -- verify.
  write.table(tempFile, file=paste("temp/", infile, ".names.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
  write.table(temp2, file=paste("temp/", infile, ".counts.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
  # Drop unmatched rows, sort by name, and sum counts per miRNA name; the
  # count column is relabelled with the input file name for the later join.
  temp3 = temp2[!(is.na(temp2[,1])),]
  temp3 = temp3[order(temp3[,1]), ]
  temp3 = aggregate(data=temp3, temp3[,2] ~ temp3[,1], FUN=sum)
  colnames(temp3) = c("miRNA", infile)
  write.table(temp3, file=paste("temp/", infile, ".sumSort.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
}
# Convert every isoform file, then join the per-sample count tables into a
# single miRNA x sample matrix written to miR_counts_matrix.txt.
lapply(filenames, update_miRname)
#next need to join all the data matrix files into one matrix
mergeFiles = list.files(path="temp/", pattern="*sumSort.txt")
# Collect the union of miRNA names across all per-sample files.
# Bug fix: the original used two independent if(exists())/if(!exists())
# tests, so on the first iteration the first file was read twice and
# appended to itself; harmless only because of the unique() below.
for (file in mergeFiles){
  if(!exists("mirNames")){
    mirNames = read.table(paste("temp/", file, sep=""), header=TRUE, stringsAsFactors=FALSE)
  } else {
    temp_dataset = read.table(paste("temp/", file, sep=""), header=TRUE, stringsAsFactors=FALSE)
    mirNames = rbind.fill(mirNames, temp_dataset)
    rm(temp_dataset)
  }
}
# Reduce to a sorted, de-duplicated one-column data frame of miRNA names.
mirNames = as.matrix(mirNames[,1])
mirNames = as.data.frame((sort(unique(mirNames))))
colnames(mirNames) = "miRNA"
# merge each file with this generated names column, putting zero if no match
mergeFiles <- paste("temp/", mergeFiles, sep="")
import.list <- llply(mergeFiles, read.table, header=TRUE)
data_matrix = join(mirNames, as.data.frame(import.list[1]), by= "miRNA", type="left")
# Bug fix: seq_along()[-1] instead of 2:length() -- with a single input
# file, 2:1 would iterate c(2, 1) and index a non-existent list element.
for(i in seq_along(import.list)[-1]){
  data_matrix = join(data_matrix, as.data.frame(import.list[i]), by= "miRNA", type="left")
}
data_matrix[is.na(data_matrix)] = 0
write.table(data_matrix, file="miR_counts_matrix.txt", sep="\t", col.names=TRUE, row.names=FALSE)
| /archive/data/get_Matrix.R | no_license | programmingprincess/tumor-origin | R | false | false | 3,269 | r |
#INPUT: 1) Lookup table of mature miR names and accession #s (hsa_miR_accessionTOname.txt)
# 2) a directpry of individual miRNA "isofom" level TCGA data matrices downloaded using TCGA-Assembler... i.e:
#################################################################################
#OUTPUT: an udpated data matrix with full miRNA names.
#################################################################################
#change directory to a directory containing files to update and accessionTOname file i.e.: > setwd("Desktop/miRdata/")
#setwd("~/Desktop/tumor-origin/data")
library(splitstackshape)
#library(qdap)
library(plyr)
library(reshape)
# filenames = dir(pattern="*isoforms.quantification.txt")
filenames = dir(pattern="*.isoforms.quantification.txt$")
update_miRname = function(infile)
{
tempFile = read.table(infile, header=TRUE, stringsAsFactors=FALSE)
tempFile =cSplit(tempFile, "miRNA_region", sep=",")
full_list = read.table("hsa_miR_accessionTOname.txt", header=TRUE, stringsAsFactors=FALSE)
# change Alias to match column title in tempFile
full_list = setNames(full_list,c('miRNA_region_2','fullName'))
mergedFile = merge(tempFile, full_list, by.x="miRNA_region_2", by.y="miRNA_region_2")
#tempFile$fullName = lookup(tempFile$miRNA_region_2, full_list$Alias, full_list$Name)
temp2 = data.frame(mergedFile$fullName, mergedFile$read_count)
colnames(temp2) = c("miRNA", "Count")
write.table(tempFile, file=paste("temp/", infile, ".names.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
write.table(temp2, file=paste("temp/", infile, ".counts.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
temp3 = temp2[!(is.na(temp2[,1])),]
temp3 = temp3[order(temp3[,1]), ]
temp3 = aggregate(data=temp3, temp3[,2] ~ temp3[,1], FUN=sum)
colnames(temp3) = c("miRNA", infile)
write.table(temp3, file=paste("temp/", infile, ".sumSort.txt", sep=""),sep="\t",col.names=TRUE, row.names=FALSE)
}
lapply(filenames, update_miRname)
#next need to join all the data matrix files into one matrix
mergeFiles = list.files(path="temp/", pattern="*sumSort.txt")
for (file in mergeFiles){
if(!exists("mirNames")){
mirNames = read.table(paste("temp/", file, sep=""), header=TRUE, stringsAsFactors=FALSE)
dim(mirNames)
}
if(exists("mirNames")){
temp_dataset = read.table(paste("temp/", file, sep=""), header=TRUE, stringsAsFactors=FALSE)
mirNames = rbind.fill(mirNames, temp_dataset)
rm(temp_dataset)
}
}
mirNames = as.matrix(mirNames[,1])
mirNames = as.data.frame((sort(unique(mirNames))))
colnames(mirNames) = "miRNA"
# merge each file with this generated names column, putting zero if no match
#setwd("~/Desktop/tumor-origin/data/temp")
#append temp/ to path of sumSort files
mergeFiles <- paste("temp/", mergeFiles, sep="")
import.list <- llply(mergeFiles, read.table, header=TRUE)
data_matrix =join(mirNames, as.data.frame(import.list[1]), by= "miRNA", type="left")
for(i in 2:length(mergeFiles)){
data_matrix =join(data_matrix, as.data.frame(import.list[i]), by= "miRNA", type="left")
}
data_matrix[is.na(data_matrix)] = 0
#setwd("~/Desktop/tumor-origin/data")
write.table(data_matrix, file="miR_counts_matrix.txt", sep="\t", col.names=TRUE, row.names=FALSE)
|
# Power analysis plots for the EGAP COVID list-experiment grant:
# loads simulation results and renders power heatmaps for the two treatments.
library(tidyverse)
library(DeclareDesign)
library(wesanderson)
library(tidyr)
library(patchwork)
# Loads `results` (simulation output) into the global environment.
load("./data/results_simulations_power.Rdata")
# NOTE(review): opens a graphics device showing all brewer palettes --
# interactive leftover; consider removing.
RColorBrewer::display.brewer.all()
# Shared plot cosmetics.
my_font <- "Palatino Linotype"
my_bkgd <- "white"
#my_bkgd <- "#f5f5f2"
# NOTE(review): this palette is never used -- `pal` is overwritten with a
# wesanderson palette before any plotting below.
pal <- RColorBrewer::brewer.pal(9, "Spectral")
my_theme <- theme(text = element_text(family = my_font, color = "#22211d"),
                  rect = element_rect(fill = my_bkgd),
                  plot.background = element_rect(fill = my_bkgd, color = NA),
                  panel.background = element_rect(fill = my_bkgd, color = NA),
                  panel.border = element_rect(color="black"),
                  strip.background = element_rect(color="black", fill="gray85"),
                  legend.background = element_rect(fill = my_bkgd, color = NA),
                  legend.key = element_rect(size = 6, fill = "white", colour = NA),
                  legend.key.size = unit(1, "cm"),
                  legend.text = element_text(size = 14, family = my_font),
                  legend.title = element_text(size=14),
                  plot.title = element_text(size = 22, face = "bold", family=my_font),
                  plot.subtitle = element_text(size=16, family=my_font),
                  axis.title= element_text(size=22),
                  axis.text = element_text(size=14, family=my_font),
                  axis.title.x = element_text(hjust=1),
                  strip.text = element_text(family = my_font, color = "#22211d",
                  size = 13, face="italic"))
theme_set(theme_bw() + my_theme)
# Recode simulation output: factor versions of sample size and effect sizes,
# a binary "reached 80% power" flag, and a readable treatment label.
results_c <-results %>%
  mutate(N_fct=as.factor(N),
         eff1=as.factor(eff1),
         eff2=as.factor(eff2),
         power_bin=ifelse(power>0.79, "Power > 80%", "Power < 80%")) %>%
  mutate(Treatment_Effects=ifelse(term=="violence", "Exposure to Violence",
                                  "Exposure to Non-Violence"))
# Average power per (sample size, effect size) cell for each treatment arm.
res_vio <- results_c %>%
  filter(term=="violence") %>%
  group_by(N_fct, eff1, term, Treatment_Effects) %>%
  summarise(power=mean(power)) %>%
  ungroup() %>%
  mutate(power_bin=ifelse(power>0.79, "Power > 80%", "Power < 80%"))
res_goods <- results_c %>%
  filter(term=="goods") %>%
  group_by(N_fct, eff2, term, Treatment_Effects) %>%
  summarise(power=mean(power)) %>%
  ungroup() %>%
  mutate(power_bin=ifelse(power>0.79, "Power > 80%", "Power < 80%"))
# Violence
pal <- wes_palette("Zissou1", n=5)
# Heatmap: sample size x effect size, filled by whether 80% power is reached.
violence <- ggplot(res_vio %>% filter(term=="violence"), aes(x=N_fct,y=eff1, fill=fct_rev(power_bin)))+
  geom_tile(colour="gray95",size=0.5, alpha=.8) +
  guides(fill=guide_legend(title="Power Results"))+
  labs(x="Number of Observations",
       y="") +
  scale_fill_manual(values=c(pal[1], pal[5])) +
  facet_grid(~ Treatment_Effects) +
  theme(axis.text.x = element_text(angle=45, hjust=1, size=10),
        strip.text = element_text(family = my_font, color = "#22211d",
                                  size = 14, face="italic"),
        plot.caption = element_text(size=10))
goods <- ggplot(res_goods %>% filter(term=="goods"), aes(x=N_fct,y=eff2, fill=fct_rev(power_bin)))+
  geom_tile(colour="gray95",size=0.5, alpha=.8) +
  guides(fill=guide_legend(title="Power Results"))+
  labs(x="",y="Effect Size",
       caption="")+
  guides(fill=FALSE) +
  scale_fill_manual(values=c(pal[1], pal[5])) +
  facet_grid(~ Treatment_Effects) +
  theme(axis.text.x = element_text(angle=45, hjust=1, size=10),
        strip.text = element_text(family = my_font, color = "#22211d",
                                  size = 14, face="italic"),
        plot.caption = element_text(size=10))
# Combine the two panels side by side (patchwork) and annotate.
graph <- goods + violence +
  plot_annotation(title="Power Analysis for List Experiments",
                  subtitle="Criminal governance amid the COVID-19 pandemic (EGAP GRANT)",
                  caption="Power Analysis estimated using the DeclareDesign framework")
# NOTE(review): getwd() and ?ggsave look like interactive leftovers --
# consider deleting them from the script.
getwd()
?ggsave
ggsave(graph, filename = "./power_analysis/egap_covid_poweranalysis/power_analysis.png", width = 14, height = 8, units = "in", pointsize = 12, bg = "white")
| /R/code_graph.r | no_license | TiagoVentura/egap_covid_poweranalysis | R | false | false | 4,170 | r | library(tidyverse)
library(DeclareDesign)
library(wesanderson)
library(tidyr)
library(patchwork)
# Simulation results (object `results`) produced elsewhere with
# DeclareDesign -- TODO confirm the objects this .Rdata file provides.
load("./data/results_simulations_power.Rdata")
# Interactive palette preview; has no effect on the saved figure.
RColorBrewer::display.brewer.all()
my_font <- "Palatino Linotype"
my_bkgd <- "white"
#my_bkgd <- "#f5f5f2"
# Spectral palette defined here but overwritten with a Wes Anderson
# palette just before plotting (see `pal <- wes_palette(...)` below).
pal <- RColorBrewer::brewer.pal(9, "Spectral")
# Shared ggplot theme: Palatino typography on a plain background,
# installed globally with theme_set() so every plot below inherits it.
my_theme <- theme(text = element_text(family = my_font, color = "#22211d"),
rect = element_rect(fill = my_bkgd),
plot.background = element_rect(fill = my_bkgd, color = NA),
panel.background = element_rect(fill = my_bkgd, color = NA),
panel.border = element_rect(color="black"),
strip.background = element_rect(color="black", fill="gray85"),
legend.background = element_rect(fill = my_bkgd, color = NA),
legend.key = element_rect(size = 6, fill = "white", colour = NA),
legend.key.size = unit(1, "cm"),
legend.text = element_text(size = 14, family = my_font),
legend.title = element_text(size=14),
plot.title = element_text(size = 22, face = "bold", family=my_font),
plot.subtitle = element_text(size=16, family=my_font),
axis.title= element_text(size=22),
axis.text = element_text(size=14, family=my_font),
axis.title.x = element_text(hjust=1),
strip.text = element_text(family = my_font, color = "#22211d",
size = 13, face="italic"))
theme_set(theme_bw() + my_theme)
# Tidy the raw simulation grid: factor versions of the design knobs
# (sample size N and the two effect sizes), a binary flag at the 80%
# power threshold (power > 0.79), and readable facet labels for the
# two treatment arms.
results_c <-results %>%
mutate(N_fct=as.factor(N),
eff1=as.factor(eff1),
eff2=as.factor(eff2),
power_bin=ifelse(power>0.79, "Power > 80%", "Power < 80%")) %>%
mutate(Treatment_Effects=ifelse(term=="violence", "Exposure to Violence",
"Exposure to Non-Violence"))
# Mean simulated power for the "violence" item over the N x effect-size
# grid, then flag each cell against the conventional 80% threshold.
# Written as explicit sequential steps rather than a single pipe.
res_vio <- filter(results_c, term == "violence")
res_vio <- group_by(res_vio, N_fct, eff1, term, Treatment_Effects)
res_vio <- summarise(res_vio, power = mean(power))
res_vio <- ungroup(res_vio)
res_vio <- mutate(res_vio,
                  power_bin = ifelse(power > 0.79, "Power > 80%", "Power < 80%"))
# Same aggregation for the "goods" item (effect-size column eff2).
# Written as explicit sequential steps rather than a single pipe.
res_goods <- filter(results_c, term == "goods")
res_goods <- group_by(res_goods, N_fct, eff2, term, Treatment_Effects)
res_goods <- summarise(res_goods, power = mean(power))
res_goods <- ungroup(res_goods)
res_goods <- mutate(res_goods,
                    power_bin = ifelse(power > 0.79, "Power > 80%", "Power < 80%"))
# Violence panel: tile heat-map of simulated power for the "violence"
# item -- sample size (x) vs. effect size (y), binned at 80% power.
# pal[1] / pal[5] are the two extreme colours of the Zissou1 palette.
pal <- wes_palette("Zissou1", n=5)
violence <- ggplot(res_vio %>% filter(term=="violence"), aes(x=N_fct,y=eff1, fill=fct_rev(power_bin)))+
geom_tile(colour="gray95",size=0.5, alpha=.8) +
guides(fill=guide_legend(title="Power Results"))+
labs(x="Number of Observations",
y="") +
scale_fill_manual(values=c(pal[1], pal[5])) +
facet_grid(~ Treatment_Effects) +
theme(axis.text.x = element_text(angle=45, hjust=1, size=10),
strip.text = element_text(family = my_font, color = "#22211d",
size = 14, face="italic"),
plot.caption = element_text(size=10))
# Goods panel: companion heat-map for the "goods" (non-violent) item.
# FIX: the original chained two contradictory guide specifications --
# guides(fill = guide_legend(...)) immediately followed by
# guides(fill = FALSE).  In ggplot2 the later call overrides the earlier
# one, so the legend was never drawn; only the suppressing call is kept
# (the shared legend comes from the `violence` panel when combined).
goods <- ggplot(res_goods %>% filter(term == "goods"),
                aes(x = N_fct, y = eff2, fill = fct_rev(power_bin))) +
  geom_tile(colour = "gray95", size = 0.5, alpha = .8) +
  labs(x = "", y = "Effect Size",
       caption = "") +
  guides(fill = FALSE) +
  scale_fill_manual(values = c(pal[1], pal[5])) +
  facet_grid(~ Treatment_Effects) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
        strip.text = element_text(family = my_font, color = "#22211d",
                                  size = 14, face = "italic"),
        plot.caption = element_text(size = 10))
# Combine the two panels side by side (patchwork `+`) and add shared
# title/subtitle/caption, then write the figure to disk.
# FIX: removed interactive leftovers `getwd()` and `?ggsave` -- they do
# not affect the output, and `?ggsave` tries to open the help pager when
# the script is sourced non-interactively.
graph <- goods + violence +
  plot_annotation(title="Power Analysis for List Experiments",
                  subtitle="Criminal governance amid the COVID-19 pandemic (EGAP GRANT)",
                  caption="Power Analysis estimated using the DeclareDesign framework")
# `pointsize` is forwarded by ggsave() to the graphics device via `...`.
ggsave(graph, filename = "./power_analysis/egap_covid_poweranalysis/power_analysis.png", width = 14, height = 8, units = "in", pointsize = 12, bg = "white")
|
library(tidyverse)
library(x3ptools)
library(bulletxtrctr)
# --- Single-scan walkthrough -------------------------------------------
# Read one Hamby-252 land scan, rotate/flip it into the orientation the
# downstream functions expect, convert units (m -> micron), then extract
# a crosscut, the groove locations, and the land signature.
x3p <- read_x3p("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/Barrel 1/Br1 Bullet 1-5.x3p")
x3p <- x3p %>% x3p_rotate(angle = -90)
x3p <- x3p %>% y_flip_x3p()
x3p <- x3p %>% x3p_m_to_mum()
#x3p %>% x3p_image()
cc <- x3p %>% x3p_crosscut_optimize()
ccdata <- x3p %>% x3p_crosscut(y = cc)
# Quick visual check of the raw crosscut profile.
ccdata %>% ggplot(aes(x = x, y = value)) + geom_line()
grooves <- ccdata %>% cc_locate_grooves(return_plot = TRUE)
sigs <- ccdata %>% cc_get_signature(grooves)
sigs %>%
ggplot(aes(x = x, y = sig)) + geom_line()
# --- Batch pipeline for all lands of Barrel 1 --------------------------
# NOTE(review): `bstats` is read but not used anywhere in this chunk.
bstats <- read.csv("~/papers/dissertations/eric-dissertation/data/data-25-25/bullet-stats.csv", stringsAsFactors = FALSE)
bullets <- read_bullet("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/Barrel 1/")
# Orient every scan and convert units, as in the single-scan demo above.
bullets <- bullets %>%
mutate(
x3p = x3p %>% purrr::map(.f = function(x) {
x <- x %>% x3p_rotate(angle=-90) %>% y_flip_x3p()
x %>% x3p_m_to_mum()
})
)
# Per-scan: optimal crosscut height, crosscut data, grooves, signature.
bullets <- bullets %>% mutate(
cc = x3p %>% purrr::map(.f = function(x) x3p_crosscut_optimize(x))
)
bullets <- bullets %>% mutate(
ccdata = purrr::map2(.x = x3p, .y = cc, .f = function(x, y) x3p_crosscut(x3p=x, y = y))
)
bullets <- bullets %>% mutate(
grooves = ccdata %>% purrr::map(.f = function(x) cc_locate_grooves(x))
)
bullets <- bullets %>% mutate(
sigs = purrr::map2(.x = ccdata, .y = grooves, .f = function(x, y) cc_get_signature(ccdata=x, grooves = y))
)
# One signature trace per land, facetted by source file.
signatures <- bullets %>% unnest(sigs)
signatures %>%
ggplot(aes( x= x, y = sig)) +
geom_line() +
facet_wrap(~source, ncol=6)
# All pairwise land-to-land comparisons (full cartesian grid, including
# self-comparisons and both orderings of each pair).
lands <- unique(bullets$source)
comparisons <- data.frame(
expand.grid(land1 = lands, land2 = lands), stringsAsFactors = FALSE)
comparisons <- comparisons %>% mutate(
aligned = purrr::map2(.x = land1, .y = land2, .f = function(xx, yy) {
# NOTE: these locals deliberately shadow the land1/land2 columns --
# inside the function they are the signature data frames, not names.
land1 <- bullets$sigs[bullets$source == xx][[1]]
land2 <- bullets$sigs[bullets$source == yy][[1]]
land1$bullet <- "first-land"
land2$bullet <- "second-land"
sig_align(land1$sig, land2$sig)
})
)
# Striation marks from the aligned signatures, then the legacy feature
# set at the scan resolution (1.5625 micron per pixel).
comparisons <- comparisons %>% mutate(
striae = aligned %>% purrr::map(.f = sig_cms_max, span = 75)
)
comparisons <- comparisons %>% mutate(
legacy_features = purrr::map(striae, extract_features_all_legacy, resolution = 1.5625)
)
# Flatten features to one row per land pair and build canonical land ids
# of the form Hamby252-Br1-B<bullet>-L<land>, parsed out of the file
# names with regexes.
legacy <- comparisons %>% tidyr::unnest(legacy_features)
legacy <- legacy %>% mutate(
bullet1 = gsub(".*(Bullet [12]).*", "\\1", land1),
l1 = gsub(".*Bullet [12]-([1-6]).*", "\\1", land1),
land_id1 = sprintf("Hamby252-Br1-B%d-L%s", parse_number(bullet1), l1)
)
legacy <- legacy %>% mutate(
bullet2 = gsub(".*(Bullet [12]).*", "\\1", land2),
l2 = gsub(".*Bullet [12]-([1-6]).*", "\\1", land2),
land_id2 = sprintf("Hamby252-Br1-B%d-L%s", parse_number(bullet2), l2)
)
# Heat-map of cross-correlation for all pairs computed in this run.
legacy %>% ggplot(aes(x = land_id1, y=land_id2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5)
# Published comparison features, restricted to barrel 1, for reference.
cf <- read.csv("data/hamby-comparisons.csv")
br1 <- cf %>% filter(grepl("Hamby252-Br1-", land_id1), grepl("Hamby252-Br1-", land_id2))
br1 %>% ggplot(aes(x = land_id1, y=land_id2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5)
# Join the freshly computed features against the published barrel-1
# comparisons, reshape both sets long, and scatter each feature from
# this run (.x) against its published counterpart (.y).
full_features <- legacy %>%
  left_join(br1, by = c("land_id1", "land_id2"))
feature_x <- full_features %>% select(land_id1, land_id2, ends_with(".x")) %>%
  pivot_longer(ends_with(".x"), names_to = "feature", values_to = "values") %>%
  mutate(
    # FIX: fixed = TRUE strips the literal ".x" suffix.  The original
    # pattern ".x" was a regex where "." matches ANY character, so any
    # feature name containing an inner "x" would be silently mangled.
    feature = gsub(".x", "", feature, fixed = TRUE)
  )
feature_y <- full_features %>% select(land_id1, land_id2, ends_with(".y")) %>%
  pivot_longer(ends_with(".y"), names_to = "feature", values_to = "values") %>%
  mutate(
    feature = gsub(".y", "", feature, fixed = TRUE)
  )
features <- feature_x %>% left_join(feature_y, by = c("land_id1", "land_id2", "feature"))
features %>%
  ggplot(aes(x = values.x, y = values.y)) + geom_point() +
  facet_wrap(~feature, scales = "free")
####
# try all barrels
# Read every scan in the study; scans come in two orientations, so use
# the surface-matrix dimensions to decide whether a rotation is needed
# before flipping, then convert units (m -> micron).
bullets <- read_bullet("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/")
bullets <- bullets %>%
mutate(
x3p = x3p %>% purrr::map(.f = function(x) {
# browser()
dims <- dim(x$surface.matrix)
if (dims[1] < dims[2]) {
x <- x %>% x3p_rotate(angle=-90) %>% y_flip_x3p()
} else {
x <- x %>% y_flip_x3p()
}
x %>% x3p_m_to_mum()
})
)
# Optimal crosscut height for every scan.  x3p_crosscut_optimize() is
# applied row by row; the result vector is preallocated.
# FIX: seq_len() instead of 1:nrow() (safe for zero rows, where 1:0
# would iterate over c(1, 0)); NA_real_ preallocates the correct type.
cc <- rep(NA_real_, nrow(bullets))
for (i in seq_len(nrow(bullets))) {
  cc[i] <- bullets$x3p[[i]] %>% x3p_crosscut_optimize()
}
bullets$cc <- cc
# Per-scan crosscut data, grooves and signatures (as for barrel 1).
bullets <- bullets %>% mutate(
ccdata = purrr::map2(.x = x3p, .y = cc, .f = function(x, y) x3p_crosscut(x3p=x, y = y))
)
bullets <- bullets %>% mutate(
grooves = ccdata %>% purrr::map(.f = function(x) cc_locate_grooves(x))
)
bullets <- bullets %>% mutate(
sigs = purrr::map2(.x = ccdata, .y = grooves, .f = function(x, y) cc_get_signature(ccdata=x, grooves = y))
)
signatures <- bullets %>% unnest(sigs)
signatures %>%
ggplot(aes( x= x, y = sig)) +
geom_line() +
facet_wrap(~source, ncol=6)
# Checkpoint the expensive objects to disk.
saveRDS(bullets, "bullets.rds")
# Full cartesian grid of land-to-land comparisons, aligned signatures.
lands <- unique(bullets$source)
comparisons <- data.frame(
expand.grid(land1 = lands, land2 = lands), stringsAsFactors = FALSE)
comparisons <- comparisons %>% mutate(
aligned = purrr::map2(.x = land1, .y = land2, .f = function(xx, yy) {
# Locals deliberately shadow the land1/land2 columns inside the closure.
land1 <- bullets$sigs[bullets$source == xx][[1]]
land2 <- bullets$sigs[bullets$source == yy][[1]]
land1$bullet <- "first-land"
land2$bullet <- "second-land"
sig_align(land1$sig, land2$sig)
})
)
saveRDS(comparisons, "comparisons.rds")
comparisons <- comparisons %>% mutate(
striae = aligned %>% purrr::map(.f = sig_cms_max, span = 75)
)
comparisons <- comparisons %>% mutate(
legacy_features = purrr::map(striae, extract_features_all_legacy, resolution = 1.5625)
)
# NOTE(review): overwrites the checkpoint above with the enriched table.
saveRDS(comparisons, "comparisons.rds")
# Flatten features and tag each land with its source study, inferred
# from the scan's file path: "Br*"/"Ukn" folders belong to Hamby set
# 252, lower-case "br*" folders to set 173 (grepl() is case sensitive).
# FIX: case_when() replaces the nested-ifelse chain; conditions are in
# reverse of the original assignment order so the same precedence
# (/br over /Ukn over /Br) is preserved.  Columns are referenced
# directly instead of via `legacy$`, the idiomatic form inside mutate().
legacy <- comparisons %>% tidyr::unnest(legacy_features)
legacy <- legacy %>% mutate(
  study1 = case_when(
    grepl("/br", land1) ~ "Hamby173",
    grepl("/Ukn", land1) ~ "Hamby252",
    grepl("/Br", land1) ~ "Hamby252",
    TRUE ~ NA_character_
  ),
  study2 = case_when(
    grepl("/br", land2) ~ "Hamby173",
    grepl("/Ukn", land2) ~ "Hamby252",
    grepl("/Br", land2) ~ "Hamby252",
    TRUE ~ NA_character_
  )
)
# Parse barrel, bullet and land numbers out of the two file-name
# conventions (Hamby252 "Br1 Bullet 2-3" style vs. Hamby173
# "br1_2_land3" style), then build canonical land ids.
legacy <- legacy %>% mutate(
barrel1 = gsub(".*((Br[0-9]+)|(Ukn)|(br[0-9A-Z]+)).*", "\\1", land1),
barrel1 = ifelse(is.na(parse_number(barrel1)), "Ukn", parse_number(barrel1))
)
legacy <- legacy %>% mutate(
barrel2 = gsub(".*((Br[0-9]+)|(Ukn)|(br[0-9A-Z]+)).*", "\\1", land2),
barrel2 = ifelse(is.na(parse_number(barrel2)), "Ukn", parse_number(barrel2))
)
legacy <- legacy %>% mutate(
bullet1 = gsub(".*((Bullet [12A-Z])|(_[12]_)).*", "\\1", land1),
bullet1 = gsub("Bullet ", "", bullet1),
bullet1 = ifelse(is.na(parse_number(bullet1)), bullet1, parse_number(bullet1))
)
legacy <- legacy %>% mutate(
bullet2 = gsub(".*((Bullet [12A-Z])|(_[12]_)).*", "\\1", land2),
bullet2 = gsub("Bullet ", "", bullet2),
bullet2 = ifelse(is.na(parse_number(bullet2)), bullet2, parse_number(bullet2))
)
# Land number: first try the "Bullet X-n" pattern, then "_landn".
legacy <- legacy %>% mutate(
l1 = gsub(".*Bullet [12A-Z]-([1-6]).*", "\\1", land1),
l1 = gsub(".*_land([1-6]).*", "\\1", l1),
)
legacy <- legacy %>% mutate(
l2 = gsub(".*Bullet [12A-Z]-([1-6]).*", "\\1", land2),
l2 = gsub(".*_land([1-6]).*", "\\1", l2),
)
legacy <- legacy %>% mutate(
land_id1 = sprintf("%s-Br%s-B%s-L%s", study1, barrel1, bullet1, l1),
land_id2 = sprintf("%s-Br%s-B%s-L%s", study2, barrel2, bullet2, l2)
)
# Export the feature table (list-columns dropped) and inspect the
# Hamby252-only cross-correlation matrix.
write.csv(legacy %>% select(-aligned, -striae), "Hamby173-252-features.csv", row.names=FALSE)
legacy %>% filter(study1 == "Hamby252", study2 == "Hamby252") %>%
ggplot(aes(x = l1, y=l2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5) +
facet_grid(barrel1+bullet1~barrel2+bullet2)
# Compare this run's features against the published comparison set for
# the whole study, coloured by the ground-truth same-source label.
cf <- read.csv("data/hamby-comparisons.csv")
full_features <- legacy %>%
  left_join(cf, by = c("land_id1", "land_id2"))
feature_x <- full_features %>% select(land_id1, land_id2, ends_with(".x")) %>%
  pivot_longer(ends_with(".x"), names_to = "feature", values_to = "values") %>%
  mutate(
    # FIX: fixed = TRUE strips the literal ".x" suffix; the original
    # regex "." matched any character and could mangle feature names
    # that contain an inner "x".
    feature = gsub(".x", "", feature, fixed = TRUE)
  )
feature_y <- full_features %>% select(land_id1, land_id2, ends_with(".y")) %>%
  pivot_longer(ends_with(".y"), names_to = "feature", values_to = "values") %>%
  mutate(
    feature = gsub(".y", "", feature, fixed = TRUE)
  )
# Drop published rows with missing values before matching.
feature_y <- na.omit(feature_y)
features <- feature_x %>% left_join(feature_y, by = c("land_id1", "land_id2", "feature"))
features <- features %>% left_join(cf %>% select(land_id1, land_id2, same_source), by = c("land_id1", "land_id2"))
features %>%
  ggplot(aes(x = values.x, y = values.y, colour = same_source)) + geom_point() +
  facet_wrap(~feature, scales = "free")
| /code/create-features.R | no_license | ganeshkrishnann/DIB-Hamby | R | false | false | 8,840 | r | library(tidyverse)
library(x3ptools)
library(bulletxtrctr)
# --- Single-scan walkthrough -------------------------------------------
# Read one Hamby-252 land scan, rotate/flip it into the orientation the
# downstream functions expect, convert units (m -> micron), then extract
# a crosscut, the groove locations, and the land signature.
x3p <- read_x3p("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/Barrel 1/Br1 Bullet 1-5.x3p")
x3p <- x3p %>% x3p_rotate(angle = -90)
x3p <- x3p %>% y_flip_x3p()
x3p <- x3p %>% x3p_m_to_mum()
#x3p %>% x3p_image()
cc <- x3p %>% x3p_crosscut_optimize()
ccdata <- x3p %>% x3p_crosscut(y = cc)
# Quick visual check of the raw crosscut profile.
ccdata %>% ggplot(aes(x = x, y = value)) + geom_line()
grooves <- ccdata %>% cc_locate_grooves(return_plot = TRUE)
sigs <- ccdata %>% cc_get_signature(grooves)
sigs %>%
ggplot(aes(x = x, y = sig)) + geom_line()
# --- Batch pipeline for all lands of Barrel 1 --------------------------
# NOTE(review): `bstats` is read but not used anywhere in this chunk.
bstats <- read.csv("~/papers/dissertations/eric-dissertation/data/data-25-25/bullet-stats.csv", stringsAsFactors = FALSE)
bullets <- read_bullet("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/Barrel 1/")
# Orient every scan and convert units, as in the single-scan demo above.
bullets <- bullets %>%
mutate(
x3p = x3p %>% purrr::map(.f = function(x) {
x <- x %>% x3p_rotate(angle=-90) %>% y_flip_x3p()
x %>% x3p_m_to_mum()
})
)
# Per-scan: optimal crosscut height, crosscut data, grooves, signature.
bullets <- bullets %>% mutate(
cc = x3p %>% purrr::map(.f = function(x) x3p_crosscut_optimize(x))
)
bullets <- bullets %>% mutate(
ccdata = purrr::map2(.x = x3p, .y = cc, .f = function(x, y) x3p_crosscut(x3p=x, y = y))
)
bullets <- bullets %>% mutate(
grooves = ccdata %>% purrr::map(.f = function(x) cc_locate_grooves(x))
)
bullets <- bullets %>% mutate(
sigs = purrr::map2(.x = ccdata, .y = grooves, .f = function(x, y) cc_get_signature(ccdata=x, grooves = y))
)
# One signature trace per land, facetted by source file.
signatures <- bullets %>% unnest(sigs)
signatures %>%
ggplot(aes( x= x, y = sig)) +
geom_line() +
facet_wrap(~source, ncol=6)
# All pairwise land-to-land comparisons (full cartesian grid, including
# self-comparisons and both orderings of each pair).
lands <- unique(bullets$source)
comparisons <- data.frame(
expand.grid(land1 = lands, land2 = lands), stringsAsFactors = FALSE)
comparisons <- comparisons %>% mutate(
aligned = purrr::map2(.x = land1, .y = land2, .f = function(xx, yy) {
# NOTE: these locals deliberately shadow the land1/land2 columns --
# inside the function they are the signature data frames, not names.
land1 <- bullets$sigs[bullets$source == xx][[1]]
land2 <- bullets$sigs[bullets$source == yy][[1]]
land1$bullet <- "first-land"
land2$bullet <- "second-land"
sig_align(land1$sig, land2$sig)
})
)
# Striation marks from the aligned signatures, then the legacy feature
# set at the scan resolution (1.5625 micron per pixel).
comparisons <- comparisons %>% mutate(
striae = aligned %>% purrr::map(.f = sig_cms_max, span = 75)
)
comparisons <- comparisons %>% mutate(
legacy_features = purrr::map(striae, extract_features_all_legacy, resolution = 1.5625)
)
# Flatten features to one row per land pair and build canonical land ids
# of the form Hamby252-Br1-B<bullet>-L<land>, parsed out of file names.
legacy <- comparisons %>% tidyr::unnest(legacy_features)
legacy <- legacy %>% mutate(
bullet1 = gsub(".*(Bullet [12]).*", "\\1", land1),
l1 = gsub(".*Bullet [12]-([1-6]).*", "\\1", land1),
land_id1 = sprintf("Hamby252-Br1-B%d-L%s", parse_number(bullet1), l1)
)
legacy <- legacy %>% mutate(
bullet2 = gsub(".*(Bullet [12]).*", "\\1", land2),
l2 = gsub(".*Bullet [12]-([1-6]).*", "\\1", land2),
land_id2 = sprintf("Hamby252-Br1-B%d-L%s", parse_number(bullet2), l2)
)
# Heat-map of cross-correlation for all pairs computed in this run.
legacy %>% ggplot(aes(x = land_id1, y=land_id2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5)
# Published comparison features, restricted to barrel 1, for reference.
cf <- read.csv("data/hamby-comparisons.csv")
br1 <- cf %>% filter(grepl("Hamby252-Br1-", land_id1), grepl("Hamby252-Br1-", land_id2))
br1 %>% ggplot(aes(x = land_id1, y=land_id2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5)
full_features <- legacy %>%
left_join(br1, by=c("land_id1", "land_id2"))
# Reshape both feature sets long and scatter each feature from this run
# (.x) against its published counterpart (.y).
feature_x <- full_features %>% select(land_id1, land_id2, ends_with(".x")) %>%
  pivot_longer(ends_with(".x"), names_to = "feature", values_to = "values") %>%
  mutate(
    # FIX: fixed = TRUE strips the literal ".x" suffix.  The original
    # pattern ".x" was a regex where "." matches ANY character, so any
    # feature name containing an inner "x" would be silently mangled.
    feature = gsub(".x", "", feature, fixed = TRUE)
  )
feature_y <- full_features %>% select(land_id1, land_id2, ends_with(".y")) %>%
  pivot_longer(ends_with(".y"), names_to = "feature", values_to = "values") %>%
  mutate(
    feature = gsub(".y", "", feature, fixed = TRUE)
  )
features <- feature_x %>% left_join(feature_y, by = c("land_id1", "land_id2", "feature"))
features %>%
  ggplot(aes(x = values.x, y = values.y)) + geom_point() +
  facet_wrap(~feature, scales = "free")
####
# try all barrels
# Read every scan in the study; scans come in two orientations, so use
# the surface-matrix dimensions to decide whether a rotation is needed
# before flipping, then convert units (m -> micron).
bullets <- read_bullet("~/papers/dissertations/eric-dissertation/images/Hamby (2009) Barrel/bullets/")
bullets <- bullets %>%
mutate(
x3p = x3p %>% purrr::map(.f = function(x) {
# browser()
dims <- dim(x$surface.matrix)
if (dims[1] < dims[2]) {
x <- x %>% x3p_rotate(angle=-90) %>% y_flip_x3p()
} else {
x <- x %>% y_flip_x3p()
}
x %>% x3p_m_to_mum()
})
)
# Optimal crosscut height for every scan.  x3p_crosscut_optimize() is
# applied row by row; the result vector is preallocated.
# FIX: seq_len() instead of 1:nrow() (safe for zero rows, where 1:0
# would iterate over c(1, 0)); NA_real_ preallocates the correct type.
cc <- rep(NA_real_, nrow(bullets))
for (i in seq_len(nrow(bullets))) {
  cc[i] <- bullets$x3p[[i]] %>% x3p_crosscut_optimize()
}
bullets$cc <- cc
# Per-scan crosscut data, grooves and signatures (as for barrel 1).
bullets <- bullets %>% mutate(
ccdata = purrr::map2(.x = x3p, .y = cc, .f = function(x, y) x3p_crosscut(x3p=x, y = y))
)
bullets <- bullets %>% mutate(
grooves = ccdata %>% purrr::map(.f = function(x) cc_locate_grooves(x))
)
bullets <- bullets %>% mutate(
sigs = purrr::map2(.x = ccdata, .y = grooves, .f = function(x, y) cc_get_signature(ccdata=x, grooves = y))
)
signatures <- bullets %>% unnest(sigs)
signatures %>%
ggplot(aes( x= x, y = sig)) +
geom_line() +
facet_wrap(~source, ncol=6)
# Checkpoint the expensive objects to disk.
saveRDS(bullets, "bullets.rds")
# Full cartesian grid of land-to-land comparisons, aligned signatures.
lands <- unique(bullets$source)
comparisons <- data.frame(
expand.grid(land1 = lands, land2 = lands), stringsAsFactors = FALSE)
comparisons <- comparisons %>% mutate(
aligned = purrr::map2(.x = land1, .y = land2, .f = function(xx, yy) {
# Locals deliberately shadow the land1/land2 columns inside the closure.
land1 <- bullets$sigs[bullets$source == xx][[1]]
land2 <- bullets$sigs[bullets$source == yy][[1]]
land1$bullet <- "first-land"
land2$bullet <- "second-land"
sig_align(land1$sig, land2$sig)
})
)
saveRDS(comparisons, "comparisons.rds")
comparisons <- comparisons %>% mutate(
striae = aligned %>% purrr::map(.f = sig_cms_max, span = 75)
)
comparisons <- comparisons %>% mutate(
legacy_features = purrr::map(striae, extract_features_all_legacy, resolution = 1.5625)
)
# NOTE(review): overwrites the checkpoint above with the enriched table.
saveRDS(comparisons, "comparisons.rds")
# Flatten features and tag each land with its source study, inferred
# from the scan's file path: "Br*"/"Ukn" folders belong to Hamby set
# 252, lower-case "br*" folders to set 173 (grepl() is case sensitive).
# FIX: case_when() replaces the nested-ifelse chain; conditions are in
# reverse of the original assignment order so the same precedence
# (/br over /Ukn over /Br) is preserved.  Columns are referenced
# directly instead of via `legacy$`, the idiomatic form inside mutate().
legacy <- comparisons %>% tidyr::unnest(legacy_features)
legacy <- legacy %>% mutate(
  study1 = case_when(
    grepl("/br", land1) ~ "Hamby173",
    grepl("/Ukn", land1) ~ "Hamby252",
    grepl("/Br", land1) ~ "Hamby252",
    TRUE ~ NA_character_
  ),
  study2 = case_when(
    grepl("/br", land2) ~ "Hamby173",
    grepl("/Ukn", land2) ~ "Hamby252",
    grepl("/Br", land2) ~ "Hamby252",
    TRUE ~ NA_character_
  )
)
# Parse barrel, bullet and land numbers out of the two file-name
# conventions (Hamby252 "Br1 Bullet 2-3" style vs. Hamby173
# "br1_2_land3" style), then build canonical land ids.
legacy <- legacy %>% mutate(
barrel1 = gsub(".*((Br[0-9]+)|(Ukn)|(br[0-9A-Z]+)).*", "\\1", land1),
barrel1 = ifelse(is.na(parse_number(barrel1)), "Ukn", parse_number(barrel1))
)
legacy <- legacy %>% mutate(
barrel2 = gsub(".*((Br[0-9]+)|(Ukn)|(br[0-9A-Z]+)).*", "\\1", land2),
barrel2 = ifelse(is.na(parse_number(barrel2)), "Ukn", parse_number(barrel2))
)
legacy <- legacy %>% mutate(
bullet1 = gsub(".*((Bullet [12A-Z])|(_[12]_)).*", "\\1", land1),
bullet1 = gsub("Bullet ", "", bullet1),
bullet1 = ifelse(is.na(parse_number(bullet1)), bullet1, parse_number(bullet1))
)
legacy <- legacy %>% mutate(
bullet2 = gsub(".*((Bullet [12A-Z])|(_[12]_)).*", "\\1", land2),
bullet2 = gsub("Bullet ", "", bullet2),
bullet2 = ifelse(is.na(parse_number(bullet2)), bullet2, parse_number(bullet2))
)
# Land number: first try the "Bullet X-n" pattern, then "_landn".
legacy <- legacy %>% mutate(
l1 = gsub(".*Bullet [12A-Z]-([1-6]).*", "\\1", land1),
l1 = gsub(".*_land([1-6]).*", "\\1", l1),
)
legacy <- legacy %>% mutate(
l2 = gsub(".*Bullet [12A-Z]-([1-6]).*", "\\1", land2),
l2 = gsub(".*_land([1-6]).*", "\\1", l2),
)
legacy <- legacy %>% mutate(
land_id1 = sprintf("%s-Br%s-B%s-L%s", study1, barrel1, bullet1, l1),
land_id2 = sprintf("%s-Br%s-B%s-L%s", study2, barrel2, bullet2, l2)
)
# Export the feature table (list-columns dropped), inspect the
# Hamby252-only cross-correlation matrix, and join against the
# published comparison set for the whole study.
write.csv(legacy %>% select(-aligned, -striae), "Hamby173-252-features.csv", row.names=FALSE)
legacy %>% filter(study1 == "Hamby252", study2 == "Hamby252") %>%
ggplot(aes(x = l1, y=l2, fill=ccf)) + geom_tile() +
scale_fill_gradient2(low="darkgrey", mid="white", high = "darkorange", midpoint = 0.5) +
facet_grid(barrel1+bullet1~barrel2+bullet2)
cf <- read.csv("data/hamby-comparisons.csv")
full_features <- legacy %>%
left_join(cf, by=c("land_id1", "land_id2"))
# Reshape both feature sets long and scatter this run's features (.x)
# against the published ones (.y), coloured by ground-truth label.
feature_x <- full_features %>% select(land_id1, land_id2, ends_with(".x")) %>%
  pivot_longer(ends_with(".x"), names_to = "feature", values_to = "values") %>%
  mutate(
    # FIX: fixed = TRUE strips the literal ".x" suffix; the original
    # regex "." matched any character and could mangle feature names
    # that contain an inner "x".
    feature = gsub(".x", "", feature, fixed = TRUE)
  )
feature_y <- full_features %>% select(land_id1, land_id2, ends_with(".y")) %>%
  pivot_longer(ends_with(".y"), names_to = "feature", values_to = "values") %>%
  mutate(
    feature = gsub(".y", "", feature, fixed = TRUE)
  )
# Drop published rows with missing values before matching.
feature_y <- na.omit(feature_y)
features <- feature_x %>% left_join(feature_y, by = c("land_id1", "land_id2", "feature"))
features <- features %>% left_join(cf %>% select(land_id1, land_id2, same_source), by = c("land_id1", "land_id2"))
features %>%
  ggplot(aes(x = values.x, y = values.y, colour = same_source)) + geom_point() +
  facet_wrap(~feature, scales = "free")
|
context("marxan_problem")
test_that("character (compile)", {
# make and compile problem
path <- system.file("extdata/input.dat", package = "prioritizr")
p <- marxan_problem(path)
o <- compile(p)
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# make and compile equivalent problem
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_boundary_penalties(1, 1, data = bound_data) %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
# Smoke test: a path-built marxan_problem() is solvable and yields a
# numeric solution column.  Needs an installed solver, so skipped on CI.
test_that("character (solve)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# make problem
path <- system.file("extdata/input.dat", package = "prioritizr")
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# As above, but the input.dat is rewritten so its INPUTDIR entry is an
# absolute path, exercising path resolution in marxan_problem().
test_that("character (solve, absolute INPUTDIR path)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# set up input.dat with absolute file paths
path <- file.path(tempfile(fileext = ".dat"))
f <- readLines(system.file("extdata/input.dat", package = "prioritizr"))
f[grep("INPUTDIR", f, fixed = TRUE)] <- paste("INPUTDIR",
system.file("extdata/input", package = "prioritizr"))
writeLines(f, path)
# make problem
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# As above, but INPUTDIR is blanked out and every table entry
# (SPECNAME/PUNAME/PUVSPRNAME/BOUNDNAME) is rewritten to an absolute
# path, exercising per-file path resolution.
test_that("character (solve, absolute file paths)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# set up input.dat with absolute file paths
path <- file.path(tempfile(fileext = ".dat"))
f <- readLines(system.file("extdata/input.dat", package = "prioritizr"))
f[grep("INPUTDIR", f, fixed = TRUE)] <- ""
f[grep("SPECNAME", f, fixed = TRUE)] <- paste("SPECNAME",
system.file("extdata/input/spec.dat",
package = "prioritizr"))
f[grep("PUNAME", f, fixed = TRUE)] <- paste("PUNAME",
system.file("extdata/input/pu.dat",
package = "prioritizr"))
f[grep("PUVSPRNAME", f, fixed = TRUE)] <- paste("PUVSPRNAME",
system.file(
"extdata/input/puvspr.dat",
package = "prioritizr"))
f[grep("BOUNDNAME", f, fixed = TRUE)] <- paste("BOUNDNAME",
system.file(
"extdata/input/bound.dat",
package = "prioritizr"))
writeLines(f, path)
# make problem
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# A marxan_problem() built from raw Marxan data frames (with boundary
# data and blm = 3) must compile identically to the hand-built
# problem() with matching boundary penalties.
test_that("data.frame (compile, boundary penalties)", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# make and compile problem
p <- marxan_problem(pu_data, spec_data, puvspr_data, bound_data, 3)
o <- compile(p)
# make and compile equivalent problem
# Marxan status codes: 2 = locked in, 3 = locked out.
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_boundary_penalties(3, 1, data = bound_data) %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
# Same as above but without boundary data: no boundary penalties are
# added to the equivalent hand-built problem.
test_that("data.frame (compile, no boundary penalties)", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
# make and compile problem
p <- marxan_problem(pu_data, spec_data, puvspr_data)
o <- compile(p)
# make and compile equivalent problem
# Marxan status codes: 2 = locked in, 3 = locked out.
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
# Smoke test: a data-frame-built marxan_problem() with boundary
# penalties is solvable.  Needs an installed solver, so skipped on CI.
# FIX: removed the unused local `path` (input.dat is irrelevant here --
# the problem is built from the raw tables).
test_that("data.frame (solve, boundary penalties)", {
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  skip_if_not(any_solvers_installed())
  # make problem from raw Marxan tables
  wd <- system.file("extdata/input", package = "prioritizr")
  pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
  spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
  puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
                            sep = ",")
  bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
                           sep = "\t")
  p <- marxan_problem(pu_data, spec_data, puvspr_data, bound_data, blm = 1) %>%
       add_default_solver(time_limit = 5)
  # check that problem can be solved
  s <- solve(p)
  # tests
  expect_is(s, "data.frame")
  expect_true("solution_1" %in% names(s))
  expect_true(is.numeric(s$solution_1))
})
# Smoke test: a data-frame-built marxan_problem() without boundary data
# is solvable.  Needs an installed solver, so skipped on CI.
# FIX: removed the unused local `path` (input.dat is irrelevant here --
# the problem is built from the raw tables).
test_that("data.frame (solve, no boundary penalties)", {
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  skip_if_not(any_solvers_installed())
  # make problem from raw Marxan tables
  wd <- system.file("extdata/input", package = "prioritizr")
  pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
  spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
  puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
                            sep = ",")
  p <- marxan_problem(pu_data, spec_data, puvspr_data) %>%
       add_default_solver(time_limit = 5)
  # check that problem can be solved
  s <- solve(p)
  # tests
  expect_is(s, "data.frame")
  expect_true("solution_1" %in% names(s))
  expect_true(is.numeric(s$solution_1))
})
# Input validation: marxan_problem() must error on malformed arguments.
# `[<-`(x, i, j, NA) is functional-form assignment -- it returns a copy
# of x with cell [i, j] set to NA without modifying x itself.
test_that("invalid inputs", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
p <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
s <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
pv <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
b <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# run tests: missing/wrong argument types, dropped columns/rows,
# injected NAs, and invalid blm values must all raise errors.
expect_error(marxan_problem(NULL))
expect_error(marxan_problem("a"))
expect_error(marxan_problem(p[, -1], s, pv, b, 5))
expect_error(marxan_problem(p[-1, ], s, pv, b, 5))
expect_error(marxan_problem(`[<-`(p, 1, 1, NA), s, pv, b, 5))
expect_error(marxan_problem(p, s[-1, ], pv, b, 5))
expect_error(marxan_problem(p, s[, -1], pv, b, 5))
expect_error(marxan_problem(p, `[<-`(s, 1, 1, NA), pv, b, 5))
expect_error(marxan_problem(p, s, pv[, -1], b, 5))
expect_error(marxan_problem(p, s, `[<-`(pv, 1, 1, NA), b, 5))
expect_error(marxan_problem(p, s, pv, b[, -1], 5))
expect_error(marxan_problem(p, s, pv, `[<-`(b, 1, 1, NA), 5))
expect_error(marxan_problem(p, s, pv, b, NA))
expect_error(marxan_problem(p, s, pv, b, c(5, 5)))
})
| /tests/testthat/test_marxan_problem.R | no_license | IsaakBM/prioritizr | R | false | false | 10,242 | r | context("marxan_problem")
# A marxan_problem() built from an input.dat path must compile to the
# same optimisation problem as the equivalent problem() built by hand
# from the underlying Marxan tables.
test_that("character (compile)", {
# make and compile problem
path <- system.file("extdata/input.dat", package = "prioritizr")
p <- marxan_problem(path)
o <- compile(p)
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
# Marxan status codes: 2 = locked in, 3 = locked out.
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# make and compile equivalent problem
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_boundary_penalties(1, 1, data = bound_data) %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems: every component of the compiled optimisation
# model must be identical.
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
# Smoke test: a path-built marxan_problem() is solvable and yields a
# numeric solution column.  Needs an installed solver, so skipped on CI.
test_that("character (solve)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# make problem
path <- system.file("extdata/input.dat", package = "prioritizr")
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# As above, but the input.dat is rewritten so its INPUTDIR entry is an
# absolute path, exercising path resolution in marxan_problem().
test_that("character (solve, absolute INPUTDIR path)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# set up input.dat with absolute file paths
path <- file.path(tempfile(fileext = ".dat"))
f <- readLines(system.file("extdata/input.dat", package = "prioritizr"))
f[grep("INPUTDIR", f, fixed = TRUE)] <- paste("INPUTDIR",
system.file("extdata/input", package = "prioritizr"))
writeLines(f, path)
# make problem
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# As above, but INPUTDIR is blanked out and every table entry
# (SPECNAME/PUNAME/PUVSPRNAME/BOUNDNAME) is rewritten to an absolute
# path, exercising per-file path resolution.
test_that("character (solve, absolute file paths)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# set up input.dat with absolute file paths
path <- file.path(tempfile(fileext = ".dat"))
f <- readLines(system.file("extdata/input.dat", package = "prioritizr"))
f[grep("INPUTDIR", f, fixed = TRUE)] <- ""
f[grep("SPECNAME", f, fixed = TRUE)] <- paste("SPECNAME",
system.file("extdata/input/spec.dat",
package = "prioritizr"))
f[grep("PUNAME", f, fixed = TRUE)] <- paste("PUNAME",
system.file("extdata/input/pu.dat",
package = "prioritizr"))
f[grep("PUVSPRNAME", f, fixed = TRUE)] <- paste("PUVSPRNAME",
system.file(
"extdata/input/puvspr.dat",
package = "prioritizr"))
f[grep("BOUNDNAME", f, fixed = TRUE)] <- paste("BOUNDNAME",
system.file(
"extdata/input/bound.dat",
package = "prioritizr"))
writeLines(f, path)
# make problem
p <- marxan_problem(path) %>%
add_default_solver(time_limit = 5)
# check that problem can be solved
s <- solve(p)
# tests
expect_is(s, "data.frame")
expect_true("solution_1" %in% names(s))
expect_true(is.numeric(s$solution_1))
})
# Checks that marxan_problem() with data.frame inputs and a BLM of 3 compiles
# to the same optimization problem as the equivalent manually-built problem().
test_that("data.frame (compile, boundary penalties)", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# make and compile problem
p <- marxan_problem(pu_data, spec_data, puvspr_data, bound_data, 3)
o <- compile(p)
# make and compile equivalent problem
# (Marxan status codes: 2 = locked in, 3 = locked out)
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_boundary_penalties(3, 1, data = bound_data) %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems: every component of the compiled optimization
# problem (objective, constraint matrix, bounds, variable types) must match
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
# Same as the boundary-penalty test above, but without boundary data: the
# marxan_problem() shortcut must compile identically to the manual problem().
test_that("data.frame (compile, no boundary penalties)", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
# make and compile problem
p <- marxan_problem(pu_data, spec_data, puvspr_data)
o <- compile(p)
# make and compile equivalent problem
# (Marxan status codes: 2 = locked in, 3 = locked out)
pu_data$locked_in <- pu_data$status == 2
pu_data$locked_out <- pu_data$status == 3
p2 <- problem(pu_data, spec_data, puvspr_data, cost_column = "cost") %>%
add_min_set_objective() %>%
add_relative_targets("prop") %>%
add_locked_in_constraints("locked_in") %>%
add_locked_out_constraints("locked_out") %>%
add_binary_decisions()
o2 <- compile(p2)
# compare two problems component by component
expect_equal(o$obj(), o2$obj())
expect_true(all(o$A() == o2$A()))
expect_equal(o$rhs(), o2$rhs())
expect_equal(o$sense(), o2$sense())
expect_equal(o$modelsense(), o2$modelsense())
expect_equal(o$col_ids(), o2$col_ids())
expect_equal(o$row_ids(), o2$row_ids())
expect_equal(o$lb(), o2$lb())
expect_equal(o$ub(), o2$ub())
expect_equal(o$vtype(), o2$vtype())
})
test_that("data.frame (solve, boundary penalties)", {
  # skip on CI/CRAN machines and when no optimization solver is available
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  skip_if_not(any_solvers_installed())
  # load the Marxan-format tables bundled with the package
  # (the unused `path` variable from the original has been removed)
  wd <- system.file("extdata/input", package = "prioritizr")
  pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
  spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
  puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
                            sep = ",")
  bound_data <- read.table(file.path(wd, "bound.dat"), header = TRUE,
                           sep = "\t")
  # build a problem with boundary penalties (blm = 1) and a fast solver
  p <- marxan_problem(pu_data, spec_data, puvspr_data, bound_data, blm = 1) %>%
    add_default_solver(time_limit = 5)
  # check that problem can be solved
  s <- solve(p)
  # tests: the solution is a data frame with a numeric solution_1 column
  expect_is(s, "data.frame")
  expect_true("solution_1" %in% names(s))
  expect_true(is.numeric(s$solution_1))
})
test_that("data.frame (solve, no boundary penalties)", {
  # skip on CI/CRAN machines and when no optimization solver is available
  skip_on_cran()
  skip_on_travis()
  skip_on_appveyor()
  skip_if_not(any_solvers_installed())
  # load the Marxan-format tables bundled with the package
  # (the unused `path` variable from the original has been removed)
  wd <- system.file("extdata/input", package = "prioritizr")
  pu_data <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
  spec_data <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
  puvspr_data <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
                            sep = ",")
  # build a problem without boundary data and attach a fast default solver
  p <- marxan_problem(pu_data, spec_data, puvspr_data) %>%
    add_default_solver(time_limit = 5)
  # check that problem can be solved
  s <- solve(p)
  # tests: the solution is a data frame with a numeric solution_1 column
  expect_is(s, "data.frame")
  expect_true("solution_1" %in% names(s))
  expect_true(is.numeric(s$solution_1))
})
# Checks that marxan_problem() rejects malformed inputs: wrong argument type,
# a missing column (`[, -1]`), a missing row (`[-1, ]`), an injected NA cell
# (`` `[<-`(x, 1, 1, NA) `` replaces element [1, 1] with NA), and invalid BLM
# values (NA or non-scalar).
test_that("invalid inputs", {
# load data
wd <- system.file("extdata/input", package = "prioritizr")
p <- read.table(file.path(wd, "pu.dat"), header = TRUE, sep = ",")
s <- read.table(file.path(wd, "spec.dat"), header = TRUE, sep = ",")
pv <- read.table(file.path(wd, "puvspr.dat"), header = TRUE,
sep = ",")
b <- read.table(file.path(wd, "bound.dat"), header = TRUE,
sep = "\t")
# run tests
expect_error(marxan_problem(NULL))
expect_error(marxan_problem("a"))
expect_error(marxan_problem(p[, -1], s, pv, b, 5))
expect_error(marxan_problem(p[-1, ], s, pv, b, 5))
expect_error(marxan_problem(`[<-`(p, 1, 1, NA), s, pv, b, 5))
expect_error(marxan_problem(p, s[-1, ], pv, b, 5))
expect_error(marxan_problem(p, s[, -1], pv, b, 5))
expect_error(marxan_problem(p, `[<-`(s, 1, 1, NA), pv, b, 5))
expect_error(marxan_problem(p, s, pv[, -1], b, 5))
expect_error(marxan_problem(p, s, `[<-`(pv, 1, 1, NA), b, 5))
expect_error(marxan_problem(p, s, pv, b[, -1], 5))
expect_error(marxan_problem(p, s, pv, `[<-`(b, 1, 1, NA), 5))
expect_error(marxan_problem(p, s, pv, b, NA))
expect_error(marxan_problem(p, s, pv, b, c(5, 5)))
})
|
#' Simulate operating characteristics of repaired Cox regression and competitors.
#'
#'
#' This function is intended to verify the operating characteristics of the approximate conditional inferential approach of \insertCite{kz19;textual}{PHInfiniteEstimates} to proportional hazards regression. An exponential regression model, corresponding to the proportional hazards regression model, is fit to the data, and new data sets are simulated from this model. P-values are calculated for these new data sets, and their empirical distribution is compared to the theoretical uniform distribution.
#' @param nobs number of observations in simulated data set.
#' @param k number of covariates in simulated data set. Each covariate is dichotomous.
#' @param B odds of 1 vs. 0 in dichotomous variables.
#' @param c censoring proportion.
#' @param nsamp number of samples.
#' @param beta regression parameters, all zeros if null, and all the same value if a scalar.
#' @param add partial simulation results to be added to, or NULL if de novo.
#' @param half does nothing; provided for compatibility with simcode.
#' @param verbose Triggers verbose messages.
#' @param smoothfirst Triggers normal rather than dichotomous interest covariate.
#' @return a list with components
#' \itemize{
#' \item out matrix with columns corresponding to p-values.
#' }
#' @importFrom stats runif
#' @export
heinzeschemper<-function(nobs=50,k=5,B=1,c=0,nsamp=1000,beta=NULL,add=NULL,half=NULL,verbose=FALSE,smoothfirst=FALSE){
  # Simulate operating characteristics of repaired Cox regression and the
  # penalized (Firth-type) competitors; see the roxygen header for details.
  #
  # When `add` is NULL a fresh simulation starts from a fixed seed; otherwise
  # the stored seed and previous results in `add` are restored so that the new
  # replicates extend the earlier run.
  if (is.null(add)) {
    set.seed(202043125)
    start <- 0
  } else {
    outout <- rbind(add$out, array(NA, c(nsamp, dim(add$out)[2])))
    start <- dim(add$out)[1]
    set.seed(add$seed)
  }
  # NULL beta means all-null effects; a scalar beta is recycled to length k
  if (is.null(beta)) beta <- rep(0, k)
  if (length(beta) == 1) beta <- rep(beta, k)
  # full model (x1..xk) and reduced model omitting the interest covariate x1
  gg <- as.formula(paste("Surv(times,delta)~", paste("x", seq(k), sep = "", collapse = "+")))
  hh <- as.formula(paste("Surv(times,delta)~", paste("x", (2:k), sep = "", collapse = "+")))
  d1 <- Sys.time()
  cenp <- rep(NA, nsamp)
  for (kk in seq_len(nsamp)) {
    if (verbose) {
      d2 <- Sys.time()
      # crude linear extrapolation of the remaining run time
      message("kk=", kk, " of ", nsamp, ". Completion time ", (d2 - d1) * (nsamp - kk) / kk + d2)
    }
    # covariates: dichotomous with odds B of taking the value 1; optionally
    # the first (interest) covariate is standard normal instead
    randdat <- if (smoothfirst) cbind(rnorm(nobs), as.data.frame(array(runif(nobs * (k - 1)) > (B / (1 + B)), c(nobs, k - 1))) + 0) else as.data.frame(array(runif(nobs * k) > (B / (1 + B)), c(nobs, k))) + 0
    names(randdat) <- paste("x", seq(k), sep = "")
    randdat$x <- as.matrix(randdat)
    # exponential event times consistent with the proportional hazards model
    randdat$times <- -log(runif(nobs)) / exp(randdat$x %*% beta)
    # delta: event indicator; c is the target censoring proportion
    randdat$delta <- runif(nobs) > c
    cenp[kk] <- mean(randdat$delta)
    # FIX: the original used randdat$t, which only worked via R's partial
    # matching of "$t" to the "times" column; reference it explicitly.
    randdat$y <- Surv(randdat$times, randdat$delta)
    repairedfit <- fixcoxph(randdat, randdat$x, "x1")
    penalizedout <- coxphf(gg, randdat, maxit = 400, maxstep = 0.05)
    penalizedoutsmaller <- coxphf(hh, randdat, maxit = 400, maxstep = 0.05)
    myout <- summarizefits(repairedfit, penalizedout, penalizedoutsmaller, "x1")
    # allocate the output matrix once the result length/names are known
    if ((start + kk) == 1) {
      outout <- array(NA, c(nsamp, length(myout)))
      dimnames(outout) <- list(NULL, names(myout))
    }
    outout[start + kk, ] <- myout
  }
  # return results plus the RNG state and settings so the run can be extended
  return(list(out = outout, seed = .Random.seed, settings = list(nobs = nobs, k = k, B = B, c = c, nsamp = nsamp, beta = beta, half = half, verbose = verbose), cenp = cenp))
}
| /R/heinzeschemper.R | no_license | cran/PHInfiniteEstimates | R | false | false | 3,231 | r | #' Simulate operating characteristics of repaired Cox regression and competitors.
#'
#'
#' This function is intended to verify the operating characteristics of the approximate conditional inferential approach of \insertCite{kz19;textual}{PHInfiniteEstimates} to proportional hazards regression. An exponential regression model, corresponding to the proportional hazards regression model, is fit to the data, and new data sets are simulated from this model. P-values are calculated for these new data sets, and their empirical distribution is compared to the theoretical uniform distribution.
#' @param nobs number of observations in simulated data set.
#' @param k number of covariates in simulated data set. Each covariate is dichotomous.
#' @param B odds of 1 vs. 0 in dichotomous variables.
#' @param c censoring proportion.
#' @param nsamp number of samples.
#' @param beta regression parameters, all zeros if null, and all the same value if a scalar.
#' @param add partial simulation results to be added to, or NULL if de novo.
#' @param half does nothing; provided for compatibility with simcode.
#' @param verbose Triggers verbose messages.
#' @param smoothfirst Triggers normal rather than dichotomous interest covariate.
#' @return a list with components
#' \itemize{
#' \item out matrix with columns corresponding to p-values.
#' }
#' @importFrom stats runif
#' @export
# Simulate operating characteristics of repaired Cox regression and the
# penalized competitors (see the roxygen header above for the full contract).
heinzeschemper<-function(nobs=50,k=5,B=1,c=0,nsamp=1000,beta=NULL,add=NULL,half=NULL,verbose=FALSE,smoothfirst=FALSE){
# fresh run: fixed seed; continuation run: restore seed and grow add$out
if (is.null(add)) {
set.seed(202043125)
start <- 0
} else {
outout <- rbind(add$out, array(NA, c(nsamp, dim(add$out)[2])))
start <- dim(add$out)[1]
set.seed(add$seed)
}
# NULL beta -> all-null effects; scalar beta is recycled to length k
if(is.null(beta)) beta<-rep(0,k)
if(length(beta)==1) beta<-rep(beta,k)
# full model (x1..xk) and reduced model omitting the interest covariate x1
gg<-as.formula(paste("Surv(times,delta)~",paste("x",seq(k),sep="",collapse="+")))
hh<-as.formula(paste("Surv(times,delta)~",paste("x",(2:k),sep="",collapse="+")))
d1 <- Sys.time()
cenp<-rep(NA,nsamp)
for(kk in seq(nsamp)){
if (verbose) {
d2 <- Sys.time()
# crude linear extrapolation of the remaining run time
message("kk=",kk," of ",nsamp,". Completion time ",(d2 - d1) * (nsamp - kk)/kk + d2)
}
# covariates: dichotomous with odds B of taking 1; optionally the first
# (interest) covariate is standard normal instead
randdat<-if(smoothfirst) cbind(rnorm(nobs),as.data.frame(array(runif(nobs*(k-1))>(B/(1+B)),c(nobs,k-1)))+0) else as.data.frame(array(runif(nobs*k)>(B/(1+B)),c(nobs,k)))+0
names(randdat)<-paste("x",seq(k),sep="")
randdat$x<-as.matrix(randdat)
# exponential event times under the proportional hazards model
randdat$times<--log(runif(nobs))/exp(randdat$x%*%beta)
randdat$delta<-runif(nobs) > c
cenp[kk]<-mean(randdat$delta)
# NOTE(review): randdat$t resolves to "times" only via $ partial matching;
# this works here because no other column starts with "t" — confirm.
randdat$y<-Surv(randdat$t,randdat$delta)
# cat("About to run fixcoxph\n")
repairedfit<-fixcoxph(randdat,randdat$x,"x1")
penalizedout<-coxphf(gg,randdat,maxit=400,maxstep=0.05)
penalizedoutsmaller<-coxphf(hh,randdat,maxit=400,maxstep=0.05)
myout<-summarizefits(repairedfit,penalizedout,penalizedoutsmaller,"x1")
# allocate the output matrix on the very first replicate of a fresh run
if((start+kk)==1){
outout<-array(NA,c(nsamp,length(myout)))
dimnames(outout)<-list(NULL,names(myout))
}
outout[start+kk,]<-myout
}
# return results plus RNG state and settings so the run can be extended
return(list(out=outout,seed=.Random.seed,settings=list(nobs=nobs,k=k,B=B,c=c,nsamp=nsamp,beta=beta,half=half,verbose=verbose),cenp=cenp))
}
|
# Driver script: apply all estimators to synthetic data with Mahalanobis
# distance 2 and p = 20 features, across a grid of total sample sizes.
rm(list = ls(all = TRUE))  # NOTE(review): clears the workspace; assumed intentional for batch runs
source("synth_applyAllEstimators.R")
# evaluate the sourced function inside a fresh environment
qqq <- new.env()
print(getwd())
PATH_PROJ <- getwd()
environment(synth_applyAllEstimators) <- qqq
# setting up the parameters
# define parameters of the run
sampleSize <- seq(30, 150, by = 10)
sampleSize <- c(sampleSize, 200, 250, 300)
p <- 20  # feature dimension (companion runs use 5, 50, 150)
mahDist <- 2
repetition <- 500
print("total sample size is")
print(sampleSize)
# regularization grid: 21 log-spaced gamma values spanning 1/1000 .. 1000
gammaBase <- 1000^(1 / 10)
gammaValues <- gammaBase^(c(-10:10))
checkValue <- 0
kappa <- 1
# FIX: message typo "size us" -> "size is"
print(paste("mah distance is ", as.character(mahDist), ", feature size is ", as.character(p), sep = ""))
# FIX: seq_along() instead of 1:length() (safe for empty vectors)
for (k in seq_along(sampleSize)) {
  # write current sample size and apply all estimators
  currentSampleSize <- sampleSize[k]
  # store data in tempFileName
  tempPrefile <- paste(PATH_PROJ, "/temp/temp_", sep = "")  # NOTE(review): apparently unused in this loop
  tempFileName <- paste(PATH_PROJ, "/dist", as.character(mahDist), "/p", as.character(p), "/synth_p", as.character(p), "_sample", as.character(currentSampleSize), ".RData", sep = "")
  cat("\n\n")
  print("running the applyAllEstimators")
  synth_applyAllEstimators(currentSampleSize, mahDist, p, repetition, gammaValues, checkValue, kappa, tempFileName)
}
| /synthetic/dist2/synth_dist2_p20.R | no_license | danik0411/optimum-rlda | R | false | false | 1,197 | r | # this file runs
# Driver script: apply all estimators to synthetic data (Mahalanobis
# distance 2, p = 20 features) over a grid of total sample sizes.
rm(list=ls(all=TRUE));
source("synth_applyAllEstimators.R");
# evaluate the sourced function inside a fresh environment
qqq = new.env();
print(getwd());
PATH_PROJ=getwd();
environment( synth_applyAllEstimators ) = qqq;
# setting up the parameters
# define parameters of the run
sampleSize=seq(30,150,by=10);
sampleSize = c(sampleSize, 200, 250, 300);
p = 20; # 5, 20, 50, 150
mahDist = 2;
repetition=500;
print("total sample size is");
print(sampleSize);
# regularization grid: 21 log-spaced gamma values spanning 1/1000 .. 1000
gammaBase = 1000^(1/10);
gammaValues = gammaBase^(c(-10:10));
checkValue = 0; kappa = 1;
print(paste("mah distance is ", as.character(mahDist), ", feature size us " , as.character(p), sep="") );
for( k in 1:length(sampleSize) ) {
# write current sample size and apply all estimators
currentSampleSize = sampleSize[k];
# store data in tempFileName
tempPrefile = paste(PATH_PROJ, "/temp/temp_" , sep="");
tempFileName = paste( PATH_PROJ , "/dist", as.character(mahDist), "/p", as.character(p), "/synth_p", as.character(p), "_sample", as.character(currentSampleSize) , ".RData" , sep="");
cat("\n\n");
print("running the applyAllEstimators");
synth_applyAllEstimators( currentSampleSize , mahDist , p, repetition , gammaValues , checkValue , kappa , tempFileName );
}
|
# Auto-generated capture wrapper (RcppDeepState/valgrind test style) for the
# compiled calcsum routine of the flam package: logs every call's arguments
# into the global `data.env` environment before dispatching to C++.
function (thetamat, n, p)
{
# fetch the shared logging environment from the global workspace
e <- get("data.env", .GlobalEnv)
# append this call's arguments to the running log of calcsum invocations
e[["calcsum"]][[length(e[["calcsum"]]) + 1]] <- list(thetamat = thetamat,
n = n, p = p)
# dispatch to the compiled implementation registered by the flam package
.Call("_flam_calcsum", PACKAGE = "flam", thetamat, n, p)
}
| /valgrind_test_dir/calcsum-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 230 | r | function (thetamat, n, p)
{
e <- get("data.env", .GlobalEnv)
e[["calcsum"]][[length(e[["calcsum"]]) + 1]] <- list(thetamat = thetamat,
n = n, p = p)
.Call("_flam_calcsum", PACKAGE = "flam", thetamat, n, p)
}
|
## ---- strength
strength <- function(n = 1000, x, pars, seed = 0, h0) {
  # Purpose: compute the size (when H0 is true) or the power (when H0 is
  #          false) of the t-test and the Mann-Whitney U-test by simulation.
  # Inputs:  n:    the number of simulated sets of data
  #          x:    the number of heights to simulate in each set of data
  #          pars: a vector containing the mean and standard deviation of the
  #                male heights and then the female heights, with the
  #                percentage in decimal form of the heights from the original
  #                data that are from males, labelled "m.mu", "m.sd", "f.mu",
  #                "f.sd" and "rat", respectively
  #          seed: the seed to set to ensure reproducibility
  #          h0:   TRUE if the null hypothesis is known to be true (=> size),
  #                FALSE if it is known to be false (=> power)
  # Outputs: list with t.size/mw.size (h0 TRUE) or t.power/mw.power (h0 FALSE)
  # NOTE(review): `t()` here must be this project's t-test wrapper defined
  # elsewhere, not base::t — confirm it is in scope when this is called.
  # Apply both tests to n simulated datasets and keep the p-values
  t.p <- t(n = n, x = x, pars = pars, seed = seed)
  m.p <- mann(n = n, x = x, pars = pars, seed = seed)
  # Proportion of p-values at or below 0.05, i.e. rejections at the 5% level
  tpr <- length(which(t.p <= 0.05)) / length(t.p)
  mpr <- length(which(m.p <= 0.05)) / length(m.p)
  # FIX: use the logical value directly (no comparison against the
  # reassignable aliases T/F) and an explicit else branch, so every logical
  # h0 yields a return value instead of silently returning NULL
  if (h0) {
    list("t.size" = tpr, "mw.size" = mpr)
  } else {
    list("t.power" = tpr, "mw.power" = mpr)
  }
}
| /strength.r | no_license | joshenson0104/Assignment3jh304 | R | false | false | 2,226 | r | ## ---- strength
strength <- function(n = 1000, x, pars, seed = 0, h0) {
# Purpose: to calculate the power or size of the t-test and the Mann-
# Whitney U-test under different scenarios, depending on
# whether the null hypothesis (H0) is false or true
# Inputs: n: the number of simulated sets of data
# x: the number of heights to simulate in each set of data
# pars: a vector containing the mean and standard deviation of the
# male heights and then the female heights, with the
# percentage in decimal form of the heights from the original
# data that are from males, labelled "m.mu", "m.sd", "f.mu",
# "f.sd" and "rat", respectively
# seed: the seed to set to ensure reproducibility
# h0: a logical variable that is TRUE if the null hypothesis is known
# to be true, or FALSE if it is known to be false, and dictates
# whether the size or power of each test is calculated,
# respectively
# Outputs: t.size: the size of the t-test under the given scenario
# t.power: the power of the t-test under the given scenario
# mw.size: the size of the Mann-Whitney U-test under the given
# scenario
# mw.power: the power of the Mann-Whitney U-test under the given
# scenario
# NOTE(review): `t()` must be this project's t-test wrapper defined
# elsewhere (it shadows base::t) — confirm it is in scope when called.
# Apply both the t-test and the Mann-Whitney U-test to n simulated datasets
# and store the p-values given by the tests
t.p <- t(n = n, x = x, pars = pars, seed = seed)
m.p <- mann(n = n, x = x, pars = pars, seed = seed)
# Find the proportion of p-values that are under 0.05 and so would cause us to
# reject the null hypothesis at a 5% significance level, whether correctly or
# not
tpr <- length(which(t.p <= 0.05)) / length(t.p)
mpr <- length(which(m.p <= 0.05)) / length(m.p)
# If h0 is TRUE, then this is the calculated size
# NOTE(review): T/F are reassignable aliases of TRUE/FALSE; if neither
# comparison matches, the function returns NULL invisibly.
if (h0 == T)
return(list("t.size" = tpr, "mw.size" = mpr))
# If h0 is FALSE, then this is the calculated power
if (h0 == F)
return(list("t.power" = tpr, "mw.power" = mpr))
}
|
# Post-processing script: summarize saved simulation output (bias, RMSE,
# coverage, CI width) for every estimator and scenario, then write tables.
rm(list=ls())
############################################################################################################
###process simulation results ############
processResult=function(result, truth) {
  # Summarize simulation output for one estimator.
  #
  # Args:
  #   result: matrix/data.frame with one row per simulation replicate and
  #           columns (1) point estimate, (2) bootstrap SE,
  #           (3) lower CI bound, (4) upper CI bound.
  #   truth:  true value of the estimand.
  #
  # Returns a named numeric vector: truth, bias, relative bias, empirical SD,
  # RMSE, CI coverage rate, mean CI width, number of replicates used, and
  # mean bootstrap SE.
  # drop failed replicates (rows whose point estimate is NA)
  result <- result[!is.na(result[, 1]), ]
  est <- result[, 1]
  # coverage rate: fraction of CIs [col 3, col 4] that contain the truth
  # (vectorized; replaces the original element-wise loop)
  coverage_ps11 <- mean(result[, 3] <= truth & result[, 4] >= truth)
  # bias, empirical SD and RMSE of the point estimates
  bias11 <- mean(est - truth)
  sd11 <- sd(est)
  RMSE11 <- sqrt(mean((est - truth)^2))
  # mean CI width and mean bootstrap standard error
  width11 <- mean(abs(result[, 4] - result[, 3]))
  sd11Boot <- mean(result[, 2])
  finalOut <- c(truth, bias11, bias11 / truth, sd11, RMSE11, coverage_ps11,
                width11, nrow(result), sd11Boot)
  names(finalOut) <- c("truth", "bias", "biasPercent", "sd", "RMSE",
                       "coverage", "widthCI", "num.sim", "sdBoot")
  return(finalOut)
}
# Root of the simulation output tree.
# NOTE(review): machine-specific absolute Windows path — adjust per machine.
DIREC_ROOT="C:/Users/Tingting.Zhou/Desktop/paper2/resubmission/linear/"
#varying the value of gammaV for different degrees of overlap
sampleSize=500
gammaV=4
# true value of every estimand (ATE/ATM/ATT/ATC/ATO and the truncated ones)
truthVal=rep(0.75, 7)
names(truthVal)=c("ATE", "ATM", "ATT", "ATC", "ATO", "truncate", "truncateQ")
DIRECOUT=paste0(DIREC_ROOT, "Results/")
###asymmetric truncation, at quantile level
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
#modelSpec="misPred"
#modelSpec="both"
#modelSpec="misPred2"
# Main post-processing loop: for each model-(mis)specification scenario, read
# the stored simulation output for every estimator, drop failed replicates
# (NA in column 1), summarize each estimator with processResult(), and write
# one combined results table per scenario.
# NOTE(review): `header = T` uses the reassignable alias T for TRUE.
for(modelSpec in c("misPred", "both", "misWeight")){
###pencomp
ATE_pencomp=NULL
ATM_pencomp=NULL
ATM_w_pencomp=NULL
ATO_pencomp=NULL
ATT_pencomp=NULL
ATT_w_pencomp=NULL
ATC_pencomp=NULL
ATC_w_pencomp=NULL
truncate_pencomp=NULL
truncateQ_pencomp=NULL
###weighted estimators
ATE=NULL
ATE.aug=NULL
ATM=NULL
ATM.aug=NULL
ATO=NULL
ATO.aug=NULL
ATT=NULL
ATT.aug=NULL
ATC=NULL
ATC.aug=NULL
truncate=NULL
truncate.aug=NULL
truncate_rest=NULL
truncate.aug_rest=NULL
truncateQ=NULL
truncateQ.aug=NULL
truncateQ_rest=NULL
truncateQ.aug_rest=NULL
DIRECR=NULL
DIRECR=paste0(DIREC_ROOT, "homoT/")
###weighted estimator output
ATE=rbind(ATE, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE", "_", modelSpec, ".txt", sep=""), header = T))
ATE.aug=rbind(ATE.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATM=rbind(ATM, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM", "_", modelSpec, ".txt", sep=""), header = T))
ATM.aug=rbind(ATM.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATO=rbind(ATO, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO", "_", modelSpec, ".txt", sep=""), header = T))
ATO.aug=rbind(ATO.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncate=rbind(truncate, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate", "_", modelSpec, ".txt", sep=""), header = T))
truncate.aug=rbind(truncate.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ=rbind(truncateQ, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ.aug=rbind(truncateQ.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncate_rest=rbind(truncate_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncate.aug_rest=rbind(truncate.aug_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate.aug_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_rest=rbind(truncateQ_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ.aug_rest=rbind(truncateQ.aug_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ.aug_rest", "_", modelSpec, ".txt", sep=""), header = T))
ATT=rbind(ATT, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT", "_", modelSpec, ".txt", sep=""), header = T))
ATT.aug=rbind(ATT.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATC=rbind(ATC, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC", "_", modelSpec, ".txt", sep=""), header = T))
ATC.aug=rbind(ATC.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC.aug", "_", modelSpec, ".txt", sep=""), header = T))
###pencomp output
ATE_pencomp=rbind(ATE_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_pencomp=rbind(ATM_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_w_pencomp=rbind(ATM_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_w_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATO_pencomp=rbind(ATO_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncate_pencomp=rbind(truncate_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_pencomp=rbind(truncateQ_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATT_pencomp=rbind(ATT_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATT_w_pencomp=rbind(ATT_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATC_pencomp=rbind(ATC_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATC_w_pencomp=rbind(ATC_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
###pencomp
# drop replicates whose point estimate (column 1) is missing
ATE_pencomp=ATE_pencomp[which(!is.na(ATE_pencomp[,1])),]
ATM_pencomp=ATM_pencomp[which(!is.na(ATM_pencomp[,1])),]
ATM_w_pencomp=ATM_w_pencomp[which(!is.na(ATM_w_pencomp[,1])),]
ATO_pencomp=ATO_pencomp[which(!is.na(ATO_pencomp[,1])),]
ATT_pencomp=ATT_pencomp[which(!is.na(ATT_pencomp[,1])),]
ATT_w_pencomp=ATT_w_pencomp[which(!is.na(ATT_w_pencomp[,1])),]
ATC_pencomp=ATC_pencomp[which(!is.na(ATC_pencomp[,1])),]
ATC_w_pencomp=ATC_w_pencomp[which(!is.na(ATC_w_pencomp[,1])),]
###pencomp
# print the number of usable replicates per estimator (interactive check)
dim(ATE_pencomp)
dim(ATM_pencomp)
dim(ATM_w_pencomp)
dim(ATO_pencomp)
dim(ATT_pencomp)
dim(ATT_w_pencomp)
dim(ATC_pencomp)
dim(ATC_w_pencomp)
truncate_pencomp=truncate_pencomp[which(!is.na(truncate_pencomp[,1])),]
truncateQ_pencomp=truncateQ_pencomp[which(!is.na(truncateQ_pencomp[,1])),]
dim(truncate_pencomp)
dim(truncateQ_pencomp)
###weighted estimators
ATE=ATE[which(!is.na(ATE[,1])),]
ATE.aug=ATE.aug[which(!is.na(ATE.aug[,1])),]
ATM=ATM[which(!is.na(ATM[,1])),]
ATM.aug=ATM.aug[which(!is.na(ATM.aug[,1])),]
ATO=ATO[which(!is.na(ATO[,1])),]
ATO.aug=ATO.aug[which(!is.na(ATO.aug[,1])),]
ATT=ATT[which(!is.na(ATT[,1])),]
ATT.aug=ATT.aug[which(!is.na(ATT.aug[,1])),]
ATC=ATC[which(!is.na(ATC[,1])),]
ATC.aug=ATC.aug[which(!is.na(ATC.aug[,1])),]
truncate=truncate[which(!is.na(truncate[,1])),]
truncate.aug=truncate.aug[which(!is.na(truncate.aug[,1])),]
truncate_rest=truncate_rest[which(!is.na(truncate_rest[,1])),]
truncate.aug_rest=truncate.aug_rest[which(!is.na(truncate.aug_rest[,1])),]
truncateQ=truncateQ[which(!is.na(truncateQ[,1])),]
truncateQ.aug=truncateQ.aug[which(!is.na(truncateQ.aug[,1])),]
truncateQ_rest=truncateQ_rest[which(!is.na(truncateQ_rest[,1])),]
truncateQ.aug_rest=truncateQ.aug_rest[which(!is.na(truncateQ.aug_rest[,1])),]
###weighted estimators
dim(ATE)
dim(ATE.aug)
dim(ATM)
dim(ATM.aug)
dim(ATO)
dim(ATO.aug)
dim(ATT)
dim(ATT.aug)
dim(ATC)
dim(ATC.aug)
dim(truncate)
dim(truncate.aug)
dim(truncate_rest)
dim(truncate.aug_rest)
dim(truncateQ)
dim(truncateQ.aug)
dim(truncateQ_rest)
dim(truncateQ.aug_rest)
# --- summarize each estimand: weighted, augmented, and PENCOMP versions ---
truth=truthVal["ATE"]
ATE_all=rbind(processResult(result=ATE, truth = truth),
processResult(result=ATE.aug, truth),
processResult(result=ATE_pencomp, truth = truth))
ATE_all
row.names(ATE_all)=c("ATE", "ATE aug", "pencomp")
###########################
truth=truthVal["ATM"]
ATM_all=rbind(processResult(result=ATM, truth = truth),
processResult(result=ATM.aug, truth),
processResult(result=ATM_pencomp, truth = truth),
processResult(result=ATM_w_pencomp, truth = truth) )
ATM_all
row.names(ATM_all)=c("ATM", "ATM aug", "pencomp ATM", "pencomp w ATM")
###########################
truth=truthVal["ATO"]
ATO_all=rbind(processResult(result=ATO, truth = truth),
processResult(result=ATO.aug, truth),
processResult(result=ATO_pencomp, truth = truth) )
ATO_all
row.names(ATO_all)=c("ATO", "ATO aug", "pencomp ATO")
###########################
truth=truthVal["ATT"]
ATT_all=rbind(processResult(result=ATT, truth = truth),
processResult(result=ATT.aug, truth),
processResult(result=ATT_pencomp, truth = truth),
processResult(result=ATT_w_pencomp, truth = truth) )
ATT_all
row.names(ATT_all)=c("ATT", "ATT aug", "pencomp ATT", "pencomp w ATT")
###########################
truth=truthVal["ATC"]
ATC_all=rbind(processResult(result=ATC, truth = truth),
processResult(result=ATC.aug, truth),
processResult(result=ATC_pencomp, truth = truth),
processResult(result=ATC_w_pencomp, truth = truth) )
ATC_all
row.names(ATC_all)=c("ATC", "ATC aug", "pencomp ATC", "pencomp w ATC")
###asymmetric truncation, at quantile level
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
# NOTE(review): the truncation summaries below reuse `truth` from the ATC
# section rather than truthVal["truncate"]/["truncateQ"]; all entries of
# truthVal are 0.75 here, so the numbers agree — confirm if truths diverge.
truncate_all=NULL
for(k in 1:length(truncateVal)){
# each truncation level contributes 4 columns named estimate/std/lowerCI/upperCI + level
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateVal[k], digits = 2))
temp=rbind(processResult(result=truncate[, selCol], truth = truth),
processResult(result=truncate_rest[, selCol], truth = truth),
processResult(result=truncate.aug[, selCol], truth = truth),
processResult(result=truncate.aug_rest[, selCol], truth = truth),
processResult(result=truncate_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncate", "truncate rest", "truncate.aug", "truncate.aug rest", "truncate pencomp"),
format(truncateVal[k], digits = 2))
truncate_all=rbind(truncate_all, temp)
}
##################################
truncateQ_all=NULL
for(k in 1:length(truncateQVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateQVal[k], digits = 2))
temp=rbind(processResult(result=truncateQ[, selCol], truth = truth),
processResult(result=truncateQ_rest[, selCol], truth = truth),
processResult(result=truncateQ.aug[, selCol], truth = truth),
processResult(result=truncateQ.aug_rest[, selCol], truth = truth),
processResult(result=truncateQ_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncateQ", "truncateQ rest", "truncateQ.aug", "truncateQ.aug rest", "truncateQ pencomp"),
format(truncateQVal[k], digits = 2))
truncateQ_all=rbind(truncateQ_all, temp)
}
# stack every estimand's summary and write one table for this scenario
output=rbind(ATE_all, ATM_all, ATO_all, ATT_all, ATC_all, truncate_all, truncateQ_all)
write.table(output, paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
}
########### pencomp with spline only ##########
# Same post-processing as the main loop, but for the "misPred2" scenario the
# stored output only contains the PENCOMP estimators (no weighted versions).
for(modelSpec in c("misPred2")){
###pencomp
ATE_pencomp=NULL
ATM_pencomp=NULL
ATM_w_pencomp=NULL
ATO_pencomp=NULL
ATT_pencomp=NULL
ATT_w_pencomp=NULL
ATC_pencomp=NULL
ATC_w_pencomp=NULL
truncate_pencomp=NULL
truncateQ_pencomp=NULL
DIRECR=NULL
DIRECR=paste0(DIREC_ROOT, "homoT/")
###pencomp output
ATE_pencomp=rbind(ATE_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_pencomp=rbind(ATM_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_w_pencomp=rbind(ATM_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_w_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATO_pencomp=rbind(ATO_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncate_pencomp=rbind(truncate_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_pencomp=rbind(truncateQ_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATT_pencomp=rbind(ATT_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATT_w_pencomp=rbind(ATT_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATC_pencomp=rbind(ATC_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATC_w_pencomp=rbind(ATC_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
###pencomp
# drop replicates whose point estimate (column 1) is missing
ATE_pencomp=ATE_pencomp[which(!is.na(ATE_pencomp[,1])),]
ATM_pencomp=ATM_pencomp[which(!is.na(ATM_pencomp[,1])),]
ATM_w_pencomp=ATM_w_pencomp[which(!is.na(ATM_w_pencomp[,1])),]
ATO_pencomp=ATO_pencomp[which(!is.na(ATO_pencomp[,1])),]
ATT_pencomp=ATT_pencomp[which(!is.na(ATT_pencomp[,1])),]
ATT_w_pencomp=ATT_w_pencomp[which(!is.na(ATT_w_pencomp[,1])),]
ATC_pencomp=ATC_pencomp[which(!is.na(ATC_pencomp[,1])),]
ATC_w_pencomp=ATC_w_pencomp[which(!is.na(ATC_w_pencomp[,1])),]
###pencomp
# print the number of usable replicates per estimator (interactive check)
dim(ATE_pencomp)
dim(ATM_pencomp)
dim(ATM_w_pencomp)
dim(ATO_pencomp)
dim(ATT_pencomp)
dim(ATT_w_pencomp)
dim(ATC_pencomp)
dim(ATC_w_pencomp)
truncate_pencomp=truncate_pencomp[which(!is.na(truncate_pencomp[,1])),]
truncateQ_pencomp=truncateQ_pencomp[which(!is.na(truncateQ_pencomp[,1])),]
dim(truncate_pencomp)
dim(truncateQ_pencomp)
# --- summarize each estimand (PENCOMP only) ---
truth=truthVal["ATE"]
ATE_all=rbind(processResult(result=ATE_pencomp, truth = truth))
ATE_all
row.names(ATE_all)=c("pencomp")
###########################
truth=truthVal["ATM"]
ATM_all=rbind(processResult(result=ATM_pencomp, truth = truth),
processResult(result=ATM_w_pencomp, truth = truth) )
ATM_all
row.names(ATM_all)=c( "pencomp ATM", "pencomp w ATM")
###########################
truth=truthVal["ATO"]
ATO_all=rbind(processResult(result=ATO_pencomp, truth = truth))
ATO_all
row.names(ATO_all)=c("pencomp ATO")
###########################
truth=truthVal["ATT"]
ATT_all=rbind(processResult(result=ATT_pencomp, truth = truth),
processResult(result=ATT_w_pencomp, truth = truth) )
ATT_all
row.names(ATT_all)=c( "pencomp ATT", "pencomp w ATT")
###########################
truth=truthVal["ATC"]
ATC_all=rbind(processResult(result=ATC_pencomp, truth = truth),
processResult(result=ATC_w_pencomp, truth = truth) )
ATC_all
row.names(ATC_all)=c( "pencomp ATC", "pencomp w ATC")
###asymmetric truncation, at quantile level
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
# NOTE(review): as in the main loop, `truth` is carried over from the ATC
# section for the truncation summaries; all truths are 0.75 here — confirm.
truncate_all=NULL
for(k in 1:length(truncateVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateVal[k], digits = 2))
temp=rbind(processResult(result=truncate_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncate pencomp"), format(truncateVal[k], digits = 2))
truncate_all=rbind(truncate_all, temp)
}
##################################
truncateQ_all=NULL
for(k in 1:length(truncateQVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateQVal[k], digits = 2))
temp=rbind(processResult(result=truncateQ_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c( "truncateQ pencomp"),
format(truncateQVal[k], digits = 2))
truncateQ_all=rbind(truncateQ_all, temp)
}
# stack every estimand's summary and write one table for this scenario
output=rbind(ATE_all, ATM_all, ATO_all, ATT_all, ATC_all, truncate_all, truncateQ_all)
write.table(output, paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
}
#################################################################################
###############output results in tables #########################################
#varying the value of gammaV for different degrees of overlap
sampleSize=500
gammaV=4
DIRECOUT=paste0(DIREC_ROOT, "Results/")
# re-read the per-scenario summary tables written by the loops above
misPred2=read.table(paste0(DIRECOUT, "misPred2", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
misPred=read.table(paste0(DIRECOUT, "misPred", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
both=read.table(paste0(DIRECOUT, "both", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
misWeight=read.table(paste0(DIRECOUT, "misWeight", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
########change non-coverage rate
noncoverage <- function(data, var.name = "coverage") {
  # Append a character column "noncoverage" = 100 * (1 - coverage),
  # formatted to 2 significant digits for the LaTeX tables.
  pct_missed <- 100 * (1 - data[[var.name]])
  data[["noncoverage"]] <- format(pct_missed, digits = 2)
  data
}
########empirical RMSE relative to correct IPTW (including everyone)
relRMSE <- function(data, var.name = "RMSE", bench = both["ATE", "RMSE"]) {
  # Append "relRMSE" = |RMSE / bench| formatted to 2 significant digits.
  # NOTE: the default `bench` is evaluated lazily against the global `both`
  # table at call time, so callers outside this script should pass it explicitly.
  ratio <- data[[var.name]] / bench
  data[["relRMSE"]] <- format(abs(ratio), digits = 2)
  data
}
########multiple bias by 1000
biasT <- function(data, var.name = "bias") {
  # Append "biasT" = |bias| * 1000 as a formatted character column
  # (bias scaled up so the table entries are readable integers).
  scaled_bias <- abs(data[[var.name]]) * 1000
  data[["biasT"]] <- format(scaled_bias, digits = 0)
  data
}
########multiple bias percentage by 100
biasPer <- function(data, var.name = "biasPercent") {
  # Append "biasPer" = |bias percentage| * 100 as a formatted character column.
  pct <- 100 * abs(data[[var.name]])
  data[["biasPer"]] <- format(pct, digits = 0)
  data
}
# Build the presentation columns for every model-specification table:
# relRMSE = RMSE relative to the correctly specified IPTW ATE estimator,
# noncoverage = 100*(1-coverage), biasT = |bias|*1000, biasPer = |bias%|*100.
# Each helper only appends a column, so applying them in sequence never
# disturbs the columns read by the later calls (both$RMSE stays intact).
misPred=relRMSE(misPred, var.name = "RMSE", bench = both["ATE", "RMSE"])
misPred2=relRMSE(misPred2, var.name = "RMSE", bench = both["ATE", "RMSE"])
both=relRMSE(both, var.name = "RMSE", bench = both["ATE", "RMSE"])
misWeight=relRMSE(misWeight, var.name = "RMSE", bench = both["ATE", "RMSE"])
misPred=noncoverage(misPred, var.name = "coverage")
misPred2=noncoverage(misPred2, var.name = "coverage")
both=noncoverage(both, var.name = "coverage")
misWeight=noncoverage(misWeight, var.name = "coverage")
misPred=biasT(misPred, var.name = "bias")
misPred2=biasT(misPred2, var.name = "bias")
both=biasT(both, var.name = "bias")
misWeight=biasT(misWeight, var.name = "bias")
misPred=biasPer(misPred, var.name="biasPercent")
misPred2=biasPer(misPred2, var.name="biasPercent")
both=biasPer(both, var.name="biasPercent")
misWeight=biasPer(misWeight, var.name="biasPercent")
#################
# Row labels (estimator names) to keep in the final tables.
methods=c("ATE", "ATE aug" , "pencomp" ,"ATM" , "ATM aug" ,
"pencomp ATM", "pencomp w ATM", "ATO" , "ATO aug" , "pencomp ATO", "ATT", "ATT aug",
"pencomp ATT", "pencomp w ATT" , "ATC" ,
"ATC aug" , "pencomp ATC" , "pencomp w ATC",
"truncate0.01", "truncate rest0.01","truncate0.05", "truncate rest0.05",
"truncate.aug0.01", "truncate.aug rest0.01","truncate.aug0.05", "truncate.aug rest0.05",
"truncate pencomp0.01", "truncate pencomp0.05",
"truncateQ0", "truncateQ rest0","truncateQ0.005", "truncateQ rest0.005",
"truncateQ.aug0", "truncateQ.aug rest0","truncateQ.aug0.005", "truncateQ.aug rest0.005",
"truncateQ pencomp0", "truncateQ pencomp0.005")
#View(misPred[which(row.names(misPred) %in% methods),])
# misPred2 holds the spline-only PENCOMP runs, so only PENCOMP rows exist there.
methods2 = c("pencomp", "pencomp ATM", "pencomp w ATM", "pencomp ATO" , "pencomp ATT", "pencomp w ATT" ,
"pencomp ATC" , "pencomp w ATC" , "truncate pencomp0.01", "truncate pencomp0.05", "truncateQ pencomp0" ,
"truncateQ pencomp0.005")
######## select only the methods listed above ###########
both=both[which(row.names(both) %in% methods), ]
misPred=misPred[which(row.names(misPred) %in% methods), ]
misPred2=misPred2[which(row.names(misPred2) %in% methods2), ]
misWeight=misWeight[which(row.names(misWeight) %in% methods), ]
# Interactive sanity checks that the tables stay row-aligned (printed, not asserted).
row.names(both)==row.names(misPred)
row.names(misPred)==row.names(misWeight)
########## both models are correctly specified ###########
# Interleave "&" separators and "\\" terminators to emit LaTeX table rows.
n=nrow(both)
bothResult=cbind(rep("&", n), format(both[, "truth"]*1000, digits = 0), rep("&", n), both[, "biasT"], rep("&", n), both[, "biasPer"], rep("&", n), both[, "relRMSE"],
rep("&", n), both[, "noncoverage"], rep("\\\\", n))
bothResult=cbind(row.names(both), bothResult)
write.table(bothResult, paste0(DIRECOUT, "both.txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)
########## misspecified propensity score model ###########
n=nrow(misWeight)
misWeightResult=cbind(rep("&", n), format(misWeight[, "truth"]*1000, digits = 0), rep("&", n), misWeight[, "biasT"], rep("&", n), misWeight[, "biasPer"],
rep("&", n), misWeight[, "relRMSE"],
rep("&", n), misWeight[, "noncoverage"], rep("\\\\", n))
misWeightResult=cbind(row.names(misWeight), misWeightResult)
write.table(misWeightResult, paste0(DIRECOUT, "misWeight.txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)
#################################################################################
############### output results in tables #######################################
########## misspecified prediction model ###########
# Interleave rows from misPred (full PENCOMP) and misPred2 (spline-only PENCOMP)
# so each weighted estimator is followed by its PENCOMP counterparts.
misPredResult=rbind(misPred["ATE",], misPred["ATE aug",], misPred["pencomp",], misPred2["pencomp",],
misPred["ATM",], misPred["ATM aug",], misPred["pencomp ATM",], misPred["pencomp w ATM",],
misPred2["pencomp ATM",], misPred2["pencomp w ATM",],
misPred["ATO",], misPred["ATO aug",], misPred["pencomp ATO",],
misPred2["pencomp ATO",],
misPred["ATT",], misPred["ATT aug",], misPred["pencomp ATT",], misPred["pencomp w ATT",],
misPred2["pencomp ATT",], misPred2["pencomp w ATT",],
misPred["ATC",], misPred["ATC aug",], misPred["pencomp ATC",], misPred["pencomp w ATC",],
misPred2["pencomp ATC",], misPred2["pencomp w ATC",],
misPred["truncate0.01",], misPred["truncate rest0.01",], misPred["truncate.aug0.01",], misPred["truncate.aug rest0.01",],
misPred["truncate pencomp0.01",],
misPred2["truncate pencomp0.01",],
misPred["truncate0.05",], misPred["truncate rest0.05",], misPred["truncate.aug0.05",], misPred["truncate.aug rest0.05",],
misPred["truncate pencomp0.05",],
misPred2["truncate pencomp0.05",],
misPred["truncateQ0",], misPred["truncateQ rest0",], misPred["truncateQ.aug0",], misPred["truncateQ.aug rest0",],
misPred["truncateQ pencomp0",],
misPred2["truncateQ pencomp0",],
misPred["truncateQ0.005",], misPred["truncateQ rest0.005",], misPred["truncateQ.aug0.005",], misPred["truncateQ.aug rest0.005",],
misPred["truncateQ pencomp0.005",],
misPred2["truncateQ pencomp0.005",] )
n=nrow(misPredResult)
misPredResult2=cbind(rep("&", n), format(misPredResult[, "truth"]*1000, digits = 0), rep("&", n), misPredResult[, "biasT"], rep("&", n), misPredResult[, "biasPer"],
rep("&", n), misPredResult[, "relRMSE"],
rep("&", n), misPredResult[, "noncoverage"], rep("\\\\", n))
misPredResult2=cbind(row.names(misPredResult), misPredResult2)
write.table(misPredResult2, paste0(DIRECOUT, "misPred.txt"), quote = F, row.names = F, col.names = F)
############################################### plots ##################################################
############ for truncated estimands ###################################################################
#rm(list=ls())
# All four truncation plots are written into a single PDF device,
# closed by dev.off() at the end of this section.
pdf(paste0(DIREC_ROOT, "Results/linear_homo.pdf"))
### asymmetric truncation, at quantile level
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
gammaV=4
sampleSize=500
DIRECOUT=paste0(DIREC_ROOT, "Results/") ## output directory
varName="RMSE"
#varName="absBias"
yrange=c(0.05, 0.7)
################ plots ##################
#par(mfrow=c(2, 3))
### for misPred2 model specification, pencomp estimate
modelSpec="misPred2"
output500=read.table(paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
output500$absBias=abs(output500$bias)
### for weighted estimators
modelSpec="misPred"
output500_w=read.table(paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
output500_w$absBias=abs(output500_w$bias)
##########################
### truncate: RMSE vs truncation level ################
# Row names like "truncate0.01" select one row per truncation level.
rowSel=paste0(c("truncate"), truncateVal)
b=1:length(truncateVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="RMSE: Truncate estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncate rest"), truncateVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncate pencomp"), truncateVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateVal), las=1)
legend("topright", legend=c("truncate", "truncate rest", "truncate pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
###################################
############# bias ################
varName="absBias"
yrange=c(0, 0.1)
##########################
### truncate: absolute bias vs truncation level ################
rowSel=paste0(c("truncate"), truncateVal)
b=1:length(truncateVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="Absolute Bias: Truncate estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncate rest"), truncateVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncate pencomp"), truncateVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateVal), las=1)
legend("topright", legend=c("truncate", "truncate rest", "truncate pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
############################################### plots ##################################################
############ for truncated estimands at quantile #######################################################
varName="RMSE"
yrange=c(0.05, 1)
##########################
### truncateQ: RMSE vs quantile truncation level ################
rowSel=paste0(c("truncateQ"), truncateQVal)
b=1:length(truncateQVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="RMSE TruncateQ estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncateQ rest"), truncateQVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncateQ pencomp"), truncateQVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateQVal), las=1)
legend("topright", legend=c("truncateQ", "truncateQ rest", "truncateQ pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
################# absolute empirical bias #############################
varName="absBias"
yrange=c(0, 0.5)
##########################
### truncateQ: absolute bias vs quantile truncation level ################
rowSel=paste0(c("truncateQ"), truncateQVal)
b=1:length(truncateQVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="Absolute Bias TruncateQ estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncateQ rest"), truncateQVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncateQ pencomp"), truncateQVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateQVal), las=1)
legend("topright", legend=c("truncateQ", "truncateQ rest", "truncateQ pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
dev.off()
| /limitedOverlap/Simulation/linear/Function_Linear/analysis_v2.R | no_license | TingtingKayla/limitedOverlap | R | false | false | 30,147 | r |
# NOTE(review): rm(list=ls()) wipes the global environment; acceptable for a
# standalone analysis script, but dangerous if this file is ever source()d
# from another session.
rm(list=ls())
############################################################################################################
### process simulation results ############
# Summarize one simulation-result table against the known truth.
#
# Args:
#   result: matrix/data.frame with one row per simulation replicate and
#           columns (1) point estimate, (2) bootstrap SD,
#           (3) CI lower bound, (4) CI upper bound.
#   truth:  true value of the estimand (scalar).
#
# Returns a named numeric vector:
#   truth, bias, biasPercent, sd (empirical), RMSE, coverage,
#   widthCI (mean CI width), num.sim (replicates used), sdBoot (mean boot SD).
processResult <- function(result, truth) {
  # Drop replicates with a missing point estimate; drop = FALSE keeps the
  # two-dimensional shape even if a single row survives.
  result <- result[!is.na(result[, 1]), , drop = FALSE]
  n_sim <- nrow(result)
  # Vectorized CI coverage check (replaces the original 1:dim(result)[1]
  # loop, which misbehaved when zero rows remained after NA filtering).
  covered <- result[, 3] <= truth & result[, 4] >= truth
  coverage <- mean(covered)
  # Bias and empirical RMSE of the point estimates.
  bias <- mean(result[, 1] - truth)
  rmse <- sqrt(mean((result[, 1] - truth)^2))
  emp_sd <- sd(result[, 1])
  # Average confidence-interval width and average bootstrap SD.
  ci_width <- mean(abs(result[, 4] - result[, 3]))
  boot_sd <- mean(result[, 2])
  finalOut <- c(truth, bias, bias / truth, emp_sd, rmse, coverage,
                ci_width, n_sim, boot_sd)
  names(finalOut) <- c("truth", "bias", "biasPercent", "sd", "RMSE",
                       "coverage", "widthCI", "num.sim", "sdBoot")
  finalOut
}
# Root of the simulation output tree; edit for the local machine.
DIREC_ROOT="C:/Users/Tingting.Zhou/Desktop/paper2/resubmission/linear/"
# Varying the value of gammaV gives different degrees of overlap.
sampleSize=500
gammaV=4
# True causal effect is the same for every estimand in this linear scenario.
truthVal=rep(0.75, 7)
names(truthVal)=c("ATE", "ATM", "ATT", "ATC", "ATO", "truncate", "truncateQ")
DIRECOUT=paste0(DIREC_ROOT, "Results/")
### asymmetric truncation, at quantile level
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
# Alternative model specifications (handled by the loops below):
#modelSpec="misPred"
#modelSpec="both"
#modelSpec="misPred2"
# Summarize each model specification: read every estimator's raw replicate
# table, drop failed replicates, and write one combined summary table.
# "misPred2" (spline-only PENCOMP) is handled by a separate loop below
# because it lacks the weighted-estimator output files.
for(modelSpec in c("misPred", "both", "misWeight")){
###pencomp accumulators
ATE_pencomp=NULL
ATM_pencomp=NULL
ATM_w_pencomp=NULL
ATO_pencomp=NULL
ATT_pencomp=NULL
ATT_w_pencomp=NULL
ATC_pencomp=NULL
ATC_w_pencomp=NULL
truncate_pencomp=NULL
truncateQ_pencomp=NULL
###weighted estimator accumulators
ATE=NULL
ATE.aug=NULL
ATM=NULL
ATM.aug=NULL
ATO=NULL
ATO.aug=NULL
ATT=NULL
ATT.aug=NULL
ATC=NULL
ATC.aug=NULL
truncate=NULL
truncate.aug=NULL
truncate_rest=NULL
truncate.aug_rest=NULL
truncateQ=NULL
truncateQ.aug=NULL
truncateQ_rest=NULL
truncateQ.aug_rest=NULL
DIRECR=NULL
DIRECR=paste0(DIREC_ROOT, "homoT/")
###weighted estimator output
ATE=rbind(ATE, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE", "_", modelSpec, ".txt", sep=""), header = T))
ATE.aug=rbind(ATE.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATM=rbind(ATM, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM", "_", modelSpec, ".txt", sep=""), header = T))
ATM.aug=rbind(ATM.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATO=rbind(ATO, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO", "_", modelSpec, ".txt", sep=""), header = T))
ATO.aug=rbind(ATO.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncate=rbind(truncate, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate", "_", modelSpec, ".txt", sep=""), header = T))
truncate.aug=rbind(truncate.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ=rbind(truncateQ, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ.aug=rbind(truncateQ.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ.aug", "_", modelSpec, ".txt", sep=""), header = T))
truncate_rest=rbind(truncate_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncate.aug_rest=rbind(truncate.aug_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate.aug_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_rest=rbind(truncateQ_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_rest", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ.aug_rest=rbind(truncateQ.aug_rest, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ.aug_rest", "_", modelSpec, ".txt", sep=""), header = T))
ATT=rbind(ATT, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT", "_", modelSpec, ".txt", sep=""), header = T))
ATT.aug=rbind(ATT.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT.aug", "_", modelSpec, ".txt", sep=""), header = T))
ATC=rbind(ATC, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC", "_", modelSpec, ".txt", sep=""), header = T))
ATC.aug=rbind(ATC.aug, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC.aug", "_", modelSpec, ".txt", sep=""), header = T))
###pencomp output
ATE_pencomp=rbind(ATE_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_pencomp=rbind(ATM_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_w_pencomp=rbind(ATM_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_w_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATO_pencomp=rbind(ATO_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncate_pencomp=rbind(truncate_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_pencomp=rbind(truncateQ_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATT_pencomp=rbind(ATT_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATT_w_pencomp=rbind(ATT_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATC_pencomp=rbind(ATC_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATC_w_pencomp=rbind(ATC_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
### drop replicates whose point estimate is missing (failed runs)
ATE_pencomp=ATE_pencomp[which(!is.na(ATE_pencomp[,1])),]
ATM_pencomp=ATM_pencomp[which(!is.na(ATM_pencomp[,1])),]
ATM_w_pencomp=ATM_w_pencomp[which(!is.na(ATM_w_pencomp[,1])),]
ATO_pencomp=ATO_pencomp[which(!is.na(ATO_pencomp[,1])),]
ATT_pencomp=ATT_pencomp[which(!is.na(ATT_pencomp[,1])),]
ATT_w_pencomp=ATT_w_pencomp[which(!is.na(ATT_w_pencomp[,1])),]
ATC_pencomp=ATC_pencomp[which(!is.na(ATC_pencomp[,1])),]
ATC_w_pencomp=ATC_w_pencomp[which(!is.na(ATC_w_pencomp[,1])),]
### interactive checks of how many replicates survived (printed, not asserted)
dim(ATE_pencomp)
dim(ATM_pencomp)
dim(ATM_w_pencomp)
dim(ATO_pencomp)
dim(ATT_pencomp)
dim(ATT_w_pencomp)
dim(ATC_pencomp)
dim(ATC_w_pencomp)
truncate_pencomp=truncate_pencomp[which(!is.na(truncate_pencomp[,1])),]
truncateQ_pencomp=truncateQ_pencomp[which(!is.na(truncateQ_pencomp[,1])),]
dim(truncate_pencomp)
dim(truncateQ_pencomp)
###weighted estimators
ATE=ATE[which(!is.na(ATE[,1])),]
ATE.aug=ATE.aug[which(!is.na(ATE.aug[,1])),]
ATM=ATM[which(!is.na(ATM[,1])),]
ATM.aug=ATM.aug[which(!is.na(ATM.aug[,1])),]
ATO=ATO[which(!is.na(ATO[,1])),]
ATO.aug=ATO.aug[which(!is.na(ATO.aug[,1])),]
ATT=ATT[which(!is.na(ATT[,1])),]
ATT.aug=ATT.aug[which(!is.na(ATT.aug[,1])),]
ATC=ATC[which(!is.na(ATC[,1])),]
ATC.aug=ATC.aug[which(!is.na(ATC.aug[,1])),]
truncate=truncate[which(!is.na(truncate[,1])),]
truncate.aug=truncate.aug[which(!is.na(truncate.aug[,1])),]
truncate_rest=truncate_rest[which(!is.na(truncate_rest[,1])),]
truncate.aug_rest=truncate.aug_rest[which(!is.na(truncate.aug_rest[,1])),]
truncateQ=truncateQ[which(!is.na(truncateQ[,1])),]
truncateQ.aug=truncateQ.aug[which(!is.na(truncateQ.aug[,1])),]
truncateQ_rest=truncateQ_rest[which(!is.na(truncateQ_rest[,1])),]
truncateQ.aug_rest=truncateQ.aug_rest[which(!is.na(truncateQ.aug_rest[,1])),]
###weighted estimators
dim(ATE)
dim(ATE.aug)
dim(ATM)
dim(ATM.aug)
dim(ATO)
dim(ATO.aug)
dim(ATT)
dim(ATT.aug)
dim(ATC)
dim(ATC.aug)
dim(truncate)
dim(truncate.aug)
dim(truncate_rest)
dim(truncate.aug_rest)
dim(truncateQ)
dim(truncateQ.aug)
dim(truncateQ_rest)
dim(truncateQ.aug_rest)
# Summarize each estimand: IPTW, augmented IPTW, and PENCOMP variants.
truth=truthVal["ATE"]
ATE_all=rbind(processResult(result=ATE, truth = truth),
processResult(result=ATE.aug, truth),
processResult(result=ATE_pencomp, truth = truth))
ATE_all
row.names(ATE_all)=c("ATE", "ATE aug", "pencomp")
###########################
truth=truthVal["ATM"]
ATM_all=rbind(processResult(result=ATM, truth = truth),
processResult(result=ATM.aug, truth),
processResult(result=ATM_pencomp, truth = truth),
processResult(result=ATM_w_pencomp, truth = truth) )
ATM_all
row.names(ATM_all)=c("ATM", "ATM aug", "pencomp ATM", "pencomp w ATM")
###########################
truth=truthVal["ATO"]
ATO_all=rbind(processResult(result=ATO, truth = truth),
processResult(result=ATO.aug, truth),
processResult(result=ATO_pencomp, truth = truth) )
ATO_all
row.names(ATO_all)=c("ATO", "ATO aug", "pencomp ATO")
###########################
truth=truthVal["ATT"]
ATT_all=rbind(processResult(result=ATT, truth = truth),
processResult(result=ATT.aug, truth),
processResult(result=ATT_pencomp, truth = truth),
processResult(result=ATT_w_pencomp, truth = truth) )
ATT_all
row.names(ATT_all)=c("ATT", "ATT aug", "pencomp ATT", "pencomp w ATT")
###########################
truth=truthVal["ATC"]
ATC_all=rbind(processResult(result=ATC, truth = truth),
processResult(result=ATC.aug, truth),
processResult(result=ATC_pencomp, truth = truth),
processResult(result=ATC_w_pencomp, truth = truth) )
ATC_all
row.names(ATC_all)=c("ATC", "ATC aug", "pencomp ATC", "pencomp w ATC")
###asymmetric truncation, at quantile level
# NOTE(review): `truth` still holds truthVal["ATC"] in the truncation loops
# below; harmless only while every entry of truthVal is identical.
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
truncate_all=NULL
for(k in 1:length(truncateVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateVal[k], digits = 2))
temp=rbind(processResult(result=truncate[, selCol], truth = truth),
processResult(result=truncate_rest[, selCol], truth = truth),
processResult(result=truncate.aug[, selCol], truth = truth),
processResult(result=truncate.aug_rest[, selCol], truth = truth),
processResult(result=truncate_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncate", "truncate rest", "truncate.aug", "truncate.aug rest", "truncate pencomp"),
format(truncateVal[k], digits = 2))
truncate_all=rbind(truncate_all, temp)
}
##################################
truncateQ_all=NULL
for(k in 1:length(truncateQVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateQVal[k], digits = 2))
temp=rbind(processResult(result=truncateQ[, selCol], truth = truth),
processResult(result=truncateQ_rest[, selCol], truth = truth),
processResult(result=truncateQ.aug[, selCol], truth = truth),
processResult(result=truncateQ.aug_rest[, selCol], truth = truth),
processResult(result=truncateQ_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncateQ", "truncateQ rest", "truncateQ.aug", "truncateQ.aug rest", "truncateQ pencomp"),
format(truncateQVal[k], digits = 2))
truncateQ_all=rbind(truncateQ_all, temp)
}
# Stack all estimand summaries and write one results table per model spec.
output=rbind(ATE_all, ATM_all, ATO_all, ATT_all, ATC_all, truncate_all, truncateQ_all)
write.table(output, paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
}
########### pencomp with spline only ##########
# Same summary pipeline as above, but for the spline-only PENCOMP runs
# ("misPred2"); no weighted-estimator files exist for this specification.
for(modelSpec in c("misPred2")){
###pencomp accumulators
ATE_pencomp=NULL
ATM_pencomp=NULL
ATM_w_pencomp=NULL
ATO_pencomp=NULL
ATT_pencomp=NULL
ATT_w_pencomp=NULL
ATC_pencomp=NULL
ATC_w_pencomp=NULL
truncate_pencomp=NULL
truncateQ_pencomp=NULL
DIRECR=NULL
DIRECR=paste0(DIREC_ROOT, "homoT/")
###pencomp output
ATE_pencomp=rbind(ATE_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATE_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_pencomp=rbind(ATM_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATM_w_pencomp=rbind(ATM_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATM_w_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATO_pencomp=rbind(ATO_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATO_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncate_pencomp=rbind(truncate_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncate_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
truncateQ_pencomp=rbind(truncateQ_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/truncateQ_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATT_pencomp=rbind(ATT_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATT_w_pencomp=rbind(ATT_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATT_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
ATC_pencomp=rbind(ATC_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_pencomp", "_", modelSpec, ".txt", sep=""), header = T))
ATC_w_pencomp=rbind(ATC_w_pencomp, read.table(paste(DIRECR, "sampleSize", sampleSize, "/gamma", gammaV, "/ATC_w_pencomp", "_", modelSpec, ".txt", sep=""),header = T))
### drop replicates whose point estimate is missing (failed runs)
ATE_pencomp=ATE_pencomp[which(!is.na(ATE_pencomp[,1])),]
ATM_pencomp=ATM_pencomp[which(!is.na(ATM_pencomp[,1])),]
ATM_w_pencomp=ATM_w_pencomp[which(!is.na(ATM_w_pencomp[,1])),]
ATO_pencomp=ATO_pencomp[which(!is.na(ATO_pencomp[,1])),]
ATT_pencomp=ATT_pencomp[which(!is.na(ATT_pencomp[,1])),]
ATT_w_pencomp=ATT_w_pencomp[which(!is.na(ATT_w_pencomp[,1])),]
ATC_pencomp=ATC_pencomp[which(!is.na(ATC_pencomp[,1])),]
ATC_w_pencomp=ATC_w_pencomp[which(!is.na(ATC_w_pencomp[,1])),]
### interactive checks of how many replicates survived (printed, not asserted)
dim(ATE_pencomp)
dim(ATM_pencomp)
dim(ATM_w_pencomp)
dim(ATO_pencomp)
dim(ATT_pencomp)
dim(ATT_w_pencomp)
dim(ATC_pencomp)
dim(ATC_w_pencomp)
truncate_pencomp=truncate_pencomp[which(!is.na(truncate_pencomp[,1])),]
truncateQ_pencomp=truncateQ_pencomp[which(!is.na(truncateQ_pencomp[,1])),]
dim(truncate_pencomp)
dim(truncateQ_pencomp)
# Summaries: PENCOMP rows only for this specification.
truth=truthVal["ATE"]
ATE_all=rbind(processResult(result=ATE_pencomp, truth = truth))
ATE_all
row.names(ATE_all)=c("pencomp")
###########################
truth=truthVal["ATM"]
ATM_all=rbind(processResult(result=ATM_pencomp, truth = truth),
processResult(result=ATM_w_pencomp, truth = truth) )
ATM_all
row.names(ATM_all)=c( "pencomp ATM", "pencomp w ATM")
###########################
truth=truthVal["ATO"]
ATO_all=rbind(processResult(result=ATO_pencomp, truth = truth))
ATO_all
row.names(ATO_all)=c("pencomp ATO")
###########################
truth=truthVal["ATT"]
ATT_all=rbind(processResult(result=ATT_pencomp, truth = truth),
processResult(result=ATT_w_pencomp, truth = truth) )
ATT_all
row.names(ATT_all)=c( "pencomp ATT", "pencomp w ATT")
###########################
truth=truthVal["ATC"]
ATC_all=rbind(processResult(result=ATC_pencomp, truth = truth),
processResult(result=ATC_w_pencomp, truth = truth) )
ATC_all
row.names(ATC_all)=c( "pencomp ATC", "pencomp w ATC")
###asymmetric truncation, at quantile level
# NOTE(review): `truth` still holds truthVal["ATC"] in the truncation loops
# below; harmless only while every entry of truthVal is identical.
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
truncate_all=NULL
for(k in 1:length(truncateVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateVal[k], digits = 2))
temp=rbind(processResult(result=truncate_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c("truncate pencomp"), format(truncateVal[k], digits = 2))
truncate_all=rbind(truncate_all, temp)
}
##################################
truncateQ_all=NULL
for(k in 1:length(truncateQVal)){
selCol=paste0(c("estimate", "std", "lowerCI","upperCI"), format(truncateQVal[k], digits = 2))
temp=rbind(processResult(result=truncateQ_pencomp[, selCol], truth = truth) )
row.names(temp)=paste0(c( "truncateQ pencomp"),
format(truncateQVal[k], digits = 2))
truncateQ_all=rbind(truncateQ_all, temp)
}
# Stack all estimand summaries and write the misPred2 results table.
output=rbind(ATE_all, ATM_all, ATO_all, ATT_all, ATC_all, truncate_all, truncateQ_all)
write.table(output, paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
}
#################################################################################
############### output results in tables #######################################
# Varying the value of gammaV gives different degrees of overlap.
# Re-load the per-model-specification summary tables written by the loops above.
sampleSize=500
gammaV=4
DIRECOUT=paste0(DIREC_ROOT, "Results/")
misPred2=read.table(paste0(DIRECOUT, "misPred2", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
misPred=read.table(paste0(DIRECOUT, "misPred", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
both=read.table(paste0(DIRECOUT, "both", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
misWeight=read.table(paste0(DIRECOUT, "misWeight", "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
######## change coverage into a non-coverage rate
# Append a character column "noncoverage" = 100 * (1 - coverage),
# formatted to 2 significant digits for the LaTeX tables.
noncoverage=function(data, var.name="coverage"){
data[,"noncoverage"] =format(100*(1-data[, var.name]), digits = 2)
return(data)
}
######## empirical RMSE relative to correct IPTW (including everyone)
# Append "relRMSE" = |RMSE / bench|.  NOTE: the default `bench` is evaluated
# lazily against the global `both` table at call time.
relRMSE=function(data, var.name="RMSE", bench=both["ATE", "RMSE"]){
data[,"relRMSE"] = format(abs(data[, var.name] / bench), digits = 2)
return(data)
}
######## multiply bias by 1000
# Append "biasT" = |bias| * 1000 as a formatted character column.
biasT=function(data, var.name="bias"){
data[,"biasT"] =format(abs(data[, var.name])*1000, digits = 0)
return(data)
}
######## multiply bias percentage by 100
# Append "biasPer" = |bias percentage| * 100 as a formatted character column.
biasPer=function(data, var.name="biasPercent"){
data[,"biasPer"] =format(abs(data[, var.name]) * 100, digits = 0)
return(data)
}
######## Append derived/display columns to every results table ########
# misPred, misPred2, both and misWeight are per-method result tables created
# earlier in this script (one row per estimator).  All four tables are scaled
# against the same benchmark: the RMSE of the ATE estimator in `both`.
misPred=relRMSE(misPred, var.name = "RMSE", bench = both["ATE", "RMSE"])
misPred2=relRMSE(misPred2, var.name = "RMSE", bench = both["ATE", "RMSE"])
# NOTE: `both` is overwritten here, but relRMSE() only appends a column, so
# the "RMSE" benchmark read on the next line is unaffected.
both=relRMSE(both, var.name = "RMSE", bench = both["ATE", "RMSE"])
misWeight=relRMSE(misWeight, var.name = "RMSE", bench = both["ATE", "RMSE"])
# noncoverage() is defined elsewhere in this file -- presumably it derives a
# non-coverage column from the "coverage" column; confirm against its definition.
misPred=noncoverage(misPred, var.name = "coverage")
misPred2=noncoverage(misPred2, var.name = "coverage")
both=noncoverage(both, var.name = "coverage")
misWeight=noncoverage(misWeight, var.name = "coverage")
# biasT: |bias| * 1000; biasPer: |biasPercent| * 100 (both formatted as text).
misPred=biasT(misPred, var.name = "bias")
misPred2=biasT(misPred2, var.name = "bias")
both=biasT(both, var.name = "bias")
misWeight=biasT(misWeight, var.name = "bias")
misPred=biasPer(misPred, var.name="biasPercent")
misPred2=biasPer(misPred2, var.name="biasPercent")
both=biasPer(both, var.name="biasPercent")
misWeight=biasPer(misWeight, var.name="biasPercent")
#################
# Estimators reported in the main tables: weighted and augmented variants
# plus truncated estimands at fixed and quantile-based cutoffs.
methods=c("ATE", "ATE aug" , "pencomp" ,"ATM" , "ATM aug" ,
"pencomp ATM", "pencomp w ATM", "ATO" , "ATO aug" , "pencomp ATO", "ATT", "ATT aug",
"pencomp ATT", "pencomp w ATT" , "ATC" ,
"ATC aug" , "pencomp ATC" , "pencomp w ATC",
"truncate0.01", "truncate rest0.01","truncate0.05", "truncate rest0.05",
"truncate.aug0.01", "truncate.aug rest0.01","truncate.aug0.05", "truncate.aug rest0.05",
"truncate pencomp0.01", "truncate pencomp0.05",
"truncateQ0", "truncateQ rest0","truncateQ0.005", "truncateQ rest0.005",
"truncateQ.aug0", "truncateQ.aug rest0","truncateQ.aug0.005", "truncateQ.aug rest0.005",
"truncateQ pencomp0", "truncateQ pencomp0.005")
#View(misPred[which(row.names(misPred) %in% methods),])
# PENCOMP-only subset of the above, used for the misPred2 table.
methods2 = c("pencomp", "pencomp ATM", "pencomp w ATM", "pencomp ATO" , "pencomp ATT", "pencomp w ATT" ,
"pencomp ATC" , "pencomp w ATC" , "truncate pencomp0.01", "truncate pencomp0.05", "truncateQ pencomp0" ,
"truncateQ pencomp0.005")
########select only the methods listed above###########
both=both[which(row.names(both) %in% methods), ]
misPred=misPred[which(row.names(misPred) %in% methods), ]
misPred2=misPred2[which(row.names(misPred2) %in% methods2), ]
misWeight=misWeight[which(row.names(misWeight) %in% methods), ]
# Visual sanity check that the tables stay row-aligned (printed, not asserted).
row.names(both)==row.names(misPred)
row.names(misPred)==row.names(misWeight)
########## both models are correctly specified###########
# Assemble a LaTeX-ready table: "&" column separators and "\\" row endings;
# the "truth" column is scaled by 1000 to match the biasT scaling.
n=nrow(both)
bothResult=cbind(rep("&", n), format(both[, "truth"]*1000, digits = 0), rep("&", n), both[, "biasT"], rep("&", n), both[, "biasPer"], rep("&", n), both[, "relRMSE"],
rep("&", n), both[, "noncoverage"], rep("\\\\", n))
bothResult=cbind(row.names(both), bothResult)
write.table(bothResult, paste0(DIRECOUT, "both.txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)
########## misspecified propensity score model###########
# Same LaTeX-style assembly as for `both`, now for the misWeight table.
n=nrow(misWeight)
misWeightResult=cbind(rep("&", n), format(misWeight[, "truth"]*1000, digits = 0), rep("&", n), misWeight[, "biasT"], rep("&", n), misWeight[, "biasPer"],
rep("&", n), misWeight[, "relRMSE"],
rep("&", n), misWeight[, "noncoverage"], rep("\\\\", n))
misWeightResult=cbind(row.names(misWeight), misWeightResult)
write.table(misWeightResult, paste0(DIRECOUT, "misWeight.txt"), row.names = FALSE, col.names = FALSE, quote = FALSE)
#################################################################################
###############output results in tables #########################################
########## misspecified prediction model ###########
# Interleave rows from misPred and misPred2 so that, for each estimand, the
# weighted, augmented and PENCOMP variants appear together in the output.
misPredResult=rbind(misPred["ATE",], misPred["ATE aug",], misPred["pencomp",], misPred2["pencomp",],
misPred["ATM",], misPred["ATM aug",], misPred["pencomp ATM",], misPred["pencomp w ATM",],
misPred2["pencomp ATM",], misPred2["pencomp w ATM",],
misPred["ATO",], misPred["ATO aug",], misPred["pencomp ATO",],
misPred2["pencomp ATO",],
misPred["ATT",], misPred["ATT aug",], misPred["pencomp ATT",], misPred["pencomp w ATT",],
misPred2["pencomp ATT",], misPred2["pencomp w ATT",],
misPred["ATC",], misPred["ATC aug",], misPred["pencomp ATC",], misPred["pencomp w ATC",],
misPred2["pencomp ATC",], misPred2["pencomp w ATC",],
misPred["truncate0.01",], misPred["truncate rest0.01",], misPred["truncate.aug0.01",], misPred["truncate.aug rest0.01",],
misPred["truncate pencomp0.01",],
misPred2["truncate pencomp0.01",],
misPred["truncate0.05",], misPred["truncate rest0.05",], misPred["truncate.aug0.05",], misPred["truncate.aug rest0.05",],
misPred["truncate pencomp0.05",],
misPred2["truncate pencomp0.05",],
misPred["truncateQ0",], misPred["truncateQ rest0",], misPred["truncateQ.aug0",], misPred["truncateQ.aug rest0",],
misPred["truncateQ pencomp0",],
misPred2["truncateQ pencomp0",],
misPred["truncateQ0.005",], misPred["truncateQ rest0.005",], misPred["truncateQ.aug0.005",], misPred["truncateQ.aug rest0.005",],
misPred["truncateQ pencomp0.005",],
misPred2["truncateQ pencomp0.005",] )
# LaTeX-ready formatting, identical to the blocks above.
n=nrow(misPredResult)
misPredResult2=cbind(rep("&", n), format(misPredResult[, "truth"]*1000, digits = 0), rep("&", n), misPredResult[, "biasT"], rep("&", n), misPredResult[, "biasPer"],
rep("&", n), misPredResult[, "relRMSE"],
rep("&", n), misPredResult[, "noncoverage"], rep("\\\\", n))
misPredResult2=cbind(row.names(misPredResult), misPredResult2)
write.table(misPredResult2, paste0(DIRECOUT, "misPred.txt"), quote = F, row.names = F, col.names = F)
############################################### plots ##################################################
############3 for truncated estimands ##################################################################
#rm(list=ls())
# All figures below are written as successive pages of one PDF file.
pdf(paste0(DIREC_ROOT, "Results/linear_homo.pdf"))
###asymetric truncation, at quantile level
# Truncation grids: fixed propensity cutoffs and quantile-based cutoffs.
truncateVal=seq(0.01, 0.1, 0.01)
truncateQVal=seq(0, 0.03, 0.005)
gammaV=4
sampleSize=500
DIRECOUT=paste0(DIREC_ROOT, "Results/") ##ouput directory
varName="RMSE"
#varName="absBias"
yrange=c(0.05, 0.7)
################ plots ##################
#par(mfrow=c(2, 3))
### for mispred2 model specification, pencomp estimate
# NOTE(review): read.table() is called without header/row.names arguments, so
# these .txt files are assumed to carry a header row plus row names -- confirm.
modelSpec="misPred2"
output500=read.table(paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
output500$absBias=abs(output500$bias)
###for weighted estimators
modelSpec="misPred"
output500_w=read.table(paste0(DIRECOUT, modelSpec, "_gammaV", gammaV, "_sampleSize", sampleSize, ".txt"), sep="\t")
output500_w$absBias=abs(output500_w$bias)
##########################
###truncate################
# Page 1: RMSE of the fixed-cutoff truncated estimators vs truncation level.
rowSel=paste0(c("truncate"), truncateVal)
b=1:length(truncateVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="RMSE: Truncate estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncate rest"), truncateVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncate pencomp"), truncateVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateVal), las=1)
legend("topright", legend=c("truncate", "truncate rest", "truncate pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
###################################
############# bias ################
# Page 2: absolute bias of the same estimators.
varName="absBias"
yrange=c(0, 0.1)
##########################
###truncate################
rowSel=paste0(c("truncate"), truncateVal)
b=1:length(truncateVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="Absolute Bias: Truncate estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncate rest"), truncateVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncate pencomp"), truncateVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateVal), las=1)
legend("topright", legend=c("truncate", "truncate rest", "truncate pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
############################################### plots ##################################################
############3 for truncated estimands at quantile #################################################################
# Page 3: RMSE of the quantile-cutoff truncated estimators.
varName="RMSE"
yrange=c(0.05, 1)
##########################
###truncate################
rowSel=paste0(c("truncateQ"), truncateQVal)
b=1:length(truncateQVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="RMSE TruncateQ estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncateQ rest"), truncateQVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncateQ pencomp"), truncateQVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateQVal), las=1)
legend("topright", legend=c("truncateQ", "truncateQ rest", "truncateQ pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
#################absolute empirical bias#############################
# Page 4: absolute bias of the quantile-cutoff truncated estimators.
varName="absBias"
yrange=c(0, 0.5)
##########################
###truncate################
rowSel=paste0(c("truncateQ"), truncateQVal)
b=1:length(truncateQVal)
plot(b, output500_w[rowSel,varName], type="o", xlab = "truncation level", ylim = yrange, xaxt="n", ylab = varName,
main="Absolute Bias TruncateQ estimand", lty=1, col="cyan")
#########
rowSel=paste0(c("truncateQ rest"), truncateQVal)
lines(b, output500_w[rowSel,varName], type="o", col="cyan4", lty=2)
########
rowSel=paste0(c("truncateQ pencomp"), truncateQVal)
lines(b, output500[rowSel,varName], type="o", col="red", lty=5)
axis(1, at=b, labels=paste0(truncateQVal), las=1)
legend("topright", legend=c("truncateQ", "truncateQ rest", "truncateQ pencomp"),
col=c("cyan","cyan4", "red"), lty=c(1, 2, 5), cex=0.8)
# Close the PDF device.
dev.off()
# ---------------------------------------------------------------------------
#' Lookup function for translating commonly used ED variables
#'
#' Maps a standard variable name onto the ED2 output variable that must be
#' read from file, plus the bookkeeping needed to process it.
#'
#' @param varname character; variable name to read from file
#' @return \code{NULL} (with a warning) when \code{varname} is unknown;
#'   otherwise a list with elements:
#'   \describe{
#'     \item{readvar}{ED2 variable name(s) to read from file}
#'     \item{type}{'co' for cohort-level, 'pa' for patch-level variables}
#'     \item{units}{units of the raw ED2 variable}
#'     \item{drelated}{other deterministically related vars (currently NULL)}
#'     \item{expr}{expression used if any derivation is needed}
#'   }
#' @export
ed.var <- function(varname) {
  # Small constructors so each table entry stays on one line; both mirror the
  # exact list layout callers rely on (readvar/type/units/drelated/expr).
  co <- function(readvar, units = "kgC/plant") {
    list(readvar = readvar, type = 'co', units = units, drelated = NULL, expr = readvar)
  }
  pa <- function(readvar, units = "kg/m2") {
    list(readvar = readvar, type = 'pa', units = units, drelated = NULL, expr = readvar)
  }
  out <- switch(varname,
    "AGB"         = co("AGB_CO"),
    "TotLivBiom"  = co("BALIVE"),
    "BA"          = co("BA_CO", units = "cm2/plant"),
    "DBH"         = co("DBH", units = "cm/plant"),
    # until the BLEAF keeper is annual, work with total AGB
    "AbvGrndWood" = co("AGB_CO"),
    "AGB.pft"     = co("AGB_CO"),
    "leaf_carbon_content"                = co("BLEAF"),
    "root_carbon_content"                = co("BROOT"),
    "reproductive_litter_carbon_content" = co("BSEEDS_CO"),
    "storage_carbon_content"             = co("BSTORAGE"),
    # DDBH_DT is the rate of change in DBH; GWBI is calculated from it downstream
    "GWBI"        = co("DDBH_DT", units = "cm/yr"),
    "fast_soil_pool_carbon_content"       = pa("FAST_SOIL_C"),
    "structural_soil_pool_carbon_content" = pa("STRUCTURAL_SOIL_C"),
    NULL) # no match
  if (is.null(out)) {
    warning(paste0("Couldn't find varname ", varname, "!"))
  }
  return(out)
}
#' Lookup function for translating commonly used ED variables
#' returns out list, readvar variables to read from file, expr if any derivation is needed
#' @param varname character; variable name to read from file
#' @export
ed.var <- function(varname) {
  # Translate a standard variable name into the ED2 output variable to read.
  # Returns NULL (with a warning) for unknown names; otherwise a list with
  # readvar / type ('co' cohort, 'pa' patch) / units / drelated / expr.
  # Small constructors so each table entry stays on one line; both mirror the
  # exact list layout callers rely on.
  co <- function(readvar, units = "kgC/plant") {
    list(readvar = readvar, type = 'co', units = units, drelated = NULL, expr = readvar)
  }
  pa <- function(readvar, units = "kg/m2") {
    list(readvar = readvar, type = 'pa', units = units, drelated = NULL, expr = readvar)
  }
  out <- switch(varname,
    "AGB"         = co("AGB_CO"),
    "TotLivBiom"  = co("BALIVE"),
    "BA"          = co("BA_CO", units = "cm2/plant"),
    "DBH"         = co("DBH", units = "cm/plant"),
    # until the BLEAF keeper is annual, work with total AGB
    "AbvGrndWood" = co("AGB_CO"),
    "AGB.pft"     = co("AGB_CO"),
    "leaf_carbon_content"                = co("BLEAF"),
    "root_carbon_content"                = co("BROOT"),
    "reproductive_litter_carbon_content" = co("BSEEDS_CO"),
    "storage_carbon_content"             = co("BSTORAGE"),
    # DDBH_DT is the rate of change in DBH; GWBI is calculated from it downstream
    "GWBI"        = co("DDBH_DT", units = "cm/yr"),
    "fast_soil_pool_carbon_content"       = pa("FAST_SOIL_C"),
    "structural_soil_pool_carbon_content" = pa("STRUCTURAL_SOIL_C"),
    NULL) # no match
  if (is.null(out)) {
    warning(paste0("Couldn't find varname ", varname, "!"))
  }
  return(out)
}
# ---------------------------------------------------------------------------
# WIP_RunRandomForest.R
# R script for RunRandomForest ArcGIS Pro tool.
# This script will load an existing random forest model
# and a set of rasters for running the model.
# These rasters must be of the same variables and in the same order as used to build the model.
# Options to build a probability raster and to compare model predictions to another point data set.
# Tool entry point called by ArcGIS Pro.  Loads a previously built random
# forest model (<model>.RFmodel plus its <model>.rasterList), stacks the
# supplied rasters, optionally evaluates the model on a test point feature
# class, and optionally writes a wetland-probability raster (with
# ROC / precision-recall / accuracy statistics).
#
# in_params:
#   [[1]] working directory
#   [[2]] random forest model file name (<name>.RFmodel)
#   [[3]] list of input rasters; must match the type and order of those used
#         to build the model
#   [[4]] optional point feature class of test data to run the model on
#   [[5]] if test data provided, the field holding the point classification
#   [[6]] field value indicating is-a-wetland
#   [[7]] field value indicating not-a-wetland
#   [[8]] whether model performance statistics should be calculated
# out_params:
#   [[1]] optional probability raster covering the input rasters
tool_exec <- function(in_params, out_params) {

  ### Check/load required packages ------------------------------------------
  arc.progress_label("Loading packages...")
  # FIX: two calls misspelled their argument names ("quitly"/"quite", which
  # were silently swallowed), and CRAN package names are case sensitive
  # ("randomForest", not "randomforest").
  for (pkg in c("raster", "sp", "rgdal", "randomForest",
                "parallel", "doParallel", "foreach", "ROCR")) {
    if (!requireNamespace(pkg, quietly = TRUE))
      install.packages(pkg, quiet = TRUE)
  }
  require(raster)
  require(sp)
  require(rgdal)
  require(randomForest)
  require(parallel)
  require(doParallel)
  require(foreach)
  require(ROCR)

  ### Helper functions ------------------------------------------------------

  #### Extract point data from rasters without exhausting memory ####
  extractInParts <- function(rasters, points) {
    if (canProcessInMemory(rasters)) {
      # Small enough: extract all point data at once.
      beginCluster()
      result <- extract(rasters, points, method = 'bilinear')
      # FIX: raster::beginCluster() must be paired with raster::endCluster();
      # stopCluster() without a cluster object is an error.
      endCluster()
      return(result)
    }
    library(doParallel)
    library(parallel)
    library(foreach)
    # Use every available core; fall back to sequential execution when the
    # core count cannot be determined.
    numCores <- detectCores()
    if (is.na(numCores)) {
      registerDoSEQ()
    } else {
      cl <- makeCluster(numCores)
      registerDoParallel(cl)
      # Each worker needs these packages loaded.
      clusterEvalQ(cl, {
        library(raster)
        library(arcgisbinding)
        library(randomForest)
      })
    }
    # Process the raster block by block; combineMatrices() merges the
    # per-block matrices (points outside a block extract as NA).
    bs <- blockSize(rasters)
    result <- foreach (i = 1:bs$n, .combine = 'combineMatrices') %dopar% {
      # Progress reporting only has an effect when running sequentially.
      arc.progress_label(paste0("Extracting Data...", ceiling(100 * (i / bs$n)), "%"))
      bStart <- bs$row[i]
      bLen <- bs$nrows[i]
      # FIX: the block previously ended at bStart + bLen, overlapping the
      # first row of the next block (points on the boundary row were
      # extracted twice and summed by combineMatrices) and running one row
      # past the raster on the last block.
      bEnd <- bStart + bLen - 1
      s <- suppressWarnings(extract(crop(rasters, extent(rasters, bStart, bEnd, 1, ncol(rasters))), points, method = 'bilinear'))
    }
    # Close the cluster connection.
    if (!is.na(numCores))
      stopCluster(cl)
    arc.progress_label(paste0("Extracting Data...", 100, "%"))
    return(result)
  }

  #### Elementwise sum of two matrices, treating NA as 0 ####
  combineMatrices <- function(a, b) {
    combined <- ifelse(is.na(a),
                       ifelse(is.na(b),
                              NA,
                              b),
                       ifelse(is.na(b),
                              a,
                              a + b))
    return(combined)
  }

  #### Predict probabilities and build the raster without exhausting memory ####
  predictInParts <- function(rasters, model, fname) {
    if (canProcessInMemory(rasters)) {
      # Small enough: generate the whole probability raster at once.
      p <- predict(rasters, model, type = "prob", filename = fname, format = "GTiff", overwrite = TRUE)
      return(p)
    } else {
      # Stream the output raster to disk block by block.
      out <- raster(rasters)
      out <- writeStart(out, filename = fname, format = "GTiff", overwrite = TRUE)
    }
    bs <- blockSize(rasters)
    for (i in 1:bs$n) {
      arc.progress_label(paste0("Creating probability raster...", ceiling(100 * (i / bs$n)), "%"))
      bStart <- bs$row[i]
      bLen <- bs$nrows[i]
      # FIX: same off-by-one as in extractInParts; the last block previously
      # extended one row past the raster.
      bEnd <- bStart + bLen - 1
      # Crop to the block, predict probabilities, write the block's values.
      c <- crop(rasters, extent(rasters, bStart, bEnd, 1, ncol(rasters)))
      p <- predict(c, model, type = "prob")
      v <- getValues(p)
      out <- writeValues(out, v, bStart)
    }
    # Stop writing and close the file.
    out <- writeStop(out)
    arc.progress_label(paste0("Creating probability raster...", 100, "%"))
    return(out)
  }

  #### Plot a performance curve and save it as a .wmf metafile ####
  # (win.metafile is Windows-only, matching the ArcGIS Pro host.)
  plotandsave <- function(f, filename, baseline = FALSE) {
    dev.new()
    plot(f, main = filename)
    if (baseline) { abline(a = 0, b = 1) }  # chance line for ROC plots
    dev.copy(win.metafile, paste0(filename, ".wmf"))
    dev.off()
  }

  ### Define input/output parameters ----------------------------------------
  workingDir <- in_params[[1]][1]  # working directory
  modelFile <- in_params[[2]][1]   # model name (<modelFile>.RFmodel, .rasterList)
  inputRasters <- in_params[[3]]   # rasters; must match those used for the model
  testData <- in_params[[4]]       # optional point feature class to evaluate on
  fieldName <- in_params[[5]]      # classification field of testData
  isWet <- in_params[[6]]          # field value indicating is-a-wetland
  notWet <- in_params[[7]]         # field value indicating not-a-wetland
  calcStats <- in_params[[8]]      # whether to compute performance statistics
  outProbRaster <- out_params[[1]] # optional output probability raster
  setwd(workingDir)
  cat(paste0("Current working directory: ", workingDir, "\n"))
  # FIX: modelName was referenced below but never defined; derive it from the
  # model file so the statistics plots get sensible file names.
  modelName <- sub("\\.RFmodel$", "", basename(modelFile))

  ### Load model and raster list --------------------------------------------
  arc.progress_label("Loading random forest model...")
  # load() restores the fitted model object `rfclass` from the .RFmodel file.
  load(modelFile)
  cat(paste0("Loaded model ", modelFile))
  print(rfclass)
  # load() restores `rasterNames`, the rasters used to build this model.
  arc.progress_label("Loading rasters...")
  rasterList <- sub(".RFmodel", ".rasterList", modelFile)
  load(rasterList)
  cat(paste0("\n"))
  cat(paste0("Rasters must be of the same elevation derivatives with the same length scales and in the same order as those used to build the model", "\n"))
  if (length(inputRasters) != length(rasterNames)) stop("You specified a different number of rasters than used to build the model")
  for (i in 1:length(rasterNames)) cat(paste0("Model: ", rasterNames[[i]], ", Input: ", inputRasters[[i]], "\n"))
  cat(paste0("\n"))
  # Switch to the same generic layer names ("Raster1", ...) stored in the
  # RFmodel file so predict() can match columns.
  for (i in 1:length(inputRasters)) names(inputRasters)[i] <- paste0("Raster", i)
  rasters <- stack(inputRasters)

  ### If test data provided, evaluate the model using the new data ----------
  # FIX: the original condition required is.na(testData), which skipped this
  # branch exactly when test data WAS provided.
  haveTestData <- !is.null(testData) && !is.na(testData)
  if (haveTestData) {
    arc.progress_label("Running model on test data...")
    allPoints <- arc.open(testData)
    # Keep only the column holding the wetland class, rename it to "Class",
    # and convert to a spatial dataset.
    allPoints <- arc.select(object = allPoints, fields = fieldName)
    names(allPoints)[1] <- "Class"
    points <- arc.data2sp(allPoints)
    # Raster values at the point locations, with the class as first column.
    pointValues <- extractInParts(rasters, points)
    pointValues <- cbind(points[, 1], pointValues)
    pointValues <- as.data.frame(pointValues)
    # Keep only records with one of the two requested class values, drop
    # incomplete rows, then drop the coordinate columns.
    pointValues <- pointValues[pointValues$Class == isWet[1] | pointValues$Class == notWet[1], ]
    pointValues <- na.omit(pointValues)
    coords <- names(pointValues) %in% c("coords.x1", "coords.x2")
    newdata <- pointValues[!coords]
    # Use the same generic column headings the model was trained with.
    for (i in 2:length(newdata)) {
      names(newdata)[i] <- paste0("Raster", i - 1)
    }
    print(head(newdata))
    # Run the model on these data and print the confusion table.
    test <- predict(rfclass, type = "response", newdata = newdata[, -1])
    print(table(test, newdata$Class))
  }

  ### Build a probability raster, if requested ------------------------------
  if (!is.null(outProbRaster) && !is.na(outProbRaster)) {
    arc.progress_label("Creating probability raster")
    # FIX: was `outputProbRaster`, an undefined name.
    cat(paste0("Writing probabilities to ", outProbRaster))
    probs <- suppressWarnings(predictInParts(rasters, rfclass, outProbRaster))
    cat(paste0("Created GeoTiff probability raster ", outProbRaster[1]))
    # FIX: the statistics need the test points, which only exist when test
    # data was supplied; previously this crashed on the undefined `points`.
    if (calcStats && haveTestData) {
      arc.progress_label("Calculating performance statistics..")
      # Sample the probability raster at the test points (same steps as above).
      pointValues <- extractInParts(probs, points)
      pointValues <- cbind(points[, 1], pointValues)
      pointValues <- as.data.frame(pointValues)
      pointValues <- pointValues[pointValues$Class == isWet[1] | pointValues$Class == notWet[1], ]
      pointValues <- na.omit(pointValues)
      coords <- names(pointValues) %in% c("coords.x1", "coords.x2")
      predictions <- pointValues[!coords]
      names(predictions)[2] <- "Prob"
      # ROC curve and the area under it.
      pred <- prediction(predictions$Prob, predictions$Class, label.ordering = c(isWet[1], notWet[1]))
      roc <- performance(pred, measure = "tpr", x.measure = "fpr")
      auc <- performance(pred, measure = "auc")
      cat(paste0("AUROC: ", auc@y.values, "\n"))
      plotandsave(roc, paste0(modelName[1], '_roc'), baseline = TRUE)
      # Precision-recall curve; report the precision-recall break-even point.
      prc <- performance(pred, measure = "prec", x.measure = "rec")
      idx <- which.max(slot(prc, "y.values")[[1]])
      prbe <- slot(prc, "y.values")[[1]][idx]
      cutoff <- slot(prc, "x.values")[[1]][idx]
      print(c(PRBE = prbe, cutoff = cutoff))
      plotandsave(prc, paste0(modelName[1], '_prc'))
      # Accuracy across cutoffs; report the accuracy-maximizing cutoff.
      acc <- performance(pred, measure = "acc")
      idx <- which.max(slot(acc, "y.values")[[1]])
      maxacc <- slot(acc, "y.values")[[1]][idx]
      cutoff <- slot(acc, "x.values")[[1]][idx]
      print(c(accuracy = maxacc, cutoff = cutoff))
      plotandsave(acc, paste0(modelName[1], '_acc'))
    }
  }
  return(out_params)
}
# WIP_RunRandomForest.R
# R script for RunRandomForest ArcGIS Pro tool.
# This script will load an existing random forest model
# and a set of rasters for running the model.
# These rasters must be of the same variables and in the same order as used to build the model.
# Options to build a probability raster and to compare model predictions to another point data set.
tool_exec<- function(in_params, out_params){
#####################################################################################################
### Check/Load Required Packages
#####################################################################################################
arc.progress_label("Loading packages...")
if(!requireNamespace("raster", quietly = TRUE))
install.packages("raster", quiet = TRUE)
if(!requireNamespace("sp", quitly = TRUE))
install.packages("sp", quite = TRUE)
if(!requireNamespace("rgdal", quietly = TRUE))
install.packages("rgdal", quiet = TRUE)
if(!requireNamespace("randomForest", quietly = TRUE))
install.packages("randomforest", quiet = TRUE)
# Packages for foreach/dopar method
if(!requireNamespace("parallel", quietly = TRUE))
install.packages("parallel", quiet = TRUE)
if(!requireNamespace("doParallel", quietly = TRUE))
install.packages("doParallel", quiet = TRUE)
if(!requireNamespace("foreach", quietly = TRUE))
install.packages("foreach", quiet = TRUE)
if(!requireNamespace("ROCR", quietly = TRUE))
install.packages("ROCR", quiet = TRUE)
require(raster)
require(sp)
require(rgdal)
require(randomForest)
require(parallel)
require(doParallel)
require(foreach)
require(ROCR)
#####################################################################################################
### Helper functions ################################################################################
#####################################################################################################
#### Extracts point data from rasters without breaking memory limits ####
extractInParts <- function(rasters, points) {
# Check if Raster* can fit entirely in memory
if (canProcessInMemory(rasters)) {
# Extract all point data at once
beginCluster()
result <- extract(rasters, points, method='bilinear')
stopCluster()
return(result)
}
library(doParallel)
library(parallel)
library(foreach)
# Count the available cores on computer
numCores <- detectCores()
if (is.na(numCores)) {
# Number unknown, execute loop sequentially
registerDoSEQ()
} else {
# Create and register cores to be used in parallel
cl <- makeCluster(numCores)
registerDoParallel(cl)
# Load necessary libraries to each core in the cluster
clusterEvalQ(cl, {
library(raster)
library(arcgisbinding)
library(randomForest)
})
}
# Find the suggested block size for processing
bs <- blockSize(rasters)
# Extract point values from input rasters. Results -> the list of each iteration's resulting matrix
result <- foreach (i = 1:bs$n, .combine='combineMatrices') %dopar% {
# Only runs if cluster is sequential
arc.progress_label(paste0("Extracting Data...", ceiling(100*(i/bs$n)), "%"))
# Find the block's starting and ending rows
bStart <- bs$row[i]
bLen <- bs$nrows[i]
bEnd <- bStart+bLen
# Extract the point values from the block
s <- suppressWarnings(extract(crop(rasters, extent(rasters, bStart, bEnd, 1, ncol(rasters))), points, method='bilinear'))
}
# Close the cluster connection
if (!is.na(numCores))
stopCluster(cl)
arc.progress_label(paste0("Extracting Data...", 100, "%"))
return(result)
}
#### Adds two matrices, ignoring NA values (treating them as 0s) ####
combineMatrices <- function(a, b) {
combined <- ifelse(is.na(a),
ifelse(is.na(b),
NA,
b),
ifelse(is.na(b),
a,
a+b))
return(combined)
}
#### Predicts probabilities and creates raster without breaking memory limits ####
predictInParts <- function(rasters, model, fname) {
# Check if Raster* can fit entirely in memory
if (canProcessInMemory(rasters)) {
# Generate entire probability raster at once
p <- predict(rasters, model, type="prob", filename=fname, format="GTiff", overwrite=TRUE)
return(p)
} else {
# Initialize the output file to write probabilities to in parts
out <- raster(rasters)
out <- writeStart(out, filename=fname, format="GTiff", overwrite=TRUE)
}
# Find the suggested block size for processing
bs <- blockSize(rasters)
for (i in 1:bs$n) {
arc.progress_label(paste0("Creating probability raster...", ceiling(100*(i/bs$n)), "%"))
# Calculate block row bounds
bStart <- bs$row[i]
bLen <- bs$nrows[i]
bEnd <- bStart+bLen
# Crop raster to block size
c <- crop(rasters, extent(rasters, bStart, bEnd, 1, ncol(rasters)))
# Apply the model to the cropped raster
p <- predict(c, model, type="prob")
# Write the block's values to the output raster
v <- getValues(p)
out <- writeValues(out, v, bStart)
}
# Stop writing and close the file
out <- writeStop(out)
arc.progress_label(paste0("Creating probability raster...", 100, "%"))
return(out)
}
# Function to plot a graph and save to specified file
plotandsave <- function(f, filename, baseline=FALSE) {
dev.new()
plot(f, main=filename)
if (baseline) {abline(a=0,b=1)}
dev.copy(win.metafile, paste0(filename, ".wmf"))
dev.off()
}
#####################################################################################################
### Define input/output parameters
#####################################################################################################
workingDir <- in_params[[1]][1] # Working directory
modelFile <- in_params[[2]][1] # Random forest model name (modelFile.RFmodel, modelFile.rasterList)
inputRasters <- in_params[[3]] # List of input rasters, must match type and order of those used to build the model
testData <- in_params[[4]] # Optional input point feature class of data to run the model on
fieldName <- in_params[[5]] # If testData provided, specify the data field for point classification
isWet <- in_params[[6]] # Field value indicating is-a-wetland
notWet <- in_params[[7]] # Field value indicating not-a-wetland
calcStats <- in_params[[8]] # Whether model performance statistics should be calculated
outProbRaster <- out_params[[1]] # Optional probability raster for the area covered by rasters in the raster list
setwd(workingDir)
cat(paste0("Current working directory: ", workingDir, "\n"))
#####################################################################################################
### Load data and if testData specified, create dataframe object to feed to randomForest.predict function
#####################################################################################################
arc.progress_label("Loading random forest model...")
# Load the random forest model (file extension .RFmodel)
load(modelFile)
cat(paste0("Loaded model ", modelFile))
print(rfclass)
# Load the list of rasters used to build this model (file extension .rasterList)
arc.progress_label("Loading rasters...")
rasterList <- sub(".RFmodel", ".rasterList", modelFile)
load(rasterList)
cat(paste0("\n"))
cat(paste0("Rasters must be of the same elevation derivatives with the same length scales and in the same order as those used to build the model", "\n"))
if (length(inputRasters) != length(rasterNames)) stop("You specified a different number of rasters than used to build the model")
for (i in 1:length(rasterNames)) cat(paste0("Model: ",rasterNames[[i]],", Input: ",inputRasters[[i]], "\n"))
cat(paste0("\n"))
# Switch to the same generic names stored in the RFmodel file
for (i in 1:length(inputRasters)) names(inputRasters)[i] <- paste0("Raster",i)
rasters <- stack(inputRasters)
#####################################################################################################
### If test data provided, evaluate model using new data
#####################################################################################################
# Open the feature class with the training dataset points as a data frame
# Open the feature class with the training dataset points as a data frame.
# BUG FIX: the condition was `is.na(testData)`, which ran this branch only when
# NO test dataset was supplied (and arc.open(NA) would then fail). Mirroring the
# probability-raster guard below, evaluation now runs exactly when a test
# dataset path is present.
if (!is.null(testData) && !is.na(testData)) {
  arc.progress_label("Running model on test data...")
  allPoints <- arc.open(testData)
  # Keep only the column with the input field that holds the wetland Class
  allPoints <- arc.select(object = allPoints, fields = fieldName)
  # Rename the column heading to Class
  names(allPoints)[1] <- "Class"
  # Translate to a spatial dataset
  points <- arc.data2sp(allPoints)
  # Find the raster values at the point locations
  pointValues <- extractInParts(rasters, points)
  # Append the class values as the first column
  pointValues <- cbind(points[, 1], pointValues)
  # Convert to a data frame
  pointValues <- as.data.frame(pointValues)
  # Keep only records with one of the requested input field (class) values
  pointValues <- pointValues[pointValues$Class == isWet[1] | pointValues$Class == notWet[1], ]
  # Eliminate rows with NA values
  pointValues <- na.omit(pointValues)
  # Eliminate columns with coordinate values
  coords <- names(pointValues) %in% c("coords.x1", "coords.x2")
  newdata <- pointValues[!coords]
  # Change to generic column headings; the same headings will be used for using this RF model on other basins
  for (i in 2:length(newdata)) {
    names(newdata)[i] <- paste0("Raster", i - 1)
  }
  print(head(newdata))
  # Run model on these data and report the confusion table
  test <- predict(rfclass, type = "response", newdata = newdata[, -1])
  print(table(test, newdata$Class))
}
# Build a probability raster, if requested
# Build a probability raster, if requested
if (!is.null(outProbRaster) && !is.na(outProbRaster)) {
  arc.progress_label("Creating probability raster")
  # BUG FIX: this message referenced the undefined name `outputProbRaster`
  # (the variable is `outProbRaster`), which aborted the whole branch.
  cat(paste0("Writing probabilities to ", outProbRaster))
  probs <- suppressWarnings(predictInParts(rasters, rfclass, outProbRaster))
  cat(paste0("Created GeoTiff probability raster ", outProbRaster[1]))
  if (calcStats) {
    arc.progress_label("Calculating performance statistics..")
    # NOTE(review): `points`, `isWet` and `notWet` are only defined when the
    # test-data branch above has run -- confirm calcStats is only requested
    # together with testData.
    # Process test points, same steps as earlier
    pointValues <- extractInParts(probs, points)
    pointValues <- cbind(points[, 1], pointValues)
    pointValues <- as.data.frame(pointValues)
    pointValues <- pointValues[pointValues$Class == isWet[1] | pointValues$Class == notWet[1], ]
    pointValues <- na.omit(pointValues)
    coords <- names(pointValues) %in% c("coords.x1", "coords.x2")
    predictions <- pointValues[!coords]
    names(predictions)[2] <- "Prob"
    # ROCR evaluation: ROC curve and area under it
    pred <- prediction(predictions$Prob, predictions$Class, label.ordering = c(isWet[1], notWet[1]))
    roc <- performance(pred, measure = "tpr", x.measure = "fpr")
    auc <- performance(pred, measure = "auc")
    cat(paste0("AUROC: ", auc@y.values, "\n"))
    plotandsave(roc, paste0(modelName[1], '_roc'), baseline = TRUE)
    # Precision-recall curve and its break-even point
    prc <- performance(pred, measure = "prec", x.measure = "rec")
    idx <- which.max(slot(prc, "y.values")[[1]])
    prbe <- slot(prc, "y.values")[[1]][idx]
    cutoff <- slot(prc, "x.values")[[1]][idx]
    print(c(PRBE = prbe, cutoff = cutoff))
    plotandsave(prc, paste0(modelName[1], '_prc'))
    # Accuracy curve and the cutoff that maximizes it
    acc <- performance(pred, measure = "acc")
    idx <- which.max(slot(acc, "y.values")[[1]])
    maxacc <- slot(acc, "y.values")[[1]][idx]
    cutoff <- slot(acc, "x.values")[[1]][idx]
    print(c(accuracy = maxacc, cutoff = cutoff))
    plotandsave(acc, paste0(modelName[1], '_acc'))
  }
}
return(out_params)
} |
# Tests for converting ggplot2 geom_violin() output into plotly traces
context("violin")
gg <- ggplot(mtcars, aes(factor(cyl), mpg)) + geom_violin()
test_that("basic geom_violin works", {
# Build the plotly object (also records a visual-regression snapshot)
L <- expect_doppelganger_built(gg, "violin")
# A single violin layer becomes exactly one trace
expect_equivalent(length(L$data), 1)
tr <- L$data[[1]]
# Violins are drawn as a self-filled scatter outline, not a native violin trace
expect_identical(tr$type, "scatter")
expect_true(tr$fill == "toself")
expect_false(tr$showlegend)
# Tooltip text reports the density values
expect_true(all(grepl("density", tr$text[!is.na(tr$text)])))
expect_true(tr$hoverinfo == "text")
})
gg2 <- ggplot(mtcars, aes(factor(cyl), mpg, fill = factor(cyl))) + geom_violin()
test_that("geom_violin with fill aes works", {
# Mapping fill to cyl should split the plot into one trace per factor level
L <- expect_doppelganger_built(gg2, "violin-aes")
expect_equivalent(length(L$data), 3)
# Each filled trace appears in the legend
expect_true(L$layout$showlegend)
expect_equivalent(sum(unlist(lapply(L$data, "[[", "showlegend"))), 3)
})
| /tests/testthat/test-ggplot-violin.R | permissive | slawlor/plotly | R | false | false | 767 | r | context("violin")
gg <- ggplot(mtcars, aes(factor(cyl), mpg)) + geom_violin()
test_that("basic geom_violin works", {
L <- expect_doppelganger_built(gg, "violin")
expect_equivalent(length(L$data), 1)
tr <- L$data[[1]]
expect_identical(tr$type, "scatter")
expect_true(tr$fill == "toself")
expect_false(tr$showlegend)
expect_true(all(grepl("density", tr$text[!is.na(tr$text)])))
expect_true(tr$hoverinfo == "text")
})
gg2 <- ggplot(mtcars, aes(factor(cyl), mpg, fill = factor(cyl))) + geom_violin()
test_that("geom_violin with fill aes works", {
L <- expect_doppelganger_built(gg2, "violin-aes")
expect_equivalent(length(L$data), 3)
expect_true(L$layout$showlegend)
expect_equivalent(sum(unlist(lapply(L$data, "[[", "showlegend"))), 3)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raster.R
\name{dbplot_raster}
\alias{dbplot_raster}
\title{Raster plot}
\usage{
dbplot_raster(data, x, y, fill = n(), resolution = 100)
}
\arguments{
\item{data}{A table (tbl)}
\item{x}{A continuous variable}
\item{y}{A continuous variable}
\item{fill}{The aggregation formula. Defaults to count (n)}
\item{resolution}{The number of bins created by variable. The higher the number, the more records can potentially be imported from the source}
}
\description{
To visualize two continuous variables, we typically resort to a Scatter plot. However,
this may not be practical when visualizing millions or billions of dots representing the
intersections of the two variables. A Raster plot may be a better option,
because it concentrates the intersections into squares that are easier to parse visually.
Uses very generic dplyr code to aggregate data and ggplot2 to create
a raster plot. Because of this approach,
the calculations automatically run inside the database if `data` has
a database or sparklyr connection. The `class()` of such tables
in R are: tbl_sql, tbl_dbi, tbl_spark
}
\details{
There are two considerations when using a Raster plot with a database. Both considerations are related
to the size of the results downloaded from the database:
- The number of bins requested: The higher the bins value is, the more data is downloaded from the database.
- How concentrated the data is: This refers to how many intersections return a value. The more intersections without a value,
the less data is downloaded from the database.
}
\examples{
# Returns a 100x100 raster plot of record count of intersections of eruptions and waiting
faithful \%>\%
dbplot_raster(eruptions, waiting)
# Returns a 50x50 raster plot of eruption averages of intersections of eruptions and waiting
faithful \%>\%
dbplot_raster(eruptions, waiting, fill = mean(eruptions), resolution = 50)
}
\seealso{
\code{\link{dbplot_bar}}, \code{\link{dbplot_line}} ,
\code{\link{dbplot_histogram}}
}
| /man/dbplot_raster.Rd | no_license | jmpasmoi/dbplot | R | false | true | 2,066 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raster.R
\name{dbplot_raster}
\alias{dbplot_raster}
\title{Raster plot}
\usage{
dbplot_raster(data, x, y, fill = n(), resolution = 100)
}
\arguments{
\item{data}{A table (tbl)}
\item{x}{A continuous variable}
\item{y}{A continuous variable}
\item{fill}{The aggregation formula. Defaults to count (n)}
\item{resolution}{The number of bins created by variable. The highest the number, the more records can be potentially imported from the sourd}
}
\description{
To visualize two continuous variables, we typically resort to a Scatter plot. However,
this may not be practical when visualizing millions or billions of dots representing the
intersections of the two variables. A Raster plot may be a better option,
because it concentrates the intersections into squares that are easier to parse visually.
Uses very generic dplyr code to aggregate data and ggplot2 to create
a raster plot. Because of this approach,
the calculations automatically run inside the database if `data` has
a database or sparklyr connection. The `class()` of such tables
in R are: tbl_sql, tbl_dbi, tbl_sql
}
\details{
There are two considerations when using a Raster plot with a database. Both considerations are related
to the size of the results downloaded from the database:
- The number of bins requested: The higher the bins value is, the more data is downloaded from the database.
- How concentrated the data is: This refers to how many intersections return a value. The more intersections without a value,
the less data is downloaded from the database.
}
\examples{
# Returns a 100x100 raster plot of record count of intersections of eruptions and waiting
faithful \%>\%
dbplot_raster(eruptions, waiting)
# Returns a 50x50 raster plot of eruption averages of intersections of eruptions and waiting
faithful \%>\%
dbplot_raster(eruptions, waiting, fill = mean(eruptions), resolution = 50)
}
\seealso{
\code{\link{dbplot_bar}}, \code{\link{dbplot_line}} ,
\code{\link{dbplot_histogram}}
}
|
# Desenvolvedor: Lucas Miguel de Carvalho - UNICAMP #
# lucasmiguel@lge.ibi.unicamp.br #
# Script de analise de dados do artigo: #
# https://www.ncbi.nlm.nih.gov/bioproject/555093
# Acessar os dados do SRA ###
# https://www.ncbi.nlm.nih.gov/sra?linkname=bioproject_sra_all&from_uid=555093 #
# Primeiro passo seria baixar o arquivo SraRunInfo.tsv#
# Ele contem todos os links do SRA eo ID das amostras #
# Selecionamos as mostras SRR9696658, SRR9696662, SRR9696666,SRR9696660,SRR9696664,SRR9696668
# posteriormente clicar em 'Send to' -> File -> RunInfo
###### SRA #######
# Download every SRA run listed in SraRunInfo.csv and convert it to FASTQ.
#https://www.ncbi.nlm.nih.gov/sra/?term=SRP215218
setwd(".")
base_dir <- getwd()
dados <- read.csv("SraRunInfo.csv", stringsAsFactors = FALSE)
# Local file names: the basename of each download URL
arquivos <- basename(dados$download_path)
for (i in seq_along(arquivos)) {  # seq_along() is empty-safe, unlike 1:length()
  download.file(dados$download_path[i], arquivos[i])
}
# Extract FASTQ from each .sra archive (--split-3 separates paired/unpaired reads)
for (a in arquivos) {
  cmd = paste("fastq-dump --split-3", a)
  system(cmd)
}
###### Trimmomatic #####
#http://www.usadellab.org/cms/?page=trimmomatic
# Download and unpack Trimmomatic, then quality-trim every FASTQ in single-end
# mode. Produces <run>.trim.fastq plus a per-run trim log and summary file.
cmd = paste("wget http://www.usadellab.org/cms/uploads/supplementary/Trimmomatic/Trimmomatic-0.39.zip")
system(cmd)
cmd = paste("unzip Trimmomatic-0.39.zip")
system(cmd)
for(a in arquivos){
# Adapter clipping (TruSeq2 SE), 3-bp end trimming, 4-base sliding-window Q15, min length 36
cmd = paste("java -jar ",base_dir,"/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads 10 -trimlog ",a,".trimlog -summary ",a,".summary ",a,".fastq ",a,".trim.fastq ILLUMINACLIP:Trimmomatic-0.39/adapters/TruSeq2-SE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36",sep = "")
system(cmd)
}
###### FastQC ######
# https://www.bioinformatics.babraham.ac.uk/projects/fastqc/
# Download and unpack FastQC, then run a quality report on every FASTQ file.
cmd = paste("wget https://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.8.zip")
system(cmd)  # BUG FIX: the wget command was built but never executed before unzip
cmd = paste("unzip fastqc_v0.11.8.zip")
system(cmd)
# BUG FIX: paste() with the default sep=" " inserted spaces into the path
# ("chmod 755  <dir> FastQC/fastqc"); paste0() builds the real path.
cmd = paste0("chmod 755 ", base_dir, "/FastQC/fastqc")
system(cmd)
for (a in arquivos) {
  # BUG FIX: same paste() separator problem when invoking the fastqc binary
  cmd = paste0(base_dir, "/FastQC/fastqc ", a, ".fastq")
  system(cmd)
}
###### Kallisto ####
# Download and unpack kallisto, then quantify every trimmed FASTQ.
cmd = paste("wget https://github.com/pachterlab/kallisto/releases/download/v0.44.0/kallisto_linux-v0.44.0.tar.gz")
system(cmd)  # BUG FIX: the download was never executed, so tar failed on a missing archive
cmd = paste("tar -xzvf kallisto_linux-v0.44.0.tar.gz")
system(cmd)
## You must first build a kallisto index from your transcriptome:
## kallisto index -i arabidopsis_index <transcriptome>
for (a in arquivos) {
  # Single-end quantification, 100 bootstraps, fragment length 100 (sd 0.001)
  cmd = paste(base_dir, "/kallisto_linux-v0.44.0/kallisto quant -i arabidopsis_index -o ", a, "_kallisto -b 100 -t 10 --single -l 100 -s 0.001 ", a, ".trim.fastq", sep = "")
  system(cmd)
}
##### Copy abundance files ########
# Collect each sample's kallisto abundance.tsv as <run>.tsv in the working dir.
# NOTE(review): in `for file in ls -l -d SRR*` the words "ls", "-l" and "-d" are
# iterated literally (they are not command substitution), producing harmless cp
# errors before the SRR* glob entries -- confirm and consider `for file in SRR*`.
cmd = paste("for file in ls -l -d SRR*;do cp $file/abundance.tsv $file.tsv;done")
print(cmd)
system(cmd)
###### Build count matrix ########
# Requires a helper script shipped with Trinity
# cmd = ("wget https://github.com/trinityrnaseq/trinityrnaseq/releases/download/v2.8.6/trinityrnaseq-v2.8.6.FULL.tar.gz")
# system (cmd)
lista = paste0(arquivos,".tsv",collapse = " ")
cmd = paste("perl trinityrnaseq-2.8.6/util/abundance_estimates_to_matrix.pl --est_method kallisto --gene_trans_map none ",lista,sep="")
# NOTE(review): the matrix command is only printed, never run (no system(cmd)) --
# confirm whether it is meant to be executed manually.
print (cmd)
# Generates a matrix named kallisto.isoform.counts.matrix that is used
# later by DESeq2 and edgeR
####### Sleuth #########
# Differential expression at transcript level with sleuth on the kallisto output.
# One-time installation steps, kept for reference:
#source("http://bioconductor.org/biocLite.R")
#biocLite("rhdf5")
#install.packages("devtools", repos = "http://cran.us.r-project.org")
#library("httr")
#set_config(config(ssl_verifypeer = 0L))
#devtools::install_github("pachterlab/sleuth")
library("sleuth")
# Collect the per-sample kallisto output directories under ./sleuth
cmd = "mkdir sleuth"
system(cmd)
cmd = "cp -r SRR*/ sleuth/"
system(cmd)
base_dir <- getwd()
########## SLEUTH #############
# Sample-to-run mapping (HS = heat-stressed/treated, CT = control):
#HS1 SRR9696660
#HS2 SRR9696664
#HS3 SRR9696668
#CT1 SRR9696658
#CT2 SRR9696662
#CT3 SRR9696666
sample_id <- list('SRR9696658','SRR9696662','SRR9696666',
'SRR9696660','SRR9696664','SRR9696668')
paths <- list(paste(base_dir,"/sleuth/SRR9696658",sep=""),
paste(base_dir,"/sleuth/SRR9696662",sep=""),
paste(base_dir,"/sleuth/SRR9696666",sep=""),
paste(base_dir,"/sleuth/SRR9696660",sep=""),
paste(base_dir,"/sleuth/SRR9696664",sep=""),
paste(base_dir,"/sleuth/SRR9696668",sep=""))
names(paths) <- sample_id
# amostras.txt is the sample sheet (columns: sample, condition, reps)
s2c <- read.table(file.path(base_dir, "amostras.txt"), header = TRUE, stringsAsFactors=FALSE)
s2c <- dplyr::select(s2c, sample = sample, condition, reps)
s2c
#t2g <- read.table("t2g.txt", header = TRUE, stringsAsFactors=FALSE)
s2c <- dplyr::mutate(s2c, path = paths)
print(s2c)
s2c <- data.frame(lapply(s2c, as.character), stringsAsFactors=FALSE)
# Transcript-level model: expression as a function of condition
so <- sleuth_prep(s2c, ~condition, extra_bootstrap_summary = TRUE)
so <- sleuth_fit(so)
# Wald test on the treated-vs-control coefficient
so <- sleuth_wt(so, "conditionTratado")
models(so)
results_table <- sleuth_results(so, test='conditionTratado', test_type = 'wald')
# Differentially expressed transcripts at q-value <= 0.05
sleuth_significant <- dplyr::filter(results_table, qval <= 0.05)
head(sleuth_significant, 20)
sleuth_list <- sleuth_significant[,1]
write.table(sleuth_significant,file="diferenciais_sleuth.txt")
# Launches the interactive sleuth shiny app (blocks until closed)
sleuth_live(so)
pdf("Sleuth_Volcano.pdf")
# BUG FIX: `ifelse(results_table$qval, ...)` used the q-value itself as a logical,
# so every non-zero q-value was colored red; compare against the 0.05 threshold.
# Axis labels also corrected (beta is plotted on x, -log10(qval) on y) and the
# invalid `title=` argument replaced by `main=`.
plot(results_table$b, -1*log10(results_table$qval),
     col = ifelse(results_table$qval <= 0.05, "red", "black"),
     xlab = "beta", ylab = "-log10(qval)", main = "Volcano plot", pch = 20)
dev.off()
##### edgeR #######
# Differential expression with edgeR on the kallisto isoform count matrix.
if (! require(edgeR)) {
source("https://bioconductor.org/biocLite.R")
biocLite("edgeR")
library(edgeR)
}
data = read.table("kallisto.isoform.counts.matrix", header=T, row.names=1, com='')
# Columns 1-3 = control replicates, 4-6 = treated replicates
col_ordering = c(1,2,3,4,5,6)
rnaseqMatrix = data[,col_ordering]
rnaseqMatrix = round(rnaseqMatrix)
# Keep transcripts with CPM > 1 in at least two samples
rnaseqMatrix = rnaseqMatrix[rowSums(cpm(rnaseqMatrix) > 1) >= 2,]
conditions = factor(c(rep("Controle", 3), rep("Tratado", 3)))
exp_study = DGEList(counts=rnaseqMatrix, group=conditions)
exp_study = calcNormFactors(exp_study)
exp_study = estimateDisp(exp_study)
# Exact test for the control-vs-treated contrast
et = exactTest(exp_study, pair=c("Controle", "Tratado"))
tTags = topTags(et,n=NULL)
result_table = tTags$table
result_table = data.frame(sampleA="Controle", sampleB="Tratado", result_table)
# Flip the sign so a positive logFC means up-regulated in "Tratado"
result_table$logFC = -1 * result_table$logFC
write.table(result_table, file='edgeR.DE_results', sep=' ', quote=F, row.names=T)
write.table(rnaseqMatrix, file='edgeR.count_matrix', sep=' ', quote=F, row.names=T)
pdf("edgeR.Volcano.pdf")
# NOTE(review): axis labels look swapped (logFC is on x, -log10(FDR) on y) and
# `title=` is not a plot() argument (`main=` is) -- confirm intended output.
plot(result_table$logFC, -1*log10(result_table$FDR), col=ifelse(result_table$FDR<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
dev.off()
# Significant transcripts at FDR <= 0.05 (data frame kept for inspection)
edger_significant <- dplyr::filter(result_table, FDR <= 0.05)
# BUG FIX: removed a stray `edgeR_list <- row.names(edger_significant)[i]` that
# referenced the loop index `i` before any loop had run. The per-row loop is
# replaced by an equivalent vectorized subset, which also avoids leaving NA
# entries at non-significant positions.
edgeR_list <- row.names(result_table)[result_table$FDR <= 0.05]
head(edgeR_list)
############### DESeq2 ##########
# Differential expression with DESeq2 on the same count matrix.
if (! require(DESeq2)) {
source("https://bioconductor.org/biocLite.R")
biocLite("DESeq2")
library(DESeq2)
}
data = read.table("kallisto.isoform.counts.matrix", header=T, row.names=1, com='')
col_ordering = c(1,2,3,4,5,6)
rnaseqMatrix = data[,col_ordering]
rnaseqMatrix = round(rnaseqMatrix)
# NOTE(review): cpm() comes from edgeR, so this section depends on the edgeR
# block above having been run first.
rnaseqMatrix = rnaseqMatrix[rowSums(cpm(rnaseqMatrix) > 1) >= 2,]
conditions = data.frame(conditions=factor(c(rep("Controle", 3), rep("Tratado", 3))))
rownames(conditions) = colnames(rnaseqMatrix)
ddsFullCountTable <- DESeqDataSetFromMatrix(
countData = rnaseqMatrix,
colData = conditions,
design = ~ conditions)
dds = DESeq(ddsFullCountTable)
contrast=c("conditions","Controle","Tratado")
res = results(dds, contrast)
# Per-condition mean normalized counts, appended to the results table
baseMeanA <- rowMeans(counts(dds, normalized=TRUE)[,colData(dds)$conditions == "Controle"])
baseMeanB <- rowMeans(counts(dds, normalized=TRUE)[,colData(dds)$conditions == "Tratado"])
res = cbind(baseMeanA, baseMeanB, as.data.frame(res))
res = cbind(sampleA="Controle", sampleB="Tratado", as.data.frame(res))
# Treat NA adjusted p-values (outliers / low counts) as non-significant
res$padj[is.na(res$padj)] <- 1
res = as.data.frame(res[order(res$pvalue),])
write.table(res, file='DESeq2.DE_results', sep=' ', quote=FALSE)
pdf("DESeq2_Volcano.pdf")
# NOTE(review): axis labels look swapped and `title=` should be `main=` -- confirm.
plot(res$log2FoldChange, -1*log10(res$padj), col=ifelse(res$padj<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
dev.off()
# Significant transcripts at adjusted p-value <= 0.05.
# Cleanup: vectorized subset replaces the per-row loop, which left NA entries
# at non-significant positions. `res$padj` has no NA here (replaced by 1 above),
# so the comparison is safe.
Deseq2_list <- row.names(res)[res$padj <= 0.05]
head(Deseq2_list)
####### Volcano Plot #########
# Side-by-side volcano plots for the three methods in a 2x2 layout
gridlayout = matrix(c(1:4),nrow=2,ncol=2, byrow=TRUE)
layout(gridlayout, widths=c(1,1,1,1), heights=c(1,1,1,1))
plot(res$log2FoldChange, -1*log10(res$padj), col=ifelse(res$padj<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
plot(result_table$logFC, -1*log10(result_table$FDR), col=ifelse(result_table$FDR<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
# NOTE(review): `ifelse(sleuth_significant$qval, ...)` has no threshold comparison;
# any non-zero q-value is treated as TRUE (red). Should likely be `qval <= 0.05`.
plot(sleuth_significant$b, -1*log10(sleuth_significant$qval), col=ifelse(sleuth_significant$qval, "red", "black"),xlab="log(qval)", ylab="beta", title="Volcano plot", pch=20)
# Sample sheet, loaded for reference
am <- read.table(file="amostras.txt",sep="\t",header=TRUE)
head(am)
########### Venn diagram #######
# Overlap of differentially expressed transcripts called by the three methods.
sleuth_significant <- read.table(file="diferenciais_comp1.txt",sep=" ",header=T)
sleuth_list <- sleuth_significant[,1]
# NOTE(review): read.table() returns a data.frame, overwriting the character
# vector built earlier; venn.diagram() expects vectors -- confirm this is intended.
edgeR_list <- read.table(file="lista_diff_edgeR.txt")
install.packages("VennDiagram")
library(VennDiagram)
library(RColorBrewer)
myCol <- brewer.pal(3, "Paired")
venn.diagram(
x = list(edgeR_list,Deseq2_list,sleuth_list),
category.names = c("edgeR" , "Deseq2","Sleuth"),
filename = 'venn_diagramm_DEG.png',
output=FALSE,
# Output
imagetype="png" ,
height = 680 ,
width = 880 ,
resolution = 600,
# Numbers
cex = .5,
fontface = "bold",
fontfamily = "serif",
# Circles
lwd = 2,
lty = 'blank',
fill = myCol,
# Category names
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer",
cat.fontfamily = "serif",
rotation = 1
)
######### PCA #######
# PCA of variance-stabilized counts using the legacy DESeq (v1) package.
# NOTE(review): DESeq v1 is deprecated in favor of DESeq2's vst()/plotPCA() --
# consider migrating.
library("DESeq")
countsTable <- read.delim("kallisto.isoform.counts.matrix", header=TRUE, stringsAsFactors=TRUE)
rownames(countsTable) <- countsTable[,1]
countsTable <- countsTable[,2:7]
# Use the column (sample) names as the condition labels
conds <- factor(c(names(countsTable)))
# Coerce counts to integers; values that fail to parse become 0
countsTable_novo <- apply(countsTable,2,as.integer)
countsTable_novo[is.na(countsTable_novo)] <- 0
cds<-newCountDataSet(countsTable_novo,conds)
cds<-estimateSizeFactors(cds)
sizeFactors(cds)
# 'blind' dispersion estimation: no replicate structure assumed
cds <- estimateDispersions(cds,method='blind')
vsd <- varianceStabilizingTransformation(cds)
pdf("PCA.pdf")
plotPCA(vsd)
dev.off()
######### DENDROGRAM #########
# Hierarchical clustering of samples on scaled counts (Euclidean distance,
# Ward.D2 linkage), rendered with ggdendro.
install.packages("ggdendro")
install.packages('dendextend')
library('dendextend')
library("ggplot2")
library("ggdendro")
countsTable <- read.delim("kallisto.isoform.counts.matrix", header=TRUE, stringsAsFactors=TRUE,row.names = 1)
# Transpose so that samples (not transcripts) are clustered
dd <- dist(t(scale(countsTable)), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
# Axis labels are intentionally Portuguese ("Amostras" = samples, "Altura" = height)
ggdendrogram(hc, rotate = TRUE, theme_dendro = FALSE,
size = 1) + labs(title="Dendrogram in ggplot2")+
xlab("Amostras") +ylab("Altura")
| /script.R | no_license | lmigueel/DEG_Athaliana | R | false | false | 11,170 | r | # Desenvolvedor: Lucas Miguel de Carvalho - UNICAMP #
# lucasmiguel@lge.ibi.unicamp.br #
# Script de analise de dados do artigo: #
# https://www.ncbi.nlm.nih.gov/bioproject/555093
# Acessar os dados do SRA ###
# https://www.ncbi.nlm.nih.gov/sra?linkname=bioproject_sra_all&from_uid=555093 #
# Primeiro passo seria baixar o arquivo SraRunInfo.tsv#
# Ele contem todos os links do SRA eo ID das amostras #
# Selecionamos as mostras SRR9696658, SRR9696662, SRR9696666,SRR9696660,SRR9696664,SRR9696668
# posteriormente clicar em 'Send to' -> File -> RunInfo
###### SRA #######
setwd(".")
#https://www.ncbi.nlm.nih.gov/sra/?term=SRP215218
base_dir <- getwd()
dados <-read.csv("SraRunInfo.csv", stringsAsFactors=FALSE)
arquivos <- basename(dados$download_path)
for(i in 1:length(arquivos)){
download.file(dados$download_path[i], arquivos[i])
}
for(a in arquivos) {
cmd = paste("fastq-dump --split-3", a)
system(cmd)
}
###### Trimmomatic #####
#http://www.usadellab.org/cms/?page=trimmomatic
cmd = paste("wget http://www.usadellab.org/cms/uploads/supplementary/Trimmomatic/Trimmomatic-0.39.zip")
system(cmd)
cmd = paste("unzip Trimmomatic-0.39.zip")
system(cmd)
for(a in arquivos){
cmd = paste("java -jar ",base_dir,"/Trimmomatic-0.39/trimmomatic-0.39.jar SE -threads 10 -trimlog ",a,".trimlog -summary ",a,".summary ",a,".fastq ",a,".trim.fastq ILLUMINACLIP:Trimmomatic-0.39/adapters/TruSeq2-SE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36",sep = "")
system(cmd)
}
###### FastQC ######
# https://www.bioinformatics.babraham.ac.uk/projects/fastqc/
cmd = paste("wget https://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.8.zip")
cmd = paste("unzip fastqc_v0.11.8.zip")
system(cmd)
cmd = paste("chmod 755 ",base_dir,"FastQC/fastqc")
system(cmd)
for(a in arquivos){
cmd = paste(base_dir,"FastQC/fastqc ",a,".fastq ",sep = "")
system(cmd)
}
###### Kallisto ####
cmd = paste("wget https://github.com/pachterlab/kallisto/releases/download/v0.44.0/kallisto_linux-v0.44.0.tar.gz")
cmd = paste("tar -xzvf kallisto_linux-v0.44.0.tar.gz")
system(cmd)
## Voce deve criar um indice com o kallisto de seu transcriptoma
## comando: kallisto index -i arabidopsis_index <transcriptoma>
for(a in arquivos){
cmd = paste(base_dir,"/kallisto_linux-v0.44.0/kallisto quant -i arabidopsis_index -o ",a,"_kallisto -b 100 -t 10 --single -l 100 -s 0.001 ",a,".trim.fastq",sep="")
system(cmd)
}
##### Copiar arquivos ########
cmd = paste("for file in ls -l -d SRR*;do cp $file/abundance.tsv $file.tsv;done")
print(cmd)
system(cmd)
###### Montar matriz ########
# precisa de um script presente no trinity
# cmd = ("wget https://github.com/trinityrnaseq/trinityrnaseq/releases/download/v2.8.6/trinityrnaseq-v2.8.6.FULL.tar.gz")
# system (cmd)
lista = paste0(arquivos,".tsv",collapse = " ")
cmd = paste("perl trinityrnaseq-2.8.6/util/abundance_estimates_to_matrix.pl --est_method kallisto --gene_trans_map none ",lista,sep="")
print (cmd)
#gera uma matriz chamada kallisto.isoform.counts.matrix que sera utilizada
#no deseq2 e edgeR
####### Sleuth #########
#source("http://bioconductor.org/biocLite.R")
#biocLite("rhdf5")
#install.packages("devtools", repos = "http://cran.us.r-project.org")
#library("httr")
#set_config(config(ssl_verifypeer = 0L))
#devtools::install_github("pachterlab/sleuth")
library("sleuth")
cmd = "mkdir sleuth"
system(cmd)
cmd = "cp -r SRR*/ sleuth/"
system(cmd)
base_dir <- getwd()
########## SLEUTH #############
#HS1 SRR9696660
#HS2 SRR9696664
#HS3 SRR9696668
#CT1 SRR9696658
#CT2 SRR9696662
#CT3 SRR9696666
sample_id <- list('SRR9696658','SRR9696662','SRR9696666',
'SRR9696660','SRR9696664','SRR9696668')
paths <- list(paste(base_dir,"/sleuth/SRR9696658",sep=""),
paste(base_dir,"/sleuth/SRR9696662",sep=""),
paste(base_dir,"/sleuth/SRR9696666",sep=""),
paste(base_dir,"/sleuth/SRR9696660",sep=""),
paste(base_dir,"/sleuth/SRR9696664",sep=""),
paste(base_dir,"/sleuth/SRR9696668",sep=""))
names(paths) <- sample_id
s2c <- read.table(file.path(base_dir, "amostras.txt"), header = TRUE, stringsAsFactors=FALSE)
s2c <- dplyr::select(s2c, sample = sample, condition, reps)
s2c
#t2g <- read.table("t2g.txt", header = TRUE, stringsAsFactors=FALSE)
s2c <- dplyr::mutate(s2c, path = paths)
print(s2c)
s2c <- data.frame(lapply(s2c, as.character), stringsAsFactors=FALSE)
#transcrito
so <- sleuth_prep(s2c, ~condition, extra_bootstrap_summary = TRUE)
so <- sleuth_fit(so)
#wald
so <- sleuth_wt(so, "conditionTratado")
models(so)
results_table <- sleuth_results(so, test='conditionTratado', test_type = 'wald')
sleuth_significant <- dplyr::filter(results_table, qval <= 0.05)
head(sleuth_significant, 20)
sleuth_list <- sleuth_significant[,1]
write.table(sleuth_significant,file="diferenciais_sleuth.txt")
sleuth_live(so)
pdf("Sleuth_Volcano.pdf")
plot(results_table$b, -1*log10(results_table$qval), col=ifelse(results_table$qval, "red", "black"),xlab="log(qval)", ylab="beta", title="Volcano plot", pch=20)
dev.off()
##### EDEGR #######
if (! require(edgeR)) {
source("https://bioconductor.org/biocLite.R")
biocLite("edgeR")
library(edgeR)
}
data = read.table("kallisto.isoform.counts.matrix", header=T, row.names=1, com='')
col_ordering = c(1,2,3,4,5,6)
rnaseqMatrix = data[,col_ordering]
rnaseqMatrix = round(rnaseqMatrix)
rnaseqMatrix = rnaseqMatrix[rowSums(cpm(rnaseqMatrix) > 1) >= 2,]
conditions = factor(c(rep("Controle", 3), rep("Tratado", 3)))
exp_study = DGEList(counts=rnaseqMatrix, group=conditions)
exp_study = calcNormFactors(exp_study)
exp_study = estimateDisp(exp_study)
et = exactTest(exp_study, pair=c("Controle", "Tratado"))
tTags = topTags(et,n=NULL)
result_table = tTags$table
result_table = data.frame(sampleA="Controle", sampleB="Tratado", result_table)
result_table$logFC = -1 * result_table$logFC
write.table(result_table, file='edgeR.DE_results', sep=' ', quote=F, row.names=T)
write.table(rnaseqMatrix, file='edgeR.count_matrix', sep=' ', quote=F, row.names=T)
pdf("edgeR.Volcano.pdf")
plot(result_table$logFC, -1*log10(result_table$FDR), col=ifelse(result_table$FDR<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
dev.off()
edger_significant <- dplyr::filter(result_table, FDR <= 0.05)
edgeR_list <- row.names(edger_significant)[i]
edgeR_list <- NULL
for(i in 1:length(result_table[,1])){
if(result_table[i,]$FDR <= 0.05){
#print("Entrou\n")
edgeR_list[i] <- row.names(result_table)[i]
}
}
head(edgeR_list)
############### DESEQ2 ##########
if (! require(DESeq2)) {
source("https://bioconductor.org/biocLite.R")
biocLite("DESeq2")
library(DESeq2)
}
data = read.table("kallisto.isoform.counts.matrix", header=T, row.names=1, com='')
col_ordering = c(1,2,3,4,5,6)
rnaseqMatrix = data[,col_ordering]
rnaseqMatrix = round(rnaseqMatrix)
rnaseqMatrix = rnaseqMatrix[rowSums(cpm(rnaseqMatrix) > 1) >= 2,]
conditions = data.frame(conditions=factor(c(rep("Controle", 3), rep("Tratado", 3))))
rownames(conditions) = colnames(rnaseqMatrix)
ddsFullCountTable <- DESeqDataSetFromMatrix(
countData = rnaseqMatrix,
colData = conditions,
design = ~ conditions)
dds = DESeq(ddsFullCountTable)
contrast=c("conditions","Controle","Tratado")
res = results(dds, contrast)
baseMeanA <- rowMeans(counts(dds, normalized=TRUE)[,colData(dds)$conditions == "Controle"])
baseMeanB <- rowMeans(counts(dds, normalized=TRUE)[,colData(dds)$conditions == "Tratado"])
res = cbind(baseMeanA, baseMeanB, as.data.frame(res))
res = cbind(sampleA="Controle", sampleB="Tratado", as.data.frame(res))
res$padj[is.na(res$padj)] <- 1
res = as.data.frame(res[order(res$pvalue),])
write.table(res, file='DESeq2.DE_results', sep=' ', quote=FALSE)
pdf("DESeq2_Volcano.pdf")
plot(res$log2FoldChange, -1*log10(res$padj), col=ifelse(res$padj<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
dev.off()
Deseq2_list <- NULL
for(i in 1:length(res[,1])){
if(res[i,]$padj <= 0.05){
#print("Entrou\n")
Deseq2_list[i] <- row.names(res)[i]
}
}
head(Deseq2_list)
####### Volcano Plot #########
gridlayout = matrix(c(1:4),nrow=2,ncol=2, byrow=TRUE)
layout(gridlayout, widths=c(1,1,1,1), heights=c(1,1,1,1))
plot(res$log2FoldChange, -1*log10(res$padj), col=ifelse(res$padj<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
plot(result_table$logFC, -1*log10(result_table$FDR), col=ifelse(result_table$FDR<=0.05, "red", "black"),xlab="logCounts", ylab="logFC", title="Volcano plot", pch=20)
plot(sleuth_significant$b, -1*log10(sleuth_significant$qval), col=ifelse(sleuth_significant$qval, "red", "black"),xlab="log(qval)", ylab="beta", title="Volcano plot", pch=20)
am <- read.table(file="amostras.txt",sep="\t",header=TRUE)
head(am)
########### Diagrama de Venn #######
sleuth_significant <- read.table(file="diferenciais_comp1.txt",sep=" ",header=T)
sleuth_list <- sleuth_significant[,1]
edgeR_list <- read.table(file="lista_diff_edgeR.txt")
install.packages("VennDiagram")
library(VennDiagram)
library(RColorBrewer)
myCol <- brewer.pal(3, "Paired")
# Three-way Venn diagram of the DE gene lists from edgeR, DESeq2 and sleuth.
venn.diagram(
x = list(edgeR_list,Deseq2_list,sleuth_list),
category.names = c("edgeR" , "Deseq2","Sleuth"),
filename = 'venn_diagramm_DEG.png',
output=FALSE,
# Output file settings
imagetype="png" ,
height = 680 ,
width = 880 ,
resolution = 600,
# Region counts (numbers)
cex = .5,
fontface = "bold",
fontfamily = "serif",
# Circles
lwd = 2,
lty = 'blank',
fill = myCol,
# Category names
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer",
cat.fontfamily = "serif",
rotation = 1
)
######### PCA #######
# NOTE(review): this uses the legacy 'DESeq' package (not DESeq2) for the
# variance-stabilizing transformation and PCA plot.
library("DESeq")
countsTable <- read.delim("kallisto.isoform.counts.matrix", header=TRUE, stringsAsFactors=TRUE)
# First column holds the feature IDs; the next six columns are the samples.
rownames(countsTable) <- countsTable[,1]
countsTable <- countsTable[,2:7]
# Each sample becomes its own condition level (names of the count columns).
conds <- factor(c(names(countsTable)))
# Coerce counts to integers; values that fail coercion become 0.
countsTable_novo <- apply(countsTable,2,as.integer)
countsTable_novo[is.na(countsTable_novo)] <- 0
cds<-newCountDataSet(countsTable_novo,conds)
cds<-estimateSizeFactors(cds)
sizeFactors(cds)
# 'blind' dispersion estimation (no replicates per condition here).
cds <- estimateDispersions(cds,method='blind')
vsd <- varianceStabilizingTransformation(cds)
pdf("PCA.pdf")
plotPCA(vsd)
dev.off()
######### DENDROGRAM #########
# Install only when missing: unconditional install.packages() in a script
# re-downloads on every run and fails offline.
if (!requireNamespace("ggdendro", quietly = TRUE)) install.packages("ggdendro")
if (!requireNamespace("dendextend", quietly = TRUE)) install.packages("dendextend")
library('dendextend')
library("ggplot2")
library("ggdendro")
countsTable <- read.delim("kallisto.isoform.counts.matrix", header=TRUE, stringsAsFactors=TRUE,row.names = 1)
# Euclidean distances between samples: scale() standardizes each sample
# column, t() puts samples in rows, then Ward clustering.
dd <- dist(t(scale(countsTable)), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
ggdendrogram(hc, rotate = TRUE, theme_dendro = FALSE,
size = 1) + labs(title="Dendrogram in ggplot2")+
xlab("Amostras") +ylab("Altura")
|
#' Remove empty string elements
#'
#' Take a vector and remove empty string elements. Useful after string splitting and being left with lots of empty string vectors.
#' @param vec A vector that may contain empty strings
#' @keywords string
#'
#' @export
#'
#' @examples
#'
#' remove_empty_strings(c("a", "", "c"))
remove_empty_strings <- function(vec) {
  # Logical subsetting handles the no-match case correctly. The original
  # vec[-which(vec == "")] returned an EMPTY vector whenever no element was
  # "", because which() yields integer(0) and x[-integer(0)] is x[integer(0)].
  out <- vec[vec != ""]
  return(out)
}
| /R/remove_empty_strings.R | no_license | aedobbyn/dobtools | R | false | false | 403 | r | #' Remove empty string elements
#'
#' Take a vector and remove empty string elements. Useful after string splitting and being left with lots of empty string vectors.
#' @param vec A vector that may contain empty strings
#' @keywords string
#'
#' @export
#'
#' @examples
#'
#' remove_empty_strings(c("a", "", "c"))
remove_empty_strings <- function(vec) {
  # Logical subsetting handles the no-match case correctly. The original
  # vec[-which(vec == "")] returned an EMPTY vector whenever no element was
  # "", because which() yields integer(0) and x[-integer(0)] is x[integer(0)].
  out <- vec[vec != ""]
  return(out)
}
|
# QC_histogram: draws side-by-side histograms of the expected (normal) and
# observed distribution of a numeric column of GWAS results, optionally after
# filtering SNPs on allele frequency, call rate, HWE p-value and imputation
# quality (via HQ_filter). Values outside the expected histogram's range are
# counted, flagged in the plot and optionally exported to a tab-delimited
# file. Called for its side effects (a .png and optional .txt in save_dir);
# returns invisible(NULL).
QC_histogram <-
function(
dataset, data_col = 1, save_name = "dataset", save_dir = getwd(), export_outliers = FALSE,
filter_FRQ = NULL, filter_cal = NULL, filter_HWE = NULL, filter_imp = NULL,
filter_NA = TRUE, filter_NA_FRQ = filter_NA, filter_NA_cal = filter_NA, filter_NA_HWE = filter_NA, filter_NA_imp = filter_NA,
breaks = "Sturges", graph_name = colnames(dataset)[data_col], header_translations,
check_impstatus = FALSE, ignore_impstatus = FALSE, T_strings = c("1", "TRUE", "yes", "YES", "y", "Y"), F_strings = c("0", "FALSE", "no", "NO", "n", "N"), NA_strings = c(NA, "NA", ".", "-"), ...
) {
# A filter is "skipped" when its threshold is NULL, or NA with the
# corresponding filter_NA_* flag disabled (nothing left to filter on).
skip_FRQ <- if(is.null(filter_FRQ)) { TRUE } else { is.na(filter_FRQ) & !filter_NA_FRQ }
skip_cal <- if(is.null(filter_cal)) { TRUE } else { is.na(filter_cal) & !filter_NA_cal }
skip_HWE <- if(is.null(filter_HWE)) { TRUE } else { is.na(filter_HWE) & !filter_NA_HWE }
skip_imp <- if(is.null(filter_imp)) { TRUE } else { is.na(filter_imp) & !filter_NA_imp }
# This is to ensure that HQ_filter won't be looking at missing columns
if(skip_FRQ) filter_FRQ <- NULL
if(skip_cal) filter_cal <- NULL
if(skip_HWE) filter_HWE <- NULL
if(skip_imp) filter_imp <- NULL
# Vector input is only accepted when at least three of the four filters are
# skipped (a single column offers no filter data); it is wrapped in a
# one-column data frame so the rest of the code can treat it uniformly.
if(is.vector(dataset)) {
if(check_impstatus | ignore_impstatus) stop("cannot check or ignore imp-status: vector dataset!")
if(skip_FRQ + skip_cal + skip_HWE + skip_imp > 2L) {
dataset <- data.frame(EFFECT = dataset)
if(is.character(data_col)) {
colnames(dataset) <- data_col
data_col <- 1L
} else {
if(data_col != 1L) stop("Invalid column specified")
}
} else { stop("Insufficient data to apply filters: dataset is single column!") }
} else {
# Resolve data_col to a single valid column index.
if(is.character(data_col)) {
data_col <- which(colnames(dataset) == data_col)
if(length(data_col) != 1L) stop("Invalid column specified")
} else {
if(is.na(colnames(dataset)[data_col])) stop("Invalid column specified")
} }
if(length(graph_name) != 1L) stop("Argument 'graph_name' has invalid length")
# This line was added not to test graph_name, but to "fix" it before
# the header of dataset is checked/translated
# No filters at all: keep every non-missing value. Otherwise check/translate
# the header, resolve imputation status and apply HQ_filter.
if(skip_FRQ & skip_cal & skip_HWE & skip_imp) {
goodOnes <- !is.na(dataset[ , data_col])
clarf <- "No filter applied"
} else {
header_std <- c("EFF_ALL_FREQ", "HWE_PVAL", "CALLRATE", "IMP_QUALITY", "IMPUTED")[c(!skip_FRQ, !skip_HWE, !skip_cal, !skip_imp, check_impstatus | (!ignore_impstatus & !(skip_cal & skip_HWE & skip_imp)))]
if(missing(header_translations)) {
# If the IMPUTED column is absent but can be inferred, fill it in with a
# uniform status (all genotyped / all imputed) and warn.
if(!any(colnames(dataset) == "IMPUTED")) {
if(!check_impstatus & (ignore_impstatus | skip_imp | (skip_HWE & skip_cal)) ) {
if(!ignore_impstatus) {
if(skip_imp & !skip_HWE & !skip_cal) {
dataset$IMPUTED <- 0L
print("Warning: no imputation-status specified - all SNPs set to genotyped") }
if(skip_HWE & skip_cal & !skip_imp) {
dataset$IMPUTED <- 1L
print("Warning: no imputation-status specified - all SNPs set to imputed") }
}
} else { stop("Missing imputation status") }
}
if(!all(header_std %in% colnames(dataset))) stop("Cannot apply filter: missing or unidentified columns")
} else {
# Translate non-standard column names to the standard set; at most one
# column (IMPUTED) may be missing, and only when it can be inferred.
header_test <- translate_header(header = colnames(dataset), standard = header_std, alternative = header_translations)
if(any(duplicated(header_test$header_h))) stop("cannot translate header - duplicate column names")
if(header_test$missing_N > 1L) stop("cannot translate header - missing columns")
if(header_test$missing_N == 1L) {
if(header_test$missing_h == "IMPUTED" & !check_impstatus & (ignore_impstatus | skip_imp | (skip_HWE & skip_cal)) ) {
if(!ignore_impstatus) {
if(skip_imp) {
dataset$IMPUTED <- 0L
print("Warning: no imputation-status specified - all SNPs set to genotyped")
} else {
dataset$IMPUTED <- 1L
print("Warning: no imputation-status specified - all SNPs set to imputed") }
header_test$header_h <- c(header_test$header_h, "IMPUTED")
}
} else { stop(paste("cannot translate header - missing column:", paste(header_test$missing_h, collapse = ", "))) }
}
colnames(dataset) <- header_test$header_h
}
if(check_impstatus) {
dataset$IMPUTED <- convert_impstatus(dataset$IMPUTED, T_strings, F_strings, NA_strings, use_log = FALSE)
if(all(is.na(dataset$IMPUTED))) stop("imputation status missing or untranslated")
}
# goodOnes marks rows that are non-missing AND pass all active filters.
goodOnes <- !is.na(dataset[ , data_col]) & HQ_filter(data = dataset, ignore_impstatus = ignore_impstatus,
FRQ_val = filter_FRQ, cal_val = filter_cal, HWE_val = filter_HWE, imp_val = filter_imp,
FRQ_NA = filter_NA_FRQ, cal_NA = filter_NA_cal, HWE_NA = filter_NA_HWE, imp_NA = filter_NA_imp)
# Build the plot subtitle describing which filters were applied.
clarf <- "Filtered for"
if(!skip_FRQ) {
if(is.na(filter_FRQ)) { clarf <- paste(clarf, "missing allele frequency;")
} else {
if(filter_NA_FRQ) {
clarf <- paste(clarf, "MAF <", filter_FRQ, "or missing;")
} else {
clarf <- paste(clarf, "MAF <", filter_FRQ, ";")
} } }
if(!skip_cal) {
if(is.na(filter_cal)) { clarf <- paste(clarf, "missing call rates;")
} else {
if(filter_NA_cal) {
clarf <- paste(clarf, "call rate <", filter_cal, "or missing;")
} else {
clarf <- paste(clarf, "call rate <", filter_cal, ";")
} } }
if(!skip_HWE) {
if(is.na(filter_HWE)) { clarf <- paste(clarf, "missing HWE p-value;")
} else {
if(filter_NA_HWE) {
clarf <- paste(clarf, "HWE p <", filter_HWE, "or missing;")
} else {
clarf <- paste(clarf, "HWE p <", filter_HWE, ";")
} } }
if(!skip_imp) {
if(is.na(filter_imp)) { clarf <- paste(clarf, "missing imputation quality;")
} else {
if(filter_NA_imp) {
clarf <- paste(clarf, "imp. qual. <", filter_imp, "or missing;")
} else {
clarf <- paste(clarf, "imp. qual. <", filter_imp, ";")
} } }
clarf <- substr(clarf, 1L, nchar(clarf) - 1L) # removes the final semi-colon
}
# At least four usable values are required to draw the histograms.
goodN <- sum(goodOnes)
if(goodN < 4L) { print("Insufficient non-missing, non-filtered effect sizes")
} else {
min_dat <- min(dataset[goodOnes, data_col])
max_dat <- max(dataset[goodOnes, data_col])
min_N <- 0L
max_N <- 0L
# Left panel: expected normal distribution with the observed mean/SD
# (via qnorm(ppoints(goodN))); right panel: observed distribution, with
# the break range extended if the data exceed the expected range.
png(paste0(save_dir, "/", save_name, ".png"), width = 1440, height = 480)
par(mfrow = c(1, 2))
(( h1<-hist(mean(dataset[goodOnes, data_col]) + (qnorm(ppoints(goodN)) * sd(dataset[goodOnes, data_col])),
freq = FALSE, plot = TRUE, main = paste("Expected distribution of", graph_name), xlab = graph_name, breaks = breaks, sub = save_name, font.sub = 3, ...) ))
h2_breaks <- h1$breaks
minbreaks <- h2_breaks[1]
maxbreaks <- h2_breaks[length(h2_breaks)]
if (minbreaks > min_dat) {
h2_breaks <- c(min_dat, h2_breaks)
min_N <- sum(dataset[goodOnes, data_col] < minbreaks)
}
if (maxbreaks < max_dat) {
h2_breaks <- c(h2_breaks, max_dat)
max_N <- sum(dataset[goodOnes, data_col] > maxbreaks)
}
(( h2 <- hist(dataset[goodOnes, data_col], breaks = h2_breaks, xlim = c(minbreaks, maxbreaks),
freq = FALSE, plot = TRUE, main = paste("Observed distribution of", graph_name), xlab = graph_name, sub = clarf, font.sub = 3, ...) ))
if(min_N > 0L) {
text(minbreaks, 0.6 * max(h2$density), pos = 4,
label = paste(min_N, "values outside min. range"), cex = 1, col = "red")
}
if(max_N > 0L) {
text(maxbreaks, 0.6 * max(h2$density), pos = 2,
label = paste(max_N, "values outside max. range"), cex = 1, col = "red")
}
dev.off()
# Optionally export the outlying rows: export_outliers = TRUE/1 exports
# all of them; an integer > 1 caps the number of exported rows.
if(export_outliers > 0L & min_N + max_N > 0L) {
if(min_N + max_N <= export_outliers | export_outliers == 1) {
write.table(dataset[goodOnes & (dataset[ , data_col] < minbreaks | dataset[ , data_col] > maxbreaks), ],
paste0(save_dir, "/", save_name, ".txt"), col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
} else {
write.table(dataset[goodOnes & (dataset[ , data_col] < minbreaks | dataset[ , data_col] > maxbreaks), ][1:export_outliers, ],
paste0(save_dir, "/", save_name, ".txt"), col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
} } }
return(invisible())
}
| /R/QC_histogram.R | no_license | cran/QCGWAS | R | false | false | 8,779 | r | QC_histogram <-
function(
dataset, data_col = 1, save_name = "dataset", save_dir = getwd(), export_outliers = FALSE,
filter_FRQ = NULL, filter_cal = NULL, filter_HWE = NULL, filter_imp = NULL,
filter_NA = TRUE, filter_NA_FRQ = filter_NA, filter_NA_cal = filter_NA, filter_NA_HWE = filter_NA, filter_NA_imp = filter_NA,
breaks = "Sturges", graph_name = colnames(dataset)[data_col], header_translations,
check_impstatus = FALSE, ignore_impstatus = FALSE, T_strings = c("1", "TRUE", "yes", "YES", "y", "Y"), F_strings = c("0", "FALSE", "no", "NO", "n", "N"), NA_strings = c(NA, "NA", ".", "-"), ...
) {
skip_FRQ <- if(is.null(filter_FRQ)) { TRUE } else { is.na(filter_FRQ) & !filter_NA_FRQ }
skip_cal <- if(is.null(filter_cal)) { TRUE } else { is.na(filter_cal) & !filter_NA_cal }
skip_HWE <- if(is.null(filter_HWE)) { TRUE } else { is.na(filter_HWE) & !filter_NA_HWE }
skip_imp <- if(is.null(filter_imp)) { TRUE } else { is.na(filter_imp) & !filter_NA_imp }
# This is to ensure that HQ_filter won't be looking at missing columns
if(skip_FRQ) filter_FRQ <- NULL
if(skip_cal) filter_cal <- NULL
if(skip_HWE) filter_HWE <- NULL
if(skip_imp) filter_imp <- NULL
if(is.vector(dataset)) {
if(check_impstatus | ignore_impstatus) stop("cannot check or ignore imp-status: vector dataset!")
if(skip_FRQ + skip_cal + skip_HWE + skip_imp > 2L) {
dataset <- data.frame(EFFECT = dataset)
if(is.character(data_col)) {
colnames(dataset) <- data_col
data_col <- 1L
} else {
if(data_col != 1L) stop("Invalid column specified")
}
} else { stop("Insufficient data to apply filters: dataset is single column!") }
} else {
if(is.character(data_col)) {
data_col <- which(colnames(dataset) == data_col)
if(length(data_col) != 1L) stop("Invalid column specified")
} else {
if(is.na(colnames(dataset)[data_col])) stop("Invalid column specified")
} }
if(length(graph_name) != 1L) stop("Argument 'graph_name' has invalid length")
# This line was added not to test graph_name, but to "fix" it before
# the header of dataset is checked/translated
if(skip_FRQ & skip_cal & skip_HWE & skip_imp) {
goodOnes <- !is.na(dataset[ , data_col])
clarf <- "No filter applied"
} else {
header_std <- c("EFF_ALL_FREQ", "HWE_PVAL", "CALLRATE", "IMP_QUALITY", "IMPUTED")[c(!skip_FRQ, !skip_HWE, !skip_cal, !skip_imp, check_impstatus | (!ignore_impstatus & !(skip_cal & skip_HWE & skip_imp)))]
if(missing(header_translations)) {
if(!any(colnames(dataset) == "IMPUTED")) {
if(!check_impstatus & (ignore_impstatus | skip_imp | (skip_HWE & skip_cal)) ) {
if(!ignore_impstatus) {
if(skip_imp & !skip_HWE & !skip_cal) {
dataset$IMPUTED <- 0L
print("Warning: no imputation-status specified - all SNPs set to genotyped") }
if(skip_HWE & skip_cal & !skip_imp) {
dataset$IMPUTED <- 1L
print("Warning: no imputation-status specified - all SNPs set to imputed") }
}
} else { stop("Missing imputation status") }
}
if(!all(header_std %in% colnames(dataset))) stop("Cannot apply filter: missing or unidentified columns")
} else {
header_test <- translate_header(header = colnames(dataset), standard = header_std, alternative = header_translations)
if(any(duplicated(header_test$header_h))) stop("cannot translate header - duplicate column names")
if(header_test$missing_N > 1L) stop("cannot translate header - missing columns")
if(header_test$missing_N == 1L) {
if(header_test$missing_h == "IMPUTED" & !check_impstatus & (ignore_impstatus | skip_imp | (skip_HWE & skip_cal)) ) {
if(!ignore_impstatus) {
if(skip_imp) {
dataset$IMPUTED <- 0L
print("Warning: no imputation-status specified - all SNPs set to genotyped")
} else {
dataset$IMPUTED <- 1L
print("Warning: no imputation-status specified - all SNPs set to imputed") }
header_test$header_h <- c(header_test$header_h, "IMPUTED")
}
} else { stop(paste("cannot translate header - missing column:", paste(header_test$missing_h, collapse = ", "))) }
}
colnames(dataset) <- header_test$header_h
}
if(check_impstatus) {
dataset$IMPUTED <- convert_impstatus(dataset$IMPUTED, T_strings, F_strings, NA_strings, use_log = FALSE)
if(all(is.na(dataset$IMPUTED))) stop("imputation status missing or untranslated")
}
goodOnes <- !is.na(dataset[ , data_col]) & HQ_filter(data = dataset, ignore_impstatus = ignore_impstatus,
FRQ_val = filter_FRQ, cal_val = filter_cal, HWE_val = filter_HWE, imp_val = filter_imp,
FRQ_NA = filter_NA_FRQ, cal_NA = filter_NA_cal, HWE_NA = filter_NA_HWE, imp_NA = filter_NA_imp)
clarf <- "Filtered for"
if(!skip_FRQ) {
if(is.na(filter_FRQ)) { clarf <- paste(clarf, "missing allele frequency;")
} else {
if(filter_NA_FRQ) {
clarf <- paste(clarf, "MAF <", filter_FRQ, "or missing;")
} else {
clarf <- paste(clarf, "MAF <", filter_FRQ, ";")
} } }
if(!skip_cal) {
if(is.na(filter_cal)) { clarf <- paste(clarf, "missing call rates;")
} else {
if(filter_NA_cal) {
clarf <- paste(clarf, "call rate <", filter_cal, "or missing;")
} else {
clarf <- paste(clarf, "call rate <", filter_cal, ";")
} } }
if(!skip_HWE) {
if(is.na(filter_HWE)) { clarf <- paste(clarf, "missing HWE p-value;")
} else {
if(filter_NA_HWE) {
clarf <- paste(clarf, "HWE p <", filter_HWE, "or missing;")
} else {
clarf <- paste(clarf, "HWE p <", filter_HWE, ";")
} } }
if(!skip_imp) {
if(is.na(filter_imp)) { clarf <- paste(clarf, "missing imputation quality;")
} else {
if(filter_NA_imp) {
clarf <- paste(clarf, "imp. qual. <", filter_imp, "or missing;")
} else {
clarf <- paste(clarf, "imp. qual. <", filter_imp, ";")
} } }
clarf <- substr(clarf, 1L, nchar(clarf) - 1L) # removes the final semi-colon
}
goodN <- sum(goodOnes)
if(goodN < 4L) { print("Insufficient non-missing, non-filtered effect sizes")
} else {
min_dat <- min(dataset[goodOnes, data_col])
max_dat <- max(dataset[goodOnes, data_col])
min_N <- 0L
max_N <- 0L
png(paste0(save_dir, "/", save_name, ".png"), width = 1440, height = 480)
par(mfrow = c(1, 2))
(( h1<-hist(mean(dataset[goodOnes, data_col]) + (qnorm(ppoints(goodN)) * sd(dataset[goodOnes, data_col])),
freq = FALSE, plot = TRUE, main = paste("Expected distribution of", graph_name), xlab = graph_name, breaks = breaks, sub = save_name, font.sub = 3, ...) ))
h2_breaks <- h1$breaks
minbreaks <- h2_breaks[1]
maxbreaks <- h2_breaks[length(h2_breaks)]
if (minbreaks > min_dat) {
h2_breaks <- c(min_dat, h2_breaks)
min_N <- sum(dataset[goodOnes, data_col] < minbreaks)
}
if (maxbreaks < max_dat) {
h2_breaks <- c(h2_breaks, max_dat)
max_N <- sum(dataset[goodOnes, data_col] > maxbreaks)
}
(( h2 <- hist(dataset[goodOnes, data_col], breaks = h2_breaks, xlim = c(minbreaks, maxbreaks),
freq = FALSE, plot = TRUE, main = paste("Observed distribution of", graph_name), xlab = graph_name, sub = clarf, font.sub = 3, ...) ))
if(min_N > 0L) {
text(minbreaks, 0.6 * max(h2$density), pos = 4,
label = paste(min_N, "values outside min. range"), cex = 1, col = "red")
}
if(max_N > 0L) {
text(maxbreaks, 0.6 * max(h2$density), pos = 2,
label = paste(max_N, "values outside max. range"), cex = 1, col = "red")
}
dev.off()
if(export_outliers > 0L & min_N + max_N > 0L) {
if(min_N + max_N <= export_outliers | export_outliers == 1) {
write.table(dataset[goodOnes & (dataset[ , data_col] < minbreaks | dataset[ , data_col] > maxbreaks), ],
paste0(save_dir, "/", save_name, ".txt"), col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
} else {
write.table(dataset[goodOnes & (dataset[ , data_col] < minbreaks | dataset[ , data_col] > maxbreaks), ][1:export_outliers, ],
paste0(save_dir, "/", save_name, ".txt"), col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t")
} } }
return(invisible())
}
|
#' @export
exps <- function(n = 1e7, alpha = 0.5) {
  # Exponential-smoothing benchmark, generalized: the original hard-coded
  # n = 1e7 and alpha = 0.5; the defaults reproduce exactly that behaviour.
  #
  # n:     number of uniform random observations to generate and smooth.
  # alpha: smoothing factor in (0, 1].
  # Returns a numeric vector of length n + 1: s[1] = x[1] and
  # s[i] = alpha * x[i-1] + (1 - alpha) * s[i-1] for i > 1.
  exps_f <- function(x, alpha) {
    s <- numeric(length(x) + 1)
    for (i in seq_along(s)) {
      if (i == 1) {
        s[i] <- x[i]
      } else {
        s[i] <- alpha * x[i-1] + (1-alpha) * s[i-1]
      }
    }
    return(s)
  }
  x <- runif(n)
  return(exps_f(x, alpha))
}
| /R/exps.R | no_license | UWQuickstep/rosa | R | false | false | 295 | r | #' @export
exps <- function(n = 1e7, alpha = 0.5) {
  # Exponential-smoothing benchmark, generalized: the original hard-coded
  # n = 1e7 and alpha = 0.5; the defaults reproduce exactly that behaviour.
  #
  # n:     number of uniform random observations to generate and smooth.
  # alpha: smoothing factor in (0, 1].
  # Returns a numeric vector of length n + 1: s[1] = x[1] and
  # s[i] = alpha * x[i-1] + (1 - alpha) * s[i-1] for i > 1.
  exps_f <- function(x, alpha) {
    s <- numeric(length(x) + 1)
    for (i in seq_along(s)) {
      if (i == 1) {
        s[i] <- x[i]
      } else {
        s[i] <- alpha * x[i-1] + (1-alpha) * s[i-1]
      }
    }
    return(s)
  }
  x <- runif(n)
  return(exps_f(x, alpha))
}
|
# Reproducible-research "activity monitoring" analysis: daily step totals,
# mean/median per day, and the average daily activity pattern per interval.
par(mfrow = c(3,1))
##Read data
steps <- read.csv("activity.csv")
## transform date column to be of type 'date'
steps$date <- as.Date(steps$date)
## calculate steps per day (ignore NA)
stepsPerDay <- aggregate(steps ~ date,data = steps, FUN = sum, na.action = na.omit )
hist(stepsPerDay$steps,
main = "Number of steps per day",
xlab = "Number of steps",
breaks = 10,
col = "grey",
xlim = c(0,25000))
# Per-day mean and median, merged into one table with columns date/mean/median.
stepsMean <- aggregate(steps ~ date,data = steps, FUN = mean, na.action = na.omit )
stepsMedian <- aggregate(steps ~ date,data = steps, FUN = median, na.action = na.omit )
merged <- merge(stepsMean, stepsMedian, by = "date")
names(merged) <- c("date","mean","median")
merged
## Calculate the mean and median daily number of steps
## Calculate and plot the average number of steps per 5-minute interval
stepsPerInterval <- aggregate(steps~interval, data = steps, FUN = mean, na.action = na.omit)
with(stepsPerInterval, plot(interval, steps, type = "l", col = "red", xlab = "5-minute interval", main = "Average number of steps per 5-minute interval"))
##Find which interval the highest average number of steps has
maxInterval <- stepsPerInterval[which.max(stepsPerInterval$steps),1]
abline(v= maxInterval, lwd = 3, lty = 2)
## NAs occur only in the 'steps' column; count them.
sum(is.na(steps$steps))
## Impute each missing value with that interval's mean number of steps
## (averaged over all days). match() replaces the original per-row loop,
## which also hit the 1:length(idNA) trap (1:0 iterates twice) when there
## were no NAs at all.
idNA <- which(is.na(steps$steps))
stepsFilled <- steps
stepsFilled$steps[idNA] <- stepsPerInterval$steps[match(steps$interval[idNA], stepsPerInterval$interval)]
## calculate steps per day (ignore NA)
# Same summaries as before, recomputed on the NA-imputed data.
stepsPerDayFilled <- aggregate(steps ~ date,data = stepsFilled, FUN = sum, na.action = na.omit )
hist(stepsPerDayFilled$steps,
main = "Number of steps per day",
xlab = "Number of steps",
## breaks = 10,
col = "grey",
xlim = c(0,25000))
## Calculate the mean and median daily number of steps
stepsFilledMean <- aggregate(steps ~ date,data = stepsFilled, FUN = mean, na.action = na.omit )
stepsFilledMedian <- aggregate(steps ~ date,data = stepsFilled, FUN = median, na.action = na.omit )
mergedFilled <- merge(stepsFilledMean, stepsFilledMedian, by = "date")
names(mergedFilled) <- c("date","mean","median")
mergedFilled
## Fill in Weekday/Weekend
# Classify each date as Weekend (Sat/Sun) or Weekday.
stepsFilled$Weekday <- weekdays(stepsFilled$date)
stepsFilled$daytype <- as.factor(ifelse(stepsFilled$Weekday %in% c("Saturday","Sunday"), "Weekend","Weekday"))
## create plot
library(ggplot2)
stepsIntervalDaytype <- aggregate(steps~interval+daytype, data = stepsFilled, FUN = mean, na.action = na.omit)
# aes() only takes x and y positionally; the original stray third positional
# argument (daytype) was not a valid mapping. The weekday/weekend split
# already comes from facet_wrap(~daytype).
ggplot(stepsIntervalDaytype, aes(interval, steps)) +
geom_line() +
facet_wrap(~daytype, ncol =1) +
labs(y = "number of steps", x = "5-minute interval")
| /run_analysis.R | no_license | eddiewan/CourseraReproducibleResearch | R | false | false | 2,848 | r |
par(mfrow = c(3,1))
##Read data
steps <- read.csv("activity.csv")
## transform date column to be of type 'date'
steps$date <- as.Date(steps$date)
## calculate steps per day (ignore NA)
stepsPerDay <- aggregate(steps ~ date,data = steps, FUN = sum, na.action = na.omit )
hist(stepsPerDay$steps,
main = "Number of steps per day",
xlab = "Number of steps",
breaks = 10,
col = "grey",
xlim = c(0,25000))
stepsMean <- aggregate(steps ~ date,data = steps, FUN = mean, na.action = na.omit )
stepsMedian <- aggregate(steps ~ date,data = steps, FUN = median, na.action = na.omit )
merged <- merge(stepsMean, stepsMedian, by = "date")
names(merged) <- c("date","mean","median")
merged
## Calculate the mean and median daily number of steps
## Calculate and plot the average number of steps per 5-minute interval
stepsPerInterval <- aggregate(steps~interval, data = steps, FUN = mean, na.action = na.omit)
with(stepsPerInterval, plot(interval, steps, type = "l", col = "red", xlab = "5-minute interval", main = "Average number of steps per 5-minute interval"))
##Find which interval the highest average number of steps has
maxInterval <- stepsPerInterval[which.max(stepsPerInterval$steps),1]
abline(v= maxInterval, lwd = 3, lty = 2)
## NAs occur only in the 'steps' column; count them.
sum(is.na(steps$steps))
## Impute each missing value with that interval's mean number of steps
## (averaged over all days). match() replaces the original per-row loop,
## which also hit the 1:length(idNA) trap (1:0 iterates twice) when there
## were no NAs at all.
idNA <- which(is.na(steps$steps))
stepsFilled <- steps
stepsFilled$steps[idNA] <- stepsPerInterval$steps[match(steps$interval[idNA], stepsPerInterval$interval)]
## calculate steps per day (ignore NA)
stepsPerDayFilled <- aggregate(steps ~ date,data = stepsFilled, FUN = sum, na.action = na.omit )
hist(stepsPerDayFilled$steps,
main = "Number of steps per day",
xlab = "Number of steps",
## breaks = 10,
col = "grey",
xlim = c(0,25000))
## Calculate the mean and median daily number of steps
stepsFilledMean <- aggregate(steps ~ date,data = stepsFilled, FUN = mean, na.action = na.omit )
stepsFilledMedian <- aggregate(steps ~ date,data = stepsFilled, FUN = median, na.action = na.omit )
mergedFilled <- merge(stepsFilledMean, stepsFilledMedian, by = "date")
names(mergedFilled) <- c("date","mean","median")
mergedFilled
## Fill in Weekday/Weekend
stepsFilled$Weekday <- weekdays(stepsFilled$date)
stepsFilled$daytype <- as.factor(ifelse(stepsFilled$Weekday %in% c("Saturday","Sunday"), "Weekend","Weekday"))
## create plot
library(ggplot2)
stepsIntervalDaytype <- aggregate(steps~interval+daytype, data = stepsFilled, FUN = mean, na.action = na.omit)
# aes() only takes x and y positionally; the original stray third positional
# argument (daytype) was not a valid mapping. The weekday/weekend split
# already comes from facet_wrap(~daytype).
ggplot(stepsIntervalDaytype, aes(interval, steps)) +
geom_line() +
facet_wrap(~daytype, ncol =1) +
labs(y = "number of steps", x = "5-minute interval")
|
## module load r/3.5.0-py2-qqwf6c6
## source ~/bin/system.py3.6.5_env/bin/activate
## install Seurat Realease 3.0
#install.packages('devtools')
#devtools::install_github(repo = 'satijalab/seurat', ref = 'release/3.0')
# cowplot enables side-by-side ggplots
library(cowplot)
library(Seurat)
out_objects_dir <- "./results3.0/R.out/data/Robjects"
out_plot_dir <- "./results3.0/R.out/plots"
out_document_dir <- "./results3.0/R.out/results"
# All three output directories are written to later in this script (pdf(),
# write.table(), save.image()); the original only created out_document_dir,
# so create every missing one up front.
for (d in c(out_objects_dir, out_plot_dir, out_document_dir)) {
  if (!dir.exists(d)) {
    dir.create(d, recursive = TRUE)
  }
}
library_id <- c("A", "B", "C", "CT2-1NOV", "CT2-30OCT")
# Load the QC'd expression list: counts matrix plus cell (phenoData) and
# gene (featureData) annotations produced by an earlier QC step.
dataList <- readRDS(file=file.path(out_objects_dir, "ExpressionList_QC.rds"))
m <- dataList[["counts"]]
pD <- dataList[["phenoData"]]
fD <- dataList[["featureData"]]
# Genes with an undetermined keep flag are dropped.
fD$keep[is.na(fD$keep)] <- FALSE
rm(dataList)
# Gene and cell filtering
# Keep genes flagged 'keep' and cells passing all QC checks.
m <- m[fD$keep, pD$PassAll] ## 11707 genes X 15847 cells # CR3.0 11442 X 16668
pD <- pD[pD$PassAll, ]
rownames(pD) <- pD[, 1]
fD <- fD[fD$keep, ]
# subset data
# One Seurat object per sample, each normalized with variable features found.
pbmc <- CreateSeuratObject(counts = m, meta.data = pD)
pbmc.list <- SplitObject(object = pbmc, split.by = "SampleID")
# setup Seurat objects since both count matrices have already filtered
# cells, we do no additional filtering here
pbmc.list <- lapply(pbmc.list, function(.x){
temp <- NormalizeData(object = .x)
temp <- FindVariableFeatures(object = temp)
temp
})
### Integration of 5 PBMC cell datasets
# Anchor-based integration (Seurat v3) across the per-sample objects.
pbmc_int <- FindIntegrationAnchors(object.list = pbmc.list, dims = 1:30)
pbmc.integrated <- IntegrateData(anchorset = pbmc_int, dims = 1:30)
## integrated analysis
DefaultAssay(object = pbmc.integrated) <- "integrated"
# Run the standard workflow for visualization and clustering
# Regress out UMI depth and mitochondrial fraction before PCA.
pbmc.integrated <- ScaleData(object = pbmc.integrated,
vars.to.regress = c("UmiSums", "prcntMito"),
verbose = FALSE)
pbmc.integrated <- RunPCA(object = pbmc.integrated,
features = pbmc.integrated$integrated@var.features,
npcs = 50, verbose = FALSE)
## plot variance
# Percent variance explained per PC (scree plot). NOTE(review): 'sd' and
# 'var' shadow the base R functions of the same name within this script.
sd <- pbmc.integrated@reductions$pca@stdev
var <- sd^2/(sum(sd^2))*100
pdf(file.path(out_plot_dir, "1.7.Scree plot of vairance for PCA.pdf"),
height = 10, width = 10)
plot(x=1:50, y=var, pch = 16, type= "b",
ylab= "Variance (%)", xlab = "Principle component")
dev.off()
## UMAP: This depends on python package umap-learn
# The first 18 PCs are used throughout for embedding and clustering.
pbmc.integrated <- RunUMAP(object = pbmc.integrated,
reduction = "pca", dims = 1:18)
## TSNE
pbmc.integrated <- RunTSNE(object = pbmc.integrated, reduction = "pca",
dims = 1:18)
pbmc.integrated <- FindNeighbors(object = pbmc.integrated, reduction = "pca",
dims = 1:18 )
pbmc.integrated <- FindClusters(object = pbmc.integrated, reduction = "pca",
dims = 1:18, save.SNN = TRUE)
# 2x2 panel: t-SNE and UMAP, coloured by sample and by cluster.
pdf(file.path(out_plot_dir, "1.8.18 PCA-Tsne and Umap plot of cell clusters.pdf"),
width = 15, height = 12)
p1 <- DimPlot(object = pbmc.integrated, reduction = "tsne",
group.by = "SampleID", pt.size =0.5)
p2 <- DimPlot(object = pbmc.integrated, reduction = "tsne",
do.return = TRUE, label = TRUE, pt.size = 0.5)
p3 <- DimPlot(object = pbmc.integrated, reduction = "umap",
group.by = "SampleID", pt.size =0.5)
p4 <- DimPlot(object = pbmc.integrated, reduction = "umap",
do.return = TRUE, label = TRUE, pt.size = 0.5)
plot_grid(p1, p2, p3, p4, nrow =2)
dev.off()
# Per-cluster marker detection (Wilcoxon test), written to disk; markers with
# avg_logFC >= 1 are shown in the heatmap.
all_markers <- FindAllMarkers(object = pbmc.integrated, test.use = "wilcox")
write.table(all_markers, file = file.path(out_document_dir,
"1.0.All.marker.genes.no.imputation.txt"),
sep = "\t", quote = FALSE, row.names = FALSE)
markers.use <- subset(all_markers, avg_logFC >= 1)$gene
pdf(file.path(out_plot_dir,"1.8.Markers.plot.pdf"), height = 40, width = 15)
DoHeatmap(object = pbmc.integrated,
features = markers.use,
cells = NULL,
group.by = "ident", size =1.5,
group.bar = TRUE, disp.min = -2.5, disp.max = NULL,
slot = "scale.data", assay = NULL, label = TRUE,
hjust = 0, angle = 90, combine = TRUE)
dev.off()
# Curated immune-cell marker gene symbols to overlay on the embeddings.
markers <- c("CD3E", "CD4","CD5", "CD8A", "CD8B", "TRDC",
"GZMB", "IFNG", "CD79A", "CD79B", "CD19",
"CD69", "MS4A1", "FCER1G", "MS4A2", "JCHAIN",
"ITGAM","FCGR1A", "CD14", "SERPING1", "MX1",
"IL1RAP", "IFNGR1", "CST3", "TLR4",
"NCR1", "KLRB1", "GNLY", "LYZ", "MCM2",
"MCM3", "TOP2A", "CCNB1", "PCNA")
# Map each symbol to feature rownames ending in "-SYMBOL" (fD rownames
# appear to combine an ID and the symbol -- TODO confirm naming scheme).
features <- do.call("c", lapply(markers, function(.x) {
rownames(fD)[grepl(paste0("-", .x, "$"), rownames(fD), perl = TRUE)]
}))
## add hemoglobin alpha gene, ""ENSSSCG00000007978"
features <- c(features, "ENSSSCG00000007978")
# Marker-expression overlays on the t-SNE and UMAP embeddings.
pdf(file.path(out_plot_dir,"1.9.Overlay of markers.pdf"),
height = 42, width = 31)
FeaturePlot(object = pbmc.integrated, features = features, dims = c(1, 2),
cells = NULL, cols = c("lightgrey", "red"),
pt.size = 1, min.cutoff = "q9",
max.cutoff = NA, reduction = "tsne", split.by = NULL,
shape.by = NULL, blend = FALSE, blend.threshold = 0.5,
order = NULL, label = TRUE, label.size = 4, ncol = 5,
combine = TRUE, coord.fixed = TRUE, sort.cell = TRUE)
dev.off()
pdf(file.path(out_plot_dir,"2.0.Overlay of markers on umap.pdf"),
height = 42, width = 31)
FeaturePlot(object = pbmc.integrated, features = features,
dims = c(1, 2), cells = NULL,
cols = c("lightgrey", "red"), pt.size = 1, min.cutoff = "q9",
max.cutoff = NA, reduction = "umap", split.by = NULL,
shape.by = NULL, blend = FALSE, blend.threshold = 0.5,
order = NULL, label = TRUE, label.size = 4, ncol = 5,
combine = TRUE, coord.fixed = TRUE, sort.cell = TRUE)
dev.off()
# Persist the full workspace (integrated object included) for downstream use.
save.image(file = file.path(out_objects_dir,
"Seurat.integrated.without.imputation.RData"))
| /R scripts/3.0.Seurat.analysis.wo.imputation.R | no_license | haibol2016/scRNAseq_data_analysis | R | false | false | 6,293 | r | ## module load r/3.5.0-py2-qqwf6c6
## source ~/bin/system.py3.6.5_env/bin/activate
## install Seurat Release 3.0
#install.packages('devtools')
#devtools::install_github(repo = 'satijalab/seurat', ref = 'release/3.0')

# Integrated Seurat 3.0 analysis of 5 PBMC scRNA-seq libraries without
# imputation: QC-filtered counts -> per-sample normalization ->
# anchor-based integration -> scaling/PCA -> UMAP/tSNE/clustering ->
# marker detection and marker overlay plots.

# cowplot enables side-by-side ggplots
library(cowplot)
library(Seurat)

# Output locations for R objects, plots, and result tables.
out_objects_dir <- "./results3.0/R.out/data/Robjects"
out_plot_dir <- "./results3.0/R.out/plots"
out_document_dir <- "./results3.0/R.out/results"
if(!dir.exists(out_document_dir))
{
  dir.create(out_document_dir, recursive = TRUE)
}
library_id <- c("A", "B", "C", "CT2-1NOV", "CT2-30OCT")

# Load the QC'ed expression list: counts matrix plus cell-level (phenoData)
# and gene-level (featureData) metadata produced by an upstream QC script.
dataList <- readRDS(file=file.path(out_objects_dir, "ExpressionList_QC.rds"))
m <- dataList[["counts"]]
pD <- dataList[["phenoData"]]
fD <- dataList[["featureData"]]
fD$keep[is.na(fD$keep)] <- FALSE
rm(dataList)

# Gene and cell filtering
m <- m[fD$keep, pD$PassAll] ## 11707 genes X 15847 cells # CR3.0 11442 X 16668
pD <- pD[pD$PassAll, ]
rownames(pD) <- pD[, 1]
fD <- fD[fD$keep, ]

# Build one Seurat object, then split per sample for anchor-based integration.
pbmc <- CreateSeuratObject(counts = m, meta.data = pD)
pbmc.list <- SplitObject(object = pbmc, split.by = "SampleID")

# setup Seurat objects since both count matrices have already filtered
# cells, we do no additional filtering here
pbmc.list <- lapply(pbmc.list, function(.x){
    temp <- NormalizeData(object = .x)
    temp <- FindVariableFeatures(object = temp)
    temp
})

### Integration of 5 PBMC cell datasets
pbmc_int <- FindIntegrationAnchors(object.list = pbmc.list, dims = 1:30)
pbmc.integrated <- IntegrateData(anchorset = pbmc_int, dims = 1:30)

## integrated analysis
DefaultAssay(object = pbmc.integrated) <- "integrated"

# Run the standard workflow for visualization and clustering.
# Regress out sequencing depth (UmiSums) and mitochondrial fraction (prcntMito).
pbmc.integrated <- ScaleData(object = pbmc.integrated,
                             vars.to.regress = c("UmiSums", "prcntMito"),
                             verbose = FALSE)
# FIX: `pbmc.integrated$integrated` accesses meta.data (not assays) on a
# Seurat v3 object and returns NULL, so `NULL@var.features` errored.
# Use VariableFeatures() to fetch the integrated assay's variable features.
pbmc.integrated <- RunPCA(object = pbmc.integrated,
                          features = VariableFeatures(object = pbmc.integrated),
                          npcs = 50, verbose = FALSE)

## Scree plot of per-PC variance, used to choose dims (1:18 below).
sd <- pbmc.integrated@reductions$pca@stdev
var <- sd^2/(sum(sd^2))*100
pdf(file.path(out_plot_dir, "1.7.Scree plot of vairance for PCA.pdf"),
    height = 10, width = 10)
plot(x=1:50, y=var, pch = 16, type= "b",
     ylab= "Variance (%)", xlab = "Principle component")
dev.off()

## UMAP: This depends on python package umap-learn
pbmc.integrated <- RunUMAP(object = pbmc.integrated,
                           reduction = "pca", dims = 1:18)
## TSNE
pbmc.integrated <- RunTSNE(object = pbmc.integrated, reduction = "pca",
                           dims = 1:18)
pbmc.integrated <- FindNeighbors(object = pbmc.integrated, reduction = "pca",
                                 dims = 1:18 )
pbmc.integrated <- FindClusters(object = pbmc.integrated, reduction = "pca",
                                dims = 1:18, save.SNN = TRUE)

# tSNE/UMAP panels colored by sample of origin (p1, p3) and cluster (p2, p4).
pdf(file.path(out_plot_dir, "1.8.18 PCA-Tsne and Umap plot of cell clusters.pdf"),
    width = 15, height = 12)
p1 <- DimPlot(object = pbmc.integrated, reduction = "tsne",
              group.by = "SampleID", pt.size =0.5)
p2 <- DimPlot(object = pbmc.integrated, reduction = "tsne",
              do.return = TRUE, label = TRUE, pt.size = 0.5)
p3 <- DimPlot(object = pbmc.integrated, reduction = "umap",
              group.by = "SampleID", pt.size =0.5)
p4 <- DimPlot(object = pbmc.integrated, reduction = "umap",
              do.return = TRUE, label = TRUE, pt.size = 0.5)
plot_grid(p1, p2, p3, p4, nrow =2)
dev.off()

# Per-cluster marker genes (Wilcoxon rank-sum test), written to disk.
all_markers <- FindAllMarkers(object = pbmc.integrated, test.use = "wilcox")
write.table(all_markers, file = file.path(out_document_dir,
                                          "1.0.All.marker.genes.no.imputation.txt"),
            sep = "\t", quote = FALSE, row.names = FALSE)

# Heatmap restricted to strong markers (avg logFC >= 1).
markers.use <- subset(all_markers, avg_logFC >= 1)$gene
pdf(file.path(out_plot_dir,"1.8.Markers.plot.pdf"), height = 40, width = 15)
DoHeatmap(object = pbmc.integrated,
          features = markers.use,
          cells = NULL,
          group.by = "ident", size =1.5,
          group.bar = TRUE, disp.min = -2.5, disp.max = NULL,
          slot = "scale.data", assay = NULL, label = TRUE,
          hjust = 0, angle = 90, combine = TRUE)
dev.off()

# Canonical immune-cell marker symbols used to annotate clusters.
markers <- c("CD3E", "CD4","CD5", "CD8A", "CD8B", "TRDC",
             "GZMB", "IFNG", "CD79A", "CD79B", "CD19",
             "CD69", "MS4A1", "FCER1G", "MS4A2", "JCHAIN",
             "ITGAM","FCGR1A", "CD14", "SERPING1", "MX1",
             "IL1RAP", "IFNGR1", "CST3", "TLR4",
             "NCR1", "KLRB1", "GNLY", "LYZ", "MCM2",
             "MCM3", "TOP2A", "CCNB1", "PCNA")
# Map each symbol to the "<ID>-<symbol>" rownames used in fD.
features <- do.call("c", lapply(markers, function(.x) {
    rownames(fD)[grepl(paste0("-", .x, "$"), rownames(fD), perl = TRUE)]
}))
## add hemoglobin alpha gene, "ENSSSCG00000007978"
features <- c(features, "ENSSSCG00000007978")

pdf(file.path(out_plot_dir,"1.9.Overlay of markers.pdf"),
    height = 42, width = 31)
FeaturePlot(object = pbmc.integrated, features = features, dims = c(1, 2),
            cells = NULL, cols = c("lightgrey", "red"),
            pt.size = 1, min.cutoff = "q9",
            max.cutoff = NA, reduction = "tsne", split.by = NULL,
            shape.by = NULL, blend = FALSE, blend.threshold = 0.5,
            order = NULL, label = TRUE, label.size = 4, ncol = 5,
            combine = TRUE, coord.fixed = TRUE, sort.cell = TRUE)
dev.off()

pdf(file.path(out_plot_dir,"2.0.Overlay of markers on umap.pdf"),
    height = 42, width = 31)
FeaturePlot(object = pbmc.integrated, features = features,
            dims = c(1, 2), cells = NULL,
            cols = c("lightgrey", "red"), pt.size = 1, min.cutoff = "q9",
            max.cutoff = NA, reduction = "umap", split.by = NULL,
            shape.by = NULL, blend = FALSE, blend.threshold = 0.5,
            order = NULL, label = TRUE, label.size = 4, ncol = 5,
            combine = TRUE, coord.fixed = TRUE, sort.cell = TRUE)
dev.off()

# Persist the full workspace for downstream scripts.
save.image(file = file.path(out_objects_dir,
                            "Seurat.integrated.without.imputation.RData"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterC.R
\name{filterC}
\alias{filterC}
\title{Filter IGNORE=C}
\usage{
filterC(ds, ignore = "C")
}
\arguments{
\item{ds}{Longform dataset with first column marked to indicate ignored rows.}
\item{ignore}{Character in the first column used to indicate which rows to
ignore. Defaults to "C".}
}
\value{
Data.frame \code{ds} with the rows marked ignore removed, and the
entire first column removed.
}
\description{
This function filters out commented rows from a NONMEM-style dataset, and
removes the comment column for plotting.
}
\details{
This function takes a data.frame with the first column marked to indicate
rows to ignore. The default value for ignore is "C", similar to NONMEM,
however any alphanumeric character can be used. The function will return the
data.frame \code{ds} with all indicated rows removed as well as the entire
first column.
}
\examples{
dataset <- data.frame(rep(c("C","."), 10), c(1:20), LETTERS[1:20], letters[1:20])
names(dataset) <- c("C", "SID", "cov1", "cov2")
output <- filterC(dataset)
output
}
\author{
Samuel Callisto \email{calli055@umn.edu}
}
| /man/filterC.Rd | no_license | ftuhin2828/dataTools | R | false | true | 1,176 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filterC.R
\name{filterC}
\alias{filterC}
\title{Filter IGNORE=C}
\usage{
filterC(ds, ignore = "C")
}
\arguments{
\item{ds}{Longform dataset with first column marked to indicate ignored rows.}
\item{ignore}{Character in the first column used to indicate which rows to
ignore. Defaults to "C".}
}
\value{
Data.frame \code{ds} with the rows marked ignore removed, and the
entire first column removed.
}
\description{
This function filters out commented rows from a NONMEM-style dataset, and
removes the comment column for plotting.
}
\details{
This function takes a data.frame with the first column marked to indicate
rows to ignore. The default value for ignore is "C", similar to NONMEM,
however any alphanumeric character can be used. The function will return the
data.frame \code{ds} with all indicated rows removed as well as the entire
first column.
}
\examples{
dataset <- data.frame(rep(c("C","."), 10), c(1:20), LETTERS[1:20], letters[1:20])
names(dataset) <- c("C", "SID", "cov1", "cov2")
output <- filterC(dataset)
output
}
\author{
Samuel Callisto \email{calli055@umn.edu}
}
|
tempplot <- function(){
  # Read the pisum dataset from pisum.csv and draw a dodged bar chart of
  # `values` by `names`.
  # Side effects: prints the data frame, then prints the plot.
  require(ggplot2)
  require(scales)
  file_pisum <- "pisum.csv"
  df_pisum <- read.table(file=file_pisum, header=T, sep=",", stringsAsFactors=F)
  print(df_pisum)
  # FIX: `p` was never assigned (the construction line was commented out),
  # so print(p) below failed with "object 'p' not found". Build the plot
  # first, using the originally intended geom_bar() call.
  p <- ggplot( df_pisum, aes(x=names, y=values) ) +
    geom_bar(position="dodge", stat="identity")
  # Earlier experiments with y-axis transforms (scale_y_log10, log2 via
  # scales::trans_breaks()/trans_format(), annotation_logticks, and a
  # reversed-log transform built with trans_new) were left commented out
  # in the original; re-add a scale_y_* layer here if needed.
  print(p)
}
| /no_arma/maketable/tempplot.R | no_license | deanbodenham/benchmarks_rpycpp | R | false | false | 1,133 | r | tempplot <- function(){
require(ggplot2)
require(scales)
file_pisum <- "pisum.csv"
df_pisum <- read.table(file=file_pisum, header=T, sep=",", stringsAsFactors=F)
print(df_pisum)
# p <- ggplot( df_pisum, aes(x=names, y=values) ) + geom_bar(position="dodge", stat="identity") #+ coord_trans(y="log10")
# p <- p + scale_y_log10()
# p + scale_y_continuous(trans = log2_trans(),
# breaks = trans_breaks("log2", function(x) 2^x),
# labels = trans_format("log2", math_format(2^.x)))
# p <- p + scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
# labels = trans_format("log10", math_format(10^.x)))
# p <- p + annotation_logticks(sides="trbl")
#
# library("scales")
# reverselog_trans <- function(base = exp(1)) {
# trans <- function(x) -log(x, base)
# inv <- function(x) base^(-x)
# trans_new(paste0("reverselog-", format(base)), trans, inv,
# log_breaks(base = base),
# domain = c(1e-100, Inf))
# }
# p <- p + scale_y_continuous(trans=reverselog_trans(10))
#
# p <- p + theme_bw()
print(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alertAreas.R
\docType{data}
\name{alertAreas}
\alias{alertAreas}
\title{Alert Areas Used by the National Weather Service}
\format{
A \code{\link[sp:SpatialPolygons-class]{SpatialPolygons}} object of
length 7526 whose names are 2-letter state abbreviations and 6-character
Universal Geographic Code (\acronym{UGC}) county and zone codes. Polygons
are specified in WGS84 coordinates.
}
\source{
\url{https://www.weather.gov/gis/AWIPSShapefiles}
}
\usage{
alertAreas
}
\description{
Polygons defining the states, counties, and zones used by the United States
National Weather Service (\acronym{NWS}) to define alert areas.
}
\details{
Package will be periodically updated following updates to alert
areas by the National Weather Service.
}
\seealso{
Package \pkg{weatherAlerts}
}
\keyword{datasets}
| /man/alertAreas.Rd | no_license | ianmcook/weatherAlertAreas | R | false | true | 882 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alertAreas.R
\docType{data}
\name{alertAreas}
\alias{alertAreas}
\title{Alert Areas Used by the National Weather Service}
\format{
A \code{\link[sp:SpatialPolygons-class]{SpatialPolygons}} object of
length 7526 whose names are 2-letter state abbreviations and 6-character
Universal Geographic Code (\acronym{UGC}) county and zone codes. Polygons
are specified in WGS84 coordinates.
}
\source{
\url{https://www.weather.gov/gis/AWIPSShapefiles}
}
\usage{
alertAreas
}
\description{
Polygons defining the states, counties, and zones used by the United States
National Weather Service (\acronym{NWS}) to define alert areas.
}
\details{
Package will be periodically updated following updates to alert
areas by the National Weather Service.
}
\seealso{
Package \pkg{weatherAlerts}
}
\keyword{datasets}
|
### June 2, 2014 ## getting ssh anomalies for all stations using DT_MSLA (monthly mean sea level anomolies)
library(maps)
library(spam)
library(fields)
library(chron)
library(ncdf)
SSH_6_11 = open.ncdf("dt_global_allsat_msla_h_y2011_m06.nc")
lats = get.var.ncdf(SSH_6_11, "lat")
## the latsU correspond to the sla lats and longs
lons = get.var.ncdf(SSH_6_11, "lon")
# for stations 31, 10-40, PC1120, PC1140, WBSL1040- lats and longs are ~ 29.125(477), 271.124(1085)
SSH_6_11_A =get.var.ncdf(SSH_6_11, "sla", start= c(1085,477,1), count=c(1,1,1))
# for stations 14, 4-40, BR0440 - lats and longs are ~ 28.1259(473), 275.625(1103)
SSH_6_11_B = get.var.ncdf(SSH_6_11, "sla", start=c(1103, 473, 1), count= c(1,1,1))
# for stations 36, PC1320- lats and longs are ~ 28.625(475) , 269.375(1078)
SSH_6_11_C = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 475, 1), count= c(1,1,1))
# for stations 38, PC1340, lats and longs ~ 28.125(473) and 269.4155(1078)
SSH_6_11_D = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 473, 1), count= c(1,1,1))
# for station 58 ~ 475, 1073
SSH_6_11_E = get.var.ncdf(SSH_6_11, "sla", start=c(1073, 475, 1), count= c(1,1,1))
# for station BR3440, (472, 1103)
SSH_6_11_F = get.var.ncdf(SSH_6_11, "sla", start=c(1103, 472, 1), count= c(1,1,1))
#for station PC0610 and PC0620, ~ (478, 1098)
SSH_6_11_G = get.var.ncdf(SSH_6_11, "sla", start=c(1098, 478, 1), count= c(1,1,1))
# for PC1220, 33, 34, (476,1083)
SSH_6_11_H = get.var.ncdf(SSH_6_11, "sla", start=c(1083, 476, 1), count= c(1,1,1))
#for PC1320, He265, 37 ~ (474, 1078)
SSH_6_11_I = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 474, 1), count= c(1,1,1))
# For PC1520 ~ (479, 1087)
SSH_6_11_J = get.var.ncdf(SSH_6_11, "sla", start=c(1087, 479, 1), count= c(1,1,1))
#For PC81460 (479, 1091)
SSH_6_11_K = get.var.ncdf(SSH_6_11, "sla", start=c(1091, 479, 1), count= c(1,1,1))
# For BOR0340 (471, 1104)
SSH_6_11_L = get.var.ncdf(SSH_6_11, "sla", start=c(1104, 471, 1), count= c(1,1,1))
# for BR0320 (471, 1107)
SSH_6_11_M = get.var.ncdf(SSH_6_11, "sla", start=c(1107, 471, 1), count= c(1,1,1))
#For 82 (472, 1102)
SSH_6_11_N = get.var.ncdf(SSH_6_11, "sla", start=c(1102, 472, 1), count= c(1,1,1))
# For WB16150 (475, 1080)
SSH_6_11_O = get.var.ncdf(SSH_6_11, "sla", start=c(1080, 475, 1), count= c(1,1,1))
# For 51 (476, 1080)  -- FIX: was a bare `For #51 ...` line (not a comment),
# which evaluates the undefined symbol `For` and aborts the script.
SSH_6_11_P = get.var.ncdf(SSH_6_11, "sla", start=c(1080, 476, 1), count= c(1,1,1))
# for 16 (476, 1100)
SSH_6_11_Q = get.var.ncdf(SSH_6_11, "sla", start=c(1100, 476, 1), count= c(1,1,1))
# For 15 (476,1101)
SSH_6_11_R = get.var.ncdf(SSH_6_11, "sla", start=c(1101, 476, 1), count= c(1,1,1))
#For 28 (477, 1086)
SSH_6_11_S = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 477, 1), count= c(1,1,1))
SSH_6_11_T = get.var.ncdf(SSH_6_11, "sla", start=c(1102, 477, 1), count= c(1,1,1))
#for Br 4/5 10 (477 1105)
SSH_6_11_U = get.var.ncdf(SSH_6_11, "sla", start=c(1105, 477, 1), count= c(1,1,1))
# for 27, PC1020 (478, 1086)
SSH_6_11_V = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 478, 1), count= c(1,1,1))
# for PC1010 (479,1086)
SSH_6_11_W = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 479, 1), count= c(1,1,1))
# for PC0920 (479, 1088)
SSH_6_11_X = get.var.ncdf(SSH_6_11, "sla", start=c(1088, 479, 1), count= c(1,1,1))
# For PC0910 (480, 1088)
SSH_6_11_Y = get.var.ncdf(SSH_6_11, "sla", start=c(1088, 480, 1), count= c(1,1,1))
# for PC1420 (480,1091)
SSH_6_11_Z = get.var.ncdf(SSH_6_11, "sla", start=c(1091, 480, 1), count= c(1,1,1))
# For WBSL840 (480, 1092)
SSH_6_11_AA = get.var.ncdf(SSH_6_11, "sla", start=c(1092, 480, 1), count= c(1,1,1))
# for PC0720 (481, 1095)
SSH_6_11_BB = get.var.ncdf(SSH_6_11, "sla", start=c(1095, 481, 1), count= c(1,1,1))
# for PC1510 (481, 1087)
SSH_6_11_CC = get.var.ncdf(SSH_6_11, "sla", start=c(1087, 481, 1), count= c(1,1,1))
#for PC0710 (482, 1096)
SSH_6_11_DD = get.var.ncdf(SSH_6_11, "sla", start=c(1096, 482, 1), count= c(1,1,1))
letters = c("A", "B", "C", "D","E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "AA", "BB", "CC", "DD")
SSH = c(SSH_6_11_A, SSH_6_11_B, SSH_6_11_C, SSH_6_11_D, SSH_6_11_E, SSH_6_11_F, SSH_6_11_G, SSH_6_11_H, SSH_6_11_I, SSH_6_11_J, SSH_6_11_K, SSH_6_11_L, SSH_6_11_M, SSH_6_11_N, SSH_6_11_O, SSH_6_11_P, SSH_6_11_Q, SSH_6_11_R, SSH_6_11_S, SSH_6_11_T, SSH_6_11_U, SSH_6_11_V, SSH_6_11_W, SSH_6_11_X, SSH_6_11_Y, SSH_6_11_Z, SSH_6_11_AA, SSH_6_11_BB, SSH_6_11_CC, SSH_6_11_DD)
working <- data.frame(cbind(letters, SSH)) | /SLA Scripts/June 2011_SLA.R | no_license | eherdter/r-work | R | false | false | 4,499 | r | ### June 2, 2014 ## getting ssh anomalies for all stations using DT_MSLA (monthly mean sea level anomolies)
library(maps)
library(spam)
library(fields)
library(chron)
library(ncdf)
SSH_6_11 = open.ncdf("dt_global_allsat_msla_h_y2011_m06.nc")
lats = get.var.ncdf(SSH_6_11, "lat")
## the latsU correspond to the sla lats and longs
lons = get.var.ncdf(SSH_6_11, "lon")
# for stations 31, 10-40, PC1120, PC1140, WBSL1040- lats and longs are ~ 29.125(477), 271.124(1085)
SSH_6_11_A =get.var.ncdf(SSH_6_11, "sla", start= c(1085,477,1), count=c(1,1,1))
# for stations 14, 4-40, BR0440 - lats and longs are ~ 28.1259(473), 275.625(1103)
SSH_6_11_B = get.var.ncdf(SSH_6_11, "sla", start=c(1103, 473, 1), count= c(1,1,1))
# for stations 36, PC1320- lats and longs are ~ 28.625(475) , 269.375(1078)
SSH_6_11_C = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 475, 1), count= c(1,1,1))
# for stations 38, PC1340, lats and longs ~ 28.125(473) and 269.4155(1078)
SSH_6_11_D = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 473, 1), count= c(1,1,1))
# for station 58 ~ 475, 1073
SSH_6_11_E = get.var.ncdf(SSH_6_11, "sla", start=c(1073, 475, 1), count= c(1,1,1))
# for station BR3440, (472, 1103)
SSH_6_11_F = get.var.ncdf(SSH_6_11, "sla", start=c(1103, 472, 1), count= c(1,1,1))
#for station PC0610 and PC0620, ~ (478, 1098)
SSH_6_11_G = get.var.ncdf(SSH_6_11, "sla", start=c(1098, 478, 1), count= c(1,1,1))
# for PC1220, 33, 34, (476,1083)
SSH_6_11_H = get.var.ncdf(SSH_6_11, "sla", start=c(1083, 476, 1), count= c(1,1,1))
#for PC1320, He265, 37 ~ (474, 1078)
SSH_6_11_I = get.var.ncdf(SSH_6_11, "sla", start=c(1078, 474, 1), count= c(1,1,1))
# For PC1520 ~ (479, 1087)
SSH_6_11_J = get.var.ncdf(SSH_6_11, "sla", start=c(1087, 479, 1), count= c(1,1,1))
#For PC81460 (479, 1091)
SSH_6_11_K = get.var.ncdf(SSH_6_11, "sla", start=c(1091, 479, 1), count= c(1,1,1))
# For BOR0340 (471, 1104)
SSH_6_11_L = get.var.ncdf(SSH_6_11, "sla", start=c(1104, 471, 1), count= c(1,1,1))
# for BR0320 (471, 1107)
SSH_6_11_M = get.var.ncdf(SSH_6_11, "sla", start=c(1107, 471, 1), count= c(1,1,1))
#For 82 (472, 1102)
SSH_6_11_N = get.var.ncdf(SSH_6_11, "sla", start=c(1102, 472, 1), count= c(1,1,1))
# For WB16150 (475, 1080)
SSH_6_11_O = get.var.ncdf(SSH_6_11, "sla", start=c(1080, 475, 1), count= c(1,1,1))
# For 51 (476, 1080)  -- FIX: was a bare `For #51 ...` line (not a comment),
# which evaluates the undefined symbol `For` and aborts the script.
SSH_6_11_P = get.var.ncdf(SSH_6_11, "sla", start=c(1080, 476, 1), count= c(1,1,1))
# for 16 (476, 1100)
SSH_6_11_Q = get.var.ncdf(SSH_6_11, "sla", start=c(1100, 476, 1), count= c(1,1,1))
# For 15 (476,1101)
SSH_6_11_R = get.var.ncdf(SSH_6_11, "sla", start=c(1101, 476, 1), count= c(1,1,1))
#For 28 (477, 1086)
SSH_6_11_S = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 477, 1), count= c(1,1,1))
SSH_6_11_T = get.var.ncdf(SSH_6_11, "sla", start=c(1102, 477, 1), count= c(1,1,1))
#for Br 4/5 10 (477 1105)
SSH_6_11_U = get.var.ncdf(SSH_6_11, "sla", start=c(1105, 477, 1), count= c(1,1,1))
# for 27, PC1020 (478, 1086)
SSH_6_11_V = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 478, 1), count= c(1,1,1))
# for PC1010 (479,1086)
SSH_6_11_W = get.var.ncdf(SSH_6_11, "sla", start=c(1086, 479, 1), count= c(1,1,1))
# for PC0920 (479, 1088)
SSH_6_11_X = get.var.ncdf(SSH_6_11, "sla", start=c(1088, 479, 1), count= c(1,1,1))
# For PC0910 (480, 1088)
SSH_6_11_Y = get.var.ncdf(SSH_6_11, "sla", start=c(1088, 480, 1), count= c(1,1,1))
# for PC1420 (480,1091)
SSH_6_11_Z = get.var.ncdf(SSH_6_11, "sla", start=c(1091, 480, 1), count= c(1,1,1))
# For WBSL840 (480, 1092)
SSH_6_11_AA = get.var.ncdf(SSH_6_11, "sla", start=c(1092, 480, 1), count= c(1,1,1))
# for PC0720 (481, 1095)
SSH_6_11_BB = get.var.ncdf(SSH_6_11, "sla", start=c(1095, 481, 1), count= c(1,1,1))
# for PC1510 (481, 1087)
SSH_6_11_CC = get.var.ncdf(SSH_6_11, "sla", start=c(1087, 481, 1), count= c(1,1,1))
#for PC0710 (482, 1096)
SSH_6_11_DD = get.var.ncdf(SSH_6_11, "sla", start=c(1096, 482, 1), count= c(1,1,1))
letters = c("A", "B", "C", "D","E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "AA", "BB", "CC", "DD")
SSH = c(SSH_6_11_A, SSH_6_11_B, SSH_6_11_C, SSH_6_11_D, SSH_6_11_E, SSH_6_11_F, SSH_6_11_G, SSH_6_11_H, SSH_6_11_I, SSH_6_11_J, SSH_6_11_K, SSH_6_11_L, SSH_6_11_M, SSH_6_11_N, SSH_6_11_O, SSH_6_11_P, SSH_6_11_Q, SSH_6_11_R, SSH_6_11_S, SSH_6_11_T, SSH_6_11_U, SSH_6_11_V, SSH_6_11_W, SSH_6_11_X, SSH_6_11_Y, SSH_6_11_Z, SSH_6_11_AA, SSH_6_11_BB, SSH_6_11_CC, SSH_6_11_DD)
working <- data.frame(cbind(letters, SSH)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility.R
\name{getcounts}
\alias{getcounts}
\title{getcounts}
\usage{
getcounts(input.bamfile.dir, annotation.bed.file, ld, rd, output.count.file.dir,
filter.sample)
}
\arguments{
\item{output.count.file.dir}{}
}
\description{
getcounts
}
\examples{
getcounts()
}
| /man/getcounts.Rd | no_license | aiminy/3UTR-Seq | R | false | true | 349 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility.R
\name{getcounts}
\alias{getcounts}
\title{getcounts}
\usage{
getcounts(input.bamfile.dir, annotation.bed.file, ld, rd, output.count.file.dir,
filter.sample)
}
\arguments{
\item{output.count.file.dir}{}
}
\description{
getcounts
}
\examples{
getcounts()
}
|
# Print the integrationConstant columns for the genes shown in the
# beeswarm figure, one tibble per gene (same output as the original
# copy-pasted pipelines, now driven by a single loop).
deseqTibAnno <- read_tsv(here('extractedData', 'DeSeqOutputAllConds.annotated.tsv'))

beeswarm.genes <- c('MAP3K1', 'GPRC5A', 'EPHB2', 'RIPK4', 'ZNF469')
for (gene in beeswarm.genes) {
  # print() is required inside the loop; top-level auto-printing does not
  # apply within `for` bodies.
  print(
    deseqTibAnno %>%
      filter(gene_name == gene) %>%
      dplyr::select(matches("integrationConstant"))
  )
}
dplyr::select(matches("integrationConstant")) | /plotScripts/printCvalsForBeeswarmPlotGenes.R | permissive | emsanford/combined_responses_paper | R | false | false | 597 | r | deseqTibAnno <- read_tsv(here('extractedData', 'DeSeqOutputAllConds.annotated.tsv'))
deseqTibAnno %>%
filter(gene_name == 'MAP3K1') %>%
dplyr::select(matches("integrationConstant"))
deseqTibAnno %>%
filter(gene_name == 'GPRC5A') %>%
dplyr::select(matches("integrationConstant"))
deseqTibAnno %>%
filter(gene_name == 'EPHB2') %>%
dplyr::select(matches("integrationConstant"))
deseqTibAnno %>%
filter(gene_name == 'RIPK4') %>%
dplyr::select(matches("integrationConstant"))
deseqTibAnno %>%
filter(gene_name == 'ZNF469') %>%
dplyr::select(matches("integrationConstant")) |
library(ade4)
### Name: rlq
### Title: RLQ analysis
### Aliases: rlq print.rlq plot.rlq summary.rlq randtest.rlq
### Keywords: multivariate spatial
### ** Examples
# RLQ analysis of the aviurba data: relates environment (R = $mil),
# species abundances (L = $fau), and species traits (Q = $traits).
data(aviurba)
# Correspondence analysis of the abundance table; its row/column weights
# are reused for the two Hill-Smith analyses below.
coa1 <- dudi.coa(aviurba$fau, scannf = FALSE, nf = 2)
dudimil <- dudi.hillsmith(aviurba$mil, scannf = FALSE, nf = 2, row.w = coa1$lw)
duditrait <- dudi.hillsmith(aviurba$traits, scannf = FALSE, nf = 2, row.w = coa1$cw)
rlq1 <- rlq(dudimil, coa1, duditrait, scannf = FALSE, nf = 2)
plot(rlq1)
summary(rlq1)
# Permutation test of the global trait-environment association.
randtest(rlq1)
# Fourth-corner statistics on the RLQ axes: traits (Q) and environment (R).
fourthcorner.rlq(rlq1,type="Q.axes")
fourthcorner.rlq(rlq1,type="R.axes")
| /data/genthat_extracted_code/ade4/examples/rlq.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 611 | r | library(ade4)
### Name: rlq
### Title: RLQ analysis
### Aliases: rlq print.rlq plot.rlq summary.rlq randtest.rlq
### Keywords: multivariate spatial
### ** Examples
data(aviurba)
coa1 <- dudi.coa(aviurba$fau, scannf = FALSE, nf = 2)
dudimil <- dudi.hillsmith(aviurba$mil, scannf = FALSE, nf = 2, row.w = coa1$lw)
duditrait <- dudi.hillsmith(aviurba$traits, scannf = FALSE, nf = 2, row.w = coa1$cw)
rlq1 <- rlq(dudimil, coa1, duditrait, scannf = FALSE, nf = 2)
plot(rlq1)
summary(rlq1)
randtest(rlq1)
fourthcorner.rlq(rlq1,type="Q.axes")
fourthcorner.rlq(rlq1,type="R.axes")
|
#{{{
#' jbhcxval()
#
#' Additional hindcast options with external forward projections.
#'
#' Re-projects the peeled years of each hindcast fit outside the model via
#' fw_jabba(), so process-error options (stochastic, AR1, sigma.proc, rho)
#' can be varied without refitting, then refreshes the affected forecast
#' time series and predicted CPUE from the new posteriors.
#'
#' @param hindcasts object (list of models) from hindcast_jabba()
#' @param stochastic if FALSE, process error sigma.proc is set to zero
#' @param AR1 if TRUE, projections account for auto correlation in the process devs
#' @param sigma.proc option to specify the process error other than the posterior estimate
#' @param rho if AR1 = TRUE, the autocorrelation coefficient is estimated from the proc devs
#' @param ndevs number of years on the tail to set initial proc.error for forecasting
#' @param run option to assign a scenario name other than specified in build_jabba()
#' @return data.frame of kobe posterior model + forecast scenarios
#' @export
#' @examples
#' data(iccat)
#' whm = iccat$whm
#' # ICCAT white marlin setup
#' jb = build_jabba(catch=whm$catch,cpue=whm$cpue,se=whm$se,assessment="WHM",scenario = "BaseCase",model.type = "Pella",r.prior = c(0.181,0.18),BmsyK = 0.39,igamma = c(0.001,0.001))
#' fit = fit_jabba(jb,quickmcmc=TRUE,verbose=TRUE)
#' hc = hindcast_jabba(jbinput=jb,fit=fit,peels=1:5)
#' jbplot_retro(hc)
#' jbplot_hcxval(hc,index=c(8,11))
#' hc.ar1 = jbhcxval(hc,AR1=TRUE) # do hindcasting with AR1
#' jbplot_hcxval(hc.ar1,index=c(8,11))
# {{{
jbhcxval <- function(hindcasts,
                     stochastic = c(TRUE, FALSE)[1],
                     AR1 = c(TRUE, FALSE)[1],
                     sigma.proc = NULL,
                     rho = NULL,
                     ndevs=1,
                     run = NULL){
  # Years peeled off per model; element 1 of `hindcasts` is the full
  # (reference) fit and reports peel 0, so it is dropped below.
  peels = do.call(c,lapply(hindcasts,function(x){
    x$diags$retro.peels[1]
  }))
  peels = as.numeric(peels[peels>0])
  # Cut internal forecasts: strip the peeled years from each hindcast model,
  # then re-project them forward with fw_jabba() using the observed catches
  # and the requested process-error settings; splice the projected
  # trajectory (run 2) back onto the retained trajectory (run 1).
  hc = lapply(hindcasts[-1],function(x){
    og = x
    nyears=length(tail(x$yr,x$diags$retro.peels[1]))
    x$yr = x$yr[x$yr%in%tail(x$yr,x$diags$retro.peels[1])==FALSE]
    x$kbtrj = x$kbtrj[x$kbtrj$year%in%x$yr,]
    fwtrj = fw_jabba(x,nyears=x$diags$retro.peels[1],
                     imp.yr = NULL,
                     quant = "Catch",
                     initial = x$catch[-c(1:length(x$yr))],
                     imp.values = 1,
                     type="abs",
                     stochastic = stochastic ,
                     AR1 = AR1,
                     sigma.proc = sigma.proc,
                     rho = rho,ndevs=ndevs)
    fc =fwtrj[fwtrj$run ==unique(fwtrj$run)[2],]
    # Drop the overlapping first projection year before splicing.
    fc = fc[fc$year!=min(fc$year),]
    og$kbtrj =rbind(fwtrj[fwtrj$run ==unique(fwtrj$run)[1],],fc)
    og
  })
  # NOTE(review): the two assignments below look like interactive debugging
  # leftovers; `x` and `y` are immediately shadowed by the Map() arguments.
  # Update forecast time-series
  x= hc[[1]]
  y = as.list(peels)[[1]]
  hcts = Map(function(x,y){
    ts = x$timeseries
    # Rows of `timeseries` covering the re-projected tail years; overwrite
    # the six summary quantities with median + 95% CI from the new posteriors.
    ny = tail(1:length(x$yr),y)
    for(i in 1:length(ny)){
      posteriors = x$kbtrj[x$kbtrj$year==x$yr[ny[i]],]
      x$timeseries[ny[i],,1:6] = cbind(quantile(posteriors$B,c(0.5,0.025,0.975)),
                                       quantile(posteriors$H,c(0.5,0.025,0.975)),
                                       quantile(posteriors$stock,c(0.5,0.025,0.975)),
                                       quantile(posteriors$harvest,c(0.5,0.025,0.975)),
                                       quantile(posteriors$BB0,c(0.5,0.025,0.975)),
                                       quantile(posteriors$Bdev,c(0.5,0.025,0.975)))
    }
    x
  },x=hc,y=as.list(peels))
  # Update forecast CPUE: for every index flagged as hindcast, recompute the
  # predicted CPUE (hat and its 95% CI) as catchability q times projected
  # biomass across the posterior draws.
  hcI = lapply(hcts, function(x){
    qs = as.matrix(x$pars_posterior[,grep("q",names(x$pars_posterior))])
    sets.q = x$settings$sets.q
    nq = length(sets.q)
    diags=x$diags
    idxs = unique(diags$name)
    for(j in 1:nq){
      if(tail(diags[diags$name==idxs[j],]$hindcast,1)==TRUE){
        sub = diags[diags$name==idxs[j]&diags$hindcast,]
        nhc = nrow(sub)
        for(i in 1:nhc){
          hat = c(quantile(x$kbtrj[x$kbtrj$year==sub$year[i],]$B*qs[,sets.q[j]],
                           c(0.5,0.025,0.975)))
          diags[diags$name==idxs[j]&diags$hindcast&diags$year==sub$year[i],
                c("hat","hat.lci","hat.uci")] = hat
        } # end i
      }} # end j
    x$diags = diags
    x
  })
  # Re-attach the untouched reference fit in front of the updated peels.
  out = c(hindcasts[1],hcI)
  return(out)
}
#}}}
| /R/jbhcxval.R | no_license | jabbamodel/JABBA | R | false | false | 3,888 | r | #{{{
#' jbhcxval()
#
#' additional hindcast options with external foreward projections
#'
#' @param hc object (list of models) from hindcast_jabba()
#' @param stochastic if FALSE, process error sigma.proc is set to zero
#' @param AR1 if TRUE, projection account auto correlation in the process devs
#' @param rho if AR1 = TRUE, the autocorrelation coefficient is estimated from the proc devs
#' @param sigma.proc option to specify the process error other than the posterior estimate
#' @param ndevs number years on the tail to set initial proc.error for forecasting
#' @param run option to assign a scenario name other than specified in build_jabba()
#' @param thin option to thin the posterior at rates > 1
#' @return data.frame of kobe posterior model + forecast scenarios
#' @export
#' @examples
#' data(iccat)
#' whm = iccat$whm
#' # ICCAT white marlin setup
#' jb = build_jabba(catch=whm$catch,cpue=whm$cpue,se=whm$se,assessment="WHM",scenario = "BaseCase",model.type = "Pella",r.prior = c(0.181,0.18),BmsyK = 0.39,igamma = c(0.001,0.001))
#' fit = fit_jabba(jb,quickmcmc=TRUE,verbose=TRUE)
#' hc = hindcast_jabba(jbinput=jb,fit=fit,peels=1:5)
#' jbplot_retro(hc)
#' jbplot_hcxval(hc,index=c(8,11))
#' hc.ar1 = jbhcxval(hc,AR1=TRUE) # do hindcasting with AR1
#' jbplot_hcxval(hc.ar1,index=c(8,11))
# {{{
jbhcxval <- function(hindcasts,
stochastic = c(TRUE, FALSE)[1],
AR1 = c(TRUE, FALSE)[1],
sigma.proc = NULL,
rho = NULL,
ndevs=1,
run = NULL){
peels = do.call(c,lapply(hindcasts,function(x){
x$diags$retro.peels[1]
}))
peels = as.numeric(peels[peels>0])
# Cut internal forecasts
hc = lapply(hindcasts[-1],function(x){
og = x
nyears=length(tail(x$yr,x$diags$retro.peels[1]))
x$yr = x$yr[x$yr%in%tail(x$yr,x$diags$retro.peels[1])==FALSE]
x$kbtrj = x$kbtrj[x$kbtrj$year%in%x$yr,]
fwtrj = fw_jabba(x,nyears=x$diags$retro.peels[1],
imp.yr = NULL,
quant = "Catch",
initial = x$catch[-c(1:length(x$yr))],
imp.values = 1,
type="abs",
stochastic = stochastic ,
AR1 = AR1,
sigma.proc = sigma.proc,
rho = rho,ndevs=ndevs)
fc =fwtrj[fwtrj$run ==unique(fwtrj$run)[2],]
fc = fc[fc$year!=min(fc$year),]
og$kbtrj =rbind(fwtrj[fwtrj$run ==unique(fwtrj$run)[1],],fc)
og
})
# Update forecast time-series
x= hc[[1]]
y = as.list(peels)[[1]]
hcts = Map(function(x,y){
ts = x$timeseries
ny = tail(1:length(x$yr),y)
for(i in 1:length(ny)){
posteriors = x$kbtrj[x$kbtrj$year==x$yr[ny[i]],]
x$timeseries[ny[i],,1:6] = cbind(quantile(posteriors$B,c(0.5,0.025,0.975)),
quantile(posteriors$H,c(0.5,0.025,0.975)),
quantile(posteriors$stock,c(0.5,0.025,0.975)),
quantile(posteriors$harvest,c(0.5,0.025,0.975)),
quantile(posteriors$BB0,c(0.5,0.025,0.975)),
quantile(posteriors$Bdev,c(0.5,0.025,0.975)))
}
x
},x=hc,y=as.list(peels))
# Update forecast CPUE
hcI = lapply(hcts, function(x){
qs = as.matrix(x$pars_posterior[,grep("q",names(x$pars_posterior))])
sets.q = x$settings$sets.q
nq = length(sets.q)
diags=x$diags
idxs = unique(diags$name)
for(j in 1:nq){
if(tail(diags[diags$name==idxs[j],]$hindcast,1)==TRUE){
sub = diags[diags$name==idxs[j]&diags$hindcast,]
nhc = nrow(sub)
for(i in 1:nhc){
hat = c(quantile(x$kbtrj[x$kbtrj$year==sub$year[i],]$B*qs[,sets.q[j]],
c(0.5,0.025,0.975)))
diags[diags$name==idxs[j]&diags$hindcast&diags$year==sub$year[i],
c("hat","hat.lci","hat.uci")] = hat
} # end i
}} # end j
x$diags = diags
x
})
out = c(hindcasts[1],hcI)
return(out)
}
#}}}
|
# R Lecture to VTT - Lecture 1
# Author : Oguzhan Gencoglu
# Latest Version : 27.04.2016
# Contact : oguzhan.gencoglu@tut.fi
# ---------------- Data Wrangling ----------------
# Data aggregation and reshaping
# Teaching script: uses the built-in ChickWeight data set
# (chick weights over time under 4 diets).
my_data <- ChickWeight
str(my_data)
summary(my_data)
boxplot(my_data$weight)
#find the mean weight depending on diet
# aggregate(aggregated thing, by what (a list of grouping vars), function)
aggregate(list(mean_w = my_data$weight), list(diet = my_data$Diet), mean)
# find standard deviation of attributes w.r.t. different diets
aggregate(my_data, list(the_thing_that_i_am_grouping_by = my_data$Diet), sd)
# we could also aggregate on time and diet (one row per Time x Diet cell)
aggregate(list(mean_w = my_data$weight),
          list(time = my_data$Time, diet = my_data$Diet),
          mean)
# to see the weights over time across different diets
# (one line per chick, one facet panel per diet)
library(ggplot2)
ggplot(my_data) + geom_line(aes(x=Time, y=weight, colour=Chick)) +
      facet_wrap(~Diet) +
      guides(col=guide_legend(ncol=2))
# Reshape package demo on a tiny 4-row data frame.
id <- c(1,1,2,2)
Time <- c(1,2,1,2)
FatChange <- c(7,3,4,1)
new <- data.frame(id, Time, FatChange)
new$WeightChange <- c(-3,0,-1,2)
library(reshape)
# melt: keep "id" and "Time" as id variables; the remaining
# data.frame columns become long-format (variable, value) pairs
md <- melt(new, c("id", "Time"))
# cast back to wide: formula LHS defines rows, RHS defines columns;
# an optional aggregation function collapses duplicate cells
cast(md, id + Time ~ variable)
cast(md, Time ~ variable, mean)
| /Lectures/Lecture_1/data_wrangling.R | permissive | ogencoglu/R_for_VTT | R | false | false | 1,336 | r | # R Lecture to VTT - Lecture 1
# Author         : Oguzhan Gencoglu
# Latest Version : 27.04.2016
# Contact        : oguzhan.gencoglu@tut.fi
# ---------------- Data Wrangling ----------------
# Data aggregation and reshaping, demonstrated on the built-in ChickWeight set.
chick <- ChickWeight
# Quick look at structure, summary statistics and the weight distribution.
str(chick)
summary(chick)
boxplot(chick$weight)
# Mean weight per diet: aggregate(<aggregated thing>, <by what>, <function>).
aggregate(list(mean_w = chick$weight), list(diet = chick$Diet), mean)
# Standard deviation of every attribute within each diet group.
aggregate(chick, list(the_thing_that_i_am_grouping_by = chick$Diet), sd)
# Grouping by two variables at once: time point and diet.
aggregate(list(mean_w = chick$weight),
          list(time = chick$Time, diet = chick$Diet),
          mean)
# Weight trajectories over time, one panel per diet.
library(ggplot2)
ggplot(chick) + geom_line(aes(x=Time, y=weight, colour=Chick)) +
  facet_wrap(~Diet) +
  guides(col=guide_legend(ncol=2))
# Reshape package demo on a small hand-built wide data frame.
id <- c(1,1,2,2)
Time <- c(1,2,1,2)
FatChange <- c(7,3,4,1)
measurements <- data.frame(id, Time, FatChange)
measurements$WeightChange <- c(-3,0,-1,2)
library(reshape)
# melt: wide -> long, keeping id and Time as identifier columns.
molten <- melt(measurements, c("id", "Time"))
# cast: long -> wide again, optionally aggregating with a function.
cast(molten, id + Time ~ variable)
cast(molten, Time ~ variable, mean)
|
#######################################
# Compute per-locus / per-individual missingness and minor allele frequency
# for the WGS and GBS genotype sets, then save the six vectors to the cache.
# A call of 3 encodes "missing" throughout this pipeline.
# FIX: the sample count (19) and SNP count (301249) were hard-coded; they
# are now derived from the matrix dimensions, so the function also works
# when the genotype panels change size.
maf_missing <- function(wgs, gbs){
  # Genotype columns: WGS stores calls in columns 9:27, GBS in 3:21.
  wgs_geno <- wgs[, 9:27]
  gbs_geno <- gbs[, 3:21]
  # Per-locus missingness: fraction of individuals with a missing call.
  lmiss1 <- apply(wgs_geno, 1, function(x) sum(x == 3) / ncol(wgs_geno))
  lmiss2 <- apply(gbs_geno, 1, function(x) sum(x == 3) / ncol(gbs_geno))
  # Per-individual missingness: fraction of loci with a missing call.
  imiss1 <- apply(wgs_geno, 2, function(x) sum(x == 3) / nrow(wgs_geno))
  imiss2 <- apply(gbs_geno, 2, function(x) sum(x == 3) / nrow(gbs_geno))
  # Minor allele frequency per locus from 0/1/2 dosage codes, ignoring
  # missing calls; returns NULL-dropped vector via unlist for empty rows.
  getmaf <- function(dmx){
    unlist(apply(dmx, 1, function(x){
      x <- as.numeric(as.character(x))
      x <- x[x != 3]
      if (length(x) > 0) {
        c0 <- sum(x == 0)
        c1 <- sum(x == 1)
        c2 <- sum(x == 2)
        return(min(c(2 * c0 + c1, c1 + 2 * c2)) / (2 * (c0 + c1 + c2)))
      } }))
  }
  maf1 <- getmaf(wgs_geno)
  maf2 <- getmaf(gbs_geno)
  outfile <- "cache/teo_gbs_wgs.RData"
  # Make sure the cache directory exists before saving (previously a
  # silent failure point when run from a fresh checkout).
  if (!dir.exists(dirname(outfile))) dir.create(dirname(outfile), recursive = TRUE)
  message(sprintf("###>>> Data write to: [ %s]", outfile))
  save(file = outfile, list = c("lmiss1", "lmiss2", "imiss1", "imiss2", "maf1", "maf2"))
}
###########################################################
# Compare WGS vs. GBS genotype calls sample-by-sample and tally
# heterozygote / homozygote discordance.
# Returns c(het errors, het total, hom errors, hom total).
# FIXES: `for (i in 1:length(nms))` crashed when gbs had no sample columns
# (1:0 iterates and indexes nms[1] == NA); the second log message labelled
# the homozygote total as "hom err".
comp_alleles <- function(wgs, gbs){
  # Align both call sets on SNP id; GBS ids carry an "S" prefix WGS lacks.
  wgs <- wgs[order(wgs$snpid2), ]
  gbs <- gbs[order(gbs$snpid2), ]
  gbs$snpid2 <- gsub("S", "", gbs$snpid2)
  # Sample columns shared between the two sets (first two gbs cols are ids).
  nms <- names(gbs)[-1:-2]
  heterr <- hettot <- homerr <- homtot <- 0
  for (i in seq_along(nms)) {
    out <- merge(wgs[, c("snpid2", nms[i])], gbs[, c("snpid2", nms[i])], by = "snpid2")
    names(out) <- c("snpid", "g1", "g2")
    # A call of 3 encodes "missing"; only compare loci genotyped in both.
    out <- subset(out, g1 != 3 & g2 != 3)
    if (nrow(out) > 0) {
      het <- out$g1 == 1
      mismatch <- out$g1 != out$g2
      heterr <- heterr + sum(het & mismatch)
      hettot <- hettot + sum(het)
      homerr <- homerr + sum(!het & mismatch)
      homtot <- homtot + sum(!het)
    }
  }
  message(sprintf("###>>> Heterozygote error rate [ %s ] and Homozygote error rate [ %s ]", round(heterr/hettot, 3)*100, round(homerr/homtot, 3)*100))
  message(sprintf("###>>> het err=[ %s ]; het tot=[ %s ]; hom err=[ %s ]; hom tot=[ %s ]", heterr, hettot, homerr, homtot))
  return(c(heterr, hettot, homerr, homtot))
}
##################################################################
# Recode Teo19 WGS genotype strings (two-letter calls such as "AT") into
# numeric allele dosages, keeping only SNPs whose WGS major/minor alleles
# agree with the GBS ref/alt alleles.  Reads its inputs from disk.
# Returns the recoded `steo` data frame with "S"-prefixed SNP ids.
recode <- function(){
# Loads `steo` (WGS calls) and `v` into the function environment.
ob <- load("largedata/wgs_teo19.RData")
### steo: 396818; v
info <- read.csv("largedata//teo_info.csv")
info$snpid <- gsub("S", "", info$snpid)
info <- merge(info, v[, 5:6], by.x="snpid", by.y="snpid2")
names(info)[1] <- "snpid2"
comp <- merge(steo[, c("snpid", "major", "minor")], info[, c(11, 1:3)], by.x="snpid", by.y="snpid3")
message(sprintf("###>>> WGS [ %s ] | GBS [ %s ] | shared [ %s ]", nrow(steo), nrow(info), nrow(comp)))
### Teo19 WGS V3 and V4 are major/minor
# Keep SNPs where {major, minor} matches {ref, alt} in either orientation.
idx <- which((comp$major == comp$ref & comp$minor == comp$alt) | (comp$major == comp$alt & comp$minor == comp$ref))
message(sprintf("###>>> consistent SNP calling [ %s ]", length(idx)))
steo <- merge(comp[idx, c(1,4:6)], steo, by="snpid")
### recoding ATCG=> 0, 1, 2
# Genotype columns start at 9; each is split into two allele characters.
for(i in 9:ncol(steo)){
steo[, i] <- as.character(steo[, i])
# a1 = all but the last character, a2 = all but the first
# (assumes two-character genotype strings -- TODO confirm upstream).
steo$a1 <- gsub(".$", "", steo[, i])
steo$a2 <- gsub("^.", "", steo[, i])
# Alt allele -> 1, ref allele -> 0; "NN" calls are skipped by the guard.
steo[steo[, i]!= "NN" & steo$a1 == steo$alt, ]$a1 <- 1
steo[steo[, i]!= "NN" & steo$a1 == steo$ref, ]$a1 <- 0
steo[steo[, i]!= "NN" & steo$a2 == steo$alt, ]$a2 <- 1
steo[steo[, i]!= "NN" & steo$a2 == steo$ref, ]$a2 <- 0
# Each remaining "N" half-call is scored 1.5, so "NN" sums to 3,
# the missing-call code used elsewhere in this pipeline.
steo[steo$a1 == "N", ]$a1 <- 1.5
steo[steo$a2 == "N", ]$a2 <- 1.5
# Final dosage = sum of the two allele codes (0/1/2, x.5 for half-missing).
steo[, i] <- as.numeric(as.character(steo$a1)) + as.numeric(as.character(steo$a2))
}
steo$snpid <- paste0("S", steo$snpid)
return(steo)
}
################
# Extract the GBS genotype matrix for the 19 plants present in the WGS set
# `steo` (its genotype columns are 9:27), recode missing calls as 3, and
# return the table of SNPs shared between the two platforms.
gbsgeno <- function(steo){
### SNP matrix comparison
library(parallel)
library(devtools)
options(mc.cores=NULL)
# Development-time loads of local package checkouts; requires them on disk.
load_all("~/bin/tasselr")
load_all("~/bin/ProgenyArray")
# Loads the objects (including `teo`) used below.
ob2 <- load("largedata/cj_data.Rdata")
genos <- geno(teo)
# Strip sequencing-run suffixes ("_1:..." / "_mrg:...") from sample names.
nms <- gsub("_1\\:.*|_mrg\\:.*", "", colnames(genos))
subgeno <- genos[, which(nms %in% names(steo)[9:27])]
# 3 encodes a missing genotype call throughout this pipeline.
subgeno[is.na(subgeno)] <- 3
subgeno <- as.data.frame(subgeno)
names(subgeno) <- gsub("_1\\:.*|_mrg\\:.*", "", names(subgeno))
message(sprintf("###>>> GBS of [ %s ] SNPs and [ %s ] plants", nrow(subgeno), ncol(subgeno)))
subgeno$snpid2 <- as.character(row.names(subgeno))
# NOTE(review): this reads steo$snpid2, but recode() above only sets
# steo$snpid -- confirm that callers provide a snpid2 column.
steo$snpid2 <- paste0("S", steo$snpid2)
tem <- merge(steo[, 1:2], subgeno, by = "snpid2")
message(sprintf("###>>> Common SNPs [ %s ] ", nrow(tem)))
return(tem)
}
| /lib/load_data.R | no_license | rossibarra/phasing_tests | R | false | false | 4,649 | r | #######################################
# Compute per-locus / per-individual missingness and minor allele frequency
# for the WGS and GBS genotype sets, then save the six vectors to the cache.
# A call of 3 encodes "missing" throughout this pipeline.
# FIX: the sample count (19) and SNP count (301249) were hard-coded; they
# are now derived from the matrix dimensions, so the function also works
# when the genotype panels change size.
maf_missing <- function(wgs, gbs){
  # Genotype columns: WGS stores calls in columns 9:27, GBS in 3:21.
  wgs_geno <- wgs[, 9:27]
  gbs_geno <- gbs[, 3:21]
  # Per-locus missingness: fraction of individuals with a missing call.
  lmiss1 <- apply(wgs_geno, 1, function(x) sum(x == 3) / ncol(wgs_geno))
  lmiss2 <- apply(gbs_geno, 1, function(x) sum(x == 3) / ncol(gbs_geno))
  # Per-individual missingness: fraction of loci with a missing call.
  imiss1 <- apply(wgs_geno, 2, function(x) sum(x == 3) / nrow(wgs_geno))
  imiss2 <- apply(gbs_geno, 2, function(x) sum(x == 3) / nrow(gbs_geno))
  # Minor allele frequency per locus from 0/1/2 dosage codes, ignoring
  # missing calls; returns NULL-dropped vector via unlist for empty rows.
  getmaf <- function(dmx){
    unlist(apply(dmx, 1, function(x){
      x <- as.numeric(as.character(x))
      x <- x[x != 3]
      if (length(x) > 0) {
        c0 <- sum(x == 0)
        c1 <- sum(x == 1)
        c2 <- sum(x == 2)
        return(min(c(2 * c0 + c1, c1 + 2 * c2)) / (2 * (c0 + c1 + c2)))
      } }))
  }
  maf1 <- getmaf(wgs_geno)
  maf2 <- getmaf(gbs_geno)
  outfile <- "cache/teo_gbs_wgs.RData"
  # Make sure the cache directory exists before saving (previously a
  # silent failure point when run from a fresh checkout).
  if (!dir.exists(dirname(outfile))) dir.create(dirname(outfile), recursive = TRUE)
  message(sprintf("###>>> Data write to: [ %s]", outfile))
  save(file = outfile, list = c("lmiss1", "lmiss2", "imiss1", "imiss2", "maf1", "maf2"))
}
###########################################################
# Compare WGS vs. GBS genotype calls sample-by-sample and tally
# heterozygote / homozygote discordance.
# Returns c(het errors, het total, hom errors, hom total).
# FIXES: `for (i in 1:length(nms))` crashed when gbs had no sample columns
# (1:0 iterates and indexes nms[1] == NA); the second log message labelled
# the homozygote total as "hom err".
comp_alleles <- function(wgs, gbs){
  # Align both call sets on SNP id; GBS ids carry an "S" prefix WGS lacks.
  wgs <- wgs[order(wgs$snpid2), ]
  gbs <- gbs[order(gbs$snpid2), ]
  gbs$snpid2 <- gsub("S", "", gbs$snpid2)
  # Sample columns shared between the two sets (first two gbs cols are ids).
  nms <- names(gbs)[-1:-2]
  heterr <- hettot <- homerr <- homtot <- 0
  for (i in seq_along(nms)) {
    out <- merge(wgs[, c("snpid2", nms[i])], gbs[, c("snpid2", nms[i])], by = "snpid2")
    names(out) <- c("snpid", "g1", "g2")
    # A call of 3 encodes "missing"; only compare loci genotyped in both.
    out <- subset(out, g1 != 3 & g2 != 3)
    if (nrow(out) > 0) {
      het <- out$g1 == 1
      mismatch <- out$g1 != out$g2
      heterr <- heterr + sum(het & mismatch)
      hettot <- hettot + sum(het)
      homerr <- homerr + sum(!het & mismatch)
      homtot <- homtot + sum(!het)
    }
  }
  message(sprintf("###>>> Heterozygote error rate [ %s ] and Homozygote error rate [ %s ]", round(heterr/hettot, 3)*100, round(homerr/homtot, 3)*100))
  message(sprintf("###>>> het err=[ %s ]; het tot=[ %s ]; hom err=[ %s ]; hom tot=[ %s ]", heterr, hettot, homerr, homtot))
  return(c(heterr, hettot, homerr, homtot))
}
##################################################################
# Recode Teo19 WGS genotype strings (two-letter calls such as "AT") into
# numeric allele dosages, keeping only SNPs whose WGS major/minor alleles
# agree with the GBS ref/alt alleles.  Reads its inputs from disk.
# Returns the recoded `steo` data frame with "S"-prefixed SNP ids.
recode <- function(){
# Loads `steo` (WGS calls) and `v` into the function environment.
ob <- load("largedata/wgs_teo19.RData")
### steo: 396818; v
info <- read.csv("largedata//teo_info.csv")
info$snpid <- gsub("S", "", info$snpid)
info <- merge(info, v[, 5:6], by.x="snpid", by.y="snpid2")
names(info)[1] <- "snpid2"
comp <- merge(steo[, c("snpid", "major", "minor")], info[, c(11, 1:3)], by.x="snpid", by.y="snpid3")
message(sprintf("###>>> WGS [ %s ] | GBS [ %s ] | shared [ %s ]", nrow(steo), nrow(info), nrow(comp)))
### Teo19 WGS V3 and V4 are major/minor
# Keep SNPs where {major, minor} matches {ref, alt} in either orientation.
idx <- which((comp$major == comp$ref & comp$minor == comp$alt) | (comp$major == comp$alt & comp$minor == comp$ref))
message(sprintf("###>>> consistent SNP calling [ %s ]", length(idx)))
steo <- merge(comp[idx, c(1,4:6)], steo, by="snpid")
### recoding ATCG=> 0, 1, 2
# Genotype columns start at 9; each is split into two allele characters.
for(i in 9:ncol(steo)){
steo[, i] <- as.character(steo[, i])
# a1 = all but the last character, a2 = all but the first
# (assumes two-character genotype strings -- TODO confirm upstream).
steo$a1 <- gsub(".$", "", steo[, i])
steo$a2 <- gsub("^.", "", steo[, i])
# Alt allele -> 1, ref allele -> 0; "NN" calls are skipped by the guard.
steo[steo[, i]!= "NN" & steo$a1 == steo$alt, ]$a1 <- 1
steo[steo[, i]!= "NN" & steo$a1 == steo$ref, ]$a1 <- 0
steo[steo[, i]!= "NN" & steo$a2 == steo$alt, ]$a2 <- 1
steo[steo[, i]!= "NN" & steo$a2 == steo$ref, ]$a2 <- 0
# Each remaining "N" half-call is scored 1.5, so "NN" sums to 3,
# the missing-call code used elsewhere in this pipeline.
steo[steo$a1 == "N", ]$a1 <- 1.5
steo[steo$a2 == "N", ]$a2 <- 1.5
# Final dosage = sum of the two allele codes (0/1/2, x.5 for half-missing).
steo[, i] <- as.numeric(as.character(steo$a1)) + as.numeric(as.character(steo$a2))
}
steo$snpid <- paste0("S", steo$snpid)
return(steo)
}
################
# Extract the GBS genotype matrix for the 19 plants present in the WGS set
# `steo` (its genotype columns are 9:27), recode missing calls as 3, and
# return the table of SNPs shared between the two platforms.
gbsgeno <- function(steo){
### SNP matrix comparison
library(parallel)
library(devtools)
options(mc.cores=NULL)
# Development-time loads of local package checkouts; requires them on disk.
load_all("~/bin/tasselr")
load_all("~/bin/ProgenyArray")
# Loads the objects (including `teo`) used below.
ob2 <- load("largedata/cj_data.Rdata")
genos <- geno(teo)
# Strip sequencing-run suffixes ("_1:..." / "_mrg:...") from sample names.
nms <- gsub("_1\\:.*|_mrg\\:.*", "", colnames(genos))
subgeno <- genos[, which(nms %in% names(steo)[9:27])]
# 3 encodes a missing genotype call throughout this pipeline.
subgeno[is.na(subgeno)] <- 3
subgeno <- as.data.frame(subgeno)
names(subgeno) <- gsub("_1\\:.*|_mrg\\:.*", "", names(subgeno))
message(sprintf("###>>> GBS of [ %s ] SNPs and [ %s ] plants", nrow(subgeno), ncol(subgeno)))
subgeno$snpid2 <- as.character(row.names(subgeno))
# NOTE(review): this reads steo$snpid2, but recode() above only sets
# steo$snpid -- confirm that callers provide a snpid2 column.
steo$snpid2 <- paste0("S", steo$snpid2)
tem <- merge(steo[, 1:2], subgeno, by = "snpid2")
message(sprintf("###>>> Common SNPs [ %s ] ", nrow(tem)))
return(tem)
}
|
#' Compare the original folder against the staging folder and (re)create
#' any month staging tables that are missing.
#' @param original_path : path where the original bases live
#' @param staging_path  : path where the staging bases live
#' @param month_to_create : optional month id (currently unused -- kept for
#'   interface compatibility)
original_path <- "Y:/V2.0/data/staging"
staging_path <- "Y:/V2.0/data/comportamiento_horarios"
compare_maker <-
  function(original_path,
           staging_path,
           month_to_create = NULL) {
    # NOTE(review): the function changes the working directory to reach its
    # helper scripts; it is now restored on exit instead of leaking into
    # the caller's session.
    old_wd <- getwd()
    on.exit(setwd(old_wd), add = TRUE)
    setwd("Y:/V2.0/scripts/pronostico/drafts/comportamiento_horario")
    source("extraer_numeros.R")
    '%!in%' <- function(x, y) !('%in%'(x, y))
    # Compare the data in original against staging to find missing tables.
    #### original ####
    files_original <- list.files(original_path)
    position_original <-
      as.vector(sapply(files_original, extraer_numeros))
    files_original <-
      data.frame(files = files_original, position = position_original)
    #### staging ####
    files_staging <- list.files(staging_path)
    # str_extract_all / %>% come from stringr / magrittr, assumed to be
    # attached by the sourced scripts -- TODO confirm.
    position_staging <-
      sapply(str_extract_all(files_staging, "[0-9]+"), "[[", 1) %>% as.numeric
    files_staging <-
      data.frame(files = files_staging, position = position_staging)
    #### compare ####
    compare <-
      files_original$position[which(files_original$position %!in% files_staging$position)]
    if (length(compare) == 0) {
      # Nothing is missing: report and return normally (the original
      # signalled this non-error condition with stop()).
      message("Files Complete")
      return(invisible(compare))
    }
    compare <- as.list(compare)
    # Rebuild each missing month, depending on where the files come from.
    if (original_path == "Y:/V2.0/data/staging") {
      source("comportamiento_horario.R")
      staging <- "Y:/V2.0/data/staging"
      for (i in compare) {
        print(paste0("Creando staging mes ausente ", i))
        comportamiento_horario(staging, i)
      }
      print("Archivos completos")
    }
    # (debug leftover print("xd") removed)
    invisible(compare)
  }
| /scripts/pronostico/drafts/comportamiento_horario/creacion_automatica.R | no_license | DanielRZapataS/general_forecast_engine | R | false | false | 1,887 | r |
#' #clean a process original bases to create month staging table
#' #' @param original_path : path field where original base_list[[i]] places
#' #' @param staging_path : path field where staging base_list[[i]] places
#' #' @return : staging table
original_path <- "Y:/V2.0/data/staging"
staging_path<- "Y:/V2.0/data/comportamiento_horarios"
compare_maker <-
function(original_path,
staging_path,
month_to_create = NULL) {
setwd("Y:/V2.0/scripts/pronostico/drafts/comportamiento_horario")
source("extraer_numeros.R")
'%!in%' <- function(x,y)!('%in%'(x,y))
#Compara la data en origina contra staging para halla posibles tablas faltantes
####original####
files_original <- list.files(original_path)
position_original <-
as.vector(sapply(files_original, extraer_numeros))
files_original <-
data.frame(files = files_original , position = position_original)
####staging####
files_staging <- list.files(staging_path)
position_staging <-
sapply(str_extract_all(files_staging, "[0-9]+"), "[[", 1) %>% as.numeric
files_staging <-
data.frame(files = files_staging , position = position_staging)
####compare####
compare <-
files_original$position[(which(files_original$position %!in% files_staging$position))]
if (length(compare) == 0) {
stop("Files Complete")
}
compare <- as.list(compare)
#Evaluar deacuedo al origen del archivo.
if (original_path == "Y:/V2.0/data/staging") {
source("comportamiento_horario.R")
staging <- "Y:/V2.0/data/staging"
for (i in compare) {
print(paste0("Creando staging mes ausente ", i))
comportamiento_horario(staging, i)
}
print("Archivos completos")
}
print("xd")
}
|
clean.tweets = function(tweets)
{
  # Normalise raw tweet text for scoring: strip links, retweet markers,
  # @mentions, punctuation, digits and control chars, then lower-case.
  # Returns a character vector (named by the input tweets, as sapply does).
  require(stringr)
  # apply scrubbing to all tweets
  cleanTweets = sapply(tweets, function(tweet) {
    # remove links FIRST, while the URL punctuation is still intact; the
    # old "http\\w+" could not match "://" and ran only after punctuation
    # removal, so it was unreliable
    tweet = gsub("http[^[:space:]]+", "", tweet)
    # remove retweet entities, case-insensitively (rt/RT/via/Via); plain
    # capturing group instead of "(?:...)" for TRE regex compatibility
    tweet = gsub("(RT|Via) ((\\b\\W*@\\w+)+)", "", tweet, ignore.case = TRUE)
    # remove @people
    tweet = gsub("@\\w+", "", tweet)
    # remove punctuation symbols
    tweet = gsub("[[:punct:]]", "", tweet)
    # remove numbers
    tweet = gsub("[[:digit:]]", "", tweet)
    # remove control characters
    tweet = gsub("[[:cntrl:]]", "", tweet)
    # and convert to lower case:
    tweet = tolower(tweet)
    return(tweet)
  })
  return(cleanTweets)
}
# This function is borrowed from Jeffrey Breen's blog on sentiment analysis
# Link: http://jeffreybreen.wordpress.com/2011/07/04/twitter-text-mining-r-slides/
#
# score.sentiment: naive dictionary-based sentiment scorer.
#   sentences : character vector of (cleaned) tweets
#   pos.words : dictionary of positive terms
#   neg.words : dictionary of negative terms
#   .progress : progress-bar name forwarded to plyr::laply (e.g. 'text')
# Returns a data.frame with one row per sentence:
#   score = (# positive matches) - (# negative matches), text = the sentence.
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
require(plyr)
require(stringr)
# we got a vector of sentences. plyr will handle a list or a vector as an "l" for us
# we want a simple array of scores back, so we use "l" + "a" + "ply" = laply:
scores = laply(sentences, function(sentence, pos.words, neg.words) {
# split into words. str_split is in the stringr package
word.list = str_split(sentence, '\\s+')
# sometimes a list() is one level of hierarchy too much
words = unlist(word.list)
# compare our word to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# match() returns the position of the matched term or NA
# we just want a TRUE/FALSE:
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
} | /week10/twitterSupport.r | no_license | sharadgit/IS607 | R | false | false | 2,256 | r | clean.tweets = function(tweets)
{
  # Normalise raw tweet text for scoring: strip links, retweet markers,
  # @mentions, punctuation, digits and control chars, then lower-case.
  # Returns a character vector (named by the input tweets, as sapply does).
  require(stringr)
  # apply scrubbing to all tweets
  cleanTweets = sapply(tweets, function(tweet) {
    # remove links FIRST, while the URL punctuation is still intact; the
    # old "http\\w+" could not match "://" and ran only after punctuation
    # removal, so it was unreliable
    tweet = gsub("http[^[:space:]]+", "", tweet)
    # remove retweet entities, case-insensitively (rt/RT/via/Via); plain
    # capturing group instead of "(?:...)" for TRE regex compatibility
    tweet = gsub("(RT|Via) ((\\b\\W*@\\w+)+)", "", tweet, ignore.case = TRUE)
    # remove @people
    tweet = gsub("@\\w+", "", tweet)
    # remove punctuation symbols
    tweet = gsub("[[:punct:]]", "", tweet)
    # remove numbers
    tweet = gsub("[[:digit:]]", "", tweet)
    # remove control characters
    tweet = gsub("[[:cntrl:]]", "", tweet)
    # and convert to lower case:
    tweet = tolower(tweet)
    return(tweet)
  })
  return(cleanTweets)
}
# This function is borrowed from Jeffrey Breen's blog on sentiment analysis
# Link: http://jeffreybreen.wordpress.com/2011/07/04/twitter-text-mining-r-slides/
#
# score.sentiment: naive dictionary-based sentiment scorer.
#   sentences : character vector of (cleaned) tweets
#   pos.words : dictionary of positive terms
#   neg.words : dictionary of negative terms
#   .progress : progress-bar name forwarded to plyr::laply (e.g. 'text')
# Returns a data.frame with one row per sentence:
#   score = (# positive matches) - (# negative matches), text = the sentence.
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
require(plyr)
require(stringr)
# we got a vector of sentences. plyr will handle a list or a vector as an "l" for us
# we want a simple array of scores back, so we use "l" + "a" + "ply" = laply:
scores = laply(sentences, function(sentence, pos.words, neg.words) {
# split into words. str_split is in the stringr package
word.list = str_split(sentence, '\\s+')
# sometimes a list() is one level of hierarchy too much
words = unlist(word.list)
# compare our word to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# match() returns the position of the matched term or NA
# we just want a TRUE/FALSE:
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.4574177509266e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
# Auto-generated fuzz/regression harness (RcppDeepState/AFL style): feed the
# fixed argument list to the internal C++ routine under test.
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926580-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,100 | r | testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.4574177509266e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
# Auto-generated fuzz/regression harness: run the internal C++ routine on
# the generated inputs and print the structure of the result.
result <- do.call(epiphy:::costTotCPP,testlist)
str(result)
###################################
#### Girlscouts Member Survery ####
###################################
# by:Yiran Sheng @ yiransheng.com #
###################################
library("Hmisc")
library("lmtest")
library("mlogit")
library("dgof")
library("timeSeries")
# Raw survey export; several spellings of "missing" are mapped to NA.
d1 <- read.csv("file1.csv", na.strings=c("","NA","N/A"))
### Initializing ####
# Snapshot of graphics parameters so plots can restore them later.
opar <- par()
### Removing row 1 (question text) ###
# Row 1 of the export holds the question text; keep it aside as `qids`.
d1.res <- d1[2:nrow(d1), ]
qids <- d1[1,]
## column one of data is unique ID, which cannot be NA (Corrupted data), thus will be removed
d1.res <- d1.res[!is.na(d1.res[,1]), ]
## Global Variables and functions
# Number of usable responses.
N = nrow(d1.res)
# Column selectors for survey questions.
#   qcol(i)                -> logical mask over names(d1) covering every
#                             sub-column of question Qi
#   qcol(i, j)             -> single column label "Q<qid>.<i>_<j>"
#   qcol(i, j, text=TRUE)  -> the free-text variant "..._TEXT"
qcol <- function(i, j=F, text=F, qid="1") {
  if (!j) {
    # Range test against the global survey header d1.
    lo <- paste("Q", i, "_", sep="")
    hi <- paste("Q", i, ".Z", sep="")
    return(names(d1) >= lo & names(d1) <= hi)
  }
  suffix <- paste(i, j, sep="_")
  if (text) suffix <- paste(suffix, "TEXT", sep="_")
  paste("Q", qid, ".", suffix, sep="")
}
# Drop every row of `data` that contains at least one NA (complete cases).
# FIX: uses rowSums() so it also works on single-row inputs, where the
# original apply()-based version lost its matrix dimensions and crashed.
cleanse <- function(data){
  keep <- rowSums(is.na(data)) == 0
  data[keep, ]
}
# Accumulator for everything that goes into the final report.
output <- list()
#####################################################################
#
# General Notes:
# * To get the description for a single column question, use
# qids$Qx x as the question integer id/number (10, 28...)
# * To get the column range of a composite questions, use
# qids[qcol(x)], x is the question integer id/number (12, 13...)
# * All results, stats relevant for reporting purposes are either
# rendered as figures ("M_fig_****.png"), or appended to the list
# output. Check output in the console mode of R to see details
# once the entire script is run.
# * Store and backup this file with the data files (file1.csv etc.)
#
######################################################################
### Q46 How old are you? ####
# Ages arrive as text from the CSV; drop missing answers before converting.
dmgrph.age <- as.numeric( d1.res$Q46[!is.na(d1.res$Q46)] )
# describe() comes from Hmisc (loaded above): frequency summary of ages.
out <- describe(dmgrph.age)
output$Q46 <- out
png(filename="M_fig_age_summary.png")
hist(dmgrph.age, main="Respondants Age Distribution (Member Survey)", xlab="Age")
dev.off()
## Q6.1 Experience as a Girl Scout ##
dmgrph.levels.preset <- c("Daisy","Brownie","Junior","Cadette","Unspecified")
# All Q6.* sub-question columns.
colrange <- which(names(d1.res) >= "Q6" & names(d1.res) <="Q6Z")
d1.level <- d1.res[,colrange]
# Free-text "other" level names supplied by respondents.
dmgrph.levels.other <- unique(d1.level$Q6.1_5_TEXT)
dmgrph.levels <- c(dmgrph.levels.preset, as.character(dmgrph.levels.other))
dmgrph.levels <- unique(dmgrph.levels)
dmgrph.levels <- dmgrph.levels[!is.na(dmgrph.levels)]
# Column selector for Q6 sub-questions (see qcol above).
q6col <- function(i,j,text=F){
return (qcol(i,j,text,qid="6"))
}
level.counts <- c()
# Membership-length buckets (rows of the table built below).
level.time <- c("<=1yr","2-3yr","4-5yr",">=6yr")
# troop size
level.size <- c("<=5","6-9","10-13",">=14")
# One pass per scouting level: count members and bucket time / troop size.
for (i in 1:5){
level.counts <- c( level.counts, length( which( d1.res[[q6col(1,i)]]=="1") ) )
time <- d1.res[[q6col(2,i)]]
time <- as.numeric(time[!is.na(time)])
size <- d1.res[[q6col(3,i)]]
size <- as.numeric(size[!is.na(size)])
time_count <- c()
size_count <- c()
for (j in 1:4){
time_count<-c(time_count, sum(as.numeric(time)==j))
size_count<-c(size_count, sum(as.numeric(size)==j))
}
level.time <- cbind(level.time, time_count)
level.size <- cbind(level.size, size_count)
}
level.size <- as.data.frame(level.size)
level.time <- as.data.frame(level.time)
names(level.size) <- c("", dmgrph.levels.preset)
names(level.time) <- c("", dmgrph.levels.preset)
# First column holds the bucket labels; promote it to row names.
row.names(level.size)<-level.size[,1]
level.size<-level.size[,2:ncol(level.size)]
row.names(level.time)<-level.time[,1]
level.time<-level.time[,2:ncol(level.time)]
# Respondents not counted in any level are reported as "Not a Member".
tmp <- c(N-sum(level.counts), level.counts)
names(tmp) <- c("Not a Member", dmgrph.levels.preset)
dmgrph.levels.distr <- tmp/N*100
## plot
png(filename="M_fig_memberhip_summary.png",width=500)
barplot(dmgrph.levels.distr, main="Girl Scout Levels Distribution", xlab="Level", ylab="Percentage")
dev.off()
# Convert each column of `data` to percentages of its column total, print
# them, and draw one barplot per column; returns the numeric count matrix.
# FIX: seq_len() instead of 1:ncol() -- the original iterated twice (j = 1
# and j = 0) on zero-column input.
q6plot <- function(data,main=NULL,xlab=NULL,ylab=NULL){
  counts <- apply(data, 2, as.numeric)
  row.names(counts) <- row.names(data)
  totals <- apply(counts, 2, sum)
  for (j in seq_len(ncol(counts))) {
    pct <- counts[, j] / totals[j] * 100
    print(pct)
    barplot(pct, main = paste(main, colnames(counts)[j], sep=":"),
            xlab = xlab, ylab = ylab)
  }
  return(counts)
}
# Oops, overides the R aggregate function here, too lazy to fix that,
# made a copy of the original function.
# NOTE(review): this shadows base::aggregate for the rest of the script;
# the base version survives as `aggr`.  Renaming would be safer but would
# require touching every call site below.
aggr <- aggregate
# Collapse a bucket-by-level count table into overall percentages per bucket.
aggregate <- function(level.size){
a <- apply(level.size,2,as.numeric)
a <- apply(a,1,sum)
a <- a/sum(a)*100
a <- as.table(a)
rownames(a)<-row.names(level.size)
return (a)
}
# Detailed 3x4 panel: per-level distributions plus level-aggregated bars.
png(filename="M_fig_memberhip_detail.png",width=1200,height=700)
par(mfrow=c(3,4))
q6plot(level.time[1:4],main="Girl Scout Membership Length by Level", xlab="Years",ylab="Percentage")
q6plot(level.size[1:4],main="Girl Scout Group Size by Level", xlab="Group Size",ylab="Percentage")
barplot(aggregate(level.time),main="Girl Scout Membership Length, Aggregating all Levels",xlab="Years",ylab="Percentage")
barplot(aggregate(level.size),main="Girl Scout Group Size, Aggregating all Levels",xlab="Group Size",ylab="Percentage")
dev.off()
par(mfrow=c(1,1))
#### Q25, why girls don't go camping with their troops ? ###
y<-d1.res$Q25
y<-y[!is.na(y)]
# Answer labels, in the same order as the survey's choice codes.
y.reasons <- c("I don't feel comfortable going without my parents","I don't like to go camping","I go to a different summer camp","I like doing activities outside, but I don't like camping","I would be away from home for too long","I'm scared to camp with people I don't know","My parents don't allow me to go camping without them","My troop has never been camping")
y.desc <- describe(y)
# Pair each reason with its frequency/percentage from describe().
tmp <- rbind(y.reasons,y.desc$values)
tmp <- t(tmp)
colnames(tmp) <- c("Reasons why Never Gone Camping", "Frequency", "%")
output$Q25 <- tmp
#### Q20 ####
# Column selector for Q20 sub-questions (currently unused below).
q20col <- function(i,j,text=F){
return (qcol(i,j,text,qid="20"))
}
### formated data, from survey server
data20 <- read.csv("report_q20.csv")
# Share of girls answering "Yes" per camping type, as a percentage.
camp <- data20$Yes/data20$Responses
names(camp) <- data20$Question
camp <- camp*100
png(filename="M-camping-type.png", width=600)
par(oma=c(0,15,5,5),las=2)
barplot(camp,horiz=T,main="Girl Scouts Camping Experience by Type",xlab="Percentage of Girls for each type")
dev.off()
data20 <- read.csv("report_q20_2.csv")
d20.numeric <- apply(data20[3:9],2,as.numeric)
# Normalise each row by its 7th entry (presumably the row total -- TODO confirm).
q20.normalize <- function(v){
return (v/v[7])
}
d20.normalize <- apply(d20.numeric, 1, q20.normalize)
# Average normalised share per duration bucket (first 6 entries).
d20.time <- apply(d20.normalize,1,mean)[1:6]
d20.time <- d20.time*100
names(d20.time)<-c("1~2\ndays","3~4\ndays","5~6\ndays","1~2\nweeks","3~4\nweeks","Over\n1 month")
png(filename="M-camping-time.png", width=600)
par(opar)
barplot(d20.time, main="Girl Scouts Camping Experience Time Distribution",xlab="Percentage of girls for each duration")
dev.off()
### Outdoor activities Q9,Q23,Q12,Q13,Q15 ###
d23 <- d1.res[qcol(23)]
d23.clean <- apply(cleanse(d23), 2, as.numeric)
q23.corr <- cor(d23.clean)
# Zero out every pairwise correlation whose test is not significant at 5%.
for (i in 1:nrow(q23.corr)){
for (j in 1:ncol(q23.corr)){
cor.test(d23.clean[,i], d23.clean[,j], method = c("pearson"))$p.value -> p
if (p>0.05){
q23.corr[i,j]<-q23.corr[j,i] <- 0
}
}
}
# Linear model of Q23_5 on the remaining Q23 items.
q23.model1 <- (Q23_5~Q23_1+Q23_2+Q23_3+Q23_4+Q23_6)
d23.clean <- as.data.frame(d23.clean)
q23.fit1 <- lm(q23.model1, data=d23.clean)
# q23.fit2 <- mlogit(q23.model1, data=d23.clean)
#### Q13, detailed activities analysis
### Formating Data
d13.1 <- read.csv("report_q13_1.csv")
d13.2 <- read.csv("report_q13_2.csv")
d13.3 <- read.csv("report_q13_3.csv")
d13.all <- cbind(d13.1,d13.2,d13.3)
# Activity names, one per the 90 activities asked about.
acts <- d13.all$Question
d13 <- d1.res[qcol(13)]
# The 270 Q13 columns split into three 90-column panels:
#done before
d13.db <- d13[,1:90]
#like
d13.lk <- d13[,91:180]
#with girl scouts
d13.wg <- d13[,181:270]
# 1 means have done it before, 2 means haven't done it before
d13.db.choices <- c(1,2)
# 1 means would like to do it with girl scouts, 2 means would not like to
d13.wg.choices <- c(1,2,3)
# rating of activities on a 1-5 scale
d13.lk.choices <- c(1,2,3,4,5)
#### Chisq-test for done before vs. rating, cross-tab ####
d13.dblk = list()
nind <- c()
nindc <- c()
nindc2 <- c()
# Weighted-rating helper: turns a 5-bin count vector into per-bin
# contributions whose sum is the average rating (1..5 scale).
z <- function(v){
s <- sum(v)
return (c(1:5)*v/s)
}
cat("These activities showed a improvement of rating, after being done:\n")
# Build a 5x2 (rating x done-before) contingency table per activity; keep
# the activity ids where the chi-square test is significant in `nind`.
for (k in 1:90) {
label <- as.character(k)
d13.dblk[[label]] <-matrix(ncol=2,nrow=5)
for (i in 1:2){
for (j in 1:5){
filter = d13.db[,k]==d13.db.choices[i] & d13.lk[,k] == d13.lk.choices[j]
filter.nomissing <- !is.na(filter)
filter = filter & filter.nomissing
n <- sum(filter)
d13.dblk[[label]][j,i] = n
}
}
p<-chisq.test(d13.dblk[[label]])$p.value
if (!is.nan(p) & p<0.05){
m <- d13.dblk[[label]]
# nind collects the significant activity ids (values in 1..90).
d13.dblk.diff <- nind <- c(nind, k)
ALL <- apply(m,2,sum)
# Share of 5-star ratings within each done-before group.
five <- m[5, ]/ALL
change2 <- five[1] - five[2]
m <- apply(m,2,z)
change <- apply(m,2,sum)
change <- (change[1] - change[2])
d13.dblk.avg <- nindc <- c(nindc, change)
d13.dblk.five <- nindc2 <- c(nindc2, change2)
cat(acts[as.numeric(k)], "\nGirls love it ",change," ", change2, "\n")
}
}
cat("END of Done Before vs. Ratings, \n\n\n ######################\n")
# Summary table: average rating and 5-star share, done-before vs. not.
# BUG FIX: the original indexed d13.dblk[[j]] by the LOOP POSITION j, but
# the rows are labelled acts[nind]; the list (keyed "1".."90" in order)
# must be indexed by the activity id nind[j], otherwise ratings and labels
# are misaligned whenever nind != 1:length(nind).
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  a <- d13.dblk[[as.character(nind[j])]]
  m[j, 1] <- sum(z(a[, 1]))        # avg rating, done before
  m[j, 2] <- sum(z(a[, 2]))        # avg rating, not done before
  five <- a[5, ] / apply(a, 2, sum)
  m[j, 3] <- five[1]               # 5-star share, done before
  m[j, 4] <- five[2]               # 5-star share, not done before
  m[j, 5] <- m[j, 1] - m[j, 2]
  m[j, 6] <- m[j, 3] - m[j, 4]
}
d13.dblk.tab <- as.table(m)
rownames(d13.dblk.tab) <- acts[nind]
colnames(d13.dblk.tab) <- c("Avg Rating, Done Before","Avg Rating, Haven't Done Before", "5 Star %, Done Before", "5 Star %, Haven't Done Before", "Improvement in Avg Rating", "Improvement in 5 Star %")
#### Chisq-test for done before with girl scouts vs. rating, cross-tab ####
d13.wglk = list()
nind <- c()
nind2 <- c()
nindc <- c()
nindc2 <- c()
nindc3 <- c()
nindc4 <- c()
cat("These activities showed a improvement of rating, after being done with Girl Scouts:\n")
# Build a 5x3 (rating x would-like / would-not / done-with-GS) table per
# activity; keep significant activities with a big enough GS sample.
for (k in 1:90) {
label <- as.character(k)
d13.wglk[[label]] <-matrix(ncol=3,nrow=5)
for (i in 1:3){
for (j in 1:5){
filter = d13.wg[,k]==d13.wg.choices[i] & d13.lk[,k] == d13.lk.choices[j]
filter.nomissing <- !is.na(filter)
filter = filter & filter.nomissing
n <- sum(filter)
d13.wglk[[label]][j,i] <- n
}
}
p<-chisq.test(d13.wglk[[label]])$p.value
if (!is.nan(p) & p<0.05){
tmp <- m <- d13.wglk[[label]]
m <- apply(m,2,z)
ALL <- apply(tmp,2,sum)
# Require more than 40 responses in the "done with Girl Scouts" column.
if (ALL[3]>40){
d13.wglk.diff <- nind <- c(nind, k)
five <- m[5, ]/ALL
change21 <- change2 <- five[3] - five[1]
d13.wglk.five_wl <- nindc2 <- c(nindc2, change2)
change22 <- change2 <- five[3] - five[2]
d13.wglk.five_wn <- nindc3 <- c(nindc3, change2)
change <- apply(m,2,sum)
change11 <- (change[3] - change[1])
d13.wglk.avg_wl <- nindc <- c(nindc, change11)
change12 <- (change[3] - change[2])
d13.wglk.avg_wn <- nindc4 <- c(nindc4, change12)
cat(acts[as.numeric(k)], ":->\nGirls' reactions:")
cat("\nChange in avg rating vs. would like to do it with gs: ", change11)
cat("\nChange in avg rating vs. would not like to do it with gs: ", change12)
cat("\nChange in five star % vs. would like to do it with gs: ", change21)
cat("\nChange in five star % vs. would not like to do it with gs: ", change22)
cat("\n------------------------------------------------------------------------\n")
}
}
}
cat("END of Done with Girl Scouts vs. Ratings, \n\n\n ######################\n")
# Summary table: "would like to do with Girl Scouts" vs. "already done".
# BUG FIX: index d13.wglk by the activity id nind[j] (keys "1".."90"),
# not by the loop position j, so rows match the acts[nind] labels below.
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  a <- d13.wglk[[as.character(nind[j])]]
  m[j, 1] <- sum(z(a[, 1]))        # avg rating, would like
  m[j, 2] <- sum(z(a[, 3]))        # avg rating, already done with GS
  five <- a[5, ] / apply(a, 2, sum)
  m[j, 3] <- five[1]
  m[j, 4] <- five[3]
  m[j, 5] <- m[j, 2] - m[j, 1]
  m[j, 6] <- m[j, 4] - m[j, 3]
}
d13.wglk.tab1 <- as.table(m)
rownames(d13.wglk.tab1) <- acts[nind]
colnames(d13.wglk.tab1) <- c("Avg Rating, Would Like","Avg Rating, Already Done with Girl Scouts", "5 Star %, Would Like", "5 Star %, Already Done with Girl Scouts", "Improvement in Avg Rating", "Improvement in 5 Star %")
# Same comparison against the "would NOT like to do with Girl Scouts" group.
# BUG FIX: index d13.wglk by the activity id nind[j] (keys "1".."90"),
# not by the loop position j, so rows match the acts[nind] labels below.
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  a <- d13.wglk[[as.character(nind[j])]]
  m[j, 1] <- sum(z(a[, 2]))        # avg rating, would not like
  m[j, 2] <- sum(z(a[, 3]))        # avg rating, already done with GS
  five <- a[5, ] / apply(a, 2, sum)
  m[j, 3] <- five[2]
  m[j, 4] <- five[3]
  m[j, 5] <- m[j, 2] - m[j, 1]
  m[j, 6] <- m[j, 4] - m[j, 3]
}
d13.wglk.tab2 <- as.table(m)
rownames(d13.wglk.tab2) <- acts[nind]
colnames(d13.wglk.tab2) <- c("Avg Rating, Would Not Like","Avg Rating, Already Done with Girl Scouts", "5 Star %, Would Not Like", "5 Star %, Already Done with Girl Scouts", "Improvement in Avg Rating", "Improvement in 5 Star %")
# Collect the three Q13 tables for reporting.
output$Q13.1 <- d13.dblk.tab
output$Q13.2 <- d13.wglk.tab1
output$Q13.3 <- d13.wglk.tab2
#### Q10 and Q28 Comparison
colrange <- which(names(d1.res) == "Q10" | names(d1.res) =="Q28")
d10_28 <- d1.res[, colrange]
# Keep only respondents who answered both questions.
rowrange <- !( is.na(d10_28[,1]) | is.na(d10_28[, 2]) )
d10_28.clean <- cmp <- apply(d10_28[rowrange, ], 2, as.numeric)
# Distribution of per-respondent attitude change (Q28 - Q10), bins -4..4.
Change.of.Attitude <- as.table(rbind(c(-4:4), hist(cmp[,2] - cmp[,1])$count))
output$Q10_Q28.COA <- Change.of.Attitude
# Cramer-von Mises test (dgof) of Q28 against the empirical CDF of Q10.
output$Q10_Q28.TEST <- cvm.test(cmp[,2],ecdf(cmp[,1]), simulate.p.value=T)
#### Q12
colrange <- qcol(12)
d12 <- d1.res[, colrange]
# Count "1" (selected) answers per activity column, ignoring NAs.
filter <- function(v){
sum(as.numeric(!is.na(v) & v=="1"))
}
d12.count <- apply(d12, 2, filter)
d12.count <- d12.count/sum(d12.count) * 100
d12.tab <- as.table(cbind(as.character(acts), d12.count))
row.names(d12.tab) <- c()
colnames(d12.tab) <- c("Outdoor Activities", "Overall Preference Score")
output$Q12 <- d12.tab
#### Q29
d29 <- read.csv("report_camp_opts.csv")
| /Girlscouts Survey/membership-survey.r | no_license | usmuh/Stack-of-R-scripts | R | false | false | 13,771 | r | ###################################
#### Girlscouts Member Survery ####
###################################
# by:Yiran Sheng @ yiransheng.com #
###################################
library("Hmisc")
library("lmtest")
library("mlogit")
library("dgof")
library("timeSeries")
# Raw survey export; several spellings of "missing" are mapped to NA.
d1 <- read.csv("file1.csv", na.strings=c("","NA","N/A"))
### Initializing ####
# Snapshot of graphics parameters so plots can restore them later.
opar <- par()
### Removing row 1 (question text) ###
# Row 1 of the export holds the question text; keep it aside as `qids`.
d1.res <- d1[2:nrow(d1), ]
qids <- d1[1,]
## column one of data is unique ID, which cannot be NA (Corrupted data), thus will be removed
d1.res <- d1.res[!is.na(d1.res[,1]), ]
## Global Variables and functions
# Number of usable responses.
N = nrow(d1.res)
# Column selector / name builder for survey questions (reads global `d1`).
#
# With j = FALSE (default): returns a logical mask over names(d1) selecting
# all sub-question columns of question i, i.e. names between "Q<i>_" and
# "Q<i>.Z".
# NOTE(review): this range test relies on lexicographic string comparison;
# whether e.g. "Q10_" falls inside the "Q1_".."Q1.Z" window depends on the
# collation locale — confirm it selects exactly the intended columns.
#
# With j given: returns the single column name "Q<qid>.<i>_<j>" (with a
# "_TEXT" suffix when text = TRUE).
qcol <- function(i,j=F,text=F, qid="1") {
if (!j){
# quick reference for sub questions
return (names(d1)>=paste("Q",i,"_",sep="") & names(d1)<=paste("Q",i,".Z",sep=""))
}
label <- paste(i,j, sep="_")
if (text) label <- paste(label, "TEXT", sep="_")
label <- paste("Q",qid,".",label, sep="")
return (label)
}
# Drop every row that contains at least one NA.
#
# Rewritten to use complete.cases(), the standard-library idiom for "rows with
# no missing values" (the old version built the same mask by hand with two
# apply() passes). drop = FALSE keeps the result a data.frame even when the
# input has a single column.
#
# @param data a data.frame (or matrix).
# @return `data` restricted to its fully-observed rows.
cleanse <- function(data){
  data[complete.cases(data), , drop = FALSE]
}
output <- list()
#####################################################################
#
# General Notes:
# * To get the description for a single column question, use
# qids$Qx x as the question integer id/number (10, 28...)
# * To get the column range of a composite questions, use
# qids[qcol(x)], x is the question integer id/number (12, 13...)
# * All results, stats relevant for reporting purposes are either
# rendered as figures ("M_fig_****.png"), or appended to the list
# output. Check output in the console mode of R to see details
# once the entire script is run.
# * Store and backup this file with the data files (file1.csv etc.)
#
######################################################################
### Q46 How old are you? ####
dmgrph.age <- as.numeric( d1.res$Q46[!is.na(d1.res$Q46)] )
out <- describe(dmgrph.age)
output$Q46 <- out
png(filename="M_fig_age_summary.png")
hist(dmgrph.age, main="Respondants Age Distribution (Member Survey)", xlab="Age")
dev.off()
## Q6.1 Experience as a Girl Scout ##
dmgrph.levels.preset <- c("Daisy","Brownie","Junior","Cadette","Unspecified")
colrange <- which(names(d1.res) >= "Q6" & names(d1.res) <="Q6Z")
d1.level <- d1.res[,colrange]
dmgrph.levels.other <- unique(d1.level$Q6.1_5_TEXT)
dmgrph.levels <- c(dmgrph.levels.preset, as.character(dmgrph.levels.other))
dmgrph.levels <- unique(dmgrph.levels)
dmgrph.levels <- dmgrph.levels[!is.na(dmgrph.levels)]
# Column-name helper for question 6: same as qcol() but pinned to qid = "6".
q6col <- function(i, j, text = FALSE){
  qcol(i, j, text, qid = "6")
}
level.counts <- c()
level.time <- c("<=1yr","2-3yr","4-5yr",">=6yr")
# troop size
level.size <- c("<=5","6-9","10-13",">=14")
for (i in 1:5){
level.counts <- c( level.counts, length( which( d1.res[[q6col(1,i)]]=="1") ) )
time <- d1.res[[q6col(2,i)]]
time <- as.numeric(time[!is.na(time)])
size <- d1.res[[q6col(3,i)]]
size <- as.numeric(size[!is.na(size)])
time_count <- c()
size_count <- c()
for (j in 1:4){
time_count<-c(time_count, sum(as.numeric(time)==j))
size_count<-c(size_count, sum(as.numeric(size)==j))
}
level.time <- cbind(level.time, time_count)
level.size <- cbind(level.size, size_count)
}
level.size <- as.data.frame(level.size)
level.time <- as.data.frame(level.time)
names(level.size) <- c("", dmgrph.levels.preset)
names(level.time) <- c("", dmgrph.levels.preset)
row.names(level.size)<-level.size[,1]
level.size<-level.size[,2:ncol(level.size)]
row.names(level.time)<-level.time[,1]
level.time<-level.time[,2:ncol(level.time)]
tmp <- c(N-sum(level.counts), level.counts)
names(tmp) <- c("Not a Member", dmgrph.levels.preset)
dmgrph.levels.distr <- tmp/N*100
## plot
png(filename="M_fig_memberhip_summary.png",width=500)
barplot(dmgrph.levels.distr, main="Girl Scout Levels Distribution", xlab="Level", ylab="Percentage")
dev.off()
# Plot helper for Q6: converts each column of `data` (counts per bin) into
# percentages of its column total and draws one barplot per column on the
# current graphics device (the caller sets up png()/par(mfrow=...) first).
# Side effects: print()s each percentage vector and emits the barplots.
# Returns the numeric count matrix.
q6plot <- function(data,main=NULL,xlab=NULL,ylab=NULL){
i <- apply(data,2,as.numeric)
# apply() drops the row labels, so restore them for the barplot axes
row.names(i) <- row.names(data)
total <- apply(i,2,sum)
for (j in 1:ncol(i)){
tmp <- i[,j]/total[j]*100
print(tmp)
barplot(tmp,main=paste(main,colnames(i)[j],sep=":"),xlab=xlab,ylab=ylab)
}
return (i)
}
# NOTE: the next definition masks base::aggregate() for the rest of this
# script; the original base function is preserved as `aggr` below.
aggr <- aggregate
# Collapse a per-level count table to percentages: sum each row across all
# Girl Scout levels, scale so the column sums to 100, and return a named table.
# (Deliberately masks base::aggregate for the remainder of the script; the
# base function is kept as `aggr`, defined just above.)
#
# @param level.size data.frame of counts (possibly stored as character),
#   one row per bin, one column per level.
# @return a 1-D table of percentages, one entry per row of `level.size`.
aggregate <- function(level.size){
  counts <- apply(level.size, 2, as.numeric)   # coerce character counts
  totals <- rowSums(counts)                    # total per bin across levels
  pct <- as.table(totals / sum(totals) * 100)  # normalize to percentages
  rownames(pct) <- row.names(level.size)       # restore bin labels
  pct
}
png(filename="M_fig_memberhip_detail.png",width=1200,height=700)
par(mfrow=c(3,4))
q6plot(level.time[1:4],main="Girl Scout Membership Length by Level", xlab="Years",ylab="Percentage")
q6plot(level.size[1:4],main="Girl Scout Group Size by Level", xlab="Group Size",ylab="Percentage")
barplot(aggregate(level.time),main="Girl Scout Membership Length, Aggregating all Levels",xlab="Years",ylab="Percentage")
barplot(aggregate(level.size),main="Girl Scout Group Size, Aggregating all Levels",xlab="Group Size",ylab="Percentage")
dev.off()
par(mfrow=c(1,1))
#### Q25, why girls don't go camping with their troops ? ###
y<-d1.res$Q25
y<-y[!is.na(y)]
y.reasons <- c("I don't feel comfortable going without my parents","I don't like to go camping","I go to a different summer camp","I like doing activities outside, but I don't like camping","I would be away from home for too long","I'm scared to camp with people I don't know","My parents don't allow me to go camping without them","My troop has never been camping")
y.desc <- describe(y)
tmp <- rbind(y.reasons,y.desc$values)
tmp <- t(tmp)
colnames(tmp) <- c("Reasons why Never Gone Camping", "Frequency", "%")
output$Q25 <- tmp
#### Q20 ####
# Column-name helper for question 20: qcol() with qid fixed to "20".
q20col <- function(i, j, text = FALSE){
  qcol(i, j, text, qid = "20")
}
### formated data, from survey server
data20 <- read.csv("report_q20.csv")
camp <- data20$Yes/data20$Responses
names(camp) <- data20$Question
camp <- camp*100
png(filename="M-camping-type.png", width=600)
par(oma=c(0,15,5,5),las=2)
barplot(camp,horiz=T,main="Girl Scouts Camping Experience by Type",xlab="Percentage of Girls for each type")
dev.off()
data20 <- read.csv("report_q20_2.csv")
d20.numeric <- apply(data20[3:9],2,as.numeric)
# Scale a response-count row by its 7th entry (the overall total), expressing
# each duration bucket as a fraction of total responses.
q20.normalize <- function(v){
  total <- v[7]
  v / total
}
d20.normalize <- apply(d20.numeric, 1, q20.normalize)
d20.time <- apply(d20.normalize,1,mean)[1:6]
d20.time <- d20.time*100
names(d20.time)<-c("1~2\ndays","3~4\ndays","5~6\ndays","1~2\nweeks","3~4\nweeks","Over\n1 month")
png(filename="M-camping-time.png", width=600)
par(opar)
barplot(d20.time, main="Girl Scouts Camping Experience Time Distribution",xlab="Percentage of girls for each duration")
dev.off()
### Outdoor activities Q9,Q23,Q12,Q13,Q15 ###
d23 <- d1.res[qcol(23)]
d23.clean <- apply(cleanse(d23), 2, as.numeric)
q23.corr <- cor(d23.clean)
for (i in 1:nrow(q23.corr)){
for (j in 1:ncol(q23.corr)){
cor.test(d23.clean[,i], d23.clean[,j], method = c("pearson"))$p.value -> p
if (p>0.05){
q23.corr[i,j]<-q23.corr[j,i] <- 0
}
}
}
q23.model1 <- (Q23_5~Q23_1+Q23_2+Q23_3+Q23_4+Q23_6)
d23.clean <- as.data.frame(d23.clean)
q23.fit1 <- lm(q23.model1, data=d23.clean)
# q23.fit2 <- mlogit(q23.model1, data=d23.clean)
#### Q13, detailed activities analysis
### Formating Data
d13.1 <- read.csv("report_q13_1.csv")
d13.2 <- read.csv("report_q13_2.csv")
d13.3 <- read.csv("report_q13_3.csv")
d13.all <- cbind(d13.1,d13.2,d13.3)
acts <- d13.all$Question
d13 <- d1.res[qcol(13)]
#done before
d13.db <- d13[,1:90]
#like
d13.lk <- d13[,91:180]
#with girl scouts
d13.wg <- d13[,181:270]
# 1 means have done it before, 2 means haven't done it before
d13.db.choices <- c(1,2)
# 1 means would like to do it with girl scouts, 2 means would not like to
d13.wg.choices <- c(1,2,3)
# rating of activities on a 1-5 scale
d13.lk.choices <- c(1,2,3,4,5)
#### Chisq-test for done before vs. rating, cross-tab ####
d13.dblk = list()
nind <- c()
nindc <- c()
nindc2 <- c()
# Turn a star-rating count vector into its weighted contribution to the mean
# rating: element i becomes i * count_i / total, so sum(z(v)) is the average
# star rating.
#
# Generalized from the hard-coded 1:5 to seq_along(v) so it works for any
# number of rating bins; identical for the length-5 vectors used here.
#
# @param v numeric vector of counts per rating bin (1 star first).
# @return numeric vector of the same length; sums to the mean rating.
z <- function(v){
  seq_along(v) * v / sum(v)
}
cat("These activities showed a improvement of rating, after being done:\n")
for (k in 1:90) {
label <- as.character(k)
d13.dblk[[label]] <-matrix(ncol=2,nrow=5)
for (i in 1:2){
for (j in 1:5){
filter = d13.db[,k]==d13.db.choices[i] & d13.lk[,k] == d13.lk.choices[j]
filter.nomissing <- !is.na(filter)
filter = filter & filter.nomissing
n <- sum(filter)
d13.dblk[[label]][j,i] = n
}
}
p<-chisq.test(d13.dblk[[label]])$p.value
if (!is.nan(p) & p<0.05){
m <- d13.dblk[[label]]
d13.dblk.diff <- nind <- c(nind, k)
ALL <- apply(m,2,sum)
five <- m[5, ]/ALL
change2 <- five[1] - five[2]
m <- apply(m,2,z)
change <- apply(m,2,sum)
change <- (change[1] - change[2])
d13.dblk.avg <- nindc <- c(nindc, change)
d13.dblk.five <- nindc2 <- c(nindc2, change2)
cat(acts[as.numeric(k)], "\nGirls love it ",change," ", change2, "\n")
}
}
cat("END of Done Before vs. Ratings, \n\n\n ######################\n")
# Summary table for Q13: ratings for "done before" vs "not done before",
# restricted to the activities flagged significant by the chi-squared screen.
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  # BUG FIX: index by the significant activity id nind[j]; the original used
  # d13.dblk[[j]], which read the first length(nind) activities instead of the
  # ones that passed the screen (rownames below use acts[nind]).
  a <- d13.dblk[[nind[j]]]
  m[j, 1] <- sum(z(a[, 1]))          # avg rating, done before
  m[j, 2] <- sum(z(a[, 2]))          # avg rating, haven't done before
  five <- a[5, ] / apply(a, 2, sum)  # 5-star share per column
  m[j, 3] <- five[1]
  m[j, 4] <- five[2]
  m[j, 5] <- m[j, 1] - m[j, 2]       # improvement in avg rating
  m[j, 6] <- m[j, 3] - m[j, 4]       # improvement in 5-star share
}
d13.dblk.tab <- as.table(m)
rownames(d13.dblk.tab) <- acts[nind]
colnames(d13.dblk.tab) <- c("Avg Rating, Done Before","Avg Rating, Haven't Done Before", "5 Star %, Done Before", "5 Star %, Haven't Done Before", "Improvement in Avg Rating", "Improvement in 5 Star %")
#### Chisq-test for done before with girl scouts vs. rating, cross-tab ####
d13.wglk = list()
nind <- c()
nind2 <- c()
nindc <- c()
nindc2 <- c()
nindc3 <- c()
nindc4 <- c()
cat("These activities showed a improvement of rating, after being done with Girl Scouts:\n")
for (k in 1:90) {
label <- as.character(k)
d13.wglk[[label]] <-matrix(ncol=3,nrow=5)
for (i in 1:3){
for (j in 1:5){
filter = d13.wg[,k]==d13.wg.choices[i] & d13.lk[,k] == d13.lk.choices[j]
filter.nomissing <- !is.na(filter)
filter = filter & filter.nomissing
n <- sum(filter)
d13.wglk[[label]][j,i] <- n
}
}
p<-chisq.test(d13.wglk[[label]])$p.value
if (!is.nan(p) & p<0.05){
tmp <- m <- d13.wglk[[label]]
m <- apply(m,2,z)
ALL <- apply(tmp,2,sum)
if (ALL[3]>40){
d13.wglk.diff <- nind <- c(nind, k)
five <- m[5, ]/ALL
change21 <- change2 <- five[3] - five[1]
d13.wglk.five_wl <- nindc2 <- c(nindc2, change2)
change22 <- change2 <- five[3] - five[2]
d13.wglk.five_wn <- nindc3 <- c(nindc3, change2)
change <- apply(m,2,sum)
change11 <- (change[3] - change[1])
d13.wglk.avg_wl <- nindc <- c(nindc, change11)
change12 <- (change[3] - change[2])
d13.wglk.avg_wn <- nindc4 <- c(nindc4, change12)
cat(acts[as.numeric(k)], ":->\nGirls' reactions:")
cat("\nChange in avg rating vs. would like to do it with gs: ", change11)
cat("\nChange in avg rating vs. would not like to do it with gs: ", change12)
cat("\nChange in five star % vs. would like to do it with gs: ", change21)
cat("\nChange in five star % vs. would not like to do it with gs: ", change22)
cat("\n------------------------------------------------------------------------\n")
}
}
}
cat("END of Done with Girl Scouts vs. Ratings, \n\n\n ######################\n")
# Summary table for Q13: "would like" vs "already done with Girl Scouts",
# restricted to the activities flagged significant by the chi-squared screen.
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  # BUG FIX: index by the significant activity id nind[j]; the original used
  # d13.wglk[[j]], which read the first length(nind) activities instead of the
  # ones that passed the screen (rownames below use acts[nind]).
  a <- d13.wglk[[nind[j]]]
  m[j, 1] <- sum(z(a[, 1]))          # avg rating, would like
  m[j, 2] <- sum(z(a[, 3]))          # avg rating, already done with GS
  five <- a[5, ] / apply(a, 2, sum)  # 5-star share per column
  m[j, 3] <- five[1]
  m[j, 4] <- five[3]
  m[j, 5] <- m[j, 2] - m[j, 1]       # improvement in avg rating
  m[j, 6] <- m[j, 4] - m[j, 3]       # improvement in 5-star share
}
d13.wglk.tab1 <- as.table(m)
rownames(d13.wglk.tab1) <- acts[nind]
colnames(d13.wglk.tab1) <- c("Avg Rating, Would Like","Avg Rating, Already Done with Girl Scouts", "5 Star %, Would Like", "5 Star %, Already Done with Girl Scouts", "Improvement in Avg Rating", "Improvement in 5 Star %")
# Summary table for Q13: "would not like" vs "already done with Girl Scouts",
# restricted to the activities flagged significant by the chi-squared screen.
m <- matrix(ncol = 6, nrow = length(nind))
for (j in seq_along(nind)) {
  # BUG FIX: index by the significant activity id nind[j]; the original used
  # d13.wglk[[j]], which read the first length(nind) activities instead of the
  # ones that passed the screen (rownames below use acts[nind]).
  a <- d13.wglk[[nind[j]]]
  m[j, 1] <- sum(z(a[, 2]))          # avg rating, would not like
  m[j, 2] <- sum(z(a[, 3]))          # avg rating, already done with GS
  five <- a[5, ] / apply(a, 2, sum)  # 5-star share per column
  m[j, 3] <- five[2]
  m[j, 4] <- five[3]
  m[j, 5] <- m[j, 2] - m[j, 1]       # improvement in avg rating
  m[j, 6] <- m[j, 4] - m[j, 3]       # improvement in 5-star share
}
d13.wglk.tab2 <- as.table(m)
rownames(d13.wglk.tab2) <- acts[nind]
colnames(d13.wglk.tab2) <- c("Avg Rating, Would Not Like","Avg Rating, Already Done with Girl Scouts", "5 Star %, Would Not Like", "5 Star %, Already Done with Girl Scouts", "Improvement in Avg Rating", "Improvement in 5 Star %")
output$Q13.1 <- d13.dblk.tab
output$Q13.2 <- d13.wglk.tab1
output$Q13.3 <- d13.wglk.tab2
#### Q10 and Q28 Comparison
# Pair up answers to Q10 and Q28, keep only respondents who answered both,
# and tabulate the distribution of attitude change (Q28 - Q10).
colrange <- which(names(d1.res) == "Q10" | names(d1.res) =="Q28")
d10_28 <- d1.res[, colrange]
rowrange <- !( is.na(d10_28[,1]) | is.na(d10_28[, 2]) )
d10_28.clean <- cmp <- apply(d10_28[rowrange, ], 2, as.numeric)
# BUG FIX: hist() returns component `counts`; `$count` only worked through $'s
# partial matching. NOTE(review): the rbind assumes hist() produces exactly 9
# bins to line up with -4:4 — confirm against the actual data.
Change.of.Attitude <- as.table(rbind(-4:4, hist(cmp[,2] - cmp[,1])$counts))
output$Q10_Q28.COA <- Change.of.Attitude
# Cramer-von Mises test (dgof) of Q28 against the empirical CDF of Q10.
output$Q10_Q28.TEST <- cvm.test(cmp[,2],ecdf(cmp[,1]), simulate.p.value=T)
#### Q12
colrange <- qcol(12)
d12 <- d1.res[, colrange]
# Count the entries of v that equal the string "1", ignoring NAs.
# (Shadows stats::filter / dplyr::filter for the rest of the script.)
filter <- function(v){
  hits <- !is.na(v) & v == "1"
  sum(as.numeric(hits))
}
d12.count <- apply(d12, 2, filter)
d12.count <- d12.count/sum(d12.count) * 100
d12.tab <- as.table(cbind(as.character(acts), d12.count))
row.names(d12.tab) <- c()
colnames(d12.tab) <- c("Outdoor Activities", "Overall Preference Score")
output$Q12 <- d12.tab
#### Q29
d29 <- read.csv("report_camp_opts.csv")
|
library(ncdf4.helpers)
### Name: nc.get.variable.list
### Title: Get a list of names of data variables
### Aliases: nc.get.variable.list
### ** Examples
## Get dimension axes from file by inferring them from dimension names
## Not run:
##D f <- nc_open("pr.nc")
##D var.list <- nc.get.variable.list(f)
##D nc_close(f)
## End(Not run)
| /data/genthat_extracted_code/ncdf4.helpers/examples/nc.get.variable.list.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 342 | r | library(ncdf4.helpers)
### Name: nc.get.variable.list
### Title: Get a list of names of data variables
### Aliases: nc.get.variable.list
### ** Examples
## Get dimension axes from file by inferring them from dimension names
## Not run:
##D f <- nc_open("pr.nc")
##D var.list <- nc.get.variable.list(f)
##D nc_close(f)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehendmedical_operations.R
\name{comprehendmedical_start_rx_norm_inference_job}
\alias{comprehendmedical_start_rx_norm_inference_job}
\title{Starts an asynchronous job to detect medication entities and link them
to the RxNorm ontology}
\usage{
comprehendmedical_start_rx_norm_inference_job(InputDataConfig,
OutputDataConfig, DataAccessRoleArn, JobName, ClientRequestToken,
KMSKey, LanguageCode)
}
\arguments{
\item{InputDataConfig}{[required] Specifies the format and location of the input data for the job.}
\item{OutputDataConfig}{[required] Specifies where to send the output files.}
\item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) of the AWS Identity and Access Management
(IAM) role that grants Amazon Comprehend Medical read access to your
input data. For more information, see \href{https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions-med.html#auth-role-permissions-med}{Role-Based Permissions Required for Asynchronous Operations}.}
\item{JobName}{The identifier of the job.}
\item{ClientRequestToken}{A unique identifier for the request. If you don\'t set the client
request token, Amazon Comprehend Medical generates one.}
\item{KMSKey}{An AWS Key Management Service key to encrypt your output files. If you
do not specify a key, the files are written in plain text.}
\item{LanguageCode}{[required] The language of the input documents. All documents must be in the same
language.}
}
\description{
Starts an asynchronous job to detect medication entities and link them
to the RxNorm ontology. Use the \code{DescribeRxNormInferenceJob} operation
to track the status of a job.
}
\section{Request syntax}{
\preformatted{svc$start_rx_norm_inference_job(
InputDataConfig = list(
S3Bucket = "string",
S3Key = "string"
),
OutputDataConfig = list(
S3Bucket = "string",
S3Key = "string"
),
DataAccessRoleArn = "string",
JobName = "string",
ClientRequestToken = "string",
KMSKey = "string",
LanguageCode = "en"
)
}
}
\keyword{internal}
| /paws/man/comprehendmedical_start_rx_norm_inference_job.Rd | permissive | jcheng5/paws | R | false | true | 2,114 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehendmedical_operations.R
\name{comprehendmedical_start_rx_norm_inference_job}
\alias{comprehendmedical_start_rx_norm_inference_job}
\title{Starts an asynchronous job to detect medication entities and link them
to the RxNorm ontology}
\usage{
comprehendmedical_start_rx_norm_inference_job(InputDataConfig,
OutputDataConfig, DataAccessRoleArn, JobName, ClientRequestToken,
KMSKey, LanguageCode)
}
\arguments{
\item{InputDataConfig}{[required] Specifies the format and location of the input data for the job.}
\item{OutputDataConfig}{[required] Specifies where to send the output files.}
\item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) of the AWS Identity and Access Management
(IAM) role that grants Amazon Comprehend Medical read access to your
input data. For more information, see \href{https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions-med.html#auth-role-permissions-med}{Role-Based Permissions Required for Asynchronous Operations}.}
\item{JobName}{The identifier of the job.}
\item{ClientRequestToken}{A unique identifier for the request. If you don\'t set the client
request token, Amazon Comprehend Medical generates one.}
\item{KMSKey}{An AWS Key Management Service key to encrypt your output files. If you
do not specify a key, the files are written in plain text.}
\item{LanguageCode}{[required] The language of the input documents. All documents must be in the same
language.}
}
\description{
Starts an asynchronous job to detect medication entities and link them
to the RxNorm ontology. Use the \code{DescribeRxNormInferenceJob} operation
to track the status of a job.
}
\section{Request syntax}{
\preformatted{svc$start_rx_norm_inference_job(
InputDataConfig = list(
S3Bucket = "string",
S3Key = "string"
),
OutputDataConfig = list(
S3Bucket = "string",
S3Key = "string"
),
DataAccessRoleArn = "string",
JobName = "string",
ClientRequestToken = "string",
KMSKey = "string",
LanguageCode = "en"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NeuralNetTools_utils.R
\name{lekgrps}
\alias{lekgrps}
\title{Create optional barplot for \code{\link{lekprofile}} groups}
\usage{
lekgrps(grps, position = "dodge", grp_nms = NULL)
}
\arguments{
\item{grps}{\code{\link[base]{data.frame}} of values for each variable in each group used to create groups in \code{\link{lekprofile}}}
\item{position}{chr string indicating bar position (e.g., 'dodge', 'fill', 'stack'), passed to \code{\link[ggplot2]{geom_bar}}}
\item{grp_nms}{optional chr string of alternative names for groups in legend}
}
\value{
A \code{\link[ggplot2]{ggplot}} object
}
\description{
Create optional barplot of constant values of each variable for each group used with \code{\link{lekprofile}}
}
\examples{
## centers used with kmeans clustering
x <- neuraldat[, c('X1', 'X2', 'X3')]
grps <- kmeans(x, 6)$center
lekgrps(grps)
}
| /man/lekgrps.Rd | permissive | alfords/NeuralNetTools | R | false | true | 926 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NeuralNetTools_utils.R
\name{lekgrps}
\alias{lekgrps}
\title{Create optional barplot for \code{\link{lekprofile}} groups}
\usage{
lekgrps(grps, position = "dodge", grp_nms = NULL)
}
\arguments{
\item{grps}{\code{\link[base]{data.frame}} of values for each variable in each group used to create groups in \code{\link{lekprofile}}}
\item{position}{chr string indicating bar position (e.g., 'dodge', 'fill', 'stack'), passed to \code{\link[ggplot2]{geom_bar}}}
\item{grp_nms}{optional chr string of alternative names for groups in legend}
}
\value{
A \code{\link[ggplot2]{ggplot}} object
}
\description{
Create optional barplot of constant values of each variable for each group used with \code{\link{lekprofile}}
}
\examples{
## centers used with kmeans clustering
x <- neuraldat[, c('X1', 'X2', 'X3')]
grps <- kmeans(x, 6)$center
lekgrps(grps)
}
|
# https://towardsdatascience.com/utilizing-quosures-to-create-ultra-flexible-filtering-controls-in-r-shiny-f3e5dc461399
# https://github.com/rstudio/gt
library(shiny)
library(shinyjs)
library(gt)
library(tidyverse)
library(glue)
library(rlang)
options(shiny.deprecation.messages=FALSE)
server<-function(input,output,session){
# loading data
# Reactive: parse the uploaded CSV (first column supplies row names).
# Yields NULL until a file is chosen, which short-circuits the downstream
# renderers that depend on it.
rawdata <- reactive({
inFile1 <- input$file
if (is.null(inFile1)) {
return(NULL)
}
read.csv(inFile1$datapath, row.names = 1)
})
# show data
output$gtTable1 <- DT::renderDataTable({
req(input$file)
dataA <- rawdata()
dataA
})
# create / modify gt table
gttableA <- reactive({
if(input$gtstub=="No stub"){
table <- gt(rawdata())
}
if(input$gtstub=="Create a table stub"){
table <- gt(rawdata())
table <- table %>%
gt(
rowname_col = input$rownameCol,
groupname_col = input$groupnameCol
)
}
return(table)
})
# show created/modified gt table
output$gttable1 <- gt::render_gt({
req(input$file)
gttableA()
})
# updated gt table
gttable1 <- reactive({
table <- gttableA()
## add header
if(!is.na(input$title)){
table1 <- table %>%
tab_header(
title=input$title,
subtitle=input$subtitle
)
if(input$saveheader){
table <- table1
}
}
## add conditional footnote
if(input$footnote!="" & input$footnotecol!="" & input$footnoterow!=""){
table3 <- table %>%
tab_footnote(
footnote=input$footnote,
locations=cells_data(
columns=vars(!!rlang::sym(input$footnotecol)),
rows=eval_tidy(parse_expr(input$footnoterow))
)
)
if(input$savefootnote){
table <- table3
}
}
## add footnote to column label itself
if(input$footnotecollab!="" & input$footnotecollabloc!=""){
columnC <- unlist(strsplit(input$footnotecollabloc,","))
table4 <- table
for(i in 1:length(columnC)){
table4 <- table4 %>%
tab_footnote(
footnote=input$footnotecollab,
locations=cells_column_labels(
columns=vars(!!rlang::sym(columnC[i]))
)
)
}
if(input$glyphs=="letters"){
table4 <- table4 %>%
tab_options(
footnote.glyph=letters
)}
if(input$glyphs=="numbers"){
table4 <- table4 %>%
tab_options(
footnote.glyph=numbers
)}
if(input$savefootnotecollab){
table <- table4
}
}
## add source note
if(input$sourcenote!=""){
table5 <- table %>%
tab_source_note(
source_note=input$sourcenote
)
if(input$savesourcenote){
table <- table5
}
}
table
})
# show updated gt table
output$gttable2 <- gt::render_gt({
req(input$file)
gttable1()
})
##### modify row from here:
output$gttable3 <- gt::render_gt({
req(input$file)
gttable1()
})
gttable4 <- reactive({
table <- gttable1()
## reorder row groups
if(input$groups!=""){
groupsA <- unlist(strsplit(input$groups,","))
table9 <- table %>%
row_group_order(
groups=groupsA
)
if(input$saveReorderGroup){
table <- table9
}
}
## add summary row
if(input$columnsForSummary!="" ){
table10 <- table %>%
summary_rows(
columns=vars(!!rlang::sym(input$columnsForSummary)),
fns=eval_tidy(parse_expr(input$fns))
)
if(input$saveSummaryRow){
table <- table10
}
}
table
})
output$gttable4 <- gt::render_gt({
req(input$file)
gttable4()
})
##### modify columns from here:
output$gttable5 <- gt::render_gt({
req(input$file)
gttable4()
})
gttable6 <- reactive({
table <- gttable1()
## set the alignment of the columns
if(input$aligns!=""){
table10 <- table %>%
cols_align(
align=input$aligns,
columns = TRUE
)
if(input$saveAlign){
table <- table10
}
}
## hide columns
if(input$hidecolumns!=""){
table2 <- table
columnA <- unlist(strsplit(input$hidecolumns,","))
for(i in 1:length(columnA)){
col <- columnA[i]
columnB <- vars(!!rlang::sym(col))
table2 <- table2 %>%
cols_hide(
columns=columnB
)
}
if(input$savehidecols){
table <- table2
}
}
## relabel columns
if(input$relabelColumns!="list2()"){
table12 <- table %>%
cols_label(
.list=eval_tidy(parse_expr(input$relabelColumns))
)
if(input$saveRelabelCols){
table <- table12
}
}
###### more modification of column
table
})
output$gttable6 <- gt::render_gt({
req(input$file)
gttable6()
})
##### format cell body from here:
output$gttable7 <- gt::render_gt({
req(input$file)
gttable6()
})
gttable8 <- reactive({
table <- gttable6()
## format number
if(input$numberformat!="" & input$pattern!=""){
columnF<- unlist(strsplit(input$numberformat,","))
table8<- table
for(i in 1:length(columnF)){
table8 <- table8 %>%
fmt_number(
columns = vars(!!rlang::sym(columnF[i])),
scale_by = input$scaleby,
pattern=input$pattern
)
}
if(input$saveformatnumber){
table <- table8
}
}
## format 'num' columns with scientific notation
if(input$scinumformat!="" & input$scinumdecimal!=""){
columnD <- unlist(strsplit(input$scinumformat,","))
table6<- table
for(i in 1:length(columnD)){
table6 <- table6 %>%
fmt_scientific(
columns = vars(!!rlang::sym(columnD[i])),
decimals = input$scinumdecimal
)
}
if(input$saveformatnum){
table <- table6
}
}
## format date columns in "Date"
if(input$dateformatc!=""){
columnE <- unlist(strsplit(input$dateformatc,","))
table7 <- table
for(i in 1:length(columnE)){
table7 <- table7 %>%
fmt_date(
columns = vars(!!rlang::sym(columnE[i])),
rows = eval_tidy(parse_expr(input$dateformatr)),
date_style = input$datestyle
)
}
if(input$saveformatdate){
table <- table7
}
}
## format currency
if(input$currencyformat!="" & input$currency!=""){
columnE <- unlist(strsplit(input$currencyformat,","))
table8 <- table
for(i in 1:length(columnE)){
table8 <- table8 %>%
fmt_currency(
columns = vars(!!rlang::sym(columnE[i])),
currency = input$currency
)
}
if(input$saveformatcurrency){
table <- table8
}
}
# color, size, stype ...
if(input$colorColumnLabelBackg!="" & input$colorColumnLabelBackg!=""){
table20 <- table %>%
tab_options(
footnote.font.size =input$footnoteSize,
heading.background.color=input$colorHeaderBackg,
table.background.color=input$colorTableBackg,
column_labels.background.color=input$colorColumnLabelBackg,
table.font.size=input$fontSize
)
table <- table20
}
########## more format here
table
})
output$gttable8 <- gt::render_gt({
req(input$file)
gttable8()
})
}
| /server.R | no_license | xwang-lilly/ics_gt_dev | R | false | false | 8,712 | r |
# https://towardsdatascience.com/utilizing-quosures-to-create-ultra-flexible-filtering-controls-in-r-shiny-f3e5dc461399
# https://github.com/rstudio/gt
library(shiny)
library(shinyjs)
library(gt)
library(tidyverse)
library(glue)
library(rlang)
options(shiny.deprecation.messages=FALSE)
server<-function(input,output,session){
# loading data
rawdata <- reactive({
inFile1 <- input$file
if (is.null(inFile1)) {
return(NULL)
}
read.csv(inFile1$datapath, row.names = 1)
})
# show data
output$gtTable1 <- DT::renderDataTable({
req(input$file)
dataA <- rawdata()
dataA
})
# create / modify gt table
gttableA <- reactive({
if(input$gtstub=="No stub"){
table <- gt(rawdata())
}
if(input$gtstub=="Create a table stub"){
table <- gt(rawdata())
table <- table %>%
gt(
rowname_col = input$rownameCol,
groupname_col = input$groupnameCol
)
}
return(table)
})
# show created/modified gt table
output$gttable1 <- gt::render_gt({
req(input$file)
gttableA()
})
# updated gt table
gttable1 <- reactive({
table <- gttableA()
## add header
if(!is.na(input$title)){
table1 <- table %>%
tab_header(
title=input$title,
subtitle=input$subtitle
)
if(input$saveheader){
table <- table1
}
}
## add conditional footnote
if(input$footnote!="" & input$footnotecol!="" & input$footnoterow!=""){
table3 <- table %>%
tab_footnote(
footnote=input$footnote,
locations=cells_data(
columns=vars(!!rlang::sym(input$footnotecol)),
rows=eval_tidy(parse_expr(input$footnoterow))
)
)
if(input$savefootnote){
table <- table3
}
}
## add footnote to column label itself
if(input$footnotecollab!="" & input$footnotecollabloc!=""){
columnC <- unlist(strsplit(input$footnotecollabloc,","))
table4 <- table
for(i in 1:length(columnC)){
table4 <- table4 %>%
tab_footnote(
footnote=input$footnotecollab,
locations=cells_column_labels(
columns=vars(!!rlang::sym(columnC[i]))
)
)
}
if(input$glyphs=="letters"){
table4 <- table4 %>%
tab_options(
footnote.glyph=letters
)}
if(input$glyphs=="numbers"){
table4 <- table4 %>%
tab_options(
footnote.glyph=numbers
)}
if(input$savefootnotecollab){
table <- table4
}
}
## add source note
if(input$sourcenote!=""){
table5 <- table %>%
tab_source_note(
source_note=input$sourcenote
)
if(input$savesourcenote){
table <- table5
}
}
table
})
# show updated gt table
output$gttable2 <- gt::render_gt({
req(input$file)
gttable1()
})
##### modify row from here:
output$gttable3 <- gt::render_gt({
req(input$file)
gttable1()
})
gttable4 <- reactive({
table <- gttable1()
## reorder row groups
if(input$groups!=""){
groupsA <- unlist(strsplit(input$groups,","))
table9 <- table %>%
row_group_order(
groups=groupsA
)
if(input$saveReorderGroup){
table <- table9
}
}
## add summary row
if(input$columnsForSummary!="" ){
table10 <- table %>%
summary_rows(
columns=vars(!!rlang::sym(input$columnsForSummary)),
fns=eval_tidy(parse_expr(input$fns))
)
if(input$saveSummaryRow){
table <- table10
}
}
table
})
output$gttable4 <- gt::render_gt({
req(input$file)
gttable4()
})
##### modify columns from here:
output$gttable5 <- gt::render_gt({
req(input$file)
gttable4()
})
gttable6 <- reactive({
table <- gttable1()
## set the alignment of the columns
if(input$aligns!=""){
table10 <- table %>%
cols_align(
align=input$aligns,
columns = TRUE
)
if(input$saveAlign){
table <- table10
}
}
## hide columns
if(input$hidecolumns!=""){
table2 <- table
columnA <- unlist(strsplit(input$hidecolumns,","))
for(i in 1:length(columnA)){
col <- columnA[i]
columnB <- vars(!!rlang::sym(col))
table2 <- table2 %>%
cols_hide(
columns=columnB
)
}
if(input$savehidecols){
table <- table2
}
}
## relabel columns
if(input$relabelColumns!="list2()"){
table12 <- table %>%
cols_label(
.list=eval_tidy(parse_expr(input$relabelColumns))
)
if(input$saveRelabelCols){
table <- table12
}
}
###### more modification of column
table
})
output$gttable6 <- gt::render_gt({
req(input$file)
gttable6()
})
##### format cell body from here:
output$gttable7 <- gt::render_gt({
req(input$file)
gttable6()
})
gttable8 <- reactive({
table <- gttable6()
## format number
if(input$numberformat!="" & input$pattern!=""){
columnF<- unlist(strsplit(input$numberformat,","))
table8<- table
for(i in 1:length(columnF)){
table8 <- table8 %>%
fmt_number(
columns = vars(!!rlang::sym(columnF[i])),
scale_by = input$scaleby,
pattern=input$pattern
)
}
if(input$saveformatnumber){
table <- table8
}
}
## format 'num' columns with scientific notation
if(input$scinumformat!="" & input$scinumdecimal!=""){
columnD <- unlist(strsplit(input$scinumformat,","))
table6<- table
for(i in 1:length(columnD)){
table6 <- table6 %>%
fmt_scientific(
columns = vars(!!rlang::sym(columnD[i])),
decimals = input$scinumdecimal
)
}
if(input$saveformatnum){
table <- table6
}
}
## format date columns in "Date"
if(input$dateformatc!=""){
columnE <- unlist(strsplit(input$dateformatc,","))
table7 <- table
for(i in 1:length(columnE)){
table7 <- table7 %>%
fmt_date(
columns = vars(!!rlang::sym(columnE[i])),
rows = eval_tidy(parse_expr(input$dateformatr)),
date_style = input$datestyle
)
}
if(input$saveformatdate){
table <- table7
}
}
## format currency
if(input$currencyformat!="" & input$currency!=""){
columnE <- unlist(strsplit(input$currencyformat,","))
table8 <- table
for(i in 1:length(columnE)){
table8 <- table8 %>%
fmt_currency(
columns = vars(!!rlang::sym(columnE[i])),
currency = input$currency
)
}
if(input$saveformatcurrency){
table <- table8
}
}
# color, size, stype ...
if(input$colorColumnLabelBackg!="" & input$colorColumnLabelBackg!=""){
table20 <- table %>%
tab_options(
footnote.font.size =input$footnoteSize,
heading.background.color=input$colorHeaderBackg,
table.background.color=input$colorTableBackg,
column_labels.background.color=input$colorColumnLabelBackg,
table.font.size=input$fontSize
)
table <- table20
}
########## more format here
table
})
output$gttable8 <- gt::render_gt({
req(input$file)
gttable8()
})
}
|
context('test_posture_dependency.r')
# Exploratory "test": fits linear force-prediction models on one posture's
# ForceTrials RDS. Depends on project helpers (converged_colmeans,
# generate_linear_static_model, df_split_into_training_and_testing,
# muscle_names, measured) and on local data under ~/Resilio Sync — it cannot
# run in CI as written.
test_that("a_matrix with fewer than all samples trained on <- forcetrial_list <- rds",
{
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
# Arbitrarily picks the 15th posture file in the folder.
sample_posture_path <- dir(rds_folder_path)[15]
sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds = 100)
linear_model <- generate_linear_static_model(input_output_data, fraction_training = 0.8)
print(paste(median(linear_model$euclidian_errors), "is the median euclidian err"))
# NOTE(review): cbind(JR3.FX + JR3.FY + JR3.FZ) sums the three force channels
# into a single response; the later model uses cbind(JR3.FX, JR3.FY, JR3.FZ)
# (a multivariate response) — confirm which was intended.
lm_measured <- lm(cbind(JR3.FX + JR3.FY + JR3.FZ) ~ measured_M0+measured_M1+measured_M2+measured_M3+measured_M4+measured_M5+measured_M6, data = input_output_data)
cvlm <- cv.lm(input_output_data, lm_measured, m=10) # 10-fold cross-validation (m = 10; the original comment said "3 fold")
train_test <- df_split_into_training_and_testing(input_output_data, 0.8)
# NOTE(review): trained on train_test$test and evaluated on the same split —
# presumably train_test$train was intended for fitting; confirm.
trained_model <- lm(formula = cbind(JR3.FX, JR3.FY, JR3.FZ) ~ measured_M0 +
measured_M1 + measured_M2 + measured_M3 + measured_M4 + measured_M5 +
measured_M6, data = train_test$test, model = TRUE, x = TRUE, y = TRUE,
qr = TRUE)
test_results <- predict.lm(trained_model, train_test$test[, do.call("c",
lapply(muscle_names(), measured))])
# NOTE(review): apply(..., 1, ...) returns its result transposed relative to
# the input — verify the orientation before feeding it to the model builder.
input_output_data_0_mean <- apply(input_output_data, 1, function(row) row -
apply(input_output_data, 2, mean))
linear_model <- generate_linear_static_model(input_output_data_0_mean, fraction_training = 0.8)
print(paste(median(linear_model$euclidian_errors), "is the median euclidian err"))
hist(linear_model$euclidian_errors)
tensions_and_forces_colnames <- c(do.call("c", lapply(muscle_names(), measured)),
force_column_names)
# Assigns FALSE and then asserts it, so this expectation always fails — a
# deliberate placeholder marking the test as unimplemented.
expect_true(implemented <- FALSE) #TODO
})
# Exploratory: fit a 5-hidden-layer neuralnet from the seven measured muscle
# tensions to the three JR3 force channels, then plot the network.
# No expectations -- this only checks that fitting and plotting run.
test_that('we can apply a nn to mapping',{
    rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
    sample_posture_path <- dir(rds_folder_path)[15]   # same posture file as the test above
    sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
    input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds = 100)
    # In a neuralnet formula, "a + b + c" on the LHS means three output nodes.
    nn <- neuralnet(
        JR3.FX + JR3.FY + JR3.FZ ~ measured_M0 + measured_M1 + measured_M2 + measured_M3 + measured_M4 + measured_M5 + measured_M6,
        data=input_output_data, hidden=c(6,6,6,6,6), err.fct="sse",
        linear.output=FALSE)
    plot(nn)
})
test_that("we can extract posture RDS files, and compute an RDS with the stabilized mapping for use with training",
{
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
list_of_input_output_data <- pbmclapply(dir(rds_folder_path), function(rdspath) {
posture <- readRDS(paste0(rds_folder_path, rdspath))
adept_coordinates <- adept_coordinates_from_ForceTrial(posture[[1]])
input_output_data <- converged_colmeans(posture, last_n_milliseconds = 100)
attr(input_output_data, "adept_coordinates") <- adept_coordinates
return(input_output_data)
})
saveRDS(list_of_input_output_data, "list_of_input_output_data.rds")
})
test_that("data for many postures can be used to create a list of A matrices", {
rds_postures <- all_file_paths("~/Resilio Sync/data/ForceTrials_at_each_posture/")
list_of_postures <- list_of_xy_to_df(pbmclapply(rds_postures, get_adept_coordinates_from_rds),
c("adept_x", "adept_y"))
list_of_A_matrices <- posture_rds_files_to_list_of_A_matrix_fits(rds_postures,
last_n_milliseconds)
vafs <- simplify2array(lapply(list_of_A_matrices, function(fit) {
variance_accounted_for(fit[[2]], fit[[3]])
}))
cb <- data.frame(cbind(list_of_postures, vafs))
expect_equal(nrow(cb), 1206)
fix_x_vaf <- cb[cb$adept_x == -525, ]
fix_y_vaf <- cb[cb$adept_y == 68, ]
expect_equal(nrow(fix_x_vaf), 206)
expect_equal(nrow(fix_y_vaf), 1000)
# Plot figure
fix_y <- posture_dependency_plot(fix_y_vaf, "adept_x", "vafs")
fix_x <- posture_dependency_plot(fix_x_vaf, "adept_y", "vafs")
require(gridExtra)
final <- gridExtra::grid.arrange(fix_y, fix_x, ncol = 2)
ggsave("../../output/posture_dependency_adept_xy.pdf", final, width = 14, height = 8,
dpi = 600)
})
test_that("list of posture RDS paths to list of A matrices", {
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
rds_postures <- simplify2array(lapply(dir(rds_folder_path), prepend_string, rds_folder_path))
})
test_that("a_matrix <- forcetrial_list <- rds", {
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
sample_posture_path <- dir(rds_folder_path)[1]
sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds)
A_1 <- find_A_matrix(input_output_data)
expect_equal(length(A_1), 3)
})
| /tests/fulldata/test_posture_dependency.r | no_license | bc/frontiers2017 | R | false | false | 4,819 | r | context('test_posture_dependency.r')
test_that("a_matrix with fewer than all samples trained on <- forcetrial_list <- rds",
{
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
sample_posture_path <- dir(rds_folder_path)[15]
sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds = 100)
linear_model <- generate_linear_static_model(input_output_data, fraction_training = 0.8)
print(paste(median(linear_model$euclidian_errors), "is the median euclidian err"))
lm_measured <- lm(cbind(JR3.FX + JR3.FY + JR3.FZ) ~ measured_M0+measured_M1+measured_M2+measured_M3+measured_M4+measured_M5+measured_M6, data = input_output_data)
cvlm <- cv.lm(input_output_data, lm_measured, m=10) # 3 fold cross-validation
train_test <- df_split_into_training_and_testing(input_output_data, 0.8)
trained_model <- lm(formula = cbind(JR3.FX, JR3.FY, JR3.FZ) ~ measured_M0 +
measured_M1 + measured_M2 + measured_M3 + measured_M4 + measured_M5 +
measured_M6, data = train_test$test, model = TRUE, x = TRUE, y = TRUE,
qr = TRUE)
test_results <- predict.lm(trained_model, train_test$test[, do.call("c",
lapply(muscle_names(), measured))])
input_output_data_0_mean <- apply(input_output_data, 1, function(row) row -
apply(input_output_data, 2, mean))
linear_model <- generate_linear_static_model(input_output_data_0_mean, fraction_training = 0.8)
print(paste(median(linear_model$euclidian_errors), "is the median euclidian err"))
hist(linear_model$euclidian_errors)
tensions_and_forces_colnames <- c(do.call("c", lapply(muscle_names(), measured)),
force_column_names)
expect_true(implemented <- FALSE) #TODO
})
test_that('we can apply a nn to mapping',{
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
sample_posture_path <- dir(rds_folder_path)[15]
sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds = 100)
nn <- neuralnet(
JR3.FX + JR3.FY + JR3.FZ ~ measured_M0 + measured_M1 + measured_M2 + measured_M3 + measured_M4 + measured_M5 + measured_M6,
data=input_output_data, hidden=c(6,6,6,6,6), err.fct="sse",
linear.output=FALSE)
plot(nn)
})
test_that("we can extract posture RDS files, and compute an RDS with the stabilized mapping for use with training",
{
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
list_of_input_output_data <- pbmclapply(dir(rds_folder_path), function(rdspath) {
posture <- readRDS(paste0(rds_folder_path, rdspath))
adept_coordinates <- adept_coordinates_from_ForceTrial(posture[[1]])
input_output_data <- converged_colmeans(posture, last_n_milliseconds = 100)
attr(input_output_data, "adept_coordinates") <- adept_coordinates
return(input_output_data)
})
saveRDS(list_of_input_output_data, "list_of_input_output_data.rds")
})
test_that("data for many postures can be used to create a list of A matrices", {
rds_postures <- all_file_paths("~/Resilio Sync/data/ForceTrials_at_each_posture/")
list_of_postures <- list_of_xy_to_df(pbmclapply(rds_postures, get_adept_coordinates_from_rds),
c("adept_x", "adept_y"))
list_of_A_matrices <- posture_rds_files_to_list_of_A_matrix_fits(rds_postures,
last_n_milliseconds)
vafs <- simplify2array(lapply(list_of_A_matrices, function(fit) {
variance_accounted_for(fit[[2]], fit[[3]])
}))
cb <- data.frame(cbind(list_of_postures, vafs))
expect_equal(nrow(cb), 1206)
fix_x_vaf <- cb[cb$adept_x == -525, ]
fix_y_vaf <- cb[cb$adept_y == 68, ]
expect_equal(nrow(fix_x_vaf), 206)
expect_equal(nrow(fix_y_vaf), 1000)
# Plot figure
fix_y <- posture_dependency_plot(fix_y_vaf, "adept_x", "vafs")
fix_x <- posture_dependency_plot(fix_x_vaf, "adept_y", "vafs")
require(gridExtra)
final <- gridExtra::grid.arrange(fix_y, fix_x, ncol = 2)
ggsave("../../output/posture_dependency_adept_xy.pdf", final, width = 14, height = 8,
dpi = 600)
})
test_that("list of posture RDS paths to list of A matrices", {
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
rds_postures <- simplify2array(lapply(dir(rds_folder_path), prepend_string, rds_folder_path))
})
test_that("a_matrix <- forcetrial_list <- rds", {
rds_folder_path <- "~/Resilio Sync/data/ForceTrials_at_each_posture/"
sample_posture_path <- dir(rds_folder_path)[1]
sample_posture_data <- readRDS(paste0(rds_folder_path, sample_posture_path))
input_output_data <- converged_colmeans(sample_posture_data, last_n_milliseconds)
A_1 <- find_A_matrix(input_output_data)
expect_equal(length(A_1), 3)
})
|
# Exercise 5: large data sets: Baby Name Popularity Over Time
# Read in the female baby names data file found in the `data` folder into a
# variable called `names`. Remember to NOT treat the strings as factors!
# NOTE: `names` shadows base::names() as a data variable; acceptable here
# because the exercise prescribes this exact name.
names <- read.csv('data/female_names.csv', stringsAsFactors=FALSE)
# Create a data frame `names_2013` that contains only the rows for the year 2013
names_2013 <- names[names$year == 2013,]
# What was the most popular female name in 2013?
# (All names tied for the maximum proportion are returned -- usually one.)
popular <- names_2013[names_2013$prop == max(names_2013$prop), "name"]
# Write a function `most_popular_in_year` that takes in a year as a value and
# returns the most popular name in that year.
# Generalized: `data` defaults to the global `names` table, so existing calls
# are unchanged, but the function can now be reused/tested on any data frame
# with `year`, `name`, and `prop` columns.
# Returns every name tied for the maximum proportion (usually a single name).
most_popular_in_year <- function(y, data = names) {
  names_year <- data[data$year == y, ]
  names_year[names_year$prop == max(names_year$prop), "name"]
}
# What was the most popular female name in 1994?
# (Computed from the full data set loaded above.)
popular_1994 <- most_popular_in_year(1994)
# Write a function `number_in_million` that takes in a name and a year, and
# returns statistically how many babies out of 1 million born that year have
# that name.
# Hint: get the popularity percentage, and take that percentage out of 1 million.
# Generalized: `data` defaults to the global `names` table, so existing calls
# are unchanged, but the function can now be reused/tested on any data frame
# with `name`, `year`, and `prop` columns.
# Returns the count rounded to one decimal place (numeric(0) if no match).
number_in_million <- function(n, y, data = names) {
  proportion <- data[data$name == n & data$year == y, "prop"]
  round(proportion * 1000000, 1)
}
# How many babies out of 1 million had the name 'Laura' in 1995?
laura_babies <- number_in_million("Laura", 1995)
# How many babies out of 1 million had your name in the year you were born?
allison_babies <- number_in_million("Allison", 1998)
## Consider: what does this tell you about how easy it is to identify you with
## just your name and birth year?
| /exercise-5/exercise.R | permissive | alliL/ch9-data-frames | R | false | false | 1,634 | r | # Exercise 5: large data sets: Baby Name Popularity Over Time
# Read in the female baby names data file found in the `data` folder into a
# variable called `names`. Remember to NOT treat the strings as factors!
names <- read.csv('data/female_names.csv', stringsAsFactors=FALSE)
# Create a data frame `names_2013` that contains only the rows for the year 2013
names_2013 <- names[names$year == 2013,]
# What was the most popular female name in 2013?
popular <- names_2013[names_2013$prop == max(names_2013$prop), "name"]
# Write a function `most_popular_in_year` that takes in a year as a value and
# returns the most popular name in that year
most_popular_in_year <- function(y) {
names_year <- names[names$year == y,]
names_year[names_year$prop == max(names_year$prop), "name"]
}
# What was the most popular female name in 1994?
popular_1994 <- most_popular_in_year(1994)
# Write a function `number_in_million` that takes in a name and a year, and
# returns statistically how many babies out of 1 million born that year have
# that name.
# Hint: get the popularity percentage, and take that percentage out of 1 million.
number_in_million <- function(n, y) {
proportion <- names[names$name == n & names$year == y, "prop"]
round(proportion * 1000000, 1)
}
# How many babies out of 1 million had the name 'Laura' in 1995?
laura_babies <- number_in_million("Laura", 1995)
# How many babies out of 1 million had your name in the year you were born?
allison_babies <- number_in_million("Allison", 1998)
## Consider: what does this tell you about how easy it is to identify you with
## just your name and birth year?
|
# Auto-generated fuzzer regression input: empty id/y vectors paired with a
# 37-element x vector of zeros (lengths deliberately inconsistent).
testlist <- list(id = integer(0), x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Calls ggforce's unexported (:::) compiled wrapper directly; requires the
# ggforce package to be installed.
result <- do.call(ggforce:::enclose_points,testlist)
str(result) | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610029481-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 234 | r | testlist <- list(id = integer(0), x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
library(shiny)
library(tidyverse)
library(lubridate)
# Pre-built tables used below (features, data_before/on/after, compas_df_wide,
# outcomes) -- presumably produced by a table-construction script; loading
# this .Rdata is the slow step at startup.
load("Table_construction.Rdata") ## You can comment out if data already loaded. The app will load faster.
# Date the source data was extracted; used for the follow-up window
# (days_recid_info) in the Profile tab.
date_data_pulled = ymd("2016-03-30") ### HARDCODED. ADJUST IF NEW DATASET
# Define UI for app that draws a histogram ----
# UI: person-id / screening-date selectors in the sidebar plus one tab per
# record type. Every output id declared here ("<section>_<before|on|after>"
# for the DT tables, plus info/profile/features/compas) must have a matching
# render* assignment in server().
ui <- fluidPage(
  # App title ----
  titlePanel("Recidivism Data"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # span(textOutput("message"), style="color:red"),
      actionButton("go", label = "Update"),
      numericInput("person_id", label = h3("Input Person ID"), value = 1),
      # Choices are filled in by server() once a person_id is entered.
      selectInput("screening_date", label = h3("Input Screening Date"),
                  choices = NULL),
      h3("Summary"),
      tableOutput("info")
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      tabsetPanel(
        id = 'dataset',
        # Each record-type tab shows three tables split around the current
        # offense date.
        tabPanel("Charge",
                 h4("Before Current Offense Date"),
                 DT::dataTableOutput("charge_before"),
                 h4("On Current Offense Date"),
                 DT::dataTableOutput("charge_on"),
                 h4("After Current Offense Date"),
                 DT::dataTableOutput("charge_after")
        ),
        tabPanel("Arrest",
                 h4("Before Current Offense Date"),
                 DT::dataTableOutput("arrest_before"),
                 h4("On Current Offense Date"),
                 DT::dataTableOutput("arrest_on"),
                 h4("After Current Offense Date"),
                 DT::dataTableOutput("arrest_after")
        ),
        tabPanel("Jail",
                 h4("Before Current Offense Date"),
                 DT::dataTableOutput("jail_before"),
                 h4("On Current Offense Date"),
                 DT::dataTableOutput("jail_on"),
                 h4("After Current Offense Date"),
                 DT::dataTableOutput("jail_after")
        ),
        tabPanel("Prison",
                 h4("Before Current Offense Date"),
                 DT::dataTableOutput("prison_before"),
                 h4("On Current Offense Date"),
                 DT::dataTableOutput("prison_on"),
                 h4("After Current Offense Date"),
                 DT::dataTableOutput("prison_after")
        ),
        tabPanel("Probation",
                 h4("Before Current Offense Date"),
                 DT::dataTableOutput("prob_before"),
                 h4("On Current Offense Date"),
                 DT::dataTableOutput("prob_on"),
                 h4("After Current Offense Date"),
                 DT::dataTableOutput("prob_after")
        ),
        tabPanel("Profile",
                 h4("Profile"),
                 tableOutput("profile")
        ),
        tabPanel("Features",tableOutput("features")),
        tabPanel("COMPAS",tableOutput("compas"))
      )
    )
  )
)
# Define server logic required to draw a histogram ----
server <- function(input, output, session) {
### Update the screening_date dropdown
observeEvent(input$person_id,{
if(!is.na(input$person_id)){
updateSelectInput(session, "screening_date",
choices = features$screening_date[features$person_id == input$person_id])
}
## Set all tables to NULL
# Info
output$info = renderTable(NULL)
# Charge
output$charge_before <- DT::renderDataTable(NULL)
output$charge_on = DT::renderDataTable(NULL)
output$charge_after = DT::renderDataTable(NULL)
# Arrest
output$arrest_before <- DT::renderDataTable(NULL)
output$arrest_on = DT::renderDataTable(NULL)
output$arrest_after = DT::renderDataTable(NULL)
# Jail
output$jail_before <- DT::renderDataTable(NULL)
output$jail_on = DT::renderDataTable(NULL)
output$jail_after = DT::renderDataTable(NULL)
# Prison
output$prison_before <- DT::renderDataTable(NULL)
output$prison_on = DT::renderDataTable(NULL)
output$prison_after = DT::renderDataTable(NULL)
# Probation
output$prob_before <- DT::renderDataTable(NULL)
output$prob_on = DT::renderDataTable(NULL)
output$prob_after = DT::renderDataTable(NULL)
# Features
output$features = renderTable(NULL)
# Profile
output$profile = renderTable(NULL)
# COMPAS
output$compas = renderTable(NULL)
output$message = renderText("Hit Update")
})
observeEvent(input$go,{
output$message = renderText("-")
isolate({
      ## Info
      # Sidebar summary for the selected (person_id, screening_date):
      # demographics + offense dates + the two COMPAS decile scores +
      # recidivism outcomes, transposed so it renders as a name/value column.
      output$info <- renderTable({
        person = data_before %>%
          filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
        bind_cols(
          # Demographics live in the nested `people` list-column.
          person %>%
            select(people) %>%
            unnest() %>%
            select(name,sex,race),
          person %>%
            select(first_offense_date, current_offense_date),
          compas_df_wide %>%
            filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
            select(`General Decile Score` = `Risk of Recidivism_decile_score`,
                   `Violence Decile Score` = `Risk of Violence_decile_score`),
          outcomes %>%
            filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
            select(recid, recid_violent)
        ) %>%
          t()
      },colnames = FALSE, rownames = TRUE)
## Charge tab
output$charge_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$charge_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$charge_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Arrest tab
output$arrest_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$arrest_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$arrest_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Jail tab
output$jail_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$jail_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$jail_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Prison tab
output$prison_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$prison_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$prison_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Probation tab
output$prob_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$prob_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$prob_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
      ## Features
      # All engineered features for the selected pair, minus identifiers,
      # dates, and COMPAS raw/decile scores (those appear on the COMPAS tab);
      # transposed to a name/value column.
      output$features <- renderTable({
        features %>%
          filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
          select(-person_id, -screening_date, -first_offense_date, -current_offense_date,
                 -`Risk of Failure to Appear_decile_score`,-`Risk of Failure to Appear_raw_score`,
                 -`Risk of Recidivism_decile_score`,-`Risk of Recidivism_raw_score`,
                 -`Risk of Violence_decile_score`,-`Risk of Violence_raw_score`) %>%
          mutate_all(as.character) %>%   # uniform type so t() keeps values readable
          t()
      },
      colnames = FALSE, rownames = TRUE)
      ## COMPAS
      # All COMPAS columns for the selected pair, transposed to a name/value
      # column.
      output$compas <- renderTable({
        compas_df_wide %>%
          filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
          select(-person_id, -screening_date) %>%
          mutate_all(as.character) %>%   # uniform type so t() keeps values readable
          t()
      },
      colnames = FALSE, rownames = TRUE)
## Profile
output$profile <- renderTable({
features_person = features %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
person_before = data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
charge_onbefore = bind_rows(
person_before %>%
select(charge) %>%
.[[1,1]],
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]
)
if(nrow(charge_onbefore)>0){
charge_onbefore_sum = charge_onbefore %>%
select(charge, charge_degree) %>%
mutate(charge_degree_letters = str_extract(charge_degree,"[:alpha:]+")) %>%
group_by(charge, charge_degree_letters) %>%
summarize(count = n()) %>%
ungroup() %>%
arrange(desc(count)) %>%
summarize(charge_all = paste(pmap_chr(list(charge, charge_degree_letters, count),
function(charge, charge_degree_letters, count)
{paste0(charge," (",charge_degree_letters,",",count,")")} ), collapse=', '))
} else {
charge_onbefore_sum = data.frame(priors = NA)
}
charge_after = data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]
if(!is.null(charge_after)){
charge_after_sum = charge_after %>%
mutate(new = paste0("(",str_extract(charge_degree,"[:alpha:]+"),",",str_extract(charge_degree,"[:digit:]+"),")")) %>%
summarize(charges_after_screening = paste(map2_chr(charge, new, paste), collapse=', '))
} else {
charge_after_sum = data.frame(charges_after_screening = NA)
}
if(nrow(charge_onbefore>0) & !is.null(charge_after)){
charges_both = data.frame(charges_both=paste(base::intersect(charge_onbefore$charge, charge_after$charge), collapse=", "))
} else {
charges_both = data.frame(charges_both=NA)
}
bind_cols(
person_before %>%
select(people) %>%
unnest() %>%
select(name,sex,race),
person_before %>%
select(first_offense_date, current_offense_date),
compas_df_wide %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(`General Decile Score` = `Risk of Recidivism_decile_score`,
`Violence Decile Score` = `Risk of Violence_decile_score`),
outcomes %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(recid, recid_violent),
days_recid_info = as.numeric(as.period(interval(person_before$screening_date,date_data_pulled)), "days"),
data.frame(p_charge = features_person$p_charge),
charge_onbefore_sum,
charge_after_sum,
charges_both
) %>%
t()
},colnames = FALSE, rownames = TRUE)
})
})
}
shinyApp(ui = ui, server = server)
| /app.R | no_license | Knabi/age_of_unfairness | R | false | false | 15,075 | r | library(shiny)
library(tidyverse)
library(lubridate)
load("Table_construction.Rdata") ## You can comment out if data already loaded. The app will load faster.
date_data_pulled = ymd("2016-03-30") ### HARDCODED. ADUST IF NEW DATASET
# Define UI for app that draws a histogram ----
ui <- fluidPage(
# App title ----
titlePanel("Recidivism Data"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# span(textOutput("message"), style="color:red"),
actionButton("go", label = "Update"),
numericInput("person_id", label = h3("Input Person ID"), value = 1),
selectInput("screening_date", label = h3("Input Screening Date"),
choices = NULL),
h3("Summary"),
tableOutput("info")
),
# Main panel for displaying outputs ----
mainPanel(
tabsetPanel(
id = 'dataset',
tabPanel("Charge",
h4("Before Current Offense Date"),
DT::dataTableOutput("charge_before"),
h4("On Current Offense Date"),
DT::dataTableOutput("charge_on"),
h4("After Current Offense Date"),
DT::dataTableOutput("charge_after")
),
tabPanel("Arrest",
h4("Before Current Offense Date"),
DT::dataTableOutput("arrest_before"),
h4("On Current Offense Date"),
DT::dataTableOutput("arrest_on"),
h4("After Current Offense Date"),
DT::dataTableOutput("arrest_after")
),
tabPanel("Jail",
h4("Before Current Offense Date"),
DT::dataTableOutput("jail_before"),
h4("On Current Offense Date"),
DT::dataTableOutput("jail_on"),
h4("After Current Offense Date"),
DT::dataTableOutput("jail_after")
),
tabPanel("Prison",
h4("Before Current Offense Date"),
DT::dataTableOutput("prison_before"),
h4("On Current Offense Date"),
DT::dataTableOutput("prison_on"),
h4("After Current Offense Date"),
DT::dataTableOutput("prison_after")
),
tabPanel("Probation",
h4("Before Current Offense Date"),
DT::dataTableOutput("prob_before"),
h4("On Current Offense Date"),
DT::dataTableOutput("prob_on"),
h4("After Current Offense Date"),
DT::dataTableOutput("prob_after")
),
tabPanel("Profile",
h4("Profile"),
tableOutput("profile")
),
tabPanel("Features",tableOutput("features")),
tabPanel("COMPAS",tableOutput("compas"))
)
)
)
)
# Define server logic required to draw a histogram ----
server <- function(input, output, session) {
### Update the screening_date dropdown
observeEvent(input$person_id,{
if(!is.na(input$person_id)){
updateSelectInput(session, "screening_date",
choices = features$screening_date[features$person_id == input$person_id])
}
## Set all tables to NULL
# Info
output$info = renderTable(NULL)
# Charge
output$charge_before <- DT::renderDataTable(NULL)
output$charge_on = DT::renderDataTable(NULL)
output$charge_after = DT::renderDataTable(NULL)
# Arrest
output$arrest_before <- DT::renderDataTable(NULL)
output$arrest_on = DT::renderDataTable(NULL)
output$arrest_after = DT::renderDataTable(NULL)
# Jail
output$jail_before <- DT::renderDataTable(NULL)
output$jail_on = DT::renderDataTable(NULL)
output$jail_after = DT::renderDataTable(NULL)
# Prison
output$prison_before <- DT::renderDataTable(NULL)
output$prison_on = DT::renderDataTable(NULL)
output$prison_after = DT::renderDataTable(NULL)
# Probation
output$prob_before <- DT::renderDataTable(NULL)
output$prob_on = DT::renderDataTable(NULL)
output$prob_after = DT::renderDataTable(NULL)
# Features
output$features = renderTable(NULL)
# Profile
output$profile = renderTable(NULL)
# COMPAS
output$compas = renderTable(NULL)
output$message = renderText("Hit Update")
})
observeEvent(input$go,{
output$message = renderText("-")
isolate({
## Info
output$info <- renderTable({
person = data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
bind_cols(
person %>%
select(people) %>%
unnest() %>%
select(name,sex,race),
person %>%
select(first_offense_date, current_offense_date),
compas_df_wide %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(`General Decile Score` = `Risk of Recidivism_decile_score`,
`Violence Decile Score` = `Risk of Violence_decile_score`),
outcomes %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(recid, recid_violent)
) %>%
t()
},colnames = FALSE, rownames = TRUE)
## Charge tab
output$charge_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$charge_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$charge_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(charge) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Arrest tab
output$arrest_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$arrest_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$arrest_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(arrest) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Jail tab
output$jail_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$jail_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$jail_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(jail) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Prison tab
output$prison_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$prison_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$prison_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prison) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Probation tab
output$prob_before <- DT::renderDataTable({
DT::datatable({
data_before %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))}
)
output$prob_on <- DT::renderDataTable(
DT::datatable({
data_on %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
output$prob_after <- DT::renderDataTable(
DT::datatable({
data_after %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(prob) %>%
.[[1,1]]},
options = list(paging = FALSE))
)
## Features
output$features <- renderTable({
features %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(-person_id, -screening_date, -first_offense_date, -current_offense_date,
-`Risk of Failure to Appear_decile_score`,-`Risk of Failure to Appear_raw_score`,
-`Risk of Recidivism_decile_score`,-`Risk of Recidivism_raw_score`,
-`Risk of Violence_decile_score`,-`Risk of Violence_raw_score`) %>%
mutate_all(as.character) %>%
t()
},
colnames = FALSE, rownames = TRUE)
## COMPAS
## All COMPAS columns for the selected person/screening date (identifiers
## dropped), transposed so the score names become row labels.
output$compas <- renderTable({
compas_df_wide %>%
filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
select(-person_id, -screening_date) %>%
mutate_all(as.character) %>%
t()
},
colnames = FALSE, rownames = TRUE)
## Profile
## One-row profile for the selected person/screening date: demographics,
## offense dates, COMPAS decile scores, recidivism outcomes, and summaries of
## the charges before/on vs. after the screening.
output$profile <- renderTable({
  features_person <- features %>%
    filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
  person_before <- data_before %>%
    filter(person_id == input$person_id, screening_date == as_date(input$screening_date))
  # Charges on or before the screening date. `charge` holds a nested table, so
  # .[[1,1]] pulls the inner data frame out of the single matched row.
  charge_onbefore <- bind_rows(
    person_before %>%
      select(charge) %>%
      .[[1,1]],
    data_on %>%
      filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
      select(charge) %>%
      .[[1,1]]
  )
  if (nrow(charge_onbefore) > 0) {
    # Collapse to "charge (degree_letters,count)" strings, most frequent first.
    charge_onbefore_sum <- charge_onbefore %>%
      select(charge, charge_degree) %>%
      mutate(charge_degree_letters = str_extract(charge_degree,"[:alpha:]+")) %>%
      group_by(charge, charge_degree_letters) %>%
      summarize(count = n()) %>%
      ungroup() %>%
      arrange(desc(count)) %>%
      summarize(charge_all = paste(pmap_chr(list(charge, charge_degree_letters, count),
                                            function(charge, charge_degree_letters, count)
                                            {paste0(charge," (",charge_degree_letters,",",count,")")} ), collapse=', '))
  } else {
    charge_onbefore_sum <- data.frame(priors = NA)
  }
  charge_after <- data_after %>%
    filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
    select(charge) %>%
    .[[1,1]]
  if (!is.null(charge_after)) {
    # Format each post-screening charge as "charge (letters,digits)".
    charge_after_sum <- charge_after %>%
      mutate(new = paste0("(",str_extract(charge_degree,"[:alpha:]+"),",",str_extract(charge_degree,"[:digit:]+"),")")) %>%
      summarize(charges_after_screening = paste(map2_chr(charge, new, paste), collapse=', '))
  } else {
    charge_after_sum <- data.frame(charges_after_screening = NA)
  }
  # BUG FIX: the original tested nrow(charge_onbefore>0) — the comparison was
  # accidentally inside nrow(), so the branch relied on integer truthiness.
  # Also use the scalar && (short-circuiting) rather than vectorized & in if().
  if (nrow(charge_onbefore) > 0 && !is.null(charge_after)) {
    # Charges that appear both before/on and after the screening date.
    charges_both <- data.frame(charges_both=paste(base::intersect(charge_onbefore$charge, charge_after$charge), collapse=", "))
  } else {
    charges_both <- data.frame(charges_both=NA)
  }
  bind_cols(
    person_before %>%
      select(people) %>%
      unnest() %>%
      select(name,sex,race),
    person_before %>%
      select(first_offense_date, current_offense_date),
    compas_df_wide %>%
      filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
      select(`General Decile Score` = `Risk of Recidivism_decile_score`,
             `Violence Decile Score` = `Risk of Violence_decile_score`),
    outcomes %>%
      filter(person_id == input$person_id, screening_date == as_date(input$screening_date)) %>%
      select(recid, recid_violent),
    # Days of follow-up available between screening and the data pull.
    days_recid_info = as.numeric(as.period(interval(person_before$screening_date,date_data_pulled)), "days"),
    data.frame(p_charge = features_person$p_charge),
    charge_onbefore_sum,
    charge_after_sum,
    charges_both
  ) %>%
    t()
},colnames = FALSE, rownames = TRUE)
})
})
}
shinyApp(ui = ui, server = server)
|
# Let's practice!
# Generate a sequence of numbers from 1 to 1000 and save to a variable called df. Hint: Use seq().
# NOTE(review): seq(1:1000) works but is redundant — seq_len(1000) or plain
# 1:1000 is the idiomatic form.
df <- seq(1:1000)
# NOTE(review): right-assignment to a quoted name also creates the variable
# `df`, so this line just repeats the assignment above.
seq(1:1000) -> "df"
# Plot df in a line chart. Use plot().
plot(df)
# Compute and print the log of the 6th highest value. Hint: you could use order() to order a variable.
# NOTE(review): order() returns *indices*, not values; this only gives the
# right answer because df happens to equal 1:1000. The general solution is
# log(sort(df, decreasing = TRUE)[6]).
log(order(df, decreasing=TRUE) [6])
# [1] 6.902743
# Subtract 1 from all uneven numbers. Hint: Use modulus %% to find uneven numbers.
# %% is the modulo operator: a %% b gives the remainder of a divided by b.
# A
is.odd <- seq(1,1000,by=2)
on <- is.odd - 1
# NOTE(review): this loop reuses `df` as the loop variable, clobbering the
# vector created above.
for (df in 1:1000) {
if (df %% 2 ==1) print(df-1)
}
# B
is.odd <- seq(1,1000, by=2)
on <- is.odd - 1
on <- data.frame(is.odd -1)
for (df in 1:1000) {
if (df %% 2 == 1) print(df-1)
}
# Add dates (starting point doesn't matter as long as it's a sequence) of the same length as your variable and add them together to make a data frame.
# Hint: use seq again. "from" doesn't matter, use "length.out".
dates <- data.frame(seq(as.Date('2000/1/1'), by = 'day', length.out = 500))
# cbind works here because is.odd and dates both have 500 entries.
ALL <- cbind(is.odd, dates)
# Take the sqrt of the values in your data frame (overwrite the original values).
ALL$square_root = '^'(ALL$is.odd,1/2)
ALL = subset(ALL,select = -c(is.odd))
# Remove all values above 15 from the data frame.
ALL <- data.frame(ALL[!rowSums(ALL[-1] >15),])
# Only keep each third row (1-4-7-10-13-etc) in your data frame. Hint: use seq().
ALL = ALL[seq(1, nrow(ALL), 3),]
# Remove the top 5 values in your data frame. Hint: Use order(y).
ALL <- ALL[order(ALL[, 2], decreasing = TRUE),]
ALL = ALL[-1:-5,]
# OR
ALL <- ALL[-c(1:5),]
# Randomly shuffle the observations over the dates. Hint: use sample() and get just as many sampled values as the numbers of rows in your data set.
# Is this correct? They look shuffled but didn't use sample function, and don't know what it means when it says "shuffle observations over the dates"
# NOTE(review): lapply(ALL, sample) *does* call sample(), but it shuffles each
# column independently; to shuffle whole rows (keeping date/value pairs intact)
# use ALL[sample(nrow(ALL)), ].
ALL[] <- lapply(ALL, sample)
# Plot again.
plot(ALL)
# Add a column "category" to df that equals "one" if the value column is within the top 30% quantile, and "two" otherwise. Hint: use ifelse().
ALL$category <- ifelse(ALL$square_root >= (quantile(ALL$square_root, probs = (.7))), 'one', 'two')
# Check if the sum of the values in category "one" are larger than in category "two". Try using aggregate().
aggregate(ALL$square_root, by=list(category=ALL$category),FUN=sum)
# category x
# 1 one 128.6664
# 2 two 176.3850
| /Module 0 Basic Functions/solutions/Group 2 Solutions.R | no_license | Crystal-Niedbala-Bose/R-Modules | R | false | false | 2,414 | r | # Let's practice!
# Generate a sequence of numbers from 1 to 1000 and save to a variable called df. Hint: Use seq().
df <- seq(1:1000)
seq(1:1000) -> "df"
# Plot df in a line chart. Use plot().
plot(df)
# Compute and print the log of the 6th highest value. Hint: you could use order() to order a variable.
log(order(df, decreasing=TRUE) [6])
# [1] 6.902743
# Subtract 1 from all uneven numbers. Hint: Use modulus %% to find uneven numbers.
# what exactly does %% do/mean?
# A
is.odd <- seq(1,1000,by=2)
on <- is.odd - 1
for (df in 1:1000) {
if (df %% 2 ==1) print(df-1)
}
# B
is.odd <- seq(1,1000, by=2)
on <- is.odd - 1
on <- data.frame(is.odd -1)
for (df in 1:1000) {
if (df %% 2 == 1) print(df-1)
}
# Add dates (starting point doesn't matter as long as it's a sequence) of the same length as your variable and add them together to make a data frame.
# Hint: use seq again. "from" doesnt matter, use "length.out".
dates <- data.frame(seq(as.Date('2000/1/1'), by = 'day', length.out = 500))
ALL <- cbind(is.odd, dates)
# Take the sqrt of the values in your data frame (overwrite the original values).
ALL$square_root = '^'(ALL$is.odd,1/2)
ALL = subset(ALL,select = -c(is.odd))
# Remove all values above 15 from the data frame.
ALL <- data.frame(ALL[!rowSums(ALL[-1] >15),])
# Only keep each third row (1-4-7-10-13-etc) in your data frame. Hint: use seq().
ALL = ALL[seq(1, nrow(ALL), 3),]
# Remove the top 5 values in your data frame. Hint: Use order(y).
ALL <- ALL[order(ALL[, 2], decreasing = TRUE),]
ALL = ALL[-1:-5,]
# OR
ALL <- ALL[-c(1:5),]
# Randomly shuffle the observations over the dates. Hint: use sample() and get just as many sampled values as the numbers of rows in your data set.
#Is this correct? They look shuffled but didn't use sample function, and don't know what it means when it says "shuffle observations over the dates"
ALL[] <- lapply(ALL, sample)
# Plot again.
plot(ALL)
# Add a column "category" to df that equals "one" if the value column is within the top 30% quantile, and "two" otherwise. Hint: use ifelse().
ALL$category <- ifelse(ALL$square_root >= (quantile(ALL$square_root, probs = (.7))), 'one', 'two')
# Check if the sum of the values in category "one" are larger than in category "two". Try using aggregate().
aggregate(ALL$square_root, by=list(category=ALL$category),FUN=sum)
# category x
# 1 one 128.6664
# 2 two 176.3850
|
#' JobProvider
#'
#' @export
#' @keywords internal
#' @param locale (character) the locale to use. options: en_US (default),
#' fr_FR, fr_CH, hr_FR, fa_IR, pl_PL, ru_RU, uk_UA, zh_TW.
#' @details
#' \strong{Methods}
#' \describe{
#' \item{\code{render()}}{
#' Make a job
#' }
#' }
#' @format NULL
#' @usage NULL
#' @examples
#' z <- JobProvider$new()
#' z$render()
#'
#' z <- JobProvider$new(locale = "fr_FR")
#' z$locale
#' z$render()
#'
#' z <- JobProvider$new(locale = "hr_HR")
#' z$locale
#' z$render()
#'
#' z <- JobProvider$new(locale = "fa_IR")
#' z$locale
#' z$render()
JobProvider <- R6::R6Class(
  'JobProvider',
  inherit = BaseProvider,
  public = list(
    # Active locale string, e.g. "en_US" or "fr_FR".
    locale = NULL,
    # Pool of job names for the active locale (looked up via parse_eval).
    formats = NULL,
    initialize = function(locale = NULL) {
      if (is.null(locale)) {
        self$locale <- 'en_US'
      } else {
        super$check_locale(locale)
        self$locale <- locale
      }
      self$formats <- parse_eval("job_formats_", self$locale)
    },
    # Draw one random job name from the locale's pool.
    render = function() {
      super$random_element(self$formats)
    }
  )
)
parse_eval <- function(x, y) {
  # Build "<x><tolower(y)>", parse it as R code, and evaluate it.
  code <- paste0(x, tolower(y))
  value <- tryCatch(
    eval(parse(text = code)),
    error = function(cond) cond
  )
  # A failed evaluation hands back the condition object; map that to NULL.
  if (inherits(value, "error")) {
    NULL
  } else {
    value
  }
}
| /R/jobs-provider.R | permissive | laasousa/charlatan | R | false | false | 1,259 | r | #' JobProvider
#'
#' @export
#' @keywords internal
#' @param locale (character) the locale to use. options: en_US (default),
#' fr_FR, fr_CH, hr_FR, fa_IR, pl_PL, ru_RU, uk_UA, zh_TW.
#' @details
#' \strong{Methods}
#' \describe{
#' \item{\code{render()}}{
#' Make a job
#' }
#' }
#' @format NULL
#' @usage NULL
#' @examples
#' z <- JobProvider$new()
#' z$render()
#'
#' z <- JobProvider$new(locale = "fr_FR")
#' z$locale
#' z$render()
#'
#' z <- JobProvider$new(locale = "hr_HR")
#' z$locale
#' z$render()
#'
#' z <- JobProvider$new(locale = "fa_IR")
#' z$locale
#' z$render()
JobProvider <- R6::R6Class(
inherit = BaseProvider,
'JobProvider',
public = list(
locale = NULL,
formats = NULL,
initialize = function(locale = NULL) {
if (!is.null(locale)) {
super$check_locale(locale)
self$locale <- locale
} else {
self$locale <- 'en_US'
}
self$formats <- parse_eval("job_formats_", self$locale)
},
render = function() {
super$random_element(self$formats)
}
)
)
parse_eval <- function(x, y) {
res <- tryCatch(
eval(parse(text = paste0(x, tolower(y)))),
error = function(E) E
)
if (inherits(res, "error")) {
NULL
} else {
res
}
}
|
"ssdev" <-
function(x){
n<-length(x)
sum(x**2)-n*mean(x)**2
}
| /R/ssdev.R | no_license | cran/sigma2tools | R | false | false | 69 | r | "ssdev" <-
function(x){
n<-length(x)
sum(x**2)-n*mean(x)**2
}
|
/Infnet-Analytics/MBA Big Data - Analytics com R (Aulas 07 e 08)/Arquivos Etapa 04b/Etapa_04d_(Geolocalização 0).R | no_license | xBarbosa/Data-Analytics | R | false | false | 1,372 | r | ||
/FinderDragPro.r | no_license | fruitsamples/FinderDragPro | R | false | false | 2,069 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_L2326.aluminum.R
\name{module_energy_L2326.aluminum}
\alias{module_energy_L2326.aluminum}
\title{module_energy_L2326.aluminum}
\usage{
module_energy_L2326.aluminum(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L2326.SectorLogitTables[[ curr_table ]]$data}, \code{L2326.Supplysector_aluminum}, \code{L2326.FinalEnergyKeyword_aluminum},
\code{L2326.SubsectorLogitTables[[ curr_table ]]$data}, \code{L2326.SubsectorLogit_aluminum}, \code{L2326.SubsectorShrwtFllt_aluminum},
\code{L2326.SubsectorInterp_aluminum}, \code{L2326.StubTech_aluminum}, \code{L2326.GlobalTechShrwt_aluminum}, \code{L2326.GlobalTechCoef_aluminum},
\code{L2326.GlobalTechCost_aluminum}, \code{L2326.GlobalTechCapture_aluminum}, \code{L2326.StubTechProd_aluminum}, \code{L2326.StubTechCalInput_aluminum},
\code{L2326.StubTechCoef_aluminum}, \code{L2326.PerCapitaBased_aluminum}, \code{L2326.BaseService_aluminum}, \code{L2326.PriceElasticity_aluminum},
\code{L2326.GlobalTechSecOut_aluminum},
\code{object}. The corresponding file in the
}
\description{
Compute a variety of final energy keyword, sector, share weight, and technology information for aluminum-related GCAM inputs.
}
\details{
The chunk provides final energy keyword, supplysector/subsector information, supplysector/subsector interpolation information, global technology share weight, global technology efficiency, global technology coefficients, global technology cost, price elasticity, stub technology information, stub technology interpolation information, stub technology calibrated inputs, and etc for aluminum sector.
}
\author{
Yang Liu Dec 2019
}
| /man/module_energy_L2326.aluminum.Rd | permissive | JGCRI/gcamdata | R | false | true | 1,923 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_L2326.aluminum.R
\name{module_energy_L2326.aluminum}
\alias{module_energy_L2326.aluminum}
\title{module_energy_L2326.aluminum}
\usage{
module_energy_L2326.aluminum(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L2326.SectorLogitTables[[ curr_table ]]$data}, \code{L2326.Supplysector_aluminum}, \code{L2326.FinalEnergyKeyword_aluminum},
\code{L2326.SubsectorLogitTables[[ curr_table ]]$data}, \code{L2326.SubsectorLogit_aluminum}, \code{L2326.SubsectorShrwtFllt_aluminum},
\code{L2326.SubsectorInterp_aluminum}, \code{L2326.StubTech_aluminum}, \code{L2326.GlobalTechShrwt_aluminum}, \code{L2326.GlobalTechCoef_aluminum},
\code{L2326.GlobalTechCost_aluminum}, \code{L2326.GlobalTechCapture_aluminum}, \code{L2326.StubTechProd_aluminum}, \code{L2326.StubTechCalInput_aluminum},
\code{L2326.StubTechCoef_aluminum}, \code{L2326.PerCapitaBased_aluminum}, \code{L2326.BaseService_aluminum}, \code{L2326.PriceElasticity_aluminum},
\code{L2326.GlobalTechSecOut_aluminum},
\code{object}. The corresponding file in the
}
\description{
Compute a variety of final energy keyword, sector, share weight, and technology information for aluminum-related GCAM inputs.
}
\details{
The chunk provides final energy keyword, supplysector/subsector information, supplysector/subsector interpolation information, global technology share weight, global technology efficiency, global technology coefficients, global technology cost, price elasticity, stub technology information, stub technology interpolation information, stub technology calibrated inputs, and etc for aluminum sector.
}
\author{
Yang Liu Dec 2019
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{random_FLCatches_generator}
\alias{random_FLCatches_generator}
\title{Generates an FLCatches object - a list of randomly sized and filled FLCatch objects}
\usage{
random_FLCatches_generator(min_catches = 2, max_catches = 5, ...)
}
\arguments{
\item{min_catches}{The minimum number of catches. Default is 2.}
\item{max_catches}{The maximum number of catches. Default is 5.}
\item{fixed_dims}{A vector of length 6 with the fixed length of each of the FLQuant dimensions. If any value is NA it is randomly set using the max_dims argument.}
\item{max_dims}{A vector of length 6 with maximum size of each of the FLQuant dimensions. Default value is c(5,5,5,4,4,10).}
\item{sd}{The standard deviation of the random numbers. Passed to rnorm() Default is 100.}
}
\value{
An FLCatches objects
}
\description{
Generates a list of randomly sized FLCatch objects filled with normally distributed random numbers with a mean of 0.
Used for automatic testing, particularly of the FLCatches_base<T> class in CPP.
}
\examples{
flcs <- random_FLCatches_generator()
length(flcs)
summary(flcs)
lapply(flcs, summary)
}
| /man/random_FLCatches_generator.Rd | no_license | drfinlayscott/FLRcppAdolc | R | false | false | 1,162 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{random_FLCatches_generator}
\alias{random_FLCatches_generator}
\title{Generates an FLCatches object - a list of randomly sized and filled FLCatch objects}
\usage{
random_FLCatches_generator(min_catches = 2, max_catches = 5, ...)
}
\arguments{
\item{min_catches}{The minimum number of catches. Default is 2.}
\item{max_catches}{The maximum number of catches. Default is 5.}
\item{fixed_dims}{A vector of length 6 with the fixed length of each of the FLQuant dimensions. If any value is NA it is randomly set using the max_dims argument.}
\item{max_dims}{A vector of length 6 with maximum size of each of the FLQuant dimensions. Default value is c(5,5,5,4,4,10).}
\item{sd}{The standard deviation of the random numbers. Passed to rnorm() Default is 100.}
}
\value{
An FLCatches objects
}
\description{
Generates a list of randomly sized FLCatch objects filled with normally distributed random numbers with a mean of 0.
Used for automatic testing, particularly of the FLCatches_base<T> class in CPP.
}
\examples{
flcs <- random_FLCatches_generator()
length(flcs)
summary(flcs)
lapply(flcs, summary)
}
|
# IBGE mortality tables (tabuas de mortalidade), 2006-2011.
# Builds one data frame per year named tm_ibge<year>. nm_tabuas, dtibge, basep,
# anosel and flt are assumed to be defined earlier in the pipeline — confirm.
narq <- sapply(paste0(dtibge,basep,anosel),list.files, pattern = flt, ignore.case = T)
# The 6th year's files (2011) live in an "/ods" subdirectory.
narq[6] <- sapply(paste0(dtibge,basep,lapply(anosel[6],paste0,"/ods")),list.files, pattern = flt)
for (i in 1:(nrow(nm_tabuas)-1)) {
# One frame per year, created via assign()/get() with a computed name.
nome <- paste0("tm_ibge",nm_tabuas$ano[i])
print(nome)
# First block of the sheet (spreadsheet rows 6-46), tagged with the year.
assign(nome,cbind(read_excel(nm_tabuas$arqf[i], range = "A6:g46"), ano = nm_tabuas$ano[i]))
# Second block (rows 62-103), appended below the first.
pedaco2 <- cbind(read_excel(nm_tabuas$arqf[i], range = "a62:g103"), ano = nm_tabuas$ano[i])
assign(nome,rbind(get(nome),pedaco2))
rm(pedaco2)
}
# The last row (2011) is stored as ODS, so it is read separately with read_ods.
tm_ibge2011 <- cbind(read_ods(nm_tabuas$arqf[6], range = "a6:g45"), ano = nm_tabuas$ano[6])
pedaco2 <- cbind(read_ods(nm_tabuas$arqf[6], range = "a62:g102")
, ano = nm_tabuas$ano[6])
tm_ibge2011 <- rbind(tm_ibge2011,pedaco2)
rm(pedaco2)
| /R/tabuas_mortalidade_ibge_2006-2011-complemento-parcial.R | no_license | rodrigoesborges/microdadosbrasilpoliticasocial | R | false | false | 809 | r | #tabuas_mortalidade_ibge-2006-2011
narq <- sapply(paste0(dtibge,basep,anosel),list.files, pattern = flt, ignore.case = T)
narq[6] <- sapply(paste0(dtibge,basep,lapply(anosel[6],paste0,"/ods")),list.files, pattern = flt)
for (i in 1:(nrow(nm_tabuas)-1)) {
nome <- paste0("tm_ibge",nm_tabuas$ano[i])
print(nome)
assign(nome,cbind(read_excel(nm_tabuas$arqf[i], range = "A6:g46"), ano = nm_tabuas$ano[i]))
pedaco2 <- cbind(read_excel(nm_tabuas$arqf[i], range = "a62:g103"), ano = nm_tabuas$ano[i])
assign(nome,rbind(get(nome),pedaco2))
rm(pedaco2)
}
tm_ibge2011 <- cbind(read_ods(nm_tabuas$arqf[6], range = "a6:g45"), ano = nm_tabuas$ano[6])
pedaco2 <- cbind(read_ods(nm_tabuas$arqf[6], range = "a62:g102")
, ano = nm_tabuas$ano[6])
tm_ibge2011 <- rbind(tm_ibge2011,pedaco2)
rm(pedaco2)
|
#' @title Estimate the ADF model under the null
#'
#' @description \code{ADFres} estimates the ADF model under the null with lag
#' order selected by AIC or BIC
#'
#' @param y A Vector. Data.
#' @param IC An integer, 0 for fixed lag order (default), 1 for AIC and 2 for
#' BIC.
#' @param adflag An integer. Lag order when IC=0; maximum number of lags when
#' IC>0 (default = 0).
#'
#' @return Numeric, ADF test statistic.
#'
#' @references Phillips, P. C. B., Shi, S., & Yu, J. (2015a). Testing for
#' multiple bubbles: Historical episodes of exuberance and collapse in the S&P
#' 500. \emph{International Economic Review}, 56(4), 1034--1078. Phillips, P.
#' C. B., Shi, S., & Yu, J. (2015b). Testing for multiple bubbles: Limit
#' Theory for Real-Time Detectors. \emph{International Economic Review},
#' 56(4), 1079--1134.
#'
#'
ADFres <- function(y, IC, adflag) {
# Fit the ADF regression under the null: regress the first difference of y on
# a constant plus `lag` lagged differences, returning the OLS coefficients
# (beta), the residuals (eps), and the lag order actually used (lag).
T0 <- length(y)
T1 <- length(y) - 1
const <- rep(1,T1)
# First differences: dy[t] = y[t+1] - y[t].
dy <- y[2:T0] - y[1:T1]
x1 <- data.frame(const)
# Effective sample size at the maximum lag. NOTE: this local `t` shadows the
# transpose function for values, but calls like t(x2) still resolve to base::t
# because R looks for a *function* in call position.
t <- T1 - adflag
if (IC > 0) {
# Data-driven lag selection: fit the model for every k in 0..adflag and keep
# the lag minimizing AIC (IC==1) or BIC (IC==2).
ICC <- matrix(0,nrow = adflag+1,ncol=1)
betaM <- matrix(list(), nrow=adflag+1,ncol=1)
epsM <- matrix(list(), nrow=adflag+1,ncol=1)
for (k in 0:adflag){
# model Specification
xx<-matrix(x1[(k+1):T1,]) #@-from k+1 to the end (including y1 and x)-@
dy01<-matrix(dy[(k+1):T1]) #@-from k+1 to the end (including dy0)-@
if (k>0){
x2<-cbind(xx,matrix(0,nrow=T1-k,ncol=k))
for (j in 1:k){
x2[,ncol(xx)+j]<-dy[(k+1-j):(T1-j)] #@-including k lag variables of dy in x2-@
}
}else x2<-xx
#OLS regression
betaM[k+1] <- list(solve(t(x2)%*%x2) %*% (t(x2)%*%dy01)) #@-model A-@
epsM[[k+1]] <- dy01-x2%*%as.matrix(betaM[[k+1]])
# Information Criteria
# Gaussian log-likelihood of the residuals (unit error variance).
npdf <- sum(-1/2*log(2*pi)-1/2*(epsM[[k+1]]^2))
# NOTE(review): the normalizer `t` is the sample size at the *maximum* lag,
# while the residual vector for lag k has T1-k entries — presumably a
# deliberate common-sample normalization; confirm against the reference.
if (IC==1){ #@ AIC @
ICC[k+1] <- -2*npdf/t+2*length(betaM[[k+1]])/t
}else if(IC==2){ #@ BIC @
ICC[k+1] <- -2*npdf/t+length(betaM[[k+1]])*log(t)/t
}
}
# Keep the fit with the smallest criterion; lag0 is 1-based, lag is 0-based.
lag0 <- which.min(ICC)
beta<-betaM[[lag0]]
eps<-epsM[[lag0]]
lag<-lag0-1
}else if(IC==0){
# Fixed lag order: a single fit with k = adflag.
# Model Specification
xx <- matrix(x1[(adflag+1):T1,]) #@-from k+1 to the end (including y1 and x)-@
dy01 <- matrix(dy[(adflag+1):T1]) #@-from k+1 to the end (including dy0)-@
if (adflag>0){
x2<-cbind(xx, matrix(0,nrow=t,ncol=adflag))
for (j in 1:adflag){
x2[,ncol(xx)+j]<-dy[(adflag+1-j):(T1-j)] # @-including k lag variables of dy in x2-@
}
}else x2 <- xx
# OLS Regression
beta <- solve(t(x2)%*%x2) %*% (t(x2)%*%dy01) #@-model A-@
eps <- dy01-x2%*%beta
lag<-adflag
}
result<-list(beta=beta,eps=eps,lag=lag)
return(result)
}
| /R/ADFres.R | no_license | cran/psymonitor | R | false | false | 2,829 | r | #' @title Estimate the ADF model under the null
#'
#' @description \code{ADFres} estimates the ADF model under the null with lag
#' order selected by AIC or BIC
#'
#' @param y A Vector. Data.
#' @param IC An integer, 0 for fixed lag order (default), 1 for AIC and 2 for
#' BIC.
#' @param adflag An integer. Lag order when IC=0; maximum number of lags when
#' IC>0 (default = 0).
#'
#' @return Numeric, ADF test statistic.
#'
#' @references Phillips, P. C. B., Shi, S., & Yu, J. (2015a). Testing for
#' multiple bubbles: Historical episodes of exuberance and collapse in the S&P
#' 500. \emph{International Economic Review}, 56(4), 1034--1078. Phillips, P.
#' C. B., Shi, S., & Yu, J. (2015b). Testing for multiple bubbles: Limit
#' Theory for Real-Time Detectors. \emph{International Economic Review},
#' 56(4), 1079--1134.
#'
#'
ADFres <- function(y, IC, adflag) {
T0 <- length(y)
T1 <- length(y) - 1
const <- rep(1,T1)
dy <- y[2:T0] - y[1:T1]
x1 <- data.frame(const)
t <- T1 - adflag
if (IC > 0) {
ICC <- matrix(0,nrow = adflag+1,ncol=1)
betaM <- matrix(list(), nrow=adflag+1,ncol=1)
epsM <- matrix(list(), nrow=adflag+1,ncol=1)
for (k in 0:adflag){
# model Specification
xx<-matrix(x1[(k+1):T1,]) #@-from k+1 to the end (including y1 and x)-@
dy01<-matrix(dy[(k+1):T1]) #@-from k+1 to the end (including dy0)-@
if (k>0){
x2<-cbind(xx,matrix(0,nrow=T1-k,ncol=k))
for (j in 1:k){
x2[,ncol(xx)+j]<-dy[(k+1-j):(T1-j)] #@-including k lag variables of dy in x2-@
}
}else x2<-xx
#OLS regression
betaM[k+1] <- list(solve(t(x2)%*%x2) %*% (t(x2)%*%dy01)) #@-model A-@
epsM[[k+1]] <- dy01-x2%*%as.matrix(betaM[[k+1]])
# Information Criteria
npdf <- sum(-1/2*log(2*pi)-1/2*(epsM[[k+1]]^2))
if (IC==1){ #@ AIC @
ICC[k+1] <- -2*npdf/t+2*length(betaM[[k+1]])/t
}else if(IC==2){ #@ BIC @
ICC[k+1] <- -2*npdf/t+length(betaM[[k+1]])*log(t)/t
}
}
lag0 <- which.min(ICC)
beta<-betaM[[lag0]]
eps<-epsM[[lag0]]
lag<-lag0-1
}else if(IC==0){
# Model Specification
xx <- matrix(x1[(adflag+1):T1,]) #@-from k+1 to the end (including y1 and x)-@
dy01 <- matrix(dy[(adflag+1):T1]) #@-from k+1 to the end (including dy0)-@
if (adflag>0){
x2<-cbind(xx, matrix(0,nrow=t,ncol=adflag))
for (j in 1:adflag){
x2[,ncol(xx)+j]<-dy[(adflag+1-j):(T1-j)] # @-including k lag variables of dy in x2-@
}
}else x2 <- xx
# OLS Regression
beta <- solve(t(x2)%*%x2) %*% (t(x2)%*%dy01) #@-model A-@
eps <- dy01-x2%*%beta
lag<-adflag
}
result<-list(beta=beta,eps=eps,lag=lag)
return(result)
}
|
## Tidy-data course script: builds a summary of the UCI HAR data set
## (mean/std measurements averaged per activity and subject).
library(dplyr)
# read train data
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# read test data
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# read data description
variable_names <- read.table("./UCI HAR Dataset/features.txt")
# read activity labels
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 1. Merges the training and the test sets to create one data set.
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
X_total <- X_total[,selected_var[,1]]
# 3. Uses descriptive activity names to name the activities in the data set
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- Y_total[,-1]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(X_total) <- variable_names[selected_var[,1],2]
# 5. From the data set in step 4, creates a second, independent tidy data set with the average
# of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
# NOTE(review): summarize_each() is deprecated in current dplyr; consider
# summarise(across(everything(), list(mean = mean, median = median))) instead.
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(list(mean = mean, median = median))
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE) | / run_analysis.R | no_license | KrishnaSahithi1/TIDY | R | false | false | 1,846 | r | library(dplyr)
# read train data
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# read test data
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# read data description
variable_names <- read.table("./UCI HAR Dataset/features.txt")
# read activity labels
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 1. Merges the training and the test sets to create one data set.
X_total <- rbind(X_train, X_test)
Y_total <- rbind(Y_train, Y_test)
Sub_total <- rbind(Sub_train, Sub_test)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
X_total <- X_total[,selected_var[,1]]
# 3. Uses descriptive activity names to name the activities in the data set
colnames(Y_total) <- "activity"
Y_total$activitylabel <- factor(Y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- Y_total[,-1]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(X_total) <- variable_names[selected_var[,1],2]
# 5. From the data set in step 4, creates a second, independent tidy data set with the average
# of each variable for each activity and each subject.
colnames(Sub_total) <- "subject"
total <- cbind(X_total, activitylabel, Sub_total)
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(list(mean = mean, median = median))
write.table(total_mean, file = "./UCI HAR Dataset/tidydata.txt", row.names = FALSE, col.names = TRUE) |
library(magrittr)
# TCGA_mat source: https://xenabrowser.net/datapages/?dataset=TumorCompendium_v10_PolyA_hugo_log2tpm_58581genes_2019-07-25.tsv&host=https%3A%2F%2Fxena.treehouse.gi.ucsc.edu%3A443
load_TCGA_mat <- function(data_dir, tumor_file='TCGA_mat.tsv') {
  # Read the tumor expression TSV (one row per gene, 'Gene' column holds the
  # names) and return a samples x genes matrix.
  tumor_tbl <- readr::read_tsv(file.path(data_dir, tumor_file))
  gene_by_sample <- tumor_tbl %>%
    as.data.frame() %>%
    tibble::column_to_rownames('Gene') %>%
    as.matrix()
  TCGA_mat <- t(gene_by_sample)
  return(TCGA_mat)
}
# CCLE_mat source: depmap.org DepMap Public 19Q4 CCLE_expression_full.csv
load_CCLE_mat <- function(data_dir, cell_line_file = 'CCLE_mat.csv') {
# Cell-line expression matrix: rows come from the 'X1' column, and column
# headers look like "SYMBOL (ID)".
CCLE_mat <- readr::read_csv(file.path(data_dir, cell_line_file)) %>%
as.data.frame() %>%
tibble::column_to_rownames('X1') %>%
as.matrix()
# Strip each column name down to the identifier inside the parentheses.
colnames(CCLE_mat) <- stringr::str_match(colnames(CCLE_mat), '\\((.+)\\)')[,2]
return(CCLE_mat)
}
# Celligner_info file available on figshare: https://figshare.com/articles/Celligner_data/11965269
load_alignment <- function(data_dir, filename = 'Celligner_info.csv') {
  # Read the Celligner sample-info table and index its rows by sampleID.
  info_path <- file.path(data_dir, filename)
  alignment <- as.data.frame(data.table::fread(info_path))
  rownames(alignment) <- alignment$sampleID
  return(alignment)
}
load_CCLE_ann <- function(data_dir, filename = 'Celligner_info.csv') {
# Cell-line annotations from the combined Celligner info table: keep only rows
# with type == 'CL', drop the corrected and tumor-specific UMAP/cluster
# columns, and rename the uncorrected CL columns to the generic names.
CCLE_ann <- data.table::fread(file.path(data_dir, filename)) %>%
as.data.frame()
CCLE_ann <- dplyr::filter(CCLE_ann, type=='CL') %>%
dplyr::select(-UMAP_1, -UMAP_2, -cluster, -uncorrected_tumor_UMAP_1, -uncorrected_tumor_UMAP_2, -uncorrected_tumor_cluster) %>%
dplyr::rename(
UMAP_1 = uncorrected_CL_UMAP_1,
UMAP_2 = uncorrected_CL_UMAP_2,
cluster = uncorrected_CL_cluster
)
# Index rows by sample ID for downstream lookups.
rownames(CCLE_ann) <- CCLE_ann$sampleID
return(CCLE_ann)
}
load_TCGA_ann <- function(data_dir, filename = 'Celligner_info.csv') {
# Tumor annotations from the combined Celligner info table: keep only rows
# with type == 'tumor', drop the corrected and CL-specific UMAP/cluster
# columns, and rename the uncorrected tumor columns to the generic names.
TCGA_ann <- data.table::fread(file.path(data_dir, filename)) %>%
as.data.frame()
TCGA_ann <- dplyr::filter(TCGA_ann, type=='tumor') %>%
dplyr::select(-UMAP_1, -UMAP_2, -cluster, -uncorrected_CL_UMAP_1, -uncorrected_CL_UMAP_2, -uncorrected_CL_cluster) %>%
dplyr::rename(
UMAP_1 = uncorrected_tumor_UMAP_1,
UMAP_2 = uncorrected_tumor_UMAP_2,
cluster = uncorrected_tumor_cluster
)
# Index rows by sample ID for downstream lookups.
rownames(TCGA_ann) <- TCGA_ann$sampleID
return(TCGA_ann)
}
load_cPCA_values <- function(data_dir, cPCA_values = 'cPCA_values.csv') {
  # Read the contrastive-PCA values table.
  # Fix: qualify read_csv with readr:: — every other reader in this file is
  # namespace-qualified and readr is never attached with library(), so a bare
  # read_csv would fail unless the caller happened to attach readr themselves.
  cPCA_values <- readr::read_csv(file.path(data_dir, cPCA_values))
  return(cPCA_values)
}
load_cPCA_vectors <- function(data_dir, cPCs = 'cPCs.csv') {
  # Read the contrastive principal components and index rows by the 'X1' column.
  # Fix: qualify read_csv with readr:: for the same reason as elsewhere in this
  # file — readr is never attached with library(), only used via namespace.
  cPCA_vectors <- readr::read_csv(file.path(data_dir, cPCs)) %>%
    as.data.frame() %>%
    tibble::column_to_rownames('X1')
  return(cPCA_vectors)
}
load_data <- function(data_dir, tumor_file = 'TCGA_mat.tsv', cell_line_file = 'CCLE_mat.csv',
                      annotation_file = 'Celligner_info.csv', hgnc_file = "hgnc_complete_set_7.24.2018.txt") {
  # Load the tumor and cell-line expression matrices plus annotations, restrict
  # both matrices to a shared set of genes, and return everything as a list.
  #
  # Fix 1: the original body never used tumor_file/cell_line_file and instead
  # read TCGA_mat/CCLE_mat from the global environment. Load them explicitly
  # through the helpers defined above so the function is self-contained.
  TCGA_mat <- load_TCGA_mat(data_dir, tumor_file)
  CCLE_mat <- load_CCLE_mat(data_dir, cell_line_file)
  hgnc.complete.set <- data.table::fread(file.path(data_dir, hgnc_file)) %>% as.data.frame()
  # Keep only TCGA columns with a known HGNC symbol, then relabel them with the
  # matching ensembl_gene_id so the two matrices share a naming scheme.
  common_genes <- intersect(colnames(TCGA_mat), hgnc.complete.set$symbol)
  TCGA_mat <- TCGA_mat[,common_genes]
  hgnc.complete.set <- dplyr::filter(hgnc.complete.set, symbol %in% common_genes)
  hgnc.complete.set <- hgnc.complete.set[!duplicated(hgnc.complete.set$symbol),]
  rownames(hgnc.complete.set) <- hgnc.complete.set$symbol
  hgnc.complete.set <- hgnc.complete.set[common_genes,]
  colnames(TCGA_mat) <- hgnc.complete.set$ensembl_gene_id
  # Fix 2: use || instead of | — with annotation_file = NULL the eager `|`
  # still evaluates file.path(data_dir, NULL), producing a zero-length
  # condition and an "argument is of length zero" error; || short-circuits
  # after is.null() and takes the stub branch as intended.
  if (is.null(annotation_file) || !file.exists(file.path(data_dir, annotation_file))) {
    # No annotation file available: build a minimal stub annotation table.
    ann <- data.frame(sampleID = c(rownames(TCGA_mat), rownames(CCLE_mat)),
                      lineage = NA,
                      subtype = NA,
                      type = c(rep('tumor', nrow(TCGA_mat)), rep('CL', nrow(CCLE_mat))))
    ann$`Primary/Metastasis` <- NA
  } else {
    ann <- data.table::fread(file.path(data_dir, annotation_file)) %>% as.data.frame()
    # Drop any precomputed UMAP/cluster columns if present.
    if ('UMAP_1' %in% colnames(ann)) {
      ann <- ann %>%
        dplyr::select(-UMAP_1)
    }
    if ('UMAP_2' %in% colnames(ann)) {
      ann <- ann %>%
        dplyr::select(-UMAP_2)
    }
    if ('cluster' %in% colnames(ann)) {
      ann <- ann %>%
        dplyr::select(-cluster)
    }
  }
  TCGA_ann <- dplyr::filter(ann, type=='tumor')
  CCLE_ann <- dplyr::filter(ann, type=='CL')
  # Exclude non-coding RNA and pseudogenes, then restrict both matrices to the
  # genes present in each.
  func_genes <- dplyr::filter(hgnc.complete.set, !locus_group %in% c('non-coding RNA', 'pseudogene'))$ensembl_gene_id
  genes_used <- intersect(colnames(TCGA_mat), colnames(CCLE_mat))
  genes_used <- intersect(genes_used, func_genes)
  TCGA_mat <- TCGA_mat[,genes_used]
  CCLE_mat <- CCLE_mat[,genes_used]
  return(list(TCGA_mat = TCGA_mat, TCGA_ann = TCGA_ann, CCLE_mat = CCLE_mat, CCLE_ann = CCLE_ann))
}
| /src/load_figure_data.R | no_license | millergw/celligner_tasks | R | false | false | 4,708 | r | library(magrittr)
# TCGA_mat source: https://xenabrowser.net/datapages/?dataset=TumorCompendium_v10_PolyA_hugo_log2tpm_58581genes_2019-07-25.tsv&host=https%3A%2F%2Fxena.treehouse.gi.ucsc.edu%3A443
# Load the TCGA (Treehouse) expression matrix. Genes are rows in the TSV,
# so the table is transposed to yield a samples x genes numeric matrix.
load_TCGA_mat <- function(data_dir, tumor_file='TCGA_mat.tsv') {
  raw <- readr::read_tsv(file.path(data_dir, tumor_file)) %>%
    as.data.frame() %>%
    tibble::column_to_rownames('Gene')
  TCGA_mat <- t(as.matrix(raw))
  return(TCGA_mat)
}
# CCLE_mat source: depmap.org DepMap Public 19Q4 CCLE_expression_full.csv
# Load the CCLE expression matrix (DepMap CCLE_expression_full.csv).
# Column names arrive as "SYMBOL (ID)"; keep only the part inside the
# parentheses so columns line up with the Ensembl-keyed TCGA matrix.
load_CCLE_mat <- function(data_dir, cell_line_file = 'CCLE_mat.csv') {
  CCLE_mat <- readr::read_csv(file.path(data_dir, cell_line_file)) %>%
    as.data.frame() %>%
    tibble::column_to_rownames('X1') %>%
    as.matrix()
  in_parens <- stringr::str_match(colnames(CCLE_mat), '\\((.+)\\)')
  colnames(CCLE_mat) <- in_parens[, 2]
  return(CCLE_mat)
}
# Celligner_info file available on figshare: https://figshare.com/articles/Celligner_data/11965269
# Load the full Celligner alignment table, row-indexed by sample ID.
# (Celligner_info file is available on figshare; see comment above.)
load_alignment <- function(data_dir, filename = 'Celligner_info.csv') {
  alignment <- file.path(data_dir, filename) %>%
    data.table::fread() %>%
    as.data.frame()
  rownames(alignment) <- alignment$sampleID
  return(alignment)
}
# Load the cell-line annotation table from the Celligner info file.
# Keeps only rows with type == 'CL' and exposes the *uncorrected cell line*
# embedding columns under the generic UMAP_1 / UMAP_2 / cluster names.
load_CCLE_ann <- function(data_dir, filename = 'Celligner_info.csv') {
  info <- data.table::fread(file.path(data_dir, filename)) %>% as.data.frame()
  CCLE_ann <- info %>%
    dplyr::filter(type == 'CL') %>%
    # Drop the corrected embedding and the tumor-specific columns.
    dplyr::select(-UMAP_1, -UMAP_2, -cluster,
                  -uncorrected_tumor_UMAP_1, -uncorrected_tumor_UMAP_2, -uncorrected_tumor_cluster) %>%
    dplyr::rename(
      UMAP_1 = uncorrected_CL_UMAP_1,
      UMAP_2 = uncorrected_CL_UMAP_2,
      cluster = uncorrected_CL_cluster
    )
  rownames(CCLE_ann) <- CCLE_ann$sampleID
  return(CCLE_ann)
}
# Load the tumor-sample annotation table from the Celligner info file.
# Keeps only rows with type == 'tumor' and exposes the *uncorrected tumor*
# embedding columns under the generic UMAP_1 / UMAP_2 / cluster names.
load_TCGA_ann <- function(data_dir, filename = 'Celligner_info.csv') {
  info <- data.table::fread(file.path(data_dir, filename)) %>% as.data.frame()
  TCGA_ann <- info %>%
    dplyr::filter(type == 'tumor') %>%
    # Drop the corrected embedding and the cell-line-specific columns.
    dplyr::select(-UMAP_1, -UMAP_2, -cluster,
                  -uncorrected_CL_UMAP_1, -uncorrected_CL_UMAP_2, -uncorrected_CL_cluster) %>%
    dplyr::rename(
      UMAP_1 = uncorrected_tumor_UMAP_1,
      UMAP_2 = uncorrected_tumor_UMAP_2,
      cluster = uncorrected_tumor_cluster
    )
  rownames(TCGA_ann) <- TCGA_ann$sampleID
  return(TCGA_ann)
}
# Load the contrastive-PCA values table.
#
# @param data_dir directory containing the file.
# @param cPCA_values file name of the cPCA values CSV.
# @return a tibble of cPCA values.
load_cPCA_values <- function(data_dir, cPCA_values = 'cPCA_values.csv') {
  # Fix: namespace the call. Only magrittr is attached in this file, so an
  # unqualified read_csv() would fail unless readr happened to be loaded;
  # every other loader here already uses readr:: / data.table:: explicitly.
  cPCA_values <- readr::read_csv(file.path(data_dir, cPCA_values))
  return(cPCA_values)
}
# Load the contrastive-PCA vectors, row-indexed by the first ('X1') column.
#
# @param data_dir directory containing the file.
# @param cPCs file name of the cPC vectors CSV.
# @return a data frame of cPC vectors with row names taken from column X1.
load_cPCA_vectors <- function(data_dir, cPCs = 'cPCs.csv') {
  # Fix: namespace the call. Only magrittr is attached in this file, so an
  # unqualified read_csv() would fail unless readr happened to be loaded.
  cPCA_vectors <- readr::read_csv(file.path(data_dir, cPCs)) %>%
    as.data.frame() %>%
    tibble::column_to_rownames('X1')
  return(cPCA_vectors)
}
# Assemble the matched tumor / cell-line expression matrices and annotations.
#
# @param data_dir directory containing all input files.
# @param tumor_file TCGA expression TSV (genes x samples).
# @param cell_line_file CCLE expression CSV (samples x genes).
# @param annotation_file optional Celligner annotation CSV; if NULL or
#   missing on disk, a minimal placeholder annotation is built instead.
# @param hgnc_file HGNC table used to map gene symbols to Ensembl IDs.
# @return list(TCGA_mat, TCGA_ann, CCLE_mat, CCLE_ann) restricted to the
#   functional genes shared by both matrices.
load_data <- function(data_dir, tumor_file = 'TCGA_mat.tsv', cell_line_file = 'CCLE_mat.csv',
                      annotation_file = 'Celligner_info.csv', hgnc_file = "hgnc_complete_set_7.24.2018.txt") {
  # Fix: previously TCGA_mat / CCLE_mat were pulled from the global
  # environment and the tumor_file / cell_line_file arguments were ignored.
  TCGA_mat <- load_TCGA_mat(data_dir, tumor_file)
  CCLE_mat <- load_CCLE_mat(data_dir, cell_line_file)
  hgnc.complete.set <- data.table::fread(file.path(data_dir, hgnc_file)) %>% as.data.frame()
  # Re-key the TCGA columns from HGNC symbols to Ensembl gene IDs.
  common_genes <- intersect(colnames(TCGA_mat), hgnc.complete.set$symbol)
  TCGA_mat <- TCGA_mat[, common_genes]
  hgnc.complete.set <- dplyr::filter(hgnc.complete.set, symbol %in% common_genes)
  hgnc.complete.set <- hgnc.complete.set[!duplicated(hgnc.complete.set$symbol), ]
  rownames(hgnc.complete.set) <- hgnc.complete.set$symbol
  hgnc.complete.set <- hgnc.complete.set[common_genes, ]
  colnames(TCGA_mat) <- hgnc.complete.set$ensembl_gene_id
  # Fix: use the scalar, short-circuiting ||. With the elementwise `|`, a
  # NULL annotation_file makes file.path() return character(0) and the
  # `if` condition errors with "argument is of length zero".
  if (is.null(annotation_file) || !file.exists(file.path(data_dir, annotation_file))) {
    # No annotation available: build a minimal placeholder annotation.
    ann <- data.frame(sampleID = c(rownames(TCGA_mat), rownames(CCLE_mat)),
                      lineage = NA,
                      subtype = NA,
                      type = c(rep('tumor', nrow(TCGA_mat)), rep('CL', nrow(CCLE_mat))))
    ann$`Primary/Metastasis` <- NA
  } else {
    ann <- data.table::fread(file.path(data_dir, annotation_file)) %>% as.data.frame()
    # Drop any precomputed embedding columns so they can be recomputed.
    for (col in c('UMAP_1', 'UMAP_2', 'cluster')) {
      if (col %in% colnames(ann)) {
        ann[[col]] <- NULL
      }
    }
  }
  TCGA_ann <- dplyr::filter(ann, type == 'tumor')
  CCLE_ann <- dplyr::filter(ann, type == 'CL')
  # Restrict to functional genes present in both matrices.
  func_genes <- dplyr::filter(hgnc.complete.set, !locus_group %in% c('non-coding RNA', 'pseudogene'))$ensembl_gene_id
  genes_used <- intersect(colnames(TCGA_mat), colnames(CCLE_mat))
  genes_used <- intersect(genes_used, func_genes)
  TCGA_mat <- TCGA_mat[, genes_used]
  CCLE_mat <- CCLE_mat[, genes_used]
  return(list(TCGA_mat = TCGA_mat, TCGA_ann = TCGA_ann, CCLE_mat = CCLE_mat, CCLE_ann = CCLE_ann))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_preprocessing.R
\name{Search.GEO}
\alias{Search.GEO}
\title{Searching GEO Datasets by keywords}
\usage{
Search.GEO(
db = "gds",
organism = NULL,
title = NULL,
title_lg = NULL,
description = NULL,
description_lg = NULL,
datasetType = "Expression profiling by high throughput sequencing",
datasetType_lg = "AND",
use_history = T
)
}
\arguments{
\item{db}{character, name of the database to search for.}
\item{organism}{character, organism to search for.}
\item{title}{character, keywords contained in title}
\item{title_lg}{character, the logical connection between next search fields.}
\item{description}{character, keywords contained in description}
\item{description_lg}{character, the logical connection between next search fields.}
\item{datasetType}{character, dataset type}
\item{datasetType_lg}{character, the logical connection between next search fields.}
\item{use_history}{logical. If TRUE return a web_history object for use in later calls to the NCBI}
}
\value{
data.frame
}
\description{
Searching GEO Datasets by keywords
}
| /man/Search.GEO.Rd | permissive | XPL1986/QRseq | R | false | true | 1,147 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_preprocessing.R
\name{Search.GEO}
\alias{Search.GEO}
\title{Searching GEO Datasets by keywords}
\usage{
Search.GEO(
db = "gds",
organism = NULL,
title = NULL,
title_lg = NULL,
description = NULL,
description_lg = NULL,
datasetType = "Expression profiling by high throughput sequencing",
datasetType_lg = "AND",
use_history = T
)
}
\arguments{
\item{db}{character, name of the database to search for.}
\item{organism}{character, organism to search for.}
\item{title}{character, keywords contained in title}
\item{title_lg}{character, the logical connection between next search fields.}
\item{description}{character, keywords contained in description}
\item{description_lg}{character, the logical connection between next search fields.}
\item{datasetType}{character, dataset type}
\item{datasetType_lg}{character, the logical connection between next search fields.}
\item{use_history}{logical. If TRUE return a web_history object for use in later calls to the NCBI}
}
\value{
data.frame
}
\description{
Searching GEO Datasets by keywords
}
|
### switch to matching with X.3 instead of X.5 since the latter isn't consistent
# Script: re-keys several Sentinel-2 matchup tables from the X.5 point index
# to X.3, writing each re-keyed table back out as a *_X3.csv file.
# NOTE(review): uses an absolute Windows network path; intended to be run
# interactively from that location.
setwd("O:/PRIV/NERL_ORD_CYAN/Sentinel2/Validation/681_imgs")
# Build the X.5 -> X.3 lookup from the validation matchup file.
mu_mci_raw <- read.csv("validation_S2_682imgs_MCI_L1C_2018-11-21.csv", stringsAsFactors = FALSE)
mu_x <- mu_mci_raw[, which(colnames(mu_mci_raw) %in% c("X.5", "X.3"))]
## raw bands for sediment
# Attach X.3 to the raw-band table via the shared X.5 key, then write out.
raw_bands <- read.csv("mu_rawbands_3day.csv", stringsAsFactors = FALSE)
raw_bands_x3 <- merge(raw_bands, mu_x, by = "X.5")
write.csv(raw_bands_x3, "mu_rawbands_3day_X3.csv")
## bad imagery
# Same re-keying for the image-check comments; here X.5 lives in point_IDX5.
img_comments_orig <- read.csv("ImageCheck_0day_comments.csv", stringsAsFactors = FALSE)
img_comments_x3 <- merge(img_comments_orig, mu_x, by.x = "point_IDX5", by.y = "X.5")
write.csv(img_comments_x3, "ImageCheck_0day_comments_X3.csv")
#
# NOTE(review): `mu_mci` and `missing_x3` are not defined anywhere in this
# script (only `mu_mci_raw` is); this block appears to rely on objects left
# over from an interactive session -- confirm before sourcing the file.
mu_mci_missing <- mu_mci[which(mu_mci$X.3 %in% missing_x3),
which(colnames(mu_mci) %in% c("X.3", "PRODUCT_ID", "GRANULE_ID", "COMID", "shore_dist", "state",
"chla_corr", "chla_s2", "chl_error", "dist_shore_m"))]
write.csv(mu_mci_missing, "missing_BRR.csv")
#missing_x3 <- mu_mci$X.3[which(!(mu_mci$X.3 %in% img_comments$X.3))]
| /old/update_matching_x3.R | no_license | wbsalls/Sent2 | R | false | false | 1,185 | r | ### switch to matching with X.3 instead of X.5 since the latter isn't consistent
# Script: re-keys several Sentinel-2 matchup tables from the X.5 point index
# to X.3, writing each re-keyed table back out as a *_X3.csv file.
# NOTE(review): uses an absolute Windows network path; intended to be run
# interactively from that location.
setwd("O:/PRIV/NERL_ORD_CYAN/Sentinel2/Validation/681_imgs")
# Build the X.5 -> X.3 lookup from the validation matchup file.
mu_mci_raw <- read.csv("validation_S2_682imgs_MCI_L1C_2018-11-21.csv", stringsAsFactors = FALSE)
mu_x <- mu_mci_raw[, which(colnames(mu_mci_raw) %in% c("X.5", "X.3"))]
## raw bands for sediment
# Attach X.3 to the raw-band table via the shared X.5 key, then write out.
raw_bands <- read.csv("mu_rawbands_3day.csv", stringsAsFactors = FALSE)
raw_bands_x3 <- merge(raw_bands, mu_x, by = "X.5")
write.csv(raw_bands_x3, "mu_rawbands_3day_X3.csv")
## bad imagery
# Same re-keying for the image-check comments; here X.5 lives in point_IDX5.
img_comments_orig <- read.csv("ImageCheck_0day_comments.csv", stringsAsFactors = FALSE)
img_comments_x3 <- merge(img_comments_orig, mu_x, by.x = "point_IDX5", by.y = "X.5")
write.csv(img_comments_x3, "ImageCheck_0day_comments_X3.csv")
#
# NOTE(review): `mu_mci` and `missing_x3` are not defined anywhere in this
# script (only `mu_mci_raw` is); this block appears to rely on objects left
# over from an interactive session -- confirm before sourcing the file.
mu_mci_missing <- mu_mci[which(mu_mci$X.3 %in% missing_x3),
which(colnames(mu_mci) %in% c("X.3", "PRODUCT_ID", "GRANULE_ID", "COMID", "shore_dist", "state",
"chla_corr", "chla_s2", "chl_error", "dist_shore_m"))]
write.csv(mu_mci_missing, "missing_BRR.csv")
#missing_x3 <- mu_mci$X.3[which(!(mu_mci$X.3 %in% img_comments$X.3))]
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dev_mode}
\alias{dev_mode}
\title{Activate and deactivate development mode.}
\usage{
dev_mode(on = NULL, path = getOption("devtools.path"))
}
\arguments{
\item{on}{turn dev mode on (\code{TRUE}) or off (\code{FALSE}). If omitted
will guess based on whether or not \code{path} is in
\code{\link{.libPaths}}}
\item{path}{directory to library.}
}
\description{
When activated, \code{dev_mode} creates a new library for storing installed
packages. This new library is automatically created when \code{dev_mode} is
activated if it does not already exist.
This allows you to test development packages in a sandbox, without
interfering with the other packages you have installed.
}
\examples{
\donttest{
dev_mode()
dev_mode()
}
}
| /devtoolsVersion/devtools 19/man/dev_mode.Rd | no_license | connectthefuture/devtools-R-Forge | R | false | false | 785 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dev_mode}
\alias{dev_mode}
\title{Activate and deactivate development mode.}
\usage{
dev_mode(on = NULL, path = getOption("devtools.path"))
}
\arguments{
\item{on}{turn dev mode on (\code{TRUE}) or off (\code{FALSE}). If omitted
will guess based on whether or not \code{path} is in
\code{\link{.libPaths}}}
\item{path}{directory to library.}
}
\description{
When activated, \code{dev_mode} creates a new library for storing installed
packages. This new library is automatically created when \code{dev_mode} is
activated if it does not already exist.
This allows you to test development packages in a sandbox, without
interfering with the other packages you have installed.
}
\examples{
\donttest{
dev_mode()
dev_mode()
}
}
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a line chart without aggregation for any metric
#'
#' @description
#' This function creates a line chart directly from the aggregated / summarised data.
#' Unlike `create_line()` which performs a person-level aggregation, there is no
#' calculation for `create_line_asis()` and the values are rendered as they are passed
#' into the function. The only requirement is that a `date_var` is provided for the x-axis.
#'
#' @param data Plotting data as a data frame.
#' @param date_var String containing name of variable for the horizontal axis.
#' @param metric String containing name of variable representing the line.
#' @param title Title of the plot.
#' @param subtitle Subtitle of the plot.
#' @param caption Caption of the plot.
#' @param ylab Y-axis label for the plot (group axis)
#' @param xlab X-axis label of the plot (bar axis).
#' @param line_colour String to specify colour to use for the line.
#' Hex codes are accepted. You can also supply
#' RGB values via `rgb2hex()`.
#'
#' @import ggplot2
#' @import dplyr
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @return
#' Returns a 'ggplot' object representing a line plot.
#'
#' @examples
#' library(dplyr)
#'
#' # Median `Emails_sent` grouped by `Date`
#' # Without Person Averaging
#' med_df <-
#' sq_data %>%
#' group_by(Date) %>%
#' summarise(Emails_sent_median = median(Emails_sent))
#'
#' med_df %>%
#' create_line_asis(
#' date_var = "Date",
#' metric = "Emails_sent_median",
#' title = "Median Emails Sent",
#' subtitle = "Person Averaging Not Applied",
#' caption = extract_date_range(sq_data, return = "text")
#' )
#'
#' @export
create_line_asis <- function(data,
date_var = "Date",
metric,
title = NULL,
subtitle = NULL,
caption = NULL,
ylab = date_var,
xlab = metric,
line_colour = rgb2hex(0, 120, 212)){
# Coerce the date column, then draw the metric as-is (no aggregation).
# NOTE(review): the as.Date() call assumes the date column arrives as a
# character vector in "%m/%d/%Y" form -- confirm with callers.
returnPlot <-
data %>%
mutate_at(vars(date_var), ~as.Date(., format = "%m/%d/%Y")) %>%
ggplot(aes(x = !!sym(date_var), y = !!sym(metric))) +
geom_line(colour = line_colour)
# Note the deliberate swap below: `ylab` (defaulting to date_var) labels
# the x axis and `xlab` (defaulting to metric) labels the y axis; this
# matches the "group axis"/"bar axis" wording in the roxygen header.
returnPlot +
labs(title = title,
subtitle = subtitle,
caption = caption,
y = xlab,
x = ylab) +
theme_wpa_basic()
}
| /R/create_line_asis.R | permissive | standardgalactic/wpa | R | false | false | 2,777 | r | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title Create a line chart without aggregation for any metric
#'
#' @description
#' This function creates a line chart directly from the aggregated / summarised data.
#' Unlike `create_line()` which performs a person-level aggregation, there is no
#' calculation for `create_line_asis()` and the values are rendered as they are passed
#' into the function. The only requirement is that a `date_var` is provided for the x-axis.
#'
#' @param data Plotting data as a data frame.
#' @param date_var String containing name of variable for the horizontal axis.
#' @param metric String containing name of variable representing the line.
#' @param title Title of the plot.
#' @param subtitle Subtitle of the plot.
#' @param caption Caption of the plot.
#' @param ylab Y-axis label for the plot (group axis)
#' @param xlab X-axis label of the plot (bar axis).
#' @param line_colour String to specify colour to use for the line.
#' Hex codes are accepted. You can also supply
#' RGB values via `rgb2hex()`.
#'
#' @import ggplot2
#' @import dplyr
#'
#' @family Visualization
#' @family Flexible
#' @family Time-series
#'
#' @return
#' Returns a 'ggplot' object representing a line plot.
#'
#' @examples
#' library(dplyr)
#'
#' # Median `Emails_sent` grouped by `Date`
#' # Without Person Averaging
#' med_df <-
#' sq_data %>%
#' group_by(Date) %>%
#' summarise(Emails_sent_median = median(Emails_sent))
#'
#' med_df %>%
#' create_line_asis(
#' date_var = "Date",
#' metric = "Emails_sent_median",
#' title = "Median Emails Sent",
#' subtitle = "Person Averaging Not Applied",
#' caption = extract_date_range(sq_data, return = "text")
#' )
#'
#' @export
create_line_asis <- function(data,
                             date_var = "Date",
                             metric,
                             title = NULL,
                             subtitle = NULL,
                             caption = NULL,
                             ylab = date_var,
                             xlab = metric,
                             line_colour = rgb2hex(0, 120, 212)){
  # Coerce the date column, then draw the metric as-is (no aggregation).
  plt <- data %>%
    mutate_at(vars(date_var), ~as.Date(., format = "%m/%d/%Y")) %>%
    ggplot(aes(x = !!sym(date_var), y = !!sym(metric))) +
    geom_line(colour = line_colour)
  # `ylab`/`xlab` are intentionally swapped when passed to labs(): `ylab`
  # (defaulting to the date variable) labels the horizontal axis, matching
  # the "group axis"/"bar axis" wording in the roxygen header.
  plt +
    labs(
      title = title,
      subtitle = subtitle,
      caption = caption,
      y = xlab,
      x = ylab
    ) +
    theme_wpa_basic()
}
|
# Script to GO of siCG factors
# ON beast
library(plyr)
library(ggrepel)
library(ggplot2)
wd="/share/lustre/backup/dyap/Projects/Takeda_T3/CG_factors"
setwd(wd)
# Submit this file to http://www.pantherdb.org/geneListAnalysis.do
# remove the "'" from 3' as it causes import failure
#filein="siCFfactor_GO_BP.txt"
filein="CG_RNA_motifs_full_GO_BP.txt"
fileout=paste(filein,"processed", sep="_")
# cat CG_RNA_motifs_full_GO_BP.txt | sed 's/'\''/\-prime/' > CG_RNA_motifs_full_GO_BP.txt_processed
GO<-read.table(file=fileout, header=TRUE, skip=10, sep="\t")
####################
GOin<-GO
colnames(GOin)[1]<-"PantherGO"
colnames(GOin)[6]<-"Fold_Enrichment"
colnames(GOin)[7]<-"Adjusted.P.value"
GO <- GOin[order(-GOin$Adjusted.P.value),]
# Set value >5 to 5
GO$Enrichment <- as.numeric(gsub(" > ","", GO$Fold_Enrichment))
GO$PantherGO<-gsub("\\s*\\([^\\)]+\\)","",as.character(GO$PantherGO))
#GO$Label <- do.call(paste, c(GO[c("si.factors.with.motif.den.change.in.CG..32.", "Homo.sapiens...REFLIST..20814.")], sep = "/"))
GO$Label <- do.call(paste, c(GO[c("CG.enriched_factors..46.", "Homo.sapiens...REFLIST..20814.")], sep = "/"))
# annotation of no / total in category
# Core wrapping function
# Wrap each string in `x` to at most `len` characters per line, joining the
# wrapped pieces with newlines. Returns an unnamed character vector parallel
# to `x`.
wrap.it <- function(x, len)
{
  # vapply() instead of sapply(): guarantees a character() result even for
  # zero-length input, where sapply() would silently return an empty list.
  vapply(x, function(y) paste(strwrap(y, len),
                              collapse = "\n"),
         character(1),
         USE.NAMES = FALSE)
}
# Call this function with a list or vector
# Apply wrap.it() over either a plain vector (wrapped directly) or a list
# of vectors (each element wrapped independently via lapply).
wrap.labels <- function(x, len)
{
  if (is.list(x)) {
    return(lapply(x, wrap.it, len))
  }
  wrap.it(x, len)
}
GO$value<-round(as.numeric(-log10(GO$Adjusted.P.value)),1)
#subsetting the data
gosub <-subset(GO, Enrichment > 20 & Enrichment != "Inf")
#gosub <-subset(GO, value > 10 & Enrichment != "Inf")
gosub$lab<-wrap.labels(gosub$PantherGO,20)
gosub$Fold_Enrichment<-round(gosub$Enrichment,1)
q<-ggplot(data=gosub, aes(x=reorder(lab,value), y=value)) +
geom_bar(width=0.8, stat="identity")+
geom_text(data=gosub, aes(x=lab, y=value, hjust=-0.15, label=Label)) +
geom_text(data=gosub, aes(x=lab, y=value, hjust=1.2, label=Fold_Enrichment), color="white") +
# scale_y_continuous(limits=c(0,28)) +
# ggtitle("CLK2 interactors by Gene Ontology") +
theme(panel.border = element_rect(fill = NA, colour = "black", size = 2))+
theme(axis.text=element_text(size=10),
axis.title=element_text(size=12,face="bold")) +
labs(y = "-log10(Adjusted p-value)", x="Top GO Biological Processes by Fold Enrichment") +
coord_flip()
#pdf(file="SupplFig_siCGfactors_byGO_BP.pdf", useDingbats=FALSE)
pdf(file="SupplFig_CGfactors_full_byGO_BP.pdf", useDingbats=FALSE)
q
dev.off()
#library(gridExtra)
#grid.arrange(q, r, nrow=2)
############################
# Getting the genes involved in
get=c("3-prime-UTR binding","3-prime-end processing")
#filein="siCGfactor_GOList.txt"
filein="CG_RNA_motifs_full_GOList.txt"
GO<-read.table(file=filein, header=FALSE, skip=0, sep="\t")
# grep "end processing" siCGfactor_GOList.txt | awk -F"\t" '{print $2}' | awk -F";" '{print $2}' > CG_3endproc_V7.txt
#system("grep \"3-prime-end processing\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_3endproc_V7.txt")
#system("grep \"3-prime-UTR binding\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_3UTRbind_V7.txt")
#system("grep \"poly(A) RNA binding\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_polyAbind_V7.txt")
####################
#GO<-mutate(GO, UTR=ifelse(grepl("3-prime-UTR binding", GO$V13), "3-prime-UTR binding (n=6/32)",""))
#GO<-mutate(GO, END=ifelse(grepl("3-prime-end processing", GO$V14), "3-prime-end Processing (n=7/32)",""))
#GO<-mutate(GO, polyA=ifelse(grepl("poly\\(A\\) RNA binding", GO$V13), "poly(A) RNA binding (n=29/32)",""))
GO<-mutate(GO, UTR=ifelse(grepl("3-prime-UTR binding", GO$V8), "3-prime-UTR binding (n=7/46)",""))
GO<-mutate(GO, END=ifelse(grepl("3-prime-end processing", GO$V7), "3-prime-end Processing (n=7/46)",""))
GO<-mutate(GO, polyA=ifelse(grepl("poly\\(A\\) RNA binding", GO$V8), "poly(A) RNA binding (n=39/46)",""))
table(GO$END)
table(GO$UTR)
table(GO$polyA)
GOcom<-GO
GOcom$END[GOcom$END == "3-prime-end Processing (n=7/46)"] <- "YES"
GOcom$UTR[GOcom$UTR == "3-prime-UTR binding (n=7/46)"] <- "YES"
GOcom$polyA[GOcom$polyA == "poly(A) RNA binding (n=39/46)"] <- "YES"
names(GOcom)
colnames(GOcom)[9]
colnames(GOcom)[9]<-"Involved_in_3'UTR_binding"
colnames(GOcom)[10]
colnames(GOcom)[10]<-"Involved_in_3'-end_Processing"
colnames(GOcom)[11]
colnames(GOcom)[11]<-"Involved_in_poly(A)_RNA_binding"
#foo <- data.frame(do.call('rbind', strsplit(as.character(GOcom$V1),'|',fixed=TRUE)))
GO1<-within(GOcom, UniProtID<-gsub("UniProtKB=","",as.character(do.call('rbind', strsplit(as.character(GOcom$V1), '|', fixed=TRUE))[,3])))
GO2<-within(GO1, GeneID<-as.character(do.call('rbind', strsplit(as.character(GO1$V3), ';', fixed=TRUE))[,2]))
GO3<-within(GO2, Description<-as.character(do.call('rbind', strsplit(as.character(GO2$V3), ';', fixed=TRUE))[,1]))
colnames(GO3)
suptab <- GO3[c(12,13,14,9,10,11)]
#suptab <- GO3[c(20,19,21,17,16,18)]
sapply(suptab,class)
write.table(suptab, file="CG_RNA_motifs_full_3endfun.tsv", quote=FALSE, sep="\t")
#write.table(suptab, file="CGfactors_3endfun.tsv", quote=FALSE, sep="\t")
####################
#Old method
U<-GO[grepl("3-prime-UTR binding", GO$V13),]
E<-GO[grepl("3-prime-end processing", GO$V14),]
A<-GO[grepl("poly\\(A\\) RNA binding", GO$V13),]
UTR<-U[c(1,2)]
END<-E[c(1,2)]
polyA<-A[c(1,2)]
write.table(UTR, file = "3'UTR", append = FALSE, quote = FALSE, sep = ",")
write.table(END, file = "3'END", append = FALSE, quote = FALSE, sep = ",")
write.table(polyA, file = "polyA", append = FALSE, quote = FALSE, sep = ",")
| /R-scripts/SuppFig4_siKDCGfactor.R | no_license | oncoapop/data_reporting | R | false | false | 5,744 | r | # Script to GO of siCG factors
# ON beast
library(plyr)
library(ggrepel)
library(ggplot2)
wd="/share/lustre/backup/dyap/Projects/Takeda_T3/CG_factors"
setwd(wd)
# Submit this file to http://www.pantherdb.org/geneListAnalysis.do
# remove the "'" from 3' as it causes import failure
#filein="siCFfactor_GO_BP.txt"
filein="CG_RNA_motifs_full_GO_BP.txt"
fileout=paste(filein,"processed", sep="_")
# cat CG_RNA_motifs_full_GO_BP.txt | sed 's/'\''/\-prime/' > CG_RNA_motifs_full_GO_BP.txt_processed
GO<-read.table(file=fileout, header=TRUE, skip=10, sep="\t")
####################
GOin<-GO
colnames(GOin)[1]<-"PantherGO"
colnames(GOin)[6]<-"Fold_Enrichment"
colnames(GOin)[7]<-"Adjusted.P.value"
GO <- GOin[order(-GOin$Adjusted.P.value),]
# Set value >5 to 5
GO$Enrichment <- as.numeric(gsub(" > ","", GO$Fold_Enrichment))
GO$PantherGO<-gsub("\\s*\\([^\\)]+\\)","",as.character(GO$PantherGO))
#GO$Label <- do.call(paste, c(GO[c("si.factors.with.motif.den.change.in.CG..32.", "Homo.sapiens...REFLIST..20814.")], sep = "/"))
GO$Label <- do.call(paste, c(GO[c("CG.enriched_factors..46.", "Homo.sapiens...REFLIST..20814.")], sep = "/"))
# annotation of no / total in category
# Core wrapping function
# Wrap each string in `x` to at most `len` characters per line, joining the
# wrapped pieces with newlines. Returns an unnamed character vector parallel
# to `x`.
wrap.it <- function(x, len)
{
  # vapply() instead of sapply(): guarantees a character() result even for
  # zero-length input, where sapply() would silently return an empty list.
  vapply(x, function(y) paste(strwrap(y, len),
                              collapse = "\n"),
         character(1),
         USE.NAMES = FALSE)
}
# Call this function with a list or vector
# Apply wrap.it() over either a plain vector (wrapped directly) or a list
# of vectors (each element wrapped independently via lapply).
wrap.labels <- function(x, len)
{
  if (is.list(x)) {
    return(lapply(x, wrap.it, len))
  }
  wrap.it(x, len)
}
GO$value<-round(as.numeric(-log10(GO$Adjusted.P.value)),1)
#subsetting the data
gosub <-subset(GO, Enrichment > 20 & Enrichment != "Inf")
#gosub <-subset(GO, value > 10 & Enrichment != "Inf")
gosub$lab<-wrap.labels(gosub$PantherGO,20)
gosub$Fold_Enrichment<-round(gosub$Enrichment,1)
q<-ggplot(data=gosub, aes(x=reorder(lab,value), y=value)) +
geom_bar(width=0.8, stat="identity")+
geom_text(data=gosub, aes(x=lab, y=value, hjust=-0.15, label=Label)) +
geom_text(data=gosub, aes(x=lab, y=value, hjust=1.2, label=Fold_Enrichment), color="white") +
# scale_y_continuous(limits=c(0,28)) +
# ggtitle("CLK2 interactors by Gene Ontology") +
theme(panel.border = element_rect(fill = NA, colour = "black", size = 2))+
theme(axis.text=element_text(size=10),
axis.title=element_text(size=12,face="bold")) +
labs(y = "-log10(Adjusted p-value)", x="Top GO Biological Processes by Fold Enrichment") +
coord_flip()
#pdf(file="SupplFig_siCGfactors_byGO_BP.pdf", useDingbats=FALSE)
pdf(file="SupplFig_CGfactors_full_byGO_BP.pdf", useDingbats=FALSE)
q
dev.off()
#library(gridExtra)
#grid.arrange(q, r, nrow=2)
############################
# Getting the genes involved in
get=c("3-prime-UTR binding","3-prime-end processing")
#filein="siCGfactor_GOList.txt"
filein="CG_RNA_motifs_full_GOList.txt"
GO<-read.table(file=filein, header=FALSE, skip=0, sep="\t")
# grep "end processing" siCGfactor_GOList.txt | awk -F"\t" '{print $2}' | awk -F";" '{print $2}' > CG_3endproc_V7.txt
#system("grep \"3-prime-end processing\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_3endproc_V7.txt")
#system("grep \"3-prime-UTR binding\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_3UTRbind_V7.txt")
#system("grep \"poly(A) RNA binding\"siCGfactor_GOList.txt | awk -F\"\t\" '{print $2}' | awk -F\";\" '{print $2}' > CG_polyAbind_V7.txt")
####################
#GO<-mutate(GO, UTR=ifelse(grepl("3-prime-UTR binding", GO$V13), "3-prime-UTR binding (n=6/32)",""))
#GO<-mutate(GO, END=ifelse(grepl("3-prime-end processing", GO$V14), "3-prime-end Processing (n=7/32)",""))
#GO<-mutate(GO, polyA=ifelse(grepl("poly\\(A\\) RNA binding", GO$V13), "poly(A) RNA binding (n=29/32)",""))
GO<-mutate(GO, UTR=ifelse(grepl("3-prime-UTR binding", GO$V8), "3-prime-UTR binding (n=7/46)",""))
GO<-mutate(GO, END=ifelse(grepl("3-prime-end processing", GO$V7), "3-prime-end Processing (n=7/46)",""))
GO<-mutate(GO, polyA=ifelse(grepl("poly\\(A\\) RNA binding", GO$V8), "poly(A) RNA binding (n=39/46)",""))
table(GO$END)
table(GO$UTR)
table(GO$polyA)
GOcom<-GO
GOcom$END[GOcom$END == "3-prime-end Processing (n=7/46)"] <- "YES"
GOcom$UTR[GOcom$UTR == "3-prime-UTR binding (n=7/46)"] <- "YES"
GOcom$polyA[GOcom$polyA == "poly(A) RNA binding (n=39/46)"] <- "YES"
names(GOcom)
colnames(GOcom)[9]
colnames(GOcom)[9]<-"Involved_in_3'UTR_binding"
colnames(GOcom)[10]
colnames(GOcom)[10]<-"Involved_in_3'-end_Processing"
colnames(GOcom)[11]
colnames(GOcom)[11]<-"Involved_in_poly(A)_RNA_binding"
#foo <- data.frame(do.call('rbind', strsplit(as.character(GOcom$V1),'|',fixed=TRUE)))
GO1<-within(GOcom, UniProtID<-gsub("UniProtKB=","",as.character(do.call('rbind', strsplit(as.character(GOcom$V1), '|', fixed=TRUE))[,3])))
GO2<-within(GO1, GeneID<-as.character(do.call('rbind', strsplit(as.character(GO1$V3), ';', fixed=TRUE))[,2]))
GO3<-within(GO2, Description<-as.character(do.call('rbind', strsplit(as.character(GO2$V3), ';', fixed=TRUE))[,1]))
colnames(GO3)
suptab <- GO3[c(12,13,14,9,10,11)]
#suptab <- GO3[c(20,19,21,17,16,18)]
sapply(suptab,class)
write.table(suptab, file="CG_RNA_motifs_full_3endfun.tsv", quote=FALSE, sep="\t")
#write.table(suptab, file="CGfactors_3endfun.tsv", quote=FALSE, sep="\t")
####################
#Old method
U<-GO[grepl("3-prime-UTR binding", GO$V13),]
E<-GO[grepl("3-prime-end processing", GO$V14),]
A<-GO[grepl("poly\\(A\\) RNA binding", GO$V13),]
UTR<-U[c(1,2)]
END<-E[c(1,2)]
polyA<-A[c(1,2)]
write.table(UTR, file = "3'UTR", append = FALSE, quote = FALSE, sep = ",")
write.table(END, file = "3'END", append = FALSE, quote = FALSE, sep = ",")
write.table(polyA, file = "polyA", append = FALSE, quote = FALSE, sep = ",")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.similar.features.R
\name{find.similar.features}
\alias{find.similar.features}
\title{Find similar features with a given subnetwork.}
\usage{
find.similar.features(model, subnet.id, datamatrix = NULL, verbose =
FALSE, information.criterion = NULL)
}
\arguments{
\item{model}{NetResponseModel object.}
\item{subnet.id}{Investigated subnetwork.}
\item{datamatrix}{Optional. Can be used to compare subnetwork similarity
with new data which was not used for learning the subnetworks.}
\item{verbose}{Logical indicating whether progress of the algorithm should
be indicated on the screen.}
\item{information.criterion}{Information criterion for model selection. By
default uses the same than in the 'model' object.}
}
\value{
A data frame with elements feature.names (e.g. gene IDs) and delta,
which indicates similarity level. See details for details. The smaller, the
more similar. The data frame is ordered such that the features are listed by
decreasing similarity.
}
\description{
Given subnetwork, orders the remaining features (genes) in the input data
based on similarity with the subnetwork. Allows the identification of
similar features that are not directly connected in the input network.
}
\details{
The same similarity measure is used as when agglomerating the subnetworks:
the features are ordered by delta (change) in the cost function, assuming
that the feature would be merged in the subnetwork. The smaller the change,
the more similar the feature is (change would minimize the new cost function
value). Negative values of delta mean that the cost function would be
improved by merging the new feature in the subnetwork, indicating features
having coordinated response.
}
\examples{
data(toydata)
model <- toydata$model
subnet.id <- 'Subnet-1'
# g <- find.similar.features(model, subnet.id)
# List features that are similar to this subnetwork (delta < 0)
# (ordered by decreasing similarity)
# subset(g, delta < 0)
}
\references{
See citation('netresponse') for reference details.
}
\author{
Leo Lahti \email{leo.lahti@iki.fi}
}
\keyword{utilities}
| /man/find.similar.features.Rd | no_license | antagomir/netresponse | R | false | true | 2,152 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.similar.features.R
\name{find.similar.features}
\alias{find.similar.features}
\title{Find similar features with a given subnetwork.}
\usage{
find.similar.features(model, subnet.id, datamatrix = NULL, verbose =
FALSE, information.criterion = NULL)
}
\arguments{
\item{model}{NetResponseModel object.}
\item{subnet.id}{Investigated subnetwork.}
\item{datamatrix}{Optional. Can be used to compare subnetwork similarity
with new data which was not used for learning the subnetworks.}
\item{verbose}{Logical indicating whether progress of the algorithm should
be indicated on the screen.}
\item{information.criterion}{Information criterion for model selection. By
default uses the same than in the 'model' object.}
}
\value{
A data frame with elements feature.names (e.g. gene IDs) and delta,
which indicates similarity level. See details for details. The smaller, the
more similar. The data frame is ordered such that the features are listed by
decreasing similarity.
}
\description{
Given subnetwork, orders the remaining features (genes) in the input data
based on similarity with the subnetwork. Allows the identification of
similar features that are not directly connected in the input network.
}
\details{
The same similarity measure is used as when agglomerating the subnetworks:
the features are ordered by delta (change) in the cost function, assuming
that the feature would be merged in the subnetwork. The smaller the change,
the more similar the feature is (change would minimize the new cost function
value). Negative values of delta mean that the cost function would be
improved by merging the new feature in the subnetwork, indicating features
having coordinated response.
}
\examples{
data(toydata)
model <- toydata$model
subnet.id <- 'Subnet-1'
# g <- find.similar.features(model, subnet.id)
# List features that are similar to this subnetwork (delta < 0)
# (ordered by decreasing similarity)
# subset(g, delta < 0)
}
\references{
See citation('netresponse') for reference details.
}
\author{
Leo Lahti \email{leo.lahti@iki.fi}
}
\keyword{utilities}
|
source("~/Exercism/r/raindrops/raindrops.R")
library(testthat)
context("raindrops")

# Table-driven version of the raindrops test suite: one entry per case,
# carrying the original test description, the input number and the
# expected raindrop sound.
cases <- list(
  list(desc = "the sound for 1 is 1", input = 1, expected = "1"),
  list(desc = "the sound for 3 is Pling", input = 3, expected = "Pling"),
  list(desc = "the sound for 5 is Plang", input = 5, expected = "Plang"),
  list(desc = "the sound for 7 is Plong", input = 7, expected = "Plong"),
  list(desc = "the sound for 6 is Pling as it has a factor 3", input = 6, expected = "Pling"),
  list(desc = "2 to the power 3 does not make a raindrop sound as 3 is the exponent\nnot the base", input = 8, expected = "8"),
  list(desc = "the sound for 9 is Pling as it has a factor 3", input = 9, expected = "Pling"),
  list(desc = "the sound for 10 is Plang as it has a factor 5", input = 10, expected = "Plang"),
  list(desc = "the sound for 14 is Plong as it has a factor of 7", input = 14, expected = "Plong"),
  list(desc = "the sound for 15 is PlingPlang as it has factors 3 and 5", input = 15, expected = "PlingPlang"),
  list(desc = "the sound for 21 is PlingPlong as it has factors 3 and 7", input = 21, expected = "PlingPlong"),
  list(desc = "the sound for 25 is Plang as it has a factor 5", input = 25, expected = "Plang"),
  list(desc = "the sound for 27 is Pling as it has a factor 3", input = 27, expected = "Pling"),
  list(desc = "the sound for 35 is PlangPlong as it has factors 5 and 7", input = 35, expected = "PlangPlong"),
  list(desc = "the sound for 49 is Plong as it has a factor 7", input = 49, expected = "Plong"),
  list(desc = "the sound for 52 is 52", input = 52, expected = "52"),
  list(desc = "the sound for 105 is PlingPlangPlong as it has factors 3, 5 and 7", input = 105, expected = "PlingPlangPlong"),
  list(desc = "the sound for 3125 is Plang as it has a factor 5", input = 3125, expected = "Plang")
)

# Run each case as its own test_that() block, exactly as the hand-written
# version did (same descriptions, same order, same expectations).
for (case in cases) {
  number <- case$input
  test_that(case$desc, {
    expect_equal(raindrops(number), case$expected)
  })
}

message("All tests passed for exercise: raindrops")
| /r/raindrops/test_raindrops.R | no_license | y0wel/exercism-r | R | false | false | 2,362 | r | source("~/Exercism/r/raindrops/raindrops.R")
library(testthat)
context("raindrops")

# Table-driven version of the raindrops test suite: one entry per case,
# carrying the original test description, the input number and the
# expected raindrop sound.
cases <- list(
  list(desc = "the sound for 1 is 1", input = 1, expected = "1"),
  list(desc = "the sound for 3 is Pling", input = 3, expected = "Pling"),
  list(desc = "the sound for 5 is Plang", input = 5, expected = "Plang"),
  list(desc = "the sound for 7 is Plong", input = 7, expected = "Plong"),
  list(desc = "the sound for 6 is Pling as it has a factor 3", input = 6, expected = "Pling"),
  list(desc = "2 to the power 3 does not make a raindrop sound as 3 is the exponent\nnot the base", input = 8, expected = "8"),
  list(desc = "the sound for 9 is Pling as it has a factor 3", input = 9, expected = "Pling"),
  list(desc = "the sound for 10 is Plang as it has a factor 5", input = 10, expected = "Plang"),
  list(desc = "the sound for 14 is Plong as it has a factor of 7", input = 14, expected = "Plong"),
  list(desc = "the sound for 15 is PlingPlang as it has factors 3 and 5", input = 15, expected = "PlingPlang"),
  list(desc = "the sound for 21 is PlingPlong as it has factors 3 and 7", input = 21, expected = "PlingPlong"),
  list(desc = "the sound for 25 is Plang as it has a factor 5", input = 25, expected = "Plang"),
  list(desc = "the sound for 27 is Pling as it has a factor 3", input = 27, expected = "Pling"),
  list(desc = "the sound for 35 is PlangPlong as it has factors 5 and 7", input = 35, expected = "PlangPlong"),
  list(desc = "the sound for 49 is Plong as it has a factor 7", input = 49, expected = "Plong"),
  list(desc = "the sound for 52 is 52", input = 52, expected = "52"),
  list(desc = "the sound for 105 is PlingPlangPlong as it has factors 3, 5 and 7", input = 105, expected = "PlingPlangPlong"),
  list(desc = "the sound for 3125 is Plang as it has a factor 5", input = 3125, expected = "Plang")
)

# Run each case as its own test_that() block, exactly as the hand-written
# version did (same descriptions, same order, same expectations).
for (case in cases) {
  number <- case$input
  test_that(case$desc, {
    expect_equal(raindrops(number), case$expected)
  })
}

message("All tests passed for exercise: raindrops")
|
# Script setup: clear the workspace and the console.
# NOTE(review): rm(list = ls()) at the top of a script is discouraged --
# it silently wipes the caller's workspace; prefer a fresh R session.
rm(list = ls(all.names = TRUE))  # was ls(all=T): 'all' partial-matched 'all.names' and T is reassignable
cat("\014")  # form feed -- clears the RStudio console

# Container for the raw inputs and the derived return / market-cap tables.
myClass <- setClass("myClass", slots = c(vix = "data.frame", stock_div = "data.frame", marketCap_Stocks = "data.frame", dim_subset = "data.frame",
                                         eq_w_ret_all = "data.frame", v_w_ret_all = "data.frame", marketCap_all = "data.frame",
                                         eq_w_ret_subset = "data.frame", v_w_ret_subset = "data.frame", marketCap_subset = "data.frame"))
# Lower/upper bound of one threshold subset.
# Fix: the setClass() argument is 'slots'; the original 'slot =' relied on
# partial argument matching.
subsetClass <- setClass("subsetClass", slots = c(bottom = "numeric", top = "numeric"))
# Stub: fill the "all data" slots (eq_w_ret_all, v_w_ret_all, marketCap_all).
# Per the original notes this should reuse getSubsetVariables() with
# (-Inf, Inf) bounds and then rename the resulting columns.
# For now the object is passed through unchanged.
setNormalVariables <- function(theClass) {
  theClass
}
# Fill the subset slots (eq_w_ret_subset, v_w_ret_subset, marketCap_subset)
# of theClass: one set of columns per unordered pair of thresholds taken
# from the first column of theClass@dim_subset.
#
# For each pair, the three data frames returned by getSubsetVariables()
# are folded into three accumulated data frames, which are stored back on
# the object before it is returned.
#
# Bug fix vs. the original: merge() against a zero-column data.frame()
# always produces zero rows, so the accumulators could never grow. The
# first pair's result now seeds each accumulator; later results are
# merge()d in on their common columns (presumably the date column --
# TODO confirm once getSubsetVariables() is implemented).
setSubsetVariables <- function(theClass) {
  subsetThresholds <- theClass@dim_subset
  n_thresholds <- nrow(subsetThresholds)
  eq_w_ret_subset <- data.frame()
  v_w_ret_subset <- data.frame()
  marketCap_subset <- data.frame()
  # Seed the accumulator on first use, merge on subsequent uses.
  accumulate <- function(acc, piece) {
    if (ncol(acc) == 0L) piece else merge(acc, piece)
  }
  # Guard: with fewer than two thresholds there are no pairs
  # (the original 1:(n-1) would loop over c(1, 0)).
  if (n_thresholds >= 2) {
    for (i in seq_len(n_thresholds - 1)) {
      for (j in (i + 1):n_thresholds) {
        # Bounds of the current subset, ordered low -> high, e.g. c(0.0125, 0.1).
        currentSubset <- sort(c(subsetThresholds[i, 1], subsetThresholds[j, 1]))
        tempOutput <- getSubsetVariables(theClass,
                                         bottom = currentSubset[1],
                                         top = currentSubset[2])
        eq_w_ret_subset <- accumulate(eq_w_ret_subset, tempOutput[[1]])
        v_w_ret_subset <- accumulate(v_w_ret_subset, tempOutput[[2]])
        marketCap_subset <- accumulate(marketCap_subset, tempOutput[[3]])
      }
    }
  }
  theClass@eq_w_ret_subset <- eq_w_ret_subset
  theClass@v_w_ret_subset <- v_w_ret_subset
  theClass@marketCap_subset <- marketCap_subset
  return(theClass)
}
# Stub: compute the three per-subset data frames (equal-weighted returns,
# value-weighted returns, market cap) for one (bottom, top) threshold pair.
#
# Intended contract (per the notes in setSubsetVariables): return a list of
# three data frames with columns named using the pair, e.g. via
# subsetGeneralName.
# NOTE(review): not implemented yet -- the body only builds the name prefix
# and, as the last evaluated expression is an assignment, returns it
# invisibly rather than the promised list.
getSubsetVariables = function(theClass,bottom,top){
  # Column-name prefix for this subset, e.g. "0.0125_0.1".
  subsetGeneralName = paste0(bottom,"_",top)
  # TODO: first get the dates we will be looking at;
  # after getting the dates, parallelize a function over them.
}
setValues = function(theClass){
#need a function for all these subvariables, all take the form of input:myClass, output:myClass_updated
theClass = setNormalVariables(theClass)
theClass = setSubsetVariables(theClass)
} | /Sandbox.R | no_license | trentmckinnon/Matrix-Methods-of-Machine-Learning | R | false | false | 2,604 | r | rm(list = ls(all=T))
cat("\014")  # form feed -- clears the RStudio console

# Container for the raw inputs and the derived return / market-cap tables.
myClass <- setClass("myClass", slots = c(vix = "data.frame", stock_div = "data.frame", marketCap_Stocks = "data.frame", dim_subset = "data.frame",
                                         eq_w_ret_all = "data.frame", v_w_ret_all = "data.frame", marketCap_all = "data.frame",
                                         eq_w_ret_subset = "data.frame", v_w_ret_subset = "data.frame", marketCap_subset = "data.frame"))
# Lower/upper bound of one threshold subset.
# Fix: the setClass() argument is 'slots'; the original 'slot =' relied on
# partial argument matching.
subsetClass <- setClass("subsetClass", slots = c(bottom = "numeric", top = "numeric"))
# Stub: fill the "all data" slots (eq_w_ret_all, v_w_ret_all, marketCap_all).
# Per the original notes this should reuse getSubsetVariables() with
# (-Inf, Inf) bounds and then rename the resulting columns.
# For now the object is passed through unchanged.
setNormalVariables <- function(theClass) {
  theClass
}
# Fill the subset slots (eq_w_ret_subset, v_w_ret_subset, marketCap_subset)
# of theClass: one set of columns per unordered pair of thresholds taken
# from the first column of theClass@dim_subset.
#
# For each pair, the three data frames returned by getSubsetVariables()
# are folded into three accumulated data frames, which are stored back on
# the object before it is returned.
#
# Bug fix vs. the original: merge() against a zero-column data.frame()
# always produces zero rows, so the accumulators could never grow. The
# first pair's result now seeds each accumulator; later results are
# merge()d in on their common columns (presumably the date column --
# TODO confirm once getSubsetVariables() is implemented).
setSubsetVariables <- function(theClass) {
  subsetThresholds <- theClass@dim_subset
  n_thresholds <- nrow(subsetThresholds)
  eq_w_ret_subset <- data.frame()
  v_w_ret_subset <- data.frame()
  marketCap_subset <- data.frame()
  # Seed the accumulator on first use, merge on subsequent uses.
  accumulate <- function(acc, piece) {
    if (ncol(acc) == 0L) piece else merge(acc, piece)
  }
  # Guard: with fewer than two thresholds there are no pairs
  # (the original 1:(n-1) would loop over c(1, 0)).
  if (n_thresholds >= 2) {
    for (i in seq_len(n_thresholds - 1)) {
      for (j in (i + 1):n_thresholds) {
        # Bounds of the current subset, ordered low -> high, e.g. c(0.0125, 0.1).
        currentSubset <- sort(c(subsetThresholds[i, 1], subsetThresholds[j, 1]))
        tempOutput <- getSubsetVariables(theClass,
                                         bottom = currentSubset[1],
                                         top = currentSubset[2])
        eq_w_ret_subset <- accumulate(eq_w_ret_subset, tempOutput[[1]])
        v_w_ret_subset <- accumulate(v_w_ret_subset, tempOutput[[2]])
        marketCap_subset <- accumulate(marketCap_subset, tempOutput[[3]])
      }
    }
  }
  theClass@eq_w_ret_subset <- eq_w_ret_subset
  theClass@v_w_ret_subset <- v_w_ret_subset
  theClass@marketCap_subset <- marketCap_subset
  return(theClass)
}
# Stub: compute the three per-subset data frames (equal-weighted returns,
# value-weighted returns, market cap) for one (bottom, top) threshold pair.
#
# Intended contract (per the notes in setSubsetVariables): return a list of
# three data frames with columns named using the pair, e.g. via
# subsetGeneralName.
# NOTE(review): not implemented yet -- the body only builds the name prefix
# and, as the last evaluated expression is an assignment, returns it
# invisibly rather than the promised list.
getSubsetVariables = function(theClass,bottom,top){
  # Column-name prefix for this subset, e.g. "0.0125_0.1".
  subsetGeneralName = paste0(bottom,"_",top)
  # TODO: first get the dates we will be looking at;
  # after getting the dates, parallelize a function over them.
}
# Populate all derived slots of theClass by running every set* step in order.
#
# Fix vs. the original: the function previously ended on an assignment, so
# the updated object was returned invisibly; the object is now the explicit
# last expression, making the (visible) return unambiguous.
setValues <- function(theClass) {
  theClass <- setNormalVariables(theClass)
  theClass <- setSubsetVariables(theClass)
  theClass
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palette_dataedu.R
\docType{data}
\name{dataedu_palette}
\alias{dataedu_palette}
\title{Color palette for Data Science in Education}
\format{
An object of class \code{character} of length 5.
}
\usage{
dataedu_palette
}
\description{
Color palette for Data Science in Education
}
\keyword{datasets}
| /man/dataedu_palette.Rd | permissive | Caellwyn/data-edu | R | false | true | 375 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palette_dataedu.R
\docType{data}
\name{dataedu_palette}
\alias{dataedu_palette}
\title{Color palette for Data Science in Education}
\format{
An object of class \code{character} of length 5.
}
\usage{
dataedu_palette
}
\description{
Color palette for Data Science in Education
}
\keyword{datasets}
|
# title: "Mixed linear models (random intercept and random slope)"
# subtitle: "Linear models..."
# author: "Marina Varfolomeeva"
# institute: "Department of Invertebrate Zoology, Faculty of Biology, St Petersburg State University"
# ## Example -- sleep deprivation #############################################
# On the night before day zero every subject slept a normal amount; on the
# following 9 nights they were allowed only 3 hours of sleep. Reaction time
# was measured each day in a series of tests.
#
# How does people's reaction time depend on sleep deprivation?
# Belenky et al., 2003
# - `Reaction` --- mean reaction time over the day's test series, ms
# - `Days` --- number of days of sleep deprivation
# - `Subject` --- subject id
library(lme4)
data(sleepstudy)
sl <- sleepstudy
str(sl)
# Are there any missing values?
colSums(is.na(sl))
# How many subjects?
length(unique(sl$Subject))
# How many observations per subject?
table(sl$Subject)
# ## Are there any outliers?
library(ggplot2)
theme_set(theme_bw())
ggplot(sl, aes(x = Reaction, y = 1:nrow(sl))) +
geom_point()
# ## How does reaction time change for different people?
ggplot(sl, aes(x = Reaction, y = Subject, colour = Days)) +
geom_point()
# ## Bad solution: ignore the grouping factor #################################
W1 <- glm(Reaction ~ Days, data = sl)
summary(W1)
ggplot(sl, aes(x = Days, y = Reaction)) +
geom_point() +
geom_smooth(se = TRUE, method = "lm", size = 1)
# ## Clumsy solution: treat the grouping factor as fixed ######################
W2 <- glm(Reaction ~ Days + Subject, data = sl)
coef(W2)
ggplot(fortify(W2), aes(x = Days, colour = Subject)) +
geom_line(aes(y = .fitted, group = Subject)) +
geom_point(data = sl, aes(y = Reaction)) +
guides(colour = guide_legend(ncol = 2))
# # GLMM with a random intercept ##############################################
M1 <- lmer(Reaction ~ Days + (1 | Subject), data = sl)
summary(M1)
# Data for plotting the predictions of the fixed part of the model:
library(dplyr)
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
head(NewData, 3)
# ## Predictions of the fixed part of the model using predict()
NewData$fit <- predict(M1, NewData, type = 'response', re.form = NA)
head(NewData, 3)
# ## Predictions of the fixed part of the model in matrix form
X <- model.matrix(~ Days, data = NewData)
betas <- fixef(M1)
NewData$fit <- X %*% betas
# Standard errors
NewData$SE <- sqrt( diag(X %*% vcov(M1) %*% t(X)) )
NewData$lwr <- NewData$fit - 2 * NewData$SE
NewData$upr <- NewData$fit + 2 * NewData$SE
# ## Plot of the fixed-part predictions
ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
# ## Predictions for each level of the random factor
NewData$fit_subj <- predict(M1, NewData, type = 'response')
ggplot(NewData, aes(x = Days, y = fit_subj)) +
geom_ribbon(alpha = 0.3, aes(ymin = lwr, ymax = upr)) +
geom_line(aes(colour = Subject)) +
geom_point(data = sl, aes(x = Days, y = Reaction, colour = Subject)) +
guides(colour = guide_legend(ncol = 2))
# ## Intraclass correlation coefficient (intra-class correlation, ICC) ########
#
# $ICC = \sigma_b^2 / (\sigma^2 + \sigma_b^2)$
summary(M1)
VarCorr(M1) # random effects on their own
# # Model diagnostics
# ## Data for residual analysis
M1_diag <- data.frame(
sl,
.fitted = predict(M1),
.resid = resid(M1, type = 'pearson'),
.scresid = resid(M1, type = 'pearson', scaled = TRUE))
head(M1_diag, 4)
# ## Residuals vs fitted values
gg_resid <- ggplot(M1_diag, aes(y = .scresid)) +
geom_hline(yintercept = 0)
gg_resid + geom_point(aes(x = .fitted))
# ## Residuals vs covariates in the model and not in the model
gg_resid + geom_boxplot(aes(x = factor(Days)))
gg_resid + geom_boxplot(aes(x = Subject))
# # GLMM with a random intercept and a random slope ###########################
MS1 <- lmer(Reaction ~ Days + ( 1 + Days|Subject), data = sl)
summary(MS1)
# ## Data for plotting the predictions of the fixed part of the model
library(dplyr)
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
NewData$fit <- predict(MS1, NewData, type = 'response', re.form = NA)
head(NewData, 3)
# ## Predictions of the fixed part of the model in matrix form
X <- model.matrix(~ Days, data = NewData)
betas <- fixef(MS1)
NewData$fit <- X %*% betas
# Standard errors
NewData$SE <- sqrt( diag(X %*% vcov(MS1) %*% t(X)) )
NewData$lwr <- NewData$fit - 2 * NewData$SE
NewData$upr <- NewData$fit + 2 * NewData$SE
# ## Plot of the fixed-part predictions
gg_MS1_normal <- ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
gg_MS1_normal
# ## Predictions for each level of the random factor
NewData$fit_subj <- predict(MS1, NewData, type = 'response')
ggplot(NewData, aes(x = Days, y = fit_subj)) +
geom_ribbon(alpha = 0.3, aes(ymin = lwr, ymax = upr)) +
geom_line(aes(colour = Subject)) +
geom_point(data = sl, aes(x = Days, y = Reaction, colour = Subject)) +
guides(colour = guide_legend(ncol = 2))
# # Model diagnostics
# ## Data for residual analysis
MS1_diag <- data.frame(
sl,
.fitted = predict(MS1),
.resid = resid(MS1, type = 'pearson'),
.scresid = resid(MS1, type = 'pearson', scaled = TRUE))
head(MS1_diag, 4)
# ## Residuals vs fitted values
gg_resid <- ggplot(MS1_diag, aes(y = .scresid)) +
geom_hline(yintercept = 0)
gg_resid + geom_point(aes(x = .fitted))
# ## Residuals vs covariates in the model and not in the model
gg_resid + geom_boxplot(aes(x = factor(Days)))
gg_resid + geom_boxplot(aes(x = Subject))
# # Hypothesis testing in mixed models ########################################
# Wald t- (or z-) tests
coef(summary(MS1))
# ## Likelihood ratio tests (LRT)
# ## LRT for random effects
MS1 <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sl, REML = TRUE)
MS0 <- lmer(Reaction ~ Days + (1 | Subject), data = sl, REML = TRUE)
anova(MS1, MS0, refit = FALSE)
# ## LRT for fixed effects
MS1.ml <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sl, REML = FALSE)
MS0.ml <- lmer(Reaction ~ 1 + (1 + Days | Subject), data = sl, REML = FALSE)
anova(MS1.ml, MS0.ml)
# ## Model comparison by AIC
AIC(MS1.ml, MS0.ml)
# ## Bootstrap for significance testing and for predictions ###################
# ## Parametric bootstrap for the LRT of fixed effects
library(pbkrtest)
pmod <- PBmodcomp(MS1.ml, MS0.ml, nsim = 100) # use 1000 or more for real data
summary(pmod)
# ## Bootstrap estimate of the regression confidence band
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
NewData$fit <- predict(MS1, NewData, type = 'response', re.form = NA)
# Simulate data from the model many times and get predictions for each run.
# NOTE(review): predict.merMod's argument is 'newdata'; 'new_data' here is
# silently absorbed by '...', so the predictions are for the model frame,
# not for NewData -- verify.
bMS1 <- bootMer(x = MS1,
FUN = function(x) predict(x, new_data = NewData, re.form = NA),
nsim = 100)
# Compute quantiles of the predicted values over all bootstrap iterations
b_se <- apply(X = bMS1$t,
MARGIN = 2,
FUN = function(x) quantile(x, probs = c(0.025, 0.975), na.rm = TRUE))
# Confidence band for the predicted values
NewData$lwr <- b_se[1, ]
NewData$upr <- b_se[2, ]
# ## Plot of the fixed-part predictions
gg_MS1_boot <- ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
gg_MS1_boot
library(cowplot)
plot_grid(gg_MS1_normal + labs(title = "normal"),
gg_MS1_boot + labs(title = "bootstrap"),
ncol = 2)
| /15.1_GLMM_gaussian_random_intercept_slope_code.R | no_license | varmara/linmodr | R | false | false | 9,858 | r | # title: "Смешанные линейные модели (случайный интерсепт и случайный угол наклона)"
# subtitle: "Linear models..."
# author: "Marina Varfolomeeva"
# institute: "Department of Invertebrate Zoology, Faculty of Biology, St Petersburg State University"
# ## Example -- sleep deprivation #############################################
# On the night before day zero every subject slept a normal amount; on the
# following 9 nights they were allowed only 3 hours of sleep. Reaction time
# was measured each day in a series of tests.
#
# How does people's reaction time depend on sleep deprivation?
# Belenky et al., 2003
# - `Reaction` --- mean reaction time over the day's test series, ms
# - `Days` --- number of days of sleep deprivation
# - `Subject` --- subject id
library(lme4)
data(sleepstudy)
sl <- sleepstudy
str(sl)
# Are there any missing values?
colSums(is.na(sl))
# How many subjects?
length(unique(sl$Subject))
# How many observations per subject?
table(sl$Subject)
# ## Are there any outliers?
library(ggplot2)
theme_set(theme_bw())
ggplot(sl, aes(x = Reaction, y = 1:nrow(sl))) +
geom_point()
# ## How does reaction time change for different people?
ggplot(sl, aes(x = Reaction, y = Subject, colour = Days)) +
geom_point()
# ## Bad solution: ignore the grouping factor #################################
W1 <- glm(Reaction ~ Days, data = sl)
summary(W1)
ggplot(sl, aes(x = Days, y = Reaction)) +
geom_point() +
geom_smooth(se = TRUE, method = "lm", size = 1)
# ## Clumsy solution: treat the grouping factor as fixed ######################
W2 <- glm(Reaction ~ Days + Subject, data = sl)
coef(W2)
ggplot(fortify(W2), aes(x = Days, colour = Subject)) +
geom_line(aes(y = .fitted, group = Subject)) +
geom_point(data = sl, aes(y = Reaction)) +
guides(colour = guide_legend(ncol = 2))
# # GLMM with a random intercept ##############################################
M1 <- lmer(Reaction ~ Days + (1 | Subject), data = sl)
summary(M1)
# Data for plotting the predictions of the fixed part of the model:
library(dplyr)
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
head(NewData, 3)
# ## Predictions of the fixed part of the model using predict()
NewData$fit <- predict(M1, NewData, type = 'response', re.form = NA)
head(NewData, 3)
# ## Predictions of the fixed part of the model in matrix form
X <- model.matrix(~ Days, data = NewData)
betas <- fixef(M1)
NewData$fit <- X %*% betas
# Standard errors
NewData$SE <- sqrt( diag(X %*% vcov(M1) %*% t(X)) )
NewData$lwr <- NewData$fit - 2 * NewData$SE
NewData$upr <- NewData$fit + 2 * NewData$SE
# ## Plot of the fixed-part predictions
ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
# ## Predictions for each level of the random factor
NewData$fit_subj <- predict(M1, NewData, type = 'response')
ggplot(NewData, aes(x = Days, y = fit_subj)) +
geom_ribbon(alpha = 0.3, aes(ymin = lwr, ymax = upr)) +
geom_line(aes(colour = Subject)) +
geom_point(data = sl, aes(x = Days, y = Reaction, colour = Subject)) +
guides(colour = guide_legend(ncol = 2))
# ## Intraclass correlation coefficient (intra-class correlation, ICC) ########
#
# $ICC = \sigma_b^2 / (\sigma^2 + \sigma_b^2)$
summary(M1)
VarCorr(M1) # random effects on their own
# # Model diagnostics
# ## Data for residual analysis
M1_diag <- data.frame(
sl,
.fitted = predict(M1),
.resid = resid(M1, type = 'pearson'),
.scresid = resid(M1, type = 'pearson', scaled = TRUE))
head(M1_diag, 4)
# ## Residuals vs fitted values
gg_resid <- ggplot(M1_diag, aes(y = .scresid)) +
geom_hline(yintercept = 0)
gg_resid + geom_point(aes(x = .fitted))
# ## Residuals vs covariates in the model and not in the model
gg_resid + geom_boxplot(aes(x = factor(Days)))
gg_resid + geom_boxplot(aes(x = Subject))
# # GLMM with a random intercept and a random slope ###########################
MS1 <- lmer(Reaction ~ Days + ( 1 + Days|Subject), data = sl)
summary(MS1)
# ## Data for plotting the predictions of the fixed part of the model
library(dplyr)
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
NewData$fit <- predict(MS1, NewData, type = 'response', re.form = NA)
head(NewData, 3)
# ## Predictions of the fixed part of the model in matrix form
X <- model.matrix(~ Days, data = NewData)
betas <- fixef(MS1)
NewData$fit <- X %*% betas
# Standard errors
NewData$SE <- sqrt( diag(X %*% vcov(MS1) %*% t(X)) )
NewData$lwr <- NewData$fit - 2 * NewData$SE
NewData$upr <- NewData$fit + 2 * NewData$SE
# ## Plot of the fixed-part predictions
gg_MS1_normal <- ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
gg_MS1_normal
# ## Predictions for each level of the random factor
NewData$fit_subj <- predict(MS1, NewData, type = 'response')
ggplot(NewData, aes(x = Days, y = fit_subj)) +
geom_ribbon(alpha = 0.3, aes(ymin = lwr, ymax = upr)) +
geom_line(aes(colour = Subject)) +
geom_point(data = sl, aes(x = Days, y = Reaction, colour = Subject)) +
guides(colour = guide_legend(ncol = 2))
# # Model diagnostics
# ## Data for residual analysis
MS1_diag <- data.frame(
sl,
.fitted = predict(MS1),
.resid = resid(MS1, type = 'pearson'),
.scresid = resid(MS1, type = 'pearson', scaled = TRUE))
head(MS1_diag, 4)
# ## Residuals vs fitted values
gg_resid <- ggplot(MS1_diag, aes(y = .scresid)) +
geom_hline(yintercept = 0)
gg_resid + geom_point(aes(x = .fitted))
# ## Residuals vs covariates in the model and not in the model
gg_resid + geom_boxplot(aes(x = factor(Days)))
gg_resid + geom_boxplot(aes(x = Subject))
# # Hypothesis testing in mixed models ########################################
# Wald t- (or z-) tests
coef(summary(MS1))
# ## Likelihood ratio tests (LRT)
# ## LRT for random effects
MS1 <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sl, REML = TRUE)
MS0 <- lmer(Reaction ~ Days + (1 | Subject), data = sl, REML = TRUE)
anova(MS1, MS0, refit = FALSE)
# ## LRT for fixed effects
MS1.ml <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sl, REML = FALSE)
MS0.ml <- lmer(Reaction ~ 1 + (1 + Days | Subject), data = sl, REML = FALSE)
anova(MS1.ml, MS0.ml)
# ## Model comparison by AIC
AIC(MS1.ml, MS0.ml)
# ## Bootstrap for significance testing and for predictions ###################
# ## Parametric bootstrap for the LRT of fixed effects
library(pbkrtest)
pmod <- PBmodcomp(MS1.ml, MS0.ml, nsim = 100) # use 1000 or more for real data
summary(pmod)
# ## Bootstrap estimate of the regression confidence band
NewData <- sl %>% group_by(Subject) %>%
do(data.frame(Days = seq(min(.$Days), max(.$Days), length = 10)))
NewData$fit <- predict(MS1, NewData, type = 'response', re.form = NA)
# Simulate data from the model many times and get predictions for each run.
# NOTE(review): predict.merMod's argument is 'newdata'; 'new_data' here is
# silently absorbed by '...', so the predictions are for the model frame,
# not for NewData -- verify.
bMS1 <- bootMer(x = MS1,
FUN = function(x) predict(x, new_data = NewData, re.form = NA),
nsim = 100)
# Compute quantiles of the predicted values over all bootstrap iterations
b_se <- apply(X = bMS1$t,
MARGIN = 2,
FUN = function(x) quantile(x, probs = c(0.025, 0.975), na.rm = TRUE))
# Confidence band for the predicted values
NewData$lwr <- b_se[1, ]
NewData$upr <- b_se[2, ]
# ## Plot of the fixed-part predictions
gg_MS1_boot <- ggplot(data = NewData, aes(x = Days, y = fit)) +
geom_ribbon(alpha = 0.35, aes(ymin = lwr, ymax = upr)) +
geom_line() + geom_point(data = sl, aes(x = Days, y = Reaction))
gg_MS1_boot
library(cowplot)
plot_grid(gg_MS1_normal + labs(title = "normal"),
gg_MS1_boot + labs(title = "bootstrap"),
ncol = 2)
|
# Shiny UI for a simple mtcars-based mpg predictor: a right-hand sidebar
# collects the predictor values (wt, qsec, am) and shows the prediction;
# the main panel describes the data set and shows an exploratory plot.
# Output ids (mpgPlot1, predictedMpg, strmtcars, mpgPlot) are filled by the
# matching server.R.
library(shiny)
shinyUI(
fluidPage(
titlePanel("Predict Miles/Gallon!"),
sidebarLayout(position = "right", fluid = TRUE,
# Inputs and the prediction read-out.
sidebarPanel(
plotOutput("mpgPlot1"),
br(),
helpText("Pick predictor values:"),
# Predictors sent to the server: weight, quarter-mile time, transmission.
sliderInput("wt", label = h5("Weight (1000 lbs)"), min = 1, max = 6, value = 1),
sliderInput("qsec", label = h5("1/4 mile time"), min = 12, max = 24, value = 12),
radioButtons("am", label = h5("Transmission"), choices = list("automatic" = 0, "manual" = 1),selected = 1),
# Kept from an earlier iteration: with a submit button, inputs would only
# update on click instead of reactively.
#submitButton("Submit"),
br(),
helpText("Predicted Miles Per Gallon:"),
verbatimTextOutput("predictedMpg")
),
# Data-set description and exploratory output.
mainPanel(
div("This application is based on Motor Trend Car Road Test results performed back in
1974 by the Motor Trend US magazine. It comprises of fuel consumption and 10 aspects
of automobile design and performance for 32 automobiles (1973–74 models).
The dataset is loaded into mtcars data-frame.", style = "color:blue"),
br(),
div("The mtcars dataset is comprised of following variables:", style = "color:blue"),
tableOutput("strmtcars"),
div("However it has been found in various analysis that mpg is mostly dependent on wt, qsec and am.
So our prediction will take only those variables into consideration ", style = "color:blue"),
plotOutput("mpgPlot")
)
)
)
)
| /R-Programming/ui.R | no_license | libvenus/datasciencecoursera | R | false | false | 1,478 | r | library(shiny)
# Shiny UI for a simple mtcars-based mpg predictor: a right-hand sidebar
# collects the predictor values (wt, qsec, am) and shows the prediction;
# the main panel describes the data set and shows an exploratory plot.
# Output ids (mpgPlot1, predictedMpg, strmtcars, mpgPlot) are filled by the
# matching server.R.
shinyUI(
fluidPage(
titlePanel("Predict Miles/Gallon!"),
sidebarLayout(position = "right", fluid = TRUE,
# Inputs and the prediction read-out.
sidebarPanel(
plotOutput("mpgPlot1"),
br(),
helpText("Pick predictor values:"),
# Predictors sent to the server: weight, quarter-mile time, transmission.
sliderInput("wt", label = h5("Weight (1000 lbs)"), min = 1, max = 6, value = 1),
sliderInput("qsec", label = h5("1/4 mile time"), min = 12, max = 24, value = 12),
radioButtons("am", label = h5("Transmission"), choices = list("automatic" = 0, "manual" = 1),selected = 1),
# Kept from an earlier iteration: with a submit button, inputs would only
# update on click instead of reactively.
#submitButton("Submit"),
br(),
helpText("Predicted Miles Per Gallon:"),
verbatimTextOutput("predictedMpg")
),
# Data-set description and exploratory output.
mainPanel(
div("This application is based on Motor Trend Car Road Test results performed back in
1974 by the Motor Trend US magazine. It comprises of fuel consumption and 10 aspects
of automobile design and performance for 32 automobiles (1973–74 models).
The dataset is loaded into mtcars data-frame.", style = "color:blue"),
br(),
div("The mtcars dataset is comprised of following variables:", style = "color:blue"),
tableOutput("strmtcars"),
div("However it has been found in various analysis that mpg is mostly dependent on wt, qsec and am.
So our prediction will take only those variables into consideration ", style = "color:blue"),
plotOutput("mpgPlot")
)
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/namedist.R
\name{namedist}
\alias{namedist}
\title{namedist}
\usage{
namedist(name1, name2, r_letters = c(K = "C", W = "V", Y = "I", Z = "S"))
}
\arguments{
\item{name1}{string}
\item{name2}{string}
\item{r_letters}{vector of letters to be replaced if necessary}
}
\value{
distance between the two names as integer value
}
\description{
Apply stringdist function to two names in which the option
replace_letters is available
}
| /man/namedist.Rd | no_license | AurelieFrechet/neaReastName | R | false | true | 507 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/namedist.R
\name{namedist}
\alias{namedist}
\title{namedist}
\usage{
namedist(name1, name2, r_letters = c(K = "C", W = "V", Y = "I", Z = "S"))
}
\arguments{
\item{name1}{string}
\item{name2}{string}
\item{r_letters}{vector of letters to be replaced if necessary}
}
\value{
distance between the two names as integer value
}
\description{
Apply stringdist function to two names in which the option
replace_letters is available
}
|
\name{brownfat}
\alias{brownfat}
\docType{data}
\title{The brown fat data set}
\description{Brown fat (or brown adipose tissue) is found in hibernating mammals, its function being to increase tolerance to the cold. It is also present in newborn humans. In adult humans it is more rare and is known to vary considerably with ambient temperature. \cite{RouthierLabadie2011} analysed data on 4,842 subjects over the period 2007-2008, of whom 328 (6.8\%) had brown fat. Brown fat mass and other demographic and clinical variables were recorded. The purpose of the study was to investigate the factors associated with brown fat occurrence and mass in humans. %% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data("brownfat")}
\format{
A data frame with 4842 observations on the following 14 variables.
\describe{
\item{\code{sex}}{1=female, 2=male}
\item{\code{diabetes}}{ 0=no, 1=yes}
\item{\code{age}}{age in years}
\item{\code{day}}{day of observation (1=1 January, ..., 365=31 December)}
\item{\code{exttemp}}{external temperature (degrees Centigrade)}
\item{\code{season}}{ Spring=1, Summer=2, Autumn=3, Winter=4}
\item{\code{weight}}{weight in kg}
\item{\code{height}}{height in cm}
\item{\code{BMI}}{body mass index}
\item{\code{glycemy}}{glycemia (mmol/L)}
\item{\code{LBW}}{lean body weight}
\item{\code{cancerstatus}}{0=no, 1=yes, 99=missing}
\item{\code{brownfat}}{presence of brown fat (0=no, 1=yes)}
\item{\code{bfmass}}{brown fat mass (g) (zero if \code{brownfat}=0)}
}
}
\source{
Determinants of the Presence and Volume of Brown Fat in Humans (2011),
Statistical Society of Canada,
\url{https://ssc.ca/en/case-study/determinants-presence-and-volume-brown-fat-human},
accessed 13 February 2019.
}
\references{ Routhier-Labadie, A., Ouellet, V., Bellemare, W., Richard, D., Lakhal-Chaieb, L., Turcotte, E., and Carpentier, A. C. (2011), Outdoor Temperature, Age, Sex, Body Mass Index, and Diabetic Status Determine the Prevalence, Mass, and Glucose-Uptake Activity of 18{F}-{FDG}-Detected {BAT} in Humans.\emph{The Journal of Clinical Endocrinology and Metabolism}, Volume \bold{96}, number 1, pp 192-199.
}
\examples{
data(brownfat)
}
\keyword{datasets}
| /man/brownfat.Rd | no_license | cran/gamlss.data | R | false | false | 2,260 | rd | \name{brownfat}
\alias{brownfat}
\docType{data}
\title{The brown fat data set}
\description{Brown fat (or brown adipose tissue) is found in hibernating mammals, its function being to increase tolerance to the cold. It is also present in newborn humans. In adult humans it is more rare and is known to vary considerably with ambient temperature. \cite{RouthierLabadie2011} analysed data on 4,842 subjects over the period 2007-2008, of whom 328 (6.8\%) had brown fat. Brown fat mass and other demographic and clinical variables were recorded. The purpose of the study was to investigate the factors associated with brown fat occurrence and mass in humans. %% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data("brownfat")}
\format{
A data frame with 4842 observations on the following 14 variables.
\describe{
\item{\code{sex}}{1=female, 2=male}
\item{\code{diabetes}}{ 0=no, 1=yes}
\item{\code{age}}{age in years}
\item{\code{day}}{day of observation (1=1 January, ..., 365=31 December)}
\item{\code{exttemp}}{external temperature (degrees Centigrade)}
\item{\code{season}}{ Spring=1, Summer=2, Autumn=3, Winter=4}
\item{\code{weight}}{weight in kg}
\item{\code{height}}{height in cm}
\item{\code{BMI}}{body mass index}
\item{\code{glycemy}}{glycemia (mmol/L)}
\item{\code{LBW}}{lean body weight}
\item{\code{cancerstatus}}{0=no, 1=yes, 99=missing}
\item{\code{brownfat}}{presence of brown fat (0=no, 1=yes)}
\item{\code{bfmass}}{brown fat mass (g) (zero if \code{brownfat}=0)}
}
}
\source{
Determinants of the Presence and Volume of Brown Fat in Humans (2011),
Statistical Society of Canada,
\url{https://ssc.ca/en/case-study/determinants-presence-and-volume-brown-fat-human},
accessed 13 February 2019.
}
\references{ Routhier-Labadie, A., Ouellet, V., Bellemare, W., Richard, D., Lakhal-Chaieb, L., Turcotte, E., and Carpentier, A. C. (2011), Outdoor Temperature, Age, Sex, Body Mass Index, and Diabetic Status Determine the Prevalence, Mass, and Glucose-Uptake Activity of 18{F}-{FDG}-Detected {BAT} in Humans.\emph{The Journal of Clinical Endocrinology and Metabolism}, Volume \bold{96}, number 1, pp 192-199.
}
\examples{
data(brownfat)
}
\keyword{datasets}
|
\name{testdat.csv}
\alias{testdat}
\title{
testdat
}
\description{
Example test data for cctgui() \cr
This data should load as 20 respondents by 25 items, and as binary data \cr
It is an example of 1 culture data \cr
}
%\usage{
%}
%\details{
%}
%\value{
%}
\note{
csv or text data files need not use header or row names \cr
Though respondents should be by the rows, and items by the columns
}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/testdat.Rd | no_license | cran/CCTpack | R | false | false | 597 | rd | \name{testdat.csv}
\alias{testdat}
\title{
testdat
}
\description{
Example test data for cctgui() \cr
This data should load as 20 respondents by 25 items, and as binary data \cr
It is an example of 1 culture data \cr
}
%\usage{
%}
%\details{
%}
%\value{
%}
\note{
csv or text data files need not use header or row names \cr
Though respondents should be by the rows, and items by the columns
}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged,
                 # but retained to preserve the original behaviour when sourced.
gc()
library(tidyverse)

# This script extracts only the features used for modelling in the initial
# Strongbridge experiment, by matching features in the 12/13 month cohorts to
# the original dataset.

# Reference dataset: its columns define the modelling feature set.
df_original <- readRDS('F:/Projects/Strongbridge/data/matching_experiments/01_pre_modelling/00_matched_train_unmatched_test/01_combined_freq_datediff_topcoded.rds') %>%
  dplyr::mutate(PATIENT_ID = as.numeric(PATIENT_ID)) %>%
  dplyr::arrange(PATIENT_ID)

data_dir <- "F:/Projects/Strongbridge/data/matching_experiments/01_pre_modelling/"

# Read one cohort's frequency and date-difference features, keep only the
# columns also present in `reference`, combine them, save the result to
# <dir><cohort_dir>02_combined_freq_datediff_topcoded.rds, and return it.
# (This logic was previously duplicated verbatim for the two cohorts.)
build_cohort_features <- function(cohort_dir, dir = data_dir, reference = df_original) {
  freqs <- readRDS(paste0(dir, cohort_dir, '01_combined_common_freq_topcoded.rds')) %>%
    dplyr::mutate(PATIENT_ID = as.numeric(PATIENT_ID)) %>%
    dplyr::arrange(PATIENT_ID) %>%
    dplyr::select(intersect(colnames(.), colnames(reference)))
  date_diffs <- readRDS(paste0(dir, cohort_dir, '01_combined_date_differences_topcoded.rds')) %>%
    dplyr::mutate(PATIENT_ID = as.numeric(PATIENT_ID)) %>%
    dplyr::arrange(PATIENT_ID) %>%
    # Harmonise the EXPDT column naming with df_original before intersecting.
    setNames(gsub('EXPDT', "EXP_DT", names(.))) %>%
    dplyr::select(intersect(colnames(.), colnames(reference)))
  # NOTE(review): both inputs contain PATIENT_ID, so data.frame() keeps a
  # second, renamed copy of it -- identical to the original script's output.
  combined <- data.frame(freqs, date_diffs)
  saveRDS(combined, paste0(dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds'))
  combined
}

# Select features for the 13 month + cohort, then for the <= 12 month cohort
# (same order of side effects as before).
df_13 <- build_cohort_features('02_gt_13_months_train/')
df_12 <- build_cohort_features('01_lte_12_months_train/')

# Create the combined feature set: suffix the AVG/DIFF features from each
# cohort so they can coexist, then append the remaining (non-AVG/DIFF)
# columns taken from the 12 month cohort.
df_12_suffixed <- df_12 %>%
  dplyr::select(matches('AVG|DIFF')) %>%
  setNames(paste0(names(.), '_FIXED_12_MONTHS'))
df_13_suffixed <- df_13 %>%
  dplyr::select(matches('AVG|DIFF')) %>%
  setNames(paste0(names(.), '_13_PLUS_MONTHS'))
df_12_13 <- data.frame(df_12_suffixed,
                       df_13_suffixed,
                       dplyr::select(df_12, -one_of(colnames(df_12[grep('AVG|DIFF', colnames(df_12))]))))
cohort_dir <- '03_lt_12_gt_13_months_train/'
dir.create(paste0(data_dir, cohort_dir), recursive = TRUE, showWarnings = FALSE)
saveRDS(df_12_13, paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds'))
| /matching_experiments/01_pre_modelling/06_create_final_modelling_datasets.R | no_license | jzhao0802/strongbridge | R | false | false | 3,195 | r | rm(list=ls())
gc()
library(tidyverse)
#This script extracts only features used for modelling in initial Strongbridge experiment by matching features in 12/13 month cohorts to original dataset
df_original <- readRDS ('F:/Projects/Strongbridge/data/matching_experiments/01_pre_modelling/00_matched_train_unmatched_test/01_combined_freq_datediff_topcoded.rds') %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID)
#Select features for 13 month + cohort
data_dir <-"F:/Projects/Strongbridge/data/matching_experiments/01_pre_modelling/"
cohort_dir <- '02_gt_13_months_train/'
df_13_freqs <- readRDS(paste0(data_dir, cohort_dir, '01_combined_common_freq_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID) %>%
dplyr::select(intersect(colnames(.), colnames(df_original)))
df_13_date_diffs <- readRDS(paste0(data_dir, cohort_dir, '01_combined_date_differences_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID)%>%
setNames(gsub('EXPDT',"EXP_DT", names(.))) %>%
dplyr::select(intersect(colnames(.), colnames(df_original)))
df_13_freqs_datediffs <- data.frame(df_13_freqs, df_13_date_diffs)
saveRDS(df_13_freqs_datediffs, paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds'))
#Select features for <= 12 month
cohort_dir <- '01_lte_12_months_train/'
df_12_freqs <- readRDS(paste0(data_dir, cohort_dir, '01_combined_common_freq_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID) %>%
dplyr::select(intersect(colnames(.), colnames(df_original)))
df_12_date_diffs <- readRDS(paste0(data_dir, cohort_dir, '01_combined_date_differences_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID)%>%
setNames(gsub('EXPDT',"EXP_DT", names(.))) %>%
dplyr::select(intersect(colnames(.), colnames(df_original)))
df_12_freqs_datediffs <- data.frame(df_12_freqs, df_12_date_diffs)
saveRDS(df_12_freqs_datediffs, paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds'))
#Create combined feature set
cohort_dir <- '01_lte_12_months_train/'
df_12 <- readRDS(paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID)
cohort_dir <- '02_gt_13_months_train/'
df_13 <- readRDS(paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds')) %>%
dplyr::mutate(PATIENT_ID=as.numeric(PATIENT_ID)) %>%
dplyr::arrange(PATIENT_ID)
df_12_suffixed <- df_12 %>%
dplyr::select(matches('AVG|DIFF')) %>%
setNames(paste0(names(.), '_FIXED_12_MONTHS'))
df_13_suffixed <- df_13 %>%
dplyr::select(matches('AVG|DIFF')) %>%
setNames(paste0(names(.), '_13_PLUS_MONTHS'))
df_12_13 <- data.frame(df_12_suffixed, df_13_suffixed, dplyr::select(df_12,- one_of(colnames(df_12[grep('AVG|DIFF', colnames(df_12))]))))
cohort_dir <- '03_lt_12_gt_13_months_train/'
dir.create(paste0(data_dir, cohort_dir), recursive = TRUE, showWarnings = FALSE)
saveRDS(df_12_13, paste0(data_dir, cohort_dir, '02_combined_freq_datediff_topcoded.rds'))
|
#' Obtain estimates of U_msy and S_msy given alpha and beta
#'
#' Converts the alpha and beta parameters from a stock-recruit analysis (SRA)
#' into the management quantities U_msy (exploitation rate at maximum
#' sustainable yield) and S_msy (spawner abundance at MSY).
#'
#' @param alpha a numeric vector representing the alpha parameter from a SRA.
#'   Can be length > 1.
#' @param beta a numeric vector representing the beta parameter from a SRA.
#'   Can be length > 1.
#' @return a list with elements \code{U_msy} and \code{S_msy}, each the same
#'   length as the (recycled) inputs.
#' @note The conversion of alpha to U_msy is an approximation, as there is no
#'   analytical solution.
#'
#' @export
gen_lm_mgmt <- function(alpha, beta) {
  log_alpha <- log(alpha)
  # Approximation of the MSY exploitation rate from log(alpha).
  U_msy <- log_alpha * (0.5 - (0.65 * log_alpha^1.27) / (8.7 + log_alpha^1.27))
  # alpha <= 1 yields a NaN (negative base to a fractional power) or a
  # negative rate; neither is meaningful, so clamp to zero harvest.
  # (The original compared against the string "NaN", which only worked via
  # implicit numeric-to-character coercion.)
  U_msy[is.nan(U_msy)] <- 0
  U_msy[U_msy < 0] <- 0
  S_msy <- U_msy / beta
  list(U_msy = U_msy, S_msy = S_msy)
}
| /R/z3_gen_lm_mgmt.R | no_license | bstaton1/SimSR | R | false | false | 693 | r | #' Obtain estimates of U_msy and S_msy given alpha and beta
#'
#' Converts alpha and beta into Smsy and Umsy.
#'
#' @param @alpha a numeric vector representing the alpha parameter from a SRA.
#' Can be length > 1.
#' @param @beta a numeric vector representing the beta parameter from a SRA.
#' Can be length > 1.
#' @note The conversion of alpha to U_msy is an approximation, as there is no analytical solution.
#'
#' @export
# Convert SRA alpha/beta into U_msy (MSY exploitation rate, an approximation --
# no analytical solution exists) and S_msy (spawner abundance at MSY).
# Returns a list with elements U_msy and S_msy.
gen_lm_mgmt <- function(alpha, beta) {
  log_alpha <- log(alpha)
  U_msy <- log_alpha * (0.5 - (0.65 * log_alpha^1.27) / (8.7 + log_alpha^1.27))
  # alpha <= 1 yields NaN or a negative rate; clamp to zero harvest.
  # (Originally compared against the string "NaN", relying on coercion.)
  U_msy[is.nan(U_msy)] <- 0
  U_msy[U_msy < 0] <- 0
  S_msy <- U_msy / beta
  list(U_msy = U_msy, S_msy = S_msy)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common.R
\name{two_seven}
\alias{two_seven}
\title{Steps 2--7 of Algorithm 2.1, factored into a common function that can be used by a variety of distance metrics}
\usage{
two_seven(A, L, t, filter = c("distributed", "local"), normlim = 2 * (1 -
t), full_dist_fun = function(idx) vapply(1:nrow(idx), function(k) cor(A[,
idx[k, 1]], A[, idx[k, 2]]), 1), filter_fun = function(v, t) v >= t,
dry_run = FALSE, anti = FALSE, group = NULL)
}
\arguments{
\item{A}{data matrix}
\item{L}{truncated SVD of A}
\item{t}{scalar threshold value}
\item{filter}{"distributed" for full threshold evaluation of pruned set on parallel workers,
"local" for sequential evaluation of full threshold of pruned set to avoid copying data matrix.}
\item{normlim}{the squared norm limit in step 4, default value is for correlation}
\item{full_dist_fun}{non-projected distance function of a two-column matrix of rows of column
indices that needs scoped access to A (step 7), default function is for correlation}
\item{filter_fun}{filter function of a vector and scalar that thresholds vector values
from full_dist_fun, returning a logical vector of same length as v (step 7), default function is for correlation}
\item{dry_run}{a logical value, if \code{TRUE} quickly return statistics useful for tuning \code{p}}
\item{anti}{a logical value, if \code{TRUE} also include anti-correlated vectors}
\item{group}{either \code{NULL} for no grouping, or a vector of length \code{ncol(A)} consisting of \code{-1, 1} values
indicating group membership of the columns.}
}
\value{
a list with indices, ell, tot, and longest_run entries, unless dry_run=\code{TRUE} in which case
a list with ell and tot is returned
}
\description{
Steps 2--7 of Algorithm 2.1, factored into a common function that can be used by a variety of distance metrics
}
\keyword{internal}
| /man/two_seven.Rd | no_license | bwlewis/tcor | R | false | true | 1,917 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common.R
\name{two_seven}
\alias{two_seven}
\title{Steps 2--7 of Algorithm 2.1, factored into a common function that can be used by a variety of distance metrics}
\usage{
two_seven(A, L, t, filter = c("distributed", "local"), normlim = 2 * (1 -
t), full_dist_fun = function(idx) vapply(1:nrow(idx), function(k) cor(A[,
idx[k, 1]], A[, idx[k, 2]]), 1), filter_fun = function(v, t) v >= t,
dry_run = FALSE, anti = FALSE, group = NULL)
}
\arguments{
\item{A}{data matrix}
\item{L}{truncated SVD of A}
\item{t}{scalar threshold value}
\item{filter}{"distributed" for full threshold evaluation of pruned set on parallel workers,
"local" for sequential evaluation of full threshold of pruned set to avoid copying data matrix.}
\item{normlim}{the squared norm limit in step 4, default value is for correlation}
\item{full_dist_fun}{non-projected distance function of a two-column matrix of rows of column
indices that needs scoped access to A (step 7), default function is for correlation}
\item{filter_fun}{filter function of a vector and scalar that thresholds vector values
from full_dist_fun, returning a logical vector of same length as v (step 7), default function is for correlation}
\item{dry_run}{a logical value, if \code{TRUE} quickly return statistics useful for tuning \code{p}}
\item{anti}{a logical value, if \code{TRUE} also include anti-correlated vectors}
\item{group}{either \code{NULL} for no grouping, or a vector of length \code{ncol(A)} consisting of \code{-1, 1} values
indicating group membership of the columns.}
}
\value{
a list with indices, ell, tot, and longest_run entries, unless dry_run=\code{TRUE} in which case
a list with ell and tot is returned
}
\description{
Steps 2--7 of Algorithm 2.1, factored into a common function that can be used by a variety of distance metrics
}
\keyword{internal}
|
# @file InjectSignals.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of PopEstMethodEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Inject simulated outcome signals and compute minimum detectable relative risks
#'
#' Injects simulated positive-control outcomes into the outcome table, based on
#' the OHDSI negative controls shipped with the MethodEvaluation package, then
#' computes the minimum detectable relative risk (MDRR) for every control. The
#' expensive signal-injection step is cached: it is skipped when
#' \code{<workFolder>/injectionSummary.rds} already exists. The full set of
#' controls, with MDRRs attached, is written to
#' \code{<workFolder>/allControls.csv}.
#'
#' @param connectionDetails     Database connection details as created by
#'   \code{DatabaseConnector::createConnectionDetails}.
#' @param cdmDatabaseSchema     Schema containing the OMOP CDM data.
#' @param oracleTempSchema      Schema for temporary tables (Oracle only).
#' @param outcomeDatabaseSchema Schema containing the outcome (cohort) table.
#' @param outcomeTable          Name of the outcome table.
#' @param workFolder            Folder for intermediate and output files.
#' @param maxCores              Maximum number of CPU cores to use.
#' @param cdmVersion            CDM version string passed to
#'   \code{MethodEvaluation::computeMdrr}. (Previously referenced as an
#'   undefined variable inside the function; now a proper argument.)
#'
#' @export
injectSignals <- function(connectionDetails,
                          cdmDatabaseSchema,
                          oracleTempSchema = NULL,
                          outcomeDatabaseSchema = cdmDatabaseSchema,
                          outcomeTable = "cohort",
                          workFolder,
                          maxCores = 1,
                          cdmVersion = "5") {
  injectionFolder <- file.path(workFolder, "SignalInjection")
  if (!file.exists(injectionFolder)) {
    dir.create(injectionFolder)
  }
  injectionSummaryFile <- file.path(workFolder, "injectionSummary.rds")
  if (!file.exists(injectionSummaryFile)) {
    # Exposure-outcome pairs to inject come from the package's OHDSI negative
    # control set.
    ohdsiNegativeControls <- readRDS(system.file("ohdsiNegativeControls.rds", package = "MethodEvaluation"))
    exposureOutcomePairs <- unique(data.frame(exposureId = ohdsiNegativeControls$targetId,
                                              outcomeId = ohdsiNegativeControls$outcomeId))
    prior <- Cyclops::createPrior("laplace", exclude = 0, useCrossValidation = TRUE)
    control <- Cyclops::createControl(cvType = "auto",
                                      startingVariance = 0.01,
                                      noiseLevel = "quiet",
                                      cvRepetitions = 1,
                                      threads = min(c(10, maxCores)))
    # Covariates used to fit the outcome models underlying the injection.
    covariateSettings <- FeatureExtraction::createCovariateSettings(useDemographicsAgeGroup = TRUE,
                                                                    useDemographicsGender = TRUE,
                                                                    useDemographicsIndexYear = TRUE,
                                                                    useDemographicsIndexMonth = TRUE,
                                                                    useConditionGroupEraLongTerm = TRUE,
                                                                    useDrugGroupEraLongTerm = TRUE,
                                                                    useProcedureOccurrenceLongTerm = TRUE,
                                                                    useMeasurementLongTerm = TRUE,
                                                                    useObservationLongTerm = TRUE,
                                                                    useCharlsonIndex = TRUE,
                                                                    useDcsi = TRUE,
                                                                    useChads2Vasc = TRUE,
                                                                    longTermStartDays = 365,
                                                                    endDays = 0)
    result <- MethodEvaluation::injectSignals(connectionDetails,
                                              cdmDatabaseSchema = cdmDatabaseSchema,
                                              oracleTempSchema = oracleTempSchema,
                                              exposureDatabaseSchema = cdmDatabaseSchema,
                                              exposureTable = "drug_era",
                                              outcomeDatabaseSchema = outcomeDatabaseSchema,
                                              outcomeTable = outcomeTable,
                                              outputDatabaseSchema = outcomeDatabaseSchema,
                                              outputTable = outcomeTable,
                                              createOutputTable = FALSE,
                                              outputIdOffset = 10000,
                                              exposureOutcomePairs = exposureOutcomePairs,
                                              firstExposureOnly = FALSE,
                                              firstOutcomeOnly = TRUE,
                                              removePeopleWithPriorOutcomes = TRUE,
                                              modelType = "survival",
                                              washoutPeriod = 365,
                                              riskWindowStart = 0,
                                              riskWindowEnd = 0,
                                              addExposureDaysToEnd = TRUE,
                                              effectSizes = c(1.5, 2, 4),
                                              precision = 0.01,
                                              prior = prior,
                                              control = control,
                                              maxSubjectsForModel = 250000,
                                              minOutcomeCountForModel = 100,
                                              minOutcomeCountForInjection = 25,
                                              workFolder = injectionFolder,
                                              modelThreads = max(1, round(maxCores/8)),
                                              generationThreads = min(6, maxCores),
                                              covariateSettings = covariateSettings)
    saveRDS(result, injectionSummaryFile)
  }
  # Combine the injected (positive) controls with the negative controls.
  ohdsiNegativeControls <- readRDS(system.file("ohdsiNegativeControls.rds", package = "MethodEvaluation"))
  injectedSignals <- readRDS(injectionSummaryFile)
  injectedSignals$targetId <- injectedSignals$exposureId
  injectedSignals <- merge(injectedSignals, ohdsiNegativeControls)
  # Drop pairs for which no signal could be injected (true effect size 0).
  injectedSignals <- injectedSignals[injectedSignals$trueEffectSize != 0, ]
  injectedSignals$outcomeName <- paste0(injectedSignals$outcomeName, ", RR=", injectedSignals$targetEffectSize)
  injectedSignals$oldOutcomeId <- injectedSignals$outcomeId
  injectedSignals$outcomeId <- injectedSignals$newOutcomeId
  # Negative controls have a true effect size of 1 by definition.
  ohdsiNegativeControls$targetEffectSize <- 1
  ohdsiNegativeControls$trueEffectSize <- 1
  ohdsiNegativeControls$trueEffectSizeFirstExposure <- 1
  ohdsiNegativeControls$oldOutcomeId <- ohdsiNegativeControls$outcomeId
  allControls <- rbind(ohdsiNegativeControls, injectedSignals[, names(ohdsiNegativeControls)])
  # MDRR is computed for both the target and the comparator exposures.
  exposureOutcomes <- unique(rbind(data.frame(exposureId = allControls$targetId,
                                              outcomeId = allControls$outcomeId),
                                   data.frame(exposureId = allControls$comparatorId,
                                              outcomeId = allControls$outcomeId)))
  mdrr <- MethodEvaluation::computeMdrr(connectionDetails = connectionDetails,
                                        cdmDatabaseSchema = cdmDatabaseSchema,
                                        oracleTempSchema = oracleTempSchema,
                                        exposureOutcomePairs = exposureOutcomes,
                                        exposureDatabaseSchema = cdmDatabaseSchema,
                                        exposureTable = "drug_era",
                                        outcomeDatabaseSchema = outcomeDatabaseSchema,
                                        outcomeTable = outcomeTable,
                                        cdmVersion = cdmVersion)
  allControls <- merge(allControls, data.frame(targetId = mdrr$exposureId,
                                               outcomeId = mdrr$outcomeId,
                                               mdrrTarget = mdrr$mdrr))
  # Comparator MDRRs may be missing for some rows, hence the left (all.x) join.
  allControls <- merge(allControls,
                       data.frame(comparatorId = mdrr$exposureId,
                                  outcomeId = mdrr$outcomeId,
                                  mdrrComparator = mdrr$mdrr),
                       all.x = TRUE)
  write.csv(allControls, file.path(workFolder, "allControls.csv"), row.names = FALSE)
}
| /PopEstMethodEvaluation/R/InjectSignals.R | no_license | NEONKID/StudyProtocolSandbox | R | false | false | 9,239 | r | # @file InjectSignals.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of PopEstMethodEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @export
injectSignals <- function(connectionDetails,
cdmDatabaseSchema,
oracleTempSchema = NULL,
outcomeDatabaseSchema = cdmDatabaseSchema,
outcomeTable = "cohort",
workFolder,
maxCores = 1) {
injectionFolder <- file.path(workFolder, "SignalInjection")
if (!file.exists(injectionFolder))
dir.create(injectionFolder)
injectionSummaryFile <- file.path(workFolder, "injectionSummary.rds")
if (!file.exists(injectionSummaryFile)) {
ohdsiNegativeControls <- readRDS(system.file("ohdsiNegativeControls.rds", package = "MethodEvaluation"))
exposureOutcomePairs <- data.frame(exposureId = ohdsiNegativeControls$targetId,
outcomeId = ohdsiNegativeControls$outcomeId)
exposureOutcomePairs <- unique(exposureOutcomePairs)
#
# connection <- DatabaseConnector::connect(connectionDetails)
# sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @resultsDatabaseSchema.@outcomeTable GROUP BY cohort_definition_id"
# sql <- SqlRender::renderSql(sql, resultsDatabaseSchema = outcomeDatabaseSchema, outcomeTable = outcomeTable)$sql
# sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
# print(DatabaseConnector::querySql(connection, sql))
# dbDisconnect(connection)
prior = Cyclops::createPrior("laplace", exclude = 0, useCrossValidation = TRUE)
control = Cyclops::createControl(cvType = "auto",
startingVariance = 0.01,
noiseLevel = "quiet",
cvRepetitions = 1,
threads = min(c(10, maxCores)))
covariateSettings <- FeatureExtraction::createCovariateSettings(useDemographicsAgeGroup = TRUE,
useDemographicsGender = TRUE,
useDemographicsIndexYear = TRUE,
useDemographicsIndexMonth = TRUE,
useConditionGroupEraLongTerm = TRUE,
useDrugGroupEraLongTerm = TRUE,
useProcedureOccurrenceLongTerm = TRUE,
useMeasurementLongTerm = TRUE,
useObservationLongTerm = TRUE,
useCharlsonIndex = TRUE,
useDcsi = TRUE,
useChads2Vasc = TRUE,
longTermStartDays = 365,
endDays = 0)
result <- MethodEvaluation::injectSignals(connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
oracleTempSchema = oracleTempSchema,
exposureDatabaseSchema = cdmDatabaseSchema,
exposureTable = "drug_era",
outcomeDatabaseSchema = outcomeDatabaseSchema,
outcomeTable = outcomeTable,
outputDatabaseSchema = outcomeDatabaseSchema,
outputTable = outcomeTable,
createOutputTable = FALSE,
outputIdOffset = 10000,
exposureOutcomePairs = exposureOutcomePairs,
firstExposureOnly = FALSE,
firstOutcomeOnly = TRUE,
removePeopleWithPriorOutcomes = TRUE,
modelType = "survival",
washoutPeriod = 365,
riskWindowStart = 0,
riskWindowEnd = 0,
addExposureDaysToEnd = TRUE,
effectSizes = c(1.5, 2, 4),
precision = 0.01,
prior = prior,
control = control,
maxSubjectsForModel = 250000,
minOutcomeCountForModel = 100,
minOutcomeCountForInjection = 25,
workFolder = injectionFolder,
modelThreads = max(1, round(maxCores/8)),
generationThreads = min(6, maxCores),
covariateSettings = covariateSettings)
saveRDS(result, injectionSummaryFile)
}
ohdsiNegativeControls <- readRDS(system.file("ohdsiNegativeControls.rds", package = "MethodEvaluation"))
injectedSignals <- readRDS(injectionSummaryFile)
injectedSignals$targetId <- injectedSignals$exposureId
injectedSignals <- merge(injectedSignals, ohdsiNegativeControls)
injectedSignals <- injectedSignals[injectedSignals$trueEffectSize != 0, ]
injectedSignals$outcomeName <- paste0(injectedSignals$outcomeName, ", RR=", injectedSignals$targetEffectSize)
injectedSignals$oldOutcomeId <- injectedSignals$outcomeId
injectedSignals$outcomeId <- injectedSignals$newOutcomeId
ohdsiNegativeControls$targetEffectSize <- 1
ohdsiNegativeControls$trueEffectSize <- 1
ohdsiNegativeControls$trueEffectSizeFirstExposure <- 1
ohdsiNegativeControls$oldOutcomeId <- ohdsiNegativeControls$outcomeId
allControls <- rbind(ohdsiNegativeControls, injectedSignals[, names(ohdsiNegativeControls)])
exposureOutcomes <- data.frame()
exposureOutcomes <- rbind(exposureOutcomes, data.frame(exposureId = allControls$targetId,
outcomeId = allControls$outcomeId))
exposureOutcomes <- rbind(exposureOutcomes, data.frame(exposureId = allControls$comparatorId,
outcomeId = allControls$outcomeId))
exposureOutcomes <- unique(exposureOutcomes)
mdrr <- MethodEvaluation::computeMdrr(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
oracleTempSchema = oracleTempSchema,
exposureOutcomePairs = exposureOutcomes,
exposureDatabaseSchema = cdmDatabaseSchema,
exposureTable = "drug_era",
outcomeDatabaseSchema = outcomeDatabaseSchema,
outcomeTable = outcomeTable,
cdmVersion = cdmVersion)
allControls <- merge(allControls, data.frame(targetId = mdrr$exposureId,
outcomeId = mdrr$outcomeId,
mdrrTarget = mdrr$mdrr))
allControls <- merge(allControls,
data.frame(comparatorId = mdrr$exposureId,
outcomeId = mdrr$outcomeId,
mdrrComparator = mdrr$mdrr),
all.x = TRUE)
write.csv(allControls, file.path(workFolder, "allControls.csv"), row.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpp11-package.R
\docType{package}
\name{cpp11-package}
\alias{cpp11}
\alias{cpp11-package}
\title{cpp11: A C++11 Interface for R's C Interface}
\description{
Provides a header only, C++11 interface to R's C
interface. Compared to other approaches 'cpp11' strives to be safe
against long jumps from the C API as well as C++ exceptions, conform
to normal R function semantics and supports interaction with 'ALTREP'
vectors.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/r-lib/cpp11}
\item Report bugs at \url{https://github.com/r-lib/cpp11/issues}
}
}
\author{
\strong{Maintainer}: Jim Hester \email{jim.hester@rstudio.com} (\href{https://orcid.org/0000-0002-2739-7082}{ORCID})
Other contributors:
\itemize{
\item Romain François [contributor]
\item Benjamin Kietzman [contributor]
\item RStudio [copyright holder, funder]
}
}
\keyword{internal}
| /man/cpp11-package.Rd | permissive | honghaoli42/cpp11 | R | false | true | 973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpp11-package.R
\docType{package}
\name{cpp11-package}
\alias{cpp11}
\alias{cpp11-package}
\title{cpp11: A C++11 Interface for R's C Interface}
\description{
Provides a header only, C++11 interface to R's C
interface. Compared to other approaches 'cpp11' strives to be safe
against long jumps from the C API as well as C++ exceptions, conform
to normal R function semantics and supports interaction with 'ALTREP'
vectors.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/r-lib/cpp11}
\item Report bugs at \url{https://github.com/r-lib/cpp11/issues}
}
}
\author{
\strong{Maintainer}: Jim Hester \email{jim.hester@rstudio.com} (\href{https://orcid.org/0000-0002-2739-7082}{ORCID})
Other contributors:
\itemize{
\item Romain François [contributor]
\item Benjamin Kietzman [contributor]
\item RStudio [copyright holder, funder]
}
}
\keyword{internal}
|
# Input data - B.Pr.KPFM.01.data.FishA2F2.01.R
# KPFM predator
# notes on development of general function - mark completed as done
# Allocate_breeders (completed) B.Pr.KPFM.Allocate_breeders.01.R
# Consume (completed) B.Pr.KPFM.Consume.01.R
# UpdateReprodHealth (completed) B.Pr.KPFM.UpdateReprodHealth.01.R
# Update_age (completed) B.Pr.KPFM.Update_age.01.R
# Reproduce (completed) B.Pr.KPFM.Reproduce.01.R
# Mortality (completed) B.Pr.KPFM.Mortality.01.R
# BreedersToNonbreeders (completed) B.Pr.KPFM.BreedersToNonbreeders.01.R
# Trial_setup (completed) B.Pr.KPFM.Time0.fn.01.R
# StatePrint (completed) B.Pr.KPFM.printState.01.R
# TransitionSetup (completed) B.Pr.KPFM.TransitionSetup.01.R
# StateUpdate (completed) B.Pr.KPFM.update_State.01.R
# Watters etal data
# SSMU RecAge PengID InitAbund M Mswitch Mprop Ralpha Rphi Hq PCsummer H50 PCwinter
# 1 APPA 2 N 8402727132 0.369327 0 0 10 0.37 0 1950 5 272
# 2 APW 3 N 834429245 0.24829 0 0 10 0.37 0 3386 5 674
# 3 APDPW 3 N 301932508 0.293375 0 0 10 0.37 0 2881 5 493
# 4 APDPE 3 N 322045302 0.282369 0 0 10 0.37 0 3008 5 533
# 5 APBSW 3 N 438862551 0.278247 0 0 10 0.37 0 3054 5 548
# 6 APBSE 3 N 574691426 0.277919 0 0 10 0.37 0 3058 5 550
# 7 APEI 2 N 679683642 0.353186 0 0 10 0.37 0 2157 5 313
# 8 APE 3 N 1553175192 0.233962 0 0 10 0.37 0 3540 5 743
# 9 SOPA 2 N 69055728795 0.484497 0 0 10 0.37 0 304 5 52
# 10 SOW 2 N 320095594 0.380949 0 0 10 0.37 0 1797 5 243
# 11 SONE 2 N 196909670 0.345772 0 0 10 0.37 0 2250 5 333
# 12 SOSE 3 N 389311850 0.236438 0 0 10 0.37 0 3514 5 730
# 13 SGPA 2 N 2.48777E+11 0.496202 0 0 10 0.37 0 93 5 35
# 14 SGW 2 N 1140779048 0.335801 0 0 10 0.37 0 1433 5 383
# 15 SGE 2 N 1477796628 0.343818 0 0 10 0.37 0 1372 5 359
# sorted by RecAge & feeding per capita rate
# A2F1
# 9 SOPA 2 32005 69055728795 0.484497 0 0 10 0.37 0 304 5 52
# 13 SGPA 2 32005 2.48777E+11 0.496202 0 0 10 0.37 0 93 5 35
# A2F2
# SSMU RecAge PengID InitAbund M Mswitch Mprop Ralpha Rphi Hq PCsummer H50 PCwinter
# 14 SGW 2 32006 1140779048 0.335801 0 0 10 0.37 0 1433 5 383
# 15 SGE 2 32006 1477796628 0.343818 0 0 10 0.37 0 1372 5 359
# rec rate = print(10*exp(c()*2))
# A2F3
# 1 APPA 2 32007 8402727132 0.369327 0 0 10 0.37 0 1950 5 272
# 7 APEI 2 32007 679683642 0.353186 0 0 10 0.37 0 2157 5 313
# 10 SOW 2 32007 320095594 0.380949 0 0 10 0.37 0 1797 5 243
# 11 SONE 2 32007 196909670 0.345772 0 0 10 0.37 0 2250 5 333
################################################################################
# start data set
# Master container for all parameters of this KPFM predator element.
Fish <- list()
#-------------------------------------------------------------------------------
# Identifying metadata for this parameterisation (fish, recruitment age 2,
# feeding scenario 2), used to label the element in model inputs/outputs.
Fish$signature <- list(
ClassName = "Predator",
ID = 23006,
Name.full = "KPFM fish RecAge 2 Feed 2 - approx 1400",
Name.short = "FishA2F2",
Morph = "KPFM",
Revision = "01",
Authors = "A.Constable",
Last.edit = "7 July 2008"
)
# Number of polygons (SSMUs) occupied by local populations of this taxon,
# followed by their reference numbers (14 = SGW, 15 = SGE per the header table).
Fish$polygonsN <- 2
Fish$polygons <- c(14,15)
# reference numbers to polygons in the list of
# defined polygons
# Day/month treated as time zero of the year for this taxon (1 April).
Fish$birthdate <- list(Day = 1, Month = 4)
# day and month to be used as time 0 in the year
# for the taxon
#-------------------------------------------------------------------------------
# Conversion factor from model abundance units to tonnes.
Fish$ScaleToTonnes <- 0.005
#-------------------------------------------------------------------------------
# Initial abundance per polygon, in the same order as Fish$polygons; values
# are the InitAbund column of the Watters et al. table in the file header.
Fish$Init.abundance <- c(
1140779048 # 14 SGW
,1477796628 # 15 SGE
)
#-------------------------------------------------------------------------------
Fish$Stage <- list(StageN = 4 # "pups", number of age classes in juveniles + nonbreeders (5) and breeders (6)
,JuveAgeN = 2 # equivalent to lag in KPFM
,StageStrUnits = 1 # (1 = N, 2 = B)
,StageStr = NULL # established as a list by polygon in setup
,StageSize = rep(list(c(0.0001 # Age 0
,0.0002 # Age 1
,0.0005 # nonbreeders
,0.0005 # breeders
)),Fish$polygonsN)
)
#-------------------------------------------------------------------------------
Fish$Mortality <- list(summer = list(
# M = nominal mortality over period
# z = max proportion of nominal mortality that is subject to variation
# v= effect of density dependence on dependent variable
Age0 = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,Age1 = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,nonBreeders = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,breeders = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
) # end summer
,winter = list(
Age0 = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,Age1 = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,nonBreeders = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
,breeders = list (M = c(0.335801,0.343818)
,z = rep(0,Fish$polygonsN)
,v = rep(0,Fish$polygonsN))
) # end winter
)
#-------------------------------------------------------------------------------
Fish$Allocate.breeders <- list(
StageNonbreeder = 3 # designated stage of nonbreeder - should always be one less than breeder
,StageBreeder = 4 # designated stage of breeder
,Phi = rep(3.5,Fish$polygonsN)
,maxPropBreeders = rep(1,Fish$polygonsN) # max proportion of non-breeders that can become breeders
,SSMUdest = matrix(c( # proportion of breeders from origin breeding SSMU (rows) going to destination SSMU (cols)
1,0
,0,1
),ncol=Fish$polygonsN,byrow=TRUE)
,RepConditionRemaining = 0 # reproductive condition remaining after allocation to breeders
)
#-------------------------------------------------------------------------------
# Reproduction by breeders (stage 4), including mortality of offspring over
# the breeding period (one value per polygon for each mortality parameter).
Fish$Reproduction <- list(
  StageBreeder = 4,              # stage whose numbers drive reproduction
  M = c(0.335801, 0.343818),     # nominal mortality of offspring over breeding period
  z = rep(0, Fish$polygonsN),    # max proportion of nominal mortality subject to variation
  v = rep(1.5, Fish$polygonsN),  # effect of density dependence on dependent variable
  # alpha derived from Watters et al. Ralpha, i.e. Ralpha * exp(M) * AgeRec
  alpha = (0.1 * c(38.31299, 39.56153)),  # maximum reproductive rate per female
  propfemale = rep(1, Fish$polygonsN),    # proportion of breeding population that is female
  RepConditionRemaining = 1      # reproductive condition remaining after allocation to breeders
)
#-------------------------------------------------------------------------------
# Consumption parameterisation for the single related prey element (krill).
# Duplication fix: the original inlined the identical 18-polygon PreyPn list
# and krill parameter block four times (summer/winter x NonBreeder/Breeder);
# they are now built once by local helpers.  Structure and values unchanged:
# both summer stages feed (PerCapita 1400); in winter only NonBreeders feed
# (PerCapita 371) and Breeder is NULL (no consumption by that stage).
Fish$Consume <- local({
  nFeed <- 18  # number of feeding polygons (SSMUs)
  # Rows = local populations (colony 14 SGW, colony 15 SGE); cols = feeding
  # polygons.  Each colony feeds only in its own SSMU.
  propFeed <- matrix(0, nrow = Fish$polygonsN, ncol = nFeed)
  propFeed[1, 14] <- 1  # colony 14 SGW
  propFeed[2, 15] <- 1  # colony 15 SGE
  # For each predator feeding polygon: prey polygon (relative reference) and
  # the proportion of that prey polygon lying within the predator polygon.
  preyPn <- setNames(
    lapply(seq_len(nFeed),
           function(p) list(Pns = matrix(c(p, 1), ncol = 2, byrow = TRUE),
                            PnN = 1)),
    paste0("P", seq_len(nFeed)))
  # Krill consumption record for one predator stage in one season.
  krillStage <- function(perCapita) {
    list(feeding.SSMU.N = nFeed,
         PropFeedInPolygon = propFeed,
         Prey = list(
           Krill = list(
             PerCapita = perCapita,        # maximum per capita consumption of krill
             PropInDiet = 1,               # proportion of krill in diet
             Holling_q = 1,
             Holling_D = 15,
             Holling_units = 1,
             Holling_availability = c(1),  # krill stage structure (like selectivity) visible to predators
             Prey_selectivity = c(1),
             PreyPn = preyPn)))
  }
  list(
    relatedElements = matrix(c("Biota", "Krill"), ncol = 2, byrow = TRUE),  # krill
    feeding.SSMUs = c(1:18),  # reference numbers of polygons in which feeding can occur
    feeding.SSMU.N = 18,
    # NULL for a stage means no consumption by that stage
    dset = list(
      summer = list(Age0 = NULL,
                    Age1 = NULL,
                    NonBreeder = krillStage(1400),
                    Breeder = krillStage(1400)),
      winter = list(Age0 = NULL,
                    Age1 = NULL,
                    NonBreeder = krillStage(371),
                    Breeder = NULL)))
})
#-------------------------------------------------------------------------------
# Food value of each prey (in sequence according to the prey list in
# Fish$Consume), used when updating reproductive health; the same single-prey
# value applies in both seasons.
Fish$ReprodHealth <- list(
  summer = list(FoodValue = c(1)),
  winter = list(FoodValue = c(1))
)
#-------------------------------------------------------------------------------
# No parameters are required by the age-update action (see Update_age in
# Fish$FunctionsList); its dset is deliberately NULL.
Fish$Update.age <- NULL
#-------------------------------------------------------------------------------
# End-of-season return of breeders (stage 4) to the non-breeder stage (stage 3).
# Breeders is a polygonsN x polygonsN zero matrix — presumably an origin x
# destination accumulator filled at run time; TODO confirm against
# B.Pr.KPFM.BreedersToNonbreeders.01.R.
Fish$Breeders.to.nonbreeders <- list(
StageNonbreeder = 3 # designated stage of nonbreeder
,StageBreeder = 4 # designated stage of breeder
,Breeders = matrix(0,nrow=Fish$polygonsN,ncol=Fish$polygonsN)
)
#-------------------------------------------------------------------------------
# Bookkeeping slots populated during setup / at run time, not by this file.
Fish$Initialise <- list(NULL)
Fish$Transition.data <- list()
Fish$PrintState <- list(OutDir = NULL, OutFiles = NULL)
# Registry of this element's action functions: each entry gives the method
# name to invoke and the source file (under ./code) that defines it.
Fish$FunctionsList <- local({
  action <- function(method, file) {
    list(actionMethod = method,
         actionFile = file.path("code", file))
  }
  list(
    Allocate_breeders = action("allocateBreeders", "B.Pr.KPFM.Allocate_breeders.01.R"),
    Consume = action("consume", "B.Pr.KPFM.Consume.01.R"),
    Consume_setup = action("consumeSetup", "B.Pr.KPFM.Consume.setup.01.R"),
    UpdateReprodHealth = action("updateReprodHealth", "B.Pr.KPFM.UpdateReprodHealth.01.R"),
    Update_age = action("updateAge", "B.Pr.KPFM.Update_age.01.R"),
    Reproduce = action("reproduce", "B.Pr.KPFM.Reproduce.01.R"),
    Reproduce_setup = action("reproduceSetup", "B.Pr.KPFM.Reproduce.setup.01.R"),
    Mortality = action("mortality", "B.Pr.KPFM.Mortality.01.R"),
    Mortality_setup = action("mortalitySetup", "B.Pr.KPFM.Mortality.setup.01.R"),
    BreedersToNonbreeders = action("breedersToNonbreeders", "B.Pr.KPFM.BreedersToNonbreeders.01.R"),
    StatePrint = action("printState", "B.Pr.KPFM.printState.01.R"),
    StateUpdate = action("updateState", "B.Pr.KPFM.update_State.01.R")
  )
})
#-------------------------------------------------------------------------------
# Output file names, one per state variable written by the printState action.
Fish$OutputFiles <- list(
  State_N = "Biota.FishA2F2.State.N.dat",
  State_B = "Biota.FishA2F2.State.B.dat",
  State_Stage = "Biota.FishA2F2.State.Stage.dat",
  State_RepCond = "Biota.FishA2F2.State.RepCond.dat",
  State_Health = "Biota.FishA2F2.State.Health.dat"
)
#-------------------------------------------------------------------------------
# Flags controlling which of the state files above are actually written.
Fish$OutputFlags <- list(
  PrintState_N = TRUE,
  PrintState_B = TRUE,
  PrintState_Stage = TRUE,
  PrintState_RepCond = FALSE,
  PrintState_Health = FALSE
)
# General element functions: setup hook, state printing, and state update.
Fish$Functions <- local({
  # One output-file specification (flag, file name, path filled in later).
  fileSpec <- function(flag, fname) list(output = flag, fname = fname, path = NULL)
  list(
    # function to undertake element-specific setup of actions
    # (not including the generalised actions)
    # e.g. setup = list (ContEnv = list(fn = NULL, dset = NULL))
    setup = NULL,
    # state printing at time 0; dset is a list because more than one file may
    # be needed to print the state
    printState = list(
      actionMethod = Fish$FunctionsList$StatePrint$actionMethod,
      actionFile = Fish$FunctionsList$StatePrint$actionFile,
      dset = list(
        Number = fileSpec(Fish$OutputFlags$PrintState_N, Fish$OutputFiles$State_N),
        Biomass = fileSpec(Fish$OutputFlags$PrintState_B, Fish$OutputFiles$State_B),
        Stage = fileSpec(Fish$OutputFlags$PrintState_Stage, Fish$OutputFiles$State_Stage),
        Reprod_Cond = fileSpec(Fish$OutputFlags$PrintState_RepCond, Fish$OutputFiles$State_RepCond),
        Health = fileSpec(Fish$OutputFlags$PrintState_Health, Fish$OutputFiles$State_Health)
      )
    ),
    stateUpdate = list(
      actionMethod = Fish$FunctionsList$StateUpdate$actionMethod,
      actionFile = Fish$FunctionsList$StateUpdate$actionFile,
      dset = NULL
    )
  )
})
# #############################################################
# Taxon$TimeSteps
# #############################################################
# the characteristics of a time step between the previous time and the specified time (in days)
# is given in a list(days in calendar year, number of functions to be carried out, list of named functions)
# knife-edge functions can be included by repeating the same day
# Actions (s = summer, w = winter) in InputData$Functions
#s - Allocate_breeders
#s/w - Consume
#s/w - Update_health
#s/w - Update_reprod_cond
#s - Update_age
#s - Reproduce
#s - BreedersToNonbreeders
# Time-step definitions: each step runs up to `calday` (day of calendar year)
# and lists the actions performed in it.  Duplication fix: the printState
# action, including its five-file dset, was inlined verbatim in both Summer
# and Winter in the original; it is now constructed once and shared.  Element
# names, order and values are unchanged.
Fish$Timesteps <- local({
  # One output-file specification per state variable.
  printDset <- list(
    Number = list(output = Fish$OutputFlags$PrintState_N,
                  fname = Fish$OutputFiles$State_N,
                  path = NULL),
    Biomass = list(output = Fish$OutputFlags$PrintState_B,
                   fname = Fish$OutputFiles$State_B,
                   path = NULL),
    Stage = list(output = Fish$OutputFlags$PrintState_Stage,
                 fname = Fish$OutputFiles$State_Stage,
                 path = NULL),
    Reprod_Cond = list(output = Fish$OutputFlags$PrintState_RepCond,
                       fname = Fish$OutputFiles$State_RepCond,
                       path = NULL),
    Health = list(output = Fish$OutputFlags$PrintState_Health,
                  fname = Fish$OutputFiles$State_Health,
                  path = NULL))
  # NB: the printState action has no transAction element, matching the
  # original layout exactly.
  printStateAction <- list(actionMethod = Fish$FunctionsList$StatePrint$actionMethod,
                           actionFile = Fish$FunctionsList$StatePrint$actionFile,
                           tsType = "LastPeriod",  # "AllPeriods","FirstPeriod","LastPeriod"
                           tsTiming = "After",     # "Before","During","After"
                           relatedElements = NULL,
                           dset = printDset)
  # Build a standard action entry; element order matches the original lists.
  mkAction <- function(fn, tsType, tsTiming, transFn = NULL,
                       relatedElements = NULL, dset = NULL) {
    list(actionMethod = fn$actionMethod,
         actionFile = fn$actionFile,
         tsType = tsType,     # "AllPeriods","FirstPeriod","LastPeriod"; input KnifeEdge as LastPeriod
         tsTiming = tsTiming, # "Before","During","After"
         transAction = if (is.null(transFn)) NULL else
           list(actionMethod = transFn$actionMethod,
                actionFile = transFn$actionFile,
                dset = NULL),
         relatedElements = relatedElements,
         dset = dset)
  }
  list(
    Summer = list(
      calday = dayFromDate(31, 3),
      actionsN = NULL, # will be updated below (by the setup code)
      actions = list(
        allocate_breeders = mkAction(Fish$FunctionsList$Allocate_breeders,
                                     "FirstPeriod", "Before",
                                     dset = Fish$Allocate.breeders),
        consume = mkAction(Fish$FunctionsList$Consume, "AllPeriods", "During",
                           transFn = Fish$FunctionsList$Consume_setup,
                           relatedElements = Fish$Consume$relatedElements,
                           dset = Fish$Consume$dset[[1]]),
        mortality = mkAction(Fish$FunctionsList$Mortality, "AllPeriods", "During",
                             transFn = Fish$FunctionsList$Mortality_setup,
                             dset = Fish$Mortality[[1]]),
        update_rep_health = mkAction(Fish$FunctionsList$UpdateReprodHealth,
                                     "AllPeriods", "After",
                                     relatedElements = Fish$Consume$relatedElements,
                                     dset = Fish$ReprodHealth[[1]]),
        update_age = mkAction(Fish$FunctionsList$Update_age, "LastPeriod", "After",
                              dset = Fish$Update.age),
        reproduce = mkAction(Fish$FunctionsList$Reproduce, "LastPeriod", "After",
                             transFn = Fish$FunctionsList$Reproduce_setup,
                             dset = Fish$Reproduction),
        breedersToNonbreeders = mkAction(Fish$FunctionsList$BreedersToNonbreeders,
                                         "LastPeriod", "After",
                                         dset = Fish$Breeders.to.nonbreeders),
        printState = printStateAction
      )
    ),
    Winter = list(
      calday = dayFromDate(30, 9),
      actionsN = NULL, # will be updated below (by the setup code)
      actions = list(
        consume = mkAction(Fish$FunctionsList$Consume, "AllPeriods", "During",
                           relatedElements = Fish$Consume$relatedElements,
                           dset = Fish$Consume$dset[[2]]),
        mortality = mkAction(Fish$FunctionsList$Mortality, "AllPeriods", "During",
                             transFn = Fish$FunctionsList$Mortality_setup,
                             dset = Fish$Mortality[[2]]),
        update_rep_health_cond = mkAction(Fish$FunctionsList$UpdateReprodHealth,
                                          "AllPeriods", "After",
                                          relatedElements = Fish$Consume$relatedElements,
                                          dset = Fish$ReprodHealth[[2]]),
        printState = printStateAction
      )
    )
  )
})
# Return the populated element data object: files of this kind are source()d
# and this final expression is the value picked up by the caller.
Fish
| /_Scenarios/KPFM 20120525/data/KPFM.B.Pr.01.data.FishA2F2.01.R | no_license | AndrewJConstable/EPOCuniverse | R | false | false | 39,873 | r | # Input data - B.Pr.KPFM.01.data.FishA2F2.01.R
# KPFM predator
# notes on development of general function - mark completed as done
# Allocate_breeders (completed) B.Pr.KPFM.Allocate_breeders.01.R
# Consume (completed) B.Pr.KPFM.Consume.01.R
# UpdateReprodHealth (completed) B.Pr.KPFM.UpdateReprodHealth.01.R
# Update_age (completed) B.Pr.KPFM.Update_age.01.R
# Reproduce (completed) B.Pr.KPFM.Reproduce.01.R
# Mortality (completed) B.Pr.KPFM.Mortality.01.R
# BreedersToNonbreeders (completed) B.Pr.KPFM.BreedersToNonbreeders.01.R
# Trial_setup (completed) B.Pr.KPFM.Time0.fn.01.R
# StatePrint (completed) B.Pr.KPFM.printState.01.R
# TransitionSetup (completed) B.Pr.KPFM.TransitionSetup.01.R
# StateUpdate (completed) B.Pr.KPFM.update_State.01.R
# Watters etal data
# SSMU RecAge PengID InitAbund M Mswitch Mprop Ralpha Rphi Hq PCsummer H50 PCwinter
# 1 APPA 2 N 8402727132 0.369327 0 0 10 0.37 0 1950 5 272
# 2 APW 3 N 834429245 0.24829 0 0 10 0.37 0 3386 5 674
# 3 APDPW 3 N 301932508 0.293375 0 0 10 0.37 0 2881 5 493
# 4 APDPE 3 N 322045302 0.282369 0 0 10 0.37 0 3008 5 533
# 5 APBSW 3 N 438862551 0.278247 0 0 10 0.37 0 3054 5 548
# 6 APBSE 3 N 574691426 0.277919 0 0 10 0.37 0 3058 5 550
# 7 APEI 2 N 679683642 0.353186 0 0 10 0.37 0 2157 5 313
# 8 APE 3 N 1553175192 0.233962 0 0 10 0.37 0 3540 5 743
# 9 SOPA 2 N 69055728795 0.484497 0 0 10 0.37 0 304 5 52
# 10 SOW 2 N 320095594 0.380949 0 0 10 0.37 0 1797 5 243
# 11 SONE 2 N 196909670 0.345772 0 0 10 0.37 0 2250 5 333
# 12 SOSE 3 N 389311850 0.236438 0 0 10 0.37 0 3514 5 730
# 13 SGPA 2 N 2.48777E+11 0.496202 0 0 10 0.37 0 93 5 35
# 14 SGW 2 N 1140779048 0.335801 0 0 10 0.37 0 1433 5 383
# 15 SGE 2 N 1477796628 0.343818 0 0 10 0.37 0 1372 5 359
# sorted by RecAge & feeding per capita rate
# A2F1
# 9 SOPA 2 32005 69055728795 0.484497 0 0 10 0.37 0 304 5 52
# 13 SGPA 2 32005 2.48777E+11 0.496202 0 0 10 0.37 0 93 5 35
# A2F2
# SSMU RecAge PengID InitAbund M Mswitch Mprop Ralpha Rphi Hq PCsummer H50 PCwinter
# 14 SGW 2 32006 1140779048 0.335801 0 0 10 0.37 0 1433 5 383
# 15 SGE 2 32006 1477796628 0.343818 0 0 10 0.37 0 1372 5 359
# rec rate = print(10*exp(c()*2))
# A2F3
# 1 APPA 2 32007 8402727132 0.369327 0 0 10 0.37 0 1950 5 272
# 7 APEI 2 32007 679683642 0.353186 0 0 10 0.37 0 2157 5 313
# 10 SOW 2 32007 320095594 0.380949 0 0 10 0.37 0 1797 5 243
# 11 SONE 2 32007 196909670 0.345772 0 0 10 0.37 0 2250 5 333
################################################################################
# start data set
# Container for all data describing this predator element; populated by the
# Fish$... assignments that follow and returned at the end of the file.
Fish <- list()
#-------------------------------------------------------------------------------
# Identifying signature of the element (class, numeric ID, names, provenance).
Fish$signature <- list(
ClassName = "Predator",
ID = 23006,
Name.full = "KPFM fish RecAge 2 Feed 2 - approx 1400",
Name.short = "FishA2F2",
Morph = "KPFM",
Revision = "01",
Authors = "A.Constable",
Last.edit = "7 July 2008"
)
# Number of local populations and their polygons.
Fish$polygonsN <- 2
Fish$polygons <- c(14,15)
# reference numbers to polygons (here SSMUs 14 SGW and 15 SGE) in the list of
# defined polygons
Fish$birthdate <- list(Day = 1, Month = 4)
# day and month to be used as time 0 in the year
# for the taxon
#-------------------------------------------------------------------------------
# Conversion of model abundance/size units to tonnes — presumably mass per
# individual in tonnes (5 kg); TODO confirm against the consuming code.
Fish$ScaleToTonnes <- 0.005
#-------------------------------------------------------------------------------
# Initial abundance per polygon (from the Watters et al. table in the header).
Fish$Init.abundance <- c(
1140779048 # 14 SGW
,1477796628 # 15 SGE
)
#-------------------------------------------------------------------------------
# Stage structure: JuveAgeN juvenile age classes plus non-breeders and breeders.
Fish$Stage <- list(StageN = 4 # total number of stages: 2 juvenile ages, non-breeders, breeders
,JuveAgeN = 2 # number of juvenile age classes; equivalent to lag in KPFM
,StageStrUnits = 1 # units of the stage structure (1 = N, 2 = B)
,StageStr = NULL # established as a list by polygon in setup
,StageSize = rep(list(c(0.0001 # Age 0
,0.0002 # Age 1
,0.0005 # nonbreeders
,0.0005 # breeders
)),Fish$polygonsN)
)
#-------------------------------------------------------------------------------
# Natural-mortality parameters by season and stage.  Duplication fix: the
# original inlined eight byte-identical stage parameter lists (four stages x
# two seasons); the shared record is now defined once and reused.
# Parameter meanings (each a vector over polygons, except M per-polygon values):
#   M = nominal mortality over the period
#   z = max proportion of nominal mortality that is subject to variation
#   v = effect of density dependence on dependent variable
Fish$Mortality <- local({
  stagePars <- list(M = c(0.335801, 0.343818),  # SGW, SGE
                    z = rep(0, Fish$polygonsN),
                    v = rep(0, Fish$polygonsN))
  seasonPars <- list(Age0 = stagePars,
                     Age1 = stagePars,
                     nonBreeders = stagePars,
                     breeders = stagePars)
  list(summer = seasonPars, winter = seasonPars)
})
#-------------------------------------------------------------------------------
# How non-breeders (stage 3) are promoted to the breeder stage (stage 4) each
# breeding season.
Fish$Allocate.breeders <- list(
  # stage indices: the non-breeder stage should always be one less than the
  # breeder stage
  StageNonbreeder = 3,
  StageBreeder = 4,
  Phi = rep(3.5, Fish$polygonsN),
  # maximum proportion of non-breeders that can become breeders
  maxPropBreeders = rep(1, Fish$polygonsN),
  # proportion of breeders from origin breeding SSMU (rows) going to
  # destination SSMU (cols) — identity: breeders stay where they bred
  SSMUdest = matrix(c(1, 0,
                      0, 1),
                    ncol = Fish$polygonsN, byrow = TRUE),
  # reproductive condition remaining after allocation to breeders
  RepConditionRemaining = 0
)
#-------------------------------------------------------------------------------
# Reproduction parameters for breeders (stage 4).  The offspring mortality
# parameters (M, z, v) are vectors over polygons.
Fish$Reproduction <- list(
  StageBreeder = 4,               # designated stage of breeder
  M = c(0.335801, 0.343818),      # nominal mortality of offspring over breeding period
  z = rep(0, Fish$polygonsN),     # max proportion of nominal mortality subject to variation
  v = rep(1.5, Fish$polygonsN),   # effect of density dependence on dependent variable
  # maximum reproductive rate per female; calculated from Watters et al.
  # Ralpha as Ralpha * exp(M) * AgeRec
  alpha = (0.1 * c(38.31299, 39.56153)),
  propfemale = rep(1, Fish$polygonsN),  # proportion of breeding population that is female
  RepConditionRemaining = 1       # reproductive condition remaining after allocation to breeders
)
#-------------------------------------------------------------------------------
# Krill consumption parameterisation for this element.  Duplication fix: the
# original repeated the identical 18-entry PreyPn list and krill parameter
# block four times (summer/winter x NonBreeder/Breeder); local helpers now
# build them once.  Values and structure are unchanged: summer NonBreeders
# and Breeders feed at PerCapita 1400; in winter only NonBreeders feed
# (PerCapita 371) and the Breeder entry is NULL (no consumption).
Fish$Consume <- local({
  nFeed <- 18  # number of feeding polygons (SSMUs)
  # Rows are the local populations (colony 14 SGW, colony 15 SGE); columns
  # are feeding polygons.  Each colony feeds only in its own SSMU.
  propFeed <- matrix(0, nrow = Fish$polygonsN, ncol = nFeed)
  propFeed[1, 14] <- 1  # colony 14 SGW
  propFeed[2, 15] <- 1  # colony 15 SGE
  # For each predator feeding polygon: the prey polygon (relative reference)
  # and the proportion of the prey polygon in the predator polygon.
  preyPn <- setNames(
    lapply(seq_len(nFeed),
           function(p) list(Pns = matrix(c(p, 1), ncol = 2, byrow = TRUE),
                            PnN = 1)),
    paste0("P", seq_len(nFeed)))
  # One krill consumption record for a predator stage in a season.
  krillStage <- function(perCapita) {
    list(feeding.SSMU.N = nFeed,
         PropFeedInPolygon = propFeed,
         Prey = list(
           Krill = list(
             PerCapita = perCapita,        # maximum per capita consumption of krill
             PropInDiet = 1,               # proportion of krill in diet
             Holling_q = 1,
             Holling_D = 15,
             Holling_units = 1,
             Holling_availability = c(1),  # krill stage structure used to calculate prey density for the Holling equation
             Prey_selectivity = c(1),
             PreyPn = preyPn)))
  }
  list(
    relatedElements = matrix(c("Biota", "Krill"), ncol = 2, byrow = TRUE),  # krill
    feeding.SSMUs = c(1:18),  # reference numbers for polygons in which feeding can occur
    feeding.SSMU.N = 18,
    # a NULL stage means no consumption by that stage
    dset = list(
      summer = list(Age0 = NULL,
                    Age1 = NULL,
                    NonBreeder = krillStage(1400),
                    Breeder = krillStage(1400)),
      winter = list(Age0 = NULL,
                    Age1 = NULL,
                    NonBreeder = krillStage(371),
                    Breeder = NULL)))
})
#-------------------------------------------------------------------------------
# Reproductive-health parameters by season. Consumed positionally from
# Fish$Timesteps below: ReprodHealth[[1]] = summer, ReprodHealth[[2]] = winter,
# so the element order of this list must not change.
Fish$ReprodHealth <- list(summer = list(
                            FoodValue=c(1) # vector of food values for each prey (in sequence according to list in Consume)
                          )
                          ,winter = list(
                            FoodValue=c(1) # vector of food values for each prey (in sequence according to list in Consume)
                          )
)
#-------------------------------------------------------------------------------
# No data needed by the age-update action (see update_age in Fish$Timesteps)
Fish$Update.age <- NULL
#-------------------------------------------------------------------------------
# Parameters for moving breeders back to the nonbreeder stage at season end
Fish$Breeders.to.nonbreeders <- list(
  StageNonbreeder = 3 # designated stage of nonbreeder
  ,StageBreeder = 4 # designated stage of breeder
  ,Breeders = matrix(0,nrow=Fish$polygonsN,ncol=Fish$polygonsN) # polygon-to-polygon matrix; all zeros here (no redistribution)
  )
#-------------------------------------------------------------------------------
# Placeholders for per-trial initialisation and transition bookkeeping
Fish$Initialise <- list(NULL)
Fish$Transition.data <- list()
# Output bookkeeping for the printState action (paths filled in at runtime)
Fish$PrintState <- list(OutDir = NULL, OutFiles = NULL)
# Registry mapping each action to its method name and implementing source file;
# referenced throughout Fish$Functions and Fish$Timesteps below so the file
# paths are declared in exactly one place.
Fish$FunctionsList <- list(Allocate_breeders = list(actionMethod = "allocateBreeders",
                                                    actionFile = file.path("code", "B.Pr.KPFM.Allocate_breeders.01.R")),
                           Consume = list(actionMethod = "consume",
                                          actionFile = file.path("code", "B.Pr.KPFM.Consume.01.R")),
                           Consume_setup = list(actionMethod = "consumeSetup",
                                                actionFile = file.path("code", "B.Pr.KPFM.Consume.setup.01.R")),
                           UpdateReprodHealth = list(actionMethod = "updateReprodHealth",
                                                     actionFile = file.path("code", "B.Pr.KPFM.UpdateReprodHealth.01.R")),
                           Update_age = list(actionMethod = "updateAge",
                                             actionFile = file.path("code", "B.Pr.KPFM.Update_age.01.R")),
                           Reproduce = list(actionMethod = "reproduce",
                                            actionFile = file.path("code", "B.Pr.KPFM.Reproduce.01.R")),
                           Reproduce_setup = list(actionMethod = "reproduceSetup",
                                                  actionFile = file.path("code", "B.Pr.KPFM.Reproduce.setup.01.R")),
                           Mortality = list(actionMethod = "mortality",
                                            actionFile = file.path("code", "B.Pr.KPFM.Mortality.01.R")),
                           Mortality_setup = list(actionMethod = "mortalitySetup",
                                                  actionFile = file.path("code", "B.Pr.KPFM.Mortality.setup.01.R")),
                           BreedersToNonbreeders = list(actionMethod = "breedersToNonbreeders",
                                                        actionFile = file.path("code", "B.Pr.KPFM.BreedersToNonbreeders.01.R")),
                           StatePrint = list(actionMethod = "printState",
                                             actionFile = file.path("code", "B.Pr.KPFM.printState.01.R")),
                           StateUpdate = list(actionMethod = "updateState",
                                              actionFile = file.path("code", "B.Pr.KPFM.update_State.01.R"))
)
#-------------------------------------------------------------------------------
# Output file name for each printable state component
Fish$OutputFiles <- list(State_N = "Biota.FishA2F2.State.N.dat"
                         ,State_B = "Biota.FishA2F2.State.B.dat"
                         ,State_Stage = "Biota.FishA2F2.State.Stage.dat"
                         ,State_RepCond = "Biota.FishA2F2.State.RepCond.dat"
                         ,State_Health = "Biota.FishA2F2.State.Health.dat"
)
#-------------------------------------------------------------------------------
# Switches controlling which state components printState actually writes;
# here only numbers, biomass and stage structure are output.
Fish$OutputFlags <- list(PrintState_N = TRUE
                         ,PrintState_B = TRUE
                         ,PrintState_Stage = TRUE
                         ,PrintState_RepCond = FALSE
                         ,PrintState_Health = FALSE
)
# Generalised function hooks for this element: setup, state printing and state
# updating. Methods and source files are taken from Fish$FunctionsList; output
# targets and on/off flags come from Fish$OutputFiles / Fish$OutputFlags.
Fish$Functions <- list(
  # function to undertake element-specific setup of actions
  # (not including the generalised actions)
  # e.g. setup = list (ContEnv = list(fn = NULL, dset = NULL))
  setup = NULL,
  # data and function to initialise element at the beginning of each trial
  # i.e. how should the element be reset at time 0
  printState = list(actionMethod = Fish$FunctionsList$StatePrint$actionMethod,
    actionFile = Fish$FunctionsList$StatePrint$actionFile,
    dset = list( # List because may need more than one file to print state
      Number = list(output = Fish$OutputFlags$PrintState_N,
        fname = Fish$OutputFiles$State_N,
        path = NULL),
      Biomass = list(output = Fish$OutputFlags$PrintState_B,
        fname = Fish$OutputFiles$State_B,
        path = NULL),
      Stage = list(output = Fish$OutputFlags$PrintState_Stage,
        fname = Fish$OutputFiles$State_Stage,
        path = NULL),
      Reprod_Cond = list(output = Fish$OutputFlags$PrintState_RepCond,
        fname = Fish$OutputFiles$State_RepCond,
        path = NULL),
      Health = list(output = Fish$OutputFlags$PrintState_Health,
        fname = Fish$OutputFiles$State_Health,
        path = NULL)
    )
  ),
  stateUpdate = list(actionMethod = Fish$FunctionsList$StateUpdate$actionMethod,
    actionFile = Fish$FunctionsList$StateUpdate$actionFile,
    dset = NULL
  )
)
# #############################################################
# Taxon$TimeSteps
# #############################################################
# the characteristics of a time step between the previous time and the specified time (in days)
# is given in a list(days in calendar year, number of functions to be carried out, list of named functions)
# knife-edge functions can be included by repeating the same day
# Actions (s = summer, w = winter) in InputData$Functions
#s - Allocate_breeders
#s/w - Consume
#s/w - Update_health
#s/w - Update_reprod_cond
#s - Update_age
#s - Reproduce
#s - BreedersToNonbreeders
# Seasonal schedule of actions for this element. Each action names its method
# and source file (from Fish$FunctionsList), when in the time step it runs
# (tsType/tsTiming), an optional one-off setup action (transAction), and its
# data block (dset). NOTE: seasonal data blocks defined above are indexed
# positionally -- [[1]] = summer, [[2]] = winter -- for Fish$Consume$dset,
# Fish$Mortality and Fish$ReprodHealth.
Fish$Timesteps <- list(
  Summer = list(calday=dayFromDate(31,3),
    actionsN=NULL, # will be updated below
    actions=list(
      allocate_breeders = list(actionMethod = Fish$FunctionsList$Allocate_breeders$actionMethod,
        actionFile = Fish$FunctionsList$Allocate_breeders$actionFile,
        tsType = "FirstPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "Before", # "Before","During","After"
        transAction = NULL,
        relatedElements = NULL,
        dset = Fish$Allocate.breeders
      ),
      consume = list(actionMethod = Fish$FunctionsList$Consume$actionMethod,
        actionFile = Fish$FunctionsList$Consume$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "During", # "Before","During","After"
        transAction = list(actionMethod = Fish$FunctionsList$Consume_setup$actionMethod,
          actionFile = Fish$FunctionsList$Consume_setup$actionFile,
          dset = NULL),
        relatedElements = Fish$Consume$relatedElements,
        dset = Fish$Consume$dset[[1]] # summer consumption parameters
      ),
      mortality = list(actionMethod = Fish$FunctionsList$Mortality$actionMethod,
        actionFile = Fish$FunctionsList$Mortality$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "During", # "Before","During","After"
        transAction = list(actionMethod = Fish$FunctionsList$Mortality_setup$actionMethod,
          actionFile = Fish$FunctionsList$Mortality_setup$actionFile,
          dset = NULL),
        relatedElements = NULL,
        dset = Fish$Mortality[[1]] # summer mortality parameters
      ),
      update_rep_health = list(actionMethod = Fish$FunctionsList$UpdateReprodHealth$actionMethod,
        actionFile = Fish$FunctionsList$UpdateReprodHealth$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        transAction = NULL,
        relatedElements = Fish$Consume$relatedElements,
        dset = Fish$ReprodHealth[[1]] # summer food values
      ),
      update_age = list(actionMethod = Fish$FunctionsList$Update_age$actionMethod,
        actionFile = Fish$FunctionsList$Update_age$actionFile,
        tsType = "LastPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        transAction = NULL,
        relatedElements = NULL,
        dset = Fish$Update.age
      ),
      reproduce = list(actionMethod = Fish$FunctionsList$Reproduce$actionMethod,
        actionFile = Fish$FunctionsList$Reproduce$actionFile,
        tsType = "LastPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        transAction = list(actionMethod = Fish$FunctionsList$Reproduce_setup$actionMethod,
          actionFile = Fish$FunctionsList$Reproduce_setup$actionFile,
          dset = NULL),
        relatedElements = NULL,
        dset = Fish$Reproduction
      ),
      breedersToNonbreeders = list(actionMethod = Fish$FunctionsList$BreedersToNonbreeders$actionMethod,
        actionFile = Fish$FunctionsList$BreedersToNonbreeders$actionFile,
        tsType = "LastPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        transAction = NULL, # list(fn = , dset = )
        relatedElements = NULL,
        dset = Fish$Breeders.to.nonbreeders
      ),
      printState = list(actionMethod = Fish$FunctionsList$StatePrint$actionMethod,
        actionFile = Fish$FunctionsList$StatePrint$actionFile,
        tsType = "LastPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        relatedElements = NULL,
        dset = list( # List because may need more than one file to print state
          Number = list(output = Fish$OutputFlags$PrintState_N,
            fname = Fish$OutputFiles$State_N,
            path = NULL),
          Biomass = list(output = Fish$OutputFlags$PrintState_B,
            fname = Fish$OutputFiles$State_B,
            path = NULL),
          Stage = list(output = Fish$OutputFlags$PrintState_Stage,
            fname = Fish$OutputFiles$State_Stage,
            path = NULL),
          Reprod_Cond = list(output = Fish$OutputFlags$PrintState_RepCond,
            fname = Fish$OutputFiles$State_RepCond,
            path = NULL),
          Health = list(output = Fish$OutputFlags$PrintState_Health,
            fname = Fish$OutputFiles$State_Health,
            path = NULL)
        )
      )
    )
  ),
  Winter = list(calday=dayFromDate(30,9),
    actionsN=NULL, # will be updated below
    actions=list(
      consume = list(actionMethod = Fish$FunctionsList$Consume$actionMethod,
        actionFile = Fish$FunctionsList$Consume$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "During", # "Before","During","After"
        transAction = NULL, # NOTE(review): unlike summer, no Consume_setup transAction here -- confirm intended
        relatedElements = Fish$Consume$relatedElements,
        dset = Fish$Consume$dset[[2]] # winter consumption parameters
      ),
      mortality = list(actionMethod = Fish$FunctionsList$Mortality$actionMethod,
        actionFile = Fish$FunctionsList$Mortality$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "During", # "Before","During","After"
        transAction = list(actionMethod = Fish$FunctionsList$Mortality_setup$actionMethod,
          actionFile = Fish$FunctionsList$Mortality_setup$actionFile,
          dset = NULL),
        relatedElements = NULL,
        dset = Fish$Mortality[[2]] # winter mortality parameters
      ),
      update_rep_health_cond = list(actionMethod = Fish$FunctionsList$UpdateReprodHealth$actionMethod,
        actionFile = Fish$FunctionsList$UpdateReprodHealth$actionFile,
        tsType = "AllPeriods", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        transAction = NULL,
        relatedElements = Fish$Consume$relatedElements,
        dset = Fish$ReprodHealth[[2]] # winter food values
      ),
      printState = list(actionMethod = Fish$FunctionsList$StatePrint$actionMethod,
        actionFile = Fish$FunctionsList$StatePrint$actionFile,
        tsType = "LastPeriod", # "AllPeriods","FirstPeriod","LastPeriod") input KnifeEdge as LastPeriod
        tsTiming = "After", # "Before","During","After"
        relatedElements = NULL,
        dset = list( # List because may need more than one file to print state
          Number = list(output = Fish$OutputFlags$PrintState_N,
            fname = Fish$OutputFiles$State_N,
            path = NULL),
          Biomass = list(output = Fish$OutputFlags$PrintState_B,
            fname = Fish$OutputFiles$State_B,
            path = NULL),
          Stage = list(output = Fish$OutputFlags$PrintState_Stage,
            fname = Fish$OutputFiles$State_Stage,
            path = NULL),
          Reprod_Cond = list(output = Fish$OutputFlags$PrintState_RepCond,
            fname = Fish$OutputFiles$State_RepCond,
            path = NULL),
          Health = list(output = Fish$OutputFlags$PrintState_Health,
            fname = Fish$OutputFiles$State_Health,
            path = NULL)
        )
      )
    )
  )
)
# declare variable to be sourced
Fish
|
# This primary author of this script is Daniel Maloney and the secondary author is Cailin Harris
# Question: Given a species of Echinodermata, does it form a community with a specific group of other Echinodermata species?
# Echinodermata is a phylum which includes a diverse group of species such as: starfish, sea urchins, and sea cucumbers (ref1).
# Some of these species have become increasingly infamous due to their effect on the environment.
# For example, the Crown of Thorns sea star is being studied due to its involvement in reef degradation (ref2).
# They are often found within complex communities.
# The purpose of this project is to discover if data from the BOLD database can be used to predict the communities these species form.
# An understanding of the dependencies between Echinodermata species will help in understanding how invasive Echinodermata species can be controlled.
# Reference 1: https://authors.library.caltech.edu/35244/
# Reference 2: https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0047363
#install.packages("tidyverse")
#install.packages("stringi")
#install.packages("vegan")
#install.packages("iNEXT")
#install.packages("gridExtra")
# Loading the libraries
library(tidyverse)
library(vegan)
library(stringi)
library(iNEXT)
library(gridExtra)
# Download the full Echinodermata record set from BOLD as TSV
Echi <- read_tsv("http://www.boldsystems.org/index.php/API_Public/combined?taxon=Echinodermata&format=tsv")
# Basic filters: bin_uri stands in for species and region for community.
# Keep only records with a proper BIN id (contains ":") and a known region.
Echi.filtered <- Echi %>%
  filter(str_detect(bin_uri, "[:]"), !is.na(region))
# Rank species (BINs) by the number of distinct specimen records
Echi.top.bins <- Echi.filtered %>%
  group_by(bin_uri) %>%
  summarize(count = n_distinct(processid)) %>%
  arrange(desc(count))
# At least one common species must occur in every region so distances can be
# calculated without missing values. Keep the regions holding more than 20
# records of the most-recorded BIN (BOLD:ACF3333), i.e. the top regions.
Echi.with.control <- Echi.filtered %>%
  group_by(region) %>%
  mutate(count = sum(str_detect(bin_uri, "BOLD:ACF3333"), na.rm = TRUE)) %>%
  filter(count > 20)
# Stacked bar chart: community composition of each region hosting BOLD:ACF3333
ggplot(Echi.with.control) +
  geom_bar(aes(x = region, fill = bin_uri))
# Distance between these populations in terms of community diversity.
# Build a vegan-style community matrix: one row per region, one column per
# BIN, cell = number of records of that BIN collected in that region.
comm.Echi <- Echi.with.control %>%
  group_by(region, bin_uri) %>%
  count(bin_uri) %>%
  spread(bin_uri, n) %>%
  remove_rownames() %>%
  column_to_rownames(var = "region")
# Dissimilarity between region communities based on per-species record counts
Echi.dis <- vegdist(comm.Echi)
# Single-linkage hierarchical clustering to find the most similar communities
clus <- hclust(Echi.dis, method = "single")
plot(clus)
####use iNEXT to create rarefaction curves to estimate species diversity ----
# Coerce the community matrix into a plain data frame
comm.Echi.dataFrame <- as.data.frame(comm.Echi)
# Transpose so that regions become the columns and BINs become the rows
comm.Echi.dataFrame2 <- t(comm.Echi.dataFrame)
# Check the class to ensure it is in the correct format (matrix)
class(comm.Echi.dataFrame2)
# Rename the regions so there are no spaces in the names.
# BUG FIX: the original code addressed `dataframe.test`, an object that is
# never created anywhere in this script (the script errored here). The object
# actually built above is comm.Echi.dataFrame2, so operate on that instead.
colnames(comm.Echi.dataFrame2) <- c("Coats.Land", "Scotia.Arc", "Scotia.Island", "Terres.Australes", "Wilhelm.Land")
# Separate each region's abundance column into its own data set
coats.land.data <- subset(comm.Echi.dataFrame2, select = 1)
scotia.arc.data <- subset(comm.Echi.dataFrame2, select = 2)
scotia.island.data <- subset(comm.Echi.dataFrame2, select = 3)
terres.austales.data <- subset(comm.Echi.dataFrame2, select = 4)
wilhelm.land.data <- subset(comm.Echi.dataFrame2, select = 5)
# Now remove all of the missing data (BINs never observed in the region)
coats.land.data2 <- na.omit(coats.land.data)
scotia.arc.data2 <- na.omit(scotia.arc.data)
scotia.island.data2 <- na.omit(scotia.island.data)
terres.austales.data2 <- na.omit(terres.austales.data)
wilhelm.land.data2 <- na.omit(wilhelm.land.data)
# Clean up the workspace: drop the pre-filtering intermediates now that the
# *.data2 versions exist
rm(coats.land.data, scotia.arc.data, scotia.island.data,
   terres.austales.data, wilhelm.land.data)
# Build an iNEXT object for each region
coats.land.iNext <- iNEXT(coats.land.data2)
scotia.arc.iNext <- iNEXT(scotia.arc.data2)
scotia.island.iNext <- iNEXT(scotia.island.data2)
terres.australes.iNext <- iNEXT(terres.austales.data2)
wilhelm.land.iNext <- iNEXT(wilhelm.land.data2)
# Rarefaction curve plots, one per region
p1 <- ggiNEXT(coats.land.iNext)
p2 <- ggiNEXT(scotia.arc.iNext)
p3 <- ggiNEXT(scotia.island.iNext)
p4 <- ggiNEXT(terres.australes.iNext)
p5 <- ggiNEXT(wilhelm.land.iNext)
# Arrange all five plots on one frame for comparison
grid.arrange(p1, p2, p3, p4, p5, nrow = 3)
####determining simpson's diversity for each region ----
# ChaoSimpson estimates Simpson diversity from the abundance data, with
# B = 200 bootstrap replicates for the confidence intervals.
ChaoSimpson(coats.land.data2, datatype = "abundance", B=200)
ChaoSimpson(scotia.arc.data2, datatype = "abundance", B=200)
ChaoSimpson(scotia.island.data2, datatype = "abundance", B=200)
ChaoSimpson(terres.austales.data2, datatype = "abundance", B=200)
ChaoSimpson(wilhelm.land.data2, datatype = "abundance", B=200)
####conclusions ----
# Using data associated with bin numbers from the BOLD database, a species of Echinodermata was selected and a group of species that it consistently forms communities with could be found.
# The species with a large number of records was selected, Promachocrinus kerguelensis (BOLD:ACF3333). The regions where it was collected most were determined.
# Over the five regions of Antarctica where P. kerguelensis was collected most, it was consistently found in communities containing only a small group of other Echinodermata (figure 1).
# A cluster dendrogram was then created to determine which regions had the most similar communities using single linkage clustering (figure 2).
# Sample-size-based rarefaction curves were created based on the number of records collected in each region (figure 3).
# A strong correlation was observed between the records collected from the species with these BOLD numbers in these communities.
# A concern is that several of these BOLD bin uri's were linked to the same species including: ACF3333, AAA0602, and ABZ8776.
# Further investigation may prove that these are truly different species.
# Terres Australes Francaises has the highest diversity (0.808), while Wilhelm II Land had the lowest diversity of 0.35.
#
| /Assignment 1 Section 3.R | no_license | d2maloney/Bioinformatics-Tools-Group-Project | R | false | false | 6,718 | r | # This primary author of this script is Daniel Maloney and the secondary author is Cailin Harris
# Question: Given a species of Echinodermata, does it form a community with a specific group of other Echinodermata species?
# Echinodermata is a phylum which includes a diverse group of species such as: starfish, sea urchins, and sea cucumbers (ref1).
# Some of these species have become increasingly infamous due to their effect on the environment.
# For example, the Crown of Thorns sea star is being studied due to its involvement in reef degradation (ref2).
# They are often found within complex communities.
# The purpose of this project is to discover if data from the BOLD database can be used to predict the communities these species form.
# An understanding of the dependencies between Echinodermata species will help in understanding how invasive Echinodermata species can be controlled.
# Reference 1: https://authors.library.caltech.edu/35244/
# Reference 2: https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0047363
#install.packages("tidyverse")
#install.packages("stringi")
#install.packages("vegan")
#install.packages("iNEXT")
#install.packages("gridExtra")
# Loading the libraries
library(tidyverse)
library(vegan)
library(stringi)
library(iNEXT)
library(gridExtra)
# Download the full Echinodermata record set from BOLD as TSV
Echi <- read_tsv("http://www.boldsystems.org/index.php/API_Public/combined?taxon=Echinodermata&format=tsv")
# Basic filters: bin_uri stands in for species and region for community.
# Keep only records with a proper BIN id (contains ":") and a known region.
Echi.filtered <- Echi %>%
  filter(str_detect(bin_uri, "[:]"), !is.na(region))
# Rank species (BINs) by the number of distinct specimen records
Echi.top.bins <- Echi.filtered %>%
  group_by(bin_uri) %>%
  summarize(count = n_distinct(processid)) %>%
  arrange(desc(count))
# At least one common species must occur in every region so distances can be
# calculated without missing values. Keep the regions holding more than 20
# records of the most-recorded BIN (BOLD:ACF3333), i.e. the top regions.
Echi.with.control <- Echi.filtered %>%
  group_by(region) %>%
  mutate(count = sum(str_detect(bin_uri, "BOLD:ACF3333"), na.rm = TRUE)) %>%
  filter(count > 20)
# Stacked bar chart: community composition of each region hosting BOLD:ACF3333
ggplot(Echi.with.control) +
  geom_bar(aes(x = region, fill = bin_uri))
# Distance between these populations in terms of community diversity.
# Build a vegan-style community matrix: one row per region, one column per
# BIN, cell = number of records of that BIN collected in that region.
comm.Echi <- Echi.with.control %>%
  group_by(region, bin_uri) %>%
  count(bin_uri) %>%
  spread(bin_uri, n) %>%
  remove_rownames() %>%
  column_to_rownames(var = "region")
# Dissimilarity between region communities based on per-species record counts
Echi.dis <- vegdist(comm.Echi)
# Single-linkage hierarchical clustering to find the most similar communities
clus <- hclust(Echi.dis, method = "single")
plot(clus)
####use iNEXT to create rarefaction curves to estimate species diversity ----
# Coerce the community matrix into a plain data frame
comm.Echi.dataFrame <- as.data.frame(comm.Echi)
# Transpose so that regions become the columns and BINs become the rows
comm.Echi.dataFrame2 <- t(comm.Echi.dataFrame)
# Check the class to ensure it is in the correct format (matrix)
class(comm.Echi.dataFrame2)
# Rename the regions so there are no spaces in the names.
# BUG FIX: the original code addressed `dataframe.test`, an object that is
# never created anywhere in this script (the script errored here). The object
# actually built above is comm.Echi.dataFrame2, so operate on that instead.
colnames(comm.Echi.dataFrame2) <- c("Coats.Land", "Scotia.Arc", "Scotia.Island", "Terres.Australes", "Wilhelm.Land")
# Separate each region's abundance column into its own data set
coats.land.data <- subset(comm.Echi.dataFrame2, select = 1)
scotia.arc.data <- subset(comm.Echi.dataFrame2, select = 2)
scotia.island.data <- subset(comm.Echi.dataFrame2, select = 3)
terres.austales.data <- subset(comm.Echi.dataFrame2, select = 4)
wilhelm.land.data <- subset(comm.Echi.dataFrame2, select = 5)
# Now remove all of the missing data (BINs never observed in the region)
coats.land.data2 <- na.omit(coats.land.data)
scotia.arc.data2 <- na.omit(scotia.arc.data)
scotia.island.data2 <- na.omit(scotia.island.data)
terres.austales.data2 <- na.omit(terres.austales.data)
wilhelm.land.data2 <- na.omit(wilhelm.land.data)
# Clean up the workspace: drop the pre-filtering intermediates now that the
# *.data2 versions exist
rm(coats.land.data, scotia.arc.data, scotia.island.data,
   terres.austales.data, wilhelm.land.data)
# Build an iNEXT object for each region
coats.land.iNext <- iNEXT(coats.land.data2)
scotia.arc.iNext <- iNEXT(scotia.arc.data2)
scotia.island.iNext <- iNEXT(scotia.island.data2)
terres.australes.iNext <- iNEXT(terres.austales.data2)
wilhelm.land.iNext <- iNEXT(wilhelm.land.data2)
# Rarefaction curve plots, one per region
p1 <- ggiNEXT(coats.land.iNext)
p2 <- ggiNEXT(scotia.arc.iNext)
p3 <- ggiNEXT(scotia.island.iNext)
p4 <- ggiNEXT(terres.australes.iNext)
p5 <- ggiNEXT(wilhelm.land.iNext)
# Arrange all five plots on one frame for comparison
grid.arrange(p1, p2, p3, p4, p5, nrow = 3)
####determining simpson's diversity for each region ----
# ChaoSimpson estimates Simpson diversity from the abundance data, with
# B = 200 bootstrap replicates for the confidence intervals.
ChaoSimpson(coats.land.data2, datatype = "abundance", B=200)
ChaoSimpson(scotia.arc.data2, datatype = "abundance", B=200)
ChaoSimpson(scotia.island.data2, datatype = "abundance", B=200)
ChaoSimpson(terres.austales.data2, datatype = "abundance", B=200)
ChaoSimpson(wilhelm.land.data2, datatype = "abundance", B=200)
####conclusions ----
# Using data associated with bin numbers from the BOLD database, a species of Echinodermata was selected and a group of species that it consistently forms communities with could be found.
# The species with a large number of records was selected, Promachocrinus kerguelensis (BOLD:ACF3333). The regions where it was collected most were determined.
# Over the five regions of Antarctica where P. kerguelensis was collected most, it was consistently found in communities containing only a small group of other Echinodermata (figure 1).
# A cluster dendrogram was then created to determine which regions had the most similar communities using single linkage clustering (figure 2).
# Sample-size-based rarefaction curves were created based on the number of records collected in each region (figure 3).
# A strong correlation was observed between the records collected from the species with these BOLD numbers in these communities.
# A concern is that several of these BOLD bin uri's were linked to the same species including: ACF3333, AAA0602, and ABZ8776.
# Further investigation may prove that these are truly different species.
# Terres Australes Francaises has the highest diversity (0.808), while Wilhelm II Land had the lowest diversity of 0.35.
#
|
#! /usr/bin/Rscript
##
# This script sums metabolite data across all monte carlo trials, per
# cell per time
#
# -gepr 2013-05-15
# -aks 2017-07-25
# Command-line arguments: one or more experiment directories to process
argv <- commandArgs(TRUE)
if (length(argv) <= 0) {
print("Usage: mobileObject.r <exp directories>")
print("    directories should contain files named mobileObject_zone_1_2-[0-9]+.csv")
quit()
}
# for the color space max and min
# NOTE(review): minmean/maxmean are not referenced anywhere below in this
# script -- possibly leftovers from a plotting version; confirm before removal.
minmean <- 9e10
maxmean <- -9e10
# Sum the per-cell metabolite columns belonging to one zone across all trial
# CSV files found under `path`, average them by the number of column groups
# encountered, and write the averages to "<path>_<fileNameRoot>-<zone>.csv".
# Returns list(time = time column from the first file read,
#              zData = raw (unaveraged) column sums,
#              hepCount = the divisor used for averaging).
avgPerZone <- function(path, fileNameRoot, extractZone) {
  timeisset <- FALSE
  ei <- vector()  # NOTE(review): never used below; candidate for removal
  # get all the zone 1&2 reaction product files in that experiment
  files <- list.files(path = path,
                      pattern = paste(fileNameRoot,"_",extractZone,"-[0-9]+.csv",sep=""),
                      recursive = TRUE)
  # for each node, for each time, sum over all files
  # for each file
  zoneData <- vector()
  count <- 1  # NOTE(review): starts at 1 and is incremented once per matched
              # column group, so after the loop count == matches + 1; the
              # division by count below therefore averages over one more than
              # the number of summed groups -- confirm this is intended.
  for (f in files) {
    rxndata <- read.csv(file = paste(path, f, sep="/"), colClasses = "numeric")
    dims <- dim(rxndata)
    cat("Read ", dims, " from file ", f, "\n")
    # time column: captured once, from the first file read
    if (timeisset == FALSE) {
      rxn <- rxndata[1]
      timeisset <- TRUE
    }
    # Extract the 6th dot-separated field after the 'X' prefix of each column
    # name -- presumably the mobile-object name; verify against the CSV header.
    rxnnames <- list()
    for (c in 2:length(rxndata))
      rxnnames[c-1] <- unlist(strsplit(unlist(strsplit(colnames(rxndata)[c],'X'))[2],'\\.'))[6]
    umn <- unique(rxnnames)
    numnames <- length(umn)
    # the rest of the columns into respective zones: columns come in groups of
    # numnames; the group's zone id is the 1st dot-separated field of its name
    for (colNdx in seq(2,length(rxndata),numnames)) {
      if (unlist(strsplit(unlist(strsplit(colnames(rxndata)[colNdx],'X'))[2],'\\.'))[1] == extractZone) {
        for (mn in 1:numnames) {
          if (count == 1) {
            # first matched group: establish the accumulator columns
            zoneData <- cbind(zoneData, rxndata[,(colNdx+mn-1)])
          } else zoneData[,mn] <- zoneData[,mn] + rxndata[,(colNdx+mn-1)]
        }
        count <- count + 1
      }
    } # end colNdx loop
  }
  time <- rxn
  zData <- zoneData  # keep the raw sums before averaging, for the caller
  hepCount <- count
  zoneData <- zoneData/count
  print("Binding time, zoneData")
  # table of reaction product per cell for each time
  rxn <- cbind(rxn, zoneData)
  # set the column names to "Z<zone> <mobile object name>"
  zoneNames <- c()
  for (hn in umn) {
    zoneNames <- c(zoneNames, paste("Z",extractZone, hn))
  }
  colnames(rxn) <- c("Time", zoneNames)
  colnames(zData) <- umn
  print("writing data to the file")
  # write the rxn sums data to a file
  write.csv(x=rxn,
            file=paste(path, "_", fileNameRoot, "-",extractZone,".csv", sep=""),
            row.names=FALSE)
  output <- list("time" = time, "zData" = zData, "hepCount" = hepCount)
  return(output)
}
# for each experiment directory given on the command line
for (expDir in argv) {
  # per-zone sums/averages for zones 0, 1 and 2
  z0 <- avgPerZone(expDir, "mobileObject_zone", 0)
  z1 <- avgPerZone(expDir, "mobileObject_zone", 1)
  z2 <- avgPerZone(expDir, "mobileObject_zone", 2)
  time <- z0[[1]]
  z0data <- z0[[2]]    # raw (unaveraged) sums for zone 0
  z0hepcount <- z0[[3]]
  print(paste("z0hepcount = ",z0hepcount))
  z1data <- z1[[2]]
  z1hepcount <- z1[[3]]
  print(paste("z1hepcount = ",z1hepcount))
  z2data <- z2[[2]]
  z2hepcount <- z2[[3]]
  print(paste("z2hepcount = ",z2hepcount))
  # overall totals: sum the raw per-zone sums and counts, then average once
  total_data <- z0data + z1data + z2data
  total_count <- z0hepcount + z1hepcount + z2hepcount
  print(paste("total hepcount = ",total_count))
  totalperH <- total_data/total_count
  # create data structure to output to file
  data2file <- cbind("Time" = time, totalperH)
  # write data to file
  write.csv(x=data2file,file=paste(expDir, "_mobileObject_total.csv", sep=""),row.names=FALSE)
}
q()
| /Analysis/mobileObject.r | no_license | AndroidSim/Virtual-Experiments | R | false | false | 3,655 | r | #! /usr/bin/Rscript
##
# This script sums metabolite data across all monte carlo trials, per
# cell per time
#
# -gepr 2013-05-15
# -aks 2017-07-25
# Command-line arguments: one or more experiment directories to process
argv <- commandArgs(TRUE)
if (length(argv) <= 0) {
print("Usage: mobileObject.r <exp directories>")
print("    directories should contain files named mobileObject_zone_1_2-[0-9]+.csv")
quit()
}
# for the color space max and min
# NOTE(review): minmean/maxmean are not referenced anywhere below in this
# script -- possibly leftovers from a plotting version; confirm before removal.
minmean <- 9e10
maxmean <- -9e10
# Sum the per-cell metabolite columns belonging to one zone across all trial
# CSV files found under `path`, average them by the number of column groups
# encountered, and write the averages to "<path>_<fileNameRoot>-<zone>.csv".
# Returns list(time = time column from the first file read,
#              zData = raw (unaveraged) column sums,
#              hepCount = the divisor used for averaging).
avgPerZone <- function(path, fileNameRoot, extractZone) {
  timeisset <- FALSE
  ei <- vector()  # NOTE(review): never used below; candidate for removal
  # get all the zone 1&2 reaction product files in that experiment
  files <- list.files(path = path,
                      pattern = paste(fileNameRoot,"_",extractZone,"-[0-9]+.csv",sep=""),
                      recursive = TRUE)
  # for each node, for each time, sum over all files
  # for each file
  zoneData <- vector()
  count <- 1  # NOTE(review): starts at 1 and is incremented once per matched
              # column group, so after the loop count == matches + 1; the
              # division by count below therefore averages over one more than
              # the number of summed groups -- confirm this is intended.
  for (f in files) {
    rxndata <- read.csv(file = paste(path, f, sep="/"), colClasses = "numeric")
    dims <- dim(rxndata)
    cat("Read ", dims, " from file ", f, "\n")
    # time column: captured once, from the first file read
    if (timeisset == FALSE) {
      rxn <- rxndata[1]
      timeisset <- TRUE
    }
    # Extract the 6th dot-separated field after the 'X' prefix of each column
    # name -- presumably the mobile-object name; verify against the CSV header.
    rxnnames <- list()
    for (c in 2:length(rxndata))
      rxnnames[c-1] <- unlist(strsplit(unlist(strsplit(colnames(rxndata)[c],'X'))[2],'\\.'))[6]
    umn <- unique(rxnnames)
    numnames <- length(umn)
    # the rest of the columns into respective zones: columns come in groups of
    # numnames; the group's zone id is the 1st dot-separated field of its name
    for (colNdx in seq(2,length(rxndata),numnames)) {
      if (unlist(strsplit(unlist(strsplit(colnames(rxndata)[colNdx],'X'))[2],'\\.'))[1] == extractZone) {
        for (mn in 1:numnames) {
          if (count == 1) {
            # first matched group: establish the accumulator columns
            zoneData <- cbind(zoneData, rxndata[,(colNdx+mn-1)])
          } else zoneData[,mn] <- zoneData[,mn] + rxndata[,(colNdx+mn-1)]
        }
        count <- count + 1
      }
    } # end colNdx loop
  }
  time <- rxn
  zData <- zoneData  # keep the raw sums before averaging, for the caller
  hepCount <- count
  zoneData <- zoneData/count
  print("Binding time, zoneData")
  # table of reaction product per cell for each time
  rxn <- cbind(rxn, zoneData)
  # set the column names to "Z<zone> <mobile object name>"
  zoneNames <- c()
  for (hn in umn) {
    zoneNames <- c(zoneNames, paste("Z",extractZone, hn))
  }
  colnames(rxn) <- c("Time", zoneNames)
  colnames(zData) <- umn
  print("writing data to the file")
  # write the rxn sums data to a file
  write.csv(x=rxn,
            file=paste(path, "_", fileNameRoot, "-",extractZone,".csv", sep=""),
            row.names=FALSE)
  output <- list("time" = time, "zData" = zData, "hepCount" = hepCount)
  return(output)
}
# for each experiment directory given on the command line
for (expDir in argv) {
  # per-zone sums/averages for zones 0, 1 and 2
  z0 <- avgPerZone(expDir, "mobileObject_zone", 0)
  z1 <- avgPerZone(expDir, "mobileObject_zone", 1)
  z2 <- avgPerZone(expDir, "mobileObject_zone", 2)
  time <- z0[[1]]
  z0data <- z0[[2]]    # raw (unaveraged) sums for zone 0
  z0hepcount <- z0[[3]]
  print(paste("z0hepcount = ",z0hepcount))
  z1data <- z1[[2]]
  z1hepcount <- z1[[3]]
  print(paste("z1hepcount = ",z1hepcount))
  z2data <- z2[[2]]
  z2hepcount <- z2[[3]]
  print(paste("z2hepcount = ",z2hepcount))
  # overall totals: sum the raw per-zone sums and counts, then average once
  total_data <- z0data + z1data + z2data
  total_count <- z0hepcount + z1hepcount + z2hepcount
  print(paste("total hepcount = ",total_count))
  totalperH <- total_data/total_count
  # create data structure to output to file
  data2file <- cbind("Time" = time, totalperH)
  # write data to file
  write.csv(x=data2file,file=paste(expDir, "_mobileObject_total.csv", sep=""),row.names=FALSE)
}
q()
|
# Random Effects Models ----
#Updated 9/1/2020 by C. Tribuzio
#adapted from Dana's ranef.r code, double checked by Pete
# Still to do list ----
##1) add in option to turn off the subregions
##2) make start year adaptable, currently set at first year of survey
##3) make end year adaptable
##4) why does regional==F return repeats?
# Packages ----
library(plyr)
library(reshape2)
library(stringr)
# Function ----
#' Fit the ADMB random-effects (RFX) model to RACE survey biomass and write
#' smoothed estimates for every survey x group combination to a CSV.
#'
#' @param outname Character; must match the RACE biomass file, i.e. the input
#'   is "<datadir>/RACE_Biomass_<outname>.csv" and the output is
#'   "<outdir>RFX_Biomass_<outname>.csv" (outdir is used as a bare prefix, so
#'   it should end in "/" -- TODO confirm against callers).
#' @param AYR Assessment year. Currently unused in the body; retained so
#'   existing calls keep working.
#' @param endyr Last model year for every fitted series.
#' @param datadir Directory holding the RACE biomass CSV.
#' @param outdir Output path prefix for the results CSV.
#' @param regional If TRUE (default), GOA/AI surveys are fit by regulatory
#'   area; if FALSE only the whole-survey series is fit.
#' @return Nothing useful; called for side effects (writes re.dat, runs the
#'   compiled ADMB model in `codedir`, writes the results CSV).
#' @note Relies on the global `codedir` containing a compiled ADMB `re.exe`,
#'   and on reshape2 (dcast/melt) and stringr (str_detect) being attached.
RFX_fx <- function(outname, AYR, endyr, datadir, outdir, regional = TRUE){
  # Data prep ----
  RFX_data <- read.csv(paste(datadir, "/RACE_Biomass_", outname, ".csv", sep = ""), header = TRUE)
  RFX_data$SE[RFX_data$SE == 0] <- 0.1              # model can't have zero SE
  RFX_data$Variance[RFX_data$Variance == 0] <- 0.1  # ...or zero variance
  RFX_data$CV[RFX_data$CV == 0] <- 999              # flag impossible CVs
  unqkey <- unique(RFX_data[, c("SURVEY", "Group")])  # one RFX model per survey x group
  # Runs models by each reg area as well as whole surveys.
  # Does not deal with separate depths yet.
  # Results accumulator. A plain data.frame (not a 0-row matrix): the
  # original matrix's column names did not match `rout` below, and the empty
  # matrix was silently dropped by rbind.data.frame anyway.
  outmat <- data.frame()
  ### loop through each group and survey/area to be modeled ----
  for (i in seq_len(nrow(unqkey))) {
    loopdat <- RFX_data[RFX_data$SURVEY == unqkey[i, 1] &
                          RFX_data$Group == unqkey[i, 2], ]
    styr <- min(loopdat$YEAR)  # first model year = first survey year (to-do 2)
    ### .dat build for ADMB ----
    yrs_srv <- unique(loopdat$YEAR)  # years which have data
    nobs <- length(yrs_srv)          # number of years with data
    yrs <- c(styr, endyr)
    # Default: fit everything in loopdat. Fixes a bug where `ld2` was left
    # undefined (first iteration) or stale (later iterations) when SURVEY was
    # neither GOA, AI nor EBS_* with regional = TRUE.
    ld2 <- loopdat
    # loopdat has full-survey and regional rows; drop the full-survey row for
    # the GOA and AI surveys so only sub-regions are fit.
    if (unqkey[i, 1] == "GOA") ld2 <- loopdat[loopdat$SURVEY == "GOA" & loopdat$REGULATORY_AREA_NAME != "GOA", ]
    if (unqkey[i, 1] == "AI") ld2 <- loopdat[loopdat$SURVEY == "AI" & loopdat$REGULATORY_AREA_NAME != "AI", ]
    # Full-survey-only fit when regional is turned off.
    if (regional == FALSE) ld2 <- loopdat[loopdat$SURVEY == unqkey[i, 1] &
                                            loopdat$REGULATORY_AREA_NAME == as.character(unqkey[i, 1]), ]
    # Neither EBS survey has sub-regions in this code, so loopdat is used as-is.
    if (str_detect(unqkey[i, 1], "^(EBS_)")) ld2 <- loopdat
    regnames <- unique(ld2$REGULATORY_AREA_NAME)
    nregs <- length(regnames)
    PEI <- rep(1, nregs)  # every region shares process-error parameter 1
    # Year x region tables of biomass and SE; "-9" is the ADMB missing flag.
    # NOTE(review): biomass is cast from ld2 but SE from loopdat -- confirm intended.
    tempB <- dcast(ld2, YEAR ~ REGULATORY_AREA_NAME, value.var = "Biomass", fun.aggregate = mean)
    srv_est <- tempB[, names(tempB) %in% regnames]
    srv_est[is.na(srv_est)] <- "-9"
    tempSE <- dcast(loopdat, YEAR ~ REGULATORY_AREA_NAME, value.var = "SE", fun.aggregate = mean)
    srv_SE <- tempSE[, names(tempSE) %in% regnames]
    srv_SE[is.na(srv_SE)] <- "-9"
    # (The original called unname() here with the result discarded -- a no-op;
    # write.table below uses col.names = FALSE so names are never written.)
    # This creates the .dat file for ADMB.
    cat("# Model start and end years", "\n", yrs, "\n",
        "# Number of survey indices fit (i.e., regions/depth strata)", "\n", nregs, "\n",
        "# Number or process error parameters", "\n", 1, "\n",
        "# Process error index", "\n", PEI, "\n",
        "# Number of surveys", "\n", nobs, "\n",
        "# Survey years", "\n", yrs_srv, "\n",
        "# Survey biomass", "\n",
        sep = " ", file = paste(codedir, "/re.dat", sep = ""))
    write.table(srv_est, file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    write.table("# Survey biomass SE", file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    write.table(srv_SE, file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    ### ADMB compiled model ----
    # Run inside codedir so ADMB output lands in the right place; restore the
    # working directory afterwards. Failures surface as an empty/stale rwout.rep.
    projdir <- getwd()
    setwd(codedir)
    try(system("re.exe"), silent = TRUE)
    setwd(projdir)
    ### Summary ----
    # Offsets into rwout.rep: skip = lines before each section, nlines = model
    # years to read. These are tied to the ADMB report layout -- TODO confirm
    # against the rwout.rep template if the model is ever changed.
    totyr <- endyr - styr + 1
    modyrs <- seq(styr, endyr)
    LLst <- 17 + (nobs + 1) * 2 + 2         # start of biomass lower limits
    bst <- LLst + totyr + 1                 # start of biomass estimates
    ULst <- bst + totyr + 1                 # start of biomass upper limits
    CVst <- (ULst + totyr + 1) + totyr + 1  # start of biomass CVs
    # Regional estimates exist only for non-EBS surveys (four sections merged
    # into one guard -- the condition is invariant within an iteration).
    if (!str_detect(unqkey[i, 1], "^(EBS_)")) {
      # Biomass
      re_biom <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = bst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biom) == 0) re_biom <- matrix(nrow = totyr, ncol = nregs, 0)  # model failure: fill zeros
      re_biom <- cbind(modyrs, re_biom)
      colnames(re_biom) <- c("YEAR", as.character(regnames))
      re_b2 <- try(melt(as.data.frame(re_biom), id = c("YEAR")), silent = TRUE)
      if ('try-error' %in% class(re_b2)) next  # unreadable output: skip this model
      colnames(re_b2) <- c("YEAR", "REGULATORY_AREA_NAME", "Biomass")
      # CV
      re_biomCV <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = CVst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomCV) == 0) re_biomCV <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomCV <- cbind(modyrs, re_biomCV)
      colnames(re_biomCV) <- c("YEAR", as.character(regnames))
      re_CV2 <- melt(as.data.frame(re_biomCV), id = c("YEAR"))
      colnames(re_CV2) <- c("YEAR", "REGULATORY_AREA_NAME", "CV")
      # Biomass LL
      re_biomLL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = LLst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomLL) == 0) re_biomLL <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomLL <- cbind(modyrs, re_biomLL)
      colnames(re_biomLL) <- c("YEAR", as.character(regnames))
      re_bLL2 <- melt(as.data.frame(re_biomLL), id = c("YEAR"))
      colnames(re_bLL2) <- c("YEAR", "REGULATORY_AREA_NAME", "LL")
      # Biomass UL
      re_biomUL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = ULst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomUL) == 0) re_biomUL <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomUL <- cbind(modyrs, re_biomUL)
      colnames(re_biomUL) <- c("YEAR", as.character(regnames))
      re_bUL2 <- melt(as.data.frame(re_biomUL), id = c("YEAR"))
      colnames(re_bUL2) <- c("YEAR", "REGULATORY_AREA_NAME", "UL")
    }
    # Total survey area series (always present). NOTE(review): LL is read at
    # skip = 13 and UL at skip = 11 -- preserved from the original; confirm
    # against the rwout.rep layout.
    re_biomSURVEY <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 7), ncol = 1, byrow = TRUE)
    re_biomSURVEY <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEY))
    if (nrow(re_biomSURVEY) == 0) re_biomSURVEY <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEY) <- c("YEAR", "REGULATORY_AREA_NAME", "Biomass")
    re_biomSURVEYCV <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 9), ncol = 1, byrow = TRUE)
    re_biomSURVEYCV <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYCV))
    if (nrow(re_biomSURVEYCV) == 0) re_biomSURVEYCV <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYCV) <- c("YEAR", "REGULATORY_AREA_NAME", "CV")
    re_biomSURVEYLL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 13), ncol = 1, byrow = TRUE)
    re_biomSURVEYLL <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYLL))
    if (nrow(re_biomSURVEYLL) == 0) re_biomSURVEYLL <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYLL) <- c("YEAR", "REGULATORY_AREA_NAME", "LL")
    re_biomSURVEYUL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 11), ncol = 1, byrow = TRUE)
    re_biomSURVEYUL <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYUL))
    if (nrow(re_biomSURVEYUL) == 0) re_biomSURVEYUL <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYUL) <- c("YEAR", "REGULATORY_AREA_NAME", "UL")
    ## Assemble results: regional + whole-survey rows for non-EBS surveys,
    ## whole-survey only for EBS. (Plain if/else replaces the original scalar
    ## misuse of ifelse(), which only worked via lazy argument evaluation.)
    if (!str_detect(unqkey[i, 1], "^(EBS_)")) {
      rB <- data.frame(rbind(re_b2, re_biomSURVEY))
      rCV <- data.frame(rbind(re_CV2, re_biomSURVEYCV))
      rLL <- data.frame(rbind(re_bLL2, re_biomSURVEYLL))
      rUL <- data.frame(rbind(re_bUL2, re_biomSURVEYUL))
    } else {
      rB <- re_biomSURVEY
      rCV <- re_biomSURVEYCV
      rLL <- re_biomSURVEYLL
      rUL <- re_biomSURVEYUL
    }
    rout <- merge(rB, rCV, by = c("YEAR", "REGULATORY_AREA_NAME"))
    rout <- merge(rout, rLL, by = c("YEAR", "REGULATORY_AREA_NAME"))
    rout <- merge(rout, rUL, by = c("YEAR", "REGULATORY_AREA_NAME"))
    colnames(rout) <- c("YEAR", "REGULATORY_AREA_NAME", "Biom_est", "Biom_CV", "Biom_LL", "Biom_UL")
    rout$Group <- unqkey[i, 2]
    outmat <- rbind(outmat, rout)
  }
  ## write to output directory
  write.csv(outmat, paste(outdir, "RFX_Biomass_", outname, ".csv", sep = ""), row.names = FALSE)
}
| /Code/RFX/RFX_functions.R | no_license | CindyTribuzio-NOAA/Tier_4_5_Improvements | R | false | false | 8,935 | r | # Random Effects Models ----
#Updated 9/1/2020 by C. Tribuzio
#adapted from Dana's ranef.r code, double checked by Pete
# Still to do list ----
##1) add in option to turn off the subregions
##2) make start year adaptable, currently set at first year of survey
##3) make end year adaptable
##4) why does regional==F return repeats?
# Packages ----
library(plyr)
library(reshape2)
library(stringr)
# Function ----
#' Fit the ADMB random-effects (RFX) model to RACE survey biomass and write
#' smoothed estimates for every survey x group combination to a CSV.
#'
#' @param outname Character; must match the RACE biomass file, i.e. the input
#'   is "<datadir>/RACE_Biomass_<outname>.csv" and the output is
#'   "<outdir>RFX_Biomass_<outname>.csv" (outdir is used as a bare prefix, so
#'   it should end in "/" -- TODO confirm against callers).
#' @param AYR Assessment year. Currently unused in the body; retained so
#'   existing calls keep working.
#' @param endyr Last model year for every fitted series.
#' @param datadir Directory holding the RACE biomass CSV.
#' @param outdir Output path prefix for the results CSV.
#' @param regional If TRUE (default), GOA/AI surveys are fit by regulatory
#'   area; if FALSE only the whole-survey series is fit.
#' @return Nothing useful; called for side effects (writes re.dat, runs the
#'   compiled ADMB model in `codedir`, writes the results CSV).
#' @note Relies on the global `codedir` containing a compiled ADMB `re.exe`,
#'   and on reshape2 (dcast/melt) and stringr (str_detect) being attached.
RFX_fx <- function(outname, AYR, endyr, datadir, outdir, regional = TRUE){
  # Data prep ----
  RFX_data <- read.csv(paste(datadir, "/RACE_Biomass_", outname, ".csv", sep = ""), header = TRUE)
  RFX_data$SE[RFX_data$SE == 0] <- 0.1              # model can't have zero SE
  RFX_data$Variance[RFX_data$Variance == 0] <- 0.1  # ...or zero variance
  RFX_data$CV[RFX_data$CV == 0] <- 999              # flag impossible CVs
  unqkey <- unique(RFX_data[, c("SURVEY", "Group")])  # one RFX model per survey x group
  # Runs models by each reg area as well as whole surveys.
  # Does not deal with separate depths yet.
  # Results accumulator. A plain data.frame (not a 0-row matrix): the
  # original matrix's column names did not match `rout` below, and the empty
  # matrix was silently dropped by rbind.data.frame anyway.
  outmat <- data.frame()
  ### loop through each group and survey/area to be modeled ----
  for (i in seq_len(nrow(unqkey))) {
    loopdat <- RFX_data[RFX_data$SURVEY == unqkey[i, 1] &
                          RFX_data$Group == unqkey[i, 2], ]
    styr <- min(loopdat$YEAR)  # first model year = first survey year (to-do 2)
    ### .dat build for ADMB ----
    yrs_srv <- unique(loopdat$YEAR)  # years which have data
    nobs <- length(yrs_srv)          # number of years with data
    yrs <- c(styr, endyr)
    # Default: fit everything in loopdat. Fixes a bug where `ld2` was left
    # undefined (first iteration) or stale (later iterations) when SURVEY was
    # neither GOA, AI nor EBS_* with regional = TRUE.
    ld2 <- loopdat
    # loopdat has full-survey and regional rows; drop the full-survey row for
    # the GOA and AI surveys so only sub-regions are fit.
    if (unqkey[i, 1] == "GOA") ld2 <- loopdat[loopdat$SURVEY == "GOA" & loopdat$REGULATORY_AREA_NAME != "GOA", ]
    if (unqkey[i, 1] == "AI") ld2 <- loopdat[loopdat$SURVEY == "AI" & loopdat$REGULATORY_AREA_NAME != "AI", ]
    # Full-survey-only fit when regional is turned off.
    if (regional == FALSE) ld2 <- loopdat[loopdat$SURVEY == unqkey[i, 1] &
                                            loopdat$REGULATORY_AREA_NAME == as.character(unqkey[i, 1]), ]
    # Neither EBS survey has sub-regions in this code, so loopdat is used as-is.
    if (str_detect(unqkey[i, 1], "^(EBS_)")) ld2 <- loopdat
    regnames <- unique(ld2$REGULATORY_AREA_NAME)
    nregs <- length(regnames)
    PEI <- rep(1, nregs)  # every region shares process-error parameter 1
    # Year x region tables of biomass and SE; "-9" is the ADMB missing flag.
    # NOTE(review): biomass is cast from ld2 but SE from loopdat -- confirm intended.
    tempB <- dcast(ld2, YEAR ~ REGULATORY_AREA_NAME, value.var = "Biomass", fun.aggregate = mean)
    srv_est <- tempB[, names(tempB) %in% regnames]
    srv_est[is.na(srv_est)] <- "-9"
    tempSE <- dcast(loopdat, YEAR ~ REGULATORY_AREA_NAME, value.var = "SE", fun.aggregate = mean)
    srv_SE <- tempSE[, names(tempSE) %in% regnames]
    srv_SE[is.na(srv_SE)] <- "-9"
    # (The original called unname() here with the result discarded -- a no-op;
    # write.table below uses col.names = FALSE so names are never written.)
    # This creates the .dat file for ADMB.
    cat("# Model start and end years", "\n", yrs, "\n",
        "# Number of survey indices fit (i.e., regions/depth strata)", "\n", nregs, "\n",
        "# Number or process error parameters", "\n", 1, "\n",
        "# Process error index", "\n", PEI, "\n",
        "# Number of surveys", "\n", nobs, "\n",
        "# Survey years", "\n", yrs_srv, "\n",
        "# Survey biomass", "\n",
        sep = " ", file = paste(codedir, "/re.dat", sep = ""))
    write.table(srv_est, file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    write.table("# Survey biomass SE", file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    write.table(srv_SE, file = paste(codedir, "/re.dat", sep = ""), sep = " ", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    ### ADMB compiled model ----
    # Run inside codedir so ADMB output lands in the right place; restore the
    # working directory afterwards. Failures surface as an empty/stale rwout.rep.
    projdir <- getwd()
    setwd(codedir)
    try(system("re.exe"), silent = TRUE)
    setwd(projdir)
    ### Summary ----
    # Offsets into rwout.rep: skip = lines before each section, nlines = model
    # years to read. These are tied to the ADMB report layout -- TODO confirm
    # against the rwout.rep template if the model is ever changed.
    totyr <- endyr - styr + 1
    modyrs <- seq(styr, endyr)
    LLst <- 17 + (nobs + 1) * 2 + 2         # start of biomass lower limits
    bst <- LLst + totyr + 1                 # start of biomass estimates
    ULst <- bst + totyr + 1                 # start of biomass upper limits
    CVst <- (ULst + totyr + 1) + totyr + 1  # start of biomass CVs
    # Regional estimates exist only for non-EBS surveys (four sections merged
    # into one guard -- the condition is invariant within an iteration).
    if (!str_detect(unqkey[i, 1], "^(EBS_)")) {
      # Biomass
      re_biom <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = bst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biom) == 0) re_biom <- matrix(nrow = totyr, ncol = nregs, 0)  # model failure: fill zeros
      re_biom <- cbind(modyrs, re_biom)
      colnames(re_biom) <- c("YEAR", as.character(regnames))
      re_b2 <- try(melt(as.data.frame(re_biom), id = c("YEAR")), silent = TRUE)
      if ('try-error' %in% class(re_b2)) next  # unreadable output: skip this model
      colnames(re_b2) <- c("YEAR", "REGULATORY_AREA_NAME", "Biomass")
      # CV
      re_biomCV <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = CVst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomCV) == 0) re_biomCV <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomCV <- cbind(modyrs, re_biomCV)
      colnames(re_biomCV) <- c("YEAR", as.character(regnames))
      re_CV2 <- melt(as.data.frame(re_biomCV), id = c("YEAR"))
      colnames(re_CV2) <- c("YEAR", "REGULATORY_AREA_NAME", "CV")
      # Biomass LL
      re_biomLL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = LLst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomLL) == 0) re_biomLL <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomLL <- cbind(modyrs, re_biomLL)
      colnames(re_biomLL) <- c("YEAR", as.character(regnames))
      re_bLL2 <- melt(as.data.frame(re_biomLL), id = c("YEAR"))
      colnames(re_bLL2) <- c("YEAR", "REGULATORY_AREA_NAME", "LL")
      # Biomass UL
      re_biomUL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = totyr, skip = ULst), ncol = nregs, byrow = TRUE)
      if (nrow(re_biomUL) == 0) re_biomUL <- matrix(nrow = totyr, ncol = nregs, 0)
      re_biomUL <- cbind(modyrs, re_biomUL)
      colnames(re_biomUL) <- c("YEAR", as.character(regnames))
      re_bUL2 <- melt(as.data.frame(re_biomUL), id = c("YEAR"))
      colnames(re_bUL2) <- c("YEAR", "REGULATORY_AREA_NAME", "UL")
    }
    # Total survey area series (always present). NOTE(review): LL is read at
    # skip = 13 and UL at skip = 11 -- preserved from the original; confirm
    # against the rwout.rep layout.
    re_biomSURVEY <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 7), ncol = 1, byrow = TRUE)
    re_biomSURVEY <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEY))
    if (nrow(re_biomSURVEY) == 0) re_biomSURVEY <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEY) <- c("YEAR", "REGULATORY_AREA_NAME", "Biomass")
    re_biomSURVEYCV <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 9), ncol = 1, byrow = TRUE)
    re_biomSURVEYCV <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYCV))
    if (nrow(re_biomSURVEYCV) == 0) re_biomSURVEYCV <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYCV) <- c("YEAR", "REGULATORY_AREA_NAME", "CV")
    re_biomSURVEYLL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 13), ncol = 1, byrow = TRUE)
    re_biomSURVEYLL <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYLL))
    if (nrow(re_biomSURVEYLL) == 0) re_biomSURVEYLL <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYLL) <- c("YEAR", "REGULATORY_AREA_NAME", "LL")
    re_biomSURVEYUL <- matrix(scan(file = paste(codedir, "/rwout.rep", sep = ""), nlines = 1, skip = 11), ncol = 1, byrow = TRUE)
    re_biomSURVEYUL <- as.data.frame(cbind(seq(styr, endyr), as.character(unqkey[i, 1]), re_biomSURVEYUL))
    if (nrow(re_biomSURVEYUL) == 0) re_biomSURVEYUL <- cbind(modyrs, as.data.frame(as.character(unqkey[i, 1])), 0)
    colnames(re_biomSURVEYUL) <- c("YEAR", "REGULATORY_AREA_NAME", "UL")
    ## Assemble results: regional + whole-survey rows for non-EBS surveys,
    ## whole-survey only for EBS. (Plain if/else replaces the original scalar
    ## misuse of ifelse(), which only worked via lazy argument evaluation.)
    if (!str_detect(unqkey[i, 1], "^(EBS_)")) {
      rB <- data.frame(rbind(re_b2, re_biomSURVEY))
      rCV <- data.frame(rbind(re_CV2, re_biomSURVEYCV))
      rLL <- data.frame(rbind(re_bLL2, re_biomSURVEYLL))
      rUL <- data.frame(rbind(re_bUL2, re_biomSURVEYUL))
    } else {
      rB <- re_biomSURVEY
      rCV <- re_biomSURVEYCV
      rLL <- re_biomSURVEYLL
      rUL <- re_biomSURVEYUL
    }
    rout <- merge(rB, rCV, by = c("YEAR", "REGULATORY_AREA_NAME"))
    rout <- merge(rout, rLL, by = c("YEAR", "REGULATORY_AREA_NAME"))
    rout <- merge(rout, rUL, by = c("YEAR", "REGULATORY_AREA_NAME"))
    colnames(rout) <- c("YEAR", "REGULATORY_AREA_NAME", "Biom_est", "Biom_CV", "Biom_LL", "Biom_UL")
    rout$Group <- unqkey[i, 2]
    outmat <- rbind(outmat, rout)
  }
  ## write to output directory
  write.csv(outmat, paste(outdir, "RFX_Biomass_", outname, ".csv", sep = ""), row.names = FALSE)
}
|
/grafica_especies_distancia.R | no_license | JSoriano2/sortidacamp | R | false | false | 403 | r |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.