content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
#Set working directory setwd("~/Documents/Projects/PoritesRADseq/P.ast-transplant/analyses/SymbiontGenoytping/") #Get list of filenames to import with *.blast suffix filenames <- list.files(pattern="*.blast", full.names=TRUE) #import files as a list of dataframes sym <- lapply(filenames, read.delim, sep = "\t", skip =1, header = F) #extract sample names from filenames sampnames <- gsub('^.*./\\s*|\\s*_.*$', '', filenames) #add sample names to list sym <- Map(cbind, sym, sample = sampnames) #determine clades from blast output clade <- lapply(sym, function(x) ifelse(grepl("sp. A|clade A", x$V3, ignore.case = T), "A", ifelse(grepl("sp. B|clade B", x$V3, ignore.case = T), "B", ifelse(grepl("sp. C|clade C", x$V3, ignore.case = T), "C", ifelse(grepl("sp. D|clade D", x$V3, ignore.case = T), "D", NA))))) #add clades to list sym2 <- mapply(cbind, sym, clade = clade, SIMPLIFY = FALSE) #remove instances where one read mapped to more than one clade sym3 <- lapply(sym2, function(x) x[!duplicated(c(x$V1, x$clade)),]) #rename list elements with sample names names(sym3) <- sampnames #bind rows library(dplyr) sym4 <- bind_rows(sym3, .id="df") #remove all instances of 18S rDNA, since this gives ambiguous #matches to many clades sym5 <- sym4[!grepl("18S", sym4$V3),] #omit NA values sym6 <- na.omit(sym5) library(ggplot2) #summarize data symsum <- sym6 %>% group_by(sample,clade) %>% summarise(count=n()) %>% mutate(perc=count/sum(count)) #add vector for species symsum$species <- c(rep("Branching Porites", 32), rep("Porites astreoides", 72)) library(ggstance) #plot it ggplot(subset(symsum,sample %in% c("pa10-15", "pa10-16","pa11-15", "pa11-16", "pa2-15", "pa2-16", "pa3-15", "pa3-16", "pa5-15", "pa5-16", "pa6-15", "pa6-16", "pa8-15", "pa8-16", "pa9-15", "pa9-16", "103", "104", "105", "124", "125", "126", "128", "129", "130", "131"))) + aes(x = perc, y = sample, fill = factor(clade)) + geom_barh(stat="identity", width = 0.9) + scale_x_continuous(limits = c(0,1), expand = c(0, 0))+ 
scale_fill_manual(values=c("#66c2a5","#fc8d62","#8da0cb","#e78ac3"))+ labs(x = "Proportion", fill = "Clade") + theme_gray()+ theme(axis.text.y = element_blank(), axis.ticks.y=element_blank(), axis.title.y = element_blank(), legend.position="top")+ facet_grid(species ~ ., scales = "free_y", space = "free_y") #plot before and after transplantation ggplot(subset(symsum,sample %in% c("pa10-15", "pa11-15", "pa12-15", "pa17-15", "pa18-15", "pa2-15", "pa3-15", "pa5-15", "pa6-15", "pa7-15", "pa8-15", "pa9-15", "pa10-16", "pa11-16", "pa12-16", "pa17-16", "pa18-16", "pa2-16", "pa3-16", "pa5-16", "pa6-16", "pa7-16", "pa8-16", "pa9-16"))) + aes(x = factor(sample), y = perc*100, fill = factor(clade)) + geom_bar(stat="identity", width = 0.7) + labs(x = "Sample", y = "percent", fill = "clade") + theme_minimal(base_size = 14)
/analyses/SymbiontGenoytping/CladeCount.R
no_license
jldimond/P.ast-transplant
R
false
false
3,296
r
#Set working directory setwd("~/Documents/Projects/PoritesRADseq/P.ast-transplant/analyses/SymbiontGenoytping/") #Get list of filenames to import with *.blast suffix filenames <- list.files(pattern="*.blast", full.names=TRUE) #import files as a list of dataframes sym <- lapply(filenames, read.delim, sep = "\t", skip =1, header = F) #extract sample names from filenames sampnames <- gsub('^.*./\\s*|\\s*_.*$', '', filenames) #add sample names to list sym <- Map(cbind, sym, sample = sampnames) #determine clades from blast output clade <- lapply(sym, function(x) ifelse(grepl("sp. A|clade A", x$V3, ignore.case = T), "A", ifelse(grepl("sp. B|clade B", x$V3, ignore.case = T), "B", ifelse(grepl("sp. C|clade C", x$V3, ignore.case = T), "C", ifelse(grepl("sp. D|clade D", x$V3, ignore.case = T), "D", NA))))) #add clades to list sym2 <- mapply(cbind, sym, clade = clade, SIMPLIFY = FALSE) #remove instances where one read mapped to more than one clade sym3 <- lapply(sym2, function(x) x[!duplicated(c(x$V1, x$clade)),]) #rename list elements with sample names names(sym3) <- sampnames #bind rows library(dplyr) sym4 <- bind_rows(sym3, .id="df") #remove all instances of 18S rDNA, since this gives ambiguous #matches to many clades sym5 <- sym4[!grepl("18S", sym4$V3),] #omit NA values sym6 <- na.omit(sym5) library(ggplot2) #summarize data symsum <- sym6 %>% group_by(sample,clade) %>% summarise(count=n()) %>% mutate(perc=count/sum(count)) #add vector for species symsum$species <- c(rep("Branching Porites", 32), rep("Porites astreoides", 72)) library(ggstance) #plot it ggplot(subset(symsum,sample %in% c("pa10-15", "pa10-16","pa11-15", "pa11-16", "pa2-15", "pa2-16", "pa3-15", "pa3-16", "pa5-15", "pa5-16", "pa6-15", "pa6-16", "pa8-15", "pa8-16", "pa9-15", "pa9-16", "103", "104", "105", "124", "125", "126", "128", "129", "130", "131"))) + aes(x = perc, y = sample, fill = factor(clade)) + geom_barh(stat="identity", width = 0.9) + scale_x_continuous(limits = c(0,1), expand = c(0, 0))+ 
scale_fill_manual(values=c("#66c2a5","#fc8d62","#8da0cb","#e78ac3"))+ labs(x = "Proportion", fill = "Clade") + theme_gray()+ theme(axis.text.y = element_blank(), axis.ticks.y=element_blank(), axis.title.y = element_blank(), legend.position="top")+ facet_grid(species ~ ., scales = "free_y", space = "free_y") #plot before and after transplantation ggplot(subset(symsum,sample %in% c("pa10-15", "pa11-15", "pa12-15", "pa17-15", "pa18-15", "pa2-15", "pa3-15", "pa5-15", "pa6-15", "pa7-15", "pa8-15", "pa9-15", "pa10-16", "pa11-16", "pa12-16", "pa17-16", "pa18-16", "pa2-16", "pa3-16", "pa5-16", "pa6-16", "pa7-16", "pa8-16", "pa9-16"))) + aes(x = factor(sample), y = perc*100, fill = factor(clade)) + geom_bar(stat="identity", width = 0.7) + labs(x = "Sample", y = "percent", fill = "clade") + theme_minimal(base_size = 14)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{Discrete_Occupations_Stations} \alias{Discrete_Occupations_Stations} \title{Discrete Occupation Station data} \format{ A dataframe \describe{ \item{station}{The name of the station where data was collected} \item{latitude}{The latitude at which data was collected} \item{longitude}{The longitude at which data was collected} \item{year}{The year in which data was collected} \item{month}{The month in which data was collected (numeric)} \item{day}{The day on which data was collected} \item{event_id}{A unique identifier for the sampling event} \item{sample_id}{A unique identifier for the sample} \item{depth}{The depth at which data was actually collected} \item{nominal_depth}{The depth at which data was planned to be collected - sometime differs slightly from actual collection depth} \item{nitrate}{Discrete measurements of nitrate concentration in the water column at a range of depths } \item{silicate}{Discrete measurements of silicate concentration in the water column at a range of depths } \item{phosphate}{Discrete measurements of phosphate concentration in the water column at a range of depths } \item{chlorophyll}{Discrete measurements of chlorophyll concentration in the water column at a range of depths } \item{sea_temperature}{Discrete sea temperature measurements over a range of depths} \item{salinity}{Discrete salinity measurements over a range of depths} \item{sigmaTheta}{Discrete density measurements over a range of depths} } } \usage{ Discrete_Occupations_Stations } \description{ Discrete data collected at fixed stations during individual occupations. } \details{ The data can be cited as follows: Casault, B., Johnson, C., Devred, E., Head, E., Cogswell, A., and Spry, J. 2020. Optical, Chemical, and Biological Oceanographic Conditions on the Scotian Shelf and in the Eastern Gulf of Maine during 2018. DFO Can. Sci. Advis. Sec. Res. Doc. 2020/037. v + 66 p. 
Hebert, D., Pettipas, R., and Brickman, D. 2020. Physical Oceanographic Conditions on the Scotian Shelf and in the Gulf of Maine during 2018. DFO Can. Sci. Advis. Sec. Res. Doc. 2020/036 iv + 52 p. } \note{ \describe{ \item{time_scale}{Occupation} \item{regional_scale}{Station} \item{category}{ biological, phytoplankton, biochemical, physical} } } \keyword{datasets}
/man/Discrete_Occupations_Stations.Rd
permissive
casaultb/azmpdata
R
false
true
2,464
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{Discrete_Occupations_Stations} \alias{Discrete_Occupations_Stations} \title{Discrete Occupation Station data} \format{ A dataframe \describe{ \item{station}{The name of the station where data was collected} \item{latitude}{The latitude at which data was collected} \item{longitude}{The longitude at which data was collected} \item{year}{The year in which data was collected} \item{month}{The month in which data was collected (numeric)} \item{day}{The day on which data was collected} \item{event_id}{A unique identifier for the sampling event} \item{sample_id}{A unique identifier for the sample} \item{depth}{The depth at which data was actually collected} \item{nominal_depth}{The depth at which data was planned to be collected - sometime differs slightly from actual collection depth} \item{nitrate}{Discrete measurements of nitrate concentration in the water column at a range of depths } \item{silicate}{Discrete measurements of silicate concentration in the water column at a range of depths } \item{phosphate}{Discrete measurements of phosphate concentration in the water column at a range of depths } \item{chlorophyll}{Discrete measurements of chlorophyll concentration in the water column at a range of depths } \item{sea_temperature}{Discrete sea temperature measurements over a range of depths} \item{salinity}{Discrete salinity measurements over a range of depths} \item{sigmaTheta}{Discrete density measurements over a range of depths} } } \usage{ Discrete_Occupations_Stations } \description{ Discrete data collected at fixed stations during individual occupations. } \details{ The data can be cited as follows: Casault, B., Johnson, C., Devred, E., Head, E., Cogswell, A., and Spry, J. 2020. Optical, Chemical, and Biological Oceanographic Conditions on the Scotian Shelf and in the Eastern Gulf of Maine during 2018. DFO Can. Sci. Advis. Sec. Res. Doc. 2020/037. v + 66 p. 
Hebert, D., Pettipas, R., and Brickman, D. 2020. Physical Oceanographic Conditions on the Scotian Shelf and in the Gulf of Maine during 2018. DFO Can. Sci. Advis. Sec. Res. Doc. 2020/036 iv + 52 p. } \note{ \describe{ \item{time_scale}{Occupation} \item{regional_scale}{Station} \item{category}{ biological, phytoplankton, biochemical, physical} } } \keyword{datasets}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### SEAK Chinook Spring Troll 2010-2017 #### # Kyle Shedd Tue Nov 07 14:08:51 2017 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ date() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Introduction #### # The goal of this script is to revisit Chinook salmon mixtures from the SEAK # commercial spring troll harvests from 2010-2017 looking at D14 using the GAPS3.0 # baseline containing 357 populations in 26 reporting groups characterized by # 13 uSATs. All mixtures are to be analyzed with the program BAYES. #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Specific Objectives #### # This script will: # 1) Import mixture data # 2) Add attribute data # 3) Define spatio-temporal strata # 4) Perform a data QC on mixtures # 5) Prepare BAYES input files # 6) Summarize BAYES results # 7) Generate plots and tables of results #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Initial Setup #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rm(list = ls(all = TRUE)) setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017") source("H:/R Source Scripts/Functions.GCL_KS.R") source("C:/Users/krshedd/Documents/R/Functions.GCL.R") username <- "krshedd" password <- "********" #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## Pull all data for each silly code and create .gcl objects for each Spring2010Mixtures <- c("KSPRING10H", "KSPRING10J", "KSPRING10K", "KSPRING10P", "KSPRING10S", "KSPRING10W") Spring2011Mixtures <- c("KSPRING11J", "KSPRING11K", "KSPRING11P", "KSPRING11S", "KSPRING11W") Spring2012Mixtures <- c("KSPRING12J", "KSPRING12K", "KSPRING12P", "KSPRING12S", "KSPRING12W") # Stikine and Taku directed fishery samples never extracted "KTROL12SR" "KTROL12TR" Spring2013Mixtures <- c("KSPRING13J", "KSPRING13K", "KSPRING13P", 
"KSPRING13S", "KSPRING13W", "KSPRING13Y") Spring2014Mixtures <- c("KSPRING14C", "KSPRING14J", "KSPRING14K", "KSPRING14P", "KSPRING14S", "KSPRING14W", "KSPRING14Y") Spring2015Mixtures <- c("KSPRING15C", "KSPRING15J", "KSPRING15K", "KSPRING15P", "KSPRING15S", "KSPRING15W", "KSPRING15Y") Spring2016Mixtures <- c("KTROL16SP") # "KTROL16D8" not used, no extractions Spring2017Mixtures <- c("KTROL17SP") ## Pull genotypes LOKI2R_GAPS.GCL(sillyvec = unlist(sapply(objects(pattern = "Spring"), get)), username = username, password = password) ## Save unaltered .gcls # dir.create("Raw genotypes") # dir.create("Raw genotypes/OriginalCollections") invisible(sapply(unlist(sapply(objects(pattern = "Spring"), get)), function(silly) {dput(x = get(paste0(silly, ".gcl")), file = paste0("Raw genotypes/OriginalCollections/" , silly, ".txt"))} )); beep(8) # dir.create("Objects") dput(x = LocusControl, file = "Objects/LocusControl.txt") invisible(sapply(objects(pattern = "Mixtures"), function(mix) {dput(x = get(mix), file = paste0("Objects/", mix, ".txt"))})) file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GAPSLoci_reordered.txt", to = "Objects") GAPSLoci_reordered <- dget(file = "Objects/GAPSLoci_reordered.txt") dimnames(KTROL16SP.gcl$counts)[[2]] GAPSLoci_reordered #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Pool into a single silly per year PoolCollections.GCL(collections = Spring2010Mixtures, loci = GAPSLoci_reordered, newname = "KTROL10SP") PoolCollections.GCL(collections = Spring2011Mixtures, loci = GAPSLoci_reordered, newname = "KTROL11SP") PoolCollections.GCL(collections = Spring2012Mixtures, loci = GAPSLoci_reordered, newname = "KTROL12SP") PoolCollections.GCL(collections = Spring2013Mixtures, loci = GAPSLoci_reordered, newname = "KTROL13SP") PoolCollections.GCL(collections = Spring2014Mixtures, loci = GAPSLoci_reordered, newname = "KTROL14SP") PoolCollections.GCL(collections = Spring2015Mixtures, loci = GAPSLoci_reordered, newname = "KTROL15SP") 
PoolCollections.GCL(collections = Spring2016Mixtures, loci = GAPSLoci_reordered, newname = "KTROL16SP") PoolCollections.GCL(collections = Spring2017Mixtures, loci = GAPSLoci_reordered, newname = "KTROL17SP") dimnames(KTROL16SP.gcl$counts)[[2]] sapply(paste0("KTROL", 10:17, "SP"), function(silly) {get(paste0(silly, ".gcl"))$n} ) # KTROL10SP KTROL11SP KTROL12SP KTROL13SP KTROL14SP KTROL15SP KTROL16SP KTROL17SP # 1106 1260 1072 1427 1162 1105 1114 1010 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Change FK_FISH_ID to the back end of SillySource str(KTROL10SP.gcl$attributes$FK_FISH_ID) str(KTROL16SP.gcl$attributes$FK_FISH_ID) KTROL10SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL10SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL11SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL11SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL12SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL12SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL13SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL13SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL14SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL14SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL15SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL15SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL16SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL16SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) KTROL17SP.gcl$attributes$FK_FISH_ID <- sapply(as.character(KTROL17SP.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} ) # 
dir.create("Raw genotypes/PooledCollections") invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {dput(x = get(paste0(silly, ".gcl")), file = paste0("Raw genotypes/PooledCollections/" , silly, ".txt"))} )); beep(8) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Clean workspace; dget .gcl objects and Locus Control #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rm(list = ls(all = TRUE)) setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/") # This sources all of the new GCL functions to this workspace source("C:/Users/krshedd/Documents/R/Functions.GCL.R") source("H:/R Source Scripts/Functions.GCL_KS.R") ## Get objects SEAKobjects <- list.files(path = "Objects", recursive = FALSE) # SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")] SEAKobjects invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2) ## Get un-altered mixtures invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections/", silly, ".txt")), pos = 1)} )); beep(2) objects(pattern = "\\.gcl") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Pair with metadata #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Pair with district data from Anne # NOTE that fish from 113-95 and 113-97 are going to be included in D114 along with 112-65 # require(xlsx) # spring_troll.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "original data", startRow = 1) # str(spring_troll.df) spring_troll.df <- read.table(file = "2010-2017 Spring troll asl by district_original_data.txt", sep = "\t", header = TRUE) str(spring_troll.df) 
table(spring_troll.df$Year, spring_troll.df$District) # all samples spring_troll.df$Sub.District.char <- sapply(as.character(spring_troll.df$Sub.District), function(i) {if(!is.na(i) & nchar(i) == 1) {paste0(0, i)} else {i} } ) spring_troll.df$Stat.Area <- paste0(spring_troll.df$District, spring_troll.df$Sub.District.char) spring_troll.df$Stat.Area[is.na(spring_troll.df$District)] <- NA table(spring_troll.df$Year, spring_troll.df$Stat.Area) # all samples ids <- sapply(paste0("KTROL", 10:17, "SP"), function(silly) {get(paste0(silly, ".gcl"))$attributes$FK_FISH_ID} ) str(ids) # Are we missing metadata for fish we have genotyped? table(unlist(ids) %in% spring_troll.df$Dna.Specimen.No) # FALSE TRUE # 252 9004 # Which years are missing metadata table(sapply(names(unlist(ids)[!unlist(ids) %in% spring_troll.df$Dna.Specimen.No]), function(id) { unlist(strsplit(x = unlist(strsplit(x = id, split = "KTROL"))[2], split = "SP"))[1] } )) # 10 11 12 13 15 16 # 1 2 6 1 1 241 # Paste the missing fish into ASL .csv to see what project they are from writeClipboard(as.character(unlist(ids)[!unlist(ids) %in% spring_troll.df$Dna.Specimen.No])) # All of the 2016 fish are from "District 108 Spring Troll" project # E-mailed Anne to see if it is safe to assume that they were all caught in 108 # Match data up by year and look at district breakdowns for(yr in 10:17){ my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) match.yr <- match(my.gcl$attributes$FK_FISH_ID, spring_troll.df$Dna.Specimen.No) # table(spring_troll.df$Year[match.yr]) my.gcl$attributes$StatWeek <- spring_troll.df$Stat.Week[match.yr] my.gcl$attributes$Port <- spring_troll.df$Port.Code[match.yr] my.gcl$attributes$Quadrant <- spring_troll.df$Quadrant[match.yr] my.gcl$attributes$District <- spring_troll.df$District[match.yr] my.gcl$attributes$SubDistrict <- spring_troll.df$Sub.District.char[match.yr] my.gcl$attributes$StatArea <- spring_troll.df$Stat.Area[match.yr] my.gcl$attributes$Age <- spring_troll.df$Age.European[match.yr] 
my.gcl$attributes$LengthType <- spring_troll.df$Length.Type[match.yr] my.gcl$attributes$Length <- spring_troll.df$Length.Millimeters[match.yr] assign(x = paste0("match.20", yr), value = match.yr) assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl) } sapply(as.character(10:17), function(yr) { my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) addmargins(table(my.gcl$attributes$District, useNA = "always")) } ) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Data QC/Massage #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ require(xlsx) Spring10_17_Strata <- paste0("KTROL", 10:17, "SP") dput(x = Spring10_17_Strata, file = "Objects/Spring10_17_Strata.txt") Spring10_17_Strata_SampleSizes <- matrix(data = NA, nrow = length(Spring10_17_Strata), ncol = 4, dimnames = list(Spring10_17_Strata, c("Genotyped", "Missing", "Duplicate", "Final"))) #### Check loci ## Get sample size by locus Original_Spring10_17_Strata_SampleSizebyLocus <- SampSizeByLocus.GCL(sillyvec = Spring10_17_Strata, loci = GAPSLoci_reordered) min(Original_Spring10_17_Strata_SampleSizebyLocus) ## 991 apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, min) / apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, max) ## Good, 0.928 Original_Spring10_17_Strata_PercentbyLocus <- apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, function(row) {row / max(row)} ) which(apply(Original_Spring10_17_Strata_PercentbyLocus, 2, min) < 0.8) # no re-runs! 
require(lattice) new.colors <- colorRampPalette(c("black", "white")) levelplot(t(Original_Spring10_17_Strata_PercentbyLocus), col.regions = new.colors, at = seq(from = 0, to = 1, length.out = 100), main = "% Genotyped", xlab = "SILLY", ylab = "Locus", scales = list(x = list(rot = 90)), aspect = "fill") # aspect = "iso" will make squares #### Check individuals ### Initial ## Get number of individuals per silly before removing missing loci individuals Original_Spring10_17_Strata_ColSize <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n) Spring10_17_Strata_SampleSizes[, "Genotyped"] <- Original_Spring10_17_Strata_ColSize ### Missing ## Remove individuals with >20% missing data Spring10_17_Strata_MissLoci <- RemoveIndMissLoci.GCL(sillyvec = Spring10_17_Strata, proportion = 0.8) dput(x = Spring10_17_Strata_MissLoci, file = "Objects/Spring10_17_Strata_MissLoci.txt") ## Get number of individuals per silly after removing missing loci individuals ColSize_Spring10_17_Strata_PostMissLoci <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n) Spring10_17_Strata_SampleSizes[, "Missing"] <- Original_Spring10_17_Strata_ColSize - ColSize_Spring10_17_Strata_PostMissLoci ### Duplicate ## Check within collections for duplicate individuals. 
Spring10_17_Strata_DuplicateCheck95MinProportion <- CheckDupWithinSilly.GCL(sillyvec = Spring10_17_Strata, loci = GAPSLoci_reordered, quantile = NULL, minproportion = 0.95) Spring10_17_Strata_DuplicateCheckReportSummary <- sapply(Spring10_17_Strata, function(x) Spring10_17_Strata_DuplicateCheck95MinProportion[[x]]$report) Spring10_17_Strata_DuplicateCheckReportSummary dput(x = Spring10_17_Strata_DuplicateCheckReportSummary, file = "Objects/Spring10_17_Strata_DuplicateCheckReportSummary.txt") ## Remove duplicate individuals Spring10_17_Strata_RemovedDups <- RemoveDups.GCL(Spring10_17_Strata_DuplicateCheck95MinProportion) ## Get number of individuals per silly after removing duplicate individuals ColSize_Spring10_17_Strata_PostDuplicate <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n) Spring10_17_Strata_SampleSizes[, "Duplicate"] <- ColSize_Spring10_17_Strata_PostMissLoci-ColSize_Spring10_17_Strata_PostDuplicate ### Final Spring10_17_Strata_SampleSizes[, "Final"] <- ColSize_Spring10_17_Strata_PostDuplicate Spring10_17_Strata_SampleSizes dput(x = Spring10_17_Strata_SampleSizes, file = "Objects/Spring10_17_Strata_SampleSizes.txt") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## Save PostQC .gcl's as back-up: # dir.create("Raw genotypes/PooledCollections_PostQC") invisible(sapply(Spring10_17_Strata, function(silly) { dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC/" , silly, ".txt")) } )); beep(8) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Clean workspace; dget .gcl objects and Locus Control #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rm(list = ls(all = TRUE)) setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/") # This sources all of the new GCL functions to this workspace source("C:/Users/krshedd/Documents/R/Functions.GCL.R") source("H:/R Source Scripts/Functions.GCL_KS.R") ## Get objects SEAKobjects <- 
list.files(path = "Objects", recursive = FALSE) # SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")] SEAKobjects invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2) ## Get un-altered mixtures invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC/", silly, ".txt")), pos = 1)} )); beep(2) objects(pattern = "\\.gcl") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #### Tables by District #### #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Pivots to show number of fish by district by year that we have genotypes for t(sapply(as.character(10:17), function(yr) { my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) addmargins(table(my.gcl$attributes$District, useNA = "always")) } )) # Anne's data only has quadrant projects table(spring_troll.df$Year, spring_troll.df$Project) # Read in OceanAK ASL data to add District info to D108 and D111 projects spring_troll_oceanAK.df <- read.csv(file = "Harvest - Detailed ASL Samples 2010-2017.csv") str(spring_troll_oceanAK.df) # Which years? 
table(spring_troll_oceanAK.df$ï..Year, spring_troll_oceanAK.df$Project) # 2012 and 2016 # Fish IDs ids_D108 <- spring_troll_oceanAK.df$Dna.Specimen.No[spring_troll_oceanAK.df$Project == "District 108 Spring Troll"] ids_D111 <- spring_troll_oceanAK.df$Dna.Specimen.No[spring_troll_oceanAK.df$Project == "District 111 Spring Troll"] table(KTROL12SP.gcl$attributes$FK_FISH_ID %in% ids_D108) table(KTROL12SP.gcl$attributes$FK_FISH_ID %in% ids_D111) table(KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108) # Create character vector of district for(yr in 10:17){ my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) my.gcl$attributes$District.chr <- as.character(my.gcl$attributes$District) assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl) } # Add 108 data from 2016 table(KTROL16SP.gcl$attributes$District) table(KTROL16SP.gcl$attributes$District.chr) match.oceanAK.2016 <- match(KTROL16SP.gcl$attributes$FK_FISH_ID[KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108], spring_troll_oceanAK.df$Dna.Specimen.No) KTROL16SP.gcl$attributes$District.chr[KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108] <- as.character(spring_troll_oceanAK.df$District[match.oceanAK.2016]) table(KTROL16SP.gcl$attributes$District.chr) levels(KTROL10SP.gcl$attributes$District) # Create new factor with all districts for(yr in 10:17){ my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) my.gcl$attributes$District.fac <- factor(x = my.gcl$attributes$District.chr, levels = c(" ", as.character(101:115), "183")) assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl) } # Pivot of years by district addmargins(t(sapply(as.character(10:17), function(yr) { my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) table(my.gcl$attributes$District.fac, useNA = "always") } ))) KTROL17SP.gcl$attributes$FK_FISH_ID[is.na(KTROL17SP.gcl$attributes$District.fac)] # Add column in Anne's sheet to denote which samples have been genotyped ids.genotyped <- sapply(as.character(10:17), function(yr) { my.gcl <- get(paste0("KTROL", yr, "SP.gcl")) 
my.gcl$attributes$FK_FISH_ID }) str(ids.genotyped) table(unlist(ids.genotyped) %in% spring_troll.df$Dna.Specimen.No) # all but District 108 fish table(spring_troll.df$Dna.Specimen.No %in% unlist(ids.genotyped)) # not all fish from District 171-174 were genotyped (and passed data QC) spring_troll.df$Genotyped <- spring_troll.df$Dna.Specimen.No %in% unlist(ids.genotyped) addmargins(table(spring_troll.df$Year, spring_troll.df$District, spring_troll.df$Genotyped)) # options(java.parameters = "-Xmx100g") # write.xlsx(x = spring_troll.df, file = "2010-2017 Spring troll asl by district.xlsx", sheetName = "D114com_genotyped", append = TRUE) write.table(x = spring_troll.df, file = "2010-2017 Spring troll asl by district_original_data_genotyped.txt", row.names = FALSE) # save.image("SpringTroll2010-2017.RData") #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## Look at how representative our sampling is by SW # Read in ASL spring_troll.df <- read.table(file = "2010-2017 Spring troll asl by district_original_data_genotyped.txt", stringsAsFactors = FALSE, header = TRUE) str(spring_troll.df) Spring171.Vials <- spring_troll.df$Dna.Specimen.No[spring_troll.df$Year == 2013 & spring_troll.df$Quadrant == 171 & spring_troll.df$Genotyped == TRUE] SpringNO.Vials # what was run in 2013 all(SpringNO.Vials %in% Spring171.Vials) # TRUE # Number of samples by District and SW table(spring_troll.df$District[spring_troll.df$Genotyped == TRUE & spring_troll.df$Year == 2013], spring_troll.df$Stat.Week[spring_troll.df$Genotyped == TRUE & spring_troll.df$Year == 2013]) # Read in harvest require(xlsx) spring_harvest.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "CE001353", startRow = 23, header = TRUE) # spring_harvest.df <- read.csv(file = "CE001353.csv", skip = 22, row.names = NULL, stringsAsFactors = FALSE) str(spring_harvest.df) require(tidyr) require(dplyr) require(reshape2) # Specific to 2013 spring_harvest.df %>% filter(Year == 2013) %>% cast(Area.Value ~ 
Time.Value, value = "N.Catch") spring_troll.df %>% filter(Year == 2013) %>% cast(District ~ Stat.Week, value = "Genotyped", fun.aggregate = sum) # What would sample sizes be if we subsampled? D113_harvest_tall <- spring_harvest.df %>% filter(Area.Value == 113) %>% select(Year, Time.Value, N.Catch) D113_samples_tall <- spring_troll.df %>% filter(District == 113) %>% select(Year, Stat.Week, Genotyped) %>% group_by(Year, Stat.Week) %>% summarise(n = sum(Genotyped)) # Left join to combine harvest and n_samples D113_harvest_samples <- full_join(x = D113_samples_tall, y = D113_harvest_tall, by = c("Year" = "Year", "Stat.Week" = "Time.Value")) # What is sample size for max harvest stat week D113_harvest_samples %>% group_by(Year) %>% summarise(max_n = n[which.max(N.Catch)]) # Look at IA to see if % Alaska stocks < over time require(ggplot2) spring_troll.df %>% filter(District == 113 & Year == 2013) %>% ggplot(aes(Sample.Date, Dna.Specimen.No)) + geom_point() IndividualAssignmentSummary.GCL(GroupNames = GroupNames4Pub, groupvec = GroupVec4, mixnames = "D113Troll_2013", BAYESoutputDir = "BAYES/Output", nchains = 5, nreps = 40000, burn = 0.5, thin = 100) # This failed for 2 reasons # 1) since there are more than 300 pops in the baseline, the CLS files wraps rows (i.e. row 1 has 300 columns, row 2 has the remaining 357) # 2) a mixture individual was removed due to missing allele, while it indicates which individual it was in the .SUM file, it will be a pain to figurue it all out. 
## Write functions to visualize proportional harvest by SW for specific District
# Shared helper: draw a row-proportion heatmap from a wide (Year x Stat Week)
# table whose first column is Year. NA cells (weeks with no data) plot black.
.prop_levelplot.f <- function(D_wide, main_title) {
  require(lattice)
  new.colors <- colorRampPalette(c("white", "darkgreen"))
  # Scale each year's counts to proportions of that year's row total
  data.mat <- as.matrix(D_wide[, -1] / rowSums(D_wide[, -1], na.rm = TRUE))
  # Label rows by the years actually present; the previous hard-coded
  # `rownames(data.mat) <- 2010:2017` errored for any district missing a year
  rownames(data.mat) <- D_wide[[1]]
  levelplot(data.mat, col.regions = new.colors,
            at = seq(from = 0, to = max(data.mat, na.rm = TRUE), length.out = 100),
            main = main_title, xlab = "Year", ylab = "Stat Week",
            scales = list(x = list(rot = 90)), aspect = "fill",
            panel = function(...) {
              panel.fill("black")
              panel.levelplot(...)
            })  # aspect = "iso" will make squares
}

# Heatmap of proportional HARVEST by stat week for one district (reads global
# `spring_harvest.df`); returns a trellis object that auto-prints at top level.
harvest_level.f <- function(district) {
  require(tidyr)
  require(dplyr)
  require(reshape2)
  D_harvest <- spring_harvest.df %>%
    filter(Area.Value == district) %>%
    select(Year, Time.Value, N.Catch) %>%
    spread(Time.Value, N.Catch)
  # Visualize how unrepresentative samples are
  .prop_levelplot.f(D_harvest, paste0("Total Harvest D", district))
}

## Write functions to visualize proportional samples by SW for specific District
# Heatmap of proportional genotyped SAMPLES by stat week for one district
# (reads global `spring_troll.df`).
samples_level.f <- function(district) {
  require(tidyr)
  require(dplyr)
  require(reshape2)
  D_samples <- spring_troll.df %>%
    filter(District == district) %>%
    select(Year, Stat.Week, Genotyped) %>%
    group_by(Year, Stat.Week) %>%
    summarise(n = sum(Genotyped)) %>%
    spread(Stat.Week, n)
  # Visualize how unrepresentative samples are
  .prop_levelplot.f(D_samples, paste0("Total Samples D", district))
}

harvest_level.f(district = 114)
samples_level.f(district = 114)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create Variable for Mixture ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Sanity-check the mixture definitions against 2015 before applying to all years
table(KTROL15SP.gcl$attributes$District.fac %in% 101:102)
table(KTROL15SP.gcl$attributes$District.fac %in% 103)
table(KTROL15SP.gcl$attributes$District.fac %in% 106:108 & KTROL15SP.gcl$attributes$StatArea != 10643)
table(KTROL15SP.gcl$attributes$District.fac %in% c(109:110, 112) & KTROL15SP.gcl$attributes$StatArea != 11265)
table(KTROL15SP.gcl$attributes$District.fac %in% 113)
table(KTROL15SP.gcl$attributes$District.fac %in% 114 | KTROL15SP.gcl$attributes$StatArea %in% c(11265, 11395, 11397))
table(KTROL15SP.gcl$attributes$District.fac %in% 183)

# Add new factor with mixtures
# Note: stat areas 11265, 11395, 11397 are deliberately folded into "114"
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  my.gcl$attributes$Mixture <- NA
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 101:102] <- "101/102"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 103] <- "103"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 106:108 & my.gcl$attributes$StatArea != 10643] <- "106/107/108"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% c(109:110, 112) & my.gcl$attributes$StatArea != 11265] <- "109/110/112"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 113] <- "113"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 114 | my.gcl$attributes$StatArea %in% c(11265, 11395, 11397)] <- "114"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 183] <- "183"
  my.gcl$attributes$Mixture <- factor(x = my.gcl$attributes$Mixture, levels = c("101/102", "103", "106/107/108", "109/110/112", "113", "114", "183"))
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

# Pivot of years by mixtures with NA
# Pivot table of mixture sample sizes by year, including unassigned fish (NA)
addmargins(t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  table(my.gcl$attributes$Mixture, useNA = "always")
} )))

# Pivot of years by mixtures without NA
addmargins(t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  table(my.gcl$attributes$Mixture)
} )))
#     101/102 103 106/107/108 109/110/112  113 114  183  Sum
# 10      148   0          10          16  296 242    0  712
# 11      152   0         193          70  459 111    0  985
# 12      128   0         257         132  301 168    0  986
# 13      133   0         105          97  251 117  497 1200
# 14      142  96         126          94  209  69  377 1113
# 15      109 100         156         112  177  24  316  994
# 16       51 102         135         140  104  78   95  705
# 17       50 101         125          90  187  61    0  614
# Sum     913 399        1107         751 1984 870 1285 7309

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Save PostQC_Metadata .gcl's as back-up:
# dir.create("Raw genotypes/PooledCollections_PostQC_Metadata")
invisible(sapply(Spring10_17_Strata, function(silly) {
  dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata/" , silly, ".txt"))
} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Deliberate checkpoint: wipe everything and rebuild from the dput backups so
# the analysis can resume here without re-running the upstream sections
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")

# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects

# dget every saved object into the global environment, named after its file
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata/", silly, ".txt")), pos = 1)} )); beep(2)

objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Change rownames for scores and counts to FK_FISH_ID ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## All of the .gcl functions that rely on IDs look in the rownames for scores,
## FK_FISH_ID and rownames(scores) + rownames(counts) need to match!!!
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  rownames(my.gcl$scores) <- rownames(my.gcl$counts) <- as.character(my.gcl$attributes$FK_FISH_ID)
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Save PostQC_Metadata .gcl's as back-up:
# dir.create("Raw genotypes/PooledCollections_PostQC_Metadata_Rename")
invisible(sapply(Spring10_17_Strata, function(silly) {
  dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata_Rename/" , silly, ".txt"))
} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Second checkpoint: reload from the renamed (rownames = FK_FISH_ID) backups
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")

# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects

invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata_Rename/", silly, ".txt")), pos = 1)} )); beep(2)

objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create Mixtures ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mixtures <- levels(KTROL10SP.gcl$attributes$Mixture)
dput(x = mixtures, file = "Objects/mixtures.txt")

# Map each mixture stratum to its publication-style BAYES mixture name
mixtures.names <- setNames(object = c("D101102Troll", "D103Troll", "D106107108Troll", "D109110112Troll", "D113Troll", "D114Troll", "D183Troll"), nm = mixtures)
dput(x = mixtures.names, file = "Objects/mixtures.names.txt")

# dir.create("BAYES")
# dir.create("BAYES/Mixture")

# Loop over years and mixtures
# NOTE(review): silly = "my" works because the loop runs at top level, so
# "my.gcl" exists in the global environment where the GCL functions look for it
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  for(mix in mixtures) {
    IDs <- list("my" = na.omit(AttributesToIDs.GCL(silly = "my", attribute = "Mixture", matching = mix)))
    if(length(IDs[["my"]])) {
      # Only write a .mix file when this year actually has fish in the stratum
      invisible(CreateMixture.GCL(sillys = "my", loci = GAPSLoci_reordered, IDs = IDs, mixname = paste0(mixtures.names[mix], "_20", yr), dir = "BAYES/Mixture/", type = "BAYES", PT = FALSE))
    }  # if IDS
  }  # mixture within year
}  # year

# Double check mixture files: total rows across all .mix files should match
# the total number of fish assigned to mixtures
sum(sapply(list.files(path = "BAYES/Mixture/", full.names = TRUE), function(fle) {nrow(read.table(file = fle, header = FALSE))} ))

# Remove mixtures we do not intend to run!
# Drop the one mixture file not worth running, then restore the working dir
setwd("BAYES/Mixture/")
unlink(x = c("D106107108Troll_2010.mix"))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copy the GAPS baseline and supporting objects from the SEAK17 analysis
# dir.create("BAYES/Baseline")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/BAYES/Baseline/GAPS357pops13loci.bse", to = "BAYES/Baseline/")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupVec26RG_357.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames26.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/SEAKPops357.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GAPS357PopsInits.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/WASSIPSockeyeSeeds.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/mixfortran.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/bayesfortran_357.txt", to = "Objects")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("BAYES/Control")
# Re-read all saved objects (now including the copies made above)
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); rm(SEAKobjects); beep(2)

# Flat Pop prior: equal weight across all 357 baseline populations
GAPS357PopFlatPrior <- Prior.GCL(groupvec = 1:357, groupweights = rep(1/357, 357), minval = 0.001)
dput(x = GAPS357PopFlatPrior, file = "Objects/GAPS357PopFlatPrior.txt")

# Dump Control Files
GroupVec26RG_357 <- dget(file = "Objects/GroupVec26RG_357.txt")
SEAKPops357 <- dget(file = "Objects/SEAKPops357.txt")

# Mixture names = .mix filenames with the extension stripped
all.mixtures <- sapply(list.files(path = "BAYES/Mixture"), function(mix) {unlist(strsplit(x = mix, split = ".mix"))[1]}, USE.NAMES = FALSE)
dput(x = all.mixtures, file = "Objects/all.mixtures.txt")

# One BAYES control file per mixture: 5 chains x 40,000 reps, flat prior
sapply(all.mixtures, function(Mix) {
  invisible(CreateControlFile.GCL(sillyvec = SEAKPops357, loci = GAPSLoci_reordered, mixname = Mix, basename = "GAPS357pops13loci", suffix = "", nreps = 40000, nchains = 5, groupvec = GroupVec26RG_357, priorvec = GAPS357PopFlatPrior, initmat = GAPS357PopsInits, dir = "BAYES/Control", seeds = WASSIPSockeyeSeeds, thin = c(1, 1, 100), mixfortran = mixfortran, basefortran = bayesfortran_357, switches = "F T F T T T F"))
} )

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("BAYES/Output")
# One output directory per mixture for the BAYES runs
sapply(all.mixtures, function(Mix) {
  invisible(dir.create(path = paste0("BAYES/Output/", Mix)))
})

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Compare BAYES and genetic_msa ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE(review): interactive scratch code — `groupnames`, `mixname`, and
# `genetic_msa_Estimates` are not defined in this script; presumably created
# by hand in the session. Confirm before re-running non-interactively.
BAYES_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = 1:26, groupnames = groupnames, maindir = "BAYES/Output/", mixvec = mixname, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)
genetic_msa_Estimates

require(gplots)
barplot2(height = rbind(BAYES_Estimates$D114Troll_2010[, "mean"], genetic_msa_Estimates$mean), beside = TRUE)  # very bad
legend("topright", legend = c("BAYES", "genetic_msa"), fill = c("red", "yellow"))

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Summarize BAYES ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stock-composition estimates for all mixtures at the 26-reporting-group level
Spring10_17_26RG_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = 1:26, groupnames = GroupNames26, maindir = "BAYES/Output", mixvec = all.mixtures, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)

# dir.create("Estimates objects")
dput(x = Spring10_17_26RG_Estimates, file = "Estimates objects/Spring10_17_26RG_Estimates.txt")

# Convergence check: Gelman-Rubin shrink factor should be <= 1.2
sapply(Spring10_17_26RG_Estimates, function(mix) {table(mix[, "GR"] > 1.2)})

file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames4.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames4Pub.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupVec4.txt", to = "Objects")

# Roll the 26 reporting groups up to the 4 publication groups
Spring10_17_4RG_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = GroupVec4, groupnames = GroupNames4Pub, maindir = "BAYES/Output", mixvec = all.mixtures, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)
dput(x = Spring10_17_4RG_Estimates, file = "Estimates objects/Spring10_17_4RG_Estimates.txt")
Spring10_17_4RG_Estimates <- dget(file = "Estimates objects/Spring10_17_4RG_Estimates.txt")

# Convergence and precision diagnostics per mixture
sapply(Spring10_17_4RG_Estimates, function(mix) {table(mix[, "GR"] > 1.2)})
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[c("Alaska", "TBR"), "mean"]})
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "mean"]} )
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "sd"] / mix[, "mean"]} )  # CV
sapply(Spring10_17_4RG_Estimates, function(mix) {sum(mix[, "sd"] / mix[, "mean"] < 0.20)} )  # how many RGs with CV < 20%
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "95%"] - mix[, "5%"]} )  # 90% CI range

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Read in Harvest and Sample Size Data ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
require(xlsx)
harvest.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "CE000522", startRow = 23, header = TRUE)
str(harvest.df)

# Assign each harvest record to a mixture stratum using the same district /
# stat-area rules applied to the genetic samples earlier in the script
harvest.df$Mixture <- NA
harvest.df$Mixture[harvest.df$District %in% 101:102] <- "101/102"
harvest.df$Mixture[harvest.df$District %in% 103] <- "103"
harvest.df$Mixture[harvest.df$District %in% 106:108 & harvest.df$Area.Value != 10643] <- "106/107/108"
harvest.df$Mixture[harvest.df$District %in% c(109:110, 112) & harvest.df$Area.Value != 11265] <- "109/110/112"
harvest.df$Mixture[harvest.df$District %in% 113] <- "113"
harvest.df$Mixture[harvest.df$District %in% 114 | harvest.df$Area.Value %in% c(11265, 11395, 11397)] <- "114"
harvest.df$Mixture[harvest.df$District %in% 183] <- "183"
harvest.df$Mixture <- factor(x = harvest.df$Mixture, levels = c("101/102", "103", "106/107/108", "109/110/112", "113", "114", "183"))
dput(x = harvest.df, file = "Objects/harvest.df.txt")

require(reshape)
# Total catch by year and mixture stratum
harvest_mix.df <- aggregate(N.Catch ~ Year + Mixture, data = harvest.df, sum)
str(harvest_mix.df)
cast(harvest_mix.df, Year ~ Mixture, value = "N.Catch")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Sample sizes
# n per mixture = number of rows in its BAYES .mix input file
all.mixtures.samplesize <- sapply(all.mixtures, function(mix) {dim(read.table(file = paste0("BAYES/Mixture/", mix, ".mix")))[1]} )
dput(x = all.mixtures.samplesize, file = "Objects/all.mixtures.samplesize.txt")

# Invert the stratum -> mixname lookup so mixnames map back to strata
mixtures.names
mixtures.names2 <- names(mixtures.names)
names(mixtures.names2) <- mixtures.names

# Split "D113Troll_2013"-style names into Mixname and Year columns
mixtures.df <- as.data.frame(t(sapply(all.mixtures, function(mix) {unlist(strsplit(x = mix, split = "_"))} )), stringsAsFactors = FALSE)
names(mixtures.df) <- c("Mixname", "Year")
mixtures.df$Year <- as.numeric(mixtures.df$Year)
mixtures.df$Full.Mixname <- all.mixtures
mixtures.df$Mixture <- factor(x = mixtures.names2[mixtures.df$Mixname], levels = levels(harvest_mix.df$Mixture))
mixtures.df$n <- all.mixtures.samplesize
dput(x = mixtures.df, file = "Objects/mixtures.df.txt")
str(mixtures.df)

all.mixtures.n100 <- names(which(all.mixtures.samplesize >= 100))
dput(x = all.mixtures.n100, file = "Objects/all.mixtures.n100.txt")

# Subset data for n >= 100
Spring10_17_4RG_Estimates_n100 <- Spring10_17_4RG_Estimates[all.mixtures.n100]
dput(x = Spring10_17_4RG_Estimates_n100, file = "Estimates objects/Spring10_17_4RG_Estimates_n100.txt")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Merge harvest and sample size
spring.estimates.df <- merge(x = harvest_mix.df, y = mixtures.df, by = c("Mixture", "Year"), all = FALSE)
# NOTE(review): this sapply returns values in all.mixtures order while merge()
# sorts rows by Mixture then Year — verify the two orderings agree before
# trusting the apportioned estimates
spring.estimates.df$Alaska.mean.p <- sapply(Spring10_17_4RG_Estimates, function(mix) {mix["Alaska", "mean"]})
spring.estimates.df$TBR.mean.p <- sapply(Spring10_17_4RG_Estimates, function(mix) {mix["TBR", "mean"]})
# Point-estimate harvest apportionment: stock proportion x total catch
spring.estimates.df$Alaska.mean.C <- spring.estimates.df$Alaska.mean.p * spring.estimates.df$N.Catch
spring.estimates.df$TBR.mean.C <- spring.estimates.df$TBR.mean.p * spring.estimates.df$N.Catch

round(cast(spring.estimates.df, Year ~ Mixture, value = "Alaska.mean.C"))
round(cast(spring.estimates.df, Year ~ Mixture, value = "TBR.mean.C"))
round(cast(spring.estimates.df, Year ~ Mixture, value = "n"))

dput(x = spring.estimates.df, file = "Objects/spring.estimates.df.txt")

# Named harvest vector keyed by full mixture name, for the summary tables below
harvest <- setNames(object = spring.estimates.df$N.Catch, nm = spring.estimates.df$Full.Mixname)
dput(x = harvest, file = "Objects/harvest.txt")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Subset to only mixtures with >= 100 fish
spring.estimates.n100.df <- spring.estimates.df
# Blank out estimates (not harvest) for under-sampled mixtures
spring.estimates.n100.df[spring.estimates.n100.df$n < 100, c("Alaska.mean.p", "Alaska.mean.C", "TBR.mean.p", "TBR.mean.C")] <- NA

# Heatmap of total Catch
require(lattice)
new.colors <- colorRampPalette(c("white", "darkgreen"))
data.mat <- as.matrix(round(cast(spring.estimates.n100.df, Year ~ Mixture, value = "N.Catch")))
# data.mat[is.na(data.mat)] <- 0
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = max(data.mat, na.rm = TRUE), length.out = 100), main = "Total Catch", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...) {
  panel.fill("black")
  panel.levelplot(...)} )  # aspect = "iso" will make squares

# Heatmap of mean Alaska
require(lattice)
new.colors <- colorRampPalette(c("white", "darkblue"))
data.mat <- as.matrix(cast(spring.estimates.n100.df, Year ~ Mixture, value = "Alaska.mean.p")) * 100
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = 100, length.out = 100), main = "Mean Alaska %", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...) {
  panel.fill("black")
  panel.levelplot(...)} )  # aspect = "iso" will make squares

# Heatmap of mean TBR %
require(lattice)
new.colors <- colorRampPalette(c("white", "darkblue"))
data.mat <- as.matrix(cast(spring.estimates.n100.df, Year ~ Mixture, value = "TBR.mean.p")) * 100
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = 100, length.out = 100), main = "Mean TBR %", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...) {
  panel.fill("black")
  panel.levelplot(...)} )  # aspect = "iso" will make squares

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create 4RG Summary Tables ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("Estimates tables")
require(xlsx)

EstimatesStats <- Spring10_17_4RG_Estimates_n100
SampSizes <- all.mixtures.samplesize
HarvestVec <- harvest
# Publication-ready titles, keyed by full mixture name
PubNames <- setNames(object = paste("Spring Troll", spring.estimates.df$Year, "District(s)", spring.estimates.df$Mixture), nm = spring.estimates.df$Full.Mixname)

# One worksheet per mixture: 7x7 character matrix with title, header, and
# 4 reporting-group rows (mean/SD/median/90% CI)
# NOTE(review): output filename says "2017" for a 2010-2017 product, and
# paste(mix, " 4RG") yields a double space in sheet names — confirm intended
for(mix in all.mixtures.n100) {
  TableX <- matrix(data = "", nrow = 7, ncol = 7)
  TableX[1, 1] <- paste0(PubNames[mix], " (n=", SampSizes[mix], ", catch=", formatC(x = HarvestVec[mix], digits = 0, big.mark = ",", format = "f"), ")")
  TableX[2, 6] <- "90% CI"
  TableX[3, 2:7] <- c("Reporting Group", "Mean", "SD", "Median", "5%", "95%")
  TableX[4:7, 1] <- 1:4
  TableX[4:7, 2] <- rownames(EstimatesStats[[mix]])
  TableX[4:7, 3:7] <- formatC(x = EstimatesStats[[mix]][, c("mean", "sd", "median", "5%", "95%")], digits = 3, format = "f")
  write.xlsx(x = TableX, file = "Estimates tables/SpringTroll2017_4RG_Estimates.xlsx", col.names = FALSE, row.names = FALSE, sheetName = paste(mix, " 4RG"), append = TRUE)
}

# save.image("SpringTroll2010-2017.RData")
/SpringTroll2010-2017.R
no_license
krshedd/Chinook-Spring-Troll-Retro-2010-2017
R
false
false
44,385
r
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### SEAK Chinook Spring Troll 2010-2017 ####
# Kyle Shedd Tue Nov 07 14:08:51 2017
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
date()

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Introduction ####
# The goal of this script is to revisit Chinook salmon mixtures from the SEAK
# commercial spring troll harvests from 2010-2017 looking at D14 using the GAPS3.0
# baseline containing 357 populations in 26 reporting groups characterized by
# 13 uSATs. All mixtures are to be analyzed with the program BAYES.

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Specific Objectives ####
# This script will:
# 1) Import mixture data
# 2) Add attribute data
# 3) Define spatio-temporal strata
# 4) Perform a data QC on mixtures
# 5) Prepare BAYES input files
# 6) Summarize BAYES results
# 7) Generate plots and tables of results

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Initial Setup ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017")
source("H:/R Source Scripts/Functions.GCL_KS.R")
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
# LOKI database credentials (password redacted in version control)
username <- "krshedd"
password <- "********"

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Pull all data for each silly code and create .gcl objects for each
Spring2010Mixtures <- c("KSPRING10H", "KSPRING10J", "KSPRING10K", "KSPRING10P", "KSPRING10S", "KSPRING10W")
Spring2011Mixtures <- c("KSPRING11J", "KSPRING11K", "KSPRING11P", "KSPRING11S", "KSPRING11W")
Spring2012Mixtures <- c("KSPRING12J", "KSPRING12K", "KSPRING12P", "KSPRING12S", "KSPRING12W")  # Stikine and Taku directed fishery samples never extracted "KTROL12SR" "KTROL12TR"
Spring2013Mixtures <- c("KSPRING13J", "KSPRING13K", "KSPRING13P", "KSPRING13S", "KSPRING13W", "KSPRING13Y")
Spring2014Mixtures <- c("KSPRING14C", "KSPRING14J", "KSPRING14K", "KSPRING14P", "KSPRING14S", "KSPRING14W", "KSPRING14Y")
Spring2015Mixtures <- c("KSPRING15C", "KSPRING15J", "KSPRING15K", "KSPRING15P", "KSPRING15S", "KSPRING15W", "KSPRING15Y")
Spring2016Mixtures <- c("KTROL16SP")  # "KTROL16D8" not used, no extractions
Spring2017Mixtures <- c("KTROL17SP")

## Pull genotypes
# Collect every Spring*Mixtures vector defined above into one sillyvec
LOKI2R_GAPS.GCL(sillyvec = unlist(sapply(objects(pattern = "Spring"), get)), username = username, password = password)

## Save unaltered .gcls
# dir.create("Raw genotypes")
# dir.create("Raw genotypes/OriginalCollections")
invisible(sapply(unlist(sapply(objects(pattern = "Spring"), get)), function(silly) {dput(x = get(paste0(silly, ".gcl")), file = paste0("Raw genotypes/OriginalCollections/" , silly, ".txt"))} )); beep(8)

# dir.create("Objects")
dput(x = LocusControl, file = "Objects/LocusControl.txt")
invisible(sapply(objects(pattern = "Mixtures"), function(mix) {dput(x = get(mix), file = paste0("Objects/", mix, ".txt"))}))

# Reuse the GAPS locus ordering from the SEAK17 analysis
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GAPSLoci_reordered.txt", to = "Objects")
GAPSLoci_reordered <- dget(file = "Objects/GAPSLoci_reordered.txt")
# Eyeball check: locus order in the pulled data vs. the reordered GAPS loci
dimnames(KTROL16SP.gcl$counts)[[2]]
GAPSLoci_reordered

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pool into a single silly per year
PoolCollections.GCL(collections = Spring2010Mixtures, loci = GAPSLoci_reordered, newname = "KTROL10SP")
PoolCollections.GCL(collections = Spring2011Mixtures, loci = GAPSLoci_reordered, newname = "KTROL11SP")
PoolCollections.GCL(collections = Spring2012Mixtures, loci = GAPSLoci_reordered, newname = "KTROL12SP")
PoolCollections.GCL(collections = Spring2013Mixtures, loci = GAPSLoci_reordered, newname = "KTROL13SP")
PoolCollections.GCL(collections = Spring2014Mixtures, loci = GAPSLoci_reordered, newname = "KTROL14SP")
PoolCollections.GCL(collections = Spring2015Mixtures, loci = GAPSLoci_reordered, newname = "KTROL15SP")
PoolCollections.GCL(collections = Spring2016Mixtures, loci = GAPSLoci_reordered, newname = "KTROL16SP")
PoolCollections.GCL(collections = Spring2017Mixtures, loci = GAPSLoci_reordered, newname = "KTROL17SP")
# Confirm locus order survived pooling
dimnames(KTROL16SP.gcl$counts)[[2]]

# Pooled sample sizes per year
sapply(paste0("KTROL", 10:17, "SP"), function(silly) {get(paste0(silly, ".gcl"))$n} )
# KTROL10SP KTROL11SP KTROL12SP KTROL13SP KTROL14SP KTROL15SP KTROL16SP KTROL17SP
#      1106      1260      1072      1427      1162      1105      1114      1010

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Change FK_FISH_ID to the back end of SillySource
str(KTROL10SP.gcl$attributes$FK_FISH_ID)
str(KTROL16SP.gcl$attributes$FK_FISH_ID)
# DRY: one loop replaces eight copy-pasted sapply assignments (same get/assign
# idiom this script already uses for the later rownames rename). Parses the
# numeric vial number after the "_" in SillySource (e.g. "KSPRING10H_123" -> 123).
for(yr in 10:17){
  silly <- paste0("KTROL", yr, "SP")
  my.gcl <- get(paste0(silly, ".gcl"))
  my.gcl$attributes$FK_FISH_ID <- sapply(as.character(my.gcl$attributes$SillySource), function(ind) {as.numeric(unlist(strsplit(x = ind, split = "_"))[2])} )
  assign(x = paste0(silly, ".gcl"), value = my.gcl)
}

# dir.create("Raw genotypes/PooledCollections")
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {dput(x = get(paste0(silly, ".gcl")), file = paste0("Raw genotypes/PooledCollections/" , silly, ".txt"))} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Checkpoint: wipe the session and rebuild from the dput backups above
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")

# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects

invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections/", silly, ".txt")), pos = 1)} )); beep(2)

objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Pair with metadata ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pair with district data from Anne
# NOTE that fish from 113-95 and 113-97 are going to be included in D114 along with 112-65
# require(xlsx)
# spring_troll.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "original data", startRow = 1)
# str(spring_troll.df)
spring_troll.df <- read.table(file = "2010-2017 Spring troll asl by district_original_data.txt", sep = "\t", header = TRUE)
str(spring_troll.df)
table(spring_troll.df$Year, spring_troll.df$District)  # all samples

# Zero-pad single-digit subdistricts so stat areas concatenate correctly
# (e.g. district 101, subdistrict 4 -> "10104", not "1014")
spring_troll.df$Sub.District.char <- sapply(as.character(spring_troll.df$Sub.District), function(i) {if(!is.na(i) & nchar(i) == 1) {paste0(0, i)} else {i} } )
spring_troll.df$Stat.Area <- paste0(spring_troll.df$District, spring_troll.df$Sub.District.char)
spring_troll.df$Stat.Area[is.na(spring_troll.df$District)] <- NA
table(spring_troll.df$Year, spring_troll.df$Stat.Area)  # all samples

# Genotyped fish IDs per year (named list of FK_FISH_ID vectors)
ids <- sapply(paste0("KTROL", 10:17, "SP"), function(silly) {get(paste0(silly, ".gcl"))$attributes$FK_FISH_ID} )
str(ids)

# Are we missing metadata for fish we have genotyped?
table(unlist(ids) %in% spring_troll.df$Dna.Specimen.No)
# FALSE TRUE
# 252 9004

# Which years are missing metadata
# (year digits are parsed out of the "KTROL<yy>SP" silly prefix on the names)
table(sapply(names(unlist(ids)[!unlist(ids) %in% spring_troll.df$Dna.Specimen.No]), function(id) {
  unlist(strsplit(x = unlist(strsplit(x = id, split = "KTROL"))[2], split = "SP"))[1]
} ))
# 10 11 12 13 15 16
# 1 2 6 1 1 241

# Paste the missing fish into ASL .csv to see what project they are from
# NOTE(review): writeClipboard is Windows-only — fine here, non-portable elsewhere
writeClipboard(as.character(unlist(ids)[!unlist(ids) %in% spring_troll.df$Dna.Specimen.No]))
# All of the 2016 fish are from "District 108 Spring Troll" project
# E-mailed Anne to see if it is safe to assume that they were all caught in 108

# Match data up by year and look at district breakdowns
# Fish with no ASL match get NA attributes (match() returns NA)
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  match.yr <- match(my.gcl$attributes$FK_FISH_ID, spring_troll.df$Dna.Specimen.No)
  # table(spring_troll.df$Year[match.yr])
  my.gcl$attributes$StatWeek <- spring_troll.df$Stat.Week[match.yr]
  my.gcl$attributes$Port <- spring_troll.df$Port.Code[match.yr]
  my.gcl$attributes$Quadrant <- spring_troll.df$Quadrant[match.yr]
  my.gcl$attributes$District <- spring_troll.df$District[match.yr]
  my.gcl$attributes$SubDistrict <- spring_troll.df$Sub.District.char[match.yr]
  my.gcl$attributes$StatArea <- spring_troll.df$Stat.Area[match.yr]
  my.gcl$attributes$Age <- spring_troll.df$Age.European[match.yr]
  my.gcl$attributes$LengthType <- spring_troll.df$Length.Type[match.yr]
  my.gcl$attributes$Length <- spring_troll.df$Length.Millimeters[match.yr]
  assign(x = paste0("match.20", yr), value = match.yr)
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

# District breakdown per year, including fish with no metadata (NA)
sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  addmargins(table(my.gcl$attributes$District, useNA = "always"))
} )

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Data QC/Massage ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
require(xlsx)

Spring10_17_Strata <- paste0("KTROL", 10:17, "SP")
dput(x = Spring10_17_Strata, file = "Objects/Spring10_17_Strata.txt")

# QC accounting matrix: fish counts at each filtering stage, per stratum
Spring10_17_Strata_SampleSizes <- matrix(data = NA, nrow = length(Spring10_17_Strata), ncol = 4, dimnames = list(Spring10_17_Strata, c("Genotyped", "Missing", "Duplicate", "Final")))

#### Check loci
## Get sample size by locus
Original_Spring10_17_Strata_SampleSizebyLocus <- SampSizeByLocus.GCL(sillyvec = Spring10_17_Strata, loci = GAPSLoci_reordered)
min(Original_Spring10_17_Strata_SampleSizebyLocus)  ## 991

# Worst-to-best locus success ratio within each stratum
apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, min) / apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, max)  ## Good, 0.928

Original_Spring10_17_Strata_PercentbyLocus <- apply(Original_Spring10_17_Strata_SampleSizebyLocus, 1, function(row) {row / max(row)} )

which(apply(Original_Spring10_17_Strata_PercentbyLocus, 2, min) < 0.8)  # no re-runs!
require(lattice) new.colors <- colorRampPalette(c("black", "white")) levelplot(t(Original_Spring10_17_Strata_PercentbyLocus), col.regions = new.colors, at = seq(from = 0, to = 1, length.out = 100), main = "% Genotyped", xlab = "SILLY", ylab = "Locus", scales = list(x = list(rot = 90)), aspect = "fill") # aspect = "iso" will make squares #### Check individuals ### Initial ## Get number of individuals per silly before removing missing loci individuals Original_Spring10_17_Strata_ColSize <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n) Spring10_17_Strata_SampleSizes[, "Genotyped"] <- Original_Spring10_17_Strata_ColSize ### Missing ## Remove individuals with >20% missing data Spring10_17_Strata_MissLoci <- RemoveIndMissLoci.GCL(sillyvec = Spring10_17_Strata, proportion = 0.8) dput(x = Spring10_17_Strata_MissLoci, file = "Objects/Spring10_17_Strata_MissLoci.txt") ## Get number of individuals per silly after removing missing loci individuals ColSize_Spring10_17_Strata_PostMissLoci <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n) Spring10_17_Strata_SampleSizes[, "Missing"] <- Original_Spring10_17_Strata_ColSize - ColSize_Spring10_17_Strata_PostMissLoci ### Duplicate ## Check within collections for duplicate individuals. 
# Flag pairs of individuals sharing >= 95% of genotypes within a stratum
# (likely resampled/duplicated fish)
Spring10_17_Strata_DuplicateCheck95MinProportion <- CheckDupWithinSilly.GCL(sillyvec = Spring10_17_Strata, loci = GAPSLoci_reordered, quantile = NULL, minproportion = 0.95)
Spring10_17_Strata_DuplicateCheckReportSummary <- sapply(Spring10_17_Strata, function(x) Spring10_17_Strata_DuplicateCheck95MinProportion[[x]]$report)
Spring10_17_Strata_DuplicateCheckReportSummary
dput(x = Spring10_17_Strata_DuplicateCheckReportSummary, file = "Objects/Spring10_17_Strata_DuplicateCheckReportSummary.txt")

## Remove duplicate individuals
Spring10_17_Strata_RemovedDups <- RemoveDups.GCL(Spring10_17_Strata_DuplicateCheck95MinProportion)

## Get number of individuals per silly after removing duplicate individuals
ColSize_Spring10_17_Strata_PostDuplicate <- sapply(paste0(Spring10_17_Strata, ".gcl"), function(x) get(x)$n)
Spring10_17_Strata_SampleSizes[, "Duplicate"] <- ColSize_Spring10_17_Strata_PostMissLoci-ColSize_Spring10_17_Strata_PostDuplicate

### Final
Spring10_17_Strata_SampleSizes[, "Final"] <- ColSize_Spring10_17_Strata_PostDuplicate
Spring10_17_Strata_SampleSizes
dput(x = Spring10_17_Strata_SampleSizes, file = "Objects/Spring10_17_Strata_SampleSizes.txt")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Save PostQC .gcl's as back-up:
# dir.create("Raw genotypes/PooledCollections_PostQC")
invisible(sapply(Spring10_17_Strata, function(silly) {
  dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC/" , silly, ".txt"))
} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hard reset: wipe the session and rebuild it from the dput/dget snapshots
# written above. This guarantees a clean starting point for the next stage.
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")
# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
# Each file in Objects/ is dget() into the global env under its basename
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC/", silly, ".txt")), pos = 1)} )); beep(2)
objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Tables by District ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pivots to show number of fish by district by year that we have genotypes for
t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  addmargins(table(my.gcl$attributes$District, useNA = "always"))
} ))

# Anne's data only has quadrant projects
table(spring_troll.df$Year, spring_troll.df$Project)

# Read in OceanAK ASL data to add District info to D108 and D111 projects
spring_troll_oceanAK.df <- read.csv(file = "Harvest - Detailed ASL Samples 2010-2017.csv")
str(spring_troll_oceanAK.df)
# Which years?
# NOTE(review): "ï..Year" is a UTF-8 BOM artifact in the first CSV column name
# (read.csv without fileEncoding = "UTF-8-BOM"); fragile — consider fixing the
# read.csv call or using check.names-safe access. Confirm before changing.
table(spring_troll_oceanAK.df$ï..Year, spring_troll_oceanAK.df$Project) # 2012 and 2016

# Fish IDs
ids_D108 <- spring_troll_oceanAK.df$Dna.Specimen.No[spring_troll_oceanAK.df$Project == "District 108 Spring Troll"]
ids_D111 <- spring_troll_oceanAK.df$Dna.Specimen.No[spring_troll_oceanAK.df$Project == "District 111 Spring Troll"]
table(KTROL12SP.gcl$attributes$FK_FISH_ID %in% ids_D108)
table(KTROL12SP.gcl$attributes$FK_FISH_ID %in% ids_D111)
table(KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108)

# Create character vector of district
# (factor -> character so districts missing from the factor levels can be added)
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  my.gcl$attributes$District.chr <- as.character(my.gcl$attributes$District)
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

# Add 108 data from 2016
# Backfill district for 2016 fish from the "District 108 Spring Troll" project
# using the OceanAK detail records
table(KTROL16SP.gcl$attributes$District)
table(KTROL16SP.gcl$attributes$District.chr)
match.oceanAK.2016 <- match(KTROL16SP.gcl$attributes$FK_FISH_ID[KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108], spring_troll_oceanAK.df$Dna.Specimen.No)
KTROL16SP.gcl$attributes$District.chr[KTROL16SP.gcl$attributes$FK_FISH_ID %in% ids_D108] <- as.character(spring_troll_oceanAK.df$District[match.oceanAK.2016])
table(KTROL16SP.gcl$attributes$District.chr)
levels(KTROL10SP.gcl$attributes$District)

# Create new factor with all districts
# Shared level set across years so per-year tables have identical columns
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  my.gcl$attributes$District.fac <- factor(x = my.gcl$attributes$District.chr, levels = c(" ", as.character(101:115), "183"))
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

# Pivot of years by district
addmargins(t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  table(my.gcl$attributes$District.fac, useNA = "always")
} )))
KTROL17SP.gcl$attributes$FK_FISH_ID[is.na(KTROL17SP.gcl$attributes$District.fac)]

# Add column in Anne's sheet to denote which samples have been genotyped
ids.genotyped <- sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  my.gcl$attributes$FK_FISH_ID
})
str(ids.genotyped)
table(unlist(ids.genotyped) %in% spring_troll.df$Dna.Specimen.No) # all but District 108 fish
table(spring_troll.df$Dna.Specimen.No %in% unlist(ids.genotyped)) # not all fish from District 171-174 were genotyped (and passed data QC)
spring_troll.df$Genotyped <- spring_troll.df$Dna.Specimen.No %in% unlist(ids.genotyped)
addmargins(table(spring_troll.df$Year, spring_troll.df$District, spring_troll.df$Genotyped))
# options(java.parameters = "-Xmx100g")
# write.xlsx(x = spring_troll.df, file = "2010-2017 Spring troll asl by district.xlsx", sheetName = "D114com_genotyped", append = TRUE)
write.table(x = spring_troll.df, file = "2010-2017 Spring troll asl by district_original_data_genotyped.txt", row.names = FALSE)
# save.image("SpringTroll2010-2017.RData")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Look at how representative our sampling is by SW
# Read in ASL
spring_troll.df <- read.table(file = "2010-2017 Spring troll asl by district_original_data_genotyped.txt", stringsAsFactors = FALSE, header = TRUE)
str(spring_troll.df)
Spring171.Vials <- spring_troll.df$Dna.Specimen.No[spring_troll.df$Year == 2013 & spring_troll.df$Quadrant == 171 & spring_troll.df$Genotyped == TRUE]
# NOTE(review): SpringNO.Vials is not defined anywhere in this section —
# presumably dget() from Objects/ by the loader above; verify it exists there.
SpringNO.Vials # what was run in 2013
all(SpringNO.Vials %in% Spring171.Vials) # TRUE

# Number of samples by District and SW
table(spring_troll.df$District[spring_troll.df$Genotyped == TRUE & spring_troll.df$Year == 2013], spring_troll.df$Stat.Week[spring_troll.df$Genotyped == TRUE & spring_troll.df$Year == 2013])

# Read in harvest
require(xlsx)
spring_harvest.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "CE001353", startRow = 23, header = TRUE)
# spring_harvest.df <- read.csv(file = "CE001353.csv", skip = 22, row.names = NULL, stringsAsFactors = FALSE)
str(spring_harvest.df)

require(tidyr)
require(dplyr)
require(reshape2)

# Specific to 2013
# Harvest (N.Catch) by area x stat week, and genotyped samples by district x SW
spring_harvest.df %>% filter(Year == 2013) %>% cast(Area.Value ~ Time.Value, value = "N.Catch")
spring_troll.df %>% filter(Year == 2013) %>% cast(District ~ Stat.Week, value = "Genotyped", fun.aggregate = sum)

# What would sample sizes be if we subsampled?
D113_harvest_tall <- spring_harvest.df %>% filter(Area.Value == 113) %>% select(Year, Time.Value, N.Catch)
D113_samples_tall <- spring_troll.df %>% filter(District == 113) %>% select(Year, Stat.Week, Genotyped) %>% group_by(Year, Stat.Week) %>% summarise(n = sum(Genotyped))

# Left join to combine harvest and n_samples
D113_harvest_samples <- full_join(x = D113_samples_tall, y = D113_harvest_tall, by = c("Year" = "Year", "Stat.Week" = "Time.Value"))

# What is sample size for max harvest stat week
D113_harvest_samples %>% group_by(Year) %>% summarise(max_n = n[which.max(N.Catch)])

# Look at IA to see if % Alaska stocks < over time
require(ggplot2)
spring_troll.df %>% filter(District == 113 & Year == 2013) %>% ggplot(aes(Sample.Date, Dna.Specimen.No)) + geom_point()

IndividualAssignmentSummary.GCL(GroupNames = GroupNames4Pub, groupvec = GroupVec4, mixnames = "D113Troll_2013", BAYESoutputDir = "BAYES/Output", nchains = 5, nreps = 40000, burn = 0.5, thin = 100)
# This failed for 2 reasons
# 1) since there are more than 300 pops in the baseline, the CLS files wraps rows (i.e. row 1 has 300 columns, row 2 has the remaining 357)
# 2) a mixture individual was removed due to missing allele, while it indicates which individual it was in the .SUM file, it will be a pain to figurue it all out.

## Write functions to visualize proportional harvest by SW for specific District
# Heatmap of each year's harvest distributed across stat weeks (rows sum to 1).
# NOTE(review): harvest_level.f and samples_level.f are near-duplicates —
# candidates for a shared helper if this script is ever refactored.
harvest_level.f <- function(district) {
  require(tidyr)
  require(dplyr)
  require(reshape2)
  D_harvest <- spring_harvest.df %>% filter(Area.Value == district) %>% select(Year, Time.Value, N.Catch) %>% spread(Time.Value, N.Catch)
  # Visualize how unrepresentative samples are
  require(lattice)
  new.colors <- colorRampPalette(c("white", "darkgreen"))
  # Row-normalize so each year shows proportional harvest by stat week
  data.mat <- as.matrix(D_harvest[, -1] / rowSums(D_harvest[, -1], na.rm = TRUE))
  rownames(data.mat) <- 2010:2017
  levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = max(data.mat, na.rm = TRUE), length.out = 100), main = paste0("Total Harvest D", district), xlab = "Year", ylab = "Stat Week", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...)
  { panel.fill("black")   # black background marks NA (no harvest recorded)
    panel.levelplot(...)} )  # aspect = "iso" will make squares
}

## Write functions to visualize proportional samples by SW for specific District
# Same heatmap but for genotyped-sample counts instead of harvest
samples_level.f <- function(district) {
  require(tidyr)
  require(dplyr)
  require(reshape2)
  D_samples <- spring_troll.df %>% filter(District == district) %>% select(Year, Stat.Week, Genotyped) %>% group_by(Year, Stat.Week) %>% summarise(n = sum(Genotyped)) %>% spread(Stat.Week, n)
  # Visualize how unrepresentative samples are
  require(lattice)
  new.colors <- colorRampPalette(c("white", "darkgreen"))
  data.mat <- as.matrix(D_samples[, -1] / rowSums(D_samples[, -1], na.rm = TRUE))
  rownames(data.mat) <- 2010:2017
  levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = max(data.mat, na.rm = TRUE), length.out = 100), main = paste0("Total Samples D", district), xlab = "Year", ylab = "Stat Week", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...)
  { panel.fill("black")
    panel.levelplot(...)} )  # aspect = "iso" will make squares
}

harvest_level.f(district = 114)
samples_level.f(district = 114)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create Variable for Mixture ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dry-run the mixture definitions on 2015 before applying to all years
table(KTROL15SP.gcl$attributes$District.fac %in% 101:102)
table(KTROL15SP.gcl$attributes$District.fac %in% 103)
table(KTROL15SP.gcl$attributes$District.fac %in% 106:108 & KTROL15SP.gcl$attributes$StatArea != 10643)
table(KTROL15SP.gcl$attributes$District.fac %in% c(109:110, 112) & KTROL15SP.gcl$attributes$StatArea != 11265)
table(KTROL15SP.gcl$attributes$District.fac %in% 113)
table(KTROL15SP.gcl$attributes$District.fac %in% 114 | KTROL15SP.gcl$attributes$StatArea %in% c(11265, 11395, 11397))
table(KTROL15SP.gcl$attributes$District.fac %in% 183)

# Add new factor with mixtures
# Stat areas 10643/11265/11395/11397 are carved out of their districts and
# reassigned (11265 et al. go to the "114" mixture) — TBR boundary handling
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  my.gcl$attributes$Mixture <- NA
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 101:102] <- "101/102"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 103] <- "103"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 106:108 & my.gcl$attributes$StatArea != 10643] <- "106/107/108"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% c(109:110, 112) & my.gcl$attributes$StatArea != 11265] <- "109/110/112"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 113] <- "113"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 114 | my.gcl$attributes$StatArea %in% c(11265, 11395, 11397)] <- "114"
  my.gcl$attributes$Mixture[my.gcl$attributes$District.fac %in% 183] <- "183"
  my.gcl$attributes$Mixture <- factor(x = my.gcl$attributes$Mixture, levels = c("101/102", "103", "106/107/108", "109/110/112", "113", "114", "183"))
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

# Pivot of years by mixtures with NA
addmargins(t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  table(my.gcl$attributes$Mixture, useNA = "always")
} )))

# Pivot of years by mixtures without NA
addmargins(t(sapply(as.character(10:17), function(yr) {
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  table(my.gcl$attributes$Mixture)
} )))
# 101/102 103 106/107/108 109/110/112 113 114 183 Sum
# 10 148 0 10 16 296 242 0 712
# 11 152 0 193 70 459 111 0 985
# 12 128 0 257 132 301 168 0 986
# 13 133 0 105 97 251 117 497 1200
# 14 142 96 126 94 209 69 377 1113
# 15 109 100 156 112 177 24 316 994
# 16 51 102 135 140 104 78 95 705
# 17 50 101 125 90 187 61 0 614
# Sum 913 399 1107 751 1984 870 1285 7309

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Save PostQC_Metadata .gcl's as back-up:
# dir.create("Raw genotypes/PooledCollections_PostQC_Metadata")
invisible(sapply(Spring10_17_Strata, function(silly) {
  dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata/" , silly, ".txt"))
} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Hard reset and reload from the PostQC_Metadata snapshot
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")
# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata/", silly, ".txt")), pos = 1)} )); beep(2)
objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Change rownames for scores and counts to FK_FISH_ID ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## All of the .gcl functions that rely on IDs look in the rownames for scores,
## FK_FISH_ID and rownames(scores) + rownames(counts) need to match!!!
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  rownames(my.gcl$scores) <- rownames(my.gcl$counts) <- as.character(my.gcl$attributes$FK_FISH_ID)
  assign(x = paste0("KTROL", yr, "SP.gcl"), value = my.gcl)
}

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Save PostQC_Metadata .gcl's as back-up:
# dir.create("Raw genotypes/PooledCollections_PostQC_Metadata_Rename")
invisible(sapply(Spring10_17_Strata, function(silly) {
  dput(x = get(paste(silly, ".gcl", sep = '')), file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata_Rename/" , silly, ".txt"))
} )); beep(8)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Clean workspace; dget .gcl objects and Locus Control ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
rm(list = ls(all = TRUE))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017/")
# This sources all of the new GCL functions to this workspace
source("C:/Users/krshedd/Documents/R/Functions.GCL.R")
source("H:/R Source Scripts/Functions.GCL_KS.R")

## Get objects
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
# SEAKobjects <- SEAKobjects[-which(SEAKobjects == "Vials" | SEAKobjects == "OLD_BAD_LOCUSCONTROL")]
SEAKobjects
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); beep(2)

## Get un-altered mixtures
invisible(sapply(paste0("KTROL", 10:17, "SP"), function(silly) {assign(x = paste0(silly, ".gcl"), value = dget(file = paste0("Raw genotypes/PooledCollections_PostQC_Metadata_Rename/", silly, ".txt")), pos = 1)} )); beep(2)
objects(pattern = "\\.gcl")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create Mixtures ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mixtures <- levels(KTROL10SP.gcl$attributes$Mixture)
dput(x = mixtures, file = "Objects/mixtures.txt")
# Map display levels ("101/102") to BAYES-safe mixture file names
mixtures.names <- setNames(object = c("D101102Troll", "D103Troll", "D106107108Troll", "D109110112Troll", "D113Troll", "D114Troll", "D183Troll"), nm = mixtures)
dput(x = mixtures.names, file = "Objects/mixtures.names.txt")

# dir.create("BAYES")
# dir.create("BAYES/Mixture")
# Loop over years and mixtures
# Note the silly-name trick: the loop variable is called my.gcl, so the GCL
# functions can be pointed at silly "my" (they append ".gcl" internally).
for(yr in 10:17){
  my.gcl <- get(paste0("KTROL", yr, "SP.gcl"))
  for(mix in mixtures) {
    IDs <- list("my" = na.omit(AttributesToIDs.GCL(silly = "my", attribute = "Mixture", matching = mix)))
    # Skip year x mixture cells with zero fish (e.g. no 103 before 2014)
    if(length(IDs[["my"]])) {
      invisible(CreateMixture.GCL(sillys = "my", loci = GAPSLoci_reordered, IDs = IDs, mixname = paste0(mixtures.names[mix], "_20", yr), dir = "BAYES/Mixture/", type = "BAYES", PT = FALSE))
    } # if IDS
  } # mixture within year
} # year

# Double check mixture files
# Total rows across all .mix files should equal total fish assigned a mixture
sum(sapply(list.files(path = "BAYES/Mixture/", full.names = TRUE), function(fle) {nrow(read.table(file = fle, header = FALSE))} ))

# Remove mixtures we do not intend to run!
setwd("BAYES/Mixture/")
unlink(x = c("D106107108Troll_2010.mix"))
setwd("V:/Analysis/1_SEAK/Chinook/Mixture/Spring Troll 2010-2017")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("BAYES/Baseline")
# Reuse the GAPS 357-pop baseline and associated objects from the SEAK17 run
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/BAYES/Baseline/GAPS357pops13loci.bse", to = "BAYES/Baseline/")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupVec26RG_357.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames26.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/SEAKPops357.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GAPS357PopsInits.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/WASSIPSockeyeSeeds.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/mixfortran.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/bayesfortran_357.txt", to = "Objects")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("BAYES/Control")
SEAKobjects <- list.files(path = "Objects", recursive = FALSE)
invisible(sapply(SEAKobjects, function(objct) {assign(x = unlist(strsplit(x = objct, split = ".txt")), value = dget(file = paste(getwd(), "Objects", objct, sep = "/")), pos = 1) })); rm(SEAKobjects); beep(2)

# Flat Pop prior
# Equal weight over all 357 baseline populations
GAPS357PopFlatPrior <- Prior.GCL(groupvec = 1:357, groupweights = rep(1/357, 357), minval = 0.001)
dput(x = GAPS357PopFlatPrior, file = "Objects/GAPS357PopFlatPrior.txt")

# Dump Control Files
GroupVec26RG_357 <- dget(file = "Objects/GroupVec26RG_357.txt")
SEAKPops357 <- dget(file = "Objects/SEAKPops357.txt")
all.mixtures <- sapply(list.files(path = "BAYES/Mixture"), function(mix) {unlist(strsplit(x = mix, split = ".mix"))[1]}, USE.NAMES = FALSE)
dput(x = all.mixtures, file = "Objects/all.mixtures.txt")
# One BAYES control file per mixture: 5 chains x 40k reps, flat pop prior
sapply(all.mixtures, function(Mix) {
  invisible(CreateControlFile.GCL(sillyvec = SEAKPops357, loci = GAPSLoci_reordered, mixname = Mix, basename = "GAPS357pops13loci", suffix = "", nreps = 40000, nchains = 5, groupvec = GroupVec26RG_357, priorvec = GAPS357PopFlatPrior, initmat = GAPS357PopsInits, dir = "BAYES/Control", seeds = WASSIPSockeyeSeeds, thin = c(1, 1, 100), mixfortran = mixfortran, basefortran = bayesfortran_357, switches = "F T F T T T F"))
} )

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("BAYES/Output")
sapply(all.mixtures, function(Mix) {
  invisible(dir.create(path = paste0("BAYES/Output/", Mix)))
})

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Compare BAYES and genetic_msa ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE(review): groupnames / mixname here are lowercase, unlike GroupNames26 /
# all.mixtures used below — presumably ad-hoc objects from an interactive
# session; verify they exist before re-running this comparison.
BAYES_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = 1:26, groupnames = groupnames, maindir = "BAYES/Output/", mixvec = mixname, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)
genetic_msa_Estimates
require(gplots)
barplot2(height = rbind(BAYES_Estimates$D114Troll_2010[, "mean"], genetic_msa_Estimates$mean), beside = TRUE) # very bad
legend("topright", legend = c("BAYES", "genetic_msa"), fill = c("red", "yellow"))

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Summarize BAYES ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Stock composition estimates for all mixtures at 26 reporting groups
Spring10_17_26RG_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = 1:26, groupnames = GroupNames26, maindir = "BAYES/Output", mixvec = all.mixtures, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)
# dir.create("Estimates objects")
dput(x = Spring10_17_26RG_Estimates, file = "Estimates objects/Spring10_17_26RG_Estimates.txt")
# Gelman-Rubin convergence check: any group with GR > 1.2 has not converged
sapply(Spring10_17_26RG_Estimates, function(mix) {table(mix[, "GR"] > 1.2)})

file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames4.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupNames4Pub.txt", to = "Objects")
file.copy(from = "V:/Analysis/1_SEAK/Chinook/Mixture/SEAK17/Objects/GroupVec4.txt", to = "Objects")

# Roll the 26 reporting groups up to 4 publication groups via GroupVec4
Spring10_17_4RG_Estimates <- CustomCombineBAYESOutput.GCL(groupvec = GroupVec4, groupnames = GroupNames4Pub, maindir = "BAYES/Output", mixvec = all.mixtures, prior = '', ext = "RGN", nchains = 5, burn = 0.5, alpha = 0.1, PosteriorOutput = FALSE)
dput(x = Spring10_17_4RG_Estimates, file = "Estimates objects/Spring10_17_4RG_Estimates.txt")
Spring10_17_4RG_Estimates <- dget(file = "Estimates objects/Spring10_17_4RG_Estimates.txt")

# Convergence and precision diagnostics at the 4-group level
sapply(Spring10_17_4RG_Estimates, function(mix) {table(mix[, "GR"] > 1.2)})
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[c("Alaska", "TBR"), "mean"]})
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "mean"]} )
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "sd"] / mix[, "mean"]} ) # CV
sapply(Spring10_17_4RG_Estimates, function(mix) {sum(mix[, "sd"] / mix[, "mean"] < 0.20)} ) # how many RGs with CV < 20%
sapply(Spring10_17_4RG_Estimates, function(mix) {mix[, "95%"] - mix[, "5%"]} ) # 90% CI range

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Read in Harvest and Sample Size Data ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
require(xlsx)
harvest.df <- read.xlsx(file = "2010-2017 Spring troll asl by district_gh_ks.xlsx", sheetName = "CE000522", startRow = 23, header = TRUE)
str(harvest.df)

# Assign every harvest record to a mixture stratum using the SAME district /
# stat-area rules applied to the genotype objects above — keep in sync!
harvest.df$Mixture <- NA
harvest.df$Mixture[harvest.df$District %in% 101:102] <- "101/102"
harvest.df$Mixture[harvest.df$District %in% 103] <- "103"
harvest.df$Mixture[harvest.df$District %in% 106:108 & harvest.df$Area.Value != 10643] <- "106/107/108"
harvest.df$Mixture[harvest.df$District %in% c(109:110, 112) & harvest.df$Area.Value != 11265] <- "109/110/112"
harvest.df$Mixture[harvest.df$District %in% 113] <- "113"
harvest.df$Mixture[harvest.df$District %in% 114 | harvest.df$Area.Value %in% c(11265, 11395, 11397)] <- "114"
harvest.df$Mixture[harvest.df$District %in% 183] <- "183"
harvest.df$Mixture <- factor(x = harvest.df$Mixture, levels = c("101/102", "103", "106/107/108", "109/110/112", "113", "114", "183"))
dput(x = harvest.df, file = "Objects/harvest.df.txt")

# Total catch per year x mixture
require(reshape)
harvest_mix.df <- aggregate(N.Catch ~ Year + Mixture, data = harvest.df, sum)
str(harvest_mix.df)
cast(harvest_mix.df, Year ~ Mixture, value = "N.Catch")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Sample sizes
# Count rows of each BAYES .mix file = fish actually analyzed per mixture
all.mixtures.samplesize <- sapply(all.mixtures, function(mix) {dim(read.table(file = paste0("BAYES/Mixture/", mix, ".mix")))[1]} )
dput(x = all.mixtures.samplesize, file = "Objects/all.mixtures.samplesize.txt")

mixtures.names
# Invert mixtures.names: BAYES name -> display level ("D103Troll" -> "103")
mixtures.names2 <- names(mixtures.names)
names(mixtures.names2) <- mixtures.names

# Build a lookup data.frame: one row per mixture file ("D113Troll_2013" etc.)
mixtures.df <- as.data.frame(t(sapply(all.mixtures, function(mix) {unlist(strsplit(x = mix, split = "_"))} )), stringsAsFactors = FALSE)
names(mixtures.df) <- c("Mixname", "Year")
mixtures.df$Year <- as.numeric(mixtures.df$Year)
mixtures.df$Full.Mixname <- all.mixtures
mixtures.df$Mixture <- factor(x = mixtures.names2[mixtures.df$Mixname], levels = levels(harvest_mix.df$Mixture))
mixtures.df$n <- all.mixtures.samplesize
dput(x = mixtures.df, file = "Objects/mixtures.df.txt")
str(mixtures.df)

# Reporting threshold: only mixtures with >= 100 fish
all.mixtures.n100 <- names(which(all.mixtures.samplesize >= 100))
dput(x = all.mixtures.n100, file = "Objects/all.mixtures.n100.txt")

# Subset data for n >= 100
Spring10_17_4RG_Estimates_n100 <- Spring10_17_4RG_Estimates[all.mixtures.n100]
dput(x = Spring10_17_4RG_Estimates_n100, file = "Estimates objects/Spring10_17_4RG_Estimates_n100.txt")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Merge harvest and sample size
# NOTE(review): merge order must leave rows aligned with Spring10_17_4RG_Estimates
# for the sapply() columns below to line up — verify row order after merge.
spring.estimates.df <- merge(x = harvest_mix.df, y = mixtures.df, by = c("Mixture", "Year"), all = FALSE)
spring.estimates.df$Alaska.mean.p <- sapply(Spring10_17_4RG_Estimates, function(mix) {mix["Alaska", "mean"]})
spring.estimates.df$TBR.mean.p <- sapply(Spring10_17_4RG_Estimates, function(mix) {mix["TBR", "mean"]})
# Expand proportions to fish: stock composition x total catch
spring.estimates.df$Alaska.mean.C <- spring.estimates.df$Alaska.mean.p * spring.estimates.df$N.Catch
spring.estimates.df$TBR.mean.C <- spring.estimates.df$TBR.mean.p * spring.estimates.df$N.Catch
round(cast(spring.estimates.df, Year ~ Mixture, value = "Alaska.mean.C"))
round(cast(spring.estimates.df, Year ~ Mixture, value = "TBR.mean.C"))
round(cast(spring.estimates.df, Year ~ Mixture, value = "n"))
dput(x = spring.estimates.df, file = "Objects/spring.estimates.df.txt")

harvest <- setNames(object = spring.estimates.df$N.Catch, nm = spring.estimates.df$Full.Mixname)
dput(x = harvest, file = "Objects/harvest.txt")

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Subset to only mixtures with >= 100 fish
spring.estimates.n100.df <- spring.estimates.df
spring.estimates.n100.df[spring.estimates.n100.df$n < 100, c("Alaska.mean.p", "Alaska.mean.C", "TBR.mean.p", "TBR.mean.C")] <- NA

# Heatmap of total Catch
require(lattice)
new.colors <- colorRampPalette(c("white", "darkgreen"))
data.mat <- as.matrix(round(cast(spring.estimates.n100.df, Year ~ Mixture, value = "N.Catch")))
# data.mat[is.na(data.mat)] <- 0
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = max(data.mat, na.rm = TRUE), length.out = 100), main = "Total Catch", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...)
{ panel.fill("black")   # black background marks NA cells
  panel.levelplot(...)} )  # aspect = "iso" will make squares

# Heatmap of mean Alaska
require(lattice)
new.colors <- colorRampPalette(c("white", "darkblue"))
data.mat <- as.matrix(cast(spring.estimates.n100.df, Year ~ Mixture, value = "Alaska.mean.p")) * 100
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = 100, length.out = 100), main = "Mean Alaska %", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...)
{ panel.fill("black")
  panel.levelplot(...)} )  # aspect = "iso" will make squares

# Heatmap of mean TBR %
require(lattice)
new.colors <- colorRampPalette(c("white", "darkblue"))
data.mat <- as.matrix(cast(spring.estimates.n100.df, Year ~ Mixture, value = "TBR.mean.p")) * 100
levelplot(data.mat, col.regions = new.colors, at = seq(from = 0, to = 100, length.out = 100), main = "Mean TBR %", xlab = "Year", ylab = "District Area", scales = list(x = list(rot = 90)), aspect = "fill", panel = function(...)
{ panel.fill("black")
  panel.levelplot(...)} )  # aspect = "iso" will make squares

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#### Create 4RG Summary Tables ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# dir.create("Estimates tables")
require(xlsx)
EstimatesStats <- Spring10_17_4RG_Estimates_n100
SampSizes <- all.mixtures.samplesize
HarvestVec <- harvest
PubNames <- setNames(object = paste("Spring Troll", spring.estimates.df$Year, "District(s)", spring.estimates.df$Mixture), nm = spring.estimates.df$Full.Mixname)

# One formatted sheet per n>=100 mixture: title row, then mean/SD/median/90% CI
# NOTE(review): output filename says "SpringTroll2017" although the data span
# 2010-2017 — probably copied from the SEAK17 script; confirm intended name.
for(mix in all.mixtures.n100) {
  TableX <- matrix(data = "", nrow = 7, ncol = 7)
  TableX[1, 1] <- paste0(PubNames[mix], " (n=", SampSizes[mix], ", catch=", formatC(x = HarvestVec[mix], digits = 0, big.mark = ",", format = "f"), ")")
  TableX[2, 6] <- "90% CI"
  TableX[3, 2:7] <- c("Reporting Group", "Mean", "SD", "Median", "5%", "95%")
  TableX[4:7, 1] <- 1:4
  TableX[4:7, 2] <- rownames(EstimatesStats[[mix]])
  TableX[4:7, 3:7] <- formatC(x = EstimatesStats[[mix]][, c("mean", "sd", "median", "5%", "95%")], digits = 3, format = "f")
  # NOTE(review): paste() default sep plus the leading space in " 4RG" gives a
  # double space in the sheet name ("mix  4RG") — likely unintended.
  write.xlsx(x = TableX, file = "Estimates tables/SpringTroll2017_4RG_Estimates.xlsx", col.names = FALSE, row.names = FALSE, sheetName = paste(mix, " 4RG"), append = TRUE)
}
# save.image("SpringTroll2010-2017.RData")
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reproduces.r
\name{mate.clone}
\alias{mate.clone}
\title{Clone process}
\usage{
mate.clone(pop1, pop1.geno.id, pop1.geno, incols = 2, ind.stay, num.prog)
}
\arguments{
\item{pop1}{population information of population1}

\item{pop1.geno.id}{genotype id of population1}

\item{pop1.geno}{genotype matrix of population1}

\item{incols}{the column number of an individual in the input genotype matrix, it can be 1 or 2}

\item{ind.stay}{selected individuals regarded as parents}

\item{num.prog}{litter size of dams}
}
\value{
population information and genotype matrix of population after the clone process
}
\description{
Build date: Nov 14, 2018
Last update: Aug 1, 2019
}
\examples{
basepop <- getpop(nind = 100, from = 1, ratio = 0.1)
basepop.geno <- genotype(num.marker = 48353, num.ind = 100, verbose = TRUE)
ind.stay <- list(sir = basepop$index, dam = basepop$index)
pop.clone <- mate.clone(pop1 = basepop, pop1.geno.id = basepop$index,
    pop1.geno = basepop.geno, ind.stay = ind.stay, num.prog = 2)
pop <- pop.clone$pop
geno <- pop.clone$geno
str(pop)
str(geno)
}
\author{
Dong Yin
}
/man/mate.clone.Rd
permissive
kant/SIMER
R
false
true
1,168
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reproduces.r \name{mate.clone} \alias{mate.clone} \title{Clone process} \usage{ mate.clone(pop1, pop1.geno.id, pop1.geno, incols = 2, ind.stay, num.prog) } \arguments{ \item{pop1}{population information of population1} \item{pop1.geno.id}{genotype id of population1} \item{pop1.geno}{genotype matrix of population1} \item{incols}{the column number of an individual in the input genotype matrix, it can be 1 or 2} \item{ind.stay}{selected individuals regarded as parents} \item{num.prog}{litter size of dams} } \value{ population information and genotype matrix of population after clone procecss } \description{ Build date: Nov 14, 2018 Last update: Aug 1, 2019 } \examples{ basepop <- getpop(nind = 100, from = 1, ratio = 0.1) basepop.geno <- genotype(num.marker = 48353, num.ind = 100, verbose = TRUE) ind.stay <- list(sir = basepop$index, dam = basepop$index) pop.clone <- mate.clone(pop1 = basepop, pop1.geno.id = basepop$index, pop1.geno = basepop.geno, ind.stay = ind.stay, num.prog = 2) pop <- pop.clone$pop geno <- pop.clone$geno str(pop) str(geno) } \author{ Dong Yin }
setwd("myDirectory") #Sets the working directory #Reads the file #Separatory is ";" #Header is present in the file, therefore, arg header is set to TRUE #Missing values encoded as "?", therefore "?" read as NA x <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors= FALSE, na.strings = "?") #Convert dates to the Date class x$Date <- as.Date(x$Date, "%d/%m/%Y") #Convert times to the Time class x$Time <- strptime(x$Time, "%H:%M:%S") #first day whose data is going to be used as a variable of class Date day1 = as.Date("2007-02-01") #second day whose data is going to be used as a variable of class Date day2 = as.Date("2007-02-02") #Subset the data from the given days and save it in a new variable y y <- subset(x, x[,1] == day1 | x[,1] == day2) #Open a png device setting the height and width to 480 pixels each png("plot1.png", height = 480, width=480) #Use hist function to create the histogram with the arguments to reproduce the #image on the course website hist(y$Global_active_power, col = 2, main = "Global Active Power", ylab = "Frequency", xlab = "Global Active Power (kilowatts)") #Close the graphic device and return the file handle to OS dev.off()
/plot1.R
no_license
iWaNtToLeArNmOrE/ExData_Plotting1
R
false
false
1,233
r
setwd("myDirectory") #Sets the working directory #Reads the file #Separatory is ";" #Header is present in the file, therefore, arg header is set to TRUE #Missing values encoded as "?", therefore "?" read as NA x <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors= FALSE, na.strings = "?") #Convert dates to the Date class x$Date <- as.Date(x$Date, "%d/%m/%Y") #Convert times to the Time class x$Time <- strptime(x$Time, "%H:%M:%S") #first day whose data is going to be used as a variable of class Date day1 = as.Date("2007-02-01") #second day whose data is going to be used as a variable of class Date day2 = as.Date("2007-02-02") #Subset the data from the given days and save it in a new variable y y <- subset(x, x[,1] == day1 | x[,1] == day2) #Open a png device setting the height and width to 480 pixels each png("plot1.png", height = 480, width=480) #Use hist function to create the histogram with the arguments to reproduce the #image on the course website hist(y$Global_active_power, col = 2, main = "Global Active Power", ylab = "Frequency", xlab = "Global Active Power (kilowatts)") #Close the graphic device and return the file handle to OS dev.off()
#generate initial graph set as linear numerical code a<-alkenum(wordlength,maxedgevec) #convert codes in a to connectivity matrices, marking ill-written codes with remaining zeros. The matrices are "igraph-ready" b<-code2cm(a,wordlength) #Use igraph package to filter out isomorphic graphs c<-isofilter(b,a) #display all unique isomers c[[1]]
/workflow.R
no_license
matthewmaclennan/R-WLN-DAST-BC
R
false
false
343
r
#generate initial graph set as linear numerical code a<-alkenum(wordlength,maxedgevec) #convert codes in a to connectivity matrices, marking ill-written codes with remaining zeros. The matrices are "igraph-ready" b<-code2cm(a,wordlength) #Use igraph package to filter out isomorphic graphs c<-isofilter(b,a) #display all unique isomers c[[1]]
# Dirty data project # Task 4 - Halloween Candy # This is an R script to clean the raw input data ------------------------------------------------- # 1 Reading in the raw data ## installing packages in the terminal: e.g. install.packages("here") ## uploading the libraries library(here) library(readr) library(readxl) library(janitor) library(tidyverse) ## Test where the top level of the project directory is here::here() ## Reading in the data, changing the column heading to snake_case style bbc_2015 <- read_excel("raw_data/boing-boing-candy-2015.xlsx") %>% clean_names() bbc_2016 <- read_excel("raw_data/boing-boing-candy-2016.xlsx") %>% clean_names() bbc_2017 <- read_excel("raw_data/boing-boing-candy-2017.xlsx") %>% clean_names() # 2 Cleaning the data ## combining the three data frames together (they have all different amount of rows and columns) ## to see the differences in naming across the three data frames, I create a table using column heading names from each data frame length(bbc_2015) # 124 length(bbc_2016) # 123 <--- to create a table, need to match the length of the longest (124) length(bbc_2017) # 120 <--- column_names <- tibble(names(bbc_2015), c(names(bbc_2016), NA), c(names(bbc_2017), NA, NA, NA, NA)) ## creating vector 'year' so the observations can be identified by the year they came from ## renaming candy heading names to match across the three data frames ### 2015 year_vector <- c(rep(2015, times = nrow(bbc_2015))) bbc_2015 <- bbc_2015 %>% rename(age = how_old_are_you, going_out_trick_or_treating_yourself = are_you_going_actually_going_trick_or_treating_yourself, bonkers_the_candy = bonkers, hersheys_kisses = hershey_s_kissables, hersheys_milk_chocolate = hershey_s_milk_chocolate, licorice_yes_black = licorice, sweetums_a_friend_to_diabetes = sweetums, "100_grand_bar" = x100_grand_bar) %>% mutate(timestamp = year_vector) %>% rename(year = timestamp) ### 2016 year_vector <- c(rep(2016, times = nrow(bbc_2016))) bbc_2016 <- bbc_2016 %>% rename(age = 
how_old_are_you, going_out_trick_or_treating_yourself = are_you_going_actually_going_trick_or_treating_yourself, country = which_country_do_you_live_in, state = which_state_province_county_do_you_live_in, gender = your_gender, box_o_raisins = boxo_raisins, hersheys_milk_chocolate = hershey_s_milk_chocolate, "100_grand_bar" = x100_grand_bar) %>% mutate(timestamp = year_vector) %>% rename(year = timestamp) ### 2017 #### getting rid of 'q1_', 'q2_', ... prefix in column headings colnames(bbc_2017) <- gsub(pattern = "q[0-9]*_", '', colnames(bbc_2017)) year_vector <- c(rep(2017, times = nrow(bbc_2017))) bbc_2017 <- bbc_2017 %>% rename(state = state_province_county_etc, anonymous_brown_globs_that_come_in_black_and_orange_wrappers = anonymous_brown_globs_that_come_in_black_and_orange_wrappers_a_k_a_mary_janes, box_o_raisins = boxo_raisins, hersheys_milk_chocolate = hershey_s_milk_chocolate) %>% mutate(year = year_vector) # joining the three data frames by rows join_all <- bind_rows(bbc_2015, bbc_2016, bbc_2017) ## subsetting candy columns by keeping only those columns containing JOY, DESPAIR or MEH candies <- join_all %>% mutate_if(is.character, ~replace(., is.na(.), "MISSING")) %>% mutate_all(~str_detect(. 
, "JOY|DESPAIR|MISSING|MEH")) %>% summarise_all(~sum(., na.rm = TRUE)) %>% pivot_longer(cols = everything(), names_to = 'col_names', values_to = 'values') %>% filter(values == nrow(join_all)) %>% select(col_names) %>% pull() # removing columns that do not sound candy to me candies <- candies[!candies %in% c("abstained_from_m_ming", "bonkers_the_board_game", "cash_or_other_forms_of_legal_tender", "chardonnay", "dental_paraphenalia", "hugs_actual_physical_hugs", "generic_brand_acetaminophen", "person_of_interest_season_3_dvd_box_set_not_including_disc_4_with_hilarious_outtakes", "please_list_any_items_not_included_above_that_give_you_despair", "real_housewives_of_orange_county_season_9_blue_ray", "vicodin")] join_all <- join_all %>% select(year, age, gender, country, state, going_out_trick_or_treating_yourself, all_of(candies)) # cleaning the data points in columns age and country ## age ### age is data class character, also contains other then number values, e. g. 30's, enough, very etc. ### I am going to change it to integer and drop age outliers join_all <- join_all %>% mutate( age = str_extract(age, "\\d+") %>% as.numeric(age)) %>% mutate( age = if_else(age < 4, NA_real_, age), age = if_else(age > 99, NA_real_, age)) ### checking all the unique values, age range is now 4 to 99 # join_all$age %>% # unique() %>% # sort() ## country ### checking all distinct values in column country - so many! Here I would recommend to the survey authors to use a "choose from" type of questionnaire.. # join_all_candy$country %>% # unique() ### renaming typos and gathering all the nonsense ones in value "others" join_all <- join_all %>% mutate( country = recode(country, "usa" = "USA", "US" = "USA", "United States of America" = "USA", "uSA" = "USA", "united states" = "USA", "Canada`" = "Canada", "canada" = "Canada", "United States" = "USA", "us" = "USA", "france" = "France", "USSA" = "USA", "U.S.A." 
= "USA", "A tropical island south of the equator" = "other", "england" = "UK", "uk" = "UK", "United Kingdom" = "UK", "Neverland" = "other", #!!!!!!!!!!!!! "USA!" = "USA", "this one" = "USA", "USA (I think but it's an election year so who can really tell)" = "USA", "51.0" = "other", "Usa" = "USA", "U.S." = "USA", "Us" = "USA", "America" = "USA", "Units States" = "USA", "belgium" = "Belgium", "croatia"= "Croatia", "United states" = "USA", "England" = "UK", "USA USA USA" = "USA", "the best one - usa" = "USA", "USA! USA! USA!" = "USA", "47.0" = "other", "españa" = "Spain", "u.s." = "USA", "there isn't one for old men" = "other", #!!!!!!!!!!!!! "one of the best ones" = "other", "The Yoo Ess of Aaayyyyyy" = "USA", "United Kindom" = "UK", "hungary" = "Hungary", "united states of america" = "USA", "Somewhere" = "other", "54.0" = "other", "44.0" = "other", "god's country"= "USA", "USA!!!!!!" = "USA", "EUA" = "other", "USA! USA!" = "USA", "45.0" = "other", "sweden" = "Sweden", "United Sates" = "USA", "Sub-Canadian North America... 'Merica" = "USA", "The Netherlands" = "Netherlands", "Trumpistan" = "USA", #!!!!!!!!!!!!! "U.s." = "USA", "Merica" = "USA", "germany" = "Germany", "See above" = "other", "UNited States" = "USA", "kenya" = "Kenya", "30.0" = "other", "The republic of Cascadia" = "Cascadia", "United Stetes" = "USA", "america" = "USA", "Not the USA or Canada" = "other", "USA USA USA USA" = "USA", "United States of America" = "USA", "netherlands" = "Netherlands", "Denial" = "other", "United State" = "USA", "United staes" = "USA", "u.s.a." = "USA", "USAUSAUSA"= "USA", "35" = "other", "finland" = "Finland", "unhinged states" = "USA", "US of A" = "USA", "Unites States" = "USA", "The United States" = "USA", "North Carolina" = "USA", "Unied States" = "USA", "Europe" = "other", "Earth" = "other", "U S" = "USA", "U.K." = "UK", "The United States of America" = "USA", "unite states" = "USA", "46" = "other", "cascadia" = "Cascadia", "insanity lately" = "other", "USA? 
Hard to tell anymore.." = "USA", "'merica" = "USA", "usas" = "USA", "Pittsburgh" = "USA", "45" = "other", "32" = "other", "australia" = "Australia", "A" = "other", "Can" = "Canada", "Canae" = "Canada", "New York" = "USA", "California" = "USA", "USa" = "USA", "South africa" = "South Africa", "I pretend to be from Canada, but I am really from the United States." = "USA", #!!!!!!!!!!!!! "Uk" = "UK", "United Stated" = "USA", "Ahem....Amerca" = "USA", "UD" = "other", "New Jersey" = "USA", "CANADA" = "Canada", "United ststes" = "USA", "United Statss" = "USA", "endland" = "UK", "Atlantis" = "other", "murrika" = "USA", "USAA" = "USA", "Alaska" = "USA", "united States" = "USA", "soviet canuckistan" = "other", "N. America" = "USA", "hong kong"= "China", "spain" = "Spain", "Hong Kong" = "China", "Narnia" = "other", #!!!!!!!!!!!!! "u s a" = "USA", "United Statea" = "USA", "united ststes" = "USA", "1" = "other", "subscribe to dm4uz3 on youtube" = "other", "United kingdom" = "UK", "USA USA USA!!!!" = "USA", "I don't know anymore" = "other", "Fear and Loathing" = "other", "Scotland" = "UK", "Korea" = "South Korea", "Murica" = "USA" ) ) ### checking all the unique values country (sorted alphabetically), ### we have now 36 countries and 1 other category represented in the data frame # join_all$country %>% # unique() %>% # sort() # HURRAY! 
clean_data data frame nearly ready for analysis # just bringing the non candy columns forward and sorting the candies alphabetically candy_clean <- join_all[,order(colnames(join_all))] %>% relocate(year, .before = 1) %>% relocate(age, .after = 1) %>% relocate(gender, .after = 2) %>% relocate(country, .after = 3) %>% relocate(state, .after = 4) %>% relocate(going_out_trick_or_treating_yourself, .after = 5) View(candy_clean) # saving the new clean_data data frame as a csv file write_csv(candy_clean, path = "clean_data/candy_clean.csv") #### note, to have the data frame perfectly clean, I should also gather all the candy types in just one column #### this step would look like this, but for the analysis purpose, I want to keep both # pivoting the data longer to have the data tidy for some of the analysis # candy_clean_longer <- candy_clean %>% # pivot_longer( # cols = ("100_grand_bar":"york_peppermint_patties"), # names_to = "candy", # values_to = "rating" # )
/task_4/data_cleaning_scripts/data_cleaning_script_task_4.R
no_license
LenkaRo/dirty_data_project
R
false
false
19,356
r
# Dirty data project # Task 4 - Halloween Candy # This is an R script to clean the raw input data ------------------------------------------------- # 1 Reading in the raw data ## installing packages in the terminal: e.g. install.packages("here") ## uploading the libraries library(here) library(readr) library(readxl) library(janitor) library(tidyverse) ## Test where the top level of the project directory is here::here() ## Reading in the data, changing the column heading to snake_case style bbc_2015 <- read_excel("raw_data/boing-boing-candy-2015.xlsx") %>% clean_names() bbc_2016 <- read_excel("raw_data/boing-boing-candy-2016.xlsx") %>% clean_names() bbc_2017 <- read_excel("raw_data/boing-boing-candy-2017.xlsx") %>% clean_names() # 2 Cleaning the data ## combining the three data frames together (they have all different amount of rows and columns) ## to see the differences in naming across the three data frames, I create a table using column heading names from each data frame length(bbc_2015) # 124 length(bbc_2016) # 123 <--- to create a table, need to match the length of the longest (124) length(bbc_2017) # 120 <--- column_names <- tibble(names(bbc_2015), c(names(bbc_2016), NA), c(names(bbc_2017), NA, NA, NA, NA)) ## creating vector 'year' so the observations can be identified by the year they came from ## renaming candy heading names to match across the three data frames ### 2015 year_vector <- c(rep(2015, times = nrow(bbc_2015))) bbc_2015 <- bbc_2015 %>% rename(age = how_old_are_you, going_out_trick_or_treating_yourself = are_you_going_actually_going_trick_or_treating_yourself, bonkers_the_candy = bonkers, hersheys_kisses = hershey_s_kissables, hersheys_milk_chocolate = hershey_s_milk_chocolate, licorice_yes_black = licorice, sweetums_a_friend_to_diabetes = sweetums, "100_grand_bar" = x100_grand_bar) %>% mutate(timestamp = year_vector) %>% rename(year = timestamp) ### 2016 year_vector <- c(rep(2016, times = nrow(bbc_2016))) bbc_2016 <- bbc_2016 %>% rename(age = 
how_old_are_you, going_out_trick_or_treating_yourself = are_you_going_actually_going_trick_or_treating_yourself, country = which_country_do_you_live_in, state = which_state_province_county_do_you_live_in, gender = your_gender, box_o_raisins = boxo_raisins, hersheys_milk_chocolate = hershey_s_milk_chocolate, "100_grand_bar" = x100_grand_bar) %>% mutate(timestamp = year_vector) %>% rename(year = timestamp) ### 2017 #### getting rid of 'q1_', 'q2_', ... prefix in column headings colnames(bbc_2017) <- gsub(pattern = "q[0-9]*_", '', colnames(bbc_2017)) year_vector <- c(rep(2017, times = nrow(bbc_2017))) bbc_2017 <- bbc_2017 %>% rename(state = state_province_county_etc, anonymous_brown_globs_that_come_in_black_and_orange_wrappers = anonymous_brown_globs_that_come_in_black_and_orange_wrappers_a_k_a_mary_janes, box_o_raisins = boxo_raisins, hersheys_milk_chocolate = hershey_s_milk_chocolate) %>% mutate(year = year_vector) # joining the three data frames by rows join_all <- bind_rows(bbc_2015, bbc_2016, bbc_2017) ## subsetting candy columns by keeping only those columns containing JOY, DESPAIR or MEH candies <- join_all %>% mutate_if(is.character, ~replace(., is.na(.), "MISSING")) %>% mutate_all(~str_detect(. 
, "JOY|DESPAIR|MISSING|MEH")) %>% summarise_all(~sum(., na.rm = TRUE)) %>% pivot_longer(cols = everything(), names_to = 'col_names', values_to = 'values') %>% filter(values == nrow(join_all)) %>% select(col_names) %>% pull() # removing columns that do not sound candy to me candies <- candies[!candies %in% c("abstained_from_m_ming", "bonkers_the_board_game", "cash_or_other_forms_of_legal_tender", "chardonnay", "dental_paraphenalia", "hugs_actual_physical_hugs", "generic_brand_acetaminophen", "person_of_interest_season_3_dvd_box_set_not_including_disc_4_with_hilarious_outtakes", "please_list_any_items_not_included_above_that_give_you_despair", "real_housewives_of_orange_county_season_9_blue_ray", "vicodin")] join_all <- join_all %>% select(year, age, gender, country, state, going_out_trick_or_treating_yourself, all_of(candies)) # cleaning the data points in columns age and country ## age ### age is data class character, also contains other then number values, e. g. 30's, enough, very etc. ### I am going to change it to integer and drop age outliers join_all <- join_all %>% mutate( age = str_extract(age, "\\d+") %>% as.numeric(age)) %>% mutate( age = if_else(age < 4, NA_real_, age), age = if_else(age > 99, NA_real_, age)) ### checking all the unique values, age range is now 4 to 99 # join_all$age %>% # unique() %>% # sort() ## country ### checking all distinct values in column country - so many! Here I would recommend to the survey authors to use a "choose from" type of questionnaire.. # join_all_candy$country %>% # unique() ### renaming typos and gathering all the nonsense ones in value "others" join_all <- join_all %>% mutate( country = recode(country, "usa" = "USA", "US" = "USA", "United States of America" = "USA", "uSA" = "USA", "united states" = "USA", "Canada`" = "Canada", "canada" = "Canada", "United States" = "USA", "us" = "USA", "france" = "France", "USSA" = "USA", "U.S.A." 
= "USA", "A tropical island south of the equator" = "other", "england" = "UK", "uk" = "UK", "United Kingdom" = "UK", "Neverland" = "other", #!!!!!!!!!!!!! "USA!" = "USA", "this one" = "USA", "USA (I think but it's an election year so who can really tell)" = "USA", "51.0" = "other", "Usa" = "USA", "U.S." = "USA", "Us" = "USA", "America" = "USA", "Units States" = "USA", "belgium" = "Belgium", "croatia"= "Croatia", "United states" = "USA", "England" = "UK", "USA USA USA" = "USA", "the best one - usa" = "USA", "USA! USA! USA!" = "USA", "47.0" = "other", "españa" = "Spain", "u.s." = "USA", "there isn't one for old men" = "other", #!!!!!!!!!!!!! "one of the best ones" = "other", "The Yoo Ess of Aaayyyyyy" = "USA", "United Kindom" = "UK", "hungary" = "Hungary", "united states of america" = "USA", "Somewhere" = "other", "54.0" = "other", "44.0" = "other", "god's country"= "USA", "USA!!!!!!" = "USA", "EUA" = "other", "USA! USA!" = "USA", "45.0" = "other", "sweden" = "Sweden", "United Sates" = "USA", "Sub-Canadian North America... 'Merica" = "USA", "The Netherlands" = "Netherlands", "Trumpistan" = "USA", #!!!!!!!!!!!!! "U.s." = "USA", "Merica" = "USA", "germany" = "Germany", "See above" = "other", "UNited States" = "USA", "kenya" = "Kenya", "30.0" = "other", "The republic of Cascadia" = "Cascadia", "United Stetes" = "USA", "america" = "USA", "Not the USA or Canada" = "other", "USA USA USA USA" = "USA", "United States of America" = "USA", "netherlands" = "Netherlands", "Denial" = "other", "United State" = "USA", "United staes" = "USA", "u.s.a." = "USA", "USAUSAUSA"= "USA", "35" = "other", "finland" = "Finland", "unhinged states" = "USA", "US of A" = "USA", "Unites States" = "USA", "The United States" = "USA", "North Carolina" = "USA", "Unied States" = "USA", "Europe" = "other", "Earth" = "other", "U S" = "USA", "U.K." = "UK", "The United States of America" = "USA", "unite states" = "USA", "46" = "other", "cascadia" = "Cascadia", "insanity lately" = "other", "USA? 
Hard to tell anymore.." = "USA", "'merica" = "USA", "usas" = "USA", "Pittsburgh" = "USA", "45" = "other", "32" = "other", "australia" = "Australia", "A" = "other", "Can" = "Canada", "Canae" = "Canada", "New York" = "USA", "California" = "USA", "USa" = "USA", "South africa" = "South Africa", "I pretend to be from Canada, but I am really from the United States." = "USA", #!!!!!!!!!!!!! "Uk" = "UK", "United Stated" = "USA", "Ahem....Amerca" = "USA", "UD" = "other", "New Jersey" = "USA", "CANADA" = "Canada", "United ststes" = "USA", "United Statss" = "USA", "endland" = "UK", "Atlantis" = "other", "murrika" = "USA", "USAA" = "USA", "Alaska" = "USA", "united States" = "USA", "soviet canuckistan" = "other", "N. America" = "USA", "hong kong"= "China", "spain" = "Spain", "Hong Kong" = "China", "Narnia" = "other", #!!!!!!!!!!!!! "u s a" = "USA", "United Statea" = "USA", "united ststes" = "USA", "1" = "other", "subscribe to dm4uz3 on youtube" = "other", "United kingdom" = "UK", "USA USA USA!!!!" = "USA", "I don't know anymore" = "other", "Fear and Loathing" = "other", "Scotland" = "UK", "Korea" = "South Korea", "Murica" = "USA" ) ) ### checking all the unique values country (sorted alphabetically), ### we have now 36 countries and 1 other category represented in the data frame # join_all$country %>% # unique() %>% # sort() # HURRAY! 
clean_data data frame nearly ready for analysis # just bringing the non candy columns forward and sorting the candies alphabetically candy_clean <- join_all[,order(colnames(join_all))] %>% relocate(year, .before = 1) %>% relocate(age, .after = 1) %>% relocate(gender, .after = 2) %>% relocate(country, .after = 3) %>% relocate(state, .after = 4) %>% relocate(going_out_trick_or_treating_yourself, .after = 5) View(candy_clean) # saving the new clean_data data frame as a csv file write_csv(candy_clean, path = "clean_data/candy_clean.csv") #### note, to have the data frame perfectly clean, I should also gather all the candy types in just one column #### this step would look like this, but for the analysis purpose, I want to keep both # pivoting the data longer to have the data tidy for some of the analysis # candy_clean_longer <- candy_clean %>% # pivot_longer( # cols = ("100_grand_bar":"york_peppermint_patties"), # names_to = "candy", # values_to = "rating" # )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/for_fst_coerce_date_to_character.R \name{for_fst_coerce_date_to_character} \alias{for_fst_coerce_date_to_character} \title{Coerce timestamps to write out to fst} \usage{ for_fst_coerce_date_to_character(.ds) } \arguments{ \item{.ds}{Opensky-Network flight table with day, firstseen, and lastseen} } \value{ tibble with coerced timestamps } \description{ Utility function to ensure character string for fst (binary file format). }
/man/for_fst_coerce_date_to_character.Rd
permissive
rainer-rq-koelle/COVID19airtraffic
R
false
true
508
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/for_fst_coerce_date_to_character.R \name{for_fst_coerce_date_to_character} \alias{for_fst_coerce_date_to_character} \title{Coerce timestamps to write out to fst} \usage{ for_fst_coerce_date_to_character(.ds) } \arguments{ \item{.ds}{Opensky-Network flight table with day, firstseen, and lastseen} } \value{ tibble with coerced timestamps } \description{ Utility function to ensure character string for fst (binary file format). }
# List files that are present before the start of testing if (!exists("okfiles")) { okfiles <- list.files(here::here("tests/"), ".Rdata", full.names = T) } `%>%` <- magrittr::`%>%` testthat::setup({ # Download data if it is not present if (!dir.exists(here::here("tests/Bullet1")) | !dir.exists(here::here("tests/Bullet2"))) { dir.create(here::here("tests/Bullet1")) dir.create(here::here("tests/Bullet2")) } hambyb1l2 <- here::here("tests/Bullet1/Hamby252_Barrel1_Bullet1_Land2.x3p") hambyb2l4 <- here::here("tests/Bullet2/Hamby252_Barrel1_Bullet2_Land4.x3p") if (!file.exists(hambyb1l2)) { download.file(hamby252demo[[1]][2], destfile = hambyb1l2, quiet = T ) } if (!file.exists(hambyb2l4)) { download.file(hamby252demo[[2]][4], destfile = hambyb2l4, quiet = T ) } # Download from github only if NBTRD is down if (!file.exists(hambyb1l2)) { download.file(hamby252demo_github[[1]][2], destfile = hambyb1l2, quiet = T ) } if (!file.exists(hambyb2l4)) { download.file(hamby252demo_github[[2]][4], destfile = hambyb2l4, quiet = T ) } }) # testthat::teardown({ # file.remove(here::here("tests/Bullet1/Hamby252_Barrel1_Bullet1_Land2.x3p")) # unlink(here::here("tests/Bullet1"), recursive = T) # file.remove(here::here("tests/Bullet2/Hamby252_Barrel1_Bullet2_Land4.x3p")) # unlink(here::here("tests/Bullet2"), recursive = T) # }) # test_read.R # test_grooves.R # test_signatures.R if (!file.exists(here::here("tests/bullet1_only.Rdata"))) { message("Generating data file for bullet 1 land 2 with crosscuts, grooves, and signatures") set.seed(3402953) b1_l2_x3p <- read_bullet(here::here("tests/Bullet1"), "x3p") %>% # dplyr::filter(dplyr::row_number() == 3) %>% # turn the scans such that (0,0) is bottom left dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) x %>% x3ptools::rotate_x3p(angle = -90) %>% x3ptools::y_flip_x3p()) ) %>% dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) { # make sure all measurements are in microns x$surface.matrix <- x$surface.matrix * 10^6 
x$header.info$incrementY <- x$header.info$incrementY * 10^6 x$header.info$incrementX <- x$header.info$incrementX * 10^6 x }) ) %>% dplyr::mutate(crosscut = x3p %>% purrr::map_dbl(.f = x3p_crosscut_optimize)) %>% dplyr::mutate(ccdata = purrr::map2( .x = x3p, .y = crosscut, .f = x3p_crosscut )) %>% dplyr::mutate( loess = purrr::map(ccdata, cc_fit_loess, span = .75), gauss = purrr::map(ccdata, cc_fit_gaussian, span = 600) ) %>% # dplyr::mutate(ccdata_hough = purrr::map( # b1_l2_x3p$x3p, # x3ptools::x3p_to_df # )) %>% dplyr::mutate( grooves = purrr::map(ccdata, cc_locate_grooves, return_plot = T), grooves_mid = purrr::map(ccdata, cc_locate_grooves, method = "middle", return_plot = T ), grooves_quad = purrr::map(ccdata, cc_locate_grooves, method = "quadratic", return_plot = F ), grooves_log = purrr::map(ccdata, cc_locate_grooves, method = "logisticlegacy", return_plot = F ), grooves_lassofull = purrr::map(ccdata, cc_locate_grooves, method = "lassofull", return_plot = F ), grooves_lassobasic = purrr::map(ccdata, cc_locate_grooves, method = "lassobasic", return_plot = F ), grooves_bcp = purrr::map(ccdata, cc_locate_grooves, method = "bcp", return_plot = F )#, # grooves_hough = purrr::map(ccdata_hough, cc_locate_grooves, # method = "hough", return_plot = F # ) ) %>% dplyr::mutate( sigs = purrr::map2( .x = ccdata, .y = grooves, .f = function(x, y) { cc_get_signature(ccdata = x, grooves = y, span1 = 0.75, span2 = 0.03) } ) ) save(b1_l2_x3p, file = here::here("tests/bullet1_only.Rdata")) } if (!file.exists(here::here("tests/bullet1_crosscut_extra.Rdata"))) { load(here::here("tests/bullet1_only.Rdata")) b1_l2 <- b1_l2_x3p$x3p[[1]] b1_l2_df <- x3ptools::x3p_to_df(b1_l2) cc1 <- bulletxtrctr:::land_cc(50, b1_l2_df) save(b1_l2, b1_l2_df, cc1, file = here::here("tests/bullet1_crosscut_extra.Rdata") ) } if (!file.exists(here::here("tests/bullets_signatures.Rdata"))) { message("Generating data file for bullet 1 land 2 and bullet 2 land 4 with crosscut, ccdata, grooves, and sigs.") 
load(here::here("tests/bullet1_only.Rdata")) b2_l4_x3p <- read_bullet(here::here("tests/Bullet2"), "x3p") %>% # dplyr::filter(dplyr::row_number() == 5) %>% # turn the scans such that (0,0) is bottom left dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) x %>% x3ptools::rotate_x3p(angle = -90) %>% x3ptools::y_flip_x3p()) ) %>% dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) { # make sure all measurements are in microns x$surface.matrix <- x$surface.matrix * 10^6 x$header.info$incrementY <- x$header.info$incrementY * 10^6 x$header.info$incrementX <- x$header.info$incrementX * 10^6 x }) ) %>% dplyr::mutate(crosscut = x3p %>% purrr::map_dbl(.f = x3p_crosscut_optimize)) %>% dplyr::mutate(ccdata = purrr::map2( .x = x3p, .y = crosscut, .f = x3p_crosscut )) %>% dplyr::mutate( loess = purrr::map(ccdata, cc_fit_loess, span = .75), gauss = purrr::map(ccdata, cc_fit_gaussian, span = 600) ) %>% dplyr::mutate(grooves = purrr::map(ccdata, cc_locate_grooves, return_plot = T )) %>% dplyr::mutate(grooves_mid = purrr::map(ccdata, cc_locate_grooves, method = "middle", return_plot = T )) %>% dplyr::mutate( sigs = purrr::map2( .x = ccdata, .y = grooves, .f = function(x, y) { cc_get_signature(ccdata = x, grooves = y, span1 = 0.75, span2 = 0.03) } ) ) save(b1_l2_x3p, b2_l4_x3p, file = here::here("tests/bullets_signatures.Rdata") ) } # test_align.R # test_cms.R # test_features.R if (!file.exists(here::here("tests/bullets_match.Rdata"))) { message("Generating align.R data file for testing correctness.") load(here::here("tests/bullets_signatures.Rdata")) alignment <- sig_align( b1_l2_x3p$sigs[[1]]$sig, b2_l4_x3p$sigs[[1]]$sig ) peaks <- list( sig1 = sig_get_peaks(alignment$lands$sig1), sig2 = sig_get_peaks(alignment$lands$sig2) ) matches <- bulletxtrctr:::striation_identify_matches( peaks$sig1$lines, peaks$sig2$lines ) maxcms <- sig_cms_max(alignment) features_legacy <- extract_features_all_legacy(maxcms, resolution = 1.5625) features <- extract_features_all(aligned = 
alignment, striae = maxcms, resolution = 1.5625) match <- list( alignment = alignment, peaks = peaks, matches = matches, maxcms = maxcms, features_legacy = features_legacy, features = features ) save(match, file = here::here("tests/bullets_match.Rdata")) } # test_bullet-scores.R if (!file.exists(here::here("tests/rf_features.Rdata"))) { load(here::here("tests/bullets_match.Rdata")) requireNamespace("randomForest") rf_features <- match$features_legacy rf_features$rfscore <- predict(bulletr::rtrees, newdata = rf_features, type = "prob" )[, 2] save(rf_features, file = here::here("tests/rf_features.Rdata") ) } # test_smooth.R if (!file.exists(here::here("tests/smooth.Rdata"))) { message("Generating smooth.R data file for testing correctness") set.seed(3240583) tmp <- tibble::tibble( x = seq(-sqrt(5), sqrt(5), .03) %>% jitter(), y = rnorm(length(x), x^2, .1) ) smoothres <- smoothloess(tmp$y, .5) sigsmoothres <- raw_sig_smooth(tmp$y, .5, c(-5, 5)) save(sigsmoothres, smoothres, file = here::here("tests/smooth.Rdata")) }
/tests/testthat/setup.R
no_license
heike/bulletxtrctr
R
false
false
7,894
r
# List files that are present before the start of testing if (!exists("okfiles")) { okfiles <- list.files(here::here("tests/"), ".Rdata", full.names = T) } `%>%` <- magrittr::`%>%` testthat::setup({ # Download data if it is not present if (!dir.exists(here::here("tests/Bullet1")) | !dir.exists(here::here("tests/Bullet2"))) { dir.create(here::here("tests/Bullet1")) dir.create(here::here("tests/Bullet2")) } hambyb1l2 <- here::here("tests/Bullet1/Hamby252_Barrel1_Bullet1_Land2.x3p") hambyb2l4 <- here::here("tests/Bullet2/Hamby252_Barrel1_Bullet2_Land4.x3p") if (!file.exists(hambyb1l2)) { download.file(hamby252demo[[1]][2], destfile = hambyb1l2, quiet = T ) } if (!file.exists(hambyb2l4)) { download.file(hamby252demo[[2]][4], destfile = hambyb2l4, quiet = T ) } # Download from github only if NBTRD is down if (!file.exists(hambyb1l2)) { download.file(hamby252demo_github[[1]][2], destfile = hambyb1l2, quiet = T ) } if (!file.exists(hambyb2l4)) { download.file(hamby252demo_github[[2]][4], destfile = hambyb2l4, quiet = T ) } }) # testthat::teardown({ # file.remove(here::here("tests/Bullet1/Hamby252_Barrel1_Bullet1_Land2.x3p")) # unlink(here::here("tests/Bullet1"), recursive = T) # file.remove(here::here("tests/Bullet2/Hamby252_Barrel1_Bullet2_Land4.x3p")) # unlink(here::here("tests/Bullet2"), recursive = T) # }) # test_read.R # test_grooves.R # test_signatures.R if (!file.exists(here::here("tests/bullet1_only.Rdata"))) { message("Generating data file for bullet 1 land 2 with crosscuts, grooves, and signatures") set.seed(3402953) b1_l2_x3p <- read_bullet(here::here("tests/Bullet1"), "x3p") %>% # dplyr::filter(dplyr::row_number() == 3) %>% # turn the scans such that (0,0) is bottom left dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) x %>% x3ptools::rotate_x3p(angle = -90) %>% x3ptools::y_flip_x3p()) ) %>% dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) { # make sure all measurements are in microns x$surface.matrix <- x$surface.matrix * 10^6 
x$header.info$incrementY <- x$header.info$incrementY * 10^6 x$header.info$incrementX <- x$header.info$incrementX * 10^6 x }) ) %>% dplyr::mutate(crosscut = x3p %>% purrr::map_dbl(.f = x3p_crosscut_optimize)) %>% dplyr::mutate(ccdata = purrr::map2( .x = x3p, .y = crosscut, .f = x3p_crosscut )) %>% dplyr::mutate( loess = purrr::map(ccdata, cc_fit_loess, span = .75), gauss = purrr::map(ccdata, cc_fit_gaussian, span = 600) ) %>% # dplyr::mutate(ccdata_hough = purrr::map( # b1_l2_x3p$x3p, # x3ptools::x3p_to_df # )) %>% dplyr::mutate( grooves = purrr::map(ccdata, cc_locate_grooves, return_plot = T), grooves_mid = purrr::map(ccdata, cc_locate_grooves, method = "middle", return_plot = T ), grooves_quad = purrr::map(ccdata, cc_locate_grooves, method = "quadratic", return_plot = F ), grooves_log = purrr::map(ccdata, cc_locate_grooves, method = "logisticlegacy", return_plot = F ), grooves_lassofull = purrr::map(ccdata, cc_locate_grooves, method = "lassofull", return_plot = F ), grooves_lassobasic = purrr::map(ccdata, cc_locate_grooves, method = "lassobasic", return_plot = F ), grooves_bcp = purrr::map(ccdata, cc_locate_grooves, method = "bcp", return_plot = F )#, # grooves_hough = purrr::map(ccdata_hough, cc_locate_grooves, # method = "hough", return_plot = F # ) ) %>% dplyr::mutate( sigs = purrr::map2( .x = ccdata, .y = grooves, .f = function(x, y) { cc_get_signature(ccdata = x, grooves = y, span1 = 0.75, span2 = 0.03) } ) ) save(b1_l2_x3p, file = here::here("tests/bullet1_only.Rdata")) } if (!file.exists(here::here("tests/bullet1_crosscut_extra.Rdata"))) { load(here::here("tests/bullet1_only.Rdata")) b1_l2 <- b1_l2_x3p$x3p[[1]] b1_l2_df <- x3ptools::x3p_to_df(b1_l2) cc1 <- bulletxtrctr:::land_cc(50, b1_l2_df) save(b1_l2, b1_l2_df, cc1, file = here::here("tests/bullet1_crosscut_extra.Rdata") ) } if (!file.exists(here::here("tests/bullets_signatures.Rdata"))) { message("Generating data file for bullet 1 land 2 and bullet 2 land 4 with crosscut, ccdata, grooves, and sigs.") 
load(here::here("tests/bullet1_only.Rdata")) b2_l4_x3p <- read_bullet(here::here("tests/Bullet2"), "x3p") %>% # dplyr::filter(dplyr::row_number() == 5) %>% # turn the scans such that (0,0) is bottom left dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) x %>% x3ptools::rotate_x3p(angle = -90) %>% x3ptools::y_flip_x3p()) ) %>% dplyr::mutate( x3p = x3p %>% purrr::map(.f = function(x) { # make sure all measurements are in microns x$surface.matrix <- x$surface.matrix * 10^6 x$header.info$incrementY <- x$header.info$incrementY * 10^6 x$header.info$incrementX <- x$header.info$incrementX * 10^6 x }) ) %>% dplyr::mutate(crosscut = x3p %>% purrr::map_dbl(.f = x3p_crosscut_optimize)) %>% dplyr::mutate(ccdata = purrr::map2( .x = x3p, .y = crosscut, .f = x3p_crosscut )) %>% dplyr::mutate( loess = purrr::map(ccdata, cc_fit_loess, span = .75), gauss = purrr::map(ccdata, cc_fit_gaussian, span = 600) ) %>% dplyr::mutate(grooves = purrr::map(ccdata, cc_locate_grooves, return_plot = T )) %>% dplyr::mutate(grooves_mid = purrr::map(ccdata, cc_locate_grooves, method = "middle", return_plot = T )) %>% dplyr::mutate( sigs = purrr::map2( .x = ccdata, .y = grooves, .f = function(x, y) { cc_get_signature(ccdata = x, grooves = y, span1 = 0.75, span2 = 0.03) } ) ) save(b1_l2_x3p, b2_l4_x3p, file = here::here("tests/bullets_signatures.Rdata") ) } # test_align.R # test_cms.R # test_features.R if (!file.exists(here::here("tests/bullets_match.Rdata"))) { message("Generating align.R data file for testing correctness.") load(here::here("tests/bullets_signatures.Rdata")) alignment <- sig_align( b1_l2_x3p$sigs[[1]]$sig, b2_l4_x3p$sigs[[1]]$sig ) peaks <- list( sig1 = sig_get_peaks(alignment$lands$sig1), sig2 = sig_get_peaks(alignment$lands$sig2) ) matches <- bulletxtrctr:::striation_identify_matches( peaks$sig1$lines, peaks$sig2$lines ) maxcms <- sig_cms_max(alignment) features_legacy <- extract_features_all_legacy(maxcms, resolution = 1.5625) features <- extract_features_all(aligned = 
alignment, striae = maxcms, resolution = 1.5625) match <- list( alignment = alignment, peaks = peaks, matches = matches, maxcms = maxcms, features_legacy = features_legacy, features = features ) save(match, file = here::here("tests/bullets_match.Rdata")) } # test_bullet-scores.R if (!file.exists(here::here("tests/rf_features.Rdata"))) { load(here::here("tests/bullets_match.Rdata")) requireNamespace("randomForest") rf_features <- match$features_legacy rf_features$rfscore <- predict(bulletr::rtrees, newdata = rf_features, type = "prob" )[, 2] save(rf_features, file = here::here("tests/rf_features.Rdata") ) } # test_smooth.R if (!file.exists(here::here("tests/smooth.Rdata"))) { message("Generating smooth.R data file for testing correctness") set.seed(3240583) tmp <- tibble::tibble( x = seq(-sqrt(5), sqrt(5), .03) %>% jitter(), y = rnorm(length(x), x^2, .1) ) smoothres <- smoothloess(tmp$y, .5) sigsmoothres <- raw_sig_smooth(tmp$y, .5, c(-5, 5)) save(sigsmoothres, smoothres, file = here::here("tests/smooth.Rdata")) }
library(rethinking) data(Howell1) d <- Howell1 d2 <- d[ d$age >= 18, ] sample_mu <- rnorm( 1e4, 178, 20) sample_sig <- runif( 1e4, 0, 50) prior_h <- rnorm( 1e4, sample_mu, sample_sig) mu.list <- seq(from = 120, to = 160, length.out = 200) sig.list <- seq(from = 4, to = 9, length.out = 200) post <- expand.grid(mu = mu.list, sig = sig.list) post$LL <- sapply( 1:nrow(post), function(i) sum( dnorm( d2$height, mean = post$mu[i], sd = post$sig[i], log = TRUE))) post$prod <- post$LL + dnorm(post$mu, 178, 20, TRUE) + dunif(post$sig, 0, 50, TRUE) post$prob <- exp(post$prod - max(post$prod)) # contour_xyz( post$mu , post$sig , post$prob, ylim = c(7.1, 8.4), xlim = c(153.5,155.5)) # image_xyz( post$mu , post$sig , post$prob, ylim = c(7.1, 8.4), xlim = c(153.5,155.5)) # We sample from the distribtion sample.row <- sample( 1:nrow(post), 1e4, replace = TRUE, prob = post$prob) sample.mu <- post$mu[sample.row] sample.sig <- post$sig[sample.row] plot( sample.mu , sample.sig , cex=0.5 , pch=16 , col=col.alpha(rangi2,0.1) )
/Howell1.r
no_license
RedGeryon/Bayesian-Modeling-HMC-NUTS
R
false
false
1,029
r
library(rethinking) data(Howell1) d <- Howell1 d2 <- d[ d$age >= 18, ] sample_mu <- rnorm( 1e4, 178, 20) sample_sig <- runif( 1e4, 0, 50) prior_h <- rnorm( 1e4, sample_mu, sample_sig) mu.list <- seq(from = 120, to = 160, length.out = 200) sig.list <- seq(from = 4, to = 9, length.out = 200) post <- expand.grid(mu = mu.list, sig = sig.list) post$LL <- sapply( 1:nrow(post), function(i) sum( dnorm( d2$height, mean = post$mu[i], sd = post$sig[i], log = TRUE))) post$prod <- post$LL + dnorm(post$mu, 178, 20, TRUE) + dunif(post$sig, 0, 50, TRUE) post$prob <- exp(post$prod - max(post$prod)) # contour_xyz( post$mu , post$sig , post$prob, ylim = c(7.1, 8.4), xlim = c(153.5,155.5)) # image_xyz( post$mu , post$sig , post$prob, ylim = c(7.1, 8.4), xlim = c(153.5,155.5)) # We sample from the distribtion sample.row <- sample( 1:nrow(post), 1e4, replace = TRUE, prob = post$prob) sample.mu <- post$mu[sample.row] sample.sig <- post$sig[sample.row] plot( sample.mu , sample.sig , cex=0.5 , pch=16 , col=col.alpha(rangi2,0.1) )
# #print(y) z=read.xlsx("h.xlsx",sheetIndex = 1) print(z) write.xlsx(z,"g.xlsx",sheetName = "Sheet1")
/x.R
no_license
bishal145/AP_LAB6
R
false
false
111
r
# #print(y) z=read.xlsx("h.xlsx",sheetIndex = 1) print(z) write.xlsx(z,"g.xlsx",sheetName = "Sheet1")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Utils_formula_parser.R \name{formula_to_list} \alias{formula_to_list} \title{Parse a chemical formula into a named list This function parses a chemical chem_formula into a named vector} \usage{ formula_to_list(chem_formula) } \arguments{ \item{chem_formula}{Single string with chemical chem_formula} } \description{ Parse a chemical formula into a named list This function parses a chemical chem_formula into a named vector } \examples{ library(metabolomicsUtils) parseChemFormula("C6H12O6") } \seealso{ \code{\link{list_to_formula}} \code{\link{standardize_formula}} } \author{ Michael Witting, \email{michael.witting@helmholtz-muenchen.de} }
/man/formula_to_list.Rd
no_license
michaelwitting/metabolomicsUtils
R
false
true
726
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Utils_formula_parser.R \name{formula_to_list} \alias{formula_to_list} \title{Parse a chemical formula into a named list This function parses a chemical chem_formula into a named vector} \usage{ formula_to_list(chem_formula) } \arguments{ \item{chem_formula}{Single string with chemical chem_formula} } \description{ Parse a chemical formula into a named list This function parses a chemical chem_formula into a named vector } \examples{ library(metabolomicsUtils) parseChemFormula("C6H12O6") } \seealso{ \code{\link{list_to_formula}} \code{\link{standardize_formula}} } \author{ Michael Witting, \email{michael.witting@helmholtz-muenchen.de} }
library(car) ### Name: linearHypothesis ### Title: Test Linear Hypothesis ### Aliases: linearHypothesis lht linearHypothesis.lm linearHypothesis.glm ### linearHypothesis.mlm linearHypothesis.polr linearHypothesis.default ### linearHypothesis.mer linearHypothesis.merMod linearHypothesis.lme ### linearHypothesis.svyglm linearHypothesis.rlm ### print.linearHypothesis.mlm matchCoefs matchCoefs.default ### matchCoefs.mer matchCoefs.merMod matchCoefs.lme matchCoefs.mlm ### linearHypothesis.nlsList makeHypothesis printHypothesis ### Keywords: htest models regression ### ** Examples mod.davis <- lm(weight ~ repwt, data=Davis) ## the following are equivalent: linearHypothesis(mod.davis, diag(2), c(0,1)) linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1")) linearHypothesis(mod.davis, c("(Intercept)", "repwt"), c(0,1)) linearHypothesis(mod.davis, c("(Intercept)", "repwt = 1")) ## use asymptotic Chi-squared statistic linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), test = "Chisq") ## the following are equivalent: ## use HC3 standard errors via white.adjust option linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), white.adjust = TRUE) ## covariance matrix *function* linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), vcov = hccm) ## covariance matrix *estimate* linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), vcov = hccm(mod.davis, type = "hc3")) mod.duncan <- lm(prestige ~ income + education, data=Duncan) ## the following are all equivalent: linearHypothesis(mod.duncan, "1*income - 1*education = 0") linearHypothesis(mod.duncan, "income = education") linearHypothesis(mod.duncan, "income - education") linearHypothesis(mod.duncan, "1income - 1education = 0") linearHypothesis(mod.duncan, "0 = 1*income - 1*education") linearHypothesis(mod.duncan, "income-education=0") linearHypothesis(mod.duncan, "1*income - 1*education + 1 = 1") linearHypothesis(mod.duncan, "2income = 2*education") mod.duncan.2 <- lm(prestige ~ 
type*(income + education), data=Duncan) coefs <- names(coef(mod.duncan.2)) ## test against the null model (i.e., only the intercept is not set to 0) linearHypothesis(mod.duncan.2, coefs[-1]) ## test all interaction coefficients equal to 0 linearHypothesis(mod.duncan.2, coefs[grep(":", coefs)], verbose=TRUE) linearHypothesis(mod.duncan.2, matchCoefs(mod.duncan.2, ":"), verbose=TRUE) # equivalent lh <- linearHypothesis(mod.duncan.2, coefs[grep(":", coefs)]) attr(lh, "value") # value of linear function attr(lh, "vcov") # covariance matrix of linear function ## a multivariate linear model for repeated-measures data ## see ?OBrienKaiser for a description of the data set used in this example. mod.ok <- lm(cbind(pre.1, pre.2, pre.3, pre.4, pre.5, post.1, post.2, post.3, post.4, post.5, fup.1, fup.2, fup.3, fup.4, fup.5) ~ treatment*gender, data=OBrienKaiser) coef(mod.ok) ## specify the model for the repeated measures: phase <- factor(rep(c("pretest", "posttest", "followup"), c(5, 5, 5)), levels=c("pretest", "posttest", "followup")) hour <- ordered(rep(1:5, 3)) idata <- data.frame(phase, hour) idata ## test the four-way interaction among the between-subject factors ## treatment and gender, and the intra-subject factors ## phase and hour linearHypothesis(mod.ok, c("treatment1:gender1", "treatment2:gender1"), title="treatment:gender:phase:hour", idata=idata, idesign=~phase*hour, iterms="phase:hour") ## mixed-effects models examples: ## Not run: ##D library(nlme) ##D example(lme) ##D linearHypothesis(fm2, "age = 0") ## End(Not run) ## Not run: ##D library(lme4) ##D example(glmer) ##D linearHypothesis(gm1, matchCoefs(gm1, "period")) ## End(Not run)
/data/genthat_extracted_code/car/examples/linearHypothesis.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
3,814
r
library(car) ### Name: linearHypothesis ### Title: Test Linear Hypothesis ### Aliases: linearHypothesis lht linearHypothesis.lm linearHypothesis.glm ### linearHypothesis.mlm linearHypothesis.polr linearHypothesis.default ### linearHypothesis.mer linearHypothesis.merMod linearHypothesis.lme ### linearHypothesis.svyglm linearHypothesis.rlm ### print.linearHypothesis.mlm matchCoefs matchCoefs.default ### matchCoefs.mer matchCoefs.merMod matchCoefs.lme matchCoefs.mlm ### linearHypothesis.nlsList makeHypothesis printHypothesis ### Keywords: htest models regression ### ** Examples mod.davis <- lm(weight ~ repwt, data=Davis) ## the following are equivalent: linearHypothesis(mod.davis, diag(2), c(0,1)) linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1")) linearHypothesis(mod.davis, c("(Intercept)", "repwt"), c(0,1)) linearHypothesis(mod.davis, c("(Intercept)", "repwt = 1")) ## use asymptotic Chi-squared statistic linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), test = "Chisq") ## the following are equivalent: ## use HC3 standard errors via white.adjust option linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), white.adjust = TRUE) ## covariance matrix *function* linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), vcov = hccm) ## covariance matrix *estimate* linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"), vcov = hccm(mod.davis, type = "hc3")) mod.duncan <- lm(prestige ~ income + education, data=Duncan) ## the following are all equivalent: linearHypothesis(mod.duncan, "1*income - 1*education = 0") linearHypothesis(mod.duncan, "income = education") linearHypothesis(mod.duncan, "income - education") linearHypothesis(mod.duncan, "1income - 1education = 0") linearHypothesis(mod.duncan, "0 = 1*income - 1*education") linearHypothesis(mod.duncan, "income-education=0") linearHypothesis(mod.duncan, "1*income - 1*education + 1 = 1") linearHypothesis(mod.duncan, "2income = 2*education") mod.duncan.2 <- lm(prestige ~ 
type*(income + education), data=Duncan) coefs <- names(coef(mod.duncan.2)) ## test against the null model (i.e., only the intercept is not set to 0) linearHypothesis(mod.duncan.2, coefs[-1]) ## test all interaction coefficients equal to 0 linearHypothesis(mod.duncan.2, coefs[grep(":", coefs)], verbose=TRUE) linearHypothesis(mod.duncan.2, matchCoefs(mod.duncan.2, ":"), verbose=TRUE) # equivalent lh <- linearHypothesis(mod.duncan.2, coefs[grep(":", coefs)]) attr(lh, "value") # value of linear function attr(lh, "vcov") # covariance matrix of linear function ## a multivariate linear model for repeated-measures data ## see ?OBrienKaiser for a description of the data set used in this example. mod.ok <- lm(cbind(pre.1, pre.2, pre.3, pre.4, pre.5, post.1, post.2, post.3, post.4, post.5, fup.1, fup.2, fup.3, fup.4, fup.5) ~ treatment*gender, data=OBrienKaiser) coef(mod.ok) ## specify the model for the repeated measures: phase <- factor(rep(c("pretest", "posttest", "followup"), c(5, 5, 5)), levels=c("pretest", "posttest", "followup")) hour <- ordered(rep(1:5, 3)) idata <- data.frame(phase, hour) idata ## test the four-way interaction among the between-subject factors ## treatment and gender, and the intra-subject factors ## phase and hour linearHypothesis(mod.ok, c("treatment1:gender1", "treatment2:gender1"), title="treatment:gender:phase:hour", idata=idata, idesign=~phase*hour, iterms="phase:hour") ## mixed-effects models examples: ## Not run: ##D library(nlme) ##D example(lme) ##D linearHypothesis(fm2, "age = 0") ## End(Not run) ## Not run: ##D library(lme4) ##D example(glmer) ##D linearHypothesis(gm1, matchCoefs(gm1, "period")) ## End(Not run)
testlist <- list(rates = numeric(0), thresholds = c(3.45845952088873e-323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = NaN) result <- do.call(grattan::IncomeTax,testlist) str(result)
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052643-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
192
r
testlist <- list(rates = numeric(0), thresholds = c(3.45845952088873e-323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = NaN) result <- do.call(grattan::IncomeTax,testlist) str(result)
\name{plotMeProtein} \alias{plotMeProtein} \title{Plot interaction plots of proteins} \usage{ plotMeProtein(datP) } \arguments{ \item{datP}{subframe of protein data} } \description{ Plot interaction plots of proteins }
/man/plotMeProtein.Rd
no_license
pariswu1988/proteomics
R
false
false
226
rd
\name{plotMeProtein} \alias{plotMeProtein} \title{Plot interaction plots of proteins} \usage{ plotMeProtein(datP) } \arguments{ \item{datP}{subframe of protein data} } \description{ Plot interaction plots of proteins }
#'x' is the column of a data.frame that holds 2 digit state codes stateFromLower <-function(x) { #read 52 state codes into local variable [includes DC (Washington D.C. and PR (Puerto Rico)] st.codes<-data.frame( state=as.factor(c("AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL", "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "PR", "RI", "SC", "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY")), full=as.factor(c("alaska","alabama","arkansas","arizona","california","colorado", "connecticut","district of columbia","delaware","florida","georgia", "hawaii","iowa","idaho","illinois","indiana","kansas","kentucky", "louisiana","massachusetts","maryland","maine","michigan","minnesota", "missouri","mississippi","montana","north carolina","north dakota", "nebraska","new hampshire","new jersey","new mexico","nevada", "new york","ohio","oklahoma","oregon","pennsylvania","puerto rico", "rhode island","south carolina","south dakota","tennessee","texas", "utah","virginia","vermont","washington","wisconsin", "west virginia","wyoming")) ) #create an nx1 data.frame of state codes from source column st.x<-data.frame(state=x) #match source codes with codes from 'st.codes' local variable and use to return the full state name refac.x<-st.codes$full[match(st.x$state,st.codes$state)] #return the full state names in the same order in which they appeared in the original source return(refac.x) }
/function_stateFromLower.R
no_license
besio1/bi_project
R
false
false
1,844
r
#'x' is the column of a data.frame that holds 2 digit state codes stateFromLower <-function(x) { #read 52 state codes into local variable [includes DC (Washington D.C. and PR (Puerto Rico)] st.codes<-data.frame( state=as.factor(c("AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL", "GA", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA", "PR", "RI", "SC", "SD", "TN", "TX", "UT", "VA", "VT", "WA", "WI", "WV", "WY")), full=as.factor(c("alaska","alabama","arkansas","arizona","california","colorado", "connecticut","district of columbia","delaware","florida","georgia", "hawaii","iowa","idaho","illinois","indiana","kansas","kentucky", "louisiana","massachusetts","maryland","maine","michigan","minnesota", "missouri","mississippi","montana","north carolina","north dakota", "nebraska","new hampshire","new jersey","new mexico","nevada", "new york","ohio","oklahoma","oregon","pennsylvania","puerto rico", "rhode island","south carolina","south dakota","tennessee","texas", "utah","virginia","vermont","washington","wisconsin", "west virginia","wyoming")) ) #create an nx1 data.frame of state codes from source column st.x<-data.frame(state=x) #match source codes with codes from 'st.codes' local variable and use to return the full state name refac.x<-st.codes$full[match(st.x$state,st.codes$state)] #return the full state names in the same order in which they appeared in the original source return(refac.x) }
#' Remove left and right full and partial patterns #' @param subject \code{\link[Biostrings:DNAString-class]{DNAString}} or #' \code{\link[Biostrings:XStringSet-class]{DNAStringSet}} object #' @param Lpattern 5' pattern, #' \code{\link[Biostrings:DNAString-class]{DNAString}} object #' @param Rpattern 3' pattern, #' \code{\link[Biostrings:DNAString-class]{DNAString}} object #' @param error_rate Error rate (value in [0, 1]). #' The error rate is the proportion of mismatches allowed between #' the adapter and the aligned portion of the subject. #' For a given adapter A, the number of allowed mismatches between each #' subsequence s of A and the subject is computed as: error_rate * L_s, #' where L_s is the length of the subsequence s. #' @param with.indels Allow indels? #' @param anchored Can the adapter or partial adapter be within #' the sequence? (anchored = FALSE) #' or only in the terminal regions of the sequence? (anchored = TRUE). #' Default TRUE (trim only flanking regions) #' @param fixed Parameter passed to #' \code{\link[Biostrings]{trimLRpatterns}} #' Default 'subject', ambiguities in the pattern only are interpreted #' as wildcard. See the argument fixed in #' \code{\link[Biostrings]{trimLRpatterns}} #' @param ranges Return ranges? Default FALSE #' @param checks Perform internal checks? Default TRUE #' @param min_match_flank Do not trim in flanks of the subject, #' if a match has min_match_flank of less length. Default 1L #' (only trim with >=2 coincidences in a flank match) #' @param ... additional parameters passed to #' \code{\link[Biostrings]{trimLRpatterns]}} #' @return Edited \code{\link[Biostrings:DNAString-class]{DNAString}} or #' \code{\link[Biostrings:XStringSet-class]{DNAStringSet}} object #' @description This set of programs are internal, #' and the function adapter_filter is recommended for trimming. #' The programs can remove adapters and partial #' adapters from 3' and 5'. The adapters can be anchored or not. 
#' When indels are allowed, the error rate consists in the edit distance. #' IUPAC simbols are allowed. The methods use the #' \code{\link[Biostrings]{trimLRpatterns}} function #' of the \pkg{Biostrings} package, with some additions #' to take into account e.g., partial adaptors. #' IUPAC symbols are allowed in all the cases. The present function #' also removes partial adapters, without the need of additional steps #' (for example, creating a padded adapter with 'Ns', etc). #' A similar result to the output of \code{\link[Biostrings]{trimLRPatterns}} #' can be obtained with the option anchored = TRUE. #' When several matches are found, the function removes the subsequence #' that starts in the first match when cutRseq is used, or ends #' in the last match when cutLseq is used. #' #' @examples #' library(Biostrings) #' #' subject <- DNAStringSet(c('ATCATGCCATCATGAT', #' 'CATGATATTA', 'TCATG', 'AAAAAA', 'AGGTCATG')) #' #' Lpattern <- Rpattern <- 'TCATG' #' #' cutLseq(subject, Lpattern) #' cutLseq(subject, Lpattern, ranges = TRUE) #' cutRseq(subject, Rpattern) #' #' #' cutLseq(subject, Lpattern, anchored = FALSE) #' cutLseq(subject, Lpattern, error_rate = 0.2) #' cutLseq(subject, Lpattern, error_rate = 0.2, #' with.indels = TRUE) #' #' @author Leandro Roser \email{learoser@@gmail.com} #' @rdname matching #' @keywords internal cutRseq <- function(subject, Rpattern, with.indels = FALSE, fixed = "subject", error_rate = 0.2, anchored = TRUE, ranges = FALSE, checks = TRUE, min_match_flank = 2L, ...) 
{ Rpattern <- DNAString(Rpattern) if (error_rate > 1 || error_rate < 0) { stop("error_rate must be a number between 0 and 1") } if (checks) { if (!is(Rpattern, "DNAString")) { stop("Rpattern must be a character string or a DNAString object") } csub <- class(subject) if (csub != "DNAStringSet") { stop("subject must be a DNAString or DNAStringSet object") } if (csub == "DNAString") { subject <- as(subject[[1]], "DNAStringSet") } } p <- length(Rpattern) s_width <- width(subject) s <- max(width(subject)) if(error_rate > 0) { flank_seq <- as.integer(seq_len(p) * error_rate) } else { flank_seq <- rep(0, length(seq_len(p))) } if (min_match_flank >= 1L) { if (p > min_match_flank) { flank_seq[seq_len(min_match_flank)] <- -1 } else { return(subject) } } if(!anchored) { Rpattern <- as.character(Rpattern) maxlen <- max(width(subject)) - nchar(Rpattern) if(maxlen > 0) { Rpattern <- paste0(Rpattern, paste(rep("N",maxlen), collapse = "")) } Rpattern <- DNAString(Rpattern) flank_seq <- c(flank_seq, rep(0,maxlen)) } out <- trimLRPatterns(Rpattern = Rpattern, subject = subject, max.Rmismatch = flank_seq, with.Rindels = with.indels, Rfixed = fixed, ...) if (ranges) { out <- IRanges::IRanges(start = rep(1, length(out)), end = width(out)) } out } #' Remove left and right full and partial patterns #' @rdname matching cutLseq <- function(subject, Lpattern, with.indels = FALSE, fixed = "subject", error_rate = 0.2, anchored = TRUE, ranges = FALSE, min_match_flank = 3L, checks = TRUE, ...) 
{ Lpattern <- DNAString(Lpattern) if (checks) { if (!is(Lpattern, "DNAString")) { stop("Rpattern must be a character string or a DNAString object") } csub <- class(subject) if (csub != "DNAStringSet") { stop("subject must be a DNAStringSet object") } if (csub == "DNAString") { subject <- as(subject[[1]], "DNAStringSet") } } p <- length(Lpattern) s_width <- width(subject) s <- max(width(subject)) if(error_rate > 0) { flank_seq <- as.integer(seq_len(p) * error_rate) } else { flank_seq <- rep(0, length(seq_len(p))) } if (min_match_flank >= 1L) { if (p > min_match_flank) { flank_seq[seq_len(min_match_flank)] <- -1 } else { return(subject) } } if(!anchored) { Lpattern <- as.character(Lpattern) maxlen <- max(width(subject)) - nchar(Lpattern) if(maxlen > 0) { Lpattern <- paste0(paste(rep("N",maxlen), collapse = ""), Lpattern) } Lpattern <- DNAString(Lpattern) flank_seq <- c(flank_seq, rep(0,maxlen)) } out <- trimLRPatterns(Lpattern = Lpattern, subject = subject, max.Lmismatch = flank_seq, with.Lindels = with.indels, Lfixed = fixed, ...) if (ranges) { out <- IRanges::IRanges(start = rep(1, length(out)), end = width(out)) } out }
/FastqCleaner Filters/matching.R
no_license
imanemessak/Projet-R-Fastq-quality-filtering
R
false
false
7,000
r
#' Remove left and right full and partial patterns #' @param subject \code{\link[Biostrings:DNAString-class]{DNAString}} or #' \code{\link[Biostrings:XStringSet-class]{DNAStringSet}} object #' @param Lpattern 5' pattern, #' \code{\link[Biostrings:DNAString-class]{DNAString}} object #' @param Rpattern 3' pattern, #' \code{\link[Biostrings:DNAString-class]{DNAString}} object #' @param error_rate Error rate (value in [0, 1]). #' The error rate is the proportion of mismatches allowed between #' the adapter and the aligned portion of the subject. #' For a given adapter A, the number of allowed mismatches between each #' subsequence s of A and the subject is computed as: error_rate * L_s, #' where L_s is the length of the subsequence s. #' @param with.indels Allow indels? #' @param anchored Can the adapter or partial adapter be within #' the sequence? (anchored = FALSE) #' or only in the terminal regions of the sequence? (anchored = TRUE). #' Default TRUE (trim only flanking regions) #' @param fixed Parameter passed to #' \code{\link[Biostrings]{trimLRpatterns}} #' Default 'subject', ambiguities in the pattern only are interpreted #' as wildcard. See the argument fixed in #' \code{\link[Biostrings]{trimLRpatterns}} #' @param ranges Return ranges? Default FALSE #' @param checks Perform internal checks? Default TRUE #' @param min_match_flank Do not trim in flanks of the subject, #' if a match has min_match_flank of less length. Default 1L #' (only trim with >=2 coincidences in a flank match) #' @param ... additional parameters passed to #' \code{\link[Biostrings]{trimLRpatterns]}} #' @return Edited \code{\link[Biostrings:DNAString-class]{DNAString}} or #' \code{\link[Biostrings:XStringSet-class]{DNAStringSet}} object #' @description This set of programs are internal, #' and the function adapter_filter is recommended for trimming. #' The programs can remove adapters and partial #' adapters from 3' and 5'. The adapters can be anchored or not. 
#' When indels are allowed, the error rate consists in the edit distance. #' IUPAC simbols are allowed. The methods use the #' \code{\link[Biostrings]{trimLRpatterns}} function #' of the \pkg{Biostrings} package, with some additions #' to take into account e.g., partial adaptors. #' IUPAC symbols are allowed in all the cases. The present function #' also removes partial adapters, without the need of additional steps #' (for example, creating a padded adapter with 'Ns', etc). #' A similar result to the output of \code{\link[Biostrings]{trimLRPatterns}} #' can be obtained with the option anchored = TRUE. #' When several matches are found, the function removes the subsequence #' that starts in the first match when cutRseq is used, or ends #' in the last match when cutLseq is used. #' #' @examples #' library(Biostrings) #' #' subject <- DNAStringSet(c('ATCATGCCATCATGAT', #' 'CATGATATTA', 'TCATG', 'AAAAAA', 'AGGTCATG')) #' #' Lpattern <- Rpattern <- 'TCATG' #' #' cutLseq(subject, Lpattern) #' cutLseq(subject, Lpattern, ranges = TRUE) #' cutRseq(subject, Rpattern) #' #' #' cutLseq(subject, Lpattern, anchored = FALSE) #' cutLseq(subject, Lpattern, error_rate = 0.2) #' cutLseq(subject, Lpattern, error_rate = 0.2, #' with.indels = TRUE) #' #' @author Leandro Roser \email{learoser@@gmail.com} #' @rdname matching #' @keywords internal cutRseq <- function(subject, Rpattern, with.indels = FALSE, fixed = "subject", error_rate = 0.2, anchored = TRUE, ranges = FALSE, checks = TRUE, min_match_flank = 2L, ...) 
{ Rpattern <- DNAString(Rpattern) if (error_rate > 1 || error_rate < 0) { stop("error_rate must be a number between 0 and 1") } if (checks) { if (!is(Rpattern, "DNAString")) { stop("Rpattern must be a character string or a DNAString object") } csub <- class(subject) if (csub != "DNAStringSet") { stop("subject must be a DNAString or DNAStringSet object") } if (csub == "DNAString") { subject <- as(subject[[1]], "DNAStringSet") } } p <- length(Rpattern) s_width <- width(subject) s <- max(width(subject)) if(error_rate > 0) { flank_seq <- as.integer(seq_len(p) * error_rate) } else { flank_seq <- rep(0, length(seq_len(p))) } if (min_match_flank >= 1L) { if (p > min_match_flank) { flank_seq[seq_len(min_match_flank)] <- -1 } else { return(subject) } } if(!anchored) { Rpattern <- as.character(Rpattern) maxlen <- max(width(subject)) - nchar(Rpattern) if(maxlen > 0) { Rpattern <- paste0(Rpattern, paste(rep("N",maxlen), collapse = "")) } Rpattern <- DNAString(Rpattern) flank_seq <- c(flank_seq, rep(0,maxlen)) } out <- trimLRPatterns(Rpattern = Rpattern, subject = subject, max.Rmismatch = flank_seq, with.Rindels = with.indels, Rfixed = fixed, ...) if (ranges) { out <- IRanges::IRanges(start = rep(1, length(out)), end = width(out)) } out } #' Remove left and right full and partial patterns #' @rdname matching cutLseq <- function(subject, Lpattern, with.indels = FALSE, fixed = "subject", error_rate = 0.2, anchored = TRUE, ranges = FALSE, min_match_flank = 3L, checks = TRUE, ...) 
{ Lpattern <- DNAString(Lpattern) if (checks) { if (!is(Lpattern, "DNAString")) { stop("Rpattern must be a character string or a DNAString object") } csub <- class(subject) if (csub != "DNAStringSet") { stop("subject must be a DNAStringSet object") } if (csub == "DNAString") { subject <- as(subject[[1]], "DNAStringSet") } } p <- length(Lpattern) s_width <- width(subject) s <- max(width(subject)) if(error_rate > 0) { flank_seq <- as.integer(seq_len(p) * error_rate) } else { flank_seq <- rep(0, length(seq_len(p))) } if (min_match_flank >= 1L) { if (p > min_match_flank) { flank_seq[seq_len(min_match_flank)] <- -1 } else { return(subject) } } if(!anchored) { Lpattern <- as.character(Lpattern) maxlen <- max(width(subject)) - nchar(Lpattern) if(maxlen > 0) { Lpattern <- paste0(paste(rep("N",maxlen), collapse = ""), Lpattern) } Lpattern <- DNAString(Lpattern) flank_seq <- c(flank_seq, rep(0,maxlen)) } out <- trimLRPatterns(Lpattern = Lpattern, subject = subject, max.Lmismatch = flank_seq, with.Lindels = with.indels, Lfixed = fixed, ...) if (ranges) { out <- IRanges::IRanges(start = rep(1, length(out)), end = width(out)) } out }
NULL #' FAO Consumer Price Indices #' #' A dataset containing consumer price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Date #' \item Year #' \item MonthNum #' \item Month #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Consumer Prices, Food Indices (2015 = 100) #' \item Consumer Prices, General Indices (2015 = 100) #' \item Food price inflation (%) #' } #' @source http://www.fao.org/faostat/en/#data/CP #' @docType data #' @keywords datasets #' @name agData_FAO_Consumer_Prices NULL #' FAO Country Table #' #' A table of country information. #' @section Keys: #' \itemize{ #' \item Country #' \item FAO_TABLE_NAME #' \item ISO2 #' \item ISO3 #' \item Lat #' \item Lon #' \item Region #' \item SubRegion #' \item DVDDVG #' } #' @docType data #' @keywords datasets #' @name agData_FAO_Country_Table NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops2 NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops3 NULL #' FAO fertilizer data #' #' FAO fertilizer data #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement: #' \itemize{ #' \item Production #' \item Import Quantity #' \item Export Quantity #' \item Agricultural Use #' \item Prices Paid by Farmers #' } #' @source http://www.fao.org/faostat/en/#data/RFN #' @docType data #' @keywords datasets #' @name agData_FAO_Fertilizers NULL #' FAO Food Price Index #' #' A dataset containing food price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Date #' \item Year #' \item Month #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @source https://www.fao.org/worldfoodsituation/foodpricesindex/en/ #' @docType data #' @keywords datasets #' @name agData_FAO_Food_Prices NULL #' FAO Land Use Data #' #' A dataset containing land use data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Item: #' \itemize{ #' \item Country area #' \item Land area #' \item Agricultural area #' \item ... #' } #' @section Measurement (Unit): #' \itemize{ #' \item Area (1000 hectares) #' \item Carbon stock in living biomass (million tonnes) #' } #' @source http://www.fao.org/faostat/en/#data/RL #' @docType data #' @keywords datasets #' @name agData_FAO_LandUse NULL #' FAO Livestock Data #' #' A dataset containing livestock data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Stocks (head or 1000 head) #' } #' @source http://www.fao.org/faostat/en/#data/QA #' @docType data #' @keywords datasets #' @name agData_FAO_Livestock NULL #' FAO world population data #' #' FAO world population data #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurements: #' \itemize{ #' \item Total #' \item Male #' \item Female #' \item Rural #' \item Urban #' } #' @source http://www.fao.org/faostat/en/#data/OA #' @docType data #' @keywords datasets #' @name agData_FAO_Population NULL #' FAO Producer Price Indices #' #' A dataset containing producer price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Item #' \item Year #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Producer Price Index (2014-2016 = 100) #' \item Producer Price (USD/Tonne) #' } #' @source http://www.fao.org/faostat/en/#data/PP #' @docType data #' @keywords datasets #' @name agData_FAO_Producer_Prices NULL #' FAO Region Table #' #' A table of region information. #' @section Keys: #' \itemize{ #' \item FAO_TABLE_NAME #' \item Region #' \item SubRegion #' \item Name #' } #' @docType data #' @keywords datasets #' @name agData_FAO_Region_Table NULL #' FAO Trade Data #' #' A dataset containing trade data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Export Quantity (Tonnes) #' \item Import Quantity (Tonnes) #' } #' @source http://www.fao.org/faostat/en/#data/TP #' @docType data #' @keywords datasets #' @name agData_FAO_Trade_Quantity NULL #' FAO Trade Data #' #' A dataset containing trade data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Export Value (USD) #' \item Import Value (USD) #' } #' @source http://www.fao.org/faostat/en/#data/TP #' @docType data #' @keywords datasets #' @name agData_FAO_Trade_Value NULL
/R/Datasets_FAO.R
no_license
derekmichaelwright/agData
R
false
false
6,486
r
NULL #' FAO Consumer Price Indices #' #' A dataset containing consumer price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Date #' \item Year #' \item MonthNum #' \item Month #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Consumer Prices, Food Indices (2015 = 100) #' \item Consumer Prices, General Indices (2015 = 100) #' \item Food price inflation (%) #' } #' @source http://www.fao.org/faostat/en/#data/CP #' @docType data #' @keywords datasets #' @name agData_FAO_Consumer_Prices NULL #' FAO Country Table #' #' A table of country information. #' @section Keys: #' \itemize{ #' \item Country #' \item FAO_TABLE_NAME #' \item ISO2 #' \item ISO3 #' \item Lat #' \item Lon #' \item Region #' \item SubRegion #' \item DVDDVG #' } #' @docType data #' @keywords datasets #' @name agData_FAO_Country_Table NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops2 NULL #' FAO Crop Production Data #' #' A dataset containing crop data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Production (Tonnes) #' \item Area Harvested (Hectares) #' \item Yield (kg/ha) #' } #' @source http://www.fao.org/faostat/en/#data/QC #' @docType data #' @keywords datasets #' @name agData_FAO_Crops3 NULL #' FAO fertilizer data #' #' FAO fertilizer data #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement: #' \itemize{ #' \item Production #' \item Import Quantity #' \item Export Quantity #' \item Agricultural Use #' \item Prices Paid by Farmers #' } #' @source http://www.fao.org/faostat/en/#data/RFN #' @docType data #' @keywords datasets #' @name agData_FAO_Fertilizers NULL #' FAO Food Price Index #' #' A dataset containing food price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Date #' \item Year #' \item Month #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @source https://www.fao.org/worldfoodsituation/foodpricesindex/en/ #' @docType data #' @keywords datasets #' @name agData_FAO_Food_Prices NULL #' FAO Land Use Data #' #' A dataset containing land use data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Item: #' \itemize{ #' \item Country area #' \item Land area #' \item Agricultural area #' \item ... #' } #' @section Measurement (Unit): #' \itemize{ #' \item Area (1000 hectares) #' \item Carbon stock in living biomass (million tonnes) #' } #' @source http://www.fao.org/faostat/en/#data/RL #' @docType data #' @keywords datasets #' @name agData_FAO_LandUse NULL #' FAO Livestock Data #' #' A dataset containing livestock data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Stocks (head or 1000 head) #' } #' @source http://www.fao.org/faostat/en/#data/QA #' @docType data #' @keywords datasets #' @name agData_FAO_Livestock NULL #' FAO world population data #' #' FAO world population data #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurements: #' \itemize{ #' \item Total #' \item Male #' \item Female #' \item Rural #' \item Urban #' } #' @source http://www.fao.org/faostat/en/#data/OA #' @docType data #' @keywords datasets #' @name agData_FAO_Population NULL #' FAO Producer Price Indices #' #' A dataset containing producer price indices from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Item #' \item Year #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Producer Price Index (2014-2016 = 100) #' \item Producer Price (USD/Tonne) #' } #' @source http://www.fao.org/faostat/en/#data/PP #' @docType data #' @keywords datasets #' @name agData_FAO_Producer_Prices NULL #' FAO Region Table #' #' A table of region information. #' @section Keys: #' \itemize{ #' \item FAO_TABLE_NAME #' \item Region #' \item SubRegion #' \item Name #' } #' @docType data #' @keywords datasets #' @name agData_FAO_Region_Table NULL #' FAO Trade Data #' #' A dataset containing trade data from FAOSTAT. 
#' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Export Quantity (Tonnes) #' \item Import Quantity (Tonnes) #' } #' @source http://www.fao.org/faostat/en/#data/TP #' @docType data #' @keywords datasets #' @name agData_FAO_Trade_Quantity NULL #' FAO Trade Data #' #' A dataset containing trade data from FAOSTAT. #' @section Keys: #' \itemize{ #' \item Area #' \item Year #' \item Item #' \item Measurement #' \item Unit #' \item Value #' } #' @section Measurement (Unit): #' \itemize{ #' \item Export Value (USD) #' \item Import Value (USD) #' } #' @source http://www.fao.org/faostat/en/#data/TP #' @docType data #' @keywords datasets #' @name agData_FAO_Trade_Value NULL
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats.R \name{existsSavedNlStat} \alias{existsSavedNlStat} \title{Check whether an nlStat exists in the saved nlStats} \usage{ existsSavedNlStat(nlStatSig, nlStatHash) } \arguments{ \item{nlStatHash}{character The hash of the nlStat to check} \item{nlStatName}{character The signature of the nlStat to check} } \value{ logical Whether the nlStat was found in the saved nlStats } \description{ Check whether an nlStat exists in the saved nlStats given the nlStat signature and the nlStatHash to ensure a unique hit } \examples{ \dontrun{ existsSavedNlStat(nlStatName = "sum()", nlStatHash = "f0fbe35d81578311ba8f362137832e779b7b4f39") #returns TRUE/FALSE } }
/man/existsSavedNlStat.Rd
no_license
mjdhasan/Rnightlights
R
false
true
746
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats.R \name{existsSavedNlStat} \alias{existsSavedNlStat} \title{Check whether an nlStat exists in the saved nlStats} \usage{ existsSavedNlStat(nlStatSig, nlStatHash) } \arguments{ \item{nlStatHash}{character The hash of the nlStat to check} \item{nlStatName}{character The signature of the nlStat to check} } \value{ logical Whether the nlStat was found in the saved nlStats } \description{ Check whether an nlStat exists in the saved nlStats given the nlStat signature and the nlStatHash to ensure a unique hit } \examples{ \dontrun{ existsSavedNlStat(nlStatName = "sum()", nlStatHash = "f0fbe35d81578311ba8f362137832e779b7b4f39") #returns TRUE/FALSE } }
source("subset_selection.R") X = load_data("Data/prostate.data") train_data = X$train_data test_data = X$test_data n = X$num_of_train_sample p = X$num_of_predictors for(k in 0:p){ cat("Testing", k, "-predictor models \n\n") stats = best_subset(k, p, train_data, 10) complexity_parameter <- if(k==0) k else cbind(complexity_parameter, k) cv_results <- if(k==0) stats[[1]] else cbind(cv_results, stats[[1]]) best_formula <- if(k==0) stats[[3]] else c(best_formula, stats[[3]]) } OSE = one_standard_error_rule(complexity_parameter, cv_results) complexity_parameter_idx = OSE$index predictor_formula = best_formula[complexity_parameter_idx] result = predicting(predictor_formula, train_data, test_data, 8) y = result$y_test pdt = result$pdt n = result$n_test mse = mean((y - pdt)^2) sErr = sqrt( var((y - pdt)^2) / n) print(result$coef) print(mse) print(sErr)
/best_subset.R
no_license
qubiroot/ESL
R
false
false
902
r
source("subset_selection.R") X = load_data("Data/prostate.data") train_data = X$train_data test_data = X$test_data n = X$num_of_train_sample p = X$num_of_predictors for(k in 0:p){ cat("Testing", k, "-predictor models \n\n") stats = best_subset(k, p, train_data, 10) complexity_parameter <- if(k==0) k else cbind(complexity_parameter, k) cv_results <- if(k==0) stats[[1]] else cbind(cv_results, stats[[1]]) best_formula <- if(k==0) stats[[3]] else c(best_formula, stats[[3]]) } OSE = one_standard_error_rule(complexity_parameter, cv_results) complexity_parameter_idx = OSE$index predictor_formula = best_formula[complexity_parameter_idx] result = predicting(predictor_formula, train_data, test_data, 8) y = result$y_test pdt = result$pdt n = result$n_test mse = mean((y - pdt)^2) sErr = sqrt( var((y - pdt)^2) / n) print(result$coef) print(mse) print(sErr)
### Jinliang Yang ### march 22th, 2016 library(farmeR) ### downloading data hmp2 <- read.delim("data/SraRunTable_hmp2.txt") library("plyr") res <- ddply(hmp2, .(Sample_Name_s), summarise, mbase = sum(MBases_l)) res <- res[order(res$mbase),] res$mbase <- res$mbase/1000*2/2.5 hist(res$mbase/1000*2/2.5) ##### only download B73 and mo17 idx1 <- grep( ".*MO17", hmp2$Sample_Name_s) idx2 <- grep( ".*B73", hmp2$Sample_Name_s) sra <- hmp2[c(idx1, idx2),] sra <- sra[, c("Run_s", "Experiment_s", "MBases_l", "Sample_Name_s")] names(sra) <- c("SRR", "SRX", "mbase", "pid") write.table(sra, "/home/jolyang/dbcenter/BMfastq/sampleid.txt", sep="\t", row.names=FALSE, quote=FALSE ) res <- ddply(sra, .(pid), summarise, mbase = sum(mbase), cov = sum(mbase)/2000) run_aspera(sra, maxspeed="200m", outdir="/home/jolyang/dbcenter/BMfastq", arrayjobs="1-16", jobid="aspera", email="yangjl0930@gmail.com") system("sbatch -p med slurm-script/run_aspera_array.sh") ##### dump the pe data into fastq run_fq_dump(filepath = "/home/jolyang/dbcenter/BMfastq", slurmsh = "slurm-script/dump_BM.sh", rmsra = TRUE, email = "yangjl0930@gmail.com") system("sbatch -p bigmemh slurm-script/dump_BM.sh") run_fq_dump2(filepath = "/home/jolyang/dbcenter/BMfastq", rmsra=TRUE, gzip=TRUE, email = "yangjl0930@gmail.com", run=c(TRUE, "med", "2600", "1"))
/profiling/0.SRA_align/0.D.1_B73_Mo17_Hapmap2.R
no_license
RILAB/methylation
R
false
false
1,420
r
### Jinliang Yang ### march 22th, 2016 library(farmeR) ### downloading data hmp2 <- read.delim("data/SraRunTable_hmp2.txt") library("plyr") res <- ddply(hmp2, .(Sample_Name_s), summarise, mbase = sum(MBases_l)) res <- res[order(res$mbase),] res$mbase <- res$mbase/1000*2/2.5 hist(res$mbase/1000*2/2.5) ##### only download B73 and mo17 idx1 <- grep( ".*MO17", hmp2$Sample_Name_s) idx2 <- grep( ".*B73", hmp2$Sample_Name_s) sra <- hmp2[c(idx1, idx2),] sra <- sra[, c("Run_s", "Experiment_s", "MBases_l", "Sample_Name_s")] names(sra) <- c("SRR", "SRX", "mbase", "pid") write.table(sra, "/home/jolyang/dbcenter/BMfastq/sampleid.txt", sep="\t", row.names=FALSE, quote=FALSE ) res <- ddply(sra, .(pid), summarise, mbase = sum(mbase), cov = sum(mbase)/2000) run_aspera(sra, maxspeed="200m", outdir="/home/jolyang/dbcenter/BMfastq", arrayjobs="1-16", jobid="aspera", email="yangjl0930@gmail.com") system("sbatch -p med slurm-script/run_aspera_array.sh") ##### dump the pe data into fastq run_fq_dump(filepath = "/home/jolyang/dbcenter/BMfastq", slurmsh = "slurm-script/dump_BM.sh", rmsra = TRUE, email = "yangjl0930@gmail.com") system("sbatch -p bigmemh slurm-script/dump_BM.sh") run_fq_dump2(filepath = "/home/jolyang/dbcenter/BMfastq", rmsra=TRUE, gzip=TRUE, email = "yangjl0930@gmail.com", run=c(TRUE, "med", "2600", "1"))
##Workspace: beta_master6 #summaries BM #BM summary DS_pred #predicted data #for paper resize.win(6,6) #PIC vs sinkvel cell, real data, saved SI with lm and no lm PIC$group <- reorder.factor (PIC$group, new.order = c("Nc", "Cc")) PIC_newdata$group <- reorder.factor (PIC_newdata$group, new.order = c("Nc", "Cc")) ggplot(data=PIC, aes(x=PICpercellpg, y=SinkVel, color=group, shape=Strain)) + geom_point(size=7)+theme_Publication2()+ labs(y = expression("sinking velocity "~("m"~day^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + #geom_smooth (method="lm", aes(group=1), color="#525252") + scale_color_manual(values=c("#e41a1c", "#377eb8")) + scale_shape_manual(values=c(1:12)) #get regression #edited as lab suggested resize.win (8,6) ggplot(data=PIC, aes(x=PICpercellpg, y=SinkVel, color=group)) + geom_point(aes(size=Den_celltotal))+ scale_size(range = c(1,10)) + theme_Publication()+ labs(y = expression("sinking velocity "~("m"~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "vertical", legend.box = "vertical", legend.position="right", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c","#377eb8")) + scale_shape_manual(values=c(1:11)) + guides(colour = guide_legend(override.aes = list(size=5))) ggplot(data=PIC, aes(x=PICpercellpg, y=log10(beta_DS), color=group, shape=Strain)) + geom_point(size=5)+theme_Publication()+ labs(y = expression(log[10]~beta[S]~("encounters "~mL~day^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical")+ scale_color_manual(values=c("#e41a1c", "#377eb8")) + scale_shape_manual(values=c(1:12)) PIC$group <- factor (PIC$group,labels= c("naked", "calcified")) PIC$group <- reorder.factor (PIC$group, new.order = c("naked", "calcified")) liths <- data.frame (group=c("coccolith"), Den_celltotal =2.6, SinkVel = 0.45, beta_DS = 2.70*10^-6) #edited as lab suggested resize.win (6,7) 
ggplot(data=PIC, aes(x=SinkVel, y=log10(beta_DS), color=group, shape=Strain)) + geom_point(size=5)+theme_Publication()+ geom_point (data=liths, aes(x=SinkVel, y=log10(beta_DS), color="coccolith"), size=8, shape=20) + labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("sinking velocity "~("m"~d^-1))) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank())+ scale_color_manual(values=c( "#377eb8", "#4daf4a", "#e41a1c")) + scale_shape_manual(values=c(1:12)) resize.win(6,6) #Predicted data (equally spaced data) sinkvel PIC_newdata$group <- factor (PIC_newdata$group,labels= c("naked", "calcified")) ggplot(data=PIC_newdata, aes(x=PICpercellpg, y=SinkVel, color=group)) + geom_point(size=5)+theme_Publication()+ labs(y = expression("sinking velocity "~("m"~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c", "#377eb8")) #Predicted data (equally spaced data) betas, with liths, beta vs sinkvel ggplot(data=PIC_newdata, aes(x=SinkVel, y=log10(beta_DS), color=group)) + geom_point(size=5)+theme_Publication()+ geom_point (data=liths, aes(x=SinkVel, y=log10(beta_DS), color="coccolith"), size=8, shape=20) + labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("sinking velocity "~("m"~d^-1))) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#377eb8", "#4daf4a", "#e41a1c")) #Predicted data (equally spaced data) betas, without liths, beta vs. 
PIC ggplot(data=PIC_newdata, aes(x=PICpercellpg, y=log10(beta_DS), color=group)) + geom_point(size=5)+theme_Publication()+ labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c", "#377eb8")) #turbulence resize.win(6,6) turb$group <- factor (turb$group,labels= c("naked", "calcified", "coccolith")) ggplot(data = turb, aes(x = log10(disrate), y = log10(beta_turb), color=group)) + geom_line(size =2) + theme_Publication() + labs(y = expression(log[10]~beta[T]~("encounters "~mL~d^-1)), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + guides(colour = guide_legend(override.aes = list(size=5))) #encounters all betas #viral encounters per day per cell resize.win(8,6) all$group <- reorder.factor (all$group, new.order = c("Nc", "Cc", "Li")) all$group <- factor (all$group,labels= c("naked", "calcified", "coccolith")) ggplot(data=all, aes(x=log10(disrate),y = log10(E_all_low) , color=group)) + geom_line(size=2, position=position_jitter(w=0.02, h=0), aes(linetype="shelf slope"))+ geom_line(size=2, data = all, aes(y= log10(E_all_high), color=group,linetype="open ocean")) + theme_Publication() + theme(legend.title = element_blank(), legend.key.width=unit(1,"cm"))+ labs(y = expression(log[10]~"viral encounters " ~entity^-1~d^-1), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + scale_fill_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) #for all encounters resize.win (8.5,6) ggplot(data=all, aes(x=log10(disrate),y = log10(E_all_low_resvi) , color=group, fill=group)) + geom_line(size=2, position=position_jitter(w=0.02, h=0), aes(linetype="open ocean"))+ geom_line(size=2, data = all, aes(y= 
log10(E_all_high_resvi), color=group, linetype="shelf/slope")) + theme_Publication() + theme(legend.title = element_blank(), legend.key.width=unit(1,"cm"))+ labs(y = expression(log[10]~"total encounters " ~mL^-1~d^-1), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + scale_fill_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) #change color schemes for everything. follow scheme for naked, calc, lith #merge 3 disrates (10e-3, 10e-5, 10e-8) calm <- all %>% filter (disrate %in% c ("1e-08")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) calm$watcon <- "calm" stormy <- all %>% filter (disrate %in% c ("0.001")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) stormy$watcon <- "stormy" mid <- all %>% filter (disrate %in% c ("1e-05")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) mid$watcon <- "mid" calmstormy <- rbind (calm, stormy, mid) calmstormy$beta_all <- calmstormy$beta_BM + calmstormy$beta_DS + calmstormy$beta_turb ##probabilities check alpha v4 probs <- as.data.frame(list (group = as.factor (rep(c("calcified","coccolith", "naked"), 4)), virus = rep(c("high", "low"), 1, each=6), condition=rep(c("open ocean", "shelf/slope"), 2, each=3), hostnum = rep(c(10^3, 10^3, 10^2, 10^5, 10^5, 10^4), 1), virnum = rep(c(((10^3)*30), ((10^5)*30)), 2, each=3), prophost = rep(c(1, 25, 0.1), 4), propvir= rep(c(0.33, 0.67), 1, each=6), ads = rep(c(0.0243, 0.0302, 0.2994), 4), inf = rep(c(0.3, NA, 0.3, 0.3, NA, 0.3, 0.06, NA, 0.06, 0.06, NA, 0.06)))) #calmstormy_backup <- calmstormy calmstormy <- calmstormy_backup #join calmstormy and probs calmstormy <- left_join(calmstormy, probs) #calculate propEhV calmstormy$propEhV <- calmstormy$virnum* calmstormy$propvir #calculate prophost calmstormy$prophostnum <- calmstormy$hostnum* calmstormy$prophost #calculate encounters fast slow calmstormy$encounters <- 
calmstormy$beta_all*calmstormy$propEhV*calmstormy$prophostnum #total enc #calculate total adsorption by virus props calmstormy$adstot <- calmstormy$encounters*calmstormy$ads #calculate total successful infections calmstormy$sucinf <- calmstormy$encounters*calmstormy$ads*calmstormy$inf calmstormy$group <- reorder.factor (calmstormy$group, new.order = c("naked", "calcified", "coccolith")) #edit in excel to make life easier write.table(calmstormy, "Postdoc-R/Exported Tables/calmstormy_master7.csv", sep=";", col.names=T, row.names=F) #make individual data table for getting the distilled plot conc <- calmstormy[1:12, ] %>% select (group, virus, condition, prophostnum) conc$parameter="concentration" conc$calm <- NA conc$stormy <- NA conc$mid <-conc$prophostnum enc <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, encounters), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") enc$parameter= "encounters" ads <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, adstot), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") ads$parameter= "adsorption" inf <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, sucinf), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") inf$parameter= "infections" #renaming enc <- setnames (enc, c("encounters.calm", "encounters.stormy", "encounters.mid"), c("calm", "stormy", "mid")) ads <- setnames (ads, c("adstot.calm", "adstot.stormy", "adstot.mid"), c("calm", "stormy", "mid")) inf <- setnames (inf, c("sucinf.calm", "sucinf.stormy", "sucinf.mid"), c("calm", "stormy", "mid")) #combined allsteps <- rbind (conc, enc, ads, inf) resize.win(6,6) #correct ggplot(enc, aes(x=as.factor(group), y=log10(mid), color=as.factor(group), shape=as.factor(group))) + geom_point (size=5) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm))) + 
facet_grid(condition~virus) ##make the plot variable_labs <- c( `concentration` = 'concentration~(mL^{-1})', `encounters` = 'encounter~rate~(mL^{-1}~d^{-1})', `adsorption` = 'adsorption~rate~(mL^{-1}~d^{-1})', `infections` = 'infection~rate~(mL^{-1}~d^{-1})' ) resize.win (11,7)#change virus to low and high allsteps$parameter <- reorder.factor (allsteps$parameter, new.order = c("concentration", "encounters", "adsorption", "infections")) ggplot(allsteps %>% filter (virus=="high"), aes(x=group, y=log10(mid), color=group, shape=group)) + geom_point (size=6) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm)), width=0.5, size=1) + facet_grid(condition~parameter,labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (axis.title.x = element_blank(), legend.position = "none") + labs (y = expression(log[10])) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) ##edited according to the lab library(lemon) resize.win (11,8) ggplot(allsteps %>% filter (virus=="low"), aes(x=group, y=log10(stormy), color="stormy")) + geom_point (size=6, position = position_dodge()) + geom_point (data=allsteps %>% filter (virus=="high"), aes (x=group, y=log10(mid), color="mid"), size=6, position = position_dodge()) + geom_point (data=allsteps %>% filter (virus=="high"), aes (x=group, y=log10(calm), color="calm"), size=6, position = position_dodge()) + facet_rep_grid(condition~parameter, ##tick marks labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (axis.title.x = element_blank(), axis.text.x = element_text(angle = 45, vjust = 0.7), strip.background.x = element_blank(), axis.ticks.length = unit(5, "pt"),panel.spacing.y = unit(1, "lines"), legend.title = element_blank()) + labs (y = expression(log[10])) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c('#2b57a7', 
'#ffc4b4', '#b11346')) #allsteps split #combined high and low high <- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, mid)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, mid)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") allsteps_comb <- high %>% select (c(group, condition, prophostnum)) #combine high and low, chose mid turbulence allsteps_comb$enccomb <- high$mid.encounters + low$mid.encounters allsteps_comb$adscomb <- high$mid.adsorption + low$mid.adsorption allsteps_comb$infcomb <- high$mid.infections + low$mid.infections ##percentage of parameters allsteps_comb$perencounters <- (allsteps_comb$enccomb/allsteps_comb$prophostnum) allsteps_comb$peradsorbed <- (allsteps_comb$adscomb/allsteps_comb$prophostnum) allsteps_comb$perinf <- (allsteps_comb$infcomb/allsteps_comb$prophostnum) allsteps_comb <- allsteps_comb %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb$perencounters_noenc <- 1- allsteps_comb$perencounters allsteps_comb$perads_noads <- 1- allsteps_comb$peradsorbed allsteps_comb$perinf_noinf <- 1- allsteps_comb$perinf #melt data melt_enc <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = 
"identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed ggplot(melt_ads, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf %>% filter(!(group %in% c("Li"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #####under stormy conditions percentages #combined high.stormy and low.stormy high.stormy<- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, stormy)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low.stormy <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, stormy)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") 
allsteps_comb.stormy <- high.stormy %>% select (c(group, condition, prophostnum)) #combine high.stormy and low.stormy, chose mid turbulence allsteps_comb.stormy$enccomb <- high.stormy$stormy.encounters + low.stormy$stormy.encounters allsteps_comb.stormy$adscomb <- high.stormy$stormy.adsorption + low.stormy$stormy.adsorption allsteps_comb.stormy$infcomb <- high.stormy$stormy.infections + low.stormy$stormy.infections ##percentage of parameters allsteps_comb.stormy$perencounters <- (allsteps_comb.stormy$enccomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy$peradsorbed <- (allsteps_comb.stormy$adscomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy$perinf <- (allsteps_comb.stormy$infcomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy <- allsteps_comb.stormy %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb.stormy$perencounters_noenc <- 1- allsteps_comb.stormy$perencounters allsteps_comb.stormy$perads_noads <- 1- allsteps_comb.stormy$peradsorbed allsteps_comb.stormy$perinf_noinf <- 1- allsteps_comb.stormy$perinf #melt data melt_enc.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc.stormy, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = 
element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed ggplot(melt_ads.stormy, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf.stormy %>% filter(!(group %in% c("Li"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) ####under calm conditions percentages #combined high.calm and low.calm high.calm<- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, calm)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low.calm <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, calm)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") allsteps_comb.calm <- high.calm %>% select (c(group, condition, prophostnum)) #combine high.calm and low.calm, chose mid turbulence allsteps_comb.calm$enccomb <- 
high.calm$calm.encounters + low.calm$calm.encounters allsteps_comb.calm$adscomb <- high.calm$calm.adsorption + low.calm$calm.adsorption allsteps_comb.calm$infcomb <- high.calm$calm.infections + low.calm$calm.infections ##percentage of parameters allsteps_comb.calm$perencounters <- (allsteps_comb.calm$enccomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm$peradsorbed <- (allsteps_comb.calm$adscomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm$perinf <- (allsteps_comb.calm$infcomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm <- allsteps_comb.calm %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb.calm$perencounters_noenc <- 1- allsteps_comb.calm$perencounters allsteps_comb.calm$perads_noads <- 1- allsteps_comb.calm$peradsorbed allsteps_comb.calm$perinf_noinf <- 1- allsteps_comb.calm$perinf #melt data melt_enc.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc.calm, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed 
ggplot(melt_ads.calm, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf.calm %>% filter(!(group %in% c("coccolith"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) ####all host ##summaries of all host entities ##summaries of all host entities allhost <- calmstormy %>% filter (condition=="shelf/slope") %>% select(c(group, beta_all, watcon, virus, propvir, ads, inf)) allhost <- allhost[rep(seq_len(nrow(allhost)), 4), ] #4*18=72 allhost$hostnum <- rep(c (1 %o% 10^(seq(0, 3, 1))), 1, each=18) allhost <- allhost %>% mutate (prophost = case_when (group=="calcified" ~ hostnum, group=="naked" ~ hostnum*0.1, group=="coccolith" ~ hostnum*25)) allhost <- allhost %>% mutate (virnum = case_when (group=="calcified" ~ hostnum*30)) allhost$virnum <- rep(c (3 %o% 10^(seq(1, 4, 1))), 1, each=18) #allhost <- allhost %>% mutate(virnum2= if_else(group == "Cc", virnum, virnum)) #calculate propEhV allhost$propEhV <- allhost$virnum* allhost$propvir #calculate prophost allhost$prophostnum <- allhost$hostnum* allhost$prophost #calculate encounters fast slow allhost$encounters <- 
allhost$beta_all*allhost$propEhV*allhost$prophostnum #total enc #calculate total adsorption by virus props allhost$adstot <- allhost$encounters*allhost$ads #calculate total successful infections allhost$sucinf <- allhost$encounters*allhost$ads*allhost$inf #allhost_conc <- allhost[c(1:6, 13:18, 25:30, 37:42, 49:54, 61:66), ] %>% select (group, virus, prophostnum) #choose the concentrations (length of df/3) allhost_conc <- allhost %>% filter (watcon=="mid") %>% select (group, virus, prophostnum) #choose the concentrations (length of df/3) allhost_conc$parameter="concentration" allhost_conc$calm <- NA allhost_conc$stormy <- NA allhost_conc$mid <-allhost_conc$prophostnum allhost_enc <- reshape(allhost %>% select (group, virus, prophostnum, watcon, encounters), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_enc$parameter= "encounters" allhost_ads <- reshape(allhost %>% select (group, virus, prophostnum, watcon, adstot), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_ads$parameter= "adsorption" allhost_inf <- reshape(allhost %>% select (group, virus, prophostnum, watcon, sucinf), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_inf$parameter= "infections" #renaming allhost_enc <- setnames (allhost_enc, c("encounters.calm", "encounters.stormy", "encounters.mid"), c("calm", "stormy", "mid")) allhost_ads <- setnames (allhost_ads, c("adstot.calm", "adstot.stormy", "adstot.mid"), c("calm", "stormy", "mid")) allhost_inf <- setnames (allhost_inf, c("sucinf.calm", "sucinf.stormy", "sucinf.mid"), c("calm", "stormy", "mid")) #combined allhost_allsteps <- rbind (allhost_conc, allhost_enc, allhost_ads, allhost_inf) #correct ggplot(allhost_enc %>% filter (virus=="high"), aes(x=log10(prophostnum), y=log10(mid), color=as.factor(group), shape=as.factor(group))) + geom_point (size=5) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm))) #+ facet_grid(~group) 
##plot allhost_allsteps$parameter <- reorder.factor (allhost_allsteps$parameter, new.order = c("concentration", "encounters", "adsorption", "infections")) resize.win (14,7) #change virus to low and high ggplot(allhost_allsteps %>% filter (virus=="high"), aes(x=log10(prophostnum), y=log10(stormy), color="stormy")) + geom_point (size=6, position = position_dodge()) + geom_point (data=allhost_allsteps %>% filter (virus=="high"), aes (x=log10(prophostnum), y=log10(mid), color="mid"), size=6, position = position_dodge()) + geom_point (data=allhost_allsteps %>% filter (virus=="high"), aes (x=log10(prophostnum), y=log10(calm), color="calm"), size=6, position = position_dodge()) + facet_rep_grid(group~parameter, ##tick marks labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (strip.background.x = element_blank(), axis.ticks.length = unit(5, "pt"),panel.spacing.y = unit(1, "lines"), legend.title = element_blank()) + labs (y = expression(log[10]), x =expression (log[10]~"host concentration"~mL^-1)) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c('#2b57a7', '#ffc4b4', '#b11346')) ###adsorption plot library(readxl) ads_plot <- read_excel("Postdoc-R/CSV Files/ads.xlsx") ads_plot$group <- reorder.factor (ads_plot$group, new.order = c("Nc", "Cc", "Li")) ads_plot$group <- factor (ads_plot$group,labels= c("naked", "calcified", "coccolith")) resize.win(4,4) ggplot (ads_plot, aes(x=group, y=log10(adscoef), color=group)) + geom_boxplot() + theme_Publication() + theme (axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45, vjust = 0.6)) + labs (y= expression(log[10]~K[d]~("mL"~"day"^-1))) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) ggplot (ads_plot, aes(x=group, y=adsef*100, color=group)) + geom_boxplot() + theme_Publication() + theme (axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45, 
vjust = 0.6)) + labs(y = bquote(delta ~ "(%)")) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a"))
/Sep 2020/beta master7_plots.R
no_license
kaye11/Postdoc-R
R
false
false
28,966
r
##Workspace: beta_master6 #summaries BM #BM summary DS_pred #predicted data #for paper resize.win(6,6) #PIC vs sinkvel cell, real data, saved SI with lm and no lm PIC$group <- reorder.factor (PIC$group, new.order = c("Nc", "Cc")) PIC_newdata$group <- reorder.factor (PIC_newdata$group, new.order = c("Nc", "Cc")) ggplot(data=PIC, aes(x=PICpercellpg, y=SinkVel, color=group, shape=Strain)) + geom_point(size=7)+theme_Publication2()+ labs(y = expression("sinking velocity "~("m"~day^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + #geom_smooth (method="lm", aes(group=1), color="#525252") + scale_color_manual(values=c("#e41a1c", "#377eb8")) + scale_shape_manual(values=c(1:12)) #get regression #edited as lab suggested resize.win (8,6) ggplot(data=PIC, aes(x=PICpercellpg, y=SinkVel, color=group)) + geom_point(aes(size=Den_celltotal))+ scale_size(range = c(1,10)) + theme_Publication()+ labs(y = expression("sinking velocity "~("m"~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "vertical", legend.box = "vertical", legend.position="right", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c","#377eb8")) + scale_shape_manual(values=c(1:11)) + guides(colour = guide_legend(override.aes = list(size=5))) ggplot(data=PIC, aes(x=PICpercellpg, y=log10(beta_DS), color=group, shape=Strain)) + geom_point(size=5)+theme_Publication()+ labs(y = expression(log[10]~beta[S]~("encounters "~mL~day^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical")+ scale_color_manual(values=c("#e41a1c", "#377eb8")) + scale_shape_manual(values=c(1:12)) PIC$group <- factor (PIC$group,labels= c("naked", "calcified")) PIC$group <- reorder.factor (PIC$group, new.order = c("naked", "calcified")) liths <- data.frame (group=c("coccolith"), Den_celltotal =2.6, SinkVel = 0.45, beta_DS = 2.70*10^-6) #edited as lab suggested resize.win (6,7) 
ggplot(data=PIC, aes(x=SinkVel, y=log10(beta_DS), color=group, shape=Strain)) + geom_point(size=5)+theme_Publication()+ geom_point (data=liths, aes(x=SinkVel, y=log10(beta_DS), color="coccolith"), size=8, shape=20) + labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("sinking velocity "~("m"~d^-1))) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank())+ scale_color_manual(values=c( "#377eb8", "#4daf4a", "#e41a1c")) + scale_shape_manual(values=c(1:12)) resize.win(6,6) #Predicted data (equally spaced data) sinkvel PIC_newdata$group <- factor (PIC_newdata$group,labels= c("naked", "calcified")) ggplot(data=PIC_newdata, aes(x=PICpercellpg, y=SinkVel, color=group)) + geom_point(size=5)+theme_Publication()+ labs(y = expression("sinking velocity "~("m"~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c", "#377eb8")) #Predicted data (equally spaced data) betas, with liths, beta vs sinkvel ggplot(data=PIC_newdata, aes(x=SinkVel, y=log10(beta_DS), color=group)) + geom_point(size=5)+theme_Publication()+ geom_point (data=liths, aes(x=SinkVel, y=log10(beta_DS), color="coccolith"), size=8, shape=20) + labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("sinking velocity "~("m"~d^-1))) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#377eb8", "#4daf4a", "#e41a1c")) #Predicted data (equally spaced data) betas, without liths, beta vs. 
PIC ggplot(data=PIC_newdata, aes(x=PICpercellpg, y=log10(beta_DS), color=group)) + geom_point(size=5)+theme_Publication()+ labs(y = expression(log[10]~beta[S]~("encounters "~mL~d^-1)), x = expression("PIC pg"~cell^-1)) + theme(legend.direction = "horizontal", legend.box = "vertical", legend.title = element_blank()) + scale_color_manual(values=c("#e41a1c", "#377eb8")) #turbulence resize.win(6,6) turb$group <- factor (turb$group,labels= c("naked", "calcified", "coccolith")) ggplot(data = turb, aes(x = log10(disrate), y = log10(beta_turb), color=group)) + geom_line(size =2) + theme_Publication() + labs(y = expression(log[10]~beta[T]~("encounters "~mL~d^-1)), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + guides(colour = guide_legend(override.aes = list(size=5))) #encounters all betas #viral encounters per day per cell resize.win(8,6) all$group <- reorder.factor (all$group, new.order = c("Nc", "Cc", "Li")) all$group <- factor (all$group,labels= c("naked", "calcified", "coccolith")) ggplot(data=all, aes(x=log10(disrate),y = log10(E_all_low) , color=group)) + geom_line(size=2, position=position_jitter(w=0.02, h=0), aes(linetype="shelf slope"))+ geom_line(size=2, data = all, aes(y= log10(E_all_high), color=group,linetype="open ocean")) + theme_Publication() + theme(legend.title = element_blank(), legend.key.width=unit(1,"cm"))+ labs(y = expression(log[10]~"viral encounters " ~entity^-1~d^-1), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + scale_fill_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) #for all encounters resize.win (8.5,6) ggplot(data=all, aes(x=log10(disrate),y = log10(E_all_low_resvi) , color=group, fill=group)) + geom_line(size=2, position=position_jitter(w=0.02, h=0), aes(linetype="open ocean"))+ geom_line(size=2, data = all, aes(y= 
log10(E_all_high_resvi), color=group, linetype="shelf/slope")) + theme_Publication() + theme(legend.title = element_blank(), legend.key.width=unit(1,"cm"))+ labs(y = expression(log[10]~"total encounters " ~mL^-1~d^-1), x = expression(log[10]~epsilon~(m^2~s^-3))) + theme(legend.title = element_blank()) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) + scale_fill_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) #change color schemes for everything. follow scheme for naked, calc, lith #merge 3 disrates (10e-3, 10e-5, 10e-8) calm <- all %>% filter (disrate %in% c ("1e-08")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) calm$watcon <- "calm" stormy <- all %>% filter (disrate %in% c ("0.001")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) stormy$watcon <- "stormy" mid <- all %>% filter (disrate %in% c ("1e-05")) %>% select (group, beta_BM, beta_DS, disrate, beta_turb) mid$watcon <- "mid" calmstormy <- rbind (calm, stormy, mid) calmstormy$beta_all <- calmstormy$beta_BM + calmstormy$beta_DS + calmstormy$beta_turb ##probabilities check alpha v4 probs <- as.data.frame(list (group = as.factor (rep(c("calcified","coccolith", "naked"), 4)), virus = rep(c("high", "low"), 1, each=6), condition=rep(c("open ocean", "shelf/slope"), 2, each=3), hostnum = rep(c(10^3, 10^3, 10^2, 10^5, 10^5, 10^4), 1), virnum = rep(c(((10^3)*30), ((10^5)*30)), 2, each=3), prophost = rep(c(1, 25, 0.1), 4), propvir= rep(c(0.33, 0.67), 1, each=6), ads = rep(c(0.0243, 0.0302, 0.2994), 4), inf = rep(c(0.3, NA, 0.3, 0.3, NA, 0.3, 0.06, NA, 0.06, 0.06, NA, 0.06)))) #calmstormy_backup <- calmstormy calmstormy <- calmstormy_backup #join calmstormy and probs calmstormy <- left_join(calmstormy, probs) #calculate propEhV calmstormy$propEhV <- calmstormy$virnum* calmstormy$propvir #calculate prophost calmstormy$prophostnum <- calmstormy$hostnum* calmstormy$prophost #calculate encounters fast slow calmstormy$encounters <- 
calmstormy$beta_all*calmstormy$propEhV*calmstormy$prophostnum #total enc #calculate total adsorption by virus props calmstormy$adstot <- calmstormy$encounters*calmstormy$ads #calculate total successful infections calmstormy$sucinf <- calmstormy$encounters*calmstormy$ads*calmstormy$inf calmstormy$group <- reorder.factor (calmstormy$group, new.order = c("naked", "calcified", "coccolith")) #edit in excel to make life easier write.table(calmstormy, "Postdoc-R/Exported Tables/calmstormy_master7.csv", sep=";", col.names=T, row.names=F) #make individual data table for getting the distilled plot conc <- calmstormy[1:12, ] %>% select (group, virus, condition, prophostnum) conc$parameter="concentration" conc$calm <- NA conc$stormy <- NA conc$mid <-conc$prophostnum enc <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, encounters), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") enc$parameter= "encounters" ads <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, adstot), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") ads$parameter= "adsorption" inf <- reshape(calmstormy %>% select(group, watcon, virus, condition, prophostnum, sucinf), idvar=c("group", "virus", "condition", "prophostnum"), timevar="watcon", direction="wide") inf$parameter= "infections" #renaming enc <- setnames (enc, c("encounters.calm", "encounters.stormy", "encounters.mid"), c("calm", "stormy", "mid")) ads <- setnames (ads, c("adstot.calm", "adstot.stormy", "adstot.mid"), c("calm", "stormy", "mid")) inf <- setnames (inf, c("sucinf.calm", "sucinf.stormy", "sucinf.mid"), c("calm", "stormy", "mid")) #combined allsteps <- rbind (conc, enc, ads, inf) resize.win(6,6) #correct ggplot(enc, aes(x=as.factor(group), y=log10(mid), color=as.factor(group), shape=as.factor(group))) + geom_point (size=5) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm))) + 
facet_grid(condition~virus) ##make the plot variable_labs <- c( `concentration` = 'concentration~(mL^{-1})', `encounters` = 'encounter~rate~(mL^{-1}~d^{-1})', `adsorption` = 'adsorption~rate~(mL^{-1}~d^{-1})', `infections` = 'infection~rate~(mL^{-1}~d^{-1})' ) resize.win (11,7)#change virus to low and high allsteps$parameter <- reorder.factor (allsteps$parameter, new.order = c("concentration", "encounters", "adsorption", "infections")) ggplot(allsteps %>% filter (virus=="high"), aes(x=group, y=log10(mid), color=group, shape=group)) + geom_point (size=6) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm)), width=0.5, size=1) + facet_grid(condition~parameter,labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (axis.title.x = element_blank(), legend.position = "none") + labs (y = expression(log[10])) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) ##edited according to the lab library(lemon) resize.win (11,8) ggplot(allsteps %>% filter (virus=="low"), aes(x=group, y=log10(stormy), color="stormy")) + geom_point (size=6, position = position_dodge()) + geom_point (data=allsteps %>% filter (virus=="high"), aes (x=group, y=log10(mid), color="mid"), size=6, position = position_dodge()) + geom_point (data=allsteps %>% filter (virus=="high"), aes (x=group, y=log10(calm), color="calm"), size=6, position = position_dodge()) + facet_rep_grid(condition~parameter, ##tick marks labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (axis.title.x = element_blank(), axis.text.x = element_text(angle = 45, vjust = 0.7), strip.background.x = element_blank(), axis.ticks.length = unit(5, "pt"),panel.spacing.y = unit(1, "lines"), legend.title = element_blank()) + labs (y = expression(log[10])) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c('#2b57a7', 
'#ffc4b4', '#b11346')) #allsteps split #combined high and low high <- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, mid)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, mid)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") allsteps_comb <- high %>% select (c(group, condition, prophostnum)) #combine high and low, chose mid turbulence allsteps_comb$enccomb <- high$mid.encounters + low$mid.encounters allsteps_comb$adscomb <- high$mid.adsorption + low$mid.adsorption allsteps_comb$infcomb <- high$mid.infections + low$mid.infections ##percentage of parameters allsteps_comb$perencounters <- (allsteps_comb$enccomb/allsteps_comb$prophostnum) allsteps_comb$peradsorbed <- (allsteps_comb$adscomb/allsteps_comb$prophostnum) allsteps_comb$perinf <- (allsteps_comb$infcomb/allsteps_comb$prophostnum) allsteps_comb <- allsteps_comb %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb$perencounters_noenc <- 1- allsteps_comb$perencounters allsteps_comb$perads_noads <- 1- allsteps_comb$peradsorbed allsteps_comb$perinf_noinf <- 1- allsteps_comb$perinf #melt data melt_enc <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf <- reshape2::melt(allsteps_comb %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = 
"identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed ggplot(melt_ads, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf %>% filter(!(group %in% c("Li"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #####under stormy conditions percentages #combined high.stormy and low.stormy high.stormy<- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, stormy)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low.stormy <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, stormy)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") 
allsteps_comb.stormy <- high.stormy %>% select (c(group, condition, prophostnum)) #combine high.stormy and low.stormy, chose mid turbulence allsteps_comb.stormy$enccomb <- high.stormy$stormy.encounters + low.stormy$stormy.encounters allsteps_comb.stormy$adscomb <- high.stormy$stormy.adsorption + low.stormy$stormy.adsorption allsteps_comb.stormy$infcomb <- high.stormy$stormy.infections + low.stormy$stormy.infections ##percentage of parameters allsteps_comb.stormy$perencounters <- (allsteps_comb.stormy$enccomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy$peradsorbed <- (allsteps_comb.stormy$adscomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy$perinf <- (allsteps_comb.stormy$infcomb/allsteps_comb.stormy$prophostnum) allsteps_comb.stormy <- allsteps_comb.stormy %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb.stormy$perencounters_noenc <- 1- allsteps_comb.stormy$perencounters allsteps_comb.stormy$perads_noads <- 1- allsteps_comb.stormy$peradsorbed allsteps_comb.stormy$perinf_noinf <- 1- allsteps_comb.stormy$perinf #melt data melt_enc.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf.stormy <- reshape2::melt(allsteps_comb.stormy %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc.stormy, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = 
element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed ggplot(melt_ads.stormy, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf.stormy %>% filter(!(group %in% c("Li"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) ####under calm conditions percentages #combined high.calm and low.calm high.calm<- reshape(allsteps %>% filter (virus=="high") %>% select (c (group, condition, prophostnum, parameter, calm)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") low.calm <- reshape(allsteps %>% filter (virus=="low") %>% select (c (group, condition, prophostnum, parameter, calm)), idvar=c("group", "condition", "prophostnum"), timevar="parameter", direction="wide") allsteps_comb.calm <- high.calm %>% select (c(group, condition, prophostnum)) #combine high.calm and low.calm, chose mid turbulence allsteps_comb.calm$enccomb <- 
high.calm$calm.encounters + low.calm$calm.encounters allsteps_comb.calm$adscomb <- high.calm$calm.adsorption + low.calm$calm.adsorption allsteps_comb.calm$infcomb <- high.calm$calm.infections + low.calm$calm.infections ##percentage of parameters allsteps_comb.calm$perencounters <- (allsteps_comb.calm$enccomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm$peradsorbed <- (allsteps_comb.calm$adscomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm$perinf <- (allsteps_comb.calm$infcomb/allsteps_comb.calm$prophostnum) allsteps_comb.calm <- allsteps_comb.calm %>% mutate(perencounters= if_else(perencounters > 1, 1, perencounters)) %>% mutate(peradsorbed= if_else(peradsorbed > 1, 1, peradsorbed)) %>% mutate(perinf= if_else(perinf > 1, 1, perinf)) allsteps_comb.calm$perencounters_noenc <- 1- allsteps_comb.calm$perencounters allsteps_comb.calm$perads_noads <- 1- allsteps_comb.calm$peradsorbed allsteps_comb.calm$perinf_noinf <- 1- allsteps_comb.calm$perinf #melt data melt_enc.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "perencounters", "perencounters_noenc"), id.vars=c("group", "condition")) melt_ads.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "peradsorbed", "perads_noads" ), id.vars=c("group","condition")) melt_inf.calm <- reshape2::melt(allsteps_comb.calm %>% select ("group", "condition", "perinf", "perinf_noinf" ), id.vars=c("group", "condition")) #encounters resize.win (6,4) #no watcon ggplot(melt_enc.calm, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#808000", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.4f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #adsorbed 
ggplot(melt_ads.calm, aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#008080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) #infected ggplot(melt_inf.calm %>% filter(!(group %in% c("coccolith"))), aes(x = "", y = value, fill=variable)) + geom_bar(width = 1, stat = "identity", color = "black") + coord_polar("y", start = 0)+ scale_fill_manual(values=c("#800080", "#FFFFFF")) + theme_void() + theme (legend.position = "bottom", legend.text = element_text(size = 15), strip.text = element_text(size=15), legend.title=element_blank()) + facet_grid(condition~group) + geom_text(aes(y = value, label=sprintf("%0.6f", round(value, digits = 4))), size=5, position = position_stack(vjust = 0.5)) ####all host ##summaries of all host entities ##summaries of all host entities allhost <- calmstormy %>% filter (condition=="shelf/slope") %>% select(c(group, beta_all, watcon, virus, propvir, ads, inf)) allhost <- allhost[rep(seq_len(nrow(allhost)), 4), ] #4*18=72 allhost$hostnum <- rep(c (1 %o% 10^(seq(0, 3, 1))), 1, each=18) allhost <- allhost %>% mutate (prophost = case_when (group=="calcified" ~ hostnum, group=="naked" ~ hostnum*0.1, group=="coccolith" ~ hostnum*25)) allhost <- allhost %>% mutate (virnum = case_when (group=="calcified" ~ hostnum*30)) allhost$virnum <- rep(c (3 %o% 10^(seq(1, 4, 1))), 1, each=18) #allhost <- allhost %>% mutate(virnum2= if_else(group == "Cc", virnum, virnum)) #calculate propEhV allhost$propEhV <- allhost$virnum* allhost$propvir #calculate prophost allhost$prophostnum <- allhost$hostnum* allhost$prophost #calculate encounters fast slow allhost$encounters <- 
allhost$beta_all*allhost$propEhV*allhost$prophostnum #total enc #calculate total adsorption by virus props allhost$adstot <- allhost$encounters*allhost$ads #calculate total successful infections allhost$sucinf <- allhost$encounters*allhost$ads*allhost$inf #allhost_conc <- allhost[c(1:6, 13:18, 25:30, 37:42, 49:54, 61:66), ] %>% select (group, virus, prophostnum) #choose the concentrations (length of df/3) allhost_conc <- allhost %>% filter (watcon=="mid") %>% select (group, virus, prophostnum) #choose the concentrations (length of df/3) allhost_conc$parameter="concentration" allhost_conc$calm <- NA allhost_conc$stormy <- NA allhost_conc$mid <-allhost_conc$prophostnum allhost_enc <- reshape(allhost %>% select (group, virus, prophostnum, watcon, encounters), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_enc$parameter= "encounters" allhost_ads <- reshape(allhost %>% select (group, virus, prophostnum, watcon, adstot), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_ads$parameter= "adsorption" allhost_inf <- reshape(allhost %>% select (group, virus, prophostnum, watcon, sucinf), idvar=c("group", "virus", "prophostnum"), timevar="watcon", direction="wide") allhost_inf$parameter= "infections" #renaming allhost_enc <- setnames (allhost_enc, c("encounters.calm", "encounters.stormy", "encounters.mid"), c("calm", "stormy", "mid")) allhost_ads <- setnames (allhost_ads, c("adstot.calm", "adstot.stormy", "adstot.mid"), c("calm", "stormy", "mid")) allhost_inf <- setnames (allhost_inf, c("sucinf.calm", "sucinf.stormy", "sucinf.mid"), c("calm", "stormy", "mid")) #combined allhost_allsteps <- rbind (allhost_conc, allhost_enc, allhost_ads, allhost_inf) #correct ggplot(allhost_enc %>% filter (virus=="high"), aes(x=log10(prophostnum), y=log10(mid), color=as.factor(group), shape=as.factor(group))) + geom_point (size=5) + geom_errorbar(aes (ymax=log10(mid + stormy), ymin= log10(mid-calm))) #+ facet_grid(~group) 
##plot allhost_allsteps$parameter <- reorder.factor (allhost_allsteps$parameter, new.order = c("concentration", "encounters", "adsorption", "infections")) resize.win (14,7) #change virus to low and high ggplot(allhost_allsteps %>% filter (virus=="high"), aes(x=log10(prophostnum), y=log10(stormy), color="stormy")) + geom_point (size=6, position = position_dodge()) + geom_point (data=allhost_allsteps %>% filter (virus=="high"), aes (x=log10(prophostnum), y=log10(mid), color="mid"), size=6, position = position_dodge()) + geom_point (data=allhost_allsteps %>% filter (virus=="high"), aes (x=log10(prophostnum), y=log10(calm), color="calm"), size=6, position = position_dodge()) + facet_rep_grid(group~parameter, ##tick marks labeller = labeller(parameter = as_labeller(variable_labs, label_parsed))) + theme_Publication2() + theme (strip.background.x = element_blank(), axis.ticks.length = unit(5, "pt"),panel.spacing.y = unit(1, "lines"), legend.title = element_blank()) + labs (y = expression(log[10]), x =expression (log[10]~"host concentration"~mL^-1)) + geom_hline(yintercept = log10(1), linetype="dashed") + scale_color_manual (values=c('#2b57a7', '#ffc4b4', '#b11346')) ###adsorption plot library(readxl) ads_plot <- read_excel("Postdoc-R/CSV Files/ads.xlsx") ads_plot$group <- reorder.factor (ads_plot$group, new.order = c("Nc", "Cc", "Li")) ads_plot$group <- factor (ads_plot$group,labels= c("naked", "calcified", "coccolith")) resize.win(4,4) ggplot (ads_plot, aes(x=group, y=log10(adscoef), color=group)) + geom_boxplot() + theme_Publication() + theme (axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45, vjust = 0.6)) + labs (y= expression(log[10]~K[d]~("mL"~"day"^-1))) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a")) ggplot (ads_plot, aes(x=group, y=adsef*100, color=group)) + geom_boxplot() + theme_Publication() + theme (axis.title.x = element_blank(), legend.position = "none", axis.text.x = element_text(angle = 45, 
vjust = 0.6)) + labs(y = bquote(delta ~ "(%)")) + scale_color_manual (values=c("#e41a1c", "#377eb8", "#4daf4a"))
source("incl/start,load-only.R") message("*** Tweaking future strategies ...") message("*** y <- tweak(future::sequential) ...") sequential2 <- future::tweak(future::sequential) print(args(sequential2)) stopifnot(identical(sequential2, future::sequential)) stopifnot(!inherits(sequential2, "tweaked")) message("*** y <- tweak(future::sequential, local = FALSE) ...") sequential2 <- future::tweak(future::sequential, local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE) ...") sequential2 <- future::tweak("sequential", local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) library("future") message("*** y <- tweak(sequential, local = FALSE) ...") sequential2 <- future::tweak(sequential, local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE) ...") sequential2 <- future::tweak('sequential', local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE, abc = 1, def = TRUE) ...") res <- tryCatch({ sequential2 <- future::tweak('sequential', local = FALSE, abc = 1, def = TRUE) }, warning = function(w) { w }) stopifnot(inherits(res, "warning")) sequential2 <- future::tweak('sequential', local = FALSE, abc = 1, def = TRUE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) 
stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y %<-% { expr } %tweak% tweaks ...") plan(sequential) a <- 0 x %<-% { a <- 1; a } print(x) stopifnot(a == 0, x == 1) x %<-% { a <- 2; a } %tweak% list(local = FALSE) print(x) stopifnot(a == 2, x == 2) plan(sequential, local = FALSE) a <- 0 x %<-% { a <- 1; a } print(x) stopifnot(a == 1, x == 1) x %<-% { a <- 2; a } %tweak% list(local = TRUE) print(x) stopifnot(a == 1, x == 2) # Preserve nested futures plan(list(A = sequential, B = tweak(sequential, local = FALSE))) a <- 0 x %<-% { stopifnot(identical(names(plan("list")), "B")) a <- 1 a } print(x) stopifnot(a == 0, x == 1) x %<-% { stopifnot(identical(names(plan("list")), "B")) a <- 2 a } %tweak% list(local = FALSE) print(x) stopifnot(a == 2, x == 2) message("*** y %<-% { expr } %tweak% tweaks ... DONE") message("*** tweak() - gc = TRUE ...") res <- tryCatch(tweak(multisession, gc = TRUE), condition = identity) stopifnot(inherits(res, "tweaked")) ## Argument 'gc' is unknown res <- tryCatch(tweak(sequential, gc = TRUE), condition = identity) stopifnot(inherits(res, "warning")) res <- tryCatch(tweak(multicore, gc = TRUE), condition = identity) stopifnot(inherits(res, "warning")) message("*** tweak() - gc = TRUE ... DONE") message("*** tweak() - exceptions ...") res <- try(tweak("<unknown-future-strategy>"), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(base::eval), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, "unnamed-argument"), silent = TRUE) stopifnot(inherits(res, "try-error")) ## Arguments that must not be tweaked res <- try(tweak(sequential, lazy = TRUE), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, asynchronous = FALSE), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, seed = 42L), silent = TRUE) stopifnot(inherits(res, "try-error")) message("*** tweak() - exceptions ... 
DONE") message("*** Tweaking future strategies ... DONE") source("incl/end.R")
/tests/tweak.R
no_license
jcheng5/future
R
false
false
4,059
r
source("incl/start,load-only.R") message("*** Tweaking future strategies ...") message("*** y <- tweak(future::sequential) ...") sequential2 <- future::tweak(future::sequential) print(args(sequential2)) stopifnot(identical(sequential2, future::sequential)) stopifnot(!inherits(sequential2, "tweaked")) message("*** y <- tweak(future::sequential, local = FALSE) ...") sequential2 <- future::tweak(future::sequential, local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE) ...") sequential2 <- future::tweak("sequential", local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) library("future") message("*** y <- tweak(sequential, local = FALSE) ...") sequential2 <- future::tweak(sequential, local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE) ...") sequential2 <- future::tweak('sequential', local = FALSE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y <- tweak('sequential', local = FALSE, abc = 1, def = TRUE) ...") res <- tryCatch({ sequential2 <- future::tweak('sequential', local = FALSE, abc = 1, def = TRUE) }, warning = function(w) { w }) stopifnot(inherits(res, "warning")) sequential2 <- future::tweak('sequential', local = FALSE, abc = 1, def = TRUE) print(args(sequential2)) stopifnot(!identical(sequential2, future::sequential)) stopifnot(inherits(sequential2, "tweaked")) 
stopifnot(identical(formals(sequential2)$local, FALSE)) message("*** y %<-% { expr } %tweak% tweaks ...") plan(sequential) a <- 0 x %<-% { a <- 1; a } print(x) stopifnot(a == 0, x == 1) x %<-% { a <- 2; a } %tweak% list(local = FALSE) print(x) stopifnot(a == 2, x == 2) plan(sequential, local = FALSE) a <- 0 x %<-% { a <- 1; a } print(x) stopifnot(a == 1, x == 1) x %<-% { a <- 2; a } %tweak% list(local = TRUE) print(x) stopifnot(a == 1, x == 2) # Preserve nested futures plan(list(A = sequential, B = tweak(sequential, local = FALSE))) a <- 0 x %<-% { stopifnot(identical(names(plan("list")), "B")) a <- 1 a } print(x) stopifnot(a == 0, x == 1) x %<-% { stopifnot(identical(names(plan("list")), "B")) a <- 2 a } %tweak% list(local = FALSE) print(x) stopifnot(a == 2, x == 2) message("*** y %<-% { expr } %tweak% tweaks ... DONE") message("*** tweak() - gc = TRUE ...") res <- tryCatch(tweak(multisession, gc = TRUE), condition = identity) stopifnot(inherits(res, "tweaked")) ## Argument 'gc' is unknown res <- tryCatch(tweak(sequential, gc = TRUE), condition = identity) stopifnot(inherits(res, "warning")) res <- tryCatch(tweak(multicore, gc = TRUE), condition = identity) stopifnot(inherits(res, "warning")) message("*** tweak() - gc = TRUE ... DONE") message("*** tweak() - exceptions ...") res <- try(tweak("<unknown-future-strategy>"), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(base::eval), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, "unnamed-argument"), silent = TRUE) stopifnot(inherits(res, "try-error")) ## Arguments that must not be tweaked res <- try(tweak(sequential, lazy = TRUE), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, asynchronous = FALSE), silent = TRUE) stopifnot(inherits(res, "try-error")) res <- try(tweak(sequential, seed = 42L), silent = TRUE) stopifnot(inherits(res, "try-error")) message("*** tweak() - exceptions ... 
DONE") message("*** Tweaking future strategies ... DONE") source("incl/end.R")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ps_pairsPlot.R \name{ps_pairsPlot} \alias{ps_pairsPlot} \title{ps_pairsPlot} \usage{ ps_pairsPlot( doc = "ps_pairsPlot", data, GroupVar, Groups, AnalyticVars, Span = 2/3 ) } \arguments{ \item{doc}{A string documenting use added to the output list, default is the function name} \item{data}{A matrix or data frame containing the data to be analyzed} \item{GroupVar}{The name for variable defining grouping (required)} \item{Groups}{A vector of values of group variable for which plots are to be done; if "All": use all groups; if " ": no grouping} \item{AnalyticVars}{A vector of names (character values) of analytic results} \item{Span}{A value >0, <=1 defining the proportion of data used to estimate the lowess smooth. The default value (2/3) is the default value for the lowess function.} } \value{ A set of pairs plots as described above and a list with the following components: \itemize{ \item{usage:}{ A vector with the value of the argument doc, date run, version of R used} \item{dataUsed:}{ A data frame with the observations in data restricted to the groups analyzed} \item{dataNA:}{ A data frame with observations containing a least one missing value for an analysis variable, NA if no missing values} \item{analyticVars:}{ The vector specified by the parameter AnalyticVars} \item{params:}{ A list with the values of the grouping and numeric arguments} \item{analyticVars:}{ A vector with the value of the argument AnalyticVars} } } \description{ Pairs plots of specified analytic values, by specified groups } \section{DETAILS}{ The function produces a pairs plot with a lowess smooth through the scatter plot for each pair of variables in AnalyticVars. If Groups != " ", there is a scatter plot for each group in Groups. If Groups=" ", there is one pairs plot with the data for all groups. 
Executing the function produces warnings ("span is not a graphical parameter") that can be ignored (changing the value of Span does change the lowess smooths). As coded, in RStudio all plots are produced without a pause; use the back arrow in the plot pane to see the plots. In base R, remove the comment symbol (#) from the browser command at the end of the final loop, so that the function will stop after producing each plot. } \examples{ data(ObsidianSources) analyticVars<-c("Rb","Sr","Y","Zr","Nb") pairsPlot <- ps_pairsPlot(data=ObsidianSources, GroupVar="Code", Groups="All", AnalyticVars=analyticVars) }
/man/ps_pairsPlot.Rd
permissive
benmarwick/predictSource
R
false
true
2,539
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ps_pairsPlot.R \name{ps_pairsPlot} \alias{ps_pairsPlot} \title{ps_pairsPlot} \usage{ ps_pairsPlot( doc = "ps_pairsPlot", data, GroupVar, Groups, AnalyticVars, Span = 2/3 ) } \arguments{ \item{doc}{A string documenting use added to the output list, default is the function name} \item{data}{A matrix or data frame containing the data to be analyzed} \item{GroupVar}{The name for variable defining grouping (required)} \item{Groups}{A vector of values of group variable for which plots are to be done; if "All": use all groups; if " ": no grouping} \item{AnalyticVars}{A vector of names (character values) of analytic results} \item{Span}{A value >0, <=1 defining the proportion of data used to estimate the lowess smooth. The default value (2/3) is the default value for the lowess function.} } \value{ A set of pairs plots as described above and a list with the following components: \itemize{ \item{usage:}{ A vector with the value of the argument doc, date run, version of R used} \item{dataUsed:}{ A data frame with the observations in data restricted to the groups analyzed} \item{dataNA:}{ A data frame with observations containing a least one missing value for an analysis variable, NA if no missing values} \item{analyticVars:}{ The vector specified by the parameter AnalyticVars} \item{params:}{ A list with the values of the grouping and numeric arguments} \item{analyticVars:}{ A vector with the value of the argument AnalyticVars} } } \description{ Pairs plots of specified analytic values, by specified groups } \section{DETAILS}{ The function produces a pairs plot with a lowess smooth through the scatter plot for each pair of variables in AnalyticVars. If Groups != " ", there is a scatter plot for each group in Groups. If Groups=" ", there is one pairs plot with the data for all groups. 
Executing the function produces warnings ("span is not a graphical parameter") that can be ignored (changing the value of Span does change the lowess smooths). As coded, in RStudio all plots are produced without a pause; use the back arrow in the plot pane to see the plots. In base R, remove the comment symbol (#) from the browser command at the end of the final loop, so that the function will stop after producing each plot. } \examples{ data(ObsidianSources) analyticVars<-c("Rb","Sr","Y","Zr","Nb") pairsPlot <- ps_pairsPlot(data=ObsidianSources, GroupVar="Code", Groups="All", AnalyticVars=analyticVars) }
###Machine Learning Final Project### ###Spotify Song Attributes### library(ISLR) library(MASS) library(class) require(boot) rm(list = ls()) data <- read.csv(file = "data.csv", header = T) n <- nrow(data) #Logistic Regression glm.fit <- glm(target ~ acousticness+danceability+duration_ms+energy+instrumentalness+key+liveness+loudness+mode+speechiness+tempo+time_signature+valence, family = binomial, data = data) glm.probs <- predict(glm.fit, type = "response") glm.pred <- rep(0,n) glm.pred[glm.probs > 0.5] = 1 mytable <- table(data$target, glm.pred) mytable #Overall fraction of correct prediction mean(glm.pred == data$target) #Overall error rate mean(glm.pred != data$target) #Type I error type1ErrorRate <- mytable[1, 2] / sum(mytable[1, ]) type1ErrorRate #Type II error type2ErrorRate <- mytable[2, 1] / sum(mytable[2, ]) type2ErrorRate #Power of the model Power <- mytable[2, 2] / sum(mytable[2, ]) Power #Precision of the model precision <- mytable["1", "1"] / sum(mytable[, "1"]) precision #Predictors that are significant coef(glm.fit) summary(glm.fit) summary(glm.fit)$coefficients[,4] which(summary(glm.fit)$coefficients[,4] < 0.05) #Logistic Regression Using Statistically Significant Predictors glm.fit2 <- glm(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, family = binomial, data = data) glm.probs2 <- predict(glm.fit2, type = "response") glm.pred2 <- rep(0,n) glm.pred2[glm.probs2 > 0.5] = 1 mytable2 <- table(data$target, glm.pred2) mytable2 #Overall fraction of correct prediction mean(glm.pred2 == data$target) #Overall error rate mean(glm.pred2 != data$target) #Type I error type1ErrorRate2 <- mytable2[1, 2] / sum(mytable2[1, ]) type1ErrorRate2 #Type II error type2ErrorRate2 <- mytable2[2, 1] / sum(mytable2[2, ]) type2ErrorRate2 #Power of the model Power2 <- mytable2[2, 2] / sum(mytable2[2, ]) Power2 #Precision of the model precision2 <- mytable2["1", "1"] / sum(mytable2[, "1"]) precision2 ###LDA### lda.fit <- 
lda(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, data = data) lda.pred <- predict(lda.fit, data) lda.class <- lda.pred$class mytable3 <- table(data$target,lda.class) mytable3 #Overall fraction of correct prediction mean(lda.class == data$target) #Overall error rate mean(lda.class != data$target) #Type I error type1FalsePositiveErrorRate <- mytable[1, 2] / sum(mytable[1, ]) type1FalsePositiveErrorRate #Type II error type2FalseNegativeErrorRate <- mytable[2, 1] / sum(mytable[2, ]) type2FalseNegativeErrorRate #Power of the model sensitivityPowerRecall <- mytable[2, 2] / sum(mytable[2, ]) sensitivityPowerRecall #Precision of the model precision <- mytable["Up", "Up"] / sum(mytable[, "Up"]) precision ###QDA### qda.fit <- qda(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, data = data) qda.pred <- predict(qda.fit, data) qda.class <- qda.pred$class mytable4 <- table(data$target,qda.class) mytable4 #Overall fraction of correct prediction mean(qda.class == data$target) #Overall error rate mean(qda.class != data$target)
/final.R
no_license
rmmaw/MachineLearning-Final
R
false
false
3,295
r
###Machine Learning Final Project### ###Spotify Song Attributes### library(ISLR) library(MASS) library(class) require(boot) rm(list = ls()) data <- read.csv(file = "data.csv", header = T) n <- nrow(data) #Logistic Regression glm.fit <- glm(target ~ acousticness+danceability+duration_ms+energy+instrumentalness+key+liveness+loudness+mode+speechiness+tempo+time_signature+valence, family = binomial, data = data) glm.probs <- predict(glm.fit, type = "response") glm.pred <- rep(0,n) glm.pred[glm.probs > 0.5] = 1 mytable <- table(data$target, glm.pred) mytable #Overall fraction of correct prediction mean(glm.pred == data$target) #Overall error rate mean(glm.pred != data$target) #Type I error type1ErrorRate <- mytable[1, 2] / sum(mytable[1, ]) type1ErrorRate #Type II error type2ErrorRate <- mytable[2, 1] / sum(mytable[2, ]) type2ErrorRate #Power of the model Power <- mytable[2, 2] / sum(mytable[2, ]) Power #Precision of the model precision <- mytable["1", "1"] / sum(mytable[, "1"]) precision #Predictors that are significant coef(glm.fit) summary(glm.fit) summary(glm.fit)$coefficients[,4] which(summary(glm.fit)$coefficients[,4] < 0.05) #Logistic Regression Using Statistically Significant Predictors glm.fit2 <- glm(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, family = binomial, data = data) glm.probs2 <- predict(glm.fit2, type = "response") glm.pred2 <- rep(0,n) glm.pred2[glm.probs2 > 0.5] = 1 mytable2 <- table(data$target, glm.pred2) mytable2 #Overall fraction of correct prediction mean(glm.pred2 == data$target) #Overall error rate mean(glm.pred2 != data$target) #Type I error type1ErrorRate2 <- mytable2[1, 2] / sum(mytable2[1, ]) type1ErrorRate2 #Type II error type2ErrorRate2 <- mytable2[2, 1] / sum(mytable2[2, ]) type2ErrorRate2 #Power of the model Power2 <- mytable2[2, 2] / sum(mytable2[2, ]) Power2 #Precision of the model precision2 <- mytable2["1", "1"] / sum(mytable2[, "1"]) precision2 ###LDA### lda.fit <- 
lda(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, data = data) lda.pred <- predict(lda.fit, data) lda.class <- lda.pred$class mytable3 <- table(data$target,lda.class) mytable3 #Overall fraction of correct prediction mean(lda.class == data$target) #Overall error rate mean(lda.class != data$target) #Type I error type1FalsePositiveErrorRate <- mytable[1, 2] / sum(mytable[1, ]) type1FalsePositiveErrorRate #Type II error type2FalseNegativeErrorRate <- mytable[2, 1] / sum(mytable[2, ]) type2FalseNegativeErrorRate #Power of the model sensitivityPowerRecall <- mytable[2, 2] / sum(mytable[2, ]) sensitivityPowerRecall #Precision of the model precision <- mytable["Up", "Up"] / sum(mytable[, "Up"]) precision ###QDA### qda.fit <- qda(target ~ acousticness+danceability+duration_ms+instrumentalness+loudness+speechiness+tempo+valence, data = data) qda.pred <- predict(qda.fit, data) qda.class <- qda.pred$class mytable4 <- table(data$target,qda.class) mytable4 #Overall fraction of correct prediction mean(qda.class == data$target) #Overall error rate mean(qda.class != data$target)
# Exercise 5: dplyr grouped operations # Install the `nycflights13` package. Load (`library()`) the package. # You'll also need to load `dplyr` #install.packages("nycflights13") # should be done already library(nycflights13) library(dplyr) # What was the average departure delay in each month? # Save this as a data frame `dep_delay_by_month` # Hint: you'll have to perform a grouping operation then summarizing your data dep_delay_by_month <- flights %>% group_by(month) %>% summarise(delay = mean(dep_delay, na.rm = T)) View(dep_delay_by_month) # Which month had the greatest average departure delay? filter(dep_delay_by_month, delay == max(delay)) %>% select(month) # If your above data frame contains just two columns (e.g., "month", and "delay" in that order), you can create # a scatterplot by passing that data frame to the 'plot()' function plot(dep_delay_by_month) # To which destinations were the average arrival delays the highest? # Hint: you'll have to perform a grouping operation then summarize your data # You can use the `head()` function to view just the first few rows avg_arr_del <- flights %>% group_by(dest) %>% summarise(avg_arr_delay = mean(arr_delay, na.rm = T)) %>% arrange(-avg_arr_delay) # You can look up these airports in the `airports` data frame! View(airports) # Which city was flown to with the highest average speed?
/exercise-5/exercise.R
permissive
tleung22/ch10-dplyr
R
false
false
1,373
r
# Exercise 5: dplyr grouped operations # Install the `nycflights13` package. Load (`library()`) the package. # You'll also need to load `dplyr` #install.packages("nycflights13") # should be done already library(nycflights13) library(dplyr) # What was the average departure delay in each month? # Save this as a data frame `dep_delay_by_month` # Hint: you'll have to perform a grouping operation then summarizing your data dep_delay_by_month <- flights %>% group_by(month) %>% summarise(delay = mean(dep_delay, na.rm = T)) View(dep_delay_by_month) # Which month had the greatest average departure delay? filter(dep_delay_by_month, delay == max(delay)) %>% select(month) # If your above data frame contains just two columns (e.g., "month", and "delay" in that order), you can create # a scatterplot by passing that data frame to the 'plot()' function plot(dep_delay_by_month) # To which destinations were the average arrival delays the highest? # Hint: you'll have to perform a grouping operation then summarize your data # You can use the `head()` function to view just the first few rows avg_arr_del <- flights %>% group_by(dest) %>% summarise(avg_arr_delay = mean(arr_delay, na.rm = T)) %>% arrange(-avg_arr_delay) # You can look up these airports in the `airports` data frame! View(airports) # Which city was flown to with the highest average speed?
#' relatedProject #' #' relatedProject #' #' #' @inheritParams common_attributes #' @param title Title of the project. See [title()] #' @param personnel Contact and role information for people involved in the research project. See [personnel()] #' @param abstract Project Abstract. See [abstract()] #' @param funding Funding information. See [funding()] #' @param award Award information. See [award()] #' @param studyAreaDescription Description of the physical area associated with the research project, potentially including coverage, climate, geology, disturbances, etc. See [studyAreaDescription()] #' @param designDescription Description of the design of the research project See [designDescription()] #' @param relatedProject This field is a recursive link to another project. See [relatedProject()] #' @param references The id of another element in this EML document to be used to here in this context. See [references()] #' #' @return a relatedProject list object #' #' @export relatedProject <- function(title = NULL, personnel = NULL, abstract = NULL, funding = NULL, award = NULL, studyAreaDescription = NULL, designDescription = NULL, relatedProject = NULL, references = NULL, id = NULL, system = NULL, scope = NULL){ Filter(Negate(is.null), list( title = title, personnel = personnel, abstract = abstract, funding = funding, award = award, studyAreaDescription = studyAreaDescription, designDescription = designDescription, relatedProject = relatedProject, references = references, id = id, system = system, scope = scope))}
/R/relatedProject.R
permissive
cboettig/build.eml
R
false
false
1,556
r
#' relatedProject #' #' relatedProject #' #' #' @inheritParams common_attributes #' @param title Title of the project. See [title()] #' @param personnel Contact and role information for people involved in the research project. See [personnel()] #' @param abstract Project Abstract. See [abstract()] #' @param funding Funding information. See [funding()] #' @param award Award information. See [award()] #' @param studyAreaDescription Description of the physical area associated with the research project, potentially including coverage, climate, geology, disturbances, etc. See [studyAreaDescription()] #' @param designDescription Description of the design of the research project See [designDescription()] #' @param relatedProject This field is a recursive link to another project. See [relatedProject()] #' @param references The id of another element in this EML document to be used to here in this context. See [references()] #' #' @return a relatedProject list object #' #' @export relatedProject <- function(title = NULL, personnel = NULL, abstract = NULL, funding = NULL, award = NULL, studyAreaDescription = NULL, designDescription = NULL, relatedProject = NULL, references = NULL, id = NULL, system = NULL, scope = NULL){ Filter(Negate(is.null), list( title = title, personnel = personnel, abstract = abstract, funding = funding, award = award, studyAreaDescription = studyAreaDescription, designDescription = designDescription, relatedProject = relatedProject, references = references, id = id, system = system, scope = scope))}
##' transparent background theme
##'
##' A ggplot2 theme in which the panel, plot, legend keys and legend
##' background are drawn with a fully transparent fill and no border
##' (useful when overlaying a plot on an image or a non-white page).
##'
##' @title theme_transparent
##' @param ... additional parameters to tweak the theme, passed on to
##'     \code{ggplot2::theme()}
##' @return ggplot object
##' @importFrom ggplot2 theme
##' @importFrom ggplot2 element_rect
##' @export
##' @author Guangchuang Yu with contributions from Hugo Gruson
theme_transparent <- function(...) {
    theme(panel.background = element_rect(
              fill = "transparent",
              colour = NA),
          plot.background = element_rect(
              fill = "transparent",
              colour = NA),
          legend.key = element_rect(
              fill = "transparent",
              colour = NA),
          legend.background = element_rect(
              fill = "transparent",
              colour = NA),
          ...)
}

##' A theme that only show the plot panel
##'
##' Starts from \code{theme_void()} and pulls the plot margins slightly
##' negative so that nothing but the panel itself is rendered.
##'
##' Fix: \code{unit()} is used below but was never imported (the old
##' roxygen block imported the unused \code{aes_} instead), so the
##' generated NAMESPACE left \code{unit} unresolved when the package was
##' not attached.
##'
##' @title theme_nothing
##' @param base_size font size
##' @param base_family font family
##' @importFrom ggplot2 %+replace%
##' @importFrom ggplot2 theme_void
##' @importFrom ggplot2 unit
##' @return ggplot2 theme
##' @export
##' @author Guangchuang Yu
theme_nothing <- function(base_size = 11, base_family = "") {
    theme_void(base_size = base_size, base_family = base_family) %+replace%
        ## negative bottom/left margins trim the residual border that
        ## theme_void() would otherwise leave around the panel
        theme(plot.margin = unit(c(0, 0, -.2, -.2), "lines"))
}
/R/theme.R
no_license
GuangchuangYu/ggimage
R
false
false
1,262
r
##' transparent background theme ##' ##' ##' @title theme_transparent ##' @param ... additional parameter to tweak the theme ##' @return ggplot object ##' @importFrom ggplot2 theme ##' @importFrom ggplot2 element_rect ##' @export ##' @author Guangchuang Yu with contributions from Hugo Gruson theme_transparent <- function(...) { theme(panel.background = element_rect( fill = "transparent", colour = NA), plot.background = element_rect( fill = "transparent", colour = NA), legend.key = element_rect( fill = "transparent", colour = NA), legend.background = element_rect( fill = "transparent", colour = NA), ...) } ##' A theme that only show the plot panel ##' ##' ##' @title theme_nothing ##' @param base_size font size ##' @param base_family font family ##' @importFrom ggplot2 %+replace% ##' @importFrom ggplot2 aes_ ##' @importFrom ggplot2 theme_void ##' @return ggplot2 theme ##' @export ##' @author Guangchuang Yu theme_nothing <- function(base_size = 11, base_family = "") { theme_void(base_size = base_size, base_family = base_family) %+replace% theme(plot.margin=unit(c(0,0, -.2, -.2), "lines")) }
## drawObs: interactively delineate an observation area on the first
## image in ./vis and return the raster cells inside it.
##
## The first JPEG in ./vis is split into R/G/B raster layers, plotted,
## and the user clicks a polygon boundary (finish with the escape key).
## Returns a data.frame with the cell numbers inside the polygon, their
## row/column indices, and their x/y coordinates.
##
## Fixes vs. the original:
##  - errors clearly when ./vis is empty (previously readJPEG(NA) would
##    fail with a confusing message);
##  - the caller's `warn` option is saved and restored (previously it
##    was unconditionally reset to 0, and not restored at all if
##    drawPoly() was interrupted);
##  - T -> TRUE.
drawObs <- function(){
  files <- list.files(path = "./vis", full.names = TRUE)
  if (length(files) == 0) {
    stop("no image files found in './vis'", call. = FALSE)
  }
  vis.jpeg <- readJPEG(files[1])
  vis.red <- raster(vis.jpeg[,,1])
  vis.green <- raster(vis.jpeg[,,2])
  vis.blue <- raster(vis.jpeg[,,3])
  rgb <- stack(vis.red, vis.green, vis.blue)

  ## Suppress warnings only for the plotting/digitising step; on.exit
  ## guarantees restoration even if drawPoly() errors or is interrupted.
  old <- options(warn = -1)
  on.exit(options(old), add = TRUE)
  plotRGB(rgb, scale = 1, asp = nrow(vis.red)/ncol(vis.red))
  message("Click at points along the boundary of the observation area in the plotted image. Press the escape key when finished.")
  poly <- drawPoly()
  options(old)  # restore now so warnings during extraction surface normally

  message("Extracting cells. Please wait.")
  cells <- data.frame(extract(vis.red, poly, cellnumbers = TRUE))[,1]
  out <- data.frame(cells,
                    rowColFromCell(vis.red, cells),
                    xyFromCell(vis.red, cells))
  return(out)
}
/R/drawObs.R
no_license
mosscoder/crustCover
R
false
false
737
r
drawObs <- function(){ file <- list.files(path="./vis")[1] vis.jpeg <- readJPEG(paste("./vis/",file,sep="")) vis.red <- raster(vis.jpeg[,,1]) vis.green <- raster(vis.jpeg[,,2]) vis.blue <- raster(vis.jpeg[,,3]) rgb <- stack(vis.red, vis.green, vis.blue) options(warn = -1) plotRGB(rgb, scale = 1, asp = nrow(vis.red)/ncol(vis.red)) message("Click at points along the boundary of the observation area in the plotted image. Press the escape key when finished.") poly <- drawPoly() options(warn = 0) message("Extracting cells. Please wait.") cells <- data.frame(extract(vis.red, poly, cellnumbers = T))[,1] out <- data.frame(cells, rowColFromCell(vis.red, cells), xyFromCell(vis.red,cells)) return(out) }
## Simulation study: efficiency of weighted (design-based) relative to
## unweighted (maximum-likelihood) logistic regression under
## case-control sampling, summarised via the correlation between the
## two estimators' influence-function-type quantities.
## NOTE(review): outputs are saved to the home directory ("~/...");
## confirm that is the intended location.

beta<-1

## Inverse-logit and logit helpers.
expit<-function(eta) exp(eta)/(1+exp(eta))
logit<-function(p) log(p/(1-p))

## One simulation: 1500 replicates of a dataset with 5000 cases (y = 1)
## and 5000*m controls (y = 0); x is Gaussian with a mean shift of
## `beta` between the two groups (split so the overall mean is zero).
## Per replicate, returns:
##   1. slope from the unweighted (ML) fit
##   2. slope from the weighted fit (controls weighted 101, cases 1)
##   3. correlation of the two fits' score-based quantities (u vs uw)
##   4. the same correlation evaluated at the true parameter values
one.sim<-function(beta,m=1) replicate(1500,{
    y<-rep(0:1,c(5000*m,5000))
    x<-c(rnorm(5000*m,mean=-beta/(m+1)),rnorm(5000,mean=beta*m/(m+1)))
    w<-1+(1-y)*100                           # weight 101 for controls, 1 for cases
    X<-cbind(1,x)
    m1<-glm(y~x,family=binomial)             # unweighted (ML) fit
    m1w<-glm(y~x,family=binomial,weights=w)  # weighted fit
    ## Score contributions scaled by the estimated variance matrices.
    u<-(X*(y-fitted(m1)))%*%vcov(m1)
    uw<-(X*(w/mean(w))*(y-fitted(m1w)))%*%vcov(m1w)
    pi0<-1/100  # presumably the control sampling fraction -- TODO confirm
    alpha<-logit(mean(y-x*beta))+log(pi0)
    ## Same quantities evaluated at the true coefficients rather than
    ## at the fitted values.
    mu<-expit(as.vector(X%*%c(alpha-log(pi0),beta)))
    muw<-expit(as.vector(X%*%c(alpha,beta)))
    u0<-(X*(y-mu))%*%solve(crossprod(X*mu*(1-mu),X))
    uw0<-(X*w*(y-muw))%*%solve(crossprod(X*w*muw*(1-muw),X))
    c(coef(m1)[2],coef(m1w)[2],cor(u[,2],uw[,2]),cor(u0[,2],uw0[,2]))
})

## Full simulation over a grid of true slopes.
betasim<-c(-1, -0.5, 0,0.1,0.5,1,1.5,2)
rr<-lapply(betasim,one.sim)

## Influence-correlation-only version: 5000 replicates returning just
## the correlation at the true parameter values (cheaper; no glm fits).
one.inf<-function(beta,m=1) replicate(5000,{
    y<-rep(0:1,c(5000*m,5000))
    x<-c(rnorm(5000*m,mean=-beta/(m+1)),rnorm(5000,mean=beta*m/(m+1)))
    w<-1+(1-y)*100
    X<-cbind(1,x)
    pi0<-1/100
    alpha<-logit(mean(y-x*beta))+log(pi0)
    mu<-expit(as.vector(X%*%c(alpha-log(pi0),beta)))
    muw<-expit(as.vector(X%*%c(alpha,beta)))
    u0<-(X*(y-mu))%*%solve(crossprod(X*mu*(1-mu),X))
    uw0<-(X*w*(y-muw))%*%solve(crossprod(X*w*muw*(1-muw),X))
    cor(u0[,2],uw0[,2])
})

betainf<-c(-1, -0.5, 0,0.5,1,1.5,2,2.5,3,4,5,6)
r1<-lapply(betainf,one.inf)
save(r1, rr, betasim,betainf,file="~/cc-score4efficiency.rda")

## Efficiency summaries: mean squared correlation (analytic route) vs
## the empirical variance ratio of the two slope estimators.
einf<-sapply(r1,function(x) mean(x^2))
esim<-sapply(rr,function(d) var(d[1,])/var(d[2,]))

## Overlay both efficiency estimates against the true slope.
plot(betasim,esim, xlim=c(-1,6),ylim=c(0,1),type="b",ylab="Efficiency",xlab=expression(beta))
points(betainf,einf,type="b",col="blue",lty=2)

## Repeat for different control-to-case matching ratios (m = 1, 5, 10).
betamatch<-seq(-3,3,length=50)
r2<-lapply(betamatch,function(b) c(mean(one.inf(b,m=1)^2),mean(one.inf(b,m=5)^2)))
r3<-sapply(betamatch,function(b) mean(one.inf(b,m=10)^2))

plot(betamatch,sapply(r2,function(r) r[1]),type="b",xlab=expression(beta),ylab="Efficiency relative to MLE")
points(betamatch,sapply(r2,function(r) r[2]),type="b",pch=19,col="darkgrey")
points(betamatch,r3,type="b",pch=19,col="black")
legend("topleft",pch=c(1,19,19),col=c("black","darkgrey","black"), legend=c("1 control per case","5 controls per case","10 controls per case"),bty="n")
save(r2,r3, betamatch, file="~/cc-matchingratio.rda")
/cc-eff-scoreconv.R
no_license
tslumley/cc-efficiency
R
false
false
2,229
r
beta<-1 expit<-function(eta) exp(eta)/(1+exp(eta)) logit<-function(p) log(p/(1-p)) one.sim<-function(beta,m=1) replicate(1500,{ y<-rep(0:1,c(5000*m,5000)) x<-c(rnorm(5000*m,mean=-beta/(m+1)),rnorm(5000,mean=beta*m/(m+1))) w<-1+(1-y)*100 X<-cbind(1,x) m1<-glm(y~x,family=binomial) m1w<-glm(y~x,family=binomial,weights=w) u<-(X*(y-fitted(m1)))%*%vcov(m1) uw<-(X*(w/mean(w))*(y-fitted(m1w)))%*%vcov(m1w) pi0<-1/100 alpha<-logit(mean(y-x*beta))+log(pi0) mu<-expit(as.vector(X%*%c(alpha-log(pi0),beta))) muw<-expit(as.vector(X%*%c(alpha,beta))) u0<-(X*(y-mu))%*%solve(crossprod(X*mu*(1-mu),X)) uw0<-(X*w*(y-muw))%*%solve(crossprod(X*w*muw*(1-muw),X)) c(coef(m1)[2],coef(m1w)[2],cor(u[,2],uw[,2]),cor(u0[,2],uw0[,2])) }) betasim<-c(-1, -0.5, 0,0.1,0.5,1,1.5,2) rr<-lapply(betasim,one.sim) one.inf<-function(beta,m=1) replicate(5000,{ y<-rep(0:1,c(5000*m,5000)) x<-c(rnorm(5000*m,mean=-beta/(m+1)),rnorm(5000,mean=beta*m/(m+1))) w<-1+(1-y)*100 X<-cbind(1,x) pi0<-1/100 alpha<-logit(mean(y-x*beta))+log(pi0) mu<-expit(as.vector(X%*%c(alpha-log(pi0),beta))) muw<-expit(as.vector(X%*%c(alpha,beta))) u0<-(X*(y-mu))%*%solve(crossprod(X*mu*(1-mu),X)) uw0<-(X*w*(y-muw))%*%solve(crossprod(X*w*muw*(1-muw),X)) cor(u0[,2],uw0[,2]) }) betainf<-c(-1, -0.5, 0,0.5,1,1.5,2,2.5,3,4,5,6) r1<-lapply(betainf,one.inf) save(r1, rr, betasim,betainf,file="~/cc-score4efficiency.rda") einf<-sapply(r1,function(x) mean(x^2)) esim<-sapply(rr,function(d) var(d[1,])/var(d[2,])) plot(betasim,esim, xlim=c(-1,6),ylim=c(0,1),type="b",ylab="Efficiency",xlab=expression(beta)) points(betainf,einf,type="b",col="blue",lty=2) betamatch<-seq(-3,3,length=50) r2<-lapply(betamatch,function(b) c(mean(one.inf(b,m=1)^2),mean(one.inf(b,m=5)^2))) r3<-sapply(betamatch,function(b) mean(one.inf(b,m=10)^2)) plot(betamatch,sapply(r2,function(r) r[1]),type="b",xlab=expression(beta),ylab="Efficiency relative to MLE") points(betamatch,sapply(r2,function(r) r[2]),type="b",pch=19,col="darkgrey") points(betamatch,r3,type="b",pch=19,col="black") 
legend("topleft",pch=c(1,19,19),col=c("black","darkgrey","black"), legend=c("1 control per case","5 controls per case","10 controls per case"),bty="n") save(r2,r3, betamatch, file="~/cc-matchingratio.rda")
#load packages and data
## NOTE(review): several objects used below (emerge_reu_mg, ben_dm_tot,
## spider_abund) are not created in this script; they are presumably
## provided by src/packages.R -- confirm.
source("src/packages.R")

# Extract conditional posteriors

# diet model --------------------------------------------------------------
diet_brms <- readRDS("models/diet_brms.rds")
fish_totals <- read_csv(file = "data/raw_data/fish_totals.csv")

## Posterior expected predictions for each prey_stage/date2/species
## cell, joined with total fish abundance so per-capita predictions can
## be scaled to the population level.
diet_brm_postpreds <- diet_brms$data %>%
  distinct(prey_stage, date2, species) %>%
  add_epred_draws(diet_brms, dpar = T, re_formula = NULL) %>%  # NOTE(review): `T` should be spelled TRUE
  # filter(.draw <= 1000) %>%
  left_join(fish_totals %>% mutate(date2 = as.factor(date2))) %>%
  mutate(population_epred = .epred*total_abund) %>%
  arrange(-.epred)

# taxon_stage <- diet_brm_postpreds %>% ungroup() %>% distinct(prey_taxon) %>%
#   separate(prey_taxon, c("prey_taxon_only", "prey_stage"), remove = F)

## Per-capita summaries: one column per prey_stage (pa / not_pa), plus
## the proportion of the diet that is "pa".
diet_ind_postpreds_wide <- diet_brm_postpreds %>%
  group_by(prey_stage, date2, species, .draw) %>%
  summarize(sum = sum(.epred)) %>%
  pivot_wider(names_from = prey_stage, values_from = sum) %>%
  replace(is.na(.), 0) %>%
  mutate(total = pa + not_pa, prop_pa = pa/total) %>%
  mutate(data_level = "Per capita")

## Population-level summaries: same shape, built from abundance-scaled
## predictions.
diet_pop_postpreds_wide <- diet_brm_postpreds %>%
  group_by(prey_stage, date2, species, .draw) %>%
  summarize(sum = sum(population_epred)) %>%
  pivot_wider(names_from = prey_stage, values_from = sum) %>%
  replace(is.na(.), 0) %>%
  mutate(total = pa + not_pa, prop_pa = pa/total) %>%
  mutate(data_level = "Per population")

## Community level: population-level draws summed across species.
diet_community <- diet_pop_postpreds_wide %>%
  group_by(date2, .draw) %>%
  summarize(pa = sum(pa), total = sum(total), not_pa = sum(not_pa)) %>%
  mutate(prop_pa = pa/total) %>%
  mutate(data_level = "Per community", species = "Community")

## Stack all three levels, relabel species codes for display, and order
## the data_level factor for plotting.
all_diet_posts <- bind_rows(diet_ind_postpreds_wide, diet_pop_postpreds_wide, diet_community) %>%
  mutate(species = case_when(species == "spotfin" ~ "Spotfin Shiner",
                             species == "bluegill" ~ "Bluegill",
                             species == "largemouth" ~ "Largemouth Bass",
                             species == "rivershiner" ~ "River Shiner",
                             TRUE ~ species)) %>%
  ungroup() %>%
  mutate(data_level = fct_relevel(data_level, "Per capita", "Per population"))

saveRDS(all_diet_posts,
        file = "posteriors/all_diet_posts.rds")

# emergence model ---------------------------------------------------------
emerge_dm_model <- readRDS("models/emerge_dm_model.rds")
emerge_cond_posts <- emerge_reu_mg %>%
  data_grid(date, trt2) %>%
  add_epred_draws(emerge_dm_model, re_formula = NA) %>%
  mutate(date = mdy(date),
         trt = case_when(trt2 == "ctrl" ~ "fish", TRUE ~ "no fish"))

saveRDS(emerge_cond_posts, file = "posteriors/emerge_cond_posts.rds")

# benthic model ---------------
brm_ben_m2 <- readRDS("models/brm_ben_m2.rds")
benthic_cond_posts <- ben_dm_tot %>%
  data_grid(date, trt, taxon) %>%
  add_epred_draws(brm_ben_m2, re_formula = NA) %>%
  mutate(date = mdy(date),
         trt = case_when(trt == "ctrl" ~ "fish", TRUE ~ "no fish"))

saveRDS(benthic_cond_posts, file = "posteriors/benthic_cond_posts.rds")

# spider model ---------------
spiders_brm <- readRDS("models/spiders_brm.rds")
## Recode treatment to the model's factor levels, predict over the
## grid, then recode back to display labels.
spiders_cond_posts <- spider_abund %>%
  select(-trt) %>%
  mutate(date = mdy(date)) %>%
  rename(trt = treatment) %>%
  mutate(trt = case_when(trt == "fish" ~ "ctrl", TRUE ~ "exc")) %>%
  data_grid(date, trt) %>%
  add_epred_draws(spiders_brm, re_formula = NA) %>%
  mutate(trt = case_when(trt == "ctrl" ~ "fish", TRUE ~ "no fish"))

saveRDS(spiders_cond_posts, file = "posteriors/spiders_cond_posts.rds")

# make prior table --------------------------------------------------------
## For each model: tag rows with the model file name, turn empty
## prior/coef strings into NA via case_when (no TRUE branch), and carry
## the last non-missing prior downward with fill().
prior_diet_brms <- diet_brms$prior %>%
  as_tibble() %>%
  mutate(model = "diet_brms.rds") %>%
  mutate(prior = case_when(nchar(prior) != 0 ~ prior),
         coef = case_when(nchar(coef) != 0 ~ coef)) %>%
  fill(prior)

emerge_prior <- emerge_dm_model$prior %>%
  mutate(model = "emerge_dm_model.rds") %>%
  mutate(prior = case_when(nchar(prior) != 0 ~ prior),
         coef = case_when(nchar(coef) != 0 ~ coef)) %>%
  fill(prior)

benthic_prior <- brm_ben_m2$prior %>%
  mutate(model = "brm_ben_m2.rds") %>%
  mutate(prior = case_when(nchar(prior) != 0 ~ prior),
         coef = case_when(nchar(coef) != 0 ~ coef)) %>%
  fill(prior)

spiders_prior <- spiders_brm$prior %>%
  mutate(model = "spiders_brm.rds") %>%
  mutate(prior = case_when(nchar(prior) != 0 ~ prior),
         coef = case_when(nchar(coef) != 0 ~ coef)) %>%
  fill(prior)

all_prior <- bind_rows(prior_diet_brms, emerge_prior, benthic_prior, spiders_prior)

write_csv(all_prior, file = "models/prior_predictive/all_prior.csv")
/code/3) extract posteriors.R
no_license
jswesner/reu_bcra
R
false
false
4,662
r
#load packages and data source("src/packages.R") # Extract conditional posteriors # diet model -------------------------------------------------------------- diet_brms <- readRDS("models/diet_brms.rds") fish_totals <- read_csv(file = "data/raw_data/fish_totals.csv") diet_brm_postpreds <- diet_brms$data %>% distinct(prey_stage, date2, species) %>% add_epred_draws(diet_brms, dpar = T, re_formula = NULL) %>% # filter(.draw <= 1000) %>% left_join(fish_totals %>% mutate(date2 = as.factor(date2))) %>% mutate(population_epred = .epred*total_abund) %>% arrange(-.epred) # taxon_stage <- diet_brm_postpreds %>% ungroup() %>% distinct(prey_taxon) %>% # separate(prey_taxon, c("prey_taxon_only", "prey_stage"), remove = F) diet_ind_postpreds_wide <- diet_brm_postpreds %>% group_by(prey_stage, date2, species, .draw) %>% summarize(sum = sum(.epred)) %>% pivot_wider(names_from = prey_stage, values_from = sum) %>% replace(is.na(.), 0) %>% mutate(total = pa + not_pa, prop_pa = pa/total) %>% mutate(data_level = "Per capita") diet_pop_postpreds_wide <- diet_brm_postpreds %>% group_by(prey_stage, date2, species, .draw) %>% summarize(sum = sum(population_epred)) %>% pivot_wider(names_from = prey_stage, values_from = sum) %>% replace(is.na(.), 0) %>% mutate(total = pa + not_pa, prop_pa = pa/total) %>% mutate(data_level = "Per population") diet_community <- diet_pop_postpreds_wide %>% group_by(date2, .draw) %>% summarize(pa = sum(pa), total = sum(total), not_pa = sum(not_pa)) %>% mutate(prop_pa = pa/total) %>% mutate(data_level = "Per community", species = "Community") all_diet_posts <- bind_rows(diet_ind_postpreds_wide, diet_pop_postpreds_wide, diet_community) %>% mutate(species = case_when(species == "spotfin" ~ "Spotfin Shiner", species == "bluegill" ~ "Bluegill", species == "largemouth" ~ "Largemouth Bass", species == "rivershiner" ~ "River Shiner", TRUE ~ species)) %>% ungroup() %>% mutate(data_level = fct_relevel(data_level, "Per capita", "Per population")) saveRDS(all_diet_posts, 
file = "posteriors/all_diet_posts.rds") # emergence model --------------------------------------------------------- emerge_dm_model <- readRDS("models/emerge_dm_model.rds") emerge_cond_posts <- emerge_reu_mg %>% data_grid(date, trt2) %>% add_epred_draws(emerge_dm_model, re_formula = NA) %>% mutate(date = mdy(date), trt = case_when(trt2 == "ctrl" ~ "fish", TRUE ~ "no fish")) saveRDS(emerge_cond_posts, file = "posteriors/emerge_cond_posts.rds") # benthic model --------------- brm_ben_m2 <- readRDS("models/brm_ben_m2.rds") benthic_cond_posts <- ben_dm_tot %>% data_grid(date, trt, taxon) %>% add_epred_draws(brm_ben_m2, re_formula = NA) %>% mutate(date = mdy(date), trt = case_when(trt == "ctrl" ~ "fish", TRUE ~ "no fish")) saveRDS(benthic_cond_posts, file = "posteriors/benthic_cond_posts.rds") # spider model --------------- spiders_brm <- readRDS("models/spiders_brm.rds") spiders_cond_posts <- spider_abund %>% select(-trt) %>% mutate(date = mdy(date)) %>% rename(trt = treatment) %>% mutate(trt = case_when(trt == "fish" ~ "ctrl", TRUE ~ "exc")) %>% data_grid(date, trt) %>% add_epred_draws(spiders_brm, re_formula = NA) %>% mutate(trt = case_when(trt == "ctrl" ~ "fish", TRUE ~ "no fish")) saveRDS(spiders_cond_posts, file = "posteriors/spiders_cond_posts.rds") # make prior table -------------------------------------------------------- prior_diet_brms <- diet_brms$prior %>% as_tibble() %>% mutate(model = "diet_brms.rds") %>% mutate(prior = case_when(nchar(prior) != 0 ~ prior), coef = case_when(nchar(coef) != 0 ~ coef)) %>% fill(prior) emerge_prior <- emerge_dm_model$prior %>% mutate(model = "emerge_dm_model.rds") %>% mutate(prior = case_when(nchar(prior) != 0 ~ prior), coef = case_when(nchar(coef) != 0 ~ coef)) %>% fill(prior) benthic_prior <- brm_ben_m2$prior %>% mutate(model = "brm_ben_m2.rds") %>% mutate(prior = case_when(nchar(prior) != 0 ~ prior), coef = case_when(nchar(coef) != 0 ~ coef)) %>% fill(prior) spiders_prior <- spiders_brm$prior %>% mutate(model = 
"spiders_brm.rds") %>% mutate(prior = case_when(nchar(prior) != 0 ~ prior), coef = case_when(nchar(coef) != 0 ~ coef)) %>% fill(prior) all_prior <- bind_rows(prior_diet_brms, emerge_prior, benthic_prior, spiders_prior) write_csv(all_prior, file = "models/prior_predictive/all_prior.csv")
## Caching wrapper around matrix inversion.
##
## makeCacheMatrix creates a special "matrix": a list of four accessor
## functions closing over the matrix `x` and its cached inverse `inv`:
##   set(y)              replace the matrix and invalidate the cache
##   get()               return the current matrix
##   setinverse(inverse) store a computed inverse in the cache
##   getinverse()        return the cached inverse (NULL if not yet set)
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL  # new matrix: any previously cached inverse is stale
    }
    get <- function() x
    setinverse <- function(inverse) inv <<- inverse
    getinverse <- function() inv
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}

## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() on first use and serving
## the cached value afterwards (with a message).
##
## Fix: `...` is now forwarded to solve() as the signature advertises;
## the original declared `...` but silently ignored it (e.g. a caller's
## `tol =` had no effect).
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    inv <- x$getinverse()
    if (!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    inv <- solve(data, ...)
    x$setinverse(inv)
    inv
}
/cachematrix.R
no_license
achaldo/ProgrammingAssignment2
R
false
false
1,232
r
## Put comments here that give an overall description of what your ## functions do ## makeCacheMatrix creates a list of four functions ## these functions either get or set the value of the ## matrix or inverse. These functions are then used ## by the function below. makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## cacheSolve takes as an argument, a list created by ## the function makeCacheMatrix. It then uses the elements ## (which are functions) of the list to either calculate ## the inverse or if it's already been calculated it ## return the cached value. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getinverse() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data) x$setinverse(inv) inv }
## ---- qchunk #analyze qualtrics religion representative sample rm(list=ls()) library(lattice) library(stargazer) library(tidyverse) library(lme4) library(lavaan) library(semPlot) #**-------------- define working directory but don't change to it so all output files end up in folder where paper is being compiled wd <- "C:/Users/Lewan/Documents/Research Projects/Climate Change/religion human special status and science Klaus/Study 1 -- part of original Cognition submission 2019/Q rep sample/Q religion data" source(paste(wd,"religionQfuncs.R",sep="/")) #This does not work from Sweave or Knitr: setwd(dirname(rstudioapi::getSourceEditorContext()$path)) relig <- read.csv(paste(wd,"Attitudes_towards_science_for_Qualtrics_Representative_Sample (2).csv",sep="/"),header=TRUE,row.names=NULL) #row 2 (with verbose names) manually deleted from Q file. # 10.3.17: Also deleted last record with single stray number in one column (after soft launch) #read variable names for table of raw responses vn <- read.csv(paste(wd,"varNames.csv",sep="/"),header=TRUE,row.names=NULL,stringsAsFactors = FALSE) vnfin <- vn[grep("Q3",vn$qvarname)[-length(grep("Q3",vn$qvarname))],] #**----------- clean up data relig15 <- relig %>% filter(Q2.1 == Q4.2) %>% filter(Q3.40 == 4) %>% #table is not an animal select(contains("Q")) %>% select(-contains("Q_Tot")) #drop the Q that ain't a q. # first fix the Qualtrics-induced scale problems relig15 <- relig15 %>% mutate_at(c(paste("Q3.",c(2:19),sep=""), paste("Q3.",c(25:39),sep=""),"Q4.1"),fixscore,mm=14) %>% mutate_at("Q2.4",fixscore,mm=15) %>% mutate_at("Q3.1",fixscore,mm=22) %>% mutate_at("Q3.20",fixscore,mm=28) # identify people who hit the same key (excepting neutral) for all items in a cluster neutral <- 0 #if set to zero, any sequence of identical keys is eliminated. 
If set to 4, only non-neutral responses are dropped keyhitters <- cbind( (relig15 %>% select(num_range("Q3.",1:5)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #humans special (relig15 %>% select(num_range("Q3.",6:12)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #nationalism is good (relig15 %>% select(num_range("Q3.",13:19)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #IQ environmental (relig15 %>% select(num_range("Q3.",20:24)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #religiosity (relig15 %>% select(num_range("Q3.",25:29)) %>%apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #free markets (relig15 %>% select(num_range("Q3.",30:34)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0))), #climate (relig15 %>% select(num_range("Q3.",35:39)) %>% apply(.,1,FUN=function(x) ifelse((var(x)==0 & mean(x)!=neutral),1,0)))) #vax #eliminate the key hitters table(rowSums(keyhitters)) relig15 <- filter(relig15,!(rowSums(keyhitters)>1)) #demographics males <- table(relig15$Q2.2)["1"] females <- table(relig15$Q2.2)["2"] mage <- round(mean(relig15$Q2.1),1) mdage <- round(median(relig15$Q2.1),1) minage <- min(relig15$Q2.1) maxage <- max(relig15$Q2.1) lateEnglish <- table(relig15$Q2.3)["5"] #**----------- get raw responses before reverse scoring #function to grab a row and interleave with parentheses for printing interleave <- function(x){ hlx <- length(x)/2 retstr <- paste(sapply(1:hlx, FUN=function(i) paste(" & ",as.character(x[i])," & (",as.character(x[i+hlx]),")",sep="")),collapse="",sep="") } itemResppercent <- relig15 %>% select(num_range("Q3.",1:39)) %>% lapply(table) %>% lapply(as.numeric) %>% lapply(FUN=function(x) c(x,round(x/sum(x)*100))) #this generates latex code for insertion into document t4l <- NULL for (i in 1:length(itemResppercent)) { mychar <-paste(vnfin$shortname[i], 
interleave(itemResppercent[[i]]), "\\","\\", sep="") t4l<-rbind(t4l,mychar,deparse.level = 0) } write.table(t4l[1:5,],file="_t.exceptionalism.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[6:12,],file="_t.nationalism.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[13:19,],file="_t.malleability.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[20:24,],file="_t.religiosity.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[25:29,],file="_t.freemarket.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[30:34,],file="_t.climate.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) write.table(t4l[35:39,],file="_t.vax.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #now reverse score such that polarity is: # humans are special # nationalism is great # IQ is heritable (flipped from initial analysis) # being religious # free market endorsement # accept climate change # accept vaccinations relig2 <- relig15 %>% mutate_at(c("Q3.4", "Q3.6", "Q3.7", "Q3.10", "Q3.14", "Q3.15", "Q3.16", "Q3.19", #polarity of IQ towards heritability "Q3.26", "Q3.28", "Q3.29", "Q3.30", "Q3.34", "Q3.36", "Q3.38"),revscore,mm=7) %>% mutate_at(c("Q3.21","Q3.23","Q3.24"),revscore,mm=5) # compute pairwise correlations within each cluster relig2 %>% select(num_range("Q3.",1:5)) %>% cor(use="complete.obs") #humans special relig2 %>% select(num_range("Q3.",6:12)) %>% cor(use="complete.obs") #nationalism is good relig2 %>% select(num_range("Q3.",13:19)) %>% cor(use="complete.obs") #IQ environmental relig2 %>% select(num_range("Q3.",20:24)) %>% cor(use="complete.obs") #religiosity relig2 %>% select(num_range("Q3.",25:29)) %>% cor(use="complete.obs") #free markets relig2 %>% select(num_range("Q3.",30:34)) %>% cor(use="complete.obs") #climate relig2 %>% select(num_range("Q3.",35:39)) %>% cor(use="complete.obs") #vax # construct histogram for summary statistics pdf(file="histoSummary.pdf",height=9,width=6.5) #this will go into 
paper directory that is being weaved (since no setwd) par(mfrow=c(4,2)) relig2 %>% select(num_range("Q3.",1:5)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="Exceptionalism",col="light gray") #humans special relig2 %>% select(num_range("Q3.",6:12)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="Nationalism",col="light gray") relig2 %>% select(num_range("Q3.",13:19)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="IQ largely heritable",col="light gray") #IQ environmental relig2 %>% select(num_range("Q3.",20:24)) %>% rowMeans %>% hist(las=1,xlim=c(1,6),xlab="Average score",main="Religiosity",col="light gray") #religiosity relig2 %>% select(num_range("Q3.",25:29)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="Free market",col="light gray") #free markets relig2 %>% select(num_range("Q3.",30:34)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="Climate science",col="light gray") #climate relig2 %>% select(num_range("Q3.",35:39)) %>% rowMeans %>% hist(las=1,xlim=c(1,7),xlab="Average score",main="Vaccinations",col="light gray") #vax dev.off() #write.csv(relig2,paste(wd,"QreligProcessed.csv",sep="/"),row.names=FALSE) #**-------------- now compute measurement models for all constructs so we can use single-indicator models later #exceptionalism humvars <- paste("Q3.",c(1:5),sep="") humspecmod <- c("humspec =~ ",paste(humvars,collapse=" + "),"Q3.3 ~~ Q3.5") humspecgof <- fitMM(humspecmod,relig2) #nationalism intvars <- paste("Q3.",c(7,8,9,10,11,12),sep="") internatmod <- c("nationalism =~ ", paste(intvars,collapse=" + "),"Q3.7 ~~ Q3.10") internatgof <- fitMM (internatmod,relig2) # separate IQ models for positive and reverse-scored items IQ.env <- paste("Q3.",c(19,14:16),sep="") IQmodel.env <-c("IQ.env =~ ",paste(IQ.env,collapse=" + "),NULL) fitMM (IQmodel.env,relig2) IQ.her <- paste("Q3.",c(13,17,18),sep="") IQmodel.her <-c("IQ.her =~ ",paste(IQ.her,collapse=" + "), NULL) fitMM 
(IQmodel.her,relig2) # two-correlated factors model IQmodel.twocf <- c("IQ.her =~ ", paste(IQ.her, collapse=" + "), "\n", "IQ.env =~ ", paste(IQ.env, collapse=" + ")) twocfacIQfit <- sem(IQmodel.twocf,relig2) twocfacIQcor <- lavInspect(twocfacIQfit, what = "cor.lv") twocfacIQp <- pnorm(abs(inspect(twocfacIQfit,what="est")$psi/inspect(twocfacIQfit,what="se")$psi),lower.tail = FALSE)*2 # hierarchical two-factor model IQmodel.two <- c("IQ.her =~ ", paste(IQ.her, collapse=" + "), "\n", "IQ.env =~ ", paste(IQ.env, collapse=" + "), "\n", "IQ.two =~ a*IQ.her + a*IQ.env") twofacIQfit <- sem(IQmodel.two,relig2) summary(twofacIQfit, standardized=TRUE, fit.measures=TRUE) twofacIQgof <- fitmeasures(twofacIQfit) semPaths(twofacIQfit, "std", title =FALSE, curvePivot = TRUE) # bi-factor IQ model for positive and reverse-scored items IQ.bi <- paste("Q3.", c(13:19), sep="") IQ.her <- paste("Q3.",c(13,17,18),sep="") IQmodel.bi <- c("IQ.bi =~ ", paste(IQ.bi, collapse=" + "), "\n", "IQ.her =~ ", paste(IQ.her, collapse= " + "), "\n", "IQ.bi ~~ 0*IQ.her") IQbigof <- fitMM (IQmodel.bi,relig2) # only consider items that are tentative in their propositions IQ.tentative <- paste("Q3.", c(14,15,17,19), sep="") IQmodel.tent <- c("IQ.tent =~ ", paste(IQ.tentative, collapse=" + ")) IQtentgof <- fitMM (IQmodel.tent,relig2) #religiosity Relvars <- paste("Q3.",c(20:24),sep="") religmodel <- c("religiosity =~ ",paste(Relvars,collapse=" + ")) religgof <- fitMM (religmodel,relig2) #FM FMvars <- paste("Q3.",c(25:29),sep="") FMmodel <- c("FM =~ ", paste(FMvars,collapse=" + "),"Q3.25 ~~ Q3.27") #as before for PLOS ONE fmgof <- fitMM (FMmodel,relig2) # hierarchical two-factor model for conservatism Consmodel.two <- c("C.FM =~ ", paste(FMvars, collapse=" + "),"Q3.25 ~~ Q3.27", "\n", "C.Rel =~ ", paste(Relvars, collapse=" + "), "\n", "C.Int =~ ", paste(intvars,collapse=" + "),"Q3.7 ~~ Q3.10","\n", "C.hum =~ ", paste(humvars,collapse=" + "),"Q3.3 ~~ Q3.5","\n", "C.two =~ NA*C.FM + C.Rel + C.Int + 
C.hum","\n", "C.two ~~ 1*C.two") C2fit <- sem(Consmodel.two,relig2) summary(C2fit, standardized=TRUE, fit.measures=TRUE) #climate Climvars <- paste("Q3.",c(30:34),sep="") climmodel <- c("climate =~ ", paste(Climvars,collapse=" + "), "Q3.30 ~~ Q3.34") #as before for PLOS ONE climategof <- fitMM (climmodel,relig2) #vaccination Vaxvars <- paste("Q3.",c(35:39),sep="") vaxmodel <- c("vax =~ ",paste(Vaxvars,collapse=" + "), "Q3.36 ~~ Q3.38") #as before for PLOS ONE vaxgof <- fitMM (vaxmodel,relig2) #**----------- compute single-indicators models (humSI <- singleindmodel(humvars,list(c("Q3.3","Q3.5")),relig2)) (intSI <- singleindmodel(intvars, list(c("Q3.7","Q3.10")),relig2)) (RelSI <- singleindmodel(Relvars,NULL,relig2)) (FMSI <- singleindmodel(FMvars,list(c("Q3.25","Q3.27")),relig2)) (ClimSI <- singleindmodel(Climvars,list(c("Q3.30","Q3.34")),relig2)) (VaxSI <- singleindmodel(Vaxvars,list(c("Q3.36","Q3.38")),relig2)) (IQSI <- singleindmodel(IQ.tentative,NULL,relig2)) #put error estimates for single indicators into named array for use in function eSImods <- c(humSI$eSImod, intSI$eSImod, RelSI$eSImod, FMSI$eSImod, ClimSI$eSImod, VaxSI$eSImod) names(eSImods) <- c ("hum","int","Rel","FM","Clim","Vax") #compute composite scores for the SI models compositeRelig <- data.frame ( hum = apply(relig2[,humvars], 1,mean), int = apply(relig2[,intvars], 1,mean), Rel = apply(relig2[,Relvars], 1,mean), FM = apply(relig2[,FMvars], 1,mean), Clim = apply(relig2[,Climvars], 1,mean), Vax = apply(relig2[,Vaxvars], 1,mean), IQ = apply(relig2[,IQ.tentative], 1,mean), select(relig2,num_range("Q3.",13:29))) #**------ correlation structure among 6 unidimensional latent constructs modelCorrel <- c(" humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitCorrel <- 
sem(modelCorrel, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitCorrel,standardized=TRUE, fit.measures=TRUE) #**------ correlation structure among all 7 unidimensional latent constructs (including tentative IQ items) modelxCorrel <- c(" humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQFac =~ IQ hum ~~ ", humSI$eSImod, "*hum", "IQ ~~", IQSI$eSImod, "*IQ", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitCorrelx <- sem(modelxCorrel, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitCorrelx,standardized=TRUE, fit.measures=TRUE) #parameterEstimates(fitCorrelx, standardized=TRUE) #get correlation matrix for latent variables lvcormat <- lavInspect(fitCorrel, what = "cor.lv") colnames(lvcormat) <- rownames(lvcormat) <- (c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations")) cormat <- stargazer(lvcormat, title="Correlations among 6 unidimensional latent variables") write.table(cormat[10:18],file="_t.lvcor.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #now compute p-values pvals2tailed <- pnorm(abs(inspect(fitCorrel,what="est")$psi/inspect(fitCorrel,what="se")$psi),lower.tail = FALSE)*2 colnames(pvals2tailed) <- rownames(pvals2tailed) <- (c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations")) pvals2tailed[upper.tri(pvals2tailed)] <- 0 maxloc <- which(pvals2tailed == max(pvals2tailed), arr.ind = TRUE) maxpval <- max(pvals2tailed) rownames(pvals2tailed)[maxloc[1]] colnames(pvals2tailed)[maxloc[2]] #**----------- correlations including the two-factor hierarchical model for IQ modelCorrel3 <- c(" humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, 
"*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitCorrel3 <- sem(modelCorrel3, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitCorrel3,standardized=TRUE, fit.measures=TRUE) #get correlation matrix for latent variables lvcormat3 <- lavInspect(fitCorrel3, what = "cor.lv") colnames(lvcormat3) <- rownames(lvcormat3) <- (c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations", "junk1", "junk2", "IQ Heritable")) cormat3 <- stargazer(lvcormat3[,1:6], title="") #row 18 contains corrs with main factor in IQ model. Chop junk columns of subordinate factors write.table(cormat3[20],file="_t.IQcor.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #now compute p-values pvals2tailed3 <- pnorm(abs(inspect(fitCorrel3,what="est")$psi/inspect(fitCorrel3,what="se")$psi),lower.tail = FALSE)*2 twofacsigcorrs <- (colnames(lvcormat3)[1:6]) [pvals2tailed3[9,1:6]<.05] #**-------------- now turn to specific prediction models #**---------- IQ hierarchical two-factor: start with full model ... fullmodel2fIQ <- c(" IQ.two ~ FMFac + intFac + RelFac + humFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM" ) fitfull2fIQ <- sem(fullmodel2fIQ, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitfull2fIQ,standardized=TRUE, fit.measures=TRUE) modificationindices(fitfull2fIQ,sort.=TRUE,maximum.number=20) full2fIQgof <- fitMeasures(fitfull2fIQ) #.... smaller model for comparison ... 
smmodel2fIQ <- c(" IQ.two ~ intFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM" ) fitsm2fIQ <- sem(smmodel2fIQ, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitsm2fIQ,standardized=TRUE, fit.measures=TRUE) fitsm2fIQgof <- fitMeasures(fitsm2fIQ) # .... which turns out to fit equally well ... anova(fitfull2fIQ,fitsm2fIQ) # .... so we can extract a small model without the other constructs tinymodel2fIQ <- c(" IQ.two ~ intFac intFac =~ int IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env int ~~", intSI$eSImod, "*int" ) fittiny2fIQ <- sem(tinymodel2fIQ, compositeRelig, std.lv=TRUE, estimator="ML") summary(fittiny2fIQ,standardized=TRUE, fit.measures=TRUE) #**---------- climate: start with full model... bigmodelclim <- c(" ClimFac ~ FMFac + intFac + RelFac + humFac ClimFac =~ Clim intFac =~ int RelFac =~ Rel humFac =~ hum FMFac =~ FM Rel ~~ ", RelSI$eSImod, "*Rel", "hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim" ) fitbigmodclim <- sem(bigmodelclim, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitbigmodclim,standardized=TRUE, fit.measures=TRUE) fullclimgof <- fitMeasures(fitbigmodclim) #.... eliminate predictors .... 
modelclim <- c(" ClimFac ~ FMFac + RelFac + humFac ClimFac =~ Clim intFac =~ int FMFac =~ FM RelFac =~ Rel humFac =~ hum Rel ~~ ", RelSI$eSImod, "*Rel", "hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim" ) fitmodclim <- sem(modelclim, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitmodclim,standardized=TRUE, fit.measures=TRUE) smclimgof <- fitMeasures(fitmodclim) #parameterEstimates(fitmodclim, standardized=TRUE) modindices(fitmodclim,sort. = TRUE, maximum.number = 4) # ... which turns out not to make a difference ... anova(fitbigmodclim,fitmodclim) # and we have a final small model smmodelclim <- c(" ClimFac ~ FMFac + intFac ClimFac =~ Clim intFac =~ int FMFac =~ FM int ~~", intSI$eSImod, "*int", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim" ) fitsmmodclim <- sem(smmodelclim, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitsmmodclim,standardized=TRUE, fit.measures=TRUE) # mediation? mediateclimmodel <- c(" humFac ~ alpha1 * FMFac intFac ~ alpha2 * FMFac ClimFac ~ direct * FMFac + beta1 *humFac + beta2 *intFac indirect1 := alpha1 * beta1 indirect2 := alpha2 * beta2 total:= indirect1 + indirect2 + direct proportion := (indirect1 + indirect2)/total ClimFac =~ Clim humFac =~ hum intFac =~ int FMFac =~ FM hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim" ) fitmediateclimmodel <- sem(mediateclimmodel, compositeRelig, std.lv=TRUE, estimator="ML") semPaths(fitmediateclimmodel, what="std", title =FALSE, curvePivot = TRUE,residuals=FALSE, structural=TRUE, layout="tree2",rotation=2) summary(fitmediateclimmodel,standardized=TRUE, fit.measures=TRUE) fullclimgof <- fitMeasures(fitmediateclimmodel) # doube mediation for both climate and vax? 
mediate2climmodel <- c(" ClimFac ~ d1 * FMFac + d2 * RelFac + b2 *humFac + b1 *intFac VaxFac ~ d4 * FMFac + d3 * RelFac + b4 *humFac + b3 *intFac humFac ~ a4 * FMFac + a2 * RelFac intFac ~ a3 * FMFac + a1 * RelFac indirectInt := a1 * b1 + a3 * b1 + a1 * b3 + a3 * b3 indirecthum := a2 * b2 + a4 * b2 + a2 * b4 + a2 * b4 total:= indirectInt + indirecthum + d1 + d2 proportion := (indirectInt + indirecthum)/total ClimFac =~ Clim humFac =~ hum intFac =~ int FMFac =~ FM RelFac =~ Rel VaxFac =~ Vax hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "FM ~~ ", FMSI$eSImod, "*FM", "Rel ~~ ", RelSI$eSImod, "*Rel", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitmediate2climmodel <- sem(mediate2climmodel, compositeRelig, std.lv=TRUE, estimator="ML") semPaths(fitmediate2climmodel, what="std", title =FALSE, curvePivot = TRUE,residuals=FALSE, structural=TRUE, layout="tree2",rotation=2) summary(fitmediate2climmodel,standardized=TRUE, fit.measures=TRUE) fullclimgof <- fitMeasures(fitmediate2climmodel) #**------------ vax:start with full model fullmodelvax <- c(" VaxFac ~ FMFac + intFac + RelFac + humFac VaxFac =~ Vax intFac =~ int RelFac =~ Rel FMFac =~ FM humFac =~ hum int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "hum ~~ ", humSI$eSImod, "*hum", "FM ~~ ", FMSI$eSImod, "*FM", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitfullmodvax <- sem(fullmodelvax, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitfullmodvax,standardized=TRUE, fit.measures=TRUE) fullvaxgof <- fitMeasures(fitfullmodvax) #parameterEstimates(fitfullmodvax, standardized=TRUE) modindices(fitfullmodvax, sort.=TRUE, maximum.number=4) #.... now a smaller one .... 
smmodelvax <- c(" VaxFac ~ FMFac + intFac + RelFac VaxFac =~ Vax intFac =~ int RelFac =~ Rel FMFac =~ FM humFac =~ hum int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "hum ~~ ", humSI$eSImod, "*hum", "FM ~~ ", FMSI$eSImod, "*FM", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitsmmodvax <- sem(smmodelvax, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitsmmodvax,standardized=TRUE, fit.measures=TRUE) smvaxgof <- fitMeasures(fitsmmodvax) anova(fitfullmodvax,fitsmmodvax) #**--------------- Now fit all scientific constructs together (two-factor IQ) allfull2fmod <- c(" ClimFac ~ FMFac + intFac + RelFac + humFac VaxFac ~ FMFac + intFac + RelFac + humFac IQ.two ~ FMFac + intFac + RelFac + humFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitall2ffull <- sem(allfull2fmod, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitall2ffull,standardized=TRUE, fit.measures=TRUE) #parameterEstimates(fitall2ffull, standardized=TRUE) modindices(fitall2ffull, sort.=TRUE, maximum.number=4) #.... and now a constrained model .... 
modelall2f <- c(" ClimFac ~ FMFac + intFac VaxFac ~ FMFac + intFac + RelFac + humFac IQ.two ~ intFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) modelall2ffit <- sem(modelall2f, compositeRelig, std.lv=TRUE, estimator="ML") summary(modelall2ffit,standardized=TRUE, fit.measures=TRUE) anova(fitall2ffull,modelall2ffit) finalmodelgof <- fitMeasures(modelall2ffit) semPaths(modelall2ffit, what="std", title =FALSE, curvePivot = TRUE,residuals=FALSE, structural=TRUE, layout="tree2",rotation=2) #** ---- now apportion variance ------------------------------------- # call to getr2(criterion,predictors,vars), # e.g.: getr2("Clim", c("int", "FM"), c("Clim", "int", "Rel", "hum", "FM")) # this decomposes the r^2 embedded in model fitall2ffull for the 3 latent criteria # Note: this is climcomps <- getr2comps("Clim") inspect(fitall2ffull,'r2')["ClimFac"] str(climcomps) vaxcomps <- getr2comps("Vax") inspect(fitall2ffull,'r2')["VaxFac"] str(vaxcomps) sum(unlist(vaxcomps)) IQcomps <- getr2comps("IQ") inspect(fitall2ffull,'r2')["IQ.two"] str(IQcomps) #rearrange data into a more suitable format compnts <- cbind(unlist(climcomps),unlist(vaxcomps),unlist(IQcomps))[substr(names(climcomps),1,1)=="u",] sharvar <- cbind(unlist(climcomps),unlist(vaxcomps),unlist(IQcomps))[substr(names(climcomps),1,1)!="u",] compnts <- rbind(compnts,colSums(sharvar),cbind(sum(unlist(climcomps)),sum(unlist(vaxcomps)),sum(unlist(IQcomps)))) row.names(compnts) <- c("Nationalism","Religiosity","Exceptionalism","Free market","All shared","Total") compmat <- stargazer(compnts, title="Decomposition of variance explained",digits=2,digits.extra=0) #compmat <- 
str_replace(compmat,"\\$-\\$","") write.table(compmat[c(12,10,11,13,14,15)],file="_t.compnts.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #--- all constructs, predict all criteria from single 2nd-order conservatism factor --------------- all2ndorderfac <- c(" ClimFac ~ C.two VaxFac ~ C.two IQ.two ~ C.two C.two =~ NA*FMFac + RelFac + intFac + humFac C.two ~~ 1*C.two humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env humFac ~~ RelFac hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fit2ndorderfac <- sem(all2ndorderfac, compositeRelig, std.lv=TRUE, estimator="ML") summary(fit2ndorderfac,standardized=TRUE, fit.measures=TRUE) modindices(fit2ndorderfac, sort.=TRUE, maximum.number=4)
/religionQualtrics.r
no_license
StephanLewandowsky/Norms-of-Science
R
false
false
30,299
r
## ---- qchunk
# Analyze Qualtrics religion representative sample.
# NOTE(review): this chunk was recovered from a whitespace-mangled dump;
# statement line breaks have been restored, tokens left as in the source.
rm(list = ls())  # NOTE(review): wipes the workspace; kept because the weaved paper assumes a clean session
library(lattice)
library(stargazer)
library(tidyverse)
library(lme4)
library(lavaan)
library(semPlot)

#**-------------- define working directory but don't change to it so all output
#   files end up in folder where paper is being compiled
wd <- "C:/Users/Lewan/Documents/Research Projects/Climate Change/religion human special status and science Klaus/Study 1 -- part of original Cognition submission 2019/Q rep sample/Q religion data"
# helper functions used throughout: fixscore, revscore, fitMM, singleindmodel, getr2comps
source(paste(wd, "religionQfuncs.R", sep = "/"))
# This does not work from Sweave or Knitr: setwd(dirname(rstudioapi::getSourceEditorContext()$path))

relig <- read.csv(paste(wd, "Attitudes_towards_science_for_Qualtrics_Representative_Sample (2).csv", sep = "/"),
                  header = TRUE, row.names = NULL)
# row 2 (with verbose names) manually deleted from Q file.
# 10.3.17: Also deleted last record with single stray number in one column (after soft launch)

# read variable names for table of raw responses;
# keep the Q3.x rows except the last one (presumably the attention-check item -- confirm)
vn <- read.csv(paste(wd, "varNames.csv", sep = "/"),
               header = TRUE, row.names = NULL, stringsAsFactors = FALSE)
vnfin <- vn[grep("Q3", vn$qvarname)[-length(grep("Q3", vn$qvarname))], ]

#**----------- clean up data
relig15 <- relig %>%
  filter(Q2.1 == Q4.2) %>%    # NOTE(review): presumably a consistency check between the two age questions -- confirm
  filter(Q3.40 == 4) %>%      # table is not an animal
  select(contains("Q")) %>%
  select(-contains("Q_Tot"))  # drop the Q that ain't a q.

# first fix the Qualtrics-induced scale problems
relig15 <- relig15 %>%
  mutate_at(c(paste("Q3.", c(2:19), sep = ""),
              paste("Q3.", c(25:39), sep = ""), "Q4.1"), fixscore, mm = 14) %>%
  mutate_at("Q2.4",  fixscore, mm = 15) %>%
  mutate_at("Q3.1",  fixscore, mm = 22) %>%
  mutate_at("Q3.20", fixscore, mm = 28)

# identify people who hit the same key (excepting neutral) for all items in a cluster
neutral <- 0  # if set to zero, any sequence of identical keys is eliminated.
# If set to 4, only non-neutral responses are dropped
# NOTE(review): reconstructed from a whitespace-mangled dump (line breaks restored).
# One indicator column per item cluster: 1 = identical non-neutral key throughout.
keyhitters <- cbind(
  (relig15 %>% select(num_range("Q3.", 1:5)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # humans special
  (relig15 %>% select(num_range("Q3.", 6:12)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # nationalism is good
  (relig15 %>% select(num_range("Q3.", 13:19)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # IQ environmental
  (relig15 %>% select(num_range("Q3.", 20:24)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # religiosity
  (relig15 %>% select(num_range("Q3.", 25:29)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # free markets
  (relig15 %>% select(num_range("Q3.", 30:34)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))),  # climate
  (relig15 %>% select(num_range("Q3.", 35:39)) %>%
     apply(., 1, FUN = function(x) ifelse((var(x) == 0 & mean(x) != neutral), 1, 0))))  # vax

# eliminate the key hitters (more than one flagged cluster)
table(rowSums(keyhitters))
relig15 <- filter(relig15, !(rowSums(keyhitters) > 1))

# demographics
males       <- table(relig15$Q2.2)["1"]
females     <- table(relig15$Q2.2)["2"]
mage        <- round(mean(relig15$Q2.1), 1)
mdage       <- round(median(relig15$Q2.1), 1)
minage      <- min(relig15$Q2.1)
maxage      <- max(relig15$Q2.1)
lateEnglish <- table(relig15$Q2.3)["5"]

#**----------- get raw responses before reverse scoring
# function to grab a row and interleave with parentheses for printing;
# x holds counts followed by percentages, so the first half pairs with the second
interleave <- function(x) {
  hlx <- length(x) / 2  # 1:hlx kept as-is; x is counts+percentages, so length is even
  retstr <- paste(sapply(1:hlx, FUN = function(i)
    paste(" & ", as.character(x[i]), " & (", as.character(x[i + hlx]), ")", sep = "")),
    collapse = "", sep = "")
}

# per item: response counts followed by rounded percentages
itemResppercent <- relig15 %>%
  select(num_range("Q3.", 1:39)) %>%
  lapply(table) %>%
  lapply(as.numeric) %>%
  lapply(FUN = function(x) c(x, round(x / sum(x) * 100)))

# this generates latex code for insertion into document
t4l <- NULL
for (i in seq_along(itemResppercent)) {
  mychar <- paste(vnfin$shortname[i], interleave(itemResppercent[[i]]), "\\", "\\", sep = "")
  t4l <- rbind(t4l, mychar, deparse.level = 0)
}
write.table(t4l[1:5, ],   file = "_t.exceptionalism.tex", quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[6:12, ],  file = "_t.nationalism.tex",    quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[13:19, ], file = "_t.malleability.tex",   quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[20:24, ], file = "_t.religiosity.tex",    quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[25:29, ], file = "_t.freemarket.tex",     quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[30:34, ], file = "_t.climate.tex",        quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(t4l[35:39, ], file = "_t.vax.tex",            quote = FALSE, col.names = FALSE, row.names = FALSE)

# now reverse score such that polarity is:
#   humans are special
#   nationalism is great
#   IQ is heritable (flipped from initial analysis)
#   being religious
#   free market endorsement
#   accept climate change
#   accept vaccinations
relig2 <- relig15 %>%
  mutate_at(c("Q3.4", "Q3.6", "Q3.7", "Q3.10",
              "Q3.14", "Q3.15", "Q3.16", "Q3.19",  # polarity of IQ towards heritability
              "Q3.26", "Q3.28", "Q3.29",
              "Q3.30", "Q3.34", "Q3.36", "Q3.38"), revscore, mm = 7) %>%
  mutate_at(c("Q3.21", "Q3.23", "Q3.24"), revscore, mm = 5)

# compute pairwise correlations within each cluster
relig2 %>% select(num_range("Q3.", 1:5))   %>% cor(use = "complete.obs")  # humans special
relig2 %>% select(num_range("Q3.", 6:12))  %>% cor(use = "complete.obs")  # nationalism is good
relig2 %>% select(num_range("Q3.", 13:19)) %>% cor(use = "complete.obs")  # IQ environmental
relig2 %>% select(num_range("Q3.", 20:24)) %>% cor(use = "complete.obs")  # religiosity
relig2 %>% select(num_range("Q3.", 25:29)) %>% cor(use = "complete.obs")  # free markets
relig2 %>% select(num_range("Q3.", 30:34)) %>% cor(use = "complete.obs")  # climate
relig2 %>% select(num_range("Q3.", 35:39)) %>% cor(use = "complete.obs")  # vax

# construct histogram for summary statistics
pdf(file = "histoSummary.pdf", height = 9, width = 6.5)  # this will go into
# paper directory that is being weaved (since no setwd)
par(mfrow = c(4, 2))  # 4 x 2 panel grid (7 panels used)
relig2 %>% select(num_range("Q3.", 1:5)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "Exceptionalism", col = "light gray")        # humans special
relig2 %>% select(num_range("Q3.", 6:12)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "Nationalism", col = "light gray")
relig2 %>% select(num_range("Q3.", 13:19)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "IQ largely heritable", col = "light gray")  # IQ environmental
relig2 %>% select(num_range("Q3.", 20:24)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 6), xlab = "Average score", main = "Religiosity", col = "light gray")           # religiosity
relig2 %>% select(num_range("Q3.", 25:29)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "Free market", col = "light gray")           # free markets
relig2 %>% select(num_range("Q3.", 30:34)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "Climate science", col = "light gray")       # climate
relig2 %>% select(num_range("Q3.", 35:39)) %>% rowMeans %>%
  hist(las = 1, xlim = c(1, 7), xlab = "Average score", main = "Vaccinations", col = "light gray")          # vax
dev.off()
#write.csv(relig2,paste(wd,"QreligProcessed.csv",sep="/"),row.names=FALSE)

#**-------------- now compute measurement models for all constructs so we can use single-indicator models later
# exceptionalism
humvars <- paste("Q3.", c(1:5), sep = "")
humspecmod <- c("humspec =~ ", paste(humvars, collapse = " + "), "Q3.3 ~~ Q3.5")  # correlated residual Q3.3-Q3.5
humspecgof <- fitMM(humspecmod, relig2)

# nationalism (note: Q3.6 not included)
intvars <- paste("Q3.", c(7, 8, 9, 10, 11, 12), sep = "")
internatmod <- c("nationalism =~ ", paste(intvars, collapse = " + "), "Q3.7 ~~ Q3.10")
internatgof <- fitMM(internatmod, relig2)

# separate IQ models for positive and reverse-scored items
IQ.env <- paste("Q3.", c(19, 14:16), sep = "")
IQmodel.env <- c("IQ.env =~ ", paste(IQ.env, collapse = " + "), NULL)
fitMM(IQmodel.env, relig2)
IQ.her <- paste("Q3.", c(13, 17, 18), sep = "")
IQmodel.her <- c("IQ.her =~ ", paste(IQ.her, collapse = " + "), NULL)
fitMM(IQmodel.her, relig2)

# two-correlated factors model
IQmodel.twocf <- c("IQ.her =~ ", paste(IQ.her, collapse = " + "), "\n",
                   "IQ.env =~ ", paste(IQ.env, collapse = " + "))
twocfacIQfit <- sem(IQmodel.twocf, relig2)
twocfacIQcor <- lavInspect(twocfacIQfit, what = "cor.lv")
# two-tailed p for the factor covariance (Wald z = est/se)
twocfacIQp <- pnorm(abs(inspect(twocfacIQfit, what = "est")$psi /
                          inspect(twocfacIQfit, what = "se")$psi), lower.tail = FALSE) * 2

# hierarchical two-factor model (both second-order loadings equated via label 'a')
IQmodel.two <- c("IQ.her =~ ", paste(IQ.her, collapse = " + "), "\n",
                 "IQ.env =~ ", paste(IQ.env, collapse = " + "), "\n",
                 "IQ.two =~ a*IQ.her + a*IQ.env")
twofacIQfit <- sem(IQmodel.two, relig2)
summary(twofacIQfit, standardized = TRUE, fit.measures = TRUE)
twofacIQgof <- fitmeasures(twofacIQfit)
semPaths(twofacIQfit, "std", title = FALSE, curvePivot = TRUE)

# bi-factor IQ model for positive and reverse-scored items
# (general factor IQ.bi and group factor IQ.her forced orthogonal via 0* covariance)
IQ.bi <- paste("Q3.", c(13:19), sep = "")
IQ.her <- paste("Q3.", c(13, 17, 18), sep = "")  # re-assignment; same value as above
IQmodel.bi <- c("IQ.bi =~ ", paste(IQ.bi, collapse = " + "), "\n",
                "IQ.her =~ ", paste(IQ.her, collapse = " + "), "\n",
                "IQ.bi ~~ 0*IQ.her")
IQbigof <- fitMM(IQmodel.bi, relig2)

# only consider items that are tentative in their propositions
IQ.tentative <- paste("Q3.", c(14, 15, 17, 19), sep = "")
IQmodel.tent <- c("IQ.tent =~ ", paste(IQ.tentative, collapse = " + "))
IQtentgof <- fitMM(IQmodel.tent, relig2)

# religiosity
Relvars <- paste("Q3.", c(20:24), sep = "")
religmodel <- c("religiosity =~ ", paste(Relvars, collapse = " + "))
religgof <- fitMM(religmodel, relig2)

# FM
FMvars <- paste("Q3.", c(25:29), sep = "")
FMmodel <- c("FM =~ ", paste(FMvars, collapse = " + "), "Q3.25 ~~ Q3.27")  # as before for PLOS ONE
fmgof <- fitMM(FMmodel, relig2)
# hierarchical two-factor model for conservatism
Consmodel.two <- c("C.FM =~ ", paste(FMvars, collapse = " + "), "Q3.25 ~~ Q3.27", "\n",
                   "C.Rel =~ ", paste(Relvars, collapse = " + "), "\n",
                   "C.Int =~ ", paste(intvars, collapse = " + "), "Q3.7 ~~ Q3.10", "\n",
                   "C.hum =~ ", paste(humvars, collapse = " + "), "Q3.3 ~~ Q3.5", "\n",
                   "C.two =~ NA*C.FM + C.Rel + C.Int + C.hum", "\n",
                   "C.two ~~ 1*C.two")  # free first loading, identify by fixing factor variance
C2fit <- sem(Consmodel.two, relig2)
summary(C2fit, standardized = TRUE, fit.measures = TRUE)

# climate
Climvars <- paste("Q3.", c(30:34), sep = "")
climmodel <- c("climate =~ ", paste(Climvars, collapse = " + "), "Q3.30 ~~ Q3.34")  # as before for PLOS ONE
climategof <- fitMM(climmodel, relig2)

# vaccination
Vaxvars <- paste("Q3.", c(35:39), sep = "")
vaxmodel <- c("vax =~ ", paste(Vaxvars, collapse = " + "), "Q3.36 ~~ Q3.38")  # as before for PLOS ONE
vaxgof <- fitMM(vaxmodel, relig2)

#**----------- compute single-indicators models
# (outer parentheses print each result as it is assigned)
(humSI  <- singleindmodel(humvars,  list(c("Q3.3", "Q3.5")),   relig2))
(intSI  <- singleindmodel(intvars,  list(c("Q3.7", "Q3.10")),  relig2))
(RelSI  <- singleindmodel(Relvars,  NULL,                      relig2))
(FMSI   <- singleindmodel(FMvars,   list(c("Q3.25", "Q3.27")), relig2))
(ClimSI <- singleindmodel(Climvars, list(c("Q3.30", "Q3.34")), relig2))
(VaxSI  <- singleindmodel(Vaxvars,  list(c("Q3.36", "Q3.38")), relig2))
(IQSI   <- singleindmodel(IQ.tentative, NULL, relig2))

# put error estimates for single indicators into named array for use in function
eSImods <- c(humSI$eSImod, intSI$eSImod, RelSI$eSImod, FMSI$eSImod, ClimSI$eSImod, VaxSI$eSImod)
names(eSImods) <- c("hum", "int", "Rel", "FM", "Clim", "Vax")

# compute composite scores for the SI models
compositeRelig <- data.frame(
  hum  = apply(relig2[, humvars],  1, mean),
  int  = apply(relig2[, intvars],  1, mean),
  Rel  = apply(relig2[, Relvars],  1, mean),
  FM   = apply(relig2[, FMvars],   1, mean),
  Clim = apply(relig2[, Climvars], 1, mean),
  Vax  = apply(relig2[, Vaxvars],  1, mean),
  IQ   = apply(relig2[, IQ.tentative], 1, mean),
  select(relig2, num_range("Q3.", 13:29)))  # keep raw items needed by the IQ measurement parts

#**------ correlation structure among 6 unidimensional latent constructs
# NOTE(review): line breaks inside this model string were lost in the dump and
# have been restored -- lavaan requires one formula per line.
modelCorrel <- c("
humFac =~ hum
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
ClimFac =~ Clim
VaxFac =~ Vax
hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitCorrel <- sem(modelCorrel, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitCorrel, standardized = TRUE, fit.measures = TRUE)

#**------ correlation structure among all 7 unidimensional latent constructs (including tentative IQ items)
modelxCorrel <- c("
humFac =~ hum
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
ClimFac =~ Clim
VaxFac =~ Vax
IQFac =~ IQ
hum ~~ ", humSI$eSImod, "*hum",
  "IQ ~~", IQSI$eSImod, "*IQ",
  "int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitCorrelx <- sem(modelxCorrel, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitCorrelx, standardized = TRUE, fit.measures = TRUE)
#parameterEstimates(fitCorrelx, standardized=TRUE)

# get correlation matrix for latent variables (6-construct model)
lvcormat <- lavInspect(fitCorrel, what = "cor.lv")
colnames(lvcormat) <- rownames(lvcormat) <-
  c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations")
cormat <- stargazer(lvcormat, title = "Correlations among 6 unidimensional latent variables")
write.table(cormat[10:18], file = "_t.lvcor.tex", quote = FALSE, col.names = FALSE, row.names = FALSE)

# now compute p-values (two-tailed Wald test on each latent covariance)
pvals2tailed <- pnorm(abs(inspect(fitCorrel, what = "est")$psi /
                            inspect(fitCorrel, what = "se")$psi), lower.tail = FALSE) * 2
colnames(pvals2tailed) <- rownames(pvals2tailed) <-
  c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations")
pvals2tailed[upper.tri(pvals2tailed)] <- 0  # keep lower triangle only
maxloc <- which(pvals2tailed == max(pvals2tailed), arr.ind = TRUE)
maxpval <- max(pvals2tailed)
rownames(pvals2tailed)[maxloc[1]]  # names the weakest pairwise correlation ...
colnames(pvals2tailed)[maxloc[2]]  # ... (row and column of the largest p)
"*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fitCorrel3 <- sem(modelCorrel3, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitCorrel3,standardized=TRUE, fit.measures=TRUE) #get correlation matrix for latent variables lvcormat3 <- lavInspect(fitCorrel3, what = "cor.lv") colnames(lvcormat3) <- rownames(lvcormat3) <- (c("Exceptionalism", "Nationalism", "Religiosity", "Free market", "Climate", "Vaccinations", "junk1", "junk2", "IQ Heritable")) cormat3 <- stargazer(lvcormat3[,1:6], title="") #row 18 contains corrs with main factor in IQ model. Chop junk columns of subordinate factors write.table(cormat3[20],file="_t.IQcor.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #now compute p-values pvals2tailed3 <- pnorm(abs(inspect(fitCorrel3,what="est")$psi/inspect(fitCorrel3,what="se")$psi),lower.tail = FALSE)*2 twofacsigcorrs <- (colnames(lvcormat3)[1:6]) [pvals2tailed3[9,1:6]<.05] #**-------------- now turn to specific prediction models #**---------- IQ hierarchical two-factor: start with full model ... fullmodel2fIQ <- c(" IQ.two ~ FMFac + intFac + RelFac + humFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM" ) fitfull2fIQ <- sem(fullmodel2fIQ, compositeRelig, std.lv=TRUE, estimator="ML") summary(fitfull2fIQ,standardized=TRUE, fit.measures=TRUE) modificationindices(fitfull2fIQ,sort.=TRUE,maximum.number=20) full2fIQgof <- fitMeasures(fitfull2fIQ) #.... smaller model for comparison ... 
# NOTE(review): model-string line breaks restored (lost in the dump).
smmodel2fIQ <- c("
IQ.two ~ intFac
humFac =~ hum
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
IQ.her =~ Q3.13 + Q3.17 + Q3.18
IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16
IQ.two =~ a*IQ.her + a*IQ.env
hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "FM ~~ ", FMSI$eSImod, "*FM"
)
fitsm2fIQ <- sem(smmodel2fIQ, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitsm2fIQ, standardized = TRUE, fit.measures = TRUE)
fitsm2fIQgof <- fitMeasures(fitsm2fIQ)
# .... which turns out to fit equally well ...
anova(fitfull2fIQ, fitsm2fIQ)
# .... so we can extract a small model without the other constructs
tinymodel2fIQ <- c("
IQ.two ~ intFac
intFac =~ int
IQ.her =~ Q3.13 + Q3.17 + Q3.18
IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16
IQ.two =~ a*IQ.her + a*IQ.env
int ~~", intSI$eSImod, "*int"
)
fittiny2fIQ <- sem(tinymodel2fIQ, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fittiny2fIQ, standardized = TRUE, fit.measures = TRUE)

#**---------- climate: start with full model...
bigmodelclim <- c("
ClimFac ~ FMFac + intFac + RelFac + humFac
ClimFac =~ Clim
intFac =~ int
RelFac =~ Rel
humFac =~ hum
FMFac =~ FM
Rel ~~ ", RelSI$eSImod, "*Rel",
  "hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim"
)
fitbigmodclim <- sem(bigmodelclim, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitbigmodclim, standardized = TRUE, fit.measures = TRUE)
fullclimgof <- fitMeasures(fitbigmodclim)
#.... eliminate predictors ....
# NOTE(review): model-string line breaks restored (lost in the dump).
modelclim <- c("
ClimFac ~ FMFac + RelFac + humFac
ClimFac =~ Clim
intFac =~ int
FMFac =~ FM
RelFac =~ Rel
humFac =~ hum
Rel ~~ ", RelSI$eSImod, "*Rel",
  "hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim"
)
fitmodclim <- sem(modelclim, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitmodclim, standardized = TRUE, fit.measures = TRUE)
smclimgof <- fitMeasures(fitmodclim)
#parameterEstimates(fitmodclim, standardized=TRUE)
modindices(fitmodclim, sort. = TRUE, maximum.number = 4)
# ... which turns out not to make a difference ...
anova(fitbigmodclim, fitmodclim)
# and we have a final small model
smmodelclim <- c("
ClimFac ~ FMFac + intFac
ClimFac =~ Clim
intFac =~ int
FMFac =~ FM
int ~~", intSI$eSImod, "*int",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim"
)
fitsmmodclim <- sem(smmodelclim, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitsmmodclim, standardized = TRUE, fit.measures = TRUE)

# mediation? indirect effects defined via := on labelled paths
mediateclimmodel <- c("
humFac ~ alpha1 * FMFac
intFac ~ alpha2 * FMFac
ClimFac ~ direct * FMFac + beta1 *humFac + beta2 *intFac
indirect1 := alpha1 * beta1
indirect2 := alpha2 * beta2
total := indirect1 + indirect2 + direct
proportion := (indirect1 + indirect2)/total
ClimFac =~ Clim
humFac =~ hum
intFac =~ int
FMFac =~ FM
hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim"
)
fitmediateclimmodel <- sem(mediateclimmodel, compositeRelig, std.lv = TRUE, estimator = "ML")
semPaths(fitmediateclimmodel, what = "std", title = FALSE, curvePivot = TRUE,
         residuals = FALSE, structural = TRUE, layout = "tree2", rotation = 2)
summary(fitmediateclimmodel, standardized = TRUE, fit.measures = TRUE)
# NOTE(review): this overwrites fullclimgof assigned from the full climate model
# above -- rename one of the two if both values are needed downstream.
fullclimgof <- fitMeasures(fitmediateclimmodel)
# double mediation for both climate and vax?
# NOTE(review): model-string line breaks restored (lost in the dump).
# BUGFIX(review): 'indirecthum' in the source read 'a2*b2 + a4*b2 + a2*b4 + a2*b4';
# by symmetry with indirectInt (all products over {a1,a3} x {b1,b3}) the duplicated
# final term must be a4*b4 -- corrected below. Also note that 'total' adds only the
# climate direct paths (d1, d2); confirm the omission of d3/d4 is intended.
mediate2climmodel <- c("
ClimFac ~ d1 * FMFac + d2 * RelFac + b2 *humFac + b1 *intFac
VaxFac ~ d4 * FMFac + d3 * RelFac + b4 *humFac + b3 *intFac
humFac ~ a4 * FMFac + a2 * RelFac
intFac ~ a3 * FMFac + a1 * RelFac
indirectInt := a1 * b1 + a3 * b1 + a1 * b3 + a3 * b3
indirecthum := a2 * b2 + a4 * b2 + a2 * b4 + a4 * b4
total := indirectInt + indirecthum + d1 + d2
proportion := (indirectInt + indirecthum)/total
ClimFac =~ Clim
humFac =~ hum
intFac =~ int
FMFac =~ FM
RelFac =~ Rel
VaxFac =~ Vax
hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "Clim ~~ ", ClimSI$eSImod, "*Clim",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitmediate2climmodel <- sem(mediate2climmodel, compositeRelig, std.lv = TRUE, estimator = "ML")
semPaths(fitmediate2climmodel, what = "std", title = FALSE, curvePivot = TRUE,
         residuals = FALSE, structural = TRUE, layout = "tree2", rotation = 2)
summary(fitmediate2climmodel, standardized = TRUE, fit.measures = TRUE)
# NOTE(review): third assignment to fullclimgof -- overwrites the previous values.
fullclimgof <- fitMeasures(fitmediate2climmodel)

#**------------ vax: start with full model
fullmodelvax <- c("
VaxFac ~ FMFac + intFac + RelFac + humFac
VaxFac =~ Vax
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
humFac =~ hum
int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "hum ~~ ", humSI$eSImod, "*hum",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitfullmodvax <- sem(fullmodelvax, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitfullmodvax, standardized = TRUE, fit.measures = TRUE)
fullvaxgof <- fitMeasures(fitfullmodvax)
#parameterEstimates(fitfullmodvax, standardized=TRUE)
modindices(fitfullmodvax, sort. = TRUE, maximum.number = 4)
#.... now a smaller one ....
# NOTE(review): model-string line breaks restored (lost in the dump).
smmodelvax <- c("
VaxFac ~ FMFac + intFac + RelFac
VaxFac =~ Vax
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
humFac =~ hum
int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "hum ~~ ", humSI$eSImod, "*hum",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitsmmodvax <- sem(smmodelvax, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitsmmodvax, standardized = TRUE, fit.measures = TRUE)
smvaxgof <- fitMeasures(fitsmmodvax)
anova(fitfullmodvax, fitsmmodvax)

#**--------------- Now fit all scientific constructs together (two-factor IQ)
allfull2fmod <- c("
ClimFac ~ FMFac + intFac + RelFac + humFac
VaxFac ~ FMFac + intFac + RelFac + humFac
IQ.two ~ FMFac + intFac + RelFac + humFac
humFac =~ hum
intFac =~ int
RelFac =~ Rel
FMFac =~ FM
ClimFac =~ Clim
VaxFac =~ Vax
IQ.her =~ Q3.13 + Q3.17 + Q3.18
IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16
IQ.two =~ a*IQ.her + a*IQ.env
hum ~~ ", humSI$eSImod, "*hum",
  "int ~~", intSI$eSImod, "*int",
  "Rel ~~ ", RelSI$eSImod, "*Rel",
  "FM ~~ ", FMSI$eSImod, "*FM",
  "Clim ~~ ", ClimSI$eSImod, "*Clim",
  "Vax ~~ ", VaxSI$eSImod, "*Vax"
)
fitall2ffull <- sem(allfull2fmod, compositeRelig, std.lv = TRUE, estimator = "ML")
summary(fitall2ffull, standardized = TRUE, fit.measures = TRUE)
#parameterEstimates(fitall2ffull, standardized=TRUE)
modindices(fitall2ffull, sort. = TRUE, maximum.number = 4)
#.... and now a constrained model ....
modelall2f <- c(" ClimFac ~ FMFac + intFac VaxFac ~ FMFac + intFac + RelFac + humFac IQ.two ~ intFac humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) modelall2ffit <- sem(modelall2f, compositeRelig, std.lv=TRUE, estimator="ML") summary(modelall2ffit,standardized=TRUE, fit.measures=TRUE) anova(fitall2ffull,modelall2ffit) finalmodelgof <- fitMeasures(modelall2ffit) semPaths(modelall2ffit, what="std", title =FALSE, curvePivot = TRUE,residuals=FALSE, structural=TRUE, layout="tree2",rotation=2) #** ---- now apportion variance ------------------------------------- # call to getr2(criterion,predictors,vars), # e.g.: getr2("Clim", c("int", "FM"), c("Clim", "int", "Rel", "hum", "FM")) # this decomposes the r^2 embedded in model fitall2ffull for the 3 latent criteria # Note: this is climcomps <- getr2comps("Clim") inspect(fitall2ffull,'r2')["ClimFac"] str(climcomps) vaxcomps <- getr2comps("Vax") inspect(fitall2ffull,'r2')["VaxFac"] str(vaxcomps) sum(unlist(vaxcomps)) IQcomps <- getr2comps("IQ") inspect(fitall2ffull,'r2')["IQ.two"] str(IQcomps) #rearrange data into a more suitable format compnts <- cbind(unlist(climcomps),unlist(vaxcomps),unlist(IQcomps))[substr(names(climcomps),1,1)=="u",] sharvar <- cbind(unlist(climcomps),unlist(vaxcomps),unlist(IQcomps))[substr(names(climcomps),1,1)!="u",] compnts <- rbind(compnts,colSums(sharvar),cbind(sum(unlist(climcomps)),sum(unlist(vaxcomps)),sum(unlist(IQcomps)))) row.names(compnts) <- c("Nationalism","Religiosity","Exceptionalism","Free market","All shared","Total") compmat <- stargazer(compnts, title="Decomposition of variance explained",digits=2,digits.extra=0) #compmat <- 
str_replace(compmat,"\\$-\\$","") write.table(compmat[c(12,10,11,13,14,15)],file="_t.compnts.tex",quote=FALSE,col.names=FALSE,row.names=FALSE) #--- all constructs, predict all criteria from single 2nd-order conservatism factor --------------- all2ndorderfac <- c(" ClimFac ~ C.two VaxFac ~ C.two IQ.two ~ C.two C.two =~ NA*FMFac + RelFac + intFac + humFac C.two ~~ 1*C.two humFac =~ hum intFac =~ int RelFac =~ Rel FMFac =~ FM ClimFac =~ Clim VaxFac =~ Vax IQ.her =~ Q3.13 + Q3.17 + Q3.18 IQ.env =~ Q3.19 + Q3.14 + Q3.15 + Q3.16 IQ.two =~ a*IQ.her + a*IQ.env humFac ~~ RelFac hum ~~ ", humSI$eSImod, "*hum", "int ~~", intSI$eSImod, "*int", "Rel ~~ ", RelSI$eSImod, "*Rel", "FM ~~ ", FMSI$eSImod, "*FM", "Clim ~~ ",ClimSI$eSImod,"*Clim", "Vax ~~ ", VaxSI$eSImod, "*Vax" ) fit2ndorderfac <- sem(all2ndorderfac, compositeRelig, std.lv=TRUE, estimator="ML") summary(fit2ndorderfac,standardized=TRUE, fit.measures=TRUE) modindices(fit2ndorderfac, sort.=TRUE, maximum.number=4)
model_detectability_abundance <- function(crab_tbl, collection_event, habitat_pca_components, dist_shore){ counts <- crab_tbl col_event <- collection_event hab <- habitat_pca_components loc_dist <- dist_shore counts %<>% dplyr::inner_join(col_event, by = "col_id") %>% dplyr::filter(!is.na(date), !is.na(area), !is.na(locality), !is.na(distance)) %>% dplyr::group_by(date, area, locality, distance) %>% dplyr::summarise(n = n()) y <- expand.grid(date = unique(counts$date), area = unique(counts$area), locality = unique(counts$locality), distance = 0:4) %>% dplyr::left_join(counts) %>% dplyr::mutate(n = replace(n, is.na(n), 0), distance = distance , distance = paste("d", distance, sep = "")) unmarkedFrames <- plyr::dlply(y, "date", function(x){ d <- x %>% tidyr::spread(distance, n) %>% dplyr::inner_join(hab, by = c("area", "locality")) %>% dplyr::inner_join(col_event, by = c("date", "area")) %>% dplyr::inner_join(loc_dist, by = c("area", "locality")) %>% dplyr::mutate(locality = as.numeric(locality), dist_shore = dist_shore, length = length) %>% dplyr::filter(!(area == "CP" & locality <= 12)) unmarked::unmarkedFrameDS( y = as.matrix(dplyr::select(d, 4:8)), siteCovs = dplyr::select(d, area, dist_shore, habitat), dist.breaks = 0:5, tlength = dplyr::select(d, length)[[1]], survey = "line", unitsIn = "m" ) }, .progress = "text") abu0 <- plyr::llply(unmarkedFrames, function(x) try(unmarked::distsamp(~ habitat ~ habitat, x)), .progress = "text") abu1 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ habitat ~ 1, x), .progress = "text") abu2 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ habitat, x), .progress = "text") abu3 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x), .progress = "text") # abu4 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC1 + PC2 ~ 1, x), # .progress = "text") # abu5 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC1 ~ 1, x), # .progress = "text") # abu6 
<- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC2 ~ 1, x), # .progress = "text") # abu7 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ dist_shore ~ 1, x), # .progress = "text") abu8 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x), .progress = "text") abu9 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "exp"), .progress = "text") abu10 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "hazard"), .progress = "text") abu11 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "uniform"), .progress = "text") list(abu0 = abu0, abu1 = abu1, abu2 = abu2, abu3 = abu3, # abu4 = abu4, # abu5 = abu5, # abu6 = abu6, # abu7 = abu7, abu8 = abu8, abu9 = abu9, abu10 = abu10, abu11 = abu11 ) } model_detectability_abundance_global <- function(crab_tbl, collection_event, habitat_pca_components, dist_shore){ counts <- crab_tbl col_event <- collection_event hab <- habitat_pca_components loc_dist <- dist_shore counts %<>% dplyr::inner_join(col_event, by = "col_id") %>% dplyr::filter(!is.na(date), !is.na(area), !is.na(locality), !is.na(distance)) %>% dplyr::group_by(date, area, locality, distance) %>% dplyr::summarise(n = n()) y <- expand.grid(date = unique(counts$date), area = unique(counts$area), locality = unique(counts$locality), distance = 0:4) %>% dplyr::left_join(counts) %>% dplyr::mutate(n = replace(n, is.na(n), 0), distance = distance , distance = paste("d", distance, sep = "")) d <- y %>% tidyr::spread(distance, n) %>% dplyr::inner_join(hab, by = c("area", "locality")) %>% dplyr::inner_join(col_event, by = c("date", "area")) %>% dplyr::inner_join(loc_dist, by = c("area", "locality")) %>% dplyr::mutate(locality = as.numeric(locality), dist_shore = dist_shore, length = length) %>% dplyr::filter(!(area == "CP" & locality <= 12)) umkf <- unmarked::unmarkedFrameDS( y = as.matrix(dplyr::select(d, 4:8)), siteCovs = 
dplyr::select(d, area, dist_shore, habitat), dist.breaks = 0:5, tlength = dplyr::select(d, length)[[1]], survey = "line", unitsIn = "m" ) abu0 <- unmarked::distsamp(~ habitat ~ habitat, umkf) abu1 <- unmarked::distsamp(~ 1 ~ habitat, umkf) abu2 <- unmarked::distsamp(~ habitat ~ 1, umkf) abu3 <- unmarked::distsamp(~ 1 ~ 1, umkf) list(abu0 = abu0, abu1 = abu1, abu2 = abu2, abu3 = abu3) } determine_best_detectability_abundance_model <- function(detectability_abundance_model){ require(foreach) attach(detectability_abundance_model) extract_aics <- . %>% purrr::map_dfr(~dplyr::tibble(AIC = .@AIC), .id = "date") aic_table <- detectability_abundance_model %>% purrr::map_dfr(extract_aics, .id = "model") summary_delta_aic <- . %>% dplyr::group_by(date) %>% dplyr::mutate(delta_AIC = AIC - min(AIC)) %>% dplyr::select(-AIC) %>% dplyr::group_by(model) %>% dplyr::summarise_if(is.numeric, .funs = list(mean = mean, median = median, min = min, max = max)) aic_table %>% dplyr::filter(model %in% c("abu0", "abu1", "abu2", "abu3")) %>% summary_delta_aic() } extract_coeficients <- function(detectability_abundance_model, habitat_simple){ # baseline_habitat <- unique(habitat_simple$habitat)[!unique(habitat_simple$habitat) %in% habitat_names] library(unmarked) # loadd(detectability_abundance_model) extract_detectability_coef <- . %>% coef(type = "det") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma)), sigma = exp(sigma)) %>% dplyr::rowwise() %>% dplyr::mutate(eshw = integrate(unmarked::gxhn, 0, 5, sigma)$value, det_probability = eshw / 5) %>% dplyr::select(-sigma) extract_density_coef <- . 
%>% coef(type = "state") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma))) detectability_abundance_model$abu1 %>% purrr::map_dfr(extract_detectability_coef, .id = "date") %>% dplyr::group_by(habitat) %>% dplyr::summarise_if(is.numeric, .funs = list(mean =mean)) # , se = ~ sd(.)/sqrt(dplyr::n())) detectability_abundance_model$abu2 %>% purrr::map_dfr(extract_density_coef, .id = "date") %>% dplyr::group_by(habitat) %>% dplyr::summarise_if(is.numeric, .funs = list(mean =mean)) %>% dplyr::mutate(mean = exp(mean)) } calculate_abundance_per_day <- function(detectability_abundance_model){ ab <- detectability_abundance_model$abu3 %>% lapply(function(x) { xx <- x@estimates@estimates$state ests <- xx@estimates SEs <- unmarked::SE(xx) Z <- ests/SEs p <- 2 * pnorm(abs(Z), lower.tail = FALSE) # print(c(ests, SEs, Z,p)) if(!is.na(p)){ if(p < 0.05){ # print(backTransform(x, type = "state")@estimate) return(unmarked::backTransform(x, type = "state")@estimate) } } else return(NA) }) %>% unlist %>% as.data.frame.vector() %>% dplyr::add_rownames() names(ab) <- c("date", "density") return(ab) } calculate_detectability_per_day <- function(detectability_abundance_model, habitat_simple) { require(unmarked) extract_detectability_coef <- . 
%>% coef(type = "det") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma)), sigma = exp(sigma)) %>% dplyr::rowwise() %>% dplyr::mutate(eshw = integrate(unmarked::gxhn, 0, 5, sigma)$value, det_probability = eshw / 5) %>% dplyr::select(-sigma) detectability_abundance_model$abu3 %>% purrr::map_dfr(extract_detectability_coef, .id = "date") %>% dplyr::ungroup() %>% dplyr::summarise_if(is.numeric, .funs = list(mean = mean, max = max, min = min)) } check_env_effect_detectability <- function(detectability_abundance_model){ library(ggplot2) attach(detectability_abundance_model) # no significant effect of environment on the detection estimates: extract_distance_coeff <- function(y, which = "det"){ plyr::ldply(y, function(x){ if (class(x) == "unmarkedFitDS"){ e <- x@estimates@estimates[[which]]@estimates %>% as.data.frame() %>% dplyr::add_rownames() names(e) <- c("coeficient", "estimate") e } }) } det_pvalue_hist <- abu0 %>% extract_distance_coeff() %>% dplyr::filter(coeficient != "sigma(Intercept)") %>% ggplot(aes(x = plogis(estimate))) + geom_density() + facet_wrap(~coeficient) + xlab("p-value") + theme_bw() # detection estimates # ggsave("./paper/supp_figures/detection-coovariates.pdf", width = 4.7,height = 1.65, scale = 1.5) det_pvalue_yday <- abu0 %>% extract_distance_coeff() %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date)) %>% ggplot(aes(x = yday, y = plogis(estimate))) + geom_point() + facet_wrap(~coeficient) + geom_smooth() det_pvalue_moonph <- abu0 %>% extract_distance_coeff() %>% dplyr::mutate(date = as.Date(date), date_utc = as.POSIXct(as.POSIXlt(date, tz = "UTC")), moon_ph = oce::moonAngle(date_utc, lon = 46.2, lat = -9.4)$phase, moon_ph = moon_ph - 
floor(moon_ph)) %>% ggplot(aes(x = moon_ph, y = plogis(estimate))) + geom_point() + facet_wrap(~coeficient) + geom_smooth() # no significant effect of environment on the abundance estimates: abu_pvalue_hist <- abu0 %>% extract_distance_coeff("state") %>% dplyr::filter(coeficient != "(Intercept)") %>% ggplot(aes(x = plogis(estimate))) + geom_density() + facet_wrap(~coeficient) + xlab("p-value") + theme_bw() # detection estimates # ggsave("./paper/supp_figures/density-coovariates.pdf", width = 4.7,height = 1.65, scale = 1.5) list(det_pvalue_hist = det_pvalue_hist, det_pvalue_yday = det_pvalue_yday, det_pvalue_moonph = det_pvalue_moonph, abu_pvalue_hist = abu_pvalue_hist) } plot_abundance_from_density_model <- function(abundance_per_day){ require(ggplot2) p1 <- abundance_per_day %>% dplyr::mutate(date = as.Date(date)) %>% ggplot(aes(x = date, y = density)) + geom_smooth(method = "glm", method.args = list(family = "poisson")) + geom_point() + ylim(c(0, 125)) p2 <- abundance_per_day %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date)) %>% ggplot(aes(x = yday, y = density)) + geom_point() + geom_smooth() list(p1, p2) } model_density <- function(abundance_per_day, collection_event){ dens <- abundance_per_day col_event <- collection_event col_event_date <- col_event %>% # fi(!is.na(rain)) %>% dplyr::group_by(date) %>% dplyr::mutate(moon_ph = mean(moon_ph), rain = rain[1]) m_d_d <- dens %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date), year = lubridate::year(date)) %>% dplyr::inner_join(col_event_date) m_d <- mgcv::gam(density ~ s(yday, bs = "cc") + s(as.numeric(date), k = 4), data = m_d_d) # summary(m_d) n <- 362 m_d_p <- plot(m_d, n = n, pages = 1) list(mod = m_d, pred = m_d_p) } plot_density <- function(density_models){ require(ggplot2) m_d <- density_models ylim <- c(0,44) intercept_d <- c(m_d[[1]]$coefficients[1]) data_points <- m_d[[1]]$model %>% dplyr::rename(density = 1, yday = 2, date = 3) %>% dplyr::mutate(date = as.Date(date, 
origin = as.Date("1970-01-01")), yday = as.Date("2016-01-01") + yday) pd1 <- extract_fit(m_d[[2]][[1]]) %>% dplyr::mutate(fit = fit + intercept_d, fitmin = fitmin + intercept_d, fitmax = fitmax + intercept_d, x = as.Date("2016-01-01") + x) %>% ggplot(aes(x = x, y = fit)) + geom_point(data = data_points, mapping = aes(x = yday, y = density), size = 0.25, colour = "grey70") + geom_hline(yintercept = intercept_d, linetype = 2, colour = "grey25", size = 0.25) + geom_ribbon(aes(ymin = fitmin, ymax = fitmax), alpha = 0.25, fill = "grey50") + geom_line() + scale_x_date(date_labels = "%b", expand = c(0,0), name = "month", date_breaks = "2 month") + scale_y_continuous(limits = ylim, name = "crabs / hectare") + pub_theme() + labs(tag = "B.") pd2 <- extract_fit(m_d[[2]][[2]]) %>% dplyr::mutate(fit = fit + intercept_d, fitmin = fitmin + intercept_d, fitmax = fitmax + intercept_d, x = as.Date("1970-01-01") + x) %>% ggplot(aes(x = x, y = fit)) + geom_point(data = data_points, mapping = aes(x = date, y = density), size = 0.25, colour = "grey70") + geom_hline(yintercept = intercept_d, linetype = 2, colour = "grey25", size = 0.25) + # geom_point(data = m_d[[1]]$model, aes(y = density, x = as.Date("2016-01-01") + yday), size = 1, shape = 21, alpha = 0.5) + geom_ribbon(aes(ymin = fitmin, ymax = fitmax), alpha = 0.25, fill = "grey50") + geom_line() + scale_x_date(expand = c(0,0), name = "date") + scale_y_continuous(limits = ylim, name = "crabs / hectare") + pub_theme() + labs(tag = "A.") cowplot::plot_grid(pd2, pd1, ncol = 1, align = "hv") } get_density_selection_table <- function(detectability_abundance_model){ purrr::pmap( detectability_abundance_model, function(...){ models <- list(...) 
unmarked::modSel(unmarked::fitList(fits = models[1:4]), nullmod = "abu3")@Full} %>% tibble::rownames_to_column(var = "m")) %>% purrr::imap_dfr(~ dplyr::mutate(.x, date = .y)) } plot_density_models_aic <- function(model_comparison){ require(ggplot2) model_comparison %>% dplyr::mutate(model = forcats::fct_reorder(model, delta)) %>% ggplot(aes(x = delta)) + geom_histogram(binwidth = 1) + facet_grid(rows = vars(formula)) + pub_theme() + labs(x = "∆ AIC", title = "Delta AIC") } plot_density_models_rsquared <- function(model_comparison){ require(ggplot2) model_comparison %>% dplyr::filter(model != "abu3") %>% dplyr::mutate(model = forcats::fct_reorder(model, Rsq)) %>% ggplot(aes(x = Rsq)) + geom_histogram(binwidth = 0.01) + facet_grid(rows = vars(formula)) + pub_theme() + labs(x = "R-squared", title = "Nagelkerke's (1991) R-squared index") }
/code/density.R
no_license
efcaguab/aldabra-coconut-crabs
R
false
false
17,127
r
model_detectability_abundance <- function(crab_tbl, collection_event, habitat_pca_components, dist_shore){ counts <- crab_tbl col_event <- collection_event hab <- habitat_pca_components loc_dist <- dist_shore counts %<>% dplyr::inner_join(col_event, by = "col_id") %>% dplyr::filter(!is.na(date), !is.na(area), !is.na(locality), !is.na(distance)) %>% dplyr::group_by(date, area, locality, distance) %>% dplyr::summarise(n = n()) y <- expand.grid(date = unique(counts$date), area = unique(counts$area), locality = unique(counts$locality), distance = 0:4) %>% dplyr::left_join(counts) %>% dplyr::mutate(n = replace(n, is.na(n), 0), distance = distance , distance = paste("d", distance, sep = "")) unmarkedFrames <- plyr::dlply(y, "date", function(x){ d <- x %>% tidyr::spread(distance, n) %>% dplyr::inner_join(hab, by = c("area", "locality")) %>% dplyr::inner_join(col_event, by = c("date", "area")) %>% dplyr::inner_join(loc_dist, by = c("area", "locality")) %>% dplyr::mutate(locality = as.numeric(locality), dist_shore = dist_shore, length = length) %>% dplyr::filter(!(area == "CP" & locality <= 12)) unmarked::unmarkedFrameDS( y = as.matrix(dplyr::select(d, 4:8)), siteCovs = dplyr::select(d, area, dist_shore, habitat), dist.breaks = 0:5, tlength = dplyr::select(d, length)[[1]], survey = "line", unitsIn = "m" ) }, .progress = "text") abu0 <- plyr::llply(unmarkedFrames, function(x) try(unmarked::distsamp(~ habitat ~ habitat, x)), .progress = "text") abu1 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ habitat ~ 1, x), .progress = "text") abu2 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ habitat, x), .progress = "text") abu3 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x), .progress = "text") # abu4 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC1 + PC2 ~ 1, x), # .progress = "text") # abu5 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC1 ~ 1, x), # .progress = "text") # abu6 
<- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ PC2 ~ 1, x), # .progress = "text") # abu7 <- plyr::llply(unmarkedFrames, # function(x) unmarked::distsamp(~ dist_shore ~ 1, x), # .progress = "text") abu8 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x), .progress = "text") abu9 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "exp"), .progress = "text") abu10 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "hazard"), .progress = "text") abu11 <- plyr::llply(unmarkedFrames, function(x) unmarked::distsamp(~ 1 ~ 1, x, keyfun = "uniform"), .progress = "text") list(abu0 = abu0, abu1 = abu1, abu2 = abu2, abu3 = abu3, # abu4 = abu4, # abu5 = abu5, # abu6 = abu6, # abu7 = abu7, abu8 = abu8, abu9 = abu9, abu10 = abu10, abu11 = abu11 ) } model_detectability_abundance_global <- function(crab_tbl, collection_event, habitat_pca_components, dist_shore){ counts <- crab_tbl col_event <- collection_event hab <- habitat_pca_components loc_dist <- dist_shore counts %<>% dplyr::inner_join(col_event, by = "col_id") %>% dplyr::filter(!is.na(date), !is.na(area), !is.na(locality), !is.na(distance)) %>% dplyr::group_by(date, area, locality, distance) %>% dplyr::summarise(n = n()) y <- expand.grid(date = unique(counts$date), area = unique(counts$area), locality = unique(counts$locality), distance = 0:4) %>% dplyr::left_join(counts) %>% dplyr::mutate(n = replace(n, is.na(n), 0), distance = distance , distance = paste("d", distance, sep = "")) d <- y %>% tidyr::spread(distance, n) %>% dplyr::inner_join(hab, by = c("area", "locality")) %>% dplyr::inner_join(col_event, by = c("date", "area")) %>% dplyr::inner_join(loc_dist, by = c("area", "locality")) %>% dplyr::mutate(locality = as.numeric(locality), dist_shore = dist_shore, length = length) %>% dplyr::filter(!(area == "CP" & locality <= 12)) umkf <- unmarked::unmarkedFrameDS( y = as.matrix(dplyr::select(d, 4:8)), siteCovs = 
dplyr::select(d, area, dist_shore, habitat), dist.breaks = 0:5, tlength = dplyr::select(d, length)[[1]], survey = "line", unitsIn = "m" ) abu0 <- unmarked::distsamp(~ habitat ~ habitat, umkf) abu1 <- unmarked::distsamp(~ 1 ~ habitat, umkf) abu2 <- unmarked::distsamp(~ habitat ~ 1, umkf) abu3 <- unmarked::distsamp(~ 1 ~ 1, umkf) list(abu0 = abu0, abu1 = abu1, abu2 = abu2, abu3 = abu3) } determine_best_detectability_abundance_model <- function(detectability_abundance_model){ require(foreach) attach(detectability_abundance_model) extract_aics <- . %>% purrr::map_dfr(~dplyr::tibble(AIC = .@AIC), .id = "date") aic_table <- detectability_abundance_model %>% purrr::map_dfr(extract_aics, .id = "model") summary_delta_aic <- . %>% dplyr::group_by(date) %>% dplyr::mutate(delta_AIC = AIC - min(AIC)) %>% dplyr::select(-AIC) %>% dplyr::group_by(model) %>% dplyr::summarise_if(is.numeric, .funs = list(mean = mean, median = median, min = min, max = max)) aic_table %>% dplyr::filter(model %in% c("abu0", "abu1", "abu2", "abu3")) %>% summary_delta_aic() } extract_coeficients <- function(detectability_abundance_model, habitat_simple){ # baseline_habitat <- unique(habitat_simple$habitat)[!unique(habitat_simple$habitat) %in% habitat_names] library(unmarked) # loadd(detectability_abundance_model) extract_detectability_coef <- . %>% coef(type = "det") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma)), sigma = exp(sigma)) %>% dplyr::rowwise() %>% dplyr::mutate(eshw = integrate(unmarked::gxhn, 0, 5, sigma)$value, det_probability = eshw / 5) %>% dplyr::select(-sigma) extract_density_coef <- . 
%>% coef(type = "state") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma))) detectability_abundance_model$abu1 %>% purrr::map_dfr(extract_detectability_coef, .id = "date") %>% dplyr::group_by(habitat) %>% dplyr::summarise_if(is.numeric, .funs = list(mean =mean)) # , se = ~ sd(.)/sqrt(dplyr::n())) detectability_abundance_model$abu2 %>% purrr::map_dfr(extract_density_coef, .id = "date") %>% dplyr::group_by(habitat) %>% dplyr::summarise_if(is.numeric, .funs = list(mean =mean)) %>% dplyr::mutate(mean = exp(mean)) } calculate_abundance_per_day <- function(detectability_abundance_model){ ab <- detectability_abundance_model$abu3 %>% lapply(function(x) { xx <- x@estimates@estimates$state ests <- xx@estimates SEs <- unmarked::SE(xx) Z <- ests/SEs p <- 2 * pnorm(abs(Z), lower.tail = FALSE) # print(c(ests, SEs, Z,p)) if(!is.na(p)){ if(p < 0.05){ # print(backTransform(x, type = "state")@estimate) return(unmarked::backTransform(x, type = "state")@estimate) } } else return(NA) }) %>% unlist %>% as.data.frame.vector() %>% dplyr::add_rownames() names(ab) <- c("date", "density") return(ab) } calculate_detectability_per_day <- function(detectability_abundance_model, habitat_simple) { require(unmarked) extract_detectability_coef <- . 
%>% coef(type = "det") %>% as.data.frame() %>% tibble::rownames_to_column() %>% dplyr::rename(habitat = rowname, sigma = ".") %>% dplyr::mutate(habitat = stringr::str_extract(habitat, "[A-Z]+"), habitat = dplyr::if_else(habitat == "I", unique(habitat_simple$habitat)[1], habitat), sigma = dplyr::if_else(sigma == dplyr::first(sigma), sigma, sigma + dplyr::first(sigma)), sigma = exp(sigma)) %>% dplyr::rowwise() %>% dplyr::mutate(eshw = integrate(unmarked::gxhn, 0, 5, sigma)$value, det_probability = eshw / 5) %>% dplyr::select(-sigma) detectability_abundance_model$abu3 %>% purrr::map_dfr(extract_detectability_coef, .id = "date") %>% dplyr::ungroup() %>% dplyr::summarise_if(is.numeric, .funs = list(mean = mean, max = max, min = min)) } check_env_effect_detectability <- function(detectability_abundance_model){ library(ggplot2) attach(detectability_abundance_model) # no significant effect of environment on the detection estimates: extract_distance_coeff <- function(y, which = "det"){ plyr::ldply(y, function(x){ if (class(x) == "unmarkedFitDS"){ e <- x@estimates@estimates[[which]]@estimates %>% as.data.frame() %>% dplyr::add_rownames() names(e) <- c("coeficient", "estimate") e } }) } det_pvalue_hist <- abu0 %>% extract_distance_coeff() %>% dplyr::filter(coeficient != "sigma(Intercept)") %>% ggplot(aes(x = plogis(estimate))) + geom_density() + facet_wrap(~coeficient) + xlab("p-value") + theme_bw() # detection estimates # ggsave("./paper/supp_figures/detection-coovariates.pdf", width = 4.7,height = 1.65, scale = 1.5) det_pvalue_yday <- abu0 %>% extract_distance_coeff() %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date)) %>% ggplot(aes(x = yday, y = plogis(estimate))) + geom_point() + facet_wrap(~coeficient) + geom_smooth() det_pvalue_moonph <- abu0 %>% extract_distance_coeff() %>% dplyr::mutate(date = as.Date(date), date_utc = as.POSIXct(as.POSIXlt(date, tz = "UTC")), moon_ph = oce::moonAngle(date_utc, lon = 46.2, lat = -9.4)$phase, moon_ph = moon_ph - 
floor(moon_ph)) %>% ggplot(aes(x = moon_ph, y = plogis(estimate))) + geom_point() + facet_wrap(~coeficient) + geom_smooth() # no significant effect of environment on the abundance estimates: abu_pvalue_hist <- abu0 %>% extract_distance_coeff("state") %>% dplyr::filter(coeficient != "(Intercept)") %>% ggplot(aes(x = plogis(estimate))) + geom_density() + facet_wrap(~coeficient) + xlab("p-value") + theme_bw() # detection estimates # ggsave("./paper/supp_figures/density-coovariates.pdf", width = 4.7,height = 1.65, scale = 1.5) list(det_pvalue_hist = det_pvalue_hist, det_pvalue_yday = det_pvalue_yday, det_pvalue_moonph = det_pvalue_moonph, abu_pvalue_hist = abu_pvalue_hist) } plot_abundance_from_density_model <- function(abundance_per_day){ require(ggplot2) p1 <- abundance_per_day %>% dplyr::mutate(date = as.Date(date)) %>% ggplot(aes(x = date, y = density)) + geom_smooth(method = "glm", method.args = list(family = "poisson")) + geom_point() + ylim(c(0, 125)) p2 <- abundance_per_day %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date)) %>% ggplot(aes(x = yday, y = density)) + geom_point() + geom_smooth() list(p1, p2) } model_density <- function(abundance_per_day, collection_event){ dens <- abundance_per_day col_event <- collection_event col_event_date <- col_event %>% # fi(!is.na(rain)) %>% dplyr::group_by(date) %>% dplyr::mutate(moon_ph = mean(moon_ph), rain = rain[1]) m_d_d <- dens %>% dplyr::mutate(date = as.Date(date), yday = lubridate::yday(date), year = lubridate::year(date)) %>% dplyr::inner_join(col_event_date) m_d <- mgcv::gam(density ~ s(yday, bs = "cc") + s(as.numeric(date), k = 4), data = m_d_d) # summary(m_d) n <- 362 m_d_p <- plot(m_d, n = n, pages = 1) list(mod = m_d, pred = m_d_p) } plot_density <- function(density_models){ require(ggplot2) m_d <- density_models ylim <- c(0,44) intercept_d <- c(m_d[[1]]$coefficients[1]) data_points <- m_d[[1]]$model %>% dplyr::rename(density = 1, yday = 2, date = 3) %>% dplyr::mutate(date = as.Date(date, 
origin = as.Date("1970-01-01")), yday = as.Date("2016-01-01") + yday) pd1 <- extract_fit(m_d[[2]][[1]]) %>% dplyr::mutate(fit = fit + intercept_d, fitmin = fitmin + intercept_d, fitmax = fitmax + intercept_d, x = as.Date("2016-01-01") + x) %>% ggplot(aes(x = x, y = fit)) + geom_point(data = data_points, mapping = aes(x = yday, y = density), size = 0.25, colour = "grey70") + geom_hline(yintercept = intercept_d, linetype = 2, colour = "grey25", size = 0.25) + geom_ribbon(aes(ymin = fitmin, ymax = fitmax), alpha = 0.25, fill = "grey50") + geom_line() + scale_x_date(date_labels = "%b", expand = c(0,0), name = "month", date_breaks = "2 month") + scale_y_continuous(limits = ylim, name = "crabs / hectare") + pub_theme() + labs(tag = "B.") pd2 <- extract_fit(m_d[[2]][[2]]) %>% dplyr::mutate(fit = fit + intercept_d, fitmin = fitmin + intercept_d, fitmax = fitmax + intercept_d, x = as.Date("1970-01-01") + x) %>% ggplot(aes(x = x, y = fit)) + geom_point(data = data_points, mapping = aes(x = date, y = density), size = 0.25, colour = "grey70") + geom_hline(yintercept = intercept_d, linetype = 2, colour = "grey25", size = 0.25) + # geom_point(data = m_d[[1]]$model, aes(y = density, x = as.Date("2016-01-01") + yday), size = 1, shape = 21, alpha = 0.5) + geom_ribbon(aes(ymin = fitmin, ymax = fitmax), alpha = 0.25, fill = "grey50") + geom_line() + scale_x_date(expand = c(0,0), name = "date") + scale_y_continuous(limits = ylim, name = "crabs / hectare") + pub_theme() + labs(tag = "A.") cowplot::plot_grid(pd2, pd1, ncol = 1, align = "hv") } get_density_selection_table <- function(detectability_abundance_model){ purrr::pmap( detectability_abundance_model, function(...){ models <- list(...) 
unmarked::modSel(unmarked::fitList(fits = models[1:4]), nullmod = "abu3")@Full} %>% tibble::rownames_to_column(var = "m")) %>% purrr::imap_dfr(~ dplyr::mutate(.x, date = .y)) } plot_density_models_aic <- function(model_comparison){ require(ggplot2) model_comparison %>% dplyr::mutate(model = forcats::fct_reorder(model, delta)) %>% ggplot(aes(x = delta)) + geom_histogram(binwidth = 1) + facet_grid(rows = vars(formula)) + pub_theme() + labs(x = "∆ AIC", title = "Delta AIC") } plot_density_models_rsquared <- function(model_comparison){ require(ggplot2) model_comparison %>% dplyr::filter(model != "abu3") %>% dplyr::mutate(model = forcats::fct_reorder(model, Rsq)) %>% ggplot(aes(x = Rsq)) + geom_histogram(binwidth = 0.01) + facet_grid(rows = vars(formula)) + pub_theme() + labs(x = "R-squared", title = "Nagelkerke's (1991) R-squared index") }
\name{DropEmpty} \alias{DropEmpty} \title{ \code{DropEmpty} pseudo-function } \description{ Pseudo-function to indicate that rows or columns containing no observations should be dropped. } \usage{ DropEmpty(empty = "", which = c("row", "col", "cell")) } \arguments{ \item{empty}{ String to use in empty cells. } \item{which}{ A vector indicating what should be dropped. See the Details below. } } \details{ If the \code{which} argument contains \code{"row"}, then any row in the table in which all cells are empty will be dropped. Similarly, if it contains \code{"col"}, empty columns will be dropped. If it contains \code{"cell"}, then cells in rows and columns that are not dropped will be set to the \code{empty} string. } \section{Pseudo-functions}{ This is a \dQuote{pseudo-function}: it takes the form of a function call, but is never actually called: it is handled specially by \code{\link{tabular}}. } \examples{ df <- data.frame(row = factor(1:10), value = rnorm(10)) subset <- df[sample(10, 5),, drop = FALSE] # Some rows did not get selected, so this looks ugly tabular(row ~ value*mean, data = subset) # This only shows rows with data in them tabular(row*DropEmpty() ~ value*mean, data = subset) # This shows empty cells as "(empty)" tabular(row*DropEmpty("(empty)", "cell") ~ value*mean, data = subset) }
/man/DropEmpty.Rd
no_license
dmurdoch/tables
R
false
false
1,335
rd
\name{DropEmpty} \alias{DropEmpty} \title{ \code{DropEmpty} pseudo-function } \description{ Pseudo-function to indicate that rows or columns containing no observations should be dropped. } \usage{ DropEmpty(empty = "", which = c("row", "col", "cell")) } \arguments{ \item{empty}{ String to use in empty cells. } \item{which}{ A vector indicating what should be dropped. See the Details below. } } \details{ If the \code{which} argument contains \code{"row"}, then any row in the table in which all cells are empty will be dropped. Similarly, if it contains \code{"col"}, empty columns will be dropped. If it contains \code{"cell"}, then cells in rows and columns that are not dropped will be set to the \code{empty} string. } \section{Pseudo-functions}{ This is a \dQuote{pseudo-function}: it takes the form of a function call, but is never actually called: it is handled specially by \code{\link{tabular}}. } \examples{ df <- data.frame(row = factor(1:10), value = rnorm(10)) subset <- df[sample(10, 5),, drop = FALSE] # Some rows did not get selected, so this looks ugly tabular(row ~ value*mean, data = subset) # This only shows rows with data in them tabular(row*DropEmpty() ~ value*mean, data = subset) # This shows empty cells as "(empty)" tabular(row*DropEmpty("(empty)", "cell") ~ value*mean, data = subset) }
# global.R: loads the packages and spatial inputs shared by the SPI/SPEI
# drought-indicator Shiny apps. Every object created here is a top-level
# global consumed by the app's ui/server code.

library(shiny)
library(leaflet)
library(leaflet.extras)
library(scales)
library(shinycssloaders)
library(sf)
library(raster)
library(htmltools)
library(rgdal)
library(dplyr)
library(zoo)
library(rowr)
library(precintcon)
library(gridExtra)
library(fitdistrplus)
library(tictoc)
library(ncdf4)
library(lubridate)
library(plotly)

#SPI data
# Current SPI rasters at the 30/60/90/180/300 accumulation windows
# (presumably days — TODO confirm against the app that produces the .tif files).
current_spi_30 = raster::raster("../spi_app/maps/current_spi/current_spi_30.tif")
current_spi_60 = raster::raster("../spi_app/maps/current_spi/current_spi_60.tif")
current_spi_90 = raster::raster("../spi_app/maps/current_spi/current_spi_90.tif")
current_spi_180 = raster::raster("../spi_app/maps/current_spi/current_spi_180.tif")
current_spi_300 = raster::raster("../spi_app/maps/current_spi/current_spi_300.tif")

# SPI vector layers aggregated by watershed and by county (shapefiles).
watersheds_30 = st_read("../spi_app/shp/current_spi/current_spi_watershed_30.shp")
watersheds_60 = st_read("../spi_app/shp/current_spi/current_spi_watershed_60.shp")
watersheds_90 = st_read("../spi_app/shp/current_spi/current_spi_watershed_90.shp")
watersheds_180 = st_read("../spi_app/shp/current_spi/current_spi_watershed_180.shp")
watersheds_300 = st_read("../spi_app/shp/current_spi/current_spi_watershed_300.shp")

county_30 = st_read("../spi_app/shp/current_spi/current_spi_county_30.shp")
county_60 = st_read("../spi_app/shp/current_spi/current_spi_county_60.shp")
county_90 = st_read("../spi_app/shp/current_spi/current_spi_county_90.shp")
county_180 = st_read("../spi_app/shp/current_spi/current_spi_county_180.shp")
county_300 = st_read("../spi_app/shp/current_spi/current_spi_county_300.shp")

#SPEI Data
current_spei_30 = raster::raster("../spei_app/maps/current_spei/current_spei_30.tif")
current_spei_60 = raster::raster("../spei_app/maps/current_spei/current_spei_60.tif")
current_spei_90 = raster::raster("../spei_app/maps/current_spei/current_spei_90.tif")
current_spei_180 = raster::raster("../spei_app/maps/current_spei/current_spei_180.tif")
current_spei_300 = raster::raster("../spei_app/maps/current_spei/current_spei_300.tif")

# NOTE(review): the SPEI layers below reuse the SPI variable names
# (watersheds_* and county_*), so the SPI shapefile objects loaded above are
# overwritten and unreachable after this point. Confirm whether the SPI
# vector layers are still needed; if so they should get distinct names
# (e.g. spi_watersheds_30) in both this file and the app code.
watersheds_30 = st_read("../spei_app/shp/current_spei/current_spei_watershed_30.shp")
watersheds_60 = st_read("../spei_app/shp/current_spei/current_spei_watershed_60.shp")
watersheds_90 = st_read("../spei_app/shp/current_spei/current_spei_watershed_90.shp")
watersheds_180 = st_read("../spei_app/shp/current_spei/current_spei_watershed_180.shp")
watersheds_300 = st_read("../spei_app/shp/current_spei/current_spei_watershed_300.shp")

county_30 = st_read("../spei_app/shp/current_spei/current_spei_county_30.shp")
county_60 = st_read("../spei_app/shp/current_spei/current_spei_county_60.shp")
county_90 = st_read("../spei_app/shp/current_spei/current_spei_county_90.shp")
county_180 = st_read("../spei_app/shp/current_spei/current_spei_county_180.shp")
county_300 = st_read("../spei_app/shp/current_spei/current_spei_county_300.shp")
/spi_app/R/global.R
no_license
LMXB/drought_indicators
R
false
false
2,839
r
# global.R: loads the packages and spatial inputs shared by the SPI/SPEI
# drought-indicator Shiny apps. Every object created here is a top-level
# global consumed by the app's ui/server code.

library(shiny)
library(leaflet)
library(leaflet.extras)
library(scales)
library(shinycssloaders)
library(sf)
library(raster)
library(htmltools)
library(rgdal)
library(dplyr)
library(zoo)
library(rowr)
library(precintcon)
library(gridExtra)
library(fitdistrplus)
library(tictoc)
library(ncdf4)
library(lubridate)
library(plotly)

#SPI data
# Current SPI rasters at the 30/60/90/180/300 accumulation windows
# (presumably days — TODO confirm against the app that produces the .tif files).
current_spi_30 = raster::raster("../spi_app/maps/current_spi/current_spi_30.tif")
current_spi_60 = raster::raster("../spi_app/maps/current_spi/current_spi_60.tif")
current_spi_90 = raster::raster("../spi_app/maps/current_spi/current_spi_90.tif")
current_spi_180 = raster::raster("../spi_app/maps/current_spi/current_spi_180.tif")
current_spi_300 = raster::raster("../spi_app/maps/current_spi/current_spi_300.tif")

# SPI vector layers aggregated by watershed and by county (shapefiles).
watersheds_30 = st_read("../spi_app/shp/current_spi/current_spi_watershed_30.shp")
watersheds_60 = st_read("../spi_app/shp/current_spi/current_spi_watershed_60.shp")
watersheds_90 = st_read("../spi_app/shp/current_spi/current_spi_watershed_90.shp")
watersheds_180 = st_read("../spi_app/shp/current_spi/current_spi_watershed_180.shp")
watersheds_300 = st_read("../spi_app/shp/current_spi/current_spi_watershed_300.shp")

county_30 = st_read("../spi_app/shp/current_spi/current_spi_county_30.shp")
county_60 = st_read("../spi_app/shp/current_spi/current_spi_county_60.shp")
county_90 = st_read("../spi_app/shp/current_spi/current_spi_county_90.shp")
county_180 = st_read("../spi_app/shp/current_spi/current_spi_county_180.shp")
county_300 = st_read("../spi_app/shp/current_spi/current_spi_county_300.shp")

#SPEI Data
current_spei_30 = raster::raster("../spei_app/maps/current_spei/current_spei_30.tif")
current_spei_60 = raster::raster("../spei_app/maps/current_spei/current_spei_60.tif")
current_spei_90 = raster::raster("../spei_app/maps/current_spei/current_spei_90.tif")
current_spei_180 = raster::raster("../spei_app/maps/current_spei/current_spei_180.tif")
current_spei_300 = raster::raster("../spei_app/maps/current_spei/current_spei_300.tif")

# NOTE(review): the SPEI layers below reuse the SPI variable names
# (watersheds_* and county_*), so the SPI shapefile objects loaded above are
# overwritten and unreachable after this point. Confirm whether the SPI
# vector layers are still needed; if so they should get distinct names
# (e.g. spi_watersheds_30) in both this file and the app code.
watersheds_30 = st_read("../spei_app/shp/current_spei/current_spei_watershed_30.shp")
watersheds_60 = st_read("../spei_app/shp/current_spei/current_spei_watershed_60.shp")
watersheds_90 = st_read("../spei_app/shp/current_spei/current_spei_watershed_90.shp")
watersheds_180 = st_read("../spei_app/shp/current_spei/current_spei_watershed_180.shp")
watersheds_300 = st_read("../spei_app/shp/current_spei/current_spei_watershed_300.shp")

county_30 = st_read("../spei_app/shp/current_spei/current_spei_county_30.shp")
county_60 = st_read("../spei_app/shp/current_spei/current_spei_county_60.shp")
county_90 = st_read("../spei_app/shp/current_spei/current_spei_county_90.shp")
county_180 = st_read("../spei_app/shp/current_spei/current_spei_county_180.shp")
county_300 = st_read("../spei_app/shp/current_spei/current_spei_county_300.shp")
#' RACEseqR dataframe function
#'
#' Generate and output the RACE-seq dataframe for the specified start and end
#' positions of the binding region.
#'
#' @param str The start position of the binding region (positive number, >= 1).
#' @param end The end position of the binding region (positive number,
#'   must be >= \code{str}).
#' @param replicon_ref Path to the .fasta reference sequence used in the
#'   alignment.
#' @param filename The filename stem that will be used for the output data file.
#' @return A data.frame with columns reference, position, count, nucleotide,
#'   percentage and log10, restricted to rows \code{str:end}. As a side
#'   effect, the same table is written to a tab-separated file named
#'   "datafile_<filename>_<sample>.txt" in the working directory.
#' @keywords dataframe output
#' @export datafile_out
#' @examples
#' \dontrun{
#' datafile_out(1, 100, "replicon.fasta", "sample1")
#' }
datafile_out <- function(str, end, replicon_ref, filename) {
  # Validate arguments early with informative messages.
  if (missing(str)) stop("str value must be set")
  if (missing(end)) stop("end value must be set")
  # Direct numeric comparisons replace the original `str %in% seq(1, 1e7)`
  # test, which allocated a 10-million-element vector on every call and
  # silently rejected valid positions beyond 1e7.
  if (!is.numeric(str) || length(str) != 1 || str < 1) {
    stop("str value must be >= 1")
  }
  if (!is.numeric(end) || length(end) != 1 || end < 1) {
    stop("end value must be >= 1")
  }
  if (end < str) stop("end value must be >= str")

  # A reference sequence is required.
  if (missing(replicon_ref)) stop("No input .fasta reference file available")

  # Read the reference and split it into one upper-case character per row.
  # readBStringSet() comes from the Bioconductor package Biostrings;
  # namespaced so the function works without library(Biostrings).
  nt_reference <- strsplit(toString(Biostrings::readBStringSet(replicon_ref)),
                           NULL, fixed = TRUE)
  nt_reference <- data.frame(lapply(nt_reference, toupper),
                             stringsAsFactors = FALSE)

  # Locate exactly one alignment output file (plain text) in the working
  # directory; zero or multiple matches are ambiguous and abort.
  out_reads <- list.files(".", pattern = "alignment",
                          all.files = FALSE, full.names = FALSE)
  if (length(out_reads) == 0) {
    stop("No output alignment file available")
  } else if (length(out_reads) >= 2) {
    stop("More than one output alignment file available")
  } else {
    reads <- read.delim(out_reads, header = FALSE, quote = " ")
  }

  # Combine the aligned read counts with the reference nucleotides.
  dataframe <- data.frame(reads, nt_reference, stringsAsFactors = FALSE)

  # Column 5: per-position percentage of total counts.
  dataframe[, 5] <- dataframe[, 3] / sum(dataframe[, 3]) * 100
  # Column 6: log10 of counts; log10(0) gives -Inf, so reset those cells to 0.
  dataframe[, 6] <- log10(dataframe[, 3])
  dataframe[dataframe == -Inf] <- 0

  # Focus on the target (binding) region; can be adjusted per experiment.
  binding_region <- dataframe[str:end, ]
  colnames(binding_region) <- c("reference", "position", "count",
                                "nucleotide", "percentage", "log10")

  # Derive the sample name from the alignment filename (second "_" token).
  # file_path_sans_ext() lives in the base-R "tools" package; namespaced so
  # the function no longer depends on library(tools) having been attached.
  out_name <- tools::file_path_sans_ext(strsplit(out_reads, "_")[[1]][[2]])
  write.table(binding_region,
              file = paste0("datafile_", filename, "_", out_name, ".txt"),
              sep = "\t",
              col.names = c("reference", "position", "count",
                            "nucleotide", "percentage", "log10"),
              row.names = FALSE, quote = FALSE)
  binding_region
}
/R/df_out.r
no_license
pantastheo/RACEseqR
R
false
false
2,384
r
#' RACEseqR dataframe function
#'
#' Generate and output the RACE-seq dataframe for the specified start and end
#' positions of the binding region.
#'
#' @param str The start position of the binding region (positive number, >= 1).
#' @param end The end position of the binding region (positive number,
#'   must be >= \code{str}).
#' @param replicon_ref Path to the .fasta reference sequence used in the
#'   alignment.
#' @param filename The filename stem that will be used for the output data file.
#' @return A data.frame with columns reference, position, count, nucleotide,
#'   percentage and log10, restricted to rows \code{str:end}. As a side
#'   effect, the same table is written to a tab-separated file named
#'   "datafile_<filename>_<sample>.txt" in the working directory.
#' @keywords dataframe output
#' @export datafile_out
#' @examples
#' \dontrun{
#' datafile_out(1, 100, "replicon.fasta", "sample1")
#' }
datafile_out <- function(str, end, replicon_ref, filename) {
  # Validate arguments early with informative messages.
  if (missing(str)) stop("str value must be set")
  if (missing(end)) stop("end value must be set")
  # Direct numeric comparisons replace the original `str %in% seq(1, 1e7)`
  # test, which allocated a 10-million-element vector on every call and
  # silently rejected valid positions beyond 1e7.
  if (!is.numeric(str) || length(str) != 1 || str < 1) {
    stop("str value must be >= 1")
  }
  if (!is.numeric(end) || length(end) != 1 || end < 1) {
    stop("end value must be >= 1")
  }
  if (end < str) stop("end value must be >= str")

  # A reference sequence is required.
  if (missing(replicon_ref)) stop("No input .fasta reference file available")

  # Read the reference and split it into one upper-case character per row.
  # readBStringSet() comes from the Bioconductor package Biostrings;
  # namespaced so the function works without library(Biostrings).
  nt_reference <- strsplit(toString(Biostrings::readBStringSet(replicon_ref)),
                           NULL, fixed = TRUE)
  nt_reference <- data.frame(lapply(nt_reference, toupper),
                             stringsAsFactors = FALSE)

  # Locate exactly one alignment output file (plain text) in the working
  # directory; zero or multiple matches are ambiguous and abort.
  out_reads <- list.files(".", pattern = "alignment",
                          all.files = FALSE, full.names = FALSE)
  if (length(out_reads) == 0) {
    stop("No output alignment file available")
  } else if (length(out_reads) >= 2) {
    stop("More than one output alignment file available")
  } else {
    reads <- read.delim(out_reads, header = FALSE, quote = " ")
  }

  # Combine the aligned read counts with the reference nucleotides.
  dataframe <- data.frame(reads, nt_reference, stringsAsFactors = FALSE)

  # Column 5: per-position percentage of total counts.
  dataframe[, 5] <- dataframe[, 3] / sum(dataframe[, 3]) * 100
  # Column 6: log10 of counts; log10(0) gives -Inf, so reset those cells to 0.
  dataframe[, 6] <- log10(dataframe[, 3])
  dataframe[dataframe == -Inf] <- 0

  # Focus on the target (binding) region; can be adjusted per experiment.
  binding_region <- dataframe[str:end, ]
  colnames(binding_region) <- c("reference", "position", "count",
                                "nucleotide", "percentage", "log10")

  # Derive the sample name from the alignment filename (second "_" token).
  # file_path_sans_ext() lives in the base-R "tools" package; namespaced so
  # the function no longer depends on library(tools) having been attached.
  out_name <- tools::file_path_sans_ext(strsplit(out_reads, "_")[[1]][[2]])
  write.table(binding_region,
              file = paste0("datafile_", filename, "_", out_name, ".txt"),
              sep = "\t",
              col.names = c("reference", "position", "count",
                            "nucleotide", "percentage", "log10"),
              row.names = FALSE, quote = FALSE)
  binding_region
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/GAN.R \name{FixedGANSwitcher} \alias{FixedGANSwitcher} \title{FixedGANSwitcher} \usage{ FixedGANSwitcher(n_crit = 1, n_gen = 1) } \arguments{ \item{n_crit}{n_crit} \item{n_gen}{n_gen} } \description{ Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator. } \details{ }
/man/FixedGANSwitcher.Rd
permissive
ysnghr/fastai
R
false
true
385
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/GAN.R \name{FixedGANSwitcher} \alias{FixedGANSwitcher} \title{FixedGANSwitcher} \usage{ FixedGANSwitcher(n_crit = 1, n_gen = 1) } \arguments{ \item{n_crit}{n_crit} \item{n_gen}{n_gen} } \description{ Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator. } \details{ }
# --------------------------------------------------
# Carbon Cycle Exercises.
# Computer Lab.
# Author: Benjamin Stocker <beni@climate.unibe.ch>
# Responsible: Fortunat Joos <joos@climate.unibe.ch>
# Feb. 2012
# --------------------------------------------------

# 2-Box Model Atmosphere-Ocean: one atmospheric and one ocean carbon pool
# exchange CO2 once per yearly time step over the simulation length.

# Set parameters
lsim <- 1000 # length of simulation (years; one loop iteration per year)
#A_oc <- 3.6e+14
k_ex <- 60/270 # air-sea exchange coefficient, GtC/yr per ppm of pressure
               # difference (presumably 60 GtC/yr at 270 ppm — TODO confirm
               # against lecture notes Eq.5.46)
buffer <- 10 # ocean carbonate buffer factor scaling pCO2 response
             # (lecture notes Eq.5.37); NOTE(review): held constant here
gt2ppm <- 2.123 # conversion factor from GtC to ppmv

# 1000 GtC CO2 are emitted in year 100 into the atmosphere.
# Plot the evolution of the atmospheric perturbation
# (in units of ppm) over the course of 1000 years.
eC <- array(0,c(lsim)) # annual emissions in GtC/yr, one entry per year

##
# 2.a) Pulse emission in year 100
##
eC[100] <- 1000

# ----------------------------------------------------------------
# 2.b) Step change in emissions (from 0 to 10 GtC/yr after yr 100)
# eC[101:lsim] <- 10
# ----------------------------------------------------------------

# 2.c) Linearly increasing emissions (from 0.02 in year 1
# to 20 GtC in year 1000)
# eC <- seq(0.02,20,0.02)
# ----------------------------------------------------------------

# It is convenient to treat variables that are to be written
# into output separately, while the variable that changes
# its value from time step to time step should not have a
# time dimension, but changes its value with each time step.
pC_a_out <- array(NA,c(lsim)) # output: atmospheric CO2 partial pressure (ppm)
pC_o_out <- array(NA,c(lsim)) # output: oceanic CO2 partial pressure (ppm)

# Initialize variables (pre-industrial equilibrium: 270 ppm in both boxes)
pC_a <- 270
pC_o_init <- 270
C_o_init<- 5000 # initial ocean C-pool, in GtC
C_a <- 270*gt2ppm
C_o <- C_o_init
pC_o <- pC_o_init

# Integrate the two-box model forward one year at a time.
for (yr in seq(lsim)) {
  # Add emissions to atmosphere
  C_a <- C_a + eC[yr]
  # Update atmospheric CO2 partial pressure
  pC_a <- C_a/gt2ppm
  # Flux atmosphere -> ocean (lecture notes Eq.5.46)
  f_ao <- k_ex*(pC_a - pC_o)
  # Update inventories
  C_a <- C_a - f_ao
  C_o <- C_o + f_ao
  # Update oceanic CO2 partial pressure. The partial
  # pressure increase is scaled by the buffer factor
  # (lecture notes Eq.5.37)
  pC_o <- pC_o_init + buffer*(C_o - C_o_init)/C_o_init*pC_o_init
  # Copy atmospheric C inventory of this year to output
  pC_a_out[yr] <- pC_a
  pC_o_out[yr] <- pC_o
}
/lab_bern/2box.R
no_license
stineb/teaching
R
false
false
2,255
r
# --------------------------------------------------
# Carbon Cycle Exercises.
# Computer Lab.
# Author: Benjamin Stocker <beni@climate.unibe.ch>
# Responsible: Fortunat Joos <joos@climate.unibe.ch>
# Feb. 2012
# --------------------------------------------------

# 2-Box Model Atmosphere-Ocean: one atmospheric and one ocean carbon pool
# exchange CO2 once per yearly time step over the simulation length.

# Set parameters
lsim <- 1000 # length of simulation (years; one loop iteration per year)
#A_oc <- 3.6e+14
k_ex <- 60/270 # air-sea exchange coefficient, GtC/yr per ppm of pressure
               # difference (presumably 60 GtC/yr at 270 ppm — TODO confirm
               # against lecture notes Eq.5.46)
buffer <- 10 # ocean carbonate buffer factor scaling pCO2 response
             # (lecture notes Eq.5.37); NOTE(review): held constant here
gt2ppm <- 2.123 # conversion factor from GtC to ppmv

# 1000 GtC CO2 are emitted in year 100 into the atmosphere.
# Plot the evolution of the atmospheric perturbation
# (in units of ppm) over the course of 1000 years.
eC <- array(0,c(lsim)) # annual emissions in GtC/yr, one entry per year

##
# 2.a) Pulse emission in year 100
##
eC[100] <- 1000

# ----------------------------------------------------------------
# 2.b) Step change in emissions (from 0 to 10 GtC/yr after yr 100)
# eC[101:lsim] <- 10
# ----------------------------------------------------------------

# 2.c) Linearly increasing emissions (from 0.02 in year 1
# to 20 GtC in year 1000)
# eC <- seq(0.02,20,0.02)
# ----------------------------------------------------------------

# It is convenient to treat variables that are to be written
# into output separately, while the variable that changes
# its value from time step to time step should not have a
# time dimension, but changes its value with each time step.
pC_a_out <- array(NA,c(lsim)) # output: atmospheric CO2 partial pressure (ppm)
pC_o_out <- array(NA,c(lsim)) # output: oceanic CO2 partial pressure (ppm)

# Initialize variables (pre-industrial equilibrium: 270 ppm in both boxes)
pC_a <- 270
pC_o_init <- 270
C_o_init<- 5000 # initial ocean C-pool, in GtC
C_a <- 270*gt2ppm
C_o <- C_o_init
pC_o <- pC_o_init

# Integrate the two-box model forward one year at a time.
for (yr in seq(lsim)) {
  # Add emissions to atmosphere
  C_a <- C_a + eC[yr]
  # Update atmospheric CO2 partial pressure
  pC_a <- C_a/gt2ppm
  # Flux atmosphere -> ocean (lecture notes Eq.5.46)
  f_ao <- k_ex*(pC_a - pC_o)
  # Update inventories
  C_a <- C_a - f_ao
  C_o <- C_o + f_ao
  # Update oceanic CO2 partial pressure. The partial
  # pressure increase is scaled by the buffer factor
  # (lecture notes Eq.5.37)
  pC_o <- pC_o_init + buffer*(C_o - C_o_init)/C_o_init*pC_o_init
  # Copy atmospheric C inventory of this year to output
  pC_a_out[yr] <- pC_a
  pC_o_out[yr] <- pC_o
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{voting} \alias{voting} \alias{voting.tr} \alias{voting.te} \title{Congressional Voting Records Data Set} \format{\code{X} is a data frame with 434 congress members and 16 attributes: 16 key votes identified by the Congressional Quarterly Almanac (CQA). All attributes are binary values, with \code{1=} yes and \code{0=} no. \tabular{ll}{ \code{X1} \tab handicapped-infants \cr \code{X2} \tab water-project-cost-sharing \cr \code{X3} \tab adoption-of-the-budget-resolution \cr \code{X4} \tab physician-fee-freeze \cr \code{X5} \tab el-salvador-aid \cr \code{X6} \tab religious-groups-in-schools \cr \code{X7} \tab anti-satellite-test-ban \cr \code{X8} \tab aid-to-nicaraguan-contras \cr \code{X9} \tab mx-missile \cr \code{X10} \tab immigration \cr \code{X11} \tab synfuels-corporation-cutback \cr \code{X12} \tab education-spending \cr \code{X13} \tab superfund-right-to-sue \cr \code{X14} \tab crime \cr \code{X15} \tab duty-free-exports \cr \code{X16} \tab export-administration-act-south-africe \cr } \code{y} consists of factors which denote whether the congress member is a \code{Republican} or a \code{Democrat}. The training set \code{voting.tr} contains a randomly selected set of 300 subjects, and \code{voting.te} contains the remaining 134 subjects. \code{voting} contains all 434 objects.} \source{ Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support vector machines, 2001. Software available at \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm}. } \description{ 1984 United States Congressional Voting Records; Classify as Republican or Democrat. } \details{ This data set includes votes for each of the U.S. House of Representatives Congressmen on the 16 key votes identified by the CQA. 
The CQA lists nine different types of votes: voted for, paired for, and announced for (these three simplified to yea), voted against, paired against, and announced against (these three simplified to nay), voted present, voted present to avoid conflict of interest, and did not vote or otherwise make a position known (these three simplified to an unknown disposition). } \examples{ attach(voting) summary(X) summary(y) } \keyword{datasets}
/man/voting.Rd
no_license
cran/SVMMaj
R
false
true
2,294
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{voting} \alias{voting} \alias{voting.tr} \alias{voting.te} \title{Congressional Voting Records Data Set} \format{\code{X} is a data frame with 434 congress members and 16 attributes: 16 key votes identified by the Congressional Quarterly Almanac (CQA). All attributes are binary values, with \code{1=} yes and \code{0=} no. \tabular{ll}{ \code{X1} \tab handicapped-infants \cr \code{X2} \tab water-project-cost-sharing \cr \code{X3} \tab adoption-of-the-budget-resolution \cr \code{X4} \tab physician-fee-freeze \cr \code{X5} \tab el-salvador-aid \cr \code{X6} \tab religious-groups-in-schools \cr \code{X7} \tab anti-satellite-test-ban \cr \code{X8} \tab aid-to-nicaraguan-contras \cr \code{X9} \tab mx-missile \cr \code{X10} \tab immigration \cr \code{X11} \tab synfuels-corporation-cutback \cr \code{X12} \tab education-spending \cr \code{X13} \tab superfund-right-to-sue \cr \code{X14} \tab crime \cr \code{X15} \tab duty-free-exports \cr \code{X16} \tab export-administration-act-south-africe \cr } \code{y} consists of factors which denote whether the congress member is a \code{Republican} or a \code{Democrat}. The training set \code{voting.tr} contains a randomly selected set of 300 subjects, and \code{voting.te} contains the remaining 134 subjects. \code{voting} contains all 434 objects.} \source{ Chih-Chung Chang and Chih-Jen Lin, LIBSVM : a library for support vector machines, 2001. Software available at \url{http://www.csie.ntu.edu.tw/~cjlin/libsvm}. } \description{ 1984 United States Congressional Voting Records; Classify as Republican or Democrat. } \details{ This data set includes votes for each of the U.S. House of Representatives Congressmen on the 16 key votes identified by the CQA. 
The CQA lists nine different types of votes: voted for, paired for, and announced for (these three simplified to yea), voted against, paired against, and announced against (these three simplified to nay), voted present, voted present to avoid conflict of interest, and did not vote or otherwise make a position known (these three simplified to an unknown disposition). } \examples{ attach(voting) summary(X) summary(y) } \keyword{datasets}
# Week 2 exercises: normal approximation with the gapminder data.
# NOTE(review): install_github() runs on every execution of this script; it
# is kept for fidelity but normally belongs in a one-time setup step.
library(devtools)
install_github("jennybc/gapminder")
library(gapminder)
data(gapminder)
head(gapminder)

# Create a vector 'x' of the life expectancies of each country for the year 1952.
# Plot a histogram of these life expectancies to see the spread of the different countries.
x <- gapminder$lifeExp[gapminder$year == "1952"]
x
hist(x)

# Proportion of countries with life expectancy between 40 and 60 years.
mean(x <= 60) - mean(x <= 40)

# Custom function: proportion of values at or below a threshold q.
prop <- function(q) {
  mean(x <= q)
}
prop(40)

# Build a range of thresholds spanning the data. `length.out` is spelled in
# full: the original `length=` relied on partial argument matching.
qs <- seq(from = min(x), to = max(x), length.out = 20)

# Evaluate prop at each threshold.
props <- sapply(qs, prop)

# One-line equivalent with an inline anonymous function:
props <- sapply(qs, function(q) mean(x <= q))

# The empirical CDF already exists in R!
plot(ecdf(x))

# Population sizes are heavily skewed, so work on the log10 scale.
y <- gapminder$pop[gapminder$year == "1952"]
y
hist(y)
hist(log10(y))
sd(log10(y))
x <- log10(y)
qqnorm(x)

# Standardize: subtract the mean and divide by the standard deviation.
z <- (x - mean(x)) / sd(x)
qqnorm(z)
abline(0, 1)
tail(sort(z), 1)

# Normal CDF fitted to the data. Renamed from `F`, which shadowed R's
# built-in alias for FALSE and is unsafe to assign to.
normal_cdf <- function(q) pnorm(q, mean = mean(x), sd = sd(x))

# Proportion of countries with population less than 1 million
# (10^6, therefore log10(10^6) = 6).
normal_cdf(6)
n <- length(x) # number of countries

# Finally, using the normal approximation, estimate the number of countries
# that should have a log10 1952 population between 6 and 7.
prop67 <- normal_cdf(7) - normal_cdf(6)
prop67 * n

head(pnorm(x, mean = mean(x), sd = sd(x)))
n <- length(x)
ps <- (seq_len(n) - 0.5) / n

# We want the quantiles of the standard normal distribution associated with
# the following probabilities.
pnorm(ps)
head(sort(x), 1)
# Index of the minimum; which.min() is the idiomatic equivalent of
# match(head(sort(x), 1), x) and returns the same first position.
index <- which.min(x)
plot(qnorm(ps), sort(x))
/project1/week2.R
no_license
ragak/classes
R
false
false
1,695
r
# Week 2 exercises: normal approximation with the gapminder data.
# NOTE(review): install_github() runs on every execution of this script; it
# is kept for fidelity but normally belongs in a one-time setup step.
library(devtools)
install_github("jennybc/gapminder")
library(gapminder)
data(gapminder)
head(gapminder)

# Create a vector 'x' of the life expectancies of each country for the year 1952.
# Plot a histogram of these life expectancies to see the spread of the different countries.
x <- gapminder$lifeExp[gapminder$year == "1952"]
x
hist(x)

# Proportion of countries with life expectancy between 40 and 60 years.
mean(x <= 60) - mean(x <= 40)

# Custom function: proportion of values at or below a threshold q.
prop <- function(q) {
  mean(x <= q)
}
prop(40)

# Build a range of thresholds spanning the data. `length.out` is spelled in
# full: the original `length=` relied on partial argument matching.
qs <- seq(from = min(x), to = max(x), length.out = 20)

# Evaluate prop at each threshold.
props <- sapply(qs, prop)

# One-line equivalent with an inline anonymous function:
props <- sapply(qs, function(q) mean(x <= q))

# The empirical CDF already exists in R!
plot(ecdf(x))

# Population sizes are heavily skewed, so work on the log10 scale.
y <- gapminder$pop[gapminder$year == "1952"]
y
hist(y)
hist(log10(y))
sd(log10(y))
x <- log10(y)
qqnorm(x)

# Standardize: subtract the mean and divide by the standard deviation.
z <- (x - mean(x)) / sd(x)
qqnorm(z)
abline(0, 1)
tail(sort(z), 1)

# Normal CDF fitted to the data. Renamed from `F`, which shadowed R's
# built-in alias for FALSE and is unsafe to assign to.
normal_cdf <- function(q) pnorm(q, mean = mean(x), sd = sd(x))

# Proportion of countries with population less than 1 million
# (10^6, therefore log10(10^6) = 6).
normal_cdf(6)
n <- length(x) # number of countries

# Finally, using the normal approximation, estimate the number of countries
# that should have a log10 1952 population between 6 and 7.
prop67 <- normal_cdf(7) - normal_cdf(6)
prop67 * n

head(pnorm(x, mean = mean(x), sd = sd(x)))
n <- length(x)
ps <- (seq_len(n) - 0.5) / n

# We want the quantiles of the standard normal distribution associated with
# the following probabilities.
pnorm(ps)
head(sort(x), 1)
# Index of the minimum; which.min() is the idiomatic equivalent of
# match(head(sort(x), 1), x) and returns the same first position.
index <- which.min(x)
plot(qnorm(ps), sort(x))
<?xml version="1.0" encoding="utf-8"?> <serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="AzureMultipleDeploy.CloudService" generation="1" functional="0" release="0" Id="24f668c8-97b5-479d-9eb5-a7dab4940268" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM"> <groups> <group name="AzureMultipleDeploy.CloudServiceGroup" generation="1" functional="0" release="0"> <componentports> <inPort name="AzureMultipleDeploy.WebRole:Endpoint1" protocol="http"> <inToChannel> <lBChannelMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/LB:AzureMultipleDeploy.WebRole:Endpoint1" /> </inToChannel> </inPort> </componentports> <settings> <aCS name="AzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue=""> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" /> </maps> </aCS> <aCS name="AzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue=""> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </maps> </aCS> <aCS name="AzureMultipleDeploy.WebRoleInstances" defaultValue="[1,1,1]"> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRoleInstances" /> </maps> </aCS> </settings> <channels> <lBChannel name="LB:AzureMultipleDeploy.WebRole:Endpoint1"> <toPorts> <inPortMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/Endpoint1" /> </toPorts> </lBChannel> </channels> <maps> <map name="MapAzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" kind="Identity"> <setting> <aCSMoniker 
name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/APPINSIGHTS_INSTRUMENTATIONKEY" /> </setting> </map> <map name="MapAzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity"> <setting> <aCSMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </setting> </map> <map name="MapAzureMultipleDeploy.WebRoleInstances" kind="Identity"> <setting> <sCSPolicyIDMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleInstances" /> </setting> </map> </maps> <components> <groupHascomponents> <role name="AzureMultipleDeploy.WebRole" generation="1" functional="0" release="0" software="C:\Git\AzureMultipleDeploy\AzureMultipleDeploy.CloudService\csx\Release\roles\AzureMultipleDeploy.WebRole" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="-1" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2"> <componentports> <inPort name="Endpoint1" protocol="http" portRanges="80" /> </componentports> <settings> <aCS name="APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue="" /> <aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" /> <aCS name="__ModelData" defaultValue="&lt;m role=&quot;AzureMultipleDeploy.WebRole&quot; xmlns=&quot;urn:azure:m:v1&quot;&gt;&lt;r name=&quot;AzureMultipleDeploy.WebRole&quot;&gt;&lt;e name=&quot;Endpoint1&quot; /&gt;&lt;/r&gt;&lt;/m&gt;" /> </settings> <resourcereferences> <resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" /> <resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" /> </resourcereferences> </role> <sCSPolicy> <sCSPolicyIDMoniker 
name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleInstances" /> <sCSPolicyUpdateDomainMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleUpgradeDomains" /> <sCSPolicyFaultDomainMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleFaultDomains" /> </sCSPolicy> </groupHascomponents> </components> <sCSPolicy> <sCSPolicyUpdateDomain name="AzureMultipleDeploy.WebRoleUpgradeDomains" defaultPolicy="[5,5,5]" /> <sCSPolicyFaultDomain name="AzureMultipleDeploy.WebRoleFaultDomains" defaultPolicy="[2,2,2]" /> <sCSPolicyID name="AzureMultipleDeploy.WebRoleInstances" defaultPolicy="[1,1,1]" /> </sCSPolicy> </group> </groups> <implements> <implementation Id="d903dc28-27f1-4e01-a302-62dafff51fa7" ref="Microsoft.RedDog.Contract\ServiceContract\AzureMultipleDeploy.CloudServiceContract@ServiceDefinition"> <interfacereferences> <interfaceReference Id="d769fc9a-1eee-4e94-93c9-794d143bfde2" ref="Microsoft.RedDog.Contract\Interface\AzureMultipleDeploy.WebRole:Endpoint1@ServiceDefinition"> <inPort> <inPortMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole:Endpoint1" /> </inPort> </interfaceReference> </interfacereferences> </implementation> </implements> </serviceModel>
/AzureMultipleDeploy.CloudService/csx/Release/ServiceDefinition.rd
no_license
michaeldeongreen/AzureMultipleDeploy
R
false
false
6,123
rd
<?xml version="1.0" encoding="utf-8"?> <serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="AzureMultipleDeploy.CloudService" generation="1" functional="0" release="0" Id="24f668c8-97b5-479d-9eb5-a7dab4940268" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM"> <groups> <group name="AzureMultipleDeploy.CloudServiceGroup" generation="1" functional="0" release="0"> <componentports> <inPort name="AzureMultipleDeploy.WebRole:Endpoint1" protocol="http"> <inToChannel> <lBChannelMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/LB:AzureMultipleDeploy.WebRole:Endpoint1" /> </inToChannel> </inPort> </componentports> <settings> <aCS name="AzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue=""> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" /> </maps> </aCS> <aCS name="AzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue=""> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </maps> </aCS> <aCS name="AzureMultipleDeploy.WebRoleInstances" defaultValue="[1,1,1]"> <maps> <mapMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/MapAzureMultipleDeploy.WebRoleInstances" /> </maps> </aCS> </settings> <channels> <lBChannel name="LB:AzureMultipleDeploy.WebRole:Endpoint1"> <toPorts> <inPortMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/Endpoint1" /> </toPorts> </lBChannel> </channels> <maps> <map name="MapAzureMultipleDeploy.WebRole:APPINSIGHTS_INSTRUMENTATIONKEY" kind="Identity"> <setting> <aCSMoniker 
name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/APPINSIGHTS_INSTRUMENTATIONKEY" /> </setting> </map> <map name="MapAzureMultipleDeploy.WebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity"> <setting> <aCSMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </setting> </map> <map name="MapAzureMultipleDeploy.WebRoleInstances" kind="Identity"> <setting> <sCSPolicyIDMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleInstances" /> </setting> </map> </maps> <components> <groupHascomponents> <role name="AzureMultipleDeploy.WebRole" generation="1" functional="0" release="0" software="C:\Git\AzureMultipleDeploy\AzureMultipleDeploy.CloudService\csx\Release\roles\AzureMultipleDeploy.WebRole" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="-1" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2"> <componentports> <inPort name="Endpoint1" protocol="http" portRanges="80" /> </componentports> <settings> <aCS name="APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue="" /> <aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" /> <aCS name="__ModelData" defaultValue="&lt;m role=&quot;AzureMultipleDeploy.WebRole&quot; xmlns=&quot;urn:azure:m:v1&quot;&gt;&lt;r name=&quot;AzureMultipleDeploy.WebRole&quot;&gt;&lt;e name=&quot;Endpoint1&quot; /&gt;&lt;/r&gt;&lt;/m&gt;" /> </settings> <resourcereferences> <resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" /> <resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" /> </resourcereferences> </role> <sCSPolicy> <sCSPolicyIDMoniker 
name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleInstances" /> <sCSPolicyUpdateDomainMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleUpgradeDomains" /> <sCSPolicyFaultDomainMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRoleFaultDomains" /> </sCSPolicy> </groupHascomponents> </components> <sCSPolicy> <sCSPolicyUpdateDomain name="AzureMultipleDeploy.WebRoleUpgradeDomains" defaultPolicy="[5,5,5]" /> <sCSPolicyFaultDomain name="AzureMultipleDeploy.WebRoleFaultDomains" defaultPolicy="[2,2,2]" /> <sCSPolicyID name="AzureMultipleDeploy.WebRoleInstances" defaultPolicy="[1,1,1]" /> </sCSPolicy> </group> </groups> <implements> <implementation Id="d903dc28-27f1-4e01-a302-62dafff51fa7" ref="Microsoft.RedDog.Contract\ServiceContract\AzureMultipleDeploy.CloudServiceContract@ServiceDefinition"> <interfacereferences> <interfaceReference Id="d769fc9a-1eee-4e94-93c9-794d143bfde2" ref="Microsoft.RedDog.Contract\Interface\AzureMultipleDeploy.WebRole:Endpoint1@ServiceDefinition"> <inPort> <inPortMoniker name="/AzureMultipleDeploy.CloudService/AzureMultipleDeploy.CloudServiceGroup/AzureMultipleDeploy.WebRole:Endpoint1" /> </inPort> </interfaceReference> </interfacereferences> </implementation> </implements> </serviceModel>
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common get_config new_operation new_request send_request #' @include ecs_service.R NULL #' Creates a new capacity provider #' #' @description #' Creates a new capacity provider. Capacity providers are associated with #' an Amazon ECS cluster and are used in capacity provider strategies to #' facilitate cluster auto scaling. #' #' Only capacity providers using an Auto Scaling group can be created. #' Amazon ECS tasks on AWS Fargate use the `FARGATE` and `FARGATE_SPOT` #' capacity providers which are already created and available to all #' accounts in Regions supported by AWS Fargate. #' #' @usage #' ecs_create_capacity_provider(name, autoScalingGroupProvider, tags) #' #' @param name &#91;required&#93; The name of the capacity provider. Up to 255 characters are allowed, #' including letters (upper and lowercase), numbers, underscores, and #' hyphens. The name cannot be prefixed with "`aws`", "`ecs`", or #' "`fargate`". #' @param autoScalingGroupProvider &#91;required&#93; The details of the Auto Scaling group for the capacity provider. #' @param tags The metadata that you apply to the capacity provider to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. 
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#'   such as a prefix for either keys or values as it is reserved for AWS
#'   use. You cannot edit or delete tag keys or values with this prefix.
#'   Tags with this prefix do not count against your tags per resource
#'   limit.
#'
#' @section Request syntax:
#' ```
#' svc$create_capacity_provider(
#'   name = "string",
#'   autoScalingGroupProvider = list(
#'     autoScalingGroupArn = "string",
#'     managedScaling = list(
#'       status = "ENABLED"|"DISABLED",
#'       targetCapacity = 123,
#'       minimumScalingStepSize = 123,
#'       maximumScalingStepSize = 123,
#'       instanceWarmupPeriod = 123
#'     ),
#'     managedTerminationProtection = "ENABLED"|"DISABLED"
#'   ),
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_create_capacity_provider
ecs_create_capacity_provider <- function(name, autoScalingGroupProvider,
                                         tags = NULL) {
  # Describe the CreateCapacityProvider wire operation; every ECS action is
  # a POST against "/" with no paginator.
  operation <- new_operation(
    name = "CreateCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the request shape, and prepare the
  # matching response shape for deserialization.
  request_shape <- .ecs$create_capacity_provider_input(
    name = name,
    autoScalingGroupProvider = autoScalingGroupProvider,
    tags = tags
  )
  response_shape <- .ecs$create_capacity_provider_output()
  # Build a service client from the current config, then send the request
  # and return the parsed response (visibly).
  client <- .ecs$service(get_config())
  resp <- send_request(new_request(client, operation, request_shape, response_shape))
  resp
}
.ecs$operations$create_capacity_provider <- ecs_create_capacity_provider

#' Creates a new Amazon ECS cluster
#'
#' @description
#' Creates a new Amazon ECS cluster. By default, your account receives a
#' `default` cluster when you launch your first container instance.
#' However, you can create your own cluster with a unique name with the
#' `CreateCluster` action.
#' #' When you call the CreateCluster API operation, Amazon ECS attempts to #' create the Amazon ECS service-linked role for your account so that #' required resources in other AWS services can be managed on your behalf. #' However, if the IAM user that makes the call does not have permissions #' to create the service-linked role, it is not created. For more #' information, see [Using Service-Linked Roles for Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' @usage #' ecs_create_cluster(clusterName, tags, settings, capacityProviders, #' defaultCapacityProviderStrategy) #' #' @param clusterName The name of your cluster. If you do not specify a name for your cluster, #' you create a cluster named `default`. Up to 255 letters (uppercase and #' lowercase), numbers, and hyphens are allowed. #' @param tags The metadata that you apply to the cluster to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. 
You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. #' @param settings The setting to use when creating a cluster. This parameter is used to #' enable CloudWatch Container Insights for a cluster. If this value is #' specified, it will override the `containerInsights` value set with #' PutAccountSetting or PutAccountSettingDefault. #' @param capacityProviders The short name of one or more capacity providers to associate with the #' cluster. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created and not already associated #' with another cluster. New capacity providers can be created with the #' CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param defaultCapacityProviderStrategy The capacity provider strategy to use by default for the cluster. #' #' When creating a service or running a task on a cluster, if no capacity #' provider or launch type is specified then the default capacity provider #' strategy for the cluster is used. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. 
#' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' If a default capacity provider strategy is not defined for a cluster #' during creation, it can be defined later with the #' PutClusterCapacityProviders API operation. #' #' @section Request syntax: #' ``` #' svc$create_cluster( #' clusterName = "string", #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' settings = list( #' list( #' name = "containerInsights", #' value = "string" #' ) #' ), #' capacityProviders = list( #' "string" #' ), #' defaultCapacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example creates a cluster in your default region. 
#' svc$create_cluster(
#'   clusterName = "my_cluster"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_create_cluster
ecs_create_cluster <- function(clusterName = NULL, tags = NULL, settings = NULL,
                               capacityProviders = NULL,
                               defaultCapacityProviderStrategy = NULL) {
  # Describe the CreateCluster wire operation; every ECS action is a POST
  # against "/" with no paginator.
  operation <- new_operation(
    name = "CreateCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments (all optional for this action) into the
  # request shape, and prepare the matching response shape.
  request_shape <- .ecs$create_cluster_input(
    clusterName = clusterName,
    tags = tags,
    settings = settings,
    capacityProviders = capacityProviders,
    defaultCapacityProviderStrategy = defaultCapacityProviderStrategy
  )
  response_shape <- .ecs$create_cluster_output()
  # Build a service client from the current config, then send the request
  # and return the parsed response (visibly).
  client <- .ecs$service(get_config())
  resp <- send_request(new_request(client, operation, request_shape, response_shape))
  resp
}
.ecs$operations$create_cluster <- ecs_create_cluster

#' Runs and maintains a desired number of tasks from a specified task
#' definition
#'
#' @description
#' Runs and maintains a desired number of tasks from a specified task
#' definition. If the number of tasks running in a service drops below the
#' `desiredCount`, Amazon ECS runs another copy of the task in the
#' specified cluster. To update an existing service, see the UpdateService
#' action.
#'
#' In addition to maintaining the desired count of tasks in your service,
#' you can optionally run your service behind one or more load balancers.
#' The load balancers distribute traffic across the tasks that are
#' associated with the service. For more information, see [Service Load
#' Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' Tasks for services that *do not* use a load balancer are considered
#' healthy if they're in the `RUNNING` state.
Tasks for services that *do* #' use a load balancer are considered healthy if they're in the `RUNNING` #' state and the container instance that they're hosted on is reported as #' healthy by the load balancer. #' #' There are two service scheduler strategies available: #' #' - `REPLICA` - The replica scheduling strategy places and maintains the #' desired number of tasks across your cluster. By default, the service #' scheduler spreads tasks across Availability Zones. You can use task #' placement strategies and constraints to customize task placement #' decisions. For more information, see [Service Scheduler #' Concepts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' - `DAEMON` - The daemon scheduling strategy deploys exactly one task #' on each active container instance that meets all of the task #' placement constraints that you specify in your cluster. The service #' scheduler also evaluates the task placement constraints for running #' tasks and will stop tasks that do not meet the placement #' constraints. When using this strategy, you don't need to specify a #' desired number of tasks, a task placement strategy, or use Service #' Auto Scaling policies. For more information, see [Service Scheduler #' Concepts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' You can optionally specify a deployment configuration for your service. #' The deployment is triggered by changing properties, such as the task #' definition or the desired count of a service, with an UpdateService #' operation. The default value for a replica service for #' `minimumHealthyPercent` is 100\%. The default value for a daemon service #' for `minimumHealthyPercent` is 0\%. 
#' #' If a service is using the `ECS` deployment controller, the minimum #' healthy percent represents a lower limit on the number of tasks in a #' service that must remain in the `RUNNING` state during a deployment, as #' a percentage of the desired number of tasks (rounded up to the nearest #' integer), and while any container instances are in the `DRAINING` state #' if the service contains tasks using the EC2 launch type. This parameter #' enables you to deploy without using additional cluster capacity. For #' example, if your service has a desired number of four tasks and a #' minimum healthy percent of 50\%, the scheduler might stop two existing #' tasks to free up cluster capacity before starting two new tasks. Tasks #' for services that *do not* use a load balancer are considered healthy if #' they're in the `RUNNING` state. Tasks for services that *do* use a load #' balancer are considered healthy if they're in the `RUNNING` state and #' they're reported as healthy by the load balancer. The default value for #' minimum healthy percent is 100\%. #' #' If a service is using the `ECS` deployment controller, the **maximum #' percent** parameter represents an upper limit on the number of tasks in #' a service that are allowed in the `RUNNING` or `PENDING` state during a #' deployment, as a percentage of the desired number of tasks (rounded down #' to the nearest integer), and while any container instances are in the #' `DRAINING` state if the service contains tasks using the EC2 launch #' type. This parameter enables you to define the deployment batch size. #' For example, if your service has a desired number of four tasks and a #' maximum percent value of 200\%, the scheduler may start four new tasks #' before stopping the four older tasks (provided that the cluster #' resources required to do this are available). The default value for #' maximum percent is 200\%. 
#' #' If a service is using either the `CODE_DEPLOY` or `EXTERNAL` deployment #' controller types and tasks that use the EC2 launch type, the **minimum #' healthy percent** and **maximum percent** values are used only to define #' the lower and upper limit on the number of the tasks in the service that #' remain in the `RUNNING` state while the container instances are in the #' `DRAINING` state. If the tasks in the service use the Fargate launch #' type, the minimum healthy percent and maximum percent values aren't #' used, although they're currently visible when describing your service. #' #' When creating a service that uses the `EXTERNAL` deployment controller, #' you can specify only parameters that aren't controlled at the task set #' level. The only required parameter is the service name. You control your #' services using the CreateTaskSet operation. For more information, see #' [Amazon ECS Deployment #' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' When the service scheduler launches new tasks, it determines task #' placement in your cluster using the following logic: #' #' - Determine which of the container instances in your cluster can #' support your service's task definition (for example, they have the #' required CPU, memory, ports, and container instance attributes). #' #' - By default, the service scheduler attempts to balance tasks across #' Availability Zones in this manner (although you can choose a #' different placement strategy) with the `placementStrategy` #' parameter): #' #' - Sort the valid container instances, giving priority to instances #' that have the fewest number of running tasks for this service in #' their respective Availability Zone. For example, if zone A has #' one running service task and zones B and C each have zero, valid #' container instances in either zone B or C are considered optimal #' for placement. 
#' #' - Place the new service task on a valid container instance in an #' optimal Availability Zone (based on the previous steps), #' favoring container instances with the fewest number of running #' tasks for this service. #' #' @usage #' ecs_create_service(cluster, serviceName, taskDefinition, loadBalancers, #' serviceRegistries, desiredCount, clientToken, launchType, #' capacityProviderStrategy, platformVersion, role, #' deploymentConfiguration, placementConstraints, placementStrategy, #' networkConfiguration, healthCheckGracePeriodSeconds, schedulingStrategy, #' deploymentController, tags, enableECSManagedTags, propagateTags) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster on #' which to run your service. If you do not specify a cluster, the default #' cluster is assumed. #' @param serviceName &#91;required&#93; The name of your service. Up to 255 letters (uppercase and lowercase), #' numbers, and hyphens are allowed. Service names must be unique within a #' cluster, but you can have similarly named services in multiple clusters #' within a Region or across multiple Regions. #' @param taskDefinition The `family` and `revision` (`family:revision`) or full ARN of the task #' definition to run in your service. If a `revision` is not specified, the #' latest `ACTIVE` revision is used. #' #' A task definition must be specified if the service is using either the #' `ECS` or `CODE_DEPLOY` deployment controllers. #' @param loadBalancers A load balancer object representing the load balancers to use with your #' service. For more information, see [Service Load #' Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) #' in the *Amazon Elastic Container Service Developer Guide*. 
#' #' If the service is using the rolling update (`ECS`) deployment controller #' and using either an Application Load Balancer or Network Load Balancer, #' you must specify one or more target group ARNs to attach to the service. #' The service-linked role is required for services that make use of #' multiple target groups. For more information, see [Using Service-Linked #' Roles for Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If the service is using the `CODE_DEPLOY` deployment controller, the #' service is required to use either an Application Load Balancer or #' Network Load Balancer. When creating an AWS CodeDeploy deployment group, #' you specify two target groups (referred to as a `targetGroupPair`). #' During a deployment, AWS CodeDeploy determines which task set in your #' service has the status `PRIMARY` and associates one target group with #' it, and then associates the other target group with the replacement task #' set. The load balancer can also have up to two listeners: a required #' listener for production traffic and an optional listener that allows you #' perform validation tests with Lambda functions before routing production #' traffic to it. #' #' After you create a service using the `ECS` deployment controller, the #' load balancer name or target group ARN, container name, and container #' port specified in the service definition are immutable. If you are using #' the `CODE_DEPLOY` deployment controller, these values can be changed #' when updating the service. #' #' For Application Load Balancers and Network Load Balancers, this object #' must contain the load balancer target group ARN, the container name (as #' it appears in a container definition), and the container port to access #' from the load balancer. The load balancer name parameter must be #' omitted. 
When a task from this service is placed on a container #' instance, the container instance and port combination is registered as a #' target in the target group specified here. #' #' For Classic Load Balancers, this object must contain the load balancer #' name, the container name (as it appears in a container definition), and #' the container port to access from the load balancer. The target group #' ARN parameter must be omitted. When a task from this service is placed #' on a container instance, the container instance is registered with the #' load balancer specified here. #' #' Services with tasks that use the `awsvpc` network mode (for example, #' those with the Fargate launch type) only support Application Load #' Balancers and Network Load Balancers. Classic Load Balancers are not #' supported. Also, when you create any target groups for these services, #' you must choose `ip` as the target type, not `instance`, because tasks #' that use the `awsvpc` network mode are associated with an elastic #' network interface, not an Amazon EC2 instance. #' @param serviceRegistries The details of the service discovery registries to assign to this #' service. For more information, see [Service #' Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). #' #' Service discovery is supported for Fargate tasks if you are using #' platform version v1.1.0 or later. For more information, see [AWS Fargate #' Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). #' @param desiredCount The number of instantiations of the specified task definition to place #' and keep running on your cluster. #' #' This is required if `schedulingStrategy` is `REPLICA` or is not #' specified. If `schedulingStrategy` is `DAEMON` then this is not #' required. #' @param clientToken Unique, case-sensitive identifier that you provide to ensure the #' idempotency of the request. 
Up to 32 ASCII characters are allowed. #' @param launchType The launch type on which to run your service. For more information, see #' [Amazon ECS Launch #' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If a `launchType` is specified, the `capacityProviderStrategy` parameter #' must be omitted. #' @param capacityProviderStrategy The capacity provider strategy to use for the service. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If a `capacityProviderStrategy` is specified, the `launchType` parameter #' must be omitted. If no `capacityProviderStrategy` or `launchType` is #' specified, the `defaultCapacityProviderStrategy` for the cluster is #' used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param platformVersion The platform version that your tasks in the service are running on. A #' platform version is specified only for tasks using the Fargate launch #' type. 
If one isn't specified, the `LATEST` platform version is used by #' default. For more information, see [AWS Fargate Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param role The name or full Amazon Resource Name (ARN) of the IAM role that allows #' Amazon ECS to make calls to your load balancer on your behalf. This #' parameter is only permitted if you are using a load balancer with your #' service and your task definition does not use the `awsvpc` network mode. #' If you specify the `role` parameter, you must also specify a load #' balancer object with the `loadBalancers` parameter. #' #' If your account has already created the Amazon ECS service-linked role, #' that role is used by default for your service unless you specify a role #' here. The service-linked role is required if your task definition uses #' the `awsvpc` network mode or if the service is configured to use service #' discovery, an external deployment controller, multiple target groups, or #' Elastic Inference accelerators in which case you should not specify a #' role here. For more information, see [Using Service-Linked Roles for #' Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If your specified role has a path other than `/`, then you must either #' specify the full role ARN (this is recommended) or prefix the role name #' with the path. For example, if a role with the name `bar` has a path of #' `/foo/` then you would specify `/foo/bar` as the role name. For more #' information, see [Friendly Names and #' Paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) #' in the *IAM User Guide*. 
#' @param deploymentConfiguration Optional deployment parameters that control how many tasks run during #' the deployment and the ordering of stopping and starting tasks. #' @param placementConstraints An array of placement constraint objects to use for tasks in your #' service. You can specify a maximum of 10 constraints per task (this #' limit includes constraints in the task definition and those specified at #' runtime). #' @param placementStrategy The placement strategy objects to use for tasks in your service. You can #' specify a maximum of five strategy rules per service. #' @param networkConfiguration The network configuration for the service. This parameter is required #' for task definitions that use the `awsvpc` network mode to receive their #' own elastic network interface, and it is not supported for other network #' modes. For more information, see [Task #' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param healthCheckGracePeriodSeconds The period of time, in seconds, that the Amazon ECS service scheduler #' should ignore unhealthy Elastic Load Balancing target health checks #' after a task has first started. This is only used when your service is #' configured to use a load balancer. If your service has a load balancer #' defined and you don't specify a health check grace period value, the #' default value of `0` is used. #' #' If your service's tasks take a while to start and respond to Elastic #' Load Balancing health checks, you can specify a health check grace #' period of up to 2,147,483,647 seconds. During that time, the Amazon ECS #' service scheduler ignores health check status. This grace period can #' prevent the service scheduler from marking tasks as unhealthy and #' stopping them before they have time to come up. #' @param schedulingStrategy The scheduling strategy to use for the service. 
For more information, #' see #' [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). #' #' There are two service scheduler strategies available: #' #' - `REPLICA`-The replica scheduling strategy places and maintains the #' desired number of tasks across your cluster. By default, the service #' scheduler spreads tasks across Availability Zones. You can use task #' placement strategies and constraints to customize task placement #' decisions. This scheduler strategy is required if the service is #' using the `CODE_DEPLOY` or `EXTERNAL` deployment controller types. #' #' - `DAEMON`-The daemon scheduling strategy deploys exactly one task on #' each active container instance that meets all of the task placement #' constraints that you specify in your cluster. The service scheduler #' also evaluates the task placement constraints for running tasks and #' will stop tasks that do not meet the placement constraints. When #' you're using this strategy, you don't need to specify a desired #' number of tasks, a task placement strategy, or use Service Auto #' Scaling policies. #' #' Tasks using the Fargate launch type or the `CODE_DEPLOY` or #' `EXTERNAL` deployment controller types don't support the `DAEMON` #' scheduling strategy. #' @param deploymentController The deployment controller to use for the service. #' @param tags The metadata that you apply to the service to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. When a service is deleted, the tags are deleted as #' well. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. 
#' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. #' @param enableECSManagedTags Specifies whether to enable Amazon ECS managed tags for the tasks within #' the service. For more information, see [Tagging Your Amazon ECS #' Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param propagateTags Specifies whether to propagate the tags from the task definition or the #' service to the tasks in the service. If no value is specified, the tags #' are not propagated. Tags can only be propagated to the tasks within the #' service during service creation. To add tags to a task after service #' creation, use the TagResource API action. 
#' #' @section Request syntax: #' ``` #' svc$create_service( #' cluster = "string", #' serviceName = "string", #' taskDefinition = "string", #' loadBalancers = list( #' list( #' targetGroupArn = "string", #' loadBalancerName = "string", #' containerName = "string", #' containerPort = 123 #' ) #' ), #' serviceRegistries = list( #' list( #' registryArn = "string", #' port = 123, #' containerName = "string", #' containerPort = 123 #' ) #' ), #' desiredCount = 123, #' clientToken = "string", #' launchType = "EC2"|"FARGATE", #' capacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ), #' platformVersion = "string", #' role = "string", #' deploymentConfiguration = list( #' deploymentCircuitBreaker = list( #' enable = TRUE|FALSE, #' rollback = TRUE|FALSE #' ), #' maximumPercent = 123, #' minimumHealthyPercent = 123 #' ), #' placementConstraints = list( #' list( #' type = "distinctInstance"|"memberOf", #' expression = "string" #' ) #' ), #' placementStrategy = list( #' list( #' type = "random"|"spread"|"binpack", #' field = "string" #' ) #' ), #' networkConfiguration = list( #' awsvpcConfiguration = list( #' subnets = list( #' "string" #' ), #' securityGroups = list( #' "string" #' ), #' assignPublicIp = "ENABLED"|"DISABLED" #' ) #' ), #' healthCheckGracePeriodSeconds = 123, #' schedulingStrategy = "REPLICA"|"DAEMON", #' deploymentController = list( #' type = "ECS"|"CODE_DEPLOY"|"EXTERNAL" #' ), #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' enableECSManagedTags = TRUE|FALSE, #' propagateTags = "TASK_DEFINITION"|"SERVICE" #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example creates a service in your default region called #' # `ecs-simple-service`. The service uses the `hello_world` task #' # definition and it maintains 10 copies of that task. 
#' svc$create_service(
#'   desiredCount = 10L,
#'   serviceName = "ecs-simple-service",
#'   taskDefinition = "hello_world"
#' )
#'
#' # This example creates a service in your default region called
#' # `ecs-simple-service-elb`. The service uses the `ecs-demo` task
#' # definition and it maintains 10 copies of that task. You must reference
#' # an existing load balancer in the same region by its name.
#' svc$create_service(
#'   desiredCount = 10L,
#'   loadBalancers = list(
#'     list(
#'       containerName = "simple-app",
#'       containerPort = 80L,
#'       loadBalancerName = "EC2Contai-EcsElast-15DCDAURT3ZO2"
#'     )
#'   ),
#'   role = "ecsServiceRole",
#'   serviceName = "ecs-simple-service-elb",
#'   taskDefinition = "console-sample-app-static"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_create_service
ecs_create_service <- function(cluster = NULL, serviceName, taskDefinition = NULL,
                               loadBalancers = NULL, serviceRegistries = NULL,
                               desiredCount = NULL, clientToken = NULL,
                               launchType = NULL, capacityProviderStrategy = NULL,
                               platformVersion = NULL, role = NULL,
                               deploymentConfiguration = NULL,
                               placementConstraints = NULL,
                               placementStrategy = NULL,
                               networkConfiguration = NULL,
                               healthCheckGracePeriodSeconds = NULL,
                               schedulingStrategy = NULL,
                               deploymentController = NULL, tags = NULL,
                               enableECSManagedTags = NULL,
                               propagateTags = NULL) {
  # Describe the CreateService API call: plain POST to the service root.
  operation <- new_operation(
    name = "CreateService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize every caller-supplied argument into the request shape.
  request_params <- .ecs$create_service_input(
    cluster = cluster,
    serviceName = serviceName,
    taskDefinition = taskDefinition,
    loadBalancers = loadBalancers,
    serviceRegistries = serviceRegistries,
    desiredCount = desiredCount,
    clientToken = clientToken,
    launchType = launchType,
    capacityProviderStrategy = capacityProviderStrategy,
    platformVersion = platformVersion,
    role = role,
    deploymentConfiguration = deploymentConfiguration,
    placementConstraints = placementConstraints,
    placementStrategy = placementStrategy,
    networkConfiguration = networkConfiguration,
    healthCheckGracePeriodSeconds = healthCheckGracePeriodSeconds,
    schedulingStrategy = schedulingStrategy,
    deploymentController = deploymentController,
    tags = tags,
    enableECSManagedTags = enableECSManagedTags,
    propagateTags = propagateTags
  )
  response_shape <- .ecs$create_service_output()
  # Build and dispatch the request against the configured ECS endpoint;
  # the parsed response is the value of the last expression.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$create_service <- ecs_create_service

#' Create a task set in the specified cluster and service
#'
#' @description
#' Create a task set in the specified cluster and service. This is used
#' when a service uses the `EXTERNAL` deployment controller type. For more
#' information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_create_task_set(service, cluster, externalId, taskDefinition,
#'   networkConfiguration, loadBalancers, serviceRegistries, launchType,
#'   capacityProviderStrategy, platformVersion, scale, clientToken, tags)
#'
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service to
#' create the task set in.
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to create the task set in.
#' @param externalId An optional non-unique tag that identifies this task set in external
#' systems. If the task set is associated with a service discovery
#' registry, the tasks in this task set will have the
#' `ECS_TASK_SET_EXTERNAL_ID` AWS Cloud Map attribute set to the provided
#' value.
#' @param taskDefinition &#91;required&#93; The task definition for the tasks in the task set to use.
#' @param networkConfiguration
#' @param loadBalancers A load balancer object representing the load balancer to use with the
#' task set. The supported load balancer types are either an Application
#' Load Balancer or a Network Load Balancer.
#' @param serviceRegistries The details of the service discovery registries to assign to this task
#' set. For more information, see [Service
#' Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html).
#' @param launchType The launch type that new tasks in the task set will use. For more
#' information, see [Amazon ECS Launch
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' If a `launchType` is specified, the `capacityProviderStrategy` parameter
#' must be omitted.
#' @param capacityProviderStrategy The capacity provider strategy to use for the task set.
#'
#' A capacity provider strategy consists of one or more capacity providers
#' along with the `base` and `weight` to assign to them. A capacity
#' provider must be associated with the cluster to be used in a capacity
#' provider strategy. The PutClusterCapacityProviders API is used to
#' associate a capacity provider with a cluster. Only capacity providers
#' with an `ACTIVE` or `UPDATING` status can be used.
#'
#' If a `capacityProviderStrategy` is specified, the `launchType` parameter
#' must be omitted. If no `capacityProviderStrategy` or `launchType` is
#' specified, the `defaultCapacityProviderStrategy` for the cluster is
#' used.
#'
#' If specifying a capacity provider that uses an Auto Scaling group, the
#' capacity provider must already be created. New capacity providers can be
#' created with the CreateCapacityProvider API operation.
#'
#' To use a AWS Fargate capacity provider, specify either the `FARGATE` or
#' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers
#' are available to all accounts and only need to be associated with a
#' cluster to be used.
#'
#' The PutClusterCapacityProviders API operation is used to update the list
#' of available capacity providers for a cluster after the cluster is
#' created.
#' @param platformVersion The platform version that the tasks in the task set should use. A
#' platform version is specified only for tasks using the Fargate launch
#' type. If one isn't specified, the `LATEST` platform version is used by
#' default.
#' @param scale
#' @param clientToken Unique, case-sensitive identifier that you provide to ensure the
#' idempotency of the request. Up to 32 ASCII characters are allowed.
#' @param tags The metadata that you apply to the task set to help you categorize and
#' organize them. Each tag consists of a key and an optional value, both of
#' which you define. When a service is deleted, the tags are deleted as
#' well.
#'
#' The following basic restrictions apply to tags:
#'
#' - Maximum number of tags per resource - 50
#'
#' - For each resource, each tag key must be unique, and each tag key can
#'   have only one value.
#'
#' - Maximum key length - 128 Unicode characters in UTF-8
#'
#' - Maximum value length - 256 Unicode characters in UTF-8
#'
#' - If your tagging schema is used across multiple services and
#'   resources, remember that other services may have restrictions on
#'   allowed characters. Generally allowed characters are: letters,
#'   numbers, and spaces representable in UTF-8, and the following
#'   characters: + - = . \\_ : / @@.
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#'   such as a prefix for either keys or values as it is reserved for AWS
#'   use. You cannot edit or delete tag keys or values with this prefix.
#'   Tags with this prefix do not count against your tags per resource
#'   limit.
#'
#' @section Request syntax:
#' ```
#' svc$create_task_set(
#'   service = "string",
#'   cluster = "string",
#'   externalId = "string",
#'   taskDefinition = "string",
#'   networkConfiguration = list(
#'     awsvpcConfiguration = list(
#'       subnets = list(
#'         "string"
#'       ),
#'       securityGroups = list(
#'         "string"
#'       ),
#'       assignPublicIp = "ENABLED"|"DISABLED"
#'     )
#'   ),
#'   loadBalancers = list(
#'     list(
#'       targetGroupArn = "string",
#'       loadBalancerName = "string",
#'       containerName = "string",
#'       containerPort = 123
#'     )
#'   ),
#'   serviceRegistries = list(
#'     list(
#'       registryArn = "string",
#'       port = 123,
#'       containerName = "string",
#'       containerPort = 123
#'     )
#'   ),
#'   launchType = "EC2"|"FARGATE",
#'   capacityProviderStrategy = list(
#'     list(
#'       capacityProvider = "string",
#'       weight = 123,
#'       base = 123
#'     )
#'   ),
#'   platformVersion = "string",
#'   scale = list(
#'     value = 123.0,
#'     unit = "PERCENT"
#'   ),
#'   clientToken = "string",
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_create_task_set
ecs_create_task_set <- function(service, cluster, externalId = NULL,
                                taskDefinition, networkConfiguration = NULL,
                                loadBalancers = NULL, serviceRegistries = NULL,
                                launchType = NULL,
                                capacityProviderStrategy = NULL,
                                platformVersion = NULL, scale = NULL,
                                clientToken = NULL, tags = NULL) {
  # Describe the CreateTaskSet API call: plain POST to the service root.
  operation <- new_operation(
    name = "CreateTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize every caller-supplied argument into the request shape.
  request_params <- .ecs$create_task_set_input(
    service = service,
    cluster = cluster,
    externalId = externalId,
    taskDefinition = taskDefinition,
    networkConfiguration = networkConfiguration,
    loadBalancers = loadBalancers,
    serviceRegistries = serviceRegistries,
    launchType = launchType,
    capacityProviderStrategy = capacityProviderStrategy,
    platformVersion = platformVersion,
    scale = scale,
    clientToken = clientToken,
    tags = tags
  )
  response_shape <- .ecs$create_task_set_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$create_task_set <- ecs_create_task_set

#' Disables an account setting for a specified IAM user, IAM role, or the
#' root user for an account
#'
#' @description
#' Disables an account setting for a specified IAM user, IAM role, or the
#' root user for an account.
#'
#' @usage
#' ecs_delete_account_setting(name, principalArn)
#'
#' @param name &#91;required&#93; The resource name for which to disable the account setting. If
#' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS
#' services is affected. If `taskLongArnFormat` is specified, the ARN and
#' resource ID for your Amazon ECS tasks is affected. If
#' `containerInstanceLongArnFormat` is specified, the ARN and resource ID
#' for your Amazon ECS container instances is affected. If `awsvpcTrunking`
#' is specified, the ENI limit for your Amazon ECS container instances is
#' affected.
#' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the
#' root user. If you specify the root user, it disables the account setting
#' for all IAM users, IAM roles, and the root user of the account unless an
#' IAM user or role explicitly overrides these settings. If this field is
#' omitted, the setting is changed only for the authenticated user.
#'
#' @section Request syntax:
#' ```
#' svc$delete_account_setting(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   principalArn = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the account setting for your user for the specified
#' # resource type.
#' svc$delete_account_setting(
#'   name = "serviceLongArnFormat"
#' )
#'
#' # This example deletes the account setting for a specific IAM user or IAM
#' # role for the specified resource type. Only the root user can view or
#' # modify the account settings for another user.
#' svc$delete_account_setting(
#'   name = "containerInstanceLongArnFormat",
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_account_setting
ecs_delete_account_setting <- function(name, principalArn = NULL) {
  # Describe the DeleteAccountSetting API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteAccountSetting",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .ecs$delete_account_setting_input(
    name = name,
    principalArn = principalArn
  )
  response_shape <- .ecs$delete_account_setting_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_account_setting <- ecs_delete_account_setting

#' Deletes one or more custom attributes from an Amazon ECS resource
#'
#' @description
#' Deletes one or more custom attributes from an Amazon ECS resource.
#'
#' @usage
#' ecs_delete_attributes(cluster, attributes)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' contains the resource to delete attributes. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param attributes &#91;required&#93; The attributes to delete from your resource. You can specify up to 10
#' attributes per request. For custom attributes, specify the attribute
#' name and target ID, but do not specify the value. If you specify the
#' target ID using the short form, you must also specify the target type.
#'
#' @section Request syntax:
#' ```
#' svc$delete_attributes(
#'   cluster = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_attributes
ecs_delete_attributes <- function(cluster = NULL, attributes) {
  # Describe the DeleteAttributes API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .ecs$delete_attributes_input(
    cluster = cluster,
    attributes = attributes
  )
  response_shape <- .ecs$delete_attributes_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_attributes <- ecs_delete_attributes

#' Deletes the specified capacity provider
#'
#' @description
#' Deletes the specified capacity provider.
#'
#' The `FARGATE` and `FARGATE_SPOT` capacity providers are reserved and
#' cannot be deleted. You can disassociate them from a cluster using either
#' the PutClusterCapacityProviders API or by deleting the cluster.
#'
#' Prior to a capacity provider being deleted, the capacity provider must
#' be removed from the capacity provider strategy from all services. The
#' UpdateService API can be used to remove a capacity provider from a
#' service's capacity provider strategy. When updating a service, the
#' `forceNewDeployment` option can be used to ensure that any tasks using
#' the Amazon EC2 instance capacity provided by the capacity provider are
#' transitioned to use the capacity from the remaining capacity providers.
#' Only capacity providers that are not associated with a cluster can be
#' deleted. To remove a capacity provider from a cluster, you can either
#' use PutClusterCapacityProviders or delete the cluster.
#'
#' @usage
#' ecs_delete_capacity_provider(capacityProvider)
#'
#' @param capacityProvider &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the capacity
#' provider to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_capacity_provider(
#'   capacityProvider = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_capacity_provider
ecs_delete_capacity_provider <- function(capacityProvider) {
  # Describe the DeleteCapacityProvider API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied argument into the request shape.
  request_params <- .ecs$delete_capacity_provider_input(
    capacityProvider = capacityProvider
  )
  response_shape <- .ecs$delete_capacity_provider_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_capacity_provider <- ecs_delete_capacity_provider

#' Deletes the specified cluster
#'
#' @description
#' Deletes the specified cluster. The cluster will transition to the
#' `INACTIVE` state. Clusters with an `INACTIVE` status may remain
#' discoverable in your account for a period of time. However, this
#' behavior is subject to change in the future, so you should not rely on
#' `INACTIVE` clusters persisting.
#'
#' You must deregister all container instances from this cluster before you
#' may delete it. You can list the container instances in a cluster with
#' ListContainerInstances and deregister them with
#' DeregisterContainerInstance.
#'
#' @usage
#' ecs_delete_cluster(cluster)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster to
#' delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster(
#'   cluster = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes an empty cluster in your default region.
#' svc$delete_cluster(
#'   cluster = "my_cluster"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_cluster
ecs_delete_cluster <- function(cluster) {
  # Describe the DeleteCluster API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied argument into the request shape.
  request_params <- .ecs$delete_cluster_input(cluster = cluster)
  response_shape <- .ecs$delete_cluster_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_cluster <- ecs_delete_cluster

#' Deletes a specified service within a cluster
#'
#' @description
#' Deletes a specified service within a cluster. You can delete a service
#' if you have no running tasks in it and the desired task count is zero.
#' If the service is actively maintaining tasks, you cannot delete it, and
#' you must update the service to a desired task count of zero. For more
#' information, see UpdateService.
#'
#' When you delete a service, if there are still running tasks that require
#' cleanup, the service status moves from `ACTIVE` to `DRAINING`, and the
#' service is no longer visible in the console or in the ListServices API
#' operation. After all tasks have transitioned to either `STOPPING` or
#' `STOPPED` status, the service status moves from `DRAINING` to
#' `INACTIVE`. Services in the `DRAINING` or `INACTIVE` status can still be
#' viewed with the DescribeServices API operation. However, in the future,
#' `INACTIVE` services may be cleaned up and purged from Amazon ECS record
#' keeping, and DescribeServices calls on those services return a
#' `ServiceNotFoundException` error.
#'
#' If you attempt to create a new service with the same name as an existing
#' service in either `ACTIVE` or `DRAINING` status, you receive an error.
#'
#' @usage
#' ecs_delete_service(cluster, service, force)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to delete. If you do not specify a cluster, the
#' default cluster is assumed.
#' @param service &#91;required&#93; The name of the service to delete.
#' @param force If `true`, allows you to delete a service even if it has not been scaled
#' down to zero tasks. It is only necessary to use this if the service is
#' using the `REPLICA` scheduling strategy.
#'
#' @section Request syntax:
#' ```
#' svc$delete_service(
#'   cluster = "string",
#'   service = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the my-http-service service. The service must have
#' # a desired count and running count of 0 before you can delete it.
#' svc$delete_service(
#'   service = "my-http-service"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_service
ecs_delete_service <- function(cluster = NULL, service, force = NULL) {
  # Describe the DeleteService API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .ecs$delete_service_input(
    cluster = cluster,
    service = service,
    force = force
  )
  response_shape <- .ecs$delete_service_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_service <- ecs_delete_service

#' Deletes a specified task set within a service
#'
#' @description
#' Deletes a specified task set within a service. This is used when a
#' service uses the `EXTERNAL` deployment controller type. For more
#' information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_delete_task_set(cluster, service, taskSet, force)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in to delete.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' hosts the task set to delete.
#' @param taskSet &#91;required&#93; The task set ID or full Amazon Resource Name (ARN) of the task set to
#' delete.
#' @param force If `true`, this allows you to delete a task set even if it hasn't been
#' scaled down to zero.
#'
#' @section Request syntax:
#' ```
#' svc$delete_task_set(
#'   cluster = "string",
#'   service = "string",
#'   taskSet = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_task_set
ecs_delete_task_set <- function(cluster, service, taskSet, force = NULL) {
  # Describe the DeleteTaskSet API call: plain POST to the root.
  operation <- new_operation(
    name = "DeleteTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .ecs$delete_task_set_input(
    cluster = cluster,
    service = service,
    taskSet = taskSet,
    force = force
  )
  response_shape <- .ecs$delete_task_set_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$delete_task_set <- ecs_delete_task_set

#' Deregisters an Amazon ECS container instance from the specified cluster
#'
#' @description
#' Deregisters an Amazon ECS container instance from the specified cluster.
#' This instance is no longer available to run tasks.
#'
#' If you intend to use the container instance for some other purpose after
#' deregistration, you should stop all of the tasks running on the
#' container instance before deregistration. That prevents any orphaned
#' tasks from consuming resources.
#'
#' Deregistering a container instance removes the instance from a cluster,
#' but it does not terminate the EC2 instance. If you are finished using
#' the instance, be sure to terminate it in the Amazon EC2 console to stop
#' billing.
#' #' If you terminate a running container instance, Amazon ECS automatically #' deregisters the instance from your cluster (stopped container instances #' or instances with disconnected agents are not automatically deregistered #' when terminated). #' #' @usage #' ecs_deregister_container_instance(cluster, containerInstance, force) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that #' hosts the container instance to deregister. If you do not specify a #' cluster, the default cluster is assumed. #' @param containerInstance &#91;required&#93; The container instance ID or full ARN of the container instance to #' deregister. The ARN contains the `arn:aws:ecs` namespace, followed by #' the Region of the container instance, the AWS account ID of the #' container instance owner, the `container-instance` namespace, and then #' the container instance ID. For example, #' `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID`. #' @param force Forces the deregistration of the container instance. If you have tasks #' running on the container instance when you deregister it with the #' `force` option, these tasks remain running until you terminate the #' instance or the tasks stop through some other means, but they are #' orphaned (no longer monitored or accounted for by Amazon ECS). If an #' orphaned task on your container instance is part of an Amazon ECS #' service, then the service scheduler starts another copy of that task, on #' a different container instance if possible. #' #' Any containers in orphaned service tasks that are registered with a #' Classic Load Balancer or an Application Load Balancer target group are #' deregistered. They begin connection draining according to the settings #' on the load balancer or target group. 
#'
#' @section Request syntax:
#' ```
#' svc$deregister_container_instance(
#'   cluster = "string",
#'   containerInstance = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deregisters a container instance from the specified cluster
#' # in your default region. If there are still tasks running on the
#' # container instance, you must either stop those tasks before
#' # deregistering, or use the force option.
#' svc$deregister_container_instance(
#'   cluster = "default",
#'   containerInstance = "container_instance_UUID",
#'   force = TRUE
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_deregister_container_instance
ecs_deregister_container_instance <- function(cluster = NULL, containerInstance,
                                              force = NULL) {
  # Describe the DeregisterContainerInstance API call: plain POST to root.
  operation <- new_operation(
    name = "DeregisterContainerInstance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  request_params <- .ecs$deregister_container_instance_input(
    cluster = cluster,
    containerInstance = containerInstance,
    force = force
  )
  response_shape <- .ecs$deregister_container_instance_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$deregister_container_instance <- ecs_deregister_container_instance

#' Deregisters the specified task definition by family and revision
#'
#' @description
#' Deregisters the specified task definition by family and revision. Upon
#' deregistration, the task definition is marked as `INACTIVE`. Existing
#' tasks and services that reference an `INACTIVE` task definition continue
#' to run without disruption. Existing services that reference an
#' `INACTIVE` task definition can still scale up or down by modifying the
#' service's desired count.
#'
#' You cannot use an `INACTIVE` task definition to run new tasks or create
#' new services, and you cannot update an existing service to reference an
#' `INACTIVE` task definition. However, there may be up to a 10-minute
#' window following deregistration where these restrictions have not yet
#' taken effect.
#'
#' At this time, `INACTIVE` task definitions remain discoverable in your
#' account indefinitely. However, this behavior is subject to change in the
#' future, so you should not rely on `INACTIVE` task definitions persisting
#' beyond the lifecycle of any associated tasks and services.
#'
#' @usage
#' ecs_deregister_task_definition(taskDefinition)
#'
#' @param taskDefinition &#91;required&#93; The `family` and `revision` (`family:revision`) or full Amazon Resource
#' Name (ARN) of the task definition to deregister. You must specify a
#' `revision`.
#'
#' @section Request syntax:
#' ```
#' svc$deregister_task_definition(
#'   taskDefinition = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_deregister_task_definition
ecs_deregister_task_definition <- function(taskDefinition) {
  # Describe the DeregisterTaskDefinition API call: plain POST to root.
  operation <- new_operation(
    name = "DeregisterTaskDefinition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied argument into the request shape.
  request_params <- .ecs$deregister_task_definition_input(
    taskDefinition = taskDefinition
  )
  response_shape <- .ecs$deregister_task_definition_output()
  # Build and dispatch the request against the configured ECS endpoint.
  client <- .ecs$service(get_config())
  request <- new_request(client, operation, request_params, response_shape)
  send_request(request)
}
.ecs$operations$deregister_task_definition <- ecs_deregister_task_definition

#' Describes one or more of your capacity providers
#'
#' @description
#' Describes one or more of your capacity providers.
#'
#' @usage
#' ecs_describe_capacity_providers(capacityProviders, include, maxResults,
#'   nextToken)
#'
#' @param capacityProviders The short name or full Amazon Resource Name (ARN) of one or more
#' capacity providers. Up to `100` capacity providers can be described in
#' an action.
#' @param include Specifies whether or not you want to see the resource tags for the
#' capacity provider. If `TAGS` is specified, the tags are included in the
#' response. If this field is omitted, tags are not included in the
#' response.
#' @param maxResults The maximum number of account setting results returned by
#' `DescribeCapacityProviders` in paginated output. When this parameter is
#' used, `DescribeCapacityProviders` only returns `maxResults` results in a
#' single page along with a `nextToken` response element. The remaining
#' results of the initial request can be seen by sending another
#' `DescribeCapacityProviders` request with the returned `nextToken` value.
#' This value can be between 1 and 10. If this parameter is not used, then
#' `DescribeCapacityProviders` returns up to 10 results and a `nextToken`
#' value if applicable.
#' @param nextToken The `nextToken` value returned from a previous paginated
#' `DescribeCapacityProviders` request where `maxResults` was used and the
#' results exceeded the value of that parameter. Pagination continues from
#' the end of the previous results that returned the `nextToken` value.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' #' @section Request syntax: #' ``` #' svc$describe_capacity_providers( #' capacityProviders = list( #' "string" #' ), #' include = list( #' "TAGS" #' ), #' maxResults = 123, #' nextToken = "string" #' ) #' ``` #' #' @keywords internal #' #' @rdname ecs_describe_capacity_providers ecs_describe_capacity_providers <- function(capacityProviders = NULL, include = NULL, maxResults = NULL, nextToken = NULL) { op <- new_operation( name = "DescribeCapacityProviders", http_method = "POST", http_path = "/", paginator = list() ) input <- .ecs$describe_capacity_providers_input(capacityProviders = capacityProviders, include = include, maxResults = maxResults, nextToken = nextToken) output <- .ecs$describe_capacity_providers_output() config <- get_config() svc <- .ecs$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .ecs$operations$describe_capacity_providers <- ecs_describe_capacity_providers #' Describes one or more of your clusters #' #' @description #' Describes one or more of your clusters. #' #' @usage #' ecs_describe_clusters(clusters, include) #' #' @param clusters A list of up to 100 cluster names or full cluster Amazon Resource Name #' (ARN) entries. If you do not specify a cluster, the default cluster is #' assumed. #' @param include Whether to include additional information about your clusters in the #' response. If this field is omitted, the attachments, statistics, and #' tags are not included. #' #' If `ATTACHMENTS` is specified, the attachments for the container #' instances or tasks within the cluster are included. #' #' If `SETTINGS` is specified, the settings for the cluster are included. 
#'
#' If `STATISTICS` is specified, the following additional information,
#' separated by launch type, is included:
#'
#' - runningEC2TasksCount
#'
#' - runningFargateTasksCount
#'
#' - pendingEC2TasksCount
#'
#' - pendingFargateTasksCount
#'
#' - activeEC2ServiceCount
#'
#' - activeFargateServiceCount
#'
#' - drainingEC2ServiceCount
#'
#' - drainingFargateServiceCount
#'
#' If `TAGS` is specified, the metadata tags associated with the cluster
#' are included.
#'
#' @section Request syntax:
#' ```
#' svc$describe_clusters(
#'   clusters = list(
#'     "string"
#'   ),
#'   include = list(
#'     "ATTACHMENTS"|"SETTINGS"|"STATISTICS"|"TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified cluster in your
#' # default region.
#' svc$describe_clusters(
#'   clusters = list(
#'     "default"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_clusters
ecs_describe_clusters <- function(clusters = NULL, include = NULL) {
  # Describe the ECS DescribeClusters API operation.
  operation <- new_operation(
    name = "DescribeClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_clusters_input(clusters = clusters, include = include)
  op_output <- .ecs$describe_clusters_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$describe_clusters <- ecs_describe_clusters

#' Describes Amazon Elastic Container Service container instances
#'
#' @description
#' Describes Amazon Elastic Container Service container instances. Returns
#' metadata about registered and remaining resources on each container
#' instance requested.
#'
#' @usage
#' ecs_describe_container_instances(cluster, containerInstances, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instances to describe. If you do not specify a
#' cluster, the default cluster is assumed.
#' This parameter is required if
#' the container instance or container instances you are describing were
#' launched in any cluster other than the default cluster.
#' @param containerInstances &#91;required&#93; A list of up to 100 container instance IDs or full Amazon Resource Name
#' (ARN) entries.
#' @param include Specifies whether you want to see the resource tags for the container
#' instance. If `TAGS` is specified, the tags are included in the response.
#' If this field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_container_instances(
#'   cluster = "string",
#'   containerInstances = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified container instance
#' # in your default region, using the container instance UUID as an
#' # identifier.
#' svc$describe_container_instances(
#'   cluster = "default",
#'   containerInstances = list(
#'     "f2756532-8f13-4d53-87c9-aed50dc94cd7"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_container_instances
ecs_describe_container_instances <- function(cluster = NULL, containerInstances, include = NULL) {
  # Describe the ECS DescribeContainerInstances API operation.
  operation <- new_operation(
    name = "DescribeContainerInstances",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_container_instances_input(
    cluster = cluster,
    containerInstances = containerInstances,
    include = include
  )
  op_output <- .ecs$describe_container_instances_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$describe_container_instances <- ecs_describe_container_instances

#' Describes the specified services running in your cluster
#'
#' @description
#' Describes the specified services running in your cluster.
#'
#' @usage
#' ecs_describe_services(cluster, services, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to describe. If you do not specify a cluster, the
#' default cluster is assumed. This parameter is required if the service or
#' services you are describing were launched in any cluster other than the
#' default cluster.
#' @param services &#91;required&#93; A list of services to describe. You may specify up to 10 services to
#' describe in a single operation.
#' @param include Specifies whether you want to see the resource tags for the service. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_services(
#'   cluster = "string",
#'   services = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides descriptive information about the service named
#' # `ecs-simple-service`.
#' svc$describe_services(
#'   services = list(
#'     "ecs-simple-service"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_services
ecs_describe_services <- function(cluster = NULL, services, include = NULL) {
  # Describe the ECS DescribeServices API operation.
  operation <- new_operation(
    name = "DescribeServices",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_services_input(cluster = cluster, services = services, include = include)
  op_output <- .ecs$describe_services_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$describe_services <- ecs_describe_services

#' Describes a task definition
#'
#' @description
#' Describes a task definition.
#' You can specify a `family` and `revision`
#' to find information about a specific task definition, or you can simply
#' specify the family to find the latest `ACTIVE` revision in that family.
#'
#' You can only describe `INACTIVE` task definitions while an active task
#' or service references them.
#'
#' @usage
#' ecs_describe_task_definition(taskDefinition, include)
#'
#' @param taskDefinition &#91;required&#93; The `family` for the latest `ACTIVE` revision, `family` and `revision`
#' (`family:revision`) for a specific revision in the family, or full
#' Amazon Resource Name (ARN) of the task definition to describe.
#' @param include Specifies whether to see the resource tags for the task definition. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_task_definition(
#'   taskDefinition = "string",
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified task definition.
#' svc$describe_task_definition(
#'   taskDefinition = "hello_world:8"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_task_definition
ecs_describe_task_definition <- function(taskDefinition, include = NULL) {
  # Describe the ECS DescribeTaskDefinition API operation.
  operation <- new_operation(
    name = "DescribeTaskDefinition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_task_definition_input(taskDefinition = taskDefinition, include = include)
  op_output <- .ecs$describe_task_definition_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$describe_task_definition <- ecs_describe_task_definition

#' Describes the task sets in the specified cluster and service
#'
#' @description
#' Describes the task sets in the specified cluster and service.
#' This is
#' used when a service uses the `EXTERNAL` deployment controller type. For
#' more information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_describe_task_sets(cluster, service, taskSets, include)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task sets exist in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task sets exist in.
#' @param taskSets The ID or full Amazon Resource Name (ARN) of task sets to describe.
#' @param include Specifies whether to see the resource tags for the task set. If `TAGS`
#' is specified, the tags are included in the response. If this field is
#' omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_task_sets(
#'   cluster = "string",
#'   service = "string",
#'   taskSets = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_describe_task_sets
ecs_describe_task_sets <- function(cluster, service, taskSets = NULL, include = NULL) {
  # Describe the ECS DescribeTaskSets API operation.
  operation <- new_operation(
    name = "DescribeTaskSets",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_task_sets_input(
    cluster = cluster,
    service = service,
    taskSets = taskSets,
    include = include
  )
  op_output <- .ecs$describe_task_sets_output()
  # Resolve configuration, send the request, and return the parsed response.
  ecs_service <- .ecs$service(get_config())
  send_request(new_request(ecs_service, operation, op_input, op_output))
}
.ecs$operations$describe_task_sets <- ecs_describe_task_sets

#' Describes a specified task or tasks
#'
#' @description
#' Describes a specified task or tasks.
#'
#' @usage
#' ecs_describe_tasks(cluster, tasks, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the task or tasks to describe. If you do not specify a cluster,
#' the default cluster is assumed. This parameter is required if the task
#' or tasks you are describing were launched in any cluster other than the
#' default cluster.
#' @param tasks &#91;required&#93; A list of up to 100 task IDs or full ARN entries.
#' @param include Specifies whether you want to see the resource tags for the task. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_tasks(
#'   cluster = "string",
#'   tasks = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified task, using the
#' # task UUID as an identifier.
#' svc$describe_tasks(
#'   tasks = list(
#'     "c5cba4eb-5dad-405e-96db-71ef8eefe6a8"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_tasks
ecs_describe_tasks <- function(cluster = NULL, tasks, include = NULL) {
  # Describe the ECS DescribeTasks API operation.
  operation <- new_operation(
    name = "DescribeTasks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$describe_tasks_input(cluster = cluster, tasks = tasks, include = include)
  op_output <- .ecs$describe_tasks_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$describe_tasks <- ecs_describe_tasks

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Returns an endpoint for the Amazon ECS agent to poll for updates.
#'
#' @usage
#' ecs_discover_poll_endpoint(containerInstance, cluster)
#'
#' @param containerInstance The container instance ID or full ARN of the container instance. The ARN
#' contains the `arn:aws:ecs` namespace, followed by the Region of the
#' container instance, the AWS account ID of the container instance owner,
#' the `container-instance` namespace, and then the container instance ID.
#' For example,
#' `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID`.
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster to
#' which the container instance belongs.
#'
#' @section Request syntax:
#' ```
#' svc$discover_poll_endpoint(
#'   containerInstance = "string",
#'   cluster = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_discover_poll_endpoint
ecs_discover_poll_endpoint <- function(containerInstance = NULL, cluster = NULL) {
  # Describe the ECS DiscoverPollEndpoint API operation (agent-only).
  operation <- new_operation(
    name = "DiscoverPollEndpoint",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$discover_poll_endpoint_input(containerInstance = containerInstance, cluster = cluster)
  op_output <- .ecs$discover_poll_endpoint_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$discover_poll_endpoint <- ecs_discover_poll_endpoint

#' Lists the account settings for a specified principal
#'
#' @description
#' Lists the account settings for a specified principal.
#'
#' @usage
#' ecs_list_account_settings(name, value, principalArn, effectiveSettings,
#' nextToken, maxResults)
#'
#' @param name The name of the account setting you want to list the settings for.
#' @param value The value of the account settings with which to filter results. You must
#' also specify an account setting name to use this parameter.
#' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the
#' root user.
#' If this field is omitted, the account settings are listed
#' only for the authenticated user.
#' @param effectiveSettings Specifies whether to return the effective settings. If `true`, the
#' account settings for the root user or the default setting for the
#' `principalArn` are returned. If `false`, the account settings for the
#' `principalArn` are returned if they are set. Otherwise, no account
#' settings are returned.
#' @param nextToken The `nextToken` value returned from a `ListAccountSettings` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of account setting results returned by
#' `ListAccountSettings` in paginated output. When this parameter is used,
#' `ListAccountSettings` only returns `maxResults` results in a single page
#' along with a `nextToken` response element. The remaining results of the
#' initial request can be seen by sending another `ListAccountSettings`
#' request with the returned `nextToken` value. This value can be between 1
#' and 10. If this parameter is not used, then `ListAccountSettings`
#' returns up to 10 results and a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_account_settings(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   value = "string",
#'   principalArn = "string",
#'   effectiveSettings = TRUE|FALSE,
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example displays the effective account settings for your account.
#' svc$list_account_settings(
#'   effectiveSettings = TRUE
#' )
#'
#' # This example displays the effective account settings for the specified
#' # user or role.
#' svc$list_account_settings(
#'   effectiveSettings = TRUE,
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_account_settings
ecs_list_account_settings <- function(name = NULL, value = NULL, principalArn = NULL, effectiveSettings = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ECS ListAccountSettings API operation.
  operation <- new_operation(
    name = "ListAccountSettings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_account_settings_input(
    name = name,
    value = value,
    principalArn = principalArn,
    effectiveSettings = effectiveSettings,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_account_settings_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_account_settings <- ecs_list_account_settings

#' Lists the attributes for Amazon ECS resources within a specified target
#' type and cluster
#'
#' @description
#' Lists the attributes for Amazon ECS resources within a specified target
#' type and cluster. When you specify a target type and cluster,
#' `ListAttributes` returns a list of attribute objects, one for each
#' attribute on each resource. You can filter the list of results to a
#' single attribute name to only return results that have that name. You
#' can also filter the results by attribute name and value, for example, to
#' see which container instances in a cluster are running a Linux AMI
#' (`ecs.os-type=linux`).
#'
#' @usage
#' ecs_list_attributes(cluster, targetType, attributeName, attributeValue,
#' nextToken, maxResults)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster to list
#' attributes.
#' If you do not specify a cluster, the default cluster is
#' assumed.
#' @param targetType &#91;required&#93; The type of the target with which to list attributes.
#' @param attributeName The name of the attribute with which to filter the results.
#' @param attributeValue The value of the attribute with which to filter results. You must also
#' specify an attribute name to use this parameter.
#' @param nextToken The `nextToken` value returned from a `ListAttributes` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of cluster results returned by `ListAttributes` in
#' paginated output. When this parameter is used, `ListAttributes` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListAttributes` request with the returned
#' `nextToken` value. This value can be between 1 and 100. If this
#' parameter is not used, then `ListAttributes` returns up to 100 results
#' and a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_attributes(
#'   cluster = "string",
#'   targetType = "container-instance",
#'   attributeName = "string",
#'   attributeValue = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_list_attributes
ecs_list_attributes <- function(cluster = NULL, targetType, attributeName = NULL, attributeValue = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ECS ListAttributes API operation.
  operation <- new_operation(
    name = "ListAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_attributes_input(
    cluster = cluster,
    targetType = targetType,
    attributeName = attributeName,
    attributeValue = attributeValue,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_attributes_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_attributes <- ecs_list_attributes

#' Returns a list of existing clusters
#'
#' @description
#' Returns a list of existing clusters.
#'
#' @usage
#' ecs_list_clusters(nextToken, maxResults)
#'
#' @param nextToken The `nextToken` value returned from a `ListClusters` request indicating
#' that more results are available to fulfill the request and further calls
#' will be needed. If `maxResults` was provided, it is possible the number
#' of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of cluster results returned by `ListClusters` in
#' paginated output. When this parameter is used, `ListClusters` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListClusters` request with the returned
#' `nextToken` value.
#' This value can be between 1 and 100. If this
#' parameter is not used, then `ListClusters` returns up to 100 results and
#' a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_clusters(
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your available clusters in your default
#' # region.
#' svc$list_clusters()
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_clusters
ecs_list_clusters <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ECS ListClusters API operation.
  operation <- new_operation(
    name = "ListClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_clusters_input(nextToken = nextToken, maxResults = maxResults)
  op_output <- .ecs$list_clusters_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_clusters <- ecs_list_clusters

#' Returns a list of container instances in a specified cluster
#'
#' @description
#' Returns a list of container instances in a specified cluster. You can
#' filter the results of a `ListContainerInstances` operation with cluster
#' query language statements inside the `filter` parameter. For more
#' information, see [Cluster Query
#' Language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_list_container_instances(cluster, filter, nextToken, maxResults,
#' status)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instances to list. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param filter You can filter the results of a `ListContainerInstances` operation with
#' cluster query language statements.
#' For more information, see [Cluster
#' Query
#' Language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#' @param nextToken The `nextToken` value returned from a `ListContainerInstances` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of container instance results returned by
#' `ListContainerInstances` in paginated output. When this parameter is
#' used, `ListContainerInstances` only returns `maxResults` results in a
#' single page along with a `nextToken` response element. The remaining
#' results of the initial request can be seen by sending another
#' `ListContainerInstances` request with the returned `nextToken` value.
#' This value can be between 1 and 100. If this parameter is not used, then
#' `ListContainerInstances` returns up to 100 results and a `nextToken`
#' value if applicable.
#' @param status Filters the container instances by status. For example, if you specify
#' the `DRAINING` status, the results include only container instances that
#' have been set to `DRAINING` using UpdateContainerInstancesState. If you
#' do not specify this parameter, the default is to include container
#' instances set to all states other than `INACTIVE`.
#'
#' @section Request syntax:
#' ```
#' svc$list_container_instances(
#'   cluster = "string",
#'   filter = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   status = "ACTIVE"|"DRAINING"|"REGISTERING"|"DEREGISTERING"|"REGISTRATION_FAILED"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your available container instances in the
#' # specified cluster in your default region.
#' svc$list_container_instances(
#'   cluster = "default"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_container_instances
ecs_list_container_instances <- function(cluster = NULL, filter = NULL, nextToken = NULL, maxResults = NULL, status = NULL) {
  # Describe the ECS ListContainerInstances API operation.
  operation <- new_operation(
    name = "ListContainerInstances",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_container_instances_input(
    cluster = cluster,
    filter = filter,
    nextToken = nextToken,
    maxResults = maxResults,
    status = status
  )
  op_output <- .ecs$list_container_instances_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_container_instances <- ecs_list_container_instances

#' Lists the services that are running in a specified cluster
#'
#' @description
#' Lists the services that are running in a specified cluster.
#'
#' @usage
#' ecs_list_services(cluster, nextToken, maxResults, launchType,
#' schedulingStrategy)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the services to list. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param nextToken The `nextToken` value returned from a `ListServices` request indicating
#' that more results are available to fulfill the request and further calls
#' will be needed. If `maxResults` was provided, it is possible the number
#' of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of service results returned by `ListServices` in
#' paginated output. When this parameter is used, `ListServices` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListServices` request with the returned
#' `nextToken` value. This value can be between 1 and 100. If this
#' parameter is not used, then `ListServices` returns up to 10 results and
#' a `nextToken` value if applicable.
#' @param launchType The launch type for the services to list.
#' @param schedulingStrategy The scheduling strategy for services to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_services(
#'   cluster = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   launchType = "EC2"|"FARGATE",
#'   schedulingStrategy = "REPLICA"|"DAEMON"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists the services running in the default cluster for an
#' # account.
#' svc$list_services()
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_services
ecs_list_services <- function(cluster = NULL, nextToken = NULL, maxResults = NULL, launchType = NULL, schedulingStrategy = NULL) {
  # Describe the ECS ListServices API operation.
  operation <- new_operation(
    name = "ListServices",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_services_input(
    cluster = cluster,
    nextToken = nextToken,
    maxResults = maxResults,
    launchType = launchType,
    schedulingStrategy = schedulingStrategy
  )
  op_output <- .ecs$list_services_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_services <- ecs_list_services

#' List the tags for an Amazon ECS resource
#'
#' @description
#' List the tags for an Amazon ECS resource.
#'
#' @usage
#' ecs_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) that identifies the resource for which to
#' list the tags. Currently, the supported resources are Amazon ECS tasks,
#' services, task definitions, clusters, and container instances.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#'   resourceArn = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists the tags for the 'dev' cluster.
#' svc$list_tags_for_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_tags_for_resource
ecs_list_tags_for_resource <- function(resourceArn) {
  # Describe the ECS ListTagsForResource API operation.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape.
  op_input <- .ecs$list_tags_for_resource_input(resourceArn = resourceArn)
  op_output <- .ecs$list_tags_for_resource_output()
  # Resolve configuration, send the request, and return the parsed response.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_tags_for_resource <- ecs_list_tags_for_resource

#' Returns a list of task definition families that are registered to your
#' account (which may include task definition families that no longer have
#' any ACTIVE task definition revisions)
#'
#' @description
#' Returns a list of task definition families that are registered to your
#' account (which may include task definition families that no longer have
#' any `ACTIVE` task definition revisions).
#'
#' You can filter out task definition families that do not contain any
#' `ACTIVE` task definition revisions by setting the `status` parameter to
#' `ACTIVE`. You can also filter the results with the `familyPrefix`
#' parameter.
#'
#' @usage
#' ecs_list_task_definition_families(familyPrefix, status, nextToken,
#' maxResults)
#'
#' @param familyPrefix The `familyPrefix` is a string that is used to filter the results of
#' `ListTaskDefinitionFamilies`. If you specify a `familyPrefix`, only task
#' definition family names that begin with the `familyPrefix` string are
#' returned.
#' @param status The task definition family status with which to filter the
#' `ListTaskDefinitionFamilies` results. By default, both `ACTIVE` and
#' `INACTIVE` task definition families are listed. If this parameter is set
#' to `ACTIVE`, only task definition families that have an `ACTIVE` task
#' definition revision are returned.
If this parameter is set to #' `INACTIVE`, only task definition families that do not have any `ACTIVE` #' task definition revisions are returned. If you paginate the resulting #' output, be sure to keep the `status` value constant in each subsequent #' request. #' @param nextToken The `nextToken` value returned from a `ListTaskDefinitionFamilies` #' request indicating that more results are available to fulfill the #' request and further calls will be needed. If `maxResults` was provided, #' it is possible the number of results to be fewer than `maxResults`. #' #' This token should be treated as an opaque identifier that is only used #' to retrieve the next items in a list and not for other programmatic #' purposes. #' @param maxResults The maximum number of task definition family results returned by #' `ListTaskDefinitionFamilies` in paginated output. When this parameter is #' used, `ListTaskDefinitions` only returns `maxResults` results in a #' single page along with a `nextToken` response element. The remaining #' results of the initial request can be seen by sending another #' `ListTaskDefinitionFamilies` request with the returned `nextToken` #' value. This value can be between 1 and 100. If this parameter is not #' used, then `ListTaskDefinitionFamilies` returns up to 100 results and a #' `nextToken` value if applicable. #' #' @section Request syntax: #' ``` #' svc$list_task_definition_families( #' familyPrefix = "string", #' status = "ACTIVE"|"INACTIVE"|"ALL", #' nextToken = "string", #' maxResults = 123 #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example lists all of your registered task definition families. #' svc$list_task_definition_families() #' #' # This example lists the task definition revisions that start with "hpcc". 
#' svc$list_task_definition_families(
#'   familyPrefix = "hpcc"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_task_definition_families
ecs_list_task_definition_families <- function(familyPrefix = NULL, status = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListTaskDefinitionFamilies call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "ListTaskDefinitionFamilies",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied filters into the request shape.
  req_input <- .ecs$list_task_definition_families_input(
    familyPrefix = familyPrefix,
    status = status,
    nextToken = nextToken,
    maxResults = maxResults
  )
  req_output <- .ecs$list_task_definition_families_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$list_task_definition_families <- ecs_list_task_definition_families

#' Returns a list of task definitions that are registered to your account
#'
#' @description
#' Returns a list of task definitions that are registered to your account.
#' You can filter the results by family name with the `familyPrefix`
#' parameter or by status with the `status` parameter.
#'
#' @usage
#' ecs_list_task_definitions(familyPrefix, status, sort, nextToken,
#' maxResults)
#'
#' @param familyPrefix The full family name with which to filter the `ListTaskDefinitions`
#' results. Specifying a `familyPrefix` limits the listed task definitions
#' to task definition revisions that belong to that family.
#' @param status The task definition status with which to filter the
#' `ListTaskDefinitions` results. By default, only `ACTIVE` task
#' definitions are listed. By setting this parameter to `INACTIVE`, you can
#' view task definitions that are `INACTIVE` as long as an active task or
#' service still references them. If you paginate the resulting output, be
#' sure to keep the `status` value constant in each subsequent request.
#' @param sort The order in which to sort the results. Valid values are `ASC` and
#' `DESC`.
By default (`ASC`), task definitions are listed #' lexicographically by family name and in ascending numerical order by #' revision so that the newest task definitions in a family are listed #' last. Setting this parameter to `DESC` reverses the sort order on family #' name and revision so that the newest task definitions in a family are #' listed first. #' @param nextToken The `nextToken` value returned from a `ListTaskDefinitions` request #' indicating that more results are available to fulfill the request and #' further calls will be needed. If `maxResults` was provided, it is #' possible the number of results to be fewer than `maxResults`. #' #' This token should be treated as an opaque identifier that is only used #' to retrieve the next items in a list and not for other programmatic #' purposes. #' @param maxResults The maximum number of task definition results returned by #' `ListTaskDefinitions` in paginated output. When this parameter is used, #' `ListTaskDefinitions` only returns `maxResults` results in a single page #' along with a `nextToken` response element. The remaining results of the #' initial request can be seen by sending another `ListTaskDefinitions` #' request with the returned `nextToken` value. This value can be between 1 #' and 100. If this parameter is not used, then `ListTaskDefinitions` #' returns up to 100 results and a `nextToken` value if applicable. #' #' @section Request syntax: #' ``` #' svc$list_task_definitions( #' familyPrefix = "string", #' status = "ACTIVE"|"INACTIVE", #' sort = "ASC"|"DESC", #' nextToken = "string", #' maxResults = 123 #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example lists all of your registered task definitions. #' svc$list_task_definitions() #' #' # This example lists the task definition revisions of a specified family. 
#' svc$list_task_definitions(
#'   familyPrefix = "wordpress"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_task_definitions
ecs_list_task_definitions <- function(familyPrefix = NULL, status = NULL, sort = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListTaskDefinitions call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "ListTaskDefinitions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied filters into the request shape.
  req_input <- .ecs$list_task_definitions_input(
    familyPrefix = familyPrefix,
    status = status,
    sort = sort,
    nextToken = nextToken,
    maxResults = maxResults
  )
  req_output <- .ecs$list_task_definitions_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$list_task_definitions <- ecs_list_task_definitions

#' Returns a list of tasks for a specified cluster
#'
#' @description
#' Returns a list of tasks for a specified cluster. You can filter the
#' results by family name, by a particular container instance, or by the
#' desired status of the task with the `family`, `containerInstance`, and
#' `desiredStatus` parameters.
#'
#' Recently stopped tasks might appear in the returned results. Currently,
#' stopped tasks appear in the returned results for at least one hour.
#'
#' @usage
#' ecs_list_tasks(cluster, containerInstance, family, nextToken,
#' maxResults, startedBy, serviceName, desiredStatus, launchType)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the tasks to list. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param containerInstance The container instance ID or full ARN of the container instance with
#' which to filter the `ListTasks` results. Specifying a
#' `containerInstance` limits the results to tasks that belong to that
#' container instance.
#' @param family The name of the family with which to filter the `ListTasks` results.
#' Specifying a `family` limits the results to tasks that belong to that #' family. #' @param nextToken The `nextToken` value returned from a `ListTasks` request indicating #' that more results are available to fulfill the request and further calls #' will be needed. If `maxResults` was provided, it is possible the number #' of results to be fewer than `maxResults`. #' #' This token should be treated as an opaque identifier that is only used #' to retrieve the next items in a list and not for other programmatic #' purposes. #' @param maxResults The maximum number of task results returned by `ListTasks` in paginated #' output. When this parameter is used, `ListTasks` only returns #' `maxResults` results in a single page along with a `nextToken` response #' element. The remaining results of the initial request can be seen by #' sending another `ListTasks` request with the returned `nextToken` value. #' This value can be between 1 and 100. If this parameter is not used, then #' `ListTasks` returns up to 100 results and a `nextToken` value if #' applicable. #' @param startedBy The `startedBy` value with which to filter the task results. Specifying #' a `startedBy` value limits the results to tasks that were started with #' that value. #' @param serviceName The name of the service with which to filter the `ListTasks` results. #' Specifying a `serviceName` limits the results to tasks that belong to #' that service. #' @param desiredStatus The task desired status with which to filter the `ListTasks` results. #' Specifying a `desiredStatus` of `STOPPED` limits the results to tasks #' that Amazon ECS has set the desired status to `STOPPED`. This can be #' useful for debugging tasks that are not starting properly or have died #' or finished. The default status filter is `RUNNING`, which shows tasks #' that Amazon ECS has set the desired status to `RUNNING`. #' #' Although you can filter results based on a desired status of `PENDING`, #' this does not return any results. 
Amazon ECS never sets the desired
#' status of a task to that value (only a task's `lastStatus` may have a
#' value of `PENDING`).
#' @param launchType The launch type for services to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_tasks(
#'   cluster = "string",
#'   containerInstance = "string",
#'   family = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   startedBy = "string",
#'   serviceName = "string",
#'   desiredStatus = "RUNNING"|"PENDING"|"STOPPED",
#'   launchType = "EC2"|"FARGATE"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of the tasks in a cluster.
#' svc$list_tasks(
#'   cluster = "default"
#' )
#'
#' # This example lists the tasks of a specified container instance.
#' # Specifying a `containerInstance` value limits the results to tasks
#' # that belong to that container instance.
#' svc$list_tasks(
#'   cluster = "default",
#'   containerInstance = "f6bbb147-5370-4ace-8c73-c7181ded911f"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_tasks
ecs_list_tasks <- function(cluster = NULL, containerInstance = NULL, family = NULL, nextToken = NULL, maxResults = NULL, startedBy = NULL, serviceName = NULL, desiredStatus = NULL, launchType = NULL) {
  # Describe the ListTasks call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "ListTasks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied filters into the request shape.
  req_input <- .ecs$list_tasks_input(
    cluster = cluster,
    containerInstance = containerInstance,
    family = family,
    nextToken = nextToken,
    maxResults = maxResults,
    startedBy = startedBy,
    serviceName = serviceName,
    desiredStatus = desiredStatus,
    launchType = launchType
  )
  req_output <- .ecs$list_tasks_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$list_tasks <- ecs_list_tasks

#' Modifies an account setting
#'
#' @description
#' Modifies an account setting. Account settings are set on a per-Region
#' basis.
#' #' If you change the account setting for the root user, the default #' settings for all of the IAM users and roles for which no individual #' account setting has been specified are reset. For more information, see #' [Account #' Settings](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' When `serviceLongArnFormat`, `taskLongArnFormat`, or #' `containerInstanceLongArnFormat` are specified, the Amazon Resource Name #' (ARN) and resource ID format of the resource type for a specified IAM #' user, IAM role, or the root user for an account is affected. The opt-in #' and opt-out account setting must be set for each Amazon ECS resource #' separately. The ARN and resource ID format of a resource will be defined #' by the opt-in status of the IAM user or role that created the resource. #' You must enable this setting to use Amazon ECS features such as resource #' tagging. #' #' When `awsvpcTrunking` is specified, the elastic network interface (ENI) #' limit for any new container instances that support the feature is #' changed. If `awsvpcTrunking` is enabled, any new container instances #' that support the feature are launched have the increased ENI limits #' available to them. For more information, see [Elastic Network Interface #' Trunking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-eni.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' When `containerInsights` is specified, the default setting indicating #' whether CloudWatch Container Insights is enabled for your clusters is #' changed. If `containerInsights` is enabled, any new clusters that are #' created will have Container Insights enabled unless you disable it #' during cluster creation. 
For more information, see [CloudWatch Container #' Insights](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' @usage #' ecs_put_account_setting(name, value, principalArn) #' #' @param name &#91;required&#93; The Amazon ECS resource name for which to modify the account setting. If #' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS #' services is affected. If `taskLongArnFormat` is specified, the ARN and #' resource ID for your Amazon ECS tasks is affected. If #' `containerInstanceLongArnFormat` is specified, the ARN and resource ID #' for your Amazon ECS container instances is affected. If `awsvpcTrunking` #' is specified, the elastic network interface (ENI) limit for your Amazon #' ECS container instances is affected. If `containerInsights` is #' specified, the default setting for CloudWatch Container Insights for #' your clusters is affected. #' @param value &#91;required&#93; The account setting value for the specified principal ARN. Accepted #' values are `enabled` and `disabled`. #' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the #' root user. If you specify the root user, it modifies the account setting #' for all IAM users, IAM roles, and the root user of the account unless an #' IAM user or role explicitly overrides these settings. If this field is #' omitted, the setting is changed only for the authenticated user. #' #' @section Request syntax: #' ``` #' svc$put_account_setting( #' name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights", #' value = "string", #' principalArn = "string" #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example modifies your account settings to opt in to the new ARN and #' # resource ID format for Amazon ECS services. 
If you’re using this command
#' # as the root user, then changes apply to the entire AWS account, unless
#' # an IAM user or role explicitly overrides these settings for themselves.
#' svc$put_account_setting(
#'   name = "serviceLongArnFormat",
#'   value = "enabled"
#' )
#'
#' # This example modifies the account setting for a specific IAM user or IAM
#' # role to opt in to the new ARN and resource ID format for Amazon ECS
#' # container instances. If you’re using this command as the root user, then
#' # changes apply to the entire AWS account, unless an IAM user or role
#' # explicitly overrides these settings for themselves.
#' svc$put_account_setting(
#'   name = "containerInstanceLongArnFormat",
#'   value = "enabled",
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_put_account_setting
ecs_put_account_setting <- function(name, value, principalArn = NULL) {
  # Describe the PutAccountSetting call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "PutAccountSetting",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the setting name, value, and optional principal into the request shape.
  req_input <- .ecs$put_account_setting_input(
    name = name,
    value = value,
    principalArn = principalArn
  )
  req_output <- .ecs$put_account_setting_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$put_account_setting <- ecs_put_account_setting

#' Modifies an account setting for all IAM users on an account for whom no
#' individual account setting has been specified
#'
#' @description
#' Modifies an account setting for all IAM users on an account for whom no
#' individual account setting has been specified. Account settings are set
#' on a per-Region basis.
#'
#' @usage
#' ecs_put_account_setting_default(name, value)
#'
#' @param name &#91;required&#93; The resource name for which to modify the account setting. If
#' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS
#' services is affected.
If `taskLongArnFormat` is specified, the ARN and
#' resource ID for your Amazon ECS tasks is affected. If
#' `containerInstanceLongArnFormat` is specified, the ARN and resource ID
#' for your Amazon ECS container instances is affected. If `awsvpcTrunking`
#' is specified, the ENI limit for your Amazon ECS container instances is
#' affected. If `containerInsights` is specified, the default setting for
#' CloudWatch Container Insights for your clusters is affected.
#' @param value &#91;required&#93; The account setting value for the specified principal ARN. Accepted
#' values are `enabled` and `disabled`.
#'
#' @section Request syntax:
#' ```
#' svc$put_account_setting_default(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   value = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example modifies the default account setting for the specified
#' # resource for all IAM users or roles on an account. These changes apply
#' # to the entire AWS account, unless an IAM user or role explicitly
#' # overrides these settings for themselves.
#' svc$put_account_setting_default(
#'   name = "serviceLongArnFormat",
#'   value = "enabled"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_put_account_setting_default
ecs_put_account_setting_default <- function(name, value) {
  # Describe the PutAccountSettingDefault call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "PutAccountSettingDefault",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the setting name and value into the request shape.
  req_input <- .ecs$put_account_setting_default_input(name = name, value = value)
  req_output <- .ecs$put_account_setting_default_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$put_account_setting_default <- ecs_put_account_setting_default

#' Create or update an attribute on an Amazon ECS resource
#'
#' @description
#' Create or update an attribute on an Amazon ECS resource.
If the
#' attribute does not exist, it is created. If the attribute exists, its
#' value is replaced with the specified value. To delete an attribute, use
#' DeleteAttributes. For more information, see
#' [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_put_attributes(cluster, attributes)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' contains the resource to apply attributes. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param attributes &#91;required&#93; The attributes to apply to your resource. You can specify up to 10
#' custom attributes per resource. You can specify up to 10 attributes in a
#' single call.
#'
#' @section Request syntax:
#' ```
#' svc$put_attributes(
#'   cluster = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_put_attributes
ecs_put_attributes <- function(cluster = NULL, attributes) {
  # Describe the PutAttributes call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "PutAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the target cluster and attribute list into the request shape.
  req_input <- .ecs$put_attributes_input(cluster = cluster, attributes = attributes)
  req_output <- .ecs$put_attributes_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$put_attributes <- ecs_put_attributes

#' Modifies the available capacity providers and the default capacity
#' provider strategy for a cluster
#'
#' @description
#' Modifies the available capacity providers and the default capacity
#' provider strategy for a cluster.
#'
#' You must specify both the available capacity providers and a default
#' capacity provider strategy for the cluster.
If the specified cluster has #' existing capacity providers associated with it, you must specify all #' existing capacity providers in addition to any new ones you want to add. #' Any existing capacity providers associated with a cluster that are #' omitted from a PutClusterCapacityProviders API call will be #' disassociated with the cluster. You can only disassociate an existing #' capacity provider from a cluster if it's not being used by any existing #' tasks. #' #' When creating a service or running a task on a cluster, if no capacity #' provider or launch type is specified, then the cluster's default #' capacity provider strategy is used. It is recommended to define a #' default capacity provider strategy for your cluster, however you may #' specify an empty array (`\\[\\]`) to bypass defining a default strategy. #' #' @usage #' ecs_put_cluster_capacity_providers(cluster, capacityProviders, #' defaultCapacityProviderStrategy) #' #' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster to #' modify the capacity provider settings for. If you do not specify a #' cluster, the default cluster is assumed. #' @param capacityProviders &#91;required&#93; The name of one or more capacity providers to associate with the #' cluster. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' @param defaultCapacityProviderStrategy &#91;required&#93; The capacity provider strategy to use by default for the cluster. 
#' #' When creating a service or running a task on a cluster, if no capacity #' provider or launch type is specified then the default capacity provider #' strategy for the cluster is used. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. 
#'
#' @section Request syntax:
#' ```
#' svc$put_cluster_capacity_providers(
#'   cluster = "string",
#'   capacityProviders = list(
#'     "string"
#'   ),
#'   defaultCapacityProviderStrategy = list(
#'     list(
#'       capacityProvider = "string",
#'       weight = 123,
#'       base = 123
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_put_cluster_capacity_providers
ecs_put_cluster_capacity_providers <- function(cluster, capacityProviders, defaultCapacityProviderStrategy) {
  # Describe the PutClusterCapacityProviders call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "PutClusterCapacityProviders",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the cluster, provider list, and default strategy into the request shape.
  req_input <- .ecs$put_cluster_capacity_providers_input(
    cluster = cluster,
    capacityProviders = capacityProviders,
    defaultCapacityProviderStrategy = defaultCapacityProviderStrategy
  )
  req_output <- .ecs$put_cluster_capacity_providers_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$put_cluster_capacity_providers <- ecs_put_cluster_capacity_providers

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Registers an EC2 instance into the specified cluster. This instance
#' becomes available to place containers on.
#'
#' @usage
#' ecs_register_container_instance(cluster, instanceIdentityDocument,
#' instanceIdentityDocumentSignature, totalResources, versionInfo,
#' containerInstanceArn, attributes, platformDevices, tags)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster with
#' which to register your container instance. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param instanceIdentityDocument The instance identity document for the EC2 instance to register.
This #' document can be found by running the following command from the #' instance: #' `curl http://169.254.169.254/latest/dynamic/instance-identity/document/` #' @param instanceIdentityDocumentSignature The instance identity document signature for the EC2 instance to #' register. This signature can be found by running the following command #' from the instance: #' `curl http://169.254.169.254/latest/dynamic/instance-identity/signature/` #' @param totalResources The resources available on the instance. #' @param versionInfo The version information for the Amazon ECS container agent and Docker #' daemon running on the container instance. #' @param containerInstanceArn The ARN of the container instance (if it was previously registered). #' @param attributes The container instance attributes that this container instance supports. #' @param platformDevices The devices that are available on the container instance. The only #' supported device type is a GPU. #' @param tags The metadata that you apply to the container instance to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. 
You cannot edit or delete tag keys or values with this prefix.
#' Tags with this prefix do not count against your tags per resource
#' limit.
#'
#' @section Request syntax:
#' ```
#' svc$register_container_instance(
#'   cluster = "string",
#'   instanceIdentityDocument = "string",
#'   instanceIdentityDocumentSignature = "string",
#'   totalResources = list(
#'     list(
#'       name = "string",
#'       type = "string",
#'       doubleValue = 123.0,
#'       longValue = 123,
#'       integerValue = 123,
#'       stringSetValue = list(
#'         "string"
#'       )
#'     )
#'   ),
#'   versionInfo = list(
#'     agentVersion = "string",
#'     agentHash = "string",
#'     dockerVersion = "string"
#'   ),
#'   containerInstanceArn = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   ),
#'   platformDevices = list(
#'     list(
#'       id = "string",
#'       type = "GPU"
#'     )
#'   ),
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_register_container_instance
ecs_register_container_instance <- function(cluster = NULL, instanceIdentityDocument = NULL, instanceIdentityDocumentSignature = NULL, totalResources = NULL, versionInfo = NULL, containerInstanceArn = NULL, attributes = NULL, platformDevices = NULL, tags = NULL) {
  # Describe the RegisterContainerInstance call: POST to "/" with no paginator.
  operation <- new_operation(
    name = "RegisterContainerInstance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the instance identity, resources, and metadata into the request shape.
  req_input <- .ecs$register_container_instance_input(
    cluster = cluster,
    instanceIdentityDocument = instanceIdentityDocument,
    instanceIdentityDocumentSignature = instanceIdentityDocumentSignature,
    totalResources = totalResources,
    versionInfo = versionInfo,
    containerInstanceArn = containerInstanceArn,
    attributes = attributes,
    platformDevices = platformDevices,
    tags = tags
  )
  req_output <- .ecs$register_container_instance_output()
  client <- .ecs$service(get_config())
  # Dispatch the signed request and return the deserialized response.
  send_request(new_request(client, operation, req_input, req_output))
}
.ecs$operations$register_container_instance <- ecs_register_container_instance

#' Registers a new task definition from the supplied family and
#' containerDefinitions
#'
#' @description
#' Registers a new task definition from the supplied `family` and
#' `containerDefinitions`. Optionally, you can add data volumes to your
#' containers with the `volumes` parameter. For more information about task
#' definition parameters and defaults, see [Amazon ECS Task
#' Definitions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' You can specify an IAM role for your task with the `taskRoleArn`
#' parameter. When you specify an IAM role for a task, its containers can
#' then use the latest versions of the AWS CLI or SDKs to make API requests
#' to the AWS services that are specified in the IAM policy associated with
#' the role. For more information, see [IAM Roles for
#' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' You can specify a Docker networking mode for the containers in your task
#' definition with the `networkMode` parameter. The available network modes
#' correspond to those described in [Network
#' settings](https://docs.docker.com/engine/reference/run/#/network-settings)
#' in the Docker run reference. If you specify the `awsvpc` network mode,
#' the task is allocated an elastic network interface, and you must specify
#' a NetworkConfiguration when you create a service or run a task with the
#' task definition. For more information, see [Task
#' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#' #' @usage #' ecs_register_task_definition(family, taskRoleArn, executionRoleArn, #' networkMode, containerDefinitions, volumes, placementConstraints, #' requiresCompatibilities, cpu, memory, tags, pidMode, ipcMode, #' proxyConfiguration, inferenceAccelerators) #' #' @param family &#91;required&#93; You must specify a `family` for a task definition, which allows you to #' track multiple versions of the same task definition. The `family` is #' used as a name for your task definition. Up to 255 letters (uppercase #' and lowercase), numbers, and hyphens are allowed. #' @param taskRoleArn The short name or full Amazon Resource Name (ARN) of the IAM role that #' containers in this task can assume. All containers in this task are #' granted the permissions that are specified in this role. For more #' information, see [IAM Roles for #' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param executionRoleArn The Amazon Resource Name (ARN) of the task execution role that grants #' the Amazon ECS container agent permission to make AWS API calls on your #' behalf. The task execution IAM role is required depending on the #' requirements of your task. For more information, see [Amazon ECS task #' execution IAM #' role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param networkMode The Docker networking mode to use for the containers in the task. The #' valid values are `none`, `bridge`, `awsvpc`, and `host`. If no network #' mode is specified, the default is `bridge`. #' #' For Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. #' For Amazon ECS tasks on Amazon EC2 instances, any network mode can be #' used. 
If the network mode is set to `none`, you cannot specify port #' mappings in your container definitions, and the tasks containers do not #' have external connectivity. The `host` and `awsvpc` network modes offer #' the highest networking performance for containers because they use the #' EC2 network stack instead of the virtualized network stack provided by #' the `bridge` mode. #' #' With the `host` and `awsvpc` network modes, exposed container ports are #' mapped directly to the corresponding host port (for the `host` network #' mode) or the attached elastic network interface port (for the `awsvpc` #' network mode), so you cannot take advantage of dynamic host port #' mappings. #' #' When using the `host` network mode, you should not run containers using #' the root user (UID 0). It is considered best practice to use a non-root #' user. #' #' If the network mode is `awsvpc`, the task is allocated an elastic #' network interface, and you must specify a NetworkConfiguration value #' when you create a service or run a task with the task definition. For #' more information, see [Task #' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants #' with the `ecs-init` package, or AWS Fargate infrastructure support the #' `awsvpc` network mode. #' #' If the network mode is `host`, you cannot run multiple instantiations of #' the same task on a single container instance when port mappings are #' used. #' #' Docker for Windows uses different network modes than Docker for Linux. #' When you register a task definition with Windows containers, you must #' not specify a network mode. If you use the console to register a task #' definition with Windows containers, you must choose the #' `&lt;default&gt;` network mode object. 
#' #' For more information, see [Network #' settings](https://docs.docker.com/engine/reference/run/#network-settings) #' in the *Docker run reference*. #' @param containerDefinitions &#91;required&#93; A list of container definitions in JSON format that describe the #' different containers that make up your task. #' @param volumes A list of volume definitions in JSON format that containers in your task #' may use. #' @param placementConstraints An array of placement constraint objects to use for the task. You can #' specify a maximum of 10 constraints per task (this limit includes #' constraints in the task definition and those specified at runtime). #' @param requiresCompatibilities The task launch type that Amazon ECS should validate the task definition #' against. This ensures that the task definition parameters are compatible #' with the specified launch type. If no value is specified, it defaults to #' `EC2`. #' @param cpu The number of CPU units used by the task. It can be expressed as an #' integer using CPU units, for example `1024`, or as a string using vCPUs, #' for example `1 vCPU` or `1 vcpu`, in a task definition. String values #' are converted to an integer indicating the CPU units when the task #' definition is registered. #' #' Task-level CPU and memory parameters are ignored for Windows containers. #' We recommend specifying container-level resources for Windows #' containers. #' #' If you are using the EC2 launch type, this field is optional. Supported #' values are between `128` CPU units (`0.125` vCPUs) and `10240` CPU units #' (`10` vCPUs). 
#' #' If you are using the Fargate launch type, this field is required and you #' must use one of the following values, which determines your range of #' supported values for the `memory` parameter: #' #' - 256 (.25 vCPU) - Available `memory` values: 512 (0.5 GB), 1024 (1 #' GB), 2048 (2 GB) #' #' - 512 (.5 vCPU) - Available `memory` values: 1024 (1 GB), 2048 (2 GB), #' 3072 (3 GB), 4096 (4 GB) #' #' - 1024 (1 vCPU) - Available `memory` values: 2048 (2 GB), 3072 (3 GB), #' 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) #' #' - 2048 (2 vCPU) - Available `memory` values: Between 4096 (4 GB) and #' 16384 (16 GB) in increments of 1024 (1 GB) #' #' - 4096 (4 vCPU) - Available `memory` values: Between 8192 (8 GB) and #' 30720 (30 GB) in increments of 1024 (1 GB) #' @param memory The amount of memory (in MiB) used by the task. It can be expressed as #' an integer using MiB, for example `1024`, or as a string using GB, for #' example `1GB` or `1 GB`, in a task definition. String values are #' converted to an integer indicating the MiB when the task definition is #' registered. #' #' Task-level CPU and memory parameters are ignored for Windows containers. #' We recommend specifying container-level resources for Windows #' containers. #' #' If using the EC2 launch type, this field is optional. 
#' #' If using the Fargate launch type, this field is required and you must #' use one of the following values, which determines your range of #' supported values for the `cpu` parameter: #' #' - 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available `cpu` values: 256 #' (.25 vCPU) #' #' - 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available `cpu` #' values: 512 (.5 vCPU) #' #' - 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), #' 7168 (7 GB), 8192 (8 GB) - Available `cpu` values: 1024 (1 vCPU) #' #' - Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - #' Available `cpu` values: 2048 (2 vCPU) #' #' - Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - #' Available `cpu` values: 4096 (4 vCPU) #' @param tags The metadata that you apply to the task definition to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. 
#' @param pidMode The process namespace to use for the containers in the task. The valid #' values are `host` or `task`. If `host` is specified, then all containers #' within the tasks that specified the `host` PID mode on the same #' container instance share the same process namespace with the host Amazon #' EC2 instance. If `task` is specified, all containers within the #' specified task share the same process namespace. If no value is #' specified, the default is a private namespace. For more information, see #' [PID #' settings](https://docs.docker.com/engine/reference/run/#pid-settings---pid) #' in the *Docker run reference*. #' #' If the `host` PID mode is used, be aware that there is a heightened risk #' of undesired process namespace expose. For more information, see [Docker #' security](https://docs.docker.com/engine/security/security/). #' #' This parameter is not supported for Windows containers or tasks using #' the Fargate launch type. #' @param ipcMode The IPC resource namespace to use for the containers in the task. The #' valid values are `host`, `task`, or `none`. If `host` is specified, then #' all containers within the tasks that specified the `host` IPC mode on #' the same container instance share the same IPC resources with the host #' Amazon EC2 instance. If `task` is specified, all containers within the #' specified task share the same IPC resources. If `none` is specified, #' then IPC resources within the containers of a task are private and not #' shared with other containers in a task or on the container instance. If #' no value is specified, then the IPC resource namespace sharing depends #' on the Docker daemon setting on the container instance. For more #' information, see [IPC #' settings](https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) #' in the *Docker run reference*. #' #' If the `host` IPC mode is used, be aware that there is a heightened risk #' of undesired IPC namespace expose. 
For more information, see [Docker #' security](https://docs.docker.com/engine/security/security/). #' #' If you are setting namespaced kernel parameters using `systemControls` #' for the containers in the task, the following will apply to your IPC #' resource namespace. For more information, see [System #' Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' - For tasks that use the `host` IPC mode, IPC namespace related #' `systemControls` are not supported. #' #' - For tasks that use the `task` IPC mode, IPC namespace related #' `systemControls` will apply to all containers within a task. #' #' This parameter is not supported for Windows containers or tasks using #' the Fargate launch type. #' @param proxyConfiguration #' @param inferenceAccelerators The Elastic Inference accelerators to use for the containers in the #' task. #' #' @section Request syntax: #' ``` #' svc$register_task_definition( #' family = "string", #' taskRoleArn = "string", #' executionRoleArn = "string", #' networkMode = "bridge"|"host"|"awsvpc"|"none", #' containerDefinitions = list( #' list( #' name = "string", #' image = "string", #' repositoryCredentials = list( #' credentialsParameter = "string" #' ), #' cpu = 123, #' memory = 123, #' memoryReservation = 123, #' links = list( #' "string" #' ), #' portMappings = list( #' list( #' containerPort = 123, #' hostPort = 123, #' protocol = "tcp"|"udp" #' ) #' ), #' essential = TRUE|FALSE, #' entryPoint = list( #' "string" #' ), #' command = list( #' "string" #' ), #' environment = list( #' list( #' name = "string", #' value = "string" #' ) #' ), #' environmentFiles = list( #' list( #' value = "string", #' type = "s3" #' ) #' ), #' mountPoints = list( #' list( #' sourceVolume = "string", #' containerPath = "string", #' readOnly = TRUE|FALSE #' ) #' ), #' volumesFrom = list( #' list( #' sourceContainer = "string", #' readOnly = 
TRUE|FALSE #' ) #' ), #' linuxParameters = list( #' capabilities = list( #' add = list( #' "string" #' ), #' drop = list( #' "string" #' ) #' ), #' devices = list( #' list( #' hostPath = "string", #' containerPath = "string", #' permissions = list( #' "read"|"write"|"mknod" #' ) #' ) #' ), #' initProcessEnabled = TRUE|FALSE, #' sharedMemorySize = 123, #' tmpfs = list( #' list( #' containerPath = "string", #' size = 123, #' mountOptions = list( #' "string" #' ) #' ) #' ), #' maxSwap = 123, #' swappiness = 123 #' ), #' secrets = list( #' list( #' name = "string", #' valueFrom = "string" #' ) #' ), #' dependsOn = list( #' list( #' containerName = "string", #' condition = "START"|"COMPLETE"|"SUCCESS"|"HEALTHY" #' ) #' ), #' startTimeout = 123, #' stopTimeout = 123, #' hostname = "string", #' user = "string", #' workingDirectory = "string", #' disableNetworking = TRUE|FALSE, #' privileged = TRUE|FALSE, #' readonlyRootFilesystem = TRUE|FALSE, #' dnsServers = list( #' "string" #' ), #' dnsSearchDomains = list( #' "string" #' ), #' extraHosts = list( #' list( #' hostname = "string", #' ipAddress = "string" #' ) #' ), #' dockerSecurityOptions = list( #' "string" #' ), #' interactive = TRUE|FALSE, #' pseudoTerminal = TRUE|FALSE, #' dockerLabels = list( #' "string" #' ), #' ulimits = list( #' list( #' name = "core"|"cpu"|"data"|"fsize"|"locks"|"memlock"|"msgqueue"|"nice"|"nofile"|"nproc"|"rss"|"rtprio"|"rttime"|"sigpending"|"stack", #' softLimit = 123, #' hardLimit = 123 #' ) #' ), #' logConfiguration = list( #' logDriver = "json-file"|"syslog"|"journald"|"gelf"|"fluentd"|"awslogs"|"splunk"|"awsfirelens", #' options = list( #' "string" #' ), #' secretOptions = list( #' list( #' name = "string", #' valueFrom = "string" #' ) #' ) #' ), #' healthCheck = list( #' command = list( #' "string" #' ), #' interval = 123, #' timeout = 123, #' retries = 123, #' startPeriod = 123 #' ), #' systemControls = list( #' list( #' namespace = "string", #' value = "string" #' ) #' ), #' 
resourceRequirements = list( #' list( #' value = "string", #' type = "GPU"|"InferenceAccelerator" #' ) #' ), #' firelensConfiguration = list( #' type = "fluentd"|"fluentbit", #' options = list( #' "string" #' ) #' ) #' ) #' ), #' volumes = list( #' list( #' name = "string", #' host = list( #' sourcePath = "string" #' ), #' dockerVolumeConfiguration = list( #' scope = "task"|"shared", #' autoprovision = TRUE|FALSE, #' driver = "string", #' driverOpts = list( #' "string" #' ), #' labels = list( #' "string" #' ) #' ), #' efsVolumeConfiguration = list( #' fileSystemId = "string", #' rootDirectory = "string", #' transitEncryption = "ENABLED"|"DISABLED", #' transitEncryptionPort = 123, #' authorizationConfig = list( #' accessPointId = "string", #' iam = "ENABLED"|"DISABLED" #' ) #' ), #' fsxWindowsFileServerVolumeConfiguration = list( #' fileSystemId = "string", #' rootDirectory = "string", #' authorizationConfig = list( #' credentialsParameter = "string", #' domain = "string" #' ) #' ) #' ) #' ), #' placementConstraints = list( #' list( #' type = "memberOf", #' expression = "string" #' ) #' ), #' requiresCompatibilities = list( #' "EC2"|"FARGATE" #' ), #' cpu = "string", #' memory = "string", #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' pidMode = "host"|"task", #' ipcMode = "host"|"task"|"none", #' proxyConfiguration = list( #' type = "APPMESH", #' containerName = "string", #' properties = list( #' list( #' name = "string", #' value = "string" #' ) #' ) #' ), #' inferenceAccelerators = list( #' list( #' deviceName = "string", #' deviceType = "string" #' ) #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example registers a task definition to the specified family. 
#' svc$register_task_definition( #' containerDefinitions = list( #' list( #' name = "sleep", #' command = list( #' "sleep", #' "360" #' ), #' cpu = 10L, #' essential = TRUE, #' image = "busybox", #' memory = 10L #' ) #' ), #' family = "sleep360", #' taskRoleArn = "", #' volumes = list() #' ) #' } #' #' @keywords internal #' #' @rdname ecs_register_task_definition ecs_register_task_definition <- function(family, taskRoleArn = NULL, executionRoleArn = NULL, networkMode = NULL, containerDefinitions, volumes = NULL, placementConstraints = NULL, requiresCompatibilities = NULL, cpu = NULL, memory = NULL, tags = NULL, pidMode = NULL, ipcMode = NULL, proxyConfiguration = NULL, inferenceAccelerators = NULL) { op <- new_operation( name = "RegisterTaskDefinition", http_method = "POST", http_path = "/", paginator = list() ) input <- .ecs$register_task_definition_input(family = family, taskRoleArn = taskRoleArn, executionRoleArn = executionRoleArn, networkMode = networkMode, containerDefinitions = containerDefinitions, volumes = volumes, placementConstraints = placementConstraints, requiresCompatibilities = requiresCompatibilities, cpu = cpu, memory = memory, tags = tags, pidMode = pidMode, ipcMode = ipcMode, proxyConfiguration = proxyConfiguration, inferenceAccelerators = inferenceAccelerators) output <- .ecs$register_task_definition_output() config <- get_config() svc <- .ecs$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .ecs$operations$register_task_definition <- ecs_register_task_definition #' Starts a new task using the specified task definition #' #' @description #' Starts a new task using the specified task definition. #' #' You can allow Amazon ECS to place tasks for you, or you can customize #' how Amazon ECS places tasks using placement constraints and placement #' strategies. 
#' For more information, see [Scheduling
#' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' Alternatively, you can use StartTask to use your own scheduler or place
#' tasks manually on specific container instances.
#'
#' The Amazon ECS API follows an eventual consistency model, so the result
#' of an API command that affects your Amazon ECS resources might not be
#' immediately visible to subsequent commands. To manage this, confirm the
#' state of a resource with DescribeTasks before modifying it, and apply an
#' exponential backoff (starting at a couple of seconds and increasing
#' gradually up to about five minutes of wait time) between subsequent
#' commands.
#'
#' @usage
#' ecs_run_task(capacityProviderStrategy, cluster, count,
#'   enableECSManagedTags, group, launchType, networkConfiguration,
#'   overrides, placementConstraints, placementStrategy, platformVersion,
#'   propagateTags, referenceId, startedBy, tags, taskDefinition)
#'
#' @param capacityProviderStrategy The capacity provider strategy to use
#'   for the task: one or more capacity providers, each with a `base` and
#'   `weight`. Mutually exclusive with `launchType`; if neither is
#'   specified, the cluster's `defaultCapacityProviderStrategy` is used.
#'   To use AWS Fargate, specify the `FARGATE` or `FARGATE_SPOT` capacity
#'   providers.
#' @param cluster The short name or full ARN of the cluster on which to run
#'   your task. Defaults to the default cluster.
#' @param count The number of instantiations of the specified task to place
#'   on your cluster (up to 10 tasks per call).
#' @param enableECSManagedTags Specifies whether to enable Amazon ECS
#'   managed tags for the task.
#' @param group The name of the task group to associate with the task.
#'   Defaults to the family name of the task definition.
#' @param launchType The launch type on which to run your task. Mutually
#'   exclusive with `capacityProviderStrategy`.
#' @param networkConfiguration The network configuration for the task.
#'   Required for task definitions that use the `awsvpc` network mode and
#'   not supported for other network modes.
#' @param overrides A list of container overrides in JSON format (for
#'   example `command` or `environment` overrides). A total of 8192
#'   characters are allowed, including the JSON formatting characters.
#' @param placementConstraints An array of placement constraint objects to
#'   use for the task (up to 10 per task, including constraints in the
#'   task definition).
#' @param placementStrategy The placement strategy objects to use for the
#'   task (up to five strategy rules per task).
#' @param platformVersion The platform version the task should run; only
#'   specified for Fargate tasks. Defaults to `LATEST`.
#' @param propagateTags Specifies whether to propagate the tags from the
#'   task definition to the task. Tags are not propagated by default, and
#'   specifying the `SERVICE` option when running a task is an error.
#' @param referenceId The reference ID to use for the task.
#' @param startedBy An optional tag specified when a task is started,
#'   useful for filtering ListTasks results. Up to 36 letters, numbers,
#'   hyphens, and underscores are allowed. Tasks started by an Amazon ECS
#'   service carry the service's deployment ID here.
#' @param tags The metadata tags that you apply to the task. Up to 50 tags
#'   per resource; keys up to 128 and values up to 256 Unicode characters
#'   in UTF-8; keys and values are case-sensitive and the `aws:` prefix is
#'   reserved for AWS use.
#' @param taskDefinition &#91;required&#93; The `family` and `revision`
#'   (`family:revision`) or full ARN of the task definition to run. If a
#'   `revision` is not specified, the latest `ACTIVE` revision is used.
#'
#' @examples
#' \dontrun{
#' # This example runs the specified task definition on your default cluster.
#' svc$run_task(
#'   cluster = "default",
#'   taskDefinition = "sleep360:1"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_run_task
ecs_run_task <- function(capacityProviderStrategy = NULL, cluster = NULL,
                         count = NULL, enableECSManagedTags = NULL,
                         group = NULL, launchType = NULL,
                         networkConfiguration = NULL, overrides = NULL,
                         placementConstraints = NULL,
                         placementStrategy = NULL, platformVersion = NULL,
                         propagateTags = NULL, referenceId = NULL,
                         startedBy = NULL, tags = NULL, taskDefinition) {
  # Describe the wire-level operation for the request builder.
  operation <- new_operation(
    name = "RunTask",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation input shape.
  op_input <- .ecs$run_task_input(
    capacityProviderStrategy = capacityProviderStrategy,
    cluster = cluster,
    count = count,
    enableECSManagedTags = enableECSManagedTags,
    group = group,
    launchType = launchType,
    networkConfiguration = networkConfiguration,
    overrides = overrides,
    placementConstraints = placementConstraints,
    placementStrategy = placementStrategy,
    platformVersion = platformVersion,
    propagateTags = propagateTags,
    referenceId = referenceId,
    startedBy = startedBy,
    tags = tags,
    taskDefinition = taskDefinition
  )
  op_output <- .ecs$run_task_output()
  cfg <- get_config()
  service <- .ecs$service(cfg)
  req <- new_request(service, operation, op_input, op_output)
  response <- send_request(req)
  return(response)
}
.ecs$operations$run_task <- ecs_run_task

#' Starts a new task from the specified task definition on the specified
#' container instance or instances
#'
#' @description
#' Starts a new task from the specified task definition on the specified
#' container instance or instances.
#'
#' Alternatively, you can use RunTask to place tasks for you. For more
#' information, see [Scheduling
#' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_start_task(cluster, containerInstances, enableECSManagedTags, group,
#'   networkConfiguration, overrides, propagateTags, referenceId, startedBy,
#'   tags, taskDefinition)
#'
#' @param cluster The short name or full ARN of the cluster on which to
#'   start your task. Defaults to the default cluster.
#' @param containerInstances &#91;required&#93; The container instance IDs or full
#'   ARN entries for the container instances on which to place your task
#'   (up to 10 container instances).
#' @param enableECSManagedTags Specifies whether to enable Amazon ECS
#'   managed tags for the task.
#' @param group The name of the task group to associate with the task.
#'   Defaults to the family name of the task definition.
#' @param networkConfiguration The VPC subnet and security group
#'   configuration for tasks that receive their own elastic network
#'   interface by using the `awsvpc` networking mode.
#' @param overrides A list of container overrides in JSON format (for
#'   example `command` or `environment` overrides). A total of 8192
#'   characters are allowed, including the JSON formatting characters.
#' @param propagateTags Specifies whether to propagate the tags from the
#'   task definition or the service to the task. Tags are not propagated
#'   by default.
#' @param referenceId The reference ID to use for the task.
#' @param startedBy An optional tag specified when a task is started,
#'   useful for filtering ListTasks results. Up to 36 letters, numbers,
#'   hyphens, and underscores are allowed. Tasks started by an Amazon ECS
#'   service carry the service's deployment ID here.
#' @param tags The metadata tags that you apply to the task. Up to 50 tags
#'   per resource; keys up to 128 and values up to 256 Unicode characters
#'   in UTF-8; keys and values are case-sensitive and the `aws:` prefix is
#'   reserved for AWS use.
#' @param taskDefinition &#91;required&#93; The `family` and `revision`
#'   (`family:revision`) or full ARN of the task definition to start. If a
#'   `revision` is not specified, the latest `ACTIVE` revision is used.
#'
#' @keywords internal
#'
#' @rdname ecs_start_task
ecs_start_task <- function(cluster = NULL, containerInstances,
                           enableECSManagedTags = NULL, group = NULL,
                           networkConfiguration = NULL, overrides = NULL,
                           propagateTags = NULL, referenceId = NULL,
                           startedBy = NULL, tags = NULL, taskDefinition) {
  # Describe the wire-level operation for the request builder.
  operation <- new_operation(
    name = "StartTask",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation input shape.
  op_input <- .ecs$start_task_input(
    cluster = cluster,
    containerInstances = containerInstances,
    enableECSManagedTags = enableECSManagedTags,
    group = group,
    networkConfiguration = networkConfiguration,
    overrides = overrides,
    propagateTags = propagateTags,
    referenceId = referenceId,
    startedBy = startedBy,
    tags = tags,
    taskDefinition = taskDefinition
  )
  op_output <- .ecs$start_task_output()
  cfg <- get_config()
  service <- .ecs$service(cfg)
  req <- new_request(service, operation, op_input, op_output)
  response <- send_request(req)
  return(response)
}
.ecs$operations$start_task <- ecs_start_task

#' Stops a running task
#'
#' @description
#' Stops a running task. Any tags associated with the task will be deleted.
#'
#' When StopTask is called on a task, the equivalent of `docker stop` is
#' issued to the containers running in the task. This results in a
#' `SIGTERM` value and a default 30-second timeout, after which the
#' `SIGKILL` value is sent and the containers are forcibly stopped. If the
#' container handles the `SIGTERM` value gracefully and exits within 30
#' seconds from receiving it, no `SIGKILL` value is sent.
#'
#' The default 30-second timeout can be configured on the Amazon ECS
#' container agent with the `ECS_CONTAINER_STOP_TIMEOUT` variable.
For more #' information, see [Amazon ECS Container Agent #' Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' @usage #' ecs_stop_task(cluster, task, reason) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that #' hosts the task to stop. If you do not specify a cluster, the default #' cluster is assumed. #' @param task &#91;required&#93; The task ID or full Amazon Resource Name (ARN) of the task to stop. #' @param reason An optional message specified when a task is stopped. For example, if #' you are using a custom scheduler, you can use this parameter to specify #' the reason for stopping the task here, and the message appears in #' subsequent DescribeTasks API operations on this task. Up to 255 #' characters are allowed in this message. #' #' @section Request syntax: #' ``` #' svc$stop_task( #' cluster = "string", #' task = "string", #' reason = "string" #' ) #' ``` #' #' @keywords internal #' #' @rdname ecs_stop_task ecs_stop_task <- function(cluster = NULL, task, reason = NULL) { op <- new_operation( name = "StopTask", http_method = "POST", http_path = "/", paginator = list() ) input <- .ecs$stop_task_input(cluster = cluster, task = task, reason = reason) output <- .ecs$stop_task_output() config <- get_config() svc <- .ecs$service(config) request <- new_request(svc, op, input, output) response <- send_request(request) return(response) } .ecs$operations$stop_task <- ecs_stop_task #' This action is only used by the Amazon ECS agent, and it is not intended #' for use outside of the agent #' #' @description #' This action is only used by the Amazon ECS agent, and it is not intended #' for use outside of the agent. #' #' Sent to acknowledge that an attachment changed states. 
#'
#' @usage
#' ecs_submit_attachment_state_changes(cluster, attachments)
#'
#' @param cluster The short name or full ARN of the cluster that hosts the container
#' instance the attachment belongs to.
#' @param attachments &#91;required&#93; Any attachments associated with the state change request.
#'
#' @section Request syntax:
#' ```
#' svc$submit_attachment_state_changes(
#'   cluster = "string",
#'   attachments = list(
#'     list(
#'       attachmentArn = "string",
#'       status = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_attachment_state_changes
ecs_submit_attachment_state_changes <- function(cluster = NULL, attachments) {
  # Describe the ECS SubmitAttachmentStateChanges API operation.
  op <- new_operation(
    name = "SubmitAttachmentStateChanges",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$submit_attachment_state_changes_input(cluster = cluster, attachments = attachments)
  output <- .ecs$submit_attachment_state_changes_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_attachment_state_changes <- ecs_submit_attachment_state_changes

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Sent to acknowledge that a container changed states.
#'
#' @usage
#' ecs_submit_container_state_change(cluster, task, containerName,
#' runtimeId, status, exitCode, reason, networkBindings)
#'
#' @param cluster The short name or full ARN of the cluster that hosts the container.
#' @param task The task ID or full Amazon Resource Name (ARN) of the task that hosts
#' the container.
#' @param containerName The name of the container.
#' @param runtimeId The ID of the Docker container.
#' @param status The status of the state change request.
#' @param exitCode The exit code returned for the state change request.
#' @param reason The reason for the state change request.
#' @param networkBindings The network bindings of the container.
#'
#' @section Request syntax:
#' ```
#' svc$submit_container_state_change(
#'   cluster = "string",
#'   task = "string",
#'   containerName = "string",
#'   runtimeId = "string",
#'   status = "string",
#'   exitCode = 123,
#'   reason = "string",
#'   networkBindings = list(
#'     list(
#'       bindIP = "string",
#'       containerPort = 123,
#'       hostPort = 123,
#'       protocol = "tcp"|"udp"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_container_state_change
ecs_submit_container_state_change <- function(cluster = NULL, task = NULL, containerName = NULL,
                                              runtimeId = NULL, status = NULL, exitCode = NULL,
                                              reason = NULL, networkBindings = NULL) {
  # Describe the ECS SubmitContainerStateChange API operation.
  op <- new_operation(
    name = "SubmitContainerStateChange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$submit_container_state_change_input(cluster = cluster, task = task,
    containerName = containerName, runtimeId = runtimeId, status = status,
    exitCode = exitCode, reason = reason, networkBindings = networkBindings)
  output <- .ecs$submit_container_state_change_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_container_state_change <- ecs_submit_container_state_change

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Sent to acknowledge that a task changed states.
#'
#' @usage
#' ecs_submit_task_state_change(cluster, task, status, reason, containers,
#' attachments, pullStartedAt, pullStoppedAt, executionStoppedAt)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the task.
#' @param task The task ID or full ARN of the task in the state change request.
#' @param status The status of the state change request.
#' @param reason The reason for the state change request.
#' @param containers Any containers associated with the state change request.
#' @param attachments Any attachments associated with the state change request.
#' @param pullStartedAt The Unix timestamp for when the container image pull began.
#' @param pullStoppedAt The Unix timestamp for when the container image pull completed.
#' @param executionStoppedAt The Unix timestamp for when the task execution stopped.
#'
#' @section Request syntax:
#' ```
#' svc$submit_task_state_change(
#'   cluster = "string",
#'   task = "string",
#'   status = "string",
#'   reason = "string",
#'   containers = list(
#'     list(
#'       containerName = "string",
#'       imageDigest = "string",
#'       runtimeId = "string",
#'       exitCode = 123,
#'       networkBindings = list(
#'         list(
#'           bindIP = "string",
#'           containerPort = 123,
#'           hostPort = 123,
#'           protocol = "tcp"|"udp"
#'         )
#'       ),
#'       reason = "string",
#'       status = "string"
#'     )
#'   ),
#'   attachments = list(
#'     list(
#'       attachmentArn = "string",
#'       status = "string"
#'     )
#'   ),
#'   pullStartedAt = as.POSIXct(
#'     "2015-01-01"
#'   ),
#'   pullStoppedAt = as.POSIXct(
#'     "2015-01-01"
#'   ),
#'   executionStoppedAt = as.POSIXct(
#'     "2015-01-01"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_task_state_change
ecs_submit_task_state_change <- function(cluster = NULL, task = NULL, status = NULL,
                                         reason = NULL, containers = NULL, attachments = NULL,
                                         pullStartedAt = NULL, pullStoppedAt = NULL,
                                         executionStoppedAt = NULL) {
  # Describe the ECS SubmitTaskStateChange API operation.
  op <- new_operation(
    name = "SubmitTaskStateChange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$submit_task_state_change_input(cluster = cluster, task = task,
    status = status, reason = reason, containers = containers,
    attachments = attachments, pullStartedAt = pullStartedAt,
    pullStoppedAt = pullStoppedAt, executionStoppedAt = executionStoppedAt)
  output <- .ecs$submit_task_state_change_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_task_state_change <- ecs_submit_task_state_change

#' Associates the specified tags to a resource with the specified
#' resourceArn
#'
#' @description
#' Associates the specified tags to a resource with the specified
#' `resourceArn`. If existing tags on a resource are not specified in the
#' request parameters, they are not changed. When a resource is deleted,
#' the tags associated with that resource are deleted as well.
#'
#' @usage
#' ecs_tag_resource(resourceArn, tags)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) of the resource to which to add tags.
#' Currently, the supported resources are Amazon ECS capacity providers,
#' tasks, services, task definitions, clusters, and container instances.
#' @param tags &#91;required&#93; The tags to add to the resource. A tag is an array of key-value pairs.
#'
#' The following basic restrictions apply to tags:
#'
#' - Maximum number of tags per resource - 50
#'
#' - For each resource, each tag key must be unique, and each tag key can
#' have only one value.
#'
#' - Maximum key length - 128 Unicode characters in UTF-8
#'
#' - Maximum value length - 256 Unicode characters in UTF-8
#'
#' - If your tagging schema is used across multiple services and
#' resources, remember that other services may have restrictions on
#' allowed characters. Generally allowed characters are: letters,
#' numbers, and spaces representable in UTF-8, and the following
#' characters: + - = . \\_ : / @@.
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#' such as a prefix for either keys or values as it is reserved for AWS
#' use. You cannot edit or delete tag keys or values with this prefix.
#' Tags with this prefix do not count against your tags per resource
#' limit.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#'   resourceArn = "string",
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example tags the 'dev' cluster with key 'team' and value 'dev'.
#' svc$tag_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev",
#'   tags = list(
#'     list(
#'       key = "team",
#'       value = "dev"
#'     )
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_tag_resource
ecs_tag_resource <- function(resourceArn, tags) {
  # Describe the ECS TagResource API operation.
  op <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$tag_resource_input(resourceArn = resourceArn, tags = tags)
  output <- .ecs$tag_resource_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$tag_resource <- ecs_tag_resource

#' Deletes specified tags from a resource
#'
#' @description
#' Deletes specified tags from a resource.
#'
#' @usage
#' ecs_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) of the resource from which to delete
#' tags. Currently, the supported resources are Amazon ECS capacity
#' providers, tasks, services, task definitions, clusters, and container
#' instances.
#' @param tagKeys &#91;required&#93; The keys of the tags to be removed.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#'   resourceArn = "string",
#'   tagKeys = list(
#'     "string"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the 'team' tag from the 'dev' cluster.
#' svc$untag_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev",
#'   tagKeys = list(
#'     "team"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_untag_resource
ecs_untag_resource <- function(resourceArn, tagKeys) {
  # Describe the ECS UntagResource API operation.
  op <- new_operation(
    name = "UntagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys)
  output <- .ecs$untag_resource_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$untag_resource <- ecs_untag_resource

#' Modifies the parameters for a capacity provider
#'
#' @description
#' Modifies the parameters for a capacity provider.
#'
#' @usage
#' ecs_update_capacity_provider(name, autoScalingGroupProvider)
#'
#' @param name &#91;required&#93; The name of the capacity provider to update.
#' @param autoScalingGroupProvider &#91;required&#93; An object representing the parameters to update for the Auto Scaling
#' group capacity provider.
#'
#' @section Request syntax:
#' ```
#' svc$update_capacity_provider(
#'   name = "string",
#'   autoScalingGroupProvider = list(
#'     managedScaling = list(
#'       status = "ENABLED"|"DISABLED",
#'       targetCapacity = 123,
#'       minimumScalingStepSize = 123,
#'       maximumScalingStepSize = 123,
#'       instanceWarmupPeriod = 123
#'     ),
#'     managedTerminationProtection = "ENABLED"|"DISABLED"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_capacity_provider
ecs_update_capacity_provider <- function(name, autoScalingGroupProvider) {
  # Describe the ECS UpdateCapacityProvider API operation.
  op <- new_operation(
    name = "UpdateCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$update_capacity_provider_input(name = name, autoScalingGroupProvider = autoScalingGroupProvider)
  output <- .ecs$update_capacity_provider_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_capacity_provider <- ecs_update_capacity_provider

#' Modifies the settings to use for a cluster
#'
#' @description
#' Modifies the settings to use for a cluster.
#'
#' @usage
#' ecs_update_cluster_settings(cluster, settings)
#'
#' @param cluster &#91;required&#93; The name of the cluster to modify the settings for.
#' @param settings &#91;required&#93; The setting to use by default for a cluster. This parameter is used to
#' enable CloudWatch Container Insights for a cluster. If this value is
#' specified, it will override the `containerInsights` value set with
#' PutAccountSetting or PutAccountSettingDefault.
#'
#' @section Request syntax:
#' ```
#' svc$update_cluster_settings(
#'   cluster = "string",
#'   settings = list(
#'     list(
#'       name = "containerInsights",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_cluster_settings
ecs_update_cluster_settings <- function(cluster, settings) {
  # Describe the ECS UpdateClusterSettings API operation.
  op <- new_operation(
    name = "UpdateClusterSettings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$update_cluster_settings_input(cluster = cluster, settings = settings)
  output <- .ecs$update_cluster_settings_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_cluster_settings <- ecs_update_cluster_settings

#' Updates the Amazon ECS container agent on a specified container instance
#'
#' @description
#' Updates the Amazon ECS container agent on a specified container
#' instance. Updating the Amazon ECS container agent does not interrupt
#' running tasks or services on the container instance. The process for
#' updating the agent differs depending on whether your container instance
#' was launched with the Amazon ECS-optimized AMI or another operating
#' system.
#'
#' `UpdateContainerAgent` requires the Amazon ECS-optimized AMI or Amazon
#' Linux with the `ecs-init` service installed and running. For help
#' updating the Amazon ECS container agent on other operating systems, see
#' [Manually Updating the Amazon ECS Container
#' Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_container_agent(cluster, containerInstance)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' your container instance is running on. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param containerInstance &#91;required&#93; The container instance ID or full ARN entries for the container instance
#' on which you would like to update the Amazon ECS container agent.
#'
#' @section Request syntax:
#' ```
#' svc$update_container_agent(
#'   cluster = "string",
#'   containerInstance = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_container_agent
ecs_update_container_agent <- function(cluster = NULL, containerInstance) {
  # Describe the ECS UpdateContainerAgent API operation.
  op <- new_operation(
    name = "UpdateContainerAgent",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$update_container_agent_input(cluster = cluster, containerInstance = containerInstance)
  output <- .ecs$update_container_agent_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_container_agent <- ecs_update_container_agent

#' Modifies the status of an Amazon ECS container instance
#'
#' @description
#' Modifies the status of an Amazon ECS container instance.
#'
#' Once a container instance has reached an `ACTIVE` state, you can change
#' the status of a container instance to `DRAINING` to manually remove an
#' instance from a cluster, for example to perform system updates, update
#' the Docker daemon, or scale down the cluster size.
#'
#' A container instance cannot be changed to `DRAINING` until it has
#' reached an `ACTIVE` status. If the instance is in any other status, an
#' error will be received.
#'
#' When you set a container instance to `DRAINING`, Amazon ECS prevents new
#' tasks from being scheduled for placement on the container instance and
#' replacement service tasks are started on other container instances in
#' the cluster if the resources are available. Service tasks on the
#' container instance that are in the `PENDING` state are stopped
#' immediately.
#'
#' Service tasks on the container instance that are in the `RUNNING` state
#' are stopped and replaced according to the service's deployment
#' configuration parameters, `minimumHealthyPercent` and `maximumPercent`.
#' You can change the deployment configuration of your service using
#' UpdateService.
#'
#' - If `minimumHealthyPercent` is below 100\%, the scheduler can ignore
#' `desiredCount` temporarily during task replacement. For example,
#' `desiredCount` is four tasks, a minimum of 50\% allows the scheduler
#' to stop two existing tasks before starting two new tasks. If the
#' minimum is 100\%, the service scheduler can't remove existing tasks
#' until the replacement tasks are considered healthy. Tasks for
#' services that do not use a load balancer are considered healthy if
#' they are in the `RUNNING` state. Tasks for services that use a load
#' balancer are considered healthy if they are in the `RUNNING` state
#' and the container instance they are hosted on is reported as healthy
#' by the load balancer.
#'
#' - The `maximumPercent` parameter represents an upper limit on the
#' number of running tasks during task replacement, which enables you
#' to define the replacement batch size. For example, if `desiredCount`
#' is four tasks, a maximum of 200\% starts four new tasks before
#' stopping the four tasks to be drained, provided that the cluster
#' resources required to do this are available. If the maximum is 100\%,
#' then replacement tasks can't start until the draining tasks have
#' stopped.
#'
#' Any `PENDING` or `RUNNING` tasks that do not belong to a service are not
#' affected. You must wait for them to finish or stop them manually.
#'
#' A container instance has completed draining when it has no more
#' `RUNNING` tasks. You can verify this using ListTasks.
#'
#' When a container instance has been drained, you can set a container
#' instance to `ACTIVE` status and once it has reached that status the
#' Amazon ECS scheduler can begin scheduling tasks on the instance again.
#'
#' @usage
#' ecs_update_container_instances_state(cluster, containerInstances,
#' status)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instance to update. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param containerInstances &#91;required&#93; A list of container instance IDs or full ARN entries.
#' @param status &#91;required&#93; The container instance state with which to update the container
#' instance. The only valid values for this action are `ACTIVE` and
#' `DRAINING`. A container instance can only be updated to `DRAINING`
#' status once it has reached an `ACTIVE` state. If a container instance is
#' in `REGISTERING`, `DEREGISTERING`, or `REGISTRATION_FAILED` state you
#' can describe the container instance but will be unable to update the
#' container instance state.
#'
#' @section Request syntax:
#' ```
#' svc$update_container_instances_state(
#'   cluster = "string",
#'   containerInstances = list(
#'     "string"
#'   ),
#'   status = "ACTIVE"|"DRAINING"|"REGISTERING"|"DEREGISTERING"|"REGISTRATION_FAILED"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_container_instances_state
ecs_update_container_instances_state <- function(cluster = NULL, containerInstances, status) {
  # Describe the ECS UpdateContainerInstancesState API operation.
  op <- new_operation(
    name = "UpdateContainerInstancesState",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the call arguments, build the signed request, and return the
  # parsed service response.
  input <- .ecs$update_container_instances_state_input(cluster = cluster,
    containerInstances = containerInstances, status = status)
  output <- .ecs$update_container_instances_state_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_container_instances_state <- ecs_update_container_instances_state

#' Updating the task placement strategies and constraints on an Amazon ECS
#' service remains in preview and is a Beta Service as defined by and
#' subject to the Beta Service Participation Service Terms located at
#' https://aws
#'
#' @description
#' Updating the task placement strategies and constraints on an Amazon ECS
#' service remains in preview and is a Beta Service as defined by and
#' subject to the Beta Service Participation Service Terms located at
#' [https://aws.amazon.com/service-terms](https://aws.amazon.com/service-terms/)
#' ("Beta Terms"). These Beta Terms apply to your participation in this
#' preview.
#'
#' Modifies the parameters of a service.
#'
#' For services using the rolling update (`ECS`) deployment controller, the
#' desired count, deployment configuration, network configuration, task
#' placement constraints and strategies, or task definition used can be
#' updated.
#' #' For services using the blue/green (`CODE_DEPLOY`) deployment controller, #' only the desired count, deployment configuration, task placement #' constraints and strategies, and health check grace period can be updated #' using this API. If the network configuration, platform version, or task #' definition need to be updated, a new AWS CodeDeploy deployment should be #' created. For more information, see #' [CreateDeployment](https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html) #' in the *AWS CodeDeploy API Reference*. #' #' For services using an external deployment controller, you can update #' only the desired count, task placement constraints and strategies, and #' health check grace period using this API. If the launch type, load #' balancer, network configuration, platform version, or task definition #' need to be updated, you should create a new task set. For more #' information, see CreateTaskSet. #' #' You can add to or subtract from the number of instantiations of a task #' definition in a service by specifying the cluster that the service is #' running in and a new `desiredCount` parameter. #' #' If you have updated the Docker image of your application, you can create #' a new task definition with that image and deploy it to your service. The #' service scheduler uses the minimum healthy percent and maximum percent #' parameters (in the service's deployment configuration) to determine the #' deployment strategy. #' #' If your updated Docker image uses the same tag as what is in the #' existing task definition for your service (for example, #' `my_image:latest`), you do not need to create a new revision of your #' task definition. You can update the service using the #' `forceNewDeployment` option. The new tasks launched by the deployment #' pull the current image/tag combination from your repository when they #' start. #' #' You can also update the deployment configuration of a service. 
When a #' deployment is triggered by updating the task definition of a service, #' the service scheduler uses the deployment configuration parameters, #' `minimumHealthyPercent` and `maximumPercent`, to determine the #' deployment strategy. #' #' - If `minimumHealthyPercent` is below 100\%, the scheduler can ignore #' `desiredCount` temporarily during a deployment. For example, if #' `desiredCount` is four tasks, a minimum of 50\% allows the scheduler #' to stop two existing tasks before starting two new tasks. Tasks for #' services that do not use a load balancer are considered healthy if #' they are in the `RUNNING` state. Tasks for services that use a load #' balancer are considered healthy if they are in the `RUNNING` state #' and the container instance they are hosted on is reported as healthy #' by the load balancer. #' #' - The `maximumPercent` parameter represents an upper limit on the #' number of running tasks during a deployment, which enables you to #' define the deployment batch size. For example, if `desiredCount` is #' four tasks, a maximum of 200\% starts four new tasks before stopping #' the four older tasks (provided that the cluster resources required #' to do this are available). #' #' When UpdateService stops a task during a deployment, the equivalent of #' `docker stop` is issued to the containers running in the task. This #' results in a `SIGTERM` and a 30-second timeout, after which `SIGKILL` is #' sent and the containers are forcibly stopped. If the container handles #' the `SIGTERM` gracefully and exits within 30 seconds from receiving it, #' no `SIGKILL` is sent. #' #' When the service scheduler launches new tasks, it determines task #' placement in your cluster with the following logic: #' #' - Determine which of the container instances in your cluster can #' support your service's task definition (for example, they have the #' required CPU, memory, ports, and container instance attributes). 
#' #' - By default, the service scheduler attempts to balance tasks across #' Availability Zones in this manner (although you can choose a #' different placement strategy): #' #' - Sort the valid container instances by the fewest number of #' running tasks for this service in the same Availability Zone as #' the instance. For example, if zone A has one running service #' task and zones B and C each have zero, valid container instances #' in either zone B or C are considered optimal for placement. #' #' - Place the new service task on a valid container instance in an #' optimal Availability Zone (based on the previous steps), #' favoring container instances with the fewest number of running #' tasks for this service. #' #' When the service scheduler stops running tasks, it attempts to maintain #' balance across the Availability Zones in your cluster using the #' following logic: #' #' - Sort the container instances by the largest number of running tasks #' for this service in the same Availability Zone as the instance. For #' example, if zone A has one running service task and zones B and C #' each have two, container instances in either zone B or C are #' considered optimal for termination. #' #' - Stop the task on a container instance in an optimal Availability #' Zone (based on the previous steps), favoring container instances #' with the largest number of running tasks for this service. #' #' @usage #' ecs_update_service(cluster, service, desiredCount, taskDefinition, #' capacityProviderStrategy, deploymentConfiguration, networkConfiguration, #' placementConstraints, placementStrategy, platformVersion, #' forceNewDeployment, healthCheckGracePeriodSeconds) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that #' your service is running on. If you do not specify a cluster, the default #' cluster is assumed. #' @param service &#91;required&#93; The name of the service to update. 
#' @param desiredCount The number of instantiations of the task to place and keep running in #' your service. #' @param taskDefinition The `family` and `revision` (`family:revision`) or full ARN of the task #' definition to run in your service. If a `revision` is not specified, the #' latest `ACTIVE` revision is used. If you modify the task definition with #' `UpdateService`, Amazon ECS spawns a task with the new version of the #' task definition and then stops an old task after the new version is #' running. #' @param capacityProviderStrategy The capacity provider strategy to update the service to use. #' #' If the service is using the default capacity provider strategy for the #' cluster, the service can be updated to use one or more capacity #' providers as opposed to the default capacity provider strategy. However, #' when a service is using a capacity provider strategy that is not the #' default capacity provider strategy, the service cannot be updated to use #' the cluster's default capacity provider strategy. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. 
#' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param deploymentConfiguration Optional deployment parameters that control how many tasks run during #' the deployment and the ordering of stopping and starting tasks. #' @param networkConfiguration #' @param placementConstraints An array of task placement constraint objects to update the service to #' use. If no value is specified, the existing placement constraints for #' the service will remain unchanged. If this value is specified, it will #' override any existing placement constraints defined for the service. To #' remove all existing placement constraints, specify an empty array. #' #' You can specify a maximum of 10 constraints per task (this limit #' includes constraints in the task definition and those specified at #' runtime). #' @param placementStrategy The task placement strategy objects to update the service to use. If no #' value is specified, the existing placement strategy for the service will #' remain unchanged. If this value is specified, it will override the #' existing placement strategy defined for the service. To remove an #' existing placement strategy, specify an empty object. #' #' You can specify a maximum of five strategy rules per service. #' @param platformVersion The platform version on which your tasks in the service are running. A #' platform version is only specified for tasks using the Fargate launch #' type. If a platform version is not specified, the `LATEST` platform #' version is used by default. For more information, see [AWS Fargate #' Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param forceNewDeployment Whether to force a new deployment of the service. Deployments are not #' forced by default. 
You can use this option to trigger a new deployment #' with no service definition changes. For example, you can update a #' service's tasks to use a newer Docker image with the same image/tag #' combination (`my_image:latest`) or to roll Fargate tasks onto a newer #' platform version. #' @param healthCheckGracePeriodSeconds The period of time, in seconds, that the Amazon ECS service scheduler #' should ignore unhealthy Elastic Load Balancing target health checks #' after a task has first started. This is only valid if your service is #' configured to use a load balancer. If your service's tasks take a while #' to start and respond to Elastic Load Balancing health checks, you can #' specify a health check grace period of up to 2,147,483,647 seconds. #' During that time, the Amazon ECS service scheduler ignores the Elastic #' Load Balancing health check status. This grace period can prevent the #' ECS service scheduler from marking tasks as unhealthy and stopping them #' before they have time to come up. 
#' #' @section Request syntax: #' ``` #' svc$update_service( #' cluster = "string", #' service = "string", #' desiredCount = 123, #' taskDefinition = "string", #' capacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ), #' deploymentConfiguration = list( #' deploymentCircuitBreaker = list( #' enable = TRUE|FALSE, #' rollback = TRUE|FALSE #' ), #' maximumPercent = 123, #' minimumHealthyPercent = 123 #' ), #' networkConfiguration = list( #' awsvpcConfiguration = list( #' subnets = list( #' "string" #' ), #' securityGroups = list( #' "string" #' ), #' assignPublicIp = "ENABLED"|"DISABLED" #' ) #' ), #' placementConstraints = list( #' list( #' type = "distinctInstance"|"memberOf", #' expression = "string" #' ) #' ), #' placementStrategy = list( #' list( #' type = "random"|"spread"|"binpack", #' field = "string" #' ) #' ), #' platformVersion = "string", #' forceNewDeployment = TRUE|FALSE, #' healthCheckGracePeriodSeconds = 123 #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example updates the my-http-service service to use the #' # amazon-ecs-sample task definition. #' svc$update_service( #' service = "my-http-service", #' taskDefinition = "amazon-ecs-sample" #' ) #' #' # This example updates the desired count of the my-http-service service to #' # 10. 
#' svc$update_service(
#'   desiredCount = 10L,
#'   service = "my-http-service"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_update_service
ecs_update_service <- function(cluster = NULL, service, desiredCount = NULL,
                               taskDefinition = NULL,
                               capacityProviderStrategy = NULL,
                               deploymentConfiguration = NULL,
                               networkConfiguration = NULL,
                               placementConstraints = NULL,
                               placementStrategy = NULL,
                               platformVersion = NULL,
                               forceNewDeployment = NULL,
                               healthCheckGracePeriodSeconds = NULL) {
  # Describe the UpdateService HTTP operation for the request pipeline.
  operation <- new_operation(
    name = "UpdateService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters into the operation's input shape.
  op_input <- .ecs$update_service_input(
    cluster = cluster,
    service = service,
    desiredCount = desiredCount,
    taskDefinition = taskDefinition,
    capacityProviderStrategy = capacityProviderStrategy,
    deploymentConfiguration = deploymentConfiguration,
    networkConfiguration = networkConfiguration,
    placementConstraints = placementConstraints,
    placementStrategy = placementStrategy,
    platformVersion = platformVersion,
    forceNewDeployment = forceNewDeployment,
    healthCheckGracePeriodSeconds = healthCheckGracePeriodSeconds
  )
  op_output <- .ecs$update_service_output()
  # Build an ECS client from the ambient configuration, then send the call.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.ecs$operations$update_service <- ecs_update_service

#' Modifies which task set in a service is the primary task set
#'
#' @description
#' Modifies which task set in a service is the primary task set. Any
#' parameters that are updated on the primary task set in a service will
#' transition to the service. This is used when a service uses the
#' `EXTERNAL` deployment controller type. For more information, see [Amazon
#' ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_service_primary_task_set(cluster, service, primaryTaskSet)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task set exists in.
#' @param primaryTaskSet &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the task set to set
#' as the primary task set in the deployment.
#'
#' @section Request syntax:
#' ```
#' svc$update_service_primary_task_set(
#'   cluster = "string",
#'   service = "string",
#'   primaryTaskSet = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_service_primary_task_set
ecs_update_service_primary_task_set <- function(cluster, service, primaryTaskSet) {
  # Describe the UpdateServicePrimaryTaskSet HTTP operation.
  operation <- new_operation(
    name = "UpdateServicePrimaryTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs and resolve the expected output shape.
  op_input <- .ecs$update_service_primary_task_set_input(
    cluster = cluster,
    service = service,
    primaryTaskSet = primaryTaskSet
  )
  op_output <- .ecs$update_service_primary_task_set_output()
  # Build an ECS client from the ambient configuration, then send the call.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.ecs$operations$update_service_primary_task_set <- ecs_update_service_primary_task_set

#' Modifies a task set
#'
#' @description
#' Modifies a task set. This is used when a service uses the `EXTERNAL`
#' deployment controller type. For more information, see [Amazon ECS
#' Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_task_set(cluster, service, taskSet, scale)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task set exists in.
#' @param taskSet &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the task set to
#' update.
#' @param scale &#91;required&#93;
#'
#' @section Request syntax:
#' ```
#' svc$update_task_set(
#'   cluster = "string",
#'   service = "string",
#'   taskSet = "string",
#'   scale = list(
#'     value = 123.0,
#'     unit = "PERCENT"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_task_set
ecs_update_task_set <- function(cluster, service, taskSet, scale) {
  # Describe the UpdateTaskSet HTTP operation.
  operation <- new_operation(
    name = "UpdateTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs and resolve the expected output shape.
  op_input <- .ecs$update_task_set_input(
    cluster = cluster,
    service = service,
    taskSet = taskSet,
    scale = scale
  )
  op_output <- .ecs$update_task_set_output()
  # Build an ECS client from the ambient configuration and dispatch.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.ecs$operations$update_task_set <- ecs_update_task_set
/paws/R/ecs_operations.R
permissive
jfontestad/paws
R
false
false
211,985
r
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common get_config new_operation new_request send_request #' @include ecs_service.R NULL #' Creates a new capacity provider #' #' @description #' Creates a new capacity provider. Capacity providers are associated with #' an Amazon ECS cluster and are used in capacity provider strategies to #' facilitate cluster auto scaling. #' #' Only capacity providers using an Auto Scaling group can be created. #' Amazon ECS tasks on AWS Fargate use the `FARGATE` and `FARGATE_SPOT` #' capacity providers which are already created and available to all #' accounts in Regions supported by AWS Fargate. #' #' @usage #' ecs_create_capacity_provider(name, autoScalingGroupProvider, tags) #' #' @param name &#91;required&#93; The name of the capacity provider. Up to 255 characters are allowed, #' including letters (upper and lowercase), numbers, underscores, and #' hyphens. The name cannot be prefixed with "`aws`", "`ecs`", or #' "`fargate`". #' @param autoScalingGroupProvider &#91;required&#93; The details of the Auto Scaling group for the capacity provider. #' @param tags The metadata that you apply to the capacity provider to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. 
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#'   such as a prefix for either keys or values as it is reserved for AWS
#'   use. You cannot edit or delete tag keys or values with this prefix.
#'   Tags with this prefix do not count against your tags per resource
#'   limit.
#'
#' @section Request syntax:
#' ```
#' svc$create_capacity_provider(
#'   name = "string",
#'   autoScalingGroupProvider = list(
#'     autoScalingGroupArn = "string",
#'     managedScaling = list(
#'       status = "ENABLED"|"DISABLED",
#'       targetCapacity = 123,
#'       minimumScalingStepSize = 123,
#'       maximumScalingStepSize = 123,
#'       instanceWarmupPeriod = 123
#'     ),
#'     managedTerminationProtection = "ENABLED"|"DISABLED"
#'   ),
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_create_capacity_provider
ecs_create_capacity_provider <- function(name, autoScalingGroupProvider, tags = NULL) {
  # Describe the CreateCapacityProvider HTTP operation.
  operation <- new_operation(
    name = "CreateCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal inputs and resolve the expected output shape.
  op_input <- .ecs$create_capacity_provider_input(
    name = name,
    autoScalingGroupProvider = autoScalingGroupProvider,
    tags = tags
  )
  op_output <- .ecs$create_capacity_provider_output()
  # Build an ECS client from the ambient configuration, then send the call.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.ecs$operations$create_capacity_provider <- ecs_create_capacity_provider

#' Creates a new Amazon ECS cluster
#'
#' @description
#' Creates a new Amazon ECS cluster. By default, your account receives a
#' `default` cluster when you launch your first container instance.
#' However, you can create your own cluster with a unique name with the
#' `CreateCluster` action.
#' #' When you call the CreateCluster API operation, Amazon ECS attempts to #' create the Amazon ECS service-linked role for your account so that #' required resources in other AWS services can be managed on your behalf. #' However, if the IAM user that makes the call does not have permissions #' to create the service-linked role, it is not created. For more #' information, see [Using Service-Linked Roles for Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' @usage #' ecs_create_cluster(clusterName, tags, settings, capacityProviders, #' defaultCapacityProviderStrategy) #' #' @param clusterName The name of your cluster. If you do not specify a name for your cluster, #' you create a cluster named `default`. Up to 255 letters (uppercase and #' lowercase), numbers, and hyphens are allowed. #' @param tags The metadata that you apply to the cluster to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. 
You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. #' @param settings The setting to use when creating a cluster. This parameter is used to #' enable CloudWatch Container Insights for a cluster. If this value is #' specified, it will override the `containerInsights` value set with #' PutAccountSetting or PutAccountSettingDefault. #' @param capacityProviders The short name of one or more capacity providers to associate with the #' cluster. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created and not already associated #' with another cluster. New capacity providers can be created with the #' CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param defaultCapacityProviderStrategy The capacity provider strategy to use by default for the cluster. #' #' When creating a service or running a task on a cluster, if no capacity #' provider or launch type is specified then the default capacity provider #' strategy for the cluster is used. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. 
#' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' If a default capacity provider strategy is not defined for a cluster #' during creation, it can be defined later with the #' PutClusterCapacityProviders API operation. #' #' @section Request syntax: #' ``` #' svc$create_cluster( #' clusterName = "string", #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' settings = list( #' list( #' name = "containerInsights", #' value = "string" #' ) #' ), #' capacityProviders = list( #' "string" #' ), #' defaultCapacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example creates a cluster in your default region. 
#' svc$create_cluster(
#'   clusterName = "my_cluster"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_create_cluster
ecs_create_cluster <- function(clusterName = NULL, tags = NULL, settings = NULL,
                               capacityProviders = NULL,
                               defaultCapacityProviderStrategy = NULL) {
  # Describe the CreateCluster HTTP operation.
  operation <- new_operation(
    name = "CreateCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied parameters into the operation's input shape.
  op_input <- .ecs$create_cluster_input(
    clusterName = clusterName,
    tags = tags,
    settings = settings,
    capacityProviders = capacityProviders,
    defaultCapacityProviderStrategy = defaultCapacityProviderStrategy
  )
  op_output <- .ecs$create_cluster_output()
  # Build an ECS client from the ambient configuration, then send the call.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.ecs$operations$create_cluster <- ecs_create_cluster

#' Runs and maintains a desired number of tasks from a specified task
#' definition
#'
#' @description
#' Runs and maintains a desired number of tasks from a specified task
#' definition. If the number of tasks running in a service drops below the
#' `desiredCount`, Amazon ECS runs another copy of the task in the
#' specified cluster. To update an existing service, see the UpdateService
#' action.
#'
#' In addition to maintaining the desired count of tasks in your service,
#' you can optionally run your service behind one or more load balancers.
#' The load balancers distribute traffic across the tasks that are
#' associated with the service. For more information, see [Service Load
#' Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' Tasks for services that *do not* use a load balancer are considered
#' healthy if they're in the `RUNNING` state.
Tasks for services that *do* #' use a load balancer are considered healthy if they're in the `RUNNING` #' state and the container instance that they're hosted on is reported as #' healthy by the load balancer. #' #' There are two service scheduler strategies available: #' #' - `REPLICA` - The replica scheduling strategy places and maintains the #' desired number of tasks across your cluster. By default, the service #' scheduler spreads tasks across Availability Zones. You can use task #' placement strategies and constraints to customize task placement #' decisions. For more information, see [Service Scheduler #' Concepts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' - `DAEMON` - The daemon scheduling strategy deploys exactly one task #' on each active container instance that meets all of the task #' placement constraints that you specify in your cluster. The service #' scheduler also evaluates the task placement constraints for running #' tasks and will stop tasks that do not meet the placement #' constraints. When using this strategy, you don't need to specify a #' desired number of tasks, a task placement strategy, or use Service #' Auto Scaling policies. For more information, see [Service Scheduler #' Concepts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' You can optionally specify a deployment configuration for your service. #' The deployment is triggered by changing properties, such as the task #' definition or the desired count of a service, with an UpdateService #' operation. The default value for a replica service for #' `minimumHealthyPercent` is 100\%. The default value for a daemon service #' for `minimumHealthyPercent` is 0\%. 
#' #' If a service is using the `ECS` deployment controller, the minimum #' healthy percent represents a lower limit on the number of tasks in a #' service that must remain in the `RUNNING` state during a deployment, as #' a percentage of the desired number of tasks (rounded up to the nearest #' integer), and while any container instances are in the `DRAINING` state #' if the service contains tasks using the EC2 launch type. This parameter #' enables you to deploy without using additional cluster capacity. For #' example, if your service has a desired number of four tasks and a #' minimum healthy percent of 50\%, the scheduler might stop two existing #' tasks to free up cluster capacity before starting two new tasks. Tasks #' for services that *do not* use a load balancer are considered healthy if #' they're in the `RUNNING` state. Tasks for services that *do* use a load #' balancer are considered healthy if they're in the `RUNNING` state and #' they're reported as healthy by the load balancer. The default value for #' minimum healthy percent is 100\%. #' #' If a service is using the `ECS` deployment controller, the **maximum #' percent** parameter represents an upper limit on the number of tasks in #' a service that are allowed in the `RUNNING` or `PENDING` state during a #' deployment, as a percentage of the desired number of tasks (rounded down #' to the nearest integer), and while any container instances are in the #' `DRAINING` state if the service contains tasks using the EC2 launch #' type. This parameter enables you to define the deployment batch size. #' For example, if your service has a desired number of four tasks and a #' maximum percent value of 200\%, the scheduler may start four new tasks #' before stopping the four older tasks (provided that the cluster #' resources required to do this are available). The default value for #' maximum percent is 200\%. 
#'
#' If a service is using either the `CODE_DEPLOY` or `EXTERNAL` deployment
#' controller types and tasks that use the EC2 launch type, the **minimum
#' healthy percent** and **maximum percent** values are used only to define
#' the lower and upper limit on the number of the tasks in the service that
#' remain in the `RUNNING` state while the container instances are in the
#' `DRAINING` state. If the tasks in the service use the Fargate launch
#' type, the minimum healthy percent and maximum percent values aren't
#' used, although they're currently visible when describing your service.
#'
#' When creating a service that uses the `EXTERNAL` deployment controller,
#' you can specify only parameters that aren't controlled at the task set
#' level. The only required parameter is the service name. You control your
#' services using the CreateTaskSet operation. For more information, see
#' [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' When the service scheduler launches new tasks, it determines task
#' placement in your cluster using the following logic:
#'
#' - Determine which of the container instances in your cluster can
#'   support your service's task definition (for example, they have the
#'   required CPU, memory, ports, and container instance attributes).
#'
#' - By default, the service scheduler attempts to balance tasks across
#'   Availability Zones in this manner (although you can choose a
#'   different placement strategy with the `placementStrategy`
#'   parameter):
#'
#'   - Sort the valid container instances, giving priority to instances
#'     that have the fewest number of running tasks for this service in
#'     their respective Availability Zone. For example, if zone A has
#'     one running service task and zones B and C each have zero, valid
#'     container instances in either zone B or C are considered optimal
#'     for placement.
#' #' - Place the new service task on a valid container instance in an #' optimal Availability Zone (based on the previous steps), #' favoring container instances with the fewest number of running #' tasks for this service. #' #' @usage #' ecs_create_service(cluster, serviceName, taskDefinition, loadBalancers, #' serviceRegistries, desiredCount, clientToken, launchType, #' capacityProviderStrategy, platformVersion, role, #' deploymentConfiguration, placementConstraints, placementStrategy, #' networkConfiguration, healthCheckGracePeriodSeconds, schedulingStrategy, #' deploymentController, tags, enableECSManagedTags, propagateTags) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster on #' which to run your service. If you do not specify a cluster, the default #' cluster is assumed. #' @param serviceName &#91;required&#93; The name of your service. Up to 255 letters (uppercase and lowercase), #' numbers, and hyphens are allowed. Service names must be unique within a #' cluster, but you can have similarly named services in multiple clusters #' within a Region or across multiple Regions. #' @param taskDefinition The `family` and `revision` (`family:revision`) or full ARN of the task #' definition to run in your service. If a `revision` is not specified, the #' latest `ACTIVE` revision is used. #' #' A task definition must be specified if the service is using either the #' `ECS` or `CODE_DEPLOY` deployment controllers. #' @param loadBalancers A load balancer object representing the load balancers to use with your #' service. For more information, see [Service Load #' Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) #' in the *Amazon Elastic Container Service Developer Guide*. 
#' #' If the service is using the rolling update (`ECS`) deployment controller #' and using either an Application Load Balancer or Network Load Balancer, #' you must specify one or more target group ARNs to attach to the service. #' The service-linked role is required for services that make use of #' multiple target groups. For more information, see [Using Service-Linked #' Roles for Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If the service is using the `CODE_DEPLOY` deployment controller, the #' service is required to use either an Application Load Balancer or #' Network Load Balancer. When creating an AWS CodeDeploy deployment group, #' you specify two target groups (referred to as a `targetGroupPair`). #' During a deployment, AWS CodeDeploy determines which task set in your #' service has the status `PRIMARY` and associates one target group with #' it, and then associates the other target group with the replacement task #' set. The load balancer can also have up to two listeners: a required #' listener for production traffic and an optional listener that allows you #' perform validation tests with Lambda functions before routing production #' traffic to it. #' #' After you create a service using the `ECS` deployment controller, the #' load balancer name or target group ARN, container name, and container #' port specified in the service definition are immutable. If you are using #' the `CODE_DEPLOY` deployment controller, these values can be changed #' when updating the service. #' #' For Application Load Balancers and Network Load Balancers, this object #' must contain the load balancer target group ARN, the container name (as #' it appears in a container definition), and the container port to access #' from the load balancer. The load balancer name parameter must be #' omitted. 
When a task from this service is placed on a container #' instance, the container instance and port combination is registered as a #' target in the target group specified here. #' #' For Classic Load Balancers, this object must contain the load balancer #' name, the container name (as it appears in a container definition), and #' the container port to access from the load balancer. The target group #' ARN parameter must be omitted. When a task from this service is placed #' on a container instance, the container instance is registered with the #' load balancer specified here. #' #' Services with tasks that use the `awsvpc` network mode (for example, #' those with the Fargate launch type) only support Application Load #' Balancers and Network Load Balancers. Classic Load Balancers are not #' supported. Also, when you create any target groups for these services, #' you must choose `ip` as the target type, not `instance`, because tasks #' that use the `awsvpc` network mode are associated with an elastic #' network interface, not an Amazon EC2 instance. #' @param serviceRegistries The details of the service discovery registries to assign to this #' service. For more information, see [Service #' Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). #' #' Service discovery is supported for Fargate tasks if you are using #' platform version v1.1.0 or later. For more information, see [AWS Fargate #' Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). #' @param desiredCount The number of instantiations of the specified task definition to place #' and keep running on your cluster. #' #' This is required if `schedulingStrategy` is `REPLICA` or is not #' specified. If `schedulingStrategy` is `DAEMON` then this is not #' required. #' @param clientToken Unique, case-sensitive identifier that you provide to ensure the #' idempotency of the request. 
Up to 32 ASCII characters are allowed. #' @param launchType The launch type on which to run your service. For more information, see #' [Amazon ECS Launch #' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If a `launchType` is specified, the `capacityProviderStrategy` parameter #' must be omitted. #' @param capacityProviderStrategy The capacity provider strategy to use for the service. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If a `capacityProviderStrategy` is specified, the `launchType` parameter #' must be omitted. If no `capacityProviderStrategy` or `launchType` is #' specified, the `defaultCapacityProviderStrategy` for the cluster is #' used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param platformVersion The platform version that your tasks in the service are running on. A #' platform version is specified only for tasks using the Fargate launch #' type. 
If one isn't specified, the `LATEST` platform version is used by #' default. For more information, see [AWS Fargate Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param role The name or full Amazon Resource Name (ARN) of the IAM role that allows #' Amazon ECS to make calls to your load balancer on your behalf. This #' parameter is only permitted if you are using a load balancer with your #' service and your task definition does not use the `awsvpc` network mode. #' If you specify the `role` parameter, you must also specify a load #' balancer object with the `loadBalancers` parameter. #' #' If your account has already created the Amazon ECS service-linked role, #' that role is used by default for your service unless you specify a role #' here. The service-linked role is required if your task definition uses #' the `awsvpc` network mode or if the service is configured to use service #' discovery, an external deployment controller, multiple target groups, or #' Elastic Inference accelerators in which case you should not specify a #' role here. For more information, see [Using Service-Linked Roles for #' Amazon #' ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If your specified role has a path other than `/`, then you must either #' specify the full role ARN (this is recommended) or prefix the role name #' with the path. For example, if a role with the name `bar` has a path of #' `/foo/` then you would specify `/foo/bar` as the role name. For more #' information, see [Friendly Names and #' Paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) #' in the *IAM User Guide*. 
#' @param deploymentConfiguration Optional deployment parameters that control how many tasks run during #' the deployment and the ordering of stopping and starting tasks. #' @param placementConstraints An array of placement constraint objects to use for tasks in your #' service. You can specify a maximum of 10 constraints per task (this #' limit includes constraints in the task definition and those specified at #' runtime). #' @param placementStrategy The placement strategy objects to use for tasks in your service. You can #' specify a maximum of five strategy rules per service. #' @param networkConfiguration The network configuration for the service. This parameter is required #' for task definitions that use the `awsvpc` network mode to receive their #' own elastic network interface, and it is not supported for other network #' modes. For more information, see [Task #' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param healthCheckGracePeriodSeconds The period of time, in seconds, that the Amazon ECS service scheduler #' should ignore unhealthy Elastic Load Balancing target health checks #' after a task has first started. This is only used when your service is #' configured to use a load balancer. If your service has a load balancer #' defined and you don't specify a health check grace period value, the #' default value of `0` is used. #' #' If your service's tasks take a while to start and respond to Elastic #' Load Balancing health checks, you can specify a health check grace #' period of up to 2,147,483,647 seconds. During that time, the Amazon ECS #' service scheduler ignores health check status. This grace period can #' prevent the service scheduler from marking tasks as unhealthy and #' stopping them before they have time to come up. #' @param schedulingStrategy The scheduling strategy to use for the service. 
For more information, #' see #' [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). #' #' There are two service scheduler strategies available: #' #' - `REPLICA`-The replica scheduling strategy places and maintains the #' desired number of tasks across your cluster. By default, the service #' scheduler spreads tasks across Availability Zones. You can use task #' placement strategies and constraints to customize task placement #' decisions. This scheduler strategy is required if the service is #' using the `CODE_DEPLOY` or `EXTERNAL` deployment controller types. #' #' - `DAEMON`-The daemon scheduling strategy deploys exactly one task on #' each active container instance that meets all of the task placement #' constraints that you specify in your cluster. The service scheduler #' also evaluates the task placement constraints for running tasks and #' will stop tasks that do not meet the placement constraints. When #' you're using this strategy, you don't need to specify a desired #' number of tasks, a task placement strategy, or use Service Auto #' Scaling policies. #' #' Tasks using the Fargate launch type or the `CODE_DEPLOY` or #' `EXTERNAL` deployment controller types don't support the `DAEMON` #' scheduling strategy. #' @param deploymentController The deployment controller to use for the service. #' @param tags The metadata that you apply to the service to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. When a service is deleted, the tags are deleted as #' well. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. 
#' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. #' @param enableECSManagedTags Specifies whether to enable Amazon ECS managed tags for the tasks within #' the service. For more information, see [Tagging Your Amazon ECS #' Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param propagateTags Specifies whether to propagate the tags from the task definition or the #' service to the tasks in the service. If no value is specified, the tags #' are not propagated. Tags can only be propagated to the tasks within the #' service during service creation. To add tags to a task after service #' creation, use the TagResource API action. 
#' #' @section Request syntax: #' ``` #' svc$create_service( #' cluster = "string", #' serviceName = "string", #' taskDefinition = "string", #' loadBalancers = list( #' list( #' targetGroupArn = "string", #' loadBalancerName = "string", #' containerName = "string", #' containerPort = 123 #' ) #' ), #' serviceRegistries = list( #' list( #' registryArn = "string", #' port = 123, #' containerName = "string", #' containerPort = 123 #' ) #' ), #' desiredCount = 123, #' clientToken = "string", #' launchType = "EC2"|"FARGATE", #' capacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ), #' platformVersion = "string", #' role = "string", #' deploymentConfiguration = list( #' deploymentCircuitBreaker = list( #' enable = TRUE|FALSE, #' rollback = TRUE|FALSE #' ), #' maximumPercent = 123, #' minimumHealthyPercent = 123 #' ), #' placementConstraints = list( #' list( #' type = "distinctInstance"|"memberOf", #' expression = "string" #' ) #' ), #' placementStrategy = list( #' list( #' type = "random"|"spread"|"binpack", #' field = "string" #' ) #' ), #' networkConfiguration = list( #' awsvpcConfiguration = list( #' subnets = list( #' "string" #' ), #' securityGroups = list( #' "string" #' ), #' assignPublicIp = "ENABLED"|"DISABLED" #' ) #' ), #' healthCheckGracePeriodSeconds = 123, #' schedulingStrategy = "REPLICA"|"DAEMON", #' deploymentController = list( #' type = "ECS"|"CODE_DEPLOY"|"EXTERNAL" #' ), #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' enableECSManagedTags = TRUE|FALSE, #' propagateTags = "TASK_DEFINITION"|"SERVICE" #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example creates a service in your default region called #' # `ecs-simple-service`. The service uses the `hello_world` task #' # definition and it maintains 10 copies of that task. 
#' svc$create_service(
#'   desiredCount = 10L,
#'   serviceName = "ecs-simple-service",
#'   taskDefinition = "hello_world"
#' )
#'
#' # This example creates a service in your default region called
#' # `ecs-simple-service-elb`. The service uses the `ecs-demo` task
#' # definition and it maintains 10 copies of that task. You must reference
#' # an existing load balancer in the same region by its name.
#' svc$create_service(
#'   desiredCount = 10L,
#'   loadBalancers = list(
#'     list(
#'       containerName = "simple-app",
#'       containerPort = 80L,
#'       loadBalancerName = "EC2Contai-EcsElast-15DCDAURT3ZO2"
#'     )
#'   ),
#'   role = "ecsServiceRole",
#'   serviceName = "ecs-simple-service-elb",
#'   taskDefinition = "console-sample-app-static"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_create_service
ecs_create_service <- function(cluster = NULL, serviceName, taskDefinition = NULL,
    loadBalancers = NULL, serviceRegistries = NULL, desiredCount = NULL,
    clientToken = NULL, launchType = NULL, capacityProviderStrategy = NULL,
    platformVersion = NULL, role = NULL, deploymentConfiguration = NULL,
    placementConstraints = NULL, placementStrategy = NULL,
    networkConfiguration = NULL, healthCheckGracePeriodSeconds = NULL,
    schedulingStrategy = NULL, deploymentController = NULL, tags = NULL,
    enableECSManagedTags = NULL, propagateTags = NULL) {
  # Operation descriptor: ECS is a JSON-over-POST API, so every call
  # targets "/" and this operation is not paginated.
  op <- new_operation(
    name = "CreateService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the generated request shape.
  input <- .ecs$create_service_input(cluster = cluster, serviceName = serviceName,
    taskDefinition = taskDefinition, loadBalancers = loadBalancers,
    serviceRegistries = serviceRegistries, desiredCount = desiredCount,
    clientToken = clientToken, launchType = launchType,
    capacityProviderStrategy = capacityProviderStrategy,
    platformVersion = platformVersion, role = role,
    deploymentConfiguration = deploymentConfiguration,
    placementConstraints = placementConstraints,
    placementStrategy = placementStrategy,
    networkConfiguration = networkConfiguration,
    healthCheckGracePeriodSeconds = healthCheckGracePeriodSeconds,
    schedulingStrategy = schedulingStrategy,
    deploymentController = deploymentController, tags = tags,
    enableECSManagedTags = enableECSManagedTags, propagateTags = propagateTags)
  output <- .ecs$create_service_output()
  # Build an ECS client from the active configuration, then send the
  # request and return the deserialized response to the caller.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$create_service <- ecs_create_service

#' Create a task set in the specified cluster and service
#'
#' @description
#' Create a task set in the specified cluster and service. This is used
#' when a service uses the `EXTERNAL` deployment controller type. For more
#' information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_create_task_set(service, cluster, externalId, taskDefinition,
#'   networkConfiguration, loadBalancers, serviceRegistries, launchType,
#'   capacityProviderStrategy, platformVersion, scale, clientToken, tags)
#'
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service to
#' create the task set in.
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to create the task set in.
#' @param externalId An optional non-unique tag that identifies this task set in external
#' systems. If the task set is associated with a service discovery
#' registry, the tasks in this task set will have the
#' `ECS_TASK_SET_EXTERNAL_ID` AWS Cloud Map attribute set to the provided
#' value.
#' @param taskDefinition &#91;required&#93; The task definition for the tasks in the task set to use.
#' @param networkConfiguration
#' @param loadBalancers A load balancer object representing the load balancer to use with the
#' task set.
The supported load balancer types are either an Application #' Load Balancer or a Network Load Balancer. #' @param serviceRegistries The details of the service discovery registries to assign to this task #' set. For more information, see [Service #' Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html). #' @param launchType The launch type that new tasks in the task set will use. For more #' information, see [Amazon ECS Launch #' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If a `launchType` is specified, the `capacityProviderStrategy` parameter #' must be omitted. #' @param capacityProviderStrategy The capacity provider strategy to use for the task set. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If a `capacityProviderStrategy` is specified, the `launchType` parameter #' must be omitted. If no `capacityProviderStrategy` or `launchType` is #' specified, the `defaultCapacityProviderStrategy` for the cluster is #' used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. 
#' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param platformVersion The platform version that the tasks in the task set should use. A #' platform version is specified only for tasks using the Fargate launch #' type. If one isn't specified, the `LATEST` platform version is used by #' default. #' @param scale #' @param clientToken Unique, case-sensitive identifier that you provide to ensure the #' idempotency of the request. Up to 32 ASCII characters are allowed. #' @param tags The metadata that you apply to the task set to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. When a service is deleted, the tags are deleted as #' well. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. 
#'
#' @section Request syntax:
#' ```
#' svc$create_task_set(
#'   service = "string",
#'   cluster = "string",
#'   externalId = "string",
#'   taskDefinition = "string",
#'   networkConfiguration = list(
#'     awsvpcConfiguration = list(
#'       subnets = list(
#'         "string"
#'       ),
#'       securityGroups = list(
#'         "string"
#'       ),
#'       assignPublicIp = "ENABLED"|"DISABLED"
#'     )
#'   ),
#'   loadBalancers = list(
#'     list(
#'       targetGroupArn = "string",
#'       loadBalancerName = "string",
#'       containerName = "string",
#'       containerPort = 123
#'     )
#'   ),
#'   serviceRegistries = list(
#'     list(
#'       registryArn = "string",
#'       port = 123,
#'       containerName = "string",
#'       containerPort = 123
#'     )
#'   ),
#'   launchType = "EC2"|"FARGATE",
#'   capacityProviderStrategy = list(
#'     list(
#'       capacityProvider = "string",
#'       weight = 123,
#'       base = 123
#'     )
#'   ),
#'   platformVersion = "string",
#'   scale = list(
#'     value = 123.0,
#'     unit = "PERCENT"
#'   ),
#'   clientToken = "string",
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_create_task_set
ecs_create_task_set <- function(service, cluster, externalId = NULL,
    taskDefinition, networkConfiguration = NULL, loadBalancers = NULL,
    serviceRegistries = NULL, launchType = NULL,
    capacityProviderStrategy = NULL, platformVersion = NULL, scale = NULL,
    clientToken = NULL, tags = NULL) {
  # Operation descriptor: ECS is a JSON-over-POST API, so every call
  # targets "/" and this operation is not paginated.
  op <- new_operation(
    name = "CreateTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the generated request shape.
  input <- .ecs$create_task_set_input(service = service, cluster = cluster,
    externalId = externalId, taskDefinition = taskDefinition,
    networkConfiguration = networkConfiguration, loadBalancers = loadBalancers,
    serviceRegistries = serviceRegistries, launchType = launchType,
    capacityProviderStrategy = capacityProviderStrategy,
    platformVersion = platformVersion, scale = scale,
    clientToken = clientToken, tags = tags)
  output <- .ecs$create_task_set_output()
  # Build an ECS client from the active configuration, then send the
  # request and return the deserialized response to the caller.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$create_task_set <- ecs_create_task_set

#' Disables an account setting for a specified IAM user, IAM role, or the
#' root user for an account
#'
#' @description
#' Disables an account setting for a specified IAM user, IAM role, or the
#' root user for an account.
#'
#' @usage
#' ecs_delete_account_setting(name, principalArn)
#'
#' @param name &#91;required&#93; The resource name for which to disable the account setting. If
#' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS
#' services is affected. If `taskLongArnFormat` is specified, the ARN and
#' resource ID for your Amazon ECS tasks is affected. If
#' `containerInstanceLongArnFormat` is specified, the ARN and resource ID
#' for your Amazon ECS container instances is affected. If `awsvpcTrunking`
#' is specified, the ENI limit for your Amazon ECS container instances is
#' affected.
#' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the
#' root user. If you specify the root user, it disables the account setting
#' for all IAM users, IAM roles, and the root user of the account unless an
#' IAM user or role explicitly overrides these settings. If this field is
#' omitted, the setting is changed only for the authenticated user.
#'
#' @section Request syntax:
#' ```
#' svc$delete_account_setting(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   principalArn = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the account setting for your user for the specified
#' # resource type.
#' svc$delete_account_setting(
#'   name = "serviceLongArnFormat"
#' )
#'
#' # This example deletes the account setting for a specific IAM user or IAM
#' # role for the specified resource type. Only the root user can view or
#' # modify the account settings for another user.
#' svc$delete_account_setting(
#'   name = "containerInstanceLongArnFormat",
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_account_setting
ecs_delete_account_setting <- function(name, principalArn = NULL) {
  # DeleteAccountSetting is a plain JSON-over-POST call: path "/" and
  # no pagination.
  operation <- new_operation(
    name = "DeleteAccountSetting",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_account_setting_input(name = name, principalArn = principalArn)
  response_shape <- .ecs$delete_account_setting_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_account_setting <- ecs_delete_account_setting

#' Deletes one or more custom attributes from an Amazon ECS resource
#'
#' @description
#' Deletes one or more custom attributes from an Amazon ECS resource.
#'
#' @usage
#' ecs_delete_attributes(cluster, attributes)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' contains the resource to delete attributes. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param attributes &#91;required&#93; The attributes to delete from your resource. You can specify up to 10
#' attributes per request. For custom attributes, specify the attribute
#' name and target ID, but do not specify the value. If you specify the
#' target ID using the short form, you must also specify the target type.
#'
#' @section Request syntax:
#' ```
#' svc$delete_attributes(
#'   cluster = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_attributes
ecs_delete_attributes <- function(cluster = NULL, attributes) {
  # DeleteAttributes is a plain JSON-over-POST call: path "/" and no
  # pagination.
  operation <- new_operation(
    name = "DeleteAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_attributes_input(cluster = cluster, attributes = attributes)
  response_shape <- .ecs$delete_attributes_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_attributes <- ecs_delete_attributes

#' Deletes the specified capacity provider
#'
#' @description
#' Deletes the specified capacity provider.
#'
#' The `FARGATE` and `FARGATE_SPOT` capacity providers are reserved and
#' cannot be deleted. You can disassociate them from a cluster using either
#' the PutClusterCapacityProviders API or by deleting the cluster.
#'
#' Prior to a capacity provider being deleted, the capacity provider must
#' be removed from the capacity provider strategy from all services. The
#' UpdateService API can be used to remove a capacity provider from a
#' service's capacity provider strategy. When updating a service, the
#' `forceNewDeployment` option can be used to ensure that any tasks using
#' the Amazon EC2 instance capacity provided by the capacity provider are
#' transitioned to use the capacity from the remaining capacity providers.
#' Only capacity providers that are not associated with a cluster can be
#' deleted. To remove a capacity provider from a cluster, you can either
#' use PutClusterCapacityProviders or delete the cluster.
#'
#' @usage
#' ecs_delete_capacity_provider(capacityProvider)
#'
#' @param capacityProvider &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the capacity
#' provider to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_capacity_provider(
#'   capacityProvider = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_capacity_provider
ecs_delete_capacity_provider <- function(capacityProvider) {
  # DeleteCapacityProvider is a plain JSON-over-POST call: path "/" and
  # no pagination.
  operation <- new_operation(
    name = "DeleteCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_capacity_provider_input(capacityProvider = capacityProvider)
  response_shape <- .ecs$delete_capacity_provider_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_capacity_provider <- ecs_delete_capacity_provider

#' Deletes the specified cluster
#'
#' @description
#' Deletes the specified cluster. The cluster will transition to the
#' `INACTIVE` state. Clusters with an `INACTIVE` status may remain
#' discoverable in your account for a period of time. However, this
#' behavior is subject to change in the future, so you should not rely on
#' `INACTIVE` clusters persisting.
#'
#' You must deregister all container instances from this cluster before you
#' may delete it. You can list the container instances in a cluster with
#' ListContainerInstances and deregister them with
#' DeregisterContainerInstance.
#'
#' @usage
#' ecs_delete_cluster(cluster)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster to
#' delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cluster(
#'   cluster = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes an empty cluster in your default region.
#' svc$delete_cluster(
#'   cluster = "my_cluster"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_cluster
ecs_delete_cluster <- function(cluster) {
  # DeleteCluster is a plain JSON-over-POST call: path "/" and no
  # pagination.
  operation <- new_operation(
    name = "DeleteCluster",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_cluster_input(cluster = cluster)
  response_shape <- .ecs$delete_cluster_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_cluster <- ecs_delete_cluster

#' Deletes a specified service within a cluster
#'
#' @description
#' Deletes a specified service within a cluster. You can delete a service
#' if you have no running tasks in it and the desired task count is zero.
#' If the service is actively maintaining tasks, you cannot delete it, and
#' you must update the service to a desired task count of zero. For more
#' information, see UpdateService.
#'
#' When you delete a service, if there are still running tasks that require
#' cleanup, the service status moves from `ACTIVE` to `DRAINING`, and the
#' service is no longer visible in the console or in the ListServices API
#' operation. After all tasks have transitioned to either `STOPPING` or
#' `STOPPED` status, the service status moves from `DRAINING` to
#' `INACTIVE`. Services in the `DRAINING` or `INACTIVE` status can still be
#' viewed with the DescribeServices API operation. However, in the future,
#' `INACTIVE` services may be cleaned up and purged from Amazon ECS record
#' keeping, and DescribeServices calls on those services return a
#' `ServiceNotFoundException` error.
#'
#' If you attempt to create a new service with the same name as an existing
#' service in either `ACTIVE` or `DRAINING` status, you receive an error.
#'
#' @usage
#' ecs_delete_service(cluster, service, force)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to delete.
#' If you do not specify a cluster, the
#' default cluster is assumed.
#' @param service &#91;required&#93; The name of the service to delete.
#' @param force If `true`, allows you to delete a service even if it has not been scaled
#' down to zero tasks. It is only necessary to use this if the service is
#' using the `REPLICA` scheduling strategy.
#'
#' @section Request syntax:
#' ```
#' svc$delete_service(
#'   cluster = "string",
#'   service = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the my-http-service service. The service must have
#' # a desired count and running count of 0 before you can delete it.
#' svc$delete_service(
#'   service = "my-http-service"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_delete_service
ecs_delete_service <- function(cluster = NULL, service, force = NULL) {
  # DeleteService is a plain JSON-over-POST call: path "/" and no
  # pagination.
  operation <- new_operation(
    name = "DeleteService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_service_input(cluster = cluster, service = service, force = force)
  response_shape <- .ecs$delete_service_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_service <- ecs_delete_service

#' Deletes a specified task set within a service
#'
#' @description
#' Deletes a specified task set within a service. This is used when a
#' service uses the `EXTERNAL` deployment controller type. For more
#' information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_delete_task_set(cluster, service, taskSet, force)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in to delete.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' hosts the task set to delete.
#' @param taskSet &#91;required&#93; The task set ID or full Amazon Resource Name (ARN) of the task set to
#' delete.
#' @param force If `true`, this allows you to delete a task set even if it hasn't been
#' scaled down to zero.
#'
#' @section Request syntax:
#' ```
#' svc$delete_task_set(
#'   cluster = "string",
#'   service = "string",
#'   taskSet = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_delete_task_set
ecs_delete_task_set <- function(cluster, service, taskSet, force = NULL) {
  # DeleteTaskSet is a plain JSON-over-POST call: path "/" and no
  # pagination.
  operation <- new_operation(
    name = "DeleteTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal arguments into the generated request/response shapes.
  request_body <- .ecs$delete_task_set_input(cluster = cluster, service = service, taskSet = taskSet, force = force)
  response_shape <- .ecs$delete_task_set_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .ecs$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.ecs$operations$delete_task_set <- ecs_delete_task_set

#' Deregisters an Amazon ECS container instance from the specified cluster
#'
#' @description
#' Deregisters an Amazon ECS container instance from the specified cluster.
#' This instance is no longer available to run tasks.
#'
#' If you intend to use the container instance for some other purpose after
#' deregistration, you should stop all of the tasks running on the
#' container instance before deregistration. That prevents any orphaned
#' tasks from consuming resources.
#'
#' Deregistering a container instance removes the instance from a cluster,
#' but it does not terminate the EC2 instance. If you are finished using
#' the instance, be sure to terminate it in the Amazon EC2 console to stop
#' billing.
#'
#' If you terminate a running container instance, Amazon ECS automatically
#' deregisters the instance from your cluster (stopped container instances
#' or instances with disconnected agents are not automatically deregistered
#' when terminated).
#'
#' @usage
#' ecs_deregister_container_instance(cluster, containerInstance, force)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instance to deregister. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param containerInstance &#91;required&#93; The container instance ID or full ARN of the container instance to
#' deregister. The ARN contains the `arn:aws:ecs` namespace, followed by
#' the Region of the container instance, the AWS account ID of the
#' container instance owner, the `container-instance` namespace, and then
#' the container instance ID. For example,
#' `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID`.
#' @param force Forces the deregistration of the container instance. If you have tasks
#' running on the container instance when you deregister it with the
#' `force` option, these tasks remain running until you terminate the
#' instance or the tasks stop through some other means, but they are
#' orphaned (no longer monitored or accounted for by Amazon ECS). If an
#' orphaned task on your container instance is part of an Amazon ECS
#' service, then the service scheduler starts another copy of that task, on
#' a different container instance if possible.
#'
#' Any containers in orphaned service tasks that are registered with a
#' Classic Load Balancer or an Application Load Balancer target group are
#' deregistered. They begin connection draining according to the settings
#' on the load balancer or target group.
#'
#' @section Request syntax:
#' ```
#' svc$deregister_container_instance(
#'   cluster = "string",
#'   containerInstance = "string",
#'   force = TRUE|FALSE
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deregisters a container instance from the specified cluster
#' # in your default region. If there are still tasks running on the
#' # container instance, you must either stop those tasks before
#' # deregistering, or use the force option.
#' svc$deregister_container_instance(
#'   cluster = "default",
#'   containerInstance = "container_instance_UUID",
#'   force = TRUE
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_deregister_container_instance
ecs_deregister_container_instance <- function(cluster = NULL, containerInstance, force = NULL) {
  # Describe the DeregisterContainerInstance API call.
  operation <- new_operation(
    name = "DeregisterContainerInstance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$deregister_container_instance_input(
    cluster = cluster,
    containerInstance = containerInstance,
    force = force
  )
  op_output <- .ecs$deregister_container_instance_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$deregister_container_instance <- ecs_deregister_container_instance

#' Deregisters the specified task definition by family and revision
#'
#' @description
#' Deregisters the specified task definition by family and revision. Upon
#' deregistration, the task definition is marked as `INACTIVE`. Existing
#' tasks and services that reference an `INACTIVE` task definition continue
#' to run without disruption. Existing services that reference an
#' `INACTIVE` task definition can still scale up or down by modifying the
#' service's desired count.
#'
#' You cannot use an `INACTIVE` task definition to run new tasks or create
#' new services, and you cannot update an existing service to reference an
#' `INACTIVE` task definition. However, there may be up to a 10-minute
#' window following deregistration where these restrictions have not yet
#' taken effect.
#'
#' At this time, `INACTIVE` task definitions remain discoverable in your
#' account indefinitely. However, this behavior is subject to change in the
#' future, so you should not rely on `INACTIVE` task definitions persisting
#' beyond the lifecycle of any associated tasks and services.
#'
#' @usage
#' ecs_deregister_task_definition(taskDefinition)
#'
#' @param taskDefinition &#91;required&#93; The `family` and `revision` (`family:revision`) or full Amazon Resource
#' Name (ARN) of the task definition to deregister. You must specify a
#' `revision`.
#'
#' @section Request syntax:
#' ```
#' svc$deregister_task_definition(
#'   taskDefinition = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_deregister_task_definition
ecs_deregister_task_definition <- function(taskDefinition) {
  # Describe the DeregisterTaskDefinition API call.
  operation <- new_operation(
    name = "DeregisterTaskDefinition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$deregister_task_definition_input(taskDefinition = taskDefinition)
  op_output <- .ecs$deregister_task_definition_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$deregister_task_definition <- ecs_deregister_task_definition

#' Describes one or more of your capacity providers
#'
#' @description
#' Describes one or more of your capacity providers.
#'
#' @usage
#' ecs_describe_capacity_providers(capacityProviders, include, maxResults,
#'   nextToken)
#'
#' @param capacityProviders The short name or full Amazon Resource Name (ARN) of one or more
#' capacity providers. Up to `100` capacity providers can be described in
#' an action.
#' @param include Specifies whether or not you want to see the resource tags for the
#' capacity provider. If `TAGS` is specified, the tags are included in the
#' response. If this field is omitted, tags are not included in the
#' response.
#' @param maxResults The maximum number of account setting results returned by
#' `DescribeCapacityProviders` in paginated output. When this parameter is
#' used, `DescribeCapacityProviders` only returns `maxResults` results in a
#' single page along with a `nextToken` response element. The remaining
#' results of the initial request can be seen by sending another
#' `DescribeCapacityProviders` request with the returned `nextToken` value.
#' This value can be between 1 and 10. If this parameter is not used, then
#' `DescribeCapacityProviders` returns up to 10 results and a `nextToken`
#' value if applicable.
#' @param nextToken The `nextToken` value returned from a previous paginated
#' `DescribeCapacityProviders` request where `maxResults` was used and the
#' results exceeded the value of that parameter. Pagination continues from
#' the end of the previous results that returned the `nextToken` value.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#'
#' @section Request syntax:
#' ```
#' svc$describe_capacity_providers(
#'   capacityProviders = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   ),
#'   maxResults = 123,
#'   nextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_describe_capacity_providers
ecs_describe_capacity_providers <- function(capacityProviders = NULL, include = NULL, maxResults = NULL, nextToken = NULL) {
  # Describe the DescribeCapacityProviders API call.
  operation <- new_operation(
    name = "DescribeCapacityProviders",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_capacity_providers_input(
    capacityProviders = capacityProviders,
    include = include,
    maxResults = maxResults,
    nextToken = nextToken
  )
  op_output <- .ecs$describe_capacity_providers_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_capacity_providers <- ecs_describe_capacity_providers

#' Describes one or more of your clusters
#'
#' @description
#' Describes one or more of your clusters.
#'
#' @usage
#' ecs_describe_clusters(clusters, include)
#'
#' @param clusters A list of up to 100 cluster names or full cluster Amazon Resource Name
#' (ARN) entries. If you do not specify a cluster, the default cluster is
#' assumed.
#' @param include Whether to include additional information about your clusters in the
#' response. If this field is omitted, the attachments, statistics, and
#' tags are not included.
#'
#' If `ATTACHMENTS` is specified, the attachments for the container
#' instances or tasks within the cluster are included.
#'
#' If `SETTINGS` is specified, the settings for the cluster are included.
#'
#' If `STATISTICS` is specified, the following additional information,
#' separated by launch type, is included:
#'
#' -   runningEC2TasksCount
#'
#' -   runningFargateTasksCount
#'
#' -   pendingEC2TasksCount
#'
#' -   pendingFargateTasksCount
#'
#' -   activeEC2ServiceCount
#'
#' -   activeFargateServiceCount
#'
#' -   drainingEC2ServiceCount
#'
#' -   drainingFargateServiceCount
#'
#' If `TAGS` is specified, the metadata tags associated with the cluster
#' are included.
#'
#' @section Request syntax:
#' ```
#' svc$describe_clusters(
#'   clusters = list(
#'     "string"
#'   ),
#'   include = list(
#'     "ATTACHMENTS"|"SETTINGS"|"STATISTICS"|"TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified cluster in your
#' # default region.
#' svc$describe_clusters(
#'   clusters = list(
#'     "default"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_clusters
ecs_describe_clusters <- function(clusters = NULL, include = NULL) {
  # Describe the DescribeClusters API call.
  operation <- new_operation(
    name = "DescribeClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_clusters_input(clusters = clusters, include = include)
  op_output <- .ecs$describe_clusters_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_clusters <- ecs_describe_clusters

#' Describes Amazon Elastic Container Service container instances
#'
#' @description
#' Describes Amazon Elastic Container Service container instances. Returns
#' metadata about registered and remaining resources on each container
#' instance requested.
#'
#' @usage
#' ecs_describe_container_instances(cluster, containerInstances, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instances to describe. If you do not specify a
#' cluster, the default cluster is assumed. This parameter is required if
#' the container instance or container instances you are describing were
#' launched in any cluster other than the default cluster.
#' @param containerInstances &#91;required&#93; A list of up to 100 container instance IDs or full Amazon Resource Name
#' (ARN) entries.
#' @param include Specifies whether you want to see the resource tags for the container
#' instance. If `TAGS` is specified, the tags are included in the response.
#' If this field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_container_instances(
#'   cluster = "string",
#'   containerInstances = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified container instance
#' # in your default region, using the container instance UUID as an
#' # identifier.
#' svc$describe_container_instances(
#'   cluster = "default",
#'   containerInstances = list(
#'     "f2756532-8f13-4d53-87c9-aed50dc94cd7"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_container_instances
ecs_describe_container_instances <- function(cluster = NULL, containerInstances, include = NULL) {
  # Describe the DescribeContainerInstances API call.
  operation <- new_operation(
    name = "DescribeContainerInstances",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_container_instances_input(
    cluster = cluster,
    containerInstances = containerInstances,
    include = include
  )
  op_output <- .ecs$describe_container_instances_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_container_instances <- ecs_describe_container_instances

#' Describes the specified services running in your cluster
#'
#' @description
#' Describes the specified services running in your cluster.
#'
#' @usage
#' ecs_describe_services(cluster, services, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service to describe. If you do not specify a cluster, the
#' default cluster is assumed. This parameter is required if the service or
#' services you are describing were launched in any cluster other than the
#' default cluster.
#' @param services &#91;required&#93; A list of services to describe. You may specify up to 10 services to
#' describe in a single operation.
#' @param include Specifies whether you want to see the resource tags for the service. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_services(
#'   cluster = "string",
#'   services = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides descriptive information about the service named
#' # `ecs-simple-service`.
#' svc$describe_services(
#'   services = list(
#'     "ecs-simple-service"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_services
ecs_describe_services <- function(cluster = NULL, services, include = NULL) {
  # Describe the DescribeServices API call.
  operation <- new_operation(
    name = "DescribeServices",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_services_input(
    cluster = cluster,
    services = services,
    include = include
  )
  op_output <- .ecs$describe_services_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_services <- ecs_describe_services

#' Describes a task definition
#'
#' @description
#' Describes a task definition. You can specify a `family` and `revision`
#' to find information about a specific task definition, or you can simply
#' specify the family to find the latest `ACTIVE` revision in that family.
#'
#' You can only describe `INACTIVE` task definitions while an active task
#' or service references them.
#'
#' @usage
#' ecs_describe_task_definition(taskDefinition, include)
#'
#' @param taskDefinition &#91;required&#93; The `family` for the latest `ACTIVE` revision, `family` and `revision`
#' (`family:revision`) for a specific revision in the family, or full
#' Amazon Resource Name (ARN) of the task definition to describe.
#' @param include Specifies whether to see the resource tags for the task definition. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_task_definition(
#'   taskDefinition = "string",
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified task definition.
#' svc$describe_task_definition(
#'   taskDefinition = "hello_world:8"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_task_definition
ecs_describe_task_definition <- function(taskDefinition, include = NULL) {
  # Describe the DescribeTaskDefinition API call.
  operation <- new_operation(
    name = "DescribeTaskDefinition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_task_definition_input(
    taskDefinition = taskDefinition,
    include = include
  )
  op_output <- .ecs$describe_task_definition_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_task_definition <- ecs_describe_task_definition

#' Describes the task sets in the specified cluster and service
#'
#' @description
#' Describes the task sets in the specified cluster and service. This is
#' used when a service uses the `EXTERNAL` deployment controller type. For
#' more information, see [Amazon ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_describe_task_sets(cluster, service, taskSets, include)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task sets exist in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task sets exist in.
#' @param taskSets The ID or full Amazon Resource Name (ARN) of task sets to describe.
#' @param include Specifies whether to see the resource tags for the task set. If `TAGS`
#' is specified, the tags are included in the response. If this field is
#' omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_task_sets(
#'   cluster = "string",
#'   service = "string",
#'   taskSets = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_describe_task_sets
ecs_describe_task_sets <- function(cluster, service, taskSets = NULL, include = NULL) {
  # Describe the DescribeTaskSets API call.
  operation <- new_operation(
    name = "DescribeTaskSets",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_task_sets_input(
    cluster = cluster,
    service = service,
    taskSets = taskSets,
    include = include
  )
  op_output <- .ecs$describe_task_sets_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_task_sets <- ecs_describe_task_sets

#' Describes a specified task or tasks
#'
#' @description
#' Describes a specified task or tasks.
#'
#' @usage
#' ecs_describe_tasks(cluster, tasks, include)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the task or tasks to describe. If you do not specify a cluster,
#' the default cluster is assumed. This parameter is required if the task
#' or tasks you are describing were launched in any cluster other than the
#' default cluster.
#' @param tasks &#91;required&#93; A list of up to 100 task IDs or full ARN entries.
#' @param include Specifies whether you want to see the resource tags for the task. If
#' `TAGS` is specified, the tags are included in the response. If this
#' field is omitted, tags are not included in the response.
#'
#' @section Request syntax:
#' ```
#' svc$describe_tasks(
#'   cluster = "string",
#'   tasks = list(
#'     "string"
#'   ),
#'   include = list(
#'     "TAGS"
#'   )
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example provides a description of the specified task, using the
#' # task UUID as an identifier.
#' svc$describe_tasks(
#'   tasks = list(
#'     "c5cba4eb-5dad-405e-96db-71ef8eefe6a8"
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_describe_tasks
ecs_describe_tasks <- function(cluster = NULL, tasks, include = NULL) {
  # Describe the DescribeTasks API call.
  operation <- new_operation(
    name = "DescribeTasks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$describe_tasks_input(
    cluster = cluster,
    tasks = tasks,
    include = include
  )
  op_output <- .ecs$describe_tasks_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$describe_tasks <- ecs_describe_tasks

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Returns an endpoint for the Amazon ECS agent to poll for updates.
#'
#' @usage
#' ecs_discover_poll_endpoint(containerInstance, cluster)
#'
#' @param containerInstance The container instance ID or full ARN of the container instance. The ARN
#' contains the `arn:aws:ecs` namespace, followed by the Region of the
#' container instance, the AWS account ID of the container instance owner,
#' the `container-instance` namespace, and then the container instance ID.
#' For example,
#' `arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID`.
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster to
#' which the container instance belongs.
#'
#' @section Request syntax:
#' ```
#' svc$discover_poll_endpoint(
#'   containerInstance = "string",
#'   cluster = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_discover_poll_endpoint
ecs_discover_poll_endpoint <- function(containerInstance = NULL, cluster = NULL) {
  # Describe the DiscoverPollEndpoint API call (agent-internal action).
  operation <- new_operation(
    name = "DiscoverPollEndpoint",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$discover_poll_endpoint_input(
    containerInstance = containerInstance,
    cluster = cluster
  )
  op_output <- .ecs$discover_poll_endpoint_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$discover_poll_endpoint <- ecs_discover_poll_endpoint

#' Lists the account settings for a specified principal
#'
#' @description
#' Lists the account settings for a specified principal.
#'
#' @usage
#' ecs_list_account_settings(name, value, principalArn, effectiveSettings,
#'   nextToken, maxResults)
#'
#' @param name The name of the account setting you want to list the settings for.
#' @param value The value of the account settings with which to filter results. You must
#' also specify an account setting name to use this parameter.
#' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the
#' root user. If this field is omitted, the account settings are listed
#' only for the authenticated user.
#' @param effectiveSettings Specifies whether to return the effective settings. If `true`, the
#' account settings for the root user or the default setting for the
#' `principalArn` are returned. If `false`, the account settings for the
#' `principalArn` are returned if they are set. Otherwise, no account
#' settings are returned.
#' @param nextToken The `nextToken` value returned from a `ListAccountSettings` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of account setting results returned by
#' `ListAccountSettings` in paginated output. When this parameter is used,
#' `ListAccountSettings` only returns `maxResults` results in a single page
#' along with a `nextToken` response element. The remaining results of the
#' initial request can be seen by sending another `ListAccountSettings`
#' request with the returned `nextToken` value. This value can be between 1
#' and 10. If this parameter is not used, then `ListAccountSettings`
#' returns up to 10 results and a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_account_settings(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   value = "string",
#'   principalArn = "string",
#'   effectiveSettings = TRUE|FALSE,
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example displays the effective account settings for your account.
#' svc$list_account_settings(
#'   effectiveSettings = TRUE
#' )
#'
#' # This example displays the effective account settings for the specified
#' # user or role.
#' svc$list_account_settings(
#'   effectiveSettings = TRUE,
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_account_settings
ecs_list_account_settings <- function(name = NULL, value = NULL, principalArn = NULL, effectiveSettings = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListAccountSettings API call.
  operation <- new_operation(
    name = "ListAccountSettings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$list_account_settings_input(
    name = name,
    value = value,
    principalArn = principalArn,
    effectiveSettings = effectiveSettings,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_account_settings_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$list_account_settings <- ecs_list_account_settings

#' Lists the attributes for Amazon ECS resources within a specified target
#' type and cluster
#'
#' @description
#' Lists the attributes for Amazon ECS resources within a specified target
#' type and cluster. When you specify a target type and cluster,
#' `ListAttributes` returns a list of attribute objects, one for each
#' attribute on each resource. You can filter the list of results to a
#' single attribute name to only return results that have that name. You
#' can also filter the results by attribute name and value, for example, to
#' see which container instances in a cluster are running a Linux AMI
#' (`ecs.os-type=linux`).
#'
#' @usage
#' ecs_list_attributes(cluster, targetType, attributeName, attributeValue,
#'   nextToken, maxResults)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster to list
#' attributes. If you do not specify a cluster, the default cluster is
#' assumed.
#' @param targetType &#91;required&#93; The type of the target with which to list attributes.
#' @param attributeName The name of the attribute with which to filter the results.
#' @param attributeValue The value of the attribute with which to filter results. You must also
#' specify an attribute name to use this parameter.
#' @param nextToken The `nextToken` value returned from a `ListAttributes` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of cluster results returned by `ListAttributes` in
#' paginated output. When this parameter is used, `ListAttributes` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListAttributes` request with the returned
#' `nextToken` value. This value can be between 1 and 100. If this
#' parameter is not used, then `ListAttributes` returns up to 100 results
#' and a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_attributes(
#'   cluster = "string",
#'   targetType = "container-instance",
#'   attributeName = "string",
#'   attributeValue = "string",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_list_attributes
ecs_list_attributes <- function(cluster = NULL, targetType, attributeName = NULL, attributeValue = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListAttributes API call.
  operation <- new_operation(
    name = "ListAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$list_attributes_input(
    cluster = cluster,
    targetType = targetType,
    attributeName = attributeName,
    attributeValue = attributeValue,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_attributes_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$list_attributes <- ecs_list_attributes

#' Returns a list of existing clusters
#'
#' @description
#' Returns a list of existing clusters.
#'
#' @usage
#' ecs_list_clusters(nextToken, maxResults)
#'
#' @param nextToken The `nextToken` value returned from a `ListClusters` request indicating
#' that more results are available to fulfill the request and further calls
#' will be needed. If `maxResults` was provided, it is possible the number
#' of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of cluster results returned by `ListClusters` in
#' paginated output. When this parameter is used, `ListClusters` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListClusters` request with the returned
#' `nextToken` value. This value can be between 1 and 100. If this
#' parameter is not used, then `ListClusters` returns up to 100 results and
#' a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_clusters(
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your available clusters in your default
#' # region.
#' svc$list_clusters()
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_clusters
ecs_list_clusters <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListClusters API call.
  operation <- new_operation(
    name = "ListClusters",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$list_clusters_input(nextToken = nextToken, maxResults = maxResults)
  op_output <- .ecs$list_clusters_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$list_clusters <- ecs_list_clusters

#' Returns a list of container instances in a specified cluster
#'
#' @description
#' Returns a list of container instances in a specified cluster. You can
#' filter the results of a `ListContainerInstances` operation with cluster
#' query language statements inside the `filter` parameter. For more
#' information, see [Cluster Query
#' Language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_list_container_instances(cluster, filter, nextToken, maxResults,
#'   status)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instances to list. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param filter You can filter the results of a `ListContainerInstances` operation with
#' cluster query language statements. For more information, see [Cluster
#' Query
#' Language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#' @param nextToken The `nextToken` value returned from a `ListContainerInstances` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of container instance results returned by
#' `ListContainerInstances` in paginated output. When this parameter is
#' used, `ListContainerInstances` only returns `maxResults` results in a
#' single page along with a `nextToken` response element. The remaining
#' results of the initial request can be seen by sending another
#' `ListContainerInstances` request with the returned `nextToken` value.
#' This value can be between 1 and 100. If this parameter is not used, then
#' `ListContainerInstances` returns up to 100 results and a `nextToken`
#' value if applicable.
#' @param status Filters the container instances by status. For example, if you specify
#' the `DRAINING` status, the results include only container instances that
#' have been set to `DRAINING` using UpdateContainerInstancesState. If you
#' do not specify this parameter, the default is to include container
#' instances set to all states other than `INACTIVE`.
#'
#' @section Request syntax:
#' ```
#' svc$list_container_instances(
#'   cluster = "string",
#'   filter = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   status = "ACTIVE"|"DRAINING"|"REGISTERING"|"DEREGISTERING"|"REGISTRATION_FAILED"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your available container instances in the
#' # specified cluster in your default region.
#' svc$list_container_instances(
#'   cluster = "default"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_container_instances
ecs_list_container_instances <- function(cluster = NULL, filter = NULL, nextToken = NULL, maxResults = NULL, status = NULL) {
  # Describe the ListContainerInstances API call.
  operation <- new_operation(
    name = "ListContainerInstances",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  op_input <- .ecs$list_container_instances_input(
    cluster = cluster,
    filter = filter,
    nextToken = nextToken,
    maxResults = maxResults,
    status = status
  )
  op_output <- .ecs$list_container_instances_output()
  # Issue the request through a client built from the active configuration.
  client <- .ecs$service(get_config())
  req <- new_request(client, operation, op_input, op_output)
  send_request(req)
}
.ecs$operations$list_container_instances <- ecs_list_container_instances

#' Lists the services that are running in a specified cluster
#'
#' @description
#' Lists the services that are running in a specified cluster.
#'
#' @usage
#' ecs_list_services(cluster, nextToken, maxResults, launchType,
#'   schedulingStrategy)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the services to list. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param nextToken The `nextToken` value returned from a `ListServices` request indicating
#' that more results are available to fulfill the request and further calls
#' will be needed. If `maxResults` was provided, it is possible the number
#' of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of service results returned by `ListServices` in
#' paginated output. When this parameter is used, `ListServices` only
#' returns `maxResults` results in a single page along with a `nextToken`
#' response element. The remaining results of the initial request can be
#' seen by sending another `ListServices` request with the returned
#' `nextToken` value. This value can be between 1 and 100. If this
#' parameter is not used, then `ListServices` returns up to 10 results and
#' a `nextToken` value if applicable.
#' @param launchType The launch type for the services to list.
#' @param schedulingStrategy The scheduling strategy for services to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_services(
#'   cluster = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   launchType = "EC2"|"FARGATE",
#'   schedulingStrategy = "REPLICA"|"DAEMON"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists the services running in the default cluster for an
#' # account.
#' svc$list_services()
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_services
ecs_list_services <- function(cluster = NULL, nextToken = NULL,
                              maxResults = NULL, launchType = NULL,
                              schedulingStrategy = NULL) {
  # Static descriptor for the ListServices API call.
  operation <- new_operation(
    name = "ListServices",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$list_services_input(
    cluster = cluster,
    nextToken = nextToken,
    maxResults = maxResults,
    launchType = launchType,
    schedulingStrategy = schedulingStrategy
  )
  op_output <- .ecs$list_services_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_services <- ecs_list_services

#' List the tags for an Amazon ECS resource
#'
#' @description
#' List the tags for an Amazon ECS resource.
#'
#' @usage
#' ecs_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) that identifies the resource for which to
#' list the tags. Currently, the supported resources are Amazon ECS tasks,
#' services, task definitions, clusters, and container instances.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#'   resourceArn = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists the tags for the 'dev' cluster.
#' svc$list_tags_for_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_tags_for_resource
ecs_list_tags_for_resource <- function(resourceArn) {
  # Static descriptor for the ListTagsForResource API call.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied ARN into the request shape.
  op_input <- .ecs$list_tags_for_resource_input(resourceArn = resourceArn)
  op_output <- .ecs$list_tags_for_resource_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_tags_for_resource <- ecs_list_tags_for_resource

#' Returns a list of task definition families that are registered to your
#' account (which may include task definition families that no longer have
#' any ACTIVE task definition revisions)
#'
#' @description
#' Returns a list of task definition families that are registered to your
#' account (which may include task definition families that no longer have
#' any `ACTIVE` task definition revisions).
#'
#' You can filter out task definition families that do not contain any
#' `ACTIVE` task definition revisions by setting the `status` parameter to
#' `ACTIVE`. You can also filter the results with the `familyPrefix`
#' parameter.
#'
#' @usage
#' ecs_list_task_definition_families(familyPrefix, status, nextToken,
#'   maxResults)
#'
#' @param familyPrefix The `familyPrefix` is a string that is used to filter the results of
#' `ListTaskDefinitionFamilies`. If you specify a `familyPrefix`, only task
#' definition family names that begin with the `familyPrefix` string are
#' returned.
#' @param status The task definition family status with which to filter the
#' `ListTaskDefinitionFamilies` results. By default, both `ACTIVE` and
#' `INACTIVE` task definition families are listed. If this parameter is set
#' to `ACTIVE`, only task definition families that have an `ACTIVE` task
#' definition revision are returned. 
If this parameter is set to
#' `INACTIVE`, only task definition families that do not have any `ACTIVE`
#' task definition revisions are returned. If you paginate the resulting
#' output, be sure to keep the `status` value constant in each subsequent
#' request.
#' @param nextToken The `nextToken` value returned from a `ListTaskDefinitionFamilies`
#' request indicating that more results are available to fulfill the
#' request and further calls will be needed. If `maxResults` was provided,
#' it is possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of task definition family results returned by
#' `ListTaskDefinitionFamilies` in paginated output. When this parameter is
#' used, `ListTaskDefinitionFamilies` only returns `maxResults` results in
#' a single page along with a `nextToken` response element. The remaining
#' results of the initial request can be seen by sending another
#' `ListTaskDefinitionFamilies` request with the returned `nextToken`
#' value. This value can be between 1 and 100. If this parameter is not
#' used, then `ListTaskDefinitionFamilies` returns up to 100 results and a
#' `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_task_definition_families(
#'   familyPrefix = "string",
#'   status = "ACTIVE"|"INACTIVE"|"ALL",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your registered task definition families.
#' svc$list_task_definition_families()
#'
#' # This example lists the task definition revisions that start with "hpcc".
#' svc$list_task_definition_families(
#'   familyPrefix = "hpcc"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_task_definition_families
ecs_list_task_definition_families <- function(familyPrefix = NULL,
                                              status = NULL, nextToken = NULL,
                                              maxResults = NULL) {
  # Static descriptor for the ListTaskDefinitionFamilies API call.
  operation <- new_operation(
    name = "ListTaskDefinitionFamilies",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$list_task_definition_families_input(
    familyPrefix = familyPrefix,
    status = status,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_task_definition_families_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_task_definition_families <- ecs_list_task_definition_families

#' Returns a list of task definitions that are registered to your account
#'
#' @description
#' Returns a list of task definitions that are registered to your account.
#' You can filter the results by family name with the `familyPrefix`
#' parameter or by status with the `status` parameter.
#'
#' @usage
#' ecs_list_task_definitions(familyPrefix, status, sort, nextToken,
#'   maxResults)
#'
#' @param familyPrefix The full family name with which to filter the `ListTaskDefinitions`
#' results. Specifying a `familyPrefix` limits the listed task definitions
#' to task definition revisions that belong to that family.
#' @param status The task definition status with which to filter the
#' `ListTaskDefinitions` results. By default, only `ACTIVE` task
#' definitions are listed. By setting this parameter to `INACTIVE`, you can
#' view task definitions that are `INACTIVE` as long as an active task or
#' service still references them. If you paginate the resulting output, be
#' sure to keep the `status` value constant in each subsequent request.
#' @param sort The order in which to sort the results. Valid values are `ASC` and
#' `DESC`. 
By default (`ASC`), task definitions are listed
#' lexicographically by family name and in ascending numerical order by
#' revision so that the newest task definitions in a family are listed
#' last. Setting this parameter to `DESC` reverses the sort order on family
#' name and revision so that the newest task definitions in a family are
#' listed first.
#' @param nextToken The `nextToken` value returned from a `ListTaskDefinitions` request
#' indicating that more results are available to fulfill the request and
#' further calls will be needed. If `maxResults` was provided, it is
#' possible the number of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of task definition results returned by
#' `ListTaskDefinitions` in paginated output. When this parameter is used,
#' `ListTaskDefinitions` only returns `maxResults` results in a single page
#' along with a `nextToken` response element. The remaining results of the
#' initial request can be seen by sending another `ListTaskDefinitions`
#' request with the returned `nextToken` value. This value can be between 1
#' and 100. If this parameter is not used, then `ListTaskDefinitions`
#' returns up to 100 results and a `nextToken` value if applicable.
#'
#' @section Request syntax:
#' ```
#' svc$list_task_definitions(
#'   familyPrefix = "string",
#'   status = "ACTIVE"|"INACTIVE",
#'   sort = "ASC"|"DESC",
#'   nextToken = "string",
#'   maxResults = 123
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of your registered task definitions.
#' svc$list_task_definitions()
#'
#' # This example lists the task definition revisions of a specified family.
#' svc$list_task_definitions(
#'   familyPrefix = "wordpress"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_task_definitions
ecs_list_task_definitions <- function(familyPrefix = NULL, status = NULL,
                                      sort = NULL, nextToken = NULL,
                                      maxResults = NULL) {
  # Static descriptor for the ListTaskDefinitions API call.
  operation <- new_operation(
    name = "ListTaskDefinitions",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$list_task_definitions_input(
    familyPrefix = familyPrefix,
    status = status,
    sort = sort,
    nextToken = nextToken,
    maxResults = maxResults
  )
  op_output <- .ecs$list_task_definitions_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_task_definitions <- ecs_list_task_definitions

#' Returns a list of tasks for a specified cluster
#'
#' @description
#' Returns a list of tasks for a specified cluster. You can filter the
#' results by family name, by a particular container instance, or by the
#' desired status of the task with the `family`, `containerInstance`, and
#' `desiredStatus` parameters.
#'
#' Recently stopped tasks might appear in the returned results. Currently,
#' stopped tasks appear in the returned results for at least one hour.
#'
#' @usage
#' ecs_list_tasks(cluster, containerInstance, family, nextToken,
#'   maxResults, startedBy, serviceName, desiredStatus, launchType)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the tasks to list. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param containerInstance The container instance ID or full ARN of the container instance with
#' which to filter the `ListTasks` results. Specifying a
#' `containerInstance` limits the results to tasks that belong to that
#' container instance.
#' @param family The name of the family with which to filter the `ListTasks` results.
#' Specifying a `family` limits the results to tasks that belong to that
#' family.
#' @param nextToken The `nextToken` value returned from a `ListTasks` request indicating
#' that more results are available to fulfill the request and further calls
#' will be needed. If `maxResults` was provided, it is possible the number
#' of results to be fewer than `maxResults`.
#'
#' This token should be treated as an opaque identifier that is only used
#' to retrieve the next items in a list and not for other programmatic
#' purposes.
#' @param maxResults The maximum number of task results returned by `ListTasks` in paginated
#' output. When this parameter is used, `ListTasks` only returns
#' `maxResults` results in a single page along with a `nextToken` response
#' element. The remaining results of the initial request can be seen by
#' sending another `ListTasks` request with the returned `nextToken` value.
#' This value can be between 1 and 100. If this parameter is not used, then
#' `ListTasks` returns up to 100 results and a `nextToken` value if
#' applicable.
#' @param startedBy The `startedBy` value with which to filter the task results. Specifying
#' a `startedBy` value limits the results to tasks that were started with
#' that value.
#' @param serviceName The name of the service with which to filter the `ListTasks` results.
#' Specifying a `serviceName` limits the results to tasks that belong to
#' that service.
#' @param desiredStatus The task desired status with which to filter the `ListTasks` results.
#' Specifying a `desiredStatus` of `STOPPED` limits the results to tasks
#' that Amazon ECS has set the desired status to `STOPPED`. This can be
#' useful for debugging tasks that are not starting properly or have died
#' or finished. The default status filter is `RUNNING`, which shows tasks
#' that Amazon ECS has set the desired status to `RUNNING`.
#'
#' Although you can filter results based on a desired status of `PENDING`,
#' this does not return any results. Amazon ECS never sets the desired
#' status of a task to that value (only a task's `lastStatus` may have a
#' value of `PENDING`).
#' @param launchType The launch type for services to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_tasks(
#'   cluster = "string",
#'   containerInstance = "string",
#'   family = "string",
#'   nextToken = "string",
#'   maxResults = 123,
#'   startedBy = "string",
#'   serviceName = "string",
#'   desiredStatus = "RUNNING"|"PENDING"|"STOPPED",
#'   launchType = "EC2"|"FARGATE"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example lists all of the tasks in a cluster.
#' svc$list_tasks(
#'   cluster = "default"
#' )
#'
#' # This example lists the tasks of a specified container instance.
#' # Specifying a `containerInstance` value limits the results to tasks
#' # that belong to that container instance.
#' svc$list_tasks(
#'   cluster = "default",
#'   containerInstance = "f6bbb147-5370-4ace-8c73-c7181ded911f"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_list_tasks
ecs_list_tasks <- function(cluster = NULL, containerInstance = NULL,
                           family = NULL, nextToken = NULL, maxResults = NULL,
                           startedBy = NULL, serviceName = NULL,
                           desiredStatus = NULL, launchType = NULL) {
  # Static descriptor for the ListTasks API call.
  operation <- new_operation(
    name = "ListTasks",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied filters into the request shape.
  op_input <- .ecs$list_tasks_input(
    cluster = cluster,
    containerInstance = containerInstance,
    family = family,
    nextToken = nextToken,
    maxResults = maxResults,
    startedBy = startedBy,
    serviceName = serviceName,
    desiredStatus = desiredStatus,
    launchType = launchType
  )
  op_output <- .ecs$list_tasks_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$list_tasks <- ecs_list_tasks

#' Modifies an account setting
#'
#' @description
#' Modifies an account setting. Account settings are set on a per-Region
#' basis.
#'
#' If you change the account setting for the root user, the default
#' settings for all of the IAM users and roles for which no individual
#' account setting has been specified are reset. For more information, see
#' [Account
#' Settings](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' When `serviceLongArnFormat`, `taskLongArnFormat`, or
#' `containerInstanceLongArnFormat` are specified, the Amazon Resource Name
#' (ARN) and resource ID format of the resource type for a specified IAM
#' user, IAM role, or the root user for an account is affected. The opt-in
#' and opt-out account setting must be set for each Amazon ECS resource
#' separately. The ARN and resource ID format of a resource will be defined
#' by the opt-in status of the IAM user or role that created the resource.
#' You must enable this setting to use Amazon ECS features such as resource
#' tagging.
#'
#' When `awsvpcTrunking` is specified, the elastic network interface (ENI)
#' limit for any new container instances that support the feature is
#' changed. If `awsvpcTrunking` is enabled, any new container instances
#' that support the feature are launched have the increased ENI limits
#' available to them. For more information, see [Elastic Network Interface
#' Trunking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-instance-eni.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' When `containerInsights` is specified, the default setting indicating
#' whether CloudWatch Container Insights is enabled for your clusters is
#' changed. If `containerInsights` is enabled, any new clusters that are
#' created will have Container Insights enabled unless you disable it
#' during cluster creation. For more information, see [CloudWatch Container
#' Insights](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cloudwatch-container-insights.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_put_account_setting(name, value, principalArn)
#'
#' @param name &#91;required&#93; The Amazon ECS resource name for which to modify the account setting. If
#' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS
#' services is affected. If `taskLongArnFormat` is specified, the ARN and
#' resource ID for your Amazon ECS tasks is affected. If
#' `containerInstanceLongArnFormat` is specified, the ARN and resource ID
#' for your Amazon ECS container instances is affected. If `awsvpcTrunking`
#' is specified, the elastic network interface (ENI) limit for your Amazon
#' ECS container instances is affected. If `containerInsights` is
#' specified, the default setting for CloudWatch Container Insights for
#' your clusters is affected.
#' @param value &#91;required&#93; The account setting value for the specified principal ARN. Accepted
#' values are `enabled` and `disabled`.
#' @param principalArn The ARN of the principal, which can be an IAM user, IAM role, or the
#' root user. If you specify the root user, it modifies the account setting
#' for all IAM users, IAM roles, and the root user of the account unless an
#' IAM user or role explicitly overrides these settings. If this field is
#' omitted, the setting is changed only for the authenticated user.
#'
#' @section Request syntax:
#' ```
#' svc$put_account_setting(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   value = "string",
#'   principalArn = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example modifies your account settings to opt in to the new ARN and
#' # resource ID format for Amazon ECS services. If you’re using this command
#' # as the root user, then changes apply to the entire AWS account, unless
#' # an IAM user or role explicitly overrides these settings for themselves.
#' svc$put_account_setting(
#'   name = "serviceLongArnFormat",
#'   value = "enabled"
#' )
#'
#' # This example modifies the account setting for a specific IAM user or IAM
#' # role to opt in to the new ARN and resource ID format for Amazon ECS
#' # container instances. If you’re using this command as the root user, then
#' # changes apply to the entire AWS account, unless an IAM user or role
#' # explicitly overrides these settings for themselves.
#' svc$put_account_setting(
#'   name = "containerInstanceLongArnFormat",
#'   value = "enabled",
#'   principalArn = "arn:aws:iam::<aws_account_id>:user/principalName"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_put_account_setting
ecs_put_account_setting <- function(name, value, principalArn = NULL) {
  # Static descriptor for the PutAccountSetting API call.
  operation <- new_operation(
    name = "PutAccountSetting",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$put_account_setting_input(
    name = name,
    value = value,
    principalArn = principalArn
  )
  op_output <- .ecs$put_account_setting_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$put_account_setting <- ecs_put_account_setting

#' Modifies an account setting for all IAM users on an account for whom no
#' individual account setting has been specified
#'
#' @description
#' Modifies an account setting for all IAM users on an account for whom no
#' individual account setting has been specified. Account settings are set
#' on a per-Region basis.
#'
#' @usage
#' ecs_put_account_setting_default(name, value)
#'
#' @param name &#91;required&#93; The resource name for which to modify the account setting. If
#' `serviceLongArnFormat` is specified, the ARN for your Amazon ECS
#' services is affected. 
If `taskLongArnFormat` is specified, the ARN and
#' resource ID for your Amazon ECS tasks is affected. If
#' `containerInstanceLongArnFormat` is specified, the ARN and resource ID
#' for your Amazon ECS container instances is affected. If `awsvpcTrunking`
#' is specified, the ENI limit for your Amazon ECS container instances is
#' affected. If `containerInsights` is specified, the default setting for
#' CloudWatch Container Insights for your clusters is affected.
#' @param value &#91;required&#93; The account setting value for the specified principal ARN. Accepted
#' values are `enabled` and `disabled`.
#'
#' @section Request syntax:
#' ```
#' svc$put_account_setting_default(
#'   name = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights",
#'   value = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example modifies the default account setting for the specified
#' # resource for all IAM users or roles on an account. These changes apply
#' # to the entire AWS account, unless an IAM user or role explicitly
#' # overrides these settings for themselves.
#' svc$put_account_setting_default(
#'   name = "serviceLongArnFormat",
#'   value = "enabled"
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_put_account_setting_default
ecs_put_account_setting_default <- function(name, value) {
  # Static descriptor for the PutAccountSettingDefault API call.
  operation <- new_operation(
    name = "PutAccountSettingDefault",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$put_account_setting_default_input(name = name, value = value)
  op_output <- .ecs$put_account_setting_default_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$put_account_setting_default <- ecs_put_account_setting_default

#' Create or update an attribute on an Amazon ECS resource
#'
#' @description
#' Create or update an attribute on an Amazon ECS resource. 
If the
#' attribute does not exist, it is created. If the attribute exists, its
#' value is replaced with the specified value. To delete an attribute, use
#' DeleteAttributes. For more information, see
#' [Attributes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html#attributes)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_put_attributes(cluster, attributes)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' contains the resource to apply attributes. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param attributes &#91;required&#93; The attributes to apply to your resource. You can specify up to 10
#' custom attributes per resource. You can specify up to 10 attributes in a
#' single call.
#'
#' @section Request syntax:
#' ```
#' svc$put_attributes(
#'   cluster = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_put_attributes
ecs_put_attributes <- function(cluster = NULL, attributes) {
  # Static descriptor for the PutAttributes API call.
  operation <- new_operation(
    name = "PutAttributes",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$put_attributes_input(cluster = cluster, attributes = attributes)
  op_output <- .ecs$put_attributes_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$put_attributes <- ecs_put_attributes

#' Modifies the available capacity providers and the default capacity
#' provider strategy for a cluster
#'
#' @description
#' Modifies the available capacity providers and the default capacity
#' provider strategy for a cluster.
#'
#' You must specify both the available capacity providers and a default
#' capacity provider strategy for the cluster. 
If the specified cluster has
#' existing capacity providers associated with it, you must specify all
#' existing capacity providers in addition to any new ones you want to add.
#' Any existing capacity providers associated with a cluster that are
#' omitted from a PutClusterCapacityProviders API call will be
#' disassociated with the cluster. You can only disassociate an existing
#' capacity provider from a cluster if it's not being used by any existing
#' tasks.
#'
#' When creating a service or running a task on a cluster, if no capacity
#' provider or launch type is specified, then the cluster's default
#' capacity provider strategy is used. It is recommended to define a
#' default capacity provider strategy for your cluster, however you may
#' specify an empty array (`\\[\\]`) to bypass defining a default strategy.
#'
#' @usage
#' ecs_put_cluster_capacity_providers(cluster, capacityProviders,
#'   defaultCapacityProviderStrategy)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster to
#' modify the capacity provider settings for. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param capacityProviders &#91;required&#93; The name of one or more capacity providers to associate with the
#' cluster.
#'
#' If specifying a capacity provider that uses an Auto Scaling group, the
#' capacity provider must already be created. New capacity providers can be
#' created with the CreateCapacityProvider API operation.
#'
#' To use a AWS Fargate capacity provider, specify either the `FARGATE` or
#' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers
#' are available to all accounts and only need to be associated with a
#' cluster to be used.
#' @param defaultCapacityProviderStrategy &#91;required&#93; The capacity provider strategy to use by default for the cluster.
#'
#' When creating a service or running a task on a cluster, if no capacity
#' provider or launch type is specified then the default capacity provider
#' strategy for the cluster is used.
#'
#' A capacity provider strategy consists of one or more capacity providers
#' along with the `base` and `weight` to assign to them. A capacity
#' provider must be associated with the cluster to be used in a capacity
#' provider strategy. The PutClusterCapacityProviders API is used to
#' associate a capacity provider with a cluster. Only capacity providers
#' with an `ACTIVE` or `UPDATING` status can be used.
#'
#' If specifying a capacity provider that uses an Auto Scaling group, the
#' capacity provider must already be created. New capacity providers can be
#' created with the CreateCapacityProvider API operation.
#'
#' To use a AWS Fargate capacity provider, specify either the `FARGATE` or
#' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers
#' are available to all accounts and only need to be associated with a
#' cluster to be used.
#'
#' @section Request syntax:
#' ```
#' svc$put_cluster_capacity_providers(
#'   cluster = "string",
#'   capacityProviders = list(
#'     "string"
#'   ),
#'   defaultCapacityProviderStrategy = list(
#'     list(
#'       capacityProvider = "string",
#'       weight = 123,
#'       base = 123
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_put_cluster_capacity_providers
ecs_put_cluster_capacity_providers <- function(cluster, capacityProviders,
                                               defaultCapacityProviderStrategy) {
  # Static descriptor for the PutClusterCapacityProviders API call.
  operation <- new_operation(
    name = "PutClusterCapacityProviders",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the request shape.
  op_input <- .ecs$put_cluster_capacity_providers_input(
    cluster = cluster,
    capacityProviders = capacityProviders,
    defaultCapacityProviderStrategy = defaultCapacityProviderStrategy
  )
  op_output <- .ecs$put_cluster_capacity_providers_output()
  # Build the service client from the active configuration and send the call.
  service <- .ecs$service(get_config())
  send_request(new_request(service, operation, op_input, op_output))
}
.ecs$operations$put_cluster_capacity_providers <- ecs_put_cluster_capacity_providers

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Registers an EC2 instance into the specified cluster. This instance
#' becomes available to place containers on.
#'
#' @usage
#' ecs_register_container_instance(cluster, instanceIdentityDocument,
#'   instanceIdentityDocumentSignature, totalResources, versionInfo,
#'   containerInstanceArn, attributes, platformDevices, tags)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster with
#' which to register your container instance. If you do not specify a
#' cluster, the default cluster is assumed.
#' @param instanceIdentityDocument The instance identity document for the EC2 instance to register. 
This #' document can be found by running the following command from the #' instance: #' `curl http://169.254.169.254/latest/dynamic/instance-identity/document/` #' @param instanceIdentityDocumentSignature The instance identity document signature for the EC2 instance to #' register. This signature can be found by running the following command #' from the instance: #' `curl http://169.254.169.254/latest/dynamic/instance-identity/signature/` #' @param totalResources The resources available on the instance. #' @param versionInfo The version information for the Amazon ECS container agent and Docker #' daemon running on the container instance. #' @param containerInstanceArn The ARN of the container instance (if it was previously registered). #' @param attributes The container instance attributes that this container instance supports. #' @param platformDevices The devices that are available on the container instance. The only #' supported device type is a GPU. #' @param tags The metadata that you apply to the container instance to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. 
You cannot edit or delete tag keys or values with this prefix.
#' Tags with this prefix do not count against your tags per resource
#' limit.
#'
#' @section Request syntax:
#' ```
#' svc$register_container_instance(
#'   cluster = "string",
#'   instanceIdentityDocument = "string",
#'   instanceIdentityDocumentSignature = "string",
#'   totalResources = list(
#'     list(
#'       name = "string",
#'       type = "string",
#'       doubleValue = 123.0,
#'       longValue = 123,
#'       integerValue = 123,
#'       stringSetValue = list(
#'         "string"
#'       )
#'     )
#'   ),
#'   versionInfo = list(
#'     agentVersion = "string",
#'     agentHash = "string",
#'     dockerVersion = "string"
#'   ),
#'   containerInstanceArn = "string",
#'   attributes = list(
#'     list(
#'       name = "string",
#'       value = "string",
#'       targetType = "container-instance",
#'       targetId = "string"
#'     )
#'   ),
#'   platformDevices = list(
#'     list(
#'       id = "string",
#'       type = "GPU"
#'     )
#'   ),
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   )
#' )
#' ```
#'
#' @return The service's response to the request, as returned by
#' `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_register_container_instance
ecs_register_container_instance <- function(cluster = NULL, instanceIdentityDocument = NULL, instanceIdentityDocumentSignature = NULL, totalResources = NULL, versionInfo = NULL, containerInstanceArn = NULL, attributes = NULL, platformDevices = NULL, tags = NULL) {
  # Describe the API operation: ECS actions are POST requests against "/".
  op <- new_operation(
    name = "RegisterContainerInstance",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the operation's input shape and
  # prepare the (empty) output shape used to deserialize the reply.
  input <- .ecs$register_container_instance_input(cluster = cluster, instanceIdentityDocument = instanceIdentityDocument, instanceIdentityDocumentSignature = instanceIdentityDocumentSignature, totalResources = totalResources, versionInfo = versionInfo, containerInstanceArn = containerInstanceArn, attributes = attributes, platformDevices = platformDevices, tags = tags)
  output <- .ecs$register_container_instance_output()
  # Resolve client configuration, build the request, and send it.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register this operation on the service's operation table.
.ecs$operations$register_container_instance <- ecs_register_container_instance

#' Registers a new task definition from the supplied family and
#' containerDefinitions
#'
#' @description
#' Registers a new task definition from the supplied `family` and
#' `containerDefinitions`. Optionally, you can add data volumes to your
#' containers with the `volumes` parameter. For more information about task
#' definition parameters and defaults, see [Amazon ECS Task
#' Definitions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' You can specify an IAM role for your task with the `taskRoleArn`
#' parameter. When you specify an IAM role for a task, its containers can
#' then use the latest versions of the AWS CLI or SDKs to make API requests
#' to the AWS services that are specified in the IAM policy associated with
#' the role. For more information, see [IAM Roles for
#' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' You can specify a Docker networking mode for the containers in your task
#' definition with the `networkMode` parameter. The available network modes
#' correspond to those described in [Network
#' settings](https://docs.docker.com/engine/reference/run/#/network-settings)
#' in the Docker run reference. If you specify the `awsvpc` network mode,
#' the task is allocated an elastic network interface, and you must specify
#' a NetworkConfiguration when you create a service or run a task with the
#' task definition. For more information, see [Task
#' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html)
#' in the *Amazon Elastic Container Service Developer Guide*. 
#' #' @usage #' ecs_register_task_definition(family, taskRoleArn, executionRoleArn, #' networkMode, containerDefinitions, volumes, placementConstraints, #' requiresCompatibilities, cpu, memory, tags, pidMode, ipcMode, #' proxyConfiguration, inferenceAccelerators) #' #' @param family &#91;required&#93; You must specify a `family` for a task definition, which allows you to #' track multiple versions of the same task definition. The `family` is #' used as a name for your task definition. Up to 255 letters (uppercase #' and lowercase), numbers, and hyphens are allowed. #' @param taskRoleArn The short name or full Amazon Resource Name (ARN) of the IAM role that #' containers in this task can assume. All containers in this task are #' granted the permissions that are specified in this role. For more #' information, see [IAM Roles for #' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param executionRoleArn The Amazon Resource Name (ARN) of the task execution role that grants #' the Amazon ECS container agent permission to make AWS API calls on your #' behalf. The task execution IAM role is required depending on the #' requirements of your task. For more information, see [Amazon ECS task #' execution IAM #' role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param networkMode The Docker networking mode to use for the containers in the task. The #' valid values are `none`, `bridge`, `awsvpc`, and `host`. If no network #' mode is specified, the default is `bridge`. #' #' For Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. #' For Amazon ECS tasks on Amazon EC2 instances, any network mode can be #' used. 
If the network mode is set to `none`, you cannot specify port #' mappings in your container definitions, and the tasks containers do not #' have external connectivity. The `host` and `awsvpc` network modes offer #' the highest networking performance for containers because they use the #' EC2 network stack instead of the virtualized network stack provided by #' the `bridge` mode. #' #' With the `host` and `awsvpc` network modes, exposed container ports are #' mapped directly to the corresponding host port (for the `host` network #' mode) or the attached elastic network interface port (for the `awsvpc` #' network mode), so you cannot take advantage of dynamic host port #' mappings. #' #' When using the `host` network mode, you should not run containers using #' the root user (UID 0). It is considered best practice to use a non-root #' user. #' #' If the network mode is `awsvpc`, the task is allocated an elastic #' network interface, and you must specify a NetworkConfiguration value #' when you create a service or run a task with the task definition. For #' more information, see [Task #' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' Currently, only Amazon ECS-optimized AMIs, other Amazon Linux variants #' with the `ecs-init` package, or AWS Fargate infrastructure support the #' `awsvpc` network mode. #' #' If the network mode is `host`, you cannot run multiple instantiations of #' the same task on a single container instance when port mappings are #' used. #' #' Docker for Windows uses different network modes than Docker for Linux. #' When you register a task definition with Windows containers, you must #' not specify a network mode. If you use the console to register a task #' definition with Windows containers, you must choose the #' `&lt;default&gt;` network mode object. 
#' #' For more information, see [Network #' settings](https://docs.docker.com/engine/reference/run/#network-settings) #' in the *Docker run reference*. #' @param containerDefinitions &#91;required&#93; A list of container definitions in JSON format that describe the #' different containers that make up your task. #' @param volumes A list of volume definitions in JSON format that containers in your task #' may use. #' @param placementConstraints An array of placement constraint objects to use for the task. You can #' specify a maximum of 10 constraints per task (this limit includes #' constraints in the task definition and those specified at runtime). #' @param requiresCompatibilities The task launch type that Amazon ECS should validate the task definition #' against. This ensures that the task definition parameters are compatible #' with the specified launch type. If no value is specified, it defaults to #' `EC2`. #' @param cpu The number of CPU units used by the task. It can be expressed as an #' integer using CPU units, for example `1024`, or as a string using vCPUs, #' for example `1 vCPU` or `1 vcpu`, in a task definition. String values #' are converted to an integer indicating the CPU units when the task #' definition is registered. #' #' Task-level CPU and memory parameters are ignored for Windows containers. #' We recommend specifying container-level resources for Windows #' containers. #' #' If you are using the EC2 launch type, this field is optional. Supported #' values are between `128` CPU units (`0.125` vCPUs) and `10240` CPU units #' (`10` vCPUs). 
#' #' If you are using the Fargate launch type, this field is required and you #' must use one of the following values, which determines your range of #' supported values for the `memory` parameter: #' #' - 256 (.25 vCPU) - Available `memory` values: 512 (0.5 GB), 1024 (1 #' GB), 2048 (2 GB) #' #' - 512 (.5 vCPU) - Available `memory` values: 1024 (1 GB), 2048 (2 GB), #' 3072 (3 GB), 4096 (4 GB) #' #' - 1024 (1 vCPU) - Available `memory` values: 2048 (2 GB), 3072 (3 GB), #' 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) #' #' - 2048 (2 vCPU) - Available `memory` values: Between 4096 (4 GB) and #' 16384 (16 GB) in increments of 1024 (1 GB) #' #' - 4096 (4 vCPU) - Available `memory` values: Between 8192 (8 GB) and #' 30720 (30 GB) in increments of 1024 (1 GB) #' @param memory The amount of memory (in MiB) used by the task. It can be expressed as #' an integer using MiB, for example `1024`, or as a string using GB, for #' example `1GB` or `1 GB`, in a task definition. String values are #' converted to an integer indicating the MiB when the task definition is #' registered. #' #' Task-level CPU and memory parameters are ignored for Windows containers. #' We recommend specifying container-level resources for Windows #' containers. #' #' If using the EC2 launch type, this field is optional. 
#' #' If using the Fargate launch type, this field is required and you must #' use one of the following values, which determines your range of #' supported values for the `cpu` parameter: #' #' - 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available `cpu` values: 256 #' (.25 vCPU) #' #' - 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available `cpu` #' values: 512 (.5 vCPU) #' #' - 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), #' 7168 (7 GB), 8192 (8 GB) - Available `cpu` values: 1024 (1 vCPU) #' #' - Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - #' Available `cpu` values: 2048 (2 vCPU) #' #' - Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - #' Available `cpu` values: 4096 (4 vCPU) #' @param tags The metadata that you apply to the task definition to help you #' categorize and organize them. Each tag consists of a key and an optional #' value, both of which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. #' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. 
#' @param pidMode The process namespace to use for the containers in the task. The valid #' values are `host` or `task`. If `host` is specified, then all containers #' within the tasks that specified the `host` PID mode on the same #' container instance share the same process namespace with the host Amazon #' EC2 instance. If `task` is specified, all containers within the #' specified task share the same process namespace. If no value is #' specified, the default is a private namespace. For more information, see #' [PID #' settings](https://docs.docker.com/engine/reference/run/#pid-settings---pid) #' in the *Docker run reference*. #' #' If the `host` PID mode is used, be aware that there is a heightened risk #' of undesired process namespace expose. For more information, see [Docker #' security](https://docs.docker.com/engine/security/security/). #' #' This parameter is not supported for Windows containers or tasks using #' the Fargate launch type. #' @param ipcMode The IPC resource namespace to use for the containers in the task. The #' valid values are `host`, `task`, or `none`. If `host` is specified, then #' all containers within the tasks that specified the `host` IPC mode on #' the same container instance share the same IPC resources with the host #' Amazon EC2 instance. If `task` is specified, all containers within the #' specified task share the same IPC resources. If `none` is specified, #' then IPC resources within the containers of a task are private and not #' shared with other containers in a task or on the container instance. If #' no value is specified, then the IPC resource namespace sharing depends #' on the Docker daemon setting on the container instance. For more #' information, see [IPC #' settings](https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) #' in the *Docker run reference*. #' #' If the `host` IPC mode is used, be aware that there is a heightened risk #' of undesired IPC namespace expose. 
For more information, see [Docker #' security](https://docs.docker.com/engine/security/security/). #' #' If you are setting namespaced kernel parameters using `systemControls` #' for the containers in the task, the following will apply to your IPC #' resource namespace. For more information, see [System #' Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' - For tasks that use the `host` IPC mode, IPC namespace related #' `systemControls` are not supported. #' #' - For tasks that use the `task` IPC mode, IPC namespace related #' `systemControls` will apply to all containers within a task. #' #' This parameter is not supported for Windows containers or tasks using #' the Fargate launch type. #' @param proxyConfiguration #' @param inferenceAccelerators The Elastic Inference accelerators to use for the containers in the #' task. #' #' @section Request syntax: #' ``` #' svc$register_task_definition( #' family = "string", #' taskRoleArn = "string", #' executionRoleArn = "string", #' networkMode = "bridge"|"host"|"awsvpc"|"none", #' containerDefinitions = list( #' list( #' name = "string", #' image = "string", #' repositoryCredentials = list( #' credentialsParameter = "string" #' ), #' cpu = 123, #' memory = 123, #' memoryReservation = 123, #' links = list( #' "string" #' ), #' portMappings = list( #' list( #' containerPort = 123, #' hostPort = 123, #' protocol = "tcp"|"udp" #' ) #' ), #' essential = TRUE|FALSE, #' entryPoint = list( #' "string" #' ), #' command = list( #' "string" #' ), #' environment = list( #' list( #' name = "string", #' value = "string" #' ) #' ), #' environmentFiles = list( #' list( #' value = "string", #' type = "s3" #' ) #' ), #' mountPoints = list( #' list( #' sourceVolume = "string", #' containerPath = "string", #' readOnly = TRUE|FALSE #' ) #' ), #' volumesFrom = list( #' list( #' sourceContainer = "string", #' readOnly = 
TRUE|FALSE #' ) #' ), #' linuxParameters = list( #' capabilities = list( #' add = list( #' "string" #' ), #' drop = list( #' "string" #' ) #' ), #' devices = list( #' list( #' hostPath = "string", #' containerPath = "string", #' permissions = list( #' "read"|"write"|"mknod" #' ) #' ) #' ), #' initProcessEnabled = TRUE|FALSE, #' sharedMemorySize = 123, #' tmpfs = list( #' list( #' containerPath = "string", #' size = 123, #' mountOptions = list( #' "string" #' ) #' ) #' ), #' maxSwap = 123, #' swappiness = 123 #' ), #' secrets = list( #' list( #' name = "string", #' valueFrom = "string" #' ) #' ), #' dependsOn = list( #' list( #' containerName = "string", #' condition = "START"|"COMPLETE"|"SUCCESS"|"HEALTHY" #' ) #' ), #' startTimeout = 123, #' stopTimeout = 123, #' hostname = "string", #' user = "string", #' workingDirectory = "string", #' disableNetworking = TRUE|FALSE, #' privileged = TRUE|FALSE, #' readonlyRootFilesystem = TRUE|FALSE, #' dnsServers = list( #' "string" #' ), #' dnsSearchDomains = list( #' "string" #' ), #' extraHosts = list( #' list( #' hostname = "string", #' ipAddress = "string" #' ) #' ), #' dockerSecurityOptions = list( #' "string" #' ), #' interactive = TRUE|FALSE, #' pseudoTerminal = TRUE|FALSE, #' dockerLabels = list( #' "string" #' ), #' ulimits = list( #' list( #' name = "core"|"cpu"|"data"|"fsize"|"locks"|"memlock"|"msgqueue"|"nice"|"nofile"|"nproc"|"rss"|"rtprio"|"rttime"|"sigpending"|"stack", #' softLimit = 123, #' hardLimit = 123 #' ) #' ), #' logConfiguration = list( #' logDriver = "json-file"|"syslog"|"journald"|"gelf"|"fluentd"|"awslogs"|"splunk"|"awsfirelens", #' options = list( #' "string" #' ), #' secretOptions = list( #' list( #' name = "string", #' valueFrom = "string" #' ) #' ) #' ), #' healthCheck = list( #' command = list( #' "string" #' ), #' interval = 123, #' timeout = 123, #' retries = 123, #' startPeriod = 123 #' ), #' systemControls = list( #' list( #' namespace = "string", #' value = "string" #' ) #' ), #' 
resourceRequirements = list( #' list( #' value = "string", #' type = "GPU"|"InferenceAccelerator" #' ) #' ), #' firelensConfiguration = list( #' type = "fluentd"|"fluentbit", #' options = list( #' "string" #' ) #' ) #' ) #' ), #' volumes = list( #' list( #' name = "string", #' host = list( #' sourcePath = "string" #' ), #' dockerVolumeConfiguration = list( #' scope = "task"|"shared", #' autoprovision = TRUE|FALSE, #' driver = "string", #' driverOpts = list( #' "string" #' ), #' labels = list( #' "string" #' ) #' ), #' efsVolumeConfiguration = list( #' fileSystemId = "string", #' rootDirectory = "string", #' transitEncryption = "ENABLED"|"DISABLED", #' transitEncryptionPort = 123, #' authorizationConfig = list( #' accessPointId = "string", #' iam = "ENABLED"|"DISABLED" #' ) #' ), #' fsxWindowsFileServerVolumeConfiguration = list( #' fileSystemId = "string", #' rootDirectory = "string", #' authorizationConfig = list( #' credentialsParameter = "string", #' domain = "string" #' ) #' ) #' ) #' ), #' placementConstraints = list( #' list( #' type = "memberOf", #' expression = "string" #' ) #' ), #' requiresCompatibilities = list( #' "EC2"|"FARGATE" #' ), #' cpu = "string", #' memory = "string", #' tags = list( #' list( #' key = "string", #' value = "string" #' ) #' ), #' pidMode = "host"|"task", #' ipcMode = "host"|"task"|"none", #' proxyConfiguration = list( #' type = "APPMESH", #' containerName = "string", #' properties = list( #' list( #' name = "string", #' value = "string" #' ) #' ) #' ), #' inferenceAccelerators = list( #' list( #' deviceName = "string", #' deviceType = "string" #' ) #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example registers a task definition to the specified family. 
#' svc$register_task_definition(
#'   containerDefinitions = list(
#'     list(
#'       name = "sleep",
#'       command = list(
#'         "sleep",
#'         "360"
#'       ),
#'       cpu = 10L,
#'       essential = TRUE,
#'       image = "busybox",
#'       memory = 10L
#'     )
#'   ),
#'   family = "sleep360",
#'   taskRoleArn = "",
#'   volumes = list()
#' )
#' }
#'
#' @return The service's response to the request, as returned by
#' `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_register_task_definition
ecs_register_task_definition <- function(family, taskRoleArn = NULL, executionRoleArn = NULL, networkMode = NULL, containerDefinitions, volumes = NULL, placementConstraints = NULL, requiresCompatibilities = NULL, cpu = NULL, memory = NULL, tags = NULL, pidMode = NULL, ipcMode = NULL, proxyConfiguration = NULL, inferenceAccelerators = NULL) {
  # Describe the API operation: ECS actions are POST requests against "/".
  op <- new_operation(
    name = "RegisterTaskDefinition",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the operation's input shape and
  # prepare the (empty) output shape used to deserialize the reply.
  input <- .ecs$register_task_definition_input(family = family, taskRoleArn = taskRoleArn, executionRoleArn = executionRoleArn, networkMode = networkMode, containerDefinitions = containerDefinitions, volumes = volumes, placementConstraints = placementConstraints, requiresCompatibilities = requiresCompatibilities, cpu = cpu, memory = memory, tags = tags, pidMode = pidMode, ipcMode = ipcMode, proxyConfiguration = proxyConfiguration, inferenceAccelerators = inferenceAccelerators)
  output <- .ecs$register_task_definition_output()
  # Resolve client configuration, build the request, and send it.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register this operation on the service's operation table.
.ecs$operations$register_task_definition <- ecs_register_task_definition

#' Starts a new task using the specified task definition
#'
#' @description
#' Starts a new task using the specified task definition.
#'
#' You can allow Amazon ECS to place tasks for you, or you can customize
#' how Amazon ECS places tasks using placement constraints and placement
#' strategies. 
For more information, see [Scheduling #' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' Alternatively, you can use StartTask to use your own scheduler or place #' tasks manually on specific container instances. #' #' The Amazon ECS API follows an eventual consistency model, due to the #' distributed nature of the system supporting the API. This means that the #' result of an API command you run that affects your Amazon ECS resources #' might not be immediately visible to all subsequent commands you run. #' Keep this in mind when you carry out an API command that immediately #' follows a previous API command. #' #' To manage eventual consistency, you can do the following: #' #' - Confirm the state of the resource before you run a command to modify #' it. Run the DescribeTasks command using an exponential backoff #' algorithm to ensure that you allow enough time for the previous #' command to propagate through the system. To do this, run the #' DescribeTasks command repeatedly, starting with a couple of seconds #' of wait time and increasing gradually up to five minutes of wait #' time. #' #' - Add wait time between subsequent commands, even if the DescribeTasks #' command returns an accurate response. Apply an exponential backoff #' algorithm starting with a couple of seconds of wait time, and #' increase gradually up to about five minutes of wait time. #' #' @usage #' ecs_run_task(capacityProviderStrategy, cluster, count, #' enableECSManagedTags, group, launchType, networkConfiguration, #' overrides, placementConstraints, placementStrategy, platformVersion, #' propagateTags, referenceId, startedBy, tags, taskDefinition) #' #' @param capacityProviderStrategy The capacity provider strategy to use for the task. #' #' A capacity provider strategy consists of one or more capacity providers #' along with the `base` and `weight` to assign to them. 
A capacity #' provider must be associated with the cluster to be used in a capacity #' provider strategy. The PutClusterCapacityProviders API is used to #' associate a capacity provider with a cluster. Only capacity providers #' with an `ACTIVE` or `UPDATING` status can be used. #' #' If a `capacityProviderStrategy` is specified, the `launchType` parameter #' must be omitted. If no `capacityProviderStrategy` or `launchType` is #' specified, the `defaultCapacityProviderStrategy` for the cluster is #' used. #' #' If specifying a capacity provider that uses an Auto Scaling group, the #' capacity provider must already be created. New capacity providers can be #' created with the CreateCapacityProvider API operation. #' #' To use a AWS Fargate capacity provider, specify either the `FARGATE` or #' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers #' are available to all accounts and only need to be associated with a #' cluster to be used. #' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster on #' which to run your task. If you do not specify a cluster, the default #' cluster is assumed. #' @param count The number of instantiations of the specified task to place on your #' cluster. You can specify up to 10 tasks per call. #' @param enableECSManagedTags Specifies whether to enable Amazon ECS managed tags for the task. For #' more information, see [Tagging Your Amazon ECS #' Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param group The name of the task group to associate with the task. The default value #' is the family name of the task definition (for example, #' family:my-family-name). #' @param launchType The launch type on which to run your task. 
For more information, see #' [Amazon ECS Launch #' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' #' If a `launchType` is specified, the `capacityProviderStrategy` parameter #' must be omitted. #' @param networkConfiguration The network configuration for the task. This parameter is required for #' task definitions that use the `awsvpc` network mode to receive their own #' elastic network interface, and it is not supported for other network #' modes. For more information, see [Task #' Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param overrides A list of container overrides in JSON format that specify the name of a #' container in the specified task definition and the overrides it should #' receive. You can override the default command for a container (that is #' specified in the task definition or Docker image) with a `command` #' override. You can also override existing environment variables (that are #' specified in the task definition or Docker image) on a container or add #' new environment variables to it with an `environment` override. #' #' A total of 8192 characters are allowed for overrides. This limit #' includes the JSON formatting characters of the override structure. #' @param placementConstraints An array of placement constraint objects to use for the task. You can #' specify up to 10 constraints per task (including constraints in the task #' definition and those specified at runtime). #' @param placementStrategy The placement strategy objects to use for the task. You can specify a #' maximum of five strategy rules per task. #' @param platformVersion The platform version the task should run. A platform version is only #' specified for tasks using the Fargate launch type. 
If one is not #' specified, the `LATEST` platform version is used by default. For more #' information, see [AWS Fargate Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param propagateTags Specifies whether to propagate the tags from the task definition to the #' task. If no value is specified, the tags are not propagated. Tags can #' only be propagated to the task during task creation. To add tags to a #' task after task creation, use the TagResource API action. #' #' An error will be received if you specify the `SERVICE` option when #' running a task. #' @param referenceId The reference ID to use for the task. #' @param startedBy An optional tag specified when a task is started. For example, if you #' automatically trigger a task to run a batch process job, you could apply #' a unique identifier for that job to your task with the `startedBy` #' parameter. You can then identify which tasks belong to that job by #' filtering the results of a ListTasks call with the `startedBy` value. Up #' to 36 letters (uppercase and lowercase), numbers, hyphens, and #' underscores are allowed. #' #' If a task is started by an Amazon ECS service, then the `startedBy` #' parameter contains the deployment ID of the service that starts it. #' @param tags The metadata that you apply to the task to help you categorize and #' organize them. Each tag consists of a key and an optional value, both of #' which you define. #' #' The following basic restrictions apply to tags: #' #' - Maximum number of tags per resource - 50 #' #' - For each resource, each tag key must be unique, and each tag key can #' have only one value. 
#' #' - Maximum key length - 128 Unicode characters in UTF-8 #' #' - Maximum value length - 256 Unicode characters in UTF-8 #' #' - If your tagging schema is used across multiple services and #' resources, remember that other services may have restrictions on #' allowed characters. Generally allowed characters are: letters, #' numbers, and spaces representable in UTF-8, and the following #' characters: + - = . \\_ : / @@. #' #' - Tag keys and values are case-sensitive. #' #' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of #' such as a prefix for either keys or values as it is reserved for AWS #' use. You cannot edit or delete tag keys or values with this prefix. #' Tags with this prefix do not count against your tags per resource #' limit. #' @param taskDefinition &#91;required&#93; The `family` and `revision` (`family:revision`) or full ARN of the task #' definition to run. If a `revision` is not specified, the latest `ACTIVE` #' revision is used. #' #' @section Request syntax: #' ``` #' svc$run_task( #' capacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ), #' cluster = "string", #' count = 123, #' enableECSManagedTags = TRUE|FALSE, #' group = "string", #' launchType = "EC2"|"FARGATE", #' networkConfiguration = list( #' awsvpcConfiguration = list( #' subnets = list( #' "string" #' ), #' securityGroups = list( #' "string" #' ), #' assignPublicIp = "ENABLED"|"DISABLED" #' ) #' ), #' overrides = list( #' containerOverrides = list( #' list( #' name = "string", #' command = list( #' "string" #' ), #' environment = list( #' list( #' name = "string", #' value = "string" #' ) #' ), #' environmentFiles = list( #' list( #' value = "string", #' type = "s3" #' ) #' ), #' cpu = 123, #' memory = 123, #' memoryReservation = 123, #' resourceRequirements = list( #' list( #' value = "string", #' type = "GPU"|"InferenceAccelerator" #' ) #' ) #' ) #' ), #' cpu = "string", #' 
inferenceAcceleratorOverrides = list(
#'       list(
#'         deviceName = "string",
#'         deviceType = "string"
#'       )
#'     ),
#'     executionRoleArn = "string",
#'     memory = "string",
#'     taskRoleArn = "string"
#'   ),
#'   placementConstraints = list(
#'     list(
#'       type = "distinctInstance"|"memberOf",
#'       expression = "string"
#'     )
#'   ),
#'   placementStrategy = list(
#'     list(
#'       type = "random"|"spread"|"binpack",
#'       field = "string"
#'     )
#'   ),
#'   platformVersion = "string",
#'   propagateTags = "TASK_DEFINITION"|"SERVICE",
#'   referenceId = "string",
#'   startedBy = "string",
#'   tags = list(
#'     list(
#'       key = "string",
#'       value = "string"
#'     )
#'   ),
#'   taskDefinition = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example runs the specified task definition on your default cluster.
#' svc$run_task(
#'   cluster = "default",
#'   taskDefinition = "sleep360:1"
#' )
#' }
#'
#' @return The service's response to the request, as returned by
#' `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_run_task
ecs_run_task <- function(capacityProviderStrategy = NULL, cluster = NULL, count = NULL, enableECSManagedTags = NULL, group = NULL, launchType = NULL, networkConfiguration = NULL, overrides = NULL, placementConstraints = NULL, placementStrategy = NULL, platformVersion = NULL, propagateTags = NULL, referenceId = NULL, startedBy = NULL, tags = NULL, taskDefinition) {
  # Describe the API operation: ECS actions are POST requests against "/".
  op <- new_operation(
    name = "RunTask",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments into the operation's input shape and
  # prepare the (empty) output shape used to deserialize the reply.
  input <- .ecs$run_task_input(capacityProviderStrategy = capacityProviderStrategy, cluster = cluster, count = count, enableECSManagedTags = enableECSManagedTags, group = group, launchType = launchType, networkConfiguration = networkConfiguration, overrides = overrides, placementConstraints = placementConstraints, placementStrategy = placementStrategy, platformVersion = platformVersion, propagateTags = propagateTags, referenceId = referenceId, startedBy = startedBy, tags = tags, taskDefinition = taskDefinition)
  output <- .ecs$run_task_output()
  # Resolve client configuration, build the request, and send it.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register this operation on the service's operation table.
.ecs$operations$run_task <- ecs_run_task

#' Starts a new task from the specified task definition on the specified
#' container instance or instances
#'
#' @description
#' Starts a new task from the specified task definition on the specified
#' container instance or instances.
#'
#' Alternatively, you can use RunTask to place tasks for you. For more
#' information, see [Scheduling
#' Tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/scheduling_tasks.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_start_task(cluster, containerInstances, enableECSManagedTags, group,
#' networkConfiguration, overrides, propagateTags, referenceId, startedBy,
#' tags, taskDefinition)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster on
#' which to start your task. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param containerInstances &#91;required&#93; The container instance IDs or full ARN entries for the container
#' instances on which you would like to place your task. You can specify up
#' to 10 container instances.
#' @param enableECSManagedTags Specifies whether to enable Amazon ECS managed tags for the task. For
#' more information, see [Tagging Your Amazon ECS
#' Resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#' @param group The name of the task group to associate with the task. The default value
#' is the family name of the task definition (for example,
#' family:my-family-name).
#' @param networkConfiguration The VPC subnet and security group configuration for tasks that receive
#' their own elastic network interface by using the `awsvpc` networking
#' mode. 
#' @param overrides A list of container overrides in JSON format that specify the name of a
#' container in the specified task definition and the overrides it should
#' receive. You can override the default command for a container (that is
#' specified in the task definition or Docker image) with a `command`
#' override. You can also override existing environment variables (that are
#' specified in the task definition or Docker image) on a container or add
#' new environment variables to it with an `environment` override.
#'
#' A total of 8192 characters are allowed for overrides. This limit
#' includes the JSON formatting characters of the override structure.
#' @param propagateTags Specifies whether to propagate the tags from the task definition or the
#' service to the task. If no value is specified, the tags are not
#' propagated.
#' @param referenceId The reference ID to use for the task.
#' @param startedBy An optional tag specified when a task is started. For example, if you
#' automatically trigger a task to run a batch process job, you could apply
#' a unique identifier for that job to your task with the `startedBy`
#' parameter. You can then identify which tasks belong to that job by
#' filtering the results of a ListTasks call with the `startedBy` value. Up
#' to 36 letters (uppercase and lowercase), numbers, hyphens, and
#' underscores are allowed.
#'
#' If a task is started by an Amazon ECS service, then the `startedBy`
#' parameter contains the deployment ID of the service that starts it.
#' @param tags The metadata that you apply to the task to help you categorize and
#' organize them. Each tag consists of a key and an optional value, both of
#' which you define.
#'
#' The following basic restrictions apply to tags:
#'
#' - Maximum number of tags per resource - 50
#'
#' - For each resource, each tag key must be unique, and each tag key can
#'   have only one value.
#'
#' - Maximum key length - 128 Unicode characters in UTF-8
#'
#' - Maximum value length - 256 Unicode characters in UTF-8
#'
#' - If your tagging schema is used across multiple services and
#'   resources, remember that other services may have restrictions on
#'   allowed characters. Generally allowed characters are: letters,
#'   numbers, and spaces representable in UTF-8, and the following
#'   characters: + - = . \\_ : / @@.
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#'   such as a prefix for either keys or values as it is reserved for AWS
#'   use. You cannot edit or delete tag keys or values with this prefix.
#'   Tags with this prefix do not count against your tags per resource
#'   limit.
#' @param taskDefinition &#91;required&#93; The `family` and `revision` (`family:revision`) or full ARN of the task
#' definition to start. If a `revision` is not specified, the latest
#' `ACTIVE` revision is used.
#'
#' @section Request syntax:
#' ```
#' svc$start_task(
#'   cluster = "string",
#'   containerInstances = list("string"),
#'   enableECSManagedTags = TRUE|FALSE,
#'   group = "string",
#'   networkConfiguration = list(
#'     awsvpcConfiguration = list(
#'       subnets = list("string"),
#'       securityGroups = list("string"),
#'       assignPublicIp = "ENABLED"|"DISABLED"
#'     )
#'   ),
#'   overrides = list(
#'     containerOverrides = list(
#'       list(
#'         name = "string",
#'         command = list("string"),
#'         environment = list(list(name = "string", value = "string")),
#'         environmentFiles = list(list(value = "string", type = "s3")),
#'         cpu = 123,
#'         memory = 123,
#'         memoryReservation = 123,
#'         resourceRequirements = list(
#'           list(value = "string", type = "GPU"|"InferenceAccelerator")
#'         )
#'       )
#'     ),
#'     cpu = "string",
#'     inferenceAcceleratorOverrides = list(
#'       list(deviceName = "string", deviceType = "string")
#'     ),
#'     executionRoleArn = "string",
#'     memory = "string",
#'     taskRoleArn = "string"
#'   ),
#'   propagateTags = "TASK_DEFINITION"|"SERVICE",
#'   referenceId = "string",
#'   startedBy = "string",
#'   tags = list(list(key = "string", value = "string")),
#'   taskDefinition = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_start_task
ecs_start_task <- function(cluster = NULL, containerInstances,
                           enableECSManagedTags = NULL, group = NULL,
                           networkConfiguration = NULL, overrides = NULL,
                           propagateTags = NULL, referenceId = NULL,
                           startedBy = NULL, tags = NULL, taskDefinition) {
  # Describe the ECS StartTask API call for the shared request machinery.
  op <- new_operation(
    name = "StartTask",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller-supplied arguments into the wire-format input.
  input <- .ecs$start_task_input(
    cluster = cluster, containerInstances = containerInstances,
    enableECSManagedTags = enableECSManagedTags, group = group,
    networkConfiguration = networkConfiguration, overrides = overrides,
    propagateTags = propagateTags, referenceId = referenceId,
    startedBy = startedBy, tags = tags, taskDefinition = taskDefinition
  )
  output <- .ecs$start_task_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$start_task <- ecs_start_task

#' Stops a running task
#'
#' @description
#' Stops a running task. Any tags associated with the task will be deleted.
#'
#' When StopTask is called on a task, the equivalent of `docker stop` is
#' issued to the containers running in the task. This results in a
#' `SIGTERM` value and a default 30-second timeout, after which the
#' `SIGKILL` value is sent and the containers are forcibly stopped. If the
#' container handles the `SIGTERM` value gracefully and exits within 30
#' seconds from receiving it, no `SIGKILL` value is sent.
#'
#' The default 30-second timeout can be configured on the Amazon ECS
#' container agent with the `ECS_CONTAINER_STOP_TIMEOUT` variable.
#' For more
#' information, see [Amazon ECS Container Agent
#' Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_stop_task(cluster, task, reason)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the task to stop. If you do not specify a cluster, the default
#' cluster is assumed.
#' @param task &#91;required&#93; The task ID or full Amazon Resource Name (ARN) of the task to stop.
#' @param reason An optional message specified when a task is stopped. For example, if
#' you are using a custom scheduler, you can use this parameter to specify
#' the reason for stopping the task here, and the message appears in
#' subsequent DescribeTasks API operations on this task. Up to 255
#' characters are allowed in this message.
#'
#' @section Request syntax:
#' ```
#' svc$stop_task(
#'   cluster = "string",
#'   task = "string",
#'   reason = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_stop_task
ecs_stop_task <- function(cluster = NULL, task, reason = NULL) {
  # Describe the ECS StopTask API call for the shared request machinery.
  op <- new_operation(
    name = "StopTask",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$stop_task_input(cluster = cluster, task = task, reason = reason)
  output <- .ecs$stop_task_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$stop_task <- ecs_stop_task

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Sent to acknowledge that an attachment changed states.
#'
#' @usage
#' ecs_submit_attachment_state_changes(cluster, attachments)
#'
#' @param cluster The short name or full ARN of the cluster that hosts the container
#' instance the attachment belongs to.
#' @param attachments &#91;required&#93; Any attachments associated with the state change request.
#'
#' @section Request syntax:
#' ```
#' svc$submit_attachment_state_changes(
#'   cluster = "string",
#'   attachments = list(
#'     list(attachmentArn = "string", status = "string")
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_attachment_state_changes
ecs_submit_attachment_state_changes <- function(cluster = NULL, attachments) {
  # Describe the agent-only SubmitAttachmentStateChanges API call.
  op <- new_operation(
    name = "SubmitAttachmentStateChanges",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$submit_attachment_state_changes_input(
    cluster = cluster, attachments = attachments
  )
  output <- .ecs$submit_attachment_state_changes_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_attachment_state_changes <- ecs_submit_attachment_state_changes

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Sent to acknowledge that a container changed states.
#'
#' @usage
#' ecs_submit_container_state_change(cluster, task, containerName,
#'   runtimeId, status, exitCode, reason, networkBindings)
#'
#' @param cluster The short name or full ARN of the cluster that hosts the container.
#' @param task The task ID or full Amazon Resource Name (ARN) of the task that hosts
#' the container.
#' @param containerName The name of the container.
#' @param runtimeId The ID of the Docker container.
#' @param status The status of the state change request.
#' @param exitCode The exit code returned for the state change request.
#' @param reason The reason for the state change request.
#' @param networkBindings The network bindings of the container.
#'
#' @section Request syntax:
#' ```
#' svc$submit_container_state_change(
#'   cluster = "string",
#'   task = "string",
#'   containerName = "string",
#'   runtimeId = "string",
#'   status = "string",
#'   exitCode = 123,
#'   reason = "string",
#'   networkBindings = list(
#'     list(
#'       bindIP = "string",
#'       containerPort = 123,
#'       hostPort = 123,
#'       protocol = "tcp"|"udp"
#'     )
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_container_state_change
ecs_submit_container_state_change <- function(cluster = NULL, task = NULL,
                                              containerName = NULL,
                                              runtimeId = NULL, status = NULL,
                                              exitCode = NULL, reason = NULL,
                                              networkBindings = NULL) {
  # Describe the agent-only SubmitContainerStateChange API call.
  op <- new_operation(
    name = "SubmitContainerStateChange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$submit_container_state_change_input(
    cluster = cluster, task = task, containerName = containerName,
    runtimeId = runtimeId, status = status, exitCode = exitCode,
    reason = reason, networkBindings = networkBindings
  )
  output <- .ecs$submit_container_state_change_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_container_state_change <- ecs_submit_container_state_change

#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent
#'
#' @description
#' This action is only used by the Amazon ECS agent, and it is not intended
#' for use outside of the agent.
#'
#' Sent to acknowledge that a task changed states.
#'
#' @usage
#' ecs_submit_task_state_change(cluster, task, status, reason, containers,
#'   attachments, pullStartedAt, pullStoppedAt, executionStoppedAt)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the task.
#' @param task The task ID or full ARN of the task in the state change request.
#' @param status The status of the state change request.
#' @param reason The reason for the state change request.
#' @param containers Any containers associated with the state change request.
#' @param attachments Any attachments associated with the state change request.
#' @param pullStartedAt The Unix timestamp for when the container image pull began.
#' @param pullStoppedAt The Unix timestamp for when the container image pull completed.
#' @param executionStoppedAt The Unix timestamp for when the task execution stopped.
#'
#' @section Request syntax:
#' ```
#' svc$submit_task_state_change(
#'   cluster = "string",
#'   task = "string",
#'   status = "string",
#'   reason = "string",
#'   containers = list(
#'     list(
#'       containerName = "string",
#'       imageDigest = "string",
#'       runtimeId = "string",
#'       exitCode = 123,
#'       networkBindings = list(
#'         list(
#'           bindIP = "string",
#'           containerPort = 123,
#'           hostPort = 123,
#'           protocol = "tcp"|"udp"
#'         )
#'       ),
#'       reason = "string",
#'       status = "string"
#'     )
#'   ),
#'   attachments = list(
#'     list(attachmentArn = "string", status = "string")
#'   ),
#'   pullStartedAt = as.POSIXct("2015-01-01"),
#'   pullStoppedAt = as.POSIXct("2015-01-01"),
#'   executionStoppedAt = as.POSIXct("2015-01-01")
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_submit_task_state_change
ecs_submit_task_state_change <- function(cluster = NULL, task = NULL,
                                         status = NULL, reason = NULL,
                                         containers = NULL,
                                         attachments = NULL,
                                         pullStartedAt = NULL,
                                         pullStoppedAt = NULL,
                                         executionStoppedAt = NULL) {
  # Describe the agent-only SubmitTaskStateChange API call.
  op <- new_operation(
    name = "SubmitTaskStateChange",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$submit_task_state_change_input(
    cluster = cluster, task = task, status = status, reason = reason,
    containers = containers, attachments = attachments,
    pullStartedAt = pullStartedAt, pullStoppedAt = pullStoppedAt,
    executionStoppedAt = executionStoppedAt
  )
  output <- .ecs$submit_task_state_change_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$submit_task_state_change <- ecs_submit_task_state_change

#' Associates the specified tags to a resource with the specified
#' resourceArn
#'
#' @description
#' Associates the specified tags to a resource with the specified
#' `resourceArn`. If existing tags on a resource are not specified in the
#' request parameters, they are not changed. When a resource is deleted,
#' the tags associated with that resource are deleted as well.
#'
#' @usage
#' ecs_tag_resource(resourceArn, tags)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) of the resource to which to add tags.
#' Currently, the supported resources are Amazon ECS capacity providers,
#' tasks, services, task definitions, clusters, and container instances.
#' @param tags &#91;required&#93; The tags to add to the resource. A tag is an array of key-value pairs.
#'
#' The following basic restrictions apply to tags:
#'
#' - Maximum number of tags per resource - 50
#'
#' - For each resource, each tag key must be unique, and each tag key can
#'   have only one value.
#'
#' - Maximum key length - 128 Unicode characters in UTF-8
#'
#' - Maximum value length - 256 Unicode characters in UTF-8
#'
#' - If your tagging schema is used across multiple services and
#'   resources, remember that other services may have restrictions on
#'   allowed characters. Generally allowed characters are: letters,
#'   numbers, and spaces representable in UTF-8, and the following
#'   characters: + - = . \\_ : / @@.
#'
#' - Tag keys and values are case-sensitive.
#'
#' - Do not use `aws:`, `AWS:`, or any upper or lowercase combination of
#'   such as a prefix for either keys or values as it is reserved for AWS
#'   use. You cannot edit or delete tag keys or values with this prefix.
#'   Tags with this prefix do not count against your tags per resource
#'   limit.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#'   resourceArn = "string",
#'   tags = list(list(key = "string", value = "string"))
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example tags the 'dev' cluster with key 'team' and value 'dev'.
#' svc$tag_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev",
#'   tags = list(
#'     list(key = "team", value = "dev")
#'   )
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_tag_resource
ecs_tag_resource <- function(resourceArn, tags) {
  # Describe the ECS TagResource API call for the shared request machinery.
  op <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$tag_resource_input(resourceArn = resourceArn, tags = tags)
  output <- .ecs$tag_resource_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$tag_resource <- ecs_tag_resource

#' Deletes specified tags from a resource
#'
#' @description
#' Deletes specified tags from a resource.
#'
#' @usage
#' ecs_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn &#91;required&#93; The Amazon Resource Name (ARN) of the resource from which to delete
#' tags. Currently, the supported resources are Amazon ECS capacity
#' providers, tasks, services, task definitions, clusters, and container
#' instances.
#' @param tagKeys &#91;required&#93; The keys of the tags to be removed.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#'   resourceArn = "string",
#'   tagKeys = list("string")
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' # This example deletes the 'team' tag from the 'dev' cluster.
#' svc$untag_resource(
#'   resourceArn = "arn:aws:ecs:region:aws_account_id:cluster/dev",
#'   tagKeys = list("team")
#' )
#' }
#'
#' @keywords internal
#'
#' @rdname ecs_untag_resource
ecs_untag_resource <- function(resourceArn, tagKeys) {
  # Describe the ECS UntagResource API call for the shared request machinery.
  op <- new_operation(
    name = "UntagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys)
  output <- .ecs$untag_resource_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$untag_resource <- ecs_untag_resource

#' Modifies the parameters for a capacity provider
#'
#' @description
#' Modifies the parameters for a capacity provider.
#'
#' @usage
#' ecs_update_capacity_provider(name, autoScalingGroupProvider)
#'
#' @param name &#91;required&#93; The name of the capacity provider to update.
#' @param autoScalingGroupProvider &#91;required&#93; An object representing the parameters to update for the Auto Scaling
#' group capacity provider.
#'
#' @section Request syntax:
#' ```
#' svc$update_capacity_provider(
#'   name = "string",
#'   autoScalingGroupProvider = list(
#'     managedScaling = list(
#'       status = "ENABLED"|"DISABLED",
#'       targetCapacity = 123,
#'       minimumScalingStepSize = 123,
#'       maximumScalingStepSize = 123,
#'       instanceWarmupPeriod = 123
#'     ),
#'     managedTerminationProtection = "ENABLED"|"DISABLED"
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_capacity_provider
ecs_update_capacity_provider <- function(name, autoScalingGroupProvider) {
  # Describe the ECS UpdateCapacityProvider API call.
  op <- new_operation(
    name = "UpdateCapacityProvider",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$update_capacity_provider_input(
    name = name, autoScalingGroupProvider = autoScalingGroupProvider
  )
  output <- .ecs$update_capacity_provider_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_capacity_provider <- ecs_update_capacity_provider

#' Modifies the settings to use for a cluster
#'
#' @description
#' Modifies the settings to use for a cluster.
#'
#' @usage
#' ecs_update_cluster_settings(cluster, settings)
#'
#' @param cluster &#91;required&#93; The name of the cluster to modify the settings for.
#' @param settings &#91;required&#93; The setting to use by default for a cluster. This parameter is used to
#' enable CloudWatch Container Insights for a cluster. If this value is
#' specified, it will override the `containerInsights` value set with
#' PutAccountSetting or PutAccountSettingDefault.
#'
#' @section Request syntax:
#' ```
#' svc$update_cluster_settings(
#'   cluster = "string",
#'   settings = list(
#'     list(name = "containerInsights", value = "string")
#'   )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_cluster_settings
ecs_update_cluster_settings <- function(cluster, settings) {
  # Describe the ECS UpdateClusterSettings API call.
  op <- new_operation(
    name = "UpdateClusterSettings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$update_cluster_settings_input(cluster = cluster, settings = settings)
  output <- .ecs$update_cluster_settings_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_cluster_settings <- ecs_update_cluster_settings

#' Updates the Amazon ECS container agent on a specified container instance
#'
#' @description
#' Updates the Amazon ECS container agent on a specified container
#' instance. Updating the Amazon ECS container agent does not interrupt
#' running tasks or services on the container instance. The process for
#' updating the agent differs depending on whether your container instance
#' was launched with the Amazon ECS-optimized AMI or another operating
#' system.
#'
#' `UpdateContainerAgent` requires the Amazon ECS-optimized AMI or Amazon
#' Linux with the `ecs-init` service installed and running. For help
#' updating the Amazon ECS container agent on other operating systems, see
#' [Manually Updating the Amazon ECS Container
#' Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_container_agent(cluster, containerInstance)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' your container instance is running on. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param containerInstance &#91;required&#93; The container instance ID or full ARN entries for the container instance
#' on which you would like to update the Amazon ECS container agent.
#'
#' @section Request syntax:
#' ```
#' svc$update_container_agent(
#'   cluster = "string",
#'   containerInstance = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_container_agent
ecs_update_container_agent <- function(cluster = NULL, containerInstance) {
  # Describe the ECS UpdateContainerAgent API call.
  op <- new_operation(
    name = "UpdateContainerAgent",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$update_container_agent_input(
    cluster = cluster, containerInstance = containerInstance
  )
  output <- .ecs$update_container_agent_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_container_agent <- ecs_update_container_agent

#' Modifies the status of an Amazon ECS container instance
#'
#' @description
#' Modifies the status of an Amazon ECS container instance.
#'
#' Once a container instance has reached an `ACTIVE` state, you can change
#' the status of a container instance to `DRAINING` to manually remove an
#' instance from a cluster, for example to perform system updates, update
#' the Docker daemon, or scale down the cluster size.
#'
#' A container instance cannot be changed to `DRAINING` until it has
#' reached an `ACTIVE` status. If the instance is in any other status, an
#' error will be received.
#'
#' When you set a container instance to `DRAINING`, Amazon ECS prevents new
#' tasks from being scheduled for placement on the container instance and
#' replacement service tasks are started on other container instances in
#' the cluster if the resources are available. Service tasks on the
#' container instance that are in the `PENDING` state are stopped
#' immediately.
#'
#' Service tasks on the container instance that are in the `RUNNING` state
#' are stopped and replaced according to the service's deployment
#' configuration parameters, `minimumHealthyPercent` and `maximumPercent`.
#' You can change the deployment configuration of your service using
#' UpdateService.
#'
#' - If `minimumHealthyPercent` is below 100\%, the scheduler can ignore
#'   `desiredCount` temporarily during task replacement. For example,
#'   `desiredCount` is four tasks, a minimum of 50\% allows the scheduler
#'   to stop two existing tasks before starting two new tasks. If the
#'   minimum is 100\%, the service scheduler can't remove existing tasks
#'   until the replacement tasks are considered healthy. Tasks for
#'   services that do not use a load balancer are considered healthy if
#'   they are in the `RUNNING` state. Tasks for services that use a load
#'   balancer are considered healthy if they are in the `RUNNING` state
#'   and the container instance they are hosted on is reported as healthy
#'   by the load balancer.
#'
#' - The `maximumPercent` parameter represents an upper limit on the
#'   number of running tasks during task replacement, which enables you
#'   to define the replacement batch size. For example, if `desiredCount`
#'   is four tasks, a maximum of 200\% starts four new tasks before
#'   stopping the four tasks to be drained, provided that the cluster
#'   resources required to do this are available. If the maximum is 100\%,
#'   then replacement tasks can't start until the draining tasks have
#'   stopped.
#'
#' Any `PENDING` or `RUNNING` tasks that do not belong to a service are not
#' affected. You must wait for them to finish or stop them manually.
#'
#' A container instance has completed draining when it has no more
#' `RUNNING` tasks. You can verify this using ListTasks.
#'
#' When a container instance has been drained, you can set a container
#' instance to `ACTIVE` status and once it has reached that status the
#' Amazon ECS scheduler can begin scheduling tasks on the instance again.
#'
#' @usage
#' ecs_update_container_instances_state(cluster, containerInstances,
#'   status)
#'
#' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the container instance to update. If you do not specify a cluster,
#' the default cluster is assumed.
#' @param containerInstances &#91;required&#93; A list of container instance IDs or full ARN entries.
#' @param status &#91;required&#93; The container instance state with which to update the container
#' instance. The only valid values for this action are `ACTIVE` and
#' `DRAINING`. A container instance can only be updated to `DRAINING`
#' status once it has reached an `ACTIVE` state. If a container instance is
#' in `REGISTERING`, `DEREGISTERING`, or `REGISTRATION_FAILED` state you
#' can describe the container instance but will be unable to update the
#' container instance state.
#'
#' @section Request syntax:
#' ```
#' svc$update_container_instances_state(
#'   cluster = "string",
#'   containerInstances = list("string"),
#'   status = "ACTIVE"|"DRAINING"|"REGISTERING"|"DEREGISTERING"|"REGISTRATION_FAILED"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname ecs_update_container_instances_state
ecs_update_container_instances_state <- function(cluster = NULL,
                                                 containerInstances, status) {
  # Describe the ECS UpdateContainerInstancesState API call.
  op <- new_operation(
    name = "UpdateContainerInstancesState",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  input <- .ecs$update_container_instances_state_input(
    cluster = cluster, containerInstances = containerInstances,
    status = status
  )
  output <- .ecs$update_container_instances_state_output()
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.ecs$operations$update_container_instances_state <- ecs_update_container_instances_state

#' Updating the task placement strategies and constraints on an Amazon ECS
#' service remains in preview and is a Beta Service as defined by and
#' subject to the Beta Service Participation Service Terms located at
#' https://aws
#'
#' @description
#' Updating the task placement strategies and constraints on an Amazon ECS
#' service remains in preview and is a Beta Service as defined by and
#' subject to the Beta Service Participation Service Terms located at
#' [https://aws.amazon.com/service-terms](https://aws.amazon.com/service-terms/)
#' ("Beta Terms"). These Beta Terms apply to your participation in this
#' preview.
#'
#' Modifies the parameters of a service.
#'
#' For services using the rolling update (`ECS`) deployment controller, the
#' desired count, deployment configuration, network configuration, task
#' placement constraints and strategies, or task definition used can be
#' updated.
#' #' For services using the blue/green (`CODE_DEPLOY`) deployment controller, #' only the desired count, deployment configuration, task placement #' constraints and strategies, and health check grace period can be updated #' using this API. If the network configuration, platform version, or task #' definition need to be updated, a new AWS CodeDeploy deployment should be #' created. For more information, see #' [CreateDeployment](https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html) #' in the *AWS CodeDeploy API Reference*. #' #' For services using an external deployment controller, you can update #' only the desired count, task placement constraints and strategies, and #' health check grace period using this API. If the launch type, load #' balancer, network configuration, platform version, or task definition #' need to be updated, you should create a new task set. For more #' information, see CreateTaskSet. #' #' You can add to or subtract from the number of instantiations of a task #' definition in a service by specifying the cluster that the service is #' running in and a new `desiredCount` parameter. #' #' If you have updated the Docker image of your application, you can create #' a new task definition with that image and deploy it to your service. The #' service scheduler uses the minimum healthy percent and maximum percent #' parameters (in the service's deployment configuration) to determine the #' deployment strategy. #' #' If your updated Docker image uses the same tag as what is in the #' existing task definition for your service (for example, #' `my_image:latest`), you do not need to create a new revision of your #' task definition. You can update the service using the #' `forceNewDeployment` option. The new tasks launched by the deployment #' pull the current image/tag combination from your repository when they #' start. #' #' You can also update the deployment configuration of a service. 
When a #' deployment is triggered by updating the task definition of a service, #' the service scheduler uses the deployment configuration parameters, #' `minimumHealthyPercent` and `maximumPercent`, to determine the #' deployment strategy. #' #' - If `minimumHealthyPercent` is below 100\%, the scheduler can ignore #' `desiredCount` temporarily during a deployment. For example, if #' `desiredCount` is four tasks, a minimum of 50\% allows the scheduler #' to stop two existing tasks before starting two new tasks. Tasks for #' services that do not use a load balancer are considered healthy if #' they are in the `RUNNING` state. Tasks for services that use a load #' balancer are considered healthy if they are in the `RUNNING` state #' and the container instance they are hosted on is reported as healthy #' by the load balancer. #' #' - The `maximumPercent` parameter represents an upper limit on the #' number of running tasks during a deployment, which enables you to #' define the deployment batch size. For example, if `desiredCount` is #' four tasks, a maximum of 200\% starts four new tasks before stopping #' the four older tasks (provided that the cluster resources required #' to do this are available). #' #' When UpdateService stops a task during a deployment, the equivalent of #' `docker stop` is issued to the containers running in the task. This #' results in a `SIGTERM` and a 30-second timeout, after which `SIGKILL` is #' sent and the containers are forcibly stopped. If the container handles #' the `SIGTERM` gracefully and exits within 30 seconds from receiving it, #' no `SIGKILL` is sent. #' #' When the service scheduler launches new tasks, it determines task #' placement in your cluster with the following logic: #' #' - Determine which of the container instances in your cluster can #' support your service's task definition (for example, they have the #' required CPU, memory, ports, and container instance attributes). 
#' #' - By default, the service scheduler attempts to balance tasks across #' Availability Zones in this manner (although you can choose a #' different placement strategy): #' #' - Sort the valid container instances by the fewest number of #' running tasks for this service in the same Availability Zone as #' the instance. For example, if zone A has one running service #' task and zones B and C each have zero, valid container instances #' in either zone B or C are considered optimal for placement. #' #' - Place the new service task on a valid container instance in an #' optimal Availability Zone (based on the previous steps), #' favoring container instances with the fewest number of running #' tasks for this service. #' #' When the service scheduler stops running tasks, it attempts to maintain #' balance across the Availability Zones in your cluster using the #' following logic: #' #' - Sort the container instances by the largest number of running tasks #' for this service in the same Availability Zone as the instance. For #' example, if zone A has one running service task and zones B and C #' each have two, container instances in either zone B or C are #' considered optimal for termination. #' #' - Stop the task on a container instance in an optimal Availability #' Zone (based on the previous steps), favoring container instances #' with the largest number of running tasks for this service. #' #' @usage #' ecs_update_service(cluster, service, desiredCount, taskDefinition, #' capacityProviderStrategy, deploymentConfiguration, networkConfiguration, #' placementConstraints, placementStrategy, platformVersion, #' forceNewDeployment, healthCheckGracePeriodSeconds) #' #' @param cluster The short name or full Amazon Resource Name (ARN) of the cluster that #' your service is running on. If you do not specify a cluster, the default #' cluster is assumed. #' @param service &#91;required&#93; The name of the service to update. 
#' @param desiredCount The number of instantiations of the task to place and keep running in
#' your service.
#' @param taskDefinition The `family` and `revision` (`family:revision`) or full ARN of the task
#' definition to run in your service. If a `revision` is not specified, the
#' latest `ACTIVE` revision is used. If you modify the task definition with
#' `UpdateService`, Amazon ECS spawns a task with the new version of the
#' task definition and then stops an old task after the new version is
#' running.
#' @param capacityProviderStrategy The capacity provider strategy to update the service to use.
#'
#' If the service is using the default capacity provider strategy for the
#' cluster, the service can be updated to use one or more capacity
#' providers as opposed to the default capacity provider strategy. However,
#' when a service is using a capacity provider strategy that is not the
#' default capacity provider strategy, the service cannot be updated to use
#' the cluster's default capacity provider strategy.
#'
#' A capacity provider strategy consists of one or more capacity providers
#' along with the `base` and `weight` to assign to them. A capacity
#' provider must be associated with the cluster to be used in a capacity
#' provider strategy. The PutClusterCapacityProviders API is used to
#' associate a capacity provider with a cluster. Only capacity providers
#' with an `ACTIVE` or `UPDATING` status can be used.
#'
#' If specifying a capacity provider that uses an Auto Scaling group, the
#' capacity provider must already be created. New capacity providers can be
#' created with the CreateCapacityProvider API operation.
#'
#' To use an AWS Fargate capacity provider, specify either the `FARGATE` or
#' `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers
#' are available to all accounts and only need to be associated with a
#' cluster to be used.
#' #' The PutClusterCapacityProviders API operation is used to update the list #' of available capacity providers for a cluster after the cluster is #' created. #' @param deploymentConfiguration Optional deployment parameters that control how many tasks run during #' the deployment and the ordering of stopping and starting tasks. #' @param networkConfiguration #' @param placementConstraints An array of task placement constraint objects to update the service to #' use. If no value is specified, the existing placement constraints for #' the service will remain unchanged. If this value is specified, it will #' override any existing placement constraints defined for the service. To #' remove all existing placement constraints, specify an empty array. #' #' You can specify a maximum of 10 constraints per task (this limit #' includes constraints in the task definition and those specified at #' runtime). #' @param placementStrategy The task placement strategy objects to update the service to use. If no #' value is specified, the existing placement strategy for the service will #' remain unchanged. If this value is specified, it will override the #' existing placement strategy defined for the service. To remove an #' existing placement strategy, specify an empty object. #' #' You can specify a maximum of five strategy rules per service. #' @param platformVersion The platform version on which your tasks in the service are running. A #' platform version is only specified for tasks using the Fargate launch #' type. If a platform version is not specified, the `LATEST` platform #' version is used by default. For more information, see [AWS Fargate #' Platform #' Versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) #' in the *Amazon Elastic Container Service Developer Guide*. #' @param forceNewDeployment Whether to force a new deployment of the service. Deployments are not #' forced by default. 
You can use this option to trigger a new deployment #' with no service definition changes. For example, you can update a #' service's tasks to use a newer Docker image with the same image/tag #' combination (`my_image:latest`) or to roll Fargate tasks onto a newer #' platform version. #' @param healthCheckGracePeriodSeconds The period of time, in seconds, that the Amazon ECS service scheduler #' should ignore unhealthy Elastic Load Balancing target health checks #' after a task has first started. This is only valid if your service is #' configured to use a load balancer. If your service's tasks take a while #' to start and respond to Elastic Load Balancing health checks, you can #' specify a health check grace period of up to 2,147,483,647 seconds. #' During that time, the Amazon ECS service scheduler ignores the Elastic #' Load Balancing health check status. This grace period can prevent the #' ECS service scheduler from marking tasks as unhealthy and stopping them #' before they have time to come up. 
#' #' @section Request syntax: #' ``` #' svc$update_service( #' cluster = "string", #' service = "string", #' desiredCount = 123, #' taskDefinition = "string", #' capacityProviderStrategy = list( #' list( #' capacityProvider = "string", #' weight = 123, #' base = 123 #' ) #' ), #' deploymentConfiguration = list( #' deploymentCircuitBreaker = list( #' enable = TRUE|FALSE, #' rollback = TRUE|FALSE #' ), #' maximumPercent = 123, #' minimumHealthyPercent = 123 #' ), #' networkConfiguration = list( #' awsvpcConfiguration = list( #' subnets = list( #' "string" #' ), #' securityGroups = list( #' "string" #' ), #' assignPublicIp = "ENABLED"|"DISABLED" #' ) #' ), #' placementConstraints = list( #' list( #' type = "distinctInstance"|"memberOf", #' expression = "string" #' ) #' ), #' placementStrategy = list( #' list( #' type = "random"|"spread"|"binpack", #' field = "string" #' ) #' ), #' platformVersion = "string", #' forceNewDeployment = TRUE|FALSE, #' healthCheckGracePeriodSeconds = 123 #' ) #' ``` #' #' @examples #' \dontrun{ #' # This example updates the my-http-service service to use the #' # amazon-ecs-sample task definition. #' svc$update_service( #' service = "my-http-service", #' taskDefinition = "amazon-ecs-sample" #' ) #' #' # This example updates the desired count of the my-http-service service to #' # 10. 
#' svc$update_service(
#'   desiredCount = 10L,
#'   service = "my-http-service"
#' )
#' }
#'
#' @return The raw service response as returned by `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_update_service
# NOTE(review): generated paws-SDK wrapper -- same boilerplate pattern as the
# other .ecs operations; keep edits consistent with the generator output.
ecs_update_service <- function(cluster = NULL, service, desiredCount = NULL, taskDefinition = NULL, capacityProviderStrategy = NULL, deploymentConfiguration = NULL, networkConfiguration = NULL, placementConstraints = NULL, placementStrategy = NULL, platformVersion = NULL, forceNewDeployment = NULL, healthCheckGracePeriodSeconds = NULL) {
  # Describe the API operation: a non-paginated POST to the service root.
  op <- new_operation(
    name = "UpdateService",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialise all caller-supplied arguments into the request input shape.
  input <- .ecs$update_service_input(cluster = cluster, service = service, desiredCount = desiredCount, taskDefinition = taskDefinition, capacityProviderStrategy = capacityProviderStrategy, deploymentConfiguration = deploymentConfiguration, networkConfiguration = networkConfiguration, placementConstraints = placementConstraints, placementStrategy = placementStrategy, platformVersion = platformVersion, forceNewDeployment = forceNewDeployment, healthCheckGracePeriodSeconds = healthCheckGracePeriodSeconds)
  output <- .ecs$update_service_output()
  # Build a service client from the active configuration and send the request.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the shared .ecs dispatch table.
.ecs$operations$update_service <- ecs_update_service

#' Modifies which task set in a service is the primary task set
#'
#' @description
#' Modifies which task set in a service is the primary task set. Any
#' parameters that are updated on the primary task set in a service will
#' transition to the service. This is used when a service uses the
#' `EXTERNAL` deployment controller type. For more information, see [Amazon
#' ECS Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_service_primary_task_set(cluster, service, primaryTaskSet)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task set exists in.
#' @param primaryTaskSet &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the task set to set
#' as the primary task set in the deployment.
#'
#' @section Request syntax:
#' ```
#' svc$update_service_primary_task_set(
#'   cluster = "string",
#'   service = "string",
#'   primaryTaskSet = "string"
#' )
#' ```
#'
#' @return The raw service response as returned by `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_update_service_primary_task_set
# NOTE(review): generated paws-SDK wrapper -- same boilerplate pattern as the
# other .ecs operations; keep edits consistent with the generator output.
ecs_update_service_primary_task_set <- function(cluster, service, primaryTaskSet) {
  # Describe the API operation: a non-paginated POST to the service root.
  op <- new_operation(
    name = "UpdateServicePrimaryTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialise the caller-supplied arguments into the request input shape.
  input <- .ecs$update_service_primary_task_set_input(cluster = cluster, service = service, primaryTaskSet = primaryTaskSet)
  output <- .ecs$update_service_primary_task_set_output()
  # Build a service client from the active configuration and send the request.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the shared .ecs dispatch table.
.ecs$operations$update_service_primary_task_set <- ecs_update_service_primary_task_set

#' Modifies a task set
#'
#' @description
#' Modifies a task set. This is used when a service uses the `EXTERNAL`
#' deployment controller type. For more information, see [Amazon ECS
#' Deployment
#' Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)
#' in the *Amazon Elastic Container Service Developer Guide*.
#'
#' @usage
#' ecs_update_task_set(cluster, service, taskSet, scale)
#'
#' @param cluster &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the cluster that
#' hosts the service that the task set exists in.
#' @param service &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the service that
#' the task set exists in.
#' @param taskSet &#91;required&#93; The short name or full Amazon Resource Name (ARN) of the task set to
#' update.
#' @param scale &#91;required&#93;
#'
#' @section Request syntax:
#' ```
#' svc$update_task_set(
#'   cluster = "string",
#'   service = "string",
#'   taskSet = "string",
#'   scale = list(
#'     value = 123.0,
#'     unit = "PERCENT"
#'   )
#' )
#' ```
#'
#' @return The raw service response as returned by `send_request()`.
#'
#' @keywords internal
#'
#' @rdname ecs_update_task_set
# NOTE(review): generated paws-SDK wrapper -- same boilerplate pattern as the
# other .ecs operations; keep edits consistent with the generator output.
ecs_update_task_set <- function(cluster, service, taskSet, scale) {
  # Describe the API operation: a non-paginated POST to the service root.
  op <- new_operation(
    name = "UpdateTaskSet",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialise the caller-supplied arguments into the request input shape.
  input <- .ecs$update_task_set_input(cluster = cluster, service = service, taskSet = taskSet, scale = scale)
  output <- .ecs$update_task_set_output()
  # Build a service client from the active configuration and send the request.
  config <- get_config()
  svc <- .ecs$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the shared .ecs dispatch table.
.ecs$operations$update_task_set <- ecs_update_task_set
#' Plot an enmtools.model object on an interactive map
#'
#' Function that takes an \code{enmtools.model} object and plots an
#' interactive map of the presence points, background points (if applicable), and
#' species suitability map. This function uses \code{\link{leaflet}} for mapping
#' and will only function properly if you have an active internet connection.
#'
#' @param x enmtools.model object to plot
#' @param map.provider Name of a map provider for the underlying interactive base map.
#' Default is "Esri.WorldPhysical", an attractive topographic map with no place labels. A
#' preview of all map provider options can be viewed at \url{http://leaflet-extras.github.io/leaflet-providers/preview/}
#' @param plot.bg Should background points be plotted?
#' @param cluster.points Should points be clustered? If TRUE, points close together
#' will be grouped into clusters that can be interactively expanded by clicking
#' on them.
#' @return A leaflet map widget (the result of the leaflet pipeline).
#' @export interactive.plot.enmtools.model
# NOTE(review): leaflet, magrittr (%>%) and the values() accessor (presumably
# from the raster package) are assumed to be in scope via the package
# NAMESPACE -- confirm imports.
interactive.plot.enmtools.model <- function(x, map.provider = "Esri.WorldPhysical", plot.bg = FALSE, cluster.points = FALSE) {

  # Two-level palette: grey for background points ("0"), red for presences ("1").
  pal <- colorFactor(c("grey10", "red"), domain = c("0", "1"))

  # Build the point table; drop background rows unless plot.bg is requested.
  if(!is.null(x$analysis.df$presence)) {
    pnts <- x$analysis.df[ , c("Longitude", "Latitude", "presence")]
    if(!plot.bg) {
      pnts <- pnts[pnts$presence == 1, ]
    }
  } else {
    # No presence column: plot every row (e.g. presence-only analysis table).
    pnts <- x$analysis.df[ , c("Longitude", "Latitude")]
  }

  # Base tiles plus the semi-transparent suitability raster.
  m <- leaflet(pnts) %>%
    addProviderTiles(map.provider) %>%
    addRasterImage(x$suitability, colors = "inferno", opacity = 0.65)

  # Add the point markers, coloured by presence where that column exists,
  # optionally clustered for dense data sets.
  if(!is.null(x$analysis.df$presence)) {
    if(cluster.points) {
      m <- m %>%
        addCircleMarkers(~Longitude, ~Latitude,
                         color = ~pal(as.factor(presence)),
                         stroke = FALSE, fillOpacity = 0.5, radius= 8,
                         clusterOptions = markerClusterOptions())
    } else {
      m <- m %>%
        addCircleMarkers(~Longitude, ~Latitude,
                         color = ~pal(as.factor(presence)),
                         stroke = FALSE, fillOpacity = 0.5, radius= 8)
    }
  } else {
    if(cluster.points) {
      m <- m %>%
        addCircleMarkers(~Longitude, ~Latitude, color = "red",
                         stroke = FALSE, fillOpacity = 0.5, radius= 8,
                         clusterOptions = markerClusterOptions())
    } else {
      m <- m %>%
        addCircleMarkers(~Longitude, ~Latitude, color = "red",
                         stroke = FALSE, fillOpacity = 0.5, radius= 8)
    }
  }

  # Continuous legend over the observed suitability range.
  m <- m %>%
    addLegend(pal = colorNumeric("inferno",
                                 c(min(values(x$suitability), na.rm = TRUE),
                                   max(values(x$suitability), na.rm = TRUE))),
              values = c(min(values(x$suitability), na.rm = TRUE),
                         max(values(x$suitability), na.rm = TRUE)))

  # Categorical legend for the point colours; only the presence entry is
  # shown when background points are hidden.
  if(plot.bg) {
    m <- m %>%
      addLegend("bottomright", pal = pal, values = c("0", "1"),
                labFormat = function(type, x) {
                  labs = c("0" = "Background points", "1" = "Presence points")
                  labs[x]
                })
  } else {
    m <- m %>%
      addLegend("bottomright", pal = pal, values = c("1"),
                labFormat = function(type, x) {
                  labs = c("0" = "Background points", "1" = "Presence points")
                  labs[x]
                })
  }

  m
}
/R/interactive.plot.enmtools.model.R
no_license
helixcn/ENMTools
R
false
false
3,475
r
#' Plot an enmtools.model object on an interactive map
#'
#' Draws an interactive leaflet map for an \code{enmtools.model} object,
#' showing its presence points, background points (optionally), and the
#' fitted suitability surface. Because the base tiles are fetched from a
#' remote provider, an active internet connection is required.
#'
#' @param x enmtools.model object to plot
#' @param map.provider Name of a map provider for the underlying interactive base map.
#' Default is "Esri.WorldPhysical", an attractive topographic map with no place labels. A
#' preview of all map provider options can be viewed at \url{http://leaflet-extras.github.io/leaflet-providers/preview/}
#' @param plot.bg Should background points be plotted?
#' @param cluster.points Should points be clustered? If TRUE, points close together
#' will be grouped into clusters that can be interactively expanded by clicking
#' on them.
#' @return A leaflet map widget.
#' @export interactive.plot.enmtools.model
interactive.plot.enmtools.model <- function(x, map.provider = "Esri.WorldPhysical",
                                            plot.bg = FALSE, cluster.points = FALSE) {

  # Grey for background points ("0"), red for presence points ("1").
  presence_pal <- colorFactor(c("grey10", "red"), domain = c("0", "1"))

  has_presence <- !is.null(x$analysis.df$presence)

  # Assemble the coordinate table, optionally dropping background rows.
  if (has_presence) {
    point_df <- x$analysis.df[, c("Longitude", "Latitude", "presence")]
    if (!plot.bg) {
      point_df <- point_df[point_df$presence == 1, ]
    }
  } else {
    point_df <- x$analysis.df[, c("Longitude", "Latitude")]
  }

  # Base tiles plus the semi-transparent suitability raster.
  map_widget <- leaflet(point_df) %>%
    addProviderTiles(map.provider) %>%
    addRasterImage(x$suitability, colors = "inferno", opacity = 0.65)

  # Point markers: presence-coloured when a presence column exists,
  # plain red otherwise; clustering is orthogonal to colouring.
  if (has_presence && cluster.points) {
    map_widget <- map_widget %>%
      addCircleMarkers(~Longitude, ~Latitude,
                       color = ~presence_pal(as.factor(presence)),
                       stroke = FALSE, fillOpacity = 0.5, radius = 8,
                       clusterOptions = markerClusterOptions())
  } else if (has_presence) {
    map_widget <- map_widget %>%
      addCircleMarkers(~Longitude, ~Latitude,
                       color = ~presence_pal(as.factor(presence)),
                       stroke = FALSE, fillOpacity = 0.5, radius = 8)
  } else if (cluster.points) {
    map_widget <- map_widget %>%
      addCircleMarkers(~Longitude, ~Latitude, color = "red",
                       stroke = FALSE, fillOpacity = 0.5, radius = 8,
                       clusterOptions = markerClusterOptions())
  } else {
    map_widget <- map_widget %>%
      addCircleMarkers(~Longitude, ~Latitude, color = "red",
                       stroke = FALSE, fillOpacity = 0.5, radius = 8)
  }

  # Continuous legend spanning the observed suitability range.
  suit_range <- c(min(values(x$suitability), na.rm = TRUE),
                  max(values(x$suitability), na.rm = TRUE))
  map_widget <- map_widget %>%
    addLegend(pal = colorNumeric("inferno", suit_range), values = suit_range)

  # Shared label formatter for the categorical point legend.
  point_label_format <- function(type, x) {
    labs = c("0" = "Background points", "1" = "Presence points")
    labs[x]
  }

  # Show both legend entries only when background points are drawn.
  legend_values <- if (plot.bg) c("0", "1") else c("1")
  map_widget <- map_widget %>%
    addLegend("bottomright", pal = presence_pal, values = legend_values,
              labFormat = point_label_format)

  map_widget
}
#RECODE SELECT MULTIPLE VARIABLES FROM T/F TO BINARY VARIABLE
#raw_data_first[c(which(startsWith(names(raw_data_first), "stop_points")))] <-
#  ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "stop_points")))] == "TRUE", 1, 0)
#raw_data_first[c(which(startsWith(names(raw_data_first), "difficulties")))] <-
#  ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "difficulties")))] == "TRUE", 1, 0)
#raw_data_first[c(which(startsWith(names(raw_data_first), "item_bring")))] <-
#  ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "item_bring")))] == "TRUE", 1, 0)
#raw_data_first[c(which(startsWith(names(raw_data_first), "priority_need")))] <-
#  ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "priority_need")))] == "TRUE", 1, 0)


#RECODE RAW DATASET SO THAT NAs CHANGED TO 0 FOR VULNERABLE POPULATIONS AND BY CALCULATING
#THE GROUP SIZE BASED ON THE NUMBER OF PEOPLE PER AGE GROUP

# Recode the raw dataset: NA counts for vulnerable populations become 0, the
# per-age-group counts are coerced to numeric, and derived totals are added.
#
# Args:
#   summs: data.frame of raw records with per-age-group count columns
#          (male_0_17, female_0_17, ..., female_60) and vulnerable-population
#          counts (unaccompanied_number, disabled_number, pregnant_number).
# Returns: the same data.frame with group_size, female_total and minor_total
#          columns added.
recode_raw <- function(summs) {
  # BUGFIX: previously the ifelse() NA->0 recode was computed but its result
  # was never assigned back, so NAs in these columns survived untouched (the
  # original column vector also listed "disabled_number" twice). Assign the
  # recode back, one column at a time.
  vulnerable_cols <- c("unaccompanied_number", "disabled_number", "pregnant_number")
  for (col in intersect(vulnerable_cols, names(summs))) {
    summs[[col]] <- ifelse(is.na(summs[[col]]), 0, summs[[col]])
  }

  # Count columns may arrive as character or factor; coerce to numeric.
  age_cols <- c("male_0_17", "female_0_17", "male_18_60",
                "female_18_60", "male_60", "female_60")
  for (col in age_cols) {
    summs[[col]] <- as.numeric(as.character(summs[[col]]))
  }

  # Group size = sum of all age/sex counts per row.
  summs$group_size <- as.numeric(apply(summs[, age_cols], 1, sum))
  # NOTE(review): female_total counts adult women only -- it is the
  # denominator for the pregnancy rate in calc_avgs(); confirm intent.
  summs$female_total <- summs$female_18_60
  summs$minor_total <- as.numeric(apply(summs[, c("female_0_17", "male_0_17")], 1, sum))
  return(summs)
}


#RECODING AND RESHAPING SUMMARY DATASET BY DELETING VARIABLES WITH MULTIPLE MENTIONS AND BY
#MERGING THE NAME HEADER WITH THE FACTORS IN THE FIRST ROW

# Transpose the summary-statistics export and rebuild its column names from
# the two header rows (variable name row, then factor-level row), which are
# dropped afterwards.
reshape_summstats <- function(summs) {
  # Drop bookkeeping columns produced by the analysis export.
  summs[, c("X", "independent.var", "independent.var.value", "se",
            "repeat.var", "repeat.var.value")] <- NULL
  summs <- as.data.frame(t(summs))
  # First row after transposing holds the variable names.
  names(summs) <- as.matrix(summs[1, ])
  summs <- summs[-1, ]
  summs[] <- lapply(summs, function(x) type.convert(as.character(x)))
  # Second header row holds the factor levels; append each level to its
  # variable name, except where no level is present ("NA").
  namesvar <- as.vector(sapply(summs[1, ], paste0, collapse = ""))
  names(summs) <- ifelse(namesvar == "NA", names(summs),
                         paste(names(summs[1, ]), namesvar, sep = "_"))
  summs <- summs[-1, ]
  return(summs)
}


#CALCULATING AVERAGES (BASED ON GROUP SIZE) FOR THE VULNERABLE POPULATIONS AND FOR THE
#VARIOUS AGE GROUPS

# Compute population-level proportions: age/sex groups, disabled members and
# members without ID as a share of all individuals; pregnancies as a share of
# adult women; unaccompanied minors as a share of all minors.
# NOTE(review): requires magrittr (%<>%) and dplyr (mutate_if) to be loaded.
calc_avgs <- function(avg) {
  # Treat missing counts as zero before aggregating. (The original repeated
  # the female_18_60 recode twice; deduplicated here -- no behavior change.)
  zero_cols <- c("disabled_number", "unaccompanied_number", "pregnant_number",
                 "male_0_17", "male_18_60", "male_60",
                 "female_0_17", "female_18_60", "female_60",
                 "members_no_id")
  for (col in zero_cols) {
    avg[[col]] <- ifelse(is.na(avg[[col]]), 0, avg[[col]])
  }

  subset_age <- avg[, c("male_0_17", "male_18_60", "male_60",
                        "female_0_17", "female_18_60", "female_60",
                        "disabled_number", "unaccompanied_number",
                        "pregnant_number", "members_no_id",
                        "group_size", "female_total", "minor_total")]
  subset_age %<>% mutate_if(is.character, as.numeric)
  # Collapse to column totals: a single-row data.frame of sums.
  subset_age <- as.data.frame(t(colSums(subset_age)))

  # Shares of the total population.
  pop_cols <- c("male_0_17", "male_18_60", "male_60",
                "female_0_17", "female_18_60", "female_60",
                "disabled_number", "members_no_id")
  subset_age[, pop_cols] <- subset_age[, pop_cols] / subset_age$group_size
  # Pregnancy rate among adult women; unaccompanied rate among minors.
  subset_age$pregnant_number <- subset_age$pregnant_number / subset_age$female_total
  subset_age$unaccompanied_number <- subset_age$unaccompanied_number / subset_age$minor_total
  subset_age$group_size <- NULL
  return(subset_age)
}


#CALCULATE RELEVANT TIME VARIABLES FOR INDESIGN FILE
#LINES BLANKED OUT WITH A # WERE NOT CHOSEN BY ANYONE IN THIS ITERATION OF THE ASSESSMENT

# Derive the aggregate time indicators used by the InDesign layout from the
# individual duration / waiting-time answer columns. Commented-out lines are
# response categories nobody chose in this assessment round.
calc_time <- function(avg) {
  avg$duration_rc_8_24_hours <- as.numeric(as.character(avg$duration_rc_8_24_hours))
  avg$duration_rc_two_days <- as.numeric(as.character(avg$duration_rc_two_days))
  # avg$duration_rc_three_days <- as.numeric(as.character(avg$duration_rc_three_days))
  avg$wait_border_8_24_hours <- as.numeric(as.character(avg$wait_border_8_24_hours))
  avg$wait_border_two_days <- as.numeric(as.character(avg$wait_border_two_days))
  avg$wait_border_three_days <- as.numeric(as.character(avg$wait_border_three_days))
  # avg$date_left_today <- as.numeric(as.character(avg$date_left_today))
  avg$date_left_yesterday <- as.numeric(as.character(avg$date_left_yesterday))
  avg$date_left_two_days <- as.numeric(as.character(avg$date_left_two_days))
  avg$date_left_three_days <- as.numeric(as.character(avg$date_left_three_days))
  avg$date_left_more_than_three <- as.numeric(as.character(avg$date_left_more_than_three))
  avg$date_left_week <- as.numeric(as.character(avg$date_left_week))
  # avg$date_left_more_week <- as.numeric(as.character(avg$date_left_more_week))

  # Combined "8 or more hours" indicators at the Syrian border / registration
  # (sums the 8-24h, two-day and three-day answer columns).
  avg$syriaeight <- avg$wait_border_8_24_hours + avg$wait_border_two_days + avg$wait_border_three_days
  avg$registeredeight <- avg$duration_rc_8_24_hours + avg$duration_rc_two_days # + avg$duration_rc_three_days
  # Departure-date pie slices (a week ago / 4+ days / 1-3 days / yesterday).
  avg$leftpie7 <- avg$date_left_week # + avg$date_left_more_week
  avg$leftpie47 <- avg$date_left_more_than_three
  avg$leftpie13 <- avg$date_left_two_days + avg$date_left_three_days
  avg$leftpie1 <- avg$date_left_yesterday #+ avg$date_left_today
  return(avg)
}


#SELECT RELEVANT VARIABLES FOR INDESIGN FILE

# Subset the columns needed by the InDesign export and write them to disk.
# Side effect: writes output/extract_indesign.csv.
extract_indesign <- function(r) {
  r <- r[c("group_size", "memb_left_yes", "decision_timing_6_less",
           "syriaeight", "registeredeight", "leftpie1", "leftpie7",
           "leftpie47", "leftpie13", "origin_from_location_yes",
           "intent_return_yes", "relatives_kri_yes",
           "reason_intent_family_host", "reason_intent_no_choice",
           "reason_intent_no_options", "reason_intent_other")]
  write.csv(r, "output/extract_indesign.csv")
  return(r)
}
# "group_size", "male017avg", "male1860avg", "male60avg", "female017avg",
# "female1860avg", "female60avg", "unaccompanied_avg", "disabled_avg", "pregnant_avg",

#EXCLUDED FROM ABOVE SUBSET BECAUSE NOT MENTIONED IN THIS DAY'S ASSESSMENT
#"leftpie1", "reason_intent_no_options",

#TO BE CHANGE IN NEXT ANALAYSIS FROM TO
#"reason_intent_I do not have a choice (decided by the government);" TO "reason_intent_no_choice"


#EXTRACT VARIABLE NAMES FOR THE AGGREGATION TABLE

# Transpose the first summary row into a name lookup table and write it out.
# Side effect: writes output/aggtable_names.csv. Requires data.table (setDT).
aggtable_names <- function(summarystats) {
  r <- as.data.frame(t(summarystats[1, ]))
  r <- setDT(r, keep.rownames = TRUE)[]
  write.csv(r, "output/aggtable_names.csv")
  return(r)
}


#SELECT RELEVANT VARIABLES FOR THE AGGREGATION TABLE

# Subset the columns needed by the aggregation table and write them to disk.
extract_aggtable <- function(r) {
  r <- r[c("location_interview_rc_peshkhabor", "location_interview_rc_Sahila",
           "gender_ki_male", "gender_ki_female", "residency_yes",
           "residency_no", "residency_dont_know", "residency_refuse",
           "residency_yes", "arrive_syria_yes", "arrive_syria_no",
           "reason_displace", "reason_displace_airstrikes",
           "reason_displace_arrival_military", "reason_displace_dont_know",
           "reason_displace_escorted_security", "reason_displace_home_destroyed",
           "reason_displace_other", "reason_displace_other_home_destr",
           "reason_displace_planned_travel_return",
           "reason_displace_planned_travel_to_kri")]
  # BUGFIX: previously wrote to "output/extract_indesign.csv", silently
  # overwriting the file produced by extract_indesign().
  write.csv(r, "output/extract_aggtable.csv")
  return(r)
}


#CHARACTER VARIABLES - FUNCTION TO SELECT THE TOP FOUR MOST COMMONLY MENTIONED
#FACTORS FOR THE INDESIGN FILE

# Keep the four most commonly mentioned options among the columns whose names
# start with prefix x, returned transposed for the InDesign export.
# NOTE(review): both select_top_four_* helpers index df$numbers right after
# as.data.frame(unlist(t(df))); that column only exists if the source export
# names it "numbers" -- verify against the input file. Requires data.table.
select_top_four_character <- function(df, x) {
  df <- df[c(which(startsWith(names(df), x)))]
  df <- df[1, ]
  df <- as.data.frame(unlist(t(df)))
  df <- as.data.frame(sort(df$numbers, decreasing = TRUE))
  df <- setDT(df, keep.rownames = TRUE)[]
  df <- as.data.frame(df[1:4, ])
  names(df)[2] <- x
  df <- as.data.frame(t(df))
  return(df)
}


#NUMERIC VARIABLES - FUNCTION TO SELECT THE TOP FOUR MOST COMMONLY MENTIONED
#FACTORS FOR THE INDESIGN FILE

# Numeric-column variant of select_top_four_character(); same caveat about
# the df$numbers column applies. Requires data.table.
select_top_four_numeric <- function(df, x) {
  df <- df[c(which(startsWith(names(df), x)))]
  df <- df[1, ]
  df <- as.data.frame(unlist(t(df)))
  df <- setDT(df, keep.rownames = TRUE)[]
  df <- df[order(-df$numbers), ][1:4, ]
  names(df)[2] <- x
  df <- as.data.frame(t(df))
  return(df)
}
/Recoding.R
no_license
michaelbly/RapidAssessment_Border
R
false
false
9,581
r
# RECODE SELECT MULTIPLE VARIABLES FROM T/F TO BINARY VARIABLE
# (disabled in the original source as well)
# raw_data_first[c(which(startsWith(names(raw_data_first), "stop_points")))] <-
#   ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "stop_points")))] == "TRUE", 1, 0)
# raw_data_first[c(which(startsWith(names(raw_data_first), "difficulties")))] <-
#   ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "difficulties")))] == "TRUE", 1, 0)
# raw_data_first[c(which(startsWith(names(raw_data_first), "item_bring")))] <-
#   ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "item_bring")))] == "TRUE", 1, 0)
# raw_data_first[c(which(startsWith(names(raw_data_first), "priority_need")))] <-
#   ifelse(raw_data_first[c(which(startsWith(names(raw_data_first), "priority_need")))] == "TRUE", 1, 0)

# RECODE RAW DATASET: NAs -> 0 FOR VULNERABLE POPULATIONS, AND COMPUTE THE
# GROUP SIZE FROM THE PER-AGE-GROUP COUNTS.
recode_raw <- function(summs) {
  # Treat missing vulnerable-population counts as zero.
  # (The original computed this ifelse() but never assigned the result back,
  # so the NA -> 0 recode was silently a no-op; it also listed
  # "disabled_number" twice. Both fixed here.)
  for (col in c("unaccompanied_number", "disabled_number", "pregnant_number")) {
    summs[[col]][is.na(summs[[col]])] <- 0
  }
  # Age-group counts may arrive as factors/character; coerce via character to
  # avoid factor-level-to-integer surprises.
  age_cols <- c("male_0_17", "female_0_17", "male_18_60",
                "female_18_60", "male_60", "female_60")
  for (col in age_cols) {
    summs[[col]] <- as.numeric(as.character(summs[[col]]))
  }
  summs$group_size <- rowSums(summs[, age_cols])
  summs$female_total <- summs$female_18_60  # adult women only
  summs$minor_total <- rowSums(summs[, c("female_0_17", "male_0_17")])
  return(summs)
}

# RESHAPE SUMMARY DATASET: DROP BOOKKEEPING COLUMNS AND MERGE THE NAME HEADER
# WITH THE FACTOR LABELS IN THE FIRST ROW.
reshape_summstats <- function(summs) {
  summs[, c("X", "independent.var", "independent.var.value",
            "se", "repeat.var", "repeat.var.value")] <- NULL
  summs <- as.data.frame(t(summs))
  names(summs) <- as.matrix(summs[1, ])  # first row holds the variable names
  summs <- summs[-1, ]
  summs[] <- lapply(summs, function(x) type.convert(as.character(x)))
  # Append the factor label (now in the first data row) to each column name,
  # unless that label is the literal string "NA".
  namesvar <- as.vector(sapply(summs[1, ], paste0, collapse = ""))
  names(summs) <- ifelse(namesvar == "NA", names(summs),
                         paste(names(summs[1, ]), namesvar, sep = "_"))
  summs <- summs[-1, ]
  return(summs)
}

# CALCULATE AVERAGES (BASED ON GROUP SIZE) FOR THE VULNERABLE POPULATIONS AND
# FOR THE VARIOUS AGE GROUPS.
calc_avgs <- function(avg) {
  # NA -> 0 for every count column (the original repeated this line per
  # column, with female_18_60 duplicated; folded into one loop).
  na_zero_cols <- c("disabled_number", "unaccompanied_number",
                    "pregnant_number", "male_0_17", "male_18_60", "male_60",
                    "female_0_17", "female_18_60", "female_60",
                    "members_no_id")
  for (col in na_zero_cols) {
    avg[[col]] <- ifelse(is.na(avg[[col]]), 0, avg[[col]])
  }
  subset_age <- avg[, c("male_0_17", "male_18_60", "male_60",
                        "female_0_17", "female_18_60", "female_60",
                        "disabled_number", "unaccompanied_number",
                        "pregnant_number", "members_no_id",
                        "group_size", "female_total", "minor_total")]
  subset_age %<>% mutate_if(is.character, as.numeric)  # magrittr/dplyr
  subset_age <- as.data.frame(t(colSums(subset_age)))
  # Per-capita shares: most columns divide by total group size ...
  per_capita <- c("male_0_17", "male_18_60", "male_60",
                  "female_0_17", "female_18_60", "female_60",
                  "disabled_number", "members_no_id")
  subset_age[, per_capita] <- subset_age[, per_capita] / subset_age$group_size
  # ... pregnancies by the number of adult women, unaccompanied by minors.
  subset_age$pregnant_number <- subset_age$pregnant_number / subset_age$female_total
  subset_age$unaccompanied_number <- subset_age$unaccompanied_number / subset_age$minor_total
  subset_age$group_size <- NULL
  return(subset_age)
}

# CALCULATE RELEVANT TIME VARIABLES FOR THE INDESIGN FILE.
# Commented-out terms were response options not chosen by anyone in this
# iteration of the assessment.
calc_time <- function(avg) {
  time_cols <- c("duration_rc_8_24_hours", "duration_rc_two_days",
                 # "duration_rc_three_days",
                 "wait_border_8_24_hours", "wait_border_two_days",
                 "wait_border_three_days",
                 # "date_left_today",
                 "date_left_yesterday", "date_left_two_days",
                 "date_left_three_days", "date_left_more_than_three",
                 "date_left_week")
                 # "date_left_more_week"
  for (col in time_cols) {
    avg[[col]] <- as.numeric(as.character(avg[[col]]))
  }
  avg$syriaeight <- avg$wait_border_8_24_hours + avg$wait_border_two_days +
    avg$wait_border_three_days
  avg$registeredeight <- avg$duration_rc_8_24_hours + avg$duration_rc_two_days  # + duration_rc_three_days
  avg$leftpie7 <- avg$date_left_week  # + date_left_more_week
  avg$leftpie47 <- avg$date_left_more_than_three
  avg$leftpie13 <- avg$date_left_two_days + avg$date_left_three_days
  avg$leftpie1 <- avg$date_left_yesterday  # + date_left_today
  return(avg)
}

# SELECT RELEVANT VARIABLES FOR THE INDESIGN FILE.
extract_indesign <- function(r) {
  r <- r[c("group_size", "memb_left_yes", "decision_timing_6_less",
           "syriaeight", "registeredeight",
           "leftpie1", "leftpie7", "leftpie47", "leftpie13",
           "origin_from_location_yes", "intent_return_yes", "relatives_kri_yes",
           "reason_intent_family_host", "reason_intent_no_choice",
           "reason_intent_no_options", "reason_intent_other")]
  write.csv(r, "output/extract_indesign.csv")
  return(r)
}
# "group_size", "male017avg", "male1860avg", "male60avg", "female017avg",
# "female1860avg", "female60avg", "unaccompanied_avg", "disabled_avg", "pregnant_avg",
# EXCLUDED FROM ABOVE SUBSET BECAUSE NOT MENTIONED IN THIS DAY'S ASSESSMENT:
# "leftpie1", "reason_intent_no_options"
# TO BE CHANGED IN THE NEXT ANALYSIS FROM
# "reason_intent_I do not have a choice (decided by the government);" TO "reason_intent_no_choice"

# EXTRACT VARIABLE NAMES FOR THE AGGREGATION TABLE.
aggtable_names <- function(summarystats) {
  r <- as.data.frame(t(summarystats[1, ]))
  r <- setDT(r, keep.rownames = TRUE)[]  # data.table::setDT
  write.csv(r, "output/aggtable_names.csv")
  return(r)
}

# SELECT RELEVANT VARIABLES FOR THE AGGREGATION TABLE.
# Fixes vs. the original: removed the duplicated "residency_yes" entry, and
# the output no longer overwrites "output/extract_indesign.csv" (copy-paste
# from extract_indesign).
extract_aggtable <- function(r) {
  r <- r[c("location_interview_rc_peshkhabor", "location_interview_rc_Sahila",
           "gender_ki_male", "gender_ki_female",
           "residency_yes", "residency_no", "residency_dont_know",
           "residency_refuse",
           "arrive_syria_yes", "arrive_syria_no",
           "reason_displace",
           "reason_displace_airstrikes", "reason_displace_arrival_military",
           "reason_displace_dont_know", "reason_displace_escorted_security",
           "reason_displace_home_destroyed", "reason_displace_other",
           "reason_displace_other_home_destr",
           "reason_displace_planned_travel_return",
           "reason_displace_planned_travel_to_kri")]
  write.csv(r, "output/extract_aggtable.csv")
  return(r)
}

# CHARACTER VARIABLES - SELECT THE TOP FOUR MOST COMMONLY MENTIONED FACTORS
# FOR THE INDESIGN FILE. `x` is a column-name prefix.
select_top_four_character <- function(df, x) {
  df <- df[c(which(startsWith(names(df), x)))]
  df <- df[1, ]
  df <- as.data.frame(unlist(t(df)))
  # NOTE(review): assumes the value column is named "numbers" after the
  # transpose -- confirm upstream; sort() also drops names here.
  df <- as.data.frame(sort(df$numbers, decreasing = TRUE))
  df <- setDT(df, keep.rownames = TRUE)[]
  df <- as.data.frame(df[1:4, ])
  names(df)[2] <- x
  df <- as.data.frame(t(df))
  return(df)
}

# NUMERIC VARIABLES - SELECT THE TOP FOUR MOST COMMONLY MENTIONED FACTORS
# FOR THE INDESIGN FILE.
select_top_four_numeric <- function(df, x) {
  df <- df[c(which(startsWith(names(df), x)))]
  df <- df[1, ]
  df <- as.data.frame(unlist(t(df)))
  df <- setDT(df, keep.rownames = TRUE)[]  # TRUE, not T (T is reassignable)
  # NOTE(review): assumes a "numbers" value column -- confirm upstream.
  df <- df[order(-df$numbers), ][1:4, ]
  names(df)[2] <- x
  df <- as.data.frame(t(df))
  return(df)
}
data <- read.csv('Datasets/loan_dataset.csv') train = data[1:nrow(data)-1,] test = data[nrow(data),] k = 3 distances = c() train$dist = sqrt((train$cred_card-test[1,2])^2 + (train$balance-test[1,3])^2) train <- train[order(train$dist),] count = 0 for(i in 1:k){ if(train[i, 4]=='Y') count = count + 1 else count = count - 1 } if(count>=0) print("Yes") else print("No")
/Question_3.R
no_license
AshwinHarish/Data-Analytics
R
false
false
405
r
data <- read.csv('Datasets/loan_dataset.csv') train = data[1:nrow(data)-1,] test = data[nrow(data),] k = 3 distances = c() train$dist = sqrt((train$cred_card-test[1,2])^2 + (train$balance-test[1,3])^2) train <- train[order(train$dist),] count = 0 for(i in 1:k){ if(train[i, 4]=='Y') count = count + 1 else count = count - 1 } if(count>=0) print("Yes") else print("No")
% Generated by roxygen2 (4.0.0): do not edit by hand \name{install_gitorious} \alias{install_gitorious} \title{Attempts to install a package directly from gitorious.} \usage{ install_gitorious(repo, project = repo, ref = "master", subdir = NULL, branch = NULL, ...) } \arguments{ \item{project}{Gitorious project name} \item{repo}{Repo name} \item{ref}{Desired git ref - defaults to \code{"master"}} \item{subdir}{subdirectory within repo that contains the R package.} \item{branch}{Deprecated. Use \code{ref} instead.} \item{...}{Other arguments passed on to \code{\link{install}}.} } \description{ This function is vectorised so you can install multiple packages in a single command. } \examples{ \dontrun{ install_gitorious("r-mpc-package") } } \seealso{ Other package installation: \code{\link{install_bitbucket}}; \code{\link{install_github}}; \code{\link{install_git}}; \code{\link{install_url}}; \code{\link{install_version}}; \code{\link{install}} }
/man/install_gitorious.Rd
no_license
BrunoVilela/devtools
R
false
false
988
rd
% Generated by roxygen2 (4.0.0): do not edit by hand \name{install_gitorious} \alias{install_gitorious} \title{Attempts to install a package directly from gitorious.} \usage{ install_gitorious(repo, project = repo, ref = "master", subdir = NULL, branch = NULL, ...) } \arguments{ \item{project}{Gitorious project name} \item{repo}{Repo name} \item{ref}{Desired git ref - defaults to \code{"master"}} \item{subdir}{subdirectory within repo that contains the R package.} \item{branch}{Deprecated. Use \code{ref} instead.} \item{...}{Other arguments passed on to \code{\link{install}}.} } \description{ This function is vectorised so you can install multiple packages in a single command. } \examples{ \dontrun{ install_gitorious("r-mpc-package") } } \seealso{ Other package installation: \code{\link{install_bitbucket}}; \code{\link{install_github}}; \code{\link{install_git}}; \code{\link{install_url}}; \code{\link{install_version}}; \code{\link{install}} }
# Multiple plot function # # ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects) # - cols: Number of columns in layout # - layout: A matrix specifying the layout. If present, 'cols' is ignored. # # If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE), # then plot 1 will go in the upper left, 2 will go in the upper right, and # 3 will go all the way across the bottom. # multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { require(grid) # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } }
/Functions_Fonctions/PlotScripts/InfoPlotsFUN/Mulitplot.r
no_license
martinjeanphd/CABIN_vv_RCBA
R
false
false
1,577
r
# Multiple plot function # # ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects) # - cols: Number of columns in layout # - layout: A matrix specifying the layout. If present, 'cols' is ignored. # # If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE), # then plot 1 will go in the upper left, 2 will go in the upper right, and # 3 will go all the way across the bottom. # multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { require(grid) # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } }
# Make bar plots and heat maps of per-sample taxonomic read counts for paper.
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(scales)

# ---- read in files ----
# NOTE(review): datasets are matched to OTUtables positionally, relying on
# list.files() alphabetical order -- confirm the four CSVs sort this way.
# The pattern '.csv' is a regex ('.' matches any char); '\\.csv$' would be
# stricter, left unchanged to avoid altering which files are picked up.
OTUtables <- list.files(path = '.', pattern = '.csv', full.names = TRUE)
Data1_OTU_wgs_16s <- read.csv(OTUtables[1], header = TRUE)
Data2_OTU_wgs_microb <- read.csv(OTUtables[2], header = TRUE)
Data3_OTU_metabar <- read.csv(OTUtables[3], header = TRUE)
Data5_OTU_wgs_refmapped <- read.csv(OTUtables[4], header = TRUE)

# ---- format tables: wide count table -> long (Site, Order, NumReads) ----
melt_otu <- function(otu) {
  long <- melt(otu)
  colnames(long) <- c('Site', 'Order', 'NumReads')
  long
}
data1 <- melt_otu(Data1_OTU_wgs_16s)
data2 <- melt_otu(Data2_OTU_wgs_microb)
data3 <- melt_otu(Data3_OTU_metabar)
data5 <- melt_otu(Data5_OTU_wgs_refmapped)

# clean up: data1 (singlem results) should not contain the eukaryotic
# Stramenopiles entry
data1_clean <- data1[data1$Order != 'Stramenopiles', ]

# ---- as bar plots ----
# One fill colour per taxonomic Order, interpolated from the Set1 palette.
order_palette <- function(df) {
  colorRampPalette(brewer.pal(8, "Set1"))(length(unique(df$Order)))
}

# Stacked bar plot of read counts per sample, displayed and saved as JPEG.
# (The repeated per-dataset blocks of the original are folded into this
# helper; ggsave() gets an explicit `plot =` because last_plot() is not set
# inside a function body.)
plot_order_bars <- function(df, title, out_file, width = 10, height = 8) {
  p <- ggplot(df, aes(x = Site, y = NumReads, fill = factor(Order))) +
    geom_bar(stat = "identity", colour = "black") +
    scale_fill_manual(values = order_palette(df)) +
    scale_y_continuous(labels = scales::comma) +
    ggtitle(title) +
    ylab('Number of seq reads') + xlab('Sample') +
    guides(fill = guide_legend(title = "Order")) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
          axis.text.y = element_text(size = 10),
          axis.title = element_text(size = 15, face = 'bold'))
  print(p)  # display, as the original top-level expressions did
  ggsave(out_file, plot = p, height = height, width = width,
         units = 'in', dpi = 600)
}

plot_order_bars(data1_clean, 'Data1_OTU_wgs_16s', 'Data1_OTU_wgs_16s_barplot.jpg')
plot_order_bars(data2, 'Data2_OTU_wgs_microb', 'Data2_OTU_wgs_microb_barplot.jpg')
plot_order_bars(data3, 'Data3_OTU_metabar', 'Data3_OTU_metabar_barplot.jpg')
plot_order_bars(data5, 'Data5_OTU_wgs_refmapped',
                'Data5_OTU_wgs_refmapped_barplot.jpg', width = 20)

# ---- as heat maps ----
# make 0 counts NA's so they can be plotted as white boxes in tile plots
data1_clean[data1_clean == 0] <- NA
data2[data2 == 0] <- NA
data3[data3 == 0] <- NA
data5[data5 == 0] <- NA

# Tile plot of read counts (Order x Site), displayed and saved as JPEG.
# NOTE(review): max(df$NumReads) is NA once zeros are recoded to NA (no
# na.rm), so ggplot takes the upper fill limit from the data; kept as in
# the original.
plot_order_heatmap <- function(df, title, out_file, width = 10, height = 8) {
  p <- ggplot(df, aes(x = Site, y = factor(Order))) +
    geom_tile(aes(fill = NumReads), col = 'grey') +
    scale_fill_gradientn(colors = viridis_pal()(20),
                         limits = c(0, max(df$NumReads)),
                         na.value = 'white', labels = comma) +
    ggtitle(title) + ylab('Taxonomic Order') +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
          axis.text.y = element_text(size = 10),
          axis.title = element_text(size = 15, face = 'bold'))
  print(p)
  ggsave(out_file, plot = p, height = height, width = width,
         units = 'in', dpi = 600)
}

plot_order_heatmap(data1_clean, 'Data1_OTU_wgs_16s', 'Data1_OTU_wgs_16s_heatmap.jpg')
plot_order_heatmap(data2, 'Data2_OTU_wgs_microb', 'Data2_OTU_wgs_microb_heatmap.jpg')
plot_order_heatmap(data3, 'Data3_OTU_metabar', 'Data3_OTU_metabar_heatmap.jpg')
plot_order_heatmap(data5, 'Data5_OTU_wgs_refmapped',
                   'Data5_OTU_wgs_refmapped_heatmap.jpg', height = 15)
/Scripts_to_compare_dats/heatmap_plts.r
permissive
marisalim/Everest-eDNA-biodiversity-survey
R
false
false
6,315
r
# Bar plots and heat maps of taxonomic read counts per sample, for the paper.
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(scales)

# Read the four OTU tables (matched positionally to the sorted *.csv list).
OTUtables <- list.files(path = '.', pattern = '.csv', full.names = TRUE)
Data1_OTU_wgs_16s <- read.csv(OTUtables[1], header = TRUE)
Data2_OTU_wgs_microb <- read.csv(OTUtables[2], header = TRUE)
Data3_OTU_metabar <- read.csv(OTUtables[3], header = TRUE)
Data5_OTU_wgs_refmapped <- read.csv(OTUtables[4], header = TRUE)

# Wide count matrix -> long form with columns Site / Order / NumReads.
to_long <- function(wide) {
  long <- melt(wide)
  colnames(long) <- c('Site', 'Order', 'NumReads')
  long
}
data1 <- to_long(Data1_OTU_wgs_16s)
data2 <- to_long(Data2_OTU_wgs_microb)
data3 <- to_long(Data3_OTU_metabar)
data5 <- to_long(Data5_OTU_wgs_refmapped)

# Drop the eukaryotic Stramenopiles row from the singlem (data1) results.
data1_clean <- data1[data1$Order != 'Stramenopiles', ]

# Set1-derived palette with one colour per distinct Order.
palette_for <- function(df) {
  colorRampPalette(brewer.pal(8, "Set1"))(length(unique(df$Order)))
}

# Stacked bar chart of reads per sample; drawn on screen and written to JPEG
# (ggsave gets the plot explicitly since last_plot() is not set in here).
draw_stacked_bars <- function(df, title, out_file, width = 10, height = 8) {
  p <- ggplot(df, aes(x = Site, y = NumReads, fill = factor(Order))) +
    geom_bar(stat = "identity", colour = "black") +
    scale_fill_manual(values = palette_for(df)) +
    scale_y_continuous(labels = scales::comma) +
    ggtitle(title) +
    ylab('Number of seq reads') + xlab('Sample') +
    guides(fill = guide_legend(title = "Order")) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
          axis.text.y = element_text(size = 10),
          axis.title = element_text(size = 15, face = 'bold'))
  print(p)
  ggsave(out_file, plot = p, height = height, width = width,
         units = 'in', dpi = 600)
}

draw_stacked_bars(data1_clean, 'Data1_OTU_wgs_16s', 'Data1_OTU_wgs_16s_barplot.jpg')
draw_stacked_bars(data2, 'Data2_OTU_wgs_microb', 'Data2_OTU_wgs_microb_barplot.jpg')
draw_stacked_bars(data3, 'Data3_OTU_metabar', 'Data3_OTU_metabar_barplot.jpg')
draw_stacked_bars(data5, 'Data5_OTU_wgs_refmapped',
                  'Data5_OTU_wgs_refmapped_barplot.jpg', width = 20)

# Recode zero counts to NA so tiles render as white boxes in the heat maps.
data1_clean[data1_clean == 0] <- NA
data2[data2 == 0] <- NA
data3[data3 == 0] <- NA
data5[data5 == 0] <- NA

# Order-by-Site tile plot; drawn on screen and written to JPEG. The upper
# fill limit max(df$NumReads) evaluates to NA after the zero->NA recode
# (as in the original), leaving ggplot to pick it from the data.
draw_count_tiles <- function(df, title, out_file, width = 10, height = 8) {
  p <- ggplot(df, aes(x = Site, y = factor(Order))) +
    geom_tile(aes(fill = NumReads), col = 'grey') +
    scale_fill_gradientn(colors = viridis_pal()(20),
                         limits = c(0, max(df$NumReads)),
                         na.value = 'white', labels = comma) +
    ggtitle(title) + ylab('Taxonomic Order') +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 10),
          axis.text.y = element_text(size = 10),
          axis.title = element_text(size = 15, face = 'bold'))
  print(p)
  ggsave(out_file, plot = p, height = height, width = width,
         units = 'in', dpi = 600)
}

draw_count_tiles(data1_clean, 'Data1_OTU_wgs_16s', 'Data1_OTU_wgs_16s_heatmap.jpg')
draw_count_tiles(data2, 'Data2_OTU_wgs_microb', 'Data2_OTU_wgs_microb_heatmap.jpg')
draw_count_tiles(data3, 'Data3_OTU_metabar', 'Data3_OTU_metabar_heatmap.jpg')
draw_count_tiles(data5, 'Data5_OTU_wgs_refmapped',
                 'Data5_OTU_wgs_refmapped_heatmap.jpg', height = 15)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FootprintFinder.R
\docType{methods}
\name{getFootprintsForGene,FootprintFinder-method}
\alias{getFootprintsForGene,FootprintFinder-method}
\alias{getFootprintsForGene}
\title{Get Footprints for Gene}
\usage{
\S4method{getFootprintsForGene}{FootprintFinder}(obj, gene,
  size.upstream = 1000, size.downstream = 0,
  biotype = "protein_coding", moleculetype = "gene")
}
\arguments{
\item{obj}{An object of class FootprintFinder}

\item{gene}{A gene name or ID}

\item{size.upstream}{An integer denoting the distance upstream of the target gene to look for footprints (default = 1000)}

\item{size.downstream}{An integer denoting the distance downstream of the target gene to look for footprints (default = 0)}

\item{biotype}{A type of biological unit (default="protein_coding")}

\item{moleculetype}{A type of molecule (default="gene")}
}
\value{
A dataframe containing all footprints for the specified gene and accompanying parameters
}
\description{
Using the \code{\link{getGenePromoterRegion}} and \code{\link{getFootprintsInRegion}} functions
in conjunction with the gtf table inside the genome database specified by the FootprintFinder
object, retrieve a dataframe containing the footprints for a specified gene
}
\examples{
db.address <- system.file(package="TReNA", "extdata")
genome.db.uri <- paste("sqlite:/",db.address,"genome.sub.db", sep = "/")
project.db.uri <- paste("sqlite:/",db.address,"project.sub.db", sep = "/")
fp <- FootprintFinder(genome.db.uri, project.db.uri)
footprints <- getFootprintsForGene(fp, gene = "MEF2C")
}
\seealso{
Other FootprintFinder methods: \code{\link{FootprintFinder-class}},
\code{\link{closeDatabaseConnections,FootprintFinder-method}},
\code{\link{getChromLoc,FootprintFinder-method}},
\code{\link{getFootprintsInRegion,FootprintFinder-method}},
\code{\link{getGenePromoterRegion,FootprintFinder-method}},
\code{\link{getGtfGeneBioTypes,FootprintFinder-method}},
\code{\link{getGtfMoleculeTypes,FootprintFinder-method}}, \code{\link{getPromoterRegionsAllGenes,FootprintFinder-method}}, \code{\link{mapMotifsToTFsMergeIntoTable,FootprintFinder-method}} }
/man/getFootprintsForGene.Rd
no_license
noahmclean1/TReNA
R
false
true
2,196
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FootprintFinder.R
\docType{methods}
\name{getFootprintsForGene,FootprintFinder-method}
\alias{getFootprintsForGene,FootprintFinder-method}
\alias{getFootprintsForGene}
\title{Get Footprints for Gene}
\usage{
\S4method{getFootprintsForGene}{FootprintFinder}(obj, gene,
  size.upstream = 1000, size.downstream = 0,
  biotype = "protein_coding", moleculetype = "gene")
}
\arguments{
\item{obj}{An object of class FootprintFinder}

\item{gene}{A gene name or ID}

\item{size.upstream}{An integer denoting the distance upstream of the target gene to look for footprints (default = 1000)}

\item{size.downstream}{An integer denoting the distance downstream of the target gene to look for footprints (default = 0)}

\item{biotype}{A type of biological unit (default="protein_coding")}

\item{moleculetype}{A type of molecule (default="gene")}
}
\value{
A dataframe containing all footprints for the specified gene and accompanying parameters
}
\description{
Using the \code{\link{getGenePromoterRegion}} and \code{\link{getFootprintsInRegion}} functions
in conjunction with the gtf table inside the genome database specified by the FootprintFinder
object, retrieve a dataframe containing the footprints for a specified gene
}
\examples{
db.address <- system.file(package="TReNA", "extdata")
genome.db.uri <- paste("sqlite:/",db.address,"genome.sub.db", sep = "/")
project.db.uri <- paste("sqlite:/",db.address,"project.sub.db", sep = "/")
fp <- FootprintFinder(genome.db.uri, project.db.uri)
footprints <- getFootprintsForGene(fp, gene = "MEF2C")
}
\seealso{
Other FootprintFinder methods: \code{\link{FootprintFinder-class}},
\code{\link{closeDatabaseConnections,FootprintFinder-method}},
\code{\link{getChromLoc,FootprintFinder-method}},
\code{\link{getFootprintsInRegion,FootprintFinder-method}},
\code{\link{getGenePromoterRegion,FootprintFinder-method}},
\code{\link{getGtfGeneBioTypes,FootprintFinder-method}},
\code{\link{getGtfMoleculeTypes,FootprintFinder-method}}, \code{\link{getPromoterRegionsAllGenes,FootprintFinder-method}}, \code{\link{mapMotifsToTFsMergeIntoTable,FootprintFinder-method}} }
# Generated by roxytest: Do not edit by hand! context("File R/setup_vars.R: @testexamples") test_that("Function setup_vars() @ L24", { x <- system.file("extdata/COLO829-replay.json.gz", package = "dracarys") (v <- setup_vars(x)) expect_equal(class(v), "list") expect_equal(length(v), 1) expect_equal(length(v[[1]]), 14) expect_equal(names(v[[1]]), c("name", "res_dir", "replay_fn", "fraglen_fn", "map_met_fn", "ploidy_est_fn", "time_met_fn", "cov_contig_fn", "cov_met_fn", "cov_finehist_fn", "vc_met_fn", "snv_fn", "sv_fn", "steps_run")) })
/tests/testthat/test-roxytest-testexamples-setup_vars.R
permissive
umccr/old_dracarys
R
false
false
566
r
# Generated by roxytest: Do not edit by hand! context("File R/setup_vars.R: @testexamples") test_that("Function setup_vars() @ L24", { x <- system.file("extdata/COLO829-replay.json.gz", package = "dracarys") (v <- setup_vars(x)) expect_equal(class(v), "list") expect_equal(length(v), 1) expect_equal(length(v[[1]]), 14) expect_equal(names(v[[1]]), c("name", "res_dir", "replay_fn", "fraglen_fn", "map_met_fn", "ploidy_est_fn", "time_met_fn", "cov_contig_fn", "cov_met_fn", "cov_finehist_fn", "vc_met_fn", "snv_fn", "sv_fn", "steps_run")) })
MAPsig1<-function(unique.pat,value.dis, iter=1000) { cat("Permutation: \n") n <- apply(value.dis, 2, sum) n.soft <- patternMatch(value.dis, unique.pat) n.strong <- patternMatch.strong(value.dis, unique.pat) n.pat <- length(unique.pat) res.random <- matrix(0, length(unique.pat), iter) res.random.strong <- matrix(0, length(unique.pat), iter) n.entity <- dim(value.dis)[2] genes <- 1:nrow(value.dis) for (l in 1:iter) { if (l %% 50 == 0) cat(l, "\n") Sgenex.random <- value.dis * 0 for (i in 1:n.entity) { Sgenex.random[sample(genes, n[i]), i] <- 1 } res.random[, l] <- patternMatch(Sgenex.random, unique.pat) res.random.strong[, l] <- patternMatch.strong(Sgenex.random, unique.pat) } rownames(res.random) <- unique.pat rownames(res.random.strong) <- unique.pat p.soft <- array(0, n.pat) p.strong <- array(0, n.pat) for (i in 1:n.pat) { p.soft[i] <- length(which(res.random[i, ] >= n.soft[i]))/iter p.strong[i] <- length(which(res.random.strong[i, ] >= n.strong[i]))/iter } res <- data.frame(p.soft, p.strong) return(res) }
/R/MAPsig1.R
no_license
cran/MAMA
R
false
false
1,233
r
MAPsig1<-function(unique.pat,value.dis, iter=1000) { cat("Permutation: \n") n <- apply(value.dis, 2, sum) n.soft <- patternMatch(value.dis, unique.pat) n.strong <- patternMatch.strong(value.dis, unique.pat) n.pat <- length(unique.pat) res.random <- matrix(0, length(unique.pat), iter) res.random.strong <- matrix(0, length(unique.pat), iter) n.entity <- dim(value.dis)[2] genes <- 1:nrow(value.dis) for (l in 1:iter) { if (l %% 50 == 0) cat(l, "\n") Sgenex.random <- value.dis * 0 for (i in 1:n.entity) { Sgenex.random[sample(genes, n[i]), i] <- 1 } res.random[, l] <- patternMatch(Sgenex.random, unique.pat) res.random.strong[, l] <- patternMatch.strong(Sgenex.random, unique.pat) } rownames(res.random) <- unique.pat rownames(res.random.strong) <- unique.pat p.soft <- array(0, n.pat) p.strong <- array(0, n.pat) for (i in 1:n.pat) { p.soft[i] <- length(which(res.random[i, ] >= n.soft[i]))/iter p.strong[i] <- length(which(res.random.strong[i, ] >= n.strong[i]))/iter } res <- data.frame(p.soft, p.strong) return(res) }
cor.BF <- function(x, y, iterations = 10000) { # Take two vectors as input: Compute the correlation coefficent and the corresponding BF # Return a named vector of length two: the Bayes factor and estimated coefficient rho library(BayesFactor) cor <- correlationBF(x, y) BF <- extractBF(cor)$bf coeff <- mean(posterior(cor, iterations = iterations, progress = FALSE)[, "rho"]) out <- c(BF, coeff) names(out) <- c("BF", "rho") return(out) } # For example: # cor.BF(rnorm(50), rnorm(50)) cor_heatmap_BF <- function(X, plot.all = FALSE, labels=NULL, low.col = "lightblue", high.col = "red", title=NULL, subtitle=NULL, digits=2, show.N=TRUE, show.BF=TRUE, legend.position = "right") { # X: a data.frame or matrix that can be passed to corr.test() # data.frame is preferred because colnames will be retained for labels # code largely taken from: # http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization # Load necessary libraries (will throw error if not installed): library(ggplot2) library(reshape2) # for melt() # library(psych) # for corr.test() if(!is.null(labels)) { if(length(labels) != ncol(X)) { warning(paste("Number of specified labels does not correspond to number of columns in X!", length(labels), "!=", ncol(X), "\nOriginal colnames will be retained and parameter `labels` will be ignored.")) } else { colnames(X) <- labels } } #Compute correlations: # cormat <- corr.test(X, adjust = p.adjust) cormat <- list(r = matrix(NA, ncol = ncol(X), nrow = ncol(X)), # pre-allocate bf = matrix(NA, ncol = ncol(X), nrow = ncol(X)), n = matrix(NA, ncol = ncol(X), nrow = ncol(X))) for(i in seq_along(X)) { for(j in seq_along(X)) { cor.bf <- cor.BF(as.matrix(X)[, i], as.matrix(X)[, j]) cormat[["r"]][i, j] <- cor.bf["rho"] cormat[["bf"]][i, j] <- cor.bf["BF"] cormat[["n"]][i, j] <- nrow(na.omit(X[, c(i, j)])) } } cormat$r <- round(cormat$r, digits) # Remove redundant correlations if not otherwise specified: if(!plot.all) { 
cormat$r[upper.tri(cormat$r, diag = TRUE)] <- NA cormat$bf[upper.tri(cormat$bf, diag = TRUE)] <- NA cormat$n[upper.tri(cormat$n, diag = TRUE)] <- NA } colnames(cormat$r) <- colnames(cormat$bf) <- colnames(cormat$n) <- colnames(X) row.names(cormat$r) <- row.names(cormat$bf) <- row.names(cormat$n) <- colnames(X) # Put together data structure: cormat.melt <- melt(cormat$r, na.rm = TRUE, value.name = "r") cormat.melt <- merge(cormat.melt, melt(cormat$bf, value.name = "bf")) cormat.melt <- merge(cormat.melt, melt(cormat$n, value.name = "n")) if(show.BF) cormat.melt$bf.label <- prettyNum(round(cormat.melt$bf, 2), big.mark = ",", scientific = FALSE) corplot <- ggplot(cormat.melt, aes(Var1, Var2, fill=r)) + geom_tile(alpha=.8) + geom_text(aes(label=format(r, nsmall=digits))) + theme_minimal() + coord_fixed() + labs(title=title, subtitle=subtitle, x=NULL, y=NULL) + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1), axis.text.y = element_text(angle = 45, vjust = 1, hjust = 1), legend.position = legend.position) + scale_fill_gradient2(low = low.col, high = high.col, mid = "white", midpoint = 0, limit = c(-1,1), space = "Lab", name="Correlation") # Conditional additions: if(show.N) corplot <- corplot + geom_text(aes(label=paste("N =", n)), size=2, nudge_y = -0.25, color="darkgray") if(show.BF) { corplot <- corplot + geom_text(aes(label="Bayes factor"), size=2, nudge_y = 0.35, color="darkgray", hjust="center") + geom_text(aes(label=bf.label), size=2, nudge_y = 0.25, color="darkgray", hjust="center") } return(corplot) }
/R_functions/cor_heatmap_BF.R
no_license
VanRijnLab/speak-smart-1
R
false
false
3,826
r
cor.BF <- function(x, y, iterations = 10000) { # Take two vectors as input: Compute the correlation coefficent and the corresponding BF # Return a named vector of length two: the Bayes factor and estimated coefficient rho library(BayesFactor) cor <- correlationBF(x, y) BF <- extractBF(cor)$bf coeff <- mean(posterior(cor, iterations = iterations, progress = FALSE)[, "rho"]) out <- c(BF, coeff) names(out) <- c("BF", "rho") return(out) } # For example: # cor.BF(rnorm(50), rnorm(50)) cor_heatmap_BF <- function(X, plot.all = FALSE, labels=NULL, low.col = "lightblue", high.col = "red", title=NULL, subtitle=NULL, digits=2, show.N=TRUE, show.BF=TRUE, legend.position = "right") { # X: a data.frame or matrix that can be passed to corr.test() # data.frame is preferred because colnames will be retained for labels # code largely taken from: # http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization # Load necessary libraries (will throw error if not installed): library(ggplot2) library(reshape2) # for melt() # library(psych) # for corr.test() if(!is.null(labels)) { if(length(labels) != ncol(X)) { warning(paste("Number of specified labels does not correspond to number of columns in X!", length(labels), "!=", ncol(X), "\nOriginal colnames will be retained and parameter `labels` will be ignored.")) } else { colnames(X) <- labels } } #Compute correlations: # cormat <- corr.test(X, adjust = p.adjust) cormat <- list(r = matrix(NA, ncol = ncol(X), nrow = ncol(X)), # pre-allocate bf = matrix(NA, ncol = ncol(X), nrow = ncol(X)), n = matrix(NA, ncol = ncol(X), nrow = ncol(X))) for(i in seq_along(X)) { for(j in seq_along(X)) { cor.bf <- cor.BF(as.matrix(X)[, i], as.matrix(X)[, j]) cormat[["r"]][i, j] <- cor.bf["rho"] cormat[["bf"]][i, j] <- cor.bf["BF"] cormat[["n"]][i, j] <- nrow(na.omit(X[, c(i, j)])) } } cormat$r <- round(cormat$r, digits) # Remove redundant correlations if not otherwise specified: if(!plot.all) { 
cormat$r[upper.tri(cormat$r, diag = TRUE)] <- NA cormat$bf[upper.tri(cormat$bf, diag = TRUE)] <- NA cormat$n[upper.tri(cormat$n, diag = TRUE)] <- NA } colnames(cormat$r) <- colnames(cormat$bf) <- colnames(cormat$n) <- colnames(X) row.names(cormat$r) <- row.names(cormat$bf) <- row.names(cormat$n) <- colnames(X) # Put together data structure: cormat.melt <- melt(cormat$r, na.rm = TRUE, value.name = "r") cormat.melt <- merge(cormat.melt, melt(cormat$bf, value.name = "bf")) cormat.melt <- merge(cormat.melt, melt(cormat$n, value.name = "n")) if(show.BF) cormat.melt$bf.label <- prettyNum(round(cormat.melt$bf, 2), big.mark = ",", scientific = FALSE) corplot <- ggplot(cormat.melt, aes(Var1, Var2, fill=r)) + geom_tile(alpha=.8) + geom_text(aes(label=format(r, nsmall=digits))) + theme_minimal() + coord_fixed() + labs(title=title, subtitle=subtitle, x=NULL, y=NULL) + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1), axis.text.y = element_text(angle = 45, vjust = 1, hjust = 1), legend.position = legend.position) + scale_fill_gradient2(low = low.col, high = high.col, mid = "white", midpoint = 0, limit = c(-1,1), space = "Lab", name="Correlation") # Conditional additions: if(show.N) corplot <- corplot + geom_text(aes(label=paste("N =", n)), size=2, nudge_y = -0.25, color="darkgray") if(show.BF) { corplot <- corplot + geom_text(aes(label="Bayes factor"), size=2, nudge_y = 0.35, color="darkgray", hjust="center") + geom_text(aes(label=bf.label), size=2, nudge_y = 0.25, color="darkgray", hjust="center") } return(corplot) }
#' @import data.table aggregateByDocAndRegID <- function(frequencies){ #Step to avoid CRAN check error with data.table selection docID <- regID <- word <- NULL agg <- frequencies[, lapply(.SD,sum), by = list(docID, regID, word)] return(agg[,c("docID", "regID", "word", "NormalizedScore", "NormalizedFrequencyPerRegID")]) } #' @import data.table aggregateByRegID <- function(frequencies){ #Step to avoid CRAN check error with data.table selection regID <- word <- NULL frequencies <- frequencies[, c("regID", "word", "NormalizedFrequencyPerRegID")] agg <- frequencies[, lapply(.SD,sum), by = list(regID, word)] agg$NormalizedScorePerRegID <- as.numeric(agg$NormalizedFrequencyPerRegID) agg$NormalizedScorePerRegID <- as.numeric(agg$NormalizedScorePerRegID) return(agg[, c("regID", "word", "NormalizedFrequencyPerRegID")]) } #' @import Matrix toSparseMatrix <- function(frequencies){ row <- factor(frequencies$regID) col <- factor(frequencies$word) regIDNormalizedDfm = Matrix::sparseMatrix(i = as.integer(row), j = as.integer(col), x = as.vector(frequencies$NormalizedFrequencyPerRegID)) rownames(regIDNormalizedDfm) <- levels(row) colnames(regIDNormalizedDfm) <- levels(col) return(regIDNormalizedDfm) } addSentimentScores <- function(frequencies, scores) { frequencies$SentimentScores <- frequencies$NormalizedFrequencyPerRegID*scores$score[match(frequencies$word, scores$word)] return(frequencies) } #' @import data.table computeSentimentByRegID <- function(frequencies, scores) { frequencies <- frequencies[, c("regID", "SentimentScores")] regID <- NULL agg <- frequencies[, lapply(.SD,sum, na.rm = TRUE), by = list(regID)] agg$SentimentScores <- as.numeric(agg$SentimentScores) return(agg[, c("regID", "SentimentScores")]) }
/R/utils.R
no_license
josepedro/GWP
R
false
false
1,884
r
#' @import data.table aggregateByDocAndRegID <- function(frequencies){ #Step to avoid CRAN check error with data.table selection docID <- regID <- word <- NULL agg <- frequencies[, lapply(.SD,sum), by = list(docID, regID, word)] return(agg[,c("docID", "regID", "word", "NormalizedScore", "NormalizedFrequencyPerRegID")]) } #' @import data.table aggregateByRegID <- function(frequencies){ #Step to avoid CRAN check error with data.table selection regID <- word <- NULL frequencies <- frequencies[, c("regID", "word", "NormalizedFrequencyPerRegID")] agg <- frequencies[, lapply(.SD,sum), by = list(regID, word)] agg$NormalizedScorePerRegID <- as.numeric(agg$NormalizedFrequencyPerRegID) agg$NormalizedScorePerRegID <- as.numeric(agg$NormalizedScorePerRegID) return(agg[, c("regID", "word", "NormalizedFrequencyPerRegID")]) } #' @import Matrix toSparseMatrix <- function(frequencies){ row <- factor(frequencies$regID) col <- factor(frequencies$word) regIDNormalizedDfm = Matrix::sparseMatrix(i = as.integer(row), j = as.integer(col), x = as.vector(frequencies$NormalizedFrequencyPerRegID)) rownames(regIDNormalizedDfm) <- levels(row) colnames(regIDNormalizedDfm) <- levels(col) return(regIDNormalizedDfm) } addSentimentScores <- function(frequencies, scores) { frequencies$SentimentScores <- frequencies$NormalizedFrequencyPerRegID*scores$score[match(frequencies$word, scores$word)] return(frequencies) } #' @import data.table computeSentimentByRegID <- function(frequencies, scores) { frequencies <- frequencies[, c("regID", "SentimentScores")] regID <- NULL agg <- frequencies[, lapply(.SD,sum, na.rm = TRUE), by = list(regID)] agg$SentimentScores <- as.numeric(agg$SentimentScores) return(agg[, c("regID", "SentimentScores")]) }
#' Greates common divisor #' #' Gets greatest common divisor of two numbers. #' @param a,b numbers. #' @return A number that is the greatest common divisor of \code{a} & \code{b}. #' @keywords gcd #' @export #' @examples #' greatest_common_div(27,30) greatest_common_div <- function(a,b) { while (b != 0) { # euclidean algorithm temp <- b b <- a %% b a <- temp } return(a) }
/R/greatest_common_div.R
no_license
erwinrmendez/EulerFunctions
R
false
false
413
r
#' Greates common divisor #' #' Gets greatest common divisor of two numbers. #' @param a,b numbers. #' @return A number that is the greatest common divisor of \code{a} & \code{b}. #' @keywords gcd #' @export #' @examples #' greatest_common_div(27,30) greatest_common_div <- function(a,b) { while (b != 0) { # euclidean algorithm temp <- b b <- a %% b a <- temp } return(a) }
# SPI and SPEI plots using nClimDiv data # MAC 12/7/19 # # To do: latest date update in selector, plotly heatmap colors don't match, plotly legend names # load shiny libraries library(shiny) library(shinythemes) # load code libraries library(reshape2) library(RCurl) #library(maps) library(raster) #library(ggplot2) library(cowplot) library(tidyverse) #library(zoo) #library(maptools) library(SPEI) library(weathermetrics) #library(metR) library(scales) library(magick) library(plotly) # load datasets # ---- Functions ---- # capitalize county names CapStr <- function(y) { c <- strsplit(y, " ")[[1]] paste(toupper(substring(c, 1,1)), substring(c, 2), sep="", collapse=" ") } # custom date picker dateRangeInput2 <- function(inputId, label, minview = "days", maxview = "decades", ...) { d <- shiny::dateRangeInput(inputId, label, ...) d$children[[2L]]$children[[1]]$attribs[["data-date-min-view-mode"]] <- minview d$children[[2L]]$children[[3]]$attribs[["data-date-min-view-mode"]] <- minview d$children[[2L]]$children[[1]]$attribs[["data-date-max-view-mode"]] <- maxview d$children[[2L]]$children[[3]]$attribs[["data-date-max-view-mode"]] <- maxview d } # add/subtracting months add.months= function(date,n) seq(date, by = paste (n, "months"), length = 2)[2] # ------ # load supporting data from Rdata file generateSupportingData.R load("nClimDivApp_Data.RData") # date list for picker - advances on 10th of month sysDay<-as.numeric(format(Sys.Date(),"%d")) if(sysDay<=9){ date.list<-seq(as.Date("1895/1/1"), add.months(Sys.Date(),-2), "months") }else{ date.list<-seq(as.Date("1895/1/1"), add.months(Sys.Date(),-1), "months") } latest.date<-max(date.list) # ---- # UI code ui <- fluidPage(theme=shinytheme('sandstone'), tags$head(HTML( "<!-- Global site tag (gtag.js) - Google Analytics --> <script async src='https://www.googletagmanager.com/gtag/js?id=UA-155656786-1'></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new 
Date()); gtag('config', 'UA-155656786-1'); </script>")), titlePanel(strong("Standardized Drought Index Visualization Tool")), sidebarLayout( sidebarPanel( radioButtons("selectOpt", "Select spatial scale", list("Climate Division"='dv', "County"='cy',"State"='st',"Region"='rg'), selected = 'dv'), conditionalPanel( condition = "input.selectOpt=='dv'", selectizeInput( inputId = "sel_dv", label = "Select a division", choices = cdiv.list, selected=cdiv.list[27] )), conditionalPanel( condition = "input.selectOpt=='cy'", selectizeInput( inputId = "sel_cy", label = "Select a county", choices = county.list, selected = county.list[104] )), conditionalPanel( condition = "input.selectOpt=='st'", selectizeInput( inputId = "sel_st", label = "Select a state", choices = state.list, selected = state.list[2] )), conditionalPanel( condition = "input.selectOpt=='rg'", selectizeInput( inputId = "sel_rg", label = "Select a region", choices = region.list, selected = region.list[[1]][1] )), dateRangeInput2("dateRangeMY", "Select date range for plot (1895-present)", startview = "year", minview = "months", maxview = "decades", start = "1981-01-01", end = latest.date, min = min(date.list), max = latest.date), sliderInput("maxTimescale", "Max timescale to display (y-axis):", min = 3, max = 60, value = 60), radioButtons("petOpt", "Select potential evapotranspiration (PET) estimation method for SPEI", list("Hargreaves (recommended)"='harg', "Thornthwaite"='thornW'), selected = 'harg'), actionButton("updateAll","Update"), hr(), HTML('<div style="text-align: center;">Contact Mike Crimmins (<a href="mailto:crimmins@email.arizona.edu">crimmins@email.arizona.edu</a>) with questions or comments. 
SDI Viz Tool v1.0 01/07/20</div>' ) ), mainPanel( tabsetPanel( tabPanel("SPI", #img(src="spiPlot.png", width="50%", align="left"), plotOutput("spiImage", width = "100%"), tags$head(tags$style(type="text/css", "#spiImage img { border: 1; max-width: 100%; } element.style { width: 33.33%; }"))), tabPanel("SPEI", #img(src="spiPlot.png", width="50%", align="left"), plotOutput("speiImage", width = "100%"), tags$head(tags$style(type="text/css", "#speiImage img { border: 1; max-width: 100%; } element.style { width: 33.33%; }"))), tabPanel("Interactive SPI & SPEI", br(), p("Hover cursor over plots to interrogate values. Use plot image controls to zoom in/out, pan, reset axes, and download snapshot."), br(), plotlyOutput('SPIPlotly', width = "auto"), br(), plotlyOutput('SPEIPlotly', width = "auto"), br(), p("This plot shows the difference between SPI and SPEI values for each month and timescale. Purple colors indicate when SPI values were more positive than SPEI and orange colors vice versa. For example, a difference value of 1 could indicate that the SPEI (-1) is more negative than the SPI (0) reflecting more intense drought conditions."), plotlyOutput('diffPlotly', width = "auto") ), tabPanel("Explore Monthly Data", br(), p("COMING SOON -- All of the monthly nClimDiv data used in the calculation of the SPI and SPEI plots are displayed in the plots this page. The first plot shows the long-term (1895-present) monthly averages of the various climate variables used in the calculation of the drought indices. The monthly averages can depict seasonality in temperature, precipitation, and potential evapotranspiration (PET) that can aid in the interpretation of different drought index timescales."), br(), #plotlyOutput('climoPlotly', width = "auto", height = "400px"), br(), p("Click and drag a box on any part of a time series to zoom in on a specific period. 
Double click to restore the plot to the full time period."), br() #plotlyOutput('moClimPlotly', width = "auto",height = "800px") ), tabPanel("About", tags$div( HTML("<html> <head> <meta content='text/html; charset=ISO-8859-1' http-equiv='content-type'> <title>SDI Viz Tool Info</title> </head> <body> <h2 style='font-family: Helvetica,Arial,sans-serif;'>About the Standardized Drought Index Visualization Tool</h2> <span style='font-family: Helvetica,Arial,sans-serif;'>The Standardized Drought Index Visualization Tool (SDI Viz Tool) was developed to be able to quickly generate and customize multiscale Standardized Precipitation Index (SPI) and Standardized Precipitation Evapotranspiration Index (SPEI) plots. These plots portray all SPI and SPEI timescales, allowing for the visualization of both short and long-term droughts all at once and through time. More information on how to interpret the plots can be found <a target='_blank' href='https://cals.arizona.edu/climate/misc/spi/spicontour.png'>here</a>. More information on the SPI and SPEI can be found at the <a target='_blank' href='https://wrcc.dri.edu/wwdt/about.php'>Westwide Drought Tracker</a>. <br> <br> The data used in the creation of the plots is the <a target='_blank' href='https://www.ncdc.noaa.gov/monitoring-references/maps/us-climate-divisions.php'>NOAA U.S. Climate Divisional Dataset (nClimDiv)</a>. This dataset is updated through the end of the previous month, usually by the 10th of the current month. Maps of the climate divisions and special regions can be found <a target='_blank' href='https://www.ncdc.noaa.gov/monitoring-references/'>here</a>. &nbsp; <br> </span><br style='font-family: Helvetica,Arial,sans-serif;'> <span style='font-family: Helvetica,Arial,sans-serif;'>The SPI and SPEI values were calculated on the full period of record using the <a href='https://cran.r-project.org/web/packages/SPEI/index.html' target='_blank'>R SPEI package</a>. 
SDI Viz Tool code can be found at </span><a style='font-family: Helvetica,Arial,sans-serif;' href='https://github.com/mcrimmins/SDIVizTool'>https://github.com/mcrimmins/SDIVizTool</a>. <br> <br> <div style='text-align: center;'><em style='font-family: &quot;Helvetica Neue&quot;,Helvetica,Arial,sans-serif; font-size: 14px; letter-spacing: normal; orphans: 2; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; background-color: rgb(255, 255, 255); font-weight: bold; color: black;'><a href='http://cals.arizona.edu/climate/' style='background-color: transparent; text-decoration: none;'>Climate Science Applications Program - University of Arizona Cooperative Extension</a></em><br> <br> <img style='width: 400px; height: 71px;' alt='logo' src='UA_CSAP_CLIMAS_logos_horiz.png'><br> <br> <span style='color: rgb(51, 51, 51); font-family: &quot;Helvetica Neue&quot;,Helvetica,Arial,sans-serif; font-size: 12px; font-style: normal; font-weight: 500; letter-spacing: normal; orphans: 2; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; background-color: rgb(255, 255, 255); display: inline ! important; float: none;'>© 2020 The Arizona Board of Regents. All contents copyrighted. 
All rights reserved.</span><br style='font-family: Helvetica,Arial,sans-serif;'> </div> <span style='font-family: Helvetica,Arial,sans-serif;'></span><br> <br> <br> </body> </html>" ) )) ) ) ) ) # Define server logic required to draw a histogram server <- function(input, output) { # ---- Get nClimDiv data ---- # get county, div and state data ---- dataSets<-c("climdiv-pcpncy-v", "climdiv-pcpndv-v", "climdiv-pcpnst-v", "climdiv-tmincy-v", "climdiv-tmindv-v", "climdiv-tminst-v", "climdiv-tmaxcy-v", "climdiv-tmaxdv-v", "climdiv-tmaxst-v") # ----- # container for all data datalist = list() # get directory listing and find most recent prcp file url <- 'ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/' # see if FTP is working tryCatch(getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE, verbose=TRUE), error=function(e) { err <<- conditionMessage(e) }) # if(exists("err")==TRUE){ showModal(modalDialog(title="DOWNLOAD ERROR","NOAA ftp data server not responding - please try again later (notify Mike Crimmins at crimmins@email.arizona.edu if problem persists)", footer=NULL)) } # proceed filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE, verbose=TRUE) filelist<-unlist(strsplit(filenames,"\n")) showModal(modalDialog(title="Please wait","Downloading data...this can take several minutes.", footer=NULL)) # loop through dataset for(i in 1:length(dataSets)){ # download files and format into list tempName<-filelist[which((grepl(dataSets[i], filelist)) == TRUE)] url<-paste0("ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/",tempName) tempData<-read.table(url, colClasses = c("character","numeric", "numeric","numeric","numeric","numeric","numeric", "numeric","numeric","numeric","numeric","numeric", "numeric")) colnames(tempData)<-c("code",1:12) tempData$var<-dataSets[i] # add to container datalist[[i]] <- tempData print(paste0("Downloading ",dataSets[i])) } # combine into dataframe allData = do.call(rbind, datalist) rm(datalist) # ---- # update max.date # 
maxYr<-as.numeric(substr(allData[nrow(allData),1],nchar(allData[nrow(allData),1])-3,nchar(allData[nrow(allData),1]))) # if(length(which(allData[nrow(allData),]==-99.9))==0){ # mm<-12 # }else{ # mm<-which(allData[nrow(allData),]==-99.9)-2 # } # latest.date<-as.Date(paste0(maxYr,"-",mm,"-01")) # end of wait message removeModal() # ---- DROP IN PLOTTING CODE observeEvent(input$updateAll,ignoreNULL=FALSE,{ showModal(modalDialog("Updating and generating plot", footer=NULL)) # ---- subset and wrangle data ---- # county, division or state? DEAL WITH SPECIAL REGIONS!! typePET<-input$petOpt # thornW or harg # selections region<-input$selectOpt # cy,st, dv, rg state <-input$sel_st cdiv <- input$sel_dv county<- input$sel_cy specReg<-input$sel_rg # rg is not a string on climdiv filenames...create new var with ifelse for rg regChr<-region regChr<-ifelse(regChr=="rg", "st", regChr) # region subset tempData<-allData[which(grepl(regChr, allData$var)==TRUE),] # parse code col if(region=="cy"){ # get codes geoCode<-strsplit(county,"-") stCode<- stateCodes[which(stateCodes$name==((geoCode[[1]][1]))),1] cyFIPS<-as.character(county.fips[which(county.fips$polyname==paste0(tolower(geoCode[[1]][1]),",",tolower(geoCode[[1]][2]))),1]) cyFIPS<-as.numeric(ifelse(nchar(cyFIPS)==4, substr(cyFIPS,2,4), substr(cyFIPS,3,5))) # parse into columns tempData$state<-(as.numeric(substr(tempData$code, 1,2))) tempData$div<-(as.numeric(substr(tempData$code, 3,5))) tempData$element<-(as.numeric(substr(tempData$code, 6,7))) tempData$year<-(as.numeric(substr(tempData$code, 8,11))) tempData<-subset(tempData, state==stCode & div==cyFIPS) # centroid subArea<-subset(countiesPoly,NAME_2==geoCode[[1]][2] & NAME_1==geoCode[[1]][1]) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0(geoCode[[1]][2]," County,",geoCode[[1]][1]) }else if (region=="st"){ # get codes stCode<- stateCodes[which(stateCodes$name==state),1] # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,3))) 
tempData$div<-(as.numeric(substr(tempData$code, 4,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==0) # centroid subArea<-subset(statesPoly, NAME_1==(state)) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0((state)) }else if (region=="dv") { # get codes geoCode1<-strsplit(cdiv,"-") geoCode2<-strsplit(geoCode1[[1]][2],",") stCode<- stateCodes[which(stateCodes$name==((geoCode1[[1]][1]))),1] cdiv<- as.numeric(geoCode2[[1]][1]) # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,2))) tempData$div<-(as.numeric(substr(tempData$code, 3,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==cdiv) # centroid subArea<-subset(cdivPoly, STATE==geoCode1[[1]][1] & CD_NEW==cdiv) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0(geoCode1[[1]][1]," Climate Division ", cdiv) }else{ ### REGION # get codes stCode<- regionCodes$code[which(regionCodes$name==specReg)] # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,3))) tempData$div<-(as.numeric(substr(tempData$code, 4,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==0) # centroid subArea<-subset(combinedRegions, region==(specReg)) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-CapStr(gsub("-", " ", specReg)) ### REGION FIX } # melt data tempDataMelt<-melt(tempData, id.vars=c(14,18), measure.vars = 2:13) #tempDataMelt$date <- as.yearmon(paste(tempDataMelt$year, as.numeric(tempDataMelt$variable), sep = "-")) tempDataMelt$date <- as.Date(paste0(tempDataMelt$year,"-",as.numeric(tempDataMelt$variable),"-01"), format="%Y-%m-%d") tempDataMelt<-spread(tempDataMelt, var, 
value) # sort, -999 to NA tempDataMelt[tempDataMelt == -9.99] <- NA tempDataMelt[tempDataMelt == -99.9] <- NA # trim to 2018 #allDataSubset<-allDataSubset[-(which(allDataSubset$year==2019)),] # standard colnames colnames(tempDataMelt)[4:6]<-c("precip","tmax","tmin") # calc tmean tempDataMelt$tmean<-(tempDataMelt$tmax+tempDataMelt$tmin)/2 # ---- # inset map ---- fix MI boundary ---- insetmap<-ggplot() + geom_polygon(data = states4map, aes(x = long, y = lat, group = group), fill="lightgrey", color="grey",size=0.15) + # get the state border back on top geom_polygon(data = subArea, aes(x = long, y = lat, group = group), fill="orange", color="red", size=0.15) + # get the state border back on top coord_fixed(xlim=c(-125, -68), ylim = c(25,50), ratio = 1)+ #coord_fixed(xlim=c(out$meta$ll[1]-3.5, out$meta$ll[1]+3.5), ylim=c(out$meta$ll[2]-3.5, out$meta$ll[2]+3.5), ratio = 1) + #geom_point(data = point, aes(x = V1, y = V2), size=1, color='red')+ theme_bw(base_size=5)+ theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank())+ theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+ theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank()) # calculate indices ---- ## Loop thru full SPI set dfSPI<-tempDataMelt[,1:3] for(i in 1:60){ tempSPI <- spi(tempDataMelt$precip,i, na.rm = TRUE) dfSPI[[paste('SPI-',i,sep="")]] <-tempSPI$fitted } # remove rows with NAs dfSPI<-na.omit(dfSPI) #indexName="Standardized Precipitation Index" #indexNameShort="SPI" # # SPEI switch? 
# ---------------------------------------------------------------------------
# Tail of the observeEvent(input$updateAll, ...) handler inside server():
# 1) computes SPEI at 1-60 month timescales, 2) derives monthly climate
# anomalies, 3) builds the static SPI and SPEI figures (ggplot2 + cowplot,
# branded with magick), 4) wires up the interactive plotly heatmaps, then
# dismisses the progress modal and closes server(); shinyApp() launches app.
# NOTE(review): relies on objects created earlier in this handler, above this
# fragment: tempDataMelt, dfSPI, centroid, insetmap, titleName, input, output.
# ---------------------------------------------------------------------------

# Choose the PET estimation method per user selection: Thornthwaite needs only
# mean temperature; Hargreaves needs tmin/tmax (Ra=NA lets the SPEI package
# derive radiation from latitude). Temperatures are converted F -> C and
# centroid[2] is the selected area's latitude (from the polygon centroid).
if (typePET=="thornW"){
  PET <- thornthwaite(fahrenheit.to.celsius(tempDataMelt$tmean,round=2), as.numeric(centroid[2]), na.rm = TRUE)
}else{
  PET <- hargreaves(fahrenheit.to.celsius(tempDataMelt$tmin,round=2),fahrenheit.to.celsius(tempDataMelt$tmax,round=2),Ra=NA, as.numeric(centroid[2]), na.rm = TRUE)
}

# SPEI for every timescale 1..60 months, fit on the climatic water balance
# (precip converted to mm, minus PET); one fitted-series column per timescale.
dfSPEI<-tempDataMelt[,1:3]
for(i in 1:60){
  tempSPI <- spei(inches_to_metric(tempDataMelt$precip,unit="mm",round=2)-PET,i, na.rm = TRUE)
  dfSPEI[[paste('SPEI-',i,sep="")]] <-tempSPI$fitted
}
# remove rows with NAs (longer timescales have NA for the first i-1 months)
dfSPEI<-na.omit(dfSPEI)
#indexName="Standardized Precipitation-Evapotranspiration Index"
#indexNameShort="SPEI"

# monthly anomalies - https://www.r-bloggers.com/visualize-monthly-precipitation-anomalies/
# PET/25.4 converts mm -> inches so PET matches the precip units used below.
tempDataMelt$PET<-PET/25.4
tempDataMelt$P_PET<-tempDataMelt$precip-tempDataMelt$PET
# Long-term monthly means; 'variable' holds the calendar month from the melt.
moAvg <- tempDataMelt %>% group_by(variable) %>%
  summarise(moAvgPrecip = mean(precip, na.rm=TRUE),
            moAvgTemp = mean(tmean, na.rm=TRUE),
            moAvgPET = mean(PET, na.rm=TRUE),
            moAvgP_PET = mean(P_PET, na.rm=TRUE))
moAvg[,2:5] <-round(moAvg[,2:5],2)
tempDataMelt <- left_join(tempDataMelt, moAvg, by = "variable")
# Anomaly = observed value minus that calendar month's long-term mean.
tempDataMelt$precipAnom <- tempDataMelt$precip-tempDataMelt$moAvgPrecip
tempDataMelt$tempAnom <- tempDataMelt$tmean-tempDataMelt$moAvgTemp
tempDataMelt$PETAnom <- tempDataMelt$PET-tempDataMelt$moAvgPET
tempDataMelt$P_PETAnom <- tempDataMelt$P_PET-tempDataMelt$moAvgP_PET
# anom sign -- drives the two-color fill of the anomaly bar panels below
tempDataMelt$pAnomSign<-ifelse(tempDataMelt$precipAnom > 0, "pos", "neg")
tempDataMelt$petAnomSign<-ifelse(tempDataMelt$P_PETAnom > 0, "pos", "neg")
tempDataMelt$TAnomSign<-ifelse(tempDataMelt$tempAnom > 0, "pos", "neg")
# round values
# NOTE(review): positional indexing assumes columns 8:17 are the PET/anomaly
# columns added above -- confirm if tempDataMelt's upstream layout changes.
tempDataMelt[,8:17] <-round(tempDataMelt[,8:17],2)

# plot variables ----
# date range selected in the UI (month resolution)
dateRange<-input$dateRangeMY
date1<-dateRange[1]# by month
date2<-dateRange[2] # by month
# +1 so the y-axis upper limit sits just above the last plotted timescale
maxScale<-input$maxTimescale+1# max 60

# SPI contour plot ----
# Reshape wide SPI table (cols 4:63 = SPI-1..SPI-60) to long form for ggplot.
dfSPI<-melt(dfSPI, id.vars=c(1:3), measure.vars = 4:63)
dfSPI$value<-as.numeric(dfSPI$value)
colnames(dfSPI)[2]<-"month"
# current heat map: the most recent month only, drawn as a side strip
currDfSPI<-dfSPI[which(dfSPI$date==date2),]
# plot
# NOTE(review): guide = FALSE inside a scale is deprecated in recent ggplot2;
# newer releases expect guide = "none".
pCurr<- ggplot(currDfSPI, aes((date),as.numeric(variable) , fill = value))+
  geom_tile(width=1)+
  scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ",
                       na.value = "grey50", guide = FALSE, limits=c(-3, 3), oob=squish)+
  theme_bw()+
  scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+
  scale_x_date(labels = date_format("%b%Y"), expand=c(0,0))+
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(),
        axis.ticks.y=element_blank(), axis.title.x=element_blank())+
  labs(title=" ")+
  theme(plot.margin = unit(c(5, 5, 0, 0), "pt"))
# main plot: date x timescale heatmap of SPI; width=31 (days) so monthly
# tiles touch; values clamped to [-3, 3] via scales::squish (oob).
p1<- ggplot(dfSPI, aes((date),as.numeric(variable) , fill = value))+
  geom_tile(width=31)+
  #scale_fill_gradient2(low = "brown", high = "green",mid = "white",
  #                     na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+
  scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ",
                       na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+
  scale_x_date(labels = date_format("%Y-%m"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),as.Date(date2)))+
  scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+
  theme_bw()+
  theme(legend.position="left")+
  theme(plot.title = element_text(face="bold"),
        axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+
  guides(fill = guide_colourbar(barwidth = 1, barheight = 10))+
  ylab("Timescale (mos)")+
  labs(title=paste0(titleName," Standardized Precipitation Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"))+
  theme(plot.margin = unit(c(5, 0, 0, 0), "pt"))
# precip anoms: monthly bars under the heatmap, colored by anomaly sign
p2<- ggplot(tempDataMelt,aes(date, precipAnom, fill = pAnomSign)) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  #scale_y_continuous(breaks = seq(-100, 100, 20)) +
  scale_fill_manual(values = c("orange4", "darkgreen"))+
  scale_x_date(labels = date_format("%Y"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),add.months(as.Date(date2),1)))+
  ylab("Precip Anom (in.)")+
  xlab("Month-Year")+
  theme_bw()+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))+
  theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+
  theme(panel.grid.minor = element_blank())
# # trying to get alignments
# mainP<-plot_grid(p1, p2, ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1))
# sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1))
# plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1))
# # another solution
# plot_grid(p1,pCurr,p2,NULL, ncol = 2, nrow = 2, align = 'v',axis = 'b', rel_heights = c(10,10,1,1),
#           rel_widths = c(20,1,20,1))
# plotting grid using align: axis-align the heatmap with the side strip and
# the anomaly bars, then compose the 2x2-ish layout with cowplot.
mainCurr <- align_plots(p1, pCurr, align = 'h', axis = 'l')
mainPrec <- align_plots(p1, p2, align = 'v', axis = 'b')
mainP<-plot_grid(mainCurr[[1]], mainPrec[[2]], ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1))
sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1))
#plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1))
spiPlot<-plot_grid(mainP, sideP, nrow = 1, rel_widths = c(20,1))
# add inset map (CONUS locator built earlier in the handler)
spiPlot<-ggdraw(spiPlot)+draw_plot(insetmap, -0.315, 0.40, scale=0.14)
# add margin
spiPlot = spiPlot + theme(plot.margin = unit(c(0.25, 0.25, 0.7, 0.25), "in"))
# add caption (data source, creation date, attribution)
captionString <- c( "Data from NOAA-NCEI",
                    "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/",
                    paste0("Plot created: ", format(Sys.Date(), "%m-%d-%Y")),
                    "The University of Arizona",
                    "https://cals.arizona.edu/climate/")
spiPlot<-ggdraw(spiPlot) + draw_text(captionString, x =0.125, y = c(0.0625,0.0500,0.0375,0.0250,0.0125), hjust = 0,vjust=-0.25, size=8)
# write high res to file ----
png("spiPlot.png", width = 11, height = 8.5, units = "in", res = 300L)
#grid.newpage()
print(spiPlot, newpage = FALSE)
dev.off()
# add logos
# Call back the plot
# NOTE(review): 'plot' shadows base::plot within this scope -- works, but a
# different name (e.g. plotImg) would be safer.
plot <- image_read("spiPlot.png")
# And bring in a logo
#logo_raw <- image_read("./logos/UA_CLIMAS_logos.png")
logo_raw <- image_read("UA_CSAP_CLIMAS_logos_horiz.png")
logo <- image_resize(logo_raw, geometry_size_percent(width=95,height = 95))
# Stack them on top of each other
#final_plot <- image_append((c(plot, logo)), stack = TRUE)
#final_plot <- image_mosaic((c(plot, logo)))
# Pixel offset places the logo bottom-right on the 3300x2550 (11x8.5 @300dpi) page.
final_plot <- image_composite(plot, logo, offset = "+2235+2365")
# And overwrite the plot without a logo
image_write(final_plot, "spiPlot.png")
# ----
# send image file to the UI
output$spiImage <- renderImage({
  # When input$n is 3, filename is ./images/image3.jpeg
  filename <- "spiPlot.png"
  # Return a list containing the filename and alt text
  list(src = filename, alt = "SPIPlot")
}, deleteFile = FALSE)
# PLOTLY SPI HEATMAP ----
# Interactive mirror of the static SPI heatmap.
# NOTE(review): colorRamp here is not guaranteed to reproduce the exact
# ggplot gradient (known open issue per the file header's to-do list).
output$SPIPlotly <- renderPlotly({
  plot_ly(dfSPI, x = ~date, y = ~variable, z = ~value, colors=colorRamp(c("orange3","orange","yellow","white","green","green2","darkgreen")),
          type = "heatmap", zmin=-3, zmax=3) %>%
    layout(title = paste0(titleName," Standardized Precipitation Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"),
           xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))),
           yaxis=list(title="Timescale (mos)", range = c(0,maxScale))
    )
})
# ----
# SPEI contour plot ----
# Same pipeline as the SPI figure above; pCurr/p1/p2/spiPlot are deliberately
# reused (overwritten) for the SPEI versions.
dfSPEI<-melt(dfSPEI, id.vars=c(1:3), measure.vars = 4:63)
dfSPEI$value<-as.numeric(dfSPEI$value)
colnames(dfSPEI)[2]<-"month"
# current heat map (latest month side strip)
currDfSPEI<-dfSPEI[which(dfSPEI$date==date2),]
# plot
pCurr<- ggplot(currDfSPEI, aes((date),as.numeric(variable) , fill = value))+
  geom_tile(width=1)+
  scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ",
                       na.value = "grey50", guide = FALSE, limits=c(-3, 3), oob=squish)+
  theme_bw()+
  scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+
  scale_x_date(labels = date_format("%b%Y"), expand=c(0,0))+
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(),
        axis.ticks.y=element_blank(), axis.title.x=element_blank())+
  labs(title=" ")+
  theme(plot.margin = unit(c(5, 5, 0, 0), "pt"))
# main plot: date x timescale heatmap of SPEI
p1<- ggplot(dfSPEI, aes((date),as.numeric(variable) , fill = value))+
  geom_tile(width=31)+
  #scale_fill_gradient2(low = "brown", high = "green",mid = "white",
  #                     na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+
  scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ",
                       na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+
  scale_x_date(labels = date_format("%Y-%m"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),as.Date(date2)))+
  scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+
  theme_bw()+
  theme(legend.position="left")+
  theme(plot.title = element_text(face="bold"),
        axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+
  guides(fill = guide_colourbar(barwidth = 1, barheight = 10))+
  ylab("Timescale (mos)")+
  labs(title=paste0(titleName," Standardized Precipitation-Evapotranspiration Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"))+
  theme(plot.margin = unit(c(5, 0, 0, 0), "pt"))
# precip anoms -- here the P-PET (water balance) anomaly bars
p2<- ggplot(tempDataMelt,aes(date, P_PETAnom, fill = petAnomSign)) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  #scale_y_continuous(breaks = seq(-100, 100, 20)) +
  scale_fill_manual(values = c("orange4", "darkgreen"))+
  scale_x_date(labels = date_format("%Y"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),add.months(as.Date(date2),1)))+
  ylab("P-PET Anom (in.)")+
  xlab("Month-Year")+
  theme_bw()+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))+
  theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+
  theme(panel.grid.minor = element_blank())
# # trying to get alignments
# mainP<-plot_grid(p1, p2, ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1))
# sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1))
# plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1))
# # another solution
# plot_grid(p1,pCurr,p2,NULL, ncol = 2, nrow = 2, align = 'v',axis = 'b', rel_heights = c(10,10,1,1),
#           rel_widths = c(20,1,20,1))
# plotting grid using align (same composition as the SPI figure)
mainCurr <- align_plots(p1, pCurr, align = 'h', axis = 'l')
mainPrec <- align_plots(p1, p2, align = 'v', axis = 'b')
mainP<-plot_grid(mainCurr[[1]], mainPrec[[2]], ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1))
sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1))
#plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1))
spiPlot<-plot_grid(mainP, sideP, nrow = 1, rel_widths = c(20,1))
# add inset map
spiPlot<-ggdraw(spiPlot)+draw_plot(insetmap, -0.315, 0.40, scale=0.14)
# add margin
spiPlot = spiPlot + theme(plot.margin = unit(c(0.25, 0.25, 0.7, 0.25), "in"))
# add caption
captionString <- c( "Data from NOAA-NCEI",
                    "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/",
                    paste0("Plot created: ", format(Sys.Date(), "%m-%d-%Y")),
                    "The University of Arizona",
                    "https://cals.arizona.edu/climate/")
spiPlot<-ggdraw(spiPlot) + draw_text(captionString, x =0.125, y = c(0.0625,0.0500,0.0375,0.0250,0.0125), hjust = 0,vjust=-0.25, size=8)
# write high res to file ----
png("speiPlot.png", width = 11, height = 8.5, units = "in", res = 300L)
#grid.newpage()
print(spiPlot, newpage = FALSE)
dev.off()
# add logos
# Call back the plot
plot <- image_read("speiPlot.png")
# And bring in a logo
#logo_raw <- image_read("./logos/UA_CLIMAS_logos.png")
logo_raw <- image_read("UA_CSAP_CLIMAS_logos_horiz.png")
logo <- image_resize(logo_raw, geometry_size_percent(width=95,height = 95))
# Stack them on top of each other
#final_plot <- image_append((c(plot, logo)), stack = TRUE)
#final_plot <- image_mosaic((c(plot, logo)))
final_plot <- image_composite(plot, logo, offset = "+2235+2365")
# And overwrite the plot without a logo
image_write(final_plot, "speiPlot.png")
# ----
# send image file to the UI
output$speiImage <- renderImage({
  # When input$n is 3, filename is ./images/image3.jpeg
  filename <- "speiPlot.png"
  # Return a list containing the filename and alt text
  list(src = filename, alt = "SPEIPlot")
}, deleteFile = FALSE)
# PLOTLY SPI HEATMAP ---- (interactive SPEI version)
output$SPEIPlotly <- renderPlotly({plot_ly(dfSPEI, x = ~date, y = ~variable, z = ~value, colors=colorRamp(c("orange3","orange","yellow","white","green","green2","darkgreen")),
                                           type = "heatmap", zmin=-3, zmax=3) %>%
    layout(title = paste0(titleName," Standardized Precipitation-Evapotranspiration Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"),
           xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))),
           yaxis=list(title="Timescale (mos)", range = c(0,maxScale))
    )
})
# ----
# PLOTLY diff plot ----
# SPI minus SPEI per month/timescale; purple = SPI more positive than SPEI.
# NOTE(review): assumes dfSPI and dfSPEI are row-aligned after the identical
# melt/na.omit steps above -- verify if either pipeline changes.
tempPlotlyDF<-as.data.frame(cbind(dfSPI$variable,dfSPI$value-dfSPEI$value))
colnames(tempPlotlyDF)<-c("variable","value")
tempPlotlyDF$date<-dfSPI$date
output$diffPlotly <- renderPlotly({plot_ly(tempPlotlyDF, x = ~date, y = ~variable, z = ~value, colors='PuOr',
                                           type = "heatmap", zmin=-2, zmax=2) %>%
    layout(title = paste0(titleName," SPI-SPEI (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"),
           xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))),
           yaxis=list(title="Timescale (mos)", range = c(0,maxScale))
    )
})
# ----
# # interactive plots of temp, precip, PET, Anoms ---- (disabled; "COMING
# # SOON" per the UI's Explore Monthly Data tab)
# # temp Plotly
# tempPlotlyVars<-tempDataMelt[,c(3,5,6,7)]
# colnames(tempPlotlyVars)<-c("date","T-max(F)","T-mean(F)","T-min(F)")
# tempPlots<-tempPlotlyVars %>%
#   tidyr::gather(variable,value,-date) %>%
#   transform(id = as.integer(factor(variable))) %>%
#   plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("dodgerblue4","dimgray","firebrick"),
#           yaxis = ~paste0("y", id)) %>%
#   add_lines()
# # temp Anom Plotly
# tempPlotlyVars<-tempDataMelt[,c(3,15)]
# colnames(tempPlotlyVars)<-c("date","T-mean Anom(F)")
# tempAnomPlots<-tempPlotlyVars %>%
#   tidyr::gather(variable,value,-date) %>%
#   transform(id = as.integer(factor(variable))) %>%
#   plot_ly(x = ~date, y = ~value, color = ~variable, colors = "dimgray",
#           yaxis = ~paste0("y", id)) %>%
#   add_lines()
# # precip Plotly
# tempPlotlyVars<-tempDataMelt[,c(3,4,14)]
# colnames(tempPlotlyVars)<-c("date","Precip(in)","PrecipAnom(in)")
# precipPlots<-tempPlotlyVars %>%
#   tidyr::gather(variable,value,-date) %>%
#   transform(id = as.integer(factor(variable))) %>%
#   plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("forestgreen","darkslateblue"),
#           yaxis = ~paste0("y", id)) %>%
#   add_lines()
# # Precip Anom Plotly
# # tempPlotlyVars<-tempDataMelt[,c(3,14)]
# # precipAnomPlots<-tempPlotlyVars %>%
# #   tidyr::gather(variable,value,-date) %>%
# #   transform(id = as.integer(factor(variable))) %>%
# #   plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgreen",
# #           yaxis = ~paste0("y", id)) %>%
# #   add_lines()
# # PET Plotly
# tempPlotlyVars<-tempDataMelt[,c(3,8)]
# colnames(tempPlotlyVars)<-c("date","PET(in)")
# PETPlots<-tempPlotlyVars %>%
#   tidyr::gather(variable,value,-date) %>%
#   transform(id = as.integer(factor(variable))) %>%
#   plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgoldenrod",
#           yaxis = ~paste0("y", id)) %>%
#   add_lines()
# # PET_P Plotly
# tempPlotlyVars<-tempDataMelt[,c(3,9,17)]
# colnames(tempPlotlyVars)<-c("date","Precip-PET(in)","Precip-PETAnom(in)")
# PET_PPlots<-tempPlotlyVars %>%
#   tidyr::gather(variable,value,-date) %>%
#   transform(id = as.integer(factor(variable))) %>%
#   plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("darkorange","darkorchid4"),
#           yaxis = ~paste0("y", id)) %>%
#   add_lines()
# # Precip Anom Plotly
# # tempPlotlyVars<-tempDataMelt[,c(3,17)]
# # PET_PAnomPlots<-tempPlotlyVars %>%
# #   tidyr::gather(variable,value,-date) %>%
# #   transform(id = as.integer(factor(variable))) %>%
# #   plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgreen",
# #           yaxis = ~paste0("y", id)) %>%
# #   add_lines()
# # combine in subplots
# pSubPlot<-subplot(tempPlots, tempAnomPlots, precipPlots,
#                   PETPlots, PET_PPlots, nrows = 6, shareX = TRUE)
# # render Plotly
# output$moClimPlotly <- renderPlotly({pSubPlot<-layout(pSubPlot, title=paste0(titleName," Monthly Climate Data"))
# })
# # ----
# # climograph https://plot.ly/r/multiple-axes/ ---- (disabled)
# ay <- list(
#   tickfont = list(color = "red"),
#   overlaying = "y",
#   side = "right",
#   title = "Temp(F)"
# )
# pClimo <- plot_ly() %>%
#   add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgPrecip, name = "Precip(in)") %>%
#   add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgPET, name = "PET(in)") %>%
#   add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgP_PET, name = "P-PET(in)") %>%
#   add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgTemp, name = "Temp(F)", yaxis = "y2") %>%
#   layout(
#     title = paste0(titleName," Monthly Average Climate"), yaxis2 = ay,
#     xaxis = list(title="month",
#                  range=c(1,12)),
#     yaxis = list(title="inches")
#   )
#
# output$climoPlotly <- renderPlotly({pClimo
# })

# Work complete: dismiss the "Updating and generating plot" modal.
removeModal()
}  # end of observeEvent handler expression
)  # end of observeEvent(input$updateAll, ...)
# ---- END PLOTTING CODE
}  # end of server()
# Run the application
shinyApp(ui = ui, server = server)
/app.R
no_license
mcrimmins/SDIVizTool
R
false
false
43,085
r
# SPI and SPEI plots using nClimDiv data # MAC 12/7/19 # # To do: latest date update in selector, plotly heatmap colors don't match, plotly legend names # load shiny libraries library(shiny) library(shinythemes) # load code libraries library(reshape2) library(RCurl) #library(maps) library(raster) #library(ggplot2) library(cowplot) library(tidyverse) #library(zoo) #library(maptools) library(SPEI) library(weathermetrics) #library(metR) library(scales) library(magick) library(plotly) # load datasets # ---- Functions ---- # capitalize county names CapStr <- function(y) { c <- strsplit(y, " ")[[1]] paste(toupper(substring(c, 1,1)), substring(c, 2), sep="", collapse=" ") } # custom date picker dateRangeInput2 <- function(inputId, label, minview = "days", maxview = "decades", ...) { d <- shiny::dateRangeInput(inputId, label, ...) d$children[[2L]]$children[[1]]$attribs[["data-date-min-view-mode"]] <- minview d$children[[2L]]$children[[3]]$attribs[["data-date-min-view-mode"]] <- minview d$children[[2L]]$children[[1]]$attribs[["data-date-max-view-mode"]] <- maxview d$children[[2L]]$children[[3]]$attribs[["data-date-max-view-mode"]] <- maxview d } # add/subtracting months add.months= function(date,n) seq(date, by = paste (n, "months"), length = 2)[2] # ------ # load supporting data from Rdata file generateSupportingData.R load("nClimDivApp_Data.RData") # date list for picker - advances on 10th of month sysDay<-as.numeric(format(Sys.Date(),"%d")) if(sysDay<=9){ date.list<-seq(as.Date("1895/1/1"), add.months(Sys.Date(),-2), "months") }else{ date.list<-seq(as.Date("1895/1/1"), add.months(Sys.Date(),-1), "months") } latest.date<-max(date.list) # ---- # UI code ui <- fluidPage(theme=shinytheme('sandstone'), tags$head(HTML( "<!-- Global site tag (gtag.js) - Google Analytics --> <script async src='https://www.googletagmanager.com/gtag/js?id=UA-155656786-1'></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new 
Date()); gtag('config', 'UA-155656786-1'); </script>")), titlePanel(strong("Standardized Drought Index Visualization Tool")), sidebarLayout( sidebarPanel( radioButtons("selectOpt", "Select spatial scale", list("Climate Division"='dv', "County"='cy',"State"='st',"Region"='rg'), selected = 'dv'), conditionalPanel( condition = "input.selectOpt=='dv'", selectizeInput( inputId = "sel_dv", label = "Select a division", choices = cdiv.list, selected=cdiv.list[27] )), conditionalPanel( condition = "input.selectOpt=='cy'", selectizeInput( inputId = "sel_cy", label = "Select a county", choices = county.list, selected = county.list[104] )), conditionalPanel( condition = "input.selectOpt=='st'", selectizeInput( inputId = "sel_st", label = "Select a state", choices = state.list, selected = state.list[2] )), conditionalPanel( condition = "input.selectOpt=='rg'", selectizeInput( inputId = "sel_rg", label = "Select a region", choices = region.list, selected = region.list[[1]][1] )), dateRangeInput2("dateRangeMY", "Select date range for plot (1895-present)", startview = "year", minview = "months", maxview = "decades", start = "1981-01-01", end = latest.date, min = min(date.list), max = latest.date), sliderInput("maxTimescale", "Max timescale to display (y-axis):", min = 3, max = 60, value = 60), radioButtons("petOpt", "Select potential evapotranspiration (PET) estimation method for SPEI", list("Hargreaves (recommended)"='harg', "Thornthwaite"='thornW'), selected = 'harg'), actionButton("updateAll","Update"), hr(), HTML('<div style="text-align: center;">Contact Mike Crimmins (<a href="mailto:crimmins@email.arizona.edu">crimmins@email.arizona.edu</a>) with questions or comments. 
SDI Viz Tool v1.0 01/07/20</div>' ) ), mainPanel( tabsetPanel( tabPanel("SPI", #img(src="spiPlot.png", width="50%", align="left"), plotOutput("spiImage", width = "100%"), tags$head(tags$style(type="text/css", "#spiImage img { border: 1; max-width: 100%; } element.style { width: 33.33%; }"))), tabPanel("SPEI", #img(src="spiPlot.png", width="50%", align="left"), plotOutput("speiImage", width = "100%"), tags$head(tags$style(type="text/css", "#speiImage img { border: 1; max-width: 100%; } element.style { width: 33.33%; }"))), tabPanel("Interactive SPI & SPEI", br(), p("Hover cursor over plots to interrogate values. Use plot image controls to zoom in/out, pan, reset axes, and download snapshot."), br(), plotlyOutput('SPIPlotly', width = "auto"), br(), plotlyOutput('SPEIPlotly', width = "auto"), br(), p("This plot shows the difference between SPI and SPEI values for each month and timescale. Purple colors indicate when SPI values were more positive than SPEI and orange colors vice versa. For example, a difference value of 1 could indicate that the SPEI (-1) is more negative than the SPI (0) reflecting more intense drought conditions."), plotlyOutput('diffPlotly', width = "auto") ), tabPanel("Explore Monthly Data", br(), p("COMING SOON -- All of the monthly nClimDiv data used in the calculation of the SPI and SPEI plots are displayed in the plots this page. The first plot shows the long-term (1895-present) monthly averages of the various climate variables used in the calculation of the drought indices. The monthly averages can depict seasonality in temperature, precipitation, and potential evapotranspiration (PET) that can aid in the interpretation of different drought index timescales."), br(), #plotlyOutput('climoPlotly', width = "auto", height = "400px"), br(), p("Click and drag a box on any part of a time series to zoom in on a specific period. 
Double click to restore the plot to the full time period."), br() #plotlyOutput('moClimPlotly', width = "auto",height = "800px") ), tabPanel("About", tags$div( HTML("<html> <head> <meta content='text/html; charset=ISO-8859-1' http-equiv='content-type'> <title>SDI Viz Tool Info</title> </head> <body> <h2 style='font-family: Helvetica,Arial,sans-serif;'>About the Standardized Drought Index Visualization Tool</h2> <span style='font-family: Helvetica,Arial,sans-serif;'>The Standardized Drought Index Visualization Tool (SDI Viz Tool) was developed to be able to quickly generate and customize multiscale Standardized Precipitation Index (SPI) and Standardized Precipitation Evapotranspiration Index (SPEI) plots. These plots portray all SPI and SPEI timescales, allowing for the visualization of both short and long-term droughts all at once and through time. More information on how to interpret the plots can be found <a target='_blank' href='https://cals.arizona.edu/climate/misc/spi/spicontour.png'>here</a>. More information on the SPI and SPEI can be found at the <a target='_blank' href='https://wrcc.dri.edu/wwdt/about.php'>Westwide Drought Tracker</a>. <br> <br> The data used in the creation of the plots is the <a target='_blank' href='https://www.ncdc.noaa.gov/monitoring-references/maps/us-climate-divisions.php'>NOAA U.S. Climate Divisional Dataset (nClimDiv)</a>. This dataset is updated through the end of the previous month, usually by the 10th of the current month. Maps of the climate divisions and special regions can be found <a target='_blank' href='https://www.ncdc.noaa.gov/monitoring-references/'>here</a>. &nbsp; <br> </span><br style='font-family: Helvetica,Arial,sans-serif;'> <span style='font-family: Helvetica,Arial,sans-serif;'>The SPI and SPEI values were calculated on the full period of record using the <a href='https://cran.r-project.org/web/packages/SPEI/index.html' target='_blank'>R SPEI package</a>. 
SDI Viz Tool code can be found at </span><a style='font-family: Helvetica,Arial,sans-serif;' href='https://github.com/mcrimmins/SDIVizTool'>https://github.com/mcrimmins/SDIVizTool</a>. <br> <br> <div style='text-align: center;'><em style='font-family: &quot;Helvetica Neue&quot;,Helvetica,Arial,sans-serif; font-size: 14px; letter-spacing: normal; orphans: 2; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; background-color: rgb(255, 255, 255); font-weight: bold; color: black;'><a href='http://cals.arizona.edu/climate/' style='background-color: transparent; text-decoration: none;'>Climate Science Applications Program - University of Arizona Cooperative Extension</a></em><br> <br> <img style='width: 400px; height: 71px;' alt='logo' src='UA_CSAP_CLIMAS_logos_horiz.png'><br> <br> <span style='color: rgb(51, 51, 51); font-family: &quot;Helvetica Neue&quot;,Helvetica,Arial,sans-serif; font-size: 12px; font-style: normal; font-weight: 500; letter-spacing: normal; orphans: 2; text-indent: 0px; text-transform: none; white-space: normal; widows: 2; word-spacing: 0px; background-color: rgb(255, 255, 255); display: inline ! important; float: none;'>© 2020 The Arizona Board of Regents. All contents copyrighted. 
All rights reserved.</span><br style='font-family: Helvetica,Arial,sans-serif;'> </div> <span style='font-family: Helvetica,Arial,sans-serif;'></span><br> <br> <br> </body> </html>" ) )) ) ) ) ) # Define server logic required to draw a histogram server <- function(input, output) { # ---- Get nClimDiv data ---- # get county, div and state data ---- dataSets<-c("climdiv-pcpncy-v", "climdiv-pcpndv-v", "climdiv-pcpnst-v", "climdiv-tmincy-v", "climdiv-tmindv-v", "climdiv-tminst-v", "climdiv-tmaxcy-v", "climdiv-tmaxdv-v", "climdiv-tmaxst-v") # ----- # container for all data datalist = list() # get directory listing and find most recent prcp file url <- 'ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/' # see if FTP is working tryCatch(getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE, verbose=TRUE), error=function(e) { err <<- conditionMessage(e) }) # if(exists("err")==TRUE){ showModal(modalDialog(title="DOWNLOAD ERROR","NOAA ftp data server not responding - please try again later (notify Mike Crimmins at crimmins@email.arizona.edu if problem persists)", footer=NULL)) } # proceed filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE, verbose=TRUE) filelist<-unlist(strsplit(filenames,"\n")) showModal(modalDialog(title="Please wait","Downloading data...this can take several minutes.", footer=NULL)) # loop through dataset for(i in 1:length(dataSets)){ # download files and format into list tempName<-filelist[which((grepl(dataSets[i], filelist)) == TRUE)] url<-paste0("ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/",tempName) tempData<-read.table(url, colClasses = c("character","numeric", "numeric","numeric","numeric","numeric","numeric", "numeric","numeric","numeric","numeric","numeric", "numeric")) colnames(tempData)<-c("code",1:12) tempData$var<-dataSets[i] # add to container datalist[[i]] <- tempData print(paste0("Downloading ",dataSets[i])) } # combine into dataframe allData = do.call(rbind, datalist) rm(datalist) # ---- # update max.date # 
maxYr<-as.numeric(substr(allData[nrow(allData),1],nchar(allData[nrow(allData),1])-3,nchar(allData[nrow(allData),1]))) # if(length(which(allData[nrow(allData),]==-99.9))==0){ # mm<-12 # }else{ # mm<-which(allData[nrow(allData),]==-99.9)-2 # } # latest.date<-as.Date(paste0(maxYr,"-",mm,"-01")) # end of wait message removeModal() # ---- DROP IN PLOTTING CODE observeEvent(input$updateAll,ignoreNULL=FALSE,{ showModal(modalDialog("Updating and generating plot", footer=NULL)) # ---- subset and wrangle data ---- # county, division or state? DEAL WITH SPECIAL REGIONS!! typePET<-input$petOpt # thornW or harg # selections region<-input$selectOpt # cy,st, dv, rg state <-input$sel_st cdiv <- input$sel_dv county<- input$sel_cy specReg<-input$sel_rg # rg is not a string on climdiv filenames...create new var with ifelse for rg regChr<-region regChr<-ifelse(regChr=="rg", "st", regChr) # region subset tempData<-allData[which(grepl(regChr, allData$var)==TRUE),] # parse code col if(region=="cy"){ # get codes geoCode<-strsplit(county,"-") stCode<- stateCodes[which(stateCodes$name==((geoCode[[1]][1]))),1] cyFIPS<-as.character(county.fips[which(county.fips$polyname==paste0(tolower(geoCode[[1]][1]),",",tolower(geoCode[[1]][2]))),1]) cyFIPS<-as.numeric(ifelse(nchar(cyFIPS)==4, substr(cyFIPS,2,4), substr(cyFIPS,3,5))) # parse into columns tempData$state<-(as.numeric(substr(tempData$code, 1,2))) tempData$div<-(as.numeric(substr(tempData$code, 3,5))) tempData$element<-(as.numeric(substr(tempData$code, 6,7))) tempData$year<-(as.numeric(substr(tempData$code, 8,11))) tempData<-subset(tempData, state==stCode & div==cyFIPS) # centroid subArea<-subset(countiesPoly,NAME_2==geoCode[[1]][2] & NAME_1==geoCode[[1]][1]) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0(geoCode[[1]][2]," County,",geoCode[[1]][1]) }else if (region=="st"){ # get codes stCode<- stateCodes[which(stateCodes$name==state),1] # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,3))) 
tempData$div<-(as.numeric(substr(tempData$code, 4,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==0) # centroid subArea<-subset(statesPoly, NAME_1==(state)) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0((state)) }else if (region=="dv") { # get codes geoCode1<-strsplit(cdiv,"-") geoCode2<-strsplit(geoCode1[[1]][2],",") stCode<- stateCodes[which(stateCodes$name==((geoCode1[[1]][1]))),1] cdiv<- as.numeric(geoCode2[[1]][1]) # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,2))) tempData$div<-(as.numeric(substr(tempData$code, 3,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==cdiv) # centroid subArea<-subset(cdivPoly, STATE==geoCode1[[1]][1] & CD_NEW==cdiv) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-paste0(geoCode1[[1]][1]," Climate Division ", cdiv) }else{ ### REGION # get codes stCode<- regionCodes$code[which(regionCodes$name==specReg)] # parse into cols tempData$state<-(as.numeric(substr(tempData$code, 1,3))) tempData$div<-(as.numeric(substr(tempData$code, 4,4))) tempData$element<-(as.numeric(substr(tempData$code, 5,6))) tempData$year<-(as.numeric(substr(tempData$code, 7,10))) tempData<-subset(tempData, state==stCode & div==0) # centroid subArea<-subset(combinedRegions, region==(specReg)) centroid<-colMeans(coordinates(subArea)) # build name string titleName<-CapStr(gsub("-", " ", specReg)) ### REGION FIX } # melt data tempDataMelt<-melt(tempData, id.vars=c(14,18), measure.vars = 2:13) #tempDataMelt$date <- as.yearmon(paste(tempDataMelt$year, as.numeric(tempDataMelt$variable), sep = "-")) tempDataMelt$date <- as.Date(paste0(tempDataMelt$year,"-",as.numeric(tempDataMelt$variable),"-01"), format="%Y-%m-%d") tempDataMelt<-spread(tempDataMelt, var, 
value) # sort, -999 to NA tempDataMelt[tempDataMelt == -9.99] <- NA tempDataMelt[tempDataMelt == -99.9] <- NA # trim to 2018 #allDataSubset<-allDataSubset[-(which(allDataSubset$year==2019)),] # standard colnames colnames(tempDataMelt)[4:6]<-c("precip","tmax","tmin") # calc tmean tempDataMelt$tmean<-(tempDataMelt$tmax+tempDataMelt$tmin)/2 # ---- # inset map ---- fix MI boundary ---- insetmap<-ggplot() + geom_polygon(data = states4map, aes(x = long, y = lat, group = group), fill="lightgrey", color="grey",size=0.15) + # get the state border back on top geom_polygon(data = subArea, aes(x = long, y = lat, group = group), fill="orange", color="red", size=0.15) + # get the state border back on top coord_fixed(xlim=c(-125, -68), ylim = c(25,50), ratio = 1)+ #coord_fixed(xlim=c(out$meta$ll[1]-3.5, out$meta$ll[1]+3.5), ylim=c(out$meta$ll[2]-3.5, out$meta$ll[2]+3.5), ratio = 1) + #geom_point(data = point, aes(x = V1, y = V2), size=1, color='red')+ theme_bw(base_size=5)+ theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank())+ theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+ theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank()) # calculate indices ---- ## Loop thru full SPI set dfSPI<-tempDataMelt[,1:3] for(i in 1:60){ tempSPI <- spi(tempDataMelt$precip,i, na.rm = TRUE) dfSPI[[paste('SPI-',i,sep="")]] <-tempSPI$fitted } # remove rows with NAs dfSPI<-na.omit(dfSPI) #indexName="Standardized Precipitation Index" #indexNameShort="SPI" # # SPEI switch? 
if (typePET=="thornW"){ PET <- thornthwaite(fahrenheit.to.celsius(tempDataMelt$tmean,round=2), as.numeric(centroid[2]), na.rm = TRUE) }else{ PET <- hargreaves(fahrenheit.to.celsius(tempDataMelt$tmin,round=2),fahrenheit.to.celsius(tempDataMelt$tmax,round=2),Ra=NA, as.numeric(centroid[2]), na.rm = TRUE) } dfSPEI<-tempDataMelt[,1:3] for(i in 1:60){ tempSPI <- spei(inches_to_metric(tempDataMelt$precip,unit="mm",round=2)-PET,i, na.rm = TRUE) dfSPEI[[paste('SPEI-',i,sep="")]] <-tempSPI$fitted } # remove rows with NAs dfSPEI<-na.omit(dfSPEI) #indexName="Standardized Precipitation-Evapotranspiration Index" #indexNameShort="SPEI" # monthly anomalies - https://www.r-bloggers.com/visualize-monthly-precipitation-anomalies/ tempDataMelt$PET<-PET/25.4 tempDataMelt$P_PET<-tempDataMelt$precip-tempDataMelt$PET moAvg <- tempDataMelt %>% group_by(variable) %>% summarise(moAvgPrecip = mean(precip, na.rm=TRUE), moAvgTemp = mean(tmean, na.rm=TRUE), moAvgPET = mean(PET, na.rm=TRUE), moAvgP_PET = mean(P_PET, na.rm=TRUE)) moAvg[,2:5] <-round(moAvg[,2:5],2) tempDataMelt <- left_join(tempDataMelt, moAvg, by = "variable") tempDataMelt$precipAnom <- tempDataMelt$precip-tempDataMelt$moAvgPrecip tempDataMelt$tempAnom <- tempDataMelt$tmean-tempDataMelt$moAvgTemp tempDataMelt$PETAnom <- tempDataMelt$PET-tempDataMelt$moAvgPET tempDataMelt$P_PETAnom <- tempDataMelt$P_PET-tempDataMelt$moAvgP_PET # anom sign tempDataMelt$pAnomSign<-ifelse(tempDataMelt$precipAnom > 0, "pos", "neg") tempDataMelt$petAnomSign<-ifelse(tempDataMelt$P_PETAnom > 0, "pos", "neg") tempDataMelt$TAnomSign<-ifelse(tempDataMelt$tempAnom > 0, "pos", "neg") # round values tempDataMelt[,8:17] <-round(tempDataMelt[,8:17],2) # plot variables ---- # date range dateRange<-input$dateRangeMY date1<-dateRange[1]# by month date2<-dateRange[2] # by month maxScale<-input$maxTimescale+1# max 60 # SPI contour plot ---- dfSPI<-melt(dfSPI, id.vars=c(1:3), measure.vars = 4:63) dfSPI$value<-as.numeric(dfSPI$value) colnames(dfSPI)[2]<-"month" # 
current heat map currDfSPI<-dfSPI[which(dfSPI$date==date2),] # plot pCurr<- ggplot(currDfSPI, aes((date),as.numeric(variable) , fill = value))+ geom_tile(width=1)+ scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ", na.value = "grey50", guide = FALSE, limits=c(-3, 3), oob=squish)+ theme_bw()+ scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+ scale_x_date(labels = date_format("%b%Y"), expand=c(0,0))+ theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank(), axis.title.x=element_blank())+ labs(title=" ")+ theme(plot.margin = unit(c(5, 5, 0, 0), "pt")) # main plot p1<- ggplot(dfSPI, aes((date),as.numeric(variable) , fill = value))+ geom_tile(width=31)+ #scale_fill_gradient2(low = "brown", high = "green",mid = "white", # na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+ scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ", na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+ scale_x_date(labels = date_format("%Y-%m"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),as.Date(date2)))+ scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+ theme_bw()+ theme(legend.position="left")+ theme(plot.title = element_text(face="bold"), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+ guides(fill = guide_colourbar(barwidth = 1, barheight = 10))+ ylab("Timescale (mos)")+ labs(title=paste0(titleName," Standardized Precipitation Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"))+ theme(plot.margin = unit(c(5, 0, 0, 0), "pt")) # precip anoms p2<- ggplot(tempDataMelt,aes(date, precipAnom, fill = pAnomSign)) + geom_bar(stat = "identity", show.legend = FALSE) + #scale_y_continuous(breaks = seq(-100, 100, 20)) + scale_fill_manual(values = c("orange4", "darkgreen"))+ 
scale_x_date(labels = date_format("%Y"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),add.months(as.Date(date2),1)))+ ylab("Precip Anom (in.)")+ xlab("Month-Year")+ theme_bw()+ theme(axis.text.x = element_text(angle = 45, hjust = 1))+ theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+ theme(panel.grid.minor = element_blank()) # # trying to get alignments # mainP<-plot_grid(p1, p2, ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1)) # sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1)) # plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1)) # # another solution # plot_grid(p1,pCurr,p2,NULL, ncol = 2, nrow = 2, align = 'v',axis = 'b', rel_heights = c(10,10,1,1), # rel_widths = c(20,1,20,1)) # plotting grid using align mainCurr <- align_plots(p1, pCurr, align = 'h', axis = 'l') mainPrec <- align_plots(p1, p2, align = 'v', axis = 'b') mainP<-plot_grid(mainCurr[[1]], mainPrec[[2]], ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1)) sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1)) #plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1)) spiPlot<-plot_grid(mainP, sideP, nrow = 1, rel_widths = c(20,1)) # add inset map spiPlot<-ggdraw(spiPlot)+draw_plot(insetmap, -0.315, 0.40, scale=0.14) # add margin spiPlot = spiPlot + theme(plot.margin = unit(c(0.25, 0.25, 0.7, 0.25), "in")) # add caption captionString <- c( "Data from NOAA-NCEI", "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/", paste0("Plot created: ", format(Sys.Date(), "%m-%d-%Y")), "The University of Arizona", "https://cals.arizona.edu/climate/") spiPlot<-ggdraw(spiPlot) + draw_text(captionString, x =0.125, y = c(0.0625,0.0500,0.0375,0.0250,0.0125), hjust = 0,vjust=-0.25, size=8) # write high res to file ---- png("spiPlot.png", width = 11, height = 8.5, units = "in", res = 300L) #grid.newpage() print(spiPlot, newpage = FALSE) dev.off() # add logos # Call back the plot plot <- image_read("spiPlot.png") # And 
bring in a logo #logo_raw <- image_read("./logos/UA_CLIMAS_logos.png") logo_raw <- image_read("UA_CSAP_CLIMAS_logos_horiz.png") logo <- image_resize(logo_raw, geometry_size_percent(width=95,height = 95)) # Stack them on top of each other #final_plot <- image_append((c(plot, logo)), stack = TRUE) #final_plot <- image_mosaic((c(plot, logo))) final_plot <- image_composite(plot, logo, offset = "+2235+2365") # And overwrite the plot without a logo image_write(final_plot, "spiPlot.png") # ---- # send image file output$spiImage <- renderImage({ # When input$n is 3, filename is ./images/image3.jpeg filename <- "spiPlot.png" # Return a list containing the filename and alt text list(src = filename, alt = "SPIPlot") }, deleteFile = FALSE) # PLOTLY SPI HEATMAP ---- output$SPIPlotly <- renderPlotly({ plot_ly(dfSPI, x = ~date, y = ~variable, z = ~value, colors=colorRamp(c("orange3","orange","yellow","white","green","green2","darkgreen")), type = "heatmap", zmin=-3, zmax=3) %>% layout(title = paste0(titleName," Standardized Precipitation Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"), xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))), yaxis=list(title="Timescale (mos)", range = c(0,maxScale)) ) }) # ---- # SPEI contour plot ---- dfSPEI<-melt(dfSPEI, id.vars=c(1:3), measure.vars = 4:63) dfSPEI$value<-as.numeric(dfSPEI$value) colnames(dfSPEI)[2]<-"month" # current heat map currDfSPEI<-dfSPEI[which(dfSPEI$date==date2),] # plot pCurr<- ggplot(currDfSPEI, aes((date),as.numeric(variable) , fill = value))+ geom_tile(width=1)+ scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ", na.value = "grey50", guide = FALSE, limits=c(-3, 3), oob=squish)+ theme_bw()+ scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+ scale_x_date(labels = date_format("%b%Y"), expand=c(0,0))+ theme(axis.title.y=element_blank(), axis.text.y=element_blank(), 
axis.ticks.y=element_blank(), axis.title.x=element_blank())+ labs(title=" ")+ theme(plot.margin = unit(c(5, 5, 0, 0), "pt")) # main plot p1<- ggplot(dfSPEI, aes((date),as.numeric(variable) , fill = value))+ geom_tile(width=31)+ #scale_fill_gradient2(low = "brown", high = "green",mid = "white", # na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+ scale_fill_gradientn(colors=c("orange3","orange","yellow","white","green","green2","darkgreen"), name=" ", na.value = "grey50", guide = "colourbar", limits=c(-3, 3), oob=squish)+ scale_x_date(labels = date_format("%Y-%m"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),as.Date(date2)))+ scale_y_continuous(limits=c(0,maxScale), expand=c(0,0), breaks=seq(0,60,6))+ theme_bw()+ theme(legend.position="left")+ theme(plot.title = element_text(face="bold"), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+ guides(fill = guide_colourbar(barwidth = 1, barheight = 10))+ ylab("Timescale (mos)")+ labs(title=paste0(titleName," Standardized Precipitation-Evapotranspiration Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"))+ theme(plot.margin = unit(c(5, 0, 0, 0), "pt")) # precip anoms p2<- ggplot(tempDataMelt,aes(date, P_PETAnom, fill = petAnomSign)) + geom_bar(stat = "identity", show.legend = FALSE) + #scale_y_continuous(breaks = seq(-100, 100, 20)) + scale_fill_manual(values = c("orange4", "darkgreen"))+ scale_x_date(labels = date_format("%Y"), breaks='2 years', expand=c(0,0), limits = c(as.Date(date1),add.months(as.Date(date2),1)))+ ylab("P-PET Anom (in.)")+ xlab("Month-Year")+ theme_bw()+ theme(axis.text.x = element_text(angle = 45, hjust = 1))+ theme(plot.margin = unit(c(0, 0, 0, 0), "pt"))+ theme(panel.grid.minor = element_blank()) # # trying to get alignments # mainP<-plot_grid(p1, p2, ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1)) # sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1)) # plot_grid(mainP, 
sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1)) # # another solution # plot_grid(p1,pCurr,p2,NULL, ncol = 2, nrow = 2, align = 'v',axis = 'b', rel_heights = c(10,10,1,1), # rel_widths = c(20,1,20,1)) # plotting grid using align mainCurr <- align_plots(p1, pCurr, align = 'h', axis = 'l') mainPrec <- align_plots(p1, p2, align = 'v', axis = 'b') mainP<-plot_grid(mainCurr[[1]], mainPrec[[2]], ncol = 1, align = 'v', axis=c('l'),rel_heights = c(3.5,1)) sideP<-plot_grid(pCurr, NULL, ncol = 1, rel_heights = c(3.5,1)) #plot_grid(mainP, sideP, nrow = 1, align='h',axis = c('tblr'), rel_widths = c(20,1)) spiPlot<-plot_grid(mainP, sideP, nrow = 1, rel_widths = c(20,1)) # add inset map spiPlot<-ggdraw(spiPlot)+draw_plot(insetmap, -0.315, 0.40, scale=0.14) # add margin spiPlot = spiPlot + theme(plot.margin = unit(c(0.25, 0.25, 0.7, 0.25), "in")) # add caption captionString <- c( "Data from NOAA-NCEI", "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/", paste0("Plot created: ", format(Sys.Date(), "%m-%d-%Y")), "The University of Arizona", "https://cals.arizona.edu/climate/") spiPlot<-ggdraw(spiPlot) + draw_text(captionString, x =0.125, y = c(0.0625,0.0500,0.0375,0.0250,0.0125), hjust = 0,vjust=-0.25, size=8) # write high res to file ---- png("speiPlot.png", width = 11, height = 8.5, units = "in", res = 300L) #grid.newpage() print(spiPlot, newpage = FALSE) dev.off() # add logos # Call back the plot plot <- image_read("speiPlot.png") # And bring in a logo #logo_raw <- image_read("./logos/UA_CLIMAS_logos.png") logo_raw <- image_read("UA_CSAP_CLIMAS_logos_horiz.png") logo <- image_resize(logo_raw, geometry_size_percent(width=95,height = 95)) # Stack them on top of each other #final_plot <- image_append((c(plot, logo)), stack = TRUE) #final_plot <- image_mosaic((c(plot, logo))) final_plot <- image_composite(plot, logo, offset = "+2235+2365") # And overwrite the plot without a logo image_write(final_plot, "speiPlot.png") # ---- # send image file output$speiImage <- 
renderImage({ # When input$n is 3, filename is ./images/image3.jpeg filename <- "speiPlot.png" # Return a list containing the filename and alt text list(src = filename, alt = "SPEIPlot") }, deleteFile = FALSE) # PLOTLY SPI HEATMAP ---- output$SPEIPlotly <- renderPlotly({plot_ly(dfSPEI, x = ~date, y = ~variable, z = ~value, colors=colorRamp(c("orange3","orange","yellow","white","green","green2","darkgreen")), type = "heatmap", zmin=-3, zmax=3) %>% layout(title = paste0(titleName," Standardized Precipitation-Evapotranspiration Index (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"), xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))), yaxis=list(title="Timescale (mos)", range = c(0,maxScale)) ) }) # ---- # PLOTLY diff plot ---- tempPlotlyDF<-as.data.frame(cbind(dfSPI$variable,dfSPI$value-dfSPEI$value)) colnames(tempPlotlyDF)<-c("variable","value") tempPlotlyDF$date<-dfSPI$date output$diffPlotly <- renderPlotly({plot_ly(tempPlotlyDF, x = ~date, y = ~variable, z = ~value, colors='PuOr', type = "heatmap", zmin=-2, zmax=2) %>% layout(title = paste0(titleName," SPI-SPEI (", format(as.Date(date1), "%b%Y"), " - ",format(as.Date(date2), "%b%Y"),")"), xaxis=list(title="Month-Year", range = c(as.Date(date1),as.Date(date2))), yaxis=list(title="Timescale (mos)", range = c(0,maxScale)) ) }) # ---- # # interactive plots of temp, precip, PET, Anoms ---- # # temp Plotly # tempPlotlyVars<-tempDataMelt[,c(3,5,6,7)] # colnames(tempPlotlyVars)<-c("date","T-max(F)","T-mean(F)","T-min(F)") # tempPlots<-tempPlotlyVars %>% # tidyr::gather(variable,value,-date) %>% # transform(id = as.integer(factor(variable))) %>% # plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("dodgerblue4","dimgray","firebrick"), # yaxis = ~paste0("y", id)) %>% # add_lines() # # temp Anom Plotly # tempPlotlyVars<-tempDataMelt[,c(3,15)] # colnames(tempPlotlyVars)<-c("date","T-mean Anom(F)") # tempAnomPlots<-tempPlotlyVars %>% # 
tidyr::gather(variable,value,-date) %>% # transform(id = as.integer(factor(variable))) %>% # plot_ly(x = ~date, y = ~value, color = ~variable, colors = "dimgray", # yaxis = ~paste0("y", id)) %>% # add_lines() # # precip Plotly # tempPlotlyVars<-tempDataMelt[,c(3,4,14)] # colnames(tempPlotlyVars)<-c("date","Precip(in)","PrecipAnom(in)") # precipPlots<-tempPlotlyVars %>% # tidyr::gather(variable,value,-date) %>% # transform(id = as.integer(factor(variable))) %>% # plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("forestgreen","darkslateblue"), # yaxis = ~paste0("y", id)) %>% # add_lines() # # Precip Anom Plotly # # tempPlotlyVars<-tempDataMelt[,c(3,14)] # # precipAnomPlots<-tempPlotlyVars %>% # # tidyr::gather(variable,value,-date) %>% # # transform(id = as.integer(factor(variable))) %>% # # plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgreen", # # yaxis = ~paste0("y", id)) %>% # # add_lines() # # PET Plotly # tempPlotlyVars<-tempDataMelt[,c(3,8)] # colnames(tempPlotlyVars)<-c("date","PET(in)") # PETPlots<-tempPlotlyVars %>% # tidyr::gather(variable,value,-date) %>% # transform(id = as.integer(factor(variable))) %>% # plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgoldenrod", # yaxis = ~paste0("y", id)) %>% # add_lines() # # PET_P Plotly # tempPlotlyVars<-tempDataMelt[,c(3,9,17)] # colnames(tempPlotlyVars)<-c("date","Precip-PET(in)","Precip-PETAnom(in)") # PET_PPlots<-tempPlotlyVars %>% # tidyr::gather(variable,value,-date) %>% # transform(id = as.integer(factor(variable))) %>% # plot_ly(x = ~date, y = ~value, color = ~variable, colors = c("darkorange","darkorchid4"), # yaxis = ~paste0("y", id)) %>% # add_lines() # # Precip Anom Plotly # # tempPlotlyVars<-tempDataMelt[,c(3,17)] # # PET_PAnomPlots<-tempPlotlyVars %>% # # tidyr::gather(variable,value,-date) %>% # # transform(id = as.integer(factor(variable))) %>% # # plot_ly(x = ~date, y = ~value, color = ~variable, colors = "darkgreen", # # yaxis = ~paste0("y", id)) %>% # 
# add_lines() # # combine in subplots # pSubPlot<-subplot(tempPlots, tempAnomPlots, precipPlots, # PETPlots, PET_PPlots, nrows = 6, shareX = TRUE) # # render Plotly # output$moClimPlotly <- renderPlotly({pSubPlot<-layout(pSubPlot, title=paste0(titleName," Monthly Climate Data")) # }) # # ---- # # # climograph https://plot.ly/r/multiple-axes/ ---- # ay <- list( # tickfont = list(color = "red"), # overlaying = "y", # side = "right", # title = "Temp(F)" # ) # pClimo <- plot_ly() %>% # add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgPrecip, name = "Precip(in)") %>% # add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgPET, name = "PET(in)") %>% # add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgP_PET, name = "P-PET(in)") %>% # add_lines(x = as.numeric(moAvg$variable), y = moAvg$moAvgTemp, name = "Temp(F)", yaxis = "y2") %>% # layout( # title = paste0(titleName," Monthly Average Climate"), yaxis2 = ay, # xaxis = list(title="month", # range=c(1,12)), # yaxis = list(title="inches") # ) # # output$climoPlotly <- renderPlotly({pClimo # }) removeModal() } ) # ---- END PLOTTING CODE } # Run the application shinyApp(ui = ui, server = server)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/package.R \docType{package} \name{SRTtools} \alias{SRTtools} \alias{SRTtools-package} \title{Adjust srt files to get a better experience when watching movies.} \description{ Srt is a common subtitle format for videos; an srt file contains the subtitles and the times at which they are shown. This package is for realigning the timing of srt files, and also for changing the color, style and position of subtitles in videos. The srt file is read into R as a character vector, and can be written back to an srt file after being modified with this package. } \author{ Jim Chen }
/man/SRTtools.Rd
no_license
cran/SRTtools
R
false
true
605
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/package.R \docType{package} \name{SRTtools} \alias{SRTtools} \alias{SRTtools-package} \title{Adjust srt files to get a better experience when watching movies.} \description{ Srt is a common subtitle format for videos; an srt file contains the subtitles and the times at which they are shown. This package is for realigning the timing of srt files, and also for changing the color, style and position of subtitles in videos. The srt file is read into R as a character vector, and can be written back to an srt file after being modified with this package. } \author{ Jim Chen }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tar_script.R \name{tar_script} \alias{tar_script} \title{Write a \verb{_targets.R} script to the current working directory.} \usage{ tar_script(code = NULL, library_targets = TRUE, ask = NULL) } \arguments{ \item{code}{R code to write to \verb{_targets.R}. If \code{NULL}, an example target script is written instead.} \item{library_targets}{logical, whether to write a \code{library(targets)} line at the top of \verb{_targets.R} automatically (recommended). If \code{TRUE}, you do not need to explicitly put \code{library(targets)} in \code{code}.} \item{ask}{Logical, whether to ask before writing if \verb{_targets.R} already exists. If \code{NULL}, defaults to \code{Sys.getenv("TAR_SCRIPT_ASK")}. (Set to \code{"true"} or \code{"false"} with \code{Sys.setenv()}). If \code{ask} and the \code{TAR_SCRIPT_ASK} environment variable are both indeterminate, defaults to \code{interactive()}.} } \value{ Nothing. } \description{ The \code{tar_script()} function is a convenient way to create the required target script (\verb{_targets.R} file) in the current working directory. It always overwrites the existing target script, and it requires you to be in the working directory where you intend to write the file, so be careful. See the "Target script" section for details. } \section{Target script}{ Every \code{targets} project requires a target script in the project root. The target script must always be named \verb{_targets.R}. Functions \code{\link[=tar_make]{tar_make()}} and friends look for \verb{_targets.R} in the current working directory and use it to set up the pipeline. Every \verb{_targets.R} file should run the following steps in the order below: 1. Package: load the \code{targets} package. This step is automatically inserted at the top of \verb{_targets.R} files produced by \code{tar_script()} if \code{library_targets} is \code{TRUE}, so you do not need to explicitly include it in \code{code}. 
2. Globals: load custom functions and global objects into memory. Usually, this section is a bunch of calls to \code{source()} that run scripts defining user-defined functions. These functions support the R commands of the targets. 3. Options: call \code{\link[=tar_option_set]{tar_option_set()}} to set defaults for targets-specific settings such as the names of required packages. Even if you have no specific options to set, it is still recommended to call \code{\link[=tar_option_set]{tar_option_set()}} in order to register the proper environment. 4. Targets: define one or more target objects using \code{\link[=tar_target]{tar_target()}}. 5. Pipeline: call \code{\link[=tar_pipeline]{tar_pipeline()}} to bring the targets from (4) together in a pipeline object. Every \verb{_targets.R} script must return a pipeline object, which usually means ending with a call to \code{\link[=tar_pipeline]{tar_pipeline()}}. In practice, (4) and (5) can be combined together in the same function call. } \examples{ \dontrun{ tar_dir({ tar_script() # Writes an example target script. # Writes a user-defined target script: tar_script({ x <- tar_target(x, 1 + 1) tar_option_set() tar_pipeline(x) }) }) } }
/man/tar_script.Rd
permissive
Robinlovelace/targets
R
false
true
3,205
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tar_script.R \name{tar_script} \alias{tar_script} \title{Write a \verb{_targets.R} script to the current working directory.} \usage{ tar_script(code = NULL, library_targets = TRUE, ask = NULL) } \arguments{ \item{code}{R code to write to \verb{_targets.R}. If \code{NULL}, an example target script is written instead.} \item{library_targets}{logical, whether to write a \code{library(targets)} line at the top of \verb{_targets.R} automatically (recommended). If \code{TRUE}, you do not need to explicitly put \code{library(targets)} in \code{code}.} \item{ask}{Logical, whether to ask before writing if \verb{_targets.R} already exists. If \code{NULL}, defaults to \code{Sys.getenv("TAR_SCRIPT_ASK")}. (Set to \code{"true"} or \code{"false"} with \code{Sys.setenv()}). If \code{ask} and the \code{TAR_SCRIPT_ASK} environment variable are both indeterminate, defaults to \code{interactive()}.} } \value{ Nothing. } \description{ The \code{tar_script()} function is a convenient way to create the required target script (\verb{_targets.R} file) in the current working directory. It always overwrites the existing target script, and it requires you to be in the working directory where you intend to write the file, so be careful. See the "Target script" section for details. } \section{Target script}{ Every \code{targets} project requires a target script in the project root. The target script must always be named \verb{_targets.R}. Functions \code{\link[=tar_make]{tar_make()}} and friends look for \verb{_targets.R} in the current working directory and use it to set up the pipeline. Every \verb{_targets.R} file should run the following steps in the order below: 1. Package: load the \code{targets} package. This step is automatically inserted at the top of \verb{_targets.R} files produced by \code{tar_script()} if \code{library_targets} is \code{TRUE}, so you do not need to explicitly include it in \code{code}. 
2. Globals: load custom functions and global objects into memory. Usually, this section is a bunch of calls to \code{source()} that run scripts defining user-defined functions. These functions support the R commands of the targets. 3. Options: call \code{\link[=tar_option_set]{tar_option_set()}} to set defaults for targets-specific settings such as the names of required packages. Even if you have no specific options to set, it is still recommended to call \code{\link[=tar_option_set]{tar_option_set()}} in order to register the proper environment. 4. Targets: define one or more target objects using \code{\link[=tar_target]{tar_target()}}. 5. Pipeline: call \code{\link[=tar_pipeline]{tar_pipeline()}} to bring the targets from (4) together in a pipeline object. Every \verb{_targets.R} script must return a pipeline object, which usually means ending with a call to \code{\link[=tar_pipeline]{tar_pipeline()}}. In practice, (4) and (5) can be combined together in the same function call. } \examples{ \dontrun{ tar_dir({ tar_script() # Writes an example target script. # Writes a user-defined target script: tar_script({ x <- tar_target(x, 1 + 1) tar_option_set() tar_pipeline(x) }) }) } }
#' @@inheritParams: Inherit parameters from another function.
#'
#' Pulls in the documentation for every parameter that is undocumented in
#' the current function but documented in a source function. The source can
#' be a function in the current package, \code{function}, or a function in
#' another package, \code{package::function}.
#'
#' Multiple \code{@@inheritParams} tags may be used to inherit parameters
#' from several functions.
#'
#' @tagUsage
#' @@inheritParams local_function
#' @@inheritParams package::remote_function
setClass("InheritParamsTag", contains = "Tag")

setMethod("defaultTag", c("InheritParamsTag", "S4MethodObject"),
  function(tag, object) {
    # For an S4 method, inherit from its generic; qualify the name with the
    # generic's package when that differs from the method's own package.
    s4_method <- object@value
    local_pkg <- getPackageName(environment(object@value))
    generic_pkg <- attr(s4_method@generic, "package")
    target <- as.character(s4_method@generic)
    if (local_pkg != generic_pkg) {
      target <- str_c(generic_pkg, "::", target)
    }
    new("InheritParamsTag", text = target)
  }
)

setMethod("defaultTag", c("InheritParamsTag", "S3MethodObject"),
  function(tag, object) {
    # For an S3 method, inherit from the generic it implements; primitive
    # generics are treated as living in "base".
    s3_method <- object@value
    generic_name <- attr(s3_method, "s3method")[1]
    generic_fn <- get(generic_name, attr(s3_method, "s3env"))
    local_pkg <- getPackageName(environment(object@value))
    generic_pkg <- if (is.primitive(generic_fn)) {
      "base"
    } else {
      getPackageName(environment(generic_fn))
    }
    target <- if (local_pkg != generic_pkg) {
      str_c(generic_pkg, "::", generic_name)
    } else {
      generic_name
    }
    new("InheritParamsTag", text = target)
  }
)
/R/tag-inherit-params.r
no_license
kashenfelter/roxygen3
R
false
false
1,520
r
#' @@inheritParams: Inherit parameters from another function.
#'
#' Pulls in the documentation for every parameter that is undocumented in
#' the current function but documented in a source function. The source can
#' be a function in the current package, \code{function}, or a function in
#' another package, \code{package::function}.
#'
#' Multiple \code{@@inheritParams} tags may be used to inherit parameters
#' from several functions.
#'
#' @tagUsage
#' @@inheritParams local_function
#' @@inheritParams package::remote_function
setClass("InheritParamsTag", contains = "Tag")

setMethod("defaultTag", c("InheritParamsTag", "S4MethodObject"),
  function(tag, object) {
    # For an S4 method, inherit from its generic; qualify the name with the
    # generic's package when that differs from the method's own package.
    s4_method <- object@value
    local_pkg <- getPackageName(environment(object@value))
    generic_pkg <- attr(s4_method@generic, "package")
    target <- as.character(s4_method@generic)
    if (local_pkg != generic_pkg) {
      target <- str_c(generic_pkg, "::", target)
    }
    new("InheritParamsTag", text = target)
  }
)

setMethod("defaultTag", c("InheritParamsTag", "S3MethodObject"),
  function(tag, object) {
    # For an S3 method, inherit from the generic it implements; primitive
    # generics are treated as living in "base".
    s3_method <- object@value
    generic_name <- attr(s3_method, "s3method")[1]
    generic_fn <- get(generic_name, attr(s3_method, "s3env"))
    local_pkg <- getPackageName(environment(object@value))
    generic_pkg <- if (is.primitive(generic_fn)) {
      "base"
    } else {
      getPackageName(environment(generic_fn))
    }
    target <- if (local_pkg != generic_pkg) {
      str_c(generic_pkg, "::", generic_name)
    } else {
      generic_name
    }
    new("InheritParamsTag", text = target)
  }
)
#############################################################################
#############################################################################
## Mohamed Omar
## 10/4/2019
## Goal: Getting and organizing the data for the next step
############################################################################

# NOTE(review): this is a stand-alone analysis script; the workspace clear
# and setwd() are kept for the author's workflow (relative paths below
# depend on the working directory).
rm(list = ls())
setwd("~/Documents/Research/Projects/Malaria")
renv::activate("~/Documents/Research/Projects/Malaria")

library(GEOquery)
library(Biobase)
library(sampling)
library(limma)
library(edgeR)
library(caret)

### Getting the data ----
# The nine GEO series -- GSE1124 (GPL96 + GPL97), GSE117613, GSE35858,
# GSE34404, GSE116306, GSE119150, GSE16463 and GSE72058 -- were downloaded
# once with getGEO(..., GSEMatrix = TRUE, AnnotGPL = TRUE) and cached in:
load("./Data/MalariaData.rda")

### Expression matrices ----
expr1 <- exprs(dataset1)
expr2 <- exprs(dataset2)
expr3 <- exprs(dataset3)
expr4 <- exprs(dataset4)
expr5 <- exprs(dataset5)
expr6 <- exprs(dataset6)
expr7 <- exprs(dataset7)
expr8 <- exprs(dataset8)
expr9 <- exprs(dataset9)

### Feature (probe) annotation ----
featData1 <- fData(dataset1)
featData2 <- fData(dataset2)
featData3 <- fData(dataset3)
featData4 <- fData(dataset4)
featData5 <- fData(dataset5)
featData6 <- fData(dataset6)
featData7 <- fData(dataset7)
featData8 <- fData(dataset8)
featData9 <- fData(dataset9)

### Phenotype tables ----
pheno1 <- pData(dataset1)
pheno2 <- pData(dataset2)
pheno3 <- pData(dataset3)
pheno4 <- pData(dataset4)
pheno5 <- pData(dataset5)
pheno6 <- pData(dataset6)
pheno7 <- pData(dataset7)
pheno8 <- pData(dataset8)
pheno9 <- pData(dataset9)

### Annotation / cleaning ----
# Annotate probes with gene symbols and clean one expression matrix.
#  - expr:           probes x samples matrix
#  - symbols:        gene-symbol vector aligned with the rows of `expr`
#  - log2_transform: apply log2(x + 1) for series stored on a linear scale
#  - drop_empty:     drop probes whose symbol is the empty string
#  - box_cols:       sample columns shown in the diagnostic boxplot
# Prints dim/range and draws the density/boxplot diagnostics (as the
# original per-dataset code did); returns the cleaned matrix.
clean_expr <- function(expr, symbols, log2_transform = FALSE,
                       drop_empty = TRUE, box_cols = NULL) {
  rownames(expr) <- symbols
  if (drop_empty) {
    expr <- expr[!(rownames(expr) == ""), , drop = FALSE]
  }
  # keep probes with finite values in every sample
  finite_rows <- which(apply(expr, 1, function(x) all(is.finite(x))))
  expr <- expr[finite_rows, , drop = FALSE]
  expr <- expr[!is.na(rownames(expr)), , drop = FALSE]
  if (log2_transform) {
    expr <- log2(expr + 1)
  }
  print(dim(expr))
  print(range(expr))
  plot(density(expr))
  if (is.null(box_cols)) {
    box_cols <- seq_len(ncol(expr))
  }
  boxplot(expr[, box_cols])
  expr
}

# NOTE(review): unlike every other series, the original script did NOT drop
# empty gene symbols for expr1; preserved here (drop_empty = FALSE) -- confirm.
expr1 <- clean_expr(expr1, featData1$`Gene symbol`, log2_transform = TRUE,
                    drop_empty = FALSE)
expr2 <- clean_expr(expr2, featData2$`Gene symbol`, log2_transform = TRUE)
expr3 <- clean_expr(expr3, featData3$`Gene symbol`, box_cols = 1:15)
# GSE35858 values are already z-scored; no log transform
expr4 <- clean_expr(expr4,
                    featData4$`Composite Element Database Entry[Gene Symbol]`,
                    box_cols = 1:15)
expr5 <- clean_expr(expr5, featData5$`Gene symbol`, log2_transform = TRUE,
                    box_cols = 1:15)
expr6 <- clean_expr(expr6, featData6$GENE_SYMBOL, box_cols = 1:15)
expr7 <- clean_expr(expr7, featData7$`Gene Symbol`, box_cols = 1:10)
# GSE16463 values are already z-scored; no log transform
expr8 <- clean_expr(expr8, featData8$`Gene symbol`, box_cols = 1:10)
expr9 <- clean_expr(expr9, featData9$`Gene symbol`, log2_transform = TRUE,
                    box_cols = 1:10)

### Modify the phenotype: remove controls, recode disease status ----
# Recode one phenotype table and subset its expression matrix.
#  - status_col:  pData column holding the diagnosis label
#  - drop_values: rows with these labels (controls) are removed
#  - new_levels:  replacement level names, in the ALPHABETICAL order of the
#                 remaining original labels (levels()<- semantics)
# Enforces sample alignment (previously only printed via all());
# returns list(pheno = ..., expr = ...).
recode_status <- function(pheno, expr, status_col, drop_values, new_levels) {
  pheno <- pheno[!(pheno[[status_col]] %in% drop_values), ]
  pheno$DiseaseStatus <- as.factor(pheno[[status_col]])
  levels(pheno$DiseaseStatus) <- new_levels
  print(table(pheno$DiseaseStatus))
  expr <- expr[, colnames(expr) %in% rownames(pheno), drop = FALSE]
  stopifnot(all(rownames(pheno) == colnames(expr)))
  list(pheno = pheno, expr = expr)
}

# GSE1124-GPL96 ("diesease" is the column name exactly as spelled in GEO)
tmp <- recode_status(pheno1, expr1, "diesease status:ch1",
                     drop_values = "healthy",
                     new_levels = c("nonCerebral", "cerebral",
                                    "nonCerebral", "nonCerebral"))
pheno1 <- tmp$pheno
expr1 <- tmp$expr

# GSE1124-GPL97
tmp <- recode_status(pheno2, expr2, "diesease status:ch1",
                     drop_values = "healthy",
                     new_levels = c("nonCerebral", "cerebral",
                                    "nonCerebral", "nonCerebral"))
pheno2 <- tmp$pheno
expr2 <- tmp$expr

# GSE117613
tmp <- recode_status(pheno3, expr3, "diagnosis:ch1",
                     drop_values = "no Plasmodium falciparum infection",
                     new_levels = c("cerebral", "nonCerebral"))
pheno3 <- tmp$pheno
expr3 <- tmp$expr

# GSE35858 (all retained samples are non-cerebral)
tmp <- recode_status(pheno4, expr4, "disease state:ch2",
                     drop_values = "Healthy",
                     new_levels = c("nonCerebral", "nonCerebral"))
pheno4 <- tmp$pheno
expr4 <- tmp$expr
rm(tmp)
all(rownames(pheno4) == colnames(expr4)) # Pheno5 pheno5 <- pheno5[!(pheno5$source_name_ch1 == "Whole blood, age-matched control"), ] pheno5$DiseaseStatus <- as.factor(pheno5$source_name_ch1) levels(pheno5$DiseaseStatus) <- c("nonCerebral") table(pheno5$DiseaseStatus) expr5 <- expr5[, colnames(expr5) %in% rownames(pheno5)] all(rownames(pheno5) == colnames(expr5)) # Pheno6 pheno6$DiseaseStatus <- as.factor(pheno6$`disease state:ch1`) levels(pheno6$DiseaseStatus) <- c("cerebral", "nonCerebral", "nonCerebral") table(pheno6$DiseaseStatus) expr6 <- expr6[, colnames(expr6) %in% rownames(pheno6)] all(rownames(pheno6) == colnames(expr6)) # Pheno7 pheno7 <- pheno7[!(pheno7$`subject status:ch1` == "normal, healthy subject"), ] pheno7$DiseaseStatus <- as.factor(pheno7$`subject status:ch1`) levels(pheno7$DiseaseStatus) <- c("nonCerebral") table(pheno7$DiseaseStatus) expr7 <- expr7[, colnames(expr7) %in% rownames(pheno7)] all(rownames(pheno7) == colnames(expr7)) # Pheno8 pheno8 <- pheno8[pheno8$`disease group:ch1` %in% c("Malaria"), ] pheno8$DiseaseStatus <- as.factor(pheno8$`disease group:ch1`) levels(pheno8$DiseaseStatus) <- c("nonCerebral") table(pheno8$DiseaseStatus) expr8 <- expr8[, colnames(expr8) %in% rownames(pheno8)] all(rownames(pheno8) == colnames(expr8)) # Pheno9 pheno9$DiseaseStatus <- rep("cerebral", nrow(pheno9)) table(pheno9$DiseaseStatus) ######################################################################## ######################################################################## allpheno <- list(pheno1, pheno2, pheno3, pheno4, pheno5, pheno6, pheno7, pheno8, pheno9) names(allpheno) <- c("GSE1124-GPL96", "GSE1124-GPL97", "GSE117613", "GSE35858", "GSE34404", "GSE116306", "GSE119150", "GSE16463", "GSE72058") allExpr <- list(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9) names(allExpr) <- c("GSE1124-GPL96", "GSE1124-GPL97", "GSE117613", "GSE35858", "GSE34404", "GSE116306", "GSE119150", "GSE16463", "GSE72058") ### Filter phenotype information for 
the required samples DiseaseStatus <- mapply(x=allpheno, FUN=function(x) { x <- x[,"DiseaseStatus"] out <- factor(x, levels=c("nonCerebral", "cerebral")) out }) ################################################################################### ################################################################################## ### Find commom subset of genes commonGenes <- Reduce("intersect", lapply(allExpr, rownames)) ### Filter expression for the required samples exprsMalaria <- mapply(x=allExpr, FUN=function(x, gns) { x <- x[ gns ,] }, MoreArgs=list(gns=commonGenes)) ### Check all(names(exprsMalaria) == names(DiseaseStatus)) ### Check order all(rownames(allpheno$`GSE1124-GPL96`) == colnames(allExpr$`GSE1124-GPL96`)) all(rownames(allpheno$`GSE1124-GPL97`) == colnames(allExpr$`GSE1124-GPL97`)) all(rownames(allpheno$GSE117613) == colnames(allExpr$GSE117613)) all(rownames(allpheno$GSE35858) == colnames(allExpr$GSE35858)) all(rownames(allpheno$GSE34404) == colnames(allExpr$GSE34404)) all(rownames(allpheno$GSE116306) == colnames(allExpr$GSE116306)) all(rownames(allpheno$GSE119150) == colnames(allExpr$GSE119150)) all(rownames(allpheno$GSE16463) == colnames(allExpr$GSE16463)) all(rownames(allpheno$GSE72058) == colnames(allExpr$GSE72058)) ################################################################## ##################### ## All combined allMat <- do.call("cbind", exprsMalaria) allGroup <- unlist(DiseaseStatus) allStudies <- names(allGroup) names(allGroup) <- colnames(allMat) all(colnames(allMat) == names(allGroup)) load("./Objs/AllMat_Annot.rda") AllMat_Annot$CerebralStatus <- allGroup table(AllMat_Annot$CerebralStatus) table(AllMat_Annot$ComplicationStatus) save(AllMat_Annot, file = "./Objs/AllMat_Annot.rda") ############################################################# ### WBC count allpheno$GSE117613$WBC <- as.character(allpheno$GSE117613$`wbc.count:ch1`) allpheno$GSE117613$WBC <- as.numeric(allpheno$GSE117613$WBC) allpheno$GSE34404$WBC <- 
as.character(allpheno$GSE34404$`white blood cells:ch1`) allpheno$GSE34404$WBC <- as.numeric(allpheno$GSE34404$WBC) allpheno$GSE116306$WBC <- as.character(allpheno$GSE116306$`leucocytes count (giga/l):ch1`) allpheno$GSE116306$WBC <- as.numeric(allpheno$GSE116306$WBC) allpheno$GSE119150$WBC <- as.character(allpheno$GSE119150$`wbc (×10^9/l):ch1`) allpheno$GSE119150$WBC <- as.numeric(allpheno$GSE119150$WBC) ### Covariates of relevance select complete cases: WBC count allWBC <- lapply(allpheno, function(x) { i <- grep("WBC", colnames(x)) if (length(i) == 0) out <- factor(rep("", nrow(x))) else x <- as.numeric(x[, i ]) }) allWBC <- unlist(allWBC) ################################################################################# ############################### ### Age allpheno$GSE117613$AGE <- as.character(allpheno$GSE117613$characteristics_ch1.3) allpheno$GSE117613$AGE <- gsub("age: ", "", allpheno$GSE117613$AGE) allpheno$GSE117613$AGE <- as.numeric(allpheno$GSE117613$AGE) allpheno$GSE34404$AGE <- as.character(allpheno$GSE34404$`age (years):ch1`) allpheno$GSE34404$AGE <- as.numeric(allpheno$GSE34404$AGE) allpheno$GSE116306$AGE <- as.character(allpheno$GSE116306$`age:ch1`) allpheno$GSE116306$AGE <- as.numeric(allpheno$GSE116306$AGE) allpheno$GSE119150$AGE <- as.character(allpheno$GSE119150$`age (years):ch1`) allpheno$GSE119150$AGE <- as.numeric(allpheno$GSE119150$AGE) allpheno$GSE16463$AGE <- as.character(allpheno$GSE16463$`age (years):ch1`) allpheno$GSE16463$AGE <- as.numeric(allpheno$GSE16463$AGE) ### Covariates of relevance select complete cases: AGE allAGE <- lapply(allpheno, function(x) { i <- grep("^AGE$", colnames(x)) if (length(i) == 0) out <- rep(NA, nrow(x)) else x <- as.numeric(x[, i ]) }) allAGE <- unlist(allAGE) ################################################################################## ################################ ### Sex allpheno$GSE117613$GENDER <- as.character(allpheno$GSE117613$`Sex:ch1`) allpheno$GSE34404$GENDER <- 
as.character(allpheno$GSE34404$`gender:ch1`) allpheno$GSE34404$GENDER[allpheno$GSE34404$GENDER == "M"] <- "Male" allpheno$GSE34404$GENDER[allpheno$GSE34404$GENDER == "F"] <- "Female" allpheno$GSE116306$GENDER <- as.character(allpheno$GSE116306$`gender:ch1`) allpheno$GSE116306$GENDER[allpheno$GSE116306$GENDER == "M"] <- "Male" allpheno$GSE116306$GENDER[allpheno$GSE116306$GENDER == "F"] <- "Female" allpheno$GSE119150$GENDER <- as.character(allpheno$GSE119150$`gender:ch1`) allpheno$GSE119150$GENDER[allpheno$GSE119150$GENDER == "male"] <- "Male" allpheno$GSE16463$GENDER <- as.character(allpheno$GSE16463$`gender:ch1`) ### Covariates of relevance select complete cases: SEX allGENDER <- lapply(allpheno, function(x) { i <- grep("GENDER", colnames(x)) if (length(i) == 0) out <- rep(NA, nrow(x)) else x <- factor(x[, i ]) }) allGENDER <- factor(unlist(allGENDER)) ######################################################################### ### Assemble in one data.frame and turn numeric covs <- data.frame(STUDIES=allStudies, WBC=allWBC, GENDER=allGENDER, AGE=allAGE) ### Prepare vocs for sampling covs <- sapply(covs , function(x) as.numeric(factor(paste(x))) ) ########################################################################### ###SAMPLING ### Balanced stratification set.seed(333) trainingOrTesting <- balancedstratification( covs[ , , drop=FALSE], strata=1*(allGroup == "cerebral"), pik=inclusionprobabilities(1:nrow(covs), nrow(covs) * 0.3), comment=TRUE, method=1) ### Show apply(covs[, -ncol(covs),drop=FALSE], 2, table, allGroup, trainingOrTesting) ### Subset Training mixTrainMat <- allMat[ , trainingOrTesting == 0] mixTrainGroup <- allGroup[ trainingOrTesting == 0] mixTrainStudy <- allStudies[ trainingOrTesting == 0] ### Subset Testing mixTestMat <- allMat[ , trainingOrTesting == 1] mixTestGroup <- allGroup[ trainingOrTesting == 1] mixTestStudy <- allStudies[ trainingOrTesting == 1] table(mixTrainGroup) table(mixTestGroup) 
########################################################################### ### Save save(exprsMalaria, mixTrainMat, mixTrainGroup, mixTrainStudy, mixTestMat, mixTestGroup, mixTestStudy, file="./Objs/MalariaDataGood_NCvsC2.rda") ######################################################################### ######################################################################### ######################################################################## sessionInfo()
/Code/1_DataCollection_NCvsC.R
no_license
MohamedOmar2020/Malaria
R
false
false
18,568
r
############################################################################# ############################################################################# ## Mohamed Omar ## 10/4/2019 ## Goal: Getting and organizing the data for the next step ############################################################################ rm(list = ls()) setwd("~/Documents/Research/Projects/Malaria") renv::activate("~/Documents/Research/Projects/Malaria") library(GEOquery) library(Biobase) library(sampling) library(limma) library(edgeR) library(caret) ### Getting the data # dataset1_2 <- getGEO("GSE1124", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset1 <- dataset1_2$`GSE1124-GPL96_series_matrix.txt.gz` # dataset2 <- dataset1_2$`GSE1124-GPL97_series_matrix.txt.gz` # # dataset3 <- getGEO("GSE117613", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset3 <- dataset3$GSE117613_series_matrix.txt.gz # # dataset4 <- getGEO("GSE35858", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset4 <- dataset4$GSE35858_series_matrix.txt.gz # # dataset5 <- getGEO("GSE34404", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset5 <- dataset5$GSE34404_series_matrix.txt.gz # # dataset6 <- getGEO("GSE116306", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset6 <- dataset6$GSE116306_series_matrix.txt.gz # # dataset7 <- getGEO("GSE119150", GSEMatrix = TRUE, AnnotGPL = TRUE) # Problem # dataset7 <- dataset7$GSE119150_series_matrix.txt.gz # # dataset8 <- getGEO("GSE16463", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset8 <- dataset8$GSE16463_series_matrix.txt.gz # # dataset9 <- getGEO("GSE72058", GSEMatrix = TRUE, AnnotGPL = TRUE) # dataset9 <- dataset9$GSE72058_series_matrix.txt.gz # # save(dataset1, dataset2, dataset3, dataset4, dataset5, dataset6, dataset7, dataset8, dataset9, file = "./Data/MalariaData.rda") load("./Data/MalariaData.rda") ################## ## Expression expr1 <- exprs(dataset1) expr2 <- exprs(dataset2) expr3 <- exprs(dataset3) expr4 <- exprs(dataset4) expr5 <- exprs(dataset5) expr6 <- exprs(dataset6) expr7 <- exprs(dataset7) 
expr8 <- exprs(dataset8) expr9 <- exprs(dataset9) #################### ## Feature data featData1 <- fData(dataset1) featData2 <- fData(dataset2) featData3 <- fData(dataset3) featData4 <- fData(dataset4) featData5 <- fData(dataset5) featData6 <- fData(dataset6) featData7 <- fData(dataset7) featData8 <- fData(dataset8) featData9 <- fData(dataset9) #############################3 ## Phenotype pheno1 <- pData(dataset1) pheno2 <- pData(dataset2) pheno3 <- pData(dataset3) pheno4 <- pData(dataset4) pheno5 <- pData(dataset5) pheno6 <- pData(dataset6) pheno7 <- pData(dataset7) pheno8 <- pData(dataset8) pheno9 <- pData(dataset9) ############################ ## Annotation ## Expr1 head(rownames(expr1)) rownames(expr1) <- featData1$`Gene symbol` summary(is.na(rownames(expr1))) #rownames(expr1) <- gsub("-","", rownames(expr1)) #rownames(expr1) <- gsub("_","",rownames(expr1)) sel <- which(apply(expr1, 1, function(x) all(is.finite(x)) )) expr1 <- expr1[sel, ] expr1 <- expr1[!is.na(rownames(expr1)),] dim(expr1) expr1 <- log2(expr1 + 1) range(expr1) plot(density(expr1)) boxplot(expr1) # X1 <- expr1 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt1 <- genefilter(2^X1,ffun) # expr1 <- expr1[filt1,] # expr1 <- t(scale(t(expr1), center = TRUE, scale = TRUE)) ############################# ## Expr2 head(rownames(expr2)) rownames(expr2) <- featData2$`Gene symbol` expr2 <- expr2[!(rownames(expr2) == ""), ] #rownames(expr2) <- gsub("-","", rownames(expr2)) # rownames(expr2) <- gsub("_","",rownames(expr2)) sel <- which(apply(expr2, 1, function(x) all(is.finite(x)) )) expr2 <- expr2[sel, ] expr2 <- expr2[!is.na(rownames(expr2)),] dim(expr2) expr2 <- log2(expr2 + 1) range(expr2) plot(density(expr2)) boxplot(expr2) # X2 <- expr2 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt2 <- genefilter(2^X2,ffun) # expr2 <- expr2[filt2,] # dim(expr2) # expr2 <- t(scale(t(expr2), center = TRUE, scale = TRUE)) ############################## ## Expr3 head(rownames(expr3)) rownames(expr3) <- 
featData3$`Gene symbol` expr3 <- expr3[!(rownames(expr3) == ""), ] #rownames(expr3) <- gsub("-","", rownames(expr3)) # rownames(expr3) <- gsub("_","",rownames(expr3)) sel <- which(apply(expr3, 1, function(x) all(is.finite(x)) )) expr3 <- expr3[sel, ] expr3 <- expr3[!is.na(rownames(expr3)),] dim(expr3) range(expr3) plot(density(expr3)) boxplot(expr3[,1:15]) # X3 <- expr3 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr3 <- expr3[filt3,] # dim(expr3) # expr3 <- t(scale(t(expr3), center = TRUE, scale = TRUE)) #############################3 ## Expr4 head(rownames(expr4)) rownames(expr4) <- featData4$`Composite Element Database Entry[Gene Symbol]` expr4 <- expr4[!(rownames(expr4) == ""), ] #rownames(expr4) <- gsub("-","", rownames(expr4)) # rownames(expr4) <- gsub("_","",rownames(expr4)) sel <- which(apply(expr4, 1, function(x) all(is.finite(x)) )) expr4 <- expr4[sel, ] expr4 <- expr4[!is.na(rownames(expr4)),] dim(expr4) range(expr4) ## Z scored plot(density(expr4)) boxplot(expr4[,1:15]) # X3 <- expr4 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr4 <- expr4[filt3,] # dim(expr4) # # expr4 <- t(scale(t(expr4), center = TRUE, scale = TRUE)) #############################3 ## Expr5 head(rownames(expr5)) rownames(expr5) <- featData5$`Gene symbol` expr5 <- expr5[!(rownames(expr5) == ""), ] #rownames(expr5) <- gsub("-","", rownames(expr5)) # rownames(expr5) <- gsub("_","",rownames(expr5)) sel <- which(apply(expr5, 1, function(x) all(is.finite(x)) )) expr5 <- expr5[sel, ] expr5 <- expr5[!is.na(rownames(expr5)),] dim(expr5) expr5 <- log2(expr5 + 1) range(expr5) plot(density(expr5)) boxplot(expr5[,1:15]) # X3 <- expr5 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr5 <- expr5[filt3,] # dim(expr5) # expr5 <- t(scale(t(expr5), center = TRUE, scale = TRUE)) #############################3 ## Expr6 head(rownames(expr6)) rownames(expr6) <- featData6$GENE_SYMBOL expr6 <- 
expr6[!(rownames(expr6) == ""), ] #rownames(expr6) <- gsub("-","", rownames(expr6)) # rownames(expr6) <- gsub("_","",rownames(expr6)) sel <- which(apply(expr6, 1, function(x) all(is.finite(x)) )) expr6 <- expr6[sel, ] expr6 <- expr6[!is.na(rownames(expr6)),] dim(expr6) range(expr6) plot(density(expr6)) boxplot(expr6[,1:15]) # X3 <- expr6 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr6 <- expr6[filt3,] # dim(expr6) # expr6 <- t(scale(t(expr6), center = TRUE, scale = TRUE)) #############################3 ## Expr7 head(rownames(expr7)) rownames(expr7) <- featData7$`Gene Symbol` expr7 <- expr7[!(rownames(expr7) == ""), ] #rownames(expr7) <- gsub("-","", rownames(expr7)) # rownames(expr7) <- gsub("_","",rownames(expr7)) sel <- which(apply(expr7, 1, function(x) all(is.finite(x)) )) expr7 <- expr7[sel, ] expr7 <- expr7[!is.na(rownames(expr7)),] dim(expr7) range(expr7) plot(density(expr7)) boxplot(expr7[,1:10]) # X3 <- expr7 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr7 <- expr7[filt3,] # dim(expr7) # expr7 <- t(scale(t(expr7), center = TRUE, scale = TRUE)) #############################3 ## Expr8 head(rownames(expr8)) rownames(expr8) <- featData8$`Gene symbol` expr8 <- expr8[!(rownames(expr8) == ""), ] #rownames(expr8) <- gsub("-","", rownames(expr8)) # rownames(expr8) <- gsub("_","",rownames(expr8)) sel <- which(apply(expr8, 1, function(x) all(is.finite(x)) )) expr8 <- expr8[sel, ] expr8 <- expr8[!is.na(rownames(expr8)),] dim(expr8) range(expr8) ## Z-scored plot(density(expr8)) boxplot(expr8[,1:10]) # X3 <- expr8 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr8 <- expr8[filt3,] # dim(expr8) # # expr8 <- t(scale(t(expr8), center = TRUE, scale = TRUE)) #############################3 ## Expr9 head(rownames(expr9)) rownames(expr9) <- featData9$`Gene symbol` expr9 <- expr9[!(rownames(expr9) == ""), ] #rownames(expr9) <- gsub("-","", rownames(expr9)) # 
rownames(expr9) <- gsub("_","",rownames(expr9)) sel <- which(apply(expr9, 1, function(x) all(is.finite(x)) )) expr9 <- expr9[sel, ] expr9 <- expr9[!is.na(rownames(expr9)),] dim(expr9) expr9 <- log2(expr9 + 1) range(expr9) plot(density(expr9)) boxplot(expr9[,1:10]) # X3 <- expr9 # ffun <- filterfun(pOverA(p = 0.5, A = 100)) # filt3 <- genefilter(2^X3,ffun) # expr9 <- expr9[filt3,] # dim(expr9) # expr9 <- t(scale(t(expr9), center = TRUE, scale = TRUE)) ################################################################################ ############################################################################### ### Modify the phenotype # Remove controls # Pheno1 pheno1 <- pheno1[!(pheno1$`diesease status:ch1` == "healthy"), ] pheno1$DiseaseStatus <- as.factor(pheno1$`diesease status:ch1`) levels(pheno1$DiseaseStatus) <- c("nonCerebral", "cerebral", "nonCerebral", "nonCerebral") table(pheno1$DiseaseStatus) expr1 <- expr1[, colnames(expr1) %in% rownames(pheno1)] all(rownames(pheno1) == colnames(expr1)) # Pheno2 pheno2 <- pheno2[!(pheno2$`diesease status:ch1` == "healthy"), ] pheno2$DiseaseStatus <- as.factor(pheno2$`diesease status:ch1`) levels(pheno2$DiseaseStatus) <- c("nonCerebral", "cerebral", "nonCerebral", "nonCerebral") table(pheno2$DiseaseStatus) expr2 <- expr2[, colnames(expr2) %in% rownames(pheno2)] all(rownames(pheno2) == colnames(expr2)) # Pheno3 pheno3 <- pheno3[!(pheno3$`diagnosis:ch1` == "no Plasmodium falciparum infection"), ] pheno3$DiseaseStatus <- as.factor(pheno3$`diagnosis:ch1`) levels(pheno3$DiseaseStatus) <- c("cerebral", "nonCerebral") table(pheno3$DiseaseStatus) expr3 <- expr3[, colnames(expr3) %in% rownames(pheno3)] all(rownames(pheno3) == colnames(expr3)) # Pheno4 pheno4 <- pheno4[!(pheno4$`disease state:ch2` == "Healthy"), ] pheno4$DiseaseStatus <- as.factor(pheno4$`disease state:ch2`) levels(pheno4$DiseaseStatus) <- c("nonCerebral", "nonCerebral") table(pheno4$DiseaseStatus) expr4 <- expr4[, colnames(expr4) %in% rownames(pheno4)] 
all(rownames(pheno4) == colnames(expr4)) # Pheno5 pheno5 <- pheno5[!(pheno5$source_name_ch1 == "Whole blood, age-matched control"), ] pheno5$DiseaseStatus <- as.factor(pheno5$source_name_ch1) levels(pheno5$DiseaseStatus) <- c("nonCerebral") table(pheno5$DiseaseStatus) expr5 <- expr5[, colnames(expr5) %in% rownames(pheno5)] all(rownames(pheno5) == colnames(expr5)) # Pheno6 pheno6$DiseaseStatus <- as.factor(pheno6$`disease state:ch1`) levels(pheno6$DiseaseStatus) <- c("cerebral", "nonCerebral", "nonCerebral") table(pheno6$DiseaseStatus) expr6 <- expr6[, colnames(expr6) %in% rownames(pheno6)] all(rownames(pheno6) == colnames(expr6)) # Pheno7 pheno7 <- pheno7[!(pheno7$`subject status:ch1` == "normal, healthy subject"), ] pheno7$DiseaseStatus <- as.factor(pheno7$`subject status:ch1`) levels(pheno7$DiseaseStatus) <- c("nonCerebral") table(pheno7$DiseaseStatus) expr7 <- expr7[, colnames(expr7) %in% rownames(pheno7)] all(rownames(pheno7) == colnames(expr7)) # Pheno8 pheno8 <- pheno8[pheno8$`disease group:ch1` %in% c("Malaria"), ] pheno8$DiseaseStatus <- as.factor(pheno8$`disease group:ch1`) levels(pheno8$DiseaseStatus) <- c("nonCerebral") table(pheno8$DiseaseStatus) expr8 <- expr8[, colnames(expr8) %in% rownames(pheno8)] all(rownames(pheno8) == colnames(expr8)) # Pheno9 pheno9$DiseaseStatus <- rep("cerebral", nrow(pheno9)) table(pheno9$DiseaseStatus) ######################################################################## ######################################################################## allpheno <- list(pheno1, pheno2, pheno3, pheno4, pheno5, pheno6, pheno7, pheno8, pheno9) names(allpheno) <- c("GSE1124-GPL96", "GSE1124-GPL97", "GSE117613", "GSE35858", "GSE34404", "GSE116306", "GSE119150", "GSE16463", "GSE72058") allExpr <- list(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9) names(allExpr) <- c("GSE1124-GPL96", "GSE1124-GPL97", "GSE117613", "GSE35858", "GSE34404", "GSE116306", "GSE119150", "GSE16463", "GSE72058") ### Filter phenotype information for 
the required samples DiseaseStatus <- mapply(x=allpheno, FUN=function(x) { x <- x[,"DiseaseStatus"] out <- factor(x, levels=c("nonCerebral", "cerebral")) out }) ################################################################################### ################################################################################## ### Find commom subset of genes commonGenes <- Reduce("intersect", lapply(allExpr, rownames)) ### Filter expression for the required samples exprsMalaria <- mapply(x=allExpr, FUN=function(x, gns) { x <- x[ gns ,] }, MoreArgs=list(gns=commonGenes)) ### Check all(names(exprsMalaria) == names(DiseaseStatus)) ### Check order all(rownames(allpheno$`GSE1124-GPL96`) == colnames(allExpr$`GSE1124-GPL96`)) all(rownames(allpheno$`GSE1124-GPL97`) == colnames(allExpr$`GSE1124-GPL97`)) all(rownames(allpheno$GSE117613) == colnames(allExpr$GSE117613)) all(rownames(allpheno$GSE35858) == colnames(allExpr$GSE35858)) all(rownames(allpheno$GSE34404) == colnames(allExpr$GSE34404)) all(rownames(allpheno$GSE116306) == colnames(allExpr$GSE116306)) all(rownames(allpheno$GSE119150) == colnames(allExpr$GSE119150)) all(rownames(allpheno$GSE16463) == colnames(allExpr$GSE16463)) all(rownames(allpheno$GSE72058) == colnames(allExpr$GSE72058)) ################################################################## ##################### ## All combined allMat <- do.call("cbind", exprsMalaria) allGroup <- unlist(DiseaseStatus) allStudies <- names(allGroup) names(allGroup) <- colnames(allMat) all(colnames(allMat) == names(allGroup)) load("./Objs/AllMat_Annot.rda") AllMat_Annot$CerebralStatus <- allGroup table(AllMat_Annot$CerebralStatus) table(AllMat_Annot$ComplicationStatus) save(AllMat_Annot, file = "./Objs/AllMat_Annot.rda") ############################################################# ### WBC count allpheno$GSE117613$WBC <- as.character(allpheno$GSE117613$`wbc.count:ch1`) allpheno$GSE117613$WBC <- as.numeric(allpheno$GSE117613$WBC) allpheno$GSE34404$WBC <- 
as.character(allpheno$GSE34404$`white blood cells:ch1`) allpheno$GSE34404$WBC <- as.numeric(allpheno$GSE34404$WBC) allpheno$GSE116306$WBC <- as.character(allpheno$GSE116306$`leucocytes count (giga/l):ch1`) allpheno$GSE116306$WBC <- as.numeric(allpheno$GSE116306$WBC) allpheno$GSE119150$WBC <- as.character(allpheno$GSE119150$`wbc (×10^9/l):ch1`) allpheno$GSE119150$WBC <- as.numeric(allpheno$GSE119150$WBC) ### Covariates of relevance select complete cases: WBC count allWBC <- lapply(allpheno, function(x) { i <- grep("WBC", colnames(x)) if (length(i) == 0) out <- factor(rep("", nrow(x))) else x <- as.numeric(x[, i ]) }) allWBC <- unlist(allWBC) ################################################################################# ############################### ### Age allpheno$GSE117613$AGE <- as.character(allpheno$GSE117613$characteristics_ch1.3) allpheno$GSE117613$AGE <- gsub("age: ", "", allpheno$GSE117613$AGE) allpheno$GSE117613$AGE <- as.numeric(allpheno$GSE117613$AGE) allpheno$GSE34404$AGE <- as.character(allpheno$GSE34404$`age (years):ch1`) allpheno$GSE34404$AGE <- as.numeric(allpheno$GSE34404$AGE) allpheno$GSE116306$AGE <- as.character(allpheno$GSE116306$`age:ch1`) allpheno$GSE116306$AGE <- as.numeric(allpheno$GSE116306$AGE) allpheno$GSE119150$AGE <- as.character(allpheno$GSE119150$`age (years):ch1`) allpheno$GSE119150$AGE <- as.numeric(allpheno$GSE119150$AGE) allpheno$GSE16463$AGE <- as.character(allpheno$GSE16463$`age (years):ch1`) allpheno$GSE16463$AGE <- as.numeric(allpheno$GSE16463$AGE) ### Covariates of relevance select complete cases: AGE allAGE <- lapply(allpheno, function(x) { i <- grep("^AGE$", colnames(x)) if (length(i) == 0) out <- rep(NA, nrow(x)) else x <- as.numeric(x[, i ]) }) allAGE <- unlist(allAGE) ################################################################################## ################################ ### Sex allpheno$GSE117613$GENDER <- as.character(allpheno$GSE117613$`Sex:ch1`) allpheno$GSE34404$GENDER <- 
as.character(allpheno$GSE34404$`gender:ch1`) allpheno$GSE34404$GENDER[allpheno$GSE34404$GENDER == "M"] <- "Male" allpheno$GSE34404$GENDER[allpheno$GSE34404$GENDER == "F"] <- "Female" allpheno$GSE116306$GENDER <- as.character(allpheno$GSE116306$`gender:ch1`) allpheno$GSE116306$GENDER[allpheno$GSE116306$GENDER == "M"] <- "Male" allpheno$GSE116306$GENDER[allpheno$GSE116306$GENDER == "F"] <- "Female" allpheno$GSE119150$GENDER <- as.character(allpheno$GSE119150$`gender:ch1`) allpheno$GSE119150$GENDER[allpheno$GSE119150$GENDER == "male"] <- "Male" allpheno$GSE16463$GENDER <- as.character(allpheno$GSE16463$`gender:ch1`) ### Covariates of relevance select complete cases: SEX allGENDER <- lapply(allpheno, function(x) { i <- grep("GENDER", colnames(x)) if (length(i) == 0) out <- rep(NA, nrow(x)) else x <- factor(x[, i ]) }) allGENDER <- factor(unlist(allGENDER)) ######################################################################### ### Assemble in one data.frame and turn numeric covs <- data.frame(STUDIES=allStudies, WBC=allWBC, GENDER=allGENDER, AGE=allAGE) ### Prepare vocs for sampling covs <- sapply(covs , function(x) as.numeric(factor(paste(x))) ) ########################################################################### ###SAMPLING ### Balanced stratification set.seed(333) trainingOrTesting <- balancedstratification( covs[ , , drop=FALSE], strata=1*(allGroup == "cerebral"), pik=inclusionprobabilities(1:nrow(covs), nrow(covs) * 0.3), comment=TRUE, method=1) ### Show apply(covs[, -ncol(covs),drop=FALSE], 2, table, allGroup, trainingOrTesting) ### Subset Training mixTrainMat <- allMat[ , trainingOrTesting == 0] mixTrainGroup <- allGroup[ trainingOrTesting == 0] mixTrainStudy <- allStudies[ trainingOrTesting == 0] ### Subset Testing mixTestMat <- allMat[ , trainingOrTesting == 1] mixTestGroup <- allGroup[ trainingOrTesting == 1] mixTestStudy <- allStudies[ trainingOrTesting == 1] table(mixTrainGroup) table(mixTestGroup) 
########################################################################### ### Save save(exprsMalaria, mixTrainMat, mixTrainGroup, mixTrainStudy, mixTestMat, mixTestGroup, mixTestStudy, file="./Objs/MalariaDataGood_NCvsC2.rda") ######################################################################### ######################################################################### ######################################################################## sessionInfo()
e_mat <- matrix(c(2,0),ncol=1) p_vec <- c(1, 1) z0 <- c(1000) # initial population vector times <- seq(1,20) func_deps <- c('c[1]','c[2]') priors <- rep(list(list(name="uniform",params=c(0, 1), bounds=c(0,2))),2) mod <- bp_model_simple_birth_death(func_deps, 2, 0) simulation_params <- c(0.1, 0.05) simulation_dat <- bpsims(mod, simulation_params, z0, times, 25) dat <- stan_data_from_simulation(simulation_dat, mod, simple_bd = T) stan_code = generate(mod, priors, simple_bd = T) options(mc.cores = parallel::detectCores()) ranges <- matrix(rep(c(0,1),nrow(e_mat)),nrow(e_mat),2,byrow = T) init <- uniform_initialize(ranges, 4) stan_mod <- rstan::stan_model(model_code = stan_code) fit_data <- rstan::sampling(stan_mod, data = dat, control = list(adapt_delta = 0.95), chains = 4, refresh = 1, iter = 3000, warmup = 1000)
/examples/one_type.R
no_license
jproney/bpinference
R
false
false
832
r
e_mat <- matrix(c(2,0),ncol=1) p_vec <- c(1, 1) z0 <- c(1000) # initial population vector times <- seq(1,20) func_deps <- c('c[1]','c[2]') priors <- rep(list(list(name="uniform",params=c(0, 1), bounds=c(0,2))),2) mod <- bp_model_simple_birth_death(func_deps, 2, 0) simulation_params <- c(0.1, 0.05) simulation_dat <- bpsims(mod, simulation_params, z0, times, 25) dat <- stan_data_from_simulation(simulation_dat, mod, simple_bd = T) stan_code = generate(mod, priors, simple_bd = T) options(mc.cores = parallel::detectCores()) ranges <- matrix(rep(c(0,1),nrow(e_mat)),nrow(e_mat),2,byrow = T) init <- uniform_initialize(ranges, 4) stan_mod <- rstan::stan_model(model_code = stan_code) fit_data <- rstan::sampling(stan_mod, data = dat, control = list(adapt_delta = 0.95), chains = 4, refresh = 1, iter = 3000, warmup = 1000)
t<-list( t0=c( 5 , 1 , 45 ), lowert=c( 5 , 0 , 10 ), swfirst=c( 16 ), swlast=c( 17 ), phases=matrix(c( 1 , 3 , 4 , 3 , 4 , 6 ),2), maxl=200,phasemax=c(100,0,100,100),cap=defaultcap,daycap=defaultdaycap,comment=1,sowreq=-0.0)
/scenarios/Amedwuha/payout.data/contract.R
no_license
zachary62/data
R
false
false
225
r
t<-list( t0=c( 5 , 1 , 45 ), lowert=c( 5 , 0 , 10 ), swfirst=c( 16 ), swlast=c( 17 ), phases=matrix(c( 1 , 3 , 4 , 3 , 4 , 6 ),2), maxl=200,phasemax=c(100,0,100,100),cap=defaultcap,daycap=defaultdaycap,comment=1,sowreq=-0.0)
# rm(list=ls(all=TRUE)) loadData <- function(location='/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/train.csv') { library(caret); set.seed(1234) data <- read.csv(location, na.strings = c("NA", "#DIV/0!", "")) # remove column with unique number data$X <- NULL # remove 'zero' and 'near-zero' columns uselessColumns <- nearZeroVar(data, freqCut = 95/5, uniqueCut = 10, saveMetrics = T) uselessColumns <- names(data[,uselessColumns$zeroVar==TRUE | uselessColumns$nzv==TRUE]) data <- data[,!(names(data) %in% uselessColumns)] # remove columns in which > 97% of the values are not available data <- data[,colSums(is.na(data))<(nrow(data)*0.97)] # randomize the dataset data <- data[order(runif(nrow(data))),] inTrain <- createDataPartition(data$classe, p = 0.7, list=F) training <- data[inTrain,] testing <- data[-inTrain,] trainedModel <- train(classe ~ . , training, method="rf", trControl = trainControl(method = "cv", number = 2),do.trace=T,ntree=50) prediction <- predict(trainedModel, testing) confusionMatrix(prediction, testing$classe) testdata <- read.csv('/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/test.csv', na.strings = c("NA", "#DIV/0!", "")) prediction <- predict(trainedModel, testdata) pml_write_files(prediction) trainedModel } pml_write_files = function(x) { n = length(x) for(i in 1:n){ filename = paste0("/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/problem_id_",i,".txt") write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE) } }
/8_Practical_Machine_Learning/course_project/activity_prediction.R
no_license
WijnandNuij/DataScience
R
false
false
1,920
r
# rm(list=ls(all=TRUE)) loadData <- function(location='/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/train.csv') { library(caret); set.seed(1234) data <- read.csv(location, na.strings = c("NA", "#DIV/0!", "")) # remove column with unique number data$X <- NULL # remove 'zero' and 'near-zero' columns uselessColumns <- nearZeroVar(data, freqCut = 95/5, uniqueCut = 10, saveMetrics = T) uselessColumns <- names(data[,uselessColumns$zeroVar==TRUE | uselessColumns$nzv==TRUE]) data <- data[,!(names(data) %in% uselessColumns)] # remove columns in which > 97% of the values are not available data <- data[,colSums(is.na(data))<(nrow(data)*0.97)] # randomize the dataset data <- data[order(runif(nrow(data))),] inTrain <- createDataPartition(data$classe, p = 0.7, list=F) training <- data[inTrain,] testing <- data[-inTrain,] trainedModel <- train(classe ~ . , training, method="rf", trControl = trainControl(method = "cv", number = 2),do.trace=T,ntree=50) prediction <- predict(trainedModel, testing) confusionMatrix(prediction, testing$classe) testdata <- read.csv('/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/test.csv', na.strings = c("NA", "#DIV/0!", "")) prediction <- predict(trainedModel, testdata) pml_write_files(prediction) trainedModel } pml_write_files = function(x) { n = length(x) for(i in 1:n){ filename = paste0("/home/wijnand/R_workspace/8_Practical_Machine_Learning/course_project/resources/problem_id_",i,".txt") write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE) } }
" nox: nitrogen oxides concentration (parts per 10 million). dis: weighted mean of distances to five Boston employment centres. " cor(Boston$nox, Boston$dis) #-0.7692301 "Why is the negative correlation between the concentration of Nitrogen oxides and distance to employment centres so so strong?" "First of all we need to know what Nitrogen Oxides is. According to an article by CorrosionPedia which can be read here: https://www.corrosionpedia.com/definition/5304/nitrogen-oxide. Nitrogen oxide is a common term for mono-nitrogen oxides such as nitric oxide (NO) and nitrogen dioxide (NO2). Its chemical formula is (NOx). It is formed when nitrogen (N2) comes in contact or reacts with oxygen (O2). It is a group of highly reactive gases, toxic and one of the causes of acid rain. It does not have any odor or color. It is produced by human activity and when fossil fuels are burned at high temperatures. It is considered to be a pollutant." "This is weird. If the distance to five Boston employment centres is high then people should spent more time driving, thus, producing more nitrogen oxides. The correlation should be positive, but in reality it is negative. Could it be that driving produces many more chemical compounds other than nitrogen oxide in much larger quantity such that even though there is more nitrogen oxide, the concentration actually goes down because the other compounds are increasing by a larger degree?" library(ggplot2) ggplot(Boston, aes(dis, nox)) + geom_point(color = "sky blue", size = 2, alpha = 0.5) + geom_smooth(group = 1, se = F, color = "red") + xlab("Mean Distance to 5 Boston Employment Centre") + ylab("Nitrogen Oxide Concentration (Parts Per 10 Million)") + ggtitle("Distance to Employment Centre vs NOx Concentration") + theme_bw() "The relationship is clearly non-linear, that is why we didn't use lm." 'Oh after i think about it. I think i have found the answer. The reason why the correlation is negative is this. 
High value for the mean distance to the 5 employment centre means that the town is more rural. The buildings are far from each other, like in a small town which has considerably fewer cars than the larger towns. This is why the concentration is lower in the small towns. Even though people need to drive longer, there is simply way less people. Furthermore we all know that the air quality is better in small towns than large ones. This happens because small towns have fewer cars, fewer factories, fewer people. This is why people go on a vacation to the mountains, like Puncak in Indonesia. People living in Jakarta (the capital city of Indonesia) often goes to Puncak just to refresh themselves. Question answered."
/script/nox dis.R
no_license
JonathanRyanW/MASS_Boston
R
false
false
2,701
r
" nox: nitrogen oxides concentration (parts per 10 million). dis: weighted mean of distances to five Boston employment centres. " cor(Boston$nox, Boston$dis) #-0.7692301 "Why is the negative correlation between the concentration of Nitrogen oxides and distance to employment centres so so strong?" "First of all we need to know what Nitrogen Oxides is. According to an article by CorrosionPedia which can be read here: https://www.corrosionpedia.com/definition/5304/nitrogen-oxide. Nitrogen oxide is a common term for mono-nitrogen oxides such as nitric oxide (NO) and nitrogen dioxide (NO2). Its chemical formula is (NOx). It is formed when nitrogen (N2) comes in contact or reacts with oxygen (O2). It is a group of highly reactive gases, toxic and one of the causes of acid rain. It does not have any odor or color. It is produced by human activity and when fossil fuels are burned at high temperatures. It is considered to be a pollutant." "This is weird. If the distance to five Boston employment centres is high then people should spent more time driving, thus, producing more nitrogen oxides. The correlation should be positive, but in reality it is negative. Could it be that driving produces many more chemical compounds other than nitrogen oxide in much larger quantity such that even though there is more nitrogen oxide, the concentration actually goes down because the other compounds are increasing by a larger degree?" library(ggplot2) ggplot(Boston, aes(dis, nox)) + geom_point(color = "sky blue", size = 2, alpha = 0.5) + geom_smooth(group = 1, se = F, color = "red") + xlab("Mean Distance to 5 Boston Employment Centre") + ylab("Nitrogen Oxide Concentration (Parts Per 10 Million)") + ggtitle("Distance to Employment Centre vs NOx Concentration") + theme_bw() "The relationship is clearly non-linear, that is why we didn't use lm." 'Oh after i think about it. I think i have found the answer. The reason why the correlation is negative is this. 
High value for the mean distance to the 5 employment centre means that the town is more rural. The buildings are far from each other, like in a small town which has considerably fewer cars than the larger towns. This is why the concentration is lower in the small towns. Even though people need to drive longer, there is simply way less people. Furthermore we all know that the air quality is better in small towns than large ones. This happens because small towns have fewer cars, fewer factories, fewer people. This is why people go on a vacation to the mountains, like Puncak in Indonesia. People living in Jakarta (the capital city of Indonesia) often goes to Puncak just to refresh themselves. Question answered."
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trades.R \name{getPortNameByAccount} \alias{getPortNameByAccount} \title{A function to map portfolio info to a trades data frame by account id, referenced as "account".} \usage{ getPortNameByAccount(data, session) } \arguments{ \item{data}{a trades data frame} \item{session}{The rdecaf session} } \value{ A data frame } \description{ Maps portfolio information onto a trades data frame, matching portfolios to trades by their account id (the "account" field). }
/man/getPortNameByAccount.Rd
no_license
beatnaut/remaputils
R
false
true
440
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trades.R \name{getPortNameByAccount} \alias{getPortNameByAccount} \title{A function to map portfolio info to a trades data frame by account id, referenced as "account".} \usage{ getPortNameByAccount(data, session) } \arguments{ \item{data}{a trades data frame} \item{session}{The rdecaf session} } \value{ A data frame } \description{ Maps portfolio information onto a trades data frame, matching portfolios to trades by their account id (the "account" field). }
# Example: fit a partially collapsed LDA model (pcldar) on the bundled
# "100ap" Associated Press sample and inspect likelihoods and top words.
library(tidyverse)
library(pcldar)
library(magrittr)

# Model hyper-parameters.
nr_topics <- 20
ds_fn <- "na"            # placeholder; the real dataset path is set below
iterations <- 1000

# Sampler configuration.  NOTE(review): beta = nr_topics/50 grows with the
# topic count -- confirm this is intended rather than the 50/nr_topics form.
cnf <- new_simple_lda_config(ds_fn,
                             nr_topics = nr_topics,
                             alpha = 0.01,
                             beta = (nr_topics / 50),
                             iterations = iterations,
                             rareword_threshold = 10,
                             stoplist_fn = system.file("extdata", "stoplist.txt", package = "pcldar"),
                             topic_interval = 10,
                             tmpdir = "/tmp")

# Load the sample corpus shipped with the package, one document per line.
ds_fn <- system.file("extdata", "100ap.txt", package = "pcldar")
trtextdf <- as.data.frame(readLines(ds_fn))
colnames(trtextdf) <- "line"
# Strip non-ASCII bytes (iconv yields NA for untranslatable lines).
trtextdf <- trtextdf %>% dplyr::mutate(line = iconv(line,"latin1", "ASCII"))
doclines <- as.character(trtextdf$line)

# The same lines serve as both train and test set in this toy example.
dss <- create_lda_dataset(doclines,doclines)
lda <- sample_pclda(cnf, dss[[1]], iterations = iterations, testset=dss[[2]])

# Posterior summaries.
phi <- get_phi(lda)                  # topic-word distributions
ttm <- get_type_topics(lda)          # type-topic count matrix
dens <- calculate_ttm_density(ttm)
zBar <- get_z_means(lda)
theta <- get_theta_estimate(lda)     # document-topic distributions
tw <- get_topwords(lda)
trw <- get_top_relevance_words(lda,cnf)

# Likelihood traces (recorded every `topic_interval` = 10 iterations).
ll <- get_log_likelihood(lda)
ll
hll <- get_held_out_log_likelihood(lda)
hll

# Plot training vs held-out likelihood over iterations.
stats <- data.frame(iter=(1:(length(ll)))*10,loglikelihood=ll,heldout_likelihood=hll)
stats %<>% tidyr::gather(type,value,-iter)
ggplot(stats,aes(x=iter,y=value, color=type)) + geom_line() + theme_bw()

cat("Top Words:\n")
cat(print_top_words(get_topwords(lda)))
cat("Top Relevance Words:\n")
cat(print_top_words(get_top_relevance_words(lda,cnf)))
/inst/examples/lda_from_text.R
no_license
lejon/pcldar
R
false
false
1,556
r
# pcldar usage example: sample a partially collapsed LDA model over the
# packaged "100ap" corpus, then report likelihood traces and top words.
library(tidyverse)
library(pcldar)
library(magrittr)

nr_topics <- 20          # number of topics
ds_fn <- "na"            # placeholder path, replaced before loading data
iterations <- 1000       # Gibbs sampling iterations

# Build the sampler configuration (stoplist is shipped with the package;
# likelihoods are recorded every topic_interval = 10 iterations).
cnf <- new_simple_lda_config(ds_fn,
                             nr_topics = nr_topics,
                             alpha = 0.01,
                             beta = (nr_topics / 50),
                             iterations = iterations,
                             rareword_threshold = 10,
                             stoplist_fn = system.file("extdata", "stoplist.txt", package = "pcldar"),
                             topic_interval = 10,
                             tmpdir = "/tmp")

# Read the bundled corpus, one document per line, and drop non-ASCII bytes.
ds_fn <- system.file("extdata", "100ap.txt", package = "pcldar")
trtextdf <- as.data.frame(readLines(ds_fn))
colnames(trtextdf) <- "line"
trtextdf <- trtextdf %>% dplyr::mutate(line = iconv(line,"latin1", "ASCII"))
doclines <- as.character(trtextdf$line)

# Train/test sets are the same lines in this demo.
dss <- create_lda_dataset(doclines,doclines)
lda <- sample_pclda(cnf, dss[[1]], iterations = iterations, testset=dss[[2]])

# Extract model summaries from the sampled state.
phi <- get_phi(lda)
ttm <- get_type_topics(lda)
dens <- calculate_ttm_density(ttm)
zBar <- get_z_means(lda)
theta <- get_theta_estimate(lda)
tw <- get_topwords(lda)
trw <- get_top_relevance_words(lda,cnf)

# Likelihood traces: training and held-out.
ll <- get_log_likelihood(lda)
ll
hll <- get_held_out_log_likelihood(lda)
hll

# Long-format likelihood table and trace plot.
stats <- data.frame(iter=(1:(length(ll)))*10,loglikelihood=ll,heldout_likelihood=hll)
stats %<>% tidyr::gather(type,value,-iter)
ggplot(stats,aes(x=iter,y=value, color=type)) + geom_line() + theme_bw()

# Console report of the most prominent words per topic.
cat("Top Words:\n")
cat(print_top_words(get_topwords(lda)))
cat("Top Relevance Words:\n")
cat(print_top_words(get_top_relevance_words(lda,cnf)))
#' Compute sulfate/nitrate correlations across monitor CSV files.
#'
#' @param directory Character scalar: folder containing the monitor CSVs.
#' @param threshold Minimum number of completely observed rows a monitor
#'   needs before its correlation is computed (default 0).
#' @return Numeric vector of correlations (length 0 if no monitor qualifies).
corr <- function(directory, threshold = 0) {
  # Load every CSV in the directory and stack them with base rbind.
  # Fix: the original called data.table::rbindlist (and used data.table's
  # single-index row filter) without ever loading data.table.
  mons <- dir(directory, pattern = "\\.csv$", full.names = TRUE)
  mons_data <- lapply(mons, read.csv)
  mons_data_comb <- do.call(rbind, mons_data)

  # Keep only rows observed on all variables.
  mons_complete <- mons_data_comb[complete.cases(mons_data_comb), ]

  # The unique monitor IDs present in the complete rows.
  id <- unique(mons_complete$ID)

  # Count complete observations per monitor.
  # Fix: lapply returned a list, and `list >= threshold` is not a valid
  # comparison in R; vapply yields the numeric vector that was intended.
  nobs <- vapply(id, function(id_val) sum(mons_complete$ID == id_val), numeric(1))

  # Monitors meeting the threshold.
  id_ref <- id[nobs >= threshold]

  # Only do the correlation if there's data to correlate.
  if (length(id_ref) == 0) {
    return(id_ref)
  }

  # Correlate sulfate and nitrate for each remaining monitor.
  corr_pollutants <- function(id_val) {
    dat <- subset(mons_complete, ID == id_val, select = c(sulfate, nitrate))
    cor(dat[[1]], dat[[2]])
  }
  vapply(id_ref, corr_pollutants, numeric(1))
}
/R_prj1/corr.R
no_license
eyedvabny/coursera-ds-main
R
false
false
1,409
r
# Computes, for each monitor CSV in `directory`, the correlation between
# sulfate and nitrate over completely observed rows, keeping only monitors
# with at least `threshold` complete observations.
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0
  ## Return a numeric vector of correlations

  # Load up the CSV datasets
  # NOTE(review): rbindlist() belongs to data.table, which is never loaded
  # here -- confirm the caller attaches it before use.
  mons <- dir(directory,pattern="*.csv",full.name=TRUE)
  mons_data <- lapply(mons, read.csv)
  mons_data_comb <- rbindlist(mons_data)

  # Filter out all the incompletes
  # (data.table-style single-index row filter, no trailing comma)
  mons_complete <- mons_data_comb[complete.cases(mons_data_comb)]

  # Find out the unique IDs
  id<-unique(mons_complete$ID)

  # Count the number of unique elements
  # (Use a custom function for lapply)
  count_ent <- function(id_val,lst){
    sum(lst$ID==id_val)
  }
  # NOTE(review): lapply returns a list, and comparing a list with `>=`
  # below is not a valid R comparison -- sapply/vapply was likely intended.
  nobs <- lapply(id,count_ent,mons_complete)

  # Filter monitors with nobs below the threshold
  id_ref = id[nobs>=threshold]

  # Only do the correlation if there's data to correlate
  if (length(id_ref) == 0){
    return(id_ref)
  }else{
    # Run correlation for each remaining monitor
    corr_pollutants <- function(id_val,mon_data){
      dat <- subset(mon_data,ID==id_val,select=c(sulfate,nitrate))
      cor(dat[[1]],dat[[2]])
    }
    unlist(lapply(id_ref,corr_pollutants,mons_complete))
  }
}
# Package load hook: runs once when the package is attached via library().
# `libname`/`pkgname` are supplied by R and intentionally unused here.
# Uses packageStartupMessage() so the note can be silenced with
# suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname) {
  packageStartupMessage("Thanks to use naverapi package. for more information about Naver Open API, visit to 'https://developers.naver.com'")
}
/R/zzz.R
no_license
KimByoungmo/naverapi
R
false
false
186
r
# Attach hook: emits a one-time greeting pointing users at the Naver Open API
# developer portal.  packageStartupMessage() is used (rather than message())
# so callers can mute it with suppressPackageStartupMessages().
.onAttach <- function(libname, pkgname) {
  greeting <- "Thanks to use naverapi package. for more information about Naver Open API, visit to 'https://developers.naver.com'"
  packageStartupMessage(greeting)
}
# Fits linear and quadratic models of Ozone on Temperature using the base
# airquality data set, and plots both fits.
library(datasets)

# Keep only rows where both Ozone and Temp are observed.
x <- cbind(airquality$Ozone, airquality$Temp)
y <- as.data.frame(x[complete.cases(x), ])
names(y) <- c("Ozone", "Temp")

# Predictor (Temp) and response (Ozone) vectors.
# (Names follow the original tutorial's Time/Counts convention.)
Time <- as.vector(y$Temp)
Counts <- as.vector(y$Ozone)
length(Time)
length(Counts)

# Fix: the original attach(y) was redundant -- Time and Counts are already
# extracted above -- and it polluted the search path with no matching
# detach(); it has been removed.
names(y)

# Linear fit plus scatter plot with the regression line.
linear.model <- lm(Counts ~ Time)
plot(Time, Counts, pch = 16, ylab = "Ozone Level", cex.lab = 1.3, col = "red")
# Reuse the already-fitted model instead of refitting lm(Counts ~ Time).
abline(linear.model, col = "blue")

# Quadratic fit: add a squared term and overlay the predicted curve.
Time2 <- Time^2
quadratic.model <- lm(Counts ~ Time + Time2)
timevalues <- seq(0, 100, 0.1)
predictedcounts <- predict(quadratic.model, list(Time = timevalues, Time2 = timevalues^2))
plot(Time, Counts, pch = 16, xlab = "Temp(s)", ylab = "Ozone Level",
     cex.lab = 1.3, col = "blue", main = "Ozone Level over Temperature")
lines(timevalues, predictedcounts, col = "darkgreen", lwd = 3)
# Annotate the fitted equation on the plot.
text(60, 130, expression("Ozone Level = 305.48577 - 9.5060(Temp) + .07708(Temp)"^2), cex = .6, adj = 0)
/R/Quadratic Formula.R
no_license
BJWiley233/Practical-Computer-Concepts-Files
R
false
false
1,297
r
# Fits linear and quadratic models of Ozone on Temp (airquality data set)
# and plots both fits with the fitted quadratic equation annotated.
library(datasets)
##lm <- lsfit(x = airquality$Ozone, y = airquality$Temp*airquality$Temp)
##with(airquality, plot(Ozone, Temp))
##abline(lm)

# Keep only rows where both Ozone and Temp are observed.
x <- cbind(airquality$Ozone, airquality$Temp)
y <- as.data.frame(x[complete.cases(x), ])
names(y) <- c("Ozone", "Temp")

# Predictor (Temp) and response (Ozone) vectors.
Time <- as.vector(y$Temp)
Counts <- as.vector(y$Ozone)
length(Time)
length(Counts)

# NOTE(review): attach() is redundant here (Time/Counts are already
# extracted above) and is never paired with detach() -- confirm removable.
attach(y)
names(y)

# Linear fit and scatter plot with the regression line.
linear.model <-lm(Counts ~ Time)
plot(Time, Counts, pch=16, ylab = "Ozone Level", cex.lab = 1.3, col = "red" )
abline(lm(Counts ~ Time), col = "blue")

# Quadratic fit: add a squared term and overlay the predicted curve.
Time2 <- Time^2
quadratic.model <-lm(Counts ~ Time + Time2)
timevalues <- seq(0, 100, 0.1)
predictedcounts <- predict(quadratic.model,list(Time=timevalues, Time2=timevalues^2))
plot(Time, Counts, pch=16, xlab = "Temp(s)", ylab = "Ozone Level", cex.lab = 1.3, col = "blue", main = "Ozone Level over Temperature")
lines(timevalues, predictedcounts, col = "darkgreen", lwd = 3)
# Annotate the fitted quadratic equation on the plot.
text(60, 130, expression("Ozone Level = 305.48577 - 9.5060(Temp) + .07708(Temp)"^2), cex = .6, adj=0)

# Scratch snippets kept from earlier experiments:
# plot(1:10, 1:10, yaxt="n", ylab="");
# mtext("Title", side=3, adj=1, line=1.2, cex=2, font=2);
# axis(2, las=1)
#
# plot(1:10)
# legend('topleft', expression(4^th*"-root transformation"))
#
# rsquarelm2 <- 0.855463
#
# text(5, 5, bquote(R^2))
# ?mtext
% % Copyright 2007-2018 by the individuals mentioned in the source code history % % Licensed under the Apache License, Version 2.0 (the "License"); % you may not use this file except in compliance with the License. % You may obtain a copy of the License at % % http://www.apache.org/licenses/LICENSE-2.0 % % Unless required by applicable law or agreed to in writing, software % distributed under the License is distributed on an "AS IS" BASIS, % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % See the License for the specific language governing permissions and % limitations under the License. \name{mxBootstrapEval} \alias{mxBootstrapEval} \alias{omxBootstrapEvalCov} \alias{omxBootstrapEval} \alias{omxBootstrapEvalByName} \alias{mxBootstrapEvalByName} \title{Evaluate Values in a bootstrapped MxModel} \description{ This function can be used to evaluate an arbitrary R expression that includes named entities from a \link{MxModel} object, or labels from a \link{MxMatrix} object. } \usage{ mxBootstrapEval(expression, model, defvar.row = 1, ..., bq=c(.25,.75), method=c('bcbci','quantile')) mxBootstrapEvalByName(name, model, defvar.row = 1, ..., bq=c(.25,.75), method=c('bcbci','quantile')) omxBootstrapEval(expression, model, defvar.row = 1L, ...) omxBootstrapEvalCov(expression, model, defvar.row = 1L, ...) omxBootstrapEvalByName(name, model, defvar.row=1L, ...) } \arguments{ \item{expression}{An arbitrary R expression.} \item{name}{The character name of an object to evaluate.} \item{model}{The model in which to evaluate the expression.} \item{defvar.row}{The row to use for definition variables when compute=TRUE (defaults to 1). When compute=FALSE, values for definition variables are always taken from the first (i.e., first before any automated sorting is done) row of the raw data.} \item{...}{Not used. Forces remaining arguments to be specified by name.} \item{bq}{numeric. 
A vector of bootstrap quantiles at which to summarize the bootstrap replication.} \item{method}{character. One of \sQuote{quantile} or \sQuote{bcbci}.} } \details{ The argument \sQuote{expression} is an arbitrary R expression. Any named entities that are used within the R expression are translated into their current value from the model. Any labels from the matrices within the model are translated into their current value from the model. Finally the expression is evaluated and the result is returned. To enable debugging, the \sQuote{show} argument has been provided. The most common mistake when using this function is to include named entities in the model that are identical to R function names. For example, if a model contains a named entity named \sQuote{c}, then the following mxEval call will return an error: \code{mxEval(c(A, B, C), model)}. The \code{mxEvalByName} function is a wrapper around \code{mxEval} that takes a character instead of an R expression. \emph{nb}: \sQuote{bcbci} stands for \sQuote{bias-corrected bootstrap confidence interval} The default behavior is to use the \sQuote{bcbci} \code{method}, due to its superior theoretical properties. } \value{ \code{omxBootstrapEval} and \code{omxBootstrapEvalByName} return the raw matrix of \code{cvectorize}'d results. \code{omxBootstrapEvalCov} returns the covariance matrix of the \code{cvectorize}'d results. \code{mxBootstrapEval} and \code{mxBootstrapEvalByName} return the \code{cvectorize}'d results summarized by \code{method} at quantiles \code{bq}. } \references{ The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation. } \seealso{ \link{mxAlgebra} to create algebraic expressions inside your model and \link{mxModel} for the model object mxEval looks inside when evaluating. \link{mxBootstrap} to create bootstrap data. 
} \examples{ library(OpenMx) # make a unit-weighted 10-row data set of values 1 thru 10 myData = mxData(data.frame(weight=1.0, value=1:10), "raw", weight = "weight") sum(1:10) # Model sums data$value (sum(1:10)= 55), subtracts "A", squares the result, # and tries to minimize this (achieved by setting A=55) testModel = mxModel(model = "testModel1", myData, mxMatrix(name = "A", "Full", nrow = 1, ncol = 1, values = 1, free=TRUE), # nb: filteredDataRow is an auto-generated matrix of # non-missing data from the present row. # This is placed into the "rowResults" matrix (also auto-generated) mxAlgebra(name = "rowAlg", data.weight * filteredDataRow), # Algebra to turn the rowResults into a single number mxAlgebra(name = "reduceAlg", (sum(rowResults) - A)^2), mxFitFunctionRow( rowAlgebra = "rowAlg", reduceAlgebra = "reduceAlg", dimnames = "value" ) # no need for an MxExpectation object when using mxFitFunctionRow ) testModel = mxRun(testModel) # A is estimated at 55, with SE= 1 testBoot = mxBootstrap(testModel) summary(testBoot) # A is estimated at 55, with SE= 0 # Let's compute A^2 (55^2 = 3025) mxBootstrapEval(A^2, testBoot) # SE 25.0% 75.0% # [1,] 0 3025 3025 }
/man/mxBootstrapEval.Rd
no_license
bgoodri/OpenMx
R
false
false
5,041
rd
% % Copyright 2007-2018 by the individuals mentioned in the source code history % % Licensed under the Apache License, Version 2.0 (the "License"); % you may not use this file except in compliance with the License. % You may obtain a copy of the License at % % http://www.apache.org/licenses/LICENSE-2.0 % % Unless required by applicable law or agreed to in writing, software % distributed under the License is distributed on an "AS IS" BASIS, % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % See the License for the specific language governing permissions and % limitations under the License. \name{mxBootstrapEval} \alias{mxBootstrapEval} \alias{omxBootstrapEvalCov} \alias{omxBootstrapEval} \alias{omxBootstrapEvalByName} \alias{mxBootstrapEvalByName} \title{Evaluate Values in a bootstrapped MxModel} \description{ This function can be used to evaluate an arbitrary R expression that includes named entities from a \link{MxModel} object, or labels from a \link{MxMatrix} object. } \usage{ mxBootstrapEval(expression, model, defvar.row = 1, ..., bq=c(.25,.75), method=c('bcbci','quantile')) mxBootstrapEvalByName(name, model, defvar.row = 1, ..., bq=c(.25,.75), method=c('bcbci','quantile')) omxBootstrapEval(expression, model, defvar.row = 1L, ...) omxBootstrapEvalCov(expression, model, defvar.row = 1L, ...) omxBootstrapEvalByName(name, model, defvar.row=1L, ...) } \arguments{ \item{expression}{An arbitrary R expression.} \item{name}{The character name of an object to evaluate.} \item{model}{The model in which to evaluate the expression.} \item{defvar.row}{The row to use for definition variables when compute=TRUE (defaults to 1). When compute=FALSE, values for definition variables are always taken from the first (i.e., first before any automated sorting is done) row of the raw data.} \item{...}{Not used. Forces remaining arguments to be specified by name.} \item{bq}{numeric. 
A vector of bootstrap quantiles at which to summarize the bootstrap replication.} \item{method}{character. One of \sQuote{quantile} or \sQuote{bcbci}.} } \details{ The argument \sQuote{expression} is an arbitrary R expression. Any named entities that are used within the R expression are translated into their current value from the model. Any labels from the matrices within the model are translated into their current value from the model. Finally the expression is evaluated and the result is returned. To enable debugging, the \sQuote{show} argument has been provided. The most common mistake when using this function is to include named entities in the model that are identical to R function names. For example, if a model contains a named entity named \sQuote{c}, then the following mxEval call will return an error: \code{mxEval(c(A, B, C), model)}. The \code{mxEvalByName} function is a wrapper around \code{mxEval} that takes a character instead of an R expression. \emph{nb}: \sQuote{bcbci} stands for \sQuote{bias-corrected bootstrap confidence interval} The default behavior is to use the \sQuote{bcbci} \code{method}, due to its superior theoretical properties. } \value{ \code{omxBootstrapEval} and \code{omxBootstrapEvalByName} return the raw matrix of \code{cvectorize}'d results. \code{omxBootstrapEvalCov} returns the covariance matrix of the \code{cvectorize}'d results. \code{mxBootstrapEval} and \code{mxBootstrapEvalByName} return the \code{cvectorize}'d results summarized by \code{method} at quantiles \code{bq}. } \references{ The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation. } \seealso{ \link{mxAlgebra} to create algebraic expressions inside your model and \link{mxModel} for the model object mxEval looks inside when evaluating. \link{mxBootstrap} to create bootstrap data. 
} \examples{ library(OpenMx) # make a unit-weighted 10-row data set of values 1 thru 10 myData = mxData(data.frame(weight=1.0, value=1:10), "raw", weight = "weight") sum(1:10) # Model sums data$value (sum(1:10)= 55), subtracts "A", squares the result, # and tries to minimize this (achieved by setting A=55) testModel = mxModel(model = "testModel1", myData, mxMatrix(name = "A", "Full", nrow = 1, ncol = 1, values = 1, free=TRUE), # nb: filteredDataRow is an auto-generated matrix of # non-missing data from the present row. # This is placed into the "rowResults" matrix (also auto-generated) mxAlgebra(name = "rowAlg", data.weight * filteredDataRow), # Algebra to turn the rowResults into a single number mxAlgebra(name = "reduceAlg", (sum(rowResults) - A)^2), mxFitFunctionRow( rowAlgebra = "rowAlg", reduceAlgebra = "reduceAlg", dimnames = "value" ) # no need for an MxExpectation object when using mxFitFunctionRow ) testModel = mxRun(testModel) # A is estimated at 55, with SE= 1 testBoot = mxBootstrap(testModel) summary(testBoot) # A is estimated at 55, with SE= 0 # Let's compute A^2 (55^2 = 3025) mxBootstrapEval(A^2, testBoot) # SE 25.0% 75.0% # [1,] 0 3025 3025 }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods-optimizer-player-setting.R \name{update_fpts,optimizer-method} \alias{update_fpts,optimizer-method} \alias{update_fpts} \title{Method for updating fantasy points in an object} \usage{ \S4method{update_fpts}{optimizer}(object, fpts_data) } \arguments{ \item{object}{An object of class Optimizer} \item{fpts_data}{a data.frame containing players and points. See details.} } \value{ Updated optimizer object } \description{ Method for updating fantasy points in an object } \details{ The data.frame passed in fpts_data must contain two columns - \code{id} and \code{fpts}. }
/man/update_fpts-optimizer-method.Rd
permissive
jkope892/dfsOptimizer
R
false
true
659
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods-optimizer-player-setting.R \name{update_fpts,optimizer-method} \alias{update_fpts,optimizer-method} \alias{update_fpts} \title{Method for updating fantasy points in an object} \usage{ \S4method{update_fpts}{optimizer}(object, fpts_data) } \arguments{ \item{object}{An object of class Optimizer} \item{fpts_data}{a data.frame containing players and points. See details.} } \value{ Updated optimizer object } \description{ Method for updating fantasy points in an object } \details{ The data.frame passed in fpts_data must contain two columns - \code{id} and \code{fpts}. }
# Builds a FASTA file of all 11-residue HLA-I epitopes from an IEDB export
# and records the consensus 11-mer.
library(tidyverse)
library(seqinr)

# First spreadsheet row is a header banner, hence skip = 1.
df_IEDB_binders <- read_csv(file = "IEDB_HLAI_epitopes.csv", skip = 1)

# Keep epitopes whose sequence is exactly 11 residues long.
df_IEDB_11mers <- df_IEDB_binders %>%
  filter(nchar(Description) == 11)

# Fix: length() of a data.frame is its COLUMN count, so the original
# `names = 1:length(df_IEDB_11mers)` produced one name per column instead of
# one per sequence; enumerate the rows instead.
write.fasta(as.list(df_IEDB_11mers$Description),
            names = seq_len(nrow(df_IEDB_11mers)),
            file.out = "IEDB_11mers.fasta")

# Consensus sequence of the 11-mers (precomputed).
IEDB_11mer_consensus <- "RLLDLSGLLLL"
/TCR_pMHC_binding/get_consensus.R
no_license
andy90/mutations_immunogenicity
R
false
false
339
r
# Builds a FASTA file of all 11-residue HLA-I epitopes from an IEDB export.
library(tidyverse)
library(seqinr)

# First spreadsheet row is a header banner, hence skip = 1.
df_IEDB_binders <- read_csv(file = "IEDB_HLAI_epitopes.csv", skip = 1)

# Keep epitopes whose sequence is exactly 11 residues long.
df_IEDB_11mers <- df_IEDB_binders %>% filter(nchar(Description) == 11)

# NOTE(review): length() of a data.frame is its number of COLUMNS, so the
# `names` vector here enumerates columns, not one name per sequence written
# to the FASTA file; nrow() was probably intended -- confirm.
write.fasta(as.list(df_IEDB_11mers$Description), names = 1:length(df_IEDB_11mers), file.out = "IEDB_11mers.fasta")

# Consensus sequence of the 11-mers (precomputed).
IEDB_11mer_consensus <- "RLLDLSGLLLL"
##############################################################################
### Table A.8: Regression of prosocial behavior on destruction controlling ###
### for confounders (Manski bound imputation for migrants)                 ###
##############################################################################
# Replication script: reads the Kyrgyzstan survey (Stata format), recodes
# treatment/outcome variables, imputes Manski bounds for respondents who
# moved house, and regresses each prosocial-behavior outcome on destruction.

rm(list=ls())

# load libraries
library(readstata13)
library(stargazer)

# read data
data <- read.dta13("./kyrgyzstan.dta")

### Some data cleaning
data$social_cap_retro <- data$leadership
# 0 = always lived in this house, 1 = moved house, 99 = anything else.
# NOTE(review): the trailing space in "Moved house " matches the raw Stata
# labels -- confirm against the data before changing it.
data$movedhouse <- ifelse(data$movedhouse=="Always lived in this house",0,
                          ifelse(data$movedhouse=="Moved house ",1,99))
data$migrateoutyear_after2010 <- ifelse(data$migrateoutyear>2010,1,0)

# recode variables (factor levels start at 1; shift to 0/1 coding)
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
data$pd_in <- as.integer(data$pd_in) - 1
data$pd_out <- as.integer(data$pd_out) - 1

# Fix: the original assigned data$social_cap_retro <- data$leadership a
# second time here (under "rename variable"); the duplicate was redundant
# and has been removed.

# subset data set according to ethnic groups
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]

# scale variables
data_uzbek$pd_in_scale <- scale(data_uzbek$pd_in)
data_uzbek$dg_in_scale <- scale(data_uzbek$dg_in)
data_uzbek$pd_out_scale <- scale(data_uzbek$pd_out)
data_uzbek$dg_out_scale <- scale(data_uzbek$dg_out)

# When did you live abroad last time?
data_uzbek_affected <- data_uzbek[which(data_uzbek$affected==1),]

##############
### Manski ###
##############
# Manski bounds: movers are imputed at the outcome extremes -- affected
# movers get the maximum (1 for PD, 100 for DG), unaffected movers the
# minimum (0); non-movers keep their observed values.  All rescaled.
data_uzbek$pd_in_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 1,
                                        ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$pd_in)))
data_uzbek$dg_in_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 100,
                                        ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$dg_in)))
data_uzbek$pd_out_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 1,
                                         ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$pd_out)))
data_uzbek$dg_out_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 100,
                                         ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$dg_out)))

# Composite cooperation index: mean of the four Manski-imputed outcomes.
data_uzbek$cooperation_index_manski <- rowSums(cbind(data_uzbek$pd_in_manski, data_uzbek$dg_in_manski,
                                                     data_uzbek$pd_out_manski, data_uzbek$dg_out_manski), na.rm=T)/4

# One OLS model per outcome, each controlling for the same confounders.
model1 <- lm(pd_in_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model2 <- lm(dg_in_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model3 <- lm(pd_out_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model4 <- lm(dg_out_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model5 <- lm(cooperation_index_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)

# LaTeX regression table (Table A.8).
stargazer(model1, model2, model3, model4, model5,
          covariate.labels = c("Destruction", "Wealth index", "State capacity index", "Community policing index"),
          dep.var.labels = c("Cooperation in Prisoner's Dilemma", "Investment in Dictator Game",
                             "Cooperation in Prisoner's Dilemma", "Investment in Dictator Game" , "Cooperation-Index"),
          star.char = c("*", "**", "***"),
          star.cutoffs = c(0.05, 0.01, 0.001))
/Original Paper and Code/Original Code/TableA8.R
no_license
CianStryker/Prosocial_Behavior
R
false
false
3,286
r
##############################################################################
### Table A.8: Regression of prosocial behavior on destruction controlling ###
### for confounders (Manski bound imputation for migrants)                 ###
##############################################################################
# Replication script: reads the Kyrgyzstan survey (Stata format), recodes
# treatment/outcome variables, imputes Manski bounds for respondents who
# moved house, and regresses each prosocial-behavior outcome on destruction.

rm(list=ls())

# load libraries
library(readstata13)
library(stargazer)

# read data
data <- read.dta13("./kyrgyzstan.dta")

### Some data cleaning
data$social_cap_retro <- data$leadership
# 0 = always lived in this house, 1 = moved house, 99 = anything else.
# NOTE(review): the trailing space in "Moved house " presumably matches the
# raw Stata labels -- confirm against the data.
data$movedhouse <- ifelse(data$movedhouse=="Always lived in this house",0,
                          ifelse(data$movedhouse=="Moved house ",1,99))
data$migrateoutyear_after2010 <- ifelse(data$migrateoutyear>2010,1,0)

# recode variables (factor levels start at 1; shift to 0/1 coding)
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
data$pd_in <- as.integer(data$pd_in) - 1
data$pd_out <- as.integer(data$pd_out) - 1

# rename variable
# NOTE(review): this repeats the identical assignment made above under
# "Some data cleaning"; the duplicate is harmless but redundant.
data$social_cap_retro <- data$leadership

# subset data set according to ethnic groups
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]

# scale variables
data_uzbek$pd_in_scale <- scale(data_uzbek$pd_in)
data_uzbek$dg_in_scale <- scale(data_uzbek$dg_in)
data_uzbek$pd_out_scale <- scale(data_uzbek$pd_out)
data_uzbek$dg_out_scale <- scale(data_uzbek$dg_out)

# When did you live abroad last time?
data_uzbek_affected <- data_uzbek[which(data_uzbek$affected==1),]

##############
### Manski ###
##############
# Manski bounds: movers are imputed at the outcome extremes -- affected
# movers get the maximum (1 for PD, 100 for DG), unaffected movers the
# minimum (0); non-movers keep their observed values.  All rescaled.
data_uzbek$pd_in_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 1,
                                        ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$pd_in)))
data_uzbek$dg_in_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 100,
                                        ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$dg_in)))
data_uzbek$pd_out_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 1,
                                         ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$pd_out)))
data_uzbek$dg_out_manski <- scale(ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==1, 100,
                                         ifelse(data_uzbek$movedhouse==1 & data_uzbek$affected==0, 0, data_uzbek$dg_out)))

# Composite cooperation index: mean of the four Manski-imputed outcomes.
data_uzbek$cooperation_index_manski <- rowSums(cbind(data_uzbek$pd_in_manski, data_uzbek$dg_in_manski,
                                                     data_uzbek$pd_out_manski, data_uzbek$dg_out_manski), na.rm=T)/4

# One OLS model per outcome, each with the same confounder controls.
model1 <- lm(pd_in_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model2 <- lm(dg_in_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model3 <- lm(pd_out_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model4 <- lm(dg_out_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)
model5 <- lm(cooperation_index_manski ~ affected + economy_index + state_index + social_cap_retro, data=data_uzbek)

# LaTeX regression table (Table A.8).
stargazer(model1, model2, model3, model4, model5,
          covariate.labels = c("Destruction", "Wealth index", "State capacity index", "Community policing index"),
          dep.var.labels = c("Cooperation in Prisoner's Dilemma", "Investment in Dictator Game",
                             "Cooperation in Prisoner's Dilemma", "Investment in Dictator Game" , "Cooperation-Index"),
          star.char = c("*", "**", "***"),
          star.cutoffs = c(0.05, 0.01, 0.001))
####### Functions ##### Function 1: Make a summary of the number of genes of each scRNA-seq cell type summary_background<-function(single_cell, list_clouds, output) { #selected<-single_cell[single_cell$cluster %in% list_clouds,] #clouds<-as.vector(selected$cluster) #clouds<-sort(unique(clouds)) #list_clouds<-sort(as.character(unique(selected_smedwi$cluster))) #single_cell<-selected_smedwi clouds<-list_clouds total<-single_cell[single_cell$cluster %in% list_clouds,] clusterx_single<-total #Ahora creamos la matrix donde vamos a ir guardando todo row_total<-sort(unique(as.character(total$ID))) col_total<-clouds total_x<-matrix(data=1, ncol = length(col_total), nrow = length(row_total)) colnames(total_x)<-col_total rownames(total_x)<-row_total #Teniendo ahora la referencia, podemos empezar a llenar la matrix for (i in 1:length(row_total)) { x<-total[total$ID%in% (row_total[i]),] sub_x<-sort(unique(as.character(x$cluster))) values_total<-match(col_total,sub_x) values_total [!is.na(values_total)]<-1 values_total [is.na(values_total)]<-0 total_x [i,]<-values_total } summary<-t(as.matrix(colSums(total_x))) rownames(summary)<-"#genes in each single cell cluster" matriz<-matrix(data = NA, ncol = length(list_clouds), nrow = 1) colnames(matriz)<-list_clouds rownames(matriz)<-c("Number of genes in each single cell cluster") for(i in 1:length(list_clouds)) { ##Quitar duplicados de las tablas clusterx_single1<- clusterx_single[ clusterx_single$cluster %in% list_clouds[i],] clusterx_single1<-clusterx_single1[!duplicated(clusterx_single1$ID),] num<-length(which(clusterx_single1$cluster==list_clouds[i])) matriz[1,i]<-num } if(output==1){ return(matriz) } if(output==2) { return(total_x) } } ##### Function 2 Enrichment enrichment<- function (background, summary, summary_A, cells_FULL, clusterA) { mhyper_A<-matrix(data=NA, ncol = dim(summary)[2], nrow=1) colnames(mhyper_A)<-cells_FULL rownames(mhyper_A)<-c("A") #Here you can put the name of the cluster p_values<-vector() 
universe<-dim(background)[1] #background for (i in 1:dim(summary)[2]) { white<-summary[i] black<-universe-white white_inSample<-summary_A[i] sample_size<-dim(clusterA)[1] math<-phyper(white_inSample-1, white, black, sample_size, lower.tail = FALSE) p_values<-append(p_values, math) } mhyper_A[1,]<-p_values return(mhyper_A) } ######## Function 3: Overlap overlap<-function (binary_m, summary_cluster, summary_general, background, clusterx, pvalue, enrichment_logical_adj, list_clouds, output) { A_list<-vector() binary_m_df<-as.data.frame(binary_m) for (i in 1:dim(binary_m)[2]){ x<-subset(binary_m_df, binary_m_df[,i]==1) x_list<-rownames(x) A_list<-append(A_list, x_list) } m_A<-matrix(data = NA, ncol = 2, nrow = length(A_list)) colnames(m_A)<-c("ID", "cluster") m_A[,1]<-A_list m_A[,2]<-rep(c(colnames(summary_cluster)), c(summary_cluster[1,])) ####### CLUSTER A df<-as.data.frame(m_A) populations<-as.character(unique(df$cluster)) genes_populations<-matrix(data = NA, ncol = length(populations), nrow = length(populations)) colnames(genes_populations)<-populations rownames(genes_populations)<-populations vector<-vector() for (i in 1:length(populations)) { for (j in 1:length(populations)) { selected1<-df[df$cluster %in% populations[i],] n1<-unique(selected1$ID) set1<-n1 selected2<-df[df$cluster %in% populations[j],] n2<-unique(selected2$ID) set2<-n2 overlap <- sum(set1 %in% set2)/length(set1) #what fraction of set1 is in set2 genes_populations[j,i] <- overlap } } A_genes_populations_percentage<- genes_populations * 100 df<-as.data.frame(m_A) populations<-as.character(unique(df$cluster)) genes_populations<-matrix(data = NA, ncol = length(populations), nrow = length(populations)) colnames(genes_populations)<-populations rownames(genes_populations)<-populations vector<-vector() for (i in 1:length(populations)) { for (j in 1:length(populations)) { selected1<-df[df$cluster %in% populations[i],] n1<-unique(selected1$ID) set1<-n1 selected2<-df[df$cluster %in% populations[j],] 
n2<-unique(selected2$ID) set2<-n2 overlap <- sum(set1 %in% set2) #what fraction of set1 is in set2 genes_populations[j,i] <- overlap } } A_genes_populations_interseccion<- genes_populations ######## Function 4: Enrichment of the overlap of genes across scRNA-seq cell types summary_clusterhypo <-as.data.frame(summary_general) keeps <- colnames(A_genes_populations_percentage) summary_clusterhypo<-summary_clusterhypo[keeps] summary_clusterhypo<-as.matrix(summary_clusterhypo) mhyper_A<-matrix(data=NA, ncol = dim(summary_clusterhypo)[2], nrow=dim(summary_clusterhypo)[2]) colnames(mhyper_A)<-colnames(A_genes_populations_percentage) rownames(mhyper_A)<-colnames(A_genes_populations_percentage) universe<-dim(background)[1] for (i in 1:dim(summary_clusterhypo)[2]) { p_values<-vector() for (j in 1:dim(summary_clusterhypo)[2]) { white<-summary_clusterhypo[j] black<-universe-white white_inSample<-A_genes_populations_interseccion[j,i] sample_size<-dim(clusterx)[1] math<-phyper(white_inSample-1, white, black, sample_size, lower.tail = FALSE) p_values<-append(p_values, math) } mhyper_A[,i]<-p_values } #### Heatmap ###### all_names<-colnames(summary_general) your_name<-colnames(mhyper_A) diff<-setdiff(all_names, your_name) the_name<-append(your_name, diff) enrichment_logical<-as.matrix((mhyper_A)) a<-dim(enrichment_logical)[1] k<-vector() enrichment_logical2<-matrix(data=NA, ncol = length(the_name), nrow = length(the_name)) colnames(enrichment_logical2)<-the_name rownames(enrichment_logical2)<-the_name for (i in 1:a) { k<-enrichment_logical[,i] k<-append(k, rep(1,length(diff))) k<-p.adjust(k ) enrichment_logical2[,i]<-k } if(a!=length(the_name)) { for(j in (i+1):dim(enrichment_logical2)[1]) { k<-vector() k<-rep(1,dim(enrichment_logical2)[1]) enrichment_logical2[,j]<-k } } hypo_enrichment_A<-enrichment_logical2 ####### Cluster A ###### enrichment<-as.data.frame(hypo_enrichment_A) enrichment<-enrichment[order(rownames(enrichment)),] enrichment<-enrichment[ 
,order(colnames(enrichment))] clusterA_summary<-matrix(data = NA, ncol = 4, nrow = length(list_clouds)) colnames(clusterA_summary)<-c("Cluster", "Population", "Enrichment(adj pvalue)","Possible identities") clusterA_summary[,1]<-rep("Cluster", length(names)) clusterA_summary[,2]<-list_clouds clusterA_summary[,3]<-enrichment_logical_adj[1,] for(i in 1:length(list_clouds)) { n<-subset(enrichment, enrichment[,i]<=pvalue) n<-rownames(n) if (length(n)!=0){ if(length(n)==1){ if (n==as.character(clusterA_summary[i,2])) { clusterA_summary[i,4]<-paste(n, "is/are enriched") } }else { clusterA_summary[i,4]<-paste(n, collapse = ",") } } else { clusterA_summary[i,4]<-paste("No other possible enrichments") } } if(output==1) { return(clusterA_summary) } if(output==2) { return(A_genes_populations_interseccion) } if(output==3) { return(mhyper_A) } if(output==4) { return(hypo_enrichment_A) } if(output==5) { return(A_genes_populations_percentage) } } #end of function
/191125_functions_SCREAM.R
no_license
AnaliMigueles/SCREAM
R
false
false
7,697
r
####### Functions ##### Function 1: Make a summary of the number of genes of each scRNA-seq cell type summary_background<-function(single_cell, list_clouds, output) { #selected<-single_cell[single_cell$cluster %in% list_clouds,] #clouds<-as.vector(selected$cluster) #clouds<-sort(unique(clouds)) #list_clouds<-sort(as.character(unique(selected_smedwi$cluster))) #single_cell<-selected_smedwi clouds<-list_clouds total<-single_cell[single_cell$cluster %in% list_clouds,] clusterx_single<-total #Ahora creamos la matrix donde vamos a ir guardando todo row_total<-sort(unique(as.character(total$ID))) col_total<-clouds total_x<-matrix(data=1, ncol = length(col_total), nrow = length(row_total)) colnames(total_x)<-col_total rownames(total_x)<-row_total #Teniendo ahora la referencia, podemos empezar a llenar la matrix for (i in 1:length(row_total)) { x<-total[total$ID%in% (row_total[i]),] sub_x<-sort(unique(as.character(x$cluster))) values_total<-match(col_total,sub_x) values_total [!is.na(values_total)]<-1 values_total [is.na(values_total)]<-0 total_x [i,]<-values_total } summary<-t(as.matrix(colSums(total_x))) rownames(summary)<-"#genes in each single cell cluster" matriz<-matrix(data = NA, ncol = length(list_clouds), nrow = 1) colnames(matriz)<-list_clouds rownames(matriz)<-c("Number of genes in each single cell cluster") for(i in 1:length(list_clouds)) { ##Quitar duplicados de las tablas clusterx_single1<- clusterx_single[ clusterx_single$cluster %in% list_clouds[i],] clusterx_single1<-clusterx_single1[!duplicated(clusterx_single1$ID),] num<-length(which(clusterx_single1$cluster==list_clouds[i])) matriz[1,i]<-num } if(output==1){ return(matriz) } if(output==2) { return(total_x) } } ##### Function 2 Enrichment enrichment<- function (background, summary, summary_A, cells_FULL, clusterA) { mhyper_A<-matrix(data=NA, ncol = dim(summary)[2], nrow=1) colnames(mhyper_A)<-cells_FULL rownames(mhyper_A)<-c("A") #Here you can put the name of the cluster p_values<-vector() 
universe<-dim(background)[1] #background for (i in 1:dim(summary)[2]) { white<-summary[i] black<-universe-white white_inSample<-summary_A[i] sample_size<-dim(clusterA)[1] math<-phyper(white_inSample-1, white, black, sample_size, lower.tail = FALSE) p_values<-append(p_values, math) } mhyper_A[1,]<-p_values return(mhyper_A) } ######## Function 3: Overlap overlap<-function (binary_m, summary_cluster, summary_general, background, clusterx, pvalue, enrichment_logical_adj, list_clouds, output) { A_list<-vector() binary_m_df<-as.data.frame(binary_m) for (i in 1:dim(binary_m)[2]){ x<-subset(binary_m_df, binary_m_df[,i]==1) x_list<-rownames(x) A_list<-append(A_list, x_list) } m_A<-matrix(data = NA, ncol = 2, nrow = length(A_list)) colnames(m_A)<-c("ID", "cluster") m_A[,1]<-A_list m_A[,2]<-rep(c(colnames(summary_cluster)), c(summary_cluster[1,])) ####### CLUSTER A df<-as.data.frame(m_A) populations<-as.character(unique(df$cluster)) genes_populations<-matrix(data = NA, ncol = length(populations), nrow = length(populations)) colnames(genes_populations)<-populations rownames(genes_populations)<-populations vector<-vector() for (i in 1:length(populations)) { for (j in 1:length(populations)) { selected1<-df[df$cluster %in% populations[i],] n1<-unique(selected1$ID) set1<-n1 selected2<-df[df$cluster %in% populations[j],] n2<-unique(selected2$ID) set2<-n2 overlap <- sum(set1 %in% set2)/length(set1) #what fraction of set1 is in set2 genes_populations[j,i] <- overlap } } A_genes_populations_percentage<- genes_populations * 100 df<-as.data.frame(m_A) populations<-as.character(unique(df$cluster)) genes_populations<-matrix(data = NA, ncol = length(populations), nrow = length(populations)) colnames(genes_populations)<-populations rownames(genes_populations)<-populations vector<-vector() for (i in 1:length(populations)) { for (j in 1:length(populations)) { selected1<-df[df$cluster %in% populations[i],] n1<-unique(selected1$ID) set1<-n1 selected2<-df[df$cluster %in% populations[j],] 
n2<-unique(selected2$ID) set2<-n2 overlap <- sum(set1 %in% set2) #what fraction of set1 is in set2 genes_populations[j,i] <- overlap } } A_genes_populations_interseccion<- genes_populations ######## Function 4: Enrichment of the overlap of genes across scRNA-seq cell types summary_clusterhypo <-as.data.frame(summary_general) keeps <- colnames(A_genes_populations_percentage) summary_clusterhypo<-summary_clusterhypo[keeps] summary_clusterhypo<-as.matrix(summary_clusterhypo) mhyper_A<-matrix(data=NA, ncol = dim(summary_clusterhypo)[2], nrow=dim(summary_clusterhypo)[2]) colnames(mhyper_A)<-colnames(A_genes_populations_percentage) rownames(mhyper_A)<-colnames(A_genes_populations_percentage) universe<-dim(background)[1] for (i in 1:dim(summary_clusterhypo)[2]) { p_values<-vector() for (j in 1:dim(summary_clusterhypo)[2]) { white<-summary_clusterhypo[j] black<-universe-white white_inSample<-A_genes_populations_interseccion[j,i] sample_size<-dim(clusterx)[1] math<-phyper(white_inSample-1, white, black, sample_size, lower.tail = FALSE) p_values<-append(p_values, math) } mhyper_A[,i]<-p_values } #### Heatmap ###### all_names<-colnames(summary_general) your_name<-colnames(mhyper_A) diff<-setdiff(all_names, your_name) the_name<-append(your_name, diff) enrichment_logical<-as.matrix((mhyper_A)) a<-dim(enrichment_logical)[1] k<-vector() enrichment_logical2<-matrix(data=NA, ncol = length(the_name), nrow = length(the_name)) colnames(enrichment_logical2)<-the_name rownames(enrichment_logical2)<-the_name for (i in 1:a) { k<-enrichment_logical[,i] k<-append(k, rep(1,length(diff))) k<-p.adjust(k ) enrichment_logical2[,i]<-k } if(a!=length(the_name)) { for(j in (i+1):dim(enrichment_logical2)[1]) { k<-vector() k<-rep(1,dim(enrichment_logical2)[1]) enrichment_logical2[,j]<-k } } hypo_enrichment_A<-enrichment_logical2 ####### Cluster A ###### enrichment<-as.data.frame(hypo_enrichment_A) enrichment<-enrichment[order(rownames(enrichment)),] enrichment<-enrichment[ 
,order(colnames(enrichment))] clusterA_summary<-matrix(data = NA, ncol = 4, nrow = length(list_clouds)) colnames(clusterA_summary)<-c("Cluster", "Population", "Enrichment(adj pvalue)","Possible identities") clusterA_summary[,1]<-rep("Cluster", length(names)) clusterA_summary[,2]<-list_clouds clusterA_summary[,3]<-enrichment_logical_adj[1,] for(i in 1:length(list_clouds)) { n<-subset(enrichment, enrichment[,i]<=pvalue) n<-rownames(n) if (length(n)!=0){ if(length(n)==1){ if (n==as.character(clusterA_summary[i,2])) { clusterA_summary[i,4]<-paste(n, "is/are enriched") } }else { clusterA_summary[i,4]<-paste(n, collapse = ",") } } else { clusterA_summary[i,4]<-paste("No other possible enrichments") } } if(output==1) { return(clusterA_summary) } if(output==2) { return(A_genes_populations_interseccion) } if(output==3) { return(mhyper_A) } if(output==4) { return(hypo_enrichment_A) } if(output==5) { return(A_genes_populations_percentage) } } #end of function
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predict.model_list} \alias{predict.model_list} \title{Make predictions using the best-performing model from tuning} \usage{ \method{predict}{model_list}(object, newdata, prepdata, ...) } \arguments{ \item{object}{model_list object, as from `tune_models`} \item{newdata}{data on which to make predictions. If missing, predictions will be made on the training data. Should have the same structure as the input to `prep_data`,`tune_models` or `train_models`. `predict` will try to figure out if the data need to be sent through `prep_data` before making predictions; this can be overriden by setting `prepdata = FALSE``, but this should rarely be needed.} \item{prepdata}{Logical, rarely needs to be set by the user. By default, if `newdata` hasn't been prepped, it will be prepped by `prep_data` before predictions are made. Set this to TRUE to force already-prepped data through `prep_data` again, or set to FALSE to prevent `newdata` from being sent through `prep_data`.} \item{...}{Unused.} } \value{ A tibble data frame: newdata with an additional column for the predictions in "predicted_TARGET" where TARGET is the name of the variable being predicted. If classification, the new column will contain predicted probabilities. The tibble will have child class "hcai_predicted_df" and attribute "model_info" that contains information about the model used to make predictions. } \description{ Make predictions using the best-performing model from tuning } \details{ The model and hyperparameter values with the best out-of-fold performance in model training according to the selected metric is used to make predictions. Prepping data inside `predict` has the advantage of returning your predictions with the newdata in its original format. 
} \examples{ # Tune models using only the first 50 rows to keep computation fast models <- machine_learn(pima_diabetes[1:50, ], diabetes) # Make prediction on the next 20 rows. This uses the best-performing model from # tuning cross validation, and it also prepares the new data in the same way as # the training data was prepared. predictions <- predict(models, newdata = pima_diabetes[51:70, ]) predictions ggplot(predictions, aes(x = predicted_diabetes, fill = diabetes)) + geom_density(alpha = .5) } \seealso{ \code{\link{tune_models}}, \code{\link{prep_data}} }
/man/predict.model_list.Rd
permissive
magesa/healthcareai-r
R
false
true
2,422
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.R \name{predict.model_list} \alias{predict.model_list} \title{Make predictions using the best-performing model from tuning} \usage{ \method{predict}{model_list}(object, newdata, prepdata, ...) } \arguments{ \item{object}{model_list object, as from `tune_models`} \item{newdata}{data on which to make predictions. If missing, predictions will be made on the training data. Should have the same structure as the input to `prep_data`,`tune_models` or `train_models`. `predict` will try to figure out if the data need to be sent through `prep_data` before making predictions; this can be overriden by setting `prepdata = FALSE``, but this should rarely be needed.} \item{prepdata}{Logical, rarely needs to be set by the user. By default, if `newdata` hasn't been prepped, it will be prepped by `prep_data` before predictions are made. Set this to TRUE to force already-prepped data through `prep_data` again, or set to FALSE to prevent `newdata` from being sent through `prep_data`.} \item{...}{Unused.} } \value{ A tibble data frame: newdata with an additional column for the predictions in "predicted_TARGET" where TARGET is the name of the variable being predicted. If classification, the new column will contain predicted probabilities. The tibble will have child class "hcai_predicted_df" and attribute "model_info" that contains information about the model used to make predictions. } \description{ Make predictions using the best-performing model from tuning } \details{ The model and hyperparameter values with the best out-of-fold performance in model training according to the selected metric is used to make predictions. Prepping data inside `predict` has the advantage of returning your predictions with the newdata in its original format. 
} \examples{ # Tune models using only the first 50 rows to keep computation fast models <- machine_learn(pima_diabetes[1:50, ], diabetes) # Make prediction on the next 20 rows. This uses the best-performing model from # tuning cross validation, and it also prepares the new data in the same way as # the training data was prepared. predictions <- predict(models, newdata = pima_diabetes[51:70, ]) predictions ggplot(predictions, aes(x = predicted_diabetes, fill = diabetes)) + geom_density(alpha = .5) } \seealso{ \code{\link{tune_models}}, \code{\link{prep_data}} }
library(sf) library(tidyverse) setwd("~/Documents/R Studio/macrocystis/julio/hornillo") kelp_pal <- readRDS("~/Documents/R Studio/palette/k_pal.rds") coquimbo <- readRDS("~/Documents/R Studio/macrocystis/mapa/coquimbo.rds") kelp_hornillo_data <- readRDS("~/Documents/R Studio/macrocystis/data/KD/hornillo") kelp_hornillo <- kelp_hornillo_data %>% filter(KD >= 11 ) summary(kelp_hornillo) # Separacion por temporada ---- sum15 <- kelp_hornillo %>% filter(Season == "Summer15") aut16 <- kelp_hornillo %>% filter(Season == "Autumn16") win16 <- kelp_hornillo %>% filter(Season == "Winter16") spg16 <- kelp_hornillo %>% filter(Season == "Spring16") sum16 <- kelp_hornillo %>% filter(Season == "Summer16") aut17 <- kelp_hornillo %>% filter(Season == "Autumn17") win17 <- kelp_hornillo %>% filter(Season == "Winter17") spg17 <- kelp_hornillo %>% filter(Season == "Spring17") sum17 <- kelp_hornillo %>% filter(Season == "Summer17") aut18 <- kelp_hornillo %>% filter(Season == "Autumn18") win18 <- kelp_hornillo %>% filter(Season == "Winter18") spg18 <- kelp_hornillo %>% filter(Season == "Spring18") sum18 <- kelp_hornillo %>% filter(Season == "Summer18") aut19 <- kelp_hornillo %>% filter(Season == "Autumn19") win19 <- kelp_hornillo %>% filter(Season == "Winter19") spg19 <- kelp_hornillo %>% filter(Season == "Spring19") sum19 <- kelp_hornillo %>% filter(Season == "Summer19") aut20 <- kelp_hornillo %>% filter(Season == "Autumn20") win20 <- kelp_hornillo %>% filter(Season == "Winter20") spg20 <- kelp_hornillo %>% filter(Season == "Spring20") # Plot para cada temporada ---- ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = sum15, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = 
kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Verano 2015") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/summer15.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = aut16, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Otoño 2016") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/autumn16.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = win16, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Invierno 2016") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/winter16.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = spg16, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 
5) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Primavera 2016") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/spring16.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = sum17, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Verano 2017") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/summer17.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = aut18, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Otoño 2018") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/autumn18.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = win18, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + 
ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Invierno 2018") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/winter18.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = spg18, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 5) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Primavera 2018") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/spring18.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = sum18, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Verano 2018") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/summer18.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = aut19, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = 
"Mantos de Hornillo", subtitle = "Temporada Otoño 2019") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/autumn19.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = win19, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Invierno 2019") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/winter19.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = spg19, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 5) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Primavera 2019") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/spring19.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = sum19, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = 
"Temporada Verano 2019") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/summer19.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = aut20, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Otoño 2020") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/autumn20.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = win20, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 4) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Invierno 2020") + ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/winter20.png") ggplot() + theme_bw(base_size = 13) + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5)) + geom_sf(data = coquimbo, stat = "sf") + coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235), expand = FALSE) + scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) + stat_summary_2d(data = spg20, aes(x = Lon, y = Lat, z = KD), fun=sum, bins = 30) + scale_fill_gradientn(colours = kelp_pal, name = "KD total", n.breaks = 5) + xlab("Longitud") + ylab("Latitud") + labs(title = "Mantos de Hornillo", subtitle = "Temporada Primavera 2020") + 
ggsave("~/Documents/R Studio/macrocystis/julio/hornillo/plots/spring20.png")
/plot_hornillo.R
no_license
ddrojass/kelp_distribution
R
false
false
12,420
r
library(sf)
library(tidyverse)

setwd("~/Documents/R Studio/macrocystis/julio/hornillo")

# Shared inputs: color palette, coastline polygons, and kelp-density records.
kelp_pal <- readRDS("~/Documents/R Studio/palette/k_pal.rds")
coquimbo <- readRDS("~/Documents/R Studio/macrocystis/mapa/coquimbo.rds")
kelp_hornillo_data <- readRDS("~/Documents/R Studio/macrocystis/data/KD/hornillo")

# Keep only records with kelp density (KD) of at least 11.
kelp_hornillo <- kelp_hornillo_data %>% filter(KD >= 11)
summary(kelp_hornillo)

# Plot per temporada ----
# The original repeated an identical ggplot block 16 times, differing only in
# the season subset, the Spanish subtitle, the output file name, and the number
# of legend breaks (5 for spring seasons, 4 otherwise). One row here per plot.
# NOTE(review): the original also built subsets for Summer16, Autumn17,
# Winter17 and Spring17 but never plotted them; those dead assignments are
# dropped and the same 16 figures are produced.
plot_specs <- tribble(
  ~season,    ~subtitle,                  ~stem,      ~breaks,
  "Summer15", "Temporada Verano 2015",    "summer15", 4,
  "Autumn16", "Temporada Otoño 2016",     "autumn16", 4,
  "Winter16", "Temporada Invierno 2016",  "winter16", 4,
  "Spring16", "Temporada Primavera 2016", "spring16", 5,
  "Summer17", "Temporada Verano 2017",    "summer17", 4,
  "Autumn18", "Temporada Otoño 2018",     "autumn18", 4,
  "Winter18", "Temporada Invierno 2018",  "winter18", 4,
  "Spring18", "Temporada Primavera 2018", "spring18", 5,
  "Summer18", "Temporada Verano 2018",    "summer18", 4,
  "Autumn19", "Temporada Otoño 2019",     "autumn19", 4,
  "Winter19", "Temporada Invierno 2019",  "winter19", 4,
  "Spring19", "Temporada Primavera 2019", "spring19", 5,
  "Summer19", "Temporada Verano 2019",    "summer19", 4,
  "Autumn20", "Temporada Otoño 2020",     "autumn20", 4,
  "Winter20", "Temporada Invierno 2020",  "winter20", 4,
  "Spring20", "Temporada Primavera 2020", "spring20", 5
)

# Draw the binned kelp-density map for one season and write it to disk.
# NOTE(review): the original appended `+ ggsave(...)` to each plot, which
# relies on ggsave() grabbing last_plot() and is an error in current ggplot2;
# ggsave() is now called explicitly with the plot object.
plot_season <- function(season, subtitle, stem, breaks) {
  p <- ggplot() +
    theme_bw(base_size = 13) +
    theme(plot.title = element_text(hjust = 0.5),
          plot.subtitle = element_text(hjust = 0.5)) +
    geom_sf(data = coquimbo, stat = "sf") +
    coord_sf(xlim = c(-71.665, -71.61), ylim = c(-31.33, -31.235),
             expand = FALSE) +
    scale_x_continuous(breaks = c(-71.66, -71.64, -71.62)) +
    stat_summary_2d(data = filter(kelp_hornillo, Season == season),
                    aes(x = Lon, y = Lat, z = KD), fun = sum, bins = 30) +
    scale_fill_gradientn(colours = kelp_pal, name = "KD total",
                         n.breaks = breaks) +
    xlab("Longitud") +
    ylab("Latitud") +
    labs(title = "Mantos de Hornillo", subtitle = subtitle)
  ggsave(paste0("~/Documents/R Studio/macrocystis/julio/hornillo/plots/",
                stem, ".png"),
         plot = p)
}

pwalk(plot_specs, plot_season)
library(party)
library(ROCR)
# Load the kyphosis data set.
require(rpart)

# Shuffle the rows, then split 75% train / 25% evaluation.
# NOTE(review): the original indexed the *unshuffled* `kyphosis` data frame
# when building x.train / x.evaluate, so the shuffle into `x` had no effect
# and the "random" split was just the first/last rows of the original
# ordering. Fixed to index the shuffled copy `x`.
x <- kyphosis[sample(seq_len(nrow(kyphosis)), nrow(kyphosis), replace = FALSE), ]
n_train <- floor(nrow(x) * 0.75)
x.train <- x[seq_len(n_train), ]
x.evaluate <- x[(n_train + 1):nrow(x), ]

# Create a model using "random forest and bagging ensemble algorithms
# utilizing conditional inference trees."
x.model <- cforest(Kyphosis ~ Age + Number + Start, data = x.train,
                   control = cforest_unbiased(mtry = 3))

# Alternatively, use "recursive partitioning [...] in a conditional
# inference framework." (ctree plots nicely, but cforest doesn't plot.)
# x.model <- ctree(Kyphosis ~ Age + Number + Start, data = x.train)
# plot(x.model)

# Use the model to predict the evaluation set and report overall accuracy.
x.evaluate$prediction <- predict(x.model, newdata = x.evaluate)
x.evaluate$correct <- x.evaluate$prediction == x.evaluate$Kyphosis
print(paste("% of predicted classifications correct",
            mean(x.evaluate$correct)))

# Extract the class probabilities: treeresponse() returns two probabilities
# per sample; take the first of each pair and flip it to get P(event).
x.evaluate$probabilities <- 1 -
  unlist(treeresponse(x.model, newdata = x.evaluate),
         use.names = FALSE)[seq(1, nrow(x.evaluate) * 2, 2)]

# Plot the performance of the model applied to the evaluation set as
# an ROC curve.
pred <- prediction(x.evaluate$probabilities, x.evaluate$Kyphosis)
perf <- performance(pred, "tpr", "fpr")
plot(perf, main = "ROC curve", colorize = TRUE)

# And then a lift chart.
perf <- performance(pred, "lift", "rpp")
plot(perf, main = "lift curve", colorize = TRUE)
/cran/party/kyphosis.r
no_license
bikong2/data-science-notebook
R
false
false
1,590
r
library(party)
library(ROCR)
# Load the kyphosis data set.
require(rpart)

# Shuffle the rows, then split 75% train / 25% evaluation.
# NOTE(review): the original indexed the *unshuffled* `kyphosis` data frame
# when building x.train / x.evaluate, so the shuffle into `x` had no effect
# and the "random" split was just the first/last rows of the original
# ordering. Fixed to index the shuffled copy `x`.
x <- kyphosis[sample(seq_len(nrow(kyphosis)), nrow(kyphosis), replace = FALSE), ]
n_train <- floor(nrow(x) * 0.75)
x.train <- x[seq_len(n_train), ]
x.evaluate <- x[(n_train + 1):nrow(x), ]

# Create a model using "random forest and bagging ensemble algorithms
# utilizing conditional inference trees."
x.model <- cforest(Kyphosis ~ Age + Number + Start, data = x.train,
                   control = cforest_unbiased(mtry = 3))

# Alternatively, use "recursive partitioning [...] in a conditional
# inference framework." (ctree plots nicely, but cforest doesn't plot.)
# x.model <- ctree(Kyphosis ~ Age + Number + Start, data = x.train)
# plot(x.model)

# Use the model to predict the evaluation set and report overall accuracy.
x.evaluate$prediction <- predict(x.model, newdata = x.evaluate)
x.evaluate$correct <- x.evaluate$prediction == x.evaluate$Kyphosis
print(paste("% of predicted classifications correct",
            mean(x.evaluate$correct)))

# Extract the class probabilities: treeresponse() returns two probabilities
# per sample; take the first of each pair and flip it to get P(event).
x.evaluate$probabilities <- 1 -
  unlist(treeresponse(x.model, newdata = x.evaluate),
         use.names = FALSE)[seq(1, nrow(x.evaluate) * 2, 2)]

# Plot the performance of the model applied to the evaluation set as
# an ROC curve.
pred <- prediction(x.evaluate$probabilities, x.evaluate$Kyphosis)
perf <- performance(pred, "tpr", "fpr")
plot(perf, main = "ROC curve", colorize = TRUE)

# And then a lift chart.
perf <- performance(pred, "lift", "rpp")
plot(perf, main = "lift curve", colorize = TRUE)
# Plot 3: Baltimore City PM2.5 emissions by source type and year.
# Assumes `NEI` (the EPA National Emissions Inventory data frame) and ggplot2
# are already in scope — presumably loaded by a driver script; TODO confirm.
# Baltimore City is fips "24510". subset() evaluates the condition inside
# NEI, so the redundant `NEI$` qualifier from the original is dropped.
baltimore <- subset(NEI, fips == "24510")

# Total emissions per year within each source type.
baltimoreType <- aggregate(Emissions ~ year + type, baltimore, sum)

ggplot(baltimoreType, aes(year, Emissions, col = type)) +
  geom_line() +
  geom_point() +
  ggtitle(expression("Total Baltimore " ~ PM[2.5] ~ "Emissions by Type and Year")) +
  ylab(expression("Total Baltimore " ~ PM[2.5] ~ "Emissions")) +
  xlab("Year") +
  scale_colour_discrete(name = "Type of sources") +
  theme(legend.title = element_text(face = "bold"))
/plot3.R
no_license
Karishma-Yadav/Exploratory-data-analysis-project-2
R
false
false
492
r
# Plot 3: Baltimore City PM2.5 emissions by source type and year.
# Assumes `NEI` (the EPA National Emissions Inventory data frame) and ggplot2
# are already in scope — presumably loaded by a driver script; TODO confirm.
# Baltimore City is fips "24510". subset() evaluates the condition inside
# NEI, so the redundant `NEI$` qualifier from the original is dropped.
baltimore <- subset(NEI, fips == "24510")

# Total emissions per year within each source type.
baltimoreType <- aggregate(Emissions ~ year + type, baltimore, sum)

ggplot(baltimoreType, aes(year, Emissions, col = type)) +
  geom_line() +
  geom_point() +
  ggtitle(expression("Total Baltimore " ~ PM[2.5] ~ "Emissions by Type and Year")) +
  ylab(expression("Total Baltimore " ~ PM[2.5] ~ "Emissions")) +
  xlab("Year") +
  scale_colour_discrete(name = "Type of sources") +
  theme(legend.title = element_text(face = "bold"))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generate_kernel.R \name{kernel_matern} \alias{kernel_matern} \title{Generating A Single Matrix-wise Function Using Matern} \usage{ kernel_matern(l, p, sigma) } \arguments{ \item{l}{(numeric) A numeric number indicating the hyperparameter (flexibility) of a specific kernel.} \item{p}{(integer) For polynomial, p is the power; for matern, v = p + 1 / 2; for rational, alpha = p.} \item{sigma}{(numeric) The covariance coefficient for neural network kernel.} } \value{ \item{matrix_wise}{(function) A function calculating the relevance of two matrices.} } \description{ Generate matrix-wise functions for two matrices using matern kernel. } \details{ \bold{Matern Kernels} \deqn{k_{Matern}(r)=\frac{2^{1-\nu}}{\Gamma(\nu)}\Big(\frac{\sqrt{2\nu r}}{l}\Big)^\nu K_\nu \Big(\frac{\sqrt{2\nu r}}{l}\Big)} } \references{ The MIT Press. Gaussian Processes for Machine Learning, 2006. } \author{ Wenying Deng }
/man/kernel_matern.Rd
no_license
cran/CVEK
R
false
true
982
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generate_kernel.R \name{kernel_matern} \alias{kernel_matern} \title{Generating A Single Matrix-wise Function Using Matern} \usage{ kernel_matern(l, p, sigma) } \arguments{ \item{l}{(numeric) A numeric number indicating the hyperparameter (flexibility) of a specific kernel.} \item{p}{(integer) For polynomial, p is the power; for matern, v = p + 1 / 2; for rational, alpha = p.} \item{sigma}{(numeric) The covariance coefficient for neural network kernel.} } \value{ \item{matrix_wise}{(function) A function calculating the relevance of two matrices.} } \description{ Generate matrix-wise functions for two matrices using matern kernel. } \details{ \bold{Matern Kernels} \deqn{k_{Matern}(r)=\frac{2^{1-\nu}}{\Gamma(\nu)}\Big(\frac{\sqrt{2\nu r}}{l}\Big)^\nu K_\nu \Big(\frac{\sqrt{2\nu r}}{l}\Big)} } \references{ The MIT Press. Gaussian Processes for Machine Learning, 2006. } \author{ Wenying Deng }
## PA 434
## Assignment 3
## Alexis Kwan

# Load data from the working directory.
library(tidyverse)

migration <- read_csv("MigrationFlows.csv")
origin <- read_csv("Origin.csv")
population <- read_csv("Population.csv")
refugees <- read_csv("Refugees.csv")

## TASK 1 ----
# Build (a) a tidy dataset of the observations common to all four sources
# and (b) a tidy dataset with as much information as possible.
# Country names are dropped before joining: they are spelled inconsistently
# across datasets (which would duplicate rows and columns if joined on),
# and fixing them would require manual, non-systematic cleaning, whereas
# the ISO-backed country code is consistent. Joins therefore use code + year.
common_cols <- c("Country code", "year")

# Inspect column types before reshaping.
str(migration)
str(origin)
str(population)
str(refugees)

# Population is reported in thousands; rescale to persons.
population2 <- select(population, -c("Country"))
population2$population <- population$population * 1000

# Wide -> long: one row per country x migrant gender x year.
migration2 <- migration %>%
  pivot_longer(cols = -c("Country", "Country code"),
               names_sep = "_",
               names_to = c("Migrant gender", "year"),
               values_to = "Migrants",
               names_transform = list(year = as.numeric))

# Reassemble the year (split across century/decade/year columns), then
# wide -> long over the per-origin totals.
origin2 <- origin %>%
  select(-c("country_name")) %>%
  unite(year, c("century", "decade", "year"), sep = "") %>%
  pivot_longer(cols = -c("code", "year"),
               names_prefix = "tot_",
               names_to = c("origin"),
               values_to = "Origin total") %>%
  mutate(`Country code` = code, year = as.numeric(year))

# Wide -> long over the per-year refugee counts.
refugees2 <- refugees %>%
  select(-c("Country")) %>%
  pivot_longer(cols = -"Country code",
               names_to = "year",
               names_prefix = "REFUGEES_",
               values_to = "Refugees",
               names_transform = list(year = as.numeric))

# left_join keeps every migration row; rows without matches get NA.
joined2 <- migration2 %>%
  left_join(origin2, by = common_cols) %>%
  left_join(population2, by = common_cols) %>%
  left_join(refugees2, by = common_cols)

# 1. What is the unit of analysis of the dataset?
# The unit of analysis is countries.

# 2. What are the issues with each dataset? How do you make your data tidy?
# Some of the datasets do not have matching column names, e.g. "country" vs.
# "country_name". Some columns, like "year" in origin, are nonstandard and
# are not joined on directly. The "country" column in every dataset contained
# names with non-alphanumeric characters that may represent characters from a
# larger ASCII superset or are potentially meaningless.

# 3. What is the expected number of rows for each of the two datasets?
# 6 years * 232 countries = 1392 unique year+country combinations
6 * 232
# 3 poptypes * 1392
3 * 1392
# 3 migration types * 5 origins * 4176 = 62640 combinations for 2nd dataset
3 * 5 * 4176

# 4. Final sizes match expectations:
# 18468
# 62640

## TASK 2 ----

# 5. Average percentage of migrants on the total population in 2015.
# NOTE(review): numerator and denominator use different row filters
# (`Migrant gender == "Tot"` vs `Poptype == "Pop"`); this relies on the two
# subsets aligning element-wise — verify their lengths match.
mean(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2015"] /
       joined2$population[joined2$Poptype == "Pop" & joined2$year == "2015"])
# 13.33%

# 6. Did the average percentage of refugees on the total of immigrants
# increase or decrease from 1990?
Refugees_2015 <- mean(joined2$Refugees[joined2$year == "2015"] /
                        joined2$Migrants[joined2$year == "2015"], na.rm = TRUE)
Refugees_1990 <- mean(joined2$Refugees[joined2$year == "1990"] /
                        joined2$Migrants[joined2$year == "1990"], na.rm = TRUE)
Refugees_2015 - Refugees_1990
# decreased by 0.24%

# 7. Highest and lowest percentage of immigrants in a country in 2010.
max(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2010"] /
      joined2$population[joined2$Poptype == "Pop" & joined2$year == "2010"],
    na.rm = TRUE)
# 87.84%
min(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2010"] /
      joined2$population[joined2$Poptype == "Pop" & joined2$year == "2010"],
    na.rm = TRUE)
# 0.06%

# 8. Median percentage of immigrants from the different
# continents/geographical areas in 2015.
totals_only <- joined2 %>%
  filter(`Migrant gender` == "Tot" & Poptype == "Pop" & year == "2015")
totals_only$perc <- totals_only$`Origin total` / totals_only$Migrants

migrant_medians <- totals_only %>%
  group_by(origin) %>%
  summarize(median_perc = median(perc))
migrant_medians
# 1 africa     0.66%
# 2 asia       4.80%
# 3 developed 16.30%
# 4 latam      0.00%
# 5 oceania    0.00%
/week3_ex/Class3_Ex.r
no_license
alexkcode/pa-434
R
false
false
5,692
r
## PA 434
## Assignment 3
## Alexis Kwan

# Load data from the working directory.
library(tidyverse)

migration <- read_csv("MigrationFlows.csv")
origin <- read_csv("Origin.csv")
population <- read_csv("Population.csv")
refugees <- read_csv("Refugees.csv")

## TASK 1 ----
# Build (a) a tidy dataset of the observations common to all four sources
# and (b) a tidy dataset with as much information as possible.
# Country names are dropped before joining: they are spelled inconsistently
# across datasets (which would duplicate rows and columns if joined on),
# and fixing them would require manual, non-systematic cleaning, whereas
# the ISO-backed country code is consistent. Joins therefore use code + year.
common_cols <- c("Country code", "year")

# Inspect column types before reshaping.
str(migration)
str(origin)
str(population)
str(refugees)

# Population is reported in thousands; rescale to persons.
population2 <- select(population, -c("Country"))
population2$population <- population$population * 1000

# Wide -> long: one row per country x migrant gender x year.
migration2 <- migration %>%
  pivot_longer(cols = -c("Country", "Country code"),
               names_sep = "_",
               names_to = c("Migrant gender", "year"),
               values_to = "Migrants",
               names_transform = list(year = as.numeric))

# Reassemble the year (split across century/decade/year columns), then
# wide -> long over the per-origin totals.
origin2 <- origin %>%
  select(-c("country_name")) %>%
  unite(year, c("century", "decade", "year"), sep = "") %>%
  pivot_longer(cols = -c("code", "year"),
               names_prefix = "tot_",
               names_to = c("origin"),
               values_to = "Origin total") %>%
  mutate(`Country code` = code, year = as.numeric(year))

# Wide -> long over the per-year refugee counts.
refugees2 <- refugees %>%
  select(-c("Country")) %>%
  pivot_longer(cols = -"Country code",
               names_to = "year",
               names_prefix = "REFUGEES_",
               values_to = "Refugees",
               names_transform = list(year = as.numeric))

# left_join keeps every migration row; rows without matches get NA.
joined2 <- migration2 %>%
  left_join(origin2, by = common_cols) %>%
  left_join(population2, by = common_cols) %>%
  left_join(refugees2, by = common_cols)

# 1. What is the unit of analysis of the dataset?
# The unit of analysis is countries.

# 2. What are the issues with each dataset? How do you make your data tidy?
# Some of the datasets do not have matching column names, e.g. "country" vs.
# "country_name". Some columns, like "year" in origin, are nonstandard and
# are not joined on directly. The "country" column in every dataset contained
# names with non-alphanumeric characters that may represent characters from a
# larger ASCII superset or are potentially meaningless.

# 3. What is the expected number of rows for each of the two datasets?
# 6 years * 232 countries = 1392 unique year+country combinations
6 * 232
# 3 poptypes * 1392
3 * 1392
# 3 migration types * 5 origins * 4176 = 62640 combinations for 2nd dataset
3 * 5 * 4176

# 4. Final sizes match expectations:
# 18468
# 62640

## TASK 2 ----

# 5. Average percentage of migrants on the total population in 2015.
# NOTE(review): numerator and denominator use different row filters
# (`Migrant gender == "Tot"` vs `Poptype == "Pop"`); this relies on the two
# subsets aligning element-wise — verify their lengths match.
mean(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2015"] /
       joined2$population[joined2$Poptype == "Pop" & joined2$year == "2015"])
# 13.33%

# 6. Did the average percentage of refugees on the total of immigrants
# increase or decrease from 1990?
Refugees_2015 <- mean(joined2$Refugees[joined2$year == "2015"] /
                        joined2$Migrants[joined2$year == "2015"], na.rm = TRUE)
Refugees_1990 <- mean(joined2$Refugees[joined2$year == "1990"] /
                        joined2$Migrants[joined2$year == "1990"], na.rm = TRUE)
Refugees_2015 - Refugees_1990
# decreased by 0.24%

# 7. Highest and lowest percentage of immigrants in a country in 2010.
max(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2010"] /
      joined2$population[joined2$Poptype == "Pop" & joined2$year == "2010"],
    na.rm = TRUE)
# 87.84%
min(joined2$Migrants[joined2$`Migrant gender` == "Tot" & joined2$year == "2010"] /
      joined2$population[joined2$Poptype == "Pop" & joined2$year == "2010"],
    na.rm = TRUE)
# 0.06%

# 8. Median percentage of immigrants from the different
# continents/geographical areas in 2015.
totals_only <- joined2 %>%
  filter(`Migrant gender` == "Tot" & Poptype == "Pop" & year == "2015")
totals_only$perc <- totals_only$`Origin total` / totals_only$Migrants

migrant_medians <- totals_only %>%
  group_by(origin) %>%
  summarize(median_perc = median(perc))
migrant_medians
# 1 africa     0.66%
# 2 asia       4.80%
# 3 developed 16.30%
# 4 latam      0.00%
# 5 oceania    0.00%
setwd("/data1_2/jiap/projects/18-CCLE-VAE/new/V15.2/NOPEER.RANK.Sigmoid/result.EN/dr.CCLE/04-mix/09.Expr/")

###############################################################################
# Predicted drug response for TCGA samples (CCLE elastic-net model output).
# Columns: sample barcode, cancer type, then one prediction per drug.
drug.ccle <- read.table(
  file = "/data1_2/jiap/projects/18-CCLE-VAE/new/V15.2/NOPEER.RANK.Sigmoid/result.EN/dr.CCLE/01/MIX-F1-W5-PCC/MIX-F1-W5-PCC.best.pred_TCGA.txt",
  header = TRUE, as.is = TRUE, sep = "\t")
drugs <- colnames(drug.ccle)[3:ncol(drug.ccle)]
cancer.types <- unique(drug.ccle[, 2])
sample.type <- substr(drug.ccle[, 1], 14, 15)  # TCGA sample-type code
drug.ccle[, 1] <- gsub("\\.", "-", drug.ccle[, 1])

# Keep one sample type per cancer: "01" (primary tumor) except LAML ("03")
# and SKCM ("06"), matching TCGA conventions for those cancers.
drug.ccle <- do.call(rbind, lapply(cancer.types, function(cancer) {
  type.code <- "01"
  if (cancer == "LAML") type.code <- "03"
  if (cancer == "SKCM") type.code <- "06"
  drug.ccle[drug.ccle[, 2] == cancer & sample.type == type.code, ]
}))

###############################################################################
# Shared helpers ----
# The original triplicated the same analysis for BRCA, LGG and THCA; the
# shared steps are factored out below and behavior is otherwise preserved.

# Read the TCGA HiSeqV2 expression matrix for one cancer, drop zero-variance
# genes, normalize sample names and keep one row per gene symbol.
# NOTE(review): the original also computed a zero-row-sum filter but
# immediately overwrote its result with the zero-variance filter, so only the
# variance filter ever took effect; that effective behavior is kept.
load_expression <- function(cancer) {
  rpkm <- read.delim(paste("/data/mshao/TCGA/", cancer, "/HiSeqV2", sep = ""),
                     as.is = TRUE)
  rpkm <- rpkm[apply(rpkm[, -1], 1, var) != 0, ]
  colnames(rpkm) <- gsub("\\.", "-", colnames(rpkm))
  rpkm[match(unique(rpkm[, 1]), rpkm[, 1]), ]  # keep unique genes
}

# For each subtype: restrict to samples present in both the expression matrix
# and the drug-prediction table, code each gene's expression 0/1/2 by its
# bottom/middle/top quartile, and regress each drug's scaled predicted
# response on that code. Writes one W5.<subtype>.Expr.txt table per subtype.
# `subtype.list` maps subtype name -> 12-character patient barcodes.
run_subtype_analysis <- function(cur.cancer, subtype.list, non0.TCGA.RPKM,
                                 drug.ccle, drugs) {
  for (k in seq_along(subtype.list)) {
    subtype.ss <- subtype.list[[k]]
    blca.ccle <- drug.ccle[drug.ccle[, 2] == cur.cancer, ]

    # Patients present in expression data, drug predictions and the subtype.
    fixed.ss <- intersect(substr(colnames(non0.TCGA.RPKM), 1, 12),
                          substr(blca.ccle[, 1], 1, 12))
    fixed.ss <- intersect(fixed.ss, subtype.ss)

    blca.ccle <- blca.ccle[match(fixed.ss, substr(blca.ccle[, 1], 1, 12)), ]
    new.ccle <- apply(blca.ccle[, 3:ncol(blca.ccle)], 2, scale)

    TCGA.RPKM <- non0.TCGA.RPKM[, match(fixed.ss,
                                        substr(colnames(non0.TCGA.RPKM), 1, 12))]
    rownames(TCGA.RPKM) <- non0.TCGA.RPKM[, 1]
    TCGA.RPKM <- TCGA.RPKM[apply(TCGA.RPKM, 1, sd) != 0, ]
    TCGA.RPKM <- TCGA.RPKM[apply(TCGA.RPKM, 1, mean) > 1, ]  # expressed genes

    # Preallocate one slot per gene (the original grew a matrix with rbind).
    rows <- vector("list", nrow(TCGA.RPKM))
    for (kgene in seq_len(nrow(TCGA.RPKM))) {
      gene.expr <- t(TCGA.RPKM[kgene, ])
      # Quartile coding: 0 = bottom quartile, 2 = top quartile, 1 = middle.
      X2 <- rep(1, length(gene.expr))
      X2[which(gene.expr < quantile(gene.expr, probs = .25))] <- 0
      X2[which(gene.expr > quantile(gene.expr, probs = .75))] <- 2
      if (length(unique(X2)) < 3) next  # need all three levels

      # Per-drug linear trend of scaled response on the 0/1/2 code;
      # row 2 of the coefficient table is the X2 slope: est, se, t, p.
      ps.twosided <- apply(new.ccle, 2, function(Y) {
        sfit <- summary(glm(Y ~ X2))
        coef(sfit)[2, 1:4]
      })
      betas <- ps.twosided[1, ]
      stds <- ps.twosided[2, ]
      tvalue <- ps.twosided[3, ]
      ps <- ps.twosided[4, ]

      # Same cbind() symbols as the original so the written header matches.
      rows[[kgene]] <- cbind(
        names(subtype.list)[k], rownames(TCGA.RPKM)[kgene],
        drugs, ps, tvalue, betas, stds,
        mean(gene.expr[which(X2 == 0)]),
        mean(gene.expr[which(X2 == 1)]),
        mean(gene.expr[which(X2 == 2)]))
      if (kgene %% 100 == 0) cat(kgene, ".", sep = "")
    }
    cat(cur.cancer, ".", sep = "")

    gene.drug.expr.mat <- do.call(rbind, rows)  # NULL slots are dropped
    write.table(gene.drug.expr.mat,
                file = paste("W5.", names(subtype.list)[k], ".Expr.txt", sep = ""),
                quote = FALSE, row.names = FALSE, sep = "\t")
  }
}

###############################################################################
# BRCA: PAM50 subtypes ----
non0.TCGA.RPKM <- load_expression("BRCA")
subtype <- read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/BRCA.subtype.TCGA.txt",
                      as.is = TRUE, header = TRUE, sep = "\t", skip = 1)
subtype.list <- list(
  "BRCA-Basal" = subtype[which(subtype$PAM50.mRNA == "Basal-like"), 1],
  "BRCA-HER2"  = subtype[which(subtype$PAM50.mRNA == "HER2-enriched"), 1],
  "BRCA-LumA"  = subtype[which(subtype$PAM50.mRNA == "Luminal A"), 1],
  "BRCA-LumB"  = subtype[which(subtype$PAM50.mRNA == "Luminal B"), 1])
run_subtype_analysis("BRCA", subtype.list, non0.TCGA.RPKM, drug.ccle, drugs)

###############################################################################
# LGG: coc clusters (column 9 of the subtype table) ----
non0.TCGA.RPKM <- load_expression("LGG")
subtype <- read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/LGG.subtype.TCGA.txt",
                      as.is = TRUE, header = TRUE)
subtype.list <- list(
  "LGG-coc1" = subtype[which(subtype[, 9] == "coc1"), 1],
  "LGG-coc2" = subtype[which(subtype[, 9] == "coc2"), 1],
  "LGG-coc3" = subtype[which(subtype[, 9] == "coc3"), 1])
run_subtype_analysis("LGG", subtype.list, non0.TCGA.RPKM, drug.ccle, drugs)

###############################################################################
# THCA: BRAF-like vs RAS-like (BRS score file, no header) ----
non0.TCGA.RPKM <- load_expression("THCA")
subtype <- read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/THCA.BRS.txt",
                      as.is = TRUE, header = FALSE, sep = "\t")
ss <- substr(subtype[, 1], 1, 15)
subtype.list <- list(
  "THCA-Braf-like" = ss[which(subtype[, 2] == "Braf-like")],
  "THCA-Ras-like"  = ss[which(subtype[, 2] == "Ras-like")])
# The original truncated THCA barcodes to 12 characters inside its loop;
# do it up front so the helper sees patient-level ids like the other cancers.
subtype.list <- lapply(subtype.list, substr, 1, 12)
run_subtype_analysis("THCA", subtype.list, non0.TCGA.RPKM, drug.ccle, drugs)
/result.EN/dr.CCLE/Analysis/09.Expr/04.09.02.R
no_license
hegu2692/VAEN
R
false
false
9,537
r
setwd("/data1_2/jiap/projects/18-CCLE-VAE/new/V15.2/NOPEER.RANK.Sigmoid/result.EN/dr.CCLE/04-mix/09.Expr/") ############################################################################### drug.ccle = read.table(file="/data1_2/jiap/projects/18-CCLE-VAE/new/V15.2/NOPEER.RANK.Sigmoid/result.EN/dr.CCLE/01/MIX-F1-W5-PCC/MIX-F1-W5-PCC.best.pred_TCGA.txt", header=T, as.is=T, sep="\t") colnames(drug.ccle)[3:ncol(drug.ccle)] -> drugs cancer.types = unique(drug.ccle[,2]) sample.type = substr(drug.ccle[,1], 14, 15) ss = gsub("\\.", "-", drug.ccle[,1]) drug.ccle[,1] = ss cancer.drug.ccle = c() for(ct in 1:length(cancer.types)){ cancer = cancer.types[ct] type.code = "01" if(cancer == "LAML"){ type.code = "03" } if(cancer == "SKCM"){ type.code = "06" } blca.ccle = drug.ccle[which(drug.ccle[,2] == cancer & sample.type == type.code), ] cancer.drug.ccle = rbind(cancer.drug.ccle, blca.ccle) } drug.ccle = cancer.drug.ccle ############################################################################### cur.cancer = "BRCA" original.TCGA.RPKM = read.delim(paste("/data/mshao/TCGA/",cur.cancer,"/HiSeqV2", sep=""), as.is=T) apply(original.TCGA.RPKM[,-1],1,sum) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] apply(original.TCGA.RPKM[,-1],1,var) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] TCGA.ss = gsub("\\.", "-", colnames(non0.TCGA.RPKM)) colnames(non0.TCGA.RPKM) = TCGA.ss; rm(TCGA.ss) match(unique(non0.TCGA.RPKM[,1]), non0.TCGA.RPKM[,1]) -> ii ### keep unique genes non0.TCGA.RPKM = non0.TCGA.RPKM[ii, ] subtype = read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/BRCA.subtype.TCGA.txt", as.is=T, header=T, sep="\t", skip=1) subtype[which(subtype$PAM50.mRNA=="Basal-like"),1] -> g1 subtype[which(subtype$PAM50.mRNA=="HER2-enriched"),1] -> g2 subtype[which(subtype$PAM50.mRNA=="Luminal A"),1] -> g3 subtype[which(subtype$PAM50.mRNA=="Luminal B"),1] -> g4 subtype.list = list() subtype.list[["BRCA-Basal"]] = g1 subtype.list[["BRCA-HER2"]] = g2 
subtype.list[["BRCA-LumA"]] = g3 subtype.list[["BRCA-LumB"]] = g4 for(k in 1:length(subtype.list)){ gene.drug.expr.mat = c() cancer = names(subtype.list)[k] subtype.ss = subtype.list[[k]] blca.ccle = drug.ccle[which(drug.ccle[,2] == cur.cancer), ] fixed.ss = intersect( substr(colnames(non0.TCGA.RPKM), 1,12), substr(blca.ccle[,1],1,12)) fixed.ss = intersect(fixed.ss, subtype.ss) blca.ccle = blca.ccle[match(fixed.ss, substr(blca.ccle[,1], 1,12) ),] apply(blca.ccle[, 3:ncol(blca.ccle)], 2, scale) -> new.ccle TCGA.RPKM = non0.TCGA.RPKM[, match(fixed.ss, substr(colnames(non0.TCGA.RPKM), 1,12 ) )] rownames(TCGA.RPKM) = non0.TCGA.RPKM[,1] apply(TCGA.RPKM, 1, sd) -> rowCheck TCGA.RPKM = TCGA.RPKM[rowCheck!=0, ] apply(TCGA.RPKM,1,mean) -> rowMean TCGA.RPKM = TCGA.RPKM[rowMean > 1, ] ### unique in W5 t(apply(TCGA.RPKM,1,scale)) -> scaled.TCGA.RPKM dimnames(scaled.TCGA.RPKM) = dimnames(TCGA.RPKM) for(kgene in 1:nrow(TCGA.RPKM)){ t(TCGA.RPKM[kgene, ]) -> gene.expr X2 = rep(1, length(gene.expr)) X2[which(gene.expr < quantile(gene.expr, probs=.25))] = 0 X2[which(gene.expr > quantile(gene.expr, probs=.75))] = 2 if(length(unique(X2)) < 3)next apply(new.ccle, 2, function(Y){ summary(glm(Y ~ X2)) -> sfit coef(sfit)[2, 1:4] }) -> ps.twosided betas = ps.twosided[1,] stds = ps.twosided[2,] tvalue = ps.twosided[3,] ps = ps.twosided[4,] gene.drug.expr.mat = rbind(gene.drug.expr.mat, cbind( names(subtype.list)[k], rownames(TCGA.RPKM)[kgene], drugs, ps, tvalue, betas, stds, mean(gene.expr[which(X2==0)]), mean(gene.expr[which(X2==1)]), mean(gene.expr[which(X2==2)]) )) if(kgene %% 100 == 0)cat(kgene, ".", sep="") } cat(cur.cancer, ".", sep="") write.table(gene.drug.expr.mat, file=paste("W5.",names(subtype.list)[k],".Expr.txt", sep=""), quote=F, row.names=F, sep="\t") } ############################################################################### cur.cancer = "LGG" original.TCGA.RPKM = read.delim(paste("/data/mshao/TCGA/",cur.cancer,"/HiSeqV2", sep=""), as.is=T) 
apply(original.TCGA.RPKM[,-1],1,sum) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] apply(original.TCGA.RPKM[,-1],1,var) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] TCGA.ss = gsub("\\.", "-", colnames(non0.TCGA.RPKM)) colnames(non0.TCGA.RPKM) = TCGA.ss; rm(TCGA.ss) match(unique(non0.TCGA.RPKM[,1]), non0.TCGA.RPKM[,1]) -> ii ### keep unique genes non0.TCGA.RPKM = non0.TCGA.RPKM[ii, ] subtype = read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/LGG.subtype.TCGA.txt", as.is=T, header=T) subtype[which(subtype[,9]=="coc1"),1] -> g1 subtype[which(subtype[,9]=="coc2"),1] -> g2 subtype[which(subtype[,9]=="coc3"),1] -> g3 subtype.list = list() subtype.list[["LGG-coc1"]] = g1 subtype.list[["LGG-coc2"]] = g2 subtype.list[["LGG-coc3"]] = g3 for(k in 1:length(subtype.list)){ gene.drug.expr.mat = c() cancer = names(subtype.list)[k] subtype.ss = subtype.list[[k]] blca.ccle = drug.ccle[which(drug.ccle[,2] == cur.cancer), ] fixed.ss = intersect( substr(colnames(non0.TCGA.RPKM), 1,12), substr(blca.ccle[,1],1,12)) fixed.ss = intersect(fixed.ss, subtype.ss) blca.ccle = blca.ccle[match(fixed.ss, substr(blca.ccle[,1], 1,12) ),] apply(blca.ccle[, 3:ncol(blca.ccle)], 2, scale) -> new.ccle TCGA.RPKM = non0.TCGA.RPKM[, match(fixed.ss, substr(colnames(non0.TCGA.RPKM), 1,12 ) )] rownames(TCGA.RPKM) = non0.TCGA.RPKM[,1] apply(TCGA.RPKM, 1, sd) -> rowCheck TCGA.RPKM = TCGA.RPKM[rowCheck!=0, ] apply(TCGA.RPKM,1,mean) -> rowMean TCGA.RPKM = TCGA.RPKM[rowMean > 1, ] for(kgene in 1:nrow(TCGA.RPKM)){ t(TCGA.RPKM[kgene, ]) -> gene.expr X2 = rep(1, length(gene.expr)) X2[which(gene.expr < quantile(gene.expr, probs=.25))] = 0 X2[which(gene.expr > quantile(gene.expr, probs=.75))] = 2 if(length(unique(X2)) < 3)next apply(new.ccle, 2, function(Y){ summary(glm(Y ~ X2)) -> sfit coef(sfit)[2, 1:4] }) -> ps.twosided betas = ps.twosided[1,] stds = ps.twosided[2,] tvalue = ps.twosided[3,] ps = ps.twosided[4,] gene.drug.expr.mat = rbind(gene.drug.expr.mat, cbind( 
names(subtype.list)[k], rownames(TCGA.RPKM)[kgene], drugs, ps, tvalue, betas, stds, mean(gene.expr[which(X2==0)]), mean(gene.expr[which(X2==1)]), mean(gene.expr[which(X2==2)]) )) if(kgene %% 100 == 0)cat(kgene, ".", sep="") } cat(cur.cancer, ".", sep="") write.table(gene.drug.expr.mat, file=paste("W5.",names(subtype.list)[k],".Expr.txt", sep=""), quote=F, row.names=F, sep="\t") } ############################################################################### cur.cancer = "THCA" original.TCGA.RPKM = read.delim(paste("/data/mshao/TCGA/",cur.cancer,"/HiSeqV2", sep=""), as.is=T) apply(original.TCGA.RPKM[,-1],1,sum) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] apply(original.TCGA.RPKM[,-1],1,var) -> rowCheck non0.TCGA.RPKM = original.TCGA.RPKM[which(rowCheck!=0),] TCGA.ss = gsub("\\.", "-", colnames(non0.TCGA.RPKM)) colnames(non0.TCGA.RPKM) = TCGA.ss; rm(TCGA.ss) match(unique(non0.TCGA.RPKM[,1]), non0.TCGA.RPKM[,1]) -> ii ### keep unique genes non0.TCGA.RPKM = non0.TCGA.RPKM[ii, ] subtype = read.delim("/data1_2/jiap/projects/18-CCLE-VAE/DATA/THCA.BRS.txt", as.is=T, header=F, sep="\t") ss = substr(subtype[,1], 1, 15) ss[which(subtype[,2]=="Braf-like")] -> g1 ss[which(subtype[,2]=="Ras-like")] -> g2 subtype.list = list() subtype.list[["THCA-Braf-like"]] = g1 subtype.list[["THCA-Ras-like"]] = g2 for(k in 1:length(subtype.list)){ gene.drug.expr.mat = c() cancer = names(subtype.list)[k] subtype.ss = substr(subtype.list[[k]], 1, 12) blca.ccle = drug.ccle[which(drug.ccle[,2] == cur.cancer), ] fixed.ss = intersect( substr(colnames(non0.TCGA.RPKM), 1,12), substr(blca.ccle[,1],1,12)) fixed.ss = intersect(fixed.ss, subtype.ss) blca.ccle = blca.ccle[match(fixed.ss, substr(blca.ccle[,1], 1,12) ),] apply(blca.ccle[, 3:ncol(blca.ccle)], 2, scale) -> new.ccle TCGA.RPKM = non0.TCGA.RPKM[, match(fixed.ss, substr(colnames(non0.TCGA.RPKM), 1,12 ) )] rownames(TCGA.RPKM) = non0.TCGA.RPKM[,1] apply(TCGA.RPKM, 1, sd) -> rowCheck TCGA.RPKM = TCGA.RPKM[rowCheck!=0, ] 
apply(TCGA.RPKM,1,mean) -> rowMean TCGA.RPKM = TCGA.RPKM[rowMean > 1, ] ### unique in W5 t(apply(TCGA.RPKM,1,scale)) -> scaled.TCGA.RPKM dimnames(scaled.TCGA.RPKM) = dimnames(TCGA.RPKM) for(kgene in 1:nrow(TCGA.RPKM)){ t(TCGA.RPKM[kgene, ]) -> gene.expr X2 = rep(1, length(gene.expr)) X2[which(gene.expr < quantile(gene.expr, probs=.25))] = 0 X2[which(gene.expr > quantile(gene.expr, probs=.75))] = 2 if(length(unique(X2)) < 3)next apply(new.ccle, 2, function(Y){ summary(glm(Y ~ X2)) -> sfit coef(sfit)[2, 1:4] }) -> ps.twosided betas = ps.twosided[1,] stds = ps.twosided[2,] tvalue = ps.twosided[3,] ps = ps.twosided[4,] gene.drug.expr.mat = rbind(gene.drug.expr.mat, cbind( names(subtype.list)[k], rownames(TCGA.RPKM)[kgene], drugs, ps, tvalue, betas, stds, mean(gene.expr[which(X2==0)]), mean(gene.expr[which(X2==1)]), mean(gene.expr[which(X2==2)]) )) if(kgene %% 100 == 0)cat(kgene, ".", sep="") } cat(cur.cancer, ".", sep="") write.table(gene.drug.expr.mat, file=paste("W5.",names(subtype.list)[k],".Expr.txt", sep=""), quote=F, row.names=F, sep="\t") }
%% %% WARNING! DO NOT EDIT! %% This file is automatically generated from set.default.arguments.R %% \name{control.bgev.default} \alias{control.bgev.default} \alias{inla.control.bgev.default} \alias{inla.set.control.bgev.default.default} \alias{set.control.bgev.default.default} \alias{control.bgev.default.default} \title{Control variables in control.bgev.default} \description{Control variables in \code{control.bgev.default} for use in \code{inla}} \usage{ inla.set.control.bgev.default.default(...) control.bgev.default(beta.ab, q.location, q.mix, q.spread) } \arguments{ \item{...}{Possible arguments} \item{q.location}{ The quantile level for the location parameter} \item{q.spread}{ The quantile level for the spread parameter (must be < 0.5)} \item{q.mix}{ The lower and upper quantile level for the mixing function} \item{beta.ab}{ The parameters a and b in the Beta mixing function} } \value{ The \code{control.bgev}-list is set within the corresponding \code{control.family}-list as control parameters to the \code{family="bgev"} The function \code{control.bgev.default} is used to TAB-complete arguments and returns a list of given arguments. The function \code{inla.set.control.bgev.default.default} returns a list with all the default values of all parameters within this control statement. } \seealso{ \code{\link{control.update}}, \code{\link{control.lincomb}}, \code{\link{control.group}}, \code{\link{control.mix}}, \code{\link{control.link}}, \code{\link{control.expert}}, \code{\link{control.compute}}, \code{\link{control.pardiso.default}}, \code{\link{control.bgev.default}}, \code{\link{control.family}}, \code{\link{control.fixed}}, \code{\link{control.inla}}, \code{\link{control.predictor}}, \code{\link{control.results}}, \code{\link{control.mode}}, \code{\link{control.hazard}}, \code{\link{inla}} }
/man/control.bgev.default.Rd
no_license
jdsimkin04/shinyinla
R
false
false
1,839
rd
%% %% WARNING! DO NOT EDIT! %% This file is automatically generated from set.default.arguments.R %% \name{control.bgev.default} \alias{control.bgev.default} \alias{inla.control.bgev.default} \alias{inla.set.control.bgev.default.default} \alias{set.control.bgev.default.default} \alias{control.bgev.default.default} \title{Control variables in control.bgev.default} \description{Control variables in \code{control.bgev.default} for use in \code{inla}} \usage{ inla.set.control.bgev.default.default(...) control.bgev.default(beta.ab, q.location, q.mix, q.spread) } \arguments{ \item{...}{Possible arguments} \item{q.location}{ The quantile level for the location parameter} \item{q.spread}{ The quantile level for the spread parameter (must be < 0.5)} \item{q.mix}{ The lower and upper quantile level for the mixing function} \item{beta.ab}{ The parameters a and b in the Beta mixing function} } \value{ The \code{control.bgev}-list is set within the corresponding \code{control.family}-list as control parameters to the \code{family="bgev"} The function \code{control.bgev.default} is used to TAB-complete arguments and returns a list of given arguments. The function \code{inla.set.control.bgev.default.default} returns a list with all the default values of all parameters within this control statement. } \seealso{ \code{\link{control.update}}, \code{\link{control.lincomb}}, \code{\link{control.group}}, \code{\link{control.mix}}, \code{\link{control.link}}, \code{\link{control.expert}}, \code{\link{control.compute}}, \code{\link{control.pardiso.default}}, \code{\link{control.bgev.default}}, \code{\link{control.family}}, \code{\link{control.fixed}}, \code{\link{control.inla}}, \code{\link{control.predictor}}, \code{\link{control.results}}, \code{\link{control.mode}}, \code{\link{control.hazard}}, \code{\link{inla}} }
#' Run Broad-Enrich on broad genomic regions #' #' Broad-Enrich is designed for use with broad peaks that may intersect multiple #' gene loci, and cumulatively cover greater than 5\% of the genome. For example, #' ChIP-seq experiments for histone modifications. For more details, see the #' 'Broad-Enrich Method' section below. For help choosing a method, see the #' 'Choosing A Method' section below, or see the vignette. #' #' @section Broad-Enrich Method: #' The Broad-Enrich method uses the cumulative peak coverage of genes in its model #' for enrichment: \code{GO ~ ratio + s(log10_length)}. Here, \code{GO} is a #' binary vector indicating whether a gene is in the gene set being tested, #' \code{ratio} is a numeric vector indicating the ratio of the gene covered by #' peaks, and \code{s(log10_length)} is a binomial cubic smoothing spline which #' adjusts for the relationship between gene coverage and locus length. #' #' @section Choosing A Method: #' The following guidelines are intended to help select an enrichment function: #' \describe{ #' \item{broadenrich():}{ is designed for use with broad peaks that may intersect #' multiple gene loci, and cumulatively cover greater than 5\% of the genome. For #' example, ChIP-seq experiments for histone modifications.} #' \item{chipenrich():}{ is designed for use with 1,000s or 10,000s of narrow #' peaks which results in fewer gene loci containing a peak overall. For example, #' ChIP-seq experiments for transcription factors.} #' \item{polyenrich():}{ is also designed for narrow peaks, for experiments with #' 100,000s of peaks, or in cases where the number of binding sites per gene affects #' its regulation. If unsure whether to use chipenrich or polyenrich, then we recommend #' hybridenrich.} #' \item{hybridenrich():}{ is a combination of chipenrich and polyenrich, to be #' used when one is unsure which is the optimal method. 
} #' } #' #' @section Randomizations: #' Randomization of locus definitions allows for the assessment of Type I Error #' under the null hypothesis. The randomization codes are: #' \describe{ #' \item{\code{NULL}:}{ No randomizations, the default.} #' \item{'complete':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together, without regard for the chromosome location, or locus length. #' The null hypothesis is that there is no true gene set enrichment.} #' \item{'bylength':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together within bins of 100 genes sorted by locus length. The null #' hypothesis is that there is no true gene set enrichment, but with preserved locus #' length relationship.} #' \item{'bylocation':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together within bins of 50 genes sorted by genomic location. The null #' hypothesis is that there is no true gene set enrichment, but with preserved #' genomic location.} #' } #' The return value with a selected randomization is the same list as without. #' To assess the Type I error, the \code{alpha} level for the particular data set #' can be calculated by dividing the total number of gene sets with p-value < \code{alpha} #' by the total number of tests. Users may want to perform multiple randomizations #' for a set of peaks and take the median of the \code{alpha} values. #' #' @param peaks Either a file path or a \code{data.frame} of peaks in BED-like #' format. If a file path, the following formats are fully supported via their #' file extensions: .bed, .broadPeak, .narrowPeak, .gff3, .gff2, .gff, and .bedGraph #' or .bdg. BED3 through BED6 files are supported under the .bed extension. Files #' without these extensions are supported under the conditions that the first 3 #' columns correspond to 'chr', 'start', and 'end' and that there is either no #' header column, or it is commented out. 
If a \code{data.frame} A BEDX+Y style #' \code{data.frame}. See \code{GenomicRanges::makeGRangesFromDataFrame} for #' acceptable column names. #' @param out_name Prefix string to use for naming output files. This should not #' contain any characters that would be illegal for the system being used (Unix, #' Windows, etc.) The default value is "broadenrich", and a file "broadenrich_results.tab" #' is produced. If \code{qc_plots} is set, then a file "broadenrich_qcplots.png" #' is produced containing a number of quality control plots. If \code{out_name} #' is set to NULL, no files are written, and results then must be retrieved from #' the list returned by \code{broadenrich}. #' @param out_path Directory to which results files will be written out. Defaults #' to the current working directory as returned by \code{\link{getwd}}. #' @param genome One of the \code{supported_genomes()}. #' @param genesets A character vector of geneset databases to be tested for #' enrichment. See \code{supported_genesets()}. Alternately, a file path to a #' a tab-delimited text file with header and first column being the geneset ID #' or name, and the second column being Entrez Gene IDs. For an example custom #' gene set file, see the vignette. #' @param locusdef One of: 'nearest_tss', 'nearest_gene', 'exon', 'intron', '1kb', #' '1kb_outside', '1kb_outside_upstream', '5kb', '5kb_outside', '5kb_outside_upstream', #' '10kb', '10kb_outside', '10kb_outside_upstream'. For a description of each, #' see the vignette or \code{\link{supported_locusdefs}}. Alternately, a file path for #' a custom locus definition. NOTE: Must be for a \code{supported_genome()}, and #' must have columns 'chr', 'start', 'end', and 'gene_id' or 'geneid'. For an #' example custom locus definition file, see the vignette. #' @param mappability One of \code{NULL}, a file path to a custom mappability file, #' or an \code{integer} for a valid read length given by \code{supported_read_lengths}. 
#' If a file, it should contain a header with two column named 'gene_id' and 'mappa'. #' Gene IDs should be Entrez IDs, and mappability values should range from 0 and 1. #' For an example custom mappability file, see the vignette. Default value is NULL. #' @param qc_plots A logical variable that enables the automatic generation of #' plots for quality control. #' @param min_geneset_size Sets the minimum number of genes a gene set may have #' to be considered for enrichment testing. #' @param max_geneset_size Sets the maximum number of genes a gene set may have #' to be considered for enrichment testing. #' @param randomization One of \code{NULL}, 'complete', 'bylength', or 'bylocation'. #' See the Randomizations section below. #' @param n_cores The number of cores to use for enrichment testing. We recommend #' using only up to the maximum number of \emph{physical} cores present, as #' virtual cores do not significantly decrease runtime. Default number of cores #' is set to 1. NOTE: Windows does not support multicore enrichment. #' #' @return A list, containing the following items: #' #' \item{opts }{ A data frame containing the arguments/values passed to \code{broadenrich}.} #' #' \item{peaks }{ #' A data frame containing peak assignments to genes. Peaks which do not overlap #' a gene locus are not included. Each peak that was assigned to a gene is listed, #' along with the peak midpoint or peak interval coordinates (depending on which #' was used), the gene to which the peak was assigned, the locus start and end #' position of the gene, and the distance from the peak to the TSS. #' #' The columns are: #' #' \describe{ #' \item{peak_id}{an ID given to unique combinations of chromosome, peak start, and peak end. } #' \item{chr}{the chromosome the peak originated from. } #' \item{peak_start}{start position of the peak. } #' \item{peak_end}{end position of the peak. } #' \item{gene_id}{the Entrez ID of the gene to which the peak was assigned. 
} #' \item{gene_symbol}{the official gene symbol for the gene_id (above). } #' \item{gene_locus_start}{the start position of the locus for the gene to which the peak was assigned (specified by the locus definition used.) } #' \item{gene_locus_end}{the end position of the locus for the gene to which the peak was assigned (specified by the locus definition used.) } #' \item{overlap_start}{ the start position of the peak overlap with the gene locus.} #' \item{overlap_end}{ the end position of the peak overlap with the gene locus.} #' \item{peak_overlap}{ the base pair overlap of the peak with the gene locus.} #' }} #' #' \item{peaks_per_gene }{ #' A data frame of the count of peaks per gene. The columns are: #' #' \describe{ #' \item{gene_id}{the Entrez Gene ID. } #' \item{length}{the length of the gene's locus (depending on which locus definition you chose.)} #' \item{log10_length}{the log10(locus length) for the gene.} #' \item{num_peaks}{the number of peaks that were assigned to the gene, given the current locus definition. } #' \item{peak}{whether or not the gene is considered to have a peak, as defined by \code{num_peak_threshold}. } #' \item{peak_overlap}{the number of base pairs of the gene covered by a peak.} #' \item{ratio}{the proportion of the gene covered by a peak.} #' }} #' #' \item{results }{ #' A data frame of the results from performing the gene set enrichment test on #' each geneset that was requested (all genesets are merged into one final data #' frame.) The columns are: #' #' \describe{ #' \item{Geneset.ID}{the identifier for a given gene set from the selected database. For example, GO:0000003. } #' \item{Geneset.Type}{ specifies from which database the Geneset.ID originates. For example, "Gene Ontology Biological Process."} #' \item{Description}{ gives a definition of the geneset. 
For example, "reproduction."} #' \item{P.Value}{the probability of observing the degree of enrichment of the gene set given the null hypothesis that peaks are not associated with any gene sets.} #' \item{FDR}{the false discovery rate proposed by Bejamini \& Hochberg for adjusting the p-value to control for family-wise error rate.} #' \item{Odds.Ratio}{the estimated odds that peaks are associated with a given gene set compared to the odds that peaks are associated with other gene sets, after controlling for locus length and/or mappability. An odds ratio greater than 1 indicates enrichment, and less than 1 indicates depletion.} #' \item{N.Geneset.Genes}{the number of genes in the gene set.} #' \item{N.Geneset.Peak.Genes}{the number of genes in the genes set that were assigned at least one peak.} #' \item{Geneset.Avg.Gene.Length}{the average length of the genes in the gene set.} #' \item{Geneset.Avg.Gene.Coverage}{the mean proportion of the gene loci in the gene set covered by a peak.} #' \item{Geneset.Peak.Genes}{the list of genes from the gene set that had at least one peak assigned.} #' #' }} #' #' @family enrichment functions #' #' @examples #' #' # Run Broad-Enrich using an example dataset, assigning peaks to the nearest TSS, #' # and on a small custom geneset #' data(peaks_H3K4me3_GM12878, package = 'chipenrich.data') #' peaks_H3K4me3_GM12878 = subset(peaks_H3K4me3_GM12878, #' peaks_H3K4me3_GM12878$chrom == 'chr1') #' gs_path = system.file('extdata','vignette_genesets.txt', package='chipenrich') #' results = broadenrich(peaks_H3K4me3_GM12878, locusdef='nearest_tss', #' genome = 'hg19', genesets=gs_path, out_name=NULL) #' #' # Get the list of peaks that were assigned to genes. #' assigned_peaks = results$peaks #' #' # Get the results of enrichment testing. 
#' enrich = results$results #' #' @export #' @include constants.R utils.R supported.R setup.R randomize.R #' @include read.R assign_peaks.R peaks_per_gene.R #' @include plot_gene_coverage.R #' @include test_broadenrich.R broadenrich = function( peaks, out_name = "broadenrich", out_path = getwd(), genome = supported_genomes(), genesets = c( 'GOBP', 'GOCC', 'GOMF'), locusdef = "nearest_tss", mappability = NULL, qc_plots = TRUE, min_geneset_size = 15, max_geneset_size = 2000, randomization = NULL, n_cores = 1 ) { genome = match.arg(genome) n_cores = reset_ncores_for_windows(n_cores) ############################################################################ # Collect options for opts output opts_list = as.list(sys.call()) opts_list = opts_list[2:length(opts_list)] opts = data.frame( parameters = names(opts_list), values = as.character(opts_list), stringsAsFactors = FALSE ) ############################################################################ # Setup locus definitions, genesets, and mappability ldef_list = setup_locusdef(locusdef, genome, randomization) ldef = ldef_list[['ldef']] tss = ldef_list[['tss']] geneset_list = setup_genesets(gs_codes = genesets, ldef_obj = ldef, genome = genome, min_geneset_size = min_geneset_size, max_geneset_size = max_geneset_size) mappa = setup_mappa(mappa_code = mappability, genome = genome, ldef_code = locusdef, ldef_obj = ldef) ############################################################################ ############################################################################ # Start enrichment process ############################################################################ ############################################################################ ###################################################### # Read in and format peaks (from data.frame or file) if (class(peaks) == "data.frame") { message('Reading peaks from data.frame...') peakobj = load_peaks(peaks) } else if (class(peaks) == "character") { peakobj = 
read_bed(peaks) } # Number of peaks in data. num_peaks = length(peakobj) ###################################################### # Assign peaks to genes. NOTE: If method = 'broadenrich' use # assign_peak_segments(), otherwise use assign_peaks(). message("Assigning peaks to genes with assign_peak_segments(...) ..") assigned_peaks = assign_peak_segments(peakobj, ldef) peak_genes = unique(assigned_peaks$gene_id) ###################################################### # Compute peaks per gene table ppg = num_peaks_per_gene(assigned_peaks, ldef, mappa) # Add gene overlaps for broadenrich message("Calculating peak overlaps with gene loci..") ppg = calc_peak_gene_overlap(assigned_peaks,ppg) ###################################################### # Enrichment results = list() for (gobj in geneset_list) { message("Test: Broad-Enrich") message(sprintf("Genesets: %s", gobj@type)) message("Running tests..") rtemp = test_broadenrich(gobj, ppg, n_cores) # Annotate with geneset descriptions. rtemp$"Description" = as.character(mget(rtemp$Geneset.ID, gobj@set.name, ifnotfound=NA)) rtemp$"Geneset.Type" = gobj@type results[[gobj@type]] = rtemp } enrich = Reduce(rbind,results) ###################################################### # Post-process enrichment # Order columns, add enriched/depleted column as needed, remove bad tests, # sort by p-value, rename rownames to integers enrich = post_process_enrichments(enrich) ###################################################### # Write result objects to files if (!is.null(out_name)) { filename_analysis = file.path(out_path, sprintf("%s_results.tab", out_name)) write.table(enrich, file = filename_analysis, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote results to: ", filename_analysis) filename_peaks = file.path(out_path, sprintf("%s_peaks.tab", out_name)) write.table(assigned_peaks, file = filename_peaks, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote peak-to-gene assignments to: ", filename_peaks) filename_opts = 
file.path(out_path, sprintf("%s_opts.tab", out_name)) write.table(opts, file = filename_opts, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote run options/arguments to: ", filename_opts) filename_ppg = file.path(out_path, sprintf("%s_peaks-per-gene.tab", out_name)) write.table(ppg, file = filename_ppg, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote count of peaks per gene to: ", filename_ppg) if (qc_plots) { filename_qcplots = file.path(out_path, sprintf("%s_qcplots.png", out_name)) grDevices::png(filename_qcplots) print(..plot_gene_coverage(ppg, mappability = mappability, num_peaks = num_peaks)) grDevices::dev.off() message("Wrote QC plots to: ",filename_qcplots) } } ###################################################### # Return objects as list return(list( peaks = assigned_peaks, results = enrich, opts = opts, peaks_per_gene = ppg )) }
/R/broadenrich.R
no_license
cotton-lab/chipenrich
R
false
false
16,560
r
#' Run Broad-Enrich on broad genomic regions #' #' Broad-Enrich is designed for use with broad peaks that may intersect multiple #' gene loci, and cumulatively cover greater than 5\% of the genome. For example, #' ChIP-seq experiments for histone modifications. For more details, see the #' 'Broad-Enrich Method' section below. For help choosing a method, see the #' 'Choosing A Method' section below, or see the vignette. #' #' @section Broad-Enrich Method: #' The Broad-Enrich method uses the cumulative peak coverage of genes in its model #' for enrichment: \code{GO ~ ratio + s(log10_length)}. Here, \code{GO} is a #' binary vector indicating whether a gene is in the gene set being tested, #' \code{ratio} is a numeric vector indicating the ratio of the gene covered by #' peaks, and \code{s(log10_length)} is a binomial cubic smoothing spline which #' adjusts for the relationship between gene coverage and locus length. #' #' @section Choosing A Method: #' The following guidelines are intended to help select an enrichment function: #' \describe{ #' \item{broadenrich():}{ is designed for use with broad peaks that may intersect #' multiple gene loci, and cumulatively cover greater than 5\% of the genome. For #' example, ChIP-seq experiments for histone modifications.} #' \item{chipenrich():}{ is designed for use with 1,000s or 10,000s of narrow #' peaks which results in fewer gene loci containing a peak overall. For example, #' ChIP-seq experiments for transcription factors.} #' \item{polyenrich():}{ is also designed for narrow peaks, for experiments with #' 100,000s of peaks, or in cases where the number of binding sites per gene affects #' its regulation. If unsure whether to use chipenrich or polyenrich, then we recommend #' hybridenrich.} #' \item{hybridenrich():}{ is a combination of chipenrich and polyenrich, to be #' used when one is unsure which is the optimal method. 
} #' } #' #' @section Randomizations: #' Randomization of locus definitions allows for the assessment of Type I Error #' under the null hypothesis. The randomization codes are: #' \describe{ #' \item{\code{NULL}:}{ No randomizations, the default.} #' \item{'complete':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together, without regard for the chromosome location, or locus length. #' The null hypothesis is that there is no true gene set enrichment.} #' \item{'bylength':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together within bins of 100 genes sorted by locus length. The null #' hypothesis is that there is no true gene set enrichment, but with preserved locus #' length relationship.} #' \item{'bylocation':}{ Shuffle the \code{gene_id} and \code{symbol} columns of the #' \code{locusdef} together within bins of 50 genes sorted by genomic location. The null #' hypothesis is that there is no true gene set enrichment, but with preserved #' genomic location.} #' } #' The return value with a selected randomization is the same list as without. #' To assess the Type I error, the \code{alpha} level for the particular data set #' can be calculated by dividing the total number of gene sets with p-value < \code{alpha} #' by the total number of tests. Users may want to perform multiple randomizations #' for a set of peaks and take the median of the \code{alpha} values. #' #' @param peaks Either a file path or a \code{data.frame} of peaks in BED-like #' format. If a file path, the following formats are fully supported via their #' file extensions: .bed, .broadPeak, .narrowPeak, .gff3, .gff2, .gff, and .bedGraph #' or .bdg. BED3 through BED6 files are supported under the .bed extension. Files #' without these extensions are supported under the conditions that the first 3 #' columns correspond to 'chr', 'start', and 'end' and that there is either no #' header column, or it is commented out. 
If a \code{data.frame} A BEDX+Y style #' \code{data.frame}. See \code{GenomicRanges::makeGRangesFromDataFrame} for #' acceptable column names. #' @param out_name Prefix string to use for naming output files. This should not #' contain any characters that would be illegal for the system being used (Unix, #' Windows, etc.) The default value is "broadenrich", and a file "broadenrich_results.tab" #' is produced. If \code{qc_plots} is set, then a file "broadenrich_qcplots.png" #' is produced containing a number of quality control plots. If \code{out_name} #' is set to NULL, no files are written, and results then must be retrieved from #' the list returned by \code{broadenrich}. #' @param out_path Directory to which results files will be written out. Defaults #' to the current working directory as returned by \code{\link{getwd}}. #' @param genome One of the \code{supported_genomes()}. #' @param genesets A character vector of geneset databases to be tested for #' enrichment. See \code{supported_genesets()}. Alternately, a file path to a #' a tab-delimited text file with header and first column being the geneset ID #' or name, and the second column being Entrez Gene IDs. For an example custom #' gene set file, see the vignette. #' @param locusdef One of: 'nearest_tss', 'nearest_gene', 'exon', 'intron', '1kb', #' '1kb_outside', '1kb_outside_upstream', '5kb', '5kb_outside', '5kb_outside_upstream', #' '10kb', '10kb_outside', '10kb_outside_upstream'. For a description of each, #' see the vignette or \code{\link{supported_locusdefs}}. Alternately, a file path for #' a custom locus definition. NOTE: Must be for a \code{supported_genome()}, and #' must have columns 'chr', 'start', 'end', and 'gene_id' or 'geneid'. For an #' example custom locus definition file, see the vignette. #' @param mappability One of \code{NULL}, a file path to a custom mappability file, #' or an \code{integer} for a valid read length given by \code{supported_read_lengths}. 
#' If a file, it should contain a header with two column named 'gene_id' and 'mappa'. #' Gene IDs should be Entrez IDs, and mappability values should range from 0 and 1. #' For an example custom mappability file, see the vignette. Default value is NULL. #' @param qc_plots A logical variable that enables the automatic generation of #' plots for quality control. #' @param min_geneset_size Sets the minimum number of genes a gene set may have #' to be considered for enrichment testing. #' @param max_geneset_size Sets the maximum number of genes a gene set may have #' to be considered for enrichment testing. #' @param randomization One of \code{NULL}, 'complete', 'bylength', or 'bylocation'. #' See the Randomizations section below. #' @param n_cores The number of cores to use for enrichment testing. We recommend #' using only up to the maximum number of \emph{physical} cores present, as #' virtual cores do not significantly decrease runtime. Default number of cores #' is set to 1. NOTE: Windows does not support multicore enrichment. #' #' @return A list, containing the following items: #' #' \item{opts }{ A data frame containing the arguments/values passed to \code{broadenrich}.} #' #' \item{peaks }{ #' A data frame containing peak assignments to genes. Peaks which do not overlap #' a gene locus are not included. Each peak that was assigned to a gene is listed, #' along with the peak midpoint or peak interval coordinates (depending on which #' was used), the gene to which the peak was assigned, the locus start and end #' position of the gene, and the distance from the peak to the TSS. #' #' The columns are: #' #' \describe{ #' \item{peak_id}{an ID given to unique combinations of chromosome, peak start, and peak end. } #' \item{chr}{the chromosome the peak originated from. } #' \item{peak_start}{start position of the peak. } #' \item{peak_end}{end position of the peak. } #' \item{gene_id}{the Entrez ID of the gene to which the peak was assigned. 
} #' \item{gene_symbol}{the official gene symbol for the gene_id (above). } #' \item{gene_locus_start}{the start position of the locus for the gene to which the peak was assigned (specified by the locus definition used.) } #' \item{gene_locus_end}{the end position of the locus for the gene to which the peak was assigned (specified by the locus definition used.) } #' \item{overlap_start}{ the start position of the peak overlap with the gene locus.} #' \item{overlap_end}{ the end position of the peak overlap with the gene locus.} #' \item{peak_overlap}{ the base pair overlap of the peak with the gene locus.} #' }} #' #' \item{peaks_per_gene }{ #' A data frame of the count of peaks per gene. The columns are: #' #' \describe{ #' \item{gene_id}{the Entrez Gene ID. } #' \item{length}{the length of the gene's locus (depending on which locus definition you chose.)} #' \item{log10_length}{the log10(locus length) for the gene.} #' \item{num_peaks}{the number of peaks that were assigned to the gene, given the current locus definition. } #' \item{peak}{whether or not the gene is considered to have a peak, as defined by \code{num_peak_threshold}. } #' \item{peak_overlap}{the number of base pairs of the gene covered by a peak.} #' \item{ratio}{the proportion of the gene covered by a peak.} #' }} #' #' \item{results }{ #' A data frame of the results from performing the gene set enrichment test on #' each geneset that was requested (all genesets are merged into one final data #' frame.) The columns are: #' #' \describe{ #' \item{Geneset.ID}{the identifier for a given gene set from the selected database. For example, GO:0000003. } #' \item{Geneset.Type}{ specifies from which database the Geneset.ID originates. For example, "Gene Ontology Biological Process."} #' \item{Description}{ gives a definition of the geneset. 
For example, "reproduction."} #' \item{P.Value}{the probability of observing the degree of enrichment of the gene set given the null hypothesis that peaks are not associated with any gene sets.} #' \item{FDR}{the false discovery rate proposed by Bejamini \& Hochberg for adjusting the p-value to control for family-wise error rate.} #' \item{Odds.Ratio}{the estimated odds that peaks are associated with a given gene set compared to the odds that peaks are associated with other gene sets, after controlling for locus length and/or mappability. An odds ratio greater than 1 indicates enrichment, and less than 1 indicates depletion.} #' \item{N.Geneset.Genes}{the number of genes in the gene set.} #' \item{N.Geneset.Peak.Genes}{the number of genes in the genes set that were assigned at least one peak.} #' \item{Geneset.Avg.Gene.Length}{the average length of the genes in the gene set.} #' \item{Geneset.Avg.Gene.Coverage}{the mean proportion of the gene loci in the gene set covered by a peak.} #' \item{Geneset.Peak.Genes}{the list of genes from the gene set that had at least one peak assigned.} #' #' }} #' #' @family enrichment functions #' #' @examples #' #' # Run Broad-Enrich using an example dataset, assigning peaks to the nearest TSS, #' # and on a small custom geneset #' data(peaks_H3K4me3_GM12878, package = 'chipenrich.data') #' peaks_H3K4me3_GM12878 = subset(peaks_H3K4me3_GM12878, #' peaks_H3K4me3_GM12878$chrom == 'chr1') #' gs_path = system.file('extdata','vignette_genesets.txt', package='chipenrich') #' results = broadenrich(peaks_H3K4me3_GM12878, locusdef='nearest_tss', #' genome = 'hg19', genesets=gs_path, out_name=NULL) #' #' # Get the list of peaks that were assigned to genes. #' assigned_peaks = results$peaks #' #' # Get the results of enrichment testing. 
#' enrich = results$results #' #' @export #' @include constants.R utils.R supported.R setup.R randomize.R #' @include read.R assign_peaks.R peaks_per_gene.R #' @include plot_gene_coverage.R #' @include test_broadenrich.R broadenrich = function( peaks, out_name = "broadenrich", out_path = getwd(), genome = supported_genomes(), genesets = c( 'GOBP', 'GOCC', 'GOMF'), locusdef = "nearest_tss", mappability = NULL, qc_plots = TRUE, min_geneset_size = 15, max_geneset_size = 2000, randomization = NULL, n_cores = 1 ) { genome = match.arg(genome) n_cores = reset_ncores_for_windows(n_cores) ############################################################################ # Collect options for opts output opts_list = as.list(sys.call()) opts_list = opts_list[2:length(opts_list)] opts = data.frame( parameters = names(opts_list), values = as.character(opts_list), stringsAsFactors = FALSE ) ############################################################################ # Setup locus definitions, genesets, and mappability ldef_list = setup_locusdef(locusdef, genome, randomization) ldef = ldef_list[['ldef']] tss = ldef_list[['tss']] geneset_list = setup_genesets(gs_codes = genesets, ldef_obj = ldef, genome = genome, min_geneset_size = min_geneset_size, max_geneset_size = max_geneset_size) mappa = setup_mappa(mappa_code = mappability, genome = genome, ldef_code = locusdef, ldef_obj = ldef) ############################################################################ ############################################################################ # Start enrichment process ############################################################################ ############################################################################ ###################################################### # Read in and format peaks (from data.frame or file) if (class(peaks) == "data.frame") { message('Reading peaks from data.frame...') peakobj = load_peaks(peaks) } else if (class(peaks) == "character") { peakobj = 
read_bed(peaks) } # Number of peaks in data. num_peaks = length(peakobj) ###################################################### # Assign peaks to genes. NOTE: If method = 'broadenrich' use # assign_peak_segments(), otherwise use assign_peaks(). message("Assigning peaks to genes with assign_peak_segments(...) ..") assigned_peaks = assign_peak_segments(peakobj, ldef) peak_genes = unique(assigned_peaks$gene_id) ###################################################### # Compute peaks per gene table ppg = num_peaks_per_gene(assigned_peaks, ldef, mappa) # Add gene overlaps for broadenrich message("Calculating peak overlaps with gene loci..") ppg = calc_peak_gene_overlap(assigned_peaks,ppg) ###################################################### # Enrichment results = list() for (gobj in geneset_list) { message("Test: Broad-Enrich") message(sprintf("Genesets: %s", gobj@type)) message("Running tests..") rtemp = test_broadenrich(gobj, ppg, n_cores) # Annotate with geneset descriptions. rtemp$"Description" = as.character(mget(rtemp$Geneset.ID, gobj@set.name, ifnotfound=NA)) rtemp$"Geneset.Type" = gobj@type results[[gobj@type]] = rtemp } enrich = Reduce(rbind,results) ###################################################### # Post-process enrichment # Order columns, add enriched/depleted column as needed, remove bad tests, # sort by p-value, rename rownames to integers enrich = post_process_enrichments(enrich) ###################################################### # Write result objects to files if (!is.null(out_name)) { filename_analysis = file.path(out_path, sprintf("%s_results.tab", out_name)) write.table(enrich, file = filename_analysis, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote results to: ", filename_analysis) filename_peaks = file.path(out_path, sprintf("%s_peaks.tab", out_name)) write.table(assigned_peaks, file = filename_peaks, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote peak-to-gene assignments to: ", filename_peaks) filename_opts = 
file.path(out_path, sprintf("%s_opts.tab", out_name)) write.table(opts, file = filename_opts, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote run options/arguments to: ", filename_opts) filename_ppg = file.path(out_path, sprintf("%s_peaks-per-gene.tab", out_name)) write.table(ppg, file = filename_ppg, row.names = FALSE, quote = FALSE, sep = "\t") message("Wrote count of peaks per gene to: ", filename_ppg) if (qc_plots) { filename_qcplots = file.path(out_path, sprintf("%s_qcplots.png", out_name)) grDevices::png(filename_qcplots) print(..plot_gene_coverage(ppg, mappability = mappability, num_peaks = num_peaks)) grDevices::dev.off() message("Wrote QC plots to: ",filename_qcplots) } } ###################################################### # Return objects as list return(list( peaks = assigned_peaks, results = enrich, opts = opts, peaks_per_gene = ppg )) }
#################################################### ### Cross-link FIA plots with climatic variables ### #################################################### # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Any results obtained with this code should refer to the following publication: # Liénard, J. and Harrisson, J. and Strigul, N., "U.S. Forest Response to Projected Climate-Related Stress: a Tolerance Perspective", Global Change Biology (2016), doi: 10.1111/gcb.13291 ##### ## main function: extract_tmpppt = function(fia_db, p) { require('raster') require('rgdal') tmps = raster(paste0(p,'WORLDCLIM/bio_1.bil')) ppts = raster(paste0(p,'WORLDCLIM/bio_12.bil')) coords = cbind(fia_db$LON, fia_db$LAT) coords = SpatialPoints(coords, proj4string = CRS("+proj=longlat +datum=NAD83")) coords = spTransform(coords, crs(tmps)) tmps = extract(tmps,coords) ppts = extract(ppts,coords) return(list(tmps=tmps, ppts=ppts)) } # shows the tolerance of FIA stands in the climatic space plot_tolerance_climatic = function(tmps, ppts, val, title_label, cutoff=NA) { xr = sample(length(val)) plot(tmps[xr]/10, ppts[xr]/10, pch=1, col=colm100[cut(val[xr],breaks=seq(0,1,le=101),include.lowest = T)], xlim=c(-5,30),bty='n',las=1, xlab='Temperature (⁰C)',ylab='Precipitation (mm/month)') mtext(title_label,font = 2) fudgeit(c(0, 1),colm100,smallplot=c(0.825,0.85,0.2,0.8)) }
/clim_extract.R
no_license
renjianning/TDM
R
false
false
1,575
r
#################################################### ### Cross-link FIA plots with climatic variables ### #################################################### # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Any results obtained with this code should refer to the following publication: # Liénard, J. and Harrisson, J. and Strigul, N., "U.S. Forest Response to Projected Climate-Related Stress: a Tolerance Perspective", Global Change Biology (2016), doi: 10.1111/gcb.13291 ##### ## main function: extract_tmpppt = function(fia_db, p) { require('raster') require('rgdal') tmps = raster(paste0(p,'WORLDCLIM/bio_1.bil')) ppts = raster(paste0(p,'WORLDCLIM/bio_12.bil')) coords = cbind(fia_db$LON, fia_db$LAT) coords = SpatialPoints(coords, proj4string = CRS("+proj=longlat +datum=NAD83")) coords = spTransform(coords, crs(tmps)) tmps = extract(tmps,coords) ppts = extract(ppts,coords) return(list(tmps=tmps, ppts=ppts)) } # shows the tolerance of FIA stands in the climatic space plot_tolerance_climatic = function(tmps, ppts, val, title_label, cutoff=NA) { xr = sample(length(val)) plot(tmps[xr]/10, ppts[xr]/10, pch=1, col=colm100[cut(val[xr],breaks=seq(0,1,le=101),include.lowest = T)], xlim=c(-5,30),bty='n',las=1, xlab='Temperature (⁰C)',ylab='Precipitation (mm/month)') mtext(title_label,font = 2) fudgeit(c(0, 1),colm100,smallplot=c(0.825,0.85,0.2,0.8)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{teams_links} \alias{teams_links} \title{Teams Dictionary Team link reference lookup for the package} \format{ A data frame with 357 rows and 6 variables:\ \describe{ \item{\code{Team}}{character.} \item{\code{Team.link}}{character.} \item{\code{team.link.ref}}{character.} \item{\code{Year}}{double.} \item{\code{Conf}}{character.} \item{\code{Conf.link}}{character.} \item{\code{conf.link.ref}}{character.} } } \usage{ teams_links } \description{ Teams Dictionary Team link reference lookup for the package } \keyword{datasets}
/man/teams_links.Rd
permissive
mrcaseb/hoopR
R
false
true
635
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{teams_links} \alias{teams_links} \title{Teams Dictionary Team link reference lookup for the package} \format{ A data frame with 357 rows and 6 variables:\ \describe{ \item{\code{Team}}{character.} \item{\code{Team.link}}{character.} \item{\code{team.link.ref}}{character.} \item{\code{Year}}{double.} \item{\code{Conf}}{character.} \item{\code{Conf.link}}{character.} \item{\code{conf.link.ref}}{character.} } } \usage{ teams_links } \description{ Teams Dictionary Team link reference lookup for the package } \keyword{datasets}
library(ggplot2) library(gridExtra) N <- seq(10, 150, by = 10) D <- seq(1:8) error_plot_avg <- data.frame(n = integer(), pse = numeric(), d = character()) ## PSE seeds <- seq(1:200) for(seed in seeds){ # sample data set.seed(seed) X_i <- runif(max(N), 0, 1) e_i <- rnorm(max(N), 0, 0.2) Y_i <- sin(2*pi*X_i^3)^3 + e_i # initialize df for plotting error_plot <- data.frame(n = integer(), pse = numeric(), d = character()) # compute CV for dimension-D model on N datapoints for (d in D) { d_plot <- data.frame(n = N, pse = rep(0, length(N)), d = paste0("d=", d)) for (n in N) { m_hat = lm(Y_i[1:n] ~ poly(X_i[1:n], d)) pse = sum((residuals(m_hat) / (1 - hatvalues(m_hat)))^2) / n d_plot[d_plot$n == n, "pse"] <- pse } error_plot <- rbind(error_plot, d_plot) } if(nrow(error_plot_avg) == 0){ error_plot_avg <- error_plot }else{ error_plot_avg[, "pse"] <- error_plot_avg[, "pse"] + error_plot[, "pse"] }} error_plot_avg[, "pse"] <- error_plot_avg[, "pse"] / length(seeds) # plot ggplot(data = error_plot, aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d)) + coord_cartesian(ylim = c(0, 0.35)) + labs(x = "n", y = "PSE", color = "Dimension") frames = 15 for(i in 1:frames){ # creating a name for each plot file with leading zeros if (i < 10) {name = paste('000',i,'plot.png',sep='')} if (i < 100 && i >= 10) {name = paste('00',i,'plot.png', sep='')} if (i >= 100) {name = paste('0', i,'plot.png', sep='')} n <- 10*i min_pse = min(error_plot_avg[error_plot_avg$n == n, "pse"]) min_d = error_plot_avg[error_plot_avg$n == n & error_plot_avg$pse == min_pse, "d"] min_d = as.integer(substr(min_d, 3, 3)) d_sizes <- rep(.5, length(D)) d_sizes[min_d] = 2 #saves the plot as a .png file in the working directory png(name) p <- ggplot(data = error_plot[error_plot$n <= n, ] , aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d, size = d)) + scale_size_manual(values = d_sizes, guide = FALSE) + coord_cartesian(ylim = c(0, 0.35), xlim = c(0, 150)) + guides(colour = guide_legend(override.aes = 
list(size = d_sizes))) + labs(x = "n", y = "PSE", color = "Dimension") print(p) dev.off() } ## Fitted Results # sample data set.seed(750) X_i <- runif(max(N), 0, 1) e_i <- rnorm(max(N), 0, 0.2) Y_i <- sin(2*pi*X_i^3)^3 + e_i true_reg <- function(x){sin(2*pi*x^3)^3} opt_D <- c(1, 2, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7) for(i in 1:frames){ # creating a name for each plot file with leading zeros if (i < 10) {name = paste('000',i,'fit.png',sep='')} if (i < 100 && i >= 10) {name = paste('00',i,'fit.png', sep='')} n <- 10*i min_pse = min(error_plot_avg[error_plot_avg$n == n, "pse"]) min_d = error_plot_avg[error_plot_avg$n == n & error_plot_avg$pse == min_pse, "d"] min_d = as.integer(substr(min_d, 3, 3)) d_sizes <- rep(.5, length(D)) d_sizes[min_d] = 2 #saves the plot as a .png file in the working directory png(name) p <- ggplot(data = error_plot[error_plot$n <= n, ] , aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d, size = d)) + scale_size_manual(values = d_sizes, guide = FALSE) + coord_cartesian(ylim = c(0, 0.35), xlim = c(0, 150)) + guides(colour = guide_legend(override.aes = list(size = d_sizes))) + labs(x = "n", y = "PSE", color = "Dimension") # fitted values x <- X_i[1:N[i]] y <- Y_i[1:N[i]] df <- data.frame(x = x, y = y) m_hat <- lm(y ~ poly(x, opt_D[i]), data = df) ix <- sort(x, index.return = TRUE)$ix m_hat <- approxfun(x[ix], predict(m_hat)[ix]) grid <- seq(0, 1, by = .01) pred_df <- data.frame(x = grid, pred_y = m_hat(grid)) f <- ggplot(data = df, aes(x = x, y = y)) + geom_point() + geom_line(data = pred_df, aes(y = pred_y, x = x)) + coord_cartesian(ylim = c(-1.2, 1.2), xlim = c(0, 1)) grid.arrange(p, f) dev.off() } # ezgif.com for making gif
/R_code/sieves_simulation.R
no_license
matthewrw/Sieves_Project
R
false
false
3,979
r
library(ggplot2) library(gridExtra) N <- seq(10, 150, by = 10) D <- seq(1:8) error_plot_avg <- data.frame(n = integer(), pse = numeric(), d = character()) ## PSE seeds <- seq(1:200) for(seed in seeds){ # sample data set.seed(seed) X_i <- runif(max(N), 0, 1) e_i <- rnorm(max(N), 0, 0.2) Y_i <- sin(2*pi*X_i^3)^3 + e_i # initialize df for plotting error_plot <- data.frame(n = integer(), pse = numeric(), d = character()) # compute CV for dimension-D model on N datapoints for (d in D) { d_plot <- data.frame(n = N, pse = rep(0, length(N)), d = paste0("d=", d)) for (n in N) { m_hat = lm(Y_i[1:n] ~ poly(X_i[1:n], d)) pse = sum((residuals(m_hat) / (1 - hatvalues(m_hat)))^2) / n d_plot[d_plot$n == n, "pse"] <- pse } error_plot <- rbind(error_plot, d_plot) } if(nrow(error_plot_avg) == 0){ error_plot_avg <- error_plot }else{ error_plot_avg[, "pse"] <- error_plot_avg[, "pse"] + error_plot[, "pse"] }} error_plot_avg[, "pse"] <- error_plot_avg[, "pse"] / length(seeds) # plot ggplot(data = error_plot, aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d)) + coord_cartesian(ylim = c(0, 0.35)) + labs(x = "n", y = "PSE", color = "Dimension") frames = 15 for(i in 1:frames){ # creating a name for each plot file with leading zeros if (i < 10) {name = paste('000',i,'plot.png',sep='')} if (i < 100 && i >= 10) {name = paste('00',i,'plot.png', sep='')} if (i >= 100) {name = paste('0', i,'plot.png', sep='')} n <- 10*i min_pse = min(error_plot_avg[error_plot_avg$n == n, "pse"]) min_d = error_plot_avg[error_plot_avg$n == n & error_plot_avg$pse == min_pse, "d"] min_d = as.integer(substr(min_d, 3, 3)) d_sizes <- rep(.5, length(D)) d_sizes[min_d] = 2 #saves the plot as a .png file in the working directory png(name) p <- ggplot(data = error_plot[error_plot$n <= n, ] , aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d, size = d)) + scale_size_manual(values = d_sizes, guide = FALSE) + coord_cartesian(ylim = c(0, 0.35), xlim = c(0, 150)) + guides(colour = guide_legend(override.aes = 
list(size = d_sizes))) + labs(x = "n", y = "PSE", color = "Dimension") print(p) dev.off() } ## Fitted Results # sample data set.seed(750) X_i <- runif(max(N), 0, 1) e_i <- rnorm(max(N), 0, 0.2) Y_i <- sin(2*pi*X_i^3)^3 + e_i true_reg <- function(x){sin(2*pi*x^3)^3} opt_D <- c(1, 2, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7) for(i in 1:frames){ # creating a name for each plot file with leading zeros if (i < 10) {name = paste('000',i,'fit.png',sep='')} if (i < 100 && i >= 10) {name = paste('00',i,'fit.png', sep='')} n <- 10*i min_pse = min(error_plot_avg[error_plot_avg$n == n, "pse"]) min_d = error_plot_avg[error_plot_avg$n == n & error_plot_avg$pse == min_pse, "d"] min_d = as.integer(substr(min_d, 3, 3)) d_sizes <- rep(.5, length(D)) d_sizes[min_d] = 2 #saves the plot as a .png file in the working directory png(name) p <- ggplot(data = error_plot[error_plot$n <= n, ] , aes(x = n, y = pse, group = d)) + geom_line(aes(colour = d, size = d)) + scale_size_manual(values = d_sizes, guide = FALSE) + coord_cartesian(ylim = c(0, 0.35), xlim = c(0, 150)) + guides(colour = guide_legend(override.aes = list(size = d_sizes))) + labs(x = "n", y = "PSE", color = "Dimension") # fitted values x <- X_i[1:N[i]] y <- Y_i[1:N[i]] df <- data.frame(x = x, y = y) m_hat <- lm(y ~ poly(x, opt_D[i]), data = df) ix <- sort(x, index.return = TRUE)$ix m_hat <- approxfun(x[ix], predict(m_hat)[ix]) grid <- seq(0, 1, by = .01) pred_df <- data.frame(x = grid, pred_y = m_hat(grid)) f <- ggplot(data = df, aes(x = x, y = y)) + geom_point() + geom_line(data = pred_df, aes(y = pred_y, x = x)) + coord_cartesian(ylim = c(-1.2, 1.2), xlim = c(0, 1)) grid.arrange(p, f) dev.off() } # ezgif.com for making gif
# Regression tests for subset() on mpcross objects (mpMap2 package).
# Covers: subsetting by marker index (single and paired datasets, codominant
# and dominant markers), rejection of duplicated markers/lines, pedigree class
# demotion when subsetting by lines, and validity after subsetting an object
# that carries imputation data.
context("Test subset function")

test_that("Checking subset on object of class mpcross by markers, with a single dataset",
	{
		#Test function for an object with a single dataset.
		#Checks that the subsetted object is valid, that finals/founders keep only
		#the requested marker column, that the pedigree is untouched, and that the
		#single remaining hetData entry matches the original marker's entry.
		testSingle <- function(cross, subsetted, marker)
		{
			expect_identical(validObject(subsetted, complete=TRUE), TRUE)
			expect_identical(subsetted@geneticData[[1]]@finals, cross@geneticData[[1]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@founders, cross@geneticData[[1]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@pedigree, cross@geneticData[[1]]@pedigree)
			expect_identical(length(subsetted@geneticData[[1]]@hetData), 1L)
			expect_identical(cross@geneticData[[1]]@hetData[[marker]], subsetted@geneticData[[1]]@hetData[[1]])
		}
		#Simulate an F2 cross with 11 evenly spaced markers on a 100 cM chromosome.
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(500)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Checks on marker indices: negative, zero and out-of-range indices must
		#error; the full valid range 1:11 must not.
		expect_that(subset(cross, markers = -1), throws_error())
		expect_that(subset(cross, markers = (-3):0), throws_error())
		expect_that(subset(cross, markers = 1:12), throws_error())
		expect_error(subset(cross, markers = 1:11), NA)
		#Test without dominant markers
		subsetted <- subset(cross, markers = 2)
		testSingle(cross, subsetted, 2)
		#Tests with dominant markers (markers 2-4 exercised individually)
		crossDominant <- cross + biparentalDominant()
		subsetted <- subset(crossDominant, markers = 2)
		testSingle(crossDominant, subsetted, 2)
		subsetted <- subset(crossDominant, markers = 3)
		testSingle(crossDominant, subsetted, 3)
		subsetted <- subset(crossDominant, markers = 4)
		testSingle(crossDominant, subsetted, 4)
	})

test_that("Subset refuses to duplicate markers and lines",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Repeating every marker / every line name twice must raise the
		#"Duplicates detected" error.
		expect_that(subset(cross, markers = rep(1:nMarkers(cross), each = 2)), throws_error("Duplicates detected"))
		expect_that(subset(cross, lines = rep(rownames(cross@geneticData[[1]]@finals), each = 2)), throws_error("Duplicates detected"))
	})

test_that("Subset changes the pedigree from detailedPedigree to pedigree when subsetting by lines",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		expect_that(cross@geneticData[[1]]@pedigree, is_a("detailedPedigree"))
		#Subsetting by markers keeps the detailedPedigree class...
		subsetted <- subset(cross, markers = 1:nMarkers(cross))
		expect_that(subsetted@geneticData[[1]]@pedigree, is_a("detailedPedigree"))
		#...but subsetting by lines demotes it to a plain pedigree.
		subsetted <- subset(cross, lines = 1:2)
		expect_true(!is(subsetted@geneticData[[1]]@pedigree, "detailedPedigree"))
	})

test_that("Checking subset on object of class mpcross by markers, with two datasets",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		#Test function for an object with a pair of datasets.
		#Same checks as testSingle, applied to both geneticData entries.
		testPair <- function(cross, subsetted, marker)
		{
			expect_identical(length(cross@geneticData), 2L)
			expect_identical(length(subsetted@geneticData), 2L)
			expect_identical(validObject(subsetted, complete=TRUE), TRUE)
			expect_identical(subsetted@geneticData[[1]]@finals, cross@geneticData[[1]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[2]]@finals, cross@geneticData[[2]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@founders, cross@geneticData[[1]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[2]]@founders, cross@geneticData[[2]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@pedigree, cross@geneticData[[1]]@pedigree)
			expect_identical(subsetted@geneticData[[2]]@pedigree, cross@geneticData[[2]]@pedigree)
			expect_identical(cross@geneticData[[1]]@hetData[[marker]], subsetted@geneticData[[1]]@hetData[[1]])
			expect_identical(cross@geneticData[[2]]@hetData[[marker]], subsetted@geneticData[[2]]@hetData[[1]])
		}
		cross1 <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		cross2 <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Rename the final lines of cross1 (append ",2") so the two simulated
		#crosses can be combined without line-name clashes.
		pedigreeSubset <- cross1@geneticData[[1]]@pedigree@lineNames %in% rownames(cross1@geneticData[[1]]@finals)
		cross1@geneticData[[1]]@pedigree@lineNames[pedigreeSubset] <- paste0(cross1@geneticData[[1]]@pedigree@lineNames[pedigreeSubset], ",2")
		rownames(cross1@geneticData[[1]]@finals) <- paste0(rownames(cross1@geneticData[[1]]@finals), ",2")
		#Test codominant
		cross <- cross1 + cross2
		subsetted <- subset(cross, markers = 2)
		testPair(cross, subsetted, 2)
		#Test dominant (applied to each dataset separately)
		crossDominant <- cross
		crossDominant@geneticData[[1]] <- crossDominant@geneticData[[1]] + biparentalDominant()
		crossDominant@geneticData[[2]] <- crossDominant@geneticData[[2]] + biparentalDominant()
		subsetted <- subset(crossDominant, markers = 2)
		testPair(crossDominant, subsetted, 2)
		subsetted <- subset(crossDominant, markers = 3)
		testPair(crossDominant, subsetted, 3)
		subsetted <- subset(crossDominant, markers = 4)
		testPair(crossDominant, subsetted, 4)
	})

test_that("Checking subset by lines when imputation data is present",
	{
		map <- qtl::sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
		pedigree <- f2Pedigree(500)
		cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
		mapped <- new("mpcrossMapped", cross, map = map)
		suppressWarnings(result <- imputeFounders(mapped, errorProb = 0))
		#Permuting the line order must still yield a valid object, i.e. the
		#imputation data is reordered consistently with the genetic data.
		subsetted <- subset(result, lines = sample(lineNames(result)))
		expect_error(validObject(subsetted, complete = TRUE), NA)
	})
/tests/testthat/test-subset.mpcross.R
no_license
rohan-shah/mpMap2
R
false
false
5,836
r
# Regression tests for subset() on mpcross objects (mpMap2 package).
# Covers: subsetting by marker index (single and paired datasets, codominant
# and dominant markers), rejection of duplicated markers/lines, pedigree class
# demotion when subsetting by lines, and validity after subsetting an object
# that carries imputation data.
context("Test subset function")

test_that("Checking subset on object of class mpcross by markers, with a single dataset",
	{
		#Test function for an object with a single dataset.
		#Checks that the subsetted object is valid, that finals/founders keep only
		#the requested marker column, that the pedigree is untouched, and that the
		#single remaining hetData entry matches the original marker's entry.
		testSingle <- function(cross, subsetted, marker)
		{
			expect_identical(validObject(subsetted, complete=TRUE), TRUE)
			expect_identical(subsetted@geneticData[[1]]@finals, cross@geneticData[[1]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@founders, cross@geneticData[[1]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@pedigree, cross@geneticData[[1]]@pedigree)
			expect_identical(length(subsetted@geneticData[[1]]@hetData), 1L)
			expect_identical(cross@geneticData[[1]]@hetData[[marker]], subsetted@geneticData[[1]]@hetData[[1]])
		}
		#Simulate an F2 cross with 11 evenly spaced markers on a 100 cM chromosome.
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(500)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Checks on marker indices: negative, zero and out-of-range indices must
		#error; the full valid range 1:11 must not.
		expect_that(subset(cross, markers = -1), throws_error())
		expect_that(subset(cross, markers = (-3):0), throws_error())
		expect_that(subset(cross, markers = 1:12), throws_error())
		expect_error(subset(cross, markers = 1:11), NA)
		#Test without dominant markers
		subsetted <- subset(cross, markers = 2)
		testSingle(cross, subsetted, 2)
		#Tests with dominant markers (markers 2-4 exercised individually)
		crossDominant <- cross + biparentalDominant()
		subsetted <- subset(crossDominant, markers = 2)
		testSingle(crossDominant, subsetted, 2)
		subsetted <- subset(crossDominant, markers = 3)
		testSingle(crossDominant, subsetted, 3)
		subsetted <- subset(crossDominant, markers = 4)
		testSingle(crossDominant, subsetted, 4)
	})

test_that("Subset refuses to duplicate markers and lines",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Repeating every marker / every line name twice must raise the
		#"Duplicates detected" error.
		expect_that(subset(cross, markers = rep(1:nMarkers(cross), each = 2)), throws_error("Duplicates detected"))
		expect_that(subset(cross, lines = rep(rownames(cross@geneticData[[1]]@finals), each = 2)), throws_error("Duplicates detected"))
	})

test_that("Subset changes the pedigree from detailedPedigree to pedigree when subsetting by lines",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		cross <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		expect_that(cross@geneticData[[1]]@pedigree, is_a("detailedPedigree"))
		#Subsetting by markers keeps the detailedPedigree class...
		subsetted <- subset(cross, markers = 1:nMarkers(cross))
		expect_that(subsetted@geneticData[[1]]@pedigree, is_a("detailedPedigree"))
		#...but subsetting by lines demotes it to a plain pedigree.
		subsetted <- subset(cross, lines = 1:2)
		expect_true(!is(subsetted@geneticData[[1]]@pedigree, "detailedPedigree"))
	})

test_that("Checking subset on object of class mpcross by markers, with two datasets",
	{
		map <- qtl::sim.map(len = 100, n.mar = 11, anchor.tel=TRUE, include.x=FALSE, eq.spacing=TRUE)
		f2Pedigree <- f2Pedigree(1000)
		#Test function for an object with a pair of datasets.
		#Same checks as testSingle, applied to both geneticData entries.
		testPair <- function(cross, subsetted, marker)
		{
			expect_identical(length(cross@geneticData), 2L)
			expect_identical(length(subsetted@geneticData), 2L)
			expect_identical(validObject(subsetted, complete=TRUE), TRUE)
			expect_identical(subsetted@geneticData[[1]]@finals, cross@geneticData[[1]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[2]]@finals, cross@geneticData[[2]]@finals[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@founders, cross@geneticData[[1]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[2]]@founders, cross@geneticData[[2]]@founders[,marker,drop=F])
			expect_identical(subsetted@geneticData[[1]]@pedigree, cross@geneticData[[1]]@pedigree)
			expect_identical(subsetted@geneticData[[2]]@pedigree, cross@geneticData[[2]]@pedigree)
			expect_identical(cross@geneticData[[1]]@hetData[[marker]], subsetted@geneticData[[1]]@hetData[[1]])
			expect_identical(cross@geneticData[[2]]@hetData[[marker]], subsetted@geneticData[[2]]@hetData[[1]])
		}
		cross1 <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		cross2 <- simulateMPCross(map=map, pedigree=f2Pedigree, mapFunction = haldane)
		#Rename the final lines of cross1 (append ",2") so the two simulated
		#crosses can be combined without line-name clashes.
		pedigreeSubset <- cross1@geneticData[[1]]@pedigree@lineNames %in% rownames(cross1@geneticData[[1]]@finals)
		cross1@geneticData[[1]]@pedigree@lineNames[pedigreeSubset] <- paste0(cross1@geneticData[[1]]@pedigree@lineNames[pedigreeSubset], ",2")
		rownames(cross1@geneticData[[1]]@finals) <- paste0(rownames(cross1@geneticData[[1]]@finals), ",2")
		#Test codominant
		cross <- cross1 + cross2
		subsetted <- subset(cross, markers = 2)
		testPair(cross, subsetted, 2)
		#Test dominant (applied to each dataset separately)
		crossDominant <- cross
		crossDominant@geneticData[[1]] <- crossDominant@geneticData[[1]] + biparentalDominant()
		crossDominant@geneticData[[2]] <- crossDominant@geneticData[[2]] + biparentalDominant()
		subsetted <- subset(crossDominant, markers = 2)
		testPair(crossDominant, subsetted, 2)
		subsetted <- subset(crossDominant, markers = 3)
		testPair(crossDominant, subsetted, 3)
		subsetted <- subset(crossDominant, markers = 4)
		testPair(crossDominant, subsetted, 4)
	})

test_that("Checking subset by lines when imputation data is present",
	{
		map <- qtl::sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x=FALSE, eq.spacing=TRUE)
		pedigree <- f2Pedigree(500)
		cross <- simulateMPCross(map=map, pedigree=pedigree, mapFunction = haldane)
		mapped <- new("mpcrossMapped", cross, map = map)
		suppressWarnings(result <- imputeFounders(mapped, errorProb = 0))
		#Permuting the line order must still yield a valid object, i.e. the
		#imputation data is reordered consistently with the genetic data.
		subsetted <- subset(result, lines = sample(lineNames(result)))
		expect_error(validObject(subsetted, complete = TRUE), NA)
	})
## MCMC parameter-walk diagnostic plots for MIMICS_HiRes calibration runs.
## Part 1: reads every MCMC output CSV in Output/, keeps only accepted steps
## (improve == 1), and arranges one trace panel per parameter plus RMSE/r2
## into a saved matrix plot.
## Part 2 ("Alternate plot code"): re-plots a single run, overlaying the
## accepted steps in red on top of the full (grey) chain.
library(tidyverse)
library(grid)
library(gridExtra)
library(scales) #<-- for plot colors
library(ggpubr)
library(ggthemes)
library(ggplot2)

setwd("C:/github/MIMICS_HiRes/MCMC")

### Load all MCMC output csv files from directory
filenames <- list.files(path="Output/",pattern=".*csv")

## Create list of data frame names without the ".csv" part
names <-paste0("MCMC", seq(1,length(filenames)))

### Load all files and stack them into one data frame (MCMC), tagging each
### chain with an ID of the form "Run i".
for(i in 1:length(names)){
  data_in <- read.csv(paste0("Output/",filenames[i]), as.is=T)

  ### Optional: Cut down number of iterations
  #data_in <- data_in %>% filter(iter <= 10000) %>% arrange(iter)

  #Add end row: duplicate the last accepted step so the trace extends to the
  #final iteration of the chain.
  data_improve_steps <- data_in %>% filter(improve == 1)
  data_in <- rbind(data_in, data_improve_steps[nrow(data_improve_steps),])
  data_in$iter[nrow(data_in)] <- nrow(data_in)

  data_in$ID = paste0("Run ", as.character(i))
  #assign(names[i], MC_data)

  if(i == 1) {
    MCMC <- data_in
  } else {
    MCMC <- rbind(MCMC, data_in)
  }
}

### Filter MCMC data to only include steps that improved RMSE
MCMC <- MCMC %>% filter(improve == 1)

############################################
### Create plot of parameter MCMC walks
############################################

colourCount = length(names)

### Change column names for plotting
MCMC <- MCMC %>% rename(Iteration = iter)

#Set plot theme (shared by all panels in the matrix plot)
my_theme <- theme_bw() +
  theme(panel.spacing.x=unit(1.5, "lines"),panel.spacing.y=unit(3, "lines")) +
  theme(legend.position="none") +
  theme(strip.text = element_text(size = 12)) +
  theme(axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0))) +
  theme(axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0))) +
  theme(text = element_text(size=12)) +
  theme(legend.text=element_text(color='grey20',size=12)) +
  theme(legend.title=element_text(color='grey20',size=12)) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())# +
  #theme(panel.background = element_blank()) +
  #theme(strip.background = element_blank()) +
  #theme(strip.placement = "outside") +
  #theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

#set color ramp (greyscale, one colour per run) for the parameter panels
colfunc <-colorRampPalette(c("#000000","#111111","#222222", "#333333", "#444444"))
pcols <- colfunc(length(names))

#set color ramp for RMSE and r2 (reds, one colour per run)
colfunc2 <-colorRampPalette(c("#d54813","#f73c00","#850000"))
pcols2 <- colfunc2(length(names))

#One trace panel per fit statistic / parameter; commented-out axis limits are
#kept for optional zooming.
pRMSE <- ggplot(MCMC, aes(x=Iteration, y=RMSE, colour=factor(ID))) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols2 ,guide = guide_legend(nrow=2))# + xlim(0, 5000)

pr2 <- ggplot(MCMC, aes(x=Iteration, y=r2, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols2 ,guide = guide_legend(nrow=2))# + ylim(0.5, 1)

pTau_x <- ggplot(MCMC, aes(x=Iteration, y=Tau_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 2)

pCUE_x <-ggplot(MCMC, aes(x=Iteration, y=CUE_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 2)

pDesorb_x <- ggplot(MCMC, aes(x=Iteration, y=desorb_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.1, 3)

pFPHYS_x <- ggplot(MCMC, aes(x=Iteration, y=fPHYS_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.1, 3)

pVslope_x <- ggplot(MCMC, aes(x=Iteration, y=Vslope_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pVint_x <- ggplot(MCMC, aes(x=Iteration, y=Vint_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pKslope_x <- ggplot(MCMC, aes(x=Iteration, y=Kslope_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pKint_x <- ggplot(MCMC, aes(x=Iteration, y=Kint_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

# Put all plots together in a matrix
mplot <- ggarrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, pKint_x,
                   ncol=2, nrow=5, common.legend = TRUE, legend="none")

### Save matrix plot
ggsave(plot=mplot, filename = "Post_MCMC_Analysis/Plots/MCMC_diff_plot.jpeg", width = 7, height = 10 , dpi = 600)

# Alternate plot code

#Isolate a specific MCMC run (the first CSV found above)
MCMC_out <- read.csv(paste0("Output/",filenames[1]), as.is=T)

#Add an end row (same last-accepted-step duplication as in the loop above)
improve_steps <- MCMC_out %>% filter(improve == 1)
MCMC_out <- rbind(MCMC_out, improve_steps[nrow(improve_steps),])
MCMC_out$iter[nrow(MCMC_out)] <- nrow(MCMC_out)

#Trim down iterations to simplify plot
MCMC_out <- MCMC_out %>% filter(iter < 60001)

#Full chain in grey, accepted (improve > 0) steps overlaid in red.
pRMSE <- ggplot(MCMC_out, aes(x=iter, y=RMSE)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none") +ylim(1,5)

pr2 <- ggplot(MCMC_out, aes(x=iter, y=r2)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pTau_x <- ggplot(MCMC_out, aes(x=iter, y=Tau_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pCUE_x <-ggplot(MCMC_out, aes(x=iter, y=CUE_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pDesorb_x <- ggplot(MCMC_out, aes(x=iter, y=desorb_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pFPHYS_x <- ggplot(MCMC_out, aes(x=iter, y=fPHYS_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pVslope_x <- ggplot(MCMC_out, aes(x=iter, y=Vslope_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pVint_x <- ggplot(MCMC_out, aes(x=iter, y=Vint_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pKslope_x <- ggplot(MCMC_out, aes(x=iter, y=Kslope_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pKint_x <- ggplot(MCMC_out, aes(x=iter, y=Kint_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

#NOTE(review): pKint_x is built but omitted from this 9-panel arrangement --
#confirm whether that is intentional.
grid.arrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, ncol = 2)

#save plot
png(file="C:/github/MIMICS_HiRes/MC_walk_plot.png", width=5000, height=6000, units="px", res=600)
grid.arrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, ncol = 2)
dev.off()
/MCMC/Post_MCMC_Analysis/MCMC_results_plot.R
permissive
piersond/MIMICS_HiRes
R
false
false
9,513
r
## MCMC parameter-walk diagnostic plots for MIMICS_HiRes calibration runs.
## Part 1: reads every MCMC output CSV in Output/, keeps only accepted steps
## (improve == 1), and arranges one trace panel per parameter plus RMSE/r2
## into a saved matrix plot.
## Part 2 ("Alternate plot code"): re-plots a single run, overlaying the
## accepted steps in red on top of the full (grey) chain.
library(tidyverse)
library(grid)
library(gridExtra)
library(scales) #<-- for plot colors
library(ggpubr)
library(ggthemes)
library(ggplot2)

setwd("C:/github/MIMICS_HiRes/MCMC")

### Load all MCMC output csv files from directory
filenames <- list.files(path="Output/",pattern=".*csv")

## Create list of data frame names without the ".csv" part
names <-paste0("MCMC", seq(1,length(filenames)))

### Load all files and stack them into one data frame (MCMC), tagging each
### chain with an ID of the form "Run i".
for(i in 1:length(names)){
  data_in <- read.csv(paste0("Output/",filenames[i]), as.is=T)

  ### Optional: Cut down number of iterations
  #data_in <- data_in %>% filter(iter <= 10000) %>% arrange(iter)

  #Add end row: duplicate the last accepted step so the trace extends to the
  #final iteration of the chain.
  data_improve_steps <- data_in %>% filter(improve == 1)
  data_in <- rbind(data_in, data_improve_steps[nrow(data_improve_steps),])
  data_in$iter[nrow(data_in)] <- nrow(data_in)

  data_in$ID = paste0("Run ", as.character(i))
  #assign(names[i], MC_data)

  if(i == 1) {
    MCMC <- data_in
  } else {
    MCMC <- rbind(MCMC, data_in)
  }
}

### Filter MCMC data to only include steps that improved RMSE
MCMC <- MCMC %>% filter(improve == 1)

############################################
### Create plot of parameter MCMC walks
############################################

colourCount = length(names)

### Change column names for plotting
MCMC <- MCMC %>% rename(Iteration = iter)

#Set plot theme (shared by all panels in the matrix plot)
my_theme <- theme_bw() +
  theme(panel.spacing.x=unit(1.5, "lines"),panel.spacing.y=unit(3, "lines")) +
  theme(legend.position="none") +
  theme(strip.text = element_text(size = 12)) +
  theme(axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0))) +
  theme(axis.title.x = element_text(margin = margin(t = 10, r = 0, b = 0, l = 0))) +
  theme(text = element_text(size=12)) +
  theme(legend.text=element_text(color='grey20',size=12)) +
  theme(legend.title=element_text(color='grey20',size=12)) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())# +
  #theme(panel.background = element_blank()) +
  #theme(strip.background = element_blank()) +
  #theme(strip.placement = "outside") +
  #theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

#set color ramp (greyscale, one colour per run) for the parameter panels
colfunc <-colorRampPalette(c("#000000","#111111","#222222", "#333333", "#444444"))
pcols <- colfunc(length(names))

#set color ramp for RMSE and r2 (reds, one colour per run)
colfunc2 <-colorRampPalette(c("#d54813","#f73c00","#850000"))
pcols2 <- colfunc2(length(names))

#One trace panel per fit statistic / parameter; commented-out axis limits are
#kept for optional zooming.
pRMSE <- ggplot(MCMC, aes(x=Iteration, y=RMSE, colour=factor(ID))) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols2 ,guide = guide_legend(nrow=2))# + xlim(0, 5000)

pr2 <- ggplot(MCMC, aes(x=Iteration, y=r2, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols2 ,guide = guide_legend(nrow=2))# + ylim(0.5, 1)

pTau_x <- ggplot(MCMC, aes(x=Iteration, y=Tau_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 2)

pCUE_x <-ggplot(MCMC, aes(x=Iteration, y=CUE_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 2)

pDesorb_x <- ggplot(MCMC, aes(x=Iteration, y=desorb_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.1, 3)

pFPHYS_x <- ggplot(MCMC, aes(x=Iteration, y=fPHYS_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.1, 3)

pVslope_x <- ggplot(MCMC, aes(x=Iteration, y=Vslope_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pVint_x <- ggplot(MCMC, aes(x=Iteration, y=Vint_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pKslope_x <- ggplot(MCMC, aes(x=Iteration, y=Kslope_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

pKint_x <- ggplot(MCMC, aes(x=Iteration, y=Kint_x, color=ID)) +
  geom_line(alpha=0.5, size=1.3) +
  geom_point(size=1.5, alpha=0.8, shape=16) +
  my_theme +
  scale_color_manual(values = pcols ,guide = guide_legend(nrow=2))# + ylim(0.5, 4)

# Put all plots together in a matrix
mplot <- ggarrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, pKint_x,
                   ncol=2, nrow=5, common.legend = TRUE, legend="none")

### Save matrix plot
ggsave(plot=mplot, filename = "Post_MCMC_Analysis/Plots/MCMC_diff_plot.jpeg", width = 7, height = 10 , dpi = 600)

# Alternate plot code

#Isolate a specific MCMC run (the first CSV found above)
MCMC_out <- read.csv(paste0("Output/",filenames[1]), as.is=T)

#Add an end row (same last-accepted-step duplication as in the loop above)
improve_steps <- MCMC_out %>% filter(improve == 1)
MCMC_out <- rbind(MCMC_out, improve_steps[nrow(improve_steps),])
MCMC_out$iter[nrow(MCMC_out)] <- nrow(MCMC_out)

#Trim down iterations to simplify plot
MCMC_out <- MCMC_out %>% filter(iter < 60001)

#Full chain in grey, accepted (improve > 0) steps overlaid in red.
pRMSE <- ggplot(MCMC_out, aes(x=iter, y=RMSE)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none") +ylim(1,5)

pr2 <- ggplot(MCMC_out, aes(x=iter, y=r2)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pTau_x <- ggplot(MCMC_out, aes(x=iter, y=Tau_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pCUE_x <-ggplot(MCMC_out, aes(x=iter, y=CUE_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pDesorb_x <- ggplot(MCMC_out, aes(x=iter, y=desorb_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pFPHYS_x <- ggplot(MCMC_out, aes(x=iter, y=fPHYS_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pVslope_x <- ggplot(MCMC_out, aes(x=iter, y=Vslope_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pVint_x <- ggplot(MCMC_out, aes(x=iter, y=Vint_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pKslope_x <- ggplot(MCMC_out, aes(x=iter, y=Kslope_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

pKint_x <- ggplot(MCMC_out, aes(x=iter, y=Kint_x)) +
  geom_line(color="grey50", alpha=0.1) +
  geom_point(size=3, color="grey50", alpha=0.5) +
  geom_line(data=MCMC_out %>% filter(improve > 0), color="red", size=1) +
  geom_point(data=MCMC_out %>% filter(improve > 0), color="red", size=4) +
  theme_minimal() +theme(legend.position = "none")

#NOTE(review): pKint_x is built but omitted from this 9-panel arrangement --
#confirm whether that is intentional.
grid.arrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, ncol = 2)

#save plot
png(file="C:/github/MIMICS_HiRes/MC_walk_plot.png", width=5000, height=6000, units="px", res=600)
grid.arrange(pRMSE, pr2, pTau_x, pCUE_x, pDesorb_x, pFPHYS_x, pVslope_x, pVint_x, pKslope_x, ncol = 2)
dev.off()
% Documentation for combineRgn() in the irene package.
\name{combineRgn}
\alias{combineRgn}
\title{
Combine Genomic Regions
}
\description{
Combine genomic regions from BED files
}
\usage{
combineRgn(files)
}
\arguments{
  \item{files}{
Genomic region filenames in BED format.
}
}
\details{
This function computes the union of genomic regions if input files are more than one, otherwise will return the genomic regions in the only file.
}
\value{
Genomic ranges of the combined regions.
}
\author{
Qi Wang
}
\examples{
}
/man/combineRgn.Rd
permissive
qwang-big/irene
R
false
false
469
rd
% Documentation for combineRgn() in the irene package.
\name{combineRgn}
\alias{combineRgn}
\title{
Combine Genomic Regions
}
\description{
Combine genomic regions from BED files
}
\usage{
combineRgn(files)
}
\arguments{
  \item{files}{
Genomic region filenames in BED format.
}
}
\details{
This function computes the union of genomic regions if input files are more than one, otherwise will return the genomic regions in the only file.
}
\value{
Genomic ranges of the combined regions.
}
\author{
Qi Wang
}
\examples{
}
# Atmospheric carbon dioxide data from Mauna Loa Observatory since 1958.
# data-raw script: reads the raw CSV and stores it as the package dataset
# `co2level` (writes data/co2level.rda).
library(readr)
co2level <- read_csv("data-raw/co2level.csv")
# NOTE(review): devtools::use_data() is deprecated in current devtools in
# favour of usethis::use_data() -- confirm before migrating.
devtools::use_data(co2level, overwrite = TRUE)
/data-raw/co2level.R
no_license
PHP2560-Statistical-Programming-R/r-package-beautiful-day
R
false
false
181
r
# Atmospheric carbon dioxide data from Mauna Loa Observatory since 1958.
# data-raw script: reads the raw CSV and stores it as the package dataset
# `co2level` (writes data/co2level.rda).
library(readr)
co2level <- read_csv("data-raw/co2level.csv")
# NOTE(review): devtools::use_data() is deprecated in current devtools in
# favour of usethis::use_data() -- confirm before migrating.
devtools::use_data(co2level, overwrite = TRUE)
## Word frequency analysis # aim is to take the submissions, look at the most frequenct words, and try and use this to filter submissions that are the same # Make a list of submissions whose top 10 most frequent words are the same? # references: http://onepager.togaware.com/TextMiningO.pdf # https://gist.github.com/benmarwick/11333467 # https://jhuria.wordpress.com/2012/07/01/text-mining-in-r/ # 22June2015 # Ashlee Jollymore ################ ## set working directory rm(list = ls()) ls() Selected_individual <- "/Users/user/Dropbox/PhD Work/WaterAct Paper/Selected_individual" dname <- "/Users/user/Dropbox/PhD Work/WaterAct Paper/Selected_individual/corpus" # where txt files will be stored setwd(Selected_individual) ########## Convert PDF to txt files and input as a tm object (corpus) # Necessary packages library(tm) library(wordcloud) library(SnowballC) library(reshape) library(plyr) library(gsubfn) ############### Convert PDFs to txt files # Necessary to do this prior to doing text analysis. # Not necessary if your files are already in .txt files # read PDFs from file with randomly selected files length(dir(Selected_individual)) # make a vector of PDF file names myfiles <- list.files(path = Selected_individual, pattern = "pdf", full.names = TRUE) # convert each PDF file that is named in the vector into a text file # text file is created in the same directory as the PDFs # puts a bunch of text files into the destination #lapply(myfiles, function(i) system(paste('"/usr/local/bin/pdftotext"', paste0('"', i, '"')), wait = FALSE) ) # get text files that you just created in that directory. 
# (Continuation of WaterAct_wordanalysis.R: corpus build, preprocessing,
# frequency analysis, and form detection by word count.)
# Note that .txt files are moved to the 'Corpus' folder within the same folder

# get file names
setwd(dname)
filelist_txt <- list.files(pattern = ".txt$")
y = length(filelist_txt)
sample.ID <- 0 # create sample ID variable
for (i in 1:y){
  sample.ID.temp <- strapplyc(filelist_txt[i], "(.*).txt", simplify = TRUE)
  sample.ID[i] <- sample.ID.temp
}

############## Creating corpus file
# First step of text analysis
# input the text files into a corpus
corpus.i <- Corpus(DirSource(dname), readerControl = list(language="lat"))
# inspect corpus to make sure that the documents are input
# Should pull up one of the submissions
inspect(corpus.i[2])

#MyCorpus <- tm_map(YourCorpus,
#                   content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')),
#                   mc.cores=1)
#test <- tm_map(YourCorpus, stemDocument, lazy = TRUE)

######## Preprocessing text data prior to analysis
# To see transformations possible within the tm package -> getTransformations()
# tm_map may try to use two cores; tell R to only use one core using lazy = TRUE
# this was converted into a function so that it can be reused later for partitioned txt files
# text.pro: takes a tm corpus and returns it lower-cased, with numbers,
# punctuation, stop words and extra whitespace removed, stemmed, and
# converted back to plain text documents.
text.pro <- function(txtdoc){
  corpus <- tm_map(txtdoc, content_transformer(tolower))
  # get rid of weird punctuation - change it to a space
  toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
  docs <- tm_map(corpus, toSpace, "/|@|\\|***|", lazy = TRUE)
  # convert all upper case to lower case
  docs <- tm_map(corpus, content_transformer(tolower))
  # remove numbers
  docs <- tm_map(docs, content_transformer(removeNumbers))
  # remove punctuation
  docs <- tm_map(docs, content_transformer(removePunctuation))
  # remove stop words like for, very, and, of, are, plus personal stop words
  docs <- tm_map(docs, removeWords, c(stopwords("english"), "personal","identifiers","removed","water","wsa","sustainability","act", "proposal", "need"), lazy = TRUE)
  docs <- tm_map(docs, removeWords, c(stopwords("english"),"my","custom","words"))
  # strip white spaces
  docs <- tm_map(docs, stripWhitespace, lazy = TRUE)
  # stem document - remove common word endings
  docs <- tm_map(docs, stemDocument, lazy = TRUE)
  # convert back to plain text document
  docs <- tm_map(docs, PlainTextDocument, lazy = TRUE)
  return(docs)
}

# use this function to clean the corpus from all individual txt submissions
docs <- text.pro(txtdoc = corpus.i)

########## Create document term matrix
# A document term matrix is simply a matrix with documents as the rows and terms as
# the columns and a count of the frequency of words as the cells of the matrix.
# We use DocumentTermMatrix() to create the matrix
#create document term matrix
dtm <- DocumentTermMatrix(docs)
tdm <- TermDocumentMatrix(docs)

########### Frequency Analysis on all data!
#Frequent Terms and Associations
#freq.terms.1 <- findFreqTerms(dtm, lowfreq=4)
freq.terms <- findFreqTerms(tdm, lowfreq=100)
freq <- colSums(as.matrix(dtm))
ord <- order(freq)
# Least frequent terms
least <- freq[head(ord)]
# Most frequent terms
most <- freq[tail(ord)]
# calculate the frequency of words
wordfreq <- sort(rowSums(as.matrix(tdm)), decreasing=TRUE)

# which words are associated with what? can find the context of words
# according to frequency - most frequent
top.associations <- findAssocs(dtm, c("protect", "use", "must", "public", "new", "resourc"), c(0.4, 0.4, 0.4, 0.4, 0.4, 0.4))
env.associations <- findAssocs(dtm, c("environment"), 0.4)
# save
dput(top.associations, file = file.path(paste(dname, "/top_associations.txt", sep="")))
dput(env.associations, file = file.path(paste(dname, "/env_associations.csv", sep="")))

#clustering :: k-means clustering
cluster <- kmeans(tdm, 10)
#colnames(cluster) <- sample.ID

############### Subsetting form data
########### Word Count Analysis
# sort submissions by word count, on the assumption that forms will have similar word counts
# After the preliminary sort by word count, confirm that each is a form using word frequency analysis
# first sort, and then find word frequencies of groups.
# then compare the word frequency of individual submissions to the groups

# Word count per document
wordc <- data.frame(rowSums(as.matrix(dtm)))
row.names(wordc) <- sample.ID
# plot to see if there are distributions of word counts to tease out forms
hist(wordc[,1], breaks = 200)
# get most frequent word counts
num.groups <- 20 #number of word count groups you want to create
top.wc <- sort(table(wordc[,1]),decreasing=TRUE)[1:num.groups]
top.wcn <- as.numeric(rownames(top.wc))
# get the submissions sorted according to word count
# adaptable to choose the number of groups

###########
# save into different folders - function
# save.wc: copies each listed submission's .txt file into dname/corpus_<sub.wc>/
save.wc <- function(listwc, sub.wc) {
  y = length(listwc)
  for (i in 1:y){
    filename <- paste(toString(listwc[i]), ".txt", sep = "")
    filepath.temp <- file.path(dname, paste("corpus_", sub.wc, "/", filename, sep = ""))
    temp <- file.copy(filename, filepath.temp)
  }
  return(temp) # NOTE(review): only the success flag of the last copy is returned
}

###########
# use to sort and save the files into separate folders
temp <- seq(1,num.groups, by = 1)
n = length(temp)
for (i in 1:n) {
  # partition files: submissions within +/- 5 words of the i-th most common word count
  wc.temp <- rownames(subset(wordc, wordc[,1] >= (top.wcn[i]-5) & wordc[,1] <= (top.wcn[i]+5)))
  #name each of the divisions - wc.[the partition you are looking for]
  assign(paste("wc.", i, sep = ""), wc.temp)
  ## save files
  #create folder if it doesn't exist
  folder.name <- file.path(dname, paste("corpus_", i,sep = ""))
  # if the folder doesn't exist, create it.
  if (file.exists(folder.name) == FALSE) {
    dir.create(folder.name, showWarnings = FALSE)
  }
  #save files in the correct folder
  save.wc(listwc = wc.temp, sub.wc = i)
}

############### Word frequency analysis
######## Test that the word count partitioned the files well
# wfa.matrix: builds a term-frequency matrix (top <= 50 terms) for one
# word-count partition and writes it to form_splitresults/<section>.csv
wfa.matrix <- function(parentdir, section){
  filepath.1 <- file.path(parentdir, paste("corpus_", section, "/", sep = ""))
  corpus.1 <- Corpus(DirSource(filepath.1), readerControl = list(language="lat"))
  # massage data
  corpus.1 <- text.pro(txtdoc = corpus.1 )
  #create document term matrix
  dtm.1 <- DocumentTermMatrix(corpus.1)
  tdm.1 <- TermDocumentMatrix(corpus.1)
  freq.terms <- findFreqTerms(tdm.1, lowfreq=100)
  # calculate the frequency of words
  # names of the top 50 most common terms within the subset
  if (length(rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))) >= 50) {
    wordfreq.50 <- rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))[1:50]
  }
  if (length(rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))) < 50) {
    wordfreq.50 <- rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))
  }
  # convert tdm to matrix
  term.matrix <- as.matrix(tdm.1)
  #select only the rows with the 50 most common terms in the subset to simplify the tdm matrix
  test <- as.matrix(term.matrix[wordfreq.50,])
  #attach filenames as the column names of the matrix
  setwd(filepath.1)
  sample.ID <- 0 #reset sample.ID variable
  filelist_txt <- list.files(pattern = ".txt$")
  y <- length(dir(filepath.1))
  for (i in 1:y){
    sample.ID.temp <- strapplyc(filelist_txt[i], "(.*).txt", simplify = TRUE)
    sample.ID[i] <- sample.ID.temp
  }
  colnames(test) <- sample.ID
  # write matrix to compare
  directory.matrix <- file.path(Selected_individual, paste("form_splitresults/", section, ".csv", sep = ""))
  write.table(test, file = directory.matrix, row.names = TRUE, col.names = TRUE, sep = ",")
}

### Use the function to write a matrix of the most frequent words to separate csv files
for (t in 1:n) {
  wfa.matrix(parentdir = dname , section = t)
}
/WaterAct_wordanalysis.R
no_license
ashjolly/SpecScripts
R
false
false
9,497
r
## Word frequency analysis
# aim is to take the submissions, look at the most frequent words, and try to use this
# to filter submissions that are the same (i.e. form letters)
# Make a list of submissions whose top 10 most frequent words are the same?
# references: http://onepager.togaware.com/TextMiningO.pdf
#             https://gist.github.com/benmarwick/11333467
#             https://jhuria.wordpress.com/2012/07/01/text-mining-in-r/
# 22June2015
# Ashlee Jollymore

################
## set working directory
rm(list = ls())
ls()
Selected_individual <- "/Users/user/Dropbox/PhD Work/WaterAct Paper/Selected_individual"
dname <- "/Users/user/Dropbox/PhD Work/WaterAct Paper/Selected_individual/corpus" # where txt files will be stored
setwd(Selected_individual)

########## Convert PDF to txt files and input as a tm object (corpus)
# Necessary packages
library(tm)
library(wordcloud)
library(SnowballC)
library(reshape)
library(plyr)
library(gsubfn)

############### Convert PDFs to txt files
# Necessary to do this prior to doing text analysis.
# Not necessary if your files are already in .txt files
# read PDFs from file with randomly selected files
length(dir(Selected_individual))
# make a vector of PDF file names
myfiles <- list.files(path = Selected_individual, pattern = "pdf", full.names = TRUE)
# convert each PDF file that is named in the vector into a text file
# text file is created in the same directory as the PDFs
# puts a bunch of text files into the destination
#lapply(myfiles, function(i) system(paste('"/usr/local/bin/pdftotext"', paste0('"', i, '"')), wait = FALSE) )
# get text files that you just created in that directory.
# Note that .txt files are moved to the 'Corpus' folder within the same folder

# get file names
setwd(dname)
filelist_txt <- list.files(pattern = ".txt$")
y = length(filelist_txt)
sample.ID <- 0 # create sample ID variable
for (i in 1:y){
  sample.ID.temp <- strapplyc(filelist_txt[i], "(.*).txt", simplify = TRUE)
  sample.ID[i] <- sample.ID.temp
}

############## Creating corpus file
# First step of text analysis
# input the text files into a corpus
corpus.i <- Corpus(DirSource(dname), readerControl = list(language="lat"))
# inspect corpus to make sure that the documents are input
# Should pull up one of the submissions
inspect(corpus.i[2])

#MyCorpus <- tm_map(YourCorpus,
#                   content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte')),
#                   mc.cores=1)
#test <- tm_map(YourCorpus, stemDocument, lazy = TRUE)

######## Preprocessing text data prior to analysis
# To see transformations possible within the tm package -> getTransformations()
# tm_map may try to use two cores; tell R to only use one core using lazy = TRUE
# this was converted into a function so that it can be reused later for partitioned txt files
# text.pro: takes a tm corpus and returns it lower-cased, with numbers,
# punctuation, stop words and extra whitespace removed, stemmed, and
# converted back to plain text documents.
text.pro <- function(txtdoc){
  corpus <- tm_map(txtdoc, content_transformer(tolower))
  # get rid of weird punctuation - change it to a space
  toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
  docs <- tm_map(corpus, toSpace, "/|@|\\|***|", lazy = TRUE)
  # convert all upper case to lower case
  docs <- tm_map(corpus, content_transformer(tolower))
  # remove numbers
  docs <- tm_map(docs, content_transformer(removeNumbers))
  # remove punctuation
  docs <- tm_map(docs, content_transformer(removePunctuation))
  # remove stop words like for, very, and, of, are, plus personal stop words
  docs <- tm_map(docs, removeWords, c(stopwords("english"), "personal","identifiers","removed","water","wsa","sustainability","act", "proposal", "need"), lazy = TRUE)
  docs <- tm_map(docs, removeWords, c(stopwords("english"),"my","custom","words"))
  # strip white spaces
  docs <- tm_map(docs, stripWhitespace, lazy = TRUE)
  # stem document - remove common word endings
  docs <- tm_map(docs, stemDocument, lazy = TRUE)
  # convert back to plain text document
  docs <- tm_map(docs, PlainTextDocument, lazy = TRUE)
  return(docs)
}

# use this function to clean the corpus from all individual txt submissions
docs <- text.pro(txtdoc = corpus.i)

########## Create document term matrix
# A document term matrix is simply a matrix with documents as the rows and terms as
# the columns and a count of the frequency of words as the cells of the matrix.
# We use DocumentTermMatrix() to create the matrix
#create document term matrix
dtm <- DocumentTermMatrix(docs)
tdm <- TermDocumentMatrix(docs)

########### Frequency Analysis on all data!
#Frequent Terms and Associations
#freq.terms.1 <- findFreqTerms(dtm, lowfreq=4)
freq.terms <- findFreqTerms(tdm, lowfreq=100)
freq <- colSums(as.matrix(dtm))
ord <- order(freq)
# Least frequent terms
least <- freq[head(ord)]
# Most frequent terms
most <- freq[tail(ord)]
# calculate the frequency of words
wordfreq <- sort(rowSums(as.matrix(tdm)), decreasing=TRUE)

# which words are associated with what? can find the context of words
# according to frequency - most frequent
top.associations <- findAssocs(dtm, c("protect", "use", "must", "public", "new", "resourc"), c(0.4, 0.4, 0.4, 0.4, 0.4, 0.4))
env.associations <- findAssocs(dtm, c("environment"), 0.4)
# save
dput(top.associations, file = file.path(paste(dname, "/top_associations.txt", sep="")))
dput(env.associations, file = file.path(paste(dname, "/env_associations.csv", sep="")))

#clustering :: k-means clustering
cluster <- kmeans(tdm, 10)
#colnames(cluster) <- sample.ID

############### Subsetting form data
########### Word Count Analysis
# sort submissions by word count, on the assumption that forms will have similar word counts
# After the preliminary sort by word count, confirm that each is a form using word frequency analysis
# first sort, and then find word frequencies of groups.
# then compare the word frequency of individual submissions to the groups

# Word count per document
wordc <- data.frame(rowSums(as.matrix(dtm)))
row.names(wordc) <- sample.ID
# plot to see if there are distributions of word counts to tease out forms
hist(wordc[,1], breaks = 200)
# get most frequent word counts
num.groups <- 20 #number of word count groups you want to create
top.wc <- sort(table(wordc[,1]),decreasing=TRUE)[1:num.groups]
top.wcn <- as.numeric(rownames(top.wc))
# get the submissions sorted according to word count
# adaptable to choose the number of groups

###########
# save into different folders - function
# save.wc: copies each listed submission's .txt file into dname/corpus_<sub.wc>/
save.wc <- function(listwc, sub.wc) {
  y = length(listwc)
  for (i in 1:y){
    filename <- paste(toString(listwc[i]), ".txt", sep = "")
    filepath.temp <- file.path(dname, paste("corpus_", sub.wc, "/", filename, sep = ""))
    temp <- file.copy(filename, filepath.temp)
  }
  return(temp) # NOTE(review): only the success flag of the last copy is returned
}

###########
# use to sort and save the files into separate folders
temp <- seq(1,num.groups, by = 1)
n = length(temp)
for (i in 1:n) {
  # partition files: submissions within +/- 5 words of the i-th most common word count
  wc.temp <- rownames(subset(wordc, wordc[,1] >= (top.wcn[i]-5) & wordc[,1] <= (top.wcn[i]+5)))
  #name each of the divisions - wc.[the partition you are looking for]
  assign(paste("wc.", i, sep = ""), wc.temp)
  ## save files
  #create folder if it doesn't exist
  folder.name <- file.path(dname, paste("corpus_", i,sep = ""))
  # if the folder doesn't exist, create it.
  if (file.exists(folder.name) == FALSE) {
    dir.create(folder.name, showWarnings = FALSE)
  }
  #save files in the correct folder
  save.wc(listwc = wc.temp, sub.wc = i)
}

############### Word frequency analysis
######## Test that the word count partitioned the files well
# wfa.matrix: builds a term-frequency matrix (top <= 50 terms) for one
# word-count partition and writes it to form_splitresults/<section>.csv
wfa.matrix <- function(parentdir, section){
  filepath.1 <- file.path(parentdir, paste("corpus_", section, "/", sep = ""))
  corpus.1 <- Corpus(DirSource(filepath.1), readerControl = list(language="lat"))
  # massage data
  corpus.1 <- text.pro(txtdoc = corpus.1 )
  #create document term matrix
  dtm.1 <- DocumentTermMatrix(corpus.1)
  tdm.1 <- TermDocumentMatrix(corpus.1)
  freq.terms <- findFreqTerms(tdm.1, lowfreq=100)
  # calculate the frequency of words
  # names of the top 50 most common terms within the subset
  if (length(rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))) >= 50) {
    wordfreq.50 <- rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))[1:50]
  }
  if (length(rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))) < 50) {
    wordfreq.50 <- rownames(as.matrix(sort(rowSums(as.matrix(tdm.1)), decreasing=TRUE)))
  }
  # convert tdm to matrix
  term.matrix <- as.matrix(tdm.1)
  #select only the rows with the 50 most common terms in the subset to simplify the tdm matrix
  test <- as.matrix(term.matrix[wordfreq.50,])
  #attach filenames as the column names of the matrix
  setwd(filepath.1)
  sample.ID <- 0 #reset sample.ID variable
  filelist_txt <- list.files(pattern = ".txt$")
  y <- length(dir(filepath.1))
  for (i in 1:y){
    sample.ID.temp <- strapplyc(filelist_txt[i], "(.*).txt", simplify = TRUE)
    sample.ID[i] <- sample.ID.temp
  }
  colnames(test) <- sample.ID
  # write matrix to compare
  directory.matrix <- file.path(Selected_individual, paste("form_splitresults/", section, ".csv", sep = ""))
  write.table(test, file = directory.matrix, row.names = TRUE, col.names = TRUE, sep = ",")
}

### Use the function to write a matrix of the most frequent words to separate csv files
for (t in 1:n) {
  wfa.matrix(parentdir = dname , section = t)
}
## setwd to the folder that contains the unzipped household power consumption dataset
# plot2: draws Global Active Power (kilowatts) over the two-day window
# 2007-02-01/2007-02-02 and saves the figure to plot2.png. The skip/nrows
# values select exactly those dates, so the full file is never loaded.
plot2 <- function() {
        library(dplyr)
        library(lubridate)

        col_names <- c("Date", "Time", "Global_active_power",
                       "Global_reactive_power", "Voltage", "Global_intensity",
                       "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

        # "?" marks missing values in this dataset
        pwr <- read.table("household_power_consumption.txt", header = FALSE,
                          sep = ";", na.strings = "?",
                          skip = 66637, nrows = 2880)
        names(pwr) <- col_names

        # parse the date and normalise the time string
        pwr <- tbl_df(transform(pwr,
                                Date = as.Date(Date, format = "%d/%m/%Y"),
                                Time = format(strptime(Time, "%H:%M:%S"), "%H:%M:%S")))
        # combined POSIXct timestamp used as the x axis
        pwr <- mutate(pwr, datetime = ymd_hms(paste(pwr$Date, pwr$Time)))

        # draw an empty frame first, then the trace, into the PNG device
        png("plot2.png")
        plot(pwr$Global_active_power ~ pwr$datetime, type = "n",
             xlab = "", ylab = "Global Active Power (kilowatts)")
        lines(pwr$Global_active_power ~ pwr$datetime)
        dev.off()
}
/plot2.R
no_license
CesarTC14/ExData_Plotting1
R
false
false
905
r
## setwd to the folder that contains the unzipped household power consumption dataset
# plot2: draws Global Active Power (kilowatts) over the two-day window
# 2007-02-01/2007-02-02 and writes it to plot2.png. The skip/nrows values
# select exactly those dates so the full dataset is never loaded.
plot2 <- function() {
        library(dplyr)
        library(lubridate)
        # "?" marks missing values in this dataset
        data <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", na.strings = "?", skip = 66637, nrows = 2880)
        names(data) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
        # parse the date and normalise the time string
        data <- tbl_df(transform(data, Date = as.Date(Date, format = "%d/%m/%Y"), Time = format(strptime(Time, "%H:%M:%S"), "%H:%M:%S")))
        # combined POSIXct timestamp used as the x axis
        data <- mutate(data, datetime = ymd_hms(paste(data$Date, data$Time)))
        png("plot2.png")
        # empty frame first, then the trace
        plot(data$Global_active_power~data$datetime, type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
        lines(data$Global_active_power~data$datetime)
        dev.off()
}
# Pie chart of subject (public) injuries during use-of-force incidents.
# Expects uof.for.year (and the helpers check.vars / gen.plotly.json) to be
# defined by the calling pipeline before this script is sourced.
check.vars(c("uof.for.year"))

########################################################################################################
########################################################################################################

title <- "Victim injuries during UOF"

# group incidents by whether the subject was injured
uof.by.victim.injury <- uof.for.year %>%
  group_by(Subject.Injured)

# count incidents per injury outcome
count.by.victim.injury <- uof.by.victim.injury %>%
  summarise(count = n())

# pie chart with label + count + percentage rendered inside each slice
p.uof.by.victim.injury <- plot_ly(count.by.victim.injury,
                                  type = 'pie',
                                  labels = ~Subject.Injured,
                                  values = ~count,
                                  textposition = 'inside',
                                  textinfo = 'label+value+percent',
                                  insidetextfont = list(color = '#FFFFFF')) %>%
  layout(hovermode = "compare", showlegend = FALSE)

p.uof.by.victim.injury

# export the figure as JSON for the report site
gen.plotly.json(p.uof.by.victim.injury, "uof-by-public-injury")
/data-analysis/force/outcome/uof-by-public-injury.R
permissive
marvinmarnold/oipm_annual_report_2019
R
false
false
959
r
# Pie chart of subject (public) injuries during use-of-force incidents.
# Expects uof.for.year (and the helpers check.vars / gen.plotly.json) to be
# defined by the calling pipeline before this script is sourced.
check.vars(c("uof.for.year"))

########################################################################################################
########################################################################################################

title <- "Victim injuries during UOF"

# group incidents by whether the subject was injured
uof.by.victim.injury <- uof.for.year %>%
  group_by(Subject.Injured)

# count incidents per injury outcome
count.by.victim.injury <- uof.by.victim.injury %>%
  summarise(count = n())

# pie chart with label + count + percentage rendered inside each slice
p.uof.by.victim.injury <- plot_ly(count.by.victim.injury,
                                  type = 'pie',
                                  labels = ~Subject.Injured,
                                  values = ~count,
                                  textposition = 'inside',
                                  textinfo = 'label+value+percent',
                                  insidetextfont = list(color = '#FFFFFF')) %>%
  layout(hovermode = "compare", showlegend = FALSE)

p.uof.by.victim.injury

# export the figure as JSON for the report site
gen.plotly.json(p.uof.by.victim.injury, "uof-by-public-injury")
### Create inclusion probabilities ####
## for multibeam and lidar #######
# Spatially balanced BRUV survey design (MBHdesign): builds depth-based
# inclusion probabilities over the SW "fish highway" bathymetry; cluster
# centres and within-cluster samples are drawn in the later sections.
#install.packages("MBHdesign")
library( MBHdesign)
library( parallel)
library( class)
library( fields)
#install.packages("pdist")
library( pdist)
library( raster)
library( rgdal)
library( sp)

# clear environment ----
rm(list = ls())

# Set working directory ####
w.dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
s.dir <- paste(w.dir, "spatial_data", sep ='/')

#########################
#read in data -----
#GBDat <- readRDS( "GBData_forDesign3.RDS")
#gb_rasters <- readRDS("GBRasters_forDesign3.RDS")
#zones <- readRDS( "GBZones_forDesign3.RDS")

# bathymetry raster for the study area (metres; negative = depth below MSL)
b <- raster(paste(s.dir, "SWbathy_fishHWY3.tif", sep='/'))
plot(b)

# remove all cells shallower than 35m
#values(b2)[values(b2) > -35 ] = NA
#plot(b2)
# remove all cells deeper than 60m
#values(b2)[values(b2) < -60 ] = NA
#plot(b2)

# SW marine park polygon ----
#swnp <- readOGR(paste(s.dir, "SW_CMR_NP.shp", sep='/'))
#plot(swnp, add=T)

# remove the marine park area and where the multibeam will be ----
#ga2 <- raster(paste(s.dir, "ga4858_grid2_MSL.tiff", sep='/'))
#ga2 <- projectRaster(ga2, b)
#plot(ga2, add=T)
#e <- drawExtent()
# cut the bottom part: NP and where multibeam bathy will be
#b <- crop(b, e)
#plot(b)
#writeRaster(b, paste(s.dir, "SWbathy_fishHWY3.tif", sep ='/'), overwrite=T)

## crop to desired location
#b <- raster(paste(s.dir, "GB-SW_250mBathy.tif", sep='/'))
#plot(b)
#e <- drawExtent()
#b2 <- crop(b2, e)
#plot(b2)
#plot(swnp, add=T)
# save new raster
#writeRaster(b2, paste(s.dir, "SWbathy_fishHWY3.tif", sep ='/'), overwrite=T)

## read points from Dean ----
#wp <- read.csv(paste(w.dir, "SW-points-Dean.csv", sep='/'))
# make sp --
#coordinates(wp) <- ~Longitude+Latitude
#points(wp)

####################################
#### Straw man for numbers of samples in each region
####################################
#straw.nums <- c( 16, 12, 6, 9) # numbers of drops
#straw.props <- straw.nums / sum( straw.nums)
#names( straw.nums) <- names( straw.props) <- c( "MUZ", "SPZ", "HPZ", "NPZ")
#saveRDS( straw.nums, file="StrawmanNumbers_Zones.RDS")
#setwd("~/MBHdesignGB/Design3/")

###################################
#### Hand-picking Bathy cut points
#### And their numbers of drops
###################################
#Bathy.quant <- c(0,0.8,0.9,0.925,0.95,0.975,1)
# quantiles of the depth distribution used as strata boundaries
Bathy.quant <- c(0,0.4,0.6,1)
Bathy.cuts <- quantile(b, Bathy.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf)
#trying to make it so there is no hand-picking (except for the hand-picked function)
tmp <- cumsum(Bathy.quant)
Bathy.targetNums <- rep(floor(18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65)
Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums)

########################
#depth limiting and some elementary cleaning
#minDepthInAMP <- max( NingalooDat$BATHY, na.rm=TRUE)
#NingalooDat[ is.na( NingalooDat$BATHY) | NingalooDat$BATHY < -195 & NingalooDat$BATHY > minDepthInAMP, c("BATHY","TPI_GF")] <- NA
#GBDat[ !is.na( GBDat$BATHY) & GBDat$BATHY < -50 , "BATHY"] <- NA
#GBDat[ !is.na( GBDat$BATHY) & GBDat$BATHY > 0 , "BATHY"] <- NA
#GBDat_small <- GBDat[!is.na( GBDat$BATHY),]
#tmp <- colSums( GBDat_small[,c("MUZ", "SPZ", "HPZ", "NPZ")], na.rm=TRUE)
#tmp[2] <- tmp[2] - tmp[1] # so similar amount of sites in SPZ and MUZ
#props <- tmp / nrow( GBDat_small)
#props <- props / sum( props)
# 1 UP TO HERE

###################################
#### TPI to get cut points
###################################
catB <- cut(b, breaks=Bathy.cuts, na.rm=TRUE) # convert values to classes according to bathy cuts
plot(catB)
#plot(zones$MUZ); plot( catB, add=TRUE); plot( zones$MUZ, add=TRUE)
# save raster of bathy classes ----
#writeRaster(catB, paste(s.dir, "Bathy_cuts_SW-Fish-HWY3.tif", sep='/'), overwrite=TRUE)
plot(catB)

# convert to matrix for ease of plotting
bathym <- raster::as.matrix(b)
bathym
str(bathym) # dim(bathym) # 129 85
bathym[50,40] # -42
# transpose the axes of the matrix so x comes first and y later
bathym2 <- t(bathym)
str(bathym2)
# [1:1075, 1:723]
bathym2[40,50] # -42

# make a data frame of coordinates + depth
bathydf <- as.data.frame ( cbind (coordinates (b), as.numeric (bathym2)))
colnames(bathydf) <- c("Easting", "Northing", "depth")
head(bathydf)
bathydf <- bathydf[ order(bathydf$Northing, bathydf$Easting),] # order ascending first by northing and then by easting

## Setting up plotting for now and later ####
uniqueEast <- base::unique ( bathydf$Easting) # duplicate rows removed
uniqueNorth <- base::unique ( bathydf$Northing)
ELims <- range ( na.exclude ( bathydf)$Easting)
NLims <- range ( na.exclude ( bathydf)$Northing)
str(uniqueEast) ## all the x coordinates
class(uniqueEast)
str(uniqueNorth) ## all the y coordinates
str(bathym2) # the dimensions of the matrix need to be transposed for the plot
class(bathym2)
#Fix up ordering issue
bathym2 <- bathym2[, rev ( 1 : ncol (bathym2))] # this is needed so the map looks the right way - because of the transposing of the matrix

## plot it to see what we are dealing with ####
## these kinds of plots are kind of slow ###
image.plot ( uniqueEast, uniqueNorth, bathym2, xlab= "Easting" , ylab= "Northing" , main= "WA South West Fishing Highway" , legend.lab= "Depth" , asp=1 , ylim= NLims, xlim= ELims, col= ( tim.colors ()))

# # # SKIPPING THE TPI PART 3 ####

######### Get TPI #############
#tpi <- terrain(b3, opt="TPI")
#plot(tpi)
#b3 <- raster::aggregate(b2, fact=4)
#plot(b3)
#writeRaster(tpi, "~/MBHdesignSW/spatial_data/TPIreefs12x12.tif")
# topographic position index raster (precomputed at 12x12 aggregation)
tpi <- raster("~/MBHdesignSW/spatial_data/TPIreefs12x12.tif")
plot(tpi)

#Bathy.quant <- c(0,0.8,0.9,0.925,0.95,0.975,1)
tpi.quant <- c(0,0.1,0.99,1)
tpi.cuts <- quantile(tpi, tpi.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf)
#trying to make it so there is no hand-picking (except for the hand-picked function)
tmp <- cumsum( tpi.quant)
#Bathy.targetNums <- rep( floor( 18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65)
#Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums)
catT <- cut( tpi, breaks=tpi.cuts, na.rm=TRUE)
plot(catT)
#writeRaster(catT,"~/MBHdesignSW/design1/TPI_cuts_SW1.tif", overwrite=TRUE)

# convert to matrix for ease of plotting
tpim <- raster::as.matrix(tpi)
tpim
str(tpim) # dim(tpim) # 723 1075
tpim[70,75] # 0.5803415
# transpose the axes of the matrix so x comes first and y later
tpim2 <- t(tpim)
str(tpim2) # [1:1075, 1:723]
tpim2[75,70] # 0.5803415

# make data frame
tpidf <- as.data.frame ( cbind (coordinates (tpi), as.numeric (tpim2)))
colnames(tpidf ) <- c("Easting", "Northing", "tpi")
head(tpidf )
tpidf <- tpidf [ order(tpidf $Northing, tpidf $Easting),] # order ascending first by northing and then by easting

## Setting up plotting for now and later ####
uniqueEast <- base::unique ( tpidf $Easting) # duplicate rows removed
uniqueNorth <- base::unique ( tpidf $Northing)
ELims <- range ( na.exclude ( tpidf)$Easting)
NLims <- range ( na.exclude ( tpidf )$Northing)
str(uniqueEast) ## all the x coordinates
class(uniqueEast)
str(uniqueNorth) ## all the y coordinates
str(tpim2) # the dimensions of the matrix need to be transposed for the plot
class(tpim2)
#Fix up ordering issue
tpim2 <- tpim2[, rev ( 1 : ncol (tpim2))] # this is needed so the map looks the right way - because of the transposing of the matrix

## plot it to see what we are dealing with ####
## these kinds of plots are kind of slow ###
image.plot ( uniqueEast, uniqueNorth, tpim2, xlab= "Easting" , ylab= "Northing" , main= "South West Reefs" , legend.lab= "TPI" , asp=1 , ylim= NLims, xlim= ELims, col= ( tim.colors ()))

#### INCLUSION PROBS ####
par ( mfrow= c ( 1 , 3 ), mar= rep ( 4 , 4 ))
tpi.quant <- c(0,0.1,0.98,0.99,1)
tpi.cuts <- quantile(tpi, tpi.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf)
#trying to make it so there is no hand-picking (except for the hand-picked function)
tmp <- cumsum( tpi.quant)
#Bathy.targetNums <- rep( floor( 18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65)
#Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums)
catT <- cut( tpi, breaks=tpi.cuts, na.rm=TRUE)
plot(catT)
#writeRaster(catT,"~/MBHdesignSW/design1/TPI_cuts_SW1.tif", overwrite=TRUE)

### TPI part finishes here ----

# set n: total number of BRUV drops
n <- 24
# The number of 'depth bins' to spread sampling effort over.
nbins <- 3
# force the breaks so R doesn't use 'pretty'
breaks <- seq ( from= min ( bathydf$depth, na.rm= TRUE ), to= max ( bathydf$depth, na.rm= TRUE ), length= nbins +1 )
# -52.00 -47.75 -43.50 -39.25 -35.00
# check the values above with raster
minValue(b) # -54
maxValue(b) # -35
# Find sensible depth bins using pre-packaged code
tmpHist <- hist ( bathydf$depth, breaks= breaks, plot= T, freq = F )
## or make breaks according to bathy.cuts
Bathy.cuts
b.cuts <- c(-54, -45,-44,-35)
tmpHist <- hist ( bathydf$depth, breaks= b.cuts, plot= T, freq = F )
# change breaks if needed - least interesting areas should have more counts
# When the breaks are reset, run the tmpHist again
tmpHist <- hist ( bathydf$depth, breaks= b.cuts, plot= T, freq = F )
#breaks <- c(-46.90025, -30, -20, -8.540641) #
#breaks <- c(-1.1969223, -0.8, 0.5258718, 1.9372688)
# check breaks
#tmpHist <- hist ( bathydf$depth, breaks= breaks, freq= F, plot= T )

# Find the inclusion probability for each 'stratum' (for each 'bin')
# tmpHist$counts are the number of cells in each depth category
# this would result in equal proportions for each depth class
#tmpHist$inclProbs <- (n/(nbins)) / tmpHist$counts # 0.001445435 0.003973510 0.003949967 0.004474273
# or change to different proportions, but should equal no of clusters * no. depth classes, in this case = 24/3 = 8
tmpHist$inclProbs <- c(2,4,2) / tmpHist$counts # 0.0005740528 0.0022727273 0.0024539877 0.0018248175

# Matching up locations to probabilities - in data frame
str(bathydf)
tmpHist$ID <- findInterval ( bathydf$depth, tmpHist$breaks) # breaks coded as 1, 2, 3 and so on depending on how many bins
tmpHist$ID[2000] # 2
# not sure why the NAs, but with depth it worked fine
length(tmpHist$ID) # 12927
# see hist - quicker way to observe the freq of each bin
hist(tmpHist$ID)
head(bathydf)

# A container for the design
# create a data frame with each location and its inclusion probability ####
design <- data.frame ( siteID= 1 : nrow ( bathydf), Easting= bathydf$Easting, Northing= bathydf$Northing, depth= bathydf$depth, inclProb= tmpHist$inclProbs[tmpHist$ID])
str(design)
head(design)

### test ####
str(design)
head(design)
# remove unnecessary columns
incprob <- design[,c(2,3,5)]
head(incprob)
# make df a raster --
coordinates(incprob) <- ~Easting+Northing
gridded(incprob) <- TRUE
rasterIP <- raster(incprob)
plot(rasterIP)
cellStats(rasterIP, sum)
# save raster of inclusion probabilities
writeRaster(rasterIP, paste(s.dir, "InclProbs-FishHWY-d5.tif", sep='/'), overwrite = T)

str(design)
length(design$Easting) # 4556
# make matrix with inclProbs from design
m <- matrix ( design$inclProb, nrow= length ( uniqueEast), byrow= F)
str(m)
head(m)
m[1]
# then plot
with ( design, image.plot ( uniqueEast, uniqueNorth, m, xlab= "" , ylab= "" , main= "Inclusion Probabilities (24 clusters)" , asp= 1 , ylim= NLims, xlim= ELims))

# Take the Sample using the inclusion probabilities ####
#design$inclProb[4174928] <- 4.935784e-06 ### give this NA an inclusion value
str(design)
##### replace NAs of inclusion probabilities with zeroes ####
names(design)
head(design)
design$inclProb[is.na(design$inclProb)] <- 0
head(design)
class(design)
any(is.na(design$inclProb))
# turn design df into matrix
designMat <- cbind(design$Easting,design$Northing, design$inclProb)
head(designMat)
str(designMat)
class(designMat)
colnames(designMat) <- c("Easting", "Northing", "inclProbs")
#Fix up ordering issue: revert ordering of columns: first column last so: inclProbs, Northing, Easting
designMat <- designMat[, rev ( 1 : ncol (designMat))]

###############
## Make a data frame out of designMat to make a raster ###
designDF <- as.data.frame(designMat)
head(designDF)
designDF <- designDF[ order ( designDF$Northing, designDF$Easting),] # order ascending first by northing and then by easting
# turn data frame into raster ####
# df into spatial points
coordinates(designDF) <- ~ Easting + Northing
# coerce to SpatialPixelsDataFrame
gridded(designDF) <- TRUE
# coerce to raster
designr <- raster(designDF)
designr
plot(designr)

#second Mat - for plotting
designMat2 <- raster::as.matrix(designr, mode ='any')
dim(designMat2)
str(designMat2)
# transpose matrix
designMat3 <- t(designMat2)
dim(designMat3)
str(designMat3)
designMat3 <- designMat3[, rev ( 1 : ncol (designMat3))]

### make a new data frame out of this raster and matrix
designdf <- as.data.frame ( cbind ( coordinates ( designr), as.numeric ( designMat3)))
colnames ( designdf) <- c ( "Easting" , "Northing" , "inclProbs" )
head(designdf)
designdf <- designdf[ order ( designdf$Northing, designdf$Easting),] # order ascending first by northing and then by easting

########## Get cluster centres #######
# Sample with 'quasiSamp' from the MBHdesign package #### this takes some time
Clusters <- quasiSamp ( n = 24, dimension= 2 , potential.sites = coordinates(designr), inclusion.probs= designdf$inclProbs , nSampsToConsider= 10000) # inclProb that are not NA!
Clusters
# save to csv
write.csv(Clusters, paste(w.dir, "Fish-HWY", "FishHWY-24custers-d5.csv", sep='/'))

#### plot Clusters ####
clustersp <- Clusters
coordinates(clustersp) <- ~x+y
clustersp
proj4string(clustersp) <- proj4string(b)
#plot(b2)
#plot(tpi)
plot(rasterIP)
plot(clustersp, pch=20, cex=0.7,col='black',add=T)

pdf(paste(w.dir, "Fish-HWY", "FishHWY-Clusters24-design5.pdf", sep='/'), height=7, width=8)
#plot(b, main = "Bathymetry - Cluster centres")
# or plot inc probs --
plot(rasterIP, main = "Inclusion probabilities - 24 Cluster centres - Design5")
points(clustersp, pch=20, cex = 0.8, col = 'black')
dev.off()

###############################
#### Choose new points within clusters
#### Here I need to choose transects not points
##############################
# getlocal: returns the inclusion-probability raster clipped to a 600 m
# buffer around cluster centre ii
getlocal <- function(ii){
  point <- Clusters[ii,c("x","y")]
  r2 <- rasterize( point, rasterIP, field=1)
  pbuf <- buffer( r2, width=600) ## units are in metres
  buf <- mask( rasterIP, pbuf)
  buffer <- trim(buf, pad=0)
  return( buffer)
}

sampWithOver <- 12 # samples (including overs) drawn per cluster
fullSample <- list()
fullZones <- list()
## I think in this function I need to change quasiSamp for TransectSamp
for( ii in 1:nrow( clustersp)){
  tmp <- getlocal(ii)
  fullZones[[ii]] <- rownames( clustersp@data)[ii]
  tmpm <- raster::as.matrix(tmp)
  tmpm <- t(tmpm)
  tmpdf <- as.data.frame ( cbind (coordinates (tmp), as.numeric (tmpm)))
  colnames(tmpdf) <- c("x", "y", "inclProbs_design1")
  tmpdf <- tmpdf[ order(tmpdf$y, tmpdf$x),] # order ascending first by northing and then by easting
  fullSample[[ii]] <- quasiSamp( n=sampWithOver, potential.sites=coordinates( tmp), inclusion.probs=values( tmp), nSampsToConsider=5000)
  plot( tmp)
  points( fullSample[[ii]]$points[,c("x","y")], pch=20, col='red')
  #plot( legacySites, add=TRUE, pch=4, col='blue')
}
fullSample <- do.call( "rbind", fullSample)
fullSample$cluster <- rep( do.call( "c", fullZones), each=sampWithOver)
#fullSample$ID <- paste( fullSample$cluster, rep( paste0( "shot.",1:6), each=nrow( clustersp)), sep="_")
#fullSample2 <- SpatialPointsDataFrame( coords=fullSample[,c("x","y")], data=fullSample, proj4string=CRS(proj4string(inclProbs)))

fullSample3 <- fullSample
coordinates(fullSample3) <- ~x+y
proj4string(fullSample3) <- proj4string(b)
plot(rasterIP)
points(fullSample3, pch=20, cex = 0.4, col='black')
points(clustersp, pch=20, cex = 0.7, col='blue')

pdf(paste(w.dir, "Fish-HWY", "FishHWY-BRUVs-Clusters24-design5.pdf", sep='/'), height=7, width=8)
plot(rasterIP, main = "Inclusion probabilities - 24 Clusters - Design5")
points(fullSample3, pch=20, cex = 0.4, col='black')
points(clustersp, pch=20, cex = 0.7, col='blue')
#plot(swnp, add=T)
dev.off()

### plot nicely #####
library(dichromat)
library(RColorBrewer)
pal <- colorRampPalette(c("red","blue"))
pdf(paste(w.dir, "Fish-HWY", "FishHWY-Bathy-BRUVs-Clusters24-design5.pdf", sep='/'), height=7, width=8)
plot(b, main ="Bathymetry - 24 Clusters - Design5", col = rev(brewer.pal(20, "RdYlBu")))
#plot(tpi, main ="Clustered Stereo-BRUVs - SW", col = pal1)
points(fullSample3, pch=20, cex = 0.7, col='black')
#plot(swnp, add=T)
#points(wp, add = T, pch=20, cex = 0.7, col='green')
dev.off()

#### Write the shape files
d.dir <- paste(w.dir, "Fish-HWY", sep='/')
writeOGR(fullSample3, dsn=d.dir, layer="FHWY-BRUVS-d5", driver="ESRI Shapefile", overwrite_layer=TRUE)
#writeOGR(fullSample3, dsn=d.dir, layer=paste( "Bruvs4.8", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE)
writeOGR(clustersp, dsn=d.dir, layer="FHWY-24clusters-d5", driver="ESRI Shapefile", overwrite_layer=TRUE)
/3.SW-BRUVs-design_Fish-HWY.R
no_license
anitas-giraldo/MBH-SW-Oct2020
R
false
false
17,118
r
### Create inclusion probabilities #### ## for multibeam and lidar ####### #install.packages("MBHdesign") library( MBHdesign) library( parallel) library( class) library( fields) #install.packages("pdist") library( pdist) library( raster) library( rgdal) library( sp) # clear environment ---- rm(list = ls()) # Set working directory #### w.dir <- dirname(rstudioapi::getActiveDocumentContext()$path) s.dir <- paste(w.dir, "spatial_data", sep ='/') ######################### #read in data ----- #GBDat <- readRDS( "GBData_forDesign3.RDS") #gb_rasters <- readRDS("GBRasters_forDesign3.RDS") #zones <- readRDS( "GBZones_forDesign3.RDS") b <- raster(paste(s.dir, "SWbathy_fishHWY3.tif", sep='/')) plot(b) # remove all cells shallower than 35m #values(b2)[values(b2) > -35 ] = NA #plot(b2) # remove all cells deeper than 60m #values(b2)[values(b2) < -60 ] = NA #plot(b2) # SW marine park polygon ---- #swnp <- readOGR(paste(s.dir, "SW_CMR_NP.shp", sep='/')) #plot(swnp, add=T) # remove the marine park area and were the multibeam will be ---- #ga2 <- raster(paste(s.dir, "ga4858_grid2_MSL.tiff", sep='/')) #ga2 <- projectRaster(ga2, b) #plot(ga2, add=T) #e <- drawExtent() # cut the bottom part: NP and where multibeam bathy will be #b <- crop(b, e) #plot(b) #writeRaster(b, paste(s.dir, "SWbathy_fishHWY3.tif", sep ='/'), overwrite=T) ## crop to desired location #b <- raster(paste(s.dir, "GB-SW_250mBathy.tif", sep='/')) #plot(b) #e <- drawExtent() #b2 <- crop(b2, e) #plot(b2) #plot(swnp, add=T) # save new raster #writeRaster(b2, paste(s.dir, "SWbathy_fishHWY3.tif", sep ='/'), overwrite=T) ## read points from Dean ---- #wp <- read.csv(paste(w.dir, "SW-points-Dean.csv", sep='/')) # make sp -- #coordinates(wp) <- ~Longitude+Latitude #points(wp) #################################### #### Straw man for numbers of samples in each region #################################### #straw.nums <- c( 16, 12, 6, 9) # numbers of drops #straw.props <- straw.nums / sum( straw.nums) #names( straw.nums) <- names( 
straw.props) <- c( "MUZ", "SPZ", "HPZ", "NPZ") #saveRDS( straw.nums, file="StrawmanNumbers_Zones.RDS") #setwd("~/MBHdesignGB/Design3/") ################################### #### Hand-picking Bathy cut points #### And their numbers of drops ################################### #Bathy.quant <- c(0,0.8,0.9,0.925,0.95,0.975,1) Bathy.quant <- c(0,0.4,0.6,1) Bathy.cuts <- quantile(b, Bathy.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf) #trying to make it so there is no hand-picking (except for the hand-picked function) tmp <- cumsum(Bathy.quant) Bathy.targetNums <- rep(floor(18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65) Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums) ######################## #depth limiting and some elementary cleaning #minDepthInAMP <- max( NingalooDat$BATHY, na.rm=TRUE) #NingalooDat[ is.na( NingalooDat$BATHY) | NingalooDat$BATHY < -195 & NingalooDat$BATHY > minDepthInAMP, c("BATHY","TPI_GF")] <- NA #GBDat[ !is.na( GBDat$BATHY) & GBDat$BATHY < -50 , "BATHY"] <- NA #GBDat[ !is.na( GBDat$BATHY) & GBDat$BATHY > 0 , "BATHY"] <- NA #GBDat_small <- GBDat[!is.na( GBDat$BATHY),] #tmp <- colSums( GBDat_small[,c("MUZ", "SPZ", "HPZ", "NPZ")], na.rm=TRUE) #tmp[2] <- tmp[2] - tmp[1] # so similar amount of sites in SPZ and MUZ #props <- tmp / nrow( GBDat_small) #props <- props / sum( props) # 1 UP TO HERE ################################### #### TPI to get cut points ################################### catB <- cut(b, breaks=Bathy.cuts, na.rm=TRUE) # convert values to classes according to bathy cuts plot(catB) #plot(zones$MUZ); plot( catB, add=TRUE); plot( zones$MUZ, add=TRUE) # shave raster of bathy classes ---- #writeRaster(catB, paste(s.dir, "Bathy_cuts_SW-Fish-HWY3.tif", sep='/'), overwrite=TRUE) plot(catB) # convert to matrix for ease plotting bathym <- raster::as.matrix(b) bathym str(bathym) # dim(bathym) # 129 85 bathym[50,40] # -42 # transpose the axis of the matrix so x first and y later bathym2 <- t(bathym) str(bathym2) 
# [1:1075, 1:723] bathym2[40,50] # -42 # make data frame bathydf <- as.data.frame ( cbind (coordinates (b), as.numeric (bathym2))) colnames(bathydf) <- c("Easting", "Northing", "depth") head(bathydf) bathydf <- bathydf[ order(bathydf$Northing, bathydf$Easting),] # order ascending first by northing and then by easting ## Setting up plotting for now and later #### uniqueEast <- base::unique ( bathydf$Easting) # duplicate rows removed uniqueNorth <- base::unique ( bathydf$Northing) ELims <- range ( na.exclude ( bathydf)$Easting) NLims <- range ( na.exclude ( bathydf)$Northing) str(uniqueEast) ## all the x coordinates class(uniqueEast) str(uniqueNorth) ## all the y coordinates str(bathym2) # the dimensions of the matrix neet to be transposed for the plot class(bathym2) #Fix up ordering issue bathym2 <- bathym2[, rev ( 1 : ncol (bathym2))] # this is needed so the map looks the right way- because of the trasnposing of matrix ## plot it to see what we are dealing with #### ## these kind of plots are kind of slow ### image.plot ( uniqueEast, uniqueNorth, bathym2, xlab= "Easting" , ylab= "Northing" , main= "WA South West Fishing Highway" , legend.lab= "Depth" , asp=1 , ylim= NLims, xlim= ELims, col= ( tim.colors ())) # # # SKIPPING THE TPI PART 3 #### ######### Get TPI ############# #tpi <- terrain(b3, opt="TPI") #plot(tpi) #b3 <- raster::aggregate(b2, fact=4) #plot(b3) #writeRaster(tpi, "~/MBHdesignSW/spatial_data/TPIreefs12x12.tif") tpi <- raster("~/MBHdesignSW/spatial_data/TPIreefs12x12.tif") plot(tpi) #Bathy.quant <- c(0,0.8,0.9,0.925,0.95,0.975,1) tpi.quant <- c(0,0.1,0.99,1) tpi.cuts <- quantile(tpi, tpi.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf) #trying to make it so there is no hand-picking (except for the hand-picked function) tmp <- cumsum( tpi.quant) #Bathy.targetNums <- rep( floor( 18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65) #Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums) catT <- cut( tpi, breaks=tpi.cuts, 
na.rm=TRUE) plot(catT) #writeRaster(catT,"~/MBHdesignSW/design1/TPI_cuts_SW1.tif", overwrite=TRUE) # conver to matrix for ease plotting tpim <- raster::as.matrix(tpi) tpim str(tpim) # dim(tpim) # 723 1075 tpim[70,75] # 0.5803415 # transpose the axis of the matrix so x first and y later tpim2 <- t(tpim) str(tpim2) # [1:1075, 1:723] tpim2[75,70] # 0.5803415 # make data frame tpidf <- as.data.frame ( cbind (coordinates (tpi), as.numeric (tpim2))) colnames(tpidf ) <- c("Easting", "Northing", "tpi") head(tpidf ) tpidf <- tpidf [ order(tpidf $Northing, tpidf $Easting),] # order ascending first by northing and then by easting ## Setting up plotting for now and later #### uniqueEast <- base::unique ( tpidf $Easting) # duplicate rows removed uniqueNorth <- base::unique ( tpidf $Northing) ELims <- range ( na.exclude ( tpidf)$Easting) NLims <- range ( na.exclude ( tpidf )$Northing) str(uniqueEast) ## all the x coordinates class(uniqueEast) str(uniqueNorth) ## all the y coordinates str(tpim2) # the dimensions of the matrix neet to be transposed for the plot class(tpim2) #Fix up ordering issue tpim2 <- tpim2[, rev ( 1 : ncol (tpim2))] # this is needed so the map looks the right way- because of the trasnposing of matrix ## plot it to see what we are dealing with #### ## these kind of plots are kind of slow ### image.plot ( uniqueEast, uniqueNorth, tpim2, xlab= "Easting" , ylab= "Northing" , main= "South West Reefs" , legend.lab= "TPI" , asp=1 , ylim= NLims, xlim= ELims, col= ( tim.colors ())) #### INCLUSION PROBS #### par ( mfrow= c ( 1 , 3 ), mar= rep ( 4 , 4 )) tpi.quant <- c(0,0.1,0.98,0.99,1) tpi.cuts <- quantile(tpi, tpi.quant)#c( -Inf,0.02,0.04,0.08,0.16,Inf) #trying to make it so there is no hand-picking (except for the hand-picked function) tmp <- cumsum( tpi.quant) #Bathy.targetNums <- rep( floor( 18/8), 4)#floor( ( tmp / sum( tmp))[-1] * 200)#rep( 40, 5)#c( 20,20,30,65,65) #Bathy.targetProps <- Bathy.targetNums / sum( Bathy.targetNums) catT <- cut( tpi, 
breaks=tpi.cuts, na.rm=TRUE) plot(catT) #writeRaster(catT,"~/MBHdesignSW/design1/TPI_cuts_SW1.tif", overwrite=TRUE) ### TPI part finishes here ---- # set n n <- 24 # The number of 'depth bins' to spread sampling effort over. nbins <- 3 # force the breaks so R doesn't use 'pretty' breaks <- seq ( from= min ( bathydf$depth, na.rm= TRUE ), to= max ( bathydf$depth, na.rm= TRUE ), length= nbins +1 ) # -52.00 -47.75 -43.50 -39.25 -35.00 # chech the values above with raster minValue(b) # -54 maxValue(b) # -35 # Find sensible tpi bins using pre-packaged code tmpHist <- hist ( bathydf$depth, breaks= breaks, plot= T, freq = F ) ## or make breaks according to bathy.cuts Bathy.cuts b.cuts <- c(-54, -45,-44,-35) tmpHist <- hist ( bathydf$depth, breaks= b.cuts, plot= T, freq = F ) # change breaks if needed - least interest areas should have more counts # When reset the breaks run the tmpHist again tmpHist <- hist ( bathydf$depth, breaks= b.cuts, plot= T, freq = F ) #breaks <- c(-46.90025, -30, -20, -8.540641) # #breaks <- c(-1.1969223, -0.8, 0.5258718, 1.9372688) # check breaks #tmpHist <- hist ( bathydf$depth, breaks= breaks, freq= F, plot= T ) # Find the inclusion probability for each 'stratum' (for earch 'bin') # tmpHist$counts are the number of cells in each depth category # this would result in equal proportions for each depth class #tmpHist$inclProbs <- (n/(nbins)) / tmpHist$counts # 0.001445435 0.003973510 0.003949967 0.004474273 # or change to different proportions, but should equal no of clusters * no. 
depth classes, in this case = 24/3 =8 tmpHist$inclProbs <- c(2,4,2) / tmpHist$counts # 0.0005740528 0.0022727273 0.0024539877 0.0018248175 # Matching up locations to probabilties - in data frame str(bathydf) tmpHist$ID <- findInterval ( bathydf$depth, tmpHist$breaks) # breaks coded as 1, 2, 3 and so on depending on how many bins tmpHist$ID[2000] # 2 # not sure why the NAs, but with depth it worked fine length(tmpHist$ID) # 12927 # see hist - quicker way to observe freq of each bin hist(tmpHist$ID) head(bathydf) # A container for the design # create data frame with each location and its inlcusion probability #### design <- data.frame ( siteID= 1 : nrow ( bathydf), Easting= bathydf$Easting, Northing= bathydf$Northing, depth= bathydf$depth, inclProb= tmpHist$inclProbs[tmpHist$ID]) str(design) head(design) ### test #### str(design) head(design) # remove unnecessary columns incprob <- design[,c(2,3,5)] head(incprob) # make df a raster -- coordinates(incprob) <- ~Easting+Northing gridded(incprob) <- TRUE rasterIP <- raster(incprob) plot(rasterIP) cellStats(rasterIP, sum) # save raster of inclusion probabilities writeRaster(rasterIP, paste(s.dir, "InclProbs-FishHWY-d5.tif", sep='/'), overwrite = T) str(design) length(design$Easting) # 4556 # make matrix with inclProbs from design m <- matrix ( design$inclProb, nrow= length ( uniqueEast), byrow= F) str(m) head(m) m[1] # then plot with ( design, image.plot ( uniqueEast, uniqueNorth, m, xlab= "" , ylab= "" , main= "Inclusion Probabilities (24 clusters)" , asp= 1 , ylim= NLims, xlim= ELims)) # Take the Sample using the inclusion probabilities #### #design$inclProb[4174928] <- 4.935784e-06 ### give this NA an inclusion value str(design) ##### replace Nas of Inclusion probabilities for zeroes #### names(design) head(design) design$inclProb[is.na(design$inclProb)] <- 0 head(design) class(design) any(is.na(design$inclProb)) # turn design df into matrix designMat <- cbind(design$Easting,design$Northing, design$inclProb) 
head(designMat) str(designMat) class(designMat) colnames(designMat) <- c("Easting", "Northing", "inclProbs") #Fix up ordering issue: revert ordering of columns: first column last so: inclProbs, Northing, Easting designMat <- designMat[, rev ( 1 : ncol (designMat))] ############### ## Make data frame out of designMat to make raster ### designDF <- as.data.frame(designMat) head(designDF) designDF <- designDF[ order ( designDF$Northing, designDF$Easting),] # order ascending first by northing and then by easting # turn data frame into raster #### # df into spatial points coordinates(designDF) <- ~ Easting + Northing # coerce to SpatialPixelsDataFrame gridded(designDF) <- TRUE # coerce to raster designr <- raster(designDF) designr plot(designr) #second Mat - for plotting designMat2 <- raster::as.matrix(designr, mode ='any') dim(designMat2) str(designMat2) # transpose matrix designMat3 <- t(designMat2) dim(designMat3) str(designMat3) designMat3 <- designMat3[, rev ( 1 : ncol (designMat3))] ### make a new data frame out of this raster and Matrix designdf <- as.data.frame ( cbind ( coordinates ( designr), as.numeric ( designMat3))) colnames ( designdf) <- c ( "Easting" , "Northing" , "inclProbs" ) head(designdf) designdf <- designdf[ order ( designdf$Northing, designdf$Easting),] # order ascending first by northing and then by easting ########## Get cluster centres ####### # Sample with 'quasiSamp' from MBH package #### this takes some time Clusters <- quasiSamp ( n = 24, dimension= 2 , potential.sites = coordinates(designr), inclusion.probs= designdf$inclProbs , nSampsToConsider= 10000) # inclProb that are not NA! 
Clusters # save to csv write.csv(Clusters, paste(w.dir, "Fish-HWY", "FishHWY-24custers-d5.csv", sep='/')) #### plot Clusters #### clustersp <- Clusters coordinates(clustersp) <- ~x+y clustersp proj4string(clustersp) <- proj4string(b) #plot(b2) #plot(tpi) plot(rasterIP) plot(clustersp, pch=20, cex=0.7,col='black',add=T) pdf(paste(w.dir, "Fish-HWY", "FishHWY-Clusters24-design5.pdf", sep='/'), height=7, width=8) #plot(b, main = "Bathymetry - Cluster centres") # or plot inc probs -- plot(rasterIP, main = "Inclusion probabilities - 24 Cluster centres - Design5") points(clustersp, pch=20, cex = 0.8, col = 'black') dev.off() ############################### #### Choose new points within clusters #### Here I need to choose transects not points ############################## getlocal <- function(ii){ point <- Clusters[ii,c("x","y")] r2 <- rasterize( point, rasterIP, field=1) pbuf <- buffer( r2, width=600) ## units are in metres buf <- mask( rasterIP, pbuf) buffer <- trim(buf, pad=0) return( buffer) } sampWithOver <- 12 fullSample <- list() fullZones <- list() ## I think in this funtion I need to change quasiSamp for TransectSamp for( ii in 1:nrow( clustersp)){ tmp <- getlocal(ii) fullZones[[ii]] <- rownames( clustersp@data)[ii] tmpm <- raster::as.matrix(tmp) tmpm <- t(tmpm) tmpdf <- as.data.frame ( cbind (coordinates (tmp), as.numeric (tmpm))) colnames(tmpdf) <- c("x", "y", "inclProbs_design1") tmpdf <- tmpdf[ order(tmpdf$y, tmpdf$x),] # order ascending first by northing and then by easting fullSample[[ii]] <- quasiSamp( n=sampWithOver, potential.sites=coordinates( tmp), inclusion.probs=values( tmp), nSampsToConsider=5000) plot( tmp) points( fullSample[[ii]]$points[,c("x","y")], pch=20, col='red') #plot( legacySites, add=TRUE, pch=4, col='blue') } fullSample <- do.call( "rbind", fullSample) fullSample$cluster <- rep( do.call( "c", fullZones), each=sampWithOver) #fullSample$ID <- paste( fullSample$cluster, rep( paste0( "shot.",1:6), each=nrow( clustersp)), sep="_") 
#fullSample2 <- SpatialPointsDataFrame( coords=fullSample[,c("x","y")], data=fullSample, proj4string=CRS(proj4string(inclProbs))) fullSample3 <- fullSample coordinates(fullSample3) <- ~x+y proj4string(fullSample3) <- proj4string(b) plot(rasterIP) points(fullSample3, pch=20, cex = 0.4, col='black') points(clustersp, pch=20, cex = 0.7, col='blue') pdf(paste(w.dir, "Fish-HWY", "FishHWY-BRUVs-Clusters24-design5.pdf", sep='/'), height=7, width=8) plot(rasterIP, main = "Inclusion probabilities - 24 Clusters - Design5") points(fullSample3, pch=20, cex = 0.4, col='black') points(clustersp, pch=20, cex = 0.7, col='blue') #plot(swnp, add=T) dev.off() ### plot nicely ##### library(dichromat) library(RColorBrewer) pal <- colorRampPalette(c("red","blue")) pdf(paste(w.dir, "Fish-HWY", "FishHWY-Bathy-BRUVs-Clusters24-design5.pdf", sep='/'), height=7, width=8) plot(b, main ="Bathymetry - 24 Clusters - Design5", col = rev(brewer.pal(20, "RdYlBu"))) #plot(tpi, main ="Clustered Stereo-BRUVs - SW", col = pal1) points(fullSample3, pch=20, cex = 0.7, col='black') #plot(swnp, add=T) #points(wp, add = T, pch=20, cex = 0.7, col='green') dev.off() #### Write the shape files d.dir <- paste(w.dir, "Fish-HWY", sep='/') writeOGR(fullSample3, dsn=d.dir, layer="FHWY-BRUVS-d5", driver="ESRI Shapefile", overwrite_layer=TRUE) #writeOGR(fullSample3, dsn=d.dir, layer=paste( "Bruvs4.8", Sys.Date(), sep="_"), driver="ESRI Shapefile", overwrite_layer=TRUE) writeOGR(clustersp, dsn=d.dir, layer="FHWY-24clusters-d5", driver="ESRI Shapefile", overwrite_layer=TRUE)
library("knitr") library("rgl") #knit("sulfentrazone.Rmd") #markdownToHTML('sulfentrazone.md', 'sulfentrazone.html', options=c("use_xhml")) #system("pandoc -s sulfentrazone.html -o sulfentrazone.pdf") knit2html('sulfentrazone.Rmd')
/FDA_Pesticide_Glossary/sulfentrazone.R
permissive
andrewdefries/andrewdefries.github.io
R
false
false
234
r
library("knitr") library("rgl") #knit("sulfentrazone.Rmd") #markdownToHTML('sulfentrazone.md', 'sulfentrazone.html', options=c("use_xhml")) #system("pandoc -s sulfentrazone.html -o sulfentrazone.pdf") knit2html('sulfentrazone.Rmd')
#' MLModel Class Constructor #' #' Create a model for use with the \pkg{MachineShop} package. #' #' @param name character name of the object to which the model is assigned. #' @param label optional character descriptor for the model. #' @param packages character vector of packages required to use the model. #' @param response_types character vector of response variable types to which #' the model can be fit. Supported types are \code{"binary"}, = #' \code{"BinomialVariate"}, \code{"DiscreteVariate"}, \code{"factor"}, #' \code{"matrix"}, \code{"NegBinomialVariate"}, \code{"numeric"}, #' \code{"ordered"}, \code{"PoissonVariate"}, and \code{"Surv"}. #' @param predictor_encoding character string indicating whether the model is #' fit with predictor variables encoded as a \code{"\link{model.matrix}"}, a #' data.frame containing the originally specified model \code{"terms"}, or #' unspecified (default). #' @param params list of user-specified model parameters to be passed to the #' \code{fit} function. #' @param grid tuning grid function whose first agument \code{x} is a #' \code{\link{ModelFrame}} of the model fit data and formula, followed by a #' \code{length} to use in generating sequences of parameter values, a number #' of grid points to sample at \code{random}, and an ellipsis (\code{...}). #' @param fit model fitting function whose arguments are a \code{formula}, a #' \code{\link{ModelFrame}} named \code{data}, case \code{weights}, and an #' ellipsis. #' @param predict model prediction function whose arguments are the #' \code{object} returned by \code{fit}, a \code{\link{ModelFrame}} named #' \code{newdata} of predictor variables, optional vector of \code{times} at #' which to predict survival, and an ellipsis. #' @param varimp variable importance function whose arguments are the #' \code{object} returned by \code{fit}, optional arguments passed from calls #' to \code{\link{varimp}}, and an ellipsis. #' @param ... arguments passed from other methods. 
#' #' @details #' If supplied, the \code{grid} function should return a list whose elements are #' named after and contain values of parameters to include in a tuning grid to #' be constructed automatically by the package. #' #' Argument \code{data} in the \code{fit} function may be converted to a data #' frame with the \code{as.data.frame} function as needed. The function should #' return the object resulting from the model fit. #' #' Values returned by the \code{predict} functions should be formatted according #' to the response variable types below. #' \describe{ #' \item{factor}{vector or column matrix of probabilities for the second level #' of binary factors or a matrix whose columns contain the probabilities for #' factors with more than two levels.} #' \item{matrix}{matrix of predicted responses.} #' \item{numeric}{vector or column matrix of predicted responses.} #' \item{Surv}{matrix whose columns contain survival probabilities at #' \code{times} if supplied or a vector of predicted survival means #' otherwise.} #' } #' #' The \code{varimp} function should return a vector of importance values named #' after the predictor variables or a matrix or data frame whose rows are named #' after the predictors. #' #' @return \code{MLModel} class object. #' #' @seealso \code{\link{models}}, \code{\link{fit}}, \code{\link{resample}} #' #' @examples #' ## Logistic regression model #' LogisticModel <- MLModel( #' name = "LogisticModel", #' response_types = "binary", #' fit = function(formula, data, weights, ...) { #' glm(formula, data = data, weights = weights, family = binomial, ...) #' }, #' predict = function(object, newdata, ...) { #' predict(object, newdata = newdata, type = "response") #' }, #' varimp = function(object, ...) 
{ #' pchisq(coef(object)^2 / diag(vcov(object)), 1) #' } #' ) #' #' library(MASS) #' res <- resample(type ~ ., data = Pima.tr, model = LogisticModel) #' summary(res) #' MLModel <- function(name = "MLModel", label = name, packages = character(), response_types = character(), predictor_encoding = c(NA, "model.matrix", "terms"), params = list(), grid = function(x, length, random, ...) NULL, fit = function(formula, data, weights, ...) stop("no fit function"), predict = function(object, newdata, times, ...) stop("no predict function"), varimp = function(object, ...) NULL, ...) { stopifnot(response_types %in% .response_types) new("MLModel", name = name, label = label, packages = packages, response_types = response_types, predictor_encoding = match.arg(predictor_encoding), params = params, grid = grid, fit = fit, predict = predict, varimp = varimp) } .response_types <- c("binary", "BinomialVariate", "DiscreteVariate", "factor", "matrix", "NegBinomialVariate", "numeric", "ordered", "PoissonVariate", "Surv") MLModelFit <- function(object, Class, model, x) { model@x <- prep(x) if (is(object, Class)) { object <- unMLModelFit(object) } else if (is(object, "MLModelFit")) { stop("cannot change MLModelFit class") } if (!is(model, "MLModel")) stop("model not of class MLModel") if (isS4(object)) { object <- new(Class, object, mlmodel = model) } else if (is.list(object)) { object$mlmodel <- model class(object) <- c(Class, "MLModelFit", class(object)) } else { stop("unsupported object class") } object } unMLModelFit <- function(object) { if (is(object, "MLModelFit")) { if (isS4(object)) { classes <- extends(class(object)) pos <- match("MLModelFit", classes) as(object, classes[pos + 1]) } else { object$mlmodel <- NULL classes <- class(object) pos <- match("MLModelFit", classes) structure(object, class = classes[-(1:pos)]) } } else object }
/R/MLModel.R
no_license
chen061218/MachineShop
R
false
false
6,090
r
#' MLModel Class Constructor #' #' Create a model for use with the \pkg{MachineShop} package. #' #' @param name character name of the object to which the model is assigned. #' @param label optional character descriptor for the model. #' @param packages character vector of packages required to use the model. #' @param response_types character vector of response variable types to which #' the model can be fit. Supported types are \code{"binary"}, = #' \code{"BinomialVariate"}, \code{"DiscreteVariate"}, \code{"factor"}, #' \code{"matrix"}, \code{"NegBinomialVariate"}, \code{"numeric"}, #' \code{"ordered"}, \code{"PoissonVariate"}, and \code{"Surv"}. #' @param predictor_encoding character string indicating whether the model is #' fit with predictor variables encoded as a \code{"\link{model.matrix}"}, a #' data.frame containing the originally specified model \code{"terms"}, or #' unspecified (default). #' @param params list of user-specified model parameters to be passed to the #' \code{fit} function. #' @param grid tuning grid function whose first agument \code{x} is a #' \code{\link{ModelFrame}} of the model fit data and formula, followed by a #' \code{length} to use in generating sequences of parameter values, a number #' of grid points to sample at \code{random}, and an ellipsis (\code{...}). #' @param fit model fitting function whose arguments are a \code{formula}, a #' \code{\link{ModelFrame}} named \code{data}, case \code{weights}, and an #' ellipsis. #' @param predict model prediction function whose arguments are the #' \code{object} returned by \code{fit}, a \code{\link{ModelFrame}} named #' \code{newdata} of predictor variables, optional vector of \code{times} at #' which to predict survival, and an ellipsis. #' @param varimp variable importance function whose arguments are the #' \code{object} returned by \code{fit}, optional arguments passed from calls #' to \code{\link{varimp}}, and an ellipsis. #' @param ... arguments passed from other methods. 
#' #' @details #' If supplied, the \code{grid} function should return a list whose elements are #' named after and contain values of parameters to include in a tuning grid to #' be constructed automatically by the package. #' #' Argument \code{data} in the \code{fit} function may be converted to a data #' frame with the \code{as.data.frame} function as needed. The function should #' return the object resulting from the model fit. #' #' Values returned by the \code{predict} functions should be formatted according #' to the response variable types below. #' \describe{ #' \item{factor}{vector or column matrix of probabilities for the second level #' of binary factors or a matrix whose columns contain the probabilities for #' factors with more than two levels.} #' \item{matrix}{matrix of predicted responses.} #' \item{numeric}{vector or column matrix of predicted responses.} #' \item{Surv}{matrix whose columns contain survival probabilities at #' \code{times} if supplied or a vector of predicted survival means #' otherwise.} #' } #' #' The \code{varimp} function should return a vector of importance values named #' after the predictor variables or a matrix or data frame whose rows are named #' after the predictors. #' #' @return \code{MLModel} class object. #' #' @seealso \code{\link{models}}, \code{\link{fit}}, \code{\link{resample}} #' #' @examples #' ## Logistic regression model #' LogisticModel <- MLModel( #' name = "LogisticModel", #' response_types = "binary", #' fit = function(formula, data, weights, ...) { #' glm(formula, data = data, weights = weights, family = binomial, ...) #' }, #' predict = function(object, newdata, ...) { #' predict(object, newdata = newdata, type = "response") #' }, #' varimp = function(object, ...) 
{ #' pchisq(coef(object)^2 / diag(vcov(object)), 1) #' } #' ) #' #' library(MASS) #' res <- resample(type ~ ., data = Pima.tr, model = LogisticModel) #' summary(res) #' MLModel <- function(name = "MLModel", label = name, packages = character(), response_types = character(), predictor_encoding = c(NA, "model.matrix", "terms"), params = list(), grid = function(x, length, random, ...) NULL, fit = function(formula, data, weights, ...) stop("no fit function"), predict = function(object, newdata, times, ...) stop("no predict function"), varimp = function(object, ...) NULL, ...) { stopifnot(response_types %in% .response_types) new("MLModel", name = name, label = label, packages = packages, response_types = response_types, predictor_encoding = match.arg(predictor_encoding), params = params, grid = grid, fit = fit, predict = predict, varimp = varimp) } .response_types <- c("binary", "BinomialVariate", "DiscreteVariate", "factor", "matrix", "NegBinomialVariate", "numeric", "ordered", "PoissonVariate", "Surv") MLModelFit <- function(object, Class, model, x) { model@x <- prep(x) if (is(object, Class)) { object <- unMLModelFit(object) } else if (is(object, "MLModelFit")) { stop("cannot change MLModelFit class") } if (!is(model, "MLModel")) stop("model not of class MLModel") if (isS4(object)) { object <- new(Class, object, mlmodel = model) } else if (is.list(object)) { object$mlmodel <- model class(object) <- c(Class, "MLModelFit", class(object)) } else { stop("unsupported object class") } object } unMLModelFit <- function(object) { if (is(object, "MLModelFit")) { if (isS4(object)) { classes <- extends(class(object)) pos <- match("MLModelFit", classes) as(object, classes[pos + 1]) } else { object$mlmodel <- NULL classes <- class(object) pos <- match("MLModelFit", classes) structure(object, class = classes[-(1:pos)]) } } else object }
source('data.R') source('theme.R') ## TODO drop y text image_device("daily1") m <- nn5[, c(1, 110)] r <- nrow(m) labels <- make.unique(colnames(m)) df <- data.frame( index(m)[rep.int(1:r, ncol(m))], factor(rep(1:ncol(m), each = r), levels = 1:ncol(m), labels = labels), as.vector(coredata(m))) names(df) <- c("Index", "Series", "Value") df[, "Color"] <- "black" df[df$Index > nn5_last_train_date, "Color"] <- "blue" p <- ggplot(data = df) + geom_path(aes(x = Index, y = Value, color = Color), na.rm = TRUE) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + facet_grid(Series ~ ., scales = "free_y") + tm + theme(strip.text.y = element_blank()) print(p) dev.off() image_device("daily2") m <- nn5[, c(26, 89)] r <- nrow(m) labels <- make.unique(colnames(m)) df <- data.frame( index(m)[rep.int(1:r, ncol(m))], factor(rep(1:ncol(m), each = r), levels = 1:ncol(m), labels = labels), as.vector(coredata(m))) names(df) <- c("Index", "Series", "Value") df[, "Color"] <- "black" df[df$Index > nn5_last_train_date, "Color"] <- "blue" p <- ggplot(data = df) + geom_path(aes(x = Index, y = Value, color = Color), na.rm = TRUE) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + facet_grid(Series ~ ., scales = "free_y") + tm + theme(strip.text.y = element_blank()) print(p) dev.off() image_device("half-hourly") data <- data.frame(x = 1:384, y = taylor[1:384]) data["color"] <- "black" data[336:384, "color"] <- "blue" p <- ggplot(data) + geom_path(aes(x = x, y = y, color = color)) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + tm + theme_no_labels print(p) dev.off() image_device("dirac-delta") delta <- data.frame(x = (-500):500, y = 0) delta[501, "y"] <- 1 p <- ggplot(delta) + geom_path(aes(x = x, y = y)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() delta <- rep.int(0, 200) delta[1] <- 1 image_device("ar_1") start.innov <- c(0) sim <- as.numeric(arima.sim(list(ar = (0.9)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:100] p <- ggplot() 
+ geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_1") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.3, 0.3)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:50] p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_2") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.9, -0.85)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:100] ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_3") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.08, 0.9)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov)) p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ma_5") start.innov <- rep.int(0, 5) sim <- as.numeric(arima.sim(list(ma = c(0.8, 0.6, 0.4, 0.2, 0.1)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:15] p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- rnorm(100) image_device("i_0") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- cumsum(noise) image_device("i_1") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- cumsum(noise) image_device("i_2") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() pattern <- c(5, 4.7, 4, 4.5, 6, 0, 0) long <- rep(pattern, 100) long <- long + 
rnorm(length(long), 0, 0.5) long[seq_along(long) %% 7 == 6] <- 0 long[seq_along(long) %% 7 == 0] <- 0 image_device("weekly_noise") clean <- rep(pattern, 4) short <- long[1:28] x <- 1:28 p<- ggplot() + geom_path(aes(x = x, y = clean, color = "green")) + geom_path(aes(x = x, y = short, color = "black")) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() auto.fit <- auto.arima(long, max.p = 28, max.q = 5, max.order = 40, start.p = 1, start.q = 1) x <- 1:70 image_device("arima_auto") p <- ggplot() + geom_path(aes(x = x, y = rep(pattern, 10), color = "green")) + geom_path(aes(x = x, y = forecast(auto.fit, h = 70)$mean, color = "blue")) + scale_colour_identity() + ggtitle(as.character(auto.fit)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() manual.fit <- Arima(long, c(7, 0, 0)) image_device("arima_7") p <- ggplot() + geom_path(aes(x = x, y = rep(pattern, 10), color = "green")) + geom_path(aes(x = x, y = forecast(manual.fit, h = 70)$mean, color = "blue")) + scale_colour_identity() + ggtitle(as.character(manual.fit)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off()
/R/images.R
permissive
gridl/arima-vs-long-forecasts-2017
R
false
false
5,143
r
source('data.R') source('theme.R') ## TODO drop y text image_device("daily1") m <- nn5[, c(1, 110)] r <- nrow(m) labels <- make.unique(colnames(m)) df <- data.frame( index(m)[rep.int(1:r, ncol(m))], factor(rep(1:ncol(m), each = r), levels = 1:ncol(m), labels = labels), as.vector(coredata(m))) names(df) <- c("Index", "Series", "Value") df[, "Color"] <- "black" df[df$Index > nn5_last_train_date, "Color"] <- "blue" p <- ggplot(data = df) + geom_path(aes(x = Index, y = Value, color = Color), na.rm = TRUE) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + facet_grid(Series ~ ., scales = "free_y") + tm + theme(strip.text.y = element_blank()) print(p) dev.off() image_device("daily2") m <- nn5[, c(26, 89)] r <- nrow(m) labels <- make.unique(colnames(m)) df <- data.frame( index(m)[rep.int(1:r, ncol(m))], factor(rep(1:ncol(m), each = r), levels = 1:ncol(m), labels = labels), as.vector(coredata(m))) names(df) <- c("Index", "Series", "Value") df[, "Color"] <- "black" df[df$Index > nn5_last_train_date, "Color"] <- "blue" p <- ggplot(data = df) + geom_path(aes(x = Index, y = Value, color = Color), na.rm = TRUE) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + facet_grid(Series ~ ., scales = "free_y") + tm + theme(strip.text.y = element_blank()) print(p) dev.off() image_device("half-hourly") data <- data.frame(x = 1:384, y = taylor[1:384]) data["color"] <- "black" data[336:384, "color"] <- "blue" p <- ggplot(data) + geom_path(aes(x = x, y = y, color = color)) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + tm + theme_no_labels print(p) dev.off() image_device("dirac-delta") delta <- data.frame(x = (-500):500, y = 0) delta[501, "y"] <- 1 p <- ggplot(delta) + geom_path(aes(x = x, y = y)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() delta <- rep.int(0, 200) delta[1] <- 1 image_device("ar_1") start.innov <- c(0) sim <- as.numeric(arima.sim(list(ar = (0.9)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:100] p <- ggplot() 
+ geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_1") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.3, 0.3)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:50] p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_2") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.9, -0.85)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:100] ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ar_2_3") start.innov <- c(0, 0) sim <- as.numeric(arima.sim(list(ar = c(0.08, 0.9)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov)) p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() image_device("ma_5") start.innov <- rep.int(0, 5) sim <- as.numeric(arima.sim(list(ma = c(0.8, 0.6, 0.4, 0.2, 0.1)), length(delta), innov = delta, n.start = length(start.innov), start.innov = start.innov))[1:15] p <- ggplot() + geom_path(aes(x = seq_along(sim) - 1, y = sim)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- rnorm(100) image_device("i_0") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- cumsum(noise) image_device("i_1") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() noise <- cumsum(noise) image_device("i_2") p <- ggplot() + geom_path(aes(x = seq_along(noise), y = noise)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() pattern <- c(5, 4.7, 4, 4.5, 6, 0, 0) long <- rep(pattern, 100) long <- long + 
rnorm(length(long), 0, 0.5) long[seq_along(long) %% 7 == 6] <- 0 long[seq_along(long) %% 7 == 0] <- 0 image_device("weekly_noise") clean <- rep(pattern, 4) short <- long[1:28] x <- 1:28 p<- ggplot() + geom_path(aes(x = x, y = clean, color = "green")) + geom_path(aes(x = x, y = short, color = "black")) + scale_colour_identity() + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() auto.fit <- auto.arima(long, max.p = 28, max.q = 5, max.order = 40, start.p = 1, start.q = 1) x <- 1:70 image_device("arima_auto") p <- ggplot() + geom_path(aes(x = x, y = rep(pattern, 10), color = "green")) + geom_path(aes(x = x, y = forecast(auto.fit, h = 70)$mean, color = "blue")) + scale_colour_identity() + ggtitle(as.character(auto.fit)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off() manual.fit <- Arima(long, c(7, 0, 0)) image_device("arima_7") p <- ggplot() + geom_path(aes(x = x, y = rep(pattern, 10), color = "green")) + geom_path(aes(x = x, y = forecast(manual.fit, h = 70)$mean, color = "blue")) + scale_colour_identity() + ggtitle(as.character(manual.fit)) + xlab(NULL) + ylab(NULL) + tm print(p) dev.off()
library(mclust) library(latex2exp) library(ghibli) path_cvx <- "./data/" load("./data/rand_config_p40_n20_tree_set1.RData") load("./data/rand_config_tree_set2_28simus_tvmax10.RData") load("./data/rand_config_tree_set3_27simus_tvmax5.RData") load("./data/rand_config_tree_set4_35simus_tvmax2_5.RData") load("./data/rand_config_tree_set5_30simus_tvmax1.RData") dt <- c(rand_config_p40_n20_tree_set1, rand_config_tree_set2_28simus_tvmax10, rand_config_tree_set3_27simus_tvmax5, rand_config_tree_set4_35simus_tvmax2_5, rand_config_tree_set5_30simus_tvmax1) list_res1 <- lapply(dt, function(e){get_perf_from_raw2("rand", e, thresh_fuse = 1e-4, cah_kmeans_available = TRUE)}) dt_rand <- do.call(rbind, list_res1) plot_res2(dt_rand, crit_ = "rand", c(3, 5, 6, 10, 22), np_ = c(0.5, 1, 2))
/scripts/supplemental/supp4-performances-hierarchically-structured-model.R.R
permissive
computorg/published-202306-sanou-multiscale_glasso
R
false
false
820
r
library(mclust) library(latex2exp) library(ghibli) path_cvx <- "./data/" load("./data/rand_config_p40_n20_tree_set1.RData") load("./data/rand_config_tree_set2_28simus_tvmax10.RData") load("./data/rand_config_tree_set3_27simus_tvmax5.RData") load("./data/rand_config_tree_set4_35simus_tvmax2_5.RData") load("./data/rand_config_tree_set5_30simus_tvmax1.RData") dt <- c(rand_config_p40_n20_tree_set1, rand_config_tree_set2_28simus_tvmax10, rand_config_tree_set3_27simus_tvmax5, rand_config_tree_set4_35simus_tvmax2_5, rand_config_tree_set5_30simus_tvmax1) list_res1 <- lapply(dt, function(e){get_perf_from_raw2("rand", e, thresh_fuse = 1e-4, cah_kmeans_available = TRUE)}) dt_rand <- do.call(rbind, list_res1) plot_res2(dt_rand, crit_ = "rand", c(3, 5, 6, 10, 22), np_ = c(0.5, 1, 2))
library(e1071) > library(e1071) > data(Ozone, package="mlbench") > ## split data into a train and test set > index <- 1:nrow(Ozone) > testindex <- sample(index, trunc(length(index)/3)) > testset <- na.omit(Ozone[testindex,-3]) > trainset <- na.omit(Ozone[-testindex,-3]) > ## svm > svm.model <- svm(V4 ~ ., data = trainset, cost = 1000, gamma = 0.0001) > svm.pred <- predict(svm.model, testset[,-3]) > crossprod(svm.pred - testset[,3]) / length(testindex) # and # http://archive.ics.uci.edu/ml/datasets/Student+Performance # http://archive.ics.uci.edu/ml/datasets/NoisyOffice # http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
/aquarius DA/lab1_svmreg1.R
no_license
priteshmaheshwari/DataAnalytics2021_Pritesh_Maheshwari
R
false
false
648
r
library(e1071) > library(e1071) > data(Ozone, package="mlbench") > ## split data into a train and test set > index <- 1:nrow(Ozone) > testindex <- sample(index, trunc(length(index)/3)) > testset <- na.omit(Ozone[testindex,-3]) > trainset <- na.omit(Ozone[-testindex,-3]) > ## svm > svm.model <- svm(V4 ~ ., data = trainset, cost = 1000, gamma = 0.0001) > svm.pred <- predict(svm.model, testset[,-3]) > crossprod(svm.pred - testset[,3]) / length(testindex) # and # http://archive.ics.uci.edu/ml/datasets/Student+Performance # http://archive.ics.uci.edu/ml/datasets/NoisyOffice # http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/umxACEnew.R \name{umxACEnew} \alias{umxACEnew} \title{Build and run a 2-group Cholesky twin model (uni-variate or multi-variate)} \usage{ umxACEnew(name = "ACE", selDVs, selCovs = NULL, covMethod = c("fixed", "random"), dzData, mzData, sep = NULL, type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"), dzAr = 0.5, dzCr = 1, addStd = TRUE, addCI = TRUE, numObsDZ = NULL, numObsMZ = NULL, boundDiag = 0, weightVar = NULL, equateMeans = TRUE, bVector = FALSE, thresholds = c("deviationBased"), autoRun = getOption("umx_auto_run"), tryHard = c("no", "mxTryHard", "mxTryHardOrdinal", "mxTryHardWideSearch"), optimizer = NULL, intervals = FALSE) } \arguments{ \item{name}{The name of the model (defaults to"ACE").} \item{selDVs}{The variables to include from the data: preferably, just "dep" not c("dep_T1", "dep_T2").} \item{selCovs}{(optional) covariates to include from the data (do not include sep in names)} \item{covMethod}{How to treat covariates: "fixed" (default) or "random".} \item{dzData}{The DZ dataframe.} \item{mzData}{The MZ dataframe.} \item{sep}{The separator in twin variable names, often "_T", e.g. "dep_T1". Simplifies selDVs.} \item{type}{Analysis method one of c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS")} \item{dzAr}{The DZ genetic correlation (defaults to .5, vary to examine assortative mating).} \item{dzCr}{The DZ "C" correlation (defaults to 1: set to .25 to make an ADE model).} \item{addStd}{Whether to add the algebras to compute a std model (defaults to TRUE).} \item{addCI}{Whether to add intervals to compute CIs (defaults to TRUE).} \item{numObsDZ}{Number of DZ twins: Set this if you input covariance data.} \item{numObsMZ}{Number of MZ twins: Set this if you input covariance data.} \item{boundDiag}{Numeric lbound for diagonal of the a, c, and e matrices. 
Defaults to 0 since umx version 1.8} \item{weightVar}{If provided, a vector objective will be used to weight the data. (default = NULL).} \item{equateMeans}{Whether to equate the means across twins (defaults to TRUE).} \item{bVector}{Whether to compute row-wise likelihoods (defaults to FALSE).} \item{thresholds}{How to implement ordinal thresholds c("deviationBased", "WLS").} \item{autoRun}{Whether to run the model, and return that (default), or just to create it and return without running.} \item{tryHard}{optionally tryHard (default 'no' uses normal mxRun). c("no", "mxTryHard", "mxTryHardOrdinal", "mxTryHardWideSearch")} \item{optimizer}{Optionally set the optimizer (default NULL does nothing).} \item{intervals}{Whether to run mxCI confidence intervals (default = FALSE)} } \value{ \itemize{ \item \code{\link{mxModel}} of subclass mxModel.ACE } } \description{ Implementing a core task in twin modeling, umxACE models the genetic and environmental structure of one or more phenotypes (measured variables) using the Cholesky ACE model (Neale and Cardon, 1996). Classical twin modeling uses the genetic and environmental differences among pairs of mono-zygotic (MZ) and di-zygotic (DZ) twins reared together. \code{umxACE} implements a 2-group model to capture these data and represent the phenotypic variance as a sum of Additive genetic, unique environmental (E) and, optionally, either common or shared-environment (C) or non-additive genetic effects (D). The following figure shows how the ACE model appears as a path diagram (for one variable): \if{html}{\figure{ACEunivariate.png}{options: width="50\%" alt="Figure: ACE univariate.png"}} \if{latex}{\figure{ACEunivariate.pdf}{options: width=7cm}} \code{umxACE} allows multivariate analyses, and this brings us to the Cholesky part of the model. 
This model creates as many latent A C and E variables as there are phenotypes, and, moving from left to right, decomposes the variance in each manifest into successively restricted factors. The following figure shows how the ACE model appears as a path diagram: \if{html}{\figure{ACEmatrix.png}{options: width="50\%" alt="Figure: ACE matrix.png"}} \if{latex}{\figure{ACEmatrix.pdf}{options: width=7cm}} In this model, the variance-covariance matrix of the raw data is recovered as the product of the lower Cholesky and its transform. This Cholesky or lower-triangle decomposition allows a model which is both sure to be solvable, and also to account for all the variance (with some restrictions) in the data. This figure also contains the key to understanding how to modify models that \code{umxACE} produces. read the "Matrices and Labels in ACE model" section in details below... \strong{NOTE}: Scroll down to details for how to use the function, a figure and multiple examples. } \details{ \strong{Data Input} The function flexibly accepts raw data, and also summary covariance data (in which case the user must also supple numbers of observations for the two input data sets). The \code{type} parameter can select how you want the model data treated. "FIML" is the normal treatment. "cov" and "cor" will turn raw data into cor data for analysis, or check that you've provided cor data as input. Types "WLS", "DWLS", and "ULS" will process raw data into WLS data of these types. The default, "Auto" will treat data as the type they are provided as. \strong{Ordinal Data} In an important capability, the model transparently handles ordinal (binary or multi-level ordered factor data) inputs, and can handle mixtures of continuous, binary, and ordinal data in any combination. An experimental feature is under development to allow Tobit modeling. The function also supports weighting of individual data rows. 
In this case, the model is estimated for each row individually, then each row likelihood is multiplied by its weight, and these weighted likelihoods summed to form the model-likelihood, which is to be minimized. This feature is used in the non-linear GxE model functions. \strong{Additional features} The umxACE function supports varying the DZ genetic association (defaulting to .5) to allow exploring assortative mating effects, as well as varying the DZ \dQuote{C} factor from 1 (the default for modeling family-level effects shared 100% by twins in a pair), to .25 to model dominance effects. \strong{Matrices and Labels in ACE model} Matrices 'a', 'c', and 'e' contain the path loadings of the Cholesky ACE factor model. So, labels relevant to modifying the model are of the form \code{"a_r1c1", "c_r1c1"} etc. Variables are in rows, and factors are in columns. So to drop the influence of factor 2 on variable 3, you would say\preformatted{\code{m2 = umxModify(m1, update = "c_r3c2")} } Less commonly-modified matrices are the mean matrix \code{expMean}. This has 1 row, and the columns are laid out for each variable for twin 1, followed by each variable for twin 2. So, in a model where the means for twin 1 and twin 2 had been equated (set = to T1), you could make them independent again with this script: \code{m1$top$expMean$labels[1, 4:6] = c("expMean_r1c4", "expMean_r1c5", "expMean_r1c6")} \emph{note}: Only one of C or D may be estimated simultaneously. This restriction reflects the lack of degrees of freedom to simultaneously model C and D with only MZ and DZ twin pairs (Eaves et al. 1978 p267). } \examples{ # ============================ # = How heritable is height? = # ============================ require(umx) data(twinData) # ?twinData from Australian twins. 
# Pick the variables selDVs = c("ht") mzData <- twinData[twinData$zygosity \%in\% "MZFF", ] dzData <- twinData[twinData$zygosity \%in\% "DZFF", ] m1 = umxACEnew(selDVs = selDVs, sep = "", dzData = dzData, mzData = mzData) umxSummary(m1, std = FALSE) # unstandardized # tip: with report = "html", umxSummary can print the table to your browser! plot(m1) # ======================================================== # = Evidence for dominance ? (DZ correlation set to .25) = # ======================================================== m2 = umxACEnew("ADE", selDVs = selDVs, sep = "", dzData = dzData, mzData = mzData, dzCr = .25) umxCompare(m2, m1) # ADE is better umxSummary(m2, comparison = m1) # nb: Although summary is smart enough to print d, the underlying # matrices are still called a, c & e. # ============================== # = Univariate model of weight = # ============================== # Things to note: # 1. This variable has a large variance, but umx picks good starts. # 2. umxACEnew can figure out variable names: provide sep= "_T" and selVar = "wt" -> "wt_T1" "wt_T2" # 3. umxACEnew picks the variables it needs from the data. # 4. note: the default boundDiag = 0 lower-bounds a, c, and e at 0 (prevents mirror-solutions). 
# can remove this by setting boundDiag = NULL m1 = umxACEnew(selDVs = "wt", dzData = dzData, mzData = mzData, sep = "", boundDiag = NULL) # MODEL MODIFICATION # We can modify this model, say testing shared environment, and see a comparison: m2 = umxModify(m1, update = "c_r1c1", name = "no_C", comparison = TRUE) # nb: You can see names of free parameters with parameters(m2) # ===================================== # = Bivariate height and weight model = # ===================================== data(twinData) mzData = twinData[twinData$zygosity \%in\% c("MZFF", "MZMM"),] dzData = twinData[twinData$zygosity \%in\% c("DZFF", "DZMM", "DZOS"), ] mzData = mzData[1:80,] # quicker run to keep CRAN happy dzData = dzData[1:80,] selDVs = c("ht", "wt") # umx will add sep (in this case "") + "1" or '2' m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep = '') umxSummary(m1) # ========================================================= # = Well done! Now you can make modify twin models in umx = # ========================================================= # =================== # = Ordinal example = # =================== require(umx) data(twinData) # Cut BMI column to form ordinal obesity variables obesityLevels = c('normal', 'overweight', 'obese') cutPoints <- quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE) twinData$obese1 <- cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 <- cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) # Make the ordinal variables into umxFactors (ensure ordered is TRUE, and require levels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] <- mxFactor(twinData[, ordDVs], levels = obesityLevels) mzData = twinData[twinData$zygosity \%in\% "MZFF", ] dzData = twinData[twinData$zygosity \%in\% "DZFF", ] mzData = mzData[1:80, ] # Just top 80 pairs to run fast dzData = dzData[1:80, ] str(mzData) # make sure mz, dz, and t1 and t2 have the same levels! 
# Data-prep done - here's where the model starts: selDVs = c("obese") m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep = '') umxSummary(m1) # ============================================ # = Bivariate continuous and ordinal example = # ============================================ data(twinData) # Cut BMI column to form ordinal obesity variables obesityLevels = c('normal', 'overweight', 'obese') cutPoints = quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE) twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) # Make the ordinal variables into mxFactors (ensure ordered is TRUE, and require levels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] = umxFactor(twinData[, ordDVs]) mzData = twinData[twinData$zygosity \%in\% "MZFF",] dzData = twinData[twinData$zygosity \%in\% "DZFF",] mzData <- mzData[1:80,] # just top 80 so example runs in a couple of secs dzData <- dzData[1:80,] m1 = umxACEnew(selDVs = c("wt", "obese"), dzData = dzData, mzData = mzData, sep = '') # ======================================= # = Mixed continuous and binary example = # ======================================= require(umx) data(twinData) # Cut to form category of 20\% obese subjects # and make into mxFactors (ensure ordered is TRUE, and require levels) obesityLevels = c('normal', 'obese') cutPoints = quantile(twinData[, "bmi1"], probs = .2, na.rm = TRUE) twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] = umxFactor(twinData[, ordDVs]) selDVs = c("wt", "obese") mzData = twinData[twinData$zygosity \%in\% "MZFF",] dzData = twinData[twinData$zygosity \%in\% "DZFF",] \dontrun{ m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep 
= '') umxSummary(m1) } # =================================== # Example with covariance data only = # =================================== require(umx) data(twinData) selDVs = c("wt1", "wt2") mz = cov(twinData[twinData$zygosity \%in\% "MZFF", selDVs], use = "complete") dz = cov(twinData[twinData$zygosity \%in\% "DZFF", selDVs], use = "complete") m1 = umxACEnew(selDVs = selDVs, dzData = dz, mzData = mz, numObsDZ=569, numObsMZ=351) umxSummary(m1) plot(m1) } \references{ \itemize{ \item Eaves, L. J., Last, K. A., Young, P. A., & Martin, N. G. (1978). Model-fitting approaches to the analysis of human behaviour. \emph{Heredity}, \strong{41}, 249-320. \url{https://www.nature.com/articles/hdy1978101.pdf} } } \seealso{ \itemize{ \item \code{\link{plot.MxModelACE}}, \code{\link{plot.MxModelACE}}, \code{\link{umxSummaryACE}}, \code{\link{umxModify}} } Other Twin Modeling Functions: \code{\link{umxACE_cov_fixed}}, \code{\link{umxACEcov}}, \code{\link{umxACEv}}, \code{\link{umxACE}}, \code{\link{umxCPold}}, \code{\link{umxCP}}, \code{\link{umxGxE_window}}, \code{\link{umxGxEbiv}}, \code{\link{umxGxE}}, \code{\link{umxIPnew}}, \code{\link{umxIP}}, \code{\link{umxSexLim}}, \code{\link{umxSimplex}}, \code{\link{umxSummaryACEcov}}, \code{\link{umxSummaryACEv}}, \code{\link{umxSummaryACE}}, \code{\link{umxSummaryCP}}, \code{\link{umxSummaryGxEbiv}}, \code{\link{umxSummaryGxE}}, \code{\link{umxSummaryIP}}, \code{\link{umxSummarySexLim}}, \code{\link{umxSummarySimplex}}, \code{\link{umx}}, \code{\link{xmu_twin_check}} } \concept{Twin Modeling Functions}
/man/umxACEnew.Rd
no_license
guhjy/umx
R
false
true
14,319
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/umxACEnew.R \name{umxACEnew} \alias{umxACEnew} \title{Build and run a 2-group Cholesky twin model (uni-variate or multi-variate)} \usage{ umxACEnew(name = "ACE", selDVs, selCovs = NULL, covMethod = c("fixed", "random"), dzData, mzData, sep = NULL, type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"), dzAr = 0.5, dzCr = 1, addStd = TRUE, addCI = TRUE, numObsDZ = NULL, numObsMZ = NULL, boundDiag = 0, weightVar = NULL, equateMeans = TRUE, bVector = FALSE, thresholds = c("deviationBased"), autoRun = getOption("umx_auto_run"), tryHard = c("no", "mxTryHard", "mxTryHardOrdinal", "mxTryHardWideSearch"), optimizer = NULL, intervals = FALSE) } \arguments{ \item{name}{The name of the model (defaults to"ACE").} \item{selDVs}{The variables to include from the data: preferably, just "dep" not c("dep_T1", "dep_T2").} \item{selCovs}{(optional) covariates to include from the data (do not include sep in names)} \item{covMethod}{How to treat covariates: "fixed" (default) or "random".} \item{dzData}{The DZ dataframe.} \item{mzData}{The MZ dataframe.} \item{sep}{The separator in twin variable names, often "_T", e.g. "dep_T1". Simplifies selDVs.} \item{type}{Analysis method one of c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS")} \item{dzAr}{The DZ genetic correlation (defaults to .5, vary to examine assortative mating).} \item{dzCr}{The DZ "C" correlation (defaults to 1: set to .25 to make an ADE model).} \item{addStd}{Whether to add the algebras to compute a std model (defaults to TRUE).} \item{addCI}{Whether to add intervals to compute CIs (defaults to TRUE).} \item{numObsDZ}{Number of DZ twins: Set this if you input covariance data.} \item{numObsMZ}{Number of MZ twins: Set this if you input covariance data.} \item{boundDiag}{Numeric lbound for diagonal of the a, c, and e matrices. 
Defaults to 0 since umx version 1.8} \item{weightVar}{If provided, a vector objective will be used to weight the data. (default = NULL).} \item{equateMeans}{Whether to equate the means across twins (defaults to TRUE).} \item{bVector}{Whether to compute row-wise likelihoods (defaults to FALSE).} \item{thresholds}{How to implement ordinal thresholds c("deviationBased", "WLS").} \item{autoRun}{Whether to run the model, and return that (default), or just to create it and return without running.} \item{tryHard}{optionally tryHard (default 'no' uses normal mxRun). c("no", "mxTryHard", "mxTryHardOrdinal", "mxTryHardWideSearch")} \item{optimizer}{Optionally set the optimizer (default NULL does nothing).} \item{intervals}{Whether to run mxCI confidence intervals (default = FALSE)} } \value{ \itemize{ \item \code{\link{mxModel}} of subclass mxModel.ACE } } \description{ Implementing a core task in twin modeling, umxACE models the genetic and environmental structure of one or more phenotypes (measured variables) using the Cholesky ACE model (Neale and Cardon, 1996). Classical twin modeling uses the genetic and environmental differences among pairs of mono-zygotic (MZ) and di-zygotic (DZ) twins reared together. \code{umxACE} implements a 2-group model to capture these data and represent the phenotypic variance as a sum of Additive genetic, unique environmental (E) and, optionally, either common or shared-environment (C) or non-additive genetic effects (D). The following figure shows how the ACE model appears as a path diagram (for one variable): \if{html}{\figure{ACEunivariate.png}{options: width="50\%" alt="Figure: ACE univariate.png"}} \if{latex}{\figure{ACEunivariate.pdf}{options: width=7cm}} \code{umxACE} allows multivariate analyses, and this brings us to the Cholesky part of the model. 
This model creates as many latent A C and E variables as there are phenotypes, and, moving from left to right, decomposes the variance in each manifest into successively restricted factors. The following figure shows how the ACE model appears as a path diagram: \if{html}{\figure{ACEmatrix.png}{options: width="50\%" alt="Figure: ACE matrix.png"}} \if{latex}{\figure{ACEmatrix.pdf}{options: width=7cm}} In this model, the variance-covariance matrix of the raw data is recovered as the product of the lower Cholesky and its transform. This Cholesky or lower-triangle decomposition allows a model which is both sure to be solvable, and also to account for all the variance (with some restrictions) in the data. This figure also contains the key to understanding how to modify models that \code{umxACE} produces. read the "Matrices and Labels in ACE model" section in details below... \strong{NOTE}: Scroll down to details for how to use the function, a figure and multiple examples. } \details{ \strong{Data Input} The function flexibly accepts raw data, and also summary covariance data (in which case the user must also supple numbers of observations for the two input data sets). The \code{type} parameter can select how you want the model data treated. "FIML" is the normal treatment. "cov" and "cor" will turn raw data into cor data for analysis, or check that you've provided cor data as input. Types "WLS", "DWLS", and "ULS" will process raw data into WLS data of these types. The default, "Auto" will treat data as the type they are provided as. \strong{Ordinal Data} In an important capability, the model transparently handles ordinal (binary or multi-level ordered factor data) inputs, and can handle mixtures of continuous, binary, and ordinal data in any combination. An experimental feature is under development to allow Tobit modeling. The function also supports weighting of individual data rows. 
In this case, the model is estimated for each row individually, then each row likelihood is multiplied by its weight, and these weighted likelihoods summed to form the model-likelihood, which is to be minimized. This feature is used in the non-linear GxE model functions. \strong{Additional features} The umxACE function supports varying the DZ genetic association (defaulting to .5) to allow exploring assortative mating effects, as well as varying the DZ \dQuote{C} factor from 1 (the default for modeling family-level effects shared 100% by twins in a pair), to .25 to model dominance effects. \strong{Matrices and Labels in ACE model} Matrices 'a', 'c', and 'e' contain the path loadings of the Cholesky ACE factor model. So, labels relevant to modifying the model are of the form \code{"a_r1c1", "c_r1c1"} etc. Variables are in rows, and factors are in columns. So to drop the influence of factor 2 on variable 3, you would say\preformatted{\code{m2 = umxModify(m1, update = "c_r3c2")} } Less commonly-modified matrices are the mean matrix \code{expMean}. This has 1 row, and the columns are laid out for each variable for twin 1, followed by each variable for twin 2. So, in a model where the means for twin 1 and twin 2 had been equated (set = to T1), you could make them independent again with this script: \code{m1$top$expMean$labels[1, 4:6] = c("expMean_r1c4", "expMean_r1c5", "expMean_r1c6")} \emph{note}: Only one of C or D may be estimated simultaneously. This restriction reflects the lack of degrees of freedom to simultaneously model C and D with only MZ and DZ twin pairs (Eaves et al. 1978 p267). } \examples{ # ============================ # = How heritable is height? = # ============================ require(umx) data(twinData) # ?twinData from Australian twins. 
# Pick the variables selDVs = c("ht") mzData <- twinData[twinData$zygosity \%in\% "MZFF", ] dzData <- twinData[twinData$zygosity \%in\% "DZFF", ] m1 = umxACEnew(selDVs = selDVs, sep = "", dzData = dzData, mzData = mzData) umxSummary(m1, std = FALSE) # unstandardized # tip: with report = "html", umxSummary can print the table to your browser! plot(m1) # ======================================================== # = Evidence for dominance ? (DZ correlation set to .25) = # ======================================================== m2 = umxACEnew("ADE", selDVs = selDVs, sep = "", dzData = dzData, mzData = mzData, dzCr = .25) umxCompare(m2, m1) # ADE is better umxSummary(m2, comparison = m1) # nb: Although summary is smart enough to print d, the underlying # matrices are still called a, c & e. # ============================== # = Univariate model of weight = # ============================== # Things to note: # 1. This variable has a large variance, but umx picks good starts. # 2. umxACEnew can figure out variable names: provide sep= "_T" and selVar = "wt" -> "wt_T1" "wt_T2" # 3. umxACEnew picks the variables it needs from the data. # 4. note: the default boundDiag = 0 lower-bounds a, c, and e at 0 (prevents mirror-solutions). 
# can remove this by setting boundDiag = NULL m1 = umxACEnew(selDVs = "wt", dzData = dzData, mzData = mzData, sep = "", boundDiag = NULL) # MODEL MODIFICATION # We can modify this model, say testing shared environment, and see a comparison: m2 = umxModify(m1, update = "c_r1c1", name = "no_C", comparison = TRUE) # nb: You can see names of free parameters with parameters(m2) # ===================================== # = Bivariate height and weight model = # ===================================== data(twinData) mzData = twinData[twinData$zygosity \%in\% c("MZFF", "MZMM"),] dzData = twinData[twinData$zygosity \%in\% c("DZFF", "DZMM", "DZOS"), ] mzData = mzData[1:80,] # quicker run to keep CRAN happy dzData = dzData[1:80,] selDVs = c("ht", "wt") # umx will add sep (in this case "") + "1" or '2' m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep = '') umxSummary(m1) # ========================================================= # = Well done! Now you can make and modify twin models in umx = # ========================================================= # =================== # = Ordinal example = # =================== require(umx) data(twinData) # Cut BMI column to form ordinal obesity variables obesityLevels = c('normal', 'overweight', 'obese') cutPoints <- quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE) twinData$obese1 <- cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 <- cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) # Make the ordinal variables into mxFactors (ensure ordered is TRUE, and require levels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] <- mxFactor(twinData[, ordDVs], levels = obesityLevels) mzData = twinData[twinData$zygosity \%in\% "MZFF", ] dzData = twinData[twinData$zygosity \%in\% "DZFF", ] mzData = mzData[1:80, ] # Just top 80 pairs to run fast dzData = dzData[1:80, ] str(mzData) # make sure mz, dz, and t1 and t2 have the same levels! 
# Data-prep done - here's where the model starts: selDVs = c("obese") m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep = '') umxSummary(m1) # ============================================ # = Bivariate continuous and ordinal example = # ============================================ data(twinData) # Cut BMI column to form ordinal obesity variables obesityLevels = c('normal', 'overweight', 'obese') cutPoints = quantile(twinData[, "bmi1"], probs = c(.5, .2), na.rm = TRUE) twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) # Make the ordinal variables into umxFactors (ensure ordered is TRUE, and require levels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] = umxFactor(twinData[, ordDVs]) mzData = twinData[twinData$zygosity \%in\% "MZFF",] dzData = twinData[twinData$zygosity \%in\% "DZFF",] mzData <- mzData[1:80,] # just top 80 so example runs in a couple of secs dzData <- dzData[1:80,] m1 = umxACEnew(selDVs = c("wt", "obese"), dzData = dzData, mzData = mzData, sep = '') # ======================================= # = Mixed continuous and binary example = # ======================================= require(umx) data(twinData) # Cut to form category of 20\% obese subjects # and make into umxFactors (ensure ordered is TRUE, and require levels) obesityLevels = c('normal', 'obese') cutPoints = quantile(twinData[, "bmi1"], probs = .2, na.rm = TRUE) twinData$obese1 = cut(twinData$bmi1, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) twinData$obese2 = cut(twinData$bmi2, breaks = c(-Inf, cutPoints, Inf), labels = obesityLevels) ordDVs = c("obese1", "obese2") twinData[, ordDVs] = umxFactor(twinData[, ordDVs]) selDVs = c("wt", "obese") mzData = twinData[twinData$zygosity \%in\% "MZFF",] dzData = twinData[twinData$zygosity \%in\% "DZFF",] \dontrun{ m1 = umxACEnew(selDVs = selDVs, dzData = dzData, mzData = mzData, sep 
= '') umxSummary(m1) } # =================================== # = Example with covariance data only = # =================================== require(umx) data(twinData) selDVs = c("wt1", "wt2") mz = cov(twinData[twinData$zygosity \%in\% "MZFF", selDVs], use = "complete") dz = cov(twinData[twinData$zygosity \%in\% "DZFF", selDVs], use = "complete") m1 = umxACEnew(selDVs = selDVs, dzData = dz, mzData = mz, numObsDZ=569, numObsMZ=351) umxSummary(m1) plot(m1) } \references{ \itemize{ \item Eaves, L. J., Last, K. A., Young, P. A., & Martin, N. G. (1978). Model-fitting approaches to the analysis of human behaviour. \emph{Heredity}, \strong{41}, 249-320. \url{https://www.nature.com/articles/hdy1978101.pdf} } } \seealso{ \itemize{ \item \code{\link{plot.MxModelACE}}, \code{\link{umxSummaryACE}}, \code{\link{umxModify}} } Other Twin Modeling Functions: \code{\link{umxACE_cov_fixed}}, \code{\link{umxACEcov}}, \code{\link{umxACEv}}, \code{\link{umxACE}}, \code{\link{umxCPold}}, \code{\link{umxCP}}, \code{\link{umxGxE_window}}, \code{\link{umxGxEbiv}}, \code{\link{umxGxE}}, \code{\link{umxIPnew}}, \code{\link{umxIP}}, \code{\link{umxSexLim}}, \code{\link{umxSimplex}}, \code{\link{umxSummaryACEcov}}, \code{\link{umxSummaryACEv}}, \code{\link{umxSummaryACE}}, \code{\link{umxSummaryCP}}, \code{\link{umxSummaryGxEbiv}}, \code{\link{umxSummaryGxE}}, \code{\link{umxSummaryIP}}, \code{\link{umxSummarySexLim}}, \code{\link{umxSummarySimplex}}, \code{\link{umx}}, \code{\link{xmu_twin_check}} } \concept{Twin Modeling Functions}