blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3b3251a08dab34c469cb91ca40c95ade99ddaba3
|
033259a8308432f54aee40e272cd9870084cbe8e
|
/man/tickers_metrics.Rd
|
aa31b747dcafd684e94f21e9b85df6acb27e0e58
|
[
"MIT"
] |
permissive
|
abresler/fundManageR
|
89d7d51c0da3de150170433ac15dba0feb50e62c
|
6d9144353b5284bfd6fadf6922690904cf263320
|
refs/heads/master
| 2023-08-03T21:47:41.812747
| 2023-07-19T13:09:16
| 2023-07-19T13:09:16
| 68,140,115
| 192
| 31
| null | null | null | null |
UTF-8
|
R
| false
| true
| 698
|
rd
|
tickers_metrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finbox.R
\name{tickers_metrics}
\alias{tickers_metrics}
\title{finbox ticker metrics}
\usage{
tickers_metrics(
tickers = c("AAPL", "NFLX", "FB", "GOOG", "TSLA", "VNO"),
metrics = c("total_current_assets", "total_net_income_margin", "total_net_income",
"pre_tax_income_cagr", "cash_from_operations", "stock_price_open",
"operating_income_margin", "total_current_liabilities", "avg_ebit_margin",
"after_tax_cost_of_debt_mid"),
multiple_types = c("ltm", "fwd"),
period_types = c("fy", "fq", "ytd"),
return_message = TRUE
)
}
\arguments{
\item{return_message}{\code{logical}; whether to display a message (default \code{TRUE})}
}
\description{
finbox ticker metrics
}
|
38f8ce62ab852f351a0c40802bef1802d83f9ba6
|
3063fc9808667c80e1aabdee9dfba9d459dcec12
|
/R/colony_edge.R
|
648387c6b9fbd18357255de92949cfee2370aa8a
|
[] |
no_license
|
yuczhang/colonyHCS
|
06a66368b2945c575f2aec5f90410abaf853089b
|
ef1ebe50b7eb9439977628671bb2a96368cd272f
|
refs/heads/master
| 2020-09-28T19:36:30.654618
| 2019-12-09T09:44:03
| 2019-12-09T09:44:03
| 226,847,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,107
|
r
|
colony_edge.R
|
#' Edge detection to segment the foreground of the microorganism colonies.
#'
#' In order to characterize the phenotypic change of a mutant, the Canny edge
#' detection algorithm is applied to realize the segmentation.
#' @param expr_mode a string indicating the phenotypic experiment type, as
#'   different experiments require different processing steps
#' @param colony_patch a matrix specifying the intensity values of a colony
#'   patch, whose values are normalized into [0,1]
#' @param color_patch An 'Image' object or an array specifying a colony patch in
#'   color mode, or the same as colony_patch for phenotypic experiments that do
#'   not focus on color change
#' @param Canny_sigma A numeric specifying the Gaussian filter variance in the
#'   Canny detector. Defaults to 7 in the examples.
#' @param low_thr lower threshold value of the Canny detector.
#' @param high_thr upper threshold value of the Canny detector.
#' @param mor_size A numeric containing the size of the brush in pixels. This
#'   should be an odd number; even numbers are rounded to the next odd one,
#'   i.e., size = 4 has the same effect as size = 5. Default is 9.
#' @param mor_shape A character vector indicating the shape of the brush. Can be
#'   box, disc, diamond, Gaussian or line. Default is disc.
#' @param high_connectivity A logical: 4 (FALSE) or 8 (TRUE) connectivity in the
#'   2d case, and 6 (FALSE) or 26 (TRUE) connectivity in the 3d case.
#'   Default is TRUE.
#'
#' @return A list containing 1) an array of labelled objects, where labelled
#'   objects are pixel sets with the same unique integer value, and 2) an array
#'   containing the painted version of the colony.
#'
#' @export
#' @examples
#' edge_rslt<-colony_edge(expr_mode="swarming",colony_patch=img_patch$patch[[i,j]],
#'                        color_patch=img_patch$color_patch[[i,j]],Canny_sigma=8,
#'                        low_thr=6,high_thr=20,mor_size=9)
colony_edge <- function(expr_mode = "Congo_red", colony_patch, color_patch = NULL,
                        Canny_sigma = 7, low_thr = 6, high_thr = 20,
                        mor_size = 9, mor_shape = "disc", high_connectivity = TRUE) {
  # All package functions below are called with their namespace prefix, so the
  # former library() calls were redundant side effects and have been removed.
  edges_pic <- image.CannyEdges::image_canny_edge_detector(
    colony_patch * 255, s = Canny_sigma,
    low_thr = low_thr, high_thr = high_thr
  )$edges
  # Morphological closing to connect broken edge fragments.
  kern <- EBImage::makeBrush(size = mor_size, shape = mor_shape)
  im_afmor <- EBImage::closing(edges_pic, kern)
  # Label connected components with the imager package.
  cimg_patch <- imager::as.cimg(im_afmor)
  labels0 <- imager::label(imager::as.pixset(cimg_patch),
                           high_connectivity = high_connectivity) # 8-neighbourhood
  labels <- EBImage::rotate(imager::cimg2im(labels0)$v, -90) # rotate by 90 degrees
  if (expr_mode != "Congo_red") {
    patch_edge <- EBImage::paintObjects(x = labels, EBImage::toRGB(colony_patch),
                                        col = c("red", "black"), opac = c(1, 0.3))
  } else {
    patch_edge <- EBImage::paintObjects(x = labels, color_patch,
                                        col = c("blue", "black"), opac = c(1, 0.3))
  }
  return(list(labels = labels, patch_edge = patch_edge))
}
|
468070816c09888e701ce0c5a88d1d688423e0c3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lattice/examples/melanoma.Rd.R
|
38aaed4c5e7f7b848d7470ee3e410fbef57ab4bf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
melanoma.Rd.R
|
# Extracted example code from the lattice package's melanoma help page.
library(lattice)
### Name: H_melanoma
### Title: Melanoma skin cancer incidence
### Aliases: melanoma
### Keywords: datasets
### ** Examples
# Time-series plot. Figure 3.64 from Cleveland.
# aspect = "xy" lets lattice choose the aspect ratio by banking (see ?xyplot);
# the custom panel draws points joined by lines (type = "o") with solid
# circles (pch = 16).
xyplot(incidence ~ year,
data = melanoma,
aspect = "xy",
panel = function(x, y)
panel.xyplot(x, y, type="o", pch = 16),
ylim = c(0, 6),
xlab = "Year",
ylab = "Incidence")
|
d6a563f6b6fe8403673bb1131e24cccb4ec12c34
|
75e8ab3b754f02e7b6796c182dc36968befe6788
|
/data-raw/horoscopes.R
|
6466b3a6c156fa442f295fd3353fa4a48e5b4c9e
|
[] |
no_license
|
jjchern/astrologer
|
d22a3958c9a44b14105aea02b7653f5c21c326a2
|
97513240a0a839b60aea5d38190deb963921d8d5
|
refs/heads/master
| 2021-01-20T00:12:54.141390
| 2017-02-13T06:10:51
| 2017-02-13T06:10:51
| 82,588,047
| 0
| 0
| null | 2017-02-20T18:14:49
| 2017-02-20T18:14:49
| null |
UTF-8
|
R
| false
| false
| 5,550
|
r
|
horoscopes.R
|
# Scrape weekly horoscopes from chaninicholas.com (2015-2017) and assemble the
# `horoscopes` data frame (startdate, zodiacsign, horoscope, url) shipped with
# the package. Requires network access; run interactively from data-raw/.
library(rvest)
library(stringr)
library(dplyr)
library(tidytext)
library(lubridate)
library(httr)
library(forcats)
library(purrr)
library(tidyr)
# NOTE(review): a duplicate library(forcats) call was removed here.

# Check which URLs exist for all year/month combinations
horoscopes <- crossing(year = 2015:2017,
                       month = c("01", "02", "03", "04", "05", "06",
                                 "07", "08", "09", "10", "11", "12")) %>%
  mutate(date_url = map2_chr(year, month,
                             ~ str_c("http://chaninicholas.com/", .x, "/", .y)),
         url_exists = map_lgl(date_url,
                              ~ (status_code(GET(.x)) >= 300) == FALSE)) %>%
  filter(url_exists)

# Get page 2 of horoscopes for year/month combinations that exist (if it exists too!)
horoscopes <- horoscopes %>%
  bind_rows(horoscopes %>%
              mutate(date_url = str_c(date_url, "/page/2"),
                     url_exists = NA)) %>%
  mutate(url_exists = if_else(is.na(url_exists),
                              map_lgl(date_url,
                                      ~ (status_code(GET(.x)) >= 300) == FALSE),
                              url_exists)) %>%
  filter(url_exists)

# Extract horoscope links from each date link
horoscopes <- horoscopes %>%
  mutate(url = pmap(list(date_url, year, month),
                    function(date_url, year, month)
                      date_url %>%
                      read_html() %>%
                      html_nodes("a") %>%
                      html_attr("href") %>%
                      str_subset('scope') %>% # only horoscope links -- sometimes written weirdly!
                      str_subset(str_c(year, "/", month)) %>% # for that year/month only
                      unique())) %>%
  unnest(url) %>%
  select(-date_url) %>%
  unique()

# Extract start date for each horoscope (the day number embedded in the URL)
horoscopes <- horoscopes %>%
  mutate(day = map_dbl(url,
                       ~ str_match(str_match(.x, "scope(.*?)/")[,2], "[0-9]+") %>%
                         as.numeric()),
         startdate = ymd(str_c(year, "/", month, "/", day)))

# Fill in missing dates (posts whose URL carries no parseable day number)
horoscopes <- horoscopes %>%
  mutate(date_manual = case_when(.$url == "http://chaninicholas.com/2015/01/new-moon-aquariusmercury-retrograde-horoscopes/" ~ ymd("2015-01-19"),
                                 .$url == "http://chaninicholas.com/2015/02/new-moon-aquarius-horoscopes/" ~ ymd("2015-02-16"),
                                 .$url == "http://chaninicholas.com/2015/12/horoscopes-for-the-winter-solstice-and-the-full-moon-in-cancer/" ~ ymd("2015-12-21"),
                                 .$url == "http://chaninicholas.com/2017/01/2017-your-horoscope-for-the-year-ahead/" ~ ymd("2017-01-02")),
         startdate = if_else(is.na(startdate), date_manual, startdate)) %>%
  select(-day, -date_manual)

# Extract horoscope text, split into paragraphs
horoscopes <- horoscopes %>%
  mutate(text = map_chr(url,
                        ~ .x %>%
                          read_html() %>%
                          html_nodes('.entry-content') %>%
                          html_text() %>%
                          iconv(to = 'UTF-8') %>%
                          str_replace_all(c("[\r]" = " ", "[“‘’”]" = "'"))),
         text_split = map(text,
                          ~ .x %>%
                            strsplit('\n') %>%
                            unlist() %>%
                            str_trim()),
         text_split = map(text_split,
                          ~ .x[.x != ""]))

# Remove duplicate elements within each horoscope (ads, etc)
horoscopes <- horoscopes %>%
  mutate(text_split = map(text_split,
                          ~ .x[!(duplicated(.x)) & !(duplicated(.x, fromLast = TRUE))]),
         text_length = map_dbl(text_split, ~length(.x)))

# Get the horoscope that corresponds to each sign: cross every post with the 12
# signs, locate each sign's heading paragraph, then take the text up to the
# next sign's heading.
horoscopes <- horoscopes %>%
  mutate(join = TRUE) %>%
  left_join(tibble(zodiacsign = c("Aries", "Taurus", "Gemini" ,"Cancer",
                                  "Leo", "Virgo", "Libra", "Scorpio",
                                  "Sagittarius", "Capricorn", "Aquarius", "Pisces"),
                   join = TRUE),
            by = "join") %>%
  group_by(startdate, zodiacsign) %>%
  mutate(start_of_sign = map2(text_split, zodiacsign,
                              ~ which(str_detect(.x, str_c("^", .y, ".*Rising")) |
                                        str_detect(.x, str_c("^", .y, " &")) |
                                        .x == .y)),
         start_of_sign = ifelse(length(unlist(start_of_sign)) != 0,
                                unlist(start_of_sign) + 1,
                                NA)) %>%
  ungroup() %>%
  mutate(end_of_sign = ifelse(zodiacsign != 'Pisces',
                              lead(start_of_sign) - 2,
                              text_length),
         horoscope = pmap_chr(list(text_split, start_of_sign, end_of_sign),
                              function(text_split, start_of_sign, end_of_sign)
                                str_c(text_split[start_of_sign:end_of_sign], collapse = " ")))

# Remove extra variables, order by date/signs
horoscopes <- horoscopes %>%
  select(startdate, zodiacsign, horoscope, url) %>%
  mutate(zodiacsign = fct_relevel(zodiacsign, c("Aries", "Taurus", "Gemini", "Cancer",
                                                "Leo", "Virgo", "Libra", "Scorpio",
                                                "Sagittarius", "Capricorn", "Aquarius", "Pisces"))) %>%
  arrange(startdate, zodiacsign)

# Add data files to package
# NOTE(review): devtools::use_data() is deprecated in current devtools; the
# modern equivalent is usethis::use_data(horoscopes, overwrite = TRUE).
devtools::use_data(horoscopes, overwrite = TRUE)
|
b59b3b97fc957674e286b77f32e78afbd71f7093
|
06de334e82606450d41cf556da7b3a89d24b8d62
|
/app-shiny-neny/ui.R
|
ae7adad663d60303a60fad57351a97fc6b943a87
|
[] |
no_license
|
almeidaxan/tools-gee
|
b5b81a9b67485bcf40299b1db6f64be063ffdd24
|
264a3d541d2523db168846e3dd5da72f0e2859de
|
refs/heads/master
| 2021-05-10T09:11:11.880061
| 2019-02-17T14:09:08
| 2019-02-17T14:09:08
| 118,915,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,853
|
r
|
ui.R
|
# Shiny UI for the "Landsat Downloader" app: a navbar with two tabs —
# "Pixel" (download time series at point locations given as a CSV) and
# "Raster" (download image subsets over a region given as a zipped shapefile).
shinyUI(navbarPage(
title = div(em(strong("Landsat Downloader")), HTML("<small>by Menini & Almeida</small>")),
windowTitle = "Landsat Downloader",
# ---- Tab 1: point (pixel) extraction ----
tabPanel(
title = "Pixel",
icon = icon(name = "th", lib = "font-awesome", class = "fa-lg"),
sidebarPanel(
# Set up shinyjs
useShinyjs(),
# CSV of points; expected columns: plot (id), lat, long (see helpText below)
fileInput(inputId = "pixel_datafile",
label = "Choose CSV File",
accept = c(".csv")),
helpText("The input data must be a .csv file, with comma sep. There must be three columns: plot (id), lat (latitude) and long (longitude)."),
br(),
# Destination folder for the downloaded data (shinyFiles directory chooser)
shinyDirButton(id = "dir_download_pixel",
label = "Choose the folder",
title = "Choose the folder"),
verbatimTextOutput("dir_download_pixel_text"),
helpText("Enter the folder that your data will be downloaded."),
br(),
checkboxInput("pixel_showMap", "Show points on the map?", FALSE),
textInput(inputId = "pixel_filename",
label = "Downloaded data file name",
value = "downloaded-data"),
# "new" = Collection 1, "old" = Pre-Collection surface-reflectance products
selectInput(inputId = "pixel_versionLS",
label = "Landsat SR Version",
choices = list("Collection 1" = "new",
"Pre-Collection" = "old")),
bsButton(
inputId = "pixel_botaoDownload",
label = "Download",
style = "primary",
icon = icon("download", lib = "font-awesome"),
width = "50%"
)
# verbatimTextOutput("teste", placeholder = FALSE)
),
# Map preview of the uploaded points
mainPanel(
leafletOutput(
outputId = "pixel_leaf"
)
)
),
# ---- Tab 2: raster (regional) extraction ----
tabPanel(
title = "Raster",
icon = icon(name = "square-o", lib = "font-awesome", class = "fa-lg"),
sidebarPanel(
# Set up shinyjs
useShinyjs(),
# Zipped shapefile defining the region of interest
fileInput(inputId = "raster_datafile",
label = "Choose shapefile",
accept = c(".zip")),
helpText(
"The shape must be compressed into a zip with, at least, the .shp, .shx, .dbf, and .prj files. The zip file must have the same name as its contents."
),
br(),
checkboxInput("raster_showMap", "Show shapefile on the map?", FALSE),
checkboxInput("download_SRTM", "Download SRTM data?", FALSE),
# Product selection: Collection 1 SR, Pre-Collection SR, or top-of-atmosphere
selectInput(inputId = "raster_versionLS",
label = "Landsat SR Version",
choices = list("Collection 1" = "SR_new",
"Pre-Collection" = "SR_old",
"TOA" = "TOA")),
selectInput(inputId = "raster_satellite",
label = "Landsat Number",
choices = list(4, 5, 7, 8)),
# Date range of imagery to fetch (YYYY-MM-DD)
textInput(inputId = "raster_periodStart",
label = "Period start",
value = "2000-01-01"),
textInput(inputId = "raster_periodEnd",
label = "Period end",
value = "2000-12-31"),
bsButton(
inputId = "raster_botaoDownload",
label = "Download",
style = "primary",
icon = icon("download", lib = "font-awesome"),
width = "50%"
),
textOutput("msg")
),
# Map preview of the uploaded shapefile
mainPanel(
leafletOutput(
outputId = "raster_leaf"
)
)
)
))
|
bd035dce32c0547175fe7de3d3753b6a2e06e69e
|
57aa4d2e4e113e3e41a274f0434ad67e7d1ffc44
|
/R/calculate_tables.R
|
68401abcf392f713edecdaae8b6d01da13f69c3e
|
[] |
no_license
|
sleyn/MoRbido
|
8ba8efd8958d789e7b8905fda37715c957fc4ca5
|
a5d3c7ce7ec982d3d4184a526523a36df1815710
|
refs/heads/master
| 2023-01-20T01:57:38.099075
| 2020-12-01T23:51:53
| 2020-12-01T23:51:53
| 204,804,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,854
|
r
|
calculate_tables.R
|
#' Create a list of tables of OD and concentration changes through time from the log files.
#' @param dil_check - Were any manual dilutions introduced? Default is TRUE. If TRUE - need to provide dil_file
#' @param dil_file - File that specifies dilution time, ratio (volume taken to total volume), concentration of the drug in the added media and the reactor.
#' @param bc_check - Was concentration of a drug changed during the run. Default is TRUE.
#' @param bc_file - File that specifies bottle change time, concentration in the Pump 1 bottle, Pump 2 bottle.
#' @param C0 - Concentration in reactors at the start of the run.
#' @param CS1 - Concentration in Pump 1 bottle at the start of the run.
#' @param CS2 - Concentration in Pump 2 bottle at the start of the run.
#' @param V - Volume of media in the reactors.
#' @param dV - The dilution volume.
#' @param od_file - Log file of OD measurements.
#' @param pump_file - Log file of pumps activation.
#' @param tubes_selected - Vector with numbers of reactors activated in the current run.
#' @param zero - zero levels of each tube. The levels should be in the increasing order.
#' @param dV.change - table with time of change, new dV and tube (empty vector if dV never changed)
#' @return A named list with one element per selected tube; each element is a
#'   list with OD (cleaned OD time series) and Conc (time/Concentration/Bottle table).
#' @export
#' @import rlist
calculate_tables <- function(dil_check = TRUE, bc_check = TRUE, C0 = 0, CS1 = 0, CS2 = 10, V = 20, dV = 4, od_file = "6-21-17-OD.txt", pump_file = "6-21-17-pumps.txt", dil_file = "dilutions.txt", bc_file = "bottle_change.txt", tubes_selected = 1:6, zero = c(Tube1 = 0, Tube2 = 0, Tube3 = 0, Tube4 = 0, Tube5 = 0, Tube6 = 0), dV.change = c()) {
  # C0  antibiotic concentration in tubes on start (ug/ml)
  # CS1 antibiotic concentration in media 1 (ug/ml)
  # CS2 antibiotic concentration in media 2 (ug/ml)
  # V   reaction volume (ml)
  # dV  dilution volume (ml)

  # --- OD log ---
  table.t <- read.table(od_file, sep = "\t")
  colnames(table.t) <- c("time", "tube", "value", "pump")
  table.t$time <- as.POSIXlt(table.t$time)
  time0 <- table.t$time[1]
  # converts time to seconds passed from the first OD measurement
  table.t$time <- as.numeric(table.t$time - time0, units = "secs")
  for (tube.name in names(zero)) { # subtract per-tube zero (blank) levels
    table.t[table.t$tube == tube.name, ]$value <- table.t[table.t$tube == tube.name, ]$value - zero[tube.name]
  }
  table.t$value[table.t$value <= 0] <- 0.01 # floor OD at a small positive value

  # --- pump activation log ---
  table.p <- read.table(pump_file, sep = "\t")
  colnames(table.p) <- c("time", "tube", "pump")
  table.p$time <- as.POSIXlt(table.p$time)
  table.p$time <- as.numeric(table.p$time - time0, units = "secs")

  # --- optional table of dV changes ---
  if (length(dV.change) > 0) {
    names(dV.change) <- c("time", "dV", "tube")
    dV.change$time <- as.POSIXlt(dV.change$time)
    dV.change$time <- as.numeric(dV.change$time - time0, units = "secs")
  }

  tables <- list() # list where the tV and tC tables for each tube are stored

  if (dil_check == TRUE) { # load manual dilution table
    dilutions <- read.table(dil_file, sep = "\t")
    colnames(dilutions) <- c("time", "dilution", "conc", "tube")
    dilutions$time <- as.POSIXlt(dilutions$time)
    dilutions$time <- as.numeric(dilutions$time - time0, units = "secs")
  }
  if (bc_check == TRUE) { # load bottle change table
    change.bottles <- read.table(bc_file, sep = "\t")
    colnames(change.bottles) <- c("time", "Cp1", "Cp2")
    change.bottles$time <- as.POSIXlt(change.bottles$time)
    change.bottles$time <- as.numeric(change.bottles$time - time0, units = "secs")
  }

  for (i in tubes_selected) {
    tube <- paste0("Tube", i)
    tp <- table.p[table.p$tube == tube, ]
    if (dil_check == TRUE) {
      d <- dilutions[dilutions$tube == tube, ]
      dc <- 1 # dilutions count
    }
    if (bc_check == TRUE) {
      bc <- 1 # bottle change count
    }
    Cn <- C0
    CS11 <- CS1
    CS21 <- CS2
    tC <- data.frame() # time/Concentration/Bottle rows are rbind-ed below
    ttemp <- data.frame(time = 0, Concentration = Cn, Bottle = CS21) # zero time point for concentration
    tC <- rbind(tC, ttemp)
    tV <- table.t[table.t$tube == tube, ]
    if (length(dV.change) > 0) { # dV-change table specific to the current tube
      dV.c.t <- dV.change[dV.change$tube == tube, ]
      dV.c <- 1 # count of dV changes
    }
    if (dil_check == TRUE && bc_check == TRUE) { # BOTH dilutions and bottle changes were present
      if (dim(d)[1] > 0) {
        for (j in 1:dim(d)[1]) { # clear jumps at dilution times
          tV <- tV[-which(tV$time > d$time[j] & tV$time < d$time[j] + 3600), ]
        }
      }
      for (j in 1:dim(tp)[1]) {
        if (length(d$time) >= dc) { # introduce dilutions
          if (tp$time[j] >= d$time[dc]) {
            Cn <- Cn * (1 - d$dilution[dc]) + d$conc[dc] * d$dilution[dc]
            ttemp <- data.frame(as.integer((tp$time[j - 1] + tp$time[j]) / 2), Cn, CS21)
            colnames(ttemp) <- c("time", "Concentration", "Bottle")
            tC <- rbind(tC, ttemp)
            dc <- dc + 1
          }
        }
        if (length(change.bottles$time) >= bc) { # introduce bottle change
          if (tp$time[j] >= change.bottles$time[bc]) {
            CS11 <- change.bottles$Cp1[bc]
            CS21 <- change.bottles$Cp2[bc]
            bc <- bc + 1
          }
        }
        if (length(dV.change) > 0) { # introduce change of dV
          # BUGFIX: was `length(dV.change > 0)` and `dV <- dV.c.t$dV` (whole
          # column); now indexes the current change with [dV.c].
          if (length(dV.c.t$time) >= dV.c) {
            if (tp$time[j] >= dV.c.t$time[dV.c]) {
              dV <- dV.c.t$dV[dV.c]
              dV.c <- dV.c + 1
            }
          }
        }
        if (tp$pump[j] == "Pump1") { # main concentration changes
          Cn <- (Cn * (V - dV) + CS11 * dV) / V
        } else {
          Cn <- (Cn * (V - dV) + CS21 * dV) / V
        }
        tC <- rbind(tC, data.frame(time = tp$time[j], Concentration = Cn, Bottle = CS21))
      }
    } else if (dil_check == TRUE && bc_check == FALSE) { # only dilutions were present
      for (j in 1:dim(d)[1]) { # clear jumps at dilution times
        tV <- tV[-which(tV$time > d$time[j] & tV$time < d$time[j] + 3600), ]
      }
      for (j in 1:dim(tp)[1]) {
        if (length(d$time) >= dc) { # introduce dilutions
          if (tp$time[j] >= d$time[dc]) {
            Cn <- Cn * (1 - d$dilution[dc]) + d$conc[dc] * d$dilution[dc] # dilution is (V-dV)/V
            ttemp <- data.frame(tp$time[j - 1] + 1, Cn, CS21)
            colnames(ttemp) <- c("time", "Concentration", "Bottle")
            tC <- rbind(tC, ttemp)
            dc <- dc + 1
          }
        }
        if (length(dV.change) > 0) { # introduce change of dV
          if (length(dV.c.t$time) >= dV.c) {
            if (tp$time[j] >= dV.c.t$time[dV.c]) {
              dV <- dV.c.t$dV[dV.c]
              dV.c <- dV.c + 1
            }
          }
        }
        if (tp$pump[j] == "Pump1") { # main concentration changes
          Cn <- (Cn * (V - dV) + CS11 * dV) / V
        } else {
          Cn <- (Cn * (V - dV) + CS21 * dV) / V
        }
        tC <- rbind(tC, data.frame(time = c(tp$time[j]), Concentration = c(Cn), Bottle = CS21))
      }
    } else if (dil_check == FALSE && bc_check == TRUE) { # only bottle changes were present
      for (j in 1:dim(tp)[1]) {
        if (length(change.bottles$time) >= bc) { # introduce bottle change
          if (tp$time[j] >= change.bottles$time[bc]) {
            CS11 <- change.bottles$Cp1[bc]
            CS21 <- change.bottles$Cp2[bc]
            bc <- bc + 1
          }
        }
        if (length(dV.change) > 0) { # introduce change of dV
          if (length(dV.c.t$time) >= dV.c) {
            if (tp$time[j] >= dV.c.t$time[dV.c]) {
              dV <- dV.c.t$dV[dV.c]
              dV.c <- dV.c + 1
            }
          }
        }
        if (tp$pump[j] == "Pump1") { # main concentration changes
          Cn <- (Cn * (V - dV) + CS11 * dV) / V
        } else {
          Cn <- (Cn * (V - dV) + CS21 * dV) / V
        }
        tC <- rbind(tC, data.frame(time = c(tp$time[j]), Concentration = c(Cn), Bottle = CS21))
      }
    } else { # neither dilutions nor bottle changes
      for (j in 1:dim(tp)[1]) {
        if (length(dV.change) > 0) { # introduce change of dV
          if (length(dV.c.t$time) >= dV.c) {
            if (tp$time[j] >= dV.c.t$time[dV.c]) {
              dV <- dV.c.t$dV[dV.c]
              dV.c <- dV.c + 1
            }
          }
        }
        if (tp$pump[j] == "Pump1") { # main concentration changes
          Cn <- (Cn * (V - dV) + CS11 * dV) / V
        } else {
          Cn <- (Cn * (V - dV) + CS21 * dV) / V
        }
        tC <- rbind(tC, data.frame(time = c(tp$time[j]), Concentration = c(Cn), Bottle = CS21))
      }
    }
    table.names <- names(tables)
    tables <- rlist::list.append(tables, x = list(OD = tV, Conc = tC))
    names(tables) <- c(table.names, tube)
  }
  return(tables)
}
|
507ac47d1b660244913be557b3e4926094882f11
|
01de953181aa892b3ef0eec50e95e4b5c158458c
|
/vignettes/grasleppa.R
|
cbe74b4b16062dfcec161f4726dedcf1dfbb248f
|
[] |
no_license
|
einarhjorleifsson/mar
|
c4d18937b7c4b71f99ee5c1bac19f6959a60a0cb
|
41180e6ba575dba9cd605fa86508ec3343d23e09
|
refs/heads/master
| 2021-01-18T21:23:45.755326
| 2019-02-02T12:11:02
| 2019-02-02T12:11:02
| 35,633,987
| 0
| 1
| null | 2020-07-07T13:03:28
| 2015-05-14T20:06:10
|
R
|
UTF-8
|
R
| false
| false
| 404
|
r
|
grasleppa.R
|
## ----message=FALSE-------------------------------------------------------
# Purled vignette code: plot lumpsucker ("grasleppa") catch locations around
# Iceland for 2015 using the mar package's database interface.
library(tidyverse)
library(mar)
mar <- connect_mar()  # open the database connection used by the query below
## ----fig.width=7---------------------------------------------------------
# afli_grasl() builds a lazy query of lumpsucker catches; filter on year
# (ar, presumably "year" in Icelandic -- confirm against the mar package),
# then collect() pulls all rows into R before plotting.
afli_grasl(mar) %>%
filter(ar==2015) %>%
collect(n=Inf) %>%
ggplot(aes(lon,lat)) +
geom_polygon(data=geo::bisland) +  # coastline polygon of Iceland (geo package)
geom_jitter(col='red',aes(size=fj_grasl),alpha=0.1)+ coord_map()
|
fd3443e3163f2bfbcc73bf83a12e49eae40421a6
|
9855901b3da9cae4e5532c57c77ee2e03f44e316
|
/K-Means/TeenMarketSegments.R
|
daca7acd62ef29ef3e2706464b4b99b7f3d8e957
|
[] |
no_license
|
Walter-Ullon/Machine-Learning
|
cbfa835682b0a5291ebf78e4365c70043e4394db
|
777e22157a855063fef2f12d9c952eac7729c3fb
|
refs/heads/master
| 2022-11-26T13:32:21.209852
| 2019-09-17T15:05:34
| 2019-09-17T15:05:34
| 97,531,891
| 3
| 0
| null | 2022-11-21T21:27:40
| 2017-07-17T23:43:52
|
Python
|
UTF-8
|
R
| false
| false
| 2,436
|
r
|
TeenMarketSegments.R
|
# Application of "K-Means Clustering" algorithms to Teen Market Segments using social media data.
# Data collected and curated by Brett Lantz (https://raw.githubusercontent.com/dataspelunking)
# Install and load packages.
library(stats)
# Load data. Inspect.
teens <- read.csv(url("https://raw.githubusercontent.com/dataspelunking/MLwR/master/Machine%20Learning%20with%20R%20(2nd%20Ed.)/Chapter%2009/snsdata.csv"))
str(teens)
table(teens$gender, useNA = "ifany") # We have 2724 missing points in the gender column.
summary(teens$age)
# Since the "age summary" presented us with problematic statistics, we will have to clean up the data.
# Keep only plausible teen ages (13-20 exclusive); everything else becomes NA.
teens$age <- ifelse(teens$age > 13 & teens$age < 20, teens$age, NA)
summary(teens$age)
# Use "imputation" to deal with the missing values in age. Use the mean age to impute said values.
# Specifically, we need the mean for each of the graduation years.
mean(teens$age, na.rm = TRUE)
aggregate(data = teens, age ~ gradyear, mean, na.rm = TRUE)
# Above: "in data = teens, sort age by gradyear, and apply mean to each. Remove NA values".
# ave() returns a vector the same length as teens$age holding each row's
# gradyear group mean.
ave_age <- ave(teens$age, teens$gradyear, FUN = function(x) mean(x, na.rm = TRUE))
# We use an "ifelse" to replace the NA values with the new gradyear age means.
teens$age <- ifelse(is.na(teens$age), ave_age, teens$age)
# Check our work...
summary(teens$age)
# Create "dummy variables" in order to solve the missing values in "gender". Assign "NA" values to "no_gender".
teens$female <- ifelse(teens$gender == "F" & !is.na(teens$gender), 1, 0)
teens$no_gender <- ifelse(is.na(teens$gender), 1, 0)
# Create a data frame containing only the "interests" of this group (columns 5:40).
interests <- teens[5:40]
# Train Model on the data. Use k = 5 (we predict 5 "teen" types).
# z-score standardize each interest column so no single scale dominates.
interests_z <- as.data.frame(lapply(interests, scale))
set.seed(2345)
teens_cluster <- kmeans(interests_z, 5)
# Evaluate Model Performance.
teens_cluster$size
teens_cluster$centers
#--------------- Improve Model Performance -------------------
# The kmeans() function includes a component named "cluster" that contains the cluster assignments
# for each of the 30,000 individuals in the sample. We will add this as a column to "teens" data.
teens$cluster <- teens_cluster$cluster
# Obtain mean proportion of females per cluster.
aggregate(data = teens, female ~ cluster, mean)
# Obtain mean proportion of friends per cluster.
aggregate(data = teens, friends ~ cluster, mean)
|
7ba0d242618eb8732c8f35042adf491d310522a1
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/JSM/R/LambMultGeneric.R
|
ff34866d60c4708285b241ffc767f34ca9b6860f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,074
|
r
|
LambMultGeneric.R
|
#========== Function to Obtain Lamb Given Other Finite Dimensional Parameters ==========#
#=============== Model I for Multiplicative Joint Modeling ===============#
# Computes the baseline hazard increments `lamb` by a fixed-point
# (self-consistency) iteration, holding the finite-dimensional parameters in
# `para` fixed. Returns list(lamb = converged increments, converge = 0/1 flag).
#
# NOTE(review): Vec2ListMult and the calc_* helpers are defined elsewhere in
# the package (not visible here); comments on their outputs below repeat the
# original author's annotations and should be confirmed against those helpers.
LambMultGeneric <- function (para, lamb.init, tol, iter, ncz, ncb, B.st, n, Y.st, b, model, Btime, Btime2, Index, Ztime, Ztime2, Index0, nknot, nk, Index1, rho, d, wGQ, Index2){
# Unpack the parameter vector into named components.
para.list <- Vec2ListMult(para, ncz, ncb)
gamma <- para.list$gamma
phi <- para.list$phi
alpha <- para.list$alpha
Ysigma2 <- (para.list$Ysigma) ^ 2
Bsigma2 <- (para.list$Bsigma) ^ 2
M <- length(Index)
# Per-subject spline-basis projections and posterior quantities of the
# random effect b_i (computed by the external calc_* helpers).
BTg <- lapply(B.st, function(x) as.vector(x %*% gamma))
VY <- lapply(1:n, function(i) calc_VY(BTg[[i]], Bsigma2, Ysigma2))
VB <- lapply(1:n, function(i) calc_VB(M1 = Bsigma2,M2 = BTg[[i]], VY[[i]]))
muB <-lapply(1:n, function(i) calc_muBMult( Bsigma2,VY[[i]],BTg[[i]],Y.st[[i]] )+1 )
bi.st <- lapply(1:n, function(i) calc_bi_st(v0=muB[[i]], b ,M = VB[[i]]) )
bi <- do.call(rbind, bi.st) # n*nknot matrix #
# Linear predictors from the survival covariates (zero when ncz == 0).
Ztime_phi <- if (ncz > 0) Ztime %*% phi else rep(0, n)
Ztime2_phi <- if (ncz > 0) Ztime2 %*% phi else rep(0, M)
# Model-specific hazard terms: model 1 scales the random effect by the
# longitudinal trajectory; model 2 uses the random effect directly.
if (model == 1){
Btime.b <- as.vector(Btime %*% gamma) * bi # n*nknot matrix #
Btime2.b <- as.vector(Btime2 %*% gamma) * bi[Index, ] # M*nknot matrix #
eta.h <- as.vector(Ztime_phi) + alpha * Btime.b # n*nknot matrix #
exp.es <- exp( as.vector(Ztime2_phi) + alpha * Btime2.b) # M*nknot matrix #
} else if(model ==2){
eta.h <- as.vector(Ztime_phi) + alpha * bi # n*nknot matrix #
exp.es <- exp(as.vector(Ztime2_phi) + alpha * bi[Index, ]) # M*nknot matrix #
} else {
stop("Invalid model type")
}
# Fixed-point iteration: update lamb until the cumulative hazard changes by
# less than tol (relative), or iter iterations are reached.
lamb.old <- lamb.init
err <- 1
for (step in 1:iter) {
Lamb.old <- cumsum(lamb.old)
log.lamb <- log(lamb.old[Index0])
# Subjects with no events contribute no log-hazard term.
log.lamb[is.na(log.lamb)] <- 0
log.density1 <- log.lamb + eta.h # n*nknot matrix #
const <- matrix(0, n, nknot) # n*nknot matrix #
const[nk != 0, ] <- calc_rowsum_mult((Index), lamb.old[Index1], exp.es)
log.density2 <- - log(1 + rho * const) # n*nknot matrix #
# rho > 0 gives the positive-stable/frailty form; rho == 0 reduces to the
# standard exponential survival term.
log.survival <- if(rho > 0) - log(1 + rho * const) / rho else - const # n*nknot matrix #
f.surv <- exp(d * log.density1 + d * log.density2 + log.survival) # n*nknot matrix #
deno <- as.vector(f.surv %*% wGQ) # vector of length n #
Integral <- f.surv / deno # n*nknot matrix #
CondExp <- (1 + d * rho) / (1 + rho * const) # conditional expectation E(xi|bi,Oi), n*GQ matrix #
tempLamb0 <- exp.es; tempLamb0[1] = tempLamb0[1] + 0 # "touch the variable"
# calc_M1_M2_M3_Hadamard modifies tempLamb0 in place (C++ side effect).
calc_M1_M2_M3_Hadamard(tempLamb0, CondExp , Integral, as.integer(Index - 1))
tempLamb <- calc_M_v(v = wGQ, M = tempLamb0)
postLamb <- calc_tapply_vect_sum(v1 = tempLamb, v2 = as.integer(Index1 - 1))
# Nelson-Aalen-type update: events over accumulated risk.
lamb.new <- Index2 / postLamb
Lamb.new <- cumsum(lamb.new)
err <- max(abs((Lamb.new - Lamb.old) / Lamb.old))
# Require at least 3 iterations before declaring convergence.
if (step > 3 & err < tol) break
lamb.old <- lamb.new
}
converge <- as.numeric(step <= iter & err < tol)
return(list(lamb = lamb.new, converge = converge))
}
|
619079af685eec25f4cf72612d0b6334e34e4ce4
|
d1e1c9b25aebcea37927c08a8f344713562b3e42
|
/man/F1-prentice.test.Rd
|
879ac3f4396d42baadbe36f411dfa8f41f219c07
|
[] |
no_license
|
cran/muStat
|
43783938835cae3e7a5afb5f8285f9b36ec8b07d
|
a77f2af75558f6a558d1044945f6085281655361
|
refs/heads/master
| 2021-01-17T06:33:48.507309
| 2010-09-17T00:00:00
| 2010-09-17T00:00:00
| 17,697,709
| 1
| 1
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 12,858
|
rd
|
F1-prentice.test.Rd
|
\name{prentice.test}
\alias{prentice.test}
\alias{mu.friedman.test}
\alias{mu.kruskal.test}
\alias{mu.wilcox.test}
\encoding{latin1}
\title{Prentice (Friedman/Wilcoxon/Kruskal) Rank Sum Test}
\description{
Performs a generalized Friedman rank sum test with replicated blocked
data or, as special cases, a Kruskal-Wallis rank sum test on data
following a one-way layout or a Wilcoxon rank sum test following
a one-way layout with only two groups.
}
\usage{
prentice.test(y, groups, blocks = NULL,
score = "rank", blkwght = "prentice", condvar = TRUE,
alternative = "two.sided", mu = 0, paired = FALSE,
exact = NULL, correct = FALSE, df = -1, warn = 0, optim = TRUE)
mu.wilcox.test(y, groups, blocks = NULL, score = "rank",
paired = FALSE, exact = TRUE, correct = TRUE, ...)
mu.kruskal.test(y, groups, blocks, \dots )
mu.friedman.test(y, groups, blocks, \dots )
}
\arguments{
\item{y}{
a numeric vector of data values,
\code{NA}s are used to compute block weights with incomplete block
data (see \code{blkwght}), but will be removed with one-way designs.
Blocks with observations in only one group will be removed.
\code{Inf}s are allowed, and are not removed as they are rankable.
Required.
}
\item{groups}{
factor or category object of the same length as \code{y}, giving
the group (treatment) for each corresponding element of \code{y}.
\code{NA}s are allowed and observations with \code{NA} in
\code{groups} will be used for scoring other observations,
treating them as if they were observations in an additional
(fictional) group.
If not a factor or category object, it will be coerced to one.
Required.
}
\item{blocks}{
factor or category object of the same length as \code{y}, giving
the block membership for each corresponding element of \code{y}.
Observations with \code{NA} in \code{blocks} will be removed.
If not a factor or category object, it will be coerced to one.
}
\item{score}{
character or function object, giving the score function to be
used. If \code{NULL}, \code{y} is assumed to already have been
scored, e.g., by marginal likelihood scores (Wittkowski 1992)
or u-scores (Wittkowski 2004) for multivariate data. If a
function, it is applied to the ranks of \code{y} (Lehmann, 1951),
if character,
\itemize{
\item \code{"rank"} or \code{"wilcoxon"} are equivalent
to the default,
\item \code{"normal"} or \code{"vanderwaerden"} are \dots
\item \dots (to be continued)
}
}
\item{blkwght}{
character object indicating the weights to apply to \code{blocks}
depending on the ratio between planned size (including \code{NA}s)
and observed size (excluding \code{NA}s). Options are
\itemize{
\item \code{"prentice"},
\item \code{"klotz"},
\item \code{"skillingsmack"},
\item \code{"rai"}
}
see Wittkowski (1988) and Alvo-Cabilio (2005) for details.
}
\item{condvar}{
if \code{FALSE}, the variance is conditional on
(\dQuote{corrected for})
the observed ties, otherwise, the variance is the expected
variance in the absence of ties, see Wittkowski (1988, 1998)
and Randles (2001) for details.
}
\item{df}{
if \code{-1}, the degrees of freedom are computed from the
observed data, if \code{0}, the degrees of freedom are computed
from the planned number of groups, if \code{>0}, the parameter
value is taken as the degrees of freedom.
}
\item{alternative}{
character string, one of \code{"greater"}, \code{"less"} or
\code{"two.sided"}, indicating the specification of the alternative
hypothesis. For Wilcoxon test only.
}
\item{mu}{
a single number representing the value of the mean or difference
in means specified by the null hypothesis.
}
\item{paired}{
if \code{TRUE}, the Wilcoxon signed rank test is computed.
The default is the Wilcoxon rank sum test.
}
\item{exact}{
if \code{TRUE} the exact distribution for the test statistic
is used to compute the p-value if possible.
}
\item{correct}{
if \code{TRUE} a continuity correction is applied to
the normal approximation for the p-value.
}
\item{warn}{
no warnings will be given if \code{warn} is \code{-1} }
\item{optim}{
if \code{FALSE}, the generic algorithm (see Algorithm) is always
used, for testing and teaching purposes only
if \code{TRUE}, faster algorithms are used when available.
}
\item{\dots}{
further arguments to be passed to or from methods.
}
}
\details{
\code{prentice.test} is approximately twice as fast as
\code{friedman.test} or \code{kruskal.test}. In some cases, the
Kruskal-Wallis test reduces to the Wilcoxon Rank-sum test.
Thus, \code{prentice.test} allows the additional parameters
\code{mu}, \code{paired}, \code{exact}, and \code{correct},
to be entered, and passed. To ensure consistency of the results
between \code{wilcox.test} and \code{kruskal.test},
the default for \code{correct} is \code{FALSE} in either case.
}
\value{
A list with class \code{"htest"} containing the following components:
\item{statistic}{
the value of chi-squared statistic,
with \code{names} attribute \code{"statistic: chi-square"}.
See section DETAILS for a definition.
}
\item{parameter}{
the degrees of freedom of the asymptotic chi-squared distribution
associated with \code{statistic}. Component \code{parameters} has
\code{names} attribute \code{"df"}.
}
\item{p.value}{
the asymptotic or exact p-value of the test.
}
\item{method}{
a character string giving the name of the method used.
}
\item{data.name}{
a character string (vector of length 1) containing the actual names
of the input arguments \code{y}, \code{groups}, and \code{blocks}.
}
}
\section{Null Hypothesis}{
The null hypothesis is that for any two observations chosen randomly
from the same block, the probability that the first is larger than
the second is the same as the probability that it is smaller.
}
\section{Test Assumptions}{
The errors are assumed to be independent and identically distributed.
The returned p.value should be interpreted carefully. It is only a
large-sample approximation whose validity increases with the size of
the smallest of the groups and/or the number of blocks.
}
\section{Algorithm}{
\preformatted{
prentice.test <- function(
y,
groups,
blocks = NULL,
score = "rank", # NULL: y already scored
blkwght = "prentice", # block weights
<...>)
{
<...>
m <- xTable(blocks,groups)
<...>
p <- dim(m)[2]-1 # planned number of groups
y.ok <- <...>
y <- y [y.ok]
groups <- groups[y.ok]
blocks <- blocks[y.ok]
M <- xTable(blocks,groups)
<...>
mi <- rowSums(m)
Mi <- rowSums(M)
Wi <- switch(tolower(blkwght),
prentice = (mi+1),
klotz = (Mi+1),
skillingsmack = sqrt(Mi+1),
rai = (Mi+1)/Mi,
<...>)
Bijk <- Wi[blocks]
Tijk <- Centered(
Score(FUNByIdx(y,blocks,wRank,na.any=FALSE)/(Mi[blocks]+1)),
blocks, Mi) * Bijk
T1 <- qapply(Tijk,groups,sum)
A0i2 <- (1/(Mi-1))*qapply(Tijk^2,blocks,sum)
V0 <- structure(dim=c(P,P), A0i2 \%*\% (
t(apply(M,1, function(x,P) diag(x))) - (1/Mi) *
t(apply(M,1,MC(function(x) outer1(x),list(outer1=outer1))))))
V1 <- ginv(V0)
W <- as.numeric(T1 \%*\% V1 \%*\% T1)
df.W <- attr(V1,"rank")
p.W <- 1 - pchisq(W, df.W)
}
}
}
\references{
Friedman, M. (1937)
\emph{Journal of the American Statistical Association}, \bold{32}: 675-701.
Lehmann, E. L. (1951)
\emph{Annals of Mathematical Statistics}, \bold{22}: 165-179.
Kruskal, W. H. and Wallis, W. A. (1952)
\emph{Journal of the American Statistical Association}, \bold{47}: 583-631.
Hajek, J. and Sidak, Z. (1967)
\emph{Theory of rank tests}, New York, NY: Academic.
Hollander, M. and Wolfe, D. A. (1973).
\emph{Nonparametric Statistical Methods}. New York, NY: John Wiley.
Lehmann, E. L. (1975).
\emph{Nonparametrics: Statistical Methods Based on Ranks}. Oakland, CA: Holden-Day.
Prentice, M. J. (1979)
\emph{Biometrika}, \bold{66}: 167-170.
Wittkowski, K. M. (1988)
\emph{Journal of the American Statistical Association}, \bold{83}: 1163-1170.
Alvo, M. and Cabilio, P. (2005)
\emph{Canadian Journal of Statistics-Revue Canadienne De Statistique}, \bold{33}: 115-129.
Wittkowski, K. M. (1992)
\emph{Journal of the American Statistical Association}, \bold{87}: 258.
Wittkowski, K. M. (1998)
\emph{Biometrics}, \bold{54}: 789-791.
Randles, H. R. (2001)
\emph{The American Statistician}, \bold{55}: 96-101.
Wittkowski, K. M., Lee, E., Nussbaum, R., Chamian, F. N. and Krueger, J. G. (2004)
\emph{Statistics in Medicine}, \bold{23}: 1579-1592.
}
\author{Knut M. Wittkowski \email{kmw@rockefeller.edu}}
\seealso{
\code{\link[stats]{wilcox.test}},
\code{\link[stats]{kruskal.test}},
\code{\link[stats]{friedman.test}},
\code{\link[base]{rank}},
\code{\link[stats]{aov}}
}
\examples{
# friedman.test examples
treatments <- factor(rep(c("Trt1", "Trt2", "Trt3"), each=4))
people <- factor(rep(c("Subj1", "Subj2", "Subj3", "Subj4"), 3))
y <- c(0.73,0.76,0.46,0.85,0.48,0.78,0.87,0.22,0.51,0.03,0.39,0.44)
print( friedman.test(y, treatments, people))
print(mu.friedman.test(y, treatments, people))
# Now suppose the data is in the form of a matrix,
# rows are people and columns are treatments.
# Generate 'ymat' and the factor objects:
ymat <- matrix(c(0.73,0.76,0.46,0.85,0.48,0.78,0.87,0.22,0.51,
0.03,0.39,0.44), ncol=3)
bl <- factor(as.vector(row(ymat)))
gr <- factor(as.vector(col(ymat)))
print( friedman.test(ymat, gr, bl)) # same answer as above
print(mu.friedman.test(ymat, gr, bl))
# kruskal.test examples
# Data from Hollander and Wolfe (1973), p. 116
holl.y <- c(2.9,3.0,2.5,2.6,3.2,3.8,2.7,4.0,2.4,2.8,3.4,3.7,2.2,2.0)
holl.grps <- factor(c(1,1,1,1,1,2,2,2,2,3,3,3,3,3),
labels=c("Normal Subjects","Obstr. Airway Disease","Asbestosis"))
print( kruskal.test(holl.y, holl.grps))
print(mu.kruskal.test(holl.y, holl.grps))
# Now suppose the data is in the form of a table already,
# with groups in columns; note this implies that group
# sizes are the same.
tab.data <- matrix(c(.38,.58,.15,.72,.09,.66,.52,.02,.59,.94,
.24,.94,.08,.97,.47,.92,.59,.77), ncol=3)
tab.data
y2 <- as.vector(tab.data)
gr <- factor(as.vector(col(tab.data))) # Groups are columns
print( kruskal.test(y2, gr))
print(mu.kruskal.test(y2, gr))
# wilcox.test examples
x <- c(8.2, 9.4, 9.6, 9.7, 10.0, 14.5, 15.2, 16.1, 17.6, 21.5)
y <- c(4.2, 5.2, 5.8, 6.4, 7.0, 7.3, 10.1, 11.2, 11.3, 11.5)
print( wilcox.test(x,y))
print(mu.wilcox.test(x,y))
print( wilcox.test(x,y, exact=FALSE))
print(mu.wilcox.test(x,y, exact=FALSE))
print( wilcox.test(x,y, exact=FALSE, correct=FALSE))
print(mu.wilcox.test(x,y, exact=FALSE, correct=FALSE))
xy <- c(x,y)
groups <- c(rep(1,length(x)),rep(2,length(y)))
print(prentice.test(xy,groups,exact=FALSE, correct=FALSE))
# compare speed
if (is.R()) sys.time <- function (...) system.time(...)
n <- 1000
data <- runif(30*n)
grps <- c(rep(1,10*n),rep(2,8*n),rep(3,12*n))
print(sys.time( kruskal.test( data,grps) ))
print(sys.time( mu.kruskal.test( data,grps,optim=FALSE) ))
print(sys.time( prentice.test(data,grps) ))
data <- runif(600)
grps <- rep(1:6,each=100)
blks <- rep(1:100,length.out=length(data))
print(sys.time( friedman.test(data,grps,blks) ))
print(sys.time( mu.friedman.test(data,grps,blks,optim=FALSE) ))
print(sys.time( prentice.test(data,grps,blks) ))
data <- runif(50000)
grps <- rep(1:2,each=25000)
Wx <- data[grps==1]
Wy <- data[grps==2]
print(sys.time( wilcox.test(Wx,Wy) ))
print(sys.time( mu.wilcox.test(Wx,Wy,optim=FALSE) ))
print(sys.time( prentice.test(data,grps) ))
}
\keyword{htest}
\keyword{multivariate}
\keyword{nonparametric}
|
25edcffa1186018173fd382d0d9a3f8e54eeeca9
|
16b68dbf022ed548baa7fe4d4d45f6536a8cb290
|
/Cancer-Data-Impute.R
|
13e9c69bab00a0defc485203eb172163f1ed6b0b
|
[] |
no_license
|
ckas-fh/cancer-dataimpute
|
ff314dfdf9902d5f0135424f79b1d7bd9e5ca921
|
c2c73b8c97fe1cce5ed6f1c6edbe21d1ad9c45ca
|
refs/heads/master
| 2020-06-21T11:16:16.699943
| 2019-07-17T17:25:16
| 2019-07-17T17:25:16
| 197,433,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,372
|
r
|
Cancer-Data-Impute.R
|
#######################
#Caroline Cancer Data Cleaning/Imputation
# Feb 26, 2019
#
# Purpose: clean the Caroline cancer dataset, recode two categorical
# columns, then fill missing values by multiple imputation (MICE,
# predictive mean matching) and extract one completed dataset.
########################
#1. loading relevant packages
library(dplyr)
library(mice)
library(naniar)
#2. loading dataframe and setting working directory
# NOTE(review): setwd() inside a script is non-portable; prefer running
# from the data directory or using a project-relative path.
setwd("Desktop")
cancer_data <- read.csv("Caroline-Dataset.csv")
#3. general data cleaning
#changing BonyLesions to be more readable: collapse the ">3" code into "3"
cancer_data <- cancer_data %>%
mutate(BonyLesions=recode(BonyLesions, ">3"="3"))
#changing ClinStages data and column name to be more readable:
# Roman numerals -> digits, and fix the column-name capitalisation
cancer_data <- cancer_data %>%
mutate(ClinStageS=recode(ClinStageS, "I"="1", "II"="2","III"="3"))%>%
rename(ClinStages=ClinStageS)
#4. observing how many missing values in overall data frame
# Empty strings are converted to NA first so they are counted as missing.
cancer_data[cancer_data == ""] <- NA
print(cancer_data)
sum(is.na(cancer_data))
md.pattern(cancer_data)
#5. observing how many missing values in each column
colSums(is.na(cancer_data))
#tells us that YearofDx,SurvivalDays,AgeAtDx have missing values
#6. imputing missing values with MICE
# m = 5 imputed datasets, up to 20 iterations, method 'pmm'
# (predictive mean matching). NOTE(review): seed = NA makes runs
# non-reproducible; set a fixed seed if reproducibility matters.
imputed_Data <- mice(cancer_data, m=5, maxit = 20, method = 'pmm', seed = NA)
summary(imputed_Data)
#7. observing imputed values
imputed_Data$imp$YearOfDx
imputed_Data$imp$AgeAtDx
imputed_Data$imp$SurvivalDays
#8. completing dataset with imputed values
# complete(..., 2) keeps the 2nd of the 5 imputed datasets
complete_Data <- complete(imputed_Data,2)
#9. checking that dataset has imputed values
sum(is.na(complete_Data))
a0f58a13b72913115bf5b0ebe37a944a726b9d57
|
c08af96cf3635e1b4c881e06fbec6bbeb6e431f3
|
/plot3.R
|
6d8405229265e438d08eb5db8a2871aca3b54729
|
[] |
no_license
|
Cruzzor/ExData_Plotting1
|
862fa012ea3ecd4fc57d8eb37194c071ce0989d3
|
56cb65a3515e0bc9ad8cd0498dcd164493ab3a34
|
refs/heads/master
| 2021-01-18T10:01:38.572013
| 2015-09-13T19:27:40
| 2015-09-13T19:27:40
| 42,227,187
| 0
| 0
| null | 2015-09-10T06:38:48
| 2015-09-10T06:38:47
| null |
UTF-8
|
R
| false
| false
| 1,450
|
r
|
plot3.R
|
library(dplyr)
library(tidyr)

## plot3.R: draw the three energy sub-metering series for 1-2 Feb 2007
## from the UCI household power consumption dataset and save as plot3.png.

## Read the full dataset and keep only the two target days.
data <- read.csv("household_power_consumption.txt", stringsAsFactors = FALSE, header = TRUE, sep = ";")
mydata <- data %>% filter(Date == "1/2/2007" | Date == "2/2/2007")

## Switch to an English locale so the x-axis weekday labels are in English,
## remembering the original locale so it can be restored afterwards.
original_locale <- Sys.getlocale(category = "LC_TIME")
Sys.setlocale(category = "LC_TIME", locale = "English")

## Build a date-time column and make the sub-metering columns numeric.
## NOTE(review): strptime() returns POSIXlt; it works here for plotting,
## but POSIXct is generally safer to keep inside a data frame.
mydata$Time <- strptime(paste(mydata$Date, mydata$Time), "%d/%m/%Y %H:%M:%S")
mydata$Sub_metering_1 <- as.numeric(mydata$Sub_metering_1)
mydata$Sub_metering_2 <- as.numeric(mydata$Sub_metering_2)
mydata$Sub_metering_3 <- as.numeric(mydata$Sub_metering_3)

## Open png device for plotting
png(file = "plot3.png", width = 480, height = 480)

## Init parameters and create an empty diagram with the legend.
par(mfrow = c(1, 1))
plot(mydata$Time, mydata$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1, 1, 1))

## Draw each series with an explicit per-call colour instead of mutating
## the global graphics state via par(col = ...) between calls.
lines(mydata$Time, mydata$Sub_metering_1, col = "black")
lines(mydata$Time, mydata$Sub_metering_2, col = "red")
lines(mydata$Time, mydata$Sub_metering_3, col = "blue")

## Restore the original locale and close the png device.
Sys.setlocale(category = "LC_TIME", locale = original_locale)
dev.off()
|
d31c5156c5658026656b101770621730fc844fe5
|
a9cff4d70b2659ab336290a5ab9a8d20c1a72f7c
|
/cachematrix.R
|
1005c73e786119d5005724d036740b76c92f52e1
|
[] |
no_license
|
info-business-devel/ProgrammingAssignment2
|
81c8461d052b9e92ffc7c2e27413d1bb7a57beb5
|
df7b10abffbc00587b35890d00c53cdc9fc9033c
|
refs/heads/master
| 2020-03-08T03:17:15.204407
| 2018-04-03T10:06:13
| 2018-04-03T10:06:13
| 127,798,085
| 0
| 0
| null | 2018-04-02T18:47:45
| 2018-04-02T18:47:45
| null |
UTF-8
|
R
| false
| false
| 4,687
|
r
|
cachematrix.R
|
# cachematrix.R
## Function: cacheSolve <- function(x, ...)
### 'x' is a matrix.
### This function asks function makeCacheMatrix whether or not it has 'x' in cache.
### If makeCacheMatrix has 'x' in cache that means there is no need to create its inverse matrix
### because this is already in cache (both are in cache: 'x' and its inverse).
### If 'x' is not in cache, the function cacheSolve has to create its inverse and call
### function makeCacheMatrix to put both of them ('x' and its inverse) in cache.
### The regular operation of this function is (example):
### matrix_1 <- matrix(c(2, 4, 6, 8), nrow = 2, ncol = 2)
### cacheSolve(makeCacheMatrix(matrix_1))
### matrix_1 is not in cache.
### cacheSolve asks makeCacheMatrix if matrix_1 is in cache (not, it isn't).
### cacheSolve creates its inverse and call makeCacheMatrix to put both in cache.
### Now matrix_1 and its inverse are both in cache.
### matrix_2 <- matrix(c(2, 4, 6, 8), nrow = 2, ncol = 2)
### cacheSolve(makeCacheMatrix(matrix_2))
### matrix_2 is identical to matrix_1.
### cacheSolve asks makeCacheMatrix if matrix_2 is in cache (yes, it's).
### There is no need to create matrix_2 inverse just retrieve it from cache.
### cacheSolve calls makeCacheMatrix and retrieves matrix_1 inverse.
### cacheSolve returns matrix_1 inverse (identical to matrix_2 inverse).
## Function: makeCacheMatrix <- function(x = matrix())
### This function just waits to be called by function cacheSolve.
### It has got 5 'methods':
#### get_data: It returns 'x', the argument of the function (in previous example: matrix_1 or matrix_2).
#### set_initial_matrix: It puts the initial matrix in cache.
#### get_initial_matrix: It gets the initial matrix from cache.
#### set_inverse_matrix: It puts the inverse matrix in cache.
#### get_inverse_matrix: It gets the inverse matrix from cache.
makeCacheMatrix <- function(x = matrix()) {
  ## Build the list of accessor "methods" used by cacheSolve().
  ## Note: the set_*/get_* pairs read and write the file-level variables
  ## initial_matrix and inverse_matrix via <<-, so the cache is shared
  ## across every object created by this function (by design -- see the
  ## file header comments).
  get_data <- function() {
    x  # the matrix this wrapper was created with
  }
  set_initial_matrix <- function(value) {
    initial_matrix <<- value  # cache the original matrix
  }
  get_initial_matrix <- function() {
    initial_matrix  # fetch the cached original matrix
  }
  set_inverse_matrix <- function(value) {
    inverse_matrix <<- value  # cache the inverse
  }
  get_inverse_matrix <- function() {
    inverse_matrix  # fetch the cached inverse
  }
  list(
    get_data = get_data,
    set_initial_matrix = set_initial_matrix,
    get_initial_matrix = get_initial_matrix,
    set_inverse_matrix = set_inverse_matrix,
    get_inverse_matrix = get_inverse_matrix)
}
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
  ## object). If an identical matrix is already cached, the stored
  ## inverse is returned; otherwise the inverse is computed with solve(),
  ## cached (together with the matrix itself), and returned.
  cached <- x$get_initial_matrix()
  current <- x$get_data()
  hit <- !is.null(cached) && identical(cached, current)
  if (hit) {
    # Cache hit: no need to recompute the inverse.
    message("The matrix is in cache. Getting cached inverse matrix.")
    return(x$get_inverse_matrix())
  }
  # Cache miss: compute the inverse and store both matrices in the cache.
  inv <- solve(current, ...)
  x$set_initial_matrix(current)
  x$set_inverse_matrix(inv)
  inv
}
# First declaration of initial_matrix (the cached 'x' matrix, held at file
# level); makeCacheMatrix()'s accessors write to it via <<-.
initial_matrix <- NULL
# Try it by removing '#' before the following lines:
## matrix_1 <- matrix(c(2, 4, 6, 8), nrow = 2, ncol = 2)
## cacheSolve(makeCacheMatrix(matrix_1))
## matrix_2 <- matrix(c(2, 4, 6, 8), nrow = 2, ncol = 2)
## cacheSolve(makeCacheMatrix(matrix_2))
|
e88c027041ef120b23518d9a8527db5b4dc0973c
|
f640b73ca47aad41a982d882a9e7c5c691d7fb23
|
/man/as_tibble.Rd
|
2a3199721f40448b415137cdc3065254a9854ca3
|
[
"Apache-2.0"
] |
permissive
|
ccb2n19/sfnetworks
|
dcd16a14af4a2a46455448d408e6763569e829cc
|
c06418060ec0f9df1b378e60b7903ce7dbee44f8
|
refs/heads/master
| 2023-03-03T01:26:15.046876
| 2021-02-08T11:21:01
| 2021-02-08T11:21:01
| 339,112,861
| 0
| 0
|
Apache-2.0
| 2021-02-18T09:14:34
| 2021-02-15T15:01:48
| null |
UTF-8
|
R
| false
| true
| 1,887
|
rd
|
as_tibble.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tibble.R
\name{as_tibble}
\alias{as_tibble}
\alias{as_tibble.sfnetwork}
\title{Extract the active element of a sfnetwork as spatial tibble}
\usage{
\method{as_tibble}{sfnetwork}(x, active = NULL, spatial = TRUE, ...)
}
\arguments{
\item{x}{An object of class \code{\link{sfnetwork}}.}
\item{active}{Which network element (i.e. nodes or edges) to activate before
extracting. If \code{NULL}, it will be set to the current active element of
the given network. Defaults to \code{NULL}.}
\item{spatial}{Should the extracted tibble be a 'spatial tibble', i.e. an
object of class \code{c('sf', 'tbl_df')}, if it contains a geometry list
column. Defaults to \code{TRUE}.}
\item{...}{Arguments passed on to \code{\link[tibble]{as_tibble}}.}
}
\value{
The active element of the network as an object of class
\code{\link[tibble]{tibble}}.
}
\description{
The sfnetwork method for \code{\link[tibble]{as_tibble}} is conceptually
different. Whenever a geometry list column is present, it will by default
return what we call a 'spatial tibble'. With that we mean an object of
class \code{c('sf', 'tbl_df')} instead of an object of class
\code{'tbl_df'}. This little conceptual trick is essential for how
tidyverse functions handle \code{\link{sfnetwork}} objects, i.e. always
using the corresponding \code{\link[sf]{sf}} method if present. When using
\code{\link[tibble]{as_tibble}} on \code{\link{sfnetwork}} objects directly
as a user, you can disable this behaviour by setting \code{spatial = FALSE}.
}
\examples{
library(tibble, quietly = TRUE)
net = as_sfnetwork(roxel)
# Extract the active network element as a spatial tibble.
as_tibble(net)
# Extract any network element as a spatial tibble.
as_tibble(net, "edges")
# Extract the active network element as a regular tibble.
as_tibble(net, spatial = FALSE)
}
|
f7a069d7dec0edd58cf56f65b0361810ba49daf1
|
17f1b5b761a43ec178602a43f24ac72c2d5d01a9
|
/hmlasso/inst/testfiles/softThresholdC/libFuzzer_softThresholdC/softThresholdC_valgrind_files/1609897578-test.R
|
806868709747c152f33b355c55ac9da4e3aa6065
|
[] |
no_license
|
akhikolla/newtestfiles-2
|
3e1882e7eea3091f45003c3abb3e55bc9c2f8f56
|
e539420696b7fdc05ce9bad66b5c7564c5b4dab2
|
refs/heads/master
| 2023-03-30T14:44:30.614977
| 2021-04-11T23:21:23
| 2021-04-11T23:21:23
| 356,957,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 134
|
r
|
1609897578-test.R
|
## Fuzz-style regression driver: invoke hmlasso:::softThresholdC with one
## fixed pair of extreme inputs and print the structure of the result.
input_args <- list(g = 2.1644539979134e+233, z = 1.32515051110005e-105)
result <- do.call(hmlasso:::softThresholdC, input_args)
str(result)
|
7eeada0c0e05f86836a91512a4055543a66fc3ad
|
4d3672136d43264176fe42ea42196f113532138d
|
/man/Firstchi.Rd
|
aa0661348f4acbe00af7e92f668502a7d57a6123
|
[] |
no_license
|
alanarnholt/BSDA
|
43c851749a402c6fe73213c31d42c26fa968303e
|
2098ae86a552d69e4af0287c8b1828f7fa0ee325
|
refs/heads/master
| 2022-06-10T10:52:15.879117
| 2022-05-14T23:58:15
| 2022-05-14T23:58:15
| 52,566,969
| 5
| 13
| null | 2017-07-27T02:06:33
| 2016-02-26T00:28:07
|
R
|
UTF-8
|
R
| false
| true
| 588
|
rd
|
Firstchi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Firstchi}
\alias{Firstchi}
\title{Ages of women at the birth of their first child}
\format{
A data frame/tibble with 87 observations on one variable
\describe{
\item{age}{age of woman at birth of her first child}
}
}
\usage{
Firstchi
}
\description{
Data for Exercise 5.11
}
\examples{
EDA(Firstchi$age)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
|
a46abee26b2ed85d3e72985a8319e5abaf00ec51
|
f76ff063f424701662126563603ea2114368e0aa
|
/cachematrix.R
|
478126b1ec32ef9933716700f70c46445531e669
|
[] |
no_license
|
SayanPal/ProgrammingAssignment2
|
ed237c083bdd4ae66cffe594140087418b39ca03
|
10c2048dca6043f42b9caad6574c0163ee8ec8e6
|
refs/heads/master
| 2020-04-01T13:58:58.577206
| 2015-05-21T16:55:03
| 2015-05-21T16:55:03
| 35,992,985
| 0
| 0
| null | 2015-05-21T05:31:30
| 2015-05-21T05:31:29
| null |
UTF-8
|
R
| false
| false
| 2,059
|
r
|
cachematrix.R
|
## This file contains two functions makeCacheMatrix() and cacheSolve.
## makeCacheMatrix makes a 'special' type of matrix object that caches it inverse.
## cacheSolve checks whether the inverse already computed or not and return the
## value accordingly.
## Assumption: The matrix supplied is always invertible.
## makeCacheMatrix: creates a special "matrix" object that can cache its
## inverse.
## x: matrix object, default value is matrix().
## inverse of x is intended to be computed.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse, held in
  ## this function's closure. Returns a list of accessors:
  ##   set(y)        - replace the matrix and drop any cached inverse
  ##   get()         - the current matrix
  ##   setinverse(v) - store a computed inverse in the cache
  ##   getinverse()  - the cached inverse, or NULL if none yet
  cached_inv <- NULL
  replace <- function(new_mat) {
    # Swapping in a new matrix invalidates the previously cached inverse.
    x <<- new_mat
    cached_inv <<- NULL
  }
  fetch <- function() {
    x
  }
  store_inverse <- function(inv) {
    cached_inv <<- inv
  }
  fetch_inverse <- function() {
    cached_inv
  }
  list(set = replace,
       get = fetch,
       setinverse = store_inverse,
       getinverse = fetch_inverse)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
  ## object). A previously cached inverse is reused; otherwise the
  ## inverse is computed via solve(), stored in the cache, and returned.
  ## Extra arguments in ... are forwarded to solve().
  hit <- x$getinverse()
  if (is.null(hit)) {
    # Nothing cached yet: compute, cache, and return the inverse.
    computed <- solve(x$get(), ...)
    x$setinverse(computed)
    return(computed)
  }
  message("getting cached data")
  hit
}
## External Sources: referred following links for sample invertible matrices:
## http://www.sosmath.com/matrix/matinv/matinv.html
## http://www.purplemath.com/modules/mtrxinvr2.htm
## http://www.mathwords.com/i/inverse_of_a_matrix.htm
## Sample usage:
## 1. splmat <- makeCacheMatrix(matrix(c(2,2,3,2),2,2))
## cacheSolve(splmat)
## 2. splmat <- makeCacheMatrix(matrix(c(1,-1,1,2),2,2))
## cacheSolve(splmat)
## Valid output of these above two call can be verified from the above said link.
|
ee07137fb92ec378750e93339e92ddb81bb667d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Dowd/examples/tVaRDFPerc.Rd.R
|
343af6674f8f0a78756b7fe21574895c70e3922b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 468
|
r
|
tVaRDFPerc.Rd.R
|
library(Dowd)
## Examples for Dowd::tVaRDFPerc -- percentiles of the VaR distribution
## function under a Student-t assumption.

## Example 1: estimate a VaR-distribution percentile from raw return data.
sample_returns <- runif(5, min = 0, max = .2)
tVaRDFPerc(returns = sample_returns, perc = .7,
           df = 6, cl = .95, hp = 60)

## Example 2: same computation given the mean and standard deviation of
## the return data instead of the raw returns.
tVaRDFPerc(mu = .012, sigma = .03, n= 10,
           perc = .8, df = 6, cl = .99, hp = 40)
|
a028bdebbbc6df95eb6eb6a6539826cd7015e42d
|
0ddcd55cbfed8282a8f43ae141d6f3f5b9acb0b7
|
/best.R
|
1ff09a89a327771886dade0a3c54b9fcaf32f380
|
[] |
no_license
|
kristenphan/HospitalPerformanceAnalysis
|
e267b04ebfb1e6cdc3a2e185eab4c9d038e1da92
|
798890f4b3d0599aa2a31b54f42bbd00a65e12d1
|
refs/heads/master
| 2022-07-05T10:07:47.655848
| 2020-05-14T09:34:40
| 2020-05-14T09:34:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,069
|
r
|
best.R
|
## this program best.R houses the main function best.R which determines the best hospital in a specified
## state in terms of the 30-day mortality rate of a specified outcome (i.e., heart attack, heart failure,
## or pneumonia)
## best.R is supported by 4 functions:
## (1) read_data(): read in the dataset containing data on 4000 major U.S. hospitals on various metrics including 30-day mortality rate for heart attack, heart failure, and pneumonia
## (2) validateState(): ensure the passed in state is valid
## (3) validateOutcome(): ensure the passed in outcome is valid
## (4) findBestHospital(): return the best hospital in the state for the outcome
## the function reads in the outcome CSV file from the passed in filepath
## and returns a data frame with complete records for the specified state and outcome
## Read the outcome CSV and return a data frame of complete records
## (columns: hospital, state, rate) for the requested state and outcome.
##
## file_path: path to the outcome-of-care-measures CSV file.
## state:     two-letter state abbreviation to filter on.
## outcome:   one of the names of 'outcomes'.
## outcomes:  named integer vector mapping each outcome name to the
##            column index of its 30-day mortality rate in the CSV.
readData <- function(file_path, state, outcome, outcomes) {
  data <- read.csv(file_path, na.strings= "Not Available", stringsAsFactors=FALSE)
  # retain only 3 columns: hospital name (col 2), state (col 7), and the
  # mortality-rate column for the specified outcome
  data_by_outcome <- data[,c(2, 7, outcomes[outcome])]
  names(data_by_outcome) <- c("hospital", "state", "rate")
  # BUG FIX: the filter was hard-coded to "TX" instead of using the
  # 'state' argument. Compare the column against the parameter directly
  # (plain indexing avoids subset()'s non-standard-evaluation name clash).
  data_by_outcome_and_state <- data_by_outcome[data_by_outcome$state == state, ]
  # drop rows with any missing value (in particular, a missing rate)
  data_by_outcome_and_state[complete.cases(data_by_outcome_and_state), ]
}
## this function checks if the state is valid and stops the execution if it is not
validateState <- function(state, state_file_path) {
  ## Stop with an error unless 'state' appears in the Postal.Code column
  ## of the states CSV (abbreviations of the 50 U.S. states plus DC).
  known_codes <- read.csv(state_file_path)$Postal.Code
  if (!(state %in% known_codes)) {
    stop("Invalid state!")
  }
}
## this function checks that outcome is valid
## the outcomes can be one of "heart attack", "heart failure", or "pneumonia"
## this function stops the execution if outcome is invalid
## Stop with an error unless 'outcome' is one of the supported outcomes.
##
## outcome:           candidate outcome string.
## possible_outcomes: named vector whose names are the valid outcomes
##                    ("heart attack", "heart failure", "pneumonia").
validateOutcome <- function(outcome, possible_outcomes) {
  # BUG FIX: the check previously referenced a global 'outcomes' object
  # instead of the 'possible_outcomes' parameter, so the function errored
  # (object not found) unless such a global happened to exist.
  if (!(outcome %in% names(possible_outcomes))) {
    stop("Invalid outcome!")
  }
}
## this function returns the best hospital in the state with the lowest mortality rate for the specified outcome
## Return the hospital in 'data' with the lowest mortality rate; ties are
## broken alphabetically (first name in lexicographic order).
##
## data: data frame with 'hospital' (character) and 'rate' (numeric)
##       columns; rows with missing rates must already be removed.
findBestHospital <- function(data) {
  # BUG FIX: the original loop appended a newly found minimum twice
  # (the '<' branch updated min_fatality, which made the '==' branch fire
  # for the same row) and grew the vector with c() inside the loop.
  # A vectorized min + logical subset is both correct and O(n).
  best_rate <- min(data$rate)
  tied <- data$hospital[data$rate == best_rate]
  ## sort the tied hospital names and return the first alphabetically
  sort(tied)[1]
}
## this function reads the outcome-of-care-measures.csv file and returns a character vector with the name of the hospital that has the best (i.e. lowest) 30-day mortality for the specifed outcome in that state.
## If there is a tie for the best hospital for a given outcome, then the hospital names should be sorted in alphabetical order and the first hospital in that set should be chosen (i.e. if hospitals "b" and "c" are tied for best, then hospital "b" should be returned)
best <- function(state, outcome) {
## Check that state and outcome are valid
## there are 3 outcomes in scope for this analysis: heart attack, heart failure, pneumonia
## The named vector maps each outcome to the column index of its 30-day
## mortality rate in the outcomes CSV.
outcomes <- c("heart attack" = 11, "heart failure"=17, "pneumonia"=23)
validateOutcome(outcome,outcomes)
## NOTE(review): both CSV paths are relative to the working directory --
## states.csv and outcome-of-care-measures.csv must sit next to the script.
state_file_path <- "states.csv"
validateState(state,state_file_path)
## Read outcome data for hospitals in the state, except ones with no data on the outcome
outcome_file_path <- "outcome-of-care-measures.csv"
data <- readData(outcome_file_path, state, outcome, outcomes)
## return hospital name in that state with lowest 30-day death rate
## (ties are broken alphabetically inside findBestHospital)
best_hospital <- findBestHospital(data)
best_hospital
}
|
6cb60699223fa95d8b2e6339d234dcd25b0c5467
|
d73d1fffc2c69ed18638262380186db28ef129c8
|
/man/loopall_fun.Rd
|
bad1431f382d3e15feb74203b713e77cae7112c5
|
[] |
no_license
|
estalenberg/ptrm
|
7f48a545e679fcefcddaf7009c8f7304e21883bf
|
262755ead3ee8b6e0900775134ac401e799ddc4c
|
refs/heads/master
| 2020-04-30T12:26:35.587724
| 2019-07-31T09:22:56
| 2019-07-31T09:22:56
| 176,826,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,611
|
rd
|
loopall_fun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loopall_fun.R
\name{loopall_fun}
\alias{loopall_fun}
\title{Energy volume functions}
\usage{
loopall_fun(assets.df, other.df, iab.df, priceprof.df, projyearend.in,
age.in, retireslim.in, addnew.in, productivity.in, dnsp.in, rba.in,
use.in, cust.in, cars.in, ogrid.in, solar.in, batt.in, signal.in)
}
\arguments{
\item{assets.df}{database of static dnsp inputs for all assets}
\item{other.df}{database of static dnsp inputs}
\item{iab.df}{database of static dnsp inputs for iab and tax iab}
\item{priceprof.df}{dataframe of static dnsp price profile inputs}
\item{projyearend.in}{dynamic variable of final year}
\item{age.in}{dynamic variable of age of assets}
\item{retireslim.in}{dynamic variable of slim assets}
\item{addnew.in}{dynamic variable of add new assets}
\item{productivity.in}{dynamic variable of productivity}
\item{dnsp.in}{DNSP selected for function loop}
\item{rba.in}{dynamic variable of RBA cash rate}
\item{use.in}{dynamic variable of growth of energy use per customer}
\item{cust.in}{dynamic variable of growth of customers percent}
\item{cars.in}{dynamic variable for electric vehicle penetration by 2060 as a percent}
\item{ogrid.in}{dynamic variable of offgrid customers by 2060 percent}
\item{solar.in}{dynamic variable of percent of solar penetration by 2060}
\item{batt.in}{dynamic variable of percent of battery penetration by 2060}
\item{signal.in}{dynamic variable of price signal text input}
}
\description{
Formula for calculating the energy volume distribution among customers
}
|
86993aa5ca025cf1c67893cf742f771539df6dbc
|
11a39c775c4fa4d64aaf1a1eb41a75060c55d80c
|
/ui.R
|
d03ba93b823391781ca330782493990e2ab453ba
|
[] |
no_license
|
mramalin1996/Airbnb_shinyapp
|
92b9ec5085c4d00bd0b4f02751ca0b9b94a98d31
|
58699792edf47d2fdaedf20da411bea504ef077c
|
refs/heads/master
| 2021-07-18T09:05:42.033454
| 2017-10-25T17:59:20
| 2017-10-25T17:59:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,441
|
r
|
ui.R
|
# ui.R — Shiny UI for an Airbnb NYC visualization app.
# Four navbar areas: a static overview image, an interactive leaflet map with
# filter controls, a listings/price-trends tab, and a references menu.
# Server outputs referenced here: "map", "count_room", "avgprice", "graph1",
# "tab_price". Inputs defined here: select_boro, select_room, slider_price,
# slider_rating, slider_review, tab2_price, tab2_rating, price_option.
# NOTE(review): `boro` and `room` are used below but not defined in this file —
# presumably created in global.R; confirm they are in scope when this is sourced.
library(shinythemes)
shinyUI(
navbarPage(title = "Airbnb Visualization",
id ="nav",
theme = shinytheme("united"), #https://rstudio.github.io/shinythemes/
##### Overview ##########
# Static landing page: a single centered image from www/.
tabPanel("Overview",
br(),
br(),
br(),
#img(src = "airbnb_overview.jpg", height = 600, weight =700, align="center")
#use Shiny’s HTML tag functions to center the image
#https://stackoverflow.com/questions/34663099/how-to-center-an-image-in-a-shiny-app
HTML('<center><img src="airbnb_overview.jpg", height = 600, weight =700 ></center>')
),
##### Map ##########
# Full-screen leaflet map with two draggable overlay panels:
# filter controls (pinned right) and summary plots (left).
tabPanel("NYC map",
div(class="outer",
tags$head(#customized CSS
includeCSS("styles.css")),
leafletOutput(outputId = "map", width = "100%", height = "100%"),
# Panel options: borough, Room Type, Price, Rating, Reviews
absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE,
top = 80, left = "auto", right = 20, bottom = "auto",
width = 320, height = "auto",
h2("Airbnb in NYC"),
checkboxGroupInput(inputId = "select_boro", label = h4("Borough"),
choices = boro, selected = 'Manhattan'),
checkboxGroupInput(inputId = "select_room", label = h4("Room Type"),
choices = room, selected = room),
sliderInput(inputId = "slider_price", label = h4("Price"), min = 1, max = 300, step = 20,
pre = "$", sep = ",", value = c(30, 300)),
sliderInput(inputId = "slider_rating", label = h4("Rating Score"), min = 20, max = 100, step = 10,
value = c(60, 100)),
sliderInput(inputId = "slider_review", label = h4("Number of Reviews"), min = 10, max = 450, step = 50,
value = c(10, 450)),
h6("The map information is based on May 02, 2017 dataset from"),
h6(a("Inside Airbnb", href = "http://insideairbnb.com/get-the-data.html", target="_blank"))
),
# Results: count_room, avgprice
absolutePanel(id = "controls", class = "panel panel-default", fixed = FALSE, draggable = TRUE,
top = 320, left = 20, right = "auto" , bottom = "auto",
width = 320, height = "auto",
plotlyOutput(outputId = "count_room",height = 150),
plotlyOutput(outputId = "avgprice", height = 150))
)),
##### Listings ##########
# Side-by-side layout: filter column (width 3) and two plotly charts (width 9).
tabPanel("Listings, Boroughs and Price Changes",
fluidRow(
column(3,
h3("Listings by Boroughs and Room Type"),
br(),
br(),
sliderInput(inputId = "tab2_price", h4("Price/Night"), min = 10, max = 500, value = c(10, 500)),
sliderInput(inputId = "tab2_rating", h4("Rating Score"), min = 10, max = 100, value = c(10,100)),
br(),
br(),
h3("Price Changes over Time"),
selectInput("price_option", label = h3("Select Time Type"),
choices = list("Year" = "Year","Month" = "Month"), selected = "Year")
),
column(9,
h3(""),
plotlyOutput(outputId = "graph1", width=1000, height =350),
br(),
plotlyOutput(outputId = "tab_price", width=1000, height =350)
)
)
),
##### References ##########
# External links only; no inputs or outputs.
navbarMenu("References",
tabPanel("Inside Airbnb",
h3("Inside Airbnb", a("Link", href="http://insideairbnb.com/get-the-data.html"))),
tabPanel("Airbnb Business Model",
h3("Airbnb Business Model", a("Link", href="http://bmtoolbox.net/stories/airbnb/")))
)
#imbed html https://stackoverflow.com/questions/17847764/put-a-html-link-to-the-r-shiny-application
#imbed pdf https://gist.github.com/aagarw30/d5aa49864674aaf74951
#imbed web https://stackoverflow.com/questions/33020558/embed-iframe-inside-shiny-app
))
|
89d2f6f290eefafcc1d681dfd1e8a3c70f8b005b
|
8ccbc8d238b98866dcf504fb366c99459f61aea5
|
/plot3.R
|
df2a4527780920cdac28820180426d44f4ce69cc
|
[] |
no_license
|
alfysamuel/ExData-Plotting1
|
2a6395886a10dee78df3c16ce1921e1f700733d6
|
d81038cb647d4bf4b05371887ee2d1fb921f721f
|
refs/heads/master
| 2021-01-20T21:49:08.422419
| 2015-02-08T20:10:49
| 2015-02-08T20:10:49
| 30,498,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
plot3.R
|
# plot3.R — energy sub-metering plot (writes plot3.png).
# Draws the three Sub_metering_* series from `data` (created by load_data.R)
# against DateTime, then adds a legend matching each series to its colour.
# NOTE(review): the setwd() fallback is a machine-specific absolute path;
# prefer running the script from the project directory.
if (!"load_data.R" %in% list.files()) {
  setwd("C:/Users/Alfy/Desktop/Coursera/Exploratory Data Analysis/")
}
source("load_data.R")

png(filename = "plot3.png")
legendcols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
plot(x = data$DateTime,
     y = data$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")
lines(data$DateTime, data$Sub_metering_2, type = "l", col = "red")
lines(data$DateTime, data$Sub_metering_3, type = "l", col = "blue")
# BUG FIX: legend colours must follow the label order — Sub_metering_1 is
# black, Sub_metering_2 is drawn red and Sub_metering_3 blue above; the
# original c("black", "blue", "red") mislabelled series 2 and 3.
legend("topright", lty = 1, lwd = 1,
       col = c("black", "red", "blue"), legend = legendcols)
dev.off()
|
ad9d46764a908a7e1344285492477de9c4f39a27
|
4f156c03403ebc1e14df59e09389b0ccb51c16fa
|
/ui.R
|
6e0459dae939291febe829a019334dd9fd194433
|
[] |
no_license
|
antonymaina12/ProcurementApp
|
6cc4df0e4ce03ce211034bb81cc64b94eee0b688
|
2192d7d7798ddbc4e252727c0f3a36a9cbfdd9be
|
refs/heads/master
| 2021-09-09T11:23:28.804212
| 2018-03-15T15:20:48
| 2018-03-15T15:20:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,993
|
r
|
ui.R
|
# ui.R — shinydashboard UI for the procurement-requests demo app.
# Builds the country drop-down from data/memberlist.csv and lays out rows of
# input boxes, quick stats, and image/table outputs rendered by the server
# ("section1", "section2", "gs_breakdown", "sharebar", ...).
library(shiny)
library(shinydashboard)
# Some data imports:
# First CSV column only; "All countries" is prepended as the default catch-all
# choice. setNames() makes display labels identical to the values.
memberlist <- read.csv("data/memberlist.csv", stringsAsFactors=FALSE)[[1]]
memberlist <- append(memberlist, "All countries", after = 0)
choices <- setNames(memberlist, memberlist)
# User Interface:
ui <- dashboardPage(
dashboardHeader(title = "Procurement requests*"),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Disclaimer row (plain text, spans two source lines inside one string).
fluidRow(
br(),
" *: The data used here is heavily obfuscated and randomized and does not resemble real UN data.
This project is merely a learning project for Shiny Apps",
br()
),
# Input row: partition (UN/UNOPS) and country selectors.
fluidRow(
shinydashboard::box(
title = "Please select input (1)", width = 4,
background = "light-blue",
selectInput("partition", "Select UN or UNOPS:",
choices = list("UNOPS" = "unops", "UN" = "un"),
selected = "un", multiple = FALSE)),
shinydashboard::box(
title = "Please select input (2)", width = 4,
background = "light-blue",
selectInput("ctryID", "Select a country:",
choices = choices,
selected = "Afghanistan", multiple = FALSE)
)
),
# Quick stats + supplier pie chart.
fluidRow(
shinydashboard::box(
title = "Quick stats", width = 3,
background = "light-blue",
"Total procurement by UN/UNOPS in this country:",
verbatimTextOutput("totalsproc"),
"Country's share in total UN/UNOPS procurement:",
verbatimTextOutput("ctrysharetot")
),
shinydashboard::box(
title = "Supplier pie chart", width = 5,
status = "primary", solidHeader = TRUE,
imageOutput("section2", height=350),
downloadButton("dl_suppie", "Download", class = "butt")
)
),
# Total procurement: plot + raw table; the tags$style block gives every
# element with class "butt" the same blue download-button styling.
fluidRow(
shinydashboard::box(
title = "Total procurement - plot", width = 5,
status = "primary", solidHeader = TRUE,
imageOutput("section1", height=300),
downloadButton("dl_procbar", "Download", class = "butt"),
tags$head(
tags$style(".butt{background-color:#518ecb;}
.butt{color: white;} .butt{font-family:Verdana;}"))
),
shinydashboard::box(
title = "Total procurement - raw", width = 3,
status = "primary", solidHeader = TRUE,
br(),
tableOutput('totproc_table'),
br(), br(),
downloadButton("dl_totproc_raw", "Download", class = "butt")
)
),
# Goods/services category breakdown.
fluidRow(
shinydashboard::box(
title = "Category distributions", width = 8,
status = "primary", solidHeader = TRUE,
imageOutput("gs_breakdown", height=400),
br(),
downloadButton("dl_gs_breakdown", "Download", class = "butt")
)
),
# Per-agency shares: plot + raw table.
fluidRow(
shinydashboard::box(
title = "Procurement by agency - plot", width = 5,
status = "primary", solidHeader = TRUE,
imageOutput("sharebar", height = 300),
br(),
downloadButton("dl_sharebar", "Download", class = "butt"),
strong("Only available without agency filter"),
br()
),
shinydashboard::box(
title = "Procurement by agency - raw", width = 3,
status = "primary", solidHeader = TRUE,
br(),
tableOutput("sharetable"),
br(),
downloadButton("dl_sharetab", "Download", class = "butt")
)
)
)
)
# End of UI
|
bf8941924f93360f549b58350524b782db20c386
|
e4cef8cae061e7abb76316b76848105d90ae0205
|
/run_analysis.R
|
d777ac3581c8721b0c0ae47317e81fb0ed7766f2
|
[] |
no_license
|
highsounding/Getting_and_Cleaning_Data
|
bdf3ca38cb16e410230441e73bbcfcf8650e4fc5
|
f5a1ffc8237f06f3dce1dd1e02a6ab4552bfc818
|
refs/heads/master
| 2021-08-26T07:29:53.651670
| 2017-11-22T09:15:16
| 2017-11-22T09:15:16
| 111,662,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,289
|
r
|
run_analysis.R
|
library(dplyr)
# build the tidy data set
# Build the tidy combined data set from the UCI HAR files.
#
# Reads features.txt and activity_labels.txt from the working directory plus
# the train/ and test/ subdirectories, keeps only features whose names contain
# "mean" or "std", attaches descriptive activity labels and subject IDs, and
# returns the train + test rows combined, ordered by subject.
#
# Returns: a data frame with columns activity, subject, and one cleaned
# (lower-case, bracket-free) column per mean/std feature.
# NOTE(review): assumes the working directory is the UCI HAR Dataset root;
# no existence checks are performed on the input files.
build_full_data <- function(){
feature <- read.table("features.txt") # read feature items
activity <- read.table("activity_labels.txt") # read descriptive activity names
mean_or_std_seq <- grep('mean|std',feature$V2) # extract items related to "mean" or "std" from features
mean_or_std_names <- feature$V2[mean_or_std_seq] # get descriptive names for "mean" or "std" features
mean_or_std_names <- tolower(gsub('\\(|\\)','',mean_or_std_names)) # remove brackets and convert all characters to lower
train_label <- read.table("train/y_train.txt") # read label info for train set
train_subject <- read.table("train/subject_train.txt") # read subject info for train set
train_temp <- cbind(train_label,train_subject) # combine activity and subject info into single data frame
names(train_temp) <- c("activity","subject") # add descriptive column names
train_temp[,'activity'] <- activity[train_temp[,'activity'],'V2'] # update activity value to descriptive factors
training_set <- read.table("train/X_train.txt") # read train data set to data frame with 561 columns
part_training_set <- select(training_set,mean_or_std_seq) # extract only the columns related to "mean" or "std"
names(part_training_set) <- mean_or_std_names # add corresponding feature names for columns
train <- cbind(train_temp,part_training_set) # combine all columns into single data frame which contain all info of train set
# similar as above code, just replace "train" with "test" to construct test data frame
test_label <- read.table("test/y_test.txt")
test_subject <- read.table("test/subject_test.txt")
test_temp <- cbind(test_label,test_subject)
names(test_temp) <- c("activity","subject")
test_temp[,'activity'] <- activity[test_temp[,'activity'],'V2']
testing_set <- read.table("test/X_test.txt")
part_testing_set <- select(testing_set,mean_or_std_seq)
names(part_testing_set) <- mean_or_std_names
test <- cbind(test_temp,part_testing_set)
full_data <- rbind(train,test) # combine train and test data to build the final full data frame
ordered_data <- full_data[order(full_data$subject),] # order data set by subject
ordered_data
}
final_data <- build_full_data() # build the tidy full data set

# Second tidy data set: the average of every measurement column (3:81) for
# each subject/activity pair.
# BUG FIX: the original accumulator started as `average <- NA` and tested
# `if (is.na(average))` each pass — once `average` becomes a data frame that
# condition has length > 1 and errors on R >= 4.2. Collect one row per pair
# in a pre-grown list and rbind once at the end (also avoids O(n^2) growth).
avg_rows <- list()
for (s in unique(final_data$subject)) {
  for (a in unique(final_data$activity)) {
    # Subset of the full data for this subject/activity combination.
    temp <- filter(final_data, subject == s & activity == a)
    newline <- temp[1, ]             # template row keeps subject/activity values
    for (i in 3:81) {
      newline[1, i] <- mean(temp[, i])  # column-wise average of the measures
    }
    avg_rows[[length(avg_rows) + 1]] <- newline
  }
}
average <- do.call(rbind, avg_rows)
# Prefix every measurement column name so averaged columns are identifiable.
names(average)[3:81] <- sub('^', 'avg-', names(average)[3:81])
average
|
02780ccef702e746824b98c622002c01deca51ce
|
c685bdc4bebe1624a0fdc9bb956862889a3ec03c
|
/MultiSppBiomassAllometric.R
|
a41f6a8d25d3a55fc9bfcdfd3092a202d59a49e2
|
[] |
no_license
|
derek-corcoran-barrios/RtoAmpl
|
dffd0db812d86b74a80847ae0b8db2eb0f1c861c
|
62a2f07b87f2b6d11eb3222ade6e7f1ad9b642dd
|
refs/heads/master
| 2020-06-26T04:46:16.622097
| 2017-11-24T17:28:58
| 2017-11-24T17:28:58
| 97,002,573
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,831
|
r
|
MultiSppBiomassAllometric.R
|
library(gdistance)
library(RtoAmpl)
# Maximum dispersal distance for body mass `m`, scaled allometrically.
# The trailing 10 * 1000 factor presumably converts the base units to
# metres — confirm against the source of the 3.31 * m^0.65 relationship.
DistMax <- function(m) {
  scaled <- 3.31 * (m^0.65)
  scaled * 10 * 1000
}
# Allometric density scaling for body mass `m`; algebraically 4.23 * m^0.25,
# kept in the original factored form for bit-identical results.
Density <- function(m) {
  mass_scaling <- m^(-0.75)
  m * 4.23 * mass_scaling
}
# Build a dispersal-connectivity table for every cell pair of `Raster` that
# lies within DistMax(m) of each other, replicated over `Time` time steps.
#
# Arguments:
#   Raster - raster layer defining the landscape grid.
#   m      - body mass; DistMax(m) caps the connection distance.
#   Time   - number of time steps to replicate the table for (default 7).
#
# Returns: data frame with columns from, to, beta (row-normalised dispersal
# weight) and Time (0 .. Time-1).
# NOTE(review): `rBind`/`cBind` inside accCost2 are defunct in recent Matrix
# releases — use rbind/cbind there before upgrading. Depends on gdistance,
# igraph and dplyr being attached.
DistConect2 <- function(Raster, m, Time = 7){
#First we make a transition layer with the function transition from gdistance
h16 <- transition(Raster, transitionFunction=function(x){1},16,symm=FALSE)
#Then geocorrect for projection
h16 <- geoCorrection(h16, scl=FALSE)
#Since transition layers work with XY rather than IDs, get a matrix of XY coordinates
B <- xyFromCell(Raster, cell = 1:ncell(Raster))
#Start a list
connections <- list()
#For each pair of cells in B
# Local helper: accumulated least-cost distance from one origin coordinate to
# every cell, via an extra "start" node grafted onto the transition graph.
accCost2 <- function(x, fromCoords) {
fromCells <- cellFromXY(x, fromCoords)
tr <- transitionMatrix(x)
tr <- rBind(tr, rep(0, nrow(tr)))
tr <- cBind(tr, rep(0, nrow(tr)))
startNode <- nrow(tr)
adjP <- cbind(rep(startNode, times = length(fromCells)), fromCells)
tr[adjP] <- Inf
adjacencyGraph <- graph.adjacency(tr, mode = "directed", weighted = TRUE)
E(adjacencyGraph)$weight <- 1/E(adjacencyGraph)$weight
return(shortest.paths(adjacencyGraph, v = startNode, mode = "out")[-startNode])
}
for (i in 1:nrow(B)){
#Create a temporal raster for each row with the distance from cell xy to all other cells
temp <- accCost2(h16,B[i,])
# Keep only the cells reachable within the mass-dependent maximum distance.
index <- which(temp < DistMax(m))
connections[[i]] <- cbind(i, index, temp[index])
}
#Get everything together as a large data frame
connections <- do.call("rbind", connections)
connections <- as.data.frame(connections)
colnames(connections) <- c("from", "to", "dist")
# Exponential distance decay, then normalise so each origin's betas sum to 1.
connections$Beta <- exp(-(connections$dist/max(connections$dist)))
b <- connections %>% group_by(from) %>% summarize(TotalBeta = sum(Beta))
connections <-merge(connections, b)
connections$beta <-connections$Beta /connections$TotalBeta
#connections<- dplyr::filter(connections, beta > quantile(beta, 0.05))
connections <- connections[,c(1,2,6)]
# Replicate the (from, to, beta) table once per time step, tagged 0..Time-1.
n <- nrow(connections)
connections <- do.call("rbind", replicate((Time), connections, simplify = FALSE))
connections$Time <- rep(c(0:(Time-1)), each =n)
return(connections)
}
# Load the example layers shipped with RtoAmpl and place them on a common
# spatial extent; define the species stacks and the run-wide parameters.
library(RtoAmpl)
data(Cost)
costlayer <- Cost
data("univariate")
data("bivariate")

# All rasters share the same study extent (hoisted so it is defined once).
study_extent <- c(-2.5, 2.5, -2.5, 2.5)
extent(Cost) <- study_extent
extent(univariate) <- study_extent
extent(bivariate) <- study_extent

# Species stacks keyed by species code; `mass` gives body mass per species
# in the same order as Stacklist.
Stacklist <- list(univariate, bivariate)
names(Stacklist) <- c("SPA", "SPB")
Threshold <- 0.0  # fixed: use `<-`, not `=`, for top-level assignment
mass <- c(25, 50)
# Mass-dependent maximum alpha. The inner factor looks like a per-day rate
# scaled by 3650 (presumably ten years of days) — confirm with the allometric
# source before relying on the units.
alphamax <- function(m) {
  per_step <- 3.5 * 7.5 * (10^-4) * (m^-0.25)
  per_step * 3650
}
# Write an AMPL data file (<name>.dat) describing a multi-species biomass
# optimisation problem, and return the intermediate tables.
#
# Arguments:
#   Stacklist - named list of RasterStacks, one per species; layer i is the
#               suitability surface at time step i-1.
#   name      - basename of the .dat file written to the working directory.
#   Threshold - suitability cutoff used to build the valid-cell mask.
#   costlayer - raster of acquisition costs; its NA pattern defines the mask.
#   mass      - body mass per species, same order as Stacklist; drives the
#               alphamax()/Density() allometric scalings.
#   N         - target population size; the bf parameter is mass * N.
#
# Side effect: writes <name>.dat via sink(). Returns list(Nodos, Biomasa,
# Alphas, Alpha).
# NOTE(review): the beta section assumes exactly two species (Beta[[1]] is
# joined to Beta[[2]]) and hard-codes `Time != 7`; `TempRaster[i] <- sum(...)`
# likely wants `[[i]]`. Confirm before reusing with other inputs.
multisppBiomass <- function(Stacklist, name = "Stack", Threshold, costlayer, mass = c(50,50), N = 4169){
# --- Mask: keep only cells with a defined cost, then drop cells that never
# reach Threshold in any species/layer. ---
Masklayer <- costlayer
values(Masklayer) <- ifelse(is.na(values(Masklayer)), NA, 1)
TempRaster <- list()
for (i in 1:length(Stacklist)){
Stacklist[[i]] <- Stacklist[[i]] * Masklayer
TempStack <- Stacklist[[i]]
values(TempStack)[values(TempStack) < Threshold] = 0
values(TempStack)[values(TempStack) >= Threshold] = 1
TempRaster[i] <- sum(TempStack)
}
TempRaster <- do.call("sum", TempRaster)
TempRaster[values(TempRaster) > 0] = 1
TempRaster[values(TempRaster) == 0] = NA
for (i in 1:length(Stacklist)){
Stacklist[[i]] <- Stacklist[[i]]*TempRaster
}
# --- Alpha: per species/cell/time suitability scaled by alphamax(mass). ---
Alphas <- list()
for(j in 1:length(Stacklist)){
Alpha <- list()
for (i in 1:nlayers(Stacklist[[j]])){
temp <- data.frame(Alpha = values(Stacklist[[j]][[i]]), ID = 1:length(values(Stacklist[[j]][[i]])), Time = i-1)
Alpha[[i]] <- temp[complete.cases(temp),]
}
Alphas[[j]]<- do.call("rbind", Alpha)
Alphas[[j]]$Spp <- names(Stacklist)[j]
Alphas[[j]]$Alpha <- Alphas[[j]]$Alpha*alphamax(mass[j])
}
Alpha <- do.call(rbind, Alphas)
# Keep only nodes (cell IDs) with non-zero total alpha across species/times.
s <- Alpha %>% group_by(ID) %>% summarise(SUMA = sum(Alpha)) %>% filter(SUMA > 0)
Alpha <- Alpha[Alpha$ID %in% s$ID,]
Spps <- unique(Alpha$Spp)
Nodos <- unique(Alpha$ID)
# Reshape Alpha to AMPL "wide" form: one row per [Spp,ID,*] with one column
# per time step; the last column carries an explicit newline terminator.
Alphas <- list()
for (i in Spps){
Alphas[[i]] <- dplyr::filter(Alpha, Spp == i)
temp <- split(Alphas[[i]], Alphas[[i]]$Time)
Alphas[[i]] <- do.call(cbind, lapply(1:length(temp), function(i){
if (i == 1){
setNames(data.frame(paste("[",temp[[i]][["Spp"]],",",temp[[i]][["ID"]], ",*","]", sep = ""), temp[[i]]["Time"], temp[[i]]["Alpha"]),
c("V", paste("T", i, sep = ""), i-1))
} else if (i == length(temp)){
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["Alpha"], rep("\n", NROW(temp[[i]]))),
c(paste("T", i, sep = ""), i-1, "line"))
} else {
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["Alpha"]),
c(paste("T", i, sep = ""), i-1))
}
}))
}
Alpha <-do.call("rbind", Alphas)
# --- b0: initial biomass per species/cell from the first layer, scaled by
# allometric density and mean cell area. ---
Biomasa <- list()
for (i in 1:length(Stacklist)){
Biomasa[[i]] <- data.frame(Sp = rep(Spps[i], times = length(values(Stacklist[[i]][[1]]))), ID = 1:length(values(Stacklist[[i]][[1]])), Biomasa = values(Stacklist[[i]][[1]])*Density(mass[i])*cellStats(area(costlayer), mean))
Biomasa[[i]] <- Biomasa[[i]][complete.cases(Biomasa[[i]]),]
Biomasa[[i]]$Biomasa <- round(Biomasa[[i]]$Biomasa, 4)
Biomasa[[i]]$Sp <- Spps[i]
}
Biomasa <- do.call(rbind, Biomasa)
Biomasa$line <- "\n"
Biomasa$ID <- paste0("[",Biomasa$Sp, ",", Biomasa$ID, "]")
Biomasa <- Biomasa[,-1]
# --- u: carrying capacity per species/cell/time, same density scaling. ---
Capacidades <- list()
for(j in 1:length(Stacklist)){
Capacidad <- list()
for (i in 1:nlayers(Stacklist[[j]])){
temp <- data.frame(Capacidad = values(Stacklist[[j]][[i]])*Density(mass[j])*cellStats(area(costlayer), mean), ID = 1:length(values(Stacklist[[j]][[i]])), Time = i-1)
Capacidad[[i]] <- temp[complete.cases(temp),]
}
Capacidades[[j]]<- do.call("rbind", Capacidad)
Capacidades[[j]]$Spp <- names(Stacklist)[j]
}
Capacidad <- do.call(rbind, Capacidades)
Capacidad <- Capacidad[Capacidad$ID %in% s$ID,]
# Same wide reshape as for Alpha above.
Capacidades <- list()
for (i in Spps){
Capacidades[[i]] <- dplyr::filter(Capacidad, Spp == i)
temp <- split(Capacidades[[i]], Capacidades[[i]]$Time)
Capacidades[[i]] <- do.call(cbind, lapply(1:length(temp), function(i){
if (i == 1){
setNames(data.frame(paste("[",temp[[i]][["Spp"]],",",temp[[i]][["ID"]], ",*","]", sep = ""), temp[[i]]["Time"], temp[[i]]["Capacidad"]),
c("V", paste("T", i, sep = ""), i-1))
} else if (i == length(temp)){
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["Capacidad"], rep("\n", NROW(temp[[i]]))),
c(paste("T", i, sep = ""), i-1, "line"))
} else {
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["Capacidad"]),
c(paste("T", i, sep = ""), i-1))
}
}))
}
Capacidad <-do.call("rbind", Capacidades)
# --- c: acquisition cost per retained node; bf: final biomass target. ---
cost <-data.frame(ID = Nodos,cost = values(costlayer)[Nodos])
cost$ID <- paste0("[", cost$ID, "]")
cost$line <- "\n"
BF <- data.frame(Spp = Spps, BF = mass* N, Space = "\n")
# --- beta: dispersal weights per species from DistConect2, restricted to
# retained nodes. Species 1 is right-joined onto species 2's edge set so both
# share the same (from, to) pairs; missing betas become 0. ---
Beta <- list()
for(i in 1:length(Stacklist)){
Beta[[i]] <- DistConect2(Stacklist[[i]][[1]], m = mass[i], Time = nlayers(Stacklist[[i]]))
Beta[[i]] <- dplyr::filter(Beta[[i]], from %in% Nodos, to %in% Nodos)
Beta[[i]]$Sp <- Spps[i]
}
Beta[[1]] <-right_join(Beta[[1]], Beta[[2]][,c(1,2,4)])
Beta[[1]]$beta <- ifelse(is.na(Beta[[1]]$beta), 0, Beta[[1]]$beta)
Beta[[1]]$Sp <- ifelse(is.na(Beta[[1]]$Sp), names(Stacklist)[1], Beta[[1]]$Sp)
Beta <- do.call(rbind, Beta)
Beta <- dplyr::filter(Beta, Time != 7)
temp <- split(Beta, Beta$Time)
Betas <- do.call(cbind, lapply(1:length(temp), function(i){
if (i == 1){
setNames(data.frame(paste("[",temp[[i]][["Sp"]], ",",temp[[i]][["from"]], ",", temp[[i]][["to"]], ",*","]", sep = ""), temp[[i]]["Time"], temp[[i]]["beta"]),
c("V", paste("T", i, sep = ""), i-1))
} else if (i == length(temp)){
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["beta"], rep("\n", NROW(temp[[i]]))),
c(paste("T", i, sep = ""), i-1, "line"))
} else {
setNames(data.frame(temp[[i]]["Time"], temp[[i]]["beta"]),
c(paste("T", i, sep = ""), i-1))
}
}))
# --- Emit the AMPL .dat file. sink() redirects all cat() output to disk;
# NOTE(review): an error between sink() and sink() would leave the sink open —
# consider on.exit(sink()). ---
sink(paste0(name, ".dat"))
cat(c("set V :=", Nodos, ";"))
cat("\n")
cat(c("set Sp :=", names(Stacklist), ";"))
cat("\n")
cat(c("set E :=", paste0("(",unique(unite_(Beta, col = "V", sep = ",", from = c("from", "to"))$V), ")"), ";"))
cat("\n")
cat("param T:= 7;")
cat("\n")
cat("param alpha :=")
cat("\n")
cat(do.call(paste, Alpha))
cat(";")
cat("\n")
cat("param beta :=")
cat("\n")
cat(do.call(paste, Betas))
cat(";")
cat("\n")
cat("param b0 :=")
cat("\n")
cat(do.call(paste, Biomasa))
cat(";")
cat("\n")
cat("param u :=")
cat("\n")
cat(do.call(paste, Capacidad))
cat(";")
cat("\n")
cat("param bf := ")
cat("\n")
cat(do.call(paste, BF))
cat(";")
cat("\n")
cat("param c :=")
cat("\n")
cat(do.call(paste, cost))
cat(";")
cat("\n")
sink()
return(list(Nodos = Nodos, Biomasa = Biomasa, Alphas = Alphas, Alpha = Alpha))
}
# ---------------------------------------------------------------------------
# Continuous relaxation: write one AMPL .dat per target population size, then
# read the solver's solution/cost files back, rasterise each solution, plot
# the cost curve and save an animation.
# NOTE(review): setwd() to an absolute, machine-specific path — prefer
# running from the project directory.
# ---------------------------------------------------------------------------
setwd("/home/derek/Documents/PostdocPablo/AMPL")
library(beepr)
Ns <- seq(from = 100, to = 7300, length.out = 73)
for (i in seq_along(Ns)) {
  multisppBiomass(Stacklist = Stacklist, costlayer = Cost, Threshold = 0.5,
                  name = paste0("Multispp", "_", i), N = Ns[i], mass = mass)
  print(i)
  beep(1)
}
# BUG FIX: the original patterns left "." unescaped and unanchored, so
# "Multispp_[0-9].txt" also matched the two-digit files and duplicated them
# in SOLS. Escape the dot and anchor; listing one-digit before two-digit
# files preserves the numeric run order (1..9 then 10..73).
SOLS1 <- list.files(pattern = "^Multispp_[0-9]\\.txt$")
SOLS2 <- list.files(pattern = "^Multispp_[0-9][0-9]\\.txt$")
SOLS <- c(SOLS1, SOLS2)
library(readr)
bla <- list()  # one solution raster per run
for (i in seq_along(SOLS)) {
  DF <- read_delim(SOLS[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DF) <- c("ID", "z")
  temp <- univariate[[1]]
  values(temp) <- NA
  values(temp)[DF$ID] <- DF$z
  bla[[i]] <- temp
}
# Cost files (same escaping/anchoring fix as above).
Costs1 <- list.files(pattern = "^Multispp_[0-9]cost\\.txt$")
Costs2 <- list.files(pattern = "^Multispp_[0-9][0-9]cost\\.txt$")
Costs <- c(Costs1, Costs2)
DF <- list()
for (i in seq_along(Costs)) {
  DF[[i]] <- read_delim(Costs[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DF[[i]]) <- c("bf", "cost")
  DF[[i]]$SP <- c("A", "B")  # one row per species, in Stacklist order
}
DF <- do.call(rbind, DF)
DF <- DF %>% filter(SP == "A") %>% mutate(N = bf/mass[1])
library(ggplot2)
library(dplyr)
DF <- filter(DF, cost > 0)
DF$Buy <- "Continuous"
ggplot(DF, aes(x = cost, y = N)) + geom_line() + theme_classic() + geom_hline(yintercept = 4169, lty = 2, col = "red")
# NOTE(review): assumes DF (filtered to cost > 0) still lines up 1:1 with bla.
animation::saveGIF(for(i in seq_along(bla)){plot(bla[[i]], colNA = "black", main = paste("Target N =", round(DF$N[i],0)))}, movie.name = "Continuous.gif")
# ---------------------------------------------------------------------------
# Binary solutions: same post-processing as the continuous block above, for
# the MultisppBIN_* files; the combined DF feeds the comparison plot.
# ---------------------------------------------------------------------------
# BUG FIX: escape "." and anchor the patterns — the original one-digit
# pattern also matched two-digit files, duplicating entries in SOLS.
SOLS1 <- list.files(pattern = "^MultisppBIN_[0-9]\\.txt$")
SOLS2 <- list.files(pattern = "^MultisppBIN_[0-9][0-9]\\.txt$")
SOLS <- c(SOLS1, SOLS2)
library(readr)
blaBIN <- list()  # one binary-solution raster per run
for (i in seq_along(SOLS)) {
  DFBIN <- read_delim(SOLS[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DFBIN) <- c("ID", "z")
  temp <- univariate[[1]]
  values(temp) <- NA
  values(temp)[DFBIN$ID] <- DFBIN$z
  blaBIN[[i]] <- temp
}
Costs1 <- list.files(pattern = "^MultisppBIN_[0-9]cost\\.txt$")
Costs2 <- list.files(pattern = "^MultisppBIN_[0-9][0-9]cost\\.txt$")
Costs <- c(Costs1, Costs2)
DFBIN <- list()
for (i in seq_along(Costs)) {
  DFBIN[[i]] <- read_delim(Costs[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DFBIN[[i]]) <- c("bf", "cost")
  DFBIN[[i]]$SP <- c("A", "B")  # one row per species, in Stacklist order
}
DFBIN <- do.call(rbind, DFBIN)
DFBIN <- DFBIN %>% filter(SP == "A") %>% mutate(N = bf/mass[1])
library(ggplot2)
library(dplyr)
DFBIN <- filter(DFBIN, cost > 0)
DFBIN$Buy <- "Binary"
ggplot(DFBIN, aes(x = cost, y = N)) + geom_line() + theme_classic() + geom_hline(yintercept = 4169, lty = 2, col = "red")
# Combined continuous + binary cost curves (DF comes from the previous block).
DF <- rbind(DF, DFBIN)
ggplot(DF, aes(x = cost, y = N)) + geom_line(aes(color = Buy)) + theme_classic()+ geom_hline(yintercept = 4169, lty = 2, col = "red")
# NOTE(review): DF now holds both models, so DF$N[i] indexes the continuous
# half only by coincidence of row order.
animation::saveGIF(for(i in seq_along(blaBIN)){plot(blaBIN[[i]], colNA = "black", main = paste("Target N =", round(DF$N[i])))}, movie.name = "Binary.gif")
###############################################################################
# Accuracy: compare rounded continuous solutions against binary solutions
# across a grid of rounding thresholds, select the threshold maximising
# Accuracy / Kappa / TSS, then re-examine costs and per-target accuracy.
###############################################################################
library(RtoAmpl)
setwd("/home/derek/Documents/PostdocPablo/AMPL")
# BUG FIX: escape "." and anchor the patterns — the original one-digit
# pattern also matched two-digit files, duplicating entries in SOLS.
SOLS1 <- list.files(pattern = "^Multispp_[0-9]\\.txt$")
SOLS2 <- list.files(pattern = "^Multispp_[0-9][0-9]\\.txt$")
SOLS <- c(SOLS1, SOLS2)
library(readr)
library(dplyr)
DF <- list()
blaRoundC <- list()
for (i in seq_along(SOLS)) {
  DF[[i]] <- read_delim(SOLS[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DF[[i]]) <- c("ID", "Index")
  DF[[i]]$BF <- i   # run index doubles as the biomass-target label
}
DF <- do.call(rbind, DF)
SOLS1 <- list.files(pattern = "^MultisppBIN_[0-9]\\.txt$")
SOLS2 <- list.files(pattern = "^MultisppBIN_[0-9][0-9]\\.txt$")
SOLS <- c(SOLS1, SOLS2)
library(readr)
DFBIN <- list()
for (i in seq_along(SOLS)) {
  DFBIN[[i]] <- read_delim(SOLS[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DFBIN[[i]]) <- c("ID", "z")
  DFBIN[[i]]$BF <- i
}
DFBIN <- do.call(rbind, DFBIN)
Accuracy <- full_join(DF, DFBIN)
# One 0/1 column per candidate rounding threshold.
Values <- seq(from = 0, to = 1, by = 0.01)
for (i in seq_along(Values)) {
  Accuracy[,4+i] <- ifelse(Accuracy$Index > Values[i], 1, 0)
  colnames(Accuracy)[4+i] <- paste0("Threshold_", Values[i])
}
#Accuracy <- Accuracy %>% mutate(eval(parse(paste0("Threshold_","0.1"))) = ifelse(Index >= 0.1, 1, 0))
library(caret)
ForGraph <- data.frame(Threshold = Values, Accuracy = NA)
for (i in seq_along(Values)) {
  ForGraph$Accuracy[i] <- confusionMatrix(as.numeric(data.matrix(Accuracy[,(4+i)])),reference = Accuracy$z, positive = "1")$overall[1]
  ForGraph$Kappa[i] <- confusionMatrix(as.numeric(data.matrix(Accuracy[,(4+i)])),reference = Accuracy$z, positive = "1")$overall[2]
  ForGraph$TSS[i] <- ((confusionMatrix(as.numeric(data.matrix(Accuracy[,(4 + i)])),reference = Accuracy$z, positive = "1")$byClass[1] + confusionMatrix(as.numeric(data.matrix(Accuracy[,(4 + i)])),reference = Accuracy$z, positive = "1")$byClass[2]) -1)
}
library(tidyr)
# NOTE(review): sample() breaks accuracy ties at random — call set.seed()
# first if the selected threshold must be reproducible.
Selected <- ForGraph %>% summarize(Accuracy = sample(Threshold[Accuracy == max(Accuracy)],1), Kappa = Threshold[Kappa == max(Kappa)], TSS = Threshold[TSS == max(TSS)])
Selected <- gather(Selected)
ForGraph <- dplyr::arrange(ForGraph, desc(Accuracy)) %>% gather(key = Parameter, value = value, -Threshold)
library(ggplot2)
ggplot(ForGraph, aes(x = Threshold, y = value)) + geom_line(aes(color = Parameter)) + theme_classic() + geom_vline(aes(xintercept = value, color = key), lty = 2, data = Selected)
Reoptim <- select(Accuracy, ID, BF, z, contains(as.character(Selected$value[2])))
CostFun <- data.frame(ID = unique(Reoptim$ID), Costo = values(Cost)[unique(Reoptim$ID)])
Reoptim <- left_join(Reoptim, CostFun)
library(tidyr)
# NOTE(review): the Threshold_0.11 / Threshold_0.16 column names below are
# hard-coded from a previous interactive run and will break if Selected
# picks a different threshold.
CurvaCostos <- Reoptim %>% group_by(BF) %>% summarise(Binary = sum(Costo*z), Optimo = sum(Costo*Threshold_0.11)) %>% gather(key = Modelo, value = Costo, -BF)
ggplot(CurvaCostos, aes(x = Costo, y = BF)) + geom_line(aes(color = Modelo)) + theme_classic() #+ geom_hline(yintercept = 4169, lty = 2, col = "red")
library(purrr)
a <- Reoptim %>% split(.$BF) %>% map_chr(~confusionMatrix(.x$Threshold_0.16, reference = .x$z, positive = "1")$overall[1])
AccuracyByBf <- data.frame(Accuracy = a, BF = unique(Reoptim$BF))
AccuracyByBf$Accuracy <- as.numeric(as.character(AccuracyByBf$Accuracy))
ggplot(AccuracyByBf, aes(x= BF, y = Accuracy)) + geom_line() + theme_classic()
### GIF of the continuous solutions rounded at the top-ranked threshold.
SOLS1 <- list.files(pattern = "^Multispp_[0-9]\\.txt$")
SOLS2 <- list.files(pattern = "^Multispp_[0-9][0-9]\\.txt$")
SOLS <- c(SOLS1, SOLS2)
library(readr)
blaAcc <- list()
for (i in seq_along(SOLS)) {
  DF <- read_delim(SOLS[i], " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
  colnames(DF) <- c("ID", "z")
  temp <- univariate[[1]]
  values(temp) <- NA
  values(temp)[DF$ID] <- ifelse(DF$z > ForGraph$Threshold[1], 1, 0)
  blaAcc[[i]] <- temp
}
animation::saveGIF(for(i in seq_along(blaAcc)){plot(blaAcc[[i]], colNA = "black", main = paste("Sp B,", "Final biomass =", i))}, movie.name = "RedondeoAccurate.gif")
##################OptimoContinuo
# Post-processing for the single "Optimo" run at the full target (N = 4169):
# load the continuous and binary solutions, pick rounding thresholds that
# maximise Accuracy / Kappa / TSS, and rasterise each decision map for a
# side-by-side levelplot comparison.
# NOTE(review): SOLS below is computed but never used — the files are read by
# literal name instead.
SOLS <- list.files(pattern = "Optimo_[0-9].txt")
library(readr)
DF <- read_delim("Optimo_1.txt", " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
colnames(DF) <- c("ID", "z")
temp <- univariate[[1]]
values(temp) <- NA
values(temp)[DF$ID] <- DF$z
OptimContRast <- temp
SOLS <- list.files(pattern = "Optimo_[0-9].txt")
library(readr)
DF <- read_delim("OptimoBIN_1.txt", " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
colnames(DF) <- c("ID", "z")
temp <- univariate[[1]]
values(temp) <- NA
values(temp)[DF$ID] <- DF$z
OptimBinRast <- temp
# Costs for the binary optimum (species A only).
library(readr)
DF <- read_delim("OptimoBIN_1cost.txt", " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
colnames(DF) <- c("bf", "cost")
DF$SP <- c("A", "B")
DF <- DF %>% filter(SP == "A") %>% mutate(N = bf/mass[1])
DF <- filter(DF, cost > 0)
DF$Buy <- "Continuous"
DFOptimCostBin <- DF
# Threshold selection for the optimum run (same recipe as the sweep above).
library(RtoAmpl)
setwd("/home/derek/Documents/PostdocPablo/AMPL")
library(readr)
library(dplyr)
DF <- read_delim("Optimo_1.txt", " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
colnames(DF) <- c("ID", "Index")
DF$N <- 4169
DFBIN <- read_delim("OptimoBIN_1.txt", " ", escape_double = FALSE, col_names = FALSE, trim_ws = TRUE)
colnames(DFBIN) <- c("ID", "z")
DFBIN$N <- 4169
Accuracy <- full_join(DF, DFBIN)
# One 0/1 column per candidate rounding threshold.
Values <- seq(from = 0, to = 1, by = 0.01)
for(i in 1:length(Values)){
Accuracy[,4+i] <- ifelse(Accuracy$Index > Values[i], 1, 0)
colnames(Accuracy)[4+i] <- paste0("Threshold_", Values[i])
}
library(caret)
ForGraph <- data.frame(Threshold = Values, Accuracy = NA)
for(i in 1:length(Values)){
ForGraph$Accuracy[i] <- confusionMatrix(as.numeric(data.matrix(Accuracy[,(4+i)])),reference = Accuracy$z, positive = "1")$overall[1]
ForGraph$Kappa[i] <- confusionMatrix(as.numeric(data.matrix(Accuracy[,(4+i)])),reference = Accuracy$z, positive = "1")$overall[2]
ForGraph$TSS[i] <- ((confusionMatrix(as.numeric(data.matrix(Accuracy[,(4 + i)])),reference = Accuracy$z, positive = "1")$byClass[1] + confusionMatrix(as.numeric(data.matrix(Accuracy[,(4 + i)])),reference = Accuracy$z, positive = "1")$byClass[2]) -1)
}
library(tidyr)
# NOTE(review): sample() makes the tie-break non-deterministic — set.seed()
# first if reproducibility matters.
Selected <- ForGraph %>% summarize(Accuracy = sample(Threshold[Accuracy == max(Accuracy)],1), Kappa = sample(Threshold[Kappa == max(Kappa)],1), TSS = sample(Threshold[TSS == max(TSS)],1))
Selected <- gather(Selected)
ForGraph <- dplyr::arrange(ForGraph, desc(Accuracy)) %>% gather(key = Parameter, value = value, -Threshold)
library(ggplot2)
ggplot(ForGraph, aes(x = Threshold, y = value)) + geom_line(aes(color = Parameter)) + theme_classic() + geom_vline(aes(xintercept = value, color = key), lty = 2, data = Selected)
# Rasterise the continuous solution rounded at each selected threshold.
Reoptim <- select(Accuracy, ID, N, z, contains(as.character(Selected$value[1])))
colnames(Reoptim)[4] <- c("Threshold")
temp <- univariate[[1]]
values(temp) <- NA
values(temp)[Reoptim$ID] <- Reoptim$Threshold
RasterAcc <- temp
Reoptim <- select(Accuracy, ID, N, z, contains(as.character(Selected$value[2])))
colnames(Reoptim)[4] <- c("Threshold")
temp <- univariate[[1]]
values(temp) <- NA
values(temp)[Reoptim$ID] <- Reoptim$Threshold
RasterKappa <- temp
Reoptim <- select(Accuracy, ID, N, z, contains(as.character(Selected$value[3])))
colnames(Reoptim)[4] <- c("Threshold")
temp <- univariate[[1]]
values(temp) <- NA
values(temp)[Reoptim$ID] <- Reoptim$Threshold
RasterTSS <- temp
# NOTE(review): SolutionsStack is not defined until further below — this line
# fails on a fresh run; it is a leftover from an interactive session.
plot(SolutionsStack, colNA = "black")
# Convert each 0/1 raster to a factor with "Not buy"/"buy" labels.
OptimBinRast <- as.factor(OptimBinRast)
rat <- levels(OptimBinRast)[[1]]
rat[["decision"]] <- c("Not buy", "buy")
levels(OptimBinRast) <- rat
RasterAcc <- as.factor(RasterAcc)
rat <- levels(RasterAcc)[[1]]
rat[["decision"]] <- c("Not buy", "buy")
levels(RasterAcc) <- rat
RasterKappa <- as.factor(RasterKappa)
rat <- levels(RasterKappa)[[1]]
rat[["decision"]] <- c("Not buy", "buy")
levels(RasterKappa) <- rat
RasterTSS <- as.factor(RasterTSS)
rat <- levels(RasterTSS)[[1]]
rat[["decision"]] <- c("Not buy", "buy")
levels(RasterTSS) <- rat
# Side-by-side categorical maps on a black background.
library(rasterVis)
myTheme <- BTCTheme()
myTheme$panel.background$col = 'black'
SolutionsStack <- stack(OptimBinRast, RasterAcc, RasterKappa, RasterTSS)
names(SolutionsStack) <- c("Binary", "Accuracy", "Kappa", "TSS")
levelplot(SolutionsStack, col.regions=terrain.colors(2), xlab="", ylab="", par.settings = myTheme)
CostFun <- data.frame(ID = unique(Reoptim$ID), Costo = values(Cost)[unique(Reoptim$ID)])
Reoptim <- left_join(Reoptim, CostFun)
names(univariate) <- as.character(paste("Year",seq(2000, 2070, by = 10)))
library(tidyr)
|
a41dfad04c15d0bf9877e2fe3e17f8f4e73f53fb
|
cd51126dceadcfae4873e108a96d634b47fdc9bf
|
/cachematrix.R
|
ad8e4d5feee98c93daec4f2b68441bbfb4fa7a04
|
[] |
no_license
|
Ludmmat/ProgrammingAssignment2
|
1d2283477e38c6829c3f3dd7a4734e3e35337bc0
|
a2ee71babb98278d08062f509873e47d2857033b
|
refs/heads/master
| 2021-01-16T19:01:47.729810
| 2015-02-21T23:06:12
| 2015-02-21T23:06:12
| 31,142,015
| 0
| 0
| null | 2015-02-21T23:06:12
| 2015-02-21T21:37:10
|
R
|
UTF-8
|
R
| false
| false
| 1,704
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# This function creates a special "matrix" object that can cache its inverse.
# Cache container for a matrix and its lazily computed inverse.
# Returns a list of four accessors:
#   set(y)       - replace the matrix and invalidate any cached inverse
#   get()        - return the current matrix
#   setinv(i)    - store a computed inverse in the cache
#   getinv()     - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  replace_matrix <- function(y) {
    x <<- y
    inv <<- NULL  # any cached inverse is stale once the matrix changes
  }
  read_matrix <- function() x
  store_inverse <- function(inverse) inv <<- inverse
  read_inverse <- function() inv
  list(set = replace_matrix,
       get = read_matrix,
       setinv = store_inverse,
       getinv = read_inverse)
}
## Write a short comment describing this function
# cacheSolve function returns the inverse of the matrix. However, it first checks
# to see if the inverse has already been calculated.
# If so, it gets the cache result and skips the computation.
# Otherwise, it calculates the inverse and sets the value in the cache via
# setinverse function.
# Furthermore, this function assumes that the matrix is always invertible.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## Checks the cache first: if the inverse has already been computed it is
## returned directly (after a message) and the computation is skipped.
## Otherwise the inverse is computed with solve(), stored in the cache via
## x$setinv(), and returned.
##
## Assumes the stored matrix is invertible.
##
## Fix: extra arguments `...` are now forwarded to solve(); previously they
## were accepted by the signature but silently ignored.
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
  ## Return a matrix that is the inverse of 'x'
}
##Sample run:
# x <- cbind(c(1,2),c(-2,-3))
# m <- makeCacheMatrix(x)
# m$get()
## No saved (cache) data in the first round
# cacheSolve(m)
## Getting cached data in the second round
# cacheSolve(m)
|
55405a4783b47d02e44bb33b48474c46313a1639
|
339fc5ede2d01f040ca3b2607b48cab6afeb7008
|
/man/SpASamples.Rd
|
31f356f023f82bd0fa7ed03578446dd462a1cc79
|
[] |
no_license
|
cran/stringgaussnet
|
bdb5814d82e6c54f226f4ea06942d5415a9d6b37
|
707d1f5e8d5325413a90f50c2aef454d0134b347
|
refs/heads/master
| 2021-01-19T12:34:04.869401
| 2015-07-22T00:00:00
| 2015-07-22T00:00:00
| 37,529,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
rd
|
SpASamples.Rd
|
\name{SpASamples}
\alias{SpASamples}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Samples description for SpA example data.
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
The example data provided in our package are transcriptomic profiles of MD-DCs from 9 patients with AS and 10 healthy controls. DE gene statistics group 75 genes identified by LIMMA. Samples description is also provided.
}
\usage{data("SpASamples")}
\format{
A data frame with 57 observations on the following 6 variables.
\describe{
\item{\code{chipnum}}{The sample ID}
\item{\code{status}}{Indicates whether the sample comes from an AS patient (Patient) or control (Control)}
\item{\code{LPStime}}{LPS stimulation duration before MD-DCs differentiation: no stimulation (H0), 6 hours (H6) or 24 hours (H24)}
\item{\code{subject}}{Identifier indicating sample duplicates}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
This matrix describes factors attributed to each sample in SpADataExpression, and is used to generate multiple gaussian networks in our package.
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
Talpin, A. et al. Monocyte-derived dendritic cells from HLA-B27+ axial spondyloarthritis (SpA) patients display altered functional capacity and deregulated gene expression. Arthritis Res. Ther. 16, 417 (2014).
}
\examples{
data(SpASamples)
head(SpASamples)
}
\keyword{datasets}
\keyword{internal}
|
63a956833440fecdadec52f727635a9b6a38fa0b
|
ba0d52a9447cc2cedcaacafd8349fc50a32363b5
|
/R/data.R
|
cd6d007cfa8a00caeda2f93f9e288f16281924c7
|
[
"CC0-1.0"
] |
permissive
|
robschick/tangled
|
49590a754531b8e50294abb4d86fcd9cc85d037c
|
e4c0e49fa87802dd39fba01dc4fba5cef25e7b31
|
refs/heads/master
| 2023-04-07T19:24:43.838552
| 2022-05-04T19:11:30
| 2022-05-04T19:11:30
| 33,547,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,119
|
r
|
data.R
|
#' Information about gear carrying time on entangled right whales
#'
#' A dataset generated by Amy Knowlton of the New England Aquarium as part of
#' the PCAD/PCOD project looking at health and the impacts of disturbance on
#' vital rates in North Atlantic right whales. These data are used to establish
#' the "windows" when animals are entangled and in turn to intersect those
#' windows with estimates of health.
#'
#' @format a data frame containing 102 rows and 9 variables: \describe{
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each
#' entanglement event} \item{Date prior w/no gear}{A character variable
#' describing the first date the animal was seen without gear} \item{First
#' date w/gear}{A character variable describing the first date the animal was
#' seen with gear} \item{Last date w/gear}{A character variable describing the
#' last date the animal was seen with gear} \item{line gone}{A character
#' variable describing the first date the animal was seen that the gear is
#' verified to be gone from the animal} \item{min time carrying gear}{Integer
#' denoting the minimum number of days the animal was carrying the gear}
#' \item{max dur}{Integer denoting the maximum number of days the animal was
#' carrying the gear} \item{AmyEdits}{Comments from Amy Knowlton about
#' individual entries she updated and/or inserted} }
'etime'
#' Information about estimated start data of entanglement events
#'
#' A dataset generated by Amy Knowlton of the New England Aquarium
#' as part of the PCAD/PCOD project looking at health and the impacts
#' of disturbance on vital rates in North Atlantic right whales. These
#' data are used to establish the start of "windows" that denote when animals
#' are entangled and in turn to intersect those windows with estimates of health.
#'
#' @format a data frame containing 130 rows and 6 variables:
#' \describe{
#' \item{EntanglementId}{A unique identifier for the entanglement event in the NEAq DIGITS database}
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each entanglement event}
#' \item{StartDate}{A character variable describing the estimated first date of the
#' entanglement event}
#' \item{EndDate}{A character variable describing the observed last date of the
#' entanglement event}
#' \item{EntanglementComment}{Comments from Amy Knowlton about the entanglement events}
#' }
'estStartDates'
#' Information about entanglement events - raw
#'
#'
#'
#' @format A data frame of 1,194 rows and 12 columns:
#' \describe{
#' \item{EntanglementId}{A unique identifier for the entanglement event in the NEAq DIGITS database}
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each entanglement event}
#' \item{StartDate}{A time variable describing the estimated first date of the
#' entanglement event}
#' \item{EndDate}{A time variable describing the observed last date of the
#' entanglement event}
#' \item{EntanglementComment}{Comments from Amy Knowlton about the entanglement events}
#' \item{AgeSex}{Character vector comment describing the age and gender of the animal, e.g. 'Unknown age female', '2 year old male', etc.}
#' \item{TimeFrame}{Amy Knowlton's descriptive comments concerning the time frame and duration of the entanglement event}
#' \item{Severity}{One of three categories of worsening injury: minor, moderate, severe. These are standard definitions created by NEA}
#' \item{gear}{Integer variable describing whether or not the animal is carrying gear as part of the entanglement: 0 for no gear; 1 for gear}
#' \item{LiveStatus}{Character vector indicating whether the animal is 'ALIVE' or 'DEAD'}
#' \item{Gender}{Character descriptor of the animal's sex: "female," "male," or NA}
#' }
'tangleAll'
#' Information about entanglement events - filtered and modified for use in analysis
#'
#' Whereas \code{tangleAll} contains the raw information on the entanglement events, \code{tangleOut} is designed to be used in the analysis of the effects of entanglement on health. This means that the 'windows' have been created according to different rules, the gear status information has been added, and the indices have been added for the windows to facilitate data extraction from the health data as part of the model output.
#'
#' Starting with the \code{StartDate} and \code{EndDate} from the raw data we have the entire window/duration when the animal may have been entangled. However, in reality, these windows can be really long - most often just associated with sighting gaps in the animal's history. In certain cases, then, it makes sense to pare down these windows into something a bit shorter. We have generated a set of rules for both the starting dates as well as the ending dates; the rules are as follows:
#'
#' For any window that is greater than or equal to 3 months, the window gets shortened to 3 months. We do this by evaluating the duration between \code{StartDateWindow} and \code{EndDateWindow}. If that value exceeds 3 months, then a new start date is calculated by subtracting 3 months from the end date.
#'
#' For any window >= 1 & <2 months, the window gets shortened to 1 month. Processing is as above.
#'
#' For any window < 1 month, the window gets lengthened to 1 month. Processing is as above.
#'
#' For any window >= 2 & <3 months, the window gets set to 2 month. Processing is as above.
#'
#' @format A data frame of 910 rows and 22 columns:
#' \describe{
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each entanglement event}
#' \item{StartDate}{A time variable describing the estimated first date of the
#' entanglement event}
#' \item{EndDate}{A time variable describing the end of the entanglement event}
#' \item{Severity}{One of three categories of worsening injury: minor, moderate, severe. These are standard definitions created by NEA}
#' \item{gear}{Integer variable describing whether or not the animal is carrying gear as part of the entanglement: 0 for no gear; 1 for gear}
#' \item{StartDateWindow}{Date of the altered start of the window using rules outlined below}
#' \item{EndDateWindow}{Date of the end of the window - same as \code{EndDate}}
#' \item{LastDatewGear}{Date of the last photographic observation of the whale where it was seen carrying gear}
#' \item{LineGone}{Date of the first time the animal was confirmed to be free of gear}
#' \item{firstSevere}{Date of the first (if any) severe entanglement event experienced by the individual whale. This is used as a flag to make sure we do not include any subsequent events in the health calculations, i.e. once an animal has had a severe event, they are forever impaired.}
#' \item{recov12months}{Date of 12 months past the entanglement event. Note that this means different things for different animals. For animals that are known to carry gear, this is 12 months past either \code{LastDatewGear} or \code{LineGone} (whichever is finite); for non-gear carrying whales this is 12 months beyond \code{EndDateWindow}}
#' \item{firstCalf}{Date format denoting January of the first year during which the whale was observed with a calf. This is used in establishing the before/after calf status of the animal for documenting health}
#' \item{firstCalfidx}{Integer corresponding to the location in \code{myName} of the date in \code{firstCalf}}
#' \item{smonyr}{Character version of \code{StartDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{emonyr}{Character version of \code{EndDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{swindmonyr}{Character version of \code{StartDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{ewindmonyr}{Character version of \code{EndDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{fsevmonyr}{Character version of \code{firstSevere} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{rec12monyr}{Character version of \code{recov12months} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{afterCalf1}{Logical of whether or not the entanglement event is after the first calving event (\code{firstCalf})}
#' }
'tangleOut'
#' Information about entanglement events of reproductively active females
#'
#' Whereas \code{tangleOut} contains all the filtered information on the entanglement events, \code{tangRepro} contains the same information for only the reproductively active females.
#'
#'
#' @format A data frame of 113 rows and 22 columns:
#' \describe{
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each entanglement event}
#' \item{StartDate}{A time variable describing the estimated first date of the
#' entanglement event}
#' \item{EndDate}{A time variable describing the end of the entanglement event}
#' \item{Severity}{One of three categories of worsening injury: minor, moderate, severe. These are standard definitions created by NEA}
#' \item{gear}{Integer variable describing whether or not the animal is carrying gear as part of the entanglement: 0 for no gear; 1 for gear}
#' \item{StartDateWindow}{Date of the altered start of the window using rules outlined below}
#' \item{EndDateWindow}{Date of the end of the window - same as \code{EndDate}}
#' \item{LastDatewGear}{Date of the last photographic observation of the whale where it was seen carrying gear}
#' \item{LineGone}{Date of the first time the animal was confirmed to be free of gear}
#' \item{firstSevere}{Date of the first (if any) severe entanglement event experienced by the individual whale. This is used as a flag to make sure we do not include any subsequent events in the health calculations, i.e. once an animal has had a severe event, they are forever impaired.}
#' \item{recov12months}{Date of 12 months past the entanglement event. Note that this means different things for different animals. For animals that are known to carry gear, this is 12 months past either \code{LastDatewGear} or \code{LineGone} (whichever is finite); for non-gear carrying whales this is 12 months beyond \code{EndDateWindow}}
#' \item{firstCalf}{Date format denoting January of the first year during which the whale was observed with a calf. This is used in establishing the before/after calf status of the animal for documenting health}
#' \item{firstCalfidx}{Integer corresponding to the location in \code{myName} of the date in \code{firstCalf}}
#' \item{smonyr}{Character version of \code{StartDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{emonyr}{Character version of \code{EndDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{swindmonyr}{Character version of \code{StartDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{ewindmonyr}{Character version of \code{EndDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{fsevmonyr}{Character version of \code{firstSevere} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{rec12monyr}{Character version of \code{recov12months} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{afterCalf1}{Logical of whether or not the entanglement event is after the first calving event (\code{firstCalf})}
#' }
'tangRepro'
#' Information about entanglement events of non-reproductively active whales
#'
#' Whereas \code{tangleOut} contains all the filtered information on the entanglement events, \code{tangNonRepro} contains the same information for all the non-reproductively active females. Note that this can include reproductively active females but only for the times \emph{before} they have their first calf. In other words, it contains males of all ages, and females up until they've had a calf. Once they've had a calf, the event gets tallied in \code{tangRepro}
#'
#'
#' @format A data frame of 797 rows and 22 columns:
#' \describe{
#' \item{EGNo}{4 number code that identifies individual right whales}
#' \item{EventNo}{Integer labelling the consecutive number of each entanglement event}
#' \item{StartDate}{A time variable describing the estimated first date of the
#' entanglement event}
#' \item{EndDate}{A time variable describing the end of the entanglement event}
#' \item{Severity}{One of three categories of worsening injury: minor, moderate, severe. These are standard definitions created by NEA}
#' \item{gear}{Integer variable describing whether or not the animal is carrying gear as part of the entanglement: 0 for no gear; 1 for gear}
#' \item{StartDateWindow}{Date of the altered start of the window using rules outlined below}
#' \item{EndDateWindow}{Date of the end of the window - same as \code{EndDate}}
#' \item{LastDatewGear}{Date of the last photographic observation of the whale where it was seen carrying gear}
#' \item{LineGone}{Date of the first time the animal was confirmed to be free of gear}
#' \item{firstSevere}{Date of the first (if any) severe entanglement event experienced by the individual whale. This is used as a flag to make sure we do not include any subsequent events in the health calculations, i.e. once an animal has had a severe event, they are forever impaired.}
#' \item{recov12months}{Date of 12 months past the entanglement event. Note that this means different things for different animals. For animals that are known to carry gear, this is 12 months past either \code{LastDatewGear} or \code{LineGone} (whichever is finite); for non-gear carrying whales this is 12 months beyond \code{EndDateWindow}}
#' \item{firstCalf}{Date format denoting January of the first year during which the whale was observed with a calf. This is used in establishing the before/after calf status of the animal for documenting health}
#' \item{firstCalfidx}{Integer corresponding to the location in \code{myName} of the date in \code{firstCalf}}
#' \item{smonyr}{Character version of \code{StartDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{emonyr}{Character version of \code{EndDate} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{swindmonyr}{Character version of \code{StartDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{ewindmonyr}{Character version of \code{EndDateWindow} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{fsevmonyr}{Character version of \code{firstSevere} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{rec12monyr}{Character version of \code{recov12months} designed to intersect with \code{myName} to peel off the health values in the matrix of estimated health, e.g. '2-1986'}
#' \item{afterCalf1}{Logical of whether or not the entanglement event is after the first calving event (\code{firstCalf})}
#' }
'tangNonRepro'
#' Table describing all of the calving events in the right whale population
#'
#' This table is from Philip Hamilton (New England Aquarium) and contains
#' all the known information regarding calving. Each row contains calving
#' information from the perspective of the mom. There are 4 columns in the data
#' frame as follows:
#' \describe{
#' \item{EGNo}{4 number code that identifies individual right whale mothers}
#' \item{CalvingYear}{Integer noting the year that the calf was born}
#' \item{CalfNo}{The 4 number code identifying the ID of the calf. It's not
#' known in all cases, e.g. '1973CalfOf1002'.}
#' \item{CalvingInterval}{Integer describing the time since the mom last had a
#' calf}
#' }
'calfTable'
|
a153057c3337398ebba5923f3c9d0ed14673ef07
|
49a21578d90cb5cc8a07b3688df255a083364e62
|
/man/hg19.GoNLsnps.Rd
|
2c1a0979cef5d84877f5a85546038372526ffd4e
|
[] |
no_license
|
molepi/omicsPrint
|
953438f720d37b8753fd0f2a03d819244269c6d6
|
ad471131be810ae674f638169d9c7e9391a50689
|
refs/heads/master
| 2021-01-18T03:25:45.509787
| 2018-11-30T08:33:52
| 2018-11-30T08:33:52
| 85,822,289
| 3
| 4
| null | 2017-12-13T20:53:22
| 2017-03-22T11:58:56
|
R
|
UTF-8
|
R
| false
| true
| 3,442
|
rd
|
hg19.GoNLsnps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hg19.GoNLsnps}
\alias{hg19.GoNLsnps}
\title{Dataframe with overlaps GoNL variants and 450K probes}
\format{A data frame with 207866 rows and 19 variables:
\describe{
\item{CHROM}{chromosome, X and Y chromosomes are not available,
since they are not included in this GoNL release}
\item{probe}{probe ID}
\item{type}{Infinium probedesign}
\item{strand}{orientation of the probe}
\item{probeType}{whether the probe measures a CpG-site (cg) or
a non-CpG site (ch)}
\item{location_c}{Location of the queried 'C' of the CpG dinucleotide.
Note that this is the location of the C that is actually measured.
For probes that interrogate the reverse strand (plus-strand probes)
this is one base downstream of the C nucleotide on the forward strand}
\item{location_g}{Location of the G nucleotide of the CpG dinucleotide.
Note that this is the location of the queried G. For probes that
interrogate the reverse strand (plus-strand probes) this is one base
upstream of the G nucleotide on the forward strand}
\item{ID}{SNP ID}
\item{snpBeg}{Start coordinate of the variant. Identical to snpEnd for
SNPs.}
\item{snpEnd}{End coordinate of the variant. Identical to snpBeg for SNPs}
\item{AF}{Allele frequency of alternative allele}
\item{REF}{Reference allele}
\item{ALT}{Alternative allele}
\item{FILTER}{Filter information from GoNL.}
\item{MAF}{Minor allele frequency}
\item{variantType}{SNP or INDEL}
\item{distance_3end}{Distance between SNP and 3'end of the probe. For type
I probes the 3'end of the probe coincides with the queried C
nucleotide. For type II probes the 3'end of the probe coincides with
the G nucleotide directly after the C nucleotide.}
\item{distance_c}{Distance from queried C nucleotide. A distance of -1
indicates that the SNPs overlaps the SBE-position for type I probes.}
\item{channel_switch}{Indicates whether a variant in the SBE-location of
type I probes causes a color-channel-switch or overlap with an INDEL.
For plus-strand probes C/T, C/A and C/G SNPs are expected to cause a
color-channel switch. For min-strand probes A/G, G/T and C/G SNPs are
expected to cause a color-channel switch.}
}}
\source{
\url{http://zwdzwd.github.io/InfiniumAnnotation}
\url{https://molgenis26.target.rug.nl/downloads/gonl_public/variants/release5/}
}
\usage{
data(hg19.GoNLsnps)
}
\description{
Dataframe containing all SNPs and short INDELS from GoNLv5 that
overlap with 450K probes. This release does not include X and Y
chromosomes, so only information for autosomal probes is
available. For each overlap there is an unique row. Consequently,
some probes are duplicated (probes that overlap with multiple
variants) and some variants are duplicated (some variants overlap
with more than one probe).
}
\examples{
data(hg19.GoNLsnps)
# Select variants that overlap with queried C nucleotide
snps_c <- hg19.GoNLsnps[hg19.GoNLsnps$distance_c == 0, ]
# Select all INDELS
indels <- hg19.GoNLsnps[hg19.GoNLsnps$variantType == "INDEL",]
# Select SNPs that cause a channel-switch
channel_switch <- hg19.GoNLsnps[!is.na(hg19.GoNLsnps$channel_switch)
& hg19.GoNLsnps$channel_switch == "Yes",]
}
\keyword{datasets}
|
291d256e405483fab3e746f1153a61062d79d7a6
|
6019c5cd51519eacccb2c16d0c64c00fdeeef96c
|
/Basics/R Data Types, Arithmetic & Logical Operators.R
|
90211d9ee52da9298b8178ece2021048be979e66
|
[] |
no_license
|
manikandansubramanian562/R---Programming-Datascience-
|
227d6f756191bf9548f56496f213c8f1fe34f6cc
|
44dda1c4dbc864b54e4cd449174ab9a832f674e4
|
refs/heads/master
| 2020-08-09T22:01:30.414663
| 2019-10-16T03:29:47
| 2019-10-16T03:29:47
| 214,185,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,803
|
r
|
R Data Types, Arithmetic & Logical Operators.R
|
"""R Data Types, Arithmetic & Logical Operators"""
#Basic data types
x <- 32
y <-12 #
z <- x-y
class(z)
print(z)
##Character and Boolean type............................................................
m <- "R is Fantastic"
class(m)
a <- TRUE
class(a)
## Variables............................................................................
# Printable variable
X <- 100
print(X)
Y <- 200
print(y)
print(Y-X)
##Vectors.................................................................................
"""A vector is a one-dimensional array. We can create a vector with all
the basic data type we learnt before. The simplest way to build a vector in R,
is to use the c command."""
vec_num <- c(10,1,49) #Numerical
print(vec_num)
vec_char <- c("a","b","c","d") #Character
print(vec_char)
vec_bool <- c(TRUE,FALSE,TRUE) #Boolean
print(vec_bool)
##We can do arithmetic calculations on vectors.
vec_1 <- c(1,2,3)
vec_2 <- c(4,5,6)
result <- vec_1 + vec_2
print(result)
##Slicing
slicing_vec <- c(1,2,3,4,5,6,7,8)
slicing_vec[1:5]
"""The shortest way to create a range of value is to use the: between two numbers.
For instance, from the above example, we can write c(1:10) to create a vector
of value from one to ten."""
c(1:10)
##Arithmetic Operators............................................................
3+4 #Addition
5-2 #Subtraction
5*5 #Multiplication
(2+3)/2 #Division
5^2 #Exponentiation
28%%4 #Modulo (Reminder)
##Logical Operators..............................................................
logical_vec <- c(0:10)
logical_vec>5
logical_vec[(logical_vec>5)] #print all values greater than 5
logical_vec[(logical_vec>4 & logical_vec<7)] #print all values greater 4 and lesser 7
|
0177318cb09649e4b09f6aa52883c96c224ecac6
|
246189c0e240e174b9ca74e2a42bfecee79cc9e1
|
/man/createClonalReproTransitionMatrix.Rd
|
672609bf53662f76b779ab1d30de42c1e43c013a
|
[] |
no_license
|
ksauby/GTMNERRproc
|
f3bcd140578d710c9b013da83d9ac8d08e781eee
|
fd5a073d5fd2690b6fde64a0313d1a3fdfe07645
|
refs/heads/master
| 2021-04-06T13:03:29.008590
| 2017-11-15T20:35:53
| 2017-11-15T20:35:53
| 83,352,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 940
|
rd
|
createClonalReproTransitionMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createClonalReproTransitionMatrix.R
\name{createClonalReproTransitionMatrix}
\alias{createClonalReproTransitionMatrix}
\title{Create Clonal Reproduction Transition Matrix}
\usage{
createClonalReproTransitionMatrix(clonal_repro_dat, TMdata, stages)
}
\arguments{
\item{clonal_repro_dat}{This dataset is created by the calculateClonalReproduction function. Each row represents information for a unique offspring. Each row contains the identity of the parent, the year, the size of the offspring, and the size of the parent.}
\item{TMdata}{Survey data used to create transition matrix.}
% NOTE(review): `stages` appears in \usage but was undocumented; description inferred -- verify.
\item{stages}{Stages among which individuals transition.}
}
\value{
Returns a list in which the first element is the count of the number of individuals in each stage-stage transition and the second element contains the rates at which individuals transition among stages.
}
\description{
Create Clonal Reproduction Transition Matrix
}
|
2e5ae73cc2d2c3c0378386c36d427775e12c5b9a
|
6340b17f789b8c0e497f6905939616385bf13142
|
/ProbableBirdFunction.R
|
2e15652cb31ee247ef89eec9086f349a3997b129
|
[] |
no_license
|
nheer/eBird_LikelyNextBird
|
f3c20dbca45b8c968c82377287e3ed2c27a8b4f7
|
675efd150c0572afe09546d7bb852b0870868714
|
refs/heads/master
| 2021-07-09T10:35:08.097799
| 2017-10-02T15:07:19
| 2017-10-02T15:07:19
| 105,548,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,072
|
r
|
ProbableBirdFunction.R
|
# Function plots top ten most likely birds seen given the observation of an input species
# Inputs: ConditionalProb is a matrix of conditional probability relationships between bird species
# IndividualProb is a table of the likelihood of observing each species
# SpeciesName is a string of format "House Finch" on which the output is conditioned
# NumSpecies is an integer giving the number of species to plot
library("plotly")
# Plot the NumSpecies most likely birds to be seen given an observation of
# SpeciesName, comparing each species' conditional probability against its
# unconditional (prior) observation probability.
#
# Args:
#   ConditionalProb: matrix of conditional probabilities between species;
#     rows and columns are named by species (row lookup is by SpeciesName).
#   IndividualProb: named table/vector of each species' unconditional
#     observation probability.
#   SpeciesName: string such as "House Finch" on which the output is
#     conditioned.
#   NumSpecies: integer number of species to plot.
#
# Returns: a plotly plot object, or an error string when SpeciesName is not
# present in IndividualProb.
ProbableSpecies <- function(ConditionalProb, IndividualProb, SpeciesName, NumSpecies) {
  # Guard against species missing from the data (e.g. misspellings).
  # NOTE(review): returns a plain string rather than signalling an error;
  # callers must check the return type.
  if(!(SpeciesName %in% names(IndividualProb))){
    return("Species not in list of observations")
  }
  # Row for the input species holds the conditional probability of seeing
  # every other species alongside it.
  SpeciesVector <- ConditionalProb[SpeciesName, ]
  # Take the NumSpecies+1 largest values, then drop the first entry, which is
  # assumed to be the input species itself -- TODO confirm the self entry is
  # always the maximum of its own row.
  NewProb <- head(sort(SpeciesVector, TRUE),NumSpecies+1)
  NewProb <- NewProb[2:(NumSpecies+1)]
  # Prior (unconditional) probabilities for the same species, same order.
  PriorProb <- as.vector(IndividualProb[names(NewProb)])
  TopSpecies <- names(NewProb)
  NewProb <- as.vector(NewProb)
  # Data frame holding prior and conditional probabilities side by side.
  CompareSpecies <- data.frame(NewProb, PriorProb, TopSpecies)
  # Order the species factor by conditional probability so the y-axis of the
  # plot is sorted.
  CompareSpecies$TopSpecies <- reorder(x = CompareSpecies$TopSpecies, X = CompareSpecies$NewProb, FUN = sum)
  # Dot plot: grey markers = prior probability, blue markers = conditional
  # ("new") probability, one row per species.
  plotlabel <- paste("eBird observation probability given", SpeciesName, "observation")
  p <- plot_ly(CompareSpecies, x = CompareSpecies$PriorProb, y = CompareSpecies$TopSpecies,
               name = "Prior probability", type = 'scatter',
               mode = "markers", marker = list(color = "#9CA8AB"))
  p <- add_trace(p, x = CompareSpecies$NewProb, y = CompareSpecies$TopSpecies,
                 name = "New probability", type = 'scatter', mode = "markers",
                 marker = list(color = "#33D1FF"))
  p <- layout(p, title = plotlabel,
              xaxis = list(title = "Probability of observation"), yaxis = list(title = ""),
              margin = list(l = 175))
  return(p)
}
|
dc9d56089bc9723540d8dc9ec39f020c42765a7a
|
3fc3964396f8010aae9345d37f551c4431c52ff9
|
/R/trac_all.R
|
23db4748ba5264d6e9a45e26dd8e71d5c3d21511
|
[] |
no_license
|
muschellij2/freesurfer
|
ff96f465ebbfbb0b7ce18644be5f4c5ea753fc45
|
7d70f616e760d8d3a453a652d98756e34877fed7
|
refs/heads/master
| 2021-06-24T00:57:12.644687
| 2020-12-08T18:41:34
| 2020-12-08T18:41:34
| 67,370,835
| 9
| 8
| null | 2020-11-15T23:42:38
| 2016-09-04T22:12:47
|
R
|
UTF-8
|
R
| false
| false
| 649
|
r
|
trac_all.R
|
#' @title Tract Reconstruction Helper for trac-all from Freesurfer for All Steps
#' @description Thin wrapper around \code{tracker} that runs the Freesurfer
#' \code{trac-all} pipeline for all steps, forwarding every argument
#' unchanged.
#'
#' @param infile Input filename (dcm or nii)
#' @param outdir Output directory
#' @param subjid subject id
#' @param verbose print diagnostic messages
#' @param opts Additional options
#'
#' @return Result of \code{\link{system}}
#' @export
trac_all <- function(
  infile,
  outdir = NULL,
  subjid,
  verbose = TRUE,
  opts = ""
) {
  # Collect the arguments by name and delegate to the shared tracker()
  # helper, which performs the actual trac-all invocation.
  tracker_args <- list(
    infile = infile,
    outdir = outdir,
    subjid = subjid,
    verbose = verbose,
    opts = opts
  )
  do.call(tracker, tracker_args)
}
|
600e53459023a78c008307be2f66ab9a702318ba
|
0c46573b5cfeea338493749c2d12b2650b531406
|
/man/extract_drop_out_from_df.Rd
|
442d7f7af3ad30fb01289dc1787c8f19ca48c876
|
[] |
no_license
|
cran/dropR
|
42a5f4450594995a5fe0bf18a9be9f08821a6a39
|
9781361dd2f39b54c98d19632b0094fe75db1ae9
|
refs/heads/master
| 2021-01-19T09:05:46.525033
| 2015-01-01T00:00:00
| 2015-01-01T00:00:00
| 28,707,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 492
|
rd
|
extract_drop_out_from_df.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{extract_drop_out_from_df}
\alias{extract_drop_out_from_df}
\title{Extract Drop Out from a Data.Frame}
\usage{
extract_drop_out_from_df(df, q_pos)
}
\arguments{
\item{df}{a data.frame}
\item{q_pos}{columns that contain questions}
}
\description{
Find drop out in a data.frame that contains multiple
questions that have been asked sequentially.
}
\examples{
data(dropout)
dropout$pos <- extract_drop_out_from_df(dropout,2:10)
dropout$pos
}
|
315445644771cf49c5a9c52accd592af191b6993
|
8bd71959e8cbf8335ed632581b3194d668e45739
|
/R/train_predict.R
|
1fb3306d144747a8495639ae86469391bd90b1bb
|
[] |
no_license
|
Yael-Travis-Lumer/KMforCSD
|
393ccbefd2bd5684f82e4fcddde35041a7ac7e9a
|
747deae9e90ecc29697855c3bfa5b28f4ecb97b5
|
refs/heads/master
| 2023-05-09T05:01:09.000865
| 2021-06-01T13:07:12
| 2021-06-01T13:07:12
| 280,110,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,097
|
r
|
train_predict.R
|
#' Kernel Machine for Current Status Data - Training
#'
#' \code{KMforCSD} returns the KM vector of coefficients, together with the kernel function and the support vectors.
#'
#' @param data A data frame consisting of the p-dimensional covariates Z, the current status indicator vector delta, and the censoring times C.
#' @param cost A numeric non-negative hyper-parameter. The cost parameter defines a trade-off between model fit and regularization, and should be fine-tuned for best results. Default value is cost=1. Note that cost=1/(n*lambda), where n is the sample size and lambda is the KM regularization parameter.
#' @param kernel A string indicating what type of kernel function should be used. Possible values are: "rbfdot", "polydot", "tanhdot", "vanilladot", "laplacedot", "besseldot", "anovadot", "splinedot". These correspond to \code{\link[kernlab]{dots}}. Default value is kernel="rbfdot" which corresponds to a Gaussian RBF kernel.
#' @param gamma A numeric non-negative kernel hyper-parameter; see \code{\link[kernlab]{dots}}. Note that in \code{\link[kernlab]{dots}} gamma is called sigma. We prefer gamma since it is the inverse of twice the RBF kernel width (and sigma is usually reserved for the kernel width).
#' @param scale A numeric non-negative kernel hyper-parameter; see \code{\link[kernlab]{dots}}.
#' @param offset A numeric kernel hyper-parameter; see \code{\link[kernlab]{dots}}.
#' @param degree A positive integer kernel hyper-parameter; see \code{\link[kernlab]{dots}}.
#' @param order A numeric kernel hyper-parameter; see \code{\link[kernlab]{dots}}.
#' @param alpha_cutoff A small positive numeric value indicating the cutoff for the support vectors. Default value is 0.00001. The support vectors are the covariates that correspond to the non-zero coefficients. Due to numerical errors, we assume that any coefficient with absolute value smaller than the cutoff is essentially zero.
#' @param g_unknown Logical - indicating whether the censoring distribution is unknown and needs to be estimated. Default is TRUE and then a kernel density estimate is used to estimate the censoring density g. If FALSE, then the censoring distribution is assumed to be U(0, tau), which only corresponds to the simulated examples in this package.
#' @param misspecification Logical - indicating whether the censoring distribution is misspecified. Default is FALSE. If TRUE, then the censoring distribution is estimated via tau\*Beta(0.9,0.9) instead of tau\*Beta(1,1)=U(0, tau), which corresponds to the distribution of the simulated examples in this package.
#' @return The function returns a list with the kernel machine fitted model. The list contains 4 items: (1) the n-dimensional vector of coefficients alpha, (2) the intercept b, (3) the kernel function used during training, and (4) the training covariates.
#' @examples
#' d <- exp_data(n=100)
#' data <- d[[1]]
#' sol <- KMforCSD(data)
#' @export
KMforCSD <- function(data, cost = 1, kernel = "rbfdot", gamma = 1, scale = 1,
                     offset = 1, degree = 1, order = 1, alpha_cutoff = 0.00001,
                     g_unknown = TRUE, misspecification = FALSE) {
  n <- nrow(data)
  lambda <- 1 / (n * cost)
  delta <- data$delta
  C <- data$C
  cols_remove <- c("C", "delta")
  Z <- as.matrix(data[, !(colnames(data) %in% cols_remove)])
  tau <- max(C)
  # Map the kernel name onto the corresponding kernlab dot-product generator.
  # An unknown kernel name now fails fast instead of silently returning NULL
  # and erroring obscurely inside kernelMatrix() later.
  dotproduct <- switch(kernel,
                       "rbfdot" = kernlab::rbfdot(sigma = gamma),
                       "polydot" = kernlab::polydot(degree, scale, offset),
                       "tanhdot" = kernlab::tanhdot(scale, offset),
                       "vanilladot" = kernlab::vanilladot(),
                       "laplacedot" = kernlab::laplacedot(sigma = gamma),
                       "besseldot" = kernlab::besseldot(sigma = gamma, order, degree),
                       "anovadot" = kernlab::anovadot(sigma = gamma, degree),
                       "splinedot" = kernlab::splinedot(),
                       stop("Unknown kernel: ", kernel, call. = FALSE)
  )
  # Build the regularized linear system: (K + n*lambda*I) alpha + b*1 = V,
  # with the zero-sum constraint on alpha appended as the last row/column.
  K <- kernlab::kernelMatrix(dotproduct, Z)@.Data
  Q <- K + n * lambda * diag(n)
  vec1 <- matrix(1, n, 1)
  vec2 <- rbind(vec1, 0)
  A <- cbind(Q, vec1)
  A <- rbind(A, t(vec2))
  # Censoring density g: kernel density estimate (unknown g), the known
  # U(0, tau) density, or a deliberately misspecified tau*Beta(0.9, 0.9)
  # (used for the package's simulation studies).
  if (!isTRUE(misspecification)) {
    if (isTRUE(g_unknown)) {
      g_hat <- ks::kde(C, eval.points = C)$estimate
    } else {
      g_hat <- 1 / tau
    }
  } else {
    g_hat <- (1 / tau) * stats::dbeta(C / tau, 0.9, 0.9)
  }
  V <- as.matrix((1 - delta) / g_hat, n, 1)
  V <- rbind(V, 0)
  sol <- solve(A, V)
  alpha <- sol[seq_len(n)]
  b <- sol[n + 1]
  # Keep only coefficients whose magnitude exceeds the numerical cutoff.
  SVs_ind <- which(abs(alpha) > alpha_cutoff, arr.ind = TRUE)
  alpha <- alpha[SVs_ind]
  # drop = FALSE keeps a 1 x p matrix when a single support vector remains;
  # the original Z[SVs_ind, ] silently collapsed it to a plain vector.
  SVs <- Z[SVs_ind, , drop = FALSE]
  model <- structure(list(alpha = alpha, b = b, used_kernel = dotproduct,
                          support_vectors = SVs), class = "KMforCSD")
  return(model)
}
#' Kernel Machine for Current Status Data - Prediction
#'
#' \code{predict.KMforCSD} returns the KM decision function, together with the test data (which can be equivalent to the training data).
#'
#' @param object A KMforCSD fitted model. This is the output of \code{\link{KMforCSD}}.
#' @param newdata A data frame consisting of the p-dimensional covariates Z, the current status indicator vector delta, and the censoring times C.
#' @param ... Further arguments, currently ignored (required for S3 method
#'   compatibility with \code{\link[stats]{predict}}).
#' @return The function returns a data frame consisting of newdata, and the kernel machine predictions.
#' @examples
#' d <- exp_data(n=100) #training set
#' sol <- KMforCSD(data=d[[1]]) #training
#' d_test <- exp_data(n=50) #test set
#' new_data <- predict(sol,d_test[[1]]) #prediction
#' decision_function <- new_data$response
#' @export
predict.KMforCSD <- function(object, newdata, ...) {
  # Strip non-covariate columns (censoring time, status indicator, and the
  # true event time "T" when present in simulated data).
  cols_remove <- c("C", "delta", "T")
  Z_new <- as.matrix(newdata[, !(colnames(newdata) %in% cols_remove)])
  # Derive p from the covariate matrix itself rather than ncol(newdata) - 2,
  # which undercounted when an extra "T" column was present.
  p <- ncol(Z_new)
  alpha <- object$alpha
  b <- object$b
  dotproduct <- object$used_kernel
  SVs <- object$support_vectors
  # If a single support vector was stored as a plain vector, rebuild it as a
  # 1 x p row matrix. The original as.matrix(SVs, length(SVs), p) ignored the
  # dimension arguments and produced a p x 1 column matrix instead.
  if (is.null(dim(SVs))) {
    SVs <- matrix(SVs, nrow = 1, ncol = p)
  }
  K_new <- kernlab::kernelMatrix(dotproduct, SVs, Z_new)@.Data
  prediction <- t(K_new) %*% alpha + b
  return_data <- newdata
  return_data$response <- prediction
  return(return_data)
}
|
24134f18520d503b4fb2d5f673fa7ba23242b12b
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/R/elasticache_interfaces.R
|
62dffbc58b54efac3e756893c3342b271b788127
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 230,539
|
r
|
elasticache_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include elasticache_service.R
NULL
# Request marshaller for the ElastiCache AddTagsToResource operation: merges
# the caller's arguments with the declared query-protocol shape via
# paws.common::populate(). Generated code — do not hand-edit the shape.
.elasticache$add_tags_to_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceName = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for ElastiCache AddTagsToResource: declares the
# TagList structure wrapped in "AddTagsToResourceResult". Generated code.
.elasticache$add_tags_to_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list"))), tags = list(type = "structure", resultWrapper = "AddTagsToResourceResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache AuthorizeCacheSecurityGroupIngress:
# three string fields identifying the cache security group and the EC2
# security group being authorized. Generated code.
.elasticache$authorize_cache_security_group_ingress_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupOwnerId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for AuthorizeCacheSecurityGroupIngress: the
# modified CacheSecurityGroup (including its EC2SecurityGroups list) wrapped
# in "AuthorizeCacheSecurityGroupIngressResult". Generated code.
.elasticache$authorize_cache_security_group_ingress_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CacheSecurityGroup = structure(list(OwnerId = structure(logical(0), tags = list(type = "string")), CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), EC2SecurityGroups = structure(list(structure(list(Status = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupOwnerId = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "EC2SecurityGroup", type = "structure"))), tags = list(locationNameList = "EC2SecurityGroup", type = "list")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "AuthorizeCacheSecurityGroupIngressResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache BatchApplyUpdateAction: lists of
# replication-group and cache-cluster ids plus the service update name.
# Generated code.
.elasticache$batch_apply_update_action_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ReplicationGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), CacheClusterIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ServiceUpdateName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for BatchApplyUpdateAction: processed and
# unprocessed update-action lists wrapped in "BatchApplyUpdateActionResult".
# Generated code.
.elasticache$batch_apply_update_action_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProcessedUpdateActions = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), ServiceUpdateName = structure(logical(0), tags = list(type = "string")), UpdateActionStatus = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ProcessedUpdateAction", type = "structure"))), tags = list(locationNameList = "ProcessedUpdateAction", type = "list")), UnprocessedUpdateActions = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), ServiceUpdateName = structure(logical(0), tags = list(type = "string")), ErrorType = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "UnprocessedUpdateAction", type = "structure"))), tags = list(locationNameList = "UnprocessedUpdateAction", type = "list"))), tags = list(type = "structure", resultWrapper = "BatchApplyUpdateActionResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache BatchStopUpdateAction: same field layout
# as BatchApplyUpdateAction (id lists plus service update name). Generated code.
.elasticache$batch_stop_update_action_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ReplicationGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), CacheClusterIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ServiceUpdateName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for BatchStopUpdateAction: processed and
# unprocessed update-action lists wrapped in "BatchStopUpdateActionResult".
# Generated code.
.elasticache$batch_stop_update_action_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProcessedUpdateActions = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), ServiceUpdateName = structure(logical(0), tags = list(type = "string")), UpdateActionStatus = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ProcessedUpdateAction", type = "structure"))), tags = list(locationNameList = "ProcessedUpdateAction", type = "list")), UnprocessedUpdateActions = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), ServiceUpdateName = structure(logical(0), tags = list(type = "string")), ErrorType = structure(logical(0), tags = list(type = "string")), ErrorMessage = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "UnprocessedUpdateAction", type = "structure"))), tags = list(locationNameList = "UnprocessedUpdateAction", type = "list"))), tags = list(type = "structure", resultWrapper = "BatchStopUpdateActionResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache CompleteMigration: replication group id
# plus a boolean Force flag. Generated code.
.elasticache$complete_migration_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Force = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for CompleteMigration: the full ReplicationGroup
# description wrapped in "CompleteMigrationResult". The shape literal below is
# machine-deparsed (line breaks fall mid-expression); do not hand-edit.
.elasticache$complete_migration_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
"string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CompleteMigrationResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache CopySnapshot: source/target snapshot
# names, optional target bucket, KMS key id, and tags. Generated code.
.elasticache$copy_snapshot_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SourceSnapshotName = structure(logical(0), tags = list(type = "string")), TargetSnapshotName = structure(logical(0), tags = list(type = "string")), TargetBucket = structure(logical(0), tags = list(type = "string")), KmsKeyId = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response unmarshaller shape for CopySnapshot: the full Snapshot description
# (including NodeSnapshots) wrapped in "CopySnapshotResult". The shape literal
# is machine-deparsed (line breaks fall mid-expression); do not hand-edit.
.elasticache$copy_snapshot_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Snapshot = structure(list(SnapshotName = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), SnapshotStatus = structure(logical(0), tags = list(type = "string")), SnapshotSource = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), TopicArn = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), NumNodeGroups = structure(logical(0), tags = list(type = "integer")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), NodeSnapshots = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), NodeGroupId = structure(logical(0), tags = list(type = "string")), CacheNodeId = 
structure(logical(0), tags = list(type = "string")), NodeGroupConfiguration = structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string")), ReplicaCount = structure(logical(0), tags = list(type = "integer")), PrimaryAvailabilityZone = structure(logical(0), tags = list(type = "string")), ReplicaAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), PrimaryOutpostArn = structure(logical(0), tags = list(type = "string")), ReplicaOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "OutpostArn", type = "string"))), tags = list(locationNameList = "OutpostArn", type = "list"))), tags = list(type = "structure")), CacheSize = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), SnapshotCreateTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "NodeSnapshot", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "NodeSnapshot", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), DataTiering = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CopySnapshotResult"))
return(populate(args, shape))
}
# Request marshaller for ElastiCache CreateCacheCluster: all cluster-creation
# parameters (node type, engine, security groups, snapshots, log delivery,
# network settings, ...). The shape literal is machine-deparsed (line breaks
# fall mid-expression); do not hand-edit.
.elasticache$create_cache_cluster_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), AZMode = structure(logical(0), tags = list(type = "string")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "PreferredAvailabilityZone", type = "string"))), tags = list(locationNameList = "PreferredAvailabilityZone", type = "list")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheSecurityGroupNames = structure(list(structure(logical(0), tags = list(locationName = "CacheSecurityGroupName", type = "string"))), tags = list(locationNameList = "CacheSecurityGroupName", type = "list")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(locationName = "SecurityGroupId", type = "string"))), tags = list(locationNameList = "SecurityGroupId", type = "list")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list")), SnapshotArns = structure(list(structure(logical(0), tags = list(locationName = "SnapshotArn", type = "string"))), tags = list(locationNameList = "SnapshotArn", type = "list")), SnapshotName = structure(logical(0), tags = list(type = "string")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = 
"integer")), NotificationTopicArn = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthToken = structure(logical(0), tags = list(type = "string")), OutpostMode = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), PreferredOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "PreferredOutpostArn", type = "string"))), tags = list(locationNameList = "PreferredOutpostArn", type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Enabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(locationName = "LogDeliveryConfigurationRequest", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfigurationRequest", type = "list")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.elasticache$create_cache_cluster_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CacheCluster = structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ClientDownloadLandingPage = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheClusterStatus = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NotificationConfiguration = structure(list(TopicArn = structure(logical(0), tags = list(type = "string")), TopicStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CacheSecurityGroups = structure(list(structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure"))), tags = list(locationNameList = "CacheSecurityGroup", type = "list")), CacheParameterGroup = structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterApplyStatus = structure(logical(0), tags = list(type = "string")), CacheNodeIdsToReboot = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list"))), tags = list(type = "structure")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheNodes = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), CacheNodeStatus = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), Endpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ParameterGroupStatus = 
structure(logical(0), tags = list(type = "string")), SourceCacheNodeId = structure(logical(0), tags = list(type = "string")), CustomerAvailabilityZone = structure(logical(0), tags = list(type = "string")), CustomerOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNode", type = "structure"))), tags = list(locationNameList = "CacheNode", type = "list")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SecurityGroups = structure(list(structure(list(SecurityGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string")), ReplicationGroupLogDeliveryEnabled = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = 
structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CreateCacheClusterResult"))
return(populate(args, shape))
}
.elasticache$create_cache_parameter_group_input <- function(...) {
  # Build the CreateCacheParameterGroup request shape and overlay the
  # caller-supplied arguments onto it via populate().
  # Leaf nodes are empty logical(0) placeholders carrying `tags` metadata
  # (type / locationName) used by the paws serialization machinery.
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  shape <- structure(
    list(
      CacheParameterGroupName = string_node(),
      CacheParameterGroupFamily = string_node(),
      Description = string_node(),
      Tags = tag_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
.elasticache$create_cache_parameter_group_output <- function(...) {
  # Build the CreateCacheParameterGroup response shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  parameter_group <- structure(
    list(
      CacheParameterGroupName = string_node(),
      CacheParameterGroupFamily = string_node(),
      Description = string_node(),
      IsGlobal = structure(logical(0), tags = list(type = "boolean")),
      ARN = string_node()
    ),
    tags = list(type = "structure", wrapper = TRUE)
  )
  shape <- structure(
    list(CacheParameterGroup = parameter_group),
    tags = list(type = "structure", resultWrapper = "CreateCacheParameterGroupResult")
  )
  populate(params, shape)
}
.elasticache$create_cache_security_group_input <- function(...) {
  # Build the CreateCacheSecurityGroup request shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  shape <- structure(
    list(
      CacheSecurityGroupName = string_node(),
      Description = string_node(),
      Tags = tag_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
.elasticache$create_cache_security_group_output <- function(...) {
  # Build the CreateCacheSecurityGroup response shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  ec2_groups <- structure(
    list(
      structure(
        list(
          Status = string_node(),
          EC2SecurityGroupName = string_node(),
          EC2SecurityGroupOwnerId = string_node()
        ),
        tags = list(locationName = "EC2SecurityGroup", type = "structure")
      )
    ),
    tags = list(locationNameList = "EC2SecurityGroup", type = "list")
  )
  security_group <- structure(
    list(
      OwnerId = string_node(),
      CacheSecurityGroupName = string_node(),
      Description = string_node(),
      EC2SecurityGroups = ec2_groups,
      ARN = string_node()
    ),
    tags = list(type = "structure", wrapper = TRUE)
  )
  shape <- structure(
    list(CacheSecurityGroup = security_group),
    tags = list(type = "structure", resultWrapper = "CreateCacheSecurityGroupResult")
  )
  populate(params, shape)
}
.elasticache$create_cache_subnet_group_input <- function(...) {
  # Build the CreateCacheSubnetGroup request shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  subnet_ids <- structure(
    list(structure(logical(0), tags = list(locationName = "SubnetIdentifier", type = "string"))),
    tags = list(locationNameList = "SubnetIdentifier", type = "list")
  )
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  shape <- structure(
    list(
      CacheSubnetGroupName = string_node(),
      CacheSubnetGroupDescription = string_node(),
      SubnetIds = subnet_ids,
      Tags = tag_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
.elasticache$create_cache_subnet_group_output <- function(...) {
  # Build the CreateCacheSubnetGroup response shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  network_types <- function() {
    structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))
  }
  subnet_entry <- structure(
    list(
      SubnetIdentifier = string_node(),
      SubnetAvailabilityZone = structure(
        list(Name = string_node()),
        tags = list(type = "structure", wrapper = TRUE)
      ),
      SubnetOutpost = structure(
        list(SubnetOutpostArn = string_node()),
        tags = list(type = "structure")
      ),
      SupportedNetworkTypes = network_types()
    ),
    tags = list(locationName = "Subnet", type = "structure")
  )
  subnet_group <- structure(
    list(
      CacheSubnetGroupName = string_node(),
      CacheSubnetGroupDescription = string_node(),
      VpcId = string_node(),
      Subnets = structure(list(subnet_entry), tags = list(locationNameList = "Subnet", type = "list")),
      ARN = string_node(),
      SupportedNetworkTypes = network_types()
    ),
    tags = list(type = "structure", wrapper = TRUE)
  )
  shape <- structure(
    list(CacheSubnetGroup = subnet_group),
    tags = list(type = "structure", resultWrapper = "CreateCacheSubnetGroupResult")
  )
  populate(params, shape)
}
.elasticache$create_global_replication_group_input <- function(...) {
  # Build the CreateGlobalReplicationGroup request shape and overlay the
  # caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  shape <- structure(
    list(
      GlobalReplicationGroupIdSuffix = string_node(),
      GlobalReplicationGroupDescription = string_node(),
      PrimaryReplicationGroupId = string_node()
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape constructor for the ElastiCache CreateGlobalReplicationGroup
# operation. Builds the nested GlobalReplicationGroup result template and
# delegates to populate() to merge in any fields supplied via `...`.
# Auto-generated from the AWS API model; the shape literal below should not
# be edited by hand.
.elasticache$create_global_replication_group_output <- function(...) {
# Gather the caller's arguments to overlay onto the template below.
args <- c(as.list(environment()), list(...))
# Empty template: each leaf is a logical(0) placeholder annotated with
# `tags` metadata (type, locationName, wrapper, ...) used for serialization.
shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CreateGlobalReplicationGroupResult"))
return(populate(args, shape))
}
# Request shape constructor for the ElastiCache CreateReplicationGroup
# operation. Builds the full request template (replication group settings,
# node-group configuration, security groups, encryption, log delivery, tags)
# and delegates to populate() to merge in any fields supplied via `...`.
# Auto-generated from the AWS API model; the shape literal below should not
# be edited by hand.
.elasticache$create_replication_group_input <- function(...) {
# Gather the caller's arguments to overlay onto the template below.
args <- c(as.list(environment()), list(...))
# Empty template: each leaf is a logical(0) placeholder annotated with
# `tags` metadata (type, locationName, locationNameList) for serialization.
shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverEnabled = structure(logical(0), tags = list(type = "boolean")), MultiAZEnabled = structure(logical(0), tags = list(type = "boolean")), NumCacheClusters = structure(logical(0), tags = list(type = "integer")), PreferredCacheClusterAZs = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), NumNodeGroups = structure(logical(0), tags = list(type = "integer")), ReplicasPerNodeGroup = structure(logical(0), tags = list(type = "integer")), NodeGroupConfiguration = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string")), ReplicaCount = structure(logical(0), tags = list(type = "integer")), PrimaryAvailabilityZone = structure(logical(0), tags = list(type = "string")), ReplicaAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), PrimaryOutpostArn = structure(logical(0), tags = list(type = "string")), ReplicaOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "OutpostArn", type = "string"))), tags = list(locationNameList = "OutpostArn", type = "list"))), tags = list(locationName = "NodeGroupConfiguration", type = "structure"))), tags = list(locationNameList = "NodeGroupConfiguration", type = "list")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = 
structure(logical(0), tags = list(type = "string")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheSecurityGroupNames = structure(list(structure(logical(0), tags = list(locationName = "CacheSecurityGroupName", type = "string"))), tags = list(locationNameList = "CacheSecurityGroupName", type = "list")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(locationName = "SecurityGroupId", type = "string"))), tags = list(locationNameList = "SecurityGroupId", type = "list")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list")), SnapshotArns = structure(list(structure(logical(0), tags = list(locationName = "SnapshotArn", type = "string"))), tags = list(locationNameList = "SnapshotArn", type = "list")), SnapshotName = structure(logical(0), tags = list(type = "string")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer")), NotificationTopicArn = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthToken = structure(logical(0), tags = list(type = "string")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), KmsKeyId = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations 
= structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Enabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(locationName = "LogDeliveryConfigurationRequest", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfigurationRequest", type = "list")), DataTieringEnabled = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape constructor for the ElastiCache CreateReplicationGroup
# operation. Builds the ReplicationGroup result template (status, pending
# modified values, node groups, endpoints, encryption flags, log delivery)
# and delegates to populate() to merge in any fields supplied via `...`.
# Auto-generated from the AWS API model; the shape literal below should not
# be edited by hand.
.elasticache$create_replication_group_output <- function(...) {
# Gather the caller's arguments to overlay onto the template below.
args <- c(as.list(environment()), list(...))
# Empty template: each leaf is a logical(0) placeholder annotated with
# `tags` metadata (type, locationName, wrapper, ...) for deserialization.
shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
"string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CreateReplicationGroupResult"))
return(populate(args, shape))
}
.elasticache$create_snapshot_input <- function(...) {
  # Build the CreateSnapshot request shape and overlay the caller-supplied
  # arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  shape <- structure(
    list(
      ReplicationGroupId = string_node(),
      CacheClusterId = string_node(),
      SnapshotName = string_node(),
      KmsKeyId = string_node(),
      Tags = tag_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape constructor for the ElastiCache CreateSnapshot operation.
# Builds the Snapshot result template (cluster/replication-group metadata,
# node snapshots, node-group configuration) and delegates to populate() to
# merge in any fields supplied via `...`.
# Auto-generated from the AWS API model; the shape literal below should not
# be edited by hand.
.elasticache$create_snapshot_output <- function(...) {
# Gather the caller's arguments to overlay onto the template below.
args <- c(as.list(environment()), list(...))
# Empty template: each leaf is a logical(0) placeholder annotated with
# `tags` metadata (type, locationName, wrapper, ...) for deserialization.
shape <- structure(list(Snapshot = structure(list(SnapshotName = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), SnapshotStatus = structure(logical(0), tags = list(type = "string")), SnapshotSource = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), TopicArn = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), NumNodeGroups = structure(logical(0), tags = list(type = "integer")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), NodeSnapshots = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), NodeGroupId = structure(logical(0), tags = list(type = "string")), CacheNodeId = 
structure(logical(0), tags = list(type = "string")), NodeGroupConfiguration = structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string")), ReplicaCount = structure(logical(0), tags = list(type = "integer")), PrimaryAvailabilityZone = structure(logical(0), tags = list(type = "string")), ReplicaAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), PrimaryOutpostArn = structure(logical(0), tags = list(type = "string")), ReplicaOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "OutpostArn", type = "string"))), tags = list(locationNameList = "OutpostArn", type = "list"))), tags = list(type = "structure")), CacheSize = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), SnapshotCreateTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "NodeSnapshot", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "NodeSnapshot", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), DataTiering = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "CreateSnapshotResult"))
return(populate(args, shape))
}
.elasticache$create_user_input <- function(...) {
  # Build the CreateUser request shape and overlay the caller-supplied
  # arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  password_list <- function() {
    structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))
  }
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  auth_mode <- structure(
    list(Type = string_node(), Passwords = password_list()),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserId = string_node(),
      UserName = string_node(),
      Engine = string_node(),
      Passwords = password_list(),
      AccessString = string_node(),
      NoPasswordRequired = structure(logical(0), tags = list(type = "boolean")),
      Tags = tag_list,
      AuthenticationMode = auth_mode
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
.elasticache$create_user_output <- function(...) {
  # Build the CreateUser response shape and overlay the caller-supplied
  # arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  string_list <- function() {
    structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))
  }
  authentication <- structure(
    list(
      Type = string_node(),
      PasswordCount = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserId = string_node(),
      UserName = string_node(),
      Status = string_node(),
      Engine = string_node(),
      MinimumEngineVersion = string_node(),
      AccessString = string_node(),
      UserGroupIds = string_list(),
      Authentication = authentication,
      ARN = string_node()
    ),
    tags = list(type = "structure", resultWrapper = "CreateUserResult")
  )
  populate(params, shape)
}
.elasticache$create_user_group_input <- function(...) {
  # Build the CreateUserGroup request shape and overlay the caller-supplied
  # arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  user_id_list <- structure(
    list(structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "list")
  )
  tag_list <- structure(
    list(
      structure(
        list(Key = string_node(), Value = string_node()),
        tags = list(locationName = "Tag", type = "structure")
      )
    ),
    tags = list(locationNameList = "Tag", type = "list")
  )
  shape <- structure(
    list(
      UserGroupId = string_node(),
      Engine = string_node(),
      UserIds = user_id_list,
      Tags = tag_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
.elasticache$create_user_group_output <- function(...) {
  # Build the CreateUserGroup response shape and overlay the caller-supplied
  # arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  string_node <- function() structure(logical(0), tags = list(type = "string"))
  string_list <- function() {
    structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))
  }
  pending <- structure(
    list(UserIdsToRemove = string_list(), UserIdsToAdd = string_list()),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserGroupId = string_node(),
      Status = string_node(),
      Engine = string_node(),
      UserIds = string_list(),
      MinimumEngineVersion = string_node(),
      PendingChanges = pending,
      ReplicationGroups = string_list(),
      ARN = string_node()
    ),
    tags = list(type = "structure", resultWrapper = "CreateUserGroupResult")
  )
  populate(params, shape)
}
.elasticache$decrease_node_groups_in_global_replication_group_input <- function(...) {
  # Build the DecreaseNodeGroupsInGlobalReplicationGroup request shape and
  # overlay the caller-supplied arguments onto it via populate().
  params <- c(as.list(environment()), list(...))
  # Both the remove and retain fields share the same list-of-id shape.
  global_node_group_list <- function() {
    structure(
      list(structure(logical(0), tags = list(locationName = "GlobalNodeGroupId", type = "string"))),
      tags = list(locationNameList = "GlobalNodeGroupId", type = "list")
    )
  }
  shape <- structure(
    list(
      GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")),
      NodeGroupCount = structure(logical(0), tags = list(type = "integer")),
      GlobalNodeGroupsToRemove = global_node_group_list(),
      GlobalNodeGroupsToRetain = global_node_group_list(),
      ApplyImmediately = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape constructor for the ElastiCache
# DecreaseNodeGroupsInGlobalReplicationGroup operation. Builds the nested
# GlobalReplicationGroup result template and delegates to populate() to
# merge in any fields supplied via `...`.
# Auto-generated from the AWS API model; the shape literal below should not
# be edited by hand.
.elasticache$decrease_node_groups_in_global_replication_group_output <- function(...) {
# Gather the caller's arguments to overlay onto the template below.
args <- c(as.list(environment()), list(...))
# Empty template: each leaf is a logical(0) placeholder annotated with
# `tags` metadata (type, locationName, wrapper, ...) for deserialization.
shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DecreaseNodeGroupsInGlobalReplicationGroupResult"))
return(populate(args, shape))
}
# Request shape for DecreaseReplicaCount: a nested template of empty
# placeholders whose `tags` attributes carry serialization metadata, filled
# in from the caller's arguments by populate() (defined elsewhere in the
# package). NOTE(review): appears auto-generated; do not hand-edit the shape.
.elasticache$decrease_replica_count_input <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), NewReplicaCount = structure(logical(0), tags = list(type = "integer")), ReplicaConfiguration = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), NewReplicaCount = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "PreferredAvailabilityZone", type = "string"))), tags = list(locationNameList = "PreferredAvailabilityZone", type = "list")), PreferredOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "PreferredOutpostArn", type = "string"))), tags = list(locationNameList = "PreferredOutpostArn", type = "list"))), tags = list(locationName = "ConfigureShard", type = "structure"))), tags = list(locationNameList = "ConfigureShard", type = "list")), ReplicasToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response shape for DecreaseReplicaCount: a nested template of empty
# placeholders whose `tags` attributes carry serialization metadata, filled
# in from the caller's arguments by populate() (defined elsewhere in the
# package). NOTE(review): appears auto-generated (machine-deparsed, hence the
# very long lines); do not hand-edit the shape.
.elasticache$decrease_replica_count_output <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DecreaseReplicaCountResult"))
  return(populate(args, shape))
}
.elasticache$delete_cache_cluster_input <- function(...) {
  # Merge caller-supplied values into the DeleteCacheCluster request template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      CacheClusterId = structure(logical(0), tags = list(type = "string")),
      FinalSnapshotIdentifier = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
# Response shape for DeleteCacheCluster: a nested template of empty
# placeholders whose `tags` attributes carry serialization metadata, filled
# in from the caller's arguments by populate() (defined elsewhere in the
# package). NOTE(review): appears auto-generated (machine-deparsed, hence the
# very long lines); do not hand-edit the shape.
.elasticache$delete_cache_cluster_output <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheCluster = structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ClientDownloadLandingPage = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheClusterStatus = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = 
    structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NotificationConfiguration = structure(list(TopicArn = structure(logical(0), tags = list(type = "string")), TopicStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CacheSecurityGroups = structure(list(structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure"))), tags = list(locationNameList = "CacheSecurityGroup", type = "list")), CacheParameterGroup = structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterApplyStatus = structure(logical(0), tags = list(type = "string")), CacheNodeIdsToReboot = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list"))), tags = list(type = "structure")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheNodes = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), CacheNodeStatus = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), Endpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ParameterGroupStatus = 
    structure(logical(0), tags = list(type = "string")), SourceCacheNodeId = structure(logical(0), tags = list(type = "string")), CustomerAvailabilityZone = structure(logical(0), tags = list(type = "string")), CustomerOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNode", type = "structure"))), tags = list(locationNameList = "CacheNode", type = "list")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SecurityGroups = structure(list(structure(list(SecurityGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string")), ReplicationGroupLogDeliveryEnabled = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = 
    structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DeleteCacheClusterResult"))
  return(populate(args, shape))
}
.elasticache$delete_cache_parameter_group_input <- function(...) {
  # Merge caller-supplied values into the DeleteCacheParameterGroup request
  # template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
.elasticache$delete_cache_parameter_group_output <- function(...) {
  # DeleteCacheParameterGroup returns no data; the response shape is empty.
  empty_shape <- list()
  empty_shape
}
.elasticache$delete_cache_security_group_input <- function(...) {
  # Merge caller-supplied values into the DeleteCacheSecurityGroup request
  # template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
.elasticache$delete_cache_security_group_output <- function(...) {
  # DeleteCacheSecurityGroup returns no data; the response shape is empty.
  empty_shape <- list()
  empty_shape
}
.elasticache$delete_cache_subnet_group_input <- function(...) {
  # Merge caller-supplied values into the DeleteCacheSubnetGroup request
  # template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(CacheSubnetGroupName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
.elasticache$delete_cache_subnet_group_output <- function(...) {
  # DeleteCacheSubnetGroup returns no data; the response shape is empty.
  empty_shape <- list()
  empty_shape
}
.elasticache$delete_global_replication_group_input <- function(...) {
  # Merge caller-supplied values into the DeleteGlobalReplicationGroup
  # request template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")),
      RetainPrimaryReplicationGroup = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
# Response shape for DeleteGlobalReplicationGroup: a nested template of empty
# placeholders whose `tags` attributes carry serialization metadata, filled
# in from the caller's arguments by populate() (defined elsewhere in the
# package). NOTE(review): appears auto-generated; do not hand-edit the shape.
.elasticache$delete_global_replication_group_output <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DeleteGlobalReplicationGroupResult"))
  return(populate(args, shape))
}
.elasticache$delete_replication_group_input <- function(...) {
  # Merge caller-supplied values into the DeleteReplicationGroup request
  # template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      ReplicationGroupId = structure(logical(0), tags = list(type = "string")),
      RetainPrimaryCluster = structure(logical(0), tags = list(type = "boolean")),
      FinalSnapshotIdentifier = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
# Response shape for DeleteReplicationGroup: a nested template of empty
# placeholders whose `tags` attributes carry serialization metadata, filled
# in from the caller's arguments by populate() (defined elsewhere in the
# package). NOTE(review): appears auto-generated (machine-deparsed, hence the
# very long lines); do not hand-edit the shape.
.elasticache$delete_replication_group_output <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DeleteReplicationGroupResult"))
  return(populate(args, shape))
}
.elasticache$delete_snapshot_input <- function(...) {
  # Merge caller-supplied values into the DeleteSnapshot request template.
  call_args <- c(as.list(environment()), list(...))
  template <- structure(
    list(SnapshotName = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, template)
}
# Response shape for DeleteSnapshot: a nested template of empty placeholders
# whose `tags` attributes carry serialization metadata, filled in from the
# caller's arguments by populate() (defined elsewhere in the package).
# NOTE(review): appears auto-generated (machine-deparsed, hence the very long
# lines); do not hand-edit the shape.
.elasticache$delete_snapshot_output <- function(...) {
  # Capture named arguments plus anything forwarded via `...`.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Snapshot = structure(list(SnapshotName = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), SnapshotStatus = structure(logical(0), tags = list(type = "string")), SnapshotSource = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), TopicArn = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), NumNodeGroups = structure(logical(0), tags = list(type = "integer")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), NodeSnapshots = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), NodeGroupId = structure(logical(0), tags = list(type = "string")), CacheNodeId = 
    structure(logical(0), tags = list(type = "string")), NodeGroupConfiguration = structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string")), ReplicaCount = structure(logical(0), tags = list(type = "integer")), PrimaryAvailabilityZone = structure(logical(0), tags = list(type = "string")), ReplicaAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), PrimaryOutpostArn = structure(logical(0), tags = list(type = "string")), ReplicaOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "OutpostArn", type = "string"))), tags = list(locationNameList = "OutpostArn", type = "list"))), tags = list(type = "structure")), CacheSize = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), SnapshotCreateTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "NodeSnapshot", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "NodeSnapshot", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), DataTiering = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DeleteSnapshotResult"))
  return(populate(args, shape))
}
.elasticache$delete_user_input <- function(...) {
  # Generated request builder for the ElastiCache DeleteUser operation.
  # Fold every caller-supplied argument into a single named list.
  params <- c(as.list(environment()), list(...))
  # Wire-format template: a structure holding the target UserId string.
  template <- structure(list(UserId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (package-internal) fills the template from the supplied values.
  populate(params, template)
}
.elasticache$delete_user_output <- function(...) {
  # Generated response unmarshalling template for the DeleteUser operation.
  # Gather any supplied values (normally none for an output shape).
  params <- c(as.list(environment()), list(...))
  # Response shape: the deleted user's identity, status, engine info, group
  # memberships and authentication summary, wrapped in "DeleteUserResult".
  template <- structure(list(UserId = structure(logical(0), tags = list(type = "string")), UserName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), AccessString = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Authentication = structure(list(Type = structure(logical(0), tags = list(type = "string")), PasswordCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "DeleteUserResult"))
  # Delegate to the package-internal populate() helper.
  populate(params, template)
}
.elasticache$delete_user_group_input <- function(...) {
  # Generated request builder for the DeleteUserGroup operation.
  # Collect caller-supplied arguments into one named list.
  params <- c(as.list(environment()), list(...))
  # Request shape: a single string identifying the user group to delete.
  template <- structure(list(UserGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the supplied values into the shape via the internal populate().
  populate(params, template)
}
.elasticache$delete_user_group_output <- function(...) {
  # Generated response template for the DeleteUserGroup operation.
  params <- c(as.list(environment()), list(...))
  # Response shape: group identity, member user ids, pending membership
  # changes and attached replication groups, wrapped in
  # "DeleteUserGroupResult".
  template <- structure(list(UserGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), UserIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), PendingChanges = structure(list(UserIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), ReplicationGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "DeleteUserGroupResult"))
  # Fill the template from the collected values.
  populate(params, template)
}
.elasticache$describe_cache_clusters_input <- function(...) {
  # Generated request builder for the DescribeCacheClusters operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: optional cluster id filter, pagination controls
  # (MaxRecords/Marker) and the two node-visibility toggles.
  template <- structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string")), ShowCacheNodeInfo = structure(logical(0), tags = list(type = "boolean")), ShowCacheClustersNotInReplicationGroups = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
# Generated response template for DescribeCacheClusters. The shape literal
# below is machine-generated and should not be edited by hand.
.elasticache$describe_cache_clusters_output <- function(...) {
  # Collect any supplied values (normally none for an output shape).
  args <- c(as.list(environment()), list(...))
  # Full CacheCluster result tree: endpoints, per-node details, security
  # groups, pending modifications, log delivery configuration, encryption
  # flags and network settings, wrapped in "DescribeCacheClustersResult".
  shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), CacheClusters = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ClientDownloadLandingPage = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheClusterStatus = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NotificationConfiguration = structure(list(TopicArn = structure(logical(0), tags = list(type = "string")), TopicStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CacheSecurityGroups = structure(list(structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure"))), tags = list(locationNameList = "CacheSecurityGroup", type = "list")), CacheParameterGroup = structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterApplyStatus = structure(logical(0), tags = list(type = "string")), CacheNodeIdsToReboot = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list"))), tags = list(type = "structure")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheNodes = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), CacheNodeStatus = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), Endpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = 
"integer"))), tags = list(type = "structure")), ParameterGroupStatus = structure(logical(0), tags = list(type = "string")), SourceCacheNodeId = structure(logical(0), tags = list(type = "string")), CustomerAvailabilityZone = structure(logical(0), tags = list(type = "string")), CustomerOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNode", type = "structure"))), tags = list(locationNameList = "CacheNode", type = "list")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SecurityGroups = structure(list(structure(list(SecurityGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string")), ReplicationGroupLogDeliveryEnabled = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheCluster", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "CacheCluster", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeCacheClustersResult"))
  # populate() (package-internal) merges the supplied values into the shape.
  return(populate(args, shape))
}
.elasticache$describe_cache_engine_versions_input <- function(...) {
  # Generated request builder for the DescribeCacheEngineVersions operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: engine/version/family filters, pagination controls and
  # the DefaultOnly toggle.
  template <- structure(list(Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheParameterGroupFamily = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string")), DefaultOnly = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
.elasticache$describe_cache_engine_versions_output <- function(...) {
  # Generated response template for DescribeCacheEngineVersions.
  params <- c(as.list(environment()), list(...))
  # Response shape: pagination marker plus a list of engine-version
  # descriptors, wrapped in "DescribeCacheEngineVersionsResult".
  template <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), CacheEngineVersions = structure(list(structure(list(Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheParameterGroupFamily = structure(logical(0), tags = list(type = "string")), CacheEngineDescription = structure(logical(0), tags = list(type = "string")), CacheEngineVersionDescription = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheEngineVersion", type = "structure"))), tags = list(locationNameList = "CacheEngineVersion", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeCacheEngineVersionsResult"))
  # Fill the template from the collected values.
  populate(params, template)
}
.elasticache$describe_cache_parameter_groups_input <- function(...) {
  # Generated request builder for the DescribeCacheParameterGroups operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: optional group-name filter plus pagination controls.
  template <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
.elasticache$describe_cache_parameter_groups_output <- function(...) {
  # Generated response template for DescribeCacheParameterGroups.
  params <- c(as.list(environment()), list(...))
  # Response shape: pagination marker plus a list of parameter-group
  # summaries, wrapped in "DescribeCacheParameterGroupsResult".
  template <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), CacheParameterGroups = structure(list(structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheParameterGroupFamily = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), IsGlobal = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheParameterGroup", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "CacheParameterGroup", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeCacheParameterGroupsResult"))
  # Fill the template from the collected values.
  populate(params, template)
}
.elasticache$describe_cache_parameters_input <- function(...) {
  # Generated request builder for the DescribeCacheParameters operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: target group name, optional Source filter, and
  # pagination controls.
  template <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), Source = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
# Generated response template for DescribeCacheParameters. The shape literal
# below is machine-generated and should not be edited by hand.
.elasticache$describe_cache_parameters_output <- function(...) {
  # Collect any supplied values (normally none for an output shape).
  args <- c(as.list(environment()), list(...))
  # Response shape: pagination marker, the plain Parameters list, and the
  # CacheNodeTypeSpecificParameters list (values that vary per node type),
  # wrapped in "DescribeCacheParametersResult".
  shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), ParameterValue = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Source = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string")), AllowedValues = structure(logical(0), tags = list(type = "string")), IsModifiable = structure(logical(0), tags = list(type = "boolean")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), ChangeType = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Parameter", type = "structure"))), tags = list(locationNameList = "Parameter", type = "list")), CacheNodeTypeSpecificParameters = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Source = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string")), AllowedValues = structure(logical(0), tags = list(type = "string")), IsModifiable = structure(logical(0), tags = list(type = "boolean")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeTypeSpecificValues = structure(list(structure(list(CacheNodeType = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNodeTypeSpecificValue", type = "structure"))), tags = list(locationNameList = "CacheNodeTypeSpecificValue", type = "list")), ChangeType = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNodeTypeSpecificParameter", type = "structure"))), tags = list(locationNameList = "CacheNodeTypeSpecificParameter", type = "list"))), tags = 
  list(type = "structure", resultWrapper = "DescribeCacheParametersResult"))
  # populate() (package-internal) merges the supplied values into the shape.
  return(populate(args, shape))
}
.elasticache$describe_cache_security_groups_input <- function(...) {
  # Generated request builder for the DescribeCacheSecurityGroups operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: optional security-group-name filter plus pagination
  # controls.
  template <- structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
.elasticache$describe_cache_security_groups_output <- function(...) {
  # Generated response template for DescribeCacheSecurityGroups.
  params <- c(as.list(environment()), list(...))
  # Response shape: pagination marker plus the cache security groups, each
  # carrying its authorized EC2 security groups, wrapped in
  # "DescribeCacheSecurityGroupsResult".
  template <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), CacheSecurityGroups = structure(list(structure(list(OwnerId = structure(logical(0), tags = list(type = "string")), CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), EC2SecurityGroups = structure(list(structure(list(Status = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupOwnerId = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "EC2SecurityGroup", type = "structure"))), tags = list(locationNameList = "EC2SecurityGroup", type = "list")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "CacheSecurityGroup", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeCacheSecurityGroupsResult"))
  # Fill the template from the collected values.
  populate(params, template)
}
.elasticache$describe_cache_subnet_groups_input <- function(...) {
  # Generated request builder for the DescribeCacheSubnetGroups operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: optional subnet-group-name filter plus pagination
  # controls.
  template <- structure(list(CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
.elasticache$describe_cache_subnet_groups_output <- function(...) {
  # Generated response template for DescribeCacheSubnetGroups.
  params <- c(as.list(environment()), list(...))
  # Response shape: pagination marker plus the subnet groups, each listing
  # its subnets (AZ, outpost, supported network types), wrapped in
  # "DescribeCacheSubnetGroupsResult".
  template <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), CacheSubnetGroups = structure(list(structure(list(CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupDescription = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), Subnets = structure(list(structure(list(SubnetIdentifier = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE)), SubnetOutpost = structure(list(SubnetOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SupportedNetworkTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(locationName = "Subnet", type = "structure"))), tags = list(locationNameList = "Subnet", type = "list")), ARN = structure(logical(0), tags = list(type = "string")), SupportedNetworkTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(locationName = "CacheSubnetGroup", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "CacheSubnetGroup", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeCacheSubnetGroupsResult"))
  # Fill the template from the collected values.
  populate(params, template)
}
.elasticache$describe_engine_default_parameters_input <- function(...) {
  # Generated request builder for the DescribeEngineDefaultParameters
  # operation.
  params <- c(as.list(environment()), list(...))
  # Request shape: the parameter-group family plus pagination controls.
  template <- structure(list(CacheParameterGroupFamily = structure(logical(0), tags = list(type = "string")), MaxRecords = structure(logical(0), tags = list(type = "integer")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # Merge the caller's values into the template.
  populate(params, template)
}
# Generated response template for DescribeEngineDefaultParameters. The shape
# literal below is machine-generated and should not be edited by hand.
.elasticache$describe_engine_default_parameters_output <- function(...) {
  # Collect any supplied values (normally none for an output shape).
  args <- c(as.list(environment()), list(...))
  # Response shape: a wrapped EngineDefaults structure containing the family
  # name, pagination marker, plain Parameters, and the per-node-type
  # CacheNodeTypeSpecificParameters, wrapped in
  # "DescribeEngineDefaultParametersResult".
  shape <- structure(list(EngineDefaults = structure(list(CacheParameterGroupFamily = structure(logical(0), tags = list(type = "string")), Marker = structure(logical(0), tags = list(type = "string")), Parameters = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), ParameterValue = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Source = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string")), AllowedValues = structure(logical(0), tags = list(type = "string")), IsModifiable = structure(logical(0), tags = list(type = "boolean")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), ChangeType = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Parameter", type = "structure"))), tags = list(locationNameList = "Parameter", type = "list")), CacheNodeTypeSpecificParameters = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Source = structure(logical(0), tags = list(type = "string")), DataType = structure(logical(0), tags = list(type = "string")), AllowedValues = structure(logical(0), tags = list(type = "string")), IsModifiable = structure(logical(0), tags = list(type = "boolean")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeTypeSpecificValues = structure(list(structure(list(CacheNodeType = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNodeTypeSpecificValue", type = "structure"))), tags = list(locationNameList = "CacheNodeTypeSpecificValue", type = "list")), ChangeType = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNodeTypeSpecificParameter", 
  type = "structure"))), tags = list(locationNameList = "CacheNodeTypeSpecificParameter", type = "list"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DescribeEngineDefaultParametersResult"))
  # populate() (package-internal) merges the supplied values into the shape.
  return(populate(args, shape))
}
.elasticache$describe_events_input <- function(...) {
# Request shape for the ElastiCache DescribeEvents API: event-source
# filters, a time window, and the usual pagination controls.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    SourceIdentifier = scalar("string"),
    SourceType = scalar("string"),
    StartTime = scalar("timestamp"),
    EndTime = scalar("timestamp"),
    Duration = scalar("integer"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_events_output <- function(...) {
# Response shape for DescribeEvents: a pagination Marker plus a list of
# Event records (source identifier/type, message text, and timestamp).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), Events = structure(list(structure(list(SourceIdentifier = structure(logical(0), tags = list(type = "string")), SourceType = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string")), Date = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "Event", type = "structure"))), tags = list(locationNameList = "Event", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeEventsResult"))
return(populate(args, shape))
}
.elasticache$describe_global_replication_groups_input <- function(...) {
# Request shape for DescribeGlobalReplicationGroups: target group id,
# pagination controls, and whether member details should be returned.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    GlobalReplicationGroupId = scalar("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string"),
    ShowMemberInfo = scalar("boolean")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_global_replication_groups_output <- function(...) {
# Response shape for DescribeGlobalReplicationGroups: pagination Marker
# plus the list of global replication groups, each with engine details,
# regional members, node-group slot layout, and encryption flags.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroups = structure(list(structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroup", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroup", type = "list"))), tags = 
list(type = "structure", resultWrapper = "DescribeGlobalReplicationGroupsResult"))
return(populate(args, shape))
}
.elasticache$describe_replication_groups_input <- function(...) {
# Request shape for DescribeReplicationGroups: optional group id filter
# plus pagination controls (MaxRecords, Marker).
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    ReplicationGroupId = scalar("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_replication_groups_output <- function(...) {
# Response shape for DescribeReplicationGroups: pagination Marker plus the
# full replication-group description — pending modifications, member
# clusters, node groups with endpoints, snapshot settings, encryption
# flags, log-delivery configurations, and network/cluster-mode metadata.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), ReplicationGroups = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), 
tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), 
SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = 
list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ReplicationGroup", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "ReplicationGroup", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeReplicationGroupsResult"))
return(populate(args, shape))
}
.elasticache$describe_reserved_cache_nodes_input <- function(...) {
# Request shape for DescribeReservedCacheNodes: reservation/offering ids
# and attribute filters, plus pagination controls.
# NOTE: Duration is tagged "string" on the wire for this request.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    ReservedCacheNodeId = scalar("string"),
    ReservedCacheNodesOfferingId = scalar("string"),
    CacheNodeType = scalar("string"),
    Duration = scalar("string"),
    ProductDescription = scalar("string"),
    OfferingType = scalar("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_reserved_cache_nodes_output <- function(...) {
# Response shape for DescribeReservedCacheNodes: pagination Marker plus
# reserved-node records (pricing, duration, state, recurring charges, ARN).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), ReservedCacheNodes = structure(list(structure(list(ReservedCacheNodeId = structure(logical(0), tags = list(type = "string")), ReservedCacheNodesOfferingId = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp")), Duration = structure(logical(0), tags = list(type = "integer")), FixedPrice = structure(logical(0), tags = list(type = "double")), UsagePrice = structure(logical(0), tags = list(type = "double")), CacheNodeCount = structure(logical(0), tags = list(type = "integer")), ProductDescription = structure(logical(0), tags = list(type = "string")), OfferingType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), RecurringCharges = structure(list(structure(list(RecurringChargeAmount = structure(logical(0), tags = list(type = "double")), RecurringChargeFrequency = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "RecurringCharge", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "RecurringCharge", type = "list")), ReservationARN = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ReservedCacheNode", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "ReservedCacheNode", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeReservedCacheNodesResult"))
return(populate(args, shape))
}
.elasticache$describe_reserved_cache_nodes_offerings_input <- function(...) {
# Request shape for DescribeReservedCacheNodesOfferings: offering id and
# attribute filters, plus pagination controls.
# NOTE: Duration is tagged "string" on the wire for this request.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    ReservedCacheNodesOfferingId = scalar("string"),
    CacheNodeType = scalar("string"),
    Duration = scalar("string"),
    ProductDescription = scalar("string"),
    OfferingType = scalar("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_reserved_cache_nodes_offerings_output <- function(...) {
# Response shape for DescribeReservedCacheNodesOfferings: pagination
# Marker plus offering records (pricing, duration, recurring charges).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), ReservedCacheNodesOfferings = structure(list(structure(list(ReservedCacheNodesOfferingId = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Duration = structure(logical(0), tags = list(type = "integer")), FixedPrice = structure(logical(0), tags = list(type = "double")), UsagePrice = structure(logical(0), tags = list(type = "double")), ProductDescription = structure(logical(0), tags = list(type = "string")), OfferingType = structure(logical(0), tags = list(type = "string")), RecurringCharges = structure(list(structure(list(RecurringChargeAmount = structure(logical(0), tags = list(type = "double")), RecurringChargeFrequency = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "RecurringCharge", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "RecurringCharge", type = "list"))), tags = list(locationName = "ReservedCacheNodesOffering", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "ReservedCacheNodesOffering", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeReservedCacheNodesOfferingsResult"))
return(populate(args, shape))
}
.elasticache$describe_service_updates_input <- function(...) {
# Request shape for DescribeServiceUpdates: update name, a list of status
# filters, and pagination controls.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
# Plain (untagged-member-name) list of scalars of the given wire type.
scalar_list <- function(wire_type) {
  structure(list(scalar(wire_type)), tags = list(type = "list"))
}
shape <- structure(
  list(
    ServiceUpdateName = scalar("string"),
    ServiceUpdateStatus = scalar_list("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_service_updates_output <- function(...) {
# Response shape for DescribeServiceUpdates: pagination Marker plus
# ServiceUpdate records (dates, severity, status, type, engine info).
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), ServiceUpdates = structure(list(structure(list(ServiceUpdateName = structure(logical(0), tags = list(type = "string")), ServiceUpdateReleaseDate = structure(logical(0), tags = list(type = "timestamp")), ServiceUpdateEndDate = structure(logical(0), tags = list(type = "timestamp")), ServiceUpdateSeverity = structure(logical(0), tags = list(type = "string")), ServiceUpdateRecommendedApplyByDate = structure(logical(0), tags = list(type = "timestamp")), ServiceUpdateStatus = structure(logical(0), tags = list(type = "string")), ServiceUpdateDescription = structure(logical(0), tags = list(type = "string")), ServiceUpdateType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), AutoUpdateAfterRecommendedApplyByDate = structure(logical(0), tags = list(type = "boolean")), EstimatedUpdateTime = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ServiceUpdate", type = "structure"))), tags = list(locationNameList = "ServiceUpdate", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeServiceUpdatesResult"))
return(populate(args, shape))
}
.elasticache$describe_snapshots_input <- function(...) {
# Request shape for DescribeSnapshots: snapshot/cluster/group filters,
# pagination controls, and a flag to include node-group configuration.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    ReplicationGroupId = scalar("string"),
    CacheClusterId = scalar("string"),
    SnapshotName = scalar("string"),
    SnapshotSource = scalar("string"),
    Marker = scalar("string"),
    MaxRecords = scalar("integer"),
    ShowNodeGroupConfig = scalar("boolean")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_snapshots_output <- function(...) {
# Response shape for DescribeSnapshots: pagination Marker plus Snapshot
# records — source cluster/group metadata, engine and maintenance
# settings, and per-node snapshot details with node-group configuration.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), Snapshots = structure(list(structure(list(SnapshotName = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), SnapshotStatus = structure(logical(0), tags = list(type = "string")), SnapshotSource = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), TopicArn = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), NumNodeGroups = structure(logical(0), tags = list(type = "integer")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), NodeSnapshots = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), NodeGroupId = 
structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), NodeGroupConfiguration = structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string")), ReplicaCount = structure(logical(0), tags = list(type = "integer")), PrimaryAvailabilityZone = structure(logical(0), tags = list(type = "string")), ReplicaAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list")), PrimaryOutpostArn = structure(logical(0), tags = list(type = "string")), ReplicaOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "OutpostArn", type = "string"))), tags = list(locationNameList = "OutpostArn", type = "list"))), tags = list(type = "structure")), CacheSize = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), SnapshotCreateTime = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "NodeSnapshot", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "NodeSnapshot", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), DataTiering = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Snapshot", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "Snapshot", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeSnapshotsResult"))
return(populate(args, shape))
}
.elasticache$describe_update_actions_input <- function(...) {
# Request shape for DescribeUpdateActions: filters on the service update,
# target replication groups / cache clusters, engine, statuses, and a
# release-time range, plus pagination controls.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
# Plain (untagged-member-name) list of scalars of the given wire type.
scalar_list <- function(wire_type) {
  structure(list(scalar(wire_type)), tags = list(type = "list"))
}
shape <- structure(
  list(
    ServiceUpdateName = scalar("string"),
    ReplicationGroupIds = scalar_list("string"),
    CacheClusterIds = scalar_list("string"),
    Engine = scalar("string"),
    ServiceUpdateStatus = scalar_list("string"),
    ServiceUpdateTimeRange = structure(
      list(
        StartTime = scalar("timestamp"),
        EndTime = scalar("timestamp")
      ),
      tags = list(type = "structure")
    ),
    UpdateActionStatus = scalar_list("string"),
    ShowNodeLevelUpdateStatus = scalar("boolean"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_update_actions_output <- function(...) {
# Response shape for DescribeUpdateActions: pagination Marker plus
# UpdateAction records — per-action status/dates, node-group-level and
# cache-node-level update progress, and engine metadata.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Marker = structure(logical(0), tags = list(type = "string")), UpdateActions = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CacheClusterId = structure(logical(0), tags = list(type = "string")), ServiceUpdateName = structure(logical(0), tags = list(type = "string")), ServiceUpdateReleaseDate = structure(logical(0), tags = list(type = "timestamp")), ServiceUpdateSeverity = structure(logical(0), tags = list(type = "string")), ServiceUpdateStatus = structure(logical(0), tags = list(type = "string")), ServiceUpdateRecommendedApplyByDate = structure(logical(0), tags = list(type = "timestamp")), ServiceUpdateType = structure(logical(0), tags = list(type = "string")), UpdateActionAvailableDate = structure(logical(0), tags = list(type = "timestamp")), UpdateActionStatus = structure(logical(0), tags = list(type = "string")), NodesUpdated = structure(logical(0), tags = list(type = "string")), UpdateActionStatusModifiedDate = structure(logical(0), tags = list(type = "timestamp")), SlaMet = structure(logical(0), tags = list(type = "string")), NodeGroupUpdateStatus = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), NodeGroupMemberUpdateStatus = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), NodeUpdateStatus = structure(logical(0), tags = list(type = "string")), NodeDeletionDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateStartDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateEndDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateInitiatedBy = structure(logical(0), tags = list(type = "string")), NodeUpdateInitiatedDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateStatusModifiedDate = structure(logical(0), tags = list(type = "timestamp"))), tags 
= list(locationName = "NodeGroupMemberUpdateStatus", type = "structure"))), tags = list(locationNameList = "NodeGroupMemberUpdateStatus", type = "list"))), tags = list(locationName = "NodeGroupUpdateStatus", type = "structure"))), tags = list(locationNameList = "NodeGroupUpdateStatus", type = "list")), CacheNodeUpdateStatus = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), NodeUpdateStatus = structure(logical(0), tags = list(type = "string")), NodeDeletionDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateStartDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateEndDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateInitiatedBy = structure(logical(0), tags = list(type = "string")), NodeUpdateInitiatedDate = structure(logical(0), tags = list(type = "timestamp")), NodeUpdateStatusModifiedDate = structure(logical(0), tags = list(type = "timestamp"))), tags = list(locationName = "CacheNodeUpdateStatus", type = "structure"))), tags = list(locationNameList = "CacheNodeUpdateStatus", type = "list")), EstimatedUpdateTime = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "UpdateAction", type = "structure"))), tags = list(locationNameList = "UpdateAction", type = "list"))), tags = list(type = "structure", resultWrapper = "DescribeUpdateActionsResult"))
return(populate(args, shape))
}
.elasticache$describe_user_groups_input <- function(...) {
# Request shape for DescribeUserGroups: optional group id filter plus
# pagination controls (MaxRecords, Marker).
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    UserGroupId = scalar("string"),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_user_groups_output <- function(...) {
# Response shape for DescribeUserGroups: user-group records (member user
# ids, pending add/remove changes, attached replication groups, ARN)
# followed by a pagination Marker.
args <- c(as.list(environment()), list(...))
shape <- structure(list(UserGroups = structure(list(structure(list(UserGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), UserIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), PendingChanges = structure(list(UserIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), ReplicationGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "DescribeUserGroupsResult"))
return(populate(args, shape))
}
.elasticache$describe_users_input <- function(...) {
# Request shape for DescribeUsers: engine and user-id filters, a list of
# name/values Filter structures, and pagination controls.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
# Plain (untagged-member-name) list of scalars of the given wire type.
scalar_list <- function(wire_type) {
  structure(list(scalar(wire_type)), tags = list(type = "list"))
}
shape <- structure(
  list(
    Engine = scalar("string"),
    UserId = scalar("string"),
    Filters = structure(
      list(
        structure(
          list(
            Name = scalar("string"),
            Values = scalar_list("string")
          ),
          tags = list(type = "structure")
        )
      ),
      tags = list(type = "list")
    ),
    MaxRecords = scalar("integer"),
    Marker = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$describe_users_output <- function(...) {
# Response shape for DescribeUsers: User records (status, engine, access
# string, group memberships, authentication details, ARN) followed by a
# pagination Marker.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Users = structure(list(structure(list(UserId = structure(logical(0), tags = list(type = "string")), UserName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), MinimumEngineVersion = structure(logical(0), tags = list(type = "string")), AccessString = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), Authentication = structure(list(Type = structure(logical(0), tags = list(type = "string")), PasswordCount = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), Marker = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "DescribeUsersResult"))
return(populate(args, shape))
}
.elasticache$disassociate_global_replication_group_input <- function(...) {
# Request shape for DisassociateGlobalReplicationGroup: the global group
# id plus the regional replication group (id and region) to detach.
params <- c(as.list(environment()), list(...))
# Scalar placeholder tagged with its AWS wire type.
scalar <- function(wire_type) {
  structure(logical(0), tags = list(type = wire_type))
}
shape <- structure(
  list(
    GlobalReplicationGroupId = scalar("string"),
    ReplicationGroupId = scalar("string"),
    ReplicationGroupRegion = scalar("string")
  ),
  tags = list(type = "structure")
)
populate(params, shape)
}
.elasticache$disassociate_global_replication_group_output <- function(...) {
# Response shape for DisassociateGlobalReplicationGroup: the updated
# GlobalReplicationGroup (engine details, remaining regional members,
# node-group slot layout, and encryption flags).
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "DisassociateGlobalReplicationGroupResult"))
return(populate(args, shape))
}
# Request shape for the ElastiCache FailoverGlobalReplicationGroup API call.
.elasticache$failover_global_replication_group_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata.
  op_shape <- structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), PrimaryRegion = structure(logical(0), tags = list(type = "string")), PrimaryReplicationGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache FailoverGlobalReplicationGroup API call.
.elasticache$failover_global_replication_group_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types, XML location names and the result wrapper element).
  op_shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "FailoverGlobalReplicationGroupResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache IncreaseNodeGroupsInGlobalReplicationGroup API call.
.elasticache$increase_node_groups_in_global_replication_group_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and XML location names).
  op_shape <- structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), NodeGroupCount = structure(logical(0), tags = list(type = "integer")), RegionalConfigurations = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), ReshardingConfiguration = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), PreferredAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list"))), tags = list(locationName = "ReshardingConfiguration", type = "structure"))), tags = list(locationNameList = "ReshardingConfiguration", type = "list"))), tags = list(locationName = "RegionalConfiguration", type = "structure"))), tags = list(locationNameList = "RegionalConfiguration", type = "list")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache IncreaseNodeGroupsInGlobalReplicationGroup API call.
.elasticache$increase_node_groups_in_global_replication_group_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types, XML location names and the result wrapper element).
  op_shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "IncreaseNodeGroupsInGlobalReplicationGroupResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache IncreaseReplicaCount API call.
.elasticache$increase_replica_count_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and XML location names).
  op_shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), NewReplicaCount = structure(logical(0), tags = list(type = "integer")), ReplicaConfiguration = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), NewReplicaCount = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "PreferredAvailabilityZone", type = "string"))), tags = list(locationNameList = "PreferredAvailabilityZone", type = "list")), PreferredOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "PreferredOutpostArn", type = "string"))), tags = list(locationNameList = "PreferredOutpostArn", type = "list"))), tags = list(locationName = "ConfigureShard", type = "structure"))), tags = list(locationNameList = "ConfigureShard", type = "list")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache IncreaseReplicaCount API call.
.elasticache$increase_replica_count_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template (copied verbatim from the service
  # definition); tags carry wire types, XML location names and the
  # result wrapper element.
  op_shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
"string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "IncreaseReplicaCountResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ListAllowedNodeTypeModifications API call.
.elasticache$list_allowed_node_type_modifications_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata.
  op_shape <- structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ReplicationGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ListAllowedNodeTypeModifications API call.
.elasticache$list_allowed_node_type_modifications_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and the result wrapper element).
  op_shape <- structure(list(ScaleUpModifications = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ScaleDownModifications = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure", resultWrapper = "ListAllowedNodeTypeModificationsResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ListTagsForResource API call.
.elasticache$list_tags_for_resource_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata.
  op_shape <- structure(list(ResourceName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ListTagsForResource API call.
.elasticache$list_tags_for_resource_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types, XML location names and the result wrapper element).
  op_shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list"))), tags = list(type = "structure", resultWrapper = "ListTagsForResourceResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ModifyCacheCluster API call.
.elasticache$modify_cache_cluster_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template (copied verbatim from the service
  # definition); tags carry wire types and XML location names.
  op_shape <- structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), AZMode = structure(logical(0), tags = list(type = "string")), NewAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "PreferredAvailabilityZone", type = "string"))), tags = list(locationNameList = "PreferredAvailabilityZone", type = "list")), CacheSecurityGroupNames = structure(list(structure(logical(0), tags = list(locationName = "CacheSecurityGroupName", type = "string"))), tags = list(locationNameList = "CacheSecurityGroupName", type = "list")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(locationName = "SecurityGroupId", type = "string"))), tags = list(locationNameList = "SecurityGroupId", type = "list")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), NotificationTopicArn = structure(logical(0), tags = list(type = "string")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), NotificationTopicStatus = structure(logical(0), tags = list(type = "string")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean")), EngineVersion = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthToken = structure(logical(0), tags = list(type = "string")), AuthTokenUpdateStrategy = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = 
structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Enabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(locationName = "LogDeliveryConfigurationRequest", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfigurationRequest", type = "list")), IpDiscovery = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ModifyCacheCluster API call.
.elasticache$modify_cache_cluster_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template (copied verbatim from the service
  # definition); tags carry wire types, XML location names and the
  # result wrapper element.
  op_shape <- structure(list(CacheCluster = structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ClientDownloadLandingPage = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheClusterStatus = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = 
structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NotificationConfiguration = structure(list(TopicArn = structure(logical(0), tags = list(type = "string")), TopicStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CacheSecurityGroups = structure(list(structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure"))), tags = list(locationNameList = "CacheSecurityGroup", type = "list")), CacheParameterGroup = structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterApplyStatus = structure(logical(0), tags = list(type = "string")), CacheNodeIdsToReboot = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list"))), tags = list(type = "structure")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheNodes = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), CacheNodeStatus = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), Endpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ParameterGroupStatus = 
structure(logical(0), tags = list(type = "string")), SourceCacheNodeId = structure(logical(0), tags = list(type = "string")), CustomerAvailabilityZone = structure(logical(0), tags = list(type = "string")), CustomerOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNode", type = "structure"))), tags = list(locationNameList = "CacheNode", type = "list")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SecurityGroups = structure(list(structure(list(SecurityGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string")), ReplicationGroupLogDeliveryEnabled = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = 
structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "ModifyCacheClusterResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ModifyCacheParameterGroup API call.
.elasticache$modify_cache_parameter_group_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and XML location names).
  op_shape <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterNameValues = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), ParameterValue = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ParameterNameValue", type = "structure"))), tags = list(locationNameList = "ParameterNameValue", type = "list"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ModifyCacheParameterGroup API call.
.elasticache$modify_cache_parameter_group_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and the result wrapper element).
  op_shape <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "ModifyCacheParameterGroupResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ModifyCacheSubnetGroup API call.
.elasticache$modify_cache_subnet_group_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types and XML location names).
  op_shape <- structure(list(CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupDescription = structure(logical(0), tags = list(type = "string")), SubnetIds = structure(list(structure(logical(0), tags = list(locationName = "SubnetIdentifier", type = "string"))), tags = list(locationNameList = "SubnetIdentifier", type = "list"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ModifyCacheSubnetGroup API call.
.elasticache$modify_cache_subnet_group_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types, XML location names and the result wrapper element).
  op_shape <- structure(list(CacheSubnetGroup = structure(list(CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheSubnetGroupDescription = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), Subnets = structure(list(structure(list(SubnetIdentifier = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(list(Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE)), SubnetOutpost = structure(list(SubnetOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SupportedNetworkTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(locationName = "Subnet", type = "structure"))), tags = list(locationNameList = "Subnet", type = "list")), ARN = structure(logical(0), tags = list(type = "string")), SupportedNetworkTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "ModifyCacheSubnetGroupResult"))
  populate(call_args, op_shape)
}
# Request shape for the ElastiCache ModifyGlobalReplicationGroup API call.
.elasticache$modify_global_replication_group_input <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata.
  op_shape <- structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), AutomaticFailoverEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
  populate(call_args, op_shape)
}
# Response shape for the ElastiCache ModifyGlobalReplicationGroup API call.
.elasticache$modify_global_replication_group_output <- function(...) {
  # Capture supplied arguments before any locals exist, so the template
  # variable below is not swept into as.list(environment()).
  call_args <- c(as.list(environment()), list(...))
  # Generated wire-format template; tags carry serialization metadata
  # (wire types, XML location names and the result wrapper element).
  op_shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "ModifyGlobalReplicationGroupResult"))
  populate(call_args, op_shape)
}
# Auto-generated request shape for the ElastiCache ModifyReplicationGroup
# API call. Flat template of the modifiable replication-group settings
# (failover, security groups, snapshots, auth token, log delivery, etc.);
# populate() — defined elsewhere — presumably fills it from `...`.
# Do not edit by hand.
.elasticache$modify_replication_group_input <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), PrimaryClusterId = structure(logical(0), tags = list(type = "string")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverEnabled = structure(logical(0), tags = list(type = "boolean")), MultiAZEnabled = structure(logical(0), tags = list(type = "boolean")), NodeGroupId = structure(logical(0), tags = list(deprecated = TRUE, type = "string")), CacheSecurityGroupNames = structure(list(structure(logical(0), tags = list(locationName = "CacheSecurityGroupName", type = "string"))), tags = list(locationNameList = "CacheSecurityGroupName", type = "list")), SecurityGroupIds = structure(list(structure(logical(0), tags = list(locationName = "SecurityGroupId", type = "string"))), tags = list(locationNameList = "SecurityGroupId", type = "list")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), NotificationTopicArn = structure(logical(0), tags = list(type = "string")), CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), NotificationTopicStatus = structure(logical(0), tags = list(type = "string")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean")), EngineVersion = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthToken = structure(logical(0), tags = list(type = "string")), AuthTokenUpdateStrategy = structure(logical(0), tags = list(type = "string")), UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), 
    UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RemoveUserGroups = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string")), Enabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(locationName = "LogDeliveryConfigurationRequest", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfigurationRequest", type = "list")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Auto-generated response shape for the ElastiCache ModifyReplicationGroup
# API call. Describes the full ReplicationGroup result (pending modified
# values, node groups and their members, endpoints, encryption/auth flags,
# log delivery configurations); populate() — defined elsewhere — presumably
# fills it from `...`. Do not edit by hand.
.elasticache$modify_replication_group_output <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "ModifyReplicationGroupResult"))
  return(populate(args, shape))
}
# Auto-generated request shape for the ElastiCache
# ModifyReplicationGroupShardConfiguration API call: target shard count,
# resharding configuration, and the node groups to remove or retain.
# populate() — defined elsewhere — presumably fills it from `...`.
# Do not edit by hand.
.elasticache$modify_replication_group_shard_configuration_input <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), NodeGroupCount = structure(logical(0), tags = list(type = "integer")), ApplyImmediately = structure(logical(0), tags = list(type = "boolean")), ReshardingConfiguration = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), PreferredAvailabilityZones = structure(list(structure(logical(0), tags = list(locationName = "AvailabilityZone", type = "string"))), tags = list(locationNameList = "AvailabilityZone", type = "list"))), tags = list(locationName = "ReshardingConfiguration", type = "structure"))), tags = list(locationNameList = "ReshardingConfiguration", type = "list")), NodeGroupsToRemove = structure(list(structure(logical(0), tags = list(locationName = "NodeGroupToRemove", type = "string"))), tags = list(locationNameList = "NodeGroupToRemove", type = "list")), NodeGroupsToRetain = structure(list(structure(logical(0), tags = list(locationName = "NodeGroupToRetain", type = "string"))), tags = list(locationNameList = "NodeGroupToRetain", type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Auto-generated response shape for the ElastiCache
# ModifyReplicationGroupShardConfiguration API call. Same ReplicationGroup
# result structure as modify_replication_group_output, but wrapped in
# "ModifyReplicationGroupShardConfigurationResult". populate() — defined
# elsewhere — presumably fills it from `...`. Do not edit by hand.
.elasticache$modify_replication_group_shard_configuration_output <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "ModifyReplicationGroupShardConfigurationResult"))
  return(populate(args, shape))
}
# Request shape for the ElastiCache ModifyUser API call: user id, access
# string, passwords, and the authentication mode. The template is handed
# to populate(), which merges in the caller-supplied values from `...`.
.elasticache$modify_user_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  # Reusable leaf templates.
  string_ <- structure(logical(0), tags = list(type = "string"))
  password_list <- structure(list(string_), tags = list(type = "list"))
  auth_mode <- structure(
    list(Type = string_, Passwords = password_list),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserId = string_,
      AccessString = string_,
      AppendAccessString = string_,
      Passwords = password_list,
      NoPasswordRequired = structure(logical(0), tags = list(type = "boolean")),
      AuthenticationMode = auth_mode
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the ElastiCache ModifyUser API call: the modified
# user's identity, status, engine, access string, group memberships, and
# authentication summary, wrapped in "ModifyUserResult". populate() merges
# the values from `...` into this template.
.elasticache$modify_user_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  # Reusable leaf template.
  string_ <- structure(logical(0), tags = list(type = "string"))
  authentication <- structure(
    list(
      Type = string_,
      PasswordCount = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserId = string_,
      UserName = string_,
      Status = string_,
      Engine = string_,
      MinimumEngineVersion = string_,
      AccessString = string_,
      UserGroupIds = structure(list(string_), tags = list(type = "list")),
      Authentication = authentication,
      ARN = string_
    ),
    tags = list(type = "structure", resultWrapper = "ModifyUserResult")
  )
  populate(params, shape)
}
# Request shape for the ElastiCache ModifyUserGroup API call: the group id
# plus the user ids to add and remove. populate() merges the values from
# `...` into this template.
.elasticache$modify_user_group_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  # Reusable leaf templates.
  string_ <- structure(logical(0), tags = list(type = "string"))
  string_list <- structure(list(string_), tags = list(type = "list"))
  shape <- structure(
    list(
      UserGroupId = string_,
      UserIdsToAdd = string_list,
      UserIdsToRemove = string_list
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Response shape for the ElastiCache ModifyUserGroup API call: group
# status, current user ids, pending membership changes, and the attached
# replication groups, wrapped in "ModifyUserGroupResult". populate()
# merges the values from `...` into this template.
.elasticache$modify_user_group_output <- function(...) {
  params <- c(as.list(environment()), list(...))
  # Reusable leaf templates.
  string_ <- structure(logical(0), tags = list(type = "string"))
  string_list <- structure(list(string_), tags = list(type = "list"))
  pending_changes <- structure(
    list(UserIdsToRemove = string_list, UserIdsToAdd = string_list),
    tags = list(type = "structure")
  )
  shape <- structure(
    list(
      UserGroupId = string_,
      Status = string_,
      Engine = string_,
      UserIds = string_list,
      MinimumEngineVersion = string_,
      PendingChanges = pending_changes,
      ReplicationGroups = string_list,
      ARN = string_
    ),
    tags = list(type = "structure", resultWrapper = "ModifyUserGroupResult")
  )
  populate(params, shape)
}
# Request shape for the ElastiCache PurchaseReservedCacheNodesOffering API
# call: offering id, optional reservation id, node count, and tags.
# populate() merges the values from `...` into this template.
.elasticache$purchase_reserved_cache_nodes_offering_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  # Reusable leaf templates.
  string_ <- structure(logical(0), tags = list(type = "string"))
  tag_shape <- structure(
    list(Key = string_, Value = string_),
    tags = list(locationName = "Tag", type = "structure")
  )
  shape <- structure(
    list(
      ReservedCacheNodesOfferingId = string_,
      ReservedCacheNodeId = string_,
      CacheNodeCount = structure(logical(0), tags = list(type = "integer")),
      Tags = structure(
        list(tag_shape),
        tags = list(locationNameList = "Tag", type = "list")
      )
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Auto-generated response shape for the ElastiCache
# PurchaseReservedCacheNodesOffering API call: the purchased
# ReservedCacheNode (pricing, duration, state, recurring charges),
# wrapped in "PurchaseReservedCacheNodesOfferingResult". populate() —
# defined elsewhere — presumably fills it from `...`. Do not edit by hand.
.elasticache$purchase_reserved_cache_nodes_offering_output <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReservedCacheNode = structure(list(ReservedCacheNodeId = structure(logical(0), tags = list(type = "string")), ReservedCacheNodesOfferingId = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), StartTime = structure(logical(0), tags = list(type = "timestamp")), Duration = structure(logical(0), tags = list(type = "integer")), FixedPrice = structure(logical(0), tags = list(type = "double")), UsagePrice = structure(logical(0), tags = list(type = "double")), CacheNodeCount = structure(logical(0), tags = list(type = "integer")), ProductDescription = structure(logical(0), tags = list(type = "string")), OfferingType = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string")), RecurringCharges = structure(list(structure(list(RecurringChargeAmount = structure(logical(0), tags = list(type = "double")), RecurringChargeFrequency = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "RecurringCharge", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "RecurringCharge", type = "list")), ReservationARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "PurchaseReservedCacheNodesOfferingResult"))
  return(populate(args, shape))
}
# Request shape for the ElastiCache RebalanceSlotsInGlobalReplicationGroup
# API call: just the global replication group id and the apply-immediately
# flag. populate() merges the values from `...` into this template.
.elasticache$rebalance_slots_in_global_replication_group_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  shape <- structure(
    list(
      GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")),
      ApplyImmediately = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Auto-generated response shape for the ElastiCache
# RebalanceSlotsInGlobalReplicationGroup API call: the GlobalReplicationGroup
# result (members, node groups, encryption flags), wrapped in
# "RebalanceSlotsInGlobalReplicationGroupResult". populate() — defined
# elsewhere — presumably fills it from `...`. Do not edit by hand.
.elasticache$rebalance_slots_in_global_replication_group_output <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalReplicationGroup = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupDescription = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), Members = structure(list(structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), ReplicationGroupRegion = structure(logical(0), tags = list(type = "string")), Role = structure(logical(0), tags = list(type = "string")), AutomaticFailover = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalReplicationGroupMember", type = "structure", wrapper = TRUE))), tags = list(locationNameList = "GlobalReplicationGroupMember", type = "list")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), GlobalNodeGroups = structure(list(structure(list(GlobalNodeGroupId = structure(logical(0), tags = list(type = "string")), Slots = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "GlobalNodeGroup", type = "structure"))), tags = list(locationNameList = "GlobalNodeGroup", type = "list")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "RebalanceSlotsInGlobalReplicationGroupResult"))
  return(populate(args, shape))
}
# Request shape for the ElastiCache RebootCacheCluster API call: the
# cluster id and the list of cache node ids to reboot. populate() merges
# the values from `...` into this template.
.elasticache$reboot_cache_cluster_input <- function(...) {
  params <- c(as.list(environment()), list(...))
  node_id <- structure(
    logical(0),
    tags = list(locationName = "CacheNodeId", type = "string")
  )
  shape <- structure(
    list(
      CacheClusterId = structure(logical(0), tags = list(type = "string")),
      CacheNodeIdsToReboot = structure(
        list(node_id),
        tags = list(locationNameList = "CacheNodeId", type = "list")
      )
    ),
    tags = list(type = "structure")
  )
  populate(params, shape)
}
# Auto-generated response shape for the ElastiCache RebootCacheCluster API
# call. Describes the full CacheCluster result (endpoints, node details,
# pending modified values, security/parameter groups, log delivery
# configurations), wrapped in "RebootCacheClusterResult". populate() —
# defined elsewhere — presumably fills it from `...`. Do not edit by hand.
.elasticache$reboot_cache_cluster_output <- function(...) {
  # Collect any named arguments supplied by the caller.
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheCluster = structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ClientDownloadLandingPage = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), Engine = structure(logical(0), tags = list(type = "string")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheClusterStatus = structure(logical(0), tags = list(type = "string")), NumCacheNodes = structure(logical(0), tags = list(type = "integer")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CacheClusterCreateTime = structure(logical(0), tags = list(type = "timestamp")), PreferredMaintenanceWindow = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(NumCacheNodes = structure(logical(0), tags = list(type = "integer")), CacheNodeIdsToRemove = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list")), EngineVersion = structure(logical(0), tags = list(type = "string")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = 
    structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NotificationConfiguration = structure(list(TopicArn = structure(logical(0), tags = list(type = "string")), TopicStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CacheSecurityGroups = structure(list(structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheSecurityGroup", type = "structure"))), tags = list(locationNameList = "CacheSecurityGroup", type = "list")), CacheParameterGroup = structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ParameterApplyStatus = structure(logical(0), tags = list(type = "string")), CacheNodeIdsToReboot = structure(list(structure(logical(0), tags = list(locationName = "CacheNodeId", type = "string"))), tags = list(locationNameList = "CacheNodeId", type = "list"))), tags = list(type = "structure")), CacheSubnetGroupName = structure(logical(0), tags = list(type = "string")), CacheNodes = structure(list(structure(list(CacheNodeId = structure(logical(0), tags = list(type = "string")), CacheNodeStatus = structure(logical(0), tags = list(type = "string")), CacheNodeCreateTime = structure(logical(0), tags = list(type = "timestamp")), Endpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ParameterGroupStatus = 
    structure(logical(0), tags = list(type = "string")), SourceCacheNodeId = structure(logical(0), tags = list(type = "string")), CustomerAvailabilityZone = structure(logical(0), tags = list(type = "string")), CustomerOutpostArn = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "CacheNode", type = "structure"))), tags = list(locationNameList = "CacheNode", type = "list")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), SecurityGroups = structure(list(structure(list(SecurityGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ReplicationGroupId = structure(logical(0), tags = list(type = "string")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), ARN = structure(logical(0), tags = list(type = "string")), ReplicationGroupLogDeliveryEnabled = structure(logical(0), tags = list(type = "boolean")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = 
    structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "RebootCacheClusterResult"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache RemoveTagsFromResource
# request. Declares the wire shape and fills it from the caller's arguments.
.elasticache$remove_tags_from_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceName = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache RemoveTagsFromResource
# response (the remaining tag list).
.elasticache$remove_tags_from_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "Tag", type = "structure"))), tags = list(locationNameList = "Tag", type = "list"))), tags = list(type = "structure", resultWrapper = "RemoveTagsFromResourceResult"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache ResetCacheParameterGroup
# request (group name, reset-all flag, and individual parameter overrides).
.elasticache$reset_cache_parameter_group_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string")), ResetAllParameters = structure(logical(0), tags = list(type = "boolean")), ParameterNameValues = structure(list(structure(list(ParameterName = structure(logical(0), tags = list(type = "string")), ParameterValue = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "ParameterNameValue", type = "structure"))), tags = list(locationNameList = "ParameterNameValue", type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache ResetCacheParameterGroup
# response (just the group name).
.elasticache$reset_cache_parameter_group_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheParameterGroupName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", resultWrapper = "ResetCacheParameterGroupResult"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache
# RevokeCacheSecurityGroupIngress request.
.elasticache$revoke_cache_security_group_ingress_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupOwnerId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache
# RevokeCacheSecurityGroupIngress response (the updated security group).
.elasticache$revoke_cache_security_group_ingress_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(CacheSecurityGroup = structure(list(OwnerId = structure(logical(0), tags = list(type = "string")), CacheSecurityGroupName = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), EC2SecurityGroups = structure(list(structure(list(Status = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupName = structure(logical(0), tags = list(type = "string")), EC2SecurityGroupOwnerId = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "EC2SecurityGroup", type = "structure"))), tags = list(locationNameList = "EC2SecurityGroup", type = "list")), ARN = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "RevokeCacheSecurityGroupIngressResult"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache StartMigration request
# (replication group plus customer endpoints to migrate from).
.elasticache$start_migration_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), CustomerNodeEndpointList = structure(list(structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache StartMigration response.
# The shape mirrors the full ReplicationGroup API object; the deparse-style
# line wrapping below is produced by the paws code generator — do not edit
# by hand.
.elasticache$start_migration_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "StartMigrationResult"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache TestFailover request
# (replication group and the node group to fail over).
.elasticache$test_failover_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), NodeGroupId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Generated shape builder (paws SDK): ElastiCache TestFailover response.
# Same full ReplicationGroup shape as StartMigration, wrapped in
# TestFailoverResult; generator-produced line wrapping — do not edit by hand.
.elasticache$test_failover_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ReplicationGroup = structure(list(ReplicationGroupId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupInfo = structure(list(GlobalReplicationGroupId = structure(logical(0), tags = list(type = "string")), GlobalReplicationGroupMemberRole = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string")), PendingModifiedValues = structure(list(PrimaryClusterId = structure(logical(0), tags = list(type = "string")), AutomaticFailoverStatus = structure(logical(0), tags = list(type = "string")), Resharding = structure(list(SlotMigration = structure(list(ProgressPercentage = structure(logical(0), tags = list(type = "double"))), tags = list(type = "structure"))), tags = list(type = "structure")), AuthTokenStatus = structure(logical(0), tags = list(type = "string")), UserGroups = structure(list(UserGroupIdsToAdd = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), UserGroupIdsToRemove = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list", locationName = "PendingLogDeliveryConfiguration")), 
    TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), MemberClusters = structure(list(structure(logical(0), tags = list(locationName = "ClusterId", type = "string"))), tags = list(locationNameList = "ClusterId", type = "list")), NodeGroups = structure(list(structure(list(NodeGroupId = structure(logical(0), tags = list(type = "string")), Status = structure(logical(0), tags = list(type = "string")), PrimaryEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), ReaderEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Slots = structure(logical(0), tags = list(type = "string")), NodeGroupMembers = structure(list(structure(list(CacheClusterId = structure(logical(0), tags = list(type = "string")), CacheNodeId = structure(logical(0), tags = list(type = "string")), ReadEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PreferredAvailabilityZone = structure(logical(0), tags = list(type = "string")), PreferredOutpostArn = structure(logical(0), tags = list(type = "string")), CurrentRole = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "NodeGroupMember", type = "structure"))), tags = list(locationNameList = "NodeGroupMember", type = "list"))), tags = list(locationName = "NodeGroup", type = "structure"))), tags = list(locationNameList = "NodeGroup", type = "list")), SnapshottingClusterId = structure(logical(0), tags = list(type = "string")), 
    AutomaticFailover = structure(logical(0), tags = list(type = "string")), MultiAZ = structure(logical(0), tags = list(type = "string")), ConfigurationEndpoint = structure(list(Address = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), SnapshotRetentionLimit = structure(logical(0), tags = list(type = "integer")), SnapshotWindow = structure(logical(0), tags = list(type = "string")), ClusterEnabled = structure(logical(0), tags = list(type = "boolean")), CacheNodeType = structure(logical(0), tags = list(type = "string")), AuthTokenEnabled = structure(logical(0), tags = list(type = "boolean")), AuthTokenLastModifiedDate = structure(logical(0), tags = list(type = "timestamp")), TransitEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), AtRestEncryptionEnabled = structure(logical(0), tags = list(type = "boolean")), MemberClustersOutpostArns = structure(list(structure(logical(0), tags = list(locationName = "ReplicationGroupOutpostArn", type = "string"))), tags = list(locationNameList = "ReplicationGroupOutpostArn", type = "list")), KmsKeyId = structure(logical(0), tags = list(type = "string")), ARN = structure(logical(0), tags = list(type = "string")), UserGroupIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LogDeliveryConfigurations = structure(list(structure(list(LogType = structure(logical(0), tags = list(type = "string")), DestinationType = structure(logical(0), tags = list(type = "string")), DestinationDetails = structure(list(CloudWatchLogsDetails = structure(list(LogGroup = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), KinesisFirehoseDetails = structure(list(DeliveryStream = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), LogFormat = structure(logical(0), tags = list(type = 
    "string")), Status = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(locationName = "LogDeliveryConfiguration", type = "structure"))), tags = list(locationNameList = "LogDeliveryConfiguration", type = "list")), ReplicationGroupCreateTime = structure(logical(0), tags = list(type = "timestamp")), DataTiering = structure(logical(0), tags = list(type = "string")), AutoMinorVersionUpgrade = structure(logical(0), tags = list(type = "boolean")), NetworkType = structure(logical(0), tags = list(type = "string")), IpDiscovery = structure(logical(0), tags = list(type = "string")), TransitEncryptionMode = structure(logical(0), tags = list(type = "string")), ClusterMode = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure", wrapper = TRUE))), tags = list(type = "structure", resultWrapper = "TestFailoverResult"))
  return(populate(args, shape))
}
|
7b18c42370e97dfd5a672a9e76b70ea0dcfbc57e
|
7cbd54c390f57982bb0f81ae67351cf512f08ad1
|
/Programs/ExamplePipeline/GetQuadraticParametersDFE.R
|
35264d7f7ede628673d5dce748b6dff2157a423b
|
[] |
no_license
|
dortegadelv/HaplotypeDFEStandingVariation
|
ee9eaa9a44169523349bef09d836913221bf24cb
|
eb196acf6bbaa43f475f132b667f0f74b6f7cee4
|
refs/heads/master
| 2022-05-25T03:47:39.948444
| 2022-03-07T22:41:15
| 2022-03-07T22:41:15
| 108,029,910
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,308
|
r
|
GetQuadraticParametersDFE.R
|
# Combine per-category DFE probabilities with P(L | s) likelihoods across 600
# recombination-rate bins. Writes one weighted likelihood row per
# (DFE-table row, selection element) pair; the first 12 output rows repeat
# DFE rows 1-2 (two passes, as in the original layout of the output file).
Data <- read.table("DifRecRate/PLGivenSTableWithRecs4.txt")
DFETable <- read.table("DifRecRate/DFETableOfProbabilities.txt")
RecValuesOne <- read.table("DifRecRate/ResampledBpRecRatePerVariantNoCpGLeft.txt")
RecValuesTwo <- read.table("DifRecRate/ResampledBpRecRatePerVariantNoCpGRight.txt")
RecValues <- rbind(RecValuesOne, RecValuesTwo)

n_rec_bins <- 600
FullDataTable <- matrix(nrow = nrow(DFETable) * 6 + 12, ncol = n_rec_bins)

# Weighted likelihood row for DFE-table row `i` and selection element
# `Element`. Rows of `Data` are addressed as (203 - s_index) * 6 + Element
# + 12 (NB: `203 - 3:ncol(DFETable)` is vectorized subtraction); columns of
# `Data` are recombination-rate bins. Returns a 1 x n_rec_bins matrix.
compute_dfe_row <- function(i, Element) {
  Subset <- DFETable[i, 3:ncol(DFETable)]
  row_idx <- (203 - 3:ncol(DFETable)) * 6 + Element + 12
  # Preallocate instead of growing with rbind (original was O(n^2)).
  Test <- matrix(NA_real_, nrow = n_rec_bins, ncol = length(row_idx))
  for (RecRate in seq_len(n_rec_bins)) {
    Test[RecRate, ] <- as.numeric(Data[row_idx, RecRate])
  }
  t(Test %*% t(Subset))
}

TotalCount <- 1
# Pass 1: DFE rows 1-2 only (fills the 12 leading rows).
for (i in 1:2) {
  print(i)
  for (Element in 1:6) {
    FullDataTable[TotalCount, ] <- compute_dfe_row(i, Element)
    TotalCount <- TotalCount + 1
  }
}
# Pass 2: every DFE row (identical computation, deduplicated via the helper).
for (i in seq_len(nrow(DFETable))) {
  print(i)
  for (Element in 1:6) {
    FullDataTable[TotalCount, ] <- compute_dfe_row(i, Element)
    TotalCount <- TotalCount + 1
  }
}

write.table(FullDataTable, file = "DifRecRate/PLGivenSTableWithRecsFirstDFE.txt",
            row.names = FALSE, col.names = FALSE)
|
f17e75d75c2ae0669cd590a006007253e1dd955c
|
85d8f91b58f912130362bd6415bbdb5e2e0cc7c0
|
/man/print.ConfusionMatrix.Rd
|
d1d57e37874a27dcafdb03f932ffa2f3a5bfcd11
|
[] |
no_license
|
Displayr/flipRegression
|
c8ab22ffc875ca09deac2ec01ffaf5371501c860
|
871819d800ebb24a7331336bd4cfea24b35afb48
|
refs/heads/master
| 2023-08-21T21:39:02.916680
| 2023-07-19T05:50:48
| 2023-07-19T05:50:48
| 59,715,681
| 7
| 5
| null | 2023-08-03T07:19:54
| 2016-05-26T03:09:43
|
R
|
UTF-8
|
R
| false
| true
| 460
|
rd
|
print.ConfusionMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confusion.R
\name{print.ConfusionMatrix}
\alias{print.ConfusionMatrix}
\title{\code{print.ConfusionMatrix}}
\usage{
\method{print}{ConfusionMatrix}(x, ...)
}
\arguments{
\item{x}{An object of class \code{\link{ConfusionMatrix}}.}
\item{...}{Further arguments, currently unused.}
}
\description{
\code{print.ConfusionMatrix}
}
\details{
Displays a confusion matrix as a heatmap.
}
|
25319dd2c82fc11245b283fd2c2e8c4f8bfff520
|
64098b83f218221064dacb4307f9b844e9a70373
|
/man/LL.Rd
|
966fc836139aeb779813b1a4b0ea6ba278569dce
|
[
"MIT"
] |
permissive
|
takuizum/irtfun2
|
07800c5e6abeb9eb1892724582be7b9ed2202387
|
def9eac15a1150804f3702cf3f84df1c638a1c38
|
refs/heads/master
| 2021-07-19T00:29:21.794826
| 2020-05-06T09:28:05
| 2020-05-06T09:28:05
| 151,583,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 497
|
rd
|
LL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estheta.R
\name{LL}
\alias{LL}
\title{The log likelihood function of IRT 1~3PLM}
\usage{
LL(u, theta, a, b, c, D)
}
\arguments{
\item{u}{the item response pattern}
\item{theta}{the person ability parameter}
\item{a}{the slope parameter}
\item{b}{the location parameter}
\item{c}{the guessing parameter}
\item{D}{a scale constant}
}
\description{
The log likelihood function of IRT 1~3PLM
}
|
57b3fa81605b6c2876be420dbfac9f38d991f734
|
bcc81487edbe00a92f5a076bef6ab70dea20c4c8
|
/matplot (1).R
|
ec77f7a624ca7e71781099f3d2176dc48404cacb
|
[] |
no_license
|
ismailhm/R-programming
|
625aa49a75d303bef93a0bf0993fa095c7d1c6b0
|
1b62c7859d241a799bf0b3b98c2906df148a046d
|
refs/heads/master
| 2020-04-20T16:42:19.928870
| 2019-02-03T16:52:31
| 2019-02-03T16:52:31
| 168,966,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
matplot (1).R
|
# Explore per-game / per-attempt basketball statistics with matplot().
# Relies on course-provided globals: FieldGoals, Games, FieldGoalAttempts,
# Salary, Points (player x season matrices) and Players (player names) —
# assumed loaded beforehand; TODO confirm against the course setup script.
?matplot()
# matplot() plots a matrix column-wise; t() transposes so that seasons run
# along the x-axis and each player becomes one line.
FieldGoals
# t() gives the transpose matrix
t(FieldGoals)

# Plot t(mat) with one line per player plus the standard legend.
# Factored out: the original repeated this matplot()+legend() pair five times.
plot_players <- function(mat) {
  matplot(t(mat), type = "b", pch = 15:18, col = c(1:4, 6))
  legend("bottomleft", inset = 0.01, legend = Players,
         pch = 15:18, col = c(1:4, 6), horiz = FALSE)
}

plot_players(FieldGoals / Games)              # field goals per game
plot_players(FieldGoals / FieldGoalAttempts)  # shooting accuracy
plot_players(Salary / FieldGoals)             # salary per field goal
plot_players(Salary / Games)                  # salary per game
plot_players(Points / Games)                  # points per game
|
5182c08b9a859ba6fd5a7aa7d5a3e4df2e9ec0df
|
d2722569c550481b2d92cd1dfc0b472b3cb5ada5
|
/scripts/deseq_init.R
|
1e2eab551b623c5d723ffe233fe708f27a1b43ee
|
[] |
no_license
|
moritzschaefer/cuttag
|
3f7ad62688951f17ea2f5236fd186f21c4c39fd1
|
1fb6353ee9eff988ab458b1984255995fae0c440
|
refs/heads/master
| 2023-02-01T23:03:45.520671
| 2020-12-16T14:16:21
| 2020-12-16T14:16:21
| 302,466,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,156
|
r
|
deseq_init.R
|
# From Step 28 of the CUT&Tag data processing tutorial
# (https://www.protocols.io/view/cut-amp-tag-data-processing-and-analysis-tutorial-bjk2kkye.html)
# Builds a master peak list across samples, counts fragments per peak, and
# fits a DESeq2 model controlling for batch. Driven by snakemake@input/
# @output/@params.
library(DESeq2)  # differential analysis (was loaded twice in the original)
library(GenomicRanges)
library(dplyr)
library(chromVAR)

# --- Master peak list: merge peaks called for each sample (incl. replicates)
masterPeak <- GRanges()
for (peaks in snakemake@input[["peaks"]]) {
  peakRes <- read.table(peaks, header = FALSE, fill = TRUE)
  masterPeak <- GRanges(seqnames = peakRes$V1,
                        IRanges(start = peakRes$V2, end = peakRes$V3),
                        strand = "*") %>% append(masterPeak, .)
}
masterPeak <- reduce(masterPeak)

# Save the merged peak list as a BED-like table.
masterPeakDf <- data.frame(
  seqnames = seqnames(masterPeak),
  starts   = start(masterPeak),
  # paste0 is vectorized — no sapply(1:length(...)) needed
  names    = NULL -> .ignore_placeholder,  # (removed; see below)
  ends     = end(masterPeak)
)
# NOTE: build the data frame in one call to keep column order stable.
masterPeakDf <- data.frame(
  seqnames = seqnames(masterPeak),
  starts   = start(masterPeak),
  ends     = end(masterPeak),
  names    = paste0("masterPeak_", seq_along(masterPeak)),
  scores   = rep(".", length(masterPeak)),
  strands  = strand(masterPeak)
)
write.table(masterPeakDf, file = snakemake@output[["master_peaks"]],
            quote = FALSE, sep = "\t", row.names = FALSE, col.names = FALSE)

# --- Fragment counts for each peak in each BAM ------------------------------
bams <- snakemake@input[["bams"]]
countMat <- matrix(NA, length(masterPeak), length(bams))
for (i in seq_along(bams)) {
  fragment_counts <- getCounts(bams[[i]], masterPeak, paired = TRUE,
                               by_rg = FALSE, format = "bam")
  countMat[, i] <- counts(fragment_counts)[, 1]
}
colnames(countMat) <- snakemake@params[["sample_names"]]

# --- Depth normalization and differential peak detection with DESeq2 --------
selectR <- which(rowSums(countMat) > 5)  # drop low-count peaks
dataS <- countMat[selectR, ]
condition <- snakemake@params[["condition_names"]]
batch <- snakemake@params[["batch_numbers"]]
colData <- DataFrame(condition, batch)
save.image()  # NOTE(review): looks like a debugging leftover — consider removing
dds <- DESeqDataSetFromMatrix(countData = dataS,
                              colData = colData,
                              design = ~ batch + condition)  # control for batch
dds <- DESeq(dds)
saveRDS(dds, file = snakemake@output[["rds"]])
|
094384983417965276c182482c7e0c7cbdd810a1
|
381817806885ab9cad39cf6c845fc94fad9b69f7
|
/server.R
|
0e0bc612210fad115b0f951cba5f57d1e641578b
|
[] |
no_license
|
josephuses/pse_stocks_calculator
|
9d32e2edb05cfdd42ff4be566a538b4d13c6600e
|
3f7fd2eb5ea630234f5257923726fad54b470e14
|
refs/heads/master
| 2020-12-14T09:49:18.281533
| 2017-11-13T09:01:56
| 2017-11-13T09:01:56
| 95,448,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
server.R
|
# Server logic for the PSE stocks calculator Shiny app.
# Shiny basics: http://shiny.rstudio.com
library(shiny)
source("helpers.R")

shinyServer(function(input, output) {
  # Reactive accessors for the user's trade inputs.
  selected_stock  <- reactive(input$stock)
  purchase_price  <- reactive(input$buy_price)
  purchase_volume <- reactive(input$buy_vol)
  selling_price   <- reactive(input$sell_price)
  selling_volume  <- reactive(input$sell_vol)

  # Caption shows the chosen stock symbol.
  output$caption <- renderText(selected_stock())

  # Buy/sell outcome table computed by helpers.R, shown to 4 decimal places.
  output$table <- renderTable(
    stock_compute(
      buy_price   = purchase_price(),
      buy_volume  = purchase_volume(),
      sell_price  = selling_price(),
      sell_volume = selling_volume()
    ),
    digits = 4
  )
})
|
ca32e7ef411c97cbac58826d6c35809ca7f05cd8
|
4eef3c0016ed9271e81c880e1b9585e0c8b7da3a
|
/man/add_plate.Rd
|
40a0e4032a3d702da3ac348b416a19ac627262e0
|
[] |
no_license
|
cran/plater
|
f5e75790d83c012824b6e7a82c474d03fb508608
|
dc8d65d32642f03005bcee1c677ff6169f343589
|
refs/heads/master
| 2022-03-10T13:13:43.312268
| 2022-02-11T18:40:02
| 2022-02-11T18:40:02
| 70,241,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,640
|
rd
|
add_plate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_plate.R
\name{add_plate}
\alias{add_plate}
\title{Read a plater-formatted file and combine it with an existing data frame.}
\usage{
add_plate(data, file, well_ids_column, sep = ",")
}
\arguments{
\item{data}{The data frame to merge the file into. Must contain a column with
well names.}
\item{file}{The path of a .csv file formatted as described in
\code{\link{read_plate}}.}
\item{well_ids_column}{The name of the column in \code{data} containing the
well IDs.}
\item{sep}{The character used to separate columns in the file (e.g. "," or ";")
Defaults to ",".}
}
\value{
Returns data as a tibble with as many new columns as plates in
\code{file}. Empty wells are indicated with NA.
}
\description{
Converts data from \code{plater} format to a data frame with one well
per row and merges it into an existing data frame by well name.
}
\details{
If data contains more wells than in \code{file}, NA will be added to the
merged column(s) for those wells. If the file contains more wells than
\code{data}, those wells will be added to the bottom of the result with NA
for the columns in \code{data}.
}
\examples{
# Part of the data is tidy
file <- system.file("extdata", "example-2-part-A.csv", package = "plater")
data <- read.csv(file)
# Part of the data is plate-shaped
plate_shaped <- system.file("extdata", "example-2-part-B.csv", package = "plater")
# Combine the two
data <- add_plate(
data = data,
file = plate_shaped,
well_ids_column = "Wells")
# Now data are tidy
head(data)
}
|
ff8baa720a4ef083c36910ad6b97cea47945e3e7
|
80dba8af3dc948d8e161ed84141f83598a625748
|
/R/effective_entropy.R
|
d5ae91efee3c46dc80b90ec3810450ae0135cb86
|
[
"MIT"
] |
permissive
|
simdiversity/entropy
|
4e937bd7f2b912c96c9a52cf2662676e5685bdda
|
bd6a8824f0b9d377ee592b8efcd01d1afa9d3a8e
|
refs/heads/master
| 2023-03-03T02:04:21.921738
| 2021-01-17T10:40:57
| 2021-01-17T10:40:57
| 259,862,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,885
|
r
|
effective_entropy.R
|
#' Compute effective entropy
#'
#' Scans a range of inverse temperatures (powers of 10 relative to the mean
#' dissimilarity Delta) and, for each, runs a soft-clustering EM iteration
#' (via `soft_clustering`) to obtain membership weights and the associated
#' entropies.
#'
#' @param D A dissimilarity matrix or a `dist` object.
#' @param f Weight vector (distribution over the observations).
#' @param Nloop Number of EM iterations per temperature.
#' @param Nfine Finesse: number of power-of-10 temperature steps.
#' @param pa Initial power of 10.
#' @param pb Final power of 10.
#' @return A list with numeric vectors `power`, `E` (effective entropy),
#'   `R` (reduced entropy), `HR` (group entropy), `Ty`, `banalities`,
#'   `beta`, `beta_rel`; a list `S` of similarity matrices; and a matrix
#'   `rho` of membership weights (one column per temperature).
#' @examples
#' M <- matrix(seq(9), nrow = 3)
#' f <- rowSums(M) / sum(M)
#' D <- dist(f * M)
#' effective_entropy(D, f)
#' @export
effective_entropy <- function(D, f,
                              Nloop = 4000, Nfine = 300,
                              pa = -4, pb = 3) {
  # Accept either a dist object or anything coercible to one; end with a
  # plain symmetric matrix.
  if (inherits(D, "dist")) {
    D <- as.matrix(D)
  } else {
    D <- as.matrix(stats::as.dist(D))
  }
  n <- nrow(D)

  # Evenly spaced powers of 10 for the relative inverse temperature.
  power_selection <- pa + (seq_len(Nfine) - 1) * (pb - pa) / (Nfine - 1)

  # Mean pairwise dissimilarity; fixes the absolute temperature scale.
  Delta <- as.numeric(0.5 * t(f) %*% D %*% f)

  iterations <- lapply(seq_along(power_selection), function(counter) {
    power <- power_selection[[counter]]
    beta_rel <- 10^power
    beta <- beta_rel / Delta        # inverse temperature
    S <- as.matrix(exp(-beta * D))  # similarity matrix
    b <- as.vector(S %*% f)         # banality

    # Initialise memberships concentrated on the most "central" observation
    # (the one closest to all others under f); much better than diag(n) at
    # high temperatures. eps10 keeps memberships strictly positive.
    index <- which.min(D %*% f)
    Z <- matrix(0, n, n)
    Z[, index] <- 1
    eps10 <- 1e-20
    Z <- eps10 * matrix(1, n, n) + (1 - eps10) * Z

    res <- soft_clustering(f, Z, S, Nloop)
    rho <- res$rho

    list(
      "power" = power,
      "rho" = rho,
      "E" = -sum(f * log(S %*% rho)),       # effective entropy
      "R" = -sum(f * log(S %*% f)),         # reduced entropy
      "HR" = -sum(rho * log(rho + 1e-13)),  # group entropy
      # NOTE(review): isFALSE(all.equal(...)) is a single logical, so this
      # is a 0/1 indicator, not a count — verify intent upstream.
      "Ty" = sum(isFALSE(all.equal(rho, 0))),
      "banalities" = b,
      "beta_rel" = beta_rel,
      "beta" = beta,
      "S" = S
    )
  })

  # Flatten the per-temperature results into vectors / a matrix / a list.
  result <- list()
  for (name in c("power", "E", "R", "HR", "Ty", "banalities",
                 "beta", "beta_rel")) {
    result[[name]] <- as.vector(do.call(
      rbind, lapply(iterations, function(el) el[[name]])
    ))
  }
  result$S <- lapply(iterations, function(el) el$S)
  result$rho <- do.call(cbind, lapply(iterations, function(el) el$rho))
  result
}
|
edce5d04e92a5e03be6e79e36e9e73c18ff0bfa0
|
5b0652e487cab9e57f3ae534265d69e17584bc88
|
/man/to_sound_selection.Rd
|
b62af6d8e35bb0625d344aa5786113be52945ab6
|
[] |
no_license
|
DanWoodrich/Rraven
|
192aa0f2c57a510016df549c1a5e85564bf9a8d2
|
845a07ea9f6e205d6e26ef4804955638e2206086
|
refs/heads/master
| 2020-03-30T05:18:41.909381
| 2018-09-28T20:50:36
| 2018-09-28T20:50:36
| 150,792,078
| 0
| 0
| null | 2018-09-28T20:36:49
| 2018-09-28T20:36:49
| null |
UTF-8
|
R
| false
| true
| 2,726
|
rd
|
to_sound_selection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_sound_selection.R
\name{to_sound_selection}
\alias{to_sound_selection}
\title{Convert Raven's selection table file to sound selection file}
\usage{
to_sound_selection(path = NULL, dest.path = NULL, recursive = FALSE,
parallel = 1, pb = TRUE, sound.file.path, sound.file.col)
}
\arguments{
\item{path}{A character string indicating the path of the directory in which to look for the 'Raven' selection (text) files.
If not provided (default) the function searches into the current working directory.}
\item{dest.path}{A character string indicating the path of the directory in which
sound selection tables will be saved.
If not provided (default) files will be save in the current directory.}
\item{recursive}{Logical. If \code{TRUE} the listing recurse into sub-directories.}
\item{parallel}{Numeric. Controls whether parallel computing is applied.
It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).}
\item{pb}{Logical argument to control progress bar. Default is \code{TRUE}.}
\item{sound.file.path}{A character string indicating the path of the
directory containing the sound file(s). This argument is required.}
\item{sound.file.col}{A character string with the name of the column containing the sound file names in
the selection text files. Required.}
}
\value{
Sound selection table file(s) saved in 'dest.path' or in the working
directory.
}
\description{
\code{to_sound_selection} converts Raven's selection table files into sound selection files
}
\details{
The function converts Raven's selection tables to sound selection tables.
Sound selection table is a more convenient format as it can be open directly in Raven (or drag-and-drop) and
will automatically open the associated sound file. Multiple files can be
simultaneously converted. Files must be in '.txt' format. Selection
files including data from multiple recordings can be converted only if all the
corresponding sound files are found in the same directory. Note that no data is
imported into the R environment.
}
\examples{
{
#load data
data(selection_files)
# set temporary directory
# setwd(tempdir())
# save 'Raven' selection tables in the temporary directory
out <- lapply(1:2, function(x)
writeLines(selection_files[[x]], con = names(selection_files)[x]))
# try drag and drop selection files into Raven (shouldn't work)
# now convert files
to_sound_selection(sound.file.path = getwd(),
sound.file.col = "Begin Path")
# try drag and drop into Raven again (should work now)
}
}
\seealso{
\code{\link{imp_syrinx}}; \code{\link{imp_raven}}
}
\author{
Marcelo Araya-Salas (\email{araya-salas@cornell.edu})
}
|
70e328bfe538b4c90821eb9b9d353e54c1459930
|
e43909be797573d17925701d9e95a287e7faac21
|
/man/Table.Rd
|
3cb74bbe549592760f649f3f6204e83025aba014
|
[
"MIT"
] |
permissive
|
SciViews/form.io
|
44143ffd8da93a65292b7e8e3ae78c0f9f1a336d
|
dbd7122377eb26de0e5080786f16f7854695d885
|
refs/heads/master
| 2020-08-03T20:15:03.889366
| 2020-04-27T22:23:59
| 2020-04-27T22:23:59
| 211,873,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,762
|
rd
|
Table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Table.R
\name{Table}
\alias{Table}
\alias{tab}
\alias{print.knitr_asis}
\title{Create a Table caption}
\usage{
Table(text = NULL, table = NULL, label = NULL, hold = NULL)
tab(..., hold = FALSE)
\method{print}{knitr_asis}(x, ..., sep = "\\n")
}
\arguments{
\item{text}{The text of the caption. It can contain Markdown formatting.}
\item{table}{The textual description of the table (in plain Markdown).}
\item{label}{The label to use (not required if the function is run from
within an R chunk because the chunk label is used by default).}
\item{hold}{Should we save the caption for later use, and return invisibly?
Default value is \code{FALSE} if \verb{table=} is provided or the function is \emph{not}
called from within an R chunk.}
\item{...}{Same arguments as for \code{\link[=Table]{Table()}}, or arguments to respective
\code{\link[=print]{print()}} methods.}
\item{x}{Any R object to print.}
\item{sep}{Separator to use for printing multiple strings.}
}
\value{
The caption with a code to number the table is returned (invisibly, if
\code{hold = TRUE}).
}
\description{
A caption for a table can be created in two ways: either directly when you
provide both \verb{text=} and \verb{table=}, or indirectly, when you call \code{\link[=Table]{Table()}}
within an R chunk with \verb{table=} not provided. In that case, the caption is
saved for later use, and \code{tab()} retrieves it when needed. The label of the
table is automatically set to the label of the chunk.
}
\examples{
Table("This is my caption", "
| x | y |
|:---:|:---:|
| 1 | 2 |
| 3 | 5 |
")
}
\author{
Philippe Grosjean
}
\concept{automatic numbering of items in documents}
\keyword{utilities}
|
2ce88a8d6d547ed27b7097c782a16a173af22f61
|
c2b24e8dcb0fa8b0e5d44002d9aa675ba968ad44
|
/Practice2/Task8.R
|
2684e3b281896a31ac41d3aa163df24dbacd30f4
|
[] |
no_license
|
thinkingabouther/DataAnalysis
|
00efcba3355baa7af2c8d22dbe4a81171d12af31
|
5c2a0eb98a3f12ae6ff536fb9856d6fe210cd515
|
refs/heads/master
| 2022-09-21T23:12:39.718162
| 2020-06-05T14:30:58
| 2020-06-05T14:30:58
| 255,110,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
Task8.R
|
# Solve the 2x2 linear system A %*% x = B:
#    x1 +    x2 =  100
# 50*x1 + 75*x2 = 6625
# matrix() fills column-wise, exactly like array(..., dim = c(2, 2)),
# so A is identical to the original construction.
A <- matrix(c(1, 50, 1, 75), nrow = 2, ncol = 2)
B <- c(100, 6625)
solve(A) %*% B
|
ebea9168b96e04d37bb9b9296cbe7da8aba78442
|
0fb33ca8eef07fcb5d3687f4cf2793ef187f79f4
|
/man/scoreFACIT_AI.Rd
|
68f19d1d0488618e37cb7d737c7ae8d59b7f0dc1
|
[
"MIT"
] |
permissive
|
raybaser/FACTscorer
|
e3c10b9a065cb5b6290b211519b72ed9171a1fc2
|
070a1cf479ee8c1f19bf6a295c2ed0d544ff6406
|
refs/heads/master
| 2022-03-16T20:20:29.198088
| 2022-03-12T09:42:36
| 2022-03-12T09:42:36
| 61,918,573
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,542
|
rd
|
scoreFACIT_AI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sx-scoreFACIT_AI.R
\name{scoreFACIT_AI}
\alias{scoreFACIT_AI}
\title{Score the FACIT-AI}
\usage{
scoreFACIT_AI(df, id = NULL, updateItems = FALSE, keepNvalid = FALSE)
}
\arguments{
\item{df}{A data frame with the questionnaire items, appropriately-named.}
\item{id}{(optional) The quoted name of a variable in \code{df} with a unique
value for each row of \code{df}. If an \code{id} variable is provided
here, it will also be included with the scale scores in the output data
frame. This can facilitate accurate merging of the scale scores back into
the input \code{df}.}
\item{updateItems}{(optional) Logical, if \code{TRUE} then updated versions
of the items (i.e., re-coded for score calculation) will be returned in the
output data frame with the scale scores. The default, \code{FALSE}, does
not save any updated versions of the items in the resulting data frame.
Most users will want to omit this argument or, equivalently, set it to
\code{FALSE}.}
\item{keepNvalid}{(optional) Logical, if \code{TRUE} then the output data
frame will have additional variables containing the number of valid,
non-missing responses from each respondent to the items on a given scale
(see Details). If \code{FALSE} (the default), these variables will not be
in the returned data frame. Most users will want to omit this argument
or, equivalently, set it to \code{FALSE}.}
}
\value{
A data frame containing the following scale score is returned:
\itemize{
\item \strong{FACIT_AI} - FACIT Ascites Index
}
If a variable was given to the \code{id} argument, then that variable will
also be in the returned data frame. Additional, relatively unimportant,
variables will be returned if \code{updateItems = TRUE} or \code{keepNvalid
= TRUE}.
}
\description{
Scores the Functional Assessment of Chronic Illness Therapy-Ascites Index
(FACIT-AI) from item responses.
}
\details{
Given a data frame that includes all of the FACIT-AI items as
variables, appropriately named, this function generates the FACIT-AI
scale score. It is crucial that the item variables in the supplied data
frame are named according to FACT conventions. For example, the first item
should be named GP1, the second GP2, and so on. Please refer to the
materials provided by \url{http://www.facit.org} for the particular
questionnaire you are using. In particular, refer to the left margin of the
official questionnaire (i.e., from facit.org) for the appropriate item
variable names.
For more details on the \code{updateItems} and \code{keepNvalid} arguments,
see the documentation entry for \code{\link{scoreFACTG}} and
\code{\link{FACTscorer}}.
}
\section{Note}{
Keep in mind that this function (and R in general) is
case-sensitive.
All items in \code{df} should be \code{numeric} (i.e., of type
\code{integer} or \code{double}).
This function expects missing item responses to be coded as \code{NA},
\code{8}, or \code{9}, and valid item responses to be coded as \code{0},
\code{1}, \code{2}, \code{3}, or \code{4}. Any other value for any of the
items will result in an error message and no scores.
}
\examples{
## FIRST creating a df with fake item data to score
itemNames <- c('C6', 'GF5', 'BMT5', 'B1', 'GP2', 'O2', 'ACT11', 'O1',
'GP1', 'ACT10', 'BL2', 'CX6', 'AI1')
exampleDat <- make_FACTdata(namesAC = itemNames, AConly = TRUE)
## NOW scoring the items in exampleDat
## Returns data frame with ONLY scale score
(scoredDat <- scoreFACIT_AI(exampleDat))
## Using the id argument (makes merging with original data more fool-proof):
(scoredDat <- scoreFACIT_AI(exampleDat, id = "ID"))
## Merge back with original data, exampleDat:
mergeDat <- merge(exampleDat, scoredDat, by = "ID")
names(mergeDat)
## Returns scale score, plus recoded items (updateItems = TRUE)
## Also illustrates effect of setting keepNvalid = TRUE.
scoredDat <- scoreFACIT_AI(exampleDat, updateItems = TRUE, keepNvalid = TRUE)
names(scoredDat)
## Descriptives of scored scales
summary(scoredDat['FACIT_AI'])
}
\references{
FACIT-AI Scoring Guidelines, available at
\url{http://www.facit.org}
}
\seealso{
For additional details on the function arguments, see
\code{\link{scoreFACTG}} and \code{\link{FACTscorer}}. For brevity,
examples are omitted below. However, this function is very similar to the
\code{\link{scoreFACTG7}} function. See the documentation for
\code{\link{scoreFACTG7}} for examples. Alternatively, examples for
\code{scoreFACIT_AI} can be accessed by running
\code{example(scoreFACIT_AI)}.
}
|
dba97c70e0416774e2b8f277db958d128bf96ade
|
172f6eb4329de933c90988cfb1fb04f0f1dcd3ca
|
/livestock_rasters.R
|
8cc036d2fe4fa24dc6c9a585efffb558803d3ef0
|
[] |
no_license
|
maggieklope/HWC
|
60ce400562cdd9f1e8ddaab95e70074b156b3c8e
|
586572f04518788b343597f003c9f8a4edb0497d
|
refs/heads/main
| 2023-07-29T22:56:33.625560
| 2021-09-20T20:30:30
| 2021-09-20T20:30:30
| 390,057,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,474
|
r
|
livestock_rasters.R
|
# =============================================================================
# livestock_rasters.R -- Maggie Klope (mmklope@ucsb.edu)
#
# Align an SDM prediction raster with an FAO livestock-abundance raster
# (matching CRS and resolution), then demonstrate thresholding livestock
# density to focus on high-abundance pixels.
#
# Inputs:
#   * example SDM prediction raster (.grd) from a Wallace run (demo data,
#     not meant for analysis)
#   * livestock abundance raster from
#     http://www.fao.org/livestock-systems/global-distributions/en/
# =============================================================================

# Packages ----
library(raster)
library(rgdal)

# Load rasters ----
# Example elephant habitat-suitability prediction exported from Wallace.
elephant_sdm <- raster("livestock/example_wallace_raster/layer.grd")
plot(elephant_sdm)
crs(elephant_sdm)  # +proj=longlat +datum=WGS84 +no_defs

# Global cattle density for 2010 (FAO; see link in the header).
cattle_density <- raster("livestock/fao_datasets/global_cattle/6_Ct_2010_Aw.tif")
plot(cattle_density)
crs(cattle_density)  # +proj=longlat +datum=WGS84 +no_defs

# The two layers are at different resolutions, so they cannot be combined yet.
res(cattle_density)  # 5 arc minutes
res(elephant_sdm)    # 2.5 arc minutes

# Crop and resample (bilinear) the cattle layer onto the SDM grid.
cattle_resampled <- projectRaster(cattle_density, elephant_sdm, method = "bilinear")
res(cattle_resampled)  # now 2.5 arc minutes

# The layers can now be plotted together.
plot(cattle_resampled, legend = FALSE)
plot(elephant_sdm, add = TRUE, legend = FALSE)

# Focus on high-density areas: keep pixels above a cutoff.  The 5000 value
# was chosen at random -- substitute an informative threshold for real work.
high_density <- cattle_resampled > 5000
cattle_high_only <- mask(cattle_resampled, high_density, maskvalue = 0)
plot(cattle_high_only)

# From here one could identify overlapping pixels, combine with land-use
# information, look at changes between current vs. predicted ranges, etc.
|
27040ca1bc34f09ff329649ab6eae717407847b9
|
357a66f0bbd70f2cdea9d530d57e4843d72866b7
|
/man/BAMD-package.Rd
|
dfba4d332e2d93e3be26d3550c1cf1f90aa97136
|
[] |
no_license
|
cran/BAMD
|
d2fa7391b51d2b0dde08141fcb764affb8c1bbdf
|
7fe4c58e3e98bb14533cdbb52133619740ad6c17
|
refs/heads/master
| 2016-09-16T12:31:00.669210
| 2011-06-30T00:00:00
| 2011-06-30T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,620
|
rd
|
BAMD-package.Rd
|
\name{BAMD-package}
\alias{BAMD-package}
\alias{BAMD}
\docType{package}
\title{
Bayesian Association Model for Genomic Data with Missing Covariates
}
\description{
This package fits the following linear mixed model
\deqn{ Y = X \beta + Z \gamma + \epsilon }
where the covariates
for the random effects (in the Z-matrix) have missing values.
The Z-matrix consists of Single Nucleotide Polymorphism (SNP) data and the
Y-vector contains the phenotypic trait of interest. The X-matrix typically
describes the family structure of the organisms.
}
\details{
\tabular{ll}{
Package: \tab BAMD\cr
Type: \tab Package\cr
Version: \tab 3.5\cr
Date: \tab 2011-06-30\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
There are two functions in this package. The first, \code{gibbsSampler}, will fit and
estimate the posterior parameters in the model above. This will allow the
experimenter to pick out which covariates were significantly non-zero, since
the routine will return \eqn{(1-\alpha)100\%} confidence intervals. The imputed
missing values at each iteration of the Gibbs sampler will be stored in a file for
use by the second function.
The second function, \code{variableSelector}, is a variable selector that will pick out the ``best''
model, as measured by the Bayes Factor, using a stochastic search algorithm.
}
\author{
Vik Gopal \email{viknesh@stat.ufl.edu}
Maintainer: Vik Gopal <viknesh@stat.ufl.edu>
}
\references{
Gopal, V. "BAMD User Manual"
\url{http://www.stat.ufl.edu/~viknesh/assoc_model/assoc.html}
}
\keyword{ package }
\seealso{
\code{\link{gibbsSampler}}, \code{\link{variableSelector}}
}
|
e3f588fe96cf26bf5a9c5bf0761c219db85945a5
|
f351ca83cbdb2f7c0b6f1451999ccf5c6a4cfbb7
|
/R/covered.call.R
|
8fc7c7773ad13fb4f31b0bd714e2e69c7b49d6da
|
[] |
no_license
|
IanMadlenya/FinancialMath
|
3dafdfe139f78c671d6e1c939cf4271d775c7917
|
7cf9133ac2bb76c1602e65492085b316d7641424
|
refs/heads/master
| 2021-01-23T12:37:56.309260
| 2016-12-16T21:51:34
| 2016-12-16T21:51:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,287
|
r
|
covered.call.R
|
covered.call <- function(S, K, r, t, sd, price = NA, plot = FALSE) {
  # Payoff/profit profile of a covered call (long stock + short call).
  #
  # If `price` is NA (default) the call premium is computed from the
  # Black-Scholes formula; otherwise the supplied premium is used.
  #
  # Args:
  #   S     stock price at position entry (> 0)
  #   K     strike price of the written call (> 0)
  #   r     continuously compounded risk-free rate (> 0)
  #   t     time to expiration in years (> 0)
  #   sd    annualized volatility (> 0)
  #   price optional call premium; NA -> use the Black-Scholes value
  #   plot  if TRUE, draw the payoff/profit diagram
  #
  # Returns:
  #   list(Payoff  = data.frame("Stock Price", "Payoff", "Profit"),
  #        Premium = call premium used)
  #
  # Input validation (same checks, in the same order, as before; the
  # positive/finite checks previously used stop(cat(...)), which printed the
  # text and then raised an error with an EMPTY message -- fixed to raise the
  # message properly).
  all_inputs <- list(S, K, r, t, sd, plot, price)
  if (any(vapply(all_inputs, is.null, logical(1)))) {
    stop("Cannot input any variables as NULL.")
  }
  if (any(vapply(all_inputs, length, integer(1)) != 1L)) {
    stop("All inputs must be of length 1.")
  }
  num_inputs <- list(S, K, r, t, sd, price)
  non_na <- num_inputs[!vapply(num_inputs, is.na, logical(1))]
  if (any(!vapply(non_na, is.numeric, logical(1)))) {
    stop("S, K, r, t, sd, and price must be numeric.")
  }
  no_na_allowed <- list(S, K, r, t, sd, plot)
  if (any(vapply(no_na_allowed, is.na, logical(1)))) {
    stop("Cannot input any variables, but price, as NA.")
  }
  stopifnot(is.logical(plot))
  vals <- c(S, K, r, t, sd, price)
  val_names <- c("S", "K", "r", "t", "sd", "price")
  present <- !is.na(vals)
  non_positive <- present & vals <= 0
  if (any(non_positive)) {
    stop(sprintf("'%s' must be positive real number(s).",
                 paste(val_names[non_positive], collapse = " & ")))
  }
  not_finite <- present & vals == Inf
  if (any(not_finite)) {
    stop(sprintf("'%s' cannot be infinite.",
                 paste(val_names[not_finite], collapse = " & ")))
  }

  # Call premium: Black-Scholes value unless one was supplied.
  if (is.na(price)) {
    d1 <- (log(S / K) + (r + sd^2 / 2) * t) / (sd * sqrt(t))
    d2 <- d1 - sd * sqrt(t)
    callP <- S * pnorm(d1) - K * exp(-r * t) * pnorm(d2)
  } else {
    callP <- price
  }

  # Terminal stock-price grid: 6 points on [0, K] plus 6 on [K, 2K],
  # rounded and de-duplicated (same grid as before).
  stock <- unique(round(c(seq(0, K, length.out = 6),
                          round(seq(K, 2 * K, length.out = 6)))))
  # Covered-call payoff is min(S_T, K); profit adds the future value of the
  # premium received and subtracts the initial stock cost.
  payoff <- pmin(stock, K)
  profit <- payoff + callP * exp(r * t) - S

  if (isTRUE(plot)) {
    plot(stock, profit, type = "l", xlab = "Stock Price",
         main = "Covered Call\nPayoff and Profit", ylab = "$",
         ylim = c(min(profit, payoff), max(profit, payoff)),
         xaxt = "n", yaxt = "n", lwd = 2, col = "steelblue")
    lines(stock, payoff, lty = 2, lwd = 2, col = "firebrick")
    abline(h = 0, lty = 2, col = "gray")
    y_ticks <- round(seq(min(payoff, profit), max(payoff, profit),
                         length.out = 8))
    axis(2, at = y_ticks, labels = y_ticks, las = 2)
    axis(1, at = stock)
    legend("topleft", c("Profit", "Payoff"), lty = c(1, 2),
           col = c("steelblue", "firebrick"), lwd = c(2, 2))
  }

  out_table <- data.frame(stock, payoff, profit)
  names(out_table) <- c("Stock Price", "Payoff", "Profit")
  list(Payoff = out_table, Premium = callP)
}
|
3b8a29a36b67b96cefe95e4a8dd506f3bd30a8d0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mcclust/examples/vi.dist.Rd.R
|
eb1e0d94868a39538430951c1fd1a54b3af0ff5c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
vi.dist.Rd.R
|
# Usage example for mcclust::vi.dist(), extracted from the package docs.
library(mcclust)
### Name: vi.dist
### Title: Variation of Information Distance for Clusterings
### Aliases: vi.dist
### Keywords: cluster

### ** Examples

# Two clusterings of 10 items into up to 3 clusters; cl2 shares its first
# five labels with cl1.  Unseeded RNG: results differ across runs.
cl1 <- sample(1:3,10,replace=TRUE)
cl2 <- c(cl1[1:5], sample(1:3,5,replace=TRUE))
# VI distance as a single number, and with parts=TRUE -- presumably the
# decomposition into its two conditional-entropy terms; see ?vi.dist.
vi.dist(cl1,cl2)
vi.dist(cl1,cl2, parts=TRUE)
|
74c09473d32d23ed518dacc72356bf9cbd75112d
|
5cf86249135481d3e8a6d8fe45cc8983f52fa6f6
|
/lin_reg.R
|
ff065b6348e7b100cbf96075839602faeb426bf1
|
[] |
no_license
|
emmaSkarstein/INLA_within_nimble
|
bf1a7eb875447cd15fa57ea343e16e0d8fcf29a0
|
15c398cf3a8184d0dae2ad488c18b1077e6a0ca7
|
refs/heads/main
| 2023-08-23T13:17:41.237448
| 2021-10-14T16:14:42
| 2021-10-14T16:14:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,104
|
r
|
lin_reg.R
|
#Packages needed to run the model
library(nimble)
library(INLA)
library(mvtnorm)
library(MASS)
library(parallel)
library(coda)
# function for generating samples
sample.linreg <- function() {
  # Simulate a 100-observation dataset from the linear model
  #   y = 3 + 2*x1 - 2*x2 + e,   with x1, x2 ~ U(0, 1) and e ~ N(0, 1).
  # The RNG draw order (runif, runif, rnorm) is kept so seeded runs
  # reproduce exactly what the original produced.
  # Returns list(y = numeric response of length 100, x = 100 x 2 matrix).
  n_obs <- 100
  cov1 <- runif(n_obs)
  cov2 <- runif(n_obs)
  noise <- rnorm(n_obs)
  response <- 3 + 2 * cov1 - 2 * cov2 + noise
  list(y = response, x = matrix(c(cov1, cov2), ncol = 2))
}
# Fit an intercept-only model (with x %*% beta held fixed as an offset) via
# INLA and return the POSTERIOR MEANS of the intercept and the observation
# precision.  As the original notes say, the full marginals (commented-out
# block at the bottom) are what is ultimately needed -- still to be sorted out.
#
# Args:
#   x     covariate matrix (n x p)
#   y     numeric response vector of length n
#   beta  coefficient vector of length p, treated as known
#
# Returns: c(intercept posterior mean, precision posterior mean).
fit.inla <- function(x, y, beta){
  data <- list(y=y, x=x)
  # Contribution of the candidate beta, held fixed via offset().
  data$oset = data$x %*% (beta)
  res = INLA::inla(y ~ 1 + offset(oset), data = data)
  # Posterior mean = E[x] functional of each marginal.
  intercept = INLA::inla.emarginal(function(x) x,res$marginals.fixed[[1]])
  # First hyperparameter of the Gaussian likelihood is the precision.
  precision = INLA::inla.emarginal(function(x) x,res$marginals.hyper[[1]])
  ret <- c(intercept, precision)
  return(ret)
  # Unreachable sketch of returning the full marginals instead of means:
  #list(#mlik = res$mlik,
  #dists = list(intercept = res$marginals.fixed[[1]],
  #             precision = res$marginals.hyperpar[[1]])))
}
# Smoke-test fit.inla() on simulated data.
# Fix: the original called fit.inla(df$x, df$y, beta) but `df` was never
# defined anywhere in the script; create it with sample.linreg() so the
# script runs top to bottom (`df` is also used by the nimble setup below).
beta <- c(0, 0)
df <- sample.linreg()
fit.inla(df$x, df$y, beta)
## Wrap fit.inla() so it can be called from NIMBLE code.
## Type declarations: x is a matrix (double(2)); y and beta are vectors
## (double(1)); the empty function body is only a signature -- Rfun names
## the R function actually dispatched to.
nimbleINLA<- nimbleRcall(function(x=double(2), y=double(1), beta=double(1)){}, Rfun="fit.inla",
returnType = double(1))
# Compile the wrapper for use inside the compiled model below.
CnimbleINLA <- compileNimble(nimbleINLA)
# Sanity check: the compiled wrapper should give the same results as
# fit.inla.  NOTE(review): requires `df` and `beta` from the smoke test above.
CnimbleINLA(df$x,df$y, beta)
# Alternative attempt: write the INLA call directly inside a nimbleFunction.
# NOTE(review): per the original author's comment this version "fails to use
# INLA" (run code cannot call library()/inla()); it is kept only as a record
# of the attempt and is never used below.
nimINLA <- nimbleFunction(
run=function(x=double(2),y=double(1), beta=double(1)){
library(INLA)
data <- nimbleList(y=y, x=x)
data$oset = data$x %*% (beta)
res = inla(y ~ 1 + offset(oset), data = data)
intercept = inla.emarginal(function(x) x,res$marginals.fixed[[1]])
precision = inla.emarginal(function(x) x,res$marginals.hyper[[1]])
ans <- nimC(intercept, precision)
return(ans)
returnType(double(1))
}
)
# BUGS-style model code for the NIMBLE MCMC.
code <- nimbleCode({
# Priors for the two slope coefficients.
# NOTE(review): in BUGS-style syntax dnorm's second argument is typically a
# precision, not an sd -- confirm this is the intended prior spread.
for(j in 1:2){
beta[j] ~ dnorm(0,5)
}
# Likelihood: the intercept and residual precision are supplied by the INLA
# fit (inla.res), the slopes by the sampled beta.
for(i in 1:N){
linpred[i] <- inla.res[1]+ beta[1]*x[i,1]+ beta[2]*x[i,2]
y[i] ~ dnorm(linpred[i],inla.res[2])
# Debug variant with fixed intercept/sd (the model works with these):
#linpred[i] <- 1+ beta[1]*x[i,1]+ beta[2]*x[i,2]
#y[i] ~ dnorm(linpred[i],sd=1 )
}
# Re-fit INLA at the current beta draw; returns c(intercept, precision).
inla.res[1:2] <- CnimbleINLA(x[1:N,1:2],y[1:N],beta[1:2])
})
## Parameterising the nimble model ----
# Data: response and covariate matrix (requires `df` from earlier in the script).
inla_data <- list(y=df$y,
x = df$x)
# Constants: number of observations.
const <- list(N = length(df$y))
# Initial values: slopes at 0, and a starting c(intercept, precision) pair
# for the INLA-derived node.
idm_inits <- function(){list(beta =c(0,0),
inla.res =c(1,0.7)
)
}
initsList <- idm_inits()
# Bundle of all model pieces.  NOTE(review): kept for reference only --
# nimbleModel() below is called with the individual pieces, not this list.
modelInfo <- list(
code = code,
constants = const,
data = inla_data,
inits = initsList
)
# Build the (uncompiled) nimble model from code + data + constants + inits.
mwtc <- nimbleModel(code,data = inla_data,
constants = const,
inits = initsList)
# Compile the model to C++.
Cmwtc <- compileNimble(mwtc,showCompilerOutput = FALSE) #Have issues compiling
# MCMC configuration: monitor the slopes and the INLA-derived node.
mcmcconf <- configureMCMC(Cmwtc, monitors = c("beta", "inla.res"))
Rmcmc <- buildMCMC(mcmcconf,
enableWAIC =FALSE)
# Compile the MCMC against the compiled model project.
cmcmc <- compileNimble(Rmcmc,
project = Cmwtc,
resetFunctions = TRUE)
# Run the MCMC: 3 chains of 50k iterations, first 25k discarded as burn-in.
mcmc.out <- runMCMC(cmcmc,
niter = 50000,
nchains = 3,
nburnin = 25000,
#inits = initsList,
#thin =10,
setSeed = TRUE,
samples=TRUE,
samplesAsCodaMCMC = TRUE,
summary = TRUE,
WAIC = FALSE)
# Posterior summary pooled across all chains.
output <- mcmc.out$summary$all.chains
output
# Trace/density diagnostics via coda.
# NOTE(review): mcmcplot() comes from the 'mcmcplots' package, which is never
# loaded at the top of this script -- add library(mcmcplots) or call
# mcmcplots::mcmcplot().
coda_samples <- mcmc(mcmc.out$samples)
mcmcplot(coda_samples)
# Persist the posterior summary.
save(output, file="output.RData")
|
1071d7b2f83808bfc2f38dfdf62b0a20f8519128
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/embryogrowth/examples/plot.tsd.Rd.R
|
b053ec621c7c4033b6d7dfb48772f4d4a397c34c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
plot.tsd.Rd.R
|
# Usage example for embryogrowth::plot.tsd(), extracted from the package docs.
# The example body is wrapped in "Not run" (##D lines), so only the library
# call executes; it depends on the package's DatabaseTSD dataset.
library(embryogrowth)
### Name: plot.tsd
### Title: Plot results of tsd() that best describe temperature-dependent
###   sex determination
### Aliases: plot.tsd

### ** Examples

## Not run: 
##D CC_AtlanticSW <- subset(DatabaseTSD, RMU=="Atlantic, SW" & 
##D     Species=="Caretta caretta" & (!is.na(Sexed) & Sexed!=0))
##D tsdL <- with (CC_AtlanticSW, tsd(males=Males, females=Females, 
##D     temperatures=Incubation.temperature-Correction.factor, 
##D     equation="logistic"))
##D plot(tsdL)
## End(Not run)
|
10b7178fa5b42e4d8bd94f74a7a29272c0843034
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/assertive.properties/R/assert-is-empty-scalar.R
|
0bd08288153f10e6a2240dd55818bb4390a381fb
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,495
|
r
|
assert-is-empty-scalar.R
|
#' @rdname is_empty
#' @export
assert_has_elements <- function(x, n,
                                severity = getOption("assertive.severity", "stop"))
{
  # Assert that x has exactly n elements.  The check and the failure action
  # (stop/warning/message, chosen by `severity`) are delegated to
  # assert_engine().  get_name_in_parent(x) captures the caller's expression
  # for x via non-standard evaluation, for use in the failure message.
  assert_engine(
    has_elements,
    x,
    n = n,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_empty <- function(x, metric = c("length", "elements"),
                            severity = getOption("assertive.severity", "stop"))
{
  # Assert that x is empty, judged either by length or by element count
  # (match.arg validates/defaults `metric`).  The check and failure action
  # are delegated to assert_engine().
  metric <- match.arg(metric)
  assert_engine(
    is_empty,
    x,
    metric = metric,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_non_empty <- function(x, metric = c("length", "elements"),
                                severity = getOption("assertive.severity", "stop"))
{
  # Assert that x is NOT empty, judged by length or element count.
  # Mirrors assert_is_empty(); delegates to assert_engine().
  metric <- match.arg(metric)
  assert_engine(
    is_non_empty,
    x,
    metric = metric,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_non_scalar <- function(x, metric = c("length", "elements"),
                                 severity = getOption("assertive.severity", "stop"))
{
  # Assert that x is NOT scalar (i.e. does not have exactly one element /
  # unit length, per `metric`).  Delegates to assert_engine().
  metric <- match.arg(metric)
  assert_engine(
    is_non_scalar,
    x,
    metric = metric,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_of_dimension <- function(x, n,
                                   severity = getOption("assertive.severity", "stop"))
{
  # Assert that x has dimensions n (delegates the comparison to
  # is_of_dimension via assert_engine()).  The leading roxygen comment was
  # normalised from "##'" to "#'" to match the rest of the file.
  assert_engine(
    is_of_dimension,
    x,
    n = n,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_of_length <- function(x, n,
                                severity = getOption("assertive.severity", "stop"))
{
  # Assert that length(x) equals n; check and failure action are delegated
  # to assert_engine() as in the sibling assertions.
  assert_engine(
    is_of_length,
    x,
    n = n,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
#' @rdname is_empty
#' @export
assert_is_scalar <- function(x, metric = c("length", "elements"),
                             severity = getOption("assertive.severity", "stop"))
{
  # Assert that x is scalar (exactly one element / unit length, per
  # `metric`).  Delegates the check and the failure action to assert_engine().
  metric <- match.arg(metric)
  assert_engine(
    is_scalar,
    x,
    metric = metric,
    .xname = get_name_in_parent(x),
    severity = severity
  )
}
|
b6c67de27ff0f58492058b30958f921ceac4cf26
|
55eebcf4970cabc7685fc37539b1d4cd8a42a0a3
|
/man/Data2D.Rd
|
c221aefe6e13543fd7cff29cf38bf567c1c23385
|
[] |
no_license
|
cran/clusTransition
|
f56c22a12b9eca1bf13b4ce753203afde2b8c1c9
|
2782b0ad09b265f7fb2aef7fbf097d244dbd8bde
|
refs/heads/master
| 2023-03-08T17:58:33.643054
| 2021-02-22T19:20:02
| 2021-02-22T19:20:02
| 341,417,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 439
|
rd
|
Data2D.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data2D.R
\docType{data}
\name{Data2D}
\alias{Data2D}
\title{Synthetic Datasets (Two Dimensional)}
\format{
A data frame
\describe{
\item{x1}{X1.}
\item{x2}{X2.}
\item{class}{Class membership.}
}
}
\usage{
Data2D
}
\description{
A list of datasets generated at four time points containing two variables and cluster membership at each point.
}
\keyword{datasets}
|
43ec0e0ec76d9a3b89a43c134e16eba7e9c33df7
|
9f68e39be025eaf6dd9028ff7efd529a95645385
|
/ui.R
|
3c999cc25f8174e994041ec7adbf445d55f576cf
|
[] |
no_license
|
marion-paclot/Etalab-Datafin
|
2902d9bffa8dc0a7ee4c3160f645173478bf06ed
|
5526bf718e8335ad84f8ca05a20f8fc4fca3f94c
|
refs/heads/master
| 2020-03-20T21:19:15.893470
| 2018-07-13T10:47:39
| 2018-07-13T10:47:39
| 137,735,007
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,345
|
r
|
ui.R
|
# Shiny UI for the Datafin app (French government financial data).
shinyUI(fluidPage(
  # Application title
  titlePanel("Informations relatives à une mission"),
  fluidRow(
    # Year and "mission" pickers; these inputs drive the tables below.
    # NOTE(review): annees_col_balance is presumably defined in global.R /
    # server setup -- verify it is in scope when the UI is built.
    column(3,
           selectizeInput('exercice', "Sélection d'un exercice",
                          choices = annees_col_balance,
                          selected = '', multiple = FALSE,
                          options = NULL, width = "100%")
           ,
           # Mission choices are populated server-side (choices = NULL here).
           selectizeInput('mission', "Sélection d'une mission",
                          choices = NULL, multiple = FALSE, width = "100%")
    ),
    # Programmes attached to the selected mission, plus an .xls download.
    column(7,
           h4('Liste des programmes associés à la mission'),
           h2(verbatimTextOutput("liste_programmes")),
           downloadButton("downloadData", label = "Téléchargement .xls"))
  ),
  # One tab per data source (CGE / RAP / PLR) and a sources tab.
  mainPanel(width = 12,
            tabsetPanel(type = 'tabs',
                        tabPanel('CGE', br(), dataTableOutput('donnees_cge')),
                        tabPanel('RAP', br(), dataTableOutput('donnees_rap')),
                        tabPanel('PLR', br(), dataTableOutput('donnees_plr')),
                        tabPanel('Sources', br(), verbatimTextOutput('sources'))
            )
  )
))
|
fac35da41e8248d4038444e8c4505f0906cf36a9
|
b406a05460da47fd8b34dbc1409f0ef1eabfa9a1
|
/Code/Main_code.R
|
3dc3dd4c08b4e6467a81fe04015dbfd2bd5f440b
|
[] |
no_license
|
muharif/PKM_Transcript_paper
|
6e643e0690fe1a04d2953e6bce9f81cf64502604
|
dedcd79179a5a5a2fd28a6f4229fff15170b6cb9
|
refs/heads/master
| 2022-03-05T13:32:58.267611
| 2019-10-16T12:36:44
| 2019-10-16T12:36:44
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 48,083
|
r
|
Main_code.R
|
#Main code
#####################################################################################################
#####################################################################################################
#Figure 1A was drawn manually based on the source data.
#####################################################################################################
#####################################################################################################
#Figure 1B: Radar plot for TCGA cancer data
# NOTE(review): rm(list = ls()) (three times!) and setwd() in a shared script
# are discouraged -- they wipe the user's workspace and hard-code a
# Windows-only absolute path.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(ggplot2)
# Source directory for the Figure 1B input files.
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_1B\\"
setwd(path_raw)
# Expression table: rows = TCGA cancer types, columns = PKM transcripts.
# NOTE(review): the name `matrix` shadows base::matrix.
matrix<-as.matrix(read.csv("Figure_1B_TCGA.txt",header=T,row.names = 1,sep="\t"))
# First two columns are the two main isoforms.
colnames(matrix)[1]="PKM1"
colnames(matrix)[2]="PKM2"
# log2(x + 1) transform for display.
matrix<-log2(matrix+1)
require(tidyverse)
# Long format with a per-gene drawing-order index (1..14); used below to draw
# the grey minor transcripts first and PKM1/PKM2 last, i.e. on top.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
a<-matrix %>%
  as_tibble(rownames = "Tissue") %>%
  gather(Gene, value, -Tissue) %>%
  mutate(order = case_when(Gene == "PKM1" ~ 1,
                           Gene == "PKM2" ~ 2,
                           Gene == "ENST00000389093" ~ 3,
                           Gene == "ENST00000561609" ~ 4,
                           Gene == "ENST00000568883" ~ 5,
                           Gene == "ENST00000568459" ~ 6,
                           Gene == "ENST00000562997" ~ 7,
                           Gene == "ENST00000449901" ~ 8,
                           Gene == "ENST00000564178" ~ 9,
                           Gene == "ENST00000565154" ~ 10,
                           Gene == "ENST00000565184" ~ 11,
                           Gene == "ENST00000566809" ~ 12,
                           Gene == "ENST00000567087" ~ 13,
                           Gene == "ENST00000569050" ~ 14
  )) %>%
  mutate(order = as.factor(order))
# Radar (polar) plot: one unfilled polygon per transcript, layered 14 -> 1 so
# the named isoforms end up on top.
ggplot(data=a,aes(x = Tissue, group = Gene, y = value, colour = Gene, fill = Gene)) +
  geom_polygon(data = a %>%
                 filter(order == "14"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "13"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "12"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "11"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "10"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "9"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "8"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "7"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "6"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "5"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "4"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "3"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "2"), aes(color=Gene),size=1.7, alpha = 0) +
  geom_polygon(data = a %>%
                 filter(order == "1"), aes(color=Gene),size=1.7, alpha = 0) +
  labs(x = "", y = "") +
  coord_polar() +
  # Fixed colours: named isoforms coloured, minor transcripts grey.
  scale_colour_manual(values = c("PKM1" = "yellow green", "PKM2" = "red","ENST00000389093"= "blue","ENST00000561609"="magenta","ENST00000568883"="gold","ENST00000568459"="purple","ENST00000562997"="cyan", "ENST00000449901"="gray","ENST00000564178"="gray","ENST00000565154"="gray","ENST00000565184"="gray","ENST00000566809"="gray","ENST00000567087"="gray","ENST00000569050"="gray")) +
  #scale_y_continuous(breaks = c(0, 2, 4, 6, 8, 10)) +
  scale_y_continuous(breaks = c(0,1,2,3,4,5,6,7,8,9,10,11)) +
  guides(col = guide_legend(ncol = 7)) +
  theme(
    legend.position = "top",
    axis.text = element_text(face = "bold",size=20),
    panel.background = element_blank(),
    panel.grid.major.y = element_line(color = alpha("gray", alpha = 0.9)),
    panel.grid.major.x = element_line(color = alpha("gray", alpha = 0.9)),
    axis.text.y = element_text(),
    axis.ticks.y = element_line()
  )
# Save the last plot into an Output/ subfolder next to the source data.
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
ggsave("./Output_Figure_1B_TCGA.pdf", width = 12, height = 12, dpi=800)
#####################################################################################################
#Figure_1B: Radar plot for GTEX normal tissues
# NOTE(review): this section duplicates the TCGA section nearly line for line
# (only the input file and output name differ) -- a good candidate for a
# shared plotting function.  Same rm(list = ls()) / setwd() caveats apply.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(ggplot2)
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_1B\\"
setwd(path_raw)
# Expression table: rows = GTEx normal tissues, columns = PKM transcripts.
matrix<-as.matrix(read.csv("Figure_1B_GTEx.txt",header=T,row.names = 1,sep="\t"))
colnames(matrix)[1]="PKM1"
colnames(matrix)[2]="PKM2"
# log2(x + 1) transform for display.
matrix<-log2(matrix+1)
require(tidyverse)
# Long format plus per-gene drawing-order index, as in the TCGA section.
a<-matrix %>%
  as_tibble(rownames = "Tissue") %>%
  gather(Gene, value, -Tissue) %>%
  mutate(order = case_when(Gene == "PKM1" ~ 1,
                           Gene == "PKM2" ~ 2,
                           Gene == "ENST00000389093" ~ 3,
                           Gene == "ENST00000561609" ~ 4,
                           Gene == "ENST00000568883" ~ 5,
                           Gene == "ENST00000568459" ~ 6,
                           Gene == "ENST00000562997" ~ 7,
                           Gene == "ENST00000449901" ~ 8,
                           Gene == "ENST00000564178" ~ 9,
                           Gene == "ENST00000565154" ~ 10,
                           Gene == "ENST00000565184" ~ 11,
                           Gene == "ENST00000566809" ~ 12,
                           Gene == "ENST00000567087" ~ 13,
                           Gene == "ENST00000569050" ~ 14
  )) %>%
  mutate(order = as.factor(order))
ggplot(data=a,aes(x = Tissue, group = Gene, y = value, colour = Gene, fill = Gene)) +
#geom_point()+
geom_polygon(data = a %>%
filter(order == "14"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "13"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "12"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "11"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "10"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "9"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "8"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "7"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "6"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "5"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "4"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "3"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "2"), aes(color=Gene),size=1.7, alpha = 0) +
geom_polygon(data = a %>%
filter(order == "1"), aes(color=Gene),size=1.7, alpha = 0) +
labs(x = "", y = "") +
coord_polar() +
scale_colour_manual(values = c("PKM1" = "yellow green", "PKM2" = "red","ENST00000389093"= "blue","ENST00000561609"="magenta","ENST00000568883"="gold","ENST00000568459"="purple","ENST00000562997"="cyan", "ENST00000449901"="gray","ENST00000564178"="gray","ENST00000565154"="gray","ENST00000565184"="gray","ENST00000566809"="gray","ENST00000567087"="gray","ENST00000569050"="gray")) +
#scale_y_continuous(breaks = c(0, 2, 4, 6, 8, 10)) +
scale_y_continuous(breaks = c(0,1,2,3,4,5,6,7,8,9,10)) +
guides(col = guide_legend(ncol = 7)) +
theme(
legend.position = "top",
axis.text = element_text(face = "bold",size=20),
panel.background = element_blank(),
panel.grid.major.y = element_line(color = alpha("gray", alpha = 0.9)),
panel.grid.major.x = element_line(color = alpha("gray", alpha = 0.9)),
axis.text.y = element_text(),
axis.ticks.y = element_line()
)
path_out<-paste0(path_raw,"Output\\")
setwd(path_out)
ggsave("./Output_Figure_1B_GTEx.pdf", width = 12, height = 12,dpi=800)
##############################################################################################
##############################################################################################
#figure 2A: clustering plot based on KM P values
#
# Heat map of signed -log10 Kaplan-Meier log-rank P values (rows = PKM
# transcripts, columns = cancer types). Lowly expressed transcripts are masked
# and protective effects (negative Cox coefficient) get a negative sign.
#   Output: Output\Figure_2A_cluster.tiff
rm(list=ls())
rm(list=ls())
rm(list=ls())
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_2A\\"
setwd(path_raw)
mean_int_trans<-as.matrix(read.csv("Figure_2A_cluster_mean_trans_exp.txt",header=T,row.names = 1,sep="\t"))
coef_end<-as.matrix(read.csv("Figure_2A_cluster_KM_coef.txt",header=T,row.names = 1,sep="\t"))
p_end<-as.matrix(read.csv("Figure_2A_cluster_KM_p.txt",header=T,row.names = 1,sep="\t"))
# Mask lowly expressed entries (mean expression < 5): set P to 1 so they map to
# -log10(1) = 0 in the heat map.
index_1<-which(mean_int_trans<5)
p_end[index_1]=1
log_p_value=-log10(p_end)
# Sign by effect direction: negative Cox coefficient (protective) -> negative.
index<-which(coef_end<0)
log_p_value[index]=-log_p_value[index]
#use the P value of KM do clustering analysis
library(corrplot)
library(Hmisc)
library(gplots)
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
tiff(filename = "Figure_2A_cluster.tiff",width=1500,height=1200)
# Row clustering computed here but not applied below (Rowv=FALSE keeps the
# input row order); kept to match the original analysis.
hr <- hclust(as.dist(1-cor(t(log_p_value), method="spearman")), method="ward.D2")
my_palete <- colorRampPalette(c("blue","white","red3"))(n=299)
# Asymmetric break points saturate the palette beyond |1.3|.
# Fix: stray leading "+" console prompts pasted into the original have been
# removed (they were parsed as harmless unary plus).
col_breaks = c(seq(-1.3,-1,length=100),
               seq(-0.999,1,length=100),
               seq(1.009,1.3,length=100))
heatmap.2(log_p_value,Rowv=FALSE, Colv=FALSE,scale="none",density.info="none",trace="none",col=my_palete,breaks=col_breaks,margins =c(15,15),cexRow=1.5,cexCol=1.5,sepwidth=c(0.5,0.5),sepcolor="black")
dev.off()
##################################################################################
#Figure 2A: Kaplan-Meier plot for TCGA KIRC
#survival data (days)
#
# Draws one Kaplan-Meier plot per transcript in gene_list for TCGA KIRC, using
# the helper functions sourced from Cheng_toolbox_beta.R.
#   Input : Figure_2A_KM_plot_TCGA_KIRC.txt (expression + survival table)
#   Output: one "TCGA_KIRC_<transcript>" plot per transcript in Output\
rm(list=ls())
rm(list=ls())
rm(list=ls())
library("grid")
library("xlsx")
library("XLConnect")
library("ggplot2")
library("ggvis")
library("rgl")
library("dplyr")
library("tidyr")
library("stringr")
library("lubridate")
require(gplots)
library("survival")
require("xlsx")
library("DESeq2")
library("biomaRt")
library("DESeq2")
library("piano")
library("Biobase")
setwd("E:\\PKM_data_code\\Code\\")
source('Cheng_toolbox_beta.R')
gene_list<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))
dataDir<-"E:\\PKM_data_code\\Source_data\\Figure_2A\\"
setwd(dataDir)
TXT_name<-"Figure_2A_KM_plot_TCGA_KIRC.txt"
path_out<-paste0(dataDir,"Output\\")
# Fix: create Output\ before setwd() — setwd() errors if the folder is missing.
# This matches the otherwise-identical Figure 3A section below.
dir.create(path_out)
setwd(path_out)
for (j in 1:length(gene_list)){
output<-Cheng_generateSurvInputfromTCGA(gene_list[j,1],TXT_name,dataDir)
result<-Cheng_generateKMplot(output,outFile=paste0("TCGA_KIRC","_",gene_list[j,1]))
}
##################################################################################
##################################################################################
#Figure 2B: bubble plot
#
# For each prognostic transcript, draws a bubble plot of GO-term enrichment:
# x = Direction (net number of cancer types with up- minus down-regulated
# enrichment), y = Generality (number of cancer types in which the term is
# enriched), bubble size = log10(number of genes in the GO term). GO terms
# shared between cohorts (Figure_2B_common_GO_terms.txt) are overplotted in a
# highlight colour.
rm(list=ls())
rm(list=ls())
rm(list=ls())
trans_list<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_2B\\"
setwd(path_raw)
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
# Column 1: GO id, column 2: gene count per GO term (no header).
go_count_end<-as.matrix(read.csv("Figure_2B_GoTerm_count_inform.txt",header=F,sep="\t"))
for(i in 1:length(trans_list)){
# The loop writes into path_out at the end, so reset to the data folder first.
setwd(path_raw)
int_trans<-trans_list[i]
file_name<-paste0("Figure_2B_GoTerm_inform_",int_trans,".txt")
matrix<-as.matrix(read.csv(file_name,header=T,sep="\t"))
goid_all<-as.matrix(matrix[,"goid_all"])
Direction<-as.matrix(matrix[,"Direction"])
Generality<-as.matrix(matrix[,"Generality"])
rownames(Direction)<-matrix[,1]
rownames(Generality)<-matrix[,1]
mode(Direction)<-"numeric"
mode(Generality)<-"numeric"
# Look up each GO term's gene count; bubble size is its log10.
index<-match(goid_all,go_count_end[,1])
goid_count<-as.matrix(go_count_end[index,2])
mode(goid_count)<-"numeric"
size=log10(goid_count)
bubbleRes<-cbind(Direction,Generality,size)
colnames(bubbleRes)<-c("Direction","Generality","size")
bubbleRes<-as.data.frame(bubbleRes)
# Normalise the common-GO-term names to match make.names()-style row names
# (spaces, commas and hyphens were converted to dots by read.csv above).
tes=gsub(' ','.',rownames(read.csv("Figure_2B_common_GO_terms.txt",row.names = 1,sep='\t')))
tes=gsub(',','.',tes)
tes=gsub('-','.',tes)
# Split into highlighted (common) terms and the remaining background terms.
bubbleRes2=na.omit(bubbleRes[tes,])
bubbleRes=bubbleRes[!rownames(bubbleRes)%in%tes,]
# PKM1-like transcripts get a green highlight, PKM2-like a red one.
# NOTE(review): "dark green" contains a space; base R colour names do not
# ("darkgreen") — confirm this name is accepted on the target setup.
if(int_trans=="ENST00000335181"|int_trans=="ENST00000561609"){
color_com<-"dark green"
}else{
color_com<-"red"
}
require(ggplot2)
# NOTE(review): guide = FALSE inside scale_colour_gradient() is deprecated in
# newer ggplot2 (use guide = "none"); the legend is hidden by the theme anyway.
ggOut = ggplot(bubbleRes, aes(x=Direction, y=Generality, size=size))+
geom_jitter(aes(size =size,colour=Generality,alpha=.02))+
scale_colour_gradient(guide = FALSE)+
xlim(-25,25)+ylim(0,25)+theme(axis.ticks.length = unit(.2, "cm"), axis.line = element_line(colour = "black"), text = element_text(size=17), legend.position="none", panel.background=element_rect(fill="white"))+
geom_vline(xintercept = 0,linetype="dashed", color = "black", size=1)+
geom_jitter(data = bubbleRes2, colour=color_com, aes(size =size,colour=Generality,alpha=.02))
#print(ggOut)
setwd(path_out)
# ggsave() with no plot argument saves the implicitly tracked last plot (ggOut,
# set when the "+" chain above was built).
ggsave(paste0("./Output_Figure_2B_BubblePlot_",int_trans,".pdf"), width = 9, height = 9)
}
##################################################################################
##################################################################################
#Figure 3A: Kaplan-Meier plot in Japanese cohort
#
# Same procedure as the TCGA KIRC section above, applied to the Japanese KIRC
# cohort: one Kaplan-Meier plot per transcript in gene_list, generated by the
# helpers sourced from Cheng_toolbox_beta.R.
#   Input : Figure_3A_KM_plot_Japanese_KIRC.txt
#   Output: one "Japanese_KIRC_<transcript>" plot per transcript in Output\
rm(list=ls())
rm(list=ls())
rm(list=ls())
library("grid")
library("xlsx")
library("XLConnect")
library("ggplot2")
library("ggvis")
library("rgl")
library("dplyr")
library("tidyr")
library("stringr")
library("lubridate")
require(gplots)
library("survival")
require("xlsx")
library("DESeq2")
library("biomaRt")
library("DESeq2")
library("piano")
library("Biobase")
setwd("E:\\PKM_data_code\\Code\\")
source('Cheng_toolbox_beta.R')
gene_list<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))
dataDir<-"E:\\PKM_data_code\\Source_data\\Figure_3A\\"
setwd(dataDir)
TXT_name<-"Figure_3A_KM_plot_Japanese_KIRC.txt"
path_out<-paste0(dataDir,"Output\\")
dir.create(path_out)
setwd(path_out)
for (j in 1:length(gene_list)){
output<-Cheng_generateSurvInputfromTCGA(gene_list[j,1],TXT_name,dataDir)
result<-Cheng_generateKMplot(output,outFile=paste0("Japanese_KIRC","_",gene_list[j,1]))
}
##################################################################################
##################################################################################
#Figure 3B: KM plot for validating biomarker
#
# Four-transcript voting classifier: each transcript votes "high risk" when its
# expression is on the unfavourable side of its cohort-specific cutoff; a
# patient is labelled high risk when at least `th` transcripts vote so. A
# Kaplan-Meier plot with the log-rank P value is written as a PDF.
# The commented read.csv()/pdf() lines switch the script between the TCGA and
# the Japanese cohort.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(survival)
th<-2# at least 2 of 4 prognostic transcripts vote high risk
int_trans<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_3B\\"
setwd(path_raw)
# NOTE: "Fgiure" is the actual (misspelled) name of the input files on disk —
# do not "correct" it here without renaming the files.
cutoff<-as.matrix(read.csv("Fgiure_3B_cutoff_TCGA_KIRC.txt",header=F,row.names = 1,sep="\t"))
#cutoff<-as.matrix(read.csv("Fgiure_3B_cutoff_Japanese.txt",header=F,row.names = 1,sep="\t"))
exp<-as.matrix(read.csv("Fgiure_3B_TCGA_KIRC_exp_survival.txt",header=T,row.names = 1,sep="\t"))
#exp<-as.matrix(read.csv("Fgiure_3B_Japanese_KIRC_exp_survival.txt",header=T,row.names = 1,sep="\t"))
survival<-exp[,c("OS","status")]
exp<-exp[,int_trans]
# Votes: for the first two transcripts LOW expression is unfavourable (<),
# for the last two HIGH expression is unfavourable (>).
loc_1=exp[,int_trans[1]]<cutoff[1] #high risk is 1
loc_2=exp[,int_trans[2]]<cutoff[2] #high risk is 1
loc_3=exp[,int_trans[3]]>cutoff[3] #high risk is 1
loc_4=exp[,int_trans[4]]>cutoff[4] #high risk is 1
# round() on a logical yields numeric 0/1, turning each vote into a count.
label_1<-as.matrix(round(loc_1))
label_2<-as.matrix(round(loc_2))
label_3<-as.matrix(round(loc_3))
label_4<-as.matrix(round(loc_4))
label_raw<-cbind(label_1,label_2,label_3,label_4)
colnames(label_raw)<-int_trans
rownames(label_raw)<-rownames(exp)
# Total number of high-risk votes per patient.
label_raw<-as.matrix(rowSums(label_raw))
# label = 1 (high risk) when votes >= th, otherwise 0 (low risk).
label<-matrix(NA,length(label_raw),1)
index<-which(label_raw>=th)
label[index]=1
label[which(is.na(label))]=0
rownames(label)<-rownames(label_raw)
survival_time<-as.matrix(survival[,1])
status<-as.matrix(survival[,2])
# Group sizes for the legend.
survival_1<-as.matrix(survival_time[which(label==1),1])
survival_0<-as.matrix(survival_time[which(label==0),1])
num_1<-length(survival_1)
num_0<-length(survival_0)
# Cox model summary printed for reference only (result not stored).
summary(coxph(Surv(survival_time,status) ~ label))
surv_obj<-survfit(Surv(survival_time,status) ~ label)
surv_obj
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
# NOTE(review): "KITC" in the output file names is presumably a typo for
# "KIRC" — kept as-is because downstream material may reference these names.
pdf(file = "Output_Figure_3B_TCGA_KITC.pdf",width=5.8,height=6)
#pdf(file = "Output_Figure_3B_Japanese_KITC.pdf",width=5.8,height=6)
plot(surv_obj,col=c("black","red"), mark.time=T, cex=1.4,xlab="Time (year)",lty =1, ylab = "Survival Probability",las=1,cex.lab=1.4)
legend("bottomleft",legend =c(paste0("Low risk (n=",num_0,")"), paste0("High risk (n=",num_1,")")),col=c("black","red"),text.font=2,lty=c(1,1),lwd=2,bty="n",cex=1.4)
# Log-rank test between the two risk groups; P shown in the top-right corner.
survtest <- survdiff(Surv(survival_time, status) ~ label)
log_rank_p<-1 - pchisq(survtest$chisq, 1)
log_rank_p<-formatC(log_rank_p,format="e",digits=2)
mode(log_rank_p)<-"character"
legend("topright", legend =paste0("P=",log_rank_p),text.font=2,bty="n",cex=1.4)
dev.off()
##################################################################################
##################################################################################
#Figure 3C
#Figure 3C was drawn by Cytoscape software (Version 3.6.1).
#In file "Figure_3C_GoTerm_inform.txt", We took the column "Term_name" as node and "Label" as node attribute. The size of each node (go term) depends on the number of genes enriched in this go term (column "size").
#In the column "Label", the overlapped go terms identified from TCGA and Japanese cohort were denoted as 1. Among these overlapped go terms, the go terms which were associated with the four prognostic transcripts were denoted as 4. The go terms exclusively identified from the TCGA cohort were denoted as 2. The go terms exclusively identified from the Japanese cohort were denoted as 3.
##################################################################################
##################################################################################
#Figure 4A
#The Figure 4A was drawn manually based on the source data.
##################################################################################
##################################################################################
#Figure 4B
#The homology models were built using Schrodinger Suite software (Schrödinger/2019-3, LLC, New York, NY).
##################################################################################
##################################################################################
#Figure 4C and 4D:
#The Figure 4C and 4D were drawn manually based on the source data.
##################################################################################
##################################################################################
#Figure 4E: showing peptide intensity
#
# Maps mass-spec peptides onto the two PKM protein isoforms ("top band" =
# ENST00000389093, ~49 kDa; "bottom band" = ENST00000568883, ~40 kDa) and plots
# the per-position summed median peptide intensity along each protein sequence.
#   Input : Figure_3E_peptides_whole_proteome.csv (MaxQuant-style peptide table)
#   Output: Ouput_Figure_4E_093.pdf, Output_Figure_4E_883.pdf in Output\
rm(list=ls())
rm(list=ls())
rm(list=ls())
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_4E\\"
setwd(path_raw)
library(tidyverse)
library(magrittr)
library(ggsci)
library(gridExtra)
library(gsubfn)
library(reshape2)
library(ggsci)
library(ggthemes)
library(stringr)
# NOTE: the file is genuinely named "Figure_3E_..." on disk.
file <- "Figure_3E_peptides_whole_proteome.csv"
# Zero intensities are treated as missing (na_if). NOTE(review): as.tibble()
# is deprecated in newer tibble releases (use as_tibble()).
df <- read.csv(file, stringsAsFactors = FALSE) %>% as.tibble %>% na_if(., 0)
# Full amino-acid sequences of the two isoforms.
ENST00000389093 <- "MSKPHSEAGTAFIQTQQLHAAMADTFLEHMCRLDIDSPPITARNTGIICTIGPASRSVELKKGATLKITLDNAYMEKCDENILWLDYKNICKVVEVGSKIYVDDGLISLQVKQKGADFLVTEVENGGSLGSKKGVNLPGAAVDLPAVSEKDIQDLKFGVEQDVDMVFASFIRKASDVHEVRKVLGEKGKNIKIISKIENHEGVRRFDEILEASDGIMVARGDLGIEIPAEKVFLAQKMMIGRCNRAGKPVICATQMLESMIKKPRPTRAEGSDVANAVLDGADCIMLSGETAKGDYPLEAVRMQHLIAREAEAAIYHLQLFEELRRLAPITSDPTEATAVGAVEASFKCCSGAIIVLTKSGRSAHQVARYRPRAPIIAVTRNPQTARQAHLYRGIFPVLCKDPVQEAWAEDVDLRVNFAMNVGKARGFFKKGDVVIVLTGWRPGSGFTNTMRVVPVP"
ENST00000568883 <- "MSKPHSEAGTAFIQTQQLHAAMADTFLEHMCRLDIDSPPIKKGVNLPGAAVDLPAVSEKDIQDLKFGVEQDVDMVFASFIRKASDVHEVRKVLGEKGKNIKIISKIENHEGVRRFDEILEASDGIMVARGDLGIEIPAEKVFLAQKMMIGRCNRAGKPVICATQMLESMIKKPRPTRAEGSDVANAVLDGADCIMLSGETAKGDYPLEAVRMQHLIAREAEAAMFHRKLFEELVRASSHSTDLMEAMAMGSVEASYKCLAAALIVLTESGRSAHQVARYRPRAPIIAVTRNPQTARQAHLYRGIFPVLCKDPVQEAWAEDVDLRVNFAMNVGKARGFFKKGDVVIVLTGWRPGSGFTNTMRVVPVP"
length_49kDA <- nchar(ENST00000389093)
length_40kDA <- nchar(ENST00000568883)
# Keep peptides whose sequence occurs in each isoform (str_detect against the
# full protein string); no_proteins counts proteins sharing the peptide.
df_883 <- df %>% mutate(ENST00000568883 = str_detect(ENST00000568883, regex(df$Sequence))) %>%
filter(as.logical(.$ENST00000568883)) %>%
mutate(no_proteins = lengths(strsplit(.$Proteins, ";")))
df_093 <- df %>% mutate(ENST00000389093 = str_detect(ENST00000389093, regex(df$Sequence))) %>%
filter(as.logical(.$ENST00000389093)) %>%
mutate(no_proteins = lengths(strsplit(.$Proteins, ";")))
# Start position of each peptide within the isoform: length of the prefix
# captured by "(.*)<peptide>" (via gsubfn::strapply) plus one.
df_883$start_883 <- apply(df_883, 1, function(x) length(strsplit(strapply(ENST00000568883, paste("(.*)", x[1], sep = ""), simplify = TRUE), "")[[1]]) + 1)
df_883$end_883 <- df_883$start_883 + lengths(strsplit(df_883$Sequence, "")) - 1
df_093$start_093 <- apply(df_093, 1, function(x) length(strsplit(strapply(ENST00000389093, paste("(.*)", x[1], sep = ""), simplify = TRUE), "")[[1]]) + 1)
df_093$end_093 <- df_093$start_093 + lengths(strsplit(df_093$Sequence, "")) - 1
### Top band - ENST00000389093 ###
# Per-peptide median/mean intensity across the "_t" (top-band) replicates.
df_t <- df_093 %>% select(c(1:6, grep("_t", names(df_093)), 19:ncol(df_093))) %>% rowwise() %>%
mutate(median = median(c(Intensity.250ug_t, Intensity.rep1_t, Intensity.rep2_t, Intensity.rep3_t), na.rm = TRUE),
mean = mean(c(Intensity.250ug_t, Intensity.rep1_t, Intensity.rep2_t, Intensity.rep3_t), na.rm = TRUE))
# Number of replicates (columns 7-10) with a non-missing intensity; peptides
# observed in no replicate are dropped.
df_t$No_rep <- df_t %>%
select(7:10) %>%
is.na %>%
`!` %>% rowSums
df_t <- df_t[df_t$No_rep != 0,]
### Bottom band - ENST00000568883 ###
# Same computation for the "_b" (bottom-band) replicates.
df_b <- df_883 %>% select(c(1:6, grep("_b", names(df_883)), 19:ncol(df_883))) %>% rowwise() %>%
mutate(median = median(c(Intensity.250ug_b, Intensity.rep1_b, Intensity.rep2_b, Intensity.rep3_b), na.rm = TRUE),
mean = mean(c(Intensity.250ug_b, Intensity.rep1_b, Intensity.rep2_b, Intensity.rep3_b), na.rm = TRUE))
df_b$No_rep <- df_b %>%
select(7:10) %>%
is.na %>%
`!` %>% rowSums
df_b <- df_b[df_b$No_rep != 0,]
##### Make plots #####
## Top ##
# "a": per-peptide segments at their median intensity (filtered out below);
# "b": the same segments at a fixed baseline height (500000);
# "c": per-position sum of peptide medians along the protein.
df_t_melted <- melt(df_t, id.vars = c("Sequence", "median"), measure.vars = c("start_093", "end_093"))
df_t_test <- df_t
df_t_test$median <- 500000
df_t_test_melted <- melt(df_t_test, id.vars = c("Sequence", "median"), measure.vars = c("start_093", "end_093"))
df_t_test_melted$col <- "b"
df_t_melted$col <- "a"
df_t_melted_bound <- rbind(df_t_melted, df_t_test_melted)
df_t_melted_bound$name <- paste(df_t_melted_bound$Sequence, df_t_melted_bound$col)
# Spread each peptide's median over the residue positions it covers (columns
# are offset by 4 because the first 4 columns hold metadata).
df_t_line <- df_t %>% arrange(Start.position) %>% select(c(1, median, start_093, end_093))
df_empty <- matrix(NA, nrow = nrow(df_t_line), ncol = length_49kDA) %>% data.frame() %>% `colnames<-`(1:length_49kDA)
df_t_line <- cbind(df_t_line, df_empty)
i=1
for(i in 1:nrow(df_t_line)){
df_t_line[i, c((df_t_line[i,"start_093"] + 4) : (df_t_line[i,"end_093"] + 4))] <- df_t_line$median[i]
}
# NOTE(review): the trailing comma in mutate(..., variable = "c", ) is an
# empty argument; dplyr tolerates it but it should be removed eventually.
df_t_line_melted <- colSums(df_t_line[,c(5:(length_49kDA+4))], na.rm = TRUE) %>% melt() %>% mutate(pos = c(1:length_49kDA)) %>% filter(value != 0) %>%
`colnames<-`(c("median", "value")) %>% mutate(Sequence = "c", name = "c", col = "c", variable = "c", )
df_t_melted_bound_2 <- rbind(df_t_melted_bound, df_t_line_melted)
df_t_melted_bound_3 <- df_t_melted_bound_2 %>% filter(col != "a")
line_t_III <- ggplot(df_t_melted_bound_3, aes(value, median, group = name, colour = col)) +
geom_line(size = 1, linetype = 1, alpha = 1) + geom_rangeframe(aes(colour = "black")) + theme_tufte() +
ylab("log10 Intensity [IU]") + xlab("") + theme(plot.margin = unit(c(0,-2,0,0.5), "lines")) + scale_y_log10(breaks=c(1e6,1e8)) +
theme(legend.position = "none") + scale_colour_manual(values=c("#3C5488B2", "black", "#00A087B2"))
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
#pdf("Top_band_20190327_III.pdf",width=15,height=6)
# NOTE: "Ouput" (missing t) is the name actually produced for the 093 PDF.
pdf("Ouput_Figure_4E_093.pdf",width=15,height=6)
grid.arrange(line_t_III, ncol=2, nrow=2, widths=c(12, 2), heights=c(1, 0.1))
dev.off()
## Bottom ##
# Same construction for the bottom band; baseline height 50000 here.
df_b_melted <- melt(df_b, id.vars = c("Sequence", "median"), measure.vars = c("start_883", "end_883"))
df_b_test <- df_b
df_b_test$median <- 50000
df_b_test_melted <- melt(df_b_test, id.vars = c("Sequence", "median"), measure.vars = c("start_883", "end_883"))
df_b_test_melted$col <- "b"
df_b_melted$col <- "a"
df_b_melted_bound <- rbind(df_b_melted, df_b_test_melted)
df_b_melted_bound$name <- paste(df_b_melted_bound$Sequence, df_b_melted_bound$col)
df_b_line <- df_b %>% arrange(Start.position) %>% select(c(1, median, start_883, end_883))
df_empty_b <- matrix(NA, nrow = nrow(df_b_line), ncol = length_40kDA) %>% data.frame() %>% `colnames<-`(1:length_40kDA)
df_b_line <- cbind(df_b_line, df_empty_b)
for(i in 1:nrow(df_b_line)){
df_b_line[i, c((df_b_line[i,"start_883"] + 4) : (df_b_line[i,"end_883"] + 4))] <- df_b_line$median[i]
}
df_b_line_melted <- colSums(df_b_line[,c(5:(length_40kDA+4))], na.rm = TRUE) %>% melt() %>% mutate(pos = c(1:length_40kDA)) %>% filter(value != 0) %>%
`colnames<-`(c("median", "value")) %>% mutate(Sequence = "c", name = "c", col = "c", variable = "c", )
df_b_melted_bound_2 <- rbind(df_b_melted_bound, df_b_line_melted)
df_b_melted_bound_3 <- df_b_melted_bound_2 %>% filter(col != "a")
line_b_III <- ggplot(df_b_melted_bound_3, aes(value, median, group = name, colour = col)) +
geom_line(size = 1, linetype = 1, alpha = 1) + geom_rangeframe(aes(colour = "black")) + theme_tufte() +
ylab("log10 Intensity [IU]") + xlab("") + theme(plot.margin = unit(c(0,-2,0,0.5), "lines")) + scale_y_log10(breaks=c(1e6,1e8)) +
theme(legend.position = "none") + scale_colour_manual(values=c("#3C5488B2", "black", "#00A087B2"))
setwd(path_out)
#pdf("Bottom_band_20190401_III.pdf",width=15,height=6)
pdf("Output_Figure_4E_883.pdf",width=15,height=6)
grid.arrange(line_b_III, ncol=2, nrow=2, widths=c(12, 2), heights=c(1, 0.1))
dev.off()
##################################################################################
##################################################################################
#Figure S1: Overlapped DEGs
#
# For each transcript-pair comparison (one row of the input table), draws a pie
# chart of the fraction of overlapping DEGs whose direction of change is
# consistent vs. inconsistent, and writes one PDF per comparison (named after
# the row name) into Output\.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(ggplot2)
# NOTE: "Fgiure_S1" is the actual (misspelled) folder name on disk.
path_raw<-"E:\\PKM_data_code\\Source_data\\Fgiure_S1\\"
setwd(path_raw)
matrix<-as.matrix(read.csv("Figure_S1_TCGA_KIRC_DEGs_overlap.txt",header=T,row.names = 1,sep="\t"))
matrix<-matrix[,c("overlap","consis","not_consis")]
# Ratios of (in)consistent DEGs among the overlapping DEGs of each comparison.
ratio_matrix<-cbind(matrix[,"consis"]/matrix[,"overlap"],matrix[,"not_consis"]/matrix[,"overlap"])
colnames(ratio_matrix)<-c("consis","not_consis")
#ratio_matrix
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
for (i in seq_len(nrow(ratio_matrix))){  # seq_len(): safe when the table is empty
df = data.frame("Classification" = c("Consistent ratio","Not consistent ratio"),
"ratio" = c(ratio_matrix[i,1],ratio_matrix[i,2]))
pie = ggplot(df, aes(x="", y=ratio, fill=Classification)) + geom_bar(stat="identity", width=1)
# Convert to pie (polar coordinates) and add labels
pie = pie + coord_polar("y", start=0)
# Add color scale (hex colors)
pie = pie + scale_fill_manual(values=c("lightcoral", "lightskyblue"))
# Tidy up the theme
pie = pie + theme_classic() + theme(axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank())
setwd(path_out)
# fix: pass the plot explicitly rather than relying on ggplot2's implicit
# last_plot() tracking.
ggsave(paste0("./",rownames(ratio_matrix)[i],".pdf"), plot = pie, width = 5, height = 5)
}
##################################################################################
##################################################################################
#Figure S2
#
# Heat map of signed -log10 GO-term enrichment P values (rows = GO terms,
# columns = the four transcripts), with rows ordered by a Spearman-distance
# Ward dendrogram. Output: Output\Output_Figure_S2_pathway_heatmap.tiff.
rm(list=ls())
rm(list=ls())
rm(list=ls())
path_raw<-"E:\\PKM_data_code\\Source_data\\Figure_S2\\"
setwd(path_raw)
matrix_raw<-as.matrix(read.csv("Figure_S2_GoTerm_log10P.txt",header=T,sep="\t"))
#Figure_S2_GoTerm_log10P.txt: negative log10 transformation of P values, then negative transformation for the pathways enriched with down-regulated genes
# Column 1 holds the GO term names; columns 2-5 the per-transcript values.
matrix<-matrix_raw[,c(2,3,4,5)]
mode(matrix)<-"numeric"
rownames(matrix)<-matrix_raw[,1]
library(corrplot)
library(Hmisc)
library(gplots)
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
tiff(filename = "Output_Figure_S2_pathway_heatmap.tiff",width=1500,height=1200)
# Row dendrogram: 1 - Spearman correlation as distance, Ward linkage.
hr <- hclust(as.dist(1-cor(t(matrix), method="spearman")), method="ward.D2")
#hc <- hclust(as.dist(1-cor(matrix, method="spearman")), method="ward.D2")
my_palete <- colorRampPalette(c("blue","white","red3"))(n=299)
# Asymmetric break points saturate the palette beyond |1.3|.
# Fix: stray leading "+" console prompts pasted into the original have been
# removed (they were parsed as harmless unary plus).
col_breaks = c(seq(-1.3,-1,length=100),
               seq(-0.999,1,length=100),
               seq(1.009,1.3,length=100))
#heatmap.2(log_p_value,Rowv=as.dendrogram(hr), Colv=as.dendrogram(hc),scale="none",density.info="density",trace="none",col=my_palete,breaks=col_breaks,margins =c(15,15),cexRow=1.5,cexCol=1.5,sepwidth=c(0.5,0.5),sepcolor="black")
heatmap.2(matrix,Rowv=as.dendrogram(hr), Colv=FALSE,scale="none",density.info="none",trace="none",col=my_palete,breaks=col_breaks,margins =c(15,60),cexRow=1.5,cexCol=1.5,sepwidth=c(0.5,0.5),sepcolor="black")
dev.off()
##################################################################################
##################################################################################
##################################################################################
#Supplementary Tables
##################################################################################
##################################################################################
#Table S1
#Table S1 shows the exon-intron structure, which was downloaded from the Ensembl website (Version 83, GENCODE Version 24).
##################################################################################
##################################################################################
#Table S2: mean expression value of PKM transcripts in TCGA cancer-types
#
# For every TCGA cancer type listed in Cancer_list.txt, read its TPM table
# (<cancer>_TPM_exp.txt: samples x transcripts), record the sample count and
# the per-transcript mean TPM, and write one summary row per cancer type to
# Output\Output_Table_S2.txt.
rm(list=ls())
rm(list=ls())
rm(list=ls())
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S2\\"
setwd(path_raw)
folder_list<-as.matrix(read.csv("Cancer_list.txt",header=T,sep="\t"))
# Collect per-cancer results in preallocated lists, then bind once at the end.
sample_counts <- vector("list", length(folder_list))
per_cancer_means <- vector("list", length(folder_list))
for (k in seq_along(folder_list)) {
  print(k)  # progress indicator
  expr <- as.matrix(read.csv(paste0(folder_list[k], "_TPM_exp", ".txt"), header = T, row.names = 1, sep = "\t"))
  sample_counts[[k]] <- dim(expr)[1]
  col_mean <- as.matrix(colMeans(expr))
  colnames(col_mean) <- folder_list[k]
  per_cancer_means[[k]] <- col_mean
}
num <- do.call(rbind, sample_counts)
mean_matrix <- do.call(cbind, per_cancer_means)
# One output row per cancer type: <name> <sample count> <mean per transcript>.
mean_matrix <- t(mean_matrix)
rownames(num) <- folder_list
colnames(num) = "Size"
mean_matrix <- cbind(folder_list, num, mean_matrix)
path_out <- paste0(path_raw, "Output\\")
dir.create(path_out)
setwd(path_out)
write.table(mean_matrix,file="Output_Table_S2.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S3: mean expression values of PKM transcripts in GTEx normal tissues
#
# For every GTEx tissue listed in Tissue_list.txt, read its TPM table
# (GTEx_<tissue>_TPM_exp.txt: samples x transcripts), record the sample count
# and the per-transcript mean TPM, and write one summary row per tissue to
# Output\Output_Table_S3.txt.
rm(list=ls())
rm(list=ls())
rm(list=ls())
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S3\\"
setwd(path_raw)
tissue_list<-as.matrix(read.csv("Tissue_list.txt",header=T,sep="\t"))
# Collect per-tissue results in preallocated lists, then bind once at the end.
sample_counts <- vector("list", length(tissue_list))
per_tissue_means <- vector("list", length(tissue_list))
for (k in seq_along(tissue_list)) {
  print(k)  # progress indicator
  expr <- as.matrix(read.csv(paste0("GTEx_", tissue_list[k], "_TPM_exp", ".txt"), header = T, row.names = 1, sep = "\t"))
  sample_counts[[k]] <- dim(expr)[1]
  col_mean <- as.matrix(colMeans(expr))
  colnames(col_mean) <- tissue_list[k]
  per_tissue_means[[k]] <- col_mean
}
num <- do.call(rbind, sample_counts)
mean_matrix <- do.call(cbind, per_tissue_means)
# One output row per tissue: <name> <sample count> <mean per transcript>.
mean_matrix <- t(mean_matrix)
rownames(num) <- tissue_list
colnames(num) = "Size"
mean_matrix <- cbind(tissue_list, num, mean_matrix)
path_out <- paste0(path_raw, "Output\\")
dir.create(path_out)
setwd(path_out)
write.table(mean_matrix,file="Output_Table_S3.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S4: Log-rank p value of PKM and its 14 transcripts in 25 cancer types
#
# Builds a cancer-type x gene matrix of Kaplan-Meier log-rank P values using
# the helpers sourced from Cheng_toolbox_beta.R, and writes it as
# Output\Output_Table_S4.txt. The commented cutoff_/coef_ lines optionally
# collect the expression cutoffs and Cox coefficients as well.
# NOTE(review): the section title says 14 transcripts but gene_list below
# contains PKM plus 7 transcripts — confirm which is intended.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library("grid")
library("xlsx")
library("XLConnect")
library("ggplot2")
library("ggvis")
library("rgl")
library("dplyr")
library("tidyr")
library("stringr")
library("lubridate")
require(gplots)
library("survival")
require("xlsx")
library("DESeq2")
library("biomaRt")
library("DESeq2")
library("piano")
library("Biobase")
setwd("E:\\PKM_data_code\\Code\\")
source('Cheng_toolbox_beta.R')
gene_list<-as.matrix(c("PKM","ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883","ENST00000319622","ENST00000562997","ENST00000568459"))
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S4\\"
setwd(path_raw)
folder_list<-as.matrix(read.csv("Cancer_list.txt",header=T,sep="\t"))
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
p_end<-NULL
#cutoff_end<-NULL
#coef_end<-NULL
# Outer loop: cancer types (rows of the result matrix).
for (i in 1:length(folder_list)){
print(i)
cancerType<-as.matrix(folder_list[i,1])
dataDir<-path_raw
setwd(dataDir)
TXT_name<-paste0(folder_list[i],"_TPM_exp_SurvivalData.txt")
# KM plots produced by Cheng_generateKMplot() are written into path_out.
setwd(path_out)
cancer_p<-NULL
#cancer_cutoff<-NULL
#cancer_coef<-NULL
# Inner loop: genes/transcripts (columns of the result matrix).
for (j in 1:length(gene_list)){
output<-Cheng_generateSurvInputfromTCGA(gene_list[j,1],TXT_name,dataDir)
result<-Cheng_generateKMplot(output,outFile=paste0(cancerType,"_",gene_list[j,1]))
log_rank_p<-as.matrix(result$logRankP)
#cut_off<-as.matrix(result$EXPcut)
#coef<-as.matrix(result$coef)
colnames(log_rank_p)<-gene_list[j,1]
#colnames(cut_off)<-gene_list[j,1]
#colnames(coef)<-gene_list[j,1]
cancer_p<-cbind(cancer_p,log_rank_p)
#cancer_cutoff<-cbind(cancer_cutoff,cut_off)
#cancer_coef<-cbind(cancer_coef,coef)
}
p_end<-rbind(p_end,cancer_p)
#cutoff_end<-rbind(cutoff_end,cancer_cutoff)
#coef_end<-rbind(coef_end,cancer_coef)
}
rownames(p_end)<-folder_list
#rownames(cutoff_end)<-folder_list
#rownames(coef_end)<-folder_list
colnames(p_end)<-gene_list
#colnames(cutoff_end)<-gene_list
#colnames(coef_end)<-gene_list
# Prepend the cancer-type names as a column (row.names=F below).
p_end<-cbind(folder_list,p_end)
setwd(path_out)
write.table(p_end,file="Output_Table_S4.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S5: The enriched GO terms with DEGs for all transcripts in 25 cancer types
#
# For one transcript of interest, combines the per-cancer GO-term P-value
# matrices for up- and down-regulated DEGs into two summary statistics per GO
# term:
#   Generality = number of cancer types in which the term is enriched at all
#   Direction  = (#cancers enriched in up-DEGs) - (#cancers enriched in down-DEGs)
# and writes a table of the terms with Generality > 0.
rm(list=ls())
rm(list=ls())
rm(list=ls())
# Run once per transcript; switch int_trans to one of the listed IDs.
int_trans<-"ENST00000568883"#"ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883")
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S5\\"
path_int_trans<-paste0(path_raw,int_trans,"\\")
setwd(path_int_trans)
goid_all<-as.matrix(read.csv("GoTerm_id.txt",header=F,sep="\t"))
# GO-term x cancer-type P-value matrices; NA = term not enriched in that cancer.
p_matrix_up<-as.matrix(read.csv("Up_GoTerm_p_matrix.txt",header=T,sep="\t"))
rownames(p_matrix_up)<-p_matrix_up[,1]
p_matrix_up<-p_matrix_up[,-1]
mode(p_matrix_up)<-"numeric"
p_matrix_down<-as.matrix(read.csv("Down_GoTerm_p_matrix.txt",header=T,sep="\t"))
rownames(p_matrix_down)<-p_matrix_down[,1]
p_matrix_down<-p_matrix_down[,-1]
mode(p_matrix_down)<-"numeric"
# Binarise: 1 where a P value is present (term enriched), 0 where NA.
binary_up=p_matrix_up
binary_up[which(!is.na(p_matrix_up))]=1
binary_up[which(is.na(p_matrix_up))]=0
row_sum_up<-as.matrix(rowSums(binary_up))#the frequency of each pathway in cancers
binary_down=p_matrix_down
binary_down[which(!is.na(p_matrix_down))]=1
binary_down[which(is.na(p_matrix_down))]=0
row_sum_down<-as.matrix(rowSums(binary_down))
binary_all=binary_up+binary_down
binary_all[which(binary_all==2)]=1#the pathway is simultaneously up- and down-regulated; count it once
Generality<-as.matrix(rowSums(binary_all))
Direction<-as.matrix(row_sum_up-row_sum_down)
# Drop GO terms never enriched in any cancer type.
# NOTE(review): x[-drop_index,] selects ZERO rows when drop_index is empty —
# this assumes at least one term has Generality == 0; confirm for each input.
drop_index<-which(Generality==0)
Generality<-as.matrix(Generality[-drop_index,])
Direction<-as.matrix(Direction[-drop_index,])
goid_all<-as.matrix(goid_all[-drop_index,])
go_inform<-cbind(goid_all,rownames(Direction),Direction,Generality)
colnames(go_inform)<-c("GO_ID","term_name","Direction","Generality")
path_out<-"E:\\PKM_data_code\\Source_data\\Table_S5\\Output\\"
setwd(path_out)
#save(file=paste0(int_trans,"_GoTerm_Generality_direction.Rdata"),Generality,Direction,goid_all)
write.table(go_inform,file=paste0(int_trans,"_GoTerm_Generality_direction.txt"),sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S6-9: DEGs overlapping
#
# For every pair of transcripts of interest in one cancer type, compares their
# DESeq DEG lists (FDR < fdr_th) via DEGs_DEseq_table_overlap_2() and tabulates
# the overlap / consistency statistics, one row per transcript pair.
# The commented int_transcript/path_raw pairs switch the cancer type; exactly
# one pair must be active, matched by the write.table() call at the bottom.
rm(list=ls())
rm(list=ls())
rm(list=ls())
setwd("E:\\PKM_data_code\\Code\\")
source('DEGs_DEseq_table_overlap_2.R')
#int_transcript<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))#TCGA-KIRC
#path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S6\\TCGA_KIRC_DEGs\\"#TCGA-KIRC
#int_transcript<-as.matrix(c("ENST00000335181","ENST00000389093","ENST00000568883"))#TCGA-CESC
#path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S7\\TCGA_CESC_DEGs\\"#TCGA-CESC
#int_transcript<-as.matrix(c("ENST00000335181","ENST00000389093","ENST00000568883"))#TCGA-PAAD
#path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S8\\TCGA_PAAD_DEGs\\"#TCGA-PAAD
int_transcript<-as.matrix(c("ENST00000335181","ENST00000389093","ENST00000568883"))#TCGA-BRCA
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S9\\TCGA_BRCA_DEGs\\"#TCGA-BRCA
fdr_th<-0.00001 #for extracting DEGs
# All unordered transcript pairs (combn over row indices).
index_p<-t(combn(dim(int_transcript)[1],2))
int_pair<-cbind(int_transcript[index_p[,1],1],int_transcript[index_p[,2],1])
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
over_result_common_end<-NULL
for (i in 1:dim(int_pair)[1]){
project_1<-as.matrix(int_pair[i,1])
project_2<-as.matrix(int_pair[i,2])
# Each transcript's DESeq results live in its own subfolder.
path_1<-paste0(path_raw,project_1,"\\")
setwd(path_1)
deg_DEseq_1<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
path_2<-paste0(path_raw,project_2,"\\")
setwd(path_2)
deg_DEseq_2<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
output<-DEGs_DEseq_table_overlap_2(deg_DEseq_1,deg_DEseq_2,fdr_th)
over_result=output$over_result
file_over_result<-paste0(project_1," VS ",project_2)
# Row 2 of over_result holds the summary statistics for this pair.
over_result_common<-t(as.matrix(over_result[2,]))
rownames(over_result_common)<-file_over_result
over_result_common_end<-rbind(over_result_common_end,over_result_common)
}
comparison<-as.matrix(rownames(over_result_common_end))
over_result_common_end<-cbind(comparison,over_result_common_end)
# Drop two intermediate columns before labelling the final table.
over_result_common_end<-over_result_common_end[,-c(4,8)]
colnames(over_result_common_end)<-c("Comparison", "Number of DEGs for transcript 1" ,"Number of DEGs for transcript 2","Overlaps", "Consistent DEGs", "Concordance ratio", "Consistent up-regulated DEGs", "Consistent down-regulated DEGs", "P value")
setwd(path_out)
# Uncomment the write.table() matching the cancer type selected above.
# (A duplicated commented-out Output_Table_S8.txt line was removed.)
#write.table(over_result_common_end,file="Output_Table_S6.txt",sep="\t",row.names=F,col.names=T,quote=F)
#write.table(over_result_common_end,file="Output_Table_S7.txt",sep="\t",row.names=F,col.names=T,quote=F)
#write.table(over_result_common_end,file="Output_Table_S8.txt",sep="\t",row.names=F,col.names=T,quote=F)
write.table(over_result_common_end,file="Output_Table_S9.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S10: DEGs overlapping
# Same pairwise DEG-overlap analysis as Tables S6-S9, here for the two PKM
# transcripts available in TCGA-COAD (so there is exactly one comparison).
rm(list=ls())
rm(list=ls())
rm(list=ls())
setwd("E:\\PKM_data_code\\Code\\")
source('DEGs_DEseq_table_overlap_2.R')
int_transcript<-as.matrix(c("ENST00000335181","ENST00000389093"))#TCGA-COAD
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S10\\TCGA_COAD_DEGs\\"#TCGA-COAD
fdr_th<-0.00001 #for extracting DEGs
# All unordered transcript pairs (here a single pair).
index_p<-t(combn(dim(int_transcript)[1],2))
int_pair<-cbind(int_transcript[index_p[,1],1],int_transcript[index_p[,2],1])
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
over_result_common_end<-NULL
for (i in 1:dim(int_pair)[1]){
project_1<-as.matrix(int_pair[i,1])
project_2<-as.matrix(int_pair[i,2])
path_1<-paste0(path_raw,project_1,"\\")
setwd(path_1)
deg_DEseq_1<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
path_2<-paste0(path_raw,project_2,"\\")
setwd(path_2)
deg_DEseq_2<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
output<-DEGs_DEseq_table_overlap_2(deg_DEseq_1,deg_DEseq_2,fdr_th)
over_result=output$over_result
file_over_result<-paste0(project_1," VS ",project_2)
over_result_common<-t(as.matrix(over_result[2,]))
rownames(over_result_common)<-file_over_result
over_result_common_end<-rbind(over_result_common_end,over_result_common)
}
comparison<-as.matrix(rownames(over_result_common_end))
over_result_common_end<-cbind(comparison,over_result_common_end)
# With a single comparison the column drop collapses the 1-row matrix to a
# vector; t(as.matrix(...)) restores a one-row matrix so colnames() works.
over_result_common_end<-t(as.matrix(over_result_common_end[,-c(4,8)]))
colnames(over_result_common_end)<-c("Comparison", "Number of DEGs for transcript 1" ,"Number of DEGs for transcript 2","Overlaps", "Consistent DEGs", "Concordance ratio", "Consistent up-regulated DEGs", "Consistent down-regulated DEGs", "P value")
setwd(path_out)
write.table(over_result_common_end,file="Output_Table_S10.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S11: cox analysis of four transcripts in TCGA KIRC
# Uni- and bivariate Cox proportional-hazards models relating PKM
# transcript expression (TPM) to survival in the TCGA-KIRC cohort.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(survival)
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S11\\"
setwd(path_raw)
exp_trans<-as.matrix(read.csv("Table_S11_TCGA_KIRC_trans_TPM.txt",header=T,row.names = 1,sep="\t"))
survival<-as.matrix(read.csv("Table_S11_TCGA_KIRC_SurvivalData.txt",header=T,row.names = 1,sep="\t"))
# One expression vector per transcript; the short suffix is the last three
# digits of the ENST identifier.
est_622<-as.matrix(exp_trans[,"ENST00000319622"])
est_181<-as.matrix(exp_trans[,"ENST00000335181"])
est_609<-as.matrix(exp_trans[,"ENST00000561609"])
est_093<-as.matrix(exp_trans[,"ENST00000389093"])
est_883<-as.matrix(exp_trans[,"ENST00000568883"])
# Surv() below assumes survival[,1] = follow-up time and survival[,2] =
# event indicator -- TODO confirm against the source table's column order.
# Univariate models for each transcript:
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_181))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_609))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_093))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_883))
# Bivariate models adjusting each transcript for est_181:
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_609+est_181))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_093+est_181))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_883+est_181))
# Bivariate models adjusting for est_622:
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_181+est_622))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_609+est_622))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_093+est_622))
summary(coxph(Surv(as.matrix(survival[,1]), as.matrix(survival[,2])) ~ est_883+est_622))
##################################################################################
##################################################################################
#Table S12: DEGs overlapping
# Cross-cohort comparison: for each PKM transcript, overlap the DEG list
# derived from TCGA-KIRC with the one derived from the Japanese KIRC
# cohort, using a quantile-based threshold instead of a fixed FDR cutoff.
rm(list=ls())
rm(list=ls())
rm(list=ls())
quantile_th<-0.2
setwd("E:\\PKM_data_code\\Code\\")
source('DEGs_DEseq_table_overlap_quantile_th.R')
int_trans<-as.matrix(c("ENST00000335181","ENST00000561609","ENST00000389093","ENST00000568883"))
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S12\\"
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
over_result_common_end<-NULL
# One TCGA-vs-Japanese comparison per transcript.
for (i in 1:length(int_trans)){
path_1<-paste0(path_raw,"TCGA_KIRC_DEGs\\",int_trans[i],"\\")
setwd(path_1)
deg_DEseq_1<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
path_2<-paste0(path_raw,"Japanese_KIRC_DEGs\\",int_trans[i],"\\")
setwd(path_2)
deg_DEseq_2<-as.matrix(read.csv("DEseq_DEGs_result.txt",header=T,sep="\t"))
output_end<-DEGs_DEseq_table_overlap_quantile_th(deg_DEseq_1,deg_DEseq_2,quantile_th)
over_result=output_end$over_result
file_over_result<-int_trans[i]
# Row 2 of over_result holds the summary statistics -- presumably;
# confirm against DEGs_DEseq_table_overlap_quantile_th().
over_result_common<-t(as.matrix(over_result[2,]))
rownames(over_result_common)<-file_over_result
over_result_common_end<-rbind(over_result_common_end,over_result_common)
}
# Label rows, drop raw columns 4 and 8, and attach readable headers.
comparison<-as.matrix(rownames(over_result_common_end))
over_result_common_end<-cbind(comparison,over_result_common_end)
over_result_common_end<-over_result_common_end[,-c(4,8)]
colnames(over_result_common_end)<-c("Comparison", "Number of DEGs for transcript 1" ,"Number of DEGs for transcript 2","Overlaps", "Consistent DEGs", "Concordance ratio", "Consistent up-regulated DEGs", "Consistent down-regulated DEGs", "P value")
setwd(path_out)
write.table(over_result_common_end,file="Output_Table_S12.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S13: Go enrichment
# GO biological-process enrichment (clusterProfiler) of the up-regulated
# consistent DEGs, against a custom background gene list.  Toggle the
# commented read.csv/write.table lines to switch between the TCGA and
# Japanese cohort inputs/outputs.
rm(list=ls())
rm(list=ls())
rm(list=ls())
library(clusterProfiler)
library(dplyr)
library(tidyr)
library(DOSE)
library(GO.db)
library(org.Hs.eg.db)
library(GSEABase)
fdr_goterm<-0.00001 #go enrichment cutoff
setwd("E:\\PKM_data_code\\Code\\")
source('deg_GoTerm_clusterProfiler.R')
path_raw<-"E:\\PKM_data_code\\Source_data\\Table_S13\\"
setwd(path_raw)
background<-as.matrix(read.csv("Background_gene.txt",header=F,sep="\t"))
#deg_DEGseq<-as.matrix(read.csv("TCGA_DEGs_quantile_th_0.2.txt",header=F,sep="\t"))
deg_DEGseq<-as.matrix(read.csv("Japanese_DEGs_quantile_th_0.2.txt",header=F,sep="\t"))
# Column 3 flags direction (up-regulated rows coded 1 -- presumably;
# confirm), column 2 is the gene symbol.  Note numeric=="1" works via
# R's implicit numeric-to-character coercion.
deg_up<-as.matrix(deg_DEGseq[which(as.numeric(deg_DEGseq[,3])=="1"),2])#extract the gene symbol
pathway_up=enrichGO(gene=deg_up,OrgDb='org.Hs.eg.db',keyType = "SYMBOL",ont="BP",universe = background ,pAdjustMethod = "BH",qvalueCutoff=0.05)
path_table_up<-deg_GoTerm_clusterProfiler(pathway_up)
# Coerce the BH-adjusted p-values to numeric and keep only terms below
# the stricter fdr_goterm cutoff.
p.adjust.up<-as.matrix(path_table_up[,"p.adjust"])
mode(p.adjust.up)="numeric"
sig_path_up<-as.matrix(path_table_up[which(p.adjust.up<fdr_goterm),])
path_out<-paste0(path_raw,"Output\\")
dir.create(path_out)
setwd(path_out)
#write.table(sig_path_up,file="TCGA_GoTerms_up.txt",sep="\t",row.names=F,col.names=T,quote=F)
# NOTE(review): output filename spells "Janpanese" -- left unchanged here
# because downstream steps may rely on the existing name; verify.
write.table(sig_path_up,file="Janpanese_GoTerms_up.txt",sep="\t",row.names=F,col.names=T,quote=F)
##################################################################################
##################################################################################
#Table S14: Alignment of amino acid sequences of PKM transcripts
#The amino acid sequences of different transcripts were aligned using Uniprot website.
##################################################################################
##################################################################################
#Table S15: Relative protein level
#The quantification of protein is based on the western blots image.
|
524624647eecf138ece57dce805340074d621f2b
|
1be8aec7140b5c2e7d29d8a1b3edc1367aafa0bb
|
/plot1.R
|
0fea76d11bb2d0f8240215dba5b53280bba28939
|
[] |
no_license
|
mixacom/ExData_PeerAssessment2
|
db612b8efa6c5b724cb6c113d0b6d67b4ad344cb
|
ed121d72294537140bd8eaa6a81e1b185748d580
|
refs/heads/master
| 2021-01-22T01:38:07.248679
| 2015-12-27T23:06:20
| 2015-12-27T23:06:20
| 48,663,716
| 0
| 0
| null | 2015-12-27T22:58:22
| 2015-12-27T22:58:21
| null |
UTF-8
|
R
| false
| false
| 961
|
r
|
plot1.R
|
# plot1.R -- total PM2.5 emissions in the US for 1999, 2002, 2005 and
# 2008 (NEI data), plotted with a fitted linear trend line and saved to
# plot1.png.
nei <- readRDS("measures of pm/summarySCC_PM25.rds")
scc <- readRDS("measures of pm/Source_Classification_Code.rds")  # kept from original; not used below
nei$year <- as.factor(nei$year)

# Total emissions per measurement year.  Replaces the original
# split()/lapply()/assign() chain (which created y1999..y2008 globals
# only to sum them) with a single aggregation; level order matches the
# original column order "1999","2002","2005","2008".
totals <- tapply(nei$Emissions, nei$year, sum)
years <- as.integer(names(totals))
pmv <- as.integer(totals)  # integer truncation kept from the original

# Fix: open the PNG device BEFORE calling par().  In the original,
# par(mar = c(4, 4, 1, 1)) preceded png(), so the margins were applied to
# the previously active device and never affected the saved file.
png("plot1.png", width = 480, height = 480)
par(mar = c(4, 4, 1, 1))
plot(years, pmv, main = "Total emissions from PM2.5 in the US in 1999, 2002, 2005 and 2008", xlab = "Year", ylab = "Emissions Level", yaxt = "n", cex.main = 0.9)
# Custom y-axis: label exactly at the plotted totals, unscientific format.
axis(2, at = pmv ,labels = format(pmv, scientific=FALSE))
# Linear trend across the four years.
model <- lm(pmv ~ years)
abline(model, lwd = 1, col = "steelblue")
dev.off()
|
d3c6bbef1cc50adfda87d481736dc583201fa75a
|
3da91b996bd811d1755fef0751aaef1e823f7608
|
/Figure3.R
|
a9e5ddd9d2daf4a571c8f8a675568af8e3317e0b
|
[] |
no_license
|
mengysun/Dissecting-noise-project
|
00dc83e7e90165d9d0c587ecf6be7f6cb1c19428
|
a495f6b4579a2a549569d0dc8c84e208fda43e42
|
refs/heads/master
| 2020-06-01T13:23:50.287333
| 2019-06-08T21:03:48
| 2019-06-08T21:03:48
| 190,793,421
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,621
|
r
|
Figure3.R
|
# Figure3.R (setup): load gene-level noise estimates, expression and
# annotation tables, and join them into Gene_noise_table keyed by gene
# symbol (all joins via match(); unmatched genes get NA).
library(dplyr)
library(ggplot2)
library(ppcor)
library(ggpubr)
library(gridExtra)
library(grid)
library(ggExtra)
Gene_noise_raw<-read.table(file="Data/cl7_noise_raw",sep="\t",header=TRUE)
Gene_noise_table<-read.table(file="Data/Gene_noise_table_all_cells",sep="\t",header=TRUE)
# Raw intrinsic/extrinsic noise columns; assumes the two tables share row
# order -- TODO confirm (no key-based join is performed here).
Gene_noise_table$intrinsic_noise<-Gene_noise_raw$Intrinsic_noise
Gene_noise_table$extrinsic_noise<-Gene_noise_raw$Extrinsic_noise
# Per-gene expression (rpkm) from the cl7 cell line, matched by symbol.
Gene_expression_table<-read.table(file="Data/cl7_expression",sep="\t",header=TRUE)
noise_expression_index<-match(Gene_noise_table$Genes,Gene_expression_table$Genes)
Gene_noise_table$expression<-Gene_expression_table$rpkm[noise_expression_index]
# Gene coordinates; TSS = gene start on the + strand, gene end otherwise.
Mouse_genes_all<-read.table(file="Data/Mouse_genes_all.txt",sep="\t",header=TRUE)
Mouse_genes_all$TSS<-ifelse(Mouse_genes_all$Strand==1,Mouse_genes_all$Gene.start..bp.,Mouse_genes_all$Gene.end..bp.)
Gene_infor_index<-match(Gene_noise_table$Genes,Mouse_genes_all$Gene.name)
Gene_noise_table$chr<-Mouse_genes_all$Chromosome.scaffold.name[Gene_infor_index]
Gene_noise_table$TSS<-Mouse_genes_all$TSS[Gene_infor_index]
Gene_noise_table$gene_end<-Mouse_genes_all$Gene.end..bp.[Gene_infor_index]
Gene_noise_table$gene_start<-Mouse_genes_all$Gene.start..bp.[Gene_infor_index]
# Mean expression across the 39 rpkm sample columns, mapped onto the
# noise table via stable-ID -> symbol.
Mouse_expression_all<-read.table(file="Data/mouse_rpkm.txt",sep="\t",header=TRUE,quote="",fill=FALSE)
exp_name_index<-match(rownames(Mouse_expression_all),Mouse_genes_all$Gene.stable.ID)
Mouse_expression_all$geneName<-Mouse_genes_all$Gene.name[exp_name_index]
Mouse_expression_all$mean_exp<-rowMeans(Mouse_expression_all[,1:39])
noise_allexp_index<-match(Gene_noise_table$Genes,Mouse_expression_all$geneName)
Gene_noise_table$exp_all<-Mouse_expression_all$mean_exp[noise_allexp_index]
#Figure 3
# Panels 3a/3b: boxplots comparing noise residuals between mitochondrial
# and non-mitochondrial genes.  Mitochondrial genes are those annotated
# with the GO term "mitochondrion".
Mouse_GO_term<-read.table(file="Data/GO_term_name.txt",sep="\t",header=TRUE,quote="", fill=FALSE)
Mouse_Mito<-Mouse_GO_term%>%
filter(GO.term.name=="mitochondrion")
# Flag noise-table genes carrying the mitochondrion annotation.
Mito_index<-match(Gene_noise_table$Genes,Mouse_Mito$Gene.name)
Gene_noise_table$Mito<-(!is.na(Mito_index))
Gene_noise_Mito<-Gene_noise_table%>%
filter(Mito)
Gene_noise_nonMito<-Gene_noise_table%>%
filter(!Mito)
#Fig.3b
# Long-format table stacking the raw intrinsic residual ("Dint") and the
# intrinsic residual controlling for extrinsic noise ("Dint_c").
Mito_intrinsic_tab<-data.frame(c(Gene_noise_table$intrinsic_residual,Gene_noise_table$intrinsic_residual_controlEx),
c(Gene_noise_table$Mito,Gene_noise_table$Mito),
c(rep("Dint",length(Gene_noise_table$Genes)),rep("Dint_c",length(Gene_noise_table$Genes))))
names(Mito_intrinsic_tab)<-c("int","is_Mito","i_or_c")
# Recode logical flag as display labels (the first assignment coerces the
# column to character; the second comparison then matches "FALSE").
Mito_intrinsic_tab$is_Mito[Mito_intrinsic_tab$is_Mito==TRUE]<-"Mitochondrial genes"
Mito_intrinsic_tab$is_Mito[Mito_intrinsic_tab$is_Mito==FALSE]<-"Non-mitochondrial genes"
Mito_intrinsic_tab$is_Mito<-factor(Mito_intrinsic_tab$is_Mito,
levels = c("Mitochondrial genes","Non-mitochondrial genes"),ordered = TRUE)
# Interaction defines the four boxplot positions (group x raw/controlled).
Mito_intrinsic_tab$f12<-interaction(Mito_intrinsic_tab$is_Mito,Mito_intrinsic_tab$i_or_c)
ggplot(Mito_intrinsic_tab,aes(y=int,x=f12,fill=is_Mito))+
scale_fill_manual(values=c("#CC79A7", "#56B4E9"))+
geom_boxplot(outlier.size=0.1,fatten=0.5)+
xlab(label="")+
ylab(label="Intrinsic noise")+
theme(axis.text.y=element_text(size=12,family="Times New Roman",color="black"))+
theme(axis.title.y=element_text(size=12,family="Times New Roman",color="black"))+
theme_linedraw()+
theme(legend.position = "bottom")+
theme(legend.title=element_blank())+
theme(legend.text=element_text(size=12,family="Times New Roman",color="black"))+
theme(legend.direction = "vertical")+
theme(legend.margin=margin(-0.5))+
removeGridX()+
theme(axis.text.x=element_blank())+
theme(axis.ticks.x=element_blank())+
theme(axis.title.x=element_blank())+
scale_y_continuous(limits=c(-3000,6000),breaks=c(-2000,0,2000,4000))
#Fig.3a
# Same construction for extrinsic residuals ("Dext" raw, "Dext_c"
# controlling for intrinsic noise).
Mito_extrinsic_tab<-data.frame(c(Gene_noise_table$extrinsic_residual,Gene_noise_table$extrinsic_residual_controlIn),
c(Gene_noise_table$Mito,Gene_noise_table$Mito),
c(rep("Dext",length(Gene_noise_table$Genes)),rep("Dext_c",length(Gene_noise_table$Genes))))
names(Mito_extrinsic_tab)<-c("ext","is_Mito","e_or_c")
Mito_extrinsic_tab$is_Mito[Mito_extrinsic_tab$is_Mito==TRUE]<-"Mitochondrial genes"
Mito_extrinsic_tab$is_Mito[Mito_extrinsic_tab$is_Mito==FALSE]<-"Non-mitochondrial genes"
Mito_extrinsic_tab$is_Mito<-factor(Mito_extrinsic_tab$is_Mito,
levels = c("Mitochondrial genes","Non-mitochondrial genes"),ordered = TRUE)
Mito_extrinsic_tab$f12<-interaction(Mito_extrinsic_tab$is_Mito,Mito_extrinsic_tab$e_or_c)
ggplot(Mito_extrinsic_tab,aes(y=ext,x=f12,fill=is_Mito))+
scale_fill_manual(values=c("#CC79A7", "#56B4E9"))+
geom_boxplot(outlier.size=0.1,fatten=0.5)+
xlab(label="")+
ylab(label="Extrinsic noise")+
theme(axis.title.y=element_text(size=12,family="Times New Roman",color="black"))+
theme(axis.text.y=element_text(size=12,family="Times New Roman",color="black"))+
theme_linedraw()+
#  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
#        panel.background = element_blank(),axis.line = element_line(colour = "black"),panel.border=element_blank())+
theme(legend.position = "bottom")+
theme(legend.title=element_blank())+
theme(legend.text=element_text(size=12,family="Times New Roman",color="black"))+
theme(legend.direction = "vertical")+
theme(legend.margin=margin(-0.5))+
removeGridX()+
theme(axis.text.x=element_blank())+
theme(axis.ticks.x=element_blank())+
theme(axis.title.x=element_blank())+
scale_y_continuous(limits=c(-3000,6000),breaks=c(-2000,0,2000,4000))
#Figure 3c~d
#========================================================================================================================
# Panels 3c/3d: compare promoter architecture (TATA-box presence) and
# miRNA targeting between mitochondrial genes and an expression-matched
# ("stratified") sample of non-mitochondrial protein-coding genes.
Gene_noise_raw<-read.table(file="Data/cl7_noise_raw",sep="\t",header=TRUE)
Mouse_genes_all<-read.table(file="Data/Mouse_protein_coding.txt",sep="\t",header=TRUE)
# Exclude mitochondrially-encoded genes (chromosome MT).
Mouse_genes_all<-Mouse_genes_all%>%
filter(Chromosome.scaffold.name!="MT")
# Keep the first row per gene symbol.
unique_genes<-unique(as.character(Mouse_genes_all$Gene.name))
gene_unique_index<-match(unique_genes,Mouse_genes_all$Gene.name)
Mouse_genes_all<-Mouse_genes_all[gene_unique_index,]
Mouse_GO_term<-read.table(file="Data/GO_term_name.txt",sep="\t",header=TRUE,quote="", fill=FALSE)
# Mean expression over the 39 rpkm columns; drop genes without it.
Mouse_expression_all<-read.table(file="Data/mouse_rpkm.txt",sep="\t",header=TRUE,quote="",fill=FALSE)
Mouse_expression_all$mean_exp<-apply(Mouse_expression_all[,1:39],1,mean)
gene_exp_index<-match(Mouse_genes_all$Gene.stable.ID,rownames(Mouse_expression_all))
Mouse_genes_all$mean_exp<-Mouse_expression_all$mean_exp[gene_exp_index]
Mouse_genes_all<-Mouse_genes_all%>%
filter(!is.na(mean_exp))
#Genomic features
#TATA box
TATA_box<-read.table(file="Data/mouse_TATA_all.bed",sep="\t")
# BED name field (V4) looks like "<gene>_...": keep the part before "_".
TATA_box$genes = unlist(lapply(TATA_box$V4, function (x) strsplit(as.character(x), "_", fixed=TRUE)[[1]][1]))
TATA_index<-match(Mouse_genes_all$Gene.name,TATA_box$genes)
Mouse_genes_all$TATA<-(!is.na(TATA_index))
#RegNetwork data
# Keep only miRNA regulators (symbol contains "miR") and count how many
# miRNAs target each gene (0 when never targeted).
TF_target_dat<-read.table(file="Data/Mouse_regulatory_interaction.csv",sep=",",header=TRUE)
miRNA_target_dat<-TF_target_dat[(grepl("miR",as.character(TF_target_dat$regulator_symbol))),]
target_number_dat<-miRNA_target_dat%>%
group_by(target_symbol)%>%
dplyr::summarise(target_number=length(target_symbol))
noise_target_index<-match(Mouse_genes_all$Gene.name,target_number_dat$target_symbol)
Mouse_genes_all$target_number<-target_number_dat$target_number[noise_target_index]
Mouse_genes_all$target_number[is.na(Mouse_genes_all$target_number)]<-0
# Split genes into mitochondrial (GO "mitochondrion") vs the rest.
Mouse_GO_term<-read.table(file="Data/GO_term_name.txt",sep="\t",header=TRUE,quote="", fill=FALSE)
Mouse_Mito<-Mouse_GO_term%>%
filter(GO.term.name=="mitochondrion")
Mito_index<-match(Mouse_genes_all$Gene.name,Mouse_Mito$Gene.name)
Mouse_genes_all$Mito<-(!is.na(Mito_index))
Mito_genes<-Mouse_genes_all%>%
filter(Mito)
Mito_control<-Mouse_genes_all%>%
filter(!Mito)
# Expression-stratified control: bin non-mitochondrial genes by the 51
# expression quantiles of the mitochondrial genes (50 bins), then sample
# the same number of genes from every bin so the control matches the
# mitochondrial expression distribution.
bins<-quantile(Mito_genes$mean_exp,prob=c(1:51)/51)
Mito_stratified<-split(Mito_control,cut(Mito_control$mean_exp,breaks=bins))
genes_each_bin<-numeric(50)
for(i in 1:50){
genes_each_bin[i]<-length(Mito_stratified[[i]]$Gene.name)
}
sam_per_bin<-min(genes_each_bin)
stratified_Mito_control<-Mito_control[FALSE,]
names(stratified_Mito_control)<-names(Mito_control)
set.seed(8)
for(i in 1:50){
sub_tab_index<-sample(c(1:genes_each_bin[i]),sam_per_bin)
sub_tab<-Mito_stratified[[i]][sub_tab_index,]
stratified_Mito_control<-rbind(stratified_Mito_control,sub_tab)
}
#Figure 3c
# NOTE(review): the fractions below (125/1603, 344/2850 and, for panel
# 3d, 1058/1603, 2085/2850) are hard-coded from the interactive sum()
# outputs printed here; they must be re-derived if the inputs change.
sum(stratified_Mito_control$TATA)
344/2850
sum(Mito_genes$TATA)
125/1603
df <- data.frame(genes=c("Mitochondria_genes", "Control"),
ratio=c(125/1603,344/2850))
df$bar_order <- factor(df$genes, as.character(df$genes))
ggplot(data=df, aes(x=bar_order, y=ratio,fill=bar_order)) +
geom_bar(stat="identity",width=0.3)+
scale_fill_manual(values=c("#CC79A7", "#F0E442"))+
xlab(label="")+
ylab(label="Fraction of genes with TATA-box")+
theme(axis.text.x = element_text(size=12,family="Times New Roman",color="black"))+
theme(axis.title.y = element_text(size=12,angle=90,vjust = 0.5,family="Times New Roman",color="black"))+
theme(axis.text.y=element_text(size=12,family="Times New Roman",color="black"))+
theme_linedraw()+
scale_y_continuous(limits=c(0,0.15),expand=c(0,0))+
scale_x_discrete(labels=c("Mitochondrial\n genes","Non-mitochondrial \n genes (stratified)"))+
# NOTE(review): this second scale_x_discrete() replaces the one above,
# discarding its labels -- confirm which axis style is intended.
scale_x_discrete(breaks = NULL)+
theme(legend.position = "none")
#Figure 3d
df <- data.frame(genes=c("Mitochondria_genes", "Control"),
ratio=c(1058/1603,2085/2850))
df$bar_order <- factor(df$genes, as.character(df$genes))
ggplot(data=df, aes(x=bar_order, y=ratio,fill=bar_order)) +
geom_bar(stat="identity",width=0.3)+
scale_fill_manual(values=c("#CC79A7", "#F0E442"))+
xlab(label="")+
ylab(label="Fraction of genes targeted by miRNA")+
theme(axis.text.x = element_text(size=12,family="Times New Roman",color="black"))+
theme(axis.title.y = element_text(size=12,angle=90,vjust = 0.5,family="Times New Roman",color="black"))+
theme(axis.text.y=element_text(size=12,family="Times New Roman",color="black"))+
theme_linedraw()+
#  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
#        panel.background = element_blank(), axis.line = element_line(colour = "black"))+
#  theme(axis.ticks=element_line(size=1.5))+
#  theme(axis.ticks.length=unit(5,"mm"))+
scale_y_continuous(limits=c(0,1),expand=c(0,0))+
scale_x_discrete(labels=c("Mitochondrial\n genes","Non-mitochondrial \n genes (stratified)"))+
scale_x_discrete(breaks = NULL)+
theme(legend.position = "none")
|
b34660323c9bdffa1534b645d76c50cae39dc45d
|
825f3001ed26446b607ff83ce7bd04915adc3600
|
/00_data_prep/01_load_moose_data.R
|
5157c9f037b511f0669e5eeee381cbf30486be44
|
[
"Apache-2.0"
] |
permissive
|
Bevann/telemetry
|
a8b25af47689ec6928f512ea4471b6b86cbe361e
|
c47dc42bb2403fcba7f405c3f589cb23746306f0
|
refs/heads/master
| 2020-11-26T16:42:36.253405
| 2019-12-18T19:01:35
| 2019-12-18T19:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,375
|
r
|
01_load_moose_data.R
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# 01_load_moose_data.R -- load moose telemetry points from a multi-sheet
# Excel workbook, compute 100% and 95% minimum convex polygons (MCPs) per
# year, and export them as shapefiles.
library(dplyr)
library(readxl)
library(adehabitatHR)
library(sp)
library(ggplot2)
library(sf)
data_path <- file.path("data")
# Import one workbook sheet and tag each row with the given survey year;
# the mutated tibble is the function's return value.
import_sheets <- function(sheetname, year){
mdata.0 <- read_xlsx(file.path(data_path, "Entiako_For_MCPs.xlsx"),
sheet = sheetname) %>%
mutate(year = year)
}
mdata.13 <- import_sheets("2013-14", 2013)
mdata.14 <- import_sheets("2014-15", 2014)
mdata.15 <- import_sheets("2015-16", 2015)
# NOTE(review): "2017-18" is tagged 2018, breaking the start-year pattern
# of the lines above -- confirm this is intentional.
mdata.17 <- import_sheets("2016-17", 2016)
mdata.17 <- import_sheets("2017-18", 2018)
moose <- bind_rows(mdata.13, mdata.14, mdata.15, mdata.16, mdata.17)
# check the distribution of points (X = latitude, Y = longitude here)
ggplot(moose, aes(Y,X)) +
geom_point()
ggplot(moose, aes(Y,X)) +
geom_point() +
facet_wrap(~year)
# remove outliers # largely in 2018.
moose <- moose %>%
filter(X < 53.75)
# Create a SpatialPointsDataFrame by defining the coordinates
moose.sp <- moose[, c("X", "Y", "year")]
coordinates(moose.sp) <- c("Y", "X")
proj4string(moose.sp) <- CRS("+proj=longlat +datum=WGS84 +units=m +no_defs" )
moose.sp <- spTransform(moose.sp, CRS("+init=epsg:3005")) # Transform to BC Albers (EPSG:3005)
# check moose distribution
#mapview::mapview(mgeo)
# Calculate MCPs for each year (first column, `year`, defines the groups).
moose.mcp <- mcp(moose.sp, percent = 100)
moose.mcp.95 <- mcp(moose.sp, percent = 95)
# Plot points with the 100% and 95% MCP overlays.
plot(moose.sp, col = as.factor(moose.sp$year), pch = 16)
plot(moose.mcp, col = alpha(1:5, 0.5), add = TRUE)
plot(moose.mcp.95, col = alpha(1:5, 0.5), add = TRUE)
# convert to sf object (library(sf) is already attached above; this
# second call is a harmless no-op)
library(sf)
moose.mcp <- st_as_sf(moose.mcp)
moose.mcp.95 <- st_as_sf(moose.mcp.95)
# write out to shapefile -- note st_write() errors if the files already
# exist unless delete_layer/append options are supplied.
st_write(moose.mcp, file.path("out", "mmcp100.shp"))
st_write(moose.mcp.95, file.path("out", "mmcp95.shp"))
plot(moose.mcp)
|
ce3fbb677b1a4fb0ef58b86caaccf9e264045458
|
91f8d1c6eb5fc33c2d4de0957803634db0b9bd9d
|
/classification-tests/RcodeforDatgen.R
|
5a57199f965be492670190454285aed746ca6ea1
|
[] |
no_license
|
vipinkumar7/Machine-Learning
|
650306ce4317c4f4aef6756c9002e4765efeada0
|
79894ef0f6bf140f099415dfdf184401c1f4e507
|
refs/heads/master
| 2021-01-24T16:09:37.676388
| 2019-05-17T13:08:47
| 2019-05-17T13:08:47
| 17,623,072
| 0
| 1
| null | 2014-07-04T05:59:58
| 2014-03-11T08:20:58
|
Java
|
UTF-8
|
R
| false
| false
| 879
|
r
|
RcodeforDatgen.R
|
##@author Vipin Kumar
### Random binomial samples (size 10) for success probabilities 1/6, 1/2
### and 1/3; each 10 x 30 draw is appended to a tab-separated text file.
### NOTE: write() flattens the matrix column-major into rows of 30, so
### output rows are not rows of the matrix (irrelevant for iid draws).

# Draw a fresh 10 x 30 matrix of Binomial(10, p) counts, one row of 30
# draws at a time, and append it to `path`.
draw_and_append <- function(p, path) {
  m <- array(0, dim = c(10, 30))
  for (row in 1:10) {
    m[row, ] <- rbinom(30, 10, p)
  }
  write(m, path, ncolumns = 30, append = TRUE, sep = "\t")
}

# Two independent batches per probability, matching the original
# draw/write order exactly.
draw_and_append(1/6, "./onebysix.txt")
draw_and_append(1/6, "./onebysix.txt")
draw_and_append(1/2, "./onebytwo.txt")
draw_and_append(1/2, "./onebytwo.txt")
draw_and_append(1/3, "./onebythree.txt")
draw_and_append(1/3, "./onebythree.txt")
|
d2d7411a99559367913d1e4975715f7ed03ccc63
|
f0d4245d2b047fa6ae960c0be28eaf32384086f7
|
/server.R
|
a5b106c106685d21507e129c5fc1461ba830f48b
|
[] |
no_license
|
andersfi/Shiny_TrondheimProvefiske
|
161fb2ee14d643e8a472d1e714383a910f81b5af
|
91245dd1d006ab022c359369005cab41dd0192e0
|
refs/heads/master
| 2021-01-20T20:15:30.792184
| 2016-08-12T09:28:49
| 2016-08-12T09:28:49
| 65,498,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,234
|
r
|
server.R
|
# server.R -- Shiny server for the Trondheim test-fishing (provefiske)
# app.  Assumes the data frames fisk_no (per-fish records), location
# (sampling sites) and location_arter (per-site, per-species summaries)
# are defined elsewhere, e.g. in global.R -- TODO confirm.
library(shiny)
library(dplyr)
library(knitr)
library(DT) #install.packages('DT')
library(leaflet)
library(tidyr)
library(curl)
shinyServer(function(input, output) {
# Reactive histogram of fish length (mm) for the selected species
# (input$var) and lake (input$varII); re-executed whenever inputs change.
output$lengthHist <- renderPlot({
x <- fisk_no$lengde_mm[fisk_no$art==input$var & fisk_no$vatn==input$varII] # length distribution
bins <- seq(min(x,na.rm=T),max(x,na.rm=T), length.out = input$n_bins)
# draw the histogram with the user-selected number of bins
hist(x, col = 'darkgray', border = 'white',xlab="Lengde (mm)",
main="Lengdefordeling i provefiske",ylab="Antall",breaks=bins)
})
# Reactive histogram of mass (g), same species/lake filtering as above.
output$massHist <- renderPlot({
x <- fisk_no$vekt_g[fisk_no$art==input$var & fisk_no$vatn==input$varII]
bins <- seq(min(x,na.rm=T),max(x,na.rm=T), length.out = input$n_bins)
hist(x, col = 'darkgray', border = 'white',xlab="Vekt (g)",
main="Vektfordeling i provefiske",ylab="Antall",breaks=bins)
})
# Filterable raw-data table; the value "alle" ("all") disables the
# corresponding filter dimension (lake, species, year).
output$table <- DT::renderDataTable(DT::datatable(
{
data <- fisk_no
if (input$vatn != "alle") {
data <- data[data$vatn == input$vatn,]
}
if (input$art != "alle") {
data <- data[data$art == input$art,]
}
if (input$aar != "alle") {
data <- data[data$aar == input$aar,]
}
data
}
,rownames= FALSE))
# render location map
# first build the HTML popup shown when a site marker is clicked
location$popup <- paste0("<strong>",location$waterBody,"</strong>",
"<br><i>vatn_lnr: </i>",location$waterBodyID,
"<br><i>Prøvefisket dato:</i>",location$dato)
output$locationmap <- renderLeaflet({
loc<-location
leaflet(loc) %>% addTiles() %>% addMarkers(lng = ~decimalLongitude, lat = ~decimalLatitude, popup = loc$popup)
})
# render species map
# popup per site-species combination with catch/weight per unit effort
# (CPUE/WPUE) and mean/max length and weight summaries
location_arter$popup <- paste0("<strong>",location_arter$waterBody,"- ",location_arter$art,"</strong>",
"<br><i>CPUE: </i>",location_arter$CPUE,
"<br><i>WPUE: </i>",location_arter$WPUE,
"<br><i>gj.lengde_mm:</i>",location_arter$gj_lengde_mm,
"<br><i>gj.vekt_g:</i>",location_arter$gj_vekt_g,
"<br><i>Max.vekt_g:</i>",location_arter$max_vekt_g,
"<br><i>Max.lengde_mm:</i>",location_arter$max_lengde_mm
)
# Resultat_provefiske_map: per-species circle-marker map coloured by the
# selected summary variable.
output$resultat_provefiske_map <- renderLeaflet({
# select species
loc_arter <- location_arter[location_arter$art==input$Resultat_provefiske_velgArt,]
# select variable to display as colour palette on map
if (input$Resultat_provefiske_velgVariabel=="CPUE") loc_arter$displayVar <- loc_arter$CPUE
if (input$Resultat_provefiske_velgVariabel=="WPUE") loc_arter$displayVar <- loc_arter$WPUE
if (input$Resultat_provefiske_velgVariabel=="gj_lengde_mm") loc_arter$displayVar <- loc_arter$gj_lengde_mm
if (input$Resultat_provefiske_velgVariabel=="gj_vekt_g") loc_arter$displayVar <- loc_arter$gj_vekt_g
if (input$Resultat_provefiske_velgVariabel=="max_lengde_mm") loc_arter$displayVar <- loc_arter$max_lengde_mm
if (input$Resultat_provefiske_velgVariabel=="max_vekt_g") loc_arter$displayVar <- loc_arter$max_vekt_g
# create a blue-to-red numeric colour palette over the displayed variable
pal <- colorNumeric(
palette = c("blue", "red"),
domain = loc_arter$displayVar
)
leaflet(loc_arter) %>%
addTiles() %>%
addCircleMarkers(lng = ~decimalLongitude, lat = ~decimalLatitude, popup = loc_arter$popup,
color = ~pal(displayVar),stroke = FALSE, fillOpacity = 0.9
) %>%
addLegend("bottomright", pal = pal, values = ~displayVar,title = input$Resultat_provefiske_velgVariabel,labFormat = labelFormat(prefix = ""),
opacity = 1
)
})
})
|
c2cf72ae796dd18e399e5bcf42e52e39230cf593
|
027c5bdd13efa7d43e7cf9a4949249d89a6eb7f9
|
/part3/apriori.R
|
8fa9e1235949fcdefb6aa8a1e53b18e5bc92aeb7
|
[] |
no_license
|
sjjaved/work
|
492b7abf8c457181a5e89d0e8cb02d232643c831
|
54f7024a28f71b361f8d920d890f0f830d989d8d
|
refs/heads/master
| 2021-01-19T14:04:34.427052
| 2017-04-13T04:44:22
| 2017-04-13T04:44:22
| 88,123,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,060
|
r
|
apriori.R
|
# Load the library for doing association rules
# install.packages('arules')
library(arules)
# Data preprocessing: discretise the continuous Adult census attributes
# so the data set can be mined as market-basket transactions.
data("AdultUCI")
AdultUCI[1:2,]
# Drop the survey weight and the redundant numeric education code.
AdultUCI[["fnlwgt"]] <- NULL
AdultUCI[["education-num"]] <- NULL
# Bin age into four ordered life-stage categories.
AdultUCI[[ "age"]] <- ordered(cut(AdultUCI[[ "age"]], c(15,25,45,65,100)),
labels = c("Young", "Middle-aged", "Senior", "Old"))
# Bin weekly working hours into four ordered categories.
AdultUCI[[ "hours-per-week"]] <- ordered(cut(AdultUCI[[ "hours-per-week"]],
c(0,25,40,60,168)),
labels = c("Part-time", "Full-time", "Over-time", "Workaholic"))
# Capital gain/loss: none vs below/above the median of the positive values.
AdultUCI[[ "capital-gain"]] <- ordered(cut(AdultUCI[[ "capital-gain"]],
c(-Inf,0,median(AdultUCI[[ "capital-gain"]][AdultUCI[[ "capital-gain"]]>0]),Inf)),
labels = c("None", "Low", "High"))
AdultUCI[[ "capital-loss"]] <- ordered(cut(AdultUCI[[ "capital-loss"]],
c(-Inf,0, median(AdultUCI[[ "capital-loss"]][AdultUCI[[ "capital-loss"]]>0]),Inf)),
labels = c("none", "low", "high"))
# Convert the data into a transactions format (one item per factor level)
Adult <- as(AdultUCI, "transactions")
Adult
# transactions in sparse format with
# 48842 transactions (rows) and
# 115 items (columns)
summary(Adult)
# Plot item frequencies for items with support >= 0.1
itemFrequencyPlot(Adult, support = 0.1, cex.names=0.8)
# Mine association rules with the Apriori algorithm at the thresholds below
min_support = 0.01
confidence = 0.6
rules <- apriori(Adult, parameter = list(support = min_support,
confidence = confidence))
rules
# Show a sample slice of the rule set (rules 100-110).
inspect(rules[100:110, ])
# lhs rhs support confidence lift
# {occupation=Farming-fishing} => {sex=Male} 0.02856148 0.9362416 1.4005486
# {occupation=Farming-fishing} => {race=White} 0.02831579 0.9281879 1.0855456
# {occupation=Farming-fishing} => {native-country=United-States} 0.02671881 0.8758389 0.9759474
|
86a58f6821dff37ca4ce0ecd619bb684aa4d4509
|
0041e828de36fcfe9364cd9dafd5a030898c8de9
|
/RBF/validation_rbf.R
|
f5e9d8b2fc63a6536bc75859a089ff9d70bde47b
|
[] |
no_license
|
vcaitite/final-work-neural-network-accent-recognition
|
27671a99a0d0804447cab505c8193569eefd9b84
|
cf7761a86199601f2b7d6ab79f9d665a54b76729
|
refs/heads/main
| 2023-04-01T00:38:01.475420
| 2021-03-28T18:36:35
| 2021-03-28T18:36:35
| 346,911,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,472
|
r
|
validation_rbf.R
|
# validation_rbf.R -- hold-out validation of an RBF network for accent
# recognition.  Each execution splits the training database 70/30 into
# train/validation, rescales features to a common range, fits one RBF
# model per hidden-layer size in `ps`, and records validation accuracy
# in `results` (rows = executions, columns = sizes in `ps`).
rm(list=ls())
source("~/Documents/UFMG/9/Redes Neurais/TP2/final-work-neural-network-accent-recognition/RBF/trainRBF.R")
source("~/Documents/UFMG/9/Redes Neurais/TP2/final-work-neural-network-accent-recognition/RBF/YRBF.R")
source("~/Documents/UFMG/9/Redes Neurais/exemplos/escalonamento_matrix.R")
library(caret)
# Load the training database (first column is an id, last column the label).
path <- file.path("~/Documents/UFMG/9/Redes Neurais/TP2/final-work-neural-network-accent-recognition/databases", "treino.csv")
data_train <- read.csv(path)
executions <- 1
ps <- c(2300) # hidden-layer sizes (number of RBF neurons) to evaluate
results <- matrix(rep(0, (executions*length(ps))), nrow = executions)
for (index in 1:executions){
  # 70/30 train/validation partition of the rows.
  particao <- createDataPartition(1:dim(data_train)[1],p=.7)
  train <- as.matrix(data_train[particao$Resample1,])
  validation <- as.matrix(data_train[- particao$Resample1,])
  x_train <- as.matrix(train[, 2:(ncol(train)-1)])
  y_train <- as.matrix(train[, ncol(train)])
  x_validation <- as.matrix(validation[, 2:(ncol(train)-1)])
  y_validation <- as.matrix(validation[, ncol(train)])
  # Rescale all features jointly so train and validation share the same
  # scaling, then split the stacked matrix back apart.
  x_all <- rbind(x_train, x_validation)
  x_all <- staggeringMatrix(x_all, nrow(x_all), ncol(x_all))
  x_train <- x_all[1:nrow(x_train), ]
  x_validation <- x_all[(nrow(x_train)+1):(nrow(x_train)+nrow(x_validation)), ]
  length_train <- length(y_train)
  length_validation <- length(y_validation)
  for (p in ps){
    # Train the RBF model with p centers.
    modRBF <- trainRBF(x_train, y_train, p)
    # Training accuracy: threshold the network output at 0 into {-1, +1}
    # and count matches against the labels (assumed coded -1/+1 -- TODO
    # confirm): |yt + y| / 2 is 1 on a match and 0 otherwise.
    y_hat_train <- as.matrix(YRBF(x_train, modRBF))
    yt <- (1*(y_hat_train >= 0)-0.5)*2
    accuracy_train <- ((sum(abs(yt + y_train)))/2)/length_train
    #print(paste("Acuracia de treinamento para p = ", p, " é ", accuracy_train))
    # Validation accuracy.  Fixed: the original passed the undefined
    # variable `length_test` as an nrow argument to as.matrix(); the
    # extra arguments were never evaluated (lazy evaluation) and are
    # ignored by as.matrix() anyway, so dropping them preserves behaviour
    # while removing the latent error.
    y_hat_validation <- as.matrix(YRBF(x_validation, modRBF))
    yv <- (1*(y_hat_validation >= 0)-0.5)*2
    accuracy_validation <- ((sum(abs(yv + y_validation)))/2)/length_validation
    results[index, match(p, ps)] <- accuracy_validation
    print(paste("Acuracia de teste para p = ", p, " é ", accuracy_validation))
  }
}
print("-------------------------------------------------------------------------------------")
# Mean validation accuracy across executions, per hidden-layer size.
for (p in ps){
  print(paste("Acuracia de teste media para p = ", p, " é ", mean(results[, match(p, ps)])))
}
|
9fcb3a8326e1082d4fae5815182fd1c6dcbbad8d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phylosim/examples/attachProcess.Sequence.Rd.R
|
e20e359e2b9bd319ff863c11202dbf180bdeabb1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
attachProcess.Sequence.Rd.R
|
library(phylosim)
### Name: attachProcess.Sequence
### Title: Attach a Process object to a set of Site objects aggregated by a
### Sequence object
### Aliases: attachProcess.Sequence Sequence.attachProcess
### attachProcess,Sequence-method
### ** Examples
# create a Sequence object of length 6
s<-Sequence(length=10,alphabets=list(NucleotideAlphabet()))
# attach a JC69 substitution process
attachProcess(s,JC69())
# get the list of attached processes
s$processes
# attach the GTR substitution process to range 3:6
attachProcess(s,GTR(),3:6)
# get the list of attached processes
s$processes
|
409c8764aeeb31f65e9c57aba47aa376ca87072c
|
4743159d442ae24bb6f404aa3bafd8d13d015023
|
/binomial/R/auxiliary-functions.R
|
100f129160544a768b69a8cb8bddde2b81e2b4c8
|
[] |
no_license
|
stat133-sp19/hw-stat133-rachelli429
|
717af42c70e290ae6c4cfbecec47ba75f7e9d2c0
|
ad72a85148ab83ac65f40f736aa9a27ade98322c
|
refs/heads/master
| 2020-04-28T11:51:34.336066
| 2019-05-03T22:24:56
| 2019-05-03T22:24:56
| 175,256,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
auxiliary-functions.R
|
# private function to compute mean
aux_mean <- function(trials, prob) {
return(trials * prob)
}
# private function to compute variance
aux_variance <- function(trials, prob) {
(trials * prob) * (1 - prob)
}
# private function to compute mode
aux_mode <- function(trials, prob) {
m <- trials * prob + prob
if (as.integer(m) == m) {
return(c(m, m - 1))
} else {
return(as.integer(m))
}
}
# private function to compute skewness
aux_skewness <- function(trials, prob) {
(1 - 2 * prob) / sqrt(trials * prob * (1 - prob))
}
# private function to compute kurtosis
aux_kurtosis <- function(trials, prob) {
(1 - 6 * prob * (1 - prob)) / (trials * prob * (1 - prob))
}
|
ccee24cb056e5a7af80f412b81cb58d4227973bf
|
5ab636955ec3b261267d82e4b39e524269e3baf0
|
/functions/CreateAwardsDBTable.R
|
0a2d3eb08fe0a6984cfe404660037a6587297024
|
[] |
no_license
|
aaronmams/PPP-EIDL-Database
|
4f3e970e4f9ddbd4d044e7518fe93d77494064e3
|
92a8bfb294e046a9901318c77d09db791663fe3d
|
refs/heads/master
| 2023-03-23T23:32:52.497750
| 2021-03-08T20:49:20
| 2021-03-08T20:49:20
| 344,532,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
CreateAwardsDBTable.R
|
# very simple function to create the "awards" table for the database
# function take the following inputs:
# + dataframe - a data frame which should contain data from one of the
# one of the PPP source data .csv files
# function produces the following outputs:
# + awards - a data frame containing a subset of fields in the supplied
# input data frame
create.pppawards.DBtable <- function(ppp.data){
award.fields <- c('LoanNumber','DateApproved','SBAOfficeCode','ProcessingMethod','BorrowerName',
'pppBorrowerID','InitialApprovalAmount','CurrentApprovalAmount','JobsReported')
awards <- ppp.data %>% select_(.dots=award.fields)
return(awards)
}
create.eidlawards.DBtable <- function(eidl.data){
award.fields <- c('ACTIONDATE','FAIN','BorrowerName','eidlBorrowerID','FEDERALACTIONOBLIGATION',
'NONFEDERALFUNDINGAMOUNT','eidlAwardID')
awards <- eidl.data %>% select_(.dots=award.fields)
}
|
a8031c78d78e909a1fd016bf3ba5626901d3f095
|
43e8866a685f3303a9d8dd656bab9184717771da
|
/Sweavetest/man/grades.Rd
|
5fbe6d90b911d6d7bc134a8f860cf8a0064990ba
|
[] |
no_license
|
dmurdoch/Sweavetest
|
f7da4a44de505d12972726de3898250b8b1114d2
|
8c4c3c98b21279789bedef937db62a454e7e7ede
|
refs/heads/master
| 2021-03-13T00:10:52.165019
| 2013-11-10T23:29:51
| 2013-11-10T23:29:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,382
|
rd
|
grades.Rd
|
\name{grades}
\alias{grades}
\title{
Compute grades for all students based on Scantron responses.
}
\description{
This marks all Scantrons with a list of answer keys based on an exam code.
Scores can be adjusted to give different marks for correct, blank and wrong
answers.
}
\usage{
grades(scanex, key = scanex[scanex$"Student ID" == "999999999", ], correct = 1, blank = 0, wrong = 0)
}
\arguments{
\item{scanex}{
A file of Scantron lines.
}
\item{key}{
Scantron lines with the known correct answer key.
}
\item{correct}{
The score to give for correct answers.
}
\item{blank}{
The score to give for blank answers.
}
\item{wrong}{
The score to give for wrong answers.
}
}
\details{
By default, \code{correct}, \code{blank} and \code{wrong} are scalars and are
used for all questions. However, vectors may be used; they will be recycled as
necessary to the length of the first answer key.
If multiple answer keys are given for a given exam code, an answer will be marked
correct if it matches any of the keys.
}
\value{
A data frame with the same rows as the \code{scanex} input. Columns will
include the original columns as well as
\item{Correct}{The answer key for the matching exam code.}
\item{Grade}{The computed grade.}
}
\author{
Duncan Murdoch
}
\seealso{
\code{\link{wrongKey}} for detecting errors and cheating.
}
\keyword{ utils }
|
9b746ec4af7b18d779cdc9f47a177464eeb41d44
|
1eb83aaffb3c50b51fbeb2275f90ed68fec385bb
|
/R/wtp.R
|
870cf28bf4cf8399de7d58dc440f1f83256a1bf5
|
[
"MIT"
] |
permissive
|
dkc88/logitr
|
77668c66f29e933cad1515789045359c973bc641
|
73c90f5a4461add80f820bbd1fd6ea2ed4240d4a
|
refs/heads/master
| 2023-05-24T07:10:11.744989
| 2021-06-15T22:48:52
| 2021-06-15T22:48:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,654
|
r
|
wtp.R
|
# ============================================================================
# Functions for computing the WTP from estimated models
# ============================================================================
#' Get WTP from a preference space model
#'
#' Returns the computed WTP from a preference space model.
#' @keywords logitr wtp
#'
#' @param model The output of a "preference space" model estimated
#' using the `logitr()` function.
#' @param priceName The name of the parameter that identifies price.
#'
#' @details
#' Willingness to pay is computed by dividing the estimated parameters of a
#' utility model in the "preference" space by the price parameter.
#' Uncertainty is handled via simulation.
#'
#' @return A data frame of the WTP estimates.
#' @export
#' @examples
#' # Run a MNL model in the Preference Space:
#' library(logitr)
#'
#' mnl_pref <- logitr(
#' data = yogurt,
#' choiceName = "choice",
#' obsIDName = "obsID",
#' parNames = c("price", "feat", "dannon", "hiland", "yoplait")
#' )
#'
#' # Get the WTP implied from the preference space model
#' wtp(mnl_pref, priceName = "price")
wtp <- function(model, priceName) {
if (is_logitr(model) == FALSE) {
stop('Model must be estimated using the"logitr" package')
}
if (is.null(priceName)) {
stop("Must provide priceName to compute WTP")
}
model <- allRunsCheck(model)
if (model$modelSpace == "pref") {
return(getPrefSpaceWtp(model, priceName))
} else if (model$modelSpace == "wtp") {
wtp_mean <- stats::coef(model)
wtp_se <- model$standErrs
return(getCoefSummaryTable(
wtp_mean, wtp_se, model$numObs, model$numParams))
}
}
getPrefSpaceWtp <- function(model, priceName) {
# Compute mean WTP
coefs <- stats::coef(model)
priceID <- which(names(coefs) == priceName)
pricePar <- -1 * coefs[priceID]
wtp_mean <- coefs / pricePar
wtp_mean[priceID] <- -1 * coefs[priceID]
names(wtp_mean)[priceID] <- "lambda"
# Compute standErrs using simulation (draws from the varcov matrix)
draws <- getUncertaintyDraws(model, 10^5)
priceDraws <- repmatCol(-1 * draws[priceName], ncol(draws))
wtpDraws <- draws / priceDraws
wtpDraws[, priceID] <- draws[, priceID]
wtp_se <- apply(wtpDraws, 2, stats::sd)
return(getCoefSummaryTable(wtp_mean, wtp_se, model$numObs, model$numParams))
}
#' Compare WTP from preference and WTP space models
#'
#' Returns a comparison of the WTP between a preference space and WTP space
#' model.
#' @keywords logitr wtp
#'
#' @param model_pref The output of a "preference space" model estimated using
#' the `logitr()` function.
#' @param model_wtp The output of a "willingness to pay space" model estimated
#' using the `logitr()` function.
#' @param priceName The name of the parameter that identifies price.
#'
#' @details
#' Willingness to pay (WTP) is first computed from the preference space model
#' by dividing the estimated parameters by the price parameter. Then those
#' estimates are compared against the WTP values directly estimated from the
#' "WTP" space model. Uncertainty is handled via simulation.
#'
#' @return A data frame comparing the WTP estimates from preference space and
#' WTP space models.
#' @export
#' @examples
#' # Run a MNL model in the Preference Space:
#' library(logitr)
#'
#' mnl_pref <- logitr(
#' data = yogurt,
#' choiceName = "choice",
#' obsIDName = "obsID",
#' parNames = c("price", "feat", "dannon", "hiland", "yoplait")
#' )
#'
#' # Get the WTP implied from the preference space model
#' wtp_mnl_pref <- wtp(mnl_pref, priceName = "price")
#'
#' # Run a MNL model in the WTP Space:
#' mnl_wtp <- logitr(
#' data = yogurt,
#' choiceName = "choice",
#' obsIDName = "obsID",
#' parNames = c("feat", "dannon", "hiland", "yoplait"),
#' priceName = "price",
#' modelSpace = "wtp",
#' options = list(startVals = wtp_mnl_pref$Estimate)
#' )
#'
#' # Compare the WTP between the two spaces:
#' wtpCompare(mnl_pref, mnl_wtp, priceName = "price")
wtpCompare <- function(model_pref, model_wtp, priceName) {
if (is_logitr(model_pref) == FALSE | is_logitr(model_wtp) == FALSE) {
stop('Models must be estimated using the "logitr" package')
}
model_pref <- allRunsCheck(model_pref)
model_wtp <- allRunsCheck(model_wtp)
pref <- wtp(model_pref, priceName)$Estimate
pref <- c(pref, model_pref$logLik)
wtp <- stats::coef(model_wtp)
wtp <- c(wtp, model_wtp$logLik)
names(pref)[length(pref)] <- "logLik"
names(wtp)[length(wtp)] <- "logLik"
compare <- data.frame(pref = pref, wtp = wtp)
compare$difference <- round(compare$wtp - compare$pref, 8)
compare <- format(compare, scientific = FALSE)
return(compare)
}
|
0755efdf867dfcb3215ca800ea2b5d683bce1492
|
2b9dcadc84d6391c4160f8a8630f2f5c4d999a56
|
/AlexBajcz/code_for_intro_R_workshop_section_v2_Jan_2018.R
|
c11de5cb5210949df9fb16bb01a6b0984dca1015
|
[
"CC-BY-4.0",
"MIT",
"CC-BY-3.0"
] |
permissive
|
kbroman/datacarpentry_R_2018-01-08
|
c047fb9782dddf144e41b525e2f60d52a3b4045b
|
755cfb13aaddb8410570731c42abc74daf912520
|
refs/heads/master
| 2021-09-03T16:50:37.211858
| 2018-01-10T15:20:31
| 2018-01-10T15:20:31
| 116,155,478
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,083
|
r
|
code_for_intro_R_workshop_section_v2_Jan_2018.R
|
#Math operations in R
15 + 5
15 - 5
15 * 5
15/5 #Show space is unnecessary.
#Functions --some of this is going to be a bit different now but that's ok.
log(5)
log(5, 6)
log() #Show data to log is needed.
log(x = 5) #Show that x is the name of the first argument.
log(x = 5, base = 3) #We can name which input goes with which argument.
log(5, 3) #Without doing so, order of inputs matters.
log(3, 5)
log(base = 3, x = 5) #But, with names, order no longer matters.
?log #Show help page for function
#Script files
15 - 5
#Hit enter to demonstrate hitting enter won't run code from script like it does in console.
15 - 5
15 + 5 #Demonstrate the power of comments!
#Objects
math <- 15 + 5 #Makes an object called math that holds "20"
math #Show that R knows what math means.
math / 7.43 #Can now use math in place of the number 20 in math problems.
new_math <- math / 7.43 #Make new object with the value in an old object.
blah = 4 #Show that = sign works too for assignment.
#Object names
meh1 <- 6 #Can contain numbers
1meh <- 6 #Can't start with one though.
t2 <- 8 #Can be short names.
sfieifsegjbwrjgbsrijgbsrjbgkjwbfkwjbfgkjsbfksjbs <- 0 #Or long names. But in between is better.
mean, t, c, data #All function names! Don't pick them.
mean <- 8 #Technically allowed, but don't do it.
column.name <- 8 #Also allowed, but don't do it because functions often use . to separate words.
Age = 18 #Demonstrate case-sensitivity of R
age #Not found because Age and age are different!
COLUMN_NAME #I use caps and underscores in my naming convention.
ColumnName #Others use Camelcase
#Challenge 1:
x <- 50
y <- x * 2
x <- 75
y #Show that y is 100
y <- x * 2 #Show we have to run this again to change y.
y #Show that y is now 150 once we do that.
##Opening and working with the data.
survey <- read.csv("http://kbroman.org/datacarp/portal_clean.csv")
head(surveys)#Showcasing functions for exploring data frames.
tail(surveys)
dim(surveys)
nrow(surveys)
ncol(surveys)
names(surveys)
#Really powerful exploration tools!
str(surveys)
summary(surveys)
##Indexing--extracting values out of an object that we've made.
surveys[25]
surveys[1,5] #Value in row 1, column 5.
surveys[7,] #We can get the whole seventh row...
surveys[5, -1] #Or that but without the first column.
surveys$sex #Columns can also be gotten using their names.
surveys_last <- surveys[nrow(surveys),] #Answer to the 2nd optional challenge. This will save just the last row of surveys into a new object.
##Vectors--optional section
surveys$sex #A vector--no rows or columns, but more than a single value.
1:10 #Creates a simple sequence of values.
c(3, -1000, pi) #The c() function creates a more complex sequence of values you specify.
surveys[1:10,] #Vectors can be used to index multiple rows, columns, or values out of an object.
#Challenge, get data from the first four rows out of surveys but only from the 3rd, 5th, and 8th columns.
surveys[1:4, c(3,5,8)] #Successfully used two vectors to extract these values.
|
f364f3e0773e3e64a399824b5778e94822a9dfed
|
9e6184e5bf4de6e41fec10d66e92300a1b8751cd
|
/src/Data_Analysis_Tool_Box.R
|
492f5ad3247e372ba2a22c4f8792da00fd59656f
|
[] |
no_license
|
mrashid-bioinfo/r-toolbox
|
64047cd48346c8fbaa3c1003e977d5a584d99607
|
996dea535dd5a09209479c65fca1bac85803a831
|
refs/heads/master
| 2020-07-06T22:50:35.124976
| 2020-06-12T14:41:22
| 2020-06-12T14:41:22
| 203,162,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,704
|
r
|
Data_Analysis_Tool_Box.R
|
## ==================================================================================== ##
## Data Analysis Toolbox
##
##
## ==================================================== ##
##
## Function :: tri_nucleotide_context_fill
##
## Fill up trinucleotide context
##
## Paramters
## 1. Dataframe
##
tri_nucleotide_context_fill = function( df = NULL , chr = "Chromosome", start = "Start", end = "End", ref = "Reference", alt = "Variant", mutSamples = "mutSamples", species = 'human' )
{
require(SomaticSignatures)
require("deconstructSigs")
require(dplyr)
require(VariantAnnotation)
df_VR = VRanges( seqnames = df[,chr], ranges = IRanges(start = df[, end], end = df[, end]), ref = df[,ref], alt = df[,alt] )
if( species == "human" )
{
require(BSgenome.Hsapiens.1000genomes.hs37d5)
print("Human")
df_VR_context = as.data.frame( mutationContext(df_VR, BSgenome.Hsapiens.1000genomes.hs37d5) )
}
else if( species == "mouse" ) {
print("Mouse")
require(BSgenome.Mmusculus.UCSC.mm10)
df_VR_context = as.data.frame( mutationContext(df_VR, BSgenome.Mmusculus.UCSC.mm10) )
}
else{
print( "Unknown species" )
}
print("Creating modified VR object")
df_VR_context_mod = df_VR_context %>% separate(context, c("Preceeding","Trailing") )
df_VR_context_mod1 = df_VR_context_mod %>% separate(alteration, c("Ref","Alt"), sep = 1 )
df_VR_context_mod1$tri_context = paste( df_VR_context_mod1$Preceeding, "[", df_VR_context_mod1$Ref, ">" , df_VR_context_mod1$Alt, "]", df_VR_context_mod1$Trailing, sep = "" )
print("Creating Data Frame from VR")
df_VR_context_df = data.frame( Chromosome = df[,chr], Start = df[,start], End = df[,end], Reference = df[,ref], Variant = df[,alt], tri_context = df_VR_context_mod1$tri_context,mutSamples = df[,mutSamples] )
Spir_triContext_perSample = table( df_VR_context_df$mutSamples, df_VR_context_df$tri_context )
return(Spir_triContext_perSample)
}
## ==================================================== ##
##
## Function :: SomSig_Analysis
##
## Description :: Assess COSMIC signature contribution using deConstructSig
##
## Paramters
## 1. Dataframe
##
SomSig_Analysis = function( df = NULL, chr = "Chromosome", start = "Start", end = "End", ref = "Reference", alt = "Variant", mutSamples = "mutSamples", species = 'human', norm_method = 'exome' )
{
require(SomaticSignatures)
require("deconstructSigs")
require(dplyr)
require(VariantAnnotation)
VR = VRanges( seqnames = df[,chr], ranges = IRanges(start = df[, end], end = df[, end]), ref = df[,ref], alt = df[,alt] , sampleNames = df[,mutSamples] )
if( species == 'human' )
{
require(BSgenome.Hsapiens.1000genomes.hs37d5)
print("Human")
VR_context = mutationContext(VR, BSgenome.Hsapiens.1000genomes.hs37d5)
}
else if( species == "mouse" ) {
library(BSgenome.Mmusculus.UCSC.mm10)
print("Mouse")
VR_context = mutationContext(VR, BSgenome.Mmusculus.UCSC.mm10)
}
else{
print( "Unknown species" )
}
sca_mm = motifMatrix(VR_context, normalize = F)
mm_sca=as.data.frame(t(sca_mm))
colnames(mm_sca)=colnames(signatures.cosmic)
uniq_samples = unique(df[,mutSamples])
s=c()
for (i in 1:length(uniq_samples))
{
test1 = whichSignatures(tumor.ref = mm_sca,signatures.ref =signatures.cosmic,sample.id =uniq_samples[i] , contexts.needed = TRUE, tri.counts.method = norm_method )
weights1<- data.frame(test1[["weights"]])
unknown<- test1[["unknown"]]
weights1$unknown <- unknown
s=rbind(s,weights1)
}
return(s)
}
## Mean, Minimum and Maximum of data matrix ##
##
## Computes mean and standard deviation of a matrix [ by column or row ]
##
## Input ::
## 1. matrix
## 2. direction [ 1 for row; 2 for column ]
#
## output :: matrix
##
min_max_mean = function( mat = null, direction = 1 )
{
if( direction == 1 )
{
avg = rowMeans( mat , na.rm = TRUE )
minx = apply( mat , 1, function(X){ min(X, na.rm = T ) } )
maxx = apply( mat , 1, function(X){ max(X, na.rm = T ) } )
}
else if( direction == 2 )
{
avg = colMeans( mat , na.rm = TRUE )
minx = apply( mat , 2, function(X){ min(X, na.rm = T ) } )
maxx = apply( mat , 2, function(X){ max(X, na.rm = T ) } )
}
else{
print( "Invalid direction : Only 1[row] and 2 [column] is allowed" )
}
avg[ is.na(avg) ] = 2
minx[ is.infinite(minx) ] = 2
maxx[ is.infinite(maxx) ] = 2
df = data.frame( avg, minx, maxx );
return(df)
}
## Mean, SD, Lower and Upper ##
##
## Computes mean and standard deviation of a matrix [ by column or row ]
##
## Input ::
## 1. matrix
## 2. direction [ 1 for row; 2 for column ]
#
## output :: dataframe
##
mean_sd = function( mat = null, direction = 1 )
{
if( direction == 1 )
{
avg = rowMeans( mat , na.rm = TRUE )
sd = apply( mat , 1, function(X){ sd(X, na.rm = T ) } )
}
else if( direction == 2 )
{
avg = colMeans( mat , na.rm = TRUE )
sd = apply( mat , 2, function(X){ sd(X, na.rm = T ) } )
}
else{
print( "Invalid direction : Only 1[row] and 2 [column] is allowed" )
}
sd[ is.na(sd) ] = 0.05
lower = avg - sd
upper = avg + sd
df = data.frame( avg, sd, lower, upper );
return(df)
}
## Bed region overlap ##
find_overlap_region = function( df )
{
## !!!! §§§ Still incomplete ##
for( i in 1:dim(cnv_sample_df_mod)[1] )
{
if( i == 1 )
{
old = cnv_sample_df_mod[i,]
}
else
{
curr = cnv_sample_df_mod[i,]
if( old[,"chromosome"] == curr[,"chromosome"] )
{
if( ( as.numeric( curr[ , "start.pos" ] ) <= as.numeric( old[ , "end.pos" ] ) ) && ( as.numeric( curr[ , "end.pos" ] ) >= as.numeric( old[ , "end.pos" ] ) ) )
{
old[,"end.pos"] = curr[,"end.pos"]
}
}
}
}
}
## Chromosome Rename ##
chr_rename = function( chr )
{
a = gsub( "X", "23", chr )
a = gsub( "Y", "24", a );
return( a )
}
## Sequennza CNV Classification ##
sequenza_cnv_conversion = function( df )
{
cnv_class = apply( df, 1, function(X)
{
if( X[2] < 100 )
{
return("Too Small")
}
# Focal amps :
else if( X[1] >= 5 && X[2] <= 1000000 )
{
return("Focal Amplification")
}
# Large amps :
else if( X[1] >= 5 && X[2] > 1000000 )
{
return("Large Amplification")
}
# Focal Gain :
else if( X[1] >= 3 && X[2] > 100 && X[2] <= 1000000 )
{
return("Focal Gain")
}
# Small Gain :
else if( X[1] >= 3 && X[2] > 1000000 )
{
return("Large Gain")
}
# Focal Deletion
else if( X[1] == 0 && X[2] > 100 && X[2] <= 1000000 )
{
return("Focal Deletion")
}
# Deletion
else if( X[1] == 0 && X[2] > 1000000 )
{
return("Deletion")
}
# Loss
else if( X[1] == 1 && X[2] > 100 )
{
return("Loss")
}
} )
return( cnv_class );
}
|
817928fe4b85aef4205cabbea7d6f1508cb9d79a
|
9b5131ef4e41ab8872d230aa15a688a0cb355011
|
/dataOpt/benchmarks.R
|
1123aef40874e090115c0d8eaada989abe04b031
|
[] |
no_license
|
PaulaAlessio/pcalg
|
358be6eba5e09eb98e525df5af48501ad95bbbcc
|
105671103794c134d0241476408785ac6bc01132
|
refs/heads/master
| 2021-01-12T15:27:24.048194
| 2017-03-27T14:38:24
| 2017-03-27T14:38:24
| 71,788,778
| 0
| 0
| null | 2016-10-24T12:53:40
| 2016-10-24T12:53:40
| null |
UTF-8
|
R
| false
| false
| 1,875
|
r
|
benchmarks.R
|
# This script creates a graph with the timings of idaFast
timings <- read.csv("timings.csv", stringsAsFactors=FALSE)
fast <- timings[which(timings$optimized==TRUE & timings$Myc == "Low"), ]
slow <- timings[which(timings$optimized==FALSE & timings$Myc == "Low"), ]
png("timings.png", width=400, height=400, units ="px")
r_slow <- lm(log(slow[,2]) ~ log(slow[,1]) )
r_fast <- lm(log(fast[,2]) ~ log(fast[,1]) )
plot(slow[,1:2],col="red", xlab = "Anzahl von Genen",
ylab = " Laufzeit (Sekunden)", pch=16, cex=1.4, xaxt= "n")
points(fast[,1:2],col="blue",pch= 8, lwd=2)
axis(1,slow[,1],slow[,1])
legend("topleft",legend = c("urspruengliche Funktion",
"optimierte Funktion"), pch = c(16,8),
pt.cex = c(1.4,1), pt.lwd=c(1,2), col = c("red","blue"))
#lwd= c(1,2), cex=c(1.4,1),col = c("red","blue"))
mtext("200", at =c(250), line = -20.55 )
dev.off()
png("timings_log.png", width=400, height=400, units ="px")
x <- c(1:5000)
y <- x^(r_slow$coefficients[2])*exp(r_slow$coefficients[1])
plot(slow[,1:2],col="red", xlab = "Number of genes, n",
ylab = " Time, t (sec)", ylim = c(0.01,50000), pch=16, cex=1.4, yaxt= "n", log = "xy")
lines(x,y,lty=2, col="lightgray",lwd=2)
x <- c(1:5000)
y <- x^(r_fast$coefficients[2])*exp(r_fast$coefficients[1])
lines(x,y,lty=2, col="lightgray",lwd=2)
points(fast[,1:2],col="blue",pch= 8, lwd=2)
axis(2,c(0.01,0.1,1,10,100,1000,10000),c("0.01","0.1","1","10","100","1000","10000"), las = 2)
legend("topleft",legend = c("idaFast",
"idaFastOpt"), pch = c(16,8),
pt.cex = c(1.4,1), pt.lwd=c(1,2), col = c("red","blue"))
#lwd= c(1,2), cex=c(1.4,1),col = c("red","blue"))
slope <- sprintf("%.02f", r_slow$coefficients[2])
text(200,10,bquote( 't ~ '~ O(n^.(slope)) ),srt=38)
slope <- sprintf("%.02f", r_fast$coefficients[2])
text(200,0.17,bquote( 't ~ '~ O(n^.(slope)) ), srt=25)
dev.off()
|
13deefc1d27740743deb75cf83dad174faf70e51
|
72de200654431310d8cee2ab5ca3875d2d9ad732
|
/Statisitcal analysis with R.R
|
4c2fbec976a9d72830e340689623f4559ca093fe
|
[] |
no_license
|
salvatoreleto1/R-Commands
|
e80e6471ca1e7c8e2800005b26733642b4941298
|
9091b4d66d62b78c3b93cab039244d552558018d
|
refs/heads/master
| 2022-11-09T18:54:46.111718
| 2020-06-30T11:33:34
| 2020-06-30T11:33:34
| 276,080,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,123
|
r
|
Statisitcal analysis with R.R
|
# ---------1 Requirements ------
#install.packages("igraph")
library("igraph")
library("rtweet")
#Later you will download Twitter user information about Hollyhood actor
#Chris Hemsworth and some of his friends using rtweet.
#If you do not have Twitter access, you may download an equivalent data file:
#chris2019.RData. This file is emailed to you.
#app="Social Web Analytics 300958" #change this value appropriately
#key= ""
#secret= ""
#access_token=""
#access_secret=""
#twitter_token=create_token(app, key, secret, access_token, access_secret, set_renv = FALSE)
##----------------2 Creating a graph ------
#-----------------2.1 Graph Formula--------
#We can create a graph by providing the graph.formula
#function with the set of vertices, and how they are
#connected (edges
g1 = graph.formula(A - B, A - C, A - D, B - D)
print(g1)
V(g1) #View vertices
E(g1) #View edges
plot(g1)# visualize graph
#---------------2.2 Adjacency Matrix----------
#To create a graph from an adjacency matrix, we must
#first create the matrix.
A = matrix(0, 4, 4) # 4 X 4 matrix, containing all zeros
#add the edges by allocating ones. We will make the same graph
#shown above in g1.
A[1, c(2, 3, 4)] = 1 #We want to connect the first vertex to
#the second, third and fourth vertices
A[2, c(1, 4)] = 1 #We also want to connect the second vertex
#to the first and fourth
A[3, 1] = 1 #connect the thrid vertex to the first
A[4, c(1, 2)] = 1
print(A)
g2 = graph.adjacency(A) # create the graph from adjacency matrix
plot(g2)
g3 = graph.adjacency(A, mode="undirected") #undirected graph
plot(g3)
#----------------2.3 Edge List-----------------
#We can also create a graph using an edge list. An edge list is an
#M*2 matrix, containing M edges. Each row of the edge list provide
#the start and end vertices.
el = matrix(c("A", "A", "A", "B", "B", "C", "D", "D"), 4, 2)
print(el)
#[,1] [,2]
#[1,] "A" "B"
#[2,] "A" "C"
#[3,] "A" "D"
#[4,] "B" "D"
g4 = graph.edgelist(el, directed = FALSE)
plot(g4)
##--------------3 Creating Random Graphs -------------
#erdos.renyi is a random graph
set.seed(20)
g.er = erdos.renyi.game(n = 10, p = 1) #p=probability, n= nodes
plot(g.er, vertex.size = 40)
g.er = erdos.renyi.game(n = 10, p =0) #p=probability, n= nodes
plot(g.er, vertex.size = 40)
#create a Barabasi-Albert Graph
#Here a new vertex prefers to link to highly
#connected vertices
#we must provide n (the number of vertices)
set.seed(20)
g.ba = barabasi.game(n = 10, directed = FALSE)
plot(g.ba, vertex.size = 5)
#We can also provide the k (the power) to change
#the probability of connecting to a vertex
#m (the number of edges to add to each new vertex)
ba = barabasi.game(5,
m = 2,
power = 0.6,
out.pref = TRUE,
zero.appeal = 0.5,
directed = FALSE)
plot(ba, vertex.size = 5)
ba = barabasi.game(5,
m = 2,
power = 1,
out.pref = TRUE,
zero.appeal = 0.5,
directed = FALSE)
plot(ba, vertex.size = 5)
##-------------4 Examining the Graphs-----------
#--------------4.1 Density---------
#See slide 24, week7 for definition of density
#Density = no of edges/possible number of edges = 2E/(V(V-1))
#By visually examining the erdos.renyi and Barab??si???Albert
#which looks denser? Use the function graph.density
#to compute the density of each graph and compare
#the results to your guess.
set.seed(20)
g.er = erdos.renyi.game(n = 100,p = 0.1)
plot(g.er, vertex.size = 5)
set.seed(20)
g.ba = barabasi.game(n = 100, directed=FALSE)
plot(g.ba, vertex.size = 5)
graph.density(g.er)
graph.density(g.ba)
#Barabasi-Albert has lower density since it has fewer edges
#-------------4.2 Diameter---------------
#The diameter is the longest shortest path.
#Which of the two graphs do you expect to have the
#largest diameter?
#We expect the ER graph to have a smaller diameter
#because there are many paths between each of the
#vertices.
#Use the function diameter to compute
#the diameter of each graph.
diameter(g.er)
diameter(g.ba)
#----------------4.3 Degree-------------------
#What do you expect the degree distribution of each
#graph to look like?
#The ER degree distribution should look mound shaped
#(a mean with left and right tails). The BA degree
#distribution should look exponentially decaying
#(many vertices with low degree, a few with high degree).
#We can also compute the degree distribution
#Degree distribution, Pdeg(k)=fraction of nodes in the graph with degree k.
#of the graph using the function degree.distribution.
#Use the help pages to understand the output.
degree(g.er)
degree.distribution(g.er)#a numeric vector of the same length as the maximum degree plus one.
#The first element is the relative frequency zero degree vertices,
#the second vertices with degree one, etc.
degree(g.ba)
degree.distribution(g.ba)
#Which vertex is most central according to Degree Centrality?
#To find the most central, we order the vertices by their degree.
g.er.order =order(degree(g.er), decreasing=TRUE)
g.ba.order= order(degree(g.ba), decreasing=TRUE)
#--------------4.4 Closeness Centrality---------------
#See slide 34, week 7
#The closeness centrality of a vertex v
#is the sum of the distance from v
#to all other vertices.
#Read the R help page for closeness to find what R is computing.
#The R function closeness provides the reciprocal of the sum of path
#lengths.
#Therefore the sum of path lengths is:
1/closeness(g.ba)
#We want the vertex with the shortest path lengths,
#therefore we want the maximum given by the R closeness function.
order(closeness(g.er), decreasing = TRUE)
order(closeness(g.ba), decreasing = TRUE)
#-----------------------4.5 Betweenness---------------
#Betweenness centrality measures how often a vertex is used
#in shortest paths
betweenness(g.er)
order(betweenness(g.er), decreasing = TRUE)
order(betweenness(g.ba), decreasing = TRUE)
# Is the centre the same for all three centrality measures?
# Examine this for the Erdos-Renyi graph and Barabasi-Albert graph.
# Compare the above orders.
##--------------------5 Small Graph------------------------
# Create the following small undirected graph from a formula.
g3 = graph.formula(A - B, A - C, A - D, B - D, B - E, E - D, C - E)
plot(g3)
# Calculate the Degree Distribution, Degree Centrality,
# Closeness Centrality, Betweenness Centrality
# using the methods shown in the lecture.
# Then check your answer using the R functions.
degree.distribution(g3)
degree(g3)
1/closeness(g3)  # reciprocal of closeness = total shortest-path distance (farness)
betweenness(g3)
##-----------------6 Twitter Graph------------
# Each twitter user has a set of friends and a set of followers:
# Followers of user x:
#   users who are following x
# Friends of user x:
#   users who x is following.
# Therefore, a user can choose their friends, but cannot choose their followers.
# There are many users of Twitter, we want to find the interesting ones.
# Interesting users usually have many followers (because they are interesting).
# So when obtaining information about users on Twitter, we should note:
# Popular people have many followers.
# Using rtweet, we can download user information when given a screen name or ID.
# Execute the following statement to download info from Twitter.
user = lookup_users(c("chrishemsworth"))#examine all of the details for Chris Hemsworth
# The above function provides information such as the number of friends,
# the number of followers, if the account is protected and verified
# and the owner's name and id.
names(user)
user$friends_count
user$screen_name
user$followers_count
# Alternatively, download the tweet file to your working directory
load("./chris2019.RData")
# It comes with user, friends and more.friends variables
# which are needed during the tutorial
#-----------------------6.2 Downloading a user's friends from Twitter---------
# Get the friends of user, Chris Hemsworth, directly from Twitter.
# NOTE(review): the name `t` shadows base::t() here; base::t is still found
# when called as a function, but a clearer name would avoid confusion.
t <- get_friends("chrishemsworth") #gets user id of friends of chrishemsworth
names(t)
# Get Chris Hemsworth's friends, directly from Twitter.
friends = lookup_users(t$user_id)
dim(friends)
names(friends)
friends$screen_name[1] #name of friend at index 1
friends$followers_count[1] #examine the follower count of the first friend
friends$screen_name[2]
# Find the 10 friends that have the most followers. What are their names?
# Note the function, sort, will sort the vector of follower counts.
# The function, order, will sort, but provide the position of the sort.
# So to find the top 10, we use order with decreasing=TRUE and choose the
# first ten values, giving us the positions of the top 10.
friendPosition = order(friends$followers_count, decreasing = TRUE)[1:10]
friendPosition
#topFriends = friends$user_id[friendPosition] #ids of top 10 friends
topFriends = friends[friendPosition,] #full records of top 10 friends
topFriends
topFriends$user_id[1]
# Write a for loop to download 100 friends from the 10 most popular friends of
# Chris Hemsworth and store them in more.friends.
# If you are using chris2019.RData, do not do this because it is already present in
# the variable, more.friends
#-----------Do the following to download directly from Twitter------
# Download the friends of each of the top friends.
# A single loop replaces the ten copy-pasted get_friends()/lookup_users()
# pairs of the original, so the number of top friends is no longer hard-coded.
more.friends = list() #a place to store the friends of friends
n = nrow(topFriends)
for (a in seq_len(n)) {
  t = get_friends(topFriends$user_id[a])      # ids of the friends of friend a
  more.friends[[a]] = lookup_users(t$user_id) # full user records for those ids
}
more.friends
class(more.friends[[1]])
dim(more.friends[[1]])
nrow(more.friends[[1]])
#-----------------Restrict to 100 records to manage big data----------------
# Keep at most the first 100 friend records per top friend.
for (a in seq_len(n)) {
  if (nrow(more.friends[[a]]) > 100) {
    more.friends[[a]] = more.friends[[a]][1:100, ]
  }
}
# Spot-check a few downloaded screen names.
more.friends[[1]]$screen_name[1]
more.friends[[1]]$screen_name[2]
more.friends[[2]]$screen_name[2]
#save(user, friends, more.friends, file="chris2019.RData")
##----------------------6.3 Creating the Twitter Graph--------------
# Now we have Chris Hemsworth, Chris Hemsworth's friends,
# and friends of the top 10 friends of Chris Hemsworth.
# We can create a graph by constructing an edge list (who links to who).
# We know Chris Hemsworth links to all of his friends,
# so the edge list will contain rows beginning with Chris Hemsworth
# and ending with the friend of Chris Hemsworth. First we must get the
# names of all of Chris Hemsworth's friends.
# Write a for loop to store all 100 screen names in the variable friend.names.
# Not required to code this since rtweet gives this by default.
friends$screen_name
# We can now build the edge list using:
chris = rep(user$screen_name, nrow(friends)) # repeat Chris Hemsworth's user name once per friend
el = cbind(chris, friends$screen_name) # bind the columns to create a matrix
el
# Using what you have done above, write the function
user.to.edgelist <- function(user, friends) {
  # Build a two-column edge list: one row per friend, with the user's
  # screen name in the first column and the friend's in the second.
  # (The local name `user.name` deliberately becomes the first column name.)
  user.name = rep(user$screen_name, times = nrow(friends))
  cbind(user.name, friends$screen_name)
}
# We can use the created function user.to.edgelist to create the edge list for Chris Hemsworth:
el.chris = user.to.edgelist(user, friends)
el.chris
topFriends[1,4] #4th column is Screenname
nrow(more.friends[[4]])
topFriends[1,]
user.to.edgelist(topFriends[1,], more.friends[[1]])
# We can also build the edge list for the top 10 friends using a loop:
for (a in c(1:length(more.friends))) {
  el.friend = user.to.edgelist(topFriends[a,], more.friends[[a]])
  el.chris = rbind(el.chris, el.friend) # append the new edge list to the old one.
}
el.chris
# Now that we have the edge list, we can create the (directed) graph:
g = graph.edgelist(el.chris)
g
# Let's plot the graph. Since there are many vertices,
# we will reduce the vertex size and use a special plot layout:
plot(g, layout = layout.fruchterman.reingold, vertex.size = 5)
# This graph contains many vertices that we did not examine. To remove these,
# let's only keep the vertices with degree (in or out) greater than 1.
g2=induced_subgraph(g, which(degree(g, mode = "all") > 1))
# This graph is now easier to visualise:
plot(g2, layout = layout.fruchterman.reingold, vertex.size = 5)
# Who is at the centre of the graph? Use the centrality measures to examine this.
# Higher closeness = more central, so sort in decreasing order.
g2.centres=order(closeness(g2), decreasing=TRUE)
length(g2.centres)
g2[g2.centres][,1]#names of the centres
# Examine the graph density. Is it sparse or dense?
graph.density(g2)
# Examine the degree distribution. Is this graph more similar to an Erdos-Renyi
# graph or a Barabasi-Albert graph?
degree.distribution(g2)
|
4d30cb9a14f3f6b4fc004e02d372190b04ba4277
|
ef572bd2b0515892d1f59a073b8bf99f81d6a734
|
/man/datapack_cogs.Rd
|
8550e9f13db8272fa7a8b52fb6b83e297649a744
|
[
"CC0-1.0"
] |
permissive
|
pepfar-datim/datapackr
|
5bc604caa1ae001b6c04e1d934c0c613c59df1e6
|
9275632673e45948db6846513a53c1436cfc0e47
|
refs/heads/master
| 2023-08-30T23:26:48.454382
| 2023-08-11T13:01:57
| 2023-08-11T13:01:57
| 170,350,211
| 9
| 7
|
CC0-1.0
| 2023-09-11T21:53:24
| 2019-02-12T16:19:47
|
R
|
UTF-8
|
R
| false
| true
| 513
|
rd
|
datapack_cogs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{datapack_cogs}
\alias{datapack_cogs}
\title{Datapack Category option groups}
\format{
\describe{
\item{name}{Name of the Category Option Group for example "01-04 Only"}
\item{id}{Category Option Group UID}
}
}
\usage{
datapack_cogs
}
\description{
Data frame of category option groups (id and name)
along with their individual category options (id and name) as a
nested data frame.
}
\keyword{datasets}
|
a8e5ec82a6fd3351a31cc4add846fbc237d9d3f0
|
8f7871fdf97c6c99d802617e07160277ad291e67
|
/Evaluation_standardised residuals and qqplots.R
|
e2a663a0a9772adae3019a1b6c70d21b2973d2aa
|
[] |
no_license
|
karolzub/Stoat-in-Orkney
|
e4ed556a0490c11ce0d0a698b4e13256fb00fc2a
|
a557f0f72eee09be283df6f85e53d46e77e662ae
|
refs/heads/master
| 2023-07-16T12:37:07.734911
| 2021-09-07T09:51:06
| 2021-09-07T09:51:06
| 247,970,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,197
|
r
|
Evaluation_standardised residuals and qqplots.R
|
### Stoat and rat data - Removal population & trappability estimates
### Stoat model evaluation - standardised residuals and qqplots
set.seed(2012)
### Load the libraries
library(coda)
library(MASS)
library(MCMCpack)
library(MuMIn)
library(jagsUI)
library(ggmcmc)
library(corrplot)
library(nimble)
library(parallel)
library(doParallel)
library(foreach)
library(beepr)
########## Load the data
rem.data<-read.table("C:/Orkney/Model/Removal.csv", sep=";", header=TRUE)
rem.data
#### Effective trapping effort for stoats after accounting for captured rats and closed traps
eff.stoat<-(rem.data$Effort-rem.data$rat-rem.data$closed.trap)
eff.stoat
#### Effective trapping effort for rat after accounting for captured stoats and closed traps
eff.rat<-(rem.data$Effort-rem.data$stoat-rem.data$closed.trap)
eff.rat
#### Number of trapping occasions
n.occ<-max(rem.data$Session)
n.occ
#### NIMBLE BAYESIAN MODEL
#### Joint removal (depletion) model: animals caught in earlier occasions are
#### subtracted from the pool available in later occasions; capture
#### probability depends on effort through a logit link.
rem.model<-nimbleCode({
  ## Prior for the stoat population size before any removal - whole of SR
  mean.stoat~dunif(1, 500)
  n.stoat~dpois(mean.stoat)
  ## Prior for the rat population size before any removal - whole of SR
  mean.rat~dunif(100, 10000)
  n.rat~dpois(mean.rat)
  ## Prior for the probability of capture of stoats per trap and night
  alpha.stoat~dnorm(0, sd=3.16)
  beta.stoat~dnorm(0, sd=3.16)
  ## Prior for the probability of capture of rats per trap and night
  alpha.rat~dnorm(0, sd=3.16)
  beta.rat~dnorm(0, sd=3.16)
  ### Probability of not capturing a stoat after a certain trapping effort (trap nights)
  ### And including rat by-catch effects
  #### First occasion
  logit(p.stoat[1])<-alpha.stoat+beta.stoat*eff.stoat[1]
  stoat.cap[1]<-p.stoat[1]
  ### Probability of not capturing a rat after a certain trapping effort (trap nights)
  ### And including stoat by-catch effects
  logit(p.rat[1])<-alpha.rat+beta.rat*eff.rat[1]
  rat.cap[1]<-p.rat[1]
  # Likelihood - Stoats and rats removed per occasion
  ### First occasion
  av.stoat[1]<-n.stoat ### Stoats not yet caught and available for capture in the new occasion
  y.stoat[1]~dbin(stoat.cap[1], av.stoat[1]) #### Stoats
  av.rat[1]<-n.rat ### Rats not yet caught and available for capture in the new occasion
  y.rat[1]~dbin(rat.cap[1], av.rat[1]) #### Rats
  ##### Bayesian p-values
  ###### Rats: expected count and a replicated data set for posterior checks
  exp.rat[1]<-rat.cap[1]*av.rat[1]
  new.rat[1]~dbin(rat.cap[1], av.rat[1])
  #### Subsequent occasions
  for (j in 2:n.occ){
    ## Stoats removed per occasion
    ### Stoats not yet caught and available for capture in the new occasion
    av.stoat[j]<-n.stoat-sum(y.stoat[1:(j-1)])
    ### Stoats removed
    ### Probability of not capturing a stoat after a certain trapping effort (trap nights)
    ### And including rat by-catch effects
    logit(p.stoat[j])<-alpha.stoat+beta.stoat*eff.stoat[j]
    stoat.cap[j]<-p.stoat[j]
    y.stoat[j]~dbin(stoat.cap[j], av.stoat[j])
    ## Rats removed per occasion
    ### Rats not yet caught and available for capture in the new occasion
    av.rat[j]<-n.rat-sum(y.rat[1:(j-1)])
    ### Probability of not capturing a rat after a certain trapping effort (trap nights)
    ### And including stoat by-catch effects
    logit(p.rat[j])<-alpha.rat+beta.rat*eff.rat[j]
    rat.cap[j]<-p.rat[j]
    y.rat[j]~dbin(rat.cap[j], av.rat[j])
    ##### Bayesian p-values
    ###### Rats
    exp.rat[j]<-rat.cap[j]*av.rat[j]
    new.rat[j]~dbin(rat.cap[j], av.rat[j])
  }
})
## Initial values - where to start the MCMC chains
inits<-list(alpha.stoat=rnorm(1, 0, 3.16), beta.stoat=rnorm(1, 0, 3.16), alpha.rat=rnorm(1, 0, 3.16),
            beta.rat=rnorm(1, 0, 3.16), n.stoat=rpois(1, sum(rem.data$stoat)*2), n.rat=rpois(1, sum(rem.data$rat)*2))
### Define constant values
const.rem<-list(n.occ=n.occ)
#### Put the data together (effort enters the model on the log10 scale)
data.rem<-list(eff.stoat=log10(eff.stoat), eff.rat=log10(eff.rat), y.stoat=rem.data$stoat, y.rat=rem.data$rat)
###### THE MODEL
Rmodel<-nimbleModel(code=rem.model, constants=const.rem, data=data.rem, inits=inits)
# Only the posterior-predictive quantities are monitored here.
rem.conf<-configureMCMC(Rmodel, monitors=list('exp.rat', 'new.rat'), thin=1)
Rmcmc<-buildMCMC(rem.conf)
Cmodel<-compileNimble(Rmodel)
Cmcmc<-compileNimble(Rmcmc, project = Rmodel)
### Run the model with three chains
### (3 x 1,000,000 iterations with 500,000 burn-in: this takes a long time)
rem.m2<-runMCMC(Cmcmc, niter=1000000, nchains=3, nburnin=500000, inits=inits, samplesAsCodaMCMC=FALSE)
### Summary of the posterior & exclude NAs
out.post<-do.call(rbind.data.frame, rem.m2)
# Columns 1..n.occ hold exp.rat; the remaining columns hold new.rat.
exp.count<-out.post[, 1:n.occ]
summary(exp.count)
sim.count<-out.post[, (n.occ+1):ncol(out.post)]
summary(sim.count)
### DHARMA
#### QQ-Plots
library(DHARMa)
#### Catch
pred<-apply(exp.count, 2, function(x) mean(x, na.rm=TRUE))
pred
pred2<-apply(exp.count, 2, function(x) median(x, na.rm=TRUE))
pred2
sim<-createDHARMa(simulatedResponse=t(sim.count), observedResponse=rem.data$rat, fittedPredictedResponse=pred,
integerResponse=TRUE)
# NOTE(review): plotSimulatedResiduals() is deprecated in newer DHARMa
# releases in favour of plot(); both calls below should produce the same
# figure -- confirm against the installed package version.
plotSimulatedResiduals(sim)
plot(sim)
|
cdfe40ce3631d69392e97cd2df3a1ec7f34a4466
|
de9d075ede442d9b4ed1bd8d05f8e4e60edb02ad
|
/man/prep_dist_fenced.Rd
|
2d451cc6856d6181901fb3c9e7591cf05288286b
|
[
"CC-BY-4.0",
"CC0-1.0",
"MIT"
] |
permissive
|
petrpajdla/settlements
|
8480f43a33f2bf31dc858ced846139fd78ba2fe7
|
e3bc0899e53dcab0126ddd1075f33b0d207ccc67
|
refs/heads/main
| 2023-04-18T15:32:07.270613
| 2022-06-23T14:20:03
| 2022-06-23T14:20:03
| 302,602,094
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 434
|
rd
|
prep_dist_fenced.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output_fns.R
\name{prep_dist_fenced}
\alias{prep_dist_fenced}
\title{Prepare distance to fenced settlements result}
\usage{
prep_dist_fenced(x)
}
\arguments{
\item{x}{A \code{tibble}.}
}
\value{
A long results \code{tibble}.
}
\description{
Functions with \code{prep_} prefix prepare the data into the long format for
binding with results \code{tibble}.
}
|
4d543ccb6b5473fceee3021689827489b55eb663
|
897f0581bfc3403318f56072f7af1163b8189733
|
/rosetta-motif-distances.R
|
c4bcdb4d817deedec5ca22fa17af07f7b8db8b85
|
[] |
no_license
|
jashworth-UTS/ja-scripts
|
2985891e628bae59b1f4b8696739cbf63b5a2dc2
|
ac837ac0fee63c27b3b8ac4d9a5022810fb31976
|
refs/heads/master
| 2021-05-28T18:39:20.272694
| 2015-02-04T02:35:17
| 2015-02-04T02:35:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,273
|
r
|
rosetta-motif-distances.R
|
# Compute pairwise distances between Rosetta design motifs (PPMs built from
# fasta files listed in 'dnas') and plot them as clustered heatmaps.
paths=readLines('dnas')
#paths=rep('2e1c.WT/design_mutations.dna',4)
# Background nucleotide probabilities (second column of the bg file).
bg=read.table('halo.bg.file',header=F)
bg=bg[,2]
cat('background probabilities:\n')
cat(bg)
cat('\n')
# Derive short display names from the directory part of each path.
names=dirname(paths)
names=gsub('/design_mutations.dna','',names)
names=gsub('2e1c.P.OT3.FL11.','',names)
source('rosetta-motifs.R')
# NOTE(review): `pdf` is used as a logical flag and shadows grDevices::pdf();
# the function is still found when called as pdf(...), but the reuse is
# confusing.
pdf=F
# Build one position probability matrix (seqlogo) per input fasta.
mats=lapply(1:length(paths),function(i){
	path=paths[i]
	name=names[i]
	cat(name,' ',path,'\n')
	if(pdf){pdf(paste(name,'.pdf',sep=''))}
	sl=seqlogoFromFasta(path,plot=pdf)
	if(pdf){dev.off()}
	return(sl) })
names(mats)=names
# Distance function: Euclidean ('ED'); alternatives commented out below.
disfunc='ED'
#disfunc='KL'
#disfunc='ALLR'
dmats=ppm.dis.matrix(mats,disfunc=disfunc,bg=bg)
require(gplots)
oma=c(7,3,3,7)
pdf=T
cols=redblue(32)
if(pdf){pdf('rosetta.motif.distances.pdf');par(oma=oma)}else{dev.new();par(oma=oma)}
heatmap.2(dmats,symm=T,trace='none',scale='none',dendrogram='row',main='Rosetta motif distances',col=cols,symbreaks=F,symkey=F)
if(pdf){dev.off()}
# heatmap of just Halo TFs
if(pdf){pdf('rosetta.motif.distances.halo.pdf');par(oma=c(5,5,5,5))}else{dev.new();par(oma=oma)}
dmats.halo=dmats[grepl('Halo',rownames(dmats)),grepl('Halo',colnames(dmats))]
heatmap.2(dmats.halo,symm=T,trace='none',scale='none',dendrogram='row',main='Rosetta motif distances',col=cols,symbreaks=F,symkey=F)
if(pdf){dev.off()}
|
71c983af1fd707032b5e65da9f19db4b0ed5de34
|
a157bac2055f4c7503ddc4b34dc909842604a6f2
|
/R/source_data_raw.R
|
5eec8fae440a67dffd995adda9f74c8cac869c62
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/r2dii.usethis
|
c2471bc8ae8fe182734f4b948579acd4d20bc27e
|
08b1e959223d68cbed1b1729ac6b37b342a1a4be
|
refs/heads/master
| 2022-03-03T13:08:53.483371
| 2022-02-14T21:21:02
| 2022-02-14T21:21:02
| 195,466,150
| 1
| 2
|
NOASSERTION
| 2022-03-15T11:53:50
| 2019-07-05T20:52:33
|
R
|
UTF-8
|
R
| false
| false
| 888
|
r
|
source_data_raw.R
|
#' Source all .R files under data-raw/
#'
#' Usually we work on one dataset only, and don't know if our change impacted
#' other datasets. This function helps "refresh" all datasets at once. It may
#' be used interactively while developing the package, or in CI to regularly
#' check we can reproduce all datasets we export, and that the result is
#' consistent with our regression tests.
#'
#' @param path String. Path in the working directory.
#' @return `invisible(path)`, as it's called for its side effect.
#'
#' @export
#'
#' @examples
#' source_data_raw()
source_data_raw <- function(path = "data-raw") {
  # Evaluate each data-raw script purely for its side effects; the values
  # returned by source() are deliberately discarded.
  for (script in r_files_in(path)) {
    source(script)
  }
  invisible(path)
}
r_files_in <- function(path) {
  # list.files(pattern = ) is platform-inconsistent for paths (e.g. "a//b"
  # vs "a/b"), so list everything and filter the full paths afterwards.
  all_paths <- list.files(path, pattern = NULL, full.names = TRUE)
  all_paths[grepl("[.]R$", all_paths)]
}
|
726b2e6bcfd709f8c9d132385a38eba36c3b004a
|
98ea498dbf7813132f27c0f05e93cab1d085c339
|
/Standard GA.r
|
2947a14a9d3ee4a40efa6d4bcaec6a5de9dc1b13
|
[] |
no_license
|
JARH2410/RCodeforCRO
|
041841c429783c8f991e877a5c2e162003944824
|
eefd855be2b8aafac59566bc95cd743ce7df6056
|
refs/heads/master
| 2020-06-24T06:16:18.010189
| 2019-07-29T15:40:58
| 2019-07-29T15:40:58
| 198,876,380
| 1
| 0
| null | 2019-07-25T17:41:31
| 2019-07-25T17:41:30
| null |
UTF-8
|
R
| false
| false
| 1,614
|
r
|
Standard GA.r
|
library(googleAnalyticsR)
library(googleAuthR)
# Authenticate against the Google Analytics API, reusing a cached token.
ga_auth(new_user = F)
view_id = 53461765
# Date range for the query -- fill in before running.
start_date <- "" #"YYYY-MM-DD"
end_date <- "" #"YYYY-MM-DD"
# Metric filter templates: fill in metric / operator / comparisonValue / not.
mf1 <- met_filter(metric =,
                  operator = ,
                  comparisonValue = ,
                  not =
)
mf2 <- met_filter(metric =,
                  operator = ,
                  comparisonValue =,
                  not =
)
# Dimension filter templates.
df1 <- dim_filter(dimension = ,
                  operator = ,
                  expressions = ,
                  not = F
)
# BUG FIX: this second filter was previously assigned to df1 as well, which
# overwrote the first dimension filter and left df2 undefined when it is
# used in filter_clause_ga4() below.
df2 <- dim_filter(dimension = ,
                  operator = ,
                  expressions = ,
                  not = F
)
my_met_filter_clause <- filter_clause_ga4(list(mf1,mf2),
                                          operator = "AND")
my_dim_filter_clause <- filter_clause_ga4(list(df1,df2),
                                          operator = "AND")
# Run the query (anti_sample = T avoids sampled results by batching).
ga_data <- google_analytics(viewId = view_id,
                            date_range = c(start_date, end_date),
                            metrics = c(""),
                            dimensions = c(""),
                            met_filters = my_met_filter_clause,
                            dim_filters = my_dim_filter_clause,
                            anti_sample = T
)
head(ga_data)
|
87adf7f4bf6db4e9196077740d5181a45c68faea
|
9301d1bf2b428d9e56994665a6db0d5fda2812ce
|
/man/DSD_Target.Rd
|
1e1766962f48e3a116607240023d278320bbc361
|
[] |
no_license
|
mhahsler/stream
|
ae6ae385d1db3d68bcf7c56bac62edbf5df8ce26
|
0c83157635b48adb9a3401de601f81680d844968
|
refs/heads/master
| 2023-07-20T04:29:25.966443
| 2023-07-14T16:32:49
| 2023-07-14T16:32:49
| 45,126,788
| 39
| 5
| null | 2020-12-01T20:09:04
| 2015-10-28T16:37:22
|
C++
|
UTF-8
|
R
| false
| true
| 1,605
|
rd
|
DSD_Target.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DSD_Target.R
\name{DSD_Target}
\alias{DSD_Target}
\title{Target Data Stream Generator}
\usage{
DSD_Target(
center_sd = 0.05,
center_weight = 0.5,
ring_r = 0.2,
ring_sd = 0.02,
noise = 0
)
}
\arguments{
\item{center_sd}{standard deviation of center}
\item{center_weight}{proportion of points in center}
\item{ring_r}{average ring radius}
\item{ring_sd}{standard deviation of ring radius}
\item{noise}{proportion of noise}
}
\value{
Returns a \code{DSD_Target} object.
}
\description{
A data stream generator that generates a data stream in the shape of a
target. It has a single Gaussian cluster in the center and a ring that
surrounds it.
}
\details{
This DSD will produce a singular Gaussian cluster in the center with a ring around
it.
}
\examples{
# create data stream with three clusters in 2D
stream <- DSD_Target()
get_points(stream, n = 5)
plot(stream)
}
\seealso{
Other DSD:
\code{\link{DSD_BarsAndGaussians}()},
\code{\link{DSD_Benchmark}()},
\code{\link{DSD_Cubes}()},
\code{\link{DSD_Gaussians}()},
\code{\link{DSD_MG}()},
\code{\link{DSD_Memory}()},
\code{\link{DSD_Mixture}()},
\code{\link{DSD_NULL}()},
\code{\link{DSD_ReadDB}()},
\code{\link{DSD_ReadStream}()},
\code{\link{DSD_UniformNoise}()},
\code{\link{DSD_mlbenchData}()},
\code{\link{DSD_mlbenchGenerator}()},
\code{\link{DSD}()},
\code{\link{DSF}()},
\code{\link{animate_data}()},
\code{\link{close_stream}()},
\code{\link{get_points}()},
\code{\link{plot.DSD}()},
\code{\link{reset_stream}()}
}
\author{
Michael Hahsler
}
\concept{DSD}
|
5561f9426872f8f4f387872a4ee7138358ef25cf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bedr/examples/bedr.merge.region.Rd.R
|
6405615294e4458a6dfe4f1ca99d31b4496e3e2d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
bedr.merge.region.Rd.R
|
library(bedr)
### Name: bedr.merge.region
### Title: merge i.e. collapse overlapping regions
### Aliases: bedr.merge.region
### Keywords: merge
### ** Examples
# Run only when the bedtools binary is available on the PATH.
if (check.binary("bedtools")) {
index <- get.example.regions();
a <- index[[1]];
a.sort <- bedr.sort.region(a);   # regions must be sorted before merging
a.merged <- bedr.merge.region(a.sort);
}
|
5cb86ae281f40a60a0eca9b57e1bf526cd30a9fd
|
c7d88c83f9448c5f7c822b7cc7c8a69fab4cb5c2
|
/plot1.R
|
d136e924b9b78845bb3617b83787800efb6b1a09
|
[] |
no_license
|
RestyAmon/Exploratory-Data-Analysis
|
3651d1b2f7e6a517ab9d48a40d0a579fad6b4c37
|
a83ad63505a2ee6d4ee5c579913722d78b83910e
|
refs/heads/master
| 2020-05-16T22:18:51.291069
| 2019-04-25T10:00:38
| 2019-04-25T10:00:38
| 183,332,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 1-2 Feb 2007, saved as PNG.
# Read the full ";"-separated power-consumption file.
power.all <- read.table("household_power_consumption.txt", sep = ";",
                        header = TRUE, dec = ".", stringsAsFactors = FALSE)
# Keep only the two days of interest.
power.feb <- subset(power.all, Date == "1/2/2007" | Date == "2/2/2007")
# The column is read as character, so coerce it to numeric before plotting.
power.feb$Global_active_power <- as.numeric(power.feb$Global_active_power)
hist(power.feb$Global_active_power, ylab = "Frequency",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power",
     col = "red")
# Copy the screen device to a 480x480 PNG and close it.
dev.copy(png, file = "plot1.png", height = 480, width = 480, units = "px")
dev.off()
|
9f53a869011236f257f14bc36ee45a2688f9cb9b
|
9379c6f8c601bd7ba9ff499507a52b88df988e81
|
/R/splitAlignment.R
|
495099d88f9139d952dd6c3ce52a7832b7e7ac18
|
[] |
no_license
|
sunnyEV/chopper
|
e35670d264b7a0b328e31bbb355019d768bbfe25
|
8b51bb1b9874af1e5a3505dcf627b170f381c838
|
refs/heads/master
| 2021-09-05T02:48:47.271585
| 2018-01-23T19:04:58
| 2018-01-23T19:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,751
|
r
|
splitAlignment.R
|
##' Split alignments into individual files.
##'
##' This function takes an alignment and put each sequence into its
##' individual file that is named the same as the name of the
##' sequence. This function also overwrites the output (it doesn't
##' check that the file created don't already exist).
##'
##' @title Split alignments.
##' @param file path and file name of the alignment (all the seq need
##'     to be the same length)
##' @param output folder where the individual sequences will be stored
##' @param split_by If \code{NULL} (default), each sequence will be a
##'     in a separate file. Otherwise, a data frame with at least 2
##'     columns named \sQuote{groups} and \sQuote{sequences}:
##'     sequences identified in the \sQuote{sequences} column, sharing
##'     the same values in the \sQuote{groups} column will be written
##'     to files.
##' @param format format of the alignment, for now only fasta
##' @param colw width of columns, same as in
##'     \code{\link[ape]{write.dna}}
##' @param checkAmbiguity should the presence of ambiguities in the
##'     sequences be tested? (default=TRUE). If ambiguities are found,
##'     returns a warning.
##' @param checkInternalGaps should the presence of internal gaps be
##'     tested? (default=FALSE). If internal gaps are found, returns a
##'     warning.
##' @param ... additional parameters to be passed to ape:::write.dna
##' @return TRUE if the function succeed, mostly used for its side
##'     effect (create files with individual sequences).
##' @author Francois Michonneau
##' @importFrom ape read.dna
##' @importFrom ape write.dna
##' @export
splitAlignment <- function(file, output,
                           split_by = NULL,
                           format="fasta",
                           colw=10000,
                           checkAmbiguity=TRUE,
                           checkInternalGaps=FALSE, ...) {
    format <- match.arg(format)
    # Note: `checkAmbiguity` / `checkInternalGaps` name both the logical
    # arguments and the functions called below; R's function-call lookup
    # skips the non-function local bindings, so the functions are found.
    if (checkAmbiguity) {
        chk <- checkAmbiguity(file=file, format=format)
        if (length(chk) > 0) {
            warning("Your alignment has ambiguities.")
        }
    }
    if (checkInternalGaps) {
        chkGap <- checkInternalGaps(file=file, format=format)
        if (length(chkGap) > 0) {
            warning("Your alignment has internal gaps.")
        }
    }
    alg <- ape::read.dna(file=file, format=format)
    nbSeq <- dim(alg)[1]
    # One output file per sequence, named after the sequence.
    fnm <- file.path(output, dimnames(alg)[[1]])
    if (is.null(split_by)) {
        for (i in 1:nbSeq) {
            fnm_ <- fnm[i]
            ape::write.dna(alg[i, ], file=fnm_, format="fasta", colsep="", colw=colw, ...)
            # write.dna pads the header line; strip the inserted whitespace
            # so the fasta name matches the sequence name exactly.
            tmpSeq <- readLines(fnm_)
            tmpSeq[1] <- gsub("\\s+", "", tmpSeq[1])
            cat(tmpSeq, file=fnm_, sep="\n")
        }
    } else {
        if (!inherits(split_by, "data.frame")) {
            stop(sQuote("split_by"), " must be a data frame.")
        }
        if (ncol(split_by) < 2) {
            stop(sQuote("split_by"), " must have at least 2 columns.")
        }
        if (! all(c("sequences", "groups") %in% names(split_by))) {
            stop(sQuote("split_by"), " must have columns named ",
                 sQuote("groups"), " and ", sQuote("sequences"))
        }
        # One output file per group of sequences.
        grps <- split(split_by[["sequences"]], split_by[["groups"]])
        fnm <- file.path(output, names(grps))
        for (i in seq_along(grps)) {
            # NOTE(review): na.omit(match(...)) silently drops group members
            # that are absent from the alignment -- confirm this is intended.
            ape::write.dna(alg[na.omit(match(grps[[i]], dimnames(alg)[[1]])), ],
                           file = fnm[i],
                           format = "fasta", colsep = "", colw = colw, ...)
            tmp_seq <- readLines(fnm[i])
            tmp_seq[1] <- gsub("\\s+", "", tmp_seq[1])
            cat(tmp_seq, file = fnm[i], sep = "\n")
        }
    }
    TRUE
}
|
196826d62503c2d792f74a0afcb5a93ead943d6f
|
11deaf3286ab3c5440f5cb321ddd870c8be2bc5a
|
/lookr_old_static/testthat.R
|
327c78f5b21bed029363fd4a21ced08d902dd76e
|
[] |
no_license
|
ekhaebig/Comprehension
|
776a20648b38ec9eade0a52b97d27a13a925b5a2
|
d65a4de8e0f71d29bf00d959383b820a993903fe
|
refs/heads/master
| 2021-01-19T05:46:59.459450
| 2014-05-08T00:20:50
| 2014-05-08T00:20:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,069
|
r
|
testthat.R
|
# Unit tests for the AOI (area-of-interest) helpers defined in AOI.R.
library(testthat)
# run me:
setwd("L:/scripts/lookr/")
source("AOI.R")
setwd("L:/scripts/lookr/tests/")
# test_file("testthat.R")
context(".DefineAOI converts AOI pixel coordinates into screen proportions")
test_that(desc = "DefineAOI is robust to ordering of pixels", {
  # The same AOI given with pixel pairs in every order should be identical.
  xy_ordered <- .DefineAOI(c(300, 600), c(100, 700))
  x_ordered <- .DefineAOI(c(300, 600), c(700, 100))
  y_ordered <- .DefineAOI(c(600, 300), c(100, 700))
  no_ordered <- .DefineAOI(c(600, 300), c(700, 100))
  expect_identical(xy_ordered, x_ordered)
  expect_identical(xy_ordered, y_ordered)
  expect_identical(xy_ordered, no_ordered)
  # Coordinates come back sorted ascending.
  expect_true(xy_ordered$x[1] < xy_ordered$x[2])
  expect_true(xy_ordered$y[1] < xy_ordered$y[2])
})
context(".GetImageAOI maps AOI names onto AOI coordinates")
test_that(desc = "GetImageAOI handles inputs correctly", {
  # Expected AOIs, rebuilt directly from the known pixel boundaries.
  .UL_AOI <- .DefineAOI(x_pix = c(410, 860), y_pix = c(500, 50))
  .UR_AOI <- .DefineAOI(x_pix = c(1060, 1510), y_pix = c(500, 50))
  .LR_AOI <- .DefineAOI(x_pix = c(1060, 1510), y_pix = c(1150, 700))
  .LL_AOI <- .DefineAOI(x_pix = c(410, 860), y_pix = c(1150, 700))
  .LEFT_AOI <- .DefineAOI(x_pix = c(100, 700), y_pix = c(300, 900))
  .RIGHT_AOI <- .DefineAOI(x_pix = c(1220, 1820), y_pix = c(300, 900))
  .FX_AOI <- .DefineAOI(x_pix = c(885, 1035), y_pix = c(525, 675))
  expect_identical(.GetImageAOI("UpperLeftImage"), .UL_AOI)
  expect_identical(.GetImageAOI("UpperRightImage"), .UR_AOI)
  expect_identical(.GetImageAOI("LowerRightImage"), .LR_AOI)
  expect_identical(.GetImageAOI("LowerLeftImage"), .LL_AOI)
  expect_identical(.GetImageAOI("FixationImage"), .FX_AOI)
  expect_identical(.GetImageAOI("ImageL"), .LEFT_AOI)
  expect_identical(.GetImageAOI("ImageR"), .RIGHT_AOI)
  # Unknown or malformed names must error (names are case-sensitive).
  expect_error(.GetImageAOI(""))
  expect_error(.GetImageAOI("imageL"))
  expect_error(.GetImageAOI(NA))
})
context(".GetFramesWithGazeInAOI checks whether points in a trial fall into an AOI")
test_that(desc = "test dummy Trials with no/all looks in an AOI", {
  # NOTE(review): .LEFT_AOI was assigned inside the previous test_that block;
  # test_that blocks do not share local variables, so this presumably relies
  # on a .LEFT_AOI defined in AOI.R -- confirm.
  img_AOI <- .LEFT_AOI
  # Try points that are in the AOI in only one dimension, on the corners of the
  # screen, and close to the edges of the AOI.
  XMean <- c(0, 1, .2, .2, 0, 1, 1, 0, .37, .20, .05, NA, .20, .20)
  YMean <- c(.5, .5, 0, 1, 0, 1, 0, 1, .50, .76, .50, .50, NA, .24999)
  false_trial <- data.frame(XMean, YMean)
  # Expect no false positives
  expect_false(TRUE %in% .GetFramesWithGazeInAOI(false_trial, img_AOI))
  # Try a point within the AOI and try its four corners
  XMean2 <- c(.2, img_AOI$x[1], img_AOI$x[2], img_AOI$x[1], img_AOI$x[2])
  YMean2 <- c(.5, img_AOI$y[1], img_AOI$y[1], img_AOI$y[2], img_AOI$y[2])
  true_trial <- data.frame(XMean = XMean2, YMean = YMean2)
  # Expect no false negatives
  expect_false(FALSE %in% .GetFramesWithGazeInAOI(true_trial, img_AOI))
})
context("AddAOIData.Trial correctly maps gaze coordinates onto AOIs")
TestAOIs <- function(trial) {
# Map AOI data onto trial.
trial <- AddAOIData.Trial(trial)
# Store some useful information from trial attributes.
q_file <- paste0(trial %@% "TestName", "_stim.png")
r_file <- paste0(trial %@% "TestName", "_aoi.png")
title <- paste0(trial %@% "TestName", ", ", trial %@% "Protocol")
# Build a plot template, then specify different colorings for the location
# and stimuli image plots.
p <- qplot(data = trial, x = XMean, y = YMean) +
labs(title = title) +
coord_fixed(ratio = 1200 / 1920)
q <- p + geom_point(aes(color = GazeByImageAOI))
# r <- p + geom_point(aes(color = GazeByAOI))
ggsave(plot = q, file = q_file, width=16, height=10)
# ggsave(plot = r, file = r_file, width=16, height=10)
}
test_that(desc = "Exhaustively plot possible gaze points, colored by AOIs", {
  # Make a trial template to test 241 * 241 points (a grid over the whole
  # screen in proportion units, plus an NA point in each dimension).
  XMean <- c(seq(from = 0, to = 1920, by = 16), NA) / 1920
  YMean <- c(seq(from = 0, to = 1200, by = 12), NA) / 1200
  test_trial <- expand.grid(XMean, YMean)
  names(test_trial) <- c("XMean", "YMean")
  # Make trials to represent each test possible MP arrangement
  TestMPTrial <- function(protocol, target, distractor, test_name) {
    trial <- test_trial
    trial %@% "Task" <- "MP"
    trial %@% "Protocol" <- protocol
    trial %@% "TargetImage" <- target
    trial %@% "DistractorImage" <- distractor
    trial %@% "TestName" <- test_name
    suppressWarnings(TestAOIs(trial))
  }
  TestMPTrial("WFF_Movie", "ImageL", "ImageR", "mp_lrf")
  TestMPTrial("WFF_Movie", "ImageR", "ImageL", "mp_rlf")
  TestMPTrial("WFF_Area", "ImageL", "ImageR", "mp_lra")
  TestMPTrial("WFF_Area", "ImageR", "ImageL", "mp_rla")
  # Test all 48 possible RWL trials
  # (machine-generated dput() output: the image-location factor columns for
  # each of the 48 target/foil arrangements, plus the protocol column)
  orders <- structure(list(UpperLeftImage = structure(c(1L, 1L, 1L, 1L, 1L,
    1L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 4L,
    1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 4L,
    4L, 4L, 4L, 4L, 4L), .Label = c("PhonologicalFoil", "SemanticFoil",
    "TargetImage", "Unrelated"), class = "factor"), UpperRightImage =
    structure(c(2L, 2L, 3L, 3L, 4L, 4L, 1L, 1L, 3L, 3L, 4L, 4L, 1L, 1L, 2L, 2L,
    4L, 4L, 1L, 1L, 2L, 2L, 3L, 3L, 2L, 2L, 3L, 3L, 4L, 4L, 1L, 1L, 3L, 3L, 4L,
    4L, 1L, 1L, 2L, 2L, 4L, 4L, 1L, 1L, 2L, 2L, 3L, 3L), .Label =
    c("PhonologicalFoil", "SemanticFoil", "TargetImage", "Unrelated"), class =
    "factor"), LowerRightImage = structure(c(3L, 4L, 2L, 4L, 2L, 3L, 3L, 4L, 1L,
    4L, 1L, 3L, 2L, 4L, 1L, 4L, 1L, 2L, 2L, 3L, 1L, 3L, 1L, 2L, 3L, 4L, 2L, 4L,
    2L, 3L, 3L, 4L, 1L, 4L, 1L, 3L, 2L, 4L, 1L, 4L, 1L, 2L, 2L, 3L, 1L, 3L, 1L,
    2L), .Label = c("PhonologicalFoil", "SemanticFoil", "TargetImage",
    "Unrelated"), class = "factor"), LowerLeftImage = structure(c(4L, 3L, 4L,
    2L, 3L, 2L, 4L, 3L, 4L, 1L, 3L, 1L, 4L, 2L, 4L, 1L, 2L, 1L, 3L, 2L, 3L, 1L,
    2L, 1L, 4L, 3L, 4L, 2L, 3L, 2L, 4L, 3L, 4L, 1L, 3L, 1L, 4L, 2L, 4L, 1L, 2L,
    1L, 3L, 2L, 3L, 1L, 2L, 1L), .Label = c("PhonologicalFoil", "SemanticFoil",
    "TargetImage", "Unrelated"), class = "factor"), Protocol = c("WFF_Movie",
    "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie",
    "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie",
    "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie",
    "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Movie",
    "WFF_Movie", "WFF_Movie", "WFF_Movie", "WFF_Area", "WFF_Area", "WFF_Area",
    "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area",
    "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area",
    "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area", "WFF_Area",
    "WFF_Area", "WFF_Area", "WFF_Area")), .Names = c("UpperLeftImage",
    "UpperRightImage", "LowerRightImage", "LowerLeftImage", "Protocol" ),
    row.names = c(NA, 48L), class = "data.frame")
TestRWLTrial <- function(row_num) {
aois <- orders[row_num, ]
trial <- test_trial
trial %@% "Task" <- "RWL"
trial %@% "Protocol" <- aois$Protocol
trial %@% "UpperLeftImage" <- as.character(aois$UpperLeftImage)
trial %@% "UpperRightImage" <- as.character(aois$UpperRightImage)
trial %@% "LowerRightImage" <- as.character(aois$LowerRightImage)
trial %@% "LowerLeftImage" <- as.character(aois$LowerLeftImage)
# The value of the above attributes are also attribute names, so we make the
# location and stimulus attributes point name each other. Yes it looks dumb.
trial %@% (trial %@% "UpperLeftImage") <- "UpperLeftImage"
trial %@% (trial %@% "UpperRightImage") <- "UpperRightImage"
trial %@% (trial %@% "LowerRightImage") <- "LowerRightImage"
trial %@% (trial %@% "LowerLeftImage") <- "LowerLeftImage"
# Come up with a unique test name.
cs[1] <- str_sub(aois$UpperLeftImage, 0, 1)
cs[2] <- str_sub(aois$UpperRightImage, 0, 1)
cs[3] <- str_sub(aois$LowerRightImage, 0, 1)
cs[4] <- str_sub(aois$LowerLeftImage, 0, 1)
cs[5] <- ifelse(aois$Protocol == "WFF_Area", "A", "F")
trial %@% "TestName" <- paste0(c("RWL_", cs), collapse="")
suppressWarnings(TestAOIs(trial))
}
Map(TestRWLTrial, 1:nrow(orders))
})
|
c72a9976702bb2b4f5a819f5901e27d10fa33838
|
1905c665a86ef1bb950607ce30a7fe513626bbc0
|
/presentationCode/Bartsch_ggplot2_Introduction_and_Customization.r
|
d759d2dfb61b6bda596485e62647f1313b3c1f4d
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
USEPA/R-User-Group
|
8ef610b12f08cf70c8a8eb62b728252365bafd30
|
0233e0fcc0b95fc6435c8edbd7281aeeebcc7666
|
refs/heads/master
| 2023-05-25T05:33:35.454187
| 2023-05-11T18:22:55
| 2023-05-11T18:22:55
| 39,468,949
| 12
| 17
|
CC0-1.0
| 2020-04-13T22:50:51
| 2015-07-21T20:43:50
|
HTML
|
UTF-8
|
R
| false
| false
| 8,022
|
r
|
Bartsch_ggplot2_Introduction_and_Customization.r
|
#############################################################################
#File: ggplot2_Introduction_&_Customization.r
#Purpose: Demonstrate how to produce and customize plots using the ggplot
# function in the ggplot2 package. We will create and customize a scatter
# plot, a bar plot, a stacked histogram, and a categorical boxplot.
# We will also plot multiple plots on one panel.
#By: Will Bartsch - EPA-MED, Duluth
#Date: 7/16/14
#############################################################################
#Load the necessary packages.
library(ggplot2)
library(plyr)
library(grid)
#Create a data frame to use for the plots.
dat <- data.frame(condition = rep(c("A", "B", "C"), each=30),
response=rep(c("Yes", "No"), each=15, times=3),
xvar = 50:139 + rnorm(90,sd=15),
yvar = 50:139 + rnorm(90,sd=15))
head(dat)
dim(dat)
########################## A scatter plot #############################
# Basic plot using base R
plot(dat$xvar, dat$yvar)
# Basic plot using the 'qplot' function in ggplot2
qplot(dat$xvar, dat$yvar)
# Basic plot using the 'ggplot' function in ggplot2
ggplot(dat, aes(y=yvar, x=xvar)) + geom_point()
# Change the symbols to a solid triangle and color code according to
# "condition"
# A plot of all the symbols to find the triangle
plot(1:25, pch=c(1:25))
text(1:25, pos=3, offset=0.4)
# A colorblind friendly palette (create it for later use)
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442",
"#0072B2", "#D55E00", "#CC79A7")
# Create the plot
sp <- ggplot(dat, aes(y=yvar, x=xvar, color=condition))
sp + geom_point(shape=17)
# Add a title and axis lables
sp + geom_point(shape=17)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y")
# Same plot with a black and white theme
sp + geom_point(shape=17)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") + theme_bw()
# Format the legend
sp + geom_point(shape=17, size=3)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") +
scale_colour_manual("Group", breaks=c("A", "B", "C"),
labels=c("Apple", "Banana", "Cherry"), values=cbbPalette) + # Change the labels and title, change the color palette
theme(legend.background=element_rect(color="black", fill="gray"), # Make the legend background gray with a black border
legend.text=element_text(size=16), #Change label text size
legend.title=element_text(size=16, face="bold"), #Change title text
legend.key.size=unit(1, "cm"), #Change Size of symbol box
legend.position = c(0.89,0.135)) #Set position
# Format Axis, Labels and Title (Maintain previous formating) and add
# trend lines
sp + geom_point(shape=17, size=3)+
ggtitle("Data Presentation") + xlab("X") + ylab("Y") +
scale_colour_manual("Group", breaks=c("A", "B", "C"),
labels=c("Apple", "Banana", "Cherry"), values=cbbPalette)+
theme(legend.background=element_rect(color="black", fill="gray"),
legend.text=element_text(size=16),
legend.title=element_text(size=16, face="bold"),
legend.key.size=unit(1, "cm"),
legend.position = c(0.895,0.135))+
theme(axis.title=element_text(size=16), #Change axis title size
axis.text=element_text(size=12), #Change axis label size
plot.title=element_text(size=20, face="italic"))+ #Change plot title size and make it italic
xlim(0,175) + ylim(0, 175) + #Reset the axis ranges
#scale_y_continuous(limits = c(0,175), expand = c(0,0))+ #Reset the axis ranges and force it to start at 0
#scale_x_continuous(limits = c(0,175), expand = c(0,0))+ #Reset the axis ranges and force it to start at 0
geom_smooth(method=lm, fill=NA, size=1) #Add the linear regression trend lines
############## A bar plot of means with standard error bars #################
# Summarize the data by mean and standard deviation using ddply from
# the plyr package
dat_mean <- ddply(dat, .(condition), summarize, avg=mean(yvar),
stdev=sd(yvar))
dat_mean$std_error <- dat_mean$stdev/sqrt(30)
# Make a basic plot
bp <- ggplot(dat_mean, aes(y=avg, x=condition))
bp + geom_bar(stat="identity", color="black", fill="black")+
geom_errorbar(aes(ymin=avg-std_error,
ymax=avg+std_error), color='red', width=0.5)
# Customize the plot with white background and no grid lines
bp + geom_bar(stat="identity")+ geom_errorbar(aes(ymin=avg-std_error,
ymax=avg+std_error), color='red', width=0.5, size=1.5) +
ggtitle("Example\nBar Plot") +
xlab(expression(paste(italic("Condition")))) +
ylab(expression(paste("NH"[4]^" + ",mu,"g/L")))+
theme(panel.background=element_rect(fill="white"),
panel.grid=element_blank(),
panel.border=element_rect(color="black", fill=NA))
##################### A faceted histogram plot ###########################
hp <- ggplot(dat, aes(x=xvar, fill=condition))
hp + geom_histogram(color="black", binwidth=15) +
facet_grid(condition ~ .) + ggtitle("Results") +
xlab("X") + ylab("Count") +
scale_y_continuous(breaks=seq(0, 12, 3))+
scale_fill_hue("Variable", l=20) #Darken the color scheme and label legend
# If you don't want a legend, use the following line in your script:
theme(legend.position="none")
#################### Categorical Box Plot ###################
boxp <- ggplot(dat, aes(y=yvar, x=condition, fill=response))
boxp + geom_boxplot() + ggtitle("Was There\nA\nResponse")+
xlab("Condition") + ylab("Y Variable")
#################### Plotting Multiple Plots on One Panel ###################
# Multiple plot function (from www.cookbook-r.com)
#
# ggplot objects can be passed in ..., or to plotlist
# (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Draw several plot objects on a single page (adapted from www.cookbook-r.com).
#
# Plots may be supplied positionally through `...` or as a list via
# `plotlist`.  `file` is accepted for call compatibility but unused.
# - cols:   number of columns when no explicit layout is given
# - layout: optional matrix of plot indices; overrides `cols`.  For example
#           matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE) puts plot 1 in the
#           upper left, plot 2 in the upper right, and plot 3 across the
#           whole bottom row.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  require(grid)
  # Collect every plot, positional and list-supplied, into one list.
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)
  # No layout given: fill a column-major grid with `cols` columns.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    print(all_plots[[1]])
  } else {
    # Fresh page with a viewport grid matching the layout matrix.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (k in 1:n_plots) {
      # Every grid cell whose layout entry equals k belongs to plot k.
      cells <- as.data.frame(which(layout == k, arr.ind = TRUE))
      print(all_plots[[k]],
            vp = viewport(layout.pos.row = cells$row,
                          layout.pos.col = cells$col))
    }
  }
}
#Make multiple plots and save them as an object
p1 <- ggplot(dat, aes(y=yvar, x=xvar, color=condition)) +
geom_point(shape=17)
p2 <- ggplot(dat, aes(y=yvar, x=xvar, color=condition)) +
geom_smooth(method=lm)
p3 <- ggplot(dat_mean, aes(y=avg, x=condition)) +
geom_bar(stat="identity", color="black", fill="black")+
geom_errorbar(aes(ymin=avg-1.96*stdev,
ymax=avg+1.96*stdev), color='red', width=0.5)
p4 <- ggplot(dat, aes(y=yvar, x=condition)) +
geom_boxplot()
# Plot them
multiplot(p1,p2,p3,p4, cols=2)
multiplot(p1,p2, cols=2)
multiplot(p1,p2, cols=1)
multiplot(p1,p2,p3, layout=matrix(c(1,2,3,3), nrow=2, byrow=TRUE))
multiplot(p1,p2,p3, layout=matrix(c(1,2,1,3), ncol=2, byrow=TRUE))
############################# Resources ################################
#
# Cookbook for R
# Available online and a great resource. Go to the Graphs section:
# www.cookbook-r.com
#
#
# Use R! ggplot2 - Springer
#
########################################################################
|
2c67f9a7614ee541f9e86e73eaa45ebb796eb836
|
63a2e3fb67e9e7cdaf750e4fee1abb92c20b0be0
|
/codylexyfinal1.R
|
ba05a27de17fc64d6f71a0db2a29c22b375e267f
|
[] |
no_license
|
kstewart22/passionproject
|
7e134716bc7579c213beae16f6eb8dfdcc13826f
|
5f1adb6114d808ecca54da77accda0611d066bf7
|
refs/heads/main
| 2023-05-04T01:15:07.331986
| 2021-05-14T16:19:07
| 2021-05-14T16:19:07
| 367,415,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
codylexyfinal1.R
|
# Word-cloud pipeline for an interview transcript: read the raw text, clean
# it with the tm package, then render term frequencies with wordcloud2.
# Load the packages this script depends on (the original assumed they were
# already attached, which made the script fail when run standalone).
library(tm)
library(wordcloud2)

cody <- readLines(file.choose())
lexy <- Corpus(VectorSource(cody))
inspect(lexy)
# Transformer that replaces every match of `pattern` with a space.
# BUG FIX: the original called `lexynt_transformer()`, which does not exist;
# the tm helper for wrapping a plain-text function is content_transformer().
toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
lexy <- tm_map(lexy, toSpace, "/")
lexy <- tm_map(lexy, toSpace, "@")
lexy <- tm_map(lexy, toSpace, "\\|")
# Remove numbers
lexy <- tm_map(lexy, removeNumbers)
# Remove english common stopwords
lexy <- tm_map(lexy, removeWords, stopwords("english"))
# Remove custom filler words, specified as a character vector
lexy <- tm_map(lexy, removeWords, c("like","got", "gonna","guys","get","really","yeah", "just", "you", "hi","bye","um", "can","sorta", "kinda", "kind of"))
# Remove punctuations
lexy <- tm_map(lexy, removePunctuation)
# Eliminate extra white spaces
lexy <- tm_map(lexy, stripWhitespace)
# Tabulate word frequencies, most frequent first.
dtm <- TermDocumentMatrix(lexy)
y <- as.matrix(dtm)
x <- sort(rowSums(y), decreasing=TRUE)
f <- data.frame(word = names(x), freq=x)
head(f, 10)
# Draw the word cloud from the frequency table.
wordcloud2(f, size = .5, minRotation = -pi/6, maxRotation = pi/6,
           rotateRatio = 0.9, shape = 'circle', color = 'random-dark')
|
d20098cdf4068070d8edb00e3b87144553701a10
|
52589cf1c513b27d81c2006ecc6e87a160762d27
|
/server.R
|
1dcd156904f79b28e2ef07671dec92165cf37585
|
[] |
no_license
|
chdhatri/Final-Project-Delivery
|
692057ceb706d9271d53e251df577a01eab52891
|
ce1ad6b58ed8c31b1a4a40542707302f037bf5a5
|
refs/heads/master
| 2020-06-12T18:44:19.330441
| 2016-12-06T21:38:06
| 2016-12-06T21:38:06
| 75,773,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,870
|
r
|
server.R
|
#############################CMPE 239 PROJECT#####################
#TEAM VOLTAGE
#Sucharu Gupta
#Dhatri CHennavajula
#Andrew Wong
#Sprush Ujjwal
#UNCOMMENT THESE WHEN YOU ARE RUNNING FIRST TIME TO INSTALL PACKAGES
#install.packages("shiny")
#install.packages("e1071")
#install.packages("randomForest")
library(shiny)
library(e1071)
library(randomForest)
# Shiny server: shows gene-trial records matching the user's inputs and
# predicts trial RESULT / STUDY_CONCLUSION with random forests trained on
# gene.train.csv.
shinyServer(function(input, output) {
  # NOTE(review): hard-coded setwd() ties the app to one machine; kept so
  # the relative read.csv()/read.table() calls below still resolve.
  setwd("/Users/ssarma/Google Drive/CMPE 239/Project")

  # Table of test-set rows matching the selected dose/count/strain/result.
  output$playerinfo <- renderTable({
    genes.data <- read.csv(file='test.data14_15.csv', sep=',', h=T)
    gene.data <- subset(genes.data,
                        DOSE == input$dose &
                          newCount == input$count &
                          STRAIN == input$strain &
                          TRIAL_RESULT == input$TrailResult)
    output.data <- gene.data[c("DOSE", "newCount", "STRAIN", "MICROSOMAL_ACTIVATION_USED",
                               "TRIAL_RESULT", "RESULT", "STUDY_CONCLUSION")]
    output.data
  })

  # Predicted RESULT for the user-specified trial.
  output$result <- renderPrint({
    # One-row test frame built from the UI inputs.
    test.raw.data <- data.frame(DOSE = as.numeric(input$dose),
                                newCount = as.integer(input$count),
                                STRAIN = as.factor(input$strain),
                                TRIAL_RESULT = as.factor(input$TrailResult))
    # Load and prepare the training data.
    train.raw.data <- read.table(file='gene.train.csv', sep=',', h=T)
    model.data <- train.raw.data[c("DOSE", "newCount", "STRAIN", "TRIAL_RESULT", "RESULT")]
    model.data$RESULT <- as.factor(model.data$RESULT)
    # Align factor levels of the test row with the training data so
    # predict() sees consistent factors.
    levels(test.raw.data$STRAIN) <- levels(model.data$STRAIN)
    levels(test.raw.data$TRIAL_RESULT) <- levels(model.data$TRIAL_RESULT)
    # BUG FIX: the original passed `data = gene.train`, an object never
    # defined anywhere in this file; the prepared training frame is
    # `model.data`.
    rf1.out <- randomForest(RESULT ~ DOSE + newCount + STRAIN + TRIAL_RESULT,
                            data = model.data, importance=T, ntree=500)
    # make predictions
    glm.pred <- predict(rf1.out, test.raw.data)
    paste("The Chemical with the given inputs is ", toString(glm.pred))
  })

  # Predicted STUDY_CONCLUSION: first predict RESULT, then feed it as a
  # feature to a second forest that predicts the conclusion.
  output$studyConclusion <- renderPrint({
    # One-row test frame built from the UI inputs.
    test.raw.data <- data.frame(DOSE = as.numeric(input$dose),
                                newCount = as.integer(input$count),
                                STRAIN = as.factor(input$strain),
                                TRIAL_RESULT = as.factor(input$TrailResult))
    # Load and prepare the training data (including STUDY_CONCLUSION).
    train.raw.data <- read.table(file='gene.train.csv', sep=',', h=T)
    model.data <- train.raw.data[c("DOSE", "newCount", "STRAIN", "TRIAL_RESULT", "RESULT", "STUDY_CONCLUSION")]
    model.data$RESULT <- as.factor(model.data$RESULT)
    # NOTE(review): the block below overwrites the UI-derived test row with
    # hard-coded demo values; looks like leftover debug code -- confirm
    # whether it should be removed so the user's inputs are honored.
    test.raw.data <- data.frame(DOSE = as.numeric(5000),
                                newCount = as.integer(0),
                                STRAIN = as.factor("TA98"),
                                TRIAL_RESULT = as.factor("Negative"))
    levels(test.raw.data$STRAIN) <- levels(model.data$STRAIN)
    levels(test.raw.data$TRIAL_RESULT) <- levels(model.data$TRIAL_RESULT)
    # Stage 1: predict RESULT.
    # BUG FIX: `data = gene.train` -> `data = model.data` (see above).
    rf1.out <- randomForest(RESULT ~ DOSE + newCount + STRAIN + TRIAL_RESULT,
                            data = model.data, importance=T, ntree=500)
    rf1.pred <- predict(rf1.out, test.raw.data)
    test.raw.data$RESULT <- rf1.pred
    levels(test.raw.data$RESULT) <- levels(model.data$RESULT)
    paste("The Chemical with the given inputs is ", test.raw.data$RESULT)
    # Stage 2: predict STUDY_CONCLUSION using the stage-1 RESULT.
    rf2.two <- randomForest(STUDY_CONCLUSION ~ DOSE + newCount + STRAIN + TRIAL_RESULT + RESULT,
                            data = model.data, importance=T, ntree=500)
    rf.pred <- predict(rf2.two, test.raw.data)
    paste(rf.pred)
  })
})
|
72b09a48ead0e855f0eb9848b8771901578cbf7f
|
be511ba428395f3288d6ad9e0d7b8102bd77cba7
|
/322_asap_rois/120_gender/30_scratch.R
|
ec45613b2c39f2fb82453709ecc58b4e6c34371d
|
[
"MIT"
] |
permissive
|
HumanNeuroscienceLab/face_fam
|
71708b85a466bbcc28ae9f0e49a0d15a70d068e2
|
2980163f9dee4c24f0d4e19a67bb79773b855add
|
refs/heads/master
| 2021-06-13T16:21:41.418312
| 2017-05-14T17:33:17
| 2017-05-14T17:33:17
| 81,000,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,135
|
r
|
30_scratch.R
|
# Setup -------------------------------------------------------------------
if (!any(.libPaths() == "/home/zshehzad/R/x86_64-redhat-linux-gnu-library/3.2")) .libPaths(c("/home/zshehzad/R/x86_64-redhat-linux-gnu-library/3.2", .libPaths()))
if (!any(.libPaths() == "/home/zshehzad/R_libs")) .libPaths(c("~/R_libs", .libPaths()))
library(doMC)
registerDoMC(30)
library(plyr)
library(bigmemory)
library(nlme)
library(ggplot2)
library(ggthemr)
library(RColorBrewer)
ggthemr('pale', type='outer', text_size=14, layout='plain')
subjects <- sprintf("sub%02i", 1:6)
# Load --------------------------------------------------------------------
# regressors etc
fdf <- read.csv("120_gender/z_df_info.csv")
fdf <- fdf[,-1]
head(fdf)
# face features
bdir <- "/data1/famface01/analysis/misc/openface/concatenate_layers"
dfile <- sprintf("clayer%02i.desc", 26)
bm <- attach.big.matrix(file.path(bdir, dfile))
feats <- as.matrix(bm)
# reduce to only vids
vdf <- fdf[fdf$fr==3,]
vdf$ind<- 1:nrow(vdf)
vfeats <- apply(feats, 2, function(x) tapply(x, fdf$vid, mean))
# read in average
base <- "/data1/famface01/analysis/misc/openface"
labels <- read.csv(sprintf('%s/masked_labels_redo.csv', base), header=F)
features <- read.csv(sprintf('%s/masked_reps_redo.csv', base), header=F)
## find the average face
aind <- grep("average_face", labels$V2)
avg.all <- as.numeric(features[aind,])
# read in gender average
base <- "/data1/famface01/analysis/misc/openface/prototypes_gender"
labels <- read.csv(sprintf('%s/labels.csv', base), header=F)
features <- read.csv(sprintf('%s/reps.csv', base), header=F)
avg.male <- as.numeric(features[1,])
avg.female <- as.numeric(features[2,])
# cor the prototypes
round(cor(cbind(avg.all, avg.male, avg.female)), 3)
# Load the time-series ----------------------------------------------------
predir <- "/data1/famface01/analysis/preprocessed"
# get the roi names
peak.tab <- read.csv("/data1/famface01/command/misc/face_representations/240_roi/z_asap_allpeaks.csv")
rnames <- paste(peak.tab$hemi, peak.tab$name, sep=".")
rords <- rep(1:10, 2)
# load the rois
ts.mats <- ldply(subjects, function(subj) {
tsdir <- sprintf("%s/%s/func/unfam_vids/rois_asap_ventral_peaks", predir, subj)
tsfile <- file.path(tsdir, "asap_ventral_peaks_all.1D")
ts <- read.table(tsfile)
colnames(ts) <- rnames
ts
}, .progress="text")
ts.mats <- as.matrix(ts.mats)
# and the timing
# Load the per-subject ROI/timing bundle stored as an .rda file and return
# the `dat` object saved inside it (NULL when the file defines no `dat`).
load.cur <- function(subj) {
  data.dir <- "/data1/famface01/analysis/encoding/ShapeAnalysis/data"
  rda.path <- sprintf("%s/roi_n_more_%s.rda", data.dir, subj)
  # Load into a scratch environment instead of the function frame, so only
  # the object we want escapes.
  holder <- new.env(parent = emptyenv())
  load(rda.path, envir = holder)
  if (exists("dat", envir = holder, inherits = FALSE)) holder$dat else NULL
}
# Load timing information
dat.vols <- lapply(subjects, load.cur)
names(dat.vols) <- subjects
head(dat.vols$sub01$basics$timing)
# Prototype 1 -------------------------------------------------------------
# This uses the above avgs as the prototypes
# calculate distance to averages
dist.to.avg <- Rfast::Dist(rbind(avg.all, vfeats))[-1,1]
dist.to.male <- Rfast::Dist(rbind(avg.male, vfeats))[-1,1]
dist.to.female <- Rfast::Dist(rbind(avg.female, vfeats))[-1,1]
dist.btw.gender<- Rfast::Dist(rbind(avg.male, avg.female))[1,2]
# can there be some category measure? closeness to one category than another
dist.to.male/dist.to.female
(dist.to.male - dist.to.female)/dist.btw.gender
## want to ask if stuff close to each of the prototypes differs from the group further away
## so we can get the closest 25%
# run a regression to see how well these things fit the data
fit <- glm(vdf$gender ~ dist.to.avg + dist.to.male + dist.to.female, family=binomial(link='logit'))
summary(fit)
# Convolve regressors -----------------------------------------------------
# Extract the regressor column labels from an AFNI X-matrix (.1D) file.
#
# AFNI writes a header comment of the form:
#   # ColumnLabels = "lab1 ; lab2 ; ..."
# Returns the labels as a character vector.
#
# Rewritten in pure R (was system("grep ... | sed ...")): no dependency on
# external grep/sed binaries, and robust to file paths containing spaces or
# shell metacharacters.
xmat_labs <- function(fn) {
  hdr <- grep("ColumnLabels", readLines(fn), value = TRUE)
  if (length(hdr) == 0) stop("no ColumnLabels line found in ", fn)
  hdr <- sub("# ColumnLabels = ", "", hdr, fixed = TRUE)
  hdr <- gsub("\"", "", hdr)
  # Use the first matching line, as the grep|sed pipeline effectively did.
  cols <- strsplit(hdr, ' ; ')[[1]]
  cols
}
# Read an AFNI X-matrix file into a data frame, naming its columns from the
# ColumnLabels header (via xmat_labs).  When rm.nums is TRUE, the trailing
# "#0" suffix AFNI appends to stimulus labels is stripped.
read.xmat <- function(fn, rm.nums=T) {
  labels <- xmat_labs(fn)
  if (rm.nums) {
    labels <- sub("#0$", "", labels)
  }
  design <- read.table(fn)
  colnames(design) <- labels
  design
}
# Format amplitude-modulated stimulus timing for AFNI's -stim_times_AM1.
#
# onsets: event onset times (seconds); amps: per-event amplitudes;
# runs: run index (1..nruns) of each event; nruns: total number of runs.
# center/scale are forwarded to scale() to optionally demean/standardize
# the amplitudes before formatting.
#
# Returns one string per run of the form "onset*amp onset*amp ...", or "*"
# (AFNI's empty-run marker) for runs with no events.
afni.timing.amp <- function(onsets, amps, runs, nruns, center=F, scale=F) {
  # scale() returns a one-column matrix; the logical indexing below treats
  # it as a vector, so only the values matter.
  amps <- scale(amps, center=center, scale=scale)
  # vapply over seq_len(): type-stable (always character) and correct for
  # nruns == 0, unlike the original sapply(1:nruns, ...).
  vapply(seq_len(nruns), function(i) {
    inds <- runs == i
    if (any(inds)) {
      paste(sprintf("%.5f*%.8f", onsets[inds], amps[inds]), collapse=" ")
    } else {
      "*"
    }
  }, character(1))
}
# Convolve per-video regressors with an HRF by invoking AFNI's 3dDeconvolve
# (-x1D_stop: build the design matrix only) and return the X-matrix.
#
# regressors: matrix/data frame, one row per video in `vdf`, one column per
#   regressor (column names become AFNI stimulus labels).
# vdf:    data frame with a `vid` column giving the video order of `regressors`.
# subj:   subject id used to locate the functional runs on disk.
# timing: event timing with `video`, `onset` and `run` columns.
# nruns:  number of runs (defaults to max(timing$run)).
# ...:    forwarded to system() (e.g. ignore.stdout=TRUE).
afni.convolve.regressors <- function(regressors, vdf, subj, timing, nruns=max(timing$run), ...) {
  regressors <- as.matrix(regressors)
  rlabs <- colnames(regressors)
  if (is.null(rlabs)) rlabs <- sprintf("Stim%02i", 1:ncol(regressors))
  # Reorder regressor rows to match the timing file's video order.
  cur.vids <- as.character(vdf$vid)
  ref.vids <- as.character(timing$video)
  oinds <- sapply(ref.vids, function(vname) which(vname==cur.vids))
  # BUG FIX: all.equal() returns a character string (not FALSE) on mismatch,
  # so `!all.equal(...)` raised a type error instead of reaching stop();
  # wrap in isTRUE() so the intended sanity check actually fires.
  if (!isTRUE(all.equal(cur.vids[oinds], ref.vids))) stop("ordering issue")
  ro.regressors <- regressors[oinds,,drop=F]
  # Build amplitude-modulated timing strings, one line per run per regressor.
  lst.amps <- lapply(1:ncol(ro.regressors), function(i) {
    onsets <- timing$onset
    runs <- timing$run
    amps <- afni.timing.amp(onsets, ro.regressors[,i], runs, nruns, center=T)
    amps
  })
  # Write each regressor's timing to a temporary .1D file for AFNI.
  amp.fnames <- sapply(1:length(lst.amps), function(i) {
    fname <- tempfile(pattern=sprintf("afni_timing_am%02i_", i), fileext=".1D")
    writeLines(lst.amps[[i]], fname)
    fname
  })
  # Assemble and run the 3dDeconvolve command.
  infiles <- sprintf("/data1/famface01/analysis/preprocessed/%s/func/unfam_vids/rois_asap_ventral_peaks/asap_ventral_peaks_run*.nii.gz", subj)
  ofile <- tempfile(pattern="xmat_", fileext=".1D")
  cmd0 <- "3dDeconvolve -global_times -input '%s' -force_TR 1 -polort -1 -x1D_stop -num_stimts %i %s -x1D %s"
  stim_cmds <- sapply(1:length(amp.fnames), function(i) {
    fname <- amp.fnames[i]
    lab <- rlabs[i]
    sprintf("-stim_times_AM1 %i '%s' 'SPMG1(2)' -stim_label %i %s", i, fname, i, lab)
  })
  stim_cmds <- paste(stim_cmds, collapse=" ")
  cmd <- sprintf(cmd0, infiles, ncol(ro.regressors), stim_cmds, ofile)
  cat(cmd, "\n")
  retcode <- system(cmd, ...)
  if (retcode != 0) stop("ERROR: did not run properly\n")
  # Parse the design matrix AFNI wrote.
  xmat <- read.xmat(ofile)
  return(xmat)
}
# Get
taskdir <- "/data1/famface01/analysis/task_activity"
regs <- cbind(avg=dist.to.avg, male=dist.to.male, female=dist.to.female, diff=abs((dist.to.male - dist.to.female)/dist.btw.gender))
df.xmats <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(regs, vdf, subj, timing, max(timing$run))
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
rregs <- regs
rregs[,2:3] <- lm(regs[,2:3] ~ regs[,1])$residuals
df.xmats2 <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(rregs, vdf, subj, timing, max(timing$run))
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
rregs2 <- cbind(gender=as.numeric(vdf$gender)-1.5, avg=rregs[,1], lm(rregs[,2:3] ~ vdf$gender)$residuals)
df.xmats3 <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(rregs2, vdf, subj, timing, max(timing$run))
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
# Run regression ----------------------------------------------------------
# Note: that gender effects present in just the average
summary(lm(dist.to.avg ~ gender, data=vdf))
library(nlme)
# Convert an lme summary's coefficient table into a tidy data frame of
# t-values, p-values and equivalent z-values, tagged with the ROI's
# hemisphere/name/order, dropping the intercept row.
# NOTE(review): hemi/name/ord still come from the globals `rname`, `rnames`
# and `rords` (this is scratch code); those must be set before calling.
lme.to.sdf <- function(sfit) {
  ts <- sfit$tTable[,4]
  zs <- qt(sfit$tTable[,5], Inf, lower.tail=F)
  # BUG FIX: the original read `sfits[[rname]]$tTable` here (and for
  # `measure` below), a global lookup inconsistent with the `sfit` argument
  # used for ts/zs; use the argument throughout.
  ps <- sfit$tTable[,5]
  hemi <- sub("[.].*", "", rname)
  name <- sub("[lr]h.", "", rname)
  ord <- rords[rnames==rname]
  sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
                    measure=rownames(sfit$tTable),
                    tval=ts, pval=ps, zval=zs)
  sdf[-1,] # remove intercept
}
# get the lme results
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
motion <- as.matrix(df.xmats[,5:10])
prots <- as.matrix(df.xmats[,11:13])
fit = lme(y ~ faces + quests + motion + prots,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
ri <- 5
motion <- as.matrix(df.xmats[,5:10])
prots <- lm(as.matrix(df.xmats[,12:13]) ~ df.xmats[,11])$residuals
fit1 = lme(y ~ faces + quests + motion + avg + prots,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit1 <- summary(fit1)
avg <- df.xmats[,11]
prots <- as.matrix(df.xmats[,12:13])
fit2 = lme(y ~ faces + quests + motion + avg + prots,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit2 <- summary(fit2)
sfit2
tmp <- anova(sfit1)
tmp$`p-value`
anova(sfit2)
anova(sfit3)
anova(fit1, fit2)
motion <- as.matrix(df.xmats[,5:10])
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
#prots <- as.matrix(df.xmats2[,12:13])
fit = lme(y ~ faces + quests + motion + avg + diff,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
# add on the fdr values
library(fdrtool)
ret <- fdrtool(c(0.01/2,0.05/2,df.sfit$pval), statistic="pvalue", plot=F)
fdr.thr1 <- ret$qval[1]
fdr.thr2 <- ret$qval[2]
ret <- fdrtool(df.sfit$pval, statistic="pvalue", plot=F)
df.sfit$fdr.pval <- ret$qval
# get t-thr
dof <- sfits[[1]]$tTable[2,3]
fdr.tthr1 <- qt(fdr.thr1, dof, lower.tail=F)
fdr.tthr2 <- qt(fdr.thr2, dof, lower.tail=F)
head(df.sfit)
library(ggplot2)
library(ggthemr)
library(RColorBrewer)
ggthemr('pale', type='outer', text_size=14, layout='plain')
fdr.tthr1 <- 2.7
df.sfit2 <- subset(df.sfit, measure != "faces" & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
motion <- as.matrix(df.xmats3[,5:10])
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
fit = lme(y ~ faces + quests + motion + gender + avg + male,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats3),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
df.sfit2 <- subset(df.sfit, measure %in% c("avg", "male", "gender") & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
# Random Prototypes --------------------------------------------------------
dmat <- Rfast::Dist(vfeats)
set.seed(42)
rand1 <- sample(vdf$ind[vdf$gender=="Male"], 1)
rand2 <- sample(vdf$ind[vdf$gender=="Female"], 1)
dist.to.rand1 <- dmat[rand1,-rand1]
dist.to.rand2 <- dmat[rand2,-rand2]
# Get
taskdir <- "/data1/famface01/analysis/task_activity"
regs <- cbind(avg=dist.to.avg, male=dist.to.male, female=dist.to.female, rand1=dist.to.rand1, rand2=dist.to.rand2)
df.xmats <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(regs, vdf, subj, timing, max(timing$run))
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
motion <- as.matrix(df.xmats[,5:10])
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
#prots <- as.matrix(df.xmats2[,12:13])
fit = lme(y ~ faces + quests + motion + avg + rand2,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
ggthemr('pale', type='outer', text_size=14, layout='plain')
fdr.tthr1 <- 2.7
df.sfit2 <- subset(df.sfit, measure != "faces" & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
# What if we don't do a random subset but instead we take the k-means
# Exploratory sweep over the optimal number of k-means clusters on the face
# features (vfeats), trying several initializers and selection criteria.
# Each call plots the criterion curve; results are inspected visually.
library(ClusterR)
# this says 2 (hmm so just showing that the neural network clearly clusters into male/female)
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=20, plot_clusters = T, verbose = F,
initializer = "optimal_init", criterion = 'distortion_fK')
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=20, plot_clusters = T, verbose = F,
initializer = "kmeans++", criterion = 'distortion_fK')
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=20, plot_clusters = T, verbose = F,
initializer = "optimal_init", criterion = 'silhouette')
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=20, plot_clusters = T, verbose = F,
initializer = "kmeans++", criterion = 'silhouette')
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=20, plot_clusters = T, verbose = F,
initializer = "optimal_init", criterion = 'variance_explained')
opt = Optimal_Clusters_KMeans(vfeats, max_clusters=30, plot_clusters = T, verbose = F,
initializer = "optimal_init", criterion = 'Adjusted_Rsquared')
#criterion = "variance_explained", initializer = "optimal_init"
#
#?Optimal_Clusters_KMeans
# Nearest Neighbor --------------------------------------------------------
# Build k-NN based gender measures from pairwise feature distances:
#   gender.knn.probs - per-face class proportions among its k nearest neighbors
#   gender.knn.dists - mean distance to the neighbors of the majority class
dmat <- Rfast::Dist(vfeats)
k <- 30
gender.knn.probs <- t(sapply(1:nrow(dmat), function(i) {
# NOTE(review): `[-i]` drops the i-th POSITION of the ordering, but the face
# itself (distance 0) sits at position 1 — `[-1]` was likely intended; verify.
ninds <- order(dmat[i,])[-i][1:k]
prop.table(table(vdf$gender[ninds]))
}))
gender.knn.dists <- sapply(1:nrow(dmat), function(i) {
# same self-exclusion caveat as above
ninds <- order(dmat[i,])[-i][1:k]
tab <- table(vdf$gender[ninds])
sel <- names(which.max(tab))
# mean distance from face i to its neighbors belonging to the majority gender
ds <- Rfast::Dist(rbind(vfeats[i,], vfeats[ninds,][vdf$gender[ninds]==sel,]))[1,-1]
mean(ds)
})
# sanity check: how the new measures relate to the average-face distances
cor(cbind(gender.knn.probs, gender.knn.dists, dist.to.avg, dist.to.male, dist.to.female))
# Get
taskdir <- "/data1/famface01/analysis/task_activity"
# ps = smallest class proportion per face (closeness to the category boundary)
ps <- apply(gender.knn.probs, 1, min)
regs <- cbind(avg=dist.to.avg, avg.male=dist.to.male, avg.female=dist.to.female,
knn.prob=ps, knn.dist=gender.knn.dists)
# Assemble per-subject design matrices: AFNI-derived nuisance regressors plus
# the k-NN gender regressors convolved to the scan timing.
df.xmats <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(regs, vdf, subj, timing, max(timing$run),
ignore.stdout=T, ignore.stderr=T)
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
# motion parameters are assumed to occupy columns 5:10 — TODO confirm layout
motion <- as.matrix(df.xmats[,5:10])
# Fit one mixed-effects model per ROI time series (runs nested in subjects).
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
fit = lme(y ~ faces + quests + motion + avg + avg.male + knn.prob + knn.dist,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
# convert p-values to (one-sided) z-equivalents via the t quantile with df=Inf
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
# ROI names look like "rh.xxx" / "lh.xxx": split hemisphere and region
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
# Plot right-hemisphere absolute t-values by regressor (same layout as above).
df.sfit2 <- subset(df.sfit, measure != "faces" & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
## manual knn
library(caret)
library(glmnet)
registerDoMC(24)
# Fit a caret model with repeated cross-validation and print the resampling
# performance for the best tuning parameters.
#
# Args:
#   X        feature matrix; columns are renamed feat01, feat02, ... so that
#            caret gets valid, unique predictor names.
#   y        outcome vector/factor.
#   mthd     caret method name (e.g. "knn", "svmLinear2").
#   tlen     tuneLength passed to caret::train.
#   nrepeats number of CV repeats (was hard-coded to 10).
#   nfolds   number of CV folds (was hard-coded to 8).
# Returns: the fitted caret `train` object.
run_caret <- function(X, y, mthd, tlen=20, nrepeats=10, nfolds=8) {
  colnames(X) <- sprintf("feat%02i", seq_len(ncol(X)))
  fitControl <- trainControl(
    method = "repeatedcv",
    number = nfolds,
    repeats = nrepeats,
    returnResamp = "final",
    savePredictions = "final",
    classProbs = TRUE,
    allowParallel = TRUE
  )
  fit <- train(X, y, method = mthd,
               trControl = fitControl,
               preProcess = c("center","scale"),
               tuneLength = tlen)
  # Report the cross-validated metrics of the selected tuning row.
  ri <- as.numeric(rownames(fit$bestTune))
  print(fit$results[ri,])
  return(fit)
}
# Average the per-repeat class probabilities that caret saved during repeated
# cross-validation, returning one row of mean Female/Male probabilities per
# training sample (identified by rowIndex).
#
# NOTE(review): relies on the global `vdf$gender` for an alignment sanity
# check — confirm vdf is in scope and ordered like the training data.
extract_probs <- function(fit) {
  preds <- fit$pred
  # Tag each held-out prediction with its CV repeat (strip the fold part).
  preds$Rep <- sub("Fold[0-9]+[.]", "", preds$Resample)
  preds <- preds[order(preds$Rep, preds$rowIndex),]
  # The original code computed this check but discarded the result; now emit
  # a warning when the observed labels do not line up with vdf$gender.
  if (!isTRUE(all.equal(preds$obs[preds$Rep=="Rep01"], vdf$gender)))
    warning("observed labels in fit$pred do not match vdf$gender", call. = FALSE)
  # Mean class probabilities per sample across all repeats.
  mean.preds <- ddply(preds, .(rowIndex), colwise(mean, .(Female,Male)))
  return(mean.preds)
}
# Fit SVM and kNN classifiers on the face features and extract cross-validated
# class probabilities as additional regressors.
X <- vfeats; y <- vdf$gender
library(kernlab)
fit <- run_caret(X, y, "svmLinear2", tlen=10)
fit2 <- run_caret(X, y, "svmLinear3", tlen=10)
mean.preds <- extract_probs(fit)
fit$finalModel$alpha
# kNN probabilities end up overwriting mean.preds from the SVM fit above
knnFit <- run_caret(X, y, "knn")
mean.preds <- extract_probs(knnFit)
# Get
taskdir <- "/data1/famface01/analysis/task_activity"
# smallest class proportion / probability per face = boundary closeness
ps <- apply(gender.knn.probs, 1, min)
ps2 <- apply(mean.preds[,-1], 1, min)
regs <- cbind(avg=dist.to.avg, avg.male=dist.to.male, avg.female=dist.to.female,
knn.prob=ps, knn.dist=gender.knn.dists, knn.pred=ps2)
round(cor(regs), 3)
# Rebuild the design matrices with the extended regressor set (now including
# the caret-derived knn.pred) and refit the per-ROI mixed-effects models.
df.xmats <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(regs, vdf, subj, timing, max(timing$run),
ignore.stdout=T, ignore.stderr=T)
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
# motion parameters assumed in columns 5:10 — TODO confirm layout
motion <- as.matrix(df.xmats[,5:10])
# NOTE(review): the model formula omits knn.pred even though it was added to
# regs above — confirm whether that regressor was meant to be included.
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
fit = lme(y ~ faces + quests + motion + avg + avg.male + knn.prob + knn.dist,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
# p-values to z-equivalents (t quantile with df=Inf)
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
fdr.tthr1 <- 2.7
df.sfit2 <- subset(df.sfit, measure != "faces" & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
# note: with higher k the class prob is higher!
# todo: maybe do some comparison here. a higher k, might have to do more with some category?
# Category ----------------------------------------------------------------
library(e1071)
# Get distance from hyperplane as measure of category
## from: http://mvpa.blogspot.com/2014/02/code-snippet-extracting-weights-from.html
X <- vfeats; y <- vdf$gender
fit <- svm(X, y, type="C-classification", kernel="sigmoid", cost=1, scale=FALSE, probability=T)
# NOTE(review): test.data is used here but only assigned further below; on a
# fresh workspace this line errors — it presumably worked via a stale
# test.data in the session. Reorder before rerunning.
ret <- predict(fit, test.data, decision.values=T, probability = T)
head(attr(ret, "decision.values")) # same as manual calc below
#tmp1 <- attr(ret, "decision.values")
#tmp2 <- attr(ret, "decision.values")
# manual hyperplane weights from the support vectors and their coefficients
w <- t(fit$coefs) %*% fit$SV;
b <- -1 * fit$rho # (sometimes called w0)
test.data <- X
# signed distance of each sample to the separating hyperplane
dist.hyp <- as.numeric((w %*% t(test.data)) + b) / sqrt(w %*% t(w))
labs <- sign((w %*% t(test.data)) + b)
table(vdf$gender, labs) # mostly gets it!
# final measure: use the decision values returned by predict()
dist.hyp <- attr(ret, "decision.values")
cor(cbind(dist.hyp, gender.knn.dists, dist.to.male)) # ok related
regs <- cbind(avg=dist.to.avg, avg.male=dist.to.male,
dist.hyp=dist.hyp, dist.hyp2=dist.hyp^2, dist.inv.hyp=1/dist.hyp)
round(cor(regs), 3)
# Same design-matrix + mixed-model pipeline as above, now with the SVM
# hyperplane-distance regressors.
df.xmats <- ldply(subjects, function(subj) {
cat(subj, "\n")
## get runs/faces/quests/motion regressors
xfile <- sprintf("%s/%s/face_deviations_unfam/nnet2_only_avgdist2.reml/xmat.1D", taskdir, subj)
xmat <- read.xmat(xfile, rm.nums=T)
xmat <- xmat[,colnames(xmat)!="avg_dist"]
# change the runs to one column indicating the run
rinds <- grep("^Run.*Pol", colnames(xmat))
rnums <- rowSums(sweep(xmat[,rinds], 2, 1:length(rinds), FUN="*"))
runs <- sprintf("run%02i", rnums)
# add the subject and runs
xmat1 <- cbind(subject=subj, run=runs, xmat[,-rinds])
## get regressors of interest
timing <- dat.vols[[subj]]$basics$timing
xmat2 <- afni.convolve.regressors(regs, vdf, subj, timing, max(timing$run),
ignore.stdout=T, ignore.stderr=T)
xmat <- cbind(xmat1, xmat2)
cat("\n")
return(xmat)
})
# motion parameters assumed in columns 5:10 — TODO confirm layout
motion <- as.matrix(df.xmats[,5:10])
# per-ROI mixed models with the linear hyperplane distance as regressor
sfits <- llply(1:ncol(ts.mats), function(ri) {
cat(ri, "\n")
fit = lme(y ~ faces + quests + motion + avg + avg.male + dist.hyp,
random = ~ 1|subject/run,
data=cbind(y=ts.mats[,ri], df.xmats),
control = lmeControl(opt = "optim"))
sfit <- summary(fit)
sfit
}, .parallel=T)
names(sfits) <- rnames
# put them in table form
df.sfit <- ldply(rnames, function(rname) {
# use all the p-values for the fdr (soon)
tTable <- sfits[[rname]]$tTable
# subset for reporting
tTable <- tTable[-c(1,3,4:9),] # rm covariates
ts <- tTable[,4]
# p-values to z-equivalents (t quantile with df=Inf)
zs <- qt(tTable[,5], Inf, lower.tail=F)
ps <- tTable[,5]
hemi <- sub("[.].*", "", rname)
name <- sub("[lr]h.", "", rname)
ord <- rords[rnames==rname]
sdf <- data.frame(hemi=hemi, roi=name, ord=ord,
measure=rownames(tTable),
tval=ts, pval=ps, zval=zs)
sdf
})
fdr.tthr1 <- 2.7
df.sfit2 <- subset(df.sfit, measure != "faces" & hemi == "rh")
ggplot(df.sfit2, aes(x=ord, y=abs(tval), group=measure)) +
geom_line(aes(color=measure)) +
scale_color_hue() +
geom_point(aes(fill=roi), color="black", size=3, shape = 21) +
scale_fill_brewer(palette = "Spectral") +
geom_hline(yintercept=fdr.tthr1, linetype='dotted') +
ylab("Absolute T-Value") +
scale_x_continuous(breaks=df.sfit2$ord, labels=df.sfit2$roi) +
scale_y_continuous(expand=c(0,0)) +
expand_limits(y=c(0,max(abs(df.sfit2$tval))*1.05)) +
theme(axis.line.y = element_blank(),
panel.grid.major.x = element_blank(),
axis.title.x = element_blank(),
axis.line.x = element_line(color="black", size=0.5))
# Viz ---------------------------------------------------------------------
# Low-dimensional embeddings of the face features, colored by gender, to
# visualize the male/female separation: MDS, two t-SNE variants, and PCA.
library(MASS)
# MDS
dmat <- Rfast::Dist(vfeats)
mds <- isoMDS(dmat, k=2)
plot(mds$points, col=as.numeric(vdf$gender)+1)
d <- as.dist(dmat)
tsne1 <- tsne::tsne(d)
tsne2 <- Rtsne::Rtsne(vfeats)
plot(tsne1, col=as.numeric(vdf$gender)+1)
plot(tsne2$Y, col=as.numeric(vdf$gender)+1)
pca <- prcomp(vfeats, retx=T)
plot(pca$x[,1], pca$x[,2], col=as.numeric(vdf$gender)+1)
# Fit a linear SVM in the 2D t-SNE space and plot its decision regions.
library(e1071)
gender <- vdf$gender
dat <- tsne1; colnames(dat) <- c("x", "y"); dat <- as.data.frame(dat)
fit = svm(gender~., data=dat, cost=0.25, type='C-classification', kernel='linear')
fit
plot(fit, dat, x~y)
?plot.svm
|
d12665d2256cdb418e06e83a7d9da81d1c4a3bb9
|
1353454ecb0f64f40bf02aaccb16568e2461d698
|
/R/atri_study_data.R
|
11beb1caaba12f075ca203e3b94567df9162fd97
|
[] |
no_license
|
rg08705/test
|
07386dec7a66b5a5f6325e8a206048482d8180d2
|
32130c005c345f3baf776fdd852c4dae120a4c27
|
refs/heads/master
| 2022-02-22T05:20:56.577707
| 2022-02-09T23:27:09
| 2022-02-09T23:27:09
| 191,802,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,889
|
r
|
atri_study_data.R
|
#' Read ATRI study data
#'
#' @param protocol The protocol name (e.g. 'a345-test-1').
#' @param data The study data source name. See below for examples for each data source. Additionally, allowable text input can be the data export label (e.g.'Participant list'), the relative public api pathway (e.g. '/subjects'), or the data lake filename (e.g. 'participant_list.csv' or 'participant_list').
#' @param datalake default is TRUE, which returns data from the latest data extract from the data lake s3 archive topic.
#' With datalake=FALSE, returns the current data using the public API (/public/api/v1/<relative API path>).
#' @return A data.frame of the study data.
#'
#' @seealso \code{\link{atri_api}}
#' @examples
#' \donttest{
#' #--Participant data--#
#' #Participant list
#' subjects<-atri_study_data(protocol="a345-test-1",data="subjects")
#' #Participant Attribute Value list
#' subject_attr<-atri_study_data(protocol="a345-test-1",data="subject_attr")
#' #Participant Attribute Datadic list
#' subject_attr_dd<-atri_study_data(protocol="a345-test-1",data="subject_attr_dd")
#'
#' #--Site data--#
#' #Site list
#' sites<-atri_study_data("a345-test-1","sites")
#' #Site Attribute Value list
#' site_attr<-atri_study_data("a345-test-1","site_attr")
#' #Site Attribute Datadic list
#' site_attr_dd<-atri_study_data("a345-test-1","site_attr_dd")
#'
#' #--Study attribute data--#
#' #Study Attribute Datadic list
#' study_attr_dd<-atri_study_data("a345-test-1","study_attr_dd")
#'
#' #--CRF metadata--#
#' #eCRF list
#' crfs<-atri_study_data("a345-test-1","crfs")
#' #Monitor Review list
#' monitor_review<-atri_study_data("a345-test-1","monitor_review")
#' #Form Lock list
#' form_lock<-atri_study_data("a345-test-1","form_lock")
#'
#' #--Event/Schedule of events data--#
#' #Event list
#' events<-atri_study_data("a345-test-1","events")
#' #Event Attribute Datadic list
#' event_attr_dd<-atri_study_data("a345-test-1","event_attr_dd")
#' #Schedule of Events
#' tracks<-atri_study_data("a345-test-1","tracks")
#' #Subject Track list
#' subject_track<-atri_study_data("a345-test-1","subject_track")
#'
#' #--Schedule of events by track code--#
#' #If datalake=TRUE, must use datalake filelabel or filename
#' soe_a3_default<-atri_study_data("a345-test-1","schedule_of_events_a3_default")
#' soe_a4_default<-atri_study_data("a345-test-1","schedule_of_events_a4_default.csv")
#' #If datalake=FALSE, must use API relative path
#' soe_a3_default<-atri_study_data("a345-test-1","/tracks/a3_default")
#'
#' #--Alternate examples for 'data' parameter input--#
#' #Data export label
#' subject_track.1<-atri_study_data("a345-test-1",data="Subject Track list")
#' #Relative API path
#' subject_track.2<-atri_study_data("a345-test-1",data="/subject/track/list")
#' #Data lake filename
#' subject_track.3<-atri_study_data("a345-test-1",data="subject_track_list.csv")
#' subject_track.4<-atri_study_data("a345-test-1",data="subject_track_list")
#'
#' #--Live data pull--#
#' subject_track_live<-atri_study_data("a345-test-1","subject_track", datalake=F)
#' }
#' @export
atri_study_data <- function(protocol, data, datalake=TRUE) {
  # Normalize the requested source name: lower case, leading slash stripped
  # (so both "/subjects" and "subjects" match the same branch).
  d <- tolower(data)
  if (startsWith(d, "/")) d <- sub("^/", "", d)
  # Map every accepted alias to a data-lake path (dl_api) and a public API
  # path (pub_api). Aliases cover: API relative path, data-lake filename
  # (with/without .csv), short code, and the data-export label.
  # NOTE: previous versions listed some API aliases with a leading "/"
  # (e.g. "/attribute_mng/subject/list"), which could never match because d
  # is slash-stripped above; those are fixed here. Misspelled "atrribute"
  # labels are kept for backward compatibility alongside correct spellings.
  #Subject APIs
  if (d %in% c("subjects","participant_list.csv","participant_list","participant list")){
    dl_api<-"edc/study_data/latest/participant_list.csv"
    pub_api<-"/subjects?output_format=csv"
  } else if (d %in% c("subjects/attribute_values","subject_attribute_value_list.csv",
                      "subject_attribute_value_list","subject_attr","participant attribute value list")) {
    dl_api<-"edc/study_data/latest/subject_attribute_value_list.csv"
    pub_api<-"/subjects/attribute_values?output_format=csv"
  } else if (d %in% c("attribute_mng/subject/list","subject_attribute_datadic.csv",
                      "subject_attribute_datadic","subject_attr_dd","participant attribute datadic list")) {
    dl_api<-"edc/study_data/latest/subject_attribute_datadic.csv"
    pub_api<-"/attribute_mng/subject/list?output_format=csv"
  #Site APIs
  } else if (d %in% c("sites","site_list.csv","site_list","site list")){
    dl_api<-"edc/study_data/latest/site_list.csv"
    pub_api<-"/sites?output_format=csv"
  } else if (d %in% c("site/attribute_values","site_attribute_value_list.csv",
                      "site_attribute_value_list","site_attr","site attribute value list")) {
    dl_api<-"edc/study_data/latest/site_attribute_value_list.csv"
    pub_api<-"/site/attribute_values?output_format=csv"
  } else if (d %in% c("attribute_mng/site/list","site_attribute_datadic.csv",
                      "site_attribute_datadic","site_attr_dd",
                      "site attribute datadic list","site atrribute datadic list")) {
    dl_api<-"edc/study_data/latest/site_attribute_datadic.csv"
    pub_api<-"/attribute_mng/site/list?output_format=csv"
  #Study APIs
  } else if (d %in% c("attribute_mng/study/list","study_attribute_datadic.csv",
                      "study_attribute_datadic","study_attr_dd",
                      "study attribute datadic list","study atrribute datadic list")) {
    dl_api<-"edc/study_data/latest/study_attribute_datadic.csv"
    pub_api<-"/attribute_mng/study/list?output_format=csv"
  #CRF APIs
  } else if (d %in% c("crfs","ecrf_list.csv","ecrf_list","ecrf list")){
    dl_api<-"edc/study_data/latest/ecrf_list.csv"
    pub_api<-"/crfs?output_format=csv"
  } else if (d %in% c("monitor","monitor_review_list.csv",
                      "monitor_review_list","monitor_review","monitor review list")) {
    dl_api<-"edc/crf_metadata/latest/monitor_review_list.csv"
    pub_api<-"/monitor?output_format=csv"
  } else if (d %in% c("lock","form_lock_list.csv",
                      "form_lock_list","form_lock","form lock list")) {
    dl_api<-"edc/crf_metadata/latest/form_lock_list.csv"
    pub_api<-"/lock?output_format=csv"
  #Event APIs
  } else if (d %in% c("events","event_list.csv","event_list","event list")){
    dl_api<-"edc/study_data/latest/event_list.csv"
    pub_api<-"/events?output_format=csv"
  } else if (d %in% c("attribute_mng/event/list","event_attribute_datadic.csv",
                      "event_attribute_datadic","event_attr_dd",
                      "event attribute datadic list","event atrribute datadic list")) {
    dl_api<-"edc/study_data/latest/event_attribute_datadic.csv"
    pub_api<-"/attribute_mng/event/list?output_format=csv"
  } else if (d %in% c("tracks","schedule_of_events_list.csv",
                      "schedule_of_events_list","soes","schedule of events")){
    dl_api<-"edc/study_data/latest/schedule_of_events_list.csv"
    pub_api<-"/tracks?output_format=csv"
  } else if (d %in% c("subject/track/list","subject_track_list.csv",
                      "subject_track_list","subject_track","subject track list")){
    dl_api<-"edc/study_data/latest/subject_track_list.csv"
    pub_api<-"/subject/track/list?output_format=csv"
  } else {
    # Fallback: search the data-lake listing for a file matching the request.
    d <- gsub('.csv', '', d, fixed=TRUE)
    files.list <- atri_api(protocol, path=paste0('/docs/s3_archive/data_lake/items/edc/study_data/latest/?format=json&q=', utils::URLencode(d)))
    files.list <- files.list[grepl(paste0(d, ".csv"), files.list$label, fixed=TRUE), ]
    if (nrow(files.list) == 1 && isTRUE(datalake)) {
      # exactly one data-lake match: use its label as the download path
      dl_api <- files.list$label
    } else if (substr(data, 1, 1) == '/' && isFALSE(datalake)) {
      # live pull: trust the caller-supplied relative API path
      pub_api <- paste0(data, "?output_format=csv")
    } else {
      stop(paste0(data, " pathway not set up. Check documentation or use atri_api function to obtain data source."))
    }
  }
  if (datalake) {
    # Download the latest CSV extract to a temp file, read it, and clean up
    # (the temp file was previously left behind).
    tmpcsv <- paste0(tempfile(), ".csv")
    on.exit(unlink(tmpcsv), add = TRUE)
    resp <- atri_api(protocol, path=paste0("/docs/s3_archive/data_lake/download/",
                                           dl_api), direct=FALSE, file = tmpcsv)
    output <- read.csv(tmpcsv)
  } else {
    output <- atri_api(protocol, path=pub_api, direct=FALSE)
  }
  return(output)
}
|
34ca7dee075187d7ac5a633813575af228fc2177
|
340f0cdacd7bd1994627cb34203915bd17d56186
|
/man/amptest-class.Rd
|
8fd578d3777cc45ab181ac3cce9380719bfa4760
|
[] |
no_license
|
PCRuniversum/chipPCR
|
af593b813f8c59d5027d8a118955666f0fff283e
|
b7c751a8716c814c63825d50007699dbfb7a22f4
|
refs/heads/master
| 2023-03-11T02:18:02.994570
| 2021-02-27T20:04:47
| 2021-02-27T20:04:47
| 19,281,268
| 2
| 3
| null | 2020-07-27T13:48:14
| 2014-04-29T15:22:30
|
R
|
UTF-8
|
R
| false
| false
| 3,654
|
rd
|
amptest-class.Rd
|
\name{amptest}
\docType{class}
\alias{amptest}
\alias{amptest-class}
\alias{summary.amptest}
\alias{summary,amptest-method}
\alias{show.amptest}
\alias{show,amptest-method}
\alias{plot.amptest}
\alias{plot,amptest-method}
\alias{plot,amptest,ANY-method}
\title{Class \code{"amptest"}}
\description{
An S4 class containing the output of the \code{\link[chipPCR]{amptester}}
function.
}
\section{Slots}{
\describe{
\item{\code{.Data}:}{\code{"numeric"} is a vector containing the
fluorescence values.}
\item{\code{decisions}:}{\code{"logical"} contains outcomes of various tests.
\code{shap.noisy} indicates the presence of noise, \code{lrt.test} states if data are
likely from an amplification curve, and \code{tht.dec}
defines if the amplification is "positive" or "negative".}
\item{\code{noiselevel}:}{ \code{"numeric"} user-defined threshold for a
significant amplification signal.}
\item{\code{background}:}{range of the background signal in the
amplification curve.}
\item{\code{polygon}:}{The pco test determines if the points in an amplification curve
(like a polygon) are in a "clockwise" order. The sum over the edges
result in a positive value if the amplification curve is "clockwise"
and is negative if the curve is counter-clockwise.}
\item{\code{slope.ratio}:}{ratio of the slopes at the start and the end of
the exponential phase.}
}
}
\section{Methods}{
\describe{
\item{summary}{\code{signature(object = "amptest")}: prints summary
of the object. Silently returns \code{vector} of all calculated parameters.}
\item{show}{\code{signature(object = "amptest")}: prints only
\code{.Data} slot of the object.}
\item{plot}{\code{signature(object = "amptest")}: plots input data and graphical
interpretation of \code{link{amptester}} tests' results.}
}
}
\author{
Stefan Roediger, Michal Burdukiewicz
}
\seealso{
\code{\link[chipPCR]{amptester}}
}
\examples{
# Compare a positive and a negative amplification reaction.
# First simulate positive reaction (fluo.pos) and than the
# negative reaction (fluo.neg).
# Simulation of an amplification curve with some noise and a high signal.
fluo.pos <- AmpSim(cyc = 1:40, noise = TRUE)[, 2]
ampt.pos <- amptester(fluo.pos, manual = TRUE, background = c(0, 15),
noiselevel = 0.15)
# Plot amplification curve and result of amptester
plot(fluo.pos, xlab = "Cycles", ylab = "RFU", pch = 19, ylim = c(0, 1))
lines(ampt.pos, col = 2, pch = 19, type = "b")
legend(5, 1, c("Raw data", "amptester output"),
col = c(1,2,3), bty = "n", pch = c(19, 19))
# Borders for background calculation
abline(v = c(0,15), col = 2)
# Level for background threshold
abline(h = 0.15, col = 3, lty = 2)
text(5, 0.18, "Noise threshold")
# Summary of amptester results
summary(ampt.pos)
# Simulation of an amplification curve with high noise and a low signal.
fluo.neg <- AmpSim(cyc = 1:40, noise = TRUE, ampl = 0.13, nnl = 0.4)[, 2]
ampt.neg <- amptester(fluo.neg, manual = TRUE, background = c(0, 15),
noiselevel = 0.15)
# Plot amplification curve and result of amptester
plot(fluo.neg, xlab = "Cycles", ylab = "RFU", pch = 19, ylim = c(0, 1))
lines(ampt.neg, col = 2, pch = 19, type = "b")
legend(5, 1, c("Raw data", "amptester output"),
col = c(1,2,3), bty = "n", pch = c(19, 19))
# Borders for background calculation
abline(v = c(0,15), col = 2)
# Level for background threshold
abline(h = 0.15, col = 3, lty = 2)
text(5, 0.18, "Noise threshold")
# Summary of amptester results
summary(ampt.neg)
#plot amptester results
plot(ampt.neg)
}
\keyword{classes}
|
3f821720c0c8c24343ac577f45d384db963da86e
|
e5e7b95e19071b07bd492dffe0386908e9ec41bb
|
/99-ReferenceApps/shiny_apps/pitchRx2/02_create_pa.R
|
12a3d743b6aacaef464816e450c2b2f6e4bad453
|
[
"MIT"
] |
permissive
|
nwstephens/shiny-day-2016
|
f92c711b15adc80ebe302199a6762a2c958db041
|
dc365e0cab07d1252f1a4085d3d12ca28ed852e5
|
refs/heads/master
| 2021-01-09T20:29:05.486036
| 2016-06-29T19:51:15
| 2016-06-29T19:51:15
| 62,252,998
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,175
|
r
|
02_create_pa.R
|
library("dplyr")
library("DBI")
library("magrittr")
# connect to remote database
source("01_db_connect.R")
# From the default tables, create a table with
# relevant attributes on both atbat/pitch level.
# This saves us repeating the expensive operation of joining tables
if (!dbExistsTable(db$con, "pa_full")) {
pa_full <- tbl(db, "pitch") %>%
left_join(tbl(db, "atbat"), by = c("num", "gameday_link"))
# materialize the join as a persistent table on the server
compute(pa_full, name = "pa_full", temporary = FALSE)
}
# Slim plate-appearance table: only the columns the app queries, NA rows dropped.
if (!dbExistsTable(db$con, "pa")) {
pa <- tbl(db, "pa_full") %>%
select(pitcher_name, batter_name, gameday_link, date, pitch_type, stand, x0:az) %>%
na.omit
compute(pa, name = "pa", temporary = FALSE)
}
# search parameters that need to be indexed
ind <- c("pitcher_name", "batter_name", "gameday_link", "date", "pitch_type", "stand")
# Build and submit a CREATE INDEX statement on the `pa` table covering the
# given column set (works for single- and multi-column indexes). The index
# name is pa_<cols joined by "_">. Relies on the global `db` connection
# opened in 01_db_connect.R.
create_index <- function(x) {
  index_suffix <- paste(x, collapse = "_")
  column_list <- paste(x, collapse = ", ")
  statement <- paste0("CREATE INDEX pa_", index_suffix, " ON pa(", column_list, ")")
  dbSendQuery(db$con, statement)
}
# create all single column indexes
lapply(ind, create_index)
# create all possible multi-column indexes
# (every combination of 2..6 of the 6 search columns — 57 composite indexes;
# deliberate here since the table is read-only after creation)
for (i in seq.int(2, length(ind))) {
apply(combn(ind, i), 2, create_index)
}
|
81c883009f1410d115cbed2314d21abaed192538
|
32a061c47b1b870514d344b27d95d3b030138353
|
/make_latlon_list_clim.R
|
85f3a36393b1340b1e9cf90326861db10d2742c2
|
[] |
no_license
|
jedman/meiyu-jet-data
|
c18be0cbaec26eb0a793febbc759dfaa562a54df
|
bc910d7300f57c9b27b9f693675a4c6d5df1067f
|
refs/heads/master
| 2021-01-10T21:17:58.336114
| 2015-03-16T17:01:42
| 2015-03-16T17:01:42
| 32,340,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,696
|
r
|
make_latlon_list_clim.R
|
### adds up jet counts from dates in a list and makes composites
#library(fields)
#loc is a list with x and y components defining interpolation grid
# Accumulate jet-count kernel-density anomalies (observed minus climatology)
# over a list of dates, interpolated onto the grid given by `loc`.
#
# Args:
#   dates  vector of dates (coercible via as.Date) to composite over.
#   loc    list with components x and y defining the interpolation grid.
# Returns: matrix (length(loc$x) x length(loc$y)) of summed density anomalies.
#
# NOTE(review): depends on globals data_eastChbig, est_kernel and
# make_latlon_clim being defined in the calling environment.
make_latlon_list_clim <- function(dates,loc){
require(fields)
require(KernSmooth)
latlon_ECbig <- list()
kest_anom <- mat.or.vec(length(loc$x), length(loc$y))
k_m <-list()
k_z <- list()
tmp <- mat.or.vec(length(loc$x), length(loc$y))
tmp2 <- mat.or.vec(length(loc$x), length(loc$y))
#compute mean month and day in the list
print(mean(as.Date(format(as.Date(dates),'%m-%d'),'%m-%d')))
#limit <- which(eof1_yearly >= lim)
#limit2 <-which(eof1_yearly <= -lim)
# loc$x <- seq(90, 130, by=0.5)
# loc$y <- seq(30,58, by=0.5)
for (i in seq(length(dates))){
k_m <-list()
k_c <- list()
# locate this date's records in the per-year data list
y = format(as.Date(dates[i]),'%Y')
datayr <- paste('yr',y,sep='')
index <- which(as.Date(data_eastChbig[[datayr]]$time)==dates[i])
# could specify a range of days by adding an integer to the date
utmp <- data_eastChbig[[datayr]]$u[index]
utmp_min <- mean(utmp)+sd(utmp) #get u threshold to choose on
#utmp_min <- mean(utmp) #get u threshold to choose on
#utmp_min <- 0 # for consistency testing
#index <- which(as.Date(data_eastChbig[[datayr]]$time) == dates[i] & data_eastChbig[[datayr]]$u > utmp_min)
# accumulate lat/lon/u points seen so far (composite grows with each date)
latlon_ECbig$lat <- append(latlon_ECbig$lat,data_eastChbig[[datayr]]$lat[index])
latlon_ECbig$lon <- append(latlon_ECbig$lon,data_eastChbig[[datayr]]$lon[index])
latlon_ECbig$u <- append(latlon_ECbig$u,data_eastChbig[[datayr]]$u[index])
# kernel density of the observed points and of the climatology for this date
kest_m <- est_kernel(latlon_ECbig)
clim <- make_latlon_clim(dates[i])
kest_c <- est_kernel(clim)
k_m$x <- kest_m$x1
k_m$y <- kest_m$x2
k_m$z <- kest_m$fhat
k_c$x <- kest_c$x1
k_c$y <- kest_c$x2
k_c$z <- kest_c$fhat
# interpolate both density fields onto the common grid `loc`
test_c <- interp.surface.grid(k_c ,loc)
test_m <- interp.surface.grid(k_m ,loc)
tmp2 <-test_c$z
tmp <- test_m$z
# treat out-of-range grid cells (NA from interpolation) as zero density
tmp2[is.na(tmp2)] <- 0
tmp[is.na(tmp)] <- 0
kest_anom <- kest_anom + (tmp - tmp2)
}
#utmp_min <- mean(latlon_ECbig$u) +sd(latlon_ECbig$u)
#index <- which(latlon_ECbig$u > utmp_min)
#latlon_ECbig$lat <- latlon_ECbig$lat[index]
#latlon_ECbig$lon <- latlon_ECbig$lon[index]
#latlon_ECbig$neg$lat= latlon_ECbig$neg$lat[index]
#latlon_ECbig$neg$lon= latlon_ECbig$neg$lon[index]
#latlon_ECbig$neg$u= latlon_ECbig$neg$u[index]
#latlon_ECbig$pos$lat= latlon_ECbig$pos$lat[index2]
#latlon_ECbig$pos$lon= latlon_ECbig$pos$lon[index2]
#latlon_ECbig$pos$u= latlon_ECbig$pos$u[index2]
return(kest_anom)
}
|
f3dd623e5c9646b9a5a915bf98e4ab175c1bdc4d
|
076862dd5f008d098230b61c1ed71fec629c69d0
|
/man/get_sv_threshold.Rd
|
39b26eb400c5840a678c8d503cd347c99cf8319f
|
[
"MIT"
] |
permissive
|
suziepalmer10/r_jive
|
c78ec634273092b6bb5f98d76cf2ff0e4c6cfaa0
|
efa364b8d78752e71b15629d8a659b6cf90cd340
|
refs/heads/master
| 2023-03-18T19:30:26.404721
| 2020-10-15T20:58:42
| 2020-10-15T20:58:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
get_sv_threshold.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ajive_decomposition.R
\name{get_sv_threshold}
\alias{get_sv_threshold}
\title{The singular value threshold.}
\usage{
get_sv_threshold(singular_values, rank)
}
\arguments{
\item{singular_values}{Numeric. The singular values.}
\item{rank}{Integer. The rank of the approximation.}
}
\description{
Computes the singular value threshold for the data matrix (half way between the rank and rank + 1 singular value).
}
|
74c8507ea55706a95032e6177832248ef16ad707
|
f7f1aee298f409a54b00b74ee16ebc6d361c1216
|
/tmp/snp.annot.R
|
a293c3f416f74ba63a8fca8447e51d532a88fcf0
|
[] |
no_license
|
cmcouto-silva/gdstat
|
00db8e492f5695052698b5b6a2bcdfb292033160
|
05be25edf9f57feb4311fd9086037ae91f0bf3eb
|
refs/heads/master
| 2021-06-29T16:29:14.601903
| 2020-10-14T23:31:43
| 2020-10-14T23:31:43
| 173,332,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,836
|
r
|
snp.annot.R
|
# Annotate SNPs with gene symbols from NCBI dbSNP.
#
# Queries the NCBI eutils efetch service in batches of `batchsize` rsIDs and
# returns, for each record, a comma-separated string of GENE= annotations
# (empty string when no gene annotation is present).
#
# Args:
#   snpIDs    character vector of Reference SNP IDs (each starting with "rs").
#   batchsize integer, number of SNPs per eutils request (URL-length limit).
#   rm.loc    logical; if TRUE, drop uncharacterized "LOC..." gene symbols.
# Returns: character vector of gene annotations.
# NOTE: performs network I/O (one HTTP request per batch).
snp.annot <- function (snpIDs, batchsize = 250L, rm.loc = FALSE) {
  if (length(snpIDs) == 0)
    stop("Length of SNP vector must be equal or greater than 1.")
  if (!all(grepl("^rs", snpIDs)))
    stop("All SNPs must be codified as Reference SNP ID (starting with 'rs').")
  # Fetch and parse annotations for a single batch of rsIDs.
  snp_annot_function <- function(snp_ids) {
    url <- paste0("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?",
                  "db=snp&id=", paste(snp_ids, collapse = ","), "&report=DocSet",
                  "&tool=LD&email=cmcouto.silva@gmail.com")
    annot <- readLines(url)
    # Each record starts with its rsID: split the response into per-SNP chunks.
    indexes <- grep(pattern = "^rs", annot)
    indexes <- append(x = indexes, values = length(annot) + 1L)
    total <- length(indexes) - 1L
    annot.list <- vector("list", total)
    for (i in seq_len(total)) annot.list[[i]] <- annot[indexes[i]:(indexes[i + 1L] - 1L)]
    # Extract the GENE= entries from each record (may be absent).
    annot.list <- lapply(annot.list, function(DocSet) {
      index <- grep("GENE=", DocSet, fixed = TRUE)
      gene <- unlist(strsplit(DocSet[index], "GENE=", fixed = TRUE))
      gene[gene != ""]
    })
    # Records without any gene get an empty string so the output stays
    # aligned with the input order.
    annot.list <- lapply(annot.list, function(x) {
      if (length(x) == 0) "" else x
    })
    do.call(c, annot.list)
  }
  # Process the input in order-preserving batches.
  batches <- split(snpIDs, ceiling(seq_along(snpIDs) / batchsize))
  annotation <- unlist(lapply(batches, snp_annot_function), use.names = FALSE)
  if (rm.loc) {
    # Strip uncharacterized locus symbols ("LOC...") from each annotation.
    genes <- vapply(annotation, function(annot) {
      genes <- unlist(strsplit(annot, ','))
      genes <- genes[!grepl("LOC", genes)]
      paste(genes, collapse = ",")
    }, character(1))
    annotation <- unname(genes)
  }
  annotation
}
|
04a256a7a07fa02ecf2059267cebfdb51657fce7
|
912ce1f7c8e66dd97a62e3a2e5698c4f7116c7d9
|
/Calculation_Stats100cell.R
|
d73fb2e5230f64969083ac05c97d1dcd1b859372
|
[] |
no_license
|
olympiahardy/masters_code
|
e1786821d498de02f9b5eaf4abc295359a0a0980
|
e7c80ae054bf0496336d8b0177dc16148c2bc1d8
|
refs/heads/main
| 2023-01-05T16:04:40.870125
| 2020-11-05T17:14:31
| 2020-11-05T17:14:31
| 310,346,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,300
|
r
|
Calculation_Stats100cell.R
|
# Calculating tp, tn. fp, fn
# MAST
ground_truth <- readRDS(file = "/datastore/2505621h/100cell_Analysis/ground_truth_list_FINAL")
mast_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_MAST_results")
names(mast_results)
DE <- ground_truth[[1]]
nonDE <- ground_truth[[2]]
mast_results <- tibble::rownames_to_column(mast_results, var = "gene")
sig_mast_results <- subset(mast_results, mast_results$FDR < 0.05)
non_sig_mast_results <- subset(mast_results, mast_results$FDR >= 0.05)
tp_mast <- sig_mast_results[sig_mast_results$gene %in% DE$`unlist(DE_genes)`,]
fp_mast <- sig_mast_results[sig_mast_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_mast <- non_sig_mast_results[non_sig_mast_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_mast <- non_sig_mast_results[non_sig_mast_results$gene %in% DE$`unlist(DE_genes)`,]
# SigEMD: same tp/fp/tn/fn classification, split on the adjusted p-value.
emd_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_SigEMD_results")
emd_results <- tibble::rownames_to_column(emd_results, var = "gene")
sig_emd_results <- subset(emd_results, emd_results$adjpvalue < 0.05)
non_sig_emd_results <- subset(emd_results, emd_results$adjpvalue>= 0.05)
tp_emd <- sig_emd_results[sig_emd_results$gene %in% DE$`unlist(DE_genes)`,]
fp_emd <- sig_emd_results[sig_emd_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_emd <- non_sig_emd_results[non_sig_emd_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_emd <- non_sig_emd_results[non_sig_emd_results$gene %in% DE$`unlist(DE_genes)`,]
# DEsingle: same tp/fp/tn/fn classification, split on the adjusted p-value.
deSingle_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/deSingle_results_drosophila_FINAL")
deSingle_results <- tibble::rownames_to_column(deSingle_results, var = "gene")
sig_deSingle_results <- subset(deSingle_results, deSingle_results$adjpvalue < 0.05)
non_sig_deSingle_results <- subset(deSingle_results, deSingle_results$adjpvalue>= 0.05)
tp_deSingle <- sig_deSingle_results[sig_deSingle_results$gene %in% DE$`unlist(DE_genes)`,]
fp_deSingle <- sig_deSingle_results[sig_deSingle_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_deSingle <- non_sig_deSingle_results[non_sig_deSingle_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_deSingle <- non_sig_deSingle_results[non_sig_deSingle_results$gene %in% DE$`unlist(DE_genes)`,]
# RankStat
# The five rank-statistic variants below all repeat the same recipe:
# load the results, lift gene IDs out of the rownames, split at FDR < 0.05,
# and intersect with the ground-truth DE / non-DE gene lists (DE, nonDE
# loaded in the MAST section above).
# RankProducts
RP_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_result_RP_results")
RP_results <- tibble::rownames_to_column(RP_results, var = "gene")
sig_RP_results <- subset(RP_results, RP_results$FDR < 0.05)
non_sig_RP_results <- subset(RP_results, RP_results$FDR>= 0.05)
tp_RP <- sig_RP_results[sig_RP_results$gene %in% DE$`unlist(DE_genes)`,]
fp_RP <- sig_RP_results[sig_RP_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_RP <- non_sig_RP_results[non_sig_RP_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_RP <- non_sig_RP_results[non_sig_RP_results$gene %in% DE$`unlist(DE_genes)`,]
# RankSum
RS_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_result_RSum_results")
RS_results <- tibble::rownames_to_column(RS_results, var = "gene")
sig_RS_results <- subset(RS_results, RS_results$FDR < 0.05)
non_sig_RS_results <- subset(RS_results, RS_results$FDR>= 0.05)
tp_RS <- sig_RS_results[sig_RS_results$gene %in% DE$`unlist(DE_genes)`,]
fp_RS <- sig_RS_results[sig_RS_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_RS <- non_sig_RS_results[non_sig_RS_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_RS <- non_sig_RS_results[non_sig_RS_results$gene %in% DE$`unlist(DE_genes)`,]
# RankDistance
RD_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_result_RD_results")
RD_results <- tibble::rownames_to_column(RD_results, var = "gene")
sig_RD_results <- subset(RD_results, RD_results$FDR < 0.05)
non_sig_RD_results <- subset(RD_results, RD_results$FDR>= 0.05)
tp_RD <- sig_RD_results[sig_RD_results$gene %in% DE$`unlist(DE_genes)`,]
fp_RD <- sig_RD_results[sig_RD_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_RD <- non_sig_RD_results[non_sig_RD_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_RD <- non_sig_RD_results[non_sig_RD_results$gene %in% DE$`unlist(DE_genes)`,]
# ReverseRankDistance
R_RD_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_result_R_RD_results")
R_RD_results <- tibble::rownames_to_column(R_RD_results, var = "gene")
sig_R_RD_results <- subset(R_RD_results, R_RD_results$FDR < 0.05)
non_sig_R_RD_results <- subset(R_RD_results, R_RD_results$FDR>= 0.05)
tp_R_RD <- sig_R_RD_results[sig_R_RD_results$gene %in% DE$`unlist(DE_genes)`,]
fp_R_RD <- sig_R_RD_results[sig_R_RD_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_R_RD <- non_sig_R_RD_results[non_sig_R_RD_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_R_RD <- non_sig_R_RD_results[non_sig_R_RD_results$gene %in% DE$`unlist(DE_genes)`,]
# DifferentialRankProducts
DRP_results <- readRDS(file = "/datastore/2505621h/100cell_Analysis/drosophila_result_DRP_results")
DRP_results <- tibble::rownames_to_column(DRP_results, var = "gene")
sig_DRP_results <- subset(DRP_results, DRP_results$FDR < 0.05)
non_sig_DRP_results <- subset(DRP_results, DRP_results$FDR>= 0.05)
tp_DRP <- sig_DRP_results[sig_DRP_results$gene %in% DE$`unlist(DE_genes)`,]
fp_DRP <- sig_DRP_results[sig_DRP_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_DRP <- non_sig_DRP_results[non_sig_DRP_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_DRP <- non_sig_DRP_results[non_sig_DRP_results$gene %in% DE$`unlist(DE_genes)`,]
# DESeq2 (pseudobulk): pull the result table out of the comparison object,
# treat genes with missing p-values as non-significant (NA -> 1), and classify
# calls against the ground truth at adjusted p-value < 0.05, exactly as for
# the other methods above.
comp_DEseq2 <- readRDS(file = "/datastore/2505621h/100cell_Analysis/comp_data_drosophila_pseudo_DESeq2.rds" )
deseq2_results <- comp_DEseq2@result.table
# DESeq2 reports NA for filtered/outlier genes; setting them to 1 makes every
# gene fall into the non-significant set rather than being dropped.
deseq2_results[is.na(deseq2_results)] <- 1
deseq2_results <- tibble::rownames_to_column(deseq2_results, var = "gene")
sig_deseq2_results <- subset(deseq2_results, deseq2_results$adjpvalue < 0.05)
# BUG FIX: this line previously subset DRP_results (copy-paste from the block
# above) and used the raw pvalue column. The non-significant set must be the
# complement of sig_deseq2_results within deseq2_results, on the same
# adjpvalue column and the same 0.05 cut-off.
non_sig_deseq2_results <- subset(deseq2_results, deseq2_results$adjpvalue >= 0.05)
tp_deseq2 <- sig_deseq2_results[sig_deseq2_results$gene %in% DE$`unlist(DE_genes)`,]
fp_deseq2 <- sig_deseq2_results[sig_deseq2_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
tn_deseq2 <- non_sig_deseq2_results[non_sig_deseq2_results$gene %in% nonDE$`unlist(nonDE_genes)`,]
fn_deseq2 <- non_sig_deseq2_results[non_sig_deseq2_results$gene %in% DE$`unlist(DE_genes)`,]
|
59e85a49857793deba1ef1ba9c2d233eae183611
|
cf51091e6e697cb43796cb8e30bfd9ea8e5e95cf
|
/R/no_pooling.R
|
20290ed187775fec88c3a0269c5802b5f8028515
|
[] |
no_license
|
bachl/workshop_panel
|
bfe7e2403c57f5068b5fe69bcc9a99e7b278c0ca
|
8d47ced874e62cde9d569d012d1128468bffc56e
|
refs/heads/master
| 2022-10-13T09:51:40.945083
| 2020-06-14T19:56:46
| 2020-06-14T19:56:46
| 263,283,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 436
|
r
|
no_pooling.R
|
## ---- no-pooling
# No-pooling estimate: fit a separate OLS model (verh1 ~ verhint1) within each
# panelist (IDsosci), then average the per-person slopes.
# NOTE(review): assumes `d` plus dplyr/tidyr/purrr/broom are already in
# scope — defined elsewhere in the project.
d %>%
  group_by(IDsosci) %>%
  # One nested data frame of observations per person.
  nest() %>%
  # Fit lm() per person and tidy each coefficient table.
  mutate(mdls = map(data, ~tidy(lm(verh1 ~ verhint1, data = .x)))) %>%
  unnest(mdls) %>%
  ungroup() %>%
  select(-data) %>%
  # Drop fits with missing coefficients or infinite t statistics
  # (e.g. too few observations or no within-person variance).
  na.omit() %>%
  filter(statistic != Inf) %>%
  # Keep only the slope of interest.
  filter(term == "verhint1") %>%
  # Rounding happens BEFORE pooling, so the summary is computed on the
  # rounded per-person estimates shown by print().
  mutate_if(is.numeric, round, 2) %>%
  print %>%
  # Pool: mean slope and root-mean-square of the standard errors.
  summarise(estimate = mean(estimate),
            std.error = sqrt(mean(std.error^2))) # simple approximation
|
d2e879bc09554e5049963ef84a6c70a260d5c8e0
|
00fecd0d9aabc870090f3b9c7d8a62e00071f782
|
/man/Database.cal.Rd
|
be2c7d82bd76ce9615ab74c07d508c0d2ffc760b
|
[
"MIT"
] |
permissive
|
liamlachs/SizeExtractR
|
f1595ccc38a3025271b310689833e1c945495179
|
ea890cfa43f66462059d0bb45b626021cd4e43ea
|
refs/heads/master
| 2023-04-09T22:13:16.006903
| 2022-02-07T11:25:45
| 2022-02-07T11:25:45
| 368,260,432
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
Database.cal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Database.cal.R
\docType{data}
\name{Database.cal}
\alias{Database.cal}
\title{Calibrated Dataframe}
\format{
An object of class dataframe
}
\usage{
data(Database.cal)
}
\description{
A data frame of the example data provided with the package, as it appears after the area metrics have been calibrated using SizeExtractR::Calibrate_Database().
}
\examples{
head(Database.cal)
}
\keyword{datasets}
|
8e2385690b55c8082d159bda0615e71c9553bf1d
|
0dea251958e6d1f1bfdaad8f94eef10d77bac3c4
|
/shinyapp/server.r
|
a92f386078f95c8433abdc6de7192ce3c20bee17
|
[] |
no_license
|
dbancea/CourseraDataScienceCapstone
|
c3416a5251a8963359f21e017d18fe82ee7008d5
|
77b0d3935ac9deaaa756e8aaa05c7c350e34bb56
|
refs/heads/master
| 2016-09-06T12:12:56.424384
| 2015-08-23T15:22:36
| 2015-08-23T15:22:36
| 41,254,410
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
server.r
|
# Shiny server for the word-prediction app.
# Each render lower-cases the entered text and forwards it, together with the
# requested number of predictions, to the predictor functions from
# predict_word.R; results are shown as minimal tables (no search box,
# paging, or info footer).
library(shiny)
library(data.table)
source("predict_word.R")  # defines word1() and word2()

shinyServer(function(input, output) {
  # Shared DataTable display options for both outputs.
  table_opts <- list(searching = FALSE, paging = FALSE, info = FALSE)

  output$word1 <- renderDataTable({
    query <- tolower(as.character(input$text))
    data.table("Predicted word" = word1(query, input$predictions))
  }, options = table_opts)

  output$word2 <- renderDataTable({
    query <- tolower(as.character(input$text))
    data.table("Predicted words" = word2(query, input$predictions))
  }, options = table_opts)
})
|
5b8613a66b3eace25b35a407bc174c9192e678f9
|
e8ef4a64de01db62556e291294db23b3072f7739
|
/man/normalizeWithMA.Rd
|
37f61aa7746086d6a5d7ce0db8f84ed63a02f1d5
|
[] |
no_license
|
cran/MDimNormn
|
4750fc698a74721e66bfae5abbd22ac6a4488481
|
af6a53d59af83ec424abbc86774dda8cca9fe8d9
|
refs/heads/master
| 2021-01-02T08:57:53.924892
| 2015-08-12T00:00:00
| 2015-08-12T00:00:00
| 40,618,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,837
|
rd
|
normalizeWithMA.Rd
|
\name{normn MA}
\alias{normn_MA}
\title{Multi-dimensional MA normalization for plate effect}
\description{
Normalize data to minimize the difference among the subgroups of the samples generated by experimental factor such as multiple plates (batch effects)\cr
- the primary method is Multi-MA, but other fitting function, \emph{f} in manuscript (e.g. loess) is available, too.\cr
This method is based on the assumptions stated below\cr
\enumerate{
\item The geometric mean value of the samples in each subgroup (or plate) for a single target is ideally same as those from the other subgroups.
\item The subgroup (or plate) effects that influence those mean values for multiple observed targets are dependent on the values themselves. (intensity dependent effects)
}
}
\usage{
normn_MA(mD, expGroup, represent_FUN= function(x) mean(x, na.rm= T),
fitting_FUN= NULL, isLog= TRUE)
}
\arguments{
\item{mD}{ a \code{matrix} of measured values in which columns are the measured molecules and rows are samples }
\item{expGroup}{ a \code{vector} of an experimental grouping variable such as plate. The length of \code{expGroup} must be the same as the number of rows of \code{mD}.}
\item{represent_FUN}{ a \code{function} that computes representative values for each experimental group (e.g. plate). The default is mean ignoring any NA }
\item{fitting_FUN}{ \code{NULL} or a \code{function} that fits to data in MA-coordinates.
If it is \code{NULL} as the default, 'Multi-MA' method is employed.
If a \code{function} is used, two arguments of \code{m_j} and \code{A} are required, which are \eqn{\mathbf{m}_j} coordinate in \eqn{M_d} and \eqn{A} coordinate, respectively. }
\item{isLog}{ TRUE or FALSE, if the normalization should be conducted after log-transformation. The affinity proteomics data from suspension bead arrays is recommended to be normalized using the default, \code{isLog = TRUE}.}
}
\value{
The data after normalization in a \code{matrix}
}
\references{
Hong M-G, Lee W, Pawitan Y, Schwenk JM (201?)
Multi-dimensional normalization of plate effects for multiplexed applications
\emph{unpublished}
}
\author{Mun-Gwan Hong <\email{mun-gwan.hong@scilifelab.se}>}
\examples{
data(sba)
B <- normn_MA(sba$X, sba$plate) # Multi-MA normalization
# MA-loess normalization
B <- normn_MA(sba$X, sba$plate, fitting_FUN= function(m_j, A) loess(m_j ~ A)$fitted)
# weighted linear regression normalization
B <- normn_MA(sba$X, sba$plate, fitting_FUN= function(m_j, A) {
beta <- lm(m_j ~ A, weights= 1/A)$coefficients
beta[1] + beta[2] * A
})
# robust linear regression normalization
if(any(search() == "package:MASS")) { # executable only when the MASS package is loaded.
B <- normn_MA(sba$X, sba$plate, fitting_FUN= function(m_j, A) {
beta <- rlm(m_j ~ A, maxit= 100)$coefficients
beta[1] + beta[2] * A
})
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.