blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d7e181dcd4b576c809ea686bfaa2c9e1b5626a5
|
4a3b76a7cc3bf280ba3399acefba1f72e33bbfd9
|
/R/report.R
|
0182e09bcc2f1a414b7c15430fd97be7f4c9e1ef
|
[
"MIT"
] |
permissive
|
tsamsonov/grwat
|
5bd992b774f4644c52800352ba3b6bdd7601a063
|
f5bb3d71be34639d58b2812936f1d04b835f6f40
|
refs/heads/master
| 2023-07-19T01:33:26.609712
| 2023-07-11T17:36:01
| 2023-07-11T17:36:01
| 137,765,830
| 5
| 5
| null | 2022-04-19T19:30:20
| 2018-06-18T14:53:44
|
R
|
UTF-8
|
R
| false
| false
| 6,074
|
r
|
report.R
|
#' Report hydrograph separation and variables
#'
#' Generates a graphical HTML report summarizing the separation of a hydrograph, its variables and their statistical properties. See the example [report](https://www.dropbox.com/s/747xyqp65ipriy5/Spas-Zagorye.html) produced by this command for the `spas` dataset shipped with the grwat package.
#'
#' @param sep `data.frame` of hydrograph separation as returned by [grwat::gr_separate()] function.
#' @param vars `data.frame` of hydrograph variables as returned by [grwat::gr_summarize()] function.
#' @param output Character string path to the output file. Must have `.html` extension.
#' @param year Integer value of year used to divide series in two samples compared by Student and Fisher tests. Defaults to `NULL` which means that the year is calculated automatically by Pettitt test.
#' @param exclude Integer vector of years to be excluded from reporting. Defaults to `NULL`.
#' @param locale Character string locale. Currently only English (`'EN'`) and Russian (`'RU'`) locales are supported. Defaults to `'EN'`.
#' @param temp Boolean. Plot temperature on the top of hydrograph? Defaults to `FALSE`. If both `temp = TRUE` and `prec = TRUE`, then the axis is drawn for precipitation.
#' @param prec Boolean. Plot precipitation on the top of hydrograph? Defaults to `FALSE`. If both `temp = TRUE` and `prec = TRUE`, then the axis is drawn for precipitation.
#' @param span Integer number of days to accumulate precipitation for plotting. Defaults to `5`.
#'
#' @return No return value, called for side effects
#'
#' @export
#'
#' @example inst/examples/gr_report.R
#'
gr_report <- function(sep, vars, output = 'Report.html', year = NULL, exclude = NULL, temp = FALSE, prec = FALSE, span = 5, locale = 'EN') {
  started = Sys.time()
  # Relative output paths are anchored to the current working directory;
  # absolute paths are passed through with output_dir left as NULL.
  out_dir = NULL
  if (!R.utils::isAbsolutePath(output))
    out_dir = getwd()
  # Parameters forwarded to the parameterized Report_HTML.Rmd template.
  render_params = list(name = basename(output),
                       sep = sep,
                       vars = vars,
                       fixedyear = !is.null(year),
                       year = year,
                       exclude = exclude,
                       prec = prec,
                       temp = temp,
                       span = span,
                       locale = locale)
  rmarkdown::render(input = system.file('reports', 'Report_HTML.Rmd', package = 'grwat'),
                    output_file = output,
                    output_dir = out_dir,
                    encoding = 'UTF-8',
                    quiet = TRUE,
                    params = render_params)
  # Report wall-clock rendering time as HH:MM:SS.
  elapsed = difftime(Sys.time(), started, units = "secs")
  message('Elapsed time: ', format(.POSIXct(elapsed, tz = "GMT"), "%H:%M:%S"))
}
#' Tabular representation of tests
#'
#' This function is used to represent the results of [grwat::gr_test_vars()] in a tabular form. Used mainly in [grwat::gr_report()], but can be used for your own purposes.
#'
#' @param tests `list` of tests as returned by [grwat::gr_test_vars()] function.
#' @param format Character string encoding the type of output. Currently `'html'` only is supported.
#'
#' @return HTML table as returned by [knitr::kable()] function.
#' @export
#'
#' @example inst/examples/gr_kable_tests.R
#'
gr_kable_tests <- function(tests, format = 'html'){
  # Cell background palette.
  gcolor = '#99cc00' # green: highly significant (p < 0.01)
  ycolor = '#e6e600' # yellow: significant (p < 0.05)
  rcolor = '#ff9966' # red: not significant
  ncolor = '#FFFFFF' # white: no value
  ucolor = '#FFC0CB' # pink: upward change
  dcolor = '#ADD8E6' # light blue: downward change
  zcolor = '#D3D3D3' # grey: negligible change
  labs = grlabs[[grenv$loc]]
  # Color a change-magnitude column (Trend, MeanRatio, sdRatio): values with
  # absolute magnitude below `eps` are grey, negative blue, positive pink.
  # NA inputs stay NA (no matching case_when condition), as in the original
  # per-column expressions this helper replaces.
  spec_change = function(x, eps) {
    dplyr::case_when(!is.na(x) ~ kableExtra::cell_spec(x, format,
      background = ifelse(is.na(x), ncolor,
        ifelse(abs(x) < eps, zcolor,
          ifelse(x < 0, dcolor, ucolor)))))
  }
  # Color a p-value column: green below 0.01, yellow below 0.05, red otherwise.
  spec_pvalue = function(x) {
    dplyr::case_when(!is.na(x) ~ kableExtra::cell_spec(x, format,
      background = ifelse(is.na(x), ncolor,
        ifelse(x < 0.01, gcolor,
          ifelse(x < 0.05, ycolor, rcolor)))))
  }
  pvalues = tests$pvalues %>% dplyr::mutate(
    Trend = spec_change(Trend, 1e-4),
    MeanRatio = spec_change(MeanRatio, 5),
    sdRatio = spec_change(sdRatio, 5),
    Mann.Kendall = spec_pvalue(Mann.Kendall),
    Pettitt = spec_pvalue(Pettitt),
    Student = spec_pvalue(Student),
    Fisher = spec_pvalue(Fisher)
  )
  tab = knitr::kable(pvalues, booktabs = TRUE, longtable = TRUE, escape = FALSE, format = format,
                     caption = labs$pheader)
  if (format == 'latex')
    kableExtra::kable_styling(tab, font_size = 11,
                              repeat_header_text = "",
                              latex_options = c("striped", "repeat_header"))
  else
    kableExtra::kable_styling(tab,
                              bootstrap_options = "striped")
}
|
cce304930e5db40663c1407ec65b36f881cff8b6
|
3945388ee0fef9e4f99b2c0b4cd49e4fc58082b5
|
/StreamNetworkTools/R/net_position.r
|
f1771484055cdc7003f1f2d88f7eee1745e4875e
|
[
"MIT"
] |
permissive
|
dkopp3/StreamNetworkTools
|
7ea52d4c917bbcf02314a603a3f9b6d5c5b926b5
|
7c693f3edc975493be946d400642bd99c1d9d809
|
refs/heads/master
| 2023-06-23T09:58:49.678987
| 2023-06-09T18:44:18
| 2023-06-09T18:44:18
| 140,187,794
| 3
| 1
|
MIT
| 2021-01-22T18:59:07
| 2018-07-08T17:19:10
|
R
|
UTF-8
|
R
| false
| false
| 11,698
|
r
|
net_position.r
|
#' Network Position
#'
#' Calculates distances matrix for each nested (flow connected) network
#'
#' NHDSnapshot and NHDPlusAttributes are required NHDPlusV2 files (see
#' \code{\link{net_nhdplus}})
#'
#' Adjusts distances to account for M values
#'
#' @param netdelin output from \code{net_delin}
#' @param nhdplus_path directory for NHDPlusV2 files
#' @param vpu NHDPlusV2 vector Processing Unit
#'
#' @return named \code{list()} of distance matrix (sqKM). Names are To/From
#' comids with net.id appended
#'
#' @examples
#' #read example locations from VPU 11
#' ExLoc <- read.csv("Sample_Locations.csv")
#' # reorder and rename location data.frame
#' ExLoc <- ExLoc[,c("SiteName","W","N")]
#' names(ExLoc) <- c("SITE_ID","X","Y")
#' #find nearest NHDPlusV2 COMID
#' sam_pts <- net_comid(sample_points = ExLoc, CRS = 4269,
#' nhdplus_path = getwd(), vpu = 11, maxdist = 1)
#' b <- net_delin(group_comid = as.character(sam_pts[,"COMID"]), nhdplus_path = getwd(), vpu = "11")
#' c <- net_posit(netdelin = b, nhdplus_path = getwd(), vpu = "11")
#' @export
net_posit <- function (netdelin, nhdplus_path, vpu){
# ---- input validation ----
if(!is.character(vpu)){
stop("vpu must be character")
}
if(dim(netdelin$Nested_COMIDs)[1]<1){
stop("No flow connected points")
}
# locate the NHDPlusAttributes directory for this VPU
directory <- grep(paste(vpu, "/NHDPlusAttributes", sep = ""),
list.dirs(nhdplus_path, full.names = T), value = T)
# to/from comids (flow navigation table)
flow.files <- grep("PlusFlow.dbf", list.files(directory[1],
full.names = T), value = T)
flow <- foreign::read.dbf(flow.files)
# reduce the number of flow records to prevent errors in navigation
flow_all <- flow[flow[,"TOCOMID"] %in% netdelin$Network[,"net.comid"],]
# value-added attributes table (read for LENGTHKM and DIVERGENCE below)
Vaa <- grep("PlusFlowlineVAA.dbf",
list.files(directory[1], full.names=T),
value = T)
vaa <- foreign::read.dbf(Vaa)
names(vaa) <- toupper(names(vaa))
# one root comid per flow-connected group
roots <- unique(netdelin$Nested_COMIDs$root_group.comid)
#check for multiple m's at root
#netdelin$Network[netdelin$Network[,c("group.comid")] %in% roots & netdelin$Network[,c("net.comid")] %in% roots, ]
count <- 1
out <- list()
# ---- build one distance matrix per root network ----
for (i in roots){
#i <- 10390290
# adjust flow table to only include root network
flow <- flow_all[flow_all[,"TOCOMID"] %in% netdelin$Network[netdelin$Network$group.comid==i,"net.comid"],]
comids <- netdelin$Nested_COMIDs[netdelin$Nested_COMIDs$root_group.comid == i, ]
# get M values here, then iterate through
path_mvalues <- netdelin$Network[netdelin$Network[,"group.comid"] == netdelin$Network[,"net.comid"], ]
path_mvalues <- path_mvalues[path_mvalues[,"group.comid"] %in% comids[,"upstream_net.comid"], ]
# rowid ("<net.comid>_<net.id>") uniquely labels each sample point
path_mvalues$rowid <- apply(path_mvalues[,c("net.comid","net.id")], 1, paste, collapse = "_")
# calculate path to root for every comid; accumulator for all paths
PathsToRoot <- data.frame(TOCOMID = character(), FROMCOMID = character(),
length_sqkm_incr = numeric(), pathid = character())
#getwd()
#write.csv(PathsToRoot,"C:/Users/Darin/Dropbox/Dissertation/StreamResiliencyRCN/Community_Group/pathstest_ 22751957.csv",row.names=F)
# calc path and cumulative distance to root
for (row in path_mvalues$rowid){
#row <- "10274376_40"
strt <- path_mvalues[path_mvalues$rowid == row, "net.comid"]
end <- i #column#comids[i, "FROMCOMID"]
# if start == end you're already at the root = distance is zero (also there's no downstream TOCOMID)
if (strt != end){
mvalue <- path_mvalues[path_mvalues$rowid == row, "M"]
# because moving downstream: M is the proportion from the upstream end, so distance to outlet is what's remaining
strt_len <- vaa[vaa[,"COMID"] == strt, "LENGTHKM"] * (1 - mvalue)
# select next downstream (TOCOMID) from strt
fcomid <- flow[flow[, "FROMCOMID"] %in% strt, c("TOCOMID", "FROMCOMID")]
net <- flow[flow[, "FROMCOMID"] %in% strt, c("TOCOMID", "FROMCOMID")]
len <- vaa[vaa[,"COMID"] %in% fcomid[,"TOCOMID"], c("COMID", "LENGTHKM", "DIVERGENCE")]
# choose main divergence path if present, otherwise drop divergence
if(dim(fcomid)[1] > 1){
len <- len[len[,"DIVERGENCE"] == 1, c("COMID", "LENGTHKM")]
fcomid <- data.frame(TOCOMID = len[, "COMID"], FROMCOMID = unique(fcomid[,"FROMCOMID"]))
net <- fcomid
len <- len[, "LENGTHKM"]
} else {
len <- len[ ,"LENGTHKM"]
}
# sum length (value is the distance from upstream location to outlet of downstream comid)
len <- sum(len, strt_len)
net <- data.frame(net, length_sqkm_incr = len)
# walk downstream, appending one flowline per iteration, until the root node is reached
while(fcomid[,"TOCOMID"] != end){
fcomid <- flow[flow[, "FROMCOMID"] %in% fcomid[, "TOCOMID"], c("TOCOMID", "FROMCOMID")]
len <- vaa[vaa[,"COMID"] %in% fcomid[,"TOCOMID"], c("COMID", "LENGTHKM","DIVERGENCE")]
# choose main divergence path if present, otherwise drop divergence
if(dim(fcomid)[1] > 1){
len <- len[len[,"DIVERGENCE"] == 1, c("COMID", "LENGTHKM")]
fcomid <- data.frame(FROMCOMID = unique(fcomid[, "FROMCOMID"]),
TOCOMID = len[, "COMID"])
len <- len[, "LENGTHKM"]
} else {
len <- len[ ,"LENGTHKM"]
}
# sum length (value is the distance from upstream location to outlet of downstream comid)
length_sqkm_incr <- sum(len, max(net[,"length_sqkm_incr"]))
net <- rbind(net, data.frame(fcomid, length_sqkm_incr))
}
# adjust final length to match the M value of the end (root) flowline
m_adj_end <- netdelin$Network[netdelin$Network[,"group.comid"] == end &
netdelin$Network[,"net.comid"] == end, "M"]
rootlen <- vaa[vaa[,"COMID"] == end,"LENGTHKM"]
net[dim(net)[1],"length_sqkm_incr"] <- (net[dim(net)[1],"length_sqkm_incr"] - rootlen) + (m_adj_end * rootlen)
# add pathid to facilitate pairwise comparisons
net <- data.frame(net, pathid = row)
PathsToRoot <- rbind(PathsToRoot, net)
} else {
#"you'll need to add an else statement to account for strt == end"
#if you add in something here you'll need to rbind again, or move above to outside of bracket
#PathsToRoot <- rbind(PathsToRoot, net)
}
}
#print("completed path Distance")
# ---- build the symmetric distance matrix: row/col 1 is the root, ----
# ---- remaining rows/cols are the sample-point path ids ----
path_ids <- unique(PathsToRoot[ ,"pathid"])
distmat <- matrix(NA, length(path_ids) + 1, length(path_ids) + 1)
colnames(distmat) <- c(i, as.character(path_ids))
rownames(distmat) <- c(i, as.character(path_ids))
diag(distmat) <- 0
# distance to root (max cumulative length on each path) fills 1st row/column of matrix
rootdist <- aggregate(PathsToRoot[,"length_sqkm_incr"], by = list(PathsToRoot[,"pathid"]), max)
distmat[as.character(i), as.character(rootdist[,"Group.1"])] <- rootdist[,"x"]
distmat[as.character(rootdist[,"Group.1"]), as.character(i)] <- rootdist[,"x"]
# iterate through the upstream comids pairwise
for (p in rownames(distmat)[-1]){
#p <- "10390290"
for (q in colnames(distmat)[-1]){
#q <- "10348934_45"
a <- PathsToRoot[PathsToRoot[, "pathid"] == p, ]
b <- PathsToRoot[PathsToRoot[, "pathid"] == q, ]
# decisions to populate matrix distmat; Da/Db flag whether each path has
# flowlines the other does not (i.e. the unshared upstream portions)
Da <- any(a[,"FROMCOMID"]%in%b[,"FROMCOMID"] == F)
Db <- any(b[,"FROMCOMID"]%in%a[,"FROMCOMID"] == F)
if(Da == T & Db == T){
# both have extra comids not included:
# p and q are on different branches; distance = sum of both legs to the confluence
a_not_b <- a[a[, "FROMCOMID"] %in% b[, "FROMCOMID"] == F, ]
b_not_a <- b[b[, "FROMCOMID"] %in% a[, "FROMCOMID"] == F, ]
if(dim(a_not_b)[1] == 1){
# assign start-length value if there is only one record
mvalue <- path_mvalues[path_mvalues[,"rowid"] == p, "M"]
#mvalue <- netdelin$Network[netdelin$Network[,"group.comid"] == a_not_b[,"FROMCOMID"] &
#                            netdelin$Network[,"net.comid"] == a_not_b[,"FROMCOMID"], "M"]
# because moving downstream: M is the proportion from the upstream end, so distance to outlet is what's remaining
dist_a <- vaa[vaa[,"COMID"] == a_not_b[,"FROMCOMID"], "LENGTHKM"] * (1-mvalue)
} else {
# use one less record because the cumulative length is at the outlet, not the confluence
dist_a <- a_not_b[dim(a_not_b)[1] - 1, "length_sqkm_incr"]
}
if(dim(b_not_a)[1] == 1){
# assign start-length value if there is only one record
mvalue <- path_mvalues[path_mvalues[,"rowid"] == q, "M"]
#mvalue <- netdelin$Network[netdelin$Network[,"group.comid"] == b_not_a[,"FROMCOMID"] &
#                            netdelin$Network[,"net.comid"] == b_not_a[,"FROMCOMID"], "M"]
# because moving downstream: M is the proportion from the upstream end, so distance to outlet is what's remaining
dist_b <- vaa[vaa[,"COMID"] == b_not_a[,"FROMCOMID"], "LENGTHKM"] * (1-mvalue)
} else {
# use one less record because the cumulative length is at the outlet, not the confluence
dist_b <- b_not_a[dim(b_not_a)[1] - 1, "length_sqkm_incr"]
}
# sum together - distance between p and q
dist <- sum(dist_a,dist_b)
distmat[p,q] <- dist
}
if(Da == T & Db == F){
# a has extra comids, b does not -> q is downstream of p on the same path
# distance between them is the cumulative length at the 1st comid of b - need to adjust for M value
# (subtract the full flowline length, add back the M-proportional part)
dist <- a[a[,"FROMCOMID"] %in% b[,"FROMCOMID"] == F, ]
len <- vaa[vaa[,"COMID"] == dist[dim(dist)[1], "TOCOMID"], c("LENGTHKM")]
mvalue <- path_mvalues[path_mvalues[,"rowid"] == q, "M"]
# the root is not in the path_mvalues table, so fall back to the Network table
if (length(mvalue)!=1){
mvalue <- netdelin$Network[netdelin$Network$group.comid == dist[dim(dist)[1], "TOCOMID"] &
netdelin$Network$net.comid == dist[dim(dist)[1], "TOCOMID"], "M"]
}
dist <- dist[dim(dist)[1], "length_sqkm_incr"] - len + (mvalue * len)
distmat[p, q] <- dist
}
if(Da == F & Db == T){
# b has extra comids, a does not -> p is downstream of q on the same path
# distance between them is the cumulative length at the 1st comid of a - need to adjust for M value
dist <- b[b[,"FROMCOMID"] %in% a[,"FROMCOMID"] == F, ]
len <- vaa[vaa[,"COMID"] == dist[dim(dist)[1], "TOCOMID"], c("LENGTHKM")]
mvalue <- path_mvalues[path_mvalues[,"rowid"] == p, "M"]
# the root is not in the path_mvalues table, so fall back to the Network table
if (length(mvalue)!=1){
mvalue <- netdelin$Network[netdelin$Network$group.comid == dist[dim(dist)[1], "TOCOMID"] &
netdelin$Network$net.comid == dist[dim(dist)[1], "TOCOMID"], "M"]
}
dist <- dist[dim(dist)[1], "length_sqkm_incr"] - len + (mvalue * len)
distmat[p, q] <- dist
}
if(Da == F & Db == F){
# neither a nor b has extra comids: same flowline
if(p!=q){
# they are on the same comid but have different M values;
# distance is the difference of their positions along the flowline
len <- vaa[vaa[,"COMID"] == a[1, "FROMCOMID"], c("LENGTHKM")]
mvalue <- path_mvalues[path_mvalues[,"rowid"] == p, "M"]
dista<-len*mvalue
mvalue <- path_mvalues[path_mvalues[,"rowid"] == q, "M"]
distb<-len*mvalue
distmat[p,q] <- max(dista, distb) - min(dista, distb)}
# otherwise p == q: diagonal already set to zero above
}
}
}
#print("completed dist matrix")
out[[count]]<-list(distmat)
count <- count+1
}
names(out) <- as.character(roots)
return(out)
}
|
8f1fb34a30aee7afbeede10e11746f303d0a3512
|
a0b273f18dcb908dd4676d0b4f5b258165706060
|
/Lab 1/Lab 1/Lab 1.R
|
76701a60b3e3f08a61c0a1cda481512e07a77a8b
|
[] |
no_license
|
brettv30/Analysis-of-Algorithms
|
bf64cb42efa0c273861acc39d6e4c1bf4a145837
|
b3d7a1ed6e4ee6cea8ebf180bd8e9d20135fef8e
|
refs/heads/master
| 2022-08-31T10:32:24.004987
| 2020-05-28T00:01:23
| 2020-05-28T00:01:23
| 267,448,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,303
|
r
|
Lab 1.R
|
# Brett Vogelsang & Andrew Reeves
# Part 1: basic vector arithmetic
# I am first creating two numerical vectors
x<-c(4,2,6)
y<-c(1,0,-1)
# now I am looking for number of variables in the vector
# The result is 3
length(x)
# now I am looking for the sum of all numbers in the vector
# The result is 12
sum(x)
# now I am adding the elements of both vectors (element-wise)
# The result is 5 2 5
x+y
# now I am multiplying the elements of both vectors (element-wise)
# The result is 4 0 -6
x*y
# now I am subtracting all elements in the vector by 2
# The result is 2 0 4
x-2
# now I am squaring all elements in the vector
# The result is 16 4 36
x^2
# Part 2: generating sequences
# I am printing out the numbers starting from 7 and ending at 11, increasing by 1
# This sequence will produce 7 8 9 10 11
7:11
# now I am printing out the sequence of numbers starting at 2 and ending at 9, increasing by 1
# This sequence will produce 2 3 4 5 6 7 8 9
seq(2,9)
# now I am printing out the sequence of numbers starting at 4 and ending at 10, increasing by 2 each time
# This sequence will produce 4 6 8 10
seq(4,10,by=2)
# now I am printing out the value 'NA' ten different times on the same line
# This sequence will produce NA NA NA NA NA NA NA NA NA NA
rep(NA,10)
# Part 3: vector indexing (note: this reassigns x)
# I am creating a numerical vector
x = c(5,9,2,3,4,6,7,0,8,12,2,9)
# now I am printing out the element at the second index of the vector
# The result is 9
x[2]
# now I am printing out the elements of the vector starting at the second index and ending at the fourth index
# The result is 9 2 3
x[2:4]
# create a new vector consisting of the elements at indexes 2,3,5,6, & 7 of the original vector
# The result is 9 2 4 6 7
x[c(2,3,5:7)]
# display the vector without the elements between indexes 10 and 12
# The result is 5 9 2 3 4 6 7 0 8
x[-(10:12)]
# Problem 2: 100 meter dash and Misleading graphs
# read the mens100 file into the dashdata object
dashdata = read.csv("mens100.csv")
# 1 Answer: 2 Variables & 29 observations
# print the number of observations and number of variables within dashdata
dim(dashdata)
# Question 2
# create a bar plot of the data with time on the y-axis and year on the x-axis
barplot(dashdata$time,names.arg=dashdata$year,las=2)
# Question 3
# tell 'r' to create two graphs in the same column
par(mfrow = c(2,1))
# compare both bar graphs on top of each other by using the par function
# (the second plot restricts the y-axis, which exaggerates the trend — the "misleading graph")
barplot(dashdata$time,names.arg=dashdata$year,las=2)
barplot(dashdata$time,names.arg=dashdata$year,las=2,ylim=c(9.5,12.5),xpd=FALSE)
# Problem 3: Playing around with the cars data in R
# read the cars10 file to the cardata variable
cardata = read.csv("cars10.csv")
# Question 1
# Answer: the number of variables is 11 and the number of observations is 74
# Here I am finding the dimensions of the data frame
dim(cardata)
# Question 2
# We found the number of domestic cars in the cardata file to be 52
# Here I am finding the number of cars in the foreign column with the Domestic tag
length(cardata$foreign[cardata$foreign == "Domestic"])
# We found the number of foreign cars in the cardata file to be 22
# Here I am finding the number of cars in the foreign column with the Foreign tag
length(cardata$foreign[cardata$foreign == "Foreign"])
# Question 3
# find which measurement in the cardata mpg column is the minimum
# Answer: 12
cardata$mpg[which.min(cardata$mpg)]
# Question 4
# find which measurement in the cardata price column is the maximum
# Answer: 15906
cardata$price[which.max(cardata$price)]
# Question 5
# here I create a histogram of the cardata price column
# the histogram is right skewed
hist(cardata$price)
# Question 6
# here I compare both histograms on top of each other by using the par function
# Foreign cars are able to achieve above 40mpg whereas domestic cars can only attain above 30mpg
# However, more domestic cars operate within the range of 15 - 20mpg than foreign cars.
par(mfrow = c(2,1))
hist(cardata$mpg[cardata$foreign == "Domestic"])
hist(cardata$mpg[cardata$foreign == "Foreign"])
# Question 7
# here I create a scatter plot of both the weight and length columns in the cardata file
# we can infer from this graph that the lengthier the car within this population, the heavier it will be, and vice versa
plot(cardata$weight ~ cardata$length)
# Problem 4: Finance data via the quantmod package
# I added some extra commands here because I was playing around a bit
library(quantmod)
# Download daily price series for four tickers starting 2016-01-01.
# NOTE(review): getSymbols("T") creates an object named T (the AT&T series),
# which shadows the T shorthand for TRUE for the rest of this session.
getSymbols("INTC",from="2016-01-01")
getSymbols("IBM",from="2016-01-01")
getSymbols("AAL",from= "2016-01-01")
getSymbols("T", from= "2016-01-01")
# Convert each series to a plain numeric vector of quarterly returns.
intcret=as.numeric(quarterlyReturn(INTC))
ibmret=as.numeric(quarterlyReturn(IBM))
aalret=as.numeric(quarterlyReturn(AAL))
tret=as.numeric(quarterlyReturn(T))
# 3x2 grid: four adjusted-price plots, one volume histogram, then scatter plots.
par(mfrow=c(3,2))
plot(Ad(INTC))
plot(Ad(IBM))
plot(Ad(AAL))
plot(Ad(T))
hist(Vo(INTC)/1000)
# It is cool how you can notice that the wider the sample size became, the closer the
# dots on the scatter plot became and vice versa
plot(ibmret,intcret)
plot(aalret, tret)
# Problem 5: Airline Data
# read the airline2008Nov file to the mydata variable
mydata = read.csv("airline2008Nov.csv")
# Question 1
# show the dimensions of the data frame
# mydata contains 9997 observations and 26 variables
dim(mydata)
# determine which airline has the most flights within the data frame
# the YV airline has the most flights in the dataset
# FIX: which.max() applied directly to the UniqueCarrier column returns the row
# index of the largest factor code, not the most frequent carrier. Tabulate
# the carriers first, then take the name of the largest count.
names(which.max(table(mydata$UniqueCarrier)))
# Question 2
# A: On what day of the week do people most frequently travel?
# B: What is the average taxi time on the most popular flight weekend?
# Question 3
# Part A
# this will print out all rows with missing data
mydata[!complete.cases(mydata),]
# this will create a new data set with no missing data
newdata = na.omit(mydata)
# I am writing the data within the newdata object to a csv file titled cleanairline
write.csv(newdata, "cleanairline.csv")
# Part B
# There are 9997 observations
# Here I am showing the dimensions of the data frame
dim(newdata)
# Part C
# After analyzing this data, our records would be insufficient. If
# we were to be audited, this data would not show the correct numbers
# Question 4
# Part A
# Here I create a smaller subset of data only containing information pertaining to ATL, JFK, DFW, and MSP
smalldata = subset(mydata, Dest %in% c("ATL", "JFK", "DFW", "MSP"))
# I am resetting the factor variable Dest within the data frame
smalldata$Dest = factor(smalldata$Dest)
# Part B
# Here I am refactoring the titles of ATL, JFK, DFW, and MSP to more comprehensible language
smalldata$Dest = factor(smalldata$Dest, levels = c("ATL", "JFK", "DFW", "MSP"),
                        labels = c("Atlanta", "NYKennedy", "DallasFtWorth",
                                   "MinneapolisStPaul"),
                        ordered = TRUE)
# Part C
# create a table of every entry in the Dest variable
table(smalldata$Dest)
# create a pie chart in conjunction with the table function to display a pie chart of the four important observations
pie(table(factor(smalldata$Dest)))
# Question 5
# Part A & B
# Here I write a mathematical equation to calculate ActualElapsedTime from AirTime, TaxiIn, and TaxiOut added together
# I place that calculated number into the CalculatedElapsedTime variable
smalldata$CalculatedElapsedTime = (smalldata$AirTime + smalldata$TaxiIn + smalldata$TaxiOut)
# Part C
# Here I am using the head function to print the first six rows of the ActualElapsedTime variable
# and the CalculatedElapsedTime to show that they both are the same.
head(smalldata$ActualElapsedTime)
head(smalldata$CalculatedElapsedTime)
# Question 6
# Part A
# I am counting all of the times a flight had a weather delay
# there were 71 flights within the data frame that had a weather delay
sum(smalldata$WeatherDelay > 0)
# Here I am calculating the proportion of flights that had a weather delay against all flights in the data frame
# 4.221 percent of flights had a weather delay
sum(smalldata$WeatherDelay > 0) / length(smalldata$WeatherDelay) * 100
# Part B
# Here I am creating a subset of all the flights bound for JFK
JFKset = subset(smalldata,Dest == "JFK")
# Now I sum all of the flights that had a weather delay, there were 11 in total
sum(JFKset$WeatherDelay > 0)
# Now I am calculating the proportion of flights bound for JFK that had a weather delay against
# all flights bound for JFK
# 3.873 percent of flights going to JFK had a weather delay
sum(JFKset$WeatherDelay > 0) / length(JFKset$WeatherDelay) * 100
# Part C
# Here I calculate the maximum departure delay within the data frame
# This maximum delay was 1286 minutes
# FIX: which.max() returns the row INDEX of the maximum, not the delay value
# itself; take the maximum value directly instead.
max(smalldata$DepDelay, na.rm = TRUE)
# here I find which carrier had that maximum departure delay
# The carrier was NW
smalldata$UniqueCarrier[which.max(smalldata$DepDelay)]
# Part D
# here I determine the day of the week that had the least flights
# Day 6, Saturday, had the least flights
# FIX: the original indexed DayOfWeek by which.min(smalldata$Dest), which picks
# an arbitrary row. Count flights per day and take the name of the smallest count.
names(which.min(table(smalldata$DayOfWeek)))
# Part E
# Here I create a scatter plot with the TaxiOut information on the Y axis and the TaxiIn information on the X axis
# This graph is useful because it shows that generally most taxiing done by planes takes less than 20 minutes
# However, this graph could be better if we examined the data between 20 on the TaxiIn and 50 on the TaxiOut
plot(smalldata$TaxiOut ~ smalldata$TaxiIn)
|
f66c1f34145476bac95296c0c2fe568c00365ea5
|
a9c540d94681b5e4ffb2300fd320d6c16eab3040
|
/man/group_func_1.Rd
|
64a387c0c16c89b1ec4a08fbf36b61b9ab6fcb41
|
[] |
no_license
|
fbertran/SelectBoost
|
fd5716b73cb07d05cc3c67bbc75b8915672c1769
|
5610332658b95a71dacdbccea721d00a88a9742f
|
refs/heads/master
| 2022-12-01T07:52:52.171876
| 2022-11-29T22:18:18
| 2022-11-29T22:18:18
| 136,206,211
| 6
| 2
| null | 2021-03-21T16:43:02
| 2018-06-05T16:35:45
|
R
|
UTF-8
|
R
| false
| true
| 1,317
|
rd
|
group_func_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_func_1.R
\name{group_func_1}
\alias{group_func_1}
\title{Generate groups by thresholding.}
\usage{
group_func_1(absXcor, c0)
}
\arguments{
\item{absXcor}{A numeric matrix. The absolute value of a correlation or distance matrix.}
\item{c0}{A numeric scalar. The thresholding value used to build the groups.}
}
\value{
A list with one entry: the list of groups.
Attributes:
\itemize{
\item "type": "normal"
\item "length.groups": the length of each group.
}
}
\description{
\code{group_func_1} creates groups of variables based on thresholding the input matrix.
}
\details{
This is a function used to create a list of groups using an input matrix and a
thresholding value c0. A group is made for every column in the input matrix.
}
\examples{
set.seed(314)
group_func_1(cor(matrix(rnorm(50),10,5)),.4)
}
\references{
\emph{selectBoost: a general algorithm to enhance the performance of variable selection methods in correlated datasets}, Frédéric Bertrand, Ismaïl Aouadi, Nicolas Jung, Raphael Carapito, Laurent Vallat, Seiamak Bahram, Myriam Maumy-Bertrand, Bioinformatics, 2020. \doi{10.1093/bioinformatics/btaa855}
}
\seealso{
\code{\link{group_func_2}} and \code{\link{boost.findgroups}}
}
\author{
Frederic Bertrand, \email{frederic.bertrand@utt.fr}
}
|
924f51d36c10c8415a28f9bfed6a0af5eb21a50d
|
9969b02c26fa5388ac971b8212c761c6abf98efb
|
/R/getTogetherCovarData.r
|
0b52a0dbea47093c9742ccdcbff06d50476354ff
|
[] |
no_license
|
tmcd82070/CAMP_RST
|
0cccd7d20c8c72d45fca31833c78cd2829afc169
|
eca3e894c19936edb26575aca125e795ab21d99f
|
refs/heads/master
| 2022-05-10T13:33:20.464702
| 2022-04-05T21:05:35
| 2022-04-05T21:05:35
| 10,950,738
| 0
| 0
| null | 2017-05-19T20:42:56
| 2013-06-25T21:24:52
|
R
|
UTF-8
|
R
| false
| false
| 23,196
|
r
|
getTogetherCovarData.r
|
#' @export
#'
#' @title getTogetherCovarData
#'
#' @description Put together available covariate data from both the external
#' Environmental Covariate Database, as well as CAMP-collected environmental
#' variables.
#'
#' @param obs.eff.df A data frame with at least variables \code{batchDate} and
#' \code{efficiency}, where \code{efficiency} is \code{NA} for all days
#' requiring an estimate.
#'
#' @param min.date The start date for data to include. This is a text string in
#' the format \code{\%Y-\%m-\%d}, or \code{YYYY-MM-DD}.
#'
#' @param max.date The end date for data to include. Same format as
#' \code{min.date}.
#'
#' @param traps A set of traps for which efficiency data are available.
#'
#' @param enhmodel A logical indicating if function \code{getTogetherCovarData}
#' should use query dates based on provided \code{min.date} and
#' \code{max.date} (\code{TRUE}), or efficiency time frames (\code{FALSE}).
#'
#' @return Several items, all housed within a list. The main piece of output is
#' data frame \code{obs.eff.df}, which contains the same number of rows as the
#' \code{obs.eff.df} submitted to the function, but with several additional
#' columns, due to appended covariates.
#'
#' \describe{
#' \item{obs.eff.df}{Same as submitted to the function, but with covariate columns added.}
#' \item{dbDisc}{Available CAMP discharge data.}
#' \item{dbDpcm}{Available CAMP water-depth data.}
#' \item{dbATpF}{Available CAMP air-temperature data.}
#' \item{dbTurb}{Available CAMP turbidity data.}
#' \item{dbWVel}{Available CAMP water-velocity data.}
#' \item{dbWTpC}{Available CAMP water-temperature data.}
#' \item{dbLite}{Available CAMP light-penetration discharge data.}
#' \item{dbFlPG}{Available Environmental Covariate Database discharge data.}
#' \item{dbTpPG}{Available Environmental Covariate Database water temperature data.}
#' }
#'
#' @details Function \code{getTogetherCovarData} appends covariate information
#' for each unique \code{batchDate} and \code{TrapPositionID} combination
#' available in data frame \code{obs.eff.df}.
#'
#' Environmental covariate data are queried from the WEST-supported database
#' created for housing USGS and CDEC data at sites important to the CAMP
#' program.
#'
#' Other covariates derive directly from data stored within a particular
#' river's CAMP mdb database. Prior to use in efficiency models, data are
#' first converted to standardized units via the \code{QryCleanEnvCov} SQL
#' query sequence in function \code{getCAMPEnvCov}, which is called once
#' for each covariate.
#'
#' Following querying, all covariates are then fit with a smoothing spline via
#' function \code{estCovar}, with penalization parameter \eqn{\lambda}
#' selected via cross-validation. Smoothing occurs to ensure easy fitting of
#' efficiency models, whose temporal resolution is that of a \code{batchDate}.
#'
#' @seealso \code{getCAMPEnvCov}, \code{estCovar}
#'
#' @references Hastie, T., Tibshirani, R., and Friedman, J. 2009. The Elements
#' of Statistical Learning. 2nd Edition. Springer, New York, New York.
#'
#' @examples
#' \dontrun{
#' ans <- getTogetherCovarData(obs.eff.df,
#' min.date,
#' max.date,
#' traps,
#' enhmodel)
#' }
getTogetherCovarData <- function(obs.eff.df,min.date,max.date,traps,enhmodel){
# obs.eff.df <- obs.eff.df
# min.date <- min.date #2
# max.date <- max.date #2
# traps <- traps
# enhmodel <- TRUE
# ---- Obtain necessary variables from the global environment.
time.zone <- get("time.zone",envir=.GlobalEnv)
# ---- Get from dataframe.
site <- attr(obs.eff.df,"site")
# ---- Find sites we need for this run ----
# Explanation: We have specific subSites in catch.df. The environmental
# database just has Sites. We need to find the Site(s) associated with
# the subSites we are working on so we can query the API later.
#
# The way Jason set this up is not the best. The subSite to Site map
# is stored over as a simple CSV in the helperFiles directory of the package.
# Ultimately, we want to store this information in the postgreSQL database
# itself. This is Issue #105 in the campR Github repository.
# ---- We assemble all the unique ourSiteIDs we need for this run.
luSubSiteID <- read.csv(paste0(find.package("campR"),"/helperFiles/luSubSiteID.csv"))
xwalk <- luSubSiteID[luSubSiteID$subSiteID %in% attr(obs.eff.df,"catch.subsites"),] # Change to catch.subsites. Does this affect eff beta estimation?
uniqueOurSiteIDsToQuery <- unique(na.omit(c(xwalk$ourSiteIDChoice1,xwalk$ourSiteIDChoice2)))
# ---- Loop over subSiteID's to obtain env covars for each ----
df <- vector("list",length(uniqueOurSiteIDsToQuery))
m1 <- vector("list",length(uniqueOurSiteIDsToQuery)) # flow (cfs)
m2 <- vector("list",length(uniqueOurSiteIDsToQuery)) # temperature (C)
m3 <- vector("list",length(uniqueOurSiteIDsToQuery)) # turbidity
# ---- Need to set an initial value for variable covar. This holds the building string of
# ---- a model statement. Don't need to worry about if all the values for all efficiency
# ---- trials are actually present -- we deal with that possibility below. So... just add
# ---- the simple text statement. But if we're calling this function for real passage
# ---- estimation, we don't want this.
obs.eff.df$covar <- "bdMeanNightProp + bdMeanMoonProp + bdMeanForkLength"
for(ii in 1:length(uniqueOurSiteIDsToQuery)){
# ---- Get siteID and dates for this subSite ----
oursitevar <- uniqueOurSiteIDsToQuery[ii]
if(enhmodel == TRUE){
# ---- Use these when constructing passage estimates. Buff out a month each way.
minEffDate <- as.POSIXct(min.date,format="%Y-%m-%d",tz=time.zone) - 90*24*60*60
maxEffDate <- as.POSIXct(max.date,format="%Y-%m-%d",tz=time.zone) + 90*24*60*60
# Truncate any hours from the date vars. Required by cov API.
minEffDate <- format(minEffDate, format="%Y-%m-%d")
maxEffDate <- format(maxEffDate, format="%Y-%m-%d")
} else {
# ---- Use these when building enhanced efficiency trials.
minEffDate <- as.character(min(obs.eff.df$batchDate))
maxEffDate <- as.character(max(obs.eff.df$batchDate))
}
# ---- Make a call to the envir covar API ----
df[[ii]] <- queryEnvCovAPI(min.date = minEffDate,
max.date = maxEffDate,
oursitevar = oursitevar,
type = "D")
nGood <- nrow(df[[ii]])
# ---- Commented-out Jason code ----
# This is Jason's code to do some (all?) QAQC on the returned EnvCov data.
# Trent is not sure we need any of this code, but probably, and
# that's why he is leaving it as comments.
#
# # ---- If we're here, we were successfully able to demo we are the only ones querying.
# res <- RPostgres::dbSendQuery(chPG,paste0("SELECT COUNT(oursiteid) FROM tbld WHERE ('",min.date,"' <= date AND date <= '",max.date,"') AND oursiteid = ",oursitevar," AND ourmetricstatusid = 1 GROUP BY oursiteid;"))
# nGood <- RPostgres::dbFetch(res)
# RPostgres::dbClearResult(res)
# RPostgres::dbDisconnect(chPG)
#
# tableChecker()
#
# # ---- RIGHT HERE, a querying competitor could steal the database if they query, because I've disconnected, and it's
# # ---- open for anyone to grab. Solution is to use campR::tryEnvCovDB
# # ---- inside EnvCovDBpostgres::queryEnvCovDB, but that requires a rebuild of EnvCovDBpostgres. The window of destruction here
# # ---- should be very small.
#
# if(nrow(nGood) > 0){
# # df[[ii]] <- EnvCovDBpostgres::queryEnvCovDB("jmitchell","G:hbtr@RPH5M.",minEffDate,maxEffDate,oursitevar,type="D",plot=FALSE)
# # save.image("C:/Users/jmitchell/Desktop/fixme.RData")
# df[[ii]] <- EnvCovDBpostgres::queryEnvCovDB("envcovread","KRFCszMxDTIcLSYwUu56xwt0GO",minEffDate,maxEffDate,oursitevar,type="D",plot=FALSE)
#
# df[[ii]]$date <- strptime(df[[ii]]$date,format="%Y-%m-%d",tz=time.zone)
# } else {
# # df[[ii]] <- EnvCovDBpostgres::queryEnvCovDB("jmitchell","G:hbtr@RPH5M.",minEffDate,maxEffDate,oursitevar,type="U",plot=FALSE)
# df[[ii]] <- EnvCovDBpostgres::queryEnvCovDB("envcovread","KRFCszMxDTIcLSYwUu56xwt0GO",minEffDate,maxEffDate,oursitevar,type="U",plot=FALSE)
#
# if(sum(!is.na(df[[ii]]$flow_cfs)) > 0 & (sum(df[[ii]]$flow_cfs <= -9997 & !is.na(df[[ii]]$flow_cfs)) > 0) ){
# df[[ii]][df[[ii]]$flow_cfs <= -9997 & !is.na(df[[ii]]$flow_cfs),]$flow_cfs <- NA
# }
# ---- QAQC on temperature values ----
if(sum(!is.na(df[[ii]]$temp_c)) > 0){
# ---- This is mostly due to weird CDEC data.
# ---- If we have temps less than -17.8F, then the Celsius value was 0.00. Chuck these -- they're probably bad values.
# ---- The value of 37.7778 corresponds to 100F. Chuck any values >37.7778 as implausible.
if( any( (df[[ii]][!is.na(df[[ii]]$temp_c),]$temp_c >= 37.7778) | (df[[ii]][!is.na(df[[ii]]$temp_c),]$temp_c <= -17.8) ) ){
df[[ii]][!is.na(df[[ii]]$temp_c) & ( df[[ii]]$temp_c >= 37.7778 | df[[ii]]$temp_c <= -17.8 ),]$temp_c <- NA
}
# ---- They seem to use 32.0 as a substitution value as well. Weird. Get rid of those.
if( any( (df[[ii]][!is.na(df[[ii]]$temp_c),]$temp_c == 0.0) ) ){
df[[ii]][!is.na(df[[ii]]$temp_c) & df[[ii]]$temp_c == 0.0,]$temp_c <- NA
}
}
# ---- Done with API. Compile the good dates for each metric. ----
min.date.flow <- suppressWarnings(min(df[[ii]][!is.na(df[[ii]]$flow_cfs),]$date))
max.date.flow <- suppressWarnings(max(df[[ii]][!is.na(df[[ii]]$flow_cfs),]$date))
min.date.temp <- suppressWarnings(min(df[[ii]][!is.na(df[[ii]]$temp_c),]$date))
max.date.temp <- suppressWarnings(max(df[[ii]][!is.na(df[[ii]]$temp_c),]$date))
# ---- Query this river's Access database for information recorded at the trap. ----
# For now, we only use this for turbidity. I name objects that respect this.
if(ii == 1){
tableChecker()
# ---- Develop the TempReportCriteria_TrapVisit table.
F.buildReportCriteria( site, min.date, max.date ) # was min.date2, max.date2. matter?
# ---- 11/20/2017. Update to run Connie's cleaning query.
db <- get( "db.file", envir=.GlobalEnv )
chRODBC <- odbcConnectAccess(db)
# ---- Run the clean-up query.
F.run.sqlFile( chRODBC, "QryCleanEnvCov.sql")#, min.date2, max.date2 )
# ---- Now, fetch the result.
dbCov <- sqlFetch( chRODBC, "EnvDataRaw_Standardized" )
close(chRODBC)
# ---- Make a dataframe for what we have.
dbDisc <- getCAMPEnvCov(dbCov,"discharge","dischargeUnitID",12)
dbDpcm <- getCAMPEnvCov(dbCov,"waterDepth","waterDepthUnitID",3)
dbATpF <- getCAMPEnvCov(dbCov,"airTemp","airTempUnitID",19)
dbTurb <- getCAMPEnvCov(dbCov,"turbidity","turbidityUnitID",20)
dbWVel <- getCAMPEnvCov(dbCov,"waterVel","waterVelUnitID",8)
dbWTpC <- getCAMPEnvCov(dbCov,"waterTemp","waterTempUnitID",18)
dbLite <- getCAMPEnvCov(dbCov,"lightPenetration","lightPenetrationUnitID",3)
#dbDOxy <- getCAMPEnvCov(dbCov,"dissolvedOxygen","dissolvedOxygenUnitID",36)
#dbCond <- getCAMPEnvCov(dbCov,"conductivity","conductivityUnitID",36)
#dbBaro <- getCAMPEnvCov(dbCov,"barometer","barometerUnitID",33)
#dbWeat <- getCAMPEnvCov(dbCov,"weather",NA,NA)
# ---- Put all database covariates into a list for easier processing.
dbCovar <- list(dbDisc,dbDpcm,dbATpF,dbTurb,dbWVel,dbWTpC,dbLite)#,dbDOxy,dbCond,dbBaro,dbWeat)
# ---- Collapse all the UnitIDs we have.
dfUnitIDs <- NULL
for(i in 1:length(dbCovar)){
l <- length(attr(dbCovar[[i]],"uniqueUnitID"))
if(l > 0){
dfUnitIDs.i <- data.frame("site"=rep(site,l),"covar"=rep(attr(dbCovar[[i]],"cov"),l),"UnitID"=attr(dbCovar[[i]],"uniqueUnitID"))
dfUnitIDs <- rbind(dfUnitIDs,dfUnitIDs.i)
}
}
# ---- Compile unique UnitIDs per covar.
dfUnitIDs <- unique(dfUnitIDs)
rownames(dfUnitIDs) <- NULL
dfUnitIDs$test <- NA
for(i in 1:nrow(dfUnitIDs)){
if(i == 1){
dfUnitIDs[i,]$test <- dfUnitIDs[i,]$UnitID
} else if(dfUnitIDs[i,]$covar != dfUnitIDs[i - 1,]$covar){
dfUnitIDs[i,]$test <- paste0(dfUnitIDs[i,]$UnitID," ")
} else {
dfUnitIDs[i,]$test <- paste0(dfUnitIDs[i - 1,]$test,dfUnitIDs[i,]$UnitID,sep=" ")
}
}
dfUnitIDs <- aggregate(dfUnitIDs,list(dfUnitIDs$covar),function(x) tail(x,1))
dfUnitIDs$Group.1 <- dfUnitIDs$UnitID <- dfUnitIDs$site <- NULL
# # ---- Read in how to map unstandardized weather values to standardized values. Put this in as data.frame...eventually.
# weaMap <- read.csv("//LAR-FILE-SRV/Data/PSMFC_CampRST/felipe products/variables/weather/weatherLookupMapped20170720.csv")
# #weaKey <- weaMap[1:4,c("PrecipLevel","PrecipLevelText")]
# dbWeat <- merge(dbWeat,weaMap[,c("weather","precipLevel")],by=c("weather"),all.x=TRUE)
# dbWeat <- dbWeat[,c("subSiteID","measureDate","precipLevel")]
# names(dbWeat)[names(dbWeat) == "precipLevel"] <- "precipLevel"
}
# ---- Fit a simple smoothing spline and predict: FLOW ----
covar <- NULL
dontDo <- FALSE
if(sum(!is.na(df[[ii]]$flow_cfs)) > 3){
m1[[ii]] <- smooth.spline(df[[ii]][!is.na(df[[ii]]$flow_cfs),]$date,df[[ii]][!is.na(df[[ii]]$flow_cfs),]$flow_cfs,cv=TRUE)
if("covar" %in% names(obs.eff.df)){
if(is.na(obs.eff.df$covar[1])){
obs.eff.df$covar <- paste0("flow_cfs")
} else if(!("flow_cfs" %in% names(obs.eff.df))) {
obs.eff.df$covar <- paste0(obs.eff.df$covar," + flow_cfs")
} else {
dontDo <- TRUE # <---- In this case, we already have this covariate from a previous ii run.
}
} else {
obs.eff.df$covar <- "flow_cfs"
}
if(dontDo == FALSE){
obs.eff.df$flow_cfs <- NA
# ---- Not the best, but works with two possible IDChoices.
if(ii == 1){
obs.eff.df[obs.eff.df$TrapPositionID %in% xwalk[xwalk$ourSiteIDChoice1 == oursitevar,]$subSiteID,]$flow_cfs <- predict(m1[[ii]],as.numeric(obs.eff.df$batchDate))$y
} else if(ii == 2){
obs.eff.df[obs.eff.df$TrapPositionID %in% xwalk[xwalk$ourSiteIDChoice2 == oursitevar,]$subSiteID,]$flow_cfs <- predict(m1[[ii]],as.numeric(obs.eff.df$batchDate))$y
}
#df[[ii]]$pred_flow_cfs <- predict(m1[[ii]])$y
df[[ii]]$pred_flow_cfs <- predict(m1[[ii]],x=as.numeric(df[[ii]]$date))$y
# ---- See if we have any predicted values outside the range for which we have data.
if(sum(df[[ii]]$date < min.date.flow | df[[ii]]$date > max.date.flow) > 0){
df[[ii]][df[[ii]]$date < min.date.flow | df[[ii]]$date > max.date.flow,]$pred_flow_cfs <- NA
}
# ---- Build a dataframe like the CAMP covariates. If we're running against the unit table, we have no statistic.
# ---- For flow, call it 450L, for temp, 451L.
if(!(nGood > 0)){
dbFlPG <- data.frame(subSiteID=NA,measureDate=df[[ii]]$date,flow_cfs=df[[ii]]$flow_cfs,flow_cfsUnitID=rep(450L,nrow(df[[ii]])))
} else {
dbFlPG <- data.frame(subSiteID=NA,measureDate=df[[ii]]$date,flow_cfs=df[[ii]]$flow_cfs,flow_cfsUnitID=df[[ii]]$flow_statistic)
}
# ---- See if we have any predicted values outside the range for which we have data. Off by a day..? Daylight savings? So buffer.
if(sum(obs.eff.df$batchDate + 60*60 < min.date.flow | obs.eff.df$batchDate - 60*60 > max.date.flow) > 0){
obs.eff.df[obs.eff.df$batchDate + 60*60 < min.date.flow | obs.eff.df$batchDate - 60*60 > max.date.flow,]$flow_cfs <- NA
}
}
} else if(!exists("dbFlPG")){
dbFlPG <- NULL
}
# ---- Fit a simple smoothing spline and predict: TEMPERATURE ----
dontDo <- FALSE
if(sum(!is.na(df[[ii]]$temp_c)) > 3){
m2[[ii]] <- smooth.spline(as.numeric(df[[ii]][!is.na(df[[ii]]$temp_c),]$date),df[[ii]][!is.na(df[[ii]]$temp_c),]$temp_c,cv=TRUE)
if("covar" %in% names(obs.eff.df)){
if(is.na(obs.eff.df$covar[1])){
obs.eff.df$covar <- paste0("temp_c")
} else if(!("temp_c" %in% names(obs.eff.df))) {
obs.eff.df$covar <- paste0(obs.eff.df$covar," + temp_c")
} else {
dontDo <- TRUE # <---- In this case, we already have this covariate from a previous ii run.
}
} else {
obs.eff.df$covar <- "temp_c"
}
if(dontDo == FALSE){
obs.eff.df$temp_c <- NA
if(ii == 1){
obs.eff.df[obs.eff.df$TrapPositionID %in% xwalk[xwalk$ourSiteIDChoice1 == oursitevar,]$subSiteID,]$temp_c <- predict(m2[[ii]],as.numeric(obs.eff.df$batchDate))$y
} else if(ii == 2){
obs.eff.df[obs.eff.df$TrapPositionID %in% xwalk[xwalk$ourSiteIDChoice2 == oursitevar,]$subSiteID,]$temp_c <- predict(m2[[ii]],as.numeric(obs.eff.df$batchDate))$y
}
#df[[ii]]$pred_temp_c <- predict(m2[[iii]])$y
df[[ii]]$pred_temp_c <- predict(m2[[ii]],x=as.numeric(df[[ii]]$date))$y
# ---- See if we have any predicted values outside the range for which we have data.
if(sum(df[[ii]]$date < min.date.temp | df[[ii]]$date > max.date.temp) > 0){
df[[ii]][df[[ii]]$date < min.date.temp | df[[ii]]$date > max.date.temp,]$pred_temp_c <- NA
}
# ---- Build a dataframe like the CAMP covariates. If we're running against the unit table, we have no statistic.
# ---- For flow, call it 450L, for temp, 451L. Recall that oursitevar >= 80 means EnvCovDB from unit table is used.
if(!(nGood > 0)){
dbTpPG <- data.frame(subSiteID=NA,measureDate=df[[ii]]$date,temp_c=df[[ii]]$temp_c,temp_cUnitID=451L)
} else {
dbTpPG <- data.frame(subSiteID=NA,measureDate=df[[ii]]$date,temp_c=df[[ii]]$temp_c,temp_cUnitID=df[[ii]]$temp_statistic)
}
# ---- See if we have any predicted values outside the range for which we have data. Off by a day..? Daylight savings? So buffer.
if(sum(obs.eff.df$batchDate + 60*60 < min.date.temp | obs.eff.df$batchDate - 60*60 > max.date.temp) > 0){
obs.eff.df[obs.eff.df$batchDate + 60*60 < min.date.temp | obs.eff.df$batchDate - 60*60 > max.date.temp,]$temp_c <- NA
}
}
} else if(!exists("dbTpPG")){
dbTpPG <- NULL
}
# ---- Next for data from the CAMP db, which could be collected per subSiteID. Reduce to the set of subsiteIDs in this run. This
# ---- is necessary for the Feather, which has many sites that branch into individual subSiteIDs. Do this for each covar.
#dbCov <- dbCovar[[ii]]
# ---- Now, bring in smoothing-spline estimated values. All Units summarized in SQL QryCleanEnvCov.sql.
if(ii == 1){
obs.eff.df <- estCovar(dbDisc,"discharge_cfs",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbDpcm,"waterDepth_cm",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbATpF,"airTemp_F",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbTurb,"turbidity_ntu",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbWVel,"waterVel_fts",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbWTpC,"waterTemp_C",1,traps,obs.eff.df,xwalk,oursitevar)
obs.eff.df <- estCovar(dbLite,"lightPenetration_cm",1,traps,obs.eff.df,xwalk,oursitevar)
#obs.eff.df <- estCovar(dbDOxy,"dissolvedOxygen_mgL",1,traps,obs.eff.df,xwalk,oursitevar)
#obs.eff.df <- estCovar(dbCond,"conductivity_mgL",1,traps,obs.eff.df,xwalk,oursitevar)
#obs.eff.df <- estCovar(dbBaro,"barometer_inHg",1,traps,obs.eff.df,xwalk,oursitevar)
#obs.eff.df <- estCovar(dbWeat,"precipLevel_qual",2,traps,obs.eff.df,xwalk,oursitevar)
}
obs.eff.df <- obs.eff.df[order(obs.eff.df$TrapPositionID,obs.eff.df$batchDate),]
}
# ---- Estimate percQ for RBDD ----
# This will swap out flow_cfs for percQ.
# Note empty dbperQ has Date instead of POSIXct. Don't think this matters.
dbPerQ <- data.frame(subSiteID=integer(),measureDate=as.Date(character()),percQ=numeric(),percQUnitID=integer(),stringsAsFactors=FALSE)
if( site == 42000 ){
dbPerQ <- percQ(hrflow=df[[2]])
# ---- The obs.eff.df batchDate is off by 8 hours; i.e., it's 8 hours earlier than what it should be. This could be due
# ---- to not setting tz = "UTC", which maybe up to now hasn't mattered. I do not know how DST messes with this "8"-hour
# ---- difference. To avoid that headache, merge on a Date type instead.
obs.eff.df$tmpDate <- as.Date(obs.eff.df$batchDate)
obs.eff.df$subSiteID <- as.numeric(levels(obs.eff.df$TrapPositionID))[obs.eff.df$TrapPositionID]
dbPerQ2 <- dbPerQ
names(dbPerQ)[names(dbPerQ) == "measureDate"] <- "batchDate"
dbPerQ$tmpDate <- as.Date(dbPerQ$batchDate)
obs.eff.df <- merge(obs.eff.df,dbPerQ[,c("tmpDate","subSiteID","percQ")],by=c("tmpDate","subSiteID"),all.x=TRUE)
obs.eff.df$tmpDate <- obs.eff.df$subSiteID <- NULL
# ---- Put these back.
names(dbPerQ)[names(dbPerQ) == "batchDate"] <- "measureDate"
names(dbPerQ)[names(dbPerQ) == "TrapPositionID"] <- "subSiteID"
# ---- Adjust the covar variable in obs.eff.df. Note that covar should have flow_cfs, because the RBDD should always
# ---- have flow. Can this break if they request a super short min.date and max.date?
flowPresent <- grepl("flow_cfs",obs.eff.df$covar,fixed=TRUE)
obs.eff.df[flowPresent,]$covar <- gsub("flow_cfs","percQ",obs.eff.df$covar[flowPresent],fixed=TRUE)
# ---- Always true? Remove flow_cfs.
if( "flow_cfs" %in% names(obs.eff.df) ){
obs.eff.df$flow_cfs <- NULL
}
}
# ---- Done ----
return(list(obs.eff.df=obs.eff.df,
dbDisc=dbDisc,
dbDpcm=dbDpcm,
dbATpF=dbATpF,
dbTurb=dbTurb,
dbWVel=dbWVel,
dbWTpC=dbWTpC,
dbLite=dbLite,
dbFlPG=dbFlPG,
dbTpPG=dbTpPG,
dbPerQ=dbPerQ))
}
|
144c4ceb98d0f0a0e88bb603f0839c50df83c883
|
172c1959d13207bf86c9fdcf2b3253ebf6a06451
|
/Scripts/TestRun_10_8_18.R
|
6ae6621004624ac991d5d95a361b59ac136be168
|
[] |
no_license
|
alfencl/CWS_gov
|
62018d032da1993ac46eade5287a8c981a84e0db
|
05b58579aed835dd7d6984a3279c5cf9afb049a7
|
refs/heads/master
| 2020-03-31T19:40:48.349644
| 2018-10-10T05:13:06
| 2018-10-10T05:13:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,276
|
r
|
TestRun_10_8_18.R
|
# Exploratory test run: do public vs. private drinking-water systems differ
# in compliance status and violation counts? Fits logistic GLMs (binary
# compliance) and Poisson GLMs (violation count) with system population and
# water source as controls, overall and within ownership subsets.
# NOTE(review): "OWNTERTYPE" appears to be a typo for "OWNERTYPE", but it is
# used consistently throughout, so the script runs as written.

# Load libraries: tidyverse for data manipulation, pscl for pR2()
# (pseudo-R-squared measures for fitted GLMs).
library(tidyverse)
library(pscl)
# Load data
Data <- read_csv("Data/10_8_19_TestRun.csv")

# ---- Binary public/private ownership variable ----
Data$OWNERTYPE_KDB18 <- as.factor(Data$OWNERTYPE_KDB18)
summary(Data$OWNERTYPE_KDB18)
# Create the new variable, then collapse the detailed ownership categories
# into "Private" vs. "Public" with a chain of ifelse() statements.
# NOTE(review): ifelse() on a factor comparison returns NA for NA inputs,
# so rows with missing or unmatched ownership end up NA here.
Data$OWNTERTYPE_binary <- NA
# NOTE(review): this as.factor() call is a no-op -- the variable is rebuilt
# as character by the ifelse() chain immediately below.
Data$OWNTERTYPE_binary <- as.factor(Data$OWNTERTYPE_binary)
Data$OWNTERTYPE_binary <- ifelse(Data$OWNERTYPE_KDB18 == "Private", "Private", NA)
Data$OWNTERTYPE_binary <- ifelse(Data$OWNERTYPE_KDB18 == "Federal Government", "Public", Data$OWNTERTYPE_binary)
Data$OWNTERTYPE_binary <- ifelse(Data$OWNERTYPE_KDB18 == "Local", "Public", Data$OWNTERTYPE_binary)
Data$OWNTERTYPE_binary <- ifelse(Data$OWNERTYPE_KDB18 == "State", "Public", Data$OWNTERTYPE_binary)
Data$OWNTERTYPE_binary <- ifelse(Data$OWNERTYPE_KDB18 == "State Government", "Public", Data$OWNTERTYPE_binary)
# Convert to factor and check the recode against the original level counts.
Data$OWNTERTYPE_binary <- as.factor(Data$OWNTERTYPE_binary)
summary(Data$OWNTERTYPE_binary)

# ---- Binary compliance variable ----
Data$HRTW_Compliance_Status <- as.factor(Data$HRTW_Compliance_Status) # make factor
summary(Data$HRTW_Compliance_Status)
# 1 = in compliance (IC) or returned to compliance (RTC); 0 = out of
# compliance (OOC); anything else stays NA.
Data$Compliance_binary <- NA
# Recode with ifelse() statements.
Data$Compliance_binary <- ifelse(Data$HRTW_Compliance_Status == "IC", 1, Data$Compliance_binary)
Data$Compliance_binary <- ifelse(Data$HRTW_Compliance_Status == "RTC", 1, Data$Compliance_binary)
Data$Compliance_binary <- ifelse(Data$HRTW_Compliance_Status == "OOC", 0, Data$Compliance_binary)
# Check the recode.
Data$Compliance_binary <- as.factor(Data$Compliance_binary)
summary(Data$Compliance_binary)

# Violation count is already an integer, which is what the Poisson models
# below expect -- no recoding needed.
summary(Data$Violation_Count)

# ---- Control variables: system size and water source ----
summary(Data$POP_TOTAL) # integer, as expected
Data$Source_coded_18 <- as.factor(Data$Source_coded_18)
summary(Data$Source_coded_18)# four source codes; collapsed to two below
# Groundwater (GW, GWP) vs. surface water (SW, SWP).
Data$Source_binary <- NA
Data$Source_binary <- ifelse(Data$Source_coded_18 == "GW", "GW", Data$Source_binary)
Data$Source_binary <- ifelse(Data$Source_coded_18 == "GWP", "GW", Data$Source_binary)
Data$Source_binary <- ifelse(Data$Source_coded_18 == "SW", "SW", Data$Source_binary)
Data$Source_binary <- ifelse(Data$Source_coded_18 == "SWP", "SW", Data$Source_binary)
Data$Source_binary <- as.factor(Data$Source_binary)
summary(Data$Source_binary)

# ---- Models: ownership (full sample) ----
# Logistic GLM: binary compliance ~ public/private ownership + controls.
# NOTE(review): the original run reported perfect separation in some
# variable; the offending predictor was not identified.
logit1 <- glm(Compliance_binary ~ OWNTERTYPE_binary + POP_TOTAL + Source_binary, data = Data, family = binomial(link="logit")) #HMMM saying there is perfect separation in a variable but I'm not sure where.
summary(logit1)
pR2(logit1)
# Poisson GLM: violation count ~ ownership + controls.
logit2 <- glm(Violation_Count ~ OWNTERTYPE_binary + POP_TOTAL + Source_binary, data = Data, family = "poisson")
summary(logit2)

# ---- Subset data into public and private systems ----
Public <- Data %>% filter(OWNTERTYPE_binary == "Public")
Private <- Data %>% filter(OWNTERTYPE_binary == "Private")
summary(Data$OWNTERTYPE_binary) # subset sizes match the factor counts

# ---- Models: organization type within public systems ----
summary(Public$`ORG-TYPE_coded_18`)
Public$`ORG-TYPE_coded_18` <- as.factor(Public$`ORG-TYPE_coded_18`)
summary(Public$`ORG-TYPE_coded_18`)
# Temporarily treat UNKNOWN as missing for these test models.
Public$`ORG-TYPE_coded_18`[Public$`ORG-TYPE_coded_18`== "UNKNOWN"] <- NA
summary(Public$`ORG-TYPE_coded_18`) # some private systems are miscoded as public; needs fixing in the source sheet
# Logistic GLM: compliance ~ public org type + controls.
logit3 <- glm(Compliance_binary ~ `ORG-TYPE_coded_18` + POP_TOTAL + Source_binary, data = Public, family = binomial(link="logit")) #HMMM saying there is perfect separation in a variable but I'm not sure where.
summary(logit3)
pR2(logit3)
# NOTE(review): the reference category defaults to the first factor level
# (alphabetical); use relevel()/fct_relevel() to choose it deliberately.
# Poisson GLM: violation count ~ public org type + controls.
logit4 <- glm(Violation_Count ~ `ORG-TYPE_coded_18` + POP_TOTAL + Source_binary, data = Public, family = "poisson")
summary(logit4)
# Some org types significant, but fitting issues ("numerically fitting
# between categories") recurred in the original run.
# ---- Models: organization type within private systems ----
summary(Private$`ORG-TYPE_coded_18`)
Private$`ORG-TYPE_coded_18` <- as.factor(Private$`ORG-TYPE_coded_18`)
summary(Private$`ORG-TYPE_coded_18`)
# Temporarily treat UNKNOWN as missing for these test models.
Private$`ORG-TYPE_coded_18`[Private$`ORG-TYPE_coded_18`== "UNKNOWN"] <- NA
summary(Private$`ORG-TYPE_coded_18`) # some public systems are miscoded as private; needs fixing in the source sheet
# Logistic GLM: compliance ~ private org type + controls.
logit5 <- glm(Compliance_binary ~ `ORG-TYPE_coded_18` + POP_TOTAL + Source_binary, data = Private, family = binomial(link="logit")) #HMMM saying there is perfect separation in a variable but I'm not sure where.
summary(logit5) # nothing significant
pR2(logit5)
# Poisson GLM: violation count ~ private org type + controls.
logit6 <- glm(Violation_Count ~ `ORG-TYPE_coded_18` + POP_TOTAL + Source_binary, data = Private, family = "poisson")
summary(logit6) # nothing significant again
# ---- Models: regulatory agency ----
summary(Data$WQ_REG)
Data$WQ_REG <- as.factor(Data$WQ_REG)
summary(Data$WQ_REG)
# Logistic GLM: compliance ~ regulatory agency + controls.
logit7 <- glm(Compliance_binary ~ WQ_REG + POP_TOTAL + Source_binary, data = Data, family = binomial(link="logit")) #HMMM saying there is perfect separation in a variable but I'm not sure where.
summary(logit7) # not significant
pR2(logit7)
# Poisson GLM: violation count ~ regulatory agency + controls.
logit8 <- glm(Violation_Count ~ WQ_REG + POP_TOTAL + Source_binary, data = Data, family = "poisson")
summary(logit8) # significant
# ---- Kitchen-sink models with all predictors ----
# Logistic (binomial) version.
logit9 <- glm(Compliance_binary ~ `ORG-TYPE_coded_18` + WQ_REG + POP_TOTAL + Source_binary + OWNTERTYPE_binary, data = Data, family = binomial(link="logit")) #HMMM saying there is perfect separation in a variable but I'm not sure where.
summary(logit9) # not significant
pR2(logit9)
# Poisson (count) version.
logit10 <- glm(Violation_Count ~ `ORG-TYPE_coded_18` + WQ_REG + POP_TOTAL + Source_binary + OWNTERTYPE_binary, data = Data, family = "poisson")
summary(logit10)
|
ede4ebf22cc71f528e29f60cd4f72d92cc17815d
|
4bb7939d3e0c0386f0f98925ff2e2524a6c6b230
|
/R_code_plot_ecol_var_.r
|
b4246e01cfdfc0e352b6b792dc406d686dc029ea
|
[] |
no_license
|
alessandroperrone5/monitoring_2021
|
eb9b9646feadde070aea8a7b6e7173ef711fa890
|
63fa62bcd17e6187aa67ccfeeed3a5558399f892
|
refs/heads/main
| 2023-02-22T01:03:30.593916
| 2021-01-27T17:07:46
| 2021-01-27T17:07:46
| 309,311,213
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,069
|
r
|
R_code_plot_ecol_var_.r
|
# R code for plotting relationships among ecological variables, using the
# 'meuse' dataset (heavy-metal concentrations in river floodplain soils)
# shipped with the sp package.
# FIX(review): the original file contained pasted console output and free
# text ("see an error --> ...", "Error in h(simpleError(...", "not error:")
# as bare lines, so the script did not parse; that text is now in comments.

# install.packages() downloads and installs a package (needed once per
# machine).
install.packages("sp")
# library() loads and attaches an installed package; sp is for spatial
# analysis.
library(sp)
# data() is used to recall datasets shipped with attached packages.
data(meuse)
# Let's look inside the dataset.
meuse
# View (capital V) invokes a spreadsheet-style data viewer on a
# matrix-like R object (see the table with data).
View(meuse)
# head() is used to see the first rows.
head(meuse)
# Exercise: mean of all values = sum of values / number of values, or...
# summary() reports min/quartiles/mean/max for every column at once.
summary(meuse)
# Exercise: plot zinc (y) against cadmium (x).
# At this point plot(cadmium, zinc) fails, because the columns are not
# standalone objects; R reports (message translated from Italian):
#   Error in h(simpleError(msg, call)) :
#   error in evaluating the argument 'x' in selecting a method for
#   function 'plot': object "cadmium" not found
# $ links an element (column) to its dataset, so this works:
plot(meuse$cadmium, meuse$zinc)
# After attach() the $ is no longer necessary -- run it once, then
# detach() to stop it.
attach(meuse)
plot(cadmium,zinc)
# pairs() draws scatterplot matrices (to compare all the elements and all
# their possible pairwise relationships).
pairs(meuse)
##### Lecture 9/11
# Recall the dataset: library(...) then data(...).
library(sp)
data(meuse)
pairs(meuse)
head(meuse)
# cadmium, copper, lead, zinc are the soil variables, in columns 3 to 6.
# pairs() with soil variables only (not the whole dataset):
# [, n1:n2] -- square brackets select elements of the dataset; the comma
# separates row and column indices (by default R counts columns).
pairs(meuse[,3:6])
# Same selection using the names of the columns; the tilde ~ (Alt+126)
# groups several objects together in one formula.
pairs(~cadmium+copper+lead+zinc, data=meuse)
# Let's prettify the graph.
# Exercise: change the color (col=; par() changes single panels).
pairs(~cadmium+copper+lead+zinc, data=meuse, col="red")
# Exercise: change the symbol to filled triangles (pch=17).
pairs(~cadmium+copper+lead+zinc, data=meuse, col="red", pch=17)
# Exercise: increase the size of the triangles (cex=3).
pairs(~cadmium+copper+lead+zinc, data=meuse, col="red", pch=17, cex=3)
|
18bc7aa2f8dc227da86db9fb0d2bcdc7b4c7b9ce
|
4bd57b8501d4326ecc06c1d1ea499935e1668d95
|
/MASH-dev/DavidSmith/PDG-PfSIM/pdgpfsim.R
|
65512eae3178074636e4300762f0c21c00ec3ef5
|
[] |
no_license
|
aucarter/MASH-Main
|
0a97eac24df1f7e6c4e01ceb4778088b2f00c194
|
d4ea6e89a9f00aa6327bed4762cba66298bb6027
|
refs/heads/master
| 2020-12-07T09:05:52.814249
| 2019-12-12T19:53:24
| 2019-12-12T19:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,852
|
r
|
pdgpfsim.R
|
# Simulation state for an individual in the PDG Pf model.
# FIX(review): the original had `G = na`, which errors as soon as the list
# is evaluated because no object named `na` exists; the missing-value
# constant in R is NA.
state <- list(
  fever = 0,       # Fever
  severe = 0,      # Severe Malaria
  P = 0,
  MOI = seq(0,10), # presumably multiplicity of infection (0..10) -- confirm
  iG = -1,
  G = NA,          # was `na` (undefined object) in the original
  C = seq(0,2)
)
# Simulation clock.
now <- 0
# Per-stage counts of new and old infections.
newInfections <- rep(0,5)
oldInfections <- rep(0,6)
# Advance the stage counts of old infections one stochastic step: each of
# the six transient stages in S is redistributed with a multinomial draw
# over the corresponding column of the transition matrix P6t (defined at
# the top level of this script), and the draws are summed.
# The draws run in stage order 1..6, matching the original, so the RNG
# stream is consumed identically.
updateOld6 <- function(S) {
  draws <- lapply(seq_len(6), function(stage) {
    rmultinom(1, S[stage], P6t[, stage])
  })
  Reduce(`+`, draws)
}
# Weekly stage-transition probabilities.
# qq: probability of staying put; pp: probability of moving to the
# adjacent (lower-index) stage. NOTE(review): interpretation inferred from
# the matrix layout below -- confirm against the model write-up.
qq = 4/5
pp = 1/10
# 5x5 stage-transition matrix. The inner c(...) vectors are written as
# rows; matrix() fills column-wise, so t() restores the intended layout.
# State 5 (last row/column) is absorbing (self-probability 1).
Pt = t(matrix(
  c(
    c(qq, pp, 0, 0, 0),
    c(1-qq, (1-pp)*qq, pp, 0, 0),
    c(0, (1-pp)*(1-qq), (1-pp)*qq, pp, 0),
    c(0, 0, (1-pp)*(1-qq), (1-pp)*qq,0),
    c(0, 0, 0, (1-pp)*(1-qq), 1)
  )
  ,5,5))
# Build the 7x7 matrix P6t used by updateOld6(): take the upper-left 4x3
# corner of Pt, pad with zero rows to 7 rows, then append columns that are
# down-shifted copies of the previous column (so each successive stage has
# the same local dynamics), and finish with an absorbing 7th state
# (column c(0,...,0,1)).
P6t = Pt[1:4, 1:3]
P6t = rbind(P6t, 0)
P6t = rbind(P6t, 0)
P6t = rbind(P6t, 0)
P6t = cbind(P6t, c(0,P6t[-7,3]))
P6t = cbind(P6t, c(0,P6t[-7,4]))
P6t = cbind(P6t, c(0,P6t[-7,5]))
P6t = cbind(P6t, c(0,0,0,0,0,0,1))
# Initial state: all mass in stage 1 (7-element column vector); Sh
# accumulates the state history column-by-column.
S = matrix(c(1,0,0,0,0,0,0), 7, 1)
Sh = S
# Plot the dominant-stage trajectory implied by a state-history matrix Sh
# (one column per 7-day step). For each column the occupied stage with
# highest priority is located by weighting entries by descending powers of
# ten, converted to a "level" 7 - index, truncated after the first zero
# (with a final 0 appended), and drawn as a line with a horizontal
# reference at 2.5. Returns the level vector.
# NOTE(review): this definition is shadowed by a later plotsh() in this
# script that takes the already-computed level vector instead of Sh.
plotsh = function(Sh){
  dominant <- function(col) which.max(col * 10^(6:0))
  lev <- 7 - apply(Sh, 2, dominant)
  lev <- c(lev[which(lev > 0)], 0)
  weeks <- 7 * seq_along(lev)
  plot(weeks, lev, type = "l")
  segments(0, 2.5, max(weeks) * 7, 2.5)
  lev
}
# Add a dominant-stage trajectory derived from a state-history matrix Sh
# to an existing plot: same computation as the companion plotsh(), but
# drawn with lines() instead of opening a new plot (a device with an
# active plot must already exist). Returns the level vector.
# NOTE(review): this definition is shadowed by a later linesh() in this
# script that takes the level vector directly.
linesh = function(Sh){
  dominant <- function(col) which.max(col * 10^(6:0))
  lev <- 7 - apply(Sh, 2, dominant)
  lev <- c(lev[which(lev > 0)], 0)
  weeks <- 7 * seq_along(lev)
  lines(weeks, lev, type = "l")
  segments(0, 2.5, max(weeks) * 7, 2.5)
  lev
}
# Plot the expected (mean-field) time-course of infections evolving under
# the transition matrix P6t (defined at the top level of this script).
#
# S      -- initial stage-distribution vector (7 states; state 7 is the
#           absorbing/cleared state).
# plotit -- accepted for interface compatibility; the original body never
#           referenced it, and this version keeps that behavior.
#
# Iterates S <- P6t %*% S until essentially no mass (< 0.001) remains in
# the six transient states, then plots 1 - P(cleared) (dashed) and the
# "patent" fraction (stages 1-3, solid), marking on the x-axis the first
# week at which patency falls below 0.5, 0.2, 0.1, and 0.05.
# FIX(review): the threshold-marking block was duplicated verbatim four
# times in the original; it is now a single loop over the thresholds.
meanInfection = function(S, plotit=TRUE){
  Sh = S
  # Step the mean-field dynamics, recording each state as a new column.
  while (sum(S[-7] > .001)) {
    S = P6t %*% S
    Sh = cbind(Sh, S)
  }
  t = 1:dim(Sh)[2]
  # Dashed curve: fraction not yet cleared; x-axis drawn manually below.
  plot(7*t, 1 - Sh[7, ], type = "l", lty = 2, xaxt = "n")
  # Solid curve: patent (detectable) infections, stages 1-3.
  patent = colSums(Sh[1:3, ])
  lines(7*t, patent)
  # Mark the first week at which patency drops below each threshold.
  for (vv in c(0.5, 0.2, 0.1, 0.05)) {
    ix = min(which(patent < vv))
    segments(0, vv, 7*t[ix], vv, lty = 2)
    segments(7*t[ix], 0, 7*t[ix], vv, lty = 2)
    axis(1, 7*t[ix], 7*t[ix])
  }
}
# Simulate one stochastic infection trajectory: starting from stage-count
# vector S, draw transitions with updateOld6() until no individuals remain
# in the transient states 1-6, then convert the state history to a
# dominant-stage level trajectory (7 - index of the highest-priority
# occupied stage per step).
#
# plotit -- "plot" opens a new plot via plotsh(), "lines" overlays the
#           trajectory via linesh() in color i; anything else draws nothing.
# Returns the level trajectory vector.
oneInfection = function(S, plotit = "plot", i = 0) {
  history <- S
  while (sum(S[-7]) > 0) {
    S <- updateOld6(S)
    history <- cbind(history, S)
  }
  dominant <- function(col) which.max(col * 10^(6:0))
  trajectory <- 7 - apply(history, 2, dominant)
  if (plotit == "plot") {
    plotsh(trajectory)
  }
  if (plotit == "lines") {
    linesh(trajectory, i)
  }
  trajectory
}
# Plot an already-computed level trajectory Pt against weeks (7-day steps),
# with a fixed 0-700 x-range and a dotted reference line at 2.5.
# (This definition shadows the earlier plotsh() that took a state matrix.)
plotsh = function(Pt){
  weeks <- 7 * seq_along(Pt)
  plot(weeks, Pt, type = "l", xlim = c(0, 700))
  segments(0, 2.5, max(weeks) * 7, 2.5, lty = 3)
}
# Overlay an already-computed level trajectory Pt on the current plot in
# color i (a device with an active plot must already exist).
# (This definition shadows the earlier linesh() that took a state matrix.)
linesh = function(Pt, i = 0){
  weeks <- 7 * seq_along(Pt)
  lines(weeks, Pt, type = "l", xlim = c(0, 700), col = i)
}
# Driver: two stacked panels -- the mean-field time-course on top, then
# one realized stochastic trajectory with ten more overlaid in colors 1-10.
par(mfrow = c(2,1))
# Start with two individuals/parasites in stage 1 (state 7 = cleared).
S = c(2,0,0,0,0,0,0)
meanInfection(S)
Pt=oneInfection(S, "plot")
for(i in 1:10)
  oneInfection(S, "lines", i)
|
99df2a1b6dc4dc44ffd0de6d7c04cfc8ceaa90c1
|
066e60e4c6730a945fcba528328fc646945beaa3
|
/tests/testthat/test-01-parse.R
|
98aa8bee4c1a11887312375c34c1d61c0f34515d
|
[
"CC0-1.0"
] |
permissive
|
appling/unitted
|
0b59a79a949e7405cb443306216d7038a30c29ad
|
d1f11723cd274297f44adbb5d1af4990eb0cdf06
|
refs/heads/master
| 2021-01-17T07:34:17.966050
| 2017-07-21T22:38:37
| 2017-07-21T22:38:37
| 18,263,678
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,280
|
r
|
test-01-parse.R
|
context("parse")
knownbug <- function(expr, notes) invisible(NULL)
#### unitted:::parse_units ####
# Tests for the internal unit-string parser: each input string parses to a
# list with one data frame per string, holding columns Unit (character)
# and Power (numeric).
# FIX(review): modernized from the deprecated testthat style
# expect_that(x, equals(y)) / throws_error(msg) to the current
# expect_equal(x, y) / expect_error(x, msg) expectation functions; the
# unit strings and expected values are unchanged.
test_that("unitted:::parse_units works", {
  
  # well-formed unit strings using only the space delimiter
  expect_equal(unitted:::parse_units("kg ha^-1 yr^-1"), list(data.frame(Unit=c("kg","ha","yr"),Power=c(1,-1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("apples^-1 oranges^2.5"), list(data.frame(Unit=c("apples","oranges"),Power=c(-1,2.5),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("mg dm^-1 dm^-2 ug mg^-1"), list(data.frame(Unit=c("mg","dm","dm","ug","mg"),Power=c(1,-1,-2,1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("kg_C^1 ha^-1"), list(data.frame(Unit=c("kg_C","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  
  # empty unit strings - NA and "" are treated the same
  expect_equal(unitted:::parse_units(""), list(data.frame(Unit=character(),Power=numeric(),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units(NA), list(data.frame(Unit=character(),Power=numeric(),stringsAsFactors=FALSE)))
  
  # vectors of unit strings
  expect_equal(unitted:::parse_units(c("","mg dm^-1 dm^-2 ug mg^-1",NA)), list(
    data.frame(Unit=character(),Power=numeric(),stringsAsFactors=FALSE),
    data.frame(Unit=c("mg","dm","dm","ug","mg"),Power=c(1,-1,-2,1,-1),stringsAsFactors=FALSE),
    data.frame(Unit=character(),Power=numeric(),stringsAsFactors=FALSE)))
  
  # delimited unit strings
  expect_equal(unitted:::parse_units("|kg C|^1 ha^-1"), list(data.frame(Unit=c("kg C","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg C|^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg C","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg C|^1 ha^-1",delimiter="?"), list(data.frame(Unit=c("|kg","C|","ha"),Power=c(1,1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("?kg C?^1 ha^-1",delimiter="?"), list(data.frame(Unit=c("kg C","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("$kg C$^1 ha^-1",delimiter="$"), list(data.frame(Unit=c("kg C","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  
  # unit strings with tabs, newlines, special characters, and delimiters
  expect_equal(unitted:::parse_units(" mg dm^-1 dm^-2 ug mg^-1  "), list(data.frame(Unit=c("mg","dm","dm","ug","mg"),Power=c(1,-1,-2,1,-1),stringsAsFactors=FALSE)), info="extra spaces")
  expect_equal(unitted:::parse_units("mg \t dug\nmg^-1 \t\n\t"), list(data.frame(Unit=c("mg","dug","mg"),Power=c(1,1,-1),stringsAsFactors=FALSE)), info="space and tab")
  expect_equal(unitted:::parse_units("|kg \t\nC |^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg \t\nC ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg ?_+.({[*C |^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg ?_+.({[*C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg ?_+.({[*\\C |^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg ?_+.({[*\\C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("Xkg ?_+.({[*\\C X^1 ha^-1",delimiter="X"), list(data.frame(Unit=c("kg ?_+.({[*\\C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  
  # extra scary unit strings - ^ and \\ were problems at one point
  expect_equal(unitted:::parse_units("|kg ?_+^.({[*\\C |^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg ?_+^.({[*\\C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("Xkg ?_+^.({[*\\C X^1 ha^-1",delimiter="X"), list(data.frame(Unit=c("kg ?_+^.({[*\\C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg ?_+^.({[*C |^1 ha^-1",delimiter="|"), list(data.frame(Unit=c("kg ?_+^.({[*C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("Xkg ?_+^.({[*C X^1 ha^-1",delimiter="X"), list(data.frame(Unit=c("kg ?_+^.({[*C ","ha"),Power=c(1,-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("|kg C^1 ha|^-1",delimiter="|"), list(data.frame(Unit=c("kg C^1 ha"),Power=c(-1),stringsAsFactors=FALSE)))
  expect_equal(unitted:::parse_units("Xkg C^1 haX^-1",delimiter="X"), list(data.frame(Unit=c("kg C^1 ha"),Power=c(-1),stringsAsFactors=FALSE)))
  
  # still problematic
  knownbug(expect_error(unitted:::parse_units("^and")), "breaks - doesn't notice")
  knownbug(expect_error(unitted:::parse_units("or^and")), "'or^and' shouldn't be parsed into 'or'")
  
  # badly formed numeric substrings
  knownbug(expect_error(unitted:::parse_units("m^-1/2")), "breaks; bad parse not caught")
  expect_error(unitted:::parse_units("m^-(1/2)"), "Invalid number")
  knownbug(expect_error(unitted:::parse_units("m^(-1/2)"), "Invalid number"), "breaks; bad parse not caught")
})
#### unitted:::merge_units ####
test_that("unitted:::merge_units works", {
  pu <- unitted:::parse_units
  mu <- unitted:::merge_units
  # Neither parse_units nor merge_units reorders terms, so round-tripping a
  # unit string should reproduce it exactly, modulo two known exceptions:
  # (1) number formatting and (2) delimiter removal.
  # straight translation:
  expect_equal(mu(pu("kg ha^-1 yr^-1")), "kg ha^-1 yr^-1")
  expect_equal(mu(pu("apples^-1 oranges^2.5")), "apples^-1 oranges^2.5")
  expect_equal(mu(pu("mg dm^-1 dm^-2 ug mg^-1")), "mg dm^-1 dm^-2 ug mg^-1")
  # number formatting exception - a power of 1 is dropped
  expect_equal(mu(pu("kg_C^1 ha^-1")), "kg_C ha^-1")
  expect_equal(mu(pu("kg_C^1.0000000000000001 ha^-.99999999999999999")), "kg_C ha^-1")
  expect_equal(mu(pu("kg_C^1.1 ha^-.99")), "kg_C^1.1 ha^-0.99")
  # delimiter removal exception - delimiters are dropped during parsing
  expect_equal(mu(pu("|kg_C|^1 ha^-1")), "kg_C ha^-1")
  expect_equal(mu(pu("|kg SO_4^2-|^1 ha^-1"), rule="never"), "kg SO_4^2- ha^-1")
  # empty unit strings - NA and "" are treated the same
  expect_equal(mu(pu("")), "")
  expect_equal(mu(pu(NA)), "")
  # vectors of unit strings
  expect_equal(mu(pu(c("","mg dm^-1 dm^-2 ug mg^-1",NA))), c("","mg dm^-1 dm^-2 ug mg^-1",""))
  # potentially ambiguous strings with delimiters
  ambiguous <- c("|hi there| Wei","|or^1 and|", "Joe")
  expect_equal(pu(ambiguous), pu(mu(pu(ambiguous), "|", "disambiguate")))
  expect_equal(pu(ambiguous), pu(mu(pu(ambiguous), "|", "always")))
  expect_equal(pu(c("hi there Wei","or and", "Joe")), pu(mu(pu(ambiguous), "|", "never")))
  expect_equal(mu(pu(ambiguous), "*"), c("*hi there* Wei", "*or^1 and*", "Joe"))
})
#### unitted:::simplify_units ####
test_that("unitted:::simplify_units works", {
  pu <- unitted:::parse_units
  mu <- unitted:::merge_units
  su <- unitted:::simplify_units
  # parse_units and merge_units were tested above, so they are treated as
  # trusted helpers here; simplify_units should cancel repeated units.
  expect_equal(mu(su(pu("kg ha^-1 yr^-1"))), "kg ha^-1 yr^-1")
  expect_equal(mu(su(pu("kg kg ha^-1 kg^-2 yr^-1"))), "ha^-1 yr^-1")
  expect_equal(mu(su(pu("kg kg ha^-1 kg^-2 yr^-1"))), "ha^-1 yr^-1")
  expect_equal(mu(su(pu("ha^-1 ha^2.1 kg kg ha^-1 kg^-2 yr^-1"))), "ha^0.1 yr^-1")
  # empty unit strings - NA and "" are treated the same
  expect_equal(mu(su(pu(""))), "")
  expect_equal(mu(su(pu(NA))), "")
  # vectors of unit strings
  expect_equal(mu(su(pu(c("uni^3 tt^2 ed^1", "", NA, "hex^2 a^3 gon^4")))),
               c("uni^3 tt^2 ed", "", "", "hex^2 a^3 gon^4"))
})
#### unitted:::sort_units ####
test_that("unitted:::sort_units works", {
  pu <- unitted:::parse_units
  mu <- unitted:::merge_units
  so <- unitted:::sort_units
  # parse_units and merge_units were tested above, so they are treated as
  # trusted helpers here; sort_units should reorder unit terms.
  expect_equal(mu(so(pu("kg ha^-1 yr^-1"))), "kg ha^-1 yr^-1")
  expect_equal(mu(so(pu("kg ha^-1 kg^-2 yr^-1 kg"))), "kg kg ha^-1 kg^-2 yr^-1")
  # positive powers come first, sorted alphabetically, followed by units
  # with negative powers, also sorted alphabetically
  expect_equal(mu(so(pu("a^-1 b^-2 c^-1 d^9 e^1 f^3"))), "d^9 e f^3 a^-1 b^-2 c^-1")
  expect_equal(mu(so(pu("b^-1 c^-2 a^-1 f^9 e^1 d^3"))), "d^3 e f^9 a^-1 b^-1 c^-2")
  # empty unit strings - NA and "" are treated the same
  expect_equal(mu(so(pu(""))), "")
  expect_equal(mu(so(pu(NA))), "")
  # vectors of unit strings
  expect_equal(mu(so(pu(c("uni^3 tt^2 ed^1", "", NA, "hex^2 a^3 gon^4")))),
               c("ed tt^2 uni^3", "", "", "a^3 gon^4 hex^2"))
})
|
312d174b2af2dbc73892f60bfce519ed54af443d
|
ab7841ca6d0244120ea6ce8022a56ff13ac23ca1
|
/man/statTest.Rd
|
60b38a643da0252e321e4eb16ff62796e2271ed6
|
[] |
no_license
|
Huang-lab/oppti
|
3617dbaef74e590ab94578645e4916cdfff0f6e0
|
b171c64707483c110c76c3193ab1045b59fdb51c
|
refs/heads/master
| 2023-04-10T02:09:57.773679
| 2023-03-20T10:13:53
| 2023-03-20T10:13:53
| 197,084,440
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,687
|
rd
|
statTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze.R
\name{statTest}
\alias{statTest}
\title{Analyze dysregulation significance}
\usage{
statTest(dat, dat.imp, marker.proc.list = NULL, pval.insig = 0.2)
}
\arguments{
\item{dat}{an object of log2-normalized protein (or gene) expressions,
containing markers in rows and samples in columns.}
\item{dat.imp}{the imputed data that putatively represents the expressions
of the markers in the (matched) normal states.}
\item{marker.proc.list}{character array, the row names of the data to be
processed for dysregulation significance.}
\item{pval.insig}{p-value threshold to determine spurious (null)
dysregulation events.}
}
\value{
each marker's p-value of the statistical significance between its
observed vs imputed values computed by the KS test.
ranked p-values (KS test) of the significant markers, which are
lower than pval.insig.
ranked significantly dysregulated markers with p-values lower than
pval.insig.
ranked p-values (KS test) of the insignificant markers, which are
greater than pval.insig.
ranked insignificantly dysregulated markers (spurious
dysregulations) with p-values greater than pval.insig.
}
\description{
Rank-order markers by the significance of deviation of the
observed expressions from the (matched) imputed expressions based on the
Kolmogorov-Smirnov (KS) test.
}
\examples{
set.seed(1)
dat = setNames(as.data.frame(matrix(runif(10*10),10,10),
row.names = paste('marker',1:10,sep='')), paste('sample',1:10,sep=''))
dat.imp = artImpute(dat, ku=6)
result = statTest(dat, dat.imp) # the dysregulations on marker4 is
# statistically significant with p-value 0.05244755.
}
|
f393071fece4962e0353b7443d2c0474ca5d3372
|
c3c9324afc6873b0de45a3cffcdfcdb884163288
|
/fiddles/fiddle_bayes/cube/kyoobe-etc.r
|
7dc8425213da1899d1b16ab92a908550320818b8
|
[] |
no_license
|
lefft/boosh
|
273c44b04f65f64937fc319d5f4542a0b79daf4a
|
e4c0665ab1db9c7b47ce4085bf070fed9d8f37ea
|
refs/heads/master
| 2020-03-28T00:13:11.754076
| 2018-01-04T21:16:31
| 2018-01-04T21:16:31
| 64,482,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,448
|
r
|
kyoobe-etc.r
|
# Cube / weighted-coin simulation: a cube holds `cz` coins, a fraction pz[1]
# of which land heads with probability 0.9 and the rest with probability 0.1.
# For each (strategy, mixture) combination we simulate `runz` draws+flips and
# plot conditional vs joint probabilities.
# pz values from the book chapter were c(.25, .75).
library(dplyr)
library(reshape2)
library(pryr)
library(ggplot2)

probz <- c(.1, .25, .5, .75, .9)   # candidate proportions of the 0.9-heads coin
stratz <- c("redo", "init")        # draw strategies to compare
runz <- 10000                      # simulated draws per strategy

for (y in seq_along(stratz)) {
  for (x in seq_along(probz)) {
    pz <- c(probz[x], 1 - probz[x])  # mixture of (.9-coin, .1-coin)
    cz <- 100                        # number of coins in the cube

    # Draw a coin twice (one per strategy), keep the draw named by `drawtype`,
    # and flip it with the bias implied by which coin was drawn.
    run <- function(
      coins = paste0("coin", c(rep(".9", pz[1] * cz), rep(".1", pz[2] * cz))),
      vals = c("heads", "tails"),
      drawtype = "init"
    ) {
      draw_init <- sample(coins, size = 1)
      draw_redo <- sample(coins, size = 1)
      draw <- ifelse(drawtype == "init", draw_init, draw_redo)
      if (draw == "coin.9") {
        flip <- sample(vals, size = 1, prob = c(.9, .1))
      } else {
        flip <- sample(vals, size = 1, prob = c(.1, .9))
      }
      c(draw_init = draw_init, draw_redo = draw_redo, flip = flip, drawtype = drawtype)
    }

    # Run the simulation `numruns` times for each strategy and stack the results.
    sim <- function(numruns) {
      dimnames <- list(NULL, c("run", "draw", "redraw", "flip", "drawtype"))
      container_init <- matrix(NA, nrow = numruns, ncol = 5, dimnames = dimnames)
      container_redo <- matrix(NA, nrow = numruns, ncol = 5, dimnames = dimnames)
      for (i in seq_len(numruns)) {
        container_init[i, ] <- c(i, run(drawtype = "init"))
        container_redo[i, ] <- c(i, run(drawtype = "redo"))
      }
      data.frame(rbind(container_init, container_redo))
    }

    res <- sim(numruns = runz)
    res[sample(seq_len(nrow(res)), size = 5), ]  # peek at a few random rows

    redo <- res %>% filter(drawtype == "redo")
    init <- res %>% filter(drawtype == "init")

    # Quick summary: how often the .9 coin was drawn and how often heads came up.
    res %>% group_by(drawtype) %>% summarize(
      prop_draw.9 = sum(draw == "coin.9") / length(draw),
      prop_heads = sum(flip == "heads") / length(flip)
    ) %>% data.frame()

    (res %>% group_by(drawtype) %>% summarize(
      # prob of {heads, tails}
      pHead = sum(flip == "heads") / length(flip),
      pTail = sum(flip == "tails") / length(flip),
      # prob of drawing {.9 coin, .1 coin}
      p.9 = sum(draw == "coin.9") / length(draw),
      p.1 = sum(draw == "coin.1") / length(draw),
      # prob of having drawn {.9, .1} coin given {heads, tails}
      p.9gH = sum(draw == "coin.9" & flip == "heads") / sum(flip == "heads"),
      p.1gH = sum(draw == "coin.1" & flip == "heads") / sum(flip == "heads"),
      p.9gT = sum(draw == "coin.9" & flip == "tails") / sum(flip == "tails"),
      p.1gT = sum(draw == "coin.1" & flip == "tails") / sum(flip == "tails"),
      # joint prob of {heads, tails} and drawing {.9, .1} coin
      jH.9 = p.9gH * pHead, jH.1 = p.1gH * pHead,
      jT.9 = p.9gT * pTail, jT.1 = p.1gT * pTail
    ) %>% mutate_if(is.numeric, round, digits = 3) %>% data.frame() -> boosh)

    # joint prob p(flip==h, draw==.9) = p(draw==.9 | flip==h) * p(flip==h)
    prop.table(table(redo$flip, redo$draw, dnn = c("flip", "coin")))
    prop.table(table(init$flip, init$draw, dnn = c("flip", "coin")))

    booshlong <- melt(boosh, id.vars = 1:5)
    booshlong$probtype <- ifelse(
      grepl("j", booshlong$variable), "joint", "conditional"
    )
    mem <- mem_used()
    head(booshlong, 4)

    ggplot(booshlong[booshlong$drawtype == stratz[y], ],
           aes(x = variable, y = value, color = probtype)) +
      geom_bar(stat = "identity", position = "dodge", fill = "#e8e7de") +
      scale_y_continuous(limits = c(0, 1)) +
      labs(
        x = "", y = "prob",
        title = paste0("with .9coin prob ", pz[1], ", and ", cz, " coins"),
        subtitle = paste0("using strategy ", stratz[y], ", ",
                          runz, " runs", " (mem used: ", mem, ")"),
        # BUG FIX: the caption previously printed mean(boosh$p.1) under the
        # label "p.9"; report the quantity the label names.
        caption = paste0("pHead = ", mean(boosh$pHead), ", p.9 = ", mean(boosh$p.9))
      )
    ggsave(paste0("plots_still_janky/", stratz[y], x, " prob ", pz[1], "for .9 ", " and coins ", cz, ".pdf"), device = "pdf")
  }
}
|
6e7fb7a783a5a65d205187119ed26ef1cb5552d7
|
f355149c7fba0fe44a933778a5f78d4bbc275caa
|
/ALERTING-TRENDS.R
|
b37f49fa7ceb0fb2ab86ad8bc090b85ee8887354
|
[] |
no_license
|
Abdulmalik-Adeleke/R
|
8b4b4667f931764ec57c99e89fb8480cf37ce7b3
|
3090ddb129d8fa70f158d215f29fd95a4b7b7c79
|
refs/heads/main
| 2023-05-30T19:42:51.352335
| 2021-07-04T23:07:26
| 2021-07-04T23:07:26
| 382,964,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,752
|
r
|
ALERTING-TRENDS.R
|
# Alert on runs of `window` consecutive strictly increasing / decreasing
# values in a numeric vector.
#
# The original loop did not parse (dangling `else` branches with no
# consequent) and referenced `newadded`, whose assignment was commented out;
# it is rewritten here as a small reusable function.
#
# Args:
#   x      - numeric vector to scan
#   window - run length that constitutes a trend (default 3)
# Returns (invisibly) a character vector with one message per detected
# trend, and prints each alert via cat() as a side effect.
find_trends <- function(x, window = 3) {
  alerts <- character(0)
  if (length(x) >= window) {
    for (start in seq_len(length(x) - window + 1)) {
      run <- x[start:(start + window - 1)]
      steps <- diff(run)
      if (all(steps < 0)) {
        msg <- paste0("downward trend starting at ", run[1])
      } else if (all(steps > 0)) {
        msg <- paste0("upward trend at ", run[1])
      } else {
        next  # no monotone run beginning here
      }
      cat(msg, "\n")
      alerts <- c(alerts, msg)
    }
  }
  invisible(alerts)
}

x <- c(2, 3, 1, 19, 18, 12, 17, 18, 22, 32, 20, 11, 8, 5)
find_trends(x)
####################
# Check whether `run_length` consecutive numbers are ascending or descending.
# The default window is 3 but any length can be used.
#
# Fixes to the original:
#  - the `while (count < 2)` loop never changed what it compared, so every
#    alert was printed twice;
#  - `new + 2` indexed past the end of the vector, yielding NA comparisons
#    that crash `if`;
#  - "Dowward" typo in the alert message.
###################
x <- c(2, 3, 1, 19, 18, 12, 17, 18, 22, 32, 20, 11, 8, 5)
run_length <- 3  # how many consecutive elements constitute a trend
for (start in seq_len(length(x) - run_length + 1)) {
  window <- x[start:(start + run_length - 1)]
  if (all(diff(window) < 0)) {
    cat("Downward trend started at ", paste(window, collapse = " , "), "\n")
  } else if (all(diff(window) > 0)) {
    cat("Upward trend started at ", paste(window, collapse = " , "), "\n")
  }
}
|
fbdf4c0a09f9a8290fdfb9bacfda66c7e0c57fcc
|
e4da77190b6a4bb6f1da8ece129e2b82aa8513aa
|
/section4/src/19781782165460_04_code01_stem_draft1.R
|
9d675e77df68bc16f7dedda1be1a6b5b052cdc5a
|
[] |
no_license
|
rsanchezs/RGraphic
|
922ace26905d1d7a542ee028e14a9007752e48b8
|
f92303086543160fe67f536b331eb5d66390b246
|
refs/heads/master
| 2020-12-24T21:10:54.151452
| 2016-05-19T14:57:25
| 2016-05-19T14:57:25
| 58,759,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 664
|
r
|
19781782165460_04_code01_stem_draft1.R
|
# 1. Stem-and-leaf plots
# ChickWeight (base 'datasets' package): weight versus age of chicks on
# different experimental diets.
ChickWeight
names(ChickWeight)
# Columns:
#   weight - body weight of the chick
#   Time   - number of days since birth when the measurement was made
#   Chick  - unique identifier
#   Diet   - experimental diet
wdt1 <- ChickWeight$weight[ChickWeight$Diet == 1]
wdt2 <- ChickWeight$weight[ChickWeight$Diet == 2]
stem(wdt1)
help(stem)
# 'scale' controls the plot length
stem(wdt1, scale = 2)
# 'width' is the desired width of the plot
stem(wdt1, scale = 2, width = 1)
# aplpack ("Another PLot PACKage") provides richer stem-and-leaf displays.
# Only install when missing: an unconditional install.packages() in a script
# re-downloads the package on every run.
if (!requireNamespace("aplpack", quietly = TRUE)) {
  install.packages("aplpack")
}
library(aplpack)
stem.leaf(wdt1)
# back-to-back stem-and-leaf comparison of diets 1 and 2
stem.leaf.backback(wdt1, wdt2)
stem.leaf.backback(wdt1, wdt2, back.to.back = FALSE)
|
cdfe3c122b139670e84af50bd9e77e8ae711ae53
|
2d539479a4301f86bf7fdc498078b166e3b8a06d
|
/callSimpleCJS.R
|
3a7d750e6dc857ca4a1db16aca3ca6b1afcd51f5
|
[] |
no_license
|
evanchildress/simpleCJS
|
cb028f7c224770138a3e55b471e03ce766a71a52
|
519971dde2deae70886ab55d96c8e547a43372d4
|
refs/heads/master
| 2021-01-17T08:11:01.932935
| 2016-07-05T17:38:09
| 2016-07-05T17:38:09
| 33,996,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,634
|
r
|
callSimpleCJS.R
|
library(plyr)
library(rjags)
library(ggplot2)
library(abind)
rjags::load.module("dic")
dMData$length[dMData$tagNumberCH=='1BF1FF6207' & dMData$season == 3 & dMData$year == 2005] <- NA
dMData$length[dMData$tagNumberCH=='1BF1FF6521' & dMData$season == 3 & dMData$year == 2005] <- NA
dMData$length[dMData$tagNumberCH=='1BF18CE7ED' & dMData$season == 2 & dMData$year == 2006] <- NA
dMData$length[dMData$tagNumberCH=='1BF20FF1B9' & dMData$season == 3 & dMData$year == 2005] <- NA
dMData$length[dMData$tagNumberCH=='257C67CA48' ] <- NA
dMData$length[dMData$tagNumberCH=='1BF20EB7A4' & dMData$season == 4 & dMData$year == 2008] <- NA
#dMData$riverOrdered <- factor(dMData$river,levels=c('WEST BROOK','WB JIMMY','WB MITCHELL','WB OBEAR'), ordered=T)
# means for standardizing
#####################################################################
# stdBySeasonRiver <- ddply( dMData, .(riverOrdered,riverN,season), summarise,
# lengthMean=mean(length, na.rm=TRUE),
# lengthSd=sd(length, na.rm=TRUE),
# lengthLo = quantile(length,c(0.025), na.rm=TRUE),
# lengthHi = quantile(length,c(0.975), na.rm=TRUE),
# tempMean=mean(fullMeanT, na.rm=TRUE),
# tempMeanP=mean(temperatureForP, na.rm=TRUE),
# tempSd=sd(fullMeanT, na.rm=TRUE),
# tempSdP=sd(temperatureForP, na.rm=TRUE),
# tempLo = quantile(fullMeanT,c(0.025), na.rm=TRUE),
# tempHi = quantile(fullMeanT,c(0.975), na.rm=TRUE),
# flowMean=mean(fullMeanD, na.rm=TRUE),
# flowSd=sd(fullMeanD, na.rm=TRUE),
# dischMeanP=mean(dischargeForP,na.rm=T),
# dischSdP=sd(dischargeForP,na.rm=T),
# flowLo = quantile(fullMeanD,c(0.025), na.rm=TRUE),
# flowHi = quantile(fullMeanD,c(0.975), na.rm=TRUE) )
# ############# To get rid of NA Rivers
# stdBySeasonRiver<-stdBySeasonRiver[!is.na(stdBySeasonRiver$riverN),]
#
# #####################################################################
# stdBySeason <- ddply( dMData, .(season), summarise,
# lengthMean=mean(length, na.rm=TRUE),
# lengthSd=sd(length, na.rm=TRUE),
# lengthLo = quantile(length,c(0.025), na.rm=TRUE),
# lengthHi = quantile(length,c(0.975), na.rm=TRUE),
# tempMean=mean(fullMeanT, na.rm=TRUE),
# tempMeanP=mean(temperatureForP, na.rm=TRUE),
# tempSd=sd(fullMeanT, na.rm=TRUE),
# tempSdP=sd(temperatureForP, na.rm=TRUE),
# tempLo = quantile(fullMeanT,c(0.025), na.rm=TRUE),
# tempHi = quantile(fullMeanT,c(0.975), na.rm=TRUE),
# flowMean=mean(fullMeanD, na.rm=TRUE),
# flowSd=sd(fullMeanD, na.rm=TRUE),
# dischMeanP=mean(dischargeForP,na.rm=T),
# dischSdP=sd(dischargeForP,na.rm=T),
# flowLo = quantile(fullMeanD,c(0.025), na.rm=TRUE),
# flowHi = quantile(fullMeanD,c(0.975), na.rm=TRUE) )
#
# # standardize by river - for age0 fall lengths
# stdByRiver <- ddply( dMData, .(riverOrdered,riverN), summarise,
# lengthSd0 = sd(subset( length, age == 0 & season == 3 ), na.rm=TRUE),
# lengthMean0 = mean(subset( length, age == 0 & season == 3 ), na.rm=TRUE) )
#
# stdByRiver <- stdByRiver[!is.na(stdByRiver$riverN),]
# stdByRiver$river <- as.numeric(stdByRiver$riverOrdered)
#stdBySeasonRiver<-rbind(stdBySeasonRiver,c('zRiv1','0',rep(NA,ncol(stdBySeasonRiver)-2)))
#####
# # fdDATA is flood and drought frequencies and durations
# fdDATA$year <- as.numeric( fdDATA$year )
# fdDATA$year2 <- fdDATA$year
# fdDATA$year <- fdDATA$year-min(fdDATA$year) + 1
#
# floodDur <- matrix(0,max(fdDATA$season),max(fdDATA$year))
# droughtDur <- matrix(0,max(fdDATA$season),max(fdDATA$year))
# floodFreq <- matrix(0,max(fdDATA$season),max(fdDATA$year))
# for ( i in 1:nrow(fdDATA) ){
# floodDur[fdDATA$season[i],fdDATA$year[i]] <- fdDATA$floodDur[i]
# droughtDur[fdDATA$season[i],fdDATA$year[i]] <- fdDATA$droughtDur[i]
# floodFreq[fdDATA$season[i],fdDATA$year[i]] <- fdDATA$floodFreq[i]
#
# }
#####
# Prepend a column of row means and then a row of column means, so the
# matrix gains a dummy first row and column (used for zRiv = 1).
addRowColMeans <- function(m) {
  with_col <- cbind(rowMeans(m), m)
  rbind(colMeans(with_col), with_col)
}
# Prepend a column of row means, giving the matrix a dummy first column
# (used for zRiv = 1).
addColMeans <- function(m) {
  cbind(rowMeans(m), m)
}
# tempForN<- array(NA,dim=c(4,5,max(dMData$year-min(dMData$year) + 1)))
# for(s in 1:4){
# for(y in 1:max(dMData$year-min(dMData$year) + 1)){
# tempForN[s,1,y]<-(stdBySeason$tempMean[s]- stdBySeason$tempMean[s] ) / stdBySeason$tempSd[ s ]
# for(r in 1:4){
# tempForN[s,r+1,y]<-(mean(dMData$fullMeanT[dMData$season==s&as.numeric(dMData$riverOrdered)==r&(dMData$year-min(dMData$year) + 1)==y],na.rm=T)
# - stdBySeason$tempMean[ s] ) / stdBySeason$tempSd[ s ]
# if(tempForN[s,r+1,y]=='NaN')tempForN[s,r+1,y]<-(stdBySeason$tempMean[s]- stdBySeason$tempMean[ s] ) / stdBySeason$tempSd[ s ]
# }
# }
# }
# flowForN<- array(NA,dim=c(4,5,max(dMData$year-min(dMData$year) + 1)))
# for(s in 1:4){
# for(y in 1:max(dMData$year-min(dMData$year) + 1)){
# flowForN[s,1,y]<-(stdBySeason$flowMean[s]- stdBySeason$flowMean[s] ) / stdBySeason$flowSd[s]
# for(r in 1:4){
# flowForN[s,r+1,y]<-(mean(dMData$fullMeanD[dMData$season==s&as.numeric(dMData$riverOrdered)==r&(dMData$year-min(dMData$year) + 1)==y],na.rm=T)
# - stdBySeason$flowMean[s] ) / stdBySeason$flowSd[s]
# if(flowForN[s,r+1,y]=='NaN')flowForN[s,r+1,y]<-(stdBySeason$flowMean[s]- stdBySeason$flowMean[s] ) / stdBySeason$flowSd[s]
# }
# }
# }
# Known-alive indicator for capture-recapture data: 1 for samples strictly
# after the first capture and up to (and including) the last capture,
# NA elsewhere.
knownZ <- function(sN, first, last) {
  known <- array(NA, dim = length(first))
  alive <- (sN > first) & (sN <= last)
  known[alive] <- 1
  return(known)
}
############ Predictors that are in a matrix have season in rows and river in columns
d <- within(
data = list(),
expr = {
encDATA = as.numeric(dMData$enc) #$msEnc
riverDATA = dMData$riverN #-3
nRivers = length(unique(dMData$riverN))-1 #may need to add one for unobs
#lengthDATA = dMData$length
#availableDATA = dMData$available01
#ind = as.numeric(factor(dMData$tag))
# For standardizing length
# lengthMean = addColMeans( matrix(stdBySeasonRiver$lengthMean,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# lengthSd = addColMeans( matrix(stdBySeasonRiver$lengthSd,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
#
# lengthMean0 = stdByRiver$lengthMean0
# lengthSd0 = stdByRiver$lengthSd0
# environmental covariates pertaining to intervals. These are
# covariates of growth and survival
# For standardizing env predictors of growth and surv
# tempMean = addColMeans( matrix(stdBySeasonRiver$tempMean,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# tempSd = addColMeans( matrix(stdBySeasonRiver$tempSd,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# flowMean = addColMeans( matrix(stdBySeasonRiver$flowMean,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# flowSd = addColMeans( matrix(stdBySeasonRiver$flowSd,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
## Predictors of phi for correcting N1 where countForN ==0
# tempForN = tempForN
# flowForN = flowForN
# not standardizing by season,river because on NAs in river
# tempDATA = ( as.numeric(dMData$fullMeanT) - stdBySeason$tempMean[ as.numeric(dMData$season)] ) / stdBySeason$tempSd[ as.numeric(dMData$season) ]
# flowDATA = ( as.numeric(dMData$fullMeanD) - stdBySeason$flowMean[ as.numeric(dMData$season)] ) / stdBySeason$flowSd[ as.numeric(dMData$season) ]
# emPermNA, used to censor likelihood for permanent emigrants
# 1 on line before last observation with subsequent bottom of the study site antenna hit. 0's before and after if em, NAs otherwise
# trying emPerm without the NAs
#emPermDATA = dMData$emPerm
#intervalDays = as.numeric(dMData$fullMeanIntLen )
# Environmental covariates for p
#flowP = as.numeric(dMData$dischargeForP)
#temperatureP = as.numeric(dMData$temperatureForP)
#For standardizing env predictors of p
# flowMeanP = addRowColMeans( matrix(stdBySeasonRiver$dischMeanP,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# flowSdP = addRowColMeans( matrix(stdBySeasonRiver$dischSdP,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# tempMeanP = addRowColMeans( matrix(stdBySeasonRiver$tempMeanP,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# tempSdP = addRowColMeans( matrix(stdBySeasonRiver$tempSdP,nrow=length(unique(dMData$season)),ncol=length(unique(as.numeric(dMData$riverN)-0))-1) )
# , growthSd = sd(((dMData$lagLength - dMData$length)/(as.numeric(dMData$intervalLength)))*365/4, na.rm=TRUE)
######## NEVER!!!! ######### gr = (dMData$lagLength - dMData$length)/(as.numeric(dMData$intervalLength))
# indexing of the input and state vectors
year = dMData$year-min(dMData$year) + 1
nYears = max(dMData$year)-min(dMData$year)+1
season = as.numeric(as.character(dMData$season))
nAllRows = nrow(dMData)
nFirstObsRows = evalList$nFirstObsRows
firstObsRows = evalList$firstObsRows
#nOcc = length(unique(dMData$sampleNum))
#occ = dMData$sampleNum-min(dMData$sampleNum)-1
nEvalRows = evalList$nEvalRows # rows that will matter if we start using JS, and
evalRows = evalList$evalRows # that matter now for the growth model
z = dMData[,knownZ(sampleNum,first,last)]
#lastPossibleRows = subset( 1:nrow(dMData),dMData$lastAIS==dMData$ageInSamples ) # need to put this in makedMData
#nLastPossibleRows = evalList$nFirstObsRows
#lastObsRows = evalList$lastObsRows
#nLastObsRows = evalList$nLastObsRows
#lastRows = lastPossibleRows
#nLastRows = nLastPossibleRows
#nOut = evalList$nEvalRows # evalRows to output for each trace
#create variables that hold information on counts - data held in statsForN (made in makeDMData.R - based on pheno2Long, so has all cohorts. need to throw away years before dMData's first cohort)
#minYear <- min(dMData$year)
#firstYearIndex <- minYear-statsForN$minYear + 1
# countForN has dummy river 1 in it
#countForN <- statsForN$countForN[,firstYearIndex:dim(statsForN$countForN)[2],]
#meanForN <- statsForN$meanForN
#sdForN <- statsForN$sdForN
# dMDataF <- dMData[ dMData$first == dMData$sampleNum, ]
# nTagged1 <- table(dMDataF$season,dMDataF$year,dMDataF$riverOrdered)
#Fill in random #s for zRiv=1
# nTagged <- abind(matrix(round(runif(4*nYears,10,50)), nrow=4,ncol=nYears),nTagged1)
# floodDurDATA <- floodDur
# droughtDurDATA <- droughtDur
# floodFreqDATA <- floodFreq
}
)
# function to make initial z matrix, with 1s when known alive and NAs otherwise
zInit<-d$z
zInit[is.na(zInit)]<-0
zInit[d$firstObsRows]<-NA
zInit[zInit==1]<-NA
# Initial values for the permanent-emigration flags: replace missing values
# with 0 (i.e. assume "not a permanent emigrant"), keep known values.
# Fix: the original allocated `eOut <- array(NA, ...)` and immediately
# overwrote it with the ifelse() result - a dead store, removed here.
emPermInit <- function(e) {
  ifelse(is.na(e), 0, e)
}
# Initial river assignments for the latent river state (zRiv).
# Within each row's capture window [first, last] a missing river is carried
# forward from the previous row; outside the window the river is set to NA.
# Returns the river indices shifted up by 1 (index 1 = dummy river).
encInitMS <- function(sN, first, last, river) {
  riv <- river - 0  # coerce to double, matching the original's element-wise arithmetic
  for (k in seq_along(first)) {
    if (sN[k] >= first[k] && sN[k] <= last[k]) {
      if (is.na(riv[k])) {
        riv[k] <- riv[k - 1]  # carry the last known river forward
      }
    } else {
      riv[k] <- NA
    }
  }
  return(riv + 1)
}
# Initial values for the MCMC chains: survival (phiBeta) and detection
# (pBeta) coefficients all start at 0.5. Array dimensions are
# season x year x river (one extra river slot for the dummy river).
# Reads the data list `d` from the enclosing environment.
inits <- function() {
  coef_dims <- c(4, d$nYears, d$nRivers + 1)
  list(
    phiBeta = array(0.5, dim = coef_dims),
    pBeta = array(0.5, dim = coef_dims)
    # psiBeta, size, z, censored and zRiv start values were disabled
    # (commented out) in the original script
  )
}
# MCMC settings
na <- 500
nb <- 2000
ni <- 5000
nt <- 5
nc <- 3
varsToMonitor<-c(
'pBeta'
, 'phiBeta'
, 'psiBeta'
, 'deviance'
# , 'grSigma'
# , 'grBeta'
)
# out <- bugs(
# data=d,
# inits=inits,
# model = "simpleCJS.txt",
# parameters.to.save = varsToMonitor,
# n.chains=nc,
# n.iter = ni,
# n.thin = nt,
# n.burnin=nb,
# debug=T)
rm(dMData)
rm(evalList)
gc()
(beforeAdapt <- Sys.time())
print( beforeAdapt )
adaptedModel<- jags.model(
file = bugsName,
data = d,
inits = inits,
n.chains = nc,
n.adapt = na,
)
(afterAdapt <- Sys.time())
afterAdapt - beforeAdapt
# out1=out ## for running a second set of iters
( beforeJags <- Sys.time() )
print( beforeJags )
out <- jags.samples(
model = adaptedModel,
variable.names = varsToMonitor,
n.iter = ni,
thin = nt,
progress.bar = 'text'
)
( done <- Sys.time() )
print(afterAdapt - beforeAdapt)
print(done - beforeJags)
|
367b271057f4528d72825396421468ee3fb3e215
|
39851ccdf21d02180a5d214ae84082e9c210dd97
|
/inst/extdata/introduction.R
|
89011b520e5067920e90ddc28403bf76500bb67f
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.db
|
61a8b40c56f0d4a3f39ae7d502a9c2971f7fcdc5
|
358aa98df81d5a55188d50825be660d874df4050
|
refs/heads/master
| 2022-06-22T15:01:24.911765
| 2021-07-07T11:19:45
| 2021-07-07T11:19:45
| 137,391,271
| 0
| 1
|
MIT
| 2022-06-07T17:00:00
| 2018-06-14T17:55:29
|
R
|
UTF-8
|
R
| false
| false
| 332
|
r
|
introduction.R
|
## Loading data from Excel files or MS Access databases
# Just a very brief introduction...
library(kwb.db)
# Paths to the example files (relative to the working directory)
xls <- "testdata.xls"
mdb <- "testdata.mdb"
# List the tables contained in an Excel file / MS Access database
hsTables(xls)
hsTables(mdb)
# Read individual tables; note that Excel sheet names end with "$"
hsGetTable(xls, "testdata_de$")
hsGetTable(xls, "testbereich")
hsGetTable(mdb, "testdata_de")
|
609da9b15c3add160402132343f43bbd1a23a6e5
|
ab47dae2fa5a108e6c48b2013ae2ac09a1511856
|
/man/PhyloFactor.Rd
|
5ea481e932e1147b5e1a1a0a6810b2b546f363b7
|
[] |
no_license
|
Nermin-Ghith/phylofactor
|
bee02d7676411e3d37b758ae42828128708da62c
|
666b55cf634067392850ea5035c46eb1a473e969
|
refs/heads/master
| 2023-04-08T13:16:40.626542
| 2021-04-01T15:57:33
| 2021-04-01T15:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 14,654
|
rd
|
PhyloFactor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PhyloFactor.R
\name{PhyloFactor}
\alias{PhyloFactor}
\title{Regression-based phylofactorization}
\usage{
PhyloFactor(Data, tree, X = NULL, frmla = Data ~ X, choice = "var",
transform.fcn = log, contrast.fcn = NULL, method = "glm",
nfactors = NULL, small.output = F, stop.fcn = NULL,
stop.early = NULL, KS.Pthreshold = 0.01, alternative = "greater",
ncores = NULL, delta = 0.65, choice.fcn = NULL,
cluster.depends = "", ...)
}
\arguments{
\item{Data}{Data matrix whose rows are tip labels of the tree, columns are samples of the same length as X, and whose columns sum to 1}
\item{tree}{Phylogeny whose tip-labels are row-names in Data.}
\item{X}{independent variable. If performing multiple regression, X must be a data frame whose columns contain all the independent variables used in \code{frmla}}
\item{frmla}{Formula for input in GLM. Default formula is Data ~ X.}
\item{choice}{Choice, or objective, function for determining the best edges at each iteration using default regression. Must be choice='var' or choice='F'. 'var' minimizes residual variance of clr-transformed data, whereas 'F' maximizes the F-statistic from an analysis of variance.}
\item{transform.fcn}{Function for transforming data prior to projection onto contrast bases. Default is \code{log}, in which case zeros are internally replaced by 0.65. The transform function must preserve matrix class objects.}
\item{contrast.fcn}{Contrast function. Default is an efficient version of \code{BalanceContrast}. Another built-in option is \code{\link{amalgamate}} - for amalgamation-based analyses of compositional data, set \code{transform.fcn=I} and \code{contrast.fcn=amalgamate}.}
\item{method}{Which default objective function to use either "glm", "max.var" or "gam".}
\item{nfactors}{Number of clades or factors to produce in phylofactorization. Default, NULL, will iterate phylofactorization until either dim(Data)[1]-1 factors, or until stop.fcn returns T}
\item{small.output}{Logical, indicating whether or not to trim output. If \code{TRUE}, output may not work with downstream summary and plotting wrappers.}
\item{stop.fcn}{Currently, accepts input of 'KS'. Coming soon: input your own function of the environment in phylofactor to determine when to stop.}
\item{stop.early}{Logical indicating if stop.fcn should be evaluated before (stop.early=T) or after (stop.early=F) choosing an edge maximizing the objective function.}
\item{KS.Pthreshold}{Numeric between 0 and 1. P-value threshold for KS-test as default stopping-function.}
\item{alternative}{alternative hypothesis input to \code{\link{ks.test}} if KS stopping function is used}
\item{ncores}{Number of cores for built-in parallelization of phylofactorization. Parallelizes the extraction of groups, amalgamation of data based on groups, regression, and calculation of objective function. Be warned - this can lead to R taking over a system's memory.}
\item{delta}{Numerical value for replacement of zeros. Default is 0.65, so zeros will be replaced column-wise with 0.65*min(x[x>0])}
\item{choice.fcn}{Function for customized choice function. Must take as input the numeric vector of ilr coefficients \code{y}, the input meta-data/independent-variable \code{X}, and a logical \code{PF.output}. If \code{PF.output==F}, the output of \code{choice.fcn} must be a two-member list containing numerics \code{output$objective} and \code{output$stopStatistic}. Phylofactor will choose the edge which maximizes \code{output$objective} and a customized input \code{stop.fcn} can be used with the \code{output$stopStatistics} to stop phylofactor internally.}
\item{cluster.depends}{Character parsed and evaluated by cluster to load all dependencies for custom choice.fcn. e.g. \code{cluster.depends <- 'library(bayesm)'}}
\item{...}{optional input arguments for \code{\link{glm}} or, if \code{method=='gam'}, input for \code{mgcv::gam}}
}
\value{
Phylofactor object, a list containing: "Data", "tree" - inputs from phylofactorization. Output also includes "factors","glms","terminated" - T if stop.fcn terminated factorization, F otherwise - "bins", "bin.sizes", "basis" - basis for projection of data onto phylofactors, and "Monophyletic.Clades" - a list of which bins are monophyletic and have bin.size>1. For customized \code{choice.fcn}, Phylofactor outputs \code{$custom.output}.
}
\description{
Regression-based phylofactorization
}
\examples{
set.seed(2)
library(phylofactor)
library(phangorn)
library(mgcv)
mar <- par('mar')
clo <- function(X) X/rowSums(X)
## Example with pseudo-simulated data: real tree with real taxonomy, but fake abundance patterns.
data("FTmicrobiome")
tree <- FTmicrobiome$tree
Taxonomy <- FTmicrobiome$taxonomy
tree <- drop.tip(tree,setdiff(tree$tip.label,sample(tree$tip.label,20)))
### plot phylogeny ###
plot.phylo(tree,use.edge.length=FALSE,main='Community Phylogeny')
nodelabels()
Taxonomy <- Taxonomy[match(tree$tip.label,Taxonomy[,1]),]
X <- as.factor(c(rep(0,5),rep(1,5)))
### Simulate data ###
Factornodes <- c(37,27)
Factoredges <- sapply(Factornodes,FUN=function(n,tree) which(tree$edge[,2]==n),tree=tree)
edgelabels(c('PF 1','PF 2'),edge=Factoredges,cex=2,bg='red')
sigClades <- Descendants(tree,Factornodes,type='tips')
Data <- matrix(rlnorm(20*10,meanlog = 8,sdlog = .5),nrow=20)
rownames(Data) <- tree$tip.label
colnames(Data) <- X
Data[sigClades[[1]],X==0] <- Data[sigClades[[1]],X==0]*8
Data[sigClades[[2]],X==1] <- Data[sigClades[[2]],X==1]*9
Data <- t(clo(t(Data)))
Bins <- bins(G=sigClades,set=1:20)
pf.heatmap(tree=tree,Data=Data)
### PhyloFactor ###
PF <- PhyloFactor(Data,tree,X,nfactors=2)
PF$bins
all(PF$bins \%in\% Bins)
######### Summary tools ##########
PF$factors
# Notice that both of the groups at the first factor are labelled as "Monophyletic"
# due to the unrooting of the tree
PF$ExplainedVar
# A coarse summary tool
s <- pf.summary(PF,Taxonomy,factor=1)
s$group1$IDs # Grabbing group IDs
s$group2$IDs
# A tidier summary tool
td <- pf.tidy(s)
td$`group1, Monophyletic`
# Simplified group IDs - the unique shortest unique prefixes separating the groups
td$`group2, Monophyletic`
## Plotting with summary tools ##
par(mfrow=c(1,1),mar=mar)
plot(as.numeric(X),td$`Observed Ratio of group1/group2 geometric means`,
ylab='Average ratio of Group1/Group2',pch=18,cex=2)
lines(td$`Predicted ratio of group1/group2`,lwd=2)
legend(1,12,legend=c('Observed','Predicted'),pch=c(18,NA),lwd=c(NA,2),
lty=c(NA,1),cex=2)
######### get and plot Phylogenetic info ####
PFedges <- getFactoredEdgesPAR(ncores=2,PF=PF) \%>\% unlist
## unlisting is unwise if any factor corresponds to more than one edge
PFnodes <- tree$edge[PFedges,2]
PFclades <- Descendants(tree,PFnodes,'tips')
par(mfrow=c(3,1))
pf.heatmap(tree=tree,Data=Data)
# edgelabels(c('Factor 1','Factor 2'),edge=PFedges,bg=c('yellow','red'),cex=2)
tiplabels(' ',PFclades[[1]],bg='yellow')
tiplabels(' ',PFclades[[2]],bg='red')
edgelabels(c('PF1','PF2'),edge=PFedges,bg=c('yellow','red'),cex=2)
### predicted data matrix given phylofactors
pred <- pf.predict(PF)
colnames(pred) <- colnames(Data)
pf.heatmap(tree=tree,Data=pf.predict(PF))
### residual data
resid <- Data/pred
resid <- resid \%>\% t \%>\% clo \%>\% t
pf.heatmap(tree=tree,Data=resid)
par(mar=mar)
##################################
##################################################
############### Other features: ##################
#### glm-style manipulation of formula, weights, etc. #########
#w=1:10
#PF.weighted <- PhyloFactor(Data,tree,X,weights=w,nfactors=1)
# predict meta-data with ILR abundances by changing formula & family
# PF.predict.X <- PhyloFactor(Data,tree,X,frmla=X~Data,nfactors=2,family=binomial)
### more glm controls: offset, model, subset...
#PF.fancy <- PhyloFactor(Data,tree,X,frmla=X~Data,nfactors=2,ncores=2,
#family=binomial,weights=w,offset=rnorm(10),model=FALSE,subset=3:8)
#### Stopping Function ###########################
PF.stop <- PhyloFactor(Data,tree,X,stop.early=TRUE)
PF.stop$terminated
# TRUE - this indicates that the factorization was terminated
# when there was sufficiently low signal
PF.stop$nfactors # 2 - the correct number of factors
all(PF.stop$bins \%in\% Bins) #
# TRUE - the factors identified were the correct ones.
#### PhyloFactor has built-in parallelization ####
PF.par <- PhyloFactor(Data,tree,X,nfactors=2,ncores=2)
all.equal(PF$factors,PF.par$factors)
##################################################
######### Phylogenetic PCA - maximize variance ###
pf.var <- PhyloFactor(Data,tree,method='max.var',nfactors=2)
######### Multiple regression ####################
b <- rlnorm(ncol(Data))
a <- as.factor(c(rep(0,5),rep(1,5)))
X <- data.frame('a'=a,'b'=b)
frmla <- Data~a+b
PF.M <- PhyloFactor(Data,tree,X,frmla=frmla,nfactors=2)
PF.M$models[[1]]
PF.M.par <- PhyloFactor(Data,tree,X,frmla=frmla,nfactors=2,ncores=2)
all.equal(PF.M$factors,PF.M.par$factors)
####### transform.fcn and contrast.fcn ###########
## If we had Gaussian or approximately Gaussian data,
#GausData <- log(Data)
#pf.gaussian <- PhyloFactor(GausData,tree,X,frmla=frmla,nfactors=2,transform.fcn=I)
## We can also perform amalgamation-style analyses with contrast.fcn
#pf.amalg <- PhyloFactor(GausData,tree,X,frmla=frmla,
# nfactors=2,transform.fcn=I,contrast.fcn=amalgamate)
##################################################
############################# CUSTOMIZED CHOICE FUNCTIONS ################################
#PhyloFactor can also be used for generalized additive models by inputting choice.fcn
#and cluster.depends to load required packages onto the cluster
### Let's work with some newly simulated data ####
set.seed(1.1)
n=100
Data <- matrix(rlnorm(20*n,meanlog = 8,sdlog = .5),nrow=20)
rownames(Data) <- tree$tip.label
a <- rnorm(n)
b <- rnorm(n)
X <- data.frame(a,b)
Data[sigClades[[1]],] <- t(t(Data[sigClades[[1]],])*(20/(1+exp(5*b))))
## This clade has a nonlinear response with b, decreasing for high values of b.
Data[sigClades[[2]],] <- t(t(Data[sigClades[[2]],])*8*a^-2)
## this clade is abundant only for intermediate values of a.
Data <- t(clo(t(Data)))
par(mfrow=c(2,2))
plot(a,gMean(Data[sigClades[[1]],],MARGIN=2),ylab='Group1 gMean')
plot(b,gMean(Data[sigClades[[1]],],MARGIN=2),ylab='Group1 gMean')
plot(a,gMean(Data[sigClades[[2]],],MARGIN=2),ylab='Group2 gMean')
plot(b,gMean(Data[sigClades[[2]],],MARGIN=2),ylab='Group2 gMean')
######### To input a custom choice.fcn, it needs to take as input the vector of
######### ILR coefficients 'y', the input meta-data 'X', and a logical PF.output.
######### The output of the custom choice function when PF.output=T
######### will be returned in PF$custom.output.
## Demo choice.fcn - generalized additive modelling ##
my_gam <- function(y,X,PF.output=FALSE,...){
dataset <- cbind('Data'=y,X)
gg <- mgcv::gam(Data~s(a)+s(b),data=dataset,...)
if (PF.output){
return(gg)
break
} else {
output <- NULL
## The output of the choice function for PF.output=F must contain two labelled numerics:
## an "objective" statistic and a "stopStatistics".
output$objective <- getStats(gg)['ExplainedVar']
output$stopStatistics <- getStats(gg)['Pval']
return(output)
}
}
load.mgcv <- 'library(mgcv)'
######### For parallelization of customized choice function, we may also need to input
######### cluster.depends which loads all dependencies to cluster.
######### The exact call will be clusterEvalQ(cl,eval(parse(text=cluster.depends)))
PF.G.par <- PhyloFactor(Data,tree,X,choice.fcn=my_gam,sp=c(1,1),
cluster.depends = load.mgcv,nfactors=2,ncores=2)
######### Or we can use the built-in method='gam' and input e.g. smoothing penalty sp
PF.G.par2 <- PhyloFactor(Data,tree,X,method='gam',
frmla=Data~s(a)+s(b),sp=c(1,1),nfactors=2,ncores=2)
all(sigClades \%in\% PF.G.par$bins)
PF.G.par$factors
par(mfrow=c(1,2))
for (ff in 1:2){
gm <- PF.G.par$custom.output[[ff]]
grp <- PF.G.par$groups[[ff]]
if (ff==1){
x=b
nd <- X
nd$a <- rep(mean(a),length(a))
pred <- predict(gm,newdata = nd)
} else {
x=a
nd <- X
nd$b <- rep(mean(b),length(b))
pred <- predict(gm,newdata = nd)
}
y <- BalanceContrast(grp,log(Data))
plot(sort(x),y[order(x)],ylab='ILR Coefficient',
xlab='dominant Independent Variable',
main=paste('Factor',toString(ff),sep=' '))
lines(sort(x),pred[order(x)])
}
######################## Finding Hutchisonian Niches #####################################
### Example of how to use PhyloFactor to identify Gaussian-shapped Hutchinsonian niches ###
set.seed(1)
n=1000
A <- 20
mu=-1
sigma=0.9
Data <- matrix(rlnorm(20*n,meanlog = 8,sdlog = .5),nrow=20)
rownames(Data) <- tree$tip.label
X <- rnorm(n)
Data[sigClades[[1]],] <- t(t(Data[sigClades[[1]],])*A*exp(-(((X-mu)^2)/(2*sigma^2))))
Data <- t(clo(t(Data)))
y1 <- gMean(Data[sigClades[[1]],],MARGIN=2)
y2 <- gMean(Data[setdiff(1:20,sigClades[[1]]),],MARGIN=2)
ratios <- y1/y2
par(mfrow=c(1,1))
plot(X,ratios,
ylab='Group1/Group2 gMean',log='y',
main='Identifying Gaussian-shaped Hutchinsonian Niches',
xlab='Environmental Variable')
frmla=Data~X+I(X^2)
PF.Gaus <- PhyloFactor(Data,tree,frmla=frmla,X,nfactors=1,ncores=2)
all.equal(sigClades[[1]],PF.Gaus$bins[[2]])
y <- PF.Gaus$groups[[1]] \%>\% BalanceContrast(.,log(Data))
plot(X,y,ylab='Group1/Group2 gMean',
main='Identifying Gaussian-shaped Hutchinsonian Niches',
xlab='Environmental Variable')
lines(sort(X),predict(PF.Gaus$models[[1]])[order(X)],lwd=4,col='green')
legend(-2.5,-3,legend=c('Observed','Predicted'),
pch=c(1,NA),col=c('black','green'),lty=c(NA,1),lwd=c(NA,2))
### Because the regression is performed on an ILR coordinate, getting an estimate
### about the optimal habitat preference and the width of habitat preferences
### requires a little algebra
grp <- PF.Gaus$groups[[1]]
r <- length(grp[[1]])
s <- length(grp[[2]])
coefs <- PF.Gaus$models[[1]]$coefficients
a <- coefs['I(X^2)']
b <- coefs['X']
c <- coefs['(Intercept)']
d <- sqrt(r*s/(r+s))
sigma.hat <- sqrt(-d/(2*a))
mu.hat <- -b/(2*a)
A.hat <- exp(c/d+mu.hat^2/(2*sigma.hat^2))
names(A.hat) <- NULL
names(mu.hat) <- NULL
names(sigma.hat) <- NULL
c('A'=A,'A.hat'=A.hat)
c('mu'=mu,'mu.hat'=mu.hat)
#The optimal environment for this simulated organism is mu=-1
c('sigma'=sigma,'sigma.hat'=sigma.hat) #The standard deviation is ~0.9.
}
|
7478b9dfd41295a87e570a0ec3cc316bcaaeca70
|
ec51c427c69927481c374011ad717dd0f6eaa248
|
/man/clean_data.Rd
|
9df947d41bab2f6d194f6090f284135f613f2e51
|
[
"MIT"
] |
permissive
|
PNNL-TES/picarro.data
|
9553d8cea7e82a31e9c4c892f3f2d008c5e66f76
|
00e3108ce38cf3c620a510c61c5ccd96e28943c3
|
refs/heads/master
| 2020-05-29T18:33:15.483000
| 2019-11-11T14:04:56
| 2019-11-11T14:04:56
| 189,304,398
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 772
|
rd
|
clean_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{clean_data}
\alias{clean_data}
\title{Clean the data: create \code{DATETIME} field, remove fractional and unwanted valves}
\usage{
clean_data(raw_data, tz = "", remove_valves = c())
}
\arguments{
\item{raw_data}{A \code{data.frame} of data returned by \code{\link{process_directory}}.}
\item{tz}{Timezone of Picarro timestamps, e.g. "America/New_York", character.
See \url{https://en.wikipedia.org/wiki/List_of_tz_database_time_zones}.}
\item{remove_valves}{An optional vector of integer valve numbers to remove
from the data, e.g. ambient ports.}
}
\value{
The cleaned data.
}
\description{
Clean the data: create \code{DATETIME} field, remove fractional and unwanted valves
}
|
317df0ba5e40929f65f8585e7ce90c82f062fc59
|
d26b260dc5e48e73b46666b0b85b42c8827cfff8
|
/R/empiricalC.R
|
c9f7a01ecc44796a0aeeaf228d1644592ff27d91
|
[] |
no_license
|
cran/mc2d
|
749f0cca928857e6b034d44c157f79658f9fac3c
|
bfaf18b5e38b651618c98a81e1b6d4047587b11b
|
refs/heads/master
| 2023-07-29T20:20:05.134739
| 2023-07-17T16:00:02
| 2023-07-17T17:31:53
| 17,697,342
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,074
|
r
|
empiricalC.R
|
#<<BEGIN>>
dempiricalC <- function(x, min, max, values, prob=NULL, log=FALSE)
#TITLE The Continuous Empirical Distribution
#NAME empiricalC
#KEYWORDS distribution
#DESCRIPTION
#Density, distribution function and random generation for a continuous empirical distribution.
#INPUTS
#{x, q}<<Vector of quantiles.>>
#{p}<<Vector of probabilities.>>
#{n}<<Number of random values. If \samp{length(n) > 1}, the length is taken to be the number required.>>
#{min}<<A finite minimal value.>>
#{max}<<A finite maximal value.>>
#{values}<<Vector of numerical values.>>
#[INPUTS]
#{prob}<<Optional vector of count or probabilities.>>
#{log, log.p}<<logical; if \samp{TRUE}, probabilities \samp{p} are given as \samp{log(p)}.>>
#{lower.tail}<<logical; if \samp{TRUE} (default), probabilities are \samp{P[X <= x]}, otherwise, \samp{P[X > x]}.>>
#DETAILS
#Given \eqn{p_{i}}{p_i}, the distribution value for \eqn{x_{i}}{x_i} with \samp{i} the rank \eqn{i = 0, 1, 2, \ldots, N+1},
# \eqn{x_{0}=min}{x_0 = min} and \eqn{x_{N+1}=max}{x_(N+1) = max} the density is:
# \deqn{f(x)=p_{i}+(\frac{x-x_{i}}{x_{i+1}-x_{i}})(p_{i+1}-p_{i})}{f(x) = p_i + (p_(i+1) - p_i)/(x_(i+1) - x_i) for x_i<=x<x_(i+1)}
# The \samp{p} values being normalized to give the distribution a unit area.
#
#\samp{min} and/or \samp{max} and/or \samp{values} and/or \samp{prob} may vary: in that case,
#\samp{min} and/or \samp{max} should be vector(s). \samp{values} and/or \samp{prob} should be
#matrixes, the first row being used for the first element of \samp{x}, \samp{q}, \samp{p} or the first random value, the
#second row for the second element of \samp{x}, \samp{q}, \samp{p} or random value, ...
#Recycling is permitted if the number of elements of \samp{min} or \samp{max} or the number of rows of \samp{prob} and \samp{values} are equal or
#equals one.</>
#SEE ALSO
#\code{\link{empiricalD}}
#VALUE
#\samp{dempiricalC} gives the density, \samp{pempiricalC} gives the distribution function,
#\samp{qempiricalC} gives the quantile function and \samp{rempiricalC} generates random deviates.
#EXAMPLE
#prob <- c(2,3,1,6,1)
#values <- 1:5
#par(mfrow=c(1,2))
#curve(dempiricalC(x, min=0, max=6, values, prob), from=-1, to=7, n=1001)
#curve(pempiricalC(x, min=0, max=6, values, prob), from=-1, to=7, n=1001)
#
### Varying values
#(values <- matrix(1:10,ncol=5))
### the first x apply to the first row
### the second x to the second one
#dempiricalC(c(1,1),values,min=0, max=11)
#
#
###Use with mc2d
#val <- c(100, 150, 170, 200)
#pr <- c(6, 12, 6, 6)
#out <- c("min", "mean", "max")
###First Bootstrap in the uncertainty dimension
###with rempirical D
#(x <- mcstoc(rempiricalD, type = "U", outm = out, nvariates = 30, values = val, prob = pr))
###Continuous Empirical distribution in the variability dimension
#mcstoc(rempiricalC, type = "VU", values = x, min=90, max=210)
#CREATED 08-02-20
#--------------------------------------------
{
  lx <- length(x)
  if(lx == 0) return(numeric(0))
  ## Normalise the parameters to matrix form: one row per parameter set
  if(is.vector(values)) values <- matrix(values,nrow=1)
  if(is.null(prob)) prob <- rep(1,length(values[1,]))   # default: equal weights
  if(is.vector(prob)) prob <- matrix(prob, nrow=1)
  lmin <- length(min)
  lmax <- length(max)
  nrv <- nrow(values)
  nrp <- nrow(prob)
  lpar <- max(lmin,lmax,nrv,nrp) # nb of combinations of parameters
  if(nrv != 1 && nrp != 1 && nrv != nrp)
    stop("values/prob should be vector(s), matrix(es) of 1 row or matrix(es) of the same number of rows")
  if(any(!is.finite(values)) || any(!is.finite(min)) || any(!is.finite(max)) || any(!is.finite(prob)))
    stop("values, prob, min and max should be finite values")
  if(any(min > max) || any(min > apply(values,1,min)) || any(max < apply(values,1,max)))
    stop("at least one min is not a minimum or max is not a maximum")
  if(any(prob < 0) || any(apply(prob,1,sum) == 0))
    stop("Prob should be non negative and sum(prob) should be != 0")
  ## Density for a single parameter set (min/max scalar, values/prob vectors)
  onerow <- function(x, min, max, values, prob){
    val2 <- sort(unique(values))
    probi <- tapply(prob,values,sum)   # merge the weights of duplicated values
    probi <- probi / sum(probi)
    prob <- c(0,0,probi,0,0)           # padded with zeros below min / above max
    val <- c(-Inf,min,val2,max,Inf)
    ## Trapezoidal integral of the piecewise-linear density, used to rescale to unit area
    h <- c(val2,max) - c(min,val2)
    a <- c(0,probi)
    b <- c(probi,0)
    Integ <- sum(h*(a+b)/2)
    d <- rep(NA,length(x))
    ## Zero density outside [min, max]; the !is.na() guard lets NA quantiles
    ## propagate as NA (an NA in a logical subscript assignment is an error in R)
    d[!is.na(x) & (x >= max | x <= min)] <- 0
    lesquel <- which(!is.na(x) & x < max & x > min)
    quel <- findInterval(x[lesquel], val) + 1
    ## Linear interpolation between the two bracketing knots
    d[lesquel] <- prob[quel-1]+(x[lesquel]-val[quel-1])/(val[quel]-val[quel-1])*(prob[quel]-prob[quel-1])
    d <- d / Integ
    return(d)
  }
  if(lpar == 1) { # Shortcut if only one set of parameters
    d <- onerow(x, as.vector(min), as.vector(max), as.vector(values), as.vector(prob))
  } else { # one onerow() call per parameter set, recycling x across rows
    x <- matrix(x, nrow = lpar)
    x <- lapply(seq_len(lpar), function(i) x[i,])
    prob <- lapply(seq_len(nrp), function(i) prob[i,])
    values <- lapply(seq_len(nrv), function(i) values[i,])
    d <- as.vector(t(mapply(onerow, x, min, max, values, prob)))
    d <- d[seq_len(max(lx,lpar))]
  }
  if(log) d <- log(d)
  if(any(is.na(d))) warning("NaN in dempiricalC")
  return(d)
}
#<<BEGIN>>
pempiricalC <- function(q, min, max, values, prob=NULL, lower.tail = TRUE, log.p = FALSE)
#ISALIAS dempiricalC
#--------------------------------------------
{
  lq <- length(q)
  if(lq == 0) return(numeric(0))
  ## Normalise the parameters to matrix form: one row per parameter set
  if(is.vector(values)) values <- matrix(values,nrow=1)
  if(is.null(prob)) prob <- rep(1,length(values[1,]))   # default: equal weights
  if(is.vector(prob)) prob <- matrix(prob, nrow=1)
  lmin <- length(min)
  lmax <- length(max)
  nrv <- nrow(values)
  nrp <- nrow(prob)
  lpar <- max(lmin,lmax,nrv,nrp) # nb of combinations of parameters
  if(nrv != 1 && nrp != 1 && nrv != nrp)
    stop("values/prob should be vector(s), matrix(es) of 1 row or matrix(es) of the same number of rows")
  if(any(!is.finite(values)) || any(!is.finite(min)) || any(!is.finite(max)) || any(!is.finite(prob)))
    stop("values, prob, min and max should be finite values")
  if(any(min > max) || any(min > apply(values,1,min)) || any(max < apply(values,1,max)))
    stop("at least one min is not a minimum or max is not a maximum")
  if(any(prob < 0) || any(apply(prob,1,sum) == 0))
    stop("Prob should be non negative and sum(prob) should be != 0")
  ## CDF for one parameter set: the piecewise-linear density is integrated analytically
  onerow <- function(q,min,max,values,prob){
    val2 <- sort(unique(values))
    probi <- tapply(prob,values,sum)   # merge the weights of duplicated values
    h <- c(val2,max) - c(min,val2)
    a <- c(0,probi)
    b <- c(probi,0)
    Integ <- sum(h*(a+b)/2)
    probi <- probi / Integ             # normalise the density to unit total area
    a <- c(0,probi)
    b <- c(probi,0)
    probcum <- cumsum(h*(a+b)/2)       # CDF evaluated at the knots
    probi <- c(0,0,probi,0,0)
    probcum <- c(0,0,probcum,1)
    val <- c(-Inf,min,val2,max,Inf)
    p <- rep(NA,length(q))
    ## Boundary values; the !is.na() guard lets NA quantiles propagate as NA
    ## (an NA in a logical subscript assignment is an error in R)
    p[!is.na(q) & q >= max] <- 1
    p[!is.na(q) & q <= min] <- 0
    lesquel <- which(!is.na(q) & q < max & q > min)
    quel <- findInterval(q[lesquel], val) + 1
    ## CDF at the lower knot plus the trapezoid area between the knot and q
    p[lesquel] <- probcum[quel-1]+(q[lesquel]-val[quel-1])*
      (probi[quel-1]+((probi[quel]-probi[quel-1])*(q[lesquel]-val[quel-1])/(2*(val[quel]-val[quel-1]))))
    return(p)
  }
  if(lpar == 1) { # Shortcut if only one set of parameters
    p <- onerow(q, as.vector(min), as.vector(max), as.vector(values), as.vector(prob))
  } else { # one onerow() call per parameter set, recycling q across rows
    q <- matrix(q, nrow = lpar)
    q <- lapply(seq_len(lpar), function(i) q[i,])
    prob <- lapply(seq_len(nrp), function(i) prob[i,])
    values <- lapply(seq_len(nrv), function(i) values[i,])
    p <- as.vector(t(mapply(onerow, q, min, max, values, prob)))
    p <- p[seq_len(max(lq,lpar))]
  }
  if(!lower.tail) p <- 1-p
  if(log.p) p <- log(p)
  if(any(is.na(p))) warning("NaN in pempiricalC")
  return(p)}
#<<BEGIN>>
qempiricalC <- function(p, min, max, values, prob=NULL, lower.tail = TRUE, log.p = FALSE)
#ISALIAS dempiricalC
#--------------------------------------------
{
  lp <- length(p)
  if(lp == 0) return(numeric(0))
  if(log.p) p <- exp(p)
  if(!lower.tail) p <- 1-p
  ## Normalise the parameters to matrix form: one row per parameter set
  if(is.vector(values)) values <- matrix(values,nrow=1)
  if(is.null(prob)) prob <- rep(1,length(values[1,]))   # default: equal weights
  if(is.vector(prob)) prob <- matrix(prob, nrow=1)
  lmin <- length(min)
  lmax <- length(max)
  nrv <- nrow(values)
  nrp <- nrow(prob)
  lpar <- max(lmin,lmax,nrv,nrp) # nb of combinations of parameters
  if(nrv != 1 && nrp != 1 && nrv != nrp)
    stop("values/prob should be vector(s), matrix(es) of 1 row or matrix(es) of the same number of rows")
  if(any(!is.finite(values)) || any(!is.finite(min)) || any(!is.finite(max)) || any(!is.finite(prob)))
    stop("values, prob, min and max should be finite values")
  if(any(min > max) || any(min > apply(values,1,min)) || any(max < apply(values,1,max)))
    stop("at least one min is not a minimum or max is not a maximum")
  if(any(prob < 0) || any(apply(prob,1,sum) == 0))
    stop("Prob should be non negative and sum(prob) should be != 0")
  ## Quantile function for one parameter set: invert the piecewise-quadratic CDF
  onerow <- function(p, min, max, values, prob){
    val2 <- sort(unique(values))
    probi <- tapply(prob,values,sum)   # merge the weights of duplicated values
    h <- c(val2,max) - c(min,val2)
    a <- c(0,probi)
    b <- c(probi,0)
    Integ <- sum(h*(a+b)/2)
    probi <- probi / Integ             # normalise the density to unit total area
    a <- c(0,probi)
    b <- c(probi,0)
    probcum <- cumsum(h*(a+b)/2)       # CDF evaluated at the knots
    probi <- c(0,0,probi,0,0)
    probcum <- c(0,0,probcum,Inf)
    val <- c(-Inf,min,val2,max,Inf)
    q <- rep(NA,length(p))
    ## Out-of-range probabilities; the !is.na() guard lets NA inputs propagate
    ## as NA (an NA in a logical subscript assignment is an error in R)
    ok <- !is.na(p)
    q[ok & p > 1] <- NaN
    q[ok & p == 1] <- max
    q[ok & p < 0] <- NaN
    lesquel <- which(ok & p < 1 & p >= 0)
    quel <- findInterval(p[lesquel],probcum)+1
    ## On the bracketing segment solve aa*t^2 + bb*t + cc = 0 for the
    ## offset t of the quantile above the lower knot val[quel-1]
    ## (local names avoid shadowing base::c)
    aa <- (probi[quel]-probi[quel-1])/(val[quel]-val[quel-1])/2
    bb <- probi[quel-1]
    cc <- probcum[quel-1]-p[lesquel]
    disc <- bb^2-4*aa*cc
    ## aa == 0 means a flat density segment: the inversion is linear there
    q[lesquel] <- ifelse(aa == 0, -cc/bb + val[quel-1], (-bb+sqrt(disc))/2/aa + val[quel-1])
    return(q)
  }
  if(lpar == 1) { # Shortcut if only one set of parameters
    q <- onerow(p, as.vector(min), as.vector(max), as.vector(values), as.vector(prob))
  } else { # one onerow() call per parameter set, recycling p across rows
    p <- matrix(p, nrow = lpar)
    p <- lapply(seq_len(lpar), function(i) p[i,])
    prob <- lapply(seq_len(nrp), function(i) prob[i,])
    values <- lapply(seq_len(nrv), function(i) values[i,])
    q <- as.vector(t(mapply(onerow, p, min, max, values, prob)))
    q <- q[seq_len(max(lp,lpar))]
  }
  if(any(is.na(q))) warning("NaN in qempiricalC")
  return(q)}
#<<BEGIN>>
rempiricalC <- function(n, min, max, values, prob=NULL)
#ISALIAS dempiricalC
#--------------------------------------------
{
  ## Base-R convention: a vector n means "draw length(n) values"
  if (length(n) > 1) n <- length(n)
  if (length(n) == 0 || as.integer(n) == 0) return(numeric(0))
  n <- as.integer(n)
  if (n < 0) stop("integer(n) cannot be negative in rempiricalc")
  ## Never pass more parameter sets than requested draws
  if (length(min) > n) min <- min[seq_len(n)]
  if (length(max) > n) max <- max[seq_len(n)]
  trim_sets <- function(par) {
    if (is.vector(par) && length(par) > n) par[seq_len(n)]
    else if (is.matrix(par) && nrow(par) > n) par[seq_len(n), ]
    else par
  }
  values <- trim_sets(values)
  prob <- trim_sets(prob)
  ## Inversion sampling: push uniforms through the quantile function
  draws <- qempiricalC(runif(n), min = min, max = max, values = values,
                       prob = prob, lower.tail = TRUE, log.p = FALSE)
  return(as.vector(draws))
}
|
fb9e9205b188563d4cfe399d9d42a21d96293038
|
0669e8ab7e90b69f0a7735f1e1fdff6647a06288
|
/answer_exercises/answer_compareGroups_2.R
|
12071f4a6443f8787b0ef6591e058a92fa124466
|
[] |
no_license
|
isglobal-brge/Curso_R_avanzado
|
d116e7a28289ff497203b93621ee41d7212883a5
|
a7b52ac7aebdc4f10cab9875ef9fb155ae08eafb
|
refs/heads/master
| 2020-03-18T18:57:43.899208
| 2018-06-07T12:03:09
| 2018-06-07T12:03:09
| 135,125,238
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
answer_compareGroups_2.R
|
#
# Exercise 2
#
compareGroups <- function(x, y, pvalue=TRUE){
  # Compare every column of x between the two levels of factor y.
  # Returns a list (class "compareGroups") with one element per column,
  # each holding $table (from descriptive()) and, optionally, $pvalue (from pval()).
  # Scalar validations use short-circuit || rather than vectorised |
  if (missing(x) || missing(y))
    stop("x and y are required")
  if (nrow(x) != length(y))
    stop("x and y should have the same number of observations")
  if (!is.factor(y) || length(levels(y)) != 2)
    stop("y must be a 2 level factor")
  ans <- vector("list", ncol(x))   # preallocate: one slot per column
  for (i in seq_len(ncol(x))) {
    xx <- x[, i]
    ans[[i]] <- list()
    ans[[i]]$table <- descriptive(xx, y)
    if (pvalue) {
      ans[[i]]$pvalue <- pval(xx, y)
    }
  }
  names(ans) <- colnames(x)
  class(ans) <- "compareGroups"
  ans
}
# Run the comparison on the example data (`dd` and `group` are defined elsewhere)
out <- compareGroups(dd, group)
print.compareGroups <- function(x, n=10, sig=0.05, ...){
  # Print method for "compareGroups" objects: with fewer than n variables
  # print all p-values, otherwise only the names significant at level `sig`.
  # Full element name via [[ avoids relying on $-partial matching ("pval" -> "pvalue")
  pvalues <- unlist(lapply(x, function(el) el[["pvalue"]]))
  if (length(pvalues) < n) {
    cat("P-values are: \n")
    cat(pvalues, "\n")
  } else {
    varsig <- names(pvalues[pvalues < sig])
    cat("significant variables are: \n")
    cat(varsig, "\n")
  }
  # Print methods conventionally return their argument invisibly
  invisible(x)
}
# Auto-printing at top level dispatches to print.compareGroups()
out
|
3c8a611308874c6f004408a5e5ddf166d220961d
|
51caa4e6e11a996afd5f4771ca967a8488026474
|
/corr.R
|
876c481e8dbc397bda690ba441627512bbc79847
|
[] |
no_license
|
difu1994/Week-2-Assignment-Coursera
|
2679e8b221472e49b6d982cc952ec4fd3be484bb
|
9d9e76a24e25f57b84ba63c15e1a1cff20531104
|
refs/heads/master
| 2022-12-02T03:44:53.313731
| 2020-07-21T18:17:33
| 2020-07-21T18:17:33
| 281,471,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
corr.R
|
function (directory, threshold = 0) 
{
    ## Correlation between sulfate and nitrate for every monitor whose
    ## count of completely observed cases exceeds `threshold`.
    ## `directory` holds the per-monitor CSV files; the helper sourced from
    ## complete.R reports the complete-case counts per monitor.
    complete <- dget("complete.R")
    compileddata <- complete(directory, 1:332)
    ## Keep only monitors above the completeness threshold
    keep <- compileddata[compileddata[["Values"]] > threshold, ]
    files <- as.character(keep[["Filename"]])
    ## file.path() replaces the fragile setwd()/setwd("..") dance of the
    ## original; vapply replaces growing a vector with append() in a loop
    vapply(files, function(f) {
        obs <- na.omit(read.csv(file.path(directory, f)))
        cor(obs[["sulfate"]], obs[["nitrate"]])
    }, numeric(1), USE.NAMES = FALSE)
}
|
e41a43f8db230224a2b42f453bbcb847c6def20a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mistr/examples/poisdist.Rd.R
|
61887b17480d28578abb6bef88464b8a8f27215b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
poisdist.Rd.R
|
# Extracted example script for mistr::poisdist (runs the package's documented example)
library(mistr)
### Name: poisdist
### Title: Creates an Object Representing Poisson Distribution
### Aliases: poisdist
### ** Examples
# Poisson distribution object with rate lambda = 1
P <- poisdist(1)
# Density at 2, 3, 4 and NA (NA should propagate)
d(P, c(2, 3, 4, NA))
# Five random draws from the distribution
r(P, 5)
|
e610b1d335b444f93087fb82c47d343aaf4d38b8
|
5a973c5285cb20d2efbeda0bc0856472cb24ccd3
|
/run_analysis.R
|
3db1b69aaa565cb47f8497f9ba7665a299040841
|
[] |
no_license
|
jkope/Getting-and-Cleaning-Data
|
ab7e61727938e0d5aa5a2f7d76c49c985e4435c8
|
c6bbb4bcddc8822fbb3777f8de59263629893166
|
refs/heads/master
| 2021-01-10T09:51:44.318040
| 2016-01-23T03:39:52
| 2016-01-23T03:39:52
| 50,005,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
run_analysis.R
|
## read data sets (the "UCI HAR Dataset" folder must sit in the working directory)
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
features <- read.table("UCI HAR Dataset/features.txt")
## combine test and train data sets
x <- rbind(X_train,X_test) ## 10299 x 561 measurement variables
y <- rbind(y_train,y_test) ## 10299 x 1 activity labels
subject <- rbind(subject_train,subject_test) ## 10299 x 1 subject ids
## extract only the mean() and standard deviation std() variables
meanStd <- grep("mean\\(\\)|std\\(\\)", features[, 2])
xselect <- x[, meanStd]
## clean up variable names: drop "()", camel-case Mean/Std, drop dashes
names(xselect) <- gsub("\\(\\)", "", features[meanStd, 2])
names(xselect) <- gsub("mean", "Mean", names(xselect))
names(xselect) <- gsub("std", "Std", names(xselect))
names(xselect) <- gsub("-", "", names(xselect))
# clean up and label descriptive activity names
actLabel <- read.table("UCI HAR Dataset/activity_labels.txt")
actLabel[, 2] <- tolower(gsub("_", "", actLabel[, 2]))
# capitalise the 8th character of rows 2:3 ("walkingUpstairs"/"walkingDownstairs");
# the original's second, row-3-only substr<- line was a duplicate no-op and is removed
substr(actLabel[2:3, 2], 8, 8) <- toupper(substr(actLabel[2:3, 2], 8, 8))
actLabel <- actLabel[y[, 1], 2]
y[, 1] <- actLabel
names(y) <- "activity"
names(subject) <- "subject"
data <- cbind(subject, y, xselect)
# second, independent tidy set with the average of each variable
# for each activity and each subject
Tidyset <- aggregate(. ~subject + activity, data, mean)
Tidyset <- Tidyset[order(Tidyset$subject,Tidyset$activity),]
## write data
write.table(Tidyset, file = "Tidyset.txt", row.names = FALSE)
|
6494de3515dd91cb4ccf4201ac2ef0d6ea415c20
|
ceeab39e925764962faf99ff34bde6b67fe7f0e9
|
/Week1/day1/rfiles/day1.Lesson2.bwmal.r
|
18b9f3301fb479200ff2e35f3708d19acd76bd08
|
[] |
no_license
|
Keniajin/PwaniR_Training_2015
|
d0ede8b79ca2e8f19d09e7fc9c755ebe91cce679
|
94a7d3e6dc31370c35b4572b8760b813a3de3eae
|
refs/heads/master
| 2021-01-22T09:47:12.676486
| 2021-01-12T08:26:09
| 2021-01-12T08:26:09
| 27,430,653
| 0
| 18
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,890
|
r
|
day1.Lesson2.bwmal.r
|
## Analysis of the bwmal data
## Data found in the data folder on the CD provided
## Original Stata .do file By Jim Todd
## First Modified by Greg Fegan and Ritah Meka November 28th 2013
## Last modified by Ken Mwai
##
## As a practical for new R users
## This section at the beginning enables you to put comments about the do-file
###################################################3
# You can also put comments between these markers
# clear #
rm(list = ls())  # R equivalent of Stata's "clear": empty the workspace
#-------------------------------------------------------------------------
###########################################################
# Do this first, and then the log file can be saved in the same directory
################################################################
# We need to install necessary packages, install packages
# install.packages("packages/foreign", repos = NULL)
# install.packages("packages/psych", repos = NULL)
# there are a number of packages within R that can read in Stata .dta binary data files we prefer "foreign"
library(foreign) # allows reading data from other statistical programs
#library(psych)
library(arm) # helps in drawing hist of categorical data
# lets create a dataframe object called bwmal which will read in the Stata file bwmal.dta
# A .dta file is a Stata binary file, so it must be read with foreign::read.dta();
# read.csv() would mis-parse the binary content
bwmal <- read.dta("data/bwmal.dta")
# Start by describing the variables, 11 variables, and 791 observations
str(bwmal)
# View enables you to look at the data and also edit() allows you to make changes to the data
View(bwmal)
# allowing editing of the loaded data (interactive; the edited copy is the
# return value and is NOT saved back into bwmal here)
edit(bwmal)
## The command summary shows means and std dev (Stata's "summarize")
## This can be for all variables or just for some variables
summary(bwmal)
# summarizes a specific variable
summary(bwmal$matage)
# Listing all the data would take a lot of space
# Better to just list the first 10 observations in 1/10
head(bwmal$matage,n=10)
head(cbind(bwmal$matage, bwmal$mheight),n=10)
# viewing the last data for bwmal$matage
tail(bwmal , n=10)
tail(bwmal$matage,n=10)
# Or listing a subset, such as those who smoke
# Note when we use logical tests ie "if we test some variable equals some value"
# we must use the == (double equals sign)
# Note that in the line below the R command print is assumed
# listing data where smoke==1
bwmal[bwmal$smoke=="1", ]
# assign the data
bwmal.Smoke1 <- bwmal[bwmal$smoke=="1", ]
View(bwmal.Smoke1)
# This command gives the frequency and percentage for different levels of a variable
table(bwmal$smoke)
# To obtain a histogram
# histogram matage
hist(bwmal$matage)
## Generate a new variable, and recode it to show two categories
## (which() drops NAs, so missing gestwks stay unrecoded instead of erroring)
gestgrp <- bwmal$gestwks
gestgrp[which(bwmal$gestwks<=36)] <- 1
gestgrp[which(bwmal$gestwks>=37)] <- 2
table(gestgrp)
## We can generate labels for the values in each variable
## Two steps. First define the label - smokelbl
## And then apply that label to the values in one variable
bwmal$smoke<-factor(bwmal$smoke,levels=c(0,1),labels=c("Non-smoker","Smoker"))
table(bwmal$smoke)
## Notice the difference with the label applied to the values
# Define labels for sex
bwmal$sex<-factor(bwmal$sex,levels=c(0,1),labels=c("Female","Male"))
table(bwmal$sex)
# We can also label the variable itself to make it clear what it means
## changes the label
#names(bwmal)[4]<-"The sex of the Baby"
## gives a label
table(bwmal[4])
# Create a special group for analysis
# (which() keeps rows with missing values out of the recode safely)
bwmal$specialgrp <- 0
bwmal$specialgrp[which(bwmal$sex=="Male" & bwmal$bweight>4.0 & bwmal$gestwks>40)] <- 1
table(bwmal$specialgrp)
#bwmal$specialgrpB<-as.numeric(bwmal$sex=="Male" & bwmal$bweight>4.0 & bwmal$gestwks>40)
#specialgrpB<-subset(bwmal,bwmal$sex=="Male" & bwmal$bweight>4.0 & bwmal$gestwks>40)
## Then we can save the data in a new data file
## save() overwrites an existing file by default (Stata's ", replace");
## the original Stata-style line "save bwmal_new , replace" is not valid R
## BEWARE do not over write your original data
save(bwmal, file = "bwmal_new.RData")
## Another useful command is ? (help; ?? searches the help index)
? summarize
? save
? generate
?? generate
|
3cb9a1f2832f856bb383b7abc8f60b1d3be5e4fe
|
8ba0937129d097188a1572bf986117e0a8422205
|
/boilderplate.R
|
4462af92894446390b5bf3569d1249d13bebbeb2
|
[] |
no_license
|
niedermansam/wikiScraper
|
80138186c0f5124a985d1212456173f20d552ba1
|
16c363d11f5d6315a48c0a53b252c9297357223c
|
refs/heads/master
| 2020-07-19T07:19:57.134621
| 2019-09-09T20:49:26
| 2019-09-09T20:49:26
| 206,399,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,202
|
r
|
boilderplate.R
|
# Demo / scratch script for the wikiScraper package: scrape Wikipedia tables
# and visualise them with ggplot2 and leaflet.
library(wikiScraper)
library(tidyverse)
# --- New York City: demographics section tables ----
ny <- wiki_page("New_York_City")
ny_dem <- ny %>% wiki_section("Demographics")
ny_dem %>% html_nodes("table")
ny_dem %>%
wiki_table(3, skip=1, format=c("integer", "double", "double")) %>%
wiki_names(rename =c(3,"pct_change", 2, "population"))
# --- New York City: boroughs table (header spans two rows) ----
ny_boroughs <- ny %>% wiki_section("Boroughs")
ny_boroughs %>% wiki_table(skip=1, header_length = 2)
ny_header <- ny_boroughs %>% rvest::html_nodes("tr")
ny_header <- ny_header[2:3]
# --- Metro systems: scrape, clean the system-length column, plot ----
metro <- wiki_page("List_of_metro_systems")
metro_table <- metro %>%
wiki_table(
format = c(
rep("string",3),
rep('integer', 3),
"string",
'double')
) %>%
wiki_names()
# Strip the parenthesised miles and the "km" unit, leaving a bare number.
# NOTE(review): %<>% comes from magrittr, which library(tidyverse) does not
# attach -- confirm magrittr (or wikiScraper) puts it on the search path
metro_table$system_length %<>%
str_replace_all("\\(.*\\)|km","") %>%
str_trim() %>% as.double()
# Scatter of ridership vs. length, both axes log10
metro_table %>%
ggplot(aes(y= annual_ridership_millions, x=system_length, color=yearopened, label=city)) +
geom_point() +
scale_x_continuous(trans='log10') +
scale_y_continuous(trans = "log10")+
ggplot2::labs(
title ="If You Build it, They will Ride",
subtitle = "Metro System Length vs. Annual Ridership",
y="Annual Ridership (Millions)",
x="System Length (km)",
color="Year Opened",
caption="Data retrieved from Wikipedia using wikiScraper, and visualized with ggplot2.") +
theme_clean()
# Same plot on linear axes, labelling only the very-high-ridership systems
metro_table %>%
ggplot(aes(y= annual_ridership_millions,
x=system_length,
color=yearopened,
label=city)) +
geom_point() +
geom_text(data = metro_table %>% filter(annual_ridership_millions > 1700),
vjust=0,
nudge_y=100) +
ggplot2::labs(
title ="If You Build it, They will Ride",
subtitle = "Metro System Length vs. Annual Ridership",
y="Annual Ridership (Millions)",
x="System Length (km)",
color="Year Opened",
caption="Data retrieved from Wikipedia using wikiScraper, and visualized with ggplot2.") +
theme_clean()
# --- California natural-gas plants: parse coordinates and map them ----
library(leaflet)
cali_power <- wiki_page("List of power stations in California")
cali_gas <- cali_power %>%
wiki_table(4) %>%
wiki_names() %>%
mutate(refs = NULL,
lat = coords %>%
str_extract("\\d+\\.\\d+.(N|S)") %>%
str_remove("°(N|S)") %>%
as.double(),
lon = coords %>%
str_extract("\\d+\\.\\d+.(E|W)") %>%
str_remove("°(E|W)") %>%
as.double(),
label = sprintf("<strong>Natural Gas Plant</strong><br>%s <br> %s MW/year", plant, capacity_mw) %>%
lapply(htmltools::HTML)
)
# Sign flip: the table lists western longitudes as positive numbers
cali_gas$lon <- -1*cali_gas$lon
leaflet() %>%
addTiles() %>%
addCircleMarkers(data = cali_gas,
radius = as.integer(cali_gas$capacity_mw) /100,
label = cali_gas$label,
color = "orange")
# Re-scrape the raw table to develop a reusable coordinate parser below
cali_gas_table <- cali_power %>%
wiki_table(4) %>%
wiki_names()
cali_gas$coords
coords <- cali_gas_table$coords
parse_coordinates <- function(coords){
  ## Parse Wikipedia-style "12.34°N 56.78°W" strings into numeric degrees.
  ## Returns a tibble with columns lat and lon (southern/western negative).
  lat <- str_extract(coords, "\\d+\\.\\d+.(N|S)")
  lon <- str_extract(coords, "\\d+\\.\\d+.(E|W)")
  ## Southern / western hemispheres get a leading minus sign
  lat <- ifelse(str_detect(lat, "S"), str_replace(lat, "^", "-"), lat)
  lon <- ifelse(str_detect(lon, "W"), str_replace(lon, "^", "-"), lon)
  ## Strip the degree sign and hemisphere letter, then convert to double.
  ## Plain assignments replace %<>%: the magrittr assignment pipe is not
  ## attached by library(tidyverse), so %<>% could fail at run time.
  lat <- as.double(str_remove_all(lat, "°|[:alpha:]"))
  lon <- as.double(str_remove_all(lon, "°|[:alpha:]"))
  return(tibble(lat = lat, lon = lon))
}
# Attach the parsed coordinates to the scraped table.
# NOTE(review): bind_rows() appends ROWS; adding lat/lon as COLUMNS would be
# bind_cols() -- confirm which was intended
cali_gas_table %>% bind_rows(parse_coordinates(cali_gas_table$coords))
# Quick look at the raw longitude matches
coords %>%
str_extract("\\d+\\.\\d+.(E|W)")
# Map the plants with plain markers this time
cali_gas %>% leaflet() %>% addTiles() %>% addMarkers(label=cali_gas$label)
# --- Misc scraping experiments ----
site_html <- ny %>% wiki_section("Boroughs") %>% html_nodes('table')
# Non-Wikipedia page: same table helper on a BTS statistics page
bts_page <- xml2::read_html('https://www.bts.gov/statistical-products/surveys/vehicle-miles-traveled-and-vehicle-trips-state')
bts <- wiki_table(bts_page,
header_start = 2,
header_length = 2)
# Beijing administrative-divisions table: inspect its two header rows
beijing <- wiki_page('Beijing')
beijing_table <- beijing %>%
wiki_section('Administrative divisions') %>%
rvest::html_nodes("table")
beijing_table <- beijing_table[1]
beijing_header <- beijing_table %>% rvest::html_nodes('tr')
beijing_header <- beijing_header[3:4]
beijing_header
#%>%
#wiki_table(skip=2, header_length = 2)
ny_boroughs
|
9f865d72078c99213cd688ad8f3fd9bcac341740
|
27ad6f6be1acadcf749a9e6475fa1f75cb22ffc4
|
/server.R
|
1b1b4c8410e4418612fe7cd2d8cbb3ce5f022ab5
|
[] |
no_license
|
PhoenixRavy/course10
|
f83019fc11e5ec8b9fb6c81368c9bf325d5978cd
|
6b39360ba9bb954700ab6f99fc58e96bc964cee5
|
refs/heads/master
| 2020-04-26T11:36:45.848065
| 2019-03-03T02:47:23
| 2019-03-03T02:47:23
| 173,522,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,249
|
r
|
server.R
|
# server.R -- Shiny back end for the next-word prediction app.

library(shiny)

# tm / stringr / shiny can emit start-up warnings on some platforms;
# suppress them to keep the app log quiet (matches original behavior).
suppressWarnings(library(tm))
suppressWarnings(library(stringr))
suppressWarnings(library(shiny))

# Pre-computed n-gram frequency tables, built offline and shipped
# alongside the app. Column layout (assumed from usage in Predict();
# TODO confirm against the builder script): each table holds the
# context words followed by the predicted next word in the last column.
quad_gram <- readRDS("quad_gram.RData")
tri_gram  <- readRDS("tri_gram.RData")
bi_gram   <- readRDS("bi_gram.RData")
uni_gram  <- readRDS("uni_gram.RData")

# Global status message describing which n-gram level produced the
# last prediction; updated as a side effect of Predict().
mesg <<- ""
# Predict the next word from the last 1-3 words of the input using
# stupid-backoff over the pre-loaded n-gram tables (quad -> tri -> bi,
# then the constant "the" as a last resort).
#
# x       : raw user input, a single character string.
# Returns : a length-1 character vector with the predicted next word.
# Side effect: assigns the global `mesg`, describing which n-gram level
#             (or fallback) produced the prediction.
Predict <- function(x) {
  # Normalise the input: lower-case, then strip punctuation and digits.
  # Base-R equivalents of tm::removePunctuation / tm::removeNumbers,
  # removing the tm dependency from this function.
  xclean <- gsub("[[:digit:]]", "", gsub("[[:punct:]]", "", tolower(x)))

  # Tokenise on runs of whitespace and drop empty tokens, so repeated
  # spaces or leading/trailing blanks can no longer inject "" words
  # into the n-gram lookups (bug in the original single-space split).
  xs <- strsplit(xclean, "\\s+")[[1]]
  xs <- xs[nzchar(xs)]

  if (length(xs) >= 3) {
    xs <- tail(xs, 3)
    hit <- head(quad_gram[quad_gram$uni_gram == xs[1] &
                            quad_gram$bi_gram  == xs[2] &
                            quad_gram$tri_gram == xs[3], 4], 1)
    if (identical(character(0), hit)) {
      # No quad-gram match: back off to the last two words.
      Predict(paste(xs[2], xs[3], sep = " "))
    } else {
      mesg <<- "Next word is predicted using quad-gram."
      hit
    }
  } else if (length(xs) == 2) {
    hit <- head(tri_gram[tri_gram$uni_gram == xs[1] &
                           tri_gram$bi_gram == xs[2], 3], 1)
    if (identical(character(0), hit)) {
      # No tri-gram match: back off to the last word.
      Predict(xs[2])
    } else {
      mesg <<- "Next word is predicted using tri-gram."
      hit
    }
  } else if (length(xs) == 1) {
    hit <- head(bi_gram[bi_gram$uni_gram == xs[1], 2], 1)
    if (identical(character(0), hit)) {
      mesg <<- "No match found. Most common word 'the' is returned."
      head("the", 1)
    } else {
      mesg <<- "Next word is predicted using bi-gram."
      hit
    }
  } else {
    # Empty / all-punctuation input previously fell through every
    # branch and returned NULL invisibly; fall back to the most
    # common word instead, consistent with the no-match case above.
    mesg <<- "No match found. Most common word 'the' is returned."
    "the"
  }
}
# Wire the UI inputs to the prediction engine.
shinyServer(function(input, output) {
  # Main output: the predicted next word for the current input string.
  output$prediction <- renderPrint({
    predicted <- Predict(input$inputString)
    # `mesg` is set as a side effect of Predict(); surface it alongside
    # the prediction so the user can see which n-gram level was used.
    output$text2 <- renderText({ mesg })
    predicted
  })
  # Echo the raw input back to the UI.
  output$text1 <- renderText({ input$inputString })
})
|
4d4aa7d9ecb89b3b596a9dd4495f6b0107c3e538
|
781c00c6543cca9f7d90ca6c326170bd086d3a60
|
/heart/default/mm_E14.5_heart_H3K4me3_model.r
|
74e85c0510266ccce3745d13af1b161d46f040e0
|
[] |
no_license
|
Lovecraft888/SwPraktikum
|
454844923461aa0959255065fccebf8fe0e39c2e
|
2817f8baf0aa57c8a337afff36a869f718729467
|
refs/heads/master
| 2021-04-14T03:24:50.604256
| 2020-03-22T22:42:47
| 2020-03-22T22:42:47
| 249,204,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88,565
|
r
|
mm_E14.5_heart_H3K4me3_model.r
|
# R script for Peak Model
# -- generated by MACS
p <- c(0.039204044563,0.0457001502735,0.0522754767853,0.0589011318062,0.0654978945348,0.0654950985065,0.0654205377523,0.0653991015355,0.0655500870627,0.0656647242223,0.0656954805334,0.0658287578816,0.0657364889483,0.0657281008634,0.0657588571745,0.0657942735328,0.0659098427018,0.0660291399085,0.065951783126,0.0659489870977,0.0660254118708,0.0660794684176,0.0663255189065,0.0663544111988,0.0664653203206,0.0665137848109,0.0666899345927,0.0668129598371,0.0669769934964,0.0669126848459,0.0669872456001,0.0670394381281,0.0670795145334,0.0671699194479,0.067137299118,0.0672305000607,0.0671130668728,0.0671065428068,0.0670701944392,0.0671419591651,0.0671540752876,0.0671149308917,0.0671587353348,0.0671969477213,0.0673004007678,0.0672948087112,0.0674355421348,0.067542723219,0.0676312641146,0.0679295071314,0.0679919517631,0.0682007218749,0.0680991328472,0.0681047249038,0.0681308211678,0.0682454583274,0.068288330761,0.0682510503839,0.068482188722,0.0685381092876,0.0686257181738,0.0687515394466,0.0688978649267,0.0689444653981,0.0689873378318,0.0689500574547,0.0689994539543,0.0690656266237,0.0689090490398,0.0688298282385,0.0688820207664,0.0688214401536,0.0688969329173,0.0690954509254,0.0691187511611,0.0692576205658,0.069260416594,0.0692436404244,0.0694663906775,0.0695800958277,0.0697031210722,0.0697553136001,0.0698494465523,0.0697842058924,0.0699035030991,0.0698979110425,0.0700172082493,0.0701914940122,0.0701961540594,0.0701514176068,0.0702045421442,0.0702856269644,0.0702567346722,0.0703508676244,0.0704860089914,0.0705577737173,0.0706146262924,0.0706938470937,0.0706164903112,0.0707814559799,0.0707488356499,0.0708401725738,0.0709035492149,0.0710442826385,0.0711663758735,0.0711980641941,0.0712735569577,0.071431066551,0.0717507457846,0.0717898901806,0.071932487623,0.0719958642641,0.0722111584419,0.0723490958372,0.0722437787718,0.0724646650062,0.0726137865146,0.0726053984297,0.0725727780998,0.0724777131382,0.0724404327611,0.072453480893,0.0724255206102,0.0723546878937,0.0724572089308,0.0
725354977227,0.0726967353536,0.0727983243813,0.0728309447112,0.0729250776634,0.0729427858425,0.0730350547759,0.0731133435678,0.073161808058,0.0732223886708,0.0732261167085,0.0730453068796,0.0730248026722,0.0731235956715,0.0732475529254,0.07348428332,0.073560708093,0.073593328423,0.0736650931489,0.0737424499314,0.0736445889415,0.0737927784405,0.0738747952701,0.0738785233079,0.073909279619,0.0739036875624,0.0739083476095,0.0740285768257,0.0740863614102,0.074146942023,0.0743687602668,0.0742802193712,0.0743035196069,0.0744619612095,0.0745206778035,0.0745933745388,0.0746567511799,0.0747089437079,0.0747182638021,0.0747145357644,0.0747518161415,0.0749857505079,0.0753287299772,0.0751208918749,0.0752047727233,0.0752662853456,0.0753604182978,0.0753343220338,0.0755328400419,0.0756633213617,0.075685689588,0.075788210625,0.0755421601361,0.075794734691,0.0758487912378,0.0758320150681,0.0759280120392,0.0759727484917,0.0760529013025,0.0761852466412,0.0763623284324,0.0763530083382,0.0765217020446,0.0766037188742,0.0767286081375,0.0769159420324,0.0768777296459,0.0771862247665,0.0771116640122,0.0771741086439,0.0772449413604,0.0773278901994,0.0773288222089,0.0774061789914,0.0774742156796,0.0773157740769,0.0774965839058,0.077455575491,0.0775748726978,0.077725858225,0.0778349033281,0.0778889598749,0.0780893419018,0.0781331463449,0.0783270043058,0.078621519285,0.0786811678883,0.0788172412648,0.0788619777173,0.0787520006048,0.0787743688311,0.0788191052836,0.0789421305281,0.079026943386,0.0789859349712,0.0791173483005,0.0791583567153,0.0792021611584,0.0794994721658,0.0797436586358,0.0797883950883,0.0799011682291,0.0799114203328,0.0799617488419,0.0802012752648,0.0803336206035,0.0803597168675,0.0804985862722,0.08037183299,0.0803942012163,0.0805228185173,0.0807884412042,0.0807185404971,0.0809086704203,0.0809105344392,0.0810531318816,0.0811985253523,0.0812824062008,0.0814967683692,0.0816160655759,0.0817633230654,0.081877960225,0.0821249427234,0.0822805882978,0.0824194577025,0.082348624986,0.082
5592591166,0.0827643011907,0.0828519100769,0.0830914364998,0.0831846374426,0.0832228498291,0.0833878154978,0.083488472516,0.0836450500998,0.0839432931166,0.0840737744365,0.0840588622857,0.084164179351,0.0841809555207,0.0841893436055,0.0843776095099,0.0845416431692,0.0846972887436,0.0847690534695,0.0849032628271,0.0849983277887,0.0850570443826,0.0853403752486,0.0854456923139,0.0856274341523,0.0857243631328,0.0859974418951,0.0861419033564,0.0864000699678,0.0864597185712,0.0866218882116,0.0870096041335,0.0871419494722,0.0872528585941,0.087594906054,0.0878707808446,0.0880105822587,0.0882268084459,0.088405754256,0.0886322325469,0.0888260905079,0.0890665489402,0.089166273949,0.0894561288809,0.0895111174372,0.089680743153,0.0897357317092,0.0898895132648,0.0900647310372,0.0903732261577,0.0908215226924,0.0908643951261,0.0912278788028,0.0917451440352,0.0921515001456,0.0926128448123,0.0929847165739,0.0934553813348,0.0938477573039,0.0941432042924,0.0942084449523,0.0944582234789,0.0945905688177,0.0945225321294,0.0946847017698,0.0948860158062,0.0951758707382,0.0953874368783,0.0954685216985,0.0956735637725,0.0958646257052,0.0961367724581,0.0966363295113,0.0969532127166,0.0971927391395,0.0972188354035,0.0974611578547,0.0976242595045,0.0979364826628,0.0980492558035,0.0983857112069,0.0985609289793,0.0986112574884,0.0987669030628,0.0989812652311,0.0993149246062,0.0993866893321,0.0995460629443,0.099652312019,0.0998452379705,0.0999514870453,0.100189149449,0.100268370251,0.100512556721,0.100670066314,0.100564749249,0.100743695059,0.100942213067,0.1012618923,0.10150701078,0.101905910815,0.10204384821,0.102438088198,0.102590937744,0.102791319771,0.103141755316,0.103358913513,0.103468890625,0.103491258851,0.103783909812,0.103783909812,0.10406258063,0.104033688338,0.104401832062,0.104660930683,0.10492655337,0.105310541254,0.105614376327,0.105891183127,0.105960151825,0.106222978484,0.106390740181,0.106777524093,0.106776592084,0.107005866403,0.107194132307,0.107388922278,0.107624720663,0.10781
2054558,0.108024552707,0.108203498517,0.108484965364,0.108786936419,0.109224048841,0.109311657727,0.109488739518,0.10946916732,0.109575416395,0.109708693743,0.109948220166,0.110035829052,0.110223162947,0.11036482838,0.110395584691,0.110568938445,0.110831765103,0.111060107413,0.111288449723,0.111494423806,0.111680825692,0.112070405632,0.112299679952,0.112405929026,0.112626815261,0.112832789344,0.112915738183,0.113063927682,0.113277357841,0.113545776556,0.113737770498,0.113818855319,0.114053721694,0.114144126609,0.114228939467,0.114374332938,0.11461385936,0.114766708907,0.115041651688,0.11531473045,0.115393951251,0.115704310391,0.115793783296,0.116092958322,0.116318504604,0.116558963036,0.116669872158,0.117018443684,0.116994211439,0.117095800466,0.117362355162,0.117474196294,0.117503088586,0.117558077142,0.117868436282,0.11788800848,0.118160155233,0.118330712958,0.118495678626,0.118682080512,0.118811629822,0.118922538944,0.119149949245,0.119410911884,0.119510636893,0.12010246288,0.120192867794,0.12013508321,0.120415618047,0.120455694453,0.12063930031,0.120719453121,0.120897466921,0.121042860392,0.121133265307,0.120944067393,0.121104373014,0.121447352484,0.121509797115,0.121805244104,0.12174652751,0.122034518423,0.122246084563,0.12236538177,0.1224473986,0.122601180155,0.122780125965,0.122862142795,0.123226558481,0.123314167367,0.123689767167,0.123796948251,0.123920905505,0.1241082394,0.124382250171,0.124674901132,0.124868759092,0.124962892045,0.124913495545,0.125095237383,0.125213602581,0.125322647684,0.12552396172,0.125762556134,0.125781196322,0.125698247483,0.125787720388,0.126116719716,0.126096215509,0.126007674613,0.125968530217,0.125948958019,0.125905153576,0.125694519445,0.125777468284,0.125921929746,0.125875329274,0.125660967106,0.125822204737,0.125909813623,0.126166116216,0.126176368319,0.126300325573,0.126512823723,0.126508163676,0.126648897099,0.126624664854,0.126656353175,0.126506299657,0.126568744288,0.12633853796,0.12632176179,0.126279821366,0.126177300329
,0.12612603981,0.125770012209,0.125576154248,0.125454993022,0.125432624796,0.125291891373,0.125160478043,0.125130653742,0.124811906517,0.124542555793,0.124391570266,0.124210760437,0.124033678645,0.123902265316,0.123678583053,0.123441852659,0.123207918292,0.122812746295,0.122841638587,0.122580675948,0.122313189242,0.122306665176,0.122041042489,0.121745595501,0.121459468606,0.121251630504,0.121069888666,0.121087596845,0.120707336998,0.120657940499,0.12039511384,0.120025106097,0.119916993004,0.119814471967,0.119544189233,0.119357787347,0.119199345744,0.118909490812,0.118559987277,0.118123806865,0.117993325545,0.117858184178,0.117524524803,0.117208573607,0.117020307703,0.116791965393,0.116464830084,0.116174975152,0.11604728986,0.116029581681,0.11572201857,0.115447075789,0.115305410356,0.115195433243,0.115189841187,0.115006235329,0.114814241387,0.114517862389,0.114374332938,0.114231735495,0.114068633845,0.114070497864,0.114058381742,0.113908328224,0.11350663216,0.113310910181,0.113180428861,0.113072315767,0.112934378372,0.112655707553,0.112405929026,0.112164538585,0.111893323841,0.111467395533,0.111272605562,0.111130940129,0.110811260896,0.110591306671,0.110336868097,0.110084293542,0.109939832081,0.109720809865,0.10943654699,0.109227776878,0.109060015181,0.108582826354,0.108366600167,0.10785679101,0.10735257391,0.106985362195,0.106474621029,0.10587067892,0.10536552981,0.104961969728,0.104473596788,0.104154849564,0.103843558415,0.103550907455,0.103246140372,0.102891976789,0.102707438923,0.102559249424,0.102278714586,0.101912434881,0.10167011243,0.101395169649,0.101127682943,0.100927300916,0.100720394823,0.100475276344,0.100267438241,0.0998927704513,0.0997902494143,0.0997091645941,0.0995339468217,0.0993838933039,0.0992375678237,0.0991853752958,0.0991145425793,0.0990474379005,0.0988731521375,0.098821891619,0.0986103254789,0.098455611914,0.0982309976419,0.0980231595395,0.0977901571826,0.0975124183732,0.0973306765348,0.0970426856217,0.0968544197173,0.0966978421335,0.096659629
747,0.0965039841725,0.0964228993523,0.0962458175611,0.0959550306197,0.0958879259409,0.0957080481214,0.0955682467072,0.0953538845389,0.0951572305496,0.0949503244567,0.0947080020055,0.0945532884406,0.0944078949698,0.0943389262722,0.0941469323301,0.0940276351234,0.0938347091719,0.0937154119651,0.0935327381173,0.0933015997793,0.0931655264028,0.0930275890076,0.0927498501981,0.0926398730857,0.0924823634924,0.0921803924379,0.0919613702224,0.0917684442709,0.0915503540648,0.0913490400284,0.0910731652379,0.0909016755032,0.09067892525,0.0904459228931,0.0901989403948,0.0900367707543,0.0898727370951,0.0896732870776,0.0895269615974,0.0892706590049,0.0891364496473,0.0888549828001,0.0888065183099,0.0887114533483,0.0884887030951,0.0883489016809,0.0881326754937,0.0877980841092,0.0877729198547,0.0878232483638,0.0876648067611,0.0877086112042,0.0875744018466,0.0873302153766,0.0872761588298,0.0870124001618,0.0868492985119,0.0867747377577,0.0865109790897,0.0861987559314,0.0861745236863,0.0857765556607,0.0856274341523,0.0855799016715,0.085367403522,0.0853049588904,0.0852658144944,0.085129741118,0.0848930107234,0.0848547983368,0.084563079386,0.0845817195745,0.0842639043597,0.0840430181254,0.083835180023,0.0836879225335,0.0835443930816,0.0835201608365,0.0834912685442,0.0832573341779,0.0832163257631,0.0828985105483,0.0827969215207,0.0824390299005,0.0824716502304,0.0821920474021,0.0821146906197,0.0817372268015,0.081485584256,0.0813523069079,0.0812171655409,0.0810111914574,0.0808853701846,0.0809291746277,0.0806961722708,0.0806952402614,0.080443597716,0.0804389376688,0.0803345526129,0.0802199154533,0.0800391056244,0.0800307175395,0.0799570887947,0.0796653698439,0.0795013361846,0.0794044072042,0.0792235973752,0.0791238723665,0.0788256293496,0.0786271113415,0.0784239332863,0.0782804038345,0.0781433984486,0.0780921379301,0.0779542005348,0.0776699376594,0.0776550255085,0.077501243953,0.0775375923207,0.0773819467462,0.0772496014075,0.0771480123799,0.0769746586264,0.0769457663341,0.0768600214668,0.076
683871685,0.0765468662991,0.0765561863934,0.0765198380257,0.0764303651207,0.076394016753,0.0761740625281,0.0761544903301,0.0760864536419,0.0760165529348,0.0760696774722,0.0759569043314,0.0758301510493,0.0757266980028,0.0757099218331,0.0756437491638,0.0756176528998,0.0754070187691,0.0752429851099,0.0751432601011,0.0749680423287,0.0749568582156,0.0749000056405,0.0748021446506,0.0748142607732,0.074713603755,0.0747490201132,0.0748571332069,0.0747937565658,0.0747751163772,0.0747713883395,0.0746213348217,0.0745449100486,0.0744722133132,0.074340799984,0.0743463920405,0.0741543980984,0.0739046195718,0.0738188747045,0.0737452459597,0.0735439319233,0.0734572550466,0.0733398218587,0.0733482099435,0.0732587370385,0.0732717851705,0.0734125185941,0.0734852153294,0.0735271557537,0.0735057195368,0.073612900621,0.0735327478102,0.0734880113577,0.0733603260661,0.073362190085,0.0732000204446,0.0730835192661,0.073013618559,0.0730481029079,0.072891525324,0.072950241918,0.0730313267382,0.0729828622479,0.0730042984648,0.0728663610695,0.0728188285887,0.0727256276459,0.0726958033442,0.0725261776284,0.0725774381469,0.0724283166385,0.0722148864796,0.0721347336688,0.0720648329617,0.0720620369334,0.0722064983947,0.0721636259611,0.0719017313119,0.0716985532567,0.0715224034749,0.0714422506641,0.071559683852,0.0714739389846,0.071391922155,0.0712344125617,0.0710619908176,0.0709035492149,0.0711113873173,0.0712120443355,0.0711728999395,0.0712157723732,0.071134687553,0.0710042062331,0.0708541527153,0.0707749319139,0.0707441756028,0.0707376515368,0.0706267424149,0.0704990571233,0.0705055811893,0.0702604627099,0.0700703327866,0.0701355734466,0.0701551456446,0.0701551456446,0.0698969790331,0.069968743759,0.0699342594102,0.0699538316082,0.0699761998345,0.0700302563813,0.0698820668823,0.0699128231934,0.0697553136001,0.0696947329873,0.0697795458452,0.0696201722331,0.069480370819,0.0692044960284,0.0691168871422,0.0689034569833,0.0690786747557,0.0689015929644,0.068907185021,0.0687384913146,0.0685632735422,0.06
85194690991,0.0685530214385,0.0685176050802,0.0683088349685,0.0684029679206,0.0683051069307,0.0683069709496,0.0683106989873,0.0684393162883,0.0684374522695,0.068429996194,0.0683610274964,0.0683889877792,0.0684719366183,0.0683778036661,0.0683386592701,0.0683750076378,0.0682734186102,0.068211905988,0.0681699655637,0.0680422802722,0.0679742435839,0.0678493543206,0.0678046178681,0.0677170089819,0.0676182159826,0.0674728225119,0.0674616383988,0.0673078568432,0.0673059928243,0.0672258400136,0.0671829675799,0.067194151693,0.0671279790237,0.0671065428068,0.0671028147691,0.0670944266843,0.0671102708445,0.0669965656944,0.0669015007328,0.0668586282991,0.0667616993186,0.0666461301496,0.0665967336499,0.0664727763961,0.0664802324715,0.0664932806035,0.0663208588594,0.0664587962546,0.0663665273213,0.0663012866614,0.0661363209927,0.066215541794,0.0661335249644,0.0661642812755,0.0658986585887,0.0656032116001,0.0655817753833,0.065533310893,0.0654457020068,0.0653636851772,0.0654606141577,0.0652863283947,0.0651623711408,0.0650207057078,0.0649974054722,0.0649442809348,0.0650412099153,0.0649694451893,0.0649852893496,0.0649619891139,0.0648650601334,0.0648939524257,0.0648501479826,0.0648864963503,0.0649908814062,0.0650440059435,0.0646749302102,0.0646646781065,0.0645658851072,0.0645043724849,0.0644214236459,0.0643086505052,0.0642219736284,0.0642704381186,0.0641520729213,0.0641241126385,0.0642117215247,0.0639899032809,0.0639190705644,0.0639498268755,0.0640001553846,0.0639740591206,0.0639675350546,0.0637932492917,0.0638007053671,0.06376342499,0.0637708810654,0.0637084364338,0.0637885892445,0.063582615161,0.0634362896809,0.0633337686439,0.0633412247193,0.0632759840594,0.06317719106,0.0629824010897,0.0629097043543,0.0628975882317,0.062830483553,0.062913432392,0.0628658999112,0.0627615148553,0.0625779089981,0.0625434246493,0.0624436996405,0.0624297194991,0.0624903001119,0.0625462206775,0.062488436093,0.0623588867826,0.0623216064055,0.0623654108486,0.062366342858,0.0624110793105,0.0623737989334,0.
0624399716028,0.0623579547732,0.0622694138775,0.0621566407368,0.0621202923691,0.0620485276432,0.0620373435301,0.0620261594169,0.0619506666533,0.0618295054277,0.061819253324,0.0616365794762,0.0616132792405,0.0616626757402,0.0617819729469,0.0618071372014,0.0618211173429,0.0617381685038,0.0615704068068,0.0616654717684,0.0615573586748,0.0614436535247,0.0612926679974,0.0611211782627,0.0609450284809,0.0607632866425,0.0606495814924,0.0606551735489,0.0606691536903,0.0605824768136,0.0604995279745,0.0604370833429,0.0604669076445,0.0604780917577,0.0604911398897,0.0605041880216,0.0604063270317,0.060300077957,0.0602217891651,0.0600913078452,0.0600437753644,0.0601043559772,0.0601248601846,0.0599552344688,0.0598331412338,0.0597324842156,0.0597762886587,0.059707319961,0.0596998638856,0.0597306201967,0.0596905437913,0.0596206430843,0.0593634084822,0.0594351732082,0.0592925757657,0.0591956467853,0.0591201540216,0.0590260210695,0.0589412082115,0.0587725145051,0.0585022317711,0.0585022317711,0.0585096878466,0.0582571132917,0.0582002607166,0.058246861188,0.0581834845469,0.0582002607166,0.0581406121132,0.0581266319718,0.0582347450654,0.0580185188782,0.0580744394439,0.058220764924,0.058240337122,0.0581778924903,0.0580026747179,0.0579346380297,0.0577295959556,0.0576885875408,0.0576047066923,0.0577426440876,0.0576513071637,0.0575469221078,0.0576252108998,0.0575590382304,0.0575869985132,0.0575431940701,0.0576121627678,0.0575525141644,0.0575786104284,0.0573372199866,0.0571498860917,0.0571536141294,0.0569905124796,0.0569467080365,0.0567966545186,0.0566801533402,0.0565916124445,0.0565133236526,0.0565012075301,0.0565739042654,0.0565701762277,0.0564685872001,0.0563306498048,0.0561470439476,0.0561992364755,0.0561367918439,0.056167548155,0.0561861883435,0.0560734152028,0.055999786458,0.0559578460338,0.0559643700998,0.0560603670708,0.0561535680136,0.0560445229105,0.0561097635705,0.0561069675422,0.0560864633348,0.0560146986088,0.0559559820149,0.0558590530344,0.0556232546492,0.055400504396,0.055319419
5758,0.0553604279907,0.0552867992459,0.0552001223691,0.0551050574075,0.0550845532001,0.0550929412849,0.0551544539072,0.0551880062466,0.0552513828876,0.055195462322,0.0551926662937,0.0550360887099,0.0550099924459,0.0548878992109,0.0547844461644,0.0545533078263,0.0543408096769,0.0543137814035,0.0542699769604,0.0543547898183,0.0542336285927,0.0541823680742,0.0540640028769,0.0541301755462,0.0541133993765,0.0542476087341,0.0542811610735,0.0542737049981,0.0541833000836,0.0539680059058,0.0538580287934,0.0538337965482,0.0536539187287,0.0535504656822,0.0534992051637,0.0533510156647,0.0532410385523,0.0532214663543,0.0532261264014,0.0532932310802,0.0532335824769,0.0532559507031,0.0533621997779,0.0533016191651,0.0532839109859,0.05324476659,0.0532699308445,0.0530611607327,0.0530248123651,0.0529045831489,0.0529567756769,0.0528234983287,0.0527181812634,0.0526277763489,0.0525280513401,0.0525476235381,0.0472799062533,0.0420727695812,0.0367360835987,0.0314739583704,0.0)
m <- c(0.0311850679641,0.0363219031225,0.0414661870177,0.0466384036761,0.0518357598214,0.051961457256,0.0520694639405,0.0520489799141,0.0521821260855,0.0523040991516,0.0524139680203,0.0525769091392,0.0528264418241,0.0529242064954,0.0530201089825,0.0529819342061,0.0530657324957,0.0532044652198,0.0533562332334,0.0533515777728,0.0533245761017,0.0534074432993,0.0534214096809,0.0535266230891,0.0535759709708,0.0536895642079,0.0537985019845,0.0537994330766,0.0538618162479,0.053923268327,0.0541169354854,0.0541281085907,0.0541709388276,0.0541783875645,0.0542836009727,0.0543171202885,0.054225873262,0.0543264312096,0.0542249421699,0.0542808076963,0.0542007337751,0.0543264312096,0.054390676565,0.0544996143416,0.054355295065,0.0544018496703,0.0544214026046,0.0543934698414,0.0545768949866,0.0546122764867,0.0547826663424,0.054770562145,0.0546662798289,0.0547091100659,0.0549726091324,0.055038716672,0.0551588275539,0.0552668342384,0.0552212107251,0.0552593855016,0.0552565922252,0.0552789384358,0.0554223266204,0.0555424375023,0.0556187870551,0.0556327534368,0.0556858256869,0.0557109651738,0.0557612441476,0.0558394558847,0.0559130121612,0.0560554692537,0.0561997885304,0.0561588204776,0.0562118927278,0.0563282792412,0.0563981111493,0.0565182220312,0.0566141245183,0.0568115160451,0.0570182184931,0.057069428559,0.0571625377698,0.0572370251384,0.0572416805989,0.0573152368754,0.0574614183363,0.0576616031395,0.0577193308502,0.0576765006132,0.0574614183363,0.0575424233497,0.0574902821917,0.057553596455,0.0575936334156,0.0576271527315,0.0575684939287,0.0574558317837,0.0574418654021,0.057527525876,0.0577323661397,0.0577593678108,0.0577882316661,0.0577528501661,0.0577900938504,0.0578636501269,0.0579204467454,0.0579586215219,0.0580712836669,0.0581187693644,0.0581625306935,0.0583673709572,0.0584604801679,0.0586047994446,0.0587900867741,0.0590042379588,0.0591913874725,0.0592761168543,0.0593794680783,0.0595265806313,0.0595433402892,0.0594018142888,0.0594306781442,0.059498647868,0.0595777906972,0.05
95265806313,0.059688590658,0.0597705267635,0.0599334678824,0.0599586073693,0.0601057199223,0.0601541367119,0.0602882139754,0.0604548794627,0.0605144693576,0.0605051584365,0.0604250845152,0.0605172626339,0.0604464996337,0.060328250936,0.0602919383438,0.06044929291,0.060483743318,0.0605284357392,0.0606057163841,0.0607044121475,0.0608263852137,0.0609027347665,0.0610181901879,0.061278895978,0.0614595278469,0.0614120421494,0.061607571492,0.0615675345314,0.0615628790709,0.0616923008738,0.0616923008738,0.0617854100846,0.0617211647292,0.0617174403608,0.061702542887,0.0618850369401,0.061736993295,0.0619343848219,0.0619669730456,0.0619194873481,0.0619828016115,0.0620042167299,0.0621494670987,0.0622891309149,0.0623962065073,0.0624697627838,0.0625572854419,0.0625991845868,0.0627323307582,0.0627658500741,0.0627807475478,0.0628673391138,0.0629334466535,0.0629800012588,0.0630172449431,0.0630293491405,0.0631056986934,0.0631159407066,0.0630200382195,0.0631578398514,0.063076834838,0.0630209693116,0.0630423844301,0.0630032785615,0.0630395911537,0.063059144088,0.0631894969831,0.0632090499173,0.0633496448256,0.0633887506941,0.0634576515101,0.0635358632472,0.0635582094577,0.0637062531029,0.0637639808135,0.0637667740899,0.063811466511,0.0638775740507,0.0639343706693,0.0640675168407,0.0641866966305,0.0642472176175,0.0642974965913,0.0643747772362,0.0643878125257,0.064546098184,0.0645898595131,0.0646457250396,0.0646382763027,0.0645395805393,0.0646643468817,0.0648012174216,0.0648673249612,0.0647118325792,0.0646932107371,0.0647509384477,0.0648012174216,0.0649166728429,0.06499488458,0.0650712341328,0.0652155534095,0.0651364105803,0.0652406928964,0.0653486995809,0.0655805415157,0.0654697415549,0.0655293314498,0.0655013986866,0.0655619196736,0.0656494423317,0.0655554020288,0.0657453448188,0.0656792372791,0.065816107819,0.0658151767269,0.0659324943324,0.0658645246086,0.0659278388719,0.0659129413982,0.0659157346745,0.0661233682145,0.0660935732671,0.0662872404255,0.0663188975571,0.0663282084782,0.06
63533479651,0.0665535327683,0.0666242957685,0.0667378890056,0.0667872368873,0.0666298823211,0.0666047428342,0.066550739492,0.0665386352946,0.0665963630053,0.0667388200977,0.0667332335451,0.0667183360714,0.0666801612949,0.0667956167163,0.0669948704274,0.067164329191,0.0672667493228,0.067294682086,0.0674836937839,0.0675600433368,0.0675982181132,0.0675851828237,0.0676727054818,0.0676894651397,0.067709018074,0.0676652567449,0.0678030583769,0.0679287558114,0.0678579928112,0.0678403020612,0.0678486818901,0.0680358314038,0.0681447691804,0.0681699086673,0.0680870414697,0.0682164632727,0.0681205607856,0.0681093876803,0.068245327128,0.0684147858916,0.0684929976287,0.0685032396419,0.0686149706948,0.0686885269713,0.0688551924586,0.0689678546036,0.0691056562356,0.0692322847622,0.0691875923411,0.069152210841,0.0692397334991,0.0693142208677,0.0692658040781,0.0693672931179,0.069394294789,0.0694045368022,0.0695320964209,0.0695311653288,0.0697369366846,0.0698309769875,0.0699408458562,0.0699287416588,0.0699678475273,0.0698821870534,0.0700274374222,0.0700674743829,0.0700162643169,0.0701996894622,0.0701819987121,0.0702052760148,0.0701847919884,0.0703002474098,0.0703989431732,0.070621474187,0.0706689598845,0.0708412119244,0.07097622028,0.07095852953,0.0710078774117,0.0709697026353,0.0710348790828,0.0711540588726,0.0713197932678,0.0713114134388,0.0713663478731,0.071304895794,0.0714399041497,0.0715218402551,0.0715060116893,0.0717173695978,0.0718253762823,0.0719026569272,0.0719026569272,0.0718551712297,0.0717574065584,0.0717201628741,0.0716838502819,0.0717434401768,0.0718812418087,0.071969695559,0.0720032148748,0.0719128989404,0.0718169964533,0.0720022837827,0.072237850086,0.0723272349283,0.0723263038362,0.0723309592967,0.0724091710338,0.0723756517179,0.0724389659812,0.0725162466262,0.0727862633374,0.0728830969166,0.072998552338,0.0731642867332,0.0732387741018,0.073162424549,0.0730460380355,0.0730022767064,0.072903580943,0.0728821658245,0.0728672683508,0.0728132650086,0.0727797456927,0.0728
886834693,0.0730078632591,0.0732732245098,0.0733179169309,0.0733998530364,0.0737117688925,0.0739808545116,0.0740432376829,0.0741894191438,0.0743663266442,0.0742285250123,0.0742117653544,0.0741819704069,0.0742909081835,0.0744147434338,0.0744342963681,0.0743244274994,0.0743151165783,0.0742676308808,0.0741735905779,0.0742499401308,0.0743449115258,0.0744799198814,0.0746000307633,0.0746940710662,0.0748095264875,0.0749845718038,0.0750851297514,0.0751642725806,0.0752825212782,0.0755097077525,0.075470601884,0.0754650153314,0.075565573279,0.07555719345,0.0755180875815,0.0756456472003,0.0757806555559,0.075859798385,0.0758961109772,0.07595476978,0.0759864269117,0.0762359595966,0.0763020671362,0.0765320468868,0.076671710703,0.0765823258606,0.0766475023082,0.0768197543481,0.0768653778614,0.0769370719537,0.0772489878098,0.0771344634805,0.0772964735073,0.0771651895201,0.0772182617702,0.0774091356523,0.0774631389945,0.0775404196395,0.0776512196003,0.0778169539955,0.0778430245745,0.0779473068906,0.0779445136142,0.078184735378,0.0783411588521,0.0785450680237,0.0787396662742,0.0789230914195,0.0791316560516,0.0791027921963,0.0793839820128,0.0795180592763,0.0795431987632,0.0796949667768,0.0798299751324,0.0798932893957,0.079999433896,0.0801223380542,0.080112096041,0.0802964522784,0.0801269935148,0.0802806237125,0.0804742908709,0.0806465429109,0.0807461697664,0.0809081797931,0.0810338772277,0.081010599925,0.0812163712808,0.0813299645179,0.0816353627293,0.0816968148084,0.0818904819668,0.0819659004275,0.0820282835987,0.0820776314804,0.0823755809549,0.0825841455871,0.0827321892322,0.0828737152326,0.0829956882987,0.0832610495494,0.0835143066027,0.0836763166294,0.0838783636168,0.0840301316304,0.0840059232356,0.0842703533941,0.0843252878285,0.0845701650528,0.0847005179479,0.0849565682775,0.0847889716982,0.0849910186855,0.0851632707255,0.0853094521864,0.0854202521472,0.0854928773316,0.0857749982402,0.0858783494642,0.08608412082,0.0860291863856,0.0863504131628,0.0863559997155,0.0862982720048,0.08
64835593342,0.0867507827691,0.086836443243,0.0870459389673,0.0871139086911,0.0871707053097,0.087250779231,0.0875124161133,0.0876194917056,0.0879900663645,0.0881474209307,0.0882610141679,0.0884788897211,0.0885719989319,0.088754492985,0.0889947147488,0.0893652894076,0.089498435579,0.0897535548166,0.0899397732381,0.0901343714886,0.0903894907261,0.0905403276476,0.0906073662793,0.0909164888591,0.0911325022281,0.0911473997018,0.0913894836498,0.0916092213873,0.0918261658484,0.0920375237568,0.0921809119414,0.0924248580736,0.092695805877,0.0927879839956,0.0929481318382,0.0932172174573,0.0932870493654,0.0935682391819,0.0937079029981,0.093906225617,0.0940710289201,0.0941538961177,0.0943410456313,0.0945505413556,0.0947497950666,0.0949592907909,0.0950542621859,0.095046813449,0.0952348940548,0.0952935528575,0.0955039796739,0.0957162686744,0.0958205509905,0.0958522081222,0.0959322820434,0.0962004365705,0.0965859087031,0.096913653125,0.0970272463621,0.0972153269679,0.0972143958758,0.0974276159685,0.0974909302318,0.0977562914825,0.0981026577466,0.0981696963783,0.0982153198916,0.0984657836686,0.0986417600769,0.098901534775,0.0991538607362,0.0993968757763,0.0997702437115,0.10003001841,0.100062606633,0.100170613318,0.100413628358,0.100585880398,0.100771167727,0.100938764307,0.101120327268,0.101255335623,0.101381033058,0.101630565743,0.101827026177,0.102071903402,0.102219947047,0.102273950389,0.102332609192,0.102340989021,0.102538380548,0.102739496443,0.103051412299,0.103024410628,0.103291634063,0.103606343195,0.103931294341,0.104335388316,0.104898699041,0.105498322358,0.105937797833,0.106372617847,0.106866096664,0.107531827522,0.10808768951,0.108528096077,0.108991779946,0.109358630237,0.109608162922,0.109799036804,0.110038327476,0.110353036608,0.110533668477,0.110631433148,0.110803685188,0.11107835736,0.111145395992,0.111424723624,0.111752468046,0.112094178849,0.112265499797,0.11229343256,0.112619314798,0.112812981957,0.113110931431,0.113272010366,0.113459159879,0.11369379509,0.1138958
42078,0.114179825171,0.114554124198,0.114896766094,0.114907939199,0.115084846699,0.115202164305,0.115483354122,0.115874412807,0.115842755675,0.115776648135,0.115759888478,0.115863239701,0.116152809347,0.116427481519,0.11681667802,0.117029898112,0.117013138455,0.117048519955,0.117309225745,0.117695628969,0.117770116338,0.117878123023,0.117944230562,0.118007544826,0.118108102773,0.118275699353,0.118503816919,0.118729141209,0.118920015091,0.118943292394,0.119522431685,0.119768240001,0.120056878555,0.120376243148,0.120528011161,0.12070398757,0.121045698373,0.121289644505,0.121527072993,0.121878094717,0.122004723244,0.12229894835,0.122356676061,0.122588517996,0.122808255733,0.123021475826,0.123140655615,0.123269146326,0.123608994946,0.123780315893,0.123879942749,0.12406150571,0.124201169526,0.124340833342,0.124385525764,0.124624816435,0.124805448304,0.125073602831,0.125087569213,0.125287754016,0.125330584253,0.125631327004,0.125898550439,0.125892032794,0.126201155374,0.126098735242,0.126095941965,0.126008419307,0.125909723544,0.125932069754,0.126000039478,0.12570488328,0.125629464819,0.125713263109,0.125542873253,0.12553263124,0.125730022767,0.125786819386,0.125739333688,0.125814752149,0.125771921912,0.125821269794,0.125848271465,0.12570488328,0.125742126964,0.125759817715,0.125592221135,0.125438590937,0.125501905201,0.12534268845,0.125310100226,0.125279374187,0.125196506989,0.125417175819,0.125275649818,0.125122019621,0.124941387752,0.124817552501,0.124622023159,0.124460944224,0.124353868632,0.124122026697,0.123921841894,0.123630410064,0.123515885735,0.123305458919,0.123158346366,0.123188141313,0.123293354721,0.123112722852,0.12279801372,0.122573620522,0.122563378509,0.122241220639,0.122152766889,0.122062450955,0.122000998876,0.121771019125,0.121301748703,0.121157429426,0.121055940386,0.120868790873,0.120487974201,0.120241234792,0.119920008015,0.11994421641,0.119628576185,0.119400458619,0.119391147698,0.11925520825,0.119101578052,0.119115544434,0.119265450263,0.11931107
3776,0.119190962894,0.118795248749,0.118647205104,0.118621134525,0.118581097564,0.118448882485,0.118491712722,0.118164899392,0.117744976851,0.117477753416,0.117309225745,0.117185390494,0.116987067875,0.116582973901,0.116293404255,0.116273851321,0.115984281675,0.115911656491,0.115667710359,0.11557553224,0.115430281871,0.115321344095,0.115204957581,0.115108124002,0.114920043396,0.114474050277,0.114270141105,0.114109993263,0.114010366407,0.113704968196,0.113599754788,0.113349291011,0.113174245694,0.113133277642,0.112740356772,0.112406094705,0.112203116626,0.111841852888,0.111592320203,0.111471278229,0.11116215565,0.110852101978,0.110571843253,0.110309275279,0.11016402491,0.110122125765,0.109878179633,0.109859557791,0.109606300738,0.109388425184,0.109160307618,0.109102579907,0.108958260631,0.108739453985,0.108475023827,0.108247837352,0.1081714878,0.107711528298,0.107692906456,0.107534620798,0.107431269574,0.107166839415,0.106992725191,0.106826059704,0.106724570664,0.106580251387,0.106459209413,0.106438725387,0.106178019597,0.105948970938,0.105707818082,0.105450836661,0.105341898884,0.104975048594,0.104801865462,0.104618440316,0.104360527803,0.104221795079,0.103977848946,0.103737627183,0.103650104524,0.103605412103,0.103363328155,0.103195731576,0.102992753496,0.102700390575,0.102493688127,0.102297227692,0.102134286573,0.102072834494,0.10173112369,0.101425725479,0.101263715452,0.101089601228,0.100994629833,0.100806549227,0.100578431661,0.100443423305,0.100240445226,0.0999601865015,0.09979165883,0.09961940679,0.0993465968025,0.0992162439074,0.0990384053148,0.0990104725516,0.0987423180246,0.0984406441817,0.0983885030236,0.098235803918,0.0980877602728,0.0979629939304,0.0978326410353,0.0978084326405,0.097484412587,0.0971939118494,0.0968959623749,0.0967078817692,0.0963671020578,0.0961222248334,0.0958345173721,0.0955672939372,0.0953382452787,0.0949592907909,0.0948680437643,0.0945402993424,0.0944267061052,0.0941846221572,0.093932296196,0.0936818324191,0.0934360241026,0.093177180
4967,0.0928419873379,0.0924853790606,0.0920673187043,0.0917861288877,0.0914732819395,0.0911092249254,0.0906511276084,0.0902554134626,0.0899779480145,0.0897554170008,0.0892852154864,0.0888820526037,0.0886362442873,0.0883839183261,0.0879882041803,0.0878206076009,0.0877917437456,0.0875859723898,0.0873271287838,0.0869062751511,0.0868215457693,0.0869453810197,0.086887653309,0.0865105610053,0.0865552534265,0.0862303022809,0.0860031158066,0.0860003225303,0.0859127998722,0.0859249040696,0.0858448301483,0.0854686689368,0.0851381312385,0.085236827002,0.084986363225,0.0849388775275,0.0847815229613,0.0844835734868,0.0842964239732,0.0841865551045,0.0840208207093,0.0839705417355,0.0839379535117,0.0838690526957,0.0836893519189,0.0836949384716,0.0835627233923,0.0834426125104,0.0832917755889,0.0830841420489,0.0829854462855,0.0828392648246,0.0827666396402,0.0824919674684,0.0824509994156,0.0822517457046,0.0822061221913,0.0821921558097,0.0821269793622,0.0819631071512,0.0818755844931,0.0818253055193,0.0815953257686,0.0815767039265,0.081347655268,0.0810934671226,0.0808346235166,0.0806763378583,0.0805310874895,0.0804184253445,0.0801865834097,0.0800068826329,0.0799202910668,0.0797247617242,0.0796958978689,0.0795310945658,0.0794677803025,0.0793914307497,0.0792396627361,0.0790990678278,0.0790478577619,0.0789845434986,0.0788476729587,0.0787527015638,0.0786260730371,0.0787433906427,0.0785087554315,0.0783933000102,0.0783048462599,0.0783784025364,0.078366298339,0.0781512160622,0.0781139723778,0.078124214391,0.0779733774696,0.0777098784031,0.0777387422584,0.0775543860211,0.0773886516259,0.077380271797,0.0772694718361,0.0773299928231,0.0772461945334,0.0771400500332,0.0770106282302,0.0769584870722,0.076931485401,0.0770413542697,0.077138187849,0.0767173342163,0.0765218048736,0.0765124939526,0.0762834452941,0.076213613386,0.0763281377152,0.076378416689,0.0763942452549,0.0762406150571,0.0760907092278,0.0760748806619,0.0761139865304,0.0759221815563,0.0759482521353,0.0759100773589,0.0757415496874,0.0756
60544674,0.0755180875815,0.0752881078309,0.0753244204231,0.0753383868047,0.0751391330937,0.075185687699,0.0751456507384,0.0749501213958,0.0748514256324,0.0746642761187,0.0745776845527,0.0746186526054,0.0744464005655,0.0742853216309,0.0742983569204,0.0739873721564,0.0739054360509,0.0737909117217,0.0738700545508,0.0738272243139,0.0737359772873,0.073672663024,0.0735972445633,0.0736242462344,0.073698733603,0.073560000879,0.073508790813,0.0734817891419,0.0732899841677,0.0733942664838,0.0733821622864,0.0731130766672,0.0731568379963,0.0731931505885,0.0729259271536,0.0730730397066,0.0729966901538,0.0729445489957,0.0728439910481,0.0727685725874,0.072592596179,0.0726298398633,0.0724054466654,0.0722676450334,0.0722611273887,0.0720888753487,0.0719399006115,0.0718886905456,0.0718682065192,0.0717266805188,0.0718197897296,0.0718654132429,0.0718831039929,0.0718942770982,0.0719315207825,0.0719343140589,0.0719464182563,0.0719715577432,0.0718412048481,0.0718039611638,0.0717574065584,0.0716056385448,0.0715013562288,0.0713076890704,0.071123332833,0.071131712662,0.0711009866224,0.0710674673066,0.0710767782276,0.0709641160826,0.0707471716215,0.0708523850297,0.0707806909374,0.0708775245166,0.0709901866616,0.0708914908982,0.070837487556,0.0706940993714,0.0706075078053,0.0706466136739,0.0705981968843,0.0705041565814,0.0704129095548,0.0702266911333,0.0700134710406,0.0697387988688,0.0696875888029,0.0695302342367,0.0693933636969,0.0693505334599,0.0694408493944,0.069273252815,0.0692592864334,0.0692620797097,0.0692453200517,0.0694669199734,0.0695311653288,0.06957585775,0.0695190611314,0.069333773802,0.069186661249,0.0691103116961,0.0690246512222,0.0689464394852,0.0689352663799,0.0688803319455,0.0687192530109,0.0686643185765,0.0687713941689,0.0687341504846,0.0686978378924,0.0687425303136,0.068616832879,0.068599142129,0.0685544497078,0.0683728867468,0.0682127389043,0.068089834746,0.0680767994565,0.0680777305486,0.0679222381667,0.0678682348244,0.0679278247193,0.0676624634686,0.0674520366523,0.067481
8315997,0.0674222417048,0.0674827626918,0.0673477543362,0.067319821573,0.0674380702706,0.0673747560073,0.0672369543754,0.0673822047442,0.0674445879154,0.0673095795598,0.0673663761783,0.0674008265863,0.0673468232441,0.067250920757,0.0670730821644,0.0668924502955,0.0669520401904,0.0668915192034,0.0668850015586,0.066818894019,0.0665656369657,0.0663095866361,0.0662863093334,0.0661894757542,0.0660907799908,0.0662648942149,0.0662043732279,0.0660945043592,0.065945529622,0.0659445985298,0.0660917110829,0.0661094018329,0.0660479497538,0.0660712270565,0.0660544673986,0.0659576338194,0.0658812842665,0.0659334254246,0.065894319556,0.0659138724903,0.0658729044375,0.0658570758717,0.0658468338585,0.0657220675161,0.0657304473451,0.0656568910686,0.0656243028448,0.0655796104236,0.0654837079365,0.0654092205679,0.0653133180808,0.0651773786331,0.0650246795274,0.0650367837248,0.0649325014087,0.0583673709572,0.0518636925847,0.0452799402909,0.0388209543396,0.0)
ycorr <- c(-6.963945464230894e-05,-6.989245895088918e-05,-7.065343873081527e-05,-7.192464508700628e-05,-7.370318863724368e-05,-7.599175242857074e-05,-7.87928975590728e-05,-8.160523232811023e-05,-8.443116026554575e-05,-8.726938983667123e-05,-9.01207917383864e-05,-9.298582954649826e-05,-9.586751037012458e-05,-9.876409926636466e-05,-0.00010167365476705102,-0.00010459764675679961,-0.00010753366618940426,-0.00011048346330154191,-0.00011344733906419508,-0.00011642417705092498,-0.00011941123852634672,-0.00012240655268221088,-0.00012540958827716105,-0.00012842007346769384,-0.00013144216426319054,-0.00013447331265242465,-0.00013751153097318766,-0.00014055428772801037,-0.00014359862317254437,-0.00014664490746269781,-0.00014969556171272173,-0.00015275805836888768,-0.00015583039200853114,-0.00015891037869233016,-0.00016199634418193312,-0.00016508768589870826,-0.00016818971412269095,-0.00017130234973501912,-0.00017442837086722803,-0.00017756788458144678,-0.00018071762380486648,-0.0001838747303655316,-0.00018703801031405275,-0.00019021103716432676,-0.00019339788954285075,-0.00019659846299556927,-0.00019980828329963844,-0.00020303055575165123,-0.00020626570308309117,-0.0002095135728341462,-0.00021277655608577416,-0.00021605529270226923,-0.00021934916101502846,-0.00022265895281817306,-0.00022598092731771494,-0.00022931417637060534,-0.00023266080708765947,-0.00023602198608252726,-0.00023939420245295921,-0.00024277741093883676,-0.000246170788297485,-0.0002495727481096082,-0.0002529846927316774,-0.00025640554538111936,-0.00025983369638301484,-0.0002632730879893186,-0.00026672307059712376,-0.000270181251533173,-0.0002736481881767367,-0.00027712120659600475,-0.0002806015034244412,-0.00028408812970821373,-0.00028758246617583354,-0.00029108469141989404,-0.00029459715701097953,-0.00029811596084301003,-0.00030163953585963263,-0.00030516564354316844,-0.00030869672884411875,-0.00031223397468108647,-0.00031577696632783506,-0.00031932735406567457,-0.00032288233313425014,-0.00032643791512086467,
-0.0003299901226858413,-0.0003335381874982909,-0.0003370829895770276,-0.0003406241149127168,-0.00034415872277876534,-0.0003476839043374914,-0.0003511977229013132,-0.00035469771361598646,-0.00035818055653228227,-0.0003616509756405805,-0.00036510956198914686,-0.00036855543503448566,-0.00037198878211002124,-0.0003754080442135542,-0.0003788139864912605,-0.00038221124443676556,-0.0003855966758643559,-0.00038897015547662624,-0.00039233522987601124,-0.00039569081442868354,-0.0003990341408544193,-0.00040236679661093653,-0.00040568977653104236,-0.000409002700673464,-0.0004123049120747803,-0.0004155928473442137,-0.0004188696002384322,-0.0004221356930435769,-0.0004253881848156766,-0.0004286229041460608,-0.0004318405105772272,-0.0004350398398917353,-0.0004382178542135061,-0.0004413768238118908,-0.00044451424425007635,-0.0004476304782852597,-0.00045072291814175476,-0.00045378675594692,-0.00045682213587583436,-0.0004598313064899229,-0.0004628182057129853,-0.00046578339528832475,-0.00046872662806031994,-0.0004716445745638022,-0.00047453551919237567,-0.0004774021285373289,-0.0004802427488340733,-0.00048306201796748013,-0.0004858606741096791,-0.000488639928193432,-0.0004913967305187091,-0.000494126755231481,-0.0004968295535302841,-0.0004995075733807499,-0.0005021625543843313,-0.0005047919588440384,-0.0005073967417446687,-0.000509978311793092,-0.0005125373662537217,-0.0005150703882073939,-0.0005175773388512753,-0.0005200622666099217,-0.0005225275446797171,-0.0005249717671938695,-0.000527396949199896,-0.0005298078226124076,-0.0005322040722372457,-0.0005345827023204799,-0.0005369407398599365,-0.0005392757531818215,-0.0005415923202922985,-0.0005438911884265415,-0.0005461701313757578,-0.0005484306382356166,-0.0005506706390243009,-0.0005528833687407788,-0.0005550666718046358,-0.0005572226494154751,-0.0005593531152369555,-0.0005614628780522418,-0.0005635494326489695,-0.0005656096789767409,-0.0005676452088992077,-0.000569655859116893,-0.0005716399055610578,-0.0005735998499109794,-0.00057553
96334211717,-0.0005774578063224235,-0.0005793546445447278,-0.000581229508499624,-0.0005830812090742534,-0.000584912044425054,-0.0005867194460393955,-0.000588500787718269,-0.0005902573497121704,-0.000591989315940824,-0.0005936960947981728,-0.0005953792425165946,-0.0005970388069269454,-0.0005986747030369691,-0.0006002881389340472,-0.0006018762197826121,-0.0006034369730042238,-0.0006049743932954155,-0.0006064868188973097,-0.0006079743516379281,-0.0006094375831747855,-0.0006108759765663512,-0.000612286959672839,-0.0006136702886229478,-0.0006150243758029285,-0.0006163481640472028,-0.0006176466130562628,-0.0006189202248493838,-0.000620170688902936,-0.0006213976190488159,-0.0006226000019839333,-0.0006237773597252007,-0.000624933344939333,-0.0006260680965942865,-0.0006271824548993804,-0.0006282767823035959,-0.0006293501089288774,-0.0006304025788591892,-0.0006314333214190779,-0.0006324433605865932,-0.0006334349818964574,-0.0006344071640170029,-0.0006353571024096908,-0.0006362828586603164,-0.0006371849162445253,-0.0006380642467655309,-0.0006389225061996468,-0.0006397597974741806,-0.0006405757297823002,-0.0006413707402801353,-0.0006421412011169832,-0.0006428876808851101,-0.0006436092407020755,-0.0006443094150008508,-0.0006449856390196549,-0.0006456395349568013,-0.000646269715499253,-0.000646874643445727,-0.0006474537728748501,-0.0006480046859876485,-0.0006485286065007509,-0.0006490266784069194,-0.0006495013753439819,-0.0006499531206052012,-0.0006503823897243627,-0.0006507906765364309,-0.000651175997105126,-0.0006515393355556892,-0.0006518818717363128,-0.0006522032707849849,-0.0006525058775773853,-0.0006527884085545338,-0.0006530482441957376,-0.0006532835109698416,-0.0006534946967830089,-0.00065368099066629,-0.0006538427238796637,-0.0006539798632614053,-0.0006540928505264138,-0.0006541837285792347,-0.0006542523460381014,-0.0006542974870342724,-0.0006543218348416114,-0.0006543260389129512,-0.00065431045023957,-0.0006542762425026074,-0.0006542247634511545,-0.0006541554348033938,-
0.000654068187755336,-0.0006539627388711227,-0.0006538369459130335,-0.0006536921903241512,-0.0006535300358943921,-0.0006533501336094997,-0.0006531543038260444,-0.0006529390684879312,-0.0006527027976861023,-0.0006524426660705508,-0.0006521604634369086,-0.0006518562440399019,-0.0006515293218868675,-0.0006511786228241146,-0.000650801287783444,-0.0006503954713479615,-0.0006499594316758595,-0.0006494953192390015,-0.0006490044576210714,-0.0006484900487221128,-0.0006479510933846936,-0.0006473868357028139,-0.0006467973586945851,-0.0006461842785630698,-0.0006455489775787648,-0.0006448921336543464,-0.0006442137002921412,-0.0006435124102811608,-0.0006427846669717156,-0.0006420286912049096,-0.000641241293909726,-0.0006404209071041382,-0.0006395664795648627,-0.0006386771051846171,-0.0006377513447772502,-0.0006367886113958181,-0.0006357885869664296,-0.0006347520492845863,-0.0006336821586075505,-0.0006325790899557207,-0.000631448040218419,-0.0006302931390931266,-0.0006291178186064421,-0.0006279221466458556,-0.0006267056144209909,-0.0006254698125294203,-0.0006242186772714357,-0.0006229543668989361,-0.0006216737479198626,-0.0006203793962134268,-0.0006190699751877425,-0.0006177418418390781,-0.0006163939252006545,-0.0006150270795729788,-0.0006136449375855195,-0.0006122482381359241,-0.0006108326849399347,-0.0006093949351539871,-0.0006079382578504581,-0.0006064639368675184,-0.0006049700693913391,-0.0006034596649306108,-0.0006019321333167317,-0.0006003871314937453,-0.0005988237861760756,-0.000597240785147758,-0.0005956412754889854,-0.0005940293358630916,-0.0005924063835121389,-0.0005907684111411529,-0.0005891187391236621,-0.000587455562160103,-0.0005857797351613848,-0.0005840936119120523,-0.0005823960597450078,-0.0005806887872849144,-0.0005789709836370462,-0.0005772403871191291,-0.0005754940816461106,-0.0005737332538931687,-0.0005719579440430643,-0.0005701689013694667,-0.0005683685129123454,-0.0005665553171749417,-0.0005647284832176214,-0.0005628888554346916,-0.0005610350946224967,-0.000
5591677760979336,-0.0005572890153173861,-0.0005553990908373341,-0.0005534942528685669,-0.0005515748644797421,-0.0005496391757911893,-0.0005476862874948752,-0.0005457168067213371,-0.0005437305189099674,-0.0005417273422238464,-0.0005397079317311503,-0.0005376723542668136,-0.0005356200845607895,-0.0005335534653660627,-0.0005314742206072279,-0.0005293834555600628,-0.0005272810906457529,-0.000525168384494288,-0.0005230459857994518,-0.0005209153815861592,-0.0005187759098888832,-0.0005166262664172859,-0.0005144666566244507,-0.0005122995805620806,-0.0005101220892070578,-0.0005079352948151442,-0.0005057382955871816,-0.0005035315289974863,-0.0005013144567925135,-0.0004990855702342874,-0.0004968451970471681,-0.0004945962447812695,-0.0004923384461497197,-0.0004900697435815121,-0.00048778948744947914,-0.00048549921578438555,-0.0004831991785618047,-0.0004808919245909307,-0.00047857622054073144,-0.000476253160210216,-0.0004739218804060179,-0.00047158062491622064,-0.0004692275500869825,-0.0004668638090000601,-0.0004644924363930688,-0.00046211289318152064,-0.0004597270595397448,-0.0004573292363299229,-0.000454918591751041,-0.00045249663979635846,-0.0004500614356047977,-0.0004476159847970226,-0.0004451636248826699,-0.000442705663247827,-0.00044024238361116923,-0.0004377715318708356,-0.0004352906010558608,-0.000432800736895639,-0.0004303061676641572,-0.00042780532138706574,-0.00042529861944129866,-0.0004227859451490872,-0.00042026524562626305,-0.000417733730951118,-0.00041519030989104524,-0.00041263273787278595,-0.00041006412891401425,-0.00040748595851341314,-0.00040489497996077614,-0.0004022937418529338,-0.0003996836682660116,-0.0003970623443550483,-0.00039442897169872436,-0.0003917860362618693,-0.000389133970357544,-0.00038647448903657994,-0.00038380777951377504,-0.0003811306090607333,-0.0003784444287449854,-0.0003757476049503343,-0.00037303940127791024,-0.00037031904624364154,-0.000367592014795352,-0.000364857181836029,-0.0003621119179369249,-0.00035935662241096155,-0.0003565898962
3044927,-0.00035381524219730805,-0.0003510355933940542,-0.00034824712899514816,-0.000345453124366857,-0.0003426527490885442,-0.0003398439537616635,-0.0003370254833158356,-0.00033420070313495097,-0.0003313672426074094,-0.0003285268254426013,-0.0003256798667034782,-0.0003228215883248714,-0.0003199543733906254,-0.0003170793480701887,-0.00031419659867374083,-0.0003113056388099249,-0.00030840854842662955,-0.0003055005509032062,-0.00030258332400522015,-0.00029965834391187815,-0.00029672333411109267,-0.0002937787002013878,-0.0002908262367748553,-0.0002878605497386863,-0.000284882426935361,-0.0002818921393743556,-0.0002788897646374066,-0.00027587963454339166,-0.00027286473817690913,-0.0002698423633268317,-0.0002668148539398285,-0.0002637826603012767,-0.00026074415016499685,-0.00025770137848390897,-0.0002546561599027549,-0.0002516068922406947,-0.0002485529825355403,-0.00024549376595108696,-0.00024242390221604695,-0.00023934203736183465,-0.0002362479421198643,-0.00023314133047002574,-0.00023002366379420405,-0.00022689608709749846,-0.00022375906659184632,-0.00022061363422431836,-0.0002174603294526056,-0.0002142965824191592,-0.00021112433852787157,-0.00020794533761456547,-0.00020476125281874367,-0.00020157256965967768,-0.00019837833714378143,-0.0001951761889896558,-0.00019196454225169361,-0.0001887403882491852,-0.00018550321061770278,-0.00018225363184041257,-0.00017899430351048202,-0.0001757236048249757,-0.00017243916194863306,-0.00016913860424972775,-0.0001658197962277183,-0.0001624858532601169,-0.0001591359704613341,-0.00015577181306054475,-0.0001523959497318226,-0.00014900956319995607,-0.0001456108487072544,-0.0001422002019105479,-0.00013877849246388837,-0.00013534845673713583,-0.00013191387452433099,-0.00012847677830839495,-0.00012503614249840758,-0.00012159290187138913,-0.00011814340122178209,-0.00011468559746716941,-0.00011121810997199076,-0.00010774224702584653,-0.00010425857631460174,-0.0001007671053651705,-9.726697271307442e-05,-9.375382491200125e-05,-9.022753438988129
e-05,-8.668791614436124e-05,-8.313578372376436e-05,-7.957455750988237e-05,-7.600616827121779e-05,-7.242903022863853e-05,-6.884317806303775e-05,-6.524739841975049e-05,-6.164062544618779e-05,-5.802226250257016e-05,-5.439463497186853e-05,-5.0759904997529986e-05,-4.7117451065909446e-05,-4.346564787531811e-05,-3.9804632951826646e-05,-3.6135052884491015e-05,-3.245507389172873e-05,-2.876376000637409e-05,-2.506266271268455e-05,-2.1352110419713498e-05,-1.763447205452317e-05,-1.3906889996636727e-05,-1.0168776020470785e-05,-6.421962210905658e-06,-2.6667561972959667e-06,1.098276461983329e-06,4.871326322325192e-06,8.647891872748094e-06,1.2429190461963847e-05,1.6215114436390858e-05,2.000661524641832e-05,2.3806038566351578e-05,2.7611843985978117e-05,3.142721806102185e-05,3.525433727590937e-05,3.909041710926819e-05,4.293484787671056e-05,4.678975955090443e-05,5.0656176728461723e-05,5.453317972902674e-05,5.842371630496676e-05,6.232440904552452e-05,6.623448742046646e-05,7.015210068725972e-05,7.407292027338256e-05,7.799699297156694e-05,8.192698285071339e-05,8.586348128250362e-05,8.980624276848183e-05,9.375382747379563e-05,9.770334159169667e-05,0.00010165436963041532,0.00010560849992657924,0.00010956653276066013,0.0001135301444440898,0.00011750218167297462,0.00012148052462605582,0.00012546559817230168,0.00012945558340769731,0.0001334515040666201,0.00013745433842597126,0.00014146368504352786,0.00014547980008544165,0.00014950246820531694,0.00015353107319706274,0.00015756441664108848,0.0001616015777234946,0.000165643617721768,0.0001696897466269831,0.00017374117845620683,0.0001777989713516889,0.00018186516875948915,0.00018593892083342228,0.00019001830072450088,0.00019417619251057945,0.0001985025441909014,0.0002030804139967133,0.00020798148985769527,0.00021363138997034623,0.00021859518242117761,0.0002231998416947526,0.00022748611918655274,0.00023150563415248182,0.00023532073172326327,0.00023899042324654744,0.00024244101864846163,0.0002455813945958207,0.0002483287853287206,0.00025061183561578
336,0.00025200639392852553,0.0002539451590899212,0.000256102296454223,0.00025843465330957385,0.00026089130979177065,0.00026341034849721064,0.00026593298490836773,0.0002684602972768441,0.00027099450067852554,0.00027353365002670836,0.00027607630774913265,0.0002786216685271989,0.0002811691521076667,0.0002837163489330127,0.00028626282601815884,0.0002888062768201909,0.00029134883608677835,0.00029388905537870784,0.00029642614065890187,0.00029895956311797473,0.0003014908994745471,0.00030402109781040764,0.00030655134071542746,0.00030908369182095635,0.00031161909469848675,0.00031415867386403677,0.00031669941987273054,0.000319240771216576,0.0003217810495187929,0.0003243213078782031,0.0003268588225885852,0.00032939508571766627,0.0003319283886635976,0.00033445692799367904,0.00033697737082261705,0.0003394895623468103,0.00034199454150733613,0.0003444943761687013,0.000346990149105484,0.0003494835912678335,0.0003519762670543372,0.00035446851183164535,0.00035695830142119675,0.0003594461752534736,0.0003619325494693787,0.0003644187846695203,0.00036690410094302145,0.0003693869646493269,0.00037186731354535267,0.0003743430731274931,0.0003768145989838617,0.0003792774202897938,0.00038173180450274156,0.000384179181999308,0.0003866226072803741,0.0003890616448919226,0.00039149726758712085,0.00039392975212651174,0.0003963590847484621,0.0003987859757663284,0.00040120921337522946,0.00040363097872967726,0.0004060527909424212,0.0004084727497000354,0.0004108886693993348,0.000413298260797957,0.0004157008209488046,0.0004180973251396069,0.00042048675009760623,0.0004228684035593777,0.0004252431518822352,0.0004276102094183539,0.0004299685434150342,0.00043231914640708315,0.0004346643526950566,0.00043700473195800176,0.00043933957841746374,0.00044166925352734593,0.00044399271767793817,0.0004463100774225261,0.0004486201722389452,0.00045092242839415693,0.0004532172674129761,0.00045550484917801466,0.0004577829496837987,0.0004600518224193903,0.00046231315025223687,0.0004645653486627257,0.00046680809300021455,0
.00046903969478886206,0.0004712614849505991,0.00047347380873433327,0.0004756761056454096,0.0004778671978309957,0.0004800454780452055,0.00048221313206349946,0.00048436660673906786,0.00048650368084244736,0.000488625245257824,0.0004907322537676646,0.0004928251253773047,0.0004949044463064433,0.0004969714556653745,0.0004990257445064078,0.000501068078724225,0.0005030968883623973,0.0005051141037156267,0.0005071208806550981,0.0005091172128449905,0.0005111048549821036,0.0005130810636840893,0.000515045638161042,0.0005169947027189018,0.0005189263677246292,0.0005208422346325654,0.0005227417832016159,0.000524624463840235,0.0005264882887967824,0.0005283357020738935,0.0005301660729329564,0.0005319786849517054,0.000533774013460804,0.0005355528094119113,0.000537318785408891,0.0005390708702389948,0.0005408088169121429,0.0005425339563413002,0.0005442484804259367,0.0005459491350797729,0.0005476324931686765,0.0005492985931294471,0.0005509459593537957,0.0005525748825836214,0.0005541848401810928,0.0005557749197154314,0.0005573466103073604,0.0005588960808372374,0.0005604225016562955,0.0005619257982139182,0.00056340856610918,0.0005648738982105858,0.000566322057752704,0.0005677542146523125,0.0005691691598980965,0.0005705661359726536,0.0005719431029870241,0.0005733001636027815,0.0005746391197090752,0.0005759604623261725,0.0005772619345294185,0.0005785401709769473,0.0005797942251911153,0.0005810228962284767,0.0005822271242202355,0.0005834084329042189,0.0005845671740211797,0.0005857038825088595,0.0005868195214155399,0.0005879117186684137,0.0005889823414538841,0.0005900336794615584,0.0005910672780349165,0.000592082625598475,0.0005930787161777383,0.0005940530123444033,0.0005950035857258767,0.0005959317572643246,0.0005968364044853918,0.0005977173804416365,0.0005985767050047417,0.0005994121874204635,0.0006002216005633538,0.0006010057003312174,0.0006017635982475705,0.0006024967662300498,0.0006032071488926707,0.0006038963412298242,0.000604561414012351,0.0006052030846463988,0.0006058190164769839,0.000
606408254673834,0.0006069696672951405,0.0006075040224113248,0.0006080113512757025,0.0006084932448291677,0.000608948633634493,0.0006093748288336188,0.0006097703711400893,0.0006101370055246444,0.0006104742165063828,0.0006107846942072291,0.0006110691289472999,0.0006113289773211484,0.0006115622924016345,0.0006117682881796667,0.000611945498592474,0.0006120947761540634,0.0006122185729084995,0.0006123178060984057,0.0006123924636995189,0.0006124414291990043,0.0006124641441195189,0.0006124606832281825,0.0006124333471953913,0.000612382152738726,0.0006123065785471945,0.0006122067854662421,0.000612083840219662,0.000611935672080234,0.0006117626261991642,0.0006115634180319989,0.0006113396033370613,0.000611089379952585,0.000610811525969627,0.0006105029534136925,0.0006101682224326433,0.0006098067688934897,0.000609417134880007,0.0006089998509859903,0.0006085547785734286,0.0006080827775718527,0.0006075839855798055,0.0006070585209551414,0.0006065066898220875,0.0006059303236824779,0.0006053284269076794,0.0006047017751797561,0.0006040520425336898,0.0006033788198978749,0.0006026804398040837,0.0006019580618263534,0.0006012101549588938,0.0006004377057152601,0.0005996407892352799,0.0005988186822108238,0.0005979708362886294,0.0005970978699500665,0.0005961959446264353,0.0005952646192870106,0.0005943063463019739,0.0005933212617607988,0.0005923110618430275,0.0005912749371132475,0.0005902128431166694,0.000589125427036862,0.0005880143599841328,0.0005868766751993408,0.0005857147396838259,0.0005845312730108575,0.0005833247947601393,0.0005820962911786985,0.000580844684555055,0.0005795692901868146,0.0005782709222053011,0.0005769501816657972,0.000575603716827402,0.0005742337927215298,0.0005728430549948091,0.0005714293587942446,0.0005699912801434889,0.0005685307283319689,0.0005670472226194594,0.0005655428693364059,0.0005640163410229158,0.0005624688007238779,0.0005609007000686722,0.000559314119691468,0.0005577062093896952,0.0005560775010306967,0.000554429025001829,0.0005527599125574416,0.000551068711789
3498,0.0005493541041880724,0.000547616723990981,0.0005458564402614515,0.0005440740863377699,0.0005422670107410949,0.0005404370965085115,0.0005385841324082863,0.0005367078037537418,0.0005348079550208623,0.0005328862791424137,0.0005309439900378966,0.0005289823682982328,0.0005270002710746413,0.0005249982097970498,0.0005229765926431266,0.0005209368215213621,0.0005188782455100838,0.0005168035662201677,0.000514711560434444,0.0005126012452380171,0.0005104723336546238,0.000508325195808237,0.0005061593528747582,0.0005039735369506516,0.0005017675437628665,0.0004995386464733445,0.0004972889965281794,0.0004950153506836115,0.0004927188456613821,0.0004904015975494543,0.00048806438922860114,0.00048570551137126793,0.00048332577784459873,0.000480926193158438,0.00047850743323644523,0.00047607241308711445,0.00047361810761762014,0.0004711456384346536,0.00046865682678847294,0.0004661508081878508,0.0004636270234674089,0.0004610882201898334,0.000458534001254135,0.00045596427988341386,0.0004533802277864015,0.00045078259609700145,0.00044817195475384177,0.0004455515380427293,0.00044291858142010026,0.0004402732335252342,0.0004376145502345219,0.00043494105215010557,0.00043225430545770153,0.0004295554246847679,0.00042684366878392526,0.0004241144072729337,0.00042136915622196917,0.00041860801975560096,0.0004158307889334934,0.0004130396853380742,0.00041023756070040227,0.00040742397740005045,0.0004046005044156617,0.0004017635181905361,0.00039891389679162434,0.0003960528336003649,0.0003931840019298549,0.0003903059183297884,0.0003874177331920881,0.0003845209823961171,0.00038161036214524934,0.0003786841105402204,0.00037574219264061514,0.00037278688156412953,0.00036981997998076496,0.0003668426054628318,0.00036385148846331574,0.0003608455273745321,0.00035782610201246645,0.0003547918632332233,0.0003517455701504736,0.0003486918777233771,0.0003456302916986524,0.00034256057068673653,0.00033948142877081257,0.0003363923349673979,0.0003332943029091594,0.00033019144194750536,0.00032708223679704055,0.00032396754
836762435,0.00032084652870608115,0.00031771730078008505,0.0003145789030037841,0.0003114333304133689,0.000308280087698781,0.0003051198800090559,0.0003019514330276324,0.0002987719755163161,0.0002955814603311694,0.0002923809423404556,0.00028917025765743965,0.00028595212808230357,0.00028272819407196287,0.00027949910459147193,0.00027626647488213064,0.0002730308673457605,0.0002697927635829151,0.0002665517172355686,0.0002633099576934343,0.0002600673197169964,0.00025682551314199186,0.00025358118010766723,0.0002503317393785164,0.000247075624066386,0.00024381091964149123,0.00024053592207987188,0.00023725112118078699,0.00023395722221573512,0.00023065604379011073,0.00022734611564577552,0.00022402859115625923,0.00022070594546035003,0.000217380029801548,0.0002140532098730753,0.00021072616452504946,0.0002074025124349368,0.00020408353945885994,0.00020076953696694492,0.00019745920564860026,0.0001941532843710888,0.00019085067075730149,0.00018754918266424356,0.0001842480859695522,0.0001809474932337923,0.00017764657760259206,0.00017434457293158848,0.00017103847650657615,0.0001677292316709699,0.00016441734372398998,0.00016110422270495956,0.00015779176150901598,0.00015448064644822828,0.00015117192095328726,0.00014786434926000752,0.00014455927532704035,0.0001412577802633582,0.00013796339462540365,0.00013467575952936435,0.0001313936778966548,0.00012811657874039852,0.0001248419317801169,0.00012157013768779788,0.00011830206793315676,0.00011503817181515628,0.00011177892238959287,0.00010852340719058812,0.00010527073713721734,0.00010201918395092173,9.877066853872772e-05,9.552420400515289e-05,9.228190114738832e-05,8.904587686210518e-05,8.581440283089989e-05,8.259002879600863e-05,7.937140732405797e-05,7.615871257644101e-05,7.295219629724823e-05,6.975440124322704e-05,6.656463572528254e-05,6.338311921843601e-05,6.0210758720007477e-05,5.704630771982698e-05,5.389013470576948e-05,5.0742093335820236e-05,4.7602192829519926e-05,4.4471722908153927e-05,4.134977791661922e-05,3.82370337376501e-05,3.513174050
6217935e-05,3.2036708459564407e-05,2.8952981202976376e-05,2.5879265258510663e-05,2.2816985938593205e-05,1.976468798222422e-05,1.6723040323547516e-05,1.3688814446670592e-05,1.066559787920281e-05,7.65240750271268e-06,4.652335832530542e-06,1.6630196598437095e-06,-1.3194273464768043e-06,-4.295062946920501e-06,-7.2618584950800164e-06,-1.022080165370519e-05,-1.3170917492954125e-05,-1.611001918094077e-05,-1.9037948350808332e-05,-2.195497784842602e-05,-2.486130110938159e-05,-2.7756636916832406e-05,-3.063904544952447e-05,-3.350427571238468e-05,-3.635455254463948e-05,-3.9188042085028786e-05,-4.2005374302970314e-05,-4.480723197041454e-05,-4.759635633121944e-05,-5.037029233654996e-05,-5.313338862664483e-05,-5.5882565820790786e-05,-5.8617524551339564e-05,-6.134001318345455e-05,-6.405070968013828e-05,-6.674895611465456e-05,-6.943495400554082e-05,-7.210705294925996e-05,-7.476454629076638e-05,-7.740966383576686e-05,-8.004052219311132e-05,-8.265770408632992e-05,-8.526439738632486e-05,-8.785995524776645e-05,-9.044333401227066e-05,-9.301551997222972e-05,-9.557252841566805e-05,-9.811771144660352e-05,-0.00010065245313436942,-0.00010317516818464842,-0.00010568410524569469,-0.00010818038333426518,-0.00011065938474335386,-0.00011312036633304434,-0.00011556602334215263,-0.0001179964341162888,-0.00012041553024207713,-0.00012282365712230447,-0.0001252183630585589,-0.00012759795077245915,-0.0001299629408556381,-0.00013231231558307366,-0.00013464890409249232,-0.0001369735535727626,-0.0001392845126688461,-0.00014158142625042889,-0.000143863719400035,-0.0001461272039581987,-0.00014837332831945136,-0.00015060469214568214,-0.00015282115145586134,-0.00015502282911129123,-0.00015720932271644805,-0.00015937901149870376,-0.0001615324977214346,-0.00016366897904058152,-0.00016578835026408056,-0.0001678900719026233,-0.0001699753268346992,-0.00017204201969499316,-0.00017408893011617533,-0.00017611719854831916,-0.00017812590370766665,-0.00018011564594476296,-0.00018208675475740892,-0.0001840398041749219,-0.
00018597448561051888,-0.00018789183346242086,-0.00018979025215547959,-0.0001916695590421385,-0.00019353135242998994,-0.00019537713235907343,-0.00019720654485841466,-0.00019902207927316703,-0.0002008225771910135,-0.00020260662371458468,-0.00020437467942388342,-0.00020612794839821441,-0.00020786748905946963,-0.00020959331778744092,-0.00021130567883180745,-0.00021300249569537088,-0.0002146811554683979,-0.00021634207066438137,-0.00021798479421634897,-0.00021961172227115815,-0.00022122470924449336,-0.00022282338386171285,-0.00022440491938230622,-0.0002259692440442194,-0.00022751736543577216,-0.0002290482127406602,-0.00023056447375964447,-0.00023206569391013733,-0.00023355112415431522,-0.00023501825776491172,-0.00023646553077490395,-0.0002378902375439768,-0.00023929368563343082,-0.00024067898668739924,-0.00024204565344729167,-0.00024339358299445434,-0.0002447236973305498,-0.000246035543503385,-0.00024733092959555313,-0.000248612323029189,-0.0002498799449653505,-0.0002511337497995746,-0.0002523744974718955,-0.0002536001060127075,-0.00025481003027150704,-0.00025600389247828424,-0.0002571817002728266,-0.0002583446558711855,-0.00025949156550466863,-0.000260620501341826,-0.00026172920163408534,-0.00026282207792148313,-0.00026389884146254774,-0.000264960219485457,-0.00026591846179725205,-0.0002666846945613087,-0.0002672586597084635,-0.0002676397583730658,-0.0002678295028093609)
xcorr <- c(-600.0,-598.999165971643,-597.9983319432861,-596.9974979149291,-595.9966638865722,-594.9958298582152,-593.9949958298582,-592.9941618015013,-591.9933277731443,-590.9924937447873,-589.9916597164304,-588.9908256880734,-587.9899916597165,-586.9891576313595,-585.9883236030025,-584.9874895746456,-583.9866555462886,-582.9858215179316,-581.9849874895747,-580.9841534612177,-579.9833194328608,-578.9824854045038,-577.9816513761468,-576.9808173477899,-575.9799833194329,-574.9791492910759,-573.978315262719,-572.977481234362,-571.976647206005,-570.9758131776481,-569.9749791492911,-568.974145120934,-567.9733110925772,-566.9724770642201,-565.9716430358633,-564.9708090075062,-563.9699749791492,-562.9691409507923,-561.9683069224353,-560.9674728940784,-559.9666388657214,-558.9658048373644,-557.9649708090075,-556.9641367806505,-555.9633027522935,-554.9624687239366,-553.9616346955796,-552.9608006672227,-551.9599666388657,-550.9591326105087,-549.9582985821518,-548.9574645537948,-547.9566305254378,-546.9557964970809,-545.9549624687239,-544.954128440367,-543.95329441201,-542.952460383653,-541.9516263552961,-540.9507923269391,-539.9499582985821,-538.9491242702252,-537.9482902418682,-536.9474562135113,-535.9466221851543,-534.9457881567973,-533.9449541284404,-532.9441201000834,-531.9432860717264,-530.9424520433695,-529.9416180150125,-528.9407839866556,-527.9399499582986,-526.9391159299416,-525.9382819015847,-524.9374478732277,-523.9366138448707,-522.9357798165138,-521.9349457881568,-520.9341117597999,-519.9332777314429,-518.9324437030859,-517.931609674729,-516.930775646372,-515.929941618015,-514.9291075896581,-513.9282735613011,-512.9274395329442,-511.92660550458714,-510.9257714762302,-509.9249374478732,-508.92410341951626,-507.9232693911593,-506.92243536280233,-505.92160133444537,-504.9207673060884,-503.91993327773145,-502.9190992493745,-501.9182652210175,-500.91743119266056,-499.9165971643036,-498.91576313594663,-497.91492910758967,-496.9140950792327,-495.91326105087575,-494.9124
270225188,-493.9115929941618,-492.91075896580486,-491.90992493744784,-490.9090909090909,-489.9082568807339,-488.90742285237695,-487.90658882402,-486.90575479566303,-485.90492076730607,-484.9040867389491,-483.90325271059214,-482.9024186822352,-481.9015846538782,-480.90075062552125,-479.8999165971643,-478.89908256880733,-477.89824854045037,-476.8974145120934,-475.89658048373644,-474.8957464553795,-473.8949124270225,-472.89407839866556,-471.8932443703086,-470.89241034195163,-469.89157631359467,-468.8907422852377,-467.88990825688074,-466.8890742285238,-465.8882402001668,-464.88740617180986,-463.8865721434529,-462.88573811509593,-461.88490408673897,-460.884070058382,-459.88323603002505,-458.8824020016681,-457.8815679733111,-456.88073394495416,-455.8798999165972,-454.87906588824023,-453.8782318598833,-452.8773978315263,-451.87656380316935,-450.8757297748124,-449.8748957464554,-448.87406171809846,-447.87322768974144,-446.8723936613845,-445.8715596330275,-444.87072560467055,-443.8698915763136,-442.86905754795663,-441.86822351959967,-440.8673894912427,-439.86655546288574,-438.8657214345288,-437.8648874061718,-436.86405337781486,-435.8632193494579,-434.86238532110093,-433.86155129274397,-432.860717264387,-431.85988323603004,-430.859049207673,-429.85821517931606,-428.8573811509591,-427.85654712260214,-426.8557130942452,-425.8548790658882,-424.85404503753125,-423.8532110091743,-422.8523769808173,-421.85154295246036,-420.8507089241034,-419.84987489574644,-418.8490408673895,-417.8482068390325,-416.84737281067555,-415.8465387823186,-414.8457047539616,-413.84487072560466,-412.8440366972477,-411.84320266889074,-410.8423686405338,-409.8415346121768,-408.84070058381985,-407.8398665554629,-406.8390325271059,-405.83819849874897,-404.837364470392,-403.83653044203504,-402.8356964136781,-401.8348623853211,-400.83402835696415,-399.8331943286072,-398.83236030025023,-397.83152627189327,-396.8306922435363,-395.82985821517934,-394.8290241868224,-393.8281901584654,-392.82735613010846,-391.826522
1017515,-390.82568807339453,-389.82485404503757,-388.8240200166806,-387.82318598832364,-386.8223519599667,-385.8215179316097,-384.82068390325276,-383.81984987489574,-382.8190158465388,-381.8181818181818,-380.81734778982485,-379.8165137614679,-378.8156797331109,-377.81484570475396,-376.814011676397,-375.81317764804004,-374.8123436196831,-373.8115095913261,-372.81067556296915,-371.8098415346122,-370.8090075062552,-369.80817347789826,-368.8073394495413,-367.80650542118434,-366.8056713928273,-365.80483736447036,-364.8040033361134,-363.80316930775643,-362.8023352793995,-361.8015012510425,-360.80066722268555,-359.7998331943286,-358.7989991659716,-357.79816513761466,-356.7973311092577,-355.79649708090074,-354.7956630525438,-353.7948290241868,-352.79399499582985,-351.7931609674729,-350.7923269391159,-349.79149291075896,-348.790658882402,-347.78982485404504,-346.7889908256881,-345.7881567973311,-344.78732276897415,-343.7864887406172,-342.7856547122602,-341.78482068390326,-340.7839866555463,-339.78315262718934,-338.7823185988324,-337.7814845704754,-336.78065054211845,-335.7798165137615,-334.7789824854045,-333.77814845704756,-332.7773144286906,-331.77648040033364,-330.7756463719767,-329.7748123436197,-328.77397831526275,-327.7731442869058,-326.77231025854877,-325.7714762301918,-324.77064220183485,-323.7698081734779,-322.7689741451209,-321.76814011676396,-320.767306088407,-319.76647206005003,-318.7656380316931,-317.7648040033361,-316.76396997497915,-315.7631359466222,-314.7623019182652,-313.76146788990826,-312.7606338615513,-311.75979983319434,-310.7589658048374,-309.7581317764804,-308.75729774812345,-307.7564637197665,-306.7556296914095,-305.75479566305256,-304.7539616346956,-303.75312760633864,-302.7522935779817,-301.7514595496247,-300.75062552126775,-299.7497914929108,-298.7489574645538,-297.74812343619686,-296.7472894078399,-295.7464553794829,-294.7456213511259,-293.74478732276896,-292.743953294412,-291.74311926605503,-290.74228523769807,-289.7414512093411,-288.740617180984
15,-287.7397831526272,-286.7389491242702,-285.73811509591326,-284.7372810675563,-283.73644703919933,-282.73561301084237,-281.7347789824854,-280.73394495412845,-279.7331109257715,-278.7322768974145,-277.73144286905756,-276.7306088407006,-275.72977481234363,-274.7289407839867,-273.7281067556297,-272.72727272727275,-271.7264386989158,-270.7256046705588,-269.72477064220186,-268.7239366138449,-267.72310258548794,-266.722268557131,-265.721434528774,-264.72060050041705,-263.7197664720601,-262.71893244370307,-261.7180984153461,-260.71726438698914,-259.7164303586322,-258.7155963302752,-257.71476230191826,-256.7139282735613,-255.71309424520433,-254.71226021684737,-253.7114261884904,-252.71059216013344,-251.70975813177648,-250.70892410341952,-249.70809007506256,-248.7072560467056,-247.70642201834863,-246.70558798999167,-245.7047539616347,-244.70391993327775,-243.70308590492078,-242.70225187656382,-241.70141784820686,-240.7005838198499,-239.69974979149293,-238.69891576313597,-237.698081734779,-236.69724770642205,-235.69641367806508,-234.69557964970812,-233.69474562135116,-232.6939115929942,-231.69307756463718,-230.69224353628022,-229.69140950792325,-228.6905754795663,-227.68974145120933,-226.68890742285237,-225.6880733944954,-224.68723936613844,-223.68640533778148,-222.68557130942452,-221.68473728106756,-220.6839032527106,-219.68306922435363,-218.68223519599667,-217.6814011676397,-216.68056713928274,-215.67973311092578,-214.67889908256882,-213.67806505421186,-212.6772310258549,-211.67639699749793,-210.67556296914097,-209.674728940784,-208.67389491242704,-207.67306088407008,-206.67222685571312,-205.67139282735616,-204.6705587989992,-203.66972477064223,-202.66889074228527,-201.6680567139283,-200.66722268557135,-199.66638865721438,-198.66555462885736,-197.6647206005004,-196.66388657214344,-195.66305254378648,-194.66221851542952,-193.66138448707255,-192.6605504587156,-191.65971643035863,-190.65888240200167,-189.6580483736447,-188.65721434528774,-187.65638031693078,-186.655546288573
82,-185.65471226021685,-184.6538782318599,-183.65304420350293,-182.65221017514597,-181.651376146789,-180.65054211843204,-179.64970809007508,-178.64887406171812,-177.64804003336116,-176.6472060050042,-175.64637197664723,-174.64553794829027,-173.6447039199333,-172.64386989157634,-171.64303586321938,-170.64220183486242,-169.64136780650546,-168.6405337781485,-167.63969974979148,-166.6388657214345,-165.63803169307755,-164.6371976647206,-163.63636363636363,-162.63552960800666,-161.6346955796497,-160.63386155129274,-159.63302752293578,-158.63219349457881,-157.63135946622185,-156.6305254378649,-155.62969140950793,-154.62885738115097,-153.628023352794,-152.62718932443704,-151.62635529608008,-150.62552126772312,-149.62468723936615,-148.6238532110092,-147.62301918265223,-146.62218515429527,-145.6213511259383,-144.62051709758134,-143.61968306922438,-142.61884904086742,-141.61801501251045,-140.6171809841535,-139.61634695579653,-138.61551292743957,-137.6146788990826,-136.61384487072564,-135.61301084236868,-134.61217681401166,-133.6113427856547,-132.61050875729774,-131.60967472894077,-130.6088407005838,-129.60800667222685,-128.6071726438699,-127.60633861551293,-126.60550458715596,-125.604670558799,-124.60383653044204,-123.60300250208508,-122.60216847372811,-121.60133444537115,-120.60050041701419,-119.59966638865723,-118.59883236030026,-117.5979983319433,-116.59716430358634,-115.59633027522938,-114.59549624687241,-113.59466221851545,-112.59382819015849,-111.59299416180153,-110.59216013344457,-109.5913261050876,-108.59049207673064,-107.58965804837368,-106.58882402001672,-105.58798999165975,-104.58715596330279,-103.58632193494577,-102.58548790658881,-101.58465387823185,-100.58381984987489,-99.58298582151792,-98.58215179316096,-97.581317764804,-96.58048373644704,-95.57964970809007,-94.57881567973311,-93.57798165137615,-92.57714762301919,-91.57631359466222,-90.57547956630526,-89.5746455379483,-88.57381150959134,-87.57297748123437,-86.57214345287741,-85.57130942452045,-84.57047539616349
,-83.56964136780653,-82.56880733944956,-81.5679733110926,-80.56713928273564,-79.56630525437868,-78.56547122602171,-77.56463719766475,-76.56380316930779,-75.56296914095083,-74.56213511259386,-73.5613010842369,-72.56046705587994,-71.55963302752298,-70.55879899916602,-69.55796497080905,-68.55713094245209,-67.55629691409513,-66.55546288573817,-65.5546288573812,-64.55379482902424,-63.55296080066728,-62.55212677231032,-61.551292743953354,-60.55045871559639,-59.54962468723943,-58.54879065888247,-57.547956630525505,-56.54712260216854,-55.54628857381158,-54.545454545454504,-53.54462051709754,-52.54378648874058,-51.54295246038362,-50.542118432026655,-49.54128440366969,-48.54045037531273,-47.53961634695577,-46.538782318598805,-45.53794829024184,-44.53711426188488,-43.53628023352792,-42.535446205170956,-41.534612176813994,-40.53377814845703,-39.53294412010007,-38.53211009174311,-37.531276063386144,-36.53044203502918,-35.52960800667222,-34.52877397831526,-33.527939949958295,-32.52710592160133,-31.52627189324437,-30.525437864887408,-29.524603836530446,-28.523769808173483,-27.52293577981652,-26.52210175145956,-25.521267723102596,-24.520433694745634,-23.51959966638867,-22.51876563803171,-21.517931609674747,-20.517097581317785,-19.516263552960822,-18.51542952460386,-17.514595496246898,-16.513761467889935,-15.512927439532973,-14.51209341117601,-13.511259382819048,-12.510425354462086,-11.509591326105124,-10.508757297748161,-9.507923269391199,-8.507089241034237,-7.506255212677274,-6.505421184320312,-5.50458715596335,-4.503753127606387,-3.502919099249425,-2.5020850708924627,-1.5012510425355003,-0.500417014178538,0.5004170141784243,1.5012510425353867,2.502085070892349,3.5029190992493113,4.503753127606274,5.504587155963236,6.505421184320198,7.506255212677161,8.507089241034237,9.507923269391199,10.508757297748161,11.509591326105124,12.510425354462086,13.511259382819048,14.51209341117601,15.512927439532973,16.513761467889935,17.514595496246898,18.51542952460386,19.516263552960822,20.5170975
81317785,21.517931609674747,22.51876563803171,23.51959966638867,24.520433694745634,25.521267723102596,26.52210175145956,27.52293577981652,28.523769808173483,29.524603836530446,30.525437864887408,31.52627189324437,32.52710592160133,33.527939949958295,34.52877397831526,35.52960800667222,36.53044203502918,37.531276063386144,38.53211009174311,39.53294412010007,40.53377814845703,41.534612176813994,42.535446205170956,43.53628023352792,44.53711426188488,45.53794829024184,46.538782318598805,47.53961634695577,48.54045037531273,49.54128440366969,50.542118432026655,51.54295246038362,52.54378648874058,53.54462051709754,54.545454545454504,55.54628857381147,56.54712260216843,57.54795663052539,58.54879065888235,59.549624687239316,60.55045871559628,61.55129274395324,62.5521267723102,63.552960800667165,64.55379482902413,65.55462885738109,66.55546288573805,67.55629691409501,68.55713094245198,69.55796497080894,70.5587989991659,71.55963302752286,72.56046705587983,73.5613010842369,74.56213511259386,75.56296914095083,76.56380316930779,77.56463719766475,78.56547122602171,79.56630525437868,80.56713928273564,81.5679733110926,82.56880733944956,83.56964136780653,84.57047539616349,85.57130942452045,86.57214345287741,87.57297748123437,88.57381150959134,89.5746455379483,90.57547956630526,91.57631359466222,92.57714762301919,93.57798165137615,94.57881567973311,95.57964970809007,96.58048373644704,97.581317764804,98.58215179316096,99.58298582151792,100.58381984987489,101.58465387823185,102.58548790658881,103.58632193494577,104.58715596330273,105.5879899916597,106.58882402001666,107.58965804837362,108.59049207673058,109.59132610508755,110.59216013344451,111.59299416180147,112.59382819015843,113.5946622185154,114.59549624687236,115.59633027522932,116.59716430358628,117.59799833194324,118.59883236030021,119.59966638865717,120.60050041701413,121.6013344453711,122.60216847372806,123.60300250208502,124.60383653044198,125.60467055879894,126.6055045871559,127.60633861551287,128.60717264386983,129.6080066722
268,130.60884070058376,131.60967472894072,132.61050875729768,133.61134278565464,134.6121768140116,135.61301084236857,136.61384487072564,137.6146788990826,138.61551292743957,139.61634695579653,140.6171809841535,141.61801501251045,142.61884904086742,143.61968306922438,144.62051709758134,145.6213511259383,146.62218515429527,147.62301918265223,148.6238532110092,149.62468723936615,150.62552126772312,151.62635529608008,152.62718932443704,153.628023352794,154.62885738115097,155.62969140950793,156.6305254378649,157.63135946622185,158.63219349457881,159.63302752293578,160.63386155129274,161.6346955796497,162.63552960800666,163.63636363636363,164.6371976647206,165.63803169307755,166.6388657214345,167.63969974979148,168.64053377814844,169.6413678065054,170.64220183486236,171.64303586321932,172.6438698915763,173.64470391993325,174.6455379482902,175.64637197664717,176.64720600500414,177.6480400333611,178.64887406171806,179.64970809007502,180.65054211843199,181.65137614678895,182.6522101751459,183.65304420350287,184.65387823185984,185.6547122602168,186.65554628857376,187.65638031693072,188.65721434528768,189.65804837364465,190.6588824020016,191.65971643035857,192.66055045871553,193.6613844870725,194.66221851542946,195.66305254378642,196.66388657214338,197.66472060050035,198.6655546288573,199.66638865721427,200.66722268557123,201.6680567139283,202.66889074228527,203.66972477064223,204.6705587989992,205.67139282735616,206.67222685571312,207.67306088407008,208.67389491242704,209.674728940784,210.67556296914097,211.67639699749793,212.6772310258549,213.67806505421186,214.67889908256882,215.67973311092578,216.68056713928274,217.6814011676397,218.68223519599667,219.68306922435363,220.6839032527106,221.68473728106756,222.68557130942452,223.68640533778148,224.68723936613844,225.6880733944954,226.68890742285237,227.68974145120933,228.6905754795663,229.69140950792325,230.69224353628022,231.69307756463718,232.69391159299414,233.6947456213511,234.69557964970807,235.69641367806503,236.69724770
6422,237.69808173477895,238.69891576313591,239.69974979149288,240.70058381984984,241.7014178482068,242.70225187656376,243.70308590492073,244.7039199332777,245.70475396163465,246.7055879899916,247.70642201834858,248.70725604670554,249.7080900750625,250.70892410341946,251.70975813177643,252.7105921601334,253.71142618849035,254.7122602168473,255.71309424520427,256.71392827356124,257.7147623019182,258.71559633027516,259.7164303586321,260.7172643869891,261.71809841534605,262.718932443703,263.71976647206,264.72060050041705,265.721434528774,266.722268557131,267.72310258548794,268.7239366138449,269.72477064220186,270.7256046705588,271.7264386989158,272.72727272727275,273.7281067556297,274.7289407839867,275.72977481234363,276.7306088407006,277.73144286905756,278.7322768974145,279.7331109257715,280.73394495412845,281.7347789824854,282.73561301084237,283.73644703919933,284.7372810675563,285.73811509591326,286.7389491242702,287.7397831526272,288.74061718098415,289.7414512093411,290.74228523769807,291.74311926605503,292.743953294412,293.74478732276896,294.7456213511259,295.7464553794829,296.74728940783984,297.7481234361968,298.74895746455377,299.74979149291073,300.7506255212677,301.75145954962466,302.7522935779816,303.7531276063386,304.75396163469554,305.7547956630525,306.75562969140947,307.75646371976643,308.7572977481234,309.75813177648035,310.7589658048373,311.7597998331943,312.76063386155124,313.7614678899082,314.76230191826517,315.7631359466221,316.7639699749791,317.76480400333605,318.765638031693,319.76647206005,320.76730608840694,321.7681401167639,322.76897414512086,323.7698081734778,324.7706422018348,325.77147623019175,326.7723102585487,327.7731442869057,328.77397831526264,329.7748123436197,330.7756463719767,331.77648040033364,332.7773144286906,333.77814845704756,334.7789824854045,335.7798165137615,336.78065054211845,337.7814845704754,338.7823185988324,339.78315262718934,340.7839866555463,341.78482068390326,342.7856547122602,343.7864887406172,344.78732276897415,345.78815
67973311,346.7889908256881,347.78982485404504,348.790658882402,349.79149291075896,350.7923269391159,351.7931609674729,352.79399499582985,353.7948290241868,354.7956630525438,355.79649708090074,356.7973311092577,357.79816513761466,358.7989991659716,359.7998331943286,360.80066722268555,361.8015012510425,362.8023352793995,363.80316930775643,364.8040033361134,365.80483736447036,366.8056713928273,367.8065054211843,368.80733944954125,369.8081734778982,370.80900750625517,371.80984153461213,372.8106755629691,373.81150959132606,374.812343619683,375.81317764804,376.81401167639694,377.8148457047539,378.81567973311087,379.81651376146783,380.8173477898248,381.81818181818176,382.8190158465387,383.8198498748957,384.82068390325264,385.8215179316096,386.82235195996657,387.82318598832353,388.8240200166805,389.82485404503745,390.8256880733944,391.8265221017514,392.82735613010846,393.8281901584654,394.8290241868224,395.82985821517934,396.8306922435363,397.83152627189327,398.83236030025023,399.8331943286072,400.83402835696415,401.8348623853211,402.8356964136781,403.83653044203504,404.837364470392,405.83819849874897,406.8390325271059,407.8398665554629,408.84070058381985,409.8415346121768,410.8423686405338,411.84320266889074,412.8440366972477,413.84487072560466,414.8457047539616,415.8465387823186,416.84737281067555,417.8482068390325,418.8490408673895,419.84987489574644,420.8507089241034,421.85154295246036,422.8523769808173,423.8532110091743,424.85404503753125,425.8548790658883,426.8557130942452,427.85654712260225,428.8573811509591,429.8582151793162,430.859049207673,431.8598832360301,432.86071726438695,433.861551292744,434.8623853211009,435.86321934945795,436.8640533778148,437.8648874061719,438.8657214345287,439.8665554628858,440.86738949124265,441.8682235195997,442.8690575479566,443.86989157631365,444.8707256046705,445.8715596330276,446.8723936613844,447.8732276897415,448.87406171809835,449.8748957464554,450.87572977481227,451.87656380316935,452.8773978315262,453.8782318598833,454.87906588
82401,455.8798999165972,456.88073394495405,457.8815679733111,458.88240200166797,459.88323603002505,460.8840700583819,461.88490408673897,462.8857381150958,463.8865721434529,464.88740617180974,465.8882402001668,466.88907422852367,467.88990825688074,468.8907422852376,469.89157631359467,470.8924103419515,471.8932443703086,472.89407839866544,473.8949124270225,474.89574645537937,475.89658048373644,476.8974145120933,477.89824854045037,478.8990825688072,479.8999165971643,480.90075062552114,481.9015846538782,482.90241868223507,483.90325271059214,484.904086738949,485.90492076730607,486.9057547956629,487.90658882402,488.90742285237684,489.9082568807339,490.909090909091,491.90992493744784,492.9107589658049,493.91159299416177,494.91242702251884,495.9132610508757,496.91409507923277,497.9149291075896,498.9157631359467,499.91659716430354,500.9174311926606,501.91826522101746,502.91909924937454,503.9199332777314,504.92076730608846,505.9216013344453,506.9224353628024,507.92326939115924,508.9241034195163,509.92493744787316,510.92577147623024,511.9266055045871,512.9274395329442,513.928273561301,514.9291075896581,515.9299416180149,516.930775646372,517.9316096747289,518.9324437030859,519.9332777314428,520.9341117597999,521.9349457881567,522.9357798165138,523.9366138448706,524.9374478732277,525.9382819015846,526.9391159299416,527.9399499582985,528.9407839866556,529.9416180150124,530.9424520433695,531.9432860717263,532.9441201000834,533.9449541284403,534.9457881567973,535.9466221851542,536.9474562135113,537.9482902418681,538.9491242702252,539.949958298582,540.9507923269391,541.951626355296,542.952460383653,543.9532944120099,544.954128440367,545.9549624687238,546.9557964970809,547.9566305254377,548.9574645537948,549.9582985821517,550.9591326105087,551.9599666388656,552.9608006672227,553.9616346955797,554.9624687239366,555.9633027522937,556.9641367806505,557.9649708090076,558.9658048373644,559.9666388657215,560.9674728940784,561.9683069224354,562.9691409507923,563.9699749791494,564.9708090075
062,565.9716430358633,566.9724770642201,567.9733110925772,568.974145120934,569.9749791492911,570.975813177648,571.976647206005,572.9774812343619,573.978315262719,574.9791492910758,575.9799833194329,576.9808173477898,577.9816513761468,578.9824854045037,579.9833194328608,580.9841534612176,581.9849874895747,582.9858215179315,583.9866555462886,584.9874895746455,585.9883236030025,586.9891576313594,587.9899916597165,588.9908256880733,589.9916597164304,590.9924937447872,591.9933277731443,592.9941618015011,593.9949958298582,594.9958298582151,595.9966638865722,596.997497914929,597.9983319432861,598.9991659716429,600.0)
# Alternative fragment-length estimate(s) reported by MACS (bp).
altd <- c(206)
# Symmetric positions around the peak-model midpoint, one per model point.
half_span <- (length(p) - 1) / 2
x <- seq.int(-half_span, half_span)
# Write both diagnostic panels to a single PDF device.
pdf("mm_E14.5_heart_H3K4me3_model.pdf", height = 6, width = 6)
# Panel 1: bimodal peak model (forward vs. reverse tag densities).
plot(x, p,
     type = "l", col = "red", main = "Peak Model",
     xlab = "Distance to the middle", ylab = "Percentage")
lines(x, m, col = "blue")
legend("topleft", c("forward tags", "reverse tags"),
       lty = c(1, 1, 1), col = c("red", "blue"))
# Panel 2: strand cross-correlation profile, with alternative lag(s) marked.
plot(xcorr, ycorr,
     type = "l", col = "black", main = "Cross-Correlation",
     xlab = "Lag between + and - tags", ylab = "Correlation")
abline(v = altd, lty = 2, col = "red")
legend("topleft", "alternative lag(s)", lty = 2, col = "red")
legend("right", "alt lag(s) : 206", bty = "n")
dev.off()
|
7ca1de2164aaa39ecd9b9e6d785a84d66164ab9c
|
515779a0a0d0eefdbd47f96e0571a85783b2d7f8
|
/cram_physicalstructure_rf_V2.R
|
a2711f6baf3b7721166ac8ece8d9b37b9dabae20
|
[] |
no_license
|
jeffb999/Watershed_Prioritization
|
a71bccbfeb97a90f8dbdd2e9070890401ad70ef8
|
2e63fd6130a08d7bceaf873030a5c1849adb44d1
|
refs/heads/main
| 2023-07-30T17:39:09.066714
| 2021-09-22T19:52:44
| 2021-09-22T19:52:44
| 409,336,989
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40,330
|
r
|
cram_physicalstructure_rf_V2.R
|
# CRAM Random Forest Draft
# January 8, 2021
# Heili Lowman
# The following script will walk through a random forest created to predict state-wide CRAM scores, with datasets from SMC and StreamCat databases. The dependent variable in this case will be the California Rapid Assessment Method (CRAM) state-wide.
# Note: this will need to be redone for the 4 sub-CRAM metrics if we choose to model them separately. The below code works through only the overall CRAM index score as the predicted variable.
# Step One - Load In ------------------------------------------------------
# NOTE(review): setwd() in a script is non-portable; prefer an RStudio project
# or here::here(). Kept as-is because every read_csv() below uses paths
# relative to this directory.
setwd("L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/") # saved locally
# Load packages.
# randomForest() and varImpPlot() are called directly below; previously the
# package was only attached implicitly as a dependency of quantregForest, so
# make the dependency explicit. (Same attach position, so masking of
# tidyverse functions is unchanged.)
library(randomForest)
library(quantregForest)
library(caret)
library(tidyverse)
library(tidymodels)
library(skimr)
library(sf)
library(ggspatial)
library(nhdplusTools)
library(patchwork)
library(Metrics)
library(gt)
# Load datasets.
# CRAM scores from the SCCWRP database (dataset pulled on 12/23/20).
cram_df <- read_csv("cram_rf_data1.csv") %>%
  rename(COMID = comid, cram = indexscore) %>% # standardize key/score column names
  drop_na(cram) # discard records with no index score
skim(cram_df) # quick structural summary
str(cram_df)
# Watershed characteristics' data available from StreamCat.
ca <- read_csv("streamcat_params.csv")
skim(ca)
str(ca) # confirm COMID is numeric in both datasets
# Perennial stream assessment data available from SCCWRP server.
ps6 <- read_csv("ps6_params.csv")
# In the ps6_rf_data script, when a COMID had multiple Length_Fin measures, the maximum (and its associated PSA6 designation) was retained.
# Bind the datasets together.
# NOTE(review): both inner_join() calls below omit `by =`, so dplyr performs a
# natural join on ALL shared column names (presumably just COMID — verify
# against the column schemas of `ca` and `ps6`). Rows without a match in
# either table are dropped.
mydf <- cram_df %>%
select(stationcode, COMID, physicalstructure) %>%
inner_join(ca) %>% # Join with StreamCat watershed characteristics.
inner_join(ps6) %>% # Join with PSA region dataset.
select(-c(PctOpCat, PctOpWs, PctOpCatRp100, PctOpWsRp100, NPDESDensCat,
NPDESDensWs, TRIDensCat, TRIDensWs, SuperfundDensCat, SuperfundDensWs)) # Remove "open" land use and discharge site columns.
skim(mydf) # Examine completeness of this joined dataset.
length(unique(mydf$COMID)) # Checking for duplicates. 816 unique COMIDs.
# Pull out only one instance of each COMID.
# Reproducibility: set.seed() sits immediately before the random draw; do not
# insert other RNG-consuming calls between the two or the selected rows change.
set.seed(1) # Every time I run the code below, it's based on the same random pull of data.
mydf2 <- mydf %>%
filter(stationcode!="109PS0162") %>% #There's only one site missing RdDensCatRp100. Better to drop the site than to drop the metric
group_by(COMID) %>%
sample_n(size = 1) %>% # one randomly chosen station per COMID (sample_n is superseded by slice_sample, but swapping could change the draws)
ungroup()
skim(mydf2) # Checking to make sure the dataset is complete.
# Important to have complete datasets for training data. For testing data, it's less critical.
# Step Two - Training Data ------------------------------------------------
# Partition the de-duplicated data into calibration (75%) and validation (25%)
# sets, stratified by PSA6 region so regional proportions carry over to both
# partitions. (rsample pools strata below 10% of the total.)
set.seed(4)
mydf2_split <- initial_split(mydf2, prop = 0.75, strata = PSA6)
# Materialize the two partitions defined by the split object.
mydf2_train <- training(mydf2_split) # expect ~613 rows
mydf2_test <- testing(mydf2_split)   # expect ~202 rows
# All StreamCat COMIDs that were NOT used for training (sampled or not);
# these are the reaches the fitted model will later predict. n = 140,097.
nottrain <- filter(ca, !(COMID %in% mydf2_train$COMID))
# Step Three - Kitchen Sink model -----------------------------------------
# Create finalized training dataset and include all possible variables.
# Identifiers (stationcode, COMID), the stratification label (PSA6), and reach
# length are dropped so only watershed predictors remain.
rf_dat <- mydf2_train %>%
select(-stationcode, -COMID, -PSA6, -Length_Fin)
# Random forest --
# a decision tree model, using predictors to answer dichotomous questions to create nested splits.
# no pruning happens - rather, multiple trees are built (the forest) and then you are looking for consensus across trees
# training data goes down the tree and ends up in a terminal node.
# if testing data goes down the same route, then this upholds our conclusions. Or, if it goes awry, this allows us to look for patterns in how it goes awry.
set.seed(2) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
myrf <- randomForest(y = rf_dat$physicalstructure, # dependent variable
x = rf_dat %>%
select(-physicalstructure), # selecting all predictor variables
importance = TRUE, # how useful is a predictor in predicting values (nothing causal)
proximity = TRUE,
ntree = 500) # 500 trees. Fixed arg name: 'ntrees' was silently swallowed by '...'; the default (500) matched, so results are unchanged.
myrf # examine the results.
# 53.78% variance explained.
summary(myrf)
# mtry allows you to parameterize the number of splits
plot(myrf)
# model performance appears to improve most at ~150 trees
varImpPlot(myrf)
# displays which variables are most important
# helps to winnow down list of predictors
# recommended to weigh left pane more
# right pane also shows how evenly things split based on the list of predictors
# values close to 0 can be dropped, but don't have to be
# In both panes - impervious land cover, urban land use, and stream-road crossings appear important
importance <- myrf$importance
View(importance) # displays the data plotted in the plot above (interactive sessions only)
# predict()
# returns out of bag predictions for training data
# in the bag: every time a tree is built, it uses ~80% of the original 75% we set aside from the original dataset used to create a tree to assure random data selection
# out of bag: looking at the remaining 20% of the training data to predict, when you want to know what your model does at the training location sites
# Predict CRAM scores state-wide for all COMIDs.
# Compute the NA-free prediction frame once (it was previously computed twice,
# inside and outside the mutate() call).
nottrain_clean <- na.omit(nottrain)
nottrain_prediction <- nottrain_clean %>% # all available COMIDs not used in training
mutate(physstructure_predicted = predict(myrf, newdata = nottrain_clean)) # model predictions from StreamCat predictors
# rePredict CRAM scores for training data (out-of-bag predictions).
mydf2_train$physstructure_predicted <- predict(myrf) # Add column of predicted CRAM values to training dataset.
# Combine statewide (non-training) and training predictions for plotting.
ca_predictions <- bind_rows(nottrain_prediction %>%
mutate(Set = "Non-training"), # statewide COMIDs (not used for training)
mydf2_train %>%
mutate(Set = "Training")) # COMIDS from training dataset
# This creates the dataset that will be plotted to create a state-wide plot of predicted CRAM scores.
# Plot the data.
rf_plot1 <- ggplot(ca_predictions, aes(x = PctImp2011CatRp100, y = physstructure_predicted)) +
geom_point(alpha = 0.1) +
labs(x = "Mean % imperviousness within catchment and within a 100-m buffer of NHD stream lines",
y = "Predicted Physical Structure Score") +
theme_classic() +
facet_wrap(.~Set)
rf_plot1
# Step Four - Predictor Selection -----------------------------------------
# Use caret's recursive feature elimination (RFE) to select the best predictors.
# Resampling scheme: random-forest scoring functions with cross-validation.
my_ctrl <- rfeControl(functions = rfFuncs,
method = "cv",
verbose = FALSE,
returnResamp = "all")
# rfe = recursive feature elimination
# THIS STEP TAKES FOR-EV-ER!!!
set.seed(22)
my_rfe <- rfe(y = rf_dat$physicalstructure, # set dependent variable
x = rf_dat %>% select(-physicalstructure), # set predictor variables
sizes = c(3:10, 15, 20, 25, 30), # candidate subset sizes to evaluate
# (was `size =`, which only resolved via partial argument matching; behavior unchanged)
# I have 34 total possible variables, so I've chosen increments of 5 to look at.
rfeControl = my_ctrl) # pull in control from above
# can you make your model even simpler?
# the following will pick a model with the smallest number of predictor variables based on the tolerance ("tol") that you specify (how much less than the best are you willing to tolerate?)
my_size <- pickSizeTolerance(my_rfe$results, metric = "RMSE", tol = 1, maximize = FALSE)
# higher tol (~10) gives you less variables
# lower tol (~1) gives you more variables - "I'd like the simplest model within 1% of the best model."
pickVars(my_rfe$variables, size = my_size)
# pickVars (20):
# "PctImp2011CatRp100" "PctUrbCatRp100" "PctAgWsRp100" "PctImp2011Cat" "PctImp2011WsRp100" "RdDensCatRp100"
# "RdDensWs" "RdDensWsRp100" "RdDensCat" "PctUrbWsRp100" "RdCrsCat" "PctAgWs"
# "PctUrbWs" "PctUrbCat" "RdCrsWs" "PctImp2011Ws" "PctAgCat" "PctAgCatRp100"
# "AgKffactWs" "DamDensWs"
# Proceed with a regular RF that yields mean weighted values and fit those into the following classification scheme:
#Likely condition approach: Compare mean to three CRAM thresholds (50, 75, 90) based on suggested condition classes : https://www.cramwetlands.org/sites/default/files/2019CRAM_TechnicalBulletin.pdf
# Very likely altered: mean < 50
# Likely altered: mean < 75
# Possibly altered: mean < 90
# Likely unaltered: mean >= 90
# Thresholds for physical structure based on 81 reference sites in eCRAM or sent to Lisa Fong
# Very likely altered: mean < 53
# Likely altered: mean < 66
# Possibly altered: mean < 76
# Likely unaltered: mean >= 76
# Predict scores using the above 20 variables:
# Create re-finalized training dataset and include all possible variables.
rf_dat2 <- mydf2_train %>%
  select(physicalstructure, PctImp2011CatRp100, PctUrbCatRp100, PctAgWsRp100, PctImp2011Cat, PctImp2011WsRp100, RdDensCatRp100, RdDensWs, RdDensWsRp100, RdDensCat, PctUrbWsRp100, RdCrsCat, PctAgWs, PctUrbWs, PctUrbCat, RdCrsWs, PctImp2011Ws, PctAgCat, PctAgCatRp100, AgKffactWs, DamDensWs)
set.seed(4) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
# FIX: the tree-count argument was previously misspelled "ntrees";
# randomForest() takes "ntree", so the misspelled argument was silently
# absorbed by "..." and the default (500) was used anyway — results unchanged,
# but the intent is now actually honored.
myrf2 <- randomForest(y = rf_dat2$physicalstructure, # dependent variable
                      x = rf_dat2 %>%
                        select(-physicalstructure),
                      importance = TRUE,  # compute %IncMSE / IncNodePurity
                      proximity = TRUE,
                      ntree = 500)
myrf2 # examine the results.
# 54.32% variance explained.
summary(myrf2)
plot(myrf2) # error stabilizes after ~100 trees.
varImpPlot(myrf2)
# Flatten the importance matrix to long format for ggplot below.
importance2 <- as.data.frame(as.table(myrf2$importance))
View(importance2) # displays the data plotted in the plot above
# Nicer ggplot variable importance plot.
# One panel per importance metric; variables are ordered by importance value.
vip_panel <- function(metric, x_label) {
  importance2 %>%
    filter(Var2 == metric) %>%
    mutate(Var1 = factor(Var1)) %>%
    mutate(Var1_f = fct_reorder(Var1, Freq)) %>%
    ggplot(aes(x = Freq, y = Var1_f)) +
    geom_point(size = 3, alpha = 0.75) +
    labs(x = x_label,
         y = "Variables") +
    theme_bw()
}
vip_plot_a <- vip_panel("%IncMSE", "% Importance (MSE)")
vip_plot_b <- vip_panel("IncNodePurity", "Node Purity")
# Combine the two panels side-by-side with patchwork.
vip_plot <- vip_plot_a + vip_plot_b
vip_plot
# png(file="physicalstructure_vip_plot.png", units="in", width=8, height=5, res=300)
# vip_plot
# dev.off()
# ggsave("physicalstructure_vip_plot.png",
#        #path = "/Users/heilil/Desktop/R_figures",
#        width = 25,
#        height = 10,
#        units = "cm"
#        )
# predict(myrf2) # returns out of bag predictions for training data
# Predict CRAM scores state-wide.
# Clean the non-training COMIDs once so the same complete-case rows feed both
# the output data frame and the prediction call.
nottrain_clean <- na.omit(nottrain)
nottrain_prediction2 <- nottrain_clean %>%
  mutate(physstructure_predicted = predict(myrf2, newdata = nottrain_clean))
# rePredict CRAM scores for training and testing data (to be used in validation below).
# predict(myrf2) without newdata returns the out-of-bag predictions.
mydf2_train2 <- mydf2_train %>%
  mutate(physstructure_predicted = predict(myrf2))
mydf2_test2 <- mydf2_test %>%
  mutate(physstructure_predicted = predict(
    myrf2,
    newdata = select(mydf2_test, -c(stationcode, physicalstructure, PSA6, Length_Fin))
  ))
# Bind the statewide (non-training) and training predictions for plotting.
ca_predictions2 <- bind_rows(
  nottrain_prediction2 %>% mutate(Set = "Non-training"),
  mydf2_train2 %>% mutate(Set = "Training")
)
# This creates the dataset that will be plotted.
# This creates the dataset that will be plotted.
# Create table of number of sites that fall into each category.
# Add classification column.
# ca_predictions2 <- ca_predictions2 %>%
# mutate(classification = case_when(physstructure_predicted < 50 ~"Very Likely Altered",
# physstructure_predicted < 75 ~"Likely Altered",
# physstructure_predicted < 90 ~"Possibly Altered",
# physstructure_predicted >= 90 ~"Likely Unaltered")) %>%
# mutate(class_f = factor(classification, levels = c("Very Likely Altered", "Likely Altered", "Possibly Altered", "Likely Unaltered"))) # relevel classifications
# # 3 thresholds & 4 categories
# ca_predictions2 <- ca_predictions2 %>%
# mutate(classification = case_when(round(physstructure_predicted, digits = 0) < 53 ~"Very Likely Altered",
# round(physstructure_predicted, digits = 0) < 66 ~"Likely Altered",
# round(physstructure_predicted, digits = 0) < 76 ~"Possibly Altered",
# round(physstructure_predicted, digits = 0) >= 76 ~"Likely Unaltered")) %>%
# mutate(class_f = factor(classification, levels = c("Very Likely Altered", "Likely Altered", "Possibly Altered", "Likely Unaltered"))) # relevel classifications
# 1 threshold & 2 categories
# Round to whole scores, then classify against the physical-structure
# threshold of 66 derived from the reference sites noted above.
ca_predictions2 <- ca_predictions2 %>%
  mutate(
    classification = if_else(round(physstructure_predicted, digits = 0) < 66,
                             "Degraded", "Intact"),
    class_f = factor(classification, levels = c("Degraded", "Intact"))  # relevel classifications
  )
#### Results .csv ####
# Export results.
#write_csv(ca_predictions2, "cram_rf_results.csv")
#write_csv(ca_predictions2, "L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_rf_results_81ref.csv")
# Summary table: number of sites per condition class.
ca_summary <- count(ca_predictions2, class_f)
# The numbering is greatly skewed to the "possibly altered" classification, so perhaps other thresholds are necessary.
# Summary table: total stream length (m) per condition class.
ca_summary_length <- ca_predictions2 %>%
  group_by(class_f) %>%
  summarize(length = sum(Length_Fin, na.rm = TRUE))
# Join the two summaries (matched on class_f) and export.
ca_sum <- full_join(ca_summary, ca_summary_length)
#write_csv(ca_sum, "physicalstructure_rf_results_summary.csv")
#write_csv(ca_sum, "L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_rf_results_summary_81ref.csv")
# Step Five - Quantile Regression model -----------------------------------
# Note - for the Healthy Watersheds Project, I did not pursue this structure, but I've kept some example code below in case future iterations call for it.
# Quantile random forest regression mode, instead of looking at the mode of trees, can compare to 10th, 50th, 90th percentiles etc.
# Need to make a new dataset taking the above results of pickVars into account.
# Create finalized training dataset and include all possible variables.
# qrf_dat <- mydf2_train %>%
# select(asci, RdCrsWs, PctAgWs, PctUrbWsRp100, PctOpWsRp100, PctOpWs, DamDensWs, RdDensWs, NABD_DensWs, PctUrbWs, PctUrbCatRp100, RdDensWsRp100, PctOpCat, PctUrbCat, RdDensCat, CBNFWs, PctOpCatRp100, PctAgWsRp100, TRIDensWs, AgKffactWs, FertWs)
# set.seed(20)
# myqrf <- quantregForest(y = qrf_dat$asci, # dependent variable
# x = qrf_dat %>%
# select(-asci),
# importance = T,
# proximity = T,
# keep.inbag=T,
# ntrees = 500)
#predict(myqrf) # automatically presents 10th %tile, median, and 90th %tile
#predict(myqrf, what=c(0.2, 0.3, 0.999)) # to print specific quantiles
#plot(myqrf) # plots the results.
# Again appears to improve after ~100 trees.
# Step Six - Model validation ---------------------------------------------
# Compare predicted vs. actual results, including by PSA region.
# Adding lines of slope=1 and linear models to each plot.
# Shared predicted-vs-measured panel: colored points + fitted lm, plus a
# black 1:1 reference line.
make_val_panel <- function(dat, point_color, panel_title) {
  ggplot(dat, aes(x = physstructure_predicted, y = physicalstructure)) +
    geom_point(color = point_color, alpha = 0.5) +
    geom_smooth(method = "lm", se = FALSE, color = point_color) +
    labs(x = "Physical Structure predicted",
         y = "Physical Structure measured",
         title = panel_title) +
    geom_abline(intercept = 0, slope = 1) +
    theme_bw()
}
val1 <- make_val_panel(mydf2_train2, "#2A3927", "Training Data\nn=613")
val1
lm1 <- lm(physicalstructure ~ physstructure_predicted, data = mydf2_train2)
summary(lm1)
# Testing panel gets sparser x-axis breaks (scales are not layers, so adding
# the scale last is equivalent to the original layer order).
val2 <- make_val_panel(mydf2_test2, "#3793EC", "Testing Data\nn=202") +
  scale_x_continuous(breaks = c(0.5, 0.7, 0.9))
val2
lm2 <- lm(physicalstructure ~ physstructure_predicted, data = mydf2_test2)
summary(lm2)
# Create the full testing + training dataset to plot together.
mydf2_test2 <- mydf2_test2 %>% mutate(set = "Testing")
mydf2_train2 <- mydf2_train2 %>% mutate(set = "Training")
full_train_test <- bind_rows(mydf2_test2, mydf2_train2) %>%
  mutate(set_f = factor(set, levels = c("Training", "Testing")))
# All data, faceted by PSA6 region; points/lines colored by data split.
val3 <- ggplot(full_train_test,
               aes(x = physstructure_predicted, y = physicalstructure, color = set_f)) +
  geom_point(alpha = 0.5) +
  geom_smooth(method = "lm", se = FALSE) +
  scale_color_manual(name = "Set", values = c("#2A3927", "#3793EC"), drop = FALSE) +
  labs(x = "Physical Structure predicted",
       y = "Physical Structure measured",
       title = "All Data\nn=815") +
  geom_abline(intercept = 0, slope = 1, color = "black") +
  facet_wrap(~PSA6) +
  theme_bw()
val3
# Compose with patchwork: training/testing side-by-side on top, faceted map of
# regions below, then annotate the full figure.
val_fig <- (val1 + val2) /
  (val3)
val_fig + plot_annotation(
  title = 'Physical Structure Random Forest Results',
  subtitle = 'All modeling performed using StreamCAT datasets.',
  caption = 'Linear models are colored according to dataset. Lines of slope = 1 are denoted in black.'
)
# png(file="physicalstructure_rfmodel_validation.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_rfmodel_validation_81ref.png", units="in", width=8, height=5, res=300)
# val_fig + plot_annotation(
# title = 'Physical Structure Random Forest Results',
# subtitle = 'All modeling performed using StreamCAT datasets.',
# caption = 'Linear models are colored according to dataset. Lines of slope = 1 are denoted in black.'
# )
# dev.off()
# Save figure.
# ggsave("cram_rfmodel_validation.png",
# path = "/Users/heilil/Desktop/R_figures",
# width = 35,
# height = 25,
# units = "cm"
# )
# Per-region linear models of measured vs. predicted physical structure,
# fit separately for the training and testing splits. A small helper replaces
# the twelve copy-pasted lm() calls; lm3..lm14 keep their original names
# because the summary-table code below refers to them individually.
fit_region_lm <- function(region, dataset) {
  # Fit measured ~ predicted for one PSA6 region and one data split.
  lm(physicalstructure ~ physstructure_predicted,
     data = full_train_test %>%
       filter(PSA6 == region) %>%
       filter(set_f == dataset))
}
lm3  <- fit_region_lm("Central_Valley", "Training")
lm4  <- fit_region_lm("Central_Valley", "Testing")
lm5  <- fit_region_lm("Chaparral",      "Training")
lm6  <- fit_region_lm("Chaparral",      "Testing")
lm7  <- fit_region_lm("Deserts_Modoc",  "Training")
lm8  <- fit_region_lm("Deserts_Modoc",  "Testing")
lm9  <- fit_region_lm("North_Coast",    "Training")
lm10 <- fit_region_lm("North_Coast",    "Testing")
lm11 <- fit_region_lm("Sierra",         "Training")
lm12 <- fit_region_lm("Sierra",         "Testing")
lm13 <- fit_region_lm("South_Coast",    "Training")
lm14 <- fit_region_lm("South_Coast",    "Testing")
# Extract fit statistics (R^2, slope, intercept, and their p-values) from each
# of the fourteen models above and assemble the validation summary table.
# This replaces ~70 lines of copy-pasted summary()/coefficients extraction.
lm_stats <- function(m) {
  # One-row data frame of fit statistics for a simple linear model.
  s <- summary(m)
  data.frame(R2 = s$r.squared,                   # r-squared
             Slope = unname(coef(m)[2]),         # slope estimate
             Slope_p = s$coefficients[2, 4],     # slope p-value
             Intercept = unname(coef(m)[1]),     # y-intercept estimate
             Intercept_p = s$coefficients[1, 4]) # intercept p-value
}
# Models are ordered Training then Testing within each region, matching the
# Region/Dataset label columns built below.
region_models <- list(lm1, lm2, lm3, lm4, lm5, lm6, lm7,
                      lm8, lm9, lm10, lm11, lm12, lm13, lm14)
region_labels <- c("Statewide", "Central_Valley", "Chaparral", "Deserts_Modoc",
                   "North_Coast", "Sierra", "South_Coast")
physicalstructure_lms <- cbind(
  data.frame(Region = rep(region_labels, each = 2),
             Dataset = rep(c("Training", "Testing"), times = length(region_labels))),
  do.call(rbind, lapply(region_models, lm_stats))
)
# Round slope p-values for display; values below 1e-4 are reported as "<0.0001"
# (note this coerces the column to character when any value is that small).
physicalstructure_lms <- physicalstructure_lms %>%
  mutate(Slope_p = round(Slope_p, digits = 6)) %>%
  mutate(Slope_p = ifelse(Slope_p < 0.0001, "<0.0001", Slope_p))
#write_csv(physicalstructure_lms,"L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure.lms.csv" )
# # Import the results of these linear models to generate summary table.
#
# physicalstructure_lms <- read_csv("L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_lms.csv")
#physicalstructure_lms <- read_csv("L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/physicalstructure_lms.csv")
# Run the code and save table to a png file:
# FIX: the R2 column label previously read "R<sup>2</sup" (unterminated
# closing tag), which produced malformed HTML in the rendered table header.
summary_table <- physicalstructure_lms %>%
  gt(groupname_col = "Region", rowname_col = "Dataset") %>%
  fmt_number(columns = vars(R2, Slope, Intercept, Intercept_p), decimals = 4) %>%
  tab_header(title = "Physicalstructure Results Validation",
             subtitle = "All modeling performed using StreamCAT datasets.") %>%
  cols_label(R2 = html("R<sup>2</sup>"),
             Slope_p = html("<i>p</i>"),
             Intercept_p = html("<i>p</i>")) %>%
  cols_align(
    align = "left",
    columns = vars(R2, Slope, Slope_p, Intercept, Intercept_p))
summary_table
# Save table.
# gtsave(summary_table,
# "physicalstructure_rfmodel_lms.png",
# path = "/Users/heilil/Desktop/R_figures")
#webshot::install_phantomjs()
# Re-build the table and save it straight to disk with gtsave().
# FIX: the R2 column label previously read "R<sup>2</sup" (unterminated
# closing tag), which produced malformed HTML in the rendered table header.
summary_table <- physicalstructure_lms %>%
  gt(groupname_col = "Region", rowname_col = "Dataset") %>%
  fmt_number(columns = vars(R2, Slope, Intercept, Intercept_p), decimals = 4) %>%
  tab_header(title = "Physicalstructure Results Validation",
             subtitle = "All modeling performed using StreamCAT datasets.") %>%
  cols_label(R2 = html("R<sup>2</sup>"),
             Slope_p = html("<i>p</i>"),
             Intercept_p = html("<i>p</i>")) %>%
  cols_align(
    align = "left",
    columns = vars(R2, Slope, Slope_p, Intercept, Intercept_p)) %>%
  gtsave(
    "physicalstructure_rfmodel_lms.png",
    path = "L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds"
  )
#
# gtsave(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_rfmodel_lms.png")
# png("L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_rfmodel_lms.png")
# summary_table
# dev.off()
# Record the row count of every intermediate data frame produced by this
# workflow — a quick QA summary of how many records survive each step.
tracked_dfs <- c("ca", "ca_predictions", "ca_predictions2", "full_train_test",
                 "mydf", "mydf2", "mydf2_test", "mydf2_test2", "mydf2_train",
                 "mydf2_train2", "nottrain", "nottrain_prediction",
                 "nottrain_prediction2", "ps6", "rf_dat", "rf_dat2")
process_summary <- data.frame(
  Dataframe = tracked_dfs,
  Count = unname(vapply(tracked_dfs, function(nm) nrow(get(nm)), integer(1)))
)
#write.csv(process_summary, "L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_process_summary.csv")
# Chose not to compute confusion matrix / accuracy score since this is more applicable to categorical outputs from random forest models -
# Instead, calculated Root Mean Squared Error (RMSE) of both training and test datasets.
# If test RMSE values are much greater than training, then possible the model has been over fit.
predtest <- predict(myrf2, mydf2_test2)
rmse(mydf2_test2$physicalstructure,predtest)
# 13.91942
predtrain <- predict(myrf2, mydf2_train2)
rmse(mydf2_train2$physicalstructure,predtrain)
# 6.337931
# NOTE(review): training RMSE here is computed on in-sample predictions, so it
# is expected to be lower than the test RMSE; compare against the OOB error if
# a fairer training-side estimate is wanted.
# Double checking using the original random forest dataset (rf_dat) with all 35 possible variables included to see where the error in number of predictors starts to increase dramatically (to help double check our decision to include 25 parameters).
# rfcv() refits the forest with progressively fewer predictors (step = 0.7
# keeps ~70% of variables each round) and reports cross-validated error.
dc <- rfcv(rf_dat %>%
             select(-physicalstructure),
           rf_dat$physicalstructure,
           step = 0.7, # default is 0.5
           scale="log")
dc$error.cv
# 34       24       17       12        8        6        4        3        2        1
# 220.8261 225.4794 230.9605 238.7072 241.4203 248.0797 266.0912 262.9835 296.6211 345.4844
# Appears between X and Y variables, there is an insignificant increase in error.
# However, this model is much larger than the CSCI (20) and ASCI (10) models, so we may decide to trim this down in the future.
# However, this model is much larger than the CSCI (20) and ASCI (10) models, so we may decide to trim this down in the future.
# Step Seven - Map results state-wide -------------------------------------
# Using ca_predictions2 dataset generated above. But need to first associate lat/lon with each COMID.
# Load in NHD_Plus_CA dataset from Annie as well as watersheds from Jeff.
# FIX: the statewide flowline layer was previously read twice back-to-back
# (copy-paste duplication); the redundant second read_sf() call was removed.
# Full state of California
#nhd_ca <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Plus_CA/NHDPlus_V2_FLowline_CA.shp") %>%
nhd_ca <- read_sf("L:/RipRAM_ES/Data/Working/MapStuff/NHDPlus_NAD83.shp") %>%
  mutate(COMID = as.numeric(COMID))  # COMID must be numeric to join with predictions
# South Coast watersheds - Ventura River, San Juan Creek, San Diego River
#nhd_vr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/VenturaRiver_NHD_Clip.shp") %>%
nhd_vr <- read_sf("L:/RipRAM_ES/Data/Working/MapStuff/VenturaRiver_NHD_Clip.shp") %>%
  mutate(COMID = as.numeric(COMID))
#nhd_sjc <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanJuanCreek_NHD_Clip.shp") %>%
nhd_sjc <- read_sf("L:/RipRAM_ES/Data/Working/MapStuff/SanJuanCreek_NHD_Clip.shp") %>%
  mutate(COMID = as.numeric(COMID))
#nhd_sdr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanDiegoRiver_NHD_Clip.shp") %>%
nhd_sdr <- read_sf("L:/RipRAM_ES/Data/Working/MapStuff/SanDiegoRiver_NHD_Clip.shp") %>%
  mutate(COMID = as.numeric(COMID))
# Assign modeled COMIDs to mcomid.
mcomid <- ca_predictions2$COMID  # the COMIDs that received a model prediction
# Filter by and plot only modeled stream reaches.
# # Statewide map. 3 thresholds & 4 categories
# modeled_cram_map <- nhd_ca %>%
# filter(COMID %in% mcomid) %>%
# inner_join(ca_predictions2) %>%
# ggplot() +
# geom_sf(aes(color = class_f)) +
# scale_color_manual(name = "Condition", values = c("red2", "lightpink", "lightskyblue2", "steelblue"), drop = FALSE) +
# theme_bw()
#
# modeled_cram_map
# Note, sometimes this takes forever to render in the "plot" pane.
# Best to just save to your machine (below) and then take a look.
# png(file="physicalstructure_modeled_CA.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_CA_81ref.png", units="in", width=8, height=5, res=300)
# modeled_cram_map
# dev.off()
# ggsave("physicalstructure_modeled_CA.png",
# #path = "/Users/heilil/Desktop/R_figures",
# width = 35,
# height = 35,
# units = "cm"
# )
# Statewide map. 1 threshold & 2 categories
# Attach predictions to the statewide flowline geometry, then color by class.
modeled_flowlines <- nhd_ca %>%
  filter(COMID %in% mcomid) %>%
  inner_join(ca_predictions2)
modeled_cram_map <- ggplot(modeled_flowlines) +
  geom_sf(aes(color = class_f)) +
  scale_color_manual(name = "Condition", values = c("red2", "steelblue"), drop = FALSE) +
  theme_bw()
#modeled_cram_map
# png(file="physicalstructure_modeled_CA.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_CA_81ref_2.png", units="in", width=8, height=5, res=300)
# modeled_cram_map
# dev.off()
# Ventura River inset
# FIX: the inset maps still used the four-color palette left over from the
# retired four-category classification. class_f now has only two levels
# ("Degraded", "Intact"), so with drop = FALSE "Intact" rendered lightpink,
# inconsistent with the statewide map. Use the same two-color palette.
ventura_cram_map <- nhd_vr %>%
  filter(COMID %in% mcomid) %>%
  inner_join(ca_predictions2) %>%
  ggplot() +
  geom_sf(aes(color = class_f)) +
  scale_color_manual(name = "Condition", values = c("red2", "steelblue"), drop = FALSE) +
  labs(title = "Ventura River") +
  theme_bw() #+
  #theme(legend.position = "none")
ventura_cram_map
# png(file="physicalstructure_modeled_Ventura.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_Ventura_81ref.png", units="in", width=8, height=5, res=300)
# ventura_cram_map
# dev.off()
# ggsave("physicalstructure_modeled_Ventura.png",
# path = "/Users/heilil/Desktop/R_figures",
# width = 15,
# height = 15,
# units = "cm"
# )
# San Juan Creek inset
# FIX: use the two-color palette matching the active two-category
# classification (the old four-color palette would have colored "Intact"
# lightpink with drop = FALSE, inconsistent with the statewide map).
sanjuan_cram_map <- nhd_sjc %>%
  filter(COMID %in% mcomid) %>%
  inner_join(ca_predictions2) %>%
  ggplot() +
  geom_sf(aes(color = class_f)) +
  scale_color_manual(name = "Condition", values = c("red2", "steelblue"), drop = FALSE) +
  labs(title = "San Juan Creek") +
  theme_bw() +
  theme(legend.position = "none")  # legend shown only on the Ventura panel
sanjuan_cram_map
# png(file="physicalstructure_modeled_SanJuanCreek.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_SanJuanCreek_81ref.png", units="in", width=8, height=5, res=300)
# sanjuan_cram_map
# dev.off()
# San Diego River inset
# FIX: use the two-color palette matching the active two-category
# classification (the old four-color palette would have colored "Intact"
# lightpink with drop = FALSE, inconsistent with the statewide map).
sandiego_cram_map <- nhd_sdr %>%
  filter(COMID %in% mcomid) %>%
  inner_join(ca_predictions2) %>%
  ggplot() +
  geom_sf(aes(color = class_f)) +
  scale_color_manual(name = "Condition", values = c("red2", "steelblue"), drop = FALSE) +
  labs(title = "San Diego River") +
  theme_bw() +
  theme(legend.position = "none")  # legend shown only on the Ventura panel
sandiego_cram_map
# png(file="physicalstructure_modeled_SanDiegoRiver.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_SanDiegoRiver_81ref.png", units="in", width=8, height=5, res=300)
# sandiego_cram_map
# dev.off()
# South coast sites inset figures: stack the three watershed maps vertically
# using patchwork's "/" operator.
scoast <- ventura_cram_map / sanjuan_cram_map / sandiego_cram_map
scoast
# png(file="physicalstructure_modeled_SouthCoast.png", units="in", width=8, height=5, res=300)
# png(file="L:/RipRAM_ES/Data/Working/healthy_watershed_random_forest/Results using published thresholds/physicalstructure_modeled_SouthCoast_81ref.png", units="in", width=8, height=5, res=300)
# scoast
# dev.off()
# ggsave("physicalstructure_modeled_SCoast.png",
# path = "/Users/heilil/Desktop/R_figures",
# width = 20,
# height = 40,
# units = "cm"
# )
# Additional Notes - Healthy Watersheds project ---------------------------
#Classification options:
#"Constrained" approach, following Beck et al. 2019: Compare q10, q50, and q90 to CRAM threshold (i.e., 75). This requires the quantile regression model that was not run but for which there is code provided above.
#Likely constrained: q90 < 50
#Possibly constrained: q50 < 75
#Possibly unconstrained: q50 >= 90 and q10 < 90
#Likely unconstrained: q10 > 90
#"Likely condition approach: Compare q50 to three CRAM thresholds (50, 75, 90) @ reference sites (1st, 10th, 30th percentiles)
# Very likely altered: q50 < 50
# Likely altered: q50 < 75
# Possibly altered: q50 < 90
# Likely unaltered: q50 >= 90
# Condition approach favored by Anna per meeting on 11/3/2020.
# Works Cited:
# Hill, Ryan A., Marc H. Weber, Scott G. Leibowitz, Anthony R. Olsen, and Darren J. Thornbrugh, 2016. The Stream-Catchment (StreamCat) Dataset: A Database of Watershed Metrics for the Conterminous United States. Journal of the American Water Resources Association (JAWRA) 52:120-128. DOI: 10.1111/1752-1688.12372.
# End of R script.
|
10ec672e24b8b086f578413472e6c4b31ba47aee
|
64fb4bae567cd021c21f37079cdbe1cd0bfe25ea
|
/R/GmedianCov.R
|
609088d1eefd598afa20598765e283cd2f6c0152
|
[] |
no_license
|
cran/Gmedian
|
9d43a80aabe5b67e6c6cc2f98081483abec67b6f
|
c379a7ddc561b58de729eca4e78e0b2fabba0f2c
|
refs/heads/master
| 2022-06-24T05:29:54.275483
| 2022-06-08T13:00:02
| 2022-06-08T13:00:02
| 54,840,609
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,234
|
r
|
GmedianCov.R
|
GmedianCov <- function(X, init=NULL, nn=TRUE, scores=2, gamma=2, gc=2, alpha=0.75, nstart=1){
### Computation of the Geometric covariation matrix
### with averaged stochastic gradient algorithms
### input : X (n x p matrix, n observations, in dimension p)
### output : (geometric) median (1 x p numeric vector) and (geometric) median covariation matrix (p x p)
### nn : non negative modification of the algorithm to get a non negaztive matrix estimation (TRUE by default)
Gmed.est <- Gmedian(X,init=init,gamma=gamma,alpha=alpha,nstart=nstart)
if (nn==FALSE) GMCM.est <- MedianCovMatRow_rcpp(X,Gmedian=Gmed.est,gamma=gc,alpha=alpha,nstart=nstart)
else GMCM.est <- MedianCovMatRowP_rcpp(X,Gmedian=Gmed.est,gamma=gc,alpha=alpha,nstart=nstart)
if (scores==0){
return(list(median = Gmed.est,covmedian=GMCM.est))
}
else {
### Computation of the eigenvectors and scores
vectors <- RSpectra::eigs_sym(GMCM.est, scores)$vectors
scores <- sweep(X,2,Gmed.est)%*%vectors
sdev.rob <- apply(scores,2,scaleTau2)
return(list(median=Gmed.est,covmedian=GMCM.est,scores=scores,vectors=vectors,sdev=sdev.rob))
}
}
WeiszfeldCov <- function(X, weights=NULL, scores=2, epsilon=1e-08, nitermax = 100){
### Computation of the Geometric covariation matrix
### output : (geometric) median (1 x p numeric vector) and (geometric) median covariation matrix (p x p)
### require library(RSpectra)
X <- as.matrix(X)
n <- nrow(X)
if (is.null(weights)) poids <- rep(1,n)
else poids <- weights
Wmed.est <- Weiszfeld_rcpp(X,poids,epsilon=epsilon,nitermax=nitermax)
WMCM.est <- MedianCovMatW_rcpp(X,Wmed.est$median,poids,epsilon=epsilon,nitermax=nitermax)
if (scores==0){
return(list(median = Wmed.est$median, covmedian=WMCM.est$median, iterm = Wmed.est$iter, itercov = WMCM.est$iter))
}
else {
### Computation of the eigenvectors and scores
vectors <- RSpectra::eigs_sym(WMCM.est$median, scores)$vectors
vscores = sweep(X,2,Wmed.est$median)%*%vectors
sdev.rob <- apply(vscores,2,scaleTau2)
return(list(median=Wmed.est$median, covmedian=WMCM.est$median, scores=vscores, sdev = sdev.rob, vectors=vectors, iterm = Wmed.est$iter, itercov = WMCM.est$iter))
}
}
|
492a3f2515f167219462c7c94a9765725715ac11
|
49fef819eb731bad92033c45d6f1515548e4f177
|
/man/editor-information.Rd
|
7d2ec6d1331742a3cd57ed9cbef1264d0d5962dc
|
[] |
no_license
|
woodhaha/rstudioapi
|
a8944229ee4006f0f76fd23e5ce9dbdd4e31d03d
|
bc145dd6734cba2d9f8d55ea4cdafa0bc126319f
|
refs/heads/master
| 2019-04-09T15:06:59.017194
| 2017-04-27T21:37:22
| 2017-04-27T21:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,210
|
rd
|
editor-information.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/document-api.R
\name{editor-information}
\alias{editor-information}
\alias{getActiveDocumentContext}
\alias{editor-information}
\alias{getSourceEditorContext}
\alias{editor-information}
\alias{getConsoleEditorContext}
\title{Retrieve Information about an RStudio Editor}
\usage{
getActiveDocumentContext()
getSourceEditorContext()
getConsoleEditorContext()
}
\value{
A \code{list} with elements:
\tabular{ll}{
\code{id} \tab The document ID.\cr
\code{path} \tab The path to the document on disk.\cr
\code{contents} \tab The contents of the document.\cr
\code{selection} \tab A \code{list} of selections. See \bold{Details} for more information.\cr
}
}
\description{
Returns information about an RStudio editor.
}
\details{
The \code{selection} field returned is a list of document selection objects.
A document selection is just a pairing of a document \code{range}, and the
\code{text} within that range.
}
\note{
The \code{getActiveDocumentContext} function was added with version 0.99.796
of RStudio, while the \code{getSourceEditorContext} and the \code{getConsoleEditorContext}
functions were added with version 0.99.1111.
}
|
108d9895327fbe547e7bd172b22e56d337579ded
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/dfg_for_kilimanjaro/fire_ndvi/src/visTerraAqua.R
|
c69b2c7ba6866336a1e0c5a887ed051400bf92f8
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,458
|
r
|
visTerraAqua.R
|
## Plot Terra (MOD14A1) vs. Aqua (MYD14A1) time series for each research plot.
##
## Values from the two rasters in `rst` are extracted at `plot_shape`,
## reshaped to long format, divided by 10000 and drawn as per-plot time
## series with linear trend lines. Optional per-panel annotations:
##   cld - fraction of NA cells per plot (printed bottom-left, "CLD: x");
##   vza - median view zenith angle per plot (printed bottom-right, "VZA: x").
##
## Args:
##   rst        list of two Raster* objects (Terra first, Aqua second); layer
##              names must carry the acquisition date in characters 5-11
##              ("%Y%j") -- TODO confirm against the upstream naming scheme.
##   cld        optional list of Raster* objects for the cloud annotation.
##   vza        optional list of Raster* objects for the VZA annotation.
##   plot_names plot identifiers forwarded to sortByElevation().
##   plot_shape SpatialPolygons* whose attribute table has a PlotID column.
##   ...        currently unused.
##
## Returns: a ggplot object faceted by PlotID.
visTerraAqua <- function(rst,
                         cld = NULL,
                         vza = NULL,
                         plot_names,
                         plot_shape,
                         ...) {

  lib <- c("doParallel", "raster", "rgdal", "reshape2", "matrixStats", "ggplot2")
  # Attach dependencies; library() errors early if one is missing (the
  # original sapply() wrapper only collected the discarded return values).
  for (pkg in lib) library(pkg, character.only = TRUE)

  source("src/sortByElevation.R")

  registerDoParallel(cl <- makeCluster(2))
  # Fix: release the worker cluster even when an extraction below fails;
  # previously stopCluster() was only reached on the success path.
  on.exit(stopCluster(cl), add = TRUE)

  ### NDVI MOD vs. MYD: plot basis
  ls_val <- foreach(i = rst, j = list("mod14a1", "myd14a1"), .packages = lib,
                    .export = "sortByElevation") %dopar% {
    mat_val <- extract(i, plot_shape)
    df_val <- data.frame(PlotID = plot_shape@data$PlotID, mat_val)
    # Layer names encode the date in characters 5-11 ("%Y%j").
    names(df_val)[2:ncol(df_val)] <- substr(names(df_val)[2:ncol(df_val)], 5, 11)
    df_val <- sortByElevation(plot_names = plot_names, plot_shape = plot_shape,
                              val = df_val)
    mlt_val <- melt(df_val, id.vars = c(1, ncol(df_val)), variable.name = "date",
                    value.name = toupper(j))
    mlt_val$date <- as.Date(mlt_val$date, format = "%Y%j")
    # Values are stored as integers scaled by 10000.
    mlt_val[, toupper(j)] <- mlt_val[, toupper(j)] / 10000
    return(mlt_val)
  }

  p <- ggplot() +
    geom_line(aes(x = date, y = MOD14A1), data = ls_val[[1]], color = "black",
              alpha = .35) +
    geom_line(aes(x = date, y = MYD14A1), data = ls_val[[2]], color = "grey",
              alpha = .35) +
    stat_smooth(aes(x = date, y = MOD14A1), data = ls_val[[1]], method = "lm",
                color = "black", se = FALSE, lwd = 1, lty = 1) +
    stat_smooth(aes(x = date, y = MYD14A1), data = ls_val[[2]], method = "lm",
                color = "grey", se = FALSE, lwd = 1, lty = 1) +
    facet_wrap(~ PlotID, ncol = 5, scales = "free_y") +
    scale_x_date(labels = date_format("%Y"),
                 breaks = date_breaks(width = "4 years"),
                 minor_breaks = waiver()) +
    labs(x = "Time", y = "NDVI") +
    theme_bw() +
    theme(panel.grid = element_blank())

  ### Cloud frequency: share of NA (masked) cells per plot
  if (!is.null(cld)) {
    ls_cld <- foreach(i = cld, .packages = lib, .export = "sortByElevation") %dopar% {
      mat_cld <- extract(i, plot_shape)
      # seq_len() is safe for zero-row input (1:nrow would yield c(1, 0)).
      val_cld <- sapply(seq_len(nrow(mat_cld)), function(j) {
        val <- sum(is.na(mat_cld[j, ])) / ncol(mat_cld)
        return(round(val, 2))
      })
      df_cld <- data.frame(PlotID = plot_shape@data$PlotID, CLD = val_cld)
      df_cld <- sortByElevation(plot_names = plot_names,
                                plot_shape = plot_shape, val = df_cld)
      return(df_cld)
    }
    p <- p +
      geom_text(aes(label = paste("CLD:", CLD)),
                data = ls_cld[[1]],
                x = -Inf, y = -Inf, hjust = -.2, vjust = -.4, size = 2.5)
  }

  ### View zenith angle: per-plot row-wise median
  if (!is.null(vza)) {
    ls_vza <- foreach(i = vza, .packages = lib, .export = "sortByElevation") %dopar% {
      mat_vza <- extract(i, plot_shape)
      med_vza <- rowMedians(mat_vza, na.rm = TRUE)
      df_vza <- data.frame(PlotID = plot_shape@data$PlotID, VZA = med_vza)
      df_vza <- sortByElevation(plot_names = plot_names,
                                plot_shape = plot_shape, val = df_vza)
      return(df_vza)
    }
    p <- p +
      geom_text(aes(label = paste("VZA:", VZA)),
                data = ls_vza[[1]],
                x = Inf, y = -Inf, hjust = 1.2, vjust = -.4, size = 2.5)
  }

  return(p)
}
|
765f52b6b55ce1f87992db2deaf24a65f79dafd7
|
7f1a6429c7b2682207da95ed23f7ee476660453b
|
/plot2.R
|
d2ebb9f843e8e04fec0949089058b580bdf224dc
|
[] |
no_license
|
RezaZahedi/datasciencecoursera
|
9addbdb916e80d8e5bfb93dce3879edd0c18345d
|
3130a0dbf63abf1f17dfe2ea8c641289128865d5
|
refs/heads/master
| 2020-03-29T13:26:43.079820
| 2018-09-23T08:10:22
| 2018-09-23T08:10:22
| 149,959,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
plot2.R
|
# Exploratory plot 2: Global Active Power over 2007-02-01 .. 2007-02-02.
# Reads the full household power dataset ('?' marks missing values),
# restricts it to the two target days, and writes a line chart to plot2.png.
power <- read.table("./household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?", stringsAsFactors = FALSE)
power$Date <- strptime(power$Date, format = "%d/%m/%Y")
feb <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")
feb$datetime <- strptime(paste(feb$Date, feb$Time), "%Y-%m-%d %H:%M:%S")
feb <- feb[, 3:10]  # drop Date/Time columns, keep measurements + datetime
png("./plot2.png")
plot(feb$datetime, feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
b94c4d037be5755f192a214b12b77bf100dbfd4d
|
a10e0c583478ff199d4c9f2db8fb3df5d99a2076
|
/data/create_learning2014b.R
|
f00505a823198eee564d3bf5226ffcd1bd610de1
|
[] |
no_license
|
bramberntzen/IODS-project
|
34fce96fce2655800d9c57386cf1caa319d2e8ec
|
997faea1b065060414f4360ad1fd5fca777669a4
|
refs/heads/master
| 2020-04-02T12:56:08.029137
| 2018-12-09T14:49:43
| 2018-12-09T14:49:43
| 154,459,049
| 0
| 0
| null | 2018-10-24T07:39:30
| 2018-10-24T07:39:30
| null |
UTF-8
|
R
| false
| false
| 2,830
|
r
|
create_learning2014b.R
|
#Bram Berntzen, 5.11.2018, data wrangling
#First exercise week 2
#Read the learning2014 data into R (tab-separated, downloaded from the course server)
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)
#data structure (variable info)
str(lrn14)
#data dimensions (nr. of rows and columns, or observations and variables)
dim(lrn14)
#Check column names of lrn14
colnames(lrn14)
#create column 'attitude' by scaling the column "Attitude" by the total number of attitude aspects, which is 10
lrn14$attitude <- lrn14$Attitude / 10
#Make vectors for deep, surface and strategic learning questions
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30","D06", "D15", "D23", "D31")
surface_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
strategic_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")
#install dplyr with install.packages("dplyr")
#Access dplyr library
library(dplyr)
#NOTE(review): one_of() is superseded in current dplyr; all_of() would be the
#modern equivalent, but the code is left as-is here.
#select the columns related to deep learning and create column 'deep' by averaging
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
#select the columns related to surface learning and create column 'surf' by averaging
surface_columns <- select(lrn14, one_of(surface_questions))
lrn14$surf <- rowMeans(surface_columns)
#select the columns related to strategic learning and create column 'stra' by averaging
strategic_columns <- select(lrn14, one_of(strategic_questions))
lrn14$stra <- rowMeans(strategic_columns)
#Exclude observations that have zero points (the 'Points' -> 'points' rename happens further below)
lrn14 <- filter(lrn14, Points > 0)
# print out the column names of the data
colnames(lrn14)
#Keep the the variables gender, age, attitude, deep, stra, surf and points
keep_columns <- c("gender","Age","attitude", "deep", "stra", "surf", "Points")
learning2014 <- select(lrn14, one_of(keep_columns))
#Check if the data now has 166 observations and 7 variables
dim(learning2014)
# change 'Age' to 'age' and 'Points' to 'points'
colnames(learning2014)[2] <- "age"
colnames(learning2014)[7] <- "points"
# print out the new column names of the data
colnames(learning2014)
#Figure out which working directory is used by R and change it to the IODS prohect folder through "Session", "set working directory", "choose directory" -> "IODS project folder"
getwd()
#set working directory
#NOTE(review): hard-coded, machine-specific setwd() makes the script
#non-portable; consider relative paths or an RStudio project instead.
setwd("C:/Users/Bram/Desktop/Open data science course/GitHub/IODS-project/data")
#Save as .txt file
write.table(learning2014, file = "learning2014d.txt", sep = " ", dec=".", row.names = FALSE, col.names = TRUE)
#Read .txt file
learning2014 <- read.table(file = "learning2014d.txt", header = TRUE, dec = ".")
#Use str and head to make sure the structure of the file is correct
str(learning2014)
head(learning2014)
|
71afcafc82ad566870c625279c6102eef0df2b6e
|
4aa0f435d17da7d75ea4e636b0d207908bbff0f0
|
/BUS4028F Regression.R
|
c57198243ceb6bdfaa4a5c204b60036332d609ba
|
[] |
no_license
|
rohinjain97/Thesis
|
db7b71ba4dc7096f9e01efb00ef7bdbf9885361b
|
a299ea9f95464b8cfa657efa4f99f6256df96610
|
refs/heads/master
| 2020-03-22T13:05:17.661552
| 2018-07-31T06:00:12
| 2018-07-31T06:00:12
| 140,082,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,195
|
r
|
BUS4028F Regression.R
|
# Logistic-regression exploration of exam exemption for BUS4028F.
# NOTE(review): rm(list = ls()) in a script is discouraged; it only clears the
# global environment and hides state problems rather than fixing them.
rm(list = ls())
# NOTE(review): read.csv() cannot parse a binary .xlsx workbook -- either this
# file is actually semicolon-delimited text with an .xlsx extension, or this
# call fails at runtime; verify the input format.
BUS4028F_data=read.csv("BUS4028F.xlsx", header = TRUE, sep = ";" , na.strings = c("","NA"))
BUS4028F_data<-na.omit(BUS4028F_data)
# Recode exemption flag "E" to the level "1" and drop unused factor levels.
levels(BUS4028F_data$Exemption)<-c(levels(BUS4028F_data$Exemption),1)
BUS4028F_data$Exemption[BUS4028F_data$Exemption=="E"]<-1
BUS4028F_data$Exemption<-droplevels(BUS4028F_data$Exemption)
# Keep only the columns needed for the exam-results analysis.
exam_set=cbind(BUS4028F_data[1],BUS4028F_data[4], BUS4028F_data[5], BUS4028F_data[6],BUS4028F_data[7],
BUS4028F_data[8],BUS4028F_data[9], BUS4028F_data[11], BUS4028F_data[12], BUS4028F_data[13],
BUS4028F_data[14])
#creating the datasets for exam results (one per registration year; [-1] drops RegAcadYear)
exam_set_2012 = exam_set[which(exam_set$RegAcadYear=="2012"),][-1]
exam_set_2013 = exam_set[which(exam_set$RegAcadYear=="2013"),][-1]
exam_set_2014 = exam_set[which(exam_set$RegAcadYear=="2014"),][-1]
exam_set_2015 = exam_set[which(exam_set$RegAcadYear=="2015"),][-1]
exam_set_2016 = exam_set[which(exam_set$RegAcadYear=="2016"),][-1]
exam_set_2017 = exam_set[which(exam_set$RegAcadYear=="2017"),][-1]
# 2012-2014 = pre-protest cohort (Protest = 0); 2015-2017 = protest cohort (Protest = 1).
exam_set_non_protests = rbind(exam_set_2012, exam_set_2013, exam_set_2014)
exam_set_non_protests = cbind(exam_set_non_protests,Protest=0)
exam_set_protests = rbind(exam_set_2015,exam_set_2016,exam_set_2017)
exam_set_protests = cbind(exam_set_protests,Protest=1)
exam_set_combined=rbind(exam_set_protests,exam_set_non_protests)
# NOTE(review): attach() is discouraged -- it masks columns into the search
# path; explicit data= arguments in glm() would be safer.
attach(exam_set_combined)
# Single-predictor logistic models used to pick a starting covariate by AIC.
res_b1<-glm(Exemption~Gender, family=binomial(link="logit"))
res_b2<-glm(Exemption~DEMOG, family=binomial(link="logit")) #base is black
res_b3<-glm(Exemption~SA.Citizenship.Status, family=binomial(link="logit"))
#res_b4<-glm(Exemption~RegProgram, family=binomial(link="logit")) #base is cb003
res_b5<-glm(Exemption~Protest, family=binomial(link="logit"))
#res_b6<-glm(Exemption~Deferred,family=binomial(link="logit"))
#no deferred, since it was in first semester
#it was decided to exclude regprog due to small numbers in dataset
AIC(res_b1,res_b2,res_b3,res_b5)
#demographic has the lowest AIC, start with this base
res_1<-glm(Exemption~DEMOG, family=binomial(link="logit"))
summary(res_1)
#white and chinese are significant
res_2<-glm(Exemption~DEMOG+Gender, family=binomial(link="logit"))
summary(res_2) #male coeff is significant (p=0.49847), we also ignore Transgender
anova(res_1, res_2, test="Chisq") #gender is not significant
#anova, p=0.1066
res_3<-glm(Exemption~DEMOG+Protest, family=binomial(link='logit'))
summary(res_3)
anova(res_1,res_3,test="Chisq") #adding protest is not significant at 5% (p=0.5949)
res_4<-glm(Exemption~DEMOG+SA.Citizenship.Status, family=binomial(link='logit'))
summary(res_4) #NA in foreign level?? go fix?? I think it may be that F corresponds exactly with international
anova(res_1, res_4, test="Chisq") #P factor level is not significant at p=0.9134
#ignore regprog due to small numbers
#res_5<-glm(Exemption~DEMOG+RegProgram, family=binomial(link='logit'))
#summary(res_5) #None of the coeff are significant, however
#anova(res_1, res_5, test="Chisq") #But CB020 has 9 students, CB025 (14), CB026 (2), CB019(13), CB018(14)
#not sure if I should include Reg Prog since so few students
#no deferred, since it was in first semester
#res_de<-glm(Exemption~DEMOG+Deferred, family=binomial(link='logit'))
#summary(res_de) #coefficient is not significant, however, only 17 students in dataset
#anova(res_1, res_de, test="Chisq") #p=0.456
#the final model consists of Demograhpy, SA citizenship
#protests is not a significant predictor variable
#res_6<-glm(Exemption~DEMOG+SA.Citizenship.Status+SA.Citizenship.Status*Protest, family=binomial(link='logit'))
#summary(res_6) #interaction term not significant
#anova(res_4,res_6, test="Chisq") #p=0.8922
res_7<-glm(Exemption~DEMOG+DEMOG*Protest, family=binomial(link='logit'))
summary(res_7) #only white:protest is significant (looks like a negative effect)
anova(res_1,res_7, test="Chisq") #p=0.07551, !
#it seems like res_1 is the best
res_all<-glm(Exemption~Protest+DEMOG+Gender+SA.Citizenship.Status+RegProgram, family=binomial(link='logit'))
summary(res_all) #protest coefficient is not significant
anova(res_all, test="Chisq") #p=0.154
#even when you put the protest variable first, there is no significance
|
9aa7a23918a38372690840724e7fdf023b417933
|
45d010dd13400f20e5403a1fafcbb5a0c8b92d8f
|
/figS5B.R
|
ed116f2d7f0ca2aba873f25e14da75ee14224766
|
[] |
no_license
|
drsisu/M_and_M
|
ba82281240eb86456d57e724d4db6a1d11ece9dd
|
c167bbf15d7344bf2d0b8a40b3a80368e035d5bb
|
refs/heads/master
| 2022-05-31T03:35:27.803415
| 2018-09-14T14:40:33
| 2018-09-14T14:40:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,497
|
r
|
figS5B.R
|
## Objects expected in bd_v1.rda:
## mt.v1, ct.v1, bdt.v1,
## mt.v1.AA, ct.v1.AA, bdt.v1.AA,
## mt.v1.nonAA, ct.v1.nonAA, bdt.v1.nonAA,
load("bd_v1.rda")
## Is the mean log10 abundance of $\beta$-defensin in AA at visit V1 and
## specific CST samples significantly different between TERM and sPTB subjects?
## Helper: for one CST, Welch t-test of log10(BD) between TERM (sPTB==0) and
## sPTB (sPTB==1); returns formatted strings "mean (n)", "mean (n)", p-value.
mean.log10.bd.v1.cst <- function(cst)
{
idx <- mt.v1$cst==cst
y <- log10(bdt.v1$BD_v1[idx])
x <- mt.v1$sPTB[idx]
r <- t.test( y[x==0], y[x==1] )
s <- c(paste0(sprintf("%.2f",r$estimate[1]), " (", sum(x==0), ")"),
paste0(sprintf("%.2f",r$estimate[2]), " (", sum(x==1), ")"),
sprintf("%.2f",r$p.value))
s
}
cst <- "I"
(r <- mean.log10.bd.v1.cst(cst))
## Build the TERM/sPTB/p-value table with one row per CST level.
mean.log10.bd.v1.cst.tbl <- matrix(NA,nrow=length(names(table(mt.v1$cst))), ncol=3)
colnames(mean.log10.bd.v1.cst.tbl) <- c("TERM (n)", "sPTB (n)", "p-val")
rownames(mean.log10.bd.v1.cst.tbl) <- names(table(mt.v1$cst))
for ( cst in rownames(mean.log10.bd.v1.cst.tbl) )
{
print(cst)
r <- mean.log10.bd.v1.cst(cst)
mean.log10.bd.v1.cst.tbl[cst,] <- r
}
mean.log10.bd.v1.cst.tbl
## Export the table to LaTeX (latex() -- presumably Hmisc::latex; confirm the
## package is attached earlier in the session).
tblTexFile <- "../docs/Tables/mean_log10_bd_AA_V1_CST.tex"
tblLabel <- "mean.log10.bd.v1.cst:tbl"
lt <- latex(mean.log10.bd.v1.cst.tbl, file=tblTexFile, label=tblLabel,
caption.loc='bottom',longtable=FALSE, rowlabel="", caption="",
where='!htp',vbar=FALSE)
##
## plotting mean beta-defensin values in TERM and sPTB over all subjects
##
pdf("../pics/mean_log10_bd_V1.pdf", width=6, height=6)
op <- par(mar=c(3.5, 3.5, 0.5, 0.5), mgp=c(2.25,0.6,0),tcl = -0.3)
## NOTE(review): axes=F uses the reassignable F; prefer FALSE.
plot(c(1-0.5,2+0.75), range(log10(bdt.v1$BD_v1)), type='n', xlab="", ylab="log10 ( BD )", axes=F)
box()
axis(2, las=1)
axis(1, at=1, labels=c("TERM"), las=2)
axis(1, at=2, labels=c("sPTB"), las=2)
## Layout constants: jitter amount, mean-bar half-width factor, line width.
## NOTE(review): `c <- 1.5` shadows base::c within this script.
a <- 0.1
c <- 1.5
lwd <- 3
y <- log10(bdt.v1$BD_v1)
x <- mt.v1$sPTB
dx <- 0.05
r <- t.test( y[x==0], y[x==1] )
m.TERM <- r$estimate[1]
m.sPTB <- r$estimate[2]
## Jittered points per group, red bars at the group means.
points(jitter(rep(1, length(y[x==0])), amount=a), y[x==0])
points(jitter(rep(2, length(y[x==1])), amount=a), y[x==1])
segments(1-c*a, m.TERM, 1+c*a, m.TERM, col='red', lwd=lwd)
segments(2-c*a, m.sPTB, 2+c*a, m.sPTB, col='red', lwd=lwd)
## significance bracket
segments(1+c*a+dx, m.TERM, 2.5, m.TERM, col=1, lwd=0.5)
segments(2+c*a+dx, m.sPTB, 2.5, m.sPTB, col=1, lwd=0.5)
segments(2.5, m.TERM, 2.5, m.sPTB, col=1, lwd=0.5)
text(2.5+dx, (m.TERM + m.sPTB)/2, labels="*", cex=1.5)
par(op)
r
##t = 3.7439, df = 117.25, p-value = 0.0002823
## mean of x mean of y
## 4.476802 4.100683
dev.off()
|
9171fcd2ed4dcba4818efd8f92e298e9d225b91c
|
420cac816c739b8f6a3581c1628d706f7d398beb
|
/R/rhow.R
|
b6aafd549421846d54cb6e0200416f30d0390433
|
[] |
no_license
|
cran/RobustAFT
|
d80a89efb8ffcc80b604d5959893210aab0ae31b
|
357b7400ae0a4d0be157b6a46970eb04d8b9ea51
|
refs/heads/master
| 2023-08-31T10:42:53.415730
| 2023-08-21T16:40:02
| 2023-08-21T17:30:23
| 17,693,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48
|
r
|
rhow.R
|
"rhow" <-
function(z,const){exp(z)-z-const}
|
2d515380cb56d8078f4a978f1e43f4242afb4cff
|
ec162b6438595e5982fb710cafa197dd5bdfefd2
|
/man/check_residuals_autocorrelation.Rd
|
ab34342af7a88868042bd357e5b08024e3089064
|
[] |
no_license
|
a3digit/auditor
|
d6e69a1ab874c2a9cfc577ca4933a4e9b3d8b736
|
782b88b4738c7b006ac59f93f9d8163ec8fd5444
|
refs/heads/master
| 2022-04-04T14:55:13.352890
| 2020-02-18T09:20:09
| 2020-02-18T09:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 894
|
rd
|
check_residuals_autocorrelation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_residuals.R
\name{check_residuals_autocorrelation}
\alias{check_residuals_autocorrelation}
\title{Checks for autocorrelation in target variable or in residuals}
\usage{
check_residuals_autocorrelation(object, method = "pearson")
}
\arguments{
\item{object}{An object of class 'explainer' created with function \code{\link[DALEX]{explain}} from the DALEX package.}
\item{method}{Character string giving the correlation method; passed to the \code{cor.test} function.}
}
\value{
autocorrelation between target variable and between residuals
}
\description{
Checks for autocorrelation in target variable or in residuals
}
\examples{
library(DALEX)
dragons <- DALEX::dragons[1:100, ]
lm_model <- lm(life_length ~ ., data = dragons)
lm_exp <- explain(lm_model, data = dragons, y = dragons$life_length)
library(auditor)
check_residuals_autocorrelation(lm_exp)
}
|
d93ca16581c229c3db3db644f1ef952d2a6c93b5
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.wafregional/R/paws.wafregional_interfaces.R
|
b430983bcbf23a93f534cb2210005cad553d684f
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99,364
|
r
|
paws.wafregional_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
NULL
# Generated by make.paws: AssociateWebACL request shape. Captures the caller's
# arguments and fills the AWS API shape template via paws.common::populate().
associate_web_acl_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(WebACLId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ResourceArn = structure(logical(0), tags = list(type = "string",
            max = 1224L, min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: AssociateWebACL response shape (empty structure).
associate_web_acl_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateByteMatchSet request shape (Name + ChangeToken).
create_byte_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateByteMatchSet response shape (ByteMatchSet with
# its tuples, plus ChangeToken).
create_byte_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(ByteMatchSet = structure(list(ByteMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), ByteMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TargetString = structure(logical(0), tags = list(type = "blob")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                    "LOWERCASE", "CMD_LINE", "URL_DECODE"))), PositionalConstraint = structure(logical(0),
                tags = list(type = "string", enum = c("EXACTLY",
                    "STARTS_WITH", "ENDS_WITH", "CONTAINS", "CONTAINS_WORD")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateGeoMatchSet request shape (Name + ChangeToken).
create_geo_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateGeoMatchSet response shape. The Value enum is
# the full ISO 3166-1 alpha-2 country list accepted by the AWS WAF API.
create_geo_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(GeoMatchSet = structure(list(GeoMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), GeoMatchConstraints = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = "Country")),
            Value = structure(logical(0), tags = list(type = "string",
                enum = c("AF", "AX", "AL", "DZ", "AS", "AD",
                  "AO", "AI", "AQ", "AG", "AR", "AM", "AW", "AU",
                  "AT", "AZ", "BS", "BH", "BD", "BB", "BY", "BE",
                  "BZ", "BJ", "BM", "BT", "BO", "BQ", "BA", "BW",
                  "BV", "BR", "IO", "BN", "BG", "BF", "BI", "KH",
                  "CM", "CA", "CV", "KY", "CF", "TD", "CL", "CN",
                  "CX", "CC", "CO", "KM", "CG", "CD", "CK", "CR",
                  "CI", "HR", "CU", "CW", "CY", "CZ", "DK", "DJ",
                  "DM", "DO", "EC", "EG", "SV", "GQ", "ER", "EE",
                  "ET", "FK", "FO", "FJ", "FI", "FR", "GF", "PF",
                  "TF", "GA", "GM", "GE", "DE", "GH", "GI", "GR",
                  "GL", "GD", "GP", "GU", "GT", "GG", "GN", "GW",
                  "GY", "HT", "HM", "VA", "HN", "HK", "HU", "IS",
                  "IN", "ID", "IR", "IQ", "IE", "IM", "IL", "IT",
                  "JM", "JP", "JE", "JO", "KZ", "KE", "KI", "KP",
                  "KR", "KW", "KG", "LA", "LV", "LB", "LS", "LR",
                  "LY", "LI", "LT", "LU", "MO", "MK", "MG", "MW",
                  "MY", "MV", "ML", "MT", "MH", "MQ", "MR", "MU",
                  "YT", "MX", "FM", "MD", "MC", "MN", "ME", "MS",
                  "MA", "MZ", "MM", "NA", "NR", "NP", "NL", "NC",
                  "NZ", "NI", "NE", "NG", "NU", "NF", "MP", "NO",
                  "OM", "PK", "PW", "PS", "PA", "PG", "PY", "PE",
                  "PH", "PN", "PL", "PT", "PR", "QA", "RE", "RO",
                  "RU", "RW", "BL", "SH", "KN", "LC", "MF", "PM",
                  "VC", "WS", "SM", "ST", "SA", "SN", "RS", "SC",
                  "SL", "SG", "SX", "SK", "SI", "SB", "SO", "ZA",
                  "GS", "SS", "ES", "LK", "SD", "SR", "SJ", "SZ",
                  "SE", "CH", "SY", "TW", "TJ", "TZ", "TH", "TL",
                  "TG", "TK", "TO", "TT", "TN", "TR", "TM", "TC",
                  "TV", "UG", "UA", "AE", "GB", "US", "UM", "UY",
                  "UZ", "VU", "VE", "VN", "VG", "VI", "WF", "EH",
                  "YE", "ZM", "ZW")))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure")),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateIPSet request shape (Name + ChangeToken).
create_ip_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateIPSet response shape (IPSet with IPv4/IPv6
# descriptors, plus ChangeToken).
create_ip_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(IPSet = structure(list(IPSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), IPSetDescriptors = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPV4", "IPV6"))),
            Value = structure(logical(0), tags = list(type = "string"))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateRateBasedRule request shape (rate limit
# bounded to [2000, 2000000000] requests per the API constraints).
create_rate_based_rule_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), MetricName = structure(logical(0),
        tags = list(type = "string")), RateKey = structure(logical(0),
        tags = list(type = "string", enum = "IP")), RateLimit = structure(logical(0),
        tags = list(type = "long", max = 2000000000L, min = 2000L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateRateBasedRule response shape (Rule with its
# match predicates, plus ChangeToken).
create_rate_based_rule_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Rule = structure(list(RuleId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), MatchPredicates = structure(list(structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "list")), RateKey = structure(logical(0),
            tags = list(type = "string", enum = "IP")), RateLimit = structure(logical(0),
            tags = list(type = "long", max = 2000000000L, min = 2000L))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateRegexMatchSet request shape (Name + ChangeToken).
create_regex_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Generated by make.paws: CreateRegexMatchSet response shape (RegexMatchSet
# with its tuples, plus ChangeToken).
create_regex_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RegexMatchSet = structure(list(RegexMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), RegexMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                    "LOWERCASE", "CMD_LINE", "URL_DECODE"))), RegexPatternSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateRegexPatternSet: set name plus change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_regex_pattern_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateRegexPatternSet (generated from the API model):
# the created RegexPatternSet (id, name, up to 10 pattern strings) plus the
# change token. Filled from args by populate() (defined elsewhere).
create_regex_pattern_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RegexPatternSet = structure(list(RegexPatternSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), RegexPatternStrings = structure(list(structure(logical(0),
            tags = list(type = "string", min = 1L))), tags = list(type = "list",
            max = 10L))), tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateRule: rule name, CloudWatch metric name, change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_rule_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            MetricName = structure(logical(0),
                tags = list(type = "string")),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateRule (generated from the API model): the created
# Rule (id, name, metric name, predicate list) plus the change token.
# Filled from args by populate() (defined elsewhere).
create_rule_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Rule = structure(list(RuleId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), Predicates = structure(list(structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure")),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateRuleGroup: group name, metric name, change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_rule_group_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            MetricName = structure(logical(0),
                tags = list(type = "string")),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateRuleGroup (generated from the API model): the
# created RuleGroup (id, name, metric name) plus the change token.
# Filled from args by populate() (defined elsewhere).
create_rule_group_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RuleGroup = structure(list(RuleGroupId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateSizeConstraintSet: set name plus change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_size_constraint_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateSizeConstraintSet (generated from the API model):
# the created SizeConstraintSet (id, name, constraints with field/transform/
# comparison operator/size bounds) plus the change token. Filled from args
# by populate() (defined elsewhere).
create_size_constraint_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SizeConstraintSet = structure(list(SizeConstraintSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), SizeConstraints = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE"))), ComparisonOperator = structure(logical(0),
                tags = list(type = "string", enum = c("EQ", "NE",
                  "LE", "LT", "GE", "GT"))), Size = structure(logical(0),
                tags = list(type = "long", max = 21474836480,
                  min = 0L))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure")),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateSqlInjectionMatchSet: set name plus change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_sql_injection_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateSqlInjectionMatchSet (generated from the API
# model): the created SqlInjectionMatchSet (id, name, match tuples with
# field/transform) plus the change token. Filled from args by populate()
# (defined elsewhere).
create_sql_injection_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SqlInjectionMatchSet = structure(list(SqlInjectionMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), SqlInjectionMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateWebACL (generated from the API model): ACL name,
# metric name, default action (BLOCK/ALLOW/COUNT), and change token.
# Filled from args by populate() (defined elsewhere).
create_web_acl_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Name = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), MetricName = structure(logical(0),
        tags = list(type = "string")), DefaultAction = structure(list(Type = structure(logical(0),
        tags = list(type = "string", enum = c("BLOCK", "ALLOW",
            "COUNT")))), tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Response shape for CreateWebACL (generated from the API model): the
# created WebACL (id, name, metric name, default action, rule list with
# priorities/actions/override actions/excluded rules, ARN) plus the change
# token. Filled from args by populate() (defined elsewhere).
create_web_acl_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(WebACL = structure(list(WebACLId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), DefaultAction = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("BLOCK", "ALLOW",
                "COUNT")))), tags = list(type = "structure")),
        Rules = structure(list(structure(list(Priority = structure(logical(0),
            tags = list(type = "integer")), RuleId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L)),
            Action = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("BLOCK",
                  "ALLOW", "COUNT")))), tags = list(type = "structure")),
            OverrideAction = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("NONE",
                  "COUNT")))), tags = list(type = "structure")),
            Type = structure(logical(0), tags = list(type = "string",
                enum = c("REGULAR", "RATE_BASED", "GROUP"))),
            ExcludedRules = structure(list(structure(list(RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L))),
                tags = list(type = "structure"))), tags = list(type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "list")),
        WebACLArn = structure(logical(0), tags = list(type = "string",
            max = 1224L, min = 1L))), tags = list(type = "structure")),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for CreateXssMatchSet: set name plus change token.
# populate() (defined elsewhere in the package) fills the template from args.
create_xss_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            Name = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for CreateXssMatchSet (generated from the API model): the
# created XssMatchSet (id, name, match tuples with field/transform) plus
# the change token. Filled from args by populate() (defined elsewhere).
create_xss_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(XssMatchSet = structure(list(XssMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), XssMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure")), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for DeleteByteMatchSet: target set id plus change token.
# populate() (defined elsewhere in the package) fills the template from args.
delete_byte_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            ByteMatchSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteByteMatchSet: only the change token comes back.
delete_byte_match_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteGeoMatchSet: target set id plus change token.
delete_geo_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            GeoMatchSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteGeoMatchSet: only the change token comes back.
delete_geo_match_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteIPSet: target set id plus change token.
delete_ip_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            IPSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteIPSet: only the change token comes back.
delete_ip_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteLoggingConfiguration: resource ARN only.
delete_logging_configuration_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ResourceArn = structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteLoggingConfiguration: empty structure.
delete_logging_configuration_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(list(), tags = list(type = "structure"))
    populate(params, tmpl)
}
# Request shape for DeletePermissionPolicy: resource ARN only.
delete_permission_policy_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ResourceArn = structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeletePermissionPolicy: empty structure.
delete_permission_policy_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(list(), tags = list(type = "structure"))
    populate(params, tmpl)
}
# Request shape for DeleteRateBasedRule: rule id plus change token.
delete_rate_based_rule_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteRateBasedRule: only the change token comes back.
delete_rate_based_rule_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteRegexMatchSet: target set id plus change token.
delete_regex_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RegexMatchSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteRegexMatchSet: only the change token comes back.
delete_regex_match_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteRegexPatternSet: target set id plus change token.
delete_regex_pattern_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RegexPatternSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteRegexPatternSet: only the change token comes back.
delete_regex_pattern_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteRule: rule id plus change token.
delete_rule_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteRule: only the change token comes back.
delete_rule_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteRuleGroup: group id plus change token.
delete_rule_group_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RuleGroupId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteRuleGroup: only the change token comes back.
delete_rule_group_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteSizeConstraintSet: target set id plus change token.
delete_size_constraint_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            SizeConstraintSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteSizeConstraintSet: only the change token comes back.
delete_size_constraint_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteSqlInjectionMatchSet: target set id plus change token.
delete_sql_injection_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            SqlInjectionMatchSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteSqlInjectionMatchSet: only the change token comes back.
delete_sql_injection_match_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteWebACL: ACL id plus change token.
delete_web_acl_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            WebACLId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteWebACL: only the change token comes back.
delete_web_acl_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DeleteXssMatchSet: target set id plus change token.
delete_xss_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            XssMatchSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            ChangeToken = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DeleteXssMatchSet: only the change token comes back.
delete_xss_match_set_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for DisassociateWebACL: resource ARN only.
disassociate_web_acl_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ResourceArn = structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for DisassociateWebACL: empty structure.
disassociate_web_acl_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(list(), tags = list(type = "structure"))
    populate(params, tmpl)
}
# Request shape for GetByteMatchSet: target set id only.
get_byte_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ByteMatchSetId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetByteMatchSet (generated from the API model): the
# ByteMatchSet (id, name, byte-match tuples with field/target string/
# transform/positional constraint). Filled from args by populate()
# (defined elsewhere).
get_byte_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(ByteMatchSet = structure(list(ByteMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), ByteMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TargetString = structure(logical(0), tags = list(type = "blob")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE"))), PositionalConstraint = structure(logical(0),
                tags = list(type = "string", enum = c("EXACTLY",
                  "STARTS_WITH", "ENDS_WITH", "CONTAINS", "CONTAINS_WORD")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for GetChangeToken: no parameters (empty structure).
get_change_token_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(list(), tags = list(type = "structure"))
    populate(params, tmpl)
}
# Response shape for GetChangeToken: the issued change token.
get_change_token_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for GetChangeTokenStatus: the token to query.
get_change_token_status_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetChangeTokenStatus: token status enum
# (PROVISIONED / PENDING / INSYNC).
get_change_token_status_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ChangeTokenStatus = structure(logical(0),
            tags = list(type = "string", enum = c("PROVISIONED",
                "PENDING", "INSYNC")))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for GetGeoMatchSet: target set id only.
get_geo_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(GeoMatchSetId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetGeoMatchSet (generated from the API model): the
# GeoMatchSet (id, name, geo constraints). The long enum below is the full
# ISO 3166-1 alpha-2 country-code list accepted as constraint values — do
# not edit it by hand; it mirrors the service's API model. Filled from args
# by populate() (defined elsewhere).
get_geo_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(GeoMatchSet = structure(list(GeoMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), GeoMatchConstraints = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = "Country")),
            Value = structure(logical(0), tags = list(type = "string",
                enum = c("AF", "AX", "AL", "DZ", "AS", "AD",
                  "AO", "AI", "AQ", "AG", "AR", "AM", "AW", "AU",
                  "AT", "AZ", "BS", "BH", "BD", "BB", "BY", "BE",
                  "BZ", "BJ", "BM", "BT", "BO", "BQ", "BA", "BW",
                  "BV", "BR", "IO", "BN", "BG", "BF", "BI", "KH",
                  "CM", "CA", "CV", "KY", "CF", "TD", "CL", "CN",
                  "CX", "CC", "CO", "KM", "CG", "CD", "CK", "CR",
                  "CI", "HR", "CU", "CW", "CY", "CZ", "DK", "DJ",
                  "DM", "DO", "EC", "EG", "SV", "GQ", "ER", "EE",
                  "ET", "FK", "FO", "FJ", "FI", "FR", "GF", "PF",
                  "TF", "GA", "GM", "GE", "DE", "GH", "GI", "GR",
                  "GL", "GD", "GP", "GU", "GT", "GG", "GN", "GW",
                  "GY", "HT", "HM", "VA", "HN", "HK", "HU", "IS",
                  "IN", "ID", "IR", "IQ", "IE", "IM", "IL", "IT",
                  "JM", "JP", "JE", "JO", "KZ", "KE", "KI", "KP",
                  "KR", "KW", "KG", "LA", "LV", "LB", "LS", "LR",
                  "LY", "LI", "LT", "LU", "MO", "MK", "MG", "MW",
                  "MY", "MV", "ML", "MT", "MH", "MQ", "MR", "MU",
                  "YT", "MX", "FM", "MD", "MC", "MN", "ME", "MS",
                  "MA", "MZ", "MM", "NA", "NR", "NP", "NL", "NC",
                  "NZ", "NI", "NE", "NG", "NU", "NF", "MP", "NO",
                  "OM", "PK", "PW", "PS", "PA", "PG", "PY", "PE",
                  "PH", "PN", "PL", "PT", "PR", "QA", "RE", "RO",
                  "RU", "RW", "BL", "SH", "KN", "LC", "MF", "PM",
                  "VC", "WS", "SM", "ST", "SA", "SN", "RS", "SC",
                  "SL", "SG", "SX", "SK", "SI", "SB", "SO", "ZA",
                  "GS", "SS", "ES", "LK", "SD", "SR", "SJ", "SZ",
                  "SE", "CH", "SY", "TW", "TJ", "TZ", "TH", "TL",
                  "TG", "TK", "TO", "TT", "TN", "TR", "TM", "TC",
                  "TV", "UG", "UA", "AE", "GB", "US", "UM", "UY",
                  "UZ", "VU", "VE", "VN", "VG", "VI", "WF", "EH",
                  "YE", "ZM", "ZW")))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for GetIPSet: target set id only.
get_ip_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(IPSetId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetIPSet (generated from the API model): the IPSet
# (id, name, descriptors with IPV4/IPV6 type and value). Filled from args
# by populate() (defined elsewhere).
get_ip_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(IPSet = structure(list(IPSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), IPSetDescriptors = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPV4", "IPV6"))),
            Value = structure(logical(0), tags = list(type = "string"))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for GetLoggingConfiguration: resource ARN only.
get_logging_configuration_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ResourceArn = structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetLoggingConfiguration (generated from the API model):
# the LoggingConfiguration (resource ARN, exactly one log destination config,
# redacted fields). Filled from args by populate() (defined elsewhere).
get_logging_configuration_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(LoggingConfiguration = structure(list(ResourceArn = structure(logical(0),
        tags = list(type = "string", max = 1224L, min = 1L)),
        LogDestinationConfigs = structure(list(structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
            tags = list(type = "list", max = 1L, min = 1L)),
        RedactedFields = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for GetPermissionPolicy: resource ARN only.
get_permission_policy_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(ResourceArn = structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetPermissionPolicy: the policy document string.
get_permission_policy_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(Policy = structure(logical(0),
            tags = list(type = "string", min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for GetRateBasedRule: rule id only.
get_rate_based_rule_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(RuleId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetRateBasedRule (generated from the API model): the
# rate-based Rule (id, name, metric name, match predicates, rate key "IP",
# rate limit bounded 2000..2e9). Filled from args by populate()
# (defined elsewhere).
get_rate_based_rule_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Rule = structure(list(RuleId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), MatchPredicates = structure(list(structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "list")), RateKey = structure(logical(0),
            tags = list(type = "string", enum = "IP")), RateLimit = structure(logical(0),
            tags = list(type = "long", max = 2000000000L, min = 2000L))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Request shape for GetRateBasedRuleManagedKeys: rule id plus pagination marker.
get_rate_based_rule_managed_keys_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
            NextMarker = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetRateBasedRuleManagedKeys: managed key list plus
# pagination marker.
get_rate_based_rule_managed_keys_output <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(
            ManagedKeys = structure(
                list(structure(logical(0), tags = list(type = "string"))),
                tags = list(type = "list")),
            NextMarker = structure(logical(0),
                tags = list(type = "string", min = 1L))
        ),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Request shape for GetRegexMatchSet: target set id only.
get_regex_match_set_input <- function(...)
{
    params <- c(as.list(environment()), list(...))
    tmpl <- structure(
        list(RegexMatchSetId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
        tags = list(type = "structure")
    )
    populate(params, tmpl)
}
# Response shape for GetRegexMatchSet (generated from the API model): the
# RegexMatchSet (id, name, match tuples with field/transform/pattern-set
# references). Filled from args by populate() (defined elsewhere).
get_regex_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RegexMatchSet = structure(list(RegexMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), RegexMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE"))), RegexPatternSetId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetRegexPatternSet request shape (single set identifier) and
# populate it from the supplied arguments.
get_regex_pattern_set_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    set_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(RegexPatternSetId = set_id), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetRegexPatternSet response shape: a RegexPatternSet with id,
# name, and up to 10 pattern strings; filled from the supplied arguments.
get_regex_pattern_set_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    name_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    pattern_elem <- structure(logical(0), tags = list(type = "string", min = 1L))
    patterns <- structure(list(pattern_elem), tags = list(type = "list", max = 10L))
    pattern_set <- structure(
        list(RegexPatternSetId = id_field, Name = name_field,
            RegexPatternStrings = patterns),
        tags = list(type = "structure")
    )
    shape <- structure(list(RegexPatternSet = pattern_set),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetRule request shape (single rule identifier) and populate it
# from the supplied arguments.
get_rule_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    rule_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(RuleId = rule_id), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetRule response shape -- a Rule with id, name, metric name, and
# a list of predicates (Negated flag, Type enum, DataId) -- and fill it from
# the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_rule_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Rule = structure(list(RuleId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), Predicates = structure(list(structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetRuleGroup request shape (single rule-group identifier) and
# populate it from the supplied arguments.
get_rule_group_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    group_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(RuleGroupId = group_id), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetRuleGroup response shape: a RuleGroup with id, name, and
# metric name; filled from the supplied arguments.
get_rule_group_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    name_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    metric <- structure(logical(0), tags = list(type = "string"))
    rule_group <- structure(
        list(RuleGroupId = id_field, Name = name_field, MetricName = metric),
        tags = list(type = "structure")
    )
    shape <- structure(list(RuleGroup = rule_group), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetSampledRequests request shape: web-ACL and rule identifiers,
# a start/end time window, and a cap on the number of sampled items.
get_sampled_requests_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    id128 <- function() {
        structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    }
    ts <- function() {
        structure(logical(0), tags = list(type = "timestamp"))
    }
    window <- structure(list(StartTime = ts(), EndTime = ts()),
        tags = list(type = "structure"))
    max_items <- structure(logical(0), tags = list(type = "long", max = 500L, min = 1L))
    shape <- structure(
        list(WebAclId = id128(), RuleId = id128(), TimeWindow = window,
            MaxItems = max_items),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the GetSampledRequests response shape -- a list of sampled requests
# (client IP, country, URI, method, HTTP version, headers, weight, timestamp,
# action, matching rule), the population size, and the effective time window
# -- and fill it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_sampled_requests_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SampledRequests = structure(list(structure(list(Request = structure(list(ClientIP = structure(logical(0),
        tags = list(type = "string")), Country = structure(logical(0),
        tags = list(type = "string")), URI = structure(logical(0),
        tags = list(type = "string")), Method = structure(logical(0),
        tags = list(type = "string")), HTTPVersion = structure(logical(0),
        tags = list(type = "string")), Headers = structure(list(structure(list(Name = structure(logical(0),
        tags = list(type = "string")), Value = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "list"))), tags = list(type = "structure")),
        Weight = structure(logical(0), tags = list(type = "long",
            min = 0L)), Timestamp = structure(logical(0), tags = list(type = "timestamp")),
        Action = structure(logical(0), tags = list(type = "string")),
        RuleWithinRuleGroup = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L))), tags = list(type = "structure"))),
        tags = list(type = "list")), PopulationSize = structure(logical(0),
        tags = list(type = "long")), TimeWindow = structure(list(StartTime = structure(logical(0),
        tags = list(type = "timestamp")), EndTime = structure(logical(0),
        tags = list(type = "timestamp"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetSizeConstraintSet request shape (single set identifier) and
# populate it from the supplied arguments.
get_size_constraint_set_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    set_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(SizeConstraintSetId = set_id),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetSizeConstraintSet response shape -- a SizeConstraintSet (id,
# name, and a list of size constraints: FieldToMatch, TextTransformation
# enum, ComparisonOperator enum, Size bound) -- and fill it from the
# supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_size_constraint_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SizeConstraintSet = structure(list(SizeConstraintSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), SizeConstraints = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE"))), ComparisonOperator = structure(logical(0),
                tags = list(type = "string", enum = c("EQ", "NE",
                  "LE", "LT", "GE", "GT"))), Size = structure(logical(0),
                tags = list(type = "long", max = 21474836480,
                  min = 0L))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetSqlInjectionMatchSet request shape (single set identifier)
# and populate it from the supplied arguments.
get_sql_injection_match_set_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    set_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(SqlInjectionMatchSetId = set_id),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetSqlInjectionMatchSet response shape -- a SqlInjectionMatchSet
# (id, name, and a list of match tuples: FieldToMatch + TextTransformation
# enum) -- and fill it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_sql_injection_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SqlInjectionMatchSet = structure(list(SqlInjectionMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), SqlInjectionMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetWebACL request shape (single web-ACL identifier) and
# populate it from the supplied arguments.
get_web_acl_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    acl_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(WebACLId = acl_id), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetWebACL response shape -- a WebACL (id, name, metric name,
# default action, list of activated rules with priority/action/override/
# type/excluded rules, and the ACL ARN) -- and fill it from the supplied
# arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_web_acl_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(WebACL = structure(list(WebACLId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), MetricName = structure(logical(0),
            tags = list(type = "string")), DefaultAction = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("BLOCK", "ALLOW",
                "COUNT")))), tags = list(type = "structure")),
        Rules = structure(list(structure(list(Priority = structure(logical(0),
            tags = list(type = "integer")), RuleId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L)),
            Action = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("BLOCK",
                  "ALLOW", "COUNT")))), tags = list(type = "structure")),
            OverrideAction = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("NONE",
                  "COUNT")))), tags = list(type = "structure")),
            Type = structure(logical(0), tags = list(type = "string",
                enum = c("REGULAR", "RATE_BASED", "GROUP"))),
            ExcludedRules = structure(list(structure(list(RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L))),
                tags = list(type = "structure"))), tags = list(type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "list")),
        WebACLArn = structure(logical(0), tags = list(type = "string",
            max = 1224L, min = 1L))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the GetWebACLForResource request shape (single resource ARN) and
# populate it from the supplied arguments.
get_web_acl_for_resource_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    arn <- structure(logical(0), tags = list(type = "string", max = 1224L, min = 1L))
    shape <- structure(list(ResourceArn = arn), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetWebACLForResource response shape: a WebACLSummary holding the
# associated ACL's id and name; filled from the supplied arguments.
get_web_acl_for_resource_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    name_field <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(WebACLId = id_field, Name = name_field),
        tags = list(type = "structure"))
    shape <- structure(list(WebACLSummary = summary_shape),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetXssMatchSet request shape (single set identifier) and
# populate it from the supplied arguments.
get_xss_match_set_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    set_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    shape <- structure(list(XssMatchSetId = set_id), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the GetXssMatchSet response shape -- an XssMatchSet (id, name, and a
# list of match tuples: FieldToMatch + TextTransformation enum) -- and fill
# it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
get_xss_match_set_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(XssMatchSet = structure(list(XssMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Name = structure(logical(0), tags = list(type = "string",
            max = 128L, min = 1L)), XssMatchTuples = structure(list(structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
            TextTransformation = structure(logical(0), tags = list(type = "string",
                enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                  "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the ListActivatedRulesInRuleGroup request shape: rule-group id plus
# pagination marker and page-size limit; filled from the supplied arguments.
list_activated_rules_in_rule_group_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    group_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(
        list(RuleGroupId = group_id, NextMarker = marker, Limit = limit),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListActivatedRulesInRuleGroup response shape -- a pagination
# marker plus a list of activated rules (priority, rule id, action/override
# enums, rule type, excluded rules) -- and fill it from the supplied
# arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
list_activated_rules_in_rule_group_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(NextMarker = structure(logical(0),
        tags = list(type = "string", min = 1L)), ActivatedRules = structure(list(structure(list(Priority = structure(logical(0),
        tags = list(type = "integer")), RuleId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Action = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("BLOCK", "ALLOW",
                "COUNT")))), tags = list(type = "structure")),
        OverrideAction = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("NONE", "COUNT")))),
            tags = list(type = "structure")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("REGULAR",
                "RATE_BASED", "GROUP"))), ExcludedRules = structure(list(structure(list(RuleId = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L))),
            tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the ListByteMatchSets request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_byte_match_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListByteMatchSets response shape: a pagination marker plus a
# list of set summaries (ByteMatchSetId + Name); filled from the arguments.
list_byte_match_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(ByteMatchSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            ByteMatchSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListGeoMatchSets request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_geo_match_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListGeoMatchSets response shape: a pagination marker plus a
# list of set summaries (GeoMatchSetId + Name); filled from the arguments.
list_geo_match_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(GeoMatchSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            GeoMatchSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListIPSets request shape (pagination marker + page-size limit)
# and populate it from the supplied arguments.
list_ip_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListIPSets response shape: a pagination marker plus a list of
# set summaries (IPSetId + Name); filled from the supplied arguments.
list_ip_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(IPSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            IPSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListLoggingConfigurations request shape (pagination marker +
# page-size limit) and populate it from the supplied arguments.
list_logging_configurations_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListLoggingConfigurations response shape -- a list of logging
# configurations (resource ARN, exactly one log destination ARN, redacted
# fields) plus a pagination marker -- and fill it from the supplied
# arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
list_logging_configurations_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(LoggingConfigurations = structure(list(structure(list(ResourceArn = structure(logical(0),
        tags = list(type = "string", max = 1224L, min = 1L)),
        LogDestinationConfigs = structure(list(structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
            tags = list(type = "list", max = 1L, min = 1L)),
        RedactedFields = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "list")), NextMarker = structure(logical(0),
        tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the ListRateBasedRules request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_rate_based_rules_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListRateBasedRules response shape: a pagination marker plus a
# list of rule summaries (RuleId + Name); filled from the arguments.
list_rate_based_rules_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(RuleId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            Rules = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListRegexMatchSets request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_regex_match_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListRegexMatchSets response shape: a pagination marker plus a
# list of set summaries (RegexMatchSetId + Name); filled from the arguments.
list_regex_match_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(RegexMatchSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            RegexMatchSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListRegexPatternSets request shape (pagination marker +
# page-size limit) and populate it from the supplied arguments.
list_regex_pattern_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListRegexPatternSets response shape: a pagination marker plus a
# list of set summaries (RegexPatternSetId + Name); filled from the arguments.
list_regex_pattern_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(RegexPatternSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            RegexPatternSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListResourcesForWebACL request shape: web-ACL id plus the
# resource-type enum; filled from the supplied arguments.
list_resources_for_web_acl_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    acl_id <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    res_type <- structure(
        logical(0),
        tags = list(type = "string",
            enum = c("APPLICATION_LOAD_BALANCER", "API_GATEWAY"))
    )
    shape <- structure(list(WebACLId = acl_id, ResourceType = res_type),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListResourcesForWebACL response shape: a list of resource ARN
# strings; filled from the supplied arguments.
list_resources_for_web_acl_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    arn_elem <- structure(logical(0), tags = list(type = "string", max = 1224L, min = 1L))
    shape <- structure(
        list(ResourceArns = structure(list(arn_elem), tags = list(type = "list"))),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListRuleGroups request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_rule_groups_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListRuleGroups response shape: a pagination marker plus a list
# of group summaries (RuleGroupId + Name); filled from the arguments.
list_rule_groups_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(RuleGroupId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            RuleGroups = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListRules request shape (pagination marker + page-size limit)
# and populate it from the supplied arguments.
list_rules_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListRules response shape: a pagination marker plus a list of
# rule summaries (RuleId + Name); filled from the supplied arguments.
list_rules_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(RuleId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            Rules = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListSizeConstraintSets request shape (pagination marker +
# page-size limit) and populate it from the supplied arguments.
list_size_constraint_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListSizeConstraintSets response shape: a pagination marker plus
# a list of set summaries (SizeConstraintSetId + Name); filled from the
# supplied arguments.
list_size_constraint_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(SizeConstraintSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            SizeConstraintSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListSqlInjectionMatchSets request shape (pagination marker +
# page-size limit) and populate it from the supplied arguments.
list_sql_injection_match_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListSqlInjectionMatchSets response shape: a pagination marker
# plus a list of set summaries (SqlInjectionMatchSetId + Name); filled from
# the supplied arguments.
list_sql_injection_match_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(SqlInjectionMatchSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            SqlInjectionMatchSets = structure(list(summary_shape),
                tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListSubscribedRuleGroups request shape (pagination marker +
# page-size limit) and populate it from the supplied arguments.
list_subscribed_rule_groups_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListSubscribedRuleGroups response shape: a pagination marker
# plus a list of subscribed group summaries (RuleGroupId, Name, MetricName);
# filled from the supplied arguments.
list_subscribed_rule_groups_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    metric <- structure(logical(0), tags = list(type = "string"))
    summary_shape <- structure(
        list(RuleGroupId = str128, Name = str128, MetricName = metric),
        tags = list(type = "structure")
    )
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            RuleGroups = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListWebACLs request shape (pagination marker + page-size limit)
# and populate it from the supplied arguments.
list_web_ac_ls_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListWebACLs response shape: a pagination marker plus a list of
# ACL summaries (WebACLId + Name); filled from the supplied arguments.
list_web_ac_ls_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(WebACLId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            WebACLs = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the ListXssMatchSets request shape (pagination marker + page-size
# limit) and populate it from the supplied arguments.
list_xss_match_sets_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    marker <- structure(logical(0), tags = list(type = "string", min = 1L))
    limit <- structure(logical(0), tags = list(type = "integer", max = 100L, min = 0L))
    shape <- structure(list(NextMarker = marker, Limit = limit),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the ListXssMatchSets response shape: a pagination marker plus a
# list of set summaries (XssMatchSetId + Name); filled from the arguments.
list_xss_match_sets_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    str128 <- structure(logical(0), tags = list(type = "string", max = 128L, min = 1L))
    summary_shape <- structure(list(XssMatchSetId = str128, Name = str128),
        tags = list(type = "structure"))
    shape <- structure(
        list(
            NextMarker = structure(logical(0), tags = list(type = "string", min = 1L)),
            XssMatchSets = structure(list(summary_shape), tags = list(type = "list"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# Build the PutLoggingConfiguration request shape -- a LoggingConfiguration
# with resource ARN, exactly one log destination ARN, and optional redacted
# fields -- and fill it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
put_logging_configuration_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(LoggingConfiguration = structure(list(ResourceArn = structure(logical(0),
        tags = list(type = "string", max = 1224L, min = 1L)),
        LogDestinationConfigs = structure(list(structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
            tags = list(type = "list", max = 1L, min = 1L)),
        RedactedFields = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the PutLoggingConfiguration response shape -- it echoes back the
# stored LoggingConfiguration (resource ARN, log destination, redacted
# fields) -- and fill it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
put_logging_configuration_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(LoggingConfiguration = structure(list(ResourceArn = structure(logical(0),
        tags = list(type = "string", max = 1224L, min = 1L)),
        LogDestinationConfigs = structure(list(structure(logical(0),
            tags = list(type = "string", max = 1224L, min = 1L))),
            tags = list(type = "list", max = 1L, min = 1L)),
        RedactedFields = structure(list(structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("URI", "QUERY_STRING",
                "HEADER", "METHOD", "BODY", "SINGLE_QUERY_ARG",
                "ALL_QUERY_ARGS"))), Data = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure"))),
            tags = list(type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the PutPermissionPolicy request shape: resource ARN plus the policy
# document string; filled from the supplied arguments.
put_permission_policy_input <- function(...) {
    args <- c(as.list(environment()), list(...))
    arn <- structure(logical(0), tags = list(type = "string", max = 1224L, min = 1L))
    policy <- structure(logical(0), tags = list(type = "string", min = 1L))
    shape <- structure(list(ResourceArn = arn, Policy = policy),
        tags = list(type = "structure"))
    populate(args, shape)
}
# Build the (empty) PutPermissionPolicy response shape and populate it from
# the supplied arguments; the API returns no fields for this operation.
put_permission_policy_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    empty_shape <- structure(list(), tags = list(type = "structure"))
    populate(args, empty_shape)
}
# Build the UpdateByteMatchSet request shape -- set id, change token, and a
# non-empty list of updates, each an INSERT/DELETE action plus a byte match
# tuple (FieldToMatch, TargetString blob, TextTransformation enum,
# PositionalConstraint enum) -- and fill it via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
update_byte_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(ByteMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            ByteMatchTuple = structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("URI",
                  "QUERY_STRING", "HEADER", "METHOD", "BODY",
                  "SINGLE_QUERY_ARG", "ALL_QUERY_ARGS"))), Data = structure(logical(0),
                tags = list(type = "string"))), tags = list(type = "structure")),
                TargetString = structure(logical(0), tags = list(type = "blob")),
                TextTransformation = structure(logical(0), tags = list(type = "string",
                  enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                    "LOWERCASE", "CMD_LINE", "URL_DECODE"))),
                PositionalConstraint = structure(logical(0),
                  tags = list(type = "string", enum = c("EXACTLY",
                    "STARTS_WITH", "ENDS_WITH", "CONTAINS", "CONTAINS_WORD")))),
                tags = list(type = "structure"))), tags = list(type = "structure"))),
            tags = list(type = "list", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
# Build the UpdateByteMatchSet response shape (a single change token) and
# populate it from the supplied arguments.
update_byte_match_set_output <- function(...) {
    args <- c(as.list(environment()), list(...))
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    shape <- structure(list(ChangeToken = token), tags = list(type = "structure"))
    populate(args, shape)
}
# Build the UpdateGeoMatchSet request shape -- set id, change token, and a
# non-empty list of updates, each an INSERT/DELETE action plus a geo match
# constraint whose Value enum is the full ISO 3166 country-code list -- and
# fill it from the supplied arguments via populate().
# NOTE(review): appears machine-generated; edit the generator, not this code.
update_geo_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(GeoMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            GeoMatchConstraint = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = "Country")),
                Value = structure(logical(0), tags = list(type = "string",
                  enum = c("AF", "AX", "AL", "DZ", "AS", "AD",
                    "AO", "AI", "AQ", "AG", "AR", "AM", "AW",
                    "AU", "AT", "AZ", "BS", "BH", "BD", "BB",
                    "BY", "BE", "BZ", "BJ", "BM", "BT", "BO",
                    "BQ", "BA", "BW", "BV", "BR", "IO", "BN",
                    "BG", "BF", "BI", "KH", "CM", "CA", "CV",
                    "KY", "CF", "TD", "CL", "CN", "CX", "CC",
                    "CO", "KM", "CG", "CD", "CK", "CR", "CI",
                    "HR", "CU", "CW", "CY", "CZ", "DK", "DJ",
                    "DM", "DO", "EC", "EG", "SV", "GQ", "ER",
                    "EE", "ET", "FK", "FO", "FJ", "FI", "FR",
                    "GF", "PF", "TF", "GA", "GM", "GE", "DE",
                    "GH", "GI", "GR", "GL", "GD", "GP", "GU",
                    "GT", "GG", "GN", "GW", "GY", "HT", "HM",
                    "VA", "HN", "HK", "HU", "IS", "IN", "ID",
                    "IR", "IQ", "IE", "IM", "IL", "IT", "JM",
                    "JP", "JE", "JO", "KZ", "KE", "KI", "KP",
                    "KR", "KW", "KG", "LA", "LV", "LB", "LS",
                    "LR", "LY", "LI", "LT", "LU", "MO", "MK",
                    "MG", "MW", "MY", "MV", "ML", "MT", "MH",
                    "MQ", "MR", "MU", "YT", "MX", "FM", "MD",
                    "MC", "MN", "ME", "MS", "MA", "MZ", "MM",
                    "NA", "NR", "NP", "NL", "NC", "NZ", "NI",
                    "NE", "NG", "NU", "NF", "MP", "NO", "OM",
                    "PK", "PW", "PS", "PA", "PG", "PY", "PE",
                    "PH", "PN", "PL", "PT", "PR", "QA", "RE",
                    "RO", "RU", "RW", "BL", "SH", "KN", "LC",
                    "MF", "PM", "VC", "WS", "SM", "ST", "SA",
                    "SN", "RS", "SC", "SL", "SG", "SX", "SK",
                    "SI", "SB", "SO", "ZA", "GS", "SS", "ES",
                    "LK", "SD", "SR", "SJ", "SZ", "SE", "CH",
                    "SY", "TW", "TJ", "TZ", "TH", "TL", "TG",
                    "TK", "TO", "TT", "TN", "TR", "TM", "TC",
                    "TV", "UG", "UA", "AE", "GB", "US", "UM",
                    "UY", "UZ", "VU", "VE", "VN", "VG", "VI",
                    "WF", "EH", "YE", "ZM", "ZW")))), tags = list(type = "structure"))),
            tags = list(type = "structure"))), tags = list(type = "list",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_geo_match_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateIPSet): builds the
# nested shape template (IPSetId, ChangeToken, Updates of IPv4/IPv6
# descriptors) and merges the caller's arguments via populate(), a helper
# defined elsewhere in the package.
update_ip_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(IPSetId = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
        tags = list(type = "string", enum = c("INSERT", "DELETE"))),
        IPSetDescriptor = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPV4", "IPV6"))),
            Value = structure(logical(0), tags = list(type = "string"))),
            tags = list(type = "structure"))), tags = list(type = "structure"))),
        tags = list(type = "list", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_ip_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateRateBasedRule):
# builds the nested shape template (RuleId, ChangeToken, predicate Updates,
# RateLimit bounded 2000..2e9) and merges the caller's arguments via
# populate(), a helper defined elsewhere in the package.
update_rate_based_rule_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RuleId = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
        tags = list(type = "string", enum = c("INSERT", "DELETE"))),
        Predicate = structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "list")),
        RateLimit = structure(logical(0), tags = list(type = "long",
            max = 2000000000L, min = 2000L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_rate_based_rule_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateRegexMatchSet):
# builds the nested shape template (RegexMatchSetId, tuple Updates with
# field-to-match and text-transformation enums, ChangeToken) and merges the
# caller's arguments via populate(), a helper defined elsewhere in the package.
update_regex_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RegexMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            RegexMatchTuple = structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("URI",
                    "QUERY_STRING", "HEADER", "METHOD", "BODY",
                    "SINGLE_QUERY_ARG", "ALL_QUERY_ARGS"))), Data = structure(logical(0),
                tags = list(type = "string"))), tags = list(type = "structure")),
                TextTransformation = structure(logical(0), tags = list(type = "string",
                    enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                        "LOWERCASE", "CMD_LINE", "URL_DECODE"))),
                RegexPatternSetId = structure(logical(0), tags = list(type = "string",
                    max = 128L, min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "structure"))), tags = list(type = "list",
            min = 1L)), ChangeToken = structure(logical(0), tags = list(type = "string",
        min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_regex_match_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateRegexPatternSet):
# builds the nested shape template (RegexPatternSetId, pattern-string Updates,
# ChangeToken) and merges the caller's arguments via populate(), a helper
# defined elsewhere in the package.
update_regex_pattern_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RegexPatternSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            RegexPatternString = structure(logical(0), tags = list(type = "string",
                min = 1L))), tags = list(type = "structure"))),
            tags = list(type = "list", min = 1L)), ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_regex_pattern_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateRule): builds the
# nested shape template (RuleId, ChangeToken, predicate Updates) and merges
# the caller's arguments via populate(), a helper defined elsewhere in the
# package.
update_rule_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RuleId = structure(logical(0), tags = list(type = "string",
        max = 128L, min = 1L)), ChangeToken = structure(logical(0),
        tags = list(type = "string", min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
        tags = list(type = "string", enum = c("INSERT", "DELETE"))),
        Predicate = structure(list(Negated = structure(logical(0),
            tags = list(type = "boolean")), Type = structure(logical(0),
            tags = list(type = "string", enum = c("IPMatch",
                "ByteMatch", "SqlInjectionMatch", "GeoMatch",
                "SizeConstraint", "XssMatch", "RegexMatch"))),
            DataId = structure(logical(0), tags = list(type = "string",
                max = 128L, min = 1L))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "list"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
update_rule_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateRuleGroup): builds
# the nested shape template (RuleGroupId, ActivatedRule Updates with action /
# override-action enums and excluded rules, ChangeToken) and merges the
# caller's arguments via populate(), a helper defined elsewhere in the package.
update_rule_group_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(RuleGroupId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            ActivatedRule = structure(list(Priority = structure(logical(0),
                tags = list(type = "integer")), RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
                Action = structure(list(Type = structure(logical(0),
                    tags = list(type = "string", enum = c("BLOCK",
                        "ALLOW", "COUNT")))), tags = list(type = "structure")),
                OverrideAction = structure(list(Type = structure(logical(0),
                    tags = list(type = "string", enum = c("NONE",
                        "COUNT")))), tags = list(type = "structure")),
                Type = structure(logical(0), tags = list(type = "string",
                    enum = c("REGULAR", "RATE_BASED", "GROUP"))),
                ExcludedRules = structure(list(structure(list(RuleId = structure(logical(0),
                    tags = list(type = "string", max = 128L, min = 1L))),
                    tags = list(type = "structure"))), tags = list(type = "list"))),
                tags = list(type = "structure"))), tags = list(type = "structure"))),
            tags = list(type = "list", min = 1L)), ChangeToken = structure(logical(0),
            tags = list(type = "string", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_rule_group_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateSizeConstraintSet):
# builds the nested shape template (SizeConstraintSetId, ChangeToken, Updates
# with comparison-operator enum and Size bounded 0..21474836480) and merges
# the caller's arguments via populate(), a helper defined elsewhere in the
# package.
update_size_constraint_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SizeConstraintSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            SizeConstraint = structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("URI",
                    "QUERY_STRING", "HEADER", "METHOD", "BODY",
                    "SINGLE_QUERY_ARG", "ALL_QUERY_ARGS"))), Data = structure(logical(0),
                tags = list(type = "string"))), tags = list(type = "structure")),
                TextTransformation = structure(logical(0), tags = list(type = "string",
                    enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                        "LOWERCASE", "CMD_LINE", "URL_DECODE"))),
                ComparisonOperator = structure(logical(0), tags = list(type = "string",
                    enum = c("EQ", "NE", "LE", "LT", "GE", "GT"))),
                Size = structure(logical(0), tags = list(type = "long",
                    max = 21474836480, min = 0L))), tags = list(type = "structure"))),
            tags = list(type = "structure"))), tags = list(type = "list",
            min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_size_constraint_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateSqlInjectionMatchSet):
# builds the nested shape template (SqlInjectionMatchSetId, ChangeToken, tuple
# Updates with field-to-match and text-transformation enums) and merges the
# caller's arguments via populate(), a helper defined elsewhere in the package.
update_sql_injection_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(SqlInjectionMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            SqlInjectionMatchTuple = structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("URI",
                    "QUERY_STRING", "HEADER", "METHOD", "BODY",
                    "SINGLE_QUERY_ARG", "ALL_QUERY_ARGS"))), Data = structure(logical(0),
                tags = list(type = "string"))), tags = list(type = "structure")),
                TextTransformation = structure(logical(0), tags = list(type = "string",
                    enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                        "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
                tags = list(type = "structure"))), tags = list(type = "structure"))),
            tags = list(type = "list", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_sql_injection_match_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateWebACL): builds the
# nested shape template (WebACLId, ChangeToken, ActivatedRule Updates,
# DefaultAction enum) and merges the caller's arguments via populate(), a
# helper defined elsewhere in the package.
update_web_acl_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(WebACLId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            ActivatedRule = structure(list(Priority = structure(logical(0),
                tags = list(type = "integer")), RuleId = structure(logical(0),
                tags = list(type = "string", max = 128L, min = 1L)),
                Action = structure(list(Type = structure(logical(0),
                    tags = list(type = "string", enum = c("BLOCK",
                        "ALLOW", "COUNT")))), tags = list(type = "structure")),
                OverrideAction = structure(list(Type = structure(logical(0),
                    tags = list(type = "string", enum = c("NONE",
                        "COUNT")))), tags = list(type = "structure")),
                Type = structure(logical(0), tags = list(type = "string",
                    enum = c("REGULAR", "RATE_BASED", "GROUP"))),
                ExcludedRules = structure(list(structure(list(RuleId = structure(logical(0),
                    tags = list(type = "string", max = 128L, min = 1L))),
                    tags = list(type = "structure"))), tags = list(type = "list"))),
                tags = list(type = "structure"))), tags = list(type = "structure"))),
            tags = list(type = "list")), DefaultAction = structure(list(Type = structure(logical(0),
            tags = list(type = "string", enum = c("BLOCK", "ALLOW",
                "COUNT")))), tags = list(type = "structure"))),
        tags = list(type = "structure"))
    return(populate(args, shape))
}
update_web_acl_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
# Auto-generated request marshaller (AWS WAF-style UpdateXssMatchSet): builds
# the nested shape template (XssMatchSetId, ChangeToken, tuple Updates with
# field-to-match and text-transformation enums) and merges the caller's
# arguments via populate(), a helper defined elsewhere in the package.
update_xss_match_set_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(XssMatchSetId = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L)),
        ChangeToken = structure(logical(0), tags = list(type = "string",
            min = 1L)), Updates = structure(list(structure(list(Action = structure(logical(0),
            tags = list(type = "string", enum = c("INSERT", "DELETE"))),
            XssMatchTuple = structure(list(FieldToMatch = structure(list(Type = structure(logical(0),
                tags = list(type = "string", enum = c("URI",
                    "QUERY_STRING", "HEADER", "METHOD", "BODY",
                    "SINGLE_QUERY_ARG", "ALL_QUERY_ARGS"))), Data = structure(logical(0),
                tags = list(type = "string"))), tags = list(type = "structure")),
                TextTransformation = structure(logical(0), tags = list(type = "string",
                    enum = c("NONE", "COMPRESS_WHITE_SPACE", "HTML_ENTITY_DECODE",
                        "LOWERCASE", "CMD_LINE", "URL_DECODE")))),
                tags = list(type = "structure"))), tags = list(type = "structure"))),
            tags = list(type = "list", min = 1L))), tags = list(type = "structure"))
    return(populate(args, shape))
}
update_xss_match_set_output <- function (...)
{
    # Collect whatever named response fields the caller supplied via `...`.
    params <- c(as.list(environment()), list(...))
    # Response template: a single ChangeToken string (min length 1).
    token <- structure(logical(0), tags = list(type = "string", min = 1L))
    populate(params, structure(list(ChangeToken = token), tags = list(type = "structure")))
}
|
29259ca7a0f3810021b7dcfe56a92c13665739bd
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RnavGraph/demo/ng_2d_iris.R
|
78fa6a64f1fe2ae8a06e475413f5063a14ee2b64
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
r
|
ng_2d_iris.R
|
## Demo script: navigation-graph scatterplot tour of the iris data (RnavGraph).
require(RnavGraph) || stop("RnavGraph library not available")

local({
  ## Wrap the four numeric iris measurements as an NG_data object,
  ## grouped by species and labelled with the first two species letters.
  ng.iris <- ng_data(name = "iris", data = iris[,1:4],
                     shortnames = c('s.L', 's.W', 'p.L', 'p.W'),
                     group = iris$Species,
                     labels = substr(iris$Species,1,2))
  ## get the variable graph node names (the short variable names)
  V <- shortnames(ng.iris)
  ## Complete graph on the variables; its line graph and the line graph's
  ## complement drive the two transition graphs built below.
  G <- completegraph(V)
  LG <- linegraph(G)
  LGnot <- complement(LG)
  ## they are all from the graph class
  class(G)
  ## generate NG_graph objects, laid out on a circle
  ng.lg <- ng_graph(name = '3D Transition', graph = LG, layout = 'circle')
  ng.lgnot <- ng_graph(name = '4D Transition', graph = LGnot, layout = 'circle')
  ## visualization instructions for 2d scatterplots
  viz3dTransition <- ng_2d(ng.iris,ng.lg, glyphs=c("s.L","s.W","p.L","p.W"))
  viz4dTransition <- ng_2d(ng.iris,ng.lgnot, glyphs=c("s.L","s.W","p.L","p.W"))
  ## pack them into lists
  viz <- list(viz3dTransition, viz4dTransition)
  graphs <- list(ng.lg, ng.lgnot)
  ## start the navGraph GUI (tk2d displays not linked)
  nav <- navGraph(data = ng.iris, graph = graphs, viz = viz, settings=list(tk2d=list(linked=FALSE)))
})

## Tell the user where the installed copy of this demo lives.
cat(paste("\n\nThe source code of this demo file is located at:\n",system.file("demo", "ng_2d_iris.R", package="RnavGraph"),"\n\n\n"))
|
45104f5785053e9bf2e9be0c743123ee01f92f3c
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/onlineforecast/R/score.R
|
e394c3d99056cdd1cc84814311181a57a21bd796
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,729
|
r
|
score.R
|
# Do this in a separate file to see the generated help:
#library(devtools)
#document()
#load_all(as.package("../../onlineforecast"))
#?score
#' Calculates the score for each horizon for a matrix with residuals for each horizon.
#'
#' Applies the \code{scorefun} on all horizons (each column) of the residuals matrix. See the description of each parameter for more details.
#'
#' @title Calculate the score for each horizon.
#' @param Residuals A matrix with residuals (columns named \code{hxx}) for which to calculate the score for each horizon.
#' @param scoreperiod as a logical vector controlling which points to be included in the score calculation. If NA then all values are included.
#' @param usecomplete if TRUE then only the values available for all horizons are included (i.e. if at one time point there is a missing value, then values for this time point is removed for all horizons in the calculation).
#' @param scorefun The score function.
#' @return A list with the a numeric vector with the score value for each horizon and the applied \code{scoreperiod} (note can be different from the given scoreperiod, if only complete observations are used (as per default)).
#' @examples
#'
#' # Just a vector to be forecasted
#' y <- c(filter(rnorm(100), 0.95, "recursive"))
#' # Generate a forecast matrix with a simple persistence model
#' Yhat <- persistence(y, kseq=1:4)
#' # The residuals for each horizon
#' Resid <- residuals(Yhat, y)
#'
#' # Calculate the score for the k1 horizon
#' score(Resid)$scoreval
#'
#' # The first values were excluded, since there are NAs
#' head(Resid)
#' score(Resid)$scoreperiod
#'
#' @importFrom stats complete.cases
#' @export
score <- function(Residuals, scoreperiod = NA, usecomplete = TRUE, scorefun = rmse){
    ## Resolve and validate the score period.
    # If no scoreperiod is given, then use all points
    if(is.na(scoreperiod[1])){
        scoreperiod <- rep(TRUE, nrow(Residuals))
    }else{
        # Do checking of scoreperiod
        txt <- "It must be set to an index (int or logical) defining which points to be evaluated in the scorefun()."
        if( length(scoreperiod) != nrow(Residuals) ){
            stop("scoreperiod is not same length as nrow(Residuals): ",txt)
        }
        if( all(is.na(scoreperiod)) ){
            stop("scoreperiod is all NA: ",txt)
        }
    }
    # Take only the rows which have a value for each horizon?
    if(usecomplete){
        scoreperiod <- scoreperiod & complete.cases(Residuals)
    }
    # Apply the score function to each horizon (column) restricted to the
    # score period. seq_len() instead of 1:ncol() so a zero-column input
    # yields an empty result rather than iterating over c(1, 0).
    scoreval <- sapply(seq_len(ncol(Residuals)), function(i){
        scorefun(Residuals[scoreperiod, i])
    })
    # Residual columns are named "hxx"; name the scores "kxx" accordingly.
    nams(scoreval) <- gsub("h", "k", nams(Residuals))
    # Return both the scores and the (possibly reduced) score period actually used.
    return(list(scoreval = scoreval, scoreperiod = scoreperiod))
}
|
917e196f8c17defe69ce6e6c0905d6259cbca0cf
|
db7ec77d86cc64a1f7288a409d0c86fc80db7b10
|
/call_script.R
|
1ba336c063ddbaf02660e97579a35b0be9c89502
|
[] |
no_license
|
dusty-turner/fantasyfootball2020
|
5e62c9c21c7937662ddba4ac1148198e78a382cc
|
f1fc71b19658db941da4ee098d3f6a234aff66c0
|
refs/heads/master
| 2023-01-23T15:33:08.981305
| 2020-11-17T23:12:31
| 2020-11-17T23:12:31
| 285,445,781
| 1
| 0
| null | 2020-09-20T02:56:33
| 2020-08-06T01:44:17
|
HTML
|
UTF-8
|
R
| false
| false
| 1,615
|
r
|
call_script.R
|
## Render the fantasy-football dashboard for each league and archive the
## resulting HTML under 03_ff2020_reports/ (and, on the author's machine,
## publish selected leagues to the blog site).
library(tidyverse)

# leagueID <- list(89417258)
# names <- list("OA")
leagueID <- list(847888,35354777,89417258,206814)
names <- list("jim","headshed","OA","Twitter_Guy")
per_id <- 9  # scoring period passed to every report

# NOTE(review): the default `per_id = per_id` is self-referential; calling
# run_reports() without supplying per_id would trigger a "promise already
# under evaluation" error. All calls below pass it explicitly, so it is latent.
run_reports <- function(leagueID, per_id = per_id, names) {
  # leagueID=847888
  # per_id=9
  # unlink("ff2020_reports",recursive = T,force = T)
  # Make sure the archive directory exists.
  if(!dir.exists("03_ff2020_reports")){
    dir.create("03_ff2020_reports")
  }
  # Knit the parameterised dashboard for this league.
  rmarkdown::render("ff2020.Rmd",params=list(
    per_id=per_id,
    leagueID=leagueID))
  # Rename the rendered HTML per league/period, archive a copy,
  # then remove the working copy and any knitr cache.
  file.rename(from="ff2020.html", to = paste0("ffdashboard_",names,"_",per_id,".html"))
  file.copy(from=str_c("ffdashboard_",names,"_",per_id,".html"),
            to=str_c("03_ff2020_reports/ffdashboard_",names,"_",per_id,".html" ),overwrite = T)
  file.remove(paste0(getwd(),"/","ffdashboard_",names,"_",per_id,".html"))
  unlink(x = "ff2020_cache*",recursive = T, force = T)
  # On the author's machine (user "turne"), also copy selected leagues into
  # the blog's static files and rebuild/publish via a local shell script.
  if(Sys.info()[[6]]=="turne" & names %in% c("headshed","OA","jim")){
    file.copy(from=str_c("03_ff2020_reports/ffdashboard_",names,"_",per_id,".html"),
              to=str_c("../blog/static/ff2020/ffdashboard_",names,"_",per_id,".html"),
              overwrite = TRUE
    )
    setwd("../blog")
    blogdown::serve_site()
    blogdown::stop_server()
    setwd("../fantasyfootball2020/")
    shell("C:/Users/turne/Desktop/Dustys_Files/R_Work/fantasyfootball2020/09_personal/shell1.sh")
    # shell(str_c(getwd(),"/09_personal/shell1.sh"))
  }
}

# map2(leagueIDs,rep(per_id_number,length(leagueIDs)),run_reports)
# Render one report per (leagueID, name) pair.
leagueID %>%
  purrr::walk2(names,.f = ~run_reports(leagueID = .x,names = .y,per_id = per_id))
|
dad2aeb211d049df21d9af1d846a44db80546b3f
|
9693dfebe7b0eec8b7b6d3ea65369b7da975c41f
|
/manuscript/scripts/drake-figs/COAD_ra_path.R
|
27dc38ce7eef016aca7f14d1497e9b870449091a
|
[
"MIT"
] |
permissive
|
spakowiczlab/atbac
|
06d585309cfa880e448b0cc991feb7d466ffb53b
|
6cb5774a796e06aa17be4e88f152a427bb6fd020
|
refs/heads/master
| 2023-05-02T22:24:13.627164
| 2023-04-19T18:21:49
| 2023-04-19T18:21:49
| 271,302,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
COAD_ra_path.R
|
### Find the JSON path information in the appropriate directory.
jinfo <- "COAD_ra.json"
# Fail early with a clear message if the JSON file is missing.
# NOTE(review): stop() has no `sep` argument, so sep='' just becomes one more
# (empty) piece of the message via `...` -- harmless but superfluous.
if (!file.exists(jinfo)) stop("Cannot locate file: '", jinfo, "'.\n", sep='')
### parse the JSON and keep only its "paths" element
library(rjson)
temp <- fromJSON(file = jinfo)
paths <- temp$paths
### unload rjson again so it does not linger on the search path
detach("package:rjson")
### clean up intermediates, leaving only `paths` in the workspace
rm(jinfo, temp)
|
085874dd8b628c4f1b34c4b436b1ff383a0fe7cc
|
f0352034f8467e2c82a31443ae6e3125039879ac
|
/man/plotReducedDims.Rd
|
8784abbed8a65d918e10b7f9dd8f0dbd3f923fcb
|
[] |
no_license
|
epurdom/clusterExperiment
|
8d5d43a250a1a3c28d4745aae4b72285458ba1a2
|
ae86ee09697c13ccd5d32f964e28ab7d82b455d6
|
refs/heads/master
| 2022-11-04T01:54:19.806886
| 2022-10-11T22:00:27
| 2022-10-11T22:00:27
| 47,139,877
| 39
| 15
| null | 2021-01-27T21:26:28
| 2015-11-30T19:06:53
|
R
|
UTF-8
|
R
| false
| true
| 3,748
|
rd
|
plotReducedDims.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotReduceDim.R
\name{plotReducedDims}
\alias{plotReducedDims}
\alias{plotReducedDims,ClusterExperiment-method}
\title{Plot 2-dimensionsal representation with clusters}
\usage{
\S4method{plotReducedDims}{ClusterExperiment}(
object,
whichCluster = "primary",
reducedDim = "PCA",
whichDims = c(1, 2),
plotUnassigned = TRUE,
legend = TRUE,
legendTitle = "",
nColLegend = 6,
clusterLegend = NULL,
unassignedColor = NULL,
missingColor = NULL,
pch = 19,
xlab = NULL,
ylab = NULL,
...
)
}
\arguments{
\item{object}{a ClusterExperiment object}
\item{whichCluster}{argument that can be a single numeric or character value
indicating the \emph{single} clustering to be used. Giving values that result in more than one clustering will result in an error. See details of \code{\link{getClusterIndex}}.}
\item{reducedDim}{What dimensionality reduction method to use. Should match
either a value in \code{reducedDimNames(object)} or one of the built-in
functions of \code{\link{listBuiltInReducedDims}()}}
\item{whichDims}{vector of length 2 giving the indices of which dimensions to
show. The first value goes on the x-axis and the second on the y-axis.}
\item{plotUnassigned}{logical as to whether unassigned (either -1 or -2
cluster values) should be plotted.}
\item{legend}{either logical, indicating whether to plot legend, or character
giving the location of the legend (passed to \code{\link{legend}})}
\item{legendTitle}{character value giving title for the legend. If NULL, uses
the clusterLabels value for clustering.}
\item{nColLegend}{The number of columns in legend. If missing, picks number
of columns internally.}
\item{clusterLegend}{matrix with three columns and colnames
'clusterIds','name', and 'color' that give the color and name of the
clusters in whichCluster. If NULL, pulls the information from
\code{object}.}
\item{unassignedColor}{If not NULL, should be character value giving the
color for unassigned (-1) samples (overrides \code{clusterLegend}) default.}
\item{missingColor}{If not NULL, should be character value giving the color
for missing (-2) samples (overrides \code{clusterLegend}) default.}
\item{pch}{the point type, passed to \code{plot.default}}
\item{xlab}{Label for x axis}
\item{ylab}{Label for y axis}
\item{...}{arguments passed to \code{\link{plot.default}}}
}
\value{
A plot is created. Nothing is returned.
}
\description{
Plot a 2-dimensional representation of the data, color-code by a
clustering.
}
\details{
If \code{plotUnassigned=TRUE}, and the color for -1 or -2 is set to
"white", will be coerced to "lightgrey" regardless of user input to
\code{missingColor} and \code{unassignedColor}. If \code{plotUnassigned=FALSE},
the samples with -1/-2 will not be plotted, nor will the category show up in the
legend.
If the requested \code{reducedDim} method has not been created yet,
the function will call \code{\link{makeReducedDims}} on the FIRST assay of
\code{x}. The results of this method will be saved as part of the object
and returned INVISIBLY (meaning if you don't save the output of the
plotting command, the results will vanish). To pick another assay, you
should call `makeReducedDims` directly and specify the assay.
}
\examples{
#clustering using pam: try using different dimensions of pca and different k
data(simData)
cl <- clusterMany(simData, nReducedDims=c(5, 10, 50), reducedDim="PCA",
clusterFunction="pam", ks=2:4, findBestK=c(TRUE,FALSE),
removeSil=c(TRUE,FALSE), makeMissingDiss=TRUE)
plotReducedDims(cl,legend="bottomright")
}
\seealso{
\code{\link{plot.default}}, \code{\link{makeReducedDims}}, \code{\link{listBuiltInReducedDims}()}
}
|
3729e71e19eb583968ae3e1c361b1fe04404445c
|
3aaf257b3eb138c404f74ba06a2a9861a198f419
|
/man/lr_neg.Rd
|
84b4a9938c11cef5b629552de5e88f3f2d195834
|
[] |
no_license
|
cran/ClinSigMeasures
|
f86d53d1fc6b69588f9454967ce682a753fe3f7b
|
99da607cbf920c25671a57fe2b9ef88d16ea9b35
|
refs/heads/master
| 2023-03-10T20:27:13.800615
| 2021-02-25T08:10:05
| 2021-02-25T08:10:05
| 342,293,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
rd
|
lr_neg.Rd
|
\name{lr_neg}
\alias{lr_neg}
\title{Likelihood Ratio Negative Calculation From a 2x2 Table}
\description{Calculates diagnostic test likelihood ratio negative and 95 percent confidence intervals for data from a 2x2 table}
\usage{lr_neg(Cell1, Cell2, Cell3, Cell4)}
\arguments{
\item{Cell1}{Value for cases with a positive test}
\item{Cell2}{Value for controls with a positive test}
\item{Cell3}{Value for cases with a negative test}
\item{Cell4}{Value for controls with a negative test}}
\value{Likelihood Ratio Negative and 95 percent confidence intervals}
\author{Mike Malek-Ahmadi}
\references{
1. Grimes DA, Schulz KF. Refining clinical diagnosis with likelihood ratios. Lancet 2005;365:1500-1505.
2. Dujardin B, Van den Ende J, Van Gompel A, Unger JP, Van der Stuyft P. Likelihood ratios: a real improvement for clinical decision making? European Journal of Epidemiology 1994 Feb;10(1):29-36.
}
\examples{
#From Table 1 in Dujardin et al (1994)
lr_neg(72, 9, 25, 137)
}
|
6d665ae261a34bd8b3a4051c892e67d8c5a772db
|
68051033da7272304dc75ca688ad3f8ef3559a66
|
/sims/code/plotfxn.R
|
d960107d92277608ff6e8a570d09ad7a1e40d01f
|
[] |
no_license
|
andywdahl/rgwas-scripts
|
e37ddf3ad4ac5fb4b25c9b054bdc5967bd3daefe
|
11e7201ee2af67d04d6114904db472fceddd7ab3
|
refs/heads/master
| 2022-05-29T12:48:47.033135
| 2022-04-04T02:37:05
| 2022-04-04T02:37:05
| 163,999,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,993
|
r
|
plotfxn.R
|
# Draws a multi-panel simulation-results figure: four panels titled
# "Null SNPs", "Hom SNPs (K=1)", "Hom SNPs" and "Het SNPs", each showing
# per-method curves against `x` (false-positive rate on a log10 scale for
# the first three panels, true-positive rate on a linear scale for the last).
#
#   x     -- x-axis values
#   ys    -- 4-d array indexed [panel, method, x, sig2-setting]
#   xlabs -- tick labels for the x axis
#   xname -- x-axis title, defaults to expression(sigma[pop]^2)
#
# NOTE(review): relies on globals `meths`, `cols` and `leg` (method names,
# line colours and legend labels) being defined in the calling environment --
# confirm before reusing outside the original sims scripts.
plotfxn <- function(x,ys,xlabs,xname=expression( sigma[pop]^2 )){
  # Panel layout: a row of titles, the four plot panels + legend column,
  # and a bottom row of x-axis labels; widths/heights tuned by hand.
  layout( cbind( rbind(
    c( 1, 3, 4, 5, 2, 6 ),
    c( 7, 14+c(1:3,5,4) ),
    8+c( 1, 3, 4, 5, 2, 6 )
  ),8), widths=c( 1.7, rep( 6, 3 ), 2.4, 6, 4.8 ), heights=c( 1.1, 5.3, 1.6 ) )
  ##### top: panel titles
  par( mar=rep(0,4) )
  plot.new()
  plot.new()
  for( zzz in 1:4 ){
    plot.new()
    text( .5, .5, cex=4.7, lab=c( 'Null SNPs', 'Hom SNPs (K=1)', 'Hom SNPs', 'Het SNPs' )[zzz] )
  }
  plot.new() ##### left spacer
  ##### right: legend column (colours/labels from globals `cols` and `leg`)
  par( mar=c( 10, 0, 10, 0 ) )
  plot.new()
  legend( 'center', fill=cols, leg=leg, cex=4, border=F, bty='n' )#, horiz=T )
  ##### bottom: x-axis titles and tick labels for each panel
  par( mar=rep(0,4) )
  plot.new()
  plot.new()
  par( mar=c( 1.0, 2, 1.0, 2.0 )/2 )
  for( zzz in 1:4 ){
    plot(range(x)+c(-.1,.1),0:1,type='n',axes=F,ylab='',xlab='')
    text( mean(range(x)), .25, cex=3.5, lab=xname )
    axis( 1, cex.axis=2.4, at=x, line=-10.1, lab=F )
    axis( 1, cex.axis=2.4, at=x, line=- 9.7, lab=xlabs, padj=.4, tick=F )
  }
  ##### meat: the four rate panels
  par( mar=c( 1.0, 1, 1.0, 1.0 )/2 )
  for( zzz in 1:4 )
  {
    allys <- ys[zzz,meths,,]
    if( zzz != 4 ){
      # FPR panels: log10 axis from .01 to 1
      ylim <- c( -2.2, 0 )
      ats <- c(.01 ,.05 ,.2 ,1)
      ats <- log10(ats)
      labs<- c('.01','.05','.2' ,'1' )
      allys <- log10(allys)
    } else {
      # TPR panel: linear axis from 0 to 1
      ylim <- c( 0, 1 )
      ats <- c(0,.25,.5 ,.75,1)
      labs <- ats
    }
    plot( range(x)+c(-.1,.1), ylim, type='n', axes=F, ylab='', main='', xlab='' )
    box()
    # Subfigure letter (a-d) and a reference line at 0.01 (log10 = -2).
    legend( 'topleft', bty='n', cex=4, leg=letters[zzz], adj=c(2.5,-.3) )
    abline( a=-2 , b=0, col='lightgrey', lty=3, lwd=8 )
    if( zzz %in% c(1,4) ){
      axis( 2, cex.axis=2.7, padj=-.3 , at=ats , lab=labs )
      mtext( side=2, line=6.2, cex=2.3 , ifelse( zzz == 1, 'False Positive Rate', 'True Positive Rate' ) )
    }
    # One line per method and sig2 setting; try() skips methods with no data.
    # Selected methods get thicker lines.
    for( meth in rev(meths) )
    for( sig2.i in 1:2 )
    try({
      lines( x, allys[meth,,sig2.i], col=cols[meth], lty=c(1,2)[sig2.i], lwd=ifelse( meth %in% c( 'oracle++', 'mvgmm', 'mvgmm+', 'mvgmmq+' ), 7, 3 ) )
      points( x, allys[meth,,sig2.i], col=cols[meth], pch=18-sig2.i, cex=6 )
    })
  }
  plot.new()
}
|
42fc706058b372252951986149ed690d9cde3c85
|
77c2026e3a751eee972f0abf470d2ffed974535f
|
/R/print.SingleEnvAnalysis.R
|
d35769465f4f3dc9f62df6717e76f7d7d71e24a0
|
[] |
no_license
|
shingocat/PBTools
|
db05b587f600ab6f399cd1b9adb7d1c1960ce65a
|
880c110bc89ec24d7902a89c3faa56601da7f8ac
|
refs/heads/master
| 2020-06-01T03:11:36.934855
| 2015-06-08T06:53:17
| 2015-06-08T06:53:17
| 25,959,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,490
|
r
|
print.SingleEnvAnalysis.R
|
#' Print method for objects of class "SingleEnvAnalysis".
#'
#' Arguments:
#'   data  - an object of class "SingleEnvAnalysis" produced by the
#'           single-environment analysis routines.
#'   level - verbosity: 1 prints a concise report (variance table and
#'           adjusted genotype means per environment); 2 prints the full
#'           report (data summary, variance components, genotype
#'           significance tests, LSMeans and SED tables).
#'
#' Called for its side effect of printing to the console.
print.SingleEnvAnalysis <- function
(
  data,
  level = 1
)
{
  ## ---- validate arguments -------------------------------------------
  ## (error message fixed: it previously named the wrong class,
  ##  "SingleSiteAnalysis", while the check is for "SingleEnvAnalysis")
  if(!inherits(data, "SingleEnvAnalysis"))
    stop("\tError: The argument of data must be of class SingleEnvAnalysis!\n");
  if(missing(level))
    level <- 1;
  if(!is.numeric(level))
    stop("\tError: The argument of level should be of value 1 or 2 where 1 is for concise details, and 2 is for precise details.\n");
  if(length(level) != 1)
    stop("\tError: The argument of level should be of length 1.\n");
  if(!any(level %in% c(1,2)))
    stop("\tError: The argument of level only accepted the integer 1 or 2. \n");
  if(level == 1){
    ## ---- concise report ---------------------------------------------
    cat("Single Environment Analysis:");
    cat("\n");
    ## seq_len/seq_along are safe when there are zero traits/environments
    ## (1:0 would iterate over c(1, 0))
    for(i in seq_len(data$trait.number))
    {
      trait.name <- data$traits[[i]]$name;
      cat("The phenotypic trait is ", trait.name, ".\n", sep = "");
      for(j in seq_along(data$traits[[i]]$analysis$sea$envs))
      {
        env.name <- data$traits[[i]]$analysis$sea$envs[[j]]$name;
        cat("*******************Start of ", env.name, " environment********************\n", sep = "");
        cat("The variance table:\n");
        print(data$traits[[i]]$analysis$sea$envs[[j]]$varcomp.table, row.names = FALSE);
        cat("\n");
        cat("Adjust Means of each genotype:\n");
        print(data$traits[[i]]$analysis$sea$envs[[j]]$sum.out, row.names = FALSE);
        cat("*******************End of ", env.name, " environment********************\n", sep = "");
        cat("\n");
      }#--- end stmt of for(j in seq_along(...)) ---#
      cat("\n\n");
    } #--- end stmt of for(i in seq_len(data$trait.number)) ---#
  }else
  {
    ## ---- detailed report --------------------------------------------
    for(i in seq_len(data$trait.number))
    {
      ## traits that were never analysed carry no $sea component; skip them
      if(is.null(data$traits[[i]]$analysis$sea))
      {
        next;
      } else
      {
        trait.name <- data$traits[[i]]$name;
        cat(rep("=",times=40), sep="");
        cat("\n");
        cat("RESPONSE VARIABLE: ", trait.name, "\n", sep="");
        cat(rep("=",times=40), sep="");
        cat("\n");
        cat("\n");
        for(j in seq_along(data$traits[[i]]$analysis$sea$envs))
        {
          env.name <- data$traits[[i]]$analysis$sea$envs[[j]]$name;
          cat(rep("-",times=40), sep="");
          cat("\n");
          cat("ANALYSIS FOR: Env = ", env.name, "\n", sep="");
          cat(rep("-",times=40), sep="");
          cat("\n");
          cat("\n");
          cat("Data Summary:\n");
          cat("\n");
          ## observation counts live on the trait's env entry, not under $sea
          cat("Number of observations read: ", data$traits[[i]]$envs[[j]]$obsread, "\n", sep = "");
          cat("Number of observations used: ", data$traits[[i]]$envs[[j]]$obsused, "\n", sep = "");
          print(data$traits[[i]]$analysis$sea$envs[[j]]$factor.summary, row.names=FALSE);
          cat("\n");
          cat("Variance Components Table: \n");
          cat("\n");
          print(data$traits[[i]]$analysis$sea$envs[[j]]$varcomp.table, row.names = FALSE);
          cat("\n\n");
          cat("Testing for the Significance of Genotypic Effect: \n");
          cat("\n");
          print(data$traits[[i]]$analysis$sea$envs[[j]]$m2Vm1, row.names = FALSE);
          cat("\n")
          print(data$traits[[i]]$analysis$sea$envs[[j]]$geno.test,row.names = FALSE);
          cat("\n");
          ## typo fixed in the heading below ("Genotoype" -> "Genotype")
          cat("Genotype LSMean and Standard Errors:\n");
          cat("\n");
          print(data$traits[[i]]$analysis$sea$envs[[j]]$summary.statistic, row.names = FALSE);
          cat("\n");
          cat("Standard Error of The Difference (SED):\n");
          print(data$traits[[i]]$analysis$sea$envs[[j]]$sedTable, row.names = FALSE);
          cat("\n");
        } # end stmt of for(j in seq_along(...))
      }# end stmt of if(is.null(data$traits[[i]]$analysis$sea))
    }# end stmt of for(i in seq_len(data$trait.number))
  }
}
|
92e4d9101a0d5b65f1f0eb0bfc24f308c2c78cd7
|
639e8fc3a59d319dfd1ea257570e4f4d71a55dd7
|
/Course3_DataClean/week 3/swirl managing dplyr.R
|
a0c9acbe84215d836e6e100fcf6e53c39e88efc9
|
[] |
no_license
|
Bttobar/Bttobar-JH_datascience
|
fc4bdac65dba3948c575ce5130ad48fc7dcf2e1f
|
40bfbb305eafaf597c3b5fbc2fb5477e5e8082d1
|
refs/heads/master
| 2023-01-08T00:37:27.622459
| 2020-11-02T02:16:56
| 2020-11-02T02:16:56
| 291,361,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,760
|
r
|
swirl managing dplyr.R
|
# swirl course : Manipulating data with dplyr
# NOTE(review): this is a transcript of an interactive swirl lesson.
# `path2csv` is supplied by the swirl lesson environment, so the script
# does not run standalone -- TODO confirm before reusing.
mydf <- read.csv(path2csv,stringsAsFactors = FALSE)
dim(mydf)
head(mydf)
library(dplyr)
packageVersion("dplyr")
cran <- tbl_df(mydf) #create a dataframe
rm("mydf") # remove the raw data.frame (the tbl copy is kept)
print(cran) # much more informative printout than a plain data.frame
# "Introduction to dplyr" vignette written by the package authors, "The dplyr philosophy is to have small
# | functions that each do one thing well." Specifically, dplyr supplies five 'verbs' that cover most fundamental data
# | manipulation tasks: select(), filter(), arrange(), mutate(), and summarize().
# with select() we don't have to keep writing data$column, and the columns print in the order we listed them.
select(cran, r_arch:country)
select(cran, country:r_arch)
select(cran, -time)
# negative indices: note the difference between -5:20 and -(5:20)
-5:20
-(5:20)
select(cran, -(X:size))
# filter() keeps rows matching the given conditions
filter(cran,package=="swirl")
filter(cran, r_version == "3.1.1", country == "US")
filter(cran, r_version <= "3.0.2", country == "IN")
filter(cran, country == "US" | country == "IN")
filter(cran, size>100500, r_os == "linux-gnu")
# NA handling: comparisons with NA yield NA, so use is.na()
is.na(c(3, 5, NA, 10))
!is.na(c(3, 5, NA, 10))
filter(cran,!is.na(r_version) )
#order rows ==> arrange
cran2 <- select(cran,size:ip_id)
arrange(cran2,ip_id)
arrange(cran2,desc(ip_id))
arrange(cran2, package, ip_id) # when multiple rows tie on package, ip_id is the tie-breaker
arrange(cran2,country,desc(r_version),ip_id)
cran3 <- select(cran,ip_id,package,size)
# It's common to create a new variable based on the value of one or more variables already in a dataset. The mutate()
# | function does exactly this.
mutate(cran3,size_mb = size /2^20)
mutate(cran3, size_mb = size / 2^20, size_gb = size_mb / 2^10)
mutate(cran3,correct_size = size + 1000)
# summarize() collapses the data frame to a single aggregated row
summarize(cran, avg_bytes = mean(size))
|
c4ab82b93911f378821f162dbb2192cafcd7dbfd
|
471c1214a8b8d165b896a6d6baca6b50cab4b397
|
/wps/create_wrfinput.R
|
39300e2b4f701646c1172a262dcfd66dd1a5a164
|
[] |
no_license
|
NCAR/wrf_hydro_docker
|
2ae07010ec40927ba375f5171d05fd66d2cc99d4
|
3b4db4a2e7174d2f512ade0d4553e374645eec13
|
refs/heads/main
| 2023-02-17T19:21:29.954110
| 2022-11-30T21:06:20
| 2022-12-02T15:26:38
| 103,539,713
| 19
| 38
| null | 2023-08-30T04:05:45
| 2017-09-14T14:04:14
|
Python
|
UTF-8
|
R
| false
| false
| 9,077
|
r
|
create_wrfinput.R
|
#!/usr/bin/env Rscript
############################################################
# R script to create wrfinput file from geogrid.
# Usage: Rscript create_wrfinput.R --geogrid <geo_em file> [options]
# Developed: 07/09/2017, A. Dugger
# Mirrors the HRLDAS routines here:
# https://github.com/NCAR/hrldas-release/blob/release/HRLDAS/HRLDAS_forcing/lib/module_geo_em.F
# from M. Barlage.
# Modified:
# - Added command line arguments (J. Mills)
# - Added treatment for dealing with 0 SOILTEMP values over water cells
############################################################
library(optparse)
library(ncdf4)

# Command-line interface. Defaults mirror the original hard-coded values.
# (Fixes: the --laimo help text was a copy-paste of the --outfile text,
# and the metavar fields now match the actual option types.)
option_list = list(
  make_option(c("--geogrid"), type="character", default=NULL,
              help="Path to input geogrid file", metavar="character"),
  make_option(c("--outfile"), type="character", default="wrfinput_d01.nc",
              help="Output file name [default= %default]", metavar="character"),
  make_option(c("--filltyp"), type="integer", default=3, help="Soil type to use as a fill value in case
              conflicts between soil water and land cover water cells. If the script encounters a cell
              that is classified as land in the land use field (LU_INDEX) but is classified as a water
              soil type, it will replace the soil type with the value you specify. Ideally there are
              not very many of these, so you can simply choose the most common soil type in your domain.
              Alternatively, you can set to a bad value (e.g., -8888) to see how many of these conflicts
              there are. If you do this DO NOT RUN THE MODEL WITH THESE BAD VALUES. Instead, fix them
              manually with a neighbor fill or similar fill algorithm. [default= %default]", metavar="integer"),
  make_option(c("--laimo"), type="integer", default=8,
              help="Month (1-12) of the LAI12M climatology used to initialize LAI [default= %default]",
              metavar="integer"),
  make_option(c("--missfloat"), type="numeric", default=(-1.e+36),
              help="Missing values to use when defining netcdf file for floats [default= %default]",
              metavar="numeric"),
  make_option(c("--missint"), type="integer", default=(-9999),
              help="Missing values to use when defining netcdf file for integers [default= %default]",
              metavar="integer")
);
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);

# --geogrid is the only required argument.
# (Fix: the stop message previously ended with a literal ".n" instead of a newline.)
if (is.null(opt$geogrid)){
  print_help(opt_parser)
  stop("At least one argument must be supplied (input file).\n", call.=FALSE)
}

#### Input geogrid:
geoFile <- opt$geogrid
#### Output wrfinput file:
wrfinFile <- opt$outfile
#### Soil type to use as a fill value in case conflicts between soil water and land cover water cells:
# If the script encounters a cell that is classified as land in the land use field (LU_INDEX)
# but is classified as a water soil type, it will replace the soil type with the value you
# specify below. Ideally there are not very many of these, so you can simply choose the most
# common soil type in your domain. Alternatively, you can set to a "bad" value (e.g., -8888)
# to see how many of these conflicts there are. If you do this DO NOT RUN THE MODEL WITH THESE
# BAD VALUES. Instead, fix them manually with a neighbor fill or similar fill algorithm.
fillsoiltyp <- opt$filltyp
#### Month to use for LAI initialization:
# This may or may not be used depending on your NoahMP options.
laimo <- opt$laimo
#### Missing values to use when defining netcdf file:
missFloat <- opt$missfloat
missInt <- opt$missint
#######################################################
#   Do not update below here.
#######################################################
# Step 1: subset the geogrid file to the variables wrfinput needs.
# Requires the NCO command-line tools (ncks, ncrename) on the PATH.
# Create initial file
cmd <- paste0("ncks -O -4 -v XLAT_M,XLONG_M,HGT_M,SOILTEMP,LU_INDEX,MAPFAC_MX,MAPFAC_MY,GREENFRAC,LAI12M,SOILCTOP ", geoFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
# Variable name adjustments (geogrid names -> wrfinput names)
cmd <- paste0("ncrename -O -v HGT_M,HGT ", wrfinFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
cmd <- paste0("ncrename -O -v XLAT_M,XLAT ", wrfinFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
cmd <- paste0("ncrename -O -v XLONG_M,XLONG ", wrfinFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
cmd <- paste0("ncrename -O -v LU_INDEX,IVGTYP ", wrfinFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
# Now create and add new vars
ncid <- nc_open(wrfinFile, write=TRUE)
# Dimensions (reuse existing dims; define the 4-layer soil dimension)
sndim <- ncid$dim[['south_north']]
wedim <- ncid$dim[['west_east']]
soildim <- ncdim_def("soil_layers_stag", "", vals=1:4, create_dimvar=FALSE)
timedim <- ncid$dim[['Time']]
# Global attributes carried over from the geogrid file (category codes)
gridid <- ncatt_get(ncid, 0)[["GRID_ID"]]
iswater <- ncatt_get(ncid, 0)[["ISWATER"]]
isoilwater <- ncatt_get(ncid, 0)[["ISOILWATER"]]
isurban <- ncatt_get(ncid, 0)[["ISURBAN"]]
isice <- ncatt_get(ncid, 0)[["ISICE"]]
mminlu <- ncatt_get(ncid, 0)[["MMINLU"]]
# New Variables
# SOILTEMP will show 0 value over water. This can cause issues when varying land cover fields
# from default. Setting to mean non-zero values for now to have something reasonable.
soilt <- ncvar_get(ncid, "SOILTEMP")
soilt[soilt < 100] <- NA
soilt_mean <- mean(c(soilt), na.rm=TRUE)
soilt[is.na(soilt)] <- soilt_mean
# Deep soil temperature: adjust to terrain height at 0.0065 K per m.
tmn <- soilt - 0.0065 * ncvar_get(ncid, "HGT")
# Land mask derived from land use: 1 = land, 2 = water.
use <- ncvar_get(ncid, "IVGTYP")
msk <- use
msk[msk == iswater] <- (-9999)
msk[msk >= 0] <- 1
msk[msk < 0] <- 2
# Sea ice initialized to zero everywhere.
ice <- msk * 0.0
# Pick the dominant soil category per cell from the fractional SOILCTOP field.
soil_top_cat <- ncvar_get(ncid, "SOILCTOP")
idim <- dim(soil_top_cat)[1]
jdim <- dim(soil_top_cat)[2]
kdim <- dim(soil_top_cat)[3]
soi <- msk * 0.0
for (i in 1:idim) {
  for (j in 1:jdim) {
   dominant_value = soil_top_cat[i,j,1]
   dominant_index = 1
   if ( msk[i,j] < 1.5 ) {
     # Land cell: choose the non-water category with the largest fraction.
     for (k in 2:kdim) {
      if ( ( k != isoilwater ) & ( soil_top_cat[i,j,k] > dominant_value ) ) {
        dominant_value <- soil_top_cat[i,j,k]
        dominant_index <- k
      }
     }
     # No meaningful soil data: fall back to category 8
     # (presumably a loam class -- TODO confirm against the soil category table).
     if ( dominant_value < 0.01 ) dominant_index <- 8
   } else {
     dominant_index <- isoilwater
   }
   soi[i,j] <- dominant_index
  }
}
# Force consistency between land/water mask and soil type; land cells that
# still carry the water soil type get the user-chosen fill type.
soi[use == iswater] <- isoilwater
soi[use != iswater & soi == isoilwater] <- fillsoiltyp
# Vegetation fraction (%) and its per-cell annual min/max.
veg <- 100.0 * ncvar_get(ncid, "GREENFRAC")
vegmin <- apply(veg, c(1,2), min)
vegmax <- apply(veg, c(1,2), max)
# LAI for the chosen initialization month.
lai <- ncvar_get(ncid, "LAI12M")
lai <- lai[,,laimo]
# Simple cold-start state: no canopy water or snow, uniform skin temperature.
canwat <- msk * 0.0
snow <- msk * 0.0
tsk <- msk * 0.0 + 290.0
# Uniform 4-layer soil moisture and temperature profiles.
smois <- array(rep(msk, 4), dim=c(dim(msk),4))
smois[,,1] <- 0.20
smois[,,2] <- 0.21
smois[,,3] <- 0.25
smois[,,4] <- 0.27
tslb <- array(rep(msk, 4), dim=c(dim(msk),4))
tslb[,,1] <- 285.0
tslb[,,2] <- 283.0
tslb[,,3] <- 279.0
tslb[,,4] <- 277.0
# Soil layer node depths and thicknesses (m).
zs <- c(0.05, 0.25, 0.7, 1.5)
dzs <- c(0.1, 0.3, 0.6, 1.0)
# Define and place new vars
vardef <- ncvar_def("TMN", "K", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "TMN", tmn)
vardef <- ncvar_def("XLAND", "", list(wedim, sndim, timedim), missval=missInt, prec='integer')
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "XLAND", msk)
vardef <- ncvar_def("SEAICE", "", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "SEAICE", ice)
vardef <- ncvar_def("ISLTYP", "", list(wedim, sndim, timedim), missval=missInt, prec='integer')
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "ISLTYP", soi)
vardef <- ncvar_def("SHDMAX", "%", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "SHDMAX", vegmax)
vardef <- ncvar_def("SHDMIN", "%", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "SHDMIN", vegmin)
vardef <- ncvar_def("LAI", "m^2/m^2", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "LAI", lai)
vardef <- ncvar_def("CANWAT", "kg/m^2", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "CANWAT", canwat)
vardef <- ncvar_def("SNOW", "kg/m^2", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "SNOW", snow)
vardef <- ncvar_def("TSK", "K", list(wedim, sndim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "TSK", tsk)
vardef <- ncvar_def("SMOIS", "m^3/m^3", list(wedim, sndim, soildim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "SMOIS", smois)
vardef <- ncvar_def("TSLB", "K", list(wedim, sndim, soildim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "TSLB", tslb)
vardef <- ncvar_def("ZS", "m", list(soildim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "ZS", zs)
vardef <- ncvar_def("DZS", "m", list(soildim, timedim), missval=missFloat)
ncid <- ncvar_add(ncid, vardef)
ncvar_put(ncid, "DZS", dzs)
nc_close(ncid)
# Remove extra vars (geogrid-only fields consumed above)
cmd <- paste0("ncks -O -x -v SOILTEMP,GREENFRAC,LAI12M,SOILCTOP ", wrfinFile, " ", wrfinFile)
print(cmd)
system(cmd, intern=FALSE)
quit("no")
|
d3bb87c0717ae11fc31191c7b3e23a2978dfa5ed
|
d26bf0f6b95a744ac432981f67c236b9c0768237
|
/R/02-wine-clusters.R
|
484bb11880614f9fc6026da9d4779ae561182158
|
[] |
no_license
|
yjallan/Unsupervised-Machine-Learning
|
221f4bb70407400fa85eaa96597a8e5c69bb299b
|
80fc238b821a8673d50d94da9c2227c866ff111a
|
refs/heads/master
| 2020-04-12T20:48:41.628137
| 2018-12-21T18:49:38
| 2018-12-21T18:49:38
| 162,747,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,391
|
r
|
02-wine-clusters.R
|
# 02 wine data
# Loads the red-wine-quality data and builds four reduced feature sets
# (PCA, ICA, random projection, RF feature selection) used by the
# clustering experiments below.
library(caret)
library(plyr)
library(dplyr)
library(magrittr)
library(reshape2)
library(randomForest)
library(mlbench)
library(dendextend)
library(mclust)
library(gridExtra)
source("R/random_projection_gauss.R")
source("R/clustergram.R")
source("R/clustergram-had.R")
source("R/clustergram-hdm.R")
wq <- read.table(file.path(getwd(), "data", "winequality", "winequality-red.csv"),
                 sep=";",
                 header=TRUE)
# discard obs less than 5 and greater than 7, since there isn't enough information
wq <- na.omit(wq)
wq <- wq[wq$quality %in% c(5,6,7),]
wq$quality <- factor(wq$quality, ordered=TRUE)
# wq_nl: predictors only ("no label"); wq_pp: centered/scaled copy
wq_nl <- dplyr::select(wq, -quality)
wq_pp <- preProcess(wq_nl) %>% predict(wq_nl)
#' PCA
wq_pca <- prcomp(wq_nl, retx=TRUE, scale.=TRUE, center=TRUE)
wq_data <- data.frame(PC=1:length(wq_pca$sdev), Variance=wq_pca$sdev^2)
# cumulative proportion of variance explained, for scree-style inspection
wq_propvar <- (wq_pca$sdev^2/sum(wq_pca$sdev^2)) %>% cumsum
wq_propvar <- data.frame(x=1:length(wq_propvar), cumvar=wq_propvar)
## all Principal componenets as X's and Quality as Y
wqpca=cbind(as.data.frame(wq_pca$x), quality=wq$quality)
##
#' ICA
wq_ica = fastICA::fastICA(wq_nl, n.comp=11, verbose=TRUE, row.norm=TRUE)
## all ICA componenets as X's and Quality as Y
wqica=cbind(as.data.frame(wq_ica$S), quality=wq$quality)
#finding kurtosis of all rows except quality
# components with squared excess kurtosis < 1 are close to Gaussian and dropped
wqn <- sapply(wqica[,!(names(wqica) %in% c("quality"))], function(x) (e1071::kurtosis(x))^2)
wqn <- melt(wqn)
wqn$name <- row.names(wqn)
wqn <- wqn[wqn$value < 1,]
# remove all near-Gaussian components listed in wqn
wqica <- wqica[,!(names(wqica) %in% wqn$name)]
#' random projections
# 100 random Gaussian projections to 8 dimensions; keep the one whose
# reconstruction (via pseudo-inverse) best matches the original data.
wq_rca <- Map(function(x) {
  gaussian_random_projection(wq_nl, 8)
}, 1:100)
# get the ones which immitate the result best.
wqrcadiff <- Map(function(x) {
  sum((wq_nl - (x$RP %*% MASS::ginv(x$R)))^2)
}, wq_rca) %>% melt
bestrca <- wqrcadiff %>% arrange(value) %>% head(1)
names(bestrca) <- c("value", "k")
wqrca <- cbind(as.data.frame(wq_rca[[bestrca$k]]$RP), quality=wq$quality)
# apply randomforest to get the mean gini, variable importance.
wq_rf <- randomForest(quality ~., wq) #applying RF
wqrf <- as.data.frame(varImp(wq_rf)) #getting variable Importance
wqrf$names <- row.names(wqrf)
wqrf <- wqrf %>% arrange(desc(Overall))
wqrf <- wqrf[,c("names", "Overall")]
wqrf.name <- wqrf$names[1:(length(wqrf$names)-2)] #keeping all except worst 2 variables
wqrf <- wq[,(names(wq) %in% c(wqrf.name, "quality"))] #creating the dataset for these features
# Fit Gaussian-mixture models with 1..13 components via mclust and
# return the fitted Mclust object (model selection happens inside Mclust).
gen_cluster_plots <- function(hdmdata) {
  Mclust(hdmdata, G = 1:13)
}
# find optimal through EM
# Fit EM mixture models on each feature representation.
wqnilem <- gen_cluster_plots(wq_nl)
wqpcaem <- gen_cluster_plots(wq_pca$x[,1:6])
wqicaem <- gen_cluster_plots(select(wqica, -quality))
wqrcaem <- gen_cluster_plots(select(wqrca, -quality))
wqrfem <- gen_cluster_plots(wq[,c(wqrf.name, "quality")])
# generate plots using ggplot...
# Reshape the BIC matrices (k x model-type) to long form for plotting.
wqnilembic <- wqnilem$BIC[,] %>% as.data.frame %>% add_rownames %>% melt("rowname") %>% na.omit
wqpcaembic <- wqpcaem$BIC[,] %>% as.data.frame %>% add_rownames %>% melt("rowname") %>% na.omit
wqicaembic <- wqicaem$BIC[,] %>% as.data.frame %>% add_rownames %>% melt("rowname") %>% na.omit
wqrcaembic <- wqrcaem$BIC[,] %>% as.data.frame %>% add_rownames %>% melt("rowname") %>% na.omit
wqrfembic <- wqrfem$BIC[,] %>% as.data.frame %>% add_rownames %>% melt("rowname") %>% na.omit
#wqxxxembic$z gives a matrix whose [i,k]th entry is the probability that
#observation i in the test data belongs to the kth class.
c(dim(wqnilem$z)[2], dim(wqpcaem$z)[2], dim(wqicaem$z)[2],
  dim(wqrcaem$z)[2], dim(wqrfem$z)[2])
# 13 9 12 13 7
#number of optimal clusters for each method respectively
# rm. to draw train validate, test for the cluster plots.
# 6-fold split: folds 1-2 train, fold 3 validation, fold 4 test
# (folds 5-6 are unused here).
folds <- createFolds(wq$quality, k = 6, list = TRUE, returnTrain = FALSE)
train_ind <- c(folds$Fold1, folds$Fold2)
valid_ind <- c(folds$Fold3)
test_ind <- c(folds$Fold4)
#assigns a cluster to each observation by minimizing distance using kmeans
# Assign each observation in `data` to the nearest centre of a fitted
# stats::kmeans object `km` (Euclidean distance).
#
#   km:   result of stats::kmeans(); km$centers has one centre per row.
#   data: matrix/data.frame of new observations with the same columns
#         the model was fitted on.
#
# Returns an integer vector of cluster indices, one per row of `data`.
predict.kmeans <- function(km, data) {
  k <- nrow(km$centers)
  # Cross-distance matrix: rows = new observations, cols = centres.
  # drop = FALSE keeps the matrix shape when `data` has a single row
  # (without it, apply() received a plain vector and errored).
  d <- as.matrix(dist(rbind(km$centers, data)))[-(1:k), 1:k, drop = FALSE]
  apply(d, 1, which.min)
}
# Score a fitted k-means model against labelled wine-quality data.
#
#   mclustobj: a stats::kmeans fit (the name is misleading -- this is the
#              k-means scorer; wq_mclust below is the Mclust counterpart).
#   traindata / validdata: data frames containing the predictor columns
#              plus a `quality` factor column.
#
# Each training cluster is mapped to its most frequent quality label,
# then validdata rows get a cluster via predict.kmeans and that cluster's
# label is attached as `pred_quality`.
# Returns a data frame with columns: quality, clust, pred_quality.
wq_kmeans <- function(mclustobj, traindata, validdata) {
  # assign the clusters to the traindata...
  # determine cluster to prediction...
  clus <- predict.kmeans(mclustobj, dplyr::select(traindata, -quality))
  info <- data.frame(quality=traindata$quality, clust=clus)
  info$clust %<>% as.factor
  # Majority quality label per cluster. NOTE(review): top_n(1) keeps ALL
  # tied rows, so a tie can fan out rows in the left_join below -- confirm
  # this is acceptable.
  mappings <- info %>% group_by(clust, quality) %>% tally %>% group_by(clust) %>% top_n(1) %>% dplyr::select(clust, quality)
  names(mappings) <- c("clust", "pred_quality")
  valid_pred <- predict.kmeans(mclustobj, dplyr::select(validdata, -quality))
  valid_pred %<>% as.factor
  valid_pred <- data.frame(quality=validdata$quality, clust=valid_pred)
  valid_pred$clust %<>% as.factor
  # join on `clust` to attach each row's predicted quality label
  fin_data <- valid_pred %>% left_join(mappings)
  return(fin_data)
  #pred_data <- predict.kmeans(kclust, dplyr::(traindata, -quality))
}
# Mclust (EM mixture) counterpart of wq_kmeans: maps each training
# mixture component to its majority quality label, then labels validdata
# via the component each row is classified into.
#
#   mclustobj: a fitted mclust::Mclust model.
#   traindata / validdata: data frames of predictors plus a `quality`
#              factor column.
#
# Returns a data frame with columns: quality, clust, pred_quality.
wq_mclust <- function(mclustobj, traindata, validdata) {
  # assign the clusters to the traindata...
  # determine cluster to prediction...
  clus <- predict(mclustobj, dplyr::select(traindata, -quality))
  info <- data.frame(quality=traindata$quality, clust=clus$classification)
  info$clust %<>% as.factor
  # Majority label per component. NOTE(review): as in wq_kmeans, top_n(1)
  # keeps ties, which can duplicate rows through the left_join.
  mappings <- info %>% group_by(clust, quality) %>% tally %>% group_by(clust) %>% top_n(1) %>% dplyr::select(clust, quality)
  names(mappings) <- c("clust", "pred_quality")
  valid_pred <- predict(mclustobj, dplyr::select(validdata, -quality))
  valid_pred <- valid_pred$classification %>% as.factor
  valid_pred <- data.frame(quality=validdata$quality, clust=valid_pred)
  valid_pred$clust %<>% as.factor
  # join on `clust` to attach each row's predicted quality label
  fin_data <- valid_pred %>% left_join(mappings)
  return(fin_data)
}
# Labelled copies of each feature representation for train/valid/test scoring.
wq_nl1 <- cbind(wq_nl, quality=wq$quality)
wqpca1 <- wqpca
wqrf1 <- wq[,c(wqrf.name, "quality")]
# For each k, fit k-means and EM on every representation and record
# train/validation/test accuracy of the cluster->label mapping.
# Column naming: at_=train, av_=validation, ate_=test; _em suffix = EM.
optimk <- data.frame()
for (k in 2:13) {
#for (k in 2:2) {
  wq_nl_cl <- kmeans(dplyr::select(wq_nl1[train_ind,], -quality), k)
  wqpca_cl <- kmeans(dplyr::select(wqpca1[train_ind,], -quality), k)
  wqica_cl <- kmeans(dplyr::select(wqica[train_ind,], -quality), k)
  wqrca_cl <- kmeans(dplyr::select(wqrca[train_ind,], -quality), k)
  wqrf_cl <- kmeans(dplyr::select(wqrf1[train_ind,], -quality), k)
  # progress markers (Mclust fits below are slow)
  print(k)
  print("AA")
  wq_nl_em <- Mclust(dplyr::select(wq_nl1[train_ind,], -quality), G=k)
  wqpca_em <- Mclust(dplyr::select(wqpca1[train_ind,], -quality), G=k)
  wqica_em <- Mclust(dplyr::select(wqica[train_ind,], -quality), G=k)
  wqrca_em <- Mclust(dplyr::select(wqrca[train_ind,], -quality), G=k)
  wqrf_em <- Mclust(dplyr::select(wqrf1[train_ind,], -quality), G=k)
  print("BB")
  # k-means scoring on train/validation/test splits
  train_nl_kmeanscore <- wq_kmeans(wq_nl_cl, wq_nl1[train_ind,], wq_nl1[train_ind,])
  valid_nl_kmeanscore <- wq_kmeans(wq_nl_cl, wq_nl1[train_ind,], wq_nl1[valid_ind,])
  test_nl_kmeanscore <- wq_kmeans(wq_nl_cl, wq_nl1[train_ind,], wq_nl1[test_ind,])
  print("CC")
  train_pca_kmeanscore <- wq_kmeans(wqpca_cl, wqpca1[train_ind,], wqpca1[train_ind,])
  valid_pca_kmeanscore <- wq_kmeans(wqpca_cl, wqpca1[train_ind,], wqpca1[valid_ind,])
  test_pca_kmeanscore <- wq_kmeans(wqpca_cl, wqpca1[train_ind,], wqpca1[test_ind,])
  train_ica_kmeanscore <- wq_kmeans(wqica_cl, wqica[train_ind,], wqica[train_ind,])
  valid_ica_kmeanscore <- wq_kmeans(wqica_cl, wqica[train_ind,], wqica[valid_ind,])
  test_ica_kmeanscore <- wq_kmeans(wqica_cl, wqica[train_ind,], wqica[test_ind,])
  train_rca_kmeanscore <- wq_kmeans(wqrca_cl, wqrca[train_ind,], wqrca[train_ind,])
  valid_rca_kmeanscore <- wq_kmeans(wqrca_cl, wqrca[train_ind,], wqrca[valid_ind,])
  test_rca_kmeanscore <- wq_kmeans(wqrca_cl, wqrca[train_ind,], wqrca[test_ind,])
  train_rf_kmeanscore <- wq_kmeans(wqrf_cl, wqrf1[train_ind,], wqrf1[train_ind,])
  valid_rf_kmeanscore <- wq_kmeans(wqrf_cl, wqrf1[train_ind,], wqrf1[valid_ind,])
  test_rf_kmeanscore <- wq_kmeans(wqrf_cl, wqrf1[train_ind,], wqrf1[test_ind,])
  #mclust
  # EM scoring on the same splits
  train_nl_mclust <- wq_mclust(wq_nl_em, wq_nl1[train_ind,], wq_nl1[train_ind,])
  valid_nl_mclust <- wq_mclust(wq_nl_em, wq_nl1[train_ind,], wq_nl1[valid_ind,])
  test_nl_mclust <- wq_mclust(wq_nl_em, wq_nl1[train_ind,], wq_nl1[test_ind,])
  train_pca_mclust <- wq_mclust(wqpca_em, wqpca1[train_ind,], wqpca1[train_ind,])
  valid_pca_mclust <- wq_mclust(wqpca_em, wqpca1[train_ind,], wqpca1[valid_ind,])
  test_pca_mclust <- wq_mclust(wqpca_em, wqpca1[train_ind,], wqpca1[test_ind,])
  train_ica_mclust <- wq_mclust(wqica_em, wqica[train_ind,], wqica[train_ind,])
  valid_ica_mclust <- wq_mclust(wqica_em, wqica[train_ind,], wqica[valid_ind,])
  test_ica_mclust <- wq_mclust(wqica_em, wqica[train_ind,], wqica[test_ind,])
  train_rca_mclust <- wq_mclust(wqrca_em, wqrca[train_ind,], wqrca[train_ind,])
  valid_rca_mclust <- wq_mclust(wqrca_em, wqrca[train_ind,], wqrca[valid_ind,])
  test_rca_mclust <- wq_mclust(wqrca_em, wqrca[train_ind,], wqrca[test_ind,])
  train_rf_mclust <- wq_mclust(wqrf_em, wqrf1[train_ind,], wqrf1[train_ind,])
  valid_rf_mclust <- wq_mclust(wqrf_em, wqrf1[train_ind,], wqrf1[valid_ind,])
  test_rf_mclust <- wq_mclust(wqrf_em, wqrf1[train_ind,], wqrf1[test_ind,])
  ###
  # accuracy of the mapped labels vs. true quality (k-means)
  at_nl <- caret::confusionMatrix(train_nl_kmeanscore$quality, train_nl_kmeanscore$pred_quality)$overall['Accuracy']
  av_nl <- caret::confusionMatrix(valid_nl_kmeanscore$quality, valid_nl_kmeanscore$pred_quality)$overall['Accuracy']
  ate_nl <- caret::confusionMatrix(test_nl_kmeanscore$quality, test_nl_kmeanscore$pred_quality)$overall['Accuracy']
  at_pca <- caret::confusionMatrix(train_pca_kmeanscore$quality, train_pca_kmeanscore$pred_quality)$overall['Accuracy']
  av_pca <- caret::confusionMatrix(valid_pca_kmeanscore$quality, valid_pca_kmeanscore$pred_quality)$overall['Accuracy']
  ate_pca <- caret::confusionMatrix(test_pca_kmeanscore$quality, test_pca_kmeanscore$pred_quality)$overall['Accuracy']
  at_ica <- caret::confusionMatrix(train_ica_kmeanscore$quality, train_ica_kmeanscore$pred_quality)$overall['Accuracy']
  av_ica <- caret::confusionMatrix(valid_ica_kmeanscore$quality, valid_ica_kmeanscore$pred_quality)$overall['Accuracy']
  ate_ica <- caret::confusionMatrix(test_ica_kmeanscore$quality, test_ica_kmeanscore$pred_quality)$overall['Accuracy']
  at_rca <- caret::confusionMatrix(train_rca_kmeanscore$quality, train_rca_kmeanscore$pred_quality)$overall['Accuracy']
  av_rca <- caret::confusionMatrix(valid_rca_kmeanscore$quality, valid_rca_kmeanscore$pred_quality)$overall['Accuracy']
  ate_rca <- caret::confusionMatrix(test_rca_kmeanscore$quality, test_rca_kmeanscore$pred_quality)$overall['Accuracy']
  at_rf <- caret::confusionMatrix(train_rf_kmeanscore$quality, train_rf_kmeanscore$pred_quality)$overall['Accuracy']
  av_rf <- caret::confusionMatrix(valid_rf_kmeanscore$quality, valid_rf_kmeanscore$pred_quality)$overall['Accuracy']
  ate_rf <- caret::confusionMatrix(test_rf_kmeanscore$quality, test_rf_kmeanscore$pred_quality)$overall['Accuracy']
  ###
  # accuracy of the mapped labels vs. true quality (EM)
  at_nl_em <- caret::confusionMatrix(train_nl_mclust$quality, train_nl_mclust$pred_quality)$overall['Accuracy']
  av_nl_em <- caret::confusionMatrix(valid_nl_mclust$quality, valid_nl_mclust$pred_quality)$overall['Accuracy']
  ate_nl_em <- caret::confusionMatrix(test_nl_mclust$quality, test_nl_mclust$pred_quality)$overall['Accuracy']
  at_pca_em <- caret::confusionMatrix(train_pca_mclust$quality, train_pca_mclust$pred_quality)$overall['Accuracy']
  av_pca_em <- caret::confusionMatrix(valid_pca_mclust$quality, valid_pca_mclust$pred_quality)$overall['Accuracy']
  ate_pca_em <- caret::confusionMatrix(test_pca_mclust$quality, test_pca_mclust$pred_quality)$overall['Accuracy']
  at_ica_em <- caret::confusionMatrix(train_ica_mclust$quality, train_ica_mclust$pred_quality)$overall['Accuracy']
  av_ica_em <- caret::confusionMatrix(valid_ica_mclust$quality, valid_ica_mclust$pred_quality)$overall['Accuracy']
  ate_ica_em <- caret::confusionMatrix(test_ica_mclust$quality, test_ica_mclust$pred_quality)$overall['Accuracy']
  at_rca_em <- caret::confusionMatrix(train_rca_mclust$quality, train_rca_mclust$pred_quality)$overall['Accuracy']
  av_rca_em <- caret::confusionMatrix(valid_rca_mclust$quality, valid_rca_mclust$pred_quality)$overall['Accuracy']
  ate_rca_em <- caret::confusionMatrix(test_rca_mclust$quality, test_rca_mclust$pred_quality)$overall['Accuracy']
  at_rf_em <- caret::confusionMatrix(train_rf_mclust$quality, train_rf_mclust$pred_quality)$overall['Accuracy']
  av_rf_em <- caret::confusionMatrix(valid_rf_mclust$quality, valid_rf_mclust$pred_quality)$overall['Accuracy']
  ate_rf_em <- caret::confusionMatrix(test_rf_mclust$quality, test_rf_mclust$pred_quality)$overall['Accuracy']
  # one row of results per k
  rowdata <- cbind(k=k, at_nl=at_nl, av_nl=av_nl, ate_nl=ate_nl,
                   at_pca=at_pca, av_pca=av_pca, ate_pca=ate_pca,
                   at_ica=at_ica, av_ica=av_ica, ate_ica=ate_ica,
                   at_rca=at_rca, av_rca=av_rca, ate_rca=ate_rca,
                   at_rf=at_rf, av_rf=av_rf, ate_rf=ate_rf,
                   at_nl_em = at_nl_em , av_nl_em = av_nl_em , ate_nl_em = ate_nl_em ,
                   at_pca_em= at_pca_em, av_pca_em= av_pca_em, ate_pca_em= ate_pca_em,
                   at_ica_em= at_ica_em, av_ica_em= av_ica_em, ate_ica_em= ate_ica_em,
                   at_rca_em= at_rca_em, av_rca_em= av_rca_em, ate_rca_em= ate_rca_em,
                   at_rf_em = at_rf_em , av_rf_em = av_rf_em, ate_rf_em = ate_rf_em)
  optimk <- rbind(optimk, rowdata)
}
# Long-form results for plotting: one row per (k, metric).
plotoptimk <- melt(optimk, id=c("k"))
#kmeans...
# Accuracy-vs-k curves for the k-means cluster labellings, one plot per
# feature representation (train/validation/test as separate lines).
wqnilkm_t <- ggplot(plotoptimk[(plotoptimk$variable %in% c("at_nl", "av_nl", "ate_nl")),],
       aes(x=k, y=value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  ggtitle("Wine Quality - No Transformation") +
  ylab("Accuracy") +
  xlab("Number of Clusters (Kmeans)") +
  scale_colour_discrete(name="",
                        breaks=c("at_nl", "av_nl", "ate_nl"),
                        labels=c("Training", "Validation", "Testing"))+
  theme_bw()
wqpcakm_t <- ggplot(plotoptimk[(plotoptimk$variable %in% c("at_pca", "av_pca", "ate_pca")),],
       aes(x=k, y=value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  ggtitle("Wine Quality - PCA") +
  ylab("Accuracy") +
  xlab("Number of Clusters (Kmeans)") +
  scale_colour_discrete(name="",
                        breaks=c("at_pca", "av_pca", "ate_pca"),
                        labels=c("Training", "Validation", "Testing"))+
  theme_bw()
wqicakm_t <- ggplot(plotoptimk[(plotoptimk$variable %in% c("at_ica", "av_ica", "ate_ica")),],
       aes(x=k, y=value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  ggtitle("Wine Quality - ICA") +
  ylab("Accuracy") +
  xlab("Number of Clusters (Kmeans)") +
  scale_colour_discrete(name="",
                        breaks=c("at_ica", "av_ica", "ate_ica"),
                        labels=c("Training", "Validation", "Testing"))+
  theme_bw()
wqrcakm_t <- ggplot(plotoptimk[(plotoptimk$variable %in% c("at_rca", "av_rca", "ate_rca")),],
       aes(x=k, y=value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  ggtitle("Wine Quality - RP") +
  ylab("Accuracy") +
  xlab("Number of Clusters (Kmeans)") +
  scale_colour_discrete(name="",
                        breaks=c("at_rca", "av_rca", "ate_rca"),
                        labels=c("Training", "Validation", "Testing"))+
  theme_bw()
wqrfkm_t <- ggplot(plotoptimk[(plotoptimk$variable %in% c("at_rf", "av_rf", "ate_rf")),],
       aes(x=k, y=value, colour=variable, group=variable)) +
  geom_point() +
  geom_line() +
  ggtitle("Wine Quality - With Feature Selection") +
  ylab("Accuracy") +
  xlab("Number of Clusters (Kmeans)") +
  scale_colour_discrete(name="",
                        breaks=c("at_rf", "av_rf", "ate_rf"),
                        labels=c("Training", "Validation", "Testing"))+
  theme_bw()
# clustergram plots
# Run k-means for k = 2..13 over `ndata` and build a clustergram whose
# observations are ordered along the first principal component.
n_clustergram <- function(ndata) {
  fits <- many_kmeans(ndata, 2:13)
  pc1 <- predict(princomp(ndata))[, 1]
  clustergram(fits, pc1)
}
# Render a clustergram data frame `x` (as produced by clustergram())
# as a ggplot: ribbons trace each observation across cluster counts,
# and a point marks each cluster's mean position.
#
#   x:     data frame expected to have columns i (x position per k),
#          k, cluster, obs, y, adj and line.width -- assumed from the
#          sourced clustergram helpers; confirm against clustergram.R.
#   title: plot title.
#
# Returns a ggplot object.
create_clustergram <- function(x, title="") {
  # first row per x-position, used to label the k axis
  i_pos <- !duplicated(x$i)
  # per-(cluster, position) summary of adjusted positions
  means <- ddply(x, c("cluster", "i"), summarise,
    min = min(adj), max = max(adj), mean=mean(adj))
  return(ggplot(x, aes(i)) +
    geom_ribbon(aes(y = adj, group = obs, fill = y, ymin = adj - line.width/2, ymax = adj + line.width/2)) +
    #geom_errorbar(aes(ymin = min, ymax = max), data = means, width = 0.1) +
    geom_point(aes(y=mean, size=1), data=means)+
    scale_x_continuous("Cluster", breaks = x$i[i_pos], labels = x$k[i_pos]) +
    ggtitle(title) +
    labs(y = "Cluster average", colour = "Obs\nvalue", fill = "Obs\nvalue") +
    theme_bw() +
    theme(legend.position="none")
  )
}
# Clustergrams for each feature transformation of the wine-quality data
# (no transformation, PCA, ICA, random projection, RF-selected features).
wqnilkm <- n_clustergram(wq_nl) %>% create_clustergram("No Transformation")
wqpcakm <- n_clustergram(wq_pca$x[,1:6]) %>% create_clustergram("PCA")
wqicakm <- n_clustergram(select(wqica, -quality)) %>% create_clustergram("ICA")
wqrcakm <- n_clustergram(select(wqrca, -quality)) %>% create_clustergram("RP")
wqrfkm <- n_clustergram(wq[,(names(wq) %in% wqrf.name)]) %>% create_clustergram("RF")
# Two-column panel: clustergram (left) next to the matching k-means
# accuracy-vs-k curves (right), one row per transformation.
grid.arrange(wqnilkm, wqnilkm_t,
             wqpcakm, wqpcakm_t,
             wqicakm, wqicakm_t,
             wqrcakm, wqrcakm_t,
             wqrfkm, wqrfkm_t, ncol=2)
# EM
# Accuracy-vs-k curves (training/validation/testing) for one feature
# transformation, clustered with expectation-maximisation. `suffix`
# selects the plotoptimk series at_<suffix>, av_<suffix>, ate_<suffix>.
.em_accuracy_plot <- function(suffix, title) {
  series <- paste0(c("at", "av", "ate"), "_", suffix)
  ggplot(plotoptimk[plotoptimk$variable %in% series, ],
         aes(x = k, y = value, colour = variable, group = variable)) +
    geom_point() +
    geom_line() +
    ggtitle(title) +
    ylab("Accuracy") +
    xlab("Number of Clusters (EM)") +
    scale_colour_discrete(name = "",
                          breaks = series,
                          labels = c("Training", "Validation", "Testing")) +
    theme_bw()
}

wqnilem_p <- .em_accuracy_plot("nl_em", "Wine Quality - No Transformation")
wqpcaem_p <- .em_accuracy_plot("pca_em", "Wine Quality - PCA")
wqicaem_p <- .em_accuracy_plot("ica_em", "Wine Quality - ICA")
wqrcaem_p <- .em_accuracy_plot("rca_em", "Wine Quality - RP")
wqrfem_p <- .em_accuracy_plot("rf_em", "Wine Quality - With Feature Selection")
# BIC model-selection curves for EM under each feature transformation.
wqnilem_t <- ggplot(wqnilembic, aes(x=rowname, y=value, colour=variable, group=variable)) +
  geom_line() +
  theme_bw() +
  ggtitle("No Transformation")
wqpcaem_t <- ggplot(wqpcaembic, aes(x=rowname, y=value, colour=variable, group=variable)) + geom_line() + theme_bw() +
  ggtitle("PCA")
wqicaem_t <- ggplot(wqicaembic, aes(x=rowname, y=value, colour=variable, group=variable)) + geom_line() + theme_bw()+
  ggtitle("ICA")
wqrcaem_t <- ggplot(wqrcaembic, aes(x=rowname, y=value, colour=variable, group=variable)) + geom_line() + theme_bw()+
  ggtitle("RP")
wqrfem_t <- ggplot(wqrfembic, aes(x=rowname, y=value, colour=variable, group=variable)) + geom_line() + theme_bw() +
  ggtitle("RF")
# Two-column panel: BIC curve (left) next to the EM accuracy-vs-k
# curve (right), one row per transformation.
grid.arrange(wqnilem_t,wqnilem_p,
             wqpcaem_t,wqpcaem_p,
             wqicaem_t,wqicaem_p,
             wqrcaem_t,wqrcaem_p,
             wqrfem_t, wqrfem_p,
             ncol=2)
|
4bcd2cdd1c47da2e03ff5c40f27d1bac1a6391ba
|
9a9262ea8998a2a599c8c56c0c40dfb77b7f7af0
|
/man/sentence.Rd
|
73d7958fff00f4aa0c2c66b3255a33096ff6070e
|
[] |
no_license
|
KNIGHTTH0R/words
|
1dbf4cf165a713c9cfbb9d753a5f9a30c6e51389
|
3b19dc800b4b74a672a3b128493654a31e363b3b
|
refs/heads/master
| 2021-05-29T19:16:39.099239
| 2015-09-22T06:19:23
| 2015-09-22T06:19:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 696
|
rd
|
sentence.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/random-words-and-sentences.R
\name{sentence}
\alias{sentence}
\title{Random sentence with n words}
\usage{
sentence(w, lang = "en", maxchar = Inf)
}
\arguments{
\item{w}{Number of words in sentence.}
\item{maxchar}{Maximum number of characters per sentence. If the limit
would be exceeded, whole words (never partial words) are excluded from
the sentence.}
}
\value{
a string with n words (if length is not constrained)
}
\description{
Random sentence with n words
}
\examples{
sentence(5) # random sentence with 5 words
sentence(5, max=20) # random sentence cut off after 20 chars
}
\keyword{internal}
|
648f6ccd3190b1d393802c28527c672bfe83a334
|
3a3381595febf214f0f1ef5a661ffe6c5241fd2b
|
/cachematrix.R
|
7182d1f7243f6501d0718968fc82854f7e43b17a
|
[] |
no_license
|
esteban-g/ProgrammingAssignment2
|
2c51a46abc7d3a657ebf69f0ea20b48961cb904f
|
c97120e5e7e64b9c78e476cd7916b628a407d00a
|
refs/heads/master
| 2021-05-28T21:16:10.357722
| 2015-10-25T11:23:23
| 2015-10-25T11:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
cachematrix.R
|
## These functions implement the R analogue of a Java "bean":
## "makeCacheMatrix" exposes its properties through getter and setter
## methods, and "cacheSolve" takes such a bean (the makeCacheMatrix
## object) and computes the inverse of the wrapped matrix with the
## 'solve' function, caching the result.
## RBean - a "bean" in R containing a matrix object
# subfunctions: set, get, setinv, getinv, list
# Wrap a matrix together with a cache slot for its inverse ("bean"-style
# object). Returns a list of closures:
#   set(y)      - replace the stored matrix and drop any cached inverse
#   get()       - return the stored matrix
#   setinv(inv) - store a computed inverse in the cache
#   getinv()    - return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the cache
  }
  list(
    set = set,
    get = function() x,
    setinv = function(inv) cached_inverse <<- inv,
    getinv = function() cached_inverse
  )
}
## Returns a matrix that is the inverse of 'x', obtained from an RBean
# Return the inverse of the matrix wrapped by bean `x`, reusing the
# cached value when one has already been computed; otherwise compute it
# with solve() (extra arguments are forwarded) and store it in the bean.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("Getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinv(inv)
  inv
}
|
9d70d121b5eb2a22ba661707cd1ec45f73cbead0
|
6d01685a11eeaa00cc64c1839050f39c1b55be7c
|
/src/R/plot-flow-by-road.R
|
e7d86e97d640a0bf8a887ae172c10801fac3bc3e
|
[] |
no_license
|
TheYuanLiao/speed2flow
|
4b21a7811e497e90a97b654dd1b9dc35f7e84ad2
|
c7d6f01fbde30064f32c3e0f9f4d1f70d9fbaafd
|
refs/heads/master
| 2023-01-29T16:22:37.460688
| 2020-12-16T14:01:09
| 2020-12-16T14:01:09
| 297,274,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,057
|
r
|
plot-flow-by-road.R
|
# Title : Visualize the time history of flow
# Objective : Empirical vs fitted
# Created by: Yuan Liao
# Created on: 2020-11-20
library(dplyr)
library(ggplot2)
library(lubridate)
library(ggpubr)
library(latticeExtra)
library(viridisLite)
library(latex2exp)
# Fitted BPR parameters per road segment, plus the table of estimated
# and measured flows.
df_para <- read.csv('results/istanbul_params.csv')
df <- read.csv('dbs/flow_istanbul_m_estimated.csv')
# df <- df %>%
#   mutate(time = ymd_hms(time)) %>%
#   mutate(hour = hour(time))
# Stack the BPR-fitted flows and the sensor flows into one long table,
# tagged by `src`, so both sources share the same column names.
# NOTE(review): `time` is renamed to `hour` without conversion — the
# commented-out mutate above suggests it may already hold hour-of-day
# values; confirm against the input CSV.
df_fit <- df[,c("HERE_segID", "time", "speed_gt", "direction", "flow_fit")]
names(df_fit) <- c("HERE_segID", "hour", "speed", "direction", "flow")
df_fit$src <- 'BPR'
df_em <- df[,c("HERE_segID", "time", "speed_gt", "direction", "flow")]
names(df_em) <- c("HERE_segID", "hour", "speed", "direction", "flow")
df_em$src <- 'Sensor'
df <- rbind(df_fit, df_em)
# Hourly flow envelope (min/max) and median per segment, direction and
# source, used for the ribbon/line plot in rdplot().
df_day <- df %>%
  group_by(hour, HERE_segID, direction, src) %>%
  summarise(flow.min = min(flow),
            flow.max = max(flow),
            flow.ave = median(flow))
roads <- unique(df_para$HERE_segID)
# Two-panel figure for one HERE road segment `rd`:
#   top    - daily time history of flow (median line, min/max ribbon),
#            sensor data vs BPR fit, split by direction;
#   bottom - flow-vs-speed scatter, faceted by direction.
# Reads the globals df, df_day and df_para; saves the figure to
# figures/flow_road_<rd>.png as a side effect.
rdplot <- function(rd) {
  # Subset the hourly envelope, fitted parameters and raw records for
  # this segment.
  df_day_r <- df_day[df_day$HERE_segID == rd,]
  df_para_r <- df_para[df_para$HERE_segID == rd,]
  df_r <- df[df$HERE_segID == rd, ]
  # line names and optimal parameters
  # LaTeX annotations showing the fitted alpha/beta and R^2, one per
  # travel direction.
  para0 <- TeX(sprintf("$Direction{ }0:{ }\\alpha = %.2f,{ }\\beta = %.2f,{ }R^2 = %.2f$",
                       df_para_r[df_para_r$direction==0,]$alpha,
                       df_para_r[df_para_r$direction==0,]$beta,
                       df_para_r[df_para_r$direction==0,]$r2
  ))
  para1 <- TeX(sprintf("$Direction{ }1:{ }\\alpha = %.2f,{ }\\beta = %.2f,{ }R^2 = %.2f$",
                       df_para_r[df_para_r$direction==1,]$alpha,
                       df_para_r[df_para_r$direction==1,]$beta,
                       df_para_r[df_para_r$direction==1,]$r2
  ))
  # Top panel: ribbon spans min..max flow; line follows the median,
  # colour encodes direction and linetype encodes data source.
  g1 <- ggplot(data = df_day_r) +
    theme_minimal() +
    scale_x_discrete() +
    labs(title = paste('HERE road', rd), x = "Time of day", y = "Flow") +
    annotate("text", x=-Inf, y = Inf, label = para0, vjust=1, hjust=0) +
    annotate("text", x=-Inf, y = Inf, label = para1, vjust=2.5, hjust=0) +
    geom_ribbon(data = df_day_r, aes(x = as.factor(hour),
                                     ymin = flow.min,
                                     ymax = flow.max,
                                     group = interaction(as.factor(direction), src),
                                     fill=as.factor(direction)), color=NA, alpha = 0.05) +
    geom_line(aes(x=as.factor(hour),
                  y=flow.ave,
                  group = interaction(as.factor(direction), src),
                  color = as.factor(direction),
                  linetype=src), size=0.7)
  # Bottom panel: flow-vs-speed scatter, one facet per direction.
  g2 <- ggplot(data = df_r) +
    theme_minimal() +
    labs(x = "Speed", y = "Flow", subtitle = 'Direction') +
    geom_point(aes(x=speed,
                   y=flow,
                   color = src), size=0.3) +
    facet_grid(.~direction)
  G <- ggarrange(g1, g2, ncol = 1, nrow = 2)
  # Square figure, h inches per side.
  h <- 8
  ggsave(filename = paste0("figures/", "flow_road_", rd, ".png"), plot=G,
         width = h, height = h, unit = "in", dpi = 300)
}
#rd <- roads[1]
#rdplot(rd)
# Render one figure per road segment (lapply used for its side effects).
lapply(roads, rdplot)
|
d7d6491ee6143530f6303fdb0f50186b9e098a17
|
7256a3ad8c1d6c9c166b91284d27af6191ae40fa
|
/Make_Everything.R
|
7e28f205a322fba485bc2fcbb938266f2da8c6a9
|
[] |
no_license
|
loire/AMR_mada2020
|
ecfdf3aedc3999730126465b3cbe2ca24cc26a9b
|
ade4f5bd1f32990383059d6ba2a2d38ddf4dd942
|
refs/heads/master
| 2022-12-07T19:48:40.027665
| 2022-11-29T12:20:53
| 2022-11-29T12:20:53
| 244,861,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,961
|
r
|
Make_Everything.R
|
require(tidyverse)
require(ggtree)
require(randomcoloR)
require(rlist)
require(tidytree)
require(ape)
require(phytools)
require(ggnewscale)
require(ggpubr)
require(cowplot)
require(ggtext)
require(circlize)
require(flipPlots)
##############################################################################################
#
# Phylogenetic Analysis per Host
#
##############################################################################################
# first draw the basic tree
core = read.tree(file = "Fastree_sept_rooted.tree")
# Now get host and localisation data
meta = read.csv("dataset/dataset_sample_2.csv",header =T)
rownames(meta)= as.character(meta$sample)
meta %>% group_by(phylogroup) %>% count(sort =T) %>% ungroup %>% mutate(percentage = round(n/sum(n)*100,1)) %>% ggtexttable(rows = NULL)
metaHost = meta %>% mutate(Host = as.character(Host)) %>% select(Host)
colorHost = c("Human"="#000000",
"Cattle"="#8DD3C7",
"Chicken"="#e8eb34",
"Pig" = "#BEBADA",
"Dog" = "#FB8072",
"Cat" ="#BC80BD",
"Duck" = "#FDB462",
"Goose" = "#B3DE69",
"Horse" = "#FCCDE5",
"Turkey" = "#D9D9D9",
"Water" = "#80B1D3",
"Animal" = "#b14624")
names(colorHost)
p = ggtree(core,layout="circular",size = 0.2,branch.length = 0.01)
#p = ggtree(core,size = 0.2,branch.length = 0.01)
pg = as.data.frame(meta$phylogroup)
colnames(pg)="phylogroup"
rownames(pg) = rownames(meta)
pg$phylogroup= as.character(pg$phylogroup)
pg$node = rownames(pg)
p$data = p$data %>% left_join(pg,by=c("label"="node"))
phyancestors = p$data$phylogroup %>% unique
names(phyancestors)=phyancestors
phyancestors = phyancestors[1:7]
for(i in names(phyancestors) %>% unique)
{
p$data %>% filter(phylogroup==i) %>% select(label) %>% as.vector -> F_labels
phyancestors[i] = findMRCA(core,F_labels$label)
}
dataphy = phyancestors %>% as.data.frame %>% mutate(phylogroup = names(phyancestors))
names(dataphy) = c("id","phylogroup")
dataphy$id =as.integer(dataphy$id)
dataphy
colphy = RColorBrewer::brewer.pal(n=7,name="Set2")
dataphy$color = colphy
dataphy %>% glimpse
test = randomColor(count=7)
test
p = p + geom_highlight(data = dataphy,mapping=aes(node = id,fill = color)) +
scale_fill_brewer(palette = "Set2") + guides(fill=F)
p = p + new_scale_fill()
metaHost = metaHost %>% mutate(Host2= ifelse(Host=="Human","Human",ifelse(Host=="Water","Water","Animal")))
p = gheatmap(p,metaHost[,c("Host","Host2"), drop=F], width=0.2,font.size=8, colnames = F,
hjust = 0, offset = 0.0) + scale_fill_manual(name="Host",breaks = names(colorHost),values=colorHost) +
scale_color_manual(values=test) + guides(color = guide_legend(override.aes = list(size=3))) +
theme(legend.box = "vertical",legend.position = "top")
plot_phylogeny = p + geom_cladelab(data = dataphy,mapping=aes(node =id,label =phylogroup,color=color),
offset=0.13,offset.text=0.03,show.legend=F,fontsize=3) + scale_color_brewer(palette="Set2") +
guides(color=F) + theme(legend.key.size = unit(0.4, 'cm')) +
theme(
plot.margin=unit(c(0, 0, 0, 0), units="line"),
legend.position="top",
legend.margin=unit(0, "lines"))
plot_phylogeny
ggsave("phylogeny_Host.pdf")
#####################################################################################
# Add genetic distance between Hosts
#####################################################################################
colorHost = c("Human"="#000000",
"Cattle"="#8DD3C7",
"Chicken"="#e8eb34",
"Pig" = "#BEBADA",
"Dog" = "#FB8072",
"Cat" ="#BC80BD",
"Duck" = "#FDB462",
"Goose" = "#B3DE69",
"Horse" = "#FCCDE5",
"Turkey" = "#D9D9D9",
"Water" = "#80B1D3")
## ADD ONE FOR INTERHOSTS DATA
colorHost_inter = append(colorHost,c("Inter"="white"))
# first draw the basic tree
core = read.tree(file = "dataset/Fastree_sept_rooted.tree")
## leave some space for inlay
## Metadata parsing
metadata = read.csv("dataset/Metadata.csv")
metadata = metadata %>%
mutate(Host = ifelse(Host =="Poultry","Chicken",as.character(Host)))
meta = metadata %>% select(sample = True_indiv_name,Host,Numero_foyer,Fokontany)
host = meta %>% select(sample,Host)
rownames(host) = meta$sample
geo = meta %>% select(sample,Numero_foyer,Fokontany)
geo
rownames(geo) = meta$sample
host %>% View
geo %>% View
## Get pairwise distances from tree
distTips = cophenetic.phylo(core)
distTips %>% dim
# Deal with matrix to keep only upper diag values (avoid duplicated values)
# diag = T to also remove values on the diagonal (distance between same samples)
lowdiag = lower.tri(distTips,diag=T)
distTips[lowdiag]=NA
distTips = distTips %>% as.table %>% as.data.frame %>% filter(!is.na(Freq))
distTips
# it checks out, we have the correct number of unique pairs I think)
distTips %>% dim
((510*510-510)/2)
# Now make a graph with color according to type (within / between)
distTips %>% glimpse
######### Dist_tips according to host
distTipsHost = distTips %>% left_join(host,by=c("Var1"="sample")) %>%
mutate(host1 = Host) %>% select(-Host) %>%
left_join(host,by=c("Var2"="sample")) %>%
mutate(host2 = Host) %>% select(-Host) %>%
mutate(type=ifelse(host1==host2,host1,"Between Host")) %>%
select(-host1,-host2) %>% mutate(InterIntra = ifelse(type=="Between Host","Between Host","Within Host"))
distancePlot = distTipsHost %>% mutate(type = fct_infreq(type)) %>%
ggplot() +
geom_violin(aes(x=type,y=Freq,fill=type,),
color="black",show.legend= F) +
geom_boxplot(aes(x=type,y=Freq),width=.15,outlier.shape=NA) +
geom_point(data = distTipsHost %>% filter(type == "Horse"),
aes(x=type,y=Freq,fill=type),pch=21,color="black",show.legend=F,size=3) +
scale_fill_manual(values = colorHost_inter) +
geom_text(data = distTipsHost %>% group_by(type) %>% summarize(n=n()),aes(x=type,y=-0.02,label=n)) +
theme_pubr() + xlab("Host pair") + ylab("Genetic distances") + theme(axis.text.x = element_text(angle = 45,hjust = 1))
distancePlot
globalDistancePlotHost = distTipsHost %>% ggplot() + geom_density(aes(x=Freq,color=InterIntra)) +
theme_pubr() + xlab("Genetic distances") + scale_color_discrete(name="")
globalDistancePlotHost
# ggdraw() + draw_plot(plot_phylogeny,x = 0,y = 0,width = 0.7,height = 1) +
# draw_plot(distancePlot,x = 0.7,y = 0,width = 0.3,height = 0.5) +
# draw_plot(globalDistancePlotHost ,x = 0.7,y = 0.5,width = 0.3,height = 0.5)
#
plot_phylogeny
ggsave("Phylogenie_host.svg")
distancePlot
ggsave("DistanceHost.svg",width = 7)
globalDistancePlotHost
ggsave("DistanceHostglob.svg")
###### Figure 3 (Phylogenie + host distance) is then assembled based on this three files
###### Now for the supplementary figures and analysis related to household:
### First get the distances
distTips_foyer = distTips %>% left_join(geo,by=c("Var1"="sample")) %>%
mutate(Foyer1 = Numero_foyer) %>% select(-Numero_foyer) %>%
left_join(geo,by=c("Var2"="sample")) %>%
mutate(Foyer2 = Numero_foyer) %>% select(-Numero_foyer) %>%
mutate(type=ifelse(Foyer1==Foyer2,Foyer1,"Between Foyer")) %>%
mutate(InterIntra = ifelse(type=="Between Foyer","Between Foyer","Within Foyer")) %>%
mutate(IntraFoyer = ifelse(type=="Between Foyer","Between Household",Foyer1))
p = ggtree(core,layout="circular",size = 0.2)
meta = read.csv("dataset/Metadata.csv",header =T)
meta %>% glimpse
metad = meta %>% select(sample= True_indiv_name,Numero_foyer)
rowsNames = metad$sample
metad = metad %>% select(-sample)
rownames(metad)= rowsNames
metad = metad %>%
mutate(Household = as.factor(Numero_foyer))
metad %>% head
metad = metad %>% select(-Numero_foyer)
metad %>% glimpse
metad$Household %>% unique
pal = randomcoloR::distinctColorPalette(k=70)
pal
metad %>% row.names
p1 = gheatmap(p,metad, width=0.2, font.size=8, colnames = F,
hjust = 0) + scale_fill_manual(name="Household",values = pal) +
guides(fill="none")
theme(plot.margin=unit(c(3,1,1.5,1.2),"cm")) +
theme_tree() + theme(legend.position = NA)
p1
ggsave("Phylogenie_household.svg")
# Boxplots of pairwise genetic distances: one box per household plus the
# pooled "Between Household" group.
# Fixes: axis-label typo "Paiwise" -> "Pairwise"; T/F -> TRUE/FALSE.
p2 = distTips_foyer %>%
  ggplot() +
  geom_boxplot(aes(x = IntraFoyer, y = Freq, fill = IntraFoyer),
               show.legend = FALSE, outlier.shape = NA) +
  scale_fill_manual(values = pal) +
  theme_pubr() +
  theme(axis.text.x = element_text(angle = 90)) +
  xlab("") +
  ylab("Pairwise genetic distances")
p2
ggsave("Distance_genetique_foyers.svg", width = 12)
#### Will have to be edited in inkscape to keep only "Between Household"
p3 = distTips_foyer %>%
mutate(InterIntra=fct_recode(InterIntra,"Between households"="Between Foyer",
"Within households" = "Within Foyer")) %>%
ggplot() +
geom_density(aes(x=Freq,color=InterIntra),alpha = 0.8,size = 0.8) +
theme_pubr() + xlab("Pairwise genetic distances") +
scale_color_brewer(name="",type="qual", palette=1) + theme_pubr()
p3
ggsave("GloblaDistance_household.svg")
p4 = distTips_foyer %>%
mutate(InterIntra=fct_recode(InterIntra,"Between households"="Between Foyer",
"Within households" = "Within Foyer")) %>%
ggplot() +
geom_violin(aes(y=Freq,x=InterIntra,fill=InterIntra)) +
geom_boxplot(aes(y=Freq,x=InterIntra),outlier.shape = NA,width =0.1) +
theme_pubr() + ylab("Pairwise genetic distances") +
scale_fill_brewer(name="",type="qual", palette=1) + xlab("") + theme(axis.text.x=element_blank())
p4
ggsave("ViolinPlot_househould.svg")
# to assemble in order to produce figure
############################################################################################
###### Now for the supplementary figures and analysis related to fokontany:
distTipsGeoFokontany = distTips %>% left_join(geo,by=c("Var1"="sample")) %>%
mutate(Fokontany1 = Fokontany) %>% select(-Fokontany) %>%
left_join(geo,by=c("Var2"="sample")) %>%
mutate(Fokontany2 = Fokontany) %>% select(-Fokontany) %>%
mutate(type=ifelse(Fokontany1==Fokontany2,Fokontany1,"Between Fokontany")) %>%
select(-Fokontany1,-Fokontany2) %>% mutate(InterIntra = ifelse(type=="Between Fokontany","Between Fokontany","Within Fokontany"))
# Two-colour palette for the between/within-Fokontany comparison.
# NOTE(review): this reassigns `pal` (set to a 70-colour household
# palette earlier in the script) — confirm no later plot expects the
# previous palette.
pal <- randomcoloR::distinctColorPalette(k = 2)
# Violin + boxplot of pairwise genetic distances, within vs between
# Fokontany. Fixes: axis-label typo "Paiwise" -> "Pairwise"; F -> FALSE.
distTipsGeoFokontany %>%
  ggplot() +
  geom_violin(aes(x = InterIntra, y = Freq, fill = InterIntra)) +
  geom_boxplot(aes(x = InterIntra, y = Freq),
               show.legend = FALSE, outlier.shape = NA, width = 0.05) +
  scale_fill_manual(name = NULL, values = pal) +
  theme_pubr() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("") +
  ylab("Pairwise genetic distances")
ggsave("figures/Violin_DistanceFokontanyGlobal.svg")
# distTipsGeoFokontany %>% glimpse
# distTipsGeoFokontany %>%
# mutate(InterIntra=fct_recode(InterIntra,"Between households"="Between Foyer",
# "Within households" = "Within Foyer")) %>%
# ggplot() +
# geom_violin(aes(y=Freq,x=InterIntra,fill=InterIntra)) +
# geom_boxplot(aes(y=Freq,x=InterIntra),outlier.shape = NA,width =0.1) +
# theme_pubr() + ylab("Pairwise genetic distances") +
# scale_fill_brewer(name="",type="qual", palette=1) + xlab("") + theme(axis.text.x=element_blank())
globalDistancePlotGeoFokontany = distTipsGeoFokontany %>% ggplot() + geom_density(aes(x=Freq,color=InterIntra),alpha = 0.8,size = 0.8) +
theme_pubr() + xlab("Genetic distances") + scale_color_brewer(name="",type="qual",
palette=1)
globalDistancePlotGeoFokontany
ggsave("GlobaldistanceFokontany.svg")
p = ggtree(core,layout="circular",size = 0.2)
meta = read.csv("dataset/Metadata.csv",header =T)
meta %>% glimpse
metad = meta %>% select(sample= True_indiv_name,Fokontany)
rowsNames = metad$sample
metad = metad %>% select(-sample)
rownames(metad)= rowsNames
metad = metad %>%
mutate(Fokontany = as.factor(Fokontany))
metad %>% head
metad$Fokontany %>% unique
pal = randomcoloR::distinctColorPalette(k=10)
gheatmap(p,metad, width=0.2, font.size=8, colnames = F,
hjust = 0) + scale_fill_manual(name="Fokontany",values = pal) +
theme(legend.direction = "vertical",legend.position ="left") +
theme_tree()
ggsave("Phylogeny_fokontany.svg",width=6)
# Violin + boxplot of pairwise genetic distances, one group per
# Fokontany plus the pooled "Between Fokontany" group; uses the
# 10-colour palette defined just above.
# Fixes: axis-label typo "Paiwise" -> "Pairwise"; F -> FALSE.
distTipsGeoFokontany %>%
  ggplot() +
  geom_violin(aes(x = type, y = Freq, fill = type), show.legend = FALSE) +
  geom_boxplot(aes(x = type, y = Freq),
               show.legend = FALSE, outlier.shape = NA, width = 0.1) +
  scale_fill_manual(values = pal) +
  theme_pubr() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("") +
  ylab("Pairwise genetic distances")
ggsave("Violin_foko_distance.svg")
##### to assemble in order to produce figure sup
#################################################################################
################ Now let's do the heatmaps (genes and ST by Host)
#################################################################################
###### Fist resistance genes
data = readxl::read_xlsx("dataset/ResistanceGenesWithMetadataWideFormat.xlsx")
data = data %>% mutate(Sample = True_indiv_name)
data %>% select(Sample) %>% unique %>% count
######### get list of ecoli samples #######
GoodSamples = read.table("dataset/Sample_510.txt")
samples = GoodSamples %>% select(V2)
colnames(samples)="id"
######## save the good ones
data = data %>% filter(True_indiv_name %in% samples$id)
df = data %>% select(Sample,Host,Numero_foyer,Fokontany,mdf.A.:blaTEM.32) %>%
gather("Resistance.gene","presence",mdf.A.:blaTEM.32)
df$Abtype = ifelse(substring(df$Resistance.gene,1,6)=="blaCTX" , "CTX" ,
ifelse(substring(df$Resistance.gene,1,6)=="blaSHV","SHV",
ifelse(substring(df$Resistance.gene,1,6)=="blaTEM","TEM","Others")))
# Prepare dataset for matrix, change sample and gene size here
gene_counts = df %>% filter(presence ==1 ) %>%
group_by(Host,Resistance.gene) %>% count
gene_counts = gene_counts %>% pivot_wider(names_from=Host,values_from=n)
gene_counts = gene_counts %>% pivot_longer(cols=2:12,names_to="Host",values_to="n")
hg = df %>% filter(presence ==1 ) %>% group_by(Resistance.gene) %>% count %>% select(Resistance.gene)
counts = df[row.names(df %>% select(Sample) %>% unique),"Host"] %>% table %>% as.data.frame
counts %>% glimpse
counts = rbind(counts,data.frame("Host"="All",Freq = sum(counts$Freq)))
colnames(counts) = c("Host","counts")
hs = counts
# Convert a resistance-gene identifier such as "blaCTX.M.15" into the
# HTML markup "<i>bla</i><sub>CTX-M-15</sub>" used by ggtext axis
# labels: dots become dashes and the leading "bla" moves into italics.
formatgene = function(genename){
  suffix <- gsub("bla", "", gsub("\\.", "-", genename))
  paste0("<i>bla</i><sub>", suffix, "</sub>")
}
######## gyrb & parC
readxl::read_xlsx("dataset/parC_QRDR_AA.xlsx") -> parC
# Extract the sample name from an isolate id by dropping everything
# from the ".scfd" suffix onwards (e.g. "S1.scfd_c3" -> "S1").
# Fix: the original str_split(test, ".scfd") treated the pattern as a
# regex, so the dot matched ANY character (e.g. "Xscfd" also split);
# base strsplit with fixed = TRUE matches the literal ".scfd".
splitsample = function(test){
  strsplit(test, ".scfd", fixed = TRUE)[[1]][1]
}
parC %>% rowwise %>% mutate(sample = splitsample(isolate)) %>% select(sample,phenotype) -> parC
readxl::read_xlsx("dataset/Metadata.xlsx") -> meta
meta %>%
left_join(parC,by = c("Reads_names"="sample")) %>%
select(sample = Reads_names,Host,phenotype) -> parC
parC = parC %>% mutate(mut = ifelse(phenotype=="WT" | is.na(phenotype),0,1)) %>%
group_by(Host) %>% summarize(Resistance.gene = "parC",n = sum(mut))
parC = parC[c(2,1,3)]
parC
parC = parC %>% mutate(Host = ifelse(Host=="Poultry","Chicken",Host)) %>% droplevels()
readxl::read_xlsx("dataset/gyrA_QRDR_AA.xlsx") -> gyrB
# Extract the sample name from an isolate id by dropping everything
# from the ".scfd" suffix onwards (e.g. "S1.scfd_c3" -> "S1").
# Fix: the original str_split(test, ".scfd") treated the pattern as a
# regex, so the dot matched ANY character; base strsplit with
# fixed = TRUE matches the literal ".scfd". (Same fix as the identical
# definition earlier in this script.)
splitsample = function(test){
  strsplit(test, ".scfd", fixed = TRUE)[[1]][1]
}
# Map each isolate id to its sample name and keep the phenotype call.
# NOTE(review): the data read just above come from gyrA_QRDR_AA.xlsx
# but the results below are labelled "gyrB" — confirm which gene is
# intended before publishing the figure.
gyrB %>% rowwise %>% mutate(sample = splitsample(isolate)) %>% select(sample,phenotype) -> gyrB
readxl::read_xlsx("dataset/Metadata.xlsx") -> meta
# Attach host metadata to each sample.
meta %>%
  left_join(gyrB,by = c("Reads_names"="sample")) %>%
  select(sample = Reads_names,Host,phenotype) -> gyrB
# Count mutated samples per host: anything that is neither wild-type
# ("WT") nor missing counts as a mutation.
gyrB= gyrB %>% mutate(mut = ifelse(phenotype=="WT" | is.na(phenotype),0,1)) %>%
  group_by(Host) %>% summarize(Resistance.gene = "gyrB",n = sum(mut))
# Reorder columns to (Resistance.gene, Host, n) to match gene_counts.
gyrB = gyrB[c(2,1,3)]
gyrB = gyrB %>% mutate(Host = ifelse(Host=="Poultry","Chicken",Host)) %>% droplevels()
gene_counts = rbind(as.data.frame(gene_counts),as.data.frame(gyrB),as.data.frame(parC))
gene_counts = gene_counts %>% filter(Host!="Poultry")
gene_counts = gene_counts %>% mutate(n=ifelse(n==0,NA,n)) %>% droplevels()
all = gene_counts %>% group_by(Resistance.gene) %>% summarize(Host="All",n=sum(n,na.rm=T))
rbind(as.data.frame(all),as.data.frame(gene_counts)) %>%
mutate(type = ifelse(Host == "All","All","Host")) %>%
filter(grepl("bla",Resistance.gene) | Resistance.gene=="parC" | Resistance.gene=="gyrB") %>% left_join(counts,by="Host") %>%
mutate(mutype = ifelse(grepl("bla",Resistance.gene),"betalactamase","other")) %>%
mutate(freq = n/counts*100) %>% ungroup %>% filter(!is.na(n)) %>%
mutate(Host = paste(Host," <br><span style='font-size:9pt'>(N=",counts,")</span>",sep="") ) %>%
mutate(Host = fct_reorder(Host,counts,unique,.desc = T)) %>%
mutate(Resistance.gene = formatgene(Resistance.gene)) %>%
mutate(Resistance.gene = fct_reorder(Resistance.gene,n,sum,.desc=F)) %>%
ggplot() + geom_tile(aes(x=Host,y=Resistance.gene,fill=freq)) +
geom_text(aes(x=Host, y=Resistance.gene, label=round(freq,1))) +
theme_bw() + scale_fill_viridis_c(name="Percentage",direction=-1) +
ylab("Resistance genes") + xlab("Hosts") +
theme_minimal() + theme(panel.grid=element_blank()) +
theme(panel.grid = element_blank(),text= element_text(family = "Helvetica",size=14)) +
theme(axis.text.y = element_markdown(hjust = 0),axis.text.x = element_markdown()) +
facet_grid(rows = vars(mutype),cols = vars(type),scales = "free",space="free") + xlab(NULL)
ggsave("Figure_heatmap_prevalence_gene_hote.svg",height = 7,width=10)
ggsave("Figure_heatmap_prevalence_gene_hote.pdf",height = 7,width=10)
ggsave("Figure_heatmap_prevalence_gene_hote.png",height = 7,width=10)
######### No association genes / host
gene_counts %>%
filter(grepl("bla",Resistance.gene) |
Resistance.gene=="parC" |
Resistance.gene=="gyrB") %>%
pivot_wider(values_fill = 0,values_from = n,names_from=Resistance.gene) -> genedf
genedf %>% select(-Host) %>% as.matrix -> genematrix
genematrix[is.na(genematrix)] = 0
rownames(genematrix) = genedf$Host
chisq.test(genematrix,simulate.p.value = T)
################### Now for the ST Heatmap #######################
ST = read.csv("dataset/ST_data_final.txt",sep="\t",h=F,stringsAsFactors = F) %>% select(V1,V2)
colnames(ST) = c("sample","ST")
samples$id2 = gsub("_.+","",samples$id)
ST %>% filter(sample %in% samples$id2) -> ST
data %>% select(True_indiv_name,Host) %>% mutate(True_indiv_name =gsub("_.+","", True_indiv_name)) %>%
left_join(ST,by=c("True_indiv_name"="sample")) %>% group_by(ST,Host) %>% count -> ST_counts
ST_counts$ST %>% unique
ST_counts %>% group_by(ST) %>% summarize(Host = "All",n=sum(n)) -> All_count
All_count
ST_counts = rbind(as.data.frame(ST_counts),as.data.frame(All_count))
ST_counts %>% group_by(Host) %>% summarize(total = sum(n)) %>% ungroup -> ST_host_counts
dev.off()
ST_counts %>%
mutate(type = ifelse(Host =="All","All","Host")) %>%
#filter(Host!="Horse") %>%
left_join(ST_host_counts,by="Host") %>% ungroup %>%
mutate(Host = paste(Host," <br><span style='font-size:9pt'>(N=",total,")</span>",sep="") ) %>%
mutate(Host = fct_reorder(factor(Host),n,sum,.desc=T)) %>%
mutate(ST = fct_reorder(factor(ST),n,sum,.desc = F)) %>%
mutate(ST = fct_recode(ST,Unknown="0")) %>%
mutate(ST = fct_lump_n(ST,25)) %>%
mutate(ST = fct_relevel(ST, "Other", after = 0)) %>%
mutate(freq = n/total*100) %>%
ggplot() + geom_tile(aes(x=Host,y=ST,fill=freq)) +
geom_text(aes(x=Host, y=ST, label=round(freq,1)),check_overlap = TRUE) +
theme_bw() + scale_fill_viridis_c(option = "plasma",name="Percentage",direction=-1) +
ylab("Sequence Type") + xlab("Hosts") +
theme_minimal() + theme(panel.grid=element_blank()) +
theme(panel.grid = element_blank(),text= element_text(family = "Helvetica",size=14)) +
theme(axis.text.y = element_markdown(hjust = 0),axis.text.x = element_markdown()) +
facet_grid(cols = vars(type),scales = "free",space="free") + xlab(NULL)
ggsave("Figure_ST_frequency_host.pdf",height = 7,width=10)
ggsave("Figure_ST_frequency_host.svg",height = 7,width=10)
ggsave("Figure_ST_frequency_host.png",height = 7,width=10)
#################################################################################
# NO association between ST and Host
ST_counts %>% filter(Host!="All") %>%
pivot_wider(values_fill=0 ,values_from = n,names_from = Host) %>%
as.data.frame -> ST_dataframe
ST_matrix = as.matrix(ST_dataframe)
ST_matrix[,-1] -> ST_matrix
rownames(ST_matrix) = ST_dataframe$ST
chisq.test(ST_matrix,simulate.p.value = T)
##########################################################################
# Now the pieChart of clusters !
##########################################################################
clust = read.table("dataset/cluster_phydelity_k2_sol0_Fastree_sept_rooted.txt",header=T)
clust %>% glimpse
clust %>% select(CLUSTER) %>% unique %>% dim
# Add infos in SNP number
distanceSNP = read.csv2(file="dataset/Distances_in_cluster_phydelity_norecomb.txt",sep=" ",h=T)
colnames(distanceSNP) = c("Cluster","TAXA1","TAXA2","SNP")
distanceSNP %>% glimpse
distanceSNP %>%
group_by(Cluster) %>%
summarise(meanSNP = mean(SNP),maxSNP=max(SNP))
clust = clust %>% left_join(distanceSNP %>%
group_by(Cluster) %>%
summarise(meanSNP = mean(SNP),maxSNP=max(SNP)) ,by=c("CLUSTER"="Cluster"))
# filter cluster with a #SNP > 20 in any transmission
clust_toremove = distanceSNP %>% filter(SNP>20) %>% select(Cluster) %>% unique
clust = clust %>% filter(!CLUSTER %in% clust_toremove$Cluster )
meta = readxl::read_xlsx("dataset/Metadata.xlsx")
df = meta %>% select(True_indiv_name,Host,Numero_foyer) %>%
left_join(clust,by = c("True_indiv_name" = "TAXA"))
df %>% select(CLUSTER) %>% unique %>% dim
colorHost = c("Human"="#000000",
"Cattle"="#8DD3C7",
"Poultry"="#e8eb34",
"Pig" = "#BEBADA",
"Dog" = "#FB8072",
"Cat" ="#BC80BD",
"Duck" = "#FDB462",
"Goose" = "#B3DE69",
"Horse" = "#FCCDE5",
"Chicken" = "#D9D9D9",
"Water" = "#80B1D3")
######### get list of ecoli samples #######
GoodSamples = read.table("dataset/Sample_510.txt")
samples = GoodSamples %>% select(V2)
colnames(samples)="id"
samples$id2 = gsub("_.+","",samples$id)
######## get list of bad samples
df %>% filter(!True_indiv_name %in% samples$id2)
######## save the good ones
df = df %>% filter(True_indiv_name %in% samples$id)
df %>% glimpse
df %>% filter(CLUSTER!="NA") %>% select(Numero_foyer) %>% unique %>% dim
df %>% write.csv("dataset/CompositionClusterSup20.csv")
df[which(df$Host=="Poultry"),]$Host="Chicken"
left_join(df %>% group_by(Host) %>% count() %>% mutate(all = n) %>% select(-n),
df %>% filter(CLUSTER!="NA") %>% group_by(Host) %>% count() %>% mutate(cluster = n) %>% select(-n)) %>%
gather(key="type",value = "count",-"Host" ) %>%
ggplot() + geom_bar(aes(x=Host,y=count,fill = Host),stat ="identity") + facet_grid(type ~ .) +
scale_fill_manual(values = colorHost) + theme_pubr()
ggsave("GlobalCompositionCluster.svg",width =6)
###### shamelessly stolen from some internet page to make nice looking pie chart (yuck) in R
cp <- coord_polar("y")
cp$is_free <- function() TRUE
meanSNPs = df %>% filter(CLUSTER!="NA") %>% select(CLUSTER,meanSNP) %>% unique
labmeanSNPS = as.character(signif(meanSNPs$meanSNP,digits = 3))
names(labmeanSNPS) = meanSNPs$CLUSTER
library(extrafont)
font_import()
df %>% filter(CLUSTER !="NA") %>%
mutate(CLUSTER = as.factor(CLUSTER)) %>% select(CLUSTER,Numero_foyer) %>% unique %>%
group_by(CLUSTER) %>% count() %>% transmute(nfoyer = n) -> clusterfoyer
# Grid of pie charts showing the host composition of each transmission
# cluster that spans exactly `nf` households. Pie radius is scaled by
# sqrt(cluster size); facets are ordered by mean SNP distance and
# labelled with it (via the global labmeanSNPS). Reads the globals
# df, clusterfoyer, colorHost, labmeanSNPS and cp (polar coordinates).
pie = function(nf)
{
  df %>%
    filter(CLUSTER !="NA") %>%
    mutate(CLUSTER = as.factor(CLUSTER)) %>%
    # One row per (cluster, host): count of isolates, plus sqrt of the
    # cluster total used as the pie diameter.
    group_by(CLUSTER,Host,meanSNP) %>% count %>%
    ungroup %>% group_by(CLUSTER) %>% mutate(size = sqrt(sum(n))) %>% ungroup %>%
    mutate(CLUSTER = fct_reorder(CLUSTER,meanSNP,mean)) %>% left_join(clusterfoyer,by="CLUSTER") %>%
    filter(nfoyer ==nf) %>%
    # A stacked bar of width `size` under polar coordinates becomes a
    # pie whose radius encodes cluster size.
    ggplot(aes(group=CLUSTER,x=size/2, y=n,width=size, fill=Host))+
    geom_bar(width = 1, stat = "identity")+
    scale_fill_manual(values = colorHost) + guides(fill=F) +
    facet_wrap(~ CLUSTER,ncol = 13,drop=T,scales="free_y",
               labeller = labeller(CLUSTER = labmeanSNPS)) +
    cp +
    theme_void() +
    theme(strip.background = element_blank(),
          #strip.text.x = element_blank(),
          strip.text.x = element_text(size = 8),
          axis.ticks=element_blank(),
          axis.title = element_blank(),
          axis.text = element_blank(),
          panel.border = element_blank(),
          aspect.ratio = 1,legend.position = "top",
          text = element_text(family = "Times"))
}
library(egg)
library(grid)
# Build one fixed-size panel grid per household count (1..5). Using
# identical 0.7 cm panels means the five rows can be stacked and compared
# directly. (Replaces five copy-pasted set_panel_size() calls.)
panel_dim <- unit(0.7, "cm")
fixed_pies <- lapply(seq_len(5), function(nf) {
  set_panel_size(pie(nf), width = panel_dim, height = panel_dim)
})
grid.newpage()
g = grid.arrange(grobs = fixed_pies, ncol = 1, nrow = 5)
ggsave("PieCharts.svg", g)
#### OK this one is tricky, best to edit it in inkscape to produce the figures but this a good basis
# library() errors loudly if EMT is missing; require() would only warn
# and let the script fail later at multinomial.test().
library(EMT)
# Host composition of the full sample: expected frequencies for the
# multinomial exact tests below.
allpop = df %>% group_by(Host) %>%
  count %>%
  mutate(freq = n/sum(.$n)) %>% select(-n)
# Multinomial exact test: is the host composition of cluster `clustid`
# compatible with (a) the host composition of the whole sample ("pop") and
# (b) the host composition of the households the cluster spans ("foyer")?
# Returns c(pop = p-value, foyer = p-value).
multitest = function(clustid){
  # observed host counts inside the cluster, aligned to the sample-wide
  # host list; hosts absent from the cluster count as 0
  # (stray trailing commas removed from the left_join() calls -- they
  # passed an empty positional argument)
  testpop = allpop %>% left_join(df %>%
                                   filter(CLUSTER == clustid) %>%
                                   select(Host) %>%
                                   group_by(Host) %>%
                                   count, by = "Host") %>%
    mutate(n = ifelse(is.na(n), 0, n))
  testpopstat = multinomial.test(testpop$n, testpop$freq)
  # expected frequencies restricted to the households this cluster touches
  foyerclust = df %>% filter(CLUSTER == clustid) %>% select(Numero_foyer)
  foyerpop = df %>% filter(Numero_foyer %in% foyerclust$Numero_foyer) %>%
    group_by(Host) %>%
    count %>%
    mutate(freq = n/sum(.$n)) %>% select(-n)
  testfoyer = foyerpop %>% left_join(df %>%
                                       filter(CLUSTER == clustid) %>%
                                       select(Host) %>%
                                       group_by(Host) %>%
                                       count, by = "Host") %>%
    mutate(n = ifelse(is.na(n), 0, n))
  testfoyerstat = multinomial.test(testfoyer$n, testfoyer$freq)
  return(c(pop = testpopstat$p.value, foyer = testfoyerstat$p.value))
}
# Run both multinomial tests for every cluster.
# NOTE(review): multitest() is called twice per cluster (once per column),
# doubling the Monte-Carlo work; a single call returning both values could
# be reused.
pvalcluster = df %>% filter(CLUSTER!="NA") %>% group_by(CLUSTER) %>% summarize(pvalpop = multitest(CLUSTER)["pop"],
pvalfoy = multitest(CLUSTER)["foyer"])
# Clusters whose composition differs significantly from the whole sample.
pvalcluster %>% filter(pvalpop < 0.05)
pvalcluster %>% dim
# Supplementary table: one row per cluster with size, household/sample
# membership lists and the two p-values.
annotation=df %>% filter(CLUSTER!="NA") %>%
group_by(CLUSTER,meanSNP,maxSNP) %>%
summarize(nHousehold = length(unique(Numero_foyer)),
nHost = length(Host),
Households = paste0(Numero_foyer,collapse="",sep=","),
Samples = paste0(True_indiv_name,collapse="",sep=","),
HostType = paste0(Host,collapse="",sep=",") ) %>%
left_join(pvalcluster)
write.csv2(annotation,"tables/table_sup_Clusters.csv")
##########################################################################
# Now just the finisher figure : The Sankey Diagram !
##########################################################################
####### Here we will combine plasmids data with Host data
ST_data = read.csv2("dataset/ST_data_final.txt",sep="\t",header=F)
colnames(ST_data) = c("Sample","ST")
ST_data
stpla = read.csv2("dataset/ST_plasm.csv")
# Match plasmid sample ids to the suffix-free ids used elsewhere.
stpla$Sample = gsub("_.+","",stpla$Sample)
stpla = stpla %>% select(Sample,ST2,Host,Resgenes,groupeinc) %>%
na.omit %>% mutate(ST2= as.factor(ST2)) %>% left_join(ST_data,by="Sample")
tt = stpla %>% select(Sample,ST,Host,Resgenes,groupeinc) %>%
na.omit %>% mutate(ST= as.factor(ST)) %>% filter(ST!= "Unknown")
# NOTE(review): this OVERWRITES the `df` used in the cluster analysis
# above; anything after this point sees the Sankey counts, not the
# sample table. Consider a distinct name (e.g. sankey_df).
# fct_lump(n = 30): keep the 30 most frequent STs, pool the rest as "Other".
df = stpla %>% select(ST,Host,Resgenes,groupeinc) %>%
na.omit %>% mutate(ST= as.factor(ST)) %>%
mutate(ST = fct_lump(ST,n=30)) %>%
group_by_all() %>% count
dim(df)
# Export the resistance-gene x incompatibility-group association counts.
stpla %>% select(ST,Host,Resgenes,groupeinc) %>%
na.omit %>% mutate(ST= as.factor(ST)) %>%
mutate(ST = fct_lump(ST,n=30)) %>% group_by(Resgenes,groupeinc) %>% count(sort=T) %>%
write.table("Association_Resgenes_incgroup.csv",sep=",",row.names = F)
# Column order for the Sankey: Host -> ST -> groupeinc -> Resgenes.
df[,c(2,1,4,3)]
df$Host = ifelse(df$Host=="Poultry","Chicken",df$Host)
df$ST = as.factor(ifelse(df$ST==0,"Unknown",df$ST))
sankey = SankeyDiagram(max.categories = 31,data = df[,c(2,1,4,3)],weights = df$n,link.color = "Source",label.show.percentages = T,label.show.varname=F)
sankey
# install.packages("webshot")
# webshot::install_phantomjs()
# Render the htmlwidget to HTML, then rasterise that page to PDF.
htmlwidgets::saveWidget(sankey, file ="sankey1.html") # save html widget
webshot::webshot("sankey1.html",file="sankey1.pdf")
############# Chi.square test for composition ###########
# TODO: decide which of these association tests to report.
# (This used to be a bare `TODO` symbol, which aborts a sourced run with
# "object 'TODO' not found" -- keep it as a comment.)
dataset = read_csv2("dataset/Table_info_samples.csv")
# Pairwise association tests between sample metadata columns.
# simulate.p.value = TRUE: Monte-Carlo p-values, because these contingency
# tables are sparse and the asymptotic chi-square approximation is poor.
dataset %>% select(Host,ST) %>% table %>% chisq.test(simulate.p.value = TRUE)
dataset %>% select(Host,phylogroup) %>% table %>% chisq.test(simulate.p.value = TRUE)
dataset %>% select(Fokontany,phylogroup) %>% table %>% chisq.test(simulate.p.value = TRUE)
dataset %>% select(Fokontany,ST) %>% table %>% chisq.test(simulate.p.value = TRUE)
dataset %>% select(Household,phylogroup) %>% table %>% chisq.test(simulate.p.value = TRUE)
dataset %>% select(Household,ST) %>% table %>% chisq.test(simulate.p.value = TRUE)
|
c4983a36ea1b027f16377e0cb46c70fb41955c64
|
465b6d3d6738a8af925c7cb5de1ca5bfebcb8f1e
|
/analysis.R
|
4f539c388cd70ecdc28422c723969ed2b555fd4c
|
[] |
no_license
|
lukaszgolder/pump-it-up
|
93dfebe0f962a2c0ff49d37b4c8ce2f519d3fdfa
|
8710d80f9705001d22b955477c73f665bfe022fd
|
refs/heads/master
| 2021-09-02T05:18:27.110770
| 2017-12-30T18:25:35
| 2017-12-30T18:25:35
| 115,814,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
analysis.R
|
# Pump-it-up water point classifier: fit a random forest on engineered
# features, report training diagnostics, and write a submission CSV.
# URLs (train_values_url, ...) and featureEngineering() come from the
# sourced project files.
library(randomForest)
library(caret)

source("R/functions.R")
source("R/utilities.R")

values_train <- read.csv(train_values_url)
labels_train <- read.csv(train_labels_url)
test_values <- read.csv(test_values_url)

# Join labels onto the raw features, then derive engineered columns.
training <- featureEngineering(merge(labels_train, values_train))

set.seed(42)  # reproducible forest
rf_fit <- randomForest(
  as.factor(status_group) ~ longitude + latitude + extraction_type_group +
    quality_group + quantity + waterpoint_type + construction_year +
    install_3 + population,
  data = training,
  importance = TRUE,
  ntree = 50,
  nodesize = 1
)

# Training-set diagnostics (optimistic, but tracks feature changes).
fitted_train <- predict(rf_fit, training)
importance(rf_fit)
confusionMatrix(fitted_train, training$status_group)

# Score the held-out competition set with the same feature pipeline.
testing <- featureEngineering(test_values)
fitted_test <- predict(rf_fit, testing)

submission <- data.frame(id = testing$id, status_group = fitted_test)
write.csv(submission, output_url, row.names = FALSE)

# Accuracy : 0.8354
# Accuracy : 0.8411 - Added install_3
# Accuracy : 0.8557 - Increased ntree to 50
# Accuracy : 0.8612 - Decreased nodesize to 1
# Accuracy : 0.9351 - Added population
|
62721e86497e0f1fcb6491d87a1538edb80c037a
|
fc94f96b2c059309718802204e6bf5a89bf8393e
|
/Data Visualization in R/Electronic Materials (Flash Drives)/ConsBio_Day1.R
|
adb1edbde6b4f0eb7a1649f726e7c93288e7a359
|
[] |
no_license
|
Sharanya-ms/CompSciBio_Workshop_2020
|
fd3d9d839269349d6e9cce4cdc35dff6a783e6d5
|
0bc6b25602b4af119808aa931a6d37331109b618
|
refs/heads/master
| 2022-11-26T15:32:55.357907
| 2020-08-12T18:48:02
| 2020-08-12T18:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,241
|
r
|
ConsBio_Day1.R
|
# R code and comments for Conservation Biology as part of the Computing in Modern Biology Workshop 2020
# Code assembled by Hayley C. Lanier
# Day 1: Plotting species threat data and population size
# Command to set working directory to dataset location
#setwd("~/Desktop/Computing/ConsBio")
#Read in dataset. Original data located at https://www.iucnredlist.org/resources/summary-statistics
# IUCN Red List Categories: EX - Extinct, EW - Extinct in the Wild, CR - Critically Endangered (includes CR(PE) and CR(PEW)), EN - Endangered, VU - Vulnerable, NT - Near Threatened, DD - Data Deficient, LC - Least Concern. CR(PE) & CR(PEW): The tags 'Possibly Extinct' and 'Possibly Extinct in the Wild' have been developed to identify CR species that are likely already extinct (or extinct in the wild), but require more investigation to confirm this. NOTE that these are not IUCN Red List Categories; they are tags that can be attached to the CR category to highlight those taxa that are possibly extinct. They are included in the above table to indicate a plausible upper estimate for number of recently extinct species on The IUCN Red List.
mammals<-read.table("mammals_IUCN.txt",row.names=1,header=T,sep="\t")
View(mammals) # look at the dataset
names(mammals) # list the column names for the mammals dataset
row.names(mammals) # list the names for each row of data in the mammals dataset
iucn.status<-row.names(mammals) # creates a vector of names for categories of conservation status from the row names in the dataset
iucn.colors<-c("grey5","red","orange","yellow","grey","green","darkgreen") # creates a vector of colors associated with each conservation status rank
#################################################
# How much conservation risk are mammals under? #
#################################################
# Plot the data for all mammals by conservation status
pie(mammals$Total,labels=iucn.status,col=iucn.colors,main="All mammals")
# Count the number of mammal species in each category
rowSums(mammals[,1:27]) # sums across all rows, for all columns all orders (first 27 columns of the dataset)); Should be the same results as using: mammals$Total to read out the total number column (which is column 28)
# Calculate that number as a % of total mammals
# This calculates the sum within each row by conservation status, divides it by the total number of mammals (sum(mammals) and then rounds the result to just two significant digits)
# NOTE(review): rowSums(mammals) and sum(mammals) include the Total column
# (column 28), so every species is counted twice; presumably this should be
# mammals[,1:27] as in the line above -- the ratio is unchanged only because
# both numerator and denominator are doubled. Verify intent.
round(rowSums(mammals)/sum(mammals),2)
###########################################
# Which group of mammals is at more risk? #
###########################################
# Barplot all of the data together
barplot(as.matrix(mammals[1:27]),col=iucn.colors,xlab="number of species",horiz=T,las=2,cex.names=0.6)
### Barplot a subset of the IUCN data
# Pull out just four Orders of mammals to compare
comp.table<-cbind(mammals$Carnivora,mammals$Lagomorpha,mammals$Primates,mammals$Pholidota)
# Plot the result together and add a legend (really two commands, separated with a semi-colon)
barplot(comp.table,col=iucn.colors,names=c("Carnivora","Lagomorphs","Primates","Pangolins"),ylab="number of species",main="Which group is at higher risk?"); legend(4,470,iucn.status,pch=22,pt.bg=iucn.colors)
# set to plot just one plot in window (1 row, 1 column)
# NOTE(review): the comment above says "one plot" but the call below sets a
# 1x2 layout (as its trailing comment correctly states).
par(mfrow=c(1,2)) # Set up the plot background to show 2 plots side-by-side (1 row, 2 columns)
# par(mfrow=c(1,1)) # Undoes the previous command - just get rid of the comment
# Plot the conservation status for each gropu as it's own pie chart
pie(mammals$Carnivora,labels=iucn.status,col=iucn.colors,main="Carnivores")
pie(mammals$Lagomorpha,labels=iucn.status,col=iucn.colors,main="Lagomorphs")
pie(mammals$Primates,labels=iucn.status,col=iucn.colors,main="Primates")
pie(mammals$Pholidota,labels=iucn.status,col=iucn.colors,main="Pangolins")
#######################################################
# Digging into Laogmorpha data - who is at more risk? #
#######################################################
# read in dataset
lago<-read.csv("lagomorph-status.csv")
# organize dataset and colors for plotting
lago$status<-factor(lago$status,levels=c("CR","EN","VU","DD","NT","LC")) # set the order for the level of conservation threat (high to low; otherwise it will just be alphabetized)
lago$Trend<-factor(lago$Trend,levels=c("Unknown","Decreasing","Stable","Increasing")) # set the order for the level of conservation threat (high to low; otherwise it will just be alphabetized)
lago.colors<-c("red","orange","yellow","grey","green","darkgreen") # same colors as earlier, but without including a color for extinct species
lago.col.trend<-c("grey","darkorange","lightblue","darkgreen")
# summarize data for plotting
ls<-table(lago$Family,lago$status) # summarizes data by family and conservation threat status
# plot results as pie charts
pie(ls[1,],col=lago.colors,main="Rabbits & hares")
pie(ls[2,],col=lago.colors,main="Pikas")
# plot results as a table
barplot(t(ls),col=lago.colors,ylab="number of species",names=c("rabbits & hares","pikas"),main="Conservation Status")
######################################################
# How do population trends inform our understanding? #
######################################################
trends<-table(lago$Family,lago$Trend)
barplot(t(trends),col=lago.col.trend,ylab="number of species",names=c("rabbits & hares","pikas"),main="Population Trend")
# plot trends as pie charts
pie(trends[1,],col=lago.col.trend,main="Rabbits & hares")
pie(trends[2,],col=lago.col.trend,main="Pikas")
############################
# Working with census data #
############################
# NOTE(review): installing packages unconditionally inside a script re-runs
# the install every time; prefer installing once outside the script.
install.packages("popbio") #install the popbio package (a developed set of data and tools for working in conservation/population biology
library(popbio) # turns on the popbio [ackage]
data(grizzly) # loads the Yellowstone grizzly dataset that comes with the popbio package
# Let's learn more about the grizzly dataset
View(grizzly) # look at the dataset
?grizzly # learn more about the dataset
names(grizzly) # learn what the column names are for this dataset
# some housekeeping commands
# NOTE(review): attach() is discouraged outside teaching code; it is paired
# with the detach(grizzly) below -- keep them matched.
attach(grizzly) # makes dataset run without using the grizzly$
par(mfrow=c(1,1)) # plot one plot per window
par(bty="n")
###########################################################
# How do we know if a population is growing or shrinking? #
###########################################################
# Plot 25 years of grizzly population changes in Yellowstone
plot(year[1:25], N[1:25]) # a basic, no-frills plot (not quite as nice to eary to interpret
# A better looking plot of the same data
plot(year[1:25], N[1:25], type='o', pch=16, las=1, xlab="Year", ylab="Adult females", main="Yellowstone grizzly bears")
# export your plot as a pdf
dev.print(pdf,file="grizzlyBears_1960-1983.pdf") # Handy command to print out a plot as a pdf as shown on the screen
# Plot the full range of the Yellowstone data
plot(year, N, type='o', pch=16, las=1, xlab="Year", ylab="Adult females", main="Yellowstone grizzly bears")
############################################
# Case studies: looking at population data #
############################################
detach(grizzly) # some housekeeping to detach the grizzly data
# Read in your dataset by either modifying the command below or changing which line is commented out
# NOTE(review): the next line is a placeholder and will ERROR unless
# "yourFileNameHere" is replaced; execution only continues because students
# are expected to edit or skip it (popDat is re-read further down anyway).
popDat<-read.table("yourFileNameHere",header=T,sep="\t")
# Desert Yellowhead (a flowering plant from Wyoming)
#popDat<-read.table("DesertYellowhead.txt",header=T,sep="\t")
# Collared pikas (from the Yukon Territory, Canada)
#popDat<-read.table("collared_pikas.txt",header=T,sep="\t")
# Red-cockaded woodpeckers (North Carolina or Central Florida population)
#popDat<-read.table("woodpecker_NC.txt",header=T,sep="\t")
#popDat<-read.table("woodpecker_CF.txt",header=T,sep="\t")
# Vancouver Island marmot
#popDat<-read.table("Vl_marmot.txt",header=T,sep="\t")
# Olympic marmot
popDat<-read.table("Oly-Marmots.txt",header=T,sep="\t")
attach(popDat)
# plot your data
# Change 'Your Dataset' to the name of the species/population you are plotting
plot(year, N, type='o', pch=16, col="darkblue", las=1, xlab="Year", ylab="population size", main="Your Dataset")
detach(popDat)
|
cc500c491d4f3d870102d4974ecb4a0f77f03bcd
|
391b74b9d9bac0df1f63bc737d639b90def97cf6
|
/plot1.R
|
af373922655ddc0e788c196a2f69d5723fa47cce
|
[] |
no_license
|
jsink13/ExData_Plotting1
|
48be902d6c6275321af59e61fdf070aeba55c46a
|
db84d9a0fef282c998c739f1fd5a62a6e7694161
|
refs/heads/master
| 2020-12-26T03:55:36.502310
| 2015-01-11T22:11:04
| 2015-01-11T22:11:04
| 29,107,008
| 1
| 0
| null | 2015-01-11T21:36:11
| 2015-01-11T21:36:10
| null |
UTF-8
|
R
| false
| false
| 477
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Loads the full household power file, keeps the two target days, draws the
# histogram on screen, then copies it to plot1.png.
power_raw <- read.table("C:\\Rfiles\\household_power_consumption.txt",
                        sep = ";", header = TRUE, na.strings = "?")
power_raw$Date <- as.Date(power_raw$Date, '%d/%m/%Y')
two_days <- power_raw[power_raw$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
rm(power_raw) # free the unused memory
windows()
with(two_days,
     hist(Global_active_power, col = "red",
          xlab = "Global Active Power (kilowatts)",
          main = "Global Active Power"))
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
6e994d6c430e013ab7cbe050dbcce14aaf7fcd7f
|
7d5daf759e6da40fdc3cd44068d1847539104690
|
/www/Modules/CRUD_db.R
|
de740923ef86ffe7a783ede741dfb9c3dd56b63e
|
[] |
no_license
|
vikram-rawat/honda_dashboard
|
8d73f3fce10866985cf6378b1d137582f5168977
|
d4a9b5802d235c1150e89e5396349d890ea0d769
|
refs/heads/master
| 2020-04-18T08:37:21.707566
| 2019-06-08T05:11:42
| 2019-06-08T05:11:42
| 167,402,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,027
|
r
|
CRUD_db.R
|
# library -----------------------------------------------------------------
# library(shiny)
# library(shinyBS)
# library(shinydashboard)
# library(shinyjs)
# library(flexdashboard)
# library(DBI)
# library(pool)
# library(DT)
# ModuleUi ----------------------------------------------------------------
# UI for the user-CRUD admin module: a radio selector for the action
# (Insert/Update/Delete) plus a server-rendered panel of matching inputs,
# and a data table of the current users.
# `id` is the Shiny module id; all input/output ids are namespaced with it.
mod_crud_ui <- function(id) {
  ns <- NS(id)
  fluidPage(
    # One useShinyjs() per page is sufficient for disabled() inputs; the
    # duplicate call that used to sit inside mainPanel() was redundant.
    shinyjs::useShinyjs(),
    sidebarLayout(
      sidebarPanel(
        radioButtons(ns('method'), 'Choose an Action',
                     choices = c('Insert', 'Update', 'Delete')),
        uiOutput(ns('well_panel'))
      ),
      mainPanel(
        dataTableOutput(ns('State'))
        # ,tableOutput(ns('test'))
      )
    )
  )
}
# ModuleServer ------------------------------------------------------------
# Server half of the user-CRUD module. Encrypts user rows before they hit
# the `emp_user` table (via the project-level encrypt_sql/decrypt_sql
# helpers) and polls the table so the display stays current.
# `connstring` is an open DBI connection.
mod_crud_serve <- function(input, output, session, connstring) {
  # namespace ---------------------------------------------------------------
  ns <- session$ns
  # Time --------------------------------------------------------------------
  # Clock refreshed every 5 s; shown as a disabled input so the user cannot
  # edit the insertion timestamp.
  time_value<-reactive({
    invalidateLater(5000, session)
    Sys.time()
  })
  output$Time <- renderUI({
    disabled(textInput(ns('timeid'), 'Current Time', value = time_value()))
  })
  # all_usernames -----------------------------------------------------------
  # `[, userid]` is data.table-style column selection on the polled table
  # (decrypt_sql appears to return a data.table -- see `:=` in getdata()).
  all_usernames<-reactive({
    getdata()[,userid]
  })
  # selectusername ----------------------------------------------------------
  output$user_list<-renderUI({
    selectInput(ns('select_user'),'Select UserName',
                choices = all_usernames())
  })
  # select roles ----------------------------------------------------------
  output$role_select<-renderUI({
    selectInput(ns('role_list'),'Select Rights',
                choices = c('Admin','Visitor'))
  })
  # Create Encrypted Data Frame ---------------------------------------------
  # One-row encrypted user record for Insert; req() blocks until a password
  # has been typed.
  encrypt_df<-reactive({
    req(input$password)
    somevalue<-data.frame(userid=input$username,
                          password=input$password,
                          date_inserted=input$timeid,
                          role=input$role_list)
    encrypt_sql(table = somevalue,decode_file = 'www/login/user.bin')
  })
  # Same record shape for Update, keyed on the selected existing user.
  encrypt_df_up<-reactive({
    req(input$password_up)
    somevalue<-data.frame(userid=input$select_user,
                          password=input$password_up,
                          date_inserted=input$timeid,
                          role=input$role_list)
    encrypt_sql(table = somevalue,decode_file = 'www/login/user.bin')
  })
  # output$test<-renderTable(encrypt_df())
  # Insert Data -------------------------------------------------------------------
  # Reject duplicate usernames up front, otherwise append the encrypted row.
  observeEvent(input$InsertData,{
    if(input$username %in% all_usernames()){
      showModal(
        modalDialog(
          title = "Alert",
          "Someone has already chosen this UserName",
          easyClose = FALSE
        ))
    }else{
      dbWriteTable(connstring,
                   'emp_user',
                   encrypt_df(),
                   append=TRUE)
      showModal(
        modalDialog(
          title = "Data Inserted",
          "Please Don't click Insert again untill you want to change it!",
          easyClose = FALSE
        )
      )
    }
  })
  # delete Data -------------------------------------------------------------
  # NOTE(review): the SQL is built by string paste; the value is an
  # encrypted token so classic injection is unlikely, but a parameterized
  # statement (dbBind) would be safer. Also, the result object returned by
  # dbSendStatement() is never passed to dbClearResult(), which leaks a
  # result set per click -- verify against the DBI backend in use.
  observeEvent(input$delete_data,{
    dbSendStatement(connstring,
                    paste("delete from emp_user where userid ='"
                          ,encrypt_sql(data.frame(userid=input$select_user),
                                       'www/login/user.bin')[1]
                          ,"'",sep = '')
    )
    showModal(
      modalDialog(
        title = "Data Deleted",
        "Please Don't click Delete again.",
        easyClose = FALSE
      )
    )
  })
  # Update Data -------------------------------------------------------------
  # Update is implemented as delete-then-insert of the re-encrypted row.
  # NOTE(review): the two statements are not wrapped in a transaction, so a
  # failure between them loses the user row -- confirm acceptable.
  observeEvent(input$Update_data,{
    dbSendStatement(connstring,
                    paste("delete from emp_user where userid ='"
                          ,encrypt_sql(data.frame(userid=input$select_user),
                                       'www/login/user.bin')[1]
                          ,"'",sep = '')
    )
    dbWriteTable(connstring,
                 'emp_user',
                 encrypt_df_up(),
                 append=TRUE)
    showModal(
      modalDialog(
        title = "Data Corrected",
        "Please Don't click Update again untill you Want to change it!",
        easyClose = FALSE
      )
    )
  })
  # DataTable ---------------------------------------------------------------
  # Poll the user table every 500 ms; the cheap-ish checkFunc decrypts the
  # table and hashes it via the mean timestamp, re-reading fully only when
  # that changes. NOTE(review): the "check" still decrypts the whole table
  # twice a second -- consider a lighter change indicator (e.g. row count
  # or max(date_inserted) straight from SQL).
  getdata <- reactivePoll(500,session,
                          checkFunc = function(){
                            checkDF<-decrypt_sql(sql_con = 'www/main_data.sqlite'
                                                 ,sql_table = 'emp_user'
                                                 ,decode_file = 'www/login/user.bin')
                            checkDF[,':='(date_inserted=ymd_hms(date_inserted))]
                            return(checkDF[,mean(date_inserted)])
                          },
                          valueFunc = function(){
                            someDF<-decrypt_sql(sql_con = 'www/main_data.sqlite'
                                                ,sql_table = 'emp_user'
                                                ,decode_file = 'www/login/user.bin')
                            someDF[,':='(date_inserted=ymd_hms(date_inserted))]
                            return(someDF)
                          })
  output$State <- renderDataTable(getdata())
  # Render Well Panel -------------------------------------------------------
  # Rebuild the sidebar inputs whenever the chosen action changes.
  observeEvent(input$method,
               {
                 if(input$method == 'Insert'){
                   output$well_panel<-renderUI(
                     wellPanel(
                       uiOutput(ns('Time')),
                       bsTooltip(ns('Time'), 'Time of Entry into the System'),
                       bsTooltip(ns('username'), 'Type a Unique UserName'),
                       textInput(ns('username'), 'UserName', value =
                                   ''),
                       passwordInput(ns('password'),'Enter Password'),
                       bsTooltip(ns('password'), 'Type a Password'),
                       uiOutput(ns('role_select')),
                       bsTooltip(ns('role_select'), 'Assign Roles'),
                       actionButton(ns("InsertData"),"Insert Data")
                     )
                   )
                 }else if(input$method == 'Delete'){
                   output$well_panel<-renderUI(
                     wellPanel(
                       uiOutput(ns('user_list')),
                       actionButton(ns("delete_data"),"Delete Data")
                     ))
                 }else if(input$method == 'Update'){
                   output$well_panel<-renderUI(
                     wellPanel(
                       uiOutput(ns('Time')),
                       bsTooltip(ns('Time'), 'Time of Entry into the System'),
                       uiOutput(ns('user_list')),
                       passwordInput(ns('password_up'),'Enter Password'),
                       bsTooltip(ns('password_up'), 'Type a Password'),
                       uiOutput(ns('role_select')),
                       bsTooltip(ns('role_select'), 'Assign Roles'),
                       actionButton(ns("Update_data"),"Update Data")
                     ))
                 }
               })
  # End ---------------------------------------------------------------------
}
|
0f39d07aa7864d1a3d848ddccd436adc57f63f8f
|
10ddc648602995325c6a467d0b17595cb8766fdb
|
/R/add_meta_element.R
|
1f86469dc76c954ad450a9548512cc27a39b1314
|
[
"MIT"
] |
permissive
|
rich-iannone/hyper
|
cfd4479ee232aadee89477872e3e814061ce3b34
|
ef5f6e4672a4fe6b8d5d9d4875b04d299159c97a
|
refs/heads/master
| 2021-04-29T17:20:31.200263
| 2018-05-14T04:14:13
| 2018-05-14T04:14:13
| 121,667,337
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,158
|
r
|
add_meta_element.R
|
#' Add a meta element to the HTML head
#'
#' Add metadata to the HTML document with a
#' \code{<meta>} element. Supply either \code{charset}, or \code{content}
#' together with exactly one of \code{name} or \code{http_equiv}.
#' @param x The HTML object to modify; must already contain the \code{_dtd},
#'   \code{html} and \code{body} components.
#' @param name Metadata name (pairs with \code{content}).
#' @param content Metadata value for \code{name} or \code{http_equiv}.
#' @param charset Character encoding declaration (e.g. \code{"utf-8"}).
#' @param http_equiv Pragma directive name (pairs with \code{content});
#'   mutually exclusive with \code{name}.
#' @return The modified HTML object, with the \code{<meta>} statement
#'   inserted just before the closing \code{</head>}.
#' @importFrom dplyr select distinct pull filter
#' @importFrom tibble rownames_to_column add_row
#' @export
add_meta_element <- function(x,
                             name = NULL,
                             content = NULL,
                             charset = NULL,
                             http_equiv = NULL) {
  x_in <- list(x)
  # Get the input components to the function
  input_component_list <-
    get_input_component_list(input_list = x_in)
  # If there is no input object, stop function
  if (input_component_list$input_contains_obj_x == FALSE) {
    stop("There is no input element",
         call. = FALSE)
  }
  # Determine whether there is an input component
  # that contains the `_dtd`, `html`, and `body` types
  input_component_lineage <-
    x_in[[1]]$stmts %>%
    dplyr::select(type) %>%
    dplyr::distinct() %>%
    dplyr::pull(type)
  if (!(all(c("_dtd", "html", "body") %in% input_component_lineage))) {
    stop("A meta element can only be added once the main elements of the page are available",
         call. = FALSE)
  }
  # Argument validation; all conditions are scalar, so use `&&` (the
  # elementwise `&` used previously works here but is non-idiomatic).
  if (!is.null(name) && is.null(content)) {
    stop("The `content` is required if `name` is supplied",
         call. = FALSE)
  }
  if (!is.null(http_equiv) && is.null(content)) {
    stop("The `content` is required if `http_equiv` is supplied",
         call. = FALSE)
  }
  if (!is.null(content) && (is.null(name) && is.null(http_equiv))) {
    stop("If `content` is supplied then either `name` or `http_equiv` are required",
         call. = FALSE)
  }
  if (!is.null(name) && !is.null(http_equiv)) {
    stop("Values cannot be supplied for both `name` and for `http_equiv`",
         call. = FALSE)
  }
  # Reject calls that supply nothing to build the tag from; previously such
  # calls fell through and failed later with "object 'tag_attrs' not found".
  if (is.null(charset) && is.null(name) && is.null(http_equiv)) {
    stop("Supply `charset`, or `content` with either `name` or `http_equiv`",
         call. = FALSE)
  }
  # Define attributes for `<meta charset=...>`-type element
  if (!is.null(charset)) {
    tag_attrs <-
      generate_stmt(
        attr_name = "charset",
        attr_value = charset)
  }
  # Define attributes for `<meta name=... content=...>`-type element
  if (!is.null(name) && !is.null(content)) {
    tag_attrs <-
      paste(
        generate_stmt(
          attr_name = "name",
          attr_value = name),
        generate_stmt(
          attr_name = "content",
          attr_value = content),
        collapse = " ")
  }
  # Define attributes for `<meta http-equiv=... content=...>`-type element
  # BUG FIX: this condition previously re-tested `name` (copy/paste error),
  # so http-equiv calls never built their attributes (failing later on an
  # undefined `tag_attrs`) and name/content calls had their attributes
  # clobbered by a statement built from the NULL `http_equiv`.
  if (!is.null(http_equiv) && !is.null(content)) {
    tag_attrs <-
      paste(
        generate_stmt(
          attr_name = "http-equiv",
          attr_value = http_equiv),
        generate_stmt(
          attr_name = "content",
          attr_value = content),
        collapse = " ")
  }
  # Create the opening tag
  opening_tag <-
    create_opening_tag(
      type = "meta",
      attrs_str = tag_attrs)
  # Locate the closing </head> row so the meta element lands inside the head
  head_end_row <-
    x$stmts %>%
    tibble::rownames_to_column() %>%
    dplyr::filter(type == "head" & mode == "close") %>%
    dplyr::pull(rowname) %>%
    as.numeric()
  x$stmts <-
    x$stmts %>%
    tibble::add_row(
      type = "meta",
      mode = "open_close",
      level = 2L,
      text = opening_tag,
      .before = head_end_row)
  x
}
|
8885a9e7980287aa2a8e82fdc39562910da33e4a
|
c091056e779a3ea6686fbf9200a9e1ba1edef1e9
|
/tests/testthat.R
|
d0d93b96c395cadac182d708b749db4e40c319a9
|
[] |
no_license
|
AnthonyTedde/vortex
|
7b2c75575b2fd1c9cbaad933897b75e6c4bf0a84
|
4c4e8e37bcf15aacba6638a2c9f7e420f65c1b8f
|
refs/heads/master
| 2020-07-15T13:57:19.914012
| 2019-09-05T13:16:12
| 2019-09-05T13:16:12
| 205,578,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat bootstrap: discovers and runs every test under
# tests/testthat/ for the vortex package (invoked by R CMD check).
library(testthat)
library(vortex)
test_check("vortex")
|
1a4572adc786e3eb0e06c155ff839d03ea699905
|
dd7b1e7337a4e344d4754f3f036e04c5975df256
|
/man/test_fort_pb.Rd
|
7552e7318bc6e158d1356a4fa1c0d2960806e459
|
[] |
no_license
|
bnaras/SUtools
|
2aa8e9de0c74c3bc122c720c14f6330b38388963
|
2eb1b3a6fe15181f18db7e5d1d0774a106f74c73
|
refs/heads/master
| 2022-11-25T06:06:41.985476
| 2022-11-18T01:17:57
| 2022-11-18T01:17:57
| 132,827,545
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 281
|
rd
|
test_fort_pb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testpb.R
\name{test_fort_pb}
\alias{test_fort_pb}
\title{Call the Fortran to exercise the saved progress bar}
\usage{
test_fort_pb()
}
\description{
Call the Fortran to exercise the saved progress bar
}
|
f4b7d6642b1cc9aa0e9b32f73bb440309600f3c2
|
3d6b591e2176740ba82b29fe56e77f5b48b6839b
|
/Plot3.R
|
63956c4867e53e4ed196f0657095528ad49dd16b
|
[] |
no_license
|
logarithm3/ExData_Plotting1
|
996d03b8d032bec7b7b5f440bc35e3e907fa1a8d
|
7a42571f30234a9f75aa3277ed2efe0cb3773d9d
|
refs/heads/master
| 2021-01-16T19:28:41.505572
| 2015-09-13T20:18:40
| 2015-09-13T20:18:40
| 31,861,685
| 0
| 0
| null | 2015-03-08T19:10:23
| 2015-03-08T19:10:23
| null |
UTF-8
|
R
| false
| false
| 1,499
|
r
|
Plot3.R
|
power<- read.csv("household_power_consumption.txt", sep = ";",dec = ".", numerals = c("allow.loss"))
# make Sub_metering a numeric correct date & time format
power[, "Sub_metering_1"] <- as.numeric(as.character( power[, "Sub_metering_1"] ))
power[, "Sub_metering_2"] <- as.numeric(as.character( power[, "Sub_metering_2"] ))
power[, "Sub_metering_3"] <- as.numeric(as.character( power[, "Sub_metering_3"] ))
power[, "NDateTime"] <- paste(power[, "Date"], power[, "Time"] )
power[, "finalDate"] <- as.POSIXct(power[, "NDateTime"], format = "%d/%m/%Y %H:%M:%S")
# filter dates
powerfiltered <- power[power$finalDate >= as.POSIXct("1/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S")
& power$finalDate < as.POSIXct("3/2/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S"),]
# Save PNG file
png("plot3.png", width = 480, height = 480 )
# plot 1st line
plot(powerfiltered$finalDate,powerfiltered$Sub_metering_1, col = "white", xlab = "",
ylab = "Energy sub metering", ylim=c(0,40))
lines(powerfiltered$finalDate, powerfiltered$Sub_metering_1, col='black', type='l',lwd=1)
# plot 2nd line
lines(powerfiltered$finalDate, powerfiltered$Sub_metering_2, col='red', type='l',lwd=1)
# plot 3rd line
lines(powerfiltered$finalDate, powerfiltered$Sub_metering_3, col='blue', type='l',lwd=1)
legend("topright",pch=25, col= c("black", "red", "blue"),legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
7c11cb7a893124942978cea01c37a4e0d9e582e3
|
68e741c606ca4cae7d64fc8e18c8d18932342559
|
/man/recordTable.Rd
|
544a6ee69705f3152a6d6d5ac39985398ef80baf
|
[] |
no_license
|
cslg094822/camtrapR
|
97e8d6f84ff988635df6e5e8e642f4fd3f586b1f
|
1f971170d58f796d2dc59b1e5d6d1c2388f558cb
|
refs/heads/master
| 2020-03-31T23:55:25.067340
| 2017-10-25T12:04:26
| 2017-10-25T12:04:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,396
|
rd
|
recordTable.Rd
|
\name{recordTable}
\alias{recordTable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Generate a species record table from camera trap images
}
\description{
Generates a record table from camera trap images. Images must be sorted into station directories at least. The function can read species identification from a directory structure (Station/Species or Station/Camera/Species) or from image metadata tags.
}
\usage{
recordTable(inDir,
IDfrom,
cameraID,
camerasIndependent,
exclude,
minDeltaTime = 0,
deltaTimeComparedTo,
timeZone,
stationCol,
writecsv = FALSE,
outDir,
metadataHierarchyDelimitor = "|",
metadataSpeciesTag,
additionalMetadataTags,
removeDuplicateRecords = TRUE
)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{inDir}{
character. Directory containing station directories. It must either contain images in species subdirectories (e.g. inDir/StationA/SpeciesA) or images with species metadata tags (without species directories, e.g. inDir/StationA).
}
\item{IDfrom}{
character. Read species ID from image metadata ("metadata") of from species directory names ("directory")?
}
\item{cameraID}{
character. Where should the function look for camera IDs: 'filename', 'directory'. 'filename' requires images renamed with \code{\link{imageRename}}. 'directory' requires a camera subdirectory within station directories (station/camera/species). Can be missing.
}
\item{camerasIndependent}{
logical. If \code{TRUE}, species records are considered to be independent between cameras at a station.
}
\item{exclude}{
character. Vector of species names to be excluded from the record table
}
\item{minDeltaTime}{
integer. Time difference between records of the same species at the same station to be considered independent (in minutes)
}
\item{deltaTimeComparedTo}{
character. For two records to be considered independent, must the second one be at least \code{minDeltaTime} minutes after the last independent record of the same species (\code{"lastIndependentRecord"}), or \code{minDeltaTime} minutes after the last record (\code{"lastRecord"})?
}
\item{timeZone}{
character. Must be an argument of \code{\link[base]{OlsonNames}}
}
\item{stationCol}{
character. Name of the camera trap station column. Assuming "Station" if undefined.
}
\item{writecsv}{
logical. Should the record table be saved as a .csv?
}
\item{outDir}{
character. Directory to save csv to. If NULL and \code{writecsv = TRUE}, recordTable will be written to \code{inDir}.
}
\item{metadataHierarchyDelimitor}{
character. The character delimiting hierarchy levels in image metadata tags in field "HierarchicalSubject". Either "|" or ":".
}
\item{metadataSpeciesTag}{
character. In custom image metadata, the species ID tag name.
}
\item{additionalMetadataTags}{
character. Additional camera model-specific metadata tags to be extracted. (If possible specify tag groups as returned by \code{\link{exifTagNames}})
}
\item{removeDuplicateRecords}{
logical. If there are several records of the same species at the same station (also same camera if cameraID is defined) at exactly the same time, show only one?
}
}
\details{
The function can handle a number of different ways of storing images, and supports species identification by moving images into species directories as well as metadata tagging. In every case, images need to be stored into station directories. If images are identified by moving them into species directories, a camera directory is optional: "Station/Species/XY.JPG" or "Station/Camera/Species/XY.JPG". Likewise, if images are identified using metadata tagging, a camera directory can be used optionally: "Station/XY.JPG" or "Station/Camera/XY.JPG".
If images are identified by metadata tagging, \code{metadataSpeciesTag} specifies the metadata tag group name that contains species identification tags. \code{metadataHierarchyDelimitor} is "|" for images tagged in DigiKam and images tagged in Adobe Bridge / Lightroom with the default settings. It is only necessary to change it if the default was changed in these programs.
\code{minDeltaTime} is a criterion for temporal independence of species recorded at the same station. Setting it to 0 will make the function return all records. Setting it to a higher value will remove records that were taken less than \code{minDeltaTime} minutes after the last record (\code{deltaTimeComparedTo = "lastRecord"}) or the last independent record (\code{deltaTimeComparedTo = "lastIndependentRecord"}).
\code{camerasIndependent} defines if the cameras at a station are to be considered independent. If \code{TRUE}, records of the same species taken by different cameras are considered independent (e.g. if they face different trails). Use \code{FALSE} if both cameras face each other and are likely to record the same individuals simultaneously.
\code{exclude} can be used to exclude "species" directories containing irrelevant images (e.g. "team", "blank", "unidentified"). \code{stationCol} can be set to match the station column name in the camera trap station table (see \code{\link{camtraps}}).
Many digital images contain Exif metadata tags such as "AmbientTemperature" or "MoonPhase" that can be extracted if specified in \code{metadataTags}. Because these are manufacturer-specific and not standardized, function \code{\link{exifTagNames}} provides a vector of all available tag names. Multiple names can be specified as a character vector as: \code{c(Tag1, Tag2, ...)}. The metadata tags thus extracted may be used as covariates in modelling species distributions.
}
\value{
A data frame containing species records and additional information about stations, date, time and (optionally) further metadata.
}
\section{Warning }{
Custom image metadata must be organised hierarchically (tag group - tag; e.g. "Species" - "Leopard Cat"). Detailed information on how to set up and use metadata tags can be found in \href{https://CRAN.R-project.org/package=camtrapR/vignettes/SpeciesIndividualIdentification.html#metadata-tagging}{vignette 2: Species and Individual Identification}.
Custom image metadata tags must be written to the images. The function cannot read tags from .xmp sidecar files. Make sure you set the preferences accordingly. In DigiKam, go to Settings/Configure digiKam/Metadata. There, make sure "Write to sidecar files" is unchecked.
Please note the section about defining argument \code{timeZone} in the vignette on data extraction (accessible via \code{vignette("DataExtraction")} or online (\url{https://cran.r-project.org/package=camtrapR/vignettes/DataExtraction.html})).
}
\references{
Phil Harvey's ExifTool \url{http://www.sno.phy.queensu.ca/~phil/exiftool/ }
}
\author{
Juergen Niedballa
}
\note{
The results of a number of other function will depend on the output of this function (namely on the arguments \code{exclude} for excluding species and \code{minDeltaTime}/ \code{deltaTimeComparedTo} for temporal independence):
\tabular{l}{
\code{\link{detectionMaps}} \cr
\code{\link{detectionHistory}} \cr
\code{\link{activityHistogram}} \cr
\code{\link{activityDensity}} \cr
\code{\link{activityRadial}} \cr
\code{\link{activityOverlap}} \cr
\code{\link{activityHistogram}} \cr
\code{\link{surveyReport}} \cr
}
}
\examples{
wd_images_ID <- system.file("pictures/sample_images", package = "camtrapR")
if (Sys.which("exiftool") != ""){ # only run these examples if ExifTool is available
rec.db1 <- recordTable(inDir = wd_images_ID,
IDfrom = "directory",
minDeltaTime = 60,
deltaTimeComparedTo = "lastRecord",
writecsv = FALSE,
additionalMetadataTags = c("EXIF:Model", "EXIF:Make")
)
# note argument additionalMetadataTags: it contains tag names as returned by function exifTagNames
rec.db2 <- recordTable(inDir = wd_images_ID,
IDfrom = "directory",
minDeltaTime = 60,
deltaTimeComparedTo = "lastRecord",
exclude = "NO_ID",
writecsv = FALSE,
timeZone = "Asia/Kuala_Lumpur",
additionalMetadataTags = c("EXIF:Model", "EXIF:Make", "NonExistingTag")
)
# note the warning that the last tag in "additionalMetadataTags" was not found
any(rec.db1$Species == "NO_ID")
any(rec.db2$Species == "NO_ID")
#############
# here's how the removeDuplicateRecords argument works
\donttest{ # this is because otherwise the test would run too long to pass CRAN tests
rec.db3a <- recordTable(inDir = wd_images_ID,
IDfrom = "directory",
minDeltaTime = 0,
exclude = "NO_ID",
timeZone = "Asia/Kuala_Lumpur",
removeDuplicateRecords = FALSE
)
rec.db3b <- recordTable(inDir = wd_images_ID,
IDfrom = "directory",
minDeltaTime = 0,
exclude = "NO_ID",
timeZone = "Asia/Kuala_Lumpur",
removeDuplicateRecords = TRUE
)
anyDuplicated(rec.db3a[, c("Station", "Species", "DateTimeOriginal")]) # got duplicates
anyDuplicated(rec.db3b[, c("Station", "Species", "DateTimeOriginal")]) # no duplicates
# after removing duplicates, both are identical:
whichAreDuplicated <- which(duplicated(rec.db3a[, c("Station", "Species", "DateTimeOriginal")]))
all(rec.db3a[-whichAreDuplicated,] == rec.db3b)
}
} else { # show function output if ExifTool is not available
message("ExifTool is not available. Cannot test function")
data(recordTableSample)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
6dcd53b4a73b4a60c84cbf4cc3fb298085ccbdf6
|
cd9a84d431a9b764f733b521d6ea41a644374d00
|
/Support/lib/r/histogram-total-facet-year.r
|
73876c749e8aa3d8fcdba9fa33dc0dd278abea72
|
[
"MIT"
] |
permissive
|
lifepillar/Ledger.tmbundle
|
c1db306068c4c7c31fedec78784ce98c9ae6be0d
|
33a99502db980c538b21e2012ea2efcad3288003
|
refs/heads/master
| 2021-03-12T22:48:36.364836
| 2019-09-11T17:30:42
| 2019-09-11T17:30:42
| 7,275,360
| 9
| 5
| null | 2019-09-11T17:30:49
| 2012-12-21T15:34:40
|
Ruby
|
UTF-8
|
R
| false
| false
| 768
|
r
|
histogram-total-facet-year.r
|
# Histogram of daily ledger totals, faceted by year.
#
# Bin width follows the square-root choice. Other rules considered but not
# used (x = totals, n = number of observations):
#   Freedman–Diaconis: 2 * IQR(x) / ((n + 1)^(1/3))
#   Scott:             3.5 * sd(x) / ((n + 1)^(1/3))
#   Sturges:           (max(x) - min(x)) / ceiling(1 + log2(n))
totals <- ledger_data$total
n_obs <- length(totals)
bw <- ceiling(max(totals) - min(totals)) / (sqrt(n_obs) + 1)
p <- qplot(total, data = ledger_data, geom = "histogram", binwidth = bw, position="identity",
           facets = . ~ year, xlab = commodity, ylab = "Number of days")
suppressMessages(ggsave(file = "histogram-total-facet-year.svg", plot = p, width = 12, height = 4))
|
f55b1d990f900344ef458853b56544f987121308
|
b12919cea8afc353904b3bc75812d609a7f60562
|
/R/PathPolicy.R
|
33d500afe48618f54dc4437203f1de127a0d9df1
|
[] |
no_license
|
kirgush/rlsm
|
22889016243bcb1d4d9fff17c8cbf9a8c4e157ff
|
146deb5fe6dc400c019effce72f6006c21880434
|
refs/heads/master
| 2020-07-10T17:09:59.479888
| 2018-01-18T14:34:31
| 2018-01-18T14:34:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 585
|
r
|
PathPolicy.R
|
## Copyright 2017 <Jeremy Yee> <jeremyyee@outlook.com.au>
## Path policy
################################################################################
## Compute the prescribed decision policy along simulated state paths by
## delegating to the compiled routine `_rlsm_PathPolicy` in the rlsm package.
##
## Arguments (all forwarded unchanged to the C++ routine):
##   path       simulated state paths
##   expected   continuation-value / expected-value representation
##   Reward     reward specification for the decision problem
##   control    control (action) specification
##   basis      basis-coefficient matrix; default is a single constant term
##   basis_type basis expansion type, "power" by default
##   spline     whether spline bases are used
##   knots      knot matrix for spline bases (NA placeholder when unused)
##   Basis      optional user-supplied basis function (empty when unused)
##   n_rbasis   number of radial basis functions
##
## Returns whatever the compiled routine returns (the policy per path/time).
## NOTE(review): exact shapes and semantics are defined in the C++ source --
## confirm against the rlsm package documentation.
PathPolicy <- function(path, expected, Reward, control,
basis = matrix(c(1), nrow = 1),
basis_type = "power", spline = FALSE,
knots = matrix(NA, nrow = 1), Basis = function(){},
n_rbasis = 0) {
.Call('_rlsm_PathPolicy', PACKAGE = 'rlsm', path, expected,
Reward, control, basis, basis_type, spline, knots, Basis, n_rbasis)
}
|
f57d9fe8cca49a57db0a88b05ec4fbe2a120e81d
|
88833352620acb74a45bd1b1f32ed637e8976762
|
/hw1/hw1.R
|
ec7f96b20fa1f2b77115cc6e85a6c6d794a0b1b8
|
[] |
no_license
|
xingcheg/STAT547-FDA
|
ebe5c4f0e28ae8b83619b6d1fce9c2316cbdb7e9
|
fc0d037c2107a9a9b81af866e4655ce18c82781d
|
refs/heads/master
| 2020-07-22T09:55:02.600873
| 2019-12-01T05:42:48
| 2019-12-01T05:42:48
| 207,158,392
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,074
|
r
|
hw1.R
|
# STAT547 (FDA) homework 1: functional-data summaries for the fda `pinch`
# dataset (Problem 1) and Dow stock cumulative returns (Problem 2).
library(fda)
library(ggplot2)
library(plotly)
library(reshape2)
######################## Problem 1 #######################
# `pinch`: pinch-force curves; rows are time points, columns are replicates.
# `pinchtime` (time grid) is also provided by the fda package.
data(pinch)
dim(pinch)
##### (b) #####
# pointwise mean and sd across the 20 replicate curves
pinch_mu <- apply(pinch, 1, mean)
pinch_sd <- apply(pinch, 1, sd)
# reshape wide matrix to long format for ggplot
r_pinch <- melt(pinch)
names(r_pinch) <- c("time", "rep", "value")
r_pinch$time <- rep(pinchtime, 20)
r_pinch$rep <- as.factor(r_pinch$rep)
## raw curves, overlaid with mean (red) and sd (blue)
ggplot(data = r_pinch) +
geom_line(aes(x = time, y = value, group = rep),
alpha = 0.5) +
geom_line(data = data.frame(x = pinchtime, y = pinch_mu),
aes(x=x, y=y), colour = "red", size = 1.5) +
geom_line(data = data.frame(x = pinchtime, y = pinch_sd),
aes(x=x, y=y), colour = "blue", size = 1) +
theme_bw()
## smoothed curves (loess, span = 0.1) with the same mean/sd overlays
ggplot(data = r_pinch) +
geom_smooth(aes(x = time, y = value, group = rep),
alpha = 0.5, span = 0.1, se = FALSE, col = "gray") +
geom_line(data = data.frame(x = pinchtime, y = pinch_mu),
aes(x=x, y=y), colour = "red", size = 1.5) +
geom_line(data = data.frame(x = pinchtime, y = pinch_sd),
aes(x=x, y=y), colour = "blue", size = 1) +
theme_bw()
##### (c) #####
# sample covariance surface of the curves (replicates as observations)
pinch_Cov <- cov(t(pinch))
persp(x=pinchtime, y=pinchtime, z=pinch_Cov)
contour(x=pinchtime, y=pinchtime, z=pinch_Cov)
# interactive plotly version kept for reference (disabled)
#plot_ly(x=pinchtime, y=pinchtime, z=pinch_Cov) %>% add_surface(
# contours = list(
# z = list(
# show=TRUE,
# usecolormap=TRUE,
# highlightcolor="#ff0000",
# project=list(z=TRUE)
# )
# )
#)
######################## Problem 2 #######################
# Daily closing prices for 30 Dow companies; first column is the date.
# NOTE(review): absolute local path -- will only run on the author's machine.
D2 <- read.csv("/Users/apple/Desktop/ISU 2019 fall/STAT547/data/DataSets/Dow_companies_data.csv")
##### (a) #####
# cumulative return (%) of Exxon-Mobil relative to the first trading day
P1 <- D2$XOM[1]
date <- as.Date(as.character(D2$Date), format = "%m/%d/%y")
D_XOM <- data.frame(date = date, v1 = D2$XOM,
v2 = 100*((D2$XOM/P1)-1))
ggplot(data = D_XOM) +
geom_line(aes(x = date, y = v1)) +
ylab("stock value") +
ggtitle("Exxon–Mobil (XOM)") + theme_bw()
ggplot(data = D_XOM) +
geom_line(aes(x = date, y = v2)) +
ylab("cumulative return function (%)") +
ggtitle("Exxon–Mobil (XOM)") + theme_bw()
# final-day cumulative return
D_XOM$v2[length(D_XOM$v2)]
##### (b) #####
# cumulative return functions for all 30 stocks (drop the date column)
D2_1 <- D2[,-1]
D2_2 <- apply(D2_1, 2, FUN = function(x){
return( 100*(x - x[1])/x[1] )
})
r_D2_2 <- melt(D2_2)
names(r_D2_2) <- c("date", "stock", "CRF")
r_D2_2$date <- rep(date, 30)
# cross-sectional mean and median curves (252 trading days)
stock_mean <- apply(D2_2, 1, mean)
stock_med <- apply(D2_2, 1, median)
DD <- data.frame(date = rep(date,2),
CRF = c(stock_mean, stock_med),
label = rep(c("mean", "median"), each = 252))
ggplot(data = r_D2_2) +
geom_line(aes(x = date, y = CRF, group = stock), colour = "gray") +
geom_line(data = DD, aes(x = date, y = CRF, colour = label), size = 1) +
theme_bw()
##### (c) #####
# functional boxplots (fda::fbplot); second call with custom central regions
# is wrapped in try() since some prob/color combinations can error
fbplot(fit = D2_2, ylim = c(-20,90), xlab="Trading day",
ylab = "Cumulative return")
try( fbplot(fit = D2_2, ylim = c(-20,90), prob = c(0.9, 0.6, 0.3),
color = c(8,4,2),
xlab="Trading day",
ylab = "Cumulative return"), silent = TRUE)
|
c585fb5c446e1868ea0c8acb091d21dd5a84d8c9
|
8166e672f3c7a57f8e52bce0f32799e41de165a9
|
/scripts/8_Cereals_Model_Choice.R
|
a69948962b3defe132fa7d74b6fa4806f03b9a8d
|
[] |
no_license
|
mnavascues/DAR_ABC
|
6cde8de4f7d5a91e8299afd84edf9f6fad1983b1
|
23e5d86466bcdb03839527f529651f42d9c12575
|
refs/heads/master
| 2023-07-07T10:02:09.973109
| 2023-06-29T07:40:58
| 2023-06-29T07:40:58
| 545,404,223
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,643
|
r
|
8_Cereals_Model_Choice.R
|
# ---------------------------------------------------------------------------
# ABC random-forest model choice for the cereals dataset: compares the
# "independent", "interdependent" and "parallel" domestication models,
# pairwise and jointly. For each comparison a classification forest is
# trained on the simulated reference tables and used to predict the
# posterior probability of the selected model for the observed summary
# statistics. Trained forests and predictions are cached as .rda files.
# ---------------------------------------------------------------------------
library(abcrf)
source("scripts/sim14c.R")  # provides interpret_K() among other helpers
load(file = "results/num_of_sims.rda")
# load target (i.e. observed) summary statistics
load(file = "results/Cereals_sumstats.rda")
# Drop simulations whose count is missing or exactly 1 in either category;
# those rows yield degenerate summary statistics. Filtering A then B
# reproduces the original sequential subsetting.
clean_reftable <- function(rt) {
  rt <- rt[!is.na(rt$count_A) & rt$count_A != 1, ]
  rt[!is.na(rt$count_B) & rt$count_B != 1, ]
}
# load reference tables for the 3 models; each .rda defines `reftable`
load(file = "results/Cereals_independent_model_reftable.rda")
reftable_independent <- clean_reftable(reftable)
load(file = "results/Cereals_interdependent_model_reftable.rda")
reftable_interdependent <- clean_reftable(reftable)
load(file = "results/Cereals_parallel_model_reftable.rda")
reftable_parallel <- clean_reftable(reftable)
rm(reftable); gc()
# NOTE(review): in each section below, `model` and `sumstats` only exist if
# the `if (!file.exists(...))` branch ran; in a fresh session with cached
# results the err.abcrf() calls would fail (or reuse stale globals from a
# previous section). Confirm the intended interactive workflow.
# test independent vs. parallel
if ( !file.exists("results/Cereals_model_choice_1.rda") ){
  model <- as.factor(c(rep("independent", nrow(reftable_independent)),
                       rep("parallel", nrow(reftable_parallel))))
  sumstats <- rbind(reftable_independent[names(cereals_sumstats_2_categories)],
                    reftable_parallel[names(cereals_sumstats_2_categories)])
  RF_model_choice <- abcrf(model ~ ., data = data.frame(model, sumstats),
                           lda = FALSE, ntree = 1000, paral = TRUE)
  posterior_model_choice <- predict(RF_model_choice, cereals_sumstats_2_categories,
                                    training = data.frame(model, sumstats),
                                    ntree = 1000, paral = TRUE)
  save(RF_model_choice, posterior_model_choice, file = "results/Cereals_model_choice_1.rda")
}
#load(file="results/Cereals_model_choice_1.rda")
posterior_model_choice
# posterior odds (Bayes factor under equal prior weights for the two models)
K <- posterior_model_choice$post.prob / (1 - posterior_model_choice$post.prob)
(K)
interpret_K(K)
err.abcrf(RF_model_choice, data.frame(model, sumstats), paral = TRUE)
# test interdependent vs. parallel
if ( !file.exists("results/Cereals_model_choice_2.rda") ){
  model <- as.factor(c(rep("interdependent", nrow(reftable_interdependent)),
                       rep("parallel", nrow(reftable_parallel))))
  sumstats <- rbind(reftable_interdependent[names(cereals_sumstats_2_categories)],
                    reftable_parallel[names(cereals_sumstats_2_categories)])
  RF_model_choice <- abcrf(model ~ ., data = data.frame(model, sumstats),
                           lda = FALSE, ntree = 1000, paral = TRUE)
  posterior_model_choice <- predict(RF_model_choice, cereals_sumstats_2_categories,
                                    training = data.frame(model, sumstats),
                                    ntree = 1000, paral = TRUE)
  save(RF_model_choice, posterior_model_choice, file = "results/Cereals_model_choice_2.rda")
}
#load(file="results/Cereals_model_choice_2.rda")
posterior_model_choice
K <- posterior_model_choice$post.prob / (1 - posterior_model_choice$post.prob)
(K)
interpret_K(K)
err.abcrf(RF_model_choice, data.frame(model, sumstats), paral = TRUE)
# test independent vs. interdependent
if ( !file.exists("results/Cereals_model_choice_3.rda") ){
  model <- as.factor(c(rep("independent", nrow(reftable_independent)),
                       rep("interdependent", nrow(reftable_interdependent))))
  sumstats <- rbind(reftable_independent[names(cereals_sumstats_2_categories)],
                    reftable_interdependent[names(cereals_sumstats_2_categories)])
  RF_model_choice <- abcrf(model ~ ., data = data.frame(model, sumstats),
                           lda = FALSE, ntree = 1000, paral = TRUE)
  posterior_model_choice <- predict(RF_model_choice, cereals_sumstats_2_categories,
                                    training = data.frame(model, sumstats),
                                    ntree = 1000, paral = TRUE)
  save(RF_model_choice, posterior_model_choice, file = "results/Cereals_model_choice_3.rda")
}
#load(file="results/Cereals_model_choice_3.rda")
posterior_model_choice
K <- posterior_model_choice$post.prob / (1 - posterior_model_choice$post.prob)
(K)
interpret_K(K)
err.abcrf(RF_model_choice, data.frame(model, sumstats), paral = TRUE)
# test 3 models jointly
if ( !file.exists("results/Cereals_model_choice_4.rda") ){
  model <- as.factor(c(rep("independent", nrow(reftable_independent)),
                       rep("interdependent", nrow(reftable_interdependent)),
                       rep("parallel", nrow(reftable_parallel))))
  sumstats <- rbind(reftable_independent[names(cereals_sumstats_2_categories)],
                    reftable_interdependent[names(cereals_sumstats_2_categories)],
                    reftable_parallel[names(cereals_sumstats_2_categories)])
  RF_model_choice <- abcrf(model ~ ., data = data.frame(model, sumstats),
                           lda = FALSE, ntree = 1000, paral = TRUE)
  posterior_model_choice <- predict(RF_model_choice, cereals_sumstats_2_categories,
                                    training = data.frame(model, sumstats),
                                    ntree = 1000, paral = TRUE)
  save(RF_model_choice, posterior_model_choice, file = "results/Cereals_model_choice_4.rda")
}
#load(file="results/Cereals_model_choice_4.rda")
posterior_model_choice
# posterior odds corrected for the unequal prior weight of the selected
# ("parallel") model in the 3-model training set
K <- posterior_model_choice$post.prob * (1 - nrow(reftable_parallel) / length(model)) /
  (1 - posterior_model_choice$post.prob) / (nrow(reftable_parallel) / length(model))
(K)
interpret_K(K)
RF_model_choice$model.rf$confusion.matrix
err.abcrf(RF_model_choice, data.frame(model, sumstats), paral = TRUE)
|
53cbc1b9765149a0f6b0ea3ffa3273c1540adc03
|
b1dae7310bdc730b1968d0fbb0dc545202d205c5
|
/data/tagset_occdesire/tagset_occdesire.R
|
4c45ff49e74fad837e84bac169f7076212e22ab4
|
[] |
no_license
|
cjmarraro/wageGapData
|
0cc3190fec03c920ddd45e2de4fe7e70ed870623
|
b4beed7090ffff25120e47cd382e36aa30299a4e
|
refs/heads/master
| 2020-03-07T07:50:25.686122
| 2018-03-31T01:07:25
| 2018-03-31T01:07:25
| 127,360,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,317
|
r
|
tagset_occdesire.R
|
# Auto-generated NLSY79 extract loader: reads the "occupational expectations
# in 5 years" variable (R0172000) plus case id (R0000100) from a
# space-delimited .dat file, recodes NLSY missing-value codes, and provides
# helpers to attach value labels and question names.
# Set working directory
# setwd()
new_data <- read.table('tagset_occdesire.dat', sep=' ')
names(new_data) <- c('R0000100','R0172000')
# Handle missing values: NLSY negative codes are recoded to NA
new_data[new_data == -1] <- NA # Refused
new_data[new_data == -2] <- NA # Dont know
new_data[new_data == -3] <- NA # Invalid missing
new_data[new_data == -4] <- NA # Valid missing
new_data[new_data == -5] <- NA # Non-interview
# Attach occupational-category value labels to R0172000.
# If there are values not categorized they will be represented as NA.
# NOTE(review): cut() sorts `breaks` internally, and the breaks here are not
# given in ascending order (…,980, 0, 990, 995, 996, 984) while `labels` are
# matched to the *sorted* intervals -- verify the label/interval
# correspondence against the NLSY codebook.
vallabels <- function(data) {
data$R0172000 <- cut(data$R0172000, c(1.0,201.0,260.0,301.0,401.0,580.0,601.0,740.0,801.0,821.0,901.0,980.0,0.0,990.0,995.0,996.0,984.0), labels=c("1 TO 195: 001-195 PROFESSIONAL,TECHNICAL AND KINDRED","201 TO 245: 201-245 MANAGERS,OFFICIALS AND PROPRIETORS","260 TO 285: 260-285 SALES WORKERS","301 TO 395: 301-395 CLERICAL AND KINDRED","401 TO 575: 401-575 CRAFTSMEN,FOREMEN AND KINDRED","580 TO 590: 580-590 ARMED FORCES","601 TO 715: 601-715 OPERATIVES AND KINDRED","740 TO 785: 740-785 LABORERS, EXCEPT FARM","801 TO 802: 801-802 FARMERS AND FARM MANAGERS","821 TO 824: 821-824 FARM LABORERS AND FOREMAN","901 TO 965: 901-965 SERVICE WORKERS, EXCEPT PRIVATE HOUSEHOLD","980 TO 984: 980-984 PRIVATE HOUSEHOLD","0: 00 NONE","990: 990 SAME AS PRESENT JOB","995: 995 DID NOT WORK","996: 996 NEVER WORKED"), right=FALSE)
return(data)
}
# Human-readable variable labels (parallel to the column order)
varlabels <- c( "ID# (1-12686) 79",
"OCC EXPCTNS IN 5 YRS 3-D 79"
)
# Rename columns to question names (qnames) rather than reference numbers
qnames <- function(data) {
names(data) <- c("CASEID_1979","EXP-10A_1979")
return(data)
}
# FIX: the original generated file contained bare '****' separator lines,
# which are not valid R syntax and made the script fail under source().
# They are kept below as comments.
# *******************************************************************************
# Remove the '#' before the following line to create a data file called "categories" with value labels.
#categories <- vallabels(new_data)
# Remove the '#' before the following lines to rename variables using Qnames instead of Reference Numbers
#new_data <- qnames(new_data)
#categories <- qnames(categories)
# Produce summaries for the raw (uncategorized) data file
summary(new_data)
# Remove the '#' before the following lines to produce summaries for the "categories" data file.
#categories <- vallabels(new_data)
#summary(categories)
************************************************************************************************************
|
0f3ef8954e2d0b16c5f5526dbf7659a593016d88
|
63f2f0f81ad5e96d9dc19d082bd22272d3721f83
|
/R/Improvement.R
|
bafba52e4af3fbaf309b5498a5cbc91d03bebbe2
|
[] |
no_license
|
timriffe/TwoSex
|
769547f8391474c91b68f5bd2013a98bac5444c0
|
94009f55f60931ba47639060f4fa300f92148f50
|
refs/heads/master
| 2016-09-09T17:19:36.872870
| 2013-10-05T17:48:05
| 2013-10-05T17:48:05
| 7,383,661
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,892
|
r
|
Improvement.R
|
# Setup for remaining-years ("e_y") population pyramids under hypothetical
# mortality improvement. Loads HMD deaths distributions (dx), death rates
# (mux) and population counts (Px) for the USA and Spain, by sex.
# NOTE(review): hard-coded author-local working directory.
setwd("/home/triffe/git/DISS/")
source("R/UtilityFunctions.R")
source("R/MeanFunctions.R")
# years covered by the HMD inputs for each country
yearsUS <- 1969:2009
yearsES <- 1975:2009
# deaths distributions by single age (rows) and year (columns)
dxmUS <- local(get(load("Data/HMD_dx/dxmUS.Rdata")))
dxfUS <- local(get(load("Data/HMD_dx/dxfUS.Rdata")))
dxmES <- local(get(load("Data/HMD_dx/dxmES.Rdata")))
dxfES <- local(get(load("Data/HMD_dx/dxfES.Rdata")))
# make sum to 1
# (%col% is a project operator from UtilityFunctions.R -- presumably
# column-wise division; confirm against that file)
dxmUS <- dxmUS %col% colSums(dxmUS)
dxfUS <- dxfUS %col% colSums(dxfUS)
dxmES <- dxmES %col% colSums(dxmES)
dxfES <- dxfES %col% colSums(dxfES)
# central death rates (mux) by sex and country
mxmUS <- local(get(load("Data/HMD_mux/muxmUS.Rdata")))
mxfUS <- local(get(load("Data/HMD_mux/muxfUS.Rdata")))
mxmES <- local(get(load("Data/HMD_mux/muxmES.Rdata")))
mxfES <- local(get(load("Data/HMD_mux/muxfES.Rdata")))
# population counts (long-format data frames with Year/Male/Female columns,
# judging from the with(PxUS, Male[Year == 2009]) usage further below)
PxUS <- local(get(load("Data/HMD_Px/PxUS.Rdata")))
PxES <- local(get(load("Data/HMD_Px/PxES.Rdata")))
# Exploratory scratch lines: effect of a 0.3% annual mortality improvement
# on the mean age at death. NOTE(review): `yr` is not defined in this
# script as shown -- these look like interactive leftovers and will error
# if the file is sourced top-to-bottom.
iota <- .997
iota ^ c(1:110)
wmean(.5:110.5,mx2dxHMD(mxmUS[,yr]*iota ^ c(1:111)))
wmean(.5:110.5,mx2dxHMD(mxmUS[,yr]))
# Redistribute a population vector over remaining-years-of-life classes,
# optionally applying a constant annual rate of mortality improvement.
#
# Px   population counts by single age 0..N-1
# mx   death rates by single age, same length N as Px
# iota annual improvement factor: rates k years in the future are scaled by
#      iota^k, so iota = 1 reproduces the fixed-rates (period) case and
#      iota < 1 implies improving mortality.
#
# Returns an N x N matrix whose column i holds the population aged i-1
# spread over its expected years until death (rows, "Ex").
# Depends on the project helper mx2dxHMD() (rates -> deaths distribution).
#
# Generalized from the original, which hard-coded the 111-age HMD layout
# (impr over 1:111, loop over 1:110); behavior is identical when N == 111.
ExpectedDx2 <- function(Px, mx, iota = 1){
  N <- length(mx)
  # cumulative improvement factors for 1..N years into the future
  impr <- iota ^ seq_len(N)
  EDx <- matrix(0, nrow = N, ncol = N,
                dimnames = list(Ex = 0:(N - 1), Age = 0:(N - 1)))
  # Population age loop: ages already lived keep current rates (rep(1, i));
  # future ages are scaled down by the improvement schedule.
  for (i in seq_len(N - 1)){
    dxn <- mx2dxHMD(mx * c(rep(1, i), impr[seq_len(N - i)]))[i:N]
    # renormalize the conditional (remaining-life) distribution
    dxn <- dxn / sum(dxn)
    # distribute each age of population over death times
    EDx[seq_along(dxn), i] <- Px[i] * dxn
  }
  # open age group: all survivors assigned to the first remaining year
  EDx[1, N] <- Px[N]
  EDx[is.na(EDx)] <- 0
  EDx
}
# ---- USA 2009: remaining-years pyramid, fixed rates vs. improvement ------
# population by single age, 2009
Pxm <- with(PxUS, Male[Year == 2009])
Pxf <- with(PxUS, Female[Year == 2009])
# symmetric x-axis percentage labels (males left, females right)
xlabs <- c("1.0%","0.8%","0.6%","0.4%","0.2%","0.0%","0.2%","0.4%","0.6%","0.8%","1.0%")
# population redistributed by expected remaining years (iota = 1: fixed rates)
Males2009 <- rowSums(ExpectedDx2(Pxm, mxmUS[,"2009"]))
Females2009 <- rowSums(ExpectedDx2(Pxf, mxfUS[,"2009"]))
pdf("latex/Figures/exPyramidUSimpr.pdf", height = 5, width = 5)
par(mai = c(.6,.6,.3,.3), xaxs = "i", yaxs = "i")
# empty canvas with custom grid, tick labels and axis titles
plot(NULL, type = "n",axes = FALSE, xlab = "",ylab = "", xlim = c(-1, 1), ylim = c(0,111),
panel.first = list(
rect(-1, 0, 1, 111, col = gray(.95), border = NA),
abline(v = seq(-1, 1, by = .2), col = "white"),
abline(h = seq(0, 110, by = 10), col = "white"),
text(seq(-1, 1, by = .2),0, xlabs, xpd = TRUE, pos = 1, cex = .7),
text(-1, seq(0, 110, by = 10), seq(0, 110, by = 10), pos = 2, xpd = TRUE, cex = .7),
text(-1.17, 116, expression(e[y]), xpd = TRUE, cex = 1),
text(0, -12, "Percentage", xpd = TRUE, cex = 1)
))
# gray pyramid: fixed-rates scenario (males negative, females positive)
barplot(-100 * (Males2009 / sum(Males2009 + Females2009)), border = NA, col = "#44444450",
add = TRUE, horiz = TRUE, space = 0, axes = FALSE,axisnames=FALSE)
barplot(100 * (Females2009 / sum(Males2009 + Females2009)), border = NA, col = "#44444450",
add = TRUE, horiz = TRUE, space = 0, axes = FALSE,axisnames=FALSE)
# outline: improvement scenario (iota = .995 males / .993 females);
# PyramidOutline() is a project plotting helper
PyramidOutline(rowSums(ExpectedDx2(Pxm, mxmUS[,"2009"],.995)),
rowSums(ExpectedDx2(Pxf, mxfUS[,"2009"],.993)), scale =100, border = gray(.2), xpd = TRUE, lwd = .5)
text(c(0.27, 0.44), c(50, 100), c("2009 fixed", expression(e^iota==0.995)),
col = c("white", "black"), cex = 1.2)
# hand-placed leader line from the label to the outline
segments(0.2439776, 98.40316, 0.1937150, 95.93115)
dev.off()
# now spain 2009 (same construction as above, Spanish inputs)
Pxm <- with(PxES, Male[Year == 2009])
Pxf <- with(PxES, Female[Year == 2009])
Males2009 <- rowSums(ExpectedDx2(Pxm, mxmES[,"2009"]))
Females2009 <- rowSums(ExpectedDx2(Pxf, mxfES[,"2009"]))
pdf("latex/Figures/exPyramidESimpr.pdf", height = 5, width = 5)
par(mai = c(.6,.6,.3,.3), xaxs = "i", yaxs = "i")
plot(NULL, type = "n",axes = FALSE, xlab = "",ylab = "", xlim = c(-1, 1), ylim = c(0,111),
panel.first = list(
rect(-1, 0, 1, 111, col = gray(.95), border = NA),
abline(v = seq(-1, 1, by = .2), col = "white"),
abline(h = seq(0, 110, by = 10), col = "white"),
text(seq(-1, 1, by = .2),0, xlabs, xpd = TRUE, pos = 1, cex = .7),
text(-1, seq(0, 110, by = 10), seq(0, 110, by = 10), pos = 2, xpd = TRUE, cex = .7),
text(-1.17, 116, expression(e[y]), xpd = TRUE, cex = 1),
text(0, -12, "Percentage", xpd = TRUE, cex = 1)
))
barplot(-100 * (Males2009 / sum(Males2009 + Females2009)), border = NA, col = "#44444450",
add = TRUE, horiz = TRUE, space = 0, axes = FALSE,axisnames=FALSE)
barplot(100 * (Females2009 / sum(Males2009 + Females2009)), border = NA, col = "#44444450",
add = TRUE, horiz = TRUE, space = 0, axes = FALSE,axisnames=FALSE)
PyramidOutline(rowSums(ExpectedDx2(Pxm, mxmES[,"2009"],.995)),
rowSums(ExpectedDx2(Pxf, mxfES[,"2009"],.993)), scale =100, border = gray(.2), xpd = TRUE, lwd = .5)
text(c(0.27, 0.45), c(50, 100), c("2009 fixed", expression(e^iota==0.995)),
col = c("white", "black"), cex = 1.2)
segments(0.2774860, 97.64254, 0.2037675, 93.83945)
dev.off()
|
469513624a1cf92a6d095f31ce804d16c25e9207
|
d56c35df537cfb4dbf7626b8bf63dec73d5ad3b4
|
/R/rangemap_fig.R
|
148a0159a783d5a91a91720e1558523735e4d69b
|
[] |
no_license
|
vansh0901/rangemap
|
9e733431240fa5a508dd0833b0aff659c97b5277
|
4f61ec2a0e86e3fb5f9633cb7877140886d67cd4
|
refs/heads/master
| 2020-12-09T22:31:16.097220
| 2020-01-13T15:50:04
| 2020-01-13T15:50:04
| 233,435,275
| 0
| 0
| null | 2020-01-12T18:02:23
| 2020-01-12T18:02:23
| null |
UTF-8
|
R
| false
| false
| 26,922
|
r
|
rangemap_fig.R
|
#' Figures of species range maps
#'
#' @description rangemap_fig generates customizable figures of species range maps
#' using objects produced by other functions of this package.
#'
#' @param range an object produced with any of the following functions:
#' \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}},
#' \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}.
#' @param polygons a SpatialPolygon object to be used as base map for plotting the species
#' range. If not provided, a simplified world map will be used.
#' @param add_extent (logical) if TRUE, the extent of occurrence of the species will be
#' added to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}}
#' function and administrative areas were selected only based on names. Default = FALSE.
#' @param add_occurrences (logical) if TRUE, the species occurrence records will be added
#' to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}}
#' function and administrative areas were selected only based on names. Default = FALSE.
#' @param basemap_color color for the basemap (\code{polygons}) to be plotted in the figure.
#' Default = "grey93".
#' @param range_color color for the species \code{range} to be plotted in the figure.
#' Default = "darkgreen".
#' @param extent_color color for the species extent of occurrence to be plotted in the figure.
#' Default = "blue".
#' @param occurrences_color color for the species \code{occurrences} to be plotted in the
#' figure. Default = "yellow".
#' @param grid (logical) if TRUE, labels and grid division ticks will be inserted in
#' \code{grid_sides}. Default = FALSE.
#' @param grid_sides (character) sides in which the labels will be placed in the figure.
#' Options are the same than for other position character indicators (see details). Default =
#' "bottomleft".
#' @param ylabels_position (numeric) if \code{grid} = TRUE, separation (in lines) of y axis
#' labels from the axis. Bigger numbers will increase separation. Default = 1.3.
#' @param legend (logical) if TRUE, a legend of the plotted features will be added to the
#' figure at \code{legend_position}. Default = FALSE.
#' @param legend_position (numeric or character) site in the figure where the legend will
#' be placed. If numeric, vector of leght two indicating x and y coordinates to be used to
#' position the legend. See details for options of character indicators of position. Default =
#' "bottomright".
#' @param northarrow (logical) if TRUE, a simple north arrow will be placed in
#' \code{northarrow_position}. Default = FALSE.
#' @param northarrow_position (numeric or character) site in the figure where the north
#' legend will be placed. If numeric, vector of length two indicating x and y coordinates
#' to be used to position the north arrow. See details for options of character indicators
#' of position. Default = "topright".
#' @param scalebar (logical) if TRUE, a simple scale bar will be inserted in the figure at
#' \code{scalebar_position} with a length of \code{scalebar_length}. Default = FALSE.
#' @param scalebar_position (numeric or character) site in the figure where the scale bar
#' will be placed. If numeric, vector of length two indicating x and y coordinates to be used
#' to position the scale bar. See details for options of character indicators of position.
#' Default = "bottomleft".
#' @param scalebar_length (numeric) length of the scale bar in km. Using entire numbers
#' divisible by two is recommended. Default = 100.
#' @param zoom (numeric) zoom factor when ploting the species range in a map. Default = 1.
#' Values lower than 1 will zoom in into the species range and values bigger than 1 will
#' zoom out. A value of 2 will duplicate the area that the figure is covering.
#' @param save_fig (logical) if TRUE, the figure will be written in the working directory.
#' Default = FALSE.
#' @param name (character) if \code{save_fig} = TRUE, name of the figure to be exported.
#' Default = "range_fig".
#' @param format (character) if \code{save_fig} = TRUE, format in which the figure will be
#' written. Options include "bmp", "png", "jpeg", "tiff", and "pdf". Default = "png".
#' @param resolution (numeric) if \code{save_fig} = TRUE, resolution (ppi) in which the figure
#' will be exported. Default = 300.
#' @param width (numeric) if \code{save_fig} = TRUE, width of the figure in mm. Default = 166.
#' @param height (numeric) if \code{save_fig} = TRUE, height of the figure in mm. Default = 166.
#'
#' @return A figure of the species distributional range in a geographical context, with map
#' components defined by the user.
#'
#' @details Ranges should be generated with any of the functions: \code{\link{rangemap_buff}},
#' \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and
#' \code{\link{rangemap_tsa}}.
#'
#' Position of distinct elements depend on the spatial configuration of the species range.
#' Therefore, their position may need to be changed if the elements are needed. Position
#' options are: "bottomright", "bottomleft", "topleft", and "topright". Numerical descriptions
#' of positions are also allowed.
#'
#' @usage
#' rangemap_fig(range, polygons, add_extent = FALSE, add_occurrences = FALSE,
#' basemap_color = "grey93", range_color = "darkgreen", extent_color = "blue",
#' occurrences_color = "yellow", grid = FALSE, grid_sides = "bottomleft",
#' ylabels_position = 1.3, legend = FALSE, legend_position = "bottomright",
#' northarrow = FALSE, northarrow_position = "topright", scalebar = FALSE,
#' scalebar_position = "bottomleft", scalebar_length = 100, zoom = 1,
#' save_fig = FALSE, name = "range_fig", format = "png", resolution = 300,
#' width = 166, height = 166)
#'
#' @export
#'
#' @importFrom sp CRS spTransform plot
#' @importFrom rnaturalearth ne_countries
#' @importFrom scales alpha
#' @importFrom maps map.scale
#' @importFrom graphics points
#'
#' @examples
#' suppressWarnings({if(!require(spocc)){
#' install.packages("spocc")
#' library(spocc)
#' }})
#'
#' # getting the data from GBIF
#' occs <- occ(query = "Dasypus kappleri", from = "gbif",
#' limit = 1000)$gbif$data[[1]]
#'
#' # keeping only georeferenced records
#' occ_g <- occs[!is.na(occs$latitude) & !is.na(occs$longitude),
#' c("name", "longitude", "latitude")]
#'
#' level <- 0
#' adm <- "Ecuador"
#' dissolve <- FALSE
#' save <- FALSE
#' countries <- c("PER", "BRA", "COL", "VEN", "ECU", "GUF", "GUY", "SUR", "BOL")
#'
#' # creating the species range map
#' range <- rangemap_bound(occurrences = occ_g, country_code = countries, adm_areas = adm,
#' boundary_level = level, dissolve = dissolve, save_shp = save)
#'
#' # arguments for the species range figure
#' extent <- TRUE
#' occ <- TRUE
#' legend <- TRUE
#' north <- TRUE
#'
#' # creating the species range figure
#' rangemap_fig(range, add_extent = extent, add_occurrences = occ,
#' legend = legend, northarrow = north)
#'
#' #dev.off() # for returning to default par settings
# Plot a species range map (with optional extent-of-occurrence polygon,
# occurrence points, grid, legend, north arrow and scale bar), and optionally
# re-render the same figure into an image file.
#
# range        : list returned by rangemap_bound() (or a bare SpatialPolygons*
#                object), holding $Species_range and, optionally,
#                $Extent_of_occurrence and $Species_unique_records.
# polygons     : basemap polygons; defaults to rnaturalearth countries.
# save_fig/...: when save_fig = TRUE the whole figure is drawn a second time
#                into a bmp/png/jpeg/tiff/pdf device (see NOTE below).
# NOTE(review): scalebar_length is accepted but never used in the body.
rangemap_fig <- function(range, polygons, add_extent = FALSE, add_occurrences = FALSE,
basemap_color = "grey93", range_color = "darkgreen", extent_color = "blue",
occurrences_color = "yellow", grid = FALSE, grid_sides = "bottomleft",
ylabels_position = 1.3, legend = FALSE, legend_position = "bottomright",
northarrow = FALSE, northarrow_position = "topright", scalebar = FALSE,
scalebar_position = "bottomleft", scalebar_length = 100, zoom = 1,
save_fig = FALSE, name = "range_fig", format = "png", resolution = 300,
width = 166, height = 166) {
# testing for potential errors
if (missing(range)) {
stop("range must exist. Check the function's help for more details.")
}
# projections
#WGS84 <- sp::CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0") # generic
# NOTE(review): this assumes `range` is a list with $Species_range; when a bare
# SpatialPolygons* is passed (handled below), this `$` access would fail — confirm.
f_proj <- range$Species_range@proj4string
# bringing maps if polygons false
if (missing(polygons)) {
polygons <- rnaturalearth::ne_countries(scale = 50)
}
# reproject the basemap into the range's CRS
polygons <- sp::spTransform(polygons, f_proj)
# getting species range
# (style note: inherits() would be preferable to class() comparisons)
if (class(range) == "list") {
range_sp <- range$Species_range # species range
}
if (class(range) %in% c("SpatialPolygons", "SpatialPolygonsDataFrame")) {
range_sp <- range # species range
}
if (add_extent == TRUE) {
extent_sp <- range$Extent_of_occurrence # species extent of occ
}
if (add_occurrences == TRUE) {
occ_sp <- range$Species_unique_records # species records
}
# plot a background map and the range
## limits of map
# zoom > 1 widens the window symmetrically around the range's bounding box
xbox <- as.numeric(c(range_sp@bbox[1, 1:2]))
ybox <- as.numeric(c(range_sp@bbox[2, 1:2]))
xlim <- c(xbox[1] - ((((xbox[2] - xbox[1]) * zoom) -
(xbox[2] - xbox[1])) / 2),
xbox[2] + ((((xbox[2] - xbox[1]) * zoom) -
(xbox[2] - xbox[1])) / 2))
ylim <- c(ybox[1] - ((((ybox[2] - ybox[1]) * zoom) -
(ybox[2] - ybox[1])) / 2),
ybox[2] + ((((ybox[2] - ybox[1]) * zoom) -
(ybox[2] - ybox[1])) / 2))
## generic plot
par(mar = c(0, 0, 0, 0))
sp::plot(polygons, xlim = xlim, ylim = ylim, col = basemap_color, xaxt = "n", yaxt = "n")
sp::plot(range_sp, col = scales::alpha(range_color, 0.75), border = FALSE, add = TRUE) #plot the species range
box()
# adding other attributes to the map
## extent of occurrence
if (add_extent == TRUE) {
sp::plot(extent_sp, col = scales::alpha(extent_color, 0.4), border = FALSE, add = TRUE)
}
## occurrences
if (add_occurrences == TRUE) {
points(occ_sp, pch = 21, bg = scales::alpha(occurrences_color, 0.8), cex = 0.95) #plot my sample sites
}
## grid: draw tick labels inside the plotting region on the chosen two sides
if (grid == TRUE) {
if (grid_sides == "bottomleft") {
axis(side = 1, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 2, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "bottomright") {
axis(side = 1, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 4, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "topleft") {
axis(side = 3, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 2, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "topright") {
axis(side = 3, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 4, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
}
## north arrow
if (northarrow == TRUE) {
north_arrow(position = northarrow_position, xlim, ylim, Ncex = 0.6)
}
## scale bar: keyword positions are converted to data coordinates;
## a numeric c(x, y) is used directly
if (scalebar == TRUE) {
if (class(scalebar_position) == "character") {
if (scalebar_position == "topright"){
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.80)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 0.93)
}
if (scalebar_position == "topleft") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.02)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 0.93)
}
if (scalebar_position == "bottomleft") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.02)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 0.04)
}
if (scalebar_position == "bottomright") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.80)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 0.04)
}
}else {
xscale <- scalebar_position[1]
yscale <- scalebar_position[2]
}
maps::map.scale(x = xscale, y = yscale, relwidth = 0.1, metric = TRUE,
ratio = F, cex = 0.8)
}
## legend: one branch per combination of extent/occurrence layers
if (legend == TRUE) {
if (class(legend_position) == "character") {
if (add_extent == FALSE & add_occurrences == FALSE) {
legend(legend_position, legend = c("Species range"),
bty = "n", inset = 0.07, pt.bg = scales::alpha(range_color, 0.75),
pch = 22, col = scales::alpha(range_color, 0.75), pt.cex = 2, cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == TRUE) {
legend(legend_position, legend = c("Occurrences", "Species range", "Extent of occurrence"),
bty = "n", inset = 0.07, pch = c(21, 22, 22),
col = c("black", scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(1, 2, 2), cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == FALSE) {
legend(legend_position, legend=c("Species range", "Extent of occurrence"),
bty="n", inset = 0.07, pch = c(22, 22),
col = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(2, 2), cex = 0.8)
}
# NOTE(review): in this occurrences-only branch the labels read
# ("Species range", "Ocurrences") but pch/col/pt.bg/pt.cex are ordered
# point-first as in the extent+occurrences branch — the label order looks
# swapped, and "Ocurrences" is misspelled. Confirm and fix in a code change.
if (add_extent == FALSE & add_occurrences == TRUE) {
legend(legend_position, legend=c("Species range", "Ocurrences"),
bty="n", inset = 0.07, pch = c(21, 22),
col = c("black", scales::alpha(range_color, 0.75)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75)),
pt.cex = c(1, 2), cex = 0.8)
}
}else {
# numeric legend position: c(x, y) in data coordinates
xleg <- legend_position[1]
yleg <- legend_position[2]
if (add_extent == FALSE & add_occurrences == FALSE) {
legend(x = xleg, y = yleg, legend = c("Species range"),
bty = "n", inset = 0.07, pt.bg = scales::alpha(range_color, 0.75),
pch = 22, col = scales::alpha(range_color, 0.75), pt.cex = 2, cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == TRUE) {
legend(x = xleg, y = yleg, legend = c("Occurrences", "Species range", "Extent of occurrence"),
bty = "n", inset = 0.07, pch = c(21, 22, 22),
col = c("black", scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(1, 2, 2), cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == FALSE) {
legend(x = xleg, y = yleg, legend=c("Species range", "Extent of occurrence"),
bty="n", inset = 0.07, pch = c(22, 22),
col = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(2, 2), cex = 0.8)
}
# NOTE(review): same apparent label/pch order mismatch as the keyword branch.
if (add_extent == FALSE & add_occurrences == TRUE) {
legend(x = xleg, y = yleg, legend=c("Species range", "Ocurrences"),
bty="n", inset = 0.07, pch = c(21, 22),
col = c("black", scales::alpha(range_color, 0.75)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75)),
pt.cex = c(1, 2), cex = 0.8)
}
}
}
# saving the figure: re-renders the whole map into a file device.
# NOTE(review): this duplicates the on-screen drawing code above almost
# verbatim (only the scale-bar anchor fractions differ: 0.75/1.01/-0.01
# here vs 0.80/0.93/0.04 above) — a strong candidate for extraction into a
# shared helper in a future refactor.
if (save_fig == TRUE) {
cat("\nWriting figure in working directory.\n")
if (format == "bmp") {
bmp(filename = paste(name, "bmp", sep = "."), width = width, height = height,
units = "mm", res = resolution)
}
if (format == "png") {
png(filename = paste(name, "png", sep = "."), width = width, height = height,
units = "mm", res = resolution)
}
if (format == "jpeg") {
jpeg(filename = paste(name, "jpg", sep = "."), width = width, height = height,
units = "mm", res = resolution)
}
if (format == "tiff") {
tiff(filename = paste(name, "tif", sep = "."), width = width, height = height,
units = "mm", res = resolution)
}
if (format == "pdf") {
pdf(file = paste(name, "pdf", sep = "."), width = width)
}
par(mar = c(0, 0, 0, 0), cex = 0.85)
sp::plot(polygons, xlim = xlim, ylim = ylim, col = basemap_color, xaxt = "n", yaxt = "n")
sp::plot(range_sp, col = scales::alpha(range_color, 0.75), border = FALSE, add = TRUE) #plot the species range
box()
# adding other attributes to the map
## extent of occurrence
if (add_extent == TRUE) {
sp::plot(extent_sp, col = scales::alpha(extent_color, 0.4), border = FALSE, add = TRUE)
}
## occurrences
if (add_occurrences == TRUE) {
points(occ_sp, pch = 21, bg = scales::alpha(occurrences_color, 0.8), cex = 0.95) #plot my sample sites
}
## grid
if (grid == TRUE) {
if (grid_sides == "bottomleft") {
axis(side = 1, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 2, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "bottomright") {
axis(side = 1, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 4, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "topleft") {
axis(side = 3, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 2, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
if (grid_sides == "topright") {
axis(side = 3, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -1.3, 0), cex.axis = 0.7)
axis(side = 4, tcl = 0.3, lwd.ticks = 1,
mgp = c(0, -ylabels_position, 0), cex.axis = 0.7, las = 1)
}
}
## north arrow
if (northarrow == TRUE) {
north_arrow(position = northarrow_position, xlim, ylim, Ncex = 0.6)
}
## scale bar (file-device variant: note the different anchor fractions)
if (scalebar == TRUE) {
if (class(scalebar_position) == "character") {
if (scalebar_position == "topright"){
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.75)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 1.01)
}
if (scalebar_position == "topleft") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.07)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * 1.01)
}
if (scalebar_position == "bottomleft") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.07)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * -0.01)
}
if (scalebar_position == "bottomright") {
xscale <- xlim[1] + ((xlim[2] - xlim[1]) * 0.75)
yscale <- ylim[1] + ((ylim[2] - ylim[1]) * -0.01)
}
}else {
xscale <- scalebar_position[1]
yscale <- scalebar_position[2]
}
maps::map.scale(x = xscale, y = yscale, relwidth = 0.1, metric = TRUE,
ratio = F, cex = 0.8)
}
## legend
if (legend == TRUE) {
if (class(legend_position) == "character") {
if (add_extent == FALSE & add_occurrences == FALSE) {
legend(legend_position, legend = c("Species range"),
bty = "n", inset = 0.07, pt.bg = scales::alpha(range_color, 0.75),
pch = 22, col = scales::alpha(range_color, 0.75), pt.cex = 2, cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == TRUE) {
legend(legend_position, legend = c("Occurrences", "Species range", "Extent of occurrence"),
bty = "n", inset = 0.07, pch = c(21, 22, 22),
col = c("black", scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(1, 2, 2), cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == FALSE) {
legend(legend_position, legend=c("Species range", "Extent of occurrence"),
bty="n", inset = 0.07, pch = c(22, 22),
col = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(2, 2), cex = 0.8)
}
# NOTE(review): same apparent label/pch order mismatch as on-screen branch.
if (add_extent == FALSE & add_occurrences == TRUE) {
legend(legend_position, legend=c("Species range", "Ocurrences"),
bty="n", inset = 0.07, pch = c(21, 22),
col = c("black", scales::alpha(range_color, 0.75)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75)),
pt.cex = c(1, 2), cex = 0.8)
}
}else {
xleg <- legend_position[1]
yleg <- legend_position[2]
if (add_extent == FALSE & add_occurrences == FALSE) {
legend(x = xleg, y = yleg, legend = c("Species range"),
bty = "n", inset = 0.07, pt.bg = scales::alpha(range_color, 0.75),
pch = 22, col = scales::alpha(range_color, 0.75), pt.cex = 2, cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == TRUE) {
legend(x = xleg, y = yleg, legend = c("Occurrences", "Species range", "Extent of occurrence"),
bty = "n", inset = 0.07, pch = c(21, 22, 22),
col = c("black", scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(1, 2, 2), cex = 0.8)
}
if (add_extent == TRUE & add_occurrences == FALSE) {
legend(x = xleg, y = yleg, legend=c("Species range", "Extent of occurrence"),
bty="n", inset = 0.07, pch = c(22, 22),
col = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.bg = c(scales::alpha(range_color, 0.75), scales::alpha(extent_color, 0.4)),
pt.cex = c(2, 2), cex = 0.8)
}
if (add_extent == FALSE & add_occurrences == TRUE) {
legend(x = xleg, y = yleg, legend=c("Species range", "Ocurrences"),
bty="n", inset = 0.07, pch = c(21, 22),
col = c("black", scales::alpha(range_color, 0.75)),
pt.bg = c(scales::alpha(occurrences_color, 0.8), scales::alpha(range_color, 0.75)),
pt.cex = c(1, 2), cex = 0.8)
}
}
}
# close the file device; invisible() keeps the NULL return quiet
invisible(dev.off())
}
}
#' North arrow for map plots
#' @description north_arrow plots a North arrow in user defined places in a map.
#'
#' @param position (character or numeric) position of the North arrow. If character, options
#' are: "topright", "topleft", "bottomleft", or "bottomright". Default = "topright".
#' @param xlim (numeric) vector of two numbers indicating the x limits of the plotting area.
#' @param ylim (numeric) vector of two numbers indicating the y limits of the plotting area.
#' @param Ncex (numeric) cex for the North label (N).
#' @param exporting (logical) whether or not the map will be exported as a figure.
#'
#' @export
#' @importFrom graphics polygon
north_arrow <- function(position = "topright", xlim, ylim, Ncex = 0.6) {
  # Draw a small north arrow on the currently active plot.
  #
  # position : either one of the keywords "topright", "topleft",
  #            "bottomleft", "bottomright", or a numeric c(x, y) anchor in
  #            user coordinates.
  # xlim     : numeric length-2 x limits of the plotting area (used to scale
  #            the arrow geometry).
  # ylim     : numeric length-2 y limits of the plotting area.
  # Ncex     : cex for the "N" label; only consumed by the text() call that
  #            is currently commented out below.
  # Side effect: draws a filled polygon on the active graphics device.
  # Returns polygon()'s value (NULL) — callers use it for the side effect.
  #
  # NOTE(review): an unrecognized keyword falls through with xarrow/yarrow
  # undefined and polygon() then errors; consider match.arg() in future.
  # Fix vs original: use is.character() instead of the fragile
  # class(position) == "character" comparison (inherits-style check).
  if (is.character(position)) {
    if (position == "topright") {
      xpos <- xlim[1] + ((xlim[2] - xlim[1]) * 0.9315)
      ypos <- ylim[1] + ((ylim[2] - ylim[1]) * 1.035)
      xarrow <- c((xlim[1] + ((xlim[2] - xlim[1]) * 0.91)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.93)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.95)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.93)))
      yarrow <- c((ylim[1] + ((ylim[2] - ylim[1]) * 0.97)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 1.015)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.97)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.98)))
    }
    if (position == "topleft") {
      xpos <- xlim[1] + ((xlim[2] - xlim[1]) * 0.0715)
      ypos <- ylim[1] + ((ylim[2] - ylim[1]) * 1.035)
      xarrow <- c((xlim[1] + ((xlim[2] - xlim[1]) * 0.01)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.03)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.05)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.03)))
      yarrow <- c((ylim[1] + ((ylim[2] - ylim[1]) * 0.97)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 1.015)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.97)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.98)))
    }
    if (position == "bottomleft") {
      xpos <- xlim[1] + ((xlim[2] - xlim[1]) * 0.0715)
      ypos <- ylim[1] + ((ylim[2] - ylim[1]) * 0.055)
      xarrow <- c((xlim[1] + ((xlim[2] - xlim[1]) * 0.05)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.07)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.09)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.07)))
      yarrow <- c((ylim[1] + ((ylim[2] - ylim[1]) * -0.01)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.035)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * -0.01)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.00)))
    }
    if (position == "bottomright") {
      xpos <- xlim[1] + ((xlim[2] - xlim[1]) * 0.9315)
      ypos <- ylim[1] + ((ylim[2] - ylim[1]) * 0.055)
      xarrow <- c((xlim[1] + ((xlim[2] - xlim[1]) * 0.91)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.93)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.95)),
                  (xlim[1] + ((xlim[2] - xlim[1]) * 0.93)))
      yarrow <- c((ylim[1] + ((ylim[2] - ylim[1]) * -0.01)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.035)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * -0.01)),
                  (ylim[1] + ((ylim[2] - ylim[1]) * 0.00)))
    }
  } else {
    # numeric c(x, y): arrow hangs down-right from the supplied anchor
    xpos <- position[1] + ((xlim[2] - xlim[1]) * 0.0215)
    ypos <- position[2]
    xarrow <- c(position[1],
                (position[1] + ((xlim[2] - xlim[1]) * 0.02)),
                (position[1] + ((xlim[2] - xlim[1]) * 0.04)),
                (position[1] + ((xlim[2] - xlim[1]) * 0.02)))
    yarrow <- c((position[2] - ((ylim[2] - ylim[1]) * 0.065)),
                (position[2] - ((ylim[2] - ylim[1]) * 0.02)),
                (position[2] - ((ylim[2] - ylim[1]) * 0.065)),
                (position[2] - ((ylim[2] - ylim[1]) * 0.055)))
  }
  # xpos/ypos are kept for the (disabled) "N" label below.
  polygon(xarrow, yarrow, border = "black", col = "grey25")
  #text(x = xpos , y = ypos, cex = 0.6, labels = "N")
}
|
72fea597bb3b57adcb47acaba11a5fe3a11a1ee4
|
c4ec91f2f28d71720effa6bba6c5b18281ecb2da
|
/codes/A2.R
|
a0ab65247bb6454a7543b0e674a6e795342716cd
|
[] |
no_license
|
Marciompi/Statistical-Methods-Project-Business_failure
|
feebf182522611bede065c67a7046da8e3e5e34e
|
387b5cbff173a5038135d14926efd49c7e700eb6
|
refs/heads/master
| 2023-06-29T21:29:42.224649
| 2021-08-05T13:04:24
| 2021-08-05T13:04:24
| 257,052,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,877
|
r
|
A2.R
|
# A2.R setup: subset failed companies and derive period bins.
library(ggplot2)
library(stringr)
library(magrittr) # needs to be run every time you start R and want to use %>%
library(dplyr)
library(RColorBrewer)
# NOTE(review): dfAB is not defined in this script — presumably created by an
# earlier script in the pipeline; confirm load order before running standalone.
dfA2 <- dfAB[dfAB$status=='failed',]
dim(dfA2)
# normalize the bilingual Aosta Valley region label
dfA2$region <- as.character(dfA2$region)
dfA2$'region'[dfA2$'region' == "Valle d'Aosta/Vallée d'Aoste"] <- "Valle d'Aosta"
#dfA2$y <- cut(dfA2$year, 4)
# NOTE(review): "campnies" is a typo in the plot title (runtime string,
# left unchanged here).
hist(dfA2$year, main=str_c('Failed campnies by year'),
xlab="Size",col="#03ab84", breaks = "sturges")
# three failure periods: pre-crisis aftermath, 2011-2014, 2015-2020
dfA2$period = "1990 - 2010"
dfA2$period[dfA2$year>=2011] = "2011 - 2014"
dfA2$period[dfA2$year>=2015] = "2015 - 2020"
table(dfA2$period)
###################################
### SIZE GROUPED BARPLOT EXAPLE ###
###################################
# Grouped barplots of company size shares per period, overall and then per
# legal form / per region, with Kolmogorov–Smirnov statistics comparing each
# period's size distribution against the pooled one.
palette = brewer.pal(3,"Reds")
barplot(prop.table(table(dfA2$period,dfA2$size), 1) , main=str_c('Failed Companies by year'),
xlab="Size", col = palette,legend = rownames(table(dfA2$period,dfA2$size)), beside=TRUE)
# NOTE(review): `form` is not defined until the loop below — this example
# barplot errors (or uses a stale value) when the script is run top-to-bottom.
barplot(prop.table(table(dfA2$year,dfA2$size), 1) , main=str_c('Failed "',form,'" by year'),
xlab="Size", beside=TRUE)
##APPLY THIS IN A FOR LOOP FOR ALL LEGAL FORMS
tests = data.frame(row.names = unique(dfA2$period))
palette = brewer.pal(3,"Reds")
for (form in unique(dfA2$'Legal form')){
# one JPEG per legal form
jpeg(str_c("img/A/2/LegalForm/size/",form,".jpg"))
df = dfA2[dfA2$`Legal form`==form,]
barplot(prop.table(table(df$period,df$size), 1) , main=str_c('Failed "',form,'" by period'),
xlab="Size", col = palette,
legend = rownames(table(df$period,df$size)), beside=TRUE)
dev.off()
# KS statistic of each period's size distribution vs the form's overall one
test1 = ks.test(df$size[df$period == '1990 - 2010'],df$size)
test2 = ks.test(df$size[df$period == '2011 - 2014'],df$size)
test3 = ks.test(df$size[df$period == '2015 - 2020'],df$size)
tests[form] = c(test1$statistic,test2$statistic,test3$statistic)
}
write.csv(tests,'img/A/2/A2legalformSizePeriods.csv')
##APPLY THIS IN A FOR LOOP FOR ALL REGIONS
tests = data.frame(row.names = unique(dfA2$period))
palette = brewer.pal(3,"Oranges")
for (region in unique(dfA2$'region')){
jpeg(str_c("img/A/2/Region/size/",region,".jpg"))
df = dfA2[dfA2$`region`==region,]
barplot(prop.table(table(df$period,df$size), 1) , main=str_c('Failed company in "',region,'" by period'),
xlab="Size", col = palette,
legend = rownames(table(df$period,df$size)), beside=TRUE)
dev.off()
test1 = ks.test(df$size[df$period == '1990 - 2010'],df$size)
test2 = ks.test(df$size[df$period == '2011 - 2014'],df$size)
test3 = ks.test(df$size[df$period == '2015 - 2020'],df$size)
tests[region] = c(test1$statistic,test2$statistic,test3$statistic)
}
write.csv(tests,'img/A/2/A2regionSizePeriods.csv')
###################################
### AGE DISTRIBUTION EXAMPLE ###
###################################
# Age-at-failure density plots by period / by recent year, overall and then
# per legal form and per region, with KS statistics written to CSV.
# Keep a numeric copy of year for range filtering; year itself becomes a
# character so ggplot treats it as a discrete colour.
dfA2$y = dfA2$year
dfA2$year <- as.character(dfA2$year)
# Top-level plots auto-print, so these render as-is.
ggplot(dfA2, aes(x=age, fill=period)) + geom_density(alpha=0.4) #+
#  scale_fill_brewer(palette="Set1")
ggplot(dfA2[dfA2$y>2013 & dfA2$y<2019,], aes(x=age, color=year)) + geom_density(size=1, alpha=0.4) +
ggtitle(str_c('Failed companies in the last five years'))
library(comprehenr)
##APPLY THIS IN A FOR LOOP FOR ALL LEGAL FORMS
# BUG FIX: inside a for loop a ggplot object is NOT auto-printed, so
# ggsave()'s default plot = last_plot() would save a stale (or missing)
# plot. Assign each plot and pass it to ggsave() explicitly.
testslast5 = data.frame(row.names = c(2014,2015,2016,2017,2018))
testsPeriod = data.frame(row.names = unique(dfA2$period))
for (form in unique(dfA2$'Legal form')){
df = dfA2[dfA2$`Legal form`==form,]
p_period <- ggplot(df, aes(x=age, fill=period)) + geom_density(alpha=0.2) +
ggtitle(str_c('Failed "',form,'" by period'))
ggsave(str_c("img/A/2/LegalForm/age/",form,"_period.jpg"), plot = p_period, dpi=300)
p_last5 <- ggplot(df[df$y>2013 & df$y<2019,], aes(x=age, color=year)) + geom_density(size=0.5, alpha=0.4)+
ggtitle(str_c('Failed "',form,'" in the last five years'))
ggsave(str_c("img/A/2/LegalForm/age/",form,"_last5.jpg"), plot = p_last5, dpi=300)
# KS statistics: yearly (2014-2018) and per-period age distributions vs pooled
testlast5 = to_vec(for (y in 2014:2018) ks.test(df$age[df$year == y],df$age)$statistic)
testperiods = to_vec(for (p in unique(dfA2$period)) ks.test(df$age[df$period == p],df$age)$statistic)
testsPeriod[form] = testperiods
testslast5[form] = testlast5
}
write.csv(testsPeriod,'img/A/2/A2legalformAgePeriods.csv')
write.csv(testslast5,'img/A/2/A2legalformAgeLast5.csv')
##APPLY THIS IN A FOR ALL REGIONS
testslast5 = data.frame(row.names = c(2014,2015,2016,2017,2018))
testsPeriod = data.frame(row.names = unique(dfA2$period))
for (region in unique(dfA2$region)){
df = dfA2[dfA2$region==region,]
# (region plotting was already disabled in the original; only KS stats run)
# ggplot(df, aes(x=age, fill=period)) + geom_density(alpha=0.2) +
#  ggtitle(str_c('Failed companies in "',region,'" by period'))
# ggsave(str_c("img/A/2/Region/age/",region,"_period.jpg"),dpi=300)
#
# ggplot(df[df$y>2013 & df$y<2019,], aes(x=age, color=year)) + geom_density(size=0.5, alpha=0.4)+
#  ggtitle(str_c('Failed companies in "',region,'" in the last five years'))
# ggsave(str_c("img/A/2/Region/age/",region,"_last5.jpg"),dpi=300)
#
testlast5 = to_vec(for (y in 2014:2018) ks.test(df$age[df$year == y],df$age)$statistic)
testperiods = to_vec(for (p in unique(dfA2$period)) ks.test(df$age[df$period == p],df$age)$statistic)
testsPeriod[region] = testperiods
testslast5[region] = testlast5
}
write.csv(testsPeriod,'img/A/2/A2regionAgePeriods.csv')
write.csv(testslast5,'img/A/2/A2regionAgeLast5.csv')
###################################
### CAPITAL DISTRIBUTION EXAMPLE ###
###################################
# Capital density plots (clipped to [-100, 100]) by period / by recent year,
# overall and per legal form / per region.
dfA2$y = dfA2$year
dfA2$year <- as.character(dfA2$year)
# Top-level plots auto-print, so these render as-is.
ggplot(dfA2, aes(x=capital, fill=period)) + geom_density(alpha=0.4) + xlim(-100,100)
#  scale_fill_brewer(palette="Set1")
ggplot(dfA2[dfA2$y>2013 & dfA2$y<2019,], aes(x=capital, color=year)) + geom_density(size=1, alpha=0.4)
##APPLY THIS IN A FOR LOOP FOR ALL LEGAL FORMS
# BUG FIX: ggplot objects are not auto-printed inside a for loop, so
# ggsave()'s default plot = last_plot() saved the wrong figure. Assign each
# plot and pass it to ggsave() explicitly.
for (form in unique(dfA2$'Legal form')){
df = dfA2[dfA2$`Legal form`==form,]
p_period <- ggplot(df, aes(x=capital, fill=period)) + geom_density(alpha=0.2)+ xlim(-100,100) +
ggtitle(str_c('Failed "',form,'" by period'))
ggsave(str_c("img/A/2/LegalForm/capital/",form,"_period.jpg"), plot = p_period, dpi=300)
p_last5 <- ggplot(df[df$y>2013 & df$y<2019,], aes(x=capital, color=year)) + xlim(-100,100) + geom_density(size=0.5, alpha=0.4)+
ggtitle(str_c('Failed "',form,'" in the last five years'))
ggsave(str_c("img/A/2/LegalForm/capital/",form,"_last5.jpg"), plot = p_last5, dpi=300)
}
##APPLY THIS IN A FOR ALL REGIONS
for (region in unique(dfA2$region)){
df = dfA2[dfA2$region==region,]
p_period <- ggplot(df, aes(x=capital, fill=period)) + geom_density(alpha=0.2) + xlim(-100,100)+
ggtitle(str_c('Failed companies in "',region,'" by period'))
ggsave(str_c("img/A/2/Region/capital/",region,"_period.jpg"), plot = p_period, dpi=300)
p_last5 <- ggplot(df[df$y>2013 & df$y<2019,], aes(x=capital, color=year))+ xlim(-100,100) + geom_density(size=0.5, alpha=0.4)+
ggtitle(str_c('Failed companies in "',region,'" in the last five years'))
ggsave(str_c("img/A/2/Region/capital/",region,"_last5.jpg"), plot = p_last5, dpi=300)
}
|
d0222f5a7c99d2b84ffd98fdc1e15413b43e883d
|
b9e7c33c9234e5409b249712b987e4d81508ae00
|
/plot3.R
|
979afd9ce7bca82d34094bbb7f0f47bef24a9fe2
|
[] |
no_license
|
skingham/ExData_Plotting1
|
02482d3723adde0d07f219e39c6c87cf8d96c537
|
1abfc6a8d80790d031694dc8ff72d6e270d38c32
|
refs/heads/master
| 2020-12-27T01:03:28.825880
| 2015-01-11T23:21:08
| 2015-01-11T23:21:08
| 29,104,332
| 0
| 0
| null | 2015-01-11T20:12:05
| 2015-01-11T20:12:05
| null |
UTF-8
|
R
| false
| false
| 1,543
|
r
|
plot3.R
|
# plot3.R — energy sub-metering for 2007-02-01 and 2007-02-02.
# Behaviorally identical restyle of the original script.

# Rows of interest: dates are written d/m/Y, so 1/2/2007 and 2/2/2007.
target_rows <- grep('^[1-2]/2/2007', readLines('household_power_consumption.txt'))
first_row <- target_rows[1]
n_rows <- length(target_rows)

# Read only the target slice; skipping past the header means header = FALSE
# and explicit column names/classes.
col_names <- c('Date', 'Time', 'Global_active_power', 'Global_reactive_power', 'Voltage', 'Global_intensity', 'Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3')
col_types <- c('character', 'character', rep('numeric', 7))
pwr <- read.csv('household_power_consumption.txt',
                header = FALSE, sep = ';',
                col.names = col_names,
                colClasses = col_types,
                skip = first_row - 1,
                nrows = n_rows)

# Combine the date and time strings into a timestamp (kept on pwr, as before).
pwr$DateTime <- strptime(paste(pwr$Date, pwr$Time), "%d/%m/%Y %H:%M:%S")
# POSIXct for plotting — the original's data.frame() wrappers performed the
# same POSIXlt -> POSIXct conversion implicitly.
stamps <- as.POSIXct(pwr$DateTime)

# Draw the three sub-metering series plus a legend into plot3.png.
png('plot3.png', width = 480, height = 480, units = "px")
plot(stamps, pwr$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(stamps, pwr$Sub_metering_2, col = "red")
lines(stamps, pwr$Sub_metering_3, col = "blue")
legend("topright", c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lty = c(1, 1), col = c("black", "red", "blue"))
dev.off()
|
a5ba22d242c258add81af6cf807bbe1753e034e2
|
44598c891266cd295188326f2bb8d7755481e66b
|
/DbtTools/Transforms/R/toRange.R
|
6086805086a245973d30b30a6c7568138467362e
|
[] |
no_license
|
markus-flicke/KD_Projekt_1
|
09a66f5e2ef06447d4b0408f54487b146d21f1e9
|
1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4
|
refs/heads/master
| 2020-03-13T23:12:31.501130
| 2018-05-21T22:25:37
| 2018-05-21T22:25:37
| 131,330,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
toRange.R
|
`toRange` <-
function(data, lower, upper){
  # Linearly rescale each column of `data` into the interval [lower, upper].
  #
  # data  : numeric vector or matrix (coerced with as.matrix(); a plain
  #         vector becomes a one-column matrix, and a 1 x d matrix is treated
  #         as a row vector and transposed back on return).
  # lower : one end of the target interval.
  # upper : other end; lower/upper may be supplied in either order.
  # Returns a matrix (or row vector) of the same shape; NAs in the input are
  # ignored when computing per-column minima/maxima but propagate in output.
  data <- as.matrix(data)
  if (lower == upper) {
    # BUG FIX: the original called error(), which does not exist in base R.
    stop('interval width can not be 0!')
  }
  if (lower > upper) {
    # BUG FIX: the original re-assigned `lower <- upper` after overwriting
    # upper, collapsing the interval to a point; swap via the temporary.
    temp <- upper
    upper <- lower
    lower <- temp
  }
  # (an unused early `range <- upper - lower` from the original was removed;
  # the per-column range is computed below)
  n <- dim(data)[1]
  d <- dim(data)[2]
  if ((n == 1) && (d > 1)) { # row vector to column vector
    data <- t(data)
    wasRowVector <- 1
  } else {
    wasRowVector <- 0
  }
  nRow <- dim(data)[1]
  nCol <- dim(data)[2]
  # Per-column minima/maxima (NAs removed), replicated to the full shape.
  min <- apply(data, 2, min, na.rm = TRUE)
  min <- matrix(min, nRow, nCol, byrow = TRUE)
  max <- apply(data, 2, max, na.rm = TRUE)
  max <- matrix(max, nRow, nCol, byrow = TRUE)
  range <- max - min
  # Constant columns (range 0) divide by 1 instead, mapping them to `lower`.
  range[range == 0] <- 1
  scaleData <- (data - min) / range                 # scale to [0, 1]
  scaleData <- lower + scaleData * (upper - lower)  # scale to [lower, upper]
  if (wasRowVector == 1) {
    scaleData <- t(scaleData)
  }
  return(scaleData)
}
|
08eb9eae4c1639d8df0afb80b491cc9cf5be0743
|
91c35aca2930e60581b57b694e85b7040246b96f
|
/public/mosquito/simple28oct13.R
|
bf72e4650c9673b66445c98fed6d609bf08a53e2
|
[] |
no_license
|
waughsh/fdoh
|
b5abe8d2a30d4a6d60427f055cae5f3c71ece1a4
|
30ecab89f5916ba8baa962aafb71cf819cbb61a4
|
refs/heads/master
| 2020-04-05T14:58:12.156605
| 2015-09-09T03:32:54
| 2015-09-09T03:32:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 84,130
|
r
|
simple28oct13.R
|
#SET COLOR PALETTE
# Build the colour palettes and trap-site labels used by downstream plots.
library(RColorBrewer)
symcols <- colorRampPalette(brewer.pal(8, "Set1"))(8)
blackcols <- c("gray90",rep("gray75", 67))
symcols[6] <- "darkred"
# NOTE(review): map() comes from the maps package, which is not loaded in
# this excerpt — presumably library(maps) runs earlier; confirm.
countynames <- map("county", "florida", names=TRUE, plot=FALSE)
countynames
countycols <- colorRampPalette(brewer.pal(9, "Spectral"))(68)
countycols2 <- colorRampPalette(brewer.pal(5, "Pastel2"))(67)
countycols3 <- rep(c("grey30", "grey40", "grey50", "grey10"),30)
# semi-transparent per-year colours; the 6th (current) year is grey
yearcols <- colorRampPalette(brewer.pal(6, "Set1"))(6)
yearcols <- adjustcolor(yearcols, alpha.f=0.8)
yearcols[6] <- adjustcolor("grey", alpha.f=0.8)
alachuacol <- adjustcolor("beige", alpha.f=0.2)
#Set site names
# Human-readable labels for the ten mosquito trap sites.
sitenames <- c("Transfer Station NE 63 Ave.",
"Monteocha Park",
"CR 1491 High Springs",
"LaPierre Home (High Springs)",
"Woodlands Subdivision (Tower Rd.)",
"SE 171 St. (Hawthorne)",
"Bowman Residence (Archer)",
"Micanopy",
"Hawthorne Chix Site",
"UF AES NW 71 St.")
#READ IN DATA
# Weekly mosquito trap-count reports, one CSV per collection date.
# Files up to 2013-07-04 carry 3 junk header rows; later files carry 4,
# hence the differing skip values.
# FIX: header=T replaced with header=TRUE throughout (T is a reassignable
# alias, not a keyword).
# NOTE(review): setwd() makes the script non-portable; kept for
# compatibility with the original workflow.
setwd("C:/Users/BrewJR/Documents/workingdirectory/mosquito")
jun04 <- read.csv("2013-06-04.csv", skip=3, header=TRUE, sep=",")
jun12 <- read.csv("2013-06-12.csv", skip=3, header=TRUE, sep=",")
jun18 <- read.csv("2013-06-18.csv", skip=3, header=TRUE, sep=",")
jun25 <- read.csv("2013-06-25.csv", skip=3, header=TRUE, sep=",")
jul04 <- read.csv("2013-07-04.csv", skip=3, header=TRUE, sep=",")
jul10 <- read.csv("2013-07-10.csv", skip=4, header=TRUE, sep=",")
jul17 <- read.csv("2013-07-17.csv", skip=4, header=TRUE, sep=",")
jul24 <- read.csv("2013-07-24.csv", skip=4, header=TRUE, sep=",")
jul30 <- read.csv("2013-07-30.csv", skip=4, header=TRUE, sep=",")
aug06 <- read.csv("2013-08-06.csv", skip=4, header=TRUE, sep=",")
aug13 <- read.csv("2013-08-13.csv", skip=4, header=TRUE, sep=",")
aug20 <- read.csv("2013-08-20.csv", skip=4, header=TRUE, sep=",")
sep03 <- read.csv("2013-09-03.csv", skip=4, header=TRUE, sep=",")
sep10 <- read.csv("2013-09-10.csv", skip=4, header=TRUE, sep=",")
sep17 <- read.csv("2013-09-17.csv", skip=4, header=TRUE, sep=",")
sep24 <- read.csv("2013-09-24.csv", skip=4, header=TRUE, sep=",")
oct01 <- read.csv("2013-10-01.csv", skip=4, header=TRUE, sep=",")
oct08 <- read.csv("2013-10-08.csv", skip=4, header=TRUE, sep=",")
oct16 <- read.csv("2013-10-16.csv", skip=4, header=TRUE, sep=",")
oct22 <- read.csv("2013-10-22.csv", skip=4, header=TRUE, sep=",")
#Prepare data for merging
# Normalize each weekly frame so they can be rbind()-ed: rename column 1 to
# "site" and the totals column to "total", add a missing X10 column in the
# early files (which had only 9 trap columns), drop the footer/junk rows,
# remove the Code.. column, and tag every row with the collection date.
# NOTE(review): jul24/jul30/aug06 are row-trimmed twice ([-34,] then
# [-c(34,35),]), which removes rows of the already-shifted frame — confirm
# that is intentional and not an over-deletion.
names(jun04)[1] <- "site"
names(jun04)[11] <- "total"
jun04$X10 <- 0
jun04 <- jun04[-34,]
jun04$Code.. <- NULL
jun04$date <- as.Date("2013-06-04")
names(jun12)[1] <- "site"
names(jun12)[11] <- "total"
jun12$X10 <- 0
jun12 <- jun12[-34,]
jun12$Code.. <- NULL
jun12$date <- as.Date("2013-06-12")
names(jun18)[1] <- "site"
names(jun18)[11] <- "total"
jun18$X10 <- 0
jun18 <- jun18[-34,]
jun18$Code.. <- NULL
jun18$date <- as.Date("2013-06-18")
names(jun25)[1] <- "site"
names(jun25)[11] <- "total"
jun25$X10 <- 0
jun25 <- jun25[-34,]
jun25$Code.. <- NULL
jun25$date <- as.Date("2013-06-25")
names(jul04)[1] <- "site"
names(jul04)[11] <- "total"
jul04$X10 <- 0
jul04 <- jul04[-34,]
jul04$Code.. <- NULL
jul04$date <- as.Date("2013-07-04")
# From 2013-07-10 on, the files already have an X10 column, so "total" sits
# in column 12 rather than 11.
names(jul10)[1] <- "site"
names(jul10)[12] <- "total"
jul10 <- jul10[-34,]
jul10$Code.. <- NULL
jul10$date <- as.Date("2013-07-10")
names(jul17)[1] <- "site"
names(jul17)[12] <- "total"
jul17 <- jul17[-34,]
jul17$Code.. <- NULL
jul17$date <- as.Date("2013-07-17")
names(jul24)[1] <- "site"
names(jul24)[12] <- "total"
jul24 <- jul24[-34,]
jul24$Code.. <- NULL
jul24$date <- as.Date("2013-07-24")
jul24 <- jul24[-c(34,35),]
names(jul30)[1] <- "site"
names(jul30)[12] <- "total"
jul30 <- jul30[-34,]
jul30$Code.. <- NULL
jul30$date <- as.Date("2013-07-30")
jul30 <- jul30[-c(34,35),]
names(aug06)[1] <- "site"
names(aug06)[12] <- "total"
aug06 <- aug06[-34,]
aug06$Code.. <- NULL
aug06$date <- as.Date("2013-08-06")
aug06 <- aug06[-c(34,35),]
names(aug13)[1] <- "site"
names(aug13)[12] <- "total"
aug13 <- aug13[-34,]
aug13$Code.. <- NULL
aug13$date <- as.Date("2013-08-13")
# Later files carry extra trailing rows/columns, dropped here so the column
# sets line up for rbind().
names(aug20)[1] <- "site"
names(aug20)[12] <- "total"
aug20 <- aug20[-34,]
aug20$Code.. <- NULL
aug20$date <- as.Date("2013-08-20")
aug20 <- aug20[-c(34:90),]
aug20 <- aug20[,-c(13:21)]
names(sep03)[1] <- "site"
names(sep03)[12] <- "total"
sep03 <- sep03[-34,]
sep03$Code.. <- NULL
sep03$date <- as.Date("2013-09-03")
sep03 <- sep03[-c(34:90),]
names(sep10)[1] <- "site"
names(sep10)[12] <- "total"
sep10 <- sep10[-34,]
sep10$Code.. <- NULL
sep10$date <- as.Date("2013-09-10")
sep10 <- sep10[-c(34:90),]
sep10 <- sep10[,-c(13:21)]
names(sep17)[1] <- "site"
names(sep17)[12] <- "total"
sep17 <- sep17[-34,]
sep17$Code.. <- NULL
sep17$date <- as.Date("2013-09-17")
sep17 <- sep17[-c(34:90),]
sep17 <- sep17[,-c(13:21)]
names(sep24)[1] <- "site"
names(sep24)[12] <- "total"
sep24 <- sep24[-34,]
sep24$Code.. <- NULL
sep24$date <- as.Date("2013-09-24")
sep24 <- sep24[-c(34:90),]
sep24 <- sep24[,-c(13:21)]
names(oct01)[1] <- "site"
names(oct01)[12] <- "total"
oct01 <- oct01[-34,]
oct01$Code.. <- NULL
oct01$date <- as.Date("2013-10-01")
oct01 <- oct01[-c(34:90),]
oct01 <- oct01[,-c(13:21)]
names(oct08)[1] <- "site"
names(oct08)[12] <- "total"
oct08 <- oct08[-34,]
oct08$Code.. <- NULL
oct08$date <- as.Date("2013-10-08")
oct08 <- oct08[-c(34:90),]
oct08 <- oct08[,-c(13:21)]
names(oct16)[1] <- "site"
names(oct16)[12] <- "total"
oct16 <- oct16[-34,]
oct16$Code.. <- NULL
oct16$date <- as.Date("2013-10-16")
oct16 <- oct16[-c(34:90),]
oct16 <- oct16[,-c(13:21)]
names(oct22)[1] <- "site"
names(oct22)[12] <- "total"
oct22 <- oct22[-34,]
oct22$Code.. <- NULL
oct22$date <- as.Date("2013-10-22")
oct22 <- oct22[-c(34:90),]
oct22 <- oct22[,-c(13:21)]
# Stack all cleaned weekly trap sheets into one long data frame, one row
# per (species, week) combination.
weekly_sheets <- list(
  jun04, jun12, jun18, jun25,
  jul04, jul10, jul17, jul24, jul30,
  aug06, aug13, aug20,
  sep03, sep10, sep17, sep24,
  oct01, oct08, oct16, oct22
)
merged <- do.call(rbind, weekly_sheets)
# Coerce the ten per-site count columns plus the row total to integer.
for (count_col in c(paste0("X", seq_len(10)), "total")) {
  merged[[count_col]] <- as.integer(merged[[count_col]])
}
# Rename the "site" column to "mosq": it carries the species label that the
# recode steps below match against.
merged$mosq <- merged$site
merged$site <- NULL
# Seed one classification column per disease/vector grouping.  Each starts
# as a copy of the species label and is collapsed to a category label
# ("wnv"/"nownv", etc.) in the sections that follow.
library(car)
for (class_col in c("vector", "wnv", "slev", "eeev", "chik", "malaria", "dengue")) {
  merged[[class_col]] <- merged$mosq
}
# Populate the vector column: collapse species labels into "vector" /
# "nonvector".  Replaces 40+ sequential car::recode calls (which contained
# several exact duplicates) with two set-membership assignments; a species
# appearing in neither list keeps its raw label, matching recode's
# pass-through behaviour.
vector_spp <- c(
  "Aedes albopictus", "A. aegypti", "A. vexans", "O. triseriatus",
  "Anopheles crucians", "A. quadrimaculatus", "Culex erraticus",
  "C. nigrapalpus", "C. nigripalpus", "C. quinquefasciatus",
  "C. restuans", "C. salinarius", "Culiseta inornata", "Culiseta melanura"
)
nonvector_spp <- c(
  "Ochlerotatus atlanticus", "O. canadensis", "O. dupreei",
  "O. fulvus pallens", "O. infirmatus", "O. mitchellae",
  "O. solicitans", "O. sollicitans", "O. taeniorhynchus",
  "Coquillettidia perturbans", "Mansonia titillans", "M. dyari",
  "Psorophora ciliata", "P. columbiae", "P. ferox", "P. howardii",
  "Toxorhynchites rutilus", "Uranotaenia sapphirina", "U. lowii",
  "Wyeomyia mitchellii", "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Coerce to character so subset-assignment is safe even if the column
# arrived as a factor (car::recode handled factors transparently).
merged$vector <- as.character(merged$vector)
merged$vector[merged$vector %in% vector_spp] <- "vector"
merged$vector[merged$vector %in% nonvector_spp] <- "nonvector"
# Populate the WNV column: collapse species labels into "wnv" / "nownv".
# Replaces 40+ sequential car::recode calls (with duplicates) by set
# membership; unmatched species keep their raw label, as recode did.
# NOTE(review): the original marks Ochlerotatus atlanticus as 'wnv' even
# though the vector column classifies it 'nonvector'; that classification
# is preserved here verbatim -- confirm which is intended.
wnv_spp <- c(
  "Aedes albopictus", "Culex erraticus", "C. nigrapalpus", "C. nigripalpus",
  "C. quinquefasciatus", "C. restuans", "C. salinarius",
  "Ochlerotatus atlanticus", "Coquillettidia perturbans"
)
nownv_spp <- c(
  "A. aegypti", "A. vexans", "O. triseriatus", "Anopheles crucians",
  "A. quadrimaculatus", "Culiseta inornata", "Culiseta melanura",
  "O. canadensis", "O. dupreei", "O. fulvus pallens", "O. infirmatus",
  "O. mitchellae", "O. solicitans", "O. sollicitans", "O. taeniorhynchus",
  "Mansonia titillans", "M. dyari", "Psorophora ciliata", "P. columbiae",
  "P. ferox", "P. howardii", "Toxorhynchites rutilus",
  "Uranotaenia sapphirina", "U. lowii", "Wyeomyia mitchellii",
  "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment (see vector column note).
merged$wnv <- as.character(merged$wnv)
merged$wnv[merged$wnv %in% wnv_spp] <- "wnv"
merged$wnv[merged$wnv %in% nownv_spp] <- "nownv"
# Populate the SLEV column: collapse species labels into "slev" / "noslev".
# Same set-membership rewrite as the other grouping columns; unmatched
# species keep their raw label.
slev_spp <- c(
  "Aedes albopictus", "C. nigrapalpus", "C. nigripalpus",
  "O. taeniorhynchus"
)
noslev_spp <- c(
  "A. aegypti", "A. vexans", "O. triseriatus", "Anopheles crucians",
  "A. quadrimaculatus", "Culex erraticus", "C. quinquefasciatus",
  "C. restuans", "C. salinarius", "Culiseta inornata", "Culiseta melanura",
  "Ochlerotatus atlanticus", "O. canadensis", "O. dupreei",
  "O. fulvus pallens", "O. infirmatus", "O. mitchellae",
  "O. solicitans", "O. sollicitans", "Coquillettidia perturbans",
  "Mansonia titillans", "M. dyari", "Psorophora ciliata", "P. columbiae",
  "P. ferox", "P. howardii", "Toxorhynchites rutilus",
  "Uranotaenia sapphirina", "U. lowii", "Wyeomyia mitchellii",
  "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment.
merged$slev <- as.character(merged$slev)
merged$slev[merged$slev %in% slev_spp] <- "slev"
merged$slev[merged$slev %in% noslev_spp] <- "noslev"
# Populate the EEEV column: collapse species labels into "eeev" / "noeeev".
# Same set-membership rewrite as the other grouping columns; unmatched
# species keep their raw label.
eeev_spp <- c(
  "Aedes albopictus", "A. aegypti", "A. vexans", "O. triseriatus",
  "C. nigrapalpus", "C. nigripalpus", "Culiseta inornata",
  "Culiseta melanura", "O. solicitans", "O. sollicitans",
  "O. taeniorhynchus", "Coquillettidia perturbans"
)
noeeev_spp <- c(
  "Anopheles crucians", "A. quadrimaculatus", "Culex erraticus",
  "C. quinquefasciatus", "C. restuans", "C. salinarius",
  "Ochlerotatus atlanticus", "O. canadensis", "O. dupreei",
  "O. fulvus pallens", "O. infirmatus", "O. mitchellae",
  "Mansonia titillans", "M. dyari", "Psorophora ciliata", "P. columbiae",
  "P. ferox", "P. howardii", "Toxorhynchites rutilus",
  "Uranotaenia sapphirina", "U. lowii", "Wyeomyia mitchellii",
  "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment.
merged$eeev <- as.character(merged$eeev)
merged$eeev[merged$eeev %in% eeev_spp] <- "eeev"
merged$eeev[merged$eeev %in% noeeev_spp] <- "noeeev"
# Populate the chikungunya column: only the two container-breeding Aedes
# are flagged "chik"; every other listed species becomes "nochik".
# Same set-membership rewrite as the other grouping columns; unmatched
# species keep their raw label.
chik_spp <- c("Aedes albopictus", "A. aegypti")
nochik_spp <- c(
  "A. vexans", "O. triseriatus", "Anopheles crucians",
  "A. quadrimaculatus", "Culex erraticus", "C. nigrapalpus",
  "C. nigripalpus", "C. quinquefasciatus", "C. restuans", "C. salinarius",
  "Culiseta inornata", "Culiseta melanura", "Ochlerotatus atlanticus",
  "O. canadensis", "O. dupreei", "O. fulvus pallens", "O. infirmatus",
  "O. mitchellae", "O. solicitans", "O. sollicitans", "O. taeniorhynchus",
  "Coquillettidia perturbans", "Mansonia titillans", "M. dyari",
  "Psorophora ciliata", "P. columbiae", "P. ferox", "P. howardii",
  "Toxorhynchites rutilus", "Uranotaenia sapphirina", "U. lowii",
  "Wyeomyia mitchellii", "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment.
merged$chik <- as.character(merged$chik)
merged$chik[merged$chik %in% chik_spp] <- "chik"
merged$chik[merged$chik %in% nochik_spp] <- "nochik"
# Populate the malaria column: collapse species labels into "malaria" /
# "nomalaria".  Same set-membership rewrite as the other grouping columns.
# NOTE(review): the original recoded 'Culex erraticus' to 'malaria' and
# then, on the very next line, to 'nomalaria'; the second recode was a dead
# no-op (the value had already been changed), so first-wins ("malaria") is
# preserved here.  Also note 'A. quadrimaculatus' is classed 'nomalaria' --
# confirm both classifications with the data owner.
malaria_spp <- c("Anopheles crucians", "Culex erraticus")
nomalaria_spp <- c(
  "Aedes albopictus", "A. aegypti", "A. vexans", "O. triseriatus",
  "A. quadrimaculatus", "C. nigrapalpus", "C. nigripalpus",
  "C. quinquefasciatus", "C. restuans", "C. salinarius",
  "Culiseta inornata", "Culiseta melanura", "Ochlerotatus atlanticus",
  "O. canadensis", "O. dupreei", "O. fulvus pallens", "O. infirmatus",
  "O. mitchellae", "O. solicitans", "O. sollicitans", "O. taeniorhynchus",
  "Coquillettidia perturbans", "Mansonia titillans", "M. dyari",
  "Psorophora ciliata", "P. columbiae", "P. ferox", "P. howardii",
  "Toxorhynchites rutilus", "Uranotaenia sapphirina", "U. lowii",
  "Wyeomyia mitchellii", "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment.
merged$malaria <- as.character(merged$malaria)
merged$malaria[merged$malaria %in% malaria_spp] <- "malaria"
merged$malaria[merged$malaria %in% nomalaria_spp] <- "nomalaria"
# Populate the dengue column: only the two container-breeding Aedes are
# flagged "dengue"; every other listed species becomes "nodengue".
# Same set-membership rewrite as the other grouping columns; unmatched
# species keep their raw label.
dengue_spp <- c("Aedes albopictus", "A. aegypti")
nodengue_spp <- c(
  "A. vexans", "O. triseriatus", "Anopheles crucians",
  "A. quadrimaculatus", "Culex erraticus", "C. nigrapalpus",
  "C. nigripalpus", "C. quinquefasciatus", "C. restuans", "C. salinarius",
  "Culiseta inornata", "Culiseta melanura", "Ochlerotatus atlanticus",
  "O. canadensis", "O. dupreei", "O. fulvus pallens", "O. infirmatus",
  "O. mitchellae", "O. solicitans", "O. sollicitans", "O. taeniorhynchus",
  "Coquillettidia perturbans", "Mansonia titillans", "M. dyari",
  "Psorophora ciliata", "P. columbiae", "P. ferox", "P. howardii",
  "Toxorhynchites rutilus", "Uranotaenia sapphirina", "U. lowii",
  "Wyeomyia mitchellii", "W. vanduzeei", "Other", "Other (Cx coronator)"
)
# Factor-safe coercion before subset-assignment.
merged$dengue <- as.character(merged$dengue)
merged$dengue[merged$dengue %in% dengue_spp] <- "dengue"
merged$dengue[merged$dengue %in% nodengue_spp] <- "nodengue"
# Per-date summary table: one row per collection date, one column per
# disease/vector grouping, each cell the sum of "total" counts over the
# rows flagged with that grouping's positive label on that date.
mosquito <- as.data.frame(unique(sort(merged$date)))
colnames(mosquito) <- "date"
# The positive label equals the column name for every grouping, including
# "vector"; which() is kept so NA matches are dropped exactly as before.
for (grp in c("wnv", "slev", "eeev", "chik", "malaria", "dengue", "vector")) {
  for (i in mosquito$date) {
    mosquito[[grp]][which(mosquito$date == i)] <-
      sum(merged$total[which(merged[[grp]] == grp & merged$date == i)])
  }
}
# Non-vector counts use the complementary label of the vector column.
for (i in mosquito$date) {
  mosquito$nonvector[which(mosquito$date == i)] <-
    sum(merged$total[which(merged$vector == "nonvector" & merged$date == i)])
}
# Grand total per date.
mosquito$total <- mosquito$vector + mosquito$nonvector
###################
# OVERALL SIMPLE SPREADHSEET#####
# Build a two-year comparison table ("trap"): 2012 weekly values read from
# a pre-computed CSV, stacked with 2013 values derived from the per-date
# totals computed above.
par(mfrow=c(1,1))
# NOTE(review): absolute Windows path + setwd() makes this non-portable --
# consider relative paths or passing the directory in.
setwd("C:/Users/BrewJR/Documents/workingdirectory/mosquito")
trap12 <- read.csv("simple.csv", head=T, sep=",")
trap12$date <- as.Date(trap12$date, "%m/%d/%Y")
# Keep only pre-2013 rows from the historical sheet.
trap12 <- trap12[which(trap12$date < as.Date("2013-01-01")),]
trap13 <- as.data.frame(mosquito$date)
colnames(trap13) <- "date"
# Divide per-date sums by 10 -- presumably the number of trap sites
# (columns X1..X10), i.e. mean mosquitoes per trap.  TODO confirm.
for (i in trap13$date){
trap13$total[which(trap13$date == i)] <- mosquito$total[which(mosquito$date == i)]/10}
for (i in trap13$date){
trap13$vector[which(trap13$date == i)] <- mosquito$vector[which(mosquito$date == i)]/10}
trap <- rbind(trap12, trap13)
# Day-of-year ("%j") and year strings used for the plots below.
trap$dayofyear <- format(trap$date, format="%j")
trap$year <- format(trap$date, format= "%Y")
#####
#SITE-SPECIFIC DATA
site <- trap13
#wnv
site$X1wnv <- NA
for (i in site$date){site$X1wnv[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X2wnv <- NA
for (i in site$date){site$X2wnv[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X3wnv <- NA
for (i in site$date){site$X3wnv[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X4wnv <- NA
for (i in site$date){site$X4wnv[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X5wnv <- NA
for (i in site$date){site$X5wnv[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X6wnv <- NA
for (i in site$date){site$X6wnv[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X7wnv <- NA
for (i in site$date){site$X7wnv[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X8wnv <- NA
for (i in site$date){site$X8wnv[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X9wnv <- NA
for (i in site$date){site$X9wnv[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
site$X10wnv <- NA
for (i in site$date){site$X10wnv[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$wnv=="wnv")], na.rm=T)}
#slev
site$X1slev <- NA
for (i in site$date){site$X1slev[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X2slev <- NA
for (i in site$date){site$X2slev[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X3slev <- NA
for (i in site$date){site$X3slev[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X4slev <- NA
for (i in site$date){site$X4slev[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X5slev <- NA
for (i in site$date){site$X5slev[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X6slev <- NA
for (i in site$date){site$X6slev[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X7slev <- NA
for (i in site$date){site$X7slev[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X8slev <- NA
for (i in site$date){site$X8slev[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X9slev <- NA
for (i in site$date){site$X9slev[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
site$X10slev <- NA
for (i in site$date){site$X10slev[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$slev=="slev")], na.rm=T)}
#eeev
site$X1eeev <- NA
for (i in site$date){site$X1eeev[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X2eeev <- NA
for (i in site$date){site$X2eeev[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X3eeev <- NA
for (i in site$date){site$X3eeev[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X4eeev <- NA
for (i in site$date){site$X4eeev[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X5eeev <- NA
for (i in site$date){site$X5eeev[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X6eeev <- NA
for (i in site$date){site$X6eeev[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X7eeev <- NA
for (i in site$date){site$X7eeev[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X8eeev <- NA
for (i in site$date){site$X8eeev[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X9eeev <- NA
for (i in site$date){site$X9eeev[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
site$X10eeev <- NA
for (i in site$date){site$X10eeev[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$eeev=="eeev")], na.rm=T)}
#chik
site$X1chik <- NA
for (i in site$date){site$X1chik[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X2chik <- NA
for (i in site$date){site$X2chik[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X3chik <- NA
for (i in site$date){site$X3chik[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X4chik <- NA
for (i in site$date){site$X4chik[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X5chik <- NA
for (i in site$date){site$X5chik[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X6chik <- NA
for (i in site$date){site$X6chik[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X7chik <- NA
for (i in site$date){site$X7chik[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X8chik <- NA
for (i in site$date){site$X8chik[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X9chik <- NA
for (i in site$date){site$X9chik[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
site$X10chik <- NA
for (i in site$date){site$X10chik[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$chik=="chik")], na.rm=T)}
#malaria
site$X1malaria <- NA
for (i in site$date){site$X1malaria[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X2malaria <- NA
for (i in site$date){site$X2malaria[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X3malaria <- NA
for (i in site$date){site$X3malaria[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X4malaria <- NA
for (i in site$date){site$X4malaria[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X5malaria <- NA
for (i in site$date){site$X5malaria[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X6malaria <- NA
for (i in site$date){site$X6malaria[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X7malaria <- NA
for (i in site$date){site$X7malaria[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X8malaria <- NA
for (i in site$date){site$X8malaria[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X9malaria <- NA
for (i in site$date){site$X9malaria[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
site$X10malaria <- NA
for (i in site$date){site$X10malaria[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$malaria=="malaria")], na.rm=T)}
#dengue
site$X1dengue <- NA
for (i in site$date){site$X1dengue[which(site$date==i)] <- sum(merged$X1[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X2dengue <- NA
for (i in site$date){site$X2dengue[which(site$date==i)] <- sum(merged$X2[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X3dengue <- NA
for (i in site$date){site$X3dengue[which(site$date==i)] <- sum(merged$X3[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X4dengue <- NA
for (i in site$date){site$X4dengue[which(site$date==i)] <- sum(merged$X4[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X5dengue <- NA
for (i in site$date){site$X5dengue[which(site$date==i)] <- sum(merged$X5[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X6dengue <- NA
for (i in site$date){site$X6dengue[which(site$date==i)] <- sum(merged$X6[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X7dengue <- NA
for (i in site$date){site$X7dengue[which(site$date==i)] <- sum(merged$X7[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X8dengue <- NA
for (i in site$date){site$X8dengue[which(site$date==i)] <- sum(merged$X8[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X9dengue <- NA
for (i in site$date){site$X9dengue[which(site$date==i)] <- sum(merged$X9[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
site$X10dengue <- NA
for (i in site$date){site$X10dengue[which(site$date==i)] <- sum(merged$X10[which(merged$date== i &
merged$dengue=="dengue")], na.rm=T)}
# Daily totals of all disease-vector mosquitoes at each trap site (X1-X10).
# Vectorised rewrite of ten duplicated O(n^2) per-date loops: for each row
# of `site`, sum the matching column of `merged` over rows with the same
# date whose vector flag equals "vector". Empty selections sum to 0 and NA
# flags/dates are excluded, matching the old which()-based behaviour.
keep_vector <- !is.na(merged$vector) & merged$vector == "vector"
for (colname in paste0("X", 1:10)) {
  site[[paste0(colname, "vector")]] <- vapply(
    seq_len(nrow(site)),
    function(r) sum(merged[[colname]][keep_vector & merged$date == site$date[r]],
                    na.rm = TRUE),
    numeric(1)
  )
}
################ PLOT ----
par(mfrow = c(2, 1))
# OVERALL: one smoothed curve of average vectors per trap for each year
# 2008-2013; the current year (2013) is drawn with a heavier line.
plot(trap$dayofyear[which(trap$year == 2012)],
     trap$vector[which(trap$year == 2012)],
     type = "n", xaxt = "n", xlab = "Month",
     ylab = "Average vectors per trap", ylim = c(0, 1100))
# NOTE(review): as.Date(x, format = "%j") assumes dayofyear is character;
# confirm it is not numeric (as.Date.numeric would require an origin).
axis(side = 1, at = trap$dayofyear[which(trap$year == 2012)],
     labels = format(as.Date(trap$dayofyear[which(trap$year == 2012)],
                             format = "%j"), format = "%b\n%d"),
     cex.axis = 0.2)
plotyears <- 2013:2008
for (k in seq_along(plotyears)) {
  sel <- which(trap$year == plotyears[k])
  xspline(trap$dayofyear[sel], trap$vector[sel],
          border = yearcols[k], shape = 0.5, lwd = if (k == 1) 3 else 1)
}
# Grey cross-hairs marking the most recent 2013 collection
abline(v = max(trap$dayofyear[which(trap$year == 2013)]),
       lty = 1, lwd = 3, col = adjustcolor("grey", alpha.f = 0.4))
abline(h = max(trap$vector[which(trap$dayofyear ==
                                   max(trap$dayofyear[which(trap$year == 2013)]))]),
       lty = 1, lwd = 3, col = adjustcolor("grey", alpha.f = 0.4))
legend(x = "topright", lty = 1, col = yearcols[1:6],
       legend = as.character(plotyears),
       border = FALSE, bty = "n", cex = 0.6, lwd = c(3, 1, 1, 1, 1, 1))
# For website: 2013 only, smoothing spline plus raw points
plot(trap$dayofyear[which(trap$year == 2013)],
     trap$vector[which(trap$year == 2013)],
     type = "n", xaxt = "n", xlab = "Date", ylab = "Mosquitoes",
     ylim = c(0, 650), main = "Average vector mosquitoes per trap, 2013")
axis(side = 1, at = trap$dayofyear[which(trap$year == 2013)],
     labels = format(as.Date(trap$dayofyear[which(trap$year == 2013)],
                             format = "%j"), format = "%b\n%d"),
     cex.axis = 0.4)
xspline(trap$dayofyear[which(trap$year == 2013)],
        trap$vector[which(trap$year == 2013)],
        border = adjustcolor(yearcols[1], alpha.f = 0.6), shape = 0, lwd = 3)
points(trap$dayofyear[which(trap$year == 2013)],
       trap$vector[which(trap$year == 2013)],
       pch = 16, col = "grey")
points(trap$dayofyear[which(trap$year == 2013)],
       trap$vector[which(trap$year == 2013)],
       pch = 21, col = adjustcolor("red", alpha.f = 0.6))
# BY DISEASE TYPE ----
# One panel per disease: raw capture counts over the season (grey-filled
# points with a coloured outline) plus a smoothing spline.
# Refactors six near-identical plot stanzas into a single loop.
par(mfrow = c(3, 2))
dz_cols <- c("wnv", "slev", "eeev", "chik", "malaria", "dengue")
dz_labels <- c("WNV", "SLEV", "EEEV", "Chikungunya", "Malaria", "Dengue")
for (k in seq_along(dz_cols)) {
  y <- mosquito[[dz_cols[k]]]
  plot(mosquito$date, y, type = "n", xlab = "Date", ylab = "Msqts. captured")
  points(mosquito$date, y, pch = 16, col = adjustcolor("grey", alpha.f = 0.6))
  points(mosquito$date, y, pch = 21, col = yearcols[k])
  xspline(mosquito$date, y,
          border = adjustcolor(yearcols[k], alpha.f = 0.5), shape = 0, lwd = 3)
  title(main = paste("Mosquitoes capable of carrying", dz_labels[k],
                     "captured this summer"),
        cex.main = 0.6)
}
# WNV vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "wnv")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[1])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[1], pch = 21)
}
title(main = "WNV vectors", outer = TRUE, line = -1)
# SLEV vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "slev")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[2])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[2], pch = 21)
}
title(main = "SLEV vectors", outer = TRUE, line = -1)
# EEEV vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "eeev")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[3])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[3], pch = 21)
}
title(main = "EEEV vectors", outer = TRUE, line = -1)
# Chikungunya vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "chik")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[4])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[4], pch = 21)
}
title(main = "Chikungunya vectors", outer = TRUE, line = -1)
# Malaria vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "malaria")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[5])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[5], pch = 21)
}
title(main = "Malaria vectors", outer = TRUE, line = -1)
# Dengue vectors captured: one time-series panel per trap site (X1-X10).
# Refactors ten near-identical plot/lines/points stanzas into a single loop.
par(mfrow = c(5, 2))
for (k in 1:10) {
  v <- site[[paste0("X", k, "dengue")]]
  plot(site$date, v, type = "n", xlab = "Date", ylab = "Mosquitoes",
       main = sitenames[k])
  lines(site$date, v, col = symcols[6])
  points(site$date, v, col = adjustcolor("grey", alpha.f = 0.6), pch = 16)
  points(site$date, v, col = symcols[6], pch = 21)
}
title(main = "Dengue vectors", outer = TRUE, line = -1)
#### ALL DISEASES COMBINED ----
# One panel per trap site with all six disease-vector series overlaid.
# Note: each panel's y-axis is set up from the WNV series only (as in the
# original code), so other series may run off-scale.
symnames <- c("WNV", "SLEV", "EEEV", "Chik", "Malaria", "Dengue")
dzkeys <- c("wnv", "slev", "eeev", "chik", "malaria", "dengue")
par(mfrow = c(5, 2))
for (k in 1:10) {
  plot(site$date, site[[paste0("X", k, "wnv")]], type = "n",
       xlab = "Date", ylab = "Mosquitoes", main = sitenames[k])
  for (j in seq_along(dzkeys)) {
    lines(site$date, site[[paste0("X", k, dzkeys[j])]], col = symcols[j])
  }
  legend(x = "topleft", lty = 1, col = symcols[1:6], legend = symnames,
         border = FALSE, bty = "n", cex = 0.25)
}
title(main = "All vectors", outer = TRUE, line = -1)
#### BEGIN MAPPING: spatial summaries of trap collections ----
par(mfrow=c(1,1))
# Base-map packages; tile/projection packages (rJava, OpenStreetMap, rgdal)
# are attached further down, just before the tile-based maps that need them.
library(maps)
library(mapdata)
library(maptools)
library(RColorBrewer)
# READ TRAP SITES (lat/long of each collection location)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific;
# traplatlong.csv below is read relative to this directory.
setwd("C:/Users/BrewJR/Documents/workingdirectory/mosquito")
traplatlong <- read.csv("traplatlong.csv", header=TRUE, sep=",")
#TRAP COLLECTION SITES
#Make colors
# Build the map-label vector used by text() in the map blocks below:
# blank out two crowded labels (NA) and shorten/replace several others.
# NOTE(review): this shadows base::names() as a variable; names(site) etc.
# still resolve to the function in call position, but a rename would be safer.
names <- traplatlong$name
names <- as.character(names)
names[4] <- NA
names[9] <- "Hawthorne"
names[6] <- NA
names[3] <- "High Springs"
names[7] <- "Archer"
library(rJava)
library(OpenStreetMap)
library(rgdal)
# Experimental basemap downloads kept for reference (not run):
#mapsat <- openmap(c(30.05, -82.8), c(29.3,-81.9), type="bing")
#plot(mapsat)
#mapcit <- openmap(c(30.05, -82.8), c(29.3,-81.9))
#plot(mapcit)
#CONVERT TO LAT LONG PROJECTION (Raster?)
#mapsatll <- openproj(mapsat, projection = "+proj=longlat")
#plot(mapsatll, raster=TRUE)
#DISEASE SPECIFIC MAPS
# DISEASE-SPECIFIC MAPS ----
# One Florida county map per disease. Each trap site's dot is scaled by the
# most recent collection relative to that site's long-run mean
# (cex = 2 * latest / mean), so "normal" sites plot at cex = 2.
# Refactors six near-identical map stanzas into a single loop.
par(mfrow = c(3, 2))
par(oma = c(1, 1, 1, 1))
par(mar = c(2, 1, 1, 1))
mapkeys <- c("wnv", "slev", "eeev", "chik", "malaria", "dengue")
maptitles <- c("WNV vectors", "SLEV vectors", "EEEV vectors",
               "Chikungunya vectors", "Malaria vectors", "Dengue vectors")
for (k in seq_along(mapkeys)) {
  # Columns of `site` for this disease (e.g. X1wnv ... X10wnv);
  # regexpr(...) > 1 requires the disease suffix after the site prefix.
  selcols <- which(regexpr(mapkeys[k], names(site)) > 1)
  map("county", "florida", fill = TRUE, col = c(alachuacol, countycols3),
      xlim = c(-82.8, -81.9), ylim = c(29.3, 30.05))
  points(traplatlong$long, traplatlong$lat,
         pch = 16, col = adjustcolor(symcols[k], alpha.f = 0.5),
         cex = 2 * as.numeric(as.vector(
           site[which(site$date == max(site$date)), selcols] /
             apply(site[, selcols], 2, mean))))
  text(traplatlong$long, traplatlong$lat, labels = names, cex = 0.4,
       adj = c(0.5, 2), las = 3)
  title(maptitles[k])
  legend(-82.7, 29.5, ncol = 1,
         legend = c("below normal", "normal", "above normal"),
         pch = 16, col = adjustcolor(symcols[k], alpha.f = 0.5),
         pt.cex = c(1, 2, 4), cex = 0.4, x.intersp = 1.3, y.intersp = 1.2,
         border = TRUE, bty = "n")
}
# `line` (not `lines`) is the argument that positions the outer title;
# the original `lines=-1` was silently ignored as a graphical parameter.
title(main = "Disease vectors by location", line = -1, outer = TRUE)
#####
#### PRETTIER MAPS
###WATER COLOR
par(mfrow=c(1,1))
joewatercolor <- openmap(c(29.9, -82.65), c(29.4,-82.0),
type="stamen-watercolor")
joemapwatercolor <- openproj(joewatercolor, projection = "+proj=longlat")
plot(joemapwatercolor, raster=TRUE)
points(traplatlong$long, traplatlong$lat,
pch=16, col=adjustcolor(symcols[1], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("wnv",names(site)) >1)]/
apply(site[,which(regexpr("wnv",names(site)) >1)],2, mean))))
points(traplatlong$long+0.008, traplatlong$lat,
pch=16, col=adjustcolor(symcols[2], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("slev",names(site)) >1)]/
apply(site[,which(regexpr("slev",names(site)) >1)],2, mean))))
points(traplatlong$long, traplatlong$lat+0.0085,
pch=16, col=adjustcolor(symcols[3], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("eeev",names(site)) >1)]/
apply(site[,which(regexpr("eeev",names(site)) >1)],2, mean))))
points(traplatlong$long, traplatlong$lat+0.001,
pch=16, col=adjustcolor(symcols[4], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("chik",names(site)) >1)]/
apply(site[,which(regexpr("chik",names(site)) >1)],2, mean))))
points(traplatlong$long+0.001, traplatlong$lat,
pch=16, col=adjustcolor(symcols[5], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("malaria",names(site)) >1)]/
apply(site[,which(regexpr("malaria",names(site)) >1)],2, mean))))
points(traplatlong$long-0.002, traplatlong$lat+0.0023,
pch=16, col=adjustcolor(symcols[6], alpha.f=0.5),
cex= 4*as.numeric(as.vector(site[which(site$date== max(site$date)),which(regexpr("dengue",names(site)) >1)]/
apply(site[,which(regexpr("dengue",names(site)) >1)],2, mean))))
#text(traplatlong$long, traplatlong$lat,
# labels=names,
# cex=0.9,
#adj=c(0.5,2),
# las=3)
legend(-82.1,29.49,
ncol=1,
legend=c("below normal", "normal",
"above normal"),
pch=16,
col=adjustcolor("black", alpha.f=0.5),
pt.cex=c(2,4,8),
cex=0.6,
x.intersp=2, y.intersp=1,
bor=T,bty="n",)
legend(-82.6,29.49,
ncol=2,
legend=symnames,
pch=16,
col=adjustcolor(symcols, alpha.f=0.5),
cex=0.8,
pt.cex=3,
x.intersp=2, y.intersp=1,
bor=T,bty="n",)
#Breakdown by mosquito type
dennis2 <- as.data.frame(matrix(rep(999, 700),
nrow=length(unique(sort(merged$date)))))
colnames(dennis2)[1] <- "date"
colnames(dennis2)[2:35] <- unique(sort(as.character(merged$mosq)))
dennis2$date <- unique(sort(merged$date))
for (i in dennis2$date){
for (j in colnames(dennis2)[2:35]){
dennis2[which(dennis2$date == i), j] <-
sum(merged$total[which(merged$date == i &
merged$mosq == j)])
}
}
library(splines)
zapcols <- colorRampPalette(brewer.pal(9, "Set1"))(34)
plot(dennis2$date, dennis2[,27], type="n", xlab="Date", ylab="Mosquitoes",
main="Mosquito types this summer")
for (i in 2:34){
xspline(dennis2$date, dennis2[,i], shape=0.5, border=adjustcolor(
zapcols[i], alpha.f=0.5),
lwd=3)}
legend(x="topleft", lwd=2, col=zapcols[2:34],
legend=colnames(dennis2)[2:34],
cex=0.5, y.intersp=0.8)
text(x=max(dennis2$date), y=1000, labels="M. dyari", cex=0.5)
text(x=dennis2$date[14]+5, y=2500, labels="M. dyari", cex=0.5)
text(x=max(dennis2$date-15), y=2500, labels="Culex erraticus", cex=0.5)
text(x=max(dennis2$date-75), y=3300, labels="C. nigripalpus", cex=0.5)
text(x=max(dennis2$date-75), y=2400, labels="P. colubmiae", cex=0.5)
text(x=max(dennis2$date-70), y=2600, labels="M. dyari", cex=0.5)
text(x=max(dennis2$date), y=dennis2[which(dennis2$date==max(dennis2$date)),2:35],
labels=names(dennis2[which(dennis2$date==max(dennis2$date)),2:35]),
cex=0.1)
text(x=min(dennis2$date), y=dennis2[which(dennis2$date==min(dennis2$date)),2:35],
labels=names(dennis2[which(dennis2$date==min(dennis2$date)),2:35]),
cex=0.1)
text(x=dennis2$date[11], y=dennis2[which(dennis2$date==dennis2$date[11]),2:35],
labels=names(dennis2[which(dennis2$date==dennis2$date[11]),2:35]),
cex=0.1)
library(xtable)
dennis2$date <- as.character(dennis2$date)
dennis3 <- xtable(dennis2[c(1,5,9,12,15,27)], tabular.environment="longtable", floating=FALSE)
print(dennis3)
save.image("~/workingdirectory/mosquito/mosq28oct13.RData")
write.csv(dennis2, "mosquitospecies.csv")
|
7c2525e622b15e712edf6a14355a6a171ed37928
|
aed2befcda06b44324e887a55c289e32e5c7c8ae
|
/meetupr.R
|
fefa23500f9a336298f5c1a67536500d7e6896e6
|
[] |
no_license
|
benubah/gsoc-test
|
8f95c3074c15df91e68c2d9c13d8d7840ce89314
|
39db428b67e5b6972377aa3ec938a54277ea7291
|
refs/heads/master
| 2021-04-12T11:30:51.323308
| 2019-04-01T02:06:12
| 2019-04-01T02:06:12
| 126,233,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
meetupr.R
|
# Retrieve R user groups from meetup.com via the meetupr package.
# NOTE: the two search terms below return different numbers of groups.
#
# Put your meetup.com API key here before running (left blank on purpose;
# do not commit a real key to version control).
Sys.setenv(MEETUP_KEY = "")

# BUG FIX: Sys.getenv() takes the variable *name* as a string; the bare
# symbol MEETUP_KEY would raise "object 'MEETUP_KEY' not found".
api_key <- Sys.getenv("MEETUP_KEY")

url <- "R-User-Group"
groups <- find_groups(url, api_key = api_key)

url2 <- "r-project-for-statistical-computing"
groups2 <- find_groups(url2, api_key = api_key)
|
44e5516102483c8edc625ae7932dcaf937b5b5f3
|
d3d250eb0ee72ff7dd780a4b04aa74cb5e194954
|
/R/thermalTime.R
|
40052d225974503411b7170e4425bba8b82f3cbb
|
[] |
no_license
|
sanchezi/openSilexStatR
|
1387df2f993435a1754cec4d400c2b75a09af728
|
fb00e867c7a503aa07cf4c809b5846a5c7b99b01
|
refs/heads/master
| 2022-12-10T01:21:50.656609
| 2020-09-04T13:00:05
| 2020-09-04T13:00:05
| 269,555,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,608
|
r
|
thermalTime.R
|
#-------------------------------------------------------------------------------
# Program: thermalTime.R
# Objective: calculation of thermal time according to several methods
# Author: I.Sanchez
# Creation: 12/12/2016
# Update : 01/09/2020
#-------------------------------------------------------------------------------
##' a function to calculate thermal time
##' @description this function calculates the thermal time for an experiment
##' according to several methods
##' @param datain input dataframe of meteo data from phis web service
##' @param inSpecie character, studied specie
##' @param method character, a method of thermal time's calculation ("parent","baseline")
##' @param inDateS a date of sowing or thinning etc... start event ("YYYY-MM-DD")
##' @param inDateE a date of harvesting ("YYYY-MM-DD")
##' @param inTemp numeric, a baseline temperature for baseline's method
##' @details Parent et. al. (2010). Modeling temperature-compensated physiological rates,
##' based on the co-ordination of responses to temperature of developmental processes.
##' Journal of Experimental Botany. 61 (8):2057-2069
##'
##' if the Parent's model is chosen inTemp must be NULL
##'
##' The input dataframe is extracted from phis web service (getEnvironment function)
##' and is structured as follow: date, value, sensor, codeVariable and facility
##' @return a dataframe
##'
##' @importFrom lubridate yday ymd
##' @importFrom dplyr mutate arrange summarise filter n
##'
##' @examples
##' # Example for the model of Parent 2010
##' data(meteoDT)
##' head(meteoDT)
##' test<-thermalTime(datain=meteoDT,inSpecie="maize",method="parent",
##' inDateS="2017-04-02",
##' inDateE="2017-06-15",inTemp=NULL)
##' head(test)
##' @export
# Compute cumulative thermal time per day from quarter-hourly temperature
# records, using either Parent et al. (2010) ("parent") or a simple
# baseline-temperature sum ("baseline"). Returns a data frame with one row
# per day (columns Day and TT).
# NOTE(review): this body calls group_by, ymd_hms and left_join, which are
# not listed in the roxygen @importFrom tags above — confirm the package
# NAMESPACE covers them.
thermalTime<-function(datain,inSpecie,method,inDateS=NULL,inDateE=NULL,inTemp=NULL){
#---------------------------------------------------------
#-- 1/ datamanagement of meteo data from phis-si
#---------------------------------------------------------
myMeteo<-as.data.frame(datain)
myMeteo$myDate<-lubridate::ymd_hms(myMeteo[,"date"])
# 1a: take mean of sensors data by date
# we have 1 value per quarter hour!
myMeteoMean<-summarise(group_by(myMeteo,myDate),tMean=mean(value,na.rm=TRUE))
myMeteoMean<-arrange(myMeteoMean,myDate)
# 1b: Day retrieves the yyyy-mm-dd part of myDate and nDay gives the day
# of year of myDate
myMeteoMean<-mutate(myMeteoMean,
Day=as.character(ymd(substr(myDate,1,10))),
nDay=yday(myDate))
# keep only records between inDateS and inDateE (start/end events)
myMeteoMean<-filter(myMeteoMean,Day >= inDateS,Day <= inDateE)
#---------------------------------------------------------
#-- 2/ calculation of TT according to the chosen method
#---------------------------------------------------------
if (method == "parent"){
#------------------------
# Initialisation of parameters' model
# NOTE(review): the universal gas constant is 8.314 J/(mol*K); 8.134 looks
# like a digit transposition — confirm against Parent et al. (2010).
R<-8.134
# Species-specific enthalpy/entropy parameters of the Parent model.
if (inSpecie=="maize"){
Ha<-76800
Hd<-285000
Sd<-933
} else if (inSpecie=="rice"){
Ha<-87500
Hd<-333000
Sd<-1090
} else if (inSpecie=="arabidopsis"){
Ha<-63100
Hd<-358000
Sd<-1180
}
# calculation of DAS or DAE or DAT (days after the start event)
myMeteoMean$pDate<-yday(inDateS)
myMeteoMean<-mutate(myMeteoMean,DAT=nDay - pDate)
myMeteoMean<-filter(myMeteoMean,DAT >= 0)
# Temperature in kelvin unit
# NOTE(review): uses +273 rather than +273.15 — confirm intended precision.
myMeteoMean<-mutate(myMeteoMean,tKelvin=tMean + 273)
# Theoretical rate at the 20 degree C reference (293 K)
f20<-(293*exp(-Ha/(R*293))) / (1+exp((Sd/R)-(Hd/(R*293))))
# Time calculation (number of days at 20 degree C) on temperatures per quarter hour
myMeteoMean<-mutate(myMeteoMean,
ft=(tKelvin*exp(-Ha/(R*tKelvin))) /
(1+exp((Sd/R)-(Hd/(R*tKelvin)))) )
# t20: scale each record by 1/(number of records that day) so daily sums
# are comparable even if some quarter hours are missing
tp<-summarise((group_by(myMeteoMean,Day)),countT=n())
myMeteoMean<-dplyr::left_join(myMeteoMean,tp,by="Day")
myMeteoMean<-mutate(myMeteoMean,t20=(ft/f20)*(1/countT),
t20Cumul=cumsum(t20))
dataout<-as.data.frame(summarise(group_by(myMeteoMean,Day),TT=max(t20Cumul,na.rm=TRUE)))
#----------------------------------
} else if (method == "baseline"){
#----------------------------------
# Classic growing-degree-day sum: cumulated (daily mean - baseline).
myMeteoMean<-summarise(group_by(myMeteoMean,Day),tMean=mean(tMean,na.rm=TRUE))
myMeteoMean<-arrange(myMeteoMean,Day)
myMeteoMean$baselineTemp<-inTemp
dataout<-as.data.frame(mutate(myMeteoMean,TT=cumsum(tMean-baselineTemp)))
}
return(dataout)
}
|
a03f0dd4b295495208f8c87fa4a2b18d9a86f6d7
|
858f45ab0c198584e2368c249ad82cf30e74f591
|
/R/mycpt.R
|
5bc150ea6ce43ae43303edd9808d566fd80c161d
|
[] |
no_license
|
pra1981/generalizedPELT
|
18f8f32084292b49c7c294f6a38bc2649c154d8c
|
e3949b3ea1efb1ddd09b09e5aba55ce28cfb19d2
|
refs/heads/master
| 2020-04-25T07:52:00.506399
| 2018-06-25T17:19:07
| 2018-06-25T17:19:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,863
|
r
|
mycpt.R
|
library(MASS)
###############################
## Objects and simulate #######
runs.mycpt <- function(runs, minseglen = 75, pen = -1, sigma, mu, order, each) {
  # Simulate `runs` independent data sets and attach the algorithm settings
  # (penalty, minimum segment length) plus derived sizes to the result.
  library(Matrix)
  library(stats)
  library(MASS)

  settings <- list(pen = pen, minseglen = minseglen)
  obj <- simulate.dataset.mycpt(sigma, mu, order, each, runs, attrb = settings)

  # Total series length and number of internal changepoints.
  obj$attrb$n <- length(order) * each
  obj$attrb$m <- length(order) - 1

  # A penalty of -1 is the sentinel for "use the BIC penalty log(n)".
  if (obj$attrb$pen == -1) {
    obj$attrb$pen <- log(obj$attrb$n)
  }
  obj
}
simulate.1.mycpt <- function(sigma, mu, order, each) {
  # Draw one data set: for every regime index in `order`, sample `each`
  # observations from N(mu[[x]], sigma[[x]]) and stack the segments row-wise.
  #
  # PERFORMANCE FIX: build all segments first and bind once with
  # do.call(rbind, ...) instead of growing the matrix with rbind() inside
  # the loop (which copies the accumulated matrix on every pass).
  # mvrnorm() is still called once per regime, in the same order, so the
  # RNG stream — and therefore the simulated data — is unchanged.
  segments <- lapply(order, function(x) {
    mvrnorm(n = each, mu = mu[[x]], Sigma = sigma[[x]])
  })
  do.call(rbind, segments)
}
simulate.dataset.mycpt <- function(sigma, mu, order, each, runs = 2,
                                   return.data.only = FALSE, attrb = list()) {
  # Simulate `runs` independent data sets. With return.data.only = TRUE,
  # return only the list of data sets; otherwise wrap them together with
  # the simulation settings (caller-supplied `attrb` entries come first).
  datasets <- replicate(n = runs,
                        expr = simulate.1.mycpt(sigma, mu, order, each),
                        simplify = FALSE)
  if (return.data.only) {
    return(datasets)
  }
  derived <- list(runs = runs,
                  n = each * length(order),
                  p = length(mu[[1]]),
                  changepoints = (1:length(order)) * each,
                  sigma = sigma, mu = mu, order = order, each = each)
  list(attrb = c(attrb, derived), data = datasets)
}
###############################
## Cost computation ###########
# Total penalised cost of a complete segmentation: the sum of the segment
# costs over every interval implied by tau.vec, plus `pen` per internal
# changepoint. tau.vec must end with the series length n.
cost.tot.sol.mycpt <- function(data,tau.vec,type="1d.mean",pen=0){
# Handleable form so same whether p=1 or not
# NOTE(review): matrix(data) collapses a multi-column matrix into a single
# column, so for the p-dimensional cost types this reshapes the data rather
# than preserving it — confirm callers only use this with p = 1.
data=matrix(data)
# Last element, also length(data) [or dim(data)[1]]
len=tail(tau.vec,1)
# Remove last element of tau.vec
tau.vec=tau.vec[1:(length(tau.vec)-1)]
# Number of internal changepoints
m=length(tau.vec)
# Compute interval cost for each of m+1 intervals
int.cost = sapply(1:(m+1), function(i) cost.mycpt(intv.dat=
# Where the i th interval is (c(0,tau.vec)[i]+1):(c(tau.vec,len)[i])
data[(c(0,tau.vec)[i]+1):(c(tau.vec,len)[i]),],type=type))
return(sum(int.cost)+m*pen)
}
cost.mycpt <- function(intv.dat, type = "1d.mean", n = 1) {
  # Dispatch to the segment-cost implementation named by `type`.
  # `n` (the full series length) is only used by the mBIC variants.
  # An unrecognised `type` yields NULL, exactly as switch() does.
  switch(type,
         "1d.mean"              = cost.1d.mean.mycpt(intv.dat = intv.dat),
         "1d.meanvar"           = cost.1d.meanvar.mycpt(intv.dat = intv.dat),
         "pd.mean"              = cost.pd.mean.mycpt(intv.dat = intv.dat),
         "pd.meanvar.diag"      = cost.pd.meanvar.diag.mycpt(intv.dat = intv.dat),
         "pd.meanvar.full"      = cost.pd.meanvar.full.mycpt(intv.dat = intv.dat),
         "mbic.1d.mean"         = cost.mbic.1d.mean.mycpt(intv.dat = intv.dat, n = n),
         "mbic.1d.meanvar"      = cost.mbic.1d.meanvar.mycpt(intv.dat = intv.dat, n = n),
         "mbic.pd.mean"         = cost.mbic.pd.mean.mycpt(intv.dat = intv.dat, n = n),
         "mbic.pd.meanvar.diag" = cost.mbic.pd.meanvar.diag.mycpt(intv.dat = intv.dat, n = n),
         "mbic.pd.meanvar.full" = cost.mbic.pd.meanvar.full.mycpt(intv.dat = intv.dat, n = n))
}
cost.1d.mean.mycpt <- function(intv.dat, t = 0) {
  # Gaussian mean-change cost: residual sum of squares about the segment
  # mean. (`t` is accepted for interface compatibility but unused.)
  centre <- mean(intv.dat)
  sum((intv.dat - centre)^2)
}
cost.1d.meanvar.mycpt <- function(intv.dat, t = 0) {
  # Gaussian mean+variance cost: len * log(sigma^2_MLE), with the ML
  # variance floored at 1e-10 so constant segments do not give log(0).
  # (`t` is accepted for interface compatibility but unused.)
  len <- length(intv.dat)
  rss <- sum((intv.dat - mean(intv.dat))^2)
  sigma.sq <- max(rss / len, 0.0000000001)
  len * log(sigma.sq)
}
cost.pd.mean.mycpt <- function(intv.dat, t = 0) {
  # p-dimensional mean-change cost when Sigma is known to be I_p: total
  # squared deviation of each row from the vector of column means.
  #
  # BUG FIX: the original computed `intv.dat - mu.hat`, which recycles the
  # length-p mean vector down the columns of the n-by-p matrix (i.e. it
  # subtracts by row index), not mu.hat[j] from column j. sweep() performs
  # the intended column-wise centering.
  mu.hat <- colMeans(intv.dat)
  sum(sweep(intv.dat, 2, mu.hat)^2)
}
cost.pd.meanvar.diag.mycpt <- function(intv.dat, t = 0) {
  # p-dimensional mean+variance cost with a diagonal covariance:
  # n * sum_j log(sigma_j^2_MLE), using per-column ML variances.
  #
  # BUG FIX: `intv.dat - colMeans(intv.dat)` recycles the length-p mean
  # vector down the columns (subtraction by row index); sweep() performs
  # the intended column-wise centering. Identical for p = 1.
  n.obs <- nrow(intv.dat)
  centred <- sweep(intv.dat, 2, colMeans(intv.dat))
  log.sigma.sq.hat <- log(colSums(centred^2) / n.obs)
  n.obs * sum(log.sigma.sq.hat)
}
cost.pd.meanvar.full.mycpt <- function(intv.dat, t = 0) {
  # Fit a full p-dimensional Gaussian to the segment (rows = time stamps,
  # columns = streams) and return the cost
  #   len * (rank * (log(2*pi) + 1) + log(prod(nonzero singular values)))
  # based on the ML covariance estimate.
  len <- nrow(intv.dat)
  # Mean ML-estimate and column-wise centering.
  mu.hat <- colMeans(intv.dat)
  z <- as.matrix(sweep(intv.dat, 2, mu.hat))
  # ML covariance estimate: t(z) %*% z / len.
  # PERFORMANCE FIX: the original recomputed t(z) %*% z once per row via
  # apply() and then kept only the first column of the result; crossprod()
  # computes the same p-by-p matrix exactly once.
  sigma.hat <- crossprod(z) / len
  # Singular values of the (symmetric PSD) estimate; drop numerical zeros
  # so log(prod(...)) stays finite for rank-deficient segments.
  eigen <- svd(x = sigma.hat, nu = 0, nv = 0)
  eigen$d <- eigen$d[eigen$d > 10^-10]
  len * (length(eigen$d) * (log(2 * pi) + 1) + log(prod(eigen$d)))
}
cost.mbic.1d.mean.mycpt <- function(intv.dat, t = 0, n) {
  # 1-d mean-change cost plus the mBIC term log(segment length / n),
  # where n is the full series length.
  rss <- sum((intv.dat - mean(intv.dat))^2)
  rss + log(length(intv.dat) / n)
}
cost.mbic.1d.meanvar.mycpt <- function(intv.dat, t = 0, n) {
  # 1-d mean+variance cost plus the mBIC term log(segment length / n).
  # The ML variance is floored at 1e-10 to keep log() finite on constant
  # segments.
  len <- length(intv.dat)
  sigma.sq <- max(sum((intv.dat - mean(intv.dat))^2) / len, 0.0000000001)
  len * log(sigma.sq) + log(len / n)
}
cost.mbic.pd.mean.mycpt <- function(intv.dat, t = 0, n) {
  # p-dimensional mean-change cost (Sigma known to be I_p) plus an
  # mBIC-style log term.
  #
  # BUG FIX: `intv.dat - mu.hat` recycled the length-p mean vector down
  # the columns (subtraction by row index); sweep() centres column-wise.
  #
  # NOTE(review): length(intv.dat) on a matrix is rows*cols, so the mBIC
  # term here is log(len*p/n), not log(len/n) as in the 1-d variant —
  # confirm that is intended before changing it.
  mu.hat <- colMeans(intv.dat)
  sum(sweep(intv.dat, 2, mu.hat)^2) + log(length(intv.dat) / n)
}
cost.mbic.pd.meanvar.diag.mycpt <- function(intv.dat, t = 0, n) {
  # Diagonal-covariance p-dimensional mean+variance cost plus an
  # mBIC-style log term.
  #
  # BUG FIX: `intv.dat - colMeans(intv.dat)` recycled the length-p mean
  # vector down the columns (subtraction by row index); sweep() centres
  # column-wise, as intended.
  #
  # NOTE(review): length(intv.dat) on a matrix is rows*cols, so the mBIC
  # term is log(len*p/n), not log(len/n) as in the 1-d variant — confirm
  # that is intended before changing it.
  n.obs <- nrow(intv.dat)
  centred <- sweep(intv.dat, 2, colMeans(intv.dat))
  log.sigma.sq.hat <- log(colSums(centred^2) / n.obs)
  n.obs * sum(log.sigma.sq.hat) + log(length(intv.dat) / n)
}
cost.mbic.pd.meanvar.full.mycpt <- function(intv.dat, t = 0, n) {
  # Full-covariance p-dimensional Gaussian segment cost plus an mBIC-style
  # log term. Rows of intv.dat are time stamps, columns are streams.
  #
  # NOTE(review): length(intv.dat) on a matrix is rows*cols, so the mBIC
  # term is log(len*p/n), not log(len/n) as in the 1-d variant — confirm
  # that is intended before changing it.
  len <- nrow(intv.dat)
  # Mean ML-estimate and column-wise centering.
  mu.hat <- colMeans(intv.dat)
  z <- as.matrix(sweep(intv.dat, 2, mu.hat))
  # ML covariance estimate: t(z) %*% z / len.
  # PERFORMANCE FIX: the original recomputed t(z) %*% z once per row via
  # apply() and kept only the first column; crossprod() computes the same
  # p-by-p matrix exactly once.
  sigma.hat <- crossprod(z) / len
  # Singular values, dropping numerical zeros so log(prod(...)) stays
  # finite for rank-deficient segments.
  eigen <- svd(x = sigma.hat, nu = 0, nv = 0)
  eigen$d <- eigen$d[eigen$d > 10^-10]
  cost <- len * (length(eigen$d) * (log(2 * pi) + 1) + log(prod(eigen$d)))
  cost + log(length(intv.dat) / n)
}
###############################
## Other important functions ##
build.solution.mycpt <- function(permanent, n) {
  # Backtrack through the `r` (previous-changepoint) column of `permanent`
  # from the final data point n down to 0, collecting the optimal
  # changepoints in increasing order (n itself is always included).
  path <- rep(NA, n)
  idx <- 1
  current <- n
  while (current != 0) {
    path[idx] <- current
    # permanent is indexed by t+1 because its first row corresponds to t=0.
    current <- permanent$r[current + 1]
    idx <- idx + 1
  }
  # Reverse the collected tail-first path and strip unused slots.
  rev(path[!is.na(path)])
}
are.cpt.vecs.identical <- function(sol1, sol2) {
  # Element-wise comparison of two lists of changepoint vectors; each pair
  # is coerced to integer first so e.g. 2 and 2L compare as identical.
  compare.pair <- function(a, b) identical(as.integer(a), as.integer(b))
  mapply(compare.pair, sol1, sol2)
}
###############################
## PELT and competing #########
## PELT that allows restriction min(tau_j - tau_(j-1))>1
# Run gPELT (PELT with a minimum-segment-length constraint) on every data
# set in `data` and backtrack each cost table into a changepoint vector.
# With both=TRUE, return the changepoints together with the full tables.
gpelt.both.mycpt <-function(data,attrb,type="1d.mean",both=TRUE){
# Calculate PELT
#pelt.mat=lapply(data,function(x) pelt4.mycpt(attrb=attrb,dat=x,type=type,mBIC.style=FALSE))
pelt.mat=lapply(data,function(x) gpelt.mycpt(attrb=attrb,dat=x,type=type))
#Should be same
#pelt.mat=lapply(data,pelt4.mycpt,attrb=attrb,type=type)
pelt.cpts = lapply(pelt.mat,function(x) build.solution.mycpt(permanent = x, n=attrb$n))
if(both){
# NOTE(review): return(pelt=list(...)) passes a *named* argument to
# return(); the name is not part of the returned value. Probably meant
# return(list(cpts=..., permanent=...)) — confirm before changing.
return(pelt=list(cpts=pelt.cpts,permanent=pelt.mat))
}
return(pelt.cpts)
}
# Generalised PELT for a single data set: dynamic programming over the
# last-changepoint candidate set `s.set`, enforcing a minimum segment
# length attrb$minseglen via the Inherit list (candidates become available
# only minseglen steps after they are created). Returns the `permanent`
# table (t, optimal cost F.val, previous changepoint r), which
# build.solution.mycpt() backtracks into a changepoint vector.
gpelt.mycpt <- function(attrb,dat,type="1d.mean"){
# Not manually debugging
my.debug=FALSE
# This is an overly complex way to do it, but gives a table
# "permanent"
# that is easier to interpret to understand the algorithm
# Is type among the selection of cost functions
if(attrb$p==1){
if(!is.element(type,c("1d.mean","1d.meanvar","pd.meanvar.diag",
"mbic.1d.mean","mbic.1d.meanvar","mbic.pd.meanvar.diag"))){
return("Type is not valid.")
}
dat=matrix(dat,ncol=1)
}else{
if(!is.element(type,c("pd.mean","pd.meanvar.diag","pd.meanvar.full",
"mbic.pd.mean","mbic.pd.meanvar.diag","mbic.pd.meanvar.full"))){
return("Type is not valid.")
}
}
### Initialize first step such that
# inherit = 0, F(0) = -\pen, s.set={0}, r(0)=0
# Outer data frame of t,F,r
permanent <- data.frame(t=seq(0,attrb$n),F.val=rep(NA,attrb$n+1),r=rep(NA,attrb$n+1))
permanent[1,2:3]=c(-attrb$pen,0)
### Fill the warm-up range [minseglen, 2*minseglen-1], where the only
### admissible predecessor is t = 0
for(t in attrb$minseglen:min(2*attrb$minseglen-1,attrb$n)){
# predecessor is 0th data point
permanent[permanent$t==t,2:3]=
c(cost.mycpt(intv.dat=dat[(1):t,],type=type,n=attrb$n),0)
}
# Return if finished
if(attrb$n<2*attrb$minseglen){
return(permanent)
}
# Else construct Inherit such that
# When we inherit from time t, we get the s.set at Inherit[[t+1]]
#inherit$q is the data point we inherit from,
# inherit$s is the pruned s.set at the time we inherit from
Inherit=as.list(c(rep(0,2*attrb$minseglen),rep(NA,attrb$n-3*attrb$minseglen+1)))
if(my.debug){t=2*attrb$minseglen-1}
####
## Compute for the rest of the data points
for(t in (2*attrb$minseglen):(attrb$n)){
if(my.debug&&(t%%25==0)){cat("t=",t,".\n")}
if(my.debug){t=t+1}
### Combine inherited and earned data points to get s.set
s.set=c(Inherit[[t-attrb$minseglen+1]], #inherited
max(attrb$minseglen,t-2*attrb$minseglen+1):(t-attrb$minseglen)) #earned
### For a changepoint at t find best most recent changepoint s
# Use cost function to compute int.cost C(s+1,t) for all s in s.set
temp<-data.frame(
s=s.set,
int.cost = sapply(s.set, function(x) cost.mycpt(intv.dat=dat[(x+1):t,],type=type,n=attrb$n))
)
## Compute full cost and pruning cost
temp$full.cost <- permanent[s.set+1,2] + temp$int.cost + attrb$pen
temp$prune.cost<- permanent[s.set+1,2] + temp$int.cost
## Determine smallest (optimal) full cost
# Save smallest (optimal) full cost
permanent$F.val[t+1]=min(temp$full.cost)
# Save previous changepoint, the s with smallest full cost
# That is the last s for which F.val is minimal
permanent$r[t+1]=tail(temp$s[temp$full.cost==permanent$F.val[t+1]],1)
### Remove non-optimal predecessors
### Remember which data points to inherit
# s with smaller pre-beta cost, the ones to keep
A=temp$prune.cost<=permanent$F.val[t+1]
## Only add element to next s.set if it has a valid predecessor
# NOTE(review): length(A==TRUE) is the length of the logical vector (same
# as length(A)), not the number of TRUE entries — this branch is taken
# only when A is empty. If "no survivors" was meant, sum(A)==0 would be
# the test; confirm the intent.
if(length(A==TRUE)==0){
Inherit[[t+1]]=NULL
}else{
Inherit[[t+1]]=temp$s[A]
}
# Debug
# NOTE(review): the commented/guarded lines below reference a lowercase
# `inherit` object that is never defined in this function (only the
# Inherit list is); they would fail if my.debug were enabled.
if(my.debug){t}
if(my.debug){s.set} #current s.set to go through, out to be 0 until 2*minseglen
if(my.debug){t}
if(my.debug){temp}
if(my.debug){Inherit[[t+1]]}
# if(my.debug){inherit$s[inherit$q==t-(attrb$minseglen)]} #Inherited, first part of s.set
if(my.debug){t-(attrb$minseglen)} # Inherited from
if(my.debug){inherit$s[inherit$q==t]} # legacy (inheritance passed on from this node (ouht to be 0 until 2*attrb$minseglen)
if(my.debug){temp}
if(my.debug){permanent}
if(my.debug){View(permanent)}
}
if(FALSE){cat('\n1 run of gPELT performed.\n')}
return(permanent)
}
## The OP method that allows restriction min(tau_j - tau_(j-1))>1
# Run the Optimal Partitioning variant (op.mycpt, no pruning) on every
# data set in `data` and backtrack each table into a changepoint vector.
# With both=TRUE, return the changepoints together with the full tables.
op.both.mycpt <-function(data,attrb,type="1d.mean",both=TRUE){
# Calculate PELT
pelt.mat=lapply(data,function(x) op.mycpt(attrb=attrb,dat=x,type=type))
pelt.cpts = lapply(pelt.mat,function(x) build.solution.mycpt(permanent = x, n=attrb$n))
if(both){
# NOTE(review): return(pelt=list(...)) passes a *named* argument to
# return(); the name is not part of the returned value. Probably meant
# return(list(cpts=..., permanent=...)) — confirm before changing.
return(pelt=list(cpts=pelt.cpts,permanent=pelt.mat))
}
return(pelt.cpts)
}
# Optimal Partitioning for a single data set: identical dynamic program to
# gpelt.mycpt but the candidate set s.set only grows — nothing is ever
# pruned — so the run time is quadratic. Returns the `permanent` table
# (t, optimal cost F.val, previous changepoint r).
op.mycpt <- function(attrb,dat,type="1d.mean"){
# exact same as OP, nothing is ever pruned
# Not manually debugging
my.debug=FALSE
# Is type among the selection of cost functions
if(attrb$p==1){
if(!is.element(type,c("1d.mean","1d.meanvar","pd.meanvar.diag",
"mbic.1d.mean","mbic.1d.meanvar","mbic.pd.meanvar.diag"))){
return("Type is not valid.")
}
dat=matrix(dat,ncol=1)
}else{
if(!is.element(type,c("pd.mean","pd.meanvar.diag","pd.meanvar.full",
"mbic.pd.mean","mbic.pd.meanvar.diag","mbic.pd.meanvar.full"))){
return("Type is not valid.")
}
}
## Initialize first step such that
# s = 0, F(0) = -\pen, s.set={0}, r(0)=0
# Outer data frame of t,F,r
permanent <- data.frame(t=seq(0,attrb$n),F.val=rep(NA,attrb$n+1),r=rep(NA,attrb$n+1))
permanent[1,2:3]=c(-attrb$pen,0)
s.set=c(0)
if(my.debug){t=attrb$minseglen-1}
## Compute for all data sets lengths shorter than attrb$n+1
# Work in delay by starting at minseglen
for(t in (attrb$minseglen):attrb$n){
if(my.debug&&(t%%25==0)){cat("t=",t,".\n")}
if(my.debug){t=t+1}
## Use cost function to compute int.cost C(s+1,t) for all s in s.set
# This is the only place the cost function is evaluated
temp<-data.frame(
s=s.set,
int.cost = sapply(s.set, function(x) cost.mycpt(intv.dat=dat[(x+1):t,],type=type,n=attrb$n))
)
# This is an overly complex way to do it, but gives a table
# "permanent" that is easier to interpret to understand the algorithm
## Compute full cost and pruning cost
temp$full.cost <- permanent[s.set+1,2] + temp$int.cost + attrb$pen
temp$prune.cost<- permanent[s.set+1,2] + temp$int.cost
## Determine smallest (optimal) full cost
# Save smallest (optimal) full cost
#¤Edit [t+1] to [permanent$s==t], maybe if not slower ;)
permanent$F.val[t+1]=min(temp$full.cost)
# Save previous changepoint, the s with smallest full cost
# That is the last s for which F.val is minimal
permanent$r[t+1]=tail(temp$s[temp$full.cost==permanent$F.val[t+1]],1)
## Prune - prepare next s.set
# NOTE(review): A and B are computed but never used below — dead code in
# this no-pruning variant; kept for symmetry with pelt2.mycpt.
# s with smaller pre-beta cost
A=temp$prune.cost<=permanent$F.val[t+1]
# or superceding t
B=temp$s>permanent$r[t+1] #####
# B=rep(FALSE,length(A))
# if(B&!A){
# warning(paste("B&!A for t=",t,".\n"))
# }
## Only add element to set if it has a valid predecessor
if(t>=(2*attrb$minseglen-1)){s.set = c(s.set,t+1-(attrb$minseglen))}
# Debug
if(my.debug){cat("t=",t,".\n")}
if(my.debug){temp}
if(my.debug){permanent}
if(my.debug){View(permanent)}
}
return(permanent)
}
## Working title of op
# Alias kept under the old working name "pelt5"; delegates unchanged to
# the Optimal Partitioning wrapper.
pelt5.both.mycpt<- function(data,attrb,type="1d.mean",both=TRUE){
return(op.both.mycpt(data,attrb,type,both))
}
# Alias kept under the old working name "pelt5"; delegates unchanged to
# the single-data-set Optimal Partitioning routine.
pelt5.mycpt<- function(attrb,dat,type="1d.mean"){
return(op.mycpt(attrb,dat,type))
}
## Gives same result as PELT in changepoint package, but with my implementation of mBIC
# Run pelt2.mycpt (standard PELT pruning) on every data set in `data` and
# backtrack each table into a changepoint vector. With both=TRUE, return
# the changepoints together with the full tables.
pelt2.both.mycpt <-function(data,attrb,type="1d.mean",both=TRUE){
# Calculate PELT
pelt.mat=lapply(data,function(x) pelt2.mycpt(attrb=attrb,dat=x,type=type))
pelt.cpts = lapply(pelt.mat,function(x) build.solution.mycpt(permanent = x, n=attrb$n))
if(both){
# NOTE(review): return(pelt=list(...)) passes a *named* argument to
# return(); the name is not part of the returned value. Probably meant
# return(list(cpts=..., permanent=...)) — confirm before changing.
return(pelt=list(cpts=pelt.cpts,permanent=pelt.mat))
}
return(pelt.cpts)
}
# Standard PELT for a single data set: same dynamic program as op.mycpt,
# but candidates failing the pruning inequality (prune.cost > F(t)) are
# dropped from s.set on each step. Returns the `permanent` table
# (t, optimal cost F.val, previous changepoint r).
pelt2.mycpt <- function(attrb,dat,type="1d.mean"){
# Not manually debugging
my.debug=FALSE
if(attrb$p==1){
if(!is.element(type,c("1d.mean","1d.meanvar","pd.meanvar.diag",
"mbic.1d.mean","mbic.1d.meanvar","mbic.pd.meanvar.diag"))){
return("Type is not valid.")
}
dat=matrix(dat,ncol=1)
}else{
if(!is.element(type,c("pd.mean","pd.meanvar.diag","pd.meanvar.full",
"mbic.pd.mean","mbic.pd.meanvar.diag","mbic.pd.meanvar.full"))){
return("Type is not valid.")
}
}
## Initialize first step such that
# s = 0, F(0) = -\pen, s.set={0}, r(0)=0
# Outer data frame of t,F,r
permanent <- data.frame(t=seq(0,attrb$n),F.val=rep(NA,attrb$n+1),r=rep(NA,attrb$n+1))
permanent[1,2:3]=c(-attrb$pen,0)
s.set=c(0)
if(my.debug){t=attrb$minseglen-1}
## Compute for all data sets lengths shorter than attrb$n+1
# Work in delay by starting at minseglen
for(t in (attrb$minseglen):attrb$n){
if(my.debug&&(t%%25==0)){cat("t=",t,".\n")}
if(my.debug){t=t+1}
## Use cost function to compute int.cost C(s+1,t) for all s in s.set
# This is the only place the cost function is evaluated
temp<-data.frame(
s=s.set,
int.cost = sapply(s.set, function(x) cost.mycpt(intv.dat=dat[(x+1):t,],type=type,n=attrb$n))
)
# This is an overly complex way to do it, but gives a table
# "permanent" that is easier to interpret to understand the algorithm
## Compute full cost and pruning cost
temp$full.cost <- permanent[s.set+1,2] + temp$int.cost + attrb$pen
temp$prune.cost<- permanent[s.set+1,2] + temp$int.cost
## Determine smallest (optimal) full cost
# Save smallest (optimal) full cost
#¤Edit [t+1] to [permanent$s==t], maybe if not slower ;)
permanent$F.val[t+1]=min(temp$full.cost)
# Save previous changepoint, the s with smallest full cost
# That is the last s for which F.val is minimal
permanent$r[t+1]=tail(temp$s[temp$full.cost==permanent$F.val[t+1]],1)
## Prune - prepare next s.set
# s with smaller pre-beta cost
A=temp$prune.cost<=permanent$F.val[t+1]
# or superceding t
#skip this
## Only add element to next s.set if it has a valid predecessor
# NOTE(review): pruning only takes effect once new candidates start being
# added (t >= 2*minseglen-1); before that, s.set stays at {0} anyway.
if(t>=(2*attrb$minseglen-1)){s.set = c(temp$s[A],t-(attrb$minseglen-1))}
# Debug
if(my.debug){temp}
if(my.debug){permanent}
if(my.debug){View(permanent)}
}
return(permanent)
}
###############################
## Other functions ############
est.param.mycpt <- function(obj, tau.vec, type) {
  # Estimate per-segment parameters (mean, and variance where applicable)
  # for every segment implied by the changepoint vector tau.vec, returning
  # a list with the type, tau.vec, and one named entry ("c1", "c2", ...)
  # per segment.
  #
  # NOTE(review): the segment data is read from a variable `data` that is
  # not an argument of this function — it resolves from the calling
  # environment. It should probably be `obj` (or a component of it);
  # confirm against the callers before changing.
  #
  # BUG FIX: est$type previously hard-coded "1d.mean" regardless of the
  # `type` argument.
  est <- list(type = type, tau.vec = tau.vec)
  tvt <- c(0, tau.vec)
  param <- NULL
  if (is.element(type, c("1d.mean", "pd.mean"))) {
    # Mean only.
    # BUG FIX: this branch previously listed "pd.meanvar.diag", which
    # shadowed the mean+variance branch below; "pd.mean" is the
    # mean-only p-dimensional type.
    param <- lapply(1:length(tau.vec), function(i) {
      list(cpt = tau.vec[i],
           mean = colMeans(data[(tvt[i] + 1):tvt[i + 1], ]))
    })
  } else if (is.element(type, c("1d.meanvar", "pd.meanvar.diag"))) {
    # Mean and per-column ML variance for each segment.
    param <- lapply(1:length(tau.vec), function(i) {
      list(cpt = tau.vec[i],
           mean = colMeans(data[(tvt[i] + 1):tvt[i + 1], ]),
           sigma = colSums((data[(tvt[i] + 1):tvt[i + 1], ] -
                              colMeans(data[(tvt[i] + 1):tvt[i + 1], ]))^2) /
             (tvt[i + 1] - tvt[i]))
    })
  } else if (type == "pd.meanvar.full") {
    # Mean and full covariance matrix for each segment.
    param <- lapply(1:length(tau.vec), function(i) {
      list(cpt = tau.vec[i],
           mean = colMeans(data[(tvt[i] + 1):tvt[i + 1], ]),
           sigma = est.param.full.mycpt(data[(tvt[i] + 1):tvt[i + 1], ]))
    })
  } else {
    warning(paste("Not valid type =", type, ".\nChoose among types:\n",
                  "1d.mean, ", "1d.meanvar, ",
                  "pd.mean, \n", "pd.meanvar.diag, ", "pd.meanvar.full."))
  }
  if (!is.null(param)) {
    # BUG FIX: the original called setNames() but discarded its result, so
    # the segment entries were never actually named c1, c2, ...
    param <- setNames(param, sapply(1:length(tau.vec),
                                    function(i) paste0("c", i)))
  }
  est$param <- param
  return(est)
}
# NOTE(review): this helper looks unfinished/dead: it ignores its `tau`
# argument and references `cpt` and `intv.dat`, which are not defined in
# this scope — calling it as-is fails unless those names happen to exist
# in the calling environment. Confirm whether it can be removed.
est.list.mean.mycpt <- function(tau){
return(list(cpt=cpt,mean=colMeans(intv.dat)))
}
simulate.mycpt <- function(n = 10, p = 5, fixed = FALSE) {
  warning("Can I delete this, or is it in use? [simulate.mycpt]")
  ## No changes in the data set
  library('mvtnorm')
  # Draw a single p-dimensional Gaussian data set (no changepoints) with a
  # randomly generated mean vector and covariance q %*% t(q), which is
  # positive semi-definite by construction. fixed = TRUE pins the seed so
  # repeated calls reproduce the same parameters and data.
  if (fixed) {
    set.seed(0)
  }
  mu <- round(rnorm(p, mean = 10, sd = 8))
  q <- matrix(rnorm(p * p, mean = 2, sd = 5), nrow = p, ncol = p)
  sigma <- q %*% t(q)
  list(dat = rmvnorm(n = n, mean = mu, sigma = sigma),
       mean = mu,
       sigma = sigma)
}
#types=c("1d.mean","1d.meanvar","pd.mean","pd.meanvar.diag","pd.meanvar.full")
mycpt <- function(dat = 0, sim = FALSE, n = 100, p = 1, minseglen = 75, pen = -1) {
  # Construct a 'mycpt' object wrapping the data matrix and the settings
  # (minimum segment length, penalty) used by the changepoint routines.
  library(Matrix)
  library(stats)
  warning("Check if this is still in use, I don't think so. :)")

  ## Optionally simulate a data set instead of using `dat`.
  if (sim) {
    sim1 <- simulate.mycpt(n = n, p = p)
    dat <- sim1$dat
    sigma <- sim1$sigma
    mean <- sim1$mean
  }

  ## Collect settings and derived dimensions.
  attrb <- list(minseglen = minseglen)
  if (sim) {
    attrb$genparams <- list(mean = mean, sigma = sigma)
  }
  has.dims <- !is.null(dim(dat))
  attrb$n <- if (has.dims) dim(dat)[1] else length(dat)
  attrb$p <- if (has.dims) dim(dat)[2] else 1
  # Set penalty term \beta: pen == -1 is the sentinel for "no penalty".
  attrb$pen <- if (pen == -1) 0 else pen

  obj <- list(attrb = attrb, dat = as.matrix(dat))
  class(obj) <- 'mycpt'
  obj
}
|
7717b2fbadd4d623bbb31afde75e7322198f3939
|
fc3504c980414b28887e35b54cbf016a394af62f
|
/plot2.R
|
2fd0002e50daf1cd67b6f67999ed09ee9fd2dc4c
|
[] |
no_license
|
weiconglyu/ExData_Plotting1
|
f75c85e3e51ece42aacd61af30c9453ea68274d7
|
877b71020efa2bf053a9af593c3561e75e0790e5
|
refs/heads/master
| 2021-05-27T10:59:51.458936
| 2014-08-02T14:30:13
| 2014-08-02T14:30:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
plot2.R
|
# plot2.R — reads the UCI "Individual household electric power
# consumption" data, keeps the two days 1-2 Feb 2007, and draws Global
# Active Power (kilowatts) against time as a line chart in plot2.png.
data <- read.table("household_power_consumption.txt",
sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Dates in the raw file are d/m/Y strings; subset to the two target days.
t <- data$Date == "1/2/2007" | data$Date == "2/2/2007"
data <- data[t, ]
png("plot2.png", width = 480, height = 480, units = "px",
bg = "transparent")
# Combine Date and Time into POSIX timestamps for the x axis; the power
# column may have been read as character (hence the as.numeric()).
with(data, plot(strptime(paste(Date, Time), "%d/%m/%Y %H:%M:%S"),
as.numeric(Global_active_power), type = "l", xlab = "",
ylab = "Global Active Power (kilowatts)"))
dev.off()
|
119454ab35acd0e0e6c58c104ccd232affdf6b26
|
c407762045c37b35dac8c10e05fb4ded836e5cb7
|
/PAnalytics.R
|
ee5a7320ac3829396b9aa1a8233afe66854f46dd
|
[] |
no_license
|
prophet555/InvestmentProcess
|
641c638392aa7187f11741739602f515274265df
|
6ef549ca0d9f2dbd326974ebb586ce7005bde890
|
refs/heads/master
| 2023-05-11T08:16:02.960170
| 2013-10-14T02:54:13
| 2013-10-14T02:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,827
|
r
|
PAnalytics.R
|
# Performance Analytics workbook: asset return summaries, efficient-frontier
# optimisation with fPortfolio, and a simple tail-risk (VaR/CVaR) report.
# References:
#   http://www.youtube.com/watch?v=vUVAaDqz4cs
#   http://cran.r-project.org/web/packages/PerformanceAnalytics/PerformanceAnalytics.pdf
options(digits = 4)
options(scipen = 999)  # suppress scientific notation in printed results
library(PerformanceAnalytics)
library(xts)
library(tseries)
library(plyr)       # for renaming datasets
library(fPortfolio) # for portfolio optimisations
library(quadprog)
# library("plottrix") # for 3d pie charts

# Handy functions (reference):
#   Return.read()      loads csv files (dates in the first column, returns after)
#   Return.calculate() computes simple/compound returns from adjusted closes
#   to.period() / aggregate.zoo() convert to regular daily/monthly/yearly data

library(Quandl)
# SECURITY NOTE(review): API token is hard-coded; prefer reading it from an
# environment variable (e.g. Sys.getenv("QUANDL_TOKEN")) before sharing this file.
token <- 'jMzykr2TqHKytNTHknXH'
Quandl.auth(token)

startDate <- "2012-01-01"
endDate   <- "2013-09-01"
# The ".4" suffix selects the 4th column (Close) of each Google dataset.
AAPL <- "GOOG/NASDAQ_AAPL.4"
LULU <- "GOOG/NASDAQ_LULU.4"
MX   <- "GOOG/NYSE_MX.4"
TGA  <- "GOOG/NASDAQ_TGA.4"
NLY  <- "GOOG/NYSE_NLY.4"
stockList <- c(AAPL, LULU, MX, TGA, NLY)

# transformation = "rdiff" downloads simple (percentage) changes; collapse may
# be "daily", "weekly", "monthly", "quarterly" or "annual".
data <- Quandl(stockList, start_date = startDate, end_date = endDate,
               type = "xts", transformation = "rdiff")
names(data) <- c("AAPL", "LULU", "MX", "TGA", "NLY")

table.Stats(data)  # summary statistics for every asset
charts.PerformanceSummary(data, colorset = rich6equal)

###################### Efficient frontier with chosen assets #####################
## fPortfolio expects a timeSeries of returns (expected returns + covariances).
## http://cran.r-project.org/web/packages/fPortfolio/fPortfolio.pdf
data2 <- Quandl(stockList, start_date = startDate, end_date = endDate,
                type = "xts", transformation = "rdiff")
colnames(data2) <- c("AAPL", "LULU", "MX", "TGA", "NLY")
data2 <- as.timeSeries(data2)
data.cov <- covEstimator(data2)

# Number of assets and the optimisation constraints.
assets <- ncol(data2)
constraints <- c('LongOnly')  # plain long-only portfolio
# Per-asset weight bounds (this replaces the plain long-only spec above).
constraints <- c('minW[1:assets]=0', 'maxW[1:assets]=0.5')
# Group constraints are also possible, e.g. 'minsumW[c("LULU", "AAPL")]=0.1'.

# Optimisation specification.
spec <- portfolioSpec()
setNFrontierPoints(spec) <- 25
setSolver(spec) <- "solveRquadprog"
portfolioConstraints(data2, spec, constraints)  # sanity-check the constraints

frontier <- portfolioFrontier(data2, spec, constraints)
print(frontier)
tailoredFrontierPlot(frontier)             # efficient frontier
weightsPlot(frontier)                      # asset weights along the frontier
weightsPlot(frontier, col = rainbow(assets))
weightsPlot(frontier, col = heat.colors(assets))

##################### quantmod: Yahoo data and a risk report ####################
### quantmod has an easier symbol search/load than Quandl, but Quandl offers
### more data sources and larger datasets.
library("quantmod")
getSymbols("AAPL")               # downloads AAPL as an xts/zoo object
chartSeries(AAPL, theme = "white")
chartSeries(AAPL)                # bloomberg-style chart
ticker <- c("DJIA", "^GDAXI", "^ATX")  # kept for reference; not used below
# BUGFIX: xts ISO-8601 range subsetting needs "::" (or "/"), not a single ":".
data <- dailyReturn(AAPL, subset = "2011-10-01::2012-09-30")

# Risk report written to CSV (one row per ticker; `data` only holds AAPL here).
# BUGFIX: the original set `ticker = DJIA` (an undefined object) and computed
# the 5% CVaR into a throwaway `cvar` variable, so `cvar005` stayed empty and
# the data.frame() construction below failed.
ticker <- c("AAPL")
mean <- c()
sd <- c()
var005 <- c()
var001 <- c()
cvar005 <- c()
i <- 1
for (tk in ticker) {
  mean    <- c(mean, mean(data[, i]))
  sd      <- c(sd, sd(data[, i]))
  var005  <- c(var005, quantile(data[, i], 0.05))
  var001  <- c(var001, quantile(data[, i], 0.01))
  # CVaR(0.05): mean of the worst 5% of daily returns.
  cvar005 <- c(cvar005, mean(sort(data[, i])[1:(round(length(data[, i]) * 0.05))]))
  i <- i + 1
}
risk_report <- data.frame(ticker, mean, sd, var005, var001, cvar005)
names(risk_report) <- c("Symbol", "Mean (1y)", "St. Dev (1y)", "VaR (0.05)",
                        "VaR (0.001)", "CVaR (0.05)")
write.csv2(risk_report, file = "risk_report.csv")
risk_cor <- cor(data)  # correlation of the assets
write.csv2(risk_cor, file = "risk_cor.csv")
|
223b5a641cde42bdc82f6968c92efb484ba8c812
|
06a82c51579c6600933df98a28ec10efcd5f4bff
|
/app6c/ui.R
|
09944946a6169a14152c789023573a6fd06ebc66
|
[] |
no_license
|
OldMortality/shinies
|
733b5264db870b5fbe8d03845ab3606f4898e07c
|
16de43356a51fe628bad78b7175ad8b834815ce3
|
refs/heads/master
| 2021-06-04T21:21:34.394444
| 2020-03-05T02:48:56
| 2020-03-05T02:48:56
| 145,629,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,419
|
r
|
ui.R
|
# App 6c: like app 6b, but the user types in their own sample statistics
# (mean, sd, size) instead of drawing samples.
library(shiny)
library(shinydashboard)
library(shinyjs)
library(ggplot2)
# NOTE(review): the UI object is assigned to the name `shinyUI`, which shadows
# shiny::shinyUI; sourcing ui.R still works because the file's last evaluated
# expression is this assignment's value.
shinyUI <- dashboardPage(
  dashboardHeader(title = "What can we say based on a single sample mean?",
                  titleWidth = 850),
  dashboardSidebar(useShinyjs(),
    # link to the css stylesheet. It is in the www folder.
    tags$link(rel = "stylesheet", type = "text/css", href = "custom.css"),
    # Hypothesised population mean (mm).
    sliderInput("mu.2", "Hypothesized mean:",
                min = 1400, max = 2000, value = 1740, step = 1
    ),
    # User-supplied sample statistics (entered as text).
    textInput("samplemean", label = h4("Your sample mean (mm)")
              , value = 1750),
    textInput("samplesd", label = h4("Your sample sd (mm)")
              , value = 213),
    textInput("samplesize", label = h4("Your sample size")
              , value = 10),
    # Toggles for the overlays drawn by the server.
    checkboxInput("showsamplemean", "Show sample mean and CI", FALSE),
    checkboxInput("showsampledist", "Show sampling distribution", TRUE)
  ),
  dashboardBody(
    # Boxes need to be put in a row (or column).
    fluidRow(
      # Left column: the three plots.
      column(width = 6,
        box(
          title = "Population (unknown)",
          width = NULL,
          plotOutput("plot1", height = 140),
          height = 200),
        box(
          title = "Confidence interval",
          width = NULL,
          plotOutput("thissamplemean", height = 65),
          height = 125),
        box(
          title = "Distribution of all sample means (unknown)",
          width = NULL,
          plotOutput("samplingdistribution", height = 390),
          height = 450)
      ),
      # Right column: textual summaries aligned with the plots on the left.
      column(width = 6,
        box(
          title = "",
          width = NULL,
          # htmlOutput('sampleSummary', height = 400),
          height = 200),
        box(
          title = "",
          htmlOutput('onesamplesummary', height = 75),
          width = NULL,
          height = 125),
        box(
          width = NULL,
          title = "",
          htmlOutput('sampleMeanSummary', height = 390),
          height = 450)
      )
    )
  )
)
|
b489cfc4f9a04d7593b2083c80244ce788176683
|
bef88657b897dabd82d9f25b324b85eb4906faa5
|
/R/d3dendrogram.R
|
d2eb4bc474955cebb7b213af58fff2f01edc9ab7
|
[] |
no_license
|
RanaivosonHerimanitra/dendextend
|
87ed77d136113a794a1b8bd0d81c8164f1de7380
|
e6769519763e1efb707ef0afbf6ed988910253d2
|
refs/heads/master
| 2021-01-20T23:06:05.828089
| 2014-07-29T08:35:00
| 2014-07-29T08:35:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,368
|
r
|
d3dendrogram.R
|
# Copyright (C) Mark van der Loo and Tal Galili
#
# This file is part of dendextend.
#
# dendextend is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# dendextend is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# library(dendextend)
# library(whisker)
# plot dendrogram to html string.
#
# d a dendrogram object
# height/widht : pixels, height/widht of the plot
# rightmargin : pixels to reserve on the right side for leaf labels.
#
d3dendrogram <- function(d, height = 500, width = 700, rightmargin = 200) {
  # Render dendrogram `d` as a standalone HTML page (returned as a string):
  # the whisker template is filled with the JSON tree and the plot dimensions.
  template_fields <- list(
    json_dendrogram = as.json.dendrogram(d),
    height          = height,
    width           = width,
    rightmargin     = rightmargin
  )
  whisker::whisker.render(d3dendro_template(), data = template_fields)
}
as.json.dendrogram <- function(d){
  # Convert a stats::dendrogram into a JSON string of nested
  # { "name", "y", "children" } nodes, as consumed by the d3 template.
  #
  # Fixes over the previous version:
  #  * builds strings bottom-up and concatenates once per node, instead of
  #    growing a single string through `<<-` (which copied the whole string
  #    on every node, i.e. quadratic in tree size);
  #  * scalar `if`/`else` instead of `ifelse()` on a length-1 value;
  #  * backslashes and double quotes in labels are escaped, so labels
  #    containing '"' no longer produce invalid JSON.
  escape <- function(s) {
    s <- gsub("\\", "\\\\", s, fixed = TRUE)
    gsub('"', '\\"', s, fixed = TRUE)
  }
  node_json <- function(x) {
    v <- attributes(x)
    lab <- if (is.null(v$label)) "" else escape(v$label)
    head <- sprintf('{ "name" : "%s", "y" : %s', lab, v$height)
    if (is.leaf(x)) {
      paste0(head, " }\n")
    } else {
      # Recurse into every child and join them inside a "children" array.
      kids <- vapply(x, node_json, character(1))
      paste0(head, ',\n "children" : [ ', paste(kids, collapse = " , "), " ]}")
    }
  }
  node_json(d)
}
d3dendro_template <- function(){
  # Whisker (mustache) template for the standalone d3 dendrogram page.
  # Placeholders filled by d3dendrogram(): {{{width}}}, {{{height}}},
  # {{{rightmargin}}} and {{{json_dendrogram}}} (the JSON tree built by
  # as.json.dendrogram()). The template text is returned verbatim.
'<!doctype html>
<html><head>
<style>
.node circle {
fill: #fff;
stroke: steelblue;
stroke-width: 1.5px;
}
.node {
font: 14px sans-serif;
}
.link {
fill: none;
stroke: #ccc;
stroke-width: 1.5px;
}
line {
stroke: black;
}
</style>
<script type="text/javascript" src="http://d3js.org/d3.v3.min.js"></script>
</head>
<body>
<script type="text/javascript">
var width = {{{width}}};
var height = {{{height}}};
var cluster = d3.layout.cluster()
.size([height, width-200]);
var diagonal = d3.svg.diagonal()
.projection (function(d) { return [x(d.y), y(d.x)];});
var svg = d3.select("body").append("svg")
.attr("width",width)
.attr("height",height)
.append("g")
.attr("transform","translate(100,0)");
var xs = [];
var ys = [];
function getXYfromJSONTree(node){
xs.push(node.x);
ys.push(node.y);
if(typeof node.children != "undefined"){
for ( j in node.children){
getXYfromJSONTree(node.children[j]);
}
}
}
var ymax = Number.MIN_VALUE;
var ymin = Number.MAX_VALUE;
var xmax = Number.MIN_VALUE;
var xmin = Number.MAX_VALUE;
var json = {{{json_dendrogram}}}
getXYfromJSONTree(json);
var nodes = cluster.nodes(json);
var links = cluster.links(nodes);
nodes.forEach( function(d,i){
if(typeof xs[i] != "undefined"){
d.x = xs[i];
}
if(typeof ys[i] != "undefined"){
d.y = ys[i];
}
});
nodes.forEach( function(d){
if(d.y > ymax)
ymax = d.y;
if(d.y < ymin)
ymin = d.y;
});
nodes.forEach( function(d){
if(d.x > xmax)
xmax = d.x;
if(d.x < xmin)
xmin = d.x;
});
xinv = d3.scale.linear().domain([ymin, ymax]).range([0, width-{{{rightmargin}}}]);
x = d3.scale.linear().domain([ymax, ymin]).range([0, width-{{{rightmargin}}}]);
y = d3.scale.linear().domain([xmin, xmax]).range([60,height-50]);
var link = svg.selectAll(".link")
.data(links)
.enter().append("path")
.attr("class","link")
.attr("d", diagonal);
var node = svg.selectAll(".node")
.data(nodes)
.enter().append("g")
.attr("class","node")
.attr("transform", function(d) {
return "translate(" + x(d.y) + "," + y(d.x) + ")";
});
node.append("circle")
.attr("r", 4.5);
node.append("text")
.attr("dx", function(d) { return d.children ? -8 : 8; })
.attr("dy", 3)
.style("text-anchor", function(d) { return d.children ? "end" : "start"; })
.text( function(d){ return d.name;});
var g = d3.select("svg").append("g")
.attr("transform","translate(100,40)");
g.append("line")
.attr("x1",x(ymin))
.attr("y1",0)
.attr("x2",x(ymax))
.attr("y2",0);
g.selectAll(".ticks")
.data(x.ticks(5))
.enter().append("line")
.attr("class","ticks")
.attr("x1", function(d) { return xinv(d); })
.attr("y1", -5)
.attr("x2", function(d) {return xinv(d); })
.attr("y2", 5);
g.selectAll(".label")
.data(x.ticks(5))
.enter().append("text")
.attr("class","label")
.text(String)
.attr("x", function(d) {return xinv(d); })
.attr("y", -5)
.attr("text-anchor","middle");
</script>
</body>
</html>'
}
|
02ad95de1bbe4cdf362dcb5b6d2c91bbe4b939ff
|
ae9c3f4e717658e5ccd2566c8f87074ee4890301
|
/R/sar.R
|
30b42f1f9962ebc9ff5a1dc8c24a063c1d2a98c3
|
[] |
no_license
|
cmerow/meteR
|
55dc8626c6034dc4c83a24960fc90b7060a1613a
|
d459165539ddad416b44f87862b4b15e6c255d75
|
refs/heads/master
| 2020-05-22T06:55:56.493291
| 2019-03-14T17:18:57
| 2019-03-14T17:18:57
| 26,815,258
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,404
|
r
|
sar.R
|
#' @title Compute METE species area relationship (SAR)
#'
#' @description Uses raw data or state variables to calculate METE SAR
#' and EAR (endemics area relatiohsip) as well as compute the observed
#' SAR or EAR from data, if provided
#'
#' @details Currently only doublings of area are supported. Predictions
#' and comparison to data can be made via several options. If \code{spp}
#' and \code{abund} are not provided then only theoretical predictions
#' are returned without emperical SAR or EAR results. In this case areas
#' can either be specified by providing \code{Amin} and \code{A0} from
#' which a vector of doubling areas is computed, or my providing \code{row},
#' \code{col} and \code{A0} in which case \code{row} and \code{col} are
#' taken to be the number of desired rows and columns used to construct
#' a grid across the landscape. If data are provided in the form of
#' \code{spp} and \code{abund} then either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number or desired rows and columns via the \code{row} and \code{col}
#' arguments.
#'
#' SARs and EARs can be predicted either interatively or non-iteratively.
#' In the non-iterative case the SAD and SSAD (which are used to calculate
#' the SAR or EAR prediction) are derived from state variables at one
#' anchor scale. In the iterative approach state variables are re-calculated
#' at each scale. Currently downscaling and upscaling are done differently (
#' downscaling is only implemented in the non-iterative approach, whereas
#' upscaling is only implemented in the iterative approach). The reason is
#' largely historical (downscaling as originally done non-iteratively while
#' upscaling was first proposed in an iterative framework). Future implementations
#' in \code{meteR} will allow for both iterative and non-iterative approaches
#' to upscaling and downscaling. While iterative and non-iterative methods lead to
#' slightly different predictions these are small in comparison to typical ranges of
#' state variables (see Harte 2011).
#'
#'
#' @param spp vector of species identities
#' @param abund numberic vector abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landcape into
#' @param col identity of column in a gridded landscape associated with each recod, or desired number of columns to divide the landcape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param S0 total number of species
#' @param N0 total abundance
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param upscale logical, should upscaling or downscaling be carried out
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' \dontrun{
#' data(anbo)
#'
#' ## using row and col from anbo dataset
#' anbo.sar1 <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.sar1)
#'
#' ## using simulated x, y data
#' anbo.sar2 <- meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#' plot(anbo.sar2)
#'
#' ## using just state variable
#' thr.sar <- meteSAR(Amin=1, A0=16, S0=50, N0=500)
#' }
#' @return an object of class \code{meteRelat} with elements
#' \describe{
#' \item{\code{pred}}{predicted relationship; an object of class \code{sar}}
#' \item{\code{obs}}{observed relationship; an object of class\code{sar}}
#' }
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso sad, meteESF, metePi
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
meteSAR <- function(spp, abund, row, col, x, y, S0 = NULL, N0 = NULL,
                    Amin, A0, upscale=FALSE, EAR=FALSE) {
  ## figure out vector of sizes in units of cells; right now only doublings supported
  ## not needed if upscale is TRUE
  if(!upscale) {
    # .findAreas (helper defined elsewhere in the package) grids the landscape;
    # every argument the caller omitted is forwarded as NULL so the helper can
    # pick a gridding strategy from whichever inputs are present.
    areaInfo <- .findAreas(
      spp=if(missing(spp)) NULL else spp,
      abund=if(missing(abund)) NULL else abund,
      row=if(missing(row)) NULL else row,
      col=if(missing(col)) NULL else col,
      x=if(missing(x)) NULL else x,
      y=if(missing(y)) NULL else y,
      Amin=if(missing(Amin)) NULL else Amin,
      A0=if(missing(A0)) NULL else A0)
    # Unpack the (possibly recomputed) gridding information.
    areas <- areaInfo$areas
    row <- areaInfo$row
    col <- areaInfo$col
    nrow <- areaInfo$nrow
    ncol <- areaInfo$ncol
    Amin <- areaInfo$Amin
    A0 <- areaInfo$A0
  }
  if(upscale & EAR) stop('upscaling EAR not currently supported')
  ## the ESF: state variables come from the raw data when available,
  ## otherwise from the S0/N0 arguments.
  ## NOTE(review): scalar conditions here use `&`/`|`; `&&`/`||` would be the
  ## idiomatic choice -- confirm the flags are always length 1 before changing.
  if(!missing(spp) & !missing(abund)) {
    S0 <- length(unique(spp))
    N0 <- sum(abund)
  }
  if(is.null(S0) | is.null(N0)) stop('must provide spp and abund data or state variables S0 and N0')
  thisESF <- meteESF(S0=S0, N0=N0)
  ## calculate empirical SAR (only possible when raw data were supplied)
  if(!missing(spp) & !missing(abund)) {
    eSAR <- empiricalSAR(spp, abund, row=row, col=col, Amin=Amin, A0=A0, EAR=EAR)
  } else {
    eSAR <- NULL
  }
  ## calculate theoretical SAR: iterative upscaling or non-iterative downscaling
  if(upscale) {
    thrSAR <- upscaleSAR(thisESF, Amin, A0, EAR)
  } else {
    thrSAR <- downscaleSAR(thisESF, areas*Amin, A0, EAR)
  }
  # Bundle observed and predicted relationships into a meteRelat object.
  out <- list(obs=eSAR, pred=thrSAR)
  class(out) <- 'meteRelat'
  return(out)
}
#================================================================
#' @title Empirical SAR or EAR
#'
#' @description computes observed SAR or EAR from raw data
#'
#' @details Currently only doublings of area are supported. There are
#' several options for specifying areas. Either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number or desired rows and columns via the \code{row} and \code{col}
#' arguments. If only \code{row} and \code{col} are provided these are taken
#' to be the row and column identities of each data entry
#'
#'
#'
#' @param spp vector of species identities
#' @param abund numberic vector abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landcape into
#' @param col identity of column in a gridded landscape associated with each recod, or desired number of columns to divide the landcape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.obs.sar <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.obs.sar)
#' anbo.obs.ear <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16, EAR=TRUE)
#' plot(anbo.obs.ear)
#'
#' ## empirical SAR from simulated x, y data
#' anbo$x <- runif(nrow(anbo), 0, 1) + anbo$column
#' anbo$y <- runif(nrow(anbo), 0, 1) + anbo$row
#' meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, downscaleSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
empiricalSAR <- function(spp, abund, row, col, x, y, Amin, A0, EAR=FALSE) {
  ## figure out vector of sizes in units of cells; right now only doublings supported
  # .findAreas (helper defined elsewhere in the package) resolves the gridding
  # from whichever of row/col, x/y, Amin/A0 the caller supplied.
  areaInfo <- .findAreas(
    spp=if(missing(spp)) NULL else spp,
    abund=if(missing(abund)) NULL else abund,
    row=if(missing(row)) NULL else row,
    col=if(missing(col)) NULL else col,
    x=if(missing(x)) NULL else x,
    y=if(missing(y)) NULL else y,
    Amin=if(missing(Amin)) NULL else Amin,
    A0=if(missing(A0)) NULL else A0)
  areas <- areaInfo$areas
  row <- areaInfo$row
  col <- areaInfo$col
  nrow <- areaInfo$nrow
  ncol <- areaInfo$ncol
  Amin <- areaInfo$Amin
  A0 <- areaInfo$A0
  ## loop over areas: for each cell size `a` count species (or endemics when
  ## EAR=TRUE) in the groups of neighbouring cells returned by .getNeighbors
  out <- lapply(areas, function(a) {
    nspp <- .getSppInGroups(spp, abund, row, col, .getNeighbors(a, nrow, ncol), EAR)
    data.frame(A=a*Amin, S=nspp)
  })
  out <- do.call(rbind, out)
  ## make output of class `sar' and tell it about empirical v. theoretical and ear v. sar
  attr(out, 'source') <- 'empirical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title Downscale the species area relationship (SAR) or endemics area relationship (EAR)
#'
#' @description Compute METE SAR by downscaling from some larger area \code{A0} to a smaller areas.
#'
#' @details Downscaling is done non-iteratively (i.e. the SAD and SSAD are calculated based on state variables at the anchor scale A0) thus unlike the upscaling SAR function, downscaling can be computed for any arbitrary scale
#' \eqn{\leq A_0}.
#'
#' @param x an object of class meteESF
#' @param A numerical vector of areas (<= \code{A0}) for which the METE prediction is desired
#' @param A0 total study area
#' @param EAR logical. TRUE computes the endemics area relatinship
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count)
#' anbo.thr.downscale <- downscaleSAR(anbo.esf, 2^(seq(-3, 4, length=7)), 16)
#' plot(anbo.thr.downscale)
#'
#' ## theoretical SARs from state variables only
#' thr.downscale <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1,4,by=1), 16)
#' thr.downscaleEAR <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1, 4, by=1), 16, EAR=TRUE)
#' plot(thr.downscale, ylim=c(0, 40), col='red')
#' plot(thr.downscaleEAR, add=TRUE, col='blue')
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
downscaleSAR <- function(x, A, A0, EAR=FALSE) {
  # All possible anchor-scale abundances 1..N0.
  n0 <- 1:x$state.var['N0']
  ## difference between EAR and SAR is for EAR we get Pi(n0) [fun .getPin0]
  ## and for SAR we get 1 - Pi(0) [1 - .getPi0]
  ## (.getPin0 / .getPi0 are helpers defined elsewhere in the package)
  if(EAR) {
    piFun <- function(a) .getPin0(n0, a, A0)
  } else {
    piFun <- function(a) 1 - .getPi0(n0, a, A0)
  }
  ## function to get species number at scale `a'
  getspp <- function(a) {
    # Weight the SSAD occupancy probabilities by the SAD (metePhi), summing
    # over all n0; when E0 is unavailable a large surrogate (N0 * 10^3) is
    # used -- presumably an effectively-infinite energy bound, TODO confirm.
    probs <- piFun(a) *
      with(x,
           metePhi(n0, La[1], La[2], Z,
                   state.var['S0'], state.var['N0'],
                   ifelse(is.na(state.var['E0']), state.var['N0']*10^3, state.var['E0'])))
    return(x$state.var['S0'] * sum(probs))
  }
  ## loop over A (the requested areas)
  nspp <- sapply(A, getspp)
  ## should return matrix with column for area and column for spp
  out <- data.frame(A=A, S=nspp)
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title upscale SAR
#'
#' @description Based on information at an anchor scale (\code{A0})
#' calcuate predicted species area relationship at larger scales
#'
#' @details Currently only doublings of area are supported and only
#' the SAR (not EAR) is supported. Upscaling works by iteratively
#' solving for the constraints (\eqn{S} and \eqn{N} at larger scales)
#' that would lead to the observed data at the anchor scale. See
#' references for more details on this approach.
#'
#'
#' @param x an object of class meteESF
#' @param A0 the anchor scale at which community data are availible.
#' @param Aup the larges area to which to upscale
#' @param EAR logical. TRUE computes the endemics area relatinship; currently not supported
#'
#' @export
#'
#' @examples
## combine SAR for scales at which we have data with upscaled SAR
#' data(anbo)
#' anbo.sar <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' anbo.sar
#' plot(anbo.sar, xlim=c(1, 2^10), ylim=c(3, 50), log='xy')
#'
#' ## get upscaled SAR and add to plot
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count) # need ESF for upscaling
#' anbo.sarUP <- upscaleSAR(anbo.esf, 16, 2^10)
#' plot(anbo.sarUP, add=TRUE, col='blue')
#'
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, downscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
upscaleSAR <- function(x, A0, Aup, EAR=FALSE) {
  ## vector of areas: doublings from the anchor area A0 up to (at least) Aup
  Aups <- A0 * 2^(0:ceiling(log(Aup/A0)/log(2)))
  ## abundance doubles with every doubling of area
  N0s <- x$state.var['N0'] * 2^(0:ceiling(log(Aup/A0)/log(2)))
  ## species richness at each area; the anchor value comes from the ESF
  S0s <- numeric(length(Aups))
  S0s[1] <- x$state.var['S0']
  ## recursively solve the upscaling constraint (helper .solveUpscale, defined
  ## elsewhere in the package), one doubling at a time.
  ## FIX: iterate with seq_along(...)[-1] rather than 2:length(Aups), which
  ## produced c(2, 1) -- and an out-of-bounds first step -- when Aup == A0.
  ## (Also removed the unused `termcodes` vector.)
  for(i in seq_along(Aups)[-1]) {
    S0s[i] <- .solveUpscale(S0s[i-1], N0s[i-1])
  }
  ## return an `sar` object: one row per area with its predicted richness
  out <- data.frame(A=Aups, S=S0s)
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
|
e8297925b25bd2826320622923e17dfdd33b85ed
|
5940c0ee54f892aa5f0933ee6991fbbf8ab1c406
|
/man/iterateModels_raw.Rd
|
f15e72a446f85a7b57a2377916d85b4684883ad7
|
[] |
no_license
|
sboehringer/package
|
fff874cf7c7d9bd29d80da7e94e39ce8dbd99a1d
|
e069efa130486b82ad39f41ba65d971c5ac96ea5
|
refs/heads/master
| 2022-12-27T08:56:44.158868
| 2022-12-20T12:43:54
| 2022-12-20T12:43:54
| 220,493,110
| 2
| 0
| null | 2019-11-11T14:27:07
| 2019-11-08T15:18:23
|
R
|
UTF-8
|
R
| false
| true
| 1,967
|
rd
|
iterateModels_raw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rdata.R
\name{iterateModels_raw}
\alias{iterateModels_raw}
\title{Iterate combinations of parameters}
\usage{
iterateModels_raw(
modelList,
models,
f_iterate = function(...) list(...),
...,
callWithList = FALSE,
callMode = NULL,
restrictArgs = TRUE,
parallel = FALSE,
lapply__
)
}
\arguments{
\item{modelList}{list specifying the models (see details)}
\item{models}{matrix containing indeces to sub-models (see details)}
\item{f_iterate}{function to be iterated across models}
\item{...}{extra arguments to be passed to \code{f_iterate()}}
\item{callWithList}{boolean to indicate whether model combination is to be supplied as a list.
Otherwise model specification is inlined as arguments (see details)}
\item{callMode}{'inline', 'list', 'inlist'}
\item{restrictArgs}{boolean to indicate whether over-supplied arguments (with respect to \code{f_iterate()}) are dropped}
\item{parallel}{boolean to indicate whether iteration should be parallelized with
\code{parallelize.dynamic}}
\item{lapply__}{the iterator to be used (ignored at this moment)}
}
\value{
list containing the result of \code{f_iterate()} for all parameter combinations
}
\description{
This function takes a list of parameters for which several values are to be evaluated. These values can be vectors of numbers or lists that contain blocks of parameters. All combinations are formed and passed to a user supplied function \code{f_iterate()}. This function takes an index of the combination together with parameter values. Argument \code{callWithList} controls whether there is exactly one argument per parameter position or whether one more step of unlisting takes place. In case that a block of parameters is supplied, all values of the block are passed as individual arguments to \code{f_iterate()} in case \code{callWithList == FALSE}.
}
\details{
#@param selectIdcs restrict models to the given indeces
}
|
54a67ae303419c20f0ccd4c7aa1c0bfecd15a61f
|
977e25b030bc27e923f52b08305a6dec2cfd02fd
|
/finance_basics_with_r/xts-zoo2/5_extra/periodocity_example.R
|
86368bd2f5a471bd20f579effbd389f50a576423
|
[] |
no_license
|
printfCRLF/rr
|
d4cd813fafef7d64da2722ade9e14220c12e17ff
|
4116f726f5ad7a8cadbe6841d13abbdb998ee294
|
refs/heads/master
| 2021-04-15T15:08:37.032087
| 2019-07-12T08:29:26
| 2019-07-12T08:29:26
| 126,468,211
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 899
|
r
|
periodocity_example.R
|
# Periodicity / time-index examples for xts objects.
# NOTE(review): `edhec` and `z` are assumed to exist in the workspace (e.g.
# data(edhec) from PerformanceAnalytics); they are not defined in this file.
temps_csv <- read.csv("Temps.csv")
temps <- as.xts(temps_csv)

# Print the periodicity of a series before and after yearly aggregation.
determine_periodicity <- function() {
  periodicity(temps)
  p <- periodicity(edhec)
  print(p)
  edhec_yearly <- to.yearly(edhec)
  p <- periodicity(edhec_yearly)
  print(p)
}

# Count the months / quarters / years spanned by the edhec series.
find_number_of_periods <- function() {
  n_months <- nmonths(edhec)
  print(n_months)
  n_quarters <- nquarters(edhec)
  print(n_quarters)
  n_years <- nyears(edhec)
  print(n_years)
}

# Use the raw time index to extract weekend observations
# (.indexwday: 0 = Sunday, 6 = Saturday).
index_tool <- function() {
  .index(temps)
  .indexwday(temps)
  index <- which(.indexwday(temps) == 6 | .indexwday(temps) == 0)
  weekends <- temps[index]
  # BUGFIX: previously printed the base function `weekdays` instead of the
  # `weekends` subset computed just above.
  print(weekends)
}

# Timestamp clean-up examples. (The function name keeps the original
# "modity" typo so any existing callers are unaffected.)
modity_timestamps <- function() {
  z_unique <- make.index.unique(z, eps = 1e-4)
  z_dup <- make.index.unique(z, drop = TRUE)
  z_round <- align.time(z, n = 3600)
}

#determine_periodicity()
#find_number_of_periods()
index_tool()
modity_timestamps()
|
03a61e197cbe60f08be6b085ff5d4b2a62f1b47e
|
74bc807e2c59d6e4eb881228bce0e1616db971cd
|
/r/rethinking/binomial_distribution.R
|
77101e32b037b541725aeb3f405588f15b4f95c2
|
[] |
no_license
|
luigiselmi/datascience
|
29f850bb8debd98560eb24b2144fee7ff1ff2fe8
|
7cffbeb6a8292633c8e204a8ac87e01790b23944
|
refs/heads/master
| 2023-07-11T00:39:08.367239
| 2023-06-27T10:20:54
| 2023-06-27T10:20:54
| 98,037,236
| 18
| 13
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,432
|
r
|
binomial_distribution.R
|
# Binomial distribution
# Estimation of the bias of a coin.
# A coin may be biased, so the probability theta of heads after a toss need
# not be 0.5. We encode prior knowledge about plausible theta values as a
# triangular prior p(theta) on [0, 1], use the binomial distribution as the
# likelihood for k successes out of n tosses, and obtain the posterior via
# Bayes' rule (grid approximation: the likelihood is evaluated at equally
# spaced theta values; works here because there is a single bounded
# parameter -- alternatives are quadratic approximation and MCMC).

# --- Example 1 (Kruschke ch. 5, par. 5.3) ---------------------------------
# Coarse grid of parameter values.
theta <- seq(from = 0, to = 1, by = 0.1)
# Triangular prior: rises linearly to a peak at theta = 0.5, then falls.
p1 <- 0.4 * theta[1:6]
p2 <- 0.4 - 0.4 * theta[7:11]
prior <- c(p1, p2)
plot(theta, prior, type = "h", col = "skyblue")
# Likelihood for a single toss (n = 1 makes this a Bernoulli distribution).
n = 1 # sample size
k = 1 # number of success events in the sample
likelihood <- dbinom(k, size = n, prob = theta)
plot(theta, likelihood, ylab = "likelihood p(x | theta)", type = "h", col = "skyblue")
# Marginal likelihood p(D) = sum over the grid of likelihood * prior.
marginal <- sum(likelihood * prior)
# Posterior via Bayes' rule.
posterior <- likelihood * prior / marginal
# Posterior mode (grid value with the highest posterior probability).
mode_posterior <- theta[which.max(posterior)]
plot(theta, posterior, ylab = "posterior p(theta | x)", type = "h", col = "skyblue")

# --- Example 2 (Kruschke ch. 5, par. 5.3.1): influence of sample size ------
# Finer grid. (FIX: removed the stray, unused `s <-` from the original
# chained assignment `theta <- s <- seq(...)`.)
theta <- seq(from = 0, to = 1, by = 0.001)
# Same triangular prior on the finer grid (peak still at theta = 0.5).
p1 <- 0.4 * theta[1:500]
p2 <- 0.4 - 0.4 * theta[501:1001]
prior <- c(p1, p2)
plot(theta, prior, ylab = "prior_1000", type = "h", col = "skyblue")
# Likelihood for k = 10 successes out of n = 40 tosses.
n = 40 # sample size
k = 10 # number of success events in the sample
likelihood <- dbinom(k, size = n, prob = theta)
# Maximum-likelihood estimate of theta (here k/n = 0.25).
mode_likelihood <- theta[which.max(likelihood)]
plot(theta, likelihood, ylab = "likelihood_1000 p(x | theta)", type = "h", col = "skyblue")
text( .5 , 0.1 , paste("mode =", mode_likelihood))
# Marginal likelihood p(D).
marginal <- sum(likelihood * prior)
# Posterior distribution for theta.
posterior <- likelihood * prior / marginal
# Posterior mode: pulled between the prior peak (0.5) and the MLE (0.25).
mode_posterior <- theta[which.max(posterior)]
plot(theta, posterior, ylab = "posterior_1000 p(theta | x)", type = "h", col = "skyblue")
text( .7 , 0.0020 , paste("mode =", mode_posterior))
|
f2458add20e4079f8e326ded6742747f6abad645
|
255aa1f4187b1851be410fa3498e78dc95b8eaca
|
/plot3.R
|
f9ec6cb613ffb4f29eaf08a62605b53c45188958
|
[] |
no_license
|
tregubov-kv/ExData_Plotting1
|
7f17f462764b2dc251f448da2c004b692c36f700
|
a8c83e2c78617eabd48b0ef6df993edae5dbc9aa
|
refs/heads/master
| 2021-01-18T00:20:36.081108
| 2015-04-10T14:47:02
| 2015-04-10T14:47:02
| 33,672,975
| 0
| 0
| null | 2015-04-09T14:16:00
| 2015-04-09T14:16:00
| null |
UTF-8
|
R
| false
| false
| 1,002
|
r
|
plot3.R
|
# plot3.R: plot the three sub-metering series for 1-2 Feb 2007 and save to PNG.
data <- read.table(".//exdata_data_household_power_consumption//household_power_consumption.txt", header = T, sep = ";", na.strings = "?")
# Keep only the two days of interest.
data1 <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
rm(data)  # free the full dataset
# Build a proper date-time index from the Date and Time columns.
data1$Time <- strptime(paste(data1$Date, data1$Time), "%d/%m/%Y %H:%M:%S")
data1$Date <- as.Date(data1$Date, "%d/%m/%Y")
par(mar = c(4,4,2,2), font.main = 2, cex = 0.6,
    ps = 18)
Sys.setlocale("LC_ALL","C")  # English weekday labels on the x axis
# Base plot: sub-metering 1 in black, then overlay 2 (red) and 3 (blue).
with(data1,
     plot(Time, Sub_metering_1,
          type = "l",
          xlab = "",
          ylab = "Energy sub metring"
     ))
with(data1,
     lines(Time, Sub_metering_2,
           col = "red"
     ))
# FIX: removed the trailing empty argument (`col = "blue",`) from this call.
with(data1,
     lines(Time, Sub_metering_3,
           col = "blue"
     ))
legend("topright",
       lty = 1,
       x.intersp = 0.7,
       y.intersp = 0.9,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
)
# Copy the screen device to a 480x480 PNG.
dev.copy(png, file = "plot3.png", units = "px", width = 480, height = 480)
dev.off()
|
5bd5fb1e95a80538a4f4ac929d171462d1fe8953
|
84f5ac25a17b16191b40d979b4d4fc0bc21b0b9e
|
/man/cliches.Rd
|
da365113f983ac82a78c0ece3216bc5ce00b4a9c
|
[] |
no_license
|
cran/lexicon
|
1d23c81e51c828020110b9f7d2d01980bdf78bf3
|
03761ddba87f3ac1dd6af743508a7e8303be061b
|
refs/heads/master
| 2021-01-12T01:03:22.432056
| 2019-03-21T09:40:03
| 2019-03-21T09:40:03
| 78,337,774
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,541
|
rd
|
cliches.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cliches.R
\docType{data}
\name{cliches}
\alias{cliches}
\title{Common Cliches}
\format{A character vector with 697 elements}
\usage{
data(cliches)
}
\description{
A dataset containing a character vector of cliches.
}
\section{License}{
(The MIT License)
Copyright (c) 2016 Duncan Beaton <mailto:dunckr@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
}
\references{
\url{https://github.com/dunckr/retext-cliches}
}
\keyword{datasets}
|
ca61efb7190c249485499b663da380200d33e1a6
|
d89873be285ea095c421aca70c4de4ce2480f41d
|
/ML/winecsv.R
|
678b73a406bf691527001abba352667b2f46417d
|
[] |
no_license
|
rusimody/PCISummerSchool2017
|
198026b16c3cd34255b8e5abb35beb24d0225b7e
|
3d59539aacda3532faa1ba293976b7230f73135d
|
refs/heads/master
| 2021-01-23T01:46:08.475385
| 2017-06-10T18:14:18
| 2017-06-10T18:14:18
| 92,891,351
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,549
|
r
|
winecsv.R
|
# winecsv.R — SVM and random-forest classification of the wine data set.
# Splits the data 80/20, cross-validates a polynomial SVM, writes the
# predictions for the hold-out set to submit.csv, and runs random-forest CV.

library(e1071)
library(randomForest)

setwd("C:/Users/neel jambhekar/Desktop/persistent")  # NOTE(review): hard-coded path
E <- read.csv("wine.csv")
data <- E
# Column 1 is the class label: keep it separately as a factor and drop it
# from the feature table.
cl <- as.factor(data[,1])
data <- data[,-1]

s <- sample(1:nrow(E), nrow(E)/5) #randomly sample 20 % for test
# BUG FIX: the original split used E, which still contains the label in
# column 1, so the classifiers were trained with the answer as a feature.
# Split the label-free feature table instead.
train <- data[-s,]
cl_train <- cl[-s]
val <- data[s,]
cl_val <- cl[s]

# 5-fold cross-validated polynomial-kernel SVM.
svm_cv <- svm(train, cl_train, cross = 5,
              kernel = "polynomial",
              cost = 100, gamma = 0.5, degree = 2, coef0 = 1)
print(svm_cv$accuracies)
print(svm_cv$tot.accuracy)

# Class-weighted SVM; assumes three classes — TODO confirm against wine.csv.
weight_svm <- c(0.3, 0.3, 0.4)
names(weight_svm) <- levels(cl_train)
svm_weighted <- svm(train, cl_train, cross = 5, class.weights = weight_svm)

#build final model with entire training set (with tuned parameters) and use this to predict on validation set
model_svm <- svm(train, cl_train, kernel = "polynomial", cost = 2.30, gamma = 0.1, degree = 1, coef0 = 1)
pred_svm <- predict(model_svm, val)
#find accuracy
t_svm <- table(pred_svm, cl_val)
acc_svm <- sum(diag(t_svm)) / sum(t_svm)
print(acc_svm)

#create submission
pred_mat <- matrix(nrow = nrow(val), ncol = 2)
pred_mat[,1] <- pred_svm
pred_mat[,2] <- cl_val
colnames(pred_mat) <- c("predicted", "actual")
write.csv(pred_mat, "submit.csv", row.names = FALSE)

#rfcv
rf_cv <- rfcv(train, cl_train, cv.fold = 5)
print(rf_cv$error.cv)
# BUG FIX: model_rf was used below without ever being defined; fit it here
# with the already-loaded randomForest package.
model_rf <- randomForest(train, cl_train)
pred_rf <- predict(model_rf, val)
t_rf <- table(pred_rf, cl_val)
acc_rf <- sum(diag(t_rf)) / sum(t_rf)

#rf
#train_control <- trainControl(method="cv", number=10)
#model <- train(train,cl_train,trControl= train_control,method = "rf",mrty=4,ntree= 100)
# NOTE(review): train() below belongs to the caret package, which is never
# loaded here, and method = "cv" is not a caret model name (CV is set up via
# trainControl(), as the commented lines above show). Left as in the original
# pending confirmation of intent.
model <- train(train, cl_train, method = "cv", number = 10)
print(model)
|
35c80f3aa14dd6bf9a6b52be873a3d0a9eaa43fc
|
e429174458860830dd3f9428832d5494648d09f7
|
/freqTableBuilder.R
|
7aae61c0572ad0cacd75a94757458210cbd1f94d
|
[] |
no_license
|
syarb002/textPrediction
|
00333a50d0d21a1589f698af255a5f4aaeae3ad3
|
03451b483eda03468b12696fe4c7afd8c59d8218
|
refs/heads/master
| 2021-01-13T03:14:07.657551
| 2016-12-26T23:44:59
| 2016-12-26T23:44:59
| 77,412,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 829
|
r
|
freqTableBuilder.R
|
# Frequency Table Builder for Capstone Project
#
# Grabs 1,000 documents at a time, tokenizes them, then summarizes by
# frequency. Limits analysis so that the system memory isn't exceeded.
#
# Relies on objects/packages set up elsewhere in the session:
#   sampleCorp — presumably a quanteda corpus of ~333,667 documents (TODO confirm)
#   dfm() from quanteda, data.table(), and dplyr verbs (group_by/summarize/full_join).
i <- 1 # Set this to wherever you left off if processing interrupted
while (i < 333667) {
# Tokenize the next chunk of 1,000 documents into 4-grams.
if (i == 1) {
dfm4G <- dfm(sampleCorp[1:1000], ngrams = 4, concatenator = " ")
} else {
dfm4G <- dfm(sampleCorp[i:(i+999)], ngrams = 4, concatenator = " ")
}
# Column sums of the document-term matrix = per-4-gram counts for this chunk.
temp <- colSums(as.matrix(quanteda::as.DocumentTermMatrix(dfm4G)))
temp <- data.table(names(temp), temp)
if (i == 1) {
freq4 <- temp
} else {
# Merge this chunk's counts into the running table, keyed on the 4-gram (V1).
freq4 <- data.table(setnames(summarize(group_by(full_join(freq4, temp, by = "V1"), V1), sum(temp.x, temp.y)), c("V1", "temp")))
# NOTE(review): sum(temp.x, temp.y) is NA when a 4-gram occurs on only one
# side of the full join, and the line below then resets that count to 1 —
# which discards any previously accumulated count. Verify this is intended.
freq4[is.na(temp) == TRUE]$temp <- 1
}
i <- i + 1000
print(i)  # progress indicator
}
save.image()
|
7bc97597ca1809ac55339a743eb1d5a62ad483cb
|
6db590e37ee3c17eb381d8e5c719b82ddcf010d8
|
/Team5_Harkathon_AT.R
|
a81e1ae0107f8e2556bf17cc811e8b6297dbe6c3
|
[] |
no_license
|
netalexhk/SOA_PPAS_HarkAthon_2020
|
b290f35217e72db631bdf078a999b0652d0f33f4
|
d4bc1b46539c8e5541d2d796133dd1d29df42bf6
|
refs/heads/master
| 2022-12-18T12:17:38.898742
| 2020-09-25T21:08:08
| 2020-09-25T21:08:08
| 298,679,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
r
|
Team5_Harkathon_AT.R
|
# Team5_Harkathon_AT.R — SOA PPAS Hark-a-thon: XGBoost binary classifier.
#
# FIX: removed a stray "---" line that preceded this header (a leftover
# markdown/YAML fence); parsed as R it chains unary minus onto the next
# expression and aborts the script immediately.

## Load packages
library(dplyr)
library(lubridate)
library(car)
library(ggplot2)
library(tidyr)
library(tidyverse)
library(xgboost)
library(readr)
library(MLmetrics)

# Training/testing extracts prepared upstream.
training <- read_csv("R/wd-PAF/training_prep_2.csv")
testing <- read_csv("R/wd-PAF/testing_2.csv")

# Model matrix of all predictors (the response column is removed first),
# packed with the 0/1 response into an xgboost DMatrix.
mod.mat <- model.matrix( ~ .,
                         data = training %>% select (-result))
response <- training$result
d.mat <- xgb.DMatrix(data = mod.mat, label = response)

mod.mat.testing <- model.matrix( ~ .,
                                 data = testing)
d.mat.testing <- xgb.DMatrix(data = mod.mat.testing)

dim(d.mat)
dim(d.mat.testing)

watchlist <- list(train=d.mat, test=d.mat.testing)
param <- list(max_depth = 2, eta = 1, nthread = 2, objective = 'binary:logistic')
nrounds <- 100

# Shallow boosted model fitted on the training DMatrix.
bstDMatrix <- xgboost(data = d.mat, max.depth = 2,
                      eta = 1, nthread = 2, nrounds = 2,
                      objective = "binary:logistic")
xgb.importance(model = bstDMatrix)

cat('running cross validation\n')
xgb.cv(param, d.mat, nrounds, nfold = 20, metrics = {'error'})
xgb.cv(param, d.mat, nrounds, nfold = 20, metrics = 'error', showsd = FALSE)

# In-sample predictions, thresholded at 0.5.
pred <- predict(bstDMatrix, mod.mat)
pred_1 <- ifelse(pred > 0.5, 1, 0)

# Test-set predictions using only the first tree, then the full model.
ypred1 <- predict(bstDMatrix, d.mat.testing, ntreelimit = 1)
ypred1_re <- ifelse(ypred1 > 0.5, 1, 0)
ypred2 <- predict(bstDMatrix, d.mat.testing)
# BUG FIX: this line originally reassigned ypred1_re, clobbering the
# single-tree classification above; it thresholds ypred2.
ypred2_re <- ifelse(ypred2 > 0.5, 1, 0)

# NOTE(review): all three labels below say "ypred1" although they report
# pred, ypred1 and ypred2 respectively — output text kept as in the original.
cat('error of ypred1=', mean(as.numeric(pred > 0.5)), '\n')
cat('error of ypred1=', mean(as.numeric(ypred1 > 0.5)), '\n')
cat('error of ypred1=', mean(as.numeric(ypred2 > 0.5)), '\n')

# F1 of the in-sample classification against the observed response.
F1_Score(y_true = response, pred_1, positive = "1")
|
1b73ac660b721443728b477bd4026397764a910f
|
63db753857de5950ce413f2101d2b24854b2a7e3
|
/R/portion_2.R
|
78e81485dd0a04fd58c1d18ed94f107f3c826232
|
[] |
no_license
|
AACSB/assessmentAACSB
|
8415f16f42426f52ea7f24b635329e2287140d48
|
9d407ebfcff3ee47abb4972fc56a6b9ce0b17af1
|
refs/heads/master
| 2023-02-18T10:34:53.889473
| 2020-12-22T20:35:35
| 2020-12-22T20:35:35
| 323,725,499
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
portion_2.R
|
#' Return the total enrollment for each Group/Racial/Gender category for a given
#' state code.
#'
#' \code{get_category_by_state} returns total enrollment by diversity category
#' for a given state code.
#'
#' This function parses the \code{yaml} configuration file that lists all
#' of the sources for each customer.
#'
#' @param data a tibble or data frame.
#' @param state_code the 2-character state code
#' @return Returns a data frame.
#'
#' @examples
#' get_category_by_state(my_df, "FL")
#' @importFrom magrittr %>%
|
ff7edf52e2bf0d91ece2b1247c54b6edff94518a
|
5640b05cb5e4182a3b5388531877a506c34e543b
|
/data-analysis/calculate_incidence.R
|
a5e8c893e67834035ce998f358e4613eb3224fe2
|
[] |
no_license
|
zeldow/edp-extras
|
4f72d7d3fb6aa86d1ca882a3472787734ed2d8ff
|
7d2cd96a356bdde4fe5fc0ee172118dbfcbdc1af
|
refs/heads/master
| 2022-11-26T23:30:20.837344
| 2020-07-20T21:27:55
| 2020-07-20T21:27:55
| 281,157,776
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,063
|
r
|
calculate_incidence.R
|
# Incidence calculation for dissertation paper 2.
# Reads the SAS outcome data set and computes crude incidence rates
# (events per person-year) of two diabetes outcome definitions, by site.
setwd("X:/dissertation/paper2/")  # NOTE(review): hard-coded path; not portable
library(haven)
library(dplyr)
library(stringr)
outcome <- read_sas("outcome.sas7bdat")
## O1 - outcomes and time on study
# Events (Diag1) and follow-up time (surv1, in days) summed per site (site1).
out1 <- aggregate(outcome$Diag1, by = list(outcome$site1), FUN = sum)
time1 <- aggregate(list(time = outcome$surv1), by = list(outcome$site1), FUN = sum)
o1 <- inner_join(time1,out1,by = "Group.1")
o1$time <- o1$time/365  # days -> person-years
o1$incidence <- o1$x/o1$time  # events per person-year
## O2 - outcomes and time on study
# Same computation for the second outcome definition (Diag5/surv5).
out2 <- aggregate(outcome$Diag5, by = list(outcome$site1), FUN = sum)
time2 <- aggregate(list(time = outcome$surv5), by = list(outcome$site1), FUN = sum)
o2 <- inner_join(time2,out2,by = "Group.1")
o2$time <- o2$time/365
o2$incidence <- o2$x/o2$time
# Log person-years, used later as Poisson offsets in glm.res/glm.res2.
outcome$lnsurv1 <- log(outcome$surv1/365)
outcome$lnsurv5 <- log(outcome$surv5/365)
## some subjects have survtime = 0
# Count events among zero-survival subjects (their log offset is -Inf;
# glm.res excludes them via surv > 0).
sum(outcome$Diag1[which(outcome$surv1==0)])
sum(outcome$Diag5[which(outcome$surv5==0)])
# Intercept-only Poisson incidence model for one site / lab stratum.
#
# Fits glm(<out1> ~ 1, family = poisson) with a log person-years offset on
# the subset of `data` with positive survival time, the given site, and
# anypostlab in `lab`, and returns the incidence rate with its 95% CI.
#
# Args:
#   out1: name (string) of the 0/1 outcome column, e.g. "Diag1".
#   surv: name (string) of the survival-time column, e.g. "surv1"; the
#         matching log column "ln<surv>" must already exist in `data`.
#   SITE: site code compared against the site1 column.
#   lab:  vector of anypostlab values to keep, e.g. c(0, 1).
#   data: data frame holding the columns above; defaults to the global
#         `outcome` table, preserving the original behavior. (Added so the
#         near-duplicate glm.res2 logic can reuse this function.)
# Returns: named numeric vector — Incidence, Lower 95%, Upper 95%.
glm.res <- function(out1, surv, SITE, lab, data = outcome) {
  # Column names arrive as strings; eval(as.name(...)) resolves them inside
  # the data frame when glm evaluates its formula/subset/offset arguments.
  dum.glm <- glm(eval(as.name(out1)) ~ 1, data = data,
                 subset = eval(as.name(surv)) > 0 & site1 == SITE & anypostlab %in% lab,
                 offset = eval(as.name(stringr::str_c("ln", surv))),
                 family = poisson(link = log))
  # Exponentiate the log-rate and its CI (BSagri::CIGLM) to the rate scale.
  res <- c(exp(dum.glm$coefficients), exp(BSagri::CIGLM(dum.glm)))
  names(res) <- c("Incidence", "Lower 95%", "Upper 95%")
  return(res)
}
# Incidence of outcome O1 (Diag1/surv1) for each site, overall and split by
# whether the subject had any post-baseline lab result (anypostlab 1 vs 0).
res1a <- glm.res("Diag1", "surv1", "SITE1", c(0, 1))
res2a <- glm.res("Diag1", "surv1", "SITE1", c(1))
res3a <- glm.res("Diag1", "surv1", "SITE1", c(0))
res4a <- glm.res("Diag1", "surv1", "SITE2", c(0, 1))
res5a <- glm.res("Diag1", "surv1", "SITE2", c(1))
res6a <- glm.res("Diag1", "surv1", "SITE2", c(0))
res7a <- glm.res("Diag1", "surv1", "SITE3", c(0, 1))
res8a <- glm.res("Diag1", "surv1", "SITE3", c(1))
res9a <- glm.res("Diag1", "surv1", "SITE3", c(0))
## O2
# Same nine strata for outcome O2 (Diag5/surv5).
res1b <- glm.res("Diag5", "surv5", "SITE1", c(0, 1))
res2b <- glm.res("Diag5", "surv5", "SITE1", c(1))
res3b <- glm.res("Diag5", "surv5", "SITE1", c(0))
res4b <- glm.res("Diag5", "surv5", "SITE2", c(0, 1))
res5b <- glm.res("Diag5", "surv5", "SITE2", c(1))
res6b <- glm.res("Diag5", "surv5", "SITE2", c(0))
res7b <- glm.res("Diag5", "surv5", "SITE3", c(0, 1))
res8b <- glm.res("Diag5", "surv5", "SITE3", c(1))
res9b <- glm.res("Diag5", "surv5", "SITE3", c(0))
# Format res{i}a / res{i}b into a 9 x 4 character table:
# O1 rate, O1 "(lo-hi)" CI, O2 rate, O2 "(lo-hi)" CI per row.
all.res <- matrix(NA_character_, nrow = 9, ncol = 4)
for(i in 1:9){
all.res[i,1] <- sprintf("%.3f", eval(as.name(str_c("res",i,"a"))))[1]
all.res[i,2] <- str_c("(",
sprintf("%.3f", eval(as.name(str_c("res",i,"a"))))[2],
"-",
sprintf("%.3f", eval(as.name(str_c("res",i,"a"))))[3],
")")
all.res[i,3] <- sprintf("%.3f", eval(as.name(str_c("res",i,"b"))))[1]
all.res[i,4] <- str_c("(",
sprintf("%.3f", eval(as.name(str_c("res",i,"b"))))[2],
"-",
sprintf("%.3f", eval(as.name(str_c("res",i,"b"))))[3],
")")
}
print(o1)
print(o2)
print(all.res) ## coresponds to Table 2 of paper
###############################################################
###############################################################
######### ADD IN PREDICTIONS ##################################
###############################################################
###############################################################
## thresholds for diabetes
# Diagnostic cut-offs: HbA1c (%), fasting glucose and random glucose (mg/dL).
a1c.thres <- 6.5
glu.thres <- 126
rand.thres <- 200
## fasting glucose predictions
# Posterior prediction draws; each .Rdata file defines pred.w.data (subjects
# with lab data) and pred.wo.data (subjects without) — TODO confirm shapes.
setwd("X:/dissertation/paper2/glucose/final/output")
load("final_pred_10000.Rdata")
glu.w.data <- pred.w.data
glu.wo.data <- pred.wo.data
rm(pred.w.data, pred.wo.data)
## a1c predictions
setwd("X:/dissertation/paper2/a1c/final/output")
load("all_pred_10000.Rdata")
a1c.w.data <- pred.w.data
a1c.wo.data <- pred.wo.data
rm(pred.w.data, pred.wo.data)
### get ids for the predictions
# Study ids split by whether any lab value (rellab) was observed; the
# column order here must match the column order of the prediction matrices.
setwd("X:/dissertation/paper2")
fast <- read_sas("./glucose/data/glu_more.sas7bdat")
#rand <- read_sas("./random/data/glu_ran_more.sas7bdat")
a1c <- read_sas("./a1c/data/a1c_more.sas7bdat")
a1c.nodata.ids <- unique(a1c$studyid[which(is.na(a1c$rellab))])
length(a1c.nodata.ids)
a1c.data.ids <- unique(a1c$studyid[which(!is.na(a1c$rellab))])
length(a1c.data.ids)
glu.nodata.ids <- unique(fast$studyid[which(is.na(fast$rellab))])
length(glu.nodata.ids)
glu.data.ids <- unique(fast$studyid[which(!is.na(fast$rellab))])
length(glu.data.ids)
# rand.nodata.ids <- unique(rand$studyid[which(is.na(rand$rellab))])
# length(rand.nodata.ids)
#
# rand.data.ids <- unique(rand$studyid[which(!is.na(rand$rellab))])
# length(rand.data.ids)
# Same intercept-only Poisson incidence model as glm.res above, but fitted
# on the global `outcome.sup` (the outcome table joined with the i-th set of
# imputed A1c/glucose predictions, rebuilt inside the loop below).
# NOTE(review): duplicates glm.res except for the data source; the two could
# be merged into one function taking a data argument.
glm.res2 <- function(out1, surv,SITE,lab) {
# out1/surv arrive as column-name strings; eval(as.name(...)) resolves them
# inside the data frame when glm evaluates formula/subset/offset.
dum.glm <- glm(eval(as.name(out1)) ~ 1, data = outcome.sup,
subset = eval(as.name(surv)) > 0 & site1 == SITE & anypostlab %in% lab,
offset = eval(as.name(stringr::str_c("ln", surv))),
family = poisson(link = log))
# Exponentiate log-rate and its CI (BSagri::CIGLM) to the rate scale.
res <- c(exp(dum.glm$coefficients), exp(BSagri::CIGLM(dum.glm)))
names(res) <- c("Incidence", "Lower 95%", "Upper 95%")
return(res)
}
# Loop over posterior prediction draws, augmenting the outcome data with the
# imputed lab values and re-estimating the nine stratum incidences.
total.preds <- dim(a1c.w.data)[1]
# NOTE(review): the line below immediately overwrites total.preds with 800,
# so only the first 800 draws are used — confirm this truncation is intended.
total.preds <- 800
for(i in 1:total.preds) {
#for(i in 100:100) {
# Row i of each prediction matrix = one complete draw; with-data and
# without-data subjects are stacked in the same order as the id vectors.
a1c.p1 <- a1c.w.data[i,]
a1c.p2 <- a1c.wo.data[i,]
a1c.pred <- data.frame(studyid = c(a1c.data.ids, a1c.nodata.ids), a1c.pred = c(a1c.p1, a1c.p2), stringsAsFactors = FALSE)
#a1c.pred$studyid <- as.character(a1c.pred$studyid)
glu.p1 <- glu.w.data[i,]
glu.p2 <- glu.wo.data[i,]
glu.pred <- data.frame(studyid = c(glu.data.ids, glu.nodata.ids), glu.pred = c(glu.p1, glu.p2), stringsAsFactors = FALSE)
# Supplement the outcome table with this draw's imputed values.
outcome.sup <- dplyr::inner_join(outcome, a1c.pred, by = "studyid")
outcome.sup <- dplyr::inner_join(outcome.sup, glu.pred, by = "studyid")
# Flag diagnostic threshold exceedances and build the composite outcome:
# observed diagnosis OR imputed A1c/glucose above the cut-off.
outcome.sup$a1c.high <- as.numeric(outcome.sup$a1c.pred >= a1c.thres)
outcome.sup$glu.high <- as.numeric(outcome.sup$glu.pred >= glu.thres)
outcome.sup$new.o <- as.numeric(outcome.sup$a1c.high == 1 | outcome.sup$glu.high == 1 | outcome.sup$Diag1 == 1)
# NOTE(review): res1..res9 are overwritten on every iteration and never
# stored per-draw, so only the last draw's results survive the loop —
# verify whether the per-iteration results should be collected.
res1 <- glm.res2("new.o", "surv5", "SITE1", c(0, 1))
res2 <- glm.res2("new.o", "surv5", "SITE1", c(1))
res3 <- glm.res2("new.o", "surv5", "SITE1", c(0))
res4 <- glm.res2("new.o", "surv5", "SITE2", c(0, 1))
res5 <- glm.res2("new.o", "surv5", "SITE2", c(1))
res6 <- glm.res2("new.o", "surv5", "SITE2", c(0))
res7 <- glm.res2("new.o", "surv5", "SITE3", c(0, 1))
res8 <- glm.res2("new.o", "surv5", "SITE3", c(1))
res9 <- glm.res2("new.o", "surv5", "SITE3", c(0))
}
# Fraction of subjects above each diagnostic cut-off, per draw (column means
# over the prediction matrices), summarized across draws.
case.glu1 <- (glu.w.data >= 126)
summary(colMeans(case.glu1))
case.glu2 <- (glu.wo.data >= 126)
summary(colMeans(case.glu2))
case.a1c1 <- (a1c.w.data >= 6.5)
summary(colMeans(case.a1c1))
case.a1c2 <- (a1c.wo.data >= 6.5)
summary(colMeans(case.a1c2))
|
c9982ae4f5763697d5dac523d16a36ec26e8f6fe
|
14839c7f9ee1fbf176a1c675c42192a0c3534a61
|
/rprog-data-ProgAssignment3-data/rankall.R
|
fc09dfc0bcdf4d2d251aa03b35a12597a7f4d186
|
[] |
no_license
|
carlosrojasmatas/rtraining
|
70b21e8edd7c51bb2aaec41dd34fef76d05b0b4c
|
2eb1f62d93f1dc0918147854b0c05e3bfef744f6
|
refs/heads/master
| 2021-01-10T13:27:29.804331
| 2016-01-02T15:28:18
| 2016-01-02T15:28:18
| 48,320,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,330
|
r
|
rankall.R
|
# For each state, find the hospital at the given rank for an outcome.
#
# Args:
#   out: one of "heart attack", "heart failure", "pneumonia".
#   num: rank to extract — "best", "worst", or a numeric rank.
# Returns: a data frame with columns hospital and state (one row per state),
#   ordered by state abbreviation; hospital is NA when a numeric rank exceeds
#   the number of hospitals in a state.
rankall <- function(out, num = "best") {
  # Column of outcome-of-care-measures.csv holding each outcome's
  # 30-day mortality rate.
  outs <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)

  # Coerce "Not Available" etc. to NA without console noise.
  asNumeric <- function(x) {
    suppressWarnings(as.numeric(x))
  }

  ## Read outcome data
  outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Check that the outcome is valid
  colIdx <- match(out, names(outs))
  if (is.na(colIdx)) {
    stop(paste("invalid outcome: ", out))
  }

  states <- unique(outcome$State)

  # PERF FIX: collect one-row data frames in a preallocated list and bind
  # once at the end instead of growing a data frame with rbind (O(n^2)).
  rows <- vector("list", length(states))
  for (k in seq_along(states)) {
    st <- states[k]
    subState <- subset(outcome, State == st)
    # Hospital name (column 2) plus the selected mortality column.
    outByHosp <- subState[, c(2, outs[[colIdx]])]
    outByHosp[, 2] <- sapply(outByHosp[, 2], asNumeric)
    # NOTE(review): hospitals with NA rates are intentionally kept (the
    # original had a commented-out complete.cases filter). which.min and
    # which.max ignore NA, but numeric ranks count the NA rows — confirm
    # against the assignment spec.
    comp <- outByHosp
    if (num == "best") {
      rows[[k]] <- data.frame(hospital = comp[which.min(comp[, 2]), 1], state = st)
    } else if (num == "worst") {
      rows[[k]] <- data.frame(hospital = comp[which.max(comp[, 2]), 1], state = st)
    } else {
      # Rank by rate, ties broken alphabetically by hospital name.
      ord <- comp[order(comp[, 2], comp[, 1]), ]
      rows[[k]] <- data.frame(hospital = ord[num, 1], state = st)
    }
  }
  rs <- do.call(rbind, rows)
  if (is.null(rs)) {
    # No states at all: return an empty frame with the documented columns.
    rs <- data.frame(hospital = as.character(), state = as.character())
  }

  ## Return hospital/state pairs sorted by (abbreviated) state name.
  rs[order(as.character(rs$state)), ]
}
|
72b14fd1a266ae2d4318c58d681e42a07ec0f205
|
ccc964ca50ccb486f3ff87ea26005a19c7ae72e5
|
/R_codes/Corr2/Create_OR_correlation.R
|
ec58ffd115167953d4686dcf792041a57760b638
|
[] |
no_license
|
dtmlinh/Car-Crash-Fatalities-Exploration-Tool
|
426d1b52b5c42b057de251c6276b6fd5d7b63ac5
|
3608aa473f2f091f10709e432be3e7321dffe1f5
|
refs/heads/master
| 2021-05-24T17:26:27.484837
| 2021-03-29T16:27:36
| 2021-03-29T16:27:36
| 15,200,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 331
|
r
|
Create_OR_correlation.R
|
# Create_OR_correlation.R — build the Oregon fatalities/distance table.
# Reads OR.csv, averages Fatalities.Count per county, joins the result with
# Distance.csv on County + State, and writes Corr2_OR.csv.

setwd("data")

# Oregon crash records: tag the state, keep the three needed columns, then
# average Fatalities.Count within each County/State pair.
OR <- read.csv("OR.csv")
OR$State <- "Oregon"
OR <- subset(OR, select = c("County", "State", "Fatalities.Count"))
OR <- aggregate(. ~ County + State, data = OR, FUN = "mean")

# County distances; upper-case county names on both sides so the merge
# keys line up regardless of capitalization.
data <- read.csv("Distance.csv")
data$County <- toupper(data$County)
OR$County <- toupper(OR$County)

# Inner join on the shared keys and write the correlation input file.
dta <- merge(OR, data, by = c("County", "State"))
write.csv(dta, file = "Corr2_OR.csv")
|
c00d99107b95f62abed325dbe2cdbf8f0a98aa31
|
0e7b0b1e3af416651e84b7906d9a5187a4b87a16
|
/inst/UnitTests/runit.tests_1.R
|
1e41140742cce0e06964b4916e83266833371bea
|
[] |
no_license
|
cran/VFP
|
dbce80aecd16e7bca17dd7f98a1835549930ffe7
|
da114228bfeb7cd9cc169265999661332a442dc5
|
refs/heads/master
| 2022-11-19T01:11:33.431124
| 2022-11-08T14:10:06
| 2022-11-08T14:10:06
| 145,906,962
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,574
|
r
|
runit.tests_1.R
|
# TODO: Add unit-test functions which can be automatically run
#
# Author: schueta6
###############################################################################
# RUnit fixture setup: fit all nine VFP variance-function models once to the
# multi-lot reproducibility data. The TFxxx tests below compare coefficients
# and deviances against reference values (Sadler's VFP software) and check
# exact parameter recovery. Requires the VCA and VFP packages (fit.vfp and
# MultiLotReproResults come from VFP; checkEquals from RUnit).
library(VCA)
###
data(MultiLotReproResults)
fit.all.models <- fit.vfp(MultiLotReproResults, model.no=1:9)
# Mean concentrations of the fixture data (kept for interactive inspection).
means <- MultiLotReproResults$Mean
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc Tests model 6 against parameter estimates of VFP-software version 14.0
TF001.model6 <- function(x)
{
# Reference coefficients and deviance for model 6 (three terms plus an
# estimated exponent J), taken from Sadler's VFP software v14.0.
ref.coef <- c( 0.094117579, -0.010166326, 0.001864494, 2.030292924)
ref.deviance <-0.8143275
# For comparison values from Sadlers Variance Function Program 2016:
#B1 = 0.0941176016, B2 = -0.010166383, B3 = 0.0018645146, J = 2.03029006
#Log[LR] = 0.81433
# NOTE(review): "$Model" only works through R's partial matching of $
# against the "Models" element (other tests use $Models) — fragile.
tst.coef <- as.numeric(fit.all.models$Model$model6$coefficients)
tst.deviance <- as.numeric(fit.all.models$Model$model6$deviance)
checkEquals(tst.coef, ref.coef,tolerance=1E-6)
checkEquals(tst.deviance, ref.deviance,tolerance=1E-6)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc Tests model 7 against parameter estimates of VFP-software version 14.0
TF002.model7 <- function(x)
{
# Reference values for model 7 (two coefficients plus exponent J).
ref.coef <- c(0.0780494174,0.0005268289,2.3337820976)
ref.deviance <- 2.05935
# NOTE(review): "$Model" relies on partial matching of "Models", as above.
tst.coef <- as.numeric(fit.all.models$Model$model7$coefficients)
tst.deviance <- as.numeric(fit.all.models$Model$model7$deviance)
checkEquals(tst.coef, ref.coef,tolerance=1E-6)
checkEquals(tst.deviance, ref.deviance,tolerance=1E-6)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc Tests model 8 against parameter estimates of VFP-software version 14.0
TF003.model8 <- function(x)
{
# Reference values for model 8 (two coefficients plus exponent J).
ref.coef <- c(0.49406868,0.01869976,3.97360976)
ref.deviance <- 8.892474
# For comparison values from Sadlers Variance Function Program 2016:
#B1 = 0.4939963105, B2 = 0.0187035392, J = 3.97287275
#Log[LR] = 8.89247
# NOTE(review): "$Model" relies on partial matching of "Models", as above.
tst.coef <- as.numeric(fit.all.models$Model$model8$coefficients)
tst.deviance <- as.numeric(fit.all.models$Model$model8$deviance)
checkEquals(tst.coef, ref.coef,tolerance=1E-6)
checkEquals(tst.deviance, ref.deviance,tolerance=1E-6)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 1 will be recovered
# Exact-recovery tests TF004-TF014 all follow the same pattern: build a
# data set whose variances equal the expected values of the assumed
# variance function exactly, fit the corresponding model, and require the
# true parameters back to sqrt(machine-epsilon) tolerance.
# (A stray "VC <- as.numeric(lapply(...))" fragment that had been merged
# into the DF line's comment has been removed throughout.)
TF004.exact1 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 1: constant variance VC = beta1.
Daten <-data.frame(Mean,VC=VC0,DF)
res <- fit.vfp(Data=Daten,model.no=1,quiet=T)$Models$model1$coefficients
checkEquals(as.numeric(res),c(1),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 2 will be recovered
TF005.exact2 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 2: constant CV, VC = beta1 * Mean^2.
VC <- VC0 * Mean^2
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=2,quiet=T)$Models$model2$coefficients
checkEquals(as.numeric(res),c(1),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 3 will be recovered, exponent 1
TF006.exact3 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 3: VC = beta1 + beta2 * Mean^2, both coefficients positive.
VC <- VC0 * (1 + Mean^2)
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=3,quiet=T)$Models$model3$coefficients
checkEquals(as.numeric(res),c(1,1),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 3 will be recovered, negative exponent
TF007.exact3minus <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 3 with a small negative second coefficient (beta2 = -0.005).
VC <- VC0 * (1 - 0.005*Mean^2)
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=3,quiet=T)$Models$model3$coefficients
checkEquals(as.numeric(res),c(1,-0.005),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 4 will be recovered
TF008.exact4 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 4: VC = (beta1 + beta2 * Mean)^2.
VC <- VC0 * (1 + Mean)^2
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=4,quiet=T)$Models$model4$coefficients
checkEquals(as.numeric(res),c(1,1),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 5 will be recovered
TF009.exact5 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 5 with fixed power K = 3: VC = beta1 + beta2 * Mean^K.
VC <- VC0 * (1 + Mean^3)
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=5,K=3,quiet=T)$Models$model5$coefficients
checkEquals(as.numeric(res),c(1,1),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 6 will be recovered
TF010.exact6 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 6: VC = beta1 + beta2 * Mean + beta3 * Mean^J (J estimated).
VC <- 1000 - 100 * Mean + Mean^3
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=6,quiet=T)$Models$model6$coefficients
checkEquals(as.numeric(res),c(1000,-100,1,3),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 7 will be recovered
TF011.exact7 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 7: VC = beta1 + beta2 * Mean^J with the exponent J estimated.
VC <- VC0 * (1 + Mean^3)
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=7,quiet=T)$Models$model7$coefficients
checkEquals(as.numeric(res),c(1,1,3),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 8 will be recovered, positive beta2
TF012.exact8 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 8: VC = (beta1 + beta2 * Mean)^J, here with positive beta2.
VC <- VC0 * (1 + Mean)^3
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=8,quiet=T)$Models$model8$coefficients
checkEquals(as.numeric(res),c(1,1,3),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 3 will be recovered, negative beta2
TF013.exact8minus <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
# NOTE(review): the desc annotation above says "model 3" but this test
# exercises model 8 with a negative beta2 — annotation likely stale.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
VC <- VC0 * (1 -0.05*Mean)^0.5
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=8,quiet=T)$Models$model8$coefficients
checkEquals(as.numeric(res),c(1,-0.05,0.5),tolerance=.Machine$double.eps^0.5)
}
#* **target fit.vfp
#* **riskid RA01
#* **funid Fun101
#* **desc check whether exact parameters of model 9 will be recovered
TF014.exact9 <- function(x){
#exact parameters should be recovered from variances which are equal to the expected values from the assumed VF.
Mean <- seq(1,10,1)
VC0 <- rep(1,10)
DF <- rep(1,10)*10 # corresponds to 10 replicas per point
# Model 9: VC = beta1 * Mean^J (power model).
VC <- VC0 * Mean^3
Daten <-data.frame(Mean,VC=VC,DF)
res <- fit.vfp(Data=Daten,model.no=9,quiet=T)$Models$model9$coefficients
checkEquals(as.numeric(res),c(1,3),tolerance=.Machine$double.eps^0.5)
}
#* **target predictMean
#* **riskid RA04
#* **funid Fun106
#* **desc Test whether the predictMean-function correctly handles situations where no concentrations can be found.
TF015.predictMean <- function(x)
{
# Debug banner to locate this test's output in the RUnit log.
# NOTE(review): the banner text still says "TF078" — stale name from an
# earlier renumbering of the test functions.
cat("\n\n#####################################################################################\n\n")
cat("\nWithin TF078.predictMean\n\n")
print(ls())
# NOTE(review): ls() here lists the *local* function environment, which
# never contains fit.all.models, so this guard always refits the models
# (into a local variable). Harmless but probably not what was intended.
if(!"fit.all.models" %in% ls()) {
library(VCA)
data(MultiLotReproResults)
fit.all.models <- fit.vfp(MultiLotReproResults, model.no=1:9)
}
# Invert the CV function at 4.2% CV and check the point estimate and CI.
res <- predictMean(fit.all.models, model.no=6, type="cv", newdata=4.2)
checkEquals(res$Mean, 15.08867, tolerance=1e-6)
checkEquals(res$LCL, 6.253166, tolerance=1e-6)
checkEquals(res$UCL, 9272564, tolerance=1e-6) # now upper bound found, max X-value returned with message
}
#* **target getMat.VCA
#* **riskid RA06
#* **funid Fun201
#* **desc Test whether sequences of variance components are correctly processed in function 'getMat.VCA'
TF016.getMat.VCA_sequences <- function()
{
# Fit per-sample VCA models, then check that getMat.VCA's aggregation of
# variance components 4:6 matches a manual computation of the summed VC,
# Satterthwaite DF, and mean per sample.
data(VCAdata1, package="VCA")
lst <- anovaVCA(y~(lot+device)/day/run, VCAdata1, by="sample")
mat <- getMat.VCA(lst, 4:6)
# Order rows numerically by sample index (rownames are "sample.<i>").
mat <- mat[order(as.numeric(sub("sample.", "", rownames(mat)))),]
VC <- sapply(lst, function(x) sum(x$aov.tab[4:6, "VC"]))
# Satterthwaite DF of the summed components, via the (unexported)
# VCA:::SattDF on the relevant sub-blocks of the Ci.MS matrix.
DF <- sapply(lst, function(x){
Ci <- getMat(x, "Ci.MS")
Ci <- Ci[3:5, 3:5]
MS <- x$aov.tab[4:6, "MS"]
DF <- x$aov.tab[4:6, "DF"]
DF <- VCA:::SattDF(MS, Ci, DF, "total")
DF
})
Mean <- sapply(lst, function(x) x$Mean)
checkEquals(mat[,"VC"], as.numeric(VC))
checkEquals(mat[,"Mean"], as.numeric(Mean))
checkEquals(mat[,"DF"], as.numeric(DF))
}
#* **target getMat.VCA
#* **riskid RA06
#* **funid Fun201
#* **desc Are equences of variance components correctly processed when fitting VFP-models directly on a list of VCA-objects
TF017.fit.vfp_VC_sequences <- function()
{
# fit.vfp on a list of VCA objects (with vc=4:6) must embed the same
# VC/Mean/DF table that getMat.VCA produces for that component sequence.
data(VCAdata1, package="VCA")
lst <- anovaVCA(y~(lot+device)/day/run, VCAdata1, by="sample")
mat0 <- getMat.VCA(lst, 4:6)
vfp <- fit.vfp(lst, 1, vc=4:6)
mat1 <- vfp$Data
checkEquals(mat0[,"VC"], mat1[,"VC"])
checkEquals(mat0[,"Mean"], mat1[,"Mean"])
checkEquals(mat0[,"DF"], mat1[,"DF"])
}
#* **target predictMean
#* **riskid RA04
#* **funid Fun106
#* **desc Test predictMean-function comprehensively, 7 models, 3 predictions each back and forth.
TF018.predictMean <- function(x)
{
    # Smoke test: for each supported model, predict CVs over a grid of mean
    # concentrations and invert the fit via predictMean(); the round trip
    # must complete without error for every model in the set.
    # NOTE(review): no checkEquals here -- the test only verifies that the
    # back-calculation runs; consider asserting x1$Mean ~ x0 with a tolerance.
    # (The original `if (i == 5) next` guard was unreachable -- 5 is not in
    # the loop set -- and has been removed.)
    rng <- range(MultiLotReproResults$Mean)
    x0 <- round(seq(rng[1] * 0.025, rng[2] * .075, length.out = 10), 3)
    for (i in c(1, 3, 4, 6, 7, 8, 9)) {
        pred <- predict(fit.all.models, model.no = i, type = "cv", newdata = x0)$Fitted
        x1 <- predictMean(fit.all.models, model.no = i, newdata = pred, type = "cv", tol = 1e-6)
    }
}
#* **target fit.vfp
#* **riskid RA03
#* **funid Fun105
#* **desc extensive testing of S3 method coef for objects of class "VFP"
TF019.coef <- function(x)
{
    # For every fitted model, the S3 coef() method on the VFP object must
    # return exactly the coefficients stored in the underlying model fit.
    # Iterating directly over the names (instead of 1:length(models)) makes
    # the loop run zero times on an empty model list rather than indexing
    # with the bogus sequence c(1, 0).
    for (nm in names(fit.all.models$Models)) {
        model.no <- as.numeric(sub("model", "", nm))
        expected <- as.numeric(fit.all.models$Models[[nm]]$coefficients)
        observed <- as.numeric(coef(fit.all.models, model.no = model.no))
        checkEquals(observed, expected, tol = 1e-12)
    }
}
|
a500416eec39099ec72204a6119c3849ac98faa3
|
c6e26f1bc6a874774973b3549bcf68d6d8e1991a
|
/tests/testthat/test-domains.R
|
1deac4127d89e35ac36c2da0081fb2ce8703be94
|
[] |
no_license
|
Smaraki11/analogsea
|
0550824155c59e5b27d51112b5e15f09b0057aca
|
4791aae460c093d5029c283776c9438882dd8c6e
|
refs/heads/master
| 2021-01-18T03:10:50.589038
| 2015-08-12T05:44:29
| 2015-08-12T05:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 143
|
r
|
test-domains.R
|
# tests for domains
context("domains")

# The original description claimed the call "returns NULL", but the assertion
# checks that an error is raised; the description was corrected to match.
test_that("incorrect input to what param raises an error", {
  expect_error(domains_records("asfasd"))
})
|
562474ecf5ff0f7f279a302b1995e7ad33de7a03
|
4ca76a3cef4af592ba8ab121aae35f5eccb59670
|
/man/rtauargus_options.Rd
|
b6065ab64aa09d6d801159647c969a761e020242
|
[
"MIT"
] |
permissive
|
InseeFrLab/rtauargus
|
5e9405d3453a534adc235ec71f5d559de00f8f62
|
f3810aff361d2eb7aa31d47e38fe1943f42733ad
|
refs/heads/master
| 2023-08-18T22:24:22.967560
| 2023-07-20T16:33:36
| 2023-07-20T16:33:36
| 442,119,707
| 4
| 4
|
MIT
| 2023-09-05T11:56:35
| 2021-12-27T09:57:18
|
R
|
UTF-8
|
R
| false
| true
| 6,001
|
rd
|
rtauargus_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{rtauargus_options}
\alias{rtauargus_options}
\alias{reset_rtauargus_options}
\title{Manages options of rtauargus package}
\usage{
rtauargus_options()
# options(rtauargus.<opt> = <val>)
reset_rtauargus_options(...)
}
\arguments{
\item{...}{names of the options to reset, separated by commas. If no name is
specified, all the options will be reset. The prefix \code{"rtauargus."}
is not required. \cr
noms des options à réinitialiser, séparés par des virgules. Si
aucun nom n'est spécifié, toutes les options du package seront
réinitialisées. Le préfixe \code{"rtauargus."} est facultatif.}
}
\description{
Manages (displays, modifies, resets) the options of rtauargus package. \cr
Gère les options du package (affiche, modifie, réinitialise).
}
\details{
The options of the package define the default behaviour of the functions.
These options are used if a mandatory argument of a function is not set
by the user. They let not to systematically repeat the same parameter
for each call of a function. The name of the option is the same as the
name of the function prefixed by \code{rtauargus.} :
\emph{For example, \code{rtauargus.decimals} will be used if the argument
\code{decimals} in the \code{micro_asc_rda} function is not set by the
user.}
On loading the package, all the rtauargus options, that are not already
been set by the user, are set with their default values (see table below).
The already defined options keep the values set by the user.
The options can be set during a session with the following instruction
\code{options(rtauargus.}...\code{ = }...\code{)}, or with a configuration
file where the user have written its own options with such instructions,
but this latter is not a proper way if reproducibility is sought.
Les options du package définissent les comportements par défaut des
fonctions.
If the user inadvertently removes some options, the functions will use
the default values of the package. \cr
(Ces options sont utilisées si un argument obligatoire d’une fonction n’est
pas renseigné. Elles permettent de ne pas répéter systématiquement le même
paramètre à chaque appel d'une fonction. Le nom de l’option est le nom de
l’argument d’une fonction précédé de \code{rtauargus.} :
\emph{Par exemple, \code{rtauargus.decimals} sera la valeur utilisée si l’argument
\code{decimals} de la fonction \code{micro_asc_rda} n’est pas renseigné par
l’utilisateur.}
Au chargement, le package attribue une valeur par défaut à toutes les options
de rtauargus qui ne sont pas encore déclarées (cf. tableau ci-dessous). Les
options déjà définies par l'utilisateur gardent leurs valeurs.
Elles peuvent être redéfinies pour une session par une instruction
\code{options(rtauargus.}...\code{ = }...\code{)}, ou de manière globale si
de telles instructions sont placées dans un fichier de configuration propre à
l'utilisateur (fortement déconseillé si le programme a vocation à être
reproductible).
En cas d'effacement accidentel d'une option par l'utilisateur, les fonctions
utiliseront les valeurs par défaut du package.)
}
\section{List of options}{
\tabular{lll}{
\strong{Option} \tab \strong{Default Value} \tab \strong{Function} \cr
\code{------------------------} \tab \code{---------------------------------} \tab \code{-------------}\cr
rtauargus.decimals \tab \Sexpr{rtauargus:::op.rtauargus$rtauargus.decimals} \tab \link{micro_asc_rda}\cr
rtauargus.totcode \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.totcode}" \tab \cr
rtauargus.missing \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.missing}" \tab \cr
rtauargus.hierleadstring \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.hierleadstring}" \tab \cr
\code{------------------------} \tab \code{---------------------------------} \tab \code{-------------}\cr
rtauargus.response_var \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.response_var}" \tab \link{micro_arb} \cr
rtauargus.weighted \tab \Sexpr{rtauargus:::op.rtauargus$rtauargus.weighted} \tab \cr
rtauargus.linked \tab \Sexpr{rtauargus:::op.rtauargus$rtauargus.linked} \tab \cr
rtauargus.output_type \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.output_type}" \tab \cr
rtauargus.output_options \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.output_options}" \tab \cr
\code{------------------------} \tab \code{---------------------------------} \tab \code{-------------}\cr
rtauargus.missing_dir \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.missing_dir}" \tab \link{run_arb} \cr
rtauargus.tauargus_exe \tab "\Sexpr{rtauargus:::op.rtauargus$rtauargus.tauargus_exe}" \tab \cr
rtauargus.show_batch_console \tab \Sexpr{rtauargus:::op.rtauargus$rtauargus.show_batch_console} \tab \cr
rtauargus.import \tab \Sexpr{rtauargus:::op.rtauargus$rtauargus.import} \tab
}
}
\examples{
rtauargus_options()
# modifies some options
options(
rtauargus.tauargus_exe = "Z:/tmp/TauArgus.exe",
rtauargus.output_type = "4",
rtauargus.weighted = TRUE
)
str(rtauargus_options())
# resets some options (prefix "rtauargus." is optional)
reset_rtauargus_options("output_type", "rtauargus.tauargus_exe")
str(rtauargus_options())
# resets everything
reset_rtauargus_options()
str(rtauargus_options())
}
\seealso{
\link{options}, R options system \cr
le système d'options de R dans lequel s'insèrent les options de ce package.
}
|
76c4199a3add7094bade942459a606a11e55bec5
|
8b4254eeceaf1640422f113907e8361c6c2e35cb
|
/R/game_flow.R
|
9a7da45c6243af5637d2e6fd7447aa62b3cc24a7
|
[
"MIT"
] |
permissive
|
lbenz730/ncaahoopR
|
f2d823032a397f7003a3ce4d543f914aade09ff7
|
c3b6b3cc363d7632a817943b09e3d486c4f490c7
|
refs/heads/master
| 2023-06-22T17:48:19.757112
| 2023-06-19T15:30:16
| 2023-06-19T15:30:16
| 144,089,351
| 190
| 66
|
MIT
| 2023-04-18T14:02:15
| 2018-08-09T02:07:02
|
R
|
UTF-8
|
R
| false
| false
| 3,117
|
r
|
game_flow.R
|
#' Game Flow Chart
#'
#' Renders a game flow chart: the running score of both teams over elapsed
#' game time, with dashed reference lines at halftime, end of regulation and
#' the end of each overtime period.
#'
#' @param game_id ESPN game_id for which to render chart
#' @param home_col Color of home team for chart
#' @param away_col Color of away team for chart
#' @return A ggplot2 object, or \code{NULL} (with a warning) when play-by-play
#'   data is unavailable for the game.
#' @export
game_flow <- function(game_id, home_col, away_col) {
  ### Error Testing
  # missing() makes the intended message fire when an argument is omitted
  # entirely; is.na() alone would abort first with R's generic
  # "argument is missing, with no default" error.
  if (missing(game_id) || is.na(game_id)) {
    stop("game_id is missing with no default")
  }
  if (missing(home_col) || is.na(home_col)) {
    stop("home_col is missing with no default")
  }
  if (missing(away_col) || is.na(away_col)) {
    stop("away_col is missing with no default")
  }

  ### Get Data
  data <-
    get_pbp_game(game_id, extra_parse = FALSE) %>%
    dplyr::filter(!wrong_time)
  if (is.null(data)) {
    warning("PBP Data Not Available for Game Flow Chart")
    return(NULL)
  }
  home_team <- data$home[1]
  away_team <- data$away[1]

  # Reference lines: halftime (1200 s) plus one line at the end of regulation
  # (2400 s) and at the end of each 5-minute overtime actually played.
  plot_lines <- 1200
  msec <- max(data$secs_remaining_absolute)
  sec <- msec - 2400
  ot_counter <- 0
  while (sec > 0) {
    sec <- sec - 300
    plot_lines <- c(plot_lines, 2400 + ot_counter * 300)
    ot_counter <- ot_counter + 1
  }
  date <- format(as.Date(data$date[1]), "%B %d, %Y")

  ### Get into Appropriate Format
  # Stack home and away scores into long format keyed by team
  x <- rbind(
    dplyr::select(data, secs_remaining_absolute, home_score) %>%
      dplyr::mutate("score" = home_score, team = "home") %>%
      dplyr::select(-home_score),
    dplyr::select(data, secs_remaining_absolute, away_score) %>%
      dplyr::mutate("score" = away_score,
                    "team" = "away") %>%
      dplyr::select(-away_score)
  ) %>%
    dplyr::mutate("secs_elapsed" = max(secs_remaining_absolute) - secs_remaining_absolute)

  ### Message
  # Time-weighted average score differential, signed toward the winning team
  avg_sd <- round(sum(data$play_length * data$score_diff/max(data$secs_remaining_absolute)), 2)
  home_win <- data$home_score[nrow(data)] > data$away_score[nrow(data)]
  avg_sd <- ifelse(home_win, avg_sd, -avg_sd)
  avg_sd <- paste0("Average Score Differential for ",
                   ifelse(home_win, home_team, away_team), ": ", avg_sd)
  max_score <- max(c(data$home_score, data$away_score))

  ### Make Plot
  ggplot2::ggplot(x, aes(x = secs_elapsed/60, y = score, group = team, col = team)) +
    ggplot2::geom_step(size = 1) +
    ggplot2::theme_bw() +
    ggplot2::geom_vline(xintercept = plot_lines/60, lty = 2, alpha = 0.5, size = 0.8) +
    ggplot2::labs(x = "Minutes Elapsed",
                  y = "Score",
                  col = "",
                  title = paste("Game Flow Chart for", home_team, "vs.", away_team),
                  subtitle = date,
                  caption = "Luke Benz (@recspecs730) Data Accessed via ncaahoopR") +
    # trailing comma after legend.position removed -- it produced an empty
    # argument in the theme() call
    ggplot2::theme(plot.title = element_text(size = 16, hjust = 0.5),
                   plot.subtitle = element_text(size = 12, hjust = 0.5),
                   axis.title = element_text(size = 14),
                   plot.caption = element_text(size = 8, hjust = 0),
                   legend.position = "bottom") +
    ggplot2::scale_x_continuous(breaks = seq(0, msec/60, 5)) +
    ggplot2::scale_color_manual(values = c(away_col, home_col),
                                labels = c(away_team, home_team)) +
    ggplot2::annotate("text", x = 10, y = max_score - 10, label = avg_sd)
}
|
15e1ceaa552cced9529f5d45b8d4372d381a5bd2
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/inst/apps/shockabsorber_quantiles/server.R
|
365ce6695d94317258d532d5ce63a56e6b99ad4f
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 163
|
r
|
server.R
|
# Shiny server for the shock-absorber quantiles demo app.
server = function(input, output, session) {
  output$squant <- renderPlot({
    # Reactive dependency on input$shockquants (presumably a trigger control);
    # note the ID differs from input$shockquant read below -- verify against
    # ui.R that both inputs exist.
    input$shockquants
    # Evaluates the R code stored in input$shockquant to produce the plot;
    # isolate() avoids a second reactive dependency on it.
    # NOTE(review): eval(parse(text = ...)) executes arbitrary code from the
    # UI -- acceptable only for a trusted local demo app.
    return(isolate(eval(parse(text=input$shockquant))))
  })
}
|
25551eb2c6798a9d641f751c83384adf106f19cc
|
77af7f6a404826f6819e67bd516d97a49795f1c7
|
/Pollutantmean.R
|
31ab986d3b9a0cfb783c94bbc8d3d299513a4406
|
[] |
no_license
|
Shakespeare1998/RStudio
|
852ebc096deab311f4ff18ce327b0be494e6f35a
|
fc28e54ed061e7869ea08a0850733a0b896081f6
|
refs/heads/main
| 2023-06-02T05:06:06.539613
| 2021-06-18T15:52:16
| 2021-06-18T15:52:16
| 378,198,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
Pollutantmean.R
|
#' Mean of a pollutant across monitor files
#'
#' Reads monitor CSV files named "001.csv", "002.csv", ... from `directory`
#' and returns the mean of the `pollutant` column pooled across all requested
#' monitors, ignoring NA readings.
#'
#' @param directory path to the folder containing the monitor CSV files
#' @param pollutant name of the column to average (e.g. "sulfate")
#' @param id integer vector of monitor ids (mapped to zero-padded file names)
#' @return single numeric: mean of all non-NA pollutant readings
pollutantmean <- function(directory="/Users/athindrabandi/Desktop/specdata", pollutant, id=1:332){
  # Collect the non-NA readings per monitor with lapply instead of growing a
  # vector with c() inside a loop; the unused list.files() scan was removed.
  readings <- lapply(id, function(i) {
    path <- file.path(directory, sprintf("%03d.csv", i))
    column <- read.csv(path)[[pollutant]]
    column[!is.na(column)]
  })
  # as.numeric() keeps the no-data result (NaN from mean(numeric(0)))
  # identical to the original implementation
  mean(as.numeric(unlist(readings)))
}
|
15b96eaeb82e3658872ce8a5c8624e32d0dc054f
|
0cc55fc29238ab75cb4b8d0774f8f5d3150232b4
|
/shiny/16-renderImage.R
|
c4e2475296eed233517f9e65c11a4cc6eaa72cb2
|
[] |
no_license
|
kelfan/rLearning
|
237a6daf18a4275aead953546a222e1b75bbbf7c
|
9b712fd3b0a81d3c323fdfa1981af5641c11b2ae
|
refs/heads/master
| 2021-05-05T10:38:36.900619
| 2018-11-18T11:49:43
| 2018-11-18T11:49:43
| 118,072,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 100
|
r
|
16-renderImage.R
|
# Launch the renderImage demo app from its local project directory.
# NOTE(review): the absolute Windows path is machine-specific -- this script
# will only start the app on the original author's machine.
library(shiny)
runApp('E:/Dropbox/Dropbox/20181110-Caren-R-network-research/shiny/16-renderImage')
|
311ea2577788d7824b3c6134535836945b62bd82
|
d633249f08fc289db62d52930ff7d7358de8fa70
|
/tests/testthat.R
|
6c317b03100023faad2905072b0453121964ccc4
|
[
"MIT"
] |
permissive
|
terminological/uk-covid-datatools
|
37c59ac22adce135999b496bab82cfa7463772c9
|
c42a5d539f52c732b3c4618a453a7bcaf98cb9a9
|
refs/heads/master
| 2021-12-25T22:32:39.693962
| 2021-06-24T14:02:09
| 2021-06-24T14:02:09
| 248,539,323
| 2
| 2
|
MIT
| 2020-11-30T09:34:58
| 2020-03-19T15:33:48
|
R
|
UTF-8
|
R
| false
| false
| 68
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/ for
# the ukcovidtools package (invoked automatically by R CMD check).
library(testthat)
library(ukcovidtools)

test_check("ukcovidtools")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.