blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc2a662865df34d88cdffb24368bbb699ed94d46
|
b492eb6163924881f98adce4b9368b72cff84955
|
/Plot 1.R
|
d48f67c4f9a00d6dcff3dcde7785d426d6935095
|
[] |
no_license
|
PolinaKoroleva/ExData_Plotting1
|
b2abdd9ed1802cb64db53a6174af52d9593ed0ef
|
f2f973b7e33451cefa4a7ab2aac6dd0cda1b2318
|
refs/heads/master
| 2020-12-28T17:35:55.946398
| 2020-02-06T09:25:39
| 2020-02-06T09:25:39
| 238,424,408
| 0
| 0
| null | 2020-02-05T13:22:25
| 2020-02-05T10:32:03
| null |
UTF-8
|
R
| false
| false
| 632
|
r
|
Plot 1.R
|
# Plot 1 — histogram of Global Active Power for 2007-02-01 / 2007-02-02
# NOTE(review): paths are hard-coded to one machine's home directory; adjust
# before running elsewhere (setwd() in scripts is generally discouraged).
setwd("~/Desktop/Coursera R/ExData_Plotting1")
# Semicolon-separated file; "?" marks missing values.
hpc <- read.csv("~/Desktop/Coursera R/household_power_consumption.txt", sep=";", na.strings="?")
View(hpc)
# subset the data from the dates
names(hpc) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
data <- subset(hpc,hpc$Date=="1/2/2007" | hpc$Date =="2/2/2007")
str(data)
### Plot 1
# FIX: corrected x-axis label typo ("Glabal" -> "Global")
hist(data$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)",
main = "Global Active Power")
# Copy the on-screen plot to a 480x480 PNG, then close the PNG device.
dev.copy(png, filename="plot1.png", width=480, height=480)
dev.off()
|
97371d4b811b80048fe98dff875923ae8987bebc
|
2e6555b08b874efe0a455a3bd284c715a7cff976
|
/R/breakdowngraph_func.R
|
7c3df559b5aa99ab9092b1b57297d6dd8189bacc
|
[] |
no_license
|
dutchjes/MSMSsim
|
99d17ead953cb382392cad70eef3a7877e1ffe20
|
89ced2837f6f597c79198d8fe152fa5d23a6e2ad
|
refs/heads/master
| 2022-01-12T11:12:22.893585
| 2019-06-26T13:43:59
| 2019-06-26T13:43:59
| 75,643,765
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,321
|
r
|
breakdowngraph_func.R
|
## Plotting of breakdown curves for each compound
## Needs an all.frag table generated from MassBank
#' Plot breakdown curves (fragment intensity vs. collision energy)
#'
#' For every compound in `all.frag`, keeps only HCD spectra, picks the most
#' frequent ion mode and resolution, and plots log10 fragment intensity
#' against collision energy — one colour per annotated fragment formula —
#' plus a side panel showing each fragment's maximum intensity.
#'
#' @param all.frag Data frame of fragments with columns CompoundID,
#'   FragmentationMode, IonMode, Resolution, FragmentAnnotatedFormula,
#'   CollisionEnergy and FragmentIntensity.
#' @param plot default is TRUE; when TRUE one PNG per compound is written to
#'   the working directory, when FALSE nothing is drawn.
#' @param start passed to rainbow() function
#' @param end passed to rainbow() function
#'
#' @return Invisibly NULL; called for its side effect of writing PNG files.
#' @export
#'
#' @examples
breakdown <- function(all.frag, plot = TRUE, start = 0, end = 1){
  compounds <- unique(all.frag$CompoundID)
  # FIX: was 1:length(...), which misbehaves for zero compounds.
  for (i in seq_along(compounds)) {
    take <- compounds[i]
    dat <- subset(all.frag, all.frag$CompoundID == take)
    dat <- subset(dat, dat$FragmentationMode == "HCD")
    # Most frequent ion mode and resolution for this compound.
    # NOTE(review): on a tie which() yields several values and the subset
    # comparison recycles them — original behaviour retained; confirm intent.
    mode <- as.data.frame(table(dat$IonMode))
    ionmode <- mode[which(mode$Freq == max(mode$Freq)), 1]
    datnow <- subset(dat, dat$IonMode == ionmode)
    res <- as.data.frame(table(dat$Resolution))
    resol <- res[which(res$Freq == max(res$Freq)), 1]
    datnow <- subset(datnow, datnow$Resolution == resol)
    # One colour per annotated fragment formula.
    frag <- unique(datnow$FragmentAnnotatedFormula)
    frags <- rainbow(length(frag), start = start, end = end)
    datnow <- subset(datnow, datnow$FragmentAnnotatedFormula == frag[1])
    fragmax <- as.data.frame(c(rep(NA, length(frag))))
    if (isTRUE(plot)) {
      png(filename = paste("FragmentsatCEsforCompound", take, ".png", sep = ""))
      plot.new()
      par(fig = c(0, 0.85, 0, 1), new = TRUE)
      plot(datnow$CollisionEnergy, log10(datnow$FragmentIntensity),
           xlim = c(0, 100),
           ylim = c(log10(min(dat$FragmentIntensity)),
                    log10(max(dat$FragmentIntensity)) + 1),
           pch = 19, col = frags[1], type = "b",
           main = paste("Compound", take, "IonMode", ionmode, "Resol", resol, sep = " "),
           xlab = "Collision Energy (NCE)", ylab = "Log10(Intensity)", xaxt = "n")
      axis(side = 1, at = c(0, 15, 30, 45, 60, 75, 90, 100))
      fragmax[1, 1] <- max(datnow$FragmentIntensity)
      # Overlay the remaining fragments.
      # FIX: 2:length(frag) ran backwards (2, 1) when only one fragment
      # existed; guard added.
      if (length(frag) > 1) {
        for (j in 2:length(frag)) {
          datnow <- subset(dat, dat$IonMode == ionmode)
          datnow <- subset(datnow, datnow$Resolution == resol)
          datnow <- subset(datnow, datnow$FragmentAnnotatedFormula == frag[j])
          points(datnow$CollisionEnergy, log10(datnow$FragmentIntensity),
                 pch = 19, col = frags[j], type = "b")
          fragmax[j, 1] <- max(datnow$FragmentIntensity)
        }
      }
      # Side panel: maximum intensity per fragment ("merged spectrum").
      fragmax[, 2] <- c(rep(1, length(fragmax[, 1])))
      par(fig = c(0.7, 1, 0, 1), new = TRUE)
      plot(log10(fragmax[, 1]) ~ fragmax[, 2],
           pch = 19, col = frags, axes = FALSE, xlim = c(0.95, 1.05),
           ylim = c(log10(min(dat$FragmentIntensity)),
                    log10(max(dat$FragmentIntensity)) + 1),
           xlab = "merged spectrum", ylab = "")
      box()
      dev.off()
    }
  }
  invisible(NULL)
}
|
b8720b2c1c89cebc3284dd66430e70419ed757c6
|
dbd98b2572d2043ef924cfb4d3f3d7cfecf91773
|
/experiments/experiments_vary_p_LVIDA.R
|
d45ed1b023fd2a4636d844f80033baf628f1f23c
|
[] |
no_license
|
btaschler/scl_replicate
|
bedb52b778df3481b0ae428e94340723e0547a9a
|
b4ddeddf715d26254e75f4798e4a375539cee89b
|
refs/heads/master
| 2023-04-06T16:03:21.909567
| 2021-03-31T20:40:47
| 2021-03-31T20:40:47
| 232,549,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,201
|
r
|
experiments_vary_p_LVIDA.R
|
###
### LV-IDA - Vary p - shrna data
###
# rm(list = ls())
#
# CLI: the last argument (with any leading "-" stripped) is taken as the
# single value of p to run; with no arguments, default is p = 25.
args <- commandArgs(TRUE)
if(length(args) == 0){
p_ <- 25
p_seq = c(p_)
}else{
print(args)
numberOfArgs <- length(args)
p_ <- sub("-","",args[numberOfArgs])
p_seq <- c(as.numeric(p_))
}
cat(paste("Running for p=", p_, "\n"))
library(tidyverse)
library(biglasso)
# library(cad)
library(pcalg)
library(huge)
library(corpcor)
library(filehash)
# Load functions
#source( file.path(getwd(), "experiments", "experiments_add_path.R") )
# CHD:
# Source every helper under R/ and experiments/methods/, relative to getwd(),
# so the script must be launched from the repository root.
for(f in list.files(file.path(getwd(), "R") )){
source(file.path(getwd(), "R", f))
}
for(f in list.files(file.path(getwd(), "experiments", "methods") )){
source(file.path(getwd(), "experiments", "methods", f))
}
source( file.path(getwd(), "experiments", "functions", "run_experiment.R") )
require(R.utils)
#fid_data <- filePath(getwd(), 'data', 'shrna_processed_data.rds') #(A) human
fid_data <- file.path(getwd(), 'data', 'yeast', 'yeast_processed_data.rds') #(B) yeast
data_list <- readRDS(fid_data)
# NOTE(review): cluster-specific absolute paths; the commented lines below
# are the local-run equivalents.
res_dir <- "/cluster/work/math/heinzec/scl/"
data_dir <- "/cluster/work/math/heinzec/scl/"
# res_dir <- file.path(getwd(), 'results', 'yeast') #TEMP
# data_dir <- file.path(getwd(), 'data', 'yeast')
#
# Reproducibility
set.seed(0)
# Data:
#data_list = get_shrna_data()
int_data = data_list$int_data
int_indices = data_list$int_indices
int_names = data_list$int_names
obs_data = data_list$obs_data
# Experimental settings ---------------------------------------------------
## # (A) Data - setup for human (shrna) data
# int_thresh_count = 35
# int_graph_count = 35
# int_sample_count = 35
#
# obs_thresh_count = NULL
# obs_sample_count = 19
#
# thresholding = "max_min" #robust_zscore_5
# sparsity_factor = 2
# data_name = "shrna"
# remove_near_constant_columns = TRUE
# (B) Data - setup for yeast data
int_thresh_count = NULL
int_graph_count = 700
int_sample_count = 700
obs_thresh_count = 80
obs_sample_count = 80
thresholding = "robust_zscore_5" #robust_zscore_3 / .._5 / .._7
sparsity_factor = NULL
data_name = "yeast"
remove_near_constant_columns = TRUE
# Masking
percentage_visible = 50
mask_seq = c("rows", "entries")
# Repetitions
rep_seq = 1:10
## Methods ---------------------------------------------------------------
#p_seq = c(25, 50, 100, 200, 500, 1000, 2000, 5000, 10000, Inf) #(A) human
#p_seq = c(25, 50, 100, 200, 500, 1000, 2000, 4000, Inf) #(B) yeast
# LV-IDA is only run for p <= 1000 (larger p filtered out below).
method_p_list = list("method_lvida_05" = p_seq[p_seq <= 1000])
method_seq = names(method_p_list)
# Generate datasets -------------------------------------------------------
db_vary_p_path = file.path(data_dir, "sampled_data", "vary_p")
db_vary_p = dbInit(db_vary_p_path, type = "RDS")
# Experiment --------------------------------------------------------------
# Seed reset so the experiment loop is reproducible independently of any
# randomness consumed above.
set.seed(0)
db_res_path = file.path(res_dir, "results", "vary_p")
if(!dir.exists(db_res_path)){ dbCreate(db_res_path, type = "RDS") }
db_res = dbInit(db_res_path, type = "RDS")
# Loop over p values x repetitions x masking schemes x methods; each result
# is written into the filehash DB under a descriptive key.
for (p in p_seq) {
# repetition
for (k in rep_seq) {
data = db_vary_p[[ sprintf('p_%s__rep_%s', p, k) ]]
X = data$X
X = scale(X)
stopifnot( !anyNA(X) )
G_star = data$G_star
# mask
for (mask in mask_seq) {
G0 = switch(mask,
entries = data$G0_entries,
rows = data$G0_rows)
# method
for (method in method_seq) {
method_p_seq = method_p_list[[method]]
# Skip methods whose largest supported p is below the current p.
if ( p > max(method_p_seq) ) {
next
} else {
db_res_name = sprintf('p_%s__vis_%s__rep_%s__mask_%s__%s',
p, percentage_visible, k, mask, method)
run_experiment(X, G0, G_star, method, db_res, db_res_name,
# Data
data_name = data_name,
thresholding = thresholding,
sparsity_factor = sparsity_factor,
remove_near_constant_columns = remove_near_constant_columns,
# Masking
mask = mask,
percentage_visible = percentage_visible,
# Experiment
p = ncol(X),
rep = k)
}
}
}
}
}
# filesstrings::file.move(
#   list.files(file.path("~", "cad_results", "vary_p"), full.names = TRUE),
#   file.path(get_lab_folder(), "cad", "results", "vary_p", "vary_p")
# )
# filesstrings::file.move(
#   list.files(file.path("~", "cad_datasets", "vary_p"), full.names = TRUE),
#   file.path(get_lab_folder(), "cad", "datasets", "vary_p", "vary_p")
# )
|
64f96b17e2b11bc092539824a133a95fbf431318
|
9cbc8d7ae4c57f4948d47f11e2edcba21a1ba334
|
/sources/modules/VESimHouseholds/man/CreateHouseholdsSpecifications.Rd
|
bcab8cb5ff89c57aec4161b32514eb5f1dd96f00
|
[
"Apache-2.0"
] |
permissive
|
rickdonnelly/VisionEval-Dev
|
c01c7aa9ff669af75765d1dfed763a23216d4c66
|
433c3d407727dc5062ec4bf013abced4f8f17b10
|
refs/heads/master
| 2022-11-28T22:31:31.772517
| 2020-04-29T17:53:33
| 2020-04-29T17:53:33
| 285,674,503
| 0
| 0
|
Apache-2.0
| 2020-08-06T21:26:05
| 2020-08-06T21:26:05
| null |
UTF-8
|
R
| false
| true
| 838
|
rd
|
CreateHouseholdsSpecifications.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateHouseholds.R
\docType{data}
\name{CreateHouseholdsSpecifications}
\alias{CreateHouseholdsSpecifications}
\title{Specifications list for CreateHouseholds module}
\format{A list containing 5 components:
\describe{
\item{RunBy}{the level of geography that the module is run at}
\item{NewSetTable}{new table to be created for datasets specified in the
'Set' specifications}
\item{Inp}{scenario input data to be loaded into the datastore for this
module}
\item{Get}{module inputs to be read from the datastore}
\item{Set}{module outputs to be written to the datastore}
}}
\source{
CreateHouseholds.R script.
}
\usage{
CreateHouseholdsSpecifications
}
\description{
A list containing specifications for the CreateHouseholds module.
}
\keyword{datasets}
|
81008f03d6fec64454163d84211f4739c2dff8d9
|
b5499b1c495838c46a327d3eea93c0171b803ce5
|
/man/grid.metapost.Rd
|
2b6610121cd73294b23fe89b3b8c3ddb18a1584f
|
[] |
no_license
|
pmur002/metapost
|
27e73f587fdcaa76ad82e0f2835184990603dc19
|
b18723178cc3d196d6a2ae33251fff11f358c679
|
refs/heads/master
| 2021-06-06T13:23:16.789868
| 2020-04-29T22:06:26
| 2020-04-29T22:06:26
| 150,363,901
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,345
|
rd
|
grid.metapost.Rd
|
\name{grid.metapost}
\alias{grid.metapost}
\alias{metapostGrob}
\alias{metapostGrob.mppath}
\alias{metapostGrob.mpcontrols}
\alias{metapostGrob.mpfigure}
\title{
Draw a MetaPost curve.
}
\description{
Draw a MetaPost curve in \pkg{grid} graphics.
}
\usage{
\method{metapostGrob}{mppath}(x, gp = gpar(), name = NULL, digits=2, ...)
\method{metapostGrob}{mpcontrols}(x, gp = gpar(), name = NULL, ...)
\method{metapostGrob}{mpfigure}(x, gp = gpar(), name = NULL, ...)
grid.metapost(...)
}
\arguments{
\item{x}{
A MetaPost path, either unsolved (a description generated using
\code{\link{knot}} etc), or solved (as produced by
\code{\link{mptrace}}).
}
\item{gp}{
Graphical parameters (from a call to \code{gpar}).
}
\item{name}{
A name for the grob that is created.
}
\item{digits}{
The number of decimal places to use when writing floating
point values in MetaPost code.
}
\item{\dots}{
Arguments passed to \code{metapostGrob}.
}
}
\value{
\code{metapostGrob} creates a \code{"metapostgrob"} object.
}
\author{
Paul Murrell
}
\seealso{
\code{\link{knot}},
\code{\link{mptrace}}.
}
\examples{
\donttest{
oldunits <- options(metapost.units="in")
p <- knot(0, 0) + dir(0) + dir(0) + knot(1, 1)
grid.metapost(p)
options(oldunits)
}
}
\keyword{ dplot }% use one of RShowDoc("KEYWORDS")
|
680817e9e108f72a7869c58c08f0747cd1cdd6d6
|
0e4a0d329321b90db11a07fe9124fa428f26b792
|
/R/srr.R
|
cae32ca175f12686cd405fb80fd57f682c48f235
|
[] |
no_license
|
ropensci-review-tools/roreviewapi
|
266d265596e65407a0290e3a74b212f55f2354b7
|
e465fe2b6b20003b1eb8b5bd5ee5dd48be39fa00
|
refs/heads/main
| 2023-08-16T13:41:53.547234
| 2023-06-30T08:46:18
| 2023-06-30T08:46:18
| 364,507,612
| 4
| 1
| null | 2023-02-08T09:06:20
| 2021-05-05T08:20:22
|
R
|
UTF-8
|
R
| false
| false
| 7,866
|
r
|
srr.R
|
#' Count number of 'srr' statistical standards complied with, and confirm
#' whether that represents > 50% of all applicable standards.
#'
#' @param repourl The URL for the repo being checked, potentially including full
#' path to non-default branch.
#' @param repo The 'context.repo' parameter defining the repository from which
#' the command was invoked, passed in 'org/repo' format.
#' @param issue_id The id (number) of the issue from which the command was
#' invoked.
#' @param post_to_issue Integer value > 0 will post results back to issue (via
#' 'gh' cli); otherwise just return character string with result.
#' @return Single character string summarising standards compliance (or a
#' dependency-installation failure message); also posted to the issue when
#' `post_to_issue` is set.
#' @family ropensci
#' @export
srr_counts <- function (repourl, repo, issue_id, post_to_issue = TRUE) {
# Content taken directly from editor_check():
# Strip any "/tree/<branch>..." suffix so the bare repo URL can be cloned.
branch <- roreviewapi::get_branch_from_url (repourl)
if (!is.null (branch)) {
repourl <- gsub (paste0 ("\\/tree\\/", branch, ".*$"), "", repourl)
}
path <- roreviewapi::dl_gh_repo (u = repourl, branch = branch)
# Install dependencies; a failure message is returned verbatim to the caller.
deps <- roreviewapi::pkgrep_install_deps (path, repo, issue_id)
deps <- ifelse (is.null (deps), "", deps)
if (grepl ("failed with error", deps)) {
return (deps)
}
# Then the 'srr' bit:
if (is.null (branch)) {
branch <- "" # for srr_report fn
}
srr_rep <- srr::srr_report (path = path, branch = branch, view = FALSE)
# A length-1 report means the repo does not use 'srr' at all.
if (length (srr_rep) == 1L) { # "This is not an 'srr' package"
out <- "This is not an 'srr' package"
if (post_to_issue) {
out <- roreviewapi::post_to_issue (out, repo, issue_id)
}
return (out)
}
# If the report has a "## Missing Standards" section, return that tail
# (minus leading blank lines) instead of the compliance counts.
index <- grep ("^\\#\\#\\sMissing Standards", srr_rep)
if (length (index) > 0L) {
missing_stds <- srr_rep [-seq (index)]
while (!nzchar (missing_stds [1])) {
missing_stds <- missing_stds [-1]
}
out <- c (
missing_stds,
"",
"All standards must be documented prior to submission"
)
} else {
out <- roreviewapi::srr_counts_summary (srr_rep)
}
out <- paste0 (out, collapse = "\n")
if (post_to_issue) {
out <- roreviewapi::post_to_issue (out, repo, issue_id)
}
return (out)
}
#' Summarise counts of 'srr' standards from full 'srr' report
#'
#' @param srr_rep An 'srr' report generated by the `srr::srr_report()` function.
#' @return Character vector with markdown-formatted summary of numbers of
#' standards complied with, plus a final verdict on whether the package may
#' be submitted (> 50% compliance required).
#' @family ropensci
#' @export
srr_counts_summary <- function (srr_rep) {

    # Each "## Standards with ..." section runs up to the next markdown
    # heading; record both boundaries.
    stds_start <- grep ("^\\#\\#\\sStandards with", srr_rep)
    sections <- grep ("^\\#+", srr_rep)
    stds_end <- vapply (
        stds_start, function (i) {
            sections [which (sections > i)] [1]
        },
        integer (1L)
    )

    # as.list -> unlist to avoid accidental matrix results when numbers are
    # equal
    stds <- apply (cbind (stds_start, stds_end), 1, function (i) {
        as.list (srr_rep [seq (i [1], i [2])])
    })
    stds <- lapply (stds, unlist)

    # Section titles carry the standard prefix in backticks (e.g. `srrstats`).
    stds_what <- vapply (
        stds, function (i) {
            gsub ("\\`", "", regmatches (i, regexpr ("\\`.*\\`", i)))
        },
        character (1L)
    )

    # Extract the "<n> / <total>" counts, keyed by the "- <Category> :"
    # bullet labels of each section.
    stds_n <- lapply (stds, function (i) {
        vals <- regmatches (i, gregexpr ("[0-9]+\\s+\\/\\s+[0-9]+$", i))
        index <- which (vapply (vals, length, integer (1L)) > 0L)
        vals <- lapply (vals [index], function (j) {
            as.integer (strsplit (j, "\\/") [[1]])
        })
        categories <-
            regmatches (i, gregexpr ("^\\-\\s+[A-Za-z]+\\s\\:", i))
        categories <- gsub ("^\\-\\s+|\\s+\\:$", "", unlist (categories))
        names (vals) <- categories
        return (vals)
    })
    names (stds_n) <- stds_what

    categories <- srr::srr_stats_categories ()

    # Build one markdown bullet summarising the totals plus per-category
    # counts for one section ("complied" or "not complied").
    summarise_one <- function (s, complied = TRUE) {
        stds_summary <- paste0 (
            ifelse (complied,
                "- Complied with: ",
                "- Not complied with: "
            ),
            s$Total [1],
            " / ",
            s$Total [2],
            " = ",
            round (100 * s$Total [1] / s$Total [2], digits = 1),
            "% ("
        )
        these_categories <- names (s)
        these_categories <-
            these_categories [which (!these_categories == "Total")]
        # Loop variable renamed from "cat" to avoid shadowing base::cat.
        for (prefix in these_categories) {
            stds_summary <- paste0 (
                stds_summary,
                categories$category [categories$std_prefix == prefix],
                ": ",
                s [[prefix]] [1],
                " / ",
                s [[prefix]] [2],
                "; "
            )
        }
        return (gsub (";\\s$", ")", stds_summary))
    }

    stds_summary <- c (
        summarise_one (stds_n$srrstats, TRUE),
        summarise_one (stds_n$srrstatsNA, FALSE)
    )

    compliance <- stds_n$srrstats$Total [1] / stds_n$srrstats$Total [2]
    # ifelse() (rather than if/else) retained deliberately so an NA
    # compliance propagates instead of erroring.
    # FIX: corrected user-facing typo "standads" -> "standards" (both arms).
    stds_final <- ifelse (
        compliance > 0.5,
        paste0 (
            ":heavy_check_mark: This package complies with ",
            "> 50% of all standards and may be submitted."
        ),
        paste0 (
            ":heavy_multiplication_x: This package complies with ",
            "< 50% of all standards and is not ready to be submitted."
        )
    )

    out <- c (
        "## 'srr' standards compliance:",
        "",
        stds_summary,
        "",
        stds_final
    )

    return (out)
}
#' Get stats badge grade and standards version for a submission
#'
#' @param repo The submission repo
#' @param issue_num GitHub issue number of submission
#' @return A single character containing the label used directly for the issue
#' badge, or NULL when the issue is not a stats submission.
#' @family ropensci
#' @export
stats_badge <- function (repo = "ropensci/software-review",
issue_num = 258) {
# This by default returns only the opening comment. Additional comments can
# be extracted with the "-c" flag.
# NOTE(review): requires an authenticated 'gh' CLI on the PATH.
args <- list (
"issue",
"view",
issue_num,
"-R",
repo
)
out <- system2 ("gh", args = args, stdout = TRUE, wait = TRUE)
# The submission type is embedded in an HTML comment within the issue body.
type <- get_html_var (out, "submission-type")
if (length (type) == 0L) {
return (NULL)
}
if (type != "Stats") {
return (NULL)
}
# Only the first "labels:" line of the 'gh' output is relevant.
labels <- grep ("^labels\\:", out, value = TRUE) [1]
if (grepl ("approved", labels)) {
# Already approved: re-use the existing badge label verbatim.
g <- regexpr ("6\\/approved\\-(bronze|silver|gold)\\-v[0-9]+\\.[0-9]+$", labels)
res <- regmatches (labels, g)
} else {
# Not yet approved: build the label from grade + current standards version.
grade <- get_html_var (out, "statsgrade")
version <- stats_version (truncated = TRUE)
res <- paste0 ("6/approved-", grade, "-v", version)
}
return (res)
}
get_html_var <- function (x, expr = "submission-type") {
# Pull the value embedded after an opening "<!--<expr>-->" marker and
# before the next HTML comment opener on the same line.
marker <- paste0 ("<!--", expr, "-->")
hits <- grep (marker, x, value = TRUE)
span <- regmatches (hits, regexpr ("\\-\\->.*<!\\-\\-", hits))
gsub ("\\-\\->|<!\\-\\-", "", span)
}
#' Get current version of statistical standards
#'
#' Reads the 'Version' field of the statistical-software-review-book
#' DESCRIPTION file, downloaded once per session into the temp directory.
#'
#' @param truncated If TRUE, return only the leading "major.minor" part.
#' @return A single character containing version number
#' @noRd
stats_version <- function (truncated = TRUE) {
u <- paste0 (
"https://raw.githubusercontent.com/",
"ropensci/statistical-software-review-book/",
"main/DESCRIPTION"
)
# Cached per session: the file is only downloaded if not already present.
tmp <- fs::path (fs::path_temp (), "stats-devguide-DESCRIPTION")
if (!fs::file_exists (tmp)) {
ret <- utils::download.file (u, destfile = tmp, quiet = TRUE) # nolint
}
d <- data.frame (read.dcf (tmp))
version <- d$Version
if (truncated) {
# Keep only "major.minor" (e.g. "0.2" from "0.2.0.046").
version <- regmatches (version, regexpr ("[0-9]+\\.[0-9]+", version))
}
return (version)
}
|
e63e48e38b6964f414f960a616635a0767e2f58c
|
56d05ffb5d24a3b1d5a71bdb3fd59c82aaef7791
|
/C4V ShinnyDashboard.R
|
9d23ab44883a011fbf39728d53afe9e450afa3f6
|
[] |
no_license
|
Francesca1603/Coding-for-Venezuela
|
500bf04981777f09e44be332cbe0cb3ac6362de2
|
ffdab67f9c05f90f72529d8b856e618bbd6121d5
|
refs/heads/master
| 2021-01-05T10:05:21.991019
| 2020-02-17T00:58:32
| 2020-02-17T00:58:32
| 240,986,178
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 15,961
|
r
|
C4V ShinnyDashboard.R
|
##############################################################
library(dplyr) # tools for working with data frames and data manipulation
library(stringr) # common string operations
library(shiny) # framework for interactive web applications
library(shinydashboard) # dashboard layout on top of shiny
library(plotly) # library used to create interactive plots
library(bigrquery) # client that allows us to talk to Google's 'BigQuery'
##############################################################
# The following part is the code given by the team of Code for Venezuela, we did not make any change
# Authenticate against BigQuery with the public hackathon service-account key,
# then pull the full health-survey response table into df.
path = "https://storage.googleapis.com/angostura-public/hult-hackathon-key.json"
bq_auth(path = path)
project_id <- "event-pipeline"
sql <- 'SELECT * from `angostura_dev.eh_health_survey_response`'
# NOTE(review): query_exec() is deprecated in recent bigrquery releases —
# confirm the installed version still exports it.
df <- query_exec(sql, project_id, use_legacy_sql = FALSE)
# Lists grouping the survey columns into four categories (equipment, drugs,
# functional areas, power); used below to build per-category data frames.
# Equipment list
equipment_list<-c('rrt_avail_high_flow_catheters',
'rrt_avail_blood_tests_hiv_hvb_hvc_vdr',
'rrt_avail_immediate_access_urea_reduction_bun',
'nCoV_face_mask_avail',
'nCoV_respiratory_isolation_protocol_avail',
'nCoV_isolation_area_avail',
'operability_lab',
'operability_uls',
'operability_ct_mri',
'operability_xr',
'er_avail_defibrillator',
'er_avail_ott_intubation',
'er_avail_catheter',
'er_avail_oxygen_suction',
'sx_avail_anesthetic_gases',
'sx_avail_ott_intubation',
'sx_avail_patient_lingerie_kit',
'sx_avail_disposables_mask_gloves_gown',
'sx_avail_oxygen_suction',
'rrt_hemodialysis_avail_filter',
'rrt_hemodialysis_avail_lines',
'rrt_hemodialysis_avail_kit_hemodialysis')
# Drug List
drug_list <- c('er_avail_adrenalin',
'er_avail_atropine',
'er_avail_dopamine',
'er_avail_cephalosporins_betalactams',
'er_avail_aminoglycosides_quinolone',
'er_avail_vancomycin_clindamycin',
'er_avail_lidocaine',
'er_avail_minor_opioids',
'er_avail_major_opioids',
'er_avail_iv_fluids',
'er_avail_diazepam_dph',
'er_avail_heparin',
'er_avail_steroids',
'er_avail_insulin',
'er_avail_asthma',
'er_avail_blood_pressure',
'sx_avail_minor_opioids',
'sx_avail_major_opioids',
'sx_avail_anesthetics_iv',
'sx_avail_relaxants',
'rrt_hemodialysis_avail_b_complex',
'rrt_hemodialysis_avail_calcium',
'rrt_hemodialysis_avail_zemblar',
'rrt_hemodialysis_avail_iron')
# Functional areas in the hospital
function_list<- c('operability_icu_p',
'operability_er',
'operability_sx',
'rrt_operability',
'operability_icu')
# Power equipment List
power_list <- c('power_generator_available',
'power_outage_days_count')
# Combine all four category lists into a single vector
full_list <- c(equipment_list, drug_list, function_list, power_list)
# Creating a second data frame choosing only the columns with data that could be converted into numbers
# to do calculations
# NOTE(review): selection is by hard-coded column positions of the BigQuery
# table — fragile if the upstream schema changes.
df_2 <- df %>%
select(c(2,34, 45, 3, 5, 14, 20:33, 35:44, 46:55, 57:63, 65, 77, 79:84, 119))
# Creating a third data frame with only numerical values to do a
# for loop (iterating over every cell of the data frame)
df_3 <- data.frame(matrix(ncol = ncol(df_2) -3, nrow = nrow(df_2)))
# Renaming the columns to keep the consistency in all the data frames
colname <- c(colnames(df_2[,4: ncol(df_2)]))
colnames(df_3) <- colname
# Creating a new column, year_week2, which allows us to generate timeline
# plots: report_week is split on whitespace; parts 3 and 1 are year / week,
# and the week is zero-padded so lexical sort equals chronological sort.
year_week <- t(data.frame(strsplit(df_2$report_week, "\\s+")))
year<- year_week[, 3]
week<- year_week[, 1]
week2 <- str_pad(week, 2, pad = '0')
year_week2 <- paste(year, week2)
# For loop mapping the Spanish survey answers to numeric day counts:
# Todos los días (every day) = 7
# Funciona todos los días (works every day) = 7
# Entre 3 y 5 días (3-5 days) = 4 (the average of 3 and 5)
# Menos de 3 de días (fewer than 3 days) = 2
# Funciona menos de 3 días (works fewer than 3 days) = 2
# Entre 1 y 2 días (1-2 days) = 1
# No existe (does not exist) = 0
# No operativa (not operational) = 0
# Nunca ha existido (has never existed) = 0
# Hay pero no funciona (present but broken) = 0
# Nunca ha habido (there has never been) = 0
# No hubo (there was none) = 0
# NOTE(review): matching is by substring, so the order of the grepl()
# branches matters — do not reorder them. Unmatched answers fall through
# to 0. The "+3" offset skips report_week/hospital_code/federal_entity.
for (col in 1:ncol(df_3)) {
for (row in 1:nrow(df_3)) {
if (grepl("los", df_2[row, col +3])) {
df_3[row, col] = 7
} else if (grepl("3 y 5", df_2[row, col + 3])) {
df_3[row, col] = 4
} else if (grepl("hubo", df_2[row, col + 3])) {
df_3[row, col] = 0
} else if (grepl("1 y 2", df_2[row, col + 3])) {
df_3[row, col] = 1
} else if (grepl("existido", df_2[row, col + 3])) {
df_3[row, col] = 0
} else if (grepl("operativa", df_2[row, col + 3])) {
df_3[row, col] = 0
} else if (grepl("existe", df_2[row, col + 3])) {
df_3[row, col] = 0
} else if (grepl("de 3", df_2[row, col + 3])) {
df_3[row, col] = 2
} else if (grepl("pero", df_2[row, col + 3])) {
df_3[row, col] = 0
} else if (grepl("habido", df_2[row, col + 3])) {
df_3[row, col] = 0
} else {df_3[row, col] = 0}
}
}
# Adding the 3 columns we deleted before (become columns 54:56 of df_3)
df_3$year_week <- year_week2
df_3$hospital_code <- df_2$hospital_code
df_3$federal_entity <- df_2$federal_entity
# Creating data frames for every category to generate specific plots per each one;
# *_nation frames hold the national weekly average of the per-row means.
# Equipment
equipment_df <- df_3 %>%
select(c(all_of(equipment_list), 54:56))
equipment_df$mean <- rowMeans(equipment_df[1:22], na.rm = TRUE)# Creating a new column with the average of every row
equipment_nation <- equipment_df %>%
group_by(year_week) %>%
summarize(mean_nation = mean(mean))
# Drugs
drug_df <- df_3 %>%
select(c(all_of(drug_list), 54:56))
drug_df$mean <- rowMeans(drug_df[1:24], na.rm = TRUE)# Creating a new column with the average of every row
drug_nation <- drug_df %>%
group_by(year_week) %>%
summarize(mean_nation = mean(mean))
# Functional areas in the hospital
function_df <- df_3 %>%
select(c(all_of(function_list), 54:56))
function_df$mean <- rowMeans(function_df[1:5], na.rm = TRUE)# Creating a new column with the average of every row
function_nation <- function_df %>%
group_by(year_week) %>%
summarize(mean_nation = mean(mean))
# Power equipment
power_df <- df_3 %>%
select(c(all_of(power_list), 54:56))
power_df$mean <- rowMeans(power_df[1:2], na.rm = TRUE)# Creating a new column with the average of every row
power_nation <- power_df %>%
group_by(year_week) %>%
summarize(mean_nation = mean(mean))
# plotly axis styles: ax_hide hides the axis entirely, ax_tick sets spacing.
ax_hide <- list(title = "", zeroline = FALSE, showline = FALSE,
showticklabels = FALSE, showgrid = FALSE)
ax_tick <- list(dtick = 10)
# Column names
# df_4 is a copy of df_3 with human-readable names for the bar charts.
df_4<- df_3
names(df_4) <- c("OPERABILITY ICU", "POWER_GENERATOR", "OPERABILITY ICU P",
"HIGH FLOW CATHETERS", "BLOOD TESTS HIV HVB HVC VDR",
"UREA REDUCTION BUN", "FACE MASK", "RESPIRATORY ISOLATION PROTOCOL",
"EMERGENCY ROOM", "ISOLATION AREA", "SURGICAL PAVILION",
"LABORATORY", "ULTRASOUND", "CT-MRI", "X-RAY", "ADRENALINE",
"ATROPINE", "DOPAMINE", "CEPHALOSPORINS BETALACTAMS",
"AMINOGLYCOSIDES QUINOLONE", "VANCOMYCIN CLINDAMYCIN",
"LIDOCAINE", "ER MINOR OPIOIDS", "ER MAJOR OPIOIDS",
"IV FLUIDS", "DIAZEPAM DPH", "HEPARIN", "STEROIDS",
"INSULIN", "ASTHMA", "BLOOD PRESSURE", "DEFIBRILLATOR",
"ER OTT INTUBATION", "CATHETER", "ER OXYGEN SUCTION",
"SX MINOR OPIOIDS", "SX MAJOR OPIOIDS", "ANESTHETIC GASES",
"ANESTHETICS IV", "RELAXANTS", "SX OTT INTUBATION", "PATIENT LINGERIE KIT",
"DISPOSABLES MASK GLOVES GOWN", "SX OXYGEN SUCTION", "DIALYSIS SERVICES OPERABILITY",
"HEMODIALYSIS FILTER", "HEMODIALYSIS LINES", "HEMODIALYSIS KIT",
"HEMODIALYSIS IRON", "HEMODIALYSIS B COMPLEX", "HEMODIALYSIS CALCIUM",
"HEMODIALYSIS ZEMBLAR", "POWER OUTAGE DAYS COUNT", "YEAR WEEK",
"HOSPITAL CODE", "FEDERAL ENTITY")
# NOTE(review): the script previously recomputed equipment_df / drug_df /
# function_df / power_df and their *_nation weekly summaries here, line for
# line identical to the computations above. The redundant recomputation has
# been removed; the objects built above are reused unchanged.
# Per-category frames built from the readable-name copy (df_4), sorted by
# week. Column selection is positional — fragile if df_4's layout changes.
drug_df2 <- df_4 %>%
select(c(54, 16:31, 36:37, 39:40, 49:52, 55:56)) %>%
arrange(`YEAR WEEK`)
# Most recent reporting week present in the data.
year_week3 <- tail(drug_df2$`YEAR WEEK`, 1)
# Column means for the latest week -> one bar per medicine in the dashboard.
# data.frame(colMeans(...)) names the value column "." (used by plotly later).
last_week_drug <- drug_df2 %>%
filter(`YEAR WEEK` == year_week3) %>%
select(c(2:25)) %>%
colMeans(na.rm = T) %>%
data.frame()
last_week_drug$Medicine <- c(rownames(last_week_drug))
equipment_df2 <- df_4 %>%
select(c(54, 4:8, 10, 12:15, 32:35, 38, 41:44, 46:48, 55:56)) %>%
arrange(`YEAR WEEK`)
last_week_equip <- equipment_df2 %>%
filter(`YEAR WEEK` == year_week3) %>%
select(c(2:23)) %>%
colMeans(na.rm = T) %>%
data.frame()
last_week_equip$Equipment <- c(rownames(last_week_equip))
function_df2 <- df_4 %>%
select(c(54, 1, 3, 9, 11, 45, 55:56)) %>%
arrange(`YEAR WEEK`)
last_week_function <- function_df2 %>%
filter(`YEAR WEEK` == year_week3) %>%
select(c(2:5)) %>%
colMeans(na.rm = T) %>%
data.frame()
last_week_function$Function <- c(rownames(last_week_function))
power_df2 <- df_4 %>%
select(c(54, 2, 53, 55:56)) %>%
arrange(`YEAR WEEK`)
last_week_power <- power_df2 %>%
filter(`YEAR WEEK` == year_week3) %>%
select(c(2:3)) %>%
colMeans(na.rm = T) %>%
data.frame()
last_week_power$Power <- c(rownames(last_week_power))
########### Shiny user interface
# Sidebar with six tabs: "status" shows four info boxes with the latest
# national averages; each remaining tab shows one plotly chart.
ui <- dashboardPage(dashboardHeader(title = "Code 4 Venezuela NGO"),
dashboardSidebar(sidebarMenu(
menuItem("Status", tabName = "status"),
menuItem("Overview", tabName = "overview"),
menuItem("Medicine", tabName = "medicine"),
menuItem("Equipment", tabName = "equipment"),
menuItem("Function", tabName = "function2"),
menuItem("Power", tabName = "power"))),
dashboardBody(tabItems(
tabItem(tabName = "status",
fluidRow(box(width = 6,
height = 150,
infoBox(width = NULL,
title = "Medicine",
subtitle = textOutput("text1"))),
box(width = 6,
height = 150,
infoBox(width = NULL,
title = "Equipment",
subtitle = textOutput("text2")))),
fluidRow(box(width = 6,
height = 150,
infoBox(width = NULL,
title = "Function",
subtitle = textOutput("text3"))),
box(width = 6,
height = 150,
infoBox(width = NULL,
title = "Power",
subtitle = textOutput("text4"))))),
tabItem(tabName = "overview",
plotlyOutput("overview")),
tabItem(tabName = "medicine",
plotlyOutput("medicine")),
tabItem(tabName = "equipment",
plotlyOutput("equipment")),
tabItem(tabName = "function2",
plotlyOutput("function2")),
tabItem(tabName = "power",
plotlyOutput("power"))
)
)
)
# Server: renders the four status texts, the 2x2 weekly-trend overview and
# the four per-category bar charts for the latest reporting week.
server <- function(input, output) {
  # Latest national averages (days of availability) for the status boxes.
  output$text1 <- renderText(c(round(tail(drug_nation$mean_nation, n=1),2), "Days"))
  output$text2 <- renderText(c(round(tail(equipment_nation$mean_nation, n=1),2), "Days"))
  output$text3 <- renderText(c(round(tail(function_nation$mean_nation, n=1),2), "Days"))
  output$text4 <- renderText(c(round(tail(power_nation$mean_nation, n=1),2), "Days"))
  # Weekly trend lines, one per category, combined in a 2x2 grid.
  p1 <- drug_nation %>%
    plot_ly(x = ~year_week, y = ~mean_nation) %>%
    add_lines(name = "Drug") %>%
    layout(xaxis = ax_hide, yaxis = list(title = "National Mean", range = c(0,5)))
  p2 <- equipment_nation %>%
    plot_ly(x = ~year_week, y = ~mean_nation) %>%
    add_lines(name = "Equipment") %>%
    layout(xaxis = ax_hide, yaxis = list(title = "", range = c(0,5)))
  p3 <- function_nation %>%
    plot_ly(x = ~year_week, y = ~mean_nation) %>%
    add_lines(name = "Function") %>%
    layout(xaxis = ax_tick, yaxis = list(title = "National Mean", range = c(0,5)))
  # FIX: p4 previously plotted function_nation under the "Power" label
  # (copy-paste bug); it now uses power_nation as intended.
  p4 <- power_nation %>%
    plot_ly(x = ~year_week, y = ~mean_nation) %>%
    add_lines(name = "Power") %>%
    layout(xaxis = ax_tick, yaxis = list(title = "", range = c(0,5)))
  p <- subplot(p1, p2, p3, p4, nrows = 2) %>%
    layout(width = 900, height = 600, title = "Weekly Trend Change")
  output$overview <- renderPlotly(p)
  # Bar charts of the latest week's column means; the value column produced
  # by data.frame(colMeans(...)) is literally named ".".
  medicine <- last_week_drug %>%
    plot_ly(y = ~Medicine, x= ~., type = "bar", orientation = "h") %>%
    layout(title = "Medicine Supply Available", height = 500, xaxis = list(title = "Days Supply"))
  output$medicine <- renderPlotly(medicine)
  equipment <- last_week_equip %>%
    plot_ly(y = ~Equipment, x= ~., type = "bar", orientation = "h") %>%
    layout(title = "Equipment Supply Available", height = 500, xaxis = list(title = "Days Supply"))
  output$equipment <- renderPlotly(equipment)
  funct <- last_week_function %>%
    plot_ly(y = ~Function, x= ~., type = "bar", orientation = "h") %>%
    layout(title = "Function Operating", height = 500, xaxis = list(title = "Days In Operation"))
  output$function2 <- renderPlotly(funct)
  power <- last_week_power %>%
    plot_ly(y = ~Power, x = ~., type = "bar", orientation = "h") %>%
    layout(title = "Power supply available", height = 500, xaxis = list(title = "Days Available"))
  output$power <- renderPlotly(power)
}
# Launch the application.
shinyApp(ui, server)
|
64e306b663c46f19ec718def7b84bc980f64cce7
|
02741e2e8e6d693c0b770a9b34ce33cfc8c6e504
|
/tests/manualTests/manualTests.R
|
a30771ce82caacd9d89c878a713e8fd946e79f79
|
[] |
no_license
|
RGLab/ImmuneSpaceR
|
dc6baa18a9a574692d352f775b5a3c4efdb1908d
|
165c75953186548517bcf1fa26204c22bc1ae164
|
refs/heads/main
| 2022-12-21T12:18:18.942221
| 2022-12-14T22:36:51
| 2022-12-14T22:36:51
| 20,756,066
| 25
| 16
| null | 2022-12-13T17:52:17
| 2014-06-12T05:40:31
|
R
|
UTF-8
|
R
| false
| false
| 473
|
r
|
manualTests.R
|
# Tests to be run by hand
# takes about 30 min
# Project-level connection: an empty study accession connects to all studies.
# NOTE(review): requires network access to an ImmuneSpace server, so this is
# intentionally outside the automated test suite.
allsdy <- CreateConnection("")
# Call con$getGEInputs() and capture -- rather than propagate -- whatever
# condition it raises.  Returns the warning or error condition object when
# one is signalled, otherwise the normal result of the call.
try_gei <- function(con) {
  capture <- function(cond) cond
  tryCatch(con$getGEInputs(), warning = capture, error = capture)
}
# getGEInputs() must refuse to run on a project-level (all-study) connection;
# the captured condition's message is expected to contain the word "project".
test_that("returns error if run at project level", {
  res <- try_gei(allsdy)
  expect_true("project" %in% strsplit(res$message, split = " ")[[1]])
})
# Run on server to be local
# NOTE(review): reaches into the R6 object's private environment to invoke an
# internal test helper -- deliberately not part of the public API, manual only.
allsdy$.__enclos_env__$private$.test_files()
|
31a629df44c0a5061aff532507553db93bb07fa1
|
ec95da2a7bed5e5628ea134e3c65a9b8297c9589
|
/R/test-sql.R
|
d8bd9820a42a1bc972e1fe7340d35abc78f54a87
|
[] |
no_license
|
wlattner/DBItest
|
9488f3b7378fa957d37ab38de1fd96465c4bf913
|
bb827b194b9ef07a7c6502805a37426e6b99dd25
|
refs/heads/master
| 2021-01-16T20:32:39.826822
| 2017-08-13T20:48:41
| 2017-08-13T20:48:41
| 62,806,938
| 0
| 0
| null | 2016-07-07T12:59:03
| 2016-07-07T12:59:03
| null |
UTF-8
|
R
| false
| false
| 336
|
r
|
test-sql.R
|
#' @name test_all
#' @aliases NULL
#' @section Tests:
#' [test_sql()]:
#' Test SQL methods
NULL
#' Test SQL methods
#'
#' @inheritParams test_all
#' @include test-result.R
#' @family tests
#' @export
test_sql <- function(skip = NULL, ctx = get_default_context()) {
  # All SQL-method specifications live in `spec_sql`; hand them to the
  # shared runner under the "SQL" suite name.
  suite_name <- "SQL"
  run_tests(ctx, spec_sql, skip, suite_name)
}
|
44cbb85cbfba431a41e6e9302e5c7cb6ed39bc1f
|
126695a89b840cb6c9218f8d5e13ded73fc80c4c
|
/man/consolidate_krause_etal_rna001_data.Rd
|
ad414f4e84ac541441ca8147fb2e7435483ce79f
|
[
"MIT"
] |
permissive
|
adnaniazi/ISATAILanalyses
|
f261b3aa6a90662780fb2e01bf52d8eaa43e6bc4
|
b240834dc4b37a359eb3d863a16fc2f8ef18e4b5
|
refs/heads/master
| 2021-06-26T03:22:13.672077
| 2021-01-11T15:44:34
| 2021-01-11T15:44:34
| 196,380,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 879
|
rd
|
consolidate_krause_etal_rna001_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consolidate_krause_etal_rna001_data.R
\name{consolidate_krause_etal_rna001_data}
\alias{consolidate_krause_etal_rna001_data}
\title{Consolidate SQK-RNA001 related disparate pieces of information into a
single data frame}
\usage{
consolidate_krause_etal_rna001_data(decoded_barcodes, tailfindr_estimates,
nanopolish_estimates, transcript_start_info)
}
\arguments{
\item{decoded_barcodes}{A dataframe of decode barcodes}
\item{tailfindr_estimates}{A dataframe of tailfindr estimates}
\item{nanopolish_estimates}{A dataframe of Nanopolish estimates}
\item{transcript_start_info}{A dataframe of transcript start
information as obtained by alignment of eGFP}
}
\value{
A consolidated dataframe
}
\description{
Consolidate SQK-RNA001 related disparate pieces of information into a
single data frame
}
|
396ecd4e2762c94b57fef9d53b54c7b7b2df6b5d
|
b2e8d66049bfce1f9f417fa2177b6e73f1ab2a92
|
/man/turboSim.Rd
|
910f78e523ca6b4442964320002a2557849e66ba
|
[] |
no_license
|
cran/turboEM
|
618e3c4a9b0a8a19c01af9438a4b715425041138
|
6b4d3025b18a280d2c21858c5e805ca09903a739
|
refs/heads/master
| 2021-08-27T21:40:16.627220
| 2021-08-05T03:20:02
| 2021-08-05T03:20:02
| 17,700,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,111
|
rd
|
turboSim.Rd
|
\name{turboSim}
\alias{turboSim}
\title{Conduct benchmark studies of EM accelerator}
\description{The \code{turboSim} function conducts benchmark studies to compare performance of multiple acceleration schemes over a large number of repetitions. The \code{\link{turboSim}} function outputs objects of class \code{turbosim}.}
\usage{
turboSim(parmat, fixptfn, objfn, method = c("em","squarem","pem","decme","qn"),
boundary, pconstr = NULL, project = NULL, parallel = FALSE, method.names,
keep.pars = FALSE, ..., control.method = replicate(length(method),list()),
control.run = list())
}
\arguments{
\item{parmat}{A matrix of starting parameter values, where each row corresponds to a single benchmark study repetition.}
\item{fixptfn}{A vector function, \eqn{F}{F} that denotes the fixed-point mapping. This function is the most essential input in the package. It should accept a parameter vector as input and should return a parameter vector of same length. This function defines the fixed-point iteration: \eqn{x_{k+1} = F(x_k)}{x[k+1] = F(x[k])}. In the case of EM algorithm, \eqn{F}{F} defines a single E and M step.}
\item{objfn}{This is a scalar function, \eqn{L}{L}, that denotes a ``merit'' function which attains its local minimum at the fixed-point of \eqn{F}{F}. This function should accept a parameter vector as input and should return a scalar value. In the EM algorithm, the merit function \eqn{L}{L} is the negative log-likelihood. In some problems, a natural merit function may not exist. However, this argument is required for all of the algorithms *except* Squarem (which defaults to Squarem-2 if \code{objfn} not provided) and EM.}
\item{method}{Specifies which algorithm(s) will be applied. Must be a vector containing one or more of \code{c("em", "squarem", "pem", "decme", "qn")}.}
\item{boundary}{Argument required for Dynamic ECME (\code{decme}) only. Function to define the subspaces over which the line search is conducted.}
\item{pconstr}{Optional function for defining boundary constraints on parameter values. Function maps a vector of parameter values to TRUE if constraints are satisfied. Note that this argument is only used for the Squarem (\code{squarem}), Parabolic EM (\code{pem}), and quasi-Newton (\code{qn}) algorithms, and it has no effect on the other algorithms.}
\item{project}{Optional function for defining a projection that maps an out-of-bound parameter value into the constrained parameter space. Requires the \code{pconstr} argument to be specified in order for the \code{project} to be applied.}
\item{parallel}{Logical indicating whether the \emph{repetitions} of the benchmark study will be run in parallel. Note that the parallel implementation is based on the \code{foreach} package, which depends on a \emph{parallel backend} being registered prior to running \code{turboSim()}. See *Details* of \code{\link{foreach}}.}
\item{method.names}{Vector of unique names that identify the algorithms being compared.}
\item{keep.pars}{Logical indicating whether the parameter values at termination should be kept. Defaults to FALSE.}
\item{control.method}{If \code{method = c(method1, method2, ...)}, then \code{control.method = list(list1, list2, ...)} where \code{list1} is the list of control parameters for \code{method1}, \code{list2} is the list of control parameters for \code{method2}, and so on. If \code{length(method) == 1}, then \code{control.method} is the list of control parameters for the acceleration scheme.
See *Details* of \code{\link{turboem}}.}
\item{control.run}{List of control parameters for convergence and stopping the algorithms.
See *Details* of \code{\link{turboem}}.}
\item{...}{Arguments passed to \code{fixptfn} and \code{objfn}.}
}
\value{
\code{turboSim} returns an object of class \code{\link{turbosim}}.
}
\seealso{
\code{\link{turbosim}, \link{turboem}}
}
\examples{
###########################################################################
# Examples provided in the vignette, which can be seen by typing
# vignette("turboEM")
}
|
a9c43e930dd0e54694503c4147a7123e5f1e145b
|
8993d7a867b2100a8f24a7a92b691e9a9a742f44
|
/R/data.R
|
ca8634e8c34f26138fc7dc802af5126e2d0c8a02
|
[
"MIT"
] |
permissive
|
phe-stephen-carnall/epidm
|
2d7dd1cccb5e2029a751d168dead09b1e8178182
|
27f3945d2d8014414ea90b065fca37f951a8255e
|
refs/heads/master
| 2023-03-28T04:00:21.444619
| 2021-04-06T08:56:02
| 2021-04-06T08:56:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,183
|
r
|
data.R
|
#' Bacterial Genus Gram Stain Lookup Table
#'
#' A reference table of bacterial gram stain results by genus
#' to allow faster filtering of bacterial results.
#' This dataset has been maintained manually against the PHE SGSS database.
#' If there are organisms missing, please raise an issue or open a pull request on the
#' \href{https://github.com/alexbhatt/epidm}{epidm GitHub}
#'
#' @format A data frame with four columns
#' \describe{
#' \item{organism_genus}{The bacterial genus}
#' \item{gram_stain}{A character string to indicate POSITIVE or NEGATIVE type}
#' \item{gram_positive}{A 0/1 flag to indicate if the genus is gram positive}
#' \item{gram_negative}{A 0/1 flag to indicate if the genus is gram negative}
#' }
#'
"genus_gram_stain"
#' Respeciated organisms
#'
#' Occasionally, research shows that two organisms, previously thought to be
#' different are in fact one and the same. The reverse is also true.
#' This is a manually updated list.
#' If there are organisms missing, or new respeciates to be added,
#' please raise an issue or open a pull request on the
#' \href{https://github.com/alexbhatt/epidm}{epidm GitHub}
#'
#' @format
#' \describe{
#' \item{previous_organism_name}{What the organism used to be known as, in the form GENUS SPECIES}
#' \item{organism_species_name}{What the organism is known as now, in the form GENUS SPECIES}
#' \item{organism_genus_name}{The genus of the recoded organism}
#' \item{genus_change}{A 0/1 flag to indicate if the genus has changed}
#' \item{genu_all_species}{A 0/1 flag to indicate if all species under that genus should change}
#' }
#'
"respeciate_organism"
#' Specimen type grouping
#'
#' In order to help clean up an analysis based on a group of specimen types,
#' a lookup table has been created to help group sampling sites.
#' This is a manually updated list.
#' If there are organisms missing, or new respeciates to be added,
#' please raise an issue or open a pull request on the
#' \href{https://github.com/alexbhatt/epidm}{epidm GitHub}
#' @format
#' \describe{
#' \item{specimen_type}{The primary specimen type with detail}
#' \item{specimen_group}{A simple grouping of like specimen sites}
#' }
"specimen_type_grouping"
|
5c553999f44a030e0ade9076f09fc89ee0190aee
|
196ff4c376e540a9e4ffd3293949af516c51319e
|
/man/merge_categorical_clusters.Rd
|
9f96c4284b88df960fa6c962d8bb2ab03aa1a267
|
[] |
no_license
|
rogerswt/fluster
|
e1163375c2be750d8300e6923f6b3ccc2195ab6c
|
e5c455d698f0997074309b679860779819cb2b89
|
refs/heads/master
| 2021-07-24T04:35:59.018408
| 2021-07-17T20:15:30
| 2021-07-17T20:15:30
| 229,350,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 905
|
rd
|
merge_categorical_clusters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fluster_methods.R
\name{merge_categorical_clusters}
\alias{merge_categorical_clusters}
\title{Merge Categorically Similar Clusters}
\usage{
merge_categorical_clusters(fluster_obj, sd_fac = 1)
}
\arguments{
\item{fluster_obj}{A fluster object.}
\item{sd_fac}{A factor multiplying the standard deviation to determine if that
marker is sufficiently above (below) the threshold in order to labeled "hi" ("lo").}
}
\value{
A fluster object after categorical merging.
}
\description{
Clusters are labeled with a categorical vector in which each
marker is either "hi" or "lo" with respect to a threshold. If a marker is not unambiguously
either hi or lo, it's labeled as "un" for "unknown". To receive hi (lo), the
cluster center must be sufficiently above (below) the threshold in units of
the standard deviation of that marker.
}
|
4dd6d5da37e586b5c95bb3fb8ec353fb55493150
|
f6307fbe85d3633fa3101e52afea10c1e0626a03
|
/clase_textural.R
|
7c45de0fc378062c341240313d1d1a102b46246f
|
[
"MIT"
] |
permissive
|
jimmygomez/firstR
|
31a34ed3b4b465e1b6e2afa3f128bde22d684852
|
b307a716deac12edfe0b7347942fa1c287f8563c
|
refs/heads/master
| 2021-01-12T04:38:30.051075
| 2016-12-30T16:27:47
| 2016-12-30T16:27:47
| 77,694,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,232
|
r
|
clase_textural.R
|
# TRIANGULO TEXTURAL: locate the clay (arcilla) fraction r_arc within its
# percentage bands and sanity-check that the three fractions
# (clay + sand + silt) add up to 100 %.
# Names of the twelve textural classes.
n_textura<-c("Arcilloso", "Arcillo arenoso","Arcillo limoso",
             "Franco arcillo arenoso","Franco arcilloso","Franco arcillo Limoso",
             "Franco arenoso","Franco","Franco limoso",
             "Arena Franca","Limo", "Arena");n_textura
comment(n_textura)<-'n_textura : nombre de las clases texturales'
# Clay-percentage bands.
arc1<-c(0:15)
arc2<-c(16:20)
arc3<-c(21:55)
arc4<-c(56:100)
# Sample composition (percent): clay, sand, silt.
# BUG FIX: r_aren and r_lim were originally assigned *after* the consistency
# check below, so the check failed with "object 'r_aren' not found".
r_arc<-30
r_aren<-50
r_lim<-20
if((r_arc+r_aren+r_lim)!=100){
  print("Error de ingreso")
}else{
  print("Correcto")
}
# ea_k equals 1 when r_arc falls inside band k, and is numeric(0) otherwise.
c_arc<-arc1/r_arc
ea1<-c_arc[c_arc==1]
c_arc<-arc2/r_arc
ea2<-c_arc[c_arc==1]
c_arc<-arc3/r_arc
ea3<-c_arc[c_arc==1]
c_arc<-arc4/r_arc
ea4<-c_arc[c_arc==1]
# Diagnostic echoes (logical(0) when the band did not match).
ea1==1
ea2==1
ea3==1
ea4==1
# Report the matching band.  BUG FIX: isTRUE() guards the zero-length
# comparisons that previously made these if() statements error out with
# "argument is of length zero" whenever a band did not contain r_arc.
if(isTRUE(ea1==1) || isTRUE(ea2==1)){
  if(isTRUE(ea1==1)){
    ea1
  }else{
    ea2
  }
}else{
  if(isTRUE(ea3==1)){
    ea3
  }else{
    ea4
  }
}
# Silt-percentage bands.
lim1<-c(0:10)
lim2<-c(11:40)
lim3<-c(41:60)
lim4<-c(61:73)
lim5<-c(74:88)
lim6<-c(89:100)
# Sand-percentage bands.
aren1<-c(0:20)
aren2<-c(21:50)
aren3<-c(51:70)
aren4<-c(71:85)
aren5<-c(86:100)
# Full 0..100 coverage of each fraction, assembled from the bands.
arcilla<-c(arc1,arc2,arc3,arc4);arcilla
limo<-c(lim1,lim2,lim3,lim4,lim5,lim6);limo
arena<-c(aren1,aren2,aren3,aren4,aren5);arena
|
426c89303547701ec34a92abb5c7671fc3ce0e7f
|
6a4341f281999dab2fe13fb3d4d04f006d2211f4
|
/2015/rcode/SpatMA_spplot.R
|
b3d3be4c8c7661466ea78cd6da4a47b884b7e403
|
[] |
no_license
|
Japhilko/GeoData
|
d2bcf87972b5ad04ad18ddd038d6044cfac4add2
|
4dbea5fe0cbc7761e8c56b88a632d718d34bcf9a
|
refs/heads/master
| 2022-02-21T18:37:42.846286
| 2022-02-19T09:19:58
| 2022-02-19T09:19:58
| 20,760,765
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,586
|
r
|
SpatMA_spplot.R
|
# Spatial Visualisations with sp::spplot
# Jan-Philipp Kolb
# Tue Oct 13 09:24:12 2015
library(sp)
library(maptools)
data("wrld_simpl")
# Pull France, Germany, Austria and Switzerland out of the world map.
iso2 <- wrld_simpl@data$ISO2
wanted <- c("FR", "DE", "AT", "CH")
my_map <- wrld_simpl[match(wanted, iso2), ]
plot(my_map)
# 2005 population, first with the default palette ...
spplot(my_map, "POP2005")
# ... then with several colorRamps palettes.
library(colorRamps)
spplot(my_map, "POP2005", col.regions = blue2red(100))
spplot(my_map, "POP2005", col.regions = blue2green(100))
spplot(my_map, "POP2005", col.regions = green2red(100))
spplot(my_map, "POP2005", col.regions = blue2yellow(100))
spplot(my_map, "POP2005", col.regions = matlab.like(100))
# Fake a 2010 population by jittering the 2005 values by up to +/-10,000.
my_map$Pop2010 <- my_map$POP2005 +
  runif(length(my_map), -10000, 10000)
# Side-by-side panels for both years.
spplot(my_map, c("POP2005", "Pop2010"),
       col.regions = matlab.like(100))
|
31bdc4f64a90a09498dc69874c393f1ffe4e0347
|
47a7138b54c42644cff50ee3920a0439fc222f39
|
/annotation_file_script.R
|
ab59060b8286484d78a22dee067ee281863dfbd5
|
[] |
no_license
|
vinmperez/Useful-Scripts
|
0d6f9e04c59e40408d6d3054be46cd1aec6f863f
|
712d007747c257e25a70ae61dc3477801b87ea21
|
refs/heads/master
| 2020-07-19T13:43:12.968339
| 2019-09-26T18:57:43
| 2019-09-26T18:57:43
| 206,459,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
annotation_file_script.R
|
##-------------------------------------
# Import salmon quantifications for Exp 134, build an Ensembl
# transcript-to-gene map with biomaRt, and write a gene-level count matrix.
library(tximport)
library(readr)
# NOTE(review): the path contains a space before "quants" -- confirm it
# matches the actual directory name on the Z: drive.
dir <- "Z:/RNA Seq Data/Exp 134/ quants"
samples <- read.csv(file.path ("samples_info.txt"), header = TRUE)
samples
# One quant.sf per sample, named by the salmon output directory.
files <- file.path(dir, samples$Salmon_out, "quant.sf")
names(files) <- paste0(samples$Salmon_out)
all(file.exists(files))  # sanity check: every quant.sf must exist
library(biomaRt) # Get annotation files from Ensembl (mouse).
listMarts()
ensMart<-useMart("ensembl")
listDatasets(ensMart)
ensembl_hs_mart <- useMart(biomart="ensembl", dataset="mmusculus_gene_ensembl")
listAttributes(ensembl_hs_mart)[1:100,]
# Transcript/gene identifiers plus coordinates for the tx2gene mapping.
xx = getBM(attributes = c("ensembl_gene_id", "ensembl_transcript_id", "ensembl_transcript_id_version" , "entrezgene",
                          "start_position", "end_position", "transcript_start", "transcript_end"),
           mart = ensembl_hs_mart)
write.csv(xx, "xx.csv")
annotation = read.csv('./xx.csv', h=T) #Importing our annotation file
# tx ID, then gene ID
# NOTE(review): columns 3:1 = (tx id version, tx id, gene id); tximport uses
# the first column as transcript id and the second as gene id.
tx2gene <- annotation[, c(3:1)] # tx ID, then gene ID (we will use transcript id, gene, id, et cetera)
#generating counts
library(tximport)
txi <- tximport(files, type = "salmon", countsFromAbundance ="no",tx2gene = tx2gene)
names(txi)
head(txi$counts)
counts = txi$counts
write.csv(counts, "./counts.csv")
gene_length = txi$length
94acf83746c8e310ae83254cad53b335f3e42105
|
fe99282866b4b31c6ece86ae198977f9cb2401f1
|
/man/PANAS_november.Rd
|
1c1b896b607ee2571717ceb87024b1b8ec3f5ca6
|
[] |
no_license
|
cran/cosinor2
|
ab92f083f341811268fbf5f7682acf0507a28d98
|
d036dd0244854394a24d9b3148a16a6bcb835e5a
|
refs/heads/master
| 2021-01-19T14:52:15.112949
| 2018-10-15T15:10:03
| 2018-10-15T15:10:03
| 100,931,408
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,473
|
rd
|
PANAS_november.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cosinor2.R
\docType{data}
\name{PANAS_november}
\alias{PANAS_november}
\title{Self-reported mood}
\format{A data frame with 19 rows and 30 variables:
\describe{
\item{X01, X02, X03, X04, X05, X06, X07, X08, X09, X10, X11, X12, X13, X14, X15, X16, X17, X18, X19, X20, X21, X22, X23, X24, X25, X26, X27, X28, X29, X30}{Responses of subjects at 30 measurement points (days).}}}
\source{
Mutak, A. i Vukasović Hlupić, T. (2017). Exogeneity of the Circaseptan Mood Rhythm and Its Relation to the Working Week. \emph{Review of Psychology}, \emph{24} (1-2), 15-28.
}
\usage{
PANAS_november
}
\description{
A dataset containing the responses of 19 subjects on the shortened version of the PANAS questionnaire (Watson, Clark & Tellegen, 1988) in November 2015.
}
\details{
Measurements were taken every day after 8 PM.
}
\note{
The data contained in this dataset has been reduced compared to the original data that included more subjects. This dataset contains only the subjects that have responded to the PANAS questionnaire on more than 85\% of the timepoints in both of the research cycles (July and November).
}
\references{
Watson, D., Clark, L. A. & Tellegen, A. (1988). Development and Validation of Brief Measures of Positive and Negative Affect: The PANAS Scales. \emph{Journal of Personality and Social Psychology}, \emph{54(6)}, 1063-1070.
}
\keyword{datasets}
|
81b8f5525c99478e6f090429f869ff94eef0b652
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/lookoutmetrics_describe_alert.Rd
|
526cc5bbe5165bc89bcecc6af2573093c2b8e699
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 487
|
rd
|
lookoutmetrics_describe_alert.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookoutmetrics_operations.R
\name{lookoutmetrics_describe_alert}
\alias{lookoutmetrics_describe_alert}
\title{Describes an alert}
\usage{
lookoutmetrics_describe_alert(AlertArn)
}
\arguments{
\item{AlertArn}{[required] The ARN of the alert to describe.}
}
\description{
Describes an alert.
See \url{https://www.paws-r-sdk.com/docs/lookoutmetrics_describe_alert/} for full documentation.
}
\keyword{internal}
|
8e47267934457986d766c529286bcd4b9c92169f
|
2327d0bc2cc45a5504c39109846e0f4cba266606
|
/QID-3449-SFEWienerProcess/SFEWienerProcess.R
|
3842a0d651dd500c5df0230fe29dbfd00c28da5a
|
[] |
no_license
|
QuantLet/SFE
|
3d98a33cfcdc533210856c7618c32a78e111a6ce
|
d25a728a4371538eae982f44ea811b5b93328828
|
refs/heads/master
| 2022-06-15T13:35:17.387252
| 2022-06-08T01:22:00
| 2022-06-08T01:22:00
| 72,103,182
| 12
| 32
| null | 2022-01-30T18:58:21
| 2016-10-27T11:50:43
|
R
|
UTF-8
|
R
| false
| false
| 869
|
r
|
SFEWienerProcess.R
|
# clear variables and close windows
# NOTE(review): rm(list = ls()) wipes the caller's global environment --
# acceptable for a stand-alone demo script, but never source() this from
# other code.
rm(list = ls(all = TRUE))
graphics.off()
# Simulate k trajectories of the scaled random walk
#   X_t = c * sqrt(dt) * (sum of +/-1 steps),
# an approximation of a Wiener process on [0, 100], and plot all paths.
#
# Args:
#   dt: time-step size; must be > 0
#   c:  scale constant for the +/-1 increments
#   k:  number of trajectories; coerced with floor(), must end up > 0
#
# Side effect: draws the k paths on the current graphics device.  The RNG is
# seeded with set.seed(0), so repeated calls produce identical pictures.
SFEWienerProcess = function(dt, c, k) {
    k = floor(k)  # makes sure number of paths is an integer
    # Scalar condition: use short-circuit || (the original's elementwise |
    # works here but is not idiomatic for if()).
    if (dt <= 0 || k <= 0) {
        stop("Delta t and number of trajectories must be larger than 0!")
    }
    l = 100          # length of the simulated time horizon
    n = floor(l/dt)  # number of increments per path
    # (The unused time grid `t` from the original was dropped.)
    set.seed(0)
    z = matrix(runif(n * k), n, k)
    z = 2 * (z > 0.5) - 1  # scale to -1 or 1
    z = z * c * sqrt(dt)   # to get finite and non-zero variance
    zz = apply(z, MARGIN = 2, FUN = cumsum)
    x = rbind(rep(0, k), zz)  # every path starts at 0
    # Output
    matplot(x, lwd = 2, type = "l", lty = 1, ylim = c(min(x), max(x)), col = 2:(k +
        1), main = "Wiener process", xlab = "Time t", ylab = expression(paste("Values of process ", X[t], " delta")))
}
SFEWienerProcess(dt = 0.5, c = 1, k = 5)
|
7291de1d98e6b428364a22f374aaa91cb15778bf
|
7b55c0dd472e2b025f51f07ad2e23d6f1047e0f7
|
/FRBData/man/FRBData-package.Rd
|
7cce53b9cad71093c3a8a9b950a8958fdf29e953
|
[] |
no_license
|
teramonagi/R-FRBData
|
c57e97e0b4ecddf88cbd6ca5e361842f0be041a5
|
1b37353e6309bdcfeee7d032bed45ae437e30fc3
|
refs/heads/master
| 2020-07-02T04:59:02.181054
| 2011-10-27T11:15:18
| 2011-10-27T11:15:18
| 1,630,721
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 825
|
rd
|
FRBData-package.Rd
|
\name{FRBData-package}
\alias{FRBData-package}
\alias{FRBData}
\docType{package}
\title{
FRBData: Download financial data from FRB's website
}
\description{
This package provides functions which can get financial and economic data from FRB's website (http://www.federalreserve.gov).
}
\details{
\tabular{ll}{
Package: \tab FRBData\cr
Type: \tab Package\cr
Version: \tab 0.3\cr
Date: \tab 2011-10-27\cr
License: \tab BSD\cr
LazyLoad: \tab yes\cr
}
Easily download financial data from FRB's website.Especially, the functions which import interest rate are implemented at this time. In the future, other financial and economic data such as consumer credit and industrial production will be available.
}
\author{
Shinichi Takayanagi<teramonagi (at) gmail.com>
}
\examples{
#Treasury rate
GetInterestRates("TCMNOM",lastObs = 10)
}
|
b7e00d10fc74ae3cf82b432369b937c68c782210
|
b7f5db27a6d53c6ac25b494fb371fcd6ac86f262
|
/man/load.motif.dir.Rd
|
bf514275a4f03a938387e0cd850ce9450971fd2a
|
[] |
no_license
|
JohnReid/Saturn
|
49ef3061ac57c4f6aaf1304fcd2f00e60f92f0b1
|
9bb22496b38cbd2fd47c440aeb8581e0348e73a5
|
refs/heads/master
| 2021-03-24T12:38:28.044197
| 2018-12-11T15:33:24
| 2018-12-11T15:33:24
| 62,868,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 324
|
rd
|
load.motif.dir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motif.R
\name{load.motif.dir}
\alias{load.motif.dir}
\title{Load motif scan results from directory}
\usage{
load.motif.dir(motifs.dir, prior.log.odds = .PRIOR.LOG.ODDS,
maximum.BF = 10)
}
\description{
Load motif scan results from directory
}
|
23d71e137a0e55fd92e9b4cf3c0368127defca00
|
94eb181af16673df356af0006c49f66582605185
|
/R/chromDrawFunctions.R
|
7e93a985c5b71e4d145700709d712d9ceb9c4a0d
|
[] |
no_license
|
HongboTang/syntenyPlotteR
|
c1a6672df7aecdd427f85d35f130cb94fa80f84e
|
f55244058bb35a70fed32064938bfcbbbbeb4287
|
refs/heads/master
| 2023-02-01T01:55:37.265006
| 2020-12-15T17:32:07
| 2020-12-15T17:32:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,241
|
r
|
chromDrawFunctions.R
|
#' Draw Evolution Highway Plots
#'
#' This function draws Evolution Highway style plots. It requires as input the syntenic blocks following this
#' format: chr,start,end,targetChr,targetStart,targetEnd,orient,targetSpecies separated by tabs.
#' It also requires the output file name and the range of chromosomes of the reference species.
#' Example: draw.eh(input.csv,goat,"1:29")
#'
#' @title Evolution Highway style plot
#' @param infile Path to the syntenic blocks file
#' @param output file name
#' @param chrRange range of chromosome numbers in the reference "1:29"
#' @return A pdf file with the comparative drawings
#' @export
# Implementation: one page per reference chromosome.  Each target species is
# a facet showing that chromosome's syntenic blocks, filled by orientation
# (+1 light blue, -1 light pink) and labelled with the target chromosome.
draw.eh<-function(infile,output,chrRange) {
  # Declared NULL to silence R CMD check "no visible binding" notes for the
  # column names used inside subset()/aes().
  # BUG FIX: the original also nulled a variable `outfile` and then passed it
  # to pdf(), so every run wrote to ".pdf" regardless of the `output`
  # argument; pdf() now uses `output`.
  chr<-start<-end<-tarChr<-tarSt<-tarEnd<-orient<-tar<-text_size2<-NULL
  data<-read.table(infile, header=FALSE)
  colnames(data) = c("chr","start","end","tarChr","tarSt","tarEnd","orient","tar")
  # Fix the factor order so +1/-1 always map to the same two fill colours.
  data$orient = factor(data$orient, levels=c("1","-1"))
  # Label size proportional to block length.
  data$text_size2=80*((data$end-data$start)/100000)
  pdf(paste0(output,".pdf"),width=5.5, height =10, pointsize = 10)
  on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
  # NOTE(review): the roxygen example passes chrRange as the string "1:29";
  # iterating works as intended only when callers pass a real vector (1:29).
  for (ID in c(chrRange)) {
    print(ID)
    subsetChr1<-subset(data,chr==ID, select=c(chr,start,end,tarChr,tarSt,tarEnd,orient,tar,text_size2))
    # Renamed from `min`/`max` so base::min/base::max are not shadowed.
    chr_min<-min(subsetChr1$start)
    chr_max<-max(subsetChr1$end)
    print(ggplot2::ggplot() +
            ggplot2::geom_rect(data=subsetChr1, mapping=ggplot2::aes(xmin=start, xmax=end, ymin=0, ymax=0.5, fill=orient, group=tar), color="white",
                               alpha = 1, size = 0.1 ) +
            # Outline of the whole chromosome span.
            ggplot2::geom_rect(data=subsetChr1, ggplot2::aes(xmin=chr_min,xmax=chr_max,ymin=0,ymax=0.5), size=0.3, color="black", fill="NA") +
            ggplot2::facet_grid(~ tar) +
            ggplot2::coord_flip() +
            ggplot2::scale_x_reverse() +
            ggplot2::scale_y_discrete(expand=c(0,0)) +
            ggplot2::theme(
              panel.spacing.y = ggplot2::unit(c(-0.5,-0.5), "lines"),
              panel.spacing.x = ggplot2::unit(0,"lines"),
              panel.background = ggplot2::element_blank(),
              strip.background = ggplot2::element_blank(),
              axis.title.y = ggplot2::element_blank(),
              axis.title.x = ggplot2::element_blank(),
              axis.text.x = ggplot2::element_blank(),
              legend.position="none"
            ) +
            ggplot2::scale_fill_manual(values = c("1" = "lightblue", "-1" = "lightpink")) +
            ggplot2::geom_text(data=subsetChr1,ggplot2::aes(x=start+(end-start)/2,y=0+(0.5-0)/2,label=tarChr,size=text_size2))
    )
  }
}
#' Draw Pairwise Synteny Plots
#'
#' This function draws pairwise synteny plots. It requires as input the syntenic blocks following this
#' format: referenceSpecies,chr,start,end,targetChr,targetStart,targetEnd,orient,targetSpecies. Please separate by tabs.
#' It also requires the output file name and the range of chromosomes of the reference and target species.
#' Example: draw.pairwise(input.txt,outputName, refSizes, tarSizes, refName, tarName)
#'
#' @title Pairwise synteny plot
#' @param infile Path to the syntenic blocks file
#' @param output file name
#' @param refSizes file with chromosome sizes for the reference species
#' @param tarSizes file with the chromosome sizes for the target species
#' @param refName name of the reference species
#' @param tarName name of the target species
#' @return A pdf file with the comparative drawings
#' @export
draw.pairwise <- function(infile,output,refSizes,tarSizes,refName,tarName) {
xstart<-xend<-refchr<-tarchr<-x<-y<-group<-fill<-NULL
dataTMP<- read.delim(infile, header=FALSE)
data<-dataTMP[,c(4,5,6,1,2,3,7,8)]
ref_sizes <-read.delim(refSizes, header=FALSE) #to be consistent with naming in EH
tar_sizes <-read.delim(tarSizes, header=FALSE)
colnames(data) = c("tarchr", "tarstart", "tarend", "refchr", "refstart", "refend", "dir", "notUsed")
colnames(ref_sizes) = c("refchr", "size")
colnames(tar_sizes) = c("tarchr", "size")
#This adds gap in between reference chromosomes and convert to "linear" genome
for (i in c(1:nrow(ref_sizes))){
#print(i)
if (i == 1){
total_start = 1
total_end = ref_sizes[i, "size"]
} else {
total_start = total_end + 6000000
total_end = total_start + ref_sizes[i, "size"]
}
ref_sizes[i,"xstart"] = total_start
ref_sizes[i, "xend"] = total_end
}
#This adds gap in between target chromosomes
for (i in c(1:nrow(tar_sizes))){
#print(i)
if (i == 1){
total_start = 1
total_end = tar_sizes[i, "size"]
} else {
total_start = total_end + 6000000
total_end = total_start + tar_sizes[i, "size"]
}
tar_sizes[i,"xstart"] = total_start
tar_sizes[i, "xend"] = total_end
}
#This converts coordinates to linear genome and creates synteny polygon coordinates
synteny = data.frame()
for (i in c(1:nrow(data))){
tar_chr = data[i,"tarchr"]
ref_chr = data[i,"refchr"]
dir = data[i, "dir"]
tar_add = tar_sizes[as.character(tar_sizes$tarchr)==as.character(tar_chr),]$xstart
ref_add = ref_sizes[as.character(ref_sizes$refchr)==as.character(ref_chr),]$xstart
tar_y = 0.1
ref_y = 2
tar_xstart = data[i,"tarstart"] + tar_add
tar_xend = data[i,"tarend"] + tar_add
ref_xstart = data[i,"refstart"] + ref_add
ref_xend = data[i,"refend"] + ref_add
inverted = grepl("-", dir, fixed = TRUE)
if(inverted == TRUE){
df = data.frame(x = c(tar_xstart, tar_xend, ref_xstart, ref_xend), y = c(tar_y, tar_y, ref_y, ref_y),
fill = ref_chr, group = paste0("s",i))
} else {
df = data.frame(x = c(tar_xstart, ref_xstart, ref_xend, tar_xend), y = c(tar_y, ref_y, ref_y, tar_y),
fill = ref_chr, group = paste0("s",i))
}
synteny = rbind(synteny,df)
}
#making sure chr columns are factors
tar_sizes$tarchr<-as.factor(tar_sizes$tarchr)
ref_sizes$refchr<-as.factor(ref_sizes$refchr)
synteny$fill<-as.factor(synteny$fill)
pdf(paste0(output,".pdf"),width=20, height =5, pointsize = 10)
#This prints plot
print(ggplot2::ggplot(size = 0.2, font = 10, data = data) +
ggplot2::geom_rect(data=ref_sizes, mapping=ggplot2::aes(xmin=xstart, xmax=xend, ymin=2, ymax=2.10, fill=refchr),
color="black", alpha = 0.85, size = 0.2 ) +
ggplot2::geom_text(data=ref_sizes,ggplot2::aes(x=(xstart+xend)/2,y=2.15,label=refchr),size=2,angle=45) +
ggplot2::geom_text(mapping=ggplot2::aes(x=2,y=2.3, label=refName),size=3,hjust = 1) +
ggplot2::geom_rect(data=tar_sizes, mapping=ggplot2::aes(xmin=xstart, xmax=xend, ymin=0, ymax=0.10),fill="grey85",
color="black", alpha = 0.85, size = 0.2 ) +
ggplot2::geom_text(data=tar_sizes,ggplot2::aes(x=(xstart+xend)/2,y=-0.05,label=tarchr),size=2,angle=45) +
ggplot2::geom_text(mapping=ggplot2::aes(x=2,y=-0.20, label=tarName),size=3,hjust = 1) +
ggplot2::geom_polygon(data = synteny, alpha = .5, ggplot2::aes(x = x, y = y, group = group, fill = fill)) +
ggplot2::scale_fill_manual(values = c("1" = "#BFD73B", "2" = "#39ACE2", "3" = "#F16E8A",
"4" = "#2DB995", "5" = "#855823", "6" = "#A085BD",
"7" = "#2EB560", "8" = "#D79128", "9" = "#FDBB63",
"10" = "#AFDFE5", "11" = "#BF1E2D", "12" = "purple4",
"13"= "#B59F31", "14" = "#F68B1F", "15" = "#EF374B",
"16" = "#D376FF", "17" = "#009445", "18" = "#CE4699",
"19" = "#7C9ACD", "20" = "#84C441", "21" = "#404F23",
"22" = "#607F4B", "23" = "#EBB4A9", "24" = "#F6EB83",
"25" = "#915F6D", "26" = "#602F92", "27" = "#81CEC6",
"28" = "#F8DA04", "29" = "peachpuff2", "30" = "gray85", "33" = "peachpuff3",
"W" = "#9590FF", "Z" = "#666666", "Y" = "#9590FF", "X" = "#666666",
"LGE22" = "grey", "LGE64" = "gray64",
"1A" = "pink", "1B" = "dark blue", "4A" = "light green",
"Gap" = "white", "LG2" = "black", "LG5" = "#CC99CC")) +
ggplot2::theme(panel.background = ggplot2::element_blank(),
strip.background = ggplot2::element_blank(),
axis.title.y = ggplot2::element_blank(),
axis.title.x = ggplot2::element_blank(),
axis.text.x = ggplot2::element_blank(),
axis.text.y = ggplot2::element_blank(),
axis.ticks.x=ggplot2::element_blank(),
axis.ticks.y=ggplot2::element_blank(),
legend.position="none")
)
dev.off()
#ggplot2::ggsave(paste0(output,".pdf"),width=20, height =5, pointsize = 10)
}
#' Draw synteny ideograms in inferCARS style
#'
#' This function draws pairwise synteny plots in inferCARS style: each target
#' chromosome is a horizontal ideogram painted by the reference chromosome each
#' syntenic block maps to, with a diagonal segment per block showing its
#' relative position (and orientation) on the reference chromosome.
#'
#' Inputs are tab separated files without headers. The synteny blocks file
#' must have eight columns:
#' targetChr targetStart targetEnd referenceChr referenceStart referenceEnd
#' orientation notUsed
#' (the eighth column is read but ignored). The two size files each have two
#' columns: chromosome name and chromosome length.
#' Output will be a pdf file named "<file_data>.pdf" with the ideogram.
#'
#' Target is the species whose chromosomes will be painted. Reference is used
#' for painting and diagonals. Chromosomes will be in the same order as in the
#' corresponding sizes file.
#'
#' Example: draw.ideogram(synteny_file, target_chr_size, reference_chr_size)
#' @title Draw ideograms in inferCARs style
#' @param file_data Path to the syntenic blocks file
#' @param file_tarsize Path to the target chromosomes length file
#' @param file_refsize Path to the reference chromosomes length file
#' @return A pdf file with the ideogram
#' @export
draw.ideogram <- function(file_data, file_tarsize, file_refsize) {
  # To make the rectangles wider, change the height of the pdf.
  # Dummy bindings to silence R CMD check NOTEs about ggplot2 NSE variables.
  size <- tarstart <- tarend <- refchr <- ystart <- yend <- NULL
  # Read the three headerless, tab-separated input files.
  tar_sizes <- read.delim(file_tarsize, header = FALSE)
  ref_sizes <- read.delim(file_refsize, header = FALSE)
  data <- read.delim(file_data, header = FALSE)
  colnames(tar_sizes) <- c("tarchr", "size")
  colnames(ref_sizes) <- c("refchr", "size")
  colnames(data) <- c("tarchr", "tarstart", "tarend", "refchr",
                      "refstart", "refend", "orien", "notUsed")
  # Keep chromosomes in the order given by the size files (facet/legend order).
  data$tarchr <- factor(data$tarchr, levels = tar_sizes$tarchr)
  data$refchr <- factor(data$refchr, levels = ref_sizes$refchr)
  # Normalise reference coordinates to [0, 1] within their reference
  # chromosome. A "-" in the orientation column marks an inverted block, whose
  # diagonal is drawn with swapped endpoints. This vectorised lookup replaces
  # the original per-row loop; a refchr absent from ref_sizes now yields NA
  # coordinates instead of a zero-length-replacement error.
  full_len <- ref_sizes$size[match(data$refchr, ref_sizes$refchr)]
  y_fwd <- round(data$refstart / full_len, digits = 4)
  y_rev <- round(data$refend / full_len, digits = 4)
  inverted <- grepl("-", data$orien, fixed = TRUE)
  data$ystart <- ifelse(inverted, y_rev, y_fwd)
  data$yend <- ifelse(inverted, y_fwd, y_rev)
  pdf(paste0(file_data, ".pdf"), width = 8.5, height = 10, pointsize = 5)
  # Guarantee the device is closed even if plotting fails part-way through.
  on.exit(dev.off(), add = TRUE)
  # One facet per target chromosome: a white backbone rectangle, coloured
  # rectangles for each syntenic block, and a diagonal per block showing its
  # reference position/orientation.
  print(ggplot2::ggplot(data = data) +
          ggplot2::geom_rect(data=tar_sizes, mapping=ggplot2::aes(xmin=1, xmax=size, ymin=-0.1, ymax=1.1),
                             fill="white", color="black", alpha = 0.85, size = 0.2 ) +
          ggplot2::geom_rect(data=data, mapping=ggplot2::aes(xmin=tarstart, xmax=tarend, ymin=-0.1, ymax=1.1, fill=refchr),
                             color="black", alpha = 0.85, size = 0.2 ) +
          ggplot2::geom_segment(data=data, mapping=ggplot2::aes(x=tarstart, y=ystart, xend=tarend, yend=yend), size = 0.2) +
          ggplot2::facet_grid(tarchr ~ .) +
          ggplot2::labs(fill = "Reference", x = "Chromosome length (Mb)", size = 10) +
          ggplot2::theme(
            axis.title.y = ggplot2::element_blank(),
            axis.text.y = ggplot2::element_blank(),
            axis.text.x = ggplot2::element_text(size = 10),
            axis.ticks.y = ggplot2::element_blank(),
            axis.ticks = ggplot2::element_line(size = 0.2),
            strip.text.y = ggplot2::element_text(angle = 0, face = "bold", size = 10),
            panel.grid.minor = ggplot2::element_blank(),
            panel.grid.major = ggplot2::element_blank(),
            legend.title.align = 0.5) +
          ggplot2::guides(fill = ggplot2::guide_legend(ncol = 1)) +
          ggplot2::scale_fill_manual(values = c("1" = "#BFD73B", "2" = "#39ACE2", "3" = "#F16E8A",
                                                "4" = "#2DB995", "5" = "#855823", "6" = "#A085BD",
                                                "7" = "#2EB560", "8" = "#D79128", "9" = "#FDBB63",
                                                "10" = "#AFDFE5", "11" = "#BF1E2D", "12" = "purple4",
                                                "13"= "#B59F31", "14" = "#F68B1F", "15" = "#EF374B",
                                                "16" = "#D376FF", "17" = "#009445", "18" = "#CE4699",
                                                "19" = "#7C9ACD", "20" = "#84C441", "21" = "#404F23",
                                                "22" = "#607F4B", "23" = "#EBB4A9", "24" = "#F6EB83",
                                                "25" = "#915F6D", "26" = "#602F92", "27" = "#81CEC6",
                                                "28" = "#F8DA04", "29" = "peachpuff2", "30" = "gray85", "33" = "peachpuff3",
                                                "W" = "#9590FF", "Z" = "#666666", "Y" = "#9590FF", "X" = "#666666",
                                                "LGE22" = "grey", "LGE64" = "gray64",
                                                "1A" = "pink", "1B" = "dark blue", "4A" = "light green",
                                                "Gap" = "white")) +
          ggplot2::scale_x_continuous(breaks = c(0,2.5e+07,5e+07,7.5e+07,1e+08,1.25e+08,1.5e+08,1.75e+08,2e+08),
                                      labels = c("0","25","50","75","100","125","150","175","200")))
}
|
ba48afebed746ecee20d5f9c5db774fcb6cbe605
|
5f68473c2f37bd47503ca811c0af85b5a2a0936e
|
/Model Scripts/Models/Random Forest/Regular Values/Smooth Spectra/General Life Form/RF_smooth_gen_life_plants.R
|
32edaf8809b62a2604d3055db80940d53d1629c1
|
[] |
no_license
|
catherinechan70/Alaska_Spectral_Library
|
cee32bc9ee3140429ae9eae968bf2925d4f6e0ab
|
5ba1b26e20cdf143026575989c33816700be0c7a
|
refs/heads/master
| 2020-08-01T05:30:40.186038
| 2019-06-30T19:53:16
| 2019-06-30T19:53:16
| 210,881,382
| 0
| 1
| null | 2019-09-25T15:39:07
| 2019-09-25T15:39:07
| null |
UTF-8
|
R
| false
| false
| 6,614
|
r
|
RF_smooth_gen_life_plants.R
|
library(pls)
library(randomForest)
setwd("/Alaska_Spectral_Library")

### Read in the smoothed Alaska spectral library (plants only) at each
### spectral resolution: full resolution plus 5/10/50/100 nm resampled bands.
### NOTE(review): the full-resolution file name lacks the "smooth" infix the
### others have — confirm it is the intended smoothed library.
alaskaSpecLib_smooth_plants <- read.csv("processed spec/AlaskaSpecLib/alaskaSpecLib_plants.csv")
alaskaSpecLib_smooth_5nm_plants <- read.csv("processed spec/AlaskaSpecLib/alaskaSpecLib_smooth_5nm_plants.csv")
alaskaSpecLib_smooth_10nm_plants <- read.csv("processed spec/AlaskaSpecLib/alaskaSpecLib_smooth_10nm_plants.csv")
alaskaSpecLib_smooth_50nm_plants <- read.csv("processed spec/AlaskaSpecLib/alaskaSpecLib_smooth_50nm_plants.csv")
alaskaSpecLib_smooth_100nm_plants <- read.csv("processed spec/AlaskaSpecLib/alaskaSpecLib_smooth_100nm_plants.csv")

## Remove unwanted metadata columns so only spectra and the PFT_3 response
## remain for the random forest formula PFT_3 ~ .
alaskaSpecLib_smooth_plants[c("ScanID", "PFT", "PFT_2", "area")] <- NULL
alaskaSpecLib_smooth_5nm_plants[c("ScanID", "PFT", "PFT_2", "area")] <- NULL
alaskaSpecLib_smooth_10nm_plants[c("ScanID", "PFT", "PFT_2", "area")] <- NULL
alaskaSpecLib_smooth_50nm_plants[c("ScanID", "PFT", "PFT_2", "area")] <- NULL
alaskaSpecLib_smooth_100nm_plants[c("ScanID", "PFT", "PFT_2", "area")] <- NULL

## Convert the response (general life form class) to a factor so randomForest
## performs classification rather than regression.
alaskaSpecLib_smooth_plants$PFT_3 <- as.factor(alaskaSpecLib_smooth_plants$PFT_3)
alaskaSpecLib_smooth_5nm_plants$PFT_3 <- as.factor(alaskaSpecLib_smooth_5nm_plants$PFT_3)
alaskaSpecLib_smooth_10nm_plants$PFT_3 <- as.factor(alaskaSpecLib_smooth_10nm_plants$PFT_3)
alaskaSpecLib_smooth_50nm_plants$PFT_3 <- as.factor(alaskaSpecLib_smooth_50nm_plants$PFT_3)
## Bug fix: this line previously coerced the *50 nm* table's PFT_3 column into
## the 100 nm table (copy-paste error), mislabelling rows whenever the two
## tables differ in row order or length.
alaskaSpecLib_smooth_100nm_plants$PFT_3 <- as.factor(alaskaSpecLib_smooth_100nm_plants$PFT_3)
################################Model all bands########################################
# 80/20 train/test split on the full-resolution bands.
# NOTE(review): no set.seed() call anywhere in this script, so splits and
# forests are not reproducible between runs; add one upstream if needed.
dataset_size_plants <- floor(nrow(alaskaSpecLib_smooth_plants) * 0.80)
index <- sample(seq_len(nrow(alaskaSpecLib_smooth_plants)), size = dataset_size_plants)
training_smooth_plants <- alaskaSpecLib_smooth_plants[index, ]
testing_smooth_plants <- alaskaSpecLib_smooth_plants[-index, ]
### Random forest classifying general life form (PFT_3) from all bands.
rf_smooth_plants <- randomForest(PFT_3~.,data=training_smooth_plants,mtry=5,ntree=2001,importance=TRUE)
print(rf_smooth_plants)
# Side-by-side observed vs predicted classes on the held-out 20%.
result_smooth_plants <- data.frame(testing_smooth_plants$PFT_3,predict(rf_smooth_plants,testing_smooth_plants,type = "response"))

################################Model 5nm bands########################################
# 80/20 train/test split on the 5 nm resampled bands.
dataset_size_smooth_5nm_plants <- floor(nrow(alaskaSpecLib_smooth_5nm_plants) * 0.80)
index <- sample(seq_len(nrow(alaskaSpecLib_smooth_5nm_plants)), size = dataset_size_smooth_5nm_plants)
training_smooth_5nm_plants <- alaskaSpecLib_smooth_5nm_plants[index, ]
testing_smooth_5nm_plants <- alaskaSpecLib_smooth_5nm_plants[-index, ]
### Random forest on 5 nm bands.
rf_smooth_5nm_plants <- randomForest(PFT_3~.,data=training_smooth_5nm_plants,mtry=5,ntree=2001,importance=TRUE)
print(rf_smooth_5nm_plants)
result_smooth_5nm_plants <- data.frame(testing_smooth_5nm_plants$PFT_3,predict(rf_smooth_5nm_plants,testing_smooth_5nm_plants,type = "response"))

################################Model 10nm bands########################################
# 80/20 train/test split on the 10 nm resampled bands.
dataset_size_smooth_10nm_plants <- floor(nrow(alaskaSpecLib_smooth_10nm_plants) * 0.80)
index <- sample(seq_len(nrow(alaskaSpecLib_smooth_10nm_plants)), size = dataset_size_smooth_10nm_plants)
training_smooth_10nm_plants <- alaskaSpecLib_smooth_10nm_plants[index, ]
testing_smooth_10nm_plants <- alaskaSpecLib_smooth_10nm_plants[-index, ]
### Random forest on 10 nm bands.
rf_smooth_10nm_plants <- randomForest(PFT_3~.,data=training_smooth_10nm_plants,mtry=5,ntree=2001,importance=TRUE)
print(rf_smooth_10nm_plants)
result_smooth_10nm_plants <- data.frame(testing_smooth_10nm_plants$PFT_3,predict(rf_smooth_10nm_plants,testing_smooth_10nm_plants,type = "response"))

################################Model 50nm bands########################################
# 80/20 train/test split on the 50 nm resampled bands.
dataset_size_smooth_50nm_plants <- floor(nrow(alaskaSpecLib_smooth_50nm_plants) * 0.80)
index <- sample(seq_len(nrow(alaskaSpecLib_smooth_50nm_plants)), size = dataset_size_smooth_50nm_plants)
training_smooth_50nm_plants <- alaskaSpecLib_smooth_50nm_plants[index, ]
testing_smooth_50nm_plants <- alaskaSpecLib_smooth_50nm_plants[-index, ]
### Random forest on 50 nm bands.
rf_smooth_50nm_plants <- randomForest(PFT_3~.,data=training_smooth_50nm_plants,mtry=5,ntree=2001,importance=TRUE)
print(rf_smooth_50nm_plants)
result_smooth_50nm_plants <- data.frame(testing_smooth_50nm_plants$PFT_3,predict(rf_smooth_50nm_plants,testing_smooth_50nm_plants,type = "response"))

################################Model 100nm bands########################################
# 80/20 train/test split on the 100 nm resampled bands.
dataset_size_smooth_100nm_plants <- floor(nrow(alaskaSpecLib_smooth_100nm_plants) * 0.80)
index <- sample(seq_len(nrow(alaskaSpecLib_smooth_100nm_plants)), size = dataset_size_smooth_100nm_plants)
training_smooth_100nm_plants <- alaskaSpecLib_smooth_100nm_plants[index, ]
testing_smooth_100nm_plants <- alaskaSpecLib_smooth_100nm_plants[-index, ]
### Random forest on 100 nm bands.
rf_smooth_100nm_plants <- randomForest(PFT_3~.,data=training_smooth_100nm_plants,mtry=5,ntree=2001,importance=TRUE)
print(rf_smooth_100nm_plants)
result_smooth_100nm_plants <- data.frame(testing_smooth_100nm_plants$PFT_3,predict(rf_smooth_100nm_plants,testing_smooth_100nm_plants,type = "response"))
### Extract the final out-of-bag error rates and write them to disk.
# Pull the OOB error rate at the last (2001st) tree of a fitted randomForest
# model, returned as a one-column data frame with the given label.
final_oob_error <- function(model, column_label) {
  err <- as.data.frame(model$err.rate[2001, 1])
  names(err)[1] <- column_label
  err
}

error_rf_smooth_plants <- final_oob_error(rf_smooth_plants, "smooth_all_bands")
error_rf_smooth_5nm_plants <- final_oob_error(rf_smooth_5nm_plants, "smooth_5nm")
error_rf_smooth_10nm_plants <- final_oob_error(rf_smooth_10nm_plants, "smooth_10nm")
error_rf_smooth_50nm_plants <- final_oob_error(rf_smooth_50nm_plants, "smooth_50nm")
error_rf_smooth_100nm_plants <- final_oob_error(rf_smooth_100nm_plants, "smooth_100nm")

### One-row data frame of error rates, one column per spectral resolution.
error_rate_smooth_gen_life_form <- cbind(error_rf_smooth_plants,
                                         error_rf_smooth_5nm_plants,
                                         error_rf_smooth_10nm_plants,
                                         error_rf_smooth_50nm_plants,
                                         error_rf_smooth_100nm_plants)
error_rate_smooth_gen_life_form$category <- "Courser_levels"
## Write to the shared error-rate folder.
write.csv(error_rate_smooth_gen_life_form, "Model Scripts/Error Rates/Regular/error_rate_smooth_gen_life_form.csv", row.names = F)
|
00b0555ba141fa9a3a03a563fdbac8dced73c30d
|
c3534306ca0300fcfdf3b5eba7f60787589a0284
|
/data_analysis/nfp_summary_plots.R
|
45f6f5d7740d2075fe42227cd187106376c2ae88
|
[
"MIT"
] |
permissive
|
dssg/nfp
|
494d2f4747b4e51bd4babae5367579d41ede84c8
|
2d23247d5a68c0b9814f1722dac5f5704b04fab3
|
refs/heads/master
| 2021-01-17T10:23:21.249769
| 2016-05-01T14:58:23
| 2016-05-01T14:58:23
| 10,607,174
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,140
|
r
|
nfp_summary_plots.R
|
# Summary plots for the NFP demographics dataset (language, smoking,
# prematurity), built with ggplot2; ddply comes from plyr.
library(ggplot2)
library(plyr)
setwd("/mnt/data/csv_data")
nfp_demographics <- read.csv("nfp_demographics_expanded.csv", header = TRUE)
# Language: exclude = NULL keeps NA as its own level ("No Response"); the
# second factor() call only reorders the levels for plotting.
nfp_demographics$language_factor <- factor(nfp_demographics$Primary_language,
                          labels = c("No Response", "English", "Other", "Spanish"),
                          exclude = NULL)
nfp_demographics$language_factor <- factor(nfp_demographics$language_factor, levels = c("English", "Spanish", "Other", "No Response"))
# Bar chart of primary language spoken at home (legend suppressed; colours are
# redundant with the x axis).
language <- ggplot(nfp_demographics, aes(language_factor, fill=language_factor)) + geom_bar() + labs(x = "Primary Language",
                                                              y = "Count") +
  ggtitle("Language Spoken at Home") + theme(legend.position = "none")
language
# Age vs Smoking: one age histogram (1-year bins) per smoking status.
nfp_demographics$smoker =
  factor(nfp_demographics$pgsmoker, labels = c("Nonsmoker", "Smoker", "No Response"))
smoking_vs_age <- ggplot(nfp_demographics, aes(x=MomsAgeBirth,fill=smoker)) +
  geom_histogram(binwidth=1) + facet_wrap(~ smoker,ncol = 1) +
  labs(x = "Mother's Age at Birth") + ggtitle("Mothers Age vs Smoking")
smoking_vs_age
# Age vs. Prematurity Status: faceted age histograms with a dashed vertical
# line at the median mother's age within each prematurity group (via ddply).
nfp_demographics$Premature_factor =
  factor(nfp_demographics$premature, labels = c("Full Term", "Premature"))
med.fac = ddply(nfp_demographics, .(Premature_factor), function(.d)
  data.frame(x=median(.d$MomsAgeBirth, na.rm = TRUE)))
smoking_vs_premie <- ggplot(nfp_demographics, aes(x=MomsAgeBirth,fill=Premature_factor)) +
  geom_histogram(binwidth=1) + facet_wrap(~ Premature_factor,ncol =
                                        1) + geom_vline(data=med.fac, aes(xintercept=x), linetype =
                                                          "longdash") +
  labs(x = "Mother's Age at Birth", y = "Count") + ggtitle("Mothers Age vs Prematurity Status") + scale_fill_hue(name = "Prematurity Status")
smoking_vs_premie
# Relabel income brackets; exclude = NULL keeps NA as its own level,
# labelled "No Response".
nfp_demographics$income_description <-
  factor(nfp_demographics$INCOME, labels =
           c("$0-$6,000", "$6,000-$12,000", "$12,000-$20,000", "$20,000-$30,000",
             "$30,000-$40,000","$40,000+", "Lives off of parents", "No Response"), exclude = NULL)
# Relabel race statistics (NA -> "No Response").
nfp_demographics$race_factor <-
  factor(nfp_demographics$maternalrace,
         labels = c("White", "Black","Latina","Other", "No Response"), exclude = NULL)
# Breakdown by race. Fix: race_factor is discrete, so geom_bar() (one bar per
# category) is the correct count geom; geom_histogram() requires a continuous
# x variable and errors on factors in current ggplot2.
race_alone <- ggplot(nfp_demographics, aes(race_factor, fill = race_factor)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, size = 25),
        axis.text.y = element_text(size = 25),
        plot.title = element_text(size = 40),
        axis.title.x = element_text(size = 30),
        axis.title.y = element_text(size = 30),
        legend.position = "none") +
  labs(x = "Race", y = "Count") +
  scale_fill_brewer(palette = "YlOrRd", name = "Race") +
  ggtitle("NFP Mothers by Race")
# NOTE(review): unlike the other plots, race_alone is assigned but never
# auto-printed; add `race_alone` on its own line if it should be displayed.

# Income bracket counts, coloured by race.
race_vs_income <- ggplot(nfp_demographics, aes(income_description,fill=race_factor)) +
  geom_bar() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(x = "Income", y = "Count") + scale_fill_brewer(palette="YlOrRd",name="Race") +
  ggtitle("Income Brackets by Race")
race_vs_income
# Race breakdown restricted to mothers reporting they live off their parents.
parents_data <- nfp_demographics[nfp_demographics$income_description == "Lives off of parents",]
race_vs_income.parents <- ggplot(parents_data, aes(income_description,fill=race_factor)) +
  geom_bar() +
  labs(x = "Income", y = "Count") + scale_fill_brewer(palette="YlOrRd",name="Race") +
  ggtitle("Income Brackets by Race")
race_vs_income.parents
# Education: start from the HSGED response (graduated / GED / neither / NA).
nfp_demographics$education_factor <- factor(nfp_demographics$HSGED, labels =
                                              c("Graduated High School", "Received GED",
                                                "Neither", "No Response"), exclude = NULL)
# Extend the level set with pre-high-school grades so "Neither" responses can
# be replaced below by the highest grade actually completed.
levels(nfp_demographics$education_factor) <- c(levels(nfp_demographics$education_factor),
                                               c("4th Grade", "5th Grade", "6th Grade",
                                                 "7th Grade", "8th Grade",
                                                 "9th Grade", "10th Grade", "11th Grade", "No Response"))
earlier_education_factor <- factor(nfp_demographics$HSGED_Last_Grade_1, labels =
                                     c("4th Grade", "5th Grade", "6th Grade",
                                       "7th Grade", "8th Grade",
                                       "9th Grade", "10th Grade", "11th Grade", "No Response"), exclude = NULL)
# Substitute each "Neither" with the earlier-education answer for that row.
nfp_demographics$education_factor[nfp_demographics$education_factor == "Neither"] <-
  earlier_education_factor[nfp_demographics$education_factor == "Neither"]
# Impose a low-to-high ordering for plotting. "Neither" is intentionally
# absent: all such rows were replaced above, so none remain.
nfp_demographics$education_factor <- factor(nfp_demographics$education_factor,
                                            levels = c("4th Grade", "5th Grade", "6th Grade", "7th Grade",
                                                       "8th Grade", "9th Grade", "10th Grade", "11th Grade",
                                                       "Received GED", "Graduated High School", "No Response"))
# (Fix: a duplicate of the "Neither" replacement used to follow here; it was a
# dead no-op because the "Neither" level no longer exists after re-levelling.)
education <- ggplot(nfp_demographics, aes(education_factor)) + geom_bar(fill="green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) + labs(x = "Education Received", y = "Count") + ggtitle("Highest Pre-College Education")
education
# Marital Status (NA -> "No Response"), then counts by race and status.
nfp_demographics$marital_factor <- factor(nfp_demographics$marital_status,
                                          labels = c("Not Married", "Married", "No Response"),
                                          exclude = NULL)
race_vs_marriage <- ggplot(nfp_demographics, aes(race_factor, fill=marital_factor)) + geom_bar() +
  theme(axis.text.x = element_text(angle = 20, hjust = 1)) + labs(x = "Race", y = "Count") +
  ggtitle("Race vs. Marital Status") + scale_fill_hue(name = "Marital Status")
race_vs_marriage
|
228d7438edcca4ebacae13d63ec8dbce41808b3f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/echarts4r/examples/e_map_register.Rd.R
|
f17d67cedde7ed3f8a0dfab82af86d42024b4939
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 452
|
r
|
e_map_register.Rd.R
|
# Auto-extracted example for echarts4r::e_map_register. Lines prefixed "##D"
# are the "Not run" body (it downloads a GeoJSON map from the web).
library(echarts4r)
### Name: e_map_register
### Title: Register map
### Aliases: e_map_register

### ** Examples

## Not run: 
##D json <- jsonlite::read_json("http://www.echartsjs.com/gallery/data/asset/geo/USA.json")
##D 
##D USArrests %>% 
##D   dplyr::mutate(states = row.names(.)) %>% 
##D   e_charts(states) %>% 
##D   e_map_register("USA", json) %>% 
##D   e_map(Murder, map = "USA") %>% 
##D   e_visual_map(min = 0, max = 18)
## End(Not run)
|
8f0fe04ab23b87f8a697f67ef11247d9468a5d0e
|
512bce707f96f97ad8a743807369e249232649ef
|
/man/TRACE_SUMMARY_FILE_NAME.Rd
|
b7b2dba90c534ef3ba119eac83ec85878d968531
|
[
"MIT"
] |
permissive
|
iiasa/testGAMS
|
6cab6fd58263a3f7110a26d4ac10f730800d0614
|
01223a76e0c0a9981276c7b1fdcd7d75e589d4f3
|
refs/heads/master
| 2023-08-10T09:47:59.595948
| 2021-09-13T07:59:26
| 2021-09-13T07:59:26
| 293,836,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
TRACE_SUMMARY_FILE_NAME.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.R
\docType{data}
\name{TRACE_SUMMARY_FILE_NAME}
\alias{TRACE_SUMMARY_FILE_NAME}
\title{File name of GAMS trace summary file produced by \code{\link[=report_trace]{report_trace()}} in \code{re_dir}}
\format{
An object of class \code{character} of length 1.
}
\usage{
TRACE_SUMMARY_FILE_NAME
}
\description{
File name of GAMS trace summary file produced by \code{\link[=report_trace]{report_trace()}} in \code{re_dir}
}
\keyword{datasets}
|
5cbcaf888e675a142153e02c02eb1e5b1cf77e09
|
e58ccafc57b689a2190af28e6a9e27b4bc0e9a0e
|
/Qn1.R
|
f2603e8c60e740961bc6d670e67f486816406b1d
|
[] |
no_license
|
philipp91hh/Group_project
|
aa4a031320addb6c317bffc4ea34fd2d79005900
|
2fecedea6172d15ff9fc1e5e574988745efb0aac
|
refs/heads/master
| 2021-01-11T15:30:14.665121
| 2016-06-16T02:06:22
| 2016-06-16T02:06:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,018
|
r
|
Qn1.R
|
# Speed-dating analysis, question 1: setup and data load.
# NOTE(review): plyr is attached *after* dplyr here, so plyr's summarise/
# mutate/etc. mask dplyr's — confirm this ordering is intentional, as it is a
# common source of silent grouping bugs.
library("sjPlot")
library("ggplot2")
library("reshape2")
library("dplyr")
library("grid")
library("gridExtra")
library("lme4")
library("LMERConvenienceFunctions")
library("MCMCglmm")
library(tidyr)
library(plyr)
setwd("/Users/Judy1/Documents/Insead/P3/BDA/Speeddating")
# Shared ggplot theme used by all plots below.
source("theme_darrel.R")
d <- read.csv("./Data/Speed.csv")
# Recode gender from 0/1 to labelled factor (0 = Female, 1 = Male).
d$gender <- ifelse(d$gender==0, "Female", "Male")
d$gender <- factor(d$gender)
# Reshape the six "importance of attribute in a partner" scores into long
# format (one row per respondent x characteristic), keeping gender.
d_what <- d%>%
  select(attr1_s,
         sinc1_s,
         fun1_1,
         amb1_1,
         shar1_1,
         gender,
         intel1_s)%>%
  gather(Characteristic, Value, -gender, na.rm = TRUE)
# Dot plot of importance by characteristic, split by gender: raw responses as
# small points, per-gender means as large filled diamonds, and the overall
# mean as a black-outlined diamond on top.
# NOTE(review): fun.y and position_dodge(w=, h=) are deprecated argument forms
# in recent ggplot2 (use fun = / position_dodge(width =)); left unchanged here
# for compatibility with the ggplot2 version this script was written against.
p_gender <- ggplot(aes(x=Characteristic), data=d_what)+
  geom_point(aes(y=Value, x=Characteristic, group= gender, colour=gender), size=1.0, shape=16, position=position_dodge(w=0.2,h=0))+
  stat_summary(aes(y=Value, group=gender, colour=gender), fun.y="mean", geom="point", shape=18, size=5, position=position_dodge(width=0.2, height=0))+
  stat_summary(aes(y=Value, group=gender), colour="black", fun.y="mean", geom="point", shape=23, size=4.7, stroke=5, position=position_dodge(width=0.2, height=0))+
  #stat_summary(aes(y=Value, group=gender), fun.data="mean_cl_normal", geom="errorbar", width=0.25, position=position_dodge(width=0.6, height=0))+
  theme_darrel()+
  scale_y_continuous("% Importance of attribute in partner")+
  coord_cartesian(ylim = c(0, 100))+
  scale_x_discrete(labels=c("Attractive", "Sincere", "Fun", "Ambitious", "Shared Interests", "Intelligent"))
# Histogram of partner ages. Fixes: removed the stray trailing comma inside
# aes() and the redundant d$ prefix (the data frame is already supplied via
# data = d; using d$ bypasses it and breaks faceting/subsetting).
p_age <- ggplot(aes(x = age_o), data = d) +
  geom_histogram() +
  theme_darrel() +
  coord_cartesian(xlim = c(0, 60)) +
  scale_y_continuous("Count") +
  scale_x_continuous("Age (years)")
# Counts by race code. Fix: race is a discrete code (the breaks/labels below
# assume levels "1".."6"), so it is mapped as a factor with geom_bar();
# geom_histogram() needs a continuous x and conflicts with scale_x_discrete.
p_race <- ggplot(aes(x = factor(race)), data = d) +
  geom_bar() +
  theme_darrel() +
  coord_cartesian(ylim = c(0, 5000)) +
  scale_y_continuous("Count") +
  scale_x_discrete(name= "Race", breaks=c("1", "2", "3", "4", "5", "6", "NA"),
                   labels=c("Black", "White European", "Latino", "Asian", "Native American", "Other", "Not recorded"))
|
442268cd1309c7a7ae97d03de6e26f1b58cd7d97
|
cfc6a45c07d8f73165930dbaf10ceb2578aa9f8e
|
/ATTAINS/ATTAINS_UPLOAD/Assessment_Upload.R
|
db03e61d4750409cb0745314a38edc375ade369a
|
[] |
no_license
|
TravisPritchardODEQ/IR2018
|
b9deae2c794ecb7b53f3fc64e5293ab1fe750043
|
94baee642789cce9f8e49771d28ff45e61ca310f
|
refs/heads/master
| 2021-06-21T22:21:06.892038
| 2020-12-10T16:07:05
| 2020-12-10T16:07:05
| 137,256,651
| 2
| 0
| null | 2020-03-24T16:13:43
| 2018-06-13T18:41:05
|
R
|
UTF-8
|
R
| false
| false
| 11,690
|
r
|
Assessment_Upload.R
|
### Lesley Merrick 2/26/2020
### building attains upload - ASSESSMENT
library(tidyverse)
library(IRlibrary)
library(data.table)
require(RODBC)
library(readxl)
#This script will generate the upload for the assessment portion of ATTAINS
#this is test dataset filtered from \\deqhq1\WQASSESSMENT\2018IRFiles\2018_WQAssessment\Draft List\Rollup\Basin_categories
# once finalized, this should be a table in the IR2018 database
rollup <- read.csv("~/IR2018/ATTAINS/ATTAINS_UPLOAD/ALL BASINS_Parameters.csv") #%>%
#filter(AU_ID == 'OR_LK_1705011006_05_100541')
#filter(OWRD_Basin == 'Owyhee' | OWRD_Basin == 'Umpqua') ## remove this
# connect to IRdatabase
IR.sql <- odbcConnect("IR 2018")
Pollutant <- sqlFetch(IR.sql, "dbo.LU_Pollutant") # make this is cleaned up
LU_spawn <- sqlFetch(IR.sql, "dbo.LU_Spawn")
BU <- sqlFetch(IR.sql, "dbo.LU_BenUseCode")
## make DB table?
BU_2_Pollu <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Draft List/Rollup/LU Bus.csv")
AU_tbl <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Draft List/Rollup/AU_names.csv") ## update with columbia slough split
LU_delist <- read_excel("~/IR2018/ATTAINS/ATTAINS_UPLOAD/Delisting_reasons_attains.xlsx", sheet = "Sheet1")
delistings <- read.csv("~/IR2018/ATTAINS/ATTAINS_UPLOAD/ALL BASINS_delistingsv8_attains.csv") %>%
left_join(LU_delist, by= c('Reason_Code'='reason_code')) %>%
filter(!is.na(Reason_Code))
TMDL <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/ATTAINS/Revised_uploads_7april2020/Cat4-5_AU_Priority_20200402.csv") %>%
filter(TMDL_Priority %in% c('Medium','High')) %>%
select(AU_ID,PARAM_NAME,PARAM_USE_NAME,Period,TMDL_Priority)
#### Build Assessment Parameter table ####
# One row per AU x parameter x beneficial use x period, with the ATTAINS
# attainment / status / indicator fields derived from the 2018 IR category.
Param <- rollup %>%
  #filter(Assessed_in_2018 == "YES") %>%
  left_join(Pollutant, by = 'Pollu_ID') %>%
  left_join(BU_2_Pollu, by = c('Pollu_ID','WQstd_code')) %>%
  # Attach TMDL development priority where one exists for this combination.
  left_join(TMDL, by = c('AU_ID','Attains_PolluName' = 'PARAM_NAME','ben_use'='PARAM_USE_NAME','Period')) %>%
  mutate(TMDL_Priority = as.character(TMDL_Priority)) %>%
  # Combinations without a Medium/High TMDL priority default to "Low".
  mutate(PARAM_PRIORITY_RANKING = ifelse(is.na(TMDL_Priority), "Low",TMDL_Priority)) %>%
  mutate(ben_use = as.character(ben_use)) %>%
  # Spawning-period rows are reported against the dedicated spawning use.
  mutate(attains_use = ifelse(Period == 'Spawning',"fish and aquatic life - spawning", ben_use)) %>%
  mutate(Para_Attainment = case_when(IR_category == 'Category 2' ~ "meeting criteria",
                                     IR_category %in% c('Category 5','Category 4','Category 4A','Category 4B','Category 4C') ~ "not meeting criteria",
                                     IR_category %in% c('Category 3','Category 3D','Category 3B','Category 3C') ~ "Not enough information",
                                     TRUE ~ "")) %>%
  # Fix: the impaired list below previously repeated 'Category 4B' and omitted
  # 'Category 4C'; it now mirrors the Para_Attainment list above (confirm
  # against ATTAINS business rules that 4C should carry status "Cause").
  mutate(Para_Status = case_when(IR_category == 'Category 2' ~ "Meeting Criteria",
                                 IR_category %in% c('Category 5','Category 4','Category 4A','Category 4B','Category 4C') ~ "Cause",
                                 IR_category %in% c('Category 3','Category 3D','Category 3B','Category 3C') ~ "Insufficient Information",
                                 TRUE ~ "")) %>%
  # Indicator flag is "Y" only for categories not in this exclusion list.
  mutate(Param_Indicator = ifelse(IR_category %in% c('Category 2','Category 4C','Category 3','Category 3D',
                                                     'Category 3B','Category 3C'),"N","Y")) %>%
  mutate(Param_trend = "U",
         PARAM_COMMENT = Rationale,
         Para_agency_code = ifelse(Para_Status == "Cause","S",""),
         # Anything assessed in the 2018 cycle is reported with listing year
         # 2020; otherwise keep the historical listing year.
         Param_Listed_year = ifelse(year_assessed ==2018, 2020, Year_listed)) %>% ### remove this for 2022
  left_join(delistings, by = c('AU_ID','Pollu_ID','WQstd_code','Period')) %>%
  distinct()
### build seasonal ####
# must first run AU_Spawn_dates.R
# Builds the ATTAINS "season" records for the spawning use: for each AU the
# earliest spawn-window start and latest spawn-window end across temperature
# and dissolved-oxygen criteria.
Temp_s <- Param %>%
  filter(Char_Name == 'Temperature' & Period =='Spawning') %>%
  filter(Assessed_in_2018 == "YES")
# Earliest spawning start date per AU (rows whose start_rank is not the
# minimum are tagged "X" and dropped).
Temp_spawn_Start <- Temp_s %>%
  left_join(AU_Temp_Spawn, by = "AU_ID") %>%
  filter(!is.na(Temp_SpawnStart)) %>%
  select(AU_ID, Temp_SpawnStart,start_rank, OWRD_Basin) %>%
  group_by(AU_ID) %>%
  mutate(min_start = ifelse(start_rank == min(start_rank), Temp_SpawnStart, "X")) %>%
  filter(!min_start == 'X') %>%
  distinct()
# Latest spawning end date per AU.
# NOTE(review): this filters on Temp_SpawnStart (not Temp_SpawnEnd) — presumably
# rows with no spawn window have NA starts; confirm this is intentional.
Temp_spawn_End <- Temp_s %>%
  left_join(AU_Temp_Spawn, by = "AU_ID") %>%
  filter(!is.na(Temp_SpawnStart)) %>%
  select(AU_ID,Temp_SpawnEnd,end_rank,OWRD_Basin) %>%
  group_by(AU_ID) %>%
  mutate(max_end = ifelse(end_rank == max(end_rank), Temp_SpawnEnd, "X")) %>%
  filter(!max_end == 'X') %>%
  distinct()
# Combine start/end into the ATTAINS season layout.
# NOTE(review): OWRD_Basin appears in all three joined frames, so the bare
# select(OWRD_Basin) resolves to the suffix-free copy from the last join —
# works, but fragile; verify the intended column survives.
Temp_Spawn <- Temp_s %>%
  left_join(Temp_spawn_Start, by = 'AU_ID') %>%
  left_join(Temp_spawn_End, by = 'AU_ID') %>%
  mutate(attains_use = "fish and aquatic life - spawning") %>%
  select(AU_ID,Attains_PolluName,attains_use,Para_Attainment,min_start,max_end,OWRD_Basin) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID,PARAM_NAME=Attains_PolluName,PARAM_USE_NAME=attains_use,PARAM_ATTAINMENT_CODE=Para_Attainment,
         SEASON_START=min_start,SEASON_END=max_end)
# Same process for DO
DO_s <- Param %>%
  filter(Char_Name == 'Dissolved Oxygen' & Period =='Spawning') %>%
  filter(Assessed_in_2018 == "YES")
DO_spawn_Start <- DO_s %>%
  left_join(AU_DO_Spawn, by = "AU_ID") %>%
  filter(!is.na(DO_SpawnStart)) %>%
  select(AU_ID, DO_SpawnStart,start_rank,OWRD_Basin) %>%
  group_by(AU_ID) %>%
  mutate(min_start = ifelse(start_rank == min(start_rank), DO_SpawnStart, "X")) %>%
  filter(!min_start == 'X') %>%
  distinct()
DO_spawn_End <- DO_s %>%
  left_join(AU_DO_Spawn, by = "AU_ID") %>%
  filter(!is.na(DO_SpawnStart)) %>%
  select(AU_ID,DO_SpawnEnd,end_rank,OWRD_Basin) %>%
  group_by(AU_ID) %>%
  mutate(max_end = ifelse(end_rank == max(end_rank), DO_SpawnEnd, "X")) %>%
  filter(!max_end == 'X') %>%
  distinct()
DO_Spawn <- DO_s %>%
  left_join(DO_spawn_Start, by = 'AU_ID') %>%
  left_join(DO_spawn_End, by = 'AU_ID') %>%
  mutate(attains_use = "fish and aquatic life - spawning") %>%
  select(AU_ID,Attains_PolluName,attains_use,Para_Attainment,min_start,max_end,OWRD_Basin) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID,PARAM_NAME=Attains_PolluName,PARAM_USE_NAME=attains_use,PARAM_ATTAINMENT_CODE=Para_Attainment,
         SEASON_START=min_start,SEASON_END=max_end)
# Stack temperature + DO season rows and write the upload file.
Season <- rbind(Temp_Spawn,DO_Spawn) %>% distinct()
write.csv(Season,"ATTAINS/ATTAINS_UPLOAD/Season_all.csv", row.names = FALSE)
#### Build ATTAINS Assessment- uses table####
# should we add additional data - monitoring location information ?
# Rolls parameter-level IR categories up to a use-level attainment code per AU:
# any impaired parameter => "N", else any attaining => "F", else insufficient
# info => "I", otherwise "X".
Use <- Param %>%
  filter(!Period == 'Spawning') %>%
  select(AU_ID) %>%
  distinct(AU_ID, .keep_all = TRUE) %>%
  left_join(AU_tbl, by = 'AU_ID') %>%
  left_join(BU, by = c('AU_UseCode' = 'ben_use_code')) %>%
  left_join(Param, by = c('AU_ID','ben_use_id')) %>%
  # ben_use.x is the use name from the BU lookup (suffix from the Param re-join).
  group_by(AU_ID, ben_use.x) %>%
  summarise(total_samples = n(),
            num_impaired = sum(IR_category %in% c('Category 5','Category 4','Category 4A','Category 4B','Category 4C')),
            num_attaining = sum(IR_category == 'Category 2'),
            num_insuff = sum(IR_category %in% c('Category 3','Category 3D','Category 3B','Category 3C'))) %>%
  mutate(AU_Use_Status = case_when(num_impaired >= 1 ~ "N",
                                   num_attaining >= 1 & num_impaired == 0 ~ "F",
                                   num_insuff >= 1 & num_impaired == 0 & num_attaining == 0 ~ "I",
                                   TRUE ~ "X")) %>%
  mutate(Use_agency_code = "S") %>%
  select(AU_ID,ben_use.x,AU_Use_Status,Use_agency_code) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID,USE_NAME = ben_use.x, USE_ATTAINMENT_CODE = AU_Use_Status, USE_AGENCY_CODE = Use_agency_code)
## manually adding spawning use
# Spawning-period rows are rolled up separately under the dedicated spawning
# use name, using the same N/F/I/X rules as above.
Use_spawn <- Param %>%
  filter(Period == 'Spawning') %>%
  mutate(USE_NAME = "fish and aquatic life - spawning") %>%
  group_by(AU_ID, USE_NAME) %>%
  summarise(total_samples = n(),
            num_impaired = sum(IR_category %in% c('Category 5','Category 4','Category 4A','Category 4B','Category 4C')),
            num_attaining = sum(IR_category == 'Category 2'),
            num_insuff = sum(IR_category %in% c('Category 3','Category 3D','Category 3B','Category 3C'))) %>%
  mutate(AU_Use_Status = case_when(num_impaired >= 1 ~ "N",
                                   num_attaining >= 1 & num_impaired == 0 ~ "F",
                                   num_insuff >= 1 & num_impaired == 0 & num_attaining == 0 ~ "I",
                                   TRUE ~ "X")) %>%
  mutate(Use_agency_code = "S") %>%
  select(AU_ID,USE_NAME,AU_Use_Status,Use_agency_code) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID, USE_ATTAINMENT_CODE = AU_Use_Status, USE_AGENCY_CODE = Use_agency_code) %>%
  distinct()
# get OWRD basins
# AU -> OWRD basin lookup for the 2018-assessed AUs.
AU_OWRD <- Param %>%
  filter(Assessed_in_2018 == "YES") %>%
  select(AU_ID,OWRD_Basin) %>%
  distinct()
# Combine regular and spawning uses, then attach the basin name.
Use_all <- rbind(Use,Use_spawn)
Use_basin <- Use_all %>%
  left_join(AU_OWRD, by = c('ASSESSMENT_UNIT_ID' = 'AU_ID')) %>%
  distinct()
### filtered down table for upload and checked for designated uses
# Final parameter upload: keep only rows whose (AU, use) pair exists in the
# use table (i.e. the use is actually designated), then rename columns to the
# ATTAINS batch-upload template.
Param_upload <- Param %>%
  # filter(Assessed_in_2018 == "YES") %>%
  left_join(Use_all, by = c('AU_ID' = 'ASSESSMENT_UNIT_ID','attains_use' = 'USE_NAME')) %>%
  filter(!is.na(USE_ATTAINMENT_CODE)) %>%
  select(AU_ID,Attains_PolluName,attains_use,Para_Status,Para_Attainment,Para_agency_code,
         Param_Indicator,Param_Listed_year,PARAM_PRIORITY_RANKING,PARAM_COMMENT,
         'Delisting Reason Code',Rationale.y, OWRD_Basin) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID,PARAM_NAME=Attains_PolluName,PARAM_USE_NAME=attains_use,
         PARAM_STATUS_NAME = Para_Status,PARAM_ATTAINMENT_CODE=Para_Attainment,
         PARAM_AGENCY_CODE=Para_agency_code,PARAM_POLLUTANT_INDICATOR=Param_Indicator,
         PARAM_YEAR_LISTED=Param_Listed_year,
         PARAM_DELISTING_REASON ='Delisting Reason Code',
         PARAM_DELISTING_COMMENT = Rationale.y) %>%
  mutate(PARAM_DELISTING_AGENCY = "S") %>%
  filter(!is.na(PARAM_NAME))
# Keep only uses that have at least one uploaded parameter.
# NOTE(review): OWRD_Basin.x comes from Use_basin (the join duplicates the
# basin column); confirm that is the intended copy.
Use_trim <- Use_basin %>%
  left_join(Param_upload, by = c('ASSESSMENT_UNIT_ID','USE_NAME' = 'PARAM_USE_NAME')) %>%
  filter(!is.na(PARAM_NAME)) %>%
  select('ASSESSMENT_UNIT_ID','USE_NAME',USE_ATTAINMENT_CODE,USE_AGENCY_CODE,OWRD_Basin.x)
write.csv(Use_trim,"ATTAINS/ATTAINS_UPLOAD/Use_all2.csv", row.names = FALSE)
write.csv(Param_upload,"ATTAINS/ATTAINS_UPLOAD/Param_all.csv", row.names = FALSE)
#### Build ATTAINS Assessment- general table####
## this may needs work for year last assessed and AU summary?
# General AU table: one row per AU with agency code and the most recent
# assessment cycle (2018 assessments map to the 2020 reporting cycle).
Gen <- Param %>%
  group_by(AU_ID) %>%
  # Keep only each AU's most recently assessed rows, then collapse to one row.
  mutate(keep = ifelse(year_assessed == max(year_assessed), 1, 0 )) %>%
  filter(keep == 1) %>%
  distinct(AU_ID, .keep_all = TRUE) %>%
  select(-keep) %>%
  mutate(CYCLE_LAST_ASSESSED = ifelse(year_assessed ==2018, 2020,"")) %>% # update
  mutate(AGENCY_CODE = "S") %>%
  select(AU_ID,AGENCY_CODE,CYCLE_LAST_ASSESSED) %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID) %>%
  left_join(AU_OWRD, by = c('ASSESSMENT_UNIT_ID' = 'AU_ID')) %>%
  distinct()
write.csv(Gen,"ATTAINS/ATTAINS_UPLOAD/Gen_all.csv", row.names = FALSE)
#### associated actions
# Link assessed AU/pollutant pairs to existing TMDL/action IDs for upload.
aa_all <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/ATTAINS/Revised_uploads_7april2020/associated-actions_new.csv")
aa_all_2 <- read.csv("//deqhq1/WQASSESSMENT/2018IRFiles/2018_WQAssessment/Final List/Misc/Action_AU_Parameter.csv")
# NOTE(review): aa_all is read but never used below — dead load, or a
# placeholder for a future join; verify before removing.
a_actions <- Param %>%
  select(AU_ID,Attains_PolluName,Pollu_ID) %>%
  distinct() %>%
  left_join(aa_all_2, by = c('AU_ID','Pollu_ID')) %>%
  filter(!is.na(ACTION_ID)) %>%
  left_join(AU_OWRD, by = c('AU_ID')) %>%
  distinct() %>%
  rename(ASSESSMENT_UNIT_ID = AU_ID,PARAM_NAME=Attains_PolluName)
write.csv(a_actions,"ATTAINS/ATTAINS_UPLOAD/a_actions_all_2.csv", row.names = FALSE)
|
d6daae14e76aaf445ca73e6738d5f49342cb17b7
|
94aed35f1f7cca636419b88a53799f34e5c5dfee
|
/man/matchesGene2Symbol.Rd
|
70eaf3aeaa1788e8e2479aaa73ae3f53f8c7b7c5
|
[
"MIT"
] |
permissive
|
trichelab/basejump
|
a4a3b9e58016449faeb9b3d77cf1c09d4eafe4c7
|
6724b10dbf42dd075c7db5854a13d9509fe9fb72
|
refs/heads/master
| 2020-12-12T11:54:17.660956
| 2020-01-08T13:24:07
| 2020-01-08T13:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,387
|
rd
|
matchesGene2Symbol.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matchesGene2Symbol.R
\name{matchesGene2Symbol}
\alias{matchesGene2Symbol}
\title{Check that user-defined gene input matches expected values}
\usage{
matchesGene2Symbol(x, genes, gene2symbol, .xname = getNameInParent(x))
}
\arguments{
\item{x}{Object.}
\item{genes}{\code{character}.
Gene identifiers.}
\item{gene2symbol}{\code{Gene2Symbol}.
Gene-to-symbol mappings. Must contain \code{geneID} and \code{geneName} columns. See
\code{Gene2Symbol} for more information.}
\item{.xname}{Name of object defined in \code{x}.
\emph{Not intended to be used directly.}}
}
\value{
\code{TRUE} on success;
\code{FALSE} on failure, with cause set.
}
\description{
Check that user-defined gene input matches expected values
}
\note{
Updated 2019-08-11.
}
\examples{
x <- S4Vectors::DataFrame(
"sample1" = c(1L, 2L),
"sample2" = c(3L, 4L),
row.names = c("gene1", "gene2")
)
print(x)
g2s <- Gene2Symbol(
object = S4Vectors::DataFrame(
geneID = c("ENSG00000000003", "ENSG00000000005"),
geneName = c("TSPAN6", "TNMD"),
row.names = rownames(x)
)
)
print(g2s)
geneIDs <- g2s[["geneID"]]
print(geneIDs)
geneNames <- g2s[["geneName"]]
print(geneNames)
matchesGene2Symbol(x = x, genes = geneIDs, gene2symbol = g2s)
matchesGene2Symbol(x = x, genes = geneNames, gene2symbol = g2s)
}
|
5f32bd6e908554e40fa7e3188ef70340ce5bdd66
|
d387b7f20c42c27d2727087fc51a56871d27ca59
|
/metagenomicProcessing.R
|
35a6c18c107b4542bae8cb419a4b44135c0ed8fe
|
[] |
no_license
|
pakpoomton/bananaMetagenomics
|
3c175d6ebd424a14fc628b9200c6ce93a7844caa
|
01464b10a5d15407fafd235004cb4aee927ee3a0
|
refs/heads/master
| 2022-09-05T22:51:42.407010
| 2020-06-02T08:42:29
| 2020-06-02T08:42:29
| 268,718,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,986
|
r
|
metagenomicProcessing.R
|
# Builds relative-abundance stacked bar plots of banana metagenomic OTU counts
# at a chosen taxonomic level using phyloseq, and saves the figure to PNG.
# specify working directory
setwd("C:/Users/pakpoomsu/Desktop/Banana_Metagenomics/")
# specify data file (csv)
rawData = read.csv('HN00126595_Report_otu_table_shared_with_tax_assignment.xlsx - Sheet1.csv', sep = ',', header = T)
# specify which taxonomic level to create stack plot (Phylum, Class, Order, Family, Genus, Species)
OTUlevel = "Phylum"
# select only relevant data
# Columns 15..end hold per-sample counts — TODO confirm against the report layout.
otumat = as.matrix(rawData[,15:dim(rawData)[2]])
otumat = otumat[,1:15] # use only banana data, excluding zerg data
rownames(otumat) = rawData$Group
# Columns 3..9 hold the taxonomy (Kingdom..Species) — assumed fixed layout.
taxmat = as.matrix(rawData[,3:9])
rownames(taxmat) = rawData$Group
# do not show data on stack bar if count (%) of a taxa is below this CUTOFF
CUTOFF = 0.01
# scaling plot margins
sc=2
# relOTU = TRUE converts counts to per-sample relative abundance and applies
# the CUTOFF filter; FALSE plots raw counts unfiltered.
relOTU = TRUE
# order of bar plot
desired_order = c("C1", "C2", "C3", "F1", "F2", "F3",
                  "D1", "D2", "D3", "X0036.1" , "X0036.2", "X0036.3",
                  "X1887.1", "X1887.2", "X1887.3")
###########################################
####### load libraries ###################
library(phyloseq)
packageVersion("phyloseq")
library("ggplot2")
packageVersion("ggplot2")
library("scales")
packageVersion("scales")
library("grid")
packageVersion("grid")
library(matrixStats)
theme_set(theme_bw())
library("wesanderson")
######################################################
########### Processing data before plotting #########
## convert data into phyloseq compatible forms
OTU = otu_table(otumat, taxa_are_rows = TRUE)
TAX = tax_table(taxmat)
physeq = phyloseq(OTU, TAX) # initial phyloseq object from raw data
phyloGlom = tax_glom(physeq, OTUlevel) # group data by pre-specified OTUlevel
## for taxonomic matrix, exclude data at levels below OTUlevel
TAXmt = tax_table(phyloGlom)
indTaxLevel = match(OTUlevel,colnames(TAXmt))
TAXm = TAXmt[, 1:indTaxLevel]
## get otu table (after grouping)
OTUm = otu_table(phyloGlom, taxa_are_rows = TRUE)
# convert absolute to relative count as specified
if (relOTU) {
  # Divide each column (sample) by its total so abundances sum to 1 per sample.
  OTUm = OTUm / rep(colSums(OTUm), each = nrow(OTUm))
  # screen out taxa with abundance lower than CUTOFF
  # (a taxon is kept if it exceeds CUTOFF in at least one sample)
  idxSel = rowMaxs(as.matrix(OTUm)) > CUTOFF
  OTUm = OTUm[idxSel, ]
  TAXm = TAXm[idxSel, ]
}
physeqm = phyloseq(OTUm, TAXm) ## phyloseq object after grouping
# Fixed seed so the shuffled palette assignment is reproducible.
set.seed(123458)
colorset = sample(wes_palette(length(rownames(OTUm)), name = "Darjeeling1", type = "continuous"))
# generate abundance bar plots
p <- plot_bar(physeqm, fill = OTUlevel) + theme(plot.margin = margin(6*sc,1*sc,6*sc,1*sc,"cm")) +
  theme(legend.position="right") + guides(fill=guide_legend(ncol=1)) +
  theme(text = element_text(size=20)) + scale_fill_manual(values = colorset)
# take care of plotting order
# (relevel the Sample factor inside the ggplot data so bars follow desired_order)
pd <- p$data
pd$Sample <- factor(pd$Sample, levels = desired_order)
p$data <- pd
print(p)
ggsave(filename = "myplot.png", plot = last_plot(),
       width=40, height=40, unit="cm")
### ref. https://github.com/joey711/phyloseq/issues/616
|
d888be4459d839ebe869eb1fb0c8c890214408b4
|
1613012879c0b411a4b67b248b2ea432c1fa088b
|
/WQgraphs_6600V2raw_looping.R
|
9efb525b79d859a8eac396cb7faba96ac54cd880
|
[] |
no_license
|
swmpkim/sonde_graphing
|
1333925a806329e02966ab8ebc1f99edfcf62c13
|
8a63035e4b56f9659e6124e2c192fbef57883291
|
refs/heads/master
| 2020-04-17T09:03:17.226473
| 2017-03-22T15:27:25
| 2017-03-22T15:27:25
| 67,614,782
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,024
|
r
|
WQgraphs_6600V2raw_looping.R
|
# Script to make loop through a folder and make graphs of all raw 6600 csv files
# update 3-22-2017 so you no longer have to delete anything from the raw csv
# contact kimberly.cressman@dmr.ms.gov if you have problems with this script
### IMPORTANT
# The folder-choice pop-up does NOT show up on top of other programs
# You MUST either click on the RStudio icon to minimize RStudio OR just minimize everything else to make the pop-up visible
### INSTRUCTIONS
# 1 - Put your cursor somewhere in this window
# 2 - Push 'Ctrl' + 'A' to select the whole script
# 3 - Push 'Ctrl' + 'R' to run the script
# 4 - Minimize RStudio to get to the pop-up and choose the folder your files are in
# 5 - Magic happens
# 6 - Look in the folder you selected and pdf files should be there
#Reset R's Brain
rm(list=ls())
# interactively choose which folder you want to work in
library(tcltk) #this package is part of base R and does not need to be installed separately
my.dir <- tk_choose.dir(getwd(), caption = "Set your working directory")
setwd(my.dir)
# get the list of files in the directory that you want to graph
names.dir <- dir(pattern = ".csv")
n <- length(names.dir)
# start the loop
for(i in 1:n)
{
myFile <- names.dir[i] #whatever name is next in the loop
# interactively choose the file to work on
#myFile <- tk_choose.files(getwd(), caption="Choose file")
# read in a csv, but skip the 2nd row
# this code courtesy of Paul Hiemstra, http://stackoverflow.com/questions/15860071/read-csv-header-on-first-line-skip-second-line
all_content <- readLines(myFile)
skip_second <- all_content[-2]
ysi.data <- read.csv(textConnection(skip_second), header = TRUE, stringsAsFactors = FALSE)
# generate some names automatically from the original file name
x <- nchar(myFile) # counting the characters in the file name
Title = substr(myFile,x-16,x-4) # this should return the full name of the file (minus '.csv')
Titlepdf <- paste(Title, ".pdf", sep="") #this will be used later for the output file
#format Date.Time as POSIXct, which will turn it into a number that can be graphed
ysi.data$Date.Time <- as.POSIXct(ysi.data$Date.Time, format = "%m/%d/%Y %H:%M")
#check data to make sure it looks the way you think it should
names(ysi.data) #column names
str(ysi.data) #names; format (number, date, factor, character, etc.); first few values
head(ysi.data) #returns the first 6 rows of data, so you can make sure things were read in correctly
# open up a pdf and start graphing
pdf(file=Titlepdf) #pdf file will be saved in the same directory from which you pulled the csv file
#make the graph page layout 4 rows and 2 columns so all graphs will fit on a page
par(mfcol=c(4,2), mar=c(2.1, 4.1, 1.1, 1.1), oma=c(1,1,2,1))
#make line graphs
plot(Temp~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkred")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(SpCond~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkblue")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(Sal~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkgreen")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(Depth~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkslategray")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(ODOsat~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkorange")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(ODO~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkmagenta")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(pH~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkturquoise")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
plot(Turbid.~Date.Time, data=ysi.data, type="l", xlab = "", xaxt='n', col="darkkhaki")
axis.POSIXct(1, at=seq(min(ysi.data$Date.Time, na.rm=TRUE), max(ysi.data$Date.Time, na.rm=TRUE), length.out=5), format="%m/%d", cex.axis=0.9)
# put the title of the file above all the plots on the page
mtext(Title, outer=TRUE, side=3, cex=0.9, font=2)
#reset to one graph per page
par(mfrow=c(1,1))
#turn off pdf printer
dev.off()
}
print("Finished!")
|
bc1601e0ce74d4db260109e21867e396f9e9389c
|
78bff63e50e41adb46a662a99f42d0092d121cdf
|
/app.R
|
2d5c4096f83dd8f157ec105a83e470565531c7b7
|
[] |
no_license
|
JackEdTaylor/SupervisorRecommender
|
8a63ea6d9a5e97cd6ba0c04be253f90718fde8f4
|
3e892518145498b2faa695268434c6ccef462e89
|
refs/heads/master
| 2020-05-17T19:50:00.914101
| 2019-10-21T13:09:58
| 2019-10-21T13:09:58
| 183,928,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,699
|
r
|
app.R
|
# Shiny dashboard that recommends MSc supervisors by matching user-selected
# topics/methods against a supervisors.csv lookup.
library(shiny)
library(shinydashboard)
library(tidyverse)
# Sidebar width in pixels, shared between header and sidebar.
sidebarwidth <- 400
# Load supervisor records; normalize comma lists (strip spaces after commas)
# and build a display name, sorted by last name.
dat <- read_csv("supervisors.csv") %>%
  mutate(
    topics = str_replace_all(topics, "(,) +", ","),
    methods = str_replace_all(methods, "(,) +", ",")
  ) %>%
  mutate(name = paste(name_first, name_last, sep = " ")) %>%
  arrange(name_last)
# Distinct topic/method vocabularies for the checkbox filters.
topics_vec <- sort(unique(unlist(str_split(dat$topics, ","))))
methods_vec <- sort(unique(unlist(str_split(dat$methods, ","))))
# UI: dashboard with topic/method checkbox filters in the sidebar and one
# server-rendered placeholder box per supervisor in the body (rendered
# server-side so they can be reordered by relevance).
ui <- dashboardPage(
  skin = "blue",
  dashboardHeader(title = "Supervisor Recommender", titleWidth = sidebarwidth,
                  tags$li(a(href = "https://github.com/JackEdTaylor/SupervisorRecommender",
                            HTML(paste(icon("github"), " GitHub")),
                            title = "GitHub Repository"),
                          class="dropdown")),
  dashboardSidebar(
    width = sidebarwidth,
    sidebarMenu(
      fluidRow(
        column(12, HTML(
          '<p>Welcome! This app is designed to help you identify a suitable supervisor for the <a href="https://www.gla.ac.uk/postgraduate/taught/psychologicalscienceresearchmethodsof/">Research Methods of Psychological Science MSc</a> course at the University of Glasgow.<br><br>Just tick any topics or methods you are interested in, and the app will suggest supervisors in order of how relevant their research is.<br><br>Click on a supervisor to find out more about them.<br><br></p>'
        ))
      ),
      fluidRow(
        tags$div(id = "lh-checkboxes", column(6, checkboxGroupInput("topics", "Topics", topics_vec))),
        column(6, checkboxGroupInput("methods", "Methods", methods_vec))
      )
    )
  ),
  dashboardBody(
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
      tags$link(rel="shortcut icon", href="https://www.gla.ac.uk/favicon.ico")
    ),
    # seq_len() instead of 1:nrow() — safe if the CSV is ever empty.
    lapply(seq_len(nrow(dat)), function(i) {
      box_id <- sprintf("box_%i", i)
      uiOutput(box_id)
    })
  )
)
# Server: rank supervisors by how many of the selected topics/methods they
# match, then render one box per supervisor in relevance order, with matched
# terms bolded.
server <- function(input, output) {
  # Reactive: dat with per-supervisor match counts for the current selections,
  # sorted by total matches (descending).
  dat_rel <- reactive({
    out <- dat
    out$methods_n <- sapply(dat$methods, function(sup_meth) {
      matches <- sapply(str_split(sup_meth, ","), function(meth) {meth %in% input$methods})
      length(matches[matches])
    }) %>%
      unname()
    out$topics_n <- sapply(dat$topics, function(sup_meth) {
      matches <- sapply(str_split(sup_meth, ","), function(meth) {meth %in% input$topics})
      length(matches[matches])
    }) %>%
      unname()
    out %>%
      mutate(total_n = topics_n + methods_n) %>%
      arrange(desc(total_n))
  })
  # One render per box slot; slot i always shows the i-th most relevant
  # supervisor. seq_len() instead of 1:nrow() — safe if the CSV is ever empty.
  lapply(seq_len(nrow(dat)), function(i) {
    box_id <- sprintf("box_%i", i)
    output[[box_id]] <- renderUI({
      dat_rel_df <- dat_rel()
      sup_dat <- dat_rel_df[i,]
      # Show the match count in the title only when something is selected.
      box_title <- if (length(c(input$methods, input$topics)) > 0) {
        sprintf("%s (%i matches)", sup_dat$name, sup_dat$total_n)
      } else {
        sup_dat$name
      }
      # Bold any topic terms that match the current selection.
      topics_matches_str <- if (length(input$topics) > 0) {
        topics_bolded <- unlist(str_split(sup_dat$topics, ","))
        topics_bolded[topics_bolded %in% input$topics] <- paste("<b>",topics_bolded[topics_bolded %in% input$topics],"</b>", sep="")
        HTML(sprintf("Topics <b>(%i matches)</b>:<br>%s", sup_dat$topics_n, paste(topics_bolded, collapse=", ")))
      } else {
        HTML(sprintf("Topics:<br>%s", str_replace_all(sup_dat$topics, ",", ", ")))
      }
      # Same treatment for methods.
      methods_matches_str <- if (length(input$methods) > 0) {
        methods_bolded <- unlist(str_split(sup_dat$methods, ","))
        methods_bolded[methods_bolded %in% input$methods] <- paste("<b>",methods_bolded[methods_bolded %in% input$methods],"</b>", sep="")
        HTML(sprintf("Methods <b>(%i matches)</b>:<br>%s", sup_dat$methods_n, paste(methods_bolded, collapse=", ")))
      } else {
        HTML(sprintf("Methods:<br>%s", str_replace_all(sup_dat$methods, ",", ", ")))
      }
      fluidRow(
        box(
          title = a(href = sup_dat$page, box_title),
          width = 12, status = "info", solidHeader = TRUE,
          fluidRow(
            column(12,
                   HTML(
                     paste(
                       c(
                         sprintf('<p><a href="%s"><img src="%s"></a>%s', sup_dat$page, sup_dat$image, sup_dat$message),
                         topics_matches_str,
                         methods_matches_str,
                         "</p>"),
                       collapse = "<br><br>"
                     )
                   )
            )
          )
        )
      )
    })
  })
}
shinyApp(ui = ui, server = server)
|
22e60aec6d933c7414b0e719aecdbadd9cb79a32
|
1d3795c570b0bd6a2f64dbc0af8d3bcf3a285255
|
/R/sample.R
|
c1a51f6486624d9d387d65930693c6886a980e5b
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
dnlvgt/rktiq
|
03fef4844e58f5c53ca3a30a5c65ff26e134456b
|
be8ff27e8b4eef80f541705ac4ed5c6458593e04
|
refs/heads/master
| 2020-12-02T17:32:20.220974
| 2020-01-05T13:51:42
| 2020-01-05T13:51:42
| 231,075,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,312
|
r
|
sample.R
|
# Sampling von Ereignissen ------------------------------------------------------
#' Zufaellige (klassenbalancierte) Ereignisse
#'
#' Bestimmt in einem Zeitraum zufaellig eine feste Anzahl an Ereignissen
#' (Sampling). Diese Ereignisse beziehen sich auf eine Menge an Zielereignissen
#' (z.B. Stoerungen) und werden dabei in positive und negative Ereignisse
#' unterschieden. Zur Platzierung der Ereignisse wird auf die Funktion
#' \code{sample_random} zurueckgegriffen (\code{sample_balanced}) bzw. wird
#' sequentiell die Menge aller im Zeitraum moeglichen Ereignisse gebildet und
#' anschliessend daraus eine zufaellige Stichprobe gezogen
#' (\code{sample_balanced_seq}).
#'
#' Waehrend positive Ereignisse nahe vor einem Zielereignis liegen, befinden
#' sich negative Ereignisse ausserhalb dieser Bereiche (i.d.R. also weit vor
#' einem bzw. nach einem Zielereignis). Die Ereignisse werden so von der
#' Funktion platziert, dass das Verhaeltnis von positiven und negativen
#' Ereignissen gleichverteilt ist und somit eine Klassenbalance hergestellt
#' wird.
#'
#' Der Gesamtzeitraum wird durch einen Start- und einen Endzeitpunkt festgelegt.
#' In diesem Zeitraum koennen zusaetzliche Offtime-Intervalle definiert werden,
#' in denen kein Sampling stattfindet (d.h. erzeugte Ereignisse ueberlappen sich
#' nicht mit Offtimes). Neben der Ereignislaenge kann auch eine potentielle
#' zeitliche Ueberlappung zwischen Ereignissen gesteuert werden. Um
#' Klassenbalance herzustellen, wird weiterhin die Menge der Zielereignisse und
#' die Laenge des Zeitfensters vor den Zielereignissen benoetigt. Um zusaetzlich
#' bei \code{sample_random_seq} die zeitliche Ausrichtung der Ereignisse zu
#' beeinflussen, kann deren Erzeugungsrichtung sowie ihre Orientierung
#' bezueglich der Zielereignisse angepasst werden.
#'
#' @inheritParams sample_random
#' @param target_event Dataframe, der die Zielereignisse beinhaltet. Die
#' Ereignisse sind anhand ihrer Start-/Endzeitpunkte beschrieben, die jeweils
#' in den Spalten \emph{start} und \emph{end} uebergeben werden.
#' @param target_cut_in_sec Numerischer Wert mit der Laenge des Zeitfensters (in
#' Sekunden), das vor jedem Zielereignis liegt und den Bereich der potentiell
#' positiven Ereignisse umfasst.
#' @param include_tail Logischer Wert, ob Ereignisse nach dem letzten
#' Zielereignis platziert werden koennen (Default: \emph{FALSE}).
#'
#' @return Benannte Liste mit Dataframes mit zufaelligen, klassenbalancierten
#' Ereignissen aus Zeitraum.
#'
#' @family Sampling-Funktionen
#' @seealso \code{\link{sample_random}}
#'
#' @importFrom magrittr %>%
#' @export
sample_balanced <- function(n,
                            int_start,
                            int_end,
                            offtime = NULL,
                            target_event,
                            target_cut_in_sec,
                            event_length_in_sec,
                            event_overlap_in_sec = 0,
                            include_tail = FALSE,
                            .seed = NULL,
                            .max_run = 1E3) {
  # Validate arguments
  assert_sample(n = n,
                int_start = int_start,
                int_end = int_end,
                offtime = offtime,
                target_event = target_event,
                target_cut_in_sec = target_cut_in_sec,
                event_length_in_sec = event_length_in_sec,
                event_overlap_in_sec = event_overlap_in_sec,
                include_tail = include_tail,
                .seed = .seed,
                .max_run = .max_run)
  # If requested, ensure that no event can be placed
  # after the last target event
  if (!include_tail) {
    int_end <- min(int_end, max(target_event$end))
  }
  # Offtimes used when sampling the NEGATIVE events: the target events
  # themselves extended backwards by the cut window plus one event length
  offtime_neg <-
    target_event %>%
    dplyr::mutate(start = .data$start - target_cut_in_sec - event_length_in_sec)
  # Offtimes used when sampling the POSITIVE events: the complement of the
  # negative offtimes, plus the target events themselves
  offtime_pos <-
    offtime_neg %>%
    event_invert(int_start, int_end) %>%
    dplyr::bind_rows(target_event)
  # Sample positive and negative events and return them in a named list
  # (positives get ceiling(n/2), negatives floor(n/2) for class balance)
  list(offtime_pos, offtime_neg) %>%
    # Prepare the offtimes (merge with user-supplied offtimes and condense)
    purrr::map(~ dplyr::bind_rows(., offtime) %>%
                 event_condense() %>%
                 event_merge()) %>%
    purrr::map2(c(ceiling(n / 2), floor(n / 2)),
                .,
                ~ sample_random(n = .x,
                                int_start = int_start,
                                int_end = int_end,
                                offtime = .y,
                                event_length_in_sec = event_length_in_sec,
                                event_overlap_in_sec = event_overlap_in_sec,
                                .seed = .seed,
                                .max_run = .max_run) %>%
                  arrange2(.data$start)) %>%
    stats::setNames(c("pos", "neg"))
}
#' @rdname sample_balanced
#'
#' @param from_start_to_end Logischer Wert, ob Ereignisse von \code{int_start}
#' nach \code{int_end} erzeugt werden oder andersrum (Default: \emph{FALSE},
#' d.h. es wird bei \code{int_end} begonnen).
#' @param from_target Logischer Wert, ob Ereignisse von den Zielereignissen
#' weglaufend erzeugt werden oder zu ihnen hinlaufend (Default: \emph{TRUE},
#' d.h. sie laufen von den Zielereignissen weg). Die Richtung ist abhaengig
#' von \code{from_start_to_event}).
#'
#' @importFrom magrittr %>%
#' @export
sample_balanced_seq <- function(n,
                                int_start,
                                int_end,
                                offtime = NULL,
                                target_event,
                                target_cut_in_sec,
                                event_length_in_sec,
                                event_overlap_in_sec = 0,
                                from_start_to_end = FALSE,
                                from_target = TRUE,
                                include_tail = FALSE,
                                .seed = NULL) {
  # Validate arguments
  assert_sample(n = n,
                int_start = int_start,
                int_end = int_end,
                offtime = offtime,
                target_event = target_event,
                target_cut_in_sec = target_cut_in_sec,
                event_length_in_sec = event_length_in_sec,
                event_overlap_in_sec = event_overlap_in_sec,
                from_start_to_end = from_start_to_end,
                from_target = from_target,
                include_tail = include_tail,
                .seed = .seed)
  # Seed for reproducibility (falls back to system time when .seed is NULL)
  set.seed(.seed %||% Sys.time())
  # If requested, ensure that no event can be placed
  # after the last target event
  if (!include_tail) {
    int_end <- min(int_end, max(target_event$end))
  }
  # Merge user-supplied offtimes with the target events themselves;
  # candidate events must not overlap either
  offtime <-
    dplyr::bind_rows(offtime, target_event) %>%
    event_condense() %>%
    event_merge()
  # Windows immediately before each target event; candidates fully inside
  # one of these windows are labelled positive
  postime <-
    target_event %>%
    dplyr::mutate(end = .data$start,
                  start =
                    .data$start - target_cut_in_sec - event_length_in_sec) %>%
    dplyr::select(.data$start, .data$end)
  # Intervals over which the candidate sequence is generated: either the
  # gaps between target events (from_target) or the whole sample range
  if (from_target) {
    res <- event_invert(target_event, int_start, int_end)
  } else {
    res <- tibble::tibble(start = int_start,
                          end = int_end)
  }
  # Enumerate all possible events per interval, drop those overlapping an
  # offtime, and label each candidate pos/neg via the postime windows
  res <-
    purrr::map2_dfr(
      res$start, res$end,
      ~ seq_event(.x, .y,
                  event_length_in_sec = event_length_in_sec,
                  event_overlap_in_sec = event_overlap_in_sec,
                  from_start_to_end = from_start_to_end)) %>%
    arrange2(.data$start) %>%
    {
      dplyr::filter(., !event_test(., offtime,
                                   condition = overlap))
    } %>%
    {
      dplyr::mutate(.,
                    .group = ifelse(event_test(., postime,
                                               condition = include,
                                               swap_xy = TRUE),
                                    "pos", "neg"))
    } %>%
    split(.$.group)
  # Draw a balanced random sample: ceiling(n/2) positives, floor(n/2) negatives
  list(res$pos, res$neg) %>%
    purrr::map2(c(ceiling(n / 2), floor(n / 2)),
                ~ dplyr::select(.x, -.data$.group) %>%
                  dplyr::sample_n(size = .y) %>%
                  arrange2(.data$start)) %>%
    stats::setNames(c("pos", "neg"))
}
#' Zufaellige Ereignisse
#'
#' Bestimmt in einem Zeitraum zufaellig eine feste Anzahl an Ereignissen
#' (Sampling). Dabei wird wiederholt ein Ereignis an zufaelliger Position
#' innerhalb des Zeitraums platziert (\code{sample_random}) bzw. sequentiell die
#' Menge aller im Zeitraum moeglichen Ereignisse gebildet und anschliessend
#' daraus eine zufaellige Stichprobe gezogen (\code{sample_random_seq}).
#'
#' Der Gesamtzeitraum wird durch einen Start- und einen Endzeitpunkt festgelegt.
#' In diesem Zeitraum koennen zusaetzliche Offtime-Intervalle definiert werden,
#' in denen kein Sampling stattfindet (d.h. erzeugte Ereignisse ueberlappen sich
#' nicht mit Offtimes). Neben der Ereignislaenge kann auch eine potentielle
#' zeitliche Ueberlappung zwischen Ereignissen gesteuert werden.
#'
#' @param n Numerischer Wert mit Anzahl der zu samplenden Ereignisse
#' @param int_start POSIXct-Zeitstempel mit Startzeitpunkt des Sample-Zeitraums.
#' @param int_end POSIXct-Zeitstempel mit Endzeitpunkt des Sample-Zeitraums.
#' @param offtime Dataframe, der die Offtime-Ereignisse beinhaltet. Die
#' Ereignisse sind anhand ihrer Start-/Endzeitpunkte beschrieben, die jeweils
#' in den Spalten \emph{start} und \emph{end} uebergeben werden.
#' @param event_length_in_sec Numerischer Wert mit der Laenge der Ereignisse (in
#' Sekunden).
#' @param event_overlap_in_sec Numerischer Wert mit der Laenge des Intervalls
#' (in Sekunden), in dem sich zwei aufeinanderfolgende Ereignisse ueberlappen
#' duerfen (Default: 0, d.h. keine Ueberlappung erlaubt).
#' @param .seed Numerischer Wert mit Seed-Wert, mit dem der Zufallsgenerator
#' initialisiert wird. Darf auch \emph{NULL} sein, dann wird zur
#' Initialisierung die aktuelle Systemzeit herangezogen (Default: \emph{NULL},
#' d.h. bei jedem Aufruf sollten unterschiedliche Ergebnisse erzeugt werden).
#' @param .max_run Numerischer Wert mit Anzahl der Versuche, die je Ereignis
#' unternommen werden sollen, um es im Zeitraum zu platzieren (Default: 1E3).
#'
#' @return Dataframe mit zufaelligen Ereignissen aus Zeitraum.
#'
#' @family Sampling-Funktionen
#'
#' @importFrom magrittr %>%
#' @importFrom rlang %||%
#' @export
sample_random <- function(n,
                          int_start,
                          int_end,
                          offtime = NULL,
                          event_length_in_sec,
                          event_overlap_in_sec = 0,
                          .seed = NULL,
                          .max_run = 1E3) {
  # Validate all arguments up front.
  assert_sample(n = n,
                int_start = int_start,
                int_end = int_end,
                offtime = offtime,
                event_length_in_sec = event_length_in_sec,
                event_overlap_in_sec = event_overlap_in_sec,
                .seed = .seed,
                .max_run = .max_run)
  # Initialise the RNG for reproducibility; without an explicit seed the
  # current system time is used, so repeated calls differ.
  set.seed(.seed %||% Sys.time())
  # Pre-allocate the result with NA timestamps; events that could not be
  # placed within .max_run attempts simply remain NA.
  res <- tibble::tibble(start = rep(lubridate::as_datetime(NA_integer_), n),
                        end = start)
  # Without explicit offtimes start from an empty event table of same shape.
  offtime <- offtime %||% res[0, ]
  for (k in seq_len(n)) {
    candidate <- random_event(int_start,
                              int_end,
                              offtime,
                              event_length_in_sec,
                              .max_run)
    if (is.null(candidate)) next
    # Shrink the accepted event by the allowed overlap on both sides before
    # blocking it, so neighbouring events may overlap by up to that amount.
    blocked <- dplyr::mutate(candidate,
                             start = .data$start + event_overlap_in_sec,
                             end = .data$end - event_overlap_in_sec)
    blocked <- dplyr::filter(blocked, .data$start <= .data$end)
    offtime <- dplyr::bind_rows(offtime, blocked)
    res[k, ] <- candidate
  }
  # Return the sampled events in chronological order.
  arrange2(res, .data$start)
}
#' @rdname sample_random
#'
#' @inheritParams seq_event
#'
#' @export
sample_random_seq <- function(n,
                              int_start,
                              int_end,
                              offtime = NULL,
                              event_length_in_sec,
                              event_overlap_in_sec = 0,
                              from_start_to_end = TRUE,
                              .seed = NULL) {
  # Validate all arguments up front.
  assert_sample(n = n,
                int_start = int_start,
                int_end = int_end,
                offtime = offtime,
                event_length_in_sec = event_length_in_sec,
                event_overlap_in_sec = event_overlap_in_sec,
                from_start_to_end = from_start_to_end,
                .seed = .seed)
  # Initialise the RNG for reproducibility; without an explicit seed the
  # current system time is used, so repeated calls differ.
  set.seed(.seed %||% Sys.time())
  # Enumerate every possible event position inside the interval.
  candidates <- seq_event(int_start,
                          int_end,
                          event_length_in_sec,
                          event_overlap_in_sec,
                          from_start_to_end)
  # Discard candidates that overlap any offtime interval.
  if (!is.null(offtime)) {
    keep <- !event_test(candidates, offtime, condition = overlap)
    candidates <- candidates[keep, ]
  }
  # Draw the requested sample and return it in chronological order.
  arrange2(dplyr::sample_n(candidates, size = n), .data$start)
}
# Hilfsfunktionen --------------------------------------------------------------
#' Zufaelliges Ereignis
#'
#' Hilfsfunktion zum zufaelligen Platzieren eines Ereignisses innerhalb eines
#' Zeitraums. Der Gesamtzeitraum wird durch einen Start- und einen Endzeitpunkt
#' festgelegt. In diesem Zeitraum koennen zusaetzliche Offtime-Intervalle
#' definiert werden, fuer die beim Platzieren sichergestellt wird, dass keine
#' Ueberlappung auftritt. D.h. sollte ein Ueberlappung auftreten, wird das
#' Ereignis verworfen und ein neuer Platzierungsversuch gestartet. Um an dieser
#' Stelle einen Deadlock zu vermeiden, gibt es eine Obergrenze fuer die
#' durchzufuehrenden Versuche.
#'
#' @inheritParams sample_random
#'
#' @return Dataframe mit zufaelligem Ereignis im Zeitraum bzw. \emph{NULL}, wenn
#' Platzierung nicht erfolgreich.
#'
#' @family Sampling-Funktionen
#'
#' @keywords internal
#'
#' @importFrom magrittr %>%
#'
#' @seealso \code{\link{sample_random}}
random_event <- function(int_start,
                         int_end,
                         offtime,
                         event_length_in_sec,
                         .max_run = 1E3) {
  # Validate arguments.
  assertthat::assert_that(is_temporal(int_start, is_strict = TRUE),
                          is_temporal(int_end, is_strict = TRUE),
                          is.data.frame(offtime),
                          assertthat::is.number(event_length_in_sec),
                          assertthat::is.count(.max_run))
  # Retry the placement up to .max_run times to avoid a deadlock when the
  # offtimes leave little or no room for the event.
  attempt <- 0L
  while (attempt < .max_run) {
    attempt <- attempt + 1L
    # Uniformly random start such that the whole event fits into the interval.
    candidate_start <- lubridate::as_datetime(
      stats::runif(1,
                   min = int_start,
                   max = int_end - event_length_in_sec))
    candidate_end <- candidate_start + event_length_in_sec
    # Accept the candidate only if it touches no offtime interval.
    if (!any(overlap(candidate_start,
                     candidate_end,
                     offtime$start,
                     offtime$end))) {
      return(tibble::tibble(start = candidate_start,
                            end = candidate_end))
    }
  }
  warning("Kein Ereignis in .max_run Versuchen gefunden.")
  return(NULL)
}
#' Alle Ereignisse
#'
#' Hilfsfunktion zum sequentiellen Erzeugen aller moeglichen Ereignisse in einem
#' gegebenen Zeitraum. Dabei kann neben dem Start- und Endzeitpunkt die
#' Ereignislaenge und die Ueberlappung sowie die Richtung der Sequenz veraendert
#' werden.
#'
#' @inheritParams sample_random
#' @param from_start_to_end Logischer Wert, ob Ereignisse von \code{int_start}
#' nach \code{int_end} erzeugt werden oder andersrum (Default: \emph{TRUE},
#' d.h. es wird bei \code{int_start} begonnen).
#'
#' @return Dataframe mit allen Ereignissen im Zeitraum.
#'
#' @keywords internal
#'
#' @importFrom magrittr %>%
seq_event <- function(int_start,
                      int_end,
                      event_length_in_sec,
                      event_overlap_in_sec = 0,
                      from_start_to_end = TRUE) {
  # Validate arguments.
  assert_sample(int_start = int_start,
                int_end = int_end,
                event_length_in_sec = event_length_in_sec,
                event_overlap_in_sec = event_overlap_in_sec,
                from_start_to_end = from_start_to_end)
  # Step between consecutive event starts: event length minus allowed overlap.
  step <- event_length_in_sec - event_overlap_in_sec
  # Latest start at which an event still fits completely into the interval.
  last_start <- int_end - event_length_in_sec
  # Walk forwards from int_start or backwards from the last possible start.
  if (from_start_to_end) {
    starts <- seq(int_start, last_start, by = step)
  } else {
    starts <- seq(last_start, int_start, by = -step)
  }
  # Build all events and return them in chronological order.
  arrange2(tibble::tibble(start = starts,
                          end = starts + event_length_in_sec),
           .data$start)
}
#' Argumente-Check (Sampling-Funktionen)
#'
#' Hilfsfunktion zum Ueberpruefen einer Reihe von Argumenten, ob sie mit
#' korrekten Typen und sinnvollen Werten uebergeben wurden. Sobald ein Verstoss
#' festgestellt wird, wird die Ausfuehrung unterbrochen. Da diese Test
#' wiederholt in den Sampling-Funktionen auftreten, wurden sie in diese Funktion
#' ausgelagert.
#'
#' @inheritParams sample_random
#' @inheritParams sample_balanced_seq
#'
#' @family Argument-Funktionen
#'
#' @keywords internal
assert_sample <- function(n = 1,
                          int_start = lubridate::origin,
                          int_end = lubridate::origin + 1,
                          offtime = NULL,
                          event_length_in_sec = 1,
                          event_overlap_in_sec = 0,
                          from_start_to_end = NA,
                          target_event = data.frame(),
                          target_cut_in_sec = 1,
                          include_tail = NA,
                          from_target = NA,
                          .seed = NULL,
                          .max_run = 1) {
  # Type checks first (counts/numbers/flags, data frames, timestamps), then
  # value checks (interval ordering, positive lengths, overlap < length).
  # The defaults are chosen so that every check is trivially satisfied:
  # callers only pass the arguments their sampling function actually uses.
  # Note: the order of the assertions determines which error message is
  # reported first when several arguments are invalid.
  assertthat::assert_that(assertthat::is.count(n),
                          is_temporal(int_start, is_strict = TRUE),
                          is_temporal(int_end, is_strict = TRUE),
                          offtime %is_null_or% is.data.frame,
                          assertthat::is.number(event_length_in_sec),
                          assertthat::is.number(event_overlap_in_sec),
                          assertthat::is.flag(from_start_to_end),
                          is.data.frame(target_event),
                          assertthat::is.number(target_cut_in_sec),
                          assertthat::is.flag(include_tail),
                          assertthat::is.flag(from_target),
                          .seed %is_null_or% assertthat::is.number,
                          assertthat::is.count(.max_run),
                          int_start < int_end,
                          event_length_in_sec > 0,
                          event_overlap_in_sec >= 0,
                          event_overlap_in_sec < event_length_in_sec,
                          target_cut_in_sec > 0)
  # Called for its side effect (stopping on invalid input); returns nothing.
  invisible(NULL)
}
|
8aad6744a2b6c9f07189dee46cb9b568015a35d4
|
5d1db2e131d3d6ad2833800fe58c8c637b31ac9a
|
/tests/testthat/test-ode.R
|
e728d9cd7a94c1f63a4f7adde8c8a391edd163bd
|
[] |
no_license
|
cran/calculus
|
ff9bb3676aeb9c43f8bfb80b9464cfa40d08fb90
|
1ef6b6e778cd845389b99860148db23c88f2af3e
|
refs/heads/master
| 2023-03-16T15:12:14.523441
| 2023-03-09T22:00:02
| 2023-03-09T22:00:02
| 236,567,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,941
|
r
|
test-ode.R
|
# Tests for ode() with the default integration method. Test names are
# timestamp-style identifiers (YYYYMMDDHHMM). Each test integrates an
# equation (or a system of two equations) over a fixed time grid and
# compares the result against an analytic value (e.g. 10*exp(1) for
# dx/dt = x on [0, 1]) or an independently computed constant. The scalar
# expectations suggest that drop = TRUE collapses the trajectory to its
# final state -- TODO confirm against the ode() documentation.

# --- Single ODE given as a character formula ---------------------------------
test_that("202012281517", {
  x <- ode("x", c(x = 10), seq(0, 1, by = 0.001), drop = TRUE)
  y <- c(x = 10*exp(1))
  expect_equal(x, y)
})
test_that("202012281518", {
  # Extra parameter "a" supplied via params.
  x <- ode("a*x", c(x = 1), seq(0, 1, by = 0.001), params = list(a = 2), drop = TRUE)
  y <- c(x = exp(2))
  expect_equal(x, y)
})
test_that("202012281519", {
  # Time-dependent right-hand side; time variable named via timevar.
  x <- ode("x*t/100", c(x = 0.1), seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281520", {
  # Time variable and a parameter combined.
  x <- ode("x*t/n", c(x = 0.1), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-7)
})
# --- Single ODE given as a function, named initial state ---------------------
test_that("202012281521", {
  f <- function(x) x
  x <- ode(f, c(x = 10), seq(0, 1, by = 0.001), drop = TRUE)
  y <- c(x = 10*exp(1))
  expect_equal(x, y)
})
test_that("202012281522", {
  f <- function(x, a) a*x
  x <- ode(f, c(x = 1), seq(0, 1, by = 0.001), params = list(a = 2), drop = TRUE)
  y <- c(x = exp(2))
  expect_equal(x, y)
})
test_that("202012281523", {
  f <- function(x, t) x*t/100
  x <- ode(f, c(x = 0.1), seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281524", {
  f <- function(x, t, n) x*t/n
  x <- ode(f, c(x = 0.1), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-7)
})
# --- Single ODE given as a function, unnamed scalar initial state ------------
test_that("202012281525", {
  f <- function(x) x
  x <- ode(f, 10, seq(0, 1, by = 0.001), drop = TRUE)
  y <- 10*exp(1)
  expect_equal(x, y)
})
test_that("202012281526", {
  f <- function(x, a) a*x
  x <- ode(f, 1, seq(0, 1, by = 0.001), params = list(a = 2), drop = TRUE)
  y <- exp(2)
  expect_equal(x, y)
})
test_that("202012281527", {
  f <- function(x, t) x*t/100
  x <- ode(f, 0.1, seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- 0.1640498
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281528", {
  f <- function(x, t, n) x*t/n
  x <- ode(f, 0.1, seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- 0.1640498
  expect_equal(x, y, tolerance = 1e-7)
})
# --- Systems of two ODEs given as character vectors --------------------------
test_that("202012281529", {
  x <- ode(c("x","x*y"), c(x=1, y=1), seq(0, 1, by = 0.001), drop = TRUE)
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281530", {
  x <- ode(c("a*x","x*y"), c(x=1, y=1), seq(0, 1, by = 0.001), params = list(a = 1), drop = TRUE)
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281531", {
  x <- ode(c("x*t/100","t"), c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- c(x = 0.1640498, y = 49.5)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281532", {
  x <- ode(c("x*t/n","t*n"), c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- c(x = 0.1640498, y = 4950)
  expect_equal(x, y, tolerance = 1e-7)
})
# --- Systems given as functions with named state arguments -------------------
# Note: argument order of f deliberately varies between tests (e.g. (y, x)).
test_that("202012281533", {
  f <- function(y, x) c(x, x*y)
  x <- ode(f, c(x=1, y=1), seq(0, 1, by = 0.001), drop = TRUE)
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281534", {
  f <- function(x, y, a) c(a*x,x*y)
  x <- ode(f, c(x=1, y=1), seq(0, 1, by = 0.001), params = list(a = 1), drop = TRUE)
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281535", {
  f <- function(y, x, t) c(x*t/100, t)
  x <- ode(f, c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- c(x = 0.1640498, y = 49.5)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281536", {
  f <- function(x, y, t, n) c(x*t/n,t*n)
  x <- ode(f, c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- c(x = 0.1640498, y = 4950)
  expect_equal(x, y, tolerance = 1e-7)
})
# --- Systems given as functions of an unnamed state vector -------------------
test_that("202012281537", {
  f <- function(x) c(x[1], x[1]*x[2])
  x <- ode(f, c(1, 1), seq(0, 1, by = 0.001), drop = TRUE)
  y <- c(exp(1), 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281538", {
  f <- function(x, a) c(a*x[1],x[1]*x[2])
  x <- ode(f, c(1, 1), seq(0, 1, by = 0.001), params = list(a = 1), drop = TRUE)
  y <- c(exp(1), 5.574942)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281539", {
  f <- function(x, t) c(x[1]*t/100, t)
  x <- ode(f, c(0.1, 0), seq(1,10,0.001), timevar = "t", drop = TRUE)
  y <- c(0.1640498, 49.5)
  expect_equal(x, y, tolerance = 1e-7)
})
test_that("202012281540", {
  f <- function(x, t, n) c(x[1]*t/n,t*n)
  x <- ode(f, c(0.1, 0), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE)
  y <- c(0.1640498, 4950)
  expect_equal(x, y, tolerance = 1e-7)
})
# Same matrix of cases as above but with method = "euler". The explicit
# Euler scheme is only first-order accurate, so these tests use much finer
# step sizes (0.0001 or 0.00001 instead of 0.001) and a looser tolerance
# (1e-4) to reach the same reference values.

# --- Euler: single ODE given as a character formula --------------------------
test_that("202012281541", {
  x <- ode("x", c(x = 10), seq(0, 1, by = 0.0001), drop = TRUE, method = "euler")
  y <- c(x = 10*exp(1))
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281542", {
  x <- ode("a*x", c(x = 1), seq(0, 1, by = 0.00001), params = list(a = 2), drop = TRUE, method = "euler")
  y <- c(x = exp(2))
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281543", {
  x <- ode("x*t/100", c(x = 0.1), seq(1,10,0.0001), timevar = "t", drop = TRUE, method = "euler")
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281544", {
  x <- ode("x*t/n", c(x = 0.1), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-4)
})
# --- Euler: single ODE given as a function, named initial state --------------
test_that("202012281545", {
  f <- function(x) x
  x <- ode(f, c(x = 10), seq(0, 1, by = 0.0001), drop = TRUE, method = "euler")
  y <- c(x = 10*exp(1))
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281546", {
  f <- function(x, a) a*x
  x <- ode(f, c(x = 1), seq(0, 1, by = 0.00001), params = list(a = 2), drop = TRUE, method = "euler")
  y <- c(x = exp(2))
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281547", {
  f <- function(x, t) x*t/100
  x <- ode(f, c(x = 0.1), seq(1,10,0.001), timevar = "t", drop = TRUE, method = "euler")
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281548", {
  f <- function(x, t, n) x*t/n
  x <- ode(f, c(x = 0.1), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- c(x = 0.1640498)
  expect_equal(x, y, tolerance = 1e-4)
})
# --- Euler: single ODE given as a function, unnamed scalar state -------------
test_that("202012281549", {
  f <- function(x) x
  x <- ode(f, 10, seq(0, 1, by = 0.00001), drop = TRUE, method = "euler")
  y <- 10*exp(1)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281550", {
  f <- function(x, a) a*x
  x <- ode(f, 1, seq(0, 1, by = 0.00001), params = list(a = 2), drop = TRUE, method = "euler")
  y <- exp(2)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281551", {
  f <- function(x, t) x*t/100
  x <- ode(f, 0.1, seq(1,10,0.001), timevar = "t", drop = TRUE, method = "euler")
  y <- 0.1640498
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281552", {
  f <- function(x, t, n) x*t/n
  x <- ode(f, 0.1, seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- 0.1640498
  expect_equal(x, y, tolerance = 1e-4)
})
# --- Euler: systems of two ODEs given as character vectors -------------------
test_that("202012281553", {
  x <- ode(c("x","x*y"), c(x=1, y=1), seq(0, 1, by = 0.00001), drop = TRUE, method = "euler")
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281554", {
  x <- ode(c("a*x","x*y"), c(x=1, y=1), seq(0, 1, by = 0.00001), params = list(a = 1), drop = TRUE, method = "euler")
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281555", {
  x <- ode(c("x*t/100","t"), c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", drop = TRUE, method = "euler")
  y <- c(x = 0.1640498, y = 49.5)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281556", {
  x <- ode(c("x*t/n","t*n"), c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- c(x = 0.1640498, y = 4950)
  expect_equal(x, y, tolerance = 1e-4)
})
# --- Euler: systems given as functions with named state arguments ------------
test_that("202012281557", {
  f <- function(y, x) c(x, x*y)
  x <- ode(f, c(x=1, y=1), seq(0, 1, by = 0.00001), drop = TRUE, method = "euler")
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281558", {
  f <- function(x, y, a) c(a*x,x*y)
  x <- ode(f, c(x=1, y=1), seq(0, 1, by = 0.00001), params = list(a = 1), drop = TRUE, method = "euler")
  y <- c(x = exp(1), y = 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281559", {
  f <- function(y, x, t) c(x*t/100, t)
  x <- ode(f, c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", drop = TRUE, method = "euler")
  y <- c(x = 0.1640498, y = 49.5)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281600", {
  f <- function(x, y, t, n) c(x*t/n,t*n)
  x <- ode(f, c(x = 0.1, y = 0), seq(1,10,0.001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- c(x = 0.1640498, y = 4950)
  expect_equal(x, y, tolerance = 1e-4)
})
# --- Euler: systems given as functions of an unnamed state vector ------------
test_that("202012281601", {
  f <- function(x) c(x[1], x[1]*x[2])
  x <- ode(f, c(1, 1), seq(0, 1, by = 0.00001), drop = TRUE, method = "euler")
  y <- c(exp(1), 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281602", {
  f <- function(x, a) c(a*x[1],x[1]*x[2])
  x <- ode(f, c(1, 1), seq(0, 1, by = 0.00001), params = list(a = 1), drop = TRUE, method = "euler")
  y <- c(exp(1), 5.574942)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281603", {
  f <- function(x, t) c(x[1]*t/100, t)
  x <- ode(f, c(0.1, 0), seq(1,10,0.0001), timevar = "t", drop = TRUE, method = "euler")
  y <- c(0.1640498, 49.5)
  expect_equal(x, y, tolerance = 1e-4)
})
test_that("202012281604", {
  f <- function(x, t, n) c(x[1]*t/n,t*n)
  x <- ode(f, c(0.1, 0), seq(1,10,0.0001), timevar = "t", params = list(n = 100), drop = TRUE, method = "euler")
  y <- c(0.1640498, 4950)
  expect_equal(x, y, tolerance = 1e-4)
})
|
cba303e2693a3ad3214031d3be4fe4668d1f8790
|
0598e6e38f907a2727d3de5ab9ed0be5bee10226
|
/man/msk.hydrol.2011.aggregate.Rd
|
33c63b4ac90dd6db50bc6a75e0aaa30c7093a359
|
[] |
no_license
|
cran/ecoval
|
f59a3ef5bfc8a42d628bfc42a38c9a163a6ee3e0
|
742e04992cfcbb9f5fad7f8da1a499c2f341873a
|
refs/heads/master
| 2021-12-14T13:19:32.035693
| 2021-12-10T22:50:06
| 2021-12-10T22:50:06
| 25,367,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,536
|
rd
|
msk.hydrol.2011.aggregate.Rd
|
\name{msk.hydrol.2011.aggregate}
\alias{msk.hydrol.2011.aggregate}
\title{
Aggregation function of the hydrology module of the Swiss modular concept for stream assessment, level I (Regional survey) from 2011.
}
\description{
Aggregates the values of the 9 sub-objectives at the second-highest aggregation level of the hydrology module of the Swiss River Assessment Program MSK (2011).
}
\usage{
msk.hydrol.2011.aggregate(u,
par = NA)
}
\arguments{
\item{u}{
Numerical vector of length 9 containing the values that quantify the degree of fulfillment of the 9 sub-objectives.
}
\item{par}{
Argument added for consistency with the other aggregation procedures. No parameters are needed.
}
}
\value{
The function returns the aggregated value.
}
\references{
Langhans, S.D., Lienert, J., Schuwirth, N. and Reichert, P.
How to make river assessments comparable: A demonstration for hydromorphology,
Ecological Indicators 32, 264-275, 2013.
\doi{10.1016/j.ecolind.2013.03.027}\cr\cr
Langhans, S.D., Reichert, P. and Schuwirth, N.
The method matters: indicator aggregation in ecological river assessment.
Ecological Indicators 45, 494-507, 2014.
\doi{10.1016/j.ecolind.2014.05.014}\cr\cr
Reichert, P., Schuwirth, N. and Langhans, S.
Constructing, evaluating and visualizing value and utility functions for decision support,
Environmental Modelling & Software 46, 283-291, 2013.
\doi{10.1016/j.envsoft.2013.01.017}\cr\cr
Reichert, P., Langhans, S., Lienert, J. and Schuwirth, N.
The conceptual foundation of environmental decision support.
Journal of Environmental Management. 154, 316-332, 2015.
\doi{10.1016/j.jenvman.2015.01.053}\cr\cr
Reichert, P., Borsuk, M., Hostmann, M., Schweizer, S., Sporri, C., Tockner, K. and Truffer, B.
Concepts of decision support for river rehabilitation,
Environmental Modelling and Software 22, 188-201, 2007.
\doi{10.1016/j.envsoft.2005.07.017}\cr\cr
\url{https://modul-stufen-konzept.ch}\cr\cr
Pfaundler M.,Duebendorfer,C, Zysset, A.
Methoden zur Untersuchung und Beurteilung der Fliessgewaesser. Hydrologie - Abflussregime Stufe F (flaechendeckend).
Bundesamt fuer Umwelt, Bern. Umwelt-Vollzug Nr. 1107: 113 S, 2011.
\url{http://www.bafu.admin.ch/uv-1107-d}
}
\seealso{
\code{\link{msk.hydrol.2011.create}},
\code{\link[utility:utility-package]{utility}}.
}
\examples{
hydrol <- msk.hydrol.2011.create()
plot(hydrol)
hydrol.german <- msk.hydrol.2011.create("Deutsch")
plot(hydrol.german)
}
|
692c6ecba6ede96c0c49a528fda4fe935580af50
|
9927e0c8ea3b582d24f11bc8af4b602c4d02eff7
|
/American_Community_Survey_Exercise_Golba_Joseph.R
|
7513c3b26e285449b4267f879b04d8f87bc9ca13
|
[] |
no_license
|
jgolba/Golba-DSC520
|
aebf26b5519a4dfb52cc3e8dd7a322c53880c736
|
711051d6dcfedffdf713720a62e0d53805313228
|
refs/heads/main
| 2023-08-17T02:59:53.033633
| 2021-09-19T14:37:41
| 2021-09-19T14:37:41
| 401,462,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,803
|
r
|
American_Community_Survey_Exercise_Golba_Joseph.R
|
library(ggplot2)
# NOTE(review): hard-coded user-specific path kept for the author's setup;
# anyone else running this must adjust it (or use a project-relative path).
setwd('C:/Users/jgolba/Documents/DSC 520 - Stats for Data Science/dsc520/data/')
# American Community Survey extract: one row per county.
ACS <- read.csv(file="acs-14-1yr-s0201.csv")
head(ACS)
#i What are the elements in your data (including the categories and data types)?
summary(ACS)
#ii Please provide the output from the following functions: str(); nrow(); ncol()
str(ACS)
###'data.frame':	136 obs. of  8 variables:
#$ Id                    : chr  "0500000US01073" "0500000US04013" "0500000US04019" "0500000US06001" ...
#$ Id2                   : int  1073 4013 4019 6001 6013 6019 6029 6037 6059 6065 ...
#$ Geography             : chr  "Jefferson County, Alabama" "Maricopa County, Arizona" "Pima County, Arizona" "Alameda County, California" ...
#$ PopGroupID            : int  1 1 1 1 1 1 1 1 1 1 ...
#$ POPGROUP.display.label: chr  "Total population" "Total population" "Total population" "Total population" ...
#$ RacesReported         : int  660793 4087191 1004516 1610921 1111339 965974 874589 10116705 3145515 2329271 ...
#$ HSDegree              : num  89.1 86.8 88 86.9 88.8 73.6 74.5 77.5 84.6 80.6 ...
#$ BachDegree            : num  30.5 30.2 30.8 42.8 39.7 19.7 15.4 30.3 38 20.7 ...
nrow(ACS)
# 136
ncol(ACS)
#8
#iii Create a Histogram of the HSDegree variable using the ggplot2 package.
#1. Set a bin size for the Histogram.
#2. Include a Title and appropriate X/Y axis labels on your Histogram Plot.
ACS_Histogram <- ggplot(ACS, aes(HSDegree)) + geom_histogram(bins=30) +
  ggtitle('Percent of Population to Receive High School Degree') +
  xlab('HS Degrees Achieved in Percent') + ylab('Frequency')
ACS_Histogram
#iv Answer the following questions based on the Histogram produced:
#1 Based on what you see in this histogram, is the data distribution unimodal? No
#2 Is it approximately symmetrical? No
#3 Is it approximately bell-shaped? No
#4 Is it approximately normal? No
#5 If not normal, is the distribution skewed? If so, in which direction? Left skewed
#6 Include a normal curve to the Histogram that you plotted.
# FIX: dnorm() is a probability density (max ~0.1 here), so overlaying it on
# a count-scale histogram squashes the curve onto the x-axis. Rebuild the
# histogram on the density scale (after_stat() needs ggplot2 >= 3.3) so both
# layers share a comparable y-axis.
ACS_Histogram_density <- ggplot(ACS, aes(HSDegree)) +
  geom_histogram(aes(y = after_stat(density)), bins = 30) +
  ggtitle('Percent of Population to Receive High School Degree') +
  xlab('HS Degrees Achieved in Percent') + ylab('Density')
ACS_Histogram_density +
  stat_function(fun = dnorm,
                args = list(mean = mean(ACS$HSDegree, na.rm = TRUE),
                            sd = sd(ACS$HSDegree, na.rm = TRUE)),
                color = 'blue', size = 1)
#7 Explain whether a normal distribution can accurately be used as a model for this data.
## Because the ACS data is skewed to the left, a normal distribution cannot be used.
#v Create a Probability Plot of the HSDegree variable.
ggplot(ACS, aes(sample=HSDegree)) + stat_qq() + stat_qq_line(color = 'red')
#vi Answer the following questions based on the Probability Plot:
#Based on what you see in this probability plot, is the distribution approximately normal? Explain how you know.
#The distribution is not normal because the plot is not straight.
#If not normal, is the distribution skewed? If so, in which direction? Explain how you know.
#Both ends of the plots are below the line so it is skewed left.
#vii Now that you have looked at this data visually for normality, you will now
#quantify normality with numbers using the stat.desc() function. Include a
#screen capture of the results produced.
library(pastecs)
# Suppress scientific notation and limit printed digits for readability.
options(scipen=100)
options(digits=3)
stat.desc(ACS, basic=TRUE, desc=TRUE, norm=FALSE, p=0.95)
# norm = TRUE adds skewness, kurtosis and their standardized (z) values.
stat.desc(ACS$HSDegree, basic = FALSE, norm = TRUE)
#viii In several sentences provide an explanation of the result produced for
#skew, kurtosis, and z-scores. In addition, explain how a change in the sample
#size may change your explanation?
#With the skewness being -1.67, the data is skewed to the left.
#The kurtosis is 4.35 so the data has a heavy tailed distribution.
#The z-scores for skew and kurtosis are rather large showing little relationship to the mean.
#A larger sample size would give us more data to analyze and possibly make the distribution more normal.
de1417c10c6ab1c3a9908e51719ff0cc36871740
|
2e55f1f791be3391ac854f881714e3f27734f6a2
|
/man/validate.Rd
|
a72705cd321104dabc0ae17de2c57d30f265cf75
|
[] |
no_license
|
IALSA/IalsaSynthesis
|
ff86e731d9f7956d89ea7c7b8d30d9fde2324e39
|
a7b4ad44b2b21a21e11e6438da6c27b0618cae46
|
refs/heads/master
| 2021-06-07T06:07:52.638563
| 2021-04-16T05:32:00
| 2021-04-16T05:32:00
| 34,798,650
| 0
| 0
| null | 2021-04-16T05:32:01
| 2015-04-29T14:30:49
|
R
|
UTF-8
|
R
| false
| true
| 1,267
|
rd
|
validate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate.R
\name{validate}
\alias{validate}
\alias{validate_filename_output}
\title{Functions that check the validty of values throughout the workflow.}
\usage{
validate_filename_output(
filename,
path,
file_extension_expected = "out",
underscore_count_expected = 4L
)
}
\arguments{
\item{filename}{The name of the file to be validated.}
\item{path}{The location of the file to be validated.}
\item{file_extension_expected}{The extension of the file. This defaults to "out", which corresponds to Mplus output.}
\item{underscore_count_expected}{The number of underscores required in the name (not currently used).}
}
\value{
An \code{invisible} \code{TRUE} value if the filename is valid. Otherwise, an error is thrown.
}
\description{
These functions help identify mistakes in formatting before they create difficult-to-diagnose problems later.
}
\examples{
library(IalsaSynthesis) #Load the package into the current R session.
\dontrun{
path <- "./studies/eas"
good_name <- "u1_male_aehplus_muscle_noCog_hand_noCogSpec.out"
validate_filename_output(good_name, path)
bad_name <- "missing_something.outtttt"
validate_filename_output(bad_name, path)
}
}
\author{
Will Beasley
}
|
3d5cf496ee3dcba1337d0c32d87aa873ef094508
|
aeda57f76384838b2a66ecc0deaea39bbd47c778
|
/R/fit_rfrk.R
|
756fa56732845df83326aea670198a45c1792fbf
|
[] |
no_license
|
Rafit4/slmrf
|
f59bb2f7b79b8e225c1868a7113b0b63a8dabcfe
|
50852915c8ecfdd00faa64ddf99f9929719c87bc
|
refs/heads/master
| 2022-03-28T12:00:15.787289
| 2020-01-19T03:03:44
| 2020-01-19T03:03:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,391
|
r
|
fit_rfrk.R
|
#' Fit RFRK
#'
#' Estimate a random forest residual kriging model
#'
#' @param rf random forest model
#' @param y Numeric response vector.
#' @param dist_mtx Distance matrix used to compute covariance matrix.
#' @param theta_ini numeric vector. Initial values for covariance parameters:
#' logarithm transformed nugget, partial sill, and range, in that order. If
#' not specified uses default initialization.
#'
#' @return object of type `rfrk'.
#'
#' @note assumes exponential covariance function
#'
#' @export
fit_rfrk <- function(rf, y, dist_mtx, theta_ini=NULL) {
  # Residuals of the forest fit. Assumes predict(rf) without newdata returns
  # in-sample predictions (for randomForest objects these are the
  # out-of-bag predictions) -- TODO confirm for other model classes.
  resid <- y - predict(rf)
  # Default parameter initialization: split the residual variance evenly
  # between nugget and partial sill; start the range at the mean distance.
  if(is.null(theta_ini)) {
    half_var <- 0.5 * var(resid)  # hoisted: was computed twice
    theta_ini <- log(c('nugg' = half_var, 'parsil' = half_var, 'range' = mean(dist_mtx)))
  }
  # Minimize the -2 log-likelihood of the residual kriging model over the
  # log-transformed covariance parameters, timing the optimization.
  start_time <- Sys.time()
  parmest <- optim(theta_ini, m2LL_sk, r=resid, dist_mtx=dist_mtx)
  end_time <- Sys.time()
  # FIX: a bare POSIXct difference picks its units (secs/mins/hours) from the
  # magnitude of the duration; pin the units so est_time is comparable.
  est_time <- difftime(end_time, start_time, units = "secs")
  # Back-transform parameters to the natural scale and build the covariance
  # matrix (exponential model, nugget included).
  params <- exp(parmest$par)
  covMat <- make_covmat(params, dist_mtx, is_log=FALSE,
                        include_nugg = TRUE) # exponential is default
  Vi <- solve(covMat)  # inverse covariance, reused by downstream kriging
  out <- list(
    resid = resid,
    params = params,
    nugg = params[1],
    parsil = params[2],
    range = params[3],
    sill = params[1] + params[2],
    covMat = covMat,
    Vi = Vi,
    optim_out = parmest,
    est_time = est_time
  )
  class(out) <- 'rfrk'
  return(out)
}
|
c20e518db92c21fb9aa9769115ced5c92f41c8b3
|
e48e68bcd947cd4d6422ee7343f7840df7c276e3
|
/Fig 3/Fig 3C/lectin somatic mutants fisher.R
|
d0c64300b616d0ba687c56a87d6c6d023dc89f7f
|
[] |
no_license
|
arunkumarramesh/Parasitoid-resistance
|
7408b3a34bb4bc2b6c34ca9279b31189a6f4db57
|
6681bebd502d5f56ae5813eefd1962b14af5f145
|
refs/heads/main
| 2023-06-13T01:00:57.400276
| 2023-05-26T09:58:57
| 2023-05-26T09:58:57
| 491,447,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,954
|
r
|
lectin somatic mutants fisher.R
|
## Author: Shuyu Olivia Zhou
# This is code for analyzing the encapsulation assay data for somatic mutants of lectin-24A
# and plotting the figure
library(multcomp)
library(lme4)
library(car)
library(ggplot2)
library(binom)
library(emmeans)
library(tidyverse)
library(tidyr)
library(stringr)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
list.files()
#import data
data = read.table("Lectin somatic mutants.csv",
sep = ",", dec = ".", header = TRUE)
str(data)
data
data$male = factor(data$male,
levels=c("2Ar5", "gRNAs (X)", "68A4", "gRNAs (III)"))
data$chromosome = factor(data$chromosome, levels=c("III", "X"))
str(data)
#separate X and III chromosome somatic mutagenesis data
X_data <- subset(data, chromosome == "X")
X_data
III_data <- subset(data, chromosome == "III")
III_data
####### X chromosome guides #######
#Fishers exact test
X_data_fisher <- subset(X_data, select = c(male, resistant, susceptible))
X_data_fisher
X_fisher <- aggregate(.~ male, X_data_fisher, sum)
rownames(X_fisher) <- X_fisher[,1]
X_fisher[,1] <- NULL
X_fisher
X_fisher_test <- fisher.test(X_fisher)
X_fisher_test$p.value
########### III chromosome guides ############
#Fishers exact test
III_data_fisher <- subset(III_data, select = c(male, resistant, susceptible))
III_data_fisher
III_fisher <- aggregate(.~ male, III_data_fisher, sum)
rownames(III_fisher) <- III_fisher[,1]
III_fisher[,1] <- NULL
III_fisher
III_fisher_test <- fisher.test(III_fisher)
III_fisher_test$p.value
######## ALL ########
#binomial confidence intervals
data_subset <- subset(data, select = c(male, resistant, susceptible, n))
data_sum <- aggregate(. ~ male, data_subset, sum)
binomial_CIs <- binom.confint(x = data_sum$resistant, n = data_sum$n, methods = "prop.test")
data_sum$lower <- binomial_CIs$lower
data_sum$upper <- binomial_CIs$upper
data_sum
data_sum$resistance = data_sum$resistant / data_sum$n * 100
data_sum$n <- paste("n=", data_sum$n, sep = "")
data_sum
data_sum$chromosome = NA
data_sum[data_sum$male == "68A4" | data_sum$male == "gRNAs (III)", ]$chromosome <- "III"
data_sum[data_sum$male == "2Ar5" | data_sum$male == "gRNAs (X)", ]$chromosome <- "X"
data_sum
# remove n from labels, explain this in legend
data_sum$n <- gsub("n=","",data_sum$n)
data_sum$male <- gsub("gRNAs.*","gRNA",data_sum$male)
p <- ggplot(data = data_sum, aes(x = male, y = resistance, label = n)) +
theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_bar(stat = "identity", fill = "gray") +
geom_errorbar(aes(ymin = lower * 100, ymax = upper * 100), width = .2) +
geom_text(aes(y = upper*100), nudge_y = 5, size = 4) +
scale_y_continuous(breaks = seq(0, 100, by = 25)) +
xlab("Genotype") +
ylab("Parasitoid Melanization Rate (%)") +
facet_wrap( ~ chromosome, scales = "free_x")
p
ggsave("Lectin somatic mutants encapsulation rates.pdf", width = 2.3, height = 2.8)
|
406125c79481f3833116b1c2abdb940d35cb9368
|
b46e9ec38e563ae6631a40772cef3bef81ea9af4
|
/supervised-learning/2-rf-xgb.R
|
3d867a65af5d6e6d5164074848ac396ffafc2895
|
[] |
no_license
|
anhnguyendepocen/bundesbank-workshop
|
9623ceaa2c5aca022607e3feffe22df0083864da
|
f60ab6c5ad4305cdcfefc790210143f72a8d3c60
|
refs/heads/master
| 2021-08-16T10:30:00.221938
| 2017-11-19T15:48:01
| 2017-11-19T15:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,381
|
r
|
2-rf-xgb.R
|
### Big Data Analysis ###
### Supervised Learning II ###
### Bagging, Random Forests and Boosting ###
# Fits bagging, random forest, boosting (xgboost), CART and a linear model
# on Frankfurt rental data, compares them via cross-validated resampling,
# and finally evaluates every fit on a held-out 20% test set.
# Setup
# install.packages("foreach")
# install.packages("caret")
# install.packages("rpart")
# install.packages("randomForest")
# install.packages("xgboost")
# install.packages("pdp")
library(foreach)
library(caret)
library(rpart)
library(randomForest)
library(xgboost)
library(pdp)
load("FrankfurtMain.Rda")
# drop identifier-like columns that should not enter the models
fr_immo$address <- NULL
fr_immo$quarter <- NULL
## Split data in training and test set (80/20, seeded for reproducibility)
set.seed(7345)
train <- sample(1:nrow(fr_immo), 0.8*nrow(fr_immo))
fr_test <- fr_immo[-train,]
fr_train <- fr_immo[train,]
## Bagging
# Using foreach: 100 bootstrap samples, one tree each; predictions cbind-ed
# into a (test rows) x 100 matrix
y_tbag <- foreach(m = 1:100, .combine = cbind) %do% {
rows <- sample(nrow(fr_train), replace = T)
fit <- rpart(rent ~ ., data = fr_train[rows,], cp = 0.001)
predict(fit, newdata = fr_test)
}
# single tree vs. bagged average: averaging should reduce test error
postResample(y_tbag[,1], fr_test$rent)
postResample(rowMeans(y_tbag), fr_test$rent)
summary(apply(y_tbag,1,var))
# same bootstrap scheme with linear regression (a low-variance base
# learner, so bagging is expected to help far less)
y_rbag <- foreach(m = 1:100, .combine = cbind) %do% {
rows <- sample(nrow(fr_train), replace = T)
fit <- lm(rent ~ ., data = fr_train[rows,])
predict(fit, newdata = fr_test)
}
postResample(y_rbag[,1], fr_test$rent)
postResample(rowMeans(y_rbag), fr_test$rent)
summary(apply(y_rbag,1,var))
# Using caret: 5-fold CV control object reused by all models below
ctrl <- trainControl(method = "cv",
number = 5)
# bagging expressed as a random forest with mtry fixed to 17
# (presumably the full predictor count after the drops above — confirm)
set.seed(7324)
bag <- train(rent ~ .,
data = fr_train,
method = "rf",
trControl = ctrl,
tuneGrid = data.frame(mtry = 17),
importance = TRUE)
bag
plot(bag$finalModel)
varImp(bag)
# peek at the first split levels of two individual bagged trees
getTree(bag$finalModel, k = 1, labelVar = T)[1:10,]
getTree(bag$finalModel, k = 2, labelVar = T)[1:10,]
## Random Forest (mtry tuned over caret's default grid)
set.seed(7324)
rf <- train(rent ~ .,
data = fr_train,
method = "rf",
trControl = ctrl,
importance = TRUE)
rf
plot(rf)
plot(rf$finalModel)
varImp(rf)
# Inspect Forest
getTree(rf$finalModel, k = 1, labelVar = T)[1:10,]
getTree(rf$finalModel, k = 2, labelVar = T)[1:10,]
# partial dependence / ICE curves for two single predictors
pdp3 <- partial(rf, pred.var = "m2", ice = T, trim.outliers = T)
pdp4 <- partial(rf, pred.var = "dist_to_center", ice = T, trim.outliers = T)
p1 <- plotPartial(pdp3, rug = T, train = fr_train, alpha = 0.3)
p2 <- plotPartial(pdp4, rug = T, train = fr_train, alpha = 0.3)
grid.arrange(p1, p2, ncol = 2)
# 2-D partial dependence over the coordinates, drawn as a 3-D surface
pdp5 <- partial(rf, pred.var = c("lat", "lon"))
plotPartial(pdp5, levelplot = F, drape = T, colorkey = F, screen = list(z = 130, x = -60))
## Boosting: tune tree depth, number of rounds and learning rate;
## the remaining xgboost hyperparameters are held fixed
grid <- expand.grid(max_depth = 1:3,
nrounds = c(500, 1000),
eta = c(0.05, 0.01),
min_child_weight = 5,
subsample = 0.7,
gamma = 0,
colsample_bytree = 1)
grid
set.seed(7324)
xgb <- train(rent ~ .,
data = fr_train,
method = "xgbTree",
trControl = ctrl,
tuneGrid = grid)
xgb
plot(xgb)
varImp(xgb)
## CART: a single tree, tuning only the maximum depth
grid <- expand.grid(maxdepth = 1:30)
set.seed(7324)
cart <- train(rent ~ .,
data = fr_train,
method = "rpart2",
trControl = ctrl,
tuneGrid = grid)
cart
plot(cart)
varImp(cart)
## Linear regression baseline
set.seed(7324)
reg <- train(rent ~ .,
data = fr_train,
method = "glm",
trControl = ctrl)
reg
summary(reg)
varImp(reg)
## Comparison (rf, xgboost, rpart & glm) on the shared CV resamples
resamps <- resamples(list(Bagging = bag,
RandomForest = rf,
Boosting = xgb,
CART = cart,
Regression = reg))
resamps
summary(resamps)
bwplot(resamps, metric = c("RMSE", "Rsquared"), scales = list(relation = "free"), xlim = list(c(0, 500), c(0, 1)))
splom(resamps, metric = "RMSE")
splom(resamps, metric = "Rsquared")
# paired differences between models across the resamples
difValues <- diff(resamps)
summary(difValues)
## Prediction on the held-out test set
y_bag <- predict(bag, newdata = fr_test)
y_rf <- predict(rf, newdata = fr_test)
y_xgb <- predict(xgb, newdata = fr_test)
y_cart <- predict(cart, newdata = fr_test)
y_reg <- predict(reg, newdata = fr_test)
postResample(pred = y_bag, obs = fr_test$rent)
postResample(pred = y_rf, obs = fr_test$rent)
postResample(pred = y_xgb, obs = fr_test$rent)
postResample(pred = y_cart, obs = fr_test$rent)
postResample(pred = y_reg, obs = fr_test$rent)
|
695518d0ec123bb3464e0713ae9e53fb7a1ba9c1
|
e3fb5b8a7a00fde19e9101c4199c143081cf06c0
|
/tests/testthat/test-BTD_cov.R
|
e87b041355e3edebacca64408d9b58d80dd21bab
|
[
"MIT"
] |
permissive
|
lindemann09/singcar
|
8093ac944c0aaf24463fa881a7c70c9ae83fd66d
|
a2cbf841f1430fc6f0afba3da5e9b4ba0c850b38
|
refs/heads/master
| 2023-08-12T13:28:04.152637
| 2021-10-11T14:07:36
| 2021-10-11T14:07:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,486
|
r
|
test-BTD_cov.R
|
# Tests for BTD_cov() (Bayesian test of deficit with covariates).
test_that("summary input yields same result as raw", {
# empirical = TRUE forces the sample moments to exactly match mu/Sigma,
# so the raw-data call and the summary-statistics call see the same inputs
x <- MASS::mvrnorm(18, mu = c(100, 13),
Sigma = matrix(c(15^2, 0.65*15*3,
0.65*15*3, 3^2),
nrow = 2, byrow = T),
empirical = TRUE)
set.seed(123456)
sumstats <- BTD_cov(70, 13, c(100, 15), c(13, 3), use_sumstats = TRUE,
cor_mat = matrix(c(1, 0.65, 0.65, 1), nrow=2),
sample_size = 18)[["p.value"]]
set.seed(123456)
raw <- BTD_cov(70, 13, x[ , 1], x[ , 2])[["p.value"]]
# Monte Carlo p-values, hence the loose tolerance
expect_equal(sumstats, raw, tol = 0.01)
})
test_that("input of control_covar can be both dataframe and matrix", {
# binary sex covariate coded 0/1
size_weight_illusion$MF01 <- as.numeric(size_weight_illusion$SEX == "Female")
set.seed(123)
df <- BTD_cov(case_task = size_weight_illusion[1, "V_SWI"],
case_covar = unlist(size_weight_illusion[1, c("YRS", "MF01")]),
control_task = size_weight_illusion[-1, "V_SWI"],
control_covar = size_weight_illusion[-1, c("YRS", "MF01")], iter = 100)
set.seed(123)
mat <- BTD_cov(case_task = size_weight_illusion[1, "V_SWI"],
case_covar = unlist(size_weight_illusion[1, c("YRS", "MF01")]),
control_task = size_weight_illusion[-1, "V_SWI"],
control_covar = as.matrix(size_weight_illusion[-1, c("YRS", "MF01")]), iter = 100)
# same seed + same data => identical output regardless of covariate class
expect_equal(df, mat)
})
test_that("we get approx same results as C&G on BTD_cov", {
x <- MASS::mvrnorm(18, mu = c(100, 13),
Sigma = matrix(c(15^2, 0.65*15*3,
0.65*15*3, 3^2),
nrow = 2, byrow = T),
empirical = TRUE)
# p-values and intervals from C&G programs given these values
cg_ot <- c(0.04362 , -2.653, -1.071, 0.3987, 14.2189)
set.seed(1234597)
sc_ot <- BTD_cov(78, 13, x[ , 1], x[ , 2], iter = 10000)
# collect p-value plus the four interval bounds in the same order as cg_ot
sc_ot <- c(sc_ot[["p.value"]],
sc_ot[["interval"]][["Lower Z-CCC CI"]],
sc_ot[["interval"]][["Upper Z-CCC CI"]],
sc_ot[["interval"]][["Lower p CI"]],
sc_ot[["interval"]][["Upper p CI"]])
expect_equal(sc_ot, cg_ot, tolerance = 1e-2)
})
test_that("alternative hypotheses direction", {
x <- MASS::mvrnorm(18, mu = c(100, 13),
Sigma = matrix(c(15^2, 0.65*15*3,
0.65*15*3, 3^2),
nrow = 2, byrow = T),
empirical = TRUE)
# a case score above the control mean should give p > 0.5 under
# "less" and p < 0.5 under "greater"; mirrored for a score below
set.seed(123456234)
pos_z <- BTD_cov(105, 13, x[ , 1], x[ , 2],
iter = 1000, alternative = "less")[["p.value"]]
expect_equal(pos_z > 0.5, TRUE)
set.seed(123456234)
pos_z <- BTD_cov(105, 13, x[ , 1], x[ , 2],
iter = 1000, alternative = "greater")[["p.value"]]
expect_equal(pos_z < 0.5, TRUE)
set.seed(123456234)
neg_z <- BTD_cov(78, 13, x[ , 1], x[ , 2],
iter = 1000, alternative = "less")[["p.value"]]
expect_equal(neg_z < 0.5, TRUE)
set.seed(123456234)
neg_z <- BTD_cov(78, 13, x[ , 1], x[ , 2],
iter = 1000, alternative = "greater")[["p.value"]]
expect_equal(neg_z > 0.5, TRUE)
})
test_that("errors and warnings are occuring as they should for BTD", {
# each call violates one documented precondition and must raise the
# matching error message
expect_error(BTD_cov(1, 0, 0, 0, use_sumstats = TRUE, sample_size = NULL),
"Please supply both correlation matrix and sample size")
expect_error(BTD_cov(1, 0, 0, 0, use_sumstats = TRUE, sample_size = 20, cor_mat = NULL),
"Please supply both correlation matrix and sample size")
expect_error(BTD_cov(-2, 0, rnorm(15), rnorm(15), int_level = 1.1),
"Interval level must be between 0 and 1")
expect_error(BTD_cov(c(-2, 0), 0, rnorm(15), rnorm(15)),
"case_task should be single value")
expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = TRUE, sample_size = 20,
cor_mat = diag(c(-2, -2))),
"cor_mat is not positive definite")
expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = FALSE, sample_size = 20,
cor_mat = diag(2)),
"If input is summary data, set use_sumstats = TRUE")
expect_error(BTD_cov(-2, 0, c(0, 1), c(0, 1), use_sumstats = TRUE, sample_size = 20,
cor_mat = diag(3)),
"Number of variables and number of correlations does not match")
})
|
a7f1657b7401b305686b1829a26de25b7abf33f1
|
fbb23e88df629fc696b48844772f7db137d18460
|
/man/list_resource.Rd
|
6974e5a235e309d887055fd62710a2f435927da7
|
[] |
no_license
|
BigelowLab/genologicsr
|
4dc9941bdc7ad531baabb1dc010081a20a5e35fe
|
df5ed969f7258bff2cc29fba82dc07cce980d8c1
|
refs/heads/master
| 2020-04-04T21:15:23.113184
| 2018-07-19T18:32:55
| 2018-07-19T18:32:55
| 38,256,262
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 725
|
rd
|
list_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Lims.R
\name{list_resource}
\alias{list_resource}
\title{List URIs in a resource such as samples or containers.}
\usage{
list_resource(lims, resource, n = NA, ...)
}
\arguments{
\item{lims}{the LimsRefClass object to query}
\item{resource}{character the uri to get}
\item{n}{numeric, the maximum number of URI, NA to get all}
\item{...}{further arguments for httr::GET including \code{query} list}
}
\value{
character vector of zero or more URI
}
\description{
List URIs in a resource such as samples or containers.
}
\examples{
\dontrun{
# list the samples in a project
ss <- list_resource(lims,'samples', projectname = 'foobar')
}
}
|
7c782f9d9eb7ee86e695fdd624a89de83f788996
|
1a6005e76c759351314762364d93f193b4cab6a3
|
/R/leapyear.R
|
27d54cc7efa2b5aa9f2a5180f0e9e1e248941834
|
[] |
no_license
|
mariaherrera87/leapyear_Yi_Maria
|
70f44854d93620ff47753bad00b59b5d97d44ac7
|
a41a31f19c35578e878689cd810c6d9abe8b6939
|
refs/heads/master
| 2021-01-10T07:14:12.394447
| 2016-01-07T15:56:36
| 2016-01-07T15:56:36
| 49,210,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
leapyear.R
|
## Author Yi Maria
## Date Jan 7 2016
## This is a function to calculate whether a year is a leap year
#' Determine whether a year is a (Gregorian) leap year.
#'
#' @param year Numeric year; must be >= 1582, when the Gregorian calendar
#'   and its leap-year rule came into use.
#' @return Logical: TRUE if `year` is a leap year. For years before 1582 a
#'   message is printed instead (behavior kept from the original).
is.LeapYear <- function(year) {
if(!is.numeric(year)) {
stop("argument of class numeric expected")
}
## the leap year begin to use after 1582
else if(year<1582) {
print( "This year is out of the valid range")}
else{
## Full Gregorian rule: divisible by 4, EXCEPT century years unless
## divisible by 400 (the original only checked divisibility by 4,
## which wrongly flagged e.g. 1900 as a leap year).
result <- (year %% 4 == 0) & (year %% 100 != 0 | year %% 400 == 0)
return(result)
}
}
|
d4f653db63dd2fe1be51df25715b8df38a770431
|
26080f1eaf6f615d14c4b55e531250e6ecce4c8b
|
/R/mada_1x1.R
|
5361095024ae66a638935e8b6396fc3265d0159d
|
[] |
no_license
|
marjoleinbruijning/covid19-burden-madagascar
|
98fcd0b342a2045dbcb474b27cbd639484a81b44
|
4e14b69480ff00776d16433066408a09b1550d32
|
refs/heads/master
| 2022-11-21T13:45:41.081627
| 2020-07-23T19:50:49
| 2020-07-23T19:50:49
| 251,711,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,776
|
r
|
mada_1x1.R
|
# ------------------------------------------------------------------------------------------------
#' Getting mada estimates
# ------------------------------------------------------------------------------------------------
# Aggregates WorldPop 100 m age/sex population rasters for Madagascar up
# to 1x1 km, then sums population by admin1/2/3 units for downstream use.
# set up cluster on single node with do Parallel
library(doParallel)
cl <- makeCluster(3)
registerDoParallel(cl)
getDoParWorkers()
Sys.time()
library(raster)
library(data.table)
library(rgdal)
library(foreach)
library(iterators)
library(glue)
# Aggregate up files --------------------------------------------------------------------------
directory <- "wp_data/mada_100m_2020/"
out_dir <- "wp_data/mada_1x1km_2020/"
files <- list.files(directory, recursive = TRUE)
files <- files[grepl(".tif$", files)] # only tifs
# age band = third "_"-separated token of each file name (mdg_{sex}_{age}_2020.tif)
ages <- unique(unlist(lapply(strsplit(files, "_"), function(x) x[[3]])))
# one parallel task per age band: aggregate the male and female rasters
# from 100 m to 1 km (factor 10, summing cells), write both out, and
# return the total (M + F) cell values; cbind across tasks gives a
# cells x age-bands matrix
foreach(i = 1:length(ages), .combine = cbind, .packages = c("raster", "glue"),
.export = c("directory", "out_dir", "ages")) %dopar% {
popM <- raster(glue("{directory}mdg_m_{ages[i]}_2020.tif"))
popM <- aggregate(popM, fact = 10, fun = sum, na.rm = TRUE)
writeRaster(popM, glue("{out_dir}mdg_m_{ages[i]}_2020_1x1.tif"))
popF <- raster(glue("{directory}mdg_f_{ages[i]}_2020.tif"))
popF <- aggregate(popF, fact = 10, fun = sum, na.rm = TRUE)
writeRaster(popF, glue("{out_dir}mdg_f_{ages[i]}_2020_1x1.tif"))
pop <- popM + popF
values(pop)
} -> out_mat
colnames(out_mat) <- ages
fwrite(out_mat, "output/temp_out.gz")
# Read in shapefiles --------------------------------------------------------------------------
# template raster whose cell values are simply the cell indices
raster_base <- raster("wp_data/mada_1x1km_2020/mdg_f_0_2020_1x1.tif")
values(raster_base) <- 1:ncell(raster_base)
# Admin codes (pick finest scale and match accordingly)
admin3 <- readOGR("shapefiles/admin3.shp")
admin3 <- admin3[admin3$iso == "MDG", ]
admin3$id_match <- 1:length(admin3)
# per-cell index of the overlapping admin3 polygon (NA outside Madagascar)
id_match <- values(rasterize(admin3, raster_base, field = "id_match"))
# cell-level table: admin codes at all three levels + population by age band
mada_dt <- data.table(cell_id = values(raster_base), iso_code = "MDG",
admin1_code = admin3$id_1[id_match], admin2_code = admin3$id_2[id_match],
admin3_code = admin3$id_3[id_match], out_mat)
fwrite(mada_dt, "output/mada_dt.gz")
# population totals per admin unit (columns 6+ are the age-band columns)
mada_admin1 <- mada_dt[, lapply(.SD, sum, na.rm = TRUE), .SDcols = 6:ncol(mada_dt),
by = c("admin1_code")]
fwrite(mada_admin1, "output/mada_admin1.csv")
mada_admin2 <- mada_dt[, lapply(.SD, sum, na.rm = TRUE), .SDcols = 6:ncol(mada_dt),
by = c("admin2_code")]
fwrite(mada_admin2, "output/mada_admin2.csv")
mada_admin3 <- mada_dt[, lapply(.SD, sum, na.rm = TRUE), .SDcols = 6:ncol(mada_dt),
by = c("admin3_code")]
fwrite(mada_admin3, "output/mada_admin3.csv")
# Close out
stopCluster(cl)
Sys.time()
|
613604a6c1216c6f2ddfa9174cf2d1965eedd53b
|
6f1db09f6c708de5bd4de251d18bf3f229819f10
|
/Numerical response/PATH_PAPER_Figures_production.R
|
ab44a3c2aca6eb7c00b38b90e1bcf5b949a4a531
|
[] |
no_license
|
Sckende/Path-analysis
|
10f3d922e76f4acd5e696818469e47d7569c1e24
|
904d8f1984a9cc9770d35fb574109d358aedb271
|
refs/heads/master
| 2021-05-11T16:07:56.311589
| 2019-07-04T14:33:12
| 2019-07-04T14:33:12
| 117,251,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,905
|
r
|
PATH_PAPER_Figures_production.R
|
rm(list = ls()) #clean R memory
setwd(dir = "C:/Users/HP_9470m/OneDrive - Université de Moncton/Doc doc doc/Ph.D. - ANALYSES/R analysis/Data")
list.files()
#### WINTER, SPRING & SUMMER AO ####
# Winter AO = November to April
# Spring AO = 20 MAy - 20 June
# Summer AO =
AO<-read.csv("AO_saisonnier.txt", sep = ",", dec = ".")
head(AO)
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/AO_seasons.tiff",
# res=300,
# width=15,
# height= 25,
# pointsize=12,
# unit="cm",
# bg="transparent")
#x11()
par(mfrow = c(3, 1), mar = c(1, 5, 1, 1))
plot(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
AO$winAO[AO$YEAR <= 2016 & AO$YEAR >= 1996],
xlab = "",
ylab = "",
xaxp = c(1996, 2016, 10),
ylim = c(-2.0, 1.5),
bty = "n",
yaxt = "s",
xaxt = "n",
cex = 1,
cex.lab = 1,
cex.axis = 1.5,
col = "orange",
pch = 19,
lwd = 2,
type = 'b',
las = 2)
lines(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
rep(mean(AO$winAO[AO$YEAR <= 2016 & AO$YEAR >= 1996]), 21),
col = "orange",
type = "l",
lty = 4,
lwd = 2)
legend(1995,
1.8,
"Winter AO index",
bty = "n",
cex = 2,
text.col = "Orange")
plot(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
AO$sprAO[AO$YEAR <= 2016 & AO$YEAR >= 1996],
xlab = "",
ylab = "",
xaxp = c(1996, 2016, 10),
ylim = c(-2.0, 1.5),
bty = "n",
yaxt = "s",
xaxt = "n",
cex = 1,
cex.lab = 1,
cex.axis = 1.5,
col = "orange",
pch = 19,
lwd = 2,
type = 'b',
las = 2)
lines(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
rep(mean(AO$sprAO[AO$YEAR <= 2016 & AO$YEAR >= 1996]), 21),
col = "orange",
type = "l",
lty = 4,
lwd = 2)
legend(1995,
1.8,
"Spring AO index",
bty = "n",
cex = 2,
text.col = "Orange")
par(mar = c(2, 5, 1, 1))
plot(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
AO$sumAO[AO$YEAR <= 2016 & AO$YEAR >= 1996],
xlab = "",
ylab = "",
xaxp = c(1996, 2016, 10),
ylim = c(-2.0, 1.5),
bty = "n",
yaxt = "s",
xaxt = "n",
cex = 1,
cex.lab = 1,
cex.axis = 1.5,
col = "orange",
pch = 19,
lwd = 2,
type = 'b',
las = 2)
lines(AO$YEAR[AO$YEAR <= 2016 & AO$YEAR >= 1996],
rep(mean(AO$sumAO[AO$YEAR <= 2016 & AO$YEAR >= 1996]), 21),
col = "orange",
type = "l",
lty = 4,
lwd = 2)
axis(side = 1,
at = 1996:2016,
lwd = 1,
cex.axis = 1.5)
legend(1995,
1.8,
"Summer AO index",
bty = "n",
cex = 2,
text.col = "Orange")
dev.off()
#### TEMPERATURES AND RAINFALL PLOTS ####
gg <- read.table("GOOSE_breeding_informations_1989_2017.txt", sep = "\t", dec = ".", h = T)
head(gg); summary(gg)
tt <- read.table("TEMP_Tair moy 1989-2017 BYLCAMP.txt", sep = "\t", dec = ",", h = T)
head(tt); summary(tt)
AO <- read.table("AO_daily.txt", h = T, sep = "\t", dec = ".")
AO <- AO[AO$YEAR >= 1996 & !AO$YEAR == 2017,]; head(AO); summary(AO)
rain <- read.table("PREC_precipitation_Bylot_1995-2017.txt", h = T, sep = "\t", dec = ",")
rain <- rain[!rain$YEAR == 2017,]; head(rain); summary(rain)
rain <- na.omit(rain)
#### Julian day handling ####
#### Convert the GOOSE laying/hatching dates (day + month name) to Julian days
Sys.setlocale(category = "LC_TIME", locale = "English") # en_US.utf8 on Linux, en_US on macOS, English on Windows
# locale set to English so strptime can parse the month names (%B)
gg$lay_date_jj <- paste(gg$LAY_DATE, gg$YEAR, sep = " ")
gg$lay_date_jj <- strptime(gg$lay_date_jj, format = "%d %B %Y")
gg$lay_date_jj <- gg$lay_date_jj$yday +1
# same conversion for hatching dates; yday is 0-based, hence the + 1
gg$hatch_date_jj <- paste(gg$HATCH_DATE, gg$YEAR, sep = " ")
gg$hatch_date_jj <- strptime(gg$hatch_date_jj, format = "%d %B %Y")
gg$hatch_date_jj <- gg$hatch_date_jj$yday +1
#### Julian days for the TEMP (daily temperature) dates
head(tt)
tt$jj <- strptime(paste(tt$DAY, tt$MONTH, tt$YEAR, sep = "-"), format = "%d-%m-%Y")
tt$jj <- tt$jj$yday + 1
# Exemple de Vérification de la valeur des JJ pour les années bissextiles (366 jours - 1996, 2000, 2004, 2008, 2012, 2016) et non bissextiles
#NONbiss <- g[!(g$YEAR == 1996 | g$YEAR == 2000 | g$YEAR == 2004 | g$YEAR == 2008 | g$YEAR == 2012 | g$YEAR == 2016),]
#biss <- g[g$YEAR == 1996 | g$YEAR == 2000 | g$YEAR == 2004 | g$YEAR == 2008 | g$YEAR == 2012 | g$YEAR == 2016,]
#plot(biss$LAY_DATE, biss$lay_date_jj)
#plot(biss$HATCH_DATE, biss$hatch_date_jj)
#### Temperatures trends ####
#### Relations entre les temperatures et la periode de nidification de oies ####
x11(title = "Nidification temperature trends between 1989 & 2016 ")
#dev.off()
g <- gg[!gg$YEAR == 2017,]
t <- tt[!tt$YEAR == 2017,]
x11()
par(mfrow = c(5, 6))
#for (i in 1996:2016) {
for (i in g$YEAR){
plot(t$jj[t$jj >= g$lay_date_jj[g$YEAR == i] & t$jj <= g$hatch_date_jj[g$YEAR == i] & t$YEAR == i],
t$TEMP[t$jj >= g$lay_date_jj[g$YEAR == i] & t$jj <= g$hatch_date_jj[g$YEAR == i] & t$YEAR == i],
main = i,
xlab = "Julian day",
ylab = "temp",
ylim = c(-1, 14),
xlim = c(158, 195))
ajout <- with(t, smooth.spline(t$jj[t$jj >= g$lay_date_jj[g$YEAR == i] & t$jj <= g$hatch_date_jj[g$YEAR == i] & t$YEAR == i],
t$TEMP[t$jj >= g$lay_date_jj[g$YEAR == i] & t$jj <= g$hatch_date_jj[g$YEAR == i] & t$YEAR == i],
df = 2))
ajout
lines(ajout, col = "blue")
}
#dev.copy2pdf("Nidifi_temp_trends_1996-2015.pdf")
dev.off()
#### Rainfall trends ####
#### Relations entre les précipitations et la periode de nidification de oies ####
x11(title = "Nidification rainfall trends between 1995 & 2016 ")
#dev.off()
x11()
par(mfrow = c(4, 6))
for (i in unique(rain$YEAR)) {
plot(rain$JJ[rain$JJ >= g$lay_date_jj[g$YEAR == i] & rain$JJ <= g$hatch_date_jj[g$YEAR == i] & rain$YEAR == i],
rain$RAIN[rain$JJ >= g$lay_date_jj[g$YEAR == i] & rain$JJ <= g$hatch_date_jj[g$YEAR == i] & rain$YEAR == i],
main = i,
xlab = "Julian day",
ylab = "Rainfall",
ylim = c(-1, max(rain$RAIN)),
xlim = c(158, 195))
ajout <- with(rain,
smooth.spline(rain$JJ[rain$JJ >= g$lay_date_jj[g$YEAR == i] & rain$JJ <= g$hatch_date_jj[g$YEAR == i] & rain$YEAR == i],
rain$RAIN[rain$JJ >= g$lay_date_jj[g$YEAR == i] & rain$JJ <= g$hatch_date_jj[g$YEAR == i] & rain$YEAR == i],
df = 2))
ajout
lines(ajout, col = "blue")
}
#dev.copy2pdf("Nidifi_temp_trends_1996-2015.pdf")
dev.off()
#### Per-year weather summaries over the goose nesting period ####
#### (cumulative precipitation and mean temperature between the mean
#### laying and hatching dates of each year)
# NOTE(review): WEA is grown with rbind() inside the loop; fine for ~21
# years, but preallocating (or lapply + one rbind) would scale better.
WEA <- NULL
for(i in g$YEAR){
YEAR <- i
# mean laying and hatching dates (Julian days) for year i
LAY <- g$lay_date_jj[g$YEAR == i]
HATCH <- g$hatch_date_jj[g$YEAR == i]
# temperature statistics over the laying-to-hatching window
meanTEMP <- mean(t$TEMP[t$YEAR == i & t$jj <= HATCH & t$jj >= LAY])
sdTEMP <- sd(t$TEMP[t$YEAR == i & t$jj <= HATCH & t$jj >= LAY])
varTEMP <- var(t$TEMP[t$YEAR == i & t$jj <= HATCH & t$jj >= LAY])
c <- data.frame(YEAR, LAY, HATCH, meanTEMP, sdTEMP, varTEMP)
WEA <- rbind(WEA, c)
}
summary(WEA)
# rainfall summaries: whole-summer total and total within the nesting window
WEA.1 <- NULL
for(i in unique(rain$YEAR)){
YEAR <- i
LAY <- g$lay_date_jj[g$YEAR == i]
HATCH <- g$hatch_date_jj[g$YEAR == i]
summerRAIN <- sum(rain$RAIN[rain$YEAR == i])
cumRAIN <- sum(rain$RAIN[rain$YEAR == i & rain$JJ <= HATCH & rain$JJ >= LAY])
c <- data.frame(YEAR, LAY, HATCH, summerRAIN,cumRAIN)
WEA.1 <- rbind(WEA.1, c)
}
# join temperature and rainfall tables on the shared columns (YEAR, LAY, HATCH)
WEA <- merge(WEA, WEA.1, all.x = TRUE)
#### Superposition of temperature and precipitation in time ####
#
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/prec_temp.tiff",
# res=300,
# width=20,
# height=15,
# pointsize=12,
# unit="cm",
# bg="transparent")
#x11()
#par(oma=c(0,0,0,3)) # outer margin
par(mar=c(5,5,1,5)) # inner margin - default parameter is par("mar") <- 5.1 4.1 4.1 2.1
plot(WEA$YEAR,
WEA$cumRAIN,
xlab = "Year",
ylab = "",
xaxp = c(1996, 2016, 10),
ylim = c(0, 150),
bty = "n",
yaxt = "n",
xaxt = "n",
cex = 1,
cex.lab = 1,
col = "darkblue",
pch = 19,
lwd = 2,
type = 'b')
lines(WEA$YEAR,
rep(mean(WEA$cumRAIN), 21),
col = "darkblue",
type = "l",
lty = 4,
lwd = 2)
axis(side = 4,
lwd = 1,
las = 2)
#mtext(side = 4,
# line = 3,
# "Rainfall (mm)",
# las = 2)
par(new = T)
plot(WEA$YEAR,
WEA$meanTEMP,
xlab = "",
ylab = "",
ylim = c(0, 7),
bty = "n",
yaxt = "n",
xaxt = "n",
cex = 1,
cex.lab = 1,
col = "chocolate",
pch = 17,
type = 'b',
lwd = 2)
lines(WEA$YEAR,
rep(mean(WEA$meanTEMP), 21),
col = "chocolate",
type = "l",
lty = 4,
lwd = 2)
axis(side = 1,
at = 1996:2016,
lwd = 1)
axis(side = 2,
lwd = 1,
las = 2,
at = 0:7)
#mtext(side = 2,
# line = 3,
# "Mean temperature (c)",
# las = 2)
dev.off()
#### GOOSE - LEMMING - FOX PLOT ####
lmg <- read.table("LEM_1993-2017.txt", sep = "\t", dec = ",", h = T)
lmg <- lmg[lmg$YEAR >= 1996 & !lmg$YEAR == 2017,]; head(lmg); summary(lmg)
fox <- read.table("FOX_abundance_Chevallier.txt", sep = "\t", dec = ",", h = T)
fox <- fox[fox$year >= 1996 & !fox$year == 2017,]; head(fox); summary(fox)
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V2/Figures/fox_lmg_gee.tiff",
# res=300,
# width=20,
# height=15,
# pointsize=12,
# unit="cm",
# bg="transparent")
#x11()
#par(oma=c(0,0,0,3)) # outer margin
par(mar=c(5,5,1,5)) # inner margin - default parameter is par("mar") <- 5.1 4.1 4.1 2.1
plot(lmg$YEAR,
lmg$LMG_C1_CORR,
xlab = "",
ylab = "",
xaxp = c(1996, 2016, 10),
ylim = c(0, 12),
bty = "n",
yaxt = "n",
xaxt = "n",
cex = 1,
cex.lab = 1,
cex.axis = 1,
col = "chartreuse3",
type = 'h',
lwd = 4)
axis(side = 2,
lwd = 1,
las = 2,
cex.axis = 1)
axis(side = 1,
at = 1996:2016,
lwd = 1,
cex.axis = 1)
par(new = T)
plot(g$YEAR,
g$NEST_SUCC,
xlab = "",
ylab = "",
ylim = c(0, 1),
bty = "n",
yaxt = "n",
xaxt = "n",
cex = 1,
cex.lab = 1,
col = "darkolivegreen4",
pch = 17,
type = 'b',
lwd = 2)
lines(fox$year,
fox$prop_natal_dens/100,
col = "dodgerblue4",
pch = 19,
type = 'b',
lwd = 2)
axis(side = 4,
lwd = 1,
las = 2,
cex.axis = 1)
#mtext(side = 4,
# line = 3,
# "Goose nesting succeess & fox breeding dens proportion")
dev.off()
#### FOX VS. LEMMING PLOT ####
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/fox_vs_lmg.tiff",
# res=300,
# width=20,
# height=15,
# pointsize=12,
# unit="cm",
# bg="transparent")
#x11()
par(mar=c(3,3,1,1)) # inner margin - default parameter is par("mar") <- 5.1 4.1 4.1 2.1
plot(lmg$LMG_C1_CORR,
fox$prop_natal_dens,
xlim = c(0, 10),
ylim = c(0, 40),
col = "dodgerblue4",
bty = "n",
pch = 16,
type = "p",
lwd = 2,
cex.axis = 1,
xlab = "",
ylab = "",
las = 2,
xaxt = "n")
axis(side = 1,
lwd = 1,
cex.axis = 1)
lines(smooth.spline(lmg$LMG_C1_CORR,
fox$prop_natal_dens,
df = 3),
col = "dodgerblue4",
lwd = 2)
# Plot confident intervals
fit <- smooth.spline(lmg$LMG_C1_CORR,
fox$prop_natal_dens, df = 3) # smooth.spline fit
res <- (fit$yin - fit$y)/(1-fit$lev) # jackknife residuals
sigma <- sqrt(var(res)) # estimate sd
upper <- fit$y + 2.0*sigma*sqrt(fit$lev) # upper 95% conf. band
lower <- fit$y - 2.0*sigma*sqrt(fit$lev) # lower 95% conf. band
par(new = T)
matplot(fit$x, cbind(upper, lower), type="l", lty = "dotdash", ylim = c(0, 40), xaxt = "n", yaxt = "n", ylab = "", xlab = "", bty = "n", col = "dodgerblue4")
#lines(smooth.spline(lmg$LMG_C1_CORR,
# fox$prop_natal_dens,
# df = 2),
# col = "darkgoldenrod3",
# lwd = 3)
#legend(0,
# 40,
# legend = c("df = 3", "df = 2"),
# col = c("dodgerblue4", "darkgoldenrod3"),
# pch = "-",
# lwd = 3,
# bty = "n")
dev.off()
# OR ELSE
#rm(list = ls()) #clean R memory
setwd(dir = "C:/Users/HP_9470m/OneDrive - Université de Moncton/Doc doc doc/Ph.D. - ANALYSES/R analysis/Data")
mC1 <- read.table("mC1_path_data.txt", h = T)
#Package nécessaires
require(nlme)
require(lme4)
require(piecewiseSEM)
require(ggm)
g <- glm(prop_fox_dens ~ lmg_C1_CORR, weights = monit_dens, data = mC1, family = binomial)
summary(g)
# plot predicted values on raw data
range(mC1$lmg_C1_CORR)
# For creation new dataframe for lmg values simulation
v <- seq(0, 10, by = 0.1)
p <- predict(g, newdata = data.frame(lmg_C1_CORR = v), type = "response", se.fit = TRUE)
plot(mC1$lmg_C1_CORR, mC1$prop_fox_dens)
lines(p$fit, type = "l", col = "green")
#
g2 <- glm(cbind(breed_dens, monit_dens - breed_dens) ~ lmg_C1_CORR, data = mC1, family = binomial)
summary(g2)
p2 <- predict(g2, newdata = data.frame(lmg_C1_CORR = v), type = "response", se.fit = TRUE)
par(las = 1)
#
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/fox_vs_lmg_v3.tiff",
# res=300,
# width=20,
# height=15,
# pointsize=12,
# unit="cm",
# bg="transparent")
plot(mC1$lmg_C1_CORR, mC1$prop_fox_dens,
xlim = c(0, 10),
ylim = c(0, 0.40),
col = "dodgerblue4",
bty = "n",
pch = 16,
type = "p",
lwd = 2,
cex.axis = 1,
xlab = "",
ylab = "")
lines(v, p2$fit,
col = "dodgerblue4",
lwd = 1)
lines(v, (p2$fit - 1.96 * p2$se.fit), type = "l", col = "dodgerblue4", lty = "dashed")
lines(v, (p2$fit + 1.96 * p2$se.fit), type = "l", col = "dodgerblue4", lty = "dashed")
dev.off()
#
g3 <- glm(cbind(breed_dens, monit_dens - breed_dens) ~ lmg_C1_CORR + winAO + cumul_prec + MEAN_temp, data = mC1, family = binomial)
summary(g3)
plot(mC1$AN, mC1$prop_fox_dens)
lines(mC1$AN, mC1$prop_fox_dens)
lines(predict(g3, type = "response"), col = "red", lty = "dashed")
# Log of lemming abundance
#g4 <- glm(cbind(breed_dens, monit_dens - breed_dens) ~ log(lmg_C1_CORR) + winAO + cumul_prec + MEAN_temp, data = mC1, family = binomial)
g4 <- glm(cbind(breed_dens, monit_dens - breed_dens) ~ log(lmg_C1_CORR), data = mC1, family = binomial)
summary(g4)
Eg4 <- resid(g4, type = "pearson")
sum(Eg4^2) / (g4$df.residual)
# plot predicted values on raw data
range(log(mC1$lmg_C1_CORR))
# For creation new dataframe for lmg values simulation
xv <- seq(-4, 2.30, by = 0.1)
yv <- predict(g4, list(lmg_C1_CORR = exp(xv)), type = "response", se.fit = TRUE)
p <- mC1$breed_dens/(mC1$monit_dens)
plot(p ~ log(mC1$lmg_C1_CORR), ylab = "Proportion breeding dens")
lines(yv$fit ~ xv, col = "red")
lines((yv$fit - 1.96 * yv$se.fit) ~ xv, type = "l", col = "dodgerblue4", lty = "dashed")
lines((yv$fit + 1.96 * yv$se.fit) ~ xv, type = "l", col = "dodgerblue4", lty = "dashed")
# Log of lemming abundance without 2000
mC1bis <- mC1[!(mC1$AN == 2000),]
g5 <- glm(cbind(breed_dens, monit_dens - breed_dens) ~ log(lmg_C1_CORR) + winAO + cumul_prec + MEAN_temp, data = mC1bis, family = binomial)
summary(g5)
Eg5 <- resid(g5, type = "pearson")
sum(Eg5^2) / (g5$df.residual) # Overdisp. = 1.13 for the complete modele
# plot predicted values on raw data
range(log(mC1$lmg_C1_CORR))
# For creation new dataframe for lmg values simulation
xv <- seq(-4, 2.30, by = 0.1)
yv <- predict(g5, list(lmg_C1_CORR = exp(xv), winAO = rep(g5$coefficients[3], 64), cumul_prec = rep(g5$coefficients[4], 64), MEAN_temp = rep(g5$coefficients[5], 64)), type = "response", se.fit = TRUE)
utils::View(yv)
p <- mC1$breed_dens/(mC1$monit_dens)
plot(p ~ log(mC1$lmg_C1_CORR), ylab = "Proportion breeding dens")
lines(yv$fit ~ xv, col = "red")
lines((yv$fit - 1.96 * yv$se.fit) ~ xv, type = "l", col = "dodgerblue4", lty = "dashed")
lines((yv$fit + 1.96 * yv$se.fit) ~ xv, type = "l", col = "dodgerblue4", lty = "dashed")
#### FOX VS. LEMMING PLOT WITHOUT 2000 ####
# png("fox_vs_lem_WITHOUT_2000.tiff",
# res=300,
# width=20,
# height=15,
# pointsize=12,
# unit="cm",
# bg="transparent")
#x11()
plot(lmg$LMG_C1_CORR[!lmg$YEAR == 2000],
fox$prop_natal_dens[!fox$year == 2000],
xlim = c(0, 10),
ylim = c(0, 40),
col = "dodgerblue4",
bty = "n",
pch = 16,
#type = "p",
lwd = 3,
xlab = "Lemming abundance",
ylab = "Proportion of fox breeding dens")
lines(smooth.spline(lmg$LMG_C1_CORR[!lmg$YEAR == 2000],
fox$prop_natal_dens[!fox$year == 2000],
df = 3),
col = "dodgerblue4",
lwd = 3)
dev.off()
#### FOX vs. LEMMING PLOT - LAST PLOT ####
m <- glm(prop_fox_dens ~ I(log(lmg_C1_CORR)) + cumul_prec + MEAN_temp + winAO, weights = monit_dens, data = mC1, family = binomial(link="logit"))
plot(mC1$lmg_C1_CORR,
mC1$prop_fox_dens,
xlim = c(0, 10),
ylim = c(0, 0.5),
bty = "n",
las = 1,
col = "dodgerblue4",
pch = 16,
#type = "p",
lwd = 3,
xlab = "Lemming abundance",
ylab = "Proportion of fox breeding dens")
v <- seq(0, 10, by = 0.01)
newdat <- data.frame(lmg_C1_CORR = v, cumul_prec = mean(mC1$cumul_prec), MEAN_temp = mean(mC1$MEAN_temp), winAO = mean(mC1$winAO))
p <- predict(m, newdata = newdat, type = "response", se.fit = TRUE)
lines(v,
p$fit,
col = "dodgerblue4",
lwd = 2)
lines(v,
p$fit + 1.96*p$se.fit,
col = "dodgerblue4",
lwd = 1.5,
lty = "dashed")
lines(v,
p$fit - 1.96*p$se.fit,
col = "dodgerblue4",
lwd = 1.5,
lty = "dashed")
#### SAME PLOT BUT WITH SIMPLIER MODEL ####
# Collapse mC1 to one value per year (AN): proportions/abundance are assumed
# constant within year (unique), weather covariates are averaged — TODO confirm.
prop_fox_dens <- as.numeric(tapply(mC1$prop_fox_dens, mC1$AN, unique))
monit_dens <- as.numeric(tapply(mC1$monit_dens, mC1$AN, unique))
lmg_C1_CORR <- as.numeric(tapply(mC1$lmg_C1_CORR, mC1$AN, unique))
winAO <- as.numeric(tapply(mC1$winAO, mC1$AN, mean))
cumul_prec <- as.numeric(tapply(mC1$cumul_prec, mC1$AN, mean))
MEAN_temp <- as.numeric(tapply(mC1$MEAN_temp, mC1$AN, mean))
# Yearly data frame, 1996-2016 (hard-coded year range — must match mC1$AN levels).
oth <- as.data.frame(cbind(YEAR = 1996:2016, prop_fox_dens, monit_dens, lmg_C1_CORR, winAO, cumul_prec, MEAN_temp))
summary(oth)
# Same weighted logistic model as above, fitted on the yearly data.
m <- glm(prop_fox_dens ~ I(log(lmg_C1_CORR)) + cumul_prec + MEAN_temp + winAO, weights = monit_dens, data = oth, family = binomial(link="logit"))
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/fox_vs_lem_LAST_PLOT_simplier model.tiff",
#     res=300,
#     width=20,
#     height=15,
#     pointsize=12,
#     unit="cm",
#     bg="transparent")
# Observed yearly proportions vs. lemming abundance.
plot(oth$lmg_C1_CORR,
     oth$prop_fox_dens,
     xlim = c(0, 10),
     ylim = c(0, 0.5),
     bty = "n",
     las = 1,
     col = "dodgerblue4",
     pch = 16,
     #type = "p",
     lwd = 3,
     xlab = "Lemming abundance",
     ylab = "Proportion of fox breeding dens")
# Prediction grid; other covariates held at yearly means.
v <- seq(0, 10, by = 0.01)
newdat <- data.frame(lmg_C1_CORR = v, cumul_prec = mean(oth$cumul_prec), MEAN_temp = mean(oth$MEAN_temp), winAO = mean(oth$winAO))
p <- predict(m, newdata = newdat, type = "response", se.fit = TRUE)
# Fitted curve plus ~95% pointwise confidence limits.
lines(v,
      p$fit,
      col = "dodgerblue4",
      lwd = 2)
lines(v,
      p$fit + 1.96*p$se.fit,
      col = "dodgerblue4",
      lwd = 1.5,
      lty = "dashed")
lines(v,
      p$fit - 1.96*p$se.fit,
      col = "dodgerblue4",
      lwd = 1.5,
      lty = "dashed")
dev.off()
#### Plot SN vs. prec ####
# Two-panel figure: goose nesting success (SN, binary) vs. cumulative
# precipitation (left, cloglog link) and vs. mean temperature (right, logit link).
setwd(dir = "C:/Users/HP_9470m/OneDrive - Université de Moncton/Doc doc doc/Ph.D. - ANALYSES/R analysis/Data")
#rm(list = ls()) #clean R memory
f <- read.table("Path analysis_data 3bis.txt", sep = ",", dec = ".", h = T)
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 3 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V1/Figures/goose vs prec_temp.tiff",
#     res=300,
#     width=25,
#     height=15,
#     pointsize=12,
#     unit="cm",
#     bg="transparent")
par(mfrow = c(1, 2))
# SN vs. prec
k3 <- glm(SN ~ cumul_prec, data = f, family = binomial(link = "cloglog"))
summary(k3)
range(f$cumul_prec)
# Prediction grid spanning the observed precipitation range (0-69 mm).
xprec <- seq(0, 69, 0.01)
ySN <- predict(k3, list(cumul_prec = xprec), type = 'response')
# Plot values
require(scales) # For the transparency of points - alpha()
par(mar = c(5.1, 4.1, 5, 0.1))
plot(f$cumul_prec, f$SN, pch = 16, xlab = '', ylab = '', ylim = c(0, 1), bty = 'n', col = alpha('olivedrab', 0.4), yaxt = 'n', xaxt = 'n')
lines(xprec, ySN, col = 'olivedrab', lwd = 2)
axis(side = 1, lwd = 1)
axis(side = 2, lwd = 1)
#legend(65, 1.06, "(a)", bty = "n")
# SN vs. temp
k2 <- glm(SN ~ MEAN_temp, data = f, family = binomial(link = "logit"))
summary(k2)
range(f$MEAN_temp)
# Prediction grid spanning the observed temperature range (-0.85 to 8.98 C).
xtemp <- seq(-0.85, 8.98, 0.01)
ySN <- predict(k2, list(MEAN_temp = xtemp), type = 'response')
# Plot values
require(scales) # For the transparency of points
par(mar = c(5.1, 0, 5, 2.1))
plot(f$MEAN_temp, f$SN, pch = 16, xlab = '', ylab = '', ylim = c(0, 1), bty = 'n', col = alpha('olivedrab', 0.4), yaxt = 'n', xaxt = 'n', xlim = c(-1, 9))
lines(xtemp, ySN, col = 'olivedrab', lwd = 2)
axis(side = 1, lwd = 1, xaxp = c(-1, 9, 10))
#axis(side = 2, lwd = 1)
#legend(8.5, 1.3, "(b)", bty = "n")
# dev.off()
#### Plot prop of success vs. cumul prec - LAST VERSION ####
# Same method used with lmg vs. fox plot and based on Gilles comments
# Mixed logistic model (year random intercept), then raw data binned into
# precipitation categories so observed success proportions can be overlaid.
require(lme4)
require(scales)
m <- glmer(SN ~ prop_fox_dens + cumul_prec + MEAN_temp + (1|AN), data = f, family = binomial(link = "logit"))
summary(m)
plot(f$cumul_prec, f$SN, pch = 16, xlab = '', ylab = '', ylim = c(0, 1), bty = 'n', col = alpha('olivedrab', 0.4), yaxt = 'n', xaxt = 'n')
# Prediction grid over precipitation; other fixed effects at their means,
# random effects excluded (re.form = NA). v1/p1 are reused by the final figure.
v1 <- seq(0, 70, by = 0.01)
newdat <- data.frame(cumul_prec = v1, prop_fox_dens = mean(f$prop_fox_dens), MEAN_temp = mean(f$MEAN_temp))
p1 <- predict(m, newdata = newdat, type = "response", re.form = NA) # se.fit doesn't work with glmer
#plot(f$cumul_prec, jitter(f$SN)) # jitter() allows to see the variability of points
plot(v1, p1, ylim = c(0, 1), type = "l", bty = "n")
# Delimitation of categories to plot transformed raw data
nn <- 50
#f$rain_CAT <- cut(f$cumul_prec, breaks = seq(-5, 70, 5))# Creation of precipitation categorical variable to plot raw data
f$rain_CAT <- cut(f$cumul_prec, breaks = seq(min(f$cumul_prec), max(f$cumul_prec), length.out = nn))
xaxis <- seq(min(f$cumul_prec), max(f$cumul_prec), length.out = nn)
# f$rain_CAT <- lapply(strsplit(as.character(f$rain_CAT),"(|\\,|\\]"),function(i){
#
# })
rain.DF <- split(f, f$rain_CAT) # Split dataframe into a list, based on the rainfall categorical variable rain.DF levels
# Per precipitation bin: number of successes (V1), number of nests (V2) and
# observed proportion of success (V3). Downstream plotting relies on PROP1$V3.
PROP1 <- NULL
for (i in 1:length(rain.DF)){
  succ <- sum(rain.DF[[i]]$SN)
  tot <- dim(rain.DF[[i]])[1]
  prop <- succ/tot
  # print(succ)
  # print(tot)
  print(prop)
  c <- c(succ, tot, prop)
  PROP1 <- as.data.frame(rbind(PROP1, c))
}
PROP1
#### Plot prop of success vs. mean temp - LAST VERSION ####
# Same method used with lmg vs. fox plot and based on Gilles comments
# Same mixed model as the precipitation panel; raw data binned by temperature.
require(lme4)
m <- glmer(SN ~ prop_fox_dens + cumul_prec + MEAN_temp + (1|AN), data = f, family = binomial(link = "logit"))
# summary(m)
#
# summary(f$MEAN_temp)
# Prediction grid over temperature; other fixed effects at their means,
# random effects excluded. v/p are reused by the final figure.
v <- seq(-0.5, 9, by = 0.01)
newdat <- data.frame(MEAN_temp = v, prop_fox_dens = mean(f$prop_fox_dens), cumul_prec = mean(f$cumul_prec))
p <- predict(m, newdata = newdat, type = "response", re.form = NA) # se.fit doesn't work with glmer
#plot(f$cumul_prec, jitter(f$SN)) # jitter() allows to see the variability of points
#plot(v, p, ylim = c(0, 1), type = "l", bty = "n")
# Delimitation of categories to plot transformed raw data
# nn (number of bins) is defined in the precipitation section above.
f$temp_CAT <- cut(f$MEAN_temp, breaks = seq(min(f$MEAN_temp), max(f$MEAN_temp), length.out = nn))
xaxis.2 <- seq(min(f$MEAN_temp), max(f$MEAN_temp), length.out = nn)
temp.DF <- split(f, f$temp_CAT) # Split dataframe into a list, based on the rainfall categorical variable rain.DF levels
# Per temperature bin: successes (V1), total nests (V2), proportion (V3).
PROP <- NULL
for (i in 1:length(temp.DF)){
  succ <- sum(temp.DF[[i]]$SN)
  tot <- dim(temp.DF[[i]])[1]
  prop <- succ/tot
  # print(succ)
  # print(tot)
  print(prop)
  c <- c(succ, tot, prop)
  PROP <- as.data.frame(rbind(PROP, c))
}
PROP <- cbind(PROP, levels(f$temp_CAT))
PROP
# Final two-panel figure: binned observed success proportions (points) with
# model-predicted curves from the two sections above (v1/p1 = precipitation,
# v/p = temperature). The first bin is dropped ([-1]) to match cut() intervals.
# png("C:/Users/HP_9470m/Dropbox/PHD. Claire/Chapitres de thèse/CHAPTER 1 - Path analysis/FOX numerical response/ARTICLE Ph.D. 3/VERSION FINALE V2/Figures/goose vs temp&prec_Last_Version.tiff",
#     res=300,
#     width=25,
#     height=15,
#     pointsize=12,
#     unit="cm",
#     bg="transparent")
par(mfrow = c(1, 2))
par(mar = c(5.1, 4.1, 5, 0.1))
# Left panel: success proportion vs. cumulative precipitation.
plot(xaxis[-1],
     PROP1$V3,
     ylim = c(0, 1),
     pch = 16,
     xlab = '',
     ylab = '',
     bty = 'n',
     col = "olivedrab",
     yaxt = 'n',
     xaxt = 'n')
lines(v1, p1, col = 'olivedrab', lwd = 2)
axis(side = 1, lwd = 1)
axis(side = 2, lwd = 1, las = 1)
par(mar = c(5.1, 0, 5, 2.1))
# Right panel: success proportion vs. mean temperature.
plot(xaxis.2[-1],
     PROP$V3,
     ylim = c(0, 1),
     xlim = c(-1, 9),
     pch = 16,
     xlab = '',
     ylab = '',
     bty = 'n',
     col = "olivedrab",
     yaxt = 'n',
     xaxt = 'n')
lines(v, p, col = 'olivedrab', lwd = 2, xlim = c(-0.5, 9))
axis(side = 1, at = -1:9, lwd = 1)
dev.off()
#### Autocorrelation tests ####
# For AO
# Arctic Oscillation indices: build annual time series for the winter, spring
# and summer seasons (columns 3, 5, 11 of the file — TODO confirm mapping),
# then plot the series and their ACFs.
AO <- read.csv("AO_saisonnier.txt", sep = ",", dec = ".")
AO.ts <- ts(AO[-length(AO$YEAR),c(3, 5, 11)], start = 1950, frequency = 1)
summary(AO.ts)
# Restricted series matching the study period (1989-2016).
AO.ts.2 <- ts(AO[AO$YEAR >= 1989 & AO$YEAR < 2017, c(3, 5, 11)], start = 1989, frequency = 1)
summary(AO.ts.2)
# From 1950 to 2016
data <- AO.ts
ax <- 1950:2016
#n <- 68
n <- 5
lag <- 1:n
# From 1989 to 2016
# NOTE(review): this reassigns data/ax/n — only the last selection is plotted below.
data <- AO.ts.2
ax <- 1989:2016
n <- ((2016-1989)/2)
lag <- 1:n
x11()
# 3 x 2 layout: left column = time series, right column = ACFs.
layout(matrix(c(1,2,3,4, 5, 6), 3, 2, byrow = FALSE))
par(mar=c(1, 4.1, 4.1, 2.1))
plot(data[,1], bty = "n", main = "", xaxt = "n", ylab = "winAO", xlab = "", type = "b")
par(mar=c(1, 4.1, 1.5, 2.1))
plot(data[,2], bty = "n", main = "", xaxt = "n", ylab = "sprAO", xlab = "", type = "b")
par(mar=c(5.1, 4.1, 1.5, 2.1))
plot(data[,3], bty = "n", main = "", xaxt = "n", ylab = "sumAO", xlab = "Time", type = "b")
axis(1, ax)
#apply(data, MARGIN = 2, acf, main = "", bty = "n") # No temporal autocorrelation
par(mar=c(1, 4.1, 4.1, 2.1))
acf(data[,1], lag.max = n, bty = "n", main = "", xaxt = "n", ylab = "winAO ACF", xlab = "")
par(mar=c(1, 4.1, 1.5, 2.1))
acf(data[,2], lag.max = n, bty = "n", main = "", xaxt = "n", ylab = "sprAO ACF", xlab = "")
par(mar=c(5.1, 4.1, 1.5, 2.1))
acf(data[,3], lag.max = n, bty = "n", main = "", xaxt = "n", ylab = "sumAO ACF", xlab = "Lag")
axis(1, lag)
# Rain & temperature between annual initiation and hatching date - WEA dataframe
# Assumes WEA (annual weather summary, cols 4 = mean temp, 8 = cumulative
# precipitation — TODO confirm) was built earlier in the script.
head(WEA)
WEA.ts <- ts(WEA[, c(4,8)], start = 1989, frequency = 1)
head(WEA.ts)
x11()
#ts.plot(WEA.ts)
plot(WEA.ts, bty = "n", type = "b")
#apply(WEA.ts, MARGIN = 2, acf)
x11()
# 2 x 2 layout: left column = time series, right column = ACFs.
layout(matrix(c(1,2,3,4), 2, 2, byrow = FALSE))
par(mar=c(1, 4.1, 4.1, 2.1))
plot(WEA.ts[,1], xaxt = "n", xlab = "", bty = "n", type = "b", ylab = "mean temperature (C)")
par(mar=c(5.1, 4.1, 1.5, 2.1))
plot(WEA.ts[,2], xaxt = "n", xlab = "Time", bty = "n", type = "b", ylab = "Cumulative precipitation (mm)")
axis(1, 1989:2016)
# --- #
par(mar=c(1, 4.1, 4.1, 2.1))
acf(WEA.ts[,1], xlab = "", bty = "n",lag.max = 27, ylab = "Mean temp. ACF", main = "", xaxt = "n")
axis(1, 1:27)
par(mar=c(5.1, 4.1, 1.5, 2.1))
# Precipitation series contains NAs; drop them before the ACF.
rain.ts <- na.omit(WEA.ts[,2])
acf(rain.ts, xlab = "Lag", bty = "n", lag.max = 21, ylab = "Cum. prec. ACF", xaxt = "n")
axis(1, 1:21)
# Lemming abundance
# ACF diagnostics for the biotic series: lemmings, goose nesting success and
# fox breeding-den proportion. Assumes g (goose data) is already loaded.
lmg <- read.table("LEM_1993-2017.txt", sep = "\t", dec = ",", h = T)
head(lmg); summary(lmg)
lmg.ts <- ts(lmg$LMG_C1_CORR, start = 1993, frequency = 1)
plot(lmg.ts, type = "b")
acf(lmg.ts, na.action = na.pass)
# Goose nesting success
head(g)
acf(g$NEST_SUCC)
# Fox breeding proportion
fox <- read.table("FOX_abundance_Chevallier.txt", sep = "\t", dec = ",", h = T)
head(fox); summary(fox)
names(fox)[1] <- "YEAR"
acf(fox$prop_natal_dens)
# Merge the three series by year (left joins keep all goose years).
zoo <- merge(g[,c(1, 6)], fox[,c(1, 4)], all.x = TRUE )
zoo <- merge(zoo, lmg[, c(1, 6)], all.x = TRUE)
zoo.ts <- ts(zoo[,-1], start = 1989, frequency = 1)
head(zoo.ts)
x11()
# 3 x 2 layout: left column = time series, right column = ACFs.
layout(matrix(c(1,2,3,4, 5, 6), 3, 2, byrow = FALSE))
par(mar=c(1, 4.1, 4.1, 2.1))
plot(zoo.ts[,1], bty = "n", main = "", xaxt = "n", ylab = "Goose nesting success prop.", xlab = "", type = "b")
par(mar=c(1, 4.1, 1.5, 2.1))
plot(zoo.ts[,2], bty = "n", main = "", xaxt = "n", ylab = "Fox breed. dens prop.", xlab = "", type = "b")
par(mar=c(5.1, 4.1, 1.5, 2.1))
plot(zoo.ts[,3], bty = "n", main = "", xaxt = "n", ylab = "Lemming abun.", xlab = "Time", type = "b")
axis(1, 1989:2016)
# --- #
par(mar=c(1, 4.1, 4.1, 2.1))
acf(zoo.ts[,1], lag.max = 27, bty = "n", main = "", ylab = "Goose ACF", xlab = "", xaxt = "n")
axis(1, 1:27)
par(mar=c(1, 4.1, 1.5, 2.1))
acf(zoo.ts[,2], lag.max = 22, bty = "n", main = "", ylab = "Fox ACF", xlab = "", na.action = na.pass, xaxt = "n")
axis(1, 1:22)
par(mar=c(5.1, 4.1, 1.5, 2.1))
acf(zoo.ts[,3], lag.max = 20, bty = "n", main = "", ylab = "Lemming ACF", xlab = "Lag", na.action = na.pass, xaxt = "n")
axis(1, 1:20)
|
fd1187bc8b7002f715d645ca829c3e56f34fe069
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LSAfun/examples/multicos.Rd.R
|
3079b83fc24c5245d7cfb0a31ff1fa0959b9a4f9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
multicos.Rd.R
|
# Extracted example for LSAfun::multicos (vector-by-vector cosine comparison).
library(LSAfun)
### Name: multicos
### Title: Vector x Vector Comparison
### Aliases: multicos
### ** Examples

# Load the demo semantic space shipped with LSAfun.
data(wonderland)
# Pairwise cosine similarities between the words of the two input strings,
# using "wonderland" as the source of word vectors.
multicos("mouse rabbit cat","king queen",
         tvectors=wonderland)
|
d1574f69e4651770028cdd68c68b5ae2637bda8c
|
5ecd33fb174769b0ccec519dfeca939a9592531d
|
/computeAICBIC.R
|
80c609c1d1ce6277445b2e6f310cf64eab1cdcbd
|
[
"MIT"
] |
permissive
|
lnsongxf/multibreak
|
40ff9cd645e910f92897309f9449384e0772ee48
|
922f3b48c8d81e2b9e702701a493e084e100f0cf
|
refs/heads/master
| 2022-04-02T09:40:01.845993
| 2020-02-11T11:17:02
| 2020-02-11T11:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
computeAICBIC.R
|
compute_aicbic <- function(Y, qMax, X, trend, intercept) # compute the AIC and BIC criteria for lags from 1 to qMax
{
  # For each lag order q in 1..qMax, conform the data via matrix_conformation(),
  # fit an OLS model and record AIC/BIC.
  #
  # Args:
  #   Y: response data, forwarded to matrix_conformation()
  #   qMax: maximum lag order to evaluate (one column per lag)
  #   X, trend, intercept: forwarded unchanged to matrix_conformation()
  # Returns: a 2 x qMax numeric matrix (row 1 = "AIC", row 2 = "BIC").
  #
  # Note: the stats package (lm/AIC/BIC) is attached by default in R, so the
  # original library(stats) call was unnecessary and has been dropped.
  AICBIC <- matrix(NA_real_, nrow = 2, ncol = qMax) # fixed: matrix(data <- NA, ...) leaked a stray `data` object via `<-`
  for (q in seq_len(qMax))
  {
    print(paste0("Testing lags number : ", q))
    lConfMatrix <- matrix_conformation(Y, q, X, trend, intercept) # create a list of conformed objects for the estimation
    Yex <- lConfMatrix$Yex
    Gex <- lConfMatrix$Gex
    mod <- lm(Yex ~ Gex)     # estimate the model with lm
    AICBIC[1, q] <- AIC(mod) # get AIC
    AICBIC[2, q] <- BIC(mod) # get BIC
  }
  rownames(AICBIC) <- c("AIC", "BIC")
  colnames(AICBIC) <- paste0("lags = ", 1:qMax)
  return(AICBIC)
}
|
a0990893cf725b3a9d8a5a7863c21822eba05f9c
|
c7c2d89ea7460c46a85ecd2ba2634a2bbd1bfbb6
|
/04_tw_corpus_hist.R
|
9a56e73512aec06d1b4166ca56351c2efe9bc8f1
|
[] |
no_license
|
jaromirsalamon/HR-and-Sentiment-Analysis
|
003ff2c2380c47568221066e5b0c974af25d6c1b
|
acd8ff8371506cff4fe88372b0fa46e11b69a5cd
|
refs/heads/master
| 2021-06-16T22:43:03.465702
| 2017-04-30T20:31:00
| 2017-04-30T20:31:00
| 78,581,817
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,732
|
r
|
04_tw_corpus_hist.R
|
setwd("~/Dropbox/PhD/2 - 2017 Data - publication/02 - Code and Data")
library(ggplot2)
library(lubridate)

# Load one experiment's tweet export and derive the plotting variables:
#   - sent: sentiment label from sent_num (-1 -> "negative", 1 -> "positive")
#   - source: experiment label used for facetting
#   - time_h / time_h_fact: hour of day (plot 1)
#   - date_time_diff: |tweet time - expected time| in seconds (plot 2)
# Keeps only tweets within 90 minutes (5400 s) of the expected time,
# excluding the 01h bin. Factored out because the two experiments were
# processed with identical, duplicated code.
prep_tweets <- function(path, source_label) {
  tw <- read.csv(path, header = TRUE, stringsAsFactors = FALSE)
  tw$sent[tw$sent_num == -1] <- "negative"
  tw$sent[tw$sent_num == 1] <- "positive"
  tw$source <- source_label
  # factor for plot 1
  tw$time_h <- strftime(tw$date_time, format = "%H")
  tw$time_h_fact <- factor(tw$time_h, levels = unique(as.character(tw$time_h)))
  # factor for plot 2
  # units = "secs" made explicit: the 5400 threshold below is in seconds, and
  # difftime()'s default "auto" units could silently pick minutes/hours.
  tw$date_time_diff <- abs(as.numeric(difftime(strptime(tw$date_time, "%Y-%m-%d %H:%M:%S"),
                                               strptime(tw$date_time_exp, "%Y-%m-%d %H:%M:%S"),
                                               units = "secs")))
  tw.sub <- tw[(tw$date_time_diff > 0 & tw$date_time_diff <= 5400), ]
  tw.sub[as.numeric(tw.sub$time_h) != 1, ]
}

# Tweets related to FitBit Charge HR
tw_xfb.sub <- prep_tweets("data/out/experiment-1_twitter.csv", "Experiment #1")
# Tweets related to Peak Basis
tw_xpb.sub <- prep_tweets("data/out/experiment-2_twitter.csv", "Experiment #2")

data <- rbind(tw_xfb.sub, tw_xpb.sub)
# Figure 3: tweet counts per hour of day, stacked by sentiment, one facet per
# experiment. Output goes to an EPS file.
#png(filename = "graphs/03_tweets_per_hour.png", width = 2200, height = 1000, units = "px", res = 300, bg = "transparent")
setEPS()
postscript("graphs/03-tweets-per-hour.eps", width = 7.33, height = 3.33)
m <- ggplot(data, aes(time_h_fact, fill = sent)) + geom_bar() + theme_bw()
m <- m + labs(x = "hours", y = "count of tweets", fill = "sentiment")
m <- m + facet_wrap(~source, scales="free_x")
# Label every other hour tick (7-24) to avoid crowding.
m <- m + scale_x_discrete(labels = c("7","","9","","11","","13","","15","","17","","19","","21","","23",""))
m
dev.off()
# Figure 4: tweet counts by minutes-from-expected-time (0-45 min), stacked by
# sentiment, one facet per experiment.
#png(filename = "graphs/04_tweets_difference_to_expected.png", width = 2200, height = 1000, units = "px", res = 300, bg = "transparent")
setEPS()
postscript("graphs/04-tweets-difference-to-expected.eps", width = 7.33, height = 3.33)
m <- ggplot(data, aes(round(date_time_diff/60), fill=sent)) + geom_bar() + theme_bw()
m <- m + labs(x = "minutes", y = "count of tweets", fill = "sentiment")
m <- m + facet_wrap(~source, scales="fixed")
m <- m + scale_x_continuous(limits = c(0, 45))
m
dev.off()
|
a7974c94527331c047d7c6c891bd7e9285ce70a4
|
29dac245d1f39f4d2eda9584c6a64d4693c1e909
|
/R/TEST_2018_03_06.R
|
30fbd834b25072babecf9b7b3470b0f23e365426
|
[] |
no_license
|
mattjtuttle/marine_genome_analysis
|
241377e8ed38deee050954543189b877a4784ccd
|
50e6ab864d3c8765e2b8f08fec90c85e2fa9d26e
|
refs/heads/master
| 2021-03-27T18:55:56.271508
| 2018-03-06T22:13:42
| 2018-03-06T22:13:42
| 92,315,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
TEST_2018_03_06.R
|
library(tidyverse)

# Phyla retained for analysis: those with at least 10 genomes queried.
# Defined once — the original duplicated this vector verbatim in both filters.
phyla_of_interest <- c("Firmicutes",
                       "Proteobacteria",
                       "Actinobacteria",
                       "Bacteroidetes",
                       "Cyanobacteria",
                       "Chloroflexi",
                       "Planctomycetes",
                       "Deferribacteres",
                       "Marinimicrobia",
                       "Thermotogae",
                       "Nitrospirae")

# Genome.ID -> Phylum lookup table.
genome_id_phylum <- read.csv("./tables/formatted_genome_list.csv", header = TRUE) %>%
  select(Genome.ID, Phylum)

all_refseq_by_prophage <- read.csv("./tables/all_refseq_by_prophage.csv", header = TRUE) %>%
  # Adds phylum data to detected prophages so that we can look for phyla of interest
  left_join(genome_id_phylum, by = "Genome.ID") %>%
  # Selects phyla for which there were at least 10 genomes queried
  filter(Phylum %in% phyla_of_interest) %>%
  select(-Phylum)

# NOTE(review): unlike the refseq table above, the Phylum column is kept here,
# matching the original code — confirm this asymmetry is intentional.
all_viromes_by_prophage <- read.csv("./tables/all_viromes_by_prophage.csv", header = TRUE) %>%
  # Adds phylum data to detected prophages so that we can look for phyla of interest
  left_join(genome_id_phylum, by = "Genome.ID") %>%
  # Selects phyla for which there were at least 10 genomes queried
  filter(Phylum %in% phyla_of_interest)
|
44f7b2a926ed011441867248104fb3713321b845
|
1aeb2d503f8f2485549c9ee92ac05280120bf4ad
|
/man/qda-train.Rd
|
ab3fcdd80eb922788d1f453b1825d7fb45645c73
|
[] |
no_license
|
cran/stochmod
|
0faad8289316642e4beb6c95ef336d7b30503620
|
8fb71d0d15fa0ee41756428a4b462e23cb39bde6
|
refs/heads/master
| 2021-01-01T16:30:51.217636
| 2009-10-23T00:00:00
| 2009-10-23T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,319
|
rd
|
qda-train.Rd
|
\name{qda-train}
\alias{QDA.train}
\title{Quadratic Discriminant Analysis}
\description{Training method for QDA.}
\usage{
QDA.train( x, y, cov.reg = 0.0 )
}
\arguments{
\item{x}{N x p data matrix of N samples in p dimensions}
\item{y}{N x 1 vector of labels}
\item{cov.reg}{Covariance matrix regularization (towards identity),
value must be in [0, 1]}
}
\details{
Models each class as a single (multivariate) Gaussian. Relaxes the
common covariance matrix constraint of LDA. Computes the
distribution parameters, the Bayesian class priors, and the
discriminant functions. QDA is insensitive to temporal structure of
the data and, therefore, only needs to work with a single observation
sequence. This, in turn, requires a label for each sample.
}
\value{
An QDA classifier defined by:
\item{labels}{Vector of unique class labels}
\item{priors}{K x 1 vector of priors, estimated as fraction of points
from each class}
\item{means}{K x p matrix of means approximated from the data}
\item{covmats}{K x p x p array of covariance matrices estimated from
the data}
\item{icovmats}{K x p x p array of inverse covariance matrices}
\item{bias}{K x 1 vector of bias terms for discriminant function computations}
}
\author{Artem Sokolov \email{Artem.Sokolov@gmail.com}}
\keyword{models}
|
b21487dd752d5c0610495ee560a0819056a1e788
|
d95e9db7d0425e37dd417eb2bae739a17f0f1702
|
/brazil_.R
|
b3e82f132ba0823ba9ff16814c3de5b0b8e5d757
|
[] |
no_license
|
SLRaul/Covid-19-data-monitoring
|
556d4b604da2840c3b3fa3240d751265b65b411f
|
6ba928dc45379acbbdea94d2598f4a6030d3db43
|
refs/heads/master
| 2022-07-17T07:00:40.462019
| 2020-05-21T13:56:54
| 2020-05-21T13:56:54
| 248,812,937
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,753
|
r
|
brazil_.R
|
# COVID-19 monitoring for Brazil: load the Johns Hopkins CSSE global time
# series (confirmed cases and deaths) and extract Brazil's series.
rm(list=ls())  # NOTE(review): clears the whole workspace — avoid in shared scripts
# change the working directory for the code
#setwd("/home/silva/R_Diretorio/sars-covid-19")
library(data.table)
library(lubridate)
library(dplyr)
library(ggplot2)
library(tidyr)
# extract the data from the repository from the computer
new_cases <- fread("C:/Users/silva/Documents/Repositorio/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
                   header = T)
death_cases <- fread("C:/Users/silva/Documents/Repositorio/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
                     header = T)
# First row minus the 4 metadata columns gives the date headers (month/day/year).
cases_date <- fread("C:/Users/silva/Documents/Repositorio/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
                    header = F)[1,c(-1,-2,-3,-4)]
cases_date <- mdy(cases_date)
# Cumulative counts for Brazil (one value per date column).
Brazil_cases <- as.numeric(new_cases %>% filter(`Country/Region` == 'Brazil') %>% select(-`Province/State`, -`Country/Region`,-Lat, -Long))
Brazil_death_cases <- as.numeric(death_cases %>% filter(`Country/Region` == 'Brazil') %>% select(-`Province/State`, -`Country/Region`,-Lat, -Long))
# Keep only the days from the first confirmed case onward.
Brazil_cases_fd <- Brazil_cases[Brazil_cases > 0]
########## new cases per day ##########
# Daily new cases = first difference of the cumulative series; dates shifted
# by one ([-1]) to align with diff() output.
NCPD <- data.frame(diff(Brazil_cases), (cases_date[-1]))
colnames(NCPD) <- c("New_cases", "Date")
ggplot(NCPD, aes(x = Date, y = New_cases)) + geom_col(col = "white", fill = "darkblue") +
  ggtitle("New cases per day in Brazil") + ylab("New cases") + theme_bw()
# in log scale
# Cumulative cases since the 100th case, on a log10 axis.
ncpd <- data.frame((Brazil_cases), (cases_date[]))
colnames(ncpd) <- c("New_cases", "Date")
ncpd <- ncpd[ncpd$New_cases > 100, ]
# # Possible values for trans : 'log2', 'log10','sqrt'
ggplot(ncpd, aes(x = Date, y = New_cases)) + geom_point() + geom_line() +
  ggtitle("New cases since tem 100 case in log scale on Brazil") + ylab("New cases (log scale)") + theme_bw() +
  scale_y_continuous(trans = 'log10')
# ggplot(NCPD, aes(x = Date, y = New_cases)) + geom_point() + geom_line()+
#   ggtitle("New cases per day in Brazil") + ylab("New cases") + theme_bw()
#barplot(NCPD$New_cases)
########## new death cases per day ##########
# Daily new deaths = first difference of the cumulative death series.
NDCPD <- data.frame(diff(Brazil_death_cases), (cases_date[-1]))
colnames(NDCPD) <- c("New_death_cases", "Date")
ggplot(NDCPD, aes(x = Date, y = New_death_cases)) + geom_col(col = "white", fill = "black") +
  ggtitle("New deaths cases per day in Brazil") + ylab("Deaths per day") + theme_bw()
# in log scale
# Cumulative deaths since the 100th death, on a log10 axis.
ndcpd <- data.frame((Brazil_death_cases), (cases_date[]))
colnames(ndcpd) <- c("death_cases", "Date")
ndcpd <- ndcpd[ndcpd$death_cases > 100, ]
# # Possible values for trans : 'log2', 'log10','sqrt'
ggplot(ndcpd, aes(x = Date, y = death_cases)) + geom_point() + geom_line() +
  ggtitle("Death cases since tem 100 case in log scale on Brazil") + ylab("New cases (log scale)") + theme_bw() +
  scale_y_continuous(trans = 'log10')
########## previsão #########
########## prediction #########
#rm(list = ls())
library (deSolve)
# SIS epidemic derivatives, in the form expected by deSolve::lsoda.
#
# Args:
#   current_timepoint: current time (unused; required by the deSolve signature)
#   state_values: vector with S (susceptible fraction) first, I (infectious) second
#   parameters: named vector/list with beta (transmission) and gamma (recovery)
# Returns: list(c(dS, dI)) — derivatives of S and I.
sis_model <- function(current_timepoint, state_values, parameters) {
  susceptible <- state_values[1]
  infectious <- state_values[2]
  with(as.list(parameters), {
    new_infections <- beta * susceptible * infectious # mass-action transmission
    recoveries <- gamma * infectious                  # recovered hosts return to S
    list(c(recoveries - new_infections,  # dS
           new_infections - recoveries)) # dI
  })
}
# #parameters
# contact_rate = 8 # number of contacts per day
# transmission_probability = 0.07 # transmission probability
# infectious_period = 5.55 # infectious period
# computing the transmission and recovery rates
r_0 <- 2.04 # imperial college r_0 = 2.0 - 2.4 - 2.6
beta_value = r_0/5.55#contact_rate * transmission_probability
gamma_value = ((r_0/5.55)/r_0)#1 / infectious_period
# basic reproduction number (by construction Ro == r_0 here)
Ro = beta_value / gamma_value
# disease dynamics parameters
parameter_list = c (beta = beta_value, gamma = gamma_value)
# initial values of the sub-populations
X = 210000000 # susceptible hosts
Y = 1 # infectious hosts
# total population
N = X + Y
# initial values of the differential equations (fractions of N)
initial_values = c (S = X/N, I = Y/N)
# time points (days)
timepoints = seq (0, 100, by=1)
# simulating an SIS epidemic
output = lsoda (initial_values, timepoints, sis_model, parameter_list)
###### plot
# susceptible hosts over time
plot (I ~ time, data = output, type='l', ylim = c(0,1),
      #,xlim=c(0,50),
      col = 'red', ylab = 'S, I, S', main = 'SIS epidemic')
# remain on same frame
#par (new = TRUE)
# infectious hosts over time
#plot (S ~ time, data = output, type='b', ylim = c(0,1), col = 'blue', ylab = '', axes = FALSE)
# Overlay observed cases (as fraction of the population) on the simulation.
lines(Brazil_cases_fd/210000000)
cbind(output[1:length(Brazil_cases_fd),],Brazil_cases_fd/210000000)
# http://desolve.r-forge.r-project.org/
# https://rpubs.com/choisy/sir
## https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf
## https://www.imperial.ac.uk/mrc-global-infectious-disease-analysis/news--wuhan-coronavirus/
## https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-Global-Impact-26-03-2020.pdf
## https://www.r-bloggers.com/sir-model-with-desolve-ggplot2/
# https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0185528
## https://rpubs.com/docblount/111138 #SIS
## https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html
|
ea0a7d1e109438d8b9a0a13d6c7be94adae81827
|
a5f3d6c1cf45a7b8ee641c8a0c05c6046578c783
|
/R/AmigoDot.to.graphNEL.R
|
d078d35cbe8e552609cf648ad283df715494c3d4
|
[] |
no_license
|
mschroed/RamiGO
|
f71140f3ac1b5ae94d5f7cdeafaac175de125901
|
fe05fe3e166922db58796df169edbc0445e6d4da
|
refs/heads/master
| 2021-01-19T16:57:05.668955
| 2015-04-17T04:42:40
| 2015-04-17T04:42:40
| 1,807,803
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 585
|
r
|
AmigoDot.to.graphNEL.R
|
# Convert an AmigoDot object into a graphNEL graph, carrying over the edge
# colors/weights and node colors/descriptions as typed graph attributes.
AmigoDot.to.graphNEL <- function(object){
  # Build an igraph from the adjacency matrix and the edge/vertex annotations.
  ig <- adjM2gml(adjMatrix = adjMatrix(object),
                 edgecolor = relations(object)$color,
                 vertexcolor = annot(object)$fillcolor,
                 nodelabels = annot(object)$GO_ID,
                 nodedescription = annot(object)$description)
  out <- igraph.to.graphNEL(ig)
  # Declare the attribute classes expected by downstream graphNEL consumers.
  attr(edgeDataDefaults(out, attr = "weight"), "class") <- "INTEGER"
  attr(edgeDataDefaults(out, attr = "color"), "class") <- "STRING"
  attr(nodeDataDefaults(out, attr = "description"), "class") <- "STRING"
  attr(nodeDataDefaults(out, attr = "color"), "class") <- "STRING"
  return(out)
}
|
960a0caf595ced9fe836742c6e13d2d0c7f1d227
|
6d5346e6e5364187c4120fb8cee18c54c135eddb
|
/R/new_dict.r
|
53aca6d062f01a119df076055ed4bfa1babe87bb
|
[] |
no_license
|
joelcarlson/rdatadict
|
6cf5dd142f4115a9d032389593aedbded3c014fc
|
c3c777030e68b99cf4ea52cc89940a60eedce7be
|
refs/heads/master
| 2020-04-27T16:24:53.659648
| 2015-05-20T02:20:11
| 2015-05-20T02:20:11
| 33,967,927
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
new_dict.r
|
# Create an empty data dictionary: a zero-row data.frame with character
# columns Variable and Description, classed as c("data.frame", "data_dict").
new_dict <- function(){
  # Announce initialization (kept for behavioral parity with the original).
  print( "New data_dict initialized")
  dd <- data.frame(Variable = character(),
                   Description = character(),
                   stringsAsFactors = FALSE)
  class(dd) <- c("data.frame", "data_dict")
  dd
}
|
50b3a7d071f35717ec85d58d909165beca44d031
|
77f228315f25d2d5d3101a3f954fc6b1364fee59
|
/R/gear.R
|
1560e160eab1189b601408eeaf829de70053a5d0
|
[] |
no_license
|
TobieSurette/gulf.data
|
5fbe678b64d10d9f50fed501d958c65f70d79fcf
|
7e9796e47c90645e399775dadb1344e7f51a13b0
|
refs/heads/master
| 2023-09-03T23:53:10.733496
| 2023-08-29T18:45:20
| 2023-08-29T18:45:20
| 253,640,181
| 2
| 1
| null | 2022-06-01T12:33:15
| 2020-04-06T23:36:23
|
R
|
UTF-8
|
R
| false
| false
| 2,108
|
r
|
gear.R
|
#' Gear Codes
#'
#' @description Gear code functions: translate numeric gear codes into their
#'   text descriptions, or list the full code table.
#'
#' @param x Numeric gear code(s). If missing, the complete named code vector
#'   is returned (names = descriptions, values = code strings).
#' @param ... Unused; retained for backward compatibility.
#'
#' @return If \code{x} is supplied, a character vector of descriptions the
#'   same length as \code{x} (\code{NA} for unknown or missing codes);
#'   otherwise the full named code vector.
#'
#' @examples
#' gear() # Return all codes.
#'
#' gear(1)
#' gear(c(1, 1, 10, NA, 11, NA, 2, 2)) # fixed: example was missing its closing parenthesis
#'
#' @export
gear <- function(x, ...){
   descriptions <- c("3/4 35 otter trawl",                                # 1
                     "35 otter trawl",                                    # 2
                     "yankee 36 otter trawl",                             # 3
                     "41.5 otter trawl",                                  # 4
                     "long line",                                         # 5
                     "beam trawl",                                        # 6
                     "mid-water trawl",                                   # 7
                     "Engel highlift (bottom) trawl",                     # 8
                     "western IIA",                                       # 9
                     "IYGPT (International young gadoid pelagic trawl)",  # 10
                     "shrimp trawl",                                      # 11  Foreign trawl (30m in length)
                     "50' flounder drag",                                 # 12  Concord trawl
                     "rock hopper trawl",                                 # 13  Modified yankee #41
                     "nephrops trawl",                                    # 14
                     "300 Balloon Star otter trawl",                      # 15
                     "No. 286 otter trawl with rockhopper",               # 16
                     "8 gang toothed scallop drag lined with 14 mm mesh", # 17
                     "8 gang toothed scallop drag with 82.6mm rings",     # 18
                     "Northumberland trawl",                              # 19
                     "2 gang toothed scallop drag lined with 14 mm mesh", # 20
                     "6 gang toothed scallop drag with 82.6mm rings",     # 21
                     "Newston net ",                                      # 96
                     "10ft isaacs-kidd net ",                             # 97
                     "6ft isaacs-kidd net ")                              # 98

   # Code values 1-21 and 96-98, named by their descriptions.
   codes <- as.character(c(1:21, 96:98))
   names(codes) <- descriptions

   # Look up codes: unmatched values (including NA) yield NA.
   if (!missing(x)) return(names(codes)[match(as.character(x), codes)])

   return(codes)
}
|
99db85112617743145349764b033e1dcdbfd92c1
|
629a1c4267544403b76735e3a78d5acfd5328d6f
|
/20151112-creative_data_science/important_mean.R
|
459b639151e5bb8a45abb76efa0f47b8cb9bee9f
|
[] |
no_license
|
ajschumacher/ajschumacher.github.io
|
b99f87308c4ebb34096028f29214cb2c25e1b427
|
0437a64db7ea740ac79507e0be441ab1485f8fcb
|
refs/heads/master
| 2023-08-09T04:07:51.115823
| 2023-07-23T20:35:47
| 2023-07-23T20:35:47
| 21,110,337
| 18
| 27
| null | 2022-07-06T19:47:24
| 2014-06-23T01:24:43
|
HTML
|
UTF-8
|
R
| false
| false
| 69
|
r
|
important_mean.R
|
# Print the mean of the `growth` column of important_stats.csv, ignoring NAs.
# cat() writes the value to stdout without a trailing newline.
d <- read.csv("important_stats.csv")
cat(mean(d$growth, na.rm=TRUE))
|
d0f5d9be6bc164471b3eecae6bbee0d71107f3d8
|
64467287f8a0569e035b32c8f5ece02e640b8133
|
/man/MarginalModelFit.Rd
|
44a088451b9c3dade1b1ccb0677b4b2e25ee550e
|
[] |
no_license
|
cran/cmm
|
72f4f13853fd3d5f69252df6a23f6c730da06b74
|
e44f5ba312b0538e842da370943a8e096bb2bdd7
|
refs/heads/master
| 2023-08-17T08:38:19.376886
| 2023-08-09T17:50:03
| 2023-08-09T18:33:30
| 17,695,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,636
|
rd
|
MarginalModelFit.Rd
|
\name{MarginalModelFit}
\alias{MarginalModelFit}
\title{MarginalModelFit}
\description{
Fits marginal models, by default using maximum likelihood.
}
\usage{
MarginalModelFit(dat, model, ShowSummary = TRUE, MaxSteps = 1000, MaxStepSize = 1,
MaxError = 1e-20, StartingPoint = "Automatic", MaxInnerSteps = 2,
ShowProgress = TRUE, CoefficientDimensions="Automatic", Labels="Automatic",
ShowCoefficients = TRUE, ShowParameters = FALSE, ParameterCoding = "Effect",
ShowCorrelations = FALSE, Method = "ML", Title = "Summary of model fit")
}
\arguments{
\item{dat}{vector of frequencies or data frame}
\item{model}{list specified, e.g., as \code{list(bt,coeff,at)}}
\item{ShowSummary}{Whether or not to execute \code{summary()} of the output}
\item{MaxSteps}{integer: maximum number of steps of the algorithm}
\item{MaxStepSize}{number greater than 0 and at most 1: step size}
\item{MaxError}{numeric: maximum error term}
\item{StartingPoint}{vector of starting frequencies corresponding to all cells in the manifest table}
\item{MaxInnerSteps}{nonnegative integer: only used for latent variable models, indicates number of steps in M step of EM algorithms}
\item{ShowProgress}{boolean or integer: FALSE for no progress information, TRUE or 1 for information at every step, an integer k for information at every k-th step}
\item{CoefficientDimensions}{numeric vector of dimensions of the table in which the coefficient vector is to be arranged}
\item{Labels}{list of characters or numbers indicating labels for dimensions of table in which the coefficient vector is to be arranged}
\item{ShowCoefficients}{boolean, indicating whether or not the coefficients are to be displayed}
\item{ShowParameters}{boolean, indicating whether or not the parameters (computed from the coefficients) are to be displayed}
\item{ParameterCoding}{Coding to be used for parameters, choice of \code{"Effect"}, \code{"Dummy"} and \code{"Polynomial"}}
\item{ShowCorrelations}{boolean, indicating whether or not to show the correlation matrix for the estimated coefficients}
\item{Method}{character, choice of "ML" for maximum likelihood or "GSK" for the GSK method}
\item{Title}{title of computation to appear at top of screen output}
}
\details{
The data can be a data frame or vector of frequencies. \code{MarginalModelFit} converts a data frame \code{dat} to a vector of
frequencies using \code{c(t(ftable(dat)))}.
The model specification is fairly flexible. We first describe the most typical way to specify the model.
The model itself should typically first be written in the form of a constraint vector as
\eqn{B'\theta(A'\pi) = 0}
where \emph{B'} is a contrast matrix, \emph{A'} is matrix, normally of zeroes and ones, such that \emph{A'pi} gives a vector of marginal probabilities, and the function theta yields
a list of (marginal) \emph{coefficients}. The model is then specified as \code{model=list(bt,coeff,at)} where \code{bt} is the matrix \emph{B'}, \code{at} is the matrix \emph{A'}, and \code{coeff}
represents the vector of coefficients using the generalized exp-log notation. For most of the models in the book, \code{bt} can be obtained directly using \code{ConstraintMatrix},
\code{coeff} can be obtained directly using \code{SpecifyCoefficient}, and \code{at} can be obtained directly using \code{MarginalMatrix}.
Note that CMM does not permit the \emph{C} and \emph{X} matrix in the model
\eqn{C'\theta(A'\pi) = X\beta}
to be specified for use in the programme. The model has to be rewritten in terms of constraints as above, which is normally straightforward to do with the use of \code{ConstraintMatrix}.
For many purposes, estimates and standard errors for a beta vector as in the equation above can still be obtained using the optional argument \code{ShowParameters=TRUE}.
There are two ways to specify \code{coeff}. The first is using the generalized exp-log notation, in which case \code{coeff[[1]]} should be a list of matrices, and
\code{coeff[[2]]} should be a list of predefined functions of the same length. The second is to set \code{coeff} equal to a predefined function; for example, marginal loglinear models
are obtained by setting \code{coeff="log"}.
The model can be specified in various other ways: as \code{model=list(coeff,at)}, \code{model=list(bt,coeff)}, \code{model=at}, or even just \code{model=coeff}.
Furthermore, the model
\eqn{B'\theta(A'\pi) = d}
with \emph{d} a nonzero vector is specified in the form \code{model=list(bt,coeff,at,d)}.
To specify the simultaneous model
\eqn{B'\theta(A'\pi) = 0\\ \log\pi=X\beta}
the extended model specification \code{model=list(margmodel,x)} should be used, where \code{margmodel} has one of the above forms, and \code{x} is a design matrix,
which can be obtained using \code{DesignMatrix}. Fitting is often more efficient by specifying a loglinear model for the joint distribution in this way rather than
using constraints.
The default on-screen output when running \code{fit=MarginalModelFit(...)} is given by \code{summary(fit)}. Important here is the distinction between coefficients and parameters, briefly
described above. Standard output gives the coefficients. These are that part of \code{model} without the \code{bt} matrix, e.g., if the model is \code{list(bt,coeff,at)}
then the coefficients are \code{list(coeff,at)}. If other coefficients are needed, \code{\link{ModelStatistics}} can be used.
Latent variable models can be specified: if the size of the table for which \code{model} is specified is a multiple of the size of the
observed frequencies specified in \code{dat}, it is assumed this is due to the presence of latent variables. With respect to vectorization,
the latent variables are assumed to change their value fastest.
Convergence may not always be achieved with \code{MaxStepSize=1} and a lower value may need to be used, but not too low or convergence is slow. If the step size is too large,
a typical error message is "system is computationally singular: reciprocal condition number = 1.35775e-19".
}
\value{Most of the following are included in any output. Use \code{summary()} to get a summary of output.
\item{FittedFrequencies}{Vector of fitted frequencies for the full table (including any latent variables).}
\item{Method}{Fitting method used (currently maximum likelihood, GSK or minimum discrimination information)}
\item{LoglikelihoodRatio}{}
\item{ChiSquare}{}
\item{DiscriminationInformation}{}
\item{WaldStatistic}{}
\item{DegreesOfFreedom}{}
\item{PValue}{p-value based on asymptotic chi-square approximation for likelihood ratio test statistic}
\item{SampleSize}{}
\item{BIC}{}
\item{Eigenvalues}{}
\item{ManifestFittedFrequencies}{}
For the ``coefficients'' in the equation bt.coeff(at.pi)=d, the following statistics are available:
\item{ObservedCoefficients}{}
\item{FittedCoefficients}{}
\item{CoefficientStandardErrors}{}
\item{CoefficientZScores}{}
\item{CoefficientAdjustedResiduals}{}
\item{CoefficientCovarianceMatrix}{}
\item{CoefficientCorrelationMatrix}{}
\item{CoefficientAdjustedResidualCovarianceMatrix}{}
\item{CoefficientDimensions}{}
\item{CoefficientTableVariableLabels}{}
\item{CoefficientTableCategoryLabels}{}
The ``parameters'' are certain linear combinations of the coefficients. For example, if the coefficients are log probabilities, then the parameters are the usual loglinear parameters.
\item{Parameters}{}
For the i-th subset of variables, the parameters are obtained by
\item{Parameters[[i]]$}{}
The following statistics for the parameters belonging to each subset of variable are available.
\item{Parameters[[i]]$ObservedCoefficients}{}
\item{Parameters[[i]]$FittedCoefficients}{}
\item{Parameters[[i]]$CoefficientStandardErrors}{}
\item{Parameters[[i]]$CoefficientZScores}{}
\item{Parameters[[i]]$CoefficientAdjustedResiduals}{}
\item{Parameters[[i]]$CoefficientCovarianceMatrix}{}
\item{Parameters[[i]]$CoefficientCorrelationMatrix}{}
\item{Parameters[[i]]$CoefficientAdjustedResidualCovarianceMatrix}{}
\item{Parameters[[i]]$CoefficientDimensions}{}
\item{Parameters[[i]]$CoefficientTableVariableLabels}{}
\item{Parameters[[i]]$CoefficientTableCategoryLabels}{}
}
\references{
Bergsma, W. P. (1997).
\emph{Marginal models for categorical data}.
Tilburg, The Netherlands: Tilburg University Press.
\url{http://stats.lse.ac.uk/bergsma/pdf/bergsma_phdthesis.pdf}
}
\author{
W. P. Bergsma \email{w.p.bergsma@lse.ac.uk}
}
\seealso{
\code{\link{SampleStatistics}}, \code{\link{ModelStatistics}}
}
\examples{
# see also the built-in data sets
data(NKPS)
# Fit the model asserting Goodman and Kruskal's gamma is zero for
# Child's attitude toward sex role's (NKPS[,3], three categories) and
# parent's attitude toward sex role's (NKPS[,4], three categories).
coeff = SpecifyCoefficient("GoodmanKruskalGamma",c(3,3))
fit = MarginalModelFit(NKPS[,c(3,4)], coeff )
# Marginal homogeneity (MH) in a 3x3 table AB
# Note that MH is equivalent to independence in the 2x3 table of marginals IR, in which
# the row with I=1 gives the A marginal, and the row with I=2 gives the B marginal
n <- c(1,2,3,4,5,6,7,8,9)
at <- MarginalMatrix(c("A","B"),list(c("A"),c("B")),c(3,3))
bt <- ConstraintMatrix(c("I","R"),list(c("I"),c("R")),c(2,3))
model <- list( bt, "log", at)
fit <- MarginalModelFit(n,model)
#Output can be tidied up:
fit <- MarginalModelFit(n,model,CoefficientDimensions=c(2,3))
}
\keyword{univar}
|
992e9e93f05b05c2889c0b4f2d50aa9a26f23af1
|
185a886342148b1728e087785cc2587def1442dc
|
/data/siemens.R
|
29034f38aae1594afea9e10a75780077b41cd085
|
[] |
no_license
|
bpfaff/evir
|
6fda254f63f0d01ddbb82b5e07fd108312e4af72
|
98abc911dbcc5be2f9fd0546f4217c469736c360
|
refs/heads/master
| 2021-01-11T11:58:12.542208
| 2016-09-24T22:24:11
| 2016-09-24T22:24:11
| 68,827,018
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191,373
|
r
|
siemens.R
|
"siemens" <-
structure(c(0.0143474484081416, 0.0108619721995584, 0.00702085729721347,
0.00186393343806257, 0, -0.00139762426663781, -0.00514139950041903,
0, 0.00839948941997637, 0, -0.00699466596252707, -0.0127150288490356,
0.00567109270381616, 0.0182120373329577, 0.0251326913602883,
-0.00815591290360773, 0.0149020594689659, 0.00134378519661338,
-0.0148818981026841, 0.0246815310914879, -0.0165366896853554,
0.00673705669655167, -0.00493606566494931, -0.00496055132197126,
-0.0340262710572348, 0.00652682969668428, 0.00832952525283037,
0.00367985695840067, -0.0092251576748259, 0.0119761910467155,
0.0328784399527402, -0.00845388794566526, -0.0166708073454087,
0.00994583243773484, -0.00315386609702228, 0.0107720969819112,
0.0102155007844518, 0.0114236743190297, -0.0176293679932513,
0.0128121920478121, 0.00743497848751762, -0.00524247596484972,
-0.00394997221931437, -0.000879894469439257, 0.00482986668875363,
0, 0.0165077597446412, 0.0145426001760813, 0, 0.00423729447551491,
-0.00508691219867075, 0.00719884540181459, -0.00550033119304594,
0.00169563414938168, 0.0159667257541818, -0.00125130360435444,
0.0218790977168277, 0.00163198730632175, -0.0156124781718163,
-0.0221905693293656, -0.0175105904828707, 0.00815280207731028,
0.00638979809877105, 0, -0.00382897723709741, -0.0102828669555839,
-0.026621689979867, 0.0309190751924131, -0.00904593719600211,
-0.00781932741647662, 0.00087183963674109, 0.0287758742626623,
0.012621118313215, -0.00293071174808102, -0.0186209358315308,
-0.0154975120601617, -0.00217155351350806, 0.00217155351350806,
0, 0, 0.000867302743004394, -0.0259070471601719, -0.00938554375621381,
0.00715886726284065, -0.0062612011343397, 0, -0.0149155307564475,
-0.00227946303395488, -0.00686973161963289, -0.0185534078957481,
0.00467072311055849, -0.00467072311055849, -0.0127210196013108,
0.00708720188305456, -0.0286552557603756, -0.021548336206203,
-0.0200006667066694, 0.0200006667066694, -0.00845145472943232,
-0.030927686814548, 0.011264840063141, 0.023648900858241, 0.0123549878977611,
0.0333283684460683, 0, -0.00762998455991593, -0.0276649528581934,
0, -0.0163734741979722, 0.00797213988038292, 0.0278951289985905,
0.0100841190666263, 0.0042908289908965, -0.00812041720399437,
0, 0.00382958821309787, 0.0142317392572564, -0.0099409102663599,
-0.0163073158085765, 0.0172583382292806, 0, 0.0118120910358788,
0, -0.00188058353496201, -0.00377181022365214, -0.0253541445334116,
-0.0195700961940974, 0, -0.0351971978046279, 0.00459536167373287,
0.0101369342433348, -0.015756361546758, -0.00668211173038591,
-0.00465237334372093, -0.0193571306896083, 0, 0.0296665003484695,
-0.00411100270652209, 0.00616018375145799, 0.00663098578633114,
-0.0241843533672323, -0.0205216357207965, 0, -0.00372836650266306,
-0.00911290144566168, 0.0107124752338033, -0.0177376324699696,
-0.00652885888246368, -0.00987391455899367, 0.00330214939575901,
0.0456455074899602, 0.00262123347987453, 0.0104167608582553,
0.00774197415361577, 0.00461657655018133, -0.0139141641883618,
0.00775398043690378, 0, 0.0107555454251909, -0.0179907191059701,
-0.00103788281242023, -0.0025994295895142, 0.00829450063843939,
-0.00777407387675844, -0.00469362008998475, -0.0089262866252855,
0.000527287120046793, -0.000527287120046793, -0.0164853507370513,
0, 0.00107181146380686, 0.0159408263932912, -0.023467743649654,
0.00913248356327223, 0.00533334597536239, 0.00900191411101936,
-0.00422610243350441, -0.0127865354235936, -0.00214707543566517,
-0.0118920320379297, -0.00381367937024546, 0.00109110758233433,
-0.010965022141328, -0.00774768679082616, -0.0190695627203508,
-0.00113314459716829, 0.00169923575295972, -0.0022662899218262,
-0.0212103107544972, -0.0157667499714411, 0.0157667499714411,
0.00404741807223274, -0.00404741807223274, 0.0166622642565932,
-0.00399657968448563, 0.0187027270741806, 0.00949993174190444,
0.00996134412967287, -0.00330943490136715, -0.0150295063967314,
0.0117090827594288, 0, 0.00442478598035567, 0.0277568839986637,
0.0159747806077344, 0.0021108187256913, 0.0249882861175328, 0.00768249617201633,
-0.00102092913410745, -0.00614756034450537, -0.0025726795769403,
0.0168586368244168, -0.00101368483084618, 0.0175928136337573,
0.0039781255844451, -0.0084725151722842, -0.0130984240459182,
0.0080808520539386, -0.0126551929400582, -0.0164359118479465,
0.00155239358472503, -0.0103951040003398, 0.0103951040003398,
0.00618558673271075, -0.0186727417052746, -0.00788440352414899,
-0.018104860874276, -0.0222774765714098, -0.0132745312086961,
-0.00222965532726915, 0.0110988930680489, 0, -0.0228868488200913,
-0.0234229502910086, -0.0358967638299896, -0.0120556011022361,
0.0174335644675527, 0.0229759738703237, -0.0093622685016137,
-0.00176522552456904, 0.0307359654145594, -0.00343249764932452,
-0.0243631539738391, -0.0177729796758643, -0.00239377732498891,
0.0125038842552838, -0.00712592088996722, -0.00777748461260774,
-0.00240529280124901, 0.000601865802098533, -0.0108893997992681,
0.00121580562088974, -0.01038177263497, -0.00245851383820739,
0, 0, 0, 0.0332859776334669, 0.0270127992460685, -0.00931322214666741,
0, -0.0218749485098817, -0.00299311806813218, 0.0272045162390331,
0.0259156455798331, 0.0269225458546707, 0.0180975600108026, -0.00435730536895562,
-0.00273298885606899, 0.0168253629957769, -0.0081059617039605,
0.0118663744080929, 0, 0.0149018191314068, -0.0213569249239627,
0, 0.00645510579255593, -0.016761679261506, -0.00876237807898583,
0.00439078641748969, 0.00491669933375682, 0.0151436101129176,
0.0101469495437367, 0.0100450223431041, 0.00524660175184355,
-0.0200852417961843, -0.00374432103679911, 0.00587765517921968,
0, -0.00373632673787672, 0.00798939003347909, -0.00478597031594141,
-0.00480898602444713, -0.0140239623086451, -0.0342574935672428,
-0.0107375996336372, -0.00970604251217688, -0.0098011738766477,
0.00520082073758665, 0.00115207386014538, 0.000575539584231954,
-0.00115141060504165, -0.00404040953700502, -0.00231615621783021,
0, -0.00406622692125946, -0.00233100338647585, 0.0213572331281271,
0.0231452612490237, -0.00503497567173561, 0, 0.0292919533172955,
-0.0126063604255848, 0.006048959879426, 0.00655740054615883,
0.00705566020668913, -0.00487938078409345, 0.00704037568232874,
-0.0146781651304462, -0.0227093669411742, 0.00781253973679341,
-0.0173819907529444, -0.00283206079971832, 0, 0, 0.00734674115424516,
0.017857617400006, 0.0197158631644179, 0.00540834200926099, 0.0101959563182881,
0.00532482621306407, -0.00746272120158986, -0.0032154368539743,
0, 0, 0.0212434966945612, -0.000525624190813812, 0.00210084110881148,
-0.00684393300482444, 0.0021108187256913, 0.00210637255341517,
0, -0.0201919718868409, 0.0106781580555642, 0.00106157122495887,
0.00423505132721136, 0, -0.0106214487652343, 0.00213333414242056,
-0.00213333414242056, 0.0174653817700587, 0.0104385081439236,
-0.014117881545785, -0.00793025567597727, -0.0117459972992959,
-0.00808847763777276, 0.0139787222448486, 0.0127322674909256,
-0.010598933348505, 0.00372241854979949, -0.0112090845347201,
0.00428495567250131, 0, 0, 0, 0, -0.0139937697994621, -0.0114473858403508,
-0.000548395955613845, 0, 0.000548395955613845, 0.00655740054615883,
0.00434783293610375, 0.000542152358088188, -0.000542152358088188,
0.00594757092499609, -0.0064900173717124, 0, -0.00435019718115237,
0, -0.0076586807610628, -0.00164880499018372, -0.00110071557618552,
-0.0088496152769828, -0.00501254182354405, 0.0116570845422128,
0.00660068403135217, -0.0317486983145803, 0.0090141455434436,
-0.00224593019730523, 0.0139550129567341, -0.0117090827594288,
-0.00562431178893208, -0.016491701605283, -0.0103747328255519,
-0.00173963511544351, 0.013833073606127, -0.0068926177530968,
0.0068926177530968, 0.000572246081424144, 0.000571918803121019,
-0.00919018530299587, -0.00636760472792997, -0.00933495693605169,
0, -0.00705885283962449, 0.031951599806602, -0.00114416488454516,
-0.00458979577977336, 0, -0.00172661913398775, -0.000576202839335682,
0.00803678263764196, -0.00976738589622128, -0.00347021747900733,
0.00750364271091009, 0.0204901875552328, 0.00897370015119581,
-0.000558503225911444, 0, 0.00779080178776859, 0.00607904607638199,
-0.00829880281469508, 0.00443459706786564, 0.00441501820911716,
-0.0166577977247941, -0.00843412365361118, -0.0222675486604529,
0.00460830308619409, 0, -0.00576370471674981, -0.00929158833409982,
-0.0164709605959241, 0.00118553659410159, -0.00177883236944032,
0.000593295775338731, -0.0288828741487861, -0.00550965581096952,
0.0115961018673718, -0.00182204726994462, -0.00732157022900726,
-0.00245248436841994, 0.00245248436841994, -0.0204151847425926,
-0.0304592074847085, 0.00193112387017047, -0.00515797404026808,
0.0160311224160856, 0.0019065782705816, 0, 0.0256986878213072,
-0.0105755262470866, -0.00690307102687315, -0.01650831140745,
-0.0318726166112753, 0.00198085241396129, 0.00788440352414854,
-0.000654664507833136, -0.0198419207990042, -0.00603016902659093,
0.0206192872027353, -0.0125871155054713, 0.00862932669805438,
-0.015989681104347, -0.00471222682797645, 0.0207019079323234,
-0.00862932669805438, -0.0181639706276711, 0.000678656286651425,
-0.00544219030268511, 0.0128771687902032, 0.0147061473896954,
-0.00599003454933067, 0.0139213385186081, 0.00525280262285976,
0.0130125453544339, 0.0160311224160856, 0.0226424767497595, -0.00249066131245179,
-0.00876649321218226, -0.0203310687835838, -0.0200979212120611,
-0.00131061617712058, 0.0136765401058554, 0.0249134571640135,
0.00189095549570029, -0.00695105484404257, 0.0151041260336764,
0.02223686437442, -0.00489896876908968, -0.00925075977215117,
0, -0.00559181326577773, 0.0148425730379289, -0.004923086866369,
0.00982205563545868, 0.00487508582833751, 0.0072683542270906,
0.00661656549212886, 0.000599340743143273, 0.00656914873546954,
0.00947874395454384, 0.0198488711399762, 0.02285813807605, -0.00282885620047768,
-0.0102506592333405, 0.00228702215179544, 0.013613369623664,
0, -0.0028208763416413, 0.00731779229526541, 0.00447678419757169,
-0.00503779402995708, -0.000561324741093383, -0.028475728113452,
0, 0, 0, 0.00690849034381147, 0.00913769921753715, 0, 0, 0.0157927704218701,
0.0149961155156539, 0.00440044714482157, -0.00219780308247941,
0.0233769819048266, 0.00535906963183885, 0.00106837616999877,
0.00373035337990624, -0.0133870807824592, -0.00323974365441071,
0.00754313921446359, 0.0159747806077344, -0.0079554914411144,
-0.00480385231264568, 0.00213789498852579, -0.0156042983275491,
-0.00652885888246368, 0.0113976810512844, -0.00324324608612026,
0.00324324608612026, -0.00216099489823529, 0.00861610069079122,
0.000536049329373345, -0.000536049329373345, 0.00428037036617246,
0.00160042712208153, 0.00478597031594141, 0.0147448695581414,
0.00469362008998475, 0.0256819456323432, -0.00712471207212495,
-0.0149218101847266, 0.0118527540673088, 0.0217895623509259,
0.00350263055120204, -0.00200000066666695, 0.0104556591043385,
0.0259119850665361, -0.00193236775105365, -0.00387597384469318,
0.0158924606623732, 0.0109290705321903, 0.0103432986068004, 0.00373483160565335,
-0.0117179686259612, 0.00611047729701575, -0.0023457669682192,
0.0098154562282291, 0.0225348694330529, 0, -0.00182066504548972,
-0.0137616850726814, -0.010213644963013, 0.00372613379415299,
-0.00606203768273961, -0.0060990102264733, -0.00377181022365214,
0.0018876834298176, -0.00519972809457059, 0.00472814119594611,
0.0177657781520217, 0, 0, 0.0183659891454866, 0.00227221183877679,
0.0139736328268025, -0.00493606566494931, 0.00493606566494931,
-0.00493606566494931, 0.00448833787596126, 0.0137871874829156,
0.00704228262541307, -0.00351494210744452, 0.00175901540517964,
-0.0146052607260132, 0.00222667649337316, 0.00885745383406089,
0.0105264129869878, 0.00348432408261079, 0.00260529891176331,
0.00346320692461743, -0.00911662489571752, -0.0189307876594387,
0.0097302846425764, 0.0161540295134532, 0, 0.00905373720525127,
-0.0103538457307422, 0.00949510802232423, -0.00646414661988937,
0, -0.00303096140243486, 0.000433557344749147, -0.0113340358893654,
-0.0070391843672537, -0.000883392283597129, 0.00572814961268442,
0, -0.00572814961268442, -0.00176912916553418, -0.00577138116961429,
-0.00535715566924289, 0, -0.0276849993727164, -0.018110548394731,
0, -0.0023457669682192, 0.00795325829716464, 0.0307434168604055,
-0.00999100045558299, 0.00318979536810016, 0.0135564660836081,
-0.00179694567670152, -0.00994133808784792, -0.00775020975293472,
-0.000915750979746388, -0.00505167549611807, -0.00878618368946293,
0, -0.00232504173560777, 0.0092679069307815, 0.027747970826332,
0.00759780191252002, -0.00759780191252002, -0.00179613878552809,
0.00537876688978889, -0.0035826281042608, -0.00224567801539566,
0.0116228398279925, 0.0284794713218686, -0.003028338892344, 0.003028338892344,
0.00731658095004883, -0.00343642949858136, -0.00560950769742075,
0.0124705980773663, 0.0131608777136583, -0.00168847655142157,
-0.0106180653563213, -0.00771542618586096, 0.0145239631255145,
0.00211819608959152, -0.00509122864683809, -0.00298189784316616,
-0.0107228134413715, -0.00779224722016592, 0.0155242445034918,
-0.00171306251742775, 0.0144683374925538, 0.00379507096855169,
0.0033613477027048, -0.00041955108649816, -0.00969857907908622,
-0.000847816922339817, -0.000424178161185118, 0.00465609306776527,
-0.00126769507786317, 0.00168990323275242, 0.0117500302755986,
-0.00711746776886368, -0.00168208618298493, -0.0122804846315123,
-0.000852514970643536, 0.00552840021693646, -0.0175405566649398,
-0.00823406602367527, 0.00260756340708079, 0, -0.018397365139716,
-0.0137994622435333, -0.00224366259704212, 0.00448230243949954,
-0.0130544280182536, 0.00991892712913245, 0.00179291845428908,
0.012903404835908, 0.000441988957471384, 0.00792257665085083,
0.00175208104391711, -0.0154358081298396, 0.00399202126953746,
0.00265252144913131, -0.00132538124106851, -0.00132714020806279,
-0.0102336821930131, -0.00853362465867846, 0.0080862974313578,
0.00401875959491793, -0.0125562187754129, 0.00764220397298354,
0.00402235179194754, 0, -0.00402235179194754, -0.00134438738599352,
0.00447428039492115, 0.0026749904493033, 0.0228883231103123,
0.00995894786573581, -0.00648370544543253, 0.00691446142883256,
0, 0.00814930523307389, 0.0038371396472936, 0.00509338961910322,
-0.0106406622184498, 0.00171013295205302, 0, 0.00681143544522023,
0.00971086372938945, 0.00125971043322171, 0.00041955108649816,
-0.00546794211983492, -0.000421851936228457, 0.0167367923555237,
0.00331400469003729, -0.00497513464011368, -0.00374766210024013,
-0.00250626697605938, 0.0120558516924074, 0.00494642393532541,
-0.00701467699938441, -0.00915147817829975, 0.0112197312423588,
0.00576845439674045, -0.00576845439674045, 0.00494642393532541,
0, 0.00123279243063257, -0.00123279243063257, 0, -0.00494642393532541,
-0.00248241749325562, 0.00124197904600631, 0.00906101751243726,
0.00245700369305224, 0.0121952730938184, 0.0104502558748285,
-0.00200120138830107, -0.00321027563024812, -0.00523877885627932,
-0.00364299127850121, 0.00606429996751112, -0.00403877770319072,
-0.00040477636658709, -0.00487013949604265, 0, -0.00530288821700786,
-0.00533115882291479, 0.00533115882291479, 0.0121952730938184,
0.0223740855423333, -0.00158165315692971, 0.0153154377873874,
0.0231135096871595, 0, 0, 0, 0.00152207030906881, 0, 0, 0, 0.0214414834436809,
0.023175582045126, -0.00254869972301375, -0.00805275215435719,
-0.00811812576524185, -0.00296846229109837, -0.00820900132238167,
-0.00789923234762568, 0.01647977370772, -0.00409455252865953,
-0.00936862763963475, 0.00338282600362039, -0.00904642430281255,
-0.00683893243025846, 0.00114307499586763, 0.00493828164058252,
0.00151457810096511, 0.00189000245260962, -0.00302572091653674,
-0.00379507096855169, 0.00795308111721527, 0.00714154510526965,
-0.00751883241402718, 0.0108839505643408, 0.0107667644875566,
0.00331797539418233, -0.0107309077833078, 0.00260078170005773,
0.00111255342460348, -0.00706716722309242, -0.00523953294466395,
-0.00488998529419149, 0.00188359445406006, -0.000376435163488953,
0, -0.00113015645858461, 0.00301091683994947, 0.00487348668419907,
-0.00224635062637413, -0.00752448997861555, 0, -0.00113357276449877,
0.00753299230754489, 0.00486618965117325, 0.00149253759050572,
0.00372162692299494, -0.00185908214912756, -0.00822127201554634,
0.013418017192671, 0.0179788368680014, 0.00217944147293192, -0.00947874395454384,
0.00292611766938888, 0.010535973938532, -0.00579921886235146,
-0.0358965460324217, 0.00937565465245571, 0.00446928118229728,
-0.00148754211141489, -0.00672145921096678, -0.0101676643222537,
0.00453172980988237, 0.000753295704171375, -0.011359455733583,
0.00152207030906881, -0.00266514530493644, -0.00229007733672759,
-0.0130771094343056, -0.00660324792025868, -0.00586282703377039,
0.000783699099672575, -0.00353149278147136, 0.00353149278147136,
0.0081919709145879, 0.00310318325051595, 0, 0, 0.00154798792521493,
-0.00581735206591327, -0.0054602319747028, 0, -0.00352595859913274,
-0.000785237574702169, -0.0162347655983868, 0.00159553284745728,
-0.016071057021243, -0.00731710581706668, 0.0133740595252565,
0.0100141033130532, -0.0172868626421327, -0.0122400548945021,
0.0106166779146113, 0.00364889924421119, -0.0154978632050873,
-0.00123380644892856, -0.00288481551806852, 0.00903868109244677,
0.0089577146203843, -0.000405432805067107, -0.0118296300990832,
-0.00741354301517916, 0.0070031182197603, -0.00658980967905531,
0.00782057906518796, 0, 0.00409165873624762, 0.0117720052369199,
0.00362538161431702, -0.0048367688006139, -0.0121952730938184,
0.0138101301262181, 0, 0.001209433729362, -0.000806126605524593,
0.0100301743599371, 0.0067635030058999, -0.00596541836657138,
-0.0128464231555028, 0.0104502558748285, 0, 0.00558215165622,
-0.00238853616741697, -0.00079744820812877, -0.00921665510492442,
0.00682048782425015, -0.00521147701854918, 0, 0.00280955432654872,
-0.00240770581802696, -0.00402577033240137, -0.00161485703239972,
-0.00404858852600043, 0.0052599757122973, -0.00121138718629688,
-0.00689238487681054, 0.00324939359830934, 0.0020255222643204,
0.00524723706475916, -0.00161160389434123, 0.005629287080954,
0.000801603249336313, -0.000801603249336313, 0.00480000921603141,
0.00278940208757872, 0.00238473880901813, 0.00672603920450632,
-0.00513936691605865, -0.00238095350574197, 0, -0.00998211522797199,
-0.00321543685397474, -0.00686732652401245, -0.00732305957718893,
0.00975221056924847, 0.00443817553195291, -0.00322580924888261,
-0.00933258954599703, -0.00163198730632175, 0.0117720052369199,
-0.00282885620047768, 0.00323232604658052, -0.0113591485070779,
0.0028519065978676, -0.00203624586749074, -0.00859780117243192,
-0.0211933965991951, -0.0131052393486581, 0.00170068068201745,
-0.0149799505646899, 0.00559021021485107, -0.00904593719600211,
0, 0.0137518282334148, 0.000853242372583995, 0.00764659630652798,
0.010524197325585, -0.00167644632725228, 0.0108469982994603,
0.0103200090319895, -0.0103200090319895, -0.00374143026344731,
-0.00333750831287993, 0.00333750831287993, -0.00166736178686921,
-0.00167014652601072, -0.0109244783943896, 0.0109244783943896,
0.00541780862625085, 0.0123917322951632, -0.00370142310318622,
0, -0.00330169511657408, -0.00538861407540292, -0.00625392907629951,
0, 0.00625392907629951, -0.0146538104692189, 0.00294799118532163,
-0.0195335297803441, -0.000858000910636481, -0.0073228843283073,
-0.0245088854139035, -0.0178815672012242, -0.000451161749137174,
0.00584928551869446, -0.0158266925506929, -0.00228154331231289,
0.00364797486174728, 0.00996385054798399, 0.0040476781145804,
-0.0117382837645494, -0.0109590137897193, 0.00549451931764056,
0.00546449447207875, -0.00775020975293472, -0.0147536101653274,
0.00324600331384772, 0.0160738248310608, -0.00182398591380517,
0.0149493157750067, 0.00806094826163228, 0.00133719654424924,
0, -0.00401696590136469, 0, -0.00358423322781531, -0.0108304307743694,
0.00993236156268518, 0, -0.0195061752855463, 0.00365798304552234,
-0.00274223206577595, 0.011829073327756, 0.0049627893421289,
0.000449943764621263, -0.01130977900063, 0.00995933990275866,
-0.00180342699915093, 0, 0.00584928551869446, -0.00224567801539566,
0.0116228398279925, 0.00399202126953746, 0.00176912916553418,
0.00220701921410527, -0.0128690482488878, 0.00133898703335067,
-0.0134712409898801, 0.00989661698692057, 0.00134198186098367,
-0.00268576705759704, 0.0035794221662222, 0.00578937266082624,
0.0127952330248071, -0.00219442703816641, 0, 0.00219442703816641,
0.0021896220703681, 0.00567068040842544, -0.00304944692855136,
0.00130804466001511, 0.0194180858571018, 0, 0.000427259139163638,
-0.00428082845515387, 0.00556151166114649, 0, -0.00727898739461175,
-0.00215100104445476, -0.00345125450421513, 0.00172711614607524,
-0.00953626646023986, -0.00874896219323551, -0.00131897139367831,
0.00131897139367831, -0.0294270007344228, -0.00772204609391069,
-0.0119267468815289, 0.00735635501358489, -0.00321174857411055,
0.0159565728816129, 0.00225886700972788, 0.00315386609702228,
0.013848780635525, -0.00400089442336693, -0.00222965532726915,
0.00312012733624378, 0.00133422301313635, 0.00531916147760025,
-0.0097735118269804, -0.00402595041655296, -0.00764908116395047,
-0.00543479598595686, -0.00363967644744934, -0.00731933734226997,
-0.00460194093336908, -0.00462321674145683, -0.00511034793929888,
0.00232612340289373, -0.00186046565291953, -0.00935460516722042,
-0.0161064590310529, -0.00239062987391625, 0.0166156208052635,
0.00610187295731857, 0.0139407462172114, 0, -0.00462535514720264,
-0.00139178867282252, -0.00792360239718626, 0, -0.00281162322053063,
0.0116632999302708, 0.00923367594694557, -0.00830646215775355,
-0.0121213605323449, 0.0116578611034215, -0.00931539107000878,
0.00792360239718626, 0.00278164296187677, -0.0069686693160933,
-0.000932835888540318, 0.00139892771706673, 0, 0.000465874688137191,
0, -0.00279850928909831, 0.00419483253202557, -0.00372787011401421,
-0.0037418191459957, 0, 0, 0.0037418191459957, 0.0134479188705976,
0.00230043809754177, 0.00732604009207272, 0.0274482923643382,
0.0114740888134399, 0.0122112289280691, 0.00475676572594352,
-0.00562406846894792, 0.00216684808509005, -0.00216684808509005,
0, 0.00605538182478282, 0.00730400669258557, 0, 0.00554964071079533,
-0.010269666637456, -0.0217399866364061, 0, 0.0056980211146378,
0.00956945101615059, 0.00345722038385787, -0.00910082121059874,
0, -0.0193412622568538, 0.00531209748488992, 0.0083535328587665,
0, -0.0013143484915128, -0.00131607827726032, -0.00837562753912469,
0.00132714020806279, -0.0017699119664587, 0.00132772756836275,
0, -0.00532152031865429, -0.00267141743910715, -0.0166333347067606,
-0.00408812742883757, 0.0149087920695541, -0.0090090699423655,
0.0040641284207954, 0, 0.00763191134239571, 0.0124446050480835,
0.0066035899226109, 0.0039413232540042, -0.00350263055120204,
0, -0.00219539056343576, -0.00175978927949227, -0.000440431630110094,
0.00351803353167934, -0.00793305174079606, 0.00265134932161981,
0.00791212918794937, 0.000437732552404313, -0.0101166466522393,
0.0131754211585644, 0.0078227256812089, 0.000432806758541737,
-0.00433652023901487, 0.00260416813838749, 0.0112070138476765,
0, 0.00128507192668748, 0.00725104477924488, -0.00725104477924488,
0.00725104477924488, -0.00554018037561521, 0.000854335805915429,
0.000853606539240825, -0.00727898739461175, -0.00129004533049004,
-0.00258509407210461, 0.00129338237473986, -0.0038851764821688,
-0.00390032997158274, 0.00433276241075387, 0.0073228843283073,
0.00428266179200065, 0.00128123015604853, 0.00510856794272252,
0.00212089156913775, 0.0080186122833017, 0.0100377254335111,
-0.00417014782809533, 0.00583334987469852, 0.00621248632996574,
0.0028860048891346, -0.0037121098514401, 0.00453515516539094,
-0.00288362712691814, -0.00995033085316788, -0.00166805710069706,
0.00374922330452154, 0.00166182007500959, 0.00165906301016072,
-0.00498546940377853, -0.00752511912251119, -0.00125971043322171,
-0.00421053253634351, 0.00965785377919648, 0.00333750831287993,
0.00870832789178433, -0.00123941350301049, -0.00165494452335402,
-0.00331812830360123, -0.00499792793805653, -0.00334588350618548,
0.00251046156954349, -0.000417972837850833, 0.00417188756940634,
-0.00291849279491352, -0.00502303274627325, -0.00336276063000085,
-0.00506543928183367, -0.00594229252799483, 0.00721105987941906,
-0.00169204778102428, 0.00380308930809781, 0.00126448910420773,
0.00755353990090946, 0.0033389012655145, -0.00752826642079185,
0.00418936515527735, -0.00251151241266134, 0.0070996330786075,
0.00870111145273622, 0.0135220253319743, 0.00527705710084359,
-0.003650379230967, -0.00693173009956993, -0.00204792207556581,
0.00368324541629672, 0.00935156235639312, 0.00242522350074958,
-0.000403795684873032, -0.00242620418246364, -0.00527705710084359,
0.00325071395561105, -0.00325071395561105, 0.00851068966790836,
0.0132292969684973, 0.00832842410648205, 0.00315457674851505,
-0.000393778307903769, -0.00513327879424175, -0.00356930777512066,
-0.00278496300948605, 0.00476948439903513, 0.0106488878512505,
0.0078155927337682, 0.013532006218576, 0.00115141060504209, -0.00307337929201168,
0, 0.00192196868696959, -0.00616334770766835, -0.00892689801592139,
-0.00351494210744452, 0.00195427070209986, -0.00469668182340977,
-0.00511308881899897, -0.000394399531832956, 0.00315084939683619,
0, 0.00196425126993427, 0.0097637955896901, -0.00467472608221886,
-0.0129693309327772, 0.00158102799731896, -0.000790201542501201,
0.00394477829101625, -0.00275971001090713, -0.00593943539738229,
-0.00717420374800071, 0.00717420374800071, 0.00158730192057233,
-0.00516797015844261, -0.00479425608664918, 0.00718279225406615,
0.00594649918772649, 0.0023687338355507, 0, 0.00628932890756406,
-0.00510706276000228, -0.00355099998311248, 0, 0.00197433430371774,
0.000394399531832956, -0.00950878796902721, 0.0146218767880262,
-0.00393082267143718, 0.00275319741267355, -0.00551399585328483,
-0.00514750691145682, 0.00317083103558424, 0.0063116580339253,
0.00900377975346212, 0.00194666209030769, 0.000777605015854554,
-0.00428432982506433, 0.000390243907391508, 0.005059360860173,
0.0023264841823436, 0.00617762582395676, -0.00850411000630036,
-0.0031104224143923, -0.00821118983039515, -0.00314589327477854,
-0.00276079844061128, 0.0121689431000043, -0.00117118889046308,
0.00117118889046308, 0, 0.00893383296773154, -0.00426274623396639,
0.00193986481782682, 0.00154918698682938, -0.00894077870296739,
0.00389712106636431, 0.00271897623324335, -0.000387972846767948,
0.00580384681980028, -0.000385876910055316, 0.00192789726187081,
-0.00657007194263581, -0.00388500877146125, -0.00233827074581239,
-0.00704779737944827, 0.00157047539149247, 0.00391543178367337,
0.00117164629689093, -0.00351906521519574, 0.00234741891830481,
0.00234192144508105, 0.00155823950501377, -0.000389332300031686,
-0.00312012733624378, -0.0133702345631717, -0.0155598591070318,
0.00521356705288722, -0.0230639395985515, -0.000819000864780595,
0.00531155487789681, -0.0024479816386398, -0.000408580189545038,
0, 0, -0.00573772065914913, 0.00205296725769433, 0, -0.00658438592817934,
0.002474228066351, 0, 0.0118732203417906, 0, -0.00203707546507736,
0.00203707546507736, -0.00489597061220692, -0.00574242759284926,
-0.00123482213673443, 0.00205719060413401, 0, -0.0149071083491386,
-0.00837876707419039, 0.00419815897863618, -0.0160476416799171,
-0.00255754615111714, -0.00771212048633885, 0, 0.00257732101430008,
0.00214270491236057, 0, 0, 0.0152934161694986, 0, -0.0012655559588306,
-0.00295920740147793, 0.00548871971143372, -0.00506543928183367,
-0.000423280429600048, -0.00381760802994524, 0, -0.00340570784698269,
0.000426348333041116, 0.00975617494536474, 0.00882544076677094,
0.00125444298281696, 0.00208724767908608, -0.00334169066190304,
0, 0.00625653761430511, 0.0062176366108706, 0.00330033302865651,
0.00615891907835175, 0.00122724498937732, 0.00367272398150309,
-0.00612872194137326, -0.000409920071327452, 0.00653864201270071,
0.00892138926073871, -0.00648037909830368, 0.0024350661383008,
-0.000811030052566331, 0.00687287928776215, -0.00161290357546484,
0.00362538161431702, 0.0016070714587757, 0.00720003110424194,
-0.00239425493464829, -0.00280056194245581, 0.00479617226349305,
-0.0064000218454674, 0.00600121825094968, 0.00398089697702231,
0.00515158658624992, 0.00197433430371774, 0.00157666567939474,
-0.00236593170300337, -0.0115149177816858, 0.0087475706904474,
-0.000395961200976291, -0.00555998255187529, -0.0112135736494183,
-0.0105264129869873, 0.00162667786987658, 0.00486421713036034,
0.00242326451377428, 0.0116303697176208, 0.00715140115762525,
-0.00158478638557158, -0.00198452138954908, -0.00677698427902396,
0.0039920212695379, -0.0023932999058407, 0.00358780532360292,
-0.00358780532360292, 0.000798403236024203, -0.00239712459972141,
-0.00160128136697368, -0.00321027563024812, 0.0048115569972218,
0.00359353551013042, 0.0118860353382559, 0.00118087004472134,
0.00314218640307207, -0.00432305644779341, 0, -0.0138808494846892,
0.00517826291532675, 0, 0.0067313657457686, 0.00354540454103525,
-0.00315084939683619, 0.00511510066677046, 0, -0.00432645709277191,
-0.00434525668884556, -0.00675275654401419, -0.00119641091040901,
0.0079491674544232, 0.00788647620691529, 0.00860388569250015,
0.00388651868928092, 0.00309837583266992, 0, 0.0034742362681861,
0.00805991697710029, -0.00459770924862957, -0.00654982114243063,
-0.000773395243499397, -0.00193610902686636, 0.00425450264644667,
-0.0019316212303071, -0.00659556222191915, -0.00624757986839963,
0.0101325886398609, -0.00194061772527299, 0.00966564648541812,
-0.00115495681750932, -0.0015420203518155, 0, 0.00461894585629441,
0.0030674870678622, 0, -0.000382921697192895, -0.00614677309914002,
0.00346220772847072, 0.00993512184335055, -0.00840020212526671,
-0.00653974248443356, 0.0164597017411166, 0.00416746198917339,
-0.00530706325210728, 0.00265705223696733, 0.00943225253667235,
-0.00905324931060836, -0.00189645420009743, -0.00609293583338122,
-0.00229445607352341, -0.00114920525959361, -0.00345556112715473,
-0.0186340795448929, 0.00508111314018933, -0.0117648415795863,
0.0148788962564605, 0.00503974705005872, -0.0144111549556873,
0.000784313765695988, -0.00117670144975746, -0.0162219185337604,
0.0142576672638262, -0.0130617332962899, -0.000398485759407485,
0.00516797015844261, 0.00395726673920205, 0.00118413276121654,
-0.00157915548015941, -0.00713722300153208, 0, 0.00159045759172383,
-0.00557770370354183, -0.00802250592360743, 0.00241351685942526,
0, -0.00604474941515853, 0.014047993413989, -0.00479425608664918,
0.00399680787852041, 0.00715424355116667, -0.00198216120399142,
-0.00597016698650377, 0.000399121937048275, 0.00676081318504451,
-0.00676081318504451, 0.00278940208757872, -0.0104000937401874,
0, -0.0101031370590059, 0.00405351183318237, 0, -0.00608644605640185,
0, 0, -0.00122174725032265, 0.000814663996176357, 0, 0, 0.00568414035498988,
0.012072581234269, -0.00400802139753864, -0.00281520395058665,
0.00762290551868983, 0.00279385533956589, -0.00119641091040901,
-0.00359784517624018, -0.00160320675621506, -0.00361664047018895,
0, -0.0109290705321898, -0.00366972888896244, -0.00122624172324448,
-0.00368777307918489, -0.00164338574372991, -0.000822706751463453,
-0.00909472990588256, 0.0016597514183645, 0.00289795276295113,
-0.00705250797009827, -0.00166666705246943, -0.0176697908159822,
0.00508045123241896, -0.00211371881698863, -0.00296673241543033,
-0.00425351555805653, 0, -0.000852878516517386, -0.00556151166114649,
0.00598804184462276, 0.000852514970643536, -0.00812838699969465,
0, 0.00343053509678892, 0.0059752631900869, 0.0076303888656919,
-0.0076303888656919, 0.00720800237577945, 0.0105065055565388,
0.00625132271254936, -0.00750629045726248, -0.00335430083894694,
0.00544503963105614, 0.00956149458556421, -0.00789204511602337,
0.00166666705246943, -0.00291849279491352, -0.00041762372709675,
-0.00292825977908873, -0.00419815897863618, 0.00545417756536271,
-0.00377279843869394, -0.00294427131724939, 0.00126289219058062,
0.00168137912666877, -0.00674254556082943, -0.0170579829045763,
-0.027912845743487, 0.00265017822955915, 0.00703299602226659,
0.0121899637445853, -0.00086580091988564, 0, -0.0267735983409136,
0.00532388125275007, 0.00441501820911672, 0.00220022090960237,
0.000878734678688708, -0.00307895558829108, -0.00530036576584125,
-0.0026607554500262, 0.00354610300675073, 0, 0, 0.00441501820911672,
0.000880669365593612, 0.00307624940464235, 0.00525165320781129,
-0.00349803592370002, 0.00393443130483462, -0.00174672533494169,
-0.0056980211146378, 0.00351031514474487, -0.000438116107774889,
0, -0.00836088478848351, -0.000884173355567341, 0, -0.00710167210161528,
0.00311318908322633, -0.0040044547393725, -0.00312570024521852,
-0.00403316701762213, 0.0022426562438338, 0.000447928338956238,
-0.00313972000466745, -0.00405132221917892, 0.00045095829400088,
-0.00406596449019503, -0.00453721286535602, -0.000909918170151069,
0, 0.00227324489801495, 0.00769060564996904, -0.00723657613025086,
-0.00272727441773313, -0.00136643154943439, -0.0137680008688035,
0, -0.0125553846472246, -0.00563117396063051, -0.0113583805135167,
0.00853085742329851, -0.00283554065224223, 0.0089517676696782,
0.00327792379667047, 0, 0.00652378828195754, 0.00278293315044431,
-0.00510797488605208, -0.00326416703907251, 0.00419483253202557,
0.012479940112839, 0.0173046212446883, 0.00270514141519174, -0.0140560010838811,
-0.00182815407406256, -0.00228990253485506, 0.00183234132806165,
0.0185904243057999, 0.00582308476326432, 0.00578937266082624,
0.00133126267719774, -0.00935627230763281, -0.00583615573471841,
0.00224870794106291, 0.0107239633629752, 0.0048769770795043,
0.00573320203251138, 0.00394997221931437, 0.0113241628246996,
0.00819499949683333, -0.00128949082987173, -0.00258398076592492,
-0.00215842950191014, -0.00476500793293644, 0.0030349036951538,
0.00173010423778264, -0.00173010423778264, -0.00129954534208565,
0.00475676572594352, 0.00602152357068597, 0.00641164838064023,
-0.000426166637602421, -0.0115756919578889, 0.00344382604675975,
-0.00258175702785479, 0.00644193767368062, -0.00644193767368062,
-0.00258843975410272, -0.00389526569132981, -0.00260529891176331,
-0.00130519922814276, 0.00304281912668358, 0.000867679013222489,
-0.0060896229943741, 0.000436205023274017, -0.00437063632810863,
0.000875656798509539, 0.0034949795295991, 0.00435161696278108,
0.0107969090306432, -0.00171969087952695, 0.00257842858794488,
0.00939776245877111, -0.00554254243433849, 0.000854700906731676,
-0.000427259139163638, 0.0102041701742417, 0.00926322413187242,
-0.000838574472620923, -0.00420345301562719, 0, 0.00126289219058062,
0.00796149187514628, 0.00499584719337154, 0, -0.00416147084125207,
-0.00459770924862957, -0.00461894585629441, 0.000841396768191505,
0.00293932607171454, -0.00251889301948394, 0.00167996679578453,
-0.000839630611879638, 0.00377279843869394, 0.0079183576893147,
0.0152423136322466, 0.0170251376119523, -0.00483482005458358,
-0.00526423568394874, -0.00774407782905184, 0.00204373666068181,
-0.0106733361356421, -0.0116232109715102, -0.000835421936641989,
0.00250417493132105, -0.0206366557014901, -0.0106952891167476,
-0.000430200049655216, -0.00388015145146747, -0.0139193199905385,
-0.0043898226772523, 0.00745127269574208, -0.00174825219352925,
-0.0048235129697054, -0.00220022090960237, 0.00307895558829108,
-0.00351958128389773, -0.0119708811477364, -0.0170944333593002,
0.0157554463259495, -0.00807179270432723, -0.00406046125220216,
0.0112385988479042, -0.0135015552333932, 0.000453001140249487,
0.0152811962351196, -0.00491840919957287, 0.00491840919957287,
0, 0.000445930888103074, 0.0040044547393725, 0.00752049552026079,
0.0166087733939118, -0.000433557344749147, -0.001301801008319,
-0.000868809785319336, 0.00606587648496815, 0.00172637073519777,
-0.00475470962754176, -0.00303753759262415, 0.00217061079363834,
0, 0.0017331026868348, 0.000865426276428938, -0.00868815195763784,
-0.000436395381134602, -0.0039361520314749, 0.0108957277517328,
-0.00390879976505243, 0.00260756340708079, -0.00522194398115161,
0, 0, 0, 0, 0.0078227256812089, 0, 0, -0.00956945101615059, -0.0118708802217187,
0.00968317425182574, 0, 0.00959030890095347, 0.00130067219724106,
-0.00303753759262415, 0.000434499246461773, -0.00391730642392352,
0, 0.00174291983119401, 0, 0, -0.00742848074061175, 0.0139375078437816,
0.00302310746326384, 0.00644747559096936, -0.00128617381074436,
-0.00688767251542277, 0.00344976625007476, 0.000860585251047929,
0.0195917680439486, 0.00126448910420773, 0.00880693467540228,
-0.00586266335815289, 0.00167855683557905, 0.00334868458593185,
0, 0.0255789523202474, -0.0189150376137497, 0.0136056520557788,
0.0114007749513729, -0.00690638536353338, -0.00204040062627264,
0.00326264563481615, -0.00122224500854351, 0.0016293282626898,
0.00446701250409909, 0.00525359866652231, -0.0146165817828283,
-0.00368777307918489, -0.0128074462899259, 0, -0.00416667269484616,
-0.00628274317949495, 0.0087848295557329, -0.00333750831287993,
-0.0198360586949526, -0.00598548174303781, 0.00171379647773451,
0.0106451931373952, 0.00464822303210344, -0.00464822303210344,
-0.00978939505349885, -0.00945430333912789, 0.00859850525523154,
0.00937373714919865, -0.0499995465771077, -0.000445930888103074,
-0.010762435721793, 0.00360036392517804, -0.00405132221917892,
-0.00406780221932568, 0.0081191244385046, -0.00269905696916517,
0, 0, 0, 0, 0, 0, 0.00135043909787136, 0.0107383582211327, 0.00133422301313635,
-0.0116228398279925, 0.0107335556431085, 0.0128178550501579,
0.00131665588474705, 0.00960286733334126, -0.00828794275203215,
0.00959030890095347, -0.000868055610063756, 0.0030349036951538,
0.0107643665871584, -0.00128562263261456, -0.00300622944892615,
0, -0.00560708822918876, -0.00259852896326374, 0.0017331026868348,
-0.0017331026868348, 0.00475882360337199, 0.00688174758905369,
0.0148907239824663, 0.00379347135629748, -0.00210570724257764,
0, 0, 0.0158932663670845, -0.00624351663968525, -0.00250836251920594,
0.000418497599746992, 0.00334169066190304, 0, 0.00582122225961657,
0.00866878493644663, 0.00655471416864506, -0.00778852061757362,
-0.000823384155205442, 0.00287888333730146, 0.000821018108516203,
0, 0.00164001676775349, -0.000409752105712258, 0, -0.00328407520119001,
0.00123279243063257, 0.0114333626038476, 0.0109025913382155,
0, -0.00120554565534858, 0.0044132468821183, 0.00598684685806905,
0.00911077801352445, 0.0035426134275065, -0.00630668231465803,
-0.00277173014510002, 0.00198059086332947, 0.00197667587587258,
0.00197277633521509, 0.0051110785531896, 0.00703402665738029,
0.00968812072881242, 0.00653471477807477, -0.00692044284457394,
-0.00541587397303234, 0.00155038790745499, 0.000387221689252648,
0, 0.00617524154564952, 0.00651967793559916, 0.00305343748689024,
-0.00649973613047772, 0.00115008638323744, 0.000383068382389773,
-0.00653220382273956, 0.00115584678635905, -0.000385133838767704,
0.00537842249275844, -0.00115008638323744, 0.00306396256552155,
-0.00498371742657255, -0.000384393623799273, 0.00230414848485028,
0, -0.000767459746034316, 0, 0, 0.0125883790897925, -0.00341750854737866,
-0.00725332441314208, -0.00807852056520142, -0.000772797565508743,
-0.00193461077944113, -0.00349175912484379, 0, 0.00155339837062085,
-0.00194212529485505, -0.0156743020934429, 0.00904271937373791,
0.00468567823333554, -0.00939341544902073, -0.00433498215805272,
0.00197277633521509, -0.000394244042165592, 0.00746418708068841,
0.0124466795482241, -0.00348499868689611, -0.00427600262799244,
-0.00664454271866877, -0.000784621460417689, -0.00157109223204133,
-0.00314960890289617, -0.0023687338355507, 0.00197433430371774,
0, -0.00118413276121654, 0.0117786991926128, -0.00234466695925439,
-0.0047058910374127, -0.000393159037892943, 0.00627207073766911,
-0.00824018788545589, -0.00394789300758491, 0.0027651607585355,
0.00903206611350438, -0.00117347950098301, 0.00117347950098301,
0.00778820136034497, 0.00580160203953151, -0.00463858582005061,
0.0050241651577756, 0.00154083235415525, 0.00460830308619453,
-0.00422346567524912, 0, 0.000769230807161403, -0.00539708326262245,
-0.00971069972934924, 0.000390243907391508, -0.00508508821159959,
-0.00866488288572365, -0.00396354066245541, 0.00909276871587128,
0.00196579579718792, 0.00470220302143609, 0, -0.014173465613923,
0.00198059086332947, 0.00749067172915741, -0.00354121951806974,
-0.0106995274734523, -0.0164695427806505, -0.00365186041877097,
0.0177281032935808, 0.0055754823843035, 0.00554456865977437,
-0.00276844155460676, 0.0039525743158233, 0.00629180211652791,
0, 0.00352181933179674, 0, 0.00350945963627547, 0.000778210156005876,
-0.000389029376623995, -0.00546236239184017, -0.00235017734495369,
-0.000784621460417689, -0.0047207011349375, -0.00118366555544158,
-0.00237154261348049, -0.00994241636133797, -0.0161163842824577,
-0.0130827704098047, 0.00369686188132601, -0.012376395601049,
0, 0.00290155643983425, -0.0116570845422128, -0.000837871854631356,
0.00501673292959826, -0.00501673292959826, 0.00585041367817585,
0.00124921940043166, 0, 0, 0, -0.00124921940043166, 0.00208116620382448,
0, 0, -0.00166458631860822, 0.0103584932827423, 0.0147302172404031,
-0.00733798645291284, -0.00204792207556581, -0.00164136269922999,
0.00736500874179935, 0.00203624586749074, 0.00810377206310742,
0.00120992150271304, -0.00363416511421955, -0.00812352152147922,
-0.00408664238545198, -0.00163934462943294, -0.00246406695515944,
0.000411099697465112, -0.0124071070470539, -0.0121416605668556,
-0.00126448910420773, 0.000843170370357971, -0.0029542118974315,
0.0029542118974315, 0, -0.00718971166428606, 0.00254345198583028,
-0.0170798344994707, -0.00734820682708781, 0.00259965484363178,
0.00388685438597491, -0.00475265530586055, -0.00738973151986499,
-0.0140599855682986, 0.00441501820911672, 0.0113886469640088,
0.00477950073429634, -0.000433557344749147, -0.00434594338954719,
-0.0113886469640088, 0.00789824202424017, -0.0017497817237877,
-0.00175284882741433, -0.00615928036053504, 0.0109722408655442,
-0.00131032995380709, -0.0017497817237877, 0, 0.00741551925946515,
0.0090850722224709, -0.000430755983400033, -0.00691745084368778,
0.0116455920396339, 0.0148970620436009, 0.00168847655142157,
-0.00422654897989272, -0.00211999231403537, -0.033228515525793,
-0.000438885238556619, -0.0021973201545844, -0.0017613390727016,
0.0017613390727016, -0.0088378834202989, 0.00265957603575862,
0.00221092285507041, 0.0066035899226109, 0.00350416477709681,
0.0104394162515997, 0, 0.00818793628536563, 0.0110969982117308,
0.00423549976685456, 0.00547946576462577, -0.00336842423755224,
-0.00592218312927439, 0.00381114160220086, -0.00211550745281919,
0, 0, 0.00717454016521435, 0.00837525833673247, 0.00249896006763839,
0.00704666128296072, 0.00206313256593704, -0.00288957889396846,
-0.00455770418131562, -0.00207857069224282, 0, -0.0104581584857417,
-0.00632780006381939, -0.00084674010139496, 0.00591217938322863,
0.00587743085706816, -0.00125654466794822, 0.00501673292959826,
0.000833680748577592, -0.0142679717977052, 0.00168918959084507,
-0.00380469704366426, -0.0106451931373952, -0.0030010740637767,
-0.0120951798489748, -0.0179789977690343, -0.00354610300675073,
0.00531445006349252, -0.0137871874829156, 0, -0.00269058458279003,
0.0098302847019478, 0.00266429997746309, -0.00221975673831309,
0.0132452267500209, -0.00263504763800526, 0, -0.00308302380295222,
-0.00752715530906345, 0.00443459706786609, 0.00705470297988997,
0.00438404910853452, 0.000437349668025, 0, 0, -0.00614037017023783,
0.00482774692335752, 0.00349650705872939, -0.00393443130483462,
0, 0.00262467342277128, -0.000874125929785752, 0.0074058277930007,
0.00692044284457394, 0.0107182162200239, 0.0164943473048607,
-0.00420345301562719, -0.0131441366618175, -0.00427716648636567,
0, -0.00386515404962839, -0.0129928202824856, -0.00787750244309438,
0, 0.00307085128357398, -0.00307085128357398, 0.000878348760906089,
0.000877577941358787, 0.00306547207429464, 0.0147572122516557,
0.000430755983400033, -0.000430755983400033, -0.00605014810068338,
-0.00826990567406494, -0.00218770596989293, -0.0123403945930169,
-0.00355397975144944, -0.00669794868412765, -0.026329255820047,
-0.035107747860522, 0.00853895131423243, -0.00378609065609048,
-0.00618313503784185, 0.00475964725298805, -0.00380590373767831,
-0.00717534769527051, -0.00481232882519222, 0.003371060259584,
-0.00240674003056496, 0.01530398113939, 0.00662254076049384,
-0.00519972809457059, 0.00472814119594611, -0.000943841504706722,
-0.00948773720510898, 0.00238038674645713, -0.0129219314169204,
0.00863314714470276, -0.00863314714470276, 0.00719945514285447,
-0.00527452673326989, 0.00192123017789392, 0, -0.00432797020845888,
0.00863728978120104, -0.00287081536879885, 0.00668579376779954,
0.0099456183289277, 0.00844678884196881, 0.00279981517474681,
-0.000932400999951177, -0.0274248866299751, 0.00382775586976436,
0, -0.00911495876568313, 0.00288739372883562, -0.0115999367406774,
0.00871254301184177, -0.00192957126921467, -0.00921665510492398,
0.0135529664047036, 0.0119475734211179, -0.00190204527148108,
-0.000476077133486186, 0.0165293019512105, 0, -0.00657897109804217,
-0.00425230973853052, -0.000947418356040064, -0.00856740128529854,
-0.00575265248944978, 0, -0.00096200103619104, -0.00579152197958877,
0.00531017419652846, -0.0130847517436585, -0.0392987649797338,
-0.0220578319822291, 0.0133953574322372, -0.0185955771593447,
-0.023741552999295, 0.0294603104616407, -0.000518537735238134,
0.00465718823650496, 0.00924980901247041, -0.0149448800613956,
-0.0135914272035391, -0.00739570196112904, -0.00638300039503514,
0.00691308257018264, 0.00105932213295956, -0.00424403820047914,
-0.0193242728264029, 0, -0.0180480698717029, 0.00165426009602632,
-0.00497101272202016, -0.0223973528130257, 0.0174015056196537,
0.0176507616073067, 0.00870990808376204, 0.0097088141269608,
0.00160901080570097, 0.0148938923387343, -0.00158520508756599,
0.0141771867849259, 0.0185955771593447, 0.0147322959170677, -0.0147322959170677,
-0.0159920870139483, 0.00155884676929086, -0.012539349252735,
0.00524385050601106, -0.00629592845681515, -0.0331758622388993,
-0.00163354242696157, 0.00922165471745373, 0.0155374140949061,
0.00265463389237208, 0, 0, 0.00158940430810706, -0.00424403820047914,
0.00265463389237208, 0, 0, 0.00792187929502663, -0.0105765131873987,
0.00265463389237208, -0.00265463389237208, 0, -0.00479617226349305,
-0.0172418064345061, 0.00271370587159625, -0.00870990808376204,
0.00109289628364495, 0.00490330592852084, 0.0172418064345061,
0.0106270925742864, 0.00684753794897741, 0.0223211966416792,
-0.00359990103347041, 0.00974116905555311, -0.00460241160145181,
0.00153649197961681, -0.00102406562969026, -0.00205128277055744,
0.00205128277055744, 0.00714653373407392, 0.014643028616852,
-0.00603320080416792, -0.00454890852636902, -0.00660739379708053,
0.00812599697427041, 0.0025258917193165, 0.0120362536319476,
0.00348345711791076, -0.00448096343503623, 0.00547401217602772,
-0.00697213979704969, 0.00399003023120104, 0.000497636238187216,
-0.00648866763730638, 0.000500625792683618, 0.00150037537523451,
-0.00450789643918981, 0.00500752173062224, 0.0133963799506489,
-0.00444554943056419, 0.00444554943056419, 0.00393507640099111,
0.018483016437993, 0.00911495876568313, 0.00997870004989654,
-0.00568991577767886, -0.000951474857699086, -0.0187370362680954,
-0.000970402793278424, 0.00532559961195744, -0.0126337929186819,
-0.0353358484834754, 0.00757005886054518, 0.0124907944245356,
0, 0.00495295712884847, 0.00345253117302846, 0.00196753629631097,
0.00831910841396466, 0.000487210728273624, 0.00340384476469291,
0.00145525128762491, 0.0149148783592303, -0.00670821873511818,
-0.0111192830424067, 0.00774822278282272, 0, 0, 0.0134165884092208,
-0.00812429797174286, -0.0174254167138592, 0.00487093023459639,
-0.00291971010333469, 0.00486145828624496, 0.00145384081822808,
0, 0.00145173023318668, -0.00242072255706827, -0.0166181732454369,
0.00246123678269061, -0.00147601502812078, -0.00890213594059297,
0, 0.00544960476756451, -0.00644364063470393, 0.020182816518008,
0, -0.0107791342548271, 0.0004924895445515, 0.00784702408090476,
-0.00637100905278398, 0.00343558620133955, -0.000490075971582904,
-0.00985229644301144, 0.00543077118513402, 0, -0.000985221754569832,
0.00246123678269061, 0.00685940955089315, 0.0019512201312617,
-0.00684599250799023, 0.00440637187861936, 0, -0.00293542285144444,
-0.00540409077401183, -0.00296004162847696, -0.00296882949380173,
-0.0149778136970351, 0.00501757199197961, -0.0030075210639553,
0, 0.00100351237724006, -0.00100351237724006, 0.00150489117942021,
-0.00351494210744452, 0, -0.00960330857739367, -0.0163854152321297,
0.00463559933895441, -0.00154281336851048, 0.00871130284712329,
0.00711746776886368, 0.00505306785139137, 0.0100301743599371,
-0.0034991287889401, 0.0134296969915408, 0.00148111604464374,
-0.010414177930119, -0.00249563393814745, 0, -0.0115608224010759,
0.0110609473594248, 0.0188217542405877, -0.00541206733067012,
-0.00148111604464374, 0.0004939491332836, -0.00346277862708533,
0.00986201286911559, 0.00489477237672853, 0.00389864041565735,
-0.00536455809815362, 0.00779351241355686, -0.00194269123750201,
-0.00243368336229688, -0.00488520756604904, -0.00786631391598247,
0.00394089179987667, 0.000491521268190187, 0.00734397425575839,
0.00243605479788123, 0.000972762722621923, -0.00780872684311218,
-0.0103423724145943, 0.00739830748144499, -0.00147528917832318,
-0.00493340911795581, -0.00148478126757912, -0.00397022854018525,
-0.0221225713835929, 0.0181365172767589, -0.000999001082084838,
0.00845986632982099, 0.00740561271479212, -0.00295566717634665,
0.0107950017868998, 0.0106797131566845, 0.00769604568161686,
-0.0154517801468659, 0.00775573446524902, 0.00241138289019505,
0.00240558210785524, 0.0128910832676183, -0.00237473395855314,
0.014164542768651, 0.00607620471929327, 0.0143421380439035, -0.000459453258713882,
-0.00414460643947434, 0.0201011793210872, 0.00721048639992139,
0.0173607743305837, 0.0105356549603339, 0.0108578699720487, -0.00737050811163309,
0.0133998114756397, -0.00862446052984556, -0.00347071931912923,
-0.0448864506889586, 0.0211386328389218, 0.0215724852541279,
-0.00261666089117174, -0.00306145001848979, 0, 0.00393443130483462,
-0.00832425398208692, 0.00351339843184473, -0.0030735479739068,
0.0096280831289759, 0.00651185283492373, -0.00564115186567982,
-0.00261438057407082, -0.0118500399174959, -0.00353826109959865,
-0.00177383638529127, -0.00444840591203599, 0.00622224229732726,
-0.000443164199586121, -0.0251359732715422, 0.00408256407370988,
0.00901312996575143, -0.0049471654755564, -0.00678582110796899,
-0.000908265275881615, 0.00994583243773484, 0, 0.00895662055601187,
-0.00402055489106257, -0.00493606566494931, 0.00404040953700502,
0.007587628458114, -0.000889679773986352, 0.00133422301313635,
0.000888494061115352, 0.00531445006349252, 0, 0.0135996831715719,
0.00434783293610375, 0.0064865092296067, -0.0103987072208986,
-0.00174367960482691, 0, -0.00174672533494169, 0.00174672533494169,
-0.00568804858894589, 0.0074317281937728, 0.0112604048183798,
0.0136871255623849, 0.0105642224827358, -0.00886642920564151,
0.00211819608959152, 0.00674823311605, -0.00759497321744496,
-0.00765309857771701, 0.00553310626368164, -0.0110969982117308,
-0.00042927667540571, 0.00042927667540571, -0.00042927667540571,
0.00641988081256262, 0.00722638638860928, -0.00127145598819656,
0, -0.000848536325838811, 0.00550265938730821, 0.0063118240264548,
0.00793820755122843, 0, 0.00746272120158986, -0.00746272120158986,
0.00746272120158986, -0.00455393045335795, -0.0087518791588912,
-0.00503567996561571, -0.00042078687765823, -0.0122804846315123,
-0.0176234950220189, 0.0013001085254909, 0.000432994161343903,
-0.0161469796632958, -0.00352578593540898, 0.00176444686270738,
-0.00397439251351006, -0.00933547566757698, -0.00313129309755311,
0.0102519284355771, 0.0070702901687798, 0.0170272614102807, 0.0252204006429464,
-0.00211282563279669, 0.00211282563279669, 0.012167131428674,
0.0165429744537575, 0.00572598701488491, 0, 0.00366375333495395,
0.0117149390676974, 0.00560674276123585, -0.00721446014933269,
0.00281181145290166, -0.00401929801539147, -0.00403551798675039,
0.0216008398667782, 0.00709782159642858, -0.0035426134275065,
0.00432815942234033, 0.0136480964271311, -0.00349175912484379,
0, -0.0144844967246276, 0.013706891708773, 0.0127537960625701,
0.00230149698827953, 0.0272125635248845, 0.00594797292607741,
-0.0115565125856869, 0.0350008097872232, -0.00290065468063716,
0.00651468102119335, -0.00361402634055619, 0.0200723584893883,
0.0200252607728251, -0.00523104941755248, -0.00210010577711639,
0.0221769310459492, 0.00512733989471981, 0.0279056601503958,
-0.00999675102871089, -0.000670016775484061, -0.0141750601622688,
0.00305447378155677, 0.027737784965538, 0, 0, -0.001319261405063,
0.0254168129841235, -0.00161004703915779, -0.00873933382284564,
0.0048646115065818, -0.0133530725306592, -0.0141986866129282,
0.0151818097463488, 0.0194621777067301, 0.0118155613780706, 0.00727505976351717,
-0.00727505976351717, 0.00411849148597021, -0.00729814440334975,
0.00729814440334975, 0.0103790835805166, 0.0250283418126491,
0.0175443096509098, 0.00925241640102703, -0.0329185694745178,
0.0349960297854164, 0.00679571236958809, -0.0226334855207035,
0, -0.0102937633133213, -0.0144063794258837, -0.00464181554481069,
0, -0.000620539889599314, -0.018797545967502, 0.00693352919485601,
0.0124845566622453, 0.00741659765504954, -0.00990107098271142,
0, -0.00342626094408649, 0.00529019438214906, -0.0096679374586759,
-0.0142025426600205, -0.0124741742251757, -0.00840341079637952,
-0.0183431281317259, 0, 0.0289969819290019, 0.00767758089903392,
0.0220620356934274, 0.000623247137656602, 0, 0.0108444130141288,
0.0125556694590228, 0.00576545368857317, -0.0130959691048274,
0.00793897299511181, 0, -0.000608457578857635, -0.00488103470591383,
0.00943546796189354, -0.00212282111560747, 0.00484555763352335,
0.0102195058334833, -0.00479617226349305, -0.00481928643594864,
0.0012070007500351, 0.00691212368846905, 0.0101311951179954,
0.00679571236958809, 0, 0.0227083993698121, 0.00459506839542279,
0.00314690575819254, -0.0193228794972446, -0.0070134711798584,
0.00438918041876324, 0.00611266639154939, -0.0116755558652413,
-0.000587371529368941, 0.0116823758543565, 0.0092486208376612,
-0.00230414848485028, -0.00202049426508788, 0.00662349433704934,
0.000860708703257274, -0.00229687152370905, -0.00692243895017164,
-0.0142838749791365, 0.00380952841666726, 0.0133645219847027,
0.00632913505164723, -0.0138610362433846, -0.0238351727947803,
-0.00837325466299665, -0.0099593399027582, 0.00121248878146174,
0.00784554416528405, 0.00509669818611513, 0.00119545740477367,
-0.00719427563402686, 0, -0.0118022250566279, -0.0079462520811493,
-0.00522997654194013, -0.0024706621585695, -0.00371747640013265,
0.00371747640013265, -0.0146397257197157, -0.00282796732067503,
0.000629128677561042, 0.0121895555245857, -0.00529513774214818,
0.00529513774214818, 0.000931532438111393, -0.00310848867026703,
0.00186625248567607, -0.0040479581975581, 0.0133273319267979,
-0.00617667189172888, -0.0115284031115959, 0, -0.003767665367441,
0.00533334597536284, 0.0102724638721097, 0.0101680124226839,
-0.0049170351064225, 0.00338305720169707, 0.0125096978433104,
-0.000303260047812781, 0.00242350921405521, -0.000908127805713033,
0.0057375968966733, 0.00690175282784544, 0.00922762910203812,
0.0114893000477556, 0.00467563680559957, 0, -0.000583260442315048,
0.000583260442315048, -0.0035046764844493, 0.00467017778188472,
-0.00262429076109516, 0.00640281582053959, 0.0086655654909964,
0.0173917427118688, 0.0070412908861055, 0.00420109846282868,
-0.00027952480964677, 0.0381233606747111, -0.00350451902011617,
-0.00731809743803513, -0.00217865009922225, -0.000818219055603642,
0.00136332672786432, -0.00546449447207875, -0.00466841435533549,
-0.00745962845793935, -0.0105939100633625, 0.0114255109428885,
0.00497513464011412, 0.0161359989468659, 0.000542446446715861,
0, 0.00540834200926144, 0, 0.0107297166725671, -0.0319897160994165,
-0.0225667746123999, 0.0120432767755938, 0.00914513786668492,
0.0079681696491769, 0.000547195636088738, 0.00136668054619538,
-0.0120913811681382, 0.0101775049858541, -0.00521334697033238,
-0.00385888020197855, -0.00138178825357427, 0, -0.00944713889140969,
-0.00475724977226077, -0.00337173682736935, 0.00561326200238277,
0.00279485928154388, -0.00139645323628912, -0.00673025535406779,
0.00393148498530316, 0.0147450160657412, 0.00220689744742897,
-0.00193076876971165, 0.00578115856520522, -0.000823836377777276,
0, 0.00192123017789392, 0.0054689772626455, -0.00273074992194466,
0, 0.00952517747336179, -0.00108401094625865, -0.000813780053047441,
0.0126737573437974, 0.0021413284413434, -0.0013377928416598,
-0.0153786924026513, 0, -0.00517924014505944, -0.0104396552535659,
-0.00415110600237867, 0.0162292105574893, 0.000818219055603642,
0.00272257178791113, 0.0153786924026513, 0.00187240926711718,
-0.0145359221575507, 0.0043290110895855, -0.00650056460309312,
0.000814885282088618, 0, 0.0102648119015574, 0.045187619263833,
0.00358974744461493, 0, -0.0159962131153262, -0.0149354276337754,
0.0112876903946799, -0.0326332180149032, -0.00080938895555871,
0.00296456220354191, -0.00620868508116557, 0.0113086827143545,
0.0190936588155792, 0.00497839361770103, 0.00261028600313074,
-0.0152353879418738, -0.0101091576528773, 0.00267022855587884,
0.0100823353415125, 0.0144151362588749, -0.00574114311794416,
-0.0113173993854447, 0.00685837560382563, 0.00445902378161911,
0.00391798855642911, -0.00391798855642911, -0.0137024541797643,
-0.00425419413211436, 0.00292670157024899, -0.00586199387123498,
0.010897117798994, 0, 0.00920937965266377, 0.0191964390202028,
-0.0043775015184484, 0.00822415326762327, 0.00891839265759486,
0.00934232271426305, -0.00226443674569232, -0.00454431481347761,
-0.0106844058883473, -0.00795998553944433, -0.0098446390940623,
-0.0154797791275021, 0.00158520508756643, 0.0036890687424056,
-0.0113742794841967, -0.00133102641772398, -0.00240032119513423,
-0.00616045874912352, 0.00241513601896148, 0.0109290705321903,
0.00396878414844659, 0.0115517167988544, -0.00497187992655634,
0.00366588522610556, -0.00787199027037966, 0, 0, 0, 0.00157936331855613,
0.00759862165227432, -0.00261369725374916, -0.00235818267280719,
-0.00499672308519372, 0, 0.0164731140415375, -0.00155722844860628,
0.00129785871559962, -0.00885883855052993, 0.0015690379788027,
-0.00892394373680361, -0.00900429812472092, -0.00239712459972141,
-0.010454457903859, 0, 0.0117869031376374, -0.000266347051780969,
-0.00802357874949644, 0.00348479108852651, 0.00932221426786395,
-0.00292048528301159, 0.00212483479679904, -0.0136238249857463,
-0.00215401267990822, 0.000269505459116637, -0.0173964702519172,
0, -0.00522337957365293, 0.0123272345846397, 0.00136035934459944,
0.00650408796917645, 0.00296696104363736, 0.00510410779015391,
0, 0.0021413284413434, 0, -0.0140013057320933, -0.011179393867804,
0.00328497421052676, 0.00599456835981416, 0.00189985129746306,
0, -0.00789441965727722, -0.00273672858275953, 0.0103599617976968,
0.0105221529273893, 0.0106781580555637, -0.00265886888894373,
-0.00560674276123585, -0.00995568561984772, -0.00542300678280361,
-0.000815993517329527, 0.00866979150379521, -0.0100312621021161,
0.00489264362786823, -0.0205458535288439, -0.0125332374477405,
-0.00140232809406404, 0.00810626381625434, -0.00726463099900476,
0.00196105960032433, -0.00448808607495454, -0.0113091394744727,
-0.0195240195371418, -0.00290360250457455, -0.0196746913105201,
0.0170542210382392, 0.00407332538763594, 0.0138410514118714,
0.00314510623799391, 0.0169831424083493, 0.0183542416072342,
0.0134119979511964, 0.00271517951221778, -0.0120023262966353,
0.00928714678441755, 0.00542300678280361, 0.00781148753218819,
-0.00430223841866084, -0.00107845790986527, 0.00161725102634813,
-0.00675586950059603, 0.010519314802774, 0.0029470885362497,
0.0103793346969487, 0.00869914540828987, -0.00843442005929163,
0.00922151179610875, -0.00288903680918917, 0.00131423333412251,
0.011231670899333, 0, 0.00285306898240645, 0.0028449521322309,
-0.00336308685884878, -0.00858821153742051, 0, 0.00182791538035687,
0.00182458020121734, 0.0049357159558463, 0.0100555185298874,
0.00358515108720203, 0.00637025978902939, -0.00381728418744576,
0.00964229749223433, 0, 0.034255024550319, -0.00342214951428899,
-0.00663311607810879, 0.0100552655923978, 0.0257752436082033,
-0.00907794098459469, 0.000719683370393298, -0.00336296257372881,
-0.00361576867355096, -0.00144997608777553, 0.00434363617296185,
0.0052833936542025, 0.00787311713258987, 0.0106371408360877,
0.00211391740492894, 0.0037470769837884, 0.0023348131082872,
0.00279459893101563, 0.00209083593418447, -0.00675284903999573,
0.000233617569103561, 0.0337613876835814, -0.0175423111078472,
-0.00969984510484201, -0.00768786547090228, 0.00373483160565335,
-0.0114822585482646, -0.0173545304094778, 0.0123929136154191,
0.00566841385493522, 0.00446481766257811, 0.00561142459110098,
0.00812729424221859, 0.00254071045554349, -0.00277200454701054,
0.0032332591677342, 0, -0.0043905327606133, 0.00115727359287909,
0.00369430113071889, 0.000921446736471321, -0.00345981200850476,
0, 0.0043804104624745, 0, 0.0107540219557638, -0.0031912495740638,
0.00432851797822797, -0.00752998423883078, 0, -0.0087417165835717,
0.00322952991191983, 0.00436631740452942, 0.00320513094894848,
-0.00320513094894848, -0.00828924847678669, -0.00115673812782369,
-0.00162168459031298, 0.00115861442690957, 0.00415993204472942,
0.000230600715883877, -0.000922722095456852, -0.00416281822238096,
0.00830837920315197, 0.00846783867444056, 0, 0.0144798910003145,
0, 0, 0, 0.0149372970158441, 0.00176873802467936, 0, 0, 0.0138205556186319,
-0.00524247596485017, 0.00415255760082456, 0.00630505404313375,
0.00927630424867809, 0.0121653405136994, 0.0111803717087495,
-0.0101202634784383, -0.0158055404784485, 0.00472509470187665,
0.00917144106064782, 0.00254453063499449, 0.0115802957445039,
-0.00272508291477136, 0.00669458567221426, -0.0151263388301519,
0.00169204778102428, -0.00593473551981427, 0.00445435034938013,
0.0019029501460861, 0.00904222331603277, 0.0256265315195856,
-0.0020424843701905, -0.00389225114962422, -0.00184899898594715,
0.0213618132095288, 0.0377254176313353, 0.00733877376379199,
-0.00927901496230055, -0.00936592212321496, 0.00761648052628461,
0.0073658018622722, 0.00731194329500573, 0, -0.00191901805197814,
0.00917788469097447, -0.0128341958940852, 0.00192604065704183,
-0.0036626546969587, -0.0073658018622722, 0.00543268701322663,
0, 0.0220083517018783, 0.0178224396644939, 0.0187885431132515,
0.00545952220489898, -0.00728600673093371, 0.00364963908754934,
0.00906624523775346, 0.0354647095722829, 0.00174064447778388,
-0.0246491352746538, 0.00355872261699375, -0.0017777782459989,
-0.0161438483713567, -0.0036231923694201, 0.0144146640021843,
-0.0308834717154527, -0.0129992545436397, 0, -0.0150661983546443,
-0.00189933580365231, -0.00763362485507102, 0.00763362485507102,
0.0206968817711548, 0, 0.00186046565291953, 0, 0, -0.00186046565291953,
0.0092679069307815, -0.00555043053064885, -0.00371747640013265,
0.00742118433761707, -0.00370370793748442, 0.0110702237542468,
0.0109490144896704, -0.0109490144896704, 0.00731264684628607,
-0.00364963908754934, -0.00549955556603843, -0.00368324541629628,
0, -0.0092679069307815, -0.0037313476128582, 0, -0.0169655341582966,
0.0169655341582966, 0.0148425730379289, -0.00554018037561521,
-0.00371057939653596, 0.00740744127786197, 0.00368324541629628,
-0.00368324541629628, 0.0091828009823347, 0, 0, -0.00182982667707599,
0.00365631120311072, 0.00727275932908, -0.0036297680505788, 0.0126469617007672,
-0.00359712618084984, 0, -0.00542006746933854, 0.00542006746933854,
0, 0.0196259644567487, 0.0087951314528274, -0.0105634785095692,
-0.00710482562374448, 0, 0.00887317268048626, -0.00176834705674178,
0.0158036651731255, 0.005212870188533, -0.00870327512830205,
-0.00701757265864611, 0, 0.012248622076199, 0.0172418064345061,
0.00170794234515625, -0.00513260322652087, -0.00515908281002719,
0.00172265331144672, -0.0243914531241591, 0.00527705710084359,
0.00175284882741433, -0.0105634785095692, 0.0192819295494502,
0.00692044284457349, 0.00858374369139181, 0.00341297259623996,
-0.0119967162876318, -0.0103987072208982, -0.028270433938256,
0.0106952891167484, -0.0342684568348148, -0.0073665158167624,
-0.00742118433761707, 0.00371747640013265, 0.0110702237542468,
0.00366300775873674, 0.00182648452603473, -0.0110092855083694,
0.0018433184942892, -0.0204658305922911, -0.00376648279547664,
-0.0152094186635288, 0, 0.00573067470898492, 0.0355509842643187,
0.0145987994211532, -0.0054496047675654, 0, -0.00547946576462532,
0.00547946576462532, 0, -0.00547946576462532, 0.00182982667707599,
-0.00366300775873674, -0.00551979322359797, -0.00555043053064885,
-0.0149535496670583, 0.00563381771825577, 0.00373832211060732,
0.0129751588631333, -0.0129751588631333, 0.00558140983819522,
0.0220192382439173, 0.00723330459351956, -0.00723330459351956,
0, -0.00545952220489898, 0.00727275932908, -0.0164387263431602,
0.00916596701408023, 0.023445618574681, -0.00178412179350129,
0.019452425926815, 0.0052401866635563, 0.0121213605323449, -0.0121213605323449,
0.0103987072208982, 0.0438518825288501, 0.00329489589685217,
-0.00494642393532541, 0.00986850114075377, -0.0131797624444099,
-0.00832644277655348, 0.0215062052209634, -0.00327869146169846,
-0.024938948347252, 0.0200006667066699, 0.00329489589685217,
0.0130720815673531, 0.00324149392417095, -0.00649352931054903,
0.00649352931054903, -0.0048661896511728, -0.0081633106391612,
-0.0265796378047121, 0.00671143458798706, 0.0149380371088661,
0.0211906869796392, 0.00161160389434123, 0.0143887374520997,
0.0188684843043827, 0.00775799081093975, -0.00309597770512848,
0.0305367238600818, 0, -0.0197424787292011, 0, 0.0152209940103551,
0.0268272422331446, 0.0102414940521971, -0.00437637459979889,
-0.00292825977908873, -0.0282546676575999, 0.0208962827264125,
-0.00444116199996802, -0.0149479614358725, -0.0105981309918235,
0.0121030221712433, -0.0166796891986554, -0.0247690681124091,
0.00312989300892763, 0.0185763855729357, 0.0257394982015757,
0, 0.0044742803949207, 0.00593473551981472, 0, -0.0029629651306573,
-0.0225065764097998, -0.0030395160178962, -0.0168845147020091,
-0.0282593373144033, 0.0063492276786592, 0.00315956029036801,
0.0279956074894274, -0.0123458358222992, -0.00155400186673393,
0.0169626272195789, 0, 0.00761618304530831, 0.00906350615334706,
0.0089820963158278, 0.00594355390084811, 0.0103169669709322,
0, 0.0360010080531108, 0.0361150359900781, 0, 0, 0, 0.0401123293978056,
-0.00921665510492353, 0, 0, -0.0240975515790609, 0.0280579527951579,
0.00525625388882656, -0.00131147559781031, 0.0078431774610257,
-0.00915637528598623, 0, -0.011897033911846, 0.019750477417416,
0.0607013118736068, -0.00862605471976696, -0.00496278934212935,
0.0197050710793327, -0.0209496263115376, -0.0062461164969525,
-0.0357180826020791, 0.0481715436733658, 0.0123002780816517,
-0.0160200591841839, 0.0160200591841839, -0.0297788753556114,
0, 0.00752826642079096, 0.00374298627883451, -0.0062461164969525,
0.00250313021811799, 0.00871194060202196, -0.00996272250367447,
-0.00376175992189154, -0.00630121807672879, -0.047905599759698,
-0.0187421818097411, -0.0204785313435405, 0.0339015516756813,
0.0106101791120157, -0.0240331994441565, 0, -0.0163491380015293,
-0.0292703823001128, -0.00283286308430419, -0.0171678036223657,
0.0185852383033387, -0.00853490244983668, 0.0295588022415441,
0.0205909814439273, 0.00406780221932568, -0.024658783663253,
0.0137743224648297, 0.0176275750833135, 0.00134318354646723,
-0.0135137191667232, -0.00956945101615059, -0.00137457066316671,
-0.0236615074981579, 0.0139862419747399, 0, -0.0660211010977942,
0.0029629651306573, 0.00147819686931072, 0.0132063548136108,
0.00870832789178433, 0, 0, 0.0114943794257352, 0, 0.0042765567672598,
0, 0.00425834568257688, 0.0293112400402773, 0.00274725447513902,
-0.0250013022054176, 0.0042105325363444, 0.00836824967051619,
0, 0, 0.00554018037561566, -0.00971555178609584, -0.0254250983658109,
0.00996449525943177, -0.0301963992310164, 0, -0.0102715503218302,
-0.0406376457318256, 0.0242804037070545, 0.0104400649966827,
0, -0.0270693219682174, -0.0231313320234197, 0.020077894536529,
-0.00767463475310848, 0, -0.0249622945599137, -0.0386521544342795,
-0.00164338574372991, 0.0275767677702348, -0.012882625831014,
-0.0146941419392208, 0, -0.00329489589685217, -0.00165152803847324,
0.0244910200082957, 0.00963089306096077, -0.00480385231264524,
-0.00967749488206504, -0.0130507561954909, 0, 0.0098040000966213,
0, -0.0246926125903721, 0.011599135843352, 0.0130934767470201,
0.00809721023261911, 0.0112270625937834, -0.00640002184546784,
-0.00160642604827377, 0.0175303294049973, 0.0141178815457845,
0.0200469319452914, 0, 0.00456969569006471, -0.0153142349730429,
-0.0250013022054167, 0.0172553300907623, -0.00780644089283022,
0, -0.0142070266443488, 0.00949374219225074, -0.0174746305232638,
-0.0145281005629094, 0.0193242728264025, -0.00961545869944214,
-0.0129661459972672, -0.0181074906273899, 0.00497101272202016,
-0.00165426009602676, 0.013158084577511, 0.00488998529419238,
0, -0.0114473858403512, -0.0283588645600688, 0.0101010959865038,
0.00667782114260529, -0.0167789171291091, -0.00508906950747168,
0.0168638060520054, 0.0280315138558862, 0, -0.00652530863492284,
-0.0215062052209634, 0.0182275137592649, -0.00990107098271142,
-0.00165975141836405, 0.0033167526259934, 0.0421456214507634,
0.00158604315563515, -0.00317460584077267, 0.04355276178417,
0.0314632694557844, 0, 0.00147383961830094, 0.0189648280589738,
-0.0145562977742077, 0.0160003413464409, 0.0171678036223657,
-0.012848142477849, 0.00858374369139181, 0.0113315660095497,
0, -0.00282087634164174, 0, -0.0127933514599095, 0.00712761392423289,
0.00848661387731831, -0.00140944350323391, -0.0199436809673355,
0, 0.0114450462458731, -0.0100072314762452, -0.00865806274311431,
0.00289435802636451, 0.0256790144176913, -0.0227930095285567,
0.00431345025371943, 0.00143369200184829, -0.0261263045922195,
0, -0.0193027522545286, -0.00300300525977004, 0.0193602472845411,
0.00881062968215574, -0.00146305805176095, -0.00587373201209385,
-0.0148370674304674, 0.005961269516499, 0.0234959413689309, 0.0158390640297466,
-0.00142959280959509, 0.00854706057845878, 0.0154823641483768,
0.00557104504945549, -0.0111733005981254, -0.00422238487984838,
-0.0170701157716033, 0.00429492428288114, -0.00429492428288114,
-0.00287356519573212, -0.0101230076314485, -0.00583092031079246,
0.00437637459979889, -0.00583943265155984, 0, 0.00874641144286858,
0.00289855275401152, -0.0175186962089739, -0.00590843668616614,
0.0147061473896954, 0.0159308220721943, 0.00430108189939027,
0.011379923662763, -0.011379923662763, 0.00712761392423289, 0.00141944665422589,
-0.00141944665422589, -0.001421464347378, -0.0100072314762452,
0, 0, -0.0014378147696279, 0.0014378147696279, 0, 0.012848142477849,
0.0126851595273161, 0.0344149586086173, 0.00270270434788511,
-0.00677050656721079, 0.00542006746933854, 0.00135043909787225,
0, -0.00813012608325092, 0.0215351525512988, 0.00398671624382096,
-0.00132714020806279, -0.00132890385005346, 0.00398142529918477,
-0.00398142529918477, 0.00265604405811626, -0.00798939003347865,
-0.00805373480709726, 0.00805373480709726, 0.0026702285558784,
-0.00937716181259685, 0.00268817366180052, -0.0135137191667232,
0.00542742173536581, 0, 0, 0, 0.00942767925555899, 0, 0, 0, -0.0257991226702421,
0.0150173470669532, 0.00675222011728671, -0.0081081525284219,
-0.0191786700305725, -0.0280523303480997, 0.001421464347378,
-0.0143063956512384, 0.011461443519007, 0.00426439878645724,
0.00847462699097168, -0.0198870190427165, 0.00999294539751894,
-0.00712761392423289, 0.0156142278015512, 0.00561799230422366,
-0.0183750753049949, -0.00859604146979809, -0.0638672836381389,
0.0242436116099931, 0.0133830993756288, -0.0208962827264125,
-0.0198026272961798, -0.0155041865359653, -0.0141623138125038,
0.0403781559433973, -0.00152322954052142, -0.00765114673551981,
-0.00307692550447936, 0.00461184522256275, 0.00459067370859945,
0, -0.0310102367425609, 0.0031446566794715, 0.0248074737042678,
0.00153022218076782, -0.00921665510492442, -0.00464037955650198,
0.00618239990831793, 0.00153964618559232, -0.00617285910708087,
-0.00465477954498184, -0.00780644089283022, 0.02017135886378,
0.00612559342668284, 0.00152555330883697, 0.00152322954052142,
0.0030395160178962, -0.0122138922939374, 0.00306748706786131,
-0.00306748706786131, -0.0046189458562953, 0, -0.00774597211465444,
-0.028393074501218, 0, 0.0221528046411326, 0.00624026986008541,
0.0108276386520627, 0.0347729667550452, 0, 0.010347468525425,
-0.010347468525425, 0.0364737731176445, 0.0268567378372486, -0.00559442018532508,
0.02081962778239, -0.012439690475639, 0.00277777956390235, -0.00556329668532829,
-0.0326038800928163, -0.0248006875945297, 0.00442152525787787,
0.0318406058556588, 0.00142348778485335, 0, 0, 0.0099080782246963,
0.0167601688574655, -0.00974259619881934, -0.0140847398817385,
-0.034635496662756, 0.0189096543572917, 0.0100359265277854, 0,
0, -0.00428878427221768, 0.0113961347308695, 0.0126673052007655,
-0.00139958035442245, -0.00985229644301189, 0.0126495010640726,
-0.00560225554866989, 0.00420463128203163, -0.00420463128203163,
-0.0141445073861641, -0.00285306898240645, -0.00429492428288114,
-0.0129965728271806, -0.00875918008988119, 0, 0, 0.00730463437888762,
0.0130153681120699, 0, 0, 0.0213227694688207, -0.0198870190427165,
0.00857148105014094, -0.00142348778485335, -0.00858374369139181,
0, -0.0014378147696279, 0.0014378147696279, 0.00573067470898536,
-0.010050335853502, 0.0171678036223657, 0.00706716722309242,
0, 0, 0.0236615074981579, 0.017723708664696, 0.0147554655659201,
-0.024259949877484, 0.00679812271382119, 0.00405680069561498,
-0.00270270434788511, -0.0232727820553755, -0.0569954810493645,
0.0160003413464409, 0.00431966114451665, -0.0276783874221449,
0.00294985464642128, 0.00733679006385479, -0.00292825977908873,
0.00875918008988119, 0.00868312257346116, 0.00860837453660057,
0.00285306898240645, -0.011461443519007, -0.0101376682844547,
0.00290697879130875, -0.00582243275143313, -0.0132257622192613,
-0.00593473551981472, 0.00445435034938058, -0.00445435034938058,
0.00888894741724577, 0.0044150182091176, 0.0160236494681572,
0.00576370471674981, -0.0101083893207612, -0.00436364328777294,
0.00436364328777294, 0.0115441397468654, 0.0198870190427165,
0.00140548160873522, -0.0227282510775559, 0.00143575042610422,
-0.0159077830346384, 0.00726219604178446, 0, -0.00289855275401152,
-0.0264721340419767, 0.00297619267304583, 0.00444774639823642,
0, -0.0104090159147354, -0.00149588659158262, -0.00600602406021178,
0.00150489117941977, 0, 0.00450113288079201, -0.0196529379013937,
-0.00459067370859945, -0.00461184522256275, -0.0155282623265558,
0.00156372197618282, 0, 0.00778214044205505, 0.00154918698682938,
0.0214403312378693, 0.0150378773645405, -0.0180727810596943,
0.0030349036951538, -0.012195273093818, 0.00611622701743642,
0.00152322954052142, 0.0121030221712433, -0.00754720563538314,
-0.00607904607638154, -0.00152555330883697, 0.0121397545506099,
-0.00453515516539138, 0.0135442251077578, 0.0118872127840461,
-0.00147819686931072, -0.0254695415404571, -0.010678972575854,
0.00611622701743642, -0.0122700925918151, -0.00464037955650198,
0.00925932541279728, 0.00459770924862912, -0.0169626272195789,
-0.0093750686654568, -0.0581747210369556, -0.0407526680271078,
0.0323984005572502, -0.0323984005572502, -0.0316927933530007,
-0.0723206615796257, -0.00578593706704389, -0.0680262208558569,
-0.0272839589484684, 0.0508144563586628, -0.0318465536518517,
-0.00628274317949495, -0.0292038232372405, -0.0392639760402202,
0.00336889706610455, -0.0841617923217104, -0.0865680163084592,
0.02365419089789, 0.036970430918545, -0.00879955395184906, 0.0347429484438733,
-0.0707365510917786, 0, 0.0142952401868266, -0.0169164736667007,
0.00471328445209807, 0.0511757326185025, -0.0109808939670319,
-0.0279916494368679, -0.00258398076592492, -0.0331434860444086,
0.00427008922112115, 0.0288733968232875, -0.0169606153070454,
-0.0280205123321746, 0.0107672641846155, 0.00773440313835216,
0.000265639528723938, 0.00582628766780058, -0.0232413292694789,
0.00135043909787136, 0, 0.0318752085304492, -0.00471698987813918,
-0.0209695805770154, 0.0178125140242487, 0, -0.00926545344579033,
0, 0, -0.0379449315641232, -0.0195264223503555, 0.0139862419747399,
0, 0, -0.0425596144187956, 0.0425596144187956, 0.00554018037561566,
0.00248310239325322, 0.00302655341771629, -0.0478290876898817,
0.00574714225556816, -0.0246567966128604, 0.0275180288938923,
-0.0172914971100608, 0.0523828169213312, -0.0265564173614328,
-0.0331203779224523, -0.019217147185778, 0.0121645173065263,
-0.0112693956594101, 0.00624351663968525, -0.022176515651342,
-0.0307716586667537, 0.0118050454497625, 0.0340044905815318,
0.0221411258772135, 0.0245145657635857, 0.0155480163884851, -0.00986618292105401,
0.0121077080960674, -0.00927885189558975, 0.0112360732669257,
0.0324293325523244, 0.0139637070173864, 0.0198026272961798, 0.00391389932113606,
-0.00182458020121734, -0.008646719666078, -0.00660068403135217,
0.0248541243407026, 0.00900327624995878, -0.00900327624995878,
0.015639339398033, -0.0117708626201125, 0, -0.0012878301844288,
-0.0137539471916166, -0.000522739165066177, -0.0152795385685951,
0.00476695817950779, 0.0105125803890873, -0.00261780254207888,
0.00261780254207888, -0.0131580845775114, -0.00265252144913086,
0.0228446326840221, 0.0149448800613952, 0.00535647244244908,
0.0161497327785147, -0.00552210238577633, -0.0015113353002687,
0.0112797285674962, -0.0257524961024149, -0.0817705422313182,
-0.017640104620273, 0.0140254753545044, 0.0110804457765719, -0.00885451274841786,
0, 0, -0.00501533511573271, 0.00557104504945549, 0.00829880281469508,
-0.00220628881553786, 0.0172441661800784, 0.0156169032809919,
-0.00885555230294788, -0.00947874395454384, -0.0331980694095959,
0.00280899061105533, -0.00140350900232011, -0.0169975763685715,
-0.00286123228103197, -0.000573230167602468, -0.00661587340486669,
-0.0116111609276781, 0.000583771178283232, 0.0023316827818407,
-0.0131870042819537, 0.00382747412640994, -0.0195851957495319,
0.00984054459503358, -0.00446097394062539, -0.00149142458666995,
-0.0171613424427477, -0.00853924066583289, -0.0138784031720776,
0, 0.019985292380257, 0.046104819448733, -0.00232828975959132,
-0.000582920447867341, -0.0325954782494509, 0.00330777628314527,
0, -0.00240456983908555, 0.00958378105118785, 0.000298018181314585,
0, 0.0729579837398298, -0.0252467827347838, 0.0447234647868786,
0, 0.0318166123354557, 0.00289055515938319, -0.00763865165545763,
0.00343144061132783, 0.018277271191792, 0.00696148228437332,
0.000770514998952976, -0.00618080772720075, 0.00976871521530676,
-0.0123553695287182, 0, -0.0132970281434899, 0.0200084627314765,
0.019868203216725, -0.00835765280189094, 0.00911398713770639,
0.0216884981447496, -0.00742577669684952, 0.0401758230635196,
0.017741447009596, 0.000703152495864501, -0.00776200533548899,
0.00588583217726146, 0.00468384931242616, -0.0103335818545456,
0.0242549203731537, -0.00925932541279684, -0.012873202936206,
-0.00590669171539027, 0.0183146701723982, 0.00162733965937534,
-0.0116823758543565, -0.00944517057151861, 0.00236966935531813,
-0.00118413276121654, -0.0251962939188872, 0.00484849434662138,
0.0155971968136681, 0.000238066897922273, -0.00620823388442604,
0.0123781632818338, 0.0143279021838483, 0.000233181766242208,
0.00951393647965526, -0.00347021747900733, 0.0217773649233632,
0.00677968698537867, -0.00587306860958847, -0.0208317021490498,
-0.00603250089263696, 0.00302079932279842, -0.00581735206591327,
-0.00937873228643227, 0.0221325283490028, -0.00138344499814602,
-0.00903934767794867, -0.00349854584251341, -0.011751016535519,
0.00683239788288992, -0.000939628915738844, -0.00707550121620049,
-0.00236966935531813, 0.00944517057151861, 0.00117439825594179,
-0.0103823490422181, 0.00331518181912527, 0.0164130296413303,
0.00695251931488183, 0.00115406821810682, 0.00460300007196057,
0.000688626217962174, 0.0204413204258529, 0.0105111109753029,
0.0189516469810211, 0.00196270916784869, -0.00327332534496882,
0.0184087494100695, -0.00473220930421459, 0.0164656772520808,
-0.00574286381070888, -0.000640136584326267, 0.00425985734695722,
-0.00532766310778721, -0.00643089033029032, 0.00856536285892284,
0.00785816935291139, -0.00572824309508668, 0.00530505222969335,
0.00632913505164723, 0.0115003881203166, 0.0062176366108706,
0.0244910200082957, -0.0203673028244338, -0.013464729577001,
0.0114049966139902, 0.00923557103308204, -0.00553110085719322,
0.00553110085719322, 0.00468766781152707, -0.0118635058815983,
0.000411437981528984, -0.0186843716582619, 0.0151817411070598,
0.0143445082564004, 0.00608521147554564, 0.0050428750338618,
-0.0141846349919565, 0.0111619626653705, -0.0111619626653705,
-0.0148029018812958, -0.00124352347630552, -0.0177847558173649,
0.0184067108493347, -0.0135630665476203, -0.00907466394120426,
-0.0061669520274128, 0.0089191502291297, -0.00636269378782872,
0, 0.0031864073694079, 0.0126450578472728, 0.00417973440270814,
-0.00229669168707103, -0.0025115124126609, 0.00376490718715417,
-0.00523287431665853, -0.00737233460232378, 0.00421941554270822,
0.0104712998672953, 0.00312012733624378, -0.00103896113241886,
0.00601100369351171, 0.00535862782678675, 0.0102250379611615,
-0.0030565484825491, 0.0131780923805551, -0.00403633242246038,
0.0050428750338618, 0.00201005092802431, 0.00999009307508603,
0.0313137130661496, 0.0126365840045124, 0.0214576273384637, -0.0102948967268621,
0.00375587295980528, 0.000936768218386952, 0, 0.0176338676010692,
-0.00128877860054999, -0.00517178840239385, 0, -0.00185356864932285,
0.0292525428374368, 0.00539084863487638, -0.00269179166571121,
-0.00721373732369113, 0.0054151756877765, -0.0181658038065593,
0.000916170471779498, 0.0091158334080097, -0.0100320038797892,
-0.0131010269426075, -0.030934190447879, 0.00592113276117479,
0.0083444443976477, -0.00796363708154546, -0.00152410012356397,
0.0157006913898274, 0.0143485178530836, -0.0177326158373239,
0.0138397570885789, -0.00633503143451142, -0.0236417630570402,
-0.00575817329968409, 0.00767021977130922, 0.00190839752576055,
0.00854300402194719, 0.00471476592370346, 0.0167914392972603,
-0.00519385326363064, 0.00519385326363064, -0.0156631954769395,
0.00412603736140227, -0.0194657946426835, -0.00114569422499677,
-0.00248684967953894, 0.00363254390453571, -0.00766287274556898,
-0.012773543138433, -0.0119527238643959, 0.0082450408190371,
-0.0183491386681967, 0.013449569827038, 0.0175272257089603, 0.00385356931599024,
0.00995033085316788, 0.0119240553533615, -0.0113530074649968,
-0.00668260243436691, 0.00534964974724028, -0.00152555330883697,
-0.000954654010450806, 0.0151661674714112, 0.00749769210580098,
-0.00280505092760874, 0.000748783262241037, -0.0186991943860839,
0.000571809793527223, 0.00323409394415108, -0.00571430126343886,
0, 0, -0.0237761272472663, 0.00253980794923825, 0.0048661896511728,
0.00869990987554603, 0.0207646283323704, -0.00643089033029032,
0.00416588369943849, -0.00416588369943849, -0.00189933580365231,
0.00757579380845774, 0.00658206419564022, 0.0145145830611244,
-0.00240451424461208, -0.00278164296187677, 0.00296681100240193,
0.00166497124763643, -0.0027764941482924, -0.00334200088255709,
-0.00746690047573306, -0.000374812598091356, -0.00131295151562849,
0.000562904602535941, -0.000187599662870852, 0.00187441479435035,
0, -0.00563381771825577, -0.000942063187901354, 0, 0.0125505065950446,
-0.00279616176216768, -0.0131531396455102, -0.000378357931112294,
-0.00474159257258222, -0.0153259704782269, 0, -0.00677968698537867,
-0.000583260442315048, 0.00909889154190457, 0.000192696792194802,
-0.00192864090640565, 0.0105617880511084, 0.013659859523258,
0, 0.00338600774973097, 0.0112045990128631, 0.0101617502594991,
0.00366972888896244, 0.00638979809877149, 0.00272603530923821,
-0.00272603530923821, -0.00309851701469821, 0.0103514953197132,
0.000902934598592253, 0.00180342699915048, -0.00270636159774273,
-0.00725297830501503, -0.00364631215302058, -0.0073327551293918,
-0.00184162114664943, 0.0379792480652164, 0.0288607700606338,
0, 0.0102916860365481, -0.00599060413715691, -0.00223425374372788,
-0.00950824001436423, 0.00398717699061368, -0.00155830695545678,
-0.0109766761142396, 0.00906877509321014, 0.0204492047123184,
-0.00922767996835372, -0.0017182134811371, 0.00685521107905274,
0.00511074366256015, 0.000849256951254596, -0.00561847055584952,
0.00985397032270452, 0.0134342910491281, -0.00989028261439895,
0.000673627509474528, 0.00838227875280406, 0.018197359051908,
0.00245599795986973, -0.00788052017897467, 0.00542452221910494,
0.00245599795986973, 0.000653915341268352, -0.00721669525379198,
-0.000823384155204998, 0.0106514730914684, 0.00649880435116579,
-0.00976411337490557, 0.00212366329915437, 0.00195630972076088,
0.00730226368540077, -0.000808734374851738, -0.0048661896511728,
0.000162588407804698, -0.0182106582795081, -0.00831259981936583,
-0.001670844164817, -0.00587003781979512, -0.00675108049342033,
0.0222733816353218, -0.00498091682281299, -0.00970069490429548,
0.00502934840500213, 0.00583578146410169, -0.0105290518958512,
-0.00420911472509022, -0.00881212277547849, 0.00763038886569234,
-0.00474095029553112, 0.0069344886761229, 0.0203543897285492,
-0.00413736626596162, 0.00661159433231262, 0.014718172474022,
-0.00978800636616306, -0.00789997525315655, -0.0102973837987514,
0.00499584719337243, -0.0125367291779845, 0.00553553371900062,
0.00100317681043016, 0.00100217145518133, 0.00615796237354971,
0.0202027073175195, 0.0113178249326609, -0.0224363174959148,
-0.00825495854951797, -0.00481928643594909, -0.00718886319244749,
-0.000839278269994637, 0.0108560565134663, 0.00546675758882031,
0.00280458816185636, 0, 0.00820349145282773, -0.00968729355508646,
-0.021513378693462, 0.00302979528185254, -0.0155829711519972,
-0.12011162385435, 0.0340619494625356, 0.0120204868541247, 0.0182154398913412,
0.00144300169339306, -0.00596584974711956, -0.0136925918376147,
-0.020427822690098, 0.00187441479435035, -0.0198588086496034,
0.00381316000641441, 0.0104118309249333, 0.0028208763416413,
-0.0126619852877994, -0.0126317469059005, 0.0200216431606006,
-0.0136858247115148, 0.021208899500933, 0.0213188164824785, 0.0312384342561973,
0.0426336857495215, -0.0111350186901502, 0.00857638189982524,
0.0101955003806697, -0.00338696344101574, 0.00676249407229701,
-0.0042211966436243, 0, 0.0151136378100478, 0.0148886124937508,
0.00213237184055259, 0.00229132669806997, 0.00781635635587907,
0.0216614967811797, 0.0289421843709201, 0.0122606899863529, -0.00995033085316877,
0.0122325684356346, 0.0113337065098493, -0.0143778488910771,
-0.00534964974724073, -0.00306984124428844, -0.010663882834332,
-0.00717296074069385, 0.0031250025431353, 0.00591073427174926,
0.0109509847380744, 0.020647461904673, 0.000450687305757924,
0.0181579627994708, 0, 0, 0.0573222811078287, 0.0097020858064507,
0, 0, 0.00687287928776215, 0.0283611176216985, -0.0113827810872804,
-0.016978336534418, 0.0075060058990557, 0.00718014329047723,
-0.00446459511799446, 0.00135501375745939, -0.0054311063575021,
-0.00546076442317833, -0.0269161643012819, 0.00826277371014861,
-0.0140451747030479, 0.00352983070352408, 0.00492092381419873,
0.0111577168706534, -0.0245712607305055, 0.025957262338383, 0.00621334411751118,
0.0129916357185387, 0, 0.00271370587159581, 0.0280579527951579,
0.0253672138784227, 0.0234856875161622, -0.00882173332914782,
-0.0198152856041443, 0.0227224481142736, -0.0253080991992958,
-0.0136854530852544, -0.00922880643762181, -0.00265252144913131,
0.00925320548048347, 0.0143699402829522, -0.0117418178766826,
-0.018543577712169, -0.00469012585041373, 0.00335233300875348,
-0.0168751895188848, -0.00136239803089566, 0.0105778040796372,
-0.00175521546445889, -0.0122367264484362, 0.0212500382918774,
0.00374281950099586, -0.00736528270955983, 0.0208825836760838,
0.00695221531735957, -0.000653808457418137, -0.00695677807339923,
0.0124355290593678, 0, 0.0103533990705156, 0.00935484492480665,
0.0264332570681551, 0.00470939028769823, 0.00910775526544416,
-0.00886050591321386, -0.0153207206721717, 0.00762742053586329,
-0.014554836613681, 0.00101061150592319, 0.0137933221323356,
0.00372902855986723, -0.0206839088109314, 0.0139616128627758,
-0.00262647890340606, -0.00313578181068408, -0.0113709374269799,
-0.0112446878669497, -0.00735155177855251, -0.00311163222669641,
0.00994904517644279, 0, 0, 0.00154162414910086, -0.0116205310230182,
-0.0104439591610834, 0.00131147559781031, -0.021862414305212,
0.00667559221378422, 0.00464654005599563, -0.00931477127454716,
-0.00670693325671845, -0.00960180478520556, 0, 0.0136313602712077,
0.0199077576579247, 0.0201632847270563, 0.00833605340206134,
-0.0147960241875875, -0.0045469386555439, -0.00260756340708124,
-0.00983937095200371, -0.00727757098869386, -0.00840172975823705,
-0.00227927966209673, 0, 0.00147542109790955, -0.0197624190172903,
-0.0130720815673522, 0.00469808795597526, 0, -0.00844470997549163,
0.000972695136790769, 0.00249688019858763, 0.00304330017702803,
-0.000276281256074817, 0.00578593706704389, 0, 0.0129650400172894,
-0.0157160733891795, -0.00441745610672228, -0.00513212860962486,
-0.00153086106022471, -0.00979713924498338, -0.0076239251106589,
0, 0.00127469743200059, 0.00212089156913731, 0.0226235036378215,
0.00592002830371818, 0.00888287357757722, 0.00893951503880874,
0.0209494618681321, -0.0096862695894213, 0.0123237496888322,
-0.0081989351118823, -0.0067950431328283, 0.0270375850041731,
-0.0075757938084573, 0.0127672396017076, -0.0101484087217214,
0.0101484087217214, -0.00389105549296698, -0.00717316411241331,
0.00652318033912369, -0.00456175440790574, 0.00521173818119536,
0, -0.00573440904112754, 0.0192873754458311, -0.000256443135140394,
-0.0038545595713817, -0.0125657460118651, -0.00995684892942261,
-0.00263678465230299, 0.00552923999521759, -0.00751140119920901,
-0.00464037955650198, 0.00132802144351896, -0.0160538564885631,
-0.0122118206867645, -0.010981579130485, -0.0516929238273942,
0.0208565632745579, -0.0288828741487865, 0.0161328984072178,
-0.0102892002583062, -0.0243285128230148, 0.00521028346540753,
0.0142996466239413, -0.010742506368052, -0.0372283214179774,
-0.0095664978925587, -0.0590754348296052, 0.00164338574372991,
-0.0600603311404715, 0.0334364624283001, 0.0621114635434878,
-0.0198470022902439, 0.0182609591346088, 0.00158604315563515,
-0.0119570974211225, 0.00400160598000721, -0.0291754891339311,
-0.0115799474309597, -0.00667782114260529, 0.00467915292230625,
0.0307009255176345, -0.0204172057632279, 0.0204172057632279,
-0.0125276184880656, -0.020175977925879, -0.0161674117670119,
-0.0205839171434867, -0.0125568444442914, -0.0261371443676923,
-0.0101395038524394, -0.0174396342853562, -0.0281708769666964,
0.0188684843043827, -0.014117881545785, -0.0247589655025777,
0.0481792397106764, 0.0246926125903713, 0, -0.0114473858403503,
-0.020680205237539, 0.0491454670688882, -0.0242708542960139,
-0.0174396342853562, 0.021072699135237, 0.0224124128045018, 0.0149584839303358,
-0.00877198607283702, -0.00353045379938255, 0.0284164505049258,
0.0122994375904852, -0.00442403307914496, 0.0394568065949157,
-0.00657897109804217, 0.0114849498668965, -0.0289635471408562,
0.00669458567221426, -0.0227761848905566, 0.00340715832161376,
-0.00340715832161376, 0.00510639407457347, 0.00929453247925061,
-0.0028636421489594, -0.0252832053095, -0.0043346405104403, 0.005199318471794,
0.0247558523639944, 0.00437490181212841, 0.00569229730559506,
0.0124431302686556, 0, 0.0260388999547576, 0.00400802139753953,
0, 0.0166604408931068, -0.0134655499965879, -0.0274875834655637,
-0.021541843774755, -0.00841047850859589, 0.000844238125280228,
0.00890537684055115, 0.0185620178600594, -0.0148886124937508,
0.0214352807200653, 0.0193867138001904, 0.00796816964917646,
-0.00557104504945549, -0.00881769240264418, 0.0151820734714958,
-0.00238189870453365, 0.000794596783960877, -0.0268872350580205,
0.00488361706401896, -0.0140640094089797, -0.028735689181655,
0.00338409798424077, 0, 0, 0, 0.0016877641137194, -0.0132407298517565,
0, 0, -0.0207189731919524, 0.00174307173294697, 0.023579097721246,
-0.0298684266288101, -0.00491919474052871, 0.0129485623679662,
0.00831029713362774, 0, -0.0341988610282504, -0.00178571476023404,
-0.00268456537066974, 0.0767287029834991, -0.0123582067140031,
-0.0152390351684231, -0.0146088359037675, -0.00886885215022115,
0.0138770657095515, 0.00429738521254563, 0, -0.00171673861905397,
0.0148378265817515, 0.00977096722478166, 0.0025115124126609,
0.00666669135818942, 0.00165975141836405, -0.00949769698892489,
0.00284209837718841, 0.0173773508058774, 0.00865806274311431,
-0.0119448364497377, 0.00558110444101789, -0.00623975751933248,
0.0242465706027906, 0.0119857607452225, 0.0157607675344957, -0.0110064004172221,
0.00031615555116371, 0.00535940753191966, 0.00938827111524443,
-0.0230003837940638, 0.003976148379639, -0.0168071183163807,
-0.0210431058147167, 0.00394867245541608, 0.00409668741732983,
0.028216301573214, -0.00478088560034262, 0.0178901312201507,
-0.0162939616873281, 0.00477327875265754, -0.0273548834493669,
0.0160210563238881, -0.00854499354953298, -0.0123820415432094,
-0.00641396821011764, -0.0176436001618843, 0.0041893651552769,
-0.0025115124126609, -0.001677852742616, -0.0142062257252151,
0.00255167272923273, 0.00406918160672554, 0, 0, -0.0170652605541868,
0.0212864571978111, -0.011012394221626, 0.000851426190217275,
0.0017006806820179, 0.0143401866922703, -0.00706003941072453,
-0.00219539056343621, 0.00925542997416073, 0.0109946135010919,
-0.00265428158487246, 0.00497101272202016, -0.00663352349563429,
-0.0125577204854848, -0.0173355802901307, 0.024052650046074,
0.00417537141048019, 0.0157092937051804, 0.00245801025026893,
-0.00442877784493589, -0.00875098459947399, 0, 0.0128523766172917,
-0.00131061617712014, -0.0049301661078589, 0.0032894766503988,
-0.00907222717231093, 0, 0.00890801007694275, -0.0047740645452139,
-0.00811867602419891, -0.0046690096491302, 0.00533423501874797,
0.00016623722089637, 0, 0.00728721343368477, 0.0228394919698225,
0.00225551891980924, 0.0105651695092517, 0.00159109023224246,
0.00618117508081895, 0.00252485537318314, 0, 0.0164130296413294,
-0.00934586241823787, -0.00627945548484288, 0.0218077178113987,
0.00537842249275844, 0.012185984765531, 0.00663952944602997,
0.00898344116043326, -0.00838202782100073, -0.00482146481704149,
0.00587306860958847, 0.00329785938226657, -0.00449978260388306,
-0.0120993611556592, 0.00121654516220371, 0.009829946827983,
-0.0119598730858081, 0.00288951611584398, -0.00884691554216399,
-0.00306889914023323, -0.0236366158714301, 0.00439630163250282,
-0.00298109579838002, 0.00251098688115814, 0.0101365390344528,
0.0100348198293272, -0.0112768631499991, 0.012504987344891, 0.000306795523191461,
0.00076657727019569, 0.00321322287023129, -0.000917010609881252,
-0.00229621226035004, -0.0126466830522514, -0.00233082228123926,
0.00264118851588435, -0.000155171076422711, 0.00896035656698491,
-0.0049337133689038, -0.00092778729401477, -0.00480732878993173,
-0.000777544553596243, 0.00651266063754274, 0.00815828430114962,
0.00107255047438048, -0.000459523635258918, 0.0055003958385047,
-0.00841562613479407, 0.0123694025748913, -0.00364908417692256,
0.000913520160970904, -0.00763828950466561, 0.0126477876468165,
0.00317484581440386, 0.0015082959118855, 0.00150602438103764,
-0.0938064201224584, 0.0132995967378005, 0.0327319449925518,
0.0336805327776659, -0.00336340326242901, 0.0191117602305679,
-0.00240674003056451, 0.000903206444059279, 0.00659870420773512,
-0.00449438958783954, 0.00254930032343381, 0.00522818428917393,
-0.0032830950247682, -0.000149488003866161, -0.00119670919293124,
-0.00600512238476369, -0.000903886774405116, -0.00226329783525436,
-0.0109357104400285, -0.00582020328841004, -0.0109662076644321,
-0.000621407507956029, -0.00139958035442334, -0.00718416333696847,
0.00312989300892763, 0, 0.00747201483870175, -0.00201816415623757,
-0.0023337233462204, -0.00577991142794954, -0.00188176313956934,
0.00282131848585543, -0.00392065114466611, 0, -0.00804611109005116,
-0.00970958682625866, -0.00674051661384301, -0.00678625968435842,
0.00517465579001719, -0.00485045413374952, 0.00323624877920814,
0.00804509568483169, -0.00385356931599024, -0.00177119440877593,
0.000483364224347582, 0.00369805068116769, 0.00080211763344451,
0.000160346348455853, -0.00837229169953435, -0.00210407134353741,
-0.000810438491558152, 0.00485280797061449, -0.00517716736366314,
-0.0012984906872342, -0.0117629194895823, -0.00213868634616166,
0.00460073108593662, -0.000820008246030568, 0.00245801025026893,
0.0205725243604817, 0.000480884837350537, 0.0103627870355467,
0.00947126259248687, -0.00472441823626735, 0.00472441823626735,
-0.0137627735896082, -0.00928155166867395, 0, -0.00144799317392152,
0.00080469948817985, -0.00823847644903886, 0.00678955081520805,
-0.00808804150244224, 0.0012984906872342, -0.0105993654370415,
-0.0148640226321497, 0.0028246259537914, 0.0108911967654501,
-0.00295858203974575, 0.0101540341803483, 0.0047143060569983,
-0.00276086249262608, -0.00472737044709781, 0.00326264563481615,
0.00503615717987937, -0.00275862243907987, 0.00178585971645706,
0.00807758834636729, -0.00483871911822309, -0.00145619312197631,
-0.00486934901418223, 0, 0, 0, 0.0119684430427949, 0.00465154419585723,
0, 0, 0.0112977572118211, -0.00079145235631195, -0.00015836566665417,
-0.00779080178776859, -0.00962316969146126, 0.00562476372476617,
0.0190481949706944, 0.0103238833416359, 0.000155605695482564,
0.0171234419128696, -0.00275693236345731, -0.000613685198763037,
0.00688233962294227, 0.00698666268500769, -0.00166628835039617,
-0.00471089694259241, -0.00503933264654144, 0.01141651793953,
-0.00439894528088303, -0.00274014481086748, 0.0048661896511728,
0.00423857734174593, 0.00407025511052783, -0.00709597602706502,
0.00453515516539138, 0.00150715929057199, 0.0016552558139411,
-0.00346411973796723, 0.00180886392402613, -0.00150715929057199,
0.00346307656197986, -0.00301069021633182, 0.00706079700513573,
0.011904902506318, -0.00757298147412122, 0.0054998280587073,
0.00723731096364855, 0.00616199132854955, -0.00322297378316083,
0.00949258679675591, 0.00362713491600353, -0.00435414330862027,
0.00145348862798311, 0.0158504759368263, -0.00429800088567589,
0, -0.00937619562855474, 0.00880173058331568, -0.00590737255695117,
-0.00507063744896019, -0.00407510387839061, -0.0201818737671697,
-0.00597016698650332, 0.00149588659158262, 0.00268696982082517,
-0.00238806083638465, 0.00640502673774979, -0.00745159930842121,
0, 0.00507312338410681, 0.00208147562513084, -0.00357090098239254,
0, 0.0031252350758848, -0.00491547872872289, 0.01084142199627,
-0.00207008797862951, 0.011771766307791, -0.00205008125689066,
-0.0113511943367346, 0.00207346045849732, 0.0116203410988929,
-0.00616108740742405, 0.00674588274572763, 0.00597451686081385,
-0.000290613195884859, 0, 0, 0.000726374695990017, 0.00275542194492306,
-0.00173938295774079, -0.00334229766925631, 0.0013091863096184,
-0.00349497952959954, 0.000583345502906951, 0, 0, -0.00467632008104069,
0.00336331383799049, 0.00364299127850121, 0.00551205341049243,
-0.00289729305961295, 0.00130482077229654, -0.00421053253634263,
-0.00481296050500912, -0.00557104504945549, -0.0061937965301313,
0.0168687534977625, -0.00218420181484102, 0.00581396986541982,
0.00534799279995024, 0.0067524141341373, 0.00784429608869264,
-0.00498470020238884, -0.00572739251766308, 0, 0.00358346258904962,
-0.00703570997627878, -0.00665030642403952, -0.0137288559126123,
0.00469484430420763, -0.000585651554074751, 0, -0.00337416882876518,
0.00205519743508642, -0.00588323508968713, -0.00014752526396844,
-0.00814758969795992, 0.00297044626554488, -0.00297044626554488,
-0.000148754183985744, 0.00208054763711107, -0.00342033198341074,
-0.000148975791709027, -0.0032830950247682, -0.00104688561120447,
-0.00645502504058992, 0.00435796016182621, -0.00255236231948786,
0.00345164295563194, 0.0067189502487448, 0.00578679558315898,
-0.00429980646158068, -0.00895528372910359, -0.0113080737582552,
0.00499131339503922, -0.00196330199739769, -0.0107911982664728,
-0.000611433831338992, 0.00229095174655569, 0.00411053330548494,
-0.0139220484698166, -0.0274888462488772, 0.00316205797064839,
-0.0162320559463893, -0.00032092426462782, -0.0025711085994109,
0.00545572598427047, -0.00384800859617407, 0.0103876283615785,
-0.00159109023224246, -0.00398883656123239, -0.00899315519909205,
0.0061113088242033, 0.00846177316836361, 0.00158856268513752,
-0.00700863015447695, -0.00915301452322748, -0.0111932387716625,
-0.00457816365364216, -0.00756707015932978, 0.00148502626086522,
0.00804668107262962, -0.0131710827317733, -0.0099934209429513,
-0.00739250678428416, 0.00370308452115697, -0.00962276354145697,
-0.0188361733436944, 0.00345125450421513, 0.01453638251364, -0.00169923575295972,
0.0140169482304335, -0.0140169482304335, -0.00716848947861237,
0.0168156828566151, 0.00369872647630398, 0.00502177151748384,
0.000166958845031928, -0.0134455807093508, 0.00405269045062351,
0.00151553451589326, 0.0382948261295173, -0.0122201113347753,
-0.00575422878325238, -0.0074472830752832, 0.00298557196391691,
-0.00714704280516898, -0.0147877001543799, 0.00641244181764744,
-0.00793721974966122, -0.00954010642753467, -0.0144830117755657,
-0.00226028088926533, -0.00663641974614748, 0.00907035696996417,
-0.00592129908423189, -0.033931668075569, -0.0134620916079209,
0.000915331871688352, 0.00638397060992357, -0.00821547295339142,
-0.0116162456750279, 0.0176475168135783, -0.00237161472101999,
-0.00256035253254794, 0.00438517055717735, 0.0108795270751365,
0.0152128213492686, -0.0017777782459989, -0.00803934655941774,
0.00679301861388026, 0.0063932023830624, 0.00547269438993503,
0.00105578049574806, -0.0152404154877042, -0.00788817284900656,
-0.00831380172727414, 0.00308054968630245, -0.0121963830943121,
0.000915331871688352, 0.000365898284360178, 0.0139862419747399,
0.0127253263028049, -0.00499911771339523, 0.0244000789632919,
0.0107714731513981, 0.0159427393188336, 0.00424268773879, 0,
0.00641460673519845, -0.00777030936615652, -0.00510031707464442,
-0.00787136165412416, 0.0126324656997356, 0.00928668556820433,
0.00067204303604651, 0.011023996727145, 0.00480570980404682,
-0.00298013465592728, -0.00348808592173366, -0.00835426746985757,
0.000838574472621367, -0.00774153984129189, -0.00865365542785224,
-0.000170430337012917, -0.0097628590799026, 0.0017196908795265,
0.00599573530633357, -0.0089209718412091, 0.00120554565534903,
0.00600602406021178, 0.0124119590539857, 0.00202565903161656,
-0.00676821534613925, 0, 0, 0.0116466841013469, -0.00268862538031822,
0.000168251030934208, 0, 0, -0.00827777342612368, 0.0136469138411819,
-0.00604231445596248, -0.0144131363248734, -0.0228028536227152,
0.000698689984755418, 0.00886576863232325, -0.00834207131739539,
0.0102440343702197, 0.0154271143232503, 0.0161974183974563, 0.00667225160906781,
-0.00333056094716433, -0.00283972461773185, 0.0116415750154859,
-0.00930239266231325, 0.0140859069382993, -0.0105873610957863,
0.00514225235306132, -0.00364661435095126, 0.00315013109706186,
0.00396498123940958, 0.0135922832803361, -0.00211674751347424,
0.0180912703485578, 0.0166683754334018, -0.00663404739171281,
0.0118158129313803, 0.00312744586419544, 0.00700119545898303,
0.0051032353851781, 0.00523078115738418, -0.00261197044865202,
0.0152674721307884, 0.00302572091653719, 0.00241400237928158,
-0.0107568645734384, -0.0107198576321972, 0.00782152595642582,
0.011996195793297, 0.00901583868162081, -0.00254624566535888,
-0.00270311022078484, -0.00331375510344589, -0.00544630695714599,
0.00408750852961504, 0.00331825342179037, 0.00195562303770913,
0.00569460552829248, -0.028341671160109, 0.00383583132387066,
-0.00153256734977791, -0.010019351639956, 0.0046367934698015,
-0.00138878195301828, -0.023751116468163, -0.00221624279410371,
0.0100915394420227, -0.00424495567081173, 0.00204611702769775,
0.00783089358054756, 0.0122491647064535, -0.00169661488989448,
-0.00402166048617492, -0.0104386707784601, -0.000783392127804206,
0.00297363080062496, -0.0068998235159059, 0.00157232736795265,
0, 0, 0.0101603062401585, 0, 0.00403539271811937, -0.00248139085138543,
0.00650056460309312, -0.00463894445639301, -0.0120060692184722,
-0.00314218640307296, -0.0112351842480427, -0.00223036574854518,
-0.000478583402291122, -0.0044778581138889, -0.00224647077787843,
-0.0104966648053422, 0.00178412179350129, -0.00357143236759772,
-0.00309471702321229, -0.000652741537536805, -0.00720841915541026,
0.00213517367806215, 0.00490999349755583, 0.00374806924555848,
0.00113793396006745, -0.00815665207374927, -0.00492611833605583,
0.00279444580630361, -0.0072487961556833, 0, -0.00563755431089508,
-0.0115395544235914, 0.00754088198461211, 0, 0.00665559861173648,
0.00198807222538644, 0, -0.00447873353783823, 0.00149514107094983,
0.00727757098869386, 0.00115292774032572, 0.00738010729762273,
0.00342549882302734, 0.00406273411669655, 0, 0.00307667638049303,
0.0141277032701543, -0.00687837631524424, -0.00482704074831553,
-0.00436434863928703, -0.00519734806449712, 0.00130187162407047,
0.00954931590958541, -0.00193486032629586, -0.0113619141440386,
0.00325945529770966, 0.00681046900252724, 0.00612213930859618,
-0.00660706617496487, 0.00242228620659457, -0.00679724254873726,
-0.00537329307151779, 0.00472352046905922, 0.00647880465116124,
0.0381707594970786, -0.000310848619227144, 0.0187871247868001,
-0.00919546709310026, 0.00261397857767687, -0.00415481093183256,
-0.000771307404224153, 0.013031997390577, 0.00531714127761962,
-0.00653646775089722, -0.00091547153174254, 0.00198246347810471,
0.0137682786192306, -0.000150274250789728, -0.00890639142700778,
0, -0.0182087503538888, -0.00123609410052783, 0.00724026886194729,
0.0084066219197112, 0.00152091284070632, 0.015083242211329, 0.000748223005351711,
-0.00434750703499898, -0.00225614896033122, 0.0211569274053662,
-0.00339659190906971, 0.012641650053121, 0.000438116107774, 0.00814313064548777,
0.00144717825546614, -0.0129541146322065, -0.0219203089630522,
0.00463136705928857, 0.00757298147412122, -0.0143029851903309,
-0.00255427980509726, 0.00644824908069275, 0.0118872127840461,
-0.00666916143628082, 0.00296956417181704, -0.00430875044442569,
-0.00432739614345223, -0.0153710078765972, 0.00227531383713497,
-0.0030349036951538, -0.00824054716609712, 0.00565361732045666,
0.00258692984564046, -0.0114636333727569, 0, 0.0065885475444194,
0.0100289550507124, 0.00241618963817025, -0.0191874319522611,
0.012832440069884, -0.0111426017385856, 0.0174975936209627, 0.00811180653537669,
-0.0026966308475993, 0, -0.00390801639720539, 0.00899556290857717,
0.0249110088348248, 0.0105713822078499, 0.00846307855546602,
0.005271042904333, 0.0129871955268106, -0.00492092381419873,
0.00351741481187862, -0.00506900550329714, 0.0117879557520419,
0.00653551655320506, -0.000970671919797716, 0.00360061322386951,
-0.00958407224977087, 0.0117934075864481, 0.00206682815566328,
-0.00496758300408651, -0.00749795237247497, -0.00251186288922423,
0.0115303076411646, -0.00554018037561566, 0.0158460084500778,
-0.00054697116090896, -0.0104497032277315, -0.0223629165541457,
0.00169467629881748, 0.00675013110068079, 0.00489340368272995,
0.00320267627368942, -0.00893735874972723, 0.0114366657991312,
0.00897362272125335, 0, 0.00739425524268356, -0.00068236099542851,
-0.022783173930577, -0.006304749430738, 0.00140449461289816,
0.00420168685370026, 0.0040449179914841, -0.00362571865606487,
0.00931662165466207, 0.00593391653542863, 0.00288521186615132,
-0.000548922752908076, 0.000686106373398943, -0.0048126596844158,
0.0162694134758015, 0.0149407180596395, -0.00549267909158768,
0.00281709221114923, -0.00752995355663355, -0.0124950695326271,
0.0112795923082309, 0.00928112116480584, 0.0121100688165123,
0.000661157048877925, 0.0183371215317596, 0.0205530677381622,
0, 0.014515874951476, -0.0107087637541117, -0.00571611591328924,
0.0157981168765913, 0, -0.00125470530889604, -0.00226244440396961,
-0.00922830643649686, -0.00496531671356149, -0.000893826274154463,
0.00535101289548212, -0.00701309855587162, -0.0246126413799646,
-0.0490524936567711, -0.00621763661087016, -0.00138696277434835,
-0.00836241806965532, 0.00418995026385449, -0.0098040000966213,
-0.00720392973520312, 0.0135175248920554, 0.0100195661493503,
-0.00862194188271204, -0.00139762426663825, 0, 0.0108500160240661,
-0.00945239175742785, -0.00111794310134083, -0.0122390217347226,
-0.00212539931231337, -0.025863510589919, 0.0145945802700034,
-0.0116876014786946, 0.00982099170418316, -0.014913763715473,
0.0130436631920299, -0.00288392413290506, 0.00905634019840829,
-0.00761333850501522, 0.00546449447207831, -0.0118293358638022,
-0.000145127349504115, 0.00362188009115449, -0.0153008449553003,
-0.0051527533956568, 0.00881710341014852, -0.00807936822360933,
-0.0111235852186615, 0.00594797292607741, 0.00738555794338769,
0.0189510044702947, 0.00575955515836668, -0.00937619562855474,
0.00332779009267448, -0.0202823032062867, 0.0194152511288461,
0.0190454672057294, -0.000709471473533618, 0.000709471473533618,
-0.00640343703520685, -0.0064447054426422, 0.0064447054426422,
0.00711240551580694, 0.00579465970250048, -0.0179160202348454,
0.0163646483702111, -0.0049522563184663, -0.012848142477849,
-0.0014378147696279, 0, 0, 0.0107335556431094, 0.0127300163220063,
0.0118757944801935, 0.0013879252748481, 0.0141846349919561, -0.00782487347597804,
-0.000137826476684921, 0.000688942500553402, 0.000963722797794553,
0.0163781469121016, -0.0201004921489751, 0.00275862243907898,
0.00137646270872338, 0.0106718758074518, -0.00108932472645495,
0.0155461055902775, 0.0100100935951, 0, -0.00733092253505152,
0.0126289481726074, -0.00995693589479707, -0.00871027282489312,
-0.00634579865263873, -0.00108415785114513, -0.00885262846419632,
0.00545703946305753, -0.00340715832161376, 0, -0.00095608830611571,
0.00191126338044256, -0.00533334597536239, 0.000274197972653489,
-0.00867355888487342, -0.00124524402116588, 0, -0.0207029355394086,
-0.0135185267550204, -0.00964521981115318, 0.0082115278093049,
-0.00734397425575839, 0.00734397425575839, -0.00215439939702655,
0, 0.00644932367990769, 0.01206972408733, -0.0185190477672377,
0.00730609939988902, -0.00515170000286247, 0, -0.00893506448799997,
-0.0252138833444073, -0.00610529779688918, -0.0126260507916953,
0.000907166678481452, -0.036473997673224, 0.00187911110727512,
0.00779184244503384, 0.011575096441236, 0.000306842592158851,
-0.00522917217399499, 0.00691512551993245, 0.00534556446642043,
-0.0161231899602496, 0.0138357318526507, 0.0113853222251246,
-0.0136780247960999, 0.00579800220525328, 0.00455374193099534,
-0.00882269185057094, 0.000763650285297679, -0.00766287274556987,
0.00491099820662733, 0.00259919112769769, 0.00957381372286203,
0.0102318075897765, 0.0222066860341439, -0.00293255342127718,
-0.0110743102990938, 0.0147386603135855, -0.00734217688908068,
0.00953435898730159, -0.0139656808619719, -0.00311365067327962,
0.00975329144317882, -0.00250313021811888, 0.0175366177573961,
0.00750472565406746, 0.00143678185636276, -0.00720464211504712,
-0.00028926815359398, -0.0132510129666734, 0.00555475774133996,
-0.0035046764844493, -0.0154790442247688, 0.00355925038358595,
0.00590407619094524, 0.0163483425037283, -0.0031902579262173,
0.00145137906464665, -0.00800877961080548, -0.00881062968215574,
0.00514896319619229, 0.0115254029461775, 0.00275222886845405,
0.0139340040833709, 0.00852277886198305, -0.0014154284033312,
-0.00568183346743023, -0.000284940876696282, -0.0144961275425759,
-0.00173636275719247, -0.00435414330862027, 0.000727008392616746,
0.00868312257346116, -0.0156843180324442, -0.00440076990126492,
0.00117543357641736, -0.0134527862234082, -0.00208581720437095,
-0.00583353684809396, -0.00616960801326272, 0, -0.0112326621225458,
0.00669407641953779, -0.0129723299487994, 0.00383289085570748,
0.00152905228567768, -0.0345076202853871, 0.000316255537107679,
0, -0.00889318232175285, -0.0134898464473796, 0.00113113044297464,
-0.00193986481782638, 0.0144580831752297, 0.0251981836046991,
-0.00373948706256222, 0.00529348892929615, 0.0031007776782479,
-0.0107385671263653, 0.00530588011313871, -0.0324267240301959,
-0.000643293685741675, -0.0123018709062954, 0.00552218392091675,
-0.0161649368533556, 0.013894788318817, -0.00880488323274253,
0.0112369882013548, 0.0176569595281695, 0.000318167358713595,
-0.00910767660156253, -0.00482704074831553, 0.00595000549293978,
-0.0156745553849351, 0.0116581631273895, 0.0200803559822891,
-0.0224979572887589, -0.00113021728346574, 0.00724349243549494,
-0.000802246332638035, 0, -0.00805806132976183, -0.0048661896511728,
0.0048661896511728, -0.0102464912091715, -0.0176470317133122,
0.018954007883675, -0.000326583934973534, 0.00277256966323325,
-0.0114661370876439, 0.00574950249126083, -0.000819336383401925,
-0.00344856241268054, 0.0050865644169189, -0.0206704341735673,
0.00749067172915741, -0.00249066131245179, -0.00249688019858763,
0.00166527931906124, -0.00416840953717923, 0.00666114036678156,
0.0334588534226432, 0.0226939835327959, -0.00835110817339135,
0.0164774226225175, 0.00279763937522226, 0.00603483456284959,
-0.0044839657656528, 0, 0.0103292369848589, -0.0026107670515465,
-0.010511767838925, 0.0085106896679088, -0.0108444130141292,
0.00357615245463627, 0.00418183835630348, -0.00232108421420385,
-0.00155038790745454, 0.00232468140335929, -0.00481031263496057,
0.00481031263496057, 0.0092450581440513, -0.00615386557437869,
0.0137933221323365, -0.000609013417118831, -0.00396765361029328,
0.00655340301663276, -0.014998750717333, -0.0116325468358287,
0.00622085987510257, 0.00772204609391025, 0.00537017017459451,
0.00534148542033375, 0.00758153745239731, -0.00575671942776879,
0.00982697453829662, 0.00120282678763051, 0.00150150178359709,
0.0111899712935433, 0.000741564735504241, -0.00222634600308336,
0.00666422310912829, 0.0190064200954909, -0.0109211132785569,
0.00656696996993666, 0.00796816964917646, -0.00578872817624454,
-0.000435508463005441, -0.00655549000714828, -0.00248774548439012,
-0.00411100270652209, 0.0106827678697572, -0.0115659001026041,
-0.00976628064375173, 0.00814517646440116, 0.0163842167133792,
-0.00101618648506463, -0.00407510387839061, -0.020926198327027,
0.00223131368143559, -0.0119582889888736, -0.00906350615334706,
-0.00608829886725459, 0.000763067568501974, 0.000610035095935935,
0.00471519620281668, -0.00456274555841762, 0.00304414238122774,
-0.00304414238122774, 0.000304831582911724, -0.00412308746272405,
-0.0161978088266572, -0.00780644089283022, 0.0118418336109496,
-0.0157680263262305, 0.00235756494265438, 0.0124806612236092,
0.0038684767779209, -0.00650761515673892, 0.0134333835096987,
0.00229797113762409, 0.0156383662215971, -0.00120590910001184,
-0.00332376797909362, 0.0127811678998278, -0.00494271956620729,
0, 0, 0, -0.00376081676812667, -0.00604688161489353, 0.00378358377963917,
0.00977084060498967, 0.0121916466961824, 0.0172872749305295,
0.0115524750511504, -0.0203052661607455, -0.014017197414387,
0, 0.00917030535241015, -0.00324436208486301, 0.00147601502812034,
-0.0133632278121665, 0.000747105002998794, 0.00298285083206462,
0.00593914143696139, 0.00957650044320868, 0.00657656736456413,
-0.00730997407143885, 0, -0.00632960087760726, -0.00518174229236568,
-0.0160099177807522, 0.00826140635613548, 0.00253977876121247,
0.0108333774506457, 0, -0.0144208252443017, -0.00194858789079078,
0.00940796903997487, -0.00118976813558902, 0.0184303042053546,
0.00145985427386464, 0, 0.00581819823098773, -0.00508906950747079,
-0.00291971010333469, -0.00468934036338631, 0.00176108058429758,
0.00365898688504185, 0.0108973108891659, 0, -0.00144613184999987,
0.00749715160966868, -0.00086219289721079, 0.00487595975368205,
0.00314241081326916, -0.00543401880552796, -0.00431097089541055,
0.00158284801735675, -0.0112783150377069, 0.00478712744187959,
-0.00653359855964375, 0.0115859369863722, 0.0104563009547167,
-0.00600259055434549, 0.010551930217499, 0.0161804444559577,
0.00930756180974068, 0.00248584577670741, 0.00344234419097322,
0.00205973296301032, -0.0193911893146508, 0.0126469617007681,
-0.000828958323237039, -0.00304540656191143, 0.00166228047696038,
0.0062090575469318, -0.00275482267884453, 0.015058463874202,
0.0101386471713329, -0.00539448374935514, -0.0076015025998375,
-0.00464354836026271, 0.0070931957474647, 0.0113530074649963,
-0.00484980069536167, -0.000270124258801019, 0.00229373371832775,
-0.00242882321532445, -0.00528277557738477, -0.0010871043331786,
0.00704514497775133, -0.00908665640472783, 0.00989637304815805,
0.0096658021970093, 0.00639235097196611, 0, 0.00371008773257486,
-0.00703433785187002, -0.00373632673787672, 0.00333667309903962,
-0.00333667309903962, -0.00160556630690945, -0.00348759575504598,
0.00442448935231976, 0.000401257278171308, -0.000401257278171308,
0.00600201868499806, 0.00252340926781613, 0.00542365242471554,
0.00250346003336777, 0.00655826065208132, -0.00774332893219043,
0.00328839493958633, 0.00849956081245562, -0.00143331837685867,
0.00273455473483075, -0.0098007971704277, 0.00784830712613616,
-0.00326264563481704, -0.00524247596484972, -0.0334032391328094,
-0.00544960476756451, 0.00612872194137282, -0.0205768576887593,
-0.0111499412970346, 0.010872702592736, 0.00759619701238901,
0, 0.0140722316732269, 0.00054252001873234, -0.0150276052036373,
0.00274914262492132, -0.00965524742133361, -0.00069324092897638,
0.00553251756972539, 0.01844944711676, -0.00815776108021193,
0.0100517010402825, 0.0108879360429723, -0.00738010729762273,
0.00201816415623757, -0.029323614846505, 0.00441928629750432,
0.0100089956025595, -0.00753170284100335, -0.011752641324601,
0.017235862739132, 0.00884660414029348, 0.00338180910738117,
0.0107455701518298, -0.00428438197203462, -0.00646118817979513,
0.00605858467170606, -0.00538359308347225, 0.00538359308347225,
-0.0125617914413407, 0.000135915732455416, 0.00433957827752796,
-0.00338868504551737, 0.00541639780516867, 0.00269723696900126,
0.0033613477027048, -0.00430455332406865, -0.00378174410728871,
-0.000676818976767635, 0.00270453173641894, 0.0134140173656636,
0.00199667287632277, 0.00398142529918477, 0.000662032463769613,
0.00725357614889166, -0.0052701044242367, 0.00789477784700754,
-0.00209918733605985, 0.000787711738249541, 0, 0.000655952794920367,
0.00392670661617167, -0.00169968008493893, 0.00665147545208988,
0.0028556613383266, 0.00517131081337752, -0.00841701311094933,
0.010349380862003, 0.00947874395454384, 0.00901078499887564,
0, 0, 0.00290166102789868, -0.00315437773381788, -0.00799344483273945,
0, 0.0063492276786592, 0.00982499401023951, -0.00376742879364489,
0.000377382229776124, -0.00415905823540719, 0.00315238896705594,
-0.00125976333112909, 0.000252079658506155, 0.0193453770920282,
0.00591571850909745, 0.00208678649347949, -0.00122699401896931,
0.00183992692200707, 0.00975617494536518, -0.00364742045704336,
-0.00121876919412944, 0.0178878349447471, 0.0107233244871923,
-0.00570479322303186, 0.000595770050389355, -0.000834177489831056,
0.00700923570568612, -0.00796343425136836, 0.00357355949086546,
-0.00715993512209234, 0.00418285641240779, 0.009731859384968,
-0.00521019532691369, -0.000712589103787131, 0.00733904809242691,
-0.00200696602399031, -0.0100956971262036, -0.0112846335555457,
0.00565823456262482, -0.0145105327144144, -0.00610875448832182,
0, 0.0101201863550751, 0.00809525353626661, 0.00623802406701657,
-0.00323411673161633, 0.00955231144001267, -0.00357143236759772,
0.00890477834296011, -0.00807030252413643, -0.00537667822539589,
-0.00624926922619196, 0.0072072384049493, -0.0126469617007681,
-0.0171153322192676, 0.0140804285241138, -0.0013382811170688,
0.00679449350123296, 0.00362100576695301, -0.00362100576695301,
0.0105846549901241, -0.00925542997416073, 0.00229178075098968,
0, 0.00336781651011542, -0.017931180698012, -0.000489117153307461,
-0.0090920886808501, 0.00283478343530241, -0.00160128136697324,
0.00197044398729851, -0.00406830370652767, -0.0017309598782882,
0, 0, 0.00358224087601133, 0.0085942824873344, -0.00379693027657346,
0.00477444746096012, 0.0145492382148236, -0.00361751268732569,
-0.00399443731497851, -0.000242600680471661, -0.00559612166019718,
0.0136913751964638, 0.00695529845521037, 0.00262561315654786,
0.00416295575930992, 0.00402748708345246, -0.0111746288830901,
0.00214951121693296, 0, -0.00790423276917007, -0.0153877150215722,
0.00729930248161104, 0.00724640852076774, 0.00180342699915048,
0.00180018050414787, -0.00264137504993567, 0.0074260731732716,
0, 0.00867349345279944, 0, -0.000710059201431079, 0.00354526488090912,
0.003297612221556, 0.00994451014833952, 0.000698242784099357,
-0.00829395561068313, 0, 0.00584796988242342, 0.00232964578280903,
-0.00817761566523245, 0.00386349482504489, -0.00621229214680064,
0.00539400932599321, -0.00281063541390569, 0, -0.00552714995610071,
0, -0.00579506086327264, 0.00319735010975819, -0.00509691140752722,
-0.0100323306499979, -0.00240355842188311, -0.00252996942368533,
-0.00556430611899117, -0.00754904352100549, 0.00524167714422852,
0.00847462699097257, 0.00409047731079593, -0.00916663034265675,
-0.0144010299395463, -0.000614817111281418, 0.00613122705493652,
0, -0.0049019706002067, 0.0061237178505289, 0.00947874395454384,
-0.0165857460534218, 0.00649631505699766, -0.00711135905202998,
-0.0059237492734967, -0.0154674407078215, 0.0112501186545968,
-0.0213575957326499, 0.000507807551854533, -0.0150898003915492,
-0.00477882730573409, -0.00975744422653868, 0.0053458631778236
), times = structure(c(94780800, 94867200, 94953600, 95040000,
95299200, 95385600, 95472000, 95558400, 95644800, 95904000, 95990400,
96076800, 96163200, 96249600, 96508800, 96595200, 96681600, 96768000,
96854400, 97113600, 97200000, 97286400, 97372800, 97459200, 97718400,
97804800, 97891200, 97977600, 98064000, 98323200, 98409600, 98496000,
98582400, 98668800, 98928000, 99014400, 99100800, 99187200, 99273600,
99532800, 99619200, 99705600, 99792000, 99878400, 100137600,
100224000, 100310400, 100396800, 100483200, 100742400, 100828800,
100915200, 101001600, 101088000, 101347200, 101433600, 101520000,
101606400, 101692800, 101952000, 102038400, 102124800, 102211200,
102297600, 102556800, 102643200, 102729600, 102816000, 102902400,
103161600, 103248000, 103334400, 103420800, 103507200, 103766400,
103852800, 103939200, 104025600, 104112000, 104371200, 104457600,
104544000, 104630400, 104716800, 104976000, 105062400, 105148800,
105235200, 105321600, 105580800, 105667200, 105753600, 105840000,
105926400, 106185600, 106272000, 106358400, 106444800, 106531200,
106790400, 106876800, 106963200, 107049600, 107136000, 107395200,
107481600, 107568000, 107654400, 107740800, 1.08e+08, 108086400,
108172800, 108259200, 108345600, 108604800, 108691200, 108777600,
108864000, 108950400, 109209600, 109296000, 109382400, 109468800,
109555200, 109814400, 109900800, 109987200, 110073600, 110160000,
110419200, 110505600, 110592000, 110678400, 110764800, 111024000,
111110400, 111196800, 111283200, 111369600, 111628800, 111715200,
111801600, 111888000, 111974400, 112233600, 112320000, 112406400,
112492800, 112579200, 112838400, 112924800, 113011200, 113097600,
113184000, 113443200, 113529600, 113616000, 113702400, 113788800,
114048000, 114134400, 114220800, 114307200, 114393600, 114652800,
114739200, 114825600, 114912000, 114998400, 115257600, 115344000,
115430400, 115516800, 115603200, 115862400, 115948800, 116035200,
116121600, 116208000, 116467200, 116553600, 116640000, 116726400,
116812800, 117072000, 117158400, 117244800, 117331200, 117417600,
117676800, 117763200, 117849600, 117936000, 118022400, 118281600,
118368000, 118454400, 118540800, 118627200, 118886400, 118972800,
119059200, 119145600, 119232000, 119491200, 119577600, 119664000,
119750400, 119836800, 120096000, 120182400, 120268800, 120355200,
120441600, 120700800, 120787200, 120873600, 120960000, 121046400,
121305600, 121392000, 121478400, 121564800, 121651200, 121910400,
121996800, 122083200, 122169600, 122256000, 122515200, 122601600,
122688000, 122774400, 122860800, 123120000, 123206400, 123292800,
123379200, 123465600, 123724800, 123811200, 123897600, 123984000,
124070400, 124329600, 124416000, 124502400, 124588800, 124675200,
124934400, 125020800, 125107200, 125193600, 125280000, 125539200,
125625600, 125712000, 125798400, 125884800, 126144000, 126230400,
126316800, 126403200, 126489600, 126748800, 126835200, 126921600,
127008000, 127094400, 127353600, 127440000, 127526400, 127612800,
127699200, 127958400, 128044800, 128131200, 128217600, 128304000,
128563200, 128649600, 128736000, 128822400, 128908800, 129168000,
129254400, 129340800, 129427200, 129513600, 129772800, 129859200,
129945600, 130032000, 130118400, 130377600, 130464000, 130550400,
130636800, 130723200, 130982400, 131068800, 131155200, 131241600,
131328000, 131587200, 131673600, 131760000, 131846400, 131932800,
132192000, 132278400, 132364800, 132451200, 132537600, 132796800,
132883200, 132969600, 133056000, 133142400, 133401600, 133488000,
133574400, 133660800, 133747200, 134006400, 134092800, 134179200,
134265600, 134352000, 134611200, 134697600, 134784000, 134870400,
134956800, 135216000, 135302400, 135388800, 135475200, 135561600,
135820800, 135907200, 135993600, 136080000, 136166400, 136425600,
136512000, 136598400, 136684800, 136771200, 137030400, 137116800,
137203200, 137289600, 137376000, 137635200, 137721600, 137808000,
137894400, 137980800, 138240000, 138326400, 138412800, 138499200,
138585600, 138844800, 138931200, 139017600, 139104000, 139190400,
139449600, 139536000, 139622400, 139708800, 139795200, 140054400,
140140800, 140227200, 140313600, 140400000, 140659200, 140745600,
140832000, 140918400, 141004800, 141264000, 141350400, 141436800,
141523200, 141609600, 141868800, 141955200, 142041600, 142128000,
142214400, 142473600, 142560000, 142646400, 142732800, 142819200,
143078400, 143164800, 143251200, 143337600, 143424000, 143683200,
143769600, 143856000, 143942400, 144028800, 144288000, 144374400,
144460800, 144547200, 144633600, 144892800, 144979200, 145065600,
145152000, 145238400, 145497600, 145584000, 145670400, 145756800,
145843200, 146102400, 146188800, 146275200, 146361600, 146448000,
146707200, 146793600, 146880000, 146966400, 147052800, 147312000,
147398400, 147484800, 147571200, 147657600, 147916800, 148003200,
148089600, 148176000, 148262400, 148521600, 148608000, 148694400,
148780800, 148867200, 149126400, 149212800, 149299200, 149385600,
149472000, 149731200, 149817600, 149904000, 149990400, 150076800,
150336000, 150422400, 150508800, 150595200, 150681600, 150940800,
151027200, 151113600, 151200000, 151286400, 151545600, 151632000,
151718400, 151804800, 151891200, 152150400, 152236800, 152323200,
152409600, 152496000, 152755200, 152841600, 152928000, 153014400,
153100800, 153360000, 153446400, 153532800, 153619200, 153705600,
153964800, 154051200, 154137600, 154224000, 154310400, 154569600,
154656000, 154742400, 154828800, 154915200, 155174400, 155260800,
155347200, 155433600, 155520000, 155779200, 155865600, 155952000,
156038400, 156124800, 156384000, 156470400, 156556800, 156643200,
156729600, 156988800, 157075200, 157161600, 157248000, 157334400,
157593600, 157680000, 157766400, 157852800, 157939200, 158198400,
158284800, 158371200, 158457600, 158544000, 158803200, 158889600,
158976000, 159062400, 159148800, 159408000, 159494400, 159580800,
159667200, 159753600, 160012800, 160099200, 160185600, 160272000,
160358400, 160617600, 160704000, 160790400, 160876800, 160963200,
161222400, 161308800, 161395200, 161481600, 161568000, 161827200,
161913600, 1.62e+08, 162086400, 162172800, 162432000, 162518400,
162604800, 162691200, 162777600, 163036800, 163123200, 163209600,
163296000, 163382400, 163641600, 163728000, 163814400, 163900800,
163987200, 164246400, 164332800, 164419200, 164505600, 164592000,
164851200, 164937600, 165024000, 165110400, 165196800, 165456000,
165542400, 165628800, 165715200, 165801600, 166060800, 166147200,
166233600, 166320000, 166406400, 166665600, 166752000, 166838400,
166924800, 167011200, 167270400, 167356800, 167443200, 167529600,
167616000, 167875200, 167961600, 168048000, 168134400, 168220800,
168480000, 168566400, 168652800, 168739200, 168825600, 169084800,
169171200, 169257600, 169344000, 169430400, 169689600, 169776000,
169862400, 169948800, 170035200, 170294400, 170380800, 170467200,
170553600, 170640000, 170899200, 170985600, 171072000, 171158400,
171244800, 171504000, 171590400, 171676800, 171763200, 171849600,
172108800, 172195200, 172281600, 172368000, 172454400, 172713600,
172800000, 172886400, 172972800, 173059200, 173318400, 173404800,
173491200, 173577600, 173664000, 173923200, 174009600, 174096000,
174182400, 174268800, 174528000, 174614400, 174700800, 174787200,
174873600, 175132800, 175219200, 175305600, 175392000, 175478400,
175737600, 175824000, 175910400, 175996800, 176083200, 176342400,
176428800, 176515200, 176601600, 176688000, 176947200, 177033600,
177120000, 177206400, 177292800, 177552000, 177638400, 177724800,
177811200, 177897600, 178156800, 178243200, 178329600, 178416000,
178502400, 178761600, 178848000, 178934400, 179020800, 179107200,
179366400, 179452800, 179539200, 179625600, 179712000, 179971200,
180057600, 180144000, 180230400, 180316800, 180576000, 180662400,
180748800, 180835200, 180921600, 181180800, 181267200, 181353600,
181440000, 181526400, 181785600, 181872000, 181958400, 182044800,
182131200, 182390400, 182476800, 182563200, 182649600, 182736000,
182995200, 183081600, 183168000, 183254400, 183340800, 183600000,
183686400, 183772800, 183859200, 183945600, 184204800, 184291200,
184377600, 184464000, 184550400, 184809600, 184896000, 184982400,
185068800, 185155200, 185414400, 185500800, 185587200, 185673600,
185760000, 186019200, 186105600, 186192000, 186278400, 186364800,
186624000, 186710400, 186796800, 186883200, 186969600, 187228800,
187315200, 187401600, 187488000, 187574400, 187833600, 187920000,
188006400, 188092800, 188179200, 188438400, 188524800, 188611200,
188697600, 188784000, 189043200, 189129600, 189216000, 189302400,
189388800, 189648000, 189734400, 189820800, 189907200, 189993600,
190252800, 190339200, 190425600, 190512000, 190598400, 190857600,
190944000, 191030400, 191116800, 191203200, 191462400, 191548800,
191635200, 191721600, 191808000, 192067200, 192153600, 192240000,
192326400, 192412800, 192672000, 192758400, 192844800, 192931200,
193017600, 193276800, 193363200, 193449600, 193536000, 193622400,
193881600, 193968000, 194054400, 194140800, 194227200, 194486400,
194572800, 194659200, 194745600, 194832000, 195091200, 195177600,
195264000, 195350400, 195436800, 195696000, 195782400, 195868800,
195955200, 196041600, 196300800, 196387200, 196473600, 196560000,
196646400, 196905600, 196992000, 197078400, 197164800, 197251200,
197510400, 197596800, 197683200, 197769600, 197856000, 198115200,
198201600, 198288000, 198374400, 198460800, 198720000, 198806400,
198892800, 198979200, 199065600, 199324800, 199411200, 199497600,
199584000, 199670400, 199929600, 200016000, 200102400, 200188800,
200275200, 200534400, 200620800, 200707200, 200793600, 200880000,
201139200, 201225600, 201312000, 201398400, 201484800, 201744000,
201830400, 201916800, 202003200, 202089600, 202348800, 202435200,
202521600, 202608000, 202694400, 202953600, 203040000, 203126400,
203212800, 203299200, 203558400, 203644800, 203731200, 203817600,
203904000, 204163200, 204249600, 204336000, 204422400, 204508800,
204768000, 204854400, 204940800, 205027200, 205113600, 205372800,
205459200, 205545600, 205632000, 205718400, 205977600, 206064000,
206150400, 206236800, 206323200, 206582400, 206668800, 206755200,
206841600, 206928000, 207187200, 207273600, 207360000, 207446400,
207532800, 207792000, 207878400, 207964800, 208051200, 208137600,
208396800, 208483200, 208569600, 208656000, 208742400, 209001600,
209088000, 209174400, 209260800, 209347200, 209606400, 209692800,
209779200, 209865600, 209952000, 210211200, 210297600, 210384000,
210470400, 210556800, 210816000, 210902400, 210988800, 211075200,
211161600, 211420800, 211507200, 211593600, 211680000, 211766400,
212025600, 212112000, 212198400, 212284800, 212371200, 212630400,
212716800, 212803200, 212889600, 212976000, 213235200, 213321600,
213408000, 213494400, 213580800, 213840000, 213926400, 214012800,
214099200, 214185600, 214444800, 214531200, 214617600, 214704000,
214790400, 215049600, 215136000, 215222400, 215308800, 215395200,
215654400, 215740800, 215827200, 215913600, 2.16e+08, 216259200,
216345600, 216432000, 216518400, 216604800, 216864000, 216950400,
217036800, 217123200, 217209600, 217468800, 217555200, 217641600,
217728000, 217814400, 218073600, 218160000, 218246400, 218332800,
218419200, 218678400, 218764800, 218851200, 218937600, 219024000,
219283200, 219369600, 219456000, 219542400, 219628800, 219888000,
219974400, 220060800, 220147200, 220233600, 220492800, 220579200,
220665600, 220752000, 220838400, 221097600, 221184000, 221270400,
221356800, 221443200, 221702400, 221788800, 221875200, 221961600,
222048000, 222307200, 222393600, 222480000, 222566400, 222652800,
222912000, 222998400, 223084800, 223171200, 223257600, 223516800,
223603200, 223689600, 223776000, 223862400, 224121600, 224208000,
224294400, 224380800, 224467200, 224726400, 224812800, 224899200,
224985600, 225072000, 225331200, 225417600, 225504000, 225590400,
225676800, 225936000, 226022400, 226108800, 226195200, 226281600,
226540800, 226627200, 226713600, 226800000, 226886400, 227145600,
227232000, 227318400, 227404800, 227491200, 227750400, 227836800,
227923200, 228009600, 228096000, 228355200, 228441600, 228528000,
228614400, 228700800, 228960000, 229046400, 229132800, 229219200,
229305600, 229564800, 229651200, 229737600, 229824000, 229910400,
230169600, 230256000, 230342400, 230428800, 230515200, 230774400,
230860800, 230947200, 231033600, 231120000, 231379200, 231465600,
231552000, 231638400, 231724800, 231984000, 232070400, 232156800,
232243200, 232329600, 232588800, 232675200, 232761600, 232848000,
232934400, 233193600, 233280000, 233366400, 233452800, 233539200,
233798400, 233884800, 233971200, 234057600, 234144000, 234403200,
234489600, 234576000, 234662400, 234748800, 235008000, 235094400,
235180800, 235267200, 235353600, 235612800, 235699200, 235785600,
235872000, 235958400, 236217600, 236304000, 236390400, 236476800,
236563200, 236822400, 236908800, 236995200, 237081600, 237168000,
237427200, 237513600, 237600000, 237686400, 237772800, 238032000,
238118400, 238204800, 238291200, 238377600, 238636800, 238723200,
238809600, 238896000, 238982400, 239241600, 239328000, 239414400,
239500800, 239587200, 239846400, 239932800, 240019200, 240105600,
240192000, 240451200, 240537600, 240624000, 240710400, 240796800,
241056000, 241142400, 241228800, 241315200, 241401600, 241660800,
241747200, 241833600, 241920000, 242006400, 242265600, 242352000,
242438400, 242524800, 242611200, 242870400, 242956800, 243043200,
243129600, 243216000, 243475200, 243561600, 243648000, 243734400,
243820800, 244080000, 244166400, 244252800, 244339200, 244425600,
244684800, 244771200, 244857600, 244944000, 245030400, 245289600,
245376000, 245462400, 245548800, 245635200, 245894400, 245980800,
246067200, 246153600, 246240000, 246499200, 246585600, 246672000,
246758400, 246844800, 247104000, 247190400, 247276800, 247363200,
247449600, 247708800, 247795200, 247881600, 247968000, 248054400,
248313600, 248400000, 248486400, 248572800, 248659200, 248918400,
249004800, 249091200, 249177600, 249264000, 249523200, 249609600,
249696000, 249782400, 249868800, 250128000, 250214400, 250300800,
250387200, 250473600, 250732800, 250819200, 250905600, 250992000,
251078400, 251337600, 251424000, 251510400, 251596800, 251683200,
251942400, 252028800, 252115200, 252201600, 252288000, 252547200,
252633600, 252720000, 252806400, 252892800, 253152000, 253238400,
253324800, 253411200, 253497600, 253756800, 253843200, 253929600,
254016000, 254102400, 254361600, 254448000, 254534400, 254620800,
254707200, 254966400, 255052800, 255139200, 255225600, 255312000,
255571200, 255657600, 255744000, 255830400, 255916800, 256176000,
256262400, 256348800, 256435200, 256521600, 256780800, 256867200,
256953600, 257040000, 257126400, 257385600, 257472000, 257558400,
257644800, 257731200, 257990400, 258076800, 258163200, 258249600,
258336000, 258595200, 258681600, 258768000, 258854400, 258940800,
259200000, 259286400, 259372800, 259459200, 259545600, 259804800,
259891200, 259977600, 260064000, 260150400, 260409600, 260496000,
260582400, 260668800, 260755200, 261014400, 261100800, 261187200,
261273600, 261360000, 261619200, 261705600, 261792000, 261878400,
261964800, 262224000, 262310400, 262396800, 262483200, 262569600,
262828800, 262915200, 263001600, 263088000, 263174400, 263433600,
263520000, 263606400, 263692800, 263779200, 264038400, 264124800,
264211200, 264297600, 264384000, 264643200, 264729600, 264816000,
264902400, 264988800, 265248000, 265334400, 265420800, 265507200,
265593600, 265852800, 265939200, 266025600, 266112000, 266198400,
266457600, 266544000, 266630400, 266716800, 266803200, 267062400,
267148800, 267235200, 267321600, 267408000, 267667200, 267753600,
267840000, 267926400, 268012800, 268272000, 268358400, 268444800,
268531200, 268617600, 268876800, 268963200, 269049600, 269136000,
269222400, 269481600, 269568000, 269654400, 269740800, 269827200,
270086400, 270172800, 270259200, 270345600, 270432000, 270691200,
270777600, 270864000, 270950400, 271036800, 271296000, 271382400,
271468800, 271555200, 271641600, 271900800, 271987200, 272073600,
272160000, 272246400, 272505600, 272592000, 272678400, 272764800,
272851200, 273110400, 273196800, 273283200, 273369600, 273456000,
273715200, 273801600, 273888000, 273974400, 274060800, 274320000,
274406400, 274492800, 274579200, 274665600, 274924800, 275011200,
275097600, 275184000, 275270400, 275529600, 275616000, 275702400,
275788800, 275875200, 276134400, 276220800, 276307200, 276393600,
276480000, 276739200, 276825600, 276912000, 276998400, 277084800,
277344000, 277430400, 277516800, 277603200, 277689600, 277948800,
278035200, 278121600, 278208000, 278294400, 278553600, 278640000,
278726400, 278812800, 278899200, 279158400, 279244800, 279331200,
279417600, 279504000, 279763200, 279849600, 279936000, 280022400,
280108800, 280368000, 280454400, 280540800, 280627200, 280713600,
280972800, 281059200, 281145600, 281232000, 281318400, 281577600,
281664000, 281750400, 281836800, 281923200, 282182400, 282268800,
282355200, 282441600, 282528000, 282787200, 282873600, 282960000,
283046400, 283132800, 283392000, 283478400, 283564800, 283651200,
283737600, 283996800, 284083200, 284169600, 284256000, 284342400,
284601600, 284688000, 284774400, 284860800, 284947200, 285206400,
285292800, 285379200, 285465600, 285552000, 285811200, 285897600,
285984000, 286070400, 286156800, 286416000, 286502400, 286588800,
286675200, 286761600, 287020800, 287107200, 287193600, 287280000,
287366400, 287625600, 287712000, 287798400, 287884800, 287971200,
288230400, 288316800, 288403200, 288489600, 288576000, 288835200,
288921600, 289008000, 289094400, 289180800, 289440000, 289526400,
289612800, 289699200, 289785600, 290044800, 290131200, 290217600,
290304000, 290390400, 290649600, 290736000, 290822400, 290908800,
290995200, 291254400, 291340800, 291427200, 291513600, 291600000,
291859200, 291945600, 292032000, 292118400, 292204800, 292464000,
292550400, 292636800, 292723200, 292809600, 293068800, 293155200,
293241600, 293328000, 293414400, 293673600, 293760000, 293846400,
293932800, 294019200, 294278400, 294364800, 294451200, 294537600,
294624000, 294883200, 294969600, 295056000, 295142400, 295228800,
295488000, 295574400, 295660800, 295747200, 295833600, 296092800,
296179200, 296265600, 296352000, 296438400, 296697600, 296784000,
296870400, 296956800, 297043200, 297302400, 297388800, 297475200,
297561600, 297648000, 297907200, 297993600, 298080000, 298166400,
298252800, 298512000, 298598400, 298684800, 298771200, 298857600,
299116800, 299203200, 299289600, 299376000, 299462400, 299721600,
299808000, 299894400, 299980800, 300067200, 300326400, 300412800,
300499200, 300585600, 300672000, 300931200, 301017600, 301104000,
301190400, 301276800, 301536000, 301622400, 301708800, 301795200,
301881600, 302140800, 302227200, 302313600, 302400000, 302486400,
302745600, 302832000, 302918400, 303004800, 303091200, 303350400,
303436800, 303523200, 303609600, 303696000, 303955200, 304041600,
304128000, 304214400, 304300800, 304560000, 304646400, 304732800,
304819200, 304905600, 305164800, 305251200, 305337600, 305424000,
305510400, 305769600, 305856000, 305942400, 306028800, 306115200,
306374400, 306460800, 306547200, 306633600, 306720000, 306979200,
307065600, 307152000, 307238400, 307324800, 307584000, 307670400,
307756800, 307843200, 307929600, 308188800, 308275200, 308361600,
308448000, 308534400, 308793600, 308880000, 308966400, 309052800,
309139200, 309398400, 309484800, 309571200, 309657600, 309744000,
310003200, 310089600, 310176000, 310262400, 310348800, 310608000,
310694400, 310780800, 310867200, 310953600, 311212800, 311299200,
311385600, 311472000, 311558400, 311817600, 311904000, 311990400,
312076800, 312163200, 312422400, 312508800, 312595200, 312681600,
312768000, 313027200, 313113600, 313200000, 313286400, 313372800,
313632000, 313718400, 313804800, 313891200, 313977600, 314236800,
314323200, 314409600, 314496000, 314582400, 314841600, 314928000,
315014400, 315100800, 315187200, 315446400, 315532800, 315619200,
315705600, 315792000, 316051200, 316137600, 316224000, 316310400,
316396800, 316656000, 316742400, 316828800, 316915200, 317001600,
317260800, 317347200, 317433600, 317520000, 317606400, 317865600,
317952000, 318038400, 318124800, 318211200, 318470400, 318556800,
318643200, 318729600, 318816000, 319075200, 319161600, 319248000,
319334400, 319420800, 319680000, 319766400, 319852800, 319939200,
320025600, 320284800, 320371200, 320457600, 320544000, 320630400,
320889600, 320976000, 321062400, 321148800, 321235200, 321494400,
321580800, 321667200, 321753600, 321840000, 322099200, 322185600,
322272000, 322358400, 322444800, 322704000, 322790400, 322876800,
322963200, 323049600, 323308800, 323395200, 323481600, 323568000,
323654400, 323913600, 3.24e+08, 324086400, 324172800, 324259200,
324518400, 324604800, 324691200, 324777600, 324864000, 325123200,
325209600, 325296000, 325382400, 325468800, 325728000, 325814400,
325900800, 325987200, 326073600, 326332800, 326419200, 326505600,
326592000, 326678400, 326937600, 327024000, 327110400, 327196800,
327283200, 327542400, 327628800, 327715200, 327801600, 327888000,
328147200, 328233600, 328320000, 328406400, 328492800, 328752000,
328838400, 328924800, 329011200, 329097600, 329356800, 329443200,
329529600, 329616000, 329702400, 329961600, 330048000, 330134400,
330220800, 330307200, 330566400, 330652800, 330739200, 330825600,
330912000, 331171200, 331257600, 331344000, 331430400, 331516800,
331776000, 331862400, 331948800, 332035200, 332121600, 332380800,
332467200, 332553600, 332640000, 332726400, 332985600, 333072000,
333158400, 333244800, 333331200, 333590400, 333676800, 333763200,
333849600, 333936000, 334195200, 334281600, 334368000, 334454400,
334540800, 334800000, 334886400, 334972800, 335059200, 335145600,
335404800, 335491200, 335577600, 335664000, 335750400, 336009600,
336096000, 336182400, 336268800, 336355200, 336614400, 336700800,
336787200, 336873600, 336960000, 337219200, 337305600, 337392000,
337478400, 337564800, 337824000, 337910400, 337996800, 338083200,
338169600, 338428800, 338515200, 338601600, 338688000, 338774400,
339033600, 339120000, 339206400, 339292800, 339379200, 339638400,
339724800, 339811200, 339897600, 339984000, 340243200, 340329600,
340416000, 340502400, 340588800, 340848000, 340934400, 341020800,
341107200, 341193600, 341452800, 341539200, 341625600, 341712000,
341798400, 342057600, 342144000, 342230400, 342316800, 342403200,
342662400, 342748800, 342835200, 342921600, 343008000, 343267200,
343353600, 343440000, 343526400, 343612800, 343872000, 343958400,
344044800, 344131200, 344217600, 344476800, 344563200, 344649600,
344736000, 344822400, 345081600, 345168000, 345254400, 345340800,
345427200, 345686400, 345772800, 345859200, 345945600, 346032000,
346291200, 346377600, 346464000, 346550400, 346636800, 346896000,
346982400, 347068800, 347155200, 347241600, 347500800, 347587200,
347673600, 347760000, 347846400, 348105600, 348192000, 348278400,
348364800, 348451200, 348710400, 348796800, 348883200, 348969600,
349056000, 349315200, 349401600, 349488000, 349574400, 349660800,
349920000, 350006400, 350092800, 350179200, 350265600, 350524800,
350611200, 350697600, 350784000, 350870400, 351129600, 351216000,
351302400, 351388800, 351475200, 351734400, 351820800, 351907200,
351993600, 352080000, 352339200, 352425600, 352512000, 352598400,
352684800, 352944000, 353030400, 353116800, 353203200, 353289600,
353548800, 353635200, 353721600, 353808000, 353894400, 354153600,
354240000, 354326400, 354412800, 354499200, 354758400, 354844800,
354931200, 355017600, 355104000, 355363200, 355449600, 355536000,
355622400, 355708800, 355968000, 356054400, 356140800, 356227200,
356313600, 356572800, 356659200, 356745600, 356832000, 356918400,
357177600, 357264000, 357350400, 357436800, 357523200, 357782400,
357868800, 357955200, 358041600, 358128000, 358387200, 358473600,
358560000, 358646400, 358732800, 358992000, 359078400, 359164800,
359251200, 359337600, 359596800, 359683200, 359769600, 359856000,
359942400, 360201600, 360288000, 360374400, 360460800, 360547200,
360806400, 360892800, 360979200, 361065600, 361152000, 361411200,
361497600, 361584000, 361670400, 361756800, 362016000, 362102400,
362188800, 362275200, 362361600, 362620800, 362707200, 362793600,
362880000, 362966400, 363225600, 363312000, 363398400, 363484800,
363571200, 363830400, 363916800, 364003200, 364089600, 364176000,
364435200, 364521600, 364608000, 364694400, 364780800, 365040000,
365126400, 365212800, 365299200, 365385600, 365644800, 365731200,
365817600, 365904000, 365990400, 366249600, 366336000, 366422400,
366508800, 366595200, 366854400, 366940800, 367027200, 367113600,
367200000, 367459200, 367545600, 367632000, 367718400, 367804800,
368064000, 368150400, 368236800, 368323200, 368409600, 368668800,
368755200, 368841600, 368928000, 369014400, 369273600, 369360000,
369446400, 369532800, 369619200, 369878400, 369964800, 370051200,
370137600, 370224000, 370483200, 370569600, 370656000, 370742400,
370828800, 371088000, 371174400, 371260800, 371347200, 371433600,
371692800, 371779200, 371865600, 371952000, 372038400, 372297600,
372384000, 372470400, 372556800, 372643200, 372902400, 372988800,
373075200, 373161600, 373248000, 373507200, 373593600, 373680000,
373766400, 373852800, 374112000, 374198400, 374284800, 374371200,
374457600, 374716800, 374803200, 374889600, 374976000, 375062400,
375321600, 375408000, 375494400, 375580800, 375667200, 375926400,
376012800, 376099200, 376185600, 376272000, 376531200, 376617600,
376704000, 376790400, 376876800, 377136000, 377222400, 377308800,
377395200, 377481600, 377740800, 377827200, 377913600, 3.78e+08,
378086400, 378345600, 378432000, 378518400, 378604800, 378691200,
378950400, 379036800, 379123200, 379209600, 379296000, 379555200,
379641600, 379728000, 379814400, 379900800, 380160000, 380246400,
380332800, 380419200, 380505600, 380764800, 380851200, 380937600,
381024000, 381110400, 381369600, 381456000, 381542400, 381628800,
381715200, 381974400, 382060800, 382147200, 382233600, 382320000,
382579200, 382665600, 382752000, 382838400, 382924800, 383184000,
383270400, 383356800, 383443200, 383529600, 383788800, 383875200,
383961600, 384048000, 384134400, 384393600, 384480000, 384566400,
384652800, 384739200, 384998400, 385084800, 385171200, 385257600,
385344000, 385603200, 385689600, 385776000, 385862400, 385948800,
386208000, 386294400, 386380800, 386467200, 386553600, 386812800,
386899200, 386985600, 387072000, 387158400, 387417600, 387504000,
387590400, 387676800, 387763200, 388022400, 388108800, 388195200,
388281600, 388368000, 388627200, 388713600, 388800000, 388886400,
388972800, 389232000, 389318400, 389404800, 389491200, 389577600,
389836800, 389923200, 390009600, 390096000, 390182400, 390441600,
390528000, 390614400, 390700800, 390787200, 391046400, 391132800,
391219200, 391305600, 391392000, 391651200, 391737600, 391824000,
391910400, 391996800, 392256000, 392342400, 392428800, 392515200,
392601600, 392860800, 392947200, 393033600, 393120000, 393206400,
393465600, 393552000, 393638400, 393724800, 393811200, 394070400,
394156800, 394243200, 394329600, 394416000, 394675200, 394761600,
394848000, 394934400, 395020800, 395280000, 395366400, 395452800,
395539200, 395625600, 395884800, 395971200, 396057600, 396144000,
396230400, 396489600, 396576000, 396662400, 396748800, 396835200,
397094400, 397180800, 397267200, 397353600, 397440000, 397699200,
397785600, 397872000, 397958400, 398044800, 398304000, 398390400,
398476800, 398563200, 398649600, 398908800, 398995200, 399081600,
399168000, 399254400, 399513600, 399600000, 399686400, 399772800,
399859200, 400118400, 400204800, 400291200, 400377600, 400464000,
400723200, 400809600, 400896000, 400982400, 401068800, 401328000,
401414400, 401500800, 401587200, 401673600, 401932800, 402019200,
402105600, 402192000, 402278400, 402537600, 402624000, 402710400,
402796800, 402883200, 403142400, 403228800, 403315200, 403401600,
403488000, 403747200, 403833600, 403920000, 404006400, 404092800,
404352000, 404438400, 404524800, 404611200, 404697600, 404956800,
405043200, 405129600, 405216000, 405302400, 405561600, 405648000,
405734400, 405820800, 405907200, 406166400, 406252800, 406339200,
406425600, 406512000, 406771200, 406857600, 406944000, 407030400,
407116800, 407376000, 407462400, 407548800, 407635200, 407721600,
407980800, 408067200, 408153600, 408240000, 408326400, 408585600,
408672000, 408758400, 408844800, 408931200, 409190400, 409276800,
409363200, 409449600, 409536000, 409795200, 409881600, 409968000,
410054400, 410140800, 410400000, 410486400, 410572800, 410659200,
410745600, 411004800, 411091200, 411177600, 411264000, 411350400,
411609600, 411696000, 411782400, 411868800, 411955200, 412214400,
412300800, 412387200, 412473600, 412560000, 412819200, 412905600,
412992000, 413078400, 413164800, 413424000, 413510400, 413596800,
413683200, 413769600, 414028800, 414115200, 414201600, 414288000,
414374400, 414633600, 414720000, 414806400, 414892800, 414979200,
415238400, 415324800, 415411200, 415497600, 415584000, 415843200,
415929600, 416016000, 416102400, 416188800, 416448000, 416534400,
416620800, 416707200, 416793600, 417052800, 417139200, 417225600,
417312000, 417398400, 417657600, 417744000, 417830400, 417916800,
418003200, 418262400, 418348800, 418435200, 418521600, 418608000,
418867200, 418953600, 419040000, 419126400, 419212800, 419472000,
419558400, 419644800, 419731200, 419817600, 420076800, 420163200,
420249600, 420336000, 420422400, 420681600, 420768000, 420854400,
420940800, 421027200, 421286400, 421372800, 421459200, 421545600,
421632000, 421891200, 421977600, 422064000, 422150400, 422236800,
422496000, 422582400, 422668800, 422755200, 422841600, 423100800,
423187200, 423273600, 423360000, 423446400, 423705600, 423792000,
423878400, 423964800, 424051200, 424310400, 424396800, 424483200,
424569600, 424656000, 424915200, 425001600, 425088000, 425174400,
425260800, 425520000, 425606400, 425692800, 425779200, 425865600,
426124800, 426211200, 426297600, 426384000, 426470400, 426729600,
426816000, 426902400, 426988800, 427075200, 427334400, 427420800,
427507200, 427593600, 427680000, 427939200, 428025600, 428112000,
428198400, 428284800, 428544000, 428630400, 428716800, 428803200,
428889600, 429148800, 429235200, 429321600, 429408000, 429494400,
429753600, 429840000, 429926400, 430012800, 430099200, 430358400,
430444800, 430531200, 430617600, 430704000, 430963200, 431049600,
431136000, 431222400, 431308800, 431568000, 431654400, 431740800,
431827200, 431913600, 432172800, 432259200, 432345600, 432432000,
432518400, 432777600, 432864000, 432950400, 433036800, 433123200,
433382400, 433468800, 433555200, 433641600, 433728000, 433987200,
434073600, 434160000, 434246400, 434332800, 434592000, 434678400,
434764800, 434851200, 434937600, 435196800, 435283200, 435369600,
435456000, 435542400, 435801600, 435888000, 435974400, 436060800,
436147200, 436406400, 436492800, 436579200, 436665600, 436752000,
437011200, 437097600, 437184000, 437270400, 437356800, 437616000,
437702400, 437788800, 437875200, 437961600, 438220800, 438307200,
438393600, 438480000, 438566400, 438825600, 438912000, 438998400,
439084800, 439171200, 439430400, 439516800, 439603200, 439689600,
439776000, 440035200, 440121600, 440208000, 440294400, 440380800,
440640000, 440726400, 440812800, 440899200, 440985600, 441244800,
441331200, 441417600, 441504000, 441590400, 441849600, 441936000,
442022400, 442108800, 442195200, 442454400, 442540800, 442627200,
442713600, 442800000, 443059200, 443145600, 443232000, 443318400,
443404800, 443664000, 443750400, 443836800, 443923200, 444009600,
444268800, 444355200, 444441600, 444528000, 444614400, 444873600,
444960000, 445046400, 445132800, 445219200, 445478400, 445564800,
445651200, 445737600, 445824000, 446083200, 446169600, 446256000,
446342400, 446428800, 446688000, 446774400, 446860800, 446947200,
447033600, 447292800, 447379200, 447465600, 447552000, 447638400,
447897600, 447984000, 448070400, 448156800, 448243200, 448502400,
448588800, 448675200, 448761600, 448848000, 449107200, 449193600,
449280000, 449366400, 449452800, 449712000, 449798400, 449884800,
449971200, 450057600, 450316800, 450403200, 450489600, 450576000,
450662400, 450921600, 451008000, 451094400, 451180800, 451267200,
451526400, 451612800, 451699200, 451785600, 451872000, 452131200,
452217600, 452304000, 452390400, 452476800, 452736000, 452822400,
452908800, 452995200, 453081600, 453340800, 453427200, 453513600,
453600000, 453686400, 453945600, 454032000, 454118400, 454204800,
454291200, 454550400, 454636800, 454723200, 454809600, 454896000,
455155200, 455241600, 455328000, 455414400, 455500800, 455760000,
455846400, 455932800, 456019200, 456105600, 456364800, 456451200,
456537600, 456624000, 456710400, 456969600, 457056000, 457142400,
457228800, 457315200, 457574400, 457660800, 457747200, 457833600,
457920000, 458179200, 458265600, 458352000, 458438400, 458524800,
458784000, 458870400, 458956800, 459043200, 459129600, 459388800,
459475200, 459561600, 459648000, 459734400, 459993600, 460080000,
460166400, 460252800, 460339200, 460598400, 460684800, 460771200,
460857600, 460944000, 461203200, 461289600, 461376000, 461462400,
461548800, 461808000, 461894400, 461980800, 462067200, 462153600,
462412800, 462499200, 462585600, 462672000, 462758400, 463017600,
463104000, 463190400, 463276800, 463363200, 463622400, 463708800,
463795200, 463881600, 463968000, 464227200, 464313600, 464400000,
464486400, 464572800, 464832000, 464918400, 465004800, 465091200,
465177600, 465436800, 465523200, 465609600, 465696000, 465782400,
466041600, 466128000, 466214400, 466300800, 466387200, 466646400,
466732800, 466819200, 466905600, 466992000, 467251200, 467337600,
467424000, 467510400, 467596800, 467856000, 467942400, 468028800,
468115200, 468201600, 468460800, 468547200, 468633600, 468720000,
468806400, 469065600, 469152000, 469238400, 469324800, 469411200,
469670400, 469756800, 469843200, 469929600, 470016000, 470275200,
470361600, 470448000, 470534400, 470620800, 470880000, 470966400,
471052800, 471139200, 471225600, 471484800, 471571200, 471657600,
471744000, 471830400, 472089600, 472176000, 472262400, 472348800,
472435200, 472694400, 472780800, 472867200, 472953600, 473040000,
473299200, 473385600, 473472000, 473558400, 473644800, 473904000,
473990400, 474076800, 474163200, 474249600, 474508800, 474595200,
474681600, 474768000, 474854400, 475113600, 475200000, 475286400,
475372800, 475459200, 475718400, 475804800, 475891200, 475977600,
476064000, 476323200, 476409600, 476496000, 476582400, 476668800,
476928000, 477014400, 477100800, 477187200, 477273600, 477532800,
477619200, 477705600, 477792000, 477878400, 478137600, 478224000,
478310400, 478396800, 478483200, 478742400, 478828800, 478915200,
479001600, 479088000, 479347200, 479433600, 479520000, 479606400,
479692800, 479952000, 480038400, 480124800, 480211200, 480297600,
480556800, 480643200, 480729600, 480816000, 480902400, 481161600,
481248000, 481334400, 481420800, 481507200, 481766400, 481852800,
481939200, 482025600, 482112000, 482371200, 482457600, 482544000,
482630400, 482716800, 482976000, 483062400, 483148800, 483235200,
483321600, 483580800, 483667200, 483753600, 483840000, 483926400,
484185600, 484272000, 484358400, 484444800, 484531200, 484790400,
484876800, 484963200, 485049600, 485136000, 485395200, 485481600,
485568000, 485654400, 485740800, 4.86e+08, 486086400, 486172800,
486259200, 486345600, 486604800, 486691200, 486777600, 486864000,
486950400, 487209600, 487296000, 487382400, 487468800, 487555200,
487814400, 487900800, 487987200, 488073600, 488160000, 488419200,
488505600, 488592000, 488678400, 488764800, 489024000, 489110400,
489196800, 489283200, 489369600, 489628800, 489715200, 489801600,
489888000, 489974400, 490233600, 490320000, 490406400, 490492800,
490579200, 490838400, 490924800, 491011200, 491097600, 491184000,
491443200, 491529600, 491616000, 491702400, 491788800, 492048000,
492134400, 492220800, 492307200, 492393600, 492652800, 492739200,
492825600, 492912000, 492998400, 493257600, 493344000, 493430400,
493516800, 493603200, 493862400, 493948800, 494035200, 494121600,
494208000, 494467200, 494553600, 494640000, 494726400, 494812800,
495072000, 495158400, 495244800, 495331200, 495417600, 495676800,
495763200, 495849600, 495936000, 496022400, 496281600, 496368000,
496454400, 496540800, 496627200, 496886400, 496972800, 497059200,
497145600, 497232000, 497491200, 497577600, 497664000, 497750400,
497836800, 498096000, 498182400, 498268800, 498355200, 498441600,
498700800, 498787200, 498873600, 498960000, 499046400, 499305600,
499392000, 499478400, 499564800, 499651200, 499910400, 499996800,
500083200, 500169600, 500256000, 500515200, 500601600, 500688000,
500774400, 500860800, 501120000, 501206400, 501292800, 501379200,
501465600, 501724800, 501811200, 501897600, 501984000, 502070400,
502329600, 502416000, 502502400, 502588800, 502675200, 502934400,
503020800, 503107200, 503193600, 503280000, 503539200, 503625600,
503712000, 503798400, 503884800, 504144000, 504230400, 504316800,
504403200, 504489600, 504748800, 504835200, 504921600, 505008000,
505094400, 505353600, 505440000, 505526400, 505612800, 505699200,
505958400, 506044800, 506131200, 506217600, 506304000, 506563200,
506649600, 506736000, 506822400, 506908800, 507168000, 507254400,
507340800, 507427200, 507513600, 507772800, 507859200, 507945600,
508032000, 508118400, 508377600, 508464000, 508550400, 508636800,
508723200, 508982400, 509068800, 509155200, 509241600, 509328000,
509587200, 509673600, 509760000, 509846400, 509932800, 510192000,
510278400, 510364800, 510451200, 510537600, 510796800, 510883200,
510969600, 511056000, 511142400, 511401600, 511488000, 511574400,
511660800, 511747200, 512006400, 512092800, 512179200, 512265600,
512352000, 512611200, 512697600, 512784000, 512870400, 512956800,
513216000, 513302400, 513388800, 513475200, 513561600, 513820800,
513907200, 513993600, 514080000, 514166400, 514425600, 514512000,
514598400, 514684800, 514771200, 515030400, 515116800, 515203200,
515289600, 515376000, 515635200, 515721600, 515808000, 515894400,
515980800, 516240000, 516326400, 516412800, 516499200, 516585600,
516844800, 516931200, 517017600, 517104000, 517190400, 517449600,
517536000, 517622400, 517708800, 517795200, 518054400, 518140800,
518227200, 518313600, 518400000, 518659200, 518745600, 518832000,
518918400, 519004800, 519264000, 519350400, 519436800, 519523200,
519609600, 519868800, 519955200, 520041600, 520128000, 520214400,
520473600, 520560000, 520646400, 520732800, 520819200, 521078400,
521164800, 521251200, 521337600, 521424000, 521683200, 521769600,
521856000, 521942400, 522028800, 522288000, 522374400, 522460800,
522547200, 522633600, 522892800, 522979200, 523065600, 523152000,
523238400, 523497600, 523584000, 523670400, 523756800, 523843200,
524102400, 524188800, 524275200, 524361600, 524448000, 524707200,
524793600, 524880000, 524966400, 525052800, 525312000, 525398400,
525484800, 525571200, 525657600, 525916800, 526003200, 526089600,
526176000, 526262400, 526521600, 526608000, 526694400, 526780800,
526867200, 527126400, 527212800, 527299200, 527385600, 527472000,
527731200, 527817600, 527904000, 527990400, 528076800, 528336000,
528422400, 528508800, 528595200, 528681600, 528940800, 529027200,
529113600, 529200000, 529286400, 529545600, 529632000, 529718400,
529804800, 529891200, 530150400, 530236800, 530323200, 530409600,
530496000, 530755200, 530841600, 530928000, 531014400, 531100800,
531360000, 531446400, 531532800, 531619200, 531705600, 531964800,
532051200, 532137600, 532224000, 532310400, 532569600, 532656000,
532742400, 532828800, 532915200, 533174400, 533260800, 533347200,
533433600, 533520000, 533779200, 533865600, 533952000, 534038400,
534124800, 534384000, 534470400, 534556800, 534643200, 534729600,
534988800, 535075200, 535161600, 535248000, 535334400, 535593600,
535680000, 535766400, 535852800, 535939200, 536198400, 536284800,
536371200, 536457600, 536544000, 536803200, 536889600, 536976000,
537062400, 537148800, 537408000, 537494400, 537580800, 537667200,
537753600, 538012800, 538099200, 538185600, 538272000, 538358400,
538617600, 538704000, 538790400, 538876800, 538963200, 539222400,
539308800, 539395200, 539481600, 539568000, 539827200, 539913600,
5.4e+08, 540086400, 540172800, 540432000, 540518400, 540604800,
540691200, 540777600, 541036800, 541123200, 541209600, 541296000,
541382400, 541641600, 541728000, 541814400, 541900800, 541987200,
542246400, 542332800, 542419200, 542505600, 542592000, 542851200,
542937600, 543024000, 543110400, 543196800, 543456000, 543542400,
543628800, 543715200, 543801600, 544060800, 544147200, 544233600,
544320000, 544406400, 544665600, 544752000, 544838400, 544924800,
545011200, 545270400, 545356800, 545443200, 545529600, 545616000,
545875200, 545961600, 546048000, 546134400, 546220800, 546480000,
546566400, 546652800, 546739200, 546825600, 547084800, 547171200,
547257600, 547344000, 547430400, 547689600, 547776000, 547862400,
547948800, 548035200, 548294400, 548380800, 548467200, 548553600,
548640000, 548899200, 548985600, 549072000, 549158400, 549244800,
549504000, 549590400, 549676800, 549763200, 549849600, 550108800,
550195200, 550281600, 550368000, 550454400, 550713600, 550800000,
550886400, 550972800, 551059200, 551318400, 551404800, 551491200,
551577600, 551664000, 551923200, 552009600, 552096000, 552182400,
552268800, 552528000, 552614400, 552700800, 552787200, 552873600,
553132800, 553219200, 553305600, 553392000, 553478400, 553737600,
553824000, 553910400, 553996800, 554083200, 554342400, 554428800,
554515200, 554601600, 554688000, 554947200, 555033600, 555120000,
555206400, 555292800, 555552000, 555638400, 555724800, 555811200,
555897600, 556156800, 556243200, 556329600, 556416000, 556502400,
556761600, 556848000, 556934400, 557020800, 557107200, 557366400,
557452800, 557539200, 557625600, 557712000, 557971200, 558057600,
558144000, 558230400, 558316800, 558576000, 558662400, 558748800,
558835200, 558921600, 559180800, 559267200, 559353600, 559440000,
559526400, 559785600, 559872000, 559958400, 560044800, 560131200,
560390400, 560476800, 560563200, 560649600, 560736000, 560995200,
561081600, 561168000, 561254400, 561340800, 561600000, 561686400,
561772800, 561859200, 561945600, 562204800, 562291200, 562377600,
562464000, 562550400, 562809600, 562896000, 562982400, 563068800,
563155200, 563414400, 563500800, 563587200, 563673600, 563760000,
564019200, 564105600, 564192000, 564278400, 564364800, 564624000,
564710400, 564796800, 564883200, 564969600, 565228800, 565315200,
565401600, 565488000, 565574400, 565833600, 565920000, 566006400,
566092800, 566179200, 566438400, 566524800, 566611200, 566697600,
566784000, 567043200, 567129600, 567216000, 567302400, 567388800,
567648000, 567734400, 567820800, 567907200, 567993600, 568252800,
568339200, 568425600, 568512000, 568598400, 568857600, 568944000,
569030400, 569116800, 569203200, 569462400, 569548800, 569635200,
569721600, 569808000, 570067200, 570153600, 570240000, 570326400,
570412800, 570672000, 570758400, 570844800, 570931200, 571017600,
571276800, 571363200, 571449600, 571536000, 571622400, 571881600,
571968000, 572054400, 572140800, 572227200, 572486400, 572572800,
572659200, 572745600, 572832000, 573091200, 573177600, 573264000,
573350400, 573436800, 573696000, 573782400, 573868800, 573955200,
574041600, 574300800, 574387200, 574473600, 574560000, 574646400,
574905600, 574992000, 575078400, 575164800, 575251200, 575510400,
575596800, 575683200, 575769600, 575856000, 576115200, 576201600,
576288000, 576374400, 576460800, 576720000, 576806400, 576892800,
576979200, 577065600, 577324800, 577411200, 577497600, 577584000,
577670400, 577929600, 578016000, 578102400, 578188800, 578275200,
578534400, 578620800, 578707200, 578793600, 578880000, 579139200,
579225600, 579312000, 579398400, 579484800, 579744000, 579830400,
579916800, 580003200, 580089600, 580348800, 580435200, 580521600,
580608000, 580694400, 580953600, 581040000, 581126400, 581212800,
581299200, 581558400, 581644800, 581731200, 581817600, 581904000,
582163200, 582249600, 582336000, 582422400, 582508800, 582768000,
582854400, 582940800, 583027200, 583113600, 583372800, 583459200,
583545600, 583632000, 583718400, 583977600, 584064000, 584150400,
584236800, 584323200, 584582400, 584668800, 584755200, 584841600,
584928000, 585187200, 585273600, 585360000, 585446400, 585532800,
585792000, 585878400, 585964800, 586051200, 586137600, 586396800,
586483200, 586569600, 586656000, 586742400, 587001600, 587088000,
587174400, 587260800, 587347200, 587606400, 587692800, 587779200,
587865600, 587952000, 588211200, 588297600, 588384000, 588470400,
588556800, 588816000, 588902400, 588988800, 589075200, 589161600,
589420800, 589507200, 589593600, 589680000, 589766400, 590025600,
590112000, 590198400, 590284800, 590371200, 590630400, 590716800,
590803200, 590889600, 590976000, 591235200, 591321600, 591408000,
591494400, 591580800, 591840000, 591926400, 592012800, 592099200,
592185600, 592444800, 592531200, 592617600, 592704000, 592790400,
593049600, 593136000, 593222400, 593308800, 593395200, 593654400,
593740800, 593827200, 593913600, 5.94e+08, 594259200, 594345600,
594432000, 594518400, 594604800, 594864000, 594950400, 595036800,
595123200, 595209600, 595468800, 595555200, 595641600, 595728000,
595814400, 596073600, 596160000, 596246400, 596332800, 596419200,
596678400, 596764800, 596851200, 596937600, 597024000, 597283200,
597369600, 597456000, 597542400, 597628800, 597888000, 597974400,
598060800, 598147200, 598233600, 598492800, 598579200, 598665600,
598752000, 598838400, 599097600, 599184000, 599270400, 599356800,
599443200, 599702400, 599788800, 599875200, 599961600, 600048000,
600307200, 600393600, 600480000, 600566400, 600652800, 600912000,
600998400, 601084800, 601171200, 601257600, 601516800, 601603200,
601689600, 601776000, 601862400, 602121600, 602208000, 602294400,
602380800, 602467200, 602726400, 602812800, 602899200, 602985600,
603072000, 603331200, 603417600, 603504000, 603590400, 603676800,
603936000, 604022400, 604108800, 604195200, 604281600, 604540800,
604627200, 604713600, 604800000, 604886400, 605145600, 605232000,
605318400, 605404800, 605491200, 605750400, 605836800, 605923200,
606009600, 606096000, 606355200, 606441600, 606528000, 606614400,
606700800, 606960000, 607046400, 607132800, 607219200, 607305600,
607564800, 607651200, 607737600, 607824000, 607910400, 608169600,
608256000, 608342400, 608428800, 608515200, 608774400, 608860800,
608947200, 609033600, 609120000, 609379200, 609465600, 609552000,
609638400, 609724800, 609984000, 610070400, 610156800, 610243200,
610329600, 610588800, 610675200, 610761600, 610848000, 610934400,
611193600, 611280000, 611366400, 611452800, 611539200, 611798400,
611884800, 611971200, 612057600, 612144000, 612403200, 612489600,
612576000, 612662400, 612748800, 613008000, 613094400, 613180800,
613267200, 613353600, 613612800, 613699200, 613785600, 613872000,
613958400, 614217600, 614304000, 614390400, 614476800, 614563200,
614822400, 614908800, 614995200, 615081600, 615168000, 615427200,
615513600, 615600000, 615686400, 615772800, 616032000, 616118400,
616204800, 616291200, 616377600, 616636800, 616723200, 616809600,
616896000, 616982400, 617241600, 617328000, 617414400, 617500800,
617587200, 617846400, 617932800, 618019200, 618105600, 618192000,
618451200, 618537600, 618624000, 618710400, 618796800, 619056000,
619142400, 619228800, 619315200, 619401600, 619660800, 619747200,
619833600, 619920000, 620006400, 620265600, 620352000, 620438400,
620524800, 620611200, 620870400, 620956800, 621043200, 621129600,
621216000, 621475200, 621561600, 621648000, 621734400, 621820800,
622080000, 622166400, 622252800, 622339200, 622425600, 622684800,
622771200, 622857600, 622944000, 623030400, 623289600, 623376000,
623462400, 623548800, 623635200, 623894400, 623980800, 624067200,
624153600, 624240000, 624499200, 624585600, 624672000, 624758400,
624844800, 625104000, 625190400, 625276800, 625363200, 625449600,
625708800, 625795200, 625881600, 625968000, 626054400, 626313600,
626400000, 626486400, 626572800, 626659200, 626918400, 627004800,
627091200, 627177600, 627264000, 627523200, 627609600, 627696000,
627782400, 627868800, 628128000, 628214400, 628300800, 628387200,
628473600, 628732800, 628819200, 628905600, 628992000, 629078400,
629337600, 629424000, 629510400, 629596800, 629683200, 629942400,
630028800, 630115200, 630201600, 630288000, 630547200, 630633600,
630720000, 630806400, 630892800, 631152000, 631238400, 631324800,
631411200, 631497600, 631756800, 631843200, 631929600, 632016000,
632102400, 632361600, 632448000, 632534400, 632620800, 632707200,
632966400, 633052800, 633139200, 633225600, 633312000, 633571200,
633657600, 633744000, 633830400, 633916800, 634176000, 634262400,
634348800, 634435200, 634521600, 634780800, 634867200, 634953600,
635040000, 635126400, 635385600, 635472000, 635558400, 635644800,
635731200, 635990400, 636076800, 636163200, 636249600, 636336000,
636595200, 636681600, 636768000, 636854400, 636940800, 637200000,
637286400, 637372800, 637459200, 637545600, 637804800, 637891200,
637977600, 638064000, 638150400, 638409600, 638496000, 638582400,
638668800, 638755200, 639014400, 639100800, 639187200, 639273600,
639360000, 639619200, 639705600, 639792000, 639878400, 639964800,
640224000, 640310400, 640396800, 640483200, 640569600, 640828800,
640915200, 641001600, 641088000, 641174400, 641433600, 641520000,
641606400, 641692800, 641779200, 642038400, 642124800, 642211200,
642297600, 642384000, 642643200, 642729600, 642816000, 642902400,
642988800, 643248000, 643334400, 643420800, 643507200, 643593600,
643852800, 643939200, 644025600, 644112000, 644198400, 644457600,
644544000, 644630400, 644716800, 644803200, 645062400, 645148800,
645235200, 645321600, 645408000, 645667200, 645753600, 645840000,
645926400, 646012800, 646272000, 646358400, 646444800, 646531200,
646617600, 646876800, 646963200, 647049600, 647136000, 647222400,
647481600, 647568000, 647654400, 647740800, 647827200, 648086400,
648172800, 648259200, 648345600, 648432000, 648691200, 648777600,
648864000, 648950400, 649036800, 649296000, 649382400, 649468800,
649555200, 649641600, 649900800, 649987200, 650073600, 650160000,
650246400, 650505600, 650592000, 650678400, 650764800, 650851200,
651110400, 651196800, 651283200, 651369600, 651456000, 651715200,
651801600, 651888000, 651974400, 652060800, 652320000, 652406400,
652492800, 652579200, 652665600, 652924800, 653011200, 653097600,
653184000, 653270400, 653529600, 653616000, 653702400, 653788800,
653875200, 654134400, 654220800, 654307200, 654393600, 654480000,
654739200, 654825600, 654912000, 654998400, 655084800, 655344000,
655430400, 655516800, 655603200, 655689600, 655948800, 656035200,
656121600, 656208000, 656294400, 656553600, 656640000, 656726400,
656812800, 656899200, 657158400, 657244800, 657331200, 657417600,
657504000, 657763200, 657849600, 657936000, 658022400, 658108800,
658368000, 658454400, 658540800, 658627200, 658713600, 658972800,
659059200, 659145600, 659232000, 659318400, 659577600, 659664000,
659750400, 659836800, 659923200, 660182400, 660268800, 660355200,
660441600, 660528000, 660787200, 660873600, 660960000, 661046400,
661132800, 661392000, 661478400, 661564800, 661651200, 661737600,
661996800, 662083200, 662169600, 662256000, 662342400, 662601600,
662688000, 662774400, 662860800, 662947200, 663206400, 663292800,
663379200, 663465600, 663552000, 663811200, 663897600, 663984000,
664070400, 664156800, 664416000, 664502400, 664588800, 664675200,
664761600, 665020800, 665107200, 665193600, 665280000, 665366400,
665625600, 665712000, 665798400, 665884800, 665971200, 666230400,
666316800, 666403200, 666489600, 666576000, 666835200, 666921600,
667008000, 667094400, 667180800, 667440000, 667526400, 667612800,
667699200, 667785600, 668044800, 668131200, 668217600, 668304000,
668390400, 668649600, 668736000, 668822400, 668908800, 668995200,
669254400, 669340800, 669427200, 669513600, 669600000, 669859200,
669945600, 670032000, 670118400, 670204800, 670464000, 670550400,
670636800, 670723200, 670809600, 671068800, 671155200, 671241600,
671328000, 671414400, 671673600, 671760000, 671846400, 671932800,
672019200, 672278400, 672364800, 672451200, 672537600, 672624000,
672883200, 672969600, 673056000, 673142400, 673228800, 673488000,
673574400, 673660800, 673747200, 673833600, 674092800, 674179200,
674265600, 674352000, 674438400, 674697600, 674784000, 674870400,
674956800, 675043200, 675302400, 675388800, 675475200, 675561600,
675648000, 675907200, 675993600, 676080000, 676166400, 676252800,
676512000, 676598400, 676684800, 676771200, 676857600, 677116800,
677203200, 677289600, 677376000, 677462400, 677721600, 677808000,
677894400, 677980800, 678067200, 678326400, 678412800, 678499200,
678585600, 678672000, 678931200, 679017600, 679104000, 679190400,
679276800, 679536000, 679622400, 679708800, 679795200, 679881600,
680140800, 680227200, 680313600, 680400000, 680486400, 680745600,
680832000, 680918400, 681004800, 681091200, 681350400, 681436800,
681523200, 681609600, 681696000, 681955200, 682041600, 682128000,
682214400, 682300800, 682560000, 682646400, 682732800, 682819200,
682905600, 683164800, 683251200, 683337600, 683424000, 683510400,
683769600, 683856000, 683942400, 684028800, 684115200, 684374400,
684460800, 684547200, 684633600, 684720000, 684979200, 685065600,
685152000, 685238400, 685324800, 685584000, 685670400, 685756800,
685843200, 685929600, 686188800, 686275200, 686361600, 686448000,
686534400, 686793600, 686880000, 686966400, 687052800, 687139200,
687398400, 687484800, 687571200, 687657600, 687744000, 688003200,
688089600, 688176000, 688262400, 688348800, 688608000, 688694400,
688780800, 688867200, 688953600, 689212800, 689299200, 689385600,
689472000, 689558400, 689817600, 689904000, 689990400, 690076800,
690163200, 690422400, 690508800, 690595200, 690681600, 690768000,
691027200, 691113600, 691200000, 691286400, 691372800, 691632000,
691718400, 691804800, 691891200, 691977600, 692236800, 692323200,
692409600, 692496000, 692582400, 692841600, 692928000, 693014400,
693100800, 693187200, 693446400, 693532800, 693619200, 693705600,
693792000, 694051200, 694137600, 694224000, 694310400, 694396800,
694656000, 694742400, 694828800, 694915200, 695001600, 695260800,
695347200, 695433600, 695520000, 695606400, 695865600, 695952000,
696038400, 696124800, 696211200, 696470400, 696556800, 696643200,
696729600, 696816000, 697075200, 697161600, 697248000, 697334400,
697420800, 697680000, 697766400, 697852800, 697939200, 698025600,
698284800, 698371200, 698457600, 698544000, 698630400, 698889600,
698976000, 699062400, 699148800, 699235200, 699494400, 699580800,
699667200, 699753600, 699840000, 700099200, 700185600, 700272000,
700358400, 700444800, 700704000, 700790400, 700876800, 700963200,
701049600, 701308800, 701395200, 701481600, 701568000, 701654400,
701913600, 7.02e+08, 702086400, 702172800, 702259200, 702518400,
702604800, 702691200, 702777600, 702864000, 703123200, 703209600,
703296000, 703382400, 703468800, 703728000, 703814400, 703900800,
703987200, 704073600, 704332800, 704419200, 704505600, 704592000,
704678400, 704937600, 705024000, 705110400, 705196800, 705283200,
705542400, 705628800, 705715200, 705801600, 705888000, 706147200,
706233600, 706320000, 706406400, 706492800, 706752000, 706838400,
706924800, 707011200, 707097600, 707356800, 707443200, 707529600,
707616000, 707702400, 707961600, 708048000, 708134400, 708220800,
708307200, 708566400, 708652800, 708739200, 708825600, 708912000,
709171200, 709257600, 709344000, 709430400, 709516800, 709776000,
709862400, 709948800, 710035200, 710121600, 710380800, 710467200,
710553600, 710640000, 710726400, 710985600, 711072000, 711158400,
711244800, 711331200, 711590400, 711676800, 711763200, 711849600,
711936000, 712195200, 712281600, 712368000, 712454400, 712540800,
712800000, 712886400, 712972800, 713059200, 713145600, 713404800,
713491200, 713577600, 713664000, 713750400, 714009600, 714096000,
714182400, 714268800, 714355200, 714614400, 714700800, 714787200,
714873600, 714960000, 715219200, 715305600, 715392000, 715478400,
715564800, 715824000, 715910400, 715996800, 716083200, 716169600,
716428800, 716515200, 716601600, 716688000, 716774400, 717033600,
717120000, 717206400, 717292800, 717379200, 717638400, 717724800,
717811200, 717897600, 717984000, 718243200, 718329600, 718416000,
718502400, 718588800, 718848000, 718934400, 719020800, 719107200,
719193600, 719452800, 719539200, 719625600, 719712000, 719798400,
720057600, 720144000, 720230400, 720316800, 720403200, 720662400,
720748800, 720835200, 720921600, 721008000, 721267200, 721353600,
721440000, 721526400, 721612800, 721872000, 721958400, 722044800,
722131200, 722217600, 722476800, 722563200, 722649600, 722736000,
722822400, 723081600, 723168000, 723254400, 723340800, 723427200,
723686400, 723772800, 723859200, 723945600, 724032000, 724291200,
724377600, 724464000, 724550400, 724636800, 724896000, 724982400,
725068800, 725155200, 725241600, 725500800, 725587200, 725673600,
725760000, 725846400, 726105600, 726192000, 726278400, 726364800,
726451200, 726710400, 726796800, 726883200, 726969600, 727056000,
727315200, 727401600, 727488000, 727574400, 727660800, 727920000,
728006400, 728092800, 728179200, 728265600, 728524800, 728611200,
728697600, 728784000, 728870400, 729129600, 729216000, 729302400,
729388800, 729475200, 729734400, 729820800, 729907200, 729993600,
730080000, 730339200, 730425600, 730512000, 730598400, 730684800,
730944000, 731030400, 731116800, 731203200, 731289600, 731548800,
731635200, 731721600, 731808000, 731894400, 732153600, 732240000,
732326400, 732412800, 732499200, 732758400, 732844800, 732931200,
733017600, 733104000, 733363200, 733449600, 733536000, 733622400,
733708800, 733968000, 734054400, 734140800, 734227200, 734313600,
734572800, 734659200, 734745600, 734832000, 734918400, 735177600,
735264000, 735350400, 735436800, 735523200, 735782400, 735868800,
735955200, 736041600, 736128000, 736387200, 736473600, 736560000,
736646400, 736732800, 736992000, 737078400, 737164800, 737251200,
737337600, 737596800, 737683200, 737769600, 737856000, 737942400,
738201600, 738288000, 738374400, 738460800, 738547200, 738806400,
738892800, 738979200, 739065600, 739152000, 739411200, 739497600,
739584000, 739670400, 739756800, 740016000, 740102400, 740188800,
740275200, 740361600, 740620800, 740707200, 740793600, 740880000,
740966400, 741225600, 741312000, 741398400, 741484800, 741571200,
741830400, 741916800, 742003200, 742089600, 742176000, 742435200,
742521600, 742608000, 742694400, 742780800, 743040000, 743126400,
743212800, 743299200, 743385600, 743644800, 743731200, 743817600,
743904000, 743990400, 744249600, 744336000, 744422400, 744508800,
744595200, 744854400, 744940800, 745027200, 745113600, 745200000,
745459200, 745545600, 745632000, 745718400, 745804800, 746064000,
746150400, 746236800, 746323200, 746409600, 746668800, 746755200,
746841600, 746928000, 747014400, 747273600, 747360000, 747446400,
747532800, 747619200, 747878400, 747964800, 748051200, 748137600,
748224000, 748483200, 748569600, 748656000, 748742400, 748828800,
749088000, 749174400, 749260800, 749347200, 749433600, 749692800,
749779200, 749865600, 749952000, 750038400, 750297600, 750384000,
750470400, 750556800, 750643200, 750902400, 750988800, 751075200,
751161600, 751248000, 751507200, 751593600, 751680000, 751766400,
751852800, 752112000, 752198400, 752284800, 752371200, 752457600,
752716800, 752803200, 752889600, 752976000, 753062400, 753321600,
753408000, 753494400, 753580800, 753667200, 753926400, 754012800,
754099200, 754185600, 754272000, 754531200, 754617600, 754704000,
754790400, 754876800, 755136000, 755222400, 755308800, 755395200,
755481600, 755740800, 755827200, 755913600, 7.56e+08, 756086400,
756345600, 756432000, 756518400, 756604800, 756691200, 756950400,
757036800, 757123200, 757209600, 757296000, 757555200, 757641600,
757728000, 757814400, 757900800, 758160000, 758246400, 758332800,
758419200, 758505600, 758764800, 758851200, 758937600, 759024000,
759110400, 759369600, 759456000, 759542400, 759628800, 759715200,
759974400, 760060800, 760147200, 760233600, 760320000, 760579200,
760665600, 760752000, 760838400, 760924800, 761184000, 761270400,
761356800, 761443200, 761529600, 761788800, 761875200, 761961600,
762048000, 762134400, 762393600, 762480000, 762566400, 762652800,
762739200, 762998400, 763084800, 763171200, 763257600, 763344000,
763603200, 763689600, 763776000, 763862400, 763948800, 764208000,
764294400, 764380800, 764467200, 764553600, 764812800, 764899200,
764985600, 765072000, 765158400, 765417600, 765504000, 765590400,
765676800, 765763200, 766022400, 766108800, 766195200, 766281600,
766368000, 766627200, 766713600, 766800000, 766886400, 766972800,
767232000, 767318400, 767404800, 767491200, 767577600, 767836800,
767923200, 768009600, 768096000, 768182400, 768441600, 768528000,
768614400, 768700800, 768787200, 769046400, 769132800, 769219200,
769305600, 769392000, 769651200, 769737600, 769824000, 769910400,
769996800, 770256000, 770342400, 770428800, 770515200, 770601600,
770860800, 770947200, 771033600, 771120000, 771206400, 771465600,
771552000, 771638400, 771724800, 771811200, 772070400, 772156800,
772243200, 772329600, 772416000, 772675200, 772761600, 772848000,
772934400, 773020800, 773280000, 773366400, 773452800, 773539200,
773625600, 773884800, 773971200, 774057600, 774144000, 774230400,
774489600, 774576000, 774662400, 774748800, 774835200, 775094400,
775180800, 775267200, 775353600, 775440000, 775699200, 775785600,
775872000, 775958400, 776044800, 776304000, 776390400, 776476800,
776563200, 776649600, 776908800, 776995200, 777081600, 777168000,
777254400, 777513600, 777600000, 777686400, 777772800, 777859200,
778118400, 778204800, 778291200, 778377600, 778464000, 778723200,
778809600, 778896000, 778982400, 779068800, 779328000, 779414400,
779500800, 779587200, 779673600, 779932800, 780019200, 780105600,
780192000, 780278400, 780537600, 780624000, 780710400, 780796800,
780883200, 781142400, 781228800, 781315200, 781401600, 781488000,
781747200, 781833600, 781920000, 782006400, 782092800, 782352000,
782438400, 782524800, 782611200, 782697600, 782956800, 783043200,
783129600, 783216000, 783302400, 783561600, 783648000, 783734400,
783820800, 783907200, 784166400, 784252800, 784339200, 784425600,
784512000, 784771200, 784857600, 784944000, 785030400, 785116800,
785376000, 785462400, 785548800, 785635200, 785721600, 785980800,
786067200, 786153600, 786240000, 786326400, 786585600, 786672000,
786758400, 786844800, 786931200, 787190400, 787276800, 787363200,
787449600, 787536000, 787795200, 787881600, 787968000, 788054400,
788140800, 788400000, 788486400, 788572800, 788659200, 788745600,
789004800, 789091200, 789177600, 789264000, 789350400, 789609600,
789696000, 789782400, 789868800, 789955200, 790214400, 790300800,
790387200, 790473600, 790560000, 790819200, 790905600, 790992000,
791078400, 791164800, 791424000, 791510400, 791596800, 791683200,
791769600, 792028800, 792115200, 792201600, 792288000, 792374400,
792633600, 792720000, 792806400, 792892800, 792979200, 793238400,
793324800, 793411200, 793497600, 793584000, 793843200, 793929600,
794016000, 794102400, 794188800, 794448000, 794534400, 794620800,
794707200, 794793600, 795052800, 795139200, 795225600, 795312000,
795398400, 795657600, 795744000, 795830400, 795916800, 796003200,
796262400, 796348800, 796435200, 796521600, 796608000, 796867200,
796953600, 797040000, 797126400, 797212800, 797472000, 797558400,
797644800, 797731200, 797817600, 798076800, 798163200, 798249600,
798336000, 798422400, 798681600, 798768000, 798854400, 798940800,
799027200, 799286400, 799372800, 799459200, 799545600, 799632000,
799891200, 799977600, 800064000, 800150400, 800236800, 800496000,
800582400, 800668800, 800755200, 800841600, 801100800, 801187200,
801273600, 801360000, 801446400, 801705600, 801792000, 801878400,
801964800, 802051200, 802310400, 802396800, 802483200, 802569600,
802656000, 802915200, 803001600, 803088000, 803174400, 803260800,
803520000, 803606400, 803692800, 803779200, 803865600, 804124800,
804211200, 804297600, 804384000, 804470400, 804729600, 804816000,
804902400, 804988800, 805075200, 805334400, 805420800, 805507200,
805593600, 805680000, 805939200, 806025600, 806112000, 806198400,
806284800, 806544000, 806630400, 806716800, 806803200, 806889600,
807148800, 807235200, 807321600, 807408000, 807494400, 807753600,
807840000, 807926400, 808012800, 808099200, 808358400, 808444800,
808531200, 808617600, 808704000, 808963200, 809049600, 809136000,
809222400, 809308800, 809568000, 809654400, 809740800, 809827200,
809913600, 810172800, 810259200, 810345600, 810432000, 810518400,
810777600, 810864000, 810950400, 811036800, 811123200, 811382400,
811468800, 811555200, 811641600, 811728000, 811987200, 812073600,
812160000, 812246400, 812332800, 812592000, 812678400, 812764800,
812851200, 812937600, 813196800, 813283200, 813369600, 813456000,
813542400, 813801600, 813888000, 813974400, 814060800, 814147200,
814406400, 814492800, 814579200, 814665600, 814752000, 815011200,
815097600, 815184000, 815270400, 815356800, 815616000, 815702400,
815788800, 815875200, 815961600, 816220800, 816307200, 816393600,
816480000, 816566400, 816825600, 816912000, 816998400, 817084800,
817171200, 817430400, 817516800, 817603200, 817689600, 817776000,
818035200, 818121600, 818208000, 818294400, 818380800, 818640000,
818726400, 818812800, 818899200, 818985600, 819244800, 819331200,
819417600, 819504000, 819590400, 819849600, 819936000, 820022400,
820108800, 820195200, 820454400, 820540800, 820627200, 820713600,
820800000, 821059200, 821145600, 821232000, 821318400, 821404800,
821664000, 821750400, 821836800, 821923200, 822009600, 822268800,
822355200, 822441600, 822528000, 822614400, 822873600, 822960000,
823046400, 823132800, 823219200, 823478400, 823564800, 823651200,
823737600, 823824000, 824083200, 824169600, 824256000, 824342400,
824428800, 824688000, 824774400, 824860800, 824947200, 825033600,
825292800, 825379200, 825465600, 825552000, 825638400, 825897600,
825984000, 826070400, 826156800, 826243200, 826502400, 826588800,
826675200, 826761600, 826848000, 827107200, 827193600, 827280000,
827366400, 827452800, 827712000, 827798400, 827884800, 827971200,
828057600, 828316800, 828403200, 828489600, 828576000, 828662400,
828921600, 829008000, 829094400, 829180800, 829267200, 829526400,
829612800, 829699200, 829785600, 829872000, 830131200, 830217600,
830304000, 830390400, 830476800, 830736000, 830822400, 830908800,
830995200, 831081600, 831340800, 831427200, 831513600, 831600000,
831686400, 831945600, 832032000, 832118400, 832204800, 832291200,
832550400, 832636800, 832723200, 832809600, 832896000, 833155200,
833241600, 833328000, 833414400, 833500800, 833760000, 833846400,
833932800, 834019200, 834105600, 834364800, 834451200, 834537600,
834624000, 834710400, 834969600, 835056000, 835142400, 835228800,
835315200, 835574400, 835660800, 835747200, 835833600, 835920000,
836179200, 836265600, 836352000, 836438400, 836524800, 836784000,
836870400, 836956800, 837043200, 837129600, 837388800, 837475200,
837561600, 837648000, 837734400, 837993600, 838080000), class = c("POSIXt",
"POSIXct")))
|
c464e782c6c83ab4e3b5272cccecc8a401f59f88
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/penaltyLearning/examples/IntervalRegressionCV.Rd.R
|
2f18724d9fdbec2f22559ff91c7c127fca017b58
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
IntervalRegressionCV.Rd.R
|
## Example script for penaltyLearning::IntervalRegressionCV
## (extracted from the package Rd examples).
library(penaltyLearning)
library(data.table)  # FIX: data.table(), setkey() and `:=` below need data.table attached
### Name: IntervalRegressionCV
### Title: IntervalRegressionCV
### Aliases: IntervalRegressionCV

### ** Examples

if(interactive()){
  library(penaltyLearning)
  data("neuroblastomaProcessed", package="penaltyLearning", envir=environment())
  if(require(future)){
    ## FIX: plan(multiprocess) is deprecated in future (>= 1.20.0);
    ## multisession is the portable replacement on all platforms.
    plan(multisession)
  }
  set.seed(1)
  i.train <- 1:100
  ## Fit with squared hinge loss as the CV model selection metric
  ## (the default when only feature/target matrices are given).
  fit <- with(neuroblastomaProcessed, IntervalRegressionCV(
    feature.mat[i.train,], target.mat[i.train,],
    verbose=0))
  ## When only features and target matrices are specified for
  ## training, the squared hinge loss is used as the metric to
  ## minimize on the validation set.
  plot(fit)
  ## Create an incorrect labels data.table (first key is same as
  ## rownames of feature.mat and target.mat).
  errors.per.model <- data.table(neuroblastomaProcessed$errors)
  errors.per.model[, pid.chr := paste0(profile.id, ".", chromosome)]
  setkey(errors.per.model, pid.chr)
  set.seed(1)
  fit <- with(neuroblastomaProcessed, IntervalRegressionCV(
    feature.mat[i.train,], target.mat[i.train,],
    ## The incorrect.labels.db argument is optional, but can be used if
    ## you want to use AUC as the CV model selection criterion.
    incorrect.labels.db=errors.per.model))
  plot(fit)
}
|
5e33cef09aad5c39bec848b13f6e58b22e88f96a
|
97e9c55dc2cf1a99b42e5f87aaab5f4378b272cf
|
/BE/LP1/Data Analytics/Assignment 4. Decision Tree- Bike Sharing/R/decision tree.r
|
45e0966c6641fdda8e0d1b2c28a6b88f28100471
|
[] |
no_license
|
riamittal8/Engineering-Assignments
|
1f133971ecedf301fe0416427631436675959f21
|
c62a55eaa1abec9854e3facf6743ee168f3c6ab0
|
refs/heads/master
| 2022-08-11T22:18:15.733246
| 2022-07-20T06:44:11
| 2022-07-20T06:44:11
| 216,047,821
| 0
| 0
| null | 2022-07-20T06:44:12
| 2019-10-18T14:56:09
|
Java
|
UTF-8
|
R
| false
| false
| 1,356
|
r
|
decision tree.r
|
## Decision-tree classification of Capital Bikeshare trip data:
## predict Member.type (Member vs Casual) from three other columns.
library(rpart)        # decision tree fitting
library(caTools)      # sample.split()
library(e1071)
library(rpart.plot)   # decision tree plotting
library(RColorBrewer)
#install.packages("rattle", dependencies = TRUE, repos='http://cran.rstudio.com/')

## NOTE(review): hard-coded absolute path; prefer running the script from
## the data directory or parameterising this path.
setwd("C:\\Users\\DELL\\Desktop\\Sem 7 submissions\\Lab Practise 1\\Data Analytics\\Assignment 4 - decision tree\\R")
data <- read.csv('2010-capitalbikeshare-tripdata.csv')

# To display number of rows and columns in data
dim(data)

set.seed(123)
## FIX: sample.split() expects the outcome vector, not the whole data.frame.
## Passing the data.frame produced a length-ncol(data) logical that was
## recycled over rows, giving a deterministic (non-random) split.
my_sample <- sample.split(data$Member.type, SplitRatio = 0.8)
biker_train <- data[my_sample, c(1, 4, 6, 9)]   # 80% of rows, 4 selected columns
biker_test  <- data[!my_sample, c(1, 4, 6, 9)]  # remaining 20% held out
summary(biker_test)
summary(biker_train)

## FIX: the original formula `biker_train$Member.type ~ .` left Member.type
## inside the `.` predictors (target leakage, since the LHS name differed
## from the column name); `Member.type ~ .` correctly excludes the target.
fit <- rpart(Member.type ~ ., data = biker_train, method = "class")
fit

## FIX: fancyRpartPlot() lives in the optional rattle package, which was
## never attached; guard the call so the script still runs without it.
if (requireNamespace("rattle", quietly = TRUE)) {
  rattle::fancyRpartPlot(fit)
}
rpart.plot(fit)
rpart.plot(fit, type = 4, extra = 101)

## Predict member type on the held-out rows (column 4 is the target).
prediction <- predict(fit, newdata = biker_test[, -4], type = "class")
table(biker_test[, 4], prediction)  # confusion matrix: actual vs predicted
table(prediction)

## Pie chart of predicted class frequencies.
png(file = "trypie.png")
## FIX: table() orders levels alphabetically ("Casual" first), so the
## hard-coded labels c("Member","Casual") were attached to the wrong
## slices; use the table's own names instead.
pred_tab <- table(prediction)
pie(pred_tab, labels = names(pred_tab))
dev.off()
|
43a56199d26b81e533f5d333aa710fbc77349037
|
503491528ad656e30a6f1b4cee7c342a1e3c21cd
|
/scripts/10_create_validation_samples.R
|
8af5e2a50d36e587af096682d753c472609aab2e
|
[
"MIT"
] |
permissive
|
albhasan/sits_classify_S2_10_16D_STK_077095
|
113143c97752908033b6715b74144b12a4583728
|
9a6fc2507eb2f0e75e555a2cff0abd6e643465d9
|
refs/heads/main
| 2023-08-13T02:01:41.475219
| 2021-10-08T14:59:01
| 2021-10-08T14:59:01
| 329,635,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,628
|
r
|
10_create_validation_samples.R
|
# Draw random validation samples from three classified raster maps and export
# per-class subsamples to shapefiles for accuracy assessment.
library(dplyr)
library(ensurer)
library(sf)
library(terra)
# Bayes-smoothed classification maps, one per algorithm.
# NOTE(review): the variable names do not match the directory names in the
# paths (tempcnn_file points at .../paper_defor_rf/ and rf_file at
# .../paper_defor_tempcnn/) -- confirm the file-to-algorithm mapping is
# intended and not a copy-paste swap.
resnet_file <- "/home/alber-d005/Documents/sits_classify_S2_10_16D_STK_077095/results/paper_defor_resnet/amazon_S2_10_16D_STK_class_2018-07-12_2019-07-28_v1_class_bayes_v1.tif"
tempcnn_file <- "/home/alber-d005/Documents/sits_classify_S2_10_16D_STK_077095/results/paper_defor_rf/amazon_S2_10_16D_STK_class_2018-07-12_2019-07-28_v1_class_bayes_v1.tif"
rf_file <- "/home/alber-d005/Documents/sits_classify_S2_10_16D_STK_077095/results/paper_defor_tempcnn/amazon_S2_10_16D_STK_class_2018-07-12_2019-07-28_v1_class_bayes_v1.tif"
# Destination directory for the exported shapefiles; must already exist.
out_dir <- "/home/alber-d005/Documents/sits_classify_S2_10_16D_STK_077095/data/validation/"
stopifnot(dir.exists(out_dir))
# Save an sf object to a shapefile whose location is derived from a template
# raster path.
#
# x:       sf object to write.
# y:       template file path; its parent directory name identifies the
#          algorithm and its base name (extension swapped to .shp) becomes
#          the shapefile name.
# out_dir: root output directory; the algorithm subdirectory is created
#          under it when missing.
# Returns TRUE.
sf_to_shp <- function(x, y, out_dir){
  # Parent directory of the template encodes the algorithm name.
  algo_dir <- basename(dirname(y))
  # Template file name with its extension replaced by .shp.
  shp_name <- paste0(tools::file_path_sans_ext(basename(y)), ".shp")
  # Full destination path: out_dir / algorithm / name.shp
  target <- file.path(out_dir, algo_dir, shp_name)
  if (!dir.exists(dirname(target))) {
    dir.create(dirname(target))
  }
  sf::write_sf(x, target)
  return(TRUE)
}
# Sampling pipeline: for each classified raster, draw 10000 random points,
# then subsample 150 points per class and export them to shapefiles.
set.seed(123)
data_tb <- tibble::tibble(file_path = c(resnet_file, tempcnn_file, rf_file)) %>%
# Fail early if any of the three raster files is missing.
ensurer::ensure_that(all(file.exists(.$file_path)),
err_desc = "File not found!") %>%
# Load each raster lazily as a terra SpatRaster.
dplyr::mutate(r = purrr::map(file_path, terra::rast)) %>%
# 10000 random sample points per raster, returned as points.
dplyr::mutate(samples = purrr::map(r, terra::spatSample,
size = 10000,
method = "random",
as.points = TRUE )) %>%
# Convert the terra point samples to sf objects.
dplyr::mutate(samples_sf = purrr::map(samples, sf::st_as_sf)) %>%
# Stratified subsample: 150 points per class (the raster band is named
# lyr1, so grouping by lyr1 groups by class label).
dplyr::mutate(sub_samples = purrr::map(samples_sf, function(x) {
x %>%
dplyr::group_by(lyr1) %>%
dplyr::sample_n(size = 150) %>%
return()
})) %>%
# Sanity check: 150 points x 4 classes = 600 rows expected per raster.
dplyr::mutate(n_samples = purrr::map_int(sub_samples, nrow)) %>%
ensurer::ensure_that(all(.$n_samples == 600),
err_desc = "Missing samples") %>%
# Write each subsample to a shapefile under out_dir (TRUE on success).
dplyr::mutate(export2shp = purrr::map2_lgl(sub_samples, file_path,
sf_to_shp, out_dir = out_dir))
|
cd5b89e70a57b49ec7ab8a59d833190aaf916728
|
cc3beea2feb5d66b4df71a96f42129687a1296e7
|
/draft/from_DataMining_folder/Pepare_JPG_Figures_for_Presentation_Random.R
|
235820d1f36f72bf5dbd14ee62088a15dd4f0201
|
[] |
no_license
|
YulongXieGitHub/YulongR_Code
|
133c90b708c33c447737aaa0b6d01f5c9cb33818
|
e1f68c1564fb4036df9500297fbd36548e3b8014
|
refs/heads/master
| 2021-01-23T15:03:12.427516
| 2015-07-16T01:52:35
| 2015-07-16T01:52:35
| 39,168,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140,378
|
r
|
Pepare_JPG_Figures_for_Presentation_Random.R
|
#
# Pepare_JPG_Figures_for_Presentation_Random.R
# April 21, 2015: add a "Janitor" label which has "A', "B" two values corresponding to "baseline" and "EEM"
# April 20, 2015: Team meeting requested (1) create a boxplot on the Janitor Shifting (2) Plots of the traninbg data results from classification.
# April 3, 2015
# On April 1, 2015 team meeting, Srinivas proposed to create new data sets based on the simulation data: use three weeks of EEm data and one week of baseline data for each EEM
# do cluster analysis
# check the distribution of independent variables in the clusters
#
# March 19, 2015: tally the composition in term of dates in each cluster.
#
# March 6, 2015: prepare plots for todays' meeting
#
# Feb 27, 2015: re-visit the scripts
#
# Feb 3, 2015: load the data for each CZ and each EEMs
#
# Large Office:
# Simulated at Miami for all of the individual EEMs
#
#
# eliminate all stuff
rm(list = ls(all = TRUE))
# close all devices which are currently open
device.list <- dev.list()
if (length(device.list) != 0){for (device.this in device.list){dev.off(device.this)}}
library("RSNNS")
library("chron")
library("reshape2")
library("lattice")
library("ggplot2")
library("mclust")
library("fpc")
library("NbClust")
library("rpart") # Classification and Regression Tree Packge
library("e1071") # SVM package
library(rpart) # Popular decision tree algorithm
library(rattle) # Fancy tree plot
library(rpart.plot) # Enhanced tree plots
library(RColorBrewer) # Color selection for fancy tree plot
library(randomForest) # Random Forest
# library(party) # Alternative decision tree algorithm
# library(partykit) # Convert rpart object to BinaryTree
col.array <- c("red","blue","green","magenta","cyan","purple","brown","black")
# setup start date and time
start_time <- date();
Start.time <- Sys.time()
set.seed(12345, kind = NULL) # set seed of random number
this.method.4.cluster <- "PAM"
this.method.4.NbClust <- "kmean"
# -------------------------------------------------------------------------------------------------
# change to the script directory
# -------------------------------------------------------------------------------------------------
if(.Platform$OS.type == "unix")
{
Path.Project <- "/phome/comstd/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining"
Path.Sim <- "/phome/comstd/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining"
Path.Script <- "/phome/comstd/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/0_scripts"
Path.EPW <- "/phome/comstd/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/00_process_epw"
}else{
Path.Project <- "X:/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining"
Path.Sim <- "X:/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining"
Path.Script <- "X:/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/0_scripts"
Path.EPW <- "X:/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/00_process_epw"
# Path.Project <- "C:/Yulong_Projects/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining"
# Path.Sim <- "C:/Yulong_Projects/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining"
# Path.Script <- "C:/Yulong_Projects/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/0_scripts"
# Path.EPW <- "C:/Yulong_Projects/CtrlBenefit/simulation_for_DataMining/OfficeLarge/sim_selected_4_DataMining/prepare.data.for.DataMining/00_process_epw"
}
setwd(Path.Script)
Path.IN <- paste(Path.Sim,"02_Plotting_LargeOffice", sep="/")
Path.OUT <- paste(Path.Sim,"Pepare_JPG_Figures_for_Presentation_Random",sep="/")
if (!file.exists(Path.IN)){print(paste(Path.IN," does not exist. Check why!",sep=""));die}
if (!file.exists(Path.OUT)){print(paste("NOT existing:",Path.OUT));dir.create(Path.OUT,showWarnings=TRUE,recursive=TRUE)}
FL.LOG <- paste(Path.OUT,"Pepare_JPG_Figures_for_Presentation_Random.log",sep="/")
if (file.exists(FL.LOG)){print(paste(FL.LOG," exist. Delete it!"));file.remove(FL.LOG)}
FL.EPW <- paste(Path.EPW,"epw.Rdata",sep="/")
if (!(file.exists(FL.EPW))){print(paste(FL.EPW," does not exist.Check why!"));die}
# fields in epw weather file which have been used in Eplus Simulation
field.used.all <- c("epw.T.drybulb","epw.T.dewpoint","epw.rel.humidity","epw.atm.pressure","epw.hor.ir.sky","epw.direct.norm.rad","epw.diffuse.hor.rad","epw.wind.direct","epw.wind.speed","epw.pres.weath.obs","epw.pres.weath.codes","epw.snow.depth","epw.liquid.precip.depth")
field.used.short <- c("epw.T.drybulb","epw.T.dewpoint","epw.rel.humidity","epw.atm.pressure","epw.hor.ir.sky","epw.direct.norm.rad","epw.diffuse.hor.rad","epw.wind.direct","epw.wind.speed")
# 1. load multiplot function
source("multipleplot.R")
cat(paste("1. insert a [multipleplot] function for ggplot2.\n",sep=""))
cat(paste("1. insert a [multipleplot] function for ggplot2.\n",sep=""),file=FL.LOG,append=TRUE)
# 2. CZ arrays
CZ.arrays <- c("Miami","Houston","Phoenix","Atlanta","LosAngeles","LasVegas","SanFrancisco","Baltimore","Albuquerque","Seattle","Chicago","Denver","Minneapolis","Helena","Duluth","Fairbanks")
cat(paste("2. specify inptu/out file/folder.\n",sep=""))
cat(paste("2. specify inptu/out file/folder.\n",sep=""),file=FL.LOG,append=TRUE)
# 3. Load the weather data
load(FL.EPW)
cat(paste("2b. a binary file with all epw weather data have been loaded.\n",sep=""))
cat(paste("2b. a binary file with all epw weather data have been loaded.\n",sep=""),file=FL.LOG,append=TRUE)
cat(paste("---------------- Loopping through Climate Zone ----------------------.\n",sep=""))
cat(paste("---------------- Loopping through Climate Zone ----------------------.\n",sep=""),file=FL.LOG,append=TRUE)
for (this.random in c("random1","random2"))
{
Path.Random <- paste(Path.OUT,this.random,sep="/")
if (!file.exists(Path.Random)){print(paste("NOT existing:",Path.Random));dir.create(Path.Random,showWarnings=TRUE,recursive=TRUE)}
for (this.CZ in CZ.arrays[c(7)]) #for (this.CZ in CZ.arrays) c("SanFrancisco","Albuquerque") c(7,9,4,8,3,2,5,6,10,11,12,13,14,15,16,1)
{
if (this.CZ == "SanFrancisco")
{
EEM.selected <- c(1,6)
}else if (this.CZ == "Albuquerque")
{
EEM.selected <- c(2,8)
}else{
EEM.selected <- c(1,6)
}
# 3. subfolder for each CZ
Path.CZ.IN <- paste(Path.IN, this.CZ,sep="/")
Path.CZ.OUT <- paste(Path.Random,this.CZ,sep="/")
if (!file.exists(Path.CZ.IN)) {print(paste(Path.CZ.IN," does not exist. Check why!",sep=""));die}
if (!file.exists(Path.CZ.OUT)){print(paste("NOT existing:",Path.CZ.OUT));dir.create(Path.CZ.OUT,showWarnings=TRUE,recursive=TRUE)}
cat(paste("3. [",this.CZ,"]: Output folder has been set.\n",sep=""))
cat(paste("3. [",this.CZ,"]: Output folder has been set.\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------------------------------
# 4. extract all Rdata file name in this CZ folder
# -----------------------------------------------------------------------------------------
EEMs.arrays <- grep("\\.Rdata",list.files(Path.CZ.IN),value=TRUE) # all EEM objects
cat(paste("4. [",this.CZ,"]: extract all R object files into [EEMs.arrays].\n",sep=""))
cat(paste("4. [",this.CZ,"]: extract all R object files into [EEMs.arrays].\n",sep=""),file=FL.LOG,append=TRUE)
# 5. delete "GatheringData_OfficeLarge.Rdata)
EEMs.arrays <- grep("GatheringData",EEMs.arrays,value=TRUE,invert=TRUE)
cat(paste("5. [",this.CZ,"]: delete [atheringData].\n",sep=""))
cat(paste("5. [",this.CZ,"]: delete [atheringData].\n",sep=""),file=FL.LOG,append=TRUE)
# 6. stripping EEM names
EEMs.name <- sub("(.*)_(.*)_(.*)_(.*)_(.*)_(.*)","\\1_\\2_\\3",EEMs.arrays)
EEMs.fuel <- sub("(.*)_(.*)_(.*)_(.*)_(.*)_(.*)","\\1",EEMs.arrays)
EEMs.num <- as.numeric(sub("(.*)_(.*)_(.*)_(.*)_(.*)_(.*)","\\2",EEMs.arrays))
EEMs.label <- sub("(.*)_(.*)_(.*)_(.*)_(.*)_(.*)","\\3",EEMs.arrays)
EEMs.saving <- sub("(.*)_(.*)_(.*)_(.*)_(.*)_(.*)","\\4",EEMs.arrays)
cat(paste("6. [",this.CZ,"]: get EEM related information.\n",sep=""))
cat(paste("6. [",this.CZ,"]: get EEM related information.\n",sep=""),file=FL.LOG,append=TRUE)
# 7. prepare a data frame consists of the information of all EEMs
EEMs.df <- data.frame(EEMs.names = EEMs.name,
EEMs.fuel = EEMs.fuel,
EEMs.num = EEMs.num,
EEMs.label = EEMs.label,
EEMs.saving = EEMs.saving)
cat(paste("7. [",this.CZ,"]: put all EEMs related into [EEMs.df].\n",sep=""))
cat(paste("7. [",this.CZ,"]: put all EEMs related into [EEMs.df].\n",sep=""),file=FL.LOG,append=TRUE)
# get the EPW weather file at this CZ
df.epw.thisCZ <- paste("EPW.",this.CZ,sep="")
command.string.EPW <- paste("myEPW.thisCZ <- ",df.epw.thisCZ,sep="")
eval(parse(text=command.string.EPW))
cat(paste("7b. [",this.CZ,"]: the epw weather data has been store into [myEPW.thisCZ].\n",sep=""))
cat(paste("7b. [",this.CZ,"]: the epw weather data has been store into [myEPW.thisCZ].\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# loopping through the EEMs
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
for (this.idx in seq(1,length(EEMs.name))[EEM.selected][1])
# for (this.idx in seq(1,length(EEMs.name)))
{
this.EEM.name = EEMs.name[this.idx]
this.EEM.fuel = EEMs.fuel[this.idx]
this.EEM.num = EEMs.num[this.idx]
this.EEM.label = EEMs.label[this.idx]
this.EEM.saving = EEMs.saving[this.idx]
this.EEM.Rdata = EEMs.arrays[this.idx]
#
# 11. current EEM being processed.
#
this.EEM.string <- sub("\\.Rdata","", this.EEM.Rdata)
this.EEM.pdf <- sub("\\.Rdata",".pdf", this.EEM.Rdata)
this.EEM_dummy.pdf <- sub("\\.Rdata","_dummy.pdf", this.EEM.Rdata)
this.EEM.clsLAB <- sub("\\.Rdata","_cluster_Label.csv", this.EEM.Rdata)
this.EEM.clsDAT <- sub("\\.Rdata","_cluster_Data.csv", this.EEM.Rdata)
this.EEM.clsSUM <- sub("\\.Rdata","_cluster_Summary.csv",this.EEM.Rdata)
this.EEM.weather <- sub("\\.Rdata","_weather.csv", this.EEM.Rdata)
this.EEM.Obj <- sub("\\.Rdata","_Processed.Rdata", this.EEM.Rdata)
this.EEM.csv <- sub("\\.Rdata","_fabriacted.csv", this.EEM.Rdata)
this.EEM.classifier <- sub("\\.Rdata","_classifier.csv", this.EEM.Rdata)
this.EEM.string.rev <- paste("EEM",this.EEM.num,"-",this.EEM.fuel,"(GJ)",sep="")
cat(paste("\n\n11. [",this.CZ,"]-[",this.EEM.name,"]: processing......................\n",sep=""))
cat(paste("\n\n11. [",this.CZ,"]-[",this.EEM.name,"]: processing......................\n",sep=""),file=FL.LOG,append=TRUE)
#
# 12. define a character string for the plot title
#
thisLearn.string <- paste(this.method.4.cluster,sep="")
cat(paste("12. [",this.CZ,"]-[",this.EEM.name,"]: character string for plot title.\n",sep=""))
cat(paste("12. [",this.CZ,"]-[",this.EEM.name,"]: character string for plot title.\n",sep=""),file=FL.LOG,append=TRUE)
#
# 13. generate files for each EEMs
#
FL.IN.OBJ <- paste(Path.CZ.IN, this.EEM.Rdata, sep="/")
FL.OUT.OBJ <- paste(Path.CZ.OUT,this.EEM.Rdata, sep="/")
FL.OUT.CSV <- paste(Path.CZ.OUT,this.EEM.csv, sep="/")
FL.OUT.PDF <- paste(Path.CZ.OUT,this.EEM.pdf, sep="/")
FL.OUT_Dummy.PDF <- paste(Path.CZ.OUT,this.EEM_dummy.pdf, sep="/")
FL.OUT.CLSLAB <- paste(Path.CZ.OUT,this.EEM.clsLAB, sep="/")
FL.OUT.CLSDAT <- paste(Path.CZ.OUT,this.EEM.clsDAT, sep="/")
FL.OUT.CLSSUM <- paste(Path.CZ.OUT,this.EEM.clsSUM, sep="/")
FL.weather <- paste(Path.CZ.OUT,this.EEM.weather, sep="/")
FL.PROCESSED.OBJ <- paste(Path.CZ.OUT,this.EEM.Obj, sep="/")
FL.classifier.CSV <- paste(Path.CZ.OUT,this.EEM.classifier,sep="/")
if (!(file.exists(FL.IN.OBJ))) {print(paste(FL.IN.OBJ," does exist. Check Why!"));die}
if (file.exists(FL.OUT.OBJ)) {print(paste(FL.OUT.OBJ, "exist. Delete it!"));file.remove(FL.OUT.OBJ)}
if (file.exists(FL.OUT.CSV)) {print(paste(FL.OUT.CSV, "exist. Delete it!"));file.remove(FL.OUT.CSV)}
if (file.exists(FL.OUT.PDF)) {print(paste(FL.OUT.PDF, "exist. Delete it!"));file.remove(FL.OUT.PDF)}
if (file.exists(FL.OUT_Dummy.PDF)) {print(paste(FL.OUT_Dummy.PDF, "exist. Delete it!"));file.remove(FL.OUT_Dummy.PDF)}
if (file.exists(FL.OUT.CLSLAB)) {print(paste(FL.OUT.CLSLAB, "exist. Delete it!"));file.remove(FL.OUT.CLSLAB)}
if (file.exists(FL.OUT.CLSDAT)) {print(paste(FL.OUT.CLSDAT, "exist. Delete it!"));file.remove(FL.OUT.CLSDAT)}
if (file.exists(FL.OUT.CLSSUM)) {print(paste(FL.OUT.CLSSUM, "exist. Delete it!"));file.remove(FL.OUT.CLSSUM)}
if (file.exists(FL.weather)) {print(paste(FL.weather, "exist. Delete it!"));file.remove(FL.weather)}
if (file.exists(FL.PROCESSED.OBJ)) {print(paste(FL.PROCESSED.OBJ, "exist. Delete it!"));file.remove(FL.PROCESSED.OBJ)}
if (file.exists(FL.classifier.CSV)){print(paste(FL.classifier.CSV,"exist. Delete it!"));file.remove(FL.classifier.CSV)}
cat(paste("13. [",this.CZ,"]-[",this.EEM.name,"]: prepare files for output.\n",sep=""))
cat(paste("13. [",this.CZ,"]-[",this.EEM.name,"]: prepare files for output.\n",sep=""),file=FL.LOG,append=TRUE)
# ----------------------------------------------------------------------------------------
# 16. open the pdf file
# ----------------------------------------------------------------------------------------
pdf(file = FL.OUT.PDF, paper="special", width=17, height=11,bg = "transparent") # dev.set(2) goes to what we want
pdf(file = FL.OUT_Dummy.PDF,paper="special", width=17, height=11,bg = "transparent") # dev.set(3) goes to dummy
cat(paste("16. [",this.CZ,"]-[",this.EEM.name,"]: open pdf file for plotting.\n",sep=""))
cat(paste("16. [",this.CZ,"]-[",this.EEM.name,"]: open pdf file for plotting.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 14. load the data which contains
# [myData.base]: differ from [myData.advn] only on 4 fields (i.e., -c(19,20,22,23) === "EEM","EnergyGJ","EEM.idx","EEM.name")
# [myData.advn]
# [myData.merged.long]
# [myData.4.weeklyLong]
# [myData.4.dailyLong]
# April 4, 2015: since we are going to fabricate a data set by using three weeks of EEM and one week of Baseline based on [myData.base"] and [myDaat.advn]
# [myData.merged.long], [myData.4.weeklyLong], [myData.4.dailyLong] will need to be deleted
# ---------------------------------------------------------------------------------
load(FL.IN.OBJ)
cat(paste("14. [",this.CZ,"]-[",this.EEM.name,"]: load data from [",FL.IN.OBJ,"].\n",sep=""))
cat(paste("14. [",this.CZ,"]-[",this.EEM.name,"]: load data from [",FL.IN.OBJ,"].\n",sep=""),file=FL.LOG,append=TRUE)
#
# add "month.lab" to [myData.base] and [myData.advn]
#
myData.base[,"month.lab"] <- factor(myData.base[,"month"],levels = c(1,2,3,4,5,6,7,8,9,10,11,12),labels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered = TRUE)
myData.advn[,"month.lab"] <- factor(myData.advn[,"month"],levels = c(1,2,3,4,5,6,7,8,9,10,11,12),labels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered = TRUE)
cat(paste("14A. [",this.CZ,"]-[",this.EEM.name,"]: added [month.lab] to [myData.base] and [myData.advn].\n",sep=""))
cat(paste("14A. [",this.CZ,"]-[",this.EEM.name,"]: added [month.lab] to [myData.base] and [myData.advn].\n",sep=""),file=FL.LOG,append=TRUE)
#
# generate a plot with two years of data
#
myTwoYears <- rbind(myData.base,myData.advn)
myTwoYears[,"week.idx.in.Year"] <- factor(paste("Week",myTwoYears[,"week.idx"],sep=""),levels=paste("Week",seq(1:53),sep=""),labels=paste("Week",seq(1:53),sep=""),ordered = TRUE)
myTwoYears[,"week.idx.in.Month"] <- as.factor(paste("Week",myTwoYears[,"week.idx.in.month"],sep=""))
myTwoYears[,"Janitor"] <- myTwoYears[,"EEM"]
myTwoYears[,"Janitor"] <- sub("Elec_00_base","B",sub(this.EEM.name,"A",myTwoYears[,"Janitor"]))
# 10a. plotting the classification results
# --------------------------------------------------------------
p.weekly.2years.op1 <- qplot(data=myTwoYears,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~week.idx.in.Month,geom="line")
p.weekly.2years.op1 <- p.weekly.2years.op1 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.weekly.2years.op1 <- p.weekly.2years.op1 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Eplus simulated electricity consumption of a Large Office building model\n",sep=""))
p.weekly.2years.op1 <- p.weekly.2years.op1 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
# --------------------------------------------------------------
p.daily.2years <- qplot(data=myTwoYears,x=hour,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~day,geom="line")
p.daily.2years <- p.daily.2years + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.daily.2years <- p.daily.2years + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Eplus simulated electricity consumption of a Large Office building model\n",sep=""))
p.daily.2years <- p.daily.2years + scale_x_discrete(name="Hour in the day",limits=c(12,24))
# --------------------------------------------------------------
p.weekly.2years.op2 <- qplot(data=myTwoYears,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=~week.idx.in.Year,geom="line")
p.weekly.2years.op2 <- p.weekly.2years.op2 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.weekly.2years.op2 <- p.weekly.2years.op2 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Eplus simulated electricity consumption of a Large Office building model\n",sep=""))
p.weekly.2years.op2 <- p.weekly.2years.op2 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
# --------------------------------------------------------------
dev.set(2)
plot(p.weekly.2years.op1)
plot(p.weekly.2years.op2)
plot(p.daily.2years)
FL.Fig00A.JPG <- paste(Path.CZ.OUT,paste(this.EEM.string,"Fig00A_TwoYearPlots_001.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig00A.JPG)){print(paste(FL.Fig00A.JPG,"exist. Delete it!"));file.remove(FL.Fig00A.JPG)}
jpeg(file = FL.Fig00A.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.2years.op1)
dev.off(4)
FL.Fig00B.JPG <- paste(Path.CZ.OUT,paste(this.EEM.string,"Fig00B.JPG",sep="_"),sep="/")
if (file.exists(FL.Fig00B.JPG)){print(paste(FL.Fig00B.JPG,"exist. Delete it!"));file.remove(FL.Fig00B.JPG)}
jpeg(file = FL.Fig00B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.2years.op2)
dev.off(4)
FL.Fig00C.JPG <- paste(Path.CZ.OUT,paste(this.EEM.string,"Fig00C.JPG",sep="_"),sep="/")
if (file.exists(FL.Fig00C.JPG)){print(paste(FL.Fig00C.JPG,"exist. Delete it!"));file.remove(FL.Fig00C.JPG)}
jpeg(file = FL.Fig00C.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.2years)
dev.off(4)
#
# delete the [myData.merged.long], [myData.4.weeklyLong] and [myData.4.dailyLong] which conists of two years of data
#
rm(myData.merged.long,myData.4.weeklyLong,myData.4.dailyLong)
cat(paste("14B. [",this.CZ,"]-[",this.EEM.name,"]: delete [myData.merged.long], [myData.4.weeklyLong], [myData.4.dailyLong].\n",sep=""))
cat(paste("14B. [",this.CZ,"]-[",this.EEM.name,"]: delete [myData.merged.long], [myData.4.weeklyLong], [myData.4.dailyLong].\n",sep=""),file=FL.LOG,append=TRUE)
# **************************************************************************************************************************************************************************
# re-construct a data set by using three weeks of data from EEM and one week of data from baseline (i.e., every four weeks has three weeks of EEM and one week of baseline)
# week.idx (4,8,12,16,20,24,28,32,36,40,44,48,52) from baseline
# **************************************************************************************************************************************************************************
myData.Fake <- myData.advn # initialize [myData.Fake] with the EEM data
idx.weekday <- unique(myData.Fake[myData.Fake[,"day.type.lab"]=="Weekday","day.in.year"]) # day index of all weekdays
random.52 <- sample(idx.weekday,52,replace=FALSE) # randomly draw 52 day to represent one weekday in a week on average
remain.200 <- idx.weekday[!(idx.weekday %in% random.52)] # the remianing 200 weekdays from the baseline can be used to test the classification
myData.Fake[myData.Fake[,"day.in.year"] %in% random.52,] <- myData.base[myData.base[,"day.in.year"] %in% random.52,] # replace those selected day inthe advn time series with those from baseline series
myData.Remain <- myData.base[myData.base[,"day.in.year"] %in% remain.200,] # use the remaining 200 weekday for testing the classifiers
myData.Fake[,"week.idx.in.Month"] <- factor(paste("Week",myData.Fake[,"week.idx.in.month"],sep="")) # add "week.idx.in.Month" for plotting facet label purpose
myData.Remain[,"week.idx.in.Month"] <- factor(paste("Week",myData.Remain[,"week.idx.in.month"],sep="")) # add "week.idx.in.Month" for plotting facet label purpose
myData.Fake[,"week.idx.in.Year"] <- factor(paste("Week",myData.Fake[,"week.idx"],sep=""),levels=paste("Week",seq(1:53),sep=""),labels=paste("Week",seq(1:53),sep=""),ordered = TRUE) # add "week.idx.in.Year" for plotting facet label purpose
myData.Remain[,"week.idx.in.Year"] <- factor(paste("Week",myData.Remain[,"week.idx"],sep=""),levels=paste("Week",seq(1:53),sep=""),labels=paste("Week",seq(1:53),sep=""),ordered = TRUE) # add "week.idx.in.Year" for plotting facet label purpose
myData.Fake[,"Janitor"] <- myData.Fake[,"EEM"]
myData.Fake[,"Janitor"] <- sub("Elec_00_base","B",sub(this.EEM.name,"A",myData.Fake[,"Janitor"]))
myData.Remain[,"Janitor"] <- myData.Remain[,"EEM"]
myData.Remain[,"Janitor"] <- sub("Elec_00_base","B",sub(this.EEM.name,"A",myData.Remain[,"Janitor"]))
cat(paste("14C. [",this.CZ,"]-[",this.EEM.name,"]: construct [myData.Fake] by using data from EEM by randomly replaced 52 days from the baseline weekday data and put remaining 200 weekdays of baseline into [myData.Remain].\n\n",sep=""))
cat(paste("14C. [",this.CZ,"]-[",this.EEM.name,"]: construct [myData.Fake] by using data from EEM by randomly replaced 52 days from the baseline weekday data and put remaining 200 weekdays of baseline into [myData.Remain].\n\n",sep=""),file=FL.LOG,append=TRUE)
# ********************************************
# ********************************************
# ********************************************
# ********************************************
# ********************************************
# [myData.Fake] has been created!!!!!
# ********************************************
# ********************************************
# ********************************************
# ********************************************
# ********************************************
#
# 15. output: immediately save out
# April 3, 2015: although we only have [myData.Fake] and all other data frames are identical to this one, to minimize the script change, I still duplicate them!!!!
#
save(myData.base,myData.advn,myData.Fake,file=FL.OUT.OBJ)
cat(paste("[",this.CZ,"]-[",this.EEM.name,"]: fabricated data,",sep=""),file=FL.OUT.CSV,append=TRUE)
write.table(myData.Fake,file=FL.OUT.CSV,sep=",",row.names=TRUE,col.names=TRUE,append=TRUE)
cat("\n\n",file=FL.OUT.CSV,append=TRUE)
cat(paste("[",this.CZ,"]-[",this.EEM.name,"]: remaining 200 weekdays from baseline for verification of classifiers,",sep=""),file=FL.OUT.CSV,append=TRUE)
write.table(myData.Remain,file=FL.OUT.CSV,sep=",",row.names=TRUE,col.names=TRUE,append=TRUE)
cat("\n\n",file=FL.OUT.CSV,append=TRUE)
cat(paste("15. [",this.CZ,"]-[",this.EEM.name,"]: save the fabricated data [myData.Fake] and remaining 200 weekdays from baseline in [myData.Remain] out into [",FL.OUT.OBJ,"] and [",FL.OUT.CSV,"].\n",sep=""))
cat(paste("15. [",this.CZ,"]-[",this.EEM.name,"]: save the fabricated data [myData.Fake] and remaining 200 weekdays from baseline in [myData.Remain] out into [",FL.OUT.OBJ,"] and [",FL.OUT.CSV,"].\n",sep=""),file=FL.LOG,append=TRUE)
# ----------------------------------------------------------------------------------------------
# 17. compare the weather data between EplusOut and the epw read in
# ----------------------------------------------------------------------------------------------
# T dryBulb from E+ output file
myTdryBulb.eplus <- myData.Fake
# T dryBulb from the epw weather file
myTdryBulb.epw <- myTdryBulb.eplus
myTdryBulb.epw[,"T.dryBulb"] <- myEPW.thisCZ[,"epw.T.drybulb"] # replace with the reading T dryBulb from epw weather file
# the difference between E+ and EPW
myTdryBulb.diff <- myTdryBulb.eplus
myTdryBulb.diff[,"T.dryBulb"] <- myTdryBulb.eplus[,"T.dryBulb"] - myTdryBulb.epw[,"T.dryBulb"]
# percentage difference between Eplus out and EPW input
perc.diff <- 100*(myTdryBulb.diff[,"T.dryBulb"] / myTdryBulb.epw[,"T.dryBulb"])
# add an "ID" to them
myTdryBulb.eplus[,"ID"] <- "eplus"
myTdryBulb.epw[,"ID"] <- "epw"
myTdryBulb.diff[,"ID"] <- "diff (eplus-epw)"
# Build a wide comparison table: EnergyPlus-reported dry-bulb T vs the epw-file
# dry-bulb T, plus their absolute difference and percentage difference columns.
myTdryBulb.wide <- cbind(myTdryBulb.eplus,myTdryBulb.eplus[,"T.dryBulb"],myTdryBulb.epw[,"T.dryBulb"],myTdryBulb.diff[,"T.dryBulb"],perc.diff)
names(myTdryBulb.wide)<- c(names(myTdryBulb.eplus),"T.eplus","T.epw","T.diff","T.diff%")
# long format for plotting (stacks the eplus / epw / diff series; an [ID] column
# presumably distinguishes the three series -- TODO confirm upstream)
myTdryBulb.long <- rbind(myTdryBulb.eplus,myTdryBulb.epw,myTdryBulb.diff)
# plotting: daily T profiles faceted by month (rows) x day (columns)
plot.TdryBulb <- qplot(data=myTdryBulb.long,x=hour,y=T.dryBulb,group=ID,color=ID,facets=month.lab~day,geom="line")
plot.TdryBulb <- plot.TdryBulb + theme(legend.position="top")
plot.TdryBulb <- plot.TdryBulb + labs(title=paste(paste("EPW vs EPLUS: in Each Month of (",this.CZ,")",sep="")))
# red zero line: the diff series should hug this line if epw and E+ agree
plot.TdryBulb <- plot.TdryBulb + geom_hline(aes(yintercept = 0),linetype=14,colour="red")
# actual plotting; device 3 is assumed to be an on-screen device opened earlier
dev.set(3)
plot(plot.TdryBulb)
# same comparison, but one facet per week of the year
p.wk.TdryBulb <- qplot(data=myTdryBulb.long,x=hour.in.week,y=T.dryBulb,group=ID,color=ID,facets=~week.idx,geom="line")
p.wk.TdryBulb <- p.wk.TdryBulb + theme(legend.position="top")
p.wk.TdryBulb <- p.wk.TdryBulb + labs(title=paste(paste("EPW vs EPLUS: in Each Month of (",this.CZ,")",sep="")))
p.wk.TdryBulb <- p.wk.TdryBulb + geom_hline(aes(yintercept = 0),linetype=14,colour="red")
dev.set(3)
plot(p.wk.TdryBulb)
# progress messages are written twice: once to the console, once to the log file
cat(paste("17. [",this.CZ,"]-[",this.EEM.name,"]: plot the dry bulb T data.\n",sep=""))
cat(paste("17. [",this.CZ,"]-[",this.EEM.name,"]: plot the dry bulb T data.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------------------------
# 18. make epw weather data ready for cluster results plotting [myWeather.wide]
# --------------------------------------------------------------------------------
# append the selected epw weather fields to the T comparison table
myWeather.wide <- cbind(myTdryBulb.wide,myEPW.thisCZ[,field.used.short])
cat(paste("18A. [",this.CZ,"]-[",this.EEM.name,"]: prepare [myWeather.wide] which consists of the weather parameter in the epw file.\n",sep=""))
cat(paste("18A. [",this.CZ,"]-[",this.EEM.name,"]: prepare [myWeather.wide] which consists of the weather parameter in the epw file.\n",sep=""),file=FL.LOG,append=TRUE)
# output the weather file in csv for further checking
# (the leading "," keeps the header row aligned with the row-name column)
cat(",",file=FL.weather,append=TRUE)
write.table(myTdryBulb.wide,file=FL.weather,sep=",",row.names=TRUE,col.names=TRUE)
cat(paste("18B. [",this.CZ,"]-[",this.EEM.name,"]: Output [myTdryBulb.wide] consists of T drybuld from epw and from E+ out and their differences are checked.\n",sep=""))
cat(paste("18B. [",this.CZ,"]-[",this.EEM.name,"]: Output [myTdryBulb.wide] consists of T drybuld from epw and from E+ out and their differences are checked.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 19. manually specify a scaling factor in order to use ggplot2 to plot T as well.
# --------------------------------------------------------------
# scale T.dryBulb up so it is visible on the EnergyGJ axis in combined plots
scaling.factor <- (max(myData.Fake[,c("EnergyGJ")]) / max(myData.Fake[,c("T.dryBulb")])) * 2
cat(paste("19A. [",this.CZ,"]-[",this.EEM.name,"]: manually specifying a scaling factor in order to plot Energy and Weather T drybulb in the same plots.\n",sep=""))
cat(paste("19A. [",this.CZ,"]-[",this.EEM.name,"]: manually specifying a scaling factor in order to plot Energy and Weather T drybulb in the same plots.\n",sep=""),file=FL.LOG,append=TRUE)
#
# the following code may not be used at all
#
# stack scaled T.dryBulb and EnergyGJ into one long frame keyed by [Variable]
myTmp1 <- myData.Fake[,c("Date.Time","T.dryBulb","EEM")]
myTmp1[,"T.dryBulb"] <- myTmp1[,"T.dryBulb"] * scaling.factor
myTmp1[,"ID"] <- "T.dryBulb (scaled)"
names(myTmp1) <- sub("T.dryBulb","value",names(myTmp1))
myTmp2 <- myData.Fake[,c("Date.Time","EnergyGJ","EEM")]
myTmp2[,"ID"] <- "EnergyGJ"
names(myTmp2) <- sub("EnergyGJ","value",names(myTmp2))
myTmp3 <- rbind(myTmp1,myTmp2)
myTmp3[,"Variable"] <- paste(myTmp3[,"EEM"],myTmp3[,"ID"],sep="_")
cat(paste("19B. [",this.CZ,"]-[",this.EEM.name,"]: do not remember what they are for.\n",sep=""))
cat(paste("19B. [",this.CZ,"]-[",this.EEM.name,"]: do not remember what they are for.\n",sep=""),file=FL.LOG,append=TRUE)
# ----------------------------------------------------------------------------------------
# PLOTTING RAW DATA ..................
# ----------------------------------------------------------------------------------------
# --------------------------------------------------------------
# 20A. plot weekly plot in each month
# --------------------------------------------------------------
# one facet per (month, week-of-month); [Janitor] presumably distinguishes the
# baseline and EEM series -- TODO confirm where it is assigned upstream
p.weekly.all.op1 <- qplot(data=myData.Fake,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~week.idx.in.Month,geom="line")
p.weekly.all.op1 <- p.weekly.all.op1 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.weekly.all.op1 <- p.weekly.all.op1 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\n",sep=""))
# only label hours 48/96/144 to keep the crowded x axis readable
p.weekly.all.op1 <- p.weekly.all.op1 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(2)
plot(p.weekly.all.op1)
cat(paste("20A. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly profile for each month.\n",sep=""))
cat(paste("20A. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly profile for each month.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 20AA. plot weekly plot in each month (showing only weekdays)
# --------------------------------------------------------------
# blank out (NaN) the energy of non-weekday rows so only weekdays are drawn;
# the copy keeps myData.Fake itself untouched
myData.Fake.Weekday <- myData.Fake
myData.Fake.Weekday[!(myData.Fake.Weekday[,"day.type.lab"]=="Weekday"),"EnergyGJ"] <- NaN
p.weekly.wkday.op1 <- qplot(data=myData.Fake.Weekday,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~week.idx.in.Month,geom="line")
p.weekly.wkday.op1 <- p.weekly.wkday.op1 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.weekly.wkday.op1 <- p.weekly.wkday.op1 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\nWeekday Only",sep=""))
p.weekly.wkday.op1 <- p.weekly.wkday.op1 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(2)
plot(p.weekly.wkday.op1)
cat(paste("20AA. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly profile for each month.\n",sep=""))
cat(paste("20AA. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly profile for each month.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 20B. plot weekly plot in conseccutive weeks
# --------------------------------------------------------------
# same data as 20A, but one facet per week of the year (consecutive weeks)
p.weekly.all.op2 <- qplot(data=myData.Fake,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=~week.idx.in.Year,geom="line")
p.weekly.all.op2 <- p.weekly.all.op2 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.weekly.all.op2 <- p.weekly.all.op2 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\n",sep=""))
p.weekly.all.op2 <- p.weekly.all.op2 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(2)
plot(p.weekly.all.op2)
cat(paste("20B. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly plot in conseccutive weeks.\n",sep=""))
cat(paste("20B. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly plot in conseccutive weeks.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 20BB. plot weekly plot in conseccutive weeks
# --------------------------------------------------------------
# weekday-only variant of 20B
p.weekly.wkday.op2 <- qplot(data=myData.Fake.Weekday,x=hour.in.week,y=EnergyGJ,group=Janitor,color=Janitor,facets=~week.idx.in.Year,geom="line")
p.weekly.wkday.op2 <- p.weekly.wkday.op2 + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.weekly.wkday.op2 <- p.weekly.wkday.op2 + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\nWeekday Only",sep=""))
p.weekly.wkday.op2 <- p.weekly.wkday.op2 + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(2)
plot(p.weekly.wkday.op2)
cat(paste("20BB. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly plot in conseccutive weeks.\n",sep=""))
cat(paste("20BB. [",this.CZ,"]-[",this.EEM.name,"]: plot weekly plot in conseccutive weeks.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 20C. plot daily plot in each month
# --------------------------------------------------------------
# did not color differently
# NOTE(review): inside qplot(), color="black" is treated as an aesthetic label,
# not as the literal color -- hence the ggplot() re-draw a few lines below
p.daily.all.op1 <- qplot(data=myData.Fake,x=hour,y=EnergyGJ,color="black",facets=month.lab~day,geom="line")
p.daily.all.op1 <- p.daily.all.op1 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.all.op1 <- p.daily.all.op1 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\n",sep=""))
p.daily.all.op1 <- p.daily.all.op1 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.all.op1)
cat(paste("20C. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month\n",sep=""))
cat(paste("20C. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month.\n",sep=""),file=FL.LOG,append=TRUE)
# May 1, 2015: to control the color use ggplot instead of qplot
# (this overwrites p.daily.all.op1 with a truly single-color black version)
p.daily.all.op1 <- ggplot(data=myData.Fake,aes(x=hour,y=EnergyGJ)) + geom_line(color="black") + facet_grid(month.lab~day)
p.daily.all.op1 <- p.daily.all.op1 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.all.op1 <- p.daily.all.op1 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\n",sep=""))
p.daily.all.op1 <- p.daily.all.op1 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.all.op1)
# colored differently for EEM and baseline
p.daily.all.op2 <- qplot(data=myData.Fake,x=hour,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~day,geom="line")
p.daily.all.op2 <- p.daily.all.op2 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.all.op2 <- p.daily.all.op2 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\n",sep=""))
p.daily.all.op2 <- p.daily.all.op2 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.all.op2)
cat(paste("20C. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month\n",sep=""))
cat(paste("20C. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# 20CC. plot daily plot in each month
# --------------------------------------------------------------
# did not color differently (weekday-only data prepared in step 20AA)
p.daily.wkday.op1 <- qplot(data=myData.Fake.Weekday,x=hour,y=EnergyGJ,group=EEM,color="black",facets=month.lab~day,geom="line")
p.daily.wkday.op1 <- p.daily.wkday.op1 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.wkday.op1 <- p.daily.wkday.op1 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\nWeekday Only",sep=""))
p.daily.wkday.op1 <- p.daily.wkday.op1 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.wkday.op1)
# May 1, 2015: to control the color use ggplot instead of qplot
p.daily.wkday.op1 <- ggplot(data=myData.Fake.Weekday,aes(x=hour,y=EnergyGJ,group=EEM)) + geom_line(color="black") + facet_grid(month.lab~day)
p.daily.wkday.op1 <- p.daily.wkday.op1 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.wkday.op1 <- p.daily.wkday.op1 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\nWeekday Only",sep=""))
p.daily.wkday.op1 <- p.daily.wkday.op1 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.wkday.op1)
cat(paste("20CC. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month\n",sep=""))
cat(paste("20CC. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month.\n",sep=""),file=FL.LOG,append=TRUE)
# colored differently for EEM and baseline
p.daily.wkday.op2 <- qplot(data=myData.Fake.Weekday,x=hour,y=EnergyGJ,group=Janitor,color=Janitor,facets=month.lab~day,geom="line")
p.daily.wkday.op2 <- p.daily.wkday.op2 + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.daily.wkday.op2 <- p.daily.wkday.op2 + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("Raw Data: Synthetized uses Eplus annual simulation data of a baseline and an EEM model\nWeekday Only",sep=""))
p.daily.wkday.op2 <- p.daily.wkday.op2 + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(2)
plot(p.daily.wkday.op2)
cat(paste("20CCC. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month\n",sep=""))
cat(paste("20CCC. [",this.CZ,"]-[",this.EEM.name,"]: plot daily plot in each month.\n",sep=""),file=FL.LOG,append=TRUE)
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# Making JPEG PLOTS
# April 5, 2015: plot [p.weekly.all.op1], [p.weekly.wkday.op1], [p.weekly.all.op2], [p.weekly.wkday.op2], [p.daily.all.op1], [p.daily.all.op2], [p.daily.wkday.op1], [p.daily.wkday.op2] as JPEG plots into
# [FL.Fig01A.JPG], [FL.Fig01B.JPG], [FL.Fig02A.JPG], [FL.Fig02B.JPG], [FL.Fig03A.JPG], [FL.Fig03B.JPG], [FL.Fig03C.JPG], [FL.Fig03D.JPG]
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# pattern repeated for every figure: build the output path, delete any stale
# file, open a jpeg device, re-plot the already-built ggplot object, close it
FL.Fig01A.JPG <- paste(Path.CZ.OUT,paste("Fig01A_FakedOneYear_Week_Month_002.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig01A.JPG)){print(paste(FL.Fig01A.JPG,"exist. Delete it!"));file.remove(FL.Fig01A.JPG)}
jpeg(file = FL.Fig01A.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.all.op1)
dev.off(4)
FL.Fig01B.JPG <- paste(Path.CZ.OUT,paste("Fig01B.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig01B.JPG)){print(paste(FL.Fig01B.JPG,"exist. Delete it!"));file.remove(FL.Fig01B.JPG)}
jpeg(file = FL.Fig01B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.wkday.op1)
dev.off(4)
FL.Fig02A.JPG <- paste(Path.CZ.OUT,paste("Fig02A.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig02A.JPG)){print(paste(FL.Fig02A.JPG,"exist. Delete it!"));file.remove(FL.Fig02A.JPG)}
jpeg(file = FL.Fig02A.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.all.op2)
dev.off(4)
FL.Fig02B.JPG <- paste(Path.CZ.OUT,paste("Fig02B.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig02B.JPG)){print(paste(FL.Fig02B.JPG,"exist. Delete it!"));file.remove(FL.Fig02B.JPG)}
jpeg(file = FL.Fig02B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.wkday.op2)
dev.off(4)
# Color the same for EEM and baseline
FL.Fig03A.JPG <- paste(Path.CZ.OUT,paste("Fig03A_FakedOneYear_Day_Month_OneColor_004.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig03A.JPG)){print(paste(FL.Fig03A.JPG,"exist. Delete it!"));file.remove(FL.Fig03A.JPG)}
jpeg(file = FL.Fig03A.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.all.op1)
dev.off(4)
# Fig03AA: duplicate export of the one-color daily plot under a "_015" file name.
FL.Fig03AA.JPG <- paste(Path.CZ.OUT,paste("Fig03AA_FakedOneYear_Day_Month_OneColor_015.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig03AA.JPG)){print(paste(FL.Fig03AA.JPG,"exist. Delete it!"));file.remove(FL.Fig03AA.JPG)}
# BUG FIX: the jpeg device previously opened FL.Fig03A.JPG (copy/paste slip),
# which silently overwrote Fig03A and never created the Fig03AA file at all.
jpeg(file = FL.Fig03AA.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.all.op1)
dev.off(4)
# ********************************************************************************************************
# color differently for EEM and Baseline
# Fig03B: two-color daily plot (baseline vs EEM)
FL.Fig03B.JPG <- paste(Path.CZ.OUT,paste("Fig03B_FakedOneYear_Day_Month_003.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig03B.JPG)){print(paste(FL.Fig03B.JPG,"exist. Delete it!"));file.remove(FL.Fig03B.JPG)}
jpeg(file = FL.Fig03B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.all.op2)
dev.off(4)
# Color the same for EEM and baseline
# Fig03C: one-color weekday-only daily plot
FL.Fig03C.JPG <- paste(Path.CZ.OUT,paste("Fig03C_FakedOneYear_WeekdayOnly_OneColor_005.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig03C.JPG)){print(paste(FL.Fig03C.JPG,"exist. Delete it!"));file.remove(FL.Fig03C.JPG)}
jpeg(file = FL.Fig03C.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.wkday.op1)
dev.off(4)
# ********************************************************************************************************
# color differently for EEM and Baseline
# Fig03D: two-color weekday-only daily plot
FL.Fig03D.JPG <- paste(Path.CZ.OUT,paste("Fig03D.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig03D.JPG)){print(paste(FL.Fig03D.JPG,"exist. Delete it!"));file.remove(FL.Fig03D.JPG)}
jpeg(file = FL.Fig03D.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.wkday.op2)
dev.off(4)
cat(paste("20DD. [",this.CZ,"]-[",this.EEM.name,"]: the plots are re-plotted to JPEG files\n",sep=""))
# FIX: the log-file message was tagged "20CC" while the console message said
# "20DD"; both now carry the same step tag so the log can be grepped reliably.
cat(paste("20DD. [",this.CZ,"]-[",this.EEM.name,"]: the plots are re-plotted to JPEG files\n",sep=""),file=FL.LOG,append=TRUE)
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# DATA MINING ..................
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# 21. to avoid worngly taking existing data frame,
# ---------------------------------------------------------------------------------
# delete any leftover working frames from a previous subset / EEM iteration
df_name <- "class.days"; if (exists(df_name) && is.data.frame(get(df_name))){rm("class.days")}
df_name <- "myData.Work.long"; if (exists(df_name) && is.data.frame(get(df_name))){rm("myData.Work.long")}
df_name <- "myData.Work.wide"; if (exists(df_name) && is.data.frame(get(df_name))){rm("myData.Work.wide")}
cat(paste("21. [",this.CZ,"]-[",this.EEM.name,"]: to avoid worngly taking existing data frame, delete them first.\n",sep=""))
cat(paste("21. [",this.CZ,"]-[",this.EEM.name,"]: to avoid worngly taking existing data frame, delete them first..\n",sep=""),file=FL.LOG,append=TRUE)
# for (this.subset in c("artificial1","artificial2","artificial3","July","July-August","WeekDay","All Data","Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Holiday"))
# for (this.subset in c("artificial1","artificial2","artificial3","July","July-August","July-Weekday","July-August-Weekday","WeekDay","Sunday","Saturday","Holiday"))
# counts non-"artificial" subsets processed; used to initialize/merge
# [myCluster.summary] inside the cluster-count selection below
count.subset <- 0
# run the whole mining pipeline once per data subset
for (this.subset in c("WeekDay","AllData"))
{
# NbClust configuration: evaluate all indices, then a short list is consulted
idx.name <- "all"
idx.name.selected <- c("Hartigan","Duda","PseudoT2","Beale","TraceW")
if (this.subset == "AllData")
{
myData.Work.long <- subset(myData.Fake, select = c("T.dryBulb","EnergyGJ","EEM","hour","date","day.week.lab","day.type.lab","week.idx","hour.in.week"))
min.nc <- 2
max.nc <- 20
}else if (this.subset == "WeekDay")
{
myData.Work.long <- subset(myData.Fake,subset=(day.type.lab == "Weekday"), select = c("T.dryBulb","EnergyGJ","EEM","hour","date","day.week.lab","day.type.lab","week.idx","hour.in.week"))
min.nc <- 2
max.nc <- 20
}
cat(paste("\n\n22. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: [myData.Work.long] is a subset of [myData.Fake] used for data mining.\n",sep=""))
cat(paste("\n\n22. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: [myData.Work.long] is a subset of [myData.Fake] used for data mining.\n",sep=""),file=FL.LOG,append=TRUE)
# NOTE(review): var.intact / var.expand appear unused, but var.EnergyGJ and
# var.TdryBulb ARE used as value.var in the dcast calls below
var.intact <- c("date","hour") # fields will not be changed
var.expand <- c("EEM") # fields will be used to expand
var.EnergyGJ <- c("EnergyGJ.norm") # fields of the measurement
var.TdryBulb <- c("T.dryBulb.norm") # fields of the measurement
# -----------------------------------------------------------------
# 25. normalize the energy consumption data in [myData.Work.long]
# -----------------------------------------------------------------
# min-max normalize both energy and dry-bulb T to [0,1]; the min/max values
# are kept so the normalization can be undone later (operation-hour step)
min.GJ <- min(myData.Work.long[,"EnergyGJ"])
max.GJ <- max(myData.Work.long[,"EnergyGJ"])
min.Td <- min(myData.Work.long[,"T.dryBulb"])
max.Td <- max(myData.Work.long[,"T.dryBulb"])
myData.Work.long[,"EnergyGJ.norm"] <- (myData.Work.long[,"EnergyGJ"] - min.GJ) / (max.GJ - min.GJ)
myData.Work.long[,"T.dryBulb.norm"] <- (myData.Work.long[,"T.dryBulb"] - min.Td) / (max.Td - min.Td)
cat(paste("25. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: normalize the data in [myData.Work.long].\n",sep=""))
cat(paste("25. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: normalize the data in [myData.Work.long]..\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------
# 26. [myData.Work.wide] for data mining the daily profiles
# -----------------------------------------------------------------
# reshape long -> wide: one row per (date, EEM), one column per hour of the day
myData.Work.wide.EnergyGJ <- dcast(myData.Work.long,date + EEM ~ hour,value.var = var.EnergyGJ)
myData.Work.wide.TdryBulb <- dcast(myData.Work.long,date + EEM ~ hour,value.var = var.TdryBulb)
cat(paste("26. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: turn the long format to a wide format having 24 hour a day.\n",sep=""))
cat(paste("26. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: turn the long format to a wide format having 24 hour a day.\n",sep=""),file=FL.LOG,append=TRUE)
# 27. use [date] as row names (parsed from "%m/%d/%y" strings)
row.names(myData.Work.wide.EnergyGJ) <- as.Date(myData.Work.wide.EnergyGJ[,"date"],"%m/%d/%y")
row.names(myData.Work.wide.TdryBulb) <- as.Date(myData.Work.wide.TdryBulb[,"date"],"%m/%d/%y")
cat(paste("27. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use date as row names.\n",sep=""))
cat(paste("27. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use date as row names.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 28. use "GJ.h" for fields name in [myData.Work.wide.EnergyGJ]
# ---------------------------------------------------------------------------------
# prefix the hourly columns with "GJ.h" while keeping date/EEM first
field1 <- c("date","EEM")
field2 <- names(myData.Work.wide.EnergyGJ)[!(names(myData.Work.wide.EnergyGJ) %in% field1)]
myData.GJ.part1 <- myData.Work.wide.EnergyGJ[,field1]
myData.GJ.part2 <- myData.Work.wide.EnergyGJ[,field2]
myData.Work.wide.EnergyGJ <- cbind(myData.GJ.part1,myData.GJ.part2)
names(myData.Work.wide.EnergyGJ) <- c(field1,paste("GJ.h",field2,sep=""))
cat(paste("28. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: a re-organization on the GJ data.\n",sep=""))
cat(paste("28. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: a re-organization.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 29. use "T.h" for fields name in [myData.Work.wide.TdryBulb]
# ---------------------------------------------------------------------------------
# same renaming for the dry-bulb T wide table ("T.h" prefix)
field1 <- c("date","EEM")
field2 <- names(myData.Work.wide.TdryBulb)[!(names(myData.Work.wide.TdryBulb) %in% field1)]
myData.T.part1 <- myData.Work.wide.TdryBulb[,field1]
myData.T.part2 <- myData.Work.wide.TdryBulb[,field2]
myData.Work.wide.TdryBulb <- cbind(myData.T.part1,myData.T.part2)
names(myData.Work.wide.TdryBulb) <- c(field1,paste("T.h",field2,sep=""))
cat(paste("29. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: a re-organization.\n",sep=""))
cat(paste("29. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: a re-organization.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 30. decide if we want to merge GJ and T of the 24 hours OR
# just use GJ data
# ---------------------------------------------------------------------------------
# myData.Work.wide <- merge(myData.Work.wide.EnergyGJ,myData.Work.wide.TdryBulb)
# currently clustering uses the energy profile only (T columns are NOT merged in)
myData.Work.wide <- myData.Work.wide.EnergyGJ
cat(paste("30. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use the date as row name for the data frame.\n",sep=""))
cat(paste("30. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use the date as row name for the data frame.\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------
# 31. [myData.Work.long] and [myData.Work.wide] are ready
# -----------------------------------------------------------------
# convert [date] from "%m/%d/%y" strings to Date class in both frames
myData.Work.long[,"date"] <- as.Date(myData.Work.long[,"date"],"%m/%d/%y")
myData.Work.wide[,"date"] <- as.Date(myData.Work.wide[,"date"],"%m/%d/%y")
cat(paste("31. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: have the [date] field in [myData.Work.long] and [myData.Work.wide].\n",sep=""))
cat(paste("31. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: have the [date] field in [myData.Work.long] and [myData.Work.wide].\n",sep=""),file=FL.LOG,append=TRUE)
# 32. create a mapping table between [date] and [day.type.lab] and [day.week.lab]
# one row per day (hour == 0) so later by-date lookups are unambiguous
# FIX: "date" was listed twice in the column selection, which silently produced
# a duplicated "date.1" column in the mapping table; the duplicate is removed.
myData.dayMapping <- myData.Work.long[myData.Work.long[,"hour"] == 0,c("EEM","date","day.week.lab","day.type.lab","week.idx")]
row.names(myData.dayMapping) <- myData.dayMapping[,"date"]
cat(paste("32. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use the date as row name for the data frame.\n",sep=""))
cat(paste("32. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: use the date as row name for the data frame.\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------
# 33. [myData.Work] is the one used for data mining, consist of only variables
# -----------------------------------------------------------------
# split off the identifier columns (date, EEM) from the 24 hourly features;
# feature columns get an extra "hour" prefix (e.g. "hourGJ.h0")
myDate <- myData.Work.wide[,c(1,2)]
myData.Work <- myData.Work.wide[,-c(1,2)]
names(myData.Work) <- paste("hour",names(myData.Work),sep="")
cat(paste("33. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: only keep the data for Data Mining.\n",sep=""))
cat(paste("33. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: only keep the data for Data Mining.\n",sep=""),file=FL.LOG,append=TRUE)
# ==============================================================================================
# April 5, 2015: calculate the starting and ending hour based on the largest slope
# ==============================================================================================
# find out the mean difference in two consecutive hours in the normalized data
# and the sd difference in two consecutive hours in the normalized data
Tmp.norm.diff <- myData.Work[,2:24] - myData.Work[,1:23]
Tmp.norm.diff.mean <- apply(Tmp.norm.diff,1,mean)
Tmp.norm.diff.sd <- apply(Tmp.norm.diff,1,sd)
# find the sharpest difference forward and backward in the original unit and take them as the start and end of the business hours.
# undo the [0,1] normalization back to GJ before looking for the steepest ramps
Tmp.intact <- myData.Work * (max.GJ - min.GJ) + min.GJ
Tmp.origin <- Tmp.intact[,1:23]
Tmp.offset <- Tmp.intact[,2:24]
Tmp.diff <- Tmp.offset - Tmp.origin
# start = hour of the steepest ramp-up; end = hour of the steepest ramp-down
hour.start <- apply(Tmp.diff,1,which.max)
hour.end <- apply(Tmp.diff,1,which.min) + 1 # plus 1 is because the end time should counting backward
hour.op <- hour.end - hour.start
# for some daily profile close to zero, there is no obvious start and end, which may lead to negative operation hours.
# correct such negative operation hours
# NOTE(review): sample() is used without a fixed seed, so these fallback start
# hours differ between runs -- confirm this non-reproducibility is acceptable
no.neg <- length(hour.op[hour.op < 0])
hour.start[hour.op < 0] <- sample(c(1:24),no.neg,replace=TRUE)
hour.end[hour.op < 0] <- hour.start[hour.op < 0]
hour.op[hour.op < 0] <- 0
cat(paste("33A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: figure out the operation hours.\n",sep=""))
cat(paste("33A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: figure out the operation hours.\n",sep=""),file=FL.LOG,append=TRUE)
# plotting the daily profile with the starting and ending hours marked as vertical lines
FL.TBD.PDF <- paste(Path.CZ.OUT,paste("CheckOperationHour",this.CZ,this.EEM.name,this.subset,".PDF",sep="_"),sep="/")
pdf(FL.TBD.PDF,paper="special", width=17, height=11,bg = "transparent")
dev.set(4)
# one page per day: the normalized 24-h profile plus start/end hour markers
for (idx in seq_len(dim(myData.Work)[1]))
{
this.line <- myData.Work[idx,]
this.plot <- myDate[idx,"EEM"]
# pick a line color per series; default to black for any other EEM label so
# this.color is always defined (previously a stale value could leak through)
if (this.plot == "Elec_04_HVACfixed")
{
this.color <- "red"
}else if (this.plot == "Elec_00_base")
{
this.color <- "blue"
}else
{
this.color <- "black"
}
# BUG FIX: col=this.plot passed the EEM name string to plot(), so the computed
# this.color above was never used; now the intended color is applied
plot(as.numeric(this.line),col=this.color,type="b",ylim=c(0,1))
abline(v=c(hour.start[idx],hour.end[idx]))
}
dev.off(4)
cat(paste("33B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: the starting / ending hours are calculated for each day.\n",sep=""))
cat(paste("33B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: the starting / ending hours are calculated for each day.\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------
# 34. PAM
# -----------------------------------------------------------------
no.object <- dim(myData.Work)[1]
no.variable <- dim(myData.Work)[2]
# only cluster when there are at least as many objects (days) as variables (hours)
if (no.object >= no.variable)
{
# Use NbClust to determine the best number of clusters
# the distance measure to be used to compute the dissimilarity matrix. This must be one of: "euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski" or "NULL". By default, distance="euclidean".
# the cluster analysis method to be used the cluster analysis method to be used. This should be one of: "ward.D","ward.D2", "single", "complete", "average", "mcquitty", "median", "centroid","kmeans".
dev.set(3) # sent the plot to screen no to the file
NbClust.model <- NbClust(myData.Work, diss=NULL,distance="euclidean",min.nc = min.nc,max.nc = max.nc,method=this.method.4.NbClust,index=idx.name)
cat(paste("34A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Using NbClust to determine the numbe rof clusters.\n",sep=""))
cat(paste("34A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Using NbClust to determine the numbe rof clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# choose the number of clusters from the NbClust recommendations
if (length(grep("artificial",this.subset)) > 0)
{
# artificial subsets: take the most frequently recommended cluster count
no.cls.best <- which.max(table(NbClust.model$Best.nc["Number_clusters",]))
}else{
count.subset <- count.subset + 1
arrays.clusters <- data.frame(NbClust.model$Best.nc)
# restrict to the short list of indices unless only one index is requested
if(length(idx.name.selected) == 1)
{
arrays.clusters.sub <- arrays.clusters
}else{
arrays.clusters.sub <- arrays.clusters[,idx.name.selected]
}
# no.cls.best <- which.max(table(NbClust.model$Best.nc["Number_clusters",]))
# take the largest cluster count recommended by any of the selected indices
no.cls.best <- max(arrays.clusters.sub["Number_clusters",],na.rm=TRUE)
# put the cluster number information into an data frame
tmp.df <- data.frame(t(arrays.clusters))
names(tmp.df) <- c(paste("Number_Clusters(",this.subset,")",sep=""),paste("Value_Index(",this.subset,")",sep=""))
tmp.df[,"Index"] <- row.names(tmp.df)
# accumulate one summary column-pair per processed subset
if (count.subset == 1)
{
myCluster.summary <- tmp.df
}else{
myCluster.summary <- merge(myCluster.summary,tmp.df)
}
}
# enforce a minimum of 8 clusters
if (no.cls.best < 8){no.cls.best <- 8}
# manual overrides for specific CZ / EEM / subset combinations
if (((this.EEM.name == "Gas_04_HVACfixed") | (this.EEM.name == "Elec_04_HVACfixed")) & (this.subset == "WeekDay") & (this.CZ == "SanFrancisco"))
{
no.cls.best <- 4
}
# BUG FIX: this override tested this.subset == "AllDay", but the subset loop
# only produces "WeekDay"/"AllData", so the branch was dead code; label fixed.
if (((this.EEM.name == "Elec_04_HVACfixed")) & (this.subset == "AllData") & (this.CZ == "SanFrancisco"))
{
no.cls.best <- 8 # "Hartigan"
}
cat(paste("34B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Using NbClust to determine the numbe rof clusters.\n",sep=""))
cat(paste("34B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Using NbClust to determine the numbe rof clusters.\n",sep=""),file=FL.LOG,append=TRUE)
#
# PAM modeling
#
# partition the daily profiles into no.cls.best clusters (k-medoids / PAM)
myModel <- pam(myData.Work, no.cls.best,metric = "euclidean",medoids = NULL)
cls.Work <- (myModel)$clustering
cat(paste("34C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PAM modeling using the number of cluster selected by NbClust.\n",sep=""))
cat(paste("34C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PAM modeling using the number of cluster selected by NbClust.\n",sep=""),file=FL.LOG,append=TRUE)
# attach the operation-hour statistics computed earlier to the id + feature columns
myResults.Work <- cbind(myDate,myData.Work,Hour.Start = hour.start,Hour.End = hour.end,Hour.OP = hour.op,diff.mean = Tmp.norm.diff.mean,diff.sd = Tmp.norm.diff.sd)
cat(paste("36. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: combine clustering results with data.\n",sep=""))
cat(paste("36. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: combine clustering results with data.\n",sep=""),file=FL.LOG,append=TRUE)
# assign cluster label
myResults.Work[,"cls.Work"] <- cls.Work
cat(paste("37. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: combine clustering results with data.\n",sep=""))
cat(paste("37. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: combine clustering results with data.\n",sep=""),file=FL.LOG,append=TRUE)
#
# no.cls.best <- max(myResults.Work[,"cls.Work"])
# human-readable descriptions of the chosen clustering; presumably used in
# plot titles later in the script -- TODO confirm
thisLearn.string <- paste(this.method.4.cluster,": ",no.cls.best," clusters",sep="")
thisLearn.string.rev <- paste(no.cls.best," clusters",sep="")
cat(paste("41. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cls.Work].\n",sep=""))
cat(paste("41. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cls.Work].\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 42. create [class.days] data frame to record the date, type of the day, EEM and class label
# ---------------------------------------------------------------------------------
class.days <- myResults.Work[,c("date","cls.Work")]
row.names(class.days) <- class.days[,"date"]
# add "day.type" and "day.week" into [class.days]: look up the day-type label,
# day-in-week label and EEM tag by date from the mapping table
class.days[,"day.type.lab"] <- NA
class.days[,"day.week.lab"] <- NA
class.days[,"EEM"] <- NA
class.days[,"day.type.lab"] <- myData.dayMapping[as.character(class.days[,"date"]),"day.type.lab"]
class.days[,"day.week.lab"] <- myData.dayMapping[as.character(class.days[,"date"]),"day.week.lab"]
class.days[,"EEM"] <- myData.dayMapping[as.character(class.days[,"date"]),"EEM"]
# ordered factor "cls-<k>" so downstream plots/tables sort clusters numerically
cls.levels <- paste("cls",sort(unique(class.days[,"cls.Work"])),sep="-")
class.days[,"cluster"] <- factor(paste("cls",class.days[,"cls.Work"],sep="-"),levels = cls.levels,labels = cls.levels,ordered = TRUE)
cat(paste("clustering results on [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"],",sep=""),file=FL.OUT.CLSLAB,append=TRUE)
# fixed partially-matched argument name ("appen" -> "append"); behavior unchanged
write.table(class.days,file=FL.OUT.CLSLAB,sep=",",row.names=TRUE,col.names=TRUE,append=TRUE)
cat(paste("42. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: associate dates and class label.\n",sep=""))
cat(paste("42. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: associate dates and class label.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------
# 43. add a class label to [myData.Work.wide] and [myData.Work.long]
# ---------------------------------------------------------------------------------
# -999 initializes the column; the date-keyed lookup into class.days overwrites it
myData.Work.long[,"cls.Work"] <- -999
myData.Work.long[,"cls.Work"] <- class.days[as.character(myData.Work.long[,"date"]),"cls.Work"]
myData.Work.wide[,"cls.Work"] <- -999
myData.Work.wide[,"cls.Work"] <- class.days[as.character(myData.Work.wide[,"date"]),"cls.Work"]
# ordered "cls-<k>" factor, consistent with class.days
cls.levels <- paste("cls",sort(unique(myData.Work.long[,"cls.Work"])),sep="-")
myData.Work.long[,"cluster"] <- factor(paste("cls",myData.Work.long[,"cls.Work"],sep="-"),levels = cls.levels,labels = cls.levels,ordered = TRUE)
myData.Work.wide[,"cluster"] <- factor(paste("cls",myData.Work.wide[,"cls.Work"],sep="-"),levels = cls.levels,labels = cls.levels,ordered = TRUE)
# month/day parsed from "-"-separated dates (year-month-day layout)
myData.Work.long[,"month"] <- as.numeric(sub("(.*)-(.*)-(.*)","\\2",myData.Work.long[,"date"]))
myData.Work.long[,"day"] <- as.numeric(sub("(.*)-(.*)-(.*)","\\3",myData.Work.long[,"date"]))
myData.Work.long[,"month.lab"] <- factor(myData.Work.long[,"month"],levels = c(1,2,3,4,5,6,7,8,9,10,11,12),labels = c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"),ordered = TRUE)
# "Janitor" recodes the EEM tag to two anonymized labels: A = this EEM, B = baseline
myData.Work.long[,"Janitor"] <- myData.Work.long[,"EEM"]
myData.Work.long[,"Janitor"] <- sub("Elec_00_base","B",sub(this.EEM.name,"A",myData.Work.long[,"Janitor"]))
cat(paste("\n\nclustering results on [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"],",sep=""),file=FL.OUT.CLSDAT,append=TRUE)
# fixed partially-matched argument name ("appen" -> "append"); behavior unchanged
write.table(myData.Work.long,file=FL.OUT.CLSDAT,sep=",",row.names=TRUE,col.names=TRUE,append=TRUE)
cat(paste("43. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cls.Work] to [myData.Work.wide] and [myData.Work.long].\n",sep=""))
cat(paste("43. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cls.Work] to [myData.Work.wide] and [myData.Work.long].\n",sep=""),file=FL.LOG,append=TRUE)
#### #
#### # add an "ID" field to be used for combining with [myEpw.Cls]
#### #
#### myData.Work.long[,"ID"] <- paste(myData.Work.long[,"month"],myData.Work.long[,"day"],myData.Work.long[,"hour"],sep="-")
#### cat(paste("43B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [ID] to [myData.Work.wide] and [myData.Work.long] to be used for merging with [myEpw.Cls].\n",sep=""))
#### cat(paste("43B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [ID] to [myData.Work.wide] and [myData.Work.long] to be used for merging with [myEpw.Cls].\n",sep=""),file=FL.LOG,append=TRUE)
#
# tally the distribution of the type of days in each cluster
#
myData.4.tally <- myData.Work.long
# BUG FIX: dates in myData.Work.long are "-"-separated year-month-day (the
# month/day extraction above uses "(.*)-(.*)-(.*)" with \2/\3), so the previous
# "/"-separated pattern never matched and "year" silently became the whole
# date string. The year is therefore the FIRST capture group of the "-" pattern.
myData.4.tally[,"year"] <- sub("(.*)-(.*)-(.*)","\\1",myData.4.tally[,"date"])
# day counts (hour rows / 24) per cluster, split by year and day-in-week / day-type
myTally1 <- dcast(as.data.frame(table(myData.4.tally[,c("day.week.lab","cluster","year")])/24),cluster ~ year + day.week.lab)
myTally2 <- dcast(as.data.frame(table(myData.4.tally[,c("day.type.lab","cluster","year")])/24),cluster ~ year + day.type.lab)
cat(paste(no.cls.best,"clusters for [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"],",sep=""),file = FL.OUT.CLSSUM,append=TRUE)
write.table(cbind(myTally1,myTally2),sep=",",row.names=TRUE,col.names=TRUE,file = FL.OUT.CLSSUM,append=TRUE)
cat("\n\n",file = FL.OUT.CLSSUM,append=TRUE)
cat(paste("44. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: tally the day types.\n",sep=""))
cat(paste("44. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: tally the day types.\n",sep=""),file=FL.LOG,append=TRUE)
# -----------------------------------------------------------------
# 50. PLOTTING: profile of the classes
# -----------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# 51. plot1: (p.cluster): plot the profile of the clusters
# ---------------------------------------------------------------------------------------
# Artificial subsets get a single line plot colored by Janitor (A/B); real
# subsets additionally get an all-clusters overview (p.cluster.all) and a
# per-cluster faceted plot.
# NOTE(review): `if (length(grep(...)))` relies on 0/1 truthiness; also
# scale_x_discrete() with numeric limits on an hour axis looks odd
# (scale_x_continuous would be the usual choice) — confirm against the output.
if(length(grep("artificial",this.subset)))
{
p.cluster <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=Janitor,geom="line") + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(title=paste("[",thisLearn.string.rev,"]: Clustering on subset [",this.subset,"] with ",no.object," objects\n",sep=""))
p.cluster <- p.cluster + scale_x_discrete(name="Hour in the day",limits=c(8,16))
}else{
# overview: one line per day, colored by cluster membership
p.cluster.all <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=cluster,geom=c("line","point")) + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: Clustering on subset [",this.subset,"] with ",no.object," objects\n",sep=""))
p.cluster.all <- p.cluster.all + scale_x_discrete(name="Hour in the day",limits=c(8,16))
# faceted by cluster, colored by Janitor (A = EEM, B = baseline)
p.cluster <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=Janitor, geom=c("line"), facets=~cluster) + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: Clustering on subset [",this.subset,"] with ",no.object," objects\n",sep="")) + scale_color_manual("",labels = c("A", "B"), values = c("blue", "red"))
p.cluster <- p.cluster + scale_x_discrete(name="Hour in the day",limits=c(8,16))
}
cat(paste("51. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters.\n",sep=""))
cat(paste("51. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------------
# 52. plot2: (p.cluster.dayType): [Cluster] vs [day.type.lab] vs [base|EEM]
# ---------------------------------------------------------------------------------------
# Same daily-profile plot, faceted by day type (and by cluster for real subsets).
if(length(grep("artificial",this.subset)))
{
p.cluster.dayType <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=Janitor,facets=~day.type.lab, geom="line") + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: [",this.subset,"] with ",no.object," objects\n",sep="")) + scale_color_manual("",labels = c("A", "B"), values = c("blue", "red"))
p.cluster.dayType <- p.cluster.dayType + scale_x_discrete(name="Hour in the day",limits=c(8,16))
}else{
# facet grid: day type (rows) x cluster (columns)
p.cluster.dayType <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=Janitor,facets=day.type.lab~cluster,geom="line") + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: [",this.subset,"] with ",no.object," objects\n",sep="")) + scale_color_manual("",labels = c("A", "B"), values = c("blue", "red"))
p.cluster.dayType <- p.cluster.dayType + scale_x_discrete(name="Hour in the day",limits=c(8,16))
}
cat(paste("52. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters in terms of day type.\n",sep=""))
cat(paste("52. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters in terms of day type.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------------
# 53. plot 3: (p.cluster.dayinWeek): [Cluster] vs [day.week.lab] vs [base|EEM]
# ---------------------------------------------------------------------------------------
# plot cluster results of all dates in both "base" and "EEM" in terms of [day.week.lab]
if(length(grep("artificial",this.subset)))
{
p.cluster.dayinWeek <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,color=Janitor,facets=~day.week.lab, geom="line") + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: [",this.subset,"] with ",no.object," objects\n",sep="")) + scale_color_manual("",labels = c("A", "B"), values = c("blue", "red"))
# BUG FIX: the x-axis scale was applied to p.cluster.dayType here (copy-paste
# from section 52); it now modifies p.cluster.dayinWeek as intended, matching
# the else branch below.
p.cluster.dayinWeek <- p.cluster.dayinWeek + scale_x_discrete(name="Hour in the day",limits=c(8,16))
}else{
# facet grid: day in week (rows) x cluster (columns)
p.cluster.dayinWeek <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=date,colour=Janitor,facets=day.week.lab~cluster,geom="line") + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top") + labs(x="Hour",y="Energy (GJ)",colour="Janitor",title=paste("[",thisLearn.string.rev,"]: [",this.subset,"] with ",no.object," objects\n",sep="")) + scale_color_manual("",labels = c("A", "B"), values = c("blue", "red"))
p.cluster.dayinWeek <- p.cluster.dayinWeek + scale_x_discrete(name="Hour in the day",limits=c(8,16))
# p.cluster.dayinWeek <- p.cluster.dayinWeek + labs(list(title = "", x = "Hour", y = "Energy(GJ)",colour="Janitor"))
# (removed a no-op self-assignment that followed the commented-out labs() call)
}
cat(paste("53. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters in terms of day in the week.\n",sep=""))
cat(paste("53. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile of all clusters in terms of day in the week.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------------
# 54. plot 4: plot the daily profiles coloring in clusters
# ---------------------------------------------------------------------------------------
# Calendar-style grid: one facet per (month, day-of-month), each day's profile
# colored/shaped by its cluster.
p.daily.cls <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=cluster,color=cluster,shape=cluster,facets=month.lab~day,geom="line")
p.daily.cls <- p.daily.cls + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
# p.daily.cls <- p.daily.cls + labs( title=paste(paste("Raw Data: ",paste(this.EEM.fuel,this.EEM.num,this.EEM.label,sep="_"),sep="")," (",this.EEM.saving,") Daily Profile of All Days in Each Month (",this.CZ,")",sep=""))
p.daily.cls <- p.daily.cls + labs(x="Hour",y="Energy (GJ)",title=paste("[",thisLearn.string.rev,"]: Clustering on subset [",this.subset,"] with ",no.object," objects\n",sep=""))
# NOTE(review): this plot uses x limits 12-24 while the earlier plots use 8-16
# — confirm the asymmetry is intentional.
p.daily.cls <- p.daily.cls + scale_x_discrete(name="Hour in the day",limits=c(12,24))
cat(paste("54. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile coloring with clusters.\n",sep=""))
cat(paste("54. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot daily profile coloring with clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# 55. actual plotting
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# Render the plots built above to graphics device 2 (presumably a PDF opened
# earlier in the file — not visible here).
dev.set(2)
if(length(grep("artificial",this.subset,invert = TRUE))) # for all non-artificial subset, plot this (all clusters and both baseline and EEM together
{
plot(p.cluster.all)
}
plot(p.cluster)
# NOTE(review): grep("July",...) already matches "July-August", so the second
# term of this condition is redundant — confirm the intended subset names.
if (length(grep("July",this.subset)) | length(grep("July-August",this.subset)) | length(grep("All Data",this.subset)))
{
multiplot(p.cluster.dayType) # data from complete month or year can be plotted in terms of day in the week or day type
multiplot(p.cluster.dayinWeek) # data from complete month or year can be plotted in terms of day in the week or day type
}else if (length(grep("WeekDay",this.subset)))
{
multiplot(p.cluster.dayinWeek) # data from a day type will only be plotted in terms of day in the week
}
plot(p.daily.cls)
cat(paste("55. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: actually generate the plots.\n",sep=""))
cat(paste("55. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: actually generate the plots.\n",sep=""),file=FL.LOG,append=TRUE)
# ********************************************************************************************************
# JPEG plots
# April 5, 2015: add JPEG plots
# ********************************************************************************************************
# Helper: save one ggplot object to a high-resolution JPEG.
# - fl.jpg : output path, or NULL to skip silently (no file name is defined
#            for subsets other than "AllData"/"WeekDay").
# - p.obj  : the plot to render. R's lazy evaluation means p.obj is never
#            forced when fl.jpg is NULL, so a plot that was not built for this
#            subset causes no error.
# An existing file is deleted first so the new plot always replaces it.
save.cluster.jpeg <- function(fl.jpg,p.obj)
{
if (is.null(fl.jpg)){return(invisible(NULL))}
if (file.exists(fl.jpg)){print(paste(fl.jpg,"exist. Delete it!"));file.remove(fl.jpg)}
jpeg(file = fl.jpg,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.obj)
dev.off(4)
invisible(NULL)
}
# NOTE: the "Clusering" typo in the file names is kept deliberately — these
# names may be consumed downstream.
# Each FL.Fig0X.JPG is now NULL-initialized: previously, for subsets other than
# "AllData"/"WeekDay", the variable was undefined (error) or kept a stale value
# from an earlier loop iteration (silent overwrite).
# all cluster plot
FL.Fig04.JPG <- NULL
if (this.subset == "AllData")
{
FL.Fig04.JPG <- paste(Path.CZ.OUT,paste("Fig04_Clusering",this.subset,"AllClusters_016.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig04.JPG <- paste(Path.CZ.OUT,paste("Fig04_Clusering",this.subset,"AllClusters_006.jpg",sep="_"),sep="/")
}
save.cluster.jpeg(FL.Fig04.JPG,p.cluster.all)
# day type plot
FL.Fig05.JPG <- NULL
if (this.subset == "AllData")
{
FL.Fig05.JPG <- paste(Path.CZ.OUT,paste("Fig05_Clusering",this.subset,"DayType_017.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig05.JPG <- paste(Path.CZ.OUT,paste("Fig05_Clusering",this.subset,"DayType.jpg",sep="_"),sep="/")
}
save.cluster.jpeg(FL.Fig05.JPG,p.cluster.dayType)
# day in week plot
FL.Fig06.JPG <- NULL
if (this.subset == "AllData")
{
FL.Fig06.JPG <- paste(Path.CZ.OUT,paste("Fig06_Clusering",this.subset,"DayInWeek.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig06.JPG <- paste(Path.CZ.OUT,paste("Fig06_Clusering",this.subset,"DayInWeek_007.jpg",sep="_"),sep="/")
}
save.cluster.jpeg(FL.Fig06.JPG,p.cluster.dayinWeek)
# daily plot
FL.Fig07.JPG <- NULL
if (this.subset == "AllData")
{
FL.Fig07.JPG <- paste(Path.CZ.OUT,paste("Fig07_Clusering",this.subset,"Daily_018.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig07.JPG <- paste(Path.CZ.OUT,paste("Fig07_Clusering",this.subset,"Daily_008.jpg",sep="_"),sep="/")
}
save.cluster.jpeg(FL.Fig07.JPG,p.daily.cls)
cat(paste("56. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Added JPEG format plots.\n",sep=""))
cat(paste("56. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Added JPEG format plots.\n",sep=""),file=FL.LOG,append=TRUE)
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# plot the distribution of the environmental variables among clusters
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------
# 1. merging [myWeather.wide] with [myData.Work.long]
if (length(grep("artificial",this.subset,invert = TRUE))) # for all non-artifical subsets
{
# add "month","day" in [myData.Work.long] which will be used to merge the weather data in [myWeather.wide] which has "month","day","hour"
# NOTE(review): these two columns were already added in section 43 above —
# harmless re-computation, kept as-is.
myData.Work.long[,"month"] <- as.numeric(sub("(.*)-(.*)-(.*)","\\2",myData.Work.long[,"date"]))
myData.Work.long[,"day"] <- as.numeric(sub("(.*)-(.*)-(.*)","\\3",myData.Work.long[,"date"]))
# columns to keep from each side: cluster labels etc. from the long data,
# "epw."-prefixed weather columns from the weather table
var.cls <- c("month","day","hour","cluster","EEM","month.lab","day.week.lab","day.type.lab","week.idx")
var.epw <- c("month","day","hour",grep("epw.",names(myWeather.wide),value=TRUE))
tmp1 <- myData.Work.long[,var.cls]
tmp2 <- myWeather.wide[,var.epw]
# "month-day-hour" key; unique per row of the weather table so it can serve
# as a row-name lookup
tmp1[,"ID"] <- paste(tmp1[,"month"],tmp1[,"day"],tmp1[,"hour"],sep="-")
tmp2[,"ID"] <- paste(tmp2[,"month"],tmp2[,"day"],tmp2[,"hour"],sep="-") # this "ID" is unique for each row in [myWeather.wide] i.e., in [tmp2]
row.names(tmp2) <- tmp2[,"ID"]
# the corresponding part in [tmp2] which matchs the "month","day","hour" in [tmp1]
# those rows in [tmp2] which has the values of (tmp1[,"ID"])
tmp3 <- tmp2[tmp1[,"ID"],] # the part of the [myWeather.wide] which matches the "month","day","hour" in [myData.Work.long]
# merge weather in [tmp3] and the class label in [tmp1]
myEpw.Cls <- cbind(tmp1,tmp3)
# build a nominal Date per row (EPW weather has no real year; 2010 is a dummy)
myEpw.Cls[,"date"] <- paste(myEpw.Cls[,"month"],myEpw.Cls[,"day"],"2010",sep="/")
# BUG FIX: the year is written with 4 digits, so the format must be "%Y";
# the previous "%y" consumed only "20" and produced year 2020, not 2010.
myEpw.Cls[,"date"] <- as.Date(myEpw.Cls[,"date"],"%m/%d/%Y")
cat(paste("56B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: merge the fields in [myData.Work.long] and fields in [myWeather.wide].\n",sep=""))
cat(paste("56B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: merge the fields in [myData.Work.long] and fields in [myWeather.wide].\n",sep=""),file=FL.LOG,append=TRUE)
# put in a long format to plot all environmental variables
var.epw <- grep("epw.",names(myEpw.Cls),value=TRUE)
var.non.epw <- names(myEpw.Cls)[!(names(myEpw.Cls) %in% var.epw)]
myEpw.Cls.long <- melt(myEpw.Cls,id.vars = var.non.epw,measure.vars = var.epw,value.name="value")
cat(paste("56C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: [myEps.Cls] to [myEps.Cls.long].\n",sep=""))
cat(paste("56C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: [myEps.Cls] to [myEps.Cls.long].\n",sep=""),file=FL.LOG,append=TRUE)
# plot: one boxplot (value by cluster) per epw weather variable, stored as
# p.epw1, p.epw2, ... for the multiplot() dispatch further below
count.epw <- 0
for (this.variable in unique(myEpw.Cls.long[,"variable"]))
{
count.epw <- count.epw + 1
myData.4.plot <- myEpw.Cls.long[myEpw.Cls.long[,"variable"] == this.variable,]
p.weather <- qplot(data=myData.4.plot,x=cluster,y=value,color=cluster,geom="boxplot") + theme(legend.position="none") + labs(title=paste("[(",this.variable,"): [",this.subset,"] with ",no.object," objects\n",sep=""))
# store under a numbered name; assign() replaces the previous
# eval(parse(text = ...)) construction with identical effect
assign(paste("p.epw",count.epw,sep=""),p.weather)
}
cat(paste("57. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Weather variables in the epw file have been plotted in term of the clusters.\n",sep=""))
cat(paste("57. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: Weather variables in the epw file have been plotted in term of the clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ***********************************************************************************
# April 5, 2015: Explicitly define T drybuld and T wetbulb temperature plots
# ***********************************************************************************
# Boxplots of drybulb / dewpoint temperature per cluster (from the merged
# weather table) and of day-in-week / day-type label per cluster.
# NOTE(review): boxplots of the factor-valued day.week.lab / day.type.lab rely
# on implicit coercion — confirm the rendered output is meaningful.
p.TdryBulb <- qplot(data=myEpw.Cls, x=cluster,y=epw.T.drybulb, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y=expression(paste("Drybulb T ("^"o","C)",sep="")),title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.TdewPoint <- qplot(data=myEpw.Cls, x=cluster,y=epw.T.dewpoint,color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y=expression(paste("Dew Point ("^"o","C)",sep="")),title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.dayInWeek <- qplot(data=myData.Work.long,x=cluster,y=day.week.lab, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Day in Week", title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.dayType <- qplot(data=myData.Work.long,x=cluster,y=day.type.lab, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Day Type", title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
cat(paste("58. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: T drybulb, T dewpoint, day in week, day type have been plotted in terms of the clusters.\n",sep=""))
cat(paste("58. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: T drybulb, T dewpoint, day in week, day type have been plotted in terms of the clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ***************************************************************
# April 5, 2015: summing the dail consumption.
# ***************************************************************
# Sum hourly EnergyGJ to daily totals, re-attach the cluster label by date,
# and boxplot daily consumption per cluster.
myDaily.consumption <- aggregate(myData.Work.long[,"EnergyGJ"],by = list(myData.Work.long[,"month"],myData.Work.long[,"day"],myData.Work.long[,"date"]),sum)
names(myDaily.consumption) <- c("month","day","date","EnergyGJ")
# assign the cluster label based on the "month" and "day"
myDaily.consumption[,"cls.Work"] <- class.days[as.character(myDaily.consumption[,"date"]),"cls.Work"]
cls.levels <- paste("cls",sort(unique(myDaily.consumption[,"cls.Work"])),sep="-")
myDaily.consumption[,"cluster"] <- factor(paste("cls",myDaily.consumption[,"cls.Work"],sep="-"),levels = cls.levels,labels = cls.levels,ordered = TRUE)
p.dailyGJ <- qplot(data=myDaily.consumption,x=cluster,y=EnergyGJ,color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Daily Energy (GJ)",title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
# (log message typo "dialy" kept — doc-only edit)
cat(paste("59. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: the dialy consumption has been plotted in terms of the clusters.\n",sep=""))
cat(paste("59. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: the dialy consumption has been plotted in terms of the clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ***************************************************************
# April 5, 2015: plotting the distribution of the operation hours.
# ***************************************************************
# Add the ordered cluster factor to myResults.Work and pull per-date labels
# (day in week, day type, month, week index) over from myData.Work.long via
# a first-match lookup on date.
cls.levels <- paste("cls",sort(unique(myResults.Work[,"cls.Work"])),sep="-")
myResults.Work[,"cluster"] <- factor(paste("cls",myResults.Work[,"cls.Work"],sep="-"),levels = cls.levels,labels = cls.levels,ordered = TRUE)
cat(paste("60A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [custer] to [myResults.Work].\n",sep=""))
cat(paste("60A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [custer] to [myResults.Work].\n",sep=""),file=FL.LOG,append=TRUE)
# get the day type, day in the week etc from [myData.Work.long]
# match() returns the FIRST long-format row for each date; these labels are
# constant within a date, so any matching row works.
myResults.Work[,"day.week.lab"] <- myData.Work.long[match(myResults.Work[,"date"],myData.Work.long[,"date"]),"day.week.lab"]
myResults.Work[,"day.type.lab"] <- myData.Work.long[match(myResults.Work[,"date"],myData.Work.long[,"date"]),"day.type.lab"]
myResults.Work[,"month.lab"] <- myData.Work.long[match(myResults.Work[,"date"],myData.Work.long[,"date"]),"month.lab"]
myResults.Work[,"week.idx"] <- myData.Work.long[match(myResults.Work[,"date"],myData.Work.long[,"date"]),"week.idx"]
cat(paste("60B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: extract [day.week.lab]/[day.type.lab]/[month.lab]/[week.idx] from [myData.Work.long] to [myResults.Work].\n",sep=""))
cat(paste("60B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: extract [day.week.lab]/[day.type.lab]/[month.lab]/[week.idx] from [myData.Work.long] to [myResults.Work].\n",sep=""),file=FL.LOG,append=TRUE)
# get the weather average of each day from [myEpw.Cls]
# Map each epw column to the name its daily mean gets in [myResults.Work];
# this replaces five copy-pasted aggregate()/names()/merge() stanzas with one
# loop, producing identical column names and values.
epw.daily.vars <- c(T.drybulb = "epw.T.drybulb",
T.dewPoint = "epw.T.dewpoint",
T.RelHumid = "epw.rel.humidity",
T.DirNormRad = "epw.direct.norm.rad",
T.DiffHorRad = "epw.diffuse.hor.rad")
Tmp.Weather.list <- lapply(names(epw.daily.vars),function(this.name)
{
# daily mean of one weather variable, keyed by date
tmp <- aggregate(myEpw.Cls[,epw.daily.vars[this.name]],list(myEpw.Cls[,"date"]),mean,na.rm=TRUE)
names(tmp) <- c("date",this.name)
tmp
})
# successive merges on the shared "date" column reproduce the original
# pairwise merge chain
Tmp.Weather <- Reduce(merge,Tmp.Weather.list)
myResults.Work <- merge(myResults.Work,Tmp.Weather,by="date",all=TRUE)
cat(paste("60C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: get [TdryBulb] / [TdewPoint] / [RelHumid] / [DirNormRad] / [DiffHorRad] from [myEpw.Cls] to [myResults.Work].\n",sep=""))
cat(paste("60C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: get [TdryBulb] / [TdewPoint] / [RelHumid] / [DirNormRad] / [DiffHorRad] from [myEpw.Cls] to [myResults.Work].\n",sep=""),file=FL.LOG,append=TRUE)
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# now [myResults.Work] consists of everything fro plotting on the dates.
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
#
# April 20, 2015: re-set EEM to two different Janitors
#
# Recode EEM tag to anonymized labels: A = this EEM, B = baseline
myResults.Work[,"Janitor"] <- myResults.Work[,"EEM"]
myResults.Work[,"Janitor"] <- sub("Elec_00_base","B",sub(this.EEM.name,"A",myResults.Work[,"Janitor"]))
# plot the estimate operation hours and duration
# Boxplots per cluster of estimated business start hour, end hour, operating
# duration, and the Janitor (A/B) split.
p.Start <- qplot(data=myResults.Work,x=cluster,y=Hour.Start,color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Business Start (hr)", title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.End <- qplot(data=myResults.Work,x=cluster,y=Hour.End, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Business End (hr)", title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.Operation <- qplot(data=myResults.Work,x=cluster,y=Hour.OP, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Business Operation (hr)",title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
p.Janitor <- qplot(data=myResults.Work,x=cluster,y=Janitor, color=cluster,geom="boxplot") + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="none") + labs(x="Cluster",y="Janitor Shift", title=paste("[",this.subset,"] with ",no.object," objects\n",sep=""))
cat(paste("60D. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: distribution of business hours.\n",sep=""))
cat(paste("60D. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: distribution of business hours.\n",sep=""),file=FL.LOG,append=TRUE)
# Lay out the p.epw1..p.epwN weather boxplots in a grid; the column count was
# chosen by hand per N (note 4 -> 2 cols but 5 -> 3 cols — irregular, so this
# is not collapsed into a formula / do.call(mget(...)) refactor).
if (count.epw == 1){plot(p.epw1)}
if (count.epw == 2){multiplot(p.epw1,p.epw2,cols=2)}
if (count.epw == 3){multiplot(p.epw1,p.epw2,p.epw3,cols=3)}
if (count.epw == 4){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,cols=2)}
if (count.epw == 5){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,p.epw5,cols=3)}
if (count.epw == 6){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,p.epw5,p.epw6,cols=3)}
if (count.epw == 7){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,p.epw5,p.epw6,p.epw7,cols=3)}
if (count.epw == 8){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,p.epw5,p.epw6,p.epw7,p.epw8,cols=3)}
if (count.epw == 9){multiplot(p.epw1,p.epw2,p.epw3,p.epw4,p.epw5,p.epw6,p.epw7,p.epw8,p.epw9,cols=3)}
# NOTE(review): silently does nothing if count.epw > 9.
cat(paste("61. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot the distribution of the weather variables in the clusters.\n",sep=""))
cat(paste("61. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot the distribution of the weather variables in the clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ********************************************************************************************************
# April 5, 2015: plot into the pdf file
# ********************************************************************************************************
# Device 3 is the PDF report device; each step below re-activates it with
# dev.set(3) before drawing. Every step is logged twice with identical text:
# once to the console and once appended to FL.LOG.
# Steps (2)-(6) branch on this.subset: "AllData" facets by day type,
# "WeekDay" by day-in-week; any other subset value draws nothing there.
# (1) plot against Drybulb T
dev.set(3)
multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
cat(paste("62A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("62A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (2) plot against day type or day in the week
dev.set(3)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.TdryBulb,p.TdewPoint,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.TdryBulb,p.TdewPoint,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
cat(paste("63A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("63A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (3) day in week, drybulb T and dew point
dev.set(3)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.TdryBulb,p.TdewPoint,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.TdryBulb,p.TdewPoint,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
cat(paste("64A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("64A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (4) day type | day in the week and operation hours
dev.set(3)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.dailyGJ,p.Operation,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.dailyGJ,p.Operation,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
cat(paste("65A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("65A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (5) day type | day in the week and start and end operation hours
dev.set(3)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.Start,p.End,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.Start,p.End,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
cat(paste("66A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("66A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (6) day type | day in the week, janitor shift and operation hours
dev.set(3)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.Janitor,p.dailyGJ,p.Operation,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.Janitor,p.dailyGJ,p.Operation,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
# NOTE(review): the step tag "67A" is reused later in this script for the SVM
# data-preparation step -- the log tags are not unique across the file.
cat(paste("67A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("67A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: PDF. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# Making JPEG plots
# April 5, 2015: add JPEG plots
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# ********************************************************************************************************
# (1) plot against Drybulb T
# Write the 8-panel cluster-distribution figure to a standalone JPEG.
# Pattern used throughout this section: delete any stale output file, open a
# jpeg device (device 4), activate it, draw, then close it with dev.off(4).
FL.Fig08.JPG <- paste(Path.CZ.OUT,paste("Fig08_",this.subset,".jpg",sep=""),sep="/")
if (file.exists(FL.Fig08.JPG)){print(paste(FL.Fig08.JPG,"exist. Delete it!"));file.remove(FL.Fig08.JPG)}
jpeg(file = FL.Fig08.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("62B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("62B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (2) plot against day type or day in the week
# JPEG version of PDF step (2): 8-panel figure, file name suffix depends on
# the subset ("Visual_019" for AllData, "Visual_009" for WeekDay).
# NOTE(review): for any other subset value FL.Fig09.JPG is not assigned here,
# so file.exists()/jpeg() below would use a stale or missing value -- confirm
# this.subset can only be "AllData" or "WeekDay" at this point.
if (this.subset == "AllData")
{
FL.Fig09.JPG <- paste(Path.CZ.OUT,paste("Fig09_Clusering",this.subset,"Visual_019.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig09.JPG <- paste(Path.CZ.OUT,paste("Fig09_Clusering",this.subset,"Visual_009.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig09.JPG)){print(paste(FL.Fig09.JPG,"exist. Delete it!"));file.remove(FL.Fig09.JPG)}
jpeg(file = FL.Fig09.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.TdryBulb,p.TdewPoint,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.TdryBulb,p.TdewPoint,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("63B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("63B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (3) day in week, drybulb T and dew point
# JPEG version of PDF step (3): 4-panel figure (cluster composition + the two
# temperature distributions); same subset-dependent naming as Fig09.
if (this.subset == "AllData")
{
FL.Fig10.JPG <- paste(Path.CZ.OUT,paste("Fig10_Clusering",this.subset,"Visual_020.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig10.JPG <- paste(Path.CZ.OUT,paste("Fig10_Clusering",this.subset,"Visual_010.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig10.JPG)){print(paste(FL.Fig10.JPG,"exist. Delete it!"));file.remove(FL.Fig10.JPG)}
jpeg(file = FL.Fig10.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.TdryBulb,p.TdewPoint,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.TdryBulb,p.TdewPoint,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("64B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("64B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (4) day type | day in the week and operation hours
# JPEG version of PDF step (4): cluster composition + daily GJ + operating
# hours; same subset-dependent naming as Fig09.
if (this.subset == "AllData")
{
FL.Fig11.JPG <- paste(Path.CZ.OUT,paste("Fig11_Clusering",this.subset,"Visual_021.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig11.JPG <- paste(Path.CZ.OUT,paste("Fig11_Clusering",this.subset,"Visual_011.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig11.JPG)){print(paste(FL.Fig11.JPG,"exist. Delete it!"));file.remove(FL.Fig11.JPG)}
jpeg(file = FL.Fig11.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.dailyGJ,p.Operation,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.dailyGJ,p.Operation,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("65B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("65B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (5) day type | day in the week and start and end operation hours
# JPEG version of PDF step (5): cluster composition + business start/end
# boxplots; same subset-dependent naming as Fig09.
if (this.subset == "AllData")
{
FL.Fig12.JPG <- paste(Path.CZ.OUT,paste("Fig12_Clusering",this.subset,"Visual_022.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig12.JPG <- paste(Path.CZ.OUT,paste("Fig12_Clusering",this.subset,"Visual_012.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig12.JPG)){print(paste(FL.Fig12.JPG,"exist. Delete it!"));file.remove(FL.Fig12.JPG)}
jpeg(file = FL.Fig12.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.dayType,p.Start,p.End,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.dayInWeek,p.Start,p.End,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("66B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("66B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
# (6) day type | day in the week, janitor shift and operation hours
# JPEG version of PDF step (6) (variant of Fig11 with the janitor-shift panel
# in place of the day-type/day-in-week panel).
if (this.subset == "AllData")
{
FL.Fig11B.JPG <- paste(Path.CZ.OUT,paste("Fig11B_Clusering",this.subset,"Visual_021B.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig11B.JPG <- paste(Path.CZ.OUT,paste("Fig11B_Clusering",this.subset,"Visual_011B.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig11B.JPG)){print(paste(FL.Fig11B.JPG,"exist. Delete it!"));file.remove(FL.Fig11B.JPG)}
jpeg(file = FL.Fig11B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
if (this.subset == "AllData")
{
multiplot(p.cluster.dayType,p.Janitor,p.dailyGJ,p.Operation,cols=2)
}else if (this.subset == "WeekDay")
{
multiplot(p.cluster.dayinWeek,p.Janitor,p.dailyGJ,p.Operation,cols=2)
}
# multiplot(p.TdryBulb,p.TdewPoint,p.dayInWeek,p.dayType,p.dailyGJ,p.Start,p.End,p.Operation,cols=4)
dev.off(4)
cat(paste("67B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""))
cat(paste("67B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG. plot the distribution of the T drybuld, T dewpoint, Day in Week, Day type and daily consumption in term of clusters.\n",sep=""),file=FL.LOG,append=TRUE)
}
# ----------------------------------------------------------------------------------------------------
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# SVM for classification: use the same data for clustering to build classifier
# 1. classifier: (1) use the EnergyGJ data for training clssifier (2) use the (3) use the remaining data for verification
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# SVM data preparation: reshape the long hourly records into wide
# date-by-hour tables of raw (non-normalized) EnergyGJ, one row per date,
# then prefix every hour column with "GJ.h" so they can be selected later
# with grep("GJ\\.h", ...).
# NOTE(review): the log tags "67A"/"67B" below repeat tags already used in
# the PDF/JPEG plotting section above.
myData.Remain[,"date"] <- as.Date(myData.Remain[,"date"],"%m/%d/%y")
myData.4classify.train <- dcast(myData.Work.long,date + EEM ~ hour,value.var = "EnergyGJ") # use non-normalized data
myData.4classify.remain <- dcast(myData.Remain, date ~ hour,value.var = "EnergyGJ") # use non-normalized data
cat(paste("67A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: prepare [myData.4classify.train] / [myData.4classify.remain] for SVM.\n",sep=""))
cat(paste("67A. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: prepare [myData.4classify.train] / [myData.4classify.remain] for SVM.\n",sep=""),file=FL.LOG,append=TRUE)
# --------------------------------------------------------------
# Plot the raw weekly profiles of the hold-out ("remaining") dates that were
# excluded from the cluster analysis, faceted by month x week-in-month.
p.weekly.remain.all <- qplot(data=myData.Remain,x=hour.in.week,y=EnergyGJ,colour="black",facets=month.lab~week.idx.in.Month,geom="line")
p.weekly.remain.all <- p.weekly.remain.all + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="none")
p.weekly.remain.all <- p.weekly.remain.all + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("The Raw Data of the Remaning Dates which are not used in the cluster analysis.\n",sep=""))
p.weekly.remain.all <- p.weekly.remain.all + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(3)
plot(p.weekly.remain.all)
cat(paste("67B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.weekly.remain.all].\n",sep=""))
cat(paste("67B. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.weekly.remain.all].\n",sep=""),file=FL.LOG,append=TRUE)
# 2. training table: keep the id columns (date, EEM) untouched and prefix the
#    remaining (hour) column names with "GJ.h"
field1.4classify.train <- c("date","EEM")
field2.4classify.train <- names(myData.4classify.train)[!(names(myData.4classify.train) %in% field1.4classify.train)]
myData.GJ.part1.4classify.train <- myData.4classify.train[,field1.4classify.train]
myData.GJ.part2.4classify.train <- myData.4classify.train[,field2.4classify.train]
myData.4classify.train <- cbind(myData.GJ.part1.4classify.train,myData.GJ.part2.4classify.train)
names(myData.4classify.train) <- c(field1.4classify.train,paste("GJ.h",field2.4classify.train,sep=""))
cat(paste("67C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [GJ.h] to [myData.4classify.train].\n",sep=""))
cat(paste("67C. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [GJ.h] to [myData.4classify.train].\n",sep=""),file=FL.LOG,append=TRUE)
# 3. verification table: same "GJ.h" renaming for the hold-out data (id
#    column here is only the date)
field1.4classify.remain <- c("date")
field2.4classify.remain <- names(myData.4classify.remain)[!(names(myData.4classify.remain) %in% field1.4classify.remain)]
myData.GJ.part1.4classify.remain <- myData.4classify.remain[,field1.4classify.remain]
myData.GJ.part2.4classify.remain <- myData.4classify.remain[,field2.4classify.remain]
myData.4classify.remain <- cbind(myData.GJ.part1.4classify.remain,myData.GJ.part2.4classify.remain)
names(myData.4classify.remain) <- c(field1.4classify.remain,paste("GJ.h",field2.4classify.remain,sep=""))
cat(paste("67D. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [GJ.h] to [myData.4classify.remain].\n",sep=""))
cat(paste("67D. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [GJ.h] to [myData.4classify.remain].\n",sep=""),file=FL.LOG,append=TRUE)
# 4. add class label to the data: look up each training date's cluster
#    assignment from the clustering results (match on date)
myData.4classify.train[,"cluster"] <- myResults.Work[match(myData.4classify.train[,"date"],myResults.Work[,"date"]),"cluster"]
cat(paste("67E. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cluster] to [myData.4classify.train].\n",sep=""))
cat(paste("67E. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [cluster] to [myData.4classify.train].\n",sep=""),file=FL.LOG,append=TRUE)
# 5. SVM model: fit cluster ~ all 24 hourly "GJ.h*" columns
myData.4.classifier <- myData.4classify.train[,c("cluster",grep("GJ\\.h",names(myData.4classify.train),value=TRUE))]
model.svm <- svm(formula = cluster ~ ., data = myData.4.classifier)
cat(paste("67F. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: conduct SVM on [myData.4classify.train].\n",sep=""))
cat(paste("67F. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: conduct SVM on [myData.4classify.train].\n",sep=""),file=FL.LOG,append=TRUE)
# 6A. resubstitution check: predict the training rows (column 1 is the
#     cluster label, dropped from the predictors) and build the confusion
#     table predicted-vs-true
pred.svm.train <- predict(model.svm, myData.4.classifier[,-1])
tab.svm.train <- table(pred = pred.svm.train, true = myData.4.classifier[,1])
cat(paste("67G. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: predict back on the training data to get [pred.svm.train].\n",sep=""))
cat(paste("67G. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: predict back on the training data to get [pred.svm.train].\n",sep=""),file=FL.LOG,append=TRUE)
# 6B. April 20, 2015: for visualizing the training classification results,
#     attach the predicted label and the date to the classifier table
myData.4.classifier[,"Classified"] <- pred.svm.train
myData.4.classifier[,"date"] <- myData.4classify.train[,"date"]
cat(paste("67H. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add the predicted [Classified] back to [myData.4.classifie].\n",sep=""))
cat(paste("67H. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add the predicted [Classified] back to [myData.4.classifie].\n",sep=""),file=FL.LOG,append=TRUE)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# add the classification results based to [myData.Work.long] (match on date)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
myData.Work.long[,"Classified"] <- myData.4.classifier[match(myData.Work.long[,"date"],myData.4.classifier[,"date"]),"Classified"]
cat(paste("67HH. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add the classified results back to [myData.Work.long].\n",sep=""))
cat(paste("67HH. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add the classified results back to [myData.Work.long].\n",sep=""),file=FL.LOG,append=TRUE)
# 6C (disabled): weekly-profile plot of the training classification results.
# Kept commented out; 6D below draws the daily-profile version instead.
### # 6C. plotting the classification results
### p.weekly.train.cls <- qplot(data=myData.Work.long,x=hour.in.week,y=EnergyGJ,group=Classified,color=cluster,facets=month.lab~week.idx.in.Month,geom="line")
### p.weekly.train.cls <- p.weekly.train.cls + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
### p.weekly.train.cls <- p.weekly.train.cls + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("SVM: Classify the training dates onto the clusters established on the training data.\n",sep=""))
### p.weekly.train.cls <- p.weekly.train.cls + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
### dev.set(3)
### plot(p.weekly.train.cls)
### cat(paste("67I. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot classification results to [p.weekly.train.cls].\n",sep=""))
### cat(paste("67I. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot classification results to [p.weekly.train.cls].\n",sep=""),file=FL.LOG,append=TRUE)
# 6D. plotting the classification results on the training data: daily
#     profiles colored by the SVM-predicted label, shaped by the original
#     cluster, faceted by month x day; drawn to the PDF device (3)
p.daily.train.cls <- qplot(data=myData.Work.long,x=hour,y=EnergyGJ,group=Classified,color=Classified,shape=cluster,facets=month.lab~day,geom="line")
p.daily.train.cls <- p.daily.train.cls + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.daily.train.cls <- p.daily.train.cls + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("SVM: Classify the training dates onto the clusters established on the training data.\nWeekday Only",sep=""))
p.daily.train.cls <- p.daily.train.cls + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(3)
plot(p.daily.train.cls)
cat(paste("67J. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot classification results on the training data to [p.daily.train.cls].\n",sep=""))
cat(paste("67J. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot classification results on the training data to [p.daily.train.cls].\n",sep=""),file=FL.LOG,append=TRUE)
# 7. use the SVM classifiers to predict the remaining data from the baseline
#    (predictor matrix = the "GJ.h*" hourly columns of the hold-out dates)
myData.4.verify <- myData.4classify.remain[,grep("GJ\\.h",names(myData.4classify.remain),value=TRUE)]
cat(paste("67K. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: applies the classifiers to [myData.4classify.remain].\n",sep=""))
cat(paste("67K. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: applies the classifiers to [myData.4classify.remain].\n",sep=""),file=FL.LOG,append=TRUE)
# 8. verify the SVM model on the hold-out dates
pred.svm.verify <- predict(model.svm, myData.4.verify)
cat(paste("67L. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: classification results on [myData.4classify.remain].\n",sep=""))
cat(paste("67L. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: classification results on [myData.4classify.remain].\n",sep=""),file=FL.LOG,append=TRUE)
# 9. add the classification results to the data for plotting (joined back to
#    the long-format hold-out records by date)
myData.4classify.remain["cluster"] <- pred.svm.verify
myData.Remain[,"cluster"] <- myData.4classify.remain[match(myData.Remain[,"date"],myData.4classify.remain[,"date"]),"cluster"]
cat(paste("67M. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add classification results back to [myData.Remain].\n",sep=""))
cat(paste("67M. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add classification results back to [myData.Remain].\n",sep=""),file=FL.LOG,append=TRUE)
# add a "Week" prefix in front of the week-index-in-month for facet labels
myData.Remain[,"week.idx.in.Month"] <- as.factor(paste("Week",myData.Remain[,"week.idx.in.month"],sep=""))
cat(paste("67N. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [week] field to [myData.Remain].\n",sep=""))
cat(paste("67N. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: add [week] field to [myData.Remain].\n",sep=""),file=FL.LOG,append=TRUE)
# 10a. plotting the classification results: weekly profiles of the hold-out
#      dates colored by predicted cluster, to the PDF device (3)
p.weekly.remain.cls <- qplot(data=myData.Remain,x=hour.in.week,y=EnergyGJ,group=cluster,color=cluster,facets=month.lab~week.idx.in.Month,geom="line")
p.weekly.remain.cls <- p.weekly.remain.cls + theme(axis.text.x = element_text(angle=0,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.weekly.remain.cls <- p.weekly.remain.cls + labs(x="Hour in the Week",y="Energy (GJ)",title=paste("SVM: Classify the remaining dates onto the clusters established on the training data.\n",sep=""))
p.weekly.remain.cls <- p.weekly.remain.cls + scale_x_discrete(name="Hour in the week",limits=c(48,96,144))
dev.set(3)
plot(p.weekly.remain.cls)
cat(paste("67O. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.weekly.remain.cls].\n",sep=""))
cat(paste("67O. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.weekly.remain.cls].\n",sep=""),file=FL.LOG,append=TRUE)
# 10b. plotting the classification results: daily-profile version
p.daily.remain.cls <- qplot(data=myData.Remain,x=hour,y=EnergyGJ,group=cluster,color=cluster,facets=month.lab~day,geom="line")
p.daily.remain.cls <- p.daily.remain.cls + theme(axis.text.x = element_text(angle=90,color="black"),axis.text.y = element_text(color="black"),legend.position="top")
p.daily.remain.cls <- p.daily.remain.cls + labs(x="Hour in the Day",y="Energy (GJ)",title=paste("SVM: Classify the remaining dates onto the clusters established on the training data.\nWeekday Only",sep=""))
p.daily.remain.cls <- p.daily.remain.cls + scale_x_discrete(name="Hour in the day",limits=c(12,24))
dev.set(3)
plot(p.daily.remain.cls)
cat(paste("67P. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.daily.remain.cls].\n",sep=""))
cat(paste("67P. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot [p.daily.remain.cls].\n",sep=""),file=FL.LOG,append=TRUE)
# 11. output the confusion table (reshaped pred~true) to the classifier CSV.
# NOTE(review): the header cat() below has no trailing "\n", so the first
# line written by write.table() continues on the same line in the CSV --
# confirm this is the intended layout.
cat("SVM classification results [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]",file=FL.classifier.CSV,append=TRUE)
write.table(dcast(as.data.frame(tab.svm.train),pred~true),file=FL.classifier.CSV,sep=",",row.names=TRUE,col.names=TRUE,append=TRUE)
cat("\n\n",file=FL.classifier.CSV,append=TRUE)
cat(paste("67Q. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: output the SVM clasification to [FL.classifier.CSV].\n",sep=""))
cat(paste("67Q. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: output the SVM clasification to [FL.classifier.CSV].\n",sep=""),file=FL.LOG,append=TRUE)
# =====================================================================================================================
# =====================================================================================================================
# Make JPEG plots
# =====================================================================================================================
# =====================================================================================================================
# Fig13: JPEG of the raw hold-out weekly profiles (same delete/open device
# 4/draw/close pattern as the earlier JPEG figures).
FL.Fig13.JPG <- paste(Path.CZ.OUT,paste("Fig13",this.subset,"SVM.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig13.JPG)){print(paste(FL.Fig13.JPG,"exist. Delete it!"));file.remove(FL.Fig13.JPG)}
jpeg(file = FL.Fig13.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.remain.all)
dev.off(4)
cat(paste("67R. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG file [FL.Fig13.JPG].\n",sep=""))
cat(paste("67R. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG file [FL.Fig13.JPG].\n",sep=""),file=FL.LOG,append=TRUE)
# Fig14 A-D: file names for the SVM classification figures, subset-dependent.
# NOTE(review): FL.Fig14.JPG is assigned only in the AllData branch and is
# never used in this section; FL.Fig14A.JPG is also unused because the Fig14A
# plotting block below is commented out -- confirm both are dead code.
if (this.subset == "AllData")
{
FL.Fig14.JPG <- paste(Path.CZ.OUT,paste("Fig14_SVM.jpg",sep="_"),sep="/")
FL.Fig14A.JPG <- paste(Path.CZ.OUT,paste("Fig14A_SVM_Clusering",this.subset,"SVM_Train_Weekly_024A.jpg",sep="_"),sep="/")
FL.Fig14B.JPG <- paste(Path.CZ.OUT,paste("Fig14B_SVM_Clusering",this.subset,"SVM_Train_Daily_024B.jpg",sep="_"),sep="/")
FL.Fig14C.JPG <- paste(Path.CZ.OUT,paste("Fig14C_SVM_Clusering",this.subset,"SVM_Test_Weekly_024C.jpg",sep="_"),sep="/")
FL.Fig14D.JPG <- paste(Path.CZ.OUT,paste("Fig14D_SVM_Clusering",this.subset,"SVM_Test_Daily_024D.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig14A.JPG <- paste(Path.CZ.OUT,paste("Fig14A_SVM_Clusering",this.subset,"SVM_Train_Weekly_014A.jpg",sep="_"),sep="/")
FL.Fig14B.JPG <- paste(Path.CZ.OUT,paste("Fig14B_SVM_Clusering",this.subset,"SVM_Train_Daily_014B.jpg",sep="_"),sep="/")
FL.Fig14C.JPG <- paste(Path.CZ.OUT,paste("Fig14C_SVM_Clusering",this.subset,"SVM_Test_Weekly_014C.jpg",sep="_"),sep="/")
FL.Fig14D.JPG <- paste(Path.CZ.OUT,paste("Fig14D_SVM_Clusering",this.subset,"SVM_Test_Daily_014D.jpg",sep="_"),sep="/")
}
### if (file.exists(FL.Fig14A.JPG)){print(paste(FL.Fig14A.JPG,"exist. Delete it!"));file.remove(FL.Fig14A.JPG)}
### jpeg(file = FL.Fig14A.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
### dev.set(4)
### plot(p.weekly.train.cls)
### dev.off(4)
# Fig14B: training-data daily classification plot
if (file.exists(FL.Fig14B.JPG)){print(paste(FL.Fig14B.JPG,"exist. Delete it!"));file.remove(FL.Fig14B.JPG)}
jpeg(file = FL.Fig14B.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.train.cls)
dev.off(4)
# Fig14C: hold-out weekly classification plot
if (file.exists(FL.Fig14C.JPG)){print(paste(FL.Fig14C.JPG,"exist. Delete it!"));file.remove(FL.Fig14C.JPG)}
jpeg(file = FL.Fig14C.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.weekly.remain.cls)
dev.off(4)
# Fig14D: hold-out daily classification plot
if (file.exists(FL.Fig14D.JPG)){print(paste(FL.Fig14D.JPG,"exist. Delete it!"));file.remove(FL.Fig14D.JPG)}
jpeg(file = FL.Fig14D.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(p.daily.remain.cls)
dev.off(4)
cat(paste("67S. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG file [FL.Fig14.JPG].\n",sep=""))
cat(paste("67S. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: JPEG file [FL.Fig14.JPG].\n",sep=""),file=FL.LOG,append=TRUE)
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# RPART for classification: use external variable to create Classification Tree to check the distribution of the external variable
# (2) use the external variables (3) the remaining data for verification
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# 1. use external data to building classifier
# Build a CART model that predicts the k-means cluster label from external
# (schedule/weather) variables.  The predictor set depends on the subset:
# "WeekDay" uses hour-of-operation and dry-bulb temperature only, while
# "AllData" additionally includes the day-type label.
# method="class" makes rpart grow a classification (not regression) tree.
if (this.subset == "WeekDay")
{
myData.classifier <- myResults.Work[,c("cluster","Hour.OP","T.drybulb")]
model.rpart <- rpart(cluster ~ Hour.OP + T.drybulb, method="class", data=myData.classifier)
}else if (this.subset == "AllData")
{
myData.classifier <- myResults.Work[,c("cluster","day.type.lab","Hour.OP","T.drybulb")]
model.rpart <- rpart(cluster ~ day.type.lab + Hour.OP + T.drybulb, method="class", data=myData.classifier)
}
cat(paste("67T. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: CART Model.\n",sep=""))
cat(paste("67T. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: CART Model.\n",sep=""),file=FL.LOG,append=TRUE)
# 2. building rpart model
dev.set(3)
plot(model.rpart,main = "RPART: Classification Tree: Training data / external variables\n")
# text(model.rpart,use.n=TRUE,all=TRUE,cex=0.8)
text(model.rpart,use.n=TRUE,all=TRUE,cex=0.6,pretty=1)
FL.Fig16.JPG <- paste(Path.CZ.OUT,paste("Fig16",this.subset,"RPART.jpg",sep="_"),sep="/")
if (file.exists(FL.Fig16.JPG)){print(paste(FL.Fig16.JPG,"exist. Delete it!"));file.remove(FL.Fig16.JPG)}
jpeg(file = FL.Fig16.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(model.rpart,main = "RPART: Classification Tree: Training data / external variables\n")
# text(model.rpart,use.n=TRUE,all=TRUE,cex=0.8)
text(model.rpart,use.n=TRUE,all=TRUE,cex=0.6,pretty=1)
dev.off(4)
cat(paste("67U. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot un-pruned CART tree.\n",sep=""))
cat(paste("67U. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot un-pruned CART tree.\n",sep=""),file=FL.LOG,append=TRUE)
# 3. prun the tree:
# Cost-complexity pruning: cut the tree at the CP value whose row of the CP
# table has the smallest cross-validated error ("xerror") -- the standard
# rpart pruning rule.
model.rpart.prune <- prune(model.rpart, cp = model.rpart$cptable[which.min(model.rpart$cptable[,"xerror"]),"CP"])
# Redraw the pruned tree on screen device 3.
dev.set(3)
plot(model.rpart.prune, uniform=TRUE,main = "RPART: Pruned Classification Tree: Training data / external variables")
# text(model.rpart.prune, use.n=TRUE, all=TRUE, cex=.8)
text(model.rpart.prune, use.n=TRUE,all=TRUE,cex=0.6,pretty=1)
cat(paste("67V. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot pruned CART tree.\n",sep=""))
cat(paste("67V. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot pruned CART tree.\n",sep=""),file=FL.LOG,append=TRUE)
if (this.subset == "AllData")
{
FL.Fig17.JPG <- paste(Path.CZ.OUT,paste("Fig17",this.subset,"RPART_Clusering_AllDays_CART_023.jpg",sep="_"),sep="/")
}else if (this.subset == "WeekDay")
{
FL.Fig17.JPG <- paste(Path.CZ.OUT,paste("Fig17",this.subset,"RPART_Clusering_WeekdayOnly_CART_023.jpg",sep="_"),sep="/")
}
if (file.exists(FL.Fig17.JPG)){print(paste(FL.Fig17.JPG,"exist. Delete it!"));file.remove(FL.Fig17.JPG)}
jpeg(file = FL.Fig17.JPG,width=12,height=8,units="in",res=1200,bg = "transparent") # device 4
dev.set(4)
plot(model.rpart.prune, uniform=TRUE,main = "RPART: Pruned Classification Tree: Training data / external variables")
# text(model.rpart.prune, use.n=TRUE, all=TRUE, cex=.8)
text(model.rpart.prune, use.n=TRUE,all=TRUE,cex=0.6,pretty=1)
dev.off(4)
cat(paste("67W. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot pruned CART tree.\n",sep=""))
cat(paste("67W. [",this.CZ,"]-[",this.EEM.name,"]-[",this.subset,"]: plot pruned CART tree.\n",sep=""),file=FL.LOG,append=TRUE)
# ----------------------------------------------------------------------------------------------------
} # end of decision if number of objects is larger than the number of variables
} # end of subset of current EEM
cat(paste("all index: [",this.CZ,"]-[",this.EEM.name,"],",sep=""),file=FL.OUT.CLSSUM,append=TRUE)
write.table(myCluster.summary,sep=",",row.names=TRUE,col.name=TRUE,file=FL.OUT.CLSSUM,append=TRUE)
cat(paste("70. [",this.CZ,"]-[",this.EEM.name,"]: output the cluster determination information.\n",sep=""))
cat(paste("70. [",this.CZ,"]-[",this.EEM.name,"]: output the cluster determination information.\n",sep=""),file=FL.LOG,append=TRUE)
dev.off(2)
dev.off(3)
} # end of current EEM
cat(paste("80. [",this.CZ,"]: finished the processing of all EEMs.\n\n\n",sep=""))
cat(paste("80. [",this.CZ,"]: finished the processing of all EEMs.\n\n\n",sep=""),file=FL.LOG,append=TRUE)
} # end of current CZ
cat(paste("\n\n90. Completed the processing the data of all CZs.\n",sep=""))
cat(paste("\n\n90. Completed the processing the data of all CZs.\n",sep=""),file=FL.LOG,append=TRUE)
} # end of random loop
# -------------------------------------------------------------------------------------------------------------------------------------
# time used for completing this script
# -------------------------------------------------------------------------------------------------------------------------------------
End.time <- Sys.time()
Diff.time <- End.time - Start.time
Diff.time
cat(paste("\nPepare_JPG_Figures_for_Presentation_Random.R is finished successfully at ",End.time,"!\n",sep=" "))
cat(paste("\nPepare_JPG_Figures_for_Presentation_Random.R is finished successfully at ",End.time,"!\n",sep=" "),file=FL.LOG,append=TRUE)
cat(paste("\nProcessing time for [Pepare_JPG_Figures_for_Presentation_Random.R] is ",as.numeric(Diff.time, units="mins")," minutes\n",sep=" "))
cat(paste("\nProcessing time for [Pepare_JPG_Figures_for_Presentation_Random.R] is ",as.numeric(Diff.time, units="mins")," minutes\n",sep=" "),file=FL.LOG,append=TRUE)
#
# put run related information into the log file
#
cat(paste("This run was conducted in ",.Platform$OS.type,"\n",sep=""));
cat(paste("This run was conducted in ",.Platform$OS.type,"\n",sep=""),file=FL.LOG,append=TRUE);
# get the version of R used for this computation and the latest version released.
# BUG FIX: the original compared R.Version()$version.string (which looks like
# "R version 4.1.0 (2021-05-18)") against the bare number scraped from CRAN
# (e.g. "4.1.0"); the two strings can never match, so the script always
# suggested an upgrade.  Compare parsed version numbers instead.
current.Rversion <- as.character(getRversion())          # e.g. "4.1.0"
tmp <- readLines("http://cran.r-project.org/sources.html")
rls <- tmp[grep("latest release", tmp) + 1L]             # the version number is in the next line of 'The latest release'
latest.Rversion <- gsub("(.*R-|\\.tar\\.gz.*)", "", rls) # "The latest release: R-2.13.0.tar.gz" -> "2.13.0"
if (package_version(latest.Rversion) > package_version(current.Rversion))
{
cat(paste("\n\nyou may want to upgrade R from the version you are using [",current.Rversion,"] to the latest version of [",latest.Rversion,"]\n",sep=""));
cat(paste("\n\nyou may want to upgrade R from the version you are using [",current.Rversion,"] to the latest version of [",latest.Rversion,"]\n",sep=""),file=FL.LOG,append=TRUE);
}else{
cat(paste("\n\nThe R version you are using is the latest version released so far!\n",sep=""))
cat(paste("\n\nThe R version you are using is the latest version released so far!\n",sep=""),file=FL.LOG,append=TRUE)
}
# get the version information of the attached libraries
cat(paste("\n\nThe information of the packages you used for this calculation:\n"))
cat(paste("\n\nThe information of the packages you used for this calculation:\n"),file=FL.LOG,append=TRUE)
tmp <- sessionInfo()
pkg.loaded <- tmp$otherPkgs
no.pkg.loaded <- length(pkg.loaded)
for (i in seq(1,no.pkg.loaded))
{
cat(paste(pkg.loaded[[i]]$Package,":",pkg.loaded[[i]]$Version," ",pkg.loaded[[i]]$Date,"\n",sep=" "))
cat(paste(pkg.loaded[[i]]$Package,":",pkg.loaded[[i]]$Version," ",pkg.loaded[[i]]$Date,"\n",sep=" "),file=FL.LOG,append=TRUE)
}
|
c3b06429a919009508c5c5ee498043ec86c4798e
|
17bdadb3129f95f357ed0a4b18a9b58083f6aa57
|
/Assignment 3/BoatRace_SSM_Analysis.R
|
cdcb35ad3127cf1cec3938ac52ce4a04e30b18a2
|
[] |
no_license
|
alexanderkwok17/Advanced-Time-Series-Application
|
a10b7ab45764a6c32d78511086f42d8bb5a06be3
|
b105da7a0c064dd26fb0c22c30576f1c09285305
|
refs/heads/master
| 2020-03-23T06:52:19.199250
| 2018-07-17T05:13:17
| 2018-07-17T05:13:17
| 141,234,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,023
|
r
|
BoatRace_SSM_Analysis.R
|
# Initialize packages and functions
library(KFAS)
library(numDeriv)
library(lme4)
source("BinaryFunctions.R")
plots_in_pdf<-FALSE
# Read in the Boat Race data:
BoatRace <- read.csv("boatrace.csv")
BoatRace$Ox_Win<-1-BoatRace$Cam_Win
nobs<-length(BoatRace$Cam_Win)
X<-cbind(rep(1,nobs),BoatRace$Wgt_Diff)
colnames(X)<-c("Incpt","Wgt_Diff")
Y<-BoatRace$Cam_Win
# Plot main series:
plot(BoatRace$Year,BoatRace$Cam_Win, ylab="Cambridge Win")
title("Time series of wins by Cambridge \n Cambridge vs Oxford Boat Race")
abline(h=0.5)
ts.plot(ts(BoatRace$Wgt_Diff,start=BoatRace$Year[1]), ylab="Weight Difference")
title("Time series of Team Weight Difference (Winner - Loser)\n Cambridge vs Oxford Boat Race")
# Klingenberg report simple logistic model estimate of weight effect
# equals 0.118, with a s.e. of 0.036. Klingeberg uses data to 2007.
# log Likelihood value is reported as -98 in Klingenberg.
# Matches output from:
summary(glm(cbind(Cam_Win,Ox_Win)~Wgt_Diff, data=BoatRace, family="binomial", subset= Year<= 2007))
logLik(glm(cbind(Cam_Win,Ox_Win)~Wgt_Diff, data=BoatRace, family="binomial", subset= Year<= 2007))
# GLM for data to 2011 -- is this the data used in Durbin and Koopman Section 14.6???
BoatRace_glm<-glm(cbind(Cam_Win,Ox_Win)~Wgt_Diff, data=BoatRace, family="binomial")
summary(BoatRace_glm)
logLik(BoatRace_glm)
plot(BoatRace$Year,BoatRace$Cam_Win, ylab="Cambridge Win")
lines(BoatRace$Year[!is.na(BoatRace$Cam_Win)],BoatRace_glm$fitted.values)
title("Time series of wins by Cambridge \n with logistic regression fitted values")
abline(h=0.5)
abline(h=mean(BoatRace_glm$fitted.values),col="purple")
# Can this be improved by using an autoregressive latent process??
# Can we detect serial dependence by standard residual analysis?
BoatRace$glm_Resids<-rep(NA,dim(BoatRace)[1])
BoatRace$glm_Resids[!is.na(BoatRace$Cam_Win)]<-residuals(BoatRace_glm,type="pearson")
acf(BoatRace$glm_Resids,na.action=na.pass)
# ACF of Pearson residuals suggests very little serial dependence.
# Yet, Klingenberg uses an ar(1) latent process with rho (our phi) = 0.69
# Fitting using 'glmer' ( Same as GLM! in this case....)
case<-rep(1:nobs,each=1)
(gm1 <- glmer(cbind(Cam_Win,Ox_Win) ~ Wgt_Diff + (1 | case),data=BoatRace,
family = binomial,nAGQ=3))
# Using KFAS functions to set up state space model
phi<-0.69
sigma2<-2.03^2
BoatRaceSSM<-SSModel(Cam_Win~Wgt_Diff+SSMarima(ar=phi, Q=sigma2), u=rep(1,nobs),
data=BoatRace, distribution='binomial')
BoatRace_approxSSM<-approxSSM(BoatRaceSSM,theta=0, maxiter=500,tol=1e-10)
print(BoatRace_approxSSM$iterations)
logL_BoatRace_approxSSM<-logLik(BoatRace_approxSSM)
BoatRaceSSM_KFS<-KFS(BoatRaceSSM, filtering = c('state','signal'),smoothing=c('state','signal'),theta=0, maxiter=500)
beta<-BoatRaceSSM_KFS$alphahat[1,1:2]
Xbeta<-X%*%beta
if(plots_in_pdf == TRUE) pdf("Polio/Figure8_1.pdf")
par(mfrow=c(1,1))
ts.plot(ts(BoatRace$Cam_Win,start=BoatRace$Year[1]), type="p", ylab="Prob")
pihats_1<-1-1/(1+exp(BoatRace_approxSSM$thetahat))
pihats_2<-1-1/(1+exp(Xbeta+BoatRaceSSM_KFS$alphahat[,3]))
pihats_3<-1-1/(1+exp(Xbeta))
points(ts(pihats_1,start=1829),col="blue",pch="+")
lines(ts(pihats_1,start=1829),col="blue")
lines(ts(pihats_2,start=1829),col="red")
lines(ts(pihats_3,start=1829),col="green")
if(plots_in_pdf == TRUE) dev.off()
if(plots_in_pdf == TRUE) pdf("Figure.pdf")
par(mfrow=c(2,1))
ts.plot(BoatRaceSSM_KFS$alphahat[,3],col="purple")
abline(h=0)
acf(BoatRaceSSM_KFS$alphahat[,3])
if(plots_in_pdf == TRUE) dev.off()
# Using function to get mode.
# First set up approximate Gaussian model....
thetatilde<-rep(0,nobs)
pitilde<-1-1/(1+exp(Xbeta+thetatilde))
pitilde[is.na(pitilde)==TRUE]<-0.5
A<-pitilde*(1-pitilde)
Ytilde<-thetatilde + A*(Y-pitilde)
ts.plot(Ytilde)
# Testing if arbitrary value for system noise H when A missing matters
# IT DOES NOT....
# BUG FIX: the original used the undefined variable `A_noNA`, which makes
# sourcing the script fail here.  `A` (computed above from pitilde, after NAs
# in pitilde have been replaced by 0.5) appears to be the intended vector --
# TODO confirm against the author's original, but it contains no NAs here.
BoatRace_approx<-SSModel(Ytilde~-1+SSMarima(ar=phi, Q=sigma2), H=array(A,c(1,1,length(Y))))
is.SSModel(BoatRace_approx)
out<-KFS(BoatRace_approx,smoothing=c('signal'))
out$logLik
ts.plot(out$muhat)
points(out$muhat)
# run function to get mode by direct implementation of Newton Raphson method...
# starting from zero - converges quickly..
if(plots_in_pdf == TRUE) pdf("X.pdf")
par(mfrow=c(1,1))
BoatRace_approx_mode<-GetModeBinary(Y,BoatRace_approx,Xbeta,
thetatilde_init=rep(0,nobs),maxiter=600, plot_iters=TRUE,tol=1e-10)
abline(h=0,col="red")
lines(BoatRace_approx_mode$thetatilde, col="blue")
if(plots_in_pdf == TRUE) dev.off()
points(BoatRaceSSM_KFS$alphahat[,3],col="red",pch="+")
print(BoatRace_approx_mode$results)
# run function to get mode by direct implementation of Newton Raphson method...
# starting from alphahat from above - very quick for same betas....
if(plots_in_pdf == TRUE) pdf("X.pdf")
par(mfrow=c(1,1))
BoatRace_approx_mode<-GetModeBinary(Y,BoatRace_approx,Xbeta,
thetatilde_init=BoatRaceSSM_KFS$alphahat[,3],maxiter=600, plot_iters=TRUE,tol=1e-10)
abline(h=0,col="red")
lines(BoatRace_approx_mode$thetatilde, col="blue")
if(plots_in_pdf == TRUE) dev.off()
points(BoatRaceSSM_KFS$alphahat[,3],col="red",pch="+")
print(BoatRace_approx_mode$results)
# run function to get mode by direct implementation of Newton Raphson method...
# using method for initializing in approxSSM -- very fast
thetatilde_init<-qlogis((ifelse(is.na(BoatRaceSSM$y[,1]), 0.5,
BoatRaceSSM$y[, 1]) + 0.5)/(BoatRaceSSM$u[, 1] +1))
ts.plot(thetatilde_init)
points(thetatilde_init)
if(plots_in_pdf == TRUE) pdf("X.pdf")
par(mfrow=c(1,1))
BoatRace_approx_mode<-GetModeBinary(Y,BoatRace_approx,Xbeta,
thetatilde_init=thetatilde_init,maxiter=800, plot_iters=TRUE,tol=1e-10)
abline(h=0,col="red")
lines(BoatRace_approx_mode$thetatilde, col="blue")
if(plots_in_pdf == TRUE) dev.off()
points(BoatRaceSSM_KFS$alphahat[,3],col="red",pch="+")
print(BoatRace_approx_mode$results)
# Can we replicate Klingenberg estimates:
# Klingenberg report simple logistic model estimate of weight effect
# equals 0.118, with a s.e. of 0.036. Klingeberg uses data to 2007.
# log Likelihood value is reported as -98 in Klingenberg.
# Matches output from:
summary(glm(cbind(Cam_Win,Ox_Win)~Wgt_Diff, data=BoatRace, family="binomial", subset= Year<= 2007))
logLik(glm(cbind(Cam_Win,Ox_Win)~Wgt_Diff, data=BoatRace, family="binomial", subset= Year<= 2007))
# Fitting using 'glmer' ( Same as GLM! in this case....)
case<-rep(1:dim(BoatRace)[1],each=1)
(gm1 <- glmer(cbind(Cam_Win,Ox_Win) ~ Wgt_Diff + (1 | case),data=BoatRace,
family = binomial,nAGQ=3,subset=Year<=2007))
####################################################
# Laplace and DK approximations.
#####################################################
# BUG FIX: the banner text above was a bare (uncommented) line in the
# original, which is a parse error and prevented the whole script from
# being sourced.  It is now a comment.
# Data to 2007:
BoatRace07<-BoatRace[BoatRace$Year<=2007,]
nobs<-length(BoatRace07$Cam_Win)
X<-cbind(rep(1,nobs),BoatRace07$Wgt_Diff)
colnames(X)<-c("Incpt","Wgt_Diff")
Y<-BoatRace07$Cam_Win
# Klingenberg reports estimates:
phi<-0.69
sigma2<-2.03^2*(1-phi^2)
beta<-c(0.250,0.139)
pars<-c(beta,atanh(phi),log(sigma2))
parnames<-(c("Incpt","Wgt_Diff","Phi","Sigma2"))
names(pars)<-parnames
nobs<-length(Y)
Xbeta<-X%*%beta
# Comparing the L_DK with L_LA
likfnBinary(pars,Y,X)
likfnLAapproxBinary(pars,Y,X)
likfnDKapproxBinary(pars,Y,X)
if(plots_in_pdf == TRUE) pdf("FigureX.pdf")
par(mfrow=c(2,2))
for(i in 1:4) {
parsgrid<-rep(NA,21)
likelihood<-rep(NA,21)
parsuse<-pars
for(j in 1:21) {
parsuse[i]<-pars[i]+(j-11)/50
parsgrid[j]<-pars[i]+(j-11)/50
likelihood[j]<-likfnLAapproxBinary(parsuse,Y,X)
}
plot(parsgrid,likelihood, type="l", ylab="logL_LA", xlab=names(pars[i]))
abline(v=pars[i],col="purple")
}
if(plots_in_pdf == TRUE) dev.off()
# Newton Raphson Iterations:
# Maximise the Laplace-approximated log-likelihood by Newton-Raphson.
# Gradient and Hessian are computed numerically via numDeriv's grad()/hessian();
# the NR step is pars - H^{-1} g, and standard errors are taken from the
# square root of the diagonal of the inverse negative Hessian.
nr_maxiter<-15
for(nr_iter in 1:nr_maxiter){
likfnLAapproxBinary(pars,Y,X)
likfnLAapprox_d1<- grad(likfnLAapproxBinary,pars,Y=Y,X=X)
likfnLAapprox_d2<- hessian(likfnLAapproxBinary,pars,Y=Y,X=X)
pars_new<-pars-solve(likfnLAapprox_d2,likfnLAapprox_d1)
# Print per-iteration diagnostics: current/updated parameters, gradient, SEs.
NRresults<-cbind(pars,pars_new,likfnLAapprox_d1,diag(-solve(likfnLAapprox_d2))^0.5)
colnames(NRresults)<-c("pars","pars_new","L_LA_d1","S.E.s")
print(NRresults)
# Update par estimates (if needed):
pars<-pars_new
names(pars)<-parnames
}
# Eventually converges to reasonable estimates..
phi<-artransform(pars[3])
sigma2<-exp(pars[4])
print(c(phi,sigma2))
print(likfnLAapproxBinary(pars,Y,X))
if(plots_in_pdf == TRUE) pdf("FigureX.pdf")
par(mfrow=c(2,2))
for(i in 1:4) {
parsgrid<-rep(NA,21)
likelihood<-rep(NA,21)
parsuse<-pars
for(j in 1:21) {
parsuse[i]<-pars[i]+(j-11)/50
parsgrid[j]<-pars[i]+(j-11)/50
likelihood[j]<-likfnLAapproxBinary(parsuse,Y,X)
}
plot(parsgrid,likelihood, type="l", ylab="logL_LA", xlab=names(pars[i]))
abline(v=pars[i],col="purple")
}
if(plots_in_pdf == TRUE) dev.off()
# Using 'optim':
# Starting at Klingenberg estimates FAILS
Xbeta<-X%*%beta
optim.out<-optim(par=pars,fn=likfnLAapproxBinary,control=list(fnscale=-1),
method="BFGS", Y=Y, X=X,hessian=TRUE)
cbind(optim.out$par,diag(-solve(optim.out$hessian))^0.5)
pars<-optim.out$par
phi<-artransform(pars[3])
sigma2<-exp(pars[4])
print(c(phi,sigma2))
# Applied to GLM estimates
# Does not coverge using NR or optim...
phi<-0.5
sigma2<-1
beta<-BoatRace_glm$coefficients
pars<-c(beta,atanh(phi),log(sigma2))
parnames<-(c("Incpt","Wgt_Diff","Phi","Sigma2"))
names(pars)<-parnames
nobs<-length(Y)
Xbeta<-X%*%beta
# Importance Sampling
# Klingenberg Estimates
phi<-0.69
sigma2<-2.03^2*(1-phi^2)
beta<-c(0.250,0.139)
pars<-c(beta,atanh(phi),log(sigma2))
parnames<-(c("Incpt","Wgt_Diff","Phi","Sigma2"))
names(pars)<-parnames
nobs<-length(Y)
Xbeta<-X%*%beta
likfnLAIS(pars,Y,X,N=5,antiflag=FALSE, plot_samples=TRUE)
|
659d851737be99f283db965fe54da81dc3137220
|
3a12f3171ca1af3941a2134fa422d09064be0ae1
|
/Read_Coefs.R
|
2ccd59a806be1128b281bcac2e165cb8d71970e6
|
[] |
no_license
|
srravula1/RF_PEF_AM
|
ecae0e0905378a82c88beb6df573b31d5721c182
|
08f4cbe0f5d3d2885e3c5bdec89be621fb4e3279
|
refs/heads/master
| 2020-12-13T22:13:01.781249
| 2019-03-19T21:53:24
| 2019-03-19T21:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
Read_Coefs.R
|
# Read fitted GAMLSS coefficient summaries from a saved RDS file and print
# rounded means/SDs for the VC (venture capital) model components.
library(gamlss)
# Quick sanity check of the gamlss distribution functions on [0, 1]:
# generalized gamma (pGG) and gamma (pGA) CDFs with unit parameters.
x <- seq(0,1,0.1)
pGG(x, mu = 1, sigma = 1, nu = 1)
pGA(x, mu = 1, sigma = 1)
# NOTE(review): hard-coded absolute user path -- this only runs on the
# author's machine; consider a relative path or a project-root helper.
setwd("C:/Users/christian.tausch/Dropbox/Project D/3_R_Pro_D/Exit_Dynamics/Code/CSV_output")
GA.list <- readRDS("GA.list20181023.RDS")
# Print mean and SD of the mu coefficients for model m0, then mu and sigma
# coefficients for model m1, each rounded to 3 decimals.
round(GA.list$VC$VC$coefs$m0$mu$Mean, 3)
round(GA.list$VC$VC$coefs$m0$mu$SD, 3)
round(GA.list$VC$VC$coefs$m1$mu$Mean, 3)
round(GA.list$VC$VC$coefs$m1$mu$SD, 3)
round(GA.list$VC$VC$coefs$m1$sigma$Mean, 3)
round(GA.list$VC$VC$coefs$m1$sigma$SD, 3)
# Iteration count recorded for the VC fit.
GA.list$VC$VC$Iter
|
d2db963b47221af909cde9b52b1d2ea556424015
|
69bc712bce655618928721a4e57e85b0500ec526
|
/man/nudge.plot.comp.Rd
|
49ef46ef460ad01d7ac4e1a7ab8beb5f6d487de2
|
[] |
no_license
|
cran/DIME
|
5cea66eee25305ed777f7ff9ce364ad7b53987d8
|
4c34813ded0a2333af020e6b9784f82edc648296
|
refs/heads/master
| 2022-05-17T09:27:54.289423
| 2022-05-09T13:50:13
| 2022-05-09T13:50:13
| 17,678,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,564
|
rd
|
nudge.plot.comp.Rd
|
\name{nudge.plot.comp}
\alias{nudge.plot.comp}
\title{
Plot NUDGE Individual Components
}
\description{
Plot each estimated individual components of NUDGE model
(mixture of uniform and 1-normal) fitted using \code{\link{nudge.fit}}.
}
\usage{
nudge.plot.comp(data, obj, new.plot = TRUE, legpos = NULL, xlim = NULL,
ylim = NULL, xlab = NULL, ylab = NULL, main = NULL, lwd = NULL, ...)
}
\arguments{
\item{data}{
an \strong{R list} of vector of normalized intensities (counts). Each element can
correspond to a particular chromosome. User can construct
their own list containing only the chromosome(s) they want to analyze.
}
\item{obj}{
a list object returned by \code{\link{nudge.fit}} function.
}
\item{new.plot}{
optional logical variable. If \code{TRUE} (default), the plot is drawn on a
new graphics frame; otherwise the components are added to the current plot.
}
\item{legpos}{
optional vector of (x,y) location for the legend position
}
\item{xlim}{
optional x-axis limit (see \code{\link{par}}).
}
\item{ylim}{
optional y-axis limit (see \code{\link{par}}).
}
\item{xlab}{
optional x-axis label (see \code{\link{par}}).
}
\item{ylab}{
optional y-axis label (see \code{\link{par}}).
}
\item{main}{
optional plot title (see \code{\link{par}}).
}
\item{lwd}{
optional line width for lines in the plot (see \code{\link{par}}).
}
\item{\dots}{
additional graphical arguments to be passed to methods (see \code{\link{par}}).
}
}
\details{
The components representing differential data are denoted by asterisk (*)
symbol on the plot legend.
}
\seealso{
\code{\link{nudge.plot.mix}}, \code{\link{inudge.plot.comp}},
\code{\link{nudge.plot.fit}}, \code{\link{nudge.plot.qq}},
\code{\link{DIME.plot.fit}}, \code{\link{gng.plot.fit}}.
}
\examples{
library(DIME);
# generate simulated datasets with underlying uniform and 1-normal components
set.seed(1234);
N1 <- 1500; N2 <- 500; rmu <- c(1.5); rsigma <- c(1);
rpi <- c(.10,.90); a <- (-6); b <- 6;
chr1 <- c(-runif(ceiling(rpi[1]*N1),min = a,max =b),
rnorm(ceiling(rpi[2]*N1),rmu[1],rsigma[1]));
chr4 <- c(-runif(ceiling(rpi[1]*N2),min = a,max =b),
rnorm(ceiling(rpi[2]*N2),rmu[1],rsigma[1]));
# analyzing chromosome 1 and 4
data <- list(chr1,chr4);
# fit NUDGE model with maximum iterations = 20
set.seed(1234);
bestNudge <- nudge.fit(data, max.iter=20);
# plot individual components of NUDGE
nudge.plot.comp(data,bestNudge);
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ aplot }
\keyword{ dplot }% __ONLY ONE__ keyword per line
|
0ad01eb22c6a04c8f85e1a8a0c014e7335d9387a
|
bdacaddf5b14c4479373825d2dbdc2fdd4b11cf7
|
/docker/R/lib/create_and_run_datapoint.R
|
c125017fa3e2398c9eb9d83c822f7288281f4279
|
[
"BSD-2-Clause"
] |
permissive
|
NREL/OpenStudio-server
|
15213b113ed491303b2b83b050d6e087ada7cb60
|
eb82244f9fd0f49429b96a1b52ee1ab17ffe789a
|
refs/heads/develop
| 2023-08-17T12:30:12.551774
| 2023-08-10T12:31:01
| 2023-08-10T12:31:01
| 11,376,563
| 38
| 26
|
NOASSERTION
| 2023-08-10T12:31:03
| 2013-07-12T19:36:24
|
Ruby
|
UTF-8
|
R
| false
| false
| 9,804
|
r
|
create_and_run_datapoint.R
|
# *******************************************************************************
# OpenStudio(R), Copyright (c) Alliance for Sustainable Energy, LLC.
# See also https://openstudio.net/license
# *******************************************************************************
# create_and_run_datapoint(x) such that x is vector of variable values,
# create a datapoint from the vector of variable values x and run
# the new datapoint
# x: vector of variable values
#
# Several of the variables are in the sessions. The list below should abstracted out:
# rails_host
# rails_analysis_id
# ruby_command
# r_scripts_path
# create_and_run_datapoint(x)
#
# Create an OpenStudio-server datapoint from the vector of variable values `x`,
# submit it for simulation via the Rails API (api_create_datapoint.rb), wait
# for a JSON response, and return the (scaled) distance between the returned
# objective-function values and their targets.
#
# Arguments:
#   x - numeric vector of variable values for one datapoint.
#
# Returns:
#   numeric objective (dist between scaled objective values and targets), or
#   `failed_f` when the datapoint fails or results are missing.  May also
#   stop() the whole optimisation when the run flag is cleared or an
#   exit-on-guideline-14 condition is met.
#
# NOTE(review): this function depends on many session-level globals --
# rails_host, rails_analysis_id, r_scripts_path, debug_messages, failed_f,
# objDim, normtype, ppower, analysis_dir, rails_exit_guideline_14 -- and on
# helpers check_run_flag()/check_guideline14() defined elsewhere.  It also
# mutates global options() (warn, digits, scipen) as a side effect.
create_and_run_datapoint <- function(x){
options(warn=-1)
# Abort the whole optimisation if the server-side run flag has been cleared.
if (check_run_flag(r_scripts_path, rails_host, rails_analysis_id, debug_messages)==FALSE){
options(warn=0)
stop(options("show.error.messages"=FALSE),"run flag set to FALSE")
}
# convert the vector to comma separated values
force(x)
w <- paste(x, collapse=",")
y <- paste(r_scripts_path,'/api_create_datapoint.rb -h ',rails_host,' -a ',rails_analysis_id,' -v ',w,' --submit',sep='')
if(debug_messages == 1){
print(paste('run command: ruby ', y))
}
# Submit-and-poll loop: retry the API call until it returns valid JSON with a
# definitive status, up to 10 attempts (see the counter checks below).
counter <- 1
repeat{
Sys.sleep(5)
# Call the system command to submit the simulation to the API / queue
z <- system2("ruby",y, stdout = TRUE, stderr = TRUE)
z <- z[length(z)]
if(debug_messages == 1){
print(paste("Create and Run Datapoint ",counter," z:",z))
}
#check if return status is not NULL (means errors and z is not a json)
if(!is.null(attr(z, "status"))) {
counter = counter + 1
next
}
#z is a json so check status
json <- try(fromJSON(z), silent=TRUE)
if(debug_messages == 1){
print(paste('json:',json))
}
#if json$status is failed, then exit
if(json$status == 'failed'){
print("Datapoint Failed")
break
}
#if json$status is false, then try again
if(!isTRUE(json$status)){
counter = counter + 1
#only do this 10 times
if(counter > 10){break}
next
}
#if gotten this far then json is good
if(debug_messages == 1){
print(paste("Success ",counter))
}
#only do this 10 times and should be good at this point so break
break
}
#THIS PATH DOESNT EXIST on Workers. THIS IS RUNNING ON RSERVE_1
data_point_directory <- paste('/mnt/openstudio/analysis_',rails_analysis_id,'/data_point_',json$id,sep='')
if(debug_messages == 1){
print(paste("data_point_directory:",data_point_directory))
}
if(!dir.exists(data_point_directory)){
dir.create(data_point_directory)
if(debug_messages == 1){
print(paste("data_point_directory created: ",data_point_directory))
}
}
## save off the variables file (can be used later if number of vars gets too long)
if (dir.exists(data_point_directory)) {
write.table(x, paste(data_point_directory,"/input_variables_from_r.data",sep=""),row.names = FALSE, col.names = FALSE)
} else {
print(paste("data_point_directory does not exist! ",data_point_directory))
}
#if json$status is FALSE then datapoint status is false
if (!isTRUE(json$status)) {
print(paste("json$status is false, RETURNING: ",failed_f))
options(warn=0)
return(failed_f)
} else {
if (is.null(json$results)) {
obj <- failed_f
print("json$results is NULL")
} else {
# Collect objective_function_i, objective_function_target_i and
# scaling_factor_i for i in 1..objDim; missing values fall back to
# failed_f / 0.0 / 1.0 respectively, and a zero scaling factor is
# coerced to 1.0 to avoid dividing by zero below.
obj <- NULL
objvalue <- NULL
objtarget <- NULL
sclfactor <- NULL
for (i in 1:objDim){
objfuntemp <- paste("objective_function_",i,sep="")
if (json$results[objfuntemp] != "NULL"){
objvalue[i] <- as.numeric(json$results[objfuntemp])
} else {
objvalue[i] <- failed_f
cat(data_point_directory," Missing ", objfuntemp,"\n");
}
objfuntargtemp <- paste("objective_function_target_",i,sep="")
if (json$results[objfuntargtemp] != "NULL"){
objtarget[i] <- as.numeric(json$results[objfuntargtemp])
} else {
objtarget[i] <- 0.0
}
scalingfactor <- paste("scaling_factor_",i,sep="")
sclfactor[i] <- 1.0
if (json$results[scalingfactor] != "NULL"){
sclfactor[i] <- as.numeric(json$results[scalingfactor])
if (sclfactor[i] == 0.0) {
print(paste(scalingfactor," is ZERO, overwriting\n"))
sclfactor[i] <- 1.0
}
} else {
sclfactor[i] <- 1.0
}
}
# NOTE(review): these change global print options for the rest of the
# session, not just this function.
options(digits=8)
options(scipen=-2)
if(debug_messages == 1){
print(paste("Objective function results are:",objvalue))
print(paste("Objective function targets are:",objtarget))
print(paste("Objective function scaling factors are:",sclfactor))
}
# Objective = norm (method `normtype`, power `ppower`) of the distance
# between the scaled objective values and their scaled targets.
objvalue <- objvalue / sclfactor
objtarget <- objtarget / sclfactor
obj <- force(eval(dist(rbind(objvalue,objtarget),method=normtype,p=ppower)))
if(debug_messages == 1){
print(paste("Objective function Norm:",obj))
}
if(debug_messages == 1){
print(paste("rails_exit_guideline_14:",rails_exit_guideline_14))
}
# Check if exit on guideline 14 is enabled
# Modes: 1 = require all four (electricity + natural gas) CVRMSE/NMBE
# limits; 2 = electricity limits only; 3 = natural gas limits only.
# When satisfied, write best_result.json + convergence_flag.json and
# stop() to terminate the optimisation (stop is used as control flow).
#if (rails_exit_guideline_14){
if (rails_exit_guideline_14 %in% c(1,2,3)) {
guide <- check_guideline14(r_scripts_path, rails_host, json$id, debug_messages)
if(debug_messages == 1){
print(paste("guide:",guide))
}
# read in the results from the guideline14 file
#TODO this path will not work
#guideline_file <- paste(data_point_directory,"/reports/calibration_reports_enhanced_21_report_guideline.json",sep="")
#json <- NULL
#try(json <- fromJSON(file=guideline_file), silent=TRUE)
if (is.null(guide)) {
print("no guideline14 return ")
} else if (!is.recursive(guide)) {
print(paste("guideline14 return is not a json: ",guide))
} else {
#guideline <- json[[1]]
#for (i in 2:length(json)) guideline <- cbind(guideline,json[[i]])
print(paste("guide: ",guide))
#print(paste("isTRUE(guideline): ",isTRUE(guideline)))
#print(paste("all(guideline): ",all(guideline)))
#if (length(which(guideline)) == objDim){
#if (guide$electricity_cvrmse_within_limit == 1 && guide$electricity_nmbe_within_limit == 1 && guide$natural_gas_cvrmse_within_limit == 1 && guide$natural_gas_nmbe_within_limit == 1) {
if (rails_exit_guideline_14 == 1) {
if (guide$electricity_cvrmse_within_limit == 1 && guide$electricity_nmbe_within_limit == 1 && guide$natural_gas_cvrmse_within_limit == 1 && guide$natural_gas_nmbe_within_limit == 1) {
#write final params to json file
write_filename <- paste(analysis_dir,'/varnames.json',sep='')
varnames <- scan(file=write_filename, what=character())
answer <- paste('{',paste('"',gsub(".","|",varnames, fixed=TRUE),'"',': ',x,sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/best_result.json',sep='')
write.table(answer, file=write_filename, quote=FALSE,row.names=FALSE,col.names=FALSE)
convergenceflag <- paste('{',paste('"',"exit_on_guideline_14",'"',': ',"true",sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/convergence_flag.json',sep='')
write(convergenceflag, file=write_filename)
options(warn=0)
stop(options("show.error.messages"=FALSE),"exit_on_guideline_14")
}
} else if (rails_exit_guideline_14 == 2) {
if (guide$electricity_cvrmse_within_limit == 1 && guide$electricity_nmbe_within_limit == 1) {
#write final params to json file
write_filename <- paste(analysis_dir,'/varnames.json',sep='')
varnames <- scan(file=write_filename, what=character())
answer <- paste('{',paste('"',gsub(".","|",varnames, fixed=TRUE),'"',': ',x,sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/best_result.json',sep='')
write.table(answer, file=write_filename, quote=FALSE,row.names=FALSE,col.names=FALSE)
convergenceflag <- paste('{',paste('"',"exit_on_guideline_14",'"',': ',"true",sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/convergence_flag.json',sep='')
write(convergenceflag, file=write_filename)
options(warn=0)
stop(options("show.error.messages"=FALSE),"exit_on_guideline_14")
}
} else if (rails_exit_guideline_14 == 3) {
if (guide$natural_gas_cvrmse_within_limit == 1 && guide$natural_gas_nmbe_within_limit == 1) {
#write final params to json file
write_filename <- paste(analysis_dir,'/varnames.json',sep='')
varnames <- scan(file=write_filename, what=character())
answer <- paste('{',paste('"',gsub(".","|",varnames, fixed=TRUE),'"',': ',x,sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/best_result.json',sep='')
write.table(answer, file=write_filename, quote=FALSE,row.names=FALSE,col.names=FALSE)
convergenceflag <- paste('{',paste('"',"exit_on_guideline_14",'"',': ',"true",sep='', collapse=','),'}',sep='')
write_filename <- paste(analysis_dir,'/convergence_flag.json',sep='')
write(convergenceflag, file=write_filename)
options(warn=0)
stop(options("show.error.messages"=FALSE),"exit_on_guideline_14")
}
}
}
}
}
}
options(warn=0)
return(as.numeric(obj))
}
|
36966c3bd1835de937cfaa67f8bdff2c10d291f0
|
c9ce1ee0f684de1c9cb231c41263cee4f0ef7251
|
/cachematrix.R
|
ef898cd8b0cce26025ba7129a654680c9d694c89
|
[] |
no_license
|
lenwood/ProgrammingAssignment2
|
8d77fa8d420c9a371ad9e4b640e1ea80b327a3b9
|
ccde607ab87c80947aa5661c7dd7701cc2592204
|
refs/heads/master
| 2021-01-17T23:49:20.395140
| 2014-07-28T00:19:16
| 2014-07-28T00:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
cachematrix.R
|
# Construct a cache-aware matrix wrapper: holds a matrix together with a
# memoised copy of its inverse, exposed through getter/setter closures.
# Returns a list with set/get/setInverse/getInverse functions.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix; any previously cached inverse is now stale.
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a computed inverse for later reuse.
  setInverse <- function(inverse) cached_inverse <<- inverse
  # Return the cached inverse, or NULL if none has been stored yet.
  getInverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
# Either return the cached value of an inverse matrix or
# invert and cache the matrix inverse.
#
# x   : a cache-aware matrix created by makeCacheMatrix() (assumed invertible)
# ... : additional arguments forwarded to solve()
# Returns the inverse of the matrix stored in 'x'.
cacheSolve <- function(x, ...) {
# return a matrix that is the inverse of 'x'
m <- x$getInverse()
# check to see if the inverse if cached
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# if the inverse value is not cached, invert the matrix
data <- x$get()
m <- solve(data, ...)
# cache the value of the inverse
x$setInverse(m)
# BUG FIX: the original ended with x$setInverse(m), whose value (an
# assignment result) is returned invisibly; return the inverse explicitly
# so the caller always sees the result.
m
}
|
7fdda28913a3dc7c306a177d3c2a8a3e627c8e10
|
45b6cd78a73551a3d78d744f1412c7b458cd2705
|
/run_analysis.R
|
9ac56e477839755c0fcadd4746b6061f33e9ce41
|
[] |
no_license
|
k-caldwell/GettingAndCleaningData-Week4
|
41dcbe6ecdb7405e7e3af9cfa5019c6ac6180ad5
|
33f3918d75030832fc62685307ca0c0a19604c46
|
refs/heads/master
| 2021-06-28T22:47:01.668626
| 2017-09-16T21:44:21
| 2017-09-16T21:44:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,094
|
r
|
run_analysis.R
|
# run_analysis.R
# Builds a tidy summary of the UCI HAR smartphone dataset: merges the
# train/test splits, keeps only the mean()/std() features, attaches
# descriptive activity labels, then averages each feature per
# activity/subject pair and writes the result to "tidydata.txt".
library(dplyr)
# NOTE(review): rm(list=ls()) clears the caller's entire workspace; common in
# course scripts but an anti-pattern for anything reusable.
rm(list=ls())
#The "UCI HAR Dataset" folder needs to be placed in the active directory
fp <- file.path(getwd(),'UCI HAR Dataset')
# read in data (file.path() for OS control)
X_train <- read.table(file.path(fp, "train", "X_train.txt"))
X_test <- read.table(file.path(fp, "test", "X_test.txt"))
Y_train <- read.table(file.path(fp, "train", "Y_train.txt"))
Y_test <- read.table(file.path(fp, "test", "Y_test.txt"))
subject_train <- read.table(file.path(fp, "train", "subject_train.txt"))
subject_test <- read.table(file.path(fp, "test", "subject_test.txt"))
# read in descriptive variable names from "features.txt" file
features <- read.table(file.path(fp, "features.txt"))
var_names <- features[,2]
# read in descriptive activity identifiers
activity_ids <- read.table(file.path(fp, "activity_labels.txt"))
activity_ids <- activity_ids[,2]
# Merge training and test data (rows of test appended below train)
X_merged <- rbind(X_train, X_test)
Y_merged <- rbind(Y_train, Y_test)
subject_merged <- rbind(subject_train, subject_test)
# Pull only the mean and standard deviation features; the escaped "(" keeps
# out derived columns like meanFreq() that merely contain the word "mean".
mean_or_std_cols <- grep("mean\\(|std\\(", var_names)
good_var_names <- var_names[mean_or_std_cols]
X_merged_mean_or_std <- X_merged[mean_or_std_cols]
# Use descriptive activity names and give columns descriptive variable names
colnames(Y_merged) <- "activity_id"
activity <- factor(Y_merged$activity_id, labels = as.character(activity_ids))
colnames(X_merged_mean_or_std) <- good_var_names
# Create the tidy data with average of mean and standard deviation
# for each activity and subject
colnames(subject_merged) <- "subject_id"
good_data <- cbind(X_merged_mean_or_std, activity, subject_merged)
tidy_data <- group_by(good_data, activity, subject_id) %>%
    summarize_all(mean)
# save the (final) tidy data to a text file, it will be placed within the
# "UCI HAR Dataset" folder called "tidydata.txt"
write.table(tidy_data, file = file.path(fp, "tidydata.txt"), row.names = FALSE,
            col.names = TRUE)
# check to make sure it saved properly
#f <- read.table(file.path(fp, "tidydata.txt"), header = TRUE)
#head(f[,1:3])
|
40280168a91aecc5e68feca95ce3a4b60a9bc25c
|
73deec41a93b532ae0583e0a90b766f2c3903b43
|
/man/boot_ordination.Rd
|
4f47ef0b585cb6c8fc75bdd120ba831662853315
|
[] |
no_license
|
nlhuong/mvarVis
|
1b0931b1a78b6b1a97a55009fa988938a040d779
|
b6aaf910d944796e18d6e8795d3db0aa8829e4f4
|
refs/heads/master
| 2021-01-18T03:13:41.845527
| 2016-03-28T04:53:11
| 2016-03-28T04:53:11
| 54,500,551
| 0
| 0
| null | 2016-03-22T18:50:20
| 2016-03-22T18:50:20
| null |
UTF-8
|
R
| false
| true
| 1,541
|
rd
|
boot_ordination.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap.R
\name{boot_ordination}
\alias{boot_ordination}
\title{Bootstrap ordination.}
\usage{
boot_ordination(D, n = 50, method = "ade4_pca", dist_method = "euclidean",
rows_annot = NULL, cols_annot = NULL, table_names = NULL,
common_depth = FALSE, replace_zero = FALSE, round = FALSE, ...)
}
\arguments{
\item{D}{(Required). A data frame of raw counts/weights. Raw data
is required, and distance matrix/objects are not acceptable.}
\item{n}{(Optional). Default 50. An integer indicating the number of
bootstrap samples generated.}
\item{method}{(Required). The method among those listed above that will
perform the required ordination.}
\item{dist_method}{(Optional). If a distance matrix is used by the specified
method. We will call \code{vegdist} on the \code{D} using this string as
the distance.}
}
\value{
A list of ordination objects generated by calling
\code{mvarVis::ordi} on each of the bootstrap samples, and then
rotating the coordinates using the Procrustes method to fit them to the
coordinates of the ordination of the original data table.
}
\description{
\code{boot_ordination} computes the ordination for samples bootstrapped
from data in \code{D}. Ordination is performed using the \code{mvarVis::ordi}
function with the specified \code{method}.
}
\examples{
D <- matrix(runif(100, max = 100), nrow = 25)
bootOrd <- boot_ordination(D, n = 50, method = "ade4_pca",
dist_method = "euclidean")
}
|
6d1ca34e633e796266d0d43b77583ac6110e88a0
|
a48797beca55474d7b39676389f77f8f1af76875
|
/man/node_signature.Rd
|
12d39f70ca7bbf3cf5623049c7236d472a4b0e1e
|
[] |
no_license
|
uqrmaie1/admixtools
|
1efd48d8ad431f4a325a4ac5b160b2eea9411829
|
26759d87349a3b14495a7ef4ef3a593ee4d0e670
|
refs/heads/master
| 2023-09-04T02:56:48.052802
| 2023-08-21T21:15:27
| 2023-08-21T21:15:27
| 229,330,187
| 62
| 11
| null | 2023-01-23T12:19:57
| 2019-12-20T20:15:32
|
R
|
UTF-8
|
R
| false
| true
| 708
|
rd
|
node_signature.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toposearch.R
\name{node_signature}
\alias{node_signature}
\title{Returns a signature of a graph consisting of the left and right descendant leaf nodes of each internal node (sorted and concatenated)}
\usage{
node_signature(graph)
}
\arguments{
\item{graph}{An admixture graph}
}
\value{
A graph signature as character vector
}
\description{
Can be used to determine how often internal nodes occur in a list of other well fitting models
}
\examples{
\dontrun{
sigs = example_winners \%>\% mutate(sig = map(igraph, node_signature)) \%$\%
sig \%>\% unlist \%>\% table \%>\% c
node_signature(example_winners$igraph[[1]])
}
}
|
fbe1a04f1b4205a5a3bba9c2538f7649aad1fed7
|
cdc811d8888afd524083b1b361fe2a62fa0a4bf4
|
/R/getExtraPackages.R
|
d29e04160c5c00b9868114fd8fb88510dcf50b2f
|
[] |
no_license
|
helenaK/parallelMap
|
7cc2f8ce495b48226afdc98e4e1af12ded4920ae
|
31d9da26defdc388aad5e53383d26485c95a3a2e
|
refs/heads/master
| 2021-01-14T10:36:58.316421
| 2016-08-22T16:49:05
| 2016-08-22T16:49:05
| 63,674,773
| 0
| 0
| null | 2016-07-19T08:13:43
| 2016-07-19T08:13:43
| null |
UTF-8
|
R
| false
| false
| 153
|
r
|
getExtraPackages.R
|
# Map a parallelMap execution mode to the extra package that mode requires.
# Returns character(0) for modes with no additional dependency.
# NOTE: MODE_MPI and MODE_BATCHJOBS are constants defined elsewhere in the
# package.
getExtraPackages = function(mode) {
  pkg = character(0L)
  if (mode %in% MODE_MPI) {
    pkg = "Rmpi"
  } else if (mode %in% MODE_BATCHJOBS) {
    pkg = "BatchJobs"
  }
  pkg
}
|
1958231942318190f63191860fc454037d62a42b
|
fc41c0a908e332b2dc021f5aca4a17ca7d58803e
|
/R/likert_add_fullnames.df.R
|
037ce8a060c309d1b09be95a355b6caf697e2cb2
|
[] |
no_license
|
MHS-R/mhs
|
92ba9bb646a1f55fcf06638773cad714547de108
|
b7593333dc638634eb9adcc3f9fec8f4644d9590
|
refs/heads/master
| 2020-01-27T10:00:19.252327
| 2019-03-11T14:23:00
| 2019-03-11T14:23:00
| 66,571,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,833
|
r
|
likert_add_fullnames.df.R
|
#' Add names to likert graphs
#'
#' Takes a list of dataframe column names, and long names, and stores the long names in the attribute 'fullname'. Then, the original names of the original dataframe get renamed with the full names.
#' This is to be used with the 'likert' package. e.g., When plotting, item names should appear with item content description if this function is used.
#'Example:
#'db <- add_likert_fullnames(db, c(
#' 'X7'='Do you use sites like Facebook, Twitter, or GPlus?',
# 'X8'='Do you participate in online communities organised around your
# interests?',
#' 'X10'='Do you know of online communities relevant to your discipline or the courses you are taking now?'))
#'
#' See sample script in G:/R&D/_PRIVATE SHARED/Psychometric templates/Sample script for Likert Plots.R
#' @param rawDf; a data frame with named columns
#' @param levels; a numeric vector indicating the number of levels per item
#' @param fnames; a character vector providing the item content names to be placed in the column names.
#' @export likert_add_fullnames.df
likert_add_fullnames.df <- function(rawDf, levels = 1:6, fnames) {
  # Recode every column as a factor on the shared Likert response scale so
  # the result is directly usable by the 'likert' package.
  out <- data.frame(lapply(rawDf, factor, levels = levels))

  # Full item descriptions must be unique, otherwise the rename below would
  # produce duplicated column names.
  if (length(fnames) > length(unique(fnames))) {
    stop("All names must be unique")
  }

  # Stash each item's content description in a 'fullname' attribute on the
  # corresponding column...
  for (item in names(fnames)) {
    attr(out[[item]], "fullname") <- fnames[[item]]
  }

  # ...then rename every column that carries such an attribute with its
  # full description, so plots show item content instead of codes.
  for (col in names(out)) {
    full_label <- attr(out[[col]], "fullname")
    if (!is.null(full_label)) {
      names(out)[names(out) == col] <- full_label
    }
  }
  out
}
|
99c4f427f31d312768f6951561a7be971bda4a84
|
37c9f1a02bc48d7d9f232b876bd214612f8ff123
|
/testCachematrix.R
|
1f7f5677b90f9c63113fd01d2eceea42a86b7474
|
[] |
no_license
|
baxteran/ProgrammingAssignment2
|
2a386a84ebd394bb7e746af047db59ec674d70e0
|
220522b4dad56f6fce2ab1932b450be0176c31ec
|
refs/heads/master
| 2020-12-26T10:46:47.624708
| 2014-04-24T19:44:28
| 2014-04-24T19:44:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 843
|
r
|
testCachematrix.R
|
## test my cachematrix.R
## Interactive smoke test for makeCacheMatrix()/cacheSolve(): exercises
## construction, first solve, cache hit, and cache invalidation after set().
# NOTE(review): setwd() to a personal absolute path makes this script
# non-portable; preferably run it from the project root instead.
setwd("~/Rprog/ProgrammingAssignment2")
source ("./cachematrix.R")
## make a simple square matrix
mdat <- matrix (c(1,2,3,4), nrow =2 , ncol =2)
## construct a cached matrix
myCachedMatrix <- makeCacheMatrix(mdat)
## confirm that it is correctly constructed
myCachedMatrix$get()
## call the solver once (computes and caches the inverse)
invM <- cacheSolve(myCachedMatrix)
invM
## call again to confirm caching (should print "getting cached data")
invM <- cacheSolve(myCachedMatrix)
invM
## test again with a 3x3 matrix
mdat2 <-matrix(c(5,6,7,8,9,0,1,2,3), nrow=3 , ncol=3)
myCachedMatrix$set(mdat2)
## check that the cache has been cleared by setting the matrix (expect NULL)
myCachedMatrix$getInverse()
## check that the matrix has been updated
myCachedMatrix$get()
## invoke the solver
invM <- cacheSolve(myCachedMatrix)
invM
## show that the new solution has been cached
myCachedMatrix$getInverse()
|
f6d4676b8ebe450a56abc01061b0193385afe42d
|
6656318be29e1b39b5ab20d6872a27dd9af923b2
|
/R/conversion.R
|
55a6d4066084773523743354271475ec3178aae7
|
[] |
no_license
|
mdfrias/downscaleR
|
4ceb27793d2a119860d18ed5bc48b90a02705250
|
a841f836eccae7ba749030b3a65b997745906a92
|
refs/heads/master
| 2021-01-16T22:53:43.767577
| 2015-08-18T09:19:39
| 2015-08-18T09:19:39
| 31,306,761
| 0
| 1
| null | 2015-02-25T09:42:57
| 2015-02-25T09:42:57
| null |
UTF-8
|
R
| false
| false
| 3,605
|
r
|
conversion.R
|
#' @title Specific humidity from relative humidity
#' @description Derive specific humidity from relative humidity
#'
#' @param tas surface air temperature (K)
#' @param ps surface pressure (Pa)
#' @param hurs surface relative humidity (\%)
#' @return Specific humidity (kg.kg-1)
#' @references Bohren & Albrecht (2000) Atmospheric thermodynamics. Oxford University Press. 402 pp
#' @author S. Herrera \email{sixto@@predictia.es}
#' @keywords internal
#' @export
#' @family conversion
hurs2huss <- function(tas, ps, hurs) {
  # Physical constants.
  dry_air_const <- 287.058   # Rd, dry air gas constant, J/(K kg)
  vapour_const <- 461.5      # Rv, water vapour gas constant, J/(K kg)
  freeze_temp <- 273.15      # T0, freezing point (K)
  ref_es <- 611              # es0, reference saturation pressure (Pa)

  # Saturation vapour pressure (Pa): separate empirical fits over ice
  # (T < T0) and liquid water (T >= T0), Bohren & Albrecht (2000, pp 197-200).
  es_ice <- ref_es *
    exp((6293 / freeze_temp) - (6293 / tas) - 0.555 * log(abs(tas / freeze_temp)))
  es_water <- ref_es *
    exp((6808 / freeze_temp) - (6808 / tas) - 5.09 * log(abs(tas / freeze_temp)))
  es <- ifelse(tas < freeze_temp, es_ice, es_water)

  # Saturation mixing ratio, actual mixing ratio (hurs is in percent),
  # then specific humidity w / (1 + w) in kg/kg.
  ws <- (dry_air_const / vapour_const) * (es / (ps - es))
  w <- ws * hurs * 0.01
  w / (1 + w)
}
# End
#################################################################################
#' @title Sea-level pressure to surface pressure
#' @description Conversion of sea-level pressure to surface pressure
#'
#' @param tas surface temperature
#' @param zs surface geopotential (m^2/s^2)
#' @param mslp sea-level pressure (Pa)
#' @return surface pressure (Pa)
#' @author S. Herrera \email{sixto@@predictia.es}
#' @keywords internal
#' @family conversion
#' @export
mslp2ps <- function(tas, zs, mslp) {
  Rd <- 287.058 # dry air constant J/(K kg)
  GammaST <- 0.0065 #(dT/dz)^st standard atmosphere vertical gradient of the temperature in the troposphere (0.0065 (K/m^-1))
  g <- 9.80665 # gravity (m/s^2)
  # Extrapolated sea-level temperature using the standard lapse rate.
  # NOTE(review): zs is a geopotential (m^2/s^2), so zs/g is height in m.
  To <- tas + GammaST * zs / g
  # Only grid points with non-negligible surface geopotential need correction;
  # for the rest ps is left equal to mslp.
  ind <- which(abs(zs) >= 0.001)
  # Case 1: sea-level temperature above 290.5 K but surface temperature not:
  # use a reduced lapse rate capped so To does not exceed 290.5 K.
  ind1 <- intersect(intersect(which(To > 290.5), which(tas <= 290.5)), ind)
  auxGamma <- mslp
  ps <- mslp
  auxGamma[ind1] <- g * (290.5 - tas[ind1]) / zs[ind1]
  ind <- setdiff(ind, ind1)
  # Case 2: both To and tas above 290.5 K: zero lapse rate and a damped
  # surface temperature (average with 255 K).
  ind1 <- intersect(intersect(which(To > 290.5), which(tas > 290.5)), ind)
  auxGamma[ind1] <- 0
  tas[ind1] <- 0.5 * (255 + tas[ind1])
  ind <- setdiff(ind, ind1)
  # Case 3: very cold surfaces (tas < 255 K): standard lapse rate with a
  # damped surface temperature. NOTE(review): this test uses tas values
  # possibly modified in case 2 — presumably intended, but worth confirming
  # against the original MatLab source.
  ind1 <- intersect(which(tas < 255), ind)
  auxGamma[ind1] <- GammaST
  tas[ind1] <- 0.5 * (255 + tas[ind1])
  ind <- setdiff(ind, ind1)
  # Remaining points: standard lapse rate, unmodified temperature.
  auxGamma[ind] <- GammaST
  # Hypsometric-style reduction with a 2nd-order expansion in
  # (Gamma * zs) / (g * tas), applied to all points with |zs| >= 0.001.
  ind <- which(abs(zs) >= 0.001)
  ps[ind] <- mslp[ind] * exp((-zs[ind] / (Rd * tas[ind])) * (1 - 0.5 * (auxGamma[ind] * zs[ind]) / (g * tas[ind]) + (1 / 3) * ((auxGamma[ind] * zs[ind]) / (g * tas[ind])) ^ 2))
  # Free the (possibly large) inputs before returning.
  tas <- NULL
  zs <- NULL
  mslp <- NULL
  auxGamma <- NULL
  return(ps)
}
# End
#################################################################################
#' @title Dew point to relative humidity
#' @description Conversion of dew point to relative humidity
#'
#' @param tas vector of surface temperature (K)
#' @param tdps vector of surface dew point temperature (K)
#' @return vector of relative humidity (\%)
#' @author J Bedia \email{joaquin.bedia@@gmail.com}, borrowing MatLab code from S. Herrera
#' @keywords internal
#' @family conversion
#' @export
tdps2hurs <- function(tas, tdps) {
  # Latent heat of vaporisation (J/kg) and water-vapour gas constant (J/(K kg)).
  latent_heat <- 2.5e+06
  vapour_gas_const <- 461.5
  # Clausius-Clapeyron ratio es(tdps)/es(tas), expressed as a percentage;
  # equals 100 when the dew point coincides with the air temperature.
  100 * exp((latent_heat / vapour_gas_const) * (1 / tas - 1 / tdps))
}
# End
|
ac860280e40c37cefc30df27d036459c3bf3ce10
|
8fdc42d8698286d1908b58cb1a7ea6425f9f0cc3
|
/R/bind_re_match.R
|
1c8f269631e3bcaf4be0959b8f11a69c7d0e93ec
|
[
"MIT"
] |
permissive
|
r-lib/rematch2
|
eae06b7ed66e4ed2d7cdecf6133394469a5ec45f
|
82bd8a3fb1125575213f87af5f68652f0c15d39b
|
refs/heads/main
| 2022-09-11T07:41:13.721661
| 2022-08-25T07:37:08
| 2022-08-25T07:37:08
| 94,905,516
| 43
| 9
|
NOASSERTION
| 2022-08-25T07:37:09
| 2017-06-20T15:18:28
|
R
|
UTF-8
|
R
| false
| false
| 1,979
|
r
|
bind_re_match.R
|
#' Match results from a data frame column and attach results
#'
#' Taking a data frame and a column name as input, this function will run
#' \code{\link{re_match}} and bind the results as new columns to the original
#' table., returning a \code{\link[tibble]{tibble}}. This makes it friendly for
#' pipe-oriented programming with \link[magrittr]{magrittr}.
#'
#' @note If named capture groups will result in multiple columns with the same
#' column name, \code{\link[tibble]{repair_names}} will be called on the
#' resulting table.
#'
#' @param df A data frame.
#' @param from Name of column to use as input for \code{\link{re_match}}.
#' \code{\link{bind_re_match}} takes unquoted names, while
#' \code{\link{bind_re_match_}} takes quoted names.
#' @param ... Arguments (including \code{pattern}) to pass to
#' \code{\link{re_match}}.
#' @param keep_match Should the column \code{.match} be included in the results?
#' Defaults to \code{FALSE}, to avoid column name collisions in the case that
#' \code{\link{bind_re_match}} is called multiple times in succession.
#'
#' @seealso Standard-evaluation version \code{\link{bind_re_match_}} that is
#' suitable for programming.
#'
#' @examples
#' match_cars <- tibble::rownames_to_column(mtcars)
#' bind_re_match(match_cars, rowname, "^(?<make>\\w+) ?(?<model>.+)?$")
#'
#' @export
bind_re_match <- function(df, from, ..., keep_match = FALSE) {
  # Capture the unquoted column name as a string, then delegate to the
  # standard-evaluation variant which does the actual matching and binding.
  from_name <- deparse(substitute(from))
  bind_re_match_(df = df, from = from_name, ..., keep_match = keep_match)
}
#' @describeIn bind_re_match Standard-evaluation version that takes a quoted column name.
#' @export
bind_re_match_ <- function(df, from, ..., keep_match = FALSE) {
  # Validate inputs: df must be a data frame containing the source column.
  stopifnot(is.data.frame(df))
  if (!tibble::has_name(df, from)) {
    stop(from, " is not present in the data frame.")
  }
  # Run the regex match over the chosen column.
  matches <- re_match(text = df[[from]], ...)
  # Drop the echoed input column, and the .match column unless requested.
  drop_cols <- ".text"
  if (!keep_match) {
    drop_cols <- c(drop_cols, ".match")
  }
  matches <- matches[, !names(matches) %in% drop_cols]
  # Bind the capture-group columns to the original table, de-duplicating
  # any colliding column names.
  tibble::repair_names(cbind(df, matches))
}
|
e09faef7f51ed572f322d1a05edd5396483a4f7c
|
80d3e11cc2f6cb12898ef3b39d84a7db8fb91d76
|
/simcor.R
|
f163a105781558e8f0b3b942cb67ee821cd2ca53
|
[] |
no_license
|
anupamsingh81/utilities
|
3f0ed9b832d6eb8db9d1dc3ce435f14580c39b2a
|
a0ac77d434e60d50ad1432d2a0c386cca30f5360
|
refs/heads/master
| 2021-09-06T07:02:40.959834
| 2018-02-03T14:12:36
| 2018-02-03T14:12:36
| 109,866,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 205
|
r
|
simcor.R
|
# Simulate a variable correlated at level r with the standardized input x,
# then rescale it to mean m and standard deviation s.
simcor = function(x, r, m, s) {
  # Noise variance 1 - r^2 keeps the simulated variable at unit variance
  # when x is standardized.
  noise_sd = sqrt(1 - r^2)
  e = rnorm(length(x), mean = 0, sd = noise_sd)
  # Correlated standard-normal draw, shifted/scaled to the target moments.
  m + (r * x + e) * s
}
# Demo: draw 50 values from N(40, 10), standardize them, then simulate a
# companion variable correlated with them at r = 0.5, rescaled to mean 120
# and sd 10. Note scale() returns a matrix, so f is a 50x1 matrix.
j=rnorm(50,40,10)
x=scale(j)
f=simcor(x=x,r=0.5,m=120,s=10)
|
19a1ec7caadbf80efb292d89a1111fce1931f56f
|
0052b4118f494c300bddce9b2360a3441d518181
|
/man/prof_mean_var_Delta.Rd
|
6b3ccccb4d5254379b62f8004dc1f54886eb77d5
|
[] |
no_license
|
cran/profExtrema
|
bedcc473c5e52659ed58c200f939af77f114a9df
|
5af8f93723af135b21f6e43629614928e47c7192
|
refs/heads/master
| 2020-03-27T16:00:14.525328
| 2020-03-21T16:10:02
| 2020-03-21T16:10:02
| 146,753,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,781
|
rd
|
prof_mean_var_Delta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bound_functions.R
\name{prof_mean_var_Delta}
\alias{prof_mean_var_Delta}
\title{Profile extrema for the mean and variance functions of difference process}
\usage{
prof_mean_var_Delta(kmModel, simupoints, allPsi = NULL,
options_full_sims = NULL, options_approx = NULL, F.mat = NULL,
T.mat = NULL)
}
\arguments{
\item{kmModel}{the \link[DiceKriging]{km} model of the Gaussian process \eqn{Z}.}
\item{simupoints}{the matrix \eqn{l x d} containing the pilot points \eqn{G}.}
\item{allPsi}{optional list of matrices (dim \eqn{pxd}) for which to compute the profile extrema. If NULL coordinate profiles are computed.}
\item{options_full_sims}{an optional list of options for \link{getAllMaxMin}(or \link{approxProfileExtrema} if \code{allPsi} not NULL). If NULL the full computations are not executed. NOTE: these computations might be very expensive!}
\item{options_approx}{an optional list of options for \link{approxMaxMin} (or \link{approxProfileExtrema} if \code{allPsi} not NULL).}
\item{F.mat}{the evaluation of the trend function at \code{rbind(kmModel@X,simupoints)}, see \link[stats]{model.matrix}, if \code{NULL} it is computed.}
\item{T.mat}{the upper triangular factor of the Choleski decomposition of the covariance matrix of \code{rbind(kmModel@X,simupoints)}, if \code{NULL} it is computed.}
}
\value{
the profile extrema functions at \code{options_approx$design} for the mean and variance function of the difference process \eqn{Z^\Delta = Z_x - \widetilde{Z}_x}.
}
\description{
The function prof_mean_var_Delta computes the profile extrema functions for the mean and variance functions of the difference process \eqn{Z_x - \widetilde{Z}_x} at \code{x}.
}
\author{
Dario Azzimonti
}
|
bbca556ef2ce4e2837cc8966f2cc224002d7999a
|
07b91fa13642a0548d0f5d069a4df7ec287de0d0
|
/man/select_top_features.Rd
|
e89d4c76fecd3d71951623e34c45f9e97bb4bab4
|
[
"MIT"
] |
permissive
|
cgplab/RadAR
|
c8b11fbfd72e8d904b811596a21a7c0be704d584
|
c418a316f22e5ad830e642354394c9c6280bd7c7
|
refs/heads/master
| 2022-02-11T19:25:56.087343
| 2021-11-09T08:35:29
| 2021-11-09T08:35:29
| 237,180,201
| 15
| 7
|
MIT
| 2021-07-09T11:00:39
| 2020-01-30T09:32:10
|
R
|
UTF-8
|
R
| false
| true
| 1,682
|
rd
|
select_top_features.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats_functions.R
\name{select_top_features}
\alias{select_top_features}
\title{Select top radiomic features according to a given statistics}
\usage{
select_top_features(
rdr = NULL,
which_statistics = "wilcox",
thr_pvalue = 0.05,
thr_auc = 0.8,
thr_concordance = 0.1,
thr_cox_zvalue = 3,
write_to = NULL
)
}
\arguments{
\item{rdr}{A RadAR object (class \code{\link{SummarizedExperiment}}).}
\item{which_statistics}{(character) Select top features based on one of the following pre-computed statistics:
"wilcox", "AUC", "concordance" or "cox".}
\item{thr_pvalue}{(numeric) P-value threshold to identify statistically significant features from wilcox.
It should be in the range (0, 1].}
\item{thr_auc}{(numeric) AUC threshold to identify statistically significant features from AUC
It should be in the range (0.5, 1].}
\item{thr_concordance}{(numeric) Threshold to identify statistically significant features from concordance analysis.
It should be in the range (0, .5].}
\item{thr_cox_zvalue}{(numeric) Z threshold (Wald statistics) to identify statistically significant features from cox regression analysis.
It should be in the range (0, inf).}
\item{write_to}{(character) If specified, filename to output top statistically significant features (tab-delimited).}
}
\value{
A table of class \code{\link{tibble}} reporting top features.
}
\description{
This function creates a summary table of the most significant features based on previously computed statistics
(wilcox, AUC, concordance index or cox regression)
}
\author{
Matteo Benelli (\email{matteo.benelli@uslcentro.toscana.it})
}
|
ac32119f82b20aa2d956de0f18e103cdb56f9735
|
d3f96c9ca845fc5b38ef9e7bd4c53b3f06beb49b
|
/inst/ubinc/scripts/nca_generate_data.R
|
8402edbcbff2c60327f161299a2968b73610fbf8
|
[
"BSD-3-Clause"
] |
permissive
|
john-harrold/ubiquity
|
92f08e4bebf0c4a17f61e2a96ae35d01ba47e5a2
|
bb0532915b63f02f701148e5ae097222fef50a2d
|
refs/heads/master
| 2023-08-16T17:43:52.698487
| 2023-08-16T01:24:48
| 2023-08-16T01:24:48
| 121,585,550
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,766
|
r
|
nca_generate_data.R
|
# nca_generate_data.R (part 1/3): setup.
# Builds the ubiquity mAb PK system, defines single-/multiple-dose sampling
# schedules (hours) and the dose groups used by the simulation loop below.
#clearing the workspace
rm(list=ls())
graphics.off()
options(show.error.locations = TRUE)
# If we cannot load the ubiquity package we try the stand alone distribution
if("ubiquity" %in% rownames(installed.packages())){require(ubiquity)} else
{source(file.path('library', 'r_general', 'ubiquity.R')) }
# For documentation explaining how to modify the commands below
# See the "R Workflow" section at the link below:
# http://presentation.ubiquity.grok.tv
# Rebuilding the system (R scripts and compiling C code)
cfg = build_system(system_file="system-mab_pk.txt",
                   output_directory = file.path(".", "output"),
                   temporary_directory = file.path(".", "transient"))
#  set name                  | Description
# -------------------------------------------------------
#  default                   | mAb in Humans
cfg = system_select_set(cfg, "default")
# fetching the parameter values
parameters = system_fetch_parameters(cfg)
# The following applies to both individual and stochastic simulations:
# Define the solver to use
cfg=system_set_option(cfg,group  = "simulation", option = "solver",    value = "lsoda")
# cfg=system_set_option(cfg, group  = "simulation",
#                            option = "include_important_output_times",
#                            value  = "no")
#
# Sample times in hours
# the grps will be used to split up the data set for sparse sampling
st_grp1 = c(1, 24, 336)
st_grp2 = c(4, 72, 504)
st_grp3 = c(8, 168, 671.9999)
sample_times_sd = sort(c(st_grp1, st_grp2, st_grp3))
sample_times_md = sort(c(st_grp1, st_grp2, st_grp3))
ndose = 6
# NOTE(review): the next line is a bare expression (echoes 3360 when run
# interactively) and has no effect in a sourced script.
28*24*(ndose-1)
# Cmax and Ctrough times for the intermediate doses
tmax_iv = 1:(ndose-2)*28*24+1
tmax_sc = 1:(ndose-2)*28*24+168
ttrough = 1:(ndose-2)*28*24+671.99
sample_times_md = sort(unique(c(sample_times_sd, sample_times_sd+3360, tmax_iv, tmax_sc, ttrough)))
# To overwrite the default dosing uncomment the following
# Setting all dosing to zero
# Dose amounts (mg), dosing compartments, and administration routes; the
# three vectors are aligned element-wise and consumed via dose_idx below.
doses  = c(30, 120, 300)
dcmpts = c("Cc", "At", "Cc")
routes = c("iv bolus", "extra-vascular", "iv bolus")
# Accumulators filled inside the dose loop.
# NOTE(review): simsparse_md is initialized but never populated.
simfull_sd   = NULL
simsparse_sd = NULL
simfull_md   = NULL
simsparse_md = NULL
sparse_idx = 1
sub_idx  = 1
dose_idx = 1
# nca_generate_data.R (part 2/3): simulate each dose group and interpolate
# concentrations at the sampling schedules, building full (all subjects,
# all times) and sparse (3-way split of times across subjects) datasets.
for(dose in doses){
  # Compartment/route aligned with the current dose.
  dcmpt = dcmpts[dose_idx]
  route = routes[dose_idx]
  dose_idx = dose_idx + 1
  # Single dose simulations
  # NOTE(review): ot_sd is computed but never used — the output_times option
  # below is set to linspace(0,28,100) directly, so the explicit single-dose
  # sample times may fall between simulation output points and rely on
  # interpolation via approx() further down. Confirm this is intended.
  ot_sd = sort(unique(c(linspace(0,28,100), sample_times_sd)))
  cfg=system_set_option(cfg, group  = "simulation",
                             option = "output_times",
                             linspace(0,28,100))
  cfg = system_zero_inputs(cfg)
  cfg = system_set_bolus(cfg, state   = dcmpt,
                              times   = c(  0.0  ),  # day
                              values  = c( dose))    # mg
  cfg = system_set_option(cfg, group="stochastic", option="nsub", value=18)
  som_sd = simulate_subjects(parameters, cfg)
  # Multiple dose simulations
  # Q4W for 6 months
  ot_md =sort(unique(c(linspace(0,28*6,100),sample_times_md)))
  cfg=system_set_option(cfg, group  = "simulation",
                             option = "output_times",
                             ot_md)
  cfg = system_zero_inputs(cfg)
  cfg = system_set_bolus(cfg, state   = dcmpt,
                              times   = c( 0:(ndose-1)*28),      # day
                              values  = rep(dose, times=ndose))  # mg
  cfg = system_set_option(cfg, group="stochastic", option="nsub", value=18)
  som_md = simulate_subjects(parameters, cfg)
  # One pass per simulated subject in this dose group.
  for(tmpidx in 1:length(som_sd$subjects$parameters[,1])){
    # -------------------------------------------------------------------------
    # Making the full dataset
    # Single dose: interpolate the subject's profile at the sample times.
    C_ng_ml = approx(x=som_sd$times$ts.hours, y=som_sd$outputs$C_ng_ml[tmpidx, ], xout=sample_times_sd)$y
    tmpdf = data.frame( ID      = rep(sub_idx, length(sample_times_sd)),
                        TIME_HR = sample_times_sd,
                        C_ng_ml = C_ng_ml,
                        DOSENUM = floor(sample_times_sd/24/28 +1),
                        DOSE    = dose)
    tmpdf$ROUTE = route
    if(is.null(simfull_sd)){
      simfull_sd = tmpdf
    } else {
      simfull_sd = rbind(simfull_sd, tmpdf)
    }
    # Multiple dose
    C_ng_ml = approx(x=som_md$times$ts.hours, y=som_md$outputs$C_ng_ml[tmpidx, ], xout=sample_times_md)$y
    tmpdf = data.frame( ID      = rep(sub_idx, length(sample_times_md)),
                        TIME_HR = sample_times_md,
                        C_ng_ml = C_ng_ml,
                        DOSENUM = floor(sample_times_md/24/28 +1),
                        DOSE    = dose)
    tmpdf$ROUTE = route
    if(is.null(simfull_md)){
      simfull_md = tmpdf
    } else {
      simfull_md = rbind(simfull_md, tmpdf)
    }
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    # Now making the sparse dataset: each subject contributes only one of the
    # three time groups, cycling 1 -> 2 -> 3 -> 1 across subjects.
    if(sparse_idx == 1){
      st_grp = st_grp1
    } else if (sparse_idx == 2){
      st_grp = st_grp2
    } else if (sparse_idx == 3){
      st_grp = st_grp3
    }
    C_ng_ml_grp = approx(x=som_sd$times$ts.hours, y=som_sd$outputs$C_ng_ml[tmpidx, ], xout=st_grp)$y
    tmpdf = data.frame( ID      = rep(sub_idx, length(st_grp)),
                        TIME_HR = st_grp,
                        C_ng_ml = C_ng_ml_grp,
                        DOSENUM = floor(st_grp/24/28 +1),
                        DOSE    = dose)
    tmpdf$ROUTE = route
    if(is.null(simsparse_sd)){
      simsparse_sd = tmpdf
    } else {
      simsparse_sd = rbind(simsparse_sd, tmpdf)
    }
    sparse_idx = sparse_idx + 1
    # if we get past 3 we go back to 1
    if(sparse_idx > 3){
      sparse_idx = 1
    }
    # -------------------------------------------------------------------------
    # incrementing the subject id
    sub_idx = sub_idx + 1
  }
}
# nca_generate_data.R (part 3/3): post-process datasets, write CSVs, and
# produce per-subject spaghetti plots with group-average overlays.
#Adding the nominal time to the multiple dose dataset
simfull_md$NTIME_HR = simfull_md$TIME_HR - (simfull_md$DOSENUM-1)*28*24
#Adding the extrapolation column
simfull_md$EXTRAP = -1
# For even numbered subjects we set the number of extrapolation points to 3
simfull_md$EXTRAP[(simfull_md$ID %% 2) ==0]=3
# For odd we set it to 4
simfull_md$EXTRAP[(simfull_md$ID %% 2) !=0]=4
write.csv(simfull_sd,   file="pk_all_sd.csv",    quote=FALSE, row.names=FALSE)
write.csv(simfull_md,   file="pk_all_md.csv",    quote=FALSE, row.names=FALSE)
write.csv(simsparse_sd, file="pk_sparse_sd.csv", quote=FALSE, row.names=FALSE)
# All of the data
# calculating the average concentration per (time, dose) pool
simfull_sd$DOSE = as.factor(simfull_sd$DOSE)
simfull_sd$POOL = interaction(simfull_sd$TIME_HR, simfull_sd$DOSE)
simfull_sd$C_ng_ml_ave = -1
simfull_md$DOSE = as.factor(simfull_md$DOSE)
simfull_md$POOL = interaction(simfull_md$TIME_HR, simfull_md$DOSE)
simfull_md$C_ng_ml_ave = -1
# NOTE(review): these loops iterate once per ROW (simfull_sd$POOL), so each
# pool mean is recomputed many times — O(n^2). tapply()/ave() would do this
# in one pass; results are identical.
for(POOL in simfull_sd$POOL){
  simfull_sd[simfull_sd$POOL == POOL, ]$C_ng_ml_ave = mean(simfull_sd[simfull_sd$POOL == POOL, ]$C_ng_ml)
}
for(POOL in simfull_md$POOL){
  simfull_md[simfull_md$POOL == POOL, ]$C_ng_ml_ave = mean(simfull_md[simfull_md$POOL == POOL, ]$C_ng_ml)
}
# Single dose: points per subject, lines for the (time, dose) averages.
p = ggplot() +
    geom_point(data=simfull_sd, aes(x=TIME_HR, y=C_ng_ml,     color=DOSE)) +
    geom_line( data=simfull_sd, aes(x=TIME_HR, y=C_ng_ml_ave, color=DOSE)) +
    facet_wrap(.~ID)
p = gg_log10_yaxis(fo=p)
p = prepare_figure(fo=p, purpose="print")
p = p + xlab("Time (hours)") + ylab("Serum (ng/ml)")
ggsave(file.path("output","pk_full_sd.png"), plot=p, units="in", dpi=300, height=10, width=12 )
# Multiple dose
p = ggplot() +
    geom_point(data=simfull_md, aes(x=TIME_HR, y=C_ng_ml,     color=DOSE)) +
    geom_line( data=simfull_md, aes(x=TIME_HR, y=C_ng_ml_ave, color=DOSE)) +
    facet_wrap(.~ID)
p = gg_log10_yaxis(fo=p)
p = prepare_figure(fo=p, purpose="print")
p = p + xlab("Time (hours)") + ylab("Serum (ng/ml)")
ggsave(file.path("output","pk_full_md.png"), plot=p, units="in", dpi=300, height=10, width=12 )
# Sparse data: same pooling and plotting for the sparse single-dose set.
simsparse_sd$DOSE = as.factor(simsparse_sd$DOSE)
simsparse_sd$POOL = interaction(simsparse_sd$TIME_HR, simsparse_sd$DOSE)
simsparse_sd$C_ng_ml_ave = -1
for(POOL in simsparse_sd$POOL){
  simsparse_sd[simsparse_sd$POOL == POOL, ]$C_ng_ml_ave = mean(simsparse_sd[simsparse_sd$POOL == POOL, ]$C_ng_ml)
}
p = ggplot() +
    geom_point(data=simsparse_sd, aes(x=TIME_HR, y=C_ng_ml,     color=DOSE)) +
    geom_line( data=simsparse_sd, aes(x=TIME_HR, y=C_ng_ml_ave, color=DOSE)) +
    facet_wrap(.~ID)
p = gg_log10_yaxis(fo=p)
p = prepare_figure(fo=p, purpose="print")
p = p + xlab("Time (hours)") + ylab("Serum (ng/ml)")
ggsave(file.path("output","pk_sparse_sd.png"), plot=p, units="in", dpi=300, height=10, width=12 )
|
35949fc8578b66b9bd0907a45983db41ecb47835
|
186ed4e2ec9732b060947ea4d283f568d7dc75a1
|
/R/UpdateCovCoef_function.R
|
d6243c4a9afeac3d0dcfeb0bdde71e74247b682e
|
[] |
no_license
|
gpapadog/LERCA
|
22483d5f306d9f91a5fa00105243718034e5973c
|
bda06c4882ee0b16dc07f369fef1c3773cc584ba
|
refs/heads/master
| 2021-11-08T11:37:11.684605
| 2019-05-19T18:56:22
| 2019-05-19T18:56:22
| 110,159,350
| 3
| 1
| null | 2021-11-03T21:49:57
| 2017-11-09T19:50:23
|
R
|
UTF-8
|
R
| false
| false
| 5,025
|
r
|
UpdateCovCoef_function.R
|
#' Update within experiment coefficients
#'
#' Updating the coefficients that use the within experiment likelihood:
#' intercept and coefficients of the exposure model, and the coefficients of
#' the covariates in the outcome model.
#'
#' @param dta A data set including a column of the exposure of interest as X,
#' the outcome of interest as Y, and all potential confounders as C1, C2, ...
#' @param cov_cols The indices of the columns in dta corresponding to the
#' potential confounders.
#' @param current_cutoffs Numeric of length K. The current values for the
#' points in the experiment configuraiton.
#' @param current_coefs The current coefficients of the MCMC. Three dimensional
#' array with dimensions corresponding to exposure/outcome model, experiment,
#' and coefficients (intercept, slope, covariates).
#' @param current_alphas Array of dimensions that correspond to the exposure or
#' outcome model, the experiment, and potential confounding. Entries are 0/1
#' corresponding to exclusion/inclusion of the covaraite in the corresponding
#' model of the experiment.
#' @param current_vars Matrix. Rows correspond to exposure/outcome model, and
#' columns to experiments. Entries are the current variances.
#' @param mu_priorX The mean of the normal prior on the coefficients of the
#' exposure model. Numeric vector of length equal to the number of potential
#' confounders + 1 with the first entry corresponding to the intercept.
#' @param Sigma_priorX Covariance matrix of the normal prior on the regression
#' coefficients of the exposure model.
#' @param mu_priorY The mean of the normal prior on the coefficients of the
#' outcome model. Numeric vector with entries corresponding to intercept, slope
#' of exposure, and potential covariates.
#' @param Sigma_priorY The covariance matrix of the normal prior on the
#' regression coefficients of the outcome model.
#'
#' @return Array of dimensions that correspond to the exposure/outcome model,
#' experiment, and coefficients (intercept, covariates). The intercepts of the
#' outcome model are NA, since they are not updated with this function.
#'
UpdateCovCoef <- function(dta, cov_cols, current_cutoffs, current_coefs,
                          current_alphas, current_vars, mu_priorX,
                          Sigma_priorX, mu_priorY, Sigma_priorY) {
  num_conf <- ifelse(is.null(cov_cols), 0, length(cov_cols))
  K <- length(current_cutoffs)
  # Experiment boundaries: [min(X), cutoff_1, ..., cutoff_K, max(X)].
  exact_cuts <- c(min(dta$X), current_cutoffs, max(dta$X))
  # Coefficients of the covariates including intercept (of exposure only).
  r <- array(0, dim = c(2, K + 1, num_conf + 1))
  cov_names <- 'Int'
  if (num_conf > 0) {
    cov_names <- c(cov_names, paste0('C', 1 : num_conf))
  }
  dimnames(r) <- list(model = c('Exposure', 'Outcome'),
                      exper = 1 : (K + 1), covar = cov_names)
  r[2, , 1] <- NA  # Intercept of the outcome model is not updated here.
  for (ee in 1 : (K + 1)) {
    # NOTE(review): assumes dta carries an experiment-index column named E.
    D <- subset(dta, E == ee)
    # --- Exposure model: conjugate normal update for the intercept plus
    # the covariates currently included (alpha == 1).
    curr_variance <- current_vars[1, ee]
    C <- matrix(1, nrow = nrow(D), ncol = 1)
    which_in <- NULL
    if (num_conf > 0) {
      current_alphaX <- current_alphas[1, ee, ]
      which_in <- which(current_alphaX == 1)
      cov_cols_in <- cov_cols[which_in]
      C <- cbind(C, as.matrix(D[, cov_cols_in]))
    }
    # Posterior covariance: (Sigma0^-1 + C'C / sigma^2)^-1 via Cholesky.
    prior_var <- Sigma_priorX[c(1, which_in + 1), c(1, which_in + 1)]
    prior_var_inv <- chol2inv(chol(prior_var))
    post_var <- prior_var_inv + t(C) %*% C / curr_variance
    post_var <- chol2inv(chol(post_var))
    # Posterior mean: V (Sigma0^-1 mu0 + C'X / sigma^2).
    prior_mean <- mu_priorX[c(1, which_in + 1)]
    post_mean <- prior_var_inv %*% prior_mean
    post_mean <- post_mean + t(C) %*% matrix(D$X, ncol = 1) / curr_variance
    post_mean <- post_var %*% post_mean
    # Draw the new exposure-model coefficients from their full conditional.
    gen_coef <- mvnfast::rmvn(1, mu = post_mean, sigma = post_var)
    r[1, ee, c(1, which_in + 1)] <- gen_coef
    # --- Outcome model: only update coefficients of included covariates,
    # conditioning on the residual after removing intercept + exposure terms.
    if (num_conf > 0) {
      curr_variance <- current_vars[2, ee]
      current_alphaY <- current_alphas[2, ee, ]
      which_in <- which(current_alphaY == 1)
      if (length(which_in) > 0) {
        cov_cols_in <- cov_cols[which_in]
        C <- as.matrix(D[, cov_cols_in, drop = FALSE])
        # Prior of the coefficients for the covariates.
        prior_var <- Sigma_priorY[which_in + 2, which_in + 2]
        prior_var_inv <- chol2inv(chol(prior_var))
        post_var <- prior_var_inv + t(C) %*% C / curr_variance
        post_var <- chol2inv(chol(post_var))
        # Residual of Y given the experiment's intercept and centered slope.
        resid <- D$Y - cbind(1, D$X - exact_cuts[ee]) %*% current_coefs[2, ee, 1 : 2]
        prior_mean <- mu_priorY[which_in + 2]
        post_mean <- prior_var_inv %*% prior_mean
        post_mean <- post_mean + t(C) %*% matrix(resid, ncol = 1) / curr_variance
        post_mean <- post_var %*% post_mean
        gen_coef <- mvnfast::rmvn(1, mu = post_mean, sigma = post_var)
        r[2, ee, which_in + 1] <- gen_coef
      }
    }
  }
  return(r)
}
|
cc9f43bf17b1b176f2424f656c5d26310df98f8c
|
b813aeeaf3561b92cc313bb6832887bb79b96815
|
/Quandl.R
|
c4e96f54e94460f172d2301f5461eb8aeac99866
|
[] |
no_license
|
pcava/Test1
|
dad5718cae222a89b8d80258b4e40bfdd3eaff02
|
631757b9d12832372d28552fecb2a162309862b1
|
refs/heads/master
| 2021-11-24T17:28:21.279689
| 2014-01-28T01:34:35
| 2014-01-28T01:34:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,348
|
r
|
Quandl.R
|
# Quandl API demo: fetch datasets in several formats, plot a decomposition,
# and push/update small test datasets.
# http://www.quandl.com/help/api
# http://www.quandl.com/help/packages/r
# https://github.com/quandl/R-package
rm(list=ls())
require(xts); require(Quandl)
#authenticationtoken - under your account API
# SECURITY(review): a live API token is hard-coded and committed to source
# control below; it should be revoked and loaded from an environment
# variable (e.g. Sys.getenv("QUANDL_API_KEY")) instead.
Quandl.auth("QaouhGqFcaaommmFiCib")
#get data (plain data frame, xts, and xts of monthly returns respectively)
data <- Quandl("NSE/OIL")
spx <- Quandl(code="YAHOO/INDEX_GSPC",type="xts",start_date="2000-01-01",end_date="2013-10-31",)
spx.MonthlyRet <- Quandl(code="YAHOO/INDEX_GSPC",type="xts",start_date="2000-01-01",
                   end_date="2013-10-31",transformation="rdiff",collapse="monthly")[,6]
#plot example: seasonal-trend decomposition of monthly Google prices
plot(stl(Quandl("GOOG/NASDAQ_GOOG",type="ts",collapse="monthly")[,1],s.window="per"))
#Upload example
mydata = Quandl("NSE/OIL", rows=5)
Quandl.push(code="TEST2", username="pcavatore", name="This is a test dataset2",
           description="This is a description",data=mydata)
#and if you want to update one of your datasets that has already been created
#add update=TRUE otherwise it will ask you if you want you to update.
#testing to overwrite existing one and append some new data
mydata2 = mydata[1,]
mydata2[1,2] = mydata2[1,2]+10
mydata3 = mydata2
mydata3[1,1] = mydata2[1,1]+1
mydata4 = rbind(mydata3,mydata2)
Quandl.push(code="TEST2", username="pcavatore", name="This is a test dataset2",
            description="This is a description",data=mydata4, update=T)
|
9d3617696bb384b704e8988f637810492287e05a
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gdkDisplayStoreClipboard.Rd
|
d2602a218fb2ae13fcf3781a994d1721aaca2315
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 858
|
rd
|
gdkDisplayStoreClipboard.Rd
|
\alias{gdkDisplayStoreClipboard}
\name{gdkDisplayStoreClipboard}
\title{gdkDisplayStoreClipboard}
\description{Issues a request to the clipboard manager to store the
clipboard data. On X11, this is a special program that works
according to the freedesktop clipboard specification, available at
http://www.freedesktop.org/Standards/clipboard-manager-spec (\url{http://www.freedesktop.org/Standards/clipboard-manager-spec}).}
\usage{gdkDisplayStoreClipboard(object, clipboard.window, targets)}
\arguments{
\item{\verb{object}}{a \code{\link{GdkDisplay}}}
\item{\verb{clipboard.window}}{a \code{\link{GdkWindow}} belonging to the clipboard owner}
\item{\verb{targets}}{a list of targets that should be saved, or \code{NULL}
if all available targets should be saved.}
}
\details{Since 2.6}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
226682554443caa0617b2a71623afdf82f8b2638
|
8151e7b269a7aed3f7c2e1e1e9e792ef0c174be9
|
/R-introduction/bankdataexploration_simple.R
|
6c8c73e61ef5bd8411c318a284565d1a0add0e20
|
[] |
no_license
|
ashokharnal/bigdata
|
046f23ccd4e0c41c38dccecd3d57b68c56353d37
|
651e48f4b9deb476d1258beac058356c1e046c15
|
refs/heads/master
| 2021-01-23T07:09:51.341999
| 2017-02-11T06:58:10
| 2017-02-11T06:58:10
| 80,479,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,070
|
r
|
bankdataexploration_simple.R
|
# Objectives:
# A. How to read a csv file
# B. How to look at data-structure
# C. How to access slices of data
# D. Basic plotting
# E. Statistical tests
# 1
# Set all libraries that we will use
library(dplyr) # For glimpse() and sample_n
library(prettyR) # For describe()
library(ggplot2) # For plotting
# 2
# Set path to your data files (change it appropriately for your machine)
# Note the use of forward slash (/) in the path statement
# 2.1
setwd("C:/Users/ashokharnal/Documents/bigDataLessons/3-dayclasses/data_exploration/bankmarketing")
# Just Check what path has been set
# 2.2
getwd()
#2.3
# List all files in current directory
dir()
# 2.4
# Read bank-full.csv file into some variable 'bankdata'.
# Fields are separated by semi-colon
bankdata<-read.table("bank-full.csv", header=TRUE,sep=";",quote="\"")
#******************
# 3. Have a look at data
#******************
# 3.1
# See some data
head(bankdata) # Display some starting rows
tail(bankdata) # Display rows from tail-end
# 3.2
# Data dimesion
dim(bankdata) # How many rows and columns
ncol(bankdata) # Number of columns
nrow(bankdata) # Number of rows
# 3.3
# Check data type of variable 'bankdata'
class(bankdata)
# 3.4
# What is the structure of 'bankdata'
str(bankdata)
glimpse(bankdata)
# 3.5
# Examine distribution of individual factors
table(bankdata$education) # What to do with unknown
# 3.51 Divide age into three levels
bankdata$age_level=cut(bankdata$age,3,labels = c("young","middle","senior"))
# 3.52 See distribution of education vs age
# And the fill up education, age-level wise
table(bankdata$education,bankdata$age_level)
# 3.6
# Row and col names
row.names(bankdata) # Rownames
names(bankdata) # Again column names
colnames(bankdata) # Column names
# 3.7
# See a data point at row 5, column 6
bankdata[5,6]
## Square-bracket operator
# Look at first five rows
bankdata[1:5,]
# Select just 'job' column
bankdata[,'job']
# OR, as
bankdata$job
#******************
# 4. Slice data
#******************
## Slice data
# Just look at all those rows data where job = management
# 4.1
## Which rows have values 'management'. Note double equal to (==)
bankdata[,'job']=='management'
# Get these values in a variable
myvalues<-bankdata[,'job']=='management'
bankdata[myvalues,]
head(bankdata[myvalues,])
# Or write directly as
bankdata[bankdata[,'job']=='management',]
OR
bankdata[bankdata$job=='management',]
# 4.2
# Look at those rows where age > 21
bankdata[bankdata[,'age']> 21,] # Try first bankdata[,'age']> 21
# OR
bankdata[bankdata$age > 21,]
# Look at only job and marital columns
bankdata[,c(1,3)]
# 4.3
# Look rows 42 to 78 of job and marital columns
bankdata[c(42:78),c(1,3)]
# OR
bankdata[c(42:78),c('job','marital')]
# 4.4
# Assign those who are "young" and whose education level is 'unknown;
# education of "secondary"
# Filter age_level wise and education wise
g<-bankdata[bankdata$age_level=="young" & bankdata$education == "unknown" ,]
head(g)
# Create a logical vector as per filter
lv<-bankdata$age_level=="young" & bankdata$education == "unknown"
bankdata[lv,]$education <-"secondary"
#******************
# 5. Aggregate data
#******************
# 5.1
########## Explore individual variables #################
# Summarise. Note the differences between summary of
# numerical and categorical data
summary(bankdata)
describe(bankdata)
# 5.2
# Aggregate is more general form of table.
# class of aggregate is data.frame
# Aggregate age, marital status wise
aggregate(age ~ marital, mean , data=bankdata)
# How balance depends upon job
aggregate(balance ~ job ,mean,data=bankdata)
# Sort it
aggregate(balance ~ job ,mean,data=bankdata) %>% arrange(balance)
# g<-aggregate(balance ~ job ,mean,data=bankdata)
# ggplot(data=g,mapping = aes(x=job,y=balance)) + geom_bar(stat="identity")
# Advanced
aggregate(cbind(age,balance) ~ marital, mean , data=bankdata)
aggregate(age ~ marital+housing, mean , data=bankdata)
aggregate(cbind(balance,duration) ~ job+education+housing ,mean,data=bankdata)
aggregate(cbind(age,balance) ~ marital, mean , data=bankdata)
aggregate(. ~ marital, mean , data=bankdata)
########## Some statistical operations #################
########## Explore multiple variables #################
# 6
# correlation between age, balance and duration
cor(bankdata[,c(1,6,12)])
# 6.1
# Chi-sqaure test
# Is there a relationship between 'has housing loan?'
# and 'has personal loan?'
# Get a contingency table
p<-table(bankdata$housing,bankdata$loan)
# Better still give names to rows and columns
p<-table("Has housing loan?"=bankdata$housing,"Has personal loan?"=bankdata$loan)
p # Contingency table
# 6.2
# Apply chisquare test
chisq.test(p)
# 6.3
# Compare two means
# Are the means of 'balance' different for those with 'yes'
# and with 'no' hosuing loan
sample1<-bankdata[bankdata$housing=="yes",]$balance
sample2<-bankdata[bankdata$housing=="no",]$balance
t.test(sample1,sample2)
# 6.4
# Test the hypothesis that population mean for balance is 1300
t.test(bankdata$balance,mu=1300)
#******************
# 7. Plotting data
#******************
# 7.1
# Histogram of numerical variables, such as, age
# Specify number of bins with breaks = 20
# Also show labels
hist(bankdata$age, labels = TRUE)
# Have 10 bins
hist(bankdata$age,breaks=10, labels = TRUE)
# 7.2
# Get information about plotted histogram
histinfo<-hist(bankdata$age,breaks=10)
histinfo
# 7.3
# MAke a density, not frequency histogram
# Read density points on y-axis
# Sum of areas must total to 1
histinfo<-hist(bankdata$age,freq=FALSE)
histinfo
# 7.4
# Transform data and then plot
hist(log10(bankdata$age),breaks=20)
# 7.5
# Density plot
plot(density(bankdata$age))
# Superimpose normal distribution on histogram
# dnorm is for density distribution function
# hist(bankdata$age,freq=FALSE)
# Note that in dnorm(), we have not specified 'x'. Default values are taken
# from the x-limits used from the previous plot. (see ?curve)
# curve(dnorm(x, mean=mean(bankdata$age), sd=sd(bankdata$age)), add=TRUE, col="darkblue", lwd=2)
# 7.6
# Frequency of values in column education
table(bankdata$education)
# Draw a pie chart of education (levels) factors
pie(table(bankdata$education))
# Or draw a simple barplot of categorical data
pie(table(bankdata$education))
barplot(table(bankdata$education))
barplot(table(bankdata$marital, bankdata$housing)) # Stacked barplot
legend("topright", legend=c("a","b","c"))
legend("bottom", legend=c("single","married","divorced"), fill=c("lightgray","gray","darkgray"))
# 7.7
# Draw boxplots maritl status wise
# X-axis (independent) has categorical variable
boxplot(age ~ marital, data=bankdata)
# Draw scatter plot between age and balance
plot(bankdata$balance,bankdata$age)
# 7.8
# Or use ggplot() to draw a good bar-plot
ggplot(bankdata, aes(x = education)) + geom_bar()
# Bar plots of two categorical variables: education and job
ggplot(bankdata, aes(x = education,fill=job)) + geom_bar()
# Or barplot with proportion of jobs education wise
ggplot(bankdata, aes(x = education,fill=job)) + geom_bar(position="fill")
# 7.9
# Draw boxplots maritl status wise
# X-axis (independent) has categorical variable
ggplot(bankdata,aes(x=marital,y=age))+geom_boxplot()
# You can draw a single boxplot() with imaginary
# categorical variable on x-axis
ggplot(bankdata,aes(x=factor(0),y=age))+geom_boxplot()
# 7.10
# Plot density graph of counts of education (x-axis), job-wise
ggplot(bankdata, aes(x = education, color = job)) + geom_density()
# 8. gc() Garbage Collector output:
# The max used column shows that you started with certain memory
# sometime during startup R was using more memory,
# and the gc trigger shows that garbage collection
# will happen when you get above certain number of Ncells or
# Vcells.
# 8.1 Remove/delete all objects from memory
rm(list = ls())
gc()
################# FINISHED #####################
|
8f8dc85b473e0776ef10cb0bcccb725ffee0db9c
|
c2e833feb1c738737ed468b3d0da503439faa199
|
/pkg.paper.PRS/man/c_method_r2.Rd
|
b3434d1a93213ce8e9fd37fb0cab4208b5837b32
|
[] |
no_license
|
privefl/paper2-PRS
|
4ea4d5d6aa4d0b422c57f4e20cedc8925f4b6497
|
3487d0d0d77e27956788ddb9ef8840c46676a7bf
|
refs/heads/master
| 2021-11-25T23:51:21.652297
| 2021-10-27T08:14:25
| 2021-10-27T08:14:25
| 106,011,226
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 503
|
rd
|
c_method_r2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-format-results.R
\name{c_method_r2}
\alias{c_method_r2}
\title{Combine "method" and "r2"}
\usage{
c_method_r2(method, r2)
}
\arguments{
\item{method}{Vector of names of methods.}
\item{r2}{Vector of r2 values (possibly with missing values).}
}
\value{
Concatenation of \code{method} and \code{r2} using \code{"-"}.
}
\description{
Combine "method" and "r2"
}
\examples{
c_method_r2(c("A", "B", "A"), c(0.5, NA, 0.2))
}
|
79d3de08d6f17a7141579941d2949d97218f5ab2
|
ef87abda86a463b49d282fec7b7866d799f2dd92
|
/man/BeMRes.Rd
|
c4e7cfc26f5d12935ec4ebf0e0e793ccac3bc732
|
[] |
no_license
|
cran/BGPhazard
|
98779e1afe2d97186499e2a400971e779f6cd9f9
|
af8e75bd2f1dd3e239e1f47bfcaf94a7e97b5776
|
refs/heads/master
| 2021-08-16T03:27:59.141364
| 2021-01-17T04:10:02
| 2021-01-17T04:10:02
| 17,677,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,237
|
rd
|
BeMRes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BeMRes.R
\name{BeMRes}
\alias{BeMRes}
\title{Markov Beta Model}
\usage{
BeMRes(
times,
delta = rep(1, length(times)),
alpha = rep(1e-04, K),
beta = rep(1e-04, K),
c.r = rep(0, K - 1),
a.eps = 0.1,
b.eps = 0.1,
type.c = 4,
epsilon = 1,
iterations = 2000,
burn.in = floor(iterations * 0.2),
thinning = 5,
printtime = TRUE
)
}
\arguments{
\item{times}{Numeric positive vector. Failure times.}
\item{delta}{Logical vector. Status indicator. \code{TRUE} (1) indicates
exact lifetime is known, \code{FALSE} (0) indicates that the corresponding
failure time is right censored.}
\item{alpha}{Nonnegative vector. Small entries are recommended in order to
specify a non-informative prior distribution.}
\item{beta}{Nonnegative vector. Small entries are recommended in order to
specify a non-informative prior distribution.}
\item{c.r}{Nonnegative vector. The higher the entries, the higher the
correlation of two consecutive failure times.}
\item{a.eps}{Numeric. Shape parameter for the prior gamma distribution of
epsilon when \code{type.c = 4}.}
\item{b.eps}{Numeric. Scale parameter for the prior gamma distribution of
epsilon when \code{type.c = 4}.}
\item{type.c}{Integer. 1=defines \code{c.r} as a zero-entry vector; 2=lets
the user define \code{c.r} freely; 3=assigns \code{c.r} an
exponential prior distribution with mean \code{epsilon}; 4=assigns \code{c.r}
an exponential hierarchical distribution with mean \code{epsilon} which in turn has a
a Ga(a.eps, b.eps) distribution.}
\item{epsilon}{Double. Mean of the exponential distribution assigned to
\code{c.r}}
\item{iterations}{Integer. Number of iterations including the \code{burn.in}
and \code{thining} to be computed for the Markov chain.}
\item{burn.in}{Integer. Length of the burn-in period for the Markov chain.}
\item{thinning}{Integer. Factor by which the chain will be thinned. Thinning
the Markov chain is to reduces autocorrelation.}
\item{printtime}{Logical. If \code{TRUE}, prints out the execution time.}
}
\description{
Posterior inference for the Bayesian non-parametric Markov beta model for discrete
survival times.
}
\details{
Computes the Gibbs sampler given by the full conditional distributions of u
and Pi (Nieto-Barajas & Walker, 2002) and arranges the resulting Markov
chain into a tibble which can be used to obtain posterior summaries.
}
\note{
It is recommended to verify chain's stationarity. This can be done by
checking each partition element individually. See \link{BePlotDiag}.
}
\examples{
## Simulations may be time intensive. Be patient.
## Example 1
# data(psych)
# timesP <- psych$time
# deltaP <- psych$death
# BEX1 <- BeMRes(timesP, deltaP, iterations = 3000, burn.in = 300, thinning = 1)
## Example 2
# data(gehan)
# timesG <- gehan$time[gehan$treat == "control"]
# deltaG <- gehan$cens[gehan$treat == "control"]
# BEX2 <- BeMRes(timesG, deltaG, type.c = 2, c.r = rep(50, 22))
}
\references{
- Nieto-Barajas, L. E. & Walker, S. G. (2002). Markov beta and
gamma processes for modelling hazard rates. \emph{Scandinavian Journal of
Statistics} \strong{29}: 413-424.
}
\seealso{
\link{BePlotDiag}, \link{BePloth}
}
|
d5b85623d25d502893cb125dfb39d0aaa5bfce3e
|
6bf46c0e53622dab0757a8319510e7ed1d37e31f
|
/NumerosPseudoaleatorios/clase6.R
|
77eace99b86db53775716b0bcc1da3851554f602
|
[] |
no_license
|
MisaelErikson/ComputacionEstadistica2020-2
|
fcc0d109ae258272b78128a7b1e56c040a2ed168
|
b4b749518cf97321059e1c9fb298657299d7aa54
|
refs/heads/main
| 2023-02-26T12:44:17.163552
| 2021-01-15T18:06:36
| 2021-01-15T18:06:36
| 314,183,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,708
|
r
|
clase6.R
|
# Generaci?n de numeros Pseudoaleatorios
library(tidyverse)
library(descr) ## estadisticas descriptivas
## Recodar
# modulo
179%%100
# divisi?n entero
179%/%100
## crear una tabla de frecuencias
tblf=function(vecb,k){
inter=cut(vecb,breaks=k)
tabla=freq(ordered(inter),plot=TRUE)
tabla
}
# Algoritmo de cuadro medios
c2_ale=function(semilla,n){
semillas=numeric(1)
ri=numeric(1) #numero pseudoaleatorio
for (i in 1:n){
x=semilla^2
y=str_length(x) #6 8
p=(y-4)/2 #(8-4)/2=2
semilla=as.numeric(str_sub(x,p+1,y-p))
semillas[i]=semilla
ri[i]=semilla/10^4
}
cbind(semillas,ri)
}
c2_ale(550,10)
p=c2_ale(550,100)
p=tbl_df(p)
x=p$ri
tblf(x,4) #
plot(density(x),col="red")
# algoritmo producto medio
mpr_med=function(semi0,semilla,n){
semillas=numeric(1)
ri=numeric(1) #numero pseudoaleatorio
for (i in 1:n){
x=semilla*semi0
semi0=semilla
y=str_length(x) #6 8
p=(y-4)/2 #(8-4)/2=2
semilla=as.numeric(str_sub(x,p+1,y-p))
semillas[i]=semilla
ri[i]=semilla/10^4
}
cbind(semillas,ri)
}
x0=6965
x1=9803
mpr_med(x0,x1,100)
p=mpr_med(x0,x1,10)
p=tbl_df(p)
x=p$ri
tblf(x,4) #
plot(density(x),col="red")#--------
# algoritmo de multiplicaci?n constate
#y=a*x
mult_const=function(semilla,a,n){
semillas=numeric(1)
ri=numeric(1) #numero pseudoaleatorio
for (i in 1:n){
x=semilla*a
y=str_length(x) #6 8
p=(y-4)/2 #(8-4)/2=2
semilla=as.numeric(str_sub(x,p+1,y-p))
semillas[i]=semilla
ri[i]=semilla/10^4
}
cbind(semillas,ri)
}
a=6965
x1=9803
mult_const(x1,a,10)
p=mult_const(x0,x1,10)
p=tbl_df(p)
x=p$ri
tblf(x,4) #
plot(density(x),col="red")#--------
# algoritmo lineal
#y=(ax+c)mod(m)
alg_lineal=function(semilla,a,c,n){
g=str_length(semilla)
m=10^g
semillas=numeric(1)
ri=numeric(1) #numero pseudoaleatorio
for (i in 1:n){
x=(semilla*a+c) %% m
semilla=x
semillas[i]=semilla
ri[i]=semilla/(m-1)
}
cbind(semillas,ri)
}
a=19
c=32
x1=37
alg_lineal(x1,a,c,10)
p=alg_lineal(x1,a,c,10000)
p=tbl_df(p)
x=p$ri
tblf(x,4) #
plot(density(x),col="red")#--------
# algoritmo congruencias multiplicativo
#m=2^g
#a=3+8k o a=5+8k k=0,1,2,3,..
congr_mult=function(semilla,a0,k,g,n){
a=a0+8*k
m=2^g
semillas=numeric(1)
ri=numeric(1) #numero pseudoaleatorio
for (i in 1:n){
x=(semilla*a) %% m
semilla=x
semillas[i]=semilla
ri[i]=semilla/(m-1)
}
cbind(semillas,ri)
}
a0=5 # 5 o 3
k=2
x1=17
g=5
congr_mult(x1,a0,k,g,10)
p=congr_mult(x1,a0,k,g,1000)
p=tbl_df(p)
x=p$ri
tblf(x,4) #
plot(density(x),col="red")#--------
# pruebas de diagnostico de aleatoridad
# prueba media = 0.5
# H0: mean(ri)=0.5 H1: mean(ri)<>0.5
t.test(p$ri,
alternative =c("two.sided"),
mu=0.5, conf.level = 0.95)
# prueba de varianza=0.5
# install.packages("TeachingDemos")
library(TeachingDemos)
sigma.test(x=p$ri,
alternative ="two.sided",
sigma=0.5,conf.level = 0.95)
## uniformidad
# ajuste a una uniforme
chisq.test(p$ri,rescale.p = TRUE,
correct=TRUE)
# contrastes normalidad
shapiro.test(p$ri)
nortest::lillie.test(p$ri)
#ri=1 2 2 2 1 2 1 2 3 4 1 1
# + - - + +
# prueba de corridas
# H0: los ri son independientes
# H1: los ri son dependetientes
test.corrida=function(xbse){
y=length(xbse)
t=numeric(y-1)
for(i in 2:y){
t[i-1]=if_else(xbse[i]>xbse[i-1],"+","-", missing = NULL)
}
s=0
for(i in 1:(y-2)){
x=if_else(t[i]==t[i+1],1,0, missing = NULL)
s=s+x
}
h=y-1-s
Eh=round((2*y-1)/3,2)
vh=round((16*y-29)/90,2)
z=round((h-Eh)/sqrt(vh),3)
p_value=round(pnorm(z),3)
tblr=data.frame(h,Eh,vh,z,p_value)
return(tblr)
}
test.corrida(p$ri)
|
068d8638aa91c7315b0fb83688b4aa909dddec4d
|
b72a579eddbd2e20a0d154a4704fa28dc89adf5f
|
/code/23andMe/besteur_prs.R
|
09bae72c2f9c9bf1b55290ef01e0803153adb7ac
|
[] |
no_license
|
andrewhaoyu/multi_ethnic
|
cf94c2b02c719e5e0cbd212b1e09fdd7c0b54b1f
|
d1fd7d41ac6b91ba1da8bb8cd1b2b0768f28b984
|
refs/heads/master
| 2023-06-24T20:47:18.268972
| 2023-06-13T15:30:14
| 2023-06-13T15:30:14
| 214,069,397
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,071
|
r
|
besteur_prs.R
|
args = commandArgs(trailingOnly = T)
#i represent ethnic group
#j represent chromosome
#l represent the causal SNPs proportion
#m represent the training sample size
#i_rep represent simulation replication
#i1 represent the genetic architecture
i = as.numeric(args[[1]])
l = as.numeric(args[[2]])
pthres <- c(5E-08,5E-07,5E-06,5E-05,5E-04,5E-03,5E-02,5E-01,1.0)
library(data.table)
#install.packages("dplyr")
#install.packages("vctrs")
library(dplyr)
method = "PT"
eth <- c("EUR","AFR","AMR","EAS","SAS")
trait <- c("any_cvd","depression",
"heart_metabolic_disease_burden",
"height",
"iqb.sing_back_musical_note",
"migraine_diagnosis",
"morning_person")
setwd("/data/zhangh24/multi_ethnic/")
#temp.dir = paste0("/fastscratch/myscratch/hzhang1/ARIC/",trait[l],"/",eth[i],"/")
data.dir = "/data/zhangh24/multi_ethnic/data/cleaned/"
#load best eur results
out.dir = "/data/zhangh24/multi_ethnic/result/cleaned/result_summary/"
eth_group = c("european","african_american",
"latino","east_asian","south_asian")
method = "PT"
result <- read.csv(paste0(out.dir,eth_group[1],"_",trait[l],"_",method))
k <- which.max(result)
out.dir.prs <- paste0("/data/zhangh24/multi_ethnic/result/cleaned/prs/PT/",eth[1],"/",trait[l],"/")
prs.file.name = dir(out.dir.prs,pattern = paste0("_",method,"_pvalue_",k),full.names = T)
#best eur prs
best.prs = read.table(prs.file.name,header=T)
best.eur.prs = best.prs %>%
rename(BETA.EUR = BETA)
#load EUR ethnic group coefficients
sum.data = as.data.frame(fread(paste0(data.dir,eth[1],"/sumdat/",trait[l],"_passQC_noNA_matchinfo_matchMAFnoNA_common_mega+hapmap3_cleaned.txt"),header=T))
best.eur.prs = left_join(best.eur.prs,sum.data,by=c("SNP"="rsid")) %>%
select(SNP,FREQ_A1,BETA,SD) %>%
rename(BETA.EUR = BETA,
SD.EUR = SD,
FREQ.EUR = FREQ_A1)
#load target ethnic group coefficients
sum.data = as.data.frame(fread(paste0(data.dir,eth[i],"/sumdat/",trait[l],"_passQC_noNA_matchinfo_matchMAFnoNA_common_mega+hapmap3_cleaned.txt"),header=T))
#find the subset of best eur prs in target ethnic group
best.eur.prs.com = inner_join(best.eur.prs,sum.data,by=c("SNP"="rsid")) %>%
select(SNP,BETA.EUR,SD.EUR,FREQ.EUR,BETA,SD,FREQ_A1) %>%
rename(BETA.TAR = BETA,
SD.TAR = SD,
FREQ.TAR = FREQ_A1)
source("/data/zhangh24/multi_ethnic/code/stratch/EB_function.R")
beta_tar <- best.eur.prs.com$BETA.TAR
sd_tar <- best.eur.prs.com$SD.TAR
beta_eur <- best.eur.prs.com$BETA.EUR
sd_eur <- best.eur.prs.com$SD.EUR
EBprior = EstimatePrior(beta_tar,sd_tar,
beta_eur,sd_eur)
post_beta_mat = EBpost(beta_tar,sd_tar,beta_eur,sd_eur,EBprior)
post_beta_tar = post_beta_mat[,1,drop=F]
best.eur.prs.com$BETA.EB = post_beta_tar
#best target ethnic group prs results
method = "PT"
result <- read.csv(paste0(out.dir,eth_group[i],"_",trait[l],"_",method))
k <- which.max(result)
out.dir.prs <- paste0("/data/zhangh24/multi_ethnic/result/cleaned/prs/PT/",eth[i],"/",trait[l],"/")
prs.file.name = dir(out.dir.prs,pattern = paste0("_",method,"_pvalue_",k),full.names = T)
#best target prs
best.prs = read.table(prs.file.name,header=T)
#combime best eur prs and best target ethnic group prs
best.prs.all = full_join(best.eur.prs.com,
best.prs,by="SNP")
#fill in 0 for the rows without any cofficients
best.prs.select = best.prs.all %>%
select(SNP,BETA.EUR,BETA.TAR,BETA.EB,BETA)
best.prs.select[is.na(best.prs.select)] = 0
#match snp with imputed id
load(paste0(data.dir,"snpinfo/snpinfo_mega.RData"))
snpinfo_mega_filter = snpinfo_mega %>%
filter(!is.na(im.data.id)) %>%
select(im.data.id,assay.name)
prs.snp = left_join(best.prs.select,snpinfo_mega_filter,by=c("SNP"="assay.name")) %>%
arrange(im.data.id) %>%
select(im.data.id,BETA.EUR,BETA.TAR,BETA.EB,BETA)
out.dir.organize.prs <- paste0("/data/zhangh24/multi_ethnic/result/cleaned/organize_prs/BESTEUR/",eth[i],"/",trait[l],"/")
write.table(prs.snp,file = paste0(out.dir.organize.prs,"prs.file"),row.names = F,col.names = F,quote=F)
|
7914dd16da62a0e737a1981945eda953f927c220
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rspa/tests/testthat.R
|
38d9c92dd58fdd2852002bd0ca1ab1d92c7af31b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40
|
r
|
testthat.R
|
library(testthat)
test_check("rspa")
|
ff55de80bc610c5501d3a0ab4c9eaad3b268ae25
|
7f9d5e1b88674b92b01e6ec92f4369461941d2a9
|
/man/secder.Rd
|
276df933a0eb986dba6f23637d143c16f7292705
|
[] |
no_license
|
cran/demogR
|
ec920f3fd6e25ec0d8343c77f43a8b52bee1654a
|
04d1a0d87025c6a0b5cc15dd85ba3b6353303c85
|
refs/heads/master
| 2018-11-11T15:37:16.085875
| 2018-09-14T19:30:04
| 2018-09-14T19:30:04
| 17,718,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,787
|
rd
|
secder.Rd
|
\name{secder}
\alias{secder}
\title{secder }
\description{
Calculates the second derivatives of the dominant eigenvalue of the
demographic projection matrix for all non-zero transitions with
respect to one specified transition.
}
\usage{
secder(A, k, l)
}
\arguments{
\item{A}{ demographic projection matrix }
\item{k}{ row index for the specified transition }
\item{l}{ column index for the specified transition }
}
\details{
See Caswell (1996, 2001) for details on second derivatives of the
dominant eigenvalue.
}
\value{
A square matrix of the same rank as A where each element \eqn{s_{ij}} is the
second derivative of the dominant eigenvalue of A, \eqn{\partial^2
\lambda/\partial a_{ij} \partial a_{kl}}.
}
\references{
Caswell, H. 1996. Second derivatives of population growth rate:
Calculation and applications. Ecology 77 (3):870-879.
Caswell, H. 2001. Matrix population models: Construction, analysis,
and interpretation. 2nd ed. Sunderland, MA: Sinauer.
}
\note{
The eigenvalue second derivatives are essential for calculating both
perturbation analyses of the eigenvalue elasticities and stochastic
sensitivities. \code{secder} is used in functions to calculate both
these quantities.
}
\seealso{ \code{\link{fullsecder}}, \code{\link{elassens}},
\code{\link{eigen.analysis}}, \code{\link{stoch.sens}} }
\examples{
## eigenvalue second derivatives of the US projection matrix from 1967
## with respect to infant survival
data(goodman)
ult <- with(goodman, life.table(x=age, nKx=usa.nKx, nDx=usa.nDx))
mx <- goodman$usa.bx/goodman$usa.nKx
usa <- leslie.matrix(lx=ult$nLx,mx=mx)
sd21 <- secder(usa,2,1)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ array }
\keyword{ algebra }
|
3dfa5f9b095157e710cf3be70796fdf6a32cd499
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/2000.11.03.editorial.33226.0480.r
|
9a0c45c722d3153ee3f25cf03707cc4128f59d43
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,254
|
r
|
2000.11.03.editorial.33226.0480.r
|
stiti care este visul prostului ingimfat ?
sa nu existe decit lucrurile pe care le stie el .
toate celelalte sint scorneli , inutilitati , savantlicuri sau inselatorii .
ceva de acest fel s - a petrecut si la emisiunea lui Marius Tuca , de miercuri seara , de la Antena PDSR .
ajuns si el pe cele mai inalte culmi ale competentei ( mai nou , Marius Tuca , in ciuda studiilor modeste , se poarta de parca si - ar fi dat doctoratul in cel putin douazeci de domenii ) , ziaristul s - a incrincenat sa faca o demonstratie pe care o adusese de acasa sau o primise din alta parte .
in discutia cu Sorin Frunzaverde , ministrul apararii , Marius Tuca avea de dovedit o conspiratie .
de ce a fost destituit generalul Mircea Chelaru abia marti , dupa ce " Evenimentul zilei " a publicat articolul " Republica Oltenia " ?
pina atunci n - a zis nimeni nimic , n - a suflat o vorba despre conferinta de presa in care generalul a scapat " porumbeii " .
ministrul Frunzaverde l - a corectat civilizat , atragindu - i atentia asupra faptului ca stirea respectiva a fost difuzata si de Mediafax , si de Televiziunea Romana , ba ca l - a auzit pe general cum scotea aceleasi " panglici " si la radio .
si ce daca a spus la TVR ?
acolo nu se uita nimeni !
daca n - a spus la Antena 1 , atunci declaratia nu exista !
nu e prima data cind Marius Tuca foloseste acest procedeu pagubos .
daca nu e la Antena 1 sau in " Jurnalul national " nici nu exista .
acest truc , menit a provoca un zimbet , ascunde totusi o suficienta in gindire .
daca nu stie el , nici nu exista .
daca n - a auzit el , nu s - a spus , daca n - a vazut el , nici nu s - a petrecut .
toata demonstratia lui Marius Tuca privitoare la demisia generalului Chelaru dupa aparitia articolului din " Evenimenul zilei " avea la baza un siretlic .
n - a scris nimeni despre spusele negindite ale generalului pina luni .
generalul trebuia demis , si atunci - insinua Marius cel cult si atoatestiutor - cineva a aranjat cu redactia " Evenimentului zilei " si asa a aparut articolul de a doua zi .
adica s - a lucrat pe ascuns , pregatind pretextul unei debarcari .
nu conta ce spunea ministrul ( altfel incolor in aceasta emisiune ) , n - a contat nici cazul , mult mai grav , al infiintarii ANMR .
or , lucrurile sint totusi extrem de simple .
cum puteti vedea si din facsimilele reproduse in numarul nostru de astazi , stirea despre declaratiile generalului Chelaru a fost difuzata si de TVR , a fost preluata si de " Evenimentul zilei " de simbata ( in editia a doua , data fiind ora tirzie de incheiere a conferintei de presa ) , a aparut si pe agentiile de monitorizare , a fost publicata si in " Curierul National " de luni .
sa fie toate acestea dovada faptului ca lui Marius Tuca nu i - a placut niciodata sa citeasca , ci doar sa se vada la televizor ?
sau un semn ca a fost si el atins de boala megapersonalitatilor ( psihiatrii o numesc altfel ) ?
sau patronii sai politici i - au dat sarcina sa gaseasca inca un nod in papura si asa taiata a lui Emil Constantinescu ?
cert este ca Marius Tuca vorbea cu maxima raspundere si convingere despre lucruri neadevarate sau despre care nici macar nu avea habar .
nu e azi momentul sa discutam despre cei pe care popularitatea ii imbata si - i face sa creada ca sint buricul pamintului .
in cele din urma , e treaba lor .
e vorba doar de sirgul cu care un ziarist cu nerv ingroapa si lucruri pe care nu le intelege , terfeleste si idei care ii supraincalzesc circuitele nervoase .
e regretabil ca un om de la care multi cititori si telespectatori au asteptat o crestere profesionala aluneca pe nesimtite in suficienta si in partizanat politic .
e din ce in ce mai vizibil ca Marius Tuca e la un pas de carnetul de partid al lui George Pruteanu .
intr - o Romanie intrata in faza premergatoare impartirii ciolanului , Antena 1 a devenit Antena PDSR pentru ca Dan Voiculescu a semnat o alianta politica profitabila .
daca Dan Voiculescu vrea sa stea in scaun de parlamentar si sa transforme prea purul PUR intr - o marioneta a PDSR , nu intelegem de ce a pus la bataie si obrazul jurnalistic al unor colegi .
ceea ce se petrece acum la Antena 1 , inclusiv cu Marius Tuca , numai independenta jurnalistica nu poate fi numita .
|
817b238ab571dc66255d2bb962f2165775bc8620
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9048_0/rinput.R
|
ce6017ac223a0accbaa3635128ff857a59b12a6e
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("9048_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9048_0_unrooted.txt")
|
d518184e0e5140db880e9fd998796436890bbfcb
|
9041358fd619cec189ef6f3fc442a55ca58e16b2
|
/tasas-app/old/server_2016_0822_1520PM.r
|
eea34f6c3e1b026ffe9927f83aece50fa444f92d
|
[] |
no_license
|
carlosror/tasas-graduaciones
|
e388a0374f5eb7895db0feb76b562e1ff9cc1f9d
|
1d7a65cac90763e8cddbd55056a0640f16ab56f9
|
refs/heads/master
| 2021-01-12T03:21:04.710280
| 2017-01-06T12:50:43
| 2017-01-06T12:50:43
| 78,200,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,876
|
r
|
server_2016_0822_1520PM.r
|
library(shiny)
library(ggplot2)
# District choices per region. Each named vector feeds selectInput():
# names are the labels shown to the user, values are what input$... returns.
# NOTE(review): c() and list() are mixed below; selectInput() accepts both,
# but normalizing to one form would be cleaner.
choices_Region_ARECIBO <- c("ARECIBO" = "ARECIBO", "CAMUY" = "CAMUY", "MANATÍ" = "MANATÍ", "VEGA ALTA" = "VEGA ALTA")
choices_Region_BAYAMON <- c("BAYAMÓN" = "BAYAMÓN", "COROZAL" = "COROZAL", "OROCOVIS" = "OROCOVIS", "TOA BAJA" = "TOA BAJA")
choices_Region_CAGUAS <- list("BARRANQUITAS" = "BARRANQUITAS", "CIDRA" = "CIDRA", "GUAYAMA" = "GUAYAMA", "GURABO" = "GURABO")
choices_Region_HUMACAO <- list("CANÓVANAS" = "CANÓVANAS", "FAJARDO" = "FAJARDO", "LAS PIEDRAS" = "LAS PIEDRAS", "YABUCOA" = "YABUCOA")
choices_Region_MAYAGUEZ <- list("AGUADILLA" = "AGUADILLA", "CABO ROJO" = "CABO ROJO", "MAYAGÜEZ" = "MAYAGÜEZ", "SAN SEBASTIÁN" = "SAN SEBASTIÁN")
choices_Region_PONCE <- list("PONCE" = "PONCE", "SANTA ISABEL" = "SANTA ISABEL", "UTUADO" = "UTUADO", "YAUCO" = "YAUCO")
choices_Region_SAN_JUAN <- list("CAROLINA" = "CAROLINA", "GUAYNABO" = "GUAYNABO", "SAN JUAN I" = "SAN JUAN I", "SAN JUAN II" = "SAN JUAN II")
# Municipality choices per district, grouped by region.
# Municipalities of the districts in the ARECIBO region
choices_Distrito_ARECIBO <- c("ARECIBO I" = "ARECIBO I", "ARECIBO II" = "ARECIBO II", "HATILLO" = "HATILLO")
choices_Distrito_CAMUY <- c("CAMUY" = "CAMUY", "LARES" = "LARES", "QUEBRADILLAS" = "QUEBRADILLAS")
choices_Distrito_MANATI <- c("BARCELONETA" = "BARCELONETA", "CIALES" = "CIALES", "FLORIDA" = "FLORIDA", "MANATÍ" = "MANATÍ")
choices_Distrito_VEGA_ALTA <- c("DORADO" = "DORADO", "VEGA ALTA" = "VEGA ALTA", "VEGA BAJA" = "VEGA BAJA")
# Municipalities of the districts in the BAYAMÓN region
choices_Distrito_BAYAMON <- c("BAYAMÓN I" = "BAYAMÓN I", "BAYAMÓN II" = "BAYAMÓN II", "BAYAMÓN III" = "BAYAMÓN III")
choices_Distrito_COROZAL <- c("COROZAL" = "COROZAL", "NARANJITO" = "NARANJITO")
choices_Distrito_OROCOVIS <- c("MOROVIS" = "MOROVIS", "OROCOVIS" = "OROCOVIS")
choices_Distrito_TOA_BAJA <- c("CATAÑO" = "CATAÑO", "TOA ALTA" = "TOA ALTA", "TOA BAJA" = "TOA BAJA")
# Municipalities of the districts in the CAGUAS region
# NOTE(review): "COMERÍO" maps to the value "COMERIO" (no accent) while every
# other entry has identical label and value — confirm which spelling tasas.csv
# uses before "fixing" this.
choices_Distrito_BARRANQUITAS <- list("AIBONITO" = "AIBONITO", "BARRANQUITAS" = "BARRANQUITAS", "COMERÍO" = "COMERIO")
choices_Distrito_CIDRA <- list("AGUAS BUENAS" = "AGUAS BUENAS", "CAYEY" = "CAYEY", "CIDRA" = "CIDRA")
choices_Distrito_GUAYAMA <- list("ARROYO" = "ARROYO", "GUAYAMA" = "GUAYAMA", "SALINAS" = "SALINAS")
choices_Distrito_GURABO <- list("CAGUAS I" = "CAGUAS I", "CAGUAS II" = "CAGUAS II", "GURABO" = "GURABO")
# Municipalities of the districts in the HUMACAO region
choices_Distrito_CANOVANAS <- list("CANÓVANAS" = "CANÓVANAS", "LOÍZA" = "LOÍZA", "LUQUILLO" = "LUQUILLO", "RÍO GRANDE" = "RÍO GRANDE")
choices_Distrito_FAJARDO <- list("CEIBA" = "CEIBA", "CULEBRA" = "CULEBRA", "FAJARDO" = "FAJARDO", "NAGUABO" = "NAGUABO", "VIEQUES" = "VIEQUES")
choices_Distrito_LAS_PIEDRAS <- list("HUMACAO" = "HUMACAO", "JUNCOS" = "JUNCOS", "LAS PIEDRAS" = "LAS PIEDRAS")
choices_Distrito_YABUCOA <- list("MAUNABO" = "MAUNABO", "PATILLAS" = "PATILLAS", "SAN LORENZO" = "SAN LORENZO", "YABUCOA" = "YABUCOA")
# Municipalities of the districts in the MAYAGÜEZ region
choices_Distrito_AGUADILLA <- list("AGUADA" = "AGUADA", "AGUADILLA" = "AGUADILLA", "AÑASCO" = "AÑASCO", "RINCÓN" = "RINCÓN")
choices_Distrito_CABO_ROJO <- list("CABO ROJO" = "CABO ROJO", "LAJAS" = "LAJAS", "SÁBANA GRANDE" = "SÁBANA GRANDE", "SAN GERMÁN" = "SAN GERMÁN")
choices_Distrito_MAYAGUEZ <- list("HORMIGUEROS" = "HORMIGUEROS", "LAS MARÍAS" = "LAS MARÍAS", "MARICAO" = "MARICAO", "MAYAGÜEZ" = "MAYAGÜEZ")
choices_Distrito_SAN_SEBASTIAN <- list("ISABELA" = "ISABELA", "MOCA" = "MOCA", "SAN SEBASTIÁN" = "SAN SEBASTIÁN")
# Municipalities of the districts in the PONCE region
choices_Distrito_PONCE <- list("PONCE I" = "PONCE I", "PONCE II" = "PONCE II", "PONCE III" = "PONCE III")
choices_Distrito_SANTA_ISABEL <- list("COAMO" = "COAMO", "JUANA DÍAZ" = "JUANA DÍAZ", "SANTA ISABEL" = "SANTA ISABEL", "VILLALBA" = "VILLALBA")
choices_Distrito_UTUADO <- list("ADJUNTAS" = "ADJUNTAS", "JAYUYA" = "JAYUYA", "UTUADO" = "UTUADO")
choices_Distrito_YAUCO <- list("GUÁNICA" = "GUÁNICA", "GUAYANILLA" = "GUAYANILLA", "PEÑUELAS" = "PEÑUELAS", "YAUCO" = "YAUCO")
# Municipalities of the districts in the SAN JUAN region
choices_Distrito_CAROLINA <- list("CAROLINA I" = "CAROLINA I", "CAROLINA II" = "CAROLINA II")
choices_Distrito_GUAYNABO <- list("GUAYNABO" = "GUAYNABO", "TRUJILLO ALTO" = "TRUJILLO ALTO")
choices_Distrito_SAN_JUAN_I <- list("SAN JUAN I" = "SAN JUAN I", "SAN JUAN II" = "SAN JUAN II")
choices_Distrito_SAN_JUAN_II <- list("SAN JUAN III" = "SAN JUAN III", "SAN JUAN IV" = "SAN JUAN IV", "SAN JUAN V" = "SAN JUAN V")
shinyServer(function(input, output, session) {
  # Graduation-rate data; "<<-" (kept from the original design) makes the
  # table a global data frame rather than session-local.
  tasas <<- read.csv("tasas.csv", encoding = "UTF-8")

  # Dispatch tables replacing the original 7- and 28-branch if/else ladders:
  # region -> district choices, and district -> municipality choices.
  # The choices_* vectors are defined at the top of this file.
  distritos_por_region <- list(
    "ARECIBO"  = choices_Region_ARECIBO,
    "BAYAMÓN"  = choices_Region_BAYAMON,
    "CAGUAS"   = choices_Region_CAGUAS,
    "HUMACAO"  = choices_Region_HUMACAO,
    "MAYAGÜEZ" = choices_Region_MAYAGUEZ,
    "PONCE"    = choices_Region_PONCE,
    "SAN JUAN" = choices_Region_SAN_JUAN
  )
  municipios_por_distrito <- list(
    "ARECIBO"       = choices_Distrito_ARECIBO,
    "CAMUY"         = choices_Distrito_CAMUY,
    "MANATÍ"        = choices_Distrito_MANATI,
    "VEGA ALTA"     = choices_Distrito_VEGA_ALTA,
    "BAYAMÓN"       = choices_Distrito_BAYAMON,
    "COROZAL"       = choices_Distrito_COROZAL,
    "OROCOVIS"      = choices_Distrito_OROCOVIS,
    "TOA BAJA"      = choices_Distrito_TOA_BAJA,
    "BARRANQUITAS"  = choices_Distrito_BARRANQUITAS,
    "CIDRA"         = choices_Distrito_CIDRA,
    "GUAYAMA"       = choices_Distrito_GUAYAMA,
    "GURABO"        = choices_Distrito_GURABO,
    "CANÓVANAS"     = choices_Distrito_CANOVANAS,
    "FAJARDO"       = choices_Distrito_FAJARDO,
    "LAS PIEDRAS"   = choices_Distrito_LAS_PIEDRAS,
    "YABUCOA"       = choices_Distrito_YABUCOA,
    "AGUADILLA"     = choices_Distrito_AGUADILLA,
    "CABO ROJO"     = choices_Distrito_CABO_ROJO,
    "MAYAGÜEZ"      = choices_Distrito_MAYAGUEZ,
    "SAN SEBASTIÁN" = choices_Distrito_SAN_SEBASTIAN,
    "PONCE"         = choices_Distrito_PONCE,
    "SANTA ISABEL"  = choices_Distrito_SANTA_ISABEL,
    "UTUADO"        = choices_Distrito_UTUADO,
    "YAUCO"         = choices_Distrito_YAUCO,
    "CAROLINA"      = choices_Distrito_CAROLINA,
    "GUAYNABO"      = choices_Distrito_GUAYNABO,
    "SAN JUAN I"    = choices_Distrito_SAN_JUAN_I,
    "SAN JUAN II"   = choices_Distrito_SAN_JUAN_II
  )

  # Shared ggplot styling, built once instead of being duplicated in every
  # renderPlot. Color-blind-safe palette (black removed):
  # http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/
  color_blind_palette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442",
                           "#0072B2", "#D55E00", "#CC79A7")
  color_fill_scheme <- scale_fill_manual(values = color_blind_palette)
  legend_theme <- theme(legend.position = "top")
  # Keep the legend on a single row:
  # http://stackoverflow.com/questions/27130610/legend-on-bottom-two-rows-wrapped-in-ggplot2-in-r
  legend_arrange <- guides(fill = guide_legend(nrow = 1))

  # District pulldown: its choices depend on the selected region.
  output$distritos_pulldown <- renderUI({
    if (is.null(input$select_Region))
      return()
    choices <- distritos_por_region[[input$select_Region]]
    if (!is.null(choices))
      selectInput("select_Distrito", label = "Distrito", choices = choices)
  })

  # Municipality pulldown: its choices depend on the selected district.
  output$municipios_pulldown <- renderUI({
    if (is.null(input$select_Distrito))
      return()
    choices <- municipios_por_distrito[[input$select_Distrito]]
    if (!is.null(choices))
      selectInput("select_Municipio", label = "Municipio", choices = choices)
  })

  # Island-wide view: boxplot of rates per region plus a bar chart of
  # school counts per graduation-rate bucket.
  output$regiones <- renderPlot({
    relevant_data <- subset(tasas, Region != "OFICINA CENTRAL")
    base_plot <- ggplot(relevant_data)
    regiones_boxplot <- base_plot +
      geom_boxplot(aes(x = Region, y = Tasa, fill = Categoria),
                   outlier.colour = "red") +
      color_fill_scheme + xlab("Regiones") + ylab("Tasa de graduación") +
      labs(fill = NULL) + legend_theme + legend_arrange
    regiones_barplot <- base_plot +
      geom_bar(aes(x = Tasa_bucket, fill = Categoria), position = "dodge") +
      color_fill_scheme + xlab("Tasa de graduación") + ylab("Escuelas") +
      labs(fill = NULL) + legend_theme + legend_arrange
    multiplot(regiones_boxplot, regiones_barplot, cols = 1)
  })

  # Region view: same pair of plots restricted to the selected region.
  output$distritos <- renderPlot({
    relevant_data <- subset(tasas, Region == input$select_Region)
    base_plot <- ggplot(relevant_data)
    distrito_boxplot <- base_plot +
      geom_boxplot(aes(x = Distrito, y = Tasa, fill = Categoria),
                   outlier.colour = "red") +
      color_fill_scheme +
      xlab(paste("Distritos en región de", input$select_Region)) +
      ylab("Tasa de graduación") + labs(fill = NULL) +
      legend_theme + legend_arrange
    distrito_barplot <- base_plot +
      geom_bar(aes(x = Tasa_bucket, fill = Categoria), position = "dodge") +
      color_fill_scheme + xlab("Tasa de graduación") +
      ylab(paste("Escuelas en región de", input$select_Region)) +
      labs(fill = NULL) + scale_y_discrete() + legend_theme + legend_arrange
    multiplot(distrito_boxplot, distrito_barplot, cols = 1)
  })

  # District view: one bar chart of school-level rates for each municipality
  # in the selected district, stacked vertically with multiplot().
  output$municipios <- renderPlot({
    relevant_data <- subset(tasas, Region == input$select_Region &
                              Distrito == input$select_Distrito)
    # Keep only municipalities that actually occur in the filtered data.
    municipios_presentes <- table(relevant_data$Municipio)
    municipios_presentes <- municipios_presentes[municipios_presentes > 0]
    muni_plot_list <- list()
    for (muni in names(municipios_presentes)) {
      relevant_data_muni <- subset(relevant_data, Municipio == muni)
      muni_escuelas_plot <- ggplot(relevant_data_muni) +
        geom_bar(aes(x = Escuela, y = Tasa, fill = Categoria),
                 position = "dodge", stat = "identity") +
        labs(fill = NULL) + color_fill_scheme + legend_theme +
        legend_arrange +
        xlab(paste("Escuelas en municipio de", muni)) +
        ylab("Tasa de graduación")
      muni_plot_list <- c(muni_plot_list, list(muni_escuelas_plot))
    }
    multiplot(plotlist = muni_plot_list)
  })

  # Scratch output kept from development; prints a code for the region.
  output$debug <- renderPrint({
    x <- switch(input$select_Region, "ARECIBO" = 4, "BAYAMÓN" = 5)
    x
  })

  # Multiple plot function, adapted from:
  # http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
  #
  # ggplot objects can be passed in ..., or to plotlist (a list of ggplots).
  # - cols: number of columns in the layout
  # - layout: matrix specifying the layout; if present, 'cols' is ignored.
  #   e.g. matrix(c(1,2,3,3), nrow=2, byrow=TRUE) puts plot 1 upper-left,
  #   plot 2 upper-right, and plot 3 across the whole bottom row.
  multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
    library(grid)
    plots <- c(list(...), plotlist)
    numPlots <- length(plots)
    # If no explicit layout, fill a cols-wide grid top to bottom.
    if (is.null(layout)) {
      layout <- matrix(seq(1, cols * ceiling(numPlots / cols)),
                       ncol = cols, nrow = ceiling(numPlots / cols))
    }
    if (numPlots == 1) {
      print(plots[[1]])
    } else {
      grid.newpage()
      pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
      for (i in seq_len(numPlots)) {
        # Grid cell(s) that hold subplot i.
        matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
        print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                        layout.pos.col = matchidx$col))
      }
    }
  }
})
|
763e3369560dc27be9eb93d1d82d79d1a2d0b1b2
|
77b9c26131fb30f90adedeb0615702b9c81bf7dd
|
/time_series_W_SSAR1_logY_norm_norm_var_thetaM_2006.R
|
d944e9b24f9026368d82c19e31b2c9222ec60c60
|
[] |
no_license
|
mteguchi/Indonesia_nesting
|
a4c940bcdec06b6d1b803e6e8e239d4ca7b87bc6
|
3c074e88c264c811857a67a566a3180e2846b85f
|
refs/heads/master
| 2023-07-20T10:38:36.792593
| 2023-07-13T22:50:25
| 2023-07-13T22:50:25
| 126,876,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,869
|
r
|
time_series_W_SSAR1_logY_norm_norm_var_thetaM_2006.R
|
# Time-series (state-space AR(1)) analysis of W nest counts, 2006 onward.
# Fits a JAGS model with normal process/observation errors on log counts and
# month-specific theta, then saves predictions, a figure, and diagnostics.
#
# NOTE(review): the original script started with rm(list = ls()); that global
# workspace wipe was removed — run this in a fresh R session instead.
# NOTE(review): dplyr verbs (%>%, mutate, select, right_join, filter, ...)
# are used below but dplyr is not attached here; presumably
# HILL_BiOp_functions.R loads it — confirm.
tic <- Sys.time()
Sys <- Sys.info()

source('HILL_BiOp_functions.R')
library(jagsUI)
library(coda)
library(ggplot2)
library(loo)

# Output switches.
save.RData <- TRUE
save.fig <- TRUE
plot.fig <- TRUE

# MCMC settings.
MCMC.n.chains <- 5
MCMC.n.samples <- 500000
MCMC.n.burnin <- 350000
MCMC.n.thin <- 50

MCMC.params <- list(n.chains = MCMC.n.chains,
                    n.samples = MCMC.n.samples,
                    n.burnin = MCMC.n.burnin,
                    n.thin = MCMC.n.thin)

# Get the nest-count data first:
data.0 <- read.csv('data/W_nests.csv')

# Create a regularly spaced monthly grid covering the observed years:
data.2 <- data.frame(Year = rep(min(data.0$Year_begin,
                                    na.rm = TRUE):max(data.0$Year_begin,
                                                      na.rm = TRUE),
                                each = 12),
                     Month_begin = rep(1:12,
                                       max(data.0$Year_begin,
                                           na.rm = TRUE) -
                                         min(data.0$Year_begin,
                                             na.rm = TRUE) + 1)) %>%
  mutate(begin_date = as.Date(paste(Year,
                                    Month_begin,
                                    '01', sep = "-"),
                              format = "%Y-%m-%d"),
         Frac.Year = Year + (Month_begin - 0.5)/12) %>%
  select(Year, Month_begin, begin_date, Frac.Year)

# Join the observed counts (W.1) onto the regular monthly grid; months with
# no observation become NA. Keep only years after 2005.
data.0 %>% mutate(begin_date = as.Date(paste(Year_begin,
                                             Month_begin,
                                             '01', sep = "-"),
                                       format = "%Y-%m-%d")) %>%
  mutate(Year = Year_begin,
         Month = Month_begin,
         f_month = as.factor(Month),
         f_year = as.factor(Year),
         Frac.Year = Year + (Month_begin - 0.5)/12,
         Nests = W.1) %>%
  select(Year, Month, Frac.Year, begin_date, Nests) %>%
  na.omit() %>%
  right_join(., data.2, by = "begin_date") %>%
  transmute(Year = Year.y,
            Month = Month_begin,
            Frac.Year = Frac.Year.y,
            Nests = Nests) %>%
  reshape::sort_df(., vars = "Frac.Year") %>%
  filter(Year > 2005) -> data.1

# JAGS inputs: log counts, month index, and series length (T).
jags.data <- list(y = log(data.1$Nests),
                  m = data.1$Month,
                  T = nrow(data.1))

#load.module('dic')
jags.params <- c("theta.1", 'sigma.pro1', "sigma.pro2", "sigma.obs",
                 "mu", "y", "X", "deviance", "loglik")

# Fit the state-space model (parallel chains).
jm <- jags(jags.data,
           inits = NULL,
           parameters.to.save = jags.params,
           model.file = 'models/model_SSAR1_W_logY_norm_norm_var_thetaM.txt',
           n.chains = MCMC.n.chains,
           n.burnin = MCMC.n.burnin,
           n.thin = MCMC.n.thin,
           n.iter = MCMC.n.samples,
           DIC = TRUE, parallel = TRUE)

#g.diag1 <- gelman.diag(jm$samples)
Rhat <- jm$Rhat

# Posterior summaries (2.5/50/97.5%) of the observation model (y).
ys.stats <- data.frame(low_y = jm$q2.5$y,
                       median_y = jm$q50$y,
                       high_y = jm$q97.5$y,
                       time = data.1$Frac.Year,
                       obsY = data.1$Nests,
                       month = data.1$Month,
                       year = data.1$Year)

# Posterior summaries of the state model (X).
Xs.stats <- data.frame(low_X = jm$q2.5$X,
                       median_X = jm$q50$X,
                       high_X = jm$q97.5$X,
                       time = data.1$Frac.Year,
                       obsY = data.1$Nests,
                       month = data.1$Month,
                       year = data.1$Year)

# Annual sums of the state estimates.
Xs.year <- group_by(Xs.stats, year) %>% summarize(median = sum(median_X),
                                                  low = sum(low_X),
                                                  high = sum(high_X))

# Pareto-k / LOO diagnostics (helper defined in HILL_BiOp_functions.R).
loo.out <- pareto.k.diag(jm, MCMC.params, jags.data)

toc <- Sys.time()
dif.time <- toc - tic

# Predicted (state) counts back-transformed with exp(), with 95% band in
# dashed red; observed counts in green.
p.1 <- ggplot() +
  geom_line(data = Xs.stats,
            aes(x = time, y = exp(high_X)), color = "red",
            linetype = 2) +
  geom_point(data = Xs.stats,
             aes(x = time, y = exp(median_X)), color = "red",
             alpha = 0.5) +
  geom_line(data = Xs.stats,
            aes(x = time, y = exp(median_X)), color = "red",
            alpha = 0.5) +
  geom_line(data = Xs.stats,
            aes(x = time, y = exp(low_X)), color = "red",
            linetype = 2) +
  geom_point(data = ys.stats,
             aes(x = time, y = obsY), color = "green",
             alpha = 0.5)

# Bundle everything needed to reproduce/inspect the run.
results <- list(data.1 = data.1,
                jags.data = jags.data,
                Xs.stats = Xs.stats,
                Xs.year = Xs.year,
                ys.stats = ys.stats,
                tic = tic,
                toc = toc,
                dif.time = dif.time,
                Sys = Sys,
                MCMC.params = MCMC.params,
                Rhat = Rhat,
                jm = jm,
                loo.out = loo.out)

if (save.fig)
  ggsave(plot = p.1,
         filename = 'figures/predicted_counts_W_logY_norm_norm_var_thetaM_2006.png',
         dpi = 600)

if (save.RData)
  saveRDS(results,
          file = paste0('RData/SSAR1_logY_norm_norm_var_thetaM_W_2006_', Sys.Date(), '.rds'))

if (plot.fig){
  base_theme <- ggplot2::theme_get()
  library(bayesplot)  # loading bayesplot changes the ggplot theme...
  # ...so set back to the base theme:
  ggplot2::theme_set(base_theme)
  # Trace and density plots for convergence checks.
  mcmc_trace(jm$samples, c("mu", "sigma.obs", "sigma.pro1", "sigma.pro2"))
  mcmc_dens(jm$samples, c("mu", "sigma.obs", "sigma.pro1", "sigma.pro2"))
  mcmc_dens(jm$samples, c("theta.1[1]", "theta.1[2]", "theta.1[3]", "theta.1[4]",
                          "theta.1[5]", "theta.1[6]", "theta.1[7]", "theta.1[8]",
                          "theta.1[9]", "theta.1[10]", "theta.1[11]", "theta.1[12]"))
}
|
343197d7dfe90ff003ed7eebcec6db73f34044c2
|
db23c9521df0c510eae066cb796a4c517500600c
|
/Rscript.R
|
edc02f5cd7efc7a67d64e3df99307ba3bcc90e8a
|
[] |
no_license
|
PavelDeryabin/BI_project_1st_semester
|
f689094d2da66a1b85968a5af346a9eaaf50632c
|
0ecb93e15ca564b828bcf320b27489594bbbf67c
|
refs/heads/main
| 2023-02-02T13:37:49.247505
| 2020-12-20T22:10:45
| 2020-12-20T22:10:45
| 300,917,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,243
|
r
|
Rscript.R
|
###########################################################################################################################################
# RNA-seq analysis for OUABAIN THEME
###########################################################################################################################################
library(Rmisc)
library(xlsx)
library(ggplot2)
library(dplyr)
library(tximeta)
library(org.Hs.eg.db)
library(AnnotationDbi)
library(GenomicFeatures)
library(DESeq2)
library(clusterProfiler)
library(DOSE)
library(topGO)
library(enrichplot)
library(pheatmap)
library(genefilter)
library(RColorBrewer)
#### Tximeta quantification ----
# Build the sample table (coldata), point it at the per-sample salmon
# "quant.sf" files, quantify at transcript level with tximeta, summarize to
# gene level, and attach gene-ID annotations from org.Hs.eg.db.
setwd("...") # placeholder: directory containing "quants/" and the coldata CSV
dir <- ("quants")
list.files(file.path(dir))
dir2 <- (".")
csvfile <- file.path(dir2, "coldatadf.csv")
coldata <- read.csv(csvfile, row.names = 1, stringsAsFactors = FALSE)
str(coldata)
coldata$SampleID <- as.character(coldata$SampleID)
coldata$names <- coldata$SampleGName # sample names, used below to build the quant paths
coldata$files <- file.path(dir2, "quants", coldata$names, "quant.sf") # one quant.sf per sample
file.exists(coldata$files) # sanity check: are all quant files present?
coldata # look at the data
se <- tximeta(coldata) # transcript-level quantification
dim(se) # number of transcripts x samples
head(rownames(se))
gse <- summarizeToGene(se) # summarization to gene level
keytypes(org.Hs.eg.db) # list the ID types available for annotation
gse <- addIds(gse, "REFSEQ", gene=TRUE)
gse <- addIds(gse, "ENTREZID", gene=TRUE)
gse <- addIds(gse, "GENENAME", gene=TRUE)
gse <- addIds(gse, "SYMBOL", gene=TRUE)
mcols(gse)
######################## EVA and DE analysis with DESeq2
### Construction of the DESeqDataSet - the DESeq2 custom class, an extension
### of Bioconductor's SummarizedExperiment.
# Recode the sample metadata as factors and set readable level names.
gse@colData
str(gse@colData$CellType)
gse@colData$CellType <- as.factor(gse@colData$CellType)
gse@colData$Cells <- as.factor(gse@colData$Cells)
gse@colData$OuabSeno <- as.factor(gse@colData$OuabSeno)
levels(gse@colData$CellType)
levels(gse@colData$Cells)
levels(gse@colData$OuabSeno)
colData(gse)$CellType <- relevel(colData(gse)$CellType, ref = "END-MSCs") # reference level for contrasts
levels(gse@colData$CellType)
levels(gse@colData$Cells) <- c("Young", "Senescent")
levels(gse@colData$CellType) <- c("END-MSCs", "A549", "IMR-90")
levels(gse@colData$OuabSeno) <- c("Sensitive", "Resistant")
names(gse@colData)[names(gse@colData) == 'OuabSeno'] <- 'Senolysis'
levels(gse@colData$Senolysis)
# Full model with a Cells x Senolysis interaction term.
dds <- DESeqDataSet(gse, design = ~ Cells + Senolysis + Cells:Senolysis)
dds # get summary
nrow(dds) # total number of genes before filtering
### EVA
dds1 <- dds[ rowSums(counts(dds)) > 5, ] # removing rows of the DESeqDataSet that have no counts, or only a single count across all samples
nrow(dds1)
# or — the stricter filter below overwrites dds1 and is the one actually used:
keep <- rowSums(counts(dds) >= 5) >= 4 # minimal filtering to reduce the size of the dataset. We do not need to retain genes
                                       # if they do not have a count of 5 or more for 4 or more samples
                                       # as these genes will have no statistical power to detect differences,
                                       # and no information to compute distances between samples
table(keep)
dds1 <- dds[keep,]
rld <- DESeq2::rlog(dds1, blind = FALSE) # DESeq2 transformation for count data that stabilize the variance across the mean
class(rld) # DESeqTransform object which is based on the SummarizedExperiment class
DESeq2::plotPCA(rld, intgroup = c("CellType", "Cells"))
# Senescence validation: PCA restricted to genes in the "cellular
# senescence" GO term (GO:0090398), read from an Excel gene list.
total <- as.data.frame(dds1@rowRanges)
View(total)
total <- total[,c(6,8)] # keep two annotation columns (gene id + ENTREZID — confirm positions)
# NOTE(review): `total` now has only 2 columns, so selecting c(6,10) below
# would error if run; `total1` also appears unused afterwards — stale line?
total1 <- total[,c(6,10)]
sub1 <- read.xlsx("Cellular senescence GO 0090398.xlsx", sheetIndex = 1)
str(sub1)
str(total)
sub1$ENTREZID <- as.character(sub1$ENTREZID)
merged <- merge(total,sub1,by="ENTREZID")
head(merged)
str(merged)
sub1v <- merged[,2] # presumably the gene-id column matching rownames(rld) — verify
rld_sen <- rld[sub1v,]
DESeq2::plotPCA(rld_sen, intgroup = c("CellType", "Cells"))
pcaData <- plotPCA(rld_sen, intgroup = c("CellType", "Cells"), returnData = TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
# Publication-style PCA figure.
ggplot(pcaData, aes(x = PC1, y = PC2, color = Cells, shape = CellType)) +
  geom_point(size = 2.5) +
  xlab(paste0("PC1: ", percentVar[1], "% variance")) +
  ylab(paste0("PC2: ", percentVar[2], "% variance")) +
  theme_bw() +
  theme(
    plot.title = element_text(size=14, color="black"),
    axis.title.y = element_text(size=14, color="black"),
    axis.title.x = element_text(size=14, color="black"),
    axis.text.y = element_text(size=14),
    axis.text.y.right = element_text(color = "black"),
    axis.text.x = element_text(size=14),
    legend.text = element_text(size = 14, colour = "black"),
    legend.title = element_text(size = 14, colour = "black"),
    legend.position = "right") +
  scale_color_manual(values = c("#ff9a00","#5e5e5e"),
                     breaks=c("Young","Senescent"),
                     labels=c("Young", "Senescent"))
ggsave(file = "./results_new/PCA.jpeg", width = 4.7, height = 3, dpi = 500)
############ DEG analysis with DESeq2
dds1 <- DESeq(dds1) # run the DE analysis (on raw, NOT transformed/normalized counts!)
resultsNames(dds1) # get terms from the fitted model
res_ouab <- results(dds1, name = "CellsSenescent.SenolysisResistant") # DEGs for the interaction term
# for now the difference reflects lines vs eMSCs -> multiply lfc by (-1) to flip direction
mcols(res_ouab, use.names = TRUE) # description of the result columns
summary(res_ouab) # main statistics
plotMA(res_ouab, ylim = c(-10, 10)) # MA plot over all genes
#abline(h=c(-1,1), col="dodgerblue", lwd=2)
# apeglm LFC shrinkage (more informative visualization and more accurate
# ranking of genes by effect size); coef = 4 is the interaction term above.
res_ouab_sh <- lfcShrink(dds1, coef = 4, type="apeglm", lfcThreshold=0.667)
plotMA(res_ouab_sh, ylim = c(-10, 10))
#abline(h=c(-1,1), col="dodgerblue", lwd=2)
### Annotations with different IDs
ens.str <- substr(rownames(res_ouab_sh), 1, 15) # strip the Ensembl version suffix
columns(org.Hs.eg.db) # which ID types are available
res_ouab_sh$symbol <- mapIds(org.Hs.eg.db,
                             keys=ens.str,
                             column="SYMBOL",
                             keytype="ENSEMBL",
                             multiVals="first")
res_ouab_sh$entrez <- mapIds(org.Hs.eg.db,
                             keys=ens.str,
                             column="ENTREZID",
                             keytype="ENSEMBL",
                             multiVals="first")
res_ouab_sh$genename <- mapIds(org.Hs.eg.db,
                               keys=ens.str,
                               column="GENENAME",
                               keytype="ENSEMBL",
                               multiVals="first")
summary(res_ouab_sh)
############### subsetting
# NOTE(review): `decreasing = T` works but should be spelled TRUE.
res_ouab_sh <- res_ouab_sh[order(res_ouab_sh$log2FoldChange, decreasing = T),] # order by lfc
# Exports the FULL shrunken result table ordered by lfc (no padj filter is
# applied here, despite the original "BH-sig" comment — confirm intent).
write.csv(as.data.frame(res_ouab_sh), file="./results_new/Supplementary9.csv")
########################################################################################################################################
# GSEA with multiple annotations
########################################################################################################################################
# geneList preparation: ranked vector of shrunken log2 fold changes, named
# by gene symbol, as required by clusterProfiler's gse* functions.
# NOTE(review): rm() of objects that do not exist yet only warns; harmless
# but noisy on a first run.
rm(all,gene_list,original_gene_list)
all <- as.data.frame(res_ouab_sh)
str(all)
# we want the log2 fold change
original_gene_list <- all$log2FoldChange
# name the vector
names(original_gene_list) <- all$symbol
original_gene_list
# omit any NA values
gene_list<-na.omit(original_gene_list)
anyDuplicated(names(gene_list))
gene_list <- gene_list[!duplicated(names(gene_list))] # keep first occurrence per symbol
gene_list
# sort the list in decreasing order (required for clusterProfiler)
gene_list = sort(gene_list, decreasing = TRUE)
keytypes(org.Hs.eg.db)
################ GO BP: gene-set enrichment over GO Biological Process
gseGOBP <- gseGO(geneList=gene_list,
             OrgDb = org.Hs.eg.db,
             ont ="BP",
             keyType = "SYMBOL",
             nPerm = 100000,
             #minGSSize = 3,
             #maxGSSize = 1000,
             verbose = TRUE,
             pAdjustMethod = "BH",
             pvalueCutoff = 0.2,
             by = "fgsea")
View(summary(gseGOBP))
dotplot(gseGOBP, showCategory=50, split=".sign") + facet_grid(.~.sign)
# GO terms of interest:
# GO:0010107 potassium ion import
# GO:1904064 positive regulation of cation transmembrane transport
# GO:0035794 positive regulation of mitochondrial membrane permeability
# GO:0008637 apoptotic mitochondrial changes
# GO:2001233 regulation of apoptotic signaling pathway
# GO:1901028 regulation of mitochondrial outer membrane permeabilization involved in apoptotic signalin
# GO:1902110 positive regulation of mitochondrial membrane permeability involved in apoptotic process
# GO:2001235 positive regulation of apoptotic signaling pathway
# GO:0006919 activation of cysteine-type endopeptidase activity involved in apoptotic process
# Running-score plot for the two cation-transport terms.
gseaplot2(gseGOBP,
          geneSetID = c("GO:0010107", "GO:1904064"),
          color = c("#fa3c4c", "#d696bb"),
          base_size = 14,
          #rel_heights = c(1.5, 0.5, 1),
          subplots = 1:3,
          pvalue_table = TRUE,
          ES_geom = "line")
ggsave(file="./results_new/GSEA GO cations terms.jpeg", width=5.8, height=4.8, dpi=500)
# Just for visualization: a copy of gseGOBP with shortened labels (p1..p5).
#gseGOBP2 <- gseGOBP
#View(gseGOBP2@result)
#rn <- rownames(gseGOBP2@result)
#rownames(gseGOBP2@result) <- c(1:891)
#gseGOBP2@result[216,2] <-"p1"
#gseGOBP2@result[73,2] <-"p2"
#gseGOBP2@result[237,2] <-"p5"
#gseGOBP2@result[385,2] <-"p4"
#gseGOBP2@result[540,2] <-"p3"
#rownames(gseGOBP2@result) <- rn
#gseaplot2(gseGOBP2, #gseGOBP
#          geneSetID = c("GO:1902110", "GO:2001235", "GO:0006919"),
#          color = c("#ffc100", "#ff7a7b", "#a24d53"),
#          base_size = 14,
#          rel_heights = c(1.5, 0.5, 1),
#          #subplots = 1,
#          #pvalue_table = TRUE,
#          ES_geom = "line")
#ggsave(file="./results_new/GSEA GO apoptosis terms1.jpeg", width=5.8, height=4.8, dpi=500)
# for BI short presentation
# NOTE(review): gseGOBP2 is only created in the commented-out block above —
# uncomment that block (or substitute gseGOBP) before running this call.
gseaplot2(gseGOBP2, #gseGOBP
          geneSetID = c("GO:1902110", "GO:0010107"),
          color = c("#398564", "#f07f13"),
          base_size = 14,
          rel_heights = c(1.5, 0.3, 1),
          #subplots = 1,
          #pvalue_table = TRUE,
          ES_geom = "line")
ggsave(file="./results_new/GSEA GO apoptosis terms6.jpeg", width=3.1, height=4.4, dpi=500)
# results export: full GSEA table sorted by adjusted p-value.
gseGOBPs_df <- gseGOBP@result
str(gseGOBPs_df)
gseGOBPs_df<- arrange(gseGOBPs_df,p.adjust)
View(gseGOBPs_df)
write.csv(as.data.frame(gseGOBPs_df), file="./results_new/Supplementary.csv") #
# additional visualization
dotplot(gseGOBP, showCategory=40, split=".sign") + facet_grid(.~.sign)
emapplot(gseGOBP)
cnetplot(gseGOBP, #categorySize="pvalue",
         showCategory = 2,
         foldChange=gene_list)
cnetplot(gseGOBP, foldChange=gene_list, circular = TRUE, colorEdge = TRUE)
ridgeplot(gseGOBP)
heatplot(gseGOBP, foldChange=gene_list)
plotGOgraph(gseGOBP)
# NOTE(review): `gseGOBPs` is never defined — presumably `gseGOBP` was meant.
gseaplot(gseGOBPs, geneSetID = "GO:1904064", title = "positive regulation of cation transmembrane transport")
# Potassium-import-related genes heatmap (pheatmap, per-cell-type z-scores).
# potassium import core genes - KCNJ2/WNK4/SLC12A8/WNK1/KCNJ8/KCNJ14/ABCC9/SLC12A7/SLC12A6
# pos reg cation transport - RELN/KCNJ2/WNK4/AGT/RGN/CX3CL1/PKD2/CAPN3/AMIGO1/TRPC1/HSPA2/WNK1/CACNA2D1
# Quick per-gene count plots for a few channels/pumps of interest.
plotCounts(dds1, gene="ENSG00000123700.5", intgroup=c("CellType","Cells"), main = "KCNJ2 expression")
plotCounts(dds1, gene="ENSG00000163399.16", intgroup=c("CellType","Cells"), main = "ATP1A1 expression")
plotCounts(dds1, gene="ENSG00000143153.13", intgroup=c("CellType","Cells"), main = "ATP1B1 expression")
plotCounts(dds1, gene="ENSG00000129473.9", intgroup=c("CellType","Cells"), main = "ATP1B3 expression")
# Ensembl IDs (with version) of the genes to show in the heatmap.
genes_hm1 <- c("ENSG00000123700.5","ENSG00000126562.17","ENSG00000221955.10", "ENSG00000060237.17",
               "ENSG00000121361.5", "ENSG00000182324.7", "ENSG00000069431.11", "ENSG00000113504.21", "ENSG00000140199.12",
               "ENSG00000189056.14", "ENSG00000135744.8", "ENSG00000130988.13",
               "ENSG00000006210.7", "ENSG00000118762.8", "ENSG00000092529.25", "ENSG00000181754.7", "ENSG00000144935.15",
               "ENSG00000126803.9", "ENSG00000153956.16")
rld_hm1 <- rld[genes_hm1,]
mat <- assay(rld_hm1)
mat <- mat - rowMeans(mat) # center each gene on its mean
#anno <- as.data.frame(colData(rld_hm1)[, c("Cells","CellType","OuabSeno")])
# NOTE(review): `anno` is only created a few lines BELOW — this assignment
# runs before it exists in a fresh session and will error; move it after
# the anno <- ... line (the original creation above is commented out).
rownames(anno) <- colnames(rld_hm1)
#colnames(anno)[1] <- "Cells"
colors<-colorRampPalette(rev(brewer.pal(n=9,name="RdBu")))(255) #length(breaksList)
# Column annotations and their colors for pheatmap.
anno <- as.data.frame(colData(rld_hm1)[, c("Cells","CellType","Senolysis")])
ann_colors <- list(Cells = c(Young = "#ffd596", Senescent = "#b2b2b2"),
                   CellType = c("END-MSCs" = "#b2d8d8", A549 = "#f7d0cb", "IMR-90" = "#e0ac69"),
                   Senolysis = c(Sensitive = "#ff556d", Resistant = "#9ace79"))
# Row-wise z-scores computed separately within each cell type
# (columns 1-8 eMSCs, 9-12 A549, 13-18 IMR-90 — confirm column order).
mat_eMSCs <- t(scale(t(mat[,1:8])))
mat_a549 <- t(scale(t(mat[,9:12])))
mat_imr <- t(scale(t(mat[,13:18])))
mat_scaled <- cbind(mat_eMSCs, mat_a549, mat_imr)
mat_scaled
plot5f <-pheatmap(mat_scaled,
         annotation_col = anno,
         #annotation_row = row_anno,
         #scale = "row",
         #breaks = breaksList,
         col=colors,
         annotation_colors = ann_colors,
         labels_row = rld_hm1@rowRanges$SYMBOL, # show gene symbols instead of Ensembl IDs
         cluster_cols = FALSE,
         cluster_rows = FALSE,
         gaps_col = c(8,12), # visual gaps between the three cell types
         show_colnames = FALSE,
         #legend_breaks = -1:4,
         main = "")
png(filename = "./results_new/HM1.png", width = 6200, height = 4400, units = "px", res = 1000)
plot5f
dev.off()
# ============================================================
# Apoptosis-related genes heatmap (Figure 2f)
# ============================================================
# Gene symbols to plot, one per row of apoptosis_hm.csv.
genes_hm2_df <- read.csv("apoptosis_hm.csv")
str(genes_hm2_df)
# FIX: the original used levels(genes_hm2_df$symbol), which returns NULL when
# read.csv() yields a character column (the default since R 4.0) and would
# silently empty the gene set. unique(as.character(...)) works for both
# factor and character columns.
genes_hm2 <- unique(as.character(genes_hm2_df$symbol))
# Translate symbols into the versioned Ensembl row names used by the
# results table and the rld object.
genes_hm2 <- rownames(subset(as.data.frame(res_ouab_sh), symbol %in% genes_hm2))
rld_hm2 <- rld[genes_hm2,]
# Gene-wise centered rlog expression.
mat <- assay(rld_hm2)
mat <- mat - rowMeans(mat)
# Column annotation; rld_hm2 has the same samples (columns) as the full rld,
# so this matches the HM1 annotation (the original read it from rld_hm1).
anno <- as.data.frame(colData(rld_hm2)[, c("Cells","CellType","Senolysis")])
# FIX: assign rownames AFTER creating `anno` (in the original the assignment
# preceded the creation and was discarded).
rownames(anno) <- colnames(rld_hm2)
colors <- colorRampPalette(rev(brewer.pal(n=9,name="RdBu")))(255)
ann_colors <- list(Cells = c(Young = "#ffd596", Senescent = "#b2b2b2"),
                   CellType = c("END-MSCs" = "#b2d8d8", A549 = "#f7d0cb", "IMR-90" = "#e0ac69"),
                   Senolysis = c(Sensitive = "#ff556d", Resistant = "#9ace79"))
# Z-score each cell-type block of samples separately; columns 1:8 = END-MSCs,
# 9:12 = A549, 13:18 = IMR-90 — assumed from gaps_col below, TODO confirm.
mat_eMSCs <- t(scale(t(mat[,1:8])))
mat_a549 <- t(scale(t(mat[,9:12])))
mat_imr <- t(scale(t(mat[,13:18])))
mat_scaled <- cbind(mat_eMSCs, mat_a549, mat_imr)
mat_scaled
plot5g <- pheatmap(mat_scaled,
         annotation_col = anno,
         col=colors,
         annotation_colors = ann_colors,
         labels_row = rld_hm2@rowRanges$SYMBOL,
         cluster_cols = FALSE,
         cluster_rows = FALSE,
         gaps_col = c(8,12),
         show_colnames = FALSE,
         main = "")
# Export at publication resolution.
png(filename = "./results_new/Figure 2f TFs HM.png", width = 6200, height = 14400, units = "px", res = 1000)
plot5g
dev.off()
############################################# Plotting KEGG Pathways
library(pathview)
# Render a set of KEGG pathway diagrams colored by fold change.
#
# pathview() writes "hsa<id>.pathview.png" files into the working directory
# as a side effect; nothing useful is returned. This helper replaces ~30
# copy-pasted pathview() calls in the original script with a single loop.
#
# @param gene_data fold-change data in the format pathview() accepts
#   (named vector or matrix keyed by gene id).
# @param pathways named character vector; names are the human-readable
#   pathway titles (documentation only), values are KEGG pathway ids
#   without the species prefix.
render_kegg_pathways <- function(gene_data, pathways) {
  for (id in pathways) {
    pathview(gene.data = gene_data,
             pathway.id = id,
             species = "hsa")
  }
  invisible(NULL)
}

# Pathways rendered from the ranked gene list (gene_list in the original).
pathways_from_list <- c(
  "Cellular senescence" = "04218",
  "Apoptosis"           = "04210",
  "Mineral absorption"  = "04978"
)

# Pathways rendered from the fold-change matrix (gene_matrix in the original).
pathways_from_matrix <- c(
  "Lysosome"                                                 = "04142",
  "Calcium signaling pathway"                                = "04020",
  "AMPK signaling pathway"                                   = "04152",
  "Autophagy"                                                = "04140",
  "Glycolysis / Gluconeogenesis"                             = "00010",
  "Citrate cycle (TCA cycle)"                                = "00020",
  "Base excision repair"                                     = "03410",
  "Non-homologous end-joining"                               = "03450",
  "ABC transporters"                                         = "02010",
  "Notch signaling pathway"                                  = "04330",
  "Wnt signaling pathway"                                    = "04310",
  "Ras signaling pathway"                                    = "04014",
  "Hippo signaling pathway"                                  = "04390",
  "TGF-beta signaling pathway"                               = "04350",
  "JAK-STAT signaling pathway"                               = "04630",
  "NF-kappa B signaling pathway"                             = "04064",
  "FoxO signaling pathway"                                   = "04068",
  "PI3K-Akt signaling pathway"                               = "04151",
  "mTOR signaling pathway"                                   = "04150",
  "ECM-receptor interaction"                                 = "04512",
  "Cell adhesion molecules"                                  = "04514",
  "Cell cycle"                                               = "04110",
  "p53 signaling pathway"                                    = "04115",
  "Signaling pathways regulating pluripotency of stem cells" = "04550",
  "Inflammatory mediator regulation of TRP channels"         = "04750",
  "Autophagy - other"                                        = "04136"
)

# Same set of output files as the original sequence of calls; only the
# order in which they are written differs (list-based ones first).
render_kegg_pathways(gene_list, pathways_from_list)
render_kegg_pathways(gene_matrix, pathways_from_matrix)
|
3ef671d07a1eb36c27da875bbb7f31456bf55310
|
29dbebba9a0cbd0610a1660b7b1a27673bb90d3f
|
/man/SCRdensity.Rd
|
1ec87dbe88f22027ef568a0740d32e533b577bea
|
[] |
no_license
|
jaroyle/SCRbayes
|
9af20e405b8eae53d80a63f137e37b60ef993041
|
6f0b484947f53d6fa8cbea806190e68919fa7fd7
|
refs/heads/master
| 2021-01-19T05:45:03.904012
| 2014-10-09T17:14:19
| 2014-10-09T17:14:19
| 3,121,871
| 4
| 2
| null | 2020-05-05T16:31:54
| 2012-01-06T23:24:08
|
R
|
UTF-8
|
R
| false
| false
| 2,944
|
rd
|
SCRdensity.Rd
|
\name{SCRdensity}
\alias{SCRdensity}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
makes a density plot from object of class "scrfit" %% ~~function to do ... ~~
}
\description{
This makes a basic density plot given MCMC output from the main fitting
functions of SCRbayes (e.g., SCRh.fn, SCRi.fn)
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
SCRdensity(obj, nx = 30, ny = 30, Xl = NULL, Xu = NULL, Yl = NULL, Yu = NULL, scalein = 1, scaleout = 100000*100, ncolors = 10,opt.ss=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obj}{
should be of class scrfit %% ~~Describe \code{obj} here~~
}
\item{nx}{
%% ~~Describe \code{nx} here~~
}
\item{ny}{
%% ~~Describe \code{ny} here~~
}
\item{Xl}{
%% ~~Describe \code{Xl} here~~
}
\item{Xu}{
%% ~~Describe \code{Xu} here~~
}
\item{Yl}{
%% ~~Describe \code{Yl} here~~
}
\item{Yu}{
%% ~~Describe \code{Yu} here~~
}
\item{scalein}{
%% ~~Describe \code{scalein} here~~
}
\item{scaleout}{
%% ~~Describe \code{scaleout} here~~
}
\item{ncolors}{
%% ~~Describe \code{ncolors} here~~
}
  \item{opt.ss}{
Logical; defaults to \code{FALSE}. If \code{TRUE} the density plot has
support on the input state-space grid. If \code{FALSE} (the default) the
binning method is used.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Modified from the SCR book Royle et al. (2013)
%% ~put references to the literature/web site here ~
}
\author{
Andy Royle, aroyle@usgs.gov
Joshua Goldberg, joshua.goldberg@umontana.edu
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(wolverine)
## make the encounter data file (EDF)
EDF<-wolverine$wcaps
dimnames(EDF)<-list(NULL,c("session","individual","occasion","trapid"))
## grab the trap deployment file (TDF)
TDF<-wolverine$wtraps
## we will fit a model with sex-specific parameters, so grab the sex variable
wsex<-wolverine$wsex
## bundle these into an "scrdata" object along with the state-space grid
grid<-cbind(wolverine$grid2,rep(1,nrow(wolverine$grid2)))
wolv.scrdata<-scrData(TDF,EDF,grid,Xsex=wsex)
## now fit a model using the current development version of
## the fitting function
test<-SCRh.fn( wolv.scrdata,ni= 1200, burn=200, skip=2,nz=100,theta=1,
Msigma=1, Mb=0, Msex=1, Msexsigma=1,
coord.scale=5000, area.per.pixel=4, thinstatespace=4)
## make a density plot
SCRdensity(test, scalein=1, scaleout=1000000*100, nx=20,ny=20)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d7981355b5c4004dfa1ef0c97abda79641332556
|
43e1e2ccb87ee235b669d12334e6616af2b76eeb
|
/heatmap_part3.R
|
12073d14d2752c4890018c52137062fe1ad7503d
|
[] |
no_license
|
ZhonghL/heatmap
|
4f076158d0b2e09f297adcf99408e22983bf7106
|
8f369f145f546828407ff78c91a1b6261d1d5e07
|
refs/heads/master
| 2021-01-22T11:37:45.503404
| 2015-08-07T07:31:07
| 2015-08-07T07:31:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,535
|
r
|
heatmap_part3.R
|
############################################################################
# Bioramble
# Heatmap: Part 3: How to create a microarray heatmap with R
# by Jesse Lipp
# Aug 3, 2015
############################################################################
# ----------------------------------------------------------
# set up system
# ----------------------------------------------------------
# clear memory
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; tolerable
# in a standalone tutorial script, but avoid it in reusable code.
rm(list = ls())
# load packages (install if necessary)
# The if (!require(...)) pattern is deliberate: require() returns FALSE
# instead of erroring, which triggers the install branch below.
# NOTE(review): biocLite is the legacy Bioconductor installer (pre-3.8);
# current Bioconductor uses BiocManager::install() — confirm target R version.
if (!require(ALL)) {
  source("http://bioconductor.org/biocLite.R")
  biocLite("ALL")
}
if (!require("genefilter")) {
  source("http://bioconductor.org/biocLite.R")
  biocLite("genefilter")
}
if (!require("gplots")) {
  install.packages("gplots")
}
if (!require("RColorBrewer")) {
  install.packages("RColorBrewer")
}
# ----------------------------------------------------------
# Step 1: Prepare the data
# ----------------------------------------------------------
# load acute lymphoblastic leukemia expression data (Bioconductor "ALL")
data(ALL)
# get information on ALL data set
?ALL
# get basic information on data
class(ALL)
dim(ALL)
# check for NA values
any(is.na(exprs(ALL)))
# data formatting following
# http://www.bioconductor.org/help/publications/books/bioconductor-case-studies/
# subset data set to B-cell samples with genotype "NEG" or "BCR/ABL" translocation
# "BT" is the type of the cancer (B-cell or T-cell)
# "mol.biol" is the genotypic classification of the cancer
# get indices of cancers with either no cytogenetic abnormalities (NEG) or
# the BCR-ABL translocation (BCR/ABL)
bcrabl <- ALL$mol.biol %in% c("NEG", "BCR/ABL")
# get indices of cancers originating from B-cells
bcell <- grepl("^B", ALL$BT)
# subset the ALL data set
all <- ALL[, bcell & bcrabl]
# re-adjust the factor levels to reflect the subset,
# making "NEG" the reference (baseline) level for downstream tests
all$mol.biol <- droplevels(all$mol.biol)
all$mol.biol <- relevel(all$mol.biol, ref = "NEG")
# get dimensions again
dim(all)
# # determine standard deviation of all genes
# all_sd <- apply(exprs(all), 1, sd)
# there is an optimized function in genefilter package "rowSds"
all_sd <- rowSds(exprs(all))
# get names of the 200 most variable genes and keep only those rows
top200 <- names(sort(all_sd, decreasing = TRUE))[1:200]
all_var <- all[top200, ]
# ----------------------------------------------------------
# Step 2: Decide on a distance metric
# ----------------------------------------------------------
# Correlation-based distance: 1 - Pearson correlation between rows.
# cor() correlates columns, so the input is transposed first to obtain a
# row-by-row similarity matrix, which is then converted to a "dist" object.
dist_cor <- function(x) {
  row_similarity <- cor(t(x), method = "pearson")
  as.dist(1 - row_similarity)
}
# ----------------------------------------------------------
# Step 3: Decide on a clustering method
# ----------------------------------------------------------
# Hierarchical clustering with Ward's minimum-variance linkage ("ward.D2",
# which operates on the dissimilarities directly; see ?hclust).
clus_wd2 <- function(x) {
  hclust(d = x, method = "ward.D2")
}
# ----------------------------------------------------------
# Step 4: Plot a microarray heatmap
# ----------------------------------------------------------
# it is customary to use a red (up-regulated), black (neutral),
# green (down-regulated) color scheme for expression microarrays
redblackgreen <- colorRampPalette(c("green", "black", "red"))(n = 100)
# generate genotype class labels: "NEG" is light grey, "BCR/ABL" is dark grey
class_labels <- ifelse(all_var$mol.biol == "NEG", "grey80", "grey20")

# Draw one expression heatmap with the shared clustering settings.
# The three heatmaps in this tutorial differ only in the gene subset and the
# palette, so the heatmap.2() call is factored out here instead of being
# copy-pasted three times.
# @param eset ExpressionSet whose exprs() matrix is plotted (genes in rows)
# @param palette color ramp used for the (row-scaled) expression values
plot_expr_heatmap <- function(eset, palette) {
  heatmap.2(exprs(eset),
            # clustering
            distfun = dist_cor,
            # FIX: spelled out `hclustfun`; the original passed `hclust =`,
            # which only worked via partial argument matching.
            hclustfun = clus_wd2,
            # scaling (genes are in rows)
            scale = "row",
            # color
            col = palette,
            # labels
            labRow = "",
            ColSideColors = class_labels,
            # tweaking
            trace = "none",
            density.info = "none")
}

# heatmap of the 200 most variable genes
plot_expr_heatmap(all_var, redblackgreen)
# ----------------------------------------------------------
# Step 5: A "better" way of selecting genes
# ----------------------------------------------------------
# non-specific filtering: keep genes whose sd exceeds the shorth of the sd
# distribution (the shortest interval containing half of the data — a
# reasonable estimate of the "peak" of the distribution)
sh <- shorth(all_sd)
all_sh <- all[all_sd >= sh, ]
dim(all_sh)
# row-wise t-tests using "rowttests" from the "genefilter" package,
# comparing the two genotype groups
tt <- rowttests(all_sh, all_sh$mol.biol)
# adjust p-values for multiple testing (Benjamini-Hochberg) and keep FDR <= 5%
tt$p.adj <- p.adjust(tt$p.value, method = "BH")
all_sig <- all_sh[tt$p.adj <= 0.05, ]
# how many genes are we left with
dim(all_sig)
# plot heatmap for differentially expressed genes
plot_expr_heatmap(all_sig, redblackgreen)
# ----------------------------------------------------------
# Step 6: Have mercy with the color-challenged
# ----------------------------------------------------------
# red/green is indistinguishable under the most common color-vision
# deficiency; a blue-black-yellow ramp conveys the same information
yellowblackblue <- colorRampPalette(c("dodgerblue", "black", "gold"))(n = 100)
plot_expr_heatmap(all_sig, yellowblackblue)
|
e6937b64186a353cec49288be27345fc5d08f692
|
88e52896e2de3ddfbc836658ba47146daa50f7b5
|
/man/me_covariance.Rd
|
a3fd4512b74cee5fa2b3eb9d708ce3a13a7e7a11
|
[
"MIT"
] |
permissive
|
sociometricresearch/cosme
|
bb88499802c0dee0c238b0d558280998465137bb
|
b231c5350eff940660e8924e12d6b8458444ae0c
|
refs/heads/master
| 2023-04-10T17:45:00.159025
| 2021-11-19T12:49:01
| 2021-11-19T12:49:01
| 198,222,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,381
|
rd
|
me_covariance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/me_correlate_covariance.R
\name{me_covariance}
\alias{me_covariance}
\title{Calculate a covariance matrix with an adjusted diagonal}
\usage{
me_covariance(
.data,
diag_adj = 1,
wt = NULL,
use = "complete.obs",
method = "pearson"
)
}
\arguments{
\item{.data}{a matrix or data frame with numeric columns.}
\item{diag_adj}{a numeric vector with length equal to the number of columns
of \code{x} to be multiplied by the diagonal. Alternatively, it can be of
length 1 which will be repeated through the whole diagonal. If the
argument \code{wt} is used, then the length of \code{diag_adj} must be the
same as \code{x} excluding the weight column. By default it multiplies by
1, giving the same diagonal.}
\item{wt}{the name of the column which contains the weights as bare unquoted
names or as character vector or length 1. Note that when the weight
argument is specified, the estimation is done using
\code{\link[stats]{cov.wt}} instead of \code{\link[stats]{cor}} or
\code{\link[stats]{cov}}. This means that the arguments \code{use} and
\code{method} are ignored.}
\item{use}{an optional character string giving a method for computing
covariances in the presence of missing values. This must be (an
abbreviation of) one of the strings "everything", "all.obs",
"complete.obs", "na.or.complete", or "pairwise.complete.obs".}
\item{method}{a character string indicating which correlation coefficient (or
covariance) is to be computed. One of "pearson" (default), "kendall", or
"spearman": can be abbreviated.}
}
\value{
a covariance \code{tibble} with variable names as a column and
the diagonal multiplied by \code{diag_adj}
}
\description{
\code{me_covariance} calculates a covariance matrix through
\code{\link[stats]{cov}} and multiplies the diagonal with the supplied
numeric vector. It's a wrapper around \code{\link[stats]{cov}} with slight
tweaks.
}
\examples{
# New diagonal
new_diagonal <- rnorm(ncol(mtcars))
me_covariance(mtcars)
me_covariance(mtcars, new_diagonal)
me_covariance(mtcars, new_diagonal, method = "kendall")
diagonal_wout_weight <- rnorm(ncol(mtcars) - 1)
me_covariance(mtcars, diagonal_wout_weight, wt = mpg)
}
\seealso{
\code{\link[stats]{cov}} for the workhorse behind the function and
\code{\link[stats]{cov.wt}} for the function used for the weighting.
}
|
ba7d870cb1f9824fb2ee13ea3eae70414f2895d4
|
44d0ba82f86729e2f6966f107911d16c99bb6722
|
/.Rproj.user/F209A5CB/sources/per/t/CF0DED5F-contents
|
ecf7b86f356880d278dab72181ad443aa9b5cbbc
|
[] |
permissive
|
isglobal-brge/nlOmicAssoc
|
6f51e329f820dad39173e7632ef6d36c037d737f
|
8bd77d15c1ce426afb9594c824678632d3ab9816
|
refs/heads/master
| 2021-01-20T01:14:17.247921
| 2019-02-21T12:21:40
| 2019-02-21T12:21:40
| 89,244,014
| 0
| 1
|
MIT
| 2018-09-14T08:08:36
| 2017-04-24T13:34:13
|
R
|
UTF-8
|
R
| false
| false
| 3,518
|
CF0DED5F-contents
|
# =============================================================================
# USAGE EXAMPLE
# =============================================================================
# Preambles
# =============================================================================
# NOTE(review): rm(list = ls()) wipes the user's workspace; acceptable in a
# standalone example script, but avoid it in reusable code.
rm(list = ls())
gc(reset = TRUE)
require(Biobase)
# Load expressions and expositions:
# -----------------------------------
load("objects/expo_INMA.RData")
load("objects/expset_INMA.RData")
# Truncate negative exposure values to zero before the log transform.
expo_INMA[expo_INMA<0] <- 0
# Log-transform with a small offset so zero exposures stay finite.
expoINMA.log <- data.frame(lapply(expo_INMA, function(x) log(x+0.0001)))
rownames(expoINMA.log) <- rownames(expo_INMA)
# =============================================================================
# EXAMPLE USAGE OF Fit() FUNCTION: Table of fits of first 100 probes:
# =============================================================================
source("Fit.R")
# Fit() attributes:
# _________________
# N = position (in exprs(expset))[N,] of probes which are going to be analysed
# select = global significance level for the MFP algorithm, to be adjusted due to cor(expo)
# select_adj = manually adjusted select
# cores = number of cores to be used (if > 1, parallel processes will be executed)
EXAMPLE <- Fit(expset = expset_INMA, expo = expo_INMA, N = sample(1:nrow(exprs(expset_INMA)), 100), select = 0.05, cores = 20)
EXAMPLE <- Fit(expset = expset_INMA, expo = expoINMA.log, N = sample(1:nrow(exprs(expset_INMA)), 100), df = 2, select = 0.05, cores = 20)
print(EXAMPLE[[1]]) # Table ordered by LRT sig.
print(EXAMPLE[[2]][1:10]) # List of associated expositions to the first 10 probes
# =============================================================================
# EXAMPLE USAGE OF SingleFit() FUNCTION: Fit 1 probe
# =============================================================================
source("SingleFit.R")
# SingleFit() attributes:
# _________________
# probe = name or position of the probe (in exprs(expset)[probe,]) which is going to be analysed
# select = global significance level for the MFP algorithm, to be adjusted due to cor(expo)
# select_adj = manually adjusted select (select attribute will be ignored)
example <- SingleFit(expset = expset_INMA, expo = expo_INMA, probe = "TC09001306.hg.1", df = 2, select = 0.05) # , select_adj = 0.05)
# sig. probes = 3, TC01000083.hg.1, TC04000946.hg.1, TC12001765.hg.1, TC08001774.hg.1
example <- SingleFit(expset = expset_INMA, expo = expoINMA.log, probe = "TC03002103.hg.1", df = 2, select = 0.05)
# summary.mfp.SingleFit() attributes:
# _________________
# mod = an mfp.SingleFit object
summary(example)
# plot.mfp.SingleFit() attributes:
# _________________
# mod = an mfp.SingleFit object
# realpoints = logical indicator if real points will be added to the plot of the marginal effects
# seed = seed to be fixed for the colors of the plot
pdf(paste0("plots/plot_", example$probe_name, ".pdf"), onefile = FALSE)
plot(example, realpoints = TRUE, seed = 2)
dev.off()
# Refit a single probe and overlay regression diagnostics on its marginal plot.
# @param x one named element of the associated-expositions list (e.g.
#   EXAMPLE[[2]][3]); its *name* is the probe id and its *value* identifies the
#   exposure column — assumed from the call below, TODO confirm against Fit().
# @param ... extra graphical parameters forwarded to plot.mfp.SingleFit().
ff <- function(x, ...) {
  y <- names(x)
  example <- SingleFit(expset = expset_INMA, expo = expoINMA.log,
                       probe = y, select = 0.05)
  plot(example, realpoints = TRUE, seed = 2, ...)
  # Raw data and a lowess smoother on the (exposure, expression) plane.
  points(expoINMA.log[,grep(x, names(expoINMA.log))], exprs(expset_INMA)[y,], col="gray60", cex=0.7)
  lines(lowess(expoINMA.log[,grep(x, names(expoINMA.log))], exprs(expset_INMA)[y,]), col="lightblue")
  # FIX: the regression line must model expression ~ exposure to match the
  # plot axes (x = exposure, y = expression). The original regressed
  # exposure ~ expression, so abline() drew a line with swapped coefficients.
  abline(reg=lm(exprs(expset_INMA)[y,] ~ expoINMA.log[,grep(x, names(expoINMA.log))]), col="red")
  # NOTE(review): dev.off() assumes the caller opened a device (e.g. pdf())
  # before calling ff(); otherwise this closes the active screen device.
  dev.off()
}
ff(EXAMPLE[[2]][3], xlim=c(0,7), ylim=c(0,2))
|
|
be3262c4aca014ec0071f8e430b322656d800623
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HyperbolicDist/examples/safeIntegrate.Rd.R
|
7125fc1a3d06dd605abc392be7c560dad84f5c37
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
safeIntegrate.Rd.R
|
# Extracted example code for HyperbolicDist::safeIntegrate().
library(HyperbolicDist)
### Name: safeIntegrate
### Title: Safe Integration of One-Dimensional Functions
### Aliases: safeIntegrate print.integrate
### Keywords: math utilities
### ** Examples
# safeIntegrate() mirrors integrate() on ordinary ranges, but guards the
# degenerate cases (equal limits, both limits infinite with the same sign)
# where integrate() can error or return a nonsense value.
integrate(dnorm, -1.96, 1.96)
safeIntegrate(dnorm, -1.96, 1.96) # Same as for integrate()
integrate(dnorm, -Inf, Inf)
safeIntegrate(dnorm, -Inf, Inf) # Same as for integrate()
integrate(dnorm, 1.96, 1.96) # OK here but can give an error
safeIntegrate(dnorm, 1.96, 1.96)
integrate(dnorm, -Inf, -Inf)
safeIntegrate(dnorm, -Inf, -Inf) # Avoids nonsense answer
integrate(dnorm, Inf, Inf)
safeIntegrate(dnorm, Inf, Inf) # Avoids nonsense answer
|
8436beb70c432c118bb4e0f3f28f512bfb3dbfb2
|
316516337da2ca6d86b7da32e14149728177f1e4
|
/man/insample_sim.Rd
|
97a9e414d682c64f211cd05681fd0932e911cbc7
|
[] |
no_license
|
swihart/wfpca
|
7f208bb895dfb9b9dfd57723fac7fb92031135e3
|
4814cdf4648a9d9631df1e705858512f2e84d143
|
refs/heads/master
| 2020-05-19T08:11:38.442105
| 2015-07-08T16:21:45
| 2015-07-08T16:21:45
| 23,965,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
rd
|
insample_sim.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/insample_sim.R
\name{insample_sim}
\alias{insample_sim}
\title{Run a simulation and then perform prediction. Based on readMe_sim_prediction(), which was never used..}
\usage{
insample_sim(sim_seed = 101, sample_size = 1000, sim_slope = 100,
sim_intercept = 12, sim_ses_coef = 0.01, sim_age_coef = 0.01)
}
\arguments{
\item{sim_seed}{passed to set.seed()}
\item{sample_size}{(defaults to 1000) and must be a multiple of 100}
\item{sim_slope}{see slope in calculate_ses()}
\item{sim_intercept}{see intercept in calculate_ses()}
\item{sim_ses_coef}{see ses_coef in apply_censoring()}
\item{sim_age_coef}{see age_coef in apply_censoring()}
}
\value{
results a data frame with rmse for each approach for that simulated dataset
}
\description{
This function comes after many months of running readme_sim and talks with Bryan Lau on
what we need to demonstrate for the method.
}
\examples{
\dontrun{
results <- insample_sim(sim_seed = 101, sample_size = 100)
}
}
|
68338b31a477643fd4a96d7695919d10722d8f44
|
156ac98410018f52bda6ec75f04d0382cd715b5e
|
/data-raw/powerset.R
|
6d38326f8b46597c15e7eed5de8177ab11c2b11b
|
[
"MIT"
] |
permissive
|
muriteams/similR
|
c8edc227692e166fc6662da534eeabd071579cd3
|
3de97d8fadf8be4cbbeb631524b886011e460715
|
refs/heads/master
| 2022-09-07T16:27:58.295810
| 2022-08-16T17:14:40
| 2022-08-16T17:14:40
| 157,758,962
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 111
|
r
|
powerset.R
|
# Precompute powerset lookup tables and ship them as package data.
# NOTE(review): this loads `lergm` for powerset() although the repository is
# named similR — confirm the intended package/function source.
library(lergm)
powerset03 <- powerset(3)
powerset04 <- powerset(4)
# Save both objects into data/ so the package can load them lazily.
usethis::use_data(powerset03, powerset04)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.