blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
733a348c8fcf2de5808c4812055682ee549db21a | a7f0b0a405fc7f1d4c9e4ebb754529f3877a20dd | /RcodeData/dnaRAD.r | 744a833608661ed9a662a98ea4d700c1434e2ce3 | [] | no_license | tmuntianu/advancedstatistics | 4298c0d0f3a52675d85d67f3aac3b7b11ab5cdc1 | 3885b67da560702df4b6d05b240ada6fde2f727c | refs/heads/master | 2020-12-22T19:03:58.858657 | 2020-01-29T04:08:34 | 2020-01-29T04:08:34 | 236,890,079 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,456 | r | dnaRAD.r | dnaRAD <-
function()
{
da=read.csv("./advancedstatistics/RcodeData/dnaRAD.csv",stringsAsFactors=F)
Surv=da[,2];log10Surv=log10(Surv)
rad=da[,1];n=length(rad)
par(mfrow=c(1,1),mar=c(4,4,1,1))
plot(rad,log10Surv,ylim=c(1,4),axes=F,xlab="",ylab="")
mtext(side=1,"Radiation given, Gy",cex=1.5,line=2.75)
mtext(side=2,"Survival, hours",cex=1.5,line=2.75)
axis(side=1,at=seq(from=0,to=24,by=2))
axis(side=2,at=1:4,labels=c("1","10","100","1000"),srt=90)
#return()
rT=1:23;ntr=length(rT)
ssMIN=10^10
for(i in 1:ntr)
{
d=rep(0,n);d[rad>rT[i]]=1
x1=-rad;x2=-pmax(rad-rT[i],0)
o=lm(log10Surv~x1+x2)
SS=sum(o$residuals^2)
if(SS<ssMIN)
{
oOPT=o
trOPT=rT[i]
ssMIN=SS
}
}
a=coef(oOPT)
print(summary(oOPT))
print(trOPT)
x=seq(from=0,to=24,length=200);nx=length(x)
#y=a[1]-a[2]*x-a[3]*pmax(x-trOPT,0)
b=c(a,trOPT)
RSS.MIN=10^10 # our own iterations
for(it in 1:10)
{
x2=pmax(rad-b[4],0)
f=b[1]-b[2]*rad-b[3]*x2
J=cbind(rep(1,n),-rad,x2,b[3]*(rad>b[4]))
de=t(J)%*%(log10Surv-f)
iJJ=solve(t(J)%*%J)
b.new=b+iJJ%*%de
x2=pmax(rad-b.new[4],0)
f=b.new[1]-b.new[2]*rad-b.new[3]*x2
RSS=sum((log10Surv-f)^2)
if(RSS>RSS.MIN) break
b=b.new
RSS.MIN=RSS
}
s=sqrt(RSS.MIN/(n-4))
seb=s*sqrt(diag(iJJ))
out=as.data.frame(cbind(b,seb,2*(1-pnorm(abs(b/seb)))))
names(out)=c("beta-est","SE","P-value");row.names(out)=c("b1","b2","b3","b4")
print(out)
y=b[1]-b[2]*x-b[3]*pmax(x-b[4],0)
lines(x,y,lwd=2)
}
|
9e27bab8cba4db99ab419a74608d842a7a262ffa | 4672564b592e6ae952c2c2c4e39ef8d86e2555a8 | /R/unit_names.R | 79c364b3078fff36ea9463f78dece11de5423fd1 | [] | no_license | kidusasfaw/spatPomp | f590d477f95aa20e3e7b44b15094c2f34396c689 | 5d93429fe300e84fea1dd83c99bcb44186cb5fb9 | refs/heads/master | 2023-08-17T07:17:12.316329 | 2023-07-28T17:58:45 | 2023-07-28T17:58:45 | 231,446,313 | 9 | 13 | null | 2023-09-11T18:09:59 | 2020-01-02T19:29:33 | R | UTF-8 | R | false | false | 805 | r | unit_names.R | ##' Unit names of a spatiotemporal model
##'
##' \code{unit_names} outputs the contents of the \code{unit_names} slot
##' of a \code{spatPomp} object. The order in which the units
##' appear in the output vector determines the order in which latent
##' states and observations for the spatial units are stored.
##'
##' @name unit_names
##' @rdname unit_names
##' @include spatPomp_class.R
##' @return A character vector with the unit names used to create the \sQuote{spatPomp} object.
NULL
setGeneric("unit_names", function(x)standardGeneric("unit_names"))
##' @name unit_names-spatPomp
##' @rdname unit_names
##' @aliases unit_names,spatPomp-method
##' @param x a \code{spatPomp} object
##' @export
setMethod(
"unit_names",
signature=signature(x="spatPomp"),
definition=function(x) x@unit_names
)
|
8ea5d17181b26394089e30ebca59a0f208bc7b78 | 7667c5c3f9d34396c9a8176aee964db74107246e | /R/split_query.R | da4448d30c61eba86001054e7f754587c78d119f | [
"Apache-2.0"
] | permissive | ianmcook/queryparser | f2dbfe42ac3cb7d198f88d5470193a140d4ca73f | ab74cc99d9df657607d28cc31104cda0618e130e | refs/heads/master | 2023-02-01T20:49:41.655631 | 2023-01-09T21:38:23 | 2023-01-09T21:38:23 | 202,947,077 | 56 | 8 | Apache-2.0 | 2020-03-29T17:27:45 | 2019-08-18T01:29:26 | R | UTF-8 | R | false | false | 9,474 | r | split_query.R | # Copyright 2023 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Split a SQL query
#'
#' @description Splits a SQL \code{SELECT} statement into clauses, and splits
#' comma-separated column lists within the clauses.
#'
#' @param query a character string containing a SQL \code{SELECT} statement
#' @param tidyverse for queryparser internal use only
#' @return A list object with named elements representing the clauses of the
#' query
#' @examples
#' my_query <- "SELECT origin, dest,
#' COUNT(flight) AS num_flts,
#' round(AVG(distance)) AS dist,
#' round(AVG(arr_delay)) AS avg_delay
#' FROM flights
#' WHERE distance BETWEEN 200 AND 300
#' AND air_time IS NOT NULL
#' GROUP BY origin, dest
#' HAVING num_flts > 3000
#' ORDER BY num_flts DESC, avg_delay DESC
#' LIMIT 100;"
#'
#' split_query(my_query)
#' @seealso \code{\link{parse_query}}
#' @export
split_query <- function(query, tidyverse) {
if (!identical(typeof(query), "character") || !identical(length(query), 1L)) {
stop("Unexpected input to split_query()", call. = FALSE)
}
original_encoding <- Encoding(query)
if (original_encoding == "unknown") {
original_encoding <- "UTF-8"
}
query <- trimws(query)
query <- squish_sql(query)
query <- sub(";$", "", query)
rc <- rawConnection(raw(0L), "r+")
on.exit(close(rc))
writeChar(query, rc)
len <- seek(rc, 0L) - 1L
if (!clause_starts_here(rc, "select")) {
stop("Query must begin with the SELECT keyword", call. = FALSE)
}
seek(rc, 7L)
select_distinct <- FALSE
if (clause_starts_here(rc, "all")) {
seek(rc, 10L)
} else if (clause_starts_here(rc, "distinct")) {
select_distinct <- TRUE
seek(rc, 15L)
} else {
seek(rc, 6L)
}
pos_from <- NULL
pos_where <- NULL
pos_group_by <- NULL
pos_having <- NULL
pos_order_by <- NULL
pos_limit <- NULL
in_quotes <- FALSE
in_parens <- 0
escaped <- FALSE
while((pos <- seek(rc, NA)) <= len) {
# identify when inside strings and parentheses
char <- readChar(rc, 1L, useBytes = TRUE)
if (char %in% quote_chars) {
if (!in_quotes) {
in_quotes <- TRUE
escaped <- FALSE
quo_char <- char
} else if (char == quo_char) {
if (escaped) {
escaped <- FALSE
} else {
esc_quo <- c(quo_char, "\\")
if (!readChar(rc, 1L, useBytes = TRUE) %in% esc_quo) {
in_quotes <- FALSE
escaped <- FALSE
rm(quo_char)
} else {
escaped <- TRUE
}
seek(rc, -1L, "current")
}
}
in_word <- FALSE
} else if (!in_quotes && char == "(") {
escaped <- FALSE
in_parens <- in_parens + 1
in_word <- FALSE
} else if (!in_quotes && char == ")") {
escaped <- FALSE
in_parens <- in_parens - 1
in_word <- FALSE
} else if (is_word_character(char, useBytes = TRUE)) {
escaped <- FALSE
in_word <- TRUE
} else {
escaped <- FALSE
in_word <- FALSE
}
if (!in_quotes && !in_word) {
# identify unsupported syntax
if (clause_starts_here(rc, "over")) {
stop("OVER clauses are not supported", call. = FALSE)
}
if (clause_starts_here(rc, "select")) {
if (in_parens > 0) {
stop("Subqueries are not supported", call. = FALSE)
} else {
stop("The SELECT keyword is used two or more times", call. = FALSE)
}
}
}
if (!in_quotes && in_parens <= 0 && !in_word) {
# identify unsupported syntax
if (clause_starts_here(rc, "union")) {
stop("The UNION operator is not supported", call. = FALSE)
}
if (clause_starts_here(rc, "intersect")) {
stop("The INTERSECT operator is not supported", call. = FALSE)
}
if (clause_starts_here(rc, "except")) {
stop("The EXCEPT operator is not supported", call. = FALSE)
}
# identify beginnings of clauses
if (clause_starts_here(rc, "from")) {
# don't split on the "from" is "is [not] distinct from"
if (!preceded_by_keyword(rc, "distinct", useBytes = TRUE)) {
pos_from <- append(pos_from, pos + 1L)
}
} else if (clause_starts_here(rc, "where")) {
pos_where <- append(pos_where, pos + 1L)
} else if (clause_starts_here(rc, "group by")) {
pos_group_by <- append(pos_group_by, pos + 1L)
} else if (clause_starts_here(rc, "having")) {
pos_having <- append(pos_having, pos + 1L)
} else if (clause_starts_here(rc, "order by")) {
pos_order_by <- append(pos_order_by, pos + 1L)
} else if (clause_starts_here(rc, "limit")) {
pos_limit <- append(pos_limit, pos + 1L)
}
}
seek(rc, pos + 1)
}
if (in_quotes) {
stop("Query contains unmatched quotation marks", call. = FALSE)
}
if (in_parens > 0) {
stop("Query contains unmatched parentheses", call. = FALSE)
}
start_pos <- list(
"select" = 0,
"from" = pos_from,
"where" = pos_where,
"group_by" = pos_group_by,
"having" = pos_having,
"order_by" = pos_order_by,
"limit" = pos_limit
)
if (any(lapply(start_pos, length) > 1)) {
stop("One or more clauses is used two or more times", call. = FALSE)
}
start_pos <- unlist(start_pos) + 1
if (any(diff(start_pos) < 0)) {
stop("Clauses are in an incorrect order", call. = FALSE)
}
stop_pos <- c(start_pos[-1] - 1, len)
names(stop_pos) <- names(start_pos)
clauses <- mapply(
function(x, y) list(start = x, stop = y),
start_pos,
stop_pos,
SIMPLIFY = FALSE
)
Encoding(query) <- "bytes"
clauses <- lapply(
clauses, function(x) {
substr(query, x$start, x$stop)
}
)
clauses$select <- split_select(clauses$select)
clauses$from <- split_from(clauses$from)
clauses$where <- split_where(clauses$where)
clauses$group_by <- split_group_by(clauses$group_by)
clauses$having <- split_having(clauses$having)
clauses$order_by <- split_order_by(clauses$order_by)
clauses$limit <- split_limit(clauses$limit)
clauses <- lapply(clauses, function(clause) {Encoding(clause) <- original_encoding; clause})
if (select_distinct) {
attr(clauses$select, "distinct") <- TRUE
}
clauses
}
clause_starts_here <- function(rc, keyword) {
keyword_starts_here(rc, keyword, useBytes = TRUE, look_back = FALSE)
}
split_select <- function(clause) {
split_comma_list(split_clause(clause, "select( all)?( distinct)?"))
}
split_from <- function(clause) {
split_clause(clause, "from")
}
split_where <- function(clause) {
split_clause(clause, "where")
}
split_group_by <- function(clause) {
split_comma_list(split_clause(clause, "group by"))
}
split_having <- function(clause) {
split_clause(clause, "having")
}
split_order_by <- function(clause) {
split_comma_list(split_clause(clause, "order by"))
}
split_limit <- function(clause) {
split_clause(clause, "limit")
}
split_clause <- function(clause, keyword) {
if (is.null(clause)) return(NULL)
clause <- trimws(clause)
keyword_regex <- paste0("^", keyword, ws_regex, "*")
clause <- sub(keyword_regex, "", clause, ignore.case = TRUE, useBytes = TRUE)
clause
}
split_comma_list <- function(comma_list) {
if (is.null(comma_list)) return(NULL)
rc <- rawConnection(raw(0L), "r+")
on.exit(close(rc))
writeChar(comma_list, rc)
len <- seek(rc, 0L) - 1L
pos_comma <- NULL
in_quotes <- FALSE
in_parens <- 0
escaped <- FALSE
while((pos <- seek(rc, NA)) <= len) {
char <- readChar(rc, 1L, useBytes = TRUE)
if (char %in% quote_chars) {
if (!in_quotes) {
in_quotes <- TRUE
escaped <- FALSE
quo_char <- char
} else if (char == quo_char) {
if (escaped) {
escaped <- FALSE
} else {
esc_quo <- c(quo_char, "\\")
if (!readChar(rc, 1L, useBytes = TRUE) %in% esc_quo) {
in_quotes <- FALSE
escaped <- FALSE
rm(quo_char)
} else {
escaped <- TRUE
}
seek(rc, -1L, "current")
}
} else {
escaped <- FALSE
}
} else if (!in_quotes && char == "(") {
escaped <- FALSE
in_parens <- in_parens + 1
} else if (!in_quotes && char == ")") {
escaped <- FALSE
in_parens <- in_parens - 1
} else if (!in_quotes && in_parens <= 0) {
escaped <- FALSE
if (char == ",") {
pos_comma <- append(pos_comma, pos)
}
} else {
escaped <- FALSE
}
}
pos_comma <- pos_comma + 1
if (is.null(pos_comma)) {
trimws(comma_list)
} else {
original_encoding <- Encoding(comma_list)
if (original_encoding == "unknown") {
original_encoding <- "UTF-8"
}
Encoding(comma_list) <- "bytes"
out <- trimws(
substring(comma_list, c(1, pos_comma + 1), c(pos_comma - 1, len))
)
Encoding(out) <- original_encoding
out
}
}
|
adeebd3aaefdabb6915b040e7030d35025faa5ce | edac0241b52bdfb0729044ee8222573f3fcc631d | /R_scripts/log2_transform.R | 2c699414225f3948e67dd62e8b011afc8c933863 | [] | no_license | thaddad91/Intern_cluster-specific-SNPs | 913163655fb4a79b218c1175d76a0f2887aa0412 | de301f16d3d72d8e03c5a032b8bde5cbbe6f0ef3 | refs/heads/master | 2023-07-23T09:38:11.943666 | 2021-09-06T18:36:22 | 2021-09-06T18:36:22 | 341,704,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,989 | r | log2_transform.R | # Quick script for fixing the csSNP plots
installdir <- "/data/haddadt/NL_SNP"
setwd(dir = installdir)
snp_dir <- paste(installdir, "/curated_SNPs/", sep = "")
library("ggplot2") # plotting
library("numbers") # fibonacci nrs for bins
# Read frequencies from files
cluster_freq <-
read.csv(paste(snp_dir, "cluster_frequencies.csv", sep = ""), sep = ' ')
cluster_freq <-
cluster_freq[!cluster_freq$Frequency == 0, ] # Drop zero-levels
size_freq <-
read.csv(paste(snp_dir, "frequency_of_cluster_sizes.csv", sep = ""),
sep = ' ')
size_freq <-
size_freq[!size_freq$Unique_SNP_size == 0, ] # Drop zero-levels
###################################
# Comparison of cluster size #
# relative to SNP group size #
###################################
con_wgsid <- dbConnect(RMySQL::MySQL(), group = "wgsid")
wgsid.data <- dbReadTable(conn = con_wgsid, name = 'wgsid')
dbDisconnect(con_wgsid)
#Cluster sizes
c_sizes <- as.data.frame(table(wgsid.data$wgsid))
colnames(c_sizes)[2] <- "cluster_size"
colnames(c_sizes)[1] <- "WGSID"
c_sizes <- inner_join(c_sizes, cluster_freq, by = c("WGSID"))
colnames(c_sizes)[3] <- "specific_SNPs"
# let's plot
cx_nr <- log(as.numeric(c_sizes$cluster_size), 2)
bin_cx_nr <- as.data.frame(table(cut(cx_nr, c(0, 1, 2, 3, 4, 5, 6))))
cy_nr <- log(as.numeric(c_sizes$specific_SNPs), 2)
plot(cx_nr,
cy_nr,
xlab = "Log2 cluster size",
ylab = "Log2 number of cluster-specific SNPs",
main = "Comparison of WGSID cluster size to number of cluster-specific SNPs")
xc_labels <- c("0-1", "1-2", "2-3", "3-4", "4-5", "5-6", "6-7")
freq2c <- ggplot(data = bin_cx_nr, aes(x = Var1, y = Freq)) +
geom_bar(stat = "identity", fill = "steelblue") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90)) +
labs(title = "Comparison of cluster size to number of csSNPs",
x = 'Log2 cluster size', y = 'Log2 number of cluster-specific SNPs') +
scale_x_discrete(labels = xc_labels)
freq2c
###################################
# Plot frequencies of cluster- #
# specific SNPs #
###################################
# Frequencies of nr of csSNP found
y_nr <- as.numeric(cluster_freq$Frequency)
# Dividing the ranges into bins for clarity, fibonacci ranges
range_nrs <- unique(fibonacci(16, sequence = TRUE))
bin_counts <- as.data.frame(table(cut(y_nr, range_nrs)))
bin_counts$Freq <- log(bin_counts$Freq, 2)
xlabels <-
list(
'1-2',
'2-3',
'3-5',
'5-8',
'8-13',
'13-21',
'21-34',
'34-55',
'55-89',
'89-144',
'144-233',
'233-377',
'377-610',
'610-987'
)
freq2 <- ggplot(data = bin_counts, aes(x = Var1, y = Freq)) +
geom_bar(stat = "identity", fill = "steelblue") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90)) +
labs(title = "Frequencies of the number of csSNP's found per cluster",
x = 'binned number of csSNPs found per cluster', y = 'Log2 frequencies') +
scale_x_discrete(labels = xlabels)
freq2 |
4c272dbf4a8a4e596d1dbcc7e8866d21f63e1116 | 5de89e1e5d4ee4a69d6b6671136fa35bb9b9c34e | /man/print.summary.aggr.Rd | caba66f2c19e25317dfe917f71cbfafdb363653b | [] | no_license | MichaelChirico/VIM | 7ade6599a1eee573705d43cbbfd2930c844fdd5f | 9c71155296f80d4642c6f7d66e69aa815de8643c | refs/heads/master | 2021-10-13T01:18:55.433423 | 2019-08-21T14:17:53 | 2019-08-21T14:17:53 | 211,123,980 | 0 | 0 | null | 2019-09-26T15:37:25 | 2019-09-26T15:37:24 | null | UTF-8 | R | false | true | 650 | rd | print.summary.aggr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggr.R
\name{print.summary.aggr}
\alias{print.summary.aggr}
\title{Print method for objects of class summary.aggr}
\usage{
\method{print}{summary.aggr}(x, ...)
}
\arguments{
\item{x}{an object of class \code{"summary.aggr"}.}
\item{\dots}{Further arguments, currently ignored.}
}
\description{
Print method for objects of class \code{"summary.aggr"}.
}
\examples{
data(sleep, package = "VIM")
s <- summary(aggr(sleep, plot=FALSE))
s
}
\seealso{
\code{\link{summary.aggr}}, \code{\link{aggr}}
}
\author{
Andreas Alfons, modifications by Bernd Prantner
}
\keyword{print}
|
75cfac0c49ca7ce41c741d5a605e0b918a64b47d | 0c736031951055b72ce0472432bac227ad5a7a85 | /modules/models/containers/train.R | 8d019a561ba1c7c03b1d64a8f4f34b6cf62da29d | [] | no_license | JulioMh/TFG | b9a3a8b1d9e18cf37dfcac6e7bdd8796469b1ed7 | a11e1f5c1ad5353c0125f90dd183e29ddd8481f4 | refs/heads/master | 2022-11-25T16:59:49.743989 | 2020-08-01T12:45:17 | 2020-08-01T12:45:17 | 262,852,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,917 | r | train.R | trainUI <- function (id) {
ns <- NS(id)
setUpTrainUI(ns("set_up"))
}
trainServer <- function (input, output, session) {
values <- reactiveValues()
settings <- callModule(setUpTrain, "set_up")
state <-
reactiveValues(
processing_data = FALSE,
training_model = FALSE,
saving_model = FALSE,
done = FALSE
)
observeEvent(state$processing_data, {
if (state$processing_data) {
sendSweetAlert(
session = session,
title = "Preparando datos...",
closeOnClickOutside = FALSE,
type = "info",
btn_labels = NA,
showCloseButton = FALSE
)
tryCatch({
state$processedData <-
prepareDataForTraining(
dataset = settings$dataset(),
preds = settings$preds(),
preds_to_center = settings$to_center(),
do_pred_na = settings$pred_missing_fields(),
target = settings$target()
)
state$training_model <- TRUE
},
error = function(cond) {
sendSweetAlert(
session = session,
title = "Hay un problema con los datos...",
text = cond,
type = "error"
)
},
finally = {
state$processing_data <- FALSE
})
}
})
observeEvent(state$training_model, {
if (state$training_model) {
tryCatch({
sendSweetAlert(
session = session,
title = "Entrenando...",
text = "Espere un momento, estamos trabajando en ello",
closeOnClickOutside = FALSE,
type = "info",
btn_labels = NA,
showCloseButton = FALSE
)
models <-
doTrain(settings$methods(),
state$processedData$trainData,
settings$target())
models$impute_model <- state$processedData$impute_model
models$dummy_model <- state$processedData$dummy_model
models$center_model <- state$processedData$center_model
state$models <- models
state$saving_model <- TRUE
},
error = function(cond) {
print(cond)
sendSweetAlert(
session = session,
title = "No se han podido entrenar los modelos...",
text = "Prueba utilizando otro algoritmo de entrenamiento",
type = "error"
)
},
finally = state$training_model <- FALSE)
}
})
observeEvent(state$saving_model, {
if (state$saving_model) {
tryCatch({
sendSweetAlert(
session = session,
title = "Almacenando en la base de datos...",
closeOnClickOutside = FALSE,
type = "info",
btn_labels = NA,
showCloseButton = FALSE
)
trainRowNumbers <-
paste(as.vector(state$processedData$index), collapse = ", ")
values$id <- saveModel(
state$models,
settings$target(),
settings$name(),
settings$description(),
session$userData$user$id,
settings$dataset_id(),
trainRowNumbers,
settings$preds(),
settings$isPublic()
)
sendSweetAlert(
session = session,
title = "Listo!!",
text = "Modelo entrenado y guardado",
type = "success"
)
state$done <- TRUE
},
error = function(cond) {
sendSweetAlert(
session = session,
title = "No se han podido guardar los mdelos...",
text = cond,
type = "error"
)
},
finally = {
state$saving_model <- FALSE
state$models <- NULL
state$datsets <- NULL
})
}
})
observeEvent(settings$confirm(), {
if (isTRUE(settings$confirm())) {
state$processing_data <- TRUE
}
})
return(reactive(values$id))
}
|
dd485a1101ece1b02dde92dd01c23ca7f40de9c8 | d746fef241f9a0e06ae48cc3b1fe72693c43d808 | /tesseract/rotate/d7ps3c-014.r | f5495f4c6e96b5635ffd08b2b5809d8b3a3f41f0 | [
"MIT"
] | permissive | ucd-library/wine-price-extraction | 5abed5054a6e7704dcb401d728c1be2f53e05d78 | c346e48b5cda8377335b66e4a1f57c013aa06f1f | refs/heads/master | 2021-07-06T18:24:48.311848 | 2020-10-07T01:58:32 | 2020-10-07T01:58:32 | 144,317,559 | 5 | 0 | null | 2019-10-11T18:34:32 | 2018-08-10T18:00:02 | JavaScript | UTF-8 | R | false | false | 195 | r | d7ps3c-014.r | r=0.65
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7ps3c/media/images/d7ps3c-014/svc:tesseract/full/full/0.65/default.jpg Accept:application/hocr+xml
|
285398f8a3889f1515525141837ad0ce36dc6084 | 66cf4adc85683e73290da2139c071365b40b6209 | /man/plot_ForestPlot.Rd | 5c1392f8ffda23b9b5fd980a3c889f49be402cbc | [
"MIT"
] | permissive | dpelegri/EASIER | db51d623eae85300b4b63f30f97ac93c9676a004 | ce9101b990874c13f6d8564867fdb3cbdc5a7841 | refs/heads/main | 2023-08-21T21:33:43.968027 | 2021-10-22T15:28:07 | 2021-10-22T15:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,049 | rd | plot_ForestPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_forestplot.R
\name{plot_ForestPlot}
\alias{plot_ForestPlot}
\title{Forest plot}
\usage{
plot_ForestPlot(
datas,
files_meta,
islowCpg,
gwana_dir,
metaname,
files,
outputgwama,
nsignificatives = 30
)
}
\arguments{
\item{datas}{complete data obtained from meta-analysis after QC without annotations}
\item{files_meta}{string vector with files used in the meta-analysis}
\item{islowCpg}{string string indicating if the execution has been carried out with all the CpGs or with the filtered CpGs, possible values : 'Normal', 'lowcpgs'}
\item{gwana_dir}{string with gwama input data path}
\item{metaname}{string with meta-analysis name}
\item{files}{string vector with files used in QC because we need data from this files to perform ForestPlots}
\item{outputgwama}{string with gwama output path}
\item{nsignificatives}{number, lowest p-values to show in forestplot, by default, lowestp=30}
}
\value{
distribution plot
}
\description{
Forest plot
}
|
772751bdc467c29188163fec6618e4c4f2634015 | 602c144363277f2efc062d78bce139dd7fb75480 | /tests/testthat/test-read_yaml.R | 592840c3501adf306f4795e4451e8933af765add | [] | no_license | mbojan/mbtools | 637c6cfba11a63d7a052867d0afa211af00060ad | ed7680f0f9ae68ea12f6bca403f1b686f20e9687 | refs/heads/master | 2022-07-14T03:25:05.525650 | 2022-06-25T18:43:09 | 2022-06-25T18:43:09 | 29,505,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 265 | r | test-read_yaml.R | context("Testing read_yaml() and co")
test_that("it works", {
# Some YAML
y <- "num: 1
key0:
key1: value1
key2:
key22: value22
rcode_num: !r 1:5
rcode_ch: !r paste(LETTERS[1:10], collapse=', ')
date: !r Sys.Date()
"
expect_silent(
read_yaml(y)
)
})
|
300ef954eb0be8077b14bdf597139a7c148f3951 | b2a43f613dd34e70f87c6e757fa8b96fc905e940 | /RcppArmadillo/logisticReg.R | 1af756b4c86c4dda72c72dcd7bea1b73c4384449 | [] | no_license | BonnyRead/Rcpp_RcppArmadillo_tutorial | e98f7ac34b3439d152495d4a2a33762878283f67 | d26687f05804a5b4bf026194fe696f985721719f | refs/heads/master | 2020-04-15T17:17:10.661485 | 2019-01-09T04:39:21 | 2019-01-09T04:39:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,591 | r | logisticReg.R | # install installr
if (!"installr" %in% rownames(installed.packages()))
install.packages("installr")
library(installr)
# require2 will automatically install nonexistent packages
require2(Rcpp)
require2(RcppArmadillo)
require2(microbenchmark)
logisticFunc <- function(t) pmin(pmax(1/(1+exp(-t)), 10*.Machine$double.eps), 1 - 10*.Machine$double.eps)
getRandomDataFunc <- function(n, p) {
X <- matrix(rnorm(n*p), n)
trueBeta <- as.matrix(sample(-5:5, p+1L, TRUE)) / 10
y <- sapply(logisticFunc(cbind(1, X) %*% trueBeta), function(prob) rbinom(1, 1, prob))
return(list(X = cbind(1, X), y = y))
}
# RcppArmadillo
Rcpp::sourceCpp("armaLogisticRegFunc.cpp")
# R
rLogisticRegFunc <- function(X, y, epsilon = 1e-8, maxit = 25L) {
# get degree of freedom
dfResidual <- nrow(X) - ncol(X)
# initialize parameters
it <- 0L
converged <- TRUE
coefOld <- rep(0, ncol(X))
pHatOld <- y / 2 + 0.25
devOld <- Inf
# iterations to update coefficients
repeat {
# update coefficients
coefNew <- coefOld + solve(t(X) %*% diag(as.vector(pHatOld * (1-pHatOld))) %*% X, t(X) %*% (y - pHatOld))
# calculate new p hat
pHatNew <- logisticFunc(X %*% coefNew)
# calculate new deviance
devNew <- sum(-2*c(y*log(pHatNew), (1-y)*log((1-pHatNew))))
# check whether converged
if (abs(devNew - devOld) / (abs(devNew) + 0.1) < epsilon)
break
# check whether reaches maximum iteration
if (it >= maxit) {
warning("Exceed maximum iteration, it is not converged!")
converged <- FALSE
break
}
# update parameters for next iteration
it <- it + 1L
coefOld <- coefNew
pHatOld <- pHatNew
devOld <- devNew
}
# retrun
return(list(
coefficients = as.vector(coefNew),
se = sqrt(diag(solve(t(X) %*% diag(as.vector(pHatNew * (1-pHatNew))) %*% X))),
deviance = devNew,
dfResidual = dfResidual,
iter = it,
converged = converged
))
}
# check that the results of two functions are equal
set.seed(100)
with(getRandomDataFunc(100L, 20L),
all.equal(
coef(glm.fit(X, y, family = binomial())),
coef(rLogisticRegFunc(X, y)),
check.attributes = FALSE,
tolerance = 1e-5
)
)
with(getRandomDataFunc(100L, 20L),
all.equal(
glm.fit(X, y, family = binomial())$deviance,
rLogisticRegFunc(X, y)$deviance
)
)
with(getRandomDataFunc(100L, 20L),
all.equal(
summary(glm(y ~ 0 + X, family = binomial()))$coefficients[ , 2],
armaLogisticRegFunc(X, y)$se,
check.attributes = FALSE,
tolerance = 1e-4
)
)
# check armaLmFunc is equal to rLmFunc
with(getRandomDataFunc(100L, 20L), all.equal(armaLogisticRegFunc(X, y), rLogisticRegFunc(X, y), tolerance = 1e-4))
# benchmark performance
# n = 100 / p = 20
microbenchmark(
arma = with(getRandomDataFunc(100L, 20L), armaLogisticRegFunc(X, y)),
r = with(getRandomDataFunc(100L, 20L), rLogisticRegFunc(X, y)),
r2 = with(getRandomDataFunc(100L, 20L), glm.fit(X, y, family = binomial())),
times = 100L
)
# n = 10000 / p = 20
microbenchmark(
arma = with(getRandomDataFunc(10000L, 20L), armaLogisticRegFunc(X, y)),
r = with(getRandomDataFunc(10000L, 20L), rLogisticRegFunc(X, y)),
r2 = with(getRandomDataFunc(10000L, 20L), glm.fit(X, y, family = binomial())),
times = 20L
)
# n = 10000 / p = 200
microbenchmark(
arma = with(getRandomDataFunc(10000L, 200L), armaLogisticRegFunc(X, y)),
r = with(getRandomDataFunc(10000L, 200L), rLogisticRegFunc(X, y)),
r2 = with(getRandomDataFunc(10000L, 200L), glm.fit(X, y, family = binomial())),
times = 4L
)
|
26f1f456fbca5bc508a85f807239c7fc00ded108 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /seqest/R/gen_multi_data.R | c51eca3710544a250d80dddd5a2033efc34500c5 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,274 | r | gen_multi_data.R | #' @title Generate the training data and testing data for the categorical and
#' ordinal case.
#'
#' @description
#' \code{gen_multi_data} generate the data used for multiple-class
#' classification problems.
#'
#' @details
#' gen_multi_data creates training dataset and testing datasets. The beta0 is a
#' p * k matrix which p is the length of true coefficient and (k + 1) represents
#' the number of categories. The value of 'type' can be 'ord' or 'cat' . If it
#' equals to 'ord', it means the data has an ordinal relation among classes
#' ,which is common in applications (e.g., the label indicates the severity of a
#' disease or product preference). If it is 'cat', it represents there is no
#' such ordinal relations among classes. In addition, the response variable y
#' are then generated from a multinomial distribution with the explanatory
#' variables x generated from a multivariate normal distribution with mean
#' vector equal to 0 and the identity covariance matrix.
#' @param beta0 A numeric matrix that represent the true coefficient that used
#' to generate the synthesized data.
#' @param N A numeric number specifying the number of the synthesized data. It
#' should be a integer. Note that the value shouldn't be too small. We
#' recommend that the value be 10000.
#' @param type A character string that determines which type of data will be
#' generated, matching one of 'ord' or 'cat'.
#' @param test_ratio A numeric number specifying proportion of test sets in all
#' data. It should be a number between 0 and 1. Note that the value of the
#' test_ratio should not be too large, it is best if this value is equal to
#' 0.2-0.3.
#' @export
#' @return a list containing the following components
#' \item{train_id}{The id of the training samples}
#' \item{train}{the training datasets. Note that the id of the data in the train
#' dataset is the same as the train_id}
#' \item{test}{the testing datasets}
#'
#'
#' @references {
#' Li, J., Chen, Z., Wang, Z., & Chang, Y. I. (2020). Active learning in
#' multiple-class classification problems via individualized binary models.
#' \emph{Computational Statistics & Data Analysis}, 145, 106911.
#' doi:10.1016/j.csda.2020.106911
#' }
#'
#' @seealso{
#' \code{\link{gen_bin_data}} for binary classification case
#'
#' \code{\link{gen_GEE_data}} for generalized estimating equations case.
#'
#'}
#'
#' @examples
#'# For an example, see example(seq_ord_model)
gen_multi_data <- function(beta0, N, type, test_ratio) {
beta_mat <- beta0
p <- dim(beta_mat)[1]
nClass <- dim(beta_mat)[2]
X <- MASS::mvrnorm(N, rep(0, p-1), 1*diag(p-1))
X <- cbind(rep(1, N), X)
tmp <- X %*% beta_mat
if(type=="ord"){
tmp <- t(apply(tmp, 1, cumsum))
}
mProb <- apply(tmp, 1, function(z) {
vprob <- exp(z) / (1 + sum(exp(z)));
matrix(c(1 - sum(vprob), vprob), nrow = 1) })
Y_1hot <- apply(mProb, 2, function(vprob) rmultinom(1, 1, vprob))
Y <- apply(Y_1hot, 2, function(yi) which(yi == 1)) - 1
data_mat <- cbind(Y, X)
train_id <- sample(N, (1-test_ratio)*N)
train <- data_mat[train_id, ]
test <- data_mat[-train_id, ]
return(list(train_id = train_id, train = train, test = test))
}
|
497cb1cd0ad04120b7da15b2fae3ae11bde46ba7 | bd07cc2347bba2b3790a6ef772a72656a5be81a4 | /R/RcppExports.R | a8dfcad278df5cd67ace786dd3b6859ab3ad2951 | [] | no_license | certifiedwaif/blma | 48ee453ecd97784e09b015d37bb8a759856eb4d9 | 80f438fbad99fb02d542dc36e12327789cf7c91a | refs/heads/master | 2023-07-13T03:33:09.766851 | 2023-07-05T06:22:03 | 2023-07-05T06:22:03 | 114,207,717 | 4 | 4 | null | 2020-07-29T07:51:53 | 2017-12-14T05:45:13 | R | UTF-8 | R | false | false | 27,478 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' BIC prior
#'
#' Log Bayes factor under the BIC-style "cake" prior of Ormerod et al. (2017).
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
BIC <- function(n, p_gamma, R2) {
    .Call("_blma_BIC", n, p_gamma, R2, PACKAGE = "blma")
}
#' ZE prior
#'
#' Log Bayes factor under the ZE prior, a special case of the prior
#' structure of Maruyama and George (2011).
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
ZE <- function(n, p_gamma, R2) {
    .Call("_blma_ZE", n, p_gamma, R2, PACKAGE = "blma")
}
#' log_hyperg_2F1
#'
#' Logarithm of a Gaussian hypergeometric 2F1-type quantity with parameters
#' b and c at argument x, as used by the mixture g-prior Bayes factors
#' (see Greenaway and Ormerod, 2018).
#'
#' @param b Hypergeometric parameter, a numeric scalar
#' @param c Hypergeometric parameter, a numeric scalar
#' @param x The argument of the function, a numeric scalar
#' @return The log of the Gaussian hypergeometric function value
#' @export
log_hyperg_2F1 <- function(b, c, x) {
    .Call('_blma_log_hyperg_2F1', PACKAGE = 'blma', b, c, x)
}
#' log_hyperg_2F1_naive
#'
#' Direct ("naive") counterpart of \code{log_hyperg_2F1}: logarithm of a
#' Gaussian hypergeometric 2F1-type quantity with parameters b and c at
#' argument x, evaluated without the reformulation used by
#' \code{log_hyperg_2F1}.
#'
#' @param b Hypergeometric parameter, a numeric scalar
#' @param c Hypergeometric parameter, a numeric scalar
#' @param x The argument of the function, a numeric scalar
#' @return The log of the Gaussian hypergeometric function value
#' @export
log_hyperg_2F1_naive <- function(b, c, x) {
    .Call('_blma_log_hyperg_2F1_naive', PACKAGE = 'blma', b, c, x)
}
#' Liang's hyper g-prior (direct evaluation, Equation (10))
#'
#' Log Bayes factor under the mixture g-prior of Liang et al. (2008) with
#' hyperparameter a = 3, evaluated directly via Equation (10) of Greenaway
#' and Ormerod (2018) using the gsl library; can run into numerical
#' problems and is mainly intended for comparative purposes.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g1 <- function(n, p_gamma, R2) {
    .Call("_blma_liang_g1", n, p_gamma, R2, PACKAGE = "blma")
}
#' Liang's g prior (Equation (11))
#'
#' Log Bayes factor under the mixture g-prior of Liang et al. (2008) with
#' hyperparameter a = 3, evaluated via Equation (11) of Greenaway and
#' Ormerod (2018).
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g2 <- function(n, p_gamma, R2) {
    .Call("_blma_liang_g2", n, p_gamma, R2, PACKAGE = "blma")
}
#' Liang's g/n prior, Appell evaluation
#'
#' Log Bayes factor under the mixture g/n-prior of Liang et al. (2008) with
#' hyperparameter a = 3, evaluated using the appell package's routines.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g_n_appell <- function(n, p_gamma, R2) {
    .Call("_blma_liang_g_n_appell", n, p_gamma, R2, PACKAGE = "blma")
}
#' Liang's g/n prior, quadrature evaluation
#'
#' Log Bayes factor under the mixture g/n-prior of Liang et al. (2008) with
#' hyperparameter a = 3, evaluated by a composite trapezoid rule.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g_n_quad <- function(n, p_gamma, R2) {
    .Call("_blma_liang_g_n_quad", n, p_gamma, R2, PACKAGE = "blma")
}
#' Liang's g/n prior approximation
#'
#' Log Bayes factor under the mixture g/n-prior of Liang et al. (2008) with
#' hyperparameter a = 3, using the approximation of Equation (15) of
#' Greenaway and Ormerod (2018) for models with more than two covariates
#' and numerical quadrature for models with one or two covariates.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
liang_g_n_approx <- function(n, p_gamma, R2) {
    .Call('_blma_liang_g_n_approx', PACKAGE = 'blma', n, p_gamma, R2)
}
#' Robust Bayarri 1
#'
#' Log Bayes factor under the robust prior of Bayarri et al. (2012) with
#' default hyperparameters, evaluated directly via Equation (18) of
#' Greenaway and Ormerod (2018) using the gsl library.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
robust_bayarri1 <- function(n, p_gamma, R2) {
    .Call("_blma_robust_bayarri1", n, p_gamma, R2, PACKAGE = "blma")
}
#' Robust Bayarri 2
#'
#' Log Bayes factor under the robust prior of Bayarri et al. (2012) with
#' default hyperparameters, evaluated directly via Equation (19) of
#' Greenaway and Ormerod (2018).
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
robust_bayarri2 <- function(n, p_gamma, R2) {
    .Call("_blma_robust_bayarri2", n, p_gamma, R2, PACKAGE = "blma")
}
#' log_BF_g_on_n_integrand
#'
#' Integrand of the hyper-g/n log Bayes factor integral, evaluated at vu.
#'
#' @param vu The argument, vu
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The integrand value used in computing the log Bayes factor
#' @export
log_BF_g_on_n_integrand <- function(vu, n, p_gamma, R2) {
    .Call("_blma_log_BF_g_on_n_integrand", vu, n, p_gamma, R2, PACKAGE = "blma")
}
#' hyper-g/n Gauss-Legendre quadrature
#'
#' Log Bayes factor under the hyper-g/n prior, computed by Gauss-Legendre
#' quadrature.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
log_BF_g_on_n_quad <- function(n, p_gamma, R2) {
    .Call("_blma_log_BF_g_on_n_quad", n, p_gamma, R2, PACKAGE = "blma")
}
#' log_BF_Zellner_Siow_integrand
#'
#' Integrand of the Zellner-Siow log Bayes factor integral, evaluated at x.
#'
#' @param x The argument, x
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The integrand value used in computing the log Bayes factor
#' @export
log_BF_Zellner_Siow_integrand <- function(x, n, p_gamma, R2) {
    .Call("_blma_log_BF_Zellner_Siow_integrand", x, n, p_gamma, R2, PACKAGE = "blma")
}
#' Zellner-Siow Gauss-Laguerre quadrature
#'
#' Log Bayes factor under the Zellner-Siow prior, computed by
#' Gauss-Laguerre quadrature.
#'
#' @param n The sample size, an integer
#' @param p_gamma The number of covariates in the model gamma
#' @param R2 The correlation co-efficient, a number between -1 and 1
#' @return The log of the Bayes Factor
#' @export
log_BF_Zellner_Siow_quad <- function(n, p_gamma, R2) {
    .Call("_blma_log_BF_Zellner_Siow_quad", n, p_gamma, R2, PACKAGE = "blma")
}
#' Run a Collapsed Variational Approximation to find the K best linear models
#'
#' @param vy_in Vector of responses
#' @param mX_in The matrix of covariates which may or may not be included in each model
#' @param mGamma_in Matrix of initial models, a K by p logical matrix
#' @param prior -- the choice of mixture $g$-prior used to perform Bayesian model averaging. The choices
#' available include:
#' \itemize{
#' \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake prior
#' of Ormerod et al. (2017).}
#'
#' \item{"ZE"}{-- special case of the prior structure described by Maruyama and George (2011).}
#'
#' \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior hyperparameter
#' \eqn{a=3} evaluated directly using Equation (10) of Greenaway and Ormerod (2018) where the Gaussian
#' hypergeometric function is evaluated using the {gsl} library. Note: this option can lead to numerical problems and is only
#' meant to be used for comparative purposes.}
#'
#' \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior hyperparameter
#' \eqn{a=3} evaluated directly using Equation (11) of Greenaway and Ormerod (2018).}
#'
#' \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#' \item{"liang_g_n_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior hyperparameter
#' \eqn{a=3} using the approximation Equation (15) of Greenaway and Ormerod (2018) for model with more
#' than two covariates and numerical quadrature (see below) for models with one or two covariates.}
#'
#' \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008) with prior hyperparameter
#' \eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#' \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using default prior hyper
#' parameter choices evaluated directly using Equation (18) of Greenaway and Ormerod (2018) with the
#' {gsl} library.}
#'
#' \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using default prior hyper
#' parameter choices evaluated directly using Equation (19) of Greenaway and Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform", "beta-binomial" or
#' "bernoulli". The choice of model prior dictates the meaning of the parameter modelpriorvec.
#' @param modelpriorvec_in If modelprior is "uniform", then the modelpriorvec is ignored and can be null.
#'
#' If
#' the modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first element containing
#' the alpha hyperparameter for the beta prior and the second element containing the beta hyperparameter for
#' the beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the number
#' of columns in mX. Each element i of modelpriorvec contains the prior probability of the the ith covariate
#' being included in the model.
#' @param bUnique Whether to ensure uniqueness in the population of particles or not. Defaults to true.
#' @param lambda The weighting factor for the entropy in f_lambda. Defaults to 1.
#' @param cores The number of cores to use. Defaults to 1.
#' @return The object returned is a list containing:
#' \itemize{
#' \item{"mGamma"}{-- A K by p binary matrix containing the final population of models}
#'
#' \item{"vgamma.hat"}{-- The most probable model found by pva}
#'
#' \item{"vlogBF"}{-- The null-based Bayes factor for each model in the population}
#'
#' \item{"posterior_model_probabilities"}{-- The estimated posterior model parameters for each model in
#' the population.}
#'
#' \item{"posterior_inclusion_probabilities"}{-- The estimated variable inclusion probabilities for each
#' model in the population.}
#'
#' \item{"vR2"}{-- The fitted R-squared values for each model in the population.}
#'
#' \item{"vp"}{-- The model size for each model in the population.}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#' mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#' "log(AGE)",
#' "S",
#' "log(ED)",
#' "log(Ex0)",
#' "log(Ex1)",
#' "log(LF)",
#' "log(M)",
#' "log(N)",
#' "log(NW)",
#' "log(U1)",
#' "log(U2)",
#' "log(W)",
#' "log(X)",
#' "log(prison)",
#' "log(time)")
#'
#' y.t <- mD$y
#' X.f <- data.matrix(cbind(mD[1:15]))
#' colnames(X.f) <- varnames
#' K <- 100
#' p <- ncol(X.f)
#' initial_gamma <- matrix(rbinom(K * p, 1, .5), K, p)
#' pva_result <- pva(y.t, X.f, initial_gamma, prior = "BIC", modelprior = "uniform",
#' modelpriorvec_in=NULL)
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for Bayesian
#' model choice with application to variable selection. Annals of Statistics 40 (3), 1550-
#' 1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models averaging using mixture
#' g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g priors for
#' Bayesian variable selection. Journal of the American Statistical Association 103 (481),
#' 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
pva <- function(vy_in, mX_in, mGamma_in, prior, modelprior, modelpriorvec_in = NULL, bUnique = TRUE, lambda = 1., cores = 1L) {
    # Thin R wrapper: all of the work happens in the compiled C++ routine.
    .Call("_blma_pva", vy_in, mX_in, mGamma_in, prior, modelprior, modelpriorvec_in, bUnique, lambda, cores, PACKAGE = "blma")
}
#' Perform Bayesian Linear Model Averaging over all of the possible linear models where
#' vy is the response and the covariates are in mX.
#'
#' @importFrom Rcpp evalCpp
#' @useDynLib blma
#'
#' @usage blma(vy, mX, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L)
#' @param vy Vector of responses
#' @param mX Covariate matrix
#' @param prior -- the choice of mixture $g$-prior used to perform Bayesian model
#' averaging. The choices available include:
#' \itemize{
#' \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#' prior of Ormerod et al. (2017).}
#'
#' \item{"ZE"}{-- special case of the prior structure described by Maruyama and
#' George (2011).}
#'
#' \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and
#' Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the
#' {gsl} library. Note: this option can lead to numerical problems and is only
#' meant to be used for comparative purposes.}
#'
#' \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and
#' Ormerod (2018).}
#'
#' \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#' \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} using the approximation Equation (15) of
#' Greenaway and Ormerod (2018) for model with more than two covariates and
#' numerical quadrature (see below) for models with one or two covariates.}
#'
#' \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#' \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (18) of Greenaway
#' and Ormerod (2018) with the {gsl} library.}
#'
#' \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (19) of Greenaway
#' and Ormerod (2018).}
#' \item{"zellner_siow_gauss_laguerre"}{-- the Zellner-Siow prior calculated
#' using Gauss-Laguerre quadrature with 1000 quadrature points}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec If modelprior is "uniform", then the modelpriorvec is ignored
#' and can be null.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1
#' @return A list containing
#' \describe{
#' \item{vR2}{the vector of correlations for each model}
#' \item{vp_gamma}{the vector of number of covariates for each model}
#' \item{vlogBF}{the vector of logs of the Bayes Factors of each model}
#' \item{vinclusion_prob}{the vector of inclusion probabilities for each of the
#' covariates}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#' mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#' "log(AGE)",
#' "S",
#' "log(ED)",
#' "log(Ex0)",
#' "log(Ex1)",
#' "log(LF)",
#' "log(M)",
#' "log(N)",
#' "log(NW)",
#' "log(U1)",
#' "log(U2)",
#' "log(W)",
#' "log(X)",
#' "log(prison)",
#' "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[1:15]))
#' colnames(mX) <- varnames
#' blma_result <- blma(vy, mX, "BIC")
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
blma <- function(vy, mX, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L) {
    # Thin R wrapper: all of the work happens in the compiled C++ routine.
    .Call("_blma_blma", vy, mX, prior, modelprior, modelpriorvec, cores, PACKAGE = "blma")
}
#' Perform Bayesian Linear Model Averaging over all of the possible linear models
#' where vy is the response, covariates that may be included are in mZ and
#' covariates which are always included are in mX.
#'
#' @usage blma_fixed(vy, mX, mZ, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L)
#' @param vy The vector of responses
#' @param mX The matrix of fixed covariates which will be included in every model
#' @param mZ The matrix of varying covariates, which may or may not be included in each
#' model
#' @param prior -- the choice of mixture $g$-prior used to perform Bayesian model
#' averaging. The choices available include:
#' \itemize{
#' \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#' prior of Ormerod et al. (2017).}
#'
#' \item{"ZE"}{-- special case of the prior structure described by Maruyama and
#' George (2011).}
#'
#' \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and
#' Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the
#' {gsl} library. Note: this option can lead to numerical problems and is only
#' meant to be used for comparative purposes.}
#'
#' \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and
#' Ormerod (2018).}
#'
#' \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#' \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} using the approximation Equation (15) of
#' Greenaway and Ormerod (2018) for model with more than two covariates and
#' numerical quadrature (see below) for models with one or two covariates.}
#'
#' \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#' \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (18) of
#' Greenaway and Ormerod (2018) with the {gsl} library.}
#'
#' \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (19) of
#' Greenaway Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec If modelprior is "uniform", then the modelpriorvec is ignored
#' and can be null.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1
#'
#' @return A list containing
#' \describe{
#' \item{vR2}{the vector of correlations for each model}
#' \item{vp_gamma}{the vector of number of covariates for each model}
#' \item{vlogBF}{the vector of logs of the Bayes Factors of each model}
#' \item{vinclusion_prob}{the vector of inclusion probabilities for each of the
#' covariates}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#' mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#' "log(AGE)",
#' "S",
#' "log(ED)",
#' "log(Ex0)",
#' "log(Ex1)",
#' "log(LF)",
#' "log(M)",
#' "log(N)",
#' "log(NW)",
#' "log(U1)",
#' "log(U2)",
#' "log(W)",
#' "log(X)",
#' "log(prison)",
#' "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[, 1:10]))
#' colnames(mX) <- varnames[1:10]
#' mZ <- data.matrix(cbind(mD[, 11:15]))
#' blma_result <- blma_fixed(vy, mX, mZ, "BIC")
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
blma_fixed <- function(vy, mX, mZ, prior, modelprior = "uniform", modelpriorvec = NULL, cores = 1L) {
    # Thin R wrapper: all of the work happens in the compiled C++ routine.
    .Call("_blma_blma_fixed", vy, mX, mZ, prior, modelprior, modelpriorvec, cores, PACKAGE = "blma")
}
#' Return the graycode matrix
#'
#' @usage graycode(varying, fixed)
#' @param varying The number of covariates varying in the graycode matrix
#' @param fixed The number of fixed covariates in the graycode matrix. These
#' covariates will always be included
#' @return The graycode matrix. The fixed columns will be included in the
#' lower indexed columns as 1s, while the higher indexed columns will vary
#' depending on whether each covariate in the varying set of covariates is included
#' or not.
#' @export
graycode <- function(varying, fixed = 0L) {
    # Thin R wrapper: the graycode enumeration is built in compiled C++.
    .Call("_blma_graycode", varying, fixed, PACKAGE = "blma")
}
#' sampler
#'
#' @usage sampler(iterations, vy_in, mX_in, prior, modelprior,
#'   modelpriorvec_in = NULL, cores = 1L)
#' @param iterations The number of iterations to run the MCMC sampler for
#' @param vy_in Vector of responses
#' @param mX_in The matrix of covariates which may or may not be included in each model
#' @param prior -- the choice of mixture $g$-prior used to perform Bayesian model
#' averaging. The choices available include:
#' \itemize{
#' \item{"BIC"}{-- the Bayesian information criterion obtained by using the cake
#' prior of Ormerod et al. (2017).}
#'
#' \item{"ZE"}{-- special case of the prior structure described by Maruyama and
#' George (2011).}
#'
#' \item{"liang_g1"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (10) of Greenaway and
#' Ormerod (2018) where the Gaussian hypergeometric function is evaluated using the
#' {gsl} library. Note: this option can lead to numerical problems and is only
#' meant to be used for comparative purposes.}
#'
#' \item{"liang_g2"}{-- the mixture \eqn{g}-prior of Liang et al. (2008) with prior
#' hyperparameter \eqn{a=3} evaluated directly using Equation (11) of Greenaway and
#' Ormerod (2018).}
#'
#' \item{"liang_g_n_appell"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter \eqn{a=3} evaluated using the {appell R} package.}
#'
#' \item{"liang_g_approx"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} using the approximation Equation (15) of
#' Greenaway and Ormerod (2018) for model with more than two covariates and
#' numerical quadrature (see below) for models with one or two covariates.}
#'
#' \item{"liang_g_n_quad"}{-- the mixture \eqn{g/n}-prior of Liang et al. (2008)
#' with prior hyperparameter eqn{a=3} evaluated using a composite trapezoid rule.}
#'
#' \item{"robust_bayarri1"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (18) of Greenaway
#' and Ormerod (2018) with the {gsl} library.}
#'
#' \item{"robust_bayarri2"}{-- the robust prior of Bayarri et al. (2012) using
#' default prior hyper choices evaluated directly using Equation (19) of Greenaway
#' and Ormerod (2018).}
#' }
#' @param modelprior The model prior to use. The choices of model prior are "uniform",
#' "beta-binomial" or "bernoulli". The choice of model prior dictates the meaning of the
#' parameter modelpriorvec.
#' @param modelpriorvec_in If modelprior is "uniform", then the modelpriorvec is ignored
#' and can be null.
#'
#' If modelprior is "beta-binomial" then modelpriorvec should be length 2 with the first
#' element containing alpha hyperparameter for the beta prior and the second element
#' containing the beta hyperparameter for beta prior.
#'
#' If modelprior is "bernoulli", then modelpriorvec must be of the same length as the
#' number columns in mX. Each element i of modelpriorvec contains the prior probability
#' of the the ith covariate being included in the model.
#' @param cores The number of cores to use. Defaults to 1.
#' @return The object returned is a list containing:
#' \itemize{
#' \item{"mGamma"}{-- An iterations by p binary matrix containing the sampled models.}
#' \item{"vinclusion_prob"}{-- The vector of inclusion probabilities.}
#' \item{"vlogBF"}{-- The vector of logs of the Bayes Factors of the models in mGamma.}
#' }
#' @examples
#' mD <- MASS::UScrime
#' notlog <- c(2,ncol(MASS::UScrime))
#' mD[,-notlog] <- log(mD[,-notlog])
#'
#' for (j in 1:ncol(mD)) {
#' mD[,j] <- (mD[,j] - mean(mD[,j]))/sd(mD[,j])
#' }
#'
#' varnames <- c(
#' "log(AGE)",
#' "S",
#' "log(ED)",
#' "log(Ex0)",
#' "log(Ex1)",
#' "log(LF)",
#' "log(M)",
#' "log(N)",
#' "log(NW)",
#' "log(U1)",
#' "log(U2)",
#' "log(W)",
#' "log(X)",
#' "log(prison)",
#' "log(time)")
#'
#' vy <- mD$y
#' mX <- data.matrix(cbind(mD[1:15]))
#' colnames(mX) <- varnames
#' K <- 100
#' p <- ncol(mX)
#' sampler_result <- sampler(10000, vy, mX, prior = "BIC",
#' modelprior = "uniform",
#' modelpriorvec_in=NULL)
#'
#' @references
#' Bayarri, M. J., Berger, J. O., Forte, A., Garcia-Donato, G., 2012. Criteria for
#' Bayesian model choice with application to variable selection. Annals of Statistics
#' 40 (3), 1550-1577.
#'
#' Greenaway, M. J., J. T. Ormerod (2018) Numerical aspects of Bayesian linear models
#' averaging using mixture g-priors.
#'
#' Liang, F., Paulo, R., Molina, G., Clyde, M. a., Berger, J. O., 2008. Mixtures of g
#' priors for Bayesian variable selection. Journal of the American Statistical
#' Association 103 (481), 410-423.
#'
#' Ormerod, J. T., Stewart, M., Yu, W., Romanes, S. E., 2017. Bayesian hypothesis tests
#' with diffuse priors: Can we have our cake and eat it too?
#' @export
sampler <- function(iterations, vy_in, mX_in, prior, modelprior, modelpriorvec_in = NULL, cores = 1L) {
    # Thin R wrapper: the MCMC sampling is performed in compiled C++.
    .Call("_blma_sampler", iterations, vy_in, mX_in, prior, modelprior, modelpriorvec_in, cores, PACKAGE = "blma")
}
|
43c76f3f657dc5660f8bd70a7ba93cf57b4c9bd9 | 9d41ea1f26243f178a8d4405373e016c3c0889c7 | /R/supercomp/array_vr.R | 335c2afff282f5653baec2d7c454d02a7757313a | [] | no_license | AldoCompagnoni/climate_drivers_methods | e25ae279c06a899624a428042fc10f0f12dd8fe7 | 8b63e1feb90bf4910eb383f6ad648efe60c14d48 | refs/heads/master | 2023-03-16T05:12:41.331480 | 2023-03-07T20:32:15 | 2023-03-07T20:32:15 | 165,881,013 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 31,300 | r | array_vr.R | library(dplyr)
library(tidyr)
library(loo)
print('load rstan')
library(rstan)
library(testthat)
# "pipeable" Reduce rbind
rbind_l <- function(x) Reduce(function(...) rbind(...), x)
# arguments from command line
# arguments from command line; commandArgs() returns character strings
args <- commandArgs(trailingOnly = TRUE)
print(args)
# positional arguments (see setNames() below for their meaning)
spp_num  <- args[1]   # NOTE(review): later code re-derives this via as.numeric(args[1])
data_dir <- args[2]
out_dir  <- args[3]
clim_var <- args[4]
response <- args[5]
family   <- args[6]
code_dir <- args[7]
# coerce to integer: options(mc.cores = ...) and parallel back-ends expect a
# number, not the character string that commandArgs() provides
cores    <- as.integer(args[8])
m_back   <- 36   # number of months of climate to look back
expp_beta <- 20  # fixed parameter passed to the exponential power model
# set rstan options
rstan_options( auto_write = TRUE )
options( mc.cores = cores )
# check arguments
args <- args %>% setNames( c('spp_num', 'data_dir','out_dir', 'clim_var',
                             'response','family', 'code_dir','cores' ) )
print('these are our arguments')
print(args)
print('current working directory')
getwd()
# read format data functions
source(file.path(code_dir,'format_data.R'))
# read data -----------------------------------------------------------------------------------------
# lam:    demographic data (has a SpeciesAuthor column and the response columns)
# m_info: matrix end-month information -- read but not used in the visible
#         portion of this script
# clim:   records for the selected climate variable (clim_var)
lam <- read.csv( paste0(data_dir, 'all_demog_updt.csv'), stringsAsFactors = F)
m_info <- read.csv( paste0(data_dir, 'MatrixEndMonth_information.csv'), stringsAsFactors = F)
clim <- read.csv( paste0(data_dir, clim_var, "_chelsa_prism_hays_2014.csv"), stringsAsFactors = F)
# unique species identifiers; one is selected below via the first CLI argument
spp <- lam$SpeciesAuthor %>% unique
# sanity-check prints (auto-printed when run via Rscript)
head(lam)
head(m_info)
head(clim)
spp
# format data ---------------------------------------------------------------------------------------
print( "start formatting")
# species index: the first command-line argument selects which species to fit
ii <- as.numeric(args[1])
spp_name <- spp[ii] # test run w/ spp number 1
print(spp_name)
# lambda data
# format_species/clim_list/clim_detrend/clim_long/lambda_plus_clim are
# defined in format_data.R (sourced above)
spp_resp <- format_species(spp_name, lam, response)
# climate data
clim_separate <- clim_list(spp_name, clim, spp_resp)
clim_detrnded <- lapply(clim_separate, clim_detrend, clim_var)
clim_mats <- Map(clim_long, clim_detrnded, spp_resp, m_back)
# model data
mod_data <- lambda_plus_clim(spp_resp, clim_mats, response)
# no-op assignment: the range-rescaling of the climate matrix is commented out
mod_data$climate <- mod_data$climate #/ diff(range(mod_data$climate))
# throw error if not enough data
if( nrow(mod_data$resp) < 6 ) stop( paste0("not enough temporal replication for '",
                                           spp_name, "' and response variable '" , response, "'") )
print("done formatting")
# Transform response variables (if needed) ------------------------------------------------------------------
# transform survival/growth - ONLY if less than 30% data points are 1/0
# The (y*(n-1) + 0.5)/n compression pulls values off the [0,1] boundary,
# presumably so a (0,1)-supported likelihood can be used -- confirm against
# the Stan model files selected via `family`.
if( grepl("surv", response, ignore.case = T) |
    grepl("grow", response, ignore.case = T) ){

  raw_x <- mod_data$resp[,response]
  # proportion of exact ones / zeros in the raw response
  pc_1  <- sum( raw_x == 1 ) / length(raw_x)
  pc_0  <- sum( raw_x == 0 ) / length(raw_x)

  # for survival: compress only when exact 1s are rare (< 30%)
  if( grepl("surv", response, ignore.case = T) & pc_1 < 0.3 ){
    n     <- length(raw_x)
    new_x <- ( raw_x*(n - 1) + 0.5 ) / n
    mod_data$resp[,response] <- new_x
  }

  # for growth: compress only when exact 0s are rare (< 30%)
  if( grepl("grow", response, ignore.case = T) & pc_0 < 0.3 ){
    n     <- length(raw_x)
    new_x <- ( raw_x*(n - 1) + 0.5 ) / n
    mod_data$resp[,response] <- new_x
  }

}

# avoid absolute zeros
if( response == "fec" | response == "RepSSD" ){
  # transform from [0, infinity) to (0, infinity)
  # I add quantity 2 orders of mag. lower than lowest obs value.
  mod_data$resp[,response] <- mod_data$resp[,response] + 1.54e-12
}

if( response == "rho" | response == "react_fsa" ){
  # bound responses to (0,infinity) instead of [1, infinity)
  mod_data$resp[,response] <- mod_data$resp[,response] - 0.99999
}
# fit models ----------------------------------------------------------------------------------------

# organize data into list to pass to stan
# clim_yr stacks three 12-month row means (columns 1:12, 13:24, 25:36 of the
# 36-month climate matrix); M is months per year and K the number of years.
dat_stan <- list(
  n_time  = nrow(mod_data$climate),
  n_lag   = ncol(mod_data$climate),
  y       = mod_data$resp[,response],
  clim    = mod_data$climate,
  clim_means = rowMeans(mod_data$climate),
  clim_yr = list( rowMeans(mod_data$climate[, 1:12]),
                  rowMeans(mod_data$climate[,13:24]),
                  rowMeans(mod_data$climate[,25:36]) ) %>% do.call(rbind,.),
  M       = 12,    # number of months in a year
  K       = ncol(mod_data$climate) / 12,
  expp_beta = expp_beta
)

# simulation parameters
# MCMC settings shared by every model below: 4 chains x 4000 iterations,
# 1000 warmup draws discarded, keeping every 2nd post-warmup draw.
sim_pars <- list(
  warmup = 1000,
  iter = 4000,
  thin = 2,
  chains = 4
)
print('start first model')
# NULL model (model of the mean)
# Intercept-only baseline; the Stan model object is loaded from a pre-saved
# RDS file named after the likelihood family (e.g. "<family>_null.RDS").
fit_ctrl1 <- sampling(
  object = readRDS(paste0(family,"_null.RDS")),
  data = dat_stan,
  pars = c('alpha', 'y_sd',
           'yhat', 'log_lik'),
  warmup = sim_pars$warmup,
  iter = sim_pars$iter,
  thin = sim_pars$thin,
  chains = sim_pars$chains
  #control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
print('end first model')
# year t-1
dat_stan$clim_means <- rowMeans(mod_data$climate[,13:24])
fit_yr2 <- sampling(
object = readRDS(paste0(family,"_yr.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd',
'yhat','log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end yr2')
# year t
dat_stan$clim_means <- rowMeans(mod_data$climate[,1:12 ])
fit_yr1 <- sampling(
object = readRDS(paste0(family,"_yr.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd',
'yhat','log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end yr1')
# year t-2
dat_stan$clim_means <- rowMeans(mod_data$climate[,25:36])
fit_yr3 <- sampling(
object = readRDS(paste0(family,"_yr.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
dat_stan$clim_means <- rowMeans(mod_data$climate)
print('end yr3')
# gaussian moving window
fit_gaus <- sampling(
object = readRDS(paste0(family,"_gaus.RDS")),
data = dat_stan,
pars = c('sens_mu', 'sens_sd', 'alpha', 'beta', 'y_sd',
'yhat','log_lik'), #
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end gaus')
# exponential power moving window
fit_expp <- sampling(
object = readRDS(paste0(family,"_expp.RDS")),
data = dat_stan,
pars = c('sens_mu', 'sens_sd', 'alpha', 'beta', 'y_sd',
'yhat', 'log_lik'), #'log_lik'
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end expp')
# moving beta hierarchical
fit_mb_h <- sampling(
object = readRDS(paste0(family,"_movbeta_h.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd', 'mu_beta', 'sigma_beta',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end movb_h')
# moving beta correlated
fit_mb <- sampling(
object = readRDS(paste0(family,"_movbeta.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd', 'mu_beta',
'yhat','log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end movb')
# moving beta hierarchical
fit_mb_h_n <- sampling(
object = readRDS(paste0(family,"_movbeta_h_nest.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd', 'mu_beta', 'sigma_beta', 'theta_y',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end movb_h_n')
# moving beta correlated
fit_mb_n <- sampling(
object = readRDS(paste0(family,"_movbeta_nest.RDS")),
data = dat_stan,
pars = c('alpha', 'beta', 'y_sd', 'mu_beta', 'theta_y',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains,
control = list(adapt_delta = 0.99)
)
print('end movb_n')
# Nested models
# update data list
dat_stan$clim <- t(mod_data$climate)
dat_stan$clim1 <- t(mod_data$climate)[1:12 ,]
dat_stan$clim2 <- t(mod_data$climate)[13:24,]
dat_stan$clim3 <- t(mod_data$climate)[25:36,]
# Simplex nested
fit_24_nest <- sampling(
object = readRDS(paste0(family,"_dirichlet_nest.RDS")),
data = dat_stan,
pars = c('theta_y', 'theta_m', 'alpha', 'beta', 'y_sd',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains#,
#control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# Gaussian nested
fit_gaus_nest <- sampling(
object = readRDS(paste0(family,"_gaus_nest.RDS")),
data = dat_stan,
pars = c('sens_mu', 'sens_sd', 'theta_y', 'alpha', 'beta', 'y_sd',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains#,
#control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# Power exponential nested
fit_expp_nest <- sampling(
object = readRDS(paste0(family,"_expp_nest.RDS")),
data = dat_stan,
pars = c('sens_mu', 'sens_sd', 'theta_y', 'alpha', 'beta', 'y_sd',
'yhat', 'log_lik'),
warmup = sim_pars$warmup,
iter = sim_pars$iter,
thin = sim_pars$thin,
chains = sim_pars$chains#,
#control = list(adapt_delta = 0.999, stepsize = 0.001, max_treedepth = 20)
)
# parameter values and diagnostics ----------------------------------------------------------------
# list of model fits
# Named list of all 13 stanfit objects; the names are reused downstream for
# posterior tables, diagnostics, and LOO/WAIC labelling, so keep this order
# in sync with anything that re-labels the fits.
mod_fit <- list( ctrl1 = fit_ctrl1,
                 yr1 = fit_yr1,
                 yr2 = fit_yr2,
                 yr3 = fit_yr3,
                 gaus = fit_gaus,
                 expp = fit_expp,
                 mb_h = fit_mb_h,
                 mb = fit_mb,
                 mb_h_n = fit_mb_h_n,
                 mb_n = fit_mb_n,
                 simpl_n = fit_24_nest,
                 expp_n = fit_expp_nest,
                 gaus_n = fit_gaus_nest )
# get central tendencies
# pars_diag_extract: summarise one stanfit object into a single-row data frame
# holding the posterior mean and median of every parameter plus four MCMC
# quality counters.
#   x : a stanfit object
# Returns a 1-row data.frame with columns <par>_mean and <par>_median for each
# parameter, plus:
#   n_diverg  - number of divergent post-warmup transitions
#   rhat_high - number of parameters with Rhat > 1.1
#   n_eff_low - number of parameters with n_eff / total draws < 0.1
#   mcse_high - number of parameters with se_mean / sd > 0.1
pars_diag_extract <- function(x){
  # central tendencies
  draws        <- as.data.frame( rstan::extract(x) )
  draws        <- setNames(draws, gsub("\\.", "_", names(draws)))
  # vapply (type-stable) replaces sapply(draws, function(x) mean(x)),
  # which also shadowed the function argument 'x'
  par_means    <- vapply(draws, mean,   numeric(1)) %>%
                    setNames( paste0(names(draws), "_mean") )
  par_medians  <- vapply(draws, median, numeric(1)) %>%
                    setNames( paste0(names(draws), "_median") )
  central_tend <- c(par_means, par_medians)
  # diagnostics
  # sampler-param column 5 is "divergent__" (0/1 per post-warmup iteration)
  diverg       <- do.call(rbind, args = get_sampler_params(x, inc_warmup = F))[,5]
  n_diverg     <- sum(diverg == 1)
  df_summ      <- as.data.frame(summary(x)$summary)
  # na.rm = TRUE reproduces which()'s silent NA-dropping while keeping NA/NaN
  # Rhat or se values from turning the counts into NA
  rhat_high    <- sum(df_summ$Rhat > 1.1, na.rm = TRUE)
  n_eff        <- df_summ$n_eff / length(diverg)
  n_eff_low    <- sum(n_eff < 0.1, na.rm = TRUE)
  mcse_high    <- sum(df_summ$se_mean / df_summ$sd > 0.1, na.rm = TRUE)
  diagnostics  <- c(n_diverg = n_diverg, rhat_high = rhat_high,
                    n_eff_low = n_eff_low, mcse_high = mcse_high)
  out          <- c( central_tend, diagnostics ) %>% t %>% as.data.frame
  out
}
# extract rhat~n_eff for all parameters
# all_diag_extract: pull per-parameter convergence diagnostics (n_eff, Rhat,
# se_mean, sd) out of a stanfit summary and tag every row with the model
# label 'y'.
# NOTE(review): the pipeline below calls rhat_n_eff_extract() for this job,
# not this function — confirm whether this helper is superseded/unused here.
all_diag_extract <- function(x,y){
  as.data.frame(summary(x)$summary) %>%
    select(n_eff, Rhat, se_mean, sd ) %>%
    tibble::add_column(.before=1, model = y)
}
# store posteriors
# posterior_extract: flatten every posterior draw of a fitted model into a
# data frame, normalise the auto-generated "par.idx" column names to
# "par_idx", and prepend the model label as the first column.
#   model_fit  : a stanfit object
#   model_name : character label stored in the 'model' column
posterior_extract <- function(model_fit, model_name){
  draws        <- as.data.frame( rstan::extract(model_fit) )
  names(draws) <- gsub("\\.", "_", names(draws))
  tibble::add_column(draws, model = model_name, .before = 1)
}
# calculate central tendencies
# one summary row per model, stacked and labelled with the model name
pars_diag_l   <- lapply(mod_fit, pars_diag_extract)
mod_pars_diag <- Reduce(function(...) bind_rows(...), pars_diag_l) %>%
                    tibble::add_column(model = names(mod_fit), .before = 1)
# store posteriors
posts_l       <- Map(posterior_extract, mod_fit, names(mod_fit) )
posteriors    <- bind_rows(posts_l)
# extract rhat/neff
# NOTE(review): rhat_n_eff_extract() is not defined in this section (only
# all_diag_extract() is) — confirm it is provided earlier in the file or by a
# sourced script before running.
diag_df       <- Map( rhat_n_eff_extract,
                      mod_fit,
                      names(mod_fit) ) %>% bind_rows
# WAIC model comparison --------------------------------------------------------------------
# wAIC model selection using loo approximation (from library 'loo')
log_liks   <- lapply(mod_fit, extract_log_lik)
# leave-one-out estimates
# BUG FIX: element names are now derived from names(log_liks) (= names(mod_fit))
# instead of a hand-written vector. The old vector listed "loo_gaus_n" before
# "loo_expp_n", while mod_fit stores expp_n BEFORE gaus_n, so the two nested
# models were swapped in every downstream LOO table. Same fix applied to WAIC.
loo_l      <- lapply(log_liks, loo) %>%
                setNames( paste0("loo_", names(log_liks)) )
loo_df     <- loo::compare(loo_l$loo_ctrl1,
                           loo_l$loo_yr1, loo_l$loo_yr2, loo_l$loo_yr3,
                           loo_l$loo_gaus, loo_l$loo_expp,
                           loo_l$loo_mb_h, loo_l$loo_mb,
                           loo_l$loo_mb_h_n, loo_l$loo_mb_n,
                           loo_l$loo_simpl_n, loo_l$loo_gaus_n, loo_l$loo_expp_n ) %>%
                as.data.frame %>%
                tibble::add_column(model = gsub("loo_l\\$loo_","",row.names(.) ), .before = 1) %>%
                rename( se_diff_loo = se_diff )
# WAIC estimates
waic_l     <- lapply(log_liks, waic) %>%
                setNames( paste0("waic_", names(log_liks)) )
waic_df    <- loo::compare(waic_l$waic_ctrl1,
                           waic_l$waic_yr1, waic_l$waic_yr2, waic_l$waic_yr3,
                           waic_l$waic_gaus, waic_l$waic_expp,
                           waic_l$waic_mb_h, waic_l$waic_mb,
                           waic_l$waic_mb_h_n, waic_l$waic_mb_n,
                           waic_l$waic_simpl_n, waic_l$waic_gaus_n, waic_l$waic_expp_n) %>%
                as.data.frame %>%
                tibble::add_column(model = gsub("waic_l\\$waic_","",row.names(.) ),
                                   .before = 1) %>%
                # this is useless to me, causes a conflict down the line
                dplyr::select(-elpd_diff) %>%
                rename( se_diff_waic = se_diff )
# leave-one-out crossvalidation ------------------------------------------------------------------------
# crossvalidation function
# CrossVal: leave-one-YEAR-out crossvalidation. Refits all 13 candidate
# climate models with year i held out, then returns the held-out rows of
# mod_data$resp augmented with each model's posterior-mean prediction, its
# per-observation expected log predictive density (ELPD), and MCMC
# diagnostics for every crossvalidation fit.
#   i        : index into unique(mod_data$resp$year) — the year to leave out
#   mod_data : list with $resp (response data frame, incl. 'year') and
#              $climate (matrix with 36 monthly columns per observation)
#   response : name of the response column being modelled
# Relies on 'family', 'expp_beta' and 'sim_pars' from the enclosing scope.
CrossVal <- function(i, mod_data, response){ # i is index for row to leave out
  # identify years
  uniq_yr <- mod_data$resp$year %>% unique
  test_i  <- which(mod_data$resp$year == uniq_yr[i])
  # put all in matrix form
  x_clim       <- mod_data$climate
  x_clim_means <- rowMeans(mod_data$climate) # climate averages over entire window (for control model #2)
  # response variable
  y_train <- mod_data$resp[-test_i, response]
  y_test  <- mod_data$resp[test_i, response]
  # climate variable
  clim_train <- x_clim[-test_i,]
  clim_test  <- x_clim[test_i,]
  # climate averages over the full 36-month window (for control model #2)
  clim_means_train <- x_clim_means[-test_i]
  clim_means_test  <- x_clim_means[test_i]
  # organize data into list to pass to stan
  dat_stan_crossval <- list(
    n_train = length(y_train),  # number of data points in train set (length of response var)
    n_test  = length(y_test),   # number of data points in test set
    n_lag   = ncol(clim_train), # maximum lag
    y_train = array(y_train),
    y_test  = array(y_test),
    clim_train = array(clim_train),
    clim_test  = array(clim_test),
    clim_means_train = array(clim_means_train), # climate averages over the full 36-month window (for control model #2)
    clim_means_test  = array(clim_means_test),  # climate averages over the full 36-month window (for control model #2)
    clim_yr_train = list( rowMeans(clim_train[, 1:12]),
                          rowMeans(clim_train[,13:24]),
                          rowMeans(clim_train[,25:36]) ) %>% do.call(rbind,.),
    clim_yr_test  = list( rowMeans(clim_test[, 1:12]),
                          rowMeans(clim_test[,13:24]),
                          rowMeans(clim_test[,25:36]) ) %>% do.call(rbind,.),
    expp_beta = expp_beta, # beta paramater for exponential power distribution
    M = 12,                # number of months in a year
    K = ncol(clim_train) / 12
  )
  # fit control 1 (intercept only)
  fit_ctrl1_crossval <- sampling(
    object = readRDS(paste0(family,"_null_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains
  )
  # year t
  # each yearly model swaps in the means of its own 12-month slice
  dat_stan_crossval$clim_means_test  <- rowMeans( mod_data$climate[test_i, 1:12,drop=F] ) %>% array
  dat_stan_crossval$clim_means_train <- rowMeans( mod_data$climate[-test_i,1:12,drop=F] ) %>% array
  fit_yr1_crossval <- sampling(
    object = readRDS(paste0(family,"_yr_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains
  )
  # year t-1
  dat_stan_crossval$clim_means_test  <- rowMeans( mod_data$climate[test_i, 13:24,drop=F] ) %>% array
  dat_stan_crossval$clim_means_train <- rowMeans( mod_data$climate[-test_i,13:24,drop=F] ) %>% array
  fit_yr2_crossval <- sampling(
    object = readRDS(paste0(family,"_yr_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains
  )
  # year t-2
  dat_stan_crossval$clim_means_test  <- rowMeans( mod_data$climate[test_i, 25:36,drop=F] ) %>% array
  dat_stan_crossval$clim_means_train <- rowMeans( mod_data$climate[-test_i,25:36,drop=F] ) %>% array
  fit_yr3_crossval <- sampling(
    object = readRDS(paste0(family,"_yr_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains
  )
  # fit moving window, gaussian
  fit_gaus_crossval <- sampling(
    object = readRDS(paste0(family,"_gaus_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("sens_mu", "sens_sd", "alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # fit moving window, exponential power
  fit_expp_crossval <- sampling(
    object = readRDS(paste0(family,"_expp_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("sens_mu", "sens_sd", "alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # moving beta model, hierarchical
  fit_mb_h_crossval <- sampling(
    object = readRDS(paste0(family,"_movbeta_h_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd", "mu_beta", "sigma_beta",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # moving beta model, NON-hierarchical
  fit_mb_crossval <- sampling(
    object = readRDS(paste0(family,"_movbeta_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd", "mu_beta", "eta", "rho",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # nested moving beta model, hierarchical
  fit_mb_h_n_crossval <- sampling(
    object = readRDS(paste0(family,"_movbeta_h_nest_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd", "mu_beta", "sigma_beta",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # nested moving beta model, NON-hierarchical
  fit_mb_n_crossval <- sampling(
    object = readRDS(paste0(family,"_movbeta_nest_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("alpha", "beta", "y_sd", "mu_beta", "eta", "rho",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # update data list
  # nested-by-year programs expect climate transposed (months x obs) plus
  # per-year slices and their column means; drop=F keeps single-row test
  # years as matrices
  dat_stan_crossval$clim_train  <- t(dat_stan_crossval$clim_train)
  dat_stan_crossval$clim_test   <- t(dat_stan_crossval$clim_test)
  dat_stan_crossval$clim1_train <- dat_stan_crossval$clim_train[1:12 ,]
  dat_stan_crossval$clim1_test  <- dat_stan_crossval$clim_test[ 1:12, ,drop=F]
  dat_stan_crossval$clim2_train <- dat_stan_crossval$clim_train[13:24,]
  dat_stan_crossval$clim2_test  <- dat_stan_crossval$clim_test[ 13:24, ,drop=F]
  dat_stan_crossval$clim3_train <- dat_stan_crossval$clim_train[25:36,]
  dat_stan_crossval$clim3_test  <- dat_stan_crossval$clim_test[ 25:36, ,drop=F]
  dat_stan_crossval$clim1_means_train <- colMeans(dat_stan_crossval$clim_train[1:12,])
  dat_stan_crossval$clim1_means_test  <- colMeans(dat_stan_crossval$clim_test[ 1:12, ,drop=F]) %>% array
  dat_stan_crossval$clim2_means_train <- colMeans(dat_stan_crossval$clim_train[13:24,])
  dat_stan_crossval$clim2_means_test  <- colMeans(dat_stan_crossval$clim_test[ 13:24, ,drop=F]) %>% array
  dat_stan_crossval$clim3_means_train <- colMeans(dat_stan_crossval$clim_train[25:36,])
  dat_stan_crossval$clim3_means_test  <- colMeans(dat_stan_crossval$clim_test[ 25:36, ,drop=F]) %>% array
  # fit simplex nested within year
  fit_24_nest_crossval <- sampling(
    object = readRDS(paste0(family,"_dirichlet_nest_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("theta_m", "theta_y","alpha", "beta", "y_sd", "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # fit gaus nested within year
  fit_gaus_nest_crossval <- sampling(
    object = readRDS(paste0(family,"_gaus_nest_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("sens_mu","sens_sd", "theta_y","alpha", "beta", "y_sd",
             "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # fit exponential power nested within year
  fit_expp_nest_crossval <- sampling(
    object = readRDS(paste0(family,"_expp_nest_crossval.RDS")),
    data = dat_stan_crossval,
    pars = c("sens_mu","sens_sd", "theta_y","alpha", "beta", "y_sd", "pred_y", "log_lik","log_lik_test"),
    warmup = sim_pars$warmup,
    iter = sim_pars$iter,
    thin = sim_pars$thin,
    chains = sim_pars$chains,
    control = list(adapt_delta = 0.99)
  )
  # posterior mean prediction for the out-of-sample value
  crossval_mods <- list( ctrl1   = fit_ctrl1_crossval,
                         yr1     = fit_yr1_crossval,
                         yr2     = fit_yr2_crossval,
                         yr3     = fit_yr3_crossval,
                         gaus    = fit_gaus_crossval,
                         expp    = fit_expp_crossval,
                         mb_h    = fit_mb_h_crossval,
                         mb      = fit_mb_crossval,
                         mb_h_n  = fit_mb_h_n_crossval,
                         mb_n    = fit_mb_n_crossval,
                         simpl_n = fit_24_nest_crossval,
                         gaus_n  = fit_gaus_nest_crossval,
                         expp_n  = fit_expp_nest_crossval )
  # predictions
  # posterior mean of pred_y per held-out observation
  mod_preds <- lapply(crossval_mods, function(x) rstan::extract(x, "pred_y")$pred_y %>% apply(2,mean) )
  # Expected Log Predictive Density
  # log of the posterior-mean likelihood for each held-out observation
  mod_elpds <- lapply(crossval_mods, function(x){
                  rstan::extract(x, "log_lik_test")$log_lik_test %>%
                    exp %>%
                    apply(2,mean) %>%
                    log
                } )
  # diagnostics
  # same four MCMC-quality counters as pars_diag_extract, one row per fit
  diagnostics <- function(fit_obj, name_mod){
    diverg      <- do.call(rbind, args = get_sampler_params(fit_obj, inc_warmup = F))[,5]
    n_diverg    <- length(which(diverg == 1))
    df_summ     <- as.data.frame(summary(fit_obj)$summary)
    rhat_high   <- length(which(df_summ$Rhat > 1.1))
    n_eff       <- df_summ$n_eff / length(diverg)
    n_eff_low   <- length(which(n_eff < 0.1))
    mcse_high   <- length(which(df_summ$se_mean / df_summ$sd > 0.1))
    out         <- data.frame(n_diverg, rhat_high,
                              n_eff_low, mcse_high)
    # out         <- setNames(out, paste0(names(out),"_",name_mod) )
    return(out)
  }
  # store diagnostics
  # gaus_expp <- crossval_mods[c("gaus","expp","gev","simpl")]
  diagnost_l  <- Map(diagnostics, crossval_mods, names(crossval_mods))
  diagnost_df <- do.call(cbind, diagnost_l) %>%
                    bind_cols( unique( dplyr::select(mod_data$resp[test_i,],year) ) ) %>%
                    setNames( gsub("\\.","_",names(.)) )
  # function
  # held-out rows plus one *_pred and one *_elpd column per model
  pred_elpd_df<- mod_data$resp[test_i,] %>%
                    mutate( # predictions
                            ctrl1_pred   = mod_preds$ctrl1,
                            yr1_pred     = mod_preds$yr1,
                            yr2_pred     = mod_preds$yr2,
                            yr3_pred     = mod_preds$yr3,
                            gaus_pred    = mod_preds$gaus,
                            expp_pred    = mod_preds$expp,
                            mb_pred      = mod_preds$mb,
                            mb_h_pred    = mod_preds$mb_h,
                            mb_n_pred    = mod_preds$mb_n,
                            mb_h_n_pred  = mod_preds$mb_h_n,
                            simpl_n_pred = mod_preds$simpl_n,
                            gaus_n_pred  = mod_preds$gaus_n,
                            expp_n_pred  = mod_preds$expp_n,
                            # Expected Log Predictive Density
                            ctrl1_elpd   = mod_elpds$ctrl1,
                            yr1_elpd     = mod_elpds$yr1,
                            yr2_elpd     = mod_elpds$yr2,
                            yr3_elpd     = mod_elpds$yr3,
                            gaus_elpd    = mod_elpds$gaus,
                            expp_elpd    = mod_elpds$expp,
                            mb_elpd      = mod_elpds$mb,
                            mb_h_elpd    = mod_elpds$mb_h,
                            mb_n_elpd    = mod_elpds$mb_n,
                            mb_h_n_elpd  = mod_elpds$mb_h_n,
                            simpl_n_elpd = mod_elpds$simpl_n,
                            gaus_n_elpd  = mod_elpds$gaus_n,
                            expp_n_elpd  = mod_elpds$expp_n )
  # df to return
  out <- left_join(pred_elpd_df, diagnost_df)
  # remove stanfit objects (garbage collection)
  rm(fit_ctrl1_crossval)
  rm(fit_yr1_crossval)
  rm(fit_yr2_crossval)
  rm(fit_yr3_crossval)
  rm(fit_gaus_crossval)
  rm(fit_expp_crossval)
  rm(fit_mb_h_crossval)
  rm(fit_mb_crossval)
  rm(fit_mb_h_n_crossval)
  rm(fit_mb_n_crossval)
  rm(fit_24_nest_crossval)
  rm(fit_gaus_nest_crossval)
  rm(fit_expp_nest_crossval)
  return(out)
}
print( "start crossvalidation" )
# spp-specific cross validation
# one CrossVal() call per unique year; each call returns the held-out rows
# with predictions/ELPD/diagnostics, stacked into a single data frame
year_inds   <- seq_along(unique(mod_data$resp$year))
cxval_res   <- lapply( year_inds, CrossVal, mod_data, response)
cxval_pred  <- do.call(rbind, cxval_res)
print( "end crossvalidation" )
# measures of fit --------------------------------------------------------------------------
# calculate either mse or deviance
# pred_perform: score a vector of predictions against the observed response.
#   x        : numeric vector of predictions (one per row of mod_data$resp)
#   mod_data : list holding the observed responses in mod_data$resp
#   response : name of the response column in mod_data$resp
#   type     : "mse" (mean squared error) or "deviance"
#              (gaussian deviance via dismo::calc.deviance)
# Returns a single numeric score; errors on an unknown 'type' (which
# previously surfaced as the cryptic "object 'res' not found").
pred_perform <- function(x, mod_data, response, type){
  obs <- mod_data$resp[, response]
  if( type == "mse"){
    res <- mean( (x - obs)^2 )
  } else if( type == "deviance"){
    res <- calc.deviance(x, obs,
                         weights = rep(1, length(x) ),
                         family="gaussian", calc.mean = TRUE)
  } else {
    stop("type must be 'mse' or 'deviance'", call. = FALSE)
  }
  return(res)
}
# format results into a data frame
# perform_format: collapse a named list of scalar fit measures (one per model)
# into a two-column data frame: model identifier + the measure named 'var'.
# The double transpose turns the named vector into a one-column matrix whose
# rownames carry the model labels.
perform_format <- function(x, var){
  flat       <- as.data.frame( t( t( unlist(x) ) ) )
  flat       <- tibble::rownames_to_column(flat, var = "model")
  flat$model <- gsub("mod_preds.", "", flat$model)
  setNames( flat, c("model", var) )
}
# order of 'mod_data$resp' and 'cxval_pred' need be the same
expect_true( all.equal(dplyr::select(mod_data$resp,  year, population),
                       dplyr::select(cxval_pred, year, population)) )
print('IMPORTANT: Check names')
names(cxval_pred)
# mean squared error
mse   <- cxval_pred %>%
            dplyr::select(ctrl1_pred:expp_n_pred) %>%
            lapply(pred_perform, mod_data, response, "mse") %>%
            perform_format("mse") %>%
            mutate( model = gsub("_pred","",model) )
# Expected Log Predictive Density
# FIX: select only the *_elpd columns. The previous range
# (ctrl1_pred:expp_n_elpd) also pulled in every *_pred column; their sums were
# computed needlessly and then silently dropped by the merge() below because
# their "model" labels ("ctrl1_pred", ...) never matched the mse table.
elpd  <- cxval_pred %>%
            dplyr::select(ctrl1_elpd:expp_n_elpd) %>%
            apply(2, sum) %>%
            as.matrix %>%
            as.data.frame %>%
            tibble::add_column(model = rownames(.), .before=1) %>%
            mutate( model = gsub("_elpd","",model) ) %>%
            setNames( c("model", "elpd") )
# measures of fit
# mof  <- merge(mse, devi)
mof   <- merge(mse, elpd)
# store results ---------------------------------------------------------------------------
# one row per model: posterior summaries + diagnostics + LOO + WAIC + fit
# measures, sorted best (lowest mse) first
mod_summs <- Reduce(function(...) full_join(...),
                    list(mod_pars_diag, loo_df, waic_df, mof) ) %>%
                arrange( mse )
write.csv(mod_summs,  paste0(out_dir,"-",spp_num,"_mod_summaries_", spp_name,"_",response,"_",clim_var,".csv"), row.names = F)
write.csv(posteriors, paste0(out_dir,"-",spp_num,"_posterior_", spp_name,"_",response,"_",clim_var,".csv"), row.names = F)
write.csv(cxval_pred, paste0(out_dir,"-",spp_num,"_crossval_pred_diag_",spp_name,"_",response,"_",clim_var,".csv"), row.names = F)
write.csv(diag_df,    paste0(out_dir,"-",spp_num,"_diagnostics_",spp_name,"_",response,"_",clim_var,".csv"), row.names = F)
# NOTE(review): 'workshace' is a typo ("workspace") in the saved filename;
# kept as-is because downstream scripts may load this exact path.
save.image( paste0(out_file,'_workshace_',spp_num,'.RData') )
|
1b6977b67765df57d42668fcd0d257f654a87775 | 84258cd059fd20b0239d56fae64ba9e28fb10578 | /t-test_IHDP.R | 3a8c20a119c9e37392bc7859ef169563ca1883fe | [] | no_license | dhdths914/UCL-MSc-CSML-dissertation | a771caacc8540222e9d57fec00b7a40782541107 | ba8fe7b2483f80b64947c026c7ce48406ebaf531 | refs/heads/master | 2020-12-04T15:30:59.388981 | 2016-09-05T03:46:59 | 2016-09-05T03:46:59 | 67,371,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 716 | r | t-test_IHDP.R | # Load the data
X_test <- "add your file path"
Y_test <- "add your file path"
NN_all_IHDP <- "add your file path"
NN_college_IHDP <- "add your file path"
NN_high_IHDP <- "add your file path"
GP_all_IHDP <- "add your file path"
GP_college_IHDP <- "add your file path"
GP_high_IHDP <- "add your file path"
# compute the error
gp_error_all <- abs(GP_all_IHDP-Y_test)
gp_error_college <- abs(GP_college_IHDP-Y_test)
gp_error_high <- abs(GP_high_IHDP-Y_test)
nn_error_all <- abs(NN_all_IHDP-Y_test)
nn_error_college <- abs(NN_college_IHDP-Y_test)
nn_error_high <- abs(NN_high_IHDP-Y_test)
#t-test
t.test(gp_error_all,nn_error_all)
t.test(gp_error_college,nn_error_college)
t.test(gp_error_high,nn_error_high)
|
9e6e1844af55c440df486fbf5fa517e41869ceea | 1ade0e09b238c51f6c67a92c4ccb750d4d9a9ce0 | /positivity graphs.R | b93be94d59e4d9cbaf0a474377b583ed3bb630ce | [] | no_license | Gielpy/COVIDaz | b56ee5cf4803ba8f1e82aae84b4632c6ddfb3a20 | 2083cc4d9bf3c0b08452628dccb17de20829e1dc | refs/heads/master | 2022-12-07T15:15:00.557962 | 2020-08-08T17:39:15 | 2020-08-08T17:39:15 | 278,990,283 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,002 | r | positivity graphs.R | ggplot(covid.az, aes(x=Date, y=New.Cases))+
geom_bar(stat='identity', color='white', fill='#003153')+
geom_line(aes(y=rollmean(New.Cases,7,fill=NA)), linetype='twodash', color='red', size=1.5)+
geom_line(aes(y=rollmean(New.PCR,7,fill=NA)), linetype='twodash', color='green', size=1.5)+
ggtitle('New Cases by Day - Arizona')+
# labs()+
theme_bw()
# Percent positivity = cases / PCR tests, by date of test.
# FIX 1: store positivity as a plain numeric column. The old code wrapped the
# ratio in as.data.frame(), which made covid.az$positivity a nested
# data-frame column (and left a stray, unused `positivity = NULL` global).
covid.az$positivity <- (covid.az$Cases.Date.of.Administration /
                          covid.az$PCR.Date.of.Test) * 100
# 7-day rolling means; the first 7 rows are dropped so rollmean has a full window
p<-ggplot(covid.az[-c(1:7),], aes(x=Date))+
  geom_line(aes(y=rollmean(Cases.Date.of.Administration,7,fill=NA)), linetype='twodash', color='blue', size=1.5)+
  geom_line(aes(y=rollmean(PCR.Date.of.Test,7,fill=NA)), linetype='twodash', color='red', size=1.5)+
  ggtitle('Comparing PCR Tests and Cases - Arizona')+
  # labs()+
  theme_bw()
q<-ggplot(covid.az[-c(1:7),], aes(x=Date))+
  geom_line(aes(y=rollmean(positivity,7,fill=NA)), linetype='twodash', color='green', size=1.5)+
  ggtitle('Positivity - Arizona')+
  # labs()+
  theme_bw()
library(gridExtra)
# FIX 2: ggsave() saves the *last ggplot* by default, so the old code wrote
# only panel q to disk even though both panels were drawn on screen. Build the
# two-panel grob once, draw it, and pass it to ggsave explicitly.
combined <- arrangeGrob(p, q, ncol=2)
grid.arrange(p, q, ncol=2)
ggsave(paste0("Tests, Cases, and Positivity "
              , format(Sys.time(), "%Y-%m-%d")
              , ".png"),
       plot = combined)
# 7-day rolling mean of the daily change in inpatients
ggplot(covid.az, aes(x=Date))+
  geom_line(aes(y=rollmean(New.Inpatient.Change,7,fill=NA)), linetype='solid', color='#003153', size=1.5)+
  theme_bw()
# Cases per 1,000 POSITIVE tests, by age group.
# Assigning $ components onto NULL builds up a plain list, converted to a
# data frame afterwards.
rolling.cases=NULL
rolling.cases$Date <- covid.az$Date
rolling.cases$under20 <- covid.az$delta.Cases.Under20 / (covid.az$delta.Positives/1000)
rolling.cases$c20.44 <- covid.az$delta.Cases.20.44 / (covid.az$delta.Positives/1000)
rolling.cases$c45.54 <- covid.az$delta.Cases.45.54 / (covid.az$delta.Positives/1000)
rolling.cases$c55.64 <- covid.az$delta.Cases.55.64 / (covid.az$delta.Positives/1000)
rolling.cases$over65 <- covid.az$delta.Cases.Over65 / (covid.az$delta.Positives/1000)
rolling.cases <- data.frame(rolling.cases)
ggplot(rolling.cases, aes(x=Date))+
  geom_line(aes(y=rollmean(under20,7,fill=NA), color='Under 20'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c20.44,7,fill=NA), color='20 - 44'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c45.54,7,fill=NA), color='45 - 54'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c55.64,7,fill=NA), color='55 - 64'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(over65,7,fill=NA), color='Over 65'), linetype='solid', size=1.5)+
  ggtitle('Cases per 1,000 Positives by Age Group - Arizona')+
  scale_color_manual(values=c('Under 20' = '#DF536B','20 - 44' = '#61d04f','45 - 54' = '#2297e6','55 - 64' = '#28e2e5',
                              'Over 65' = '#cd0bbc'))+
  labs(color='Age Group')+
  ylab('Cases/1,000 Positive Tests')+
  theme_bw()
# Cases per 1,000 TOTAL tests, by age group.
# NOTE(review): this section rebuilds and overwrites rolling.cases with a
# different denominator (delta.Tests); consider a small helper to avoid the
# duplicated block.
rolling.cases=NULL
rolling.cases$Date <- covid.az$Date
rolling.cases$under20 <- covid.az$delta.Cases.Under20 / (covid.az$delta.Tests/1000)
rolling.cases$c20.44 <- covid.az$delta.Cases.20.44 / (covid.az$delta.Tests/1000)
rolling.cases$c45.54 <- covid.az$delta.Cases.45.54 / (covid.az$delta.Tests/1000)
rolling.cases$c55.64 <- covid.az$delta.Cases.55.64 / (covid.az$delta.Tests/1000)
rolling.cases$over65 <- covid.az$delta.Cases.Over65 / (covid.az$delta.Tests/1000)
rolling.cases <- data.frame(rolling.cases)
ggplot(rolling.cases, aes(x=Date))+
  geom_line(aes(y=rollmean(under20,7,fill=NA), color='Under 20'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c20.44,7,fill=NA), color='20 - 44'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c45.54,7,fill=NA), color='45 - 54'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(c55.64,7,fill=NA), color='55 - 64'), linetype='solid', size=1.5)+
  geom_line(aes(y=rollmean(over65,7,fill=NA), color='Over 65'), linetype='solid', size=1.5)+
  ggtitle('Cases per 1,000 Tests by Age Group - Arizona')+
  scale_color_manual(values=c('Under 20' = '#DF536B','20 - 44' = '#61d04f','45 - 54' = '#2297e6','55 - 64' = '#28e2e5',
                              'Over 65' = '#cd0bbc'))+
  labs(color='Age Group')+
  ylab('Cases/1,000 Tests')+
  # ylim() drops (not clips) rolling-mean values above 200 from the plot
  ylim(0,200)+
  theme_bw()
|
79508d53f0eebf657341339b40ca7cfd7d66a5a6 | 451233d58b2828dabdec57b4ef95e6ba6e8bae8c | /rcodes/completion.R | 5c1968caf4fe0ce63db3a482bbacc332d24abfc8 | [] | no_license | Miaoyanwang/nonparametric | db24bbfd196077dffab58514202b5e992768b715 | 24cf0a188b53f448245742b97929682a6168da08 | refs/heads/master | 2023-05-03T10:32:54.998042 | 2021-05-19T04:45:22 | 2021-05-19T04:45:22 | 248,854,912 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,824 | r | completion.R | ## nonparametric estimation of probability matrix, Aug 31, 2020
source("SMMKfunctions_con.R")
library("tensorordinal")
library(gridExtra)
set.seed(1)
m = 11; n = 11;b0 = 0;
## simulate X
X=list();index=0;
for(i in 1:m){
for(j in 1:n){
index=index+1;
X[[index]]=matrix(0,nrow=m,ncol=n)
X[[index]][i,j]=1
}
}
## simulate probability matrix
#B1=matrix(runif(m*r,-1,1),nrow = m)
#B2=matrix(runif(m*r,0,1),nrow = m)
#signal=5*B1%*%t(B2)
#levelplot(signal)
signal=matrix(2,nrow=n,ncol=m)
signal[1:9,1:9]=-2
signal[1:5,1:5]=2
levelplot(signal)
###simulate probability based on logistic model
prob=exp(signal)/(1+exp(signal))
hist(prob)
## true step probability
H=10
prob_step=array(0,dim=c(n,m,H-1))
for(h in 1:(H-1)){
prob_step[,,h]=1*(prob<h/H)
}
## simulate Y (1 replicate)
nrep=1
Y=array(0,dim=c(m,n,nrep))
for(rep in 1:nrep){
Y[,,rep]=matrix(rbinom(m*n,1,prob),nrow=m,ncol=n,byrow=F)*2-1
}
levelplot(apply(Y,c(1,2),mean))
#levelplot(prob)
y_est=array(0,dim=c(n,m,nrep,H-1))
for(h in 1:(H-1)){
for(rep in 1:nrep){
con=SMMK_con(X,c(t(Y[,,rep])),r=1,kernel_row="linear",kernel_col="linear",cost = 1, rep = 2, p =1-h/H)
y_est[,,rep,h]=matrix(con$fitted,nrow=m,ncol=n,byrow=T)
}
}
est=prob_est(sign(y_est)[,,1,])
cum=array(0,dim=c(n,m,(H-1)))
for(h in 1:(H-1)){
for(i in 1:n){
for(j in 1:m){
cum[i,j,h]=sum(sign(y_est)[i,j,1,1:h]==-1)/H
}
}
}
### approach 2: parametric model
data=array(0,dim=c(m,n,2))
data[,,1]=Y
data[,,2]=Y
res=fit_ordinal(data[,,1:2],c(4,4,2),omega=0,alpha=10)
est2=estimation(res$theta,res$omega,type="mode")
p = matrix(theta_to_p(res$theta,res$omega)[,1],nrow=n,ncol=m)
levelplot(1-p,main="parametric estimation", at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
## plot
p1=levelplot(prob,main="true probability",at=seq(0, 1, length.out=100),col.regions = gray(100:0/100),cex=2)
p2=levelplot((1+Y[,,1])*0.5,main="binary observation",at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
p3=levelplot(est,main="nonparametric estimation",at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
p4=levelplot(1-p,main="parametric (logistic-model) estimation", at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
grid.arrange(p1, p2, p3,p4,ncol=4)
levelplot(sign(prob_step),main="intermediate estimation: step function", at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
levelplot(cum,main="intermediate estimation: cum function",at=seq(0, 1, length.out=100),col.regions = gray(100:0/100))
#### help function
prob_est <- function(est) {
  # Turn a stack of step-classifier decisions into cell-wise probabilities.
  #
  # est: an n x m x (H-1) array; slice h holds the -1/+1 decision of the
  #      classifier for quantile level h/H at every cell.
  # Returns an n x m matrix: for each cell, the index at which the running
  # sum of decisions along the third axis peaks, divided by H = levels + 1.
  H <- dim(est)[3] + 1
  peaks <- apply(est, c(1, 2), function(decisions) {
    which.max(cumsum(decisions)) / H
  })
  # matrix() strips any dimnames so the result matches the plain matrix
  # produced by the original double loop.
  matrix(peaks, nrow = dim(est)[1], ncol = dim(est)[2])
}
|
ca4b415424499b5f74b9fd230ccbae1e2e2b2fa3 | 82b5268313427938259bb90c5f19574e349df6a5 | /man/df_opt_dtgch_cbem4_rrlop.Rd | a5dcf8e4c8d135783cf4d6ea876ddc7b9024299c | [
"MIT"
] | permissive | FanWangEcon/PrjOptiAlloc | 72e749fda9d452a5aa1a202503bd8371b2501901 | d9dd2aef97f5cb8a12ef89abc585789bf188df49 | refs/heads/master | 2022-02-12T09:15:17.058676 | 2022-01-24T04:51:59 | 2022-01-24T04:51:59 | 231,813,064 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,443 | rd | df_opt_dtgch_cbem4_rrlop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ffd_opt_dtgch_cbem4_rrlop.R
\docType{data}
\name{df_opt_dtgch_cbem4_rrlop}
\alias{df_opt_dtgch_cbem4_rrlop}
\title{Optimal Linear Allocation based on dtgch cbem4 solved by solin relow}
\format{
csv
}
\source{
\url{https://www.sciencedirect.com/science/article/pii/S1570677X16300107}
}
\usage{
data(df_opt_dtgch_cbem4_rrlop)
}
\description{
Generated by *\url{https://fanwangecon.github.io/PrjOptiAlloc/articles/ffv_opt_solin_relow.html}*, opti allocate and expected outcome.
solin: linear solution; relow: relative to the lowest.
}
\examples{
data(df_opt_dtgch_cbem4_rrlop)
ar_opti_inpalc <- df_opt_dtgch_cbem4_rrlop[['opti_allocate']]
ar_opti_expout <- df_opt_dtgch_cbem4_rrlop[['opti_exp_outcome']]
ar_opti_inpalc_cv <- (ar_opti_inpalc - mean(ar_opti_inpalc))/sd(ar_opti_inpalc)
ar_opti_expout_cv <- (ar_opti_expout - mean(ar_opti_expout))/sd(ar_opti_expout)
# Print
head(df_opt_dtgch_cbem4_rrlop, 10)
summary(df_opt_dtgch_cbem4_rrlop)
print('opti allocation and outcomes rescaled:')
print(cbind(ar_opti_expout_cv, ar_opti_inpalc_cv))
}
\references{
“Early life height and weight production functions with endogenous energy and protein inputs”, September 2016, 65-81, Economics & Human Biology, 22, Esteban Puentes, Fan Wang, Jere R. Behrman, Flavio Cunha, John Hoddinott, John A. Maluccio, Linda S. Adair, Judith Borja and Reynaldo Martorell
}
\keyword{datasets}
|
09b8ba89f3c32fb8df1b9bd69890259eae139acc | d5e1d90cc9aab6920c6ee38f863e49c3a2bbf91a | /R/get_academic_pipeline.R | 9b2d9e7b85dd08ac4947f84e5fe4502c466d9960 | [
"MIT"
] | permissive | heike/CyChecks | df32c67bc3072bf3b6349dc22b730b18c7c7e476 | cda09e4d5fbd355e0e32dbb3691776ace3284952 | refs/heads/master | 2020-05-17T16:15:24.452392 | 2019-04-30T14:40:43 | 2019-04-30T14:40:43 | 183,812,546 | 0 | 0 | NOASSERTION | 2019-04-27T19:15:06 | 2019-04-27T19:15:06 | null | UTF-8 | R | false | false | 2,328 | r | get_academic_pipeline.R | #' Create a dataframe of salary data for positions in the academic pipeline
#'
#' @description Create a dataframe that contains salary data for graduate
#' assistants, lecturers, postdocs, and professors
#' @name get_academic_pipeline
#' @title get_academic_pipeline
#' @usage get_academic_pipeline(dataframe)
#' @importFrom dplyr mutate
#' @importFrom dplyr filter
#' @param dataframe A dataframe of salary data with a variable 'position'
#'
#' @return academic_pipeline A dataframe of graduate assistants', lecturers' and
#' professors' salary data with tidied position and level variables
#' @export
#'
#' @examples
#' get_academic_pipeline(dataframe)
#'
get_academic_pipeline <- function(dataframe=all_sals){
  # Tidy grad-assistant / lecturer / postdoc rows from the SUPPLIED data.
  # BUG FIX: the original piped the global `all_sals` here, silently ignoring
  # the `dataframe` argument for everything except get_profs() below.
  df <- dataframe %>%
    dplyr::mutate(position = as.character(position)) %>%
    dplyr::filter(grepl('GRAD ASST|POSTDOC|LECTURER', position)) %>%
    # Collapse detailed titles into a simplified level label.
    # BUG FIX: "GRAD ASST-TA/RA" must be handled before "GRAD ASST-TA";
    # the original checked "-TA" first, so TA/RA rows were labelled 'TA'
    # and the TA/RA rule could never fire.
    dplyr::mutate(position_simplified = gsub(".*GRAD ASST-TA/RA.*",'TA/RA', position),
                  position_simplified = gsub(".*GRAD ASST-TA.*",'TA', position_simplified),
                  position_simplified = gsub(".*GRAD ASST-RA.*",'RA', position_simplified),
                  position_simplified = gsub(".*GRAD ASST-AA.*",'AA', position_simplified),
                  position_simplified = gsub(".*GRAD ASST-OTHER.*",'other', position_simplified),
                  position_simplified = replace(position_simplified,position_simplified=="GRAD ASST",'graduate assistant'),
                  position_simplified = gsub(".*SENIOR LECTURER.*",'senior', position_simplified),
                  position_simplified = replace(position_simplified,position_simplified=="LECTURER",'lecturer'),
                  position_simplified = gsub(".*RES ASSOC.*",'research associate', position_simplified),
                  position_simplified = gsub(".*FELLOW.*",'fellow', position_simplified),
                  position_simplified = gsub(".*TRAINEE.*",'trainee', position_simplified))
  # Collapse the raw position titles into three broad categories.
  df <- df %>%
    dplyr::mutate(position = gsub(".*GRAD ASST.*",'graduate assistant', position),
                  position = gsub(".*LECTURER.*",'lecturer', position),
                  position = gsub(".*POSTDOC.*",'postdoc', position))
  # Professor rows are tidied by get_profs() (defined elsewhere in the package).
  profs_df <- get_profs(dataframe)
  # Stack the two groups; both must share identical columns for rbind().
  academic_pipeline <- rbind(df, profs_df)
  return(academic_pipeline)
}
|
8a18d8c9d6c35ef674c8eb8d6a5c9a1eac7cade2 | 814214910998c704b9b0fb40ba37649f31721154 | /run_analysis.R | 10a0432f7d09362c087c9fa0fa49dac84b1e4f12 | [] | no_license | juandarnay/GettingandCleaningData | b406057bbc1d62ee76ee9dce84d5ae9d2f793aca | 329b3886130a5126d97daca24c1641174b09e210 | refs/heads/master | 2023-01-21T13:24:09.127703 | 2020-12-03T21:57:32 | 2020-12-03T21:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,790 | r | run_analysis.R | # Download and read the test and training sets from the txt files provided
# Create a local folder for the dataset if it is not already present.
if(!file.exists("UCI HAR Dataset")){
  dir.create("UCI HAR Dataset")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./UCI HAR Dataset.zip")
unzip("UCI HAR Dataset.zip")

# Relative path to the extracted archive.
# BUG FIX: the original "/UCI HAR Dataset" was an absolute path from the
# filesystem root, so the read.table() calls below could not find the files
# just unzipped into the working directory.
path <- "UCI HAR Dataset"

# BUG FIX: the activity-label files in the official archive are named
# "y_train.txt"/"y_test.txt" (lowercase y); the capitalised names only
# worked on case-insensitive filesystems.
subj_train <- read.table(file.path(path, "train", "subject_train.txt"))
X_train <- read.table(file.path(path, "train", "X_train.txt"))
Y_train <- read.table(file.path(path, "train", "y_train.txt"))
subj_test <- read.table(file.path(path, "test", "subject_test.txt"))
X_test <- read.table(file.path(path, "test", "X_test.txt"))
Y_test <- read.table(file.path(path, "test", "y_test.txt"))

# Merge subject ids, activity labels, and measurements; stack train + test.
train <- cbind(subj_train, Y_train, X_train)
test <- cbind(subj_test, Y_test, X_test)
HumanActivity <- rbind(train, test)

# Feature names and readable activity labels.
features <- read.table(file.path(path, "features.txt"), as.is = TRUE)
activity_labels <- read.table(file.path(path, "activity_labels.txt"), as.is = TRUE)
colnames(HumanActivity) <- c("Subject", "Activity", features[ ,2])
HumanActivity$Activity <- factor(HumanActivity$Activity, activity_labels[,1], activity_labels[,2])

## Extracts only the measurements on the mean and standard deviation for each measurement
## and create a new datatable with the selected variables, including the "Subject ID" and "Activity"
# BUG FIX: "std" was misspelled "sdt" in the original pattern, which
# silently dropped every standard-deviation column.
columns_extr <- grepl("Subject|Activity|mean|std", colnames(HumanActivity))
HumanActivity <- HumanActivity[, columns_extr]
HumanActivity_Cols <- colnames(HumanActivity)

## Remove special characters and typos
HumanActivity_Cols <- gsub("\\-", " ", HumanActivity_Cols)
HumanActivity_Cols <- gsub("BodyBody", "Body", HumanActivity_Cols)

## Explicit some of the indicators in the variables' names
HumanActivity_Cols <- gsub("^f", "frequencyDomain ", HumanActivity_Cols)
HumanActivity_Cols <- gsub("^t", "timeDomain ", HumanActivity_Cols)
HumanActivity_Cols <- gsub("Acc", "Accelerometer", HumanActivity_Cols)
HumanActivity_Cols <- gsub("Gyro", "Gyroscope", HumanActivity_Cols)
HumanActivity_Cols <- gsub("Mag", "Magnitude", HumanActivity_Cols)
HumanActivity_Cols <- gsub("Freq", "Frequency", HumanActivity_Cols)
# BUG FIX: "(" and ")" are regex metacharacters, so the unescaped "mean()"
# pattern matched the bare word "mean" and left "()" behind in the cleaned
# names. Strip the parentheses first, then capitalise.
HumanActivity_Cols <- gsub("\\(\\)", "", HumanActivity_Cols)
HumanActivity_Cols <- gsub("mean", "Mean", HumanActivity_Cols)
HumanActivity_Cols <- gsub("std", "StandardDeviation", HumanActivity_Cols)
colnames(HumanActivity) <- HumanActivity_Cols

## Create a second, independent tidy set with the average of each variable for each activity and each subject
HumanActivity_Summary <- aggregate(. ~ Subject + Activity, data = HumanActivity, mean)

## Save the tidy set as a text file
write.table(HumanActivity_Summary, "HumanActivity_Tidy_Summary.txt", row.names = FALSE)
|
155455414e175b4b32faf6b1e4ae7b0b2030afd7 | 17e0b4e4c0fddaa71ce2b137b7f59d17fa47243b | /res/lsr_zero2.r | ea0d3c4c956d833b242c3b2d4eceb14f8c85961a | [
"MIT"
] | permissive | JSpuri/EmuParadise | 6f6d26c43d9dce8f05448b6c07db133d691e39b2 | b8f6cf8823f8553f28dab5c6b44df20978ad6ba0 | refs/heads/master | 2020-06-28T18:33:56.341244 | 2019-11-22T22:49:53 | 2019-11-22T22:49:53 | 200,309,043 | 0 | 0 | MIT | 2019-09-27T15:59:31 | 2019-08-02T23:26:20 | C | UTF-8 | R | false | false | 482 | r | lsr_zero2.r | | pc = 0xc002 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc004 | a = 0xfe | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110101 |
| pc = 0xc006 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110111 |
| pc = 0xc008 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110111 | MEM[0x0001] = 0x00 |
| pc = 0xc00a | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x0001] = 0x00 |
|
34c2290b5c20ddf39b7a09487cec02db81937933 | 6e5efc0b6b6b37c735c1c773531c41b51675eb10 | /man/RerenderMetPAGraph.Rd | 09af23d72ed71d2d73e86934e2a20852fbc3bef1 | [
"GPL-2.0-or-later"
] | permissive | xia-lab/MetaboAnalystR | 09aa09c9e57d7da7d73679f5a515eb68c4158e89 | 9edbbd1e2edda3e0796b65adf440ad827abb7beb | refs/heads/master | 2023-08-10T06:08:56.194564 | 2023-08-01T15:13:15 | 2023-08-01T15:13:15 | 109,994,826 | 268 | 165 | MIT | 2023-03-02T16:33:42 | 2017-11-08T15:38:12 | R | UTF-8 | R | false | true | 892 | rd | RerenderMetPAGraph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrich_path_graphics.R
\name{RerenderMetPAGraph}
\alias{RerenderMetPAGraph}
\title{Redraw current graph for zooming or clipping then return a value}
\usage{
RerenderMetPAGraph(mSetObj = NA, imgName, width, height, zoom.factor = NA)
}
\arguments{
\item{mSetObj}{Input name of the created mSet Object}
\item{imgName}{Input the name of the plot}
\item{width}{Input the width. There are two default widths: width = NULL gives 10.5,
and width = 0 gives 7.2. Otherwise users can input their own width.}
\item{height}{Input the height of the created plot.}
\item{zoom.factor}{zoom factor, numeric}
}
\description{
Redraw current graph for zooming or clipping then return a value
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
bf08ed20cf3d1b66f532e10902b9bb7d9fae7d60 | cabfade76bc5e7ca91184e5f0a135b1739a0f4cc | /R/server.R | 499fd48511722e1a8200c18942409b992e98ffa0 | [] | no_license | RuiyangXu/PredSoccer | 6a0d0c8d2c766a82b0a8ea8acaaa6ac3023f85f6 | 3588270fa1228c748a8a987ae2f5418e1b5c7c3f | refs/heads/master | 2021-01-19T09:13:33.549560 | 2017-04-09T21:55:53 | 2017-04-09T21:55:53 | 87,741,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,447 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(stringi)
library(ggplot2)
library(plotly)
source('functions.R')
shinyServer(function(input, output, session) {
# Bar chart of the predicted win/draw/win probabilities for the chosen fixture.
output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
# predict_byMatch() is presumably defined in the sourced functions.R and
# returns one probability per outcome keyed "-1"/"0"/"1" -- verify there.
result <- as.data.frame(t(predict_byMatch(input$team_a, input$team_b, input$location)))
colnames(result) <- 'prob'
result$outcome <- as.character(rownames(result))
# Convert probabilities to percentages for display.
result$prob <- as.numeric(100 * result$prob)
# Recode numeric outcome labels: -1 = team_b win, 0 = draw, 1 = team_a win.
result$outcome[which(result$outcome=='-1')] <- paste(input$team_b,' Win', sep = '')
result$outcome[which(result$outcome=='0')] <- 'Draw'
result$outcome[which(result$outcome=='1')] <- paste(input$team_a,' Win', sep = '')
ggplot(result, aes(x = outcome, y = as.numeric(prob), fill = outcome)) +
geom_bar(stat='identity') +
ggtitle(paste('Predicted probability of match outcome between ',input$team_a,' and ', input$team_b, sep = '',' at ',input$location)) +
xlab("Match Outcome") +
ylab("Probability based on previous matches") +
# Print the percentage value above each bar.
geom_text(aes(label=prob),size=5, position= position_dodge(width=0.9), vjust=-.5, color="black") +
ylim(0,100)
# ggplotly()
})
# Head-to-head table: per-outcome counts plus a "Recent Matches" row.
output$table1 <- renderTable({
result <- getHeadtoHead(input$team_a, input$team_b)
names(result) <- c("outcome","numbers","Recent Games")
result$outcome <- as.character(result$outcome)
# Same -1/0/1 -> readable-outcome recoding as in distPlot above.
result[which(result$outcome=='-1'),1] <- paste(input$team_b,' Win', sep = '')
result[which(result$outcome=='0'),1] <- 'Draw'
result[which(result$outcome=='1'),1] <- paste(input$team_a,' Win', sep = '')
# Append a total-matches row.
# NOTE(review): `result` has 3 columns but only 2 values are supplied here,
# so the third relies on recycling -- confirm this is intended.
result <- rbind(result, c("Matches", sum(result$numbers)))
# Transpose so each outcome becomes a column.
result <- as.data.frame(t(result))
# Row 3 of the transposed table holds the recent-games strings.
recent_result <- as.character(paste(result[3,1], collapse = " "))
rownames(result) <- NULL
colnames(result) <- NULL
# Keep only the outcome-name and count rows.
result <- result[1:2,]
row <- c("Recent Matches", recent_result, " ", "")
# Coerce all four columns to character so the extra row binds cleanly.
result[,1] <- as.character(result[,1] )
result[,2] <- as.character(result[,2] )
result[,3] <- as.character(result[,3] )
result[,4] <- as.character(result[,4] )
result <- rbind(result ,row)
result
})
# Recent-form strings for each side; getRecentForm() is presumably defined
# in the sourced functions.R -- verify.
output$homeForm <- renderPrint(paste(getRecentForm(input$team_a), collapse = " "))
output$awayForm <- renderPrint(paste(getRecentForm(input$team_b), collapse = " "))
})
|
0fc8e7cf568dd5f2d5320db0af26f634f5f504ec | 61ae80dc84faad496a920817f56b2f2c94cd7d59 | /removals/ipm/IPM-figures.r | 6196a4d9cf9f3d0bdb8cd2d835129ce747185f3f | [] | no_license | pbadler/ExperimentTests | 1f78c6be8c193345bd63e11c7ba65a423bf77ad4 | b2bfeb446a54446210e3f04a010d689dc577f257 | refs/heads/master | 2021-04-09T17:39:42.583837 | 2021-03-16T22:12:05 | 2021-03-16T22:12:05 | 53,069,884 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,894 | r | IPM-figures.r |
# call from removal_analysis_wrapper.r
# box plots
# Load cached IPM simulation output only when it is not already in the
# workspace (lets the calling wrapper pass these objects in directly).
if(!exists("baseline")) baseline <- read.csv("ipm/baselineCover.csv")
if(!exists("baseline.noARTR")) baseline.noARTR <- read.csv("ipm/baselineCover-noARTR.csv")
if(!exists("removal.noARTR")) removal.noARTR <- read.csv("ipm/removalCover-noARTR.csv")
if(!exists("removal.noARTR.maxCI")) removal.noARTR.maxCI <- read.csv("ipm/removalCover-noARTR-maxCI.csv")
# One colour per model scenario; italicised species labels for the x-axis.
myCols<-c("darkgrey","blue3","red3")
sppNames <-c("A. tripartita","H. comata","P. secunda","P. spicata")
# Figure 1: three scenarios per species (column doSpp of each data frame is
# assumed to hold one species' cover values -- confirm upstream).
png("ipm/boxplots.png",height=3.5, width=8, units="in",res=400)
par(tcl=-0.2,mgp=c(2,0.5,0),mar=c(3,4,1,1),cex.lab=1.2)
# Empty canvas; boxplots are overlaid via add=T at offset x positions.
plot(c(1:17),rep(NA,17),ylim=c(0,10),ylab="Cover (%)",xlab="",xaxt="n")
for(doSpp in 1:4){
# Cover is multiplied by 100 (proportion -> percent); species doSpp occupies
# x slots 2:4 shifted by 4 per species.
tmp <- cbind(baseline[,doSpp],baseline.noARTR[,doSpp],removal.noARTR[,doSpp])
boxplot(100*tmp,col=myCols,add=T, at=((2:4)+(doSpp-1)*4),names=rep("",3),xaxt="n")
}
axis(side=1,at=c(3,7,11,15),labels=sppNames,font=4)
legend("topright",c("Baseline model","Baseline model, no ARTR","Removal model, no ARTR"),
fill=myCols,bty="n",cex=0.9)
dev.off()
# same figure as previous, but add max treatment effect simulation
png("ipm/boxplots-maxCI.png",height=3.5, width=8, units="in",res=400)
par(tcl=-0.2,mgp=c(2,0.5,0),mar=c(3,4,1,1),cex.lab=1.2)
plot(c(1:17),rep(NA,17),ylim=c(0,10),ylab="Cover (%)",xlab="",xaxt="n")
for(doSpp in 1:4){
# Fourth column/colour adds the max-CI removal scenario.
tmp <- cbind(baseline[,doSpp],baseline.noARTR[,doSpp],removal.noARTR[,doSpp],removal.noARTR.maxCI[,doSpp])
boxplot(100*tmp,col=c(myCols,"orange"),add=T, at=(seq(1:4)+(doSpp-1)*4),names=rep("",4),xaxt="n")
}
axis(side=1,at=c(3,7,11,15),labels=sppNames,font=4)
# Vertical separators between the four species groups.
abline(v=4.5);abline(v=8.5);abline(v=12.5)
legend("topright",c("Baseline model","Baseline model, no ARTR","Removal model, no ARTR","Removal model, no ARTR, max CI"),
fill=c(myCols,"orange"),bg="white",cex=0.9)
dev.off()
|
29a21ed6806ef2505911cb5ffb326cd8c0954356 | d837a6b0beaede292aa430adbeb38d7ccc1a3363 | /man/manageArchive.Rd | 99d863715fd3b707e4b4c0ff75098938e0058d99 | [] | no_license | jameslhao/dbgapr_dev | 0503d740e4ffb394426638be31da609fb8f7f950 | 4d67dff0b4bd8dbda0bc74c6ca26362e93619f55 | refs/heads/master | 2020-12-30T13:09:21.487245 | 2017-08-15T12:20:00 | 2017-08-15T12:20:00 | 91,335,051 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,586 | rd | manageArchive.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commons_internal.R
\docType{methods}
\name{manageArchive}
\alias{manageArchive}
\alias{manageArchive,Commons,character-method}
\title{(internal) Logging archive manager}
\usage{
manageArchive(object, workFile, archiveDir, type, ...)
\S4method{manageArchive}{Commons,character}(object, workFile, archiveDir,
type = "infojson", ..., maxSizeInMb = 1, keptDay = 90, minFileAge = 1)
}
\arguments{
\item{object}{Commons class object.}
\item{workFile}{a character string. The path to the log file, such as project_setup.log.}
\item{archiveDir}{a character string. The path to the archive directory.}
\item{type}{a character string. The logging type of either 'setup' for project_setup.log or 'process' for data_process.log.}
\item{...}{There are optional arguments.}
\item{maxSizeInMb}{an integer. (optional) The maximum size (MB) of a log file. The log file is moved to the archived_log directory after reaching maxSizeInMb (default 1 MB).}
\item{keptDay}{an integer. (optional) The maximum number of days an archived log file is kept. The archived log file is deleted after keptDay days (default 90 days).}
\item{minFileAge}{an integer. (optional) The minimum age in days of a file can be archived.}
}
\description{
The method moves expired log files into archive and deletes old archived files based on given conditions.
}
\section{Methods (by class)}{
\itemize{
\item \code{object = Commons,workFile = character}: A method of class Commons
}}
\keyword{internal}
|
31f9e9b62c4e1fe56a1f908ae29880ef16eaafcd | 5bdfc99dda831c3b5219a2b9988e0f03a5bf0e4e | /man/SAXLimitsDataAdaptive.Rd | 09235b27f6d0f56466f224678448e0a44f9c6d91 | [
"MIT"
] | permissive | vishalbelsare/FastTSDistances | 1d20fc4c0be39652245da8fcd71e52ce86dbb111 | d7ce4c1dde1c8681558e41c349e856f4e3820888 | refs/heads/master | 2021-09-25T20:45:11.052275 | 2018-10-25T07:25:29 | 2018-10-25T07:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,218 | rd | SAXLimitsDataAdaptive.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AggregationFunctions.R
\name{SAXLimitsDataAdaptive}
\alias{SAXLimitsDataAdaptive}
\title{Symbolic Aggregate Approximation Limits (Flexible)}
\usage{
SAXLimitsDataAdaptive(tsList, alphabetSize = 8)
}
\arguments{
\item{tsList}{A list of numeric vectors/matrixes (uni- or multi-variate time
series).}
\item{alphabetSize}{Number of symbols (intervals).}
}
\value{
The interval boundaries as a monotonically increasing vector from negative
infinity to positive infinity.
}
\description{
Calculates the interval boundaries for a SAX representation based on the real
distribution of values in a time series list. Uses evenly-spaced quantiles on
the real data instead of the normal distribution proposed by the original authors.
}
\section{References}{
Lin, J., Keogh, E., Lonardi, S. & Chiu, B. (2003). A symbolic representation
of time series, with implications for streaming algorithms. In \emph{Proceedings
of the 8th acm sigmod workshop on research issues in data mining and knowledge
discovery} (pp. 2-11). ACM.
}
\seealso{
Other SAX functions: \code{\link{SAXLimitsOriginal}},
\code{\link{SAX_fast}}
}
|
8128a7a819ff7f60f067bf958b03245e3bd65207 | c606feaab4dd54e5084f5d11edbed968ff5becbd | /R/bindCNRX.R | 5381165b0b9b02adf07002c953afa2a1f76d1241 | [
"Artistic-2.0"
] | permissive | lwaldron/ccleWrap | 31d0a2c4911152982b9cb658c25931862d3a1c10 | d1eb6b8a3ac80c7c51ed0619cd4a4b0fefb40ca0 | refs/heads/master | 2021-01-19T20:18:48.603475 | 2015-06-17T21:53:39 | 2015-06-17T21:53:39 | 32,178,773 | 1 | 1 | null | 2015-06-17T21:53:40 | 2015-03-13T20:17:08 | R | UTF-8 | R | false | false | 413 | r | bindCNRX.R |
bindCNRX = function( segrng, rxobj ) {
  # Attach drug-response metadata from `rxobj` to the segment ranges in
  # `segrng`. Everything is keyed by cell-line name: the response vectors
  # by repairNames(rxobj), the ranges by their CCLE_name column.
  cell_lines <- repairNames(rxobj)
  ic50_by_line <- setNames(IC50(rxobj), cell_lines)
  organ_by_line <- setNames(organ(rxobj), cell_lines)
  compound_by_line <- setNames(compound(rxobj), cell_lines)
  names(segrng) <- segrng$CCLE_name
  # Restrict to cell lines present in both objects (IC50-name order).
  shared <- intersect(names(ic50_by_line), names(segrng))
  segrng <- segrng[shared]
  segrng$IC50 <- ic50_by_line[shared]
  segrng$organ <- organ_by_line[shared]
  segrng$compound <- compound_by_line[shared]
  segrng
}
|
fbd3eec6d646ee5ffba5e1fbb37734659b203893 | f0210d6c7a3bcc5f6fa4e40aad4fae560a278f10 | /R_Data_Analysis/Tanzania_Pump_Data_Analysis.R | 6f9a7aef27ac4e20fba8f237b372ebdf4cfe4107 | [] | no_license | Kuntal-sys/Tanzania_Water_Pump | 283625f22bb9ea51d312b759fcb39dbfd48b47fa | 37f9b9b708af1e4fe6bd1e99df6981d691ea45d4 | refs/heads/master | 2022-11-24T11:35:34.278111 | 2020-06-29T04:36:30 | 2020-06-29T04:36:30 | 274,038,668 | 0 | 0 | null | 2020-06-22T04:21:49 | 2020-06-22T04:21:49 | null | UTF-8 | R | false | false | 16,705 | r | Tanzania_Pump_Data_Analysis.R | library(tidyverse)
# import datasets
pump_data <- read.csv(file='clean_pump.csv',check.names=T,stringsAsFactors = F)

# Shared summary logic: pump-status counts and percentages for any grouping.
# `...` takes bare column names that are forwarded to dplyr::group_by(),
# replacing the sixteen near-identical copy-pasted summarize() pipelines of
# the original script. The temporary `total_pumps` column is dropped so each
# result has exactly the original columns, in the original order.
summarize_status <- function(data, ...) {
  data %>%
    group_by(...) %>%
    summarize(
      Functional = sum(status_group_functional),
      Functional_Needs_Repair = sum(status_group_functional.needs.repair),
      NonFunctional = sum(status_group_non.functional),
      total_pumps = Functional + Functional_Needs_Repair + NonFunctional,
      Functional_Percentage = Functional / total_pumps,
      Needs_Repair_Percentage = Functional_Needs_Repair / total_pumps,
      NonFunctional_Percentage = NonFunctional / total_pumps
    ) %>%
    select(-total_pumps)
}

# One summary table per candidate predictor of pump status.
summarize_region <- summarize_status(pump_data, region)
summarize_basin <- summarize_status(pump_data, basin)
summarize_scheme_management <- summarize_status(pump_data, scheme_management)
summarize_permit <- summarize_status(pump_data, permit)
summarize_extraction_type <- summarize_status(pump_data, extraction_type)
summarize_management <- summarize_status(pump_data, management)
summarize_payment <- summarize_status(pump_data, payment)
summarize_water_quality <- summarize_status(pump_data, water_quality)
summarize_quantity <- summarize_status(pump_data, quantity)
summarize_source <- summarize_status(pump_data, source)
summarize_source_class <- summarize_status(pump_data, source_class)
summarize_waterpoint_type <- summarize_status(pump_data, waterpoint_type)

# Multi-way breakdowns centred on water quantity.
summarize_quantity_with_additional <- summarize_status(pump_data, quantity, region, source, waterpoint_type)
summarize_quantity_region <- summarize_status(pump_data, quantity, region)
summarize_quantity_source <- summarize_status(pump_data, quantity, source)
# create quantity, waterpoint summary table
summarize_quantity_waterpoint <- pump_data %>% group_by(quantity, waterpoint_type) %>% summarize(
Functional=sum(status_group_functional),
Functional_Needs_Repair=sum(status_group_functional.needs.repair),
NonFunctional=sum(status_group_non.functional),
Functional_Percentage=sum(status_group_functional)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)),
Needs_Repair_Percentage=sum(status_group_functional.needs.repair)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)),
NonFunctional_Percentage=sum(status_group_non.functional)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)))
# create quantity, extraction_type summary table
summarize_quantity_extraction_type <- pump_data %>% group_by(quantity, extraction_type) %>% summarize(
Functional=sum(status_group_functional),
Functional_Needs_Repair=sum(status_group_functional.needs.repair),
NonFunctional=sum(status_group_non.functional),
Functional_Percentage=sum(status_group_functional)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)),
Needs_Repair_Percentage=sum(status_group_functional.needs.repair)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)),
NonFunctional_Percentage=sum(status_group_non.functional)/(sum(status_group_functional)+sum(status_group_functional.needs.repair)+sum(status_group_non.functional)))
# Chi-squared tests of independence between pump status (status_group) and
# each categorical predictor. In the original run every predictor showed a
# highly significant association (p < 2.2e-16; permit: p = 1.542e-15).
# One loop replaces twelve identical copy-pasted stanzas.
chisq_vars <- c("region", "basin", "scheme_management", "permit",
                "extraction_type", "management", "payment", "water_quality",
                "quantity", "source", "source_class", "waterpoint_type")
for (v in chisq_vars) {
  tbl <- table(pump_data[[v]], pump_data$status_group)  # generate contingency table
  print(v)
  print(chisq.test(tbl))                                # compare categorical distributions
}
|
c0e5a9f6e48824a90885e66be69cc56ae56d0a03 | 7d9955652fd699a515b9b4e178daa9ba5b903cdd | /Auswertung/EvaluationThroughput.R | d2777ed2623d874cf83e8779b3090687023d5ef5 | [] | no_license | andreasPfaffelhuber/Faster-than-in-Real-Time | b0bb07e74e3958121d069d23eed01f6d736b211f | b9f17d75c15916454fee9713bc146abbbbcfc975 | refs/heads/master | 2021-03-02T15:13:05.971013 | 2020-03-19T22:57:38 | 2020-03-19T22:57:38 | 245,878,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,109 | r | EvaluationThroughput.R | # Load libraries
library(tidyverse)
library(ggpubr)
library(rstatix)
library(ggpmisc)
options("scipen"=100, "digits"=5)
# Results accumulator: one row per trial, filled by the processing loop below.
# NOTE(review): the name `summary` shadows base::summary(); consider renaming.
summary <- data.frame(PredictionModel = integer(), TotalTime = integer(),
                      Proband = integer(), stringsAsFactors = FALSE)
# Input CSVs, one per participant. paste0() is vectorised, so no loop (and no
# repeated c() growth) is needed.
filenames <- paste0("Daten/FittsLaw_Unity/Proband_", 1:24, "_Unity_FittsLawTask.csv")
# Prepare data for evaluation: for each participant file compute the Fitts'
# law quantities (ID, IDe, SDx, mean Ae, throughput) per trial and stack the
# results into `summary`.
library(dplyr)  # hoisted: was previously attached on every loop iteration
for (i in 1:length(filenames)) {
  print(filenames[i])
  df <- read.table(filenames[i], header = TRUE, sep = ";")
  # Remove unimportant columns
  df$CircleCounter <- NULL
  df$StartPointX <- NULL
  df$StartPointY <- NULL
  df$StartPointZ <- NULL
  df$EndPointX <- NULL
  df$EndPointY <- NULL
  df$EndPointZ <- NULL
  df$SelectPointX <- NULL
  df$SelectPointY <- NULL
  df$SelectpointZ <- NULL
  df$NormalizedSelectionPointX <- NULL
  df$NormalizedSelectionPointY <- NULL
  df$NormalizedSelectionPointZ <- NULL
  df$DX <- NULL
  df$AE <- NULL
  # Keep only the summary row written after each trial ended
  df <- filter(df, Notice == "endedTrial")
  # Coerce columns to the correct types (the logs use decimal commas)
  df[, 1] <- as.numeric(df[, 1])
  df[, 9] <- gsub(",", ".", df[, 9])
  df[, 10] <- gsub(",", ".", df[, 10])
  df[, 9] <- as.numeric(df[, 9])
  df[, 10] <- as.numeric(df[, 10])
  df[, 8] <- as.numeric(as.character(df[, 8]))
  df[, 7] <- as.numeric(as.character(df[, 7]))
  df[, 2] <- as.numeric(as.character(df[, 2]))
  df[, 13] <- gsub(",", ".", as.character(df[, 13]))
  df[, 14] <- gsub(",", ".", as.character(df[, 14]))
  # Per-trial Fitts' law quantities.
  # BUG FIX: the inner loop previously reused `i`, shadowing the file index;
  # seq_len() also behaves correctly when a file yields zero trials.
  for (trial in seq_len(nrow(df))) {
    dx <- strsplit(df[trial, 13], "_")               # per-selection deviations
    df[trial, 15] <- sd(as.numeric(unlist(dx)))      # SDx
    ae <- strsplit(df[trial, 14], "_")               # per-selection amplitudes
    df[trial, 16] <- mean(as.numeric(unlist(ae)))    # mean Ae
    id_shannon <- log2(df[trial, 9] / (df[trial, 10]) + 1)               # nominal ID
    id_shannon_e <- log2((df[trial, 16] / (df[trial, 15] * 4.133)) + 1)  # effective ID
    mean_movement_time <- (df[trial, 11] / 16.0) / 1000                  # seconds per selection
    throughput <- id_shannon_e / mean_movement_time                      # bits per second
    df[trial, 17] <- id_shannon
    df[trial, 18] <- id_shannon_e
    df[trial, 19] <- mean_movement_time
    df[trial, 20] <- throughput
  }
  # Name and type the derived columns, then append to the global summary
  colnames(df)[15:20] <- c("SDx", "Mean AE", "ID_Shannon", "IDE",
                           "MeanMovementTime", "Throughput")
  for (col in 15:20) {
    df[, col] <- as.numeric(df[, col])
  }
  summary <- rbind(summary, df)
}
# Average throughput per participant x prediction-model condition,
# one row per cell of the design.
total_throughputs <- data.frame(matrix(ncol = 3, nrow = 0))
number_of_participants <- 24
prediction_models <- c(-12, 0, 12, 24, 36, 48)
for (participant in 1:number_of_participants) {
  for (predicton_model in prediction_models) {
    # All trials of this participant under this condition.
    cell <- summary[summary$PredictionModel == predicton_model &
                      summary$ProbandenID == participant, ]
    total_throughputs <- rbind(total_throughputs,
                               c(participant, predicton_model, mean(cell$Throughput)))
  }
}
total_throughputs_names <- c("ProbandenID", "PredictionModel", "Throughput")
colnames(total_throughputs) <- total_throughputs_names
# Change grouping columns into factors for Anova with repeated measures
total_throughputs$ProbandenID <- factor(total_throughputs$ProbandenID)
total_throughputs$PredictionModel <- factor(total_throughputs$PredictionModel)
# Get Simple Summarizing Statistics
total_throughputs %>%
  #group_by(PredictionModel) %>%
  get_summary_stats(Throughput, type = "mean_sd")
# Get Simple Boxplot
#bxp <- ggboxplot(total_throughputs, x = "PredictionModel", y = "Throughput", add = "point")
#bxp
# Relabel the prediction-model levels as time offsets (4 model steps = 48 ms).
total_throughputs$PredictionModel <- factor(total_throughputs$PredictionModel, levels = c("-12", "0", "12", "24", "36", "48"),
                                            labels = c("-48 ms", "Base", "+48 ms", "+96 ms", "+144 ms", "+192 ms"))
# Boxplot of throughput per condition; the X marks the condition mean.
# NOTE(review): `fun.y` is deprecated in newer ggplot2 (use `fun`), and
# chaining ggsave() with `+` relies on side effects rejected by newer
# ggplot2 versions — confirm the installed version accepts both.
ggplot(total_throughputs,aes(x=PredictionModel, y=Throughput, fill=PredictionModel)) +
  geom_boxplot(outlier.shape=16,outlier.size=2, position=position_dodge2(width=0.9, preserve="single"), width=0.9) +
  ylab(label = "Throughput [bit/s]") +
  xlab(label ="") +
  scale_x_discrete(position = "bottom", labels = NULL)+
  stat_summary(fun.y=mean, geom="point", shape=4, size=5, color="black") +
  #ggtitle("Throughput")
  theme_light() +
  #theme(legend.position = "none") +
  guides(fill=guide_legend(title="Prediction Time Offset")) +
  theme(legend.position="bottom", text = element_text(size=20)) +
  ggsave("boxplotchart_Throughput.pdf", width=10, height=6, device=cairo_pdf)
# Check Assumptions for repeated measures Anova
total_throughputs %>%
  group_by(PredictionModel) %>%
  identify_outliers(Throughput)
# No extreme outliers => Outlier Assumption is met
# Check Normality Assumption
total_throughputs %>%
  group_by(PredictionModel) %>%
  shapiro_test(Throughput)
# No condition with p < 0.05 => Normality Assumption is met
# NOTE(review): this conclusion contradicts the comment below that normality
# is NOT given (the stated reason for using the Friedman test) — reconcile.
ggqqplot(total_throughputs, "Throughput", facet.by = "PredictionModel")
# This would be the repeated measures anova code, but is not used here since the Prerequisits are not met
# (Assumption of Normality is not given for total throughput)
#res.aov <- anova_test(data = total_throughputs, dv = Throughput, wid = ProbandenID, within = PredictionModel)
#get_anova_table(res.aov)
# Would compute group comparisons using pairwise t tests with Bonferroni multiple testing correction method if Anova is significant
#pwc <- total_throughputs %>% pairwise_t_test(Throughput ~ PredictionModel, paired = TRUE, p.adjust.method = "bonferroni")
#pwc
# Since the prerequisits for a repeated measures anova are not met, we use a non-parametric alternative, the Friedmann-Test
res.fried <- total_throughputs %>% friedman_test(Throughput ~ PredictionModel |ProbandenID)
res.fried
# p < 0.05 => there are significant differences between the groups
# Compute effect size
res.fried.effect <- total_throughputs %>% friedman_effsize(Throughput ~ PredictionModel |ProbandenID)
res.fried.effect
# Compute group comparisons using pairwise Wilcoxon signed-rank tests with Bonferroni multiple testing correction method
pwc <- total_throughputs %>% wilcox_test(Throughput ~ PredictionModel, paired = TRUE, p.adjust.method = "bonferroni")
pwc
# Visualization: box plots with p-values
pwc <- pwc %>% add_xy_position(x = "PredictionModel")
ggboxplot(total_throughputs, x = "PredictionModel", y = "Throughput", add = "point") +
  stat_pvalue_manual(pwc, hide.ns = TRUE) +
  labs(
    subtitle = get_test_label(res.fried, detailed = TRUE),
    caption = get_pwc_label(pwc)
  )
# Visualize Fitts Slope for Throughput
# Mean throughput per (target distance, target width, prediction model) cell,
# keyed by the cell's nominal index of difficulty (column 17 = ID_Shannon).
total_throughputs_per_condition_and_id <- data.frame(matrix(ncol = 3, nrow = 0))
fitts_width <- c(1.5,2.5,3.0)
fitts_target_width <- c(0.15,0.30,0.70)
prediction_models <- c(-12, 0, 12, 24, 36, 48)
for (width in fitts_width) {
  for (target_width in fitts_target_width) {
    for (predicton_model in prediction_models) {
      subset_id_and_cond <- filter(summary, BigCircleRadius == width, SmallCircleRadius == target_width, PredictionModel == predicton_model)
      id_s <- subset_id_and_cond[1,17]
      mean_throughput_for_id_and_cond <- mean(subset_id_and_cond$Throughput)
      total_throughputs_per_condition_and_id <- rbind(total_throughputs_per_condition_and_id, c(id_s, predicton_model,mean_throughput_for_id_and_cond ))
    }
  }
}
total_throughputs_per_condition_and_id_names <- c("ID_Shannon", "PredictionModel", "Throughput")
colnames(total_throughputs_per_condition_and_id) <- total_throughputs_per_condition_and_id_names
total_throughputs_per_condition_and_id$PredictionModel <- factor(total_throughputs_per_condition_and_id$PredictionModel)
# Per-condition linear fit of throughput over ID with equation/R^2 labels.
my.formula <- y ~ x
ggplot(total_throughputs_per_condition_and_id, aes(x=ID_Shannon, y = Throughput, color=PredictionModel)) +
  coord_fixed(ratio = 1) +
  geom_point() +
  geom_smooth(method="lm",se=FALSE, formula = my.formula) +
  ggtitle("Fitts' Law Model: Movement time over ID") +
  stat_poly_eq(formula = my.formula,
               eq.with.lhs = "italic(hat(y))~`=`~",
               aes(label = paste(..eq.label.., ..rr.label.., sep = "~~~")),
               vstep = 0.03,
               show.legend = TRUE,
               size = 2.4,
               parse = TRUE) +
  ylab(label = "Throughput [bit/s]") +
  scale_x_continuous("ID [bit]", position = "bottom")+
  theme_light() +
  ggsave("lin_Throughput_ID.pdf", width=8, height=6, device=cairo_pdf)
## regression
# Descriptive statistics helper: mean, sd, standard error and 90% CI half-width.
descStats <- function(x) c(mean = mean(x),
                           sd = sd(x), se = sd(x)/sqrt(length(x)),
                           ci = qt(0.95,df=length(x)-1)*sd(x)/sqrt(length(x)))
# Recover numeric prediction-model values for the quadratic trend fit.
total_throughputs_2 <- total_throughputs
total_throughputs_2$PredictionModel <- factor(total_throughputs_2$PredictionModel, levels = c("-48 ms", "Base", "+48 ms", "+96 ms", "+144 ms", "+192 ms"),
                                              labels =c("-12", "0", "12", "24", "36", "48"))
total_throughputs_2$PredictionModel <- as.numeric(levels(total_throughputs_2$PredictionModel))[total_throughputs_2$PredictionModel]
total_throughputs_2_means <- aggregate(total_throughputs_2$Throughput, by=list(total_throughputs_2$PredictionModel, total_throughputs_2$ProbandenID), FUN=mean)
total_throughputs_2_means <- do.call(data.frame, total_throughputs_2_means)
# NOTE(review): aggregate() names the columns Group.1/Group.2/x, but the
# geom_smooth() below maps PredictionModel/Throughput, and stat_summary()
# inherits no data from the empty ggplot() — this plot likely errors at
# render time; verify (the output filename "rrrr" also looks like a placeholder).
ggplot() +
  geom_boxplot(data=total_throughputs,aes(x=PredictionModel, y=Throughput, fill=PredictionModel), outlier.shape=16,outlier.size=2, position=position_dodge2(width=0.9, preserve="single"), width=0.9) +
  geom_smooth(data = total_throughputs_2_means,aes(x=PredictionModel, y=Throughput), method="lm", se=TRUE, fill=NA,
              formula=y ~ poly(x, 2, raw=TRUE),colour="red") +
  ylab(label = "Throughput [bit/s]") +
  xlab(label ="") +
  stat_summary(fun.y=mean, geom="point", shape=4, size=5, color="black") +
  #ggtitle("Throughput")
  theme_light() +
  #theme(legend.position = "none") +
  guides(fill=guide_legend(title="Prediction Time Offset")) +
  theme(legend.position="bottom", text = element_text(size=20)) +
  ggsave("rrrr", width=10, height=6, device=cairo_pdf)
|
ae9860634d2d7718e1a65b4ef00d12b56f7c072f | 6bea1cce82a7c0ef4eafb33b75360c0830d7bdff | /tests/testthat/test-utils.R | c8c6b338aa224a795ae165de298ae25027d979f2 | [] | no_license | krlmlr/quantities | cb86a48da7dc11f16ba03d8d1d42e86d1e2c2ea2 | 5cd26e488835e672d8cd529e7f7c40f4e67023c9 | refs/heads/master | 2023-03-07T13:19:54.686971 | 2021-02-21T16:20:01 | 2021-02-21T16:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 950 | r | test-utils.R | context("utils")
test_that("objects with errors & units attributes are reclassed", {
  # reclass() should promote an object to "quantities" only when BOTH the
  # "errors" and "units" attributes are present; with one or neither it must
  # stay a plain numeric.
  x <- 1
  expect_is(reclass(x), "numeric")
  attr(x, "errors") <- 1
  attr(x, "units") <- NULL
  expect_is(reclass(x), "numeric")
  attr(x, "errors") <- NULL
  attr(x, "units") <- 1
  expect_is(reclass(x), "numeric")
  attr(x, "errors") <- 1
  attr(x, "units") <- 1
  expect_is(reclass(x), "quantities")
})
test_that("offset units (vs. scale units) are detected", {
  # get_scaling() must return the pure multiplicative factor between units,
  # ignoring any additive offset (celsius/fahrenheit are offset units).
  expect_equal(get_scaling("K", "celsius"), 1)
  expect_equal(get_scaling("K", "fahrenheit"), 9/5)
  expect_equal(get_scaling("K", "mK"), 1000)
})
test_that("dots are converted to the units of the first argument", {
  # cbind() on quantities should coerce later arguments to the first
  # argument's units and error on unitless or incompatible inputs.
  xval <- 1
  xerr <- 0.1
  x <- set_quantities(xval, m/s, xerr)
  y <- set_units(x, km/h)
  z <- set_quantities(xval, m, xerr)
  expect_quantities(cbind(x, y, x, y), rep(xval, 4), units(as_units("m/s")), rep(xerr, 4))
  expect_error(cbind(x, 2))
  expect_error(cbind(x, z))
})
|
86b7a771cbd4d4b2172571a3cc3e1e6415baf908 | 820a329025ccb3a872337607e243a8a9c94c9da3 | /H_update.R | 8fe86cb357fc35d4ea1bea37241f1fc86f9045e2 | [] | no_license | shadowmoon1988/ChangePoint | f63ecc33d5c4f094f838ea1904d65a5c0e17edc4 | c8c5b92a6f33fce9fb51d142f59f6152f0dcd147 | refs/heads/master | 2021-01-12T16:24:42.685465 | 2017-12-17T22:24:50 | 2017-12-17T22:24:50 | 71,990,788 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 16,659 | r | H_update.R | require(lars)
require(MASS)
require(cpm)
require(bcp)
require(ecp)
## Function
Summary =function(S,Y,X,Z){
  # Turn a selected set S of step-covariate columns into interpretable output:
  # break-point locations and per-segment regression coefficients.
  #
  # Args:
  #   S: indices of selected columns of Z (NULL means "no change points").
  #   Y: length-n response.
  #   X: n x (p+1) design matrix (intercept in column 1).
  #   Z: n x ((n-1)*(p+1)) step-covariate matrix.
  # Returns a list:
  #   Break_Point: detected change locations (NULL if none),
  #   Beta: cumulative coefficients, one column per segment,
  #   diff: coefficient jumps between consecutive segments,
  #   raw_beta: baseline coefficients plus the sparse jump matrix.
  p = dim(X)[2] - 1  # BUG FIX: the S == NULL branch previously read a global `p`
  n = length(Y)
  if (is.null(S)){
    coef = lm(Y ~ X - 1)$coefficients
    names(coef) = NULL
    Estimation = list(Break_Point = NULL, Beta = coef[1:(p+1)],
                      diff = NULL, raw_beta = coef[1:(p+1)])
  } else {
    coef = lm(Y ~ cbind(X, Z[, S]) - 1)$coefficients
    names(coef) = NULL
    ls0 = p + 1
    est_beta = coef[1:ls0]
    # Shrink negligible baseline coefficients exactly to zero.
    est_beta[abs(est_beta) < 0.001] = 0
    # Scatter the selected jump coefficients back onto the (p+1) x (n-1) grid.
    est_kesi = rep(0, (n-1)*(p+1))
    est_kesi[S] = coef[-(1:ls0)]
    est_kesi = matrix(est_kesi, p+1, n-1)
    est_idx = rep(FALSE, (n-1)*(p+1))
    est_idx[S] = TRUE
    est_idx = matrix(est_idx, p+1, n-1)
    # A time point is a break point if any coefficient jumps there.
    est_bp = which(apply(est_idx, 2, any))
    # Accumulate the jumps to obtain each segment's coefficient vector.
    temp_beta = est_beta
    for (i in seq_along(est_bp)){
      temp_beta = temp_beta + est_kesi[, est_bp[i]]
      est_beta = cbind(est_beta, temp_beta)
    }
    colnames(est_beta) = NULL
    Estimation = list(Break_Point = est_bp, Beta = est_beta,
                      diff = t(diff(t(est_beta))),
                      raw_beta = cbind(coef[1:(p+1)], est_kesi))
  }
  return(Estimation)
}
dist_err = function(BP1, BP2) {
  # Largest distance from any point of BP1 to its nearest neighbour in BP2
  # (one-sided Hausdorff distance between two break-point sets).
  nearest = vapply(BP1, function(bp) min(abs(bp - BP2)), numeric(1))
  max(nearest)
}
PDR_FDR = function(M1, M2){
  # Exact-position false/positive discovery rates of a detected coefficient
  # matrix M1 against the truth M2 (nonzero entry = declared/true change).
  # Replaces O(rows*cols) scalar loops with vectorised logical masks;
  # the result is identical.
  # Returns c(FDR, PDR): FDR = false detections / all detections,
  #                      PDR = 1 - misses / all true changes.
  detected = (M1 != 0)
  truth = (M2 != 0)
  FP = sum(detected & !truth)  # declared changes with no true change
  FN = sum(!detected & truth)  # true changes that were missed
  output = c(FP / sum(detected), 1 - FN / sum(truth))
  names(output) = c("FDR", "PDR")
  return(output)
}
PDR_bias = function(M1, M2) {
  # FDR/PDR with one-column tolerance: a nonzero entry in one matrix counts as
  # matched if the other matrix is nonzero in the same row at the same column
  # or an adjacent column (clamped at the matrix edges).
  n_row = dim(M1)[1]
  n_col = dim(M1)[2]
  false_neg = 0
  false_pos = 0
  n_detected = 0
  n_true = 0
  # Misses: true changes in M2 with no detection in M1 within +/- one column.
  for (row in 1:n_row) {
    for (col in 1:n_col) {
      if (M2[row, col] == 0) next
      n_true = n_true + 1
      left = max(col - 1, 1)
      right = min(col + 1, n_col)
      if (M1[row, col] == 0 & M1[row, left] == 0 & M1[row, right] == 0) {
        false_neg = false_neg + 1
      }
    }
  }
  # False alarms: detections in M1 with no true change in M2 within +/- one column.
  for (row in 1:n_row) {
    for (col in 1:n_col) {
      if (M1[row, col] == 0) next
      n_detected = n_detected + 1
      left = max(col - 1, 1)
      right = min(col + 1, n_col)
      if (M2[row, col] == 0 & M2[row, left] == 0 & M2[row, right] == 0) {
        false_pos = false_pos + 1
      }
    }
  }
  output = c(false_pos / n_detected, 1 - false_neg / n_true)
  names(output) = c("FDR", "PDR")
  return(output)
}
makeTable = function(sim_list) {
  # Print per-method mean and sd tables (rounded to 4 decimals) of the
  # simulation result matrices in sim_list; rows are labelled by method name.
  show_stat = function(stat_fun) {
    tbl = t(sapply(sim_list, stat_fun))
    row.names(tbl) = names(sim_list)
    print(round(tbl, 4))
  }
  show_stat(colMeans)
  show_stat(function(x) apply(x, 2, sd))
}
## SLasso Detection
SIFS = function(X,Y,n,p,normalize=TRUE){
  # Sequential Individual Forward Selection (SLasso-style screening): greedily
  # adds the single column of the step-design matrix Z most correlated with
  # the current residual, accepting additions while the extended BIC decreases.
  #
  # Args:
  #   X: n x (p+1) design matrix, intercept in column 1.
  #   Y: length-n response.
  #   n, p: sample size and number of covariates.
  #   normalize: if TRUE, scale the non-intercept columns of X and all of Z.
  # Returns: the Summary() list (Break_Point, Beta, diff, raw_beta).

  # Extended BIC of the set S given the current residual projector.
  EBIC = function(S, projH){
    full = (n-1)*(p+1)
    gamma = (1 - log(n)/(2*log(full)))
    return( n*log( sum((projH %*% Y)^2)/n ) + length(S)*log(n) +
              2*gamma*log(choose(full, length(S))) )
  }
  # Deflate the projection matrix by the Z columns in update_idx
  # (rank-one updates, one per added column).
  projection_H = function(HH, update_idx){
    newHH = HH
    for (ii in update_idx){
      z = as.matrix(Z[, ii], n, 1)
      newHH = newHH %*% (diag(1, n) - (z %*% t(z) %*% newHH) /
                           as.numeric(t(z) %*% newHH %*% z))
    }
    return(newHH)
  }
  # Step-design matrix: block t holds X shifted to start at time t+1.
  Z = matrix(0, n, (n-1)*(p+1))
  for (t in 1:(n-1)){
    Z[(t+1):n, (t-1)*(p+1)+1:(p+1)] = X[(t+1):n, ]
  }
  if (normalize){
    X[, -1] = scale(X[, -1])
    Z = scale(Z)
  }
  S1 = NULL
  # BUG FIX: `1:(n-1)*(p+1)` parses as (1:(n-1))*(p+1), so only every
  # (p+1)-th column of Z was a candidate (harmless only when p == 0).
  SA = 1:((n-1)*(p+1))
  ebic = Inf
  H = diag(1, n) - X %*% solve(t(X) %*% X) %*% t(X)  # residual projector of base model
  repeat{
    ybar = H %*% Y
    # Pick the candidate column most correlated with the current residual.
    R = 0
    for (i in SA){
      tempr = abs(t(Z[, i]) %*% ybar)
      if (tempr > R){
        R = tempr
        temp_s = i
      }
    }
    new_S = sort(c(S1, temp_s))
    new_H = projection_H(H, temp_s)
    new_ebic = EBIC(new_S, new_H)
    # Stop when EBIC no longer improves or every column has been selected.
    if (new_ebic > ebic | length(S1) == (n-1)*(p+1)){
      break
    } else {
      ebic = new_ebic
      S1 = new_S
      H = new_H
      SA = SA[SA != temp_s]
    }
  }
  return(Summary(S1, Y, X, Z))
}
SBFS = function(X,Y,n,p){
  # Sequential Block Forward Selection: at each step the whole (p+1)-column
  # block of Z belonging to one candidate change time is added, then single
  # columns of that block are pruned while a block-structured EBIC improves.
  # Args: X n x (p+1) design (intercept first), Y response, n sample size,
  #       p number of covariates. Returns the Summary() result list.
  # Block-structured extended BIC; v holds the number of retained columns per
  # accepted block so the prior penalty uses choose(p+1, v[k]) per block.
  EBIC= function(S,projH,v){
    projection = sum((projH%*%Y)^2)
    gamma = (1-log(n)/(2*log((n-1)*(p+1))))
    if (length(v) == 0){
      return( n*log(projection/n)+(length(S))*log(n) )
    } else {
      return( n*log(projection/n)+(length(S)+2*gamma*length(v))*log(n)+2*gamma*sum(sapply( v,function(x) log(choose(p+1,x)) )) )
    }
  }
  # Deflate the projection matrix by the Z columns in update_idx (rank-one updates).
  projection_H = function(HH,update_idx){
    newHH=HH
    for (ii in update_idx){
      z = as.matrix(Z[,ii],n,1)
      newHH = newHH %*% (diag(1,n)- (z %*% t(z) %*% newHH) / as.numeric (t(z)%*% newHH %*% z) )
    }
    return(newHH)
  }
  # Step-design matrix: block t holds X shifted to start at time t+1.
  Z = matrix(0,n,(n-1)*(p+1))
  for (t in 1:(n-1)){
    Z[(t+1):n,(t-1)*(p+1)+1:(p+1)] = X[(t+1):n,]
  }
  ## Standardized ##
  #X[,-1]=scale(X[,-1])
  Z=scale(Z)
  ################
  S = NULL       # selected columns of Z
  TA=1:(n-p-1)   # candidate change times; capped at n-p-1 so each block Zt is invertible
  v = NULL       # retained column count per accepted block
  ebic=Inf
  H = diag(1,n)- X%*%solve(t(X)%*%X)%*%t(X)  # residual projector of the base model
  repeat{
    ybar = H%*%Y
    # Choose the candidate block explaining the most residual variation (R^2).
    R2=0
    for ( i in TA){
      Zt = Z[,((i-1)*(p+1)+1):(i*(p+1))]
      tempR2 = t(ybar)%*%Zt%*%solve(t(Zt)%*%Zt)%*%t(Zt)%*%ybar
      if(tempR2 > R2){
        R2=tempR2
        temp_t=i
      }
    }
    ST = ((temp_t-1)*(p+1)+1):(temp_t*(p+1))  # column indices of the chosen block
    rm(tempR2)
    new_S=c(S,ST)
    new_S=new_S[order(new_S)]
    new_H=projection_H(H,ST)
    v = c(v,p+1)
    inter_ebic=EBIC(new_S,new_H,v)
    # Backward pruning inside the just-added block: repeatedly drop the single
    # column whose removal lowers the EBIC the most, until no drop helps.
    repeat{## deduction current block
      tebic=Inf
      tempv=v
      tempv[length(tempv)]=tempv[length(tempv)]-1
      for(i in ST){
        temp_new_S=new_S[new_S!=i]
        temp_ebic = EBIC(temp_new_S,projection_H(H,ST[ST!=i]),tempv)
        if(temp_ebic<tebic){
          tebic=temp_ebic
          temp_rms=i
        }
      }
      if(tebic<inter_ebic){
        ST = ST[ST!=temp_rms]
        new_S = new_S[new_S!=temp_rms]
        new_H = projection_H(H,ST)
        v=tempv
        inter_ebic=EBIC(new_S,new_H,v)
      } else {
        break
      }
    }
    # Accept the pruned block only if the overall EBIC improved.
    new_ebic=inter_ebic
    if(new_ebic < ebic){
      ebic=new_ebic
      S=new_S
      TA=TA[TA!=temp_t]
      H=new_H
    } else {
      break
    }
  }
  rm(i,v,R2)
  rm(temp_rms,temp_t,inter_ebic,temp_ebic,new_ebic,ebic,tebic,temp_new_S,new_S,ST,TA,ybar)
  # The assignment is the last expression, so its value is returned (invisibly).
  estimation=Summary(S,Y,X,Z)
}
## LS_TV
LS_TV= function(Y,n,K_max,K_chs){
  # LS-TV detector for a piecewise-constant mean: LARS screens K_max candidate
  # jump locations, then dynamic programming picks the least-squares-optimal
  # subset. Args: Y response, n = length(Y), K_max candidates kept from the
  # LARS path, K_chs change points to select (0 = none; negative = choose the
  # number automatically by a residual-ratio rule, evaluated in parallel).
  # Step-indicator basis: column t is 1 from time t onward.
  Z = matrix(0,n,n)
  for (t in 1:n){
    Z[t:n,t] = 1
  }
  if (K_chs>0){
    ### LAR: screen candidate jump locations
    lar = lars(Z,Y,type='lar',intercept=FALSE,max.steps=K_max+1)
    cf_max = coef(lar)[K_max+2,]
    cp_max = which(cf_max!=0) [-1]  # drop the always-active first column
    ### DP: choose the best K_chs of the candidates by least squares
    path = matrix(NA,(K_max-K_chs+1),K_chs)
    ####step 0: cost of the first segment ending at each feasible candidate
    tmp = sapply(cp_max[1:(K_max-K_chs+1)], function(x) var(Y[1:x])*(x-1))
    path[,1] = 1: (K_max-K_chs+1)
    ####step 1 to K_chs-1
    # NOTE(review): `2:K_chs-1` parses as (2:K_chs)-1, i.e. k runs
    # 1..(K_chs-1); confirm this matches the intended recursion depth.
    if ( K_chs > 1 ){
      for ( k in 2:K_chs-1){
        new_path= matrix(NA,(K_max-K_chs+1), k+1)
        new_tmp=rep(0,(K_max-K_chs+1))
        for(x in 1:(K_max-K_chs+1) ) {
          L = cp_max[x+k]
          min_tmp=tmp[1:x] +sapply(cp_max[k:(x+k-1)], function(y) ifelse(y<L-2,var(Y[y:(L-1)])*(L-y-1),0) )
          new_tmp[x]=min( min_tmp )
          new_path[x,]=c(path[which(new_tmp[x]==min_tmp)[1],1:k],x+k)
        }
        tmp=new_tmp
        path[,1:(k+1)]=new_path
      }
    }
    ####step K_chs: close every path with its final segment cost
    min_tmp=tmp +sapply(cp_max[K_chs:K_max], function(y) ifelse(y<n,var(Y[y:n])*(n-y),0) )
    # NOTE(review): ties in min_tmp would select multiple rows here (the
    # parallel branch below guards with [1]); confirm ties cannot occur.
    final_path=path[which(min(min_tmp)==min_tmp),]
    bp = cp_max[final_path]-1
    #LS = min(min_tmp)
  } else if (K_chs == 0) {
    bp=NULL
    #LS = var(Y)*(n-1)
  } else if (K_chs < 0) {
    ### LAR: screen candidate jump locations
    lar = lars(Z,Y,type='lar',intercept=FALSE,max.steps=K_max+1)
    cf_max = coef(lar)[K_max+2,]
    cp_max = which(cf_max!=0) [-1]
    library(snow)
    inputs <- 1:K_max
    # Same DP as above, wrapped as a function of the number of change points
    # K_pt so every candidate model size can be solved on a worker; Y, n are
    # captured from the enclosing environment via the closure.
    rDP <- function(K_pt,K_max,cp_max) {
      path = matrix(NA,(K_max-K_pt+1),K_pt)
      ####step 0
      tmp = sapply(cp_max[1:(K_max-K_pt+1)], function(x) var(Y[1:x])*(x-1))
      path[,1] = 1: (K_max-K_pt+1)
      ####step 1 to K_chs-1
      if ( K_pt > 1 ){
        for ( k in 2:K_pt-1){
          new_path= matrix(NA,(K_max-K_pt+1), k+1)
          new_tmp=rep(0,(K_max-K_pt+1))
          for(x in 1:(K_max-K_pt+1) ) {
            L = cp_max[x+k]
            min_tmp=tmp[1:x] +sapply(cp_max[k:(x+k-1)], function(y) ifelse(y<L-2,var(Y[y:(L-1)])*(L-y-1),0) )
            new_tmp[x]=min( min_tmp )
            new_path[x,]=c(path[which(new_tmp[x]==min_tmp)[1],1:k],x+k)
          }
          tmp=new_tmp
          path[,1:(k+1)]=new_path
        }
      }
      ####step K_chs
      min_tmp=tmp +sapply(cp_max[K_pt:K_max], function(y) ifelse(y<n,var(Y[y:n])*(n-y),0) )
      LS = min(min_tmp)
      final_path=path[which(LS==min_tmp)[1],]
      return(list(LS=LS,PATH=final_path))
    }
    # NOTE(review): worker count is hard-coded; consider parallel::detectCores().
    numCores = 7
    cl <- makeCluster(numCores)
    results = clusterApplyLB(cl, inputs, rDP,K_max=K_max,cp_max=cp_max)
    stopCluster(cl)
    # Model-size selection: pick the smallest K whose residual-sum ratio
    # LS(K+1)/LS(K) first exceeds 0.95 (diminishing returns rule).
    ratio=1-0.05
    min_ratio = Inf
    for ( i in 1:(K_max-1)){
      tmp_ratio = results[[i+1]]$LS/results[[i]]$LS
      if (tmp_ratio>ratio & tmp_ratio<min_ratio) {
        min_ratio=tmp_ratio
        idx=i
      }
    }
    final_path=results[[idx]]$PATH
    bp = cp_max[final_path]-1
  }
  ### final: fit the selected break points with an intercept-only design
  estimation=Summary(bp,Y,matrix(1,n,1),Z[,-1])
  return(estimation)
}
## https://www.r-bloggers.com/change-point-detection-in-time-series-with-r-and-tableau/ other methods
CPM = function(Y) {
  # Sequential change-point detection via the cpm package: repeatedly run a
  # Student-t detector on the remainder of the series after the last detected
  # change, accumulating absolute break positions. Uses the global `n`.
  # Step-indicator basis: column t is 1 from time t onward.
  step_mat = matrix(0, n, n)
  for (col in seq_len(n)) {
    step_mat[col:n, col] = 1
  }
  break_points = NULL
  offset = 0   # absolute position of the last detected change
  segment = Y
  while (TRUE) {
    fit = detectChangePoint(segment, "Student")
    if (!fit$changeDetected) {
      break
    }
    offset = offset + fit$detectionTime - 1
    break_points = c(break_points, offset)
    segment = Y[(offset + 1):n]
  }
  Summary(break_points, Y, matrix(1, n, 1), step_mat[, -1])
}
BCP = function(Y) {
  # Bayesian change-point detection via the bcp package: keep every position
  # whose posterior change probability exceeds 0.95. Uses the global `n`.
  step_mat = matrix(0, n, n)
  for (col in seq_len(n)) {
    step_mat[col:n, col] = 1
  }
  fit = bcp(Y)
  candidates = seq_len(n - 1)
  break_points = candidates[fit$posterior.prob[candidates] > 0.95]
  Summary(break_points, Y, matrix(1, n, 1), step_mat[, -1])
}
ECP = function(Y) {
  # Nonparametric change-point detection via ecp::e.divisive; its `estimates`
  # include the series endpoints, which are stripped. Uses the global `n`.
  step_mat = matrix(0, n, n)
  for (col in seq_len(n)) {
    step_mat[col:n, col] = 1
  }
  fit = e.divisive(Y)
  est = fit$estimates
  break_points = est[-c(1, length(est))] - 1
  Summary(break_points, Y, matrix(1, n, 1), step_mat[, -1])
}
#Y = matrix(0,n,1)
# Beta = NULL
# for (bp in 1:K){
# beta = runif(p+1,bp,1+bp) # random beta
# Y = Y + diag(c(rep(0,break_point[bp]),rep(1,diff_bp[bp]),rep(0,n-break_point[bp+1])))%*% X %*% beta
# Beta = cbind(Beta,beta)
# }
############################################# No Covariates ###################################################
##Settings
n=5000        # series length
sigma=0.05    # noise standard deviation
p=0           # number of covariates (0 = pure mean-shift model)
K=12          # number of segments (K-1 = 11 break points)
##Initial Setup
# True break points as fixed fractions of n; break_point_ex pads the
# boundaries so diff_bp gives every segment length.
break_point = c(10, 13, 15, 23, 25, 40, 44, 65, 76, 78, 81)*n/100
break_point_ex = c(0,break_point,n)
diff_bp = diff(break_point_ex)
# Per-segment means, 1 x K (one row because p = 0).
Beta = matrix(c(0 ,40 ,-10 , 20 ,-20 , 30, -12 , 9 ,52 ,21 , 42 , 0)/20 -0.8,nrow=1,byrow = TRUE)
# Sparse (p+1) x n truth matrix: segment coefficients placed at segment starts.
raw_Beta=matrix(0,p+1,n)
raw_Beta[,c(0,break_point)+1]=Beta[1:(p+1),]
# Per-method result accumulators, one row appended per Monte Carlo replicate.
result_SIFS=NULL
result_LSTV_1=NULL
result_LSTV_2=NULL
result_cpm=NULL
result_bcp=NULL
result_ecp=NULL
# Monte-Carlo study: simulate a piecewise-constant signal plus Gaussian noise
# (Setting 1) and run every change-point method on the same data. Each
# result_* matrix gains one row per replication:
# c(d(true->est)/n, d(est->true)/n, PDR/FDR, PDR bias).
for (i in 1:100){
  X = matrix(rnorm(n*p),n,p)
  X = cbind(rep(1,n),X)
  Y = matrix(0,n,1)
  ##Setting 1
  for (bp in 1:K){
    Y = Y + diag(c(rep(0,break_point_ex[bp]),rep(1,diff_bp[bp]),rep(0,n-break_point_ex[bp+1])))%*% X %*% Beta[1:(p+1),bp]
  }
  ##Setting 2
  #idx=sample(1:12,n,replace=TRUE,prob=c(0.1, 0.03, 0.02, 0.08, 0.02, 0.15, 0.04, 0.21, 0.11, 0.02, 0.03,0.19))
  #idx=idx[order(idx)]
  #break_point=which(diff(idx)==1)
  #raw_Beta=matrix(0,p+1,n)
  #raw_Beta[,c(0,break_point)+1]=Beta[1:(p+1),]
  # for ( j in 1:n){
  #   Y[j]=X[j,]%*% Beta[idx[j]]
  #}
  ##Add Noise
  E = matrix(rnorm(n,0,sigma),n,1)
  Y = Y + E
  ##Evaluation
  estimation_SIFS=SIFS(X,Y,n,p,TRUE)
  estimation_LSTV_1=LS_TV(Y,n,3*(K-1),K-1)
  estimation_LSTV_2=LS_TV(Y,n,3*(K-1),-1)
  estimation_cpm=CPM(Y)
  estimation_bcp=BCP(Y)
  estimation_ecp=ECP(Y)
  result_SIFS=rbind(result_SIFS,c(
    dist_err(break_point,estimation_SIFS$Break_Point)/n,
    dist_err(estimation_SIFS$Break_Point,break_point)/n,
    PDR_FDR(estimation_SIFS$raw_beta,raw_Beta),
    PDR_bias(estimation_SIFS$raw_beta,raw_Beta))
  )
  result_LSTV_1=rbind(result_LSTV_1,c(
    dist_err(break_point,estimation_LSTV_1$Break_Point)/n,
    dist_err(estimation_LSTV_1$Break_Point,break_point)/n,
    PDR_FDR(estimation_LSTV_1$raw_beta,raw_Beta),
    PDR_bias(estimation_LSTV_1$raw_beta,raw_Beta))
  )
  result_LSTV_2=rbind(result_LSTV_2,c(
    dist_err(break_point,estimation_LSTV_2$Break_Point)/n,
    dist_err(estimation_LSTV_2$Break_Point,break_point)/n,
    PDR_FDR(estimation_LSTV_2$raw_beta,raw_Beta),
    PDR_bias(estimation_LSTV_2$raw_beta,raw_Beta))
  )
  result_cpm=rbind(result_cpm,c(
    dist_err(break_point,estimation_cpm$Break_Point)/n,
    dist_err(estimation_cpm$Break_Point,break_point)/n,
    PDR_FDR(estimation_cpm$raw_beta,raw_Beta),
    PDR_bias(estimation_cpm$raw_beta,raw_Beta))
  )
  # BUG FIX: the next two accumulators previously did rbind(result_SIFS, ...)
  # (copy-paste error), appending the BCP/ECP rows onto the SIFS results and
  # corrupting both result matrices.
  result_bcp=rbind(result_bcp,c(
    dist_err(break_point,estimation_bcp$Break_Point)/n,
    dist_err(estimation_bcp$Break_Point,break_point)/n,
    PDR_FDR(estimation_bcp$raw_beta,raw_Beta),
    PDR_bias(estimation_bcp$raw_beta,raw_Beta))
  )
  result_ecp=rbind(result_ecp,c(
    dist_err(break_point,estimation_ecp$Break_Point)/n,
    dist_err(estimation_ecp$Break_Point,break_point)/n,
    PDR_FDR(estimation_ecp$raw_beta,raw_Beta),
    PDR_bias(estimation_ecp$raw_beta,raw_Beta))
  )
  print(i)
}
# Collect the per-method result matrices and render the summary table.
sim_list=list(SIFS=result_SIFS,LS_TV1=result_LSTV_1,LS_TV2=result_LSTV_2,CPM=result_cpm,BCP=result_bcp,ECP=result_ecp)
makeTable(sim_list)
# NOTE(review): saved under the name sim_list_2000 although n = 5000 in this
# scenario -- confirm the intended label.
sim_list_2000=sim_list
#################################### Covariates Involved ##############################################
# Second scenario: piecewise REGRESSION with p = 5 covariates, K = 3 segments
# and true break points at 25% and 80% of the series.
n=2000
sigma=0.1
p=5
K=3
break_point = c(25,80)*n/100
break_point_ex = c(0,break_point,n)
diff_bp = diff(break_point_ex)
# One column of coefficients per segment; only the first p+1 = 6 rows are
# used below (via Beta[1:(p+1), ]).
Beta = cbind(
c( 1, 1, 1, 1, 2, -1, 0, 0, 1.2, 0),
c(1.2, 1.1, 1, -1, 1.8, 1, 0, 0, 1, 0),
c(1.8, 1.2, 1, 0, 1.6, 0, 0, 0, 0.8, 0)
)
# Raw coefficient path: segment-1 values at index 1, then the between-segment
# coefficient differences at each break point.
raw_Beta=matrix(0,p+1,n)
raw_Beta[,1]=Beta[1:(p+1),1]
raw_Beta[,break_point+1]=t(diff(t(Beta[1:(p+1),])))
result_SIFS <- NULL
result_SBFS <- NULL
# 100 Monte-Carlo replications of the covariate scenario: draw a fresh design
# matrix and noise, assemble the piecewise-linear response, then score SIFS
# and SBFS against the true break points and coefficient path.
for (iter in 1:100) {
  X <- cbind(rep(1, n), matrix(rnorm(n * p), n, p))
  Y <- matrix(0, n, 1)
  ## Setting 1: segment `seg` is active on rows
  ## (break_point_ex[seg] + 1) : break_point_ex[seg + 1].
  for (seg in 1:K) {
    active <- c(rep(0, break_point_ex[seg]), rep(1, diff_bp[seg]), rep(0, n - break_point_ex[seg + 1]))
    Y <- Y + active * (X %*% Beta[1:(p + 1), seg])
  }
  ## Add noise
  Y <- Y + matrix(rnorm(n, 0, sigma), n, 1)
  ## Evaluation
  fit_SIFS <- SIFS(X, Y, n, p)
  fit_SBFS <- SBFS(X, Y, n, p)
  result_SIFS <- rbind(result_SIFS, c(
    dist_err(break_point, fit_SIFS$Break_Point) / n,
    dist_err(fit_SIFS$Break_Point, break_point) / n,
    PDR_FDR(fit_SIFS$raw_beta, raw_Beta)
  ))
  result_SBFS <- rbind(result_SBFS, c(
    dist_err(break_point, fit_SBFS$Break_Point) / n,
    dist_err(fit_SBFS$Break_Point, break_point) / n,
    PDR_FDR(fit_SBFS$raw_beta, raw_Beta)
  ))
  print(iter)
}
# Summarise the Monte-Carlo results: per-column mean and standard deviation
# of the error metrics assembled in the loop above, first SIFS then SBFS.
result=result_SIFS
colMeans(result)
apply(result,2,sd)
result=result_SBFS
colMeans(result)
apply(result,2,sd)
383a143f9d2d91a0feeac6642c8623450f748059 | 79eb7c6958b247770731ee20a5d9be525d8f5ed0 | /exercises/practice/anagram/test_anagram.R | f10a1a0fdee972e27196dd580b770bc7def770d1 | [
"CC-BY-SA-4.0",
"CC-BY-3.0",
"CC-BY-4.0",
"MIT"
] | permissive | exercism/r | 345781f314b8a66be047abd889238cba2630a20c | 566676cca76019e3e6a602f8f4d8086c54a51e1e | refs/heads/main | 2023-08-03T09:30:59.027153 | 2023-07-28T00:18:31 | 2023-07-28T00:18:31 | 24,401,761 | 22 | 37 | MIT | 2023-09-05T11:19:45 | 2014-09-24T05:22:10 | R | UTF-8 | R | false | false | 3,566 | r | test_anagram.R | source("./anagram.R")
library(testthat)
context("anagram")

# Exercism "anagram" exercise suite: anagram(subject, candidates) must return
# the candidates that are case-insensitive anagrams of subject, excluding the
# subject itself. An empty result is expected as c() (i.e. NULL).
test_that("no matches", {
  subject <- "diaper"
  candidates <- c("hello", "world", "zombies", "pants")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("detects simple anagram", {
  subject <- "ant"
  candidates <- c("tan", "stand", "at")
  expect_equal(anagram(subject, candidates),
               c("tan"))
})

# "galea" and "eagle" share a letter set but not multiplicities (aa vs ee).
test_that("does not detect false positives", {
  subject <- "galea"
  candidates <- c("eagle")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("detects multiple anagrams", {
  subject <- "master"
  candidates <- c("stream", "pigeon", "maters")
  expect_equal(anagram(subject, candidates),
               c("stream", "maters"))
})

test_that("does not detect anagram subsets", {
  subject <- "good"
  candidates <- c("dog", "goody")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("detects anagram", {
  subject <- "listen"
  candidates <- c("enlists", "google", "inlets", "banana")
  expect_equal(anagram(subject, candidates),
               c("inlets"))
})

# NOTE(review): this test name duplicates "detects multiple anagrams" above;
# consider renaming so failures are unambiguous.
test_that("detects multiple anagrams", {
  subject <- "allergy"
  candidates <-
    c("gallery", "ballerina", "regally", "clergy", "largely", "leading")
  expect_equal(anagram(subject, candidates),
               c("gallery", "regally", "largely"))
})
# Typo fix in the description: "indentical" -> "identical".
test_that("does not detect identical words", {
  subject <- "corn"
  candidates <- c("corn", "dark", "Corn", "rank", "CORN", "cron", "park")
  expect_equal(anagram(subject, candidates),
               c("cron"))
})

test_that("does not detect non-anagrams with identical checksum", {
  subject <- "mass"
  candidates <- c("last")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("detects anagrams case-insensitively", {
  subject <- "Orchestra"
  candidates <- c("cashregister", "Carthorse", "radishes")
  expect_equal(anagram(subject, candidates),
               c("Carthorse"))
})

test_that("detects anagrams using case-insensitive subject", {
  subject <- "Orchestra"
  candidates <- c("cashregister", "carthorse", "radishes")
  expect_equal(anagram(subject, candidates),
               c("carthorse"))
})

# Typo fix in the description: "case-insensitve" -> "case-insensitive".
test_that("detects anagrams using case-insensitive possible matches", {
  subject <- "orchestra"
  candidates <- c("cashregister", "Carthorse", "radishes")
  expect_equal(anagram(subject, candidates),
               c("Carthorse"))
})
test_that("does not detect a word as its own anagram", {
  subject <- "banana"
  candidates <- c("Banana")
  expect_equal(anagram(subject, candidates),
               c())
})

# Grammar fix in the description: "a anagram" -> "an anagram".
test_that("does not detect an anagram if the original word is repeated", {
  subject <- "go"
  candidates <- c("go Go GO")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("anagrams must use all letters exactly once", {
  subject <- "tapper"
  candidates <- c("patter")
  expect_equal(anagram(subject, candidates),
               c())
})

# NOTE(review): duplicates "does not detect non-anagrams with identical
# checksum" above (same subject and candidates); consider removing one.
test_that("eliminates anagrams with the same checksum", {
  subject <- "mass"
  candidates <- c("last")
  expect_equal(anagram(subject, candidates),
               c())
})

test_that("capital word is not own anagram", {
  subject <- "BANANA"
  candidates <- c("Banana")
  expect_equal(anagram(subject, candidates),
               c())
})

# NOTE(review): same description as the tapper/patter test above (with the
# inputs swapped); consider renaming so failures are unambiguous.
test_that("anagrams must use all letters exactly once", {
  subject <- "patter"
  candidates <- c("tapper")
  expect_equal(anagram(subject, candidates),
               c())
})

message("All tests passed for exercise: anagram")
|
75da177e97963f7d6046aff7bd65667881fc6f00 | 68ebca23f59b1b751a2f025d5556be74e97393d2 | /Plot1.R | 3595fa76f982a9b9e96a8636eab8d275972b2c0e | [] | no_license | rachitagrawal/ExData_Plotting1 | 0cec148f0ce6e518de9afee98fad682f6b621f49 | 3e5527574208446f4d5f68fb65dcae20731893ac | refs/heads/master | 2021-01-15T20:04:46.581905 | 2015-03-07T18:18:03 | 2015-03-07T18:18:03 | 31,604,340 | 0 | 0 | null | 2015-03-03T15:05:58 | 2015-03-03T15:05:58 | null | UTF-8 | R | false | false | 440 | r | Plot1.R | Plot1<-function(file){
tb<-read.table(file, sep=";", header=T)
tb$Date<-as.Date(tb$Date, format="%d/%m/%Y")
tb<-subset(tb, Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
print(class(tb$Global_active_power))
tb$Global_active_power<-as.numeric(as.character(tb$Global_active_power))
png("Plot1.png")
hist(tb$Global_active_power, main="Global Active Power", xlab="Global Active Power(kilowatts)", col="red")
dev.off()
} |
711c0c8beef2856032c145728c6c66a8680ac96d | 9aeff507412b57718da6db67e708bdf04aa83228 | /man/get.epiweek.Rd | 1633e7238552f92bade2c7f38e03cf4162c95822 | [] | no_license | lozalojo/mem | df00ae00aa190e91d3217ddc60736eb86894f037 | e8bbdcc1df8e31cbeb036ac5037f79b7375d976c | refs/heads/master | 2023-07-03T13:44:40.371634 | 2023-06-21T06:21:11 | 2023-06-21T06:21:11 | 47,120,918 | 11 | 3 | null | 2023-06-21T06:21:12 | 2015-11-30T13:39:39 | R | UTF-8 | R | false | true | 317 | rd | get.epiweek.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.epiweek.R
\name{get.epiweek}
\alias{get.epiweek}
\title{create epidemiological calendar}
\usage{
get.epiweek(i.date = as.POSIXct(as.POSIXlt(Sys.Date()), tz = "CET"))
}
\description{
create epidemiological calendar
}
\keyword{internal}
|
c2307c1f1e1902eec9ff9bd655d9e5bb806c5956 | 2bafced5f43ecee6fc6e7a78e3be54627f8a864e | /inst/ct_shiny/Inputs/tradeInputs.R | 55e486f60d1f27ff60654bc0b2f0836a38577533 | [] | no_license | luciu5/competitiontoolbox | 9fd70d5aa40e739dab4d3961418c9739702ff5af | 04a1f19c9845f1a8a01dac030dd31e480dfe41c9 | refs/heads/master | 2022-10-08T14:57:48.544298 | 2022-08-24T20:53:41 | 2022-08-24T20:53:41 | 198,494,717 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,579 | r | tradeInputs.R |
##### This function is just the "Tariffs"/"Quotas" version of genInputDataMergers() for "Horizontal". This entire thing needs to be desperately refactored...
tradeInputs <- function(nrows, type = c("Tariffs", "Quotas")) {
# a function to generate default input data set for simulations
type = match.arg(type)
exampleData <- data.frame(
Name = c("Prod1","Prod2","Prod3","Prod4"),
Owner = c("Firm1","Firm2","Firm3","Firm3"),
'Prices \n($/unit)' = rep(10,4),
'Quantities' =c(0.4,.3,.2,.1)*100,
'Margins\n(p-c)/p' =c(0.25,NA,NA,NA),
stringsAsFactors = FALSE,
check.names=FALSE
)
exampleData <- exampleData[order(exampleData$`Quantities`, decreasing = TRUE),]
rownames(exampleData) <- NULL
if(type == "Tariffs"){
fx <- data.frame('Current \nTariff \n(proportion)' = c(.05,.05,0,0),
'New \nTariff \n(proportion)' = c(.25,.25,0,0),
stringsAsFactors = FALSE,
check.names=FALSE)
}
else if (type == "Quotas"){
fx <- data.frame('Current \nQuota \n(proportion)' = c(Inf,Inf,Inf,Inf),
'New \nQuota \n(proportion)' = c(.75,.75,Inf,Inf),
stringsAsFactors = FALSE,
check.names=FALSE)
}
exampleData <- cbind(exampleData, fx)
inputData <- as.data.frame(matrix(NA_real_,nrow=max(nrows, nrow(exampleData)),ncol= ncol(exampleData)))
colnames(inputData) <- colnames(exampleData)
inputData[1:nrow(exampleData),] <- exampleData
return(inputData)
}
|
6437343b1f19b5777ae13b5049cef607d86617dc | f90c3956e18e75fd3e2779f00c9ace847114ba82 | /app.R | 58c7d7cd3c16f64cbab53c7b138f6553f3a701ff | [] | no_license | adalrada/JIRD | 31bcb82becf1df1c3f1bdb2891be98b9c34e5255 | b56592657d564f49b0f58fdb6ec0a1bcfe20fe6d | refs/heads/main | 2023-04-22T11:11:19.210019 | 2021-05-11T00:32:23 | 2021-05-11T00:32:23 | 361,972,947 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,822 | r | app.R | options(shiny.maxRequestSize=100*1024^2)
library(shiny)
library(tidyverse)
library(googleVis) ## Version >= 0.3.2 required
library(knitr)
library(markdown)
library(RCurl)
library("rvest")
library(shinydashboard)
library(shinycssloaders)
library(quantmod)
library(DT)
# Dashboard UI: a sidebar with two tabs -- a quick-look data explorer
# ("page2") and a dynamic file-analysis page ("Analisis").
ui <- dashboardPage(
  dashboardHeader(title = "Propuesta JIRD"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Vistazo Rápido", tabName = "page2", icon = icon("home")),
      menuItem("Análisis dinámico", tabName = "Analisis", icon = icon("calendar")),
      uiOutput('style_tag'))
  ),
  dashboardBody(
    # Busy indicator shown while shiny is computing.
    conditionalPanel(condition="$('html').hasClass('shiny-busy')",
      plotOutput("Test") %>% withSpinner(color="#0dc5c1"),
      tags$div("Estamos preparando todo, por favor espere...",id="loadmessage")
    ),
    tabItems(
      # Tab 1: upload a CSV file and generate a downloadable report.
      tabItem(tabName = "Analisis",
        tags$h1("Análisis de población estudiantil y el acceso a TIC's"),
        # Horizontal line ----
        tags$hr(),
        # Input: Select a file ----
        fileInput("file1", "Seleccione el archivo a procesar",
          multiple = FALSE,
          accept = c("text/csv",
            "text/comma-separated-values,text/plain",
            ".csv")),
        tags$hr(),
        uiOutput('ui.action'),
        # Horizontal line ----
        tags$hr(),
        # NOTE(review): the trailing comma after downloadButton() leaves an
        # empty argument in tabItem(); confirm it does not error at runtime.
        downloadButton("report", "Generar reporte"),
      ),# end of tab 1
      # Tab 2: pick a year (remote CSV), show KPI boxes, a table and plots.
      tabItem(tabName = "page2",
        tags$h1('Analisis de datos!'),
        selectInput("variable", "Archivo a analizar:",
          c("2019" = "https://raw.githubusercontent.com/adalrada/JIRD/main/enaho19_hogares_df_challenge.csv",
            "2020" = "https://raw.githubusercontent.com/adalrada/JIRD/main/enaho20_hogares_df_challenge.csv")),
        helpText("Selecciona la zona y cantidad de dispositivos"),
        frow1 <- fluidRow(
          valueBoxOutput("value1")# number of students
          ,valueBoxOutput("value2")# number of households
          ,valueBoxOutput("value3")# ICT access
        ),
        tabsetPanel(
          tabPanel("Resumen de datos", DTOutput('tbl')),
          tabPanel("Graficos",
            selectInput("variableanalisis", "Generar grafico distribucion de variable:",
              c("Tenencia de Tics" = "2",
                "Cantidad de telefonos" = "3",
                "Cantidad tablets"="4",
                "Cantidad Pc escritorio"="5",
                "Cantidad laptop"="6")),
            plotOutput("plot1", click = "plot_click"),
            verbatimTextOutput("info")
          )
        )
      )# end of tab 2
    )# end of tabs
  ))## end of UI
# Server logic: loads the selected year's survey CSV from GitHub, feeds the
# KPI value boxes, summary table and plots, and knits a downloadable report.
server <- function(input, output) {
  # -- data loading ---------------------------------------------------------
  # Reactive: read the remote CSV chosen in input$variable and compute the
  # headline figures plus the subset of display columns (3 and 21-25).
  mydata <- reactive({
    inFile <- input$variable
    if (is.null(inFile))
      return(NULL)
    tbl <- read.csv(input$variable,header = TRUE,sep =";",dec=",",check.names= F,fileEncoding="latin1",stringsAsFactors=TRUE)
    TotalEstudiantes<-sum(as.integer(tbl$estudiantes))
    TOTALHOGARES <-nrow(tbl)
    ACCESOTICS<-sum(tbl$tenenciaTIC=="Sí")
    dt<-tbl[,c(3,21,22,23,24,25)]
    return(list(dt,TotalEstudiantes,TOTALHOGARES,ACCESOTICS))
  })
  # Reactive: display columns only.
  # NOTE(review): this downloads and parses the same CSV a second time;
  # consider deriving it from mydata() instead.
  datatables <- reactive({
    inFile <- input$variable
    if (is.null(inFile))
      return(NULL)
    tbl <- read.csv(input$variable,header = TRUE,sep =";",dec=",",check.names= F,fileEncoding="latin1",stringsAsFactors=TRUE)
    dt<-tbl[,c(3,21,22,23,24,25)]
    return(dt)
  })
  # Summary table (DT widget) of the display columns.
  output$tbl = renderDT(
    datatables(),options = list(pageLength = 5,autoWidth = TRUE,lengthChange = FALSE)
  )
  # Bar chart of the variable picked in input$variableanalisis (a column
  # index into datatables(), supplied as a string).
  output$plot1 <- renderPlot({
    xval <- datatables()[,as.integer(input$variableanalisis)]
    df<-cbind(datatables(), xval)
    # NOTE(review): str(df) is leftover debugging output on the console.
    str(df)
    ggplot(df, aes(x = factor(xval),fill=xval)) +
      geom_bar(stat="count",fill="steelblue")+
      stat_count(geom = "text", colour = "white", size = 4.5,
                 aes(label = ..count..),position=position_stack(vjust=0.5))+
      labs(title = "",
           x = "CATEGORIA",
           y = "TOTAL",
           caption = "INEC")
    #barplot(c(1,2), col=c("#009999", "#0000FF"))
  })
  # -- KPI value boxes ------------------------------------------------------
  # NOTE(review): mydata()[2] / [3] / [4] use single-bracket indexing and so
  # return one-element *lists*; mydata()[[2]] etc. is presumably intended --
  # confirm formatC/format render these values as expected.
  output$value1 <- renderValueBox({
    valueBox(
      formatC(mydata()[2], format="d", big.mark=',')
      ,paste('Total de estudiantes')
      ,icon = icon("stats",lib='glyphicon')
      ,color = "purple")
  })
  output$value2 <- renderValueBox({
    valueBox(
      format(mydata()[3], format="d", big.mark='.'),
      'ToTal de hogares'
      ,icon = icon("home")
      ,color = "green")
  })
  output$value3 <- renderValueBox({
    valueBox(
      format(mydata()[4], format="d", big.mark='.'),
      'Acceso a TIC'
      ,icon = icon("fas fa-brain")
      ,color = "yellow")
  })
  # -- report template ------------------------------------------------------
  # Recover the R Markdown template by scraping the HTML of a GitHub "blob"
  # page, patch two lines of it, and write it to MBC_MDJIRD.Rmd for the
  # download handler below.
  # NOTE(review): fragile -- depends on GitHub's page markup; fetching the
  # raw-file URL would be more robust.
  url <- "https://github.com/adalrada/JIRD/blob/c6498def7529593393d2296798521ce809da127a/MBC_MDJIRD.csv"
  pagina <- read_html(url, encoding = "UTF-8")
  tables <- html_table(pagina, fill = TRUE)
  datafe <- tables[[1]]
  datafe$X2[6]<-" html_document: default"
  datafe$X2[8]<-" n: NA"
  write.table(datafe$X2,"MBC_MDJIRD.Rmd",na ="", row.names=FALSE,col.names = FALSE, append=FALSE, sep='\t', quote=FALSE)
  # Loading state for the conditional "busy" panel.
  # NOTE(review): input$n has no matching control in the UI -- confirm.
  output$plot <- renderPlot({
    Sys.sleep(2); hist(runif(input$n))
  })
  # Echo the coordinates of the last click on plot1.
  output$info <- renderText({
    paste0("x=", input$plot_click$x, "\ny=", input$plot_click$y)
  })
  # Preview of the uploaded file.
  # NOTE(review): input$header/sep/quote/disp have no matching UI controls,
  # and "contents" is not rendered anywhere in the UI -- likely dead code.
  output$contents <- renderTable({
    req(input$file1)
    tryCatch(
      {
        df <- read.csv(input$file1$datapath,
                       header = input$header,
                       sep = input$sep,
                       quote = input$quote)
      },
      error = function(e) {
        stop(safeError(e))
      }
    )
    if(input$disp == "head") {
      return(head(df))
    }
    else {
      return(df)
    }
  })
  # -- report download ------------------------------------------------------
  # Knit MBC_MDJIRD.Rmd (written above) to HTML, passing the uploaded file's
  # path as the Rmd parameter `n`.
  output$report <- downloadHandler(
    filename = "JIRDreport.html",
    content = function(file) {
      Sys.sleep(2)
      tempReport <- file.path(getwd(), "MBC_MDJIRD.Rmd")
      # Knit the document, passing in the `params` list, and eval it in a
      # child of the global environment (this isolates the code in the document
      # from the code in this app).
      rmarkdown::render(tempReport, output_file = file,
                        params = list(n=input$file1$datapath),
                        envir = new.env(parent = globalenv())
      )
    }
  )
}
# Launch the application.
shinyApp(ui, server)
|
f75ab7f3f2fac2891fdb9970501dc924486adfa2 | 3a2b15c469cf4778a100340bcc2cf2642edd37b0 | /man/tdeath_other.Rd | 59bd503696953c0ab720a5326bdbdbd982600df6 | [] | no_license | Qingys/MILC_backup | 9657aaf2267ffad568c8f8fa2772d3381f31a874 | cabc1bcbabf99cd250abf7b44f662138ed5a4f7d | refs/heads/master | 2023-03-17T01:53:43.623830 | 2014-02-18T00:00:00 | 2014-02-18T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,798 | rd | tdeath_other.Rd | \name{tdeath_other}
\alias{tdeath_other}
\title{Predict the age at death from a cause other than lung cancer
}
\description{Function to predict the age (years) at which a person may die from a cause other than lung cancer given age, gender and smoking intensity, when relevant.
}
\usage{
tdeath_other(u1, u2, status, covs_other)
}
\arguments{
\item{u1, u2}{random numbers from Unif[0,1] required for the simulation
}
\item{status}{smoking status ("never", "former", or "current" smoker)
}
\item{covs_other}{3-dimensional vector with values for the covariates (other than smoking status) related to death from other causes, i.e., age (years) at the beginning of the prediction period, gender, smoking intensity expressed as average number of cigarettes smoked per day.
}
}
\value{
An R-object of class "list" with the following six components:
[[1]]: random number u1 used in the simulation
[[2]]: random number u2 used in the simulation
[[3]]: index number of the time interval
[[4]]: time interval at which death from other causes may occur
[[5]]: age (years) at death from cause other than lung cancer
[[6]]: R-object of class "list" with the relevant CIF estimates
}
\note{
Components [[1]]-[[4]] and [[6]] are returned for testing purposes only.
}
\author{
Stavroula A. Chrysanthopoulou}
\seealso{\code{\link{current.other}, \link{former.other}, \link{never.other}, \link{tdeath_lung}}
}
\examples{
# Predict the age at death from a cause other than lung cancer for a man 52 years old
# who has never smoked.
data(current.other, former.other, never.other)
d.other <- tdeath_other(runif(1), runif(1), "never", c(52, "male", NA))
d.other[[1]]
d.other[[2]]
d.other[[3]]
d.other[[4]]
d.other[[5]]
d.other[[6]]
}
\keyword{Functions} |
faab46323a149034990970f6ea282d575c9aa5e6 | b1e1e81f1ae71dfe17a671eb3035332c9d4c36ed | /ODEs.R | 57e14bcaa3f7af773422546fade4c29f23c1cda1 | [] | no_license | ThomasLastName/Rudiments-of-Numerical-Algorithms | ee262ff4365ece644be2af6d0aaeabe74e8ebff9 | bda68ca4c57fca62b170c0c64e65952eec8ddbad | refs/heads/master | 2022-05-31T08:20:32.214990 | 2020-05-03T06:26:32 | 2020-05-03T06:26:32 | 260,302,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,878 | r | ODEs.R |
#
##
### the increment function in classical fourth order Runge-Kutta
##
#
# Advance the IVP y' = f(t, y) by one classical fourth-order Runge-Kutta
# step, from t = `from` (where y = `y.at.from`) to t = `to`; returns the
# approximation of y at `to`. Works componentwise for vector-valued y.
Increment.RK4 <- function(from, to, f, y.at.from) {
  step <- to - from
  slope1 <- step*f(from, y.at.from)
  slope2 <- step*f(from + 0.5*step, y.at.from + 0.5*slope1)
  slope3 <- step*f(from + 0.5*step, y.at.from + 0.5*slope2)
  slope4 <- step*f(to, y.at.from + slope3)
  # Weighted average of the four slope estimates (1, 2, 2, 1)/6.
  y.at.from + (slope1 + 2*slope2 + 2*slope3 + slope4)/6
}
#
##
###
#### classical Runge-Kutta of order four
###
##
#
# Fixed-grid classical fourth-order Runge-Kutta solver for y' = diff.eq(t, y)
# on `interval` with y(interval[1]) = initial.value.
#   - Steps on seq(interval[1], interval[2], step.size) unless explicit
#     `custom.points` are supplied.
#   - Scalar problems return a data.frame t/y(/dy) and can be plotted via a
#     Hermite spline (Cubic.Spline, defined elsewhere); 2-D systems are
#     plotted parametrically; results may also be assigned to `store` in the
#     global environment. The data frame is returned only when `print` is
#     TRUE (default: TRUE iff there are <= 10 nodes).
Runge.Kutta <- function(diff.eq, interval, initial.value, step.size=NULL, custom.points=NULL, plot=T, print=NULL, store=NULL, points=FALSE, add=FALSE, calc.dy=FALSE) {
  #
  # argument validation
  #
  Custom.Points <- custom.points
  dim <- length(initial.value)
  if (dim>2 & plot) warning('plot=T Will Be Ignored Because dim(initial.value)>2.')
  if (!is.function(diff.eq)) stop('f Must Be A Function Of Two Variables, Equal To The Derivative Of Its Second Argument, i.e., f(t,y)=dy/dt.')
  if (!(is.character(store) | is.null(store))) stop('store Argument Must Be a Character String or NULL.')
  if (length(interval)<2) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (!interval[2]>interval[1]) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (!is.numeric(interval)) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (length(interval)>2) warning('interval Has Length Greater Than Two; Places After Two Ignored.')
  if (!is.logical(plot)) stop('plot Must Be Logical.')
  if (!is.logical(add)) stop('add Must Be Logical.')
  if (!is.logical(calc.dy)) stop('calc.dy Must Be Logical.')
  if (!(is.logical(print) | is.null(print))) stop('print Should Be Either Logical Or NULL.')
  # Probe the derivative once to validate its codomain.
  test <- diff.eq(interval[1], initial.value)
  if (!length(test)==dim) stop('The Dimension Of The Codomain Of diff.eq Must Match The Dimension Of initial.value.')
  if (!is.null(dim(test))) stop('We Have !is.null(dim(test)) Where test <- diff.eq(interval[1], initial.value).')
  if (!is.numeric(test) & !is.complex(test)) stop('We Have !is.numeric(test) & !is.complex(test) Where test <- diff.eq(interval[1], initial.value).')
  if (plot & !calc.dy & dim==1) {
    # The Hermite spline used for plotting needs the derivative values.
    calc.dy <- TRUE
    # (typo fixed: "Construc" -> "Construct")
    warning('calc.dy Set To TRUE Automatically In Order To Construct The Hermite Spline.')
  }
  #
  # the algorithm
  #
  f <- diff.eq
  t <- interval[1]
  y <- initial.value
  dy <- test
  if (is.null(Custom.Points)) {custom <- FALSE} else {custom <- TRUE}
  if (dim>1) {
    # For systems, keep one growing vector per coordinate in lists y/dy.
    inc <- y
    incinc <- dy
    y <- list(inc[1])
    dy <- list(incinc[1])
    for (i in 1:dim) {
      y[[i]] <- inc[i]
      dy[[i]] <- incinc[i]
      names(y[[i]]) <- paste('y', i, sep='')
      names(dy[[i]]) <- paste('dy', i, sep='')
    }
  }
  if (custom) {
    if (!is.null(step.size)) warning('Argument step.size Ignored Because custom.points Was Supplied.')
    t <- Custom.Points
  } else {
    if (is.null(step.size)) step.size <- 0.01
    # (typo fixed: "xustom.points" -> "custom.points"; note this warning is
    # unreachable in this branch since Custom.Points is NULL here)
    if (!is.null(Custom.Points)) warning('Argument custom.points Ignored Because No Points Were Supplied.')
    h <- step.size
    t <- seq(interval[1], interval[2], h)
    # if (!(interval[2] %in% t)) t[length(t)+1] <- interval[2]
  }
  if (dim>1) {
    for (i in 2:length(t)) {
      inc <- Increment.RK4(t[i-1], t[i], f, inc)
      for (j in 1:dim) y[[j]][i] <- inc[j]
      if (calc.dy) {
        # BUG FIX: previously f(t, inc) passed the whole time grid instead of
        # the current node t[i].
        dee <- f(t[i], inc)
        for (j in 1:dim) dy[[j]][i] <- dee[j]
      }
    }
  } else {
    for (i in 2:length(t)) y[i] <- Increment.RK4(t[i-1], t[i], f, y[i-1])
    # NOTE(review): this evaluates f on whole vectors and so assumes f is
    # vectorized in both arguments -- confirm for non-vectorized f.
    if (calc.dy) dy <- f(t, y)
  }
  #
  # what to do with the results
  #
  if (
    plot & ( (dim==1 & any(is.complex(y))) | (dim>1 & any(is.complex(y[[1]]))) )
  ) {
    # Complex-valued solutions: fall back to special-purpose plots.
    plot <- FALSE
    if (dim>1) {
      warning('Algorithm Successful. Graph Not Easily Plotted Because Values Inhabit Complex Space With Dimension>1.')
    } else {
      all.imaginary <- !any(!Re(y)==0)
      all.real <- !any(!Im(y)==0)
      if (all.real) {
        plot <- TRUE
      }
      if (all.imaginary) {
        plot(t, Im(y), type='l', las=1, col=4,
             xlab = 'Domain Of The Solution',
             ylab = 'Imaginary Part (Simple Affine Linear Spline)',
             main = 'Approximate Solution (Pure Imaginary)')
      } else {
        plot(y, type='l', las=1, col=4,
             xlab = 'Real Part Of The Solution Function',
             ylab = 'Imaginary Part',
             main = 'Parametric Approximate (Complex) Solution')
        points(as.complex(initial.value))
      }
      abline(v=0, h=0, lty=3)
    }
  }
  if (dim==1) {
    if (calc.dy) {
      results <- data.frame(t=t, y=y, dy=dy)
    } else {
      results <- data.frame(t=t, y=y)
    }
    if (plot) {
      # Hermite spline through the nodes, using the computed derivatives.
      graph <- Cubic.Spline(t, y, dy, method='Hermite', print.all=T, plot=F)$graph
      if (add) {
        lines(graph, col=4)
      } else {
        plot(graph, type='l', las=1, col=4,
             xlab = 'Domain Of The Solution',
             ylab = 'Piecewise Hermite Cubic Spline',
             main = 'Approximate (Spline) Solution To The Diff. Eq.')
      }
      if (points) points(t,y)
      abline(v=0, h=0, lty=3)
    }
  } else {
    if (calc.dy) {
      results <- list(t=t, y=y, dy=dy)
    } else {
      results <- list(t=t, y=y)
    }
    if (plot) {
      # Parametric plot of the first two coordinates.
      if (add) {
        lines(y[[1]], y[[2]], col=3)
        points(initial.value[1], initial.value[2])
      } else {
        ploty1 <- as.numeric(y[[1]])
        ploty2 <- as.numeric(y[[2]])
        plot(ploty1, ploty2, type='l', las=1, col=4,
             xlab = 'First Coordinate Of Solution Function',
             ylab = 'Second Coordinate',
             main = 'Parametric Plot Of The Approximate Solution')
        points(initial.value[1], initial.value[2])
      }
      abline(v=0, h=0, lty=3)
    }
  }
  if (is.null(print)) {if (length(t)<=10) {print <- TRUE} else {print <- FALSE}}
  if (!is.null(store)) assign(store, results, envir = .GlobalEnv)
  if (print) return(results)
  #
  # end
  #
}
#
##
###
#### Runge-Kutta-Fehlberg
###
##
#
# Adaptive Runge-Kutta-Fehlberg 4(5) solver for y' = diff.eq(t, y) on
# `interval` with y(interval[1]) = initial.value.
#   - The step size h is adapted so that the embedded 4th/5th-order error
#     estimate per unit step stays below `tol`, clamped to
#     [min.step.size, max.step.size].
#   - Returns (when `print` is TRUE, or by default with <= 10 nodes) a
#     data.frame t/y(/dy) for scalar problems, a list for systems; may also
#     assign the result to `store` in the global environment and/or plot it.
Runge.Kutta.Fehlberg <- function(diff.eq, interval, initial.value, tol=1e-5, min.step.size=1e-4, max.step.size=0.1, plot=TRUE, points=FALSE, print=NULL, store=NULL, add=FALSE, calc.dy=TRUE) {
  #
  # argument validation
  #
  Norm <- function(x) sqrt(sum(abs(x)^2))
  dim <- length(initial.value)
  if (dim>2 & plot) warning('plot=T Will Be Ignored Because dim(initial.value)>2.')
  if (!is.function(diff.eq)) stop('f Must Be A Function Of Two Variables, Equal To The Derivative Of Its Second Argument With Respect To Its First Argument, i.e., f(t,y)=dy/dt.')
  if (!(is.character(store) | is.null(store))) stop('store Argument Must Be a Character String or NULL.')
  if (length(interval)<2) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (!interval[2]>interval[1]) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (!is.numeric(interval)) stop('Please Make interval of the Form c(a,b) where a<b.')
  if (length(interval)>2) warning('interval Has Length Greater Than Two; Places After Two Ignored.')
  if (interval[1]+max.step.size > interval[2]) stop('interval Is Too Short (i.e., interval[1]+max.step.size > interval[2]).')
  if (!is.numeric(max.step.size) | !max.step.size>0) stop('max.step.size Should Be A Small Positive Real Number.')
  if (missing(min.step.size)) {
    # Default minimum step is a quarter of the maximum step.
    min.step.size <- max.step.size/4
  } else {
    if (!is.numeric(min.step.size) | !min.step.size>0) stop('min.step.size Should Be A Small Positive Real Number.')
  }
  stopifnot(min.step.size<max.step.size)
  if (!is.logical(plot)) stop('plot Must Be Logical.')
  if (!is.logical(points)) stop('points Must Be Logical.')
  if (!is.logical(add)) stop('add Must Be Logical.')
  if (!(is.logical(print) | is.null(print))) stop('print Should Be Either Logical Or NULL.')
  if (tol<=0) stop('tol Must Be Positive (and should be small).')
  # Probe the derivative once to validate its codomain.
  # (typo fixed in the message below: "initial.vale" -> "initial.value")
  test <- diff.eq(interval[1], initial.value)
  if (!length(test)==dim) stop('The Dimension Of The Codomain Of diff.eq Must Match The Dimension Of initial.value.')
  if (!is.null(dim(test))) stop('We Have !is.null(dim(test)) Where test <- diff.eq(interval[1], initial.value).')
  if (!is.numeric(test) & !is.complex(test)) stop('We Have !is.numeric(test) & !is.complex(test) Where test <- diff.eq(interval[1], initial.value).')
  #
  # the algorithm
  #
  f <- diff.eq
  t <- interval[1]
  inc <- initial.value        # current state y(t[i])
  dee <- test                 # cached derivative f(t[i], inc), feeds K1
  h <- max.step.size
  i <- go <- 1
  y <- list(inc[1])
  dy <- list(dee[1])
  for (j in 1:dim) {
    y[[j]] <- inc[j]
    dy[[j]] <- dee[j]
    names(y[[j]]) <- paste('y', j, sep='')
    names(dy[[j]]) <- paste('dy', j, sep='')
  }
  while (go==1) {
    flag <- 1
    while (flag==1) {
      # RKF4(5) stages (Fehlberg coefficients).
      K1 <- h*dee
      K2 <- h*f(t[i]+.25*h, inc + .25*K1)
      K3 <- h*f(t[i]+.375*h, inc + .09375*K1 + .28125*K2)
      K4 <- h*f(t[i] + 12*h/13, inc + (1932*K1-7200*K2+7296*K3)/2197)
      K5 <- h*f(t[i]+h, inc + 439*K1/216 - 8*K2 + 3680*K3/513 - 845*K4/4104)
      K6 <- h*f(t[i]+0.5*h, inc - 8*K1/27 + 2*K2 - 3544*K3/2565 + 1859*K4/4104 - 11*K5/40)
      # Difference between the embedded 4th- and 5th-order results, per unit step.
      measure <- Norm( (K1/360 - 128*K3/4275 + 2197*K4/75240 + 0.02*K5 + 2*K6/55)/h )
      if (measure<tol & t[i]+h<interval[2]) {
        # Accept the step (4th-order advance).
        inc <- inc + 25*K1/216 + 1408*K3/2565 + 2197*K4/4104 - .2*K5
        for (j in 1:dim) y[[j]][i+1] <- inc[j]
        # BUG FIX: the derivative cache must be refreshed at the NEW node
        # f(t[i]+h, inc) (previously f(t[i], inc), lagging the time argument
        # by one step), and unconditionally -- it used to be skipped when
        # calc.dy=FALSE, leaving K1 based on a stale derivative forever.
        dee <- f(t[i]+h, inc)
        if (calc.dy) {
          for (j in 1:dim) dy[[j]][i+1] <- dee[j]
        }
        t[i+1] <- t[i]+h
        flag <- 0
        i <- i+1
      } else {
        # Reject: rescale h by the standard safety factor, clamped to [0.1, 4].
        delta <- 0.84*(tol/measure)^0.25
        if (delta <= 0.1) {h <- 0.1*h} else {if (delta>=4) {h <- 4*h} else {h <- delta*h}}
        if (h>max.step.size) h <- max.step.size
        if (h<min.step.size) h <- min.step.size
        if (t[i]+h > interval[2]) {
          # Final step: shrink h to land exactly on the right end point.
          h <- interval[2]-t[i]
          K1 <- h*dee
          K2 <- h*f(t[i]+.25*h, inc + .25*K1)
          K3 <- h*f(t[i]+.375*h, inc + .09375*K1 + .28125*K2)
          K4 <- h*f(t[i] + 12*h/13, inc + (1932*K1-7200*K2+7296*K3)/2197)
          K5 <- h*f(t[i]+h, inc + 439*K1/216 - 8*K2 + 3680*K3/513 - 845*K4/4104)
          K6 <- h*f(t[i]+0.5*h, inc - 8*K1/27 + 2*K2 - 3544*K3/2565 + 1859*K4/4104 - 11*K5/40)
          inc <- inc + 25*K1/216 + 1408*K3/2565 + 2197*K4/4104 - .2*K5
          # BUG FIX: evaluate at the end point (was f(t[i], inc)).
          dee <- f(t[i]+h, inc)
          for (j in 1:dim) y[[j]][i+1] <- inc[j]
          if (calc.dy) {
            for (j in 1:dim) dy[[j]][i+1] <- dee[j]
          }
          t[i+1] <- interval[2]
          go <- flag <- 0
          i <- i+1
        }
        # Forced step at the minimum size (guarded by flag so it cannot
        # double-step after the final step above).
        if (flag==1 & h==min.step.size) {
          K1 <- h*dee
          K2 <- h*f(t[i]+.25*h, inc + .25*K1)
          K3 <- h*f(t[i]+.375*h, inc + .09375*K1 + .28125*K2)
          K4 <- h*f(t[i] + 12*h/13, inc + (1932*K1-7200*K2+7296*K3)/2197)
          K5 <- h*f(t[i]+h, inc + 439*K1/216 - 8*K2 + 3680*K3/513 - 845*K4/4104)
          K6 <- h*f(t[i]+0.5*h, inc - 8*K1/27 + 2*K2 - 3544*K3/2565 + 1859*K4/4104 - 11*K5/40)
          inc <- inc + 25*K1/216 + 1408*K3/2565 + 2197*K4/4104 - .2*K5
          # BUG FIX: evaluate at the new node (was f(t[i], inc)).
          dee <- f(t[i]+h, inc)
          for (j in 1:dim) y[[j]][i+1] <- inc[j]
          if (calc.dy) {
            for (j in 1:dim) dy[[j]][i+1] <- dee[j]
          }
          t[i+1] <- t[i]+h
          flag <- 0
          i <- i+1
        }
      }
    }
  }
  # Collapse the one-coordinate lists to plain vectors for scalar problems.
  if (dim==1) {
    y <- c(y[[1]])
    if (calc.dy) dy <- c(dy[[1]])
  }
  #
  # what to do with the results
  #
  if (any(is.complex(y))) {
    plot <- FALSE
    if (dim>1) {
      # (typo fixed: "Successhul" -> "Successful")
      warning('Algorithm Successful. Graph Not Easily Plotted. Values Inhabit Complex Space With Dimension>1.')
    } else {
      all.imaginary <- !any(!Re(y)==0)
      all.real <- !any(!Im(y)==0)
      if (all.real) plot <- TRUE
      if (all.imaginary) {
        plot(t, Im(y), type='l', las=1, col=4,
             xlab = 'Domain Of The Solution',
             ylab = 'Imaginary Part (Simple Affine Linear Spline)',
             main = 'Approximate Solution (Pure Imaginary)')
      } else {
        plot(y, type='l', las=1, col=4,
             xlab = 'Real Part Of The Solution Function',
             ylab = 'Imaginary Part',
             main = 'Parametric Approximate (Complex) Solution')
        points(as.complex(initial.value))
      }
      abline(v=0, h=0, lty=3)
    }
  }
  if (dim==1) {
    if (calc.dy) {
      results <- data.frame(t=t, y=y, dy=dy)
    } else {
      results <- data.frame(t=t, y=y)
    }
    if (plot) {
      # Hermite spline through the nodes, using the computed derivatives.
      graph <- Cubic.Spline(t, y, dy, method='Hermite', print.all=T, plot=F, table=F)$graph
      if (add) {
        lines(graph, col=4)
      } else {
        plot(graph, type='l', las=1, col=4,
             xlab = 'Domain Of The Solution',
             ylab = 'Piecewise Hermite Cubic Spline',
             main = 'Approximate (Spline) Solution To The Diff. Eq.')
      }
      if (points) points(t,y)
      abline(v=0, h=0, lty=3)
    }
  } else {
    if (calc.dy) {
      results <- list(t=t, y=y, dy=dy)
    } else {
      results <- list(t=t, y=y)
    }
    if (plot) {
      # Parametric plot of the first two coordinates.
      if (add) {
        lines(y[[1]], y[[2]], col=3)
        points(initial.value[1], initial.value[2])
      } else {
        ploty1 <- as.numeric(y[[1]])
        ploty2 <- as.numeric(y[[2]])
        plot(ploty1, ploty2, type='l', las=1, col=4,
             xlab = 'First Coordinate Of Solution Function',
             ylab = 'Second Coordinate',
             main = 'Parametric Plot Of The Approximate Solution')
        points(initial.value[1], initial.value[2])
      }
      abline(v=0, h=0, lty=3)
    }
  }
  if (is.null(print)) {if (length(t)<=10) {print <- TRUE} else {print <- FALSE}}
  if (!is.null(store)) assign(store, results, envir = .GlobalEnv)
  if (print) return(results)
  #
  # end
  #
}
#
##
###
#### analytically solve a system of two first order ODEs
###
##
#
# Analytic solution of the linear ODE system y'(t) = A y(t) with
# y(initial.time) = initial.value, assuming A is diagonalizable.
#
# Returns a list with one data.frame per coordinate i; each row j gives the
# weight (`scale`) and rate (`exp`) of one exponential term, so that
#   y_i(t) = sum_j scale_j * exp(exp_j * t).
#
# Fixes over the original: the `for (i in 2:dim(A)[1])` assembly broke for
# 1x1 systems (2:1 counts down), and `c <- solve(...)` shadowed base::c.
Solve.Matrix.OIVP <- function(A, initial.time, initial.value) {
  stopifnot(is.matrix(A))
  stopifnot(dim(A)[1] == dim(A)[2])
  d <- dim(A)[1]
  ei <- eigen(A)
  lam <- ei$values
  v <- ei$vectors
  # Solve for the mixing coefficients `coef` from the initial condition:
  #   sum_j coef_j * v[, j] * exp(lam_j * t0) = initial.value,
  # i.e. C coef = initial.value with C[i, j] = v[i, j] * exp(lam_j * t0).
  C <- v * rep(exp(lam * initial.time), each = d)
  coef <- solve(C, initial.value)
  # The products coef_j * v[i, j] are invariant to eigenvector scaling.
  lapply(seq_len(d), function(i) data.frame(scale = coef * v[i, ], exp = lam))
}
# Turn the coefficient tables produced by Solve.Matrix.OIVP into a
# vector-valued function of (scalar) time t: coordinate i evaluates to
#   sum_j scale_ij * exp(exp_j * t).
#
# Fixes over the original: the confusing triple initialization
# `summands <- out <- dim <- length(asdf)` is gone, the leftover commented-out
# 2-D special case is removed, and each coordinate may now carry any number of
# exponential terms (the original assumed exactly length(asdf) terms).
Convert.2D <- function(asdf) {
  function(t) {
    vals <- lapply(asdf, function(coord) sum(coord$scale * exp(coord$exp * t)))
    unlist(vals)
  }
}
Solution.Space <- function(f, xlim=c(-5,5), ylim=c(-5,5), buf=0, gridx=NULL, gridy=NULL, add=FALSE, interval=c(0,2)) {
  # Sketch the two-dimensional solution space of a first-order ODE system by
  # integrating (via Runge.Kutta, defined elsewhere in this file) from every
  # point of a grid of starting values.
  #
  # f         : right-hand side of the system, passed through to Runge.Kutta.
  # xlim/ylim : extent of the starting grid (11 evenly spaced points per axis
  #             unless gridx/gridy are supplied explicitly).
  # buf       : extra margin added to the plot limits on every side.
  # add       : if TRUE, draw onto the current plot instead of starting one.
  # interval  : integration interval handed to Runge.Kutta.
  #
  # Fixes vs. the original: dead `gridspace` assignments removed; scalar `||`
  # instead of vectorized `|` in the if(); `seq_along` instead of 1:length;
  # "Scetch" typo in the plot title corrected.
  if (is.null(gridx) || is.null(gridy)) {
    x <- seq(xlim[1], xlim[2], (xlim[2]-xlim[1])/10)
    y <- seq(ylim[1], ylim[2], (ylim[2]-ylim[1])/10)
  } else {
    x <- gridx
    y <- gridy
  }
  xlim <- c(xlim[1]-buf, xlim[2]+buf)
  ylim <- c(ylim[1]-buf, ylim[2]+buf)
  if (!add) {
    plot.new()
    par(las=1)
    plot.window(xlim, ylim, las=1)
    axis(1)
    axis(2)
    title(xlab='First Coordinate Of Solution Function(s)')
    title(ylab='Second Coordinate')
    title(main='Sketch Of Two-Dimensional Solution Space')
  }
  # One trajectory per grid point, drawn onto the current device.
  for (i in seq_along(x)) {
    for (j in seq_along(y)) {
      Runge.Kutta(f, interval, c(x[i], y[j]), add=TRUE)
    }
  }
  box()
}
|
56c4be401518816139b2b719df809c7197a69d59 | 818348533cce918e2b466ad9ad86eb83b345e9cc | /R/parseFile.OSCURS.R | 8699afe7ebb3bb6d1db11d649202e4c2d8bdfe5a | [
"MIT"
] | permissive | wStockhausen/rOSCURS | 11b4d1bfd15718465d2f0fea337dada4056e30d1 | 76c755779293beeafcfbf074114b7f2cd88ca2f2 | refs/heads/master | 2023-05-01T00:34:31.173883 | 2023-04-19T15:59:33 | 2023-04-19T15:59:33 | 185,640,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,370 | r | parseFile.OSCURS.R | #'
#' @title Parse an OSCURS output trajectory file to a dataframe
#'
#' @description Function to parse an OSCURS output trajectory file to a dataframe.
#'
#' @param fn - name of file to parse
#' @param verbose - flag to print diagnostic info
#'
#' @return a list with elements "dfr" and "track". The latter is a spatial tibble using a sf line class to represent the track.
#'
#' @details Parses output text file from OSCURS run. The returned object is a list with elements "dfr" and "track". "dfr" is a dataframe with
#' each row representing a time and position along the track. "track" is a spatial tibble using an
#' sf line geometry class to represent the track.
#'
#' Requires packages \code{stringr}, \code{tibble} and \code{tmap_tools}.
#'
#' @export
#'
parseFile.OSCURS<-function(fn,
                           verbose=FALSE){
  # Read the whole OSCURS output file into a character vector, one line each.
  con<-file(fn,open="rt");
  txt<-readLines(con=con);
  close(con);
  n<-length(txt);  # NOTE(review): n is never used below
  # Header fields are taken from fixed line positions 1..6, splitting each
  # line on ":" or "," and keeping the second token.
  lat <- as.numeric(stringr::str_split(string=txt[1],pattern=":|,")[[1]][2]);
  lon <- 360 -as.numeric(stringr::str_split(string=txt[2],pattern=":|,")[[1]][2]);#converted to east longitude, 0-360
  yr <- as.numeric(stringr::str_split(string=txt[3],pattern=":|,")[[1]][2]);
  mn <- as.numeric(stringr::str_split(string=txt[4],pattern=":|,")[[1]][2]);
  dy <- as.numeric(stringr::str_split(string=txt[5],pattern=":|,")[[1]][2]);
  nd<-as.numeric(stringr::str_split(string=txt[6],pattern=":|,")[[1]][2]);
  # Pre-allocate date / lat / lon vectors: start point + nd+1 track records.
  dt<-vector(mode="character",length=nd+2);
  lt<-vector(mode="numeric",length=nd+2);
  ln<-vector(mode="numeric",length=nd+2);
  dt[1]<-paste(yr,mn,dy,sep="-");
  lt[1]<-lat;
  ln[1]<-lon;
  # Track records are assumed to start at file line 10 (txt[9+i]) -- TODO
  # confirm against the OSCURS output format if the service changes.
  for (i in 1:(nd+1)){
    trk<-stringr::str_split(string=txt[9+i],pattern='\\[|,|\\]|\\\"')[[1]];
    dt[i+1]<-stringr::str_sub(trk[3],1,10); #extract date as string
    lt[i+1]<-as.numeric(trk[5]); #extract lat
    ln[i+1]<-as.numeric(trk[6]); #extract lon as east longitude, 0-360
  }
  # Wrap longitudes through atan2 so 0-360 east longitude becomes -180..180.
  ln<-atan2(sin(2*pi*ln/360),cos(2*pi*ln/360))*360/(2*pi);#convert to east longitude, -180-180
  dt<-as.Date(dt,"%Y-%m-%d");#convert date strings to Dates
  dt[nd+2]<-dt[nd+2]+1; #need to round up on last date (position given at end of day)
  edt<-as.numeric(dt-dt[1]); #calculate elapsed time, in days
  # One row per time step; the start day/lat/lon columns are recycled so each
  # row also carries the track origin.
  dfr<-data.frame(dayStart=dt[1],latStart=lt[1],lonStart=ln[1],
                  date=dt,elapsedTime=edt,lat=lt,lon=ln,stringsAsFactors=FALSE);
  if (verbose) print(utils::head(dfr))
  # Build the sf line geometry from the (lon, lat) coordinate matrix.
  sfg.line<-sf::st_linestring(as.matrix(dfr[,c("lon","lat")]),dim="XY");
  if (verbose) print(sfg.line);
  if (verbose) cat("class = ",class(sfg.line),"\n");
  sfc.line<-sf::st_sfc(sfg.line);
  if (verbose) utils::str(tibble::tibble(dayStart=dt[1],latStart=lt[1],lonStart=ln[1],
                                         dayEnd=dt[nd+2],latEnd=lt[nd+2],lonEnd=ln[nd+2]));
  # Former tmaptools::append_data() construction, kept for reference:
  # tbl.track<-tmaptools::append_data(sfc.line,
  #                                   tibble::tibble(dayStart=dt[1],latStart=lt[1],lonStart=ln[1],
  #                                                  dayEnd=dt[nd+2],latEnd=lt[nd+2],lonEnd=ln[nd+2]),
  #                                   fixed.order=TRUE);
  # Single-row spatial tibble summarizing the track (start/end + geometry).
  tbl.track<-sf::st_sf(tibble::tibble(dayStart=dt[1], latStart=lt[1], lonStart=ln[1],
                                      dayEnd=dt[nd+2],latEnd=lt[nd+2],lonEnd=ln[nd+2],
                                      geometry=sfc.line));
  return(list(dfr=dfr,track=tbl.track));
}
|
ad583d9d4cea10b4ece67dc78314b4c27c7e0086 | ef33279f31f8b45cc610a581736264c61f19dbc3 | /appendix1.R | 9920c9f285c2c2bbfb9944cf847059dfbeaa7221 | [] | no_license | Jorge-Dona/cophylogenetic_extinction_rate | ba2685479cfa0f31ad5cd88cc90b4cf517517ccb | 0faa8173f3a1fc72ba9a7de1a0ae357ded3502ef | refs/heads/master | 2022-09-07T05:14:20.800191 | 2022-09-02T08:50:36 | 2022-09-02T08:50:36 | 208,768,344 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,049 | r | appendix1.R | # This function computes the “cophylogenetic extinction rate” (Ec), a statistics that uses data from event-based cophylogenetic analyses and might be informative to assess symbiont extinction risks.
# See Doña & Johnson 2020 for more details.
require(DescTools)
#Load the function
ec <- function(L, E, S) {
  # Cophylogenetic extinction rate: the binomial proportion of losses (L)
  # out of E + S trials, with a 95% modified-Wilson confidence interval
  # computed by DescTools::BinomCI.
  trials <- E + S
  ci <- BinomCI(L, trials, conf.level = 0.95, method = "modified wilson")
  # Same formatted string as before (paste's default separator is a space).
  paste("Ec=", ci[1], ";", "Lower CI=", ci[2], ";", "Upper CI=", ci[3])
}
#L = number of losses
#E = Total number of events resulting from the cophylogenetic reconstruction (i.e., Losses+switches+duplications, etc.)
#S = Number of host-switches
# Example of usage:
# L = 1; E = 10; S = 2
# 1. Load the function (note that modified wilson is used as the default method for computing the CI; modify the function if you want to use a different method).
# 2. For the number of events of this example, call it simply as follows:
# (1 loss out of 10 + 2 = 12 trials, i.e. Ec = 1/12)
ec(1,10,2)
# [1] "Ec= 0.0833333333333333 ; Lower CI= 0.00427444119896255 ; Upper CI= 0.353879911141117"
|
c7da561a2079d5d02ba03fc396df04d2a934362b | 9aa317973931936d668493359d4e6ce67de1fb0f | /Podatki/R_01_get_WBank_data_20200402.R | cbb27fcc86e3fed4b3aa62294355d514dbfd625c | [] | no_license | matjazjeran/Covid-19 | cb5879ea28df640d2afc4e24766307bbe1d7583c | dc0279d839a0a1aed42fe3c16429bdaf5311430a | refs/heads/master | 2021-05-23T13:57:38.368534 | 2020-04-21T11:05:00 | 2020-04-21T11:05:00 | 253,324,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,347 | r | R_01_get_WBank_data_20200402.R | # Original code from
# Tidying the new Johns Hopkins Covid-19 time-series datasets (old code before 30.03.2020)
# March 24, 2020 in R
# https://joachim-gassen.github.io/2020/03/tidying-the-new-johns-hopkins-covid-19-datasests/
# adapted for daily use on different platforms
# Matjaz Jeran 02.04.2020
# getting world bank data
# input: World Bank statistical data
# output: "jh_add_wbank_data.csv"
# "jh_add_wbank_data.Rdata"
# Script setup: clear the workspace and move to the data directory.
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged
# (they clobber any caller's session state); kept as-is for this standalone
# daily-run script.
rm (list = ls (all = TRUE))
### Warning: adjust working directory as needed for operating systems Windows, macOS, linux
if (Sys.info () ["sysname"] == "Windows") setwd ("C:/Moji Dokumenti/Corona/Podatki")
if (Sys.info () ["sysname"] == "Darwin") setwd ("/Users/matjaz/Corona/Podatki")
if (Sys.info () ["sysname"] == "Linux") setwd ("/home/matjaz/Corona/Podatki")
library(tidyverse)
library(wbstats)
# Output file names (written into the working directory set above).
wbank.file <- "jh_add_wbank_data.csv"
wbank.dfile <- "jh_add_wbank_data.RData"
pull_worldbank_data <- function(vars) {
  # Download the 10 most recent yearly observations of the requested World
  # Bank indicators and attach country metadata.
  #
  # vars: character vector of World Bank indicator IDs (e.g. "SP.POP.TOTL").
  # Returns a list of two data frames:
  #   [[1]] wb_data     - one row per country-year, one column per indicator
  #   [[2]] wb_data_def - variable name / definition / type table for wb_data
  #
  # NOTE(review): wb() and wbcache() belong to the legacy wbstats (< 1.0) API;
  # newer wbstats versions expose wb_data()/wb_cache() instead -- confirm the
  # installed version before reusing this.
  # (Change vs. original: the unused `all_vars` local was removed.)
  new_cache <- wbstats::wbcache()
  data_wide <- wb(indicator = vars, mrv = 10, return_wide = TRUE)
  # Indicator definitions for the requested variables only.
  new_cache$indicators[new_cache$indicators[,"indicatorID"] %in% vars, ] %>%
    dplyr::rename(var_name = indicatorID) %>%
    dplyr::mutate(var_def = paste(indicator, "\nNote:",
                                  indicatorDesc, "\nSource:", sourceOrg)) %>%
    dplyr::select(var_name, var_def) -> wb_data_def
  # Country-level metadata to join onto the indicator observations.
  new_cache$countries %>%
    dplyr::select(iso3c, iso2c, country, region, income) -> ctries
  dplyr::left_join(data_wide, ctries, by = "iso3c") %>%
    dplyr::rename(year = date,
                  iso2c = iso2c.y,
                  country = country.y) %>%
    dplyr::select(iso3c, iso2c, country, region, income, everything()) %>%
    dplyr::select(-iso2c.x, -country.x) %>%
    dplyr::filter(!is.na(NY.GDP.PCAP.KD),
                  region != "Aggregates") -> wb_data
  wb_data$year <- as.numeric(wb_data$year)
  wb_data_def <- dplyr::left_join(data.frame(var_name = names(wb_data),
                                             stringsAsFactors = FALSE),
                                  wb_data_def, by = "var_name")
  # The first six columns of wb_data are the metadata attached above; give
  # them readable definitions (order-sensitive by construction).
  wb_data_def$var_def[1:6] <- c(
    "Three letter ISO country code as used by World Bank",
    "Two letter ISO country code as used by World Bank",
    "Country name as used by World Bank",
    "World Bank regional country classification",
    "World Bank income group classification",
    "Calendar year of observation"
  )
  wb_data_def$type = c("cs_id", rep("factor", 4), "ts_id",
                       rep("numeric", ncol(wb_data) - 6))
  return(list(wb_data, wb_data_def))
}
# Indicators to pull: population, land area, density, largest-city population,
# life expectancy and GDP per capita (IDs per World Bank nomenclature).
vars <- c("SP.POP.TOTL", "AG.LND.TOTL.K2", "EN.POP.DNST", "EN.URB.LCTY", "SP.DYN.LE00.IN", "NY.GDP.PCAP.KD")
wb_list <- pull_worldbank_data(vars)
wb_data <- wb_list[[1]]
wb_data_def <- wb_list[[2]]
# Collapse the country-year panel to one row per country, keeping the most
# recent non-missing value of each indicator (rows are sorted by year first,
# so last(na.omit(...)) picks the latest observation).
wb_data %>%
  dplyr::group_by(iso3c) %>%
  dplyr::arrange(iso3c, year) %>%
  dplyr::summarise(
    population = last(na.omit(SP.POP.TOTL)),
    land_area_skm = last(na.omit(AG.LND.TOTL.K2)),
    pop_density = last(na.omit(EN.POP.DNST)),
    pop_largest_city = last(na.omit(EN.URB.LCTY)),
    gdp_capita = last(na.omit(NY.GDP.PCAP.KD)),
    life_expectancy = last(na.omit(SP.DYN.LE00.IN))
  ) %>% left_join(wb_data %>% select(iso3c, region, income) %>% distinct()) -> wb_cs
# Persist the country cross-section both as CSV and as an .RData file.
readr::write_csv(wb_cs, wbank.file)
save (wb_cs, file = wbank.dfile)
|
e563002610ff76aef353901ff8ad17dd0371677f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ads/examples/triangulate.Rd.R | 98a7e4686318503971557e6ea3b469b00812848d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 667 | r | triangulate.Rd.R | library(ads)
### Name: triangulate
### Title: Triangulate polygon
### Aliases: triangulate
### Keywords: spatial
### ** Examples
# Example usage of ads::triangulate on the bundled BPoirier forest-plot data.
data(BPoirier)
BP <- BPoirier
plot(BP$poly1$x, BP$poly1$y)
# a single polygon triangulation
tri1 <- triangulate(BP$poly1)
plot(swin(BP$rect, tri1))
# a single polygon with a hole (kept commented out in the original example)
#tri2 <- triangulate(c(-10,-10,120,100), BP$poly1)
#plot(swin(c(-10,-10,120,100), tri2))
# the same with narrower outer polygon
#tri3 <- lapply(BP$poly2,triangulate)
#tri3<-do.call(rbind,tri3)
#xr<-range(tri3$ax,tri3$bx,tri3$cx)
#yr<-range(tri3$ay,tri3$by,tri3$cy)
#plot(swin(c(xr[1],yr[1],xr[2],yr[2]), tri3))
|
9e0e8665e40653fd338c86fcb4f6259ab87b5357 | 71e663993cdc75945bae8db86e81f6b4fa71ead6 | /PCA logistic regression model.R | 7993d1d6fe37c639baa5e02c350a2001acc552a5 | [] | no_license | LDang47/VMWare_Customer-Enagement | 9bb41cca579eb31093681be64055e469d8e028dc | 06e2887c97d2f3bb7cfd1f11b41cde6ef523fc3f | refs/heads/master | 2022-11-22T23:52:55.101104 | 2020-07-27T16:50:59 | 2020-07-27T16:50:59 | 279,432,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,263 | r | PCA logistic regression model.R | rm(list = ls())
# Session options: effectively disable scientific notation and raise print
# limits for the wide VMWare feature table.
options(scipen = 99,digits = 10,max.print = 9999)
gc()
## Check if you have universal installer package, install if not
# (works because %in% binds tighter than ==, i.e. (x %in% y) == FALSE)
if("pacman" %in% rownames(installed.packages()) == FALSE) {install.packages("pacman")}
## Check, and if needed install the necessary packages
pacman::p_load("na.tools", "tidyverse", "caret", "dplyr", "DMwR", "ROCR", "lift", "FactoMineR", "dummies")
# Load the raw training / validation splits from absolute local paths.
# NOTE(review): machine-specific paths; adjust before rerunning elsewhere.
training <- read.csv("C:\\Users\\Alo-Ai day-Toi day\\Desktop\\MMA COURSE\\8. MMA 831 Marketing Analytics\\GRADED ASSIGNMENT\\Midterm Assignment_25Jul\\Data\\Training.csv")
validation <- read.csv("C:\\Users\\Alo-Ai day-Toi day\\Desktop\\MMA COURSE\\8. MMA 831 Marketing Analytics\\GRADED ASSIGNMENT\\Midterm Assignment_25Jul\\Data\\Validation.csv")
## Join datasets for data cleansing
dataset <- rbind(training, validation)
## Address missing values - NAs
na.cols <- which(colSums(is.na(dataset))>0)
sort(colSums(sapply(dataset[na.cols],is.na)), decreasing = TRUE)
paste('There are', length(na.cols), 'columns with missing values')
# Drop complete NAs columns - 3
# gu_ind_vmw_major_lookup & gu_ind_vmw_sub_category & ftr_first_date_seminar_page_view
vmware = subset(dataset, select =
                  -c(gu_ind_vmw_major_lookup, gu_ind_vmw_sub_category, ftr_first_date_seminar_page_view))
# Drop mostly NAs columns (that has over 95% NAs) - 8
vmware = subset(vmware, select =
                  -c(ftr_first_date_webinar_page_view, ftr_first_date_eval_page_view, ftr_first_date_whitepaper_download,
                     ftr_first_date_any_download, ftr_first_date_hol_page_view, hyperthreading_active_flag,
                     hv_replay_capable_flag, db_accountwatch))
## numeric NAs imputation
# check numeric features and categorical features with NAs
vmware_numeric <- select_if(vmware, is.numeric)
num_na.cols <- which(colSums(is.na(vmware_numeric))>0)
sort(colSums(sapply(vmware_numeric[num_na.cols],is.na)), decreasing = TRUE)
paste('There are', length(num_na.cols), 'numeric columns with missing values')
## integer NAs imputation
# check integer features with NAs
vmware_integer <- select_if(vmware, is.integer)
int_na.cols <- which(colSums(is.na(vmware_integer))>0)
sort(colSums(sapply(vmware_integer[int_na.cols],is.na)), decreasing = TRUE)
# fix integer NAs accordingly: recode two columns so the later fixNAs() pass
# treats them with the intended (numeric / factor) imputation rule.
vmware$gu_num_of_employees <- as.numeric(vmware$gu_num_of_employees)
vmware$highest_prodA_edition <- as.factor(vmware$highest_prodA_edition)
paste('There are', length(int_na.cols), 'integer columns with missing values')
## factor NAs imputation
# check factor features with NAs
vmware_factor <- select_if(vmware, is.factor)
fac_na.cols <- which(colSums(is.na(vmware_factor))>0)
sort(colSums(sapply(vmware_factor[fac_na.cols],is.na)), decreasing = TRUE)
paste('There are', length(fac_na.cols), 'factor columns with missing values')
# check categorical features with NAs
#paste('There are still', length(na.cols)-3-8-length(num_na.cols), 'categorical columns with missing values')
fixNAs <- function(data_frame) {
  # Impute missing values column-by-column and record which cells were imputed
  # in companion "<column>_impute" surrogate columns appended on the right
  # (only for columns that actually contained NAs).
  #
  # Imputation rules by column class:
  #   numeric   -> column mean; surrogate is numeric 0/1 (kept numeric on
  #                purpose so it can enter PCA directly, unlike the others)
  #   integer   -> 0;           surrogate is a "0"/"1" factor
  #   factor    -> "Missing" level; surrogate is a "0"/"1" factor
  #   character -> "Missing";   surrogate is a "0"/"1" factor
  #   Date      -> 1900-01-01;  surrogate is a "0"/"1" factor
  # Columns of any other class are returned untouched.
  #
  # Fixes vs. the original: class(x) %in% "..." produced a length > 1
  # condition for multi-class columns (e.g. POSIXct), which is an error under
  # R >= 4.2 -- inherits() avoids that; 1:ncol broke on zero-column frames.
  integer_reac <- 0
  factor_reac <- "Missing"
  character_reac <- "Missing"
  date_reac <- as.Date("1900-01-01")
  # Fix the column count up front so appended surrogates are not rescanned.
  n_col <- ncol(data_frame)
  for (i in seq_len(n_col)) {
    column <- data_frame[, i]
    missing <- is.na(column)
    if (!any(missing)) next
    flag_name <- paste0(colnames(data_frame)[i], "_impute")
    if (inherits(column, "numeric")) {
      data_frame[, flag_name] <- as.integer(missing)
      data_frame[missing, i] <- mean(column, na.rm = TRUE)
    } else if (inherits(column, "integer")) {
      data_frame[, flag_name] <- as.factor(ifelse(missing, "1", "0"))
      data_frame[missing, i] <- integer_reac
    } else if (inherits(column, "factor")) {
      # Round-trip through character so "Missing" can be inserted as a level.
      data_frame[, i] <- as.character(column)
      data_frame[, flag_name] <- as.factor(ifelse(missing, "1", "0"))
      data_frame[missing, i] <- factor_reac
      data_frame[, i] <- as.factor(data_frame[, i])
    } else if (inherits(column, "character")) {
      data_frame[, flag_name] <- as.factor(ifelse(missing, "1", "0"))
      data_frame[missing, i] <- character_reac
    } else if (inherits(column, "Date")) {
      data_frame[, flag_name] <- as.factor(ifelse(missing, "1", "0"))
      data_frame[missing, i] <- date_reac
    }
  }
  return(data_frame)
}
# Apply fixNAs function to the data to fix missing values
vmware <- fixNAs(vmware)
# check numeric features no longer has NAs and mean remains unchanged as expected per FAQ
summary(vmware$db_annualsales)
# impute flag equals NAs proportion
table(vmware$db_annualsales_impute)
# check for rare categorical features (motivates combinerarecategories below)
table(vmware$gu_state)
table(vmware$gu_state_impute)
## Create another custom function to combine rare categories into "Other."+the name of the original variable (e.g., Other.State)
# This function has two arguments: the name of the data frame and the count of observation in a category to define "rare"
combinerarecategories <- function(data_frame, mincount) {
  # Collapse rare factor levels into a single "Other.<column>" level.
  #
  # data_frame: data.frame whose factor columns should be cleaned up.
  # mincount:   levels observed fewer than `mincount` times are collapsed.
  # Returns the data frame with rare levels merged.
  #
  # Fixes vs. the original: non-factor columns are now skipped (the original
  # accidentally attached an empty "levels" attribute to them), and seq_len
  # replaces 1:ncol so a zero-column frame no longer errors.
  for (i in seq_len(ncol(data_frame))) {
    a <- data_frame[, i]
    if (!is.factor(a)) next
    replace <- names(which(table(a) < mincount))
    levels(a)[levels(a) %in% replace] <- paste("Other", colnames(data_frame)[i], sep = ".")
    data_frame[, i] <- a
  }
  return(data_frame)
}
# Apply combinerarecategories function to the data and then split it into testing and training data.
# combine categories with <20 values in vmware dataset into "Other"
vmware <- combinerarecategories(vmware,20)
## convert character to factor so PCA will work
vmware_cha <- select_if(vmware, is.character)
colnames(vmware_cha)
vmware <- vmware %>% dplyr::mutate_if(is.character,as.factor)
## cut cleansed dataset into half for training and validation
# NOTE(review): this row split assumes rbind(training, validation) preserved
# the original 50,006 / 50,006 row ordering -- confirm against the input files.
clean.training <- vmware[1:50006,]
clean.validation <- vmware[50007:100012,]
## rename clean.validation to vmware_ to be consistent on naming convention
vmware_validation <- clean.validation
########################
######################## DATA PROCESSING BEFORE FED INTO MODEL
## Create binary target for the entire dataset
# dummy_target is "1" when the count-valued target is at least 1, else "0".
vmware_df <- clean.training
vmware_df$dummy_target <- ifelse(clean.training$target >= 1, "1","0")
vmware_df$dummy_target <- as.factor(vmware_df$dummy_target)
vmware_df$target <- NULL
table(vmware_df$dummy_target)
# 0 1
# 48670 1336
## REMOVE variables that can cause data leakage: all variables start with "tgt"
vmware_df <- vmware_df %>% dplyr:: select(!starts_with("tgt"))
######################
###################### CORRELATION (Just on the train set)
## set a random number generation seed to ensure that the holdout split is the same every time
set.seed(1000)
# Stratified 80/20 split on the binary target (caret::createDataPartition).
inTrain <- createDataPartition(y = vmware_df$dummy_target,
                               p = 0.8, list = FALSE)
vmware_train <- vmware_df[ inTrain,]
vmware_test <- vmware_df[ -inTrain,]
## Correlation Matrix
# Identifying numeric variables
vmware_train_num <- vmware_train[sapply(vmware_train, is.numeric)]
# Remove numeric variables that have zero or near zero variance
# NOTE(review): `var(...) != 0` is an exact floating-point comparison; columns
# with numerically tiny but nonzero variance are kept.
vmware_train_num <- vmware_train_num[ , which(apply(vmware_train_num, 2, var) != 0)]
# Calculate correlation matrix
numCor <- cor(vmware_train_num)
# find attributes that are highly correlated (caret::findCorrelation, cutoff 0.7)
highlyCorrelated_num <- findCorrelation(numCor, cutoff = 0.7)
# Identifying Variable Names of Highly Correlated Variables
highlyCorCol <- colnames(vmware_train_num)[highlyCorrelated_num]
print(highlyCorCol)
##########################
########################## MODELLING: CATEGORICAL INCLUDED
# Remove the response variable: dummy target
vmware_df2 <- subset(vmware_df, select = -c(dummy_target))
# Only select categorical variables with less than 15 levels for easy one hot encoding
sapply(vmware_df2, function(col) length(unique(col)))
cat <- sapply(vmware_df2, is.factor) # Select categorical variables
vmware_df_cat <- Filter(function(x) nlevels(x)<15, vmware_df2[,cat]) # 34 vars
names <- colnames(vmware_df_cat)  # NOTE(review): shadows base::names
# Identify numeric variables
vmware_df_num <- vmware_df2[sapply(vmware_df2, is.numeric)]
# Remove highly correlated variables and create a new dataset.
vmware_df_num <- vmware_df_num[,-c(highlyCorrelated_num)]
str(vmware_df_num)
# Combine numeric variables and less than 15 levels categorical variables
vmware_df2 <- cbind(vmware_df_num, vmware_df_cat)
str(vmware_df2)
# One hot encoding on categorical variables in the new dataset (dummies pkg)
vmware_df3 <- dummy.data.frame(vmware_df2, names = c(names))
dummy_target <- vmware_df$dummy_target
vmware_df4 <- cbind(vmware_df3, dummy_target)
str(vmware_df4)
table(vmware_df4$dummy_target)
# 0 1
# 48670 1336
# Remove numeric variables that have zero or near zero variance
# NOTE(review): vmware_df4 still contains the factor dummy_target here, and
# apply() coerces the whole frame to a character matrix before var() --
# confirm this line runs as intended on the real data.
vmware_df4 <- vmware_df4[ , which(apply(vmware_df4, 2, var) != 0)]
str(vmware_df4)
## set a random number generation seed to ensure that the holdout split is the same every time
set.seed(1000)
inTrain <- createDataPartition(y = vmware_df4$dummy_target,
                               p = 0.8, list = FALSE)
vmware_train_2 <- vmware_df4[ inTrain,]
vmware_test_2 <- vmware_df4[ -inTrain,]
# Stash the targets and drop them so PCA sees predictors only.
dummy_target_train <- vmware_train_2$dummy_target
dummy_target_test <- vmware_test_2$dummy_target
vmware_train_2$dummy_target <- NULL
vmware_test_2$dummy_target <- NULL
# Transform features of Train Dataset into Principal Components. Apply PCA
# (centered and scaled so the one-hot and numeric columns are comparable).
pca = prcomp(vmware_train_2, center = TRUE, scale. = TRUE) # TOTAL: 488 PCs
summary(pca)
# Variance explained by each Principal Component
std_dev <- pca$sdev
pr_comp_var <- std_dev^2
pr_comp_var
# Ratio of Variance explained by each component
prop_var_ex <- pr_comp_var/sum(pr_comp_var)
prop_var_ex
# PCA Chart: cumulative variance explained, with 90%/95% reference lines.
plot(cumsum(prop_var_ex), xlab = "Principal Component",ylab = "Proportion of Variance Explained")
abline(h=0.9, col = "red", lwd=2) # 160 PCs
abline(h=0.95, col = "blue", lwd=2) # 196 PCs
text(34.86014235,0.98, "95 % Mark")
text(34.86014235,0.93, "90 % Mark")
# Concatenate Dependent variable and Principal Components
loadings <- as.data.frame(pca$x)
dummy_target <- dummy_target_train
pca_train <- cbind(loadings,dummy_target)
pca_train <- as.data.frame(pca_train)
# Creating Dataset having Principal Components
# Keep the first 196 PCs (~95% of variance per the chart above).
loadings2 <- loadings[1:196]
pca_train2 <- cbind(loadings2,dummy_target)
# Transform features of Test Dataset into Principal Components
# Create a full dataset with all PCs
pca_test <- predict(pca, newdata = vmware_test_2)
pca_test <- as.data.frame(pca_test)
# Create a test set with only 196 PCs
pca_test2 <- pca_test[1:196]
dummy_target <- dummy_target_test
pca_test3 <- cbind(pca_test2,dummy_target)
str(pca_train2)
str(pca_test3)
## SMOTE to handle imbalance dataset in Binary Classification (DMwR::SMOTE;
## oversample minority 10x, undersample majority to 3x the synthetic count)
smote_pca_train <- SMOTE(dummy_target ~., pca_train2, perc.over = 1000 , k = 5, perc.under = 300)
summary(smote_pca_train$dummy_target)
# 0 1
# 32070 11759
### Initializing and Fitting Logistic Regression Model
# (third positional argument is `family`)
model_logistic <-glm(dummy_target ~., data=smote_pca_train, binomial("logit"))
summary(model_logistic)
# Looking at the Variable Importance table
varImp(model_logistic, scale = TRUE)
# Make predictions on the test data
logistic_probabilities <- predict(model_logistic, newdata= pca_test3, type="response")
# Translate probabilities to predictions
# NOTE(review): the 0.02669733 threshold below is the hard-coded value of this
# prevalence; recompute rather than hard-code if the data changes.
mean(pca_test3$dummy_target == "1")
logistic_classification <- ifelse(logistic_probabilities > 0.02669733, "1", "0")
logistic_classification <- as.factor(logistic_classification)
pca_test3$dummy_target <- as.factor(pca_test3$dummy_target)
# Model Accuracy
observed_classes <- pca_test3$dummy_target
mean(logistic_classification == observed_classes) # 0.8927107289
###Confusion matrix
confusionMatrix(logistic_classification,pca_test3$dummy_target,positive = "1")
# Reference
# Prediction 0 1
# 0 8669 8
# 1 1065 259
# Sensitivity : 0.97003745
# Specificity : 0.89058969
####ROC Curve
logistic_ROC_prediction <- prediction(logistic_probabilities, pca_test3$dummy_target)
logistic_ROC <- performance(logistic_ROC_prediction,"tpr","fpr") #Create ROC curve data
plot(logistic_ROC) #Plot ROC curve
####AUC (area under curve)
auc.tmp.logit <- performance(logistic_ROC_prediction,"auc") #Create AUC data
logistic_auc_testing <- as.numeric(auc.tmp.logit@y.values) #Calculate AUC
logistic_auc_testing # AUC value 0.9303135694
#### Lift chart
plotLift(logistic_probabilities, pca_test3$dummy_target, cumulative = TRUE, n.buckets = 10) # Plot Lift chart
###############################
############################### Fit the model into Validation dataset
vmware_validation <- clean.validation
## Create binary target for the entire dataset
vmware_validation$dummy_target <- ifelse(vmware_validation$target >= 1, "1","0")
vmware_validation$dummy_target <- as.factor(vmware_validation$dummy_target)
dummy_target <- vmware_validation$dummy_target
vmware_validation$target <- NULL
table(vmware_validation$dummy_target)
# 0 1
# 48601 1405
## RESHAPE THE VALIDATION SET BEFORE FED TO THE MODEL
# The validation frame must go through the same column selection / one-hot
# pipeline as training so predict(pca, ...) sees identical columns.
# Create a list of column names from pre-one hot encoding training data
names1 <- colnames(vmware_df2)
# Match the column names with the validation set to get the list of index for column order
idx <- match(names1, names(vmware_validation))
# Create a new validation set with same column names as the training set
vmware_validation2 <- vmware_validation[,idx] # 449 vars
str(vmware_validation2)
# Identify categorical variables
vmware_val_cat <- vmware_validation2[sapply(vmware_validation2, is.factor)] # 34 vars
names2 <- colnames(vmware_val_cat)
# One hot encoding on categorical variables in the new dataset
vmware_validation3 <- dummy.data.frame(vmware_validation2, names = c(names2)) # 551 vars
# Create a list of column names from post-one hot encoding training data
names3 <- colnames(vmware_test_2)
# Match the column names with the validation set to get the list of index for column order
idx1 <- match(names3, names(vmware_validation3))
# Create a new validation set with same column names as the training set
vmware_validation4 <- vmware_validation3[,idx1]
str(vmware_validation4) # 488 vars
# Transform features of Validation Dataset into Principal Components
# Create a full dataset with all PCs
pca_val <- predict(pca, newdata = vmware_validation4)
pca_val <- as.data.frame(pca_val)
# Create a validation set with only 196 PCs
pca_val2 <- pca_val[1:196]
dummy_target <- vmware_validation$dummy_target
pca_val3 <- cbind(pca_val2,dummy_target)
str(pca_val3)
# Make predictions on the validation data
logistic_probabilities_val <- predict(model_logistic, newdata= pca_val3, type="response")
# Translate probabilities to predictions
# (same hard-coded prevalence threshold as the test-set section above)
logistic_classification_val <- ifelse(logistic_probabilities_val > 0.02669733, "1", "0")
logistic_classification_val <- as.factor(logistic_classification_val)
pca_val3$dummy_target <- as.factor(pca_val3$dummy_target)
# Model Accuracy
observed_classes_val <- pca_val3$dummy_target
mean(logistic_classification_val == observed_classes_val) # 0.8936327641
###Confusion matrix
confusionMatrix(logistic_classification_val, pca_val3$dummy_target,positive = "1")
# Reference
# Prediction 0 1
# 0 45276 34
# 1 3325 1371
# Sensitivity : 0.97153025
# Specificity : 0.89138084
####ROC Curve
logistic_ROC_prediction_val <- prediction(logistic_probabilities_val, pca_val3$dummy_target)
logistic_ROC_val <- performance(logistic_ROC_prediction_val,"tpr","fpr") #Create ROC curve data
plot(logistic_ROC_val) #Plot ROC curve
####AUC (area under curve)
auc.tmp.logit_val <- performance(logistic_ROC_prediction_val,"auc") #Create AUC data
logistic_auc_testing_val <- as.numeric(auc.tmp.logit_val@y.values) #Calculate AUC
logistic_auc_testing_val # AUC value 0.9314555424
#### Lift chart
plotLift(logistic_probabilities_val, pca_val3$dummy_target, cumulative = TRUE, n.buckets = 10) # Plot Lift chart
|
5e243d32c704ff6dfdf31b82a86cecb116c220c7 | b1f59ceea9d75220d3667abb6417ae07312079f1 | /analysis/lib/varpart.sqr.euc_functions.R | 07ebfc87d0bc09e1f2374c26219fa66ccfcbcb67 | [] | no_license | Vileu/omrgc_v2_scripts | efa960c1429da3af2752f25b1a2b8ea1d756a116 | 2bcb70a7d20d59126ecf0802de1eb2c73172ef1c | refs/heads/master | 2022-04-13T00:50:51.044263 | 2020-03-31T08:02:50 | 2020-03-31T08:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,511 | r | varpart.sqr.euc_functions.R | # Distance functions
sqr.euc.dist <- function(taula, dim.div = TRUE) {
  #
  # Pairwise squared Euclidean distance between the rows of `taula`, divided
  # by the number of columns (dimensions) when dim.div is TRUE (the default).
  # Returns an n x n symmetric matrix named after rownames(taula).
  #
  # Replaces the original O(n^2) R-level sapply() double loop with one
  # Gram-matrix expansion: ||x - y||^2 = x.x + y.y - 2 x.y. Values are
  # mathematically identical; NAs in `taula` still propagate to the result.
  #
  taula <- as.matrix(taula)
  n.dim <- if (dim.div) ncol(taula) else 1
  gram <- tcrossprod(taula)   # gram[x, y] = row_x . row_y
  d <- diag(gram)             # squared norms of the rows
  res <- (outer(d, d, "+") - 2 * gram) / n.dim
  rownames(res) <- rownames(taula)
  colnames(res) <- rownames(taula)
  res
}
sqr.euc.int <- function(taula1, taula2, dim.div = TRUE) {
  #
  # Pairwise "interaction" term between two row-matched matrices:
  #   res[x, y] = sum( 2 * (taula1[y,] - taula1[x,]) * (taula2[y,] - taula2[x,]) ) / n.dim
  # Returns an n x n symmetric matrix named after rownames(taula1).
  #
  # Replaces the original O(n^2) R-level sapply() double loop with one
  # cross-product expansion; because the term depends only on row differences,
  # both matrices are column-centered first to keep the expansion numerically
  # well conditioned. Values are mathematically identical.
  #
  taula1 <- as.matrix(taula1)
  taula2 <- as.matrix(taula2)
  # Mismatched dimensions previously relied on silent recycling; fail loudly.
  stopifnot(nrow(taula1) == nrow(taula2), ncol(taula1) == ncol(taula2))
  n.dim <- if (dim.div) ncol(taula1) else 1
  taula1 <- sweep(taula1, 2, colMeans(taula1))
  taula2 <- sweep(taula2, 2, colMeans(taula2))
  cross <- tcrossprod(taula1, taula2)   # cross[x, y] = row_x(taula1) . row_y(taula2)
  d <- diag(cross)                      # row_x(taula1) . row_x(taula2)
  # 2 * (a_y - a_x).(b_y - b_x) = 2 * (d[x] + d[y] - cross[x, y] - cross[y, x])
  res <- 2 * (outer(d, d, "+") - cross - t(cross)) / n.dim
  rownames(res) <- rownames(taula1)
  colnames(res) <- rownames(taula1)
  res
}
sqr.euc.int.aprox<-function(taula1,taula2,dim.div=T){
  # Approximate version of the pairwise interaction term: scales a squared
  # distance by the Pearson correlation of the two row-difference vectors.
  taula1<-as.matrix(taula1)
  taula2<-as.matrix(taula2)
  if (dim.div==T) n.dim<-ncol(taula1) else n.dim<-1
  # NOTE(review): both dist() factors below use taula1; the exact term in
  # sqr.euc.int pairs taula1 differences with taula2 differences, so the
  # second factor was presumably meant to be dist(taula2) -- confirm intent
  # before relying on this function.
  tmp<-2*as.matrix(dist(taula1))*as.matrix(dist(taula1))
  # NOTE(review): on the diagonal both difference vectors are all-zero, so
  # cor() returns NA there (0 * NA = NA on the result's diagonal).
  tmp2<-sapply(1:nrow(taula1),function(y){sapply(1:nrow(taula1),function(x){cor((taula1[y,]-taula1[x,]),(taula2[y,]-taula2[x,]))})})/n.dim
  res<-tmp*tmp2
  rownames(res)<-rownames(taula1)
  colnames(res)<-rownames(taula1)
  res
}
varpart.sqr.euc.mean<-function(mat.T,mat.G,mat.E,tol=1E-09){
  #
  # Partition the mean pairwise squared Euclidean distance of mat.T into
  # Abundance (mat.G), Expression (mat.E) and interaction components.
  # Requires mat.T = mat.G + mat.E row-wise, so that for every sample pair
  # ||dT||^2 = ||dG||^2 + ||dE||^2 + 2 dG.dE holds (checked below).
  #
  # tol: maximum absolute deviation allowed in that identity check.
  # Returns list(components, components.norm), each a length-3 named vector.
  #
  # Fixes vs. the original: the identity check now honors the `tol` argument
  # (it compared against a hard-coded 1E-09) and uses the absolute deviation
  # (the signed max missed negative deviations).
  #
  # Helper copies are nested on purpose so the function is self-contained.
  sqr.euc.dist<-function(taula,dim.div=T){
    taula<-as.matrix(taula)
    if (dim.div==T) n.dim<-ncol(taula) else n.dim<-1
    res<-sapply(1:nrow(taula),function(y){sapply(1:nrow(taula),function(x){sum((taula[y,]-taula[x,])^2)})})/n.dim
    rownames(res)<-rownames(taula)
    colnames(res)<-rownames(taula)
    res
  }
  sqr.euc.int<-function(taula1,taula2,dim.div=T){
    taula1<-as.matrix(taula1)
    taula2<-as.matrix(taula2)
    if (dim.div==T) n.dim<-ncol(taula1) else n.dim<-1
    res<-sapply(1:nrow(taula1),function(y){sapply(1:nrow(taula1),function(x){sum(2*(taula1[y,]-taula1[x,])*(taula2[y,]-taula2[x,]))})})/n.dim
    rownames(res)<-rownames(taula1)
    colnames(res)<-rownames(taula1)
    res
  }
  # Compute distance components (lower-triangle "dist" objects).
  cat("Computing square euclidean distance of mat.T \n")
  res.T<-as.dist(sqr.euc.dist(mat.T))
  cat("Computing square euclidean distance of mat.G \n")
  res.G<-as.dist(sqr.euc.dist(mat.G))
  cat("Computing square euclidean distance of mat.E \n")
  res.E<-as.dist(sqr.euc.dist(mat.E))
  cat("Computing interaction component\n")
  res.int<-as.dist(sqr.euc.int(mat.G,mat.E))
  # Check that res.T = res.G + res.E + res.int within tol.
  if (max(abs(c(res.T)-(c(res.G)+c(res.E)+c(res.int))))>tol) {
    plot(res.T,res.G+res.E+res.int)
    stop("The equality 'metaT=Abundance+Expression+Interaction' is not met with tolerance = ",tol,"\n Make sure that the input matrices are correct.")}
  # Compute means over all sample pairs and normalize to proportions.
  res<-c(mean(res.G),mean(res.E),mean(res.int))
  names(res)<-c("Abundance","Expression","interaction")
  #res.norm<-res/sum(abs(res))
  res.norm<-res/sum(res)
  list(components=res,components.norm=res.norm)
}
varpart.sqr.euc.all<-function(mat.T,mat.G,mat.E,tol=1E-09){
  #
  # Per-sample-pair partition of the squared Euclidean distance of mat.T into
  # Abundance (mat.G), Expression (mat.E) and interaction components.
  # Requires mat.T = mat.G + mat.E row-wise (checked below) and that
  # dplyr/tidyr are attached (mutate/gather/filter are called unqualified).
  # Returns list(components, components.norm): long data frames with one row
  # per unordered sample pair (sample1, sample2, then the three components).
  #
  # Fixes vs. the original: the identity check now honors the `tol` argument
  # (it compared against a hard-coded 1E-09) and uses the absolute deviation
  # (the signed max missed negative deviations).
  #
  # Helper copies are nested on purpose so the function is self-contained.
  sqr.euc.dist<-function(taula,dim.div=T){
    taula<-as.matrix(taula)
    if (dim.div==T) n.dim<-ncol(taula) else n.dim<-1
    res<-sapply(1:nrow(taula),function(y){sapply(1:nrow(taula),function(x){sum((taula[y,]-taula[x,])^2)})})/n.dim
    rownames(res)<-rownames(taula)
    colnames(res)<-rownames(taula)
    res
  }
  sqr.euc.int<-function(taula1,taula2,dim.div=T){
    taula1<-as.matrix(taula1)
    taula2<-as.matrix(taula2)
    if (dim.div==T) n.dim<-ncol(taula1) else n.dim<-1
    res<-sapply(1:nrow(taula1),function(y){sapply(1:nrow(taula1),function(x){sum(2*(taula1[y,]-taula1[x,])*(taula2[y,]-taula2[x,]))})})/n.dim
    rownames(res)<-rownames(taula1)
    colnames(res)<-rownames(taula1)
    res
  }
  # Compute distance components
  cat("Computing square euclidean distance of mat.T \n")
  res.T<-sqr.euc.dist(mat.T)
  cat("Computing square euclidean distance of mat.G \n")
  res.G<-sqr.euc.dist(mat.G)
  cat("Computing square euclidean distance of mat.E \n")
  res.E<-sqr.euc.dist(mat.E)
  cat("Computing interaction component\n")
  res.int<-sqr.euc.int(mat.G,mat.E)
  # Keep each unordered pair once: blank out the diagonal and upper triangle
  # so the gather/filter step below keeps only the lower triangle.
  diag(res.G)<-NA
  res.G[upper.tri(res.G)]<-NA
  diag(res.T)<-NA
  res.T[upper.tri(res.T)]<-NA
  diag(res.E)<-NA
  res.E[upper.tri(res.E)]<-NA
  diag(res.int)<-NA
  res.int[upper.tri(res.int)]<-NA
  # Reshape each matrix to long format: (sample1, sample2, value).
  res.T<-mutate(as.data.frame(res.T),sample1=rownames(as.data.frame(res.T))) %>% gather("sample2","metaT",-sample1) %>% filter(!is.na(metaT))
  res.G<-mutate(as.data.frame(res.G),sample1=rownames(as.data.frame(res.G))) %>% gather("sample2","Abundance",-sample1) %>% filter(!is.na(Abundance))
  res.E<-mutate(as.data.frame(res.E),sample1=rownames(as.data.frame(res.E))) %>% gather("sample2","Expression",-sample1) %>% filter(!is.na(Expression))
  res.int<-mutate(as.data.frame(res.int),sample1=rownames(as.data.frame(res.int))) %>% gather("sample2","interaction",-sample1) %>% filter(!is.na(interaction))
  # Check that res.T = res.G + res.E + res.int within tol.
  if (max(abs(res.T$metaT-(res.G$Abundance+res.E$Expression+res.int$interaction)))>tol) {
    plot(res.T$metaT,res.G$Abundance+res.E$Expression+res.int$interaction)
    stop("The equality 'metaT=Abundance+Expression+Interaction' is not met with tolerance = ",tol,"\n Make sure that the input matrices are correct.")}
  # Per-pair components, plus a row-normalized version.
  res<-cbind(res.G,Expression=res.E$Expression,interaction=res.int$interaction)
  #res.norm<-cbind(res[,1:2],t(apply(res[,3:5],1,function(x){x/sum(abs(x))})))
  res.norm<-cbind(res[,1:2],t(apply(res[,3:5],1,function(x){x/sum(x)})))
  list(components=res,components.norm=res.norm)
}
|
37152215698f11609236371100da9e40fe36bf39 | d9c8cb891c7bd2250ecad8116f8863adac9fcd4e | /R/estimateMSVmetrics.R | 11d67e7242a32447fc8f45de1f87d2726e012b71 | [
"Apache-2.0"
] | permissive | carsaesi/EHRsourceVariability | 866eeab1508ec1611b3982a920b4dc35101a5353 | 2ab3f3575d6e79384a034d52d120c9f4bace366d | refs/heads/master | 2020-05-25T06:20:18.681077 | 2019-05-21T11:31:14 | 2019-05-21T11:31:14 | 187,666,467 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,560 | r | estimateMSVmetrics.R | # Copyright 2019 Biomedical Data Science Lab, Universitat Polit?cnica de Val?ncia (Spain)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Estimates the GPD and SPO multi-source variability metrics and the corresponding source projection vertices from a matrix of probability distributions of different data sources
#'
#' @name estimateMSVmetrics
#' @rdname estimateMSVmetrics-methods
#' @param probabilities m-by-n matrix containing the probability mass of n data sources
#' on m distribution bins
#' @return A list containing the following results. GPD: the value of the Global Probabilistic
#' Deviation metric, where 0 means equal distributions and 1 means non-overlapping distributions.
#' SPOs: the values of Source Probabilistic Outlyingness for each data source, where 0 means equal
#' to central tendency and 1 completely non-overlapping. Vertices: a n-by-(n-1) matrix containing
#' the coordinates of each data source in the projected probabilistic space conserving their
#' dissimilarities, e.g, for 3D projections the 3 first columns can be used
#' @export
estimateMSVmetrics <- function(probabilities) {
  # probabilities: m-by-n matrix; column i holds the probability mass of
  # data source i over the m distribution bins.
  ns <- ncol(probabilities)
  # The metrics need at least two sources; the original `1:(ns-1)` loop
  # would silently index out of bounds when ns < 2.
  stopifnot(ns >= 2)
  # Pairwise Jensen-Shannon *distance* (sqrt of the divergence) between
  # every pair of sources; the matrix is symmetric with a zero diagonal.
  distsM <- matrix(0, nrow = ns, ncol = ns)
  for (i in seq_len(ns - 1)) {
    for (j in (i + 1):ns) {
      distsM[i, j] <- sqrt(jsdiv(probabilities[, i], probabilities[, j]))
      distsM[j, i] <- distsM[i, j]
    }
  }
  # Classical MDS projection that preserves the pairwise dissimilarities.
  vertices <- cmdscale(distsM, eig = FALSE, k = ns - 1)
  # Euclidean distance of each projected source to the cloud's centroid.
  # (renamed from `c`, which shadowed the base concatenation function)
  centroid <- colSums(vertices) / ns
  centered <- sweep(vertices, 2, centroid)
  dc <- apply(centered, 1, norm, type = "2")
  # GPD: mean centroid distance normalised by distc(ns); 0 = identical
  # distributions, 1 = completely non-overlapping.
  gpdmetric <- mean(dc) / distc(ns)
  # SPO per source: centroid distance normalised by its maximum, 1 - 1/ns.
  sposmetrics <- dc / (1 - (1 / ns))
  list(GPD = gpdmetric, SPOs = sposmetrics, Vertices = vertices)
}
# Jensen-Shannon divergence (base 2) between two probability vectors.
# Zero-probability bins yield NaN terms (0 * -Inf); na.rm drops them,
# matching the convention 0 * log(0) = 0. Ranges from 0 (p == q) to 1.
jsdiv <- function(p, q) {
  log_mix <- log2((p + q) / 2)
  kl_p <- sum(p * (log2(p) - log_mix), na.rm = TRUE)
  kl_q <- sum(q * (log2(q) - log_mix), na.rm = TRUE)
  (kl_p + kl_q) / 2
}
# Helper: evaluates sin((pi - acos(-1/D)) / 2) / sin(acos(-1/D)),
# vectorised over D. At D == 1, acos(-1) = pi makes the denominator 0,
# so that singular case is mapped explicitly to 0.5.
distc <- function(D) {
  angle <- acos(-1 / D)
  out <- sin((pi - angle) / 2) / sin(angle)
  out[D == 1] <- 0.5
  out
}
|
d741baada1d1a6dc15aa445e851fa6c83175bef1 | bb3d5006868d5f3251ed0e3cee2020f27ec9215a | /man/get_bipartite_nodeset.Rd | 4a1594fded7895543689f180a5c7226027c3d7b5 | [] | no_license | jmw86069/multienrichjam | 133398bf24612bd708eb4fc1246fe891f571b798 | 92730d106893e1e62af5d6f924bb7393f412d704 | refs/heads/master | 2023-07-25T00:10:22.585074 | 2023-07-06T15:46:32 | 2023-07-06T15:46:32 | 139,867,348 | 12 | 2 | null | null | null | null | UTF-8 | R | false | true | 3,952 | rd | get_bipartite_nodeset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jamgraph-edgebundle.R
\name{get_bipartite_nodeset}
\alias{get_bipartite_nodeset}
\title{Get partite/connected graph nodesets}
\usage{
get_bipartite_nodeset(
g,
type = "nodeType",
set_nodes = NULL,
sep = ",",
return_type = c("list", "df"),
verbose = FALSE,
...
)
}
\arguments{
\item{g}{\code{igraph} object that contains one attribute column with
node type.}
\item{type}{\code{character} string of the node/vertex attribute that
represents the node type.}
\item{set_nodes}{\code{character} or \code{NULL}, which contains the set
of node neighbors for the requested nodeset. For example,
one might want all nodes that connect with \code{c("A", "B", "C")}.
When \code{set_nodes=NULL} then all nodesets are returned.}
\item{sep}{\code{character} string used as a delimiter between
node names when defining a nodeset name}
\item{return_type}{\code{character} string indicating the type of
data to return:
\itemize{
\item \code{"list"} returns a list of nodesets, each element in the \code{list}
is a \code{character} vector with node names.
\item \code{"df"} returns a \code{data.frame} with more detailed annotation
for each node, including nodesets, neighbor nodes, etc.
}}
\item{verbose}{\code{logical} indicating whether to print verbose output.}
\item{...}{additional arguments are ignored.}
}
\description{
Get partite/connected graph nodesets defined by shared connections
}
\details{
This method is under development, the intent is to bundle
edges where a large subset of nodes are all connected to
the same node neighbors. A typical graph may not have any
two nodes with the same neighbors, but this situation tends
to happen much more often with bipartite graphs,
where nodes of one type are only permitted to have node
neighbors of the other type. It is not required for this
method to work, however.
The driving scenario is with Cnet (concept network) plots,
which is a bipartite network with \code{"Gene"} and \code{"Set"} nodes.
It is fairly common to have multiple genes present in the
same one or few pathways. As a result, these nodes are
most often positioned near each other as a natural
by-product of having the same connected neighbor nodes.
Identifying a nodeset with identical node neighbors enables
some other useful operations:
\itemize{
\item re-positioning, rotating, expanding, compressing the
whole nodeset layout to improve network graph aesthetics,
node label readability, reducing overlaps
\item edge bundling to improve visual distinctiveness between
multiple nodesets
}
}
\seealso{
Other jam igraph functions:
\code{\link{cnet2df}()},
\code{\link{cnet2im}()},
\code{\link{cnetplotJam}()},
\code{\link{cnetplot_internalJam}()},
\code{\link{color_edges_by_nodegroups}()},
\code{\link{color_edges_by_nodes_deprecated}()},
\code{\link{color_edges_by_nodes}()},
\code{\link{color_nodes_by_nodegroups}()},
\code{\link{communities2nodegroups}()},
\code{\link{drawEllipse}()},
\code{\link{edge_bundle_bipartite}()},
\code{\link{edge_bundle_nodegroups}()},
\code{\link{enrichMapJam}()},
\code{\link{fixSetLabels}()},
\code{\link{flip_edges}()},
\code{\link{igraph2pieGraph}()},
\code{\link{jam_igraph}()},
\code{\link{jam_plot_igraph}()},
\code{\link{label_communities}()},
\code{\link{layout_with_qfrf}()},
\code{\link{layout_with_qfr}()},
\code{\link{mem2emap}()},
\code{\link{memIM2cnet}()},
\code{\link{mem_multienrichplot}()},
\code{\link{nodegroups2communities}()},
\code{\link{rectifyPiegraph}()},
\code{\link{relayout_with_qfr}()},
\code{\link{removeIgraphBlanks}()},
\code{\link{removeIgraphSinglets}()},
\code{\link{reorderIgraphNodes}()},
\code{\link{rotate_igraph_layout}()},
\code{\link{spread_igraph_labels}()},
\code{\link{subgraph_jam}()},
\code{\link{subsetCnetIgraph}()},
\code{\link{subset_igraph_components}()},
\code{\link{sync_igraph_communities}()},
\code{\link{with_qfr}()}
}
\concept{jam igraph functions}
|
bc9da921d7a6208e032c18d59da1e4a31d1e0431 | 8166e672f3c7a57f8e52bce0f32799e41de165a9 | /scripts/9_Cereals_Inference_under_Parallel.R | 662a7372a6f9be39a757d743f6582921d6677b7d | [] | no_license | mnavascues/DAR_ABC | 6cde8de4f7d5a91e8299afd84edf9f6fad1983b1 | 23e5d86466bcdb03839527f529651f42d9c12575 | refs/heads/master | 2023-07-07T10:02:09.973109 | 2023-06-29T07:40:58 | 2023-06-29T07:40:58 | 545,404,223 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,665 | r | 9_Cereals_Inference_under_Parallel.R | library(abcrf)
# ABC random-forest inference under the "parallel" model for the cereals
# dataset: load the precomputed reference table, fit one regression RF per
# parameter, and plot the posterior lambda skyline to PDF.
source("../../Misc_R_tools/color.R")
source("scripts/sim14c.R")
load(file = "results/num_of_sims.rda")
# load target (i.e. observed) summary statistics
load(file = "results/Cereals_sumstats.rda")
# lead reference tables
load(file = "results/Cereals_parallel_model_reftable.rda")
# Drop simulations with a missing count, or a count of exactly 1, in either
# category (degenerate simulations).
reftable = reftable[!is.na(reftable$count_A),]
reftable = reftable[reftable$count_A!=1,]
reftable = reftable[!is.na(reftable$count_B),]
reftable = reftable[reftable$count_B!=1,]
load(file = "results/Cereals_time_range_BP.rda")
# One skyline period per 400 years of the studied time range.
num_of_periods = round((time_range_BP[1] - time_range_BP[2]) / 100 / 4)
skyline_years = get_time_of_change(num_of_periods, time_range_BP,
                                   intervals = "regular")
# Fit the random forests only when no saved posterior exists (slow step).
if ( !file.exists("results/Cereals_parallel_posterior.rda") ){
  sumstats = rbind(reftable[names(cereals_sumstats_2_categories)])
  # reminder:
  #   logit : log(x/(1-x))
  #   inverse logit : exp(x)/(1+exp(x))
  # pi
  # pi is inferred on the logit scale so the RF works on unbounded support.
  logit_pi = log(reftable["pi"]/(1-reftable["pi"]))
  names(logit_pi) = "logit_pi"
  RF_lambda = regAbcrf(logit_pi~., data.frame(logit_pi,sumstats),
                       ntree = 1000, paral = TRUE)
  posterior_logit_pi = predict(RF_lambda, cereals_sumstats_2_categories,
                               training = data.frame(logit_pi,sumstats),
                               paral = TRUE, rf.weights = FALSE)
  # lambda
  # One RF per skyline period; lambda is inferred on the log10 scale.
  lambda_error = rep(NA,length(skyline_years))
  lambda_hat = rep(NA,length(skyline_years))
  lambda_95low = rep(NA,length(skyline_years))
  lambda_95upp = rep(NA,length(skyline_years))
  for (i in seq_along(skyline_years)){
    param_name_A = paste0("lambda",skyline_years[i],"_A")
    param_name_B = paste0("lambda",skyline_years[i],"_B")
    param_index_A = which(names(reftable)==param_name_A)
    param_index_B = which(names(reftable)==param_name_B)
    # Target parameter: total rate (A + B) for this period, log10 scale.
    param = log10(reftable[param_index_A]+reftable[param_index_B])
    names(param) = "param"
    RF_lambda = regAbcrf(param~., data.frame(param,sumstats),
                         ntree = 1000, paral = TRUE)
    posterior_lambda = predict(RF_lambda, cereals_sumstats_2_categories,
                               training = data.frame(param,sumstats),
                               paral = TRUE, rf.weights = FALSE)
    lambda_error[i] = RF_lambda$model.rf$prediction.error
    # Back-transform the point estimate and 95% interval from log10 scale.
    lambda_hat[i] = 10^(posterior_lambda$med[1])
    lambda_95low[i] = 10^(posterior_lambda$quantiles[1])
    lambda_95upp[i] = 10^(posterior_lambda$quantiles[2])
  }
  save(skyline_years, posterior_logit_pi,
       lambda_error, lambda_hat, lambda_95low, lambda_95upp,
       file="results/Cereals_parallel_posterior.rda")
}
load(file = "results/Cereals_parallel_posterior.rda")
# Back-transform pi from the logit scale (inverse logit).
posterior_pi_median = exp(posterior_logit_pi$med)/(1+exp(posterior_logit_pi$med))
posterior_pi_quantiles = exp(posterior_logit_pi$quantiles)/(1+exp(posterior_logit_pi$quantiles))
load(file = "results/Cereals_spd.rda")
load(file = "results/Cereals_time_range_BP.rda")
# Plot the lambda skyline (median plus 95% CI, log y axis) to PDF.
pdf(file="results/Cereals_parallel_model_result.pdf", width=10, height=5)
par(mar=c(4.5, 4.5, 1, 1) + 0.1)
#plot(triticum_spd$grid$calBP, triticum_spd$grid$PrDens, xlim = time_range_BP, ylim = c(0.0001, 1), log = "y", type="l", xlab="Years cal BP", ylab=expression(lambda), col="grey", lwd = 2)
plot(skyline_years, lambda_hat, xlim = time_range_BP, ylim = c(0.0001, 1), log = "y",
     type="l", xlab="Years cal BP", ylab=expression(lambda), col = PCI_blue, lwd = 2)
lines(skyline_years, lambda_95low, lty = 2, lwd = 2, col = PCI_blue)
lines(skyline_years, lambda_95upp, lty = 2, lwd = 2, col = PCI_blue)
# NOTE(review): this annotated pi estimate is hard-coded -- confirm it
# matches posterior_pi_median / posterior_pi_quantiles computed above.
text(3000,0.0006,expression(hat(pi)*"=0.40 (0.22, 0.50)"), cex=1.5)
dev.off()
|
9c1eb7b1b8db030e161db9247695fe3fda51143f | 1efcd5467fff4f149fe88d3a5b275b4648a7cdda | /fake-files/clean/02_clean-demographics.R | f328c5ace9653ed9c61545fd4330cc149c49a8aa | [] | no_license | sharlagelfand/sharstudioconf | 17df24869d4daaebd59d5b4ef867099b814551a3 | 19adf23d699ffe3df61b991807370654aeecf1d5 | refs/heads/master | 2020-12-20T13:09:40.871923 | 2020-01-30T00:32:30 | 2020-01-30T00:32:30 | 236,086,370 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | 02_clean-demographics.R | read demographics data
# Template placeholder: analysis_year is injected by the reporting pipeline
# (whisker/mustache-style triple braces), so this file is not runnable as-is.
analysis_year <- {{{ analysis_year }}}
# Check for any cases where province is "Ontario" but outside_ontario is TRUE. Some of them may have province wrong, some may have outside_ontario wrong.
# Email Carla and ask to check paper files to confirm which is correct
# NOTE(review): this filter KEEPS only the contradictory rows (province "ON"
# AND outside_ontario TRUE), i.e. it selects the cases to be verified.
demographics_data <- demographics_data %>%
  filter(province == "ON",
         outside_ontario)
|
b6a5cf005b152a63a059c83f03da020c91a83367 | 583616a7d61e83e8a23779456421689818bb8f60 | /tools/heinz/bum.R | ae64e6b807efae084163c2139ac0cbedc28da14b | [
"MIT"
] | permissive | thobalose/tools-iuc | ea3bcc7bfb49da160e3ff1710c4ecbdf5c73664b | f337505deb06b66961f77a64bfae244f88c6a865 | refs/heads/master | 2020-04-25T15:07:36.647154 | 2019-08-29T10:47:28 | 2019-08-29T10:47:28 | 172,867,113 | 1 | 0 | MIT | 2019-02-27T11:50:35 | 2019-02-27T07:36:52 | HTML | UTF-8 | R | false | false | 1,136 | r | bum.R | # Author: Cico Zhang
# Usage: Rscript bum.R --input p-values.txt --output result.txt --verbose TRUE
# Fits a beta-uniform mixture (BUM) model to a table of p-values with
# BioNet::fitBumModel and writes the two fitted parameters (lambda, a)
# to the output file, one value per line.
# Set up R error handling to go to stderr
options(show.error.messages=F, error=function(){cat(geterrmessage(),file=stderr());q("no",1,F)})
# Avoid crashing Galaxy with an UTF8 error on German LC settings
#loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
# Import required libraries
suppressPackageStartupMessages({
  library('getopt')
  library('BioNet')
})
# Take in trailing command line arguments
args <- commandArgs(trailingOnly = TRUE)
# Get options using the spec as defined by the enclosed list
# Read the options from the default: commandArgs(TRUE)
# getopt spec columns: long name, short flag, argument flag (2 = required
# argument), type.
option_specification <- matrix(c(
  'input', 'i', 2, 'character',
  'output', 'o', 2, 'character'
), byrow=TRUE, ncol=4);
# Parse options
# NOTE: `options` here shadows base::options() used above; harmless since
# base options() is not called again, but worth renaming eventually.
options <- getopt(option_specification);
pvals <- read.table(options$input)
bum <- fitBumModel(pvals,plot=FALSE)
# Keep only the two BUM parameters: mixing weight lambda and shape a.
mat <- c(bum$lambda, bum$a)
#bumtablename <- paste(options$output,sep="\t")
write.table(x=mat, file=options$output,quote=FALSE, row.names=FALSE, col.names=FALSE)
message ("Parameters have been estimated successfully!")
|
9caa40be6f294c5ade480cb221db5c6dd1535af1 | 5c09b66c0bd8fb0f7b3bb93d9a039810a0702e47 | /R/Basic/14 - list.r | b29f36251ec271643bb61860cd9d3f007069cf59 | [] | no_license | leandrocotrim/curso_R_PY | a4ccb1020c7aa33dd4a38a0b1d3fe41e9f44028e | 5a9844e9dbd4f765837ea25dee489866ad51bbd1 | refs/heads/master | 2020-06-15T00:15:40.035284 | 2019-08-16T11:08:24 | 2019-08-16T11:08:24 | 195,161,930 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 603 | r | 14 - list.r | # object which contain any element multiply
# use function list(): lists are heterogeneous containers that may hold
# elements of any type, including vectors.
l1 = list(1, "aa", TRUE, c(0, 2, 3))
print(l1)
# names: elements can be named at construction time
cat("\nnames\n")
l2 = list("number" = 4, "list" = c(1, 10))
print(l2)
cat("\norder names\n")
names(l2)
# rename elements (note: names<- relabels in place, it does NOT reorder them)
cat("\nchange position\n")
names(l2) <- c("list", "number")
print(l2)
# access elements by name with $
cat("\naccess elemente\n")
l3 = list(TRUE, 2, "name" = "Cotrim")
print(l3$name)
# merging lists: c() concatenates two lists into one longer list
cat("\nconcat in vector lists\n")
c(list(4,5,6), list(1,2,3))
# list to vector
cat("\nuse function unlist\n")
# unlist coerces all elements to one common type (character here)
v1 = unlist(list(1,2,3,"Cotrim"))
print(class(v1))
print(v1)
|
fc69077131b2f860c4e32719656f23b64d301025 | 4438de5c07b9221dffff16db98ffa74789e77316 | /cachematrix.R | 816ac608f05371e09ccc8f5bf9ce4d792e04892e | [] | no_license | bhabishyat/ProgrammingAssignment2 | cc30c684f22de5cae24ede067c5b5aaadb9c2b6d | 75ba5d880db5a7282cc86d1ef35dffba5f16618b | refs/heads/master | 2016-08-11T21:34:53.917759 | 2016-02-06T18:52:38 | 2016-02-06T18:52:38 | 51,216,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,200 | r | cachematrix.R | ## These functions are used to save the matrix and inverse matrix and use the saved matrix.
## This function saves and returns the matrix and its inverse.
## Constructor for a matrix wrapper able to memoise its inverse.
## Returns a list of accessor closures sharing one environment:
##   set/get store and read the matrix; setinverse/getinverse store and
##   read the cached inverse. Storing a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
    cached <- NULL
    list(
        set = function(y) {
            x <<- y
            cached <<- NULL  # old inverse no longer applies
        },
        get = function() x,
        setinverse = function(inv) cached <<- inv,
        getinverse = function() cached
    )
}
## This function uses the cached value of the inverse if already present or sets the inverse to cache for later use.
## Return the inverse of the special "matrix" built by makeCacheMatrix,
## computing it at most once: a cached value is reused (with a message),
## otherwise solve() runs and the result is stored back on the wrapper.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
    inv <- x$getinverse()
    if (is.null(inv)) {
        inv <- solve(x$get(), ...)
        x$setinverse(inv)
    } else {
        message("getting cached data")
    }
    inv
}
|
d6baea68e7eb232cca2daa707785ed9d5776457c | 7efe470604aad14761329a20ea1848dd188ca93a | /Getting and Cleaning Data/Week3/Quiz3_Question3_4_and_5.R | 2572c0f7c7ab672cdb9ea5f106cf60fa8dbac232 | [] | no_license | sintecs/Coursera_Data_Science_Homework | 35980db2d786cb2a44b28cd27adb7eebb812e855 | 8c66282413abf0ceb6a595a5c7d7e2d5436f471e | refs/heads/master | 2020-05-31T20:05:43.432940 | 2015-08-22T17:34:17 | 2015-08-22T17:34:17 | 40,052,645 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,134 | r | Quiz3_Question3_4_and_5.R | ## Question 3
# Coursera "Getting and Cleaning Data", Quiz 3 (questions 3-5):
# merge the World Bank GDP ranking with the education stats table,
# then summarise by income group and by ranking quintile.
gdp.url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv'
gdp2.url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv'
# NOTE(review): setwd() in a script is fragile -- this path is machine-specific.
setwd('Programming/R/Getting and Cleaning Data/Week3/')
download.file(gdp.url, 'fgdp.csv', method = 'libcurl')
download.file(gdp2.url, 'fedstats.csv', method = 'libcurl')
fedstats <- read.csv('fedstats.csv')
# Skip the 4-line header; keep the 190 ranked countries of the GDP table.
fgdp <- read.csv('fgdp.csv', skip = 4, nrows = 190)
# Strip thousands separators so the GDP column becomes numeric.
fgdp[, "X.4"] <- as.numeric(gsub(',', '', as.character(fgdp[, "X.4"])))
##fgdp[, "X.1"] <- as.numeric(gsub(',', '', as.character(fgdp[, "X.1"])))
merged.data <- merge(fedstats, fgdp, by.y = "X", by.x = "CountryCode")
# Count countries with a non-missing ranking (column X.1) after the merge.
merged.logical <- is.na(merged.data[, "X.1"])
length(merged.logical) - sum(merged.logical)
## 189?
library(plyr)
arrange(merged.data, X.4)
## Question 4
library(dplyr)
# Average GDP ranking per income group.
temp.data <- group_by(merged.data, Income.Group)
summarize(temp.data, meanx.1 = mean(X.1))
## Question 5
# Cut the ranking into quintiles and cross-tabulate with income group.
gdp.groups = cut(merged.data$X.1, breaks = quantile(merged.data[, "X.1"], c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0)))
table(gdp.groups, merged.data[, "Income.Group"])
quantile(merged.data[, "X.1"]) |
767b5d61c6e4646f2235fc0085a51b135f127929 | 63f10e3e20abdbe467deb3c19d224a93ed0318c3 | /plot2.R | b01900d80891428cf00177e58d2cc6e1f776c726 | [] | no_license | HugoFram/ExData_Plotting1 | 1f91fb637b6e4c8cc00bd2d8538603a74447803d | 6971ae4137a74dbb9d05ba918eb2a72050541c45 | refs/heads/master | 2020-03-27T17:52:08.824978 | 2018-08-31T14:41:43 | 2018-08-31T14:41:43 | 146,880,418 | 0 | 0 | null | 2018-08-31T10:57:51 | 2018-08-31T10:57:51 | null | UTF-8 | R | false | false | 559 | r | plot2.R | # Download, read and filter data for the assignment
# readData.R presumably defines `householdPowerConsumption` (with columns
# DateAndTime and Global_active_power) -- it is the only prior step here.
source("readData.R")
# Open png device
png("plot2.png", width = 480, height = 480)
# Set local time to english so that dates are printed in english
Sys.setlocale("LC_TIME", "English")
# Plot Global Active Power over time
# (line plot; x label deliberately blank per the assignment's target figure)
with(householdPowerConsumption, plot(DateAndTime, Global_active_power,
                                     type = "l",
                                     xlab = "",
                                     ylab = "Global Active Power (kilowatts)"))
# Save the plot as a PNG
dev.off() |
56038d28078a5557b96472d8edde0819d9e95c90 | 3e68564f4897ff9d7ee07c4bd8c6a4afda2db1db | /cachematrix.R | b327e85a9453165b89eafe6e9fef6a4abf20df0c | [] | no_license | salmanhaider94/ProgrammingAssignment2 | ee39bca1ac2f694a2a0e87457457ad044715e246 | 1695a4aaf9f7cf5e898edebe3241eff03f045264 | refs/heads/master | 2020-04-03T20:28:07.654495 | 2018-10-31T12:26:22 | 2018-10-31T12:26:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,222 | r | cachematrix.R | ##Basically matrix inverse_matrixersion is a costly computation and rather than repeatdely doing that particular time cosuming task
##we could make functions that will cache the inverse of a matrix
## This function is to create a special matrix object
## that will cache its inverse.
## Build a special "matrix" object: a list of closures over a shared
## environment holding the matrix `x` and a memoised inverse.
## set(y) replaces the matrix and invalidates the cache; get() returns it;
## setInverse()/getInverse() write and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    memo <- NULL  # cached inverse; NULL means "not computed yet"
    replace_matrix <- function(y) {
        x <<- y
        memo <<- NULL  # invalidate: the cache belonged to the old matrix
    }
    list(set = replace_matrix,
         get = function() x,
         setInverse = function(inverse) memo <<- inverse,
         getInverse = function() memo)
}
## This cacheSolve function will calculate the inverse of a special matrix created by the function above.
##It will retrieve the inverse from the cache if the inverse has already been calculated and the matrix is unchanged.
## Inverse of a special "matrix" built by makeCacheMatrix, computed lazily:
## reuse the memoised value when present (emitting a message), otherwise
## invert with solve(), store the result on the wrapper, and return it.
## Extra arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
    result <- x$getInverse()
    if (is.null(result)) {
        result <- solve(x$get(), ...)
        x$setInverse(result)
    } else {
        message("getting cached data")
    }
    result
}
getwd()
|
dc47c60fb58bbaacb992f24566ce4f2bfd6a00f6 | c744f8c5401d2ca760d22d556fa2ecbebde39a56 | /rkvstore/man/rkv_store_iterator.Rd | 14865beeb3f62bc3d4c1e1e256a22ce658865bb3 | [
"Apache-2.0"
] | permissive | sshyran/oracle-nosql-drivers | 398816164de9a7507f7363ffbc868dfd73b85761 | 1f421d9ac561ee7ac4b37e084b1e97ae67f562b5 | refs/heads/master | 2022-03-13T23:52:03.170425 | 2016-04-07T14:42:14 | 2016-04-07T14:42:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,546 | rd | rkv_store_iterator.Rd | % File rnosql/man/rkv_store_iterator.Rd
\name{rkv_store_iterator}
\alias{rkv_store_iterator}
\title{Returns an iterator that provides traversal of descendant key/value pairs.}
\description{
Returns an iterator that provides traversal of descendant key/value pairs associated with the parent_key.
}
\usage{
rkv_store_iterator(store, key=NULL, start=NULL, end=NULL, keyonly=FALSE)
}
\arguments{
\item{store}{(kvStore object) The store parameter is the handle to the store, it is obtained using rkv_open_store(). }
\item{key}{(kvKey object) The key parameter is the parent key whose "child" records are to be retrieved. It may be null to fetch all records in the store. If non-null, the major key path must be a partial path and the minor key path must be empty.}
\item{start}{(string) The start parameter defines the lower bound of the key range. If NULL, no lower bound is enforced. }
\item{end}{(string) The end parameter defines the upper bound of the key range. If NULL, no upper bound is enforced. }
\item{keyonly}{(logic) This flag indicates that if only return keys or key/value pairs: TRUE - keyOnly, FALSE - key/value pairs. By default, it is FALSE. }
}
\value{
(kvIterator boject) Return a kvIterator object.
}
\examples{
iterator <- rkv_store_iterator(store)
while(rkv_iterator_next(iterator)) {
rkey <- rkv_iterator_get_key(iterator)
rvalue <- rkv_iterator_get_value(iterator)
print(rkv_get_key_uri(rkey))
print(rkv_get_value(rvalue))
}
rvk_release_iterator(iterator)
}
\seealso{
\code{\link{rkv_multiget_iterator}}.
} |
3238dfc17dc4f2090bdd0a3e4cd4353f22d16335 | fb72c93c681e581afc7ea6dad9dce6afbe6d69f2 | /HW4/hw4.R | c12ea9daae1b56d90e115be60a785e2e83a43c16 | [] | no_license | OliverXUZY/HPC_HTC-STAT605-fall19 | 515dd51475c51d274b1aff7795f7e2f3a69dd4e4 | 5d4d0ea69b59018c3eb9052d70f72ab304763ae6 | refs/heads/master | 2023-07-31T23:06:23.667114 | 2021-10-03T05:14:58 | 2021-10-03T05:14:58 | 214,275,444 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,881 | r | hw4.R | rm(list=ls())
args = (commandArgs(trailingOnly=TRUE))
# Expect exactly two arguments: the template spectrum file and the
# directory of noisy spectra to search.
if(length(args) == 2){
  template = (args[1])
  directory = (args[2])
} else {
  cat('usage: Rscript hw4.R <template spectrum> <data directory> \n', file=stderr())
  stop()
}
### hw2 code for one file
library('astro')
###
# cB58: template spectrum; column 2 is the flux used for matching below.
cB58 = read.fitstab(template)
#noisy = read.fitstab('data/spec-7424-57160-0977.fits')
# NOTE(review): obs is hard-coded to 1000 ("for test") but is used below to
# index the per-file results -- should presumably be length(files); confirm.
obs = 1000 ## for test
files = list.files(directory)
# For one noisy spectrum file, slide the 2181-sample template window along
# the spectrum and record, at each offset i, the scaled RMS difference
# (MSEi) and the correlation (corri) between template and spectrum.
# Reads cB58 (template) and directory from the enclosing script scope.
# Returns list(index, MSE, corr): the best offset(s) by minimum distance
# and by maximum correlation, with the scores at those offsets.
ext2 = function(fil){
  sig_flux = cB58[,2]
  noisy = read.fitstab(paste(directory,fil,sep = "/"))
  # Spectrum shorter than the template: return sentinel values.
  if(length(sig_flux) > dim(noisy)[1]){
    MSE = 9999;corr = 1; index = 0
  }
  else{
    MSEi = rep(0,dim(noisy)[1] - 2180)
    corri = rep(0,dim(noisy)[1] - 2180)
    #noise_flux = scale(noisy[,1])
    for(i in 1:(dim(noisy)[1] - 2180)){
      sig_flux = cB58[,2]
      noise_flux = noisy[i:(i+2180),1]
      # Keep only samples whose column-4 flag is 0 (assumed to be a
      # quality/mask flag -- TODO confirm against the FITS format).
      noise_flux = noise_flux[noisy[i:(i+2180),4] == 0] ### ??
      sig_flux = sig_flux[which(noisy[i:(i+2180),4] == 0)]
      # Standardise both windows before comparing.
      sig_flux = as.vector(scale(sig_flux))
      noise_flux = as.vector(scale(noise_flux))
      # Too few usable samples: mark this offset with the sentinel.
      if(length(noise_flux) <= 1000){MSEi[i] = 9999
      }else{
        MSEi[i] = sqrt(sum((sig_flux - noise_flux)^2))/length(sig_flux)
        corri[i] = cor(sig_flux,noise_flux)
      }
    }
    # Best offset by distance and by correlation (union: may coincide).
    index = union(order(MSEi)[1], order(corri,decreasing = TRUE)[1])
    MSE = MSEi[index]
    corr = corri[index]
  }
  return(list(index,MSE,corr))
}
# ext2(files[1])
# Score every file, extract the best distance/offset per file, and write
# the results ranked by distance to "<directory>.csv".
sele = lapply(files, ext2)
# NOTE(review): 1:obs assumes at least `obs` files exist -- see the note on
# the hard-coded obs above; seq_along(sele) would be safer.
MSE = unlist(lapply(1:obs, function(x) sele[[x]][[2]][1]))
index = unlist(lapply(1:obs, function(x) sele[[x]][[1]][1]))
#corr = unlist(lapply(1:100, function(x) sele[[x]][[3]][2]))
#index = unlist(lapply(1:100, function(x) sele[[x]][[1]][2]))
hwdf = data.frame(distance = MSE, spectrumID = files, i = index)
hwdf = hwdf[order(hwdf$distance),]
hwdf = hwdf[is.na(hwdf$distance) == FALSE,]
# hwdf$direc = directory
write.csv(hwdf,paste(directory,'.csv',sep = ''), row.names = FALSE)
|
2ab61e2906f649a47624dd99c596754c2b7946b5 | 51d2291c595cb72aa8f23fb39009fd92db1a3c3a | /man/readMZ.Rd | f92dd001e013a5e73c73fc1b1ab523e7edb708f2 | [] | no_license | Yang0014/MassSpectrometry | cee487a9db8a9b6786b28c64995056b4c74352a7 | 27d4cd0f125dd3156d9c37b5964fd4b62fc44893 | refs/heads/master | 2021-01-21T04:40:10.302068 | 2016-07-21T20:14:01 | 2016-07-21T20:14:01 | 54,397,527 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,292 | rd | readMZ.Rd | \name{readMZ}
\alias{readMZ}
\title{
Read mzXML files
}
\description{
Read mzXML files.
}
\usage{
readMZ(fns, scanCounts = NULL, starts = NULL, ends = NULL)
}
\arguments{
\item{fns}{
\code{character}(n): the filenames of mzXML files.
}
\item{scanCounts}{
\code{integer}(n): the scanCount to use in each file. If NULL,
the last scan will be used.
}
\item{starts}{
\code{numeric}(n): the start coordinates of the ranges.
}
\item{ends}{
\code{numeric}(n): the end coordinates of the ranges.
}
}
\details{
The peak intensities are normalised to the maximal intensity of 100
within each file.
}
\value{
When \code{starts} and \code{ends} are NULL, a list of \code{matrix} with
intensities will be returned.
When they are given, a \code{data.frame} of the added intensities
within the ranges will be returned.
}
\author{
Yang Yang
}
\examples{
mzFns <- system.file(c("threonine/threonine_i2_e35_pH_tree.mzXML",
"lockmass/LockMass_test.mzXML"),
package = "msdata")
## Read all the mzXML data
allIntensities <- readMZ(mzFns)
## Read the peaks data within certain ragnges
starts <- c(50, 70, 80)
ends <- c(55, 75, 85)
rangedIntensities <- readMZ(mzFns, starts=starts, ends=ends)
} |
41ea8c110282315c0b5dc8b79f94e94a8daa22fe | 22f3f3f959b0af491de1cc2cdd2d887343c93969 | /CLASS-CDA-ToDo/reports/Homework/HW3-Due 12052016/HW3-Q1b.R | 59d15e1ad4100497a78691487c6c1ddbf182c62f | [] | no_license | EccRiley/CLASS-CDA | ffef431e2c32579c1b2e2d6d067308609e00cfdf | 5d74ca152e7553987d2ede3d6d9c9eed186e47bc | refs/heads/master | 2021-05-01T01:24:39.675352 | 2017-05-01T16:46:23 | 2017-05-01T16:46:23 | 72,780,877 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,367 | r | HW3-Q1b.R | #' ---
#' title: "Homework 3, Question 1b"
#' author: "Rachel M. Smith"
#' date: "`r format(Sys.Date(), '%d %B %Y')`"
#' ---
#'
#+ setup, echo=FALSE, results='hide', message=FALSE, warning=FALSE, cache=FALSE
source("../SETUP.R") ## still need all of my setup tools though ##
# knitr/spin chunk defaults for this report (pdf device, figure path/size).
knitr::opts_chunk$set(tidy = FALSE, echo = TRUE, cache = FALSE, results = 'asis', Rplot = NULL, dev = 'pdf', fig.path = "graphics/HW3/q1b-", fig.width = 7, fig.height = 7, out.width = "\\linewidth")
#'
#' -----
#'
#' # Data Descriptives
#'
#+ dat1, echo=TRUE
# Read the SPSS file (R.rspss presumably defined in SETUP.R -- confirm),
# drop incomplete cases, then print descriptives for all but column 5.
dat <- R.rspss("data/child2.sav", vlabs = FALSE)
dat <- na.omit(dat)
R.msmm(dat[, -5])
#'
#' -----
#'
#' # Question $1.b.$:Multiple Logistic Regression Model
#'
fit <- glm(abuse ~ program + boyfriend + white + welfare,
data = dat,
family = binomial(link = "logit"))
fit
anova(fit, test = "LRT")
#'
#' \newpage
#'
#' ## Model Fit Diagnostics
#'
#'
#+ outTest, results='asis', echo=TRUE
car::outlierTest(fit, cutoff = Inf)
#'
#'
#+ glmDX1, fig.height = 6, echo=FALSE
# When no observations are found with a bonferroni p-value exceeding the cutoff, the *one* with the largest Studentized residual is reported. ##
library(car)
dx1 <- read.csv("data/hw3-q1b2.csv") ## Saved output from "LogisticDX::dx()" ##
dx1046 <- as.data.frame(dx1[dx1$x2 == 1213, ])
plot(dx1$P, dx1$sPr, xlab = "Predicted Probabilities", ylab = "Standardized Pearson Residuals"); abline(h = 0, lwd = 1, col = pal_my[19], lty = 'dashed'); loessLine(predict(fit), residuals(fit), log.x = FALSE, log.y = FALSE, col=pal_my[5], smoother.args=list(lty=1, lwd=2, lty.spread=2, lwd.spread=1)); text(x = dx1046$P, y = dx1046$sPr+0.5, labels = "1046")
# car::residualPlot(fit, type = "pearson",
# col.smooth = pal_my[5], id.n = 1, linear = FALSE)
#'
#'
library(car)
cutoff <- 4/((nrow(dat) - length(fit$coefficients) - 2))
plot(fit, which = 4, cook.levels = cutoff)
#'
#+ glmdx1, echo=FALSE, fig.height = 4.5
dx1$col <- mpal(1:nrow(dx1), p = sci, a = 0.55)
plot(
dx1$P,
dx1$dChisq,
type = 'n',
xlab = "Predicted Probabilities",
ylab = expression(Delta ~ ~ Chi ^ 2)
)
points(
dx1$P,
dx1$dChisq,
cex = 2,
bg = dx1$col,
col = pal_my[2],
pch = 21,
lwd = 0.5
)
text(x = dx1046$P,
y = dx1046$dChisq - 3,
labels = "1046")
lines(lowess(dx1$P, dx1$dChisq), lwd = 3, col = pal_my[18])
#'
#'
#' `r tufte::newthought(" ")`
#'
#+ glmdx13, echo=FALSE, fig.height = 4.25
plot(dx1$P,
dx1$dBhat,
type = 'n',
xlab = "Predicted Probabilities",
ylab = " ")
points(
dx1$P,
dx1$dBhat,
cex = 2,
bg = dx1$col,
col = pal_my[2],
pch = 21,
lwd = 0.5
)
text(x = dx1046$P,
y = dx1046$dBhat - 0.035,
labels = "1046"); mtext(text =
expression(Delta ~ ~ hat(beta)),
side = 2, line = 2)
lines(lowess(dx1$P, dx1$dBhat), lwd = 3, col = pal_my[18])
#'
#'
#' `r tufte::newthought(" ")`
#'
#+ glmdx14, echo=FALSE, fig.height = 4.5
plot(
dx1$P,
dx1$dD,
type = 'n',
xlab = "Predicted Probabilities",
ylab = expression(Delta ~ ~ "Deviance")
)
points(
dx1$P,
dx1$dD,
cex = 2,
bg = dx1$col,
col = pal_my[2],
pch = 21,
lwd = 0.5
)
text(x = dx1046$P,
y = dx1046$dD - 0.25,
labels = "1046")
lines(lowess(dx1$P, dx1$dD), lwd = 3, col = pal_my[18])
#'
#'
#+ echo=FALSE
kable(dx1[dx1$x2 == 1213, c(-1, -2, -3, -9)], caption = "Residual Diagnostic Statistics for Case No. 1046", col.names = c("Standardized Pearson Residual", "Predicted Probability", "$\\Delta\\chisq$", "$\\Delta Deviance$", "$\\Delta\\hat{\\beta}$"))
#'
#'
#'
#' `r tufte::newthought("Multiple Logistic Regression Diagnostics Summary")`. In the second residual plot above, with standardized pearson resdiuals on the Y-axis and predicted values on the X-axis, _Case \#`1046`_ is identified as an outlier. Examining the $\Delta\chisq$ and $\Delta\beta$ for this case (see above) against the aggregated descriptives for the full set of observations included in the model (see below), it is clear that this case is is an outlier. This conclusion is supported by the studentized residual outlier test provided above. However, the residual data visualizations collectively suggest that this one observation (i.e., _Case \#`1046`_) is not necessarily heavily influential on the fitted model's coefficients. For example, in the first diagostic plot provided above, the solid line represents the fitted _loess model_ for the tested model's predicted values against the model's residuals. The fitted loess line's slope appears to correspond appropriately with the data with little influence from the outlier case^[Located and labeled in the bottom right corner of the plot]. The same behavior is observed across subsequent visualizations three plots respectively showing the $\Delta\chisq$, $\Delta\beta$, and $\Delta Deviance (D)$ plotted against the tested model's predicted probabilities, where the solid gray line in each plot represents the best fitting (loess) curve for each diagnostic statistic against the predicted probabilities. In all of the above-described visualizations, the best fitting line appears most heavily influened by the data clustered toward the lower ends of each diagnostic statistic's range, rather than the labeled outlying data point in each plot. 
However, the difference between _Case \#`1046`'s_ predicted probability ($P = 0.9914$ in the table above) against the mean predicted probability for the full set of observations included in the model ($M = 0.07$ in the table below), suggests that this particular data point's predicted value could be influentual on the tested model's outcome (_Abuse_). This influence could increase the risk of Type I error regarding the model's predictors relations with the outcome. In particular, _Case \#`1046`'s_ relatively high score on the _Welfare_ predictor ($Welfare_{1046} = 8$, whereas $\mu_{Welfare} = `r mean(dat$welfare)`$) could influence the regression coefficient obtained for this predictor ($\beta_{Welfare} = `r fit$coefficients[[5]]`$).
#'
#' -----
#'
#+ echo=FALSE
dxmsmm <- R.msmm(dx1[, c(-1, -2, -3, -9)])[, -5]
rownames(dxmsmm) <- c("Standardized Pearson Residual", "Predicted Probability", "$\\Delta\\chisq$", "$\\Delta Deviance$", "$\\Delta\\hat{\\beta}$")
kable(dxmsmm, caption = "Descriptive Statistics for Residual Diagnostics")
#' |
bab6f41e2a38d8137e233d538766274029651eab | 115855fbf4efcdc2211399e1f7a0be6735e0e0e4 | /man/categ.Rd | 2e5d2893a42975bd94071b685b2d2c1862212d87 | [] | no_license | cran/gambin | 145552192e31d5399bfa77eb4f5f78d379462502 | 74158cbb84ba6a564a2a931c865a86db422c8f1f | refs/heads/master | 2021-07-10T16:53:08.513485 | 2021-04-16T17:10:05 | 2021-04-16T17:10:05 | 17,696,247 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 731 | rd | categ.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_docs.R
\docType{data}
\name{categ}
\alias{categ}
\title{Simulated bird SAD dataset with species classification data}
\format{
A dataframe with three columns: 1) 'abundances' = the abundance of
each species, 2) 'species' = the species names, and 3) 'status' the species
origin classification. In regards to (3) each species is classified as
either native (N), exotic (E) or invasive (I).
}
\source{
This package.
}
\description{
A randomly generated bird SAD dataset where each species has
been randomly classified according to its origin (native, exotic or
invasive).
}
\examples{
data(categ, package = "gambin")
}
|
fdc011064d72ef7ea345c2e62f94e978e5d47916 | 7f98b5078a213a0e9ddfc3f7bcad9e0c4e3250a2 | /2013/Arda_stat_generator.r | 6ed59a6a1c6b6548d8e7a281849ff70af7a88d64 | [] | no_license | bclay/upenn-research-scripts | d1e0dea7286840a2fdf6c854ffce90464b1b84aa | 804d2240798be5899744b0bfaabb1b165cc4c233 | refs/heads/master | 2020-06-06T10:23:42.668746 | 2014-08-26T18:13:33 | 2014-08-26T18:13:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 401 | r | Arda_stat_generator.r | # script of R commands
library(igraph)
g <- graph.data.frame(d,directed=FALSE)
z <- length(V(g))
y <- length(E(g))
x <- graph.density(g)
w <- clusters(g)$no
v <- transitivity(g,"global")
t <- graph.adhesion(g)
s <- diameter(g)
r <- average.path.length(g)
rstr <- paste(z,y,x,w,v,t,s,r, sep = "\t", collapse = "\n")
#rstr <- cat(z, "\t", y, "\t", x, "\t", w, "\t", v, "\t", t, "\t", s, "\t", r, "\n")
|
8f5f1c5371b7e04d45afa015b08fc74cb1a013fb | 90495633545b740ead993b01fb98b85bd62e4b5d | /cullDHs.R | dd0e7ff7b8c126f59597dbd7bb12661a507cfff1 | [] | no_license | smallgrains-uiuc/Wheat-Selection-Decisions-2021 | fca5764aa5f1beca64fd87057141cca0a48979a5 | 7ff5610346050101d7129645a01c9cdbe6ba6cb7 | refs/heads/main | 2023-08-24T16:57:59.854517 | 2021-10-14T16:01:48 | 2021-10-14T16:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,983 | r | cullDHs.R | setwd("~/Documents/GitHub/Wheat-Selection-Decisions-2021")
dhdata<- read.csv('DHphenotype_download.csv')
#combine with data from flatfile
setwd("~/Documents/Wheat/2021/Data Upload Files/check urbana dh data")
dhdata1<- read.csv('dhfield_urb_21-upload.csv', row.names=1)
colnames(dhdata1)[1]<- 'observationUnitName'
dhdata<- merge(dhdata, dhdata1, by='observationUnitName')
#add the row column information
setwd("~/Documents/Wheat/2021/HarvestMaps")
mirus<- read.csv('AugDHfield2021_mirusfile.csv', row.names=1)[,c(1,2,4)]
colnames(mirus)[3]<-'observationUnitName'
dhdata<- merge(dhdata, mirus, by='observationUnitName', all.x = TRUE, all.y=FALSE)
gids<- levels(dhdata$germplasmName)
#add the neoga data
setwd("~/Documents/Wheat/2021/Data Upload Files/check neoga yield data")
neoga<- read.csv('2021-06-22T113715phenotype_download.csv')
neoga2<-read.csv('Neoga2021_mirisfile_curated_bothdays.csv', row.names=1)
colnames(neoga2)[5]<- 'observationUnitName'
colnames(neoga2)[4]<- 'row'
neoga<- merge(neoga, neoga2, by='observationUnitName')
neogaDH<- neoga[grep('DH', neoga$observationUnitName),]
library(breedbase)
neogaDH$Test.Weight<- convert_lbsbu_gL(neogaDH$Test.Weight)
neogaDH$kgpha<- convert_buac_kgHa(neogaDH$buperacre, "wheat")
#combine data
commoncols<- intersect(colnames(dhdata), colnames(neogaDH))
neogaDH<- neogaDH[,commoncols]
dhdata<- dhdata[,commoncols]
dhdata<- droplevels.data.frame(rbind(dhdata, neogaDH))
#blocking factor
dhdata$blockNumber<- paste(dhdata$blockNumber, dhdata$studyName, sep="_")
dhdata$row<- paste(dhdata$row, dhdata$studyName, sep="_")
dhdata$range<- paste(dhdata$range, dhdata$studyName, sep="_")
#change to factors
dhdata$blockNumber<- as.factor(dhdata$blockNumber)
dhdata$studyName<- as.factor(dhdata$studyName)
dhdata$germplasmName<- as.factor(dhdata$germplasmName)
dhdata$entryType<- as.factor(dhdata$entryType)
dhdata$row<- as.factor(dhdata$row)
dhdata$range<- as.factor(dhdata$range)
dhdata$locationName<- as.factor(dhdata$locationName)
#make sure checks are recorded as checks
dhdata[which(dhdata$germplasmName=='Kaskaskia'),'entryType']<- 'check'
dhdata[which(dhdata$germplasmName=='07-4415'),'entryType']<- 'check'
#fit models, neoga only
library(asreml)
dhdataNeo<- dhdata[which(dhdata$locationName=='Neoga, IL'),]
dhdataNeo0<- dhdataNeo
traits<- c("Heading.time...Julian.date..JD..CO_321.0001233", "Plant.height...cm.CO_321.0001301","kgpha","Test.Weight")
for(i in 1:length(traits)){
dhdataNeo<- dhdataNeo0
colnames(dhdataNeo)[match(traits[i], colnames(dhdataNeo))]<- 'Y'
asreml.options(extra=100, maxit=500)
mod<- asreml(fixed= Y~1+at(entryType, 'check'):germplasmName,
random= ~at(entryType, 'check'):blockNumber+at(entryType, 'test'):germplasmName+at(entryType, 'check'):row, data=dhdataNeo)
mod<- update(mod)
blups<- na.omit(predict(mod, classify='at(entryType, test):germplasmName', ignore=c('(Intercept)'))$pvals)
pev<- blups[,'std.error']^2
Vg<- summary(mod)$varcomp['at(entryType, test):germplasmName','component']
rel<- 1-(pev/Vg)
blups<- data.frame(blups, rel, trait=traits[i])
if(i==1){
blupsAll<- blups
}else{
blupsAll<- rbind(blupsAll, blups)
}
mod2<- asreml(fixed= Y~1+at(entryType, 'check'):germplasmName+at(entryType, 'test'):germplasmName,
random= ~at(entryType, 'check'):blockNumber+at(entryType, 'check'):row,data=dhdataNeo)
mod2<- update(mod2)
blues<- na.omit(predict(mod2, classify='at(entryType, test):germplasmName')$pvals)
blues<- data.frame(blues, trait=traits[i])
if(i==1){
bluesAll<- blues
}else{
bluesAll<- rbind(bluesAll, blues)
}
}
blupsAll<- blupsAll[-c(grep('07-4415', blupsAll$germplasmName), grep('Kaskaskia', blupsAll$germplasmName)),]
unique(blupsAll[,c('trait', 'rel')])
bluesNeoga<- data.frame(loc='Neoga', bluesAll)
#fit models, urbana only
dhdataUrb<- droplevels.data.frame(dhdata[which(dhdata$locationName=='Urbana, IL'),])
dhdataUrb0<- dhdataUrb
library(asreml)
traits<- c("Heading.time...Julian.date..JD..CO_321.0001233", "Plant.height...cm.CO_321.0001301","kgpha","Test.Weight")
for(i in 1:length(traits)){
dhdataUrb<- dhdataUrb0
colnames(dhdataUrb)[match(traits[i], colnames(dhdataUrb))]<- 'Y'
asreml.options(extra=100, maxit=500)
mod<- asreml(fixed= Y~1+at(entryType, 'check'):germplasmName,
random= ~at(entryType, 'check'):blockNumber+
at(entryType, 'test'):germplasmName+at(entryType, 'check'):row, data=dhdataUrb)
mod<- update(mod)
blups<- na.omit(predict(mod, classify='at(entryType, test):germplasmName',ignore=c('(Intercept)'))$pvals)
pev<- blups[,'std.error']^2
Vg<- summary(mod)$varcomp['at(entryType, test):germplasmName','component']
rel<- 1-(pev/Vg)
blups<- data.frame(blups, rel, trait=traits[i])
if(i==1){
blupsAll<- blups
}else{
blupsAll<- rbind(blupsAll, blups)
}
mod2<- asreml(fixed= Y~1+at(entryType, 'check'):germplasmName+at(entryType, 'test'):germplasmName,
random= ~at(entryType, 'check'):blockNumber+at(entryType, 'check'):row, data=dhdataUrb)
mod2<- update(mod2)
blues<- na.omit(predict(mod2, classify='at(entryType, test):germplasmName')$pvals)
blues<- data.frame(blues, trait=traits[i])
if(i==1){
bluesAll<- blues
}else{
bluesAll<- rbind(bluesAll, blues)
}
}
blupsAll<- blupsAll[-c(grep('07-4415', blupsAll$germplasmName), grep('Kaskaskia', blupsAll$germplasmName)),]
unique(blupsAll[,c('trait', 'rel')])
bluesUrbana<- data.frame(loc='Urbana', bluesAll)
#all blues
dhmeans<- rbind(bluesNeoga, bluesUrbana)
dhmeans<- data.frame(dhmeans, wt= 1/(dhmeans$std.error^2))
for(i in 1:length(traits)){
sub<- dhmeans[which(dhmeans$trait==traits[i]),]
modME<- asreml(fixed=predicted.value~1+loc, random= ~germplasmName, weights=wt,family = asr_gaussian(dispersion = 1),
data=sub)
blups<- predict(modME, classify='germplasmName',ignore=c('(Intercept)', 'loc'))$pvals
pev<- blups[,'std.error']^2
Vg<- summary(modME)$varcomp['germplasmName','component']
rel<- 1-(pev/Vg)
blups<- data.frame(blups, trait=traits[i], rel)
if(i==1){
blupsAll<- blups
}else{
blupsAll<- rbind(blupsAll, blups)
}
}
wide<- cast(blupsAll, germplasmName~trait, value='predicted.value')
##Select based on net merit
#Net merit function
#starting price of wheat and soybean, five year average based on macrotrends.net
wheat_price0<- mean(c(5.4621, 4.9414, 4.9757, 4.4014, 4.3945))
soybean_price<- mean(c(9.3785, 8.9298, 9.3456, 9.7820, 9.8753))
#wheat price fcn
wheatPrice<- function(fdk, don, twt, wheat_price0){
if(don==0){
donDiscount<- 0
}else{
donDiscount<- sqrt(don)*-0.2
}
if(fdk==0){
fdkDiscount<- 0
}else{
fdkDiscount<- sqrt(fdk)*-0.04
}
twtDiscount<- c(58-twt)*-.2
twtDiscount[which(twtDiscount>0)]<- 0
wheat_price<- wheat_price0+donDiscount+fdkDiscount+twtDiscount
return(wheat_price)
}
#net merit function
netMerit<- function(headings, yields, dons, fdks, twt, wheat_price0, soybean_price){
wheat_price1<- wheatPrice(fdks, dons, twt, wheat_price0)
soy_yld_gain<- 0.5* (max(headings)-headings)
soy_profit_gain<- soy_yld_gain*soybean_price
wheat_profit<- yields*wheat_price1
total_profit<- wheat_profit + soy_profit_gain
return(total_profit)
}
wide$Test.Weight_imperial<- wide$Test.Weight/convert_lbsbu_gL(1)
wide$Yield_imperial<- wide$kgpha/convert_buac_kgHa(1, "wheat")
convert_buac_kgHa(wide$Yield_imperial, "wheat")[1:10]
wide$Test.Weight_imperial<- wide$Test.Weight_imperial+58
wide$Yield_imperial<- wide$Yield_imperial+80
wide$Heading.time...Julian.date..JD..CO_321.0001233<- wide$Heading.time...Julian.date..JD..CO_321.0001233+136
nets<- c()
for(i in 1:nrow(wide)){
net<- netMerit(wide[i,'Heading.time...Julian.date..JD..CO_321.0001233'], wide[i,'Yield_imperial'], 0,
0, wide[i,'Test.Weight_imperial'],wheat_price0, soybean_price)
nets<- append(nets, net)
}
wide<- data.frame(wide, nets)
wide<- wide[-which(wide$germplasmName %in% c('07-4415', 'Kaskaskia')),]
notes<- dhdata[,c('notes','germplasmName','locationName')]
notes<- notes[-which(notes$germplasmName %in% c('07-4415', 'Kaskaskia')),]
notes<- cast(notes, germplasmName~locationName, value='notes')
notes<- notes[,c(1,3)]
colnames(notes)<- c("germplasmName", "notes")
wide<- merge(wide, notes, by='germplasmName')
wide$decision<- 'select'
wide[grep('exclude_plot:true', wide$notes),'decision']<- 'discard'
wide<- wide[order(-wide$nets),]
wide[122:541, 'decision']<- 'discard'
wide[which(wide$Heading.time...Julian.date..JD..CO_321.0001233>139),'decision']<- 'discard'
wide[which(wide$Plant.height...cm.CO_321.0001301>10),'decision']<- 'discard'
table(wide$decision)
#make file for inventory
selectedDH<- as.character(wide[which(wide$decision =='select'),'germplasmName'])
dhdata$decision<- 'discard'
dhdata[match(selectedDH, dhdata$germplasmName),'decision']<- 'select'
inventoryfile<- dhdata[,c('observationUnitDbId', 'studyDbId','plotNumber', 'germplasmName','observationUnitName', 'decision')]
#combine with aug_urb file for inventory
setwd("~/Documents/Wheat/2021/HarvestMaps")
meta<- read.csv('AugUrbphenotypedownload.csv')
aug<- read.csv('AugUrb2021_mirusfile.csv', row.names=1)
#colnames(aug)[4]<- 'observationUnitName'
aug<- merge(meta, aug, by='observationUnitName')
aug<- aug[,c('observationUnitDbId','studyDbId','plotNumber.x', 'germplasmName.x', 'observationUnitName', 'bag')]
augInventory<- aug[which(aug$bag=='yes'),]
colnames(augInventory)<- colnames(inventoryfile)
augInventory$decision='select'
setwd("~/Documents/Wheat/2021/Seed inventory and cleaning")
inventoryfile<- rbind(inventoryfile,augInventory)
inventoryfile<- inventoryfile[-grep('Neo', inventoryfile$observationUnitName),]
inventoryfile<- inventoryfile[order(as.numeric(as.character(inventoryfile$plotNumber))),]
inventoryfile<- inventoryfile[order(as.numeric(as.character(inventoryfile$studyDbId))),]
write.csv(inventoryfile, file='Stg2inventory_fb.csv', row.names=FALSE)
setwd("~/Documents/GitHub/Wheat-Selection-Decisions-2021") |
7370495a3e846e84761dda393a73b0225a6fed9a | 491f755c2255bd133c087e07bd2da0c33a99587d | /R/virtuoso.R | 799f2801785b91dd77a96b8cf7a204358ab4bb08 | [
"MIT"
] | permissive | i-Eloise/virtuoso | 11c4849b3bf86e3662bc79549ff2f0e7e54402db | 0886bd9d6f0d6dc7de7ed19021d232835413af73 | refs/heads/master | 2020-04-12T04:37:32.584651 | 2018-12-11T17:22:15 | 2018-12-11T17:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,721 | r | virtuoso.R | ## Some possible helper routines for common requests.
#' Clear all triples from a graph
#'
#' @details NOTE: after clearing a graph, re-running the bulk
#' importer may not re-import triples.
#' @inheritParams vos_import
vos_clear_graph <- function(con, graph = "rdflib"){
DBI::dbGetQuery(con, paste0("SPARQL CLEAR GRAPH <", graph, ">"))
}
#' Delete Virtuoso Database
#'
#' delete the entire Virtuoso database for a fresh start.
#' @param ask prompt before deleting?
#' @param db_dir location of the directory to delete
#' @export
vos_delete_db <- function(ask = interactive(),
db_dir = vos_db()){
if(ask)
continue <- askYesNo("Are you sure?")
if(continue)
unlink(db_dir, recursive = TRUE)
}
#' List graphs
#'
#' @export
#' @inheritParams vos_import
vos_list_graphs <- function(con){
DBI::dbGetQuery(con,
paste("SPARQL SELECT",
"DISTINCT ?g",
"WHERE {",
"GRAPH ?g {?s ?p ?o}",
"}",
"ORDER BY ?g")
)
}
## Methods not yet implemented, see notes inline.
#' count triples
#'
#' @inheritParams vos_import
vos_count_triples <- function(con, graph = NULL){
## Official query method below. Not sure why these return
## large negative integer on debian and fail on mac...
#DBI::dbGetQuery(con, "SPARQL SELECT COUNT(*) FROM <rdflib>")
#DBI::dbGetQuery(con, paste("SPARQL SELECT (COUNT(?s) AS ?triples)",
## "WHERE { GRAPH ?g { ?s ?p ?o } }"))
## this way with dplyr way works but requires in-memory
## loading of all triples, probably a terrible idea!
## df <- DBI::dbGetQuery(con, paste(
## "SPARQL SELECT ?g ?s ?p ?o WHERE { GRAPH ?g {?s ?p ?o} }"))
## dplyr::count_(df, "g")
}
|
edb162e93c536bb9a3aae62830cc10e669426363 | 66f567627c6e8c85ef92f1bf52c0f62a00913e59 | /man/getCommandNames.Rd | 0f092b920a03d4dc0b252ce9fb5697d2444ffba0 | [] | no_license | sebastianrossel/Bioconductor_RCy3_the_new_RCytoscape | 52ae752689e5411043e83cb82e298130acc94c5b | 0649270e489597054b8cd5bb471026e37bbf08ca | refs/heads/master | 2023-03-17T14:13:08.074463 | 2018-02-03T19:55:23 | 2018-02-03T19:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 706 | rd | getCommandNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3.R
\docType{methods}
\name{getCommandNames,CytoscapeConnectionClass-method}
\alias{getCommandNames,CytoscapeConnectionClass-method}
\title{Gets commands available from within cytoscape from
functions within cytoscape and from installed plugins.}
\usage{
\S4method{getCommandNames}{CytoscapeConnectionClass}(obj)
}
\arguments{
\item{obj}{Cytoscape network where commands are fetched via RCy3}
}
\value{
Vector of available commands from all namespaces (e.g. functions and plugins)
}
\description{
Gets commands available from within cytoscape from
functions within cytoscape and from installed plugins.
}
\concept{
RCy3
}
|
97cd594f18c1176a35283c5b49a640277ba79f88 | 12765da07fc94de912a7031e496eec6c1f7eb5c1 | /plot4.R | c0a9b475c0e782f7899f86e1bafa2039f67948ca | [] | no_license | tmarkam/ExData_Plotting1 | 417e393ce0cfcd698376ca944f9b1eb4894ee809 | d7412a5bdb6bb00fba0770d54fa4ec476f274aae | refs/heads/master | 2021-01-20T19:45:46.796628 | 2016-09-13T07:01:39 | 2016-09-13T07:01:39 | 68,065,272 | 0 | 0 | null | 2016-09-13T02:08:44 | 2016-09-13T02:08:43 | null | UTF-8 | R | false | false | 2,316 | r | plot4.R | # plot4.R
# author: Ted M
# date: Sep 11, 2016
# title: Coursera Exploratory Data Analysis Week 4 exercise, plot 4
# function: display 4 plots for period 2/1/2007-2/2/2007
# notes: this script uses the "Electric power consumption" dataset from UC Irvine Machine Learning Repository
# we use the "readr" library - as it's less memory-intensive than the base read functions
library(readr)
library(lubridate)
# read the dataset, read the date and time as strings and interpret the question mark as an NA value, and remaining cols as double
hpc <-read_delim("household_power_consumption.txt",";",na=c("?"),
col_types = cols(col_date("%d/%m/%Y"),
col_time("%H:%M:%S"),
col_double(),
col_double(),
col_double(),
col_double(),
col_double(),
col_double(),
col_double()))
# get the subset for the desired date range
hpc_sub<-subset(hpc,hpc$Date >= as.Date("2007-02-01") & hpc$Date <= as.Date("2007-02-02") )
# combine the date and time columns into a timestamp
hpc_sub$Timestamp <- hpc_sub$Date + seconds(hpc_sub$Time)
# note: to test, comment out the png and dev.off statements
png(filename = "plot4.png", width = 480, height = 480)
# set the plot parameters to display 4 plots in a grid
par(mfcol=c(2,2))
# 1. plot the Global Active Power
plot(hpc_sub$Timestamp, hpc_sub$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
# 2. Energy Sub Metering
plot(hpc_sub$Timestamp, hpc_sub$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(hpc_sub$Timestamp, hpc_sub$Sub_metering_2, col="red")
lines(hpc_sub$Timestamp, hpc_sub$Sub_metering_3, col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black", "red", "blue"), lwd=1)
# 3. Voltage
plot(hpc_sub$Timestamp, hpc_sub$Voltage, type="l", xlab="datetime", ylab="Voltage")
# 4. Global Reactive Power
plot(hpc_sub$Timestamp, hpc_sub$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
print("Done!") |
c6fddb871006c85a4cd3a915a3cdd515c1553728 | 7f3a2d4bd48c7bbbe877894291e2cbf676f6b548 | /R/Plot.R | 814338f23fd2a14d3e9e4b44c61a69caaa3e5ec1 | [
"Artistic-2.0"
] | permissive | yurasong/powsimR | 5420610bbf1143e738ea9793401d6314c1e98001 | d69ca441deb8d57f29717c389b720bcbe3ff5abe | refs/heads/master | 2020-11-28T07:20:04.301930 | 2019-11-27T16:40:52 | 2019-11-27T16:40:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91,508 | r | Plot.R |
# plotParam -------------------------------------------------------------
#' @name plotParam
#' @aliases plotParam
#' @title Visualize distributional characteristics of RNAseq experiment
#' @description This function plots the results of the parameter estimation. This includes the absolute and relative sequencing depth (i.e. library size factor) as well as marginal log2(mean+1), log2(dispersion) and dropout. Furthermore, the mean-dispersion relationship with loess fit for simulations is visualized. Lastly, the mean-dropout rate is presented as a smooth scatter plot.
#' @usage plotParam(estParamRes, annot=TRUE)
#' @param estParamRes The output of \code{\link{estimateParam}}.
#' @param annot A logical vector. If \code{TRUE}, a short figure legend is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' plotParam(estParamRes = kolodziejczk_param, annot=TRUE)
#' }
#' @author Beate Vieth
#' @importFrom ggplot2 ggplot aes geom_boxplot geom_point position_jitterdodge scale_fill_manual labs theme element_text element_blank element_rect geom_violin geom_dotplot stat_summary scale_y_continuous theme_classic theme_light scale_y_log10 geom_hline geom_bar facet_grid as_labeller scale_x_discrete stat_density2d scale_fill_gradientn geom_line
#' @importFrom scales trans_breaks trans_format math_format
#' @importFrom grid unit
#' @importFrom dplyr bind_rows
#' @importFrom ggpubr ggtexttable ttheme
#' @importFrom grDevices blues9 colorRampPalette
#' @importFrom reshape2 melt
#' @importFrom cowplot plot_grid add_sub ggdraw
#' @importFrom stats reorder
#' @rdname plotParam
#' @export
plotParam <- function(estParamRes, annot=TRUE) {
## QC plot
if(attr(estParamRes, "RNAseq")=="singlecell" | estParamRes$detectS>12) {
## QC plot
# sequencing depth with marker for dropout samples
lib.size.dat <- data.frame(Seqdepth=estParamRes$Parameters$Raw$seqDepth,
Sample=names(estParamRes$Parameters$Raw$seqDepth),
Dropout=estParamRes$DropOuts$Sample$totCounts,
stringsAsFactors = F)
libsize.plot <- ggplot2::ggplot(lib.size.dat, ggplot2::aes(x = "", y = Seqdepth)) +
ggplot2::geom_point(ggplot2::aes(fill=Dropout), pch = 21,
position = ggplot2::position_jitterdodge()) +
ggplot2::geom_boxplot(outlier.shape = NA, width = 0.5, alpha=0.5) +
ggplot2::theme_light() +
ggplot2::scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Sequencing Depth") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
# library size factor plot
sf.dat <- data.frame(SizeFactor=estParamRes$sf,
Sample=names(estParamRes$sf),
stringsAsFactors = FALSE)
sf.max <- max(sf.dat$SizeFactor)*1.05
sf.plot <- ggplot2::ggplot(sf.dat, ggplot2::aes(x = "", y=SizeFactor)) +
ggplot2::geom_violin(fill = "grey90", width=0.8, color = "black") +
ggplot2::stat_summary(fun.y = median,
fun.ymin = median,
fun.ymax = median,
color = "black",
width = 0.5,
geom = "crossbar") +
ggplot2::scale_y_continuous(limits = c(0, sf.max)) +
ggplot2::theme_light() +
ggplot2::labs(x = NULL,
y = NULL,
title = paste0("Library Size Factors (", estParamRes$normFramework, ")")) +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
# total features
totfeatures.dat <- data.frame(TotFeatures=estParamRes$Parameters$Raw$totFeatures,
Sample=names(estParamRes$Parameters$Raw$totFeatures),
Dropout=estParamRes$DropOuts$Sample$totFeatures,
stringsAsFactors = F)
totfeatures.plot <- ggplot2::ggplot(totfeatures.dat, ggplot2::aes(x = "", y = TotFeatures)) +
ggplot2::geom_point(ggplot2::aes(fill=Dropout), pch = 21,
position = ggplot2::position_jitterdodge()) +
ggplot2::geom_boxplot(outlier.shape = NA, width = 0.5, alpha=0.5) +
ggplot2::theme_light() +
ggplot2::scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Detected Genes") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
# gene spike ratio
if(all(!is.na(estParamRes$DropOuts$Sample$GeneSpikeRatio))){
genespike.dat <- data.frame(Ratio=estParamRes$DropOuts$Sample$GeneSpikeRatio/100,
Sample=names(estParamRes$Parameters$Raw$totFeatures),
Dropout=estParamRes$DropOuts$Sample$GeneSpike,
stringsAsFactors = F)
gs.max <- max(genespike.dat$Ratio)*1.05
genespike.plot <- ggplot2::ggplot(genespike.dat, ggplot2::aes(x = "", y=Ratio)) +
ggplot2::geom_point(ggplot2::aes(fill=Dropout), pch = 21,
position = ggplot2::position_jitterdodge()) +
ggplot2::geom_boxplot(outlier.shape = NA, width = 0.5, alpha=0.5) +
ggplot2::theme_light() +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::scale_y_continuous(limits = c(0, gs.max),
labels = scales::percent_format()) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Gene Spike Count Ratio") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
}
}
if(attr(estParamRes, "RNAseq")=="bulk" | estParamRes$detectS<12) {
# sequencing depth with marker for dropout samples
lib.size.dat <- data.frame(Seqdepth=estParamRes$Parameters$Raw$seqDepth,
Sample=names(estParamRes$Parameters$Raw$seqDepth),
Dropout=estParamRes$DropOuts$Sample$totCounts,
stringsAsFactors = F)
libsize.plot <- ggplot2::ggplot(lib.size.dat,
ggplot2::aes(reorder(Sample, Seqdepth),Seqdepth)) +
ggplot2::geom_bar(stat="identity", width=.5, ggplot2::aes(fill=Dropout)) +
ggplot2::geom_hline(yintercept = median(lib.size.dat$Seqdepth), linetype = 2, colour="grey40") +
ggplot2::theme_light() +
ggplot2::scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Sequencing Depth") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white")) +
ggplot2::coord_flip()
# library size factor plot
sf.dat <- data.frame(SizeFactor=estParamRes$sf,
Sample=names(estParamRes$sf),
stringsAsFactors = FALSE)
sf.max <- max(sf.dat$SizeFactor)*1.05
sf.plot <- ggplot2::ggplot(sf.dat, ggplot2::aes(x = "", y=SizeFactor)) +
ggplot2::geom_dotplot(binaxis='y', stackdir='center', dotsize=1, fill = "grey75") +
ggplot2::stat_summary(fun.y = median,
fun.ymin = median,
fun.ymax = median,
color = "black",
width = 0.5,
geom = "crossbar") +
ggplot2::scale_y_continuous(limits = c(0, sf.max)) +
ggplot2::theme_light() +
ggplot2::labs(x = NULL,
y = NULL,
title = paste0("Library Size Factors (", estParamRes$normFramework, ")")) +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
# total features
totfeatures.dat <- data.frame(TotFeatures=estParamRes$Parameters$Raw$totFeatures,
Sample=names(estParamRes$Parameters$Raw$totFeatures),
Dropout=estParamRes$DropOuts$Sample$totFeatures,
stringsAsFactors = F)
totfeatures.plot <- ggplot2::ggplot(totfeatures.dat,
ggplot2::aes(reorder(Sample, TotFeatures),TotFeatures)) +
ggplot2::geom_bar(stat="identity", width=.5, ggplot2::aes(fill=Dropout)) +
ggplot2::geom_hline(yintercept = median(totfeatures.dat$TotFeatures),
linetype = 2, colour="grey40") +
ggplot2::theme_light() +
ggplot2::scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x),
labels = scales::trans_format("log10", scales::math_format(10^.x))) +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Detected Genes") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white")) +
ggplot2::coord_flip()
# gene spike ratio
if(all(!is.na(estParamRes$DropOuts$Sample$GeneSpikeRatio))){
genespike.dat <- data.frame(Ratio=estParamRes$DropOuts$Sample$GeneSpikeRatio/100,
Sample=names(estParamRes$Parameters$Raw$totFeatures),
Dropout=estParamRes$DropOuts$Sample$GeneSpike,
stringsAsFactors = F)
gs.max <- max(genespike.dat$Ratio)*1.05
genespike.plot <- ggplot2::ggplot(genespike.dat, ggplot2::aes(x = "", y=Ratio)) +
ggplot2::geom_dotplot(binaxis='y', stackdir='center', dotsize=0.75,
ggplot2::aes(fill=Dropout)) +
ggplot2::stat_summary(fun.y = median,
fun.ymin = median,
fun.ymax = median,
color = "black",
width = 0.5,
geom = "crossbar") +
ggplot2::scale_y_continuous(limits = c(0, gs.max),
labels = scales::percent_format()) +
ggplot2::theme_light() +
ggplot2::scale_fill_manual(values = c("grey75", "red"),
labels = c("Included", "Outlier")) +
ggplot2::labs(x = NULL,
y = NULL,
title = "Gene Spike Count Ratio") +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white"))
}
}
## Marginal Distributions
if(attr(estParamRes, "Distribution") == "NB"){
param.names <- names(estParamRes$Parameters)[!is.na(estParamRes$Parameters)]
set.relabels <- c(`Raw` = "Provided",
`Full` = "All Genes",
`Filtered`="Filtered Genes",
`DropGene` = "Dropout Genes",
`DropSample` = ifelse(attr(estParamRes, "RNAseq")=="singlecell",
"Cell Outliers", "Sample Outliers"))
param.relabels <- c(`Mean` = "Log Mean",
`Dispersion` = "Log Dispersion",
`Dropout` = "Gene Dropout Rate")
margs.L <- sapply(param.names, function(i){
tmp <- estParamRes$Parameters[[i]]
data.frame(Mean=log2(tmp$means+1),
Dispersion=log2(tmp$dispersion),
Dropout=tmp$gene.dropout)
}, simplify = F, USE.NAMES = T)
margs.dat <- dplyr::bind_rows(margs.L, .id = "Set")
margs.dat <- suppressMessages(reshape2::melt(margs.dat))
margs.plot <- ggplot2::ggplot(margs.dat, ggplot2::aes(x=Set, y=value)) +
ggplot2::geom_violin(fill = "#597EB5", alpha = 0.5) +
ggplot2::stat_summary(fun.y = median,
fun.ymin = median,
fun.ymax = median,
color = "black",
width = 0.5,
geom = "crossbar") +
ggplot2::facet_grid(~variable, scales = "free_x", labeller = ggplot2::as_labeller(param.relabels)) +
ggplot2::scale_x_discrete(labels = set.relabels) +
ggplot2::labs(x = NULL,
y = NULL) +
ggplot2::theme_light() +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white")) +
ggplot2::coord_flip()
}
if(attr(estParamRes, "Distribution") == "ZINB"){
param.names <- names(estParamRes$Parameters)[!is.na(estParamRes$Parameters)]
set.relabels <- c(`Raw` = "Provided",
`Full` = "All Genes",
`Filtered`="Filtered Genes",
`DropGene` = "Dropout Genes",
`DropSample` = ifelse(attr(estParamRes, "RNAseq")=="singlecell",
"Cell Outliers", "Sample Outliers"))
param.relabels <- c(`Mean` = "Log Positive Mean",
`Dispersion` = "Log Positive Dispersion",
`Dropout` = "Gene Dropout Rate")
margs.L <- sapply(param.names, function(i){
tmp <- estParamRes$Parameters[[i]]
data.frame(Mean=log2(tmp$pos.means+1),
Dispersion=log2(tmp$pos.dispersion),
Dropout=tmp$gene.dropout)
}, simplify = F, USE.NAMES = T)
margs.dat <- dplyr::bind_rows(margs.L, .id = "Set")
margs.dat <- suppressMessages(reshape2::melt(margs.dat))
margs.plot <- ggplot2::ggplot(margs.dat, ggplot2::aes(x=Set, y=value)) +
ggplot2::geom_violin(fill = "#597EB5", alpha = 0.5) +
ggplot2::stat_summary(fun.y = median,
fun.ymin = median,
fun.ymax = median,
color = "black",
width = 0.5,
geom = "crossbar") +
ggplot2::facet_grid(~variable, scales = "free_x",
labeller = ggplot2::as_labeller(param.relabels)) +
ggplot2::scale_x_discrete(labels = set.relabels) +
ggplot2::labs(x = NULL,
y = NULL) +
ggplot2::theme_light() +
ggplot2::theme(legend.text = ggplot2::element_text(size=10, color='black'),
legend.position = "none",
legend.title = ggplot2::element_blank(),
legend.key.size = grid::unit(1.5, "lines"),
axis.text.x=ggplot2::element_text(size=10, color='black'),
axis.text.y=ggplot2::element_text(size=10, color='black'),
axis.title=ggplot2::element_text(size=10, face="bold", color='black'),
plot.title=ggplot2::element_text(size=10, face="bold", color='black'),
strip.text = ggplot2::element_text(size=10, face="bold", color='black'),
strip.background = ggplot2::element_rect(fill="white")) +
ggplot2::coord_flip()
}
# Table with numbers of genes / samples
sample.out = ifelse(attr(estParamRes, "RNAseq")=="singlecell", "Cell Outliers", "Sample Outliers")
sample.names = ifelse(attr(estParamRes, "RNAseq")=="singlecell", "Single Cells", "Bulk Samples")
no.dat <- data.frame(c("Provided", "Detected", "All Genes", "Filtered Genes", "Dropout Genes", sample.out),
c(estParamRes$totalG,
estParamRes$detectG,
estParamRes$Parameters$Full$ngenes,
estParamRes$Parameters$Filtered$ngenes,
ifelse(all(!is.na(estParamRes$Parameters$DropGene)), estParamRes$Parameters$DropGene$ngenes, 0),
ifelse(all(!is.na(estParamRes$Parameters$DropSample)), estParamRes$Parameters$DropSample$ngenes, 0)),
c(NA,
NA,
estParamRes$Fit$Full$estG,
estParamRes$Fit$Filtered$estG,
ifelse(all(!is.na(estParamRes$Fit$DropGene)), estParamRes$Fit$DropGene$estG, 0),
NA),
c(estParamRes$totalS,
estParamRes$detectS,
estParamRes$Parameters$Full$nsamples,
estParamRes$Parameters$Filtered$nsamples,
ifelse(all(!is.na(estParamRes$Parameters$DropGene)), estParamRes$Parameters$DropGene$nsamples, 0),
ifelse(all(!is.na(estParamRes$Parameters$DropSample)), estParamRes$Parameters$DropSample$nsamples, 0)),
stringsAsFactors = F )
colnames(no.dat) <- c("Set", "# Genes", "# Genes for Fit", paste0("# ", sample.names))
no.table <- ggpubr::ggtexttable(no.dat,
rows = NULL,
theme = ggpubr::ttheme("mOrange"))
## Fitting Lines
# mean-disp and mean-p0
if(attr(estParamRes, "Distribution") == "NB"){
# mean vs dispersion plot
meanvsdisp.dat <- data.frame(Mean=estParamRes$Fit$Filtered$meandispfit$model$x[,"x"],
Dispersion=estParamRes$Fit$Filtered$meandispfit$model$y)
meanvsdisp.plot <- ggplot2::ggplot(data=meanvsdisp.dat,
ggplot2::aes(x=Mean, y=Dispersion)) +
ggplot2::theme_classic() +
ggplot2::geom_point(size=0.5) +
ggplot2::stat_density2d(geom="tile", ggplot2::aes(fill=..density..^0.25,
alpha=ifelse(..density..^0.15<0.4,0,1)),
contour=FALSE) +
ggplot2::scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white", grDevices::blues9))(256)) +
ggplot2::geom_hline(yintercept = log1p(estParamRes$Parameters$Filtered$common.dispersion),
linetype = 2, colour="grey40") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$y),
linetype=1, size=1.5, colour="orange") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$upper),
linetype=2, size=1, colour="orange") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$lower),
linetype=2, size=1, colour="orange") +
ggplot2::labs(y=expression(bold(paste(Log, " Dispersion", sep=""))),
x=expression(bold(paste(Log, " Mean")))) +
ggplot2::theme(legend.position='none',
axis.text=ggplot2::element_text(size=12),
axis.title=ggplot2::element_text(size=14, face="bold"))
# mean vs p0 plot
meanvsp0.dat <- data.frame(Mean=log1p(estParamRes$Parameters$Filtered$means),
Dropout=estParamRes$Parameters$Filtered$gene.dropout)
meanvsp0.plot <- ggplot2::ggplot(data=meanvsp0.dat, ggplot2::aes(x=Mean, y=Dropout)) +
ggplot2::theme_classic() +
ggplot2::geom_point(size=0.5) +
ggplot2::stat_density2d(geom="tile", ggplot2::aes(fill=..density..^0.25,
alpha=ifelse(..density..^0.15<0.4,0,1)),
contour=FALSE) +
ggplot2::scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white", grDevices::blues9))(256)) +
ggplot2::ylim(c(0,1)) +
ggplot2::labs(y="Gene Dropout Rate",
x=expression(bold(paste(Log, " Mean")))) +
ggplot2::theme(legend.position='none',
axis.text=ggplot2::element_text(size=10),
axis.title=ggplot2::element_text(size=10, face="bold"))
if(attr(estParamRes, 'RNAseq') == 'bulk'){
meanvsp0.plot <- meanvsp0.plot +
ggplot2::geom_vline(xintercept = estParamRes$Fit$Filtered$g0.cut,
linetype = 2, colour="grey40")
}
}
if(attr(estParamRes, "Distribution") == "ZINB"){
# mean vs dispersion plot
meanvsdisp.dat <- data.frame(Mean=estParamRes$Fit$Filtered$meandispfit$model$x[,"x"],
Dispersion=estParamRes$Fit$Filtered$meandispfit$model$y)
meanvsdisp.plot <- ggplot2::ggplot(data=meanvsdisp.dat,
ggplot2::aes(x=Mean, y=Dispersion)) +
ggplot2::theme_classic() +
ggplot2::geom_point(size=0.5) +
ggplot2::stat_density2d(geom="tile", ggplot2::aes(fill=..density..^0.25,
alpha=ifelse(..density..^0.15<0.4,0,1)),
contour=FALSE) +
ggplot2::scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white", grDevices::blues9))(256)) +
ggplot2::geom_hline(yintercept = log2(estParamRes$Parameters$Filtered$common.dispersion),
linetype = 2, colour="grey40") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$y),
linetype=1, size=1.5, colour="orange") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$upper),
linetype=2, size=1, colour="orange") +
ggplot2::geom_line(ggplot2::aes(x=estParamRes$Fit$Filtered$meandispfit$x,
y=estParamRes$Fit$Filtered$meandispfit$lower),
linetype=2, size=1, colour="orange") +
ggplot2::labs(y=expression(bold(paste(Log, " Positive Dispersion", sep=""))),
x=expression(bold(paste(Log, " Positive Mean")))) +
ggplot2::theme(legend.position='none',
axis.text=ggplot2::element_text(size=10),
axis.title=ggplot2::element_text(size=10, face="bold"))
# mean vs p0 plot
meanvsp0fit.dat <- data.frame(Mean=estParamRes$Fit$Filtered$meang0fit$x,
Dropout=estParamRes$Fit$Filtered$meang0fit$y)
meanvsp0.dat <- data.frame(Mean=log2(estParamRes$Parameters$Filtered$pos.means+1),
Dropout=estParamRes$Parameters$Filtered$gene.dropout)
meanvsp0.plot <- ggplot2::ggplot(data=meanvsp0.dat, ggplot2::aes(x=Mean, y=Dropout)) +
ggplot2::theme_classic() +
ggplot2::geom_point(size=0.5) +
ggplot2::stat_density2d(geom="tile", ggplot2::aes(fill=..density..^0.25,
alpha=ifelse(..density..^0.15<0.4,0,1)),
contour=FALSE) +
ggplot2::scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white", grDevices::blues9))(256)) +
ggplot2::geom_vline(xintercept = estParamRes$Fit$Filtered$g0.cut,
linetype = 2, colour="grey40") +
ggplot2::geom_line(data = meanvsp0fit.dat,
ggplot2::aes(x=Mean,
y=Dropout),
linetype=1,
size=1.5,
colour="orange") +
ggplot2::ylim(c(0,1)) +
ggplot2::labs(y="Gene Dropout Rate",
x=expression(bold(paste(Log, " (Positive Mean)")))) +
ggplot2::theme(legend.position='none',
axis.text=ggplot2::element_text(size=10),
axis.title=ggplot2::element_text(size=10, face="bold"))
}
# read-umi
if(all(!is.na(estParamRes$Fit$UmiRead))) {
readvsumifit.dat <- data.frame(UMI=estParamRes$Fit$UmiRead$Fit$model$x[,"x"],
Ratio=estParamRes$Fit$UmiRead$Fit$model$y)
readvsumi.dat <- data.frame(UMI=estParamRes$Fit$UmiRead$lUMI,
Ratio=estParamRes$Fit$UmiRead$lRatio)
readvsumi.plot <- ggplot2::ggplot(data=readvsumi.dat,
ggplot2::aes(x=UMI, y=Ratio)) +
ggplot2::theme_classic() +
ggplot2::geom_point(size=0.5) +
ggplot2::stat_density2d(geom="tile", ggplot2::aes(fill=..density..^0.25,
alpha=ifelse(..density..^0.15<0.4,0,1)),
contour=FALSE) +
ggplot2::scale_fill_gradientn(colours = grDevices::colorRampPalette(c("white", grDevices::blues9))(256)) +
ggplot2::geom_line(data = readvsumifit.dat,
ggplot2::aes(x=estParamRes$Fit$UmiRead$Fit$x,
y=estParamRes$Fit$UmiRead$Fit$y),
linetype=1, size=1.5, colour="orange") +
ggplot2::geom_line(data = readvsumifit.dat,
ggplot2::aes(x=estParamRes$Fit$UmiRead$Fit$x,
y=estParamRes$Fit$UmiRead$Fit$upper),
linetype=2, size=1, colour="orange") +
ggplot2::geom_line(data = readvsumifit.dat,
ggplot2::aes(x=estParamRes$Fit$UmiRead$Fit$x,
y=estParamRes$Fit$UmiRead$Fit$lower),
linetype=2, size=1, colour="orange") +
ggplot2::labs(y=expression(bold(paste("Amplification Rate"))),
x=expression(bold(paste(Log[10], " UMI")))) +
ggplot2::theme(legend.position='none',
axis.text=ggplot2::element_text(size=10),
axis.title=ggplot2::element_text(size=10, face="bold"))
}
# combine the plots into one output
if(all(!is.na(estParamRes$DropOuts$Sample$GeneSpikeRatio))){
top_row <- suppressWarnings(cowplot::plot_grid(libsize.plot,
sf.plot,
totfeatures.plot,
genespike.plot,
ncol=4, nrow=1))
}
if(all(is.na(estParamRes$DropOuts$Sample$GeneSpikeRatio))){
top_row <- suppressWarnings(cowplot::plot_grid(libsize.plot,
sf.plot,
totfeatures.plot,
ncol=3, nrow=1))
}
middle_row <- suppressWarnings(cowplot::plot_grid(margs.plot,
no.table,
labels=c('B', 'C'),
rel_widths = c(0.7,0.3),
ncol=2,
nrow=1))
if(all(!is.na(estParamRes$Fit$UmiRead))){
bottom_row <- suppressWarnings(cowplot::plot_grid(meanvsdisp.plot,
meanvsp0.plot,
readvsumi.plot,
labels=c('D', 'E', 'F'),
ncol=3, nrow=1))
}
if(all(is.na(estParamRes$Fit$UmiRead))){
bottom_row <- suppressWarnings(cowplot::plot_grid(meanvsdisp.plot,
meanvsp0.plot,
labels=c('D', 'E'),
ncol=2, nrow=1))
}
p.final <- suppressWarnings(cowplot::plot_grid(top_row,
middle_row,
bottom_row,
labels=c('A', NULL, NULL),
ncol=1, nrow=3))
# annotation under plot
if (annot) {
p.final <- cowplot::add_sub(p.final, x = 0, hjust = 0, "A) Quality Control Metrics: Sequencing depth. Outliers are marked in red; Library size factors with median (black line) for the filtered data set.; Detected genes. Outliers are marked in red; Ratio of gene to spike-in counts (if spike-ins were provided). Outliers are marked in red. \nB) Marginal Distribution of mean, dispersion and dropout per estimation set. \nC) Number of genes and samples per estimation set. Provided by the user; Detected = number of genes and samples with at least one count; Full = number of genes for which mean, dispersion and dropout could be estimated using non-outlying samples. \nFiltered = number of genes above filter threshold for which mean, dispersion and dropout could be estimated using non-outlying samples. Dropout Genes = number of genes filtered out due to dropout rate. \nD) Local polynomial regression fit between mean and dispersion estimates with variability band per gene (yellow). Common dispersion estimate (grey dashed line). \nE) Fraction of dropouts versus estimated mean expression per gene. \nF) Local polynomial regression fit between UMI and read counts with variability band per gene (yellow). Only present when read count matrix of UMI data was provided.", size=8)
}
# draw the plot
cowplot::ggdraw(p.final)
}
# plotSpike ---------------------------------------------------------------
#' @name plotSpike
#' @aliases plotSpike
#' @title Visualize distributional characteristics of spike-ins
#' @description This function plots the results of the parameter estimation for spike-ins. This includes the absolute and relative sequencing depth (i.e. library size factor), a calibration curve as well as the capture efficiency given as a binomial regression.
#' @usage plotSpike(estSpike, annot=TRUE)
#' @param estSpike The output of \code{\link{estimateSpike}}.
#' @param annot A logical vector. If \code{TRUE}, a short figure legend is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## batch annotation
#' data(scrbseq_spike_cnts)
#' data(scrbseq_spike_info)
#' batch_info <- data.frame(Batch = ifelse(grepl(pattern = "SCRBseqA_",
#' colnames(scrbseq_spike_cnts)), "A", "B"),
#' row.names = colnames(scrbseq_spike_cnts))
#' ## spike information table
#' spike_info <- scrbseq_spike_info[-1,]
#' ## estimation
#' spike_param <- estimateSpike(spikeData = scrbseq_spike_cnts,
#' spikeInfo = spike_info,
#' MeanFragLength = NULL,
#' batchData = batch_info,
#'                              Normalisation = 'depth')
#' ## plotting
#' plotSpike(estSpike = spike_param, annot=TRUE)
#' }
#' @author Beate Vieth
#' @importFrom ggplot2 ggplot aes theme_minimal geom_bar geom_density geom_hline theme labs scale_y_continuous coord_flip geom_pointrange geom_point geom_smooth annotate scale_x_log10 scale_y_log10 annotation_logticks
#' @importFrom dplyr left_join group_by mutate ungroup do summarise n
#' @importFrom tidyr "%>%"
#' @importFrom broom glance
#' @importFrom reshape2 melt
#' @importFrom cowplot plot_grid add_sub ggdraw
#' @importFrom stats reorder
#' @rdname plotSpike
#' @export
# Visualize distributional characteristics of spike-ins (see roxygen header):
# A) sequencing depth, B) library size factors, C) calibration curve,
# D) capture efficiency, assembled into one figure with cowplot.
plotSpike <- function(estSpike, annot=TRUE) {
  # With fewer than 15 samples, draw per-sample bar charts; with 15 or more,
  # per-sample bars become unreadable, so densities are drawn instead.
  if(length(estSpike$seqDepth)<15) {
    # library size plot: one bar per sample, sorted by depth
    lib.size.dat <- data.frame(Seqdepth=estSpike$seqDepth,
                               Sample=names(estSpike$seqDepth))
    libsize.plot <- ggplot2::ggplot(data=lib.size.dat, ggplot2::aes(reorder(Sample, Seqdepth),Seqdepth)) +
      ggplot2::theme_minimal() +
      ggplot2::geom_bar(stat="identity",width=.5) +
      # median sequencing depth as a dashed reference line
      ggplot2::geom_hline(yintercept = median(lib.size.dat$Seqdepth), linetype = 2, colour="grey40") +
      ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                     axis.title=ggplot2::element_text(size=14, face="bold")) +
      ggplot2::labs(x=NULL, y="Sequencing depth") +
      # .plain: presumably a package-internal label formatter defined elsewhere
      # in this file -- TODO confirm
      ggplot2::scale_y_continuous(labels=.plain) +
      ggplot2::coord_flip()
    # size factor plot: one bar per sample, sorted by size factor
    sf.dat <- data.frame(SizeFactor=estSpike$size.factors,
                         Sample=names(estSpike$size.factors))
    sf.plot <- ggplot2::ggplot(data=sf.dat, ggplot2::aes(reorder(Sample, SizeFactor),SizeFactor)) +
      ggplot2::theme_minimal() +
      ggplot2::geom_bar(stat="identity",width=.5) +
      # median size factor as a dashed reference line
      ggplot2::geom_hline(yintercept = median(sf.dat$SizeFactor), linetype = 2, colour="grey40") +
      ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                     axis.title=ggplot2::element_text(size=14, face="bold")) +
      ggplot2::labs(x=NULL, y="Library Size Factor") +
      ggplot2::scale_y_continuous(labels=.plain) +
      ggplot2::coord_flip()
  }
  if(length(estSpike$seqDepth)>=15) {
    # library size plot: density over samples with median marked
    lib.size.dat <- data.frame(Seqdepth=estSpike$seqDepth,
                               Sample=names(estSpike$seqDepth))
    libsize.plot <- ggplot2::ggplot(lib.size.dat, ggplot2::aes(Seqdepth)) +
      ggplot2::theme_minimal() +
      ggplot2::geom_density() +
      ggplot2::geom_vline(xintercept = median(lib.size.dat$Seqdepth), linetype = 2, colour="grey40") +
      ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                     axis.title=ggplot2::element_text(size=14, face="bold")) +
      ggplot2::labs(x="Sequencing Depth", y="Density") +
      ggplot2::scale_x_continuous(labels=.plain)
    # size factor plot: density over samples with median marked
    sf.dat <- data.frame(SizeFactor=estSpike$size.factors,
                         Sample=names(estSpike$size.factors))
    sf.plot <- ggplot2::ggplot(sf.dat, ggplot2::aes(SizeFactor)) +
      ggplot2::theme_minimal() +
      ggplot2::geom_density() +
      ggplot2::geom_vline(xintercept = median(sf.dat$SizeFactor), linetype = 2, colour="grey40") +
      ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                     axis.title=ggplot2::element_text(size=14, face="bold")) +
      ggplot2::labs(x="Library Size Factor", y="Density") +
      ggplot2::scale_x_continuous(labels=.plain)
  }
  # calibration curve data: long-format normalized counts joined with the
  # annotated number of spike-in molecules; per-input-level mean/SD/SEM
  cal.dat <- reshape2::melt(estSpike$normCounts)
  names(cal.dat) <- c("SpikeID", "SampleID", "normCounts")
  cal.info.dat <- cal.dat %>%
    dplyr::left_join(estSpike$FilteredInput$spikeInfo, by="SpikeID") %>%
    dplyr::group_by(factor(SpikeInput)) %>%
    dplyr::mutate(Expectation=mean(normCounts),
                  Deviation=sd(normCounts),
                  Error=sd(normCounts)/sqrt(dplyr::n())) %>%
    dplyr::ungroup()
  # NOTE(review): error bars are log10(Expectation) +/- log10(Error), i.e. the
  # error enters multiplicatively on the log scale -- confirm this is intended
  limits <- ggplot2::aes(ymax = log10(Expectation) + log10(Error),
                         ymin= log10(Expectation) - log10(Error))
  # per-sample linear fits of log counts vs log molecules; summarised into the
  # mean and SD of R squared across samples
  Calibration = cal.dat %>%
    dplyr::left_join(estSpike$FilteredInput$spikeInfo, by="SpikeID") %>%
    dplyr::group_by(SampleID) %>%
    dplyr::do(LmFit = lm(log10(normCounts+1) ~ log10(SpikeInput+1), data = .)) %>%
    broom::glance(LmFit) %>%
    dplyr::ungroup() %>%
    dplyr::summarise(Rsquared=mean(r.squared), RsquaredSE=sd(r.squared))
  # calibration curve plot with lm smoother and R^2 annotation (parsed plotmath)
  cal.plot <- ggplot2::ggplot(data = cal.info.dat,
                              ggplot2::aes(x=log10(SpikeInput),
                                           y=log10(Expectation))) +
    ggplot2::geom_pointrange(limits) +
    ggplot2::geom_point() +
    ggplot2::geom_smooth(method='lm',formula=y~x) +
    ggplot2::annotate("text", label = paste0("italic(R) ^ 2 == ",
                                             round(Calibration$Rsquared, digits = 2),
                                             "%+-%",
                                             round(Calibration$RsquaredSE, digits = 2)),
                      parse = T, x = 0.2, y = 4, size = 4) +
    ggplot2::theme_minimal() +
    ggplot2::scale_x_log10(labels=c("0.1","1","10","100","1,000"),breaks=c(0.1,1,10,100,1000)) +
    ggplot2::scale_y_log10(labels=c("0.1","1","10","100","1,000"),breaks=c(0.1,1,10,100,1000)) +
    ggplot2::annotation_logticks(sides = "bl") +
    ggplot2::labs(y=expression(bold(paste(Log[10], " Estimated Expression", sep=""))),
                  x=expression(bold(paste(Log[10], " Spike-In Molecules")))) +
    ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                   axis.title=ggplot2::element_text(size=14, face="bold"),
                   axis.line.x = ggplot2::element_line(colour = "black"),
                   axis.line.y = ggplot2::element_line(colour = "black"))
  # capture efficiency data: detection probability per spike-in with CI bounds,
  # joined with the annotated number of molecules
  capture.dat <- estSpike$CaptureEfficiency$`Spike-In` %>%
    tibble::rownames_to_column(var = "SpikeID") %>%
    dplyr::select(SpikeID, p_success, hat_p_success_cilower, hat_p_success_ciupper) %>%
    dplyr::left_join(estSpike$FilteredInput$spikeInfo, by="SpikeID")
  # capture efficiency plot: binomial logistic regression fit over all cells
  capture.plot <- ggplot2::ggplot(data = capture.dat,
                                  ggplot2::aes(x=log10(SpikeInput+1),
                                               y=p_success)) +
    ggplot2::geom_point() +
    ggplot2::geom_smooth(method="glm", method.args = list(family = "binomial"), se=T) +
    ggplot2::theme_minimal() +
    ggplot2::scale_x_log10(labels=c("0.1","1","10","100","1,000"),breaks=c(0.1,1,10,100,1000)) +
    ggplot2::annotation_logticks(sides = "b") +
    ggplot2::labs(y=expression(bold("Detection Probability")),
                  x=expression(bold(paste(Log[10], " Spike-In Molecules")))) +
    ggplot2::theme(axis.text=ggplot2::element_text(size=12),
                   axis.title=ggplot2::element_text(size=14, face="bold"),
                   axis.line.x = ggplot2::element_line(colour = "black"),
                   axis.line.y = ggplot2::element_line(colour = "black"))
  # assemble panels A-D into the final 2x2 layout
  top_row <- suppressWarnings(cowplot::plot_grid(libsize.plot,sf.plot,
                                                 labels=c('A', 'B'),
                                                 ncol=2, nrow=1))
  bottom_row <- suppressWarnings(cowplot::plot_grid(cal.plot,
                                                    capture.plot,
                                                    labels=c('C', 'D'),
                                                    ncol=2, nrow=1))
  p.final <- suppressWarnings(cowplot::plot_grid(top_row,
                                                 bottom_row,
                                                 rel_heights = c(1, 1.5),
                                                 ncol=1, nrow=2))
  # annotation under plot (optional figure legend)
  if (annot) {
    p.final <- cowplot::add_sub(p.final, "A) Sequencing depth per sample with median sequencing depth (grey dashed line).
\nB) Library size normalisation factor per sample with median size factor (grey dashed line).
\nC) Calibration curve with mean expression estimates and average R squared over all cells.
\nD) Capture efficiency with binomial logistic regression fit over all cells.", size=8)
  }
  # draw the plot
  cowplot::ggdraw(p.final)
}
# plotCounts --------------------------------------------------------------
#' @name plotCounts
#' @aliases plotCounts
#' @title Visualize simulated counts
#' @description This function performs multidimensional scaling of the simulated counts from the pairwise sample distances using variable genes (i.e. variance unequal to zero). Prior to distance calculation, the counts are normalized using the simulated size factors and log2 transformed. In the plot, the samples are annotated by phenotype and batch, if present.
#' @usage plotCounts(simCounts, Distance, Scale, DimReduce, verbose = T)
#' @param simCounts The output of \code{\link{simulateCounts}}.
#' @param Distance The (dis-)similarity measure to be used. This can be "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski" for distance measures and "spearman", "pearson" or "kendall" for correlation measures converted into distances, respectively. For more information, see \code{\link[stats]{dist}} and \code{\link[stats]{cor}}.
#' @param Scale A logical vector indicating whether to use scaled log2 transformed counts or not.
#' @param DimReduce The dimension reduction approach to be used. This can be "MDS" \code{\link[stats]{cmdscale}}, "PCA" \code{\link[stats]{prcomp}}, "t-SNE" \code{\link[Rtsne]{Rtsne}}, "ICA" \code{\link[fastICA]{fastICA}} or "LDA" \code{\link[MASS]{lda}}.
#' @param verbose Logical value to indicate whether to print function information. Default is \code{TRUE}.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## not yet
#' }
#' @author Beate Vieth
#' @importFrom ggplot2 ggplot aes geom_point theme_minimal
#' @importFrom stats dist cor as.dist cmdscale prcomp predict
#' @importFrom MASS isoMDS
#' @importFrom Rtsne Rtsne
#' @importFrom fastICA fastICA
#' @importFrom tidyr "%>%" separate
#' @importFrom tibble rownames_to_column
#' @importFrom dplyr select
#' @rdname plotCounts
#' @export
# Multidimensional visualisation of simulated counts (see roxygen header):
# normalizes by simulated size factors, log2-transforms, drops (near-)invariable
# genes, applies the chosen dimension reduction and plots the first two
# dimensions coloured by phenotype (and shaped by batch, if present).
# Fixes relative to the previous revision:
#  - TRUE/FALSE instead of T/F (T/F are reassignable),
#  - the LDA branch piped a matrix into tibble::rownames_to_column(), which
#    requires a data.frame and errored; the matrix is now converted first,
#  - invalid Distance/DimReduce combinations now fail with an explicit error
#    (previously a cryptic "object not found" further down).
plotCounts <- function(simCounts, Distance, Scale, DimReduce, verbose = TRUE) {
  # validate the requested dimension reduction up front
  if (!DimReduce %in% c("MDS", "PCA", "t-SNE", "ICA", "LDA")) {
    stop("Unrecognized form of dimension reduction!\n")
  }
  # normalize by the simulated size factors (counts are genes x samples)
  norm.counts <- t(t(simCounts$GeneCounts)/simCounts$sf)
  # log2 transform
  lnorm.counts <- log2(norm.counts+1)
  # kick out (near-)invariable genes; the threshold is var < 0.01, i.e.
  # slightly stricter than "variance unequal to zero"
  drop_genes <- apply(lnorm.counts, 1, function(x) {var(x) < 0.01})
  if(isTRUE(verbose)) {
    message(paste0("Dropping ", sum(drop_genes), " genes out of a total of ",
                   nrow(lnorm.counts), " genes."))
  }
  lnorm.counts <- lnorm.counts[!drop_genes, ]
  # transpose: samples in rows for distance / dimension reduction
  vals <- t(lnorm.counts)
  # apply scaling of genes if requested
  if(Scale) {
    vals <- scale(vals, scale = TRUE)
  }
  # calculate dissimilarity matrix (only needed for MDS)
  if(Distance %in% c("euclidean", "maximum", "manhattan", "canberra", "binary", "minkowski") &&
     DimReduce == "MDS") {
    if(isTRUE(verbose)) {
      message(paste0("Calculating distance matrix."))
    }
    dist.mat <- stats::dist(vals, method = Distance)
  }
  if(Distance %in% c("pearson", "kendall", "spearman") &&
     DimReduce == "MDS") {
    if(isTRUE(verbose)) {
      message(paste0("Calculating distance matrix."))
    }
    # convert correlation into a distance
    cor.mat <- stats::cor(vals, method = Distance)
    dist.mat <- stats::as.dist(1-cor.mat)
  }
  if(DimReduce == "MDS" && !exists("dist.mat", inherits = FALSE)) {
    stop("Unrecognized form of distance measure!\n")
  }
  # apply dimension reduction; each branch yields a samples x 2 matrix
  if(isTRUE(verbose)) {
    message(paste0("Applying dimension reduction."))
  }
  if(DimReduce == "MDS") {
    mds_out <- stats::cmdscale(d = dist.mat)
  }
  if(DimReduce == "PCA") {
    mds_out <- stats::prcomp(x = vals, center = TRUE, scale = Scale)
    mds_out <- mds_out$x[, c(1:2)]
  }
  if(DimReduce == "t-SNE") {
    # PCA preprocessing inside Rtsne; fixed perplexity of 30
    tsne.res <- Rtsne::Rtsne(X = vals, pca = TRUE, is_distance = FALSE, perplexity = 30)
    mds_out <- tsne.res$Y
    rownames(mds_out) <- colnames(lnorm.counts)
  }
  if(DimReduce == "ICA") {
    ica.res <- fastICA::fastICA(X = vals,
                                n.comp = 2,
                                alg.typ = "deflation",
                                fun = "logcosh",
                                alpha = 1,
                                method = "R",
                                row.norm = FALSE,
                                maxit = 200,
                                tol = 0.0001,
                                verbose = FALSE)
    mds_out <- ica.res$S # ICA components
    rownames(mds_out) <- colnames(lnorm.counts)
  }
  if(DimReduce == "LDA") {
    # supervised: phenotype labels are parsed out of the sample names,
    # which are assumed to follow "SampleID_Phenotype_Batch" -- TODO confirm
    # NB: vals is a matrix; rownames_to_column() needs a data.frame
    lda.dat <- as.data.frame(vals) %>%
      tibble::rownames_to_column(var="Sample") %>%
      tidyr::separate(col = Sample, into = c("SampleID", "Phenotype", "Batch"),
                      extra = "drop", fill = "right", remove = TRUE) %>%
      dplyr::select(-SampleID, -Batch)
    # empirical class priors
    lda.prior <- as.vector(table(lda.dat$Phenotype)/sum(table(lda.dat$Phenotype)))
    lda.res <- MASS::lda(Phenotype ~ ., lda.dat, prior = lda.prior)
    plda.res <- stats::predict(object = lda.res, newdata = lda.dat)
    mds_out <- plda.res$x
    rownames(mds_out) <- colnames(lnorm.counts)
  }
  # collect data to plot
  if(isTRUE(verbose)) {
    message(paste0("Creating plot."))
  }
  colnames(mds_out) <- c("Dimension1", "Dimension2")
  dat.plot <- data.frame(mds_out) %>%
    tibble::rownames_to_column(var="Sample") %>%
    tidyr::separate(col = Sample, into = c("SampleID", "Phenotype", "Batch"),
                    extra = "drop", fill = "right", remove = FALSE)
  # plot: colour by phenotype, shape by batch when batch labels are present
  p1 <- ggplot2::ggplot(data = dat.plot,
                        ggplot2::aes(x = Dimension1, y = Dimension2))
  if(all(!is.na(dat.plot$Batch))) {
    p2 <- p1 + ggplot2::geom_point(ggplot2::aes(shape = Batch, colour = Phenotype),
                                   size = 2, alpha = 0.5, data = dat.plot)
  }
  if(all(is.na(dat.plot$Batch))) {
    p2 <- p1 + ggplot2::geom_point(ggplot2::aes(colour = Phenotype),
                                   size = 2, alpha = 0.5, data = dat.plot)
  }
  p3 <- p2 + ggplot2::theme_minimal()
  return(p3)
}
# plotEvalDE -------------------------------------------------------------
#' @name plotEvalDE
#' @aliases plotEvalDE
#' @title Visualize power assessment
#' @description This function plots the results of \code{\link{evaluateDE}} for assessing the error rates and sample size requirements.
#' @usage plotEvalDE(evalRes, rate=c('marginal', 'stratified'),
#' quick=TRUE, annot=TRUE)
#' @param evalRes The output of \code{\link{evaluateDE}}.
#' @param rate Character vector defining whether the marginal or conditional rates should be plotted. Conditional depends on the choice of stratify.by in \code{\link{evaluateDE}}.
#' @param quick A logical vector. If \code{TRUE}, the TPR and FDR are only plotted. If \code{FALSE}, then all rates are plotted.
#' @param annot A logical vector. If \code{TRUE}, a short figure legend under the plot is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## using example data set
#' eval.de <- evaluateDE(simRes = kolodziejczk_simDE)
#' plotEvalDE(evalRes=eval.de, rate ="marginal", quick=T, annot=T)
#' plotEvalDE(evalRes=eval.de, rate ="stratified", quick=T, annot=T)
#' }
#' @author Beate Vieth
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot aes labs theme scale_y_continuous geom_line geom_hline geom_pointrange facet_wrap geom_boxplot position_dodge scale_fill_manual geom_bar theme_minimal
#' @importFrom grid unit
#' @importFrom scales percent
#' @importFrom cowplot plot_grid add_sub ggdraw
#' @importFrom dplyr group_by summarise ungroup n
#' @importFrom tidyr %>%
#' @rdname plotEvalDE
#' @export
plotEvalDE <- function(evalRes, rate=c('marginal', 'stratified'), quick=TRUE, annot=TRUE) {
  rate <- match.arg(rate)
  # marginal rates over sample sizes
  if(rate=='marginal') {
    if(quick) {
      # quick mode: restrict to TPR and FDR only
      dat.marginal <- evalRes[c('TPR.marginal', 'FDR.marginal')]
      names(dat.marginal) <- substr(x = names(dat.marginal), start = 1, stop = 3)
      dat.marginal <- lapply(dat.marginal, "rownames<-", paste0(evalRes[['n1']], " vs ", evalRes[['n2']]))
      dat.marginal.long <- reshape2::melt(dat.marginal)
      # reference lines: nominal alpha level for FDR, 80% power for TPR
      refval <- data.frame(L1 = c("FDR", "TPR"), ref = c(evalRes$alpha.nominal, 0.8))
      # mean +/- sd / sem per sample-size comparison (Var1) and rate (L1)
      dat.marginal.calc <- dat.marginal.long %>% dplyr::group_by(Var1, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
      # panel A: both rates in one boxplot panel
      grandplot <- ggplot2::ggplot(data = dat.marginal.long, ggplot2::aes(x=Var1, y=value, color=L1)) +
        ggplot2::geom_boxplot() +
        ggplot2::theme_minimal() +
        ggplot2::labs(x=NULL, y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
        ggplot2::theme(legend.position='top',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y=ggplot2::element_text(size=10),
                       axis.title=ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"))
      # panel B: one facet per rate with reference lines
      facetplot <- ggplot2::ggplot(data = dat.marginal.calc, ggplot2::aes(x=Var1, y=Expectation, fill=L1, color=L1)) +
        ggplot2::geom_line(ggplot2::aes(group=L1)) +
        ggplot2::geom_pointrange(limits) +
        ggplot2::theme_minimal() +
        ggplot2::labs(x="Samples", y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent) +
        ggplot2::theme(legend.position='none',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x = ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y = ggplot2::element_text(size=10),
                       axis.title = ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10, face="bold")) +
        ggplot2::facet_wrap(~L1, scales = 'free', ncol=1) +
        ggplot2::geom_hline(data = refval, ggplot2::aes(yintercept = ref), linetype="dashed", color='grey')
      # combine panels; annotation is added below the grid if requested
      p.final <- suppressWarnings(cowplot::plot_grid(grandplot,
                                                     facetplot,
                                                     labels=c('A', 'B'),
                                                     rel_heights = c(1,1.5),
                                                     ncol=1, nrow=2))
      if(annot) {
        p.final <- cowplot::add_sub(p.final, "A) Marginal TPR and FDR per sample size comparison. \nB) Marginal TPR and FDR per sample size comparison with dashed line indicating nominal alpha level (type I error) and nominal 1-beta level, i.e. 80% power (type II error).", size=8)
      }
    }
    if(!quick) {
      # full mode: select all marginal rate matrices (e.g. TPR, FPR, TNR, FNR, FDR)
      dat.marginal <- evalRes[grep('*R.marginal', names(evalRes))]
      names(dat.marginal) <- substr(x = names(dat.marginal), start = 1, stop = 3)
      dat.marginal <- lapply(dat.marginal, "rownames<-", paste0(evalRes[['n1']], " vs ", evalRes[['n2']]))
      dat.marginal.long <- reshape2::melt(dat.marginal)
      refval <- data.frame(L1 = c("FDR", "TPR"), ref = c(evalRes$alpha.nominal, 0.8))
      dat.marginal.calc <- dat.marginal.long %>%
        dplyr::group_by(Var1, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
      # panel A: all rates in one boxplot panel
      grandplot <- ggplot2::ggplot(data = dat.marginal.long, ggplot2::aes(x=Var1, y=value, color=L1)) +
        ggplot2::geom_boxplot() +
        ggplot2::theme_minimal() +
        ggplot2::labs(x=NULL, y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
        ggplot2::theme(legend.position='top',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y=ggplot2::element_text(size=10),
                       axis.title=ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"))
      # panel B: one facet per rate with reference lines
      facetplot <- ggplot2::ggplot(data = dat.marginal.calc, ggplot2::aes(x=Var1, y=Expectation, fill=L1, color=L1)) +
        ggplot2::geom_line(ggplot2::aes(group=L1)) +
        ggplot2::geom_pointrange(limits) +
        ggplot2::theme_minimal() +
        ggplot2::labs(x='Samples', y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent) +
        ggplot2::theme(legend.position='none',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y=ggplot2::element_text(size=10),
                       axis.title=ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10, face="bold")) +
        ggplot2::facet_wrap(~L1, scales = 'free', ncol=2) +
        ggplot2::geom_hline(data = refval, ggplot2::aes(yintercept = ref), linetype="dashed", color='grey')
      # combine panels; annotation is added below the grid if requested
      p.final <- suppressWarnings(cowplot::plot_grid(grandplot,
                                                     facetplot,
                                                     labels=c('A', 'B'),
                                                     rel_heights = c(1,2),
                                                     ncol=1, nrow=2))
      if(annot) {
        p.final <- cowplot::add_sub(p.final, "A) Marginal error rates per sample size comparison. \nB) Marginal error rates per sample size comparison with dashed line indicating nominal alpha level (type I error) and nominal 1-beta level, i.e. 80% power (type II error).", size=8)
      }
    }
  }
  #stratified rates
  if(rate=='stratified') {
    if(quick){
      # quick mode: restrict to TPR and FDR per stratum
      dat.stratified <- evalRes[c('TPR', 'FDR')]
      strata <- evalRes$strata.levels
      dat.stratified <- lapply(dat.stratified, "dimnames<-", list(strata, paste0(evalRes[['n1']], " vs ", evalRes[['n2']]), NULL))
      dat.stratified.long <- reshape2::melt(dat.stratified)
      # mean +/- sd / sem per stratum (Var1), sample size (Var2) and rate (L1)
      dat.stratified.calc <- dat.stratified.long %>%
        dplyr::group_by(Var1, Var2, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
      refval <- data.frame(L1 = c("FDR", "TPR"), ref = c(evalRes$alpha.nominal, 0.8))
      # panel A: per-stratum rates, one facet per rate
      facetplot <- ggplot2::ggplot(data = dat.stratified.calc, ggplot2::aes(x=Var1, y=Expectation, fill=Var2, color=Var2)) +
        ggplot2::geom_point() +
        ggplot2::geom_line(ggplot2::aes(group=Var2)) +
        ggplot2::geom_pointrange(limits) +
        ggplot2::theme_minimal() +
        ggplot2::labs(x=NULL, y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
        ggplot2::theme(legend.position='top',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x = ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y = ggplot2::element_text(size=10),
                       axis.title = ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10, face="bold")) +
        ggplot2::facet_wrap(~L1, scales = 'free', ncol=2) +
        ggplot2::geom_hline(data = refval, ggplot2::aes(yintercept = ref), linetype="dashed", color='grey')
      # panel B: gene counts per stratum, taken from the largest sample size comparison
      N <- length(evalRes$n1)
      dat.genes <- list("Ngenes"=evalRes$stratagenes[,N,],'DEgenes'=evalRes$stratadiffgenes[,N,])
      dat.genes <- lapply(dat.genes, "rownames<-", strata)
      dat.genes.long <- reshape2::melt(dat.genes)
      dat.genes.calc <- dat.genes.long %>% dplyr::group_by(Var1, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      dodge <- ggplot2::position_dodge(width=0.9)
      strataplot <- ggplot2::ggplot(data = dat.genes.calc, ggplot2::aes(x=Var1, y=Expectation, fill=L1)) +
        ggplot2::geom_bar(stat="identity") +
        ggplot2::theme_minimal() +
        ggplot2::labs(x='Stratum', y="Count") +
        ggplot2::theme(legend.position='right',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x = ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y = ggplot2::element_text(size=10),
                       axis.title = ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10)) +
        ggplot2::scale_fill_manual(values=c('grey', 'black'),
                                   breaks = c("DEgenes", "Ngenes"),
                                   labels = c("DE genes", "EE genes"))
      # combine panels; annotation is added below the grid if requested
      p.final <- suppressWarnings(cowplot::plot_grid(facetplot,
                                                     strataplot,
                                                     labels=c('A', 'B'),
                                                     rel_heights = c(2,1),
                                                     ncol=1, nrow=2))
      if(annot) {
        p.final <- cowplot::add_sub(p.final, "A) Conditional TPR and FDR per sample size comparison per stratum. \nB) Number of equally (EE) and differentially expressed (DE) genes per stratum.", size=8)
      }
    }
    if(!quick) {
      # full mode: select all stratified rate arrays (names ending in R, e.g. TPR, FPR, FDR)
      dat.stratified <- evalRes[grep('*R$', names(evalRes))]
      strata <- evalRes$strata.levels
      dat.stratified <- lapply(dat.stratified, "dimnames<-", list(strata, paste0(evalRes[['n1']], " vs ", evalRes[['n2']]), NULL))
      dat.stratified.long <- reshape2::melt(dat.stratified)
      dat.stratified.calc <- dat.stratified.long %>%
        dplyr::group_by(Var1, Var2, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
      refval <- data.frame(L1 = c("FDR", "TPR"), ref = c(evalRes$alpha.nominal, 0.8))
      # panel A: per-stratum rates, one facet per rate
      facetplot <- ggplot2::ggplot(data = dat.stratified.calc, ggplot2::aes(x=Var1, y=Expectation, fill=Var2, color=Var2)) +
        ggplot2::geom_point() +
        ggplot2::geom_line(ggplot2::aes(group=Var2)) +
        ggplot2::geom_pointrange(limits) +
        ggplot2::theme_minimal() +
        ggplot2::labs(x=NULL, y="Rate") +
        ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
        ggplot2::theme(legend.position = 'top',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x = ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y = ggplot2::element_text(size=10),
                       axis.title = ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10, face="bold")) +
        ggplot2::facet_wrap(~L1, scales = 'free_x', ncol=2) +
        ggplot2::geom_hline(data = refval, ggplot2::aes(yintercept = ref), linetype="dashed", color='grey')
      # panel B: gene counts per stratum, taken from the largest sample size comparison
      N <- length(evalRes$n1)
      dat.genes <- list("Ngenes"=evalRes$stratagenes[,N,],'DEgenes'=evalRes$stratadiffgenes[,N,])
      dat.genes <- lapply(dat.genes, "rownames<-", strata)
      dat.genes.long <- reshape2::melt(dat.genes)
      dat.genes.calc <- dat.genes.long %>%
        dplyr::group_by(Var1, L1) %>%
        dplyr::summarise(Expectation=mean(value), Deviation=sd(value), Error=sd(value)/sqrt(dplyr::n())) %>%
        dplyr::ungroup()
      dodge <- ggplot2::position_dodge(width=0.9)
      strataplot <- ggplot2::ggplot(data = dat.genes.calc, ggplot2::aes(x=Var1, y=Expectation, fill=L1)) +
        ggplot2::geom_bar(stat="identity") +
        ggplot2::theme_minimal() +
        ggplot2::labs(x="Stratum", y="Count") +
        ggplot2::theme(legend.position='right',
                       legend.title = ggplot2::element_blank(),
                       axis.text.x = ggplot2::element_text(size=10, angle=45, hjust=1),
                       axis.text.y=ggplot2::element_text(size=10),
                       axis.title=ggplot2::element_text(size=10, face="bold"),
                       legend.text = ggplot2::element_text(size=10),
                       legend.key.size = grid::unit(1, "cm"),
                       strip.text = ggplot2::element_text(size=10)) +
        ggplot2::scale_fill_manual(values=c('grey', 'black'),
                                   breaks = c("DEgenes", "Ngenes"),
                                   labels = c("DE genes", "EE genes"))
      # combine panels; annotation is added below the grid if requested
      p.final <- suppressWarnings(cowplot::plot_grid(facetplot,
                                                     strataplot,
                                                     labels=c('A', 'B'),
                                                     rel_heights = c(3,1),
                                                     ncol=1, nrow=2))
      if(annot) {
        p.final <- cowplot::add_sub(p.final, "A) Conditional error rates over stratum. \nB) Number of equally (EE) and differentially expressed (DE) genes per stratum.", size=8)
      }
    }
  }
  # draw final plot
  cowplot::ggdraw(p.final)
}
# plotEvalSim -------------------------------------------------------------
#' @name plotEvalSim
#' @aliases plotEvalSim
#' @title Visualize power assessment
#' @description This function plots the results of \code{\link{evaluateSim}} for assessing the setup performance, i.e. normalisation method performance.
#' @usage plotEvalSim(evalRes, annot=TRUE)
#' @param evalRes The output of \code{\link{evaluateSim}}.
#' @param annot A logical vector. If \code{TRUE}, a short figure legend under the plot is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## using example data set
#' eval.sim <- evaluateSim(simRes = kolodziejczk_simDE, timing = TRUE)
#' plotEvalSim(eval.sim)
#' }
#' @author Beate Vieth
#' @importFrom reshape2 melt
#' @importFrom ggplot2 ggplot aes labs theme element_blank scale_y_continuous geom_line geom_hline geom_pointrange facet_wrap geom_boxplot position_dodge scale_fill_manual geom_bar theme_minimal
#' @importFrom grid unit
#' @importFrom scales percent
#' @importFrom cowplot plot_grid add_sub ggdraw
#' @importFrom dplyr group_by summarise ungroup n
#' @importFrom tidyr "%>%"
#' @rdname plotEvalSim
#' @export
plotEvalSim <- function(evalRes, annot=TRUE) {
  # panel A: accuracy of estimated log2 fold changes per DE group and error metric
  lfc <- reshape2::melt(evalRes$Log2FoldChange)
  colnames(lfc) <- c("SimNo", "Metric", "Value", "Samples")
  lfc.dat <- lfc %>%
    tidyr::separate(Metric, c("DE-Group", "Metric", "Type"), "_") %>%
    dplyr::filter(!Type=="NAFraction") %>%
    dplyr::group_by(Samples, `DE-Group`, Metric) %>%
    dplyr::summarise(Expectation=mean(Value, na.rm=TRUE),
                     Deviation=sd(Value, na.rm=TRUE),
                     Error=sd(Value, na.rm=TRUE)/sqrt(dplyr::n())) %>%
    dplyr::ungroup() %>%
    tidyr::separate(Samples, c('n1', 'n2'), " vs ", remove=FALSE) %>%
    dplyr::mutate(SumN = as.numeric(n1)+as.numeric(n2)) %>%
    dplyr::arrange(SumN)
  # to label the x axis from smallest to largest n group!
  lfc.dat$Samples <- factor(lfc.dat$Samples,
                            levels=unique(lfc.dat$Samples[order(lfc.dat$SumN,decreasing = FALSE)]))
  limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
  dodge <- ggplot2::position_dodge(width=0.7)
  lfc.plot <- ggplot2::ggplot(data = lfc.dat, ggplot2::aes(x=Samples,
                                                           y=Expectation,
                                                           fill=`DE-Group`,
                                                           colour=`DE-Group`)) +
    ggplot2::geom_point(position = dodge) +
    # ggplot2::geom_line(ggplot2::aes(group=Metric),position = dodge) +
    ggplot2::geom_pointrange(limits, position = dodge) +
    ggplot2::theme_minimal() +
    ggplot2::labs(x="Samples", y="Value") +
    ggplot2::facet_wrap(~Metric, ncol=3, scales="free") +
    ggplot2::theme(legend.position='right', legend.title = ggplot2::element_blank(),
                   axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                   axis.text.y=ggplot2::element_text(size=10),
                   axis.title=ggplot2::element_text(size=10, face="bold"),
                   legend.text = ggplot2::element_text(size=10),
                   legend.key.size = grid::unit(0.5, "cm"),
                   strip.text = ggplot2::element_text(size=10, face="bold"))
  # panel B: agreement between estimated and true size factors (MAD, rRMSE)
  sf <- reshape2::melt(evalRes$SizeFactors)
  colnames(sf) <- c("SimNo", "Metric", "Value", "Samples")
  sf.stats <- sf %>%
    dplyr::filter(Metric %in% c("MAD", "rRMSE")) %>%
    dplyr::group_by(Samples, Metric) %>%
    dplyr::summarise(Expectation=mean(Value, na.rm=TRUE),
                     Deviation=sd(Value, na.rm=TRUE),
                     Error=sd(Value, na.rm=TRUE)/sqrt(dplyr::n())) %>%
    dplyr::ungroup() %>%
    tidyr::separate(Samples, c('n1', 'n2'), " vs ", remove=FALSE) %>%
    dplyr::mutate(SumN = as.numeric(n1)+as.numeric(n2)) %>%
    dplyr::arrange(SumN)
  # to label the x axis from smallest to largest n group!
  sf.stats$Samples <- factor(sf.stats$Samples,
                             levels=unique(sf.stats$Samples[order(sf.stats$SumN,decreasing = FALSE)]))
  limits <- ggplot2::aes(ymax = Expectation + Deviation, ymin= Expectation - Deviation)
  dodge <- ggplot2::position_dodge(width=0.7)
  sfstats.plot <- ggplot2::ggplot(data = sf.stats, ggplot2::aes(x=Samples,
                                                                y=Expectation)) +
    ggplot2::geom_point(position = dodge) +
    # ggplot2::geom_line(ggplot2::aes(group=Metric),position = dodge) +
    ggplot2::facet_wrap(~Metric, ncol=2, scales="free") +
    ggplot2::geom_pointrange(limits, position = dodge) +
    ggplot2::theme_minimal() +
    ggplot2::labs(x="Samples", y="Value") +
    ggplot2::theme(legend.position='right', legend.title = ggplot2::element_blank(),
                   axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                   axis.text.y=ggplot2::element_text(size=10),
                   axis.title=ggplot2::element_text(size=10, face="bold"),
                   legend.text = ggplot2::element_text(size=10),
                   legend.key.size = grid::unit(0.5, "cm"),
                   strip.text = ggplot2::element_text(size=10, face="bold"))
  # panel C: ratio of size factors per group; a dashed line at 1 marks perfect agreement
  ratio.dat <- sf %>%
    dplyr::filter(grepl("Group", Metric)) %>%
    tidyr::separate(Samples, c('n1', 'n2'), " vs ", remove=FALSE) %>%
    dplyr::mutate(SumN = as.numeric(n1)+as.numeric(n2)) %>%
    dplyr::arrange(SumN)
  ratio.dat$Samples <- factor(ratio.dat$Samples,
                              levels=unique(ratio.dat$Samples[order(ratio.dat$SumN,decreasing = FALSE)]))
  ratio.plot <- ggplot2::ggplot(data = ratio.dat, ggplot2::aes(x=Samples,
                                                               y=Value,
                                                               color=Metric)) +
    ggplot2::geom_boxplot() +
    ggplot2::theme_minimal() +
    ggplot2::labs(x="Samples", y="Value") +
    ggplot2::geom_hline(yintercept=1,linetype="dashed", color='darkgrey') +
    ggplot2::theme(legend.position='right', legend.title = ggplot2::element_blank(),
                   axis.text.x=ggplot2::element_text(size=10, angle=45, hjust=1),
                   axis.text.y=ggplot2::element_text(size=10),
                   axis.title=ggplot2::element_text(size=10, face="bold"),
                   legend.text = ggplot2::element_text(size=10),
                   legend.key.size = grid::unit(0.5, "cm"),
                   strip.text = ggplot2::element_text(size=10, face="bold"))
  # assemble panels: A on top, B and C side by side below
  bottom_row <- suppressWarnings(cowplot::plot_grid(sfstats.plot,
                                                    ratio.plot,
                                                    labels = c('B', 'C'),
                                                    align = 'hv',
                                                    ncol=2,
                                                    nrow=1,
                                                    rel_widths = c(1.3, 1)))
  p.final <- suppressWarnings(cowplot::plot_grid(lfc.plot,
                                                 bottom_row,
                                                 labels=c('A', ''),
                                                 rel_heights = c(1.3,1),
                                                 ncol=1, nrow=2))
  # annotation under plot
  if(annot) {
    p.final <- cowplot::add_sub(p.final, "A) Mean Absolute Error (MAE), Root Mean Squared Error (RMSE) and robust Root Mean Squared Error (rRMSE) \n for the estimated log2 fold changes of all (ALL), differentially expressed (DE) and equally expressed (EE) genes compared to the true log2 fold changes.
                        \nB) Median absolute deviation (MAD) and robust Root Mean Squared Error (rRMSE) between estimated and true size factors.
                        \nC) The average ratio between simulated and true size factors in the two groups of samples.", size=8)
  }
  # draw final plot
  cowplot::ggdraw(p.final)
}
# plotTime ---------------------------------------------------------------
#' @name plotTime
#' @aliases plotTime
#' @title Visualize computational time
#' @description This function plots the computational running time of simulations.
#' @usage plotTime(simRes, Table=TRUE, annot=TRUE)
#' @param simRes The output of \code{\link{simulateDE}}.
#' @param Table A logical vector. If \code{TRUE}, a table of average computational running time per step and sample size is printed additionally.
#' @param annot A logical vector. If \code{TRUE}, a short figure legend under the plot is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## using example data set
#' plotTime(simRes = kolodziejczk_simDE)
#' }
#' @author Beate Vieth
#' @importFrom tidyr "%>%" separate
#' @importFrom dplyr mutate arrange
#' @importFrom tibble rownames_to_column
#' @importFrom ggplot2 ggplot aes position_dodge geom_point geom_pointrange facet_wrap theme_bw labs theme element_blank element_text guide_legend guides
#' @importFrom grid unit
#' @importFrom cowplot add_sub ggdraw
#' @importFrom matrixStats rowSds
#' @rdname plotTime
#' @export
plotTime <- function(simRes, Table=TRUE, annot=TRUE) {
  # simulation parameters
  Nreps1 <- simRes$sim.settings$n1
  Nreps2 <- simRes$sim.settings$n2
  time.taken <- simRes$time.taken
  nsims <- simRes$sim.settings$nsims
  ncores <- simRes$sim.settings$NCores
  if(is.null(ncores)) {ncores <- 1}
  # one summary table (Mean/SD/SEM per step, plus a "Total" row) per sample size comparison
  my.names <- paste0(Nreps1, " vs ", Nreps2)
  time.taken.mat <- lapply(seq_along(my.names), function(x) {
    data.frame(matrix(NA, nrow = length(rownames(time.taken[,1,]))+1,
                      ncol = 3, dimnames = list(c(rownames(time.taken[,1,]), "Total"),
                                                c("Mean", "SD", "SEM")))
    )
  })
  names(time.taken.mat) <- my.names
  # summarise the per-simulation timings for each sample size comparison
  for(j in seq_along(Nreps1)) {
    tmp.time <- time.taken[,j,]
    Total <- colSums(tmp.time, na.rm = TRUE)
    tmp.time <- rbind(tmp.time, Total)
    time.taken.mat[[j]][,"Mean"] <- rowMeans(tmp.time)
    time.taken.mat[[j]][,"SD"] <- matrixStats::rowSds(tmp.time)
    time.taken.mat[[j]][,"SEM"] <- matrixStats::rowSds(tmp.time)/sqrt(nsims)
  }
  time.taken.dat <- do.call('rbind', time.taken.mat)
  # time.taken.dat <- time.taken.dat[!is.na(time.taken.dat$Mean),]
  # split the "samples.step" row names back into Samples and Step columns
  time.taken.dat <- time.taken.dat %>%
    tibble::rownames_to_column(var="ID") %>%
    tidyr::separate(col = ID, into=c("Samples", "Step"), sep="[.]", remove=TRUE) %>%
    tidyr::separate(Samples, c('n1', 'n2'), " vs ", remove=FALSE) %>%
    dplyr::mutate(SumN = as.numeric(n1)+as.numeric(n2)) %>%
    dplyr::arrange(SumN)
  # to label the x axis from smallest to largest n group!
  time.taken.dat$Samples <- factor(time.taken.dat$Samples,
                                   levels=unique(time.taken.dat$Samples[order(time.taken.dat$SumN,decreasing = FALSE)]))
  time.taken.dat$Step <- factor(time.taken.dat$Step,
                                levels=c("Preprocess", "Normalisation", "Clustering", "DE", "Moments", "Total"))
  # optionally print the summary table (rounded to 2 significant digits)
  if(isTRUE(Table)) {
    printtime <- time.taken.dat[,c(1,4:7)]
    printtime[,c(3:5)] <- signif(printtime[,c(3:5)],2)
    print(printtime)
  }
  # drop steps that were not run before plotting
  time.taken.dat <- time.taken.dat[!is.na(time.taken.dat$Mean),]
  # plot: mean +/- SEM per step, faceted by step, coloured by sample size
  limits <- ggplot2::aes(ymax = Mean + SEM, ymin= Mean - SEM)
  dodge <- ggplot2::position_dodge(width=0.7)
  p.final <- ggplot2::ggplot(data = time.taken.dat, ggplot2::aes(x=Step,
                                                                 y=Mean,
                                                                 colour=Samples)) +
    ggplot2::geom_point(position = dodge) +
    # ggplot2::geom_line(ggplot2::aes(group=Metric),position = dodge) +
    ggplot2::geom_pointrange(limits, position = dodge) +
    ggplot2::facet_wrap(~Step, nrow=1, scales="free") +
    ggplot2::theme_bw() +
    ggplot2::labs(x=NULL, y="Time in minutes") +
    ggplot2::theme(legend.position='bottom', legend.title = ggplot2::element_blank(),
                   axis.text.x=ggplot2::element_blank(),
                   axis.text.y=ggplot2::element_text(size=10),
                   axis.title.x=ggplot2::element_blank(),
                   axis.title.y=ggplot2::element_text(size=10, face="bold"),
                   legend.text = ggplot2::element_text(size=10),
                   legend.key.size = grid::unit(0.5, "cm"),
                   strip.text = ggplot2::element_text(size=10, face="bold")) +
    ggplot2::guides(colour=ggplot2::guide_legend(nrow=1))
  # annotation under plot
  if(isTRUE(annot)) {
    subtitle <- paste0("The average time in minutes per simulation step and sample size.\nThe number of cores was set to ", ncores, ".")
    p.final <- cowplot::add_sub(p.final, subtitle, size=8)
  }
  # draw final plot
  cowplot::ggdraw(p.final)
}
# plotEvalDist ------------------------------------------------------------
#' @name plotEvalDist
#' @aliases plotEvalDist
#' @title Visualize distribution assessment
#' @description This function plots the results of \code{\link{evaluateDist}} to assess goodness-of-fit testing.
#' @usage plotEvalDist(evalDist, annot=TRUE)
#' @param evalDist The output of \code{\link{evaluateDist}}.
#' @param annot A logical vector. If \code{TRUE}, a short description of the plot is included.
#' @return A ggplot object.
#' @examples
#' \dontrun{
#' ## using example data set
#' data(kolodziejczk_cnts)
#' evaldist <- evaluateDist(countData = kolodziejczk_cnts,
#' RNAseq = "singlecell", normalisation = "scran",
#' frac.genes=1, min.meancount = 0.1,
#' max.dropout=0.7, min.libsize=1000,
#' verbose = TRUE)
#' plotEvalDist(evaldist, annot = TRUE)
#' }
#' @author Beate Vieth, Ines Hellmann
#' @importFrom ggplot2 ggplot aes ylab xlab theme scale_y_continuous geom_boxplot position_dodge geom_bar theme_minimal scale_x_discrete labs ggtitle coord_flip
#' @importFrom scales percent
#' @importFrom cowplot plot_grid add_sub ggdraw
#' @importFrom dplyr filter group_by summarise mutate bind_rows slice select ungroup left_join count
#' @importFrom tidyr %>% separate gather spread
#' @importFrom utils stack
#' @rdname plotEvalDist
#' @export
plotEvalDist <- function(evalDist, annot=TRUE){
  # define naming labels for plotting
  # "Multiple"='Multiple',
  dist_labels <- c("None"='None',
                   "ZIP"='Zero-Inflated \n Poisson',
                   "Poisson"='Poisson',
                   "ZINB"="Zero-Inflated \n Negative Binomial",
                   "NB"="Negative \n Binomial"
  )
  label_names <- c("PoiBeta" ="Beta-Poisson",
                   'zifpois' = "Zero-Inflated \n Poisson",
                   'pois' = "Poisson",
                   "zifnbinom" = "Zero-Inflated \n Negative Binomial",
                   "nbinom" = "Negative \n Binomial"
  )
  # map the combination of per-distribution GOF pass/fail flags to a single label
  combi.ind <- c("1_1_1_1"='Multiple',
                 "0_0_0_0"='None',
                 "0_1_0_0"='Poisson',
                 "1_0_0_0"="NB",
                 "0_0_1_0"="ZINB",
                 "0_0_0_1"='ZIP',
                 "1_1_0_0"='Multiple',
                 "1_0_0_1"='Multiple',
                 "1_0_1_0"="Multiple",
                 "0_0_1_1"="Multiple",
                 "1_0_1_1"='Multiple',
                 "1_1_1_0"='Multiple',
                 "0_1_1_1"='Multiple'
  )
  combi.ind.df <- data.frame(combi.ind, stringsAsFactors=FALSE) %>%
    tibble::rownames_to_column(var = "nbinom_pois_zifnbinom_zifpois") %>%
    dplyr::rename(Distribution=combi.ind)
  # extract the GOF results from object
  gofres <- evalDist$GOF_res
  # reshape the table into long format: one row per gene x statistic
  gofres <- cbind(rn = rownames(gofres), utils::stack(gofres))
  gofres <- gofres %>%
    tidyr::separate(ind, c("distribution", "framework", 'type'), "_", remove = F)
  # extract the observed zero table, restricted to genes present in the GOF results
  obszero <- evalDist$ObservedZeros
  obszero <- cbind(rn = rownames(obszero), obszero)
  obszero <- obszero[which(obszero$rn %in% gofres$rn), ]
  # panel A: GOF p-value based on chisquare test
  gof.pval <- gofres %>%
    dplyr::filter(type=='gofpval', framework=='standard') %>%
    dplyr::mutate(values, TestRes=ifelse(values>0.05, 1, 0)) %>%
    dplyr::select(rn, TestRes, distribution) %>%
    tidyr::spread( distribution, TestRes) %>%
    tidyr::unite(nbinom_pois_zifnbinom_zifpois, nbinom, pois, zifnbinom,zifpois, remove = F) %>%
    dplyr::full_join(combi.ind.df, by='nbinom_pois_zifnbinom_zifpois') %>%
    dplyr::mutate(Distribution=ifelse(is.na(Distribution), 'Multiple', Distribution)) %>%
    dplyr::group_by(Distribution) %>%
    dplyr::summarise(Total=length(nbinom_pois_zifnbinom_zifpois)) %>%
    dplyr::mutate(Percentage=Total/sum(Total)) %>%
    dplyr::filter(Distribution != 'Multiple')
  p.chisquare <- ggplot2::ggplot(gof.pval, ggplot2::aes(x = Distribution, y=Percentage)) +
    ggplot2::theme_minimal() +
    ggplot2::labs(x = 'Distribution', y = "Percentage") +
    ggplot2::geom_bar(stat='identity', width=0.7) +
    ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
    ggplot2::scale_x_discrete(labels= dist_labels, limits=names(dist_labels)) +
    ggplot2::coord_flip() +
    ggplot2::ggtitle('Goodness-of-fit statistic')
  # panel B: AIC (lowest value AND intersect with GOF p-value)
  AIC.calc <- gofres %>%
    dplyr::filter(framework %in%c('Marioni', 'standard'), type %in% c("gofpval","aic")) %>%
    dplyr::select(-ind ) %>%
    tidyr::spread(type,values) %>%
    dplyr::group_by(rn) %>%
    dplyr::mutate(minAIC= (aic==min(aic)), GOF = gofpval>0.05) %>%
    dplyr::ungroup() %>%
    dplyr::group_by(distribution) %>%
    dplyr::summarise(`Total Lowest\nAIC`= sum(minAIC,na.rm = TRUE),
                     `Total Lowest\nAIC + \nGOF \np >0.05` = sum((minAIC & GOF), na.rm=TRUE)) %>%
    dplyr::mutate(Total.Lowest=sum(`Total Lowest\nAIC`),Total.Sub=sum(`Total Lowest\nAIC + \nGOF \np >0.05`)) %>%
    dplyr::mutate(`Percentage Lowest\nAIC`=`Total Lowest\nAIC`/Total.Lowest, `Percentage Lowest\nAIC + \nGOF \np >0.05`=`Total Lowest\nAIC + \nGOF \np >0.05`/Total.Sub) %>%
    dplyr::select(-Total.Lowest, -Total.Sub) %>%
    tidyr::gather(variable, value, `Total Lowest\nAIC`:`Percentage Lowest\nAIC + \nGOF \np >0.05`, factor_key=FALSE) %>%
    dplyr::mutate(Type=ifelse(grepl(pattern='Percentage', variable), 'Percentage', 'Total'), Set=sub(".+? ", "", variable)) %>%
    dplyr::filter(Type=='Percentage')
  AIC.calc$distribution <- factor(AIC.calc$distribution, levels = c('PoiBeta', 'zifpois', 'pois', 'zifnbinom', 'nbinom'))
  p.aic <- ggplot2::ggplot(data=AIC.calc, ggplot2::aes(x=Set, y=value, fill=distribution)) +
    ggplot2::geom_bar(stat='identity', width=0.7, colour="black") +
    ggplot2::theme_minimal() +
    ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0,1)) +
    ggplot2::scale_fill_brewer(labels= label_names, limits=names(label_names), palette="Set1") +
    ggplot2::xlab(NULL) +
    ggplot2::ylab('Percentage') +
    ggplot2::labs(fill=NULL) +
    ggplot2::guides(fill=ggplot2::guide_legend(reverse=TRUE)) +
    ggplot2::theme(legend.position = "bottom") +
    ggplot2::ggtitle("Akaike Information Criterion") +
    ggplot2::coord_flip()
  # panel C: predicted zeroes vs observed zeros
  predzero.dat <- gofres %>%
    dplyr::filter(type %in% c("gofpval","predzero"),
                  framework %in% c("Marioni", 'standard')) %>%
    dplyr::select(-ind ) %>%
    tidyr::spread(type,values) %>%
    dplyr::left_join(obszero, by='rn') %>%
    dplyr::mutate(`Obs. vs. Pred.\n Zeros`=ObsZero-predzero,
                  `Obs. vs. Pred.\n Zeros, GOF p>0.05`=ifelse(gofpval>0.05,ObsZero-predzero,NA)) %>%
    tidyr::gather(comp,divergentzero,7:8)
  # indices into label_names used for the x axis labels
  dd <- 1:5
  zpdat <- dplyr::filter(predzero.dat, comp == "Obs. vs. Pred.\n Zeros")
  p.zero <- ggplot2::ggplot(zpdat, ggplot2::aes(x = distribution, y = divergentzero)) +
    ggplot2::geom_boxplot(width = 0.7, position = ggplot2::position_dodge(width = 0.8), outlier.shape = NA) +
    ggplot2::theme_minimal() +
    ggplot2::labs(x = "Distribution", y = "Observed - Predicted Zeros") +
    ggplot2::scale_x_discrete(labels = label_names[dd], limits = names(label_names[dd])) +
    ggplot2::ggtitle("Dropouts") +
    ggplot2::theme(legend.position = "none", legend.title = ggplot2::element_blank()) +
    ggplot2::coord_flip()
  # panel D: best fit by LRT / Vuong
  model.pval <- gofres %>%
    dplyr::filter(type=='gofpval', framework=='standard') %>%
    dplyr::mutate(values, TestRes=ifelse(values>0.05, 1, 0)) %>%
    dplyr::select(rn, TestRes, distribution) %>%
    tidyr::spread( distribution, TestRes) %>%
    na.omit() %>%
    tidyr::unite(nbinom_pois_zifnbinom_zifpois, nbinom, pois, zifnbinom,zifpois, remove = F) %>%
    dplyr::mutate_if(is.factor, as.character)
  model.calc <- gofres %>%
    dplyr::filter(distribution %in% c('LRT', 'Vuong'), values<0.05) %>%
    dplyr::select(rn, type) %>%
    dplyr::mutate_if(is.factor, as.character)
  # per test type, keep only genes where both compared models passed the GOF test
  out <- split(model.calc, f = model.calc$type)
  out2 <- lapply(out, function(x) {
    dplyr::left_join(x, model.pval, by='rn') %>% na.omit()
  })
  out2[['NBPoisson']] <- out2[['NBPoisson']] %>% dplyr::filter(nbinom==1, pois==1) %>% dplyr::select(rn, type)
  out2[['ZNB']] <- out2[['ZNB']] %>% dplyr::filter(nbinom==1, zifnbinom==1) %>% dplyr::select(rn, type)
  out2[['ZNBZPoisson']] <- out2[['ZNBZPoisson']] %>% dplyr::filter(zifnbinom==1, zifpois==1) %>% dplyr::select(rn, type)
  out2[['ZPoisson']] <- out2[['ZPoisson']] %>% dplyr::filter(pois==1, zifpois==1) %>% dplyr::select(rn, type)
  out3 <- do.call('rbind', out2)
  ModelTest <- out3 %>%
    dplyr::group_by(type) %>%
    dplyr::count() %>%
    dplyr::mutate(Percentage=n/sum(n))
  p.modeltest <- ggplot2::ggplot(data = ModelTest, ggplot2::aes(x = type, y = Percentage)) +
    ggplot2::geom_bar(stat = "identity", width = 0.7) +
    ggplot2::theme_minimal() +
    ggplot2::scale_y_continuous(labels = scales::percent, limits = c(0, 1)) +
    ggplot2::labs(x = "Test", y = "Percentage") +
    ggplot2::scale_x_discrete(labels = c(NBPoisson = "Negative Binomial \n> Poisson",
                                         ZNB = "Zero-inflated Negative Binomial \n> Negative Binomial",
                                         ZPoisson = "Zero-inflated Poisson \n> Poisson",
                                         ZNBZPoisson = "Zero-inflated Negative Binomial \n> Zero-inflated Poisson"),
                              limits = c("ZNBZPoisson", "ZPoisson", "NBPoisson", "ZNB")) +
    ggplot2::ggtitle("Model Comparisons") +
    ggplot2::coord_flip()
  # assemble the four panels in a 2x2 grid
  p.final <- cowplot::ggdraw() +
    cowplot::draw_plot(p.chisquare, x= 0 , y= 0.5, width = 0.5, height = 0.5) +
    cowplot::draw_plot(p.aic, x=0.5, y=0.5, width = 0.5, height=0.5) +
    cowplot::draw_plot(p.zero, x=0, y= 0, width = 0.5, height=0.5) +
    cowplot::draw_plot(p.modeltest, x=0.5, y= 0, width = 0.5, height=0.5) +
    cowplot::draw_plot_label( c("A","B","C", "D"), x=c(0, 0.5, 0, 0.5), y=c(1,1,0.5, 0.5), size=15, vjust=1)
  if (annot) {
    p.final <- cowplot::add_sub(p.final, "A) Goodness-of-fit of the model assessed with a chi-square test based on residual deviance and degrees of freedom.
                        \nB) Akaike Information Criterion per gene: Model with the lowest AIC. Model with the lowest AIC and passed goodness-of-fit statistic test.
                        \nC) Observed versus predicted dropouts per model and gene plotted without outliers.
                        \nD) Model Assessment based on LRT for nested models and Vuong test for nonnested models.",
                                size = 8)
  }
  cowplot::ggdraw(p.final)
}
|
3f27654a1a251bc733ad047ca9cec423786f0c73 | 70ae6baa8ba5ec4af1cb683e81a7f7dfb27ac77b | /man/layer_point.Rd | 2f8737d82c4cb9d5e25eaa48f564798fb971b6d8 | [] | no_license | nteetor/chartisan | e89e86ba683e07748aa8f00c1c8081e2909dfa09 | d2280928a4dd6a2337a76cfad2c638238eb38e17 | refs/heads/master | 2021-09-04T06:53:09.061171 | 2018-01-16T22:07:16 | 2018-01-16T22:07:16 | 108,591,025 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 282 | rd | layer_point.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer-point.R
\name{layer_point}
\alias{layer_point}
\title{A point chart}
\usage{
layer_point(chart)
}
\arguments{
\item{chart}{A \code{chartist} object.}
}
\description{
Add a point layer to a chart.
}
|
a43b111868f0cc039ab3dd1cdc08ea03d11dc813 | 995fa583e9473eee01e003d3bebc8b284ddae31b | /tests/testthat/tests_with_reference_data.R | 99369f0464870e001e15c45e235d59a11211a05b | [
"MIT"
] | permissive | nrosed/ssPATHS | 0892f1a56f61dfb6f6cc0c0109fcf7e09744e379 | d4c0e364591ce9d588d79d40471b31c7b81db3cc | refs/heads/master | 2020-06-24T14:57:53.966786 | 2020-04-14T17:02:31 | 2020-04-14T17:02:31 | 198,993,426 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,209 | r | tests_with_reference_data.R | test_that("Gene weight on reference dataset works", {
# End-to-end check: get_gene_weights() must reproduce the stored reference
# weights (gene_weights_reference) exactly, column by column and by gene name.
data(tcga_expr_df)
# transform from data.frame to SummarizedExperiment
# (the first four columns of tcga_expr_df are dropped from the assay and
# transposed so genes become rows; columns 2-4 become the colData)
tcga_se <- SummarizedExperiment(t(tcga_expr_df[ , -(1:4)]),
                                colData=tcga_expr_df[ , 2:4])
colnames(tcga_se) <- tcga_expr_df$tcga_id
colData(tcga_se)$sample_id <- tcga_expr_df$tcga_id
# Keep only the hypoxia genes actually present in the expression matrix.
hypoxia_gene_ids <- get_hypoxia_genes()
hypoxia_gene_ids <- intersect(hypoxia_gene_ids, rownames(tcga_se))
hypoxia_se <- tcga_se[hypoxia_gene_ids,]
# Binary outcome: Y = 0 for normal samples, 1 otherwise.
colData(hypoxia_se)$Y <- ifelse(colData(hypoxia_se)$is_normal, 0, 1)
# now we can get the gene weightings
res <- get_gene_weights(hypoxia_se)
gene_weights_test <- res[[1]]
# NOTE(review): sample_scores is assigned but not used in this test.
sample_scores <- res[[2]]
data("gene_weights_reference")
expect_equal(gene_weights_test[,1], gene_weights_reference[,1])
expect_equal(gene_weights_test[,2], gene_weights_reference[,2])
expect_equal(row.names(gene_weights_test), row.names(gene_weights_reference))
})
test_that("test classification on reference dataset works", {
# Build the same hypoxia SummarizedExperiment as in the gene-weight test.
data(tcga_expr_df)
# transform from data.frame to SummarizedExperiment
tcga_se <- SummarizedExperiment(t(tcga_expr_df[ , -(1:4)]),
                                colData=tcga_expr_df[ , 2:4])
colnames(tcga_se) <- tcga_expr_df$tcga_id
colData(tcga_se)$sample_id <- tcga_expr_df$tcga_id
hypoxia_gene_ids <- get_hypoxia_genes()
hypoxia_gene_ids <- intersect(hypoxia_gene_ids, rownames(tcga_se))
hypoxia_se <- tcga_se[hypoxia_gene_ids,]
# Binary outcome: Y = 0 for normal samples, 1 otherwise.
colData(hypoxia_se)$Y <- ifelse(colData(hypoxia_se)$is_normal, 0, 1)
# now we can get the gene weightings
res <- get_gene_weights(hypoxia_se)
sample_scores <- res[[2]]
# Training-set classification accuracy (second element of the result) must
# reproduce the stored reference value to 5 decimal places.
# (Removed a leftover debug print() of the difference: it polluted test
# output and asserted nothing.)
training_res <- get_classification_accuracy(sample_scores, positive_val=1)
expect_equal(round(training_res[[2]], 5), 0.91112)
})
test_that("test classification on new dataset works", {
  # Assemble the TCGA training set as a SummarizedExperiment: drop the four
  # annotation columns from the assay (transposed so genes are rows) and use
  # columns 2-4 as colData.
  data(tcga_expr_df)
  expr_se <- SummarizedExperiment(t(tcga_expr_df[ , -(1:4)]),
                                  colData = tcga_expr_df[ , 2:4])
  colnames(expr_se) <- tcga_expr_df$tcga_id
  colData(expr_se)$sample_id <- tcga_expr_df$tcga_id

  # Restrict to the hypoxia gene set present in the expression matrix and
  # attach the binary outcome (0 = normal, 1 otherwise).
  hypoxia_ids <- intersect(get_hypoxia_genes(), rownames(expr_se))
  hypoxia_se <- expr_se[hypoxia_ids, ]
  colData(hypoxia_se)$Y <- ifelse(colData(hypoxia_se)$is_normal, 0, 1)

  # Fit the gene weights on the training data.
  fit <- get_gene_weights(hypoxia_se)
  trained_weights <- fit[[1]]
  trained_scores <- fit[[2]]

  # Score a new, unseen sample set with the trained weights.
  data(new_samp_df)
  new_se <- SummarizedExperiment(t(new_samp_df[ , -(1)]),
                                 colData = new_samp_df[, 1, drop = FALSE])
  colnames(colData(new_se)) <- "sample_id"
  scored <- get_new_samp_score(trained_weights, new_se)

  # Compare against the stored reference output, ignoring row names on
  # both sides.
  data(expected_score_output)
  row.names(expected_score_output) <- NULL
  scored <- as.data.frame(scored)
  row.names(scored) <- NULL
  expect_equal(scored, expected_score_output)
})
|
d207912a1d2096683fd79f0818e8209b20bd1fa2 | b6b61aac8bb54d857433728fe6fa33c20eb32fd3 | /pipeline/multilocus_genotyper.R | 7dba03e62299e692a82ace7efb9febcca864dfd8 | [] | no_license | asulovar/tandemrepeats | a5e2c6276ad5e070290a015c4c7caed8f431be6e | 618055ca0564edfe651db83369c7f0b5e8de0695 | refs/heads/master | 2021-04-17T00:17:27.182363 | 2018-04-09T22:40:36 | 2018-04-09T22:40:36 | 126,521,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 342 | r | multilocus_genotyper.R | source("strvntr_genotyping_functions.R")
# Command-line driver: Rscript multilocus_genotyper.R <input.tsv> <output.tsv>
# Reads a tab-delimited genotyping table, annotates every row with multi-locus
# pure repeat counts via PureRepeatCounter() (defined in the sourced
# strvntr_genotyping_functions.R), and writes the annotated table back out.
# Fixed: use TRUE/FALSE instead of the reassignable T/F shorthands, and drop
# the redundant toString() re-wrapping of values that are already character.
args <- commandArgs(TRUE)
input <- toString(args[1])   # path to the input tab-delimited table
output <- toString(args[2])  # path for the annotated output table
tab_ds <- read.delim(input, header = TRUE)
tab_ds <- PureRepeatCounter(my_dataset = tab_ds, output = tab_ds,
                            start = 1, end = nrow(tab_ds), multicounts = TRUE)
# Tab-separated output without quoting or row names, mirroring the input layout.
write.table(tab_ds, output, sep = "\t", quote = FALSE, row.names = FALSE)
|
7de3b31aaef8753176054a395eacd42021e26be4 | 13af613761463c204117fed1faa169b474f49d28 | /man/make_figure_title.Rd | 17ec85bdefbfc23e136274b1e457aba4e6c750b2 | [
"MIT"
] | permissive | taiawu/dsfworld_package | 22908b87af3add64b45f7ddde772d4e6a83cc582 | e564d25cc5e8a60a87774e04ed23147c1033e964 | refs/heads/master | 2023-08-24T20:58:26.263206 | 2021-11-07T23:04:18 | 2021-11-07T23:04:18 | 342,920,918 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,733 | rd | make_figure_title.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_dye_screen_figs.R
\name{make_figure_title}
\alias{make_figure_title}
\title{Automatically generate acceptable figure and plot titles from data}
\usage{
make_figure_title(
df,
.exp_num_col = "exp_num",
.prot_name_col = "identity",
.override_names = FALSE,
.manual_exp_num = Sys.Date(),
.manual_prot_name = "screened protein",
.manual_buffer_name = "screening buffer",
.type_col = "type",
...
)
}
\arguments{
\item{df}{a df, e.g. one created by the \code{tidy_dye_screen()} function.}
\item{.exp_num_col}{a string, giving the name of the column whose value will appear first in the titles/names. Defaults to "exp_num", which, if the dataframe comes from the \code{tidy_dye_screen()} function, contains the experiment number, e.g. "Exp1100". Ideally, this column would contain only one unique value. If more than one value is found, the first is used.}
\item{.prot_name_col}{a string, giving the name of the column whose value will appear second in the titles/names. Defaults to "identity", which, if the dataframe comes from the \code{tidy_dye_screen()} function., contains the specific names of the protein and buffer corresponding to the experiment. This argument takes a dependence on the df also having a "type" column (the name of which can be supplied via .type_col), which can be used to filter to contain only the values "protein" or "buffer", to extract the name of the protein and the name of the buffer, as single unique strings, respectively.}
\item{.override_names}{a boolean, which, if TRUE, overrides the names extracted from ..exp_num_col and .prot_name_col within the function, with the strings provided to the following arguments of this function. Defaults to FALSE.}
\item{.manual_exp_num}{a string, which, if .override_names is set to TRUE, is used as the first element of both plot title and saved name.}
\item{.manual_prot_name}{a string, which, if .override_names is set to TRUE, is used as the second element of both plot title and saved name, where the name of the protein typically appears.}
\item{.manual_buffer_name}{a string, which, if .override_names is set to TRUE, is used as the third element of both plot title and saved name, where the name of the buffer typically appears.}
\item{.type_col}{the name of the column which contains values of either "protein" or "buffer", which can be used to filter the input dataframe such that a single, unique name can be pulled from the .prot_name_col column for the protein (when filtered such that .type_col == "protein), or buffer (when filtered such that .type_col == "buffer").}
\item{...}{tolerate unmatched arguments passed via .. from upstream functions; ignore if they don't match anything in this function}
}
\value{
A named list with two named elements: "print" -- the name to be printed at the head of the plot, and "save" -- the name to be used to save the plot via ggsave. Additional elements can be appended to the save name in the saving functions, to specify which figure is being saved when more than one figure is saved for the same experiment. Specific paths to save to are also set in the save_* plot functions of this package.
}
\description{
A helper function of limited scope and flexibility. Given a dataframe, extracts values from specified columns and assembles them into titles and names. Exported only because it can be useful for general workflows as well.
Generates experiment-specific names, of the format: Experiment_number (linebreak) Protein: protein name (linebreak) Buffer: buffer name
Defaults to creating a title of the form described above, and a save name of the form: <Experiment_number>protein_<protein_name>buffer_<buffer_name>.
}
|
a709cc8db492e0b3d13eaa4c6ae8be1d92ceb699 | db5caac98462b53be43301f41ffb3becb9faea6a | /run_analysis.R | 1659a5083f60ae82fa2ef1e444cea9e416577312 | [] | no_license | arl1024/Getting_and_Cleaning_Data | 874aacb38230fc4049868b4b6c5761b125a36d2f | f84f6561a2c503bc2bece755ccdb5c819fa04021 | refs/heads/master | 2020-05-18T05:06:35.883739 | 2015-04-26T18:51:52 | 2015-04-26T18:51:52 | 34,622,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,077 | r | run_analysis.R | ####################
# run_analysis.R
####################
library("data.table")
########################
# Preparation directory
########################
#setwd("C:/CURSOS/COURSERA/Getting and Cleaning Data/EJERCICIO/")
# Read features names
# Read the feature names once up front.
# NOTE(review): nomcamp is re-read and cleaned again in Part 2; this early
# read is kept so the script fails fast if features.txt is missing.
nomcamp<-read.table("features.txt",header=FALSE)
# Identify dir's
dir0<-getwd()
dirTr<-paste0(dir0,"/train/")
dirTe<-paste0(dir0,"/test/")
# Files to try
setwd(dirTr)
filesTr<-list.files(pattern="\\.txt$")
setwd(dirTe)
filesTe<-list.files(pattern="\\.txt$")
#########################
# PART 1  ###############
#########################
setwd(dir0)
# Merge files: for every train file, row-bind the matching test file under it
# and write the merged rows to "<name>_NEW.txt" in the working directory.
# (Fixed: the original wrote `file=for (...)`; a for loop returns NULL
# invisibly, so that assignment was dead code that also shadowed base::file.)
for (f in filesTr) { # for all train files:
  a<-read.table(paste0(dirTr,"/",f),header=FALSE)
  g<-gsub("train","test",f) # name of the equivalent test file
  b<-read.table(paste0(dirTe,"/",g), header=FALSE)
  conjunto<-rbind(a,b) # bind all rows
  nomfile<-strsplit(f, "\\.")[[1]]
  # write the new merged file (no header, no row names)
  write.table(conjunto, paste0(nomfile[1],"_NEW.txt"),row.names=FALSE,col.names=FALSE)
}
# file _NEW.txt in conjunto
######################
# PART 2,3,4 #########
######################
# Work with labels
nomcamp<-read.table("features.txt", header=FALSE) # features names
# Clean the raw feature names into valid, readable column names:
# strip ()- characters, capitalise mean/std, drop the leading t/f prefix.
nomcamp<-gsub("[()-]","",nomcamp$V2)
nomcamp<-gsub("mean","Mean",nomcamp)
nomcamp<-gsub("^[tf]","",nomcamp)
nomcamp<-gsub("std","Std",nomcamp)
# NOTE(review): `duplica` is computed but never used afterwards.
duplica<-duplicated(nomcamp)
nomsit<-read.table("activity_labels.txt", header=FALSE) # activity names
# nomsit<-conjunto
# Naming the columns
situacion<-read.table("y_train_NEW.txt", header=FALSE,col.names="activity")
persona<-read.table("subject_train_NEW.txt", header=FALSE,col.names="subject")
tabla1<-read.table("X_train_NEW.txt", header=FALSE, col.names=nomcamp)
tabla2<-cbind(situacion,tabla1)
# Adding activity
# Replace numeric activity codes by their descriptive labels (column V2).
tabla2$activity<-nomsit[tabla2$activity,"V2"]
# Choose mean std
# Regex keeps any column whose name contains Mean/mean, Std/std, or "activity".
busca<-".[Mm]ean|[Ss]td|activity"
# TABLE WITH ACTIVITY, MEAN AND STD
dat_mean_std<-tabla2[,grep(busca,names(tabla2))]
# Write dataset
# write.table(dat_mean_std,"data_set_1.txt", row.names=FALSE,col.names=TRUE,quote=FALSE)
#########################
# PART 5 ###############
#########################
nomsit<-read.table("activity_labels.txt", header=FALSE)
# head(nomsit)
# origen<-read.table("data_set_1.txt",header=TRUE)
origen<-dat_mean_std
usuari<-read.table("subject_train_NEW.txt", header=FALSE,col.names="subject")
grupos<-as.data.table(cbind(usuari,origen)) # contains all the fields
situa<-nomsit$V2
person<-c(1:30)
conjunto<-data.table() # Output dataset
# For every (subject, activity) pair, average every measurement column and
# append the resulting one-row table to `conjunto`.
# NOTE(review): growing `conjunto` with rbind() inside the loop is O(n^2);
# a single grupos[, lapply(.SD, mean), by=.(subject, activity)] would be
# faster, but would change the row/column order of the written file.
for (n in person) { # For each subject
grupoA<-subset(grupos,as.numeric(subject)==n)
for (m in situa){ # For each activity
grupoB<-subset(grupoA,activity==m)
dt <- as.data.table(grupoB)
setkey(dt,activity)
grupoC<-dt[, lapply(.SD,mean), by=activity] # Mean activity
conjunto<-rbind(conjunto,grupoC)
}
}
write.table(conjunto, "solution.txt",row.names=FALSE)
|
40967d183c6dc43fd774671ad6b328c9a45a4fe9 | 29585dff702209dd446c0ab52ceea046c58e384e | /stratigraph/R/writeTilia.R | 8deb0db291ab701871c5b301a02920d0a5a85a14 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 55 | r | writeTilia.R | "writeTilia" <-
function(...){
# Thin alias: forwards all arguments unchanged to writeGPDascii().
# NOTE(review): presumably Tilia export shares the GPD ASCII writer; see
# writeGPDascii() for the actual argument contract -- confirm before changing.
writeGPDascii(...)
}
|
d89e47abdc0475dd96f469e66184a05bf961babf | bae5c96ad27bd08805ccd949ff75514e43631fb9 | /04_Codes/06_Summary.R | 7db843c0e6885db3edcfc0a55e7e6dfbbbd5ed7b | [] | no_license | Zaphiroth/MSD_CHC_2020Q3 | 24551cf8f5d4a513bdaf91cf7390e99c41838e54 | e90c5e194bbe1ce84c8a789f765d1214e69a8a4f | refs/heads/main | 2023-01-19T20:12:33.604470 | 2020-11-27T09:32:08 | 2020-11-27T09:32:08 | 315,498,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,078 | r | 06_Summary.R | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ProjectName: MSD CHC 2020Q3
# Purpose: Summary
# programmer: Zhe Liu
# Date: 2020-11-24
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
##---- Readin data ----
## pack info
# IMS pack reference (pipe-delimited); Pack_Id is zero-padded to 7 characters
# so it joins cleanly against the 7-character pack ids used elsewhere.
pack.ref <- fread("02_Inputs/cn_prod_ref_201912_1.txt",
                  stringsAsFactors = FALSE, sep = "|") %>%
  distinct() %>%
  mutate(Pack_Id = stri_pad_left(Pack_Id, 7, 0))
## corp info
corp.ref <- fread("02_Inputs/cn_corp_ref_201912_1.txt",
                  stringsAsFactors = FALSE, sep = "|") %>%
  distinct()
## pack & corp
# One row per pack: pack description, corporation name, and pack size.
corp.pack <- pack.ref %>%
  distinct(Pack_Id, Prd_desc, Pck_Desc, Corp_ID, PckSize_Desc) %>%
  left_join(corp.ref, by = "Corp_ID") %>%
  select(packid = Pack_Id, Pck_Desc, Corp_Desc, PckSize_Desc)
## product desc
# PFC-to-IMS mapping: pack id plus the brand name column (商品名).
prod.desc <- read.csv("02_Inputs/pfc与ims数据对应_20200824.csv") %>%
  mutate(Pack_Id = stri_pad_left(Pack_Id, 7, 0)) %>%
  select(packid = Pack_Id, Prd_desc = `商品名`)
##---- Result ----
## Servier result
# Previously delivered Servier CHC numbers, restricted to the OAD market,
# 2020Q3, and molecules in the market definition (market.def is created
# upstream in the 04_Codes pipeline -- not visible in this script).
servier.result <- read.xlsx("02_Inputs/08_Servier_CHC_2016Q4_2020Q3_v2.xlsx") %>%
  filter(Channel == 'CHC',
         MKT == 'OAD',
         Date %in% c('2020Q3'),
         Molecule_Desc %in% market.def$Molecule) %>%
  mutate(Pack_ID = stri_pad_left(Pack_ID, 7, 0)) %>%
  select(Pack_ID, Channel, Province, City, Date, ATC3, MKT, Molecule_Desc,
         Prod_Desc, Pck_Desc, Corp_Desc, Sales, Units, DosageUnits)
# Cities already covered by the Servier delivery; excluded from the MSD
# projection below to avoid double counting.
servier.city <- unique(servier.result$City)
## city result
# City-level result: MSD projected numbers (msd.price, defined upstream and
# not visible here) for the non-Servier cities, stacked with servier.result.
msd.city.result <- msd.price %>%
  filter(!(city %in% servier.city)) %>%
  left_join(corp.pack, by = "packid") %>%
  left_join(prod.desc, by = "packid") %>%
  mutate(Channel = "CHC",
         dosageunits = PckSize_Desc * units) %>%
  group_by(Pack_ID = packid, Channel, Province = province, City = city,
           Date = quarter, ATC3 = atc3, MKT = market, Molecule_Desc = molecule,
           Prod_Desc = Prd_desc, Pck_Desc, Corp_Desc) %>%
  summarise(Sales = sum(sales, na.rm = TRUE),
            Units = sum(units, na.rm = TRUE),
            DosageUnits = sum(dosageunits, na.rm = TRUE)) %>%
  ungroup() %>%
  bind_rows(servier.result) %>%
  # Manual corrections: harmonise one corp-name spelling, then re-derive the
  # Shanghai Units/DosageUnits for packs 4268604/4268602 from Sales using
  # fixed per-unit constants (103 and 52; 14 and 7 doses per pack).
  # NOTE(review): these constants look hand-calibrated for this quarter --
  # confirm with the data owner before reusing them.
  mutate(Corp_Desc = if_else(Corp_Desc == "LVYE GROUP", "LUYE GROUP", Corp_Desc),
         Units = if_else(City == '上海' & Pack_ID == '4268604',
                         Sales / 103,
                         Units),
         DosageUnits = if_else(City == '上海' & Pack_ID == '4268604',
                               Units * 14,
                               DosageUnits),
         Units = if_else(City == '上海' & Pack_ID == '4268602',
                         Sales / 52,
                         Units),
         DosageUnits = if_else(City == '上海' & Pack_ID == '4268602',
                               Units * 7,
                               DosageUnits)) %>%
  # Re-aggregate after the corrections and drop non-positive records.
  group_by(Pack_ID, Channel, Province, City, Date, ATC3, MKT, Molecule_Desc,
           Prod_Desc, Pck_Desc, Corp_Desc) %>%
  summarise(Sales = sum(Sales, na.rm = TRUE),
            Units = sum(Units, na.rm = TRUE),
            DosageUnits = sum(DosageUnits, na.rm = TRUE)) %>%
  ungroup() %>%
  filter(Sales > 0, Units > 0, DosageUnits > 0) %>%
  arrange(Date, Province, City, Pack_ID)
## nation result
# National totals: roll every city up into a single "National" stratum.
msd.nation.result <- msd.city.result %>%
  group_by(Pack_ID, Channel, Province = "National", City = "National",
           Date, ATC3, MKT, Molecule_Desc, Prod_Desc, Pck_Desc, Corp_Desc) %>%
  summarise(Sales = sum(Sales, na.rm = TRUE),
            Units = sum(Units, na.rm = TRUE),
            DosageUnits = sum(DosageUnits, na.rm = TRUE)) %>%
  ungroup()
# Superseded factor-table approach, kept commented for reference:
# adj.factor <- read.xlsx("02_Inputs/Adjust_Factor.xlsx") %>%
#   mutate(Pack_ID = stri_pad_left(Pack_ID, 7, 0)) %>%
#   setDT() %>%
#   melt(id.vars = c("Prod_Desc", "Pack_ID"),
#        variable.name = "City",
#        value.name = "factor",
#        variable.factor = FALSE)
## final result
# Final deliverable: target cities (kTargetCity, defined upstream) plus the
# national stratum. The three case_when blocks below apply the same hand-tuned
# calibration factor to Sales, Units and DosageUnits for each pack/brand/city,
# which preserves the implied unit prices.
# NOTE(review): all the factors (e.g. Shanghai * 1700 / * 4100, National
# * 1.08) are manual 2020Q3 calibrations -- confirm before rolling forward.
msd.result <- msd.city.result %>%
  filter(City %in% kTargetCity) %>%
  bind_rows(msd.nation.result) %>%
  group_by(Pack_ID, Channel, Province, City, Date, ATC3, MKT, Molecule_Desc,
           Prod_Desc, Pck_Desc, Corp_Desc) %>%
  summarise(Sales = sum(Sales, na.rm = TRUE),
            Units = sum(Units, na.rm = TRUE),
            DosageUnits = sum(DosageUnits, na.rm = TRUE)) %>%
  ungroup() %>%
  mutate(Sales = case_when(
    Pack_ID == '4268604' & City == '福州' ~ Sales * 1.8,
    Pack_ID == '4268604' & City == '广州' ~ Sales * 0.7,
    Pack_ID == '4268602' & City == '广州' ~ Sales * 4,
    Pack_ID == '4268604' & City == '上海' ~ Sales * 1700 * 0.8,
    Pack_ID == '4268602' & City == '上海' ~ Sales * 4100 * 0.8,
    Pack_ID == '4268602' & City == '苏州' ~ Sales * 1.2,
    Pack_ID == '4268604' & City == 'National' ~ Sales * 1.08,
    Pack_ID == '4268602' & City == 'National' ~ Sales * 1.6 * 1.08,
    Prod_Desc == 'JANUVIA' & City == '南京' ~ Sales * 0.8,
    Prod_Desc == 'JANUMET' & City == 'National' ~ Sales * 3,
    Prod_Desc == 'DIAMICRON' & City %in% c('杭州', '南京', '上海', '苏州') ~ Sales * 2,
    Prod_Desc == 'DIAMICRON' & City == 'National' ~ Sales * 1.1,
    Prod_Desc == 'VICTOZA' & City == 'National' ~ Sales * 2.5,
    TRUE ~ Sales
  ),
  Units = case_when(
    Pack_ID == '4268604' & City == '福州' ~ Units * 1.8,
    Pack_ID == '4268604' & City == '广州' ~ Units * 0.7,
    Pack_ID == '4268602' & City == '广州' ~ Units * 4,
    Pack_ID == '4268604' & City == '上海' ~ Units * 1700 * 0.8,
    Pack_ID == '4268602' & City == '上海' ~ Units * 4100 * 0.8,
    Pack_ID == '4268602' & City == '苏州' ~ Units * 1.2,
    Pack_ID == '4268604' & City == 'National' ~ Units * 1.08,
    Pack_ID == '4268602' & City == 'National' ~ Units * 1.6 * 1.08,
    Prod_Desc == 'JANUVIA' & City == '南京' ~ Units * 0.8,
    Prod_Desc == 'JANUMET' & City == 'National' ~ Units * 3,
    Prod_Desc == 'DIAMICRON' & City %in% c('杭州', '南京', '上海', '苏州') ~ Units * 2,
    Prod_Desc == 'DIAMICRON' & City == 'National' ~ Units * 1.1,
    Prod_Desc == 'VICTOZA' & City == 'National' ~ Units * 2.5,
    TRUE ~ Units
  ),
  DosageUnits = case_when(
    Pack_ID == '4268604' & City == '福州' ~ DosageUnits * 1.8,
    Pack_ID == '4268604' & City == '广州' ~ DosageUnits * 0.7,
    Pack_ID == '4268602' & City == '广州' ~ DosageUnits * 4,
    Pack_ID == '4268604' & City == '上海' ~ DosageUnits * 1700 * 0.8,
    Pack_ID == '4268602' & City == '上海' ~ DosageUnits * 4100 * 0.8,
    Pack_ID == '4268602' & City == '苏州' ~ DosageUnits * 1.2,
    Pack_ID == '4268604' & City == 'National' ~ DosageUnits * 1.08,
    Pack_ID == '4268602' & City == 'National' ~ DosageUnits * 1.6 * 1.08,
    Prod_Desc == 'JANUVIA' & City == '南京' ~ DosageUnits * 0.8,
    Prod_Desc == 'JANUMET' & City == 'National' ~ DosageUnits * 3,
    Prod_Desc == 'DIAMICRON' & City %in% c('杭州', '南京', '上海', '苏州') ~ DosageUnits * 2,
    Prod_Desc == 'DIAMICRON' & City == 'National' ~ DosageUnits * 1.1,
    Prod_Desc == 'VICTOZA' & City == 'National' ~ DosageUnits * 2.5,
    TRUE ~ DosageUnits
  )) %>%
  # left_join(adj.factor, by = c("Prod_Desc", "Pack_ID", "City")) %>%
  # mutate(factor = if_else(is.na(factor), 1, factor),
  #        Sales = round(Sales * factor, 2),
  #        Units = round(Units * factor),
  #        DosageUnits = round(DosageUnits * factor)) %>%
  # select(-factor) %>%
  arrange(Date, Province, City, Pack_ID)
write.xlsx(msd.result, "03_Outputs/06_MSD_CHC_OAD_2020Q3.xlsx")
write.xlsx(msd.city.result, '03_Outputs/06_MSD_CHC_OAD_2020Q3_city.xlsx')
##---- Dashboard ----
## MSD history
# Historical dashboard records (through 2020Q2); the new quarter is
# appended onto these below.
msd.history <- read.xlsx('02_Inputs/MSD_CHC_OAD_Dashboard_2020Q2_v8.xlsx',
                         sheet = 2)
## DPP4
# One-off DPP4 carry-forward adjustment kept commented for reference:
# add.dpp4 <- msd.history %>%
#   filter(Prod_Desc %in% c('JANUVIA', 'ONGLYZA', 'FORXIGA', 'GALVUS', 'TRAJENTA',
#                           'VICTOZA', 'NESINA', 'JANUMET', 'EUCREAS'),
#          Date == '2020Q1') %>%
#   mutate(Date = '2020Q2',
#          Value = case_when(
#            City %in% c('Beijing', 'Shanghai') & Prod_Desc == 'TRAJENTA' ~ Value * 1.185,
#            City %in% c('Beijing', 'Hangzhou', 'Nanjing', 'Shanghai') & Prod_Desc == 'VICTOZA' ~ Value * 1.247,
#            City == 'Fuzhou' & Prod_Desc == 'NESINA' ~ Value * 1.089,
#            City %in% c('Hangzhou', 'Shanghai') & Prod_Desc == 'ONGLYZA' ~ Value * 1.045,
#            City == 'Shanghai' & Prod_Desc == 'JANUMET' ~ Value * 3,
#            City == 'Shanghai' & Prod_Desc == 'FORXIGA' ~ Value * 1.924,
#            TRUE ~ NaN
#          )) %>%
#   filter(!is.na(Value))
## dashboard info
# Mapping tables: daily-dosage factors (for PDot), product categories, and
# Chinese-to-English province/city names.
dly.dosage <- read.xlsx("02_Inputs/OAD_PDot转换关系.xlsx", startRow = 4)
dly.dosage.sup <- read.xlsx("02_Inputs/Daily_Dosage_Supplement_20201126.xlsx")
msd.category <- read.xlsx("02_Inputs/MSD交付匹配.xlsx", cols = 1:2)
city.en <- read.xlsx("02_Inputs/MSD交付匹配.xlsx", cols = 4:7)
# Normalise product names by stripping the trailing token so they match the
# PROD_NAME built from Prod_Desc + Pck_Desc below.
dly.dosage0 <- dly.dosage %>%
  mutate(PROD_NAME = gsub(" \\S*$", "", PROD_NAME)) %>%
  distinct()
# One daily-dosage factor per Pack_ID, manually supplemented where missing.
dly.dosage.pack <- msd.result %>%
  mutate(PROD_NAME = paste0(Prod_Desc, " ", Pck_Desc),
         PROD_NAME = gsub("\\s+", " ", str_trim(PROD_NAME))) %>%
  left_join(dly.dosage0, by = "PROD_NAME") %>%
  bind_rows(dly.dosage.sup) %>%
  filter(!is.na(DLY_DOSAGE)) %>%
  distinct(Pack_ID, DLY_DOSAGE)
## MSD dashboard
# Long-format dashboard: one row per measurement (Sales / Units /
# DosageUnits / PDot, where PDot = DosageUnits / daily dosage), with English
# geography names, stacked onto the historical records and rounded.
# NOTE(review): the two hard-coded 2020Q1 Beijing PDot patches (packs
# 1861202 and 3166502) overwrite history -- confirm they are still intended.
msd.dashboard <- msd.result %>%
  mutate(PROD_NAME = paste0(Prod_Desc, " ", Pck_Desc),
         PROD_NAME = gsub("\\s+", " ", str_trim(PROD_NAME))) %>%
  left_join(dly.dosage.pack, by = "Pack_ID") %>%
  left_join(msd.category, by = "ATC3") %>%
  left_join(city.en, by = c("Province", "City")) %>%
  mutate(Province = Province_EN,
         City = City_EN,
         PDot = DosageUnits / DLY_DOSAGE) %>%
  setDT() %>%
  melt(id.vars = c("Channel", "MKT", "Date", "Province", "City", "ATC3",
                   "Category", "Molecule_Desc", "Prod_Desc", "Pck_Desc",
                   "Pack_ID", "Corp_Desc"),
       measure.vars = c("Sales", "Units", "DosageUnits", "PDot"),
       variable.name = "Measurement",
       value.name = "Value",
       variable.factor = FALSE) %>%
  bind_rows(msd.history) %>%
  mutate(Value = case_when(Date == '2020Q1' & City == 'Beijing' &
                             Pack_ID == '1861202' & Measurement == 'PDot' ~
                             181 / 1.91,
                           Date == '2020Q1' & City == 'Beijing' &
                             Pack_ID == '3166502' & Measurement == 'PDot' ~
                             368 / 1.19,
                           TRUE ~ Value),
         Value = round(Value)) %>%
  arrange(Channel, Date, Province, City, MKT, Pack_ID)
write.xlsx(msd.dashboard, '03_Outputs/06_MSD_CHC_OAD_Dashboard_2020Q3.xlsx')
|
52600bf7773b5371f5f6736bd2aed37288eaa959 | 34770834499440fc30c10961a3665a37ec5697d9 | /00_meta_roxy.R | d47fb4331eda611c47b947b20b8a1863ccc74f25 | [] | no_license | statsccpr/MultCal2Sim | e4170b2640f9e9f676b9295b196e1741dd59933b | 86e131e90d5d081d2ff84cf6d85cb753ab38c338 | refs/heads/master | 2022-09-19T06:16:47.156157 | 2022-09-01T20:16:22 | 2022-09-01T20:16:22 | 193,562,003 | 0 | 2 | null | 2019-10-08T22:47:45 | 2019-06-24T18:55:10 | R | UTF-8 | R | false | false | 148 | r | 00_meta_roxy.R |
# datzen::rmabd()
setwd("~/projects/MultCal2Sim/")
devtools::load_all()
devtools::document()
roxygen2::roxygenise()
devtools::use_readme_rmd()
|
f6f3b8b52eca473d784bdec5ee4f2b653389bc40 | 3176b20e5bfc4a0dfcf1723ec12932355d418804 | /man/analyzeBoundNBF.Rd | 31ffadfb2a6ab45bb9abc399f1cae1c3e06c0bfc | [] | no_license | cran/binseqtest | abc6be566b0fdd16df5bf70c2dc73406301cc1eb | b3b9bf79354d7a7777aa1251b16db0470957904e | refs/heads/master | 2023-09-01T13:19:15.020724 | 2023-08-24T15:30:02 | 2023-08-24T16:35:12 | 17,694,776 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,788 | rd | analyzeBoundNBF.Rd | \name{analyze}
\alias{analyzeBoundNBF}
\alias{analyzeBound}
\alias{analyze}
\title{
Methods for calculating estimates, confidence intervals, and p-values from binary sequential boundaries
}
\description{
For routine use, these functions will not need to be called directly but are called from within the design functions (see \code{\link{designOBF}}). If needed, use \code{analyze} for any class representing a binary sequential boundary (see \code{\link{bound-class}}),
and the appropriate function is called.
}
\usage{
analyzeBound(object, theta0 = 0.5, stats = "all",
alternative = "two.sided", conf.level = 0.95,
tsalpha = NULL, ...)
analyzeBoundNBF(object, theta0 = 0.5, stats = "all",
alternative = "two.sided", conf.level = 0.95,
tsalpha = NULL, cipMatch = TRUE, ...)
}
\arguments{
\item{object}{a binary sequential boundary (for classes see \code{\link{bound-class}})}
\item{theta0}{probability of success under the null}
\item{stats}{character, either 'all' or 'pval'}
\item{tsalpha}{vector of length 2, error on either side, if present overrides alternative and conf.level (see details)}
\item{alternative}{character, either 'two.sided', 'less', or 'greater'}
\item{conf.level}{confidence level}
\item{cipMatch}{logical, for non-binding futility boundaries, should CI match the p-values on the binding boundary}
\item{\dots}{further arguments to be passed}
}
%\details{}
\value{if stats='all' returns an object of class 'boundEst', otherwise returns a numeric vector of p-values}
%\references{}
%\author{%% ~~who you are~~}
%\note{%% ~~further notes~~}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See \code{\link{analyze-methods}}
}
%\examples{ }
%\keyword{ ~kwd1 } |
c05bd24b735a86f60a489cd5f99fff5c2c8172e9 | 4848ca8518dc0d2b62c27abf5635952e6c7d7d67 | /R/W_O_ped.R | 507a19597627602ab7bcd9bc6bfcbc2e48fc3a2f | [] | no_license | regenesis90/KHCMinR | ede72486081c87f5e18f5038e6126cb033f9bf67 | 895ca40e4f9953e4fb69407461c9758dc6c02cb4 | refs/heads/master | 2023-06-28T00:29:04.365990 | 2021-07-22T04:44:03 | 2021-07-22T04:44:03 | 369,752,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,587 | r | W_O_ped.R | #' The width of the sidewalk obstructed by the facility
#'
#' The width of the sidewalk obstructed by the facility on a pedestrian road.
#' It is the obstruction width caused by factors that impede walking.
#' It follows <Table 14-7> in KHCM(2013), p.621.
#' @param lamppost Number of lampposts
#' @param signal_controller Number of signal controllers
#' @param fireplug Number of fireplugs
#' @param sign Number of road signs
#' @param mailbox Number of mailboxes
#' @param p_booth Number of phone booths
#' @param trash_can Number of trash cans
#' @param curb Number of curbs
#' @param subway_stair Number of subway stairs
#' @param tree Number of trees
#' @param tree_guard Number of tree guards
#' @param pillar Number of pillars
#' @param entrance_stair Number of entrance stairs
#' @param revolving_door Number of revolving doors
#' @param pipe Number of pipe connections
#' @param awn_pillar Number of awning pillars
#' @param valtype Choose one from : \code{'max'}, \code{'min'}, \code{'mean'};
#'   selects which reference obstruction width from <Table 14-7> is applied.
#' @return Total obstructed width (m): the sum of each facility count times
#'   its per-facility reference width.
#' @export
#' @examples
#' W_O_ped(lamppost = 3, signal_controller = 1, fireplug = 1, trash_can = 3, tree = 2, valtype = 'max')
W_O_ped <- function(lamppost = 0, signal_controller = 0, fireplug = 0,
                    sign = 0, mailbox = 0, p_booth = 0, trash_can = 0,
                    curb = 0, subway_stair = 0, tree = 0, tree_guard = 0,
                    pillar = 0, entrance_stair = 0, revolving_door = 0, pipe = 0, awn_pillar = 0,
                    valtype = 0){
  # Facility counts in a fixed order; must stay aligned with the width
  # vectors below.
  counts <- c(lamppost, signal_controller, fireplug, sign, mailbox, p_booth,
              trash_can, curb, subway_stair, tree, tree_guard, pillar,
              entrance_stair, revolving_door, pipe, awn_pillar)
  # Per-facility obstruction widths (m) from KHCM(2013) <Table 14-7>;
  # 'mean' is the midpoint of the 'max' and 'min' rows.
  ref_width <- list(
    max  = c(1.1, 1.2, 0.9, 0.6, 1.1, 1.2, 0.9, 0.5, 2.1, 1.2, 1.5, 0.9, 1.8, 2.1, 0.3, 0.8),
    min  = c(0.8, 0.9, 0.8, 0.6, 1.0, 1.2, 0.9, 0.5, 1.7, 0.6, 1.5, 0.8, 0.6, 1.5, 0.3, 0.8),
    mean = c(0.95, 1.05, 0.85, 0.6, 1.05, 1.2, 0.9, 0.5, 1.9, 0.9, 1.5, 0.85, 1.2, 1.8, 0.3, 0.8)
  )
  # Fail loudly on an invalid selector instead of returning an error message
  # as the result (the previous behaviour silently propagated a character
  # string into downstream arithmetic).
  if (!is.character(valtype) || length(valtype) != 1 ||
      !(valtype %in% names(ref_width))) {
    stop("[valtype] must be one of 'max', 'min', 'mean'.", call. = FALSE)
  }
  sum(counts * ref_width[[valtype]])
}
|
b869f47f74678110e21aa5173cb38168391b9cae | 1837634a27be991e0ada423c43e6181a903e87b6 | /interactive-script-bezier.R | d590e35c94c765364cfd9ba0054f308bc2e3d6ab | [] | no_license | ddediu/bezier-hard-palate | b5b8b0de6f9e8dd5647f03054b52ecc39918e123 | 46670b11b66b4e2444c81910eac8d47d5e71d77a | refs/heads/master | 2021-05-12T06:11:52.106898 | 2018-02-15T19:09:58 | 2018-02-15T19:09:58 | 117,211,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,226 | r | interactive-script-bezier.R | ################################################################################################################################
#
# R script (for RStudio) for interactively visualize the Bezier curves corresponding to various discretized parameter values.
#
# Copyright (C) 2015 Dan Dediu
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
################################################################################################################################
################################################################################################################################
#
# This script *must* be run within RStudio (https://www.rstudio.com/)
#
################################################################################################################################
# Loading the 51-steps discretized parameter space of Bezier curves
# This can be *very* slow (about 20 mins on a Core i7 2630QM @2.0GHz 8Go RAM Ubuntu 14.04 64 bits with R 3.2.2 machine) and requires quite a bit of RAM (about 6Go on the same machine)!:
bezier.file <- xzfile("./generated-bezier.tsv.xz");
# One row per discretized curve; the code below relies on the first 5 columns
# being the curve parameters (angle, conc, fronting, weigth plus ratio --
# presumably in that order) and columns 6+ being the sampled y-values.
bezier <- read.table(bezier.file, header=TRUE, sep="\t", quote="");
# Extract the x-coordinates from the column names:
# (read.table prefixes numeric names with "X", so drop the first character
# and parse the remainder as the numeric x coordinate)
xs <- as.numeric(vapply(names(bezier)[6:ncol(bezier)], function(s){ substr(s,2,nchar(s)) }, character(1)));
# Optimization: cache the unique parameter values:
angle.values <- unique(bezier$angle); conc.values <- unique(bezier$conc); fronting.values <- unique(bezier$fronting); weigth.values <- unique(bezier$weigth);
# Plot a given Bezier curve given either as a row in the grid.params data.frame or as the values of the four parameters:
.plot.Bezier.curve <- function( row=NA, # can be given as the row in the grid.params data.frame, or
                                angle=NA, conc=NA, fronting=NA, weigth=NA, # as the actual parameter values which uniquely select a row in the grid.params data.frame
                                tol=1e-3, # in which case we need a small tolerance when comparing floating point numbers for equality
                                rotate=NA, xy.ratio=NA # generic rotation and xy scaling for plotting
                              )
{
  # Draws one discretized Bezier curve from the global `bezier` table,
  # rotated and rescaled for display. Returns TRUE on success, FALSE when no
  # row was selected or no unique row matches the given parameter values
  # (in which case a red "X" placeholder is drawn instead).
  if( is.na(row) && is.na(angle) && is.na(conc) && is.na(fronting) && is.na(weigth) ) return (FALSE);
  if( is.na(row) && (!is.na(angle) && !is.na(conc) && !is.na(fronting) && !is.na(weigth)) )
  {
    # Select the case using the closest actual parameter values to the ones given:
    angle.val <- angle.values[ abs(angle.values - angle) <= tol ];
    conc.val <- conc.values[ abs(conc.values - conc) <= tol ];
    fronting.val <- fronting.values[ abs(fronting.values - fronting) <= tol ];
    weigth.val <- weigth.values[ abs(weigth.values - weigth) <= tol ];
    row <- which((bezier$angle == angle.val) & (bezier$conc == conc.val) & (bezier$fronting == fronting.val) & (bezier$weigth == weigth.val));
    if( length(row) != 1 )
    {
      # Nothing really to plot: draw an empty unit square with a red X so the
      # user can see this parameter combination has no (unique) curve.
      plot( c(0,1), c(0,1), type="n", xlab="", ylab="", axes=FALSE,
            main=paste0("a=",sprintf("%.2f",angle)," c=",sprintf("%.2f",conc)," f=",sprintf("%.2f",fronting)," w=",sprintf("%.2f",weigth)));
      abline(v=seq(from=0, to=1, length.out=5), col=gray(0.8), lty="dashed");
      abline(h=seq(from=0, to=1, length.out=5), col=gray(0.8), lty="dashed");
      points( c(0,1), c(0,1), type="l", col="red"); points( c(0,1), c(1,0), type="l", col="red");
      mtext(c("back","front"), side=1, line=0, at=c(0,1), cex=0.75, col=gray(0.5));
      mtext(c("bottom","top"), side=2, line=0, at=c(0,1), cex=0.75, col=gray(0.5));
      return (FALSE);
    }
  }
  # The rotation and scaling (defaults: fixed rotation angle in radians, and
  # the per-curve aspect ratio stored in the bezier table):
  if( is.na(rotate) ) rotate <- -0.3217506; # fixed rotation
  if( is.na(xy.ratio) ) xy.ratio <- 1/bezier$ratio[row];
  # The y coordinates (everything after the 5 parameter columns):
  ys = as.numeric(bezier[row,-(1:5)]);
  # Rescale and rotate the plot: centre the curve at (0.5, 0.5), scale the
  # y dimension by xy.ratio, then apply a standard 2D rotation matrix.
  cos.rotate = cos(rotate); sin.rotate = sin(rotate); # cache for speedup
  M = matrix(c( cos.rotate, -sin.rotate,
                sin.rotate, cos.rotate), byrow=TRUE, nrow=2);
  coords = matrix(c(xs-0.5, ys-0.5), byrow=TRUE, nrow=2);
  coords[2,] = coords[2,] * xy.ratio;
  coords2 = (M %*% coords);
  range.xs = range(coords2[1,]); range.ys = range(coords2[2,]); # speedups
  plot( coords2[1,], coords2[2,], type="n", xlab="", ylab="", axes=FALSE,
        main=paste0("a=",sprintf("%.2f",bezier$angle[row])," c=",sprintf("%.2f",bezier$conc[row])," f=",sprintf("%.2f",bezier$fronting[row])," w=",sprintf("%.2f",bezier$weigth[row])));
  abline(v=seq(from=range.xs[1], to=range.xs[2], length.out=5), col=gray(0.8), lty="dashed");
  abline(h=seq(from=range.ys[1], to=range.ys[2], length.out=5), col=gray(0.8), lty="dashed");
  points( coords2[1,], coords2[2,], type="l", col="blue");
  mtext(c("back","front"), side=1, line=0, at=range.xs, cex=0.75, col=gray(0.5));
  mtext(c("bottom","top"), side=2, line=0, at=range.ys, cex=0.75, col=gray(0.5));
  return (TRUE);
}
# TEST: .plot.Bezier.curve( row=123 )
# TEST: .plot.Bezier.curve( angle=0, conc=0, fronting=0.02, weigth=0.38 )
# Interactive exploration of the parameters:
# Each slider sweeps one Bezier parameter over [0, 1] in steps of 0.02; the
# curve is redrawn on every change. invisible() suppresses the TRUE/FALSE
# return value of .plot.Bezier.curve so it is not echoed in the console.
# NOTE(review): the manipulate package runs only inside RStudio -- confirm
# this script is intended to be used there.
# NOTE: "weigth" is the (misspelled) parameter name of .plot.Bezier.curve
# itself; it must keep this spelling to match the function signature.
library(manipulate);
manipulate( invisible(.plot.Bezier.curve(angle=angle, conc=conc, fronting=fronting, weigth=weigth)),
            angle=slider(0,1,initial=0.5,label="angle",step=0.02),
            conc=slider(0,1,initial=0.5,label="conc",step=0.02),
            fronting=slider(0,1,initial=0.5,label="fronting",step=0.02),
            weigth=slider(0,1,initial=0.5,label="weigth",step=0.02));
|
32d0a82515dd767a0588a4830232322d32d46a3b | 3c8c09da0c1313f4ad867d74e71d9c3290104ea2 | /generate-files.R | 82b92d006f8820df289c8e756aa4cbac2423b34f | [] | no_license | pkeeffe-zz/data-science-capstone | 066f431a62be1908fc5e35fd2963968b42fa6df9 | cccf2c7d98eb8b01586baeb907395943334beae0 | refs/heads/master | 2022-02-22T19:45:46.483227 | 2018-11-26T21:48:37 | 2018-11-26T21:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,465 | r | generate-files.R | options(java.parameters = "-Xmx32000m")
library(tm)
library(NLP)
library(RWeka)
library(stringr)
# Factory for Weka n-gram tokenizers: returns a function that splits text
# into n-grams of exactly `n` tokens (min == max == n), in the form expected
# by tm::DocumentTermMatrix's `tokenize` control option.
# Replaces five copy-pasted one-liners; the original five names are kept so
# all downstream code continues to work unchanged.
make_ngram_tokenizer <- function(n) {
  force(n)  # pin the value of n inside the closure
  function(x) NGramTokenizer(x, Weka_control(min = n, max = n))
}
dtm_2gTokenizer <- make_ngram_tokenizer(2)
dtm_3gTokenizer <- make_ngram_tokenizer(3)
dtm_4gTokenizer <- make_ngram_tokenizer(4)
dtm_5gTokenizer <- make_ngram_tokenizer(5)
dtm_6gTokenizer <- make_ngram_tokenizer(6)
# Build a volatile corpus from the raw en_US text files and strip characters
# that are not useful for next-word prediction.
# NOTE(review): paths are hard-coded to the author's home directory, so the
# script is not portable as written.
setwd('~/pat/data-science-capstone')
file_src <- DirSource (directory = '~/pat/data-science-capstone/data/en_US/')
# NOTE(review): load = 'false' is a character string, not the logical FALSE
# that readerControl options normally take -- confirm this is intended.
corp <- VCorpus(file_src,readerControl = list(
  reader = readPlain,
  encoding = "UTF-8",
  language = 'en_US',
  load = 'false',
  dbcontrol = list (
    useDb = TRUE,
    dbName = 'texts.db',
    dbType = 'DB1'
  )
))
# Clean the corpus of punctuation, white space and numbers (not used for text prediction)
corp <- tm_map(corp,removePunctuation)
corp <- tm_map(corp,stripWhitespace)
corp <- tm_map(corp,removeNumbers)
# Bigram document-term matrix. Unlike the 3- to 6-gram matrices below, this
# one uses normalized TF-IDF weighting and a much looser sparsity threshold
# (0.95 instead of 0.2) -- presumably intentional, but worth confirming.
dtm_2g <- DocumentTermMatrix(corp, control =list(
  tokenize = dtm_2gTokenizer,
  tolower = TRUE,
  weighting = function(x) weightTfIdf(x, normalize = TRUE)
))
# Persist the full matrix, then a reduced version with sparse terms removed.
saveRDS(dtm_2g, file = 'data/dtm_2g_large.data')
dtm_2g <- removeSparseTerms(dtm_2g,0.95)
saveRDS(dtm_2g, file = 'data/dtm_2g_sparse.data')
# Build, persist, and sparsify the DocumentTermMatrix for n-grams of order
# 3 through 6. Each iteration reproduces one of the original copy-pasted
# blocks exactly:
#   1. tokenize the corpus into n-grams with term-frequency weighting,
#   2. save the full matrix to data/dtm_<n>g_large.data,
#   3. drop sparse terms with removeSparseTerms(dtm, 0.2),
#   4. save the reduced matrix to data/dtm_<n>g_sparse.data.
# The reduced matrix is also assigned to the variable dtm_<n>g, matching the
# workspace state the original script left behind.
for (ngram_order in 3:6) {
  # Tokenizer for exactly `ngram_order`-token n-grams (min == max).
  tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = ngram_order, max = ngram_order))
  dtm <- DocumentTermMatrix(corp, control = list(
    tokenize = tokenizer,
    tolower = TRUE,
    weighting = function(x) weightTf(x)
  ))
  saveRDS(dtm, file = paste0('data/dtm_', ngram_order, 'g_large.data'))
  dtm <- removeSparseTerms(dtm, 0.2)
  saveRDS(dtm, file = paste0('data/dtm_', ngram_order, 'g_sparse.data'))
  # Keep the historical variable names (dtm_3g .. dtm_6g) for downstream use.
  assign(paste0('dtm_', ngram_order, 'g'), dtm)
}
|
42295babbc9f8736ff47ece0f3d9e44c59bd00bf | e6c4a1ea6b4f5aebda43165c5c08bc9099165f01 | /week2/my_na_rm_test.R | 1c500af1ec195eb8aeda59bc8d90b2739c1e0519 | [] | no_license | d1sant/data-analysis-in-r | 1a211381f40b99c598e6d592fc34613986e80579 | 0bf97919d337b5982558d51bb138a9bdb596185c | refs/heads/master | 2021-05-31T16:11:28.096729 | 2015-07-14T08:07:28 | 2015-07-14T08:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 142 | r | my_na_rm_test.R | d1 <- rnorm(2000)
# Manual test driver for myNaRm (defined in my_na_rm.R): create two random
# vectors, inject NAs into the first 10 positions of each, and clean them.
# NOTE(review): no set.seed() call, so the data (and histograms) differ on
# every run; add a seed if reproducibility matters.
hist(d1)
d2 <- runif(2000)
hist(d2)
# Inject missing values to exercise the NA-removal logic.
d1[1:10] <- NA
d2[1:10] <- NA
source("my_na_rm.R")
# myNaRm is expected to return the vector with NAs removed -- see my_na_rm.R.
d1 <- myNaRm(d1)
d2 <- myNaRm(d2)
314798211f345ea9204970c24886d189c259466f | cfa320693a39096c88adf717e81ad5ceaf826550 | /plot2.R | ad01e3ea891e5e25f1b9992a3058fa34abb74676 | [] | no_license | xsunsmile/ExData_Plotting1 | 8fa189ae1d47a9b7e693e9393e7e964e42bbf8bf | 6685f74c10143e3291ab10a67c22d5a6e001a390 | refs/heads/master | 2021-01-13T16:18:00.153292 | 2014-12-03T17:50:36 | 2014-12-03T17:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 547 | r | plot2.R | if(!require("sqldf")) {
install.packages("sqldf")
}
library(sqldf)
# plot2.png: line chart of Global Active Power for the two target dates.
png("plot2.png", width = 480, height = 480)
par(mar = c(5,5,4,4))
# NOTE(review): hard-coded personal path; not portable across machines.
filePath <- path.expand("~/Documents/DataMining/LearnR/household_power_consumption.txt")
# read.csv2.sql filters the rows via SQL while reading, so only the two
# dates of interest are ever loaded into memory.
target <- read.csv2.sql(filePath, "select * from file where Date in ('1/2/2007','2/2/2007')")
# Combine the separate Date and Time columns into one POSIXlt timestamp.
target$DateTime <- strptime(paste(target$Date, target$Time, sep = ' '), format = "%d/%m/%Y %H:%M:%S")
plot(target$DateTime, target$Global_active_power, type = 'l', ylab = 'Global Active Power (kilowatts)', xlab = '')
dev.off()
22504c4c82ae4eeba2d77ef9b21147fa680bbea1 | f13f280b39b39c1f673b5e53acdeb4ceba755335 | /ImportWood.R | f4b64e691e3f1e8105b12ee9fb5bc0c7ecd2f880 | [] | no_license | bquast/ReplicationWood | 7addbab1052a3634e46fee9912602f9fa996ea96 | f59da89e5f756c961a464c3739c0de7e5401b301 | refs/heads/master | 2021-01-01T19:50:51.174216 | 2014-05-28T09:05:44 | 2014-05-28T09:05:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 182 | r | ImportWood.R | # load package for importing STATA file
# Import the Wood replication data set from a Stata .dta file and open the
# data viewer for a quick visual inspection.
library(foreign)
# find the working directory (the .dta path below is relative to it)
getwd()
# load the data
# NOTE(review): foreign::read.dta supports only older Stata formats
# (<= Stata 12); for newer files the haven package would be needed -- confirm.
Wood <- read.dta('WoodReplication.dta')
# inspect the data (View opens the spreadsheet-style viewer; RStudio/GUI only)
View(Wood)
25ac944de56d4b88ddcb5fe3f5e9c466ad09883a | 82b018b01a20685e080ee4cb6de3e8bfe816df23 | /Chapter 4/Data import.R | 252ae66558c5a0822cb331e57b50a779e7361670 | [] | no_license | cgolden1993/Dissertation | ce0008c43d789547d557276984fdd7e992045eaf | 96790cff770cf64010b61921277117c7c2e081ac | refs/heads/master | 2020-12-29T23:47:51.496704 | 2020-03-02T16:36:44 | 2020-03-02T16:36:44 | 238,782,708 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,113 | r | Data import.R | ####LISTERIA FARM PRACTICE DATA IMPORT####
####LISTERIA FARM PRACTICE DATA IMPORT####
# Import and clean the "Feces" sample sheet from FinalData.xlsx.
# NOTE(review): setwd() to a personal directory makes this non-portable;
# consider project-relative paths (e.g. the here package).
setwd("~/Documents/College/Mishra Lab/Poultry Dissertation Project/Listeria Farm Practice Paper")
# p_load installs (if needed) and attaches all modeling/plotting packages.
pacman::p_load(readxl, caret, rpart, plyr, Zelig, pROC, gbm, randomForest,DMwR, Metrics, pdp, gridExtra, ggplot2,
               tidyverse)
#FECES DATA#
#read in feces data
feces <- read_excel("FinalData.xlsx", sheet = "Feces")
#remove indicator variables and chemical variables (pH:Zn is a column range)
feces <- subset(feces, select = -c(pH:Zn, SampleID, SampleType, ListeriaSpecies, Farm, FlockAgeDays))
#make all character variables factors
feces[sapply(feces, is.character)] <- lapply(feces[sapply(feces, is.character)], as.factor)
#recode the Listeria outcome from "+"/"-" to readable labels
feces$Listeria <- revalue(feces$Listeria, c("+" = "Positive", "-" = "Negative"))
#check which columns within the data set contain NAs
#we want to make sure that none of the categorical variables contain NAs or that will make model fitting tough
colnames(feces)[colSums(is.na(feces)) > 0]
#quantitative variables listed here can be imputed in data pre processing if we would like
# SOIL DATA --------------------------------------------------------------
# Read the soil sample sheet, apply the same cleaning as for feces, keep
# only the columns both sheets share, and stack the two data sets. The
# combined data keeps the name `feces` because the downstream modelling
# code uses that object name as its keyword.
soil <- read_excel("FinalData.xlsx", sheet = "Soil")
# Convert every character column to a factor (same treatment as feces).
soil[sapply(soil, is.character)] <- lapply(soil[sapply(soil, is.character)], as.factor)
# Recode the Listeria outcome from "+"/"-" to readable labels.
soil$Listeria <- revalue(soil$Listeria, c("+" = "Positive", "-" = "Negative"))
# Keep only the columns that are present in both data sets.
common_names <- intersect(names(feces), names(soil))
# all_of() makes the external-character-vector selection explicit (and
# errors on a missing column) instead of relying on dplyr's deprecated
# ambiguous bare-vector lookup.
soil <- select(soil, all_of(common_names))
# Stack feces and soil samples into a single modelling data set.
combine <- rbind(feces, soil)
# 'feces' is the keyword used in the modeling scripts, so reassign it.
feces <- combine
#change names of levels for factors egg source and feed to keep anonymous
# NOTE: assigning duplicated labels via levels<- merges those levels into a
# single category (e.g. both 'W','W' entries collapse to one 'W' level, and
# 'FR','FR' below collapses two breeds into one) -- documented R behavior.
levels(feces$EggSource) <- c('C','A', 'F', 'E', 'D', 'B')
levels(feces$PastureFeed) <- c('BWO', 'CMW', 'W', 'W', 'CSW', 'CSW', 'WC', 'CSW', 'CSO', 'PCO', 'WC')
levels(feces$BroodFeed) <- c('BWO', 'CSW', 'CSW', 'CSW', 'CSW', 'WC', 'W', 'CSO', 'PCO', 'WC')
levels(feces$Breed) <- c('CC', 'FR', 'FR')
#one final check to see if there are any NAs
colnames(feces)[colSums(is.na(feces)) > 0]
#WCR-P Data (whole-carcass rinse, processing) -- same cleaning recipe
WCRP <- read_excel("FinalData.xlsx", sheet = "WCR-P")
#drop identifier and unused columns
WCRP <- subset(WCRP, select = -c(SampleID, ListeriaSpecies, ScalderTempC))
#make all character variables factors
WCRP[sapply(WCRP, is.character)] <- lapply(WCRP[sapply(WCRP, is.character)], as.factor)
WCRP$Listeria <- revalue(WCRP$Listeria, c("+" = "Positive", "-" = "Negative"))
WCRP$Listeria <- revalue(WCRP$Listeria, c("+" = "Positive", "-" = "Negative"))
colnames(WCRP)[colSums(is.na(WCRP)) > 0]
str(WCRP)
#WCR-F Data (whole-carcass rinse, farm) -- same cleaning recipe
WCRF <- read_excel("FinalData.xlsx", sheet = "WCR-F")
#drop identifier and unused columns
WCRF <- subset(WCRF, select = -c(SampleID, Farm, PaMedicated))
#make all character variables factors
WCRF[sapply(WCRF, is.character)] <- lapply(WCRF[sapply(WCRF, is.character)], as.factor)
WCRF$Listeria <- revalue(WCRF$Listeria, c("+" = "Positive", "-" = "Negative"))
colnames(WCRF)[colSums(is.na(WCRF)) > 0]
str(WCRF)
# Anonymize the egg source labels, mirroring the feces recode above.
levels(WCRF$EggSource) <- c('C', 'A', 'F', 'E', 'D', 'B')
|
e9ffb3c791a8dc62dd90cb63b70a926ffca6a0f4 | 6acb1cdd526912a39685aaf3d22c286ff033d892 | /tests/testthat/test-xgboost-deploy.R | 6a7d149c3096508611c75e8201c6f72c76af6468 | [
"MIT"
] | permissive | niilante/healthcareai-r | 921d1fcda3b47b3bc6f3fa70bebc97b36b04e389 | f28c3ed2763bd806a936761e1528dfaa2a3e5c4d | refs/heads/master | 2021-01-21T15:00:21.296649 | 2017-06-20T18:01:30 | 2017-06-20T18:01:30 | 95,368,231 | 1 | 0 | null | 2017-06-25T15:25:54 | 2017-06-25T15:25:54 | null | UTF-8 | R | false | false | 2,432 | r | test-xgboost-deploy.R | context("Checking xgboost deployment")
library(healthcareai)
# Integration-test fixture: develop an XGBoost multiclass model on the
# bundled dermatology data, deploy it, and build the prediction frame
# (outDf) that the test_that() blocks below assert against.
# capture.output() is used throughout to silence the verbose console output
# of the healthcareai constructors/methods during the test run.
# 1. Load data. Categorical columns should be characters.
csvfile <- system.file("extdata",
                       "dermatology_multiclass_data.csv",
                       package = "healthcareai")
# Replace csvfile with 'path/file'
df <- read.csv(file = csvfile,
               header = TRUE,
               stringsAsFactors = FALSE,
               na.strings = c("NULL", "NA", "", "?"))
dfDeploy <- df[347:366,] # reserve 20 rows for deploy step.
# 2. Develop and save model
set.seed(42) # fixed seed: the expected values in the tests below depend on it
p <- SupervisedModelDevelopmentParams$new()
p$df <- df
p$type <- "multiclass"
p$impute <- TRUE
p$grainCol <- "PatientID"
p$predictedCol <- "target"
p$debug <- FALSE
p$cores <- 1
# xgb_params must be a list with all of these things in it.
# if you would like to tweak parameters, go for it!
# Leave objective and eval_metric as they are.
p$xgb_params <- list("objective" = "multi:softprob",
                     "eval_metric" = "mlogloss",
                     "max_depth" = 6, # max depth of each learner
                     "eta" = 0.1, # learning rate
                     "silent" = 0, # verbose output when set to 1
                     "nthread" = 2) # number of processors to use
# Run model
xNew <- capture.output(boost <- XGBoostDevelopment$new(p))
xRun <- capture.output(boost$run())
## 3. Load saved model (automatic) and use DEPLOY to generate predictions.
p2 <- SupervisedModelDeploymentParams$new()
p2$type <- "multiclass"
p2$df <- dfDeploy
p2$grainCol <- "PatientID"
p2$predictedCol <- "target"
p2$impute <- TRUE
p2$debug <- FALSE
# Deploy model to make new predictions
xNew <- capture.output(boostD <- XGBoostDeployment$new(p2))
xDeploy <- capture.output(boostD$deploy())
# Get output dataframe for csv or SQL
xDf <- capture.output(outDf <- boostD$getOutDf())
###########
# Multiclass
# The literal expected values below are tied to set.seed(42) above and to
# the installed xgboost/healthcareai versions; they will drift if either
# changes -- NOTE(review): confirm when upgrading dependencies.
test_that("Grain is correctly attached", {
  # Grain ids should match the rows reserved for deployment (df[347:366, ]).
  expect_true(outDf$PatientID[1] == 347)
  expect_true(outDf$PatientID[4] == 350)
})
test_that("Probabilities are correctly sorted", {
  # PredictedProb1..3 are presumably the top-3 class probabilities in
  # decreasing order -- verify against getOutDf()'s documentation.
  expect_true(round(outDf$PredictedProb1[1],5) == 0.91198)
  expect_true(round(outDf$PredictedProb2[4],5) == 0.08339)
  expect_true(round(outDf$PredictedProb3[7],5) == 0.00312)
})
test_that("Top categories are correctly parsed", {
  expect_true(outDf$PredictedClass1[2] == 'six')
  expect_true(outDf$PredictedClass1[5] == 'one')
  expect_true(outDf$PredictedClass1[9] == 'five')
})
|
885da161b1a0a94129268e9a34285b57a3176291 | 76faeed99f04b513d1944c6cd666e92aae40539f | /02 Data Wrangling/Set 2.R | 2afaf98cd2afce08ad5b0a4466e8570bf180828c | [] | no_license | rachelannearthur/DV_RProject3 | 3146dba573224b2a1561f53395ff42cbfe94a99f | 8dc8440e217d79fad2a208d5322dc2f2ac1a255b | refs/heads/master | 2021-05-30T09:06:35.592897 | 2015-10-16T07:08:30 | 2015-10-16T07:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,100 | r | Set 2.R | require(tidyr)
# Set 2: join enrollment, free/reduced-lunch (FRL), and final-grade data,
# then plot FRL share vs. non-white share, faceted by school level (EMH).
# library() (not require) so a missing package fails loudly.
library(dplyr)
library(ggplot2)
library(jsonlite)
library(RCurl)
# outer_join_df: one row per school with graduation rate, FRL share,
# school level, percent non-white, and final plan type.
# (enrl_working, k_12_frl and final_grades are created by earlier scripts.)
outer_join_df <- enrl_working %>%
  left_join(k_12_frl, by = "SCHOOL_CODE", copy = TRUE) %>%
  right_join(final_grades, by = "SCHOOL_CODE", copy = TRUE) %>%
  mutate(PCT_NONWHITE = PCT_AMIND + PCT_ASIAN + PCT_BLACK + PCT_HISP + PCT_PI + PCT_2ORMORE,
         FRL = X_FREE_AND_REDUCED / 100) %>%
  select(SPF_PS_IND_GRAD_RATE, FRL, EMH, PCT_NONWHITE, FINAL_PLANTYPE) %>%
  dplyr::rename(GRAD_RATE = SPF_PS_IND_GRAD_RATE) %>%
  filter(EMH != "A")
# BUGFIX: `percent` was referenced without attaching the scales package;
# use the namespaced scales::percent (scales is a ggplot2 dependency).
# The deprecated layer(stat_params=, geom_params=) call is replaced by the
# equivalent geom_point() layer (stat identity, jittered positions).
plot2 <- ggplot() +
  coord_cartesian() +
  scale_x_continuous(labels = scales::percent) +
  scale_y_continuous(labels = scales::percent) +
  facet_grid(. ~ EMH) +
  labs(title = 'FRL School Minority Distribution') +
  labs(x = "Percentage of Non-White Students",
       y = "Percentage of Students on Free/Reduced Lunch Meal Plan") +
  geom_point(data = outer_join_df,
             mapping = aes(x = PCT_NONWHITE, y = FRL, color = FINAL_PLANTYPE),
             position = position_jitter(width = 0.3))
plot2
|
a5748fe519c4323e0573acc9b91e1c73848c1daf | d488095d94b00bc42d355f9b34c276772936b803 | /man/breastcancer.Rd | 1968327e54605b923315abd580fda4867a37eb64 | [] | no_license | DavidHofmeyr/spuds | 5415186be67182ef806b74b0a607ea75ca552edf | ecc9247178a20ae543987dc6631067b52486dd8b | refs/heads/master | 2020-04-13T16:04:33.139774 | 2019-01-06T07:39:08 | 2019-01-06T07:39:08 | 163,311,906 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 975 | rd | breastcancer.Rd | \name{breastcancer}
\docType{data}
\alias{breastcancer}
\title{Breast Cancer Identification}
\description{
Physical measurements from breast masses for distinguishing malignant from benign cases.
}
\usage{breastcancer}
\format{A list with entries $x (a 699x9 matrix with each row corresponding to an individual case) and $c (a vector of labels, benign = 2 and malignant = 4).}
\source{UCI Machine Learning Repository.}
\references{
Dheeru, D. and E. Karra Taniskidou (2017). UCI machine learning repository. \url{https://archive.ics.uci.edu/ml}
Wolberg, W.H., & Mangasarian, O.L. (1990). Multisurface method of pattern separation for medical diagnosis applied to breast cytology. In Proceedings of the National Academy of Sciences, 87, 9193--9196.
Zhang, J. (1992). Selecting typical instances in instance-based learning. In Proceedings of the Ninth International Machine Learning Conference (pp. 470--479). Aberdeen, Scotland: Morgan Kaufmann.
}
\keyword{datasets}
|
70a1dd422881a461ecec315979747714390018ac | 18bfdd5b507b261c1dbd7242d9061abc0cf4aaf3 | /src/population/population.r | b628b13be13d3d6294c69bb9d847c704e0903fd6 | [] | no_license | fealuin/app-genetic-mds | 1d44394816a2adec8dd57b25c8cd6fd4cb55996c | 5a5fab43520c6163624973c7b1f7f2b8267c57a9 | refs/heads/master | 2020-03-20T02:36:57.310305 | 2018-06-12T19:49:34 | 2018-06-12T19:49:34 | 137,117,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,840 | r | population.r | source('individual/individual.r')
# Closure-based "class": a Population stores a list of Individual objects
# in its own environment and exposes accessors/mutators through the
# returned list `me` (tagged with S3 class "Population").
#   size : number of individuals created by initialize()
#   n, m : dimensions forwarded to Individual(n, m)
Population<-function(size=100,n=10,m=2){
  thisEnv <- environment()
  individuals<-list()
  me<-list(
    thisEnv = thisEnv,
    # Return the environment holding this population's mutable state.
    getEnv = function(){
      return(get("thisEnv",thisEnv))
    },
    # Return the current list of Individual objects.
    getIndividuals=function(){
      return(get("individuals",thisEnv))
    },
    # Replace the whole list of individuals (stored in thisEnv).
    setIndividuals=function(I){
      return(assign("individuals",I,thisEnv))
    },
    # Return the i-th individual.
    getIndividual=function(i){
      return(me$getIndividuals()[[i]])
    },
    #setIndividual=function(i,I){
    #  return(assign(paste("individuals[[",i,"]]"),I,thisEnv))
    #},
    # Create `size` new Individuals and initialize each one.
    # NOTE(review): 1:size misbehaves for size == 0 (iterates 1, 0);
    # seq_len(size) would be safer -- confirm size > 0 is guaranteed.
    initialize=function(type="random",max=100){
      init=list()
      for (i in 1:size){
        init[[i]]=Individual(n,m)
      }
      me$setIndividuals(init)
      lapply(me$getIndividuals(),function(x) x$initialize(type,max))
      return("ok")
    },
    # Delegate fitness evaluation to every individual (D is passed through).
    setFitness=function(D){
      lapply(me$getIndividuals(),function(x) x$setFitness(D))
      return("ok")
    },
    # Collect every individual's fitness into a single numeric vector.
    getFitness=function() {
      return(unlist(lapply(me$getIndividuals(),function(x) x$getFitness())))
    },
    # Reorder the individuals in place by fitness (ascending by default).
    orderByFitness=function(decreasing=FALSE){
      return(me$setIndividuals(me$getIndividuals()[order(me$getFitness(),decreasing=decreasing)]))
    },
    # Build a mutated copy of the population: each individual mutates with
    # probability p. NOTE(review): setIndividuals() copies only the *list*;
    # if Individual objects are environment-based (reference semantics, like
    # Population itself), the new population shares -- and therefore also
    # mutates -- the parent's individuals. The check at the bottom of this
    # file (pop vs pop2 fitness) suggests exactly this aliasing; confirm
    # whether getMutation is meant to deep-copy. The `ratio` parameter is
    # currently unused.
    getMutation=function(p=0.4,ratio=0.5,type="flipPoints") {
      mutation=Population(size,n,m)
      mutation$setIndividuals(me$getIndividuals())
      lapply(mutation$getIndividuals(),function(x) if(runif(1)<p){x$getMutation(type=type)})
      #for(i in size){
      #  if(runif(1)<p){
      #    mutation$setIndividual(i,me$getIndividual(i)$getMutation(type=type))
      #  }
      #}
      return(mutation)
    }
  )
  class(me) <- append(class(me),"Population")
  return(me)
}
# Smoke test: build and initialize a population, evaluate fitness, mutate.
pop=Population(n=214)
pop$initialize()
# NOTE(review): `D` is not defined anywhere in this file; it must exist in
# the calling environment (loaded elsewhere) or these calls fail.
pop$setFitness(D)
pop2<-pop$getMutation()
pop2$setFitness(D)
# Element-wise comparison of parent vs mutant fitness; all-TRUE would
# indicate the aliasing issue noted in getMutation() above.
pop$getFitness()==pop2$getFitness()
|
964be904253f9a49d43d5a66592c446747931dc3 | 8e35f38bbb6f3f27896e567b75dace5e0be013c1 | /man/make_tint.Rd | 7f75e8372564b976f1d05c07d7dd92facdd9686b | [] | no_license | eknit/isotopevis | 4785699263f2f35629f20e93bab3d61512b48651 | 103a6af8d6ab090950c95f20ec18c50091a4474f | refs/heads/master | 2020-12-02T18:22:25.412682 | 2016-02-06T12:13:51 | 2016-02-06T12:13:51 | 37,761,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | rd | make_tint.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/isotopevis.R
\name{make_tint}
\alias{make_tint}
\title{make_tint
Automatically makes tinted shade for plotting. Returns RGB colour.}
\usage{
make_tint(col, tint_factor)
}
\description{
make_tint
Automatically makes tinted shade for plotting. Returns RGB colour.
}
|
1466c618112cb2f5b1334723863c589eb80f1419 | f10abc277676761051fae9413764ddc41e5549e3 | /plot4.R | 03ab9d1734ecb5e2bceccade53c135337796c17d | [] | no_license | Kug/ExData_Plotting1 | 6fc4afe8d3a35cd0195c79470cd0eb16ee7e09d8 | 941aa2dacdcfca17886cb48c6812afec4313ecdf | refs/heads/master | 2021-01-18T13:19:55.606829 | 2014-12-07T15:57:20 | 2014-12-07T15:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,486 | r | plot4.R | data <- read.table(file="household_power_consumption.txt",sep=";",header=FALSE,na.strings="?",nrows=2880,skip=66637)
# plot4.png: 2x2 panel of household power consumption for 2007-02-01/02.
# `data` is read above by read.table(); nrows/skip select the raw rows that
# presumably cover those two dates -- verify against the source file.
colnames(data) <- c("date","time","GlobalActivePower","GlobalReactivePower","Voltage","GlobalIntensity","SubMetering1","SubMetering2","SubMetering3")
# Build a POSIXlt timestamp from the separate date and time columns.
data$datetime <- strptime(paste(data$date,data$time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
data$date <- as.Date(data$date,format="%d/%m/%Y")
## Plot the data and save the plot as plot4.png
png(filename = "plot4.png", width = 480, height = 480)
# mfcol fills column-first, so panels land at (1,1), (2,1), (1,2), (2,2).
par(mfcol=c(2,2))
#subplot 1,1: global active power over time
with(data, plot(y=GlobalActivePower,x=datetime, type = "n",xlab ="", ylab="Global Active Power (kilowatts)"))
with(data, lines(y=GlobalActivePower,x=datetime))
#subplot 2,1: the three sub-metering series overlaid with a legend
with(data, plot(y=SubMetering1,x=datetime, type = "n",xlab ="", ylab="Energy sub metering"))
with(data, lines(y=SubMetering1,x=datetime,col="black"))
with(data, lines(y=SubMetering2,x=datetime,col="red"))
with(data, lines(y=SubMetering3,x=datetime,col="blue"))
legend("topright",lty="solid",col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#subplot 1,2: voltage
with(data,plot(y=Voltage,x=datetime, type = "n" ))
with(data, lines(y=Voltage,x=datetime,col="black"))
#subplot 2,2: global reactive power
# BUGFIX: axis label previously read "Globle_reactive_power" (typo).
with(data,plot(y=GlobalReactivePower,x=datetime, type = "n" ,ylab = "Global_reactive_power"))
with(data, lines(y=GlobalReactivePower,x=datetime,col="black"))
dev.off()
|
5218a35f224f887b1f7983ca559bacfd074b88b6 | c86b2a299808d638b373832cad752900c0679537 | /Tutorials_OtherDocs/2017-11_uRos2017/R_code_presentation.R | b2c2cc1ae27e73e511d75cbe3c7ab66e928c08ac | [] | no_license | marcellodo/univOutl | 9518b6ec963a8429aef50437e78b20f4afbac434 | db5431cfd67ef26de0a2ed8527a5096fd0f608b1 | refs/heads/master | 2022-07-12T15:03:20.846840 | 2022-06-21T08:59:40 | 2022-06-21T08:59:40 | 94,315,178 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,215 | r | R_code_presentation.R | library(sn)
library(univOutl)
# ------------------------------------------------------------------
# univOutl demo (uRos 2017): univariate outlier detection.
# Section 1 -- symmetric data with location-scale rules.
# ------------------------------------------------------------------
# Location-scale outlier detection
# generate data from normal distr. (skew-normal with alpha = 0 is Gaussian)
set.seed(123456)
r <- rsn(n=200, xi=50, omega=5, alpha=0)
hist(r, xlim=c(30,70))
mc(r) # medcouple skewness measure (close to 0 for symmetric data)
# Location-scale bounds under five robust scale estimators.
a1 <- LocScaleB(x=r, method = "IQR")
a2 <- LocScaleB(x=r, method = "MAD")
a3 <- LocScaleB(x=r, method = "Sn")
a4 <- LocScaleB(x=r, method = "Qn")
a5 <- LocScaleB(x=r, method = "scaletau2")
# Collect estimated parameters and derived bounds for side-by-side display.
mpars <- rbind(IQR=a1$pars,
               MAD=a2$pars,
               Sn=a3$pars,
               Qn=a4$pars,
               scaleTau2=a5$pars)
mbounds <- rbind(IQR=a1$bounds,
                 MAD=a2$bounds,
                 Sn=a3$bounds,
                 Qn=a4$bounds,
                 scaleTau2=a5$bounds)
mpars
mbounds
# Overlay each method's bounds on the histogram (one colour/linetype each).
abline(v=a1$bounds, col=2, lwd=2, lty=1)
abline(v=a2$bounds, col=3, lwd=2, lty=2)
abline(v=a3$bounds, col=4, lwd=2, lty=3)
abline(v=a4$bounds, col=5, lwd=2, lty=4)
abline(v=a5$bounds, col=6, lwd=2, lty=5)
# Section 2 -- positively skewed data ------------------------------
# generate data positive skewed normal distr.
set.seed(432123)
r <- rsn(n=200, xi=50, omega=5, alpha=4)
hist(r, xlim = c(40,70))
mc(r) # medcouple skewness measure
a1 <- LocScaleB(x=r, method = "IQR")
a2 <- LocScaleB(x=r, method = "dq")
a1$pars
a2$pars
a1$bounds
a2$bounds
abline(v=median(r), col=2, lwd=2, lty=1)
abline(v=a1$bounds, col=2, lwd=2, lty=1)
abline(v=a2$bounds, col=3, lwd=2, lty=2)
# Log transform reduces skewness (compare the two medcouples), so bounds
# can also be computed on the log scale via logt = TRUE.
mc(r)
mc(log(r))
LocScaleB(x=r, method = "MAD", logt = TRUE)
# Section 3 -- boxplot-based fences --------------------------------
# boxplot-based outlier detection
set.seed(11122)
r <- rsn(n=200, xi=50, omega=5, alpha=5)
hist(r, xlim = c(40, 70))
mc(r) # medcouple skewness measure
a1 <- boxB(x=r, k=1.5, method='resistant')
a2 <- boxB(x=r, k=1.5, method='asymmetric')
a3 <- boxB(x=r, k=1.5, method='adjbox')
# Standard, asymmetric and (skewness-)adjusted boxplot fences compared.
mfenc <- rbind(std=a1$fences,
               asym=a2$fences,
               adjb=a3$fences)
mfenc
abline(v = median(r), col=2, lwd=2, lty=1)
abline(v = a1$fences, col=2, lwd=2, lty=3)
abline(v = a2$fences, col=3, lwd=2, lty=2)
abline(v = a3$fences, col=4, lwd=2, lty=1)
#####
# Hidiroglou-Berthelot ratios: outliers in the 2015/2014 production ratios.
# (The commented-out code below documents how rice.rda was originally
#  assembled from the raw CSV: two years of rice production merged by area.)
# rice <- read_delim("AAA-Work/progetti-GdL/univOutl/2017-11 - uRos 2017 Bucharest/rice.csv", ";", escape_double = FALSE, trim_ws = TRUE)
# 
# rice1 <- subset(rice, Year==2014)
# rice2 <- subset(rice, Year==2015)
# 
# lab <- c("geographicAreaM49", "GeographicArea", "Value")
# mm <- merge(rice1[lab], rice2[lab], 
#             by=c("geographicAreaM49", "GeographicArea"), 
#             all=TRUE, suffixes = c('2014', '2015'))
# RiceProd <- mm
# save(RiceProd, file='RiceProd.rda')
load(file='rice.rda')
# HB E-scores comparing production at times t (2014) and t+1 (2015).
outlRice <- HBmethod(yt1 = rice$Prod2014,
                     yt2 = rice$Prod2015,
                     return.dataframe = TRUE,
                     C=15)
outlRice$quartiles.E
outlRice$bounds.E
hist(outlRice$data$Escore, xlim=c(-2000, 1500))
abline(v = outlRice$quartiles.E['50%'], col=2, lwd=2, lty=1)
abline(v = outlRice$bounds.E, col=2, lwd=2, lty=3)
head(outlRice$excluded, 3)
head(outlRice$data, 3)
# Ids flagged as outliers by the HB method, kept for later comparison.
outl.HB <- outlRice$data$id[outlRice$data$outliers==1]
# ratioSize: outliers in the centered ratios, optionally size-weighted.
oo <- ratioSize(numerator = rice$Prod2015,
                denominator = rice$Prod2014,
                return.dataframe = T)
oo$median.r
oo$bounds
hist(oo$data$c.ratio)
abline(v = median(oo$data$c.ratio), col=2, lwd=2, lty=1)
abline(v = oo$bounds, col=2, lwd=2, lty=3)
head(oo$data, 3)
# Same analysis restricted to units above the size threshold.
oo <- ratioSize(numerator = rice$Prod2015,
                denominator = rice$Prod2014,
                return.dataframe = T, size.th = 1000)
head(oo$data)
# adjusted boxplot on E-scores: compare HB bounds with adjbox fences.
outlRice <- HBmethod(yt1 = rice$Prod2014,
                     yt2 = rice$Prod2015,
                     return.dataframe = TRUE,
                     C=5.4)
oo <- boxB(x=outlRice$data$Escore,
           method = 'adjbox')
outlRice$bounds.E
oo$fences
outlRice.adj <- oo$outliers
# Overlap between the two flagging rules.
intersect(outl.HB, outlRice.adj)
outl.HB
outlRice.adj
hist(outlRice$data$Escore, xlim=c(-2000, 1500))
abline(v = outlRice$quartiles.E['50%'], col=2, lwd=2, lty=1)
abline(v = outlRice$bounds.E, col=2, lwd=2, lty=3)
abline(v = oo$fences, col=3, lwd=2, lty=4)
####
## bivariate outlier detection
#
load(file="rice.rda")
par(mfrow=c(2,2))
par(pty='s')
# 2x2 panel: raw and log-transformed production for both years.
# BUGFIX: the first panel previously plotted rice$Area2014 although its
# title (and the matching log panel below) refer to production 2014.
hist(rice$Prod2014, main='Rice-Prod_2014', xlab='')
hist(rice$Prod2015, main='Rice-Prod_2015', xlab='')
hist(log(rice$Prod2014+1), main='Log(Rice-Prod_2014+1)', xlab='')
hist(log(rice$Prod2015+1), main='Log(Rice-Prod_2015+1)', xlab='')
library("splines")
library("ggplot2")
library("MASS")
# Scatterplot with a linear regression smoother on the log scale.
rice$logProd2014 <- log(rice$Prod2014+1)
rice$logProd2015 <- log(rice$Prod2015+1)
ggplot(rice, aes(x=logProd2014, y=logProd2015)) +
  geom_point(shape=1) + # Use hollow circles
  geom_smooth(method=lm) +
  labs(x='Log(Rice-Prod_2014+1)', y='Log(Rice-Prod_2015+1)')
# # scatterplot with robust regression
# ggplot(rice, aes(x=z14, y=z15)) +
#   geom_point(shape=1)+
#   stat_smooth(method="rlm",fullrange=TRUE)
# 
# # scatterpot with splines
# ggplot(rice, aes(x=z14, y=z15)) +
#   geom_point() +
#   stat_smooth(method = "lm", formula = y ~ ns(x, 3), level=0.9999999) + 
# 
# mvoutlier: robust-distance based bivariate outlier detection.
library("mvoutlier")
par(mfrow=c(1,2))
corr.plot(rice$logProd2014, rice$logProd2015, alpha=0.01)
dd <- dd.plot(rice[,c("logProd2014", "logProd2015")], alpha=0.01)
#sp <- symbol.plot(cbind(rice$z14, rice$z15), alpha=0.01)
# cp <- color.plot(cbind(rice$z14, rice$z15), alpha=0.01)
# vcz.mcd <- covMcd(x=rice[, c('z14','z15')])
# plot(vcz.mcd, which="tolEllipsePlot", classic=TRUE, cutoff=0.01)
# plot(vcz.mcd, which="dd", cutoff=0.01)
sum(dd$outliers)
outl <- dd$outliers
head(rice[outl,], 3)
# Plot non-outliers, then highlight and label the flagged countries.
par(mfrow=c(1,1))
plot(x=rice$logProd2014[!outl], y=rice$logProd2015[!outl],
     col=3, lwd=1, xlim=c(0,18), ylim=c(0,18),
     xlab='Log(Rice-Prod_2014+1)', ylab='Log(Rice-Prod_2015+1)')
abline(0,1)
points(x=rice$logProd2014[outl], y=rice$logProd2015[outl],
       pch='+', col=2, lwd=3)
text(x=rice$logProd2014[outl], y=rice$logProd2015[outl],
     labels=rice$Geographic.Area[outl], cex=0.8, pos=4)
# SeleMix, joint model (no predictor variable without errors)
library("SeleMix")
out.sel <- ml.est(y = rice[,c("logProd2014", "logProd2015")],
                  model="N", w=0.005, w.fix=FALSE, t.outl=0.5)
out.sel$w # estimated proportion of contaminated data
out.sel$lambda # estimated variance inflation factor
sum(out.sel$outlier) # estimated number of contaminated obs
sum(out.sel$tau==1)
sum(out.sel$tau>0.98)
# Units with posterior probability of contamination above 0.5, most
# suspicious first, with observed and predicted values side by side.
toCheck <- data.frame(Geographic.Area=rice$Geographic.Area,
                      postProb=out.sel$tau,
                      rice[,c("logProd2014", "logProd2015")],
                      out.sel$ypred)
toCheck <- subset(toCheck, postProb>0.5)
toCheck <- toCheck[order(toCheck$postProb, decreasing = TRUE), ]
head(toCheck)
outl.mix <- as.logical(out.sel$outlier)
### Final comparison plot: dd.plot outliers ('x', red) vs SeleMix ('+', blue).
par(mfrow=c(1,1))
plot(x=rice$logProd2014[!outl], y=rice$logProd2015[!outl],
     col=3, lwd=1, xlim=c(0,18), ylim=c(0,18),
     xlab='Log(Rice-Prod_2014+1)', ylab='Log(Rice-Prod_2015+1)')
abline(0,1)
points(x=rice$logProd2014[outl], y=rice$logProd2015[outl],
       pch='x', col=2, lwd=3)
points(x=rice$logProd2014[outl.mix], y=rice$logProd2015[outl.mix],
       pch='+', col=4, lwd=3)
text(x=rice$logProd2014[outl], y=rice$logProd2015[outl],
     labels=rice$Geographic.Area[outl], cex=0.8, pos=4)
|
f4b766f830d3bd7b1a0831ff66d769991be339e1 | 5d06cbf5b59b5bc94df9ad9024e382c5611c1ce6 | /temperature_uk/tensor2d_kron.r | debed8ba2aaa51f5e2efaaaa15340c3f76723d1f | [] | no_license | hyunjik11/GPT | f01168d3dc91bf2fe4a68e930cd4d49442db2402 | 1466cb4c34f0f8f0c6de9ffbf494a5baa73e2926 | refs/heads/master | 2020-12-03T05:09:47.873586 | 2016-05-17T08:17:49 | 2016-05-17T08:17:49 | 44,272,333 | 1 | 0 | null | 2015-10-14T19:48:31 | 2015-10-14T19:48:30 | null | UTF-8 | R | false | false | 3,101 | r | tensor2d_kron.r | library(ggplot2)
library(rstan)
# Fit the tensor2d_kron Stan model to the UK temperature data and report
# train/test RMSE plus MCMC convergence diagnostics.
# NOTE(review): N, Ntrain, n1, n2, indtrain*/indtest*, ytrain, ytest and
# sigma are expected to come from temp_final.RData -- confirm.
load('temp_final.RData')
# Eigenfeature matrices are stored transposed in the HDF5 file.
library(h5); file=h5file("temp_eigenfeatures.h5"); phiU=t(file["phiU"][]); phiV=t(file["phiV"][]);
# s: scaling factor applied to RMSE (presumably the sd used to standardize
# the responses, so RMSE is reported in original units -- confirm).
s=4.4960
r=10  # rank of the low-rank (Kronecker) approximation
data=list(N=N,Ntrain=Ntrain,n1=n1,n2=n2,r=r,phiU=phiU,phiV=phiV,
          indtrainU=indtrainU,indtrainV=indtrainV,indtestU=indtestU,indtestV=indtestV,
          ytrain=ytrain,ytest=ytest,sigma=sigma)
rstan_options(auto_write = TRUE)
options(mc.cores = 4)
model = stan_model("tensor2d_kron.stan")
#sink("tensor2d_10r.txt",append=TRUE)
numiter=200
warmup=100
ptm=proc.time()
fit = sampling(model, data=data, iter=numiter, chains=4,warmup=warmup)
#opt = optimizing(model,data=data)
#fit = vb(model,data=data)
time_elapsed=proc.time()-ptm
print(time_elapsed)
out = extract(fit)
# Per-iteration RMSE, averaging consecutive groups of 4 draws (one per
# chain?). NOTE(review): rstan::extract() permutes draws by default, so
# these groups do not correspond to chains/iterations -- confirm intent.
trainRMSE=rep(0,numiter-warmup)
testRMSE=rep(0,numiter-warmup)
for (i in 1:(numiter-warmup)){
  id=(4*(i-1)+1):(4*i)
  yfittrain=colMeans(out$trainpred[id,])
  yfittest=colMeans(out$testpred[id,])
  trainRMSE[i]=sqrt(mean((ytrain-yfittrain)^2))*s
  testRMSE[i]=sqrt(mean((ytest-yfittest)^2))*s
}
cat("r=",r,"\n")
print("trainRMSE=")
print(trainRMSE)
print("testRMSE=")
print(testRMSE)
# Convergence diagnostics averaged over all model parameters.
rhat=summary(fit)$summary[,"Rhat"];neff=summary(fit)$summary[,"n_eff"];
cat("rhat=",mean(rhat),"+/-",sd(rhat),";n_eff=",mean(neff),"+/-",sd(neff),"\n")
# Final RMSE from the full posterior mean prediction.
trainpred=colMeans(out$trainpred)
testpred=colMeans(out$testpred)
cat("final trainRMSE=",sqrt(mean((ytrain-trainpred)^2))*s,"\n")
cat("final testRMSE=",sqrt(mean((ytest-testpred)^2))*s,"\n")
#sink()
# library(R.matlab)
# df = readMat("/homes/hkim/TensorGP/src/uk_temp/temp.mat")
# attach(df)
# xtrain=as.matrix(xtrain)
# xtest=as.matrix(xtest)
# f = rbind(xtrain,xtest)
# ytrain=as.vector(ytrain)
# ytest=as.vector(ytest)
#
# l=exp(hyp.cov[c(1,3)]);
# sigma_RBF = exp(hyp.cov[c(2,4)])
# sigma=exp(hyp.lik)[1]
# # find Ls,Lt
# L_func = function(x,l,sigma_RBF) {
# if (length(dim(x)) >1) {
# N=dim(x)[1]
# K=matrix(0,N,N)
# for (i in 1:N) {
# for (j in 1:N) {
# K[i,j] = sigma_RBF*exp(-1/l^2 * sum((x[i,] - x[j,])^2))
# }
# }}
# else {N=length(x)
# K=matrix(0,N,N)
# for (i in 1:N) {
# for (j in 1:N) {
# K[i,j] = sigma_RBF*exp(-1/l^2 * sum((x[i] - x[j])^2))
# }
# }}
# return(t(chol(K,pivot=TRUE)))
# }
# space=unique(f[,1:2])
# n1=dim(space)[1]
# perm=sample(1:n1)
# space=space[perm,]
# temporal=unique(f[,3])
# temporal=as.matrix(temporal)
# n2=length(temporal)
# temporal=sample(temporal)
#
# phiU=L_func(space,l[1],sigma_RBF[1])
# phiV=L_func(temporal,l[2],sigma_RBF[2])
# Ntrain=length(ytrain);Ntest=length(ytest)
# N=Ntrain+Ntest
#
# indtrainU=vector(mode="integer",length=Ntrain)
# indtrainV=vector(mode="integer",length=Ntrain)
# indtestU=vector(mode="integer",length=Ntest)
# indtestV=vector(mode="integer",length=Ntest)
#
#
# for (i in 1:Ntrain) {
# idu = which(apply(space,1,function(x) all(x== xtrain[i,1:2])));
# indtrainU[i]=idu
# }
# indtrainV=match(xtrain[,3],temporal)
# for (i in 1:Ntest) {
# idu = which(apply(space,1,function(x) all(x== xtest[i,1:2])));
# indtestU[i]=idu
# }
# indtestV=match(xtest[,3],temporal)
|
18c32244913249a66ccbd8fdf709dc51ffb97b04 | 5266235fb56102a3506aab04c29e87208ab5412b | /script_modelR_pkg.r | 199f2bed37013db3d19361044205dbc222e3a359 | [] | no_license | diogosbr/scripts-modelagem | eaa3692174643a4adfc5691a1f6d8a23fc3bf8e8 | e14a3c70cb7d9edfda1b687c7c6ce92841961a90 | refs/heads/master | 2021-01-25T12:30:10.799520 | 2019-10-01T12:29:01 | 2019-10-01T12:29:01 | 123,472,865 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,572 | r | script_modelR_pkg.r |
#--------------------------#
#### Script modelR ####
#--------------------------#
# Species distribution modelling walk-through using the modleR package.
# Install the helper packages, if missing ----
packages = c("devtools", "leaflet", "dplyr", "raster", "dismo")
for (p in setdiff(packages, installed.packages()[, "Package"])) {
  install.packages(p, dependencies = T)
}
# Install the modelling package from GitHub ----
devtools::install_github("Model-R/modelr_pkg", ref = "documentation")
# Load packages
require(modleR)
require(raster)
require(dplyr)
require(leaflet)
# Occurrence records ----
# NOTE(review): the read.table() result is immediately overwritten by the
# GBIF download on the next line; keep only one of the two sources.
registros <- read.table("registros.csv", sep = ";", header = TRUE, stringsAsFactors = FALSE)
registros <- dismo::gbif(genus = "guapira",species = "laxa")
registros <- registros[,c("species","lon","lat")]
names(registros)[1] <- "sp"
# Inspect the occurrences ----
head(registros)
tail(registros)
# Map, without clustering the points ----
registros %>%
  na.omit() %>%
  leaflet() %>%
  addTiles() %>%
  addMarkers(lng = ~lon, lat = ~lat, popup = ~sp)
# Import the predictor variables (GeoTIFF rasters) as a stack
lista <- list.files("./wc2.0_10m_bio", pattern = ".tif$", full.names = TRUE)
predictors <- stack(lista)
plot(predictors[[1]])
# Mask used to crop the model
# NOTE(review): `mascara` is never created in this script; the next two
# lines fail unless it was loaded elsewhere.
mascara
plot(mascara, axes = T, add = T)
especies <- unique(registros$sp)
especies
result_folder <- "./teste"
for (i in 1:length(especies)) {
ocorrencias <- registros[registros$sp == especies[i], c("lon", "lat")]
#setup data
sdmdata_1sp <- setup_sdmdata(species_name = especies[i],
occurrences = ocorrencias,
predictors = predictors,
models_dir = result_folder,
partition_type = "crossvalidation",
cv_partitions = 4,
cv_n = 1,
seed = 512,
buffer_type = "mean",
plot_sdmdata = T,
n_back = 500,
clean_dupl = T,
clean_uni = T,
clean_nas = T,
geo_filt = F,
geo_filt_dist = 10,
select_variables = T,
percent = 0.5,
cutoff = 0.7
)
#gerando os modelos
do_many(species_name = especies[i],
predictors = predictors,
models_dir = result_folder,
write_png = T,
write_bin_cut = F,
bioclim = T,
domain = F,
glm = T,
svmk = T,
svme = T,
maxent = T,
maxnet = T,
rf = T,
mahal = F,
brt = T,
equalize = T)
#gerando os ensembles por algoritmo
final_model(species_name = especies[i],
algorithms = NULL, #if null it will take all the in-disk algorithms
models_dir = result_folder,
select_partitions = TRUE,
select_par = "TSS",
select_par_val = 0,
which_models = c("raw_mean", "bin_consensus"),
consensus_level = 0.5,
uncertainty = T,
overwrite = T)
#gerando o modelo final
ens <- ensemble_model(especies[i],
occurrences = ocorrencias,
which_final = "raw_mean",
models_dir = result_folder,
overwrite = TRUE) #argument from writeRaster
}
#explorando os resultados
ensemble_files <- list.files(paste0(test_folder,"/", species[1],"/present/ensemble"),
recursive = T,
pattern = "raw_mean.+tif$",
full.names = T)
ensemble_files
ens_mod <- stack(ensemble_files)
names(ens_mod) <- c("mean", "median", "range", "st.dev")
plot(ens_mod)
#carregando o modelo final
modelo = raster("./teste/Eugenia florida DC/ensemble/Eugenia florida DC._Final.mean.bin7_ensemble.tif")
#Plotando o modelo com o leaflet
coordenadas %>%
leaflet() %>%
addTiles(urlTemplate="http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}") %>%
addRasterImage(modelo, colors = rev(terrain.colors(25)), opacity = 0.4) %>%
addMarkers(lng = ~lon, lat = ~lat, popup = ~sp, clusterOptions = markerClusterOptions())
|
6c791887903ee53ad7a7b9c1f56749f1feeee1a9 | 277450a215e4d40276b921ea6adba802085f638c | /graphics.R | ccc7e1a7f66fafc9ca97393f88e22a43213cf31a | [] | no_license | mcfrank/word_learning_models | 93c40e55a10d88ba03c3daea9d433c1b54a3a36a | d524e0221120a326fe6f731a68ad3c5f897cc525 | refs/heads/master | 2020-09-12T19:30:44.379116 | 2019-11-18T19:35:19 | 2019-11-18T19:35:19 | 222,527,764 | 0 | 0 | null | 2019-11-18T19:29:50 | 2019-11-18T19:29:49 | null | UTF-8 | R | false | false | 1,426 | r | graphics.R | # functions to graph word-object co-occurrences from trial orderings and model associations
make_cooccurrence_matrix <- function(cond, print_matrix = FALSE, heatmap_filename = c()) {
  # Build a word x object co-occurrence matrix from the training trials in
  # `cond$train`, where `words` and `objs` are integer matrices with one row
  # per trial (entries index words/objects starting at 1).
  #
  # Args:
  #   cond: list with a `train` component holding matrices `words` and `objs`.
  #   print_matrix: if TRUE, print the matrix to the console.
  #   heatmap_filename: optional file stem; when supplied, a heatmap of the
  #     matrix is written to "<heatmap_filename>.pdf".
  #
  # Returns:
  #   A matrix m where m[w, o] counts the trials in which word w and object o
  #   co-occurred.
  words <- cond$train$words
  objs <- cond$train$objs
  m <- matrix(0, nrow = max(words), ncol = max(objs))
  # Each trial increments the cross-product of the words and objects it shows.
  for (t in seq_len(nrow(words))) {
    m[words[t, ], objs[t, ]] <- m[words[t, ], objs[t, ]] + 1
  }
  if (print_matrix) print(m)
  # BUG FIX: the original tested `length(heatmap_filename>0)`, comparing the
  # filename to 0 before taking the length; test the length itself.
  if (length(heatmap_filename) > 0) {
    pdf(paste0(heatmap_filename, ".pdf"))
    heatmap(m, Rowv = NA, Colv = "Rowv", scale = "none", margin = c(3, 3),
            xlab = "Object", ylab = "Word", col = heat.colors(10))
    # labRow=NA, labCol=NA
    dev.off()
  }
  return(m)
}
animate_trajectory <- function(mod, modname='', condname='') {
  # Render a model's word-object association trajectory as an animated GIF.
  # Each frame is a heatmap of the row-normalized association matrix at one
  # point along `mod$traj`; the output file is
  # "<modname>_model<condname>_cond_trajectory.gif".
  require(animation)
  ani.options(interval=.1)  # delay between frames, in seconds
  palette <- heat.colors(20)
  bin_breaks <- seq(0, 1, .05)  # 21 break points for the 20 colors
  gif_name <- paste(modname, "_model", condname, "_cond_trajectory.gif", sep='')
  saveGIF({
    for (step in 1:length(mod$traj)) {
      # Row-normalize the associations; the tiny offset avoids division by zero.
      assoc <- mod$traj[[step]] + 1e-7
      assoc <- assoc / rowSums(assoc)
      heatmap(assoc, Rowv=NA, Colv="Rowv", scale="none", margin=c(3,3),
              breaks=bin_breaks, xlab="Object", ylab="Word", col=palette)
    }
  }, movie.name=gif_name)
}
859815b1271e15d14419aa5e8c426646bd2a6504 | 668a51ac43842e497dc6633ce59a9cba02e41bf2 | /man/VariSel.Rd | 3b74933d20027d8b0f8efaeb770d4d9676ba7cf5 | [] | no_license | Marie-PerrotDockes/VariSel | e31e8fdb5f072a12e54de13606f50980c89ceed3 | 5ffb3105734f7546c378f0563a17a5d15115e542 | refs/heads/master | 2021-08-07T00:01:48.040966 | 2020-04-26T11:54:51 | 2020-04-26T11:54:51 | 161,149,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,800 | rd | VariSel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QTLmodclasss.R
\name{VariSel}
\alias{VariSel}
\title{Description of the function}
\value{
The coefficients of the fused lasso ANCOVA for the different values of lambda.
}
\description{
Description of the function
Description of the function
}
\examples{
B <- c(1, -1, 1.5, 1.5, rep(0, 6), 2, 0, 2, 0)
group <- c(rep('M1', 10), rep('M2', 10))
regressors <- matrix(rnorm(6*20), ncol = 6)
X <- model.matrix(~group + group:regressors - 1)
y <- X\%*\%B + rnorm(20)
y <- scale(y)
mod <- fl2(y, regressors, group)
colors <- c(rep("grey",2), rep('green',2),rep('black', 6), rep(c("orange","blue"), 2), 'darkgreen', rep('yellow',3), rep('purple',2))
matplot(mod$lambda ,t(mod$beta),type='l',col=colors)
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{VariSel$new()}}
\item \href{#method-estime}{\code{VariSel$estime()}}
\item \href{#method-predict}{\code{VariSel$predict()}}
\item \href{#method-get_coef}{\code{VariSel$get_coef()}}
\item \href{#method-plot_path}{\code{VariSel$plot_path()}}
\item \href{#method-plot_error}{\code{VariSel$plot_error()}}
\item \href{#method-plot_BIC}{\code{VariSel$plot_BIC()}}
\item \href{#method-plot_coef}{\code{VariSel$plot_coef()}}
\item \href{#method-ROC}{\code{VariSel$ROC()}}
\item \href{#method-plot_anime}{\code{VariSel$plot_anime()}}
\item \href{#method-plot_cv}{\code{VariSel$plot_cv()}}
\item \href{#method-clone}{\code{VariSel$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$new(
x,
y,
univ = TRUE,
Sigma_12inv = diag(1, ncol(as.data.frame(y))),
sepy = "__",
sepx = NULL
)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-estime"></a>}}
\subsection{Method \code{estime()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$estime()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-predict"></a>}}
\subsection{Method \code{predict()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$predict(new_x, lambda = NULL, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{lambda}}{if the user wants to use it owns values of lambdas}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_coef"></a>}}
\subsection{Method \code{get_coef()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$get_coef()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_path"></a>}}
\subsection{Method \code{plot_path()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_path(type = "first", nb = 6)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_error"></a>}}
\subsection{Method \code{plot_error()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_error(print = TRUE)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_BIC"></a>}}
\subsection{Method \code{plot_BIC()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_BIC(print = TRUE)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_coef"></a>}}
\subsection{Method \code{plot_coef()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_coef(tresh, sel = colnames(private$y))}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-ROC"></a>}}
\subsection{Method \code{ROC()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$ROC(b)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_anime"></a>}}
\subsection{Method \code{plot_anime()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_anime(name_pos = NULL, num_max = 30)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-plot_cv"></a>}}
\subsection{Method \code{plot_cv()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$plot_cv(sel = colnames(private$y), s = "lambda.min")}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VariSel$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
91434499939cc3a496ec9dd355cd4fe6f2571751 | 317e0bf18fddd62d1121dd320e37323e18beac1c | /Code/Modeling/linear_model_testing.R | bdb212540c2ee42f1b55a91c251ca069e430d9bd | [
"MIT"
] | permissive | gorodnichy/o-canada-election | 154ab2addf50146e30e3bcd9cf50e27c38b0f7db | b3f243ccb0511da461fe72158b5b6367dba29358 | refs/heads/master | 2022-12-02T20:50:56.386278 | 2020-08-25T02:11:16 | 2020-08-25T02:11:16 | 287,423,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,352 | r | linear_model_testing.R | #### Testing linear regression models ####
source("Code/historical_results.R")

# Build the modeling data set: drop 2004 (no lagged results available),
# zero-fill the fundraising columns and add region/incumbency indicators.
results <- historical_results.district %>% 
  filter(year != 2004) %>%
  mutate_at(vars(contains("funds")), function(x) {
    x[is.na(x)] <- 0
    return(x)
  }) %>%
  mutate(Quebec = (province == "Quebec"),
         Atlantic = (region == "Atlantic"),
         Vancouver_Island = district_code %in% c(59008, 59014, 59015, 59024, 59031, 59035),
         incumbent = relevel(incumbent, ref = "None"),
         incumbent_LPC = incumbent == "Liberal",
         incumbent_CPC = incumbent == "Conservative",
         incumbent_NDP = incumbent == "NDP",
         incumbent_Green = incumbent == "Green",
         incumbent_Bloc = incumbent == "Bloc",
         incumbent_PPC = name_english == "Beauce")

# The Bloc only runs in Quebec, so missing vote shares mean "no candidate".
results$Bloc[is.na(results$Bloc)] <- 0
# BUG FIX: the line above was duplicated verbatim; the *lagged* Bloc share
# also needs zero-filling, otherwise lm() silently drops every row with a
# missing Bloc_lag from the fits below.
results$Bloc_lag[is.na(results$Bloc_lag)] <- 0

# Leave-one-out cross-validation of the hand-specified linear models:
# refit on all other district-years, predict the held-out one.
n <- nrow(results)
LPC_error <- CPC_error <- NDP_error <- Green_error <- Bloc_error <- rep(NA, n)
for(i in 1:n) {
  cat("District ", results$district_code[i], ": ", results$name_english[i], " (", results$year[i], ")", "\n", sep = "")
  
  # Train/test split
  train <- results[-i,]
  test <- results[i,]
  
  # Fit linear models
  model_LPC.linear_test <- lm(LPC~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+LPC_nation+
                                LPC_region+LPC_region_lag+educ_university+minority, data = train)
  model_CPC.linear_test <- lm(CPC~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+CPC_nation+
                                CPC_region+CPC_nation_lag+CPC_region_lag+minority, data = train)
  model_NDP.linear_test <- lm(NDP~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+NDP_nation+
                                NDP_region+NDP_nation_lag+NDP_region_lag, data = train)
  model_Green.linear_test <- lm(Green~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Green_nation+
                                  Green_region+Green_nation_lag+Green_region_lag+minority, data = train)
  model_Bloc.linear_test <- lm(Bloc~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Bloc_region+
                                 Bloc_region_lag, data = train)
  
  # Make test prediction and compute error (prediction minus actual)
  test <- test %>%
    mutate(LPC.pred = predict(model_LPC.linear_test, newdata = .), LPC.error = LPC.pred - LPC,
           CPC.pred = predict(model_CPC.linear_test, newdata = .), CPC.error = CPC.pred - CPC,
           NDP.pred = predict(model_NDP.linear_test, newdata = .), NDP.error = NDP.pred - NDP,
           Green.pred = predict(model_Green.linear_test, newdata = .), Green.error = Green.pred - Green,
           Bloc.pred = predict(model_Bloc.linear_test, newdata = .), Bloc.error = Bloc.pred - Bloc)
  
  LPC_error[i] <- test$LPC.error[1]
  CPC_error[i] <- test$CPC.error[1]
  NDP_error[i] <- test$NDP.error[1]
  Green_error[i] <- test$Green.error[1]
  Bloc_error[i] <- test$Bloc.error[1]
}

# Collect the per-district errors alongside identifying columns.
linear_model.errors <- results %>%
  mutate(LPC_error = LPC_error,
         CPC_error = CPC_error,
         NDP_error = NDP_error,
         Green_error = Green_error,
         Bloc_error = Bloc_error) %>%
  dplyr::select(district_code, name_english, year, incumbent, province, region, LPC_error, CPC_error, NDP_error, Green_error, Bloc_error)

# BUG FIX: the original wrote this to "linear_model_errors2.csv", which is the
# second model's output file below; each model now gets its own file.
write.csv(linear_model.errors, file = "Output/Model testing/linear_model_errors.csv", row.names = FALSE)
## RMSE
# Overall leave-one-out RMSE per party.
# NOTE(review): mean_squares() is not defined in this file -- presumably a
# helper sourced from Code/historical_results.R; verify it computes mean(x^2).
linear_model.errors %>%
  summarise(n = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

# RMSE by party, region
linear_model.errors %>%
  group_by(region) %>%
  summarise(districts = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

# RMSE by party, year
linear_model.errors %>%
  group_by(year) %>%
  summarise(districts = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

## Density plots
# Party, overall (melt() reshapes the five per-party error columns to long
# format; the territories are excluded).
# NOTE(review): quebec_colors / quebec_parties are assumed to come from
# Code/historical_results.R -- TODO confirm.
linear_model.errors %>%
  filter(region != "The frigid northlands") %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Density")

# Party, by region
linear_model.errors %>%
  filter(region != "The frigid northlands") %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  facet_wrap(~region) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Observations")
ggsave(filename = "Output/Model graphs/linear_model_errors_region.png", width = 20, height = 12)

# Party, by year (note: unlike the plots above, this one keeps the territories)
linear_model.errors %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  facet_wrap(~year) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Observations")
ggsave(filename = "Output/Model graphs/linear_model_errors_year.png", width = 20, height = 7)
#### Using regional and national swings instead of raw numbers ####
# Same LOOCV exercise as above, but the specifications use *changes* (swings)
# in national/regional support, the Green model predicts the swing in Green
# share, and the Bloc is modeled on Quebec districts only.
results <- historical_results.district %>% 
  filter(year != 2004) %>%
  mutate_at(vars(contains("funds")), function(x) {
    x[is.na(x)] <- 0
    return(x)
  }) %>%
  mutate(Quebec = (province == "Quebec"),
         Atlantic = (region == "Atlantic"),
         Vancouver_Island = district_code %in% c(59008, 59014, 59015, 59024, 59031, 59035),
         incumbent = relevel(incumbent, ref = "None"),
         incumbent_LPC = incumbent == "Liberal",
         incumbent_CPC = incumbent == "Conservative",
         incumbent_NDP = incumbent == "NDP",
         incumbent_Green = incumbent == "Green",
         incumbent_Bloc = incumbent == "Bloc",
         incumbent_PPC = name_english == "Beauce")

results$Bloc[is.na(results$Bloc)] <- 0
# BUG FIX: the line above was duplicated verbatim; the lagged Bloc share also
# needs zero-filling so lm() does not drop rows with a missing Bloc_lag.
results$Bloc_lag[is.na(results$Bloc_lag)] <- 0

n <- nrow(results)
LPC_error <- CPC_error <- NDP_error <- Green_error <- Bloc_error <- rep(NA, n)
for(i in 1:n) {
  cat("District ", results$district_code[i], ": ", results$name_english[i], " (", results$year[i], ")", "\n", sep = "")
  
  # Train/test split
  train <- results[-i,]
  test <- results[i,]
  
  # Fit linear models (swing specifications)
  model_LPC.linear_test2 <- lm(LPC~incumbent_LPC+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Quebec+I(LPC_nation-LPC_nation_lag)+I(LPC_region-LPC_region_lag)+
                                 educ_university+minority, data = train)
  model_CPC.linear_test2 <- lm(CPC~incumbent_LPC+incumbent_CPC+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+CPC_nation+I(CPC_region-CPC_region_lag)+minority, 
                               data = train)
  model_NDP.linear_test2 <- lm(NDP~incumbent_LPC+incumbent_CPC+incumbent_NDP+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+
                                 I(NDP_nation-NDP_nation_lag)+I(NDP_region-NDP_region_lag)+age_65, data = train)
  model_Green.linear_test2 <- lm(I(Green-Green_lag)~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+incumbent_Green+LPC_lag+CPC_lag+NDP_lag+
                                   Bloc_lag+I(Green_nation-Green_nation_lag)+I(Green_region-Green_region_lag)+Vancouver_Island, 
                                 data = train)
  model_Bloc.linear_test2 <- lm(Bloc~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+
                                  I(Bloc_nation-Bloc_nation_lag)+I(Bloc_region-Bloc_region_lag), data = train %>% filter(Quebec))
  
  # Make test prediction and compute error. The Green model predicts the
  # swing, so Green_lag is added back; the Bloc prediction applies only in
  # Quebec and is 0 elsewhere.
  test <- test %>%
    mutate(LPC.pred = predict(model_LPC.linear_test2, newdata = .), LPC.error = LPC.pred - LPC,
           CPC.pred = predict(model_CPC.linear_test2, newdata = .), CPC.error = CPC.pred - CPC,
           NDP.pred = predict(model_NDP.linear_test2, newdata = .), NDP.error = NDP.pred - NDP,
           Green.pred = predict(model_Green.linear_test2, newdata = .) + Green_lag, Green.error = Green.pred - Green,
           Bloc.pred = case_when(Quebec ~ predict(model_Bloc.linear_test2, newdata = .),
                                 !Quebec ~ 0),
           Bloc.error = Bloc.pred - Bloc)
  
  LPC_error[i] <- test$LPC.error[1]
  CPC_error[i] <- test$CPC.error[1]
  NDP_error[i] <- test$NDP.error[1]
  Green_error[i] <- test$Green.error[1]
  Bloc_error[i] <- test$Bloc.error[1]
}

# Collect the per-district errors alongside identifying columns.
linear_model.errors2 <- results %>%
  mutate(LPC_error = LPC_error,
         CPC_error = CPC_error,
         NDP_error = NDP_error,
         Green_error = Green_error,
         Bloc_error = Bloc_error) %>%
  dplyr::select(district_code, name_english, year, incumbent, province, region, LPC_error, CPC_error, NDP_error, Green_error, Bloc_error)

write.csv(linear_model.errors2, file = "Output/Model testing/linear_model_errors2.csv", row.names = FALSE)
## RMSE
# Overall leave-one-out RMSE per party for the swing-based specifications.
# NOTE(review): mean_squares() is presumably a helper from
# Code/historical_results.R -- verify it computes mean(x^2).
linear_model.errors2 %>%
  summarise(n = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

# RMSE by party, region
linear_model.errors2 %>%
  group_by(region) %>%
  summarise(districts = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

# RMSE by party, year
linear_model.errors2 %>%
  group_by(year) %>%
  summarise(districts = n(),
            LPC = sqrt(mean_squares(LPC_error)),
            CPC = sqrt(mean_squares(CPC_error)),
            NDP = sqrt(mean_squares(NDP_error)),
            Green = sqrt(mean_squares(Green_error)),
            Bloc = sqrt(mean_squares(Bloc_error)))

## Density plots
# Party, overall (melt() reshapes the per-party error columns to long format;
# the territories are excluded).
# NOTE(review): quebec_colors / quebec_parties are assumed to come from
# Code/historical_results.R -- TODO confirm.
linear_model.errors2 %>%
  filter(region != "The frigid northlands") %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Density")

# Party, by region
linear_model.errors2 %>%
  filter(region != "The frigid northlands") %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  facet_wrap(~region) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Observations")
ggsave(filename = "Output/Model graphs/linear_model_errors_region2.png", width = 20, height = 12)

# Party, by year (note: unlike the plots above, this one keeps the territories)
linear_model.errors2 %>%
  melt(id.vars = c("district_code", "name_english", "year", "incumbent", "province", "region"),
       variable.name = "party", value.name = "error") %>%
  ggplot(aes(x = error, fill = party)) +
  facet_wrap(~year) +
  geom_histogram(col = "black", binwidth = 0.02) +
  scale_fill_manual(name = "Party", values = quebec_colors[1:5], labels = quebec_parties[1:5]) +
  labs(title = "Distribution of errors by party and region",
       subtitle = "Linear regression model",
       x = "Error", y = "Observations")
ggsave(filename = "Output/Model graphs/linear_model_errors_year2.png", width = 20, height = 7)
## Use AIC and BIC for model selection
# Each party starts from a deliberately over-specified "kitchen sink" model;
# step() then searches both directions for the specification minimizing AIC
# (default penalty k = 2) or BIC (k = log(n); `n` is the number of
# district-years defined earlier in this file).
# NOTE(review): several initial formulas repeat a term (e.g. NDP_lag,
# Green_lag, Bloc_region appear twice); lm() drops duplicate terms, but the
# formulas could be cleaned up.

#### Liberals ####
model_LPC.initial <- lm(LPC~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Quebec+LPC_nation+
                          LPC_region+LPC_nation_lag+LPC_region_lag+CPC_nation+CPC_region+sex_female+age_65+educ_university+minority+
                          pop_growth_rate,
                        data = results)

## AIC
LPC.stepAIC <- step(model_LPC.initial, scope = ~., direction = "both")

## BIC
LPC.stepBIC <- step(model_LPC.initial, scope = ~., direction = "both", k = log(n))

#### Conservatives ####
model_CPC.initial <- lm(CPC~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Quebec+CPC_nation+
                          CPC_region+CPC_nation_lag+CPC_region_lag+LPC_nation+LPC_region+LPC_region_lag+sex_female+age_65+educ_university+minority+
                          pop_growth_rate,
                        data = results)

## AIC
CPC.stepAIC <- step(model_CPC.initial, scope = ~., direction = "both")

## BIC
CPC.stepBIC <- step(model_CPC.initial, scope = ~., direction = "both", k = log(n))

#### NDP ####
model_NDP.initial <- lm(NDP~incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Quebec+NDP_nation+
                          NDP_region+NDP_lag+NDP_nation_lag+NDP_region+LPC_nation_lag+LPC_region_lag+CPC_nation+CPC_region+sex_female+age_65+
                          educ_university+minority+pop_growth_rate,
                        data = results)

## AIC
NDP.stepAIC <- step(model_NDP.initial, scope = ~., direction = "both")

## BIC
NDP.stepBIC <- step(model_NDP.initial, scope = ~., direction = "both", k = log(n))

#### Green ####
model_Green.initial <- lm(Green~incumbent_Green+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Quebec+Green_nation+Green_region+Green_lag+Green_nation_lag+
                            Green_region_lag+LPC_nation+LPC_region+CPC_nation+CPC_region+sex_female+age_65+educ_university+minority+pop_growth_rate,
                          data = results)

## AIC
Green.stepAIC <- step(model_Green.initial, scope = ~., direction = "both")

## BIC
Green.stepBIC <- step(model_Green.initial, scope = ~., direction = "both", k = log(n))

#### Bloc ####
# The Bloc model interacts everything with Quebec (it only runs there).
model_Bloc.initial <- lm(Bloc~Quebec*(incumbent_LPC+incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Bloc_nation+
                           Bloc_region+Bloc_lag+Bloc_nation_lag+Bloc_region+LPC_nation_lag+LPC_region_lag+CPC_nation+CPC_region+sex_female+age_65+
                           educ_university+minority+pop_growth_rate),
                         data = results)

## AIC
Bloc.stepAIC <- step(model_Bloc.initial, scope = ~., direction = "both")

## BIC
Bloc.stepBIC <- step(model_Bloc.initial, scope = ~., direction = "both", k = log(n))
#### Selected models ####
# Leave-one-out cross-validation of the AIC- and BIC-selected specifications
# (AIC and BIC agreed on the NDP and Bloc-AIC structures below).
n <- nrow(results)
LPC_error.AIC <- LPC_error.BIC <- CPC_error.AIC <- CPC_error.BIC <- NDP_error.AIC <- Green_error.AIC <- Green_error.BIC <- Bloc_error.AIC <- 
  Bloc_error.BIC <- rep(NA, n)

for(i in 1:n) {
  cat("District ", results$district_code[i], ": ", results$name_english[i], " (", results$year[i], ")\n", sep = "")
  
  ## Train/test split
  train <- results[-i,]
  test <- results[i,]
  
  ## Models (AIC and BIC agree on the NDP model)
  # NOTE(review): the CPC AIC formula lists CPC_lag twice; lm() drops the
  # duplicate term, so this is harmless but could be cleaned up.
  LPC_model_cv.AIC <- lm(LPC~incumbent_LPC+incumbent_NDP+LPC_lag+CPC_lag+NDP_lag+Bloc_lag+LPC_nation+LPC_nation_lag+LPC_region+LPC_region_lag+
                           educ_university+minority+pop_growth_rate, data = train)
  LPC_model_cv.BIC <- lm(LPC~incumbent_LPC+LPC_lag+CPC_lag+NDP_lag+Bloc_lag+LPC_nation+LPC_nation_lag+LPC_region+LPC_region_lag+minority, 
                         data = train)
  CPC_model_cv.AIC <- lm(CPC~incumbent_CPC+incumbent_NDP+incumbent_Bloc+LPC_lag+CPC_lag+NDP_lag+Green_lag+CPC_lag+CPC_region+CPC_region_lag+
                           CPC_nation+CPC_nation_lag+Quebec+educ_university, data = train)
  CPC_model_cv.BIC <- lm(CPC~incumbent_CPC+LPC_lag+CPC_lag+NDP_lag+Bloc_lag+CPC_nation+CPC_nation_lag+CPC_region+CPC_region_lag+Quebec+educ_university,
                         data = train)
  NDP_model_cv.AIC <- lm(NDP~incumbent_LPC+incumbent_CPC+incumbent_NDP+NDP_lag+Quebec+NDP_nation+NDP_region+NDP_nation_lag+NDP_region_lag+
                           LPC_region_lag+CPC_region+age_65, data = train)
  Green_model_cv.AIC <- lm(Green~incumbent_Green+LPC_lag+CPC_lag+NDP_lag+Green_lag+Bloc_lag+Green_region+Green_region_lag+Green_nation_lag+
                             age_65+educ_university+minority, data = train)
  Green_model_cv.BIC <- lm(Green~Green_lag+Green_region+Green_region_lag+educ_university+minority, data = train)
  Bloc_model_cv.AIC <- lm(Bloc~Quebec+Quebec:(incumbent_LPC+incumbent_CPC+incumbent_NDP+LPC_lag+CPC_lag+NDP_lag+Bloc_nation+educ_university+age_65)+
                            Bloc_lag+Bloc_region, data = train)
  Bloc_model_cv.BIC <- lm(Bloc~Quebec+Quebec:(NDP_lag+Bloc_nation+educ_university)+Bloc_lag+Bloc_region, data = train)
  
  ## LOOCV
  # BUG FIX: predict.lm() has no `data` argument. The original calls used
  # `predict(model, data = test)`, so `data` was silently swallowed by `...`
  # and predict() returned the n-1 *training-set fitted values*; subtracting
  # the held-out outcome then produced garbage (and a warning) when assigned
  # to a single cell. The out-of-sample prediction needs `newdata = test`.
  LPC_error.AIC[i] <- predict(LPC_model_cv.AIC, newdata = test) - test$LPC
  LPC_error.BIC[i] <- predict(LPC_model_cv.BIC, newdata = test) - test$LPC
  CPC_error.AIC[i] <- predict(CPC_model_cv.AIC, newdata = test) - test$CPC
  CPC_error.BIC[i] <- predict(CPC_model_cv.BIC, newdata = test) - test$CPC
  NDP_error.AIC[i] <- predict(NDP_model_cv.AIC, newdata = test) - test$NDP
  Green_error.AIC[i] <- predict(Green_model_cv.AIC, newdata = test) - test$Green
  Green_error.BIC[i] <- predict(Green_model_cv.BIC, newdata = test) - test$Green
  Bloc_error.AIC[i] <- predict(Bloc_model_cv.AIC, newdata = test) - test$Bloc
  Bloc_error.BIC[i] <- predict(Bloc_model_cv.BIC, newdata = test) - test$Bloc
}

#### RMSE ####
## Liberal
sqrt(mean(LPC_error.AIC^2))
sqrt(mean(LPC_error.BIC^2))

## Conservative
sqrt(mean(CPC_error.AIC^2))
sqrt(mean(CPC_error.BIC^2))

## NDP
sqrt(mean(NDP_error.AIC^2))

## Green
sqrt(mean(Green_error.AIC^2))
sqrt(mean(Green_error.BIC^2))

## Bloc
sqrt(mean(Bloc_error.AIC^2))
sqrt(mean(Bloc_error.BIC^2))

## (These RMSEs are now computed from genuine out-of-sample predictions; the
## previous "THIS IS A DISASTER" results came from the predict() bug fixed
## inside the loop above.)
|
fc5cdb66d6d87212d459d543be4a796be6e38414 | d3626380f7447b8bae9a0b344c64854cb461e6ea | /R/calc-agreement-mat.R | 237f75fcc22728a89f97cb950b885501a3fe519a | [
"MIT"
] | permissive | emilelatour/lagree | cb05207da2f284457d80b0474957a756d13e648f | ccfda37473ecb9cdd6de8320086ec63b509f0b39 | refs/heads/master | 2020-07-14T02:12:24.995797 | 2019-08-30T17:48:43 | 2019-08-30T17:48:43 | 205,210,029 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,144 | r | calc-agreement-mat.R | #' @title
#' Calculate an agreement matrix
#'
#' @description
#' Create an n x q agreement matrix representing the distribution of raters by
#' subjects (n) and category (q)
#'
#' @param data A tbld_df or data.frame
#' @param ... Columns (unquoted) with the "ratings" done by the raters. Follows
#' the argument methodology from `dplyr::select`.
#' @param subject_id Optional. Name of the column (unquoted) that contains the
#' IDs for the subjects or units being rated.
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr select
#' @importFrom dplyr summarise
#' @importFrom purrr discard
#' @importFrom rlang enquo
#' @importFrom rlang quo_is_null
#' @importFrom tidyr gather
#'
#' @references
#' 2014. Handbook of Inter-Rater Reliability: The Definitive Guide to Measuring
#' the Extent of Agreement Among Raters. 4th ed. Gaithersburg, MD: Advanced
#' Analytics.
#'
#' @return An n x q table
#' @export
#'
#' @examples
#' # See Gwet page 72
#' # Classification of 12 subjects by 4 raters into 5 categories
#'
#' library(dplyr)
#' library(tibble)
#'
#' table_215 <- tibble::tribble(
#' ~unit, ~rater_1, ~rater_2, ~rater_3, ~rater_4,
#' 1, "a", "a", NA, "a",
#' 2, "b", "b", "c", "b",
#' 3, "c", "c", "c", "c",
#' 4, "c", "c", "c", "c",
#' 5, "b", "b", "b", "b",
#' 6, "a", "b", "c", "d",
#' 7, "d", "d", "d", "d",
#' 8, "a", "a", "b", "a",
#' 9, "b", "b", "b", "b",
#' 10, NA, "e", "e", "e",
#' 11, NA, NA, "a", "a",
#' 12, NA, NA, "c", NA
#' )
#'
#' calc_agree_mat(data = table_215,
#' dplyr::starts_with("rater"))
#' calc_agree_mat(data = table_215,
#' dplyr::starts_with("rater"),
#' subject_id = "unit")
#'
calc_agree_mat <- function(data, ...,
                           subject_id = NULL) {

  # Create an n x q agreement matrix (subjects x categories) counting how
  # many raters assigned each subject to each category. `...` selects the
  # rater columns; `subject_id` optionally names the subject column,
  # otherwise the row number is used.

  # Capture the (optional) subject-id column for tidy evaluation.
  subject_id <- rlang::enquo(subject_id)

  ## Determine the set of rating categories ----------------
  # If the rating columns are factors, keep their full set of levels so that
  # categories never used by any rater still get a column in the table.
  fac_lvls <- data %>%
    dplyr::select(...) %>%
    unlist() %>%
    unique() %>%
    levels()

  # Otherwise (character/numeric ratings) fall back to the unique observed
  # values, dropping missings.
  if (is.null(fac_lvls)) {
    fac_lvls <- data %>%
      dplyr::select(...) %>%
      unlist() %>%
      unique() %>%
      purrr::discard(., is.na)
  }

  # (A block computing k_raters / n_subjects / q_categories summary counts
  # existed here in the original version but its result was never used; the
  # dead code has been removed.)

  if (rlang::quo_is_null(subject_id)) {
    # No subject id supplied: use the row number as the subject, then
    # cross-tabulate subjects against ratings across all rater columns.
    agree_mat <- data %>%
      dplyr::select(...) %>%
      mutate(subject = dplyr::row_number(),
             subject = factor(subject)) %>%
      tidyr::gather(.,
                    key = "raters",
                    value = "ratings",
                    - subject,
                    factor_key = TRUE) %>%
      mutate(ratings = factor(ratings,
                              levels = fac_lvls)) %>%
      with(., table(subject, ratings))

  } else {
    # Cross-tabulate ratings against the user-supplied subject id column.
    agree_mat <- data %>%
      dplyr::select(!! subject_id, ...) %>%
      tidyr::gather(.,
                    key = "raters",
                    value = "ratings",
                    ...,
                    factor_key = TRUE) %>%
      mutate(ratings = factor(ratings,
                              levels = fac_lvls)) %>%
      dplyr::select(!! subject_id, ratings) %>%
      table(.)
  }

  return(agree_mat)
}
# #### Gwet's version --------------------------------
#
#
# # ==============================================================
# # trim(x): This is an r function for trimming leading and trealing blanks
# # ==============================================================
# trim <- function( x ) {
# gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
# }
#
# calc_agree_mat_gwet <- function(data, ...,
# subject_id = NULL) {
#
# ratings.mat <- data %>%
# dplyr::select(...) %>%
# as.matrix(.)
#
# if (is.character(ratings.mat)) {
# ratings.mat <- trim(toupper(ratings.mat))
# ratings.mat[ratings.mat == ''] <- NA_character_
# }
#
# n <- nrow(ratings.mat) # number of subjects
# r <- ncol(ratings.mat) # number of raters
# # f <- n / N # finite population correction
#
# # creating a vector containing all categories used by the raters
#
# categ.init <- unique(na.omit(as.vector(ratings.mat)))
# categ <- sort(categ.init)
# q <- length(categ)
#
# agree.mat <- matrix(0, nrow = n, ncol = q)
#
# for (k in 1:q) {
# categ.is.k <- (ratings.mat == categ[k])
# agree.mat[, k] <-
# (replace(categ.is.k, is.na(categ.is.k), FALSE)) %*% rep(1, r)
# }
#
# return(agree.mat)
#
#
# }
|
561393e598e10d0d2c4e269f0e4bce158d805845 | 1b158221ddb10742a54e2224cb741d3f8285ff29 | /codiceR/confrontoBias.R | bd14672d92212200e4c72a54fb0bbcdd4fbc1cc3 | [] | no_license | tega/grafici-stat2 | e971f1aad78c4be2ae3dfb7494362a743b57cb5d | 6aacffaba9f1ab86b69f7c4fbcd4762f0a1d81ed | refs/heads/master | 2021-12-14T08:51:25.092775 | 2021-12-03T15:06:21 | 2021-12-03T15:06:21 | 871,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 636 | r | confrontoBias.R | # TODO: Add comment
#
# Author: ortellic
###############################################################################
# TODO: Add comment
#
# Author: ortellic
###############################################################################
# Compare the mean squared error of two estimators of mu as a function of the
# true value: a quadratic MSE curve versus a constant one. The figure is
# written to grafici/confrontoBias.pdf.
mse_curve <- function(m) {
  1/27 + 4/9 * m^2
}
mu_grid <- seq(-2, 2, length.out = 100)
mse_quadratic <- mse_curve(mu_grid)
mse_constant <- rep(1/3, 100)
pdf(file = "grafici/confrontoBias.pdf")
plot(mu_grid, mse_quadratic, type = "l", lty = 3, las = 1,
     xlab = expression(paste("Vero valore di", ~ ~mu)), ylab = "")
lines(mu_grid, mse_constant)
# Label the two curves with plotmath expressions.
text(-1.5, 1.5, expression(EQM[hat(mu)[5]]), cex = 1.2)
text(1.7, 0.4, expression(EQM[hat(mu)[2]]), cex = 1.2)
dev.off()
|
eb65378309ebba8911582874ffa6aba968516282 | b5338c02bd9bf11af2b61576eed473ad472ecd55 | /man/translate_old_english.Rd | 1bda898d1d12113c48e081417c1cb4b549fd3f3d | [
"MIT"
] | permissive | rsquaredacademy/funtranslater | 4eddd216f39c5a9f109f2f778c18ac77dded24cc | 149f3eb5d2607134693ab033b338e5ad7030d620 | refs/heads/master | 2023-08-30T17:01:58.605113 | 2018-11-10T07:17:39 | 2018-11-10T07:17:39 | 156,668,183 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 399 | rd | translate_old_english.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/english.R
\name{translate_old_english}
\alias{translate_old_english}
\title{Old English translator}
\usage{
translate_old_english(text)
}
\arguments{
\item{text}{Text to be converted.}
}
\description{
Convert from modern English to old English.
}
\examples{
\dontrun{
translate_old_english("What nonsense is this?")
}
}
|
a17a7d41d3281aa8df6edd0b09e181669dd1d986 | 0d82a708ed6247ef43f7af9bd346544243fb83f5 | /maps.R | ee166523912f5c3bf9cb54362191c4242beee283 | [] | no_license | mattmikem/flp | 94488c763518eb84b2c23e6545204f95bb9bb528 | be5bcc7d182e98d9a87db496b86c04d7f26a6498 | refs/heads/master | 2021-01-18T18:32:41.794081 | 2016-09-13T18:10:43 | 2016-09-13T18:10:43 | 61,953,985 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,766 | r | maps.R | ####################################################################
# Employment Density Maps for HSI #
# M. Miller, UCLA, 16X #
####################################################################
#Packages requires ggmap, ggplot2
#install.packages("ggmap")
#install.packages("ggplot2")
#install.packages("doBy")
#install.packages("foreign")
#install.packages("maptools")
#install.packages("raster")
#install.packages("rgdal")
#install.packages("rgeos")
#install.packages("dplyr")
install.packages("gpclib", type = "source")
library(ggmap)
library(ggplot2)
library(raster)
library(rgeos)
library(maptools)
library(rgdal)
library(foreign)
library(dplyr)
setwd("C:/Users/Matthew/Dropbox/Research/Urban/Papers/Heat Eq and Urban Structure/Data")
shp <- "C:/Users/Matthew/Desktop/ECON/Research/Urban/Papers/City Center Resurgence/GIS/Working Files/Output"
dta <- "C:/Users/Matthew/Dropbox/Research/Urban/Papers/Delayed Marriage/Draft"
#Load in shapefiles
test <- readOGR(shp,"ua_lehd_acs_0")
#NY example of gentrification and gender plot
test.ny <- fortify(test, region = "GEOID")
ny_map <- get_map(location = "New York, NY", zoom=12)
ncdb_path <- paste(dta, "/ncdb_fortableau.csv", sep = "")
data <- read.csv(ncdb_path, stringsAsFactors = FALSE)
test.ny$id <- as.numeric(test.ny$id)
data$id <- data$geo2010
MapData <- left_join(data, test.ny)
MD.2010 <- MapData[MapData$year == 2010,]
MD.2010 <- MD.2010[MD.2010$mf_rat < 10,]
ggplot() + geom_polygon(data = MD.2010, aes(x=long,y=lat,group=group, fill = MD.2010$mf_rat))
persp(test$INTPTLON, test$INTPTLAT, test$EMP_DENS)
#Load in employment LEHD data (Worker Area Characteristics - WAC)
wac <- read.dta("wac_trct_12.dta")
|
2270bd5671fa7a0695cfbc0cab65882c6fcc382f | 909d765c7075ffdec2085e9ea8c74780689cda59 | /naive_bayes/naive_bayes.R | 4fd2595e676db9ec11fd240a830aab353f366feb | [] | no_license | tianqinglong/dmc2019 | 3b76515f935f74c02049ed82c659d6ce7dd8d1da | 38987bac2e8ffdbe487e405db6809a30ac141c29 | refs/heads/master | 2020-05-05T05:11:02.552011 | 2020-03-15T04:05:04 | 2020-03-15T04:05:04 | 179,742,094 | 4 | 3 | null | 2019-04-12T18:40:38 | 2019-04-05T19:23:55 | R | UTF-8 | R | false | false | 1,571 | r | naive_bayes.R | library(tidyverse)
library(caret)
library(h2o)
train <- read.csv("train.csv", sep = "|")
train %>% mutate(
fraud = as.factor(fraud),
totalItems = totalScanTimeInSeconds * scannedLineItemsPerSecond
) -> train
h2o.no_progress()
h2o.init()
train.h2o <- train %>% as.h2o()
y <- "fraud"
x <- setdiff(names(train), y)
nb.h2o <- h2o.naiveBayes(
x = x,
y = y,
training_frame = train.h2o,
nfolds = 10,
laplace = 0
)
h2o.confusionMatrix(nb.h2o)
preprocess <- preProcess(train, method = c("BoxCox",
"center",
"scale"))
train_pp <- predict(preprocess, train)
train_pp.h2o <-
train_pp %>%
as.h2o()
y <- "fraud"
x <- setdiff(names(train), y)
hyper_params <- list(
laplace = seq(0, 5, by = 0.5)
)
grid <- h2o.grid(
algorithm = "naivebayes",
grid_id = "nb_grid",
x = x,
y = y,
training_frame = train_pp.h2o,
nfolds = 10,
hyper_params = hyper_params
)
sorted_grid <- h2o.getGrid("nb_grid", sort_by = "accuracy", decreasing = TRUE)
sorted_grid
best_h2o_model <- sorted_grid@model_ids[[1]]
best_model <- h2o.getModel(best_h2o_model)
h2o.confusionMatrix(best_model)
# auc <- h2o.auc(best_model, xval = TRUE)
# fpr <- h2o.performance(best_model, xval = TRUE) %>% h2o.fpr() %>% .[['fpr']]
# tpr <- h2o.performance(best_model, xval = TRUE) %>% h2o.tpr() %>% .[['tpr']]
# data.frame(fpr = fpr, tpr = tpr) %>%
# ggplot(aes(fpr, tpr) ) +
# geom_line() +
# ggtitle( sprintf('AUC: %f', auc) ) |
9999d5803655687421e675b3899a7b6cc4e6d266 | a2a0f6efe0c0c9977fe8cef230d43c7d3592139d | /server.R | 48af19d0f3a0c673f0bd2f87d3bd2592beb0a1a4 | [] | no_license | advcloud/retail | 2f500d5915199b99798634c9372cb84742ea199c | 5e997264c6a374f9071f2cd1e88e40e2a395489c | refs/heads/master | 2016-08-12T07:43:06.861491 | 2016-02-18T04:08:47 | 2016-02-18T04:08:47 | 51,977,704 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,006 | r | server.R | # title : 2015資料科學與產業應用工作坊-互動式分析平台工作坊
# date : 2015.6.5
# file : server.R
# author : Ming-Chang Lee (李明昌)
# email : alan9956@gmail.com
# RWEPA : http://rwepa.blogspot.tw/
# encoding: UTF-8
suppressPackageStartupMessages(library(googleVis))
options(shiny.maxRequestSize=4000*1024^2) # max filesize=4GB
options(java.parameters = "-Xmx8192m")
require(XLConnect)
require(DT)
require(googleVis)
progShow <- function(showLabels="資料處理 ... ") {
withProgress(message = showLabels, value = 0, {
n <- 100
for (i in 1:n) {
incProgress(1/n, detail = paste(i, "%", sep=""))
Sys.sleep(0.05) # 暫停 0.05 秒
}
})
}
#
# if (length(dir("data")) != 0) {
#
# selectfile <- paste(getwd(), "retail/data/breakfasts_s.RData", sep="")
# load(selectfile)
# }
shinyServer(function(input, output) {
# 檔案匯入
output$text1 <- renderText({
inFile <- input$file1
if (is.null(inFile))
return("請選取檔案")
if (is.null(inFile) == FALSE) {
fileMain <- unlist(strsplit(inFile$name, "[.]"))[[1]]
fileExt <- tolower(unlist(strsplit(inFile$name, "[.]"))[[2]])
if (fileExt == "xlsx" | fileExt == "xls") {
progShow("資料載入 ... ")
excelname <- loadWorkbook(inFile$datapath, create=TRUE)
storeSheet <- "dhStoreLookup"
progShow("商店資料處理 ... ")
storeData <- readWorksheet(excelname, sheet=storeSheet)
productSheet <- "dhProductsLookup"
progShow("產品資料處理 ... ")
productData <- readWorksheet(excelname, sheet=productSheet)
transSheet <- "dhTransactionData"
progShow("交易資料處理 ... ")
transData <- readWorksheet(excelname, sheet=transSheet)
transData$WEEK_END_DATE <- as.Date(transData$WEEK_END_DATE)
}
filename <- paste("data/", fileMain, ".RData", sep="")
progShow("資料儲存 ... ")
save(storeData, productData, transData, file=filename)
message_label <- paste(inFile$name, " 檔案上傳完畢,請選[資料檢視]!", sep="")
return(message_label)}
})
output$dataMessage <- renderText({
if (length(dir("data")) == 0) {
return("請選取[檔案匯入] !!!")
}
else {
return("檔案已匯入完成!")
}
})
# 分店資料檢視
output$mytable1 <- DT::renderDataTable({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
# progShow("資料載入 ... ")
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
if (input$store.duplicate == TRUE) {
storeData <- storeData[!duplicated(storeData$STORE_ID),]
names(transData)[2] <- "STORE_ID"
transData <- merge(transData, storeData[,c(1,2,4)], by="STORE_ID", all.x=TRUE)
transData <- transData[c(1,13:14,2:12)]
progShow("分店,交易合併資料更新中 ... ")
save(storeData, productData, transData, file=files)
}
datatable(storeData,
options=list(pageLength=10,
searching=FALSE))
})
# 分店重覆資料檢視
output$mytable1.duplicate <- DT::renderDataTable({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
storeData <- storeData[order(storeData$STORE_ID), ]
# verify duplicated(storeData$STORE_ID)
duplicated.Store_ID <- storeData$STORE_ID[duplicated(storeData$STORE_ID) == TRUE]
datatable(storeData[storeData$STORE_ID %in% duplicated.Store_ID,],
options=list(pageLength=10,
searching=FALSE))
})
# 交易資料檢視
output$mytable2 <- DT::renderDataTable({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
mindate <- input$dates[1]
maxdate <- input$dates[2]
if (input$stateName == "ALL") {
selectdata <- transData[which(transData$WEEK_END_DATE >= mindate &
transData$WEEK_END_DATE <= maxdate),]
} else {
selectdata <- transData[which(transData$WEEK_END_DATE >= mindate &
transData$WEEK_END_DATE <= maxdate &
transData$ADDRESS_STATE_PROV_CODE == input$stateName),]
}
datatable(selectdata,
options=list(pageLength=10,
searching=FALSE))
})
# 視覺化圖表
output$view1 <- renderGvis({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
agg.df <- aggregate(transData[,"SPEND"],
by=list(transData$ADDRESS_STATE_PROV_CODE),
FUN=sum)
names(agg.df) <- c("StateName", "SUM")
maxvalue <- max(agg.df[,2])*1.05
if (input$visRadio == "gauge") {
gvisGauge(agg.df, options=list(min=0, max=maxvalue,
greenFrom=maxvalue/2, greenTo=maxvalue,
yellowFrom=maxvalue/3, yellowTo=maxvalue/2,
redFrom=0, redTo=maxvalue/3))
} else {
gvisColumnChart(agg.df, xvar="StateName", yvar=c("SUM"))
}
})
# 時間推移圖
output$view2 <- renderGvis({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
state.IN <- transData[transData$ADDRESS_STATE_PROV_CODE == "IN", c("WEEK_END_DATE","SPEND")]
state.KY <- transData[transData$ADDRESS_STATE_PROV_CODE == "KY", c("WEEK_END_DATE","SPEND")]
state.OH <- transData[transData$ADDRESS_STATE_PROV_CODE == "OH", c("WEEK_END_DATE","SPEND")]
state.TX <- transData[transData$ADDRESS_STATE_PROV_CODE == "TX", c("WEEK_END_DATE","SPEND")]
state.IN <- state.IN[order(state.IN$WEEK_END_DATE),]
state.KY <- state.IN[order(state.IN$WEEK_END_DATE),]
state.OH <- state.IN[order(state.IN$WEEK_END_DATE),]
state.TX <- state.IN[order(state.IN$WEEK_END_DATE),]
if (input$stateSelect == "KY") {
selectData <- state.KY
} else if (input$stateSelect == "OH") {
selectData <- state.OH
} else if (input$stateSelect == "TX") {
selectData <- state.TX
} else {
selectData <- state.IN
}
gvisLineChart(selectData, xvar="WEEK_END_DATE", yvar=c("SPEND"))
})
# 資料摘要
output$summary <- renderPrint({
if (length(dir("data")) == 0) {
return(NULL)
}
else {
files <- dir("data", pattern="\\.RData", full.names=TRUE)[1]
load(files)
}
summary(transData)
})
}) |
16eaff4ecca101a4cfd423bf6bead5f6cd9230b2 | 19cc2a3aed2c01401a942294847315bd902aa5b9 | /MLR.R | 63c1df679e865d40e6e5f4fbf37ae2f6a5f99e5a | [] | no_license | sagarhowal/thesis0 | 0648e8d9e7f15b69216166cb91341045fbf0219f | b9639179dbf608223eb82bdaaa192024f507b9a7 | refs/heads/master | 2020-04-14T15:16:05.367215 | 2019-01-27T03:14:52 | 2019-01-27T03:14:52 | 163,920,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,989 | r | MLR.R | setwd("/Users/sagarhowal/Code/Thesis/CER Electricity Revised March 2012")
library(data.table)
library(lubridate)
library(forecast)
library(ggplot2)
#Load Data
load("RnD_Clean.RData")
#cleanup if needed [after loading]
rm(DT1491, DT1491New, DT1951, DT1951New, DT1951New2, NArows, rez_full)
rm(corrected_date, DateOrig, rezidences, ts3)
#-----------------New--------------------------------
DT_agg <- as.data.table(aggregate(DT[, .(VALUE)], by = DT[, .(TIME)],
FUN = sum, simplify = TRUE))
DT_agg$DATETIME <- rep(ts)
DT_agg <- subset(DT_agg, DT_agg$DATETIME >= "2010-01-01 00:00:00" & DT_agg$DATETIME <= "2010-12-31 23:30:00" )
ggplot(data = DT_agg, aes(x = DATETIME, y = VALUE)) +
geom_line() +
#facet_grid(type ~ ., scales = "free_y") +
theme(panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.minor = element_line(colour = "grey90"),
panel.grid.major = element_line(colour = "grey90"),
panel.grid.major.x = element_line(colour = "grey90"),
axis.text = element_text(size = 10),
axis.title = element_text(size = 12, face = "bold"),
strip.text = element_text(size = 9, face = "bold")) +
labs(x = "Date", y = "Load (kW)")
#Analysis
DT_agg[, week:= weekdays(DATETIME)]
DT_agg[, week_num := as.integer(as.factor(DT_agg[, week]))]
DT_agg[, type := rep("Residential")]
DT_agg[, date := date(DATETIME)]
n_type <- unique(DT_agg[, type])
n_date <- unique(DT_agg[, date])
n_weekdays <- unique(DT_agg[, week])
period <- 48
data_r <- DT_agg[(type == n_type[1] & date %in% n_date[57:70])]
ggplot(data_r, aes(DATETIME, VALUE)) +
geom_line() +
theme(panel.border = element_blank(),
panel.background = element_blank(),
panel.grid.minor = element_line(colour = "grey90"),
panel.grid.major = element_line(colour = "grey90"),
panel.grid.major.x = element_line(colour = "grey90"),
axis.text = element_text(size = 10),
axis.title = element_text(size = 12, face = "bold")) +
labs(x = "Date", y = "Load (kW)")
N <- nrow(data_r)
window <- N / period # number of days in the train set
# 1, ..., period, 1, ..., period - and so on for the daily season
# using feature "week_num" for the weekly season
matrix_train <- data.table(Load = data_r[, VALUE],
Daily = as.factor(rep(1:period, window)),
Weekly = as.factor(data_r[, week_num]))
#Model [Linear Model]
lm_m_1 <- lm(Load ~ 0 + ., data = matrix_train)
smmr_1 <- summary(lm_m_1)
paste("R-squared: ",
round(smmr_1$r.squared, 3),
", p-value of F test: ",
1-pf(smmr_1$fstatistic[1], smmr_1$fstatistic[2], smmr_1$fstatistic[3]))
datas <- rbindlist(list(data_r[, .(VALUE, DATETIME)],
data.table(VALUE = lm_m_1$fitted.values, data_time = data_r[, DATETIME])))
datas[, type := rep(c("Real", "Fitted"), each = nrow(data_r))]
ggplot(data = datas, aes(DATETIME, VALUE, group = type, colour = type)) +
geom_line(size = 0.8) +
theme_bw() +
labs(x = "Time", y = "Load (kW)",
title = "Fit from MLR")
#Plot the Residuals
ggplot(data = data.table(Fitted_values = lm_m_1$fitted.values,
Residuals = lm_m_1$residuals),
aes(Fitted_values, Residuals)) +
geom_point(size = 1.7) +
geom_smooth() +
geom_hline(yintercept = 0, color = "red", size = 1) +
labs(title = "Fitted values vs Residuals")
#QQ-Plot
ggQQ <- function(lm){
# extract standardized residuals from the fit
d <- data.frame(std.resid = rstandard(lm))
# calculate 1Q/4Q line
y <- quantile(d$std.resid[!is.na(d$std.resid)], c(0.25, 0.75))
x <- qnorm(c(0.25, 0.75))
slope <- diff(y)/diff(x)
int <- y[1L] - slope * x[1L]
p <- ggplot(data = d, aes(sample = std.resid)) +
stat_qq(shape = 1, size = 3) + # open circles
labs(title = "Normal Q-Q", # plot title
x = "Theoretical Quantiles", # x-axis label
y = "Standardized Residuals") + # y-axis label
geom_abline(slope = slope, intercept = int, linetype = "dashed",
size = 1, col = "firebrick1") # dashed reference line
return(p)
}
#Plot
ggQQ(lm_m_1) #Not a good fit.
#Model 2 [with interactions]
lm_m_2 <- lm(Load ~ 0 + Daily + Weekly + Daily:Weekly,
data = matrix_train)
c(Previous = summary(lm_m_1)$r.squared, New = summary(lm_m_2)$r.squared)
# Previous New
# 0.9973367 0.9995551
#Naaaaaayce!
datas <- rbindlist(list(data_r[, .(VALUE, DATETIME)],
data.table(VALUE = lm_m_2$fitted.values, data_time = data_r[, DATETIME])))
datas[, type := rep(c("Real", "Fitted"), each = nrow(data_r))]
#Plot Fitted vs Real for Model 2
ggplot(data = datas, aes(DATETIME, VALUE, group = type, colour = type)) +
geom_line(size = 0.8) +
theme_bw() +
labs(x = "Time", y = "Load (kW)",
title = "Fit from MLR")
#Plot Resuduals for Model 2
ggplot(data = data.table(Fitted_values = lm_m_2$fitted.values,
Residuals = lm_m_2$residuals),
aes(Fitted_values, Residuals)) +
geom_point(size = 1.7) +
geom_hline(yintercept = 0, color = "red", size = 1) +
labs(title = "Fitted values vs Residuals")
#QQ-Plot for Model 2
ggQQ(lm_m_2)
#Create a function for predicting next week using last two weeks training data
predWeekReg <- function(data, set_of_date){
# Subsetting the dataset by dates
data_train <- data[date %in% set_of_date]
N <- nrow(data_train)
window <- N / period # number of days in the train set
# 1, ..., period, 1, ..., period - and so on for the daily season
# Using feature "week_num" for the weekly season
matrix_train <- data.table(Load = data_train[, VALUE],
Daily = as.factor(rep(1:period, window)),
Weekly = as.factor(data_train[, week_num]))
# Creation of the model
lm_m <- lm(Load ~ 0 + Daily + Weekly + Daily:Weekly, data = matrix_train)
# Creation of the forecast for one week ahead
pred_week <- predict(lm_m, matrix_train[1:(7*period), -1, with = FALSE])
return(as.vector(pred_week))
}
#Define MAPE
mape <- function(real, pred){
return(100 * mean(abs((real - pred)/real)))
}
#Forecasting the whole year with 2 weeks training and 1 week prediction.
n_weeks <- floor(length(n_date)/7) - 2
# Forecasts
lm_pred_weeks_1 <- sapply(0:(n_weeks-1), function(i)
predWeekReg(DT_agg[type == n_type[1]], n_date[((i*7)+1):((i*7)+7*2)]))
# Evaluation (computation of errors)
lm_err_mape_1 <- sapply(0:(n_weeks-1), function(i)
mape(DT_agg[(type == n_type[1] & date %in% n_date[(15+(i*7)):(21+(i*7))]), VALUE],
lm_pred_weeks_1[, i+1]))
#Plot Distribution of MAPE
plot(density(lm_err_mape_1))
summary(lm_err_mape_1)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1.884 2.482 3.518 4.537 5.158 12.810
#Save Preprocessed Dataframe for fast execution Later
write_feather(DT_agg, "DT_agg.feather")
|
fbf3206bd216932f87ed6678199a53e5e5e95037 | bd94861ff5caafe67200c1cd99f94b98d5a70557 | /R/glm.hermite.R | 5067427ce3a1103e026c30a224cc655d0f1da0a0 | [] | no_license | cran/hermite | d0a8f5a75f95b5d1440acf1178369bfd4a1399f5 | 5ab6bb8afafb34202124d13af7ec234b8e9f7651 | refs/heads/master | 2021-01-10T19:42:00.240786 | 2018-05-17T12:33:53 | 2018-05-17T12:33:53 | 26,024,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,069 | r | glm.hermite.R | glm.hermite <- function (formula, data, link = "log", start = NULL, m = NULL)
{
Call <- match.call()
m.fixed <- FALSE
intercept <- FALSE
if (!is.null(m))
{
m.fixed <- TRUE
if (floor(m) != m ) {
stop("improper m parameter specification")
}
}
if (missing(data))
data <- environment(formula)
mf <- match.call()
mm <- match(c("formula", "data"), names(mf), 0)
mf <- mf[c(1, mm)]
mf$drop.unused.levels <- TRUE
mf[[1]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
mt <- attr(mf, "terms")
X <- model.matrix(mt, mf)
Y <- model.response(mf, "numeric")
if (length(dim(Y)) == 1L) {
nm <- rownames(Y)
dim(Y) <- NULL
if (!is.null(nm))
names(Y) <- nm
}
X <- if (!is.empty.model(mt))
model.matrix(mt, mf, contrasts)
Xnames <- dimnames(X)[[2L]]
kx <- NCOL(X)
if (all(X[, 1] == 1))
intercept <- TRUE
maxY <- max(Y)
n <- length(Y)
linkstr <- link
linkobj <- make.link(linkstr)
linkinv <- linkobj$linkinv
prob <- function(y, mu, d) {
ny <- length(mu)
maxy <- max(m, max(y), 2)
p <- matrix(NA, ny, maxy + 1)
p[, 1] <- exp(mu * (-1 + (d - 1)/m))
p[, 2:m] <- vapply(1:(m - 1), function(k) p[, 1] * ((mu^k)/(factorial(k))) *
(((m - d)/(m - 1))^k), rep(1, ny))
if(m <= maxy)
{
for (i in m:maxy) {
p[, i + 1] <- mu * (p[, i - m + 1] * (d - 1) + p[, i] * (m - d))/(i * (m - 1))
}
}
IJ <- as.matrix(cbind(row = 1:ny, col = y + 1))
p[IJ] <- ifelse(p[IJ] < 0, 0, p[IJ])
return(p[IJ])
}
loglik <- function(parms) {
mu <- as.vector(linkinv(X %*% parms[1:kx]))
d <- parms[kx + 1]
if (d < 1) d <- 1.1
loglikh <- sum(log(prob(Y, mu, d))[is.finite(log(prob(Y, mu, d)))])
loglikh
}
mloglik <- function(parms) {
-1*loglik(parms)
}
m.naive <- FALSE
if (is.null(m)) {
d <- var(Y)/mean(Y)
p0 <- length(Y[Y == 0])/length(Y)
m.n <- ifelse(p0 != 0 & dim(X)[2]==1, round((d - 1)/(1 + log(p0)/mean(Y))),
NA)
if (!is.na(m.n) & m.n < 0) m.n <- 1
m <- m.n
if (!is.na(m) & !m.fixed) m.naive <- TRUE
}
if (is.na(m)) m <- 1
if (is.null(start) & link == "identity") start <- c(rep(1, NCOL(X)), 1.1)
if (is.null(start) & link == "log")
start <- c(as.numeric(glm.fit(X, Y, family = poisson(link = link))$coefficients),
1.1)
ex.solMLE <- function(m)
{
if (dim(X)[2]>1) return(TRUE)
res <- TRUE
fm <- 0
for (i in 1:length(Y)) {
mult <- 1
for (j in 0:(m - 1)) {
mult <- mult * (Y[i] - j)
}
fm <- fm + mult
}
fm <- fm/length(Y)
if (fm <= (mean(Y))^m) {
res <- FALSE
}
return(res)
}
if (m.fixed & m > 1)
{
if (ex.solMLE(m)==FALSE)
{
coefs <- c(mean(Y), 1)
llik <- loglik(c(coefs[1], coefs[2]))
hess <- length(Y)/mean(Y)
w <- 2 * (loglik(c(coefs[1], coefs[2])) - loglik(c(coefs[1], 1)))
pval <- pchisq(w, 1, lower.tail = F)
warning("MLE equations have no solution")
output <- list()
output$coefs <- coefs
output$loglik <- llik
output$hess <- hess
output$w <- w
output$pval <- pval
class(output) <- "glm.hermite"
attr(output, "Call") <- Call
attr(output, "x") <- X
return(output)
}
}
if (link == "log") {
A <- matrix(c(rep(0, kx), 1, c(rep(0,kx), -1)), 2, kx + 1, byrow=TRUE)
B <- c(1,m)
}
else {
if (intercept) {
A <- rbind(cbind(X,0), c(rep(0,kx), 1), c(1,rep(0,kx)), c(rep(0,kx), -1))
B <- c(rep(0,n),1,0,m)
}
if (!intercept)
{
if (m.fixed)
{
if (m > 1) fit2 <- optim(start, mloglik, method="L-BFGS-B", lower=c(rep(-Inf, NCOL(X)), 1), upper=c(rep(Inf,NCOL(X)),m), hessian=TRUE)
if (m == 1)
{
if(class(try(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]),silent = TRUE))!="try-error")
{
fit2 <- suppressWarnings(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]))
fit2$maximum <- logLik(fit2)
}else{
stop("Error on glm Poisson fit")
}
}
}
if (!m.fixed)
{
if(class(try(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]),silent = TRUE))!="try-error")
{
fit2 <- suppressWarnings(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]))
fit2$maximum <- logLik(fit2)
}else{
m <- 2
if (ex.solMLE(m))
{
fit2 <- optim(start, mloglik, method="L-BFGS-B", lower=c(rep(-Inf, NCOL(X)), 1), upper=c(rep(Inf,NCOL(X)),m), hessian=TRUE)
fit2$maximum <- -fit2$value
}else{
warning("MLE equations have no solution for m=", m)
fit2$maximum <- -Inf
}
}
m.f <- m
j <- 3
while((j <= m.f+1 & j <= min(max(Y), 10)) | (m.f == 1 & j <= 5))
{
m <- j
if (ex.solMLE(m))
{
fit <- optim(start, mloglik, method="L-BFGS-B", lower=c(rep(-Inf, NCOL(X)), 1), upper=c(rep(Inf,NCOL(X)),m), hessian=TRUE)
fit$maximum <- -fit$value
if (!is.null(fit$maximum) & fit$convergence==0)
{
if (fit$maximum > fit2$maximum)
{
fit2 <- fit
m.f <- m
}
}
}else{
warning("MLE equations have no solution for m=", m)
fit2$maximum <- -Inf
}
m <- m.f
j <- j + 1
}
}
coefs <- c(fit2$par, m)
l1 <- length(fit2$par)
l2 <- l1 - 1
names(coefs) <- c(Xnames, "dispersion.index", "order")
mu <- as.vector(linkinv(X %*% coefs[1:kx]))
hess <- fit2$hessian
if (is.null(hess)) hess <- hessian(fit2)
if (m == 1) w <- ifelse(l2 > 1, 2 * (fit2$maximum - loglik(c(coefs[1], coefs[2:l2], 1))), 2 * (fit2$maximum - loglik(c(coefs[1], 1))))
if (m > 1) w <- ifelse(l2 > 1, 2 * (-fit2$value - loglik(c(coefs[1], coefs[2:l2], 1))), 2 * (-fit2$value - loglik(c(coefs[1], 1))))
pval <- pchisq(w, 1, lower.tail = F)/2
output <- list()
output$coefs <- coefs
output$loglik <- -fit2$value
output$vcov <- solve(hess)
if(class(fit2)!="list") output$vcov <- vcov(fit2)
output$hess <- hess
output$fitted.values <- mu
output$w <- w
output$pval <- pval
class(output) <- "glm.hermite"
attr(output, "Call") <- Call
attr(output, "x") <- X
return(output)
}
}
constraints <- list(ineqA = A, ineqB = B)
if (any(A %*% start + B < 0)) stop("initial value not feasible")
if (m.fixed & m>1) fit2 <- maxLik(logLik = loglik, start = start, constraints = constraints, iterlim = 1000)
if (m.fixed & m==1)
{
fit2 <- suppressWarnings(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]))
fit2$maximum <- logLik(fit2)
}
if (!m.fixed)
{
fit2 <- suppressWarnings(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]))
fit2$maximum <- logLik(fit2)
m.f <- m
j <- 2
while((j <= m.f+1 & j <= min(max(Y), 10)) | (m.f == 1 & j <= 5))
{
m <- j
if (ex.solMLE(m))
{
if (link=="log") B <- c(1, m)
if (link=="identity") B <- B <- c(rep(0,n),1,0,m)
constraints <- list(ineqA = A, ineqB = B)
fit <- maxLik(logLik = loglik, start = start, constraints = constraints, iterlim = 1000)
if (!is.null(fit$maximum))
{
if (fit$maximum > fit2$maximum)
{
fit2 <- fit
m.f <- m
}
}
}else{
warning("MLE equations have no solution for m=", m)
fit$maximum <- -Inf
}
m <- m.f
j <- j + 1
}
}
if (m==1)
{
fit2 <- suppressWarnings(glm(formula, family = poisson(link = link), data=data, start=start[-length(start)]))
coefs <- c(fit2$coefficients, 1, m)
names(coefs) <- c(Xnames, "dispersion.index", "order")
mu <- as.vector(linkinv(X %*% coefs[1:kx]))
output <- list()
output$coefs <- coefs
output$loglik <- as.numeric(logLik(fit2))
output$vcov <- vcov(fit2)
colnames(output$vcov) <- NULL
rownames(output$vcov) <- NULL
output$hess <- solve(output$vcov)
output$fitted.values <- mu
output$w <- NA
output$pval <- NA
class(output) <- "glm.hermite"
attr(output, "Call") <- Call
attr(output, "x") <- X
return(output)
}
if (m.naive==TRUE)
{
if(m.n > j)
{
if (link=="log") B <- c(1, m.n)
if (link=="identity") B <- B <- c(rep(0,n),1,0,m.n)
constraints <- list(ineqA = A, ineqB = B)
fit3 <- maxLik(logLik = loglik, start = start, constraints = constraints, iterlim = 1000)
if (!is.null(fit3$maximum))
{
if (fit3$maximum > fit2$maximum)
{
fit2 <- fit3
m <- m.n
}
}
}
}
coefs <- c(fit2$estimate, m)
l1 <- length(fit2$estimate)
l2 <- l1 - 1
names(coefs) <- c(Xnames, "dispersion.index", "order")
mu <- as.vector(linkinv(X %*% coefs[1:kx]))
hess <- hessian(fit2)
w <- ifelse(l2 > 1, 2 * (fit2$maximum - loglik(c(coefs[1],
coefs[2:l2], 1))), 2 * (fit2$maximum - loglik(c(coefs[1],
1))))
pval <- pchisq(w, 1, lower.tail = F)/2
output <- list()
output$coefs <- coefs
output$loglik <- fit2$maximum
output$vcov <- solve(-hess)
output$hess <- hess
output$fitted.values <- mu
output$w <- w
output$pval <- pval
class(output) <- "glm.hermite"
attr(output, "Call") <- Call
attr(output, "x") <- X
return(output)
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.