content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Basic information about the input MAF file.
#'
#' Takes MAF file as input and gives initial impression of the data, such as number of mutations per sample, variant types, etc. Also plots barplot and boxplot of variant distributions.
#'
#' @param maf_file input file in MAF format.
#' @param removeSilent logical. Whether to discard silent mutations ("Silent","Intron","RNA","3'UTR"). Default is TRUE.
#' @import ggplot2
#' @import plyr
#' @import reshape
#' @return returns a list of data frames with number of mutations per sample, mutations classified according to variant type and variant classification.
#' @export
#'
maf_stats = function(maf_file,removeSilent = TRUE){
require(package = "RColorBrewer",quietly = T)
require(package = "gridExtra",quietly = T)
#Read file
tot.muts = read.delim(file = maf_file,header = T,sep = "\t",stringsAsFactors = F,comment.char = "#")
#Remove silent mutations
if(removeSilent){
tot.muts = tot.muts[!tot.muts$Variant_Classification %in% c("Silent","Intron","RNA","3'UTR"),]
}
#convert columns into factors
tot.muts$Variant_Type = as.factor(as.character(tot.muts$Variant_Type))
tot.muts$Variant_Classification = as.factor(as.character(tot.muts$Variant_Classification))
#split maf file into according to tumor sample barcode
pid.split = split(tot.muts,f = as.factor(as.character(tot.muts$Tumor_Sample_Barcode)))
variants.df = data.frame(variants = sapply(pid.split,nrow))
#create dataframes to store output
variant.type.df = data.frame()
variant.classification.df = data.frame()
#get variant classes
variant.classes = levels(tot.muts$Variant_Classification)
#Make colors for variants classes.
if(length(variant.classes) <= 9 ){
type_col = structure(brewer.pal(length(variant.classes),name = "Set1"), names = variant.classes)
} else{
type_col = structure(rainbow(n = length(variant.classes),alpha = 1), names = variant.classes)
}
for(i in 1:length(pid.split)){
mut = pid.split[[i]] #each patient
variant.type.df = rbind(variant.type.df,sapply(split(mut,as.factor(mut$Variant_Type)),nrow))
rownames(variant.type.df)[nrow(variant.type.df)] = names(pid.split[i])
variant.classification.df = rbind(variant.classification.df,sapply(split(mut,as.factor(mut$Variant_Classification)),nrow))
rownames(variant.classification.df)[nrow(variant.classification.df)] = names(pid.split[i])
}
colnames(variant.type.df) = levels(tot.muts$Variant_Type)
colnames(variant.classification.df) = levels(tot.muts$Variant_Classification)
#Reorder data acoording to number of mutations.
variant.classification.df = variant.classification.df[order(rowSums(variant.classification.df),decreasing = T),]
variant.classification.df = variant.classification.df[,order(colSums(variant.classification.df),decreasing = T)]
#Melt data for ggplot
vc = melt(as.matrix(variant.classification.df))
vc$X1 = factor(vc$X1,levels = rownames(variant.classification.df)) #reorder patient levels
vc$X2 = factor(vc$X2,levels = colnames(variant.classification.df)) #reorder variant class levels
p = ggplot()+geom_bar(data = vc, aes(x = X1,y = value,fill = X2),stat = "identity")+theme(legend.position = "none")
p.bar = p + scale_fill_manual(values = type_col, name = "Variant Class: ") + xlab("") + ylab("Number of Mutations") + theme(axis.text.x = element_blank())
p.box = ggplot(data = vc)+geom_boxplot(aes(x = X2,y = value,fill = X2))+theme_bw()+theme(legend.position = "bottom")+scale_fill_manual(values = type_col, name = "Variant Class: ") + xlab("") + ylab("Number of Mutations")+ theme(axis.text.x = element_blank())
grid.arrange(p.bar,p.box,nrow = 2, ncol=1)
#Return results as a list of data frames. Self explainatory.
return(list(variants.per.sample = variants.df,variant.type.summary = variant.type.df,variant.classification.summary = variant.classification.df))
} | /R/maf_stats.R | no_license | gnsljw/mafTools | R | false | false | 3,935 | r | #' Basic information about the input MAF file.
#'
#' Takes MAF file as input and gives initial impression of the data, such as number of mutations per sample, variant types, etc. Also plots barplot and boxplot of variant distributions.
#'
#' @param maf_file input file in MAF format.
#' @param removeSilent logical. Whether to discard silent mutations ("Silent","Intron","RNA","3'UTR"). Default is TRUE.
#' @import ggplot2
#' @import plyr
#' @import reshape
#' @return returns a list of data frames with number of mutations per sample, mutations classified according to variant type and variant classification.
#' @export
#'
maf_stats = function(maf_file, removeSilent = TRUE){
  # Optional plotting helpers: brewer.pal() for palettes, grid.arrange() for layout.
  require(package = "RColorBrewer", quietly = TRUE)
  require(package = "gridExtra", quietly = TRUE)
  # Read the MAF file; "#"-prefixed lines (MAF version headers) are skipped.
  tot.muts = read.delim(file = maf_file, header = TRUE, sep = "\t",
                        stringsAsFactors = FALSE, comment.char = "#")
  # Optionally drop silent / non-coding variant classes.
  if(removeSilent){
    tot.muts = tot.muts[!tot.muts$Variant_Classification %in%
                          c("Silent", "Intron", "RNA", "3'UTR"), ]
  }
  # Re-factor after subsetting so unused levels are dropped.
  tot.muts$Variant_Type = as.factor(as.character(tot.muts$Variant_Type))
  tot.muts$Variant_Classification = as.factor(as.character(tot.muts$Variant_Classification))
  # One data frame of mutations per tumor sample barcode.
  pid.split = split(tot.muts, f = as.factor(as.character(tot.muts$Tumor_Sample_Barcode)))
  variants.df = data.frame(variants = sapply(pid.split, nrow))
  # Accumulators for per-sample counts by variant type / classification.
  variant.type.df = data.frame()
  variant.classification.df = data.frame()
  variant.classes = levels(tot.muts$Variant_Classification)
  n.classes = length(variant.classes)
  # One color per variant class. brewer.pal() requires n >= 3 (it warns and
  # returns 3 colors for smaller n, which would break the names() length),
  # so request at least 3 and trim; fall back to rainbow() beyond the
  # 9-color limit of the "Set1" palette.
  if(n.classes <= 9){
    pal = brewer.pal(max(3L, n.classes), name = "Set1")[seq_len(n.classes)]
    type_col = structure(pal, names = variant.classes)
  } else{
    type_col = structure(rainbow(n = n.classes, alpha = 1), names = variant.classes)
  }
  for(i in seq_along(pid.split)){
    mut = pid.split[[i]] # mutations for one sample
    # split() on the factor keeps every level, so each row has the same columns.
    variant.type.df = rbind(variant.type.df,
                            sapply(split(mut, as.factor(mut$Variant_Type)), nrow))
    rownames(variant.type.df)[nrow(variant.type.df)] = names(pid.split)[i]
    variant.classification.df = rbind(variant.classification.df,
                                      sapply(split(mut, as.factor(mut$Variant_Classification)), nrow))
    rownames(variant.classification.df)[nrow(variant.classification.df)] = names(pid.split)[i]
  }
  colnames(variant.type.df) = levels(tot.muts$Variant_Type)
  colnames(variant.classification.df) = levels(tot.muts$Variant_Classification)
  # Order samples (rows) and classes (columns) by total mutation burden.
  variant.classification.df = variant.classification.df[order(rowSums(variant.classification.df), decreasing = TRUE), ]
  variant.classification.df = variant.classification.df[, order(colSums(variant.classification.df), decreasing = TRUE)]
  # Long format for ggplot; X1 = sample, X2 = variant class (reshape::melt column names).
  vc = melt(as.matrix(variant.classification.df))
  vc$X1 = factor(vc$X1, levels = rownames(variant.classification.df)) # reorder patient levels
  vc$X2 = factor(vc$X2, levels = colnames(variant.classification.df)) # reorder variant class levels
  # Stacked barplot of mutations per sample plus a per-class boxplot.
  p = ggplot() +
    geom_bar(data = vc, aes(x = X1, y = value, fill = X2), stat = "identity") +
    theme(legend.position = "none")
  p.bar = p + scale_fill_manual(values = type_col, name = "Variant Class: ") +
    xlab("") + ylab("Number of Mutations") + theme(axis.text.x = element_blank())
  p.box = ggplot(data = vc) +
    geom_boxplot(aes(x = X2, y = value, fill = X2)) + theme_bw() +
    theme(legend.position = "bottom") +
    scale_fill_manual(values = type_col, name = "Variant Class: ") +
    xlab("") + ylab("Number of Mutations") + theme(axis.text.x = element_blank())
  grid.arrange(p.bar, p.box, nrow = 2, ncol = 1)
  # Return the summary tables.
  return(list(variants.per.sample = variants.df,
              variant.type.summary = variant.type.df,
              variant.classification.summary = variant.classification.df))
}
/classify_new_cases.R | no_license | jequihua/short_text_classification | R | false | false | 5,442 | r | ||
# NOTE: this file is knitr-purled R code extracted from the snotelr vignette
# (the "## ----...----" lines are the original chunk headers). Do not edit by
# hand; edit the vignette source instead.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
# load the library
library(snotelr)
# check cran, same routine as skip_on_cran()
# but not dependent on testthat which might
# not be available on user systems (not required
# only suggested)
check_cran <- function() {
if (identical(tolower(Sys.getenv("NOT_CRAN")), "true")) {
return(TRUE)
} else {
return(FALSE)
}
}
# do cran check
# 'cran' gates the data-download chunks below (eval = cran) so they are
# skipped on CRAN check machines.
cran <- check_cran()
# for local render set to true
# mainly important for rendering
# a website using pkgdown
#cran <- TRUE
## ----eval = cran--------------------------------------------------------------
# download and list site information
site_meta_data <- snotel_info()
head(site_meta_data)
## ----eval = cran--------------------------------------------------------------
# downloading data for a random site
# internal = TRUE returns the data as an R object instead of writing a file
snow_data <- snotel_download(site_id = 670, internal = TRUE)
# show the data
head(snow_data)
## ----fig.width = 7, fig.height=3, eval = cran---------------------------------
# A plot of snow accummulation through the years
plot(as.Date(snow_data$date),
snow_data$snow_water_equivalent,
type = "l",
xlab = "Date",
ylab = "SWE (mm)")
## ----eval = cran--------------------------------------------------------------
# calculate snow phenology
phenology <- snotel_phenology(snow_data)
## ----fig.width = 7, fig.height=3, eval = cran---------------------------------
# subset data to the first decade of the century
snow_data_subset <- subset(snow_data, as.Date(date) > as.Date("2000-01-01") &
as.Date(date) < as.Date("2010-01-01"))
# plot the snow water equivalent time series
plot(as.Date(snow_data_subset$date),
snow_data_subset$snow_water_equivalent,
type = "l",
xlab = "Date",
ylab = "SWE (mm)")
# plot the dates of first snow accumulation as a red dot
# (first_snow_acc is a day-of-year, hence the "%Y %j" date format)
points(as.Date(paste(phenology$year, phenology$first_snow_acc),"%Y %j"),
rep(1,nrow(phenology)),
col = "red",
pch = 19,
cex = 0.5)
| /inst/doc/snotelr-vignette.R | no_license | cran/snotelr | R | false | false | 2,098 | r | ## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
# load the library
library(snotelr)
# check cran, same routine as skip_on_cran()
# but not dependent on testthat which might
# not be available on user systems (not required
# only suggested)
check_cran <- function() {
if (identical(tolower(Sys.getenv("NOT_CRAN")), "true")) {
return(TRUE)
} else {
return(FALSE)
}
}
# do cran check
cran <- check_cran()
# for local render set to true
# mainly important for rendering
# a website using pkgdown
#cran <- TRUE
## ----eval = cran--------------------------------------------------------------
# download and list site information
site_meta_data <- snotel_info()
head(site_meta_data)
## ----eval = cran--------------------------------------------------------------
# downloading data for a random site
snow_data <- snotel_download(site_id = 670, internal = TRUE)
# show the data
head(snow_data)
## ----fig.width = 7, fig.height=3, eval = cran---------------------------------
# A plot of snow accummulation through the years
plot(as.Date(snow_data$date),
snow_data$snow_water_equivalent,
type = "l",
xlab = "Date",
ylab = "SWE (mm)")
## ----eval = cran--------------------------------------------------------------
# calculate snow phenology
phenology <- snotel_phenology(snow_data)
## ----fig.width = 7, fig.height=3, eval = cran---------------------------------
# subset data to the first decade of the century
snow_data_subset <- subset(snow_data, as.Date(date) > as.Date("2000-01-01") &
as.Date(date) < as.Date("2010-01-01"))
# plot the snow water equivalent time series
plot(as.Date(snow_data_subset$date),
snow_data_subset$snow_water_equivalent,
type = "l",
xlab = "Date",
ylab = "SWE (mm)")
# plot the dates of first snow accumulation as a red dot
points(as.Date(paste(phenology$year, phenology$first_snow_acc),"%Y %j"),
rep(1,nrow(phenology)),
col = "red",
pch = 19,
cex = 0.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sa.time.R
\name{sa.time}
\alias{sa.time}
\title{Sensitivity of R0 to time estimation windows}
\usage{
sa.time(
incid,
GT,
begin = NULL,
end = NULL,
est.method,
t = NULL,
date.first.obs = NULL,
time.step = 1,
res = NULL,
...
)
}
\arguments{
\item{incid}{A vector of incident cases.}
\item{GT}{Generation time distribution from \code{\link[=generation.time]{generation.time()}}.}
\item{begin}{Vector of begin dates for the estimation of epidemic.}
\item{end}{Vector of end dates for estimation of the epidemic.}
\item{est.method}{Estimation method used for sensitivity analysis.}
\item{t}{Dates vector to be passed to estimation function.}
\item{date.first.obs}{Optional date of first observation, if t not specified.}
\item{time.step}{Optional. If date of first observation is specified, number of day between each incidence observation.}
\item{res}{If specified, will extract most of data from a \code{R0.R}-class contained within the \verb{$estimate} component of a result from \code{\link[=estimate.R]{estimate.R()}} and run sensitivity analysis with it.}
\item{...}{Parameters passed to inner functions}
}
\value{
A list with components :
\item{df}{data.frame object with all results from sensitivity analysis.}
\item{df.clean}{the same object, with NA rows removed. Used only for easy export of results.}
\item{mat.sen}{Matrix with values of R0 given begin (rows) and end (columns) dates.}
\item{begin}{A range of begin dates in epidemic.}
\item{end}{A range of end dates in epidemic.}
}
\description{
Sensitivity analysis to estimate the variation of reproduction numbers
according to period over which the incidence is analyzed.
}
\details{
By varying different pairs of begin and end dates, different estimates of
reproduction ratio can be analyzed.
'begin' and 'end' vector must have the same length for the sensitivity
analysis to run. They can be provided either as dates or numeric values,
depending on the other parameters (see \code{\link[=check.incid]{check.incid()}}). If some begin/end
dates overlap, they are ignored, and corresponding uncomputed data are set
to \code{NA}.
Also, note that unreliable Rsquared values are achieved for very small time
periods (begin ~ end). These values are not representative of the epidemic
outbreak behaviour.
}
\author{
Pierre-Yves Boelle, Thomas Obadia
}
| /man/sa.time.Rd | no_license | tobadia/R0 | R | false | true | 2,417 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sa.time.R
\name{sa.time}
\alias{sa.time}
\title{Sensitivity of R0 to time estimation windows}
\usage{
sa.time(
incid,
GT,
begin = NULL,
end = NULL,
est.method,
t = NULL,
date.first.obs = NULL,
time.step = 1,
res = NULL,
...
)
}
\arguments{
\item{incid}{A vector of incident cases.}
\item{GT}{Generation time distribution from \code{\link[=generation.time]{generation.time()}}.}
\item{begin}{Vector of begin dates for the estimation of epidemic.}
\item{end}{Vector of end dates for estimation of the epidemic.}
\item{est.method}{Estimation method used for sensitivity analysis.}
\item{t}{Dates vector to be passed to estimation function.}
\item{date.first.obs}{Optional date of first observation, if t not specified.}
\item{time.step}{Optional. If date of first observation is specified, number of day between each incidence observation.}
\item{res}{If specified, will extract most of data from a \code{R0.R}-class contained within the \verb{$estimate} component of a result from \code{\link[=estimate.R]{estimate.R()}} and run sensitivity analysis with it.}
\item{...}{Parameters passed to inner functions}
}
\value{
A list with components :
\item{df}{data.frame object with all results from sensitivity analysis.}
\item{df.clean}{the same object, with NA rows removed. Used only for easy export of results.}
\item{mat.sen}{Matrix with values of R0 given begin (rows) and end (columns) dates.}
\item{begin}{A range of begin dates in epidemic.}
\item{end}{A range of end dates in epidemic.}
}
\description{
Sensitivity analysis to estimate the variation of reproduction numbers
according to period over which the incidence is analyzed.
}
\details{
By varying different pairs of begin and end dates, different estimates of
reproduction ratio can be analyzed.
'begin' and 'end' vector must have the same length for the sensitivity
analysis to run. They can be provided either as dates or numeric values,
depending on the other parameters (see \code{\link[=check.incid]{check.incid()}}). If some begin/end
dates overlap, they are ignored, and corresponding uncomputed data are set
to \code{NA}.
Also, note that unreliable Rsquared values are achieved for very small time
periods (begin ~ end). These values are not representative of the epidemic
outbreak behaviour.
}
\author{
Pierre-Yves Boelle, Thomas Obadia
}
|
#' CoronaNet government policy response database
#'
#' This dataset contains variables from the CoronaNet government
#' response project, representing national and sub-national policy
#' event data from more than 140 countries since January 1st,
#' 2020. The data include source links, descriptions, targets
#' (i.e. other countries), the type and level of enforcement, and a
#' comprehensive set of policy types.
#'
#' @importFrom readr read_csv cols
#' @importFrom dplyr rename
#'
#' @references Cheng, Cindy, Joan Barcelo, Allison Hartnett,
#' Robert Kubinec, and Luca Messerschmidt. 2020. "Coronanet: A
#' Dyadic Dataset of Government Responses to the COVID-19
#' Pandemic." SocArXiv. April 12. doi:10.31235/osf.io/dkvxy.
#'
#' - \url{https://coronanet-project.org/working_papers}
#'
#' @source
#' - \url{https://coronanet-project.org/download}
#'
#' @return
#' - record_id Unique identifier for each policy record
##' - entry_type Whether the record is new, meaning no restriction had been in place before, or an update (restriction was in place but changed). Corrections are corrections to previous entries.
##' - event_description A short description of the policy change
##' - type The category of the policy
##' - country The country initiating the policy
##' - init_country_level Whether the policy came from the national level or a sub-national unit
##' - index_prov The ID of the sub-national unit
##' - target_country Which foreign country a policy is targeted at (i.e. travel policies)
##' - target_geog_level Whether the target of the policy is a country as a whole or a sub-national unit of that country
##' - target_who_what Who the policy is targeted at
##' - recorded_date When the record was entered into our data
##' - target_direction Whether a travel-related policy affects people coming in (Inbound) or leaving (Outbound)
##' - travel_mechanism If a travel policy, what kind of transportation it affects
##' - compliance Whether the policy is voluntary or mandatory
##' - enforcer What unit in the country is responsible for enforcement
##' - date_announced When the policy goes into effect
##' - link A link to at least one source for the policy
##' - ISO_A3 3-digit ISO country codes
##' - ISO_A2 2-digit ISO country codes
##' - severity_index_5perc 5% posterior low estimate (i.e. lower bound of uncertainty interval) for severity index
##' - severity_index_median posterior median estimate (point estimate) for severity index, which comes from a Bayesian latent variable model aggregating across policy types to measure country-level policy severity (see paper on our website)
##' - severity_index_95perc 95% posterior high estimate (i.e. upper bound of uncertainty interval) for severity index
##'
##' @family data-import
##' @family NPI
##'
##'
#' @examples
#' res = coronanet_government_response_data()
#' head(res)
#' colnames(res)
#' dplyr::glimpse(res)
#' summary(res)
#'
#'
#' @export
coronanet_government_response_data <- function() {
    url = "https://raw.githubusercontent.com/saudiwin/corona_tscs/master/data/CoronaNet/coronanet_release.csv"
    # Download (or reuse a cached copy of) the release CSV.
    rpath = s2p_cached_url(url)
    # guess_max = 5000: base column-type guessing on the first 5000 rows.
    dat = readr::read_csv(rpath, col_types = readr::cols(), progress=FALSE, guess_max=5000)
    # Standardize the ISO country-code column names to the iso2c/iso3c
    # convention used elsewhere in the package. Explicit dplyr:: call so the
    # function does not depend on magrittr's %>% being attached.
    dplyr::rename(dat, iso2c = "ISO_A2", iso3c = "ISO_A3")
}
| /R/coronanet_government_response_data.R | permissive | kartechbabu/sars2pack | R | false | false | 3,257 | r | #' CoronaNet government policy response database
#'
#' This dataset contains variables from the CoronaNet government
#' response project, representing national and sub-national policy
#' event data from more than 140 countries since January 1st,
#' 2020. The data include source links, descriptions, targets
#' (i.e. other countries), the type and level of enforcement, and a
#' comprehensive set of policy types.
#'
#' @importFrom readr read_csv cols
#'
#' @references Cheng, Cindy, Joan Barcelo, Allison Hartnett,
#' Robert Kubinec, and Luca Messerschmidt. 2020. “Coronanet: A
#' Dyadic Dataset of Government Responses to the COVID-19
#' Pandemic.” SocArXiv. April 12. doi:10.31235/osf.io/dkvxy.
#'
#' - \url{https://coronanet-project.org/working_papers}
#'
#' @source
#' - \url{https://coronanet-project.org/download}
#'
#' @return
#' - record_id Unique identifier for each policy record
##' - entry_type Whether the record is new, meaning no restriction had been in place before, or an update (restriction was in place but changed). Corrections are corrections to previous entries.
##' - event_description A short description of the policy change
##' - type The category of the policy
##' - country The country initiating the policy
##' - init_country_level Whether the policy came from the national level or a sub-national unit
##' - index_prov The ID of the sub-national unit
##' - target_country Which foreign country a policy is targeted at (i.e. travel policies)
##' - target_geog_level Whether the target of the policy is a country as a whole or a sub-national unit of that country
##' - target_who_what Who the policy is targeted at
##' - recorded_date When the record was entered into our data
##' - target_direction Whether a travel-related policy affects people coming in (Inbound) or leaving (Outbound)
##' - travel_mechanism If a travel policy, what kind of transportation it affects
##' - compliance Whether the policy is voluntary or mandatory
##' - enforcer What unit in the country is responsible for enforcement
##' - date_announced When the policy goes into effect
##' - link A link to at least one source for the policy
##' - ISO_A3 3-digit ISO country codes
##' - ISO_A2 2-digit ISO country codes
##' - severity_index_5perc 5% posterior low estimate (i.e. lower bound of uncertainty interval) for severity index
##' - severity_index_median posterior median estimate (point estimate) for severity index, which comes from a Bayesian latent variable model aggregating across policy types to measure country-level policy severity (see paper on our website)
##' - severity_index_5perc 95% posterior high estimate (i.e. upper bound of uncertainty interval) for severity index
##'
##' @family data-import
##' @family NPI
##'
##'
#' @examples
#' res = coronanet_government_response_data()
#' head(res)
#' colnames(res)
#' dplyr::glimpse(res)
#' summary(res)
#'
#'
#' @export
coronanet_government_response_data <- function() {
url = "https://raw.githubusercontent.com/saudiwin/corona_tscs/master/data/CoronaNet/coronanet_release.csv"
rpath = s2p_cached_url(url)
dat = readr::read_csv(rpath, col_types = readr::cols(), progress=FALSE, guess_max=5000)
dat %>% rename(iso2c = "ISO_A2", iso3c="ISO_A3")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cairo--.R
\name{cairo_push_group_with_content}
\alias{cairo_push_group_with_content}
\title{cairo_push_group_with_content}
\usage{
cairo_push_group_with_content(cr, content)
}
\arguments{
\item{cr}{[\code{cairo_t *}] a cairo context}
\item{content}{[\code{int}] a #cairo_content_t indicating the type of group that will be created}
}
\description{
Temporarily redirects drawing to an intermediate surface known as a
group. The redirection lasts until the group is completed by a call
to cairo_pop_group() or cairo_pop_group_to_source(). These calls
provide the result of any drawing to the group as a pattern,
(either as an explicit object, or set as the source pattern).
}
\details{
The group will have a content type of content. The ability to
control this content type is the only distinction between this
function and cairo_push_group() which you should see for a more
detailed description of group rendering.
Since: 1.2
C function prototype: \code{void cairo_push_group_with_content (cairo_t *cr, cairo_content_t content)}
}
\seealso{
Other cairo--:
\code{\link{cairo_append_path}()},
\code{\link{cairo_arc_negative}()},
\code{\link{cairo_arc}()},
\code{\link{cairo_clip_extents}()},
\code{\link{cairo_clip_preserve}()},
\code{\link{cairo_clip}()},
\code{\link{cairo_close_path}()},
\code{\link{cairo_copy_page}()},
\code{\link{cairo_copy_path_flat}()},
\code{\link{cairo_copy_path}()},
\code{\link{cairo_create}()},
\code{\link{cairo_curve_to}()},
\code{\link{cairo_device_to_user_distance}()},
\code{\link{cairo_device_to_user}()},
\code{\link{cairo_fill_extents}()},
\code{\link{cairo_fill_preserve}()},
\code{\link{cairo_fill}()},
\code{\link{cairo_font_extents}()},
\code{\link{cairo_get_antialias}()},
\code{\link{cairo_get_current_point}()},
\code{\link{cairo_get_dash_count}()},
\code{\link{cairo_get_dash}()},
\code{\link{cairo_get_fill_rule}()},
\code{\link{cairo_get_font_face}()},
\code{\link{cairo_get_font_matrix}()},
\code{\link{cairo_get_group_target}()},
\code{\link{cairo_get_line_cap}()},
\code{\link{cairo_get_line_join}()},
\code{\link{cairo_get_line_width}()},
\code{\link{cairo_get_matrix}()},
\code{\link{cairo_get_miter_limit}()},
\code{\link{cairo_get_operator}()},
\code{\link{cairo_get_source}()},
\code{\link{cairo_get_target}()},
\code{\link{cairo_get_tolerance}()},
\code{\link{cairo_has_current_point}()},
\code{\link{cairo_identity_matrix}()},
\code{\link{cairo_in_clip}()},
\code{\link{cairo_in_fill}()},
\code{\link{cairo_in_stroke}()},
\code{\link{cairo_line_to}()},
\code{\link{cairo_mask_surface}()},
\code{\link{cairo_mask}()},
\code{\link{cairo_move_to}()},
\code{\link{cairo_new_path}()},
\code{\link{cairo_new_sub_path}()},
\code{\link{cairo_paint_with_alpha}()},
\code{\link{cairo_paint}()},
\code{\link{cairo_path_extents}()},
\code{\link{cairo_pop_group_to_source}()},
\code{\link{cairo_pop_group}()},
\code{\link{cairo_push_group}()},
\code{\link{cairo_rectangle}()},
\code{\link{cairo_rel_curve_to}()},
\code{\link{cairo_rel_line_to}()},
\code{\link{cairo_rel_move_to}()},
\code{\link{cairo_reset_clip}()},
\code{\link{cairo_restore}()},
\code{\link{cairo_rotate}()},
\code{\link{cairo_save}()},
\code{\link{cairo_scale}()},
\code{\link{cairo_select_font_face}()},
\code{\link{cairo_set_antialias}()},
\code{\link{cairo_set_dash}()},
\code{\link{cairo_set_fill_rule}()},
\code{\link{cairo_set_font_face}()},
\code{\link{cairo_set_font_matrix}()},
\code{\link{cairo_set_font_size}()},
\code{\link{cairo_set_line_cap}()},
\code{\link{cairo_set_line_join}()},
\code{\link{cairo_set_line_width}()},
\code{\link{cairo_set_matrix}()},
\code{\link{cairo_set_miter_limit}()},
\code{\link{cairo_set_operator}()},
\code{\link{cairo_set_source_rgba}()},
\code{\link{cairo_set_source_rgb}()},
\code{\link{cairo_set_source_surface}()},
\code{\link{cairo_set_source}()},
\code{\link{cairo_set_tolerance}()},
\code{\link{cairo_show_page}()},
\code{\link{cairo_show_text}()},
\code{\link{cairo_status}()},
\code{\link{cairo_stroke_extents}()},
\code{\link{cairo_stroke_preserve}()},
\code{\link{cairo_stroke}()},
\code{\link{cairo_tag_begin}()},
\code{\link{cairo_tag_end}()},
\code{\link{cairo_text_extents}()},
\code{\link{cairo_text_path}()},
\code{\link{cairo_transform}()},
\code{\link{cairo_translate}()},
\code{\link{cairo_user_to_device_distance}()},
\code{\link{cairo_user_to_device}()}
}
\concept{cairo--}
| /man/cairo_push_group_with_content.Rd | permissive | coolbutuseless/cairocore | R | false | true | 4,434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cairo--.R
\name{cairo_push_group_with_content}
\alias{cairo_push_group_with_content}
\title{cairo_push_group_with_content}
\usage{
cairo_push_group_with_content(cr, content)
}
\arguments{
\item{cr}{[\code{cairo_t *}] a cairo context}
\item{content}{[\code{int}] a #cairo_content_t indicating the type of group that will be created}
}
\description{
Temporarily redirects drawing to an intermediate surface known as a
group. The redirection lasts until the group is completed by a call
to cairo_pop_group() or cairo_pop_group_to_source(). These calls
provide the result of any drawing to the group as a pattern,
(either as an explicit object, or set as the source pattern).
}
\details{
The group will have a content type of content. The ability to
control this content type is the only distinction between this
function and cairo_push_group() which you should see for a more
detailed description of group rendering.
Since: 1.2
C function prototype: \code{void cairo_push_group_with_content (cairo_t *cr, cairo_content_t content)}
}
\seealso{
Other cairo--:
\code{\link{cairo_append_path}()},
\code{\link{cairo_arc_negative}()},
\code{\link{cairo_arc}()},
\code{\link{cairo_clip_extents}()},
\code{\link{cairo_clip_preserve}()},
\code{\link{cairo_clip}()},
\code{\link{cairo_close_path}()},
\code{\link{cairo_copy_page}()},
\code{\link{cairo_copy_path_flat}()},
\code{\link{cairo_copy_path}()},
\code{\link{cairo_create}()},
\code{\link{cairo_curve_to}()},
\code{\link{cairo_device_to_user_distance}()},
\code{\link{cairo_device_to_user}()},
\code{\link{cairo_fill_extents}()},
\code{\link{cairo_fill_preserve}()},
\code{\link{cairo_fill}()},
\code{\link{cairo_font_extents}()},
\code{\link{cairo_get_antialias}()},
\code{\link{cairo_get_current_point}()},
\code{\link{cairo_get_dash_count}()},
\code{\link{cairo_get_dash}()},
\code{\link{cairo_get_fill_rule}()},
\code{\link{cairo_get_font_face}()},
\code{\link{cairo_get_font_matrix}()},
\code{\link{cairo_get_group_target}()},
\code{\link{cairo_get_line_cap}()},
\code{\link{cairo_get_line_join}()},
\code{\link{cairo_get_line_width}()},
\code{\link{cairo_get_matrix}()},
\code{\link{cairo_get_miter_limit}()},
\code{\link{cairo_get_operator}()},
\code{\link{cairo_get_source}()},
\code{\link{cairo_get_target}()},
\code{\link{cairo_get_tolerance}()},
\code{\link{cairo_has_current_point}()},
\code{\link{cairo_identity_matrix}()},
\code{\link{cairo_in_clip}()},
\code{\link{cairo_in_fill}()},
\code{\link{cairo_in_stroke}()},
\code{\link{cairo_line_to}()},
\code{\link{cairo_mask_surface}()},
\code{\link{cairo_mask}()},
\code{\link{cairo_move_to}()},
\code{\link{cairo_new_path}()},
\code{\link{cairo_new_sub_path}()},
\code{\link{cairo_paint_with_alpha}()},
\code{\link{cairo_paint}()},
\code{\link{cairo_path_extents}()},
\code{\link{cairo_pop_group_to_source}()},
\code{\link{cairo_pop_group}()},
\code{\link{cairo_push_group}()},
\code{\link{cairo_rectangle}()},
\code{\link{cairo_rel_curve_to}()},
\code{\link{cairo_rel_line_to}()},
\code{\link{cairo_rel_move_to}()},
\code{\link{cairo_reset_clip}()},
\code{\link{cairo_restore}()},
\code{\link{cairo_rotate}()},
\code{\link{cairo_save}()},
\code{\link{cairo_scale}()},
\code{\link{cairo_select_font_face}()},
\code{\link{cairo_set_antialias}()},
\code{\link{cairo_set_dash}()},
\code{\link{cairo_set_fill_rule}()},
\code{\link{cairo_set_font_face}()},
\code{\link{cairo_set_font_matrix}()},
\code{\link{cairo_set_font_size}()},
\code{\link{cairo_set_line_cap}()},
\code{\link{cairo_set_line_join}()},
\code{\link{cairo_set_line_width}()},
\code{\link{cairo_set_matrix}()},
\code{\link{cairo_set_miter_limit}()},
\code{\link{cairo_set_operator}()},
\code{\link{cairo_set_source_rgba}()},
\code{\link{cairo_set_source_rgb}()},
\code{\link{cairo_set_source_surface}()},
\code{\link{cairo_set_source}()},
\code{\link{cairo_set_tolerance}()},
\code{\link{cairo_show_page}()},
\code{\link{cairo_show_text}()},
\code{\link{cairo_status}()},
\code{\link{cairo_stroke_extents}()},
\code{\link{cairo_stroke_preserve}()},
\code{\link{cairo_stroke}()},
\code{\link{cairo_tag_begin}()},
\code{\link{cairo_tag_end}()},
\code{\link{cairo_text_extents}()},
\code{\link{cairo_text_path}()},
\code{\link{cairo_transform}()},
\code{\link{cairo_translate}()},
\code{\link{cairo_user_to_device_distance}()},
\code{\link{cairo_user_to_device}()}
}
\concept{cairo--}
|
library(shiny)
# Shiny UI for the "Iris Identification" app: the user enters four iris
# measurements plus a value of K, and the server side (not shown here)
# returns a k-nearest-neighbour species prediction in output "pred".
shinyUI(fluidPage(
titlePanel("Iris Identification"),
sidebarLayout(
# Input panel: four measurement text boxes plus K; the pre-filled values
# are (per the note below) the medians of the corresponding iris features.
sidebarPanel(
"Please specify values in the following boxes.",
textInput("box1", "Enter Sepal Length (cm):", value = "5.80"),
textInput("box2", "Enter Sepal Width (cm):", value = "3.00"),
textInput("box3", "Enter Petal Length (cm):", value = "4.35"),
textInput("box4", "Enter Petal Width (cm):", value = "1.30"),
textInput("box5", "Enter K (k-Nearest Neighbour in Euclidean Distance):", value = "1"),
# submitButton defers all reactive updates until the user clicks Submit
submitButton("Submit")
),
# Output panel: predicted species rendered by the server as output$pred.
mainPanel(
h3("Predicted Iris Species:"),
h2(textOutput("pred")),
h5("Note:"),
"This App is used to predict iris species using K-Nearest Neighbour Algorithm.
The model is trained using Edgar Anderson's iris data which can be found in datasets package.
The pre-filled values are the median values for corresponding features in the dataset.
shiny and class package should be installed to run the app."
)
)
))
| /ShinyApp/IrisIdentification/ui.R | no_license | rainbowfan/DataProductsDeveloping | R | false | false | 1,035 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Iris Identification"),
sidebarLayout(
sidebarPanel(
"Please specify values in the following boxes.",
textInput("box1", "Enter Sepal Length (cm):", value = "5.80"),
textInput("box2", "Enter Sepal Width (cm):", value = "3.00"),
textInput("box3", "Enter Petal Length (cm):", value = "4.35"),
textInput("box4", "Enter Petal Width (cm):", value = "1.30"),
textInput("box5", "Enter K (k-Nearest Neighbour in Euclidean Distance):", value = "1"),
submitButton("Submit")
),
mainPanel(
h3("Predicted Iris Species:"),
h2(textOutput("pred")),
h5("Note:"),
"This App is used to predict iris species using K-Nearest Neighbour Algorithm.
The model is trained using Edgar Anderson's iris data which can be found in datasets package.
The pre-filled values are the median values for corresponding features in the dataset.
shiny and class package should be installed to run the app."
)
)
))
|
# Bayesian linear-regression parameter draw, apparently adapted from
# mice's internal .norm.draw (NOTE(review): inferred from the structure,
# confirm against the mice package source). Differences visible in this
# code: the covariance matrix used for the beta draw is replaced by the
# externally supplied 'vcovcor' (the ridge-based solve() result is
# overwritten below), and the drawn residual s.d. is divided by
# sqrt(correct).
#
# Arguments:
#   y       response vector (observed and missing entries)
#   ry      logical vector, TRUE where y is observed
#   x       design matrix
#   ridge   ridge penalty; used only for the point estimate 'coef'
#   vcovcor covariance(-like) matrix used to draw beta.star. The default
#           'vcovcor = vcovcor' is self-referential and would error if the
#           argument were omitted — callers must always supply it
#           (TODO: give a real default or an explicit missing() check).
#   correct scalar (possibly vector — see mean() below) variance
#           correction factor; must also always be supplied, same issue.
#   ...     accepted but unused
#
# Returns a named list: coef (point estimate), beta (drawn coefficients),
# sigma (drawn residual standard deviation).
.norm.draw2 <-
function(y, ry, x, ridge = 1e-05, vcovcor=vcovcor,correct=correct, ...) {
# restrict to rows with an observed outcome
xobs <- x[ry, ]
yobs <- y[ry]
xtx <- t(xobs) %*% xobs
# ridge penalty proportional to the diagonal of X'X
pen <- ridge * diag(xtx)
if (length(pen) == 1)
pen <- matrix(pen)
v <- solve(xtx + diag(pen))
# ridge-regularized least-squares point estimate
coef <- t(yobs %*% xobs %*% v)
residuals <- yobs - xobs %*% coef
df <- max(sum(ry) - ncol(x), 1)
# overwrite the computed covariance with the externally supplied one;
# the beta draw below therefore ignores the ridge solve() result above
v <- vcovcor
# draw sigma: sum of squared residuals over a chi-square(df) draw, then
# shrink by sqrt(correct)
sigma.star <- sqrt(sum((residuals)^2)/rchisq(1, df))/sqrt(correct)
#print(sigma.star)
# collapse to a scalar in case 'correct' was vector-valued
sigma.star <-mean(sigma.star)
#print(sigma.star)
# draw beta ~ N(coef, sigma^2 * V); (v + t(v))/2 symmetrizes v before chol()
beta.star <- coef + (t(chol((v + t(v))/2)) %*% rnorm(ncol(x))) * sigma.star
parm <- list(coef, beta.star, sigma.star)
names(parm) <- c("coef", "beta", "sigma")
return(parm)
}
.Random.seed <-
c(403L, 10L, -1910847694L, 239932973L, 1140268263L, -393351666L,
1313444840L, -562295317L, 1029723785L, 1518347800L, -1025643194L,
1905413025L, 1771710003L, -928154718L, -1684787564L, 232090903L,
-901891315L, -801584604L, -588991990L, 1903733621L, 1481535183L,
-1593056810L, -757248640L, 1312277955L, 1702241121L, 520772048L,
-286756562L, -1757849383L, -1183264021L, -1917108390L, -1851528932L,
-292478945L, 1423182965L, -1801103092L, 1538274530L, 983539133L,
2080541239L, 1757575390L, -687721448L, 1111133051L, 1566448409L,
-85669976L, -387250346L, -1874763823L, -1735836797L, 945913362L,
-780428892L, -752288409L, -1092533571L, -471481804L, -1606780518L,
592602597L, -548298369L, -264828666L, -219049328L, -1932774605L,
1154263249L, 201902720L, -918955170L, 231221417L, 1525951323L,
1032763754L, -1932973044L, -375976881L, 964992261L, -836428356L,
1836513426L, -1247241331L, 501849031L, 949338286L, -1715728120L,
-253686005L, -1502381591L, -872268104L, -159635098L, 910420481L,
-1991547757L, 167280450L, -1741885196L, -1098273545L, -1904171795L,
-559933564L, -1579734614L, 1786613525L, -1378857553L, -414214730L,
1261456352L, -1340423389L, -1189845951L, 1227831088L, -211569330L,
1430042553L, 206893515L, 1212658298L, 194000764L, -1289209921L,
-19185515L, 354537132L, 2119154818L, -800699299L, 812307287L,
-2099761602L, 991912568L, 1070502235L, -382189383L, 1846415624L,
-1677646922L, -440033039L, 1593759523L, 2070704498L, -1415920700L,
770944903L, 1007342941L, 3066708L, 2122051962L, -315622203L,
-1115349473L, 1147533734L, 1866873648L, -411133485L, -1025646607L,
-908154592L, -540584002L, -1639516215L, 1007884155L, -801485494L,
-920671828L, 353895599L, -522781083L, 1388422684L, 777156722L,
319946477L, 665756455L, 1555660622L, -212191320L, 52544427L,
18809033L, 2030291160L, -1374358138L, 2114088033L, 348298739L,
1605983330L, 363764436L, 1762978775L, -47636019L, -1226492572L,
1563148618L, -908072779L, 290553871L, 2114238742L, 153002944L,
395374467L, 1790500257L, 1904705936L, -2116990354L, -970331111L,
529297579L, 51025050L, -1199135652L, 1487872351L, -1126448331L,
754173644L, 1340691490L, -1197889923L, 535886711L, 147109022L,
-454003880L, 44578491L, 1135433817L, -1467266072L, 1523994134L,
-1225436783L, -76971453L, -940703406L, 1078449636L, -1573225433L,
378390653L, 2034111860L, 934107482L, 1458130853L, 2015915967L,
-1802542010L, 362758864L, 985074931L, 1385376273L, -206803904L,
-587644898L, -1299718423L, -2062230501L, -224234454L, 496879436L,
-2006090481L, -621715387L, -1228994052L, 295422034L, 906326477L,
1929171847L, -1591544210L, 1830303304L, -532934581L, 1167948201L,
1309783288L, -1303592922L, 68538689L, -1006899501L, 2144327298L,
1393745076L, 2036081719L, -125876435L, -447069116L, 499902826L,
2049508821L, -12410001L, -2111554954L, 1135689120L, 1911925347L,
2080439041L, 1171337840L, -2081985010L, -756390279L, 41062923L,
177326906L, 70660668L, -2007273089L, -184755499L, 1756347116L,
1607945282L, -437689443L, 223443479L, -1238035348L, -1474056524L,
2022811666L, 216074208L, 1432216524L, 1850154464L, 53953058L,
-268718808L, -1399044852L, -1594655172L, 874295538L, 9064176L,
-369651900L, -569508264L, -1808438726L, 2052501232L, -1430040388L,
-1353068012L, 2101204162L, 539554912L, -451011204L, -188755760L,
961184802L, -565068120L, -483376244L, 224734652L, -941646798L,
1835509568L, -1407204172L, 1546880840L, 1698384570L, 665771792L,
514751724L, -916372396L, 1877085970L, -1485260896L, -368127028L,
-371371168L, 127091906L, 1740805672L, -1263479380L, 2071022396L,
-479348878L, -8923344L, -1374570972L, 298959928L, -1828298534L,
1540470160L, -1686963460L, -780553324L, -1586546878L, 851697152L,
-1865472452L, 1170991696L, 1253971618L, -1325325496L, -158608884L,
-2120468260L, 1963719250L, -637087616L, -39711916L, 1538934248L,
218693370L, 240298832L, 155905068L, 155740532L, 899085970L, -227430816L,
-2084952436L, 918132576L, -1417331998L, 665628840L, -916685620L,
-1448910340L, -2024669262L, -1866520080L, -315595324L, 1999108952L,
-128438406L, 834524592L, -1187424836L, -1138146220L, -685455422L,
1426081568L, 1631632316L, 1267280L, -1282078238L, -100756120L,
-1473278644L, -1093208836L, 904043250L, -185482496L, 1621872244L,
1234392072L, -1291953734L, -117247024L, 704284140L, 1614376852L,
1780858578L, 478145120L, 1072905548L, -1214791392L, -1901724030L,
-852232728L, -96857108L, 871979580L, 1078597234L, -410768528L,
-1602481884L, -1483267528L, 908155290L, -18174512L, 1648246204L,
1211516052L, -1037793150L, -490349120L, 1895841596L, 1848111248L,
-1846973662L, 333486600L, -996808436L, 1424882076L, 893651474L,
228269952L, -1216017644L, -757133016L, -1110206406L, -500311088L,
1508363884L, -2030497228L, 938616466L, -685273760L, -1242470068L,
-1277565984L, 1552994722L, -348956248L, -649292148L, -690876996L,
721183346L, -866333712L, 664433220L, -1130246056L, -502987718L,
1976792176L, 1968288828L, 1202345236L, 654326466L, -944245024L,
1219972220L, -1419388208L, 1004554658L, -537224152L, -777988340L,
310009276L, 1306629938L, -1537466688L, -1938456524L, -1571619128L,
-1918728006L, -1484934512L, 2105597676L, 1691322964L, 675434130L,
-1292808928L, -1159772724L, -746012960L, 1211442370L, 1971936296L,
888513452L, 476851900L, -131278862L, 1735797296L, 204800932L,
1478761912L, 1168875354L, -1083088752L, 1091907452L, -365071084L,
2000197954L, 697988352L, 1528295868L, -339765936L, 1093280034L,
1364611656L, -38956020L, 2018234972L, -1953360430L, 1750928896L,
1345108308L, 1774517480L, -482498310L, 408603344L, 1516732204L,
-1994156812L, 1906388370L, 1315619168L, 1836830732L, 834697440L,
1981728354L, -1508777816L, -301658036L, -368695428L, -1985467086L,
-283103632L, 639518532L, -627008552L, -786864262L, 1251058736L,
1272981436L, -1437939116L, -1062606782L, 1984296608L, 2004841148L,
-89595952L, 1931665762L, -1462719768L, -1991308084L, 863910396L,
1063211506L, -2029262720L, 980917748L, 1897736840L, -122222790L,
-1588342448L, 21786732L, 1550472596L, 557524050L, 956717810L,
-266943680L, -422877015L, 838638875L, -2133095700L, 120657658L,
-418461905L, -1710364871L, 2006571342L, -593672292L, -1681570547L,
196988791L, 1047702400L, -233995810L, 1331181227L, 2126125485L,
502296090L, -1254158120L, -87081663L, 1047999987L, 218354708L,
-341337694L, -874664265L, -37787519L, 1198678150L, 1063949508L,
44937717L, 136653919L, 2003799736L, 1178771350L, -8268381L, 374147429L,
-1670553470L, -1880192528L, -354959463L, -783631989L, 234808444L,
-91631958L, -1173067553L, 159022697L, 1537617022L, 309519468L,
223311037L, -1198412409L, 899385712L, 582175758L, 621347131L,
2079362589L, 1683329738L, -1975009816L, 1032741585L, 821944003L,
1001657796L, 43737714L, -381805753L, -11066607L, 918072982L,
-1961122828L, -1063055995L, -1124466481L, 1006594504L, 1578109030L,
-388096365L, 377802229L, -37105710L, 1796697120L, -231813623L,
-520308933L, 68385420L, 455614234L, 461369743L, 1149972185L,
1482838382L, -560227076L, 69391981L, -2080366057L, 1749456864L,
173675454L, 1792913803L, 673776141L, 47431226L, -177637768L,
1393303073L, 881365203L, -583577612L, 32393346L, 2072633623L,
-1273899039L, 1621426470L, -507273180L, -1563311403L, -1388876161L,
-775676136L, 903974454L, 1019019459L, -716374779L, -253393758L,
414612112L, -796654023L, 931485419L, -1994618916L, -1773204982L,
1069063423L, -1963091639L, 188779230L, 1787000972L, -469626787L,
-360112537L, 757600144L, -689339474L, -585373093L, -2118572483L,
-2066114390L, 1685023048L, 599121265L, -1412850973L, -1919143452L,
233700626L, 807383271L, 1772488369L, 1084538102L, 701046548L,
-796575707L, 1719090479L, 570692328L, -1574326330L, 1491980915L,
-545581867L, -1763477838L, -1963790720L, -791144855L, -1788773669L,
1239025196L, -1676704198L, -1842495249L, -1140192391L, 1420347406L,
-1360974372L, -905896371L, -1103086153L, -244640960L, -265199074L,
1614800107L, 1354378349L, -894759718L, 1172264088L, -1805987071L,
-1406945101L, -307227692L, -1140420894L, -118878345L, -1357737279L,
1238803014L, 978849156L, -1523829963L, -706399969L, 1038456952L,
-919403178L, 644987875L, -2055137883L, -1534265278L, 823014960L,
-1907224999L, -909342773L, 2103679036L, -21043606L, -558859105L,
612175145L, -197133890L, 1533103397L)
| /R/miceMNAR-internal.R | no_license | prakash5801/miceMNAR | R | false | false | 9,133 | r | .norm.draw2 <-
function(y, ry, x, ridge = 1e-05, vcovcor=vcovcor,correct=correct, ...) {
# Bayesian regression draw for imputation: fit on the observed cases (ry),
# then perturb the coefficients using the caller-supplied covariance matrix
# (vcovcor) and a sigma draw divided by sqrt(correct).
# NOTE(review): the defaults `vcovcor=vcovcor` / `correct=correct` are
# self-referential and only work when callers pass both explicitly.
xobs <- x[ry, ]
yobs <- y[ry]
xtx <- t(xobs) %*% xobs
pen <- ridge * diag(xtx)
if (length(pen) == 1)
pen <- matrix(pen)
v <- solve(xtx + diag(pen))
coef <- t(yobs %*% xobs %*% v)
residuals <- yobs - xobs %*% coef
df <- max(sum(ry) - ncol(x), 1)
v <- vcovcor   # the locally computed vcov is discarded in favour of the caller's
sigma.star <- sqrt(sum((residuals)^2)/rchisq(1, df))/sqrt(correct)
#print(sigma.star)
sigma.star <-mean(sigma.star)
#print(sigma.star)
# Perturb coefficients with the (symmetrised) Cholesky factor of v.
beta.star <- coef + (t(chol((v + t(v))/2)) %*% rnorm(ncol(x))) * sigma.star
parm <- list(coef, beta.star, sigma.star)
names(parm) <- c("coef", "beta", "sigma")
return(parm)
}
.Random.seed <-
c(403L, 10L, -1910847694L, 239932973L, 1140268263L, -393351666L,
1313444840L, -562295317L, 1029723785L, 1518347800L, -1025643194L,
1905413025L, 1771710003L, -928154718L, -1684787564L, 232090903L,
-901891315L, -801584604L, -588991990L, 1903733621L, 1481535183L,
-1593056810L, -757248640L, 1312277955L, 1702241121L, 520772048L,
-286756562L, -1757849383L, -1183264021L, -1917108390L, -1851528932L,
-292478945L, 1423182965L, -1801103092L, 1538274530L, 983539133L,
2080541239L, 1757575390L, -687721448L, 1111133051L, 1566448409L,
-85669976L, -387250346L, -1874763823L, -1735836797L, 945913362L,
-780428892L, -752288409L, -1092533571L, -471481804L, -1606780518L,
592602597L, -548298369L, -264828666L, -219049328L, -1932774605L,
1154263249L, 201902720L, -918955170L, 231221417L, 1525951323L,
1032763754L, -1932973044L, -375976881L, 964992261L, -836428356L,
1836513426L, -1247241331L, 501849031L, 949338286L, -1715728120L,
-253686005L, -1502381591L, -872268104L, -159635098L, 910420481L,
-1991547757L, 167280450L, -1741885196L, -1098273545L, -1904171795L,
-559933564L, -1579734614L, 1786613525L, -1378857553L, -414214730L,
1261456352L, -1340423389L, -1189845951L, 1227831088L, -211569330L,
1430042553L, 206893515L, 1212658298L, 194000764L, -1289209921L,
-19185515L, 354537132L, 2119154818L, -800699299L, 812307287L,
-2099761602L, 991912568L, 1070502235L, -382189383L, 1846415624L,
-1677646922L, -440033039L, 1593759523L, 2070704498L, -1415920700L,
770944903L, 1007342941L, 3066708L, 2122051962L, -315622203L,
-1115349473L, 1147533734L, 1866873648L, -411133485L, -1025646607L,
-908154592L, -540584002L, -1639516215L, 1007884155L, -801485494L,
-920671828L, 353895599L, -522781083L, 1388422684L, 777156722L,
319946477L, 665756455L, 1555660622L, -212191320L, 52544427L,
18809033L, 2030291160L, -1374358138L, 2114088033L, 348298739L,
1605983330L, 363764436L, 1762978775L, -47636019L, -1226492572L,
1563148618L, -908072779L, 290553871L, 2114238742L, 153002944L,
395374467L, 1790500257L, 1904705936L, -2116990354L, -970331111L,
529297579L, 51025050L, -1199135652L, 1487872351L, -1126448331L,
754173644L, 1340691490L, -1197889923L, 535886711L, 147109022L,
-454003880L, 44578491L, 1135433817L, -1467266072L, 1523994134L,
-1225436783L, -76971453L, -940703406L, 1078449636L, -1573225433L,
378390653L, 2034111860L, 934107482L, 1458130853L, 2015915967L,
-1802542010L, 362758864L, 985074931L, 1385376273L, -206803904L,
-587644898L, -1299718423L, -2062230501L, -224234454L, 496879436L,
-2006090481L, -621715387L, -1228994052L, 295422034L, 906326477L,
1929171847L, -1591544210L, 1830303304L, -532934581L, 1167948201L,
1309783288L, -1303592922L, 68538689L, -1006899501L, 2144327298L,
1393745076L, 2036081719L, -125876435L, -447069116L, 499902826L,
2049508821L, -12410001L, -2111554954L, 1135689120L, 1911925347L,
2080439041L, 1171337840L, -2081985010L, -756390279L, 41062923L,
177326906L, 70660668L, -2007273089L, -184755499L, 1756347116L,
1607945282L, -437689443L, 223443479L, -1238035348L, -1474056524L,
2022811666L, 216074208L, 1432216524L, 1850154464L, 53953058L,
-268718808L, -1399044852L, -1594655172L, 874295538L, 9064176L,
-369651900L, -569508264L, -1808438726L, 2052501232L, -1430040388L,
-1353068012L, 2101204162L, 539554912L, -451011204L, -188755760L,
961184802L, -565068120L, -483376244L, 224734652L, -941646798L,
1835509568L, -1407204172L, 1546880840L, 1698384570L, 665771792L,
514751724L, -916372396L, 1877085970L, -1485260896L, -368127028L,
-371371168L, 127091906L, 1740805672L, -1263479380L, 2071022396L,
-479348878L, -8923344L, -1374570972L, 298959928L, -1828298534L,
1540470160L, -1686963460L, -780553324L, -1586546878L, 851697152L,
-1865472452L, 1170991696L, 1253971618L, -1325325496L, -158608884L,
-2120468260L, 1963719250L, -637087616L, -39711916L, 1538934248L,
218693370L, 240298832L, 155905068L, 155740532L, 899085970L, -227430816L,
-2084952436L, 918132576L, -1417331998L, 665628840L, -916685620L,
-1448910340L, -2024669262L, -1866520080L, -315595324L, 1999108952L,
-128438406L, 834524592L, -1187424836L, -1138146220L, -685455422L,
1426081568L, 1631632316L, 1267280L, -1282078238L, -100756120L,
-1473278644L, -1093208836L, 904043250L, -185482496L, 1621872244L,
1234392072L, -1291953734L, -117247024L, 704284140L, 1614376852L,
1780858578L, 478145120L, 1072905548L, -1214791392L, -1901724030L,
-852232728L, -96857108L, 871979580L, 1078597234L, -410768528L,
-1602481884L, -1483267528L, 908155290L, -18174512L, 1648246204L,
1211516052L, -1037793150L, -490349120L, 1895841596L, 1848111248L,
-1846973662L, 333486600L, -996808436L, 1424882076L, 893651474L,
228269952L, -1216017644L, -757133016L, -1110206406L, -500311088L,
1508363884L, -2030497228L, 938616466L, -685273760L, -1242470068L,
-1277565984L, 1552994722L, -348956248L, -649292148L, -690876996L,
721183346L, -866333712L, 664433220L, -1130246056L, -502987718L,
1976792176L, 1968288828L, 1202345236L, 654326466L, -944245024L,
1219972220L, -1419388208L, 1004554658L, -537224152L, -777988340L,
310009276L, 1306629938L, -1537466688L, -1938456524L, -1571619128L,
-1918728006L, -1484934512L, 2105597676L, 1691322964L, 675434130L,
-1292808928L, -1159772724L, -746012960L, 1211442370L, 1971936296L,
888513452L, 476851900L, -131278862L, 1735797296L, 204800932L,
1478761912L, 1168875354L, -1083088752L, 1091907452L, -365071084L,
2000197954L, 697988352L, 1528295868L, -339765936L, 1093280034L,
1364611656L, -38956020L, 2018234972L, -1953360430L, 1750928896L,
1345108308L, 1774517480L, -482498310L, 408603344L, 1516732204L,
-1994156812L, 1906388370L, 1315619168L, 1836830732L, 834697440L,
1981728354L, -1508777816L, -301658036L, -368695428L, -1985467086L,
-283103632L, 639518532L, -627008552L, -786864262L, 1251058736L,
1272981436L, -1437939116L, -1062606782L, 1984296608L, 2004841148L,
-89595952L, 1931665762L, -1462719768L, -1991308084L, 863910396L,
1063211506L, -2029262720L, 980917748L, 1897736840L, -122222790L,
-1588342448L, 21786732L, 1550472596L, 557524050L, 956717810L,
-266943680L, -422877015L, 838638875L, -2133095700L, 120657658L,
-418461905L, -1710364871L, 2006571342L, -593672292L, -1681570547L,
196988791L, 1047702400L, -233995810L, 1331181227L, 2126125485L,
502296090L, -1254158120L, -87081663L, 1047999987L, 218354708L,
-341337694L, -874664265L, -37787519L, 1198678150L, 1063949508L,
44937717L, 136653919L, 2003799736L, 1178771350L, -8268381L, 374147429L,
-1670553470L, -1880192528L, -354959463L, -783631989L, 234808444L,
-91631958L, -1173067553L, 159022697L, 1537617022L, 309519468L,
223311037L, -1198412409L, 899385712L, 582175758L, 621347131L,
2079362589L, 1683329738L, -1975009816L, 1032741585L, 821944003L,
1001657796L, 43737714L, -381805753L, -11066607L, 918072982L,
-1961122828L, -1063055995L, -1124466481L, 1006594504L, 1578109030L,
-388096365L, 377802229L, -37105710L, 1796697120L, -231813623L,
-520308933L, 68385420L, 455614234L, 461369743L, 1149972185L,
1482838382L, -560227076L, 69391981L, -2080366057L, 1749456864L,
173675454L, 1792913803L, 673776141L, 47431226L, -177637768L,
1393303073L, 881365203L, -583577612L, 32393346L, 2072633623L,
-1273899039L, 1621426470L, -507273180L, -1563311403L, -1388876161L,
-775676136L, 903974454L, 1019019459L, -716374779L, -253393758L,
414612112L, -796654023L, 931485419L, -1994618916L, -1773204982L,
1069063423L, -1963091639L, 188779230L, 1787000972L, -469626787L,
-360112537L, 757600144L, -689339474L, -585373093L, -2118572483L,
-2066114390L, 1685023048L, 599121265L, -1412850973L, -1919143452L,
233700626L, 807383271L, 1772488369L, 1084538102L, 701046548L,
-796575707L, 1719090479L, 570692328L, -1574326330L, 1491980915L,
-545581867L, -1763477838L, -1963790720L, -791144855L, -1788773669L,
1239025196L, -1676704198L, -1842495249L, -1140192391L, 1420347406L,
-1360974372L, -905896371L, -1103086153L, -244640960L, -265199074L,
1614800107L, 1354378349L, -894759718L, 1172264088L, -1805987071L,
-1406945101L, -307227692L, -1140420894L, -118878345L, -1357737279L,
1238803014L, 978849156L, -1523829963L, -706399969L, 1038456952L,
-919403178L, 644987875L, -2055137883L, -1534265278L, 823014960L,
-1907224999L, -909342773L, 2103679036L, -21043606L, -558859105L,
612175145L, -197133890L, 1533103397L)
|
# Plots the continuous descriptive results using a box plot.
#
# Expects `data` with one row per treatment and columns qnt_25, median,
# qnt_75, N, treatment, name, plus a comma-separated `outliers` string.
# Draws a horizontal box plot via graphics::bxp() from these pre-computed
# statistics, then adds a numeric axis and a colour legend.
plot_boxplot <- function(data){
  # Whisker bounds via the usual 1.5 * IQR rule.
  iqr <- as.numeric(data$qnt_75) - as.numeric(data$qnt_25)
  lower <- as.numeric(data$qnt_25) - 1.5 * iqr
  upper <- as.numeric(data$qnt_75) + 1.5 * iqr
  # Parse the comma-separated outlier strings into numeric vectors and
  # record which box (row) each outlier belongs to, as bxp() expects.
  outliers <- lapply(strsplit(data$outliers, split = ", "), as.numeric)
  names(outliers) <- seq_along(outliers)
  outliers_groups <- lapply(seq_along(outliers), function(x){rep(names(outliers[x]), length(outliers[x][[1]]))})
  # bxp() consumes a boxplot()-style list: a 5-row stats matrix
  # (lower whisker, Q1, median, Q3, upper whisker) per treatment.
  bxp(list(stats = matrix(c(lower,
                            as.numeric(data$qnt_25),
                            as.numeric(data$median),
                            as.numeric(data$qnt_75),
                            upper),
                          ncol = length(unique(data$treatment)),
                          byrow = TRUE),
           n = as.numeric(data$N),
           names = data$treatment,
           out = unlist(outliers),
           group = unlist(outliers_groups)
           ),
      horizontal = TRUE,
      pars = list(ylim = c(min(c(unlist(outliers), lower)) - 5,
                           max(c(unlist(outliers), upper)) + 6),
                  xlab = paste("\n", unique(data$name), "\n"),
                  yaxt = "n",
                  boxfill = hue_pal()(length(unique(data$treatment))),
                  boxwex = 0.4,
                  axes = FALSE)
      )
  # Add the numeric axis at whole-number ticks spanning the data range.
  axis(side = 1,
       at = round(seq(min(unlist(outliers), lower) - 1,
                      max(unlist(outliers), upper) + 1)))
  # Colour legend along the top, matching the box fill palette.
  legend("top", legend = data$treatment, horiz = TRUE,
         fill = hue_pal()(length(data$treatment)),
         inset = c(0, 0), bty = "n")
}
# Plots the categorical descriptive results using a dot plot: one faceted
# panel per variable name, category proportions on the x-axis, coloured by
# treatment arm.
plot_dotplot <- function(data){
  # Freeze the category order as it appears in the data so ggplot does not
  # re-sort the y-axis alphabetically.
  plot_data <- mutate(data, value = ordered(value, levels = unique(value)))
  base_plot <- ggplot(plot_data,
                      aes(y = value, x = proportion, col = treatment))
  base_plot +
    geom_point(alpha = 0.75, size = 4) +
    facet_wrap(~name, scales = "free") +
    theme_light() +
    theme(axis.title = element_blank(),
          panel.grid.minor.x = element_blank(),
          panel.grid.major.y = element_line(colour = "lightgray", size = 0.075),
          legend.position = "top",
          legend.title = element_blank()) +
    labs(x = "Proportion")
}
# Plots the Kaplan-Meier curves from the survival analysis: one line per
# stratum (distinguished by both colour and line type) over time.
plot_km <- function(data){
  km_plot <- ggplot(data, aes(x = time, y = estimate, col = strata)) +
    geom_line(aes(linetype = strata))
  # Axis titles and a top-positioned legend on a plain theme.
  # (NB: "surival" typo kept verbatim to preserve the rendered label.)
  km_plot +
    labs(x = "Time to event", y = "Probability of surival") +
    theme_bw() +
    theme(legend.position = "top")
}
| /ARDM/applications/plots.R | permissive | joanacmbarros/analysis-results-data-model | R | false | false | 2,991 | r | # Plots the continuous descriptive results using a box plot
plot_boxplot <- function(data){
# Draws a horizontal box plot (graphics::bxp) from pre-computed summary
# statistics: one row per treatment with quartiles, median, N, name and a
# comma-separated "outliers" string.
# Manually select the lower and upper bounds
lower <- as.numeric(data$qnt_25) - 1.5 *
(as.numeric(data$qnt_75) - as.numeric(data$qnt_25))
upper <- as.numeric(data$qnt_75) + 1.5 *
(as.numeric(data$qnt_75) - as.numeric(data$qnt_25))
# Manually prepare the outliers to a suitable format for the bxp function
outliers <- lapply(strsplit(data$outliers, split = ", "), as.numeric)
names(outliers) <- seq(1:length(outliers))
outliers_groups <- lapply(seq_along(outliers), function(x){rep(names(outliers[x]), length(outliers[x][[1]]))})
# bxp() takes a boxplot()-style list: a 5-row stats matrix (lower whisker,
# Q1, median, Q3, upper whisker) per treatment, plus outliers and groups.
plot <- bxp(list(stats = matrix(c(lower,
as.numeric(data$qnt_25),
as.numeric(data$median),
as.numeric(data$qnt_75),
upper),
ncol = length(unique(data$treatment)),
byrow = TRUE),
n = as.numeric(data$N),
names = data$treatment,
out = unlist(outliers),
group = unlist(outliers_groups)
),
horizontal = T,
pars = list(ylim = c(min(c(unlist(outliers),lower))-5,
max(c(unlist(outliers), upper))+6),
xlab = paste("\n", unique(data$name), "\n"),
yaxt = "n",
boxfill = hue_pal()(length(unique(data$treatment))),
boxwex = 0.4,
axes = F)
)
# Add the axis
axis(side=1,
at=round(seq(min(unlist(outliers),lower)-1,
max(unlist(outliers), upper)+1)))
# Colour legend along the top, matching the box fill palette.
legend("top", legend=data$treatment, horiz=TRUE,
fill=hue_pal()(length(data$treatment)),
inset = c(0, 0), bty="n")
}
# Plots the categorical descriptive results using a dot plot
plot_dotplot <- function(data){
# Freeze the category order as it appears in the data so ggplot does not
# re-sort the y-axis alphabetically.
data <- data %>%
mutate(value = ordered(value, levels = unique(value)))
# One faceted panel per variable name; proportions on x, coloured by arm.
ggplot(data, aes(y = value,
x = proportion,
col = treatment)) +
geom_point(alpha = 0.75, size = 4) +
facet_wrap(~name, scales = "free") +
theme_light() +
theme(axis.title = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(colour="lightgray", size=0.075),
legend.position = "top",
legend.title = element_blank()) +
labs(x = "Proportion")
}
# Plots the Kaplan-Meier curves from the survival analysis
plot_km <- function(data){
# One survival curve per stratum, distinguished by colour and line type.
# NOTE(review): "surival" is a typo in the rendered y-axis label.
ggplot(data, aes(x=time, y=estimate, col=strata)) +
geom_line(aes(linetype=strata)) +
labs(x = "Time to event", y ="Probability of surival") +
theme_bw() +
theme(legend.position = "top")
}
|
#' Read one simulation-result RDS file and format it for the output tables.
#'
#' @param distribution folder name under 'res/' (e.g. 'norm', 'cauchy').
#' @param n sample size; 5 selects the exact N=5 results, anything else
#'   the N=50 results.
#' @param percents if TRUE, rescale the stored proportions to percent.
#' @param prefix distribution letter(s) used to label each scenario.
#' @param type one of 'mean', 'var', 'mean1var', 'mean2var' -- which
#'   alternative (shift in mean and/or variance) the file describes.
#' @return the rounded result matrix with two label columns prepended:
#'   c1 repeats the first scenario's label, c2 names each alternative.
readRDS.type <- function(distribution, n, percents, prefix, type) {
  # Validate the alternative type up front.
  if (!type %in% c('mean', 'var', 'mean1var', 'mean2var')) {
    stop("\nSelect type from:\n 1) mean;\n 2) var;\n 3) mean1var;\n 4) mean2var.")
  }
  size <- if (n == 5) {
    'N5M1000Exact.RDS'
  } else {
    'N50M1000D800.RDS'
  }
  res <- round(readRDS(paste0('res/', distribution, '/', type, size)), 3)
  if (percents) {
    res <- res * 100
  }
  # Translate the stored parameter grid (row names) into the (mean, sd)
  # pair describing each alternative hypothesis.
  switch(type,
    mean = {
      first <- rownames(res)
      second <- 1
    }, var = {
      first <- 0
      second <- rownames(res)
    }, mean1var = {
      first <- rownames(res)
      second <- 1 + as.numeric(rownames(res))
    }, mean2var = {
      first <- rownames(res)
      second <- 1 + 2*as.numeric(rownames(res))
    }
  )
  # Scenario labels, e.g. "N(0, 1)"; c1 repeats the first one per row
  # (was a hard-coded rep(..., 5); length(c2) generalises to any row count).
  c2 <- paste0(prefix, '(', first, ', ', second, ')')
  c1 <- rep(c2[1], length(c2))
  cbind(c1, c2, res)
}
# Collects all four alternative types for one distribution into a single
# table. Unless given explicitly, a label prefix is derived from the
# distribution name: "logX..." -> "LX", otherwise the first letter,
# upper-cased (e.g. 'norm' -> "N", 'logcauchy' -> "LC").
readRDS.distribution <- function(distribution, n = 50, percents = TRUE, prefix = NULL) {
  if (is.null(prefix)) {
    is_log <- substr(distribution, 1, 3) == 'log'
    raw_prefix <- if (is_log) paste0('L', substr(distribution, 4, 4)) else substr(distribution, 1, 1)
    prefix <- toupper(raw_prefix)
  }
  # Stack the four result blocks in a fixed order.
  all_types <- c('mean', 'var', 'mean1var', 'mean2var')
  pieces <- lapply(all_types, function(tp) readRDS.type(distribution, n, percents, prefix, tp))
  res <- do.call(rbind, pieces)
  # Re-number the rows sequentially across the stacked blocks.
  rownames(res) <- seq_len(nrow(res))
  res
}
#' Write a result table as LaTeX tabular rows.
#'
#' Columns are separated by " & " and each row ends with " \\". The
#' column-name line is prefixed with "% " so LaTeX treats it as a comment,
#' and a final \hline is appended.
#'
#' @param res matrix or data frame to export.
#' @param file output path; overwritten unless append = TRUE.
#' @param with_F1 keep the first (label) column?
#' @param append append to an existing file instead of overwriting it.
MakeTable <- function(res, file = 'table.tex', with_F1 = TRUE, append = FALSE) {
  if (!with_F1) {
    # drop = FALSE keeps a data frame (and its column name) even if only
    # one column remains after removing the first.
    res <- res[, -1, drop = FALSE]
  }
  # "% " comments out the header line that write.table emits next.
  cat('% ', file = file, append = append)
  write.table(res, file,
              quote = FALSE, sep = ' & ', eol = ' \\\\\n',
              row.names = FALSE, col.names = TRUE, append = TRUE)
  write('\\hline', file, append = TRUE)
}
# Build the article tables: for each distribution, load all scenario
# results, drop unwanted columns, keep five representative rows, and
# append them to the LaTeX table files (table2.tex / table3.tex).
res.norm <- readRDS.distribution('norm')[, -c(4, 6, 8)]
res.norm   # printed for interactive inspection
res.norm <- res.norm[c(1, 4, 8, 13, 18), ]
MakeTable(res.norm, 'table2.tex')
res.cauchy <- readRDS.distribution('cauchy')[, -c(4, 6, 8)]
res.cauchy
res.cauchy <- res.cauchy[c(1, 3, 8, 13, 18), ]
MakeTable(res.cauchy, 'table2.tex', append = TRUE)
res.levy <- readRDS.distribution('levy')[, -c(4, 6, 8)]
res.levy
res.levy <- res.levy[c(1, 5, 10, 13, 18), ]
MakeTable(res.levy, 'table2.tex', append = TRUE)
# The log-Cauchy table additionally drops two more columns and goes to
# its own file.
res.logcauchy <- readRDS.distribution('logcauchy')[, -c(4, 6, 8)]
res.logcauchy
res.logcauchy <- res.logcauchy[c(1, 5, 8, 15, 20), -c(6, 8)]
MakeTable(res.logcauchy, 'table3.tex')
| /for_art/for_art.R | no_license | DmitrySalnikov/new-criterion | R | false | false | 2,481 | r | readRDS.type <- function(distribution, n, percents, prefix, type) {
if (type != 'mean' && type != 'var' && type != 'mean1var' && type != 'mean2var') {
stop("\nSelect type from:\n 1) mean;\n 2) var;\n 3) mean1var;\n 4) mean2var.")
}
# Pick the results file by sample size: exact N=5 results vs N=50 results.
size <- if (n == 5) {
'N5M1000Exact.RDS'
} else {
'N50M1000D800.RDS'
}
res <- round(readRDS(paste0('res/', distribution, '/', type, size)), 3)
if (percents) {
res <- res * 100
}
# Translate the stored parameter grid (row names) into the (mean, sd)
# pair describing each alternative hypothesis.
switch (type,
mean = {
first <- rownames(res)
second <- 1
}, var = {
first <- 0
second <- rownames(res)
}, mean1var = {
first <- rownames(res)
second <- 1 + as.numeric(rownames(res))
}, mean2var = {
first <- rownames(res)
second <- 1 + 2*as.numeric(rownames(res))
}
)
# Scenario labels, e.g. "N(0, 1)"; c1 repeats the first label per row.
c2 <- c(paste0(prefix, '(', first, ', ', second, ')'))
c1 <- rep(c2[1], 5)
cbind(c1, c2, res)
}
readRDS.distribution <- function(distribution, n = 50, percents = TRUE, prefix = NULL) {
# Derive the label prefix from the distribution name unless supplied:
# "logX..." -> "LX", otherwise the upper-cased first letter.
if (is.null(prefix)) {
prefix <- toupper(if (substr(distribution, 1, 3) == 'log') {
paste0('L', substr(distribution, 4, 4))
} else {
substr(distribution, 1, 1)
})
}
# Stack the four alternative-type result blocks in a fixed order.
res <- rbind(
readRDS.type(distribution, n, percents, prefix, 'mean'),
readRDS.type(distribution, n, percents, prefix, 'var'),
readRDS.type(distribution, n, percents, prefix, 'mean1var'),
readRDS.type(distribution, n, percents, prefix, 'mean2var')
)
# Re-number the rows sequentially across the stacked blocks.
rownames(res) <- 1:length(rownames(res))
res
}
# Writes `res` as LaTeX tabular rows (" & " separators, rows ending in
# " \\"), with the header line commented out via a leading "% " and a
# final \hline appended. with_F1 = FALSE drops the first label column.
MakeTable <- function(res, file = 'table.tex', with_F1 = TRUE, append = FALSE) {
if (!with_F1) res <- res[,-1]
# "% " comments out the header line that write.table emits next.
cat('% ', file = file, append = append)
write.table(res, file,
quote = F, sep = ' & ', eol = ' \\\\\n',
row.names = F, col.names = T, append = TRUE)
write('\\hline', file, append = TRUE)
}
# Build the article tables: load each distribution's results, drop unwanted
# columns, keep five representative rows, and write the LaTeX files.
res.norm <- readRDS.distribution('norm')[, -c(4, 6, 8)]
res.norm   # printed for interactive inspection
res.norm <- res.norm[c(1, 4, 8, 13, 18), ]
MakeTable(res.norm, 'table2.tex')
res.cauchy <- readRDS.distribution('cauchy')[, -c(4, 6, 8)]
res.cauchy
res.cauchy <- res.cauchy[c(1, 3, 8, 13, 18), ]
MakeTable(res.cauchy, 'table2.tex', append = TRUE)
res.levy <- readRDS.distribution('levy')[, -c(4, 6, 8)]
res.levy
res.levy <- res.levy[c(1, 5, 10, 13, 18), ]
MakeTable(res.levy, 'table2.tex', append = TRUE)
# Log-Cauchy drops two extra columns and goes to its own file.
res.logcauchy <- readRDS.distribution('logcauchy')[, -c(4, 6, 8)]
res.logcauchy
res.logcauchy <- res.logcauchy[c(1, 5, 8, 15, 20), -c(6, 8)]
MakeTable(res.logcauchy, 'table3.tex')
|
\name{OptimalTrades.BenchmarkDecisions}
\alias{OptimalTrades.BenchmarkDecisions}
\title{
Benchmark Trading Decisions with Optimal Decisions
}
\description{
Generates a confusion matrix counting the number of times that
\code{myDecisions} agrees with \code{optimalDecisions}.
}
\usage{
OptimalTrades.BenchmarkDecisions(prices, initFunds = 1000, numAssets, myDecisions)
}
\arguments{
\item{prices}{
A vector of prices
}
\item{initFunds}{
Initial funds that are available to the trader
}
\item{numAssets}{
The number of unique assets that are represented in the prices vector
}
\item{myDecisions}{
A vector of the same length as prices describing how many contracts were traded
at period t.
}
}
\details{
Calls OptimalTrades and determines the optimal trading strategy. Converts the
two decision vectors into vectors of factors, indicating a "Buy", "Sell", or
"Hold". Creates a confusion matrix based on these two vectors of factors which
shows how many times I bought when I should have bought, sold when I should have
sold, and all occurrences of when my trading strategy differed from the optimal.
}
\value{
A table representing the confusion matrix.
}
\references{
}
\author{
Jeffrey Wong
}
\note{
}
\seealso{
}
\examples{
}
| /OptimalTrading/man/OptimalTrades.BenchmarkDecisions.Rd | no_license | rootfs-analytics/RFinance | R | false | false | 1,258 | rd | \name{OptimalTrades.BenchmarkDecisions}
\alias{OptimalTrades.BenchmarkDecisions}
\title{
Benchmark Trading Decisions with Optimal Decisions
}
\description{
Generates a confusion matrix counting the number of times that
\code{myDecisions} agrees with \code{optimalDecisions}.
}
\usage{
OptimalTrades.BenchmarkDecisions(prices, initFunds = 1000, numAssets, myDecisions)
}
\arguments{
\item{prices}{
A vector of prices
}
\item{initFunds}{
Initial funds that are available to the trader
}
\item{numAssets}{
The number of unique assets that are represented in the prices vector
}
\item{myDecisions}{
A vector of the same length as prices describing how many contracts were traded
at period t.
}
}
\details{
Calls OptimalTrades and determines the optimal trading strategy. Converts the
two decision vectors into vectors of factors, indicating a "Buy", "Sell", or
"Hold". Creates a confusion matrix based on these two vectors of factors which
shows how many times I bought when I should have bought, sold when I should have
sold, and all occurrences of when my trading strategy differed from the optimal.
}
\value{
A table representing the confusion matrix.
}
\references{
}
\author{
Jeffrey Wong
}
\note{
}
\seealso{
}
\examples{
}
|
#' @title PCA scatter plot of samples
#' @description Runs a centred, scaled PCA on the sample matrix (after
#'   removing zero-variance features) and plots the first two principal
#'   components with optional colour, shape and text-label annotations.
#' @details Axis labels report the percentage of variance explained by
#'   PC1 and PC2. Labels are placed with ggrepel to avoid overlap.
#' @param rld DESeq2 rld object (its first assay is used, transposed to
#'   samples x features), or a plain numeric samples-x-features matrix.
#' @param annot_label point labels (character/factor), or "none".
#' @param annot_color values mapped to point colour, or "none".
#' @param annot_shape values mapped to point shape, or "none".
#' @return A ggplot object.
pca_and_plot <- function(rld, annot_label="none", annot_color="none", annot_shape="none") {
  # Accept either a bare matrix or an rld-style object with assays().
  # (The original `class(rld) == "matrix"` breaks on R >= 4.0, where
  # matrices have class c("matrix", "array") and the length-2 comparison
  # is rejected by if(); is.matrix() is the robust test.)
  if (is.matrix(rld)) {
    y <- rld
  } else {
    y <- t(assays(rld)[[1]])
  }
  # Remove zero-variance columns: prcomp(scale = TRUE) cannot rescale them.
  y <- y[, !apply(y, 2, function(x) sd(x) == 0)]
  pca_res <- prcomp(y, scale = TRUE, center = TRUE)
  pca_res_summary <- summary(pca_res)
  # Data frame of the first two PCs plus the plotting annotations.
  yy <- data.frame(pca_res$x[, 1:2])
  names(yy) <- c("x", "y")
  yy$annot_label <- factor(annot_label)
  yy$annot_color <- factor(annot_color)
  yy$annot_shape <- factor(annot_shape)
  my_plot <- ggplot(yy, aes(x = x, y = y, color = annot_color)) +
    geom_point(size = 5, aes(shape = annot_shape)) +
    # importance[2, k] is the proportion of variance explained by PC k.
    xlab(paste0("PC", 1, ": ", pca_res_summary$importance[2, 1] * 100, "%")) +
    ylab(paste0("PC", 2, ": ", pca_res_summary$importance[2, 2] * 100, "%")) +
    theme_thesis() +
    geom_text_repel(aes(label = annot_label), fontface = "bold", size = 5, force = 0.5)
  return(my_plot)
}
| /R/pca_and_plot.R | no_license | aidanmacnamara/epiChoose | R | false | false | 1,104 | r | #' @title TO ADD
#' @description TO ADD
#' @description This is a new line ...
#' @details What's this?
#' @param rld DESeq2 rld object
#' @return TO ADD
pca_and_plot <- function(rld, annot_label="none", annot_color="none", annot_shape="none") {
# Accept either a bare matrix or an rld-style object with assays().
# NOTE(review): class(rld)=="matrix" is unreliable on R >= 4.0, where
# matrices have class c("matrix", "array"); is.matrix() would be robust.
if(class(rld)=="matrix") {
y = rld
} else {
y = t(assays(rld)[[1]])
}
dim(y)
y = y[,!apply(y, 2, function(x) sd(x)==0)] # remove regions with no variance
dim(y)
# Centred, scaled PCA; plot the first two components.
pca_res <- prcomp(y, scale=TRUE, center=TRUE)
pca_res_summary = summary(pca_res)
yy = data.frame(pca_res$x[,1:2])
names(yy) = c("x","y")
yy$annot_label = factor(annot_label)
yy$annot_color = factor(annot_color)
yy$annot_shape = factor(annot_shape)
# Axis labels report the % variance explained (importance row 2).
my_plot = ggplot(yy, aes(x=x, y=y, color=annot_color)) + geom_point(size=5, aes(shape=annot_shape)) + xlab(paste0("PC", 1, ": ", pca_res_summary$importance[2,1]*100, "%")) + ylab(paste0("PC", 2, ": ", pca_res_summary$importance[2,2]*100, "%")) + theme_thesis() + geom_text_repel(aes(label=annot_label), fontface="bold", size=5, force=0.5) # + theme(legend.position="none")
return(my_plot)
}
|
testthat::context('basic')
testthat::describe('basic',{
# details() requires an object: calling it with no arguments must error.
it('empty',{
testthat::expect_error(
details::details(),
regexp = 'argument "object" is missing'
)
})
# Rendering an R object matches the benchmark renderer (bench_fun helper).
it('object',{
testthat::expect_equal(
print(details::details(mtcars,output = 'character')),
unlist(strsplit(bench_fun(mtcars),'\\n'))
)
})
# Extra arguments (...) are forwarded to the underlying printer.
it('dots',{
testthat::expect_equal(
print(details::details(mtcars,output = 'character',row.names = FALSE)),
unlist(strsplit(bench_fun(mtcars,row.names = FALSE),'\\n'))
)
})
# A file path argument is read and rendered line by line.
it('file',{
testthat::expect_equal(
print(details::details(object = 'helpers.R',output = 'character')),
unlist(strsplit(bench_fun(readLines('helpers.R')),'\\n'))
)
})
# A scalar that is not a file path is rendered as a plain object.
it('non file singleton',{
testthat::expect_equal(
print(details::details(mtcars[1,1],output = 'character')),
unlist(strsplit(bench_fun(mtcars[1,1]),'\\n'))
)
})
# lang = 'none' suppresses the code-fence language tag.
it('no lang',{
testthat::expect_equal(
print(details::details(object = 'helpers.R',output = 'character',lang = 'none')),
unlist(strsplit(bench_fun(readLines('helpers.R'),lang = 'none'),'\\n'))
)
})
})
| /tests/testthat/test-basic.R | permissive | yonicd/details | R | false | false | 1,182 | r | testthat::context('basic')
testthat::describe('basic',{
it('empty',{
testthat::expect_error(
details::details(),
regexp = 'argument "object" is missing'
)
})
it('object',{
testthat::expect_equal(
print(details::details(mtcars,output = 'character')),
unlist(strsplit(bench_fun(mtcars),'\\n'))
)
})
it('dots',{
testthat::expect_equal(
print(details::details(mtcars,output = 'character',row.names = FALSE)),
unlist(strsplit(bench_fun(mtcars,row.names = FALSE),'\\n'))
)
})
it('file',{
testthat::expect_equal(
print(details::details(object = 'helpers.R',output = 'character')),
unlist(strsplit(bench_fun(readLines('helpers.R')),'\\n'))
)
})
it('non file singleton',{
testthat::expect_equal(
print(details::details(mtcars[1,1],output = 'character')),
unlist(strsplit(bench_fun(mtcars[1,1]),'\\n'))
)
})
it('no lang',{
testthat::expect_equal(
print(details::details(object = 'helpers.R',output = 'character',lang = 'none')),
unlist(strsplit(bench_fun(readLines('helpers.R'),lang = 'none'),'\\n'))
)
})
})
|
# Build the TMB inputs (Data, Params, Map, Random) for one run of the spatial
# factor analysis model. The exact shape of each list depends on the compiled
# template 'Version' (v18-v24) and on the observation model 'ObsModel'.
#
# NOTE(review): this function reads the free variable 'StartValue' from the
# calling environment -- it is NOT an argument. When LastRun is non-NULL the
# 'StartValue' lookup is evaluated, so the caller must have it defined
# ("Default" or "Last_Run"); confirm all call sites.
# NOTE(review): 'Loc' and 'n_stations' are accepted but never used below.
MakeInput_Fn <-
function(Version, Y, X, Y_Report=NULL, LastRun=NULL, Loc, isPred, ObsModel, VaryingKappa, n_factors, n_stations, Use_REML, Aniso, mesh, spde){
# Pre-processing in R: assume a 2-dimensional mesh (x,y coordinates only)
Dset = 1:2
# Triangle info extracted from the INLA mesh (needed by the anisotropic
# versions v21+ of the template)
TV = mesh$graph$tv # Triangle to vertex indexing
V0 = mesh$loc[TV[,1],Dset] # V = vertices for each triangle
V1 = mesh$loc[TV[,2],Dset]
V2 = mesh$loc[TV[,3],Dset]
E0 = V2 - V1 # E = edge for each triangle
E1 = V0 - V2
E2 = V1 - V0
# Calculate triangle areas: half the absolute determinant of two edge vectors
TmpFn = function(Vec1,Vec2) abs(det( rbind(Vec1,Vec2) ))
Tri_Area = rep(NA, nrow(E0))
for(i in 1:length(Tri_Area)) Tri_Area[i] = TmpFn( E0[i,],E1[i,] )/2 # T = area of each triangle
# Other pre-processing: NAind flags missing observations (1 = NA) so the
# template can skip them in the likelihood
NAind = ifelse( is.na(Y), 1, 0)
if(is.null(Y_Report)) Y_Report = array(0, dim=dim(Y))
n_species = ncol(Y)
n_fit = nrow(Y)
# Data: ErrorDist collapses ObsModel to the template's integer code
# (0 = Poisson-family incl. Lognorm_Pois, 1 = negative-binomial family)
ErrorDist = as.integer(switch(ObsModel, "Poisson"=0, "NegBin0"=1, "NegBin1"=1, "NegBin2"=1, "NegBin12"=1, "Lognorm_Pois"=0))
# One Data list per template generation; later versions add penalty terms
# (Pen_Vec), anisotropy inputs (triangle geometry) and an options vector.
if(Version=="spatial_factor_analysis_v18") Data = list(Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v19","spatial_factor_analysis_v20") ) Data = list(Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v21","spatial_factor_analysis_v22") ) Data = list(Aniso=as.integer(Aniso), Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Data = list(Aniso=as.integer(Aniso), Options_Vec=NA, Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
# Options_Vec (v23/v24 only): 0 = not lognormal-Poisson,
# 1 = lognormal-Poisson (ML), 2 = lognormal-Poisson with REML
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(ObsModel!="Lognorm_Pois") Data[["Options_Vec"]] = as.integer(0)
if(ObsModel=="Lognorm_Pois" & Use_REML==TRUE) Data[["Options_Vec"]] = as.integer(2)
if(ObsModel=="Lognorm_Pois" & Use_REML==FALSE) Data[["Options_Vec"]] = as.integer(1)
}
# Parameters -- branch 1: default (cold) starting values.
# Psi_val holds the lower-triangular factor loadings
# (n_factors*n_species - n_factors*(n_factors-1)/2 free elements).
# Overdispersion parameters placed at NA here are filled in by the
# ObsModel-specific assignments just below.
if(is.null(LastRun) || StartValue=="Default" ){
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl=matrix(-2,nrow=3,ncol=n_species), Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=c(0,-10,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
if(Version %in% "spatial_factor_analysis_v24") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
# Per-ObsModel starting values: the active overdispersion term starts at -2
# (or -5 for the lognormal SD); inactive terms start at -20 -- presumably
# exp(-20) ~ 0 so they are effectively switched off (they are also mapped
# off below). TODO confirm against the TMB template.
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-2,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-5,n_species)
}
}
# Parameters -- branch 2: warm start from a previous fit ('LastRun').
# Par_last = optimizer's final fixed-effect vector; Par_best = best full
# parameter vector. Psi_val/log_kappa_input are padded with extra entries,
# and Omega_input with a zero column -- presumably to warm-start a model
# with one additional factor; TODO confirm intended usage.
if(!is.null(LastRun) && StartValue=="Last_Run"){
Par_last = LastRun$opt$par
Par_best = LastRun$ParBest
# names(Save)
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl=Par_last[which(names(Par_last)=="ln_VarInfl")], Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=Par_last[which(names(Par_last)=="ln_H_input")], beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
if(Version %in% c("spatial_factor_analysis_v24")) Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
# Same ObsModel switch as the cold-start branch, but the active term is
# carried over from Par_best rather than reset.
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB0")]
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = Par_best[which(names(Par_best)=="ln_VarInfl_Lognorm")]
}
# Isotropic case overrides the anisotropy parameters (length differs by
# template generation: 3 entries for v20/v21, 2 for v22-v24)
if(Version %in% c("spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,-10,0)
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,0)
}
}
# Fix parameters: entries set to factor(NA) in Map are held fixed at their
# starting values (TMB 'map' convention). Each ObsModel turns off every
# overdispersion term it does not use.
Map = list()
if(ObsModel=="Poisson"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin0"){
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin1"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin2"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin12"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="Lognorm_Pois"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
}
# Anisotropy / kappa mapping by template generation (ln_H_input has 3
# entries up to v21, 2 from v22 on)
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19","spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,3) )
if(Aniso==1 & VaryingKappa==0) Map[['log_kappa_input']] = factor( NA )
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,2) )
}
# Declare random effects: the spatial factors, plus the lognormal
# station-level effects (v23/v24); under REML the fixed effects are also
# integrated out by declaring them random.
Random = c("Omega_input")
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Random = c(Random, "Lognorm_input")
if(Use_REML==TRUE) Random = c(Random, "beta", "ln_VarInfl_NB0", "ln_VarInfl_NB1", "ln_VarInfl_NB2", "ln_VarInfl_ZI", "ln_VarInfl_Lognorm")
# Return
# NOTE(review): the assignment below is the last expression, so the list is
# returned invisibly; callers must capture it (e.g. x <- MakeInput_Fn(...)).
Return = list("Map"=Map, "Random"=Random, "Params"=Params, "Data"=Data, "spde"=spde)
}
| /R/MakeInput_Fn.R | no_license | GodinA/spatial_factor_analysis | R | false | false | 17,603 | r | MakeInput_Fn <-
function(Version, Y, X, Y_Report=NULL, LastRun=NULL, Loc, isPred, ObsModel, VaryingKappa, n_factors, n_stations, Use_REML, Aniso, mesh, spde){
# Pre-processing in R: Assume 2-Dimensional
Dset = 1:2
# Triangle info
TV = mesh$graph$tv # Triangle to vertex indexing
V0 = mesh$loc[TV[,1],Dset] # V = vertices for each triangle
V1 = mesh$loc[TV[,2],Dset]
V2 = mesh$loc[TV[,3],Dset]
E0 = V2 - V1 # E = edge for each triangle
E1 = V0 - V2
E2 = V1 - V0
# Calculate Areas
TmpFn = function(Vec1,Vec2) abs(det( rbind(Vec1,Vec2) ))
Tri_Area = rep(NA, nrow(E0))
for(i in 1:length(Tri_Area)) Tri_Area[i] = TmpFn( E0[i,],E1[i,] )/2 # T = area of each triangle
# Other pre-processing
NAind = ifelse( is.na(Y), 1, 0)
if(is.null(Y_Report)) Y_Report = array(0, dim=dim(Y))
n_species = ncol(Y)
n_fit = nrow(Y)
# Data
ErrorDist = as.integer(switch(ObsModel, "Poisson"=0, "NegBin0"=1, "NegBin1"=1, "NegBin2"=1, "NegBin12"=1, "Lognorm_Pois"=0))
if(Version=="spatial_factor_analysis_v18") Data = list(Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v19","spatial_factor_analysis_v20") ) Data = list(Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v21","spatial_factor_analysis_v22") ) Data = list(Aniso=as.integer(Aniso), Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Data = list(Aniso=as.integer(Aniso), Options_Vec=NA, Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(ObsModel!="Lognorm_Pois") Data[["Options_Vec"]] = as.integer(0)
if(ObsModel=="Lognorm_Pois" & Use_REML==TRUE) Data[["Options_Vec"]] = as.integer(2)
if(ObsModel=="Lognorm_Pois" & Use_REML==FALSE) Data[["Options_Vec"]] = as.integer(1)
}
# Parameters
if(is.null(LastRun) || StartValue=="Default" ){
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl=matrix(-2,nrow=3,ncol=n_species), Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=c(0,-10,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
if(Version %in% "spatial_factor_analysis_v24") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-2,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-5,n_species)
}
}
if(!is.null(LastRun) && StartValue=="Last_Run"){
Par_last = LastRun$opt$par
Par_best = LastRun$ParBest
# names(Save)
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl=Par_last[which(names(Par_last)=="ln_VarInfl")], Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=Par_last[which(names(Par_last)=="ln_H_input")], beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
if(Version %in% c("spatial_factor_analysis_v24")) Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB0")]
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = Par_best[which(names(Par_best)=="ln_VarInfl_Lognorm")]
}
if(Version %in% c("spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,-10,0)
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,0)
}
}
# Fix parameters
Map = list()
if(ObsModel=="Poisson"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin0"){
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin1"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin2"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin12"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="Lognorm_Pois"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
}
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19","spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,3) )
if(Aniso==1 & VaryingKappa==0) Map[['log_kappa_input']] = factor( NA )
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,2) )
}
# Declare random
Random = c("Omega_input")
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Random = c(Random, "Lognorm_input")
if(Use_REML==TRUE) Random = c(Random, "beta", "ln_VarInfl_NB0", "ln_VarInfl_NB1", "ln_VarInfl_NB2", "ln_VarInfl_ZI", "ln_VarInfl_Lognorm")
# Return
Return = list("Map"=Map, "Random"=Random, "Params"=Params, "Data"=Data, "spde"=spde)
}
|
library(shiny)
library(shinydashboard)
library(shinyjs)
library(DT)
library(plyr)
library(dplyr)
library(tidyr)
library(digest)
load("initial_setting.RData")
## curve function: piecewise-linear lookup used throughout the simulation
## Piecewise-linear lookup on a curve table.
##
## Evaluates `input_x` against a lookup data frame `name` with numeric
## columns `x` and `y`.  Outside the observed x-range the result is clamped
## to the y-value at the nearest endpoint; inside, the two x-values closest
## to `input_x` are combined linearly.  NOTE: this deliberately shadows
## graphics::curve() for the lifetime of the app.
##
## @param name data frame with numeric columns `x` and `y`.
## @param input_x scalar x-value to evaluate.
## @return scalar y-value.
curve <- function(name, input_x) {
  data <- name
  if (input_x <= min(data$x)) {
    # At or below the smallest x: clamp to its y.
    y <- data$y[which.min(data$x)]
  } else if (input_x >= max(data$x)) {
    # At or above the largest x: clamp to its y.
    y <- data$y[which.max(data$x)]
  } else {
    # Pick the two x-values nearest to input_x (ties resolve to the first
    # occurrence, matching which.min()).
    nearest <- which.min(abs(input_x - data$x))
    left <- data[nearest, ]
    tmp <- data[-nearest, ]
    right <- tmp[which.min(abs(input_x - tmp$x)), ]
    # Linear combination of the two picked points.  Scalar if/else replaces
    # the original ifelse(): ifelse() on a scalar condition evaluates both
    # branches and is the wrong tool for non-vector logic.  The weights are
    # the algebraic simplification of the original expressions.
    if (left$x <= right$x) {
      w <- (input_x - left$x) / (right$x - left$x)
      y <- (1 - w) * left$y + w * right$y
    } else {
      w <- (input_x - right$x) / (left$x - right$x)
      y <- (1 - w) * right$y + w * left$y
    }
  }
  y
}
## Ratio of an achieved value to its target, capped at 1.
##
## @param input achieved value.
## @param best target ("best") value.
## @return 1 when the target is met or exceeded, otherwise input/best
##   rounded to two decimals.
contact_fit <- function(input, best) {
  if (input >= best) {
    return(1)
  }
  round(input / best, 2)
}
decision1_summary <- function(input,phase,hospital){
total <- sum(c(
as.numeric(input[[paste("p",phase,"_hosp",hospital,"_worktime_doc",sep="")]]),
as.numeric(input[[paste("p",phase,"_hosp",hospital,"_worktime_diet",sep="")]]),
as.numeric(input[[paste("p",phase,"_hosp",hospital,"_worktime_admin",sep="")]]),
as.numeric(input[[paste("p",phase,"_hosp",hospital,"_worktime_nurs",sep="")]])),na.rm=T)
total
}
calculator <- function(input,phase){
phase1_promotional_budget=0
phase1_total_time_arrangement1 <- 0
phase1_total_time_arrangement2 <- 0
phase1_total_time_arrangement3 <- 0
phase1_total_time_arrangement4 <- 0
phase1_total_time_arrangement5 <- 0
for(i in 1:10){
phase1_promotional_budget <-
sum(c(phase1_promotional_budget,
as.numeric(input[[paste("p",phase,"_promotional_budget_hosp",i,sep="")]])),
na.rm = TRUE)
tmp <- sum(c(as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_1",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_2",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_3",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_4",sep="")]])/100*worktime),
na.rm = TRUE)
if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[1]){
phase1_total_time_arrangement1 <-
phase1_total_time_arrangement1 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[2]) {
phase1_total_time_arrangement2 <-
phase1_total_time_arrangement2 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[3]) {
phase1_total_time_arrangement3 <-
phase1_total_time_arrangement3 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[4]) {
phase1_total_time_arrangement4 <-
phase1_total_time_arrangement4 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[5]) {
phase1_total_time_arrangement5 <-
phase1_total_time_arrangement5 +tmp
}
}
team_meeting <- as.numeric(input[[paste("p",phase,"_flm_team_meeting",sep="")]])
phase1_total_time_arrangement1 <- phase1_total_time_arrangement1 +
as.numeric(input[[paste("p",phase,"_sr1_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr1_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement2 <- phase1_total_time_arrangement2 +
as.numeric(input[[paste("p",phase,"_sr2_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr2_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement3 <- phase1_total_time_arrangement3 +
as.numeric(input[[paste("p",phase,"_sr3_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr3_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement4 <- phase1_total_time_arrangement4 +
as.numeric(input[[paste("p",phase,"_sr4_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr4_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement5 <- phase1_total_time_arrangement5 +
as.numeric(input[[paste("p",phase,"_sr5_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr5_product_training",sep="")]]) +
team_meeting
data <- c(phase1_promotional_budget,
phase1_total_time_arrangement1,
phase1_total_time_arrangement2,
phase1_total_time_arrangement3,
phase1_total_time_arrangement4,
phase1_total_time_arrangement5)
data
}
## Total sales-training days across the five sales reps in a phase.
##
## Bug fix: `na.rm = T` was previously passed INSIDE `c(...)`, so it became
## a data element (TRUE coerced to 1, inflating every total by one day)
## while sum() received no na.rm at all (any NA input made the total NA).
## It now goes to sum() where it belongs.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return numeric scalar: summed training days, NAs ignored.
sales_training <- function(input, phase) {
  sum(c(
    as.numeric(input[[paste("p", phase, "_sr1_sales_training", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr2_sales_training", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr3_sales_training", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr4_sales_training", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr5_sales_training", sep = "")]])),
    na.rm = TRUE)
}
## Total field-work days across the five sales reps in a phase.
##
## Bug fix: as in sales_training(), `na.rm = T` was inside `c(...)` — it
## was summed as an extra 1 and left sum() unable to drop NAs.  Moved to
## sum() where it belongs.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return numeric scalar: summed field-work days, NAs ignored.
field_work <- function(input, phase) {
  sum(c(
    as.numeric(input[[paste("p", phase, "_sr1_field_work", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr2_field_work", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr3_field_work", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr4_field_work", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_sr5_field_work", sep = "")]])),
    na.rm = TRUE)
}
## Total management time for the first-line manager in a phase: the reps'
## sales training and field work plus the FLM's own team-meeting, KPI,
## strategy-planning, and admin time.  NAs are ignored.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return numeric scalar: total management days.
total_management <- function(input, phase) {
  sum(c(
    sales_training(input, phase),
    field_work(input, phase),
    as.numeric(input[[paste("p", phase, "_flm_team_meeting", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_flm_kpi_analysis", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_flm_strategy_planning", sep = "")]]),
    as.numeric(input[[paste("p", phase, "_flm_admin_work", sep = "")]])),
    na.rm = TRUE)  # TRUE, not the reassignable alias T
}
## Run one phase of the simulation model.
##
## Joins the previous-phase (pp_*) state onto the current-phase (cp_*)
## decisions, then derives every intermediate index (volume, revenue,
## motivation, sales skills, product knowledge, deployment quality,
## promotional support, customer relationship) up to the final
## offer-attractiveness scores, all via one long dplyr pipeline.
##
## @param pp_data1 previous-phase hospital/product-level results.
## @param pp_data2 previous-phase sales-rep-level results.
## @param cp_data1 current-phase hospital/product decisions (get.data1()).
## @param cp_data2 current-phase sales-rep decisions (get.data2()).
## @return one data frame carrying all raw and derived columns per
##   phase/hospital/product/sales-rep combination.
##
## NOTE(review): depends on many globals presumably loaded from
## initial_setting.RData (curve lookup tables curve1..curve50, `weightage`,
## `contact_priority_info`, `production_price`, `product_code`, `overhead`)
## and on the local curve()/contact_fit() helpers — confirm against that
## file before changing anything here.
calculation <- function(pp_data1,
pp_data2,
cp_data1,
cp_data2){
# Attach previous-phase state to the current decisions: data1 joins at
# hospital/product level, data2 at sales-rep level.
tmp1 <- left_join(cp_data1,pp_data1,by=c("hospital","product"))
tmp2 <- left_join(cp_data2,pp_data2,by=c("sales_rep"))
tmp <- left_join(tmp1,tmp2,by=c("phase","sales_rep")) %>%
# Hospital-level aggregates: last-phase performance and time per contact type.
group_by(hospital) %>%
mutate(pp_sales_performance_by_hosp=sum(pp_sales_performance),
time_on_doc=sum(time_on_doc.tmp),
time_on_diet=sum(time_on_diet.tmp),
time_on_admin=sum(time_on_admin.tmp),
time_on_nurs=sum(time_on_nurs.tmp)) %>%
ungroup() %>%
# Volume and revenue: curve31 maps last-phase hospital performance to a
# volume uplift factor; price is looked up per product.
mutate(volume_factor=sapply(pp_sales_performance_by_hosp,function(x) curve(curve31,x)),
real_volume=(1+volume_factor)*potential_volume,
product_price = sapply(product,function(x) production_price[which(production_price$product==x),]$price),
real_revenue= real_volume*product_price,
target_revenue= sales_target*product_price) %>%
# Per-sales-rep totals for the phase.
group_by(phase,sales_rep) %>%
mutate(no.hospitals = n_distinct(hospital),
sr_revenue = round(sum(real_revenue,na.rm=T),2),
sr_volume = round(sum(real_volume,na.rm=T),2),
sr_target_revenue = sum(target_revenue,na.rm=T),
sr_time_total=sum(sr_time,na.rm=T)) %>%
ungroup %>%
# Rep-level derived quantities: accumulated revenue/experience, overhead,
# incentive, contact-priority fit per contact type, and the raw deltas
# that feed the composite indices below.  Each curveNN is a lookup table
# evaluated point-wise through curve().
dplyr::mutate(sr_acc_revenue = sr_revenue+pp_sr_acc_revenue,
experience_index = sapply(sr_acc_revenue, function(x) round(curve(curve11,x),2)),
sr_acc_field_work = pp_sr_acc_field_work+field_work,
overhead_factor = sapply(pp_motivation_index,function(x) curve(curve12,x)),
overhead_time = round(overhead_factor*overhead,0),
pp_experience_index = sapply(pp_sr_acc_revenue,function(x) round(curve(curve11,x),2)),
sales_target_realization = round(sr_target_revenue/sr_revenue*100,0),
incentive_factor = sapply(sales_target_realization,function(x) curve(curve10,x)),
contact_priority_fit_doc =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.a),
phase,hospital,time_on_doc),
contact_priority_fit_diet =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.b),
phase,hospital,time_on_diet),
contact_priority_fit_admin =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.c),
phase,hospital,time_on_admin),
contact_priority_fit_nurs =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.d),
phase,hospital,time_on_nurs),
# Weighted overall fit across the four contact types.
contact_priority_fit_index = contact_priority_fit_doc*weightage$contact_priority$a+
contact_priority_fit_diet*weightage$contact_priority$b+
contact_priority_fit_admin*weightage$contact_priority$c+
contact_priority_fit_nurs*weightage$contact_priority$d,
field_work_peraccount = field_work/no.hospitals,
product_knowledge_addition_current_period = sapply(product_training,function(x)curve(curve26,x)),
product_knowledge_transfer_value = sapply(pp_product_knowledge_index,function(x)curve(curve28,x)),
ss_accumulated_field_work_delta = sapply(sr_acc_field_work,function(x)curve(curve42,x)),
ss_accumulated_sales_training_delta = sapply(sales_training,function(x)curve(curve43,x)),
ss_experience_index_pp = sapply(pp_experience_index,function(x)curve(curve44,x)),
# Meeting effect depends on the rep's seniority level.
m_meeting_with_team_delta = mapply(function(x,y){
if (x == "junior") {
curve(curve13,y)
} else if(x=="middle"){
curve(curve14,y)
} else {curve(curve15,y)}
},sales_level,
meetings_with_team,SIMPLIFY=T),
m_sales_target_realization_delta = sapply(sales_target_realization,function(x)curve(curve16,x)),
m_sales_training_delta = sapply(sales_training,function(x)curve(curve17,x)),
m_admin_work_delta = sapply(admin_work,function(x)curve(curve18,x)))%>%
# Composite indices, weighted per the global `weightage` list.
mutate(sales_skills_index = round(
ss_accumulated_field_work_delta*((weightage$sales_skills)$field_work)+
ss_accumulated_sales_training_delta*((weightage$sales_skills)$sales_training)+
ss_experience_index_pp*((weightage$sales_skills)$experience)),
product_knowledge_index = round(
product_knowledge_addition_current_period+
product_knowledge_transfer_value),
motivation_index = round(
(pp_motivation_index+m_admin_work_delta)*
((weightage$motivation)$admin_work)+
(pp_motivation_index+m_sales_target_realization_delta)*
((weightage$motivation)$sales_target_realization)+
(pp_motivation_index+m_meeting_with_team_delta)*
((weightage$motivation)$meetings_with_team)+
(pp_motivation_index+m_sales_training_delta)*
((weightage$motivation)$sales_training))) %>%
# Factors feeding the rep's sales performance; the time-with-account curve
# is product-specific (curve35..38 keyed by product_code order).
mutate(srsp_motivation_factor = sapply(pp_motivation_index,function(x)curve(curve32,x)),
srsp_sales_skills_factor = sapply(sales_skills_index,function(x)curve(curve34,x)),
srsp_product_knowledge_factor = sapply(product_knowledge_index,function(x)curve(curve33,x)),
srsp_time_with_account_factor =
mapply(function(x,y){if (x==as.character(product_code$product[1])){
curve(curve35,y)} else if(
x==as.character(product_code$product[2])){
curve(curve36,y)} else if (
x==as.character(product_code$product[3])) {
curve(curve37,y)} else {
curve(curve38,y)}},
product,sr_time)) %>%
# Weighted sales-rep sales performance (all terms scale last-phase value).
mutate(sr_sales_performance =
srsp_motivation_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$motivation)+
srsp_sales_skills_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$sales_skills)+
srsp_product_knowledge_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$product_knowledge)+
srsp_time_with_account_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$time_with_account))%>%
# Deployment-quality deltas/factors.
mutate(dq_admin_work_delta = sapply(admin_work,function(x)curve(curve5,x)),
dq_priority_fit_delta = sapply(contact_priority_fit_index,function(x)curve(curve6,x)),
dq_meetings_with_team_delta =sapply(meetings_with_team,function(x)curve(curve7,x)),
dq_kpi_analysis_factor = sapply(kpi_analysis,function(x)curve(curve8,x)),
dq_strategy_planning_delta = sapply(strategy_and_cycle_planning,function(x)curve(curve9,x)))%>%
# Deployment quality: additive deltas except KPI analysis, which scales.
mutate(deployment_quality_index = round(
(pp_deployment_quality_index+dq_admin_work_delta)*
((weightage$deployment_quality)$admin_work)+
(pp_deployment_quality_index+dq_priority_fit_delta)*
((weightage$deployment_quality)$priority_fit)+
(pp_deployment_quality_index+dq_meetings_with_team_delta)*
((weightage$deployment_quality)$meetings_with_team)+
pp_deployment_quality_index*dq_kpi_analysis_factor*
((weightage$deployment_quality)$kpi_report_analysis)+
(pp_deployment_quality_index+dq_strategy_planning_delta)*
((weightage$deployment_quality)$strategy_and_cycle_planning)))%>%
# Promotional support.
mutate(ps_strategy_planning_factor = sapply(strategy_and_cycle_planning,function(x) curve(curve29,x)),
ps_promotional_budget_factor = sapply(promotional_budget,function(x)curve(curve30,x))) %>%
mutate(promotional_support_index =
pp_promotional_support_index*ps_strategy_planning_factor*
((weightage$promotional_support)$strategy_and_cycle_planning)+
pp_promotional_support_index*ps_promotional_budget_factor*
((weightage$promotional_support)$promotional_budget)) %>%
# Overall sales performance.
mutate(sp_field_work_delta = sapply(field_work_peraccount,function(x)curve(curve40,x)),
sp_deployment_quality_factor = sapply(deployment_quality_index,function(x)curve(curve41,x))) %>%
mutate(sales_performance =
sr_sales_performance*((weightage$sales_performance)$sr_sales_performance)+
(pp_sales_performance+sp_field_work_delta)*
((weightage$sales_performance)$field_work)+
(pp_sales_performance*sp_deployment_quality_factor)*
((weightage$sales_performance)$deployment_quality))%>%
# Customer-relationship deltas (the market-share term is disabled).
mutate(#cr_market_share_delta = curve(curve1,market_share_peraccount),
cr_product_knowledge_delta = sapply(product_knowledge_index-pp_product_knowledge_index,function(x)curve(curve2,x)),
cr_promotional_support_delta = sapply(promotional_support_index/pp_promotional_support_index,function(x)curve(curve3,x)),
cr_pp_customer_relationship_index = sapply(pp_customer_relationship_index,function(x)curve(curve4,x)))%>%
# NOTE(review): "customer_relaitonship" is misspelled but must match the
# element name as stored in the `weightage` list — do not "fix" it here.
mutate(customer_relationship_index = round(
(cr_pp_customer_relationship_index+cr_product_knowledge_delta)*
(weightage$customer_relaitonship)$product_knowledge+
(cr_pp_customer_relationship_index+cr_promotional_support_delta)*
(weightage$customer_relaitonship)$promotional_support+
cr_pp_customer_relationship_index*
(weightage$customer_relaitonship)$past_relationship)) %>%
# Offer attractiveness: product-specific relationship curves (curve19..22)
# plus a sales-performance factor.
mutate(oa_customer_relationship_factor =
mapply(function(x,y){if (x==as.character(product_code$product[1])){
curve(curve19,y)} else if(
x==as.character(product_code$product[2])){
curve(curve20,y)} else if (
x==as.character(product_code$product[3])) {
curve(curve21,y)} else {
curve(curve22,y)}},
product,customer_relationship_index),
oa_sales_performance_factor = sapply(sales_performance,function(x)curve(curve25,x))) %>%
mutate(cp_offer_attractiveness =
pp_offer_attractiveness*oa_customer_relationship_factor*
(weightage$cp_offer_attractiveness)$customer_relationship+
pp_offer_attractiveness*oa_sales_performance_factor*
(weightage$cp_offer_attractiveness)$sales_performance) %>%
# Blend current- and previous-phase attractiveness and accumulate.
mutate(offer_attractiveness = round(cp_offer_attractiveness*(weightage$total_attractiveness)$cp_offer_attractiveness+
pp_offer_attractiveness*(weightage$total_attractiveness)$pp_offer_attractiveness),
acc_offer_attractiveness = round(pp_acc_offer_attractiveness+offer_attractiveness))
tmp
}
## Build the phase's hospital/product-level decision table from the shiny
## input: one row per (hospital j in 1:10) x (product q in 1:4), combining
## the user's decisions (sales target, discount, promotional budget, time
## allocation) with the potential volume looked up in `volume_info`.
##
## NOTE(review): relies on globals from initial_setting.RData:
## hospital_info, product_info_initial, volume_info,
## total_promotional_budget, worktime.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return data frame with one row per hospital x product.
get.data1 <- function(input, phase) {
  # Collect rows in a pre-allocated list and bind once at the end, instead
  # of growing the data frame with rbind.fill inside the loop (O(n^2)
  # copies).  All rows share identical columns, so base rbind suffices.
  rows <- vector("list", 10 * 4)
  k <- 0
  for (j in 1:10) {
    for (q in 1:4) {
      name.phase <- as.character(paste("周期", phase, sep = ""))
      name.hospital <- as.character(hospital_info$hospital[j])
      name.product <- as.character(product_info_initial$product[q])
      name.sales_rep <- as.character(input[[paste("p", phase, "_sr_hosp", j, sep = "")]])
      value.sales_target <- as.numeric(input[[paste("p", phase, "_hosp", j, "_sales_target_", q, sep = "")]])
      value.discount <- as.numeric(input[[paste("p", phase, "_discount_hosp", j, "_", q, sep = "")]])
      # Budget share (percent) of the phase's total promotional budget.
      value.promotional_budget <- as.numeric(input[[paste("p", phase, "_promotional_budget_hosp", j, sep = "")]]) / 100 *
        total_promotional_budget[[paste("phase", phase, sep = "")]]
      # Time share (percent) of the rep's worktime spent on this product.
      value.sr_time <- as.numeric(input[[paste("p", phase, "_hosp", j, "_worktime_", q, sep = "")]]) / 100 * worktime
      # Split that time across the four contact types (percent each).
      value.time_on_doc <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_doc", sep = "")]]) / 100 * value.sr_time
      value.time_on_diet <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_diet", sep = "")]]) / 100 * value.sr_time
      value.time_on_admin <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_admin", sep = "")]]) / 100 * value.sr_time
      value.time_on_nurs <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_nurs", sep = "")]]) / 100 * value.sr_time
      k <- k + 1
      rows[[k]] <- data.frame(
        phase = name.phase,
        hospital = name.hospital,
        sales_rep = name.sales_rep,
        product = name.product,
        sales_target = value.sales_target,
        potential_volume = volume_info[which(volume_info$phase == name.phase &
          volume_info$hospital == name.hospital &
          volume_info$product == name.product), ]$potential_volume,
        discount = value.discount,
        promotional_budget = value.promotional_budget,
        sr_time = value.sr_time,
        time_on_doc.tmp = value.time_on_doc,
        time_on_diet.tmp = value.time_on_diet,
        time_on_admin.tmp = value.time_on_admin,
        time_on_nurs.tmp = value.time_on_nurs
      )
    }
  }
  do.call(rbind, rows)
}
## Build the phase's sales-rep-level decision table from the shiny input:
## one row per sales rep (1:5) with their training/field-work choices plus
## the FLM-wide decisions repeated on each row.
##
## NOTE(review): relies on the global `sr_info_initial_value` from
## initial_setting.RData.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return data frame with one row per sales rep.
get.data2 <- function(input, phase) {
  # Pre-allocate and bind once instead of growing via rbind.fill in the
  # loop.  All rows share identical columns, so base rbind suffices.
  rows <- vector("list", 5)
  for (j in 1:5) {
    name.sales_rep <- as.character(sr_info_initial_value$sales_rep[j])
    value.sales_training <- as.numeric(
      input[[paste("p", phase, "_sr", j, "_sales_training", sep = "")]])
    value.product_training <- as.numeric(
      input[[paste("p", phase, "_sr", j, "_product_training", sep = "")]])
    value.field_work <- as.numeric(
      input[[paste("p", phase, "_sr", j, "_field_work", sep = "")]])
    # The following four are FLM-level decisions, identical on every row.
    value.meetings_with_team <- as.numeric(
      input[[paste("p", phase, "_flm_team_meeting", sep = "")]])
    value.kpi_analysis <- as.numeric(
      input[[paste("p", phase, "_flm_kpi_analysis", sep = "")]])
    value.strategy_and_cycle_planning <- as.numeric(
      input[[paste("p", phase, "_flm_strategy_planning", sep = "")]])
    value.admin_work <- as.numeric(
      input[[paste("p", phase, "_flm_admin_work", sep = "")]])
    rows[[j]] <- data.frame(
      phase = as.character(paste("周期", phase, sep = "")),
      sales_rep = name.sales_rep,
      sales_training = value.sales_training,
      product_training = value.product_training,
      field_work = value.field_work,
      meetings_with_team = value.meetings_with_team,
      kpi_analysis = value.kpi_analysis,
      strategy_and_cycle_planning = value.strategy_and_cycle_planning,
      admin_work = value.admin_work
    )
  }
  do.call(rbind, rows)
}
## One-row data frame of the first-line manager's own time decisions for
## the phase, including the aggregated rep training/field-work totals.
##
## @param input shiny `input` object (list-like, indexed by widget id).
## @param phase phase number embedded in the widget ids.
## @return one-row data frame with the six flm_* columns.
get.data3 <- function(input, phase) {
  id <- function(suffix) paste("p", phase, "_flm_", suffix, sep = "")
  data.frame(
    flm_sales_training = sales_training(input, phase),
    flm_field_work = field_work(input, phase),
    flm_meetings_with_team = as.numeric(input[[id("team_meeting")]]),
    flm_kpi_analysis = as.numeric(input[[id("kpi_analysis")]]),
    flm_strategy_planning = as.numeric(input[[id("strategy_planning")]]),
    flm_admin_work = as.numeric(input[[id("admin_work")]]))
}
## Reduce one phase's full calculation() output to the phase-level summary
## used by the final report: success value plus total and accumulated
## offer attractiveness.
##
## @param data the data frame returned by calculation().
## @param contribution_margin the phase's contribution margin III.
## @return data frame with one row per phase and columns phase,
##   success_value, total_offer_attractiveness,
##   total_acc_offer_attractiveness.
##
## NOTE(review): depends on globals `weightage` and curve45..curve50,
## presumably from initial_setting.RData.  The misspellings
## "avarage_product_knowledge_index" and `$average_motivaiton` are kept as
## is — the latter must match the element name stored in `weightage`.
## curve() is scalar; the summarise/mutate below therefore assumes a
## single phase per call — TODO confirm.
data_filter <- function(data,contribution_margin) {
out <- data %>%
select(phase,
hospital,
product,
offer_attractiveness,
acc_offer_attractiveness,
real_revenue,
customer_relationship_index,
sales_skills_index,
product_knowledge_index,
motivation_index) %>%
distinct() %>%
# Roll product-level values up to hospital level.
group_by(phase,hospital) %>%
mutate(hospital_offer_attractiveness = sum(offer_attractiveness),
hospital_acc_offer_attractiveness = sum(acc_offer_attractiveness),
hospital_real_revenue = sum(real_revenue),
hospital_customer_relationship_index = sum(customer_relationship_index)) %>%
ungroup() %>%
select(phase,hospital,
hospital_offer_attractiveness,
hospital_acc_offer_attractiveness,
hospital_real_revenue,
hospital_customer_relationship_index,
sales_skills_index,
product_knowledge_index,
motivation_index) %>%
distinct() %>%
# Roll hospital-level values up to phase level.
group_by(phase) %>%
dplyr::summarise(total_offer_attractiveness=round(sum(hospital_offer_attractiveness)),
total_acc_offer_attractiveness = round(sum(hospital_acc_offer_attractiveness)),
total_revenue = sum(hospital_real_revenue),
average_customer_relationship_index = mean(hospital_customer_relationship_index),
average_sales_skills_index = mean(sales_skills_index),
avarage_product_knowledge_index = mean(product_knowledge_index),
average_motivation_index = mean(motivation_index)) %>%
# Weighted success score built from the curve45..curve50 lookup tables.
mutate(contribution_margin_III=contribution_margin,
success_value =
round(
(weightage$success_value)$total_sales*curve(curve50,total_revenue) +
(weightage$success_value)$contribution_margin*curve(curve49,contribution_margin_III) +
(weightage$success_value)$average_customer_relationship*curve(curve45,average_customer_relationship_index) +
(weightage$success_value)$average_sales_skills*curve(curve48,average_sales_skills_index) +
(weightage$success_value)$average_product_knowledge*curve(curve47,avarage_product_knowledge_index) +
(weightage$success_value)$average_motivaiton*curve(curve46,average_motivation_index),0)) %>%
select(phase,
success_value,
total_offer_attractiveness,
total_acc_offer_attractiveness )
out
}
## Assemble the final score report across the four phases.
##
## Stacks the per-phase summaries (as produced by data_filter()), renames
## the three score columns to their Chinese display names, and pivots to
## one row per metric with one column per phase, ordered as in
## `final.names`.
##
## @param phase1,phase2,phase3,phase4 per-phase summaries with columns
##   phase, success_value, total_offer_attractiveness,
##   total_acc_offer_attractiveness.
## @return data frame with the metric names as rownames and one column
##   per phase.
final_report <- function(phase1, phase2, phase3, phase4) {
  final.names <- c("综合得分", "商业价值", "累计商业价值")
  no.list <- data.frame(number = 1:length(final.names),
                        variable = final.names)
  data <- rbind(phase1,
                phase2,
                phase3,
                phase4)
  colnames(data)[2:4] <- c("综合得分", "商业价值", "累计商业价值")
  # Long form, then one column per phase.
  data <- data %>%
    gather(variable, value, -phase) %>%
    spread(phase, value)
  # Order the metric rows per final.names.  (The original also assigned
  # rownames here, but the join below rebuilt `data` and discarded them,
  # so that assignment was redundant and has been removed.)
  data <- data %>% left_join(no.list, by = "variable") %>%
    arrange(number) %>%
    select(-number)
  rownames(data) <- data$variable
  data <- data %>% select(-variable)
  data
}
##participant report
## staff report 1
report_data <- function(tmp,flm_data) {
staff_report <- tmp %>%
select(sales_rep,
incentive_factor,
product_training,
sales_training,
meetings_with_team,
field_work,
sr_time,
sr_time_total,
product_knowledge_index,
pp_experience_index,
experience_index,
sr_revenue,
sr_acc_revenue,
pp_sales_skills_index,
sales_skills_index,
overhead_time) %>%
distinct() %>%
mutate(basic_salary=basicSalary,
incentive_salary= round(basic_salary*incentive_factor/100,2),
total_salary=round(incentive_salary+basicSalary,2),
visit_time=sr_time_total-overhead_time,
total_time=overhead_time+
product_training+
sales_training+
meetings_with_team+
visit_time)
report1_mod1 <- staff_report %>%
select(sales_rep,
basic_salary,
incentive_salary,
total_salary) %>%
distinct()
colnames(report1_mod1) <- c("销售代表",
"基本工资(元)",
"奖金(元)",
"总薪酬(元)")
report1_mod1 <- report1_mod1 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod1) <- report1_mod1$variable
report1_mod1 <- report1_mod1 %>% select(-variable)
report1_mod2 <- staff_report %>%
select(overhead_time,
product_training,
sales_training,
meetings_with_team,
visit_time,
total_time,
sales_rep) %>%
distinct()
colnames(report1_mod2) <- c("日常事物(天)",
"产品培训(天)",
"销售培训(天)",
"团队会议(天)",
"医院拜访(天)",
"总工作时间(天)",
"销售代表")
report1_mod2 <- report1_mod2 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
report1_rank2 <- data.frame(
variable=c("日常事物(天)",
"产品培训(天)",
"销售培训(天)",
"团队会议(天)",
"医院拜访(天)",
"总工作时间(天)"),
rank=1:6,
stringsAsFactors = F
)
report1_mod2 <- report1_mod2 %>%
left_join(report1_rank2,by="variable") %>%
arrange(rank)
rownames(report1_mod2) <- report1_mod2$variable
report1_mod2 <- report1_mod2 %>% select(-variable,-rank)
report1_mod3 <- staff_report %>%
select(sales_rep,
product_knowledge_index,
product_training) %>%
distinct()
colnames(report1_mod3) <- c("销售代表",
"产品知识(指数)",
"产品培训(天)")
report1_mod3 <- report1_mod3 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod3) <- report1_mod3$variable
report1_mod3 <- report1_mod3 %>% select(-variable)
report1_mod4 <- staff_report %>%
select(pp_experience_index,
sr_revenue,
sr_acc_revenue,
experience_index,
sales_rep) %>%
distinct()
colnames(report1_mod4) <- c("前期经验",
"当期销售(元)",
"累计总销售(元)",
"当期经验",
"销售代表")
report1_mod4 <- report1_mod4 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod4) <- report1_mod4$variable
report1_mod4 <- report1_mod4 %>% select(-variable)
report1_mod5 <- staff_report %>%
select(sales_rep,
pp_sales_skills_index,
sales_skills_index,
field_work) %>%
distinct()
colnames(report1_mod5) <- c("销售代表",
"前期销售技巧(指数)",
"当期销售技巧(指数)",
"经理医院随访(天)")
report1_mod5 <- report1_mod5 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod5) <- report1_mod5$variable
report1_mod5 <- report1_mod5 %>% select(-variable)
## flm report
flm_report <- staff_report %>%
mutate(all_sr_salary=sum(total_salary,na.rm=T)) %>%
select(all_sr_salary) %>%
distinct()
flm_report <- flm_data %>%
mutate(all_sr_salary = flm_report$all_sr_salary,
work_time=flm_sales_training+
flm_field_work+
flm_meetings_with_team+
flm_kpi_analysis+
flm_strategy_planning+
flm_admin_work)
report2_mod1 <- flm_report%>%
select(all_sr_salary) %>%
mutate(variable="总薪酬(元)")
rownames(report2_mod1) <- report2_mod1$variable
colnames(report2_mod1)[1] <- "值"
report2_mod1 <- report2_mod1 %>%
select(-variable)
report2_mod2 <- flm_report %>%
select(-all_sr_salary)
colnames(report2_mod2) <- c("销售培训(天)",
"经理随访(天)",
"团队会议(天)",
"KPI分析(天)",
"战略和周期计划(天)",
"行政工作(天)",
"总工作时间(天)")
report2_mod2 <- report2_mod2 %>%
gather(variable,`值`)
rownames(report2_mod2) <- report2_mod2$variable
report2_mod2 <- report2_mod2 %>% select(-variable)
## brief time allocation of hospital report
report3_rank1 <- data.frame(
"因素"=c("销售代表","计划时间分配(天)","实际时间分配(天)"),
rank=1:3,
stringsAsFactors =F
)
report3_mod1 <- tmp %>%
select(no.hospitals,
hospital,
product,
sales_rep,
sr_time,
overhead_time) %>%
distinct() %>%
mutate(real_time =
round(sr_time-overhead_time/no.hospitals,2)) %>%
select(-no.hospitals,-overhead_time)
colnames(report3_mod1) <- c("医院",
"产品",
"销售代表",
"计划时间分配(天)",
"实际时间分配(天)")
report3_mod1 <- report3_mod1 %>%
gather(`因素`,value,-`医院`,-`产品`) %>%
spread(`产品`,value) %>%
left_join(report3_rank1,by="因素") %>%
arrange(`医院`,rank) %>%
select(-rank)
## evaluation of decision report
eva_decision_report <- tmp %>%
select(hospital,
product,
sales_rep,
time_on_doc,
time_on_diet,
time_on_admin,
time_on_nurs,
strategy_and_cycle_planning,
kpi_analysis,
meetings_with_team,
admin_work,
contact_priority_fit_index,
deployment_quality_index,
pp_deployment_quality_index) %>%
group_by(hospital) %>%
mutate(total_deployment_quality_index= round(sum(deployment_quality_index),2),
total_pp_deployment_quality_index=round(sum(pp_deployment_quality_index),2)) %>%
ungroup() %>%
select(-product)
report4_mod1 <- eva_decision_report %>%
select(hospital,
sales_rep) %>%
distinct()
colnames(report4_mod1) <- c("医院",
"销售代表")
rownames(report4_mod1) <- report4_mod1$医院
report4_mod1 <- report4_mod1 %>% select(-`医院`)
report4_mod2 <- eva_decision_report %>%
select(hospital,
time_on_doc,
time_on_diet,
time_on_admin,
time_on_nurs,
contact_priority_fit_index) %>%
distinct()
colnames(report4_mod2) <- c("医院",
"A级客户时间分配",
"B级客户时间分配",
"C级客户时间分配",
"D级客户时间分配",
"总分级匹配度")
rownames(report4_mod2) <- report4_mod2$医院
report4_mod2 <- report4_mod2 %>% select(-`医院`)
report4_mod3 <- eva_decision_report %>%
select(hospital,
total_pp_deployment_quality_index,
strategy_and_cycle_planning,
kpi_analysis,
meetings_with_team,
admin_work,
contact_priority_fit_index,
total_deployment_quality_index) %>%
distinct()
colnames(report4_mod3) <- c("医院",
"上期决策质量(指数)",
"战略和周期计划(天)",
"KPI分析(天)",
"团队会议(天)",
"行政工作(天)",
"总分级匹配度",
"当期决策质量(指数)")
rownames(report4_mod3) <- report4_mod3$医院
report4_mod3 <- report4_mod3 %>% select(-`医院`)
## report d
report5_mod1 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume) %>%
group_by(product) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
real_revenue_by_product = sum(real_revenue),
production_fee = real_revenue_by_product*production_cost,
profit1 = real_revenue_by_product - production_fee,
production_fee_percent = round(production_fee/real_revenue_by_product*100,2),
profit1_percent = round(profit1/real_revenue_by_product*100,2)) %>%
ungroup() %>%
select(-hospital,
-real_revenue,
-real_volume,
-production_cost) %>%
distinct()
colnames(report5_mod1) <- c("产品",
"销售金额(元)",
"生产成本(元)",
"利润贡献I(元)",
"生产成本(%)",
"利润贡献I(%)")
report5_mod1 <- report5_mod1 %>%
gather(`因素`,value,-`产品`) %>%
spread(`产品`,value)
report5_rank1 <- data.frame(
"因素"= c("销售金额(元)",
"生产成本(元)",
"生产成本(%)",
"利润贡献I(元)",
"利润贡献I(%)"),
rank=1:length(report5_mod1$因素),
stringsAsFactors = F
)
report5_mod1 <- report5_mod1 %>%
left_join(report5_rank1,by="因素") %>%
distinct() %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod1) <- report5_mod1$因素
report5_mod1 <- report5_mod1 %>% select(-`因素`)
report5_mod2 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume) %>%
group_by(product) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
real_revenue_by_product = sum(real_revenue),
real_revenue_by_volume = sum(real_volume),
real_revenue_by_product_per = real_revenue_by_product/real_revenue_by_volume,
production_fee_per = real_revenue_by_product_per*production_cost,
profit1 = real_revenue_by_product_per - production_fee_per,
production_fee_percent = round(production_fee_per/real_revenue_by_product_per*100,2),
profit1_percent = round(profit1/real_revenue_by_product_per*100,2)) %>%
ungroup() %>%
select(-hospital,
-real_revenue,
-real_volume,
-production_cost,
-real_revenue_by_product,
-real_revenue_by_volume) %>%
distinct()
colnames(report5_mod2) <- c("产品",
"销售金额(元)",
"生产成本(元)",
"利润贡献I(元)",
"生产成本(%)",
"利润贡献I(%)")
report5_mod2 <- report5_mod2 %>%
gather(`因素`,value,-`产品`) %>%
spread(`产品`,value)
report5_mod2 <- report5_mod2 %>%
left_join(report5_rank1,by="因素") %>%
distinct() %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod2) <- report5_mod2$因素
report5_mod2 <- report5_mod2 %>% select(-`因素`)
report5_mod3 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume,
promotional_budget,
discount) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
production_fee = production_cost*real_revenue,
discount_fee = discount/100*real_revenue) %>%
group_by(hospital) %>%
mutate(real_revenue_by_hosp = sum(real_revenue),
total_production_fee_by_hosp =sum(production_fee),
discount_fee_by_hosp = sum(discount_fee)) %>%
ungroup() %>%
select(hospital,
total_production_fee_by_hosp,
real_revenue_by_hosp,
discount_fee,
promotional_budget) %>%
distinct() %>%
mutate(total_revenue =round(sum(real_revenue_by_hosp),2),
total_production_fee =round(sum(total_production_fee_by_hosp),2),
total_discount_fee = round(sum(discount_fee),2),
total_promotional_budget = round(sum(promotional_budget),2),
total_changeable_fee = total_discount_fee+total_promotional_budget,
total_salary=round(report2_mod1$值,2),
total_admin_fee=round(total_revenue*admin_fee_weight,2)) %>%
select(-hospital,
-total_discount_fee,
-total_promotional_budget,
-total_production_fee_by_hosp,
-real_revenue_by_hosp,
-discount_fee,
-promotional_budget) %>%
distinct() %>%
mutate(profit1=total_revenue-total_production_fee,
profit2=profit1-total_changeable_fee,
profit3=profit2-total_salary-total_admin_fee)
report5_rank3 <- data.frame(
variable=c("销售额",
"生产成本",
"利润贡献I",
"其他可变成本",
"利润贡献II",
"员工薪酬",
"总体行政花销",
"利润贡献III"),
rank = 1:8,
stringsAsFactors = F
)
report5_mod3_1 <- report5_mod3
colnames(report5_mod3_1) <- c("销售额",
"生产成本",
"其他可变成本",
"员工薪酬",
"总体行政花销",
"利润贡献I",
"利润贡献II",
"利润贡献III")
report5_mod3_1 <- report5_mod3_1 %>%
gather(variable,"金额(元)")
report5_mod3_2 <- report5_mod3 %>%
mutate(total_revenue_percent = round(total_revenue/total_revenue*100,2),
total_production_fee_percent = round(total_production_fee/total_revenue*100,2),
profit1_percent = round(profit1/total_revenue*100,2),
total_changeable_fee_percent = round(total_changeable_fee/total_revenue*100,2),
profit2_percent = round(profit2/total_revenue*100,2),
total_salary_percent = round(total_salary/total_revenue*100,2),
total_admin_fee_percent =round(total_admin_fee/total_revenue*100,2),
profit3_percent = round(profit3/total_revenue*100,2)) %>%
select(total_revenue_percent,
total_production_fee_percent,
profit1_percent,
total_changeable_fee_percent,
profit2_percent,
total_salary_percent,
total_admin_fee_percent,
profit3_percent)
colnames(report5_mod3_2) <- c("销售额",
"生产成本",
"利润贡献I",
"其他可变成本",
"利润贡献II",
"员工薪酬",
"总体行政花销",
"利润贡献III")
report5_mod3 <- report5_mod3_2 %>%
gather(variable,"占比") %>%
left_join(report5_mod3_1,by="variable") %>%
left_join(report5_rank3,by="variable") %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod3) <- report5_mod3$variable
report5_mod3 <- report5_mod3 %>%
select(-variable) %>%
select(`金额(元)`,`占比`)
## report c
report6_rank <- data.frame(
variable=c("销售额(元)",
"生产成本(元)",
"生产成本(%)",
"利润贡献I(元)",
"利润贡献I(%)",
"其他可变成本(元)",
"其他可变成本(%)",
"推广费用预算(元)",
"推广费用预算(%)",
"利润贡献I(元)",
'利润贡献II(%)'),
rank=1:11,
stringsAsFactors = F)
product_report_peraccount <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume,
promotional_budget,
discount) %>%
group_by(hospital,product) %>%
mutate(no.product=n_distinct(product),
production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
production_fee = round(production_cost*real_revenue,2),
profit1 = round(real_revenue - production_fee,2),
discount_fee = round(discount/100*real_revenue,2),
promotion_fee = round(promotional_budget/no.product,2),
profit2 = round(profit1- discount_fee - promotion_fee,2)) %>%
ungroup() %>%
select(hospital,
product,
real_revenue,
production_fee,
profit1,
discount_fee,
promotion_fee,
profit2) %>%
group_by(hospital) %>%
do(plyr::rbind.fill(.,data.frame(hospital=first(.$hospital),
product="总体",
real_revenue = sum(.$real_revenue),
production_fee = sum(.$production_fee),
profit1 = sum(.$profit1),
discount_fee = sum(.$discount_fee),
promotion_fee = sum(.$promotion_fee),
profit2 = sum(.$profit2)))) %>%
ungroup() %>%
mutate(production_fee_percent = round(production_fee/real_revenue*100,2),
profit1_percent = round(profit1/real_revenue*100,2),
discount_fee_percent = round(discount_fee/real_revenue*100,2),
promotion_fee_percent = round(promotion_fee/real_revenue*100,2),
profit2_percent = round(profit2/real_revenue*100,2))
colnames(product_report_peraccount) <- c("医院",
"产品",
"销售额(元)",
"生产成本(元)",
"利润贡献I(元)",
"其他可变成本(元)",
"推广费用预算(元)",
"利润贡献II(元)",
"生产成本(%)",
"利润贡献I(%)",
"其他可变成本(%)",
"推广费用预算(%)",
'利润贡献II(%)')
report6_mod1 <- product_report_peraccount %>%
gather(variable,value,-`医院`,-`产品`) %>%
spread(`产品`,value) %>%
left_join(report6_rank,by="variable") %>%
arrange(`医院`,rank) %>%
select(-rank)
## report b
report7_mod1 <- tmp %>%
select(hospital,
real_revenue,
pp_real_revenue,
real_volume,
pp_real_volume) %>%
group_by(hospital) %>%
dplyr::summarise(real_revenue_by_hosp = round(sum(real_revenue),2),
pp_real_revenue_by_hosp = round(sum(pp_real_revenue),2),
real_revenue_increase = real_revenue_by_hosp - pp_real_revenue_by_hosp,
real_volume_by_hosp = round(sum(real_volume),2),
pp_real_volume_by_hosp = round(sum(pp_real_volume),2),
real_volume_increase = real_volume_by_hosp - pp_real_volume_by_hosp) %>%
do(plyr::rbind.fill(.,data.frame(hospital="总体",
real_revenue_by_hosp=sum(.$real_revenue_by_hosp),
pp_real_revenue_by_hosp=sum(.$pp_real_revenue_by_hosp),
real_revenue_increase=sum(.$real_revenue_increase),
real_volume_by_hosp=sum(.$real_volume_by_hosp),
pp_real_volume_by_hosp=sum(.$pp_real_volume_by_hosp),
real_volume_increase=sum(.$real_volume_increase)))) %>%
mutate(real_revenue_increase_ratio = round(real_revenue_increase/pp_real_revenue_by_hosp*100,2),
real_volume_increase_ratio = round(real_volume_increase/pp_real_volume_by_hosp*100,2)) %>%
select(hospital,
pp_real_revenue_by_hosp,
real_revenue_by_hosp,
real_revenue_increase,
real_revenue_increase_ratio,
pp_real_volume_by_hosp,
real_volume_by_hosp,
real_volume_increase,
real_volume_increase_ratio)
colnames(report7_mod1) <- c("医院",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod1) <- report7_mod1$医院
report7_mod1 <- report7_mod1 %>%
select(-`医院`)
report7_mod2 <- tmp %>%
select(sales_rep,
sr_revenue,
pp_sr_revenue,
sr_volume,
pp_sr_volume) %>%
distinct() %>%
mutate(sr_revenue_increase=sr_revenue-pp_sr_revenue,
sr_volume_increase=sr_volume-pp_sr_volume) %>%
do(plyr::rbind.fill(.,data.frame(sales_rep="总体",
sr_revenue=sum(.$sr_revenue),
pp_sr_revenue =sum(.$pp_sr_revenue),
sr_revenue_increase=sum(.$sr_revenue_increase),
sr_volume=sum(.$sr_volume),
pp_sr_volume=sum(.$pp_sr_volume),
sr_volume_increase=sum(.$sr_volume_increase)))) %>%
mutate(sr_revenue_increase_ratio = round(sr_revenue_increase/pp_sr_revenue*100,2),
sr_volume_increase_ratio = round(sr_volume_increase/pp_sr_volume*100,2)) %>%
select(sales_rep,
pp_sr_revenue,
sr_revenue,
sr_revenue_increase,
sr_revenue_increase_ratio,
pp_sr_volume,
sr_volume,
sr_volume_increase,
sr_volume_increase_ratio)
colnames(report7_mod2) <- c("销售代表",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod2) <- report7_mod2$销售代表
report7_mod2 <- report7_mod2 %>%
select(-`销售代表`)
report7_mod3 <- tmp %>%
select(product,
real_revenue,
pp_real_revenue,
real_volume,
pp_real_volume) %>%
group_by(product) %>%
dplyr::summarise(real_revenue_by_product = round(sum(real_revenue),2),
pp_real_revenue_by_product = round(sum(pp_real_revenue),2),
real_revenue_increase = real_revenue_by_product - pp_real_revenue_by_product,
real_volume_by_product = round(sum(real_volume),2),
pp_real_volume_by_product = round(sum(pp_real_volume),2),
real_volume_increase = real_volume_by_product - pp_real_volume_by_product) %>%
do(plyr::rbind.fill(.,data.frame(product="总体",
real_revenue_by_product=sum(.$real_revenue_by_product),
pp_real_revenue_by_product=sum(.$pp_real_revenue_by_product),
real_revenue_increase=sum(.$real_revenue_increase),
real_volume_by_product=sum(.$real_volume_by_product),
pp_real_volume_by_product=sum(.$pp_real_volume_by_product),
real_volume_increase=sum(.$real_volume_increase)))) %>%
mutate(real_revenue_increase_ratio = round(real_revenue_increase/pp_real_revenue_by_product*100,2),
real_volume_increase_ratio = round(real_volume_increase/pp_real_volume_by_product*100,2)) %>%
select(product,
pp_real_revenue_by_product,
real_revenue_by_product,
real_revenue_increase,
real_revenue_increase_ratio,
pp_real_volume_by_product,
real_volume_by_product,
real_volume_increase,
real_volume_increase_ratio)
colnames(report7_mod3) <- c("产品",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod3) <- report7_mod3$产品
report7_mod3 <- report7_mod3 %>%
select(-`产品`)
## report a
offer_attractiveness_report <- tmp %>%
group_by(hospital) %>%
mutate(hospital_revenue = round(sum(real_revenue),2),
hospital_offer_attractiveness = round(sum(offer_attractiveness),2),
hospital_acc_offer_attractiveness =round(sum(acc_offer_attractiveness),2))%>%
ungroup() %>%
select(sales_rep,
hospital,
incentive_factor,
product_knowledge_index,
sales_skills_index,
customer_relationship_index,
motivation_index,
hospital_revenue,
hospital_offer_attractiveness,
hospital_acc_offer_attractiveness) %>%
distinct() %>%
mutate(total_revenue = round(sum(hospital_revenue),2),
total_offer_attractiveness = round(sum(hospital_offer_attractiveness),2),
total_acc_offer_attractiveness = round(sum(hospital_acc_offer_attractiveness),2),
average_customer_relationship_index = round(mean(customer_relationship_index),2),
average_sales_skills_index = round(mean(sales_skills_index),2),
average_product_knowledge_index = round(mean(product_knowledge_index),2),
average_motivation_index = round(mean(motivation_index),2))
report8_mod1 <- offer_attractiveness_report %>%
ungroup() %>%
dplyr::mutate(profit3=as.numeric(report5_mod3[8,1])) %>%
select(total_revenue,
profit3,
average_customer_relationship_index,
average_sales_skills_index,
average_product_knowledge_index,
average_motivation_index,
total_offer_attractiveness,
total_acc_offer_attractiveness) %>%
distinct()
colnames(report8_mod1) <- c("总销售(元)",
"总利润(元)",
"客户关系的平均水平(指数)",
"平均销售技巧水平(指数)",
"平均产品知识水平(指数)",
"平均动力值(指数)",
"商业价值(指数)",
"累计商业价值(指数)")
report8_mod1 <- report8_mod1 %>% gather(variable,`值`)
rownames(report8_mod1) <- report8_mod1$variable
report8_mod1 <- report8_mod1 %>% select(-variable)
report8_mod2 <- tmp %>%
select(hospital,product,real_revenue) %>%
group_by(hospital) %>%
dplyr::summarise(hospital_revenue = round(sum(real_revenue),2)) %>%
ungroup() %>%
mutate(market_revenue=round(sum(hospital_revenue),2),
market_share=round(hospital_revenue/market_revenue*100,2)) %>%
select(hospital,
hospital_revenue,
market_share) %>%
distinct()
colnames(report8_mod2) <- c("医院",
"总销售(元)",
"总市场的市场份额(%)")
rownames(report8_mod2) <- report8_mod2$医院
report8_mod2 <- report8_mod2 %>% select(-`医院`)
out<-list("report1_mod1"=report1_mod1,
"report1_mod2"=report1_mod2,
"report1_mod3"=report1_mod3,
"report1_mod4"=report1_mod4,
"report1_mod5"=report1_mod5,
"report2_mod1"=report2_mod1,
"report2_mod2"=report2_mod2,
"report3_mod1"=report3_mod1,
"report4_mod1"=report4_mod1,
"report4_mod2"=report4_mod2,
"report4_mod3"=report4_mod3,
"report5_mod1"=report5_mod1,
"report5_mod2"=report5_mod2,
"report5_mod3"=report5_mod3,
"report6_mod1"=report6_mod1,
"report7_mod1"=report7_mod1,
"report7_mod2"=report7_mod2,
"report7_mod3"=report7_mod3,
"report8_mod1"=report8_mod1,
"report8_mod2"=report8_mod2
)
out
}
library(shiny)
library(shinydashboard)
library(shinyjs)
library(DT)
library(plyr)
library(dplyr)
library(tidyr)
library(digest)
load("initial_setting.RData")
## curve funcion
## Piece-wise linear lookup on a two-column response table (columns x, y).
## NOTE: this name shadows graphics::curve inside this app.
##
## input_x outside the table's x-range is clamped to the boundary y value.
## Inside the range, the two table points whose x values are closest to
## input_x are used as interpolation anchors (ties broken by first
## occurrence); when both anchors fall on the same side of input_x this
## extrapolates linearly, matching the original behaviour.
curve <- function(name, input_x) {
  tbl <- name
  if (input_x <= min(tbl$x)) {
    return(tbl$y[which.min(tbl$x)])
  }
  if (input_x >= max(tbl$x)) {
    return(tbl$y[which.max(tbl$x)])
  }
  nearest <- which.min(abs(input_x - tbl$x))
  p1 <- tbl[nearest, ]
  rest <- tbl[-nearest, ]
  p2 <- rest[which.min(abs(input_x - rest$x)), ]
  lo <- if (p1$x <= p2$x) p1 else p2
  hi <- if (p1$x <= p2$x) p2 else p1
  # Standard linear interpolation between the two anchor points.
  lo$y + (input_x - lo$x) / (hi$x - lo$x) * (hi$y - lo$y)
}
## Fit of the actually-spent contact time against the best (target) time:
## 1 once the target is met or exceeded, otherwise the achieved fraction
## rounded to two decimals.
contact_fit <- function(input, best) {
  if (input >= best) {
    return(1)
  }
  round(input / best, 2)
}
## Sum of the four contact-type worktime inputs (doc / diet / admin /
## nurs) entered for one hospital in one phase. NA entries are ignored;
## widgets that do not exist yet (NULL) contribute nothing.
decision1_summary <- function(input, phase, hospital) {
  prefix <- paste("p", phase, "_hosp", hospital, "_worktime_", sep = "")
  groups <- c("doc", "diet", "admin", "nurs")
  # lapply + unlist so a NULL widget (numeric(0)) is silently dropped,
  # exactly as c() did in the original.
  values <- lapply(groups, function(g) as.numeric(input[[paste(prefix, g, sep = "")]]))
  sum(unlist(values), na.rm = TRUE)
}
#' Live summary used while a phase's decisions are entered.
#'
#' Walks all ten hospital panels of the given phase and accumulates:
#'   (1) the total of the per-hospital promotional-budget inputs (the raw
#'       widget values are summed; presumably percentages of the phase
#'       budget -- TODO confirm units against the UI), and
#'   (2) each sales rep's booked time: the hospital worktime_1..4
#'       percentages scaled by the global `worktime`, credited to
#'       whichever of the five reps is assigned to that hospital, plus
#'       that rep's own sales/product training and the team meeting time.
#'
#' Despite the "phase1_" variable prefixes, the function works for
#' whichever phase number is passed in.
#'
#' Relies on the globals `worktime` and `available_srs` loaded from
#' initial_setting.RData.
#'
#' @param input Shiny input object (list-like, indexed by widget id).
#' @param phase Phase number (1-4).
#' @return Numeric vector of length 6: total promotional budget input,
#'   then the total booked time of sales reps 1-5.
calculator <- function(input,phase){
# Running totals; NA widget values are ignored via na.rm below.
phase1_promotional_budget=0
phase1_total_time_arrangement1 <- 0
phase1_total_time_arrangement2 <- 0
phase1_total_time_arrangement3 <- 0
phase1_total_time_arrangement4 <- 0
phase1_total_time_arrangement5 <- 0
for(i in 1:10){
phase1_promotional_budget <-
sum(c(phase1_promotional_budget,
as.numeric(input[[paste("p",phase,"_promotional_budget_hosp",i,sep="")]])),
na.rm = TRUE)
# Visit time at hospital i: the four product worktime shares (percent)
# of the global working-time budget.
tmp <- sum(c(as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_1",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_2",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_3",sep="")]])/100*worktime,
as.numeric(input[[paste("p",phase,"_hosp",i,"_worktime_4",sep="")]])/100*worktime),
na.rm = TRUE)
# Credit the visit time to whichever rep is assigned to hospital i.
if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[1]){
phase1_total_time_arrangement1 <-
phase1_total_time_arrangement1 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[2]) {
phase1_total_time_arrangement2 <-
phase1_total_time_arrangement2 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[3]) {
phase1_total_time_arrangement3 <-
phase1_total_time_arrangement3 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[4]) {
phase1_total_time_arrangement4 <-
phase1_total_time_arrangement4 +tmp
} else if (input[[paste("p",phase,"_sr_hosp",i,sep = "")]]==
available_srs[5]) {
phase1_total_time_arrangement5 <-
phase1_total_time_arrangement5 +tmp
}
}
# Team meeting time is shared; training time is individual per rep.
team_meeting <- as.numeric(input[[paste("p",phase,"_flm_team_meeting",sep="")]])
phase1_total_time_arrangement1 <- phase1_total_time_arrangement1 +
as.numeric(input[[paste("p",phase,"_sr1_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr1_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement2 <- phase1_total_time_arrangement2 +
as.numeric(input[[paste("p",phase,"_sr2_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr2_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement3 <- phase1_total_time_arrangement3 +
as.numeric(input[[paste("p",phase,"_sr3_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr3_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement4 <- phase1_total_time_arrangement4 +
as.numeric(input[[paste("p",phase,"_sr4_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr4_product_training",sep="")]]) +
team_meeting
phase1_total_time_arrangement5 <- phase1_total_time_arrangement5 +
as.numeric(input[[paste("p",phase,"_sr5_sales_training",sep="")]]) +
as.numeric(input[[paste("p",phase,"_sr5_product_training",sep="")]]) +
team_meeting
data <- c(phase1_promotional_budget,
phase1_total_time_arrangement1,
phase1_total_time_arrangement2,
phase1_total_time_arrangement3,
phase1_total_time_arrangement4,
phase1_total_time_arrangement5)
data
}
#' Total sales-training days scheduled across all five sales reps for
#' one phase. NA (missing) entries are ignored.
#'
#' Bug fix: `na.rm = T` was previously placed INSIDE `c()`, so it was
#' coerced to 1 and silently added to every total, while `sum()` itself
#' never removed NAs (a single NA entry made the whole result NA).
#'
#' @param input Shiny input object (list-like, indexed by widget id).
#' @param phase Phase number (1-4).
#' @return Total sales-training days as a single numeric value.
sales_training <- function(input, phase) {
  # lapply + unlist so a NULL (not-yet-rendered) widget is dropped
  # rather than breaking the sum.
  days <- unlist(lapply(1:5, function(i) {
    as.numeric(input[[paste("p", phase, "_sr", i, "_sales_training", sep = "")]])
  }))
  sum(days, na.rm = TRUE)
}
#' Total field-work days scheduled across all five sales reps for one
#' phase. NA (missing) entries are ignored.
#'
#' Bug fix: `na.rm = T` was previously placed INSIDE `c()`, so it was
#' coerced to 1 and silently added to every total, while `sum()` itself
#' never removed NAs (a single NA entry made the whole result NA).
#'
#' @param input Shiny input object (list-like, indexed by widget id).
#' @param phase Phase number (1-4).
#' @return Total field-work days as a single numeric value.
field_work <- function(input, phase) {
  # lapply + unlist so a NULL (not-yet-rendered) widget is dropped
  # rather than breaking the sum.
  days <- unlist(lapply(1:5, function(i) {
    as.numeric(input[[paste("p", phase, "_sr", i, "_field_work", sep = "")]])
  }))
  sum(days, na.rm = TRUE)
}
## Total management workload (days) for one phase: the team's training
## and field-work totals plus the first-line manager's team-meeting,
## KPI-analysis, strategy-planning and admin-work time. NA entries are
## ignored.
total_management <- function(input, phase) {
  flm <- function(suffix) {
    as.numeric(input[[paste("p", phase, "_flm_", suffix, sep = "")]])
  }
  parts <- c(sales_training(input, phase),
             field_work(input, phase),
             flm("team_meeting"),
             flm("kpi_analysis"),
             flm("strategy_planning"),
             flm("admin_work"))
  sum(parts, na.rm = TRUE)
}
#' Core simulation step: derive all current-period indices from the
#' previous-period state and the current-period decisions.
#'
#' Joins the hospital/product-level decision table (cp_data1) with its
#' previous-period counterpart (pp_data1) and the rep-level decision
#' table (cp_data2) with its previous-period counterpart (pp_data2),
#' then runs one long dplyr pipeline computing, in order: realised
#' volume/revenue, rep-level aggregates, contact-priority fit, skill /
#' knowledge / motivation indices, rep sales performance, deployment
#' quality, promotional support, overall sales performance, customer
#' relationship, and finally the offer-attractiveness scores.
#'
#' Relies on many globals from initial_setting.RData: the response
#' tables curve2..curve50 (via curve()), `production_price`,
#' `contact_priority_info`, `product_code`, `weightage`, `overhead`.
#'
#' @param pp_data1 Previous-period hospital/product table (get.data1
#'   output enriched with pp_* state columns).
#' @param pp_data2 Previous-period rep-level table.
#' @param cp_data1 Current-period hospital/product decisions (get.data1).
#' @param cp_data2 Current-period rep-level decisions (get.data2).
#' @return One wide data frame per hospital x product row with all
#'   intermediate factors and final indices as columns.
calculation <- function(pp_data1,
pp_data2,
cp_data1,
cp_data2){
# Merge current-period decisions with previous-period state.
tmp1 <- left_join(cp_data1,pp_data1,by=c("hospital","product"))
tmp2 <- left_join(cp_data2,pp_data2,by=c("sales_rep"))
tmp <- left_join(tmp1,tmp2,by=c("phase","sales_rep")) %>%
# Per-hospital aggregates of last period's performance and this
# period's contact-time split.
group_by(hospital) %>%
mutate(pp_sales_performance_by_hosp=sum(pp_sales_performance),
time_on_doc=sum(time_on_doc.tmp),
time_on_diet=sum(time_on_diet.tmp),
time_on_admin=sum(time_on_admin.tmp),
time_on_nurs=sum(time_on_nurs.tmp)) %>%
ungroup() %>%
# Realised volume/revenue: last period's performance moves potential
# volume through response table curve31.
mutate(volume_factor=sapply(pp_sales_performance_by_hosp,function(x) curve(curve31,x)),
real_volume=(1+volume_factor)*potential_volume,
product_price = sapply(product,function(x) production_price[which(production_price$product==x),]$price),
real_revenue= real_volume*product_price,
target_revenue= sales_target*product_price) %>%
# Rep-level totals for the phase.
group_by(phase,sales_rep) %>%
mutate(no.hospitals = n_distinct(hospital),
sr_revenue = round(sum(real_revenue,na.rm=T),2),
sr_volume = round(sum(real_volume,na.rm=T),2),
sr_target_revenue = sum(target_revenue,na.rm=T),
sr_time_total=sum(sr_time,na.rm=T)) %>%
ungroup %>%
# Experience, overhead, incentive and contact-priority-fit factors.
dplyr::mutate(sr_acc_revenue = sr_revenue+pp_sr_acc_revenue,
experience_index = sapply(sr_acc_revenue, function(x) round(curve(curve11,x),2)),
sr_acc_field_work = pp_sr_acc_field_work+field_work,
overhead_factor = sapply(pp_motivation_index,function(x) curve(curve12,x)),
overhead_time = round(overhead_factor*overhead,0),
pp_experience_index = sapply(pp_sr_acc_revenue,function(x) round(curve(curve11,x),2)),
sales_target_realization = round(sr_target_revenue/sr_revenue*100,0),
incentive_factor = sapply(sales_target_realization,function(x) curve(curve10,x)),
contact_priority_fit_doc =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.a),
phase,hospital,time_on_doc),
contact_priority_fit_diet =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.b),
phase,hospital,time_on_diet),
contact_priority_fit_admin =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.c),
phase,hospital,time_on_admin),
contact_priority_fit_nurs =
mapply(function(x,y,z) contact_fit(
z,contact_priority_info[which(contact_priority_info$phase==x&
contact_priority_info$hospital==y),]$type.d),
phase,hospital,time_on_nurs),
# Weighted sum of the four contact-type fits.
contact_priority_fit_index = contact_priority_fit_doc*weightage$contact_priority$a+
contact_priority_fit_diet*weightage$contact_priority$b+
contact_priority_fit_admin*weightage$contact_priority$c+
contact_priority_fit_nurs*weightage$contact_priority$d,
field_work_peraccount = field_work/no.hospitals,
product_knowledge_addition_current_period = sapply(product_training,function(x)curve(curve26,x)),
product_knowledge_transfer_value = sapply(pp_product_knowledge_index,function(x)curve(curve28,x)),
ss_accumulated_field_work_delta = sapply(sr_acc_field_work,function(x)curve(curve42,x)),
ss_accumulated_sales_training_delta = sapply(sales_training,function(x)curve(curve43,x)),
ss_experience_index_pp = sapply(pp_experience_index,function(x)curve(curve44,x)),
# Seniority level selects which meeting-response curve applies.
m_meeting_with_team_delta = mapply(function(x,y){
if (x == "junior") {
curve(curve13,y)
} else if(x=="middle"){
curve(curve14,y)
} else {curve(curve15,y)}
},sales_level,
meetings_with_team,SIMPLIFY=T),
m_sales_target_realization_delta = sapply(sales_target_realization,function(x)curve(curve16,x)),
m_sales_training_delta = sapply(sales_training,function(x)curve(curve17,x)),
m_admin_work_delta = sapply(admin_work,function(x)curve(curve18,x)))%>%
# Sales-skills, product-knowledge and motivation indices (weighted).
mutate(sales_skills_index = round(
ss_accumulated_field_work_delta*((weightage$sales_skills)$field_work)+
ss_accumulated_sales_training_delta*((weightage$sales_skills)$sales_training)+
ss_experience_index_pp*((weightage$sales_skills)$experience)),
product_knowledge_index = round(
product_knowledge_addition_current_period+
product_knowledge_transfer_value),
motivation_index = round(
(pp_motivation_index+m_admin_work_delta)*
((weightage$motivation)$admin_work)+
(pp_motivation_index+m_sales_target_realization_delta)*
((weightage$motivation)$sales_target_realization)+
(pp_motivation_index+m_meeting_with_team_delta)*
((weightage$motivation)$meetings_with_team)+
(pp_motivation_index+m_sales_training_delta)*
((weightage$motivation)$sales_training))) %>%
# Rep sales-performance factors; the time-with-account curve is
# product specific (curve35..38).
mutate(srsp_motivation_factor = sapply(pp_motivation_index,function(x)curve(curve32,x)),
srsp_sales_skills_factor = sapply(sales_skills_index,function(x)curve(curve34,x)),
srsp_product_knowledge_factor = sapply(product_knowledge_index,function(x)curve(curve33,x)),
srsp_time_with_account_factor =
mapply(function(x,y){if (x==as.character(product_code$product[1])){
curve(curve35,y)} else if(
x==as.character(product_code$product[2])){
curve(curve36,y)} else if (
x==as.character(product_code$product[3])) {
curve(curve37,y)} else {
curve(curve38,y)}},
product,sr_time)) %>%
mutate(sr_sales_performance =
srsp_motivation_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$motivation)+
srsp_sales_skills_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$sales_skills)+
srsp_product_knowledge_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$product_knowledge)+
srsp_time_with_account_factor*pp_sr_sales_performance*
((weightage$sr_sales_performance)$time_with_account))%>%
# Deployment-quality index from the FLM decisions.
mutate(dq_admin_work_delta = sapply(admin_work,function(x)curve(curve5,x)),
dq_priority_fit_delta = sapply(contact_priority_fit_index,function(x)curve(curve6,x)),
dq_meetings_with_team_delta =sapply(meetings_with_team,function(x)curve(curve7,x)),
dq_kpi_analysis_factor = sapply(kpi_analysis,function(x)curve(curve8,x)),
dq_strategy_planning_delta = sapply(strategy_and_cycle_planning,function(x)curve(curve9,x)))%>%
mutate(deployment_quality_index = round(
(pp_deployment_quality_index+dq_admin_work_delta)*
((weightage$deployment_quality)$admin_work)+
(pp_deployment_quality_index+dq_priority_fit_delta)*
((weightage$deployment_quality)$priority_fit)+
(pp_deployment_quality_index+dq_meetings_with_team_delta)*
((weightage$deployment_quality)$meetings_with_team)+
pp_deployment_quality_index*dq_kpi_analysis_factor*
((weightage$deployment_quality)$kpi_report_analysis)+
(pp_deployment_quality_index+dq_strategy_planning_delta)*
((weightage$deployment_quality)$strategy_and_cycle_planning)))%>%
# Promotional support index.
mutate(ps_strategy_planning_factor = sapply(strategy_and_cycle_planning,function(x) curve(curve29,x)),
ps_promotional_budget_factor = sapply(promotional_budget,function(x)curve(curve30,x))) %>%
mutate(promotional_support_index =
pp_promotional_support_index*ps_strategy_planning_factor*
((weightage$promotional_support)$strategy_and_cycle_planning)+
pp_promotional_support_index*ps_promotional_budget_factor*
((weightage$promotional_support)$promotional_budget)) %>%
# Overall sales performance.
mutate(sp_field_work_delta = sapply(field_work_peraccount,function(x)curve(curve40,x)),
sp_deployment_quality_factor = sapply(deployment_quality_index,function(x)curve(curve41,x))) %>%
mutate(sales_performance =
sr_sales_performance*((weightage$sales_performance)$sr_sales_performance)+
(pp_sales_performance+sp_field_work_delta)*
((weightage$sales_performance)$field_work)+
(pp_sales_performance*sp_deployment_quality_factor)*
((weightage$sales_performance)$deployment_quality))%>%
# Customer relationship index. NOTE: "relaitonship" below matches the
# (misspelled) name inside the weightage list loaded from RData.
mutate(#cr_market_share_delta = curve(curve1,market_share_peraccount),
cr_product_knowledge_delta = sapply(product_knowledge_index-pp_product_knowledge_index,function(x)curve(curve2,x)),
cr_promotional_support_delta = sapply(promotional_support_index/pp_promotional_support_index,function(x)curve(curve3,x)),
cr_pp_customer_relationship_index = sapply(pp_customer_relationship_index,function(x)curve(curve4,x)))%>%
mutate(customer_relationship_index = round(
(cr_pp_customer_relationship_index+cr_product_knowledge_delta)*
(weightage$customer_relaitonship)$product_knowledge+
(cr_pp_customer_relationship_index+cr_promotional_support_delta)*
(weightage$customer_relaitonship)$promotional_support+
cr_pp_customer_relationship_index*
(weightage$customer_relaitonship)$past_relationship)) %>%
# Offer attractiveness: the relationship factor is product specific
# (curve19..22).
mutate(oa_customer_relationship_factor =
mapply(function(x,y){if (x==as.character(product_code$product[1])){
curve(curve19,y)} else if(
x==as.character(product_code$product[2])){
curve(curve20,y)} else if (
x==as.character(product_code$product[3])) {
curve(curve21,y)} else {
curve(curve22,y)}},
product,customer_relationship_index),
oa_sales_performance_factor = sapply(sales_performance,function(x)curve(curve25,x))) %>%
mutate(cp_offer_attractiveness =
pp_offer_attractiveness*oa_customer_relationship_factor*
(weightage$cp_offer_attractiveness)$customer_relationship+
pp_offer_attractiveness*oa_sales_performance_factor*
(weightage$cp_offer_attractiveness)$sales_performance) %>%
mutate(offer_attractiveness = round(cp_offer_attractiveness*(weightage$total_attractiveness)$cp_offer_attractiveness+
pp_offer_attractiveness*(weightage$total_attractiveness)$pp_offer_attractiveness),
acc_offer_attractiveness = round(pp_acc_offer_attractiveness+offer_attractiveness))
tmp
}
#' Collect the per-hospital, per-product decisions entered for one phase.
#'
#' Reads the Shiny input widgets for every hospital (1-10) and product
#' (1-4) of the given phase and returns one row per combination with the
#' sales target, discount, promotional budget, the rep's time on the
#' product, and its split over the four contact types.
#'
#' Relies on the globals `hospital_info`, `product_info_initial`,
#' `volume_info`, `total_promotional_budget` and `worktime` loaded from
#' initial_setting.RData.
#'
#' Improvement: rows are collected in a preallocated list and bound once
#' at the end instead of growing a data frame with rbind.fill inside the
#' loop (O(n^2) copying); per-hospital values are hoisted out of the
#' inner product loop.
#'
#' @param input Shiny input object (list-like, indexed by widget id).
#' @param phase Phase number (1-4).
#' @return data.frame with one row per hospital x product combination.
get.data1 <- function(input,phase){
  rows <- vector("list", 10 * 4)
  k <- 0
  name.phase <- as.character(paste("周期", phase, sep = ""))
  for (j in 1:10) {
    # Rep assignment and promotional budget are entered per hospital.
    name.hospital <- as.character(hospital_info$hospital[j])
    name.sales_rep <- as.character(input[[paste("p", phase, "_sr_hosp", j, sep = "")]])
    value.promotional_budget <-
      as.numeric(input[[paste("p", phase, "_promotional_budget_hosp", j, sep = "")]]) / 100 *
      total_promotional_budget[[paste("phase", phase, sep = "")]]
    for (q in 1:4) {
      name.product <- as.character(product_info_initial$product[q])
      value.sales_target <- as.numeric(input[[paste("p", phase, "_hosp", j, "_sales_target_", q, sep = "")]])
      value.discount <- as.numeric(input[[paste("p", phase, "_discount_hosp", j, "_", q, sep = "")]])
      # Share (percent) of the global worktime budget spent on product q
      # at this hospital, then split over the four contact types.
      value.sr_time <- as.numeric(input[[paste("p", phase, "_hosp", j, "_worktime_", q, sep = "")]]) / 100 * worktime
      value.time_on_doc <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_doc", sep = "")]]) / 100 * value.sr_time
      value.time_on_diet <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_diet", sep = "")]]) / 100 * value.sr_time
      value.time_on_admin <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_admin", sep = "")]]) / 100 * value.sr_time
      value.time_on_nurs <- as.numeric(
        input[[paste("p", phase, "_hosp", j, "_worktime_nurs", sep = "")]]) / 100 * value.sr_time
      k <- k + 1
      rows[[k]] <- data.frame(
        phase = name.phase,
        hospital = name.hospital,
        sales_rep = name.sales_rep,
        product = name.product,
        sales_target = value.sales_target,
        potential_volume = volume_info[which(volume_info$phase == name.phase &
                                               volume_info$hospital == name.hospital &
                                               volume_info$product == name.product), ]$potential_volume,
        discount = value.discount,
        promotional_budget = value.promotional_budget,
        sr_time = value.sr_time,
        time_on_doc.tmp = value.time_on_doc,
        time_on_diet.tmp = value.time_on_diet,
        time_on_admin.tmp = value.time_on_admin,
        time_on_nurs.tmp = value.time_on_nurs
      )
    }
  }
  # All row frames share identical columns, so a single rbind suffices.
  do.call(rbind, rows)
}
#' Collect the per-rep management decisions entered for one phase.
#'
#' Reads the Shiny input widgets for the five sales reps of the given
#' phase (individual sales training, product training, field work) plus
#' the first-line-manager decisions that are shared by every rep
#' (team meetings, KPI analysis, strategy/cycle planning, admin work).
#'
#' Relies on the global `sr_info_initial_value` loaded from
#' initial_setting.RData.
#'
#' Improvement: rows are collected in a preallocated list and bound once
#' at the end instead of growing a data frame with rbind.fill inside the
#' loop (O(n^2) copying); the shared FLM values are read only once.
#'
#' @param input Shiny input object (list-like, indexed by widget id).
#' @param phase Phase number (1-4).
#' @return data.frame with one row per sales rep.
get.data2 <- function(input,phase){
  rows <- vector("list", 5)
  name.phase <- as.character(paste("周期", phase, sep = ""))
  # FLM-level decisions are identical for every rep in the phase.
  value.meetings_with_team <- as.numeric(
    input[[paste("p", phase, "_flm_team_meeting", sep = "")]])
  value.kpi_analysis <- as.numeric(
    input[[paste("p", phase, "_flm_kpi_analysis", sep = "")]])
  value.strategy_and_cycle_planning <- as.numeric(
    input[[paste("p", phase, "_flm_strategy_planning", sep = "")]])
  value.admin_work <- as.numeric(
    input[[paste("p", phase, "_flm_admin_work", sep = "")]])
  for (j in 1:5) {
    rows[[j]] <- data.frame(
      phase = name.phase,
      sales_rep = as.character(sr_info_initial_value$sales_rep[j]),
      sales_training = as.numeric(
        input[[paste("p", phase, "_sr", j, "_sales_training", sep = "")]]),
      product_training = as.numeric(
        input[[paste("p", phase, "_sr", j, "_product_training", sep = "")]]),
      field_work = as.numeric(
        input[[paste("p", phase, "_sr", j, "_field_work", sep = "")]]),
      meetings_with_team = value.meetings_with_team,
      kpi_analysis = value.kpi_analysis,
      strategy_and_cycle_planning = value.strategy_and_cycle_planning,
      admin_work = value.admin_work
    )
  }
  # All row frames share identical columns, so a single rbind suffices.
  do.call(rbind, rows)
}
## One-row summary of the first-line manager's decisions for a phase:
## team-wide training and field-work totals plus the FLM's own meeting /
## KPI / strategy-planning / admin time.
get.data3 <- function(input, phase) {
  flm_key <- function(suffix) paste("p", phase, "_flm_", suffix, sep = "")
  data.frame(
    flm_sales_training = sales_training(input, phase),
    flm_field_work = field_work(input, phase),
    flm_meetings_with_team = as.numeric(input[[flm_key("team_meeting")]]),
    flm_kpi_analysis = as.numeric(input[[flm_key("kpi_analysis")]]),
    flm_strategy_planning = as.numeric(input[[flm_key("strategy_planning")]]),
    flm_admin_work = as.numeric(input[[flm_key("admin_work")]]))
}
#' Reduce one phase's calculation() output to its phase-level scores.
#'
#' Aggregates the hospital/product-level table to per-hospital totals,
#' then to phase totals and averages, and combines them with the given
#' contribution margin into the weighted overall success value (using
#' the response tables curve45..curve50 and `weightage$success_value`
#' from initial_setting.RData).
#'
#' NOTE: the spellings `avarage_product_knowledge_index` and
#' `average_motivaiton` are kept as-is because the latter must match the
#' element name inside the weightage list loaded from RData.
#'
#' @param data Output of calculation() for one phase.
#' @param contribution_margin Contribution margin III for the phase.
#' @return One-row data.frame: phase, success_value,
#'   total_offer_attractiveness, total_acc_offer_attractiveness.
data_filter <- function(data,contribution_margin) {
out <- data %>%
select(phase,
hospital,
product,
offer_attractiveness,
acc_offer_attractiveness,
real_revenue,
customer_relationship_index,
sales_skills_index,
product_knowledge_index,
motivation_index) %>%
distinct() %>%
# Hospital-level sums (rep-level indices are constant per hospital row).
group_by(phase,hospital) %>%
mutate(hospital_offer_attractiveness = sum(offer_attractiveness),
hospital_acc_offer_attractiveness = sum(acc_offer_attractiveness),
hospital_real_revenue = sum(real_revenue),
hospital_customer_relationship_index = sum(customer_relationship_index)) %>%
ungroup() %>%
select(phase,hospital,
hospital_offer_attractiveness,
hospital_acc_offer_attractiveness,
hospital_real_revenue,
hospital_customer_relationship_index,
sales_skills_index,
product_knowledge_index,
motivation_index) %>%
distinct() %>%
# Phase-level totals and averages.
group_by(phase) %>%
dplyr::summarise(total_offer_attractiveness=round(sum(hospital_offer_attractiveness)),
total_acc_offer_attractiveness = round(sum(hospital_acc_offer_attractiveness)),
total_revenue = sum(hospital_real_revenue),
average_customer_relationship_index = mean(hospital_customer_relationship_index),
average_sales_skills_index = mean(sales_skills_index),
avarage_product_knowledge_index = mean(product_knowledge_index),
average_motivation_index = mean(motivation_index)) %>%
# Weighted overall score via the success-value response curves.
mutate(contribution_margin_III=contribution_margin,
success_value =
round(
(weightage$success_value)$total_sales*curve(curve50,total_revenue) +
(weightage$success_value)$contribution_margin*curve(curve49,contribution_margin_III) +
(weightage$success_value)$average_customer_relationship*curve(curve45,average_customer_relationship_index) +
(weightage$success_value)$average_sales_skills*curve(curve48,average_sales_skills_index) +
(weightage$success_value)$average_product_knowledge*curve(curve47,avarage_product_knowledge_index) +
(weightage$success_value)$average_motivaiton*curve(curve46,average_motivation_index),0)) %>%
select(phase,
success_value,
total_offer_attractiveness,
total_acc_offer_attractiveness )
out
}
#' Combine the four per-phase summaries into the final score table.
#'
#' Stacks the data_filter() outputs of the four phases, renames the score
#' columns to their display labels, reshapes so phases become columns,
#' and fixes the row order to: 综合得分 (overall score), 商业价值 (offer
#' attractiveness), 累计商业价值 (accumulated offer attractiveness).
#'
#' Improvements: removed the redundant first `rownames<-` assignment
#' (dplyr verbs drop row names, so it was always overwritten below),
#' dropped a stale commented-out line, and used seq_along() instead of
#' 1:length().
#'
#' @param phase1,phase2,phase3,phase4 One-row data frames from
#'   data_filter(), one per phase.
#' @return data.frame with one row per indicator (row names are the
#'   indicator labels) and one column per phase.
final_report <- function(phase1,phase2,phase3,phase4){
final.names <- c("综合得分","商业价值","累计商业价值")
# Lookup table fixing the display order of the three indicators.
no.list <- data.frame(number=seq_along(final.names),
variable=final.names)
data <- rbind(phase1,
phase2,
phase3,
phase4)
colnames(data)[2:4] <- c("综合得分","商业价值","累计商业价值")
# Long format, then spread the phases into columns.
data <- data %>%
gather(variable,value,-phase) %>%
spread(phase,value)
# Row names are assigned only after the join/arrange, because dplyr
# verbs drop row names.
data <- data %>% left_join(no.list,by="variable") %>%
arrange(number) %>%
select(-number)
rownames(data) <- data$variable
data <- data %>% select(-variable)
data
}
##participant report
## staff report 1
## Build every participant / staff report table for one simulation phase.
##
## Args:
##   tmp      : per-hospital / per-product / per-sales-rep results table; must
##              contain the columns selected below (sales_rep, hospital,
##              product, real_revenue, the *_index columns, etc.).
##   flm_data : first-line-manager time-allocation table.
##
## Reads globals defined elsewhere in the project: basicSalary,
## production_price, admin_fee_weight -- TODO confirm where these are set.
##
## Returns a named list of 20 report data frames (report1_mod1 .. report8_mod2),
## reshaped with Chinese row/column labels for display.
##
## Fix in this revision: report6_rank listed "利润贡献I(元)" twice (ranks 4 and
## 10) and omitted "利润贡献II(元)"; the duplicate join key caused left_join()
## to duplicate rows and left 利润贡献II(元) with an NA rank. Rank 10 is now
## "利润贡献II(元)".
report_data <- function(tmp,flm_data) {
## ---- base staff table: salary components and time budget per rep ----
staff_report <- tmp %>%
select(sales_rep,
incentive_factor,
product_training,
sales_training,
meetings_with_team,
field_work,
sr_time,
sr_time_total,
product_knowledge_index,
pp_experience_index,
experience_index,
sr_revenue,
sr_acc_revenue,
pp_sales_skills_index,
sales_skills_index,
overhead_time) %>%
distinct() %>%
mutate(basic_salary=basicSalary,
incentive_salary= round(basic_salary*incentive_factor/100,2),
total_salary=round(incentive_salary+basicSalary,2),
visit_time=sr_time_total-overhead_time,
total_time=overhead_time+
product_training+
sales_training+
meetings_with_team+
visit_time)
## ---- report 1.1: salary breakdown, one column per rep ----
report1_mod1 <- staff_report %>%
select(sales_rep,
basic_salary,
incentive_salary,
total_salary) %>%
distinct()
colnames(report1_mod1) <- c("销售代表",
"基本工资(元)",
"奖金(元)",
"总薪酬(元)")
report1_mod1 <- report1_mod1 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod1) <- report1_mod1$variable
report1_mod1 <- report1_mod1 %>% select(-variable)
## ---- report 1.2: time allocation per rep, rows in fixed display order ----
report1_mod2 <- staff_report %>%
select(overhead_time,
product_training,
sales_training,
meetings_with_team,
visit_time,
total_time,
sales_rep) %>%
distinct()
colnames(report1_mod2) <- c("日常事物(天)",
"产品培训(天)",
"销售培训(天)",
"团队会议(天)",
"医院拜访(天)",
"总工作时间(天)",
"销售代表")
report1_mod2 <- report1_mod2 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
## Rank table pins the row order after the gather/spread reshape.
report1_rank2 <- data.frame(
variable=c("日常事物(天)",
"产品培训(天)",
"销售培训(天)",
"团队会议(天)",
"医院拜访(天)",
"总工作时间(天)"),
rank=1:6,
stringsAsFactors = FALSE
)
report1_mod2 <- report1_mod2 %>%
left_join(report1_rank2,by="variable") %>%
arrange(rank)
rownames(report1_mod2) <- report1_mod2$variable
report1_mod2 <- report1_mod2 %>% select(-variable,-rank)
## ---- report 1.3: product knowledge vs. product training ----
report1_mod3 <- staff_report %>%
select(sales_rep,
product_knowledge_index,
product_training) %>%
distinct()
colnames(report1_mod3) <- c("销售代表",
"产品知识(指数)",
"产品培训(天)")
report1_mod3 <- report1_mod3 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod3) <- report1_mod3$variable
report1_mod3 <- report1_mod3 %>% select(-variable)
## ---- report 1.4: experience and revenue ----
report1_mod4 <- staff_report %>%
select(pp_experience_index,
sr_revenue,
sr_acc_revenue,
experience_index,
sales_rep) %>%
distinct()
colnames(report1_mod4) <- c("前期经验",
"当期销售(元)",
"累计总销售(元)",
"当期经验",
"销售代表")
report1_mod4 <- report1_mod4 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod4) <- report1_mod4$variable
report1_mod4 <- report1_mod4 %>% select(-variable)
## ---- report 1.5: sales skills and field coaching ----
report1_mod5 <- staff_report %>%
select(sales_rep,
pp_sales_skills_index,
sales_skills_index,
field_work) %>%
distinct()
colnames(report1_mod5) <- c("销售代表",
"前期销售技巧(指数)",
"当期销售技巧(指数)",
"经理医院随访(天)")
report1_mod5 <- report1_mod5 %>%
gather(variable,`值`,-`销售代表`) %>%
spread(`销售代表`,`值`)
rownames(report1_mod5) <- report1_mod5$variable
report1_mod5 <- report1_mod5 %>% select(-variable)
## ---- flm report: team salary total plus the manager's own time budget ----
flm_report <- staff_report %>%
mutate(all_sr_salary=sum(total_salary,na.rm=TRUE)) %>%
select(all_sr_salary) %>%
distinct()
flm_report <- flm_data %>%
mutate(all_sr_salary = flm_report$all_sr_salary,
work_time=flm_sales_training+
flm_field_work+
flm_meetings_with_team+
flm_kpi_analysis+
flm_strategy_planning+
flm_admin_work)
report2_mod1 <- flm_report%>%
select(all_sr_salary) %>%
mutate(variable="总薪酬(元)")
rownames(report2_mod1) <- report2_mod1$variable
colnames(report2_mod1)[1] <- "值"
report2_mod1 <- report2_mod1 %>%
select(-variable)
report2_mod2 <- flm_report %>%
select(-all_sr_salary)
colnames(report2_mod2) <- c("销售培训(天)",
"经理随访(天)",
"团队会议(天)",
"KPI分析(天)",
"战略和周期计划(天)",
"行政工作(天)",
"总工作时间(天)")
report2_mod2 <- report2_mod2 %>%
gather(variable,`值`)
rownames(report2_mod2) <- report2_mod2$variable
report2_mod2 <- report2_mod2 %>% select(-variable)
## ---- report 3: planned vs. realized time allocation per hospital ----
report3_rank1 <- data.frame(
"因素"=c("销售代表","计划时间分配(天)","实际时间分配(天)"),
rank=1:3,
stringsAsFactors = FALSE
)
report3_mod1 <- tmp %>%
select(no.hospitals,
hospital,
product,
sales_rep,
sr_time,
overhead_time) %>%
distinct() %>%
## Overhead is spread evenly over a rep's hospitals.
mutate(real_time =
round(sr_time-overhead_time/no.hospitals,2)) %>%
select(-no.hospitals,-overhead_time)
colnames(report3_mod1) <- c("医院",
"产品",
"销售代表",
"计划时间分配(天)",
"实际时间分配(天)")
report3_mod1 <- report3_mod1 %>%
gather(`因素`,value,-`医院`,-`产品`) %>%
spread(`产品`,value) %>%
left_join(report3_rank1,by="因素") %>%
arrange(`医院`,rank) %>%
select(-rank)
## ---- report 4: decision-quality evaluation per hospital ----
eva_decision_report <- tmp %>%
select(hospital,
product,
sales_rep,
time_on_doc,
time_on_diet,
time_on_admin,
time_on_nurs,
strategy_and_cycle_planning,
kpi_analysis,
meetings_with_team,
admin_work,
contact_priority_fit_index,
deployment_quality_index,
pp_deployment_quality_index) %>%
group_by(hospital) %>%
mutate(total_deployment_quality_index= round(sum(deployment_quality_index),2),
total_pp_deployment_quality_index=round(sum(pp_deployment_quality_index),2)) %>%
ungroup() %>%
select(-product)
report4_mod1 <- eva_decision_report %>%
select(hospital,
sales_rep) %>%
distinct()
colnames(report4_mod1) <- c("医院",
"销售代表")
rownames(report4_mod1) <- report4_mod1$医院
report4_mod1 <- report4_mod1 %>% select(-`医院`)
report4_mod2 <- eva_decision_report %>%
select(hospital,
time_on_doc,
time_on_diet,
time_on_admin,
time_on_nurs,
contact_priority_fit_index) %>%
distinct()
colnames(report4_mod2) <- c("医院",
"A级客户时间分配",
"B级客户时间分配",
"C级客户时间分配",
"D级客户时间分配",
"总分级匹配度")
rownames(report4_mod2) <- report4_mod2$医院
report4_mod2 <- report4_mod2 %>% select(-`医院`)
report4_mod3 <- eva_decision_report %>%
select(hospital,
total_pp_deployment_quality_index,
strategy_and_cycle_planning,
kpi_analysis,
meetings_with_team,
admin_work,
contact_priority_fit_index,
total_deployment_quality_index) %>%
distinct()
colnames(report4_mod3) <- c("医院",
"上期决策质量(指数)",
"战略和周期计划(天)",
"KPI分析(天)",
"团队会议(天)",
"行政工作(天)",
"总分级匹配度",
"当期决策质量(指数)")
rownames(report4_mod3) <- report4_mod3$医院
report4_mod3 <- report4_mod3 %>% select(-`医院`)
## ---- report 5.1: profit contribution I per product (absolute) ----
report5_mod1 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume) %>%
group_by(product) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
real_revenue_by_product = sum(real_revenue),
production_fee = real_revenue_by_product*production_cost,
profit1 = real_revenue_by_product - production_fee,
production_fee_percent = round(production_fee/real_revenue_by_product*100,2),
profit1_percent = round(profit1/real_revenue_by_product*100,2)) %>%
ungroup() %>%
select(-hospital,
-real_revenue,
-real_volume,
-production_cost) %>%
distinct()
colnames(report5_mod1) <- c("产品",
"销售金额(元)",
"生产成本(元)",
"利润贡献I(元)",
"生产成本(%)",
"利润贡献I(%)")
report5_mod1 <- report5_mod1 %>%
gather(`因素`,value,-`产品`) %>%
spread(`产品`,value)
report5_rank1 <- data.frame(
"因素"= c("销售金额(元)",
"生产成本(元)",
"生产成本(%)",
"利润贡献I(元)",
"利润贡献I(%)"),
rank=seq_along(report5_mod1$因素),
stringsAsFactors = FALSE
)
report5_mod1 <- report5_mod1 %>%
left_join(report5_rank1,by="因素") %>%
distinct() %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod1) <- report5_mod1$因素
report5_mod1 <- report5_mod1 %>% select(-`因素`)
## ---- report 5.2: same breakdown, per unit of volume ----
report5_mod2 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume) %>%
group_by(product) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
real_revenue_by_product = sum(real_revenue),
real_revenue_by_volume = sum(real_volume),
real_revenue_by_product_per = real_revenue_by_product/real_revenue_by_volume,
production_fee_per = real_revenue_by_product_per*production_cost,
profit1 = real_revenue_by_product_per - production_fee_per,
production_fee_percent = round(production_fee_per/real_revenue_by_product_per*100,2),
profit1_percent = round(profit1/real_revenue_by_product_per*100,2)) %>%
ungroup() %>%
select(-hospital,
-real_revenue,
-real_volume,
-production_cost,
-real_revenue_by_product,
-real_revenue_by_volume) %>%
distinct()
colnames(report5_mod2) <- c("产品",
"销售金额(元)",
"生产成本(元)",
"利润贡献I(元)",
"生产成本(%)",
"利润贡献I(%)")
report5_mod2 <- report5_mod2 %>%
gather(`因素`,value,-`产品`) %>%
spread(`产品`,value)
report5_mod2 <- report5_mod2 %>%
left_join(report5_rank1,by="因素") %>%
distinct() %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod2) <- report5_mod2$因素
report5_mod2 <- report5_mod2 %>% select(-`因素`)
## ---- report 5.3: company-level P&L down to profit contribution III ----
report5_mod3 <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume,
promotional_budget,
discount) %>%
mutate(production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
production_fee = production_cost*real_revenue,
discount_fee = discount/100*real_revenue) %>%
group_by(hospital) %>%
mutate(real_revenue_by_hosp = sum(real_revenue),
total_production_fee_by_hosp =sum(production_fee),
discount_fee_by_hosp = sum(discount_fee)) %>%
ungroup() %>%
select(hospital,
total_production_fee_by_hosp,
real_revenue_by_hosp,
discount_fee,
promotional_budget) %>%
distinct() %>%
mutate(total_revenue =round(sum(real_revenue_by_hosp),2),
total_production_fee =round(sum(total_production_fee_by_hosp),2),
total_discount_fee = round(sum(discount_fee),2),
total_promotional_budget = round(sum(promotional_budget),2),
total_changeable_fee = total_discount_fee+total_promotional_budget,
total_salary=round(report2_mod1$值,2),
total_admin_fee=round(total_revenue*admin_fee_weight,2)) %>%
select(-hospital,
-total_discount_fee,
-total_promotional_budget,
-total_production_fee_by_hosp,
-real_revenue_by_hosp,
-discount_fee,
-promotional_budget) %>%
distinct() %>%
mutate(profit1=total_revenue-total_production_fee,
profit2=profit1-total_changeable_fee,
profit3=profit2-total_salary-total_admin_fee)
report5_rank3 <- data.frame(
variable=c("销售额",
"生产成本",
"利润贡献I",
"其他可变成本",
"利润贡献II",
"员工薪酬",
"总体行政花销",
"利润贡献III"),
rank = 1:8,
stringsAsFactors = FALSE
)
report5_mod3_1 <- report5_mod3
colnames(report5_mod3_1) <- c("销售额",
"生产成本",
"其他可变成本",
"员工薪酬",
"总体行政花销",
"利润贡献I",
"利润贡献II",
"利润贡献III")
report5_mod3_1 <- report5_mod3_1 %>%
gather(variable,"金额(元)")
report5_mod3_2 <- report5_mod3 %>%
mutate(total_revenue_percent = round(total_revenue/total_revenue*100,2),
total_production_fee_percent = round(total_production_fee/total_revenue*100,2),
profit1_percent = round(profit1/total_revenue*100,2),
total_changeable_fee_percent = round(total_changeable_fee/total_revenue*100,2),
profit2_percent = round(profit2/total_revenue*100,2),
total_salary_percent = round(total_salary/total_revenue*100,2),
total_admin_fee_percent =round(total_admin_fee/total_revenue*100,2),
profit3_percent = round(profit3/total_revenue*100,2)) %>%
select(total_revenue_percent,
total_production_fee_percent,
profit1_percent,
total_changeable_fee_percent,
profit2_percent,
total_salary_percent,
total_admin_fee_percent,
profit3_percent)
colnames(report5_mod3_2) <- c("销售额",
"生产成本",
"利润贡献I",
"其他可变成本",
"利润贡献II",
"员工薪酬",
"总体行政花销",
"利润贡献III")
report5_mod3 <- report5_mod3_2 %>%
gather(variable,"占比") %>%
left_join(report5_mod3_1,by="variable") %>%
left_join(report5_rank3,by="variable") %>%
arrange(rank) %>%
select(-rank)
rownames(report5_mod3) <- report5_mod3$variable
report5_mod3 <- report5_mod3 %>%
select(-variable) %>%
select(`金额(元)`,`占比`)
## ---- report 6: per-hospital, per-product P&L down to profit II ----
## BUGFIX: rank 10 was a duplicate "利润贡献I(元)" (same as rank 4); the
## duplicate join key duplicated rows in the left_join below and left
## "利润贡献II(元)" unranked. It is now "利润贡献II(元)".
report6_rank <- data.frame(
variable=c("销售额(元)",
"生产成本(元)",
"生产成本(%)",
"利润贡献I(元)",
"利润贡献I(%)",
"其他可变成本(元)",
"其他可变成本(%)",
"推广费用预算(元)",
"推广费用预算(%)",
"利润贡献II(元)",
"利润贡献II(%)"),
rank=1:11,
stringsAsFactors = FALSE)
product_report_peraccount <- tmp %>%
select(hospital,
product,
real_revenue,
real_volume,
promotional_budget,
discount) %>%
group_by(hospital,product) %>%
## NOTE(review): n_distinct(product) within a (hospital, product) group is
## always 1, so promotion_fee == promotional_budget; if the budget should be
## split across a hospital's products, the grouping would need to change --
## behavior preserved here, confirm intent.
mutate(no.product=n_distinct(product),
production_cost = sapply(product,function(x)production_price[which(production_price$product==x),]$cost),
production_fee = round(production_cost*real_revenue,2),
profit1 = round(real_revenue - production_fee,2),
discount_fee = round(discount/100*real_revenue,2),
promotion_fee = round(promotional_budget/no.product,2),
profit2 = round(profit1- discount_fee - promotion_fee,2)) %>%
ungroup() %>%
select(hospital,
product,
real_revenue,
production_fee,
profit1,
discount_fee,
promotion_fee,
profit2) %>%
group_by(hospital) %>%
## Append a "总体" (overall) row per hospital.
do(plyr::rbind.fill(.,data.frame(hospital=first(.$hospital),
product="总体",
real_revenue = sum(.$real_revenue),
production_fee = sum(.$production_fee),
profit1 = sum(.$profit1),
discount_fee = sum(.$discount_fee),
promotion_fee = sum(.$promotion_fee),
profit2 = sum(.$profit2)))) %>%
ungroup() %>%
mutate(production_fee_percent = round(production_fee/real_revenue*100,2),
profit1_percent = round(profit1/real_revenue*100,2),
discount_fee_percent = round(discount_fee/real_revenue*100,2),
promotion_fee_percent = round(promotion_fee/real_revenue*100,2),
profit2_percent = round(profit2/real_revenue*100,2))
colnames(product_report_peraccount) <- c("医院",
"产品",
"销售额(元)",
"生产成本(元)",
"利润贡献I(元)",
"其他可变成本(元)",
"推广费用预算(元)",
"利润贡献II(元)",
"生产成本(%)",
"利润贡献I(%)",
"其他可变成本(%)",
"推广费用预算(%)",
'利润贡献II(%)')
report6_mod1 <- product_report_peraccount %>%
gather(variable,value,-`医院`,-`产品`) %>%
spread(`产品`,value) %>%
left_join(report6_rank,by="variable") %>%
arrange(`医院`,rank) %>%
select(-rank)
## ---- report 7: growth vs. previous period by hospital / rep / product ----
report7_mod1 <- tmp %>%
select(hospital,
real_revenue,
pp_real_revenue,
real_volume,
pp_real_volume) %>%
group_by(hospital) %>%
dplyr::summarise(real_revenue_by_hosp = round(sum(real_revenue),2),
pp_real_revenue_by_hosp = round(sum(pp_real_revenue),2),
real_revenue_increase = real_revenue_by_hosp - pp_real_revenue_by_hosp,
real_volume_by_hosp = round(sum(real_volume),2),
pp_real_volume_by_hosp = round(sum(pp_real_volume),2),
real_volume_increase = real_volume_by_hosp - pp_real_volume_by_hosp) %>%
do(plyr::rbind.fill(.,data.frame(hospital="总体",
real_revenue_by_hosp=sum(.$real_revenue_by_hosp),
pp_real_revenue_by_hosp=sum(.$pp_real_revenue_by_hosp),
real_revenue_increase=sum(.$real_revenue_increase),
real_volume_by_hosp=sum(.$real_volume_by_hosp),
pp_real_volume_by_hosp=sum(.$pp_real_volume_by_hosp),
real_volume_increase=sum(.$real_volume_increase)))) %>%
mutate(real_revenue_increase_ratio = round(real_revenue_increase/pp_real_revenue_by_hosp*100,2),
real_volume_increase_ratio = round(real_volume_increase/pp_real_volume_by_hosp*100,2)) %>%
select(hospital,
pp_real_revenue_by_hosp,
real_revenue_by_hosp,
real_revenue_increase,
real_revenue_increase_ratio,
pp_real_volume_by_hosp,
real_volume_by_hosp,
real_volume_increase,
real_volume_increase_ratio)
colnames(report7_mod1) <- c("医院",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod1) <- report7_mod1$医院
report7_mod1 <- report7_mod1 %>%
select(-`医院`)
report7_mod2 <- tmp %>%
select(sales_rep,
sr_revenue,
pp_sr_revenue,
sr_volume,
pp_sr_volume) %>%
distinct() %>%
mutate(sr_revenue_increase=sr_revenue-pp_sr_revenue,
sr_volume_increase=sr_volume-pp_sr_volume) %>%
do(plyr::rbind.fill(.,data.frame(sales_rep="总体",
sr_revenue=sum(.$sr_revenue),
pp_sr_revenue =sum(.$pp_sr_revenue),
sr_revenue_increase=sum(.$sr_revenue_increase),
sr_volume=sum(.$sr_volume),
pp_sr_volume=sum(.$pp_sr_volume),
sr_volume_increase=sum(.$sr_volume_increase)))) %>%
mutate(sr_revenue_increase_ratio = round(sr_revenue_increase/pp_sr_revenue*100,2),
sr_volume_increase_ratio = round(sr_volume_increase/pp_sr_volume*100,2)) %>%
select(sales_rep,
pp_sr_revenue,
sr_revenue,
sr_revenue_increase,
sr_revenue_increase_ratio,
pp_sr_volume,
sr_volume,
sr_volume_increase,
sr_volume_increase_ratio)
colnames(report7_mod2) <- c("销售代表",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod2) <- report7_mod2$销售代表
report7_mod2 <- report7_mod2 %>%
select(-`销售代表`)
report7_mod3 <- tmp %>%
select(product,
real_revenue,
pp_real_revenue,
real_volume,
pp_real_volume) %>%
group_by(product) %>%
dplyr::summarise(real_revenue_by_product = round(sum(real_revenue),2),
pp_real_revenue_by_product = round(sum(pp_real_revenue),2),
real_revenue_increase = real_revenue_by_product - pp_real_revenue_by_product,
real_volume_by_product = round(sum(real_volume),2),
pp_real_volume_by_product = round(sum(pp_real_volume),2),
real_volume_increase = real_volume_by_product - pp_real_volume_by_product) %>%
do(plyr::rbind.fill(.,data.frame(product="总体",
real_revenue_by_product=sum(.$real_revenue_by_product),
pp_real_revenue_by_product=sum(.$pp_real_revenue_by_product),
real_revenue_increase=sum(.$real_revenue_increase),
real_volume_by_product=sum(.$real_volume_by_product),
pp_real_volume_by_product=sum(.$pp_real_volume_by_product),
real_volume_increase=sum(.$real_volume_increase)))) %>%
mutate(real_revenue_increase_ratio = round(real_revenue_increase/pp_real_revenue_by_product*100,2),
real_volume_increase_ratio = round(real_volume_increase/pp_real_volume_by_product*100,2)) %>%
select(product,
pp_real_revenue_by_product,
real_revenue_by_product,
real_revenue_increase,
real_revenue_increase_ratio,
pp_real_volume_by_product,
real_volume_by_product,
real_volume_increase,
real_volume_increase_ratio)
colnames(report7_mod3) <- c("产品",
"上期销售额",
"当期销售额",
"销售额增长",
"销售额增长率",
"上期销售量",
"当期销售量",
"销售量增长",
"销售量增长率")
rownames(report7_mod3) <- report7_mod3$产品
report7_mod3 <- report7_mod3 %>%
select(-`产品`)
## ---- report 8: overall business value and market share ----
offer_attractiveness_report <- tmp %>%
group_by(hospital) %>%
mutate(hospital_revenue = round(sum(real_revenue),2),
hospital_offer_attractiveness = round(sum(offer_attractiveness),2),
hospital_acc_offer_attractiveness =round(sum(acc_offer_attractiveness),2))%>%
ungroup() %>%
select(sales_rep,
hospital,
incentive_factor,
product_knowledge_index,
sales_skills_index,
customer_relationship_index,
motivation_index,
hospital_revenue,
hospital_offer_attractiveness,
hospital_acc_offer_attractiveness) %>%
distinct() %>%
mutate(total_revenue = round(sum(hospital_revenue),2),
total_offer_attractiveness = round(sum(hospital_offer_attractiveness),2),
total_acc_offer_attractiveness = round(sum(hospital_acc_offer_attractiveness),2),
average_customer_relationship_index = round(mean(customer_relationship_index),2),
average_sales_skills_index = round(mean(sales_skills_index),2),
average_product_knowledge_index = round(mean(product_knowledge_index),2),
average_motivation_index = round(mean(motivation_index),2))
report8_mod1 <- offer_attractiveness_report %>%
ungroup() %>%
## Row 8 / column 1 of report5_mod3 is 利润贡献III in 金额(元).
dplyr::mutate(profit3=as.numeric(report5_mod3[8,1])) %>%
select(total_revenue,
profit3,
average_customer_relationship_index,
average_sales_skills_index,
average_product_knowledge_index,
average_motivation_index,
total_offer_attractiveness,
total_acc_offer_attractiveness) %>%
distinct()
colnames(report8_mod1) <- c("总销售(元)",
"总利润(元)",
"客户关系的平均水平(指数)",
"平均销售技巧水平(指数)",
"平均产品知识水平(指数)",
"平均动力值(指数)",
"商业价值(指数)",
"累计商业价值(指数)")
report8_mod1 <- report8_mod1 %>% gather(variable,`值`)
rownames(report8_mod1) <- report8_mod1$variable
report8_mod1 <- report8_mod1 %>% select(-variable)
report8_mod2 <- tmp %>%
select(hospital,product,real_revenue) %>%
group_by(hospital) %>%
dplyr::summarise(hospital_revenue = round(sum(real_revenue),2)) %>%
ungroup() %>%
mutate(market_revenue=round(sum(hospital_revenue),2),
market_share=round(hospital_revenue/market_revenue*100,2)) %>%
select(hospital,
hospital_revenue,
market_share) %>%
distinct()
colnames(report8_mod2) <- c("医院",
"总销售(元)",
"总市场的市场份额(%)")
rownames(report8_mod2) <- report8_mod2$医院
report8_mod2 <- report8_mod2 %>% select(-`医院`)
## ---- assemble the full report list ----
out<-list("report1_mod1"=report1_mod1,
"report1_mod2"=report1_mod2,
"report1_mod3"=report1_mod3,
"report1_mod4"=report1_mod4,
"report1_mod5"=report1_mod5,
"report2_mod1"=report2_mod1,
"report2_mod2"=report2_mod2,
"report3_mod1"=report3_mod1,
"report4_mod1"=report4_mod1,
"report4_mod2"=report4_mod2,
"report4_mod3"=report4_mod3,
"report5_mod1"=report5_mod1,
"report5_mod2"=report5_mod2,
"report5_mod3"=report5_mod3,
"report6_mod1"=report6_mod1,
"report7_mod1"=report7_mod1,
"report7_mod2"=report7_mod2,
"report7_mod3"=report7_mod3,
"report8_mod1"=report8_mod1,
"report8_mod2"=report8_mod2
)
out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_13}
\alias{movie_13}
\title{(500) Days of Summer}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt1022603
}
\usage{
movie_13
}
\description{
Interactions of characters in the movie "(500) Days of Summer" (2009)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
| /man/movie_13.Rd | permissive | physthoth/networkdata | R | false | true | 664 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-movie.R
\docType{data}
\name{movie_13}
\alias{movie_13}
\title{(500) Days of Summer}
\format{igraph object}
\source{
https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/T4HBA3
https://www.imdb.com/title/tt1022603
}
\usage{
movie_13
}
\description{
Interactions of characters in the movie "(500) Days of Summer" (2009)
}
\references{
Kaminski, Jermain; Schober, Michael; Albaladejo, Raymond; Zastupailo, Oleksandr; Hidalgo, César, 2018, Moviegalaxies - Social Networks in Movies, https://doi.org/10.7910/DVN/T4HBA3, Harvard Dataverse, V3
}
\keyword{datasets}
|
## Preprocessing0.R -- build WOE-encoded, dummy-encoded train/test splits for
## the BADS WS17/18 assignment. Side effects: installs/loads packages, changes
## the working directory, reads two CSVs, and sources two helper scripts.
if(!require("caret")) install.packages("caret"); library("caret")
if(!require("klaR")) install.packages("klaR"); library("klaR")
## NOTE(review): rstudioapi works only inside an RStudio session; setwd() to
## the script's own directory is an interactive convenience.
library(rstudioapi)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
known <- read.csv("BADS_WS1718_known.csv", sep=",", header=TRUE)
## NOTE(review): `class` shadows base::class() in this script's environment.
class <- read.csv("BADS_WS1718_class_20180115.csv", sep=",", header=TRUE)
#Dealing with NA's and calculating frequencies
source("Preprocessing1.R")
prepare(known, class)
#1)This below is for the preparation of the class set
#set.seed(123)
#idx.train.woe <- createDataPartition(y = known$return, p = 0.4, list = FALSE)
#known_tr <- known[idx.train.woe, ]
#woe.set <- known[-idx.train.woe, ]
#known_test <- class
#2) Split as below when developing models on only known set
## Three-way split: 40% WOE-fitting set, then the remaining 60% is split
## 2/3 train : 1/3 test (stratified on `return`).
set.seed(124)
idx.train.woe <- createDataPartition(y = known$return, p = 0.6, list = FALSE)
known_total_tr <- known[idx.train.woe, ]
woe.set <- known[-idx.train.woe, ]
idx.test.woe <- createDataPartition(y = known_total_tr$return, p = 0.3333, list = FALSE)
known_test <- known_total_tr[idx.test.woe, ]
known_tr <- known_total_tr[-idx.test.woe, ]
## Fit weight-of-evidence encoding on the held-out WOE set (klaR::woe);
## zeroadj avoids infinite WOE for empty cells.
woe.object <- woe(return ~., data = woe.set, zeroadj = 0.5)
woe.set <- predict(woe.object, newdata = woe.set, replace = F)
source("Preprocessing2.R")
Preprocessing(woe.set,known_tr,known_test)
#Dummy Encoding
## return is recoded to 0/1, dummy-encoded with the other predictors, then
## restored to a factor (caret::dummyVars would otherwise expand the factor).
known_tr$return <- (as.numeric(known_tr$return)-1)
dmy_tr <- dummyVars(" ~ .", data = known_tr, fullRank = TRUE)
known_tr <- data.frame(predict(dmy_tr, newdata = known_tr, replace=FALSE))
known_tr$return <- as.factor(known_tr$return)
## The class (scoring) set has no `return` column, hence the guards.
if("return" %in% colnames(known_test)){
known_test$return <- (as.numeric(known_test$return)-1)
}
dmy_test <- dummyVars(" ~ .", data = known_test, fullRank = TRUE)
known_test <- data.frame(predict(dmy_test, newdata = known_test, replace=FALSE))
if("return" %in% colnames(known_test)){
known_test$return <- as.factor(known_test$return)
}
woe.set$return <- (as.numeric(woe.set$return)-1)
dmy_woe <- dummyVars(" ~ .", data = woe.set, fullRank = TRUE)
woe.set <- data.frame(predict(dmy_woe, newdata = woe.set, replace=FALSE))
woe.set$return <- as.factor(woe.set$return)
#We wrote training and test sets as: known_tr_w40t40t20_modeltest and known_test_w40t40t20_modeltest
#We wrote known and class sets as: known_tr_fin2 and class_fin2
| /Preprocessing0.R | no_license | Batuhanipekci/BADS_Assignment | R | false | false | 2,295 | r | if(!require("caret")) install.packages("caret"); library("caret")
if(!require("klaR")) install.packages("klaR"); library("klaR")
library(rstudioapi)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
known <- read.csv("BADS_WS1718_known.csv", sep=",", header=TRUE)
class <- read.csv("BADS_WS1718_class_20180115.csv", sep=",", header=TRUE)
#Dealing with NA's and calculating frequencies
source("Preprocessing1.R")
prepare(known, class)
#1)This below is for the preparation of the class set
#set.seed(123)
#idx.train.woe <- createDataPartition(y = known$return, p = 0.4, list = FALSE)
#known_tr <- known[idx.train.woe, ]
#woe.set <- known[-idx.train.woe, ]
#known_test <- class
#2) Split as below when developing models on only known set
set.seed(124)
idx.train.woe <- createDataPartition(y = known$return, p = 0.6, list = FALSE)
known_total_tr <- known[idx.train.woe, ]
woe.set <- known[-idx.train.woe, ]
idx.test.woe <- createDataPartition(y = known_total_tr$return, p = 0.3333, list = FALSE)
known_test <- known_total_tr[idx.test.woe, ]
known_tr <- known_total_tr[-idx.test.woe, ]
woe.object <- woe(return ~., data = woe.set, zeroadj = 0.5)
woe.set <- predict(woe.object, newdata = woe.set, replace = F)
source("Preprocessing2.R")
Preprocessing(woe.set,known_tr,known_test)
#Dummy Encoding
known_tr$return <- (as.numeric(known_tr$return)-1)
dmy_tr <- dummyVars(" ~ .", data = known_tr, fullRank = TRUE)
known_tr <- data.frame(predict(dmy_tr, newdata = known_tr, replace=FALSE))
known_tr$return <- as.factor(known_tr$return)
if("return" %in% colnames(known_test)){
known_test$return <- (as.numeric(known_test$return)-1)
}
dmy_test <- dummyVars(" ~ .", data = known_test, fullRank = TRUE)
known_test <- data.frame(predict(dmy_test, newdata = known_test, replace=FALSE))
if("return" %in% colnames(known_test)){
known_test$return <- as.factor(known_test$return)
}
woe.set$return <- (as.numeric(woe.set$return)-1)
dmy_woe <- dummyVars(" ~ .", data = woe.set, fullRank = TRUE)
woe.set <- data.frame(predict(dmy_woe, newdata = woe.set, replace=FALSE))
woe.set$return <- as.factor(woe.set$return)
#We wrote training and test sets as: known_tr_w40t40t20_modeltest and known_test_w40t40t20_modeltest
#We wrote known and class sets as: known_tr_fin2 and class_fin2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-visualization.R
\name{summarize_values}
\alias{summarize_values}
\title{Summarize values present}
\usage{
summarize_values(values)
}
\arguments{
\item{values}{The values to summarize in a list.}
}
\value{
String of the form "value1 (2), value2 (4)",
where each value is followed by its number of
occurrences in parentheses.
}
\description{
Get a list of the values present and
the number of times each value appeared.
}
| /man/summarize_values.Rd | permissive | karawoo/dccmonitor | R | false | true | 506 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-visualization.R
\name{summarize_values}
\alias{summarize_values}
\title{Summarize values present}
\usage{
summarize_values(values)
}
\arguments{
\item{values}{The values to summarize in a list.}
}
\value{
String of the form "value1 (2), value2 (4)",
where each value is followed by its number of
occurrences in parentheses.
}
\description{
Get a list of the values present and
the number of times each value appeared.
}
|
# ==========================================================================
# eSet initialize method and update/coercion helpers
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## S4 initialize method for 'eSet': assembles assayData, phenoData,
## featureData, experimentData and protocolData into a coherent object,
## cross-filling sample/feature names between the components.
setMethod("initialize", signature(.Object="eSet"),
function(.Object, assayData,
phenoData=annotatedDataFrameFrom(assayData, byrow=FALSE),
featureData=annotatedDataFrameFrom(assayData, byrow=TRUE),
experimentData=MIAME(), annotation=character(0),
protocolData=phenoData[,integer(0)], ...)
{
## NB: Arguments provided in '...' are used to initialize
## slots if possible (when called from some subclass).
## Otherwise, extra args in '...' are added as elements
## to assayData. We do this to allow subclasses to
## rely on default contructor behavior for initializing
## slots.
##
## NB2: Extra args to the assayData constructor will
## be passed along as long as current class doesn't
## have a slot with a matching name.
## Partition '...' into slot values vs. assay matrices.
mySlots <- slotNames(.Object)
dotArgs <- list(...)
isSlot <- names(dotArgs) %in% mySlots
if (missing(assayData))
## No assayData given: build one from the non-slot '...' elements.
assayData <- do.call(assayDataNew, dotArgs[!isSlot], envir=parent.frame())
else {
checkClass(assayData, "AssayData", class(.Object))
## Element names depend on the storage mode (list vs. environment).
nms <-
if (storageMode(assayData)=="list") names(assayData)
else ls(assayData)
## Warn when a '...' assay element collides with one already in assayData.
dupNames <- nms %in% names(dotArgs[!isSlot])
if (any(dupNames))
warning("initialize argument(s) '",
paste(nms[dupNames], collapse="' '"),
"' also present in 'assayData'; argument(s) ignored")
}
if (!missing(phenoData))
checkClass(phenoData, "AnnotatedDataFrame", class(.Object))
dimLabels(phenoData) <- c("sampleNames", "sampleColumns")
if (!missing(featureData))
checkClass(featureData, "AnnotatedDataFrame", class(.Object))
dimLabels(featureData) <- c("featureNames", "featureColumns")
## create the protocolData, if necessary
if (!missing(protocolData)) {
checkClass(protocolData, "AnnotatedDataFrame", class(.Object))
dimLabels(protocolData) <- c("sampleNames", "sampleColumns")
}
## coordinate sample names: assayData/protocolData inherit names from
## phenoData when they have none of their own
adSampleNames <- sampleNames(assayData)
if (all(sapply(adSampleNames, is.null)))
sampleNames(assayData) <- sampleNames(phenoData)
pdSampleNames <- sampleNames(protocolData)
if (all(sapply(pdSampleNames, is.null)))
sampleNames(protocolData) <- sampleNames(phenoData)
## where do feature names come from? assayData or featureData
adFeatureNames <- featureNames(assayData)
if (all(sapply(adFeatureNames, is.null)))
featureNames(assayData) <- featureNames(featureData)
## create new instance from 'extra' dotArgs, and from instance
for (s in names(dotArgs)[isSlot])
slot(.Object, s) <- dotArgs[[s]]
callNextMethod(.Object, assayData=assayData, phenoData=phenoData,
featureData=featureData, experimentData=experimentData,
annotation=annotation, protocolData=protocolData)
})
## Migrate a legacy 'eSet' object to a modern subclass (toClass, e.g.
## "ExpressionSet" or "MultiSet"), rebuilding phenoData metadata, feature
## names and the MIAME description along the way. Emits warnings for every
## repair it has to perform; validates the result before returning it.
updateOldESet <- function(from, toClass, ...) { # to MultiExpressionSet
from <- asS4(from)
ophenoData <- asS4(phenoData(from))
metadata <- ophenoData@varMetadata
if (all(dim(metadata)==0)) {
## Old object carried no variable metadata; fabricate an empty frame
## with one (dropped) column per pData variable.
warning("replacing apparently empty varMetadata")
metadata <- data.frame(numeric(ncol(ophenoData@pData)))[,FALSE]
}
## Old 'varName' column becomes the metadata rownames.
if (!is.null(metadata[["varName"]])) {
row.names(metadata) <- metadata[["varName"]]
metadata[["varName"]] <- NULL
} else if (!is.null(names(ophenoData@pData))) {
row.names(metadata) <- names(ophenoData@pData)
}
## Old 'varLabels' column renamed to the modern 'labelDescription'.
if (!is.null(metadata[["varLabels"]])) {
names(metadata)[names(metadata)=="varLabels"] <- "labelDescription"
metadata[["labelDescription"]] <- as.character(metadata[["labelDescription"]])
}
## phenoData
pData <- ophenoData@pData
phenoData <- AnnotatedDataFrame(data=pData, varMetadata=metadata)
## sampleNames: phenoData is authoritative when they disagree
if (any(sampleNames(assayData(from))!=sampleNames(phenoData))) {
warning("creating assayData colnames from phenoData sampleNames")
sampleNames(assayData(from)) <- sampleNames(phenoData)
}
## reporterNames become featureNames when their length matches; otherwise
## fall back to positional (numeric) feature names
if (length(from@reporterNames) == dim(from)[[1]]) {
if (any(sapply(assayData(from),rownames)!=from@reporterNames))
warning("creating assayData featureNames from reporterNames")
featureNames(assayData(from)) <- from@reporterNames
} else {
warning("creating numeric assayData featureNames")
featureNames(assayData(from)) <- 1:dim(from)[[1]]
}
## Duplicate feature names are dropped (first occurrence kept).
if (sum(dups <- duplicated(featureNames(assayData(from))))>0) {
warning("removing ", sum(dups), " duplicated featureNames")
from@assayData <- lapply(from@assayData, function(elt) elt[!dups,])
}
## description: fold legacy notes/history into the MIAME 'other' slot
## (the "addding" typo below is preserved: warning text is runtime output)
description <- from@description
if (is(description,"MIAME")) {
if (length(from@notes)!=0) {
warning("addding 'notes' to 'description'")
description@other <- c(description@other,from@notes)
}
if (length(from@history)!=0) {
warning("adding 'history' to 'description'")
description@other <- c(description@other,from@history)
}
} else {
warning("'description' is not of class MIAME; ignored")
description <- NULL
}
## reporterInfo: has no modern counterpart, so it is dropped with a warning
if (any(dim(from@reporterInfo)!=0))
warning("reporterInfo data not transfered to '",toClass, "' object")
## new object
object <- new(toClass,
assayData = from@assayData,
phenoData = phenoData,
featureData = annotatedDataFrameFrom(from@assayData, byrow=TRUE),
experimentData = updateObject(description),
annotation = from@annotation)
validObject(object)
object
}
## Coercions from the legacy 'eSet' to its modern subclasses route through
## updateOldESet() so metadata and names are migrated consistently.
setAs("eSet", "ExpressionSet", function(from, to) updateOldESet(from, "ExpressionSet"))
setAs("eSet", "MultiSet", function(from, to) updateOldESet(from, "MultiSet"))
## Copy (and update) the components of 'object' into 'template', an already
## constructed instance of the target class, then stamp the current 'eSet'
## class version on the result. Used because a virtual class cannot be
## instantiated directly, so components flow through the accessor /
## replacement generics rather than raw slot access.
##
## Args:
##   object   : the source 'eSet'(-derived) object.
##   template : a fresh instance of the destination class.
##   ...      : forwarded to updateObject() for each component.
##   verbose  : emit progress messages.
## Returns the populated 'template'.
updateESetTo <- function(object, template, ..., verbose=FALSE) {
    if (verbose)
        message("updateESetTo(object = 'eSet' template = '", class(template), "')")
    ## Explicit replacement calls instead of the previous
    ## eval(parse(text=paste(...))) construction -- same four components,
    ## same updateObject() pass, but statically analyzable and safe.
    assayData(template) <- updateObject(assayData(object), ..., verbose=verbose)
    phenoData(template) <- updateObject(phenoData(object), ..., verbose=verbose)
    experimentData(template) <- updateObject(experimentData(object), ..., verbose=verbose)
    annotation(template) <- updateObject(annotation(object), ..., verbose=verbose)
    ## Very old objects may lack featureData entirely; fall back to an empty
    ## AnnotatedDataFrame shaped like the assayData features.
    result <- try(featureData(template) <- featureData(object), silent=TRUE)
    if (inherits(result, "try-error"))   # was class(result)=="try-error"
        featureData(template) <- annotatedDataFrameFrom(assayData(object), byrow=TRUE)
    vers <- classVersion("eSet")
    classVersion(template)[names(vers)] <- vers # current class version, eSet & 'below' only
    template
}
setMethod("updateObject", signature(object="eSet"),
function(object, ..., verbose=FALSE) {
if (verbose) message("updateObject(object = 'eSet')")
object <- asS4(object)
if (isVersioned(object) && isCurrent(object)["eSet"])
return(callNextMethod())
## storage.mode likely to be useful to update versioned classes, too
storage.mode.final <- storageMode(object)
storage.mode <-
if (storage.mode.final == "lockedEnvironment") "environment"
else storage.mode.final
additionalSlots <- setdiff(slotNames(class(object)), slotNames("eSet"))
names(additionalSlots) <- additionalSlots
if (!isVersioned(object)) {
object <- updateESetTo(object, new(class(object), storage.mode=storage.mode), ..., verbose=verbose)
storageMode(object) <- storage.mode.final
} else if (classVersion(object)["eSet"]=="1.0.0") {
## added featureData slot; need to update phenoData
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object), ..., verbose=verbose),
phenoData = AnnotatedDataFrame(data=pData(object),
varMetadata=varMetadata(object)),
featureData = annotatedDataFrameFrom(assayData(object), byrow=TRUE),
experimentData = updateObject(experimentData(object), ..., verbose=verbose),
annotation = annotation(object)),
lapply(additionalSlots, function(x) slot(object, x))))
} else if (classVersion(object)["eSet"]=="1.1.0") {
## added scanDates slot
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object)),
phenoData = updateObject(phenoData(object)),
featureData = updateObject(featureData(object)),
experimentData = updateObject(experimentData(object)),
annotation = annotation(object)),
lapply(additionalSlots, function(x) slot(object, x))))
} else if (classVersion(object)["eSet"]=="1.2.0") {
## added protocolData slot, removed scanDates slot
scanDates <- object@scanDates
protocolData <- phenoData(object)[,integer(0)]
if (length(scanDates) > 0) {
protocolData[["ScanDate"]] <- scanDates
}
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object)),
phenoData = updateObject(phenoData(object)),
featureData = updateObject(featureData(object)),
experimentData = updateObject(experimentData(object)),
annotation = annotation(object),
protocolData = protocolData),
lapply(additionalSlots,
function(x) updateObject(slot(object, x)))))
} else {
stop("cannot update object of class '", class(object),
"', claiming to be eSet version '",
as(classVersion(object)["eSet"], "character"), "'")
}
object
})
setMethod("updateObjectTo", signature(object="eSet", template="eSet"), updateESetTo)
setValidity("eSet", function(object) {
msg <- validMsg(NULL, isValidVersion(object, "eSet"))
dims <- dims(object)
if (ncol(dims) > 0) {
## assayData
msg <- validMsg(msg, assayDataValidMembers(assayData(object)))
if (any(dims[1,] != dims[1,1]))
msg <- validMsg(msg, "row numbers differ for assayData members")
if (any(dims[2,] != dims[2,1]))
msg <- validMsg(msg, "sample numbers differ for assayData members")
## featureData
if (dims[1,1] != dim( featureData(object))[[1]])
msg <- validMsg(msg, "feature numbers differ between assayData and featureData")
if (!identical(featureNames(assayData(object)), featureNames(featureData(object))))
msg <- validMsg(msg, "featureNames differ between assayData and featureData")
## phenoData
if (dims[2,1] != dim(phenoData(object))[[1]])
msg <- validMsg(msg, "sample numbers differ between assayData and phenoData")
if (!identical(sampleNames(assayData(object)), sampleNames(phenoData(object))))
msg <- validMsg(msg, "sampleNames differ between assayData and phenoData")
## protocolData
if (dim(phenoData(object))[[1]] != dim(protocolData(object))[[1]])
msg <- validMsg(msg, "sample numbers differ between phenoData and protocolData")
if (!identical(sampleNames(phenoData(object)), sampleNames(protocolData(object))))
msg <- validMsg(msg, "sampleNames differ between phenoData and protocolData")
}
if (is.null(msg)) TRUE else msg
})
setMethod("preproc", "eSet", function(object) preproc(experimentData(object)))
setReplaceMethod("preproc",
signature=signature(object="eSet"),
function(object, value) {
ed <- experimentData(object)
preproc(ed) <- value
object@experimentData <- ed
object
})
setMethod("show",
signature=signature(object="eSet"),
function(object) {
cat(class(object), " (storageMode: ",
storageMode(object), ")\n", sep="")
adim <- dim(object)
if (length(adim)>1)
cat("assayData:",
if (length(adim)>1)
paste(adim[[1]], "features,",
adim[[2]], "samples") else NULL,
"\n")
cat(" element names:",
paste(assayDataElementNames(object), collapse=", "), "\n")
.showAnnotatedDataFrame(protocolData(object),
labels=list(object="protocolData"))
.showAnnotatedDataFrame(phenoData(object),
labels=list(object="phenoData"))
.showAnnotatedDataFrame(featureData(object),
labels=list(
object="featureData",
sampleNames="featureNames",
varLabels="fvarLabels",
varMetadata="fvarMetadata"))
cat("experimentData: use 'experimentData(object)'\n")
pmids <- pubMedIds(object)
if (length(pmids) > 0 && all(pmids != ""))
cat(" pubMedIds:", paste(pmids, sep=", "), "\n")
cat("Annotation:", annotation(object), "\n")
})
setMethod("storageMode", "eSet", function(object) storageMode(assayData(object)))
setReplaceMethod("storageMode",
signature=signature(
object="eSet", value="character"),
function(object, value) {
ad <- assayData(object)
storageMode(ad) <- value
object@assayData <- ad
object
})
setMethod("sampleNames",
signature(object="eSet"),
function(object) sampleNames(phenoData(object)))
setReplaceMethod("sampleNames",
signature=signature(object="eSet", value="ANY"),
function(object, value) {
pd <- phenoData(object)
sampleNames(pd) <- value
ad <- assayData(object)
sampleNames(ad) <- value
prd <- protocolData(object)
if (nrow(prd) == 0) {
prd <- pd[,integer(0)]
} else {
sampleNames(prd) <- value
}
object@phenoData <- pd
object@protocolData <- prd
unsafeSetSlot(object, "assayData", ad)
})
setMethod("featureNames",
signature=signature(object="eSet"),
function(object) featureNames(assayData(object)))
setReplaceMethod("featureNames",
signature=signature(object="eSet", value="ANY"),
function(object, value) {
fd <- featureData(object)
featureNames(fd) <- value
ad <- assayData(object)
featureNames(ad) <- value
object@featureData <- fd
unsafeSetSlot(object, "assayData", ad)
})
setMethod("dimnames", "eSet", function(x) {
list(featureNames(x), sampleNames(x))
})
setReplaceMethod("dimnames", "eSet", function(x, value) {
featureNames(x) <- value[[1]]
sampleNames(x) <- value[[2]]
x
})
setMethod("dim", "eSet", function(x) assayDataDim(assayData(x)))
setMethod("dims", "eSet", function(object) assayDataDims(assayData(object)))
setMethod("[", "eSet", function(x, i, j, ..., drop = FALSE) {
if (missing(drop))
drop <- FALSE
if (missing(i) && missing(j)) {
if (!missing(...))
stop("specify genes or samples to subset; use '",
substitute(x), "$", names(list(...))[[1]],
"' to access phenoData variables")
return(x)
}
if (!isVersioned(x) || !isCurrent(x)["eSet"])
x <- updateObject(x)
if (!missing(j)) {
phenoData(x) <- phenoData(x)[j,, ..., drop = drop]
protocolData(x) <- protocolData(x)[j,, ..., drop = drop]
}
if (!missing(i))
featureData(x) <- featureData(x)[i,,..., drop=drop]
## assayData; implemented here to avoid function call
orig <- assayData(x)
storage.mode <- assayDataStorageMode(orig)
assayData(x) <-
switch(storage.mode,
environment =,
lockedEnvironment = {
aData <- new.env(parent=emptyenv())
if (missing(i)) # j must be present
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][, j, ..., drop = drop]
else { # j may or may not be present
if (missing(j))
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][i,, ..., drop = drop]
else
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][i, j, ..., drop = drop]
}
if ("lockedEnvironment" == storage.mode) assayDataEnvLock(aData)
aData
},
list = {
if (missing(i)) # j must be present
lapply(orig, function(obj) obj[, j, ..., drop = drop])
else { # j may or may not be present
if (missing(j))
lapply(orig, function(obj) obj[i,, ..., drop = drop])
else
lapply(orig, function(obj) obj[i, j, ..., drop = drop])
}
})
x
})
## $ stops dispatching ?!
##setMethod("$", "eSet", function(x, name) `$`(phenoData(x), name))
setMethod("$", "eSet", function(x, name) {
    ## Delegate to phenoData; substitute() re-creates the literal
    ## phenoData(x)$name call so $-style partial matching is preserved.
    eval(substitute(phenoData(x)$NAME_ARG, list(NAME_ARG=name)))
})

## Tab-completion support for x$<TAB>: offer phenoData variable names.
.DollarNames.eSet <- function(x, pattern)
    grep(pattern, names(pData(x)), value=TRUE)

setReplaceMethod("$", "eSet", function(x, name, value) {
    phenoData(x)[[name]] <- value   # '<-' (not '=') for assignment
    x
})

## [[ reads/writes phenoData columns; 'j' is accepted only for generic
## signature compatibility and is ignored.
setMethod("[[", "eSet", function(x, i, j, ...) phenoData(x)[[i]])
setReplaceMethod("[[", "eSet",
    function(x, i, j, ..., value) {
        phenoData(x)[[i, ...]] <- value
        x
    })
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## assayData slot access and element-level helpers
setMethod("assayData", "eSet", function(object) object@assayData)
setReplaceMethod("assayData",
    signature=signature(
        object="eSet",
        value="AssayData"),
    function(object, value) {
        object@assayData <- value
        object
    })

## Names of the matrices stored in assayData; a list is inspected with
## names(), an environment with ls().
assayDataElementNames <- function(object) {
    if (storageMode(object) == "list") names(assayData(object))
    else ls(assayData(object))
}

## Extract a single assayData matrix by name.
assayDataElement <- function(object, elt) assayData(object)[[elt]]
.validate_assayDataElementReplace <- function(obj, value) {
    ## Check that 'value' may replace an assayData element of 'obj': its
    ## dimensions must match the leading dimensions of 'obj', and any
    ## dimnames it carries must be a permutation of the corresponding
    ## dimnames of 'obj'.  The value is returned re-ordered to 'obj's
    ## dimname order with its dimnames replaced by those of 'obj'.
    ## NULL passes through untouched (it signals element removal).
    if (is.null(value))
        return(value)
    vdim <- dim(value)
    odim <- dim(obj)[seq_along(vdim)]
    if (!isTRUE(all.equal(unname(vdim), unname(odim))))
        stop("object and replacement value have different dimensions")
    if (!is.null(dimnames(value))) {
        ## per-margin index: obj's names when both margins are named (and
        ## agree as sets), positional indices when value's margin is unnamed
        idx <- Map(function(objNames, valNames) {
            if (is.null(valNames))
                seq_along(objNames)
            else if (setequal(objNames, valNames))
                objNames
            else
                stop("object and replacement value dimnames differ")
        }, dimnames(obj), dimnames(value))
        ## re-arrange value so its entries follow obj's dimname order
        value <- do.call(`[`, c(list(value), idx, drop=FALSE))
    }
    dimnames(value) <- dimnames(obj)
    value
}
assayDataElementReplace <- function(obj, elt, value, validate=TRUE) {
    ## Replace (or, when 'value' is NULL, remove) the assayData element
    ## 'elt' of 'obj'.  With 'validate=TRUE' the replacement is first
    ## checked and harmonized against dim/dimnames of 'obj'.
    ## 'validate' was added later; it must stay last for positional matching.
    if (validate)
        value <- .validate_assayDataElementReplace(obj, value)
    storage.mode <- storageMode(obj)   # compute once; reused in switch()
    switch(storage.mode,
           "lockedEnvironment" = {
               ## a locked environment cannot be modified in place:
               ## copy, modify, re-lock, then install the copy
               aData <- copyEnv(assayData(obj))
               if (is.null(value)) rm(list=elt, envir=aData)
               else aData[[elt]] <- value
               assayDataEnvLock(aData)
               assayData(obj) <- aData
           },
           "environment" = {
               if (is.null(value)) rm(list=elt, envir=assayData(obj))
               else assayData(obj)[[elt]] <- value
           },
           ## list storage: [[<- with NULL removes the element
           "list" = assayData(obj)[[elt]] <- value)
    obj
}

`assayDataElement<-` <- function(obj, elt, ..., value)
    ## 'value' is always the last argument, but needs to be 3rd for
    ## assayDataElementReplace
    assayDataElementReplace(obj, elt, value, ...)
setMethod("phenoData", "eSet", function(object) object@phenoData)
setReplaceMethod("phenoData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
object@phenoData <- value
if (nrow(protocolData(object)) == 0) {
protocolData(object) <- value[,integer(0)]
}
object
})
setMethod("pData", "eSet", function(object) pData(phenoData(object)))
setReplaceMethod("pData",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
pd <- phenoData(object)
pData(pd) <- value
phenoData(object) <- pd
object
})
setMethod("varMetadata",
signature=signature(object="eSet"),
function(object) varMetadata(phenoData(object)))
setReplaceMethod("varMetadata",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
pd <- phenoData(object)
varMetadata(pd) <- value
object@phenoData <- pd
object
})
setMethod("varLabels",
signature=signature(object="eSet"),
function(object) varLabels(phenoData(object)))
setReplaceMethod("varLabels",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
pd <- phenoData(object)
varLabels(pd) <- value
object@phenoData <- pd
object
})
setMethod("featureData",
signature(object="eSet"),
function(object) object@featureData)
setReplaceMethod("featureData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
object@featureData <- value
object
})
setMethod("fData",
signature=signature(object="eSet"),
function(object) pData(featureData(object)))
setReplaceMethod("fData",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
fd <- featureData(object)
pData(fd) <- value
object@featureData <- fd
object
})
setMethod("fvarMetadata",
signature=signature(object="eSet"),
function(object) varMetadata(featureData(object)))
setReplaceMethod("fvarMetadata",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
fd <- featureData(object)
varMetadata(fd) <- value
object@featureData <- fd
object
})
setMethod("fvarLabels",
signature=signature(object="eSet"),
function(object) varLabels(featureData(object)))
setReplaceMethod("fvarLabels",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
pd <- featureData(object)
varLabels(pd) <- value
object@featureData <- pd
object
})
setMethod("experimentData", signature(object="eSet"), function(object) object@experimentData)
setReplaceMethod("experimentData",
signature=signature(
object="eSet",
value="MIAME"),
function(object, value) {
object@experimentData <- value
object
})
setMethod("description", signature(object="eSet"),
function(object, ...) {
experimentData(object)
})
setReplaceMethod("description",
signature=signature(
object="eSet",
value="MIAME"),
function(object, value) {
object@experimentData <- value
object
})
setMethod("notes", signature(object="eSet"),
function(object) otherInfo(experimentData(object)))
setReplaceMethod("notes",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
ed <- experimentData(object)
notes(ed) <- value
object@experimentData <- ed
object
})
setMethod("pubMedIds", signature(object="eSet"),
function(object) pubMedIds(experimentData(object)))
setReplaceMethod("pubMedIds",
signature=signature(
object="eSet",
value="character"),
function(object, value) {
ed <- experimentData(object)
pubMedIds(ed) <- value
object@experimentData <- ed
object
})
setMethod("abstract", "eSet", function(object) abstract(experimentData(object)))
setMethod("annotation", "eSet", definition = function(object) object@annotation)
setReplaceMethod("annotation",
signature=signature(
object="eSet",
value="character"),
function(object, value) {
object@annotation <- value
object
})
setMethod("protocolData", "eSet",
function(object) {
tryCatch(object@protocolData,
error = function(x) {
phenoData(object)[,integer(0)]
})
})
setReplaceMethod("protocolData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
if (class(try(object@protocolData, silent = TRUE)) == "try-error")
object <- updateObject(object)
object@protocolData <- value
object
})
setMethod("combine",
signature=signature(
x="eSet", y="eSet"),
function(x, y, ...) {
if (class(x) != class(y))
stop("objects must be the same class, but are '",
class(x), "', '", class(y), "'")
if (any(annotation(x) != annotation(y)))
stop("objects have different annotations: ",
annotation(x), ", ", annotation(y))
if (!isCurrent(x)[["eSet"]])
x <- updateObject(x)
assayData(x) <- combine(assayData(x), assayData(y))
phenoData(x) <- combine(phenoData(x), phenoData(y))
featureData(x) <- combine(featureData(x), featureData(y))
experimentData(x) <- combine(experimentData(x),experimentData(y))
protocolData(x) <- combine(protocolData(x), protocolData(y))
## annotation -- constant
x
})
| /R/methods-eSet.R | no_license | AlfonsoRReyes/Biobase | R | false | false | 29,334 | r | # ==========================================================================
# eSet Class Validator
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setMethod("initialize", signature(.Object="eSet"),
function(.Object, assayData,
phenoData=annotatedDataFrameFrom(assayData, byrow=FALSE),
featureData=annotatedDataFrameFrom(assayData, byrow=TRUE),
experimentData=MIAME(), annotation=character(0),
protocolData=phenoData[,integer(0)], ...)
{
## NB: Arguments provided in '...' are used to initialize
## slots if possible (when called from some subclass).
## Otherwise, extra args in '...' are added as elements
## to assayData. We do this to allow subclasses to
## rely on default contructor behavior for initializing
## slots.
##
## NB2: Extra args to the assayData constructor will
## be passed along as long as current class doesn't
## have a slot with a matching name.
mySlots <- slotNames(.Object)
dotArgs <- list(...)
isSlot <- names(dotArgs) %in% mySlots
if (missing(assayData))
assayData <- do.call(assayDataNew, dotArgs[!isSlot], envir=parent.frame())
else {
checkClass(assayData, "AssayData", class(.Object))
nms <-
if (storageMode(assayData)=="list") names(assayData)
else ls(assayData)
dupNames <- nms %in% names(dotArgs[!isSlot])
if (any(dupNames))
warning("initialize argument(s) '",
paste(nms[dupNames], collapse="' '"),
"' also present in 'assayData'; argument(s) ignored")
}
if (!missing(phenoData))
checkClass(phenoData, "AnnotatedDataFrame", class(.Object))
dimLabels(phenoData) <- c("sampleNames", "sampleColumns")
if (!missing(featureData))
checkClass(featureData, "AnnotatedDataFrame", class(.Object))
dimLabels(featureData) <- c("featureNames", "featureColumns")
## create the protocolData, if necessary
if (!missing(protocolData)) {
checkClass(protocolData, "AnnotatedDataFrame", class(.Object))
dimLabels(protocolData) <- c("sampleNames", "sampleColumns")
}
## coordinate sample names
adSampleNames <- sampleNames(assayData)
if (all(sapply(adSampleNames, is.null)))
sampleNames(assayData) <- sampleNames(phenoData)
pdSampleNames <- sampleNames(protocolData)
if (all(sapply(pdSampleNames, is.null)))
sampleNames(protocolData) <- sampleNames(phenoData)
## where do feature names come from? assayData or featureData
adFeatureNames <- featureNames(assayData)
if (all(sapply(adFeatureNames, is.null)))
featureNames(assayData) <- featureNames(featureData)
## create new instance from 'extra' dotArgs, and from instance
for (s in names(dotArgs)[isSlot])
slot(.Object, s) <- dotArgs[[s]]
callNextMethod(.Object, assayData=assayData, phenoData=phenoData,
featureData=featureData, experimentData=experimentData,
annotation=annotation, protocolData=protocolData)
})
updateOldESet <- function(from, toClass, ...) { # to MultiExpressionSet
    ## Convert a pre-Biobase-2 'eSet' (with 'pData', 'reporterNames' and
    ## 'reporterInfo' slots) into a modern class such as ExpressionSet.
    from <- asS4(from)
    ophenoData <- asS4(phenoData(from))
    metadata <- ophenoData@varMetadata
    if (all(dim(metadata)==0)) {
        warning("replacing apparently empty varMetadata")
        ## zero-column data.frame with one row per phenoData variable
        metadata <- data.frame(numeric(ncol(ophenoData@pData)))[,FALSE]
    }
    ## old 'varName' column becomes row names; old 'varLabels' column
    ## becomes the modern 'labelDescription'
    if (!is.null(metadata[["varName"]])) {
        row.names(metadata) <- metadata[["varName"]]
        metadata[["varName"]] <- NULL
    } else if (!is.null(names(ophenoData@pData))) {
        row.names(metadata) <- names(ophenoData@pData)
    }
    if (!is.null(metadata[["varLabels"]])) {
        names(metadata)[names(metadata)=="varLabels"] <- "labelDescription"
        metadata[["labelDescription"]] <- as.character(metadata[["labelDescription"]])
    }
    ## phenoData
    pData <- ophenoData@pData
    phenoData <- AnnotatedDataFrame(data=pData, varMetadata=metadata)
    ## sampleNames: assayData colnames must agree with phenoData
    if (any(sampleNames(assayData(from))!=sampleNames(phenoData))) {
        warning("creating assayData colnames from phenoData sampleNames")
        sampleNames(assayData(from)) <- sampleNames(phenoData)
    }
    ## reporterNames become featureNames when their length is consistent
    if (length(from@reporterNames) == dim(from)[[1]]) {
        if (any(sapply(assayData(from),rownames)!=from@reporterNames))
            warning("creating assayData featureNames from reporterNames")
        featureNames(assayData(from)) <- from@reporterNames
    } else {
        warning("creating numeric assayData featureNames")
        ## seq_len() is safe for zero-row objects (1:0 would give c(1, 0))
        featureNames(assayData(from)) <- seq_len(dim(from)[[1]])
    }
    if (sum(dups <- duplicated(featureNames(assayData(from))))>0) {
        warning("removing ", sum(dups), " duplicated featureNames")
        from@assayData <- lapply(from@assayData, function(elt) elt[!dups,])
    }
    ## description: fold old 'notes' and 'history' into MIAME 'other'
    description <- from@description
    if (is(description,"MIAME")) {
        if (length(from@notes)!=0) {
            warning("adding 'notes' to 'description'")   # typo 'addding' fixed
            description@other <- c(description@other,from@notes)
        }
        if (length(from@history)!=0) {
            warning("adding 'history' to 'description'")
            description@other <- c(description@other,from@history)
        }
    } else {
        warning("'description' is not of class MIAME; ignored")
        description <- NULL
    }
    ## reporterInfo has no equivalent in the modern classes
    if (any(dim(from@reporterInfo)!=0))
        warning("reporterInfo data not transfered to '",toClass, "' object")
    ## new object
    object <- new(toClass,
                  assayData = from@assayData,
                  phenoData = phenoData,
                  featureData = annotatedDataFrameFrom(from@assayData, byrow=TRUE),
                  experimentData = updateObject(description),
                  annotation = from@annotation)
    validObject(object)
    object
}
setAs("eSet", "ExpressionSet", function(from, to) updateOldESet(from, "ExpressionSet"))
setAs("eSet", "MultiSet", function(from, to) updateOldESet(from, "MultiSet"))
updateESetTo <- function(object, template, ..., verbose=FALSE) {
    ## Copy the core eSet components of 'object' into 'template', updating
    ## each component on the way.  'template' is an instance of the target
    ## class ('eSet' is virtual and cannot be instantiated directly).
    ##
    ## @param object   old eSet-derived object to update
    ## @param template instance of the target class receiving the components
    ## @param ...      forwarded to the component updateObject() calls
    ## @param verbose  emit a progress message?
    ## @return 'template' with updated components and current class version
    if (verbose)
        message("updateESetTo(object = 'eSet' template = '", class(template), "')")
    ## Explicit accessor/replacement calls instead of the original
    ## eval(parse(text=paste(...))) construction: same effect, but statically
    ## analyzable and not vulnerable to name pasting mistakes.
    assayData(template) <- updateObject(assayData(object), ..., verbose=verbose)
    phenoData(template) <- updateObject(phenoData(object), ..., verbose=verbose)
    experimentData(template) <- updateObject(experimentData(object), ..., verbose=verbose)
    annotation(template) <- updateObject(annotation(object), ..., verbose=verbose)
    ## Very old objects may lack featureData entirely; fall back to an empty
    ## AnnotatedDataFrame with one row per assayData feature.
    result <- try(featureData(template) <- featureData(object), silent=TRUE)
    if (inherits(result, "try-error"))
        featureData(template) <- annotatedDataFrameFrom(assayData(object), byrow=TRUE)
    vers <- classVersion("eSet")
    classVersion(template)[names(vers)] <- vers # current class version, eSet & 'below' only
    template
}
setMethod("updateObject", signature(object="eSet"),
function(object, ..., verbose=FALSE) {
if (verbose) message("updateObject(object = 'eSet')")
object <- asS4(object)
if (isVersioned(object) && isCurrent(object)["eSet"])
return(callNextMethod())
## storage.mode likely to be useful to update versioned classes, too
storage.mode.final <- storageMode(object)
storage.mode <-
if (storage.mode.final == "lockedEnvironment") "environment"
else storage.mode.final
additionalSlots <- setdiff(slotNames(class(object)), slotNames("eSet"))
names(additionalSlots) <- additionalSlots
if (!isVersioned(object)) {
object <- updateESetTo(object, new(class(object), storage.mode=storage.mode), ..., verbose=verbose)
storageMode(object) <- storage.mode.final
} else if (classVersion(object)["eSet"]=="1.0.0") {
## added featureData slot; need to update phenoData
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object), ..., verbose=verbose),
phenoData = AnnotatedDataFrame(data=pData(object),
varMetadata=varMetadata(object)),
featureData = annotatedDataFrameFrom(assayData(object), byrow=TRUE),
experimentData = updateObject(experimentData(object), ..., verbose=verbose),
annotation = annotation(object)),
lapply(additionalSlots, function(x) slot(object, x))))
} else if (classVersion(object)["eSet"]=="1.1.0") {
## added scanDates slot
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object)),
phenoData = updateObject(phenoData(object)),
featureData = updateObject(featureData(object)),
experimentData = updateObject(experimentData(object)),
annotation = annotation(object)),
lapply(additionalSlots, function(x) slot(object, x))))
} else if (classVersion(object)["eSet"]=="1.2.0") {
## added protocolData slot, removed scanDates slot
scanDates <- object@scanDates
protocolData <- phenoData(object)[,integer(0)]
if (length(scanDates) > 0) {
protocolData[["ScanDate"]] <- scanDates
}
object <-
do.call(new,
c(list(class(object),
assayData = updateObject(assayData(object)),
phenoData = updateObject(phenoData(object)),
featureData = updateObject(featureData(object)),
experimentData = updateObject(experimentData(object)),
annotation = annotation(object),
protocolData = protocolData),
lapply(additionalSlots,
function(x) updateObject(slot(object, x)))))
} else {
stop("cannot update object of class '", class(object),
"', claiming to be eSet version '",
as(classVersion(object)["eSet"], "character"), "'")
}
object
})
setMethod("updateObjectTo", signature(object="eSet", template="eSet"), updateESetTo)
setValidity("eSet", function(object) {
msg <- validMsg(NULL, isValidVersion(object, "eSet"))
dims <- dims(object)
if (ncol(dims) > 0) {
## assayData
msg <- validMsg(msg, assayDataValidMembers(assayData(object)))
if (any(dims[1,] != dims[1,1]))
msg <- validMsg(msg, "row numbers differ for assayData members")
if (any(dims[2,] != dims[2,1]))
msg <- validMsg(msg, "sample numbers differ for assayData members")
## featureData
if (dims[1,1] != dim( featureData(object))[[1]])
msg <- validMsg(msg, "feature numbers differ between assayData and featureData")
if (!identical(featureNames(assayData(object)), featureNames(featureData(object))))
msg <- validMsg(msg, "featureNames differ between assayData and featureData")
## phenoData
if (dims[2,1] != dim(phenoData(object))[[1]])
msg <- validMsg(msg, "sample numbers differ between assayData and phenoData")
if (!identical(sampleNames(assayData(object)), sampleNames(phenoData(object))))
msg <- validMsg(msg, "sampleNames differ between assayData and phenoData")
## protocolData
if (dim(phenoData(object))[[1]] != dim(protocolData(object))[[1]])
msg <- validMsg(msg, "sample numbers differ between phenoData and protocolData")
if (!identical(sampleNames(phenoData(object)), sampleNames(protocolData(object))))
msg <- validMsg(msg, "sampleNames differ between phenoData and protocolData")
}
if (is.null(msg)) TRUE else msg
})
setMethod("preproc", "eSet", function(object) preproc(experimentData(object)))
setReplaceMethod("preproc",
signature=signature(object="eSet"),
function(object, value) {
ed <- experimentData(object)
preproc(ed) <- value
object@experimentData <- ed
object
})
setMethod("show",
signature=signature(object="eSet"),
function(object) {
cat(class(object), " (storageMode: ",
storageMode(object), ")\n", sep="")
adim <- dim(object)
if (length(adim)>1)
cat("assayData:",
if (length(adim)>1)
paste(adim[[1]], "features,",
adim[[2]], "samples") else NULL,
"\n")
cat(" element names:",
paste(assayDataElementNames(object), collapse=", "), "\n")
.showAnnotatedDataFrame(protocolData(object),
labels=list(object="protocolData"))
.showAnnotatedDataFrame(phenoData(object),
labels=list(object="phenoData"))
.showAnnotatedDataFrame(featureData(object),
labels=list(
object="featureData",
sampleNames="featureNames",
varLabels="fvarLabels",
varMetadata="fvarMetadata"))
cat("experimentData: use 'experimentData(object)'\n")
pmids <- pubMedIds(object)
if (length(pmids) > 0 && all(pmids != ""))
cat(" pubMedIds:", paste(pmids, sep=", "), "\n")
cat("Annotation:", annotation(object), "\n")
})
setMethod("storageMode", "eSet", function(object) storageMode(assayData(object)))
setReplaceMethod("storageMode",
signature=signature(
object="eSet", value="character"),
function(object, value) {
ad <- assayData(object)
storageMode(ad) <- value
object@assayData <- ad
object
})
setMethod("sampleNames",
signature(object="eSet"),
function(object) sampleNames(phenoData(object)))
setReplaceMethod("sampleNames",
signature=signature(object="eSet", value="ANY"),
function(object, value) {
pd <- phenoData(object)
sampleNames(pd) <- value
ad <- assayData(object)
sampleNames(ad) <- value
prd <- protocolData(object)
if (nrow(prd) == 0) {
prd <- pd[,integer(0)]
} else {
sampleNames(prd) <- value
}
object@phenoData <- pd
object@protocolData <- prd
unsafeSetSlot(object, "assayData", ad)
})
setMethod("featureNames",
signature=signature(object="eSet"),
function(object) featureNames(assayData(object)))
setReplaceMethod("featureNames",
signature=signature(object="eSet", value="ANY"),
function(object, value) {
fd <- featureData(object)
featureNames(fd) <- value
ad <- assayData(object)
featureNames(ad) <- value
object@featureData <- fd
unsafeSetSlot(object, "assayData", ad)
})
setMethod("dimnames", "eSet", function(x) {
list(featureNames(x), sampleNames(x))
})
setReplaceMethod("dimnames", "eSet", function(x, value) {
featureNames(x) <- value[[1]]
sampleNames(x) <- value[[2]]
x
})
setMethod("dim", "eSet", function(x) assayDataDim(assayData(x)))
setMethod("dims", "eSet", function(object) assayDataDims(assayData(object)))
setMethod("[", "eSet", function(x, i, j, ..., drop = FALSE) {
if (missing(drop))
drop <- FALSE
if (missing(i) && missing(j)) {
if (!missing(...))
stop("specify genes or samples to subset; use '",
substitute(x), "$", names(list(...))[[1]],
"' to access phenoData variables")
return(x)
}
if (!isVersioned(x) || !isCurrent(x)["eSet"])
x <- updateObject(x)
if (!missing(j)) {
phenoData(x) <- phenoData(x)[j,, ..., drop = drop]
protocolData(x) <- protocolData(x)[j,, ..., drop = drop]
}
if (!missing(i))
featureData(x) <- featureData(x)[i,,..., drop=drop]
## assayData; implemented here to avoid function call
orig <- assayData(x)
storage.mode <- assayDataStorageMode(orig)
assayData(x) <-
switch(storage.mode,
environment =,
lockedEnvironment = {
aData <- new.env(parent=emptyenv())
if (missing(i)) # j must be present
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][, j, ..., drop = drop]
else { # j may or may not be present
if (missing(j))
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][i,, ..., drop = drop]
else
for(nm in ls(orig)) aData[[nm]] <- orig[[nm]][i, j, ..., drop = drop]
}
if ("lockedEnvironment" == storage.mode) assayDataEnvLock(aData)
aData
},
list = {
if (missing(i)) # j must be present
lapply(orig, function(obj) obj[, j, ..., drop = drop])
else { # j may or may not be present
if (missing(j))
lapply(orig, function(obj) obj[i,, ..., drop = drop])
else
lapply(orig, function(obj) obj[i, j, ..., drop = drop])
}
})
x
})
## $ stops dispatching ?!
##setMethod("$", "eSet", function(x, name) `$`(phenoData(x), name))
setMethod("$", "eSet", function(x, name) {
    ## Delegate to phenoData; substitute() re-creates the literal
    ## phenoData(x)$name call so $-style partial matching is preserved.
    eval(substitute(phenoData(x)$NAME_ARG, list(NAME_ARG=name)))
})

## Tab-completion support for x$<TAB>: offer phenoData variable names.
.DollarNames.eSet <- function(x, pattern)
    grep(pattern, names(pData(x)), value=TRUE)

setReplaceMethod("$", "eSet", function(x, name, value) {
    phenoData(x)[[name]] <- value   # '<-' (not '=') for assignment
    x
})

## [[ reads/writes phenoData columns; 'j' is accepted only for generic
## signature compatibility and is ignored.
setMethod("[[", "eSet", function(x, i, j, ...) phenoData(x)[[i]])
setReplaceMethod("[[", "eSet",
    function(x, i, j, ..., value) {
        phenoData(x)[[i, ...]] <- value
        x
    })
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
setMethod("assayData", "eSet", function(object) object@assayData)
setReplaceMethod("assayData",
signature=signature(
object="eSet",
value="AssayData"),
function(object, value) {
object@assayData <- value
object
})
assayDataElementNames <- function(object) {
if (storageMode(object) == "list") names(assayData(object))
else ls(assayData(object))
}
assayDataElement <- function(object, elt) assayData(object)[[elt]]
.validate_assayDataElementReplace <- function(obj, value) {
    ## Check that 'value' may replace an assayData element of 'obj': its
    ## dimensions must match the leading dimensions of 'obj', and any
    ## dimnames it carries must be a permutation of the corresponding
    ## dimnames of 'obj'.  The value is returned re-ordered to 'obj's
    ## dimname order with its dimnames replaced by those of 'obj'.
    ## NULL passes through untouched (it signals element removal).
    if (is.null(value))
        return(value)
    vdim <- dim(value)
    odim <- dim(obj)[seq_along(vdim)]
    if (!isTRUE(all.equal(unname(vdim), unname(odim))))
        stop("object and replacement value have different dimensions")
    if (!is.null(dimnames(value))) {
        ## per-margin index: obj's names when both margins are named (and
        ## agree as sets), positional indices when value's margin is unnamed
        idx <- Map(function(objNames, valNames) {
            if (is.null(valNames))
                seq_along(objNames)
            else if (setequal(objNames, valNames))
                objNames
            else
                stop("object and replacement value dimnames differ")
        }, dimnames(obj), dimnames(value))
        ## re-arrange value so its entries follow obj's dimname order
        value <- do.call(`[`, c(list(value), idx, drop=FALSE))
    }
    dimnames(value) <- dimnames(obj)
    value
}
assayDataElementReplace <- function(obj, elt, value, validate=TRUE) {
    ## Replace (or, when 'value' is NULL, remove) the assayData element
    ## 'elt' of 'obj'.  With 'validate=TRUE' the replacement is first
    ## checked and harmonized against dim/dimnames of 'obj'.
    ## 'validate' was added later; it must stay last for positional matching.
    if (validate)
        value <- .validate_assayDataElementReplace(obj, value)
    storage.mode <- storageMode(obj)   # compute once; reused in switch()
    switch(storage.mode,
           "lockedEnvironment" = {
               ## a locked environment cannot be modified in place:
               ## copy, modify, re-lock, then install the copy
               aData <- copyEnv(assayData(obj))
               if (is.null(value)) rm(list=elt, envir=aData)
               else aData[[elt]] <- value
               assayDataEnvLock(aData)
               assayData(obj) <- aData
           },
           "environment" = {
               if (is.null(value)) rm(list=elt, envir=assayData(obj))
               else assayData(obj)[[elt]] <- value
           },
           ## list storage: [[<- with NULL removes the element
           "list" = assayData(obj)[[elt]] <- value)
    obj
}

`assayDataElement<-` <- function(obj, elt, ..., value)
    ## 'value' is always the last argument, but needs to be 3rd for
    ## assayDataElementReplace
    assayDataElementReplace(obj, elt, value, ...)
## phenoData: the sample metadata (an AnnotatedDataFrame) -------------------
setMethod("phenoData", "eSet", function(object) object@phenoData)
setReplaceMethod("phenoData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
object@phenoData <- value
## Keep protocolData's sample set in sync: if it is still empty, seed it
## with the same samples but zero covariate columns.
if (nrow(protocolData(object)) == 0) {
protocolData(object) <- value[,integer(0)]
}
object
})
## pData: the phenoData slot's underlying data.frame of sample covariates.
setMethod("pData", "eSet", function(object) pData(phenoData(object)))
setReplaceMethod("pData",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
pd <- phenoData(object)
pData(pd) <- value
phenoData(object) <- pd
object
})
## varMetadata: per-covariate descriptions stored in phenoData.
setMethod("varMetadata",
signature=signature(object="eSet"),
function(object) varMetadata(phenoData(object)))
setReplaceMethod("varMetadata",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
pd <- phenoData(object)
varMetadata(pd) <- value
object@phenoData <- pd
object
})
## varLabels: the covariate (column) names of phenoData.
setMethod("varLabels",
signature=signature(object="eSet"),
function(object) varLabels(phenoData(object)))
setReplaceMethod("varLabels",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
pd <- phenoData(object)
varLabels(pd) <- value
object@phenoData <- pd
object
})
## featureData: per-feature metadata (an AnnotatedDataFrame) ----------------
setMethod("featureData",
signature(object="eSet"),
function(object) object@featureData)
setReplaceMethod("featureData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
object@featureData <- value
object
})
## fData: the featureData slot's underlying data.frame.
setMethod("fData",
signature=signature(object="eSet"),
function(object) pData(featureData(object)))
setReplaceMethod("fData",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
fd <- featureData(object)
pData(fd) <- value
object@featureData <- fd
object
})
## fvarMetadata: per-feature-variable descriptions stored in featureData.
setMethod("fvarMetadata",
signature=signature(object="eSet"),
function(object) varMetadata(featureData(object)))
setReplaceMethod("fvarMetadata",
signature=signature(
object="eSet",
value="data.frame"),
function(object, value) {
fd <- featureData(object)
varMetadata(fd) <- value
object@featureData <- fd
object
})
## fvarLabels: the variable (column) names of featureData.
setMethod("fvarLabels",
signature=signature(object="eSet"),
function(object) varLabels(featureData(object)))
## Replace the feature-variable (column) labels of the featureData slot.
## FIX (consistency): the local variable was named 'pd', misleadingly
## suggesting phenoData; renamed to 'fd' to match the sibling fData<- and
## fvarMetadata<- methods.  Behaviour is unchanged.
setReplaceMethod("fvarLabels",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
    fd <- featureData(object)
    varLabels(fd) <- value
    object@featureData <- fd
    object
})
## experimentData: experiment description as a MIAME object -----------------
setMethod("experimentData", signature(object="eSet"), function(object) object@experimentData)
setReplaceMethod("experimentData",
signature=signature(
object="eSet",
value="MIAME"),
function(object, value) {
object@experimentData <- value
object
})
## description / description<- are synonyms for experimentData on eSet.
setMethod("description", signature(object="eSet"),
function(object, ...) {
experimentData(object)
})
setReplaceMethod("description",
signature=signature(
object="eSet",
value="MIAME"),
function(object, value) {
object@experimentData <- value
object
})
## notes: free-form annotations kept in the MIAME otherInfo field.
setMethod("notes", signature(object="eSet"),
function(object) otherInfo(experimentData(object)))
setReplaceMethod("notes",
signature=signature(
object="eSet",
value="ANY"),
function(object, value) {
ed <- experimentData(object)
notes(ed) <- value
object@experimentData <- ed
object
})
## pubMedIds: PubMed identifiers associated with the experiment.
setMethod("pubMedIds", signature(object="eSet"),
function(object) pubMedIds(experimentData(object)))
setReplaceMethod("pubMedIds",
signature=signature(
object="eSet",
value="character"),
function(object, value) {
ed <- experimentData(object)
pubMedIds(ed) <- value
object@experimentData <- ed
object
})
## abstract: read-only convenience accessor into experimentData.
setMethod("abstract", "eSet", function(object) abstract(experimentData(object)))
## annotation: the name of the annotation (chip/platform) package.
setMethod("annotation", "eSet", definition = function(object) object@annotation)
setReplaceMethod("annotation",
signature=signature(
object="eSet",
value="character"),
function(object, value) {
object@annotation <- value
object
})
## protocolData accessor.  Instances serialized before the protocolData
## slot existed throw on slot access; fall back to an empty
## AnnotatedDataFrame with the same samples as phenoData.
setMethod("protocolData", "eSet",
function(object) {
tryCatch(object@protocolData,
error = function(x) {
phenoData(object)[,integer(0)]
})
})
## Replace the protocolData slot.  Instances serialized before the slot
## existed throw on slot access, so detect that and updateObject() first.
## FIX: detect the try() failure with inherits() rather than comparing
## class() with "==" -- class() may return a vector, in which case the
## comparison is not a single logical value.
setReplaceMethod("protocolData",
signature=signature(
object="eSet",
value="AnnotatedDataFrame"),
function(object, value) {
    if (inherits(try(object@protocolData, silent = TRUE), "try-error"))
        object <- updateObject(object)
    object@protocolData <- value
    object
})
## Combine two eSet objects of the same class into one, merging each
## constituent slot with its own combine() method.  The objects must have
## identical class and annotation; 'x' is updated to the current class
## definition before merging.
setMethod("combine",
signature=signature(
x="eSet", y="eSet"),
function(x, y, ...) {
if (class(x) != class(y))
stop("objects must be the same class, but are '",
class(x), "', '", class(y), "'")
if (any(annotation(x) != annotation(y)))
stop("objects have different annotations: ",
annotation(x), ", ", annotation(y))
## Bring x up to date so all slots (e.g. protocolData) exist.
if (!isCurrent(x)[["eSet"]])
x <- updateObject(x)
assayData(x) <- combine(assayData(x), assayData(y))
phenoData(x) <- combine(phenoData(x), phenoData(y))
featureData(x) <- combine(featureData(x), featureData(y))
experimentData(x) <- combine(experimentData(x),experimentData(y))
protocolData(x) <- combine(protocolData(x), protocolData(y))
## annotation -- constant
x
})
|
## ---------------------------------------------------------------------------
## Task 03: exploring the fossil record with the Paleobiology Database.
## Requires a network connection for the pbdb_* download calls.
## ---------------------------------------------------------------------------

## Install only when missing, and spell TRUE out ('T' can be reassigned).
if (!requireNamespace("paleobioDB", quietly = TRUE)) {
  install.packages("paleobioDB", dependencies = TRUE)
}
## Machine-specific working directory; adjust to your own checkout.
setwd("C:\\Users\\harle\\Evolution\\Tasks\\Task_02")
library(paleobioDB)

## Download all dinosaur occurrences from the Mesozoic (252-66 Ma).
Taxon <- "Dinosauria"
MinMA <- 66
MaxMA <- 252
fossils <- pbdb_occurrences(base_name = Taxon,
                            show = c("phylo", "coords", "ident"),
                            min_ma = MinMA, max_ma = MaxMA)

## How many genera are known from each time bin?
Res <- 5
nspeciesOverTime <- pbdb_richness(fossils, rank = "genus",
                                  temporal_extent = c(MaxMA, MinMA), res = Res)
par(mar = c(4, 5, 2, 1), las = 1, tck = -0.01, mgp = c(2.5, 0.5, 0))
plot(seq(to = MaxMA, from = MinMA, length.out = nrow(nspeciesOverTime)),
     nspeciesOverTime[, 2], xlim = c(MaxMA, MinMA), type = "l",
     xlab = "age (millions of years ago)", ylab = "num. of species",
     main = Taxon)

## Originations and extinctions per time bin.
## BUG FIX: the argument was misspelled 'temporal_exten' (it only worked
## through R's partial argument matching).
newspeciesOverTime <- pbdb_orig_ext(fossils, res = Res, rank = "species",
                                    temporal_extent = c(MinMA, MaxMA))
par(mar = c(4, 5, 2, 1), las = 1, tck = -0.01, mgp = c(2.5, 0.5, 0))
plot(seq(to = MaxMA, from = MinMA, length.out = nrow(newspeciesOverTime)),
     newspeciesOverTime[, 1], xlim = c(MaxMA, MinMA), type = "l",
     xlab = "age (millions of years ago)", ylab = "num. of species",
     main = Taxon)
lines(seq(to = MaxMA, from = MinMA, length.out = nrow(newspeciesOverTime)),
      newspeciesOverTime[, 2], col = "red")
legend("topleft", legend = c("first appear", "go extinct"),
       col = c("black", "red"), lty = 1, bty = "n")
## The two curves track each other until roughly 80 Ma, where first
## appearances dramatically increase -- dinosaurs were diversifying
## fastest during that interval according to the graph.

## World map of sampled richness.
OceanCol <- "light blue"
LandCol <- "black"
Cols <- c("#fee5d9", "#fcae91", "#fb6a4a", "#de2d26", "#a50f15")
par(las = 0)
pbdb_map_richness(fossils, col.ocean = OceanCol, col.int = LandCol,
                  col.rich = Cols)

## Per-period occurrence downloads (Triassic / Jurassic / Cretaceous).
MinMA <- 201
MaxMA <- 252
triassic_fossils <- pbdb_occurrences(base_name = Taxon,
                                     show = c("phylo", "coords", "ident"),
                                     min_ma = MinMA, max_ma = MaxMA)
MinMA <- 145
MaxMA <- 201
jurassic_fossils <- pbdb_occurrences(base_name = Taxon,
                                     show = c("phylo", "coords", "ident"),
                                     min_ma = MinMA, max_ma = MaxMA)
MinMA <- 66
MaxMA <- 145
cretaceous_fossils <- pbdb_occurrences(base_name = Taxon,
                                       show = c("phylo", "coords", "ident"),
                                       min_ma = MinMA, max_ma = MaxMA)
dev.new(height = 7.8, width = 13)
## BUG FIX: the map was drawn from 'jurassic_fossils' although the title
## reads "Cretaceous (145 - 66Ma)"; plot the Cretaceous data so that the
## figure and its label agree.  (If the Jurassic map was intended instead,
## change the mtext() label rather than the data.)
pbdb_map_richness(cretaceous_fossils, col.ocean = OceanCol,
                  col.int = LandCol, col.rich = Cols)
mtext(side = 3, "Cretaceous (145 - 66Ma)", cex = 3, line = -2)

## Dinosaur vs. mammal genus richness through the Mesozoic.
Taxon2 <- "Mammalia"
MinMA <- 66
MaxMA <- 252
fossils2 <- pbdb_occurrences(base_name = Taxon2,
                             show = c("phylo", "coords", "ident"),
                             min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime2 <- pbdb_richness(fossils2, rank = "genus",
                                   temporal_extent = c(MaxMA, MinMA), res = Res)
par(mar = c(4, 5, 2, 1), las = 1, tck = -0.01, mgp = c(2.5, 0.5, 0))
Col_dino <- Cols[length(Cols)]   # darkest shade for dinosaurs
Col_mammal <- Cols[1]            # lightest shade for mammals
LineWidth <- 2
plot(seq(to = MaxMA, from = MinMA, length.out = nrow(nspeciesOverTime)),
     nspeciesOverTime[, 2], xlim = c(MaxMA, MinMA), type = "l",
     xlab = "age (millions of years ago)", ylab = "num. of species",
     col = Col_dino, lwd = LineWidth)
lines(seq(to = MaxMA, from = MinMA, length.out = nrow(nspeciesOverTime2)),
      nspeciesOverTime2[, 2], col = Col_mammal, lwd = LineWidth)
legend("topleft", legend = c(Taxon, Taxon2), col = c(Col_dino, Col_mammal),
       bty = "n", lwd = LineWidth)

## Echinoderm vs. chordate genus richness (290-100 Ma).
## BUG FIX: the original overwrote 'fossils2'/'nspeciesOverTime2' with the
## chordate data (discarding the echinoderm curve) and then plotted the
## *dinosaur* series using the dinosaur/mammal colours while the legend
## claimed Echinodermata/Chordata.  Keep the two curves in their own
## variables and colour them to match the legend.
Taxon3 <- "Echinodermata"
MinMA <- 100
MaxMA <- 290
fossils3 <- pbdb_occurrences(base_name = Taxon3,
                             show = c("phylo", "coords", "ident"),
                             min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime3 <- pbdb_richness(fossils3, rank = "genus",
                                   temporal_extent = c(MaxMA, MinMA), res = Res)
Taxon4 <- "Chordata"
fossils4 <- pbdb_occurrences(base_name = Taxon4,
                             show = c("phylo", "coords", "ident"),
                             min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime4 <- pbdb_richness(fossils4, rank = "genus",
                                   temporal_extent = c(MaxMA, MinMA), res = Res)
par(mar = c(4, 5, 2, 1), las = 1, tck = -0.01, mgp = c(2.5, 0.5, 0))
Col_echino <- Cols[length(Cols)]
Col_chorda <- Cols[1]
LineWidth <- 2
plot(seq(to = MaxMA, from = MinMA, length.out = nrow(nspeciesOverTime3)),
     nspeciesOverTime3[, 2], xlim = c(MaxMA, MinMA), type = "l",
     xlab = "age (millions of years ago)", ylab = "num. of species",
     col = Col_echino, lwd = LineWidth)
lines(seq(to = MaxMA, from = MinMA, length.out = nrow(nspeciesOverTime4)),
      nspeciesOverTime4[, 2], col = Col_chorda, lwd = LineWidth)
legend("topleft", legend = c(Taxon3, Taxon4), col = c(Col_echino, Col_chorda),
       bty = "n", lwd = LineWidth)
#My hypothesis is that the echinodermata and chordata growth rates are postively correlated from 200 million years ago to 100 million years ago. | /Task_03/Task03.R | no_license | shaneh34/Tasks | R | false | false | 4,630 | r | install.packages ("paleobioDB", dep = T)
## Duplicate payload of Task03.R (dataset 'text' column).  Code left
## byte-identical; NOTE(review) comments flag the defects found.
## Machine-specific working directory.  (The install.packages() call for
## this copy sits on the preceding, metadata-fused line.)
setwd("C:\\Users\\harle\\Evolution\\Tasks\\Task_02")
library(paleobioDB)
## Download all Mesozoic (252-66 Ma) dinosaur occurrences.
Taxon <- "Dinosauria"
MinMA <- 66
MaxMA <- 252
fossils <- pbdb_occurrences (base_name = Taxon, show = c ("phylo", "coords", "ident"), min_ma=MinMA, max_ma=MaxMA)
#How many species are known from each time period?
Res <- 5
nspeciesOverTime <- pbdb_richness (fossils, rank = "genus", temporal_extent =c (MaxMA, MinMA), res=Res)
par (mar = c (4, 5, 2, 1), las = 1, tck = -.01, mgp = c (2.5, 0.5, 0))
plot(seq ( to = MaxMA, from = MinMA, length.out = nrow (nspeciesOverTime)), nspeciesOverTime [,2], xlim = c (MaxMA, MinMA), type = "l", xlab = "age (millions of years ago)", ylab = "num. of species", main = Taxon)
## NOTE(review): 'temporal_exten' is a misspelling of 'temporal_extent';
## it only works via partial argument matching.
newspeciesOverTime <- pbdb_orig_ext (fossils, res=5, rank= "species", temporal_exten = c (MinMA, MaxMA))
par (mar = c (4,5,2,1), las = 1, tck = -0.01, mgp = c (2.5,0.5,0))
plot (seq (to = MaxMA, from = MinMA, length.out = nrow (newspeciesOverTime)), newspeciesOverTime [,1], xlim = c (MaxMA, MinMA), type = "l", xlab = "age (millions of years ago)", ylab = "num. of species", main = Taxon)
lines (seq (to = MaxMA, from = MinMA, length.out = nrow (newspeciesOverTime)), newspeciesOverTime [,2], col = "red")
legend ("topleft", legend = c ("first appear", "go extinct"), col = c ("black", "red"), lty = 1, bty = "n")
#The lines are roughly the same until the time period approaches roughly 80 million years ago, where first appearances dramatically increase. Dinosaurs were evolving the fastest during the same time period according to the graph.
## Richness map settings and plot.
OceanCol <- "light blue"
LandCol <- "black"
Cols <- c ("#fee5d9", "#fcae91", "#fb6a4a", "#de2d26", "#a50f15")
par (las = 0)
pbdb_map_richness (fossils, col.ocean = OceanCol, col.int = LandCol, col.rich = Cols)
## Per-period downloads (Triassic / Jurassic / Cretaceous).
MinMA <- 201
MaxMA <- 252
triassic_fossils <- pbdb_occurrences (base_name = Taxon, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
MinMA <- 145
MaxMA <- 201
jurassic_fossils <- pbdb_occurrences (base_name = Taxon, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
MinMA <- 66
MaxMA <- 145
cretaceous_fossils <- pbdb_occurrences (base_name = Taxon, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
dev.new (height = 7.8, width = 13)
## NOTE(review): the Jurassic data set is mapped here, but the title below
## says "Cretaceous (145 - 66Ma)" -- data and label disagree.
pbdb_map_richness (jurassic_fossils, col.ocean = OceanCol, col.int = LandCol, col.rich = Cols)
mtext (side = 3, "Cretaceous (145 - 66Ma)", cex = 3, line = -2)
## Dinosaur vs. mammal genus richness comparison.
Taxon2 <- "Mammalia"
MinMA <- 66
MaxMA <- 252
fossils2 <- pbdb_occurrences (base_name = Taxon2, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime2 <- pbdb_richness (fossils2, rank = "genus", temporal_extent = c (MaxMA, MinMA), res = Res)
par (mar = c (4,5,2,1), las = 1, tck = -0.01, mgp = c (2.5,0.5,0))
Col_dino <- Cols [length (Cols)]
Col_mammal <- Cols[1]
LineWidth <- 2
plot (seq(to = MaxMA, from = MinMA, length.out = nrow (nspeciesOverTime)), nspeciesOverTime[,2], xlim = c (MaxMA, MinMA), type = "l", xlab = "age (millions of years ago)", ylab = "num. of species", col = Col_dino, lwd = LineWidth)
lines(seq(to=MaxMA, from=MinMA, length.out = nrow (nspeciesOverTime2)), nspeciesOverTime2[,2], col = Col_mammal, lwd = LineWidth)
legend("topleft", legend = c (Taxon, Taxon2), col = c (Col_dino, Col_mammal), bty = "n", lwd = LineWidth)
## NOTE(review): below, 'fossils2'/'nspeciesOverTime2' are overwritten by
## the Chordata download, so the Echinodermata richness is lost before it
## is ever plotted.
Taxon3 <- "Echinodermata"
MinMA <- 100
MaxMA <- 290
fossils2 <- pbdb_occurrences (base_name = Taxon3, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime2 <- pbdb_richness (fossils2, rank = "genus", temporal_extent = c (MaxMA, MinMA), res = Res)
Taxon4 <- "Chordata"
MinMA <- 100
MaxMA <- 290
fossils2 <- pbdb_occurrences (base_name = Taxon4, show = c ("phylo", "coords", "ident"), min_ma = MinMA, max_ma = MaxMA)
nspeciesOverTime2 <- pbdb_richness (fossils2, rank = "genus", temporal_extent = c (MaxMA, MinMA), res = Res)
par (mar = c (4,5,2,1), las = 1, tck = -0.01, mgp = c (2.5,0.5,0))
Col_echino <- Cols [length (Cols)]
Col_chorda <- Cols[1]
LineWidth <- 2
## NOTE(review): this plot draws the *dinosaur* series (nspeciesOverTime)
## with the dinosaur/mammal colours, yet the legend claims
## Echinodermata/Chordata with different colours -- the figure is wrong.
plot (seq(to = MaxMA, from = MinMA, length.out = nrow (nspeciesOverTime)), nspeciesOverTime[,2], xlim = c (MaxMA, MinMA), type = "l", xlab = "age (millions of years ago)", ylab = "num. of species", col = Col_dino, lwd = LineWidth)
lines(seq(to=MaxMA, from=MinMA, length.out = nrow (nspeciesOverTime2)), nspeciesOverTime2[,2], col = Col_mammal, lwd = LineWidth)
legend("topleft", legend = c (Taxon3, Taxon4), col = c (Col_echino, Col_chorda), bty = "n", lwd = LineWidth)
#My hypothesis is that the echinodermata and chordata growth rates are positively correlated from 200 million years ago to 100 million years ago. |
library(Hmisc)
library(alluvial)
library(data.table)
library(VennDiagram)
source('~/Scripts/Heatmap3.r')

##################
### CpGi to mC ###

## Cross-tabulate the CpG-island / mC correspondence calls and draw them
## as an alluvial (flow) diagram.
d <- read.table('output/CpGi_mC_Correspond.txt', stringsAsFactors = FALSE)
d23 <- table(paste(d[, 2], d[, 3]))
t23 <- cbind(matrix(unlist(strsplit(names(d23), ' ')), byrow = TRUE, ncol = 2), d23)
## Relabel: order matters -- replace 'NotCorr' first so the plain 'Corr'
## substitution does not clobber it.
t23 <- gsub('NotCorr', 'NS', t23)
t23 <- gsub('Corr', 'Sign', t23)

pdf('output/Alluvial.CpGi_mC_correspond.pdf', height = 4, width = 3) ### SUPPLEMENTARY FIGURE S1E
alluvial(t23[, c(1, 2)], freq = as.numeric(t23[, 3]), axis_labels = c('CpGi', 'mC'))
dev.off()

###### GSEAs

## Read all KEGG pathway lists (used to rename the pathways with
## lower-cases).  FIX: read every file with lapply() and bind once with
## do.call(rbind, ...) instead of growing KEGGs with rbind() in a loop
## (a quadratic-time anti-pattern); also use '<-' and TRUE throughout.
kegg_files <- list.files('HumanGenome/', pattern = 'PathwayLists')
KEGGs <- do.call(rbind, lapply(kegg_files, function(f) {
    read.table(paste('HumanGenome/', f, sep = ''), sep = '\t',
               stringsAsFactors = FALSE, colClasses = "character")
}))
rownames(KEGGs) <- KEGGs[, 2]

## Collect significant pathways (column 3 < 0.1) from each GSEA run.
myGSEAs <- list.files('output/GSEA/CpGi')
v <- NULL
v1 <- NULL
for (i in rev(myGSEAs)) {
    cPaths <- read.table(paste('output/GSEA/CpGi', i, 'output/output.txt', sep = '/'),
                         sep = '\t', header = TRUE, stringsAsFactors = FALSE)
    ## Drop rows containing any NA.
    toExclude <- unique(which(is.na(cPaths), arr.ind = TRUE)[, 1])
    if (length(toExclude) > 0) {
        cPaths <- cPaths[-toExclude, ]
    }
    Keggs <- cPaths[which(cPaths[, 3] < 0.1), ]
    ## Distance-class name, e.g. 'Proximal' from 'RankTads.Proximal'
    ## (hoisted: the original recomputed strsplit() three times).
    rank_name <- strsplit(i, '[.]')[[1]][2]
    v[[rank_name]] <- KEGGs[substr(Keggs[, 1], 1, 5), 3]
    v1[[rank_name]] <- Keggs[, 1:2]
    rownames(v1[[rank_name]]) <- KEGGs[substr(Keggs[, 1], 1, 5), 3]
}

## Enrichment-score matrix: pathways (rows) x distance classes (columns).
m <- matrix(0, nrow = length(unique(unlist(v))), ncol = length(names(v)))
rownames(m) <- unique(unlist(v))
colnames(m) <- gsub('RankTads.', '', names(v))
for (i in names(v)) {
    m[rownames(v1[[i]]), i] <- as.numeric(v1[[i]][, 2])
}
m <- m[, c('Proximal', 'Intermediate', 'Long', 'Whole')]
colnames(m) <- c('Proximal', 'Intermediate', 'Long', 'TAD-wide')
m <- m[order(m[, 1], m[, 2], m[, 3], m[, 4], decreasing = TRUE), ]

## Curated pathway subset shown in the figure, in display order.
w <- c("Cytokine-cytokine receptor interaction",
       "Phosphatidylinositol signaling system",
       "Necroptosis",
       "Biosynthesis of unsaturated fatty acids",
       "Endocytosis",
       "Fructose and mannose metabolism",
       "Inositol phosphate metabolism",
       "Lysosome",
       "N-Glycan biosynthesis",
       "Peroxisome",
       "Phagosome",
       "Sphingolipid metabolism",
       "Steroid biosynthesis",
       "Ribosome",
       "Spliceosome")
m <- m[w, ]

## Diverging navy/white/red palette for enrichment scores in [-2.5, 2.5].
hc <- colorRampPalette(c('navy', 'gray90', 'firebrick'))(21)
pdf('output/Heatmap.CpGiGseaTads.pdf', height = 10) ### SUPPLEMENTARY FIGURE S1G
heatmap.3(m, col = hc, margins = c(32, 28),
          breaks = seq(-2.5, 2.5, length.out = length(hc) + 1),
          dendrogram = 'n', Colv = 'n', Rowv = 'n',
          KeyValueName = 'Enrichment score')
dev.off()
| /Step02I.r | no_license | atelonis/MIRs-IDH12-DNMT3A-AMLs_2022 | R | false | false | 2,480 | r |
## Duplicate payload of Step02I.r (dataset 'text' column); code kept
## byte-identical, comments added only.
library(Hmisc)
library(alluvial)
library(data.table)
library(VennDiagram)
source('~/Scripts/Heatmap3.r')
##################
### CpGi to mC ###
## Cross-tabulate CpG-island / mC correspondence calls for the alluvial plot.
d=read.table('output/CpGi_mC_Correspond.txt',stringsAsFactors=FALSE)
d23=table(paste(d[,2],d[,3]))
t23=cbind( matrix(unlist(strsplit(names(d23),' ')),byrow=TRUE,ncol=2), d23 )
## Relabel; 'NotCorr' must be replaced before the plain 'Corr' substitution.
t23=gsub('NotCorr','NS',t23)
t23=gsub('Corr','Sign',t23)
pdf('output/Alluvial.CpGi_mC_correspond.pdf',height=4,width=3) ### SUPPLEMENTARY FIGURE S1E
alluvial(t23[,c(1,2)],freq=as.numeric(t23[,3]),axis_labels=c('CpGi','mC'))
dev.off()
###### GSEAs
## NOTE(review): rbind() inside the loop grows KEGGs quadratically; prefer
## do.call(rbind, lapply(...)).  Also prefer '<-' over '=' and TRUE over 'T'.
KEGGs=NULL ### This is to rename the pathways with lower-cases
for (i in list.files('HumanGenome/',pattern='PathwayLists')){
cKegg=read.table(paste('HumanGenome/',i,sep=''),sep='\t',
stringsAsFactors=FALSE,colClasses="character")
KEGGs=rbind(KEGGs,cKegg)
}
rownames(KEGGs)=KEGGs[,2]
## Collect significant pathways (column 3 < 0.1) from each GSEA run.
myGSEAs=list.files('output/GSEA/CpGi')
v=NULL
v1=NULL
for (i in rev(myGSEAs)){
cPaths=read.table(paste('output/GSEA/CpGi',i,'output/output.txt',sep='/'),
sep='\t',header=TRUE,stringsAsFactors=FALSE)
## Drop rows containing any NA.
toExclude=unique(which(is.na(cPaths),arr.ind=T)[,1])
if (length(toExclude)>0){
cPaths=cPaths[-toExclude,]
}
Keggs=cPaths[which(cPaths[,3]<0.1),]
## strsplit(i,'[.]')[[1]][2] is the distance-class name, e.g. 'Proximal'.
v[[strsplit(i,'[.]')[[1]][2]]]=KEGGs[substr(Keggs[,1],1,5),3]
v1[[strsplit(i,'[.]')[[1]][2]]]=Keggs[,1:2]
rownames(v1[[strsplit(i,'[.]')[[1]][2]]])=KEGGs[substr(Keggs[,1],1,5),3]
}
## Enrichment-score matrix: pathways (rows) x distance classes (columns).
m=matrix(0,nrow=length(unique(unlist(v))),ncol=length(names(v)))
rownames(m)=unique(unlist(v))
colnames(m)=gsub('RankTads.','',names(v))
for (i in names(v)){
m[rownames(v1[[i]]),i] = as.numeric(v1[[i]][,2])
}
m=m[,c('Proximal','Intermediate','Long','Whole')]
colnames(m)=c('Proximal','Intermediate','Long','TAD-wide')
m=m[order(m[,1],m[,2],m[,3],m[,4],decreasing=TRUE),]
## Curated pathway subset shown in the figure, in display order.
w=c("Cytokine-cytokine receptor interaction",
"Phosphatidylinositol signaling system",
"Necroptosis",
"Biosynthesis of unsaturated fatty acids",
"Endocytosis",
"Fructose and mannose metabolism",
"Inositol phosphate metabolism",
"Lysosome",
"N-Glycan biosynthesis",
"Peroxisome",
"Phagosome",
"Sphingolipid metabolism",
"Steroid biosynthesis",
"Ribosome",
"Spliceosome")
m=m[w,]
## Diverging navy/white/red palette for enrichment scores in [-2.5, 2.5].
hc=colorRampPalette(c('navy','gray90','firebrick'))(21)
pdf('output/Heatmap.CpGiGseaTads.pdf',height=10) ### SUPPLEMENTARY FIGURE S1G
heatmap.3(m,col=hc,margins=c(32,28),breaks=seq(-2.5,2.5,length.out=length(hc)+1),
dendrogram='n',Colv='n',Rowv='n',KeyValueName='Enrichment score')
dev.off()
|
\name{coef.lmSubsets}
\alias{coef.lmSubsets}
\alias{coef.lmSelect}
\title{Extract the coefficients from a subset regression}
\description{
Return the coefficients for the specified submodels.
}
\usage{
\method{coef}{lmSubsets}(object, size, best = 1, ..., na.rm = TRUE, drop = TRUE)
\method{coef}{lmSelect}(object, best = 1, ..., na.rm = TRUE, drop = TRUE)
}
\arguments{
\item{object}{\code{"lmSubsets"}, \code{"lmSelect"}---a subset
regression}
\item{size}{\code{integer[]}---the submodel sizes}
\item{best}{\code{integer[]}---the submodel positions}
\item{...}{ignored}
\item{na.rm}{\code{logical}---if \code{TRUE}, remove \code{NA}
entries}
\item{drop}{\code{logical}---if \code{TRUE}, simplify structure}
}
\value{
\code{double[,]}, \code{"data.frame"}---the submodel coefficients
}
\seealso{
\itemize{
\item{\code{\link[=lmSubsets]{lmSubsets()}} for all-subsets
regression}
\item{\code{\link[=lmSelect]{lmSelect()}} for best-subset
regression}
\item{\code{\link[=coef]{coef()}} for the S3 generic}
}
}
| /man/coef.lmSubsets.Rd | no_license | cran/lmSubsets | R | false | false | 1,091 | rd | \name{coef.lmSubsets}
\alias{coef.lmSubsets}
\alias{coef.lmSelect}
\title{Extract the coefficients from a subset regression}
\description{
Return the coefficients for the specified submodels.
}
\usage{
\method{coef}{lmSubsets}(object, size, best = 1, ..., na.rm = TRUE, drop = TRUE)
\method{coef}{lmSelect}(object, best = 1, ..., na.rm = TRUE, drop = TRUE)
}
\arguments{
\item{object}{\code{"lmSubsets"}, \code{"lmSelect"}---a subset
regression}
\item{size}{\code{integer[]}---the submodel sizes}
\item{best}{\code{integer[]}---the submodel positions}
\item{...}{ignored}
\item{na.rm}{\code{logical}---if \code{TRUE}, remove \code{NA}
entries}
\item{drop}{\code{logical}---if \code{TRUE}, simplify structure}
}
\value{
\code{double[,]}, \code{"data.frame"}---the submodel coefficients
}
\seealso{
\itemize{
\item{\code{\link[=lmSubsets]{lmSubsets()}} for all-subsets
regression}
\item{\code{\link[=lmSelect]{lmSelect()}} for best-subset
regression}
\item{\code{\link[=coef]{coef()}} for the S3 generic}
}
}
|
## Plot the first two principal components from an EIGENSOFT/smartpca
## .evec file.  Usage: Rscript plot.pca.r <file prefix>
## FIX: spell out TRUE ('T' is reassignable).
args <- commandArgs(trailingOnly = TRUE)
## Infix string-concatenation helper; paste0() replaces paste(sep = "").
"%&%" <- function(a, b) paste0(a, b)
header <- args[1]
pcsfile <- header %&% '.evec'
#popfile <- header %&% '.pop'
pcs <- read.table(pcsfile)
#pop <- read.table(popfile)
#colnames(pop) <- 'pop'
#new <- cbind(pcs,pop)
## V2/V3 are the first two eigenvector columns of the .evec file.
pdf(file = header %&% '.PCA.plot.pdf')
plot(pcs$V2, pcs$V3)
#legend('bottomright',legend=unique(new$pop),col=unique(new$pop),pch=1)
dev.off()
| /example_pipelines/1_preIMPUTE2_QC/plot.pca.r | no_license | yzharold/GWAS_QC | R | false | false | 403 | r | args <- commandArgs(trailingOnly=T)
## Duplicate payload of plot.pca.r (dataset 'text' column).  'args' is
## assigned on the preceding, metadata-fused line.  Code kept byte-identical.
## Infix string-concatenation helper (paste0() would be the modern form).
"%&%" <- function(a, b) paste(a, b, sep="")
header <- args[1]
pcsfile <- header %&% '.evec'
#popfile <- header %&% '.pop'
pcs <- read.table(pcsfile)
#pop <- read.table(popfile)
#colnames(pop) <- 'pop'
#new <- cbind(pcs,pop)
## V2/V3 are the first two eigenvector columns of the .evec file.
pdf(file=header %&% '.PCA.plot.pdf')
plot(pcs$V2,pcs$V3)
#legend('bottomright',legend=unique(new$pop),col=unique(new$pop),pch=1)
dev.off()
|
## Machine-generated bootstrap-replicate summary (replicate 346).
## NOTE(review): field meanings inferred from names -- confirm against the
## generating script.
seed <- 346
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
## Receive-interval breakpoints in seconds; each entry doubles the previous.
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
## Presumably null/residual deviance and degrees of freedom of the fit.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226229.2313917939
df.resid <- 35402
df <- 165
coefs <- c(6.601518068335809, 5.854996486879975, 5.801789349775871, 5.308501325008783, 5.0307735915881535, 4.825827535313658, 4.7637646766755, 4.654625178639051, 4.335280667047761, 4.272746323140234, 4.288157246264554, 4.139765105299556, 4.017862695980087, 3.9508173788362564, 3.7616685218495856, 3.499534088051676, 3.188963377726886, 2.8982613843532032, 2.4634649518027825, 2.0082579220751793, 1.6213673859515378, 0.9325479768221158, 1.0299096139166461, 0.4226173550219881, 0.12178969652231589, -0.867822684453502, -0.4858933762639743, 1.070790227660025, 1.1530303429882636, -1.3590379627578635, -2.247200778098213, -2.0697598086309372, -0.43377114765738123, 0.7787785347056181, 1.3251291024010947, -1.1918473847478643, 0.21213496129816867, -0.8487727310317762, 3.963154817959715e-2, -1.0281330025195565, 1.0267049882534685, 0.8925051450681762, -1.1231238129782708, -1.8135365594523574, -0.43969987283728457, -0.9314962579172811, -0.41968136950712864, 0.3053488100280217, 0.4512925531246379, -0.8643846805195815, -0.11210233592875697, 1.26122341597212, -2.7953332333635084, 1.7644186615296493, 0.7785491721677038, 1.0615421452894331, -2.48171481081541, -0.14412333178378756, -0.3576794229050256, 1.1650696103387672, 1.0726088108585186, 0.676100371315586, -1.296270599721031, -1.8135584099729016, -0.6250096078018085, 0.38336730557046955, 0.6765687324539505, -0.583768882782072, -1.6684949536900744, -0.6557197815902871, -1.7823532051927529, 0.10981350968892098, 0.44955509379736813, 0.9112358887452279, 0.6712180130481035, -0.6185150500427579, -1.5465221964926625, -1.1083098206695892, 8.524312042174654e-4, 0.6308446877960406, 1.2316726252549342, 0.15966352878686904, 8.114300251198218e-2, -1.6775033552025367, -0.7774871778648516, 0.2991629986273003, 1.2738832984045705, 0.3928437696473087, 0.931533012399579, -1.9898829670117897, 0.5449006931578331, 0.8279305206872498, 0.8438658847170755, 0.4060458943075301, -0.18034257220303612, 1.2513195088026696, -0.6846689658947187, 0.47635153511167955, 
-9.570352325720259e-2, -0.13681780322890294, 0.42209194261786565, -0.795825647251527, 0.774885808820014, 4.6460676043602114e-2, 0.6774947928365564, 0.8541743283610878, 1.1142288016015314, -0.3132135156384953, -7.354128786246736e-2, -0.9193189230917259, 0.3111378350158587, 0.7463146617984192, 1.6417720503387463, -1.3322607247290867, -0.33025409720819043, -1.0672703205040677, 0.8180141723444158, -0.34960421798826147, 0.3910929530663124, 0.5675283182632018, -0.37854391258809283, -0.6999864606715342, -1.1614993410840029, -0.24785523917634195, 0.33193540465939964, 1.0359193311486679, -1.4545341514512998e-2, 0.957923521072521, -0.4891414178840062, -0.39835241833974006, 0.3431585746882398, 0.9249499515169605, 0.7798302393414069, 0.4869438016006629, -1.4715014937491831e-2, 1.2623942479037056, -0.2498491171077911, 1.1415857364424433, 0.7553814345364386, 0.9080913990575724, 0.8649696273684251, -0.6949331582734755, -1.341477949271746, 0.7054614843147038, 0.39841481421937774, 0.5167799773386114, -0.31221128930893194, -0.5811727870262903, -1.86144803024095, 1.4286626800331446, 0.21201489131582965, 1.1463786122907351, -0.12333714364483388, 4.301890290942462e-2, -0.20312752910606152, -2.269544292386349, -1.5713848249516549, 0.8337109070289505, 1.1825247858531331, -0.30751403101479186, 1.571094443749323, -6.114173737298122e-2, -2.8029133856755567e-2, -4.483599086770157e-2, 1.2001422440075182)
| /analysis/boot/boot346.R | no_license | patperry/interaction-proc | R | false | false | 3,757 | r | seed <- 346
## Duplicate payload of boot346.R (dataset 'text' column); 'seed' sits on
## the preceding, metadata-fused line.  Machine-generated values.
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
## Receive-interval breakpoints in seconds; each entry doubles the previous.
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
## Presumably null/residual deviance and degrees of freedom of the fit.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226229.2313917939
df.resid <- 35402
df <- 165
coefs <- c(6.601518068335809, 5.854996486879975, 5.801789349775871, 5.308501325008783, 5.0307735915881535, 4.825827535313658, 4.7637646766755, 4.654625178639051, 4.335280667047761, 4.272746323140234, 4.288157246264554, 4.139765105299556, 4.017862695980087, 3.9508173788362564, 3.7616685218495856, 3.499534088051676, 3.188963377726886, 2.8982613843532032, 2.4634649518027825, 2.0082579220751793, 1.6213673859515378, 0.9325479768221158, 1.0299096139166461, 0.4226173550219881, 0.12178969652231589, -0.867822684453502, -0.4858933762639743, 1.070790227660025, 1.1530303429882636, -1.3590379627578635, -2.247200778098213, -2.0697598086309372, -0.43377114765738123, 0.7787785347056181, 1.3251291024010947, -1.1918473847478643, 0.21213496129816867, -0.8487727310317762, 3.963154817959715e-2, -1.0281330025195565, 1.0267049882534685, 0.8925051450681762, -1.1231238129782708, -1.8135365594523574, -0.43969987283728457, -0.9314962579172811, -0.41968136950712864, 0.3053488100280217, 0.4512925531246379, -0.8643846805195815, -0.11210233592875697, 1.26122341597212, -2.7953332333635084, 1.7644186615296493, 0.7785491721677038, 1.0615421452894331, -2.48171481081541, -0.14412333178378756, -0.3576794229050256, 1.1650696103387672, 1.0726088108585186, 0.676100371315586, -1.296270599721031, -1.8135584099729016, -0.6250096078018085, 0.38336730557046955, 0.6765687324539505, -0.583768882782072, -1.6684949536900744, -0.6557197815902871, -1.7823532051927529, 0.10981350968892098, 0.44955509379736813, 0.9112358887452279, 0.6712180130481035, -0.6185150500427579, -1.5465221964926625, -1.1083098206695892, 8.524312042174654e-4, 0.6308446877960406, 1.2316726252549342, 0.15966352878686904, 8.114300251198218e-2, -1.6775033552025367, -0.7774871778648516, 0.2991629986273003, 1.2738832984045705, 0.3928437696473087, 0.931533012399579, -1.9898829670117897, 0.5449006931578331, 0.8279305206872498, 0.8438658847170755, 0.4060458943075301, -0.18034257220303612, 1.2513195088026696, -0.6846689658947187, 0.47635153511167955, 
-9.570352325720259e-2, -0.13681780322890294, 0.42209194261786565, -0.795825647251527, 0.774885808820014, 4.6460676043602114e-2, 0.6774947928365564, 0.8541743283610878, 1.1142288016015314, -0.3132135156384953, -7.354128786246736e-2, -0.9193189230917259, 0.3111378350158587, 0.7463146617984192, 1.6417720503387463, -1.3322607247290867, -0.33025409720819043, -1.0672703205040677, 0.8180141723444158, -0.34960421798826147, 0.3910929530663124, 0.5675283182632018, -0.37854391258809283, -0.6999864606715342, -1.1614993410840029, -0.24785523917634195, 0.33193540465939964, 1.0359193311486679, -1.4545341514512998e-2, 0.957923521072521, -0.4891414178840062, -0.39835241833974006, 0.3431585746882398, 0.9249499515169605, 0.7798302393414069, 0.4869438016006629, -1.4715014937491831e-2, 1.2623942479037056, -0.2498491171077911, 1.1415857364424433, 0.7553814345364386, 0.9080913990575724, 0.8649696273684251, -0.6949331582734755, -1.341477949271746, 0.7054614843147038, 0.39841481421937774, 0.5167799773386114, -0.31221128930893194, -0.5811727870262903, -1.86144803024095, 1.4286626800331446, 0.21201489131582965, 1.1463786122907351, -0.12333714364483388, 4.301890290942462e-2, -0.20312752910606152, -2.269544292386349, -1.5713848249516549, 0.8337109070289505, 1.1825247858531331, -0.30751403101479186, 1.571094443749323, -6.114173737298122e-2, -2.8029133856755567e-2, -4.483599086770157e-2, 1.2001422440075182)
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# UI for the censusVis demographic-map app: a variable selector and a
# percentage-range slider feed two text outputs rendered by server.R.
# FIXES: corrected user-facing typos in helpText ("demographc" ->
# "demographic", "informaton" -> "information") and replaced the stale
# histogram-template comments that did not describe this UI.
shinyUI(fluidPage(

  # Application title
  titlePanel("censusVis"),

  # Sidebar with the census-variable selector and range slider
  sidebarLayout(
    sidebarPanel(
      helpText("Create demographic maps with information from the 2010 US Census."),

      selectInput("var",
                  label = "Choose a variable to display",
                  choices = c("Percent White",
                              "Percent Black",
                              "Percent Hispanic",
                              "Percent Asian"),
                  selected = "Percent White"),

      sliderInput("range",
                  label = "Range of interest:",
                  min = 0, max = 100, value = c(0, 100))
    ),

    # Echo the selected variable and range (rendered by server.R)
    mainPanel(
      textOutput("selected_var"),
      textOutput("selected_range")
    )
  )
))
| /Data Product/reactiveOutput/ui.R | no_license | jlpeng75/Coursera | R | false | false | 1,227 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# UI for the censusVis demographic-map app: a variable selector and a
# range slider feed two text outputs rendered by server.R.
shinyUI(fluidPage(
# Application title
titlePanel("censusVis"),
# Sidebar with the census-variable selector and range slider
sidebarLayout(
sidebarPanel(
# NOTE(review): user-facing typos in the string below -- "demographc"
# should be "demographic" and "informaton" should be "information".
helpText("Create demographc maps with
informaton from the 2010 US Census."),
selectInput("var",
label = "Choose a variable to display",
choices = c("Percent White",
"Percent Black",
"Percent Hispanic",
"Percent Asian"),
selected = "Percent White"),
sliderInput("range",
label = "Range of interest:",
min = 0, max = 100, value = c(0,100))
),
# Echo the selected variable and range (rendered by server.R)
mainPanel(
textOutput("selected_var"),
textOutput("selected_range")
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workbench_icon.r
\name{simpleAmigaIcon}
\alias{simpleAmigaIcon}
\title{Create simple AmigaIcon objects}
\usage{
simpleAmigaIcon(
version = c("OS1.x", "OS2.x"),
type = c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT", "WBGARBAGE", "WBDEVICE",
"WBKICK", "WBAPPICON"),
two.images = TRUE,
back.fill = FALSE,
...
)
}
\arguments{
\item{version}{A \code{character} string indicating the Amiga OS version
with which the icon should be compatible. "\code{OS2.x}" indicates
>=OS2.0 and "\code{OS1.x}" indicates <OS2.0.}
\item{type}{A \code{character} string indicating the type of object (file, disk, directory, etc.)
the icon should represent. See the `Usage' section for all possible options.}
\item{two.images}{A single \code{logical} value, indicating whether
the selected icon is depicted as a second image (in which case the
icon contains two images). The default value is \code{TRUE}.}
\item{back.fill}{A single \code{logical} value, indicating whether
the selected image of the icon should use the `back fill' mode (default).
If set to \code{FALSE} `complement' mode is used. Note that
back fill is not compatible when the icon holds two images. In the
`complement' mode, the image colours are inverted when selected.
In the `back fill' mode, the exterior first colour is not inverted.}
\item{...}{Reserved for additional arguments. Currently ignored.}
}
\value{
Returns a simple S3 object of class \code{\link{AmigaIcon}}.
}
\description{
Graphical representations of files and directories (icons) are stored as
separate files (with the .info extension) on the Amiga. This function creates
basic \code{\link{AmigaIcon}} class objects representing such files.
}
\details{
This function creates basic \code{\link{AmigaIcon}} objects which
can be modified afterwards. It uses simple generic images to represent
different types of files or directories.
}
\examples{
\dontrun{
## Create an AmigaIcon object using the default arguments:
icon <- simpleAmigaIcon()
}
}
\seealso{
Other AmigaIcon.operations:
\code{\link{AmigaIcon}},
\code{\link{rawToAmigaIcon}()},
\code{\link{read.AmigaIcon}()},
\code{\link{write.AmigaIcon}()}
Other raw.operations:
\code{\link{as.AmigaBasic}()},
\code{\link{as.raw.AmigaBasic}()},
\code{\link{colourToAmigaRaw}()},
\code{\link{packBitmap}()},
\code{\link{rawToAmigaBasicBMAP}()},
\code{\link{rawToAmigaBasicShape}()},
\code{\link{rawToAmigaBasic}()},
\code{\link{rawToAmigaBitmapFontSet}()},
\code{\link{rawToAmigaBitmapFont}()},
\code{\link{rawToAmigaIcon}()},
\code{\link{rawToHWSprite}()},
\code{\link{rawToIFFChunk}()},
\code{\link{rawToSysConfig}()}
}
\author{
Pepijn de Vries
}
\concept{AmigaIcon.operations}
\concept{raw.operations}
| /man/simpleAmigaIcon.Rd | no_license | pepijn-devries/AmigaFFH | R | false | true | 2,734 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workbench_icon.r
\name{simpleAmigaIcon}
\alias{simpleAmigaIcon}
\title{Create simple AmigaIcon objects}
\usage{
simpleAmigaIcon(
version = c("OS1.x", "OS2.x"),
type = c("WBDISK", "WBDRAWER", "WBTOOL", "WBPROJECT", "WBGARBAGE", "WBDEVICE",
"WBKICK", "WBAPPICON"),
two.images = TRUE,
back.fill = FALSE,
...
)
}
\arguments{
\item{version}{A \code{character} string indicating the Amiga OS version
with which the icon should be compatible. "\code{OS2.x}" indicates
>=OS2.0 and "\code{OS1.x}" indicates <OS2.0.}
\item{type}{A \code{character} string indicating the type of object (file, disk, directory, etc.)
the icon should represent. See the `Usage' section for all possible options.}
\item{two.images}{A single \code{logical} value, indicating whether
the selected icon is depicted as a second image (in which case the
icon contains two images). The default value is \code{TRUE}.}
\item{back.fill}{A single \code{logical} value, indicating whether
the selected image of the icon should use the `back fill' mode (default).
If set to \code{FALSE} `complement' mode is used. Note that
back fill is not compatible when the icon holds two images. In the
`complement' mode, the image colours are inverted when selected.
In the `back fill' mode, the exterior first colour is not inverted.}
\item{...}{Reserved for additional arguments. Currently ignored.}
}
\value{
Returns a simple S3 object of class \code{\link{AmigaIcon}}.
}
\description{
Graphical representations of files and directories (icons) are stored as
separate files (with the .info extension) on the Amiga. This function creates
basic \code{\link{AmigaIcon}} class objects representing such files.
}
\details{
This function creates basic \code{\link{AmigaIcon}} objects which
can be modified afterwards. It uses simple generic images to represent
different types of files or directories.
}
\examples{
\dontrun{
## Create an AmigaIcon object using the default arguments:
icon <- simpleAmigaIcon()
}
}
\seealso{
Other AmigaIcon.operations:
\code{\link{AmigaIcon}},
\code{\link{rawToAmigaIcon}()},
\code{\link{read.AmigaIcon}()},
\code{\link{write.AmigaIcon}()}
Other raw.operations:
\code{\link{as.AmigaBasic}()},
\code{\link{as.raw.AmigaBasic}()},
\code{\link{colourToAmigaRaw}()},
\code{\link{packBitmap}()},
\code{\link{rawToAmigaBasicBMAP}()},
\code{\link{rawToAmigaBasicShape}()},
\code{\link{rawToAmigaBasic}()},
\code{\link{rawToAmigaBitmapFontSet}()},
\code{\link{rawToAmigaBitmapFont}()},
\code{\link{rawToAmigaIcon}()},
\code{\link{rawToHWSprite}()},
\code{\link{rawToIFFChunk}()},
\code{\link{rawToSysConfig}()}
}
\author{
Pepijn de Vries
}
\concept{AmigaIcon.operations}
\concept{raw.operations}
|
# Tests for the R6 generator's $set() method: adding new members to an
# existing generator, overwrite semantics, and generator locking.
context("set")
test_that("Setting values set values on generator", {
# Base class with one member of each kind (public field, public method,
# private field, private method, active binding).
AC <- R6Class("AC",
public = list(
x = 1,
getxyz = function() self$x + private$y + private$z()
),
private = list(
y = 2,
z = function() 3
),
active = list(
x2 = function(value) {
# NOTE(review): `self$x <<- value/2` uses superassignment on a field;
# it resolves `self` in the enclosing environment, so it behaves like
# `self$x <-` here - confirm the `<<-` is intentional.
if (missing(value)) return(self$x * 2)
else self$x <<- value/2
}
)
)
# Can set new names
AC$set("public", "nx", 10)
AC$set("public", "ngetxyz", function() self$nx + private$ny + private$nz())
AC$set("private", "ny", 20)
AC$set("private", "nz", function() 30)
AC$set("active", "nx2", function(value) {
if (missing(value)) return(self$nx * 2)
else self$nx <<- value/2
})
# New members are visible on instances created after $set()
A <- AC$new()
expect_identical(A$nx, 10)
# 10 + 20 + 30
expect_identical(A$ngetxyz(), 60)
# active binding doubles nx
expect_identical(A$nx2, 20)
# Can't set existing names
expect_error(AC$set("public", "x", 99))
expect_error(AC$set("public", "getxyz", function() 99))
expect_error(AC$set("private", "y", 99))
expect_error(AC$set("private", "z", function() 99))
expect_error(AC$set("active", "x2", function(value) 99))
# Can't set existing names in different group
expect_error(AC$set("private", "x", 99))
expect_error(AC$set("private", "getxyz", function() 99))
expect_error(AC$set("active", "y", 99))
expect_error(AC$set("public", "z", function() 99))
expect_error(AC$set("private", "x2", function(value) 99))
# Can set existing names if overwrite = TRUE
AC$set("public", "x", 99, overwrite = TRUE)
AC$set("public", "getxyz", function() 99, overwrite = TRUE)
AC$set("private", "y", 99, overwrite = TRUE)
AC$set("private", "z", function() 99, overwrite = TRUE)
AC$set("active", "x2", function(value) 99, overwrite = TRUE)
# Can't set existing names in different group, even if overwrite = TRUE
expect_error(AC$set("private", "x", 99, overwrite = TRUE))
expect_error(AC$set("private", "getxyz", function() 99, overwrite = TRUE))
expect_error(AC$set("active", "y", 99, overwrite = TRUE))
expect_error(AC$set("public", "z", function() 99, overwrite = TRUE))
expect_error(AC$set("private", "x2", function(value) 99, overwrite = TRUE))
})
test_that("Setting values with empty public or private", {
# $set() must work even when the generator declared no members at all.
AC <- R6Class("AC",
public = list(),
private = list()
)
AC$set("public", "x", 1)
AC$set("private", "y", 1)
AC$set("public", "gety", function() private$y)
a <- AC$new()
expect_identical(a$x, 1)
expect_identical(a$gety(), 1)
})
test_that("Locked class", {
# A generator created with lock_class = TRUE rejects $set() until
# unlocked, and can be re-locked afterwards.
AC <- R6Class("AC", lock_class = TRUE)
expect_error(AC$set("public", "x", 1))
expect_error(AC$set("private", "x", 1))
expect_true(AC$is_locked())
AC$unlock()
expect_false(AC$is_locked())
AC$set("public", "x", 1)
AC$lock()
expect_error(AC$set("public", "x", 2))
})
test_that("Assigning NULL values", {
# A NULL member must be stored as an entry named "x" (not dropped), so it
# appears in names() and reads back as NULL.
AC <- R6Class("AC",
public = list(),
private = list()
)
AC$set("public", "x", NULL)
a <- AC$new()
expect_true("x" %in% names(a))
expect_identical(a$x, NULL)
})
| /packrat/lib/x86_64-pc-linux-gnu/3.6.3/R6/tests/testthat/test-set.R | permissive | sdroldan/ColaboRando | R | false | false | 3,018 | r | context("set")
# Tests for the R6 generator's $set() method: adding new members to an
# existing generator, overwrite semantics, and generator locking.
test_that("Setting values set values on generator", {
# Base class with one member of each kind (public field, public method,
# private field, private method, active binding).
AC <- R6Class("AC",
public = list(
x = 1,
getxyz = function() self$x + private$y + private$z()
),
private = list(
y = 2,
z = function() 3
),
active = list(
x2 = function(value) {
# NOTE(review): `self$x <<- value/2` uses superassignment on a field;
# it resolves `self` in the enclosing environment, so it behaves like
# `self$x <-` here - confirm the `<<-` is intentional.
if (missing(value)) return(self$x * 2)
else self$x <<- value/2
}
)
)
# Can set new names
AC$set("public", "nx", 10)
AC$set("public", "ngetxyz", function() self$nx + private$ny + private$nz())
AC$set("private", "ny", 20)
AC$set("private", "nz", function() 30)
AC$set("active", "nx2", function(value) {
if (missing(value)) return(self$nx * 2)
else self$nx <<- value/2
})
# New members are visible on instances created after $set()
A <- AC$new()
expect_identical(A$nx, 10)
# 10 + 20 + 30
expect_identical(A$ngetxyz(), 60)
# active binding doubles nx
expect_identical(A$nx2, 20)
# Can't set existing names
expect_error(AC$set("public", "x", 99))
expect_error(AC$set("public", "getxyz", function() 99))
expect_error(AC$set("private", "y", 99))
expect_error(AC$set("private", "z", function() 99))
expect_error(AC$set("active", "x2", function(value) 99))
# Can't set existing names in different group
expect_error(AC$set("private", "x", 99))
expect_error(AC$set("private", "getxyz", function() 99))
expect_error(AC$set("active", "y", 99))
expect_error(AC$set("public", "z", function() 99))
expect_error(AC$set("private", "x2", function(value) 99))
# Can set existing names if overwrite = TRUE
AC$set("public", "x", 99, overwrite = TRUE)
AC$set("public", "getxyz", function() 99, overwrite = TRUE)
AC$set("private", "y", 99, overwrite = TRUE)
AC$set("private", "z", function() 99, overwrite = TRUE)
AC$set("active", "x2", function(value) 99, overwrite = TRUE)
# Can't set existing names in different group, even if overwrite = TRUE
expect_error(AC$set("private", "x", 99, overwrite = TRUE))
expect_error(AC$set("private", "getxyz", function() 99, overwrite = TRUE))
expect_error(AC$set("active", "y", 99, overwrite = TRUE))
expect_error(AC$set("public", "z", function() 99, overwrite = TRUE))
expect_error(AC$set("private", "x2", function(value) 99, overwrite = TRUE))
})
test_that("Setting values with empty public or private", {
# $set() must work even when the generator declared no members at all.
AC <- R6Class("AC",
public = list(),
private = list()
)
AC$set("public", "x", 1)
AC$set("private", "y", 1)
AC$set("public", "gety", function() private$y)
a <- AC$new()
expect_identical(a$x, 1)
expect_identical(a$gety(), 1)
})
test_that("Locked class", {
# A generator created with lock_class = TRUE rejects $set() until
# unlocked, and can be re-locked afterwards.
AC <- R6Class("AC", lock_class = TRUE)
expect_error(AC$set("public", "x", 1))
expect_error(AC$set("private", "x", 1))
expect_true(AC$is_locked())
AC$unlock()
expect_false(AC$is_locked())
AC$set("public", "x", 1)
AC$lock()
expect_error(AC$set("public", "x", 2))
})
test_that("Assigning NULL values", {
# A NULL member must be stored as an entry named "x" (not dropped), so it
# appears in names() and reads back as NULL.
AC <- R6Class("AC",
public = list(),
private = list()
)
AC$set("public", "x", NULL)
a <- AC$new()
expect_true("x" %in% names(a))
expect_identical(a$x, NULL)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_list_invitations}
\alias{securityhub_list_invitations}
\title{Lists all Security Hub membership invitations that were sent to the
current AWS account}
\usage{
securityhub_list_invitations(MaxResults, NextToken)
}
\arguments{
\item{MaxResults}{The maximum number of items that you want in the response.}
\item{NextToken}{Paginates results. On your first call to the \code{ListInvitations}
operation, set the value of this parameter to \code{NULL}. For subsequent
calls to the operation, fill \code{nextToken} in the request with the value
of \code{NextToken} from the previous response to continue listing data.}
}
\description{
Lists all Security Hub membership invitations that were sent to the
current AWS account.
}
\section{Request syntax}{
\preformatted{svc$list_invitations(
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/securityhub_list_invitations.Rd | permissive | johnnytommy/paws | R | false | true | 972 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_list_invitations}
\alias{securityhub_list_invitations}
\title{Lists all Security Hub membership invitations that were sent to the
current AWS account}
\usage{
securityhub_list_invitations(MaxResults, NextToken)
}
\arguments{
\item{MaxResults}{The maximum number of items that you want in the response.}
\item{NextToken}{Paginates results. On your first call to the \code{ListInvitations}
operation, set the value of this parameter to \code{NULL}. For subsequent
calls to the operation, fill \code{nextToken} in the request with the value
of \code{NextToken} from the previous response to continue listing data.}
}
\description{
Lists all Security Hub membership invitations that were sent to the
current AWS account.
}
\section{Request syntax}{
\preformatted{svc$list_invitations(
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
urlike <-
  function(tvec,
           mut1,
           sigmat1,
           mut2,
           sigmat2,
           mur1,
           sigmar1,
           mur2,
           sigmar2,
           rho,
           kprint = 0) {
    # Thin wrapper around the SURLIKE routine (defined elsewhere in the
    # package): evaluates it at each time in `tvec` and returns the
    # resulting `answer` vector (same length as `tvec`).
    #
    # The location/scale pairs (mut*/sigmat*, mur*/sigmar*), the correlation
    # `rho`, and the print flag `kprint` (0 = silent) are passed through
    # unchanged, coerced to the types SURLIKE expects.
    zout <- SURLIKE(t = as.double(tvec),
                    nt = as.integer(length(tvec)),
                    mut1 = as.double(mut1),
                    sigmat1 = as.double(sigmat1),
                    mut2 = as.double(mut2),
                    sigmat2 = as.double(sigmat2),
                    mur1 = as.double(mur1),
                    sigmar1 = as.double(sigmar1),
                    mur2 = as.double(mur2),
                    sigmar2 = as.double(sigmar2),
                    rho = as.double(rho),
                    answer = double(length(tvec)),
                    # BUG FIX: was `as.integer(kprimt)` - `kprimt` is
                    # undefined, so every call failed with
                    # "object 'kprimt' not found".
                    kprint = as.integer(kprint))
    zout$answer
  }
| /R/urlike.R | no_license | anhnguyendepocen/SMRD | R | false | false | 875 | r | urlike <-
function(tvec,
         mut1,
         sigmat1,
         mut2,
         sigmat2,
         mur1,
         sigmar1,
         mur2,
         sigmar2,
         rho,
         kprint = 0) {
  # Thin wrapper around the SURLIKE routine (defined elsewhere in the
  # package): evaluates it at each time in `tvec` and returns the
  # resulting `answer` vector (same length as `tvec`).
  #
  # The location/scale pairs (mut*/sigmat*, mur*/sigmar*), the correlation
  # `rho`, and the print flag `kprint` (0 = silent) are passed through
  # unchanged, coerced to the types SURLIKE expects.
  zout <- SURLIKE(t = as.double(tvec),
                  nt = as.integer(length(tvec)),
                  mut1 = as.double(mut1),
                  sigmat1 = as.double(sigmat1),
                  mut2 = as.double(mut2),
                  sigmat2 = as.double(sigmat2),
                  mur1 = as.double(mur1),
                  sigmar1 = as.double(sigmar1),
                  mur2 = as.double(mur2),
                  sigmar2 = as.double(sigmar2),
                  rho = as.double(rho),
                  answer = double(length(tvec)),
                  # BUG FIX: was `as.integer(kprimt)` - `kprimt` is
                  # undefined, so every call failed with
                  # "object 'kprimt' not found".
                  kprint = as.integer(kprint))
  zout$answer
}
|
# Q2
# Monte-Carlo price simulation for question 2.
# NOTE(review): this function has several apparent defects; they are flagged
# inline rather than fixed because the intended behaviour is ambiguous.
price_q2=function(q2) {
len_q2=dim(q2)[1]
wid=rowSums(!is.na(q2))[1] # excluding NA elements
# Column extraction from the input table: maturities, initial prices,
# strikes, volatilities, correlations (per the column positions used).
t=(q2[,4])
x0=(q2[,6:7])
k=(q2[,8])
sigma=(q2[,9:10])
rho=q2[,3]
x_calc=function(t,x0,k,sigma){
n=10^5 # no of monte carlo simulations
# random number generation
library('MASS')
mu=(rep(0,9))
# NOTE(review): mu has length 9 but the covariance matrix built below is
# 2x2 - mvrnorm requires length(mu) == nrow(Sigma), so this call will
# fail; presumably rep(0, 2) was intended.
rho=0.01
# NOTE(review): this hard-coded rho shadows the rho column extracted from
# q2 in the enclosing scope - confirm which value is intended.
sigma=matrix(data=rho,nrow=2,ncol=2)
# NOTE(review): this 2x2 correlation matrix also shadows the sigma
# argument (the volatilities q2[,9:10]), so the GBM formula below no
# longer sees the volatilities - likely a distinct name was intended.
for (i in 1:2)
{
sigma[i,i]=1
}
sigma
z=mvrnorm(n=10^5,mu,sigma,tol=10^(-6))
z=t(z)
# t(z) above still dispatches to base::t (R skips non-function bindings
# when resolving a call), but the local numeric `t` makes this confusing.
x=matrix(data=NA,nrow=len_q1,ncol=n)
# NOTE(review): len_q1 is undefined - presumably len_q2 from the
# enclosing scope was intended.
for(i in 1:len_q1){
x[i,]=x0[i]*exp(sigma[i]*sqrt(t[i])*y1-((sigma[i])^2)*t[i]/2)
# NOTE(review): y1 is undefined - presumably a row of z (e.g. z[i,])
# was intended as the standard-normal draw in this GBM step.
}
return(x)
}
x=x_calc(t,x0,k,sigma)
return(x)
}
| /question3/fun_Q2.R | no_license | athulvijayan6/gs-quantify-2k15 | R | false | false | 754 | r | # Q2
# Monte-Carlo price simulation for question 2.
# NOTE(review): this function has several apparent defects; they are flagged
# inline rather than fixed because the intended behaviour is ambiguous.
price_q2=function(q2) {
len_q2=dim(q2)[1]
wid=rowSums(!is.na(q2))[1] # excluding NA elements
# Column extraction from the input table: maturities, initial prices,
# strikes, volatilities, correlations (per the column positions used).
t=(q2[,4])
x0=(q2[,6:7])
k=(q2[,8])
sigma=(q2[,9:10])
rho=q2[,3]
x_calc=function(t,x0,k,sigma){
n=10^5 # no of monte carlo simulations
# random number generation
library('MASS')
mu=(rep(0,9))
# NOTE(review): mu has length 9 but the covariance matrix built below is
# 2x2 - mvrnorm requires length(mu) == nrow(Sigma), so this call will
# fail; presumably rep(0, 2) was intended.
rho=0.01
# NOTE(review): this hard-coded rho shadows the rho column extracted from
# q2 in the enclosing scope - confirm which value is intended.
sigma=matrix(data=rho,nrow=2,ncol=2)
# NOTE(review): this 2x2 correlation matrix also shadows the sigma
# argument (the volatilities q2[,9:10]), so the GBM formula below no
# longer sees the volatilities - likely a distinct name was intended.
for (i in 1:2)
{
sigma[i,i]=1
}
sigma
z=mvrnorm(n=10^5,mu,sigma,tol=10^(-6))
z=t(z)
# t(z) above still dispatches to base::t (R skips non-function bindings
# when resolving a call), but the local numeric `t` makes this confusing.
x=matrix(data=NA,nrow=len_q1,ncol=n)
# NOTE(review): len_q1 is undefined - presumably len_q2 from the
# enclosing scope was intended.
for(i in 1:len_q1){
x[i,]=x0[i]*exp(sigma[i]*sqrt(t[i])*y1-((sigma[i])^2)*t[i]/2)
# NOTE(review): y1 is undefined - presumably a row of z (e.g. z[i,])
# was intended as the standard-normal draw in this GBM step.
}
return(x)
}
x=x_calc(t,x0,k,sigma)
return(x)
}
|
\name{CalculateCSD}
\alias{CalculateCSD}
\title{Calculates the Current Source Density estimates of EEG/ERP data.}
\usage{
CalculateCSD(eeg.data, G, H, head.radius = 1,
lambda = 10^-5, scalp.potentials = TRUE)
}
\arguments{
\item{eeg.data}{Matrix containing the data to be
transformed.}
\item{G}{The transformation matrix for the interpolation
of the surface potentials, as calculated by function
\code{\link{GetGH}}.}
\item{H}{The transformation matrix for the calculation of
the Current Source Density (CSD) estimates, as calculated
by function \code{\link{GetGH}}.}
\item{head.radius}{Head radius. Defaults to 1 for unit
sphere [µV/m²]). Specify a value (in cm) to rescale CSD
data to smaller units [µV/cm²] (e.g., use 10.0 to scale
to more realistic head size).}
\item{lambda}{Smoothing constant lambda for the spherical
spline interpolation. Defaults to 0.00001.}
\item{scalp.potentials}{Logical value. \code{TRUE}
determines the spherical spline surface potential
interpolation on top of the calculation of the CSD
estimates. \code{FALSE} makes the function return only
the CSD estimates. Defaults to \code{TRUE}.}
}
\value{
List containing the spherical spline interpolation of the
CSD and surface potential data (in case the latter was
requested), with the following fields:
\describe{
\item{csd}{Matrix containing the CSD transformed data.}
\item{surface.potential}{Matrix containing the
interpolation of the surface potential data, or NA, in
case this was not requested by the user.}
}
}
\description{
Calculates the Current Source Density (CSD) estimates of
EEG/ERP data by means of the spherical spline
interpolation algorithm developed by Perrin et al (1989,
1990).
}
\examples{
\dontrun{
# From the original MATLAB CSD Toolbox tutorial
m.example <- ExtractMontage(user.labels = colnames(NR.C66.trr))
gh.example <- GetGH(m.example)
csd.data <- CalculateCSD(eeg.data = t(NR.C66.trr), G = gh.example$G,
H = gh.example$H
)
# Should reproduce Figures 16 of the original MATLAB CSD TOOLBOX tutorial:
# http://psychophysiology.cpmc.columbia.edu/software/CSDtoolbox/tutorial.html
matplot(t(csd.data$csd), type = "l", lty = 1, main = "Figure 16",
ylim = c(-20, 40)
)
# Should reproduce Figures 17 of the original MATLAB CSD TOOLBOX tutorial
matplot(t(csd.data$csd)[, c(14,24)], type = "l", col = c("blue", "green"),
lty = 1, main = "Figure 17", ylim = c(-10, 35)
)
# Should reproduce Figures 18 of the original MATLAB CSD TOOLBOX tutorial
matplot(NR.C66.trr, type = "l", lty = 1, main = "Figure 18",
ylim = c(-10, 20)
)
# Should reproduce Figures 19 of the original MATLAB CSD TOOLBOX tutorial
matplot(NR.C66.trr[, c(14,24)], type = "l", col = c("blue", "green"),
lty = 1, main = "Figure 19", ylim = c(-10, 20)
)
}
}
\seealso{
\code{\link{GetGH}}
}
| /man/CalculateCSD.Rd | no_license | diogo-almeida/csd.toolbox | R | false | false | 2,862 | rd | \name{CalculateCSD}
\alias{CalculateCSD}
\title{Calculates the Current Source Density estimates of EEG/ERP data.}
\usage{
CalculateCSD(eeg.data, G, H, head.radius = 1,
lambda = 10^-5, scalp.potentials = TRUE)
}
\arguments{
\item{eeg.data}{Matrix containing the data to be
transformed.}
\item{G}{The transformation matrix for the interpolation
of the surface potentials, as calculated by function
\code{\link{GetGH}}.}
\item{H}{The transformation matrix for the calculation of
the Current Source Density (CSD) estimates, as calculated
by function \code{\link{GetGH}}.}
\item{head.radius}{Head radius. Defaults to 1 for unit
sphere [µV/m²]). Specify a value (in cm) to rescale CSD
data to smaller units [µV/cm²] (e.g., use 10.0 to scale
to more realistic head size).}
\item{lambda}{Smoothing constant lambda for the spherical
spline interpolation. Defaults to 0.00001.}
\item{scalp.potentials}{Logical value. \code{TRUE}
determines the spherical spline surface potential
interpolation on top of the calculation of the CSD
estimates. \code{FALSE} makes the function return only
the CSD estimates. Defaults to \code{TRUE}.}
}
\value{
List containing the spherical spline interpolation of the
CSD and surface potential data (in case the latter was
requested), with the following fields:
\describe{
\item{csd}{Matrix containing the CSD transformed data.}
\item{surface.potential}{Matrix containing the
interpolation of the surface potential data, or NA, in
case this was not requested by the user.}
}
}
\description{
Calculates the Current Source Density (CSD) estimates of
EEG/ERP data by means of the spherical spline
interpolation algorithm developed by Perrin et al (1989,
1990).
}
\examples{
\dontrun{
# From the original MATLAB CSD Toolbox tutorial
m.example <- ExtractMontage(user.labels = colnames(NR.C66.trr))
gh.example <- GetGH(m.example)
csd.data <- CalculateCSD(eeg.data = t(NR.C66.trr), G = gh.example$G,
H = gh.example$H
)
# Should reproduce Figures 16 of the original MATLAB CSD TOOLBOX tutorial:
# http://psychophysiology.cpmc.columbia.edu/software/CSDtoolbox/tutorial.html
matplot(t(csd.data$csd), type = "l", lty = 1, main = "Figure 16",
ylim = c(-20, 40)
)
# Should reproduce Figures 17 of the original MATLAB CSD TOOLBOX tutorial
matplot(t(csd.data$csd)[, c(14,24)], type = "l", col = c("blue", "green"),
lty = 1, main = "Figure 17", ylim = c(-10, 35)
)
# Should reproduce Figures 18 of the original MATLAB CSD TOOLBOX tutorial
matplot(NR.C66.trr, type = "l", lty = 1, main = "Figure 18",
ylim = c(-10, 20)
)
# Should reproduce Figures 19 of the original MATLAB CSD TOOLBOX tutorial
matplot(NR.C66.trr[, c(14,24)], type = "l", col = c("blue", "green"),
lty = 1, main = "Figure 19", ylim = c(-10, 20)
)
}
}
\seealso{
\code{\link{GetGH}}
}
|
# title: "Link between FishLine and sampling schemes, 2019"
# author: "Kirsten Birch Håkansson, DTU Aqua"
# NOTE(review): the title says 2019, but `years <- 2020` below and the output
# file name use 2020 - the header looks stale; confirm and update.
# Setup ----
# NOTE(review): rm(list = ls()) wipes the user's workspace when sourced
# interactively - consider removing it.
rm(list = ls())
library(RODBC)
library(sqldf)
library(dplyr)
library(lubridate)
####################################################################
years <- 2020 # only a single
cruises <- c("MON", "SEAS", "IN-LYNG", "IN-HIRT")
####################################################################
# Input/output locations: sampling-scheme reference tables, the output
# folder, and the vessel sampling frame ("lykkehjul") for the chosen year.
ref_path <- "Q:/mynd/kibi/reference_tables/sampling_schemes/"
output_path <- "Q:/mynd/kibi/reference_tables/link_fishline_dfad_to_sampling_schemes/"
lh_file <- paste0("Q:/mynd/kibi/reference_tables/sampling_frames/", "lykkehjul_", years, ".csv")
# Get Sampling schemes ----
# Reference table of planned sampling schemes, restricted to the target year.
ss <- read.csv(paste0(ref_path, "sampling_scheme_ref_", years, ".csv"), sep = ";")
ss <- subset(ss, year == years)
# Get FishLine ----
# Pull trip/sample/raised-species records for the selected cruises and
# year(s) from the FishLineDW data warehouse.
channel <- odbcConnect("FishLineDW")
samp <- sqlQuery(
channel,
paste(
"SELECT Trip.tripId, Trip.cruiseId, Trip.year, Trip.cruise, Trip.trip, Trip.tripType, Trip.logBldNr, Trip.timeZone, Trip.dateStart, Trip.dateEnd, Trip.samplingType, Trip.samplingMethod, Trip.fisheryType, Trip.platform1, Trip.nationalityPlatform1,
Trip.fDFVesselPlatform1, Trip.nationalityPlatform2, Trip.platform2, Trip.dateSample, Trip.harbourSample, Trip.nationalityHarbourSample, Trip.harbourLanding, Trip.nationalityHarbourLanding, Sample.sampleId, Sample.station,
Sample.gearQuality, Sample.dfuArea, Sample.targetSpecies1, Sample.catchRegistration, Sample.speciesRegistration, SpeciesListRaised.speciesCode, SpeciesListRaised.landingCategory, SpeciesListRaised.weightSubSample,
SpeciesListRaised.weightTotal
FROM SpeciesListRaised LEFT OUTER JOIN
Sample ON SpeciesListRaised.sampleId = Sample.sampleId RIGHT OUTER JOIN
Trip ON Sample.tripId = Trip.tripId
WHERE (Trip.year between ", min(years), " and ", max(years) , ")
and Trip.cruise in ('", paste(cruises, collapse = "','"),
"')",
sep = ""
)
)
close(channel)
# Get lykkehjul ----
# Vessel sampling frame with one stratum per vessel (fid).
lh <- read.csv(lh_file, sep = ";")
# Make DNK_Market_Sampling ----
# Link harbour-sampling trips (IN-HIRT / IN-LYNG) to the DNK_Market_Sampling
# scheme. Samples touching the listed pelagic/industrial target species,
# IND/DIS landing categories, MAK/SIL consumption landings or GLL are
# excluded first (`out`); the remainder (`ok`) is linked to the scheme.
ss_MS <-
subset(ss, samplingScheme == "DNK_Market_Sampling")
samp_MS <- subset(samp, cruise %in% c("IN-HIRT", "IN-LYNG"))
unique(samp_MS$targetSpecies1)
out <-
subset(
samp_MS,
targetSpecies1 %in% c("BLH", "BRS", "TBM", "SPE", "HMK", "LOD", "MAK", "SIL", "PIL") |
landingCategory %in% c("IND", "DIS") |
speciesCode %in% c("MAK", "SIL") &
landingCategory == "KON" | speciesCode == "GLL"
# NOTE(review): this condition mixes & and | without parentheses and
# relies on & binding tighter than | - add parentheses to make the
# intended grouping explicit.
)
# Summaries below are interactive sanity checks of the excluded/kept weight.
out_sum <-
summarise(
group_by(out, speciesCode, targetSpecies1, landingCategory),
kg = sum(weightTotal, na.rm = T)
)
ok <- subset(samp_MS,!(sampleId %in% out$sampleId))
ok_sum <-
summarise(
group_by(ok, speciesCode, targetSpecies1, landingCategory),
kg = sum(weightTotal, na.rm = T)
)
samp_MS_ok <- distinct(ok, year, tripId, cruise, trip, harbourSample, dateSample)
# All retained market trips are assigned to the "High activity" stratum of
# the DNK_Market_Sampling scheme, keyed by quarter of the sampling date.
ok$samplingScheme <- "DNK_Market_Sampling"
ok$quarter <- quarter(ok$dateSample)
ok$stratumName <- "High activity"
samp_MS_ok_1 <- distinct(ok, year, samplingScheme, quarter, stratumName, tripId, cruise, trip)
ss_MS_ok <- full_join(ss_MS, samp_MS_ok_1)
rm(ss_MS, ok, ok_sum, out, out_sum, samp_MS, samp_MS_ok, samp_MS_ok_1)
# Make DNK_AtSea_Observer_Active ----
# Link at-sea observer trips (MON / SEAS cruises) to the
# DNK_AtSea_Observer_Active sampling scheme via the vessel frame (lh).
ss_AOA <-
  subset(ss, samplingScheme == "DNK_AtSea_Observer_Active")
samp_AOA <- subset(samp, cruise %in% c("MON", "SEAS"))
samp_AOA$quarter <- quarter(samp_AOA$dateStart)
samp_lh <- left_join(samp_AOA, lh, by = c("platform1" = "fid", "year" = "year"))
samp_lh_uniq <- distinct(samp_lh, tripId, year, cruise, trip, platform1, stratumName, quarter)
# Interactive sanity checks: strata present, trips without a stratum,
# duplicated tripIds after the join.
unique(samp_lh$stratumName)
test_no_stratum <- subset(samp_lh_uniq, is.na(stratumName))
ss_AOA_ok <- full_join(ss_AOA, samp_lh_uniq)
test <- summarise(group_by(ss_AOA_ok, tripId), no = length(tripId))
# Planned selections without a matching trip were not sampled
ss_AOA_ok$sampled[!(is.na(ss_AOA_ok$samplingScheme)) & is.na(ss_AOA_ok$tripId)] <- "N"
ss_AOA_ok$reasonNotSampled[!(is.na(ss_AOA_ok$samplingScheme)) & is.na(ss_AOA_ok$tripId)] <- "Other"
# Trips with no stratum in the vessel frame were sampled outside the frame.
# BUG FIX: compute the mask once. The original filled stratumName first and
# then kept testing is.na(stratumName), so the selectionMethod / sampled /
# reasonNotSampled assignments matched zero rows and were silently lost.
outside_frame <- is.na(ss_AOA_ok$stratumName)
ss_AOA_ok$samplingScheme[outside_frame] <- "DNK_AtSea_Observer_Active"
ss_AOA_ok$hierarchy[outside_frame] <- 1
ss_AOA_ok$stratumSamplingFrame[outside_frame] <- paste0("Q", ss_AOA_ok$quarter[outside_frame])
ss_AOA_ok$PSU[outside_frame] <- "VS"
ss_AOA_ok$stratumName[outside_frame] <- "Sampling outside frame"
ss_AOA_ok$selectionMethod[outside_frame] <- "NPCS"
ss_AOA_ok$sampled[outside_frame] <- "Y"
ss_AOA_ok$reasonNotSampled[outside_frame] <- ""
rm(lh, samp_AOA, samp_lh, samp_lh_uniq, ss_AOA, test, test_no_stratum)
# Merge linkage and output ----
# Combine the two scheme linkages and write the yearly reference table.
link <- bind_rows(ss_AOA_ok, ss_MS_ok)
write.table(link, paste0(output_path, "link_fishLine_to_sampling_schemes_", years, ".csv"), sep = ";", row.names = FALSE)
| /link_fishline_dfad_to_sampling_schemes/link_fishLine_to_sampling_schemes_2020.R | no_license | KirstenBirchHaakansson/reference_tables | R | false | false | 5,044 | r |
# title: "Link between FishLine and sampling schemes, 2019"
# author: "Kirsten Birch Håkansson, DTU Aqua"
# NOTE(review): the title says 2019, but `years <- 2020` below and the output
# file name use 2020 - the header looks stale; confirm and update.
# Setup ----
# NOTE(review): rm(list = ls()) wipes the user's workspace when sourced
# interactively - consider removing it.
rm(list = ls())
library(RODBC)
library(sqldf)
library(dplyr)
library(lubridate)
####################################################################
years <- 2020 # only a single
cruises <- c("MON", "SEAS", "IN-LYNG", "IN-HIRT")
####################################################################
# Input/output locations: sampling-scheme reference tables, the output
# folder, and the vessel sampling frame ("lykkehjul") for the chosen year.
ref_path <- "Q:/mynd/kibi/reference_tables/sampling_schemes/"
output_path <- "Q:/mynd/kibi/reference_tables/link_fishline_dfad_to_sampling_schemes/"
lh_file <- paste0("Q:/mynd/kibi/reference_tables/sampling_frames/", "lykkehjul_", years, ".csv")
# Get Sampling schemes ----
# Reference table of planned sampling schemes, restricted to the target year.
ss <- read.csv(paste0(ref_path, "sampling_scheme_ref_", years, ".csv"), sep = ";")
ss <- subset(ss, year == years)
# Get FishLine ----
# Pull trip/sample/raised-species records for the selected cruises and
# year(s) from the FishLineDW data warehouse.
channel <- odbcConnect("FishLineDW")
samp <- sqlQuery(
channel,
paste(
"SELECT Trip.tripId, Trip.cruiseId, Trip.year, Trip.cruise, Trip.trip, Trip.tripType, Trip.logBldNr, Trip.timeZone, Trip.dateStart, Trip.dateEnd, Trip.samplingType, Trip.samplingMethod, Trip.fisheryType, Trip.platform1, Trip.nationalityPlatform1,
Trip.fDFVesselPlatform1, Trip.nationalityPlatform2, Trip.platform2, Trip.dateSample, Trip.harbourSample, Trip.nationalityHarbourSample, Trip.harbourLanding, Trip.nationalityHarbourLanding, Sample.sampleId, Sample.station,
Sample.gearQuality, Sample.dfuArea, Sample.targetSpecies1, Sample.catchRegistration, Sample.speciesRegistration, SpeciesListRaised.speciesCode, SpeciesListRaised.landingCategory, SpeciesListRaised.weightSubSample,
SpeciesListRaised.weightTotal
FROM SpeciesListRaised LEFT OUTER JOIN
Sample ON SpeciesListRaised.sampleId = Sample.sampleId RIGHT OUTER JOIN
Trip ON Sample.tripId = Trip.tripId
WHERE (Trip.year between ", min(years), " and ", max(years) , ")
and Trip.cruise in ('", paste(cruises, collapse = "','"),
"')",
sep = ""
)
)
close(channel)
# Get lykkehjul ----
# Vessel sampling frame with one stratum per vessel (fid).
lh <- read.csv(lh_file, sep = ";")
# Make DNK_Market_Sampling ----
# Link harbour-sampling trips (IN-HIRT / IN-LYNG) to the DNK_Market_Sampling
# scheme. Samples touching the listed pelagic/industrial target species,
# IND/DIS landing categories, MAK/SIL consumption landings or GLL are
# excluded first (`out`); the remainder (`ok`) is linked to the scheme.
ss_MS <-
subset(ss, samplingScheme == "DNK_Market_Sampling")
samp_MS <- subset(samp, cruise %in% c("IN-HIRT", "IN-LYNG"))
unique(samp_MS$targetSpecies1)
out <-
subset(
samp_MS,
targetSpecies1 %in% c("BLH", "BRS", "TBM", "SPE", "HMK", "LOD", "MAK", "SIL", "PIL") |
landingCategory %in% c("IND", "DIS") |
speciesCode %in% c("MAK", "SIL") &
landingCategory == "KON" | speciesCode == "GLL"
# NOTE(review): this condition mixes & and | without parentheses and
# relies on & binding tighter than | - add parentheses to make the
# intended grouping explicit.
)
# Summaries below are interactive sanity checks of the excluded/kept weight.
out_sum <-
summarise(
group_by(out, speciesCode, targetSpecies1, landingCategory),
kg = sum(weightTotal, na.rm = T)
)
ok <- subset(samp_MS,!(sampleId %in% out$sampleId))
ok_sum <-
summarise(
group_by(ok, speciesCode, targetSpecies1, landingCategory),
kg = sum(weightTotal, na.rm = T)
)
samp_MS_ok <- distinct(ok, year, tripId, cruise, trip, harbourSample, dateSample)
# All retained market trips are assigned to the "High activity" stratum of
# the DNK_Market_Sampling scheme, keyed by quarter of the sampling date.
ok$samplingScheme <- "DNK_Market_Sampling"
ok$quarter <- quarter(ok$dateSample)
ok$stratumName <- "High activity"
samp_MS_ok_1 <- distinct(ok, year, samplingScheme, quarter, stratumName, tripId, cruise, trip)
ss_MS_ok <- full_join(ss_MS, samp_MS_ok_1)
rm(ss_MS, ok, ok_sum, out, out_sum, samp_MS, samp_MS_ok, samp_MS_ok_1)
# Make DNK_AtSea_Observer_Active ----
# Link at-sea observer trips (MON / SEAS cruises) to the
# DNK_AtSea_Observer_Active sampling scheme via the vessel frame (lh).
ss_AOA <-
  subset(ss, samplingScheme == "DNK_AtSea_Observer_Active")
samp_AOA <- subset(samp, cruise %in% c("MON", "SEAS"))
samp_AOA$quarter <- quarter(samp_AOA$dateStart)
samp_lh <- left_join(samp_AOA, lh, by = c("platform1" = "fid", "year" = "year"))
samp_lh_uniq <- distinct(samp_lh, tripId, year, cruise, trip, platform1, stratumName, quarter)
# Interactive sanity checks: strata present, trips without a stratum,
# duplicated tripIds after the join.
unique(samp_lh$stratumName)
test_no_stratum <- subset(samp_lh_uniq, is.na(stratumName))
ss_AOA_ok <- full_join(ss_AOA, samp_lh_uniq)
test <- summarise(group_by(ss_AOA_ok, tripId), no = length(tripId))
# Planned selections without a matching trip were not sampled
ss_AOA_ok$sampled[!(is.na(ss_AOA_ok$samplingScheme)) & is.na(ss_AOA_ok$tripId)] <- "N"
ss_AOA_ok$reasonNotSampled[!(is.na(ss_AOA_ok$samplingScheme)) & is.na(ss_AOA_ok$tripId)] <- "Other"
# Trips with no stratum in the vessel frame were sampled outside the frame.
# BUG FIX: compute the mask once. The original filled stratumName first and
# then kept testing is.na(stratumName), so the selectionMethod / sampled /
# reasonNotSampled assignments matched zero rows and were silently lost.
outside_frame <- is.na(ss_AOA_ok$stratumName)
ss_AOA_ok$samplingScheme[outside_frame] <- "DNK_AtSea_Observer_Active"
ss_AOA_ok$hierarchy[outside_frame] <- 1
ss_AOA_ok$stratumSamplingFrame[outside_frame] <- paste0("Q", ss_AOA_ok$quarter[outside_frame])
ss_AOA_ok$PSU[outside_frame] <- "VS"
ss_AOA_ok$stratumName[outside_frame] <- "Sampling outside frame"
ss_AOA_ok$selectionMethod[outside_frame] <- "NPCS"
ss_AOA_ok$sampled[outside_frame] <- "Y"
ss_AOA_ok$reasonNotSampled[outside_frame] <- ""
rm(lh, samp_AOA, samp_lh, samp_lh_uniq, ss_AOA, test, test_no_stratum)
# Merge linkage and output ----
# Combine the two scheme linkages and write the yearly reference table.
link <- bind_rows(ss_AOA_ok, ss_MS_ok)
write.table(link, paste0(output_path, "link_fishLine_to_sampling_schemes_", years, ".csv"), sep = ";", row.names = FALSE)
|
# Exploratory plots of Spanish COVID-19 series (hospitalised, recovered,
# infected, deaths, ICU) plus a simple "danger index".
library(ggplot2)
library(dplyr)
# NOTE(review): VariosPaises_Covid19_Corregido must already exist in the
# workspace (e.g. imported interactively) - there is no read step here.
datos_covid<-VariosPaises_Covid19_Corregido
datos_covid.Spain<-filter(datos_covid, COUNTRY=="SPAIN")
# NOTE(review): the rename happens AFTER the Spain subset was taken, so
# datos_covid.Spain keeps the original X1 column and has no Day column;
# the aes(x = Day) calls below depend on that - consider moving the rename
# before the filter. Confirm against the actual data.
datos_covid<-rename(datos_covid, Day=X1)
Days<-datos_covid.Spain$Day-1355
# Hospitalisations over time (Days shifts Day so the axis starts near 0)
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Days, y=HOSPITALIZADOS))+
geom_smooth(mapping=aes (x=Days, y=HOSPITALIZADOS))
# Recovered
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=RECUPERADOS))+
geom_smooth(mapping=aes (x=Day, y=RECUPERADOS), color="green")
# Infected
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=CONTAGIADOS))+
geom_smooth(mapping=aes (x=Day, y=CONTAGIADOS), color="orange")
# Deaths
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=FALLECIDOS))+
geom_smooth(mapping=aes (x=Day, y=FALLECIDOS), color="black")
# ICU admissions
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=UCIs))+
geom_smooth(mapping=aes (x=Day, y=UCIs), color="red")
# What really matters is that the sink nodes receive more arrivals than the
# source node (translated from Spanish).
# Minimisation applies to infected and lost, maximisation to recovered.
# Proposed danger index: (infected + lost) - recovered; in this data set:
# NOTE(review): CONTAGIADOS/FALLECIDOS/RECUPERADOS are used as bare names
# here, not as columns of datos_covid - they must exist as vectors in the
# global environment (or the data should be attached); confirm.
res<-CONTAGIADOS+FALLECIDOS-RECUPERADOS
gr_res<-ggplot(data=datos_covid)+
geom_point(mapping=aes (x=Day, y=res))+
geom_smooth(mapping=aes (x=Day, y=res), color="red")
gr_res + xlab("9 March-9 May")+ ylab( "Infected+Lost-Recovered") +
ggtitle("Danger Index:")
# Alternative formulated as a ratio
res<-(CONTAGIADOS+FALLECIDOS)/RECUPERADOS
gr_res<-ggplot(data=datos_covid)+
geom_point(mapping=aes (x=Day, y=res))+
geom_smooth(mapping=aes (x=Day, y=res), color="red")
gr_res + xlab("9 March-9 May")+ ylab( "(Infected+Lost)/Recovered") +
ggtitle("Danger Ration Indicator:")
| /Spain.r | no_license | vlopezlo/Covid_19 | R | false | false | 1,770 | r | library(ggplot2)
library(dplyr)
datos_covid<-VariosPaises_Covid19_Corregido
datos_covid.Spain<-filter(datos_covid, COUNTRY=="SPAIN")
datos_covid<-rename(datos_covid, Day=X1)
Days<-datos_covid.Spain$Day-1355
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Days, y=HOSPITALIZADOS))+
geom_smooth(mapping=aes (x=Days, y=HOSPITALIZADOS))
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=RECUPERADOS))+
geom_smooth(mapping=aes (x=Day, y=RECUPERADOS), color="green")
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=CONTAGIADOS))+
geom_smooth(mapping=aes (x=Day, y=CONTAGIADOS), color="orange")
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=FALLECIDOS))+
geom_smooth(mapping=aes (x=Day, y=FALLECIDOS), color="black")
ggplot(data=datos_covid.Spain)+
geom_point(mapping=aes (x=Day, y=UCIs))+
geom_smooth(mapping=aes (x=Day, y=UCIs), color="red")
#lo que realmente interesa es que los nodos sumidero reciban m?s llegadas que el nodo fuente
#MINIMIZATION TO INF AND LOST AND MAXIMIZATION TO RECOVERED,
#DANGER INDEX PROPOSAL IS (INFECTED+LOST)-RECOVERED, IN MY DATABASE:
res<-CONTAGIADOS+FALLECIDOS-RECUPERADOS
gr_res<-ggplot(data=datos_covid)+
geom_point(mapping=aes (x=Day, y=res))+
geom_smooth(mapping=aes (x=Day, y=res), color="red")
gr_res + xlab("9 March-9 May")+ ylab( "Infected+Lost-Recovered") +
ggtitle("Danger Index:")
#Another alternative as ratio
res<-(CONTAGIADOS+FALLECIDOS)/RECUPERADOS
gr_res<-ggplot(data=datos_covid)+
geom_point(mapping=aes (x=Day, y=res))+
geom_smooth(mapping=aes (x=Day, y=res), color="red")
gr_res + xlab("9 March-9 May")+ ylab( "(Infected+Lost)/Recovered") +
ggtitle("Danger Ration Indicator:")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RnBSet-class.R
\docType{methods}
\name{hasCovg,RnBSet-method}
\alias{hasCovg,RnBSet-method}
\alias{hasCovg}
\title{hasCovg-methods}
\usage{
\S4method{hasCovg}{RnBSet}(object, type = "sites")
}
\arguments{
\item{object}{\code{RnBSet} of interest.}
\item{type}{\code{character} singleton. Either \code{"sites"} or the name of a region type summarized in the object.}
}
\value{
\code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type. \code{FALSE} otherwise
}
\description{
Returns \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type.
}
\examples{
\donttest{
library(RnBeads.hg19)
data(small.example.object)
## per-site beta-value matrix
hasCovg(rnb.set.example)
}
}
| /man/hasCovg-methods.Rd | no_license | epigen/RnBeads | R | false | true | 834 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RnBSet-class.R
\docType{methods}
\name{hasCovg,RnBSet-method}
\alias{hasCovg,RnBSet-method}
\alias{hasCovg}
\title{hasCovg-methods}
\usage{
\S4method{hasCovg}{RnBSet}(object, type = "sites")
}
\arguments{
\item{object}{\code{RnBSet} of interest.}
\item{type}{\code{character} singleton. If \code{sites} or a region type summarized in the object}
}
\value{
\code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type. \code{FALSE} otherwise
}
\description{
Returns \code{TRUE} if the \code{RnBSet} object contains coverage information for sites or the specified region type.
}
\examples{
\donttest{
library(RnBeads.hg19)
data(small.example.object)
## per-site beta-value matrix
hasCovg(rnb.set.example)
}
}
|
#' Prof K
#' 03-01-2020
#' 7.2
# NOTE(review): this is a fill-in-the-blank student template. The "____"
# placeholders are intentional; the script will not run until completed.
# Libs
library(caret)
library(vtreat)
library(class)
# WD: set your own working directory here
setwd('____')
# Data
df <- read.csv("UniversalBank.csv")
# EDA: class balance of the response variable
table(df$Personal.Loan)
# Drop per directions (identifier columns carry no predictive signal)
df$ID <- ____
df$ZIP.Code <- NULL
# In this example EDU is actually a factor!
df$Education <- as.factor(df$Education)
nlevels(df$Education)
# Partition per directions (fixed seed for reproducibility)
set.seed(1234)
splitPercent <- round(nrow(df) %*% __)
totalRecords <- 1:nrow(___)
idx <- sample(_______, splitPercent)
trainDat <- df[idx,]
testDat <- df[-___,]
# Treatment to account for dummies, although its a pretty clean data set, EDU could be a dummy but is still ordinal...so?
xVars <- c("Age", "Experience", "Income", "Family", "CCAvg",
           "Education", "Mortgage", "Securities.Account",
           "CD.Account", "Online", "CreditCard" )
yVar <- '____'
plan <- designTreatmentsC(trainDat, ____,____,_)
# Prepare: apply the vtreat plan to the training partition
treatedTrain <- prepare(___, ____)
# Fit - WARNING!!! (this call treats the response as numeric -- regression!)
knnFit <- train(______ ~ .,
                data = _____, method = "___",
                preProcess = c("center","scale"), tuneLength = 10)
# Make sure you know your data problem...its classification! Note the difference with the previous call.
knnFit <- train(as.factor(____) ~ .,
                data = treatedTrain, method = "___",
                preProcess = c("center","scale"), tuneLength = 10)
# 7.2A: score a single new customer
newCustomer <- data.frame(Age = __,
                          Experience = __,
                          Income = __,
                          Family = _,
                          CCAvg = _,
                          Education = _,
                          Mortgage = _,
                          Securities.Account = _,
                          CD.Account = _,
                          Online = _,
                          CreditCard = _)
newCustomer$Education <- as.factor(___________)
treatedNewCU <- prepare(plan, _______)
# this is the version with a higher K
predict(____, _____)
# Since 7.2a demands k=1, we make a single model bc caret's implementation starts at 5.
allData <- full_join(df, newCustomer)
treatedAll <- prepare(plan, allData)
scaleAll <- scale(treatedAll[,1:__], scale = T, center=T)
specialK <- knn(train = scaleAll[1:5000,1:__],
                test = scaleAll[____,1:__],
                cl = as.factor(df$Personal.Loan), k =____)
specialK
# Did the person accept the personal loan offer?
# Answer:
# 7.2B
______
plot(_____)
# The most balanced K is:
# Answer:
# 7.2C
# Prep the validation set
treatedTest <- prepare(plan, _____)
testClasses <- predict(knnFit, _______)
confusionMatrix(as.factor(testDat$Personal.Loan),________)
# 7.2D
# Make another new customer data frame. Prepare it. Then use the knnFit to make a prediction.
# Did the person accept the personal loan offer?
# Answer:
# 7.3E
# Now redo your partitions into 3 parts. Go back to our script examples for this code. Make predictions, construct the confusion matrices and review to answer the question.
# End
# End | /Lessons/E_LogReg/scripts/Z_deprecated_HW_supplemental_studentVersion.R | no_license | lenamax2355/Harvard_DataMining_Business_Student | R | false | false | 3,139 | r | #' Prof K
#' 03-01-2020
#' 7.2
# Libs
library(caret)
library(vtreat)
library(class)
# WD
setwd('____')
# Data
df <- read.csv("UniversalBank.csv")
# EDA
table(df$Personal.Loan)
# Drop per directions
df$ID <- ____
df$ZIP.Code <- NULL
# In this example EDU is actually a factor!
df$Education <- as.factor(df$Education)
nlevels(df$Education)
# Partition per directions
set.seed(1234)
splitPercent <- round(nrow(df) %*% __)
totalRecords <- 1:nrow(___)
idx <- sample(_______, splitPercent)
trainDat <- df[idx,]
testDat <- df[-___,]
# Treatment to account for dummies, although its a pretty clean data set, EDU could be a dummy but is still ordinal...so?
xVars <- c("Age", "Experience", "Income", "Family", "CCAvg",
"Education", "Mortgage", "Securities.Account",
"CD.Account", "Online", "CreditCard" )
yVar <- '____'
plan <- designTreatmentsC(trainDat, ____,____,_)
# Prepare
treatedTrain <- prepare(___, ____)
# Fit - WARNING!!!
knnFit <- train(______ ~ .,
data = _____, method = "___",
preProcess = c("center","scale"), tuneLength = 10)
# Make sure you know your data problem...its classification! Note the difference with the previous call.
knnFit <- train(as.factor(____) ~ .,
data = treatedTrain, method = "___",
preProcess = c("center","scale"), tuneLength = 10)
# 7.2A
newCustomer <- data.frame(Age = __,
Experience = __,
Income = __,
Family = _,
CCAvg = _,
Education = _,
Mortgage = _,
Securities.Account = _,
CD.Account = _,
Online = _,
CreditCard = _)
newCustomer$Education <- as.factor(___________)
treatedNewCU <- prepare(plan, _______)
# this is the version with a higher K
predict(____, _____)
# Since 7.2a demands k=1, we make a single model bc caret's implementation starts at 5.
allData <- full_join(df, newCustomer)
treatedAll <- prepare(plan, allData)
scaleAll <- scale(treatedAll[,1:__], scale = T, center=T)
specialK <- knn(train = scaleAll[1:5000,1:__],
test = scaleAll[____,1:__],
cl = as.factor(df$Personal.Loan), k =____)
specialK
# Did the person accept the personal loan offer?
# Answer:
# 7.2B
______
plot(_____)
# The most balanced K is:
# Answer:
# 7.2C
# Prep the validation set
treatedTest <- prepare(plan, _____)
testClasses <- predict(knnFit, _______)
confusionMatrix(as.factor(testDat$Personal.Loan),________)
# 7.2D
# Make another new customer data frame. Prepare it. Then use the knnFit to make a prediction.
# Did the person accept the personal loan offer?
# Answer:
# 7.3E
# Now redo your partitions into 3 parts. Go back to our script examples for this code. Make predictions, construct the confusion matrices and review to answer the question.
# End |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/C3Scatter.R
\name{C3Scatter}
\alias{C3Scatter}
\title{C3Scatter}
\usage{
C3Scatter(dataset, size = 2.5, colors, width = NULL, height = NULL)
}
\arguments{
\item{dataset}{the data to be plotted, a list or data.frame containing the different data series,
each as a vector of numeric values. Note that a vector of x values must be supplied and named 'x'.}
\item{size}{the size of the points to be plotted}
\item{colors}{vector of colors for the bars.}
\item{width, height}{size of figure output}
}
\description{
Create a scatter plot
}
| /man/C3Scatter.Rd | permissive | rmnppt/rc3 | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/C3Scatter.R
\name{C3Scatter}
\alias{C3Scatter}
\title{C3Scatter}
\usage{
C3Scatter(dataset, size = 2.5, colors, width = NULL, height = NULL)
}
\arguments{
\item{dataset}{the data to be plotted, a list or data.frame containing the different data series,
each as a vector of numeric values. Note that a vector of x values must be supplied and named 'x'.}
\item{size}{the ratio of the points to be plotted}
\item{colors}{vector of colors for the bars.}
\item{width, height}{size of figure output}
}
\description{
Creat a scatter plot
}
|
#' Change the HTML (or text) inside an element
#'
#' Change the text or HTML inside an element. The given HTML can be any
#' R expression, and it can either be appended to the current contents of the element
#' or overwrite it (default).
#'
#' @param id The id of the element/Shiny tag
#' @param html The HTML/text to place inside the element. Can be either simple
#' plain text or valid HTML code.
#' @param add If \code{TRUE}, then append \code{html} to the contents of the element;
#' otherwise overwrite it.
#' @param selector JQuery selector of the elements to target. Ignored if the \code{id}
#' argument is given.
#' @param asis If \code{TRUE}, use the ID as-is even when inside a module
#' (instead of adding the namespace prefix to the ID).
#' @seealso \code{\link[shinyjs]{useShinyjs}},
#' \code{\link[shinyjs]{runExample}}
#' @note \code{shinyjs} must be initialized with a call to \code{useShinyjs()}
#' in the app's ui.
#' @examples
#' if (interactive()) {
#' library(shiny)
#'
#' shinyApp(
#' ui = fluidPage(
#' useShinyjs(), # Set up shinyjs
#' actionButton("btn", "Click me"),
#' p(id = "element", "Watch what happens to me")
#' ),
#' server = function(input, output) {
#' observeEvent(input$btn, {
#' # Change the following line for more examples
#' html("element", paste0("The date is ", date()))
#' })
#' }
#' )
#' }
#' \dontrun{
#' # The shinyjs function call in the above app can be replaced by
#' # any of the following examples to produce similar Shiny apps
#' html("element", "Hello!")
#' html("element", " Hello!", TRUE)
#' html("element", "<strong>bold</strong> that was achieved with HTML")
#' local({val <- "some text"; html("element", val)})
#' html(id = "element", add = TRUE, html = input$btn)
#' }
#' @export
html <- function(id = NULL, html = NULL, add = FALSE, selector = NULL, asis = FALSE) {
  # Forward every argument to the shared JS-function helper, which builds and
  # sends the corresponding shinyjs "html" message to the browser.
  jsFuncHelper(
    "html",
    list(id = id, html = html, add = add, selector = selector, asis = asis)
  )
}
| /R/jsFunc-html.R | no_license | cran/shinyjs | R | false | false | 2,093 | r | #' Change the HTML (or text) inside an element
#'
#' Change the text or HTML inside an element. The given HTML can be any
#' R expression, and it can either be appended to the currentcontents of the element
#' or overwrite it (default).
#'
#' @param id The id of the element/Shiny tag
#' @param html The HTML/text to place inside the element. Can be either simple
#' plain text or valid HTML code.
#' @param add If \code{TRUE}, then append \code{html} to the contents of the element;
#' otherwise overwrite it.
#' @param selector JQuery selector of the elements to target. Ignored if the \code{id}
#' argument is given.
#' @param asis If \code{TRUE}, use the ID as-is even when inside a module
#' (instead of adding the namespace prefix to the ID).
#' @seealso \code{\link[shinyjs]{useShinyjs}},
#' \code{\link[shinyjs]{runExample}}
#' @note \code{shinyjs} must be initialized with a call to \code{useShinyjs()}
#' in the app's ui.
#' @examples
#' if (interactive()) {
#' library(shiny)
#'
#' shinyApp(
#' ui = fluidPage(
#' useShinyjs(), # Set up shinyjs
#' actionButton("btn", "Click me"),
#' p(id = "element", "Watch what happens to me")
#' ),
#' server = function(input, output) {
#' observeEvent(input$btn, {
#' # Change the following line for more examples
#' html("element", paste0("The date is ", date()))
#' })
#' }
#' )
#' }
#' \dontrun{
#' # The shinyjs function call in the above app can be replaced by
#' # any of the following examples to produce similar Shiny apps
#' html("element", "Hello!")
#' html("element", " Hello!", TRUE)
#' html("element", "<strong>bold</strong> that was achieved with HTML")
#' local({val <- "some text"; html("element", val)})
#' html(id = "element", add = TRUE, html = input$btn)
#' }
#' @export
html <- function(id = NULL, html = NULL, add = FALSE, selector = NULL, asis = FALSE) {
fxn <- "html"
params <- list(id = id, html = html, add = add, selector = selector, asis = asis)
jsFuncHelper(fxn, params)
}
|
\name{decathlon}
\alias{decathlon}
\docType{data}
\title{Performance in decathlon (data)}
\description{
The data used here refer to athletes' performance during two sporting events.
}
\usage{data(decathlon)}
\format{
A data frame with 41 rows and 13 columns: the first ten columns correspond to
the performance of the athletes for the 10 events of the decathlon.
The columns 11 and 12 correspond respectively to the rank and the points obtained.
The last column is a categorical variable
corresponding to the sporting event (2004 Olympic Game or 2004 Decastar)
}
\source{
The references below.
}
\references{
{Departement of Applied Mathematics, Agrocampus Rennes.
Le, S., Josse, J. & Husson, F. (2008). FactoMineR: An R Package for Multivariate Analysis. Journal of Statistical Software. 25(1). pp. 1-18.
}
}
\keyword{datasets}
| /man/decathlon.Rd | no_license | chavent/PCAmixdata | R | false | false | 844 | rd | \name{decathlon}
\alias{decathlon}
\docType{data}
\title{Performance in decathlon (data)}
\description{
The data used here refer to athletes' performance during two sporting events.
}
\usage{data(decathlon)}
\format{
A data frame with 41 rows and 13 columns: the first ten columns corresponds to
the performance of the athletes for the 10 events of the decathlon.
The columns 11 and 12 correspond respectively to the rank and the points obtained.
The last column is a categorical variable
corresponding to the sporting event (2004 Olympic Game or 2004 Decastar)
}
\source{
The references below.
}
\references{
{Departement of Applied Mathematics, Agrocampus Rennes.
Le, S., Josse, J. & Husson, F. (2008). FactoMineR: An R Package for Multivariate Analysis. Journal of Statistical Software. 25(1). pp. 1-18.
}
}
\keyword{datasets}
|
# === Clean Up Workspace
rm(list = ls())
# === Download, install and load packages if needed
#' Install (if missing) and attach the given packages.
#'
#' @param package.needed character vector of package names.
#'
#' Fixes over the original: `if (is.na(package.needed))` is an error for a
#' vector of length > 1 in R >= 4.3, and `library()` can only attach ONE
#' package per call, so a vector input silently failed. This version validates
#' the whole vector and attaches the packages one by one.
install.package.needed <- function(package.needed = NA)
{
  if (length(package.needed) == 0 || any(is.na(package.needed))) {
    stop("Package name missing")
  }
  new.packages <- package.needed[!(package.needed %in% installed.packages()[, "Package"])]
  if (length(new.packages) > 0)
  {
    install.packages(new.packages)
  }
  # attach the packages (library() accepts a single name, so loop)
  for (pkg in package.needed) {
    library(pkg, character.only = TRUE)
  }
}
# ==== MAIN PART
# Simple-linear-regression walkthrough: sales as a function of price,
# including correlation, model fit, forecasting and assumption checks.
# set warning messages to English
Sys.setenv(LANG = "en")
# set work directory (NOTE: machine-specific path; adjust before running)
setwd("Z:/D/VL_MPMD_Stats_With_R/002 Course Material/Part 3 R/005_User-defined_Functions")
# import Excel file (install readxl first if needed)
install.package.needed("readxl")
sales <- read_excel("004b Sales.xlsx")
# convert the tibble returned by read_excel to a plain dataframe
sales<-data.frame(sales)
head(sales)
# copy sales to x (work on a copy, keep the raw import untouched)
x<-sales
# === Correlation
# here using cor.test to get also the significance
# interpret the significance in detail!
cor.test(x$sales,x$price)
# === fitting a linear regression model: sales explained by price
lm.model<-lm(sales~price,data=x)
# print just the equation (intercept and slope)
lm.model
# detailed statistics of model (R-squared, t-tests, F-test)
summary(lm.model)
# confidence intervals for parameter based on standard errors - correct if model meets assumptions!
confint(lm.model,level=0.95)
# add predictions (fitted values) to data frame
x$forecast<-fitted(lm.model)
#add residuals to the data frame
x$residuals<-resid(lm.model)
# add standardized predictions to data frame
x$z.forecast<-scale(fitted(lm.model))
#add standardized residuals to the data frame
x$z.residuals<-rstandard(lm.model)
# === using the model to forecast
# define a data frame with the given values, here a known value 49 to compare the result - see 1st row of original data
new.values<-data.frame(matrix(c(49),nrow=1,ncol=1))
colnames(new.values)<-c("price")
# provide the given values to predict()
predict.lm(lm.model,new.values)
# === checking the assumptions (normality of residuals)
hist(x$z.residuals)
qqnorm(x$residuals)
qqline(x$residuals)
ks.test(x$residuals,"pnorm")
shapiro.test(x$residuals)
# checking for Homo-/Heteroskedasticity (residuals vs fitted should show no pattern)
plot(x$z.forecast,x$residuals,xlab="Fitted",ylab="Residuals")
# probably here also Durbin Watson Test for Autocorrelation
| /05 Correlation/Sales/regression_sales_SOLUTION_IDM-6246EB8.R | no_license | jandroi/seminar_HTW | R | false | false | 2,142 | r | # === Clean Up Workspace
rm(list = ls())
# === Download, install and load packages if needed
install.package.needed<-function(package.needed = NA)
{
if (is.na(package.needed)) stop("Package name missing")
new.packages <- package.needed[!(package.needed %in% installed.packages()[,"Package"])]
if(length(new.packages))
{
install.packages(new.packages)
}
# load the packages
library(package.needed,character.only=TRUE)
}
# ==== MAIN PART
# set warning messages to English
Sys.setenv(LANG = "en")
# set work directory
setwd("Z:/D/VL_MPMD_Stats_With_R/002 Course Material/Part 3 R/005_User-defined_Functions")
# import Excel file
install.package.needed("readxl")
sales <- read_excel("004b Sales.xlsx")
# convert to dataframe
sales<-data.frame(sales)
head(sales)
# copy sales to x
x<-sales
# === Correlation
# here using cor.test to get also the significance
# interpret the significance in detail!
cor.test(x$sales,x$price)
# === fitting a linear regression model
lm.model<-lm(sales~price,data=x)
# print just the equation
lm.model
# detailed statistics of model
summary(lm.model)
# confidence intervals for parameter based on standard errors - correct if model meets assumptions!
confint(lm.model,level=0.95)
# add predictions to data frame
x$forecast<-fitted(lm.model)
#add residuals to the data frame
x$residuals<-resid(lm.model)
# add predictions to data frame
x$z.forecast<-scale(fitted(lm.model))
#add stanardized residuals to the data frame
x$z.residuals<-rstandard(lm.model)
# === using the model to forecast
# define a data frame with the given values, here a known value 49 to compare the result - see 1st row of original data
new.values<-data.frame(matrix(c(49),nrow=1,ncol=1))
colnames(new.values)<-c("price")
# provide the given values to predict()
predict.lm(lm.model,new.values)
# === checking the assumptions
hist(x$z.residuals)
qqnorm(x$residuals)
qqline(x$residuals)
ks.test(x$residuals,"pnorm")
shapiro.test(x$residuals)
# checking for Homo-/Heteroskedasticity
plot(x$z.forecast,x$residuals,xlab="Fitted",ylab="Residuals")
# probably here also Durbin Watson Test for Autocorrelation
|
library(shiny)
library(plotly)
# UI definition: header, a narrow sidebar with two metric selectors (each
# paired with a yearly/monthly view toggle), and two stacked plotly charts
# ("trendPlot" and "propPlot") rendered by the matching server outputs.
ui <- fluidPage(
  headerPanel("Instagram Explorer"),
  sidebarPanel(width="2",
               # metric shown in the top (trend) chart
               selectInput('y', 'Type', choices = c("Followers","Following","Likes Given"), selected = "Followers",width = "140px"),
               radioButtons("r1", "View Type:",c("Yearly","Monthly"), selected = "Yearly",inline=F),
               br(),
               br(),
               # metric shown in the bottom (proportion) chart
               selectInput('z', 'Media Based Proportion', choices = c("Uploads"), selected = "Uploads",width = "200px"),
               radioButtons("r2", "View Type:", c("Yearly","Monthly"), selected = "Yearly",inline=F)
  ),
  mainPanel(
    plotlyOutput('trendPlot',width="980px",height="330px"),
    br(),
    br(),
    plotlyOutput('propPlot',width="980px",height="330px")
  )
)
) | /ui.R | no_license | yatinkode/My-Instagram-EDA-and-Time-Series-Forecasting | R | false | false | 794 | r | library(shiny)
library(plotly)
ui <- fluidPage(
headerPanel("Instagram Explorer"),
sidebarPanel(width="2",
selectInput('y', 'Type', choices = c("Followers","Following","Likes Given"), selected = "Followers",width = "140px"),
radioButtons("r1", "View Type:",c("Yearly","Monthly"), selected = "Yearly",inline=F),
br(),
br(),
selectInput('z', 'Media Based Proportion', choices = c("Uploads"), selected = "Uploads",width = "200px"),
radioButtons("r2", "View Type:", c("Yearly","Monthly"), selected = "Yearly",inline=F)
),
mainPanel(
plotlyOutput('trendPlot',width="980px",height="330px"),
br(),
br(),
plotlyOutput('propPlot',width="980px",height="330px")
)
) |
#=============================================================================
#_________________________Import Packages_____________________________________
library(shiny)
library(shinydashboard)
library(tidyverse)
library(leaflet)
library(sf)
#=============================================================================
#_________________________Import Dataset______________________________________
# Census income table, solar-permit point locations, and the 2019 TIGER/Line
# census-tract shapefile for Hawaii (state FIPS 15).
Income <- read_csv("Dataset/Income.csv")
solar_permits <- read_csv("Dataset/solar_permits.csv")
tractsf <- read_sf(dsn = "Dataset", layer = "tl_2019_15_tract")
#=============================================================================
#_________________________Cleaning Data_______________________________________
# Keep Honolulu County (Oahu) tracts only: county FIPS code 003.
Oahutract <- filter(tractsf, COUNTYFP == "003")
# Rename "ID Geography" to GEOID and strip the "14000US" prefix so the income
# table can be joined onto the tract shapefile by GEOID.
Rename_Income <- Income %>% rename(GEOID = "ID Geography")
Rename_Income <- mutate(Rename_Income, GEOID = str_remove(Rename_Income$GEOID, "14000US"))
# Join income attributes onto the tract geometries.
censusInc <- left_join(Oahutract, Rename_Income, by='GEOID')
# Remove the Northwestern Hawaiian Islands tract (administratively part of
# Honolulu County but far outside the Oahu map extent).
censusInc <- filter(censusInc, NAME != '9812')
# Keep only year-2017 rows; NA rows are tracts with no income record.
censusInc <- filter(censusInc, Year == 2017 | is.na(Year) == TRUE)
# Remove the water-only tract that surrounds Oahu.
censusInc <- filter(censusInc, NAME != '9900.01')
# Shorten "Household Income by Race" to Household_Inc.
censusInc <- censusInc %>% rename(Household_Inc = "Household Income by Race")
# Drop columns the app does not use.
censusInc <- select(censusInc, -c("Household Income by Race Moe",
                                  "ID Race",
                                  "ID Year",
                                  "Race",
                                  "FUNCSTAT",
                                  "MTFCC",
                                  "Geography",
                                  "ALAND",
                                  "AWATER"))
# Rename the tract internal-point longitude/latitude columns.
censusInc <- rename(censusInc, lng = "INTPTLON")
censusInc <- rename(censusInc, lat = "INTPTLAT")
# Replace missing household income with zero so the color scale is defined.
censusInc$Household_Inc[is.na(censusInc$Household_Inc)] = 0
#================================================================================
#__________________________Import Graphic & Icons Here_____________________________
# Marker icon used for PV (photovoltaic) permit locations on the leaflet map.
PV_Icon <- makeIcon(
  iconUrl = "Images/pv_icon.png",
  iconWidth = 30, iconHeight = 30
)
#================================================================================
#_________________________Color & Aesthetic Variables Here_______________________
# Continuous viridis palette over the household-income range, shared by the
# choropleth fill and its legend.
Household_Income <- censusInc$Household_Inc
pal <- colorNumeric(palette = "viridis",
                    domain = Household_Income)
#=============================================================================
#_________________________USER INTERFACE______________________________________
# shinydashboard UI: header with per-topic progress tasks, a sidebar menu of
# topics, and a body with one tab per topic. Tab names must match the
# menu(Sub)Item tabName values; plot output IDs ("map", "map_inc") must match
# the server-side render functions.
ui <- dashboardPage(title = "Dashboard-test", skin = "green",
                    dashboardHeader(title = "ECON 256 Project",
                                    dropdownMenu(type = "tasks",
                                                 taskItem(
                                                   value = 60,
                                                   color = "green",
                                                   "PV"),
                                                 taskItem(
                                                   value = 0,
                                                   color = "green",
                                                   "EV"
                                                 ),
                                                 taskItem(
                                                   value = 50,
                                                   color = "green",
                                                   "Income"
                                                 ),
                                                 taskItem(
                                                   value = 0,
                                                   color = "green",
                                                   "Housing Price"
                                                 ),
                                                 taskItem(
                                                   value = 0,
                                                   color = "green",
                                                   "Electric Usage"
                                                 ),
                                                 taskItem(
                                                   value = 0,
                                                   color = "green",
                                                   "Solar Intensity"
                                                 )
                                    )
                    ),
                    dashboardSidebar(
                      sidebarMenu(
                        menuItem("Home Menue", tabName = "Progress"),
                        menuSubItem("PV", tabName = "PV"),
                        menuSubItem("EV", tabName = "EV"),
                        menuSubItem("Income", tabName = "Income"),
                        menuSubItem("House Price", tabName = "House_Price"),
                        menuSubItem("Electric Usage", tabName = "Electric_Usage"),
                        menuSubItem("Solar Intensity", tabName = "Solar_Intensity")
                      )
                    ),
                    #The dashboardBody is where we call functions defined in the server code block to be shown on the shiny app.
                    #For example: in the tabItem where tabName = PV, leafletOutput("map") displays the "map" object the
                    #server creates as output$map.
                    #The same pattern applies to every other tabItem.
                    dashboardBody(
                      tabItems(
                        tabItem(tabName = "Progress",h1("Progress Report"), fluidPage(includeMarkdown("Progress.Rmd"))),
                        tabItem(tabName = "PV",h1("PV"), fluidRow(box(title = "PV location on Oahu",
                                                                      solidHeader = TRUE,
                                                                      background = "olive",
                                                                      leafletOutput("map",width = "100%", height=400)))),
                        tabItem(tabName = "EV",h1("EV")),
                        tabItem(tabName = "Income",h1("Income"),fluidRow(box(title = "PV location on Oahu",
                                                                             solidHeader = TRUE,
                                                                             background = "olive",
                                                                             leafletOutput("map_inc",height=500)))),
                        tabItem(tabName = "House_Price",h1("House Price")),
                        tabItem(tabName = "Electric_Usage",h1("Electric Usage")),
                        tabItem(tabName = "Solar_Intensity",h1("Solar Intensity"))
                      )
                    )
)
#_____________________________________________________________________________
#=============================================================================
#__________________________SERVER FUNCTION_____________________________________
# Server: builds the two leaflet outputs referenced by the UI
# ("map" = clustered PV permit markers, "map_inc" = income choropleth).
server <- function(input, output) {
  # 'map': interactive leaflet map of PV permit locations, with marker
  # clustering so dense areas stay readable. Uses the global solar_permits
  # table and PV_Icon defined at the top of the file.
  output$map<-renderLeaflet({
    leaflet()%>%
      addTiles()%>%
      addMarkers(data = solar_permits, lng = ~lng, lat = ~lat, icon = PV_Icon, clusterOptions = markerClusterOptions())
  })
  # 'map_inc': choropleth of household income by census tract. Geometry is
  # reprojected to WGS84 (EPSG:4326) for leaflet; fill color comes from the
  # global `pal` palette over Household_Income; popups show the tract name.
  output$map_inc <- renderLeaflet({
    censusInc %>%
      st_transform(crs = "+init=epsg:4326") %>%
      leaflet(width = "100%") %>%
      #addProviderTiles(provider = "CartoDB.Positron") %>%
      addPolygons(popup = ~ str_extract(NAME, "^([^,]*)"),
                  stroke = FALSE,
                  smoothFactor = 0,
                  fillOpacity = 0.7,
                  color = ~ pal(Household_Income)) %>%
      addLegend("topright",
                pal = pal,
                values = ~ Household_Income,
                title = "Household income",
                labFormat = labelFormat(prefix = "$"),
                opacity = 1) %>%
      addTiles()
  })
  #__________________________________________________________________________________________
  # Lenny & Elenor: please add your render functions below when your data is ready.
  # Use the form:
  #
  #   output$name_of_your_variable <- type_of_render_you_are_using({
  #
  #     # plotting code goes here
  #
  #   })
  #________________________________________________________________________________________
  # EV code HERE
  # House Price HERE
  # Electric Usage HERE
  # Solar Intensity HERE
  #_______________________________________________________________________________________
  # The output 'name_of_your_variable' can then be referenced in the
  # dashboardBody, inside the tabItem for your topic, e.g.:
  #   tabItem(tabName = "House_Price", h1("House Price"), plotOutput("name_of_your_variable"))
}
#_____________________________________________________________________________
#=============================================================================
#__________________________RUN THE APPLICATION________________________________
shinyApp(ui = ui, server = server) | /Test-Dashboard/experimental_app.R | no_license | VictorTran808/Econ256-Project | R | false | false | 10,737 | r | #=============================================================================
#_________________________Import Packages_____________________________________
library(shiny)
library(shinydashboard)
library(tidyverse)
library(leaflet)
library(sf)
#=============================================================================
#_________________________Import Dataset______________________________________
Income <- read_csv("Dataset/Income.csv")
solar_permits <- read_csv("Dataset/solar_permits.csv")
tractsf <- read_sf(dsn = "Dataset", layer = "tl_2019_15_tract")
#=============================================================================
#_________________________Cleaning Data_______________________________________
#Filter Honolulu County area only
Oahutract <- filter(tractsf, COUNTYFP == "003")
#Rename ID Geography into GEOID so the data can join the shape file data of Oahu
Rename_Income <- Income %>% rename(GEOID = "ID Geography")
Rename_Income <- mutate(Rename_Income, GEOID = str_remove(Rename_Income$GEOID, "14000US"))
#Combine the two data together
censusInc <- left_join(Oahutract, Rename_Income, by='GEOID')
#Remove the Northwestern Hawaii Island from the Honolulu County
censusInc <- filter(censusInc, NAME != '9812')
#Filter only data from year 2017 & NA
censusInc <- filter(censusInc, Year == 2017 | is.na(Year) == TRUE)
#Remove this census which is the surround body of water around Oahu
censusInc <- filter(censusInc, NAME != '9900.01')
#Rename "Household Income by Race" to household_Inc
censusInc <- censusInc %>% rename(Household_Inc = "Household Income by Race")
#Lets try to remove unnescessary column
censusInc <- select(censusInc, -c("Household Income by Race Moe",
"ID Race",
"ID Year",
"Race",
"FUNCSTAT",
"MTFCC",
"Geography",
"ALAND",
"AWATER"))
#Renaming the variables for longitude & latitude
censusInc <- rename(censusInc, lng = "INTPTLON")
censusInc <- rename(censusInc, lat = "INTPTLAT")
#Turn all na values in Household_Inc to zero
censusInc$Household_Inc[is.na(censusInc$Household_Inc)] = 0
#================================================================================
#__________________________Import Graphic & Icons Here_____________________________
#Import Icon
PV_Icon <- makeIcon(
iconUrl = "Images/pv_icon.png",
iconWidth = 30, iconHeight = 30
)
#================================================================================
#_________________________Color & Aesthetic Variables Here_______________________
# Now redefining pal variable again, but as a colorNumeric function.
Household_Income <- censusInc$Household_Inc
pal <- colorNumeric(palette = "viridis",
domain = Household_Income)
#=============================================================================
#_________________________USER INTERFACE______________________________________
# Dashboard layout: header (project-progress dropdown), sidebar (one menu
# entry per data topic) and body (one tabItem per sidebar entry).  The
# tabName strings must match between menu items and tabItems.
ui <- dashboardPage(title = "Dashboard-test", skin = "green",
# Header: progress of each sub-project shown as task items (value 0-100).
dashboardHeader(title = "ECON 256 Project",
dropdownMenu(type = "tasks",
taskItem(
value = 60,
color = "green",
"PV"),
taskItem(
value = 0,
color = "green",
"EV"
),
taskItem(
value = 50,
color = "green",
"Income"
),
taskItem(
value = 0,
color = "green",
"Housing Price"
),
taskItem(
value = 0,
color = "green",
"Electric Usage"
),
taskItem(
value = 0,
color = "green",
"Solar Intensity"
)
)
),
# Sidebar: navigation between the topic tabs defined in dashboardBody below.
# NOTE(review): "Home Menue" looks like a typo for "Home Menu" in the
# user-visible label - confirm before changing the string.
dashboardSidebar(
sidebarMenu(
menuItem("Home Menue", tabName = "Progress"),
menuSubItem("PV", tabName = "PV"),
menuSubItem("EV", tabName = "EV"),
menuSubItem("Income", tabName = "Income"),
menuSubItem("House Price", tabName = "House_Price"),
menuSubItem("Electric Usage", tabName = "Electric_Usage"),
menuSubItem("Solar Intensity", tabName = "Solar_Intensity")
)
),
# The dashboardBody displays outputs produced in the server code block.
# For example, in the tabItem with tabName = "PV" we call
# leafletOutput("map"), where "map" matches output$map in the server.
# The same pattern applies to every other tabItem.
dashboardBody(
tabItems(
tabItem(tabName = "Progress",h1("Progress Report"), fluidPage(includeMarkdown("Progress.Rmd"))),
tabItem(tabName = "PV",h1("PV"), fluidRow(box(title = "PV location on Oahu",
solidHeader = TRUE,
background = "olive",
leafletOutput("map",width = "100%", height=400)))),
tabItem(tabName = "EV",h1("EV")),
# NOTE(review): this box title says "PV location on Oahu" but the tab shows
# the income choropleth - confirm the intended title.
tabItem(tabName = "Income",h1("Income"),fluidRow(box(title = "PV location on Oahu",
solidHeader = TRUE,
background = "olive",
leafletOutput("map_inc",height=500)))),
tabItem(tabName = "House_Price",h1("House Price")),
tabItem(tabName = "Electric_Usage",h1("Electric Usage")),
tabItem(tabName = "Solar_Intensity",h1("Solar Intensity"))
)
)
)
#_____________________________________________________________________________
#=============================================================================
#__________________________SERVER FUNCTION_____________________________________
server <- function(input, output) {
# Renders output$map: interactive leaflet map of PV installation locations.
# `solar_permits` (defined earlier in the file) supplies the lng/lat columns;
# markers are clustered so dense areas stay readable.
output$map<-renderLeaflet({
leaflet()%>%
addTiles()%>%
addMarkers(data = solar_permits, lng = ~lng, lat = ~lat, icon = PV_Icon, clusterOptions = markerClusterOptions())
})
# Renders output$map_inc: choropleth of household income by census tract,
# coloured with the `pal` palette defined above.
# NOTE(review): the "+init=epsg:XXXX" CRS syntax is deprecated in recent
# PROJ/sf releases; `crs = 4326` may be preferable - confirm sf version.
output$map_inc <- renderLeaflet({
censusInc %>%
st_transform(crs = "+init=epsg:4326") %>%
leaflet(width = "100%") %>%
#addProviderTiles(provider = "CartoDB.Positron") %>%
# Popup shows the tract name up to (but excluding) the first comma.
addPolygons(popup = ~ str_extract(NAME, "^([^,]*)"),
stroke = FALSE,
smoothFactor = 0,
fillOpacity = 0.7,
color = ~ pal(Household_Income)) %>%
addLegend("topright",
pal = pal,
values = ~ Household_Income,
title = "Household income",
labFormat = labelFormat(prefix = "$"),
opacity = 1) %>%
# NOTE(review): tiles are added after the polygons; confirm the base map
# does not obscure the choropleth layer.
addTiles()
})
#__________________________________________________________________________________________
# Lenny & Elenor: please write your render functions below when your data is
# ready, using the form:
#
# output$name_of_your_variable <- type_of_render_you_are_using ({
#
#
#   Copy & paste the code that does your plotting inside here
#
#
# })
#________________________________________________________________________________________
#EV code HERE
#House Price HERE
#Electric Usage HERE
#Solar Intensity HERE
#_______________________________________________________________________________________
# The output 'name_of_your_variable' can then be referenced in the
# dashboardBody code block, inside the tabItem for your selected data, e.g.:
# tabItem(tabName = "House_Price",h1("House Price"), name_of_your_variable)
}
#_____________________________________________________________________________
#=============================================================================
#__________________________RUN THE APPLICATION________________________________
shinyApp(ui = ui, server = server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_api_key.R
\name{get.api.key}
\alias{get.api.key}
\title{Get the RIDB API key}
\usage{
get.api.key()
}
\value{
the user's api key
}
\description{
This function returns the user's RIDB API key that was defined with
\code{set.api.key}.
}
| /man/get.api.key.Rd | no_license | jtbradt/ridbAPI | R | false | true | 317 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_api_key.R
\name{get.api.key}
\alias{get.api.key}
\title{Get the RIDB API key}
\usage{
get.api.key()
}
\value{
the user's api key
}
\description{
This function returns the user's RIDB API key that was defined with
\code{set.api.key}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exa.createScript.R
\name{exa.createScript}
\alias{exa.createScript}
\title{Deploys an R function as an UDF in the EXASolution database.}
\usage{
exa.createScript(channel, name, func = NA, env = list(), initCode = NA,
cleanCode = NA, inType = SET, inArgs = list(), outType = EMITS,
outArgs = list(), outputAddress = NA, replaceIfExists = TRUE,
mockOnly = FALSE)
}
\arguments{
\item{channel}{The RODBC connection channel, typically created via
odbcConnect.}
\item{name}{The script will be created in the database with this name.}
\item{func}{The R function to be created as a UDF R script in the database.}
\item{env}{A list of values which will be available in the UDF function under
the same name.
For example if you pass \code{list(a=2)} for this argument, you can access
the value a in the function via \code{env$a}.}
\item{initCode}{This code block will be executed once on each parallel
instance of the database running the script, before the first call of the
function.}
\item{cleanCode}{This code block will be executed once on each parallel
instance of the database running the script, after the function was called
the last time.}
\item{inType}{The input type of the UDF script, either \code{SET} or
\code{SCALAR}.
\code{SET} will call the function once for each group,
\code{SCALAR} will call the function once for each record.
Please read the EXASolution manual about UDF scripts for details.}
\item{inArgs}{Vector of strings specifying the names and types of the input
arguments for the UDF script.
Example: \code{inArgs = c("k INT", "v VARCHAR(10)")}}
\item{outType}{The output type of the UDF script, either \code{EMITS} or
\code{RETURNS}. For \code{EMITS}, the function emits any number of values.
For \code{RETURNS}, the function emits just a single value.}
\item{outArgs}{Vector of strings specifying the names and types of the output
arguments of the UDF script.
Example: \code{outArgs = c("outputstring VARCHAR(10)")}}
\item{outputAddress}{This parameter specifies the address and port of the
optional python output service is listening on. For example:
\code{c("192.168.1.10", 3000)}.Please read the README.txt of this R package
for details.}
\item{replaceIfExists}{Boolean whether the script shall be replaced if it
already exists. Either \code{TRUE} or \code{FALSE}.}
\item{mockOnly}{Boolean, default FALSE. This parameter is useful for
unit-testing if the ODBC connection is not available. Setting mockOnly=TRUE
will not install the UDF function to the EXASOL database.}
}
\value{
This function returns a function that, when called, will execute the
script on the server. With the call you have to specify to which data it
shall be applied. The returned function generates and executes a
\code{SELECT SQL} statement behind the scenes. It has the following
signature:
\code{function(..., table = NA, where = NA, groupBy = NA, restQuery = "",
returnSQL = FALSE, reader = NA, server = NA)}
\item{...}{The first string parameters define the SQL expressions that will
be used as the input for the UDF script. Typically this is one or more
column names as you see in the example below.}
\item{table}{A string with the table name to which the function shall be
applied to. You can specify quoted names the following:
table='myschema."MyQuotedTable"'}
\item{where}{A string with the where clause (SQL) to filter the records.}
\item{groupBy}{A string with the group-by clause (SQL) that will be used to
group the records. This is especially important for SET UDF scripts that will be
called once for each group.}
\item{returnSQL}{Boolean value. For TRUE, the autogenerated SQL statement
will be returned, but NOT executed.}
\item{restQuery}{A string with additional SQL code that will be appended at
the end of the autogenerated query, e.g. ORDER BY or HAVING.}
\item{reader}{For internal usage only.}
\item{server}{For internal usage only.}
\item{RETURN VALUE}{The return value of the function is the result of the
SELECT query. The query will be executed internally with the exa.readData
function.}
}
\description{
This function takes an R function and creates a R UDF script on the
EXASolution database. A \code{CREATE SCRIPT} call will be used behind the
scenes. The return value is a function that, when executed, will execute the
script on the database server.
}
\details{
We recommend to read the EXASolution manual about UDF scripts for a better
understanding.
}
\examples{
\dontrun{
# This example creates a simple SET-EMITS script and executes
# it on the table footable.
require(RODBC)
require(exasol)
# Connect via RODBC with configured DSN
C <- odbcConnect("exasolution")
# Generate example data frame with two groups
# of random values with different means.
valsMean0 <- rnorm(10, 0)
valsMean50 <- rnorm(10, 50)
twogroups <- data.frame(group = rep(1:2, each = 10),
value = c(valsMean0, valsMean50))
# Write example data to a table
odbcQuery(C, "CREATE SCHEMA test")
odbcQuery(C, "CREATE TABLE test.twogroups (groupid INT, val DOUBLE)")
exa.writeData(C, twogroups, tableName = "test.twogroups")
# Create the R function as an UDF R script in the database
# In our case it computes the mean for each group.
testscript <- exa.createScript(
C,
"test.mymean",
function(data) {
data$next_row(NA); # read all values from this group into a single vector
data$emit(data$groupid[[1]], mean(data$val))
},
inArgs = c( "groupid INT", "val DOUBLE" ),
outArgs = c( "groupid INT", "mean DOUBLE" ) )
# Run the function, grouping by the groupid column
# and aggregating on the "val" column. This returns
# two values which are close to the means of the two groups.
testscript("groupid", "val", table = "test.twogroups" , groupBy = "groupid")
}
}
\author{
EXASOL AG <support@exasol.com>
}
| /man/exa.createScript.Rd | no_license | marcelboldt/r-exasol | R | false | true | 5,903 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exa.createScript.R
\name{exa.createScript}
\alias{exa.createScript}
\title{Deploys an R function as an UDF in the EXASolution database.}
\usage{
exa.createScript(channel, name, func = NA, env = list(), initCode = NA,
cleanCode = NA, inType = SET, inArgs = list(), outType = EMITS,
outArgs = list(), outputAddress = NA, replaceIfExists = TRUE,
mockOnly = FALSE)
}
\arguments{
\item{channel}{The RODBC connection channel, typically created via
odbcConnect.}
\item{name}{The script will be created in the database with this name.}
\item{func}{The R function to be created as a UDF R script in the database.}
\item{env}{A list of values which will be available in the UDF function under
the same name.
For example if you pass \code{list(a=2)} for this argument, you can access
the value a in the function via \code{env$a}.}
\item{initCode}{This code block will be executed once on each parallel
instance of the database running the script, before the first call of the
function.}
\item{cleanCode}{This code block will be executed once on each parallel
instance of the database running the script, after the function was called
the last time.}
\item{inType}{The input type of the UDF script, either \code{SET} or
\code{SCALAR}.
\code{SET} will call the function once for each group,
\code{SCALAR} will call the function once for each record.
Please read the EXASolution manual about UDF scripts for details.}
\item{inArgs}{Vector of strings specifying the names and types of the input
arguments for the UDF script.
Example: \code{inArgs = c("k INT", "v VARCHAR(10)")}}
\item{outType}{The output type of the UDF script, either \code{EMITS} or
\code{RETURNS}. For \code{EMITS}, the function emits any number of values.
For \code{RETURNS}, the function emits just a single value.}
\item{outArgs}{Vector of strings specifying the names and types of the output
arguments of the UDF script.
Example: \code{outArgs = c("outputstring VARCHAR(10)")}}
\item{outputAddress}{This parameter specifies the address and port of the
optional python output service is listening on. For example:
\code{c("192.168.1.10", 3000)}.Please read the README.txt of this R package
for details.}
\item{replaceIfExists}{Boolean whether the script shall be replaced if it
already exists. Either \code{TRUE} or \code{FALSE}.}
\item{mockOnly}{Boolean, default FALSE. This parameter is useful for
unit-testing if the ODBC connection is not available. Setting mockOnly=TRUE
will not install the UDF function to the EXASOL database.}
}
\value{
This function returns a function that, when called, will execute the
script on the server. With the call you have to specify to which data it
shall be applied. The returned function generates and executes a
\code{SELECT SQL} statement behind the scenes. It has the following
signature:
\code{function(..., table = NA, where = NA, groupBy = NA, restQuery = "",
returnSQL = FALSE, reader = NA, server = NA)}
\item{...}{The first string parameters define the SQL expressions that will
be used as the input for the UDF script. Typically this is one or more
column names as you see in the example below.}
\item{table}{A string with the table name to which the function shall be
applied to. You can specify quoted names the following:
table='myschema."MyQuotedTable"'}
\item{where}{A string with the where clause (SQL) to filter the records.}
\item{groupBy}{A string with the group-by clause (SQL) that will be used to
group the records. This is especially important for SET UDF scripts that will be
called once for each group.}
\item{returnSQL}{Boolean value. For TRUE, the autogenerated SQL statement
will be returned, but NOT executed.}
\item{restQuery}{A string with additional SQL code that will be appended at
the end of the autogenerated query, e.g. ORDER BY or HAVING.}
\item{reader}{For internal usage only.}
\item{server}{For internal usage only.}
\item{RETURN VALUE}{The return value of the function is the result of the
SELECT query. The query will be executed internally with the exa.readData
function.}
}
\description{
This function takes an R function and creates a R UDF script on the
EXASolution database. A \code{CREATE SCRIPT} call will be used behind the
scenes. The return value is a function that, when executed, will execute the
script on the database server.
}
\details{
We recommend to read the EXASolution manual about UDF scripts for a better
understanding.
}
\examples{
\dontrun{
# This example creates a simple SET-EMITS script and executes
# it on the table footable.
require(RODBC)
require(exasol)
# Connect via RODBC with configured DSN
C <- odbcConnect("exasolution")
# Generate example data frame with two groups
# of random values with different means.
valsMean0 <- rnorm(10, 0)
valsMean50 <- rnorm(10, 50)
twogroups <- data.frame(group = rep(1:2, each = 10),
value = c(valsMean0, valsMean50))
# Write example data to a table
odbcQuery(C, "CREATE SCHEMA test")
odbcQuery(C, "CREATE TABLE test.twogroups (groupid INT, val DOUBLE)")
exa.writeData(C, twogroups, tableName = "test.twogroups")
# Create the R function as an UDF R script in the database
# In our case it computes the mean for each group.
testscript <- exa.createScript(
C,
"test.mymean",
function(data) {
data$next_row(NA); # read all values from this group into a single vector
data$emit(data$groupid[[1]], mean(data$val))
},
inArgs = c( "groupid INT", "val DOUBLE" ),
outArgs = c( "groupid INT", "mean DOUBLE" ) )
# Run the function, grouping by the groupid column
# and aggregating on the "val" column. This returns
# two values which are close to the means of the two groups.
testscript("groupid", "val", table = "test.twogroups" , groupBy = "groupid")
}
}
\author{
EXASOL AG <support@exasol.com>
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate.R
\name{json_validator}
\alias{json_validator}
\title{Create a json validator}
\usage{
json_validator(schema, engine = "imjv", reference = NULL, strict = FALSE)
}
\arguments{
\item{schema}{Contents of the json schema, or a filename
containing a schema.}
\item{engine}{Specify the validation engine to use. Options are
"imjv" (the default; which uses "is-my-json-valid") and "ajv"
(Another JSON Schema Validator). The latter supports more
recent json schema features.}
\item{reference}{Reference within schema to use for validating against a
sub-schema instead of the full schema passed in. For example
if the schema has a 'definitions' list including a definition for a
'Hello' object, one could pass "#/definitions/Hello" and the validator
would check that the json is a valid "Hello" object. Only available if
\code{engine = "ajv"}.}
\item{strict}{Set whether the schema should be parsed strictly or not.
If in strict mode schemas will error to "prevent any unexpected
behaviours or silently ignored mistakes in user schema". For example
it will error if encounters unknown formats or unknown keywords. See
https://ajv.js.org/strict-mode.html for details. Only available in
\code{engine = "ajv"}.}
}
\description{
Create a validator that can validate multiple json files.
}
\section{Validation Engines}{
We support two different json validation engines, \code{imjv}
("is-my-json-valid") and \code{ajv} ("Another JSON
Validator"). \code{imjv} was the original validator included in
the package and remains the default for reasons of backward
compatibility. However, users are encouraged to migrate to
\code{ajv} as with it we support many more features, including
nested schemas that span multiple files, meta schema versions
later than draft-04, validating using a subschema, and
validating a subset of an input data object.
If your schema uses these features we will print a message to
screen indicating that you should update when running
interactively. We do not use a warning here as this will be
disruptive to users. You can disable the message by setting the
option \code{jsonvalidate.no_note_imjv} to \code{TRUE}. Consider using
\code{\link[withr:with_options]{withr::with_options()}} (or simply \code{\link[=suppressMessages]{suppressMessages()}}) to
scope this option if you want to quieten it within code you do
not control. Alternatively, setting the option
\code{jsonvalidate.no_note_imjv} to \code{FALSE} will print the message
even noninteractively.
Updating the engine should be simply a case of adding \verb{\{engine = "ajv"} to your \code{json_validator} or \code{json_validate}
calls, but you may see some issues when doing so.
\itemize{
\item Your json now fails validation: We've seen this where schemas
spanned several files and are silently ignored. By including
these, your data may now fail validation and you will need to
either fix the data or the schema.
\item Your code depended on the exact payload returned by \code{imjv}: If
you are inspecting the error result and checking numbers of
errors, or even the columns used to describe the errors, you
will likely need to update your code to accommodate the slightly
different format of \code{ajv}
\item Your schema is simply invalid: If you reference an invalid
metaschema for example, jsonvalidate will fail
}
}
\section{Using multiple files}{
Multiple files are supported. You can have a schema that
references a file \code{child.json} using \code{{"$ref": "child.json"}} -
in this case if \code{child.json} includes an \code{id} or \verb{$id} element
it will be silently dropped and the filename used to reference
the schema will be used as the schema id.
The support is currently quite limited - it will not (yet) read
sub-child schemas relative to child schema \verb{$id} url, and
does not support reading from URLs (only local files are
supported).
}
\examples{
# A simple schema example:
schema <- '{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Product",
"description": "A product from Acme\'s catalog",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for a product",
"type": "integer"
},
"name": {
"description": "Name of the product",
"type": "string"
},
"price": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"tags": {
"type": "array",
"items": {
"type": "string"
},
"minItems": 1,
"uniqueItems": true
}
},
"required": ["id", "name", "price"]
}'
# Create a validator function
v <- jsonvalidate::json_validator(schema)
# Test if some (invalid) json conforms to the schema
v("{}", verbose = TRUE)
# Test if some (valid) json conforms to the schema
v('{
"id": 1,
"name": "A green door",
"price": 12.50,
"tags": ["home", "green"]
}')
# Using features from draft-06 or draft-07 requires the ajv engine:
schema <- "{
'$schema': 'http://json-schema.org/draft-06/schema#',
'type': 'object',
'properties': {
'a': {
'const': 'foo'
}
}
}"
# Create the validator
v <- jsonvalidate::json_validator(schema, engine = "ajv")
# This conforms to the schema
v('{"a": "foo"}')
# But this does not
v('{"a": "bar"}')
}
| /man/json_validator.Rd | permissive | cran/jsonvalidate | R | false | true | 5,453 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validate.R
\name{json_validator}
\alias{json_validator}
\title{Create a json validator}
\usage{
json_validator(schema, engine = "imjv", reference = NULL, strict = FALSE)
}
\arguments{
\item{schema}{Contents of the json schema, or a filename
containing a schema.}
\item{engine}{Specify the validation engine to use. Options are
"imjv" (the default; which uses "is-my-json-valid") and "ajv"
(Another JSON Schema Validator). The latter supports more
recent json schema features.}
\item{reference}{Reference within schema to use for validating against a
sub-schema instead of the full schema passed in. For example
if the schema has a 'definitions' list including a definition for a
'Hello' object, one could pass "#/definitions/Hello" and the validator
would check that the json is a valid "Hello" object. Only available if
\code{engine = "ajv"}.}
\item{strict}{Set whether the schema should be parsed strictly or not.
If in strict mode schemas will error to "prevent any unexpected
behaviours or silently ignored mistakes in user schema". For example
it will error if encounters unknown formats or unknown keywords. See
https://ajv.js.org/strict-mode.html for details. Only available in
\code{engine = "ajv"}.}
}
\description{
Create a validator that can validate multiple json files.
}
\section{Validation Engines}{
We support two different json validation engines, \code{imjv}
("is-my-json-valid") and \code{ajv} ("Another JSON
Validator"). \code{imjv} was the original validator included in
the package and remains the default for reasons of backward
compatibility. However, users are encouraged to migrate to
\code{ajv} as with it we support many more features, including
nested schemas that span multiple files, meta schema versions
later than draft-04, validating using a subschema, and
validating a subset of an input data object.
If your schema uses these features we will print a message to
screen indicating that you should update when running
interactively. We do not use a warning here as this will be
disruptive to users. You can disable the message by setting the
option \code{jsonvalidate.no_note_imjv} to \code{TRUE}. Consider using
\code{\link[withr:with_options]{withr::with_options()}} (or simply \code{\link[=suppressMessages]{suppressMessages()}}) to
scope this option if you want to quieten it within code you do
not control. Alternatively, setting the option
\code{jsonvalidate.no_note_imjv} to \code{FALSE} will print the message
even noninteractively.
Updating the engine should be simply a case of adding \verb{\{engine = "ajv"} to your \code{json_validator} or \code{json_validate}
calls, but you may see some issues when doing so.
\itemize{
\item Your json now fails validation: We've seen this where schemas
spanned several files and are silently ignored. By including
these, your data may now fail validation and you will need to
either fix the data or the schema.
\item Your code depended on the exact payload returned by \code{imjv}: If
you are inspecting the error result and checking numbers of
errors, or even the columns used to describe the errors, you
will likely need to update your code to accommodate the slightly
different format of \code{ajv}
\item Your schema is simply invalid: If you reference an invalid
metaschema for example, jsonvalidate will fail
}
}
\section{Using multiple files}{
Multiple files are supported. You can have a schema that
references a file \code{child.json} using \code{{"$ref": "child.json"}} -
in this case if \code{child.json} includes an \code{id} or \verb{$id} element
it will be silently dropped and the filename used to reference
the schema will be used as the schema id.
The support is currently quite limited - it will not (yet) read
sub-child schemas relative to child schema \verb{$id} url, and
does not support reading from URLs (only local files are
supported).
}
\examples{
# A simple schema example:
schema <- '{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Product",
"description": "A product from Acme\'s catalog",
"type": "object",
"properties": {
"id": {
"description": "The unique identifier for a product",
"type": "integer"
},
"name": {
"description": "Name of the product",
"type": "string"
},
"price": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"tags": {
"type": "array",
"items": {
"type": "string"
},
"minItems": 1,
"uniqueItems": true
}
},
"required": ["id", "name", "price"]
}'
# Create a validator function
v <- jsonvalidate::json_validator(schema)
# Test if some (invalid) json conforms to the schema
v("{}", verbose = TRUE)
# Test if some (valid) json conforms to the schema
v('{
"id": 1,
"name": "A green door",
"price": 12.50,
"tags": ["home", "green"]
}')
# Using features from draft-06 or draft-07 requires the ajv engine:
schema <- "{
'$schema': 'http://json-schema.org/draft-06/schema#',
'type': 'object',
'properties': {
'a': {
'const': 'foo'
}
}
}"
# Create the validator
v <- jsonvalidate::json_validator(schema, engine = "ajv")
# This conforms to the schema
v('{"a": "foo"}')
# But this does not
v('{"a": "bar"}')
}
|
# Functions calculating conditional intensities
#---------------------------------------------------------------------------------
#' Compute conditional intensity of events at time time
#'
#' @param time Current time.
#' @param kernel Function describing the kernel.
#' @param events Vector of event times.
#' @param parameters Parameters of the Hawkes kernel.
#' @return The sum of the kernel evaluated at \code{t} - \code{y}.
#' @examples
#' conditional_intensity(time = 1.5, kernel = ray_kernel, events = c(0.5, 1, 1.3),
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' @export
conditional_intensity <- function(time, kernel, events, parameters){
  # How long ago each event occurred relative to `time`.
  elapsed <- time - events
  # Restrict to events strictly in the past (which() also drops NAs).
  elapsed <- elapsed[which(elapsed > 0)]
  # Total intensity is the sum of the kernel contributions of past events.
  sum(kernel(elapsed, parameters))
}
#---------------------------------------------------------------------------------
#' Computes conditional intensity for a list of events and times
#'
#' @param times List of current time.
#' @param events Vector of event times.
#' @param kernel Function describing the kernel.
#' @param parameters Parameters of the Hawkes kernel.
#' @return A vector of the sums of the kernel evaluated at \code{t} - \code{y}.
#' @examples
#' conditional_intensity_list(times = c(1, 1.5), kernel = ray_kernel,
#' events = c(0.5, 1, 1.3), parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' @export
conditional_intensity_list <- function(times, kernel, events, parameters){
  # Pairwise elapsed times: entry [i, j] = times[i] - events[j].
  lag_mat <- outer(times, events, "-")
  # Events at or after the evaluation time contribute nothing; mark them NA
  # so they are excluded from the row sums below.
  lag_mat[lag_mat <= 0] <- NA
  kernel_vals <- kernel(lag_mat, parameters = parameters)
  # One intensity per requested time.
  rowSums(kernel_vals, na.rm = TRUE)
}
#---------------------------------------------------------------------------------
#' Compute lambda_max - maximum intensity at a given time
#'
#' @param time Current time.
#' @param events Vector of event times.
#' @param previous_event_time Time of previous known event.
#' @param T_max Maximum time of simulation.
#' @param kernel Function describing the kernel.
#' @param parameters Parameters of the Hawkes kernel.
#' @param mu_fn Function describing contribution to intensity from exogenous terms.
#' @param mu_fn_diff Differential of the mu function.
#' @param print_level Level of printing to display.
#' @return Maximum value of lambda after an event assuming no more events.
#' @examples
#' max_lambda(time = 2, events = c(0.5, 1, 1.3), previous_event_time = 1.3,
#' T_max = 10, kernel = ray_kernel,
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' max_lambda(time = 2, events = c(0.5, 1, 1.3), previous_event_time = 1.3,
#' T_max = 10, kernel = ray_kernel,
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0, "A" = 2),
#' mu_fn = mu_constant, mu_fn_diff = mu_diff_constant)
#' @export
max_lambda <- function(time, events, previous_event_time, T_max, kernel,
parameters, mu_fn = mu_none, mu_fn_diff = mu_diff_none,
print_level = 1){
# Candidate times at which the exogenous intensity mu could be maximal.
time_maximum_mus <- max_mu(time = time, T_max = T_max,
parameters = parameters,
mu_fn = mu_fn, mu_fn_diff = mu_fn_diff)
if (identical(kernel, ray_kernel)){
# For the Rayleigh kernel, a single event's contribution peaks at
# delay + 1/sqrt(delta) after the event.
time_max_previous <- previous_event_time + 1 / sqrt(parameters$delta) + parameters$delay
# Search up to the later of the kernel peak and the mu maximum.
# NOTE(review): the search range could perhaps be shrunk (e.g. to the
# first peak after the current time) - confirm before tightening.
max_time_eval <- max(time_maximum_mus, time_max_previous)
# Derivative of the total intensity; its roots are candidate maxima.
f <- function(time){
# Contribution of mu's derivative at this time.
# NOTE(review): the second call uses `t = time`, which relies on R's
# partial argument matching against mu_fn_diff's `time` parameter;
# the ifelse also appears redundant since both branches evaluate the
# same derivative - confirm intent.
mu_t <- ifelse(mu_fn_diff(time = time, parameters = parameters) == 0, 0,
mu_fn_diff(t = time, parameters = parameters))
return (mu_t + conditional_intensity_list(times = time, kernel = ray_kernel_diff,
events = events[events <= previous_event_time],
parameters = parameters))
}
# All roots of the derivative between the current time and the latest
# candidate peak (kernel peak or mu maximum).
all_times <- all_roots(f, interval = c(time, max_time_eval))
# The current time itself is always a candidate maximum.
all_times <- c(all_times, time)
} else if (identical(kernel, exp_kernel)){
# Only events at or before the previous known event contribute.
events_subset <- events[events <= previous_event_time]
# For the exponential kernel each event's contribution peaks just after
# event_time + delay (1e-10 nudges past the discontinuity).
max_times <- events_subset + parameters$delay + 1e-10
all_times <- max_times[max_times >= time]
# Include the current time and the mu maxima as candidates too.
all_times <- unique(c(all_times, time, time_maximum_mus))
} else{
stop("The maximum intensity for this set up is not coded up.")
}
# Evaluate the full intensity (mu + kernel sum) at every candidate time.
# mu_fn returning NULL (e.g. mu_none) means no exogenous contribution.
if (is.null(mu_fn(time, parameters = parameters))){
mu_ts = rep(0, length(all_times))
} else{
mu_ts = mu_fn(all_times, parameters = parameters)
}
all_intensities <- mu_ts +
conditional_intensity_list(times = all_times,
events = events[events <= previous_event_time],
kernel = kernel,
parameters = parameters)
# Return the largest candidate intensity.
max_idx <- which.max(all_intensities)
print_message(sprintf("Maximum conditional intensity for time %f is %f at time %f",
time, all_intensities[max_idx], all_times[max_idx]),
log_level = 3, print_level = print_level)
return (all_intensities[max_idx])
}
#----------------------------------------------------------------------------------------------
#' Compute time of maximum intensity of mu
#'
#' @param time Current time.
#' @param T_max Maximum time of simulation.
#' @param parameters Parameters of the Hawkes kernel.
#' @param mu_fn Function describing contribution to intensity from exogenous terms.
#' @param mu_fn_diff Differential of the mu function.
#' @return Times of maximum value of mu.
#' @examples
#' max_mu(time = 5, T_max= 10, parameters = list("A" = 1),
#' mu_fn = mu_constant, mu_fn_diff = mu_diff_constant)
#' max_mu(time = 5, T_max= 100, parameters = list("A" = 1, "B" = -1),
#' mu_fn = mu_linear, mu_fn_diff = mu_diff_linear)
#'max_mu(time = 5, T_max= 700, parameters = list("M" = 1, "N" = 1),
#' mu_fn = mu_sinusoidal, mu_fn_diff = mu_diff_sinusoidal)
#' @export
max_mu <- function(time, T_max, parameters,
                   mu_fn = mu_none, mu_fn_diff = mu_diff_none){
  # No usable derivative supplied: report 0 as the (dummy) time of maximum mu.
  if (identical(mu_fn_diff, mu_diff_none)){
    return (0)
  }
  # Stationary points of mu between the current time and the end of the
  # simulation window.
  stationary <- all_roots(mu_fn_diff, interval = c(time, T_max),
                          parameters = parameters)
  # Candidate times: both interval endpoints plus any interior stationary
  # points (concatenating an empty root set leaves just the endpoints).
  candidates <- c(time, T_max, stationary)
  # Evaluate mu at every candidate.
  mu_at_candidates <- mu_fn(candidates, parameters = parameters)
  # Keep every candidate whose mu value ties the maximum to 5 decimal places.
  top <- round(max(mu_at_candidates), 5)
  max_time <- candidates[round(mu_at_candidates, digits = 5) == top]
  # mu still increasing at T_max is not a valid Hawkes formulation.
  if (max_time[1] == T_max){
    stop("Check mu term is not increasing - invalid for Hawkes Processes.")
  }
  return (max_time)
}
| /R/conditional_intensities.R | permissive | mrc-ide/epihawkes | R | false | false | 8,019 | r | # Functions calculating conditional intensities
#---------------------------------------------------------------------------------
#' Compute conditional intensity of events at time time
#'
#' @param time Current time.
#' @param kernel Function describing the kernel.
#' @param events Vector of event times.
#' @param parameters Parameters of the Hawkes kernel.
#' @return The sum of the kernel evaluated at \code{t} - \code{y}.
#' @examples
#' conditional_intensity(time = 1.5, kernel = ray_kernel, events = c(0.5, 1, 1.3),
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' @export
conditional_intensity <- function(time, kernel, events, parameters){
  # Lags from each event to the current time; only strictly positive lags
  # (events strictly before `time`) contribute to the intensity.
  lags <- time - events
  positive_lags <- lags[which(lags > 0)]
  # Total endogenous intensity: kernel evaluated at every lag, summed.
  sum(kernel(positive_lags, parameters))
}
#---------------------------------------------------------------------------------
#' Computes conditional intensity for a list of events and times
#'
#' @param times List of current time.
#' @param events Vector of event times.
#' @param kernel Function describing the kernel.
#' @param parameters Parameters of the Hawkes kernel.
#' @return A vector of the sums of the kernel evaluated at \code{t} - \code{y}.
#' @examples
#' conditional_intensity_list(time = c(1, 1.5), kernel = ray_kernel,
#' events = c(0.5, 1, 1.3), parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' @export
conditional_intensity_list <- function(times, kernel, events, parameters){
  # Lag matrix: entry (i, j) is times[i] - events[j].
  lag_matrix <- outer(times, events, "-")
  # Events at or after an evaluation time do not contribute; mark their lags
  # NA so they are dropped from the row sums below.
  lag_matrix[lag_matrix <= 0] <- NA
  # Apply the kernel elementwise and sum each row: one intensity per time.
  rowSums(kernel(lag_matrix, parameters = parameters), na.rm = TRUE)
}
#---------------------------------------------------------------------------------
#' Compute lambda_max - maximum intensity at a given time
#'
#' @param time Current time.
#' @param events Vector of event times.
#' @param previous_event_time Time of previous known event.
#' @param T_max Maximum time of simulation.
#' @param kernel Function describing the kernel.
#' @param parameters Parameters of the Hawkes kernel.
#' @param mu_fn Function describing contribution to intensity from exogenous terms.
#' @param mu_fn_diff Differential of the mu function.
#' @param print_level Level of printing to display.
#' @return Maximum value of lambda after an event assuming no more events.
#' @examples
#' max_lambda(time = 2, events = c(0.5, 1, 1.3), previous_event_time = 1.3,
#' T_max = 10, kernel = ray_kernel,
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0))
#' max_lambda(time = 2, events = c(0.5, 1, 1.3), previous_event_time = 1.3,
#' T_max = 10, kernel = ray_kernel,
#' parameters = list("alpha" = 1.0, "delta" = 1.0, "delay" = 0, "A" = 2),
#' mu_fn = mu_constant, mu_fn_diff = mu_diff_constant)
#' @export
max_lambda <- function(time, events, previous_event_time, T_max, kernel,
                       parameters, mu_fn = mu_none, mu_fn_diff = mu_diff_none,
                       print_level = 1){
  # Times at which the exogenous term mu attains its maximum on [time, T_max].
  time_maximum_mus <- max_mu(time = time, T_max = T_max,
                             parameters = parameters,
                             mu_fn = mu_fn, mu_fn_diff = mu_fn_diff)
  if (identical(kernel, ray_kernel)){
    # A Rayleigh kernel contribution peaks 1 / sqrt(delta) (plus delay) after
    # its triggering event.
    time_max_previous <- previous_event_time + 1 / sqrt(parameters$delta) + parameters$delay
    # Upper end of the root search: the later of the kernel peak and the mu peak.
    max_time_eval <- max(time_maximum_mus, time_max_previous)
    # Derivative of the total intensity; its roots are the candidate maxima.
    f <- function(time){
      # Contribution from the derivative of mu.  The first argument is passed
      # positionally: the original code called mu_fn_diff(time = ...) in one
      # place and mu_fn_diff(t = ...) in another, which must error for any
      # formal name; positional matching works for either convention.
      mu_t <- mu_fn_diff(time, parameters = parameters)
      return (mu_t + conditional_intensity_list(times = time, kernel = ray_kernel_diff,
                                                events = events[events <= previous_event_time],
                                                parameters = parameters))
    }
    # All stationary points of the intensity between the current time and the
    # latest possible peak.
    all_times <- all_roots(f, interval = c(time, max_time_eval))
    # The current time itself is also a candidate (boundary maximum).
    all_times <- c(all_times, time)
  } else if (identical(kernel, exp_kernel)){
    # Only events that have already happened contribute.
    events_subset <- events[events <= previous_event_time]
    # An exponential kernel peaks immediately after the (delayed) event time;
    # the 1e-10 nudges the evaluation just past the discontinuity.
    max_times <- events_subset + parameters$delay + 1e-10
    all_times <- max_times[max_times >= time]
    # Include the current time and the mu maxima as boundary candidates.
    all_times <- unique(c(all_times, time, time_maximum_mus))
  } else{
    stop("The maximum intensity for this set up is not coded up.")
  }
  # Evaluate the full intensity at every candidate time.
  # mu_fn returning NULL signals "no exogenous term" (mu_none).
  if (is.null(mu_fn(time, parameters = parameters))){
    mu_ts = rep(0, length(all_times))
  } else{
    mu_ts = mu_fn(all_times, parameters = parameters)
  }
  all_intensities <- mu_ts +
    conditional_intensity_list(times = all_times,
                               events = events[events <= previous_event_time],
                               kernel = kernel,
                               parameters = parameters)
  # The largest candidate intensity is the bound used for thinning.
  max_idx <- which.max(all_intensities)
  print_message(sprintf("Maximum conditional intensity for time %f is %f at time %f",
                        time, all_intensities[max_idx], all_times[max_idx]),
                log_level = 3, print_level = print_level)
  return (all_intensities[max_idx])
}
#----------------------------------------------------------------------------------------------
#' Compute time of maximum intensity of mu
#'
#' @param time Current time.
#' @param T_max Maximum time of simulation.
#' @param parameters Parameters of the Hawkes kernel.
#' @param mu_fn Function describing contribution to intensity from exogenous terms.
#' @param mu_fn_diff Differential of the mu function.
#' @return Times of maximum value of mu.
#' @examples
#' max_mu(time = 5, T_max= 10, parameters = list("A" = 1),
#' mu_fn = mu_constant, mu_fn_diff = mu_diff_constant)
#' max_mu(time = 5, T_max= 100, parameters = list("A" = 1, "B" = -1),
#' mu_fn = mu_linear, mu_fn_diff = mu_diff_linear)
#'max_mu(time = 5, T_max= 700, parameters = list("M" = 1, "N" = 1),
#' mu_fn = mu_sinusoidal, mu_fn_diff = mu_diff_sinusoidal)
#' @export
max_mu <- function(time, T_max, parameters,
                   mu_fn = mu_none, mu_fn_diff = mu_diff_none){
  # No usable derivative supplied: report 0 as the (dummy) time of maximum mu.
  if (identical(mu_fn_diff, mu_diff_none)){
    return (0)
  }
  # Stationary points of mu between the current time and the end of the
  # simulation window.
  stationary <- all_roots(mu_fn_diff, interval = c(time, T_max),
                          parameters = parameters)
  # Candidate times: both interval endpoints plus any interior stationary
  # points (concatenating an empty root set leaves just the endpoints).
  candidates <- c(time, T_max, stationary)
  # Evaluate mu at every candidate.
  mu_at_candidates <- mu_fn(candidates, parameters = parameters)
  # Keep every candidate whose mu value ties the maximum to 5 decimal places.
  top <- round(max(mu_at_candidates), 5)
  max_time <- candidates[round(mu_at_candidates, digits = 5) == top]
  # mu still increasing at T_max is not a valid Hawkes formulation.
  if (max_time[1] == T_max){
    stop("Check mu term is not increasing - invalid for Hawkes Processes.")
  }
  return (max_time)
}
|
# makeCacheMatrix creates list of functions (setMatrix, getMatrix,
# cacheInverse,getInverse), which is really a list
# containing a function to store the matrix and cached value of the inverse
# Constructor for a matrix wrapper that can memoise its inverse.  Returns a
# list of four closures sharing the matrix `x` and the cached inverse.
makeCacheMatrix <- function(x = numeric()) {
  # Cached inverse; NULL means "not computed yet".
  cache <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  setMatrix <- function(newValue) {
    x <<- newValue
    cache <<- NULL
  }
  # Return the stored matrix.
  getMatrix <- function() x
  # Store a freshly computed inverse in the cache (the argument holds the
  # inverse matrix itself).
  cacheInverse <- function(solve) cache <<- solve
  # Return the cached inverse, or NULL if none has been stored.
  getInverse <- function() cache
  # Expose the four accessors under their documented names.
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       cacheInverse = cacheInverse,
       getInverse = getInverse)
}
# this function computes the inverse of the special "matrix" returned
# by the above function (makeCacheMatrix)
# Compute the inverse of the special "matrix" created by makeCacheMatrix(),
# reusing the memoised result when one is available.
#   y   : list of accessors returned by makeCacheMatrix()
#   ... : currently unused (not forwarded to solve())
# Returns the matrix inverse of y's stored matrix.
cacheSolve <- function(y, ...)
{
# previously cached inverse (NULL if none has been stored yet)
inverse <- y$getInverse()
# cache hit: skip the potentially expensive solve() call
if(!is.null(inverse)) {
message("returning the cached data")
# return the memoised value
return(inverse)
}
# cache miss: fetch the matrix, compute the inverse,
# and store it for subsequent calls
data <- y$getMatrix()
inverse <- solve(data)
y$cacheInverse(inverse)
# value of the function: the freshly computed inverse
inverse
} | /cachematrix.R | no_license | fwaktole/ProgrammingAssignment2 | R | false | false | 1,528 | r | # makeCacheMatrix creates list of functions (setMatrix, getMatrix,
# cacheInverse,getInverse), which is really a list
# containing a function to store the matrix and cached value of the inverse
# Constructor for a matrix wrapper that can memoise its inverse.  Returns a
# list of four closures sharing the matrix `x` and the cached inverse:
#   setMatrix(newValue)  -- replace the matrix and drop any cached inverse
#   getMatrix()          -- return the stored matrix
#   cacheInverse(solve)  -- store a computed inverse
#   getInverse()         -- return the cached inverse, or NULL if not set
makeCacheMatrix <- function(x = numeric()) {
# cached inverse starts empty (NULL = "not yet computed")
cache <- NULL
# replace the stored matrix
setMatrix <- function(newValue)
{
x <<- newValue
# invalidate the cache: the old inverse no longer matches the new matrix
cache <<- NULL
}
# get the value of the matrix
getMatrix <- function() {x}
# store a freshly computed inverse in the cache (despite the argument name,
# `solve` here is the inverse matrix itself, not the solve() function)
cacheInverse <- function(solve) {cache <<- solve}
# get the cached inverse (NULL if nothing has been cached)
getInverse <- function() {cache}
# return the list of accessor functions
list(setMatrix = setMatrix, getMatrix = getMatrix, cacheInverse = cacheInverse, getInverse = getInverse)
}
# this function computes the inverse of the special "matrix" returned
# by the above function (makeCacheMatrix)
# Compute the inverse of the special "matrix" created by makeCacheMatrix(),
# reusing the memoised result when one is available.
cacheSolve <- function(y, ...)
{
  # Previously cached inverse, if any.
  cached <- y$getInverse()
  if (!is.null(cached)) {
    # Cache hit: skip the (potentially expensive) solve() call.
    message("returning the cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(y$getMatrix())
  y$cacheInverse(inverse)
  inverse
}
library(shiny)
library(rCharts)
library(plyr)
library(dplyr)
library(ggplot2)
library(scales)
library(maptools)
library(grid)
load("data/consolidate_data_clean_app.RData")
theme_null <- function() {
  # A fully blank ggplot2 theme: no axes, no grid, no legend, no background.
  blank <- element_blank()
  theme(axis.line = blank,
        axis.text.x = blank,
        axis.text.y = blank,
        axis.ticks = blank,
        axis.title.x = blank,
        axis.title.y = blank,
        legend.position = "none",
        panel.background = blank,
        panel.border = blank,
        panel.grid.major = blank,
        panel.grid.minor = blank,
        plot.background = blank)
}
theme_legend <- function(){
  # Bottom-anchored ggplot2 legend with white text on a transparent
  # background.
  legend_opts <- list(
    legend.position = "bottom",
    text = element_text(colour = "white"),
    legend.background = element_rect(fill = "transparent"),
    legend.text = element_text(size = 10),
    legend.key.size = unit(0.8, "cm")
  )
  do.call(theme, legend_opts)
}
| /cl-educ/global.r | no_license | personlin/shiny-apps | R | false | false | 951 | r | library(shiny)
library(rCharts)
library(plyr)
library(dplyr)
library(ggplot2)
library(scales)
library(maptools)
library(grid)
load("data/consolidate_data_clean_app.RData")
# A fully blank ggplot2 theme: hides axes (lines, text, ticks, titles), the
# legend, panel grid/border and all backgrounds.  Returns a theme object to
# be added to a ggplot.
theme_null <- function() {
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position = "none",
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_blank())
}
# A ggplot2 theme fragment: bottom-anchored legend with white text on a
# transparent background.  Returns a theme object to be added to a ggplot.
theme_legend <- function(){
theme(legend.position = "bottom",
text=element_text(colour = "white"),
legend.background = element_rect(fill = "transparent"),
legend.text = element_text(size = 10),
legend.key.size = unit(0.8, "cm"))
}
|
sim.bPCA <- function(data,
#priors specification
covmat.prior,
covmat.prior.DF,
mu.prior,
mu.prior.cov,
#MCMC specication
n.chains,
n.iter,
n.burnin){
# requirements
require(R2jags)
require(MASS)
require(Matrix)
require(coda)
# dataset dimensions
N = nrow(data)
V = ncol(data)
# defaults
if(missing(covmat.prior)) covmat.prior=as.matrix(Diagonal(V,1/1000))
if(missing(covmat.prior.DF)) covmat.prior.DF=V
if(missing(mu.prior)) mu.prior=rep(0,V)
if(missing(mu.prior.cov)) mu.prior.cov=as.matrix(Diagonal(V,1000))
if(missing(n.chains)) n.chains=3
if(missing(n.iter)) n.iter=5000
if(missing(n.burnin)) n.burnin=4500
# makes precisions from covariances
mu.prior.prec=ginv(mu.prior.cov)
# puts data into list
listdata = list(Y=as.matrix(data),
N=N,
V=V,
covmat.prior=covmat.prior,
mu.prior=mu.prior,
covmat.prior.DF=covmat.prior.DF,
mu.prior.prec=mu.prior.prec)
# defines the model in JAGS language
cat("
model
{
# priors on the vector of multinormal means
mu[1:V] ~ dmnorm(mu.prior[], mu.prior.prec[,])
# priors on the covariance matrix
prec[1:V,1:V] ~ dwish(covmat.prior[,], covmat.prior.DF)
# makes covariance from precision
cov[1:V,1:V] <- inverse(prec[,])
# likelihood
for (i in 1:N)
{
Y[i,1:V] ~ dmnorm(mu[], prec[,])
}
}
", file="PCA.bugs")
# jags model to estimate covariance matrix distribution
pcabay <- jags(data=listdata,
model.file="PCA.bugs",
parameters.to.save=c("cov", "mu"),
n.chains=n.chains,
n.iter=n.iter,
n.burnin=n.burnin,
DIC=FALSE)
return(pcabay)
}
# ------------------------------------------------------------------------------
# CONVERGENCE DIAGNOSTICS
# Convergence diagnostics for a fitted bPCA model.
#   bPCA.fitted : object returned by sim.bPCA() (an R2jags fit)
# Relies on coda's as.mcmc()/plot() methods (coda is loaded by sim.bPCA).
# Called for its plotting side effect; the return value is not used.
convergenceplots.bPCA <- function(bPCA.fitted)
{
# trace/density plots for the monitored parameters
plot(as.mcmc(bPCA.fitted))
}
# ------------------------------------------------------------------------------
# BOXPLOTS OF THE "STABILITY" OF THE EIGENVALUES AND EXPLAINED VARIANCES
eigenvalplots.bPCA <- function(bPCA.fitted, data)
{
  # Posterior distributions of the covariance-matrix eigenvalues (and the
  # corresponding percentages of explained variance), shown as boxplots with
  # one box per principal component.
  #   bPCA.fitted : object returned by sim.bPCA()
  #   data        : the original data (used only for its number of columns)
  # Returns a list of summary() tables: Eigenvalues and Exp.var.
  V <- ncol(data)
  sims <- bPCA.fitted$BUGSoutput$sims.matrix
  # First V*V columns of the sims matrix hold the vectorised covariance
  # matrix; drop = FALSE keeps matrix structure even for a single draw.
  sims <- sims[, 1:(V*V), drop = FALSE]
  # one row of eigenvalues per posterior draw
  eigen.chains <- matrix(nrow = nrow(sims), ncol = V)
  for(i in seq_len(nrow(sims)))
  {
    covm <- matrix(sims[i, ], V, V)
    eigen.chains[i, ] <- eigen(covm)$values
  }
  # percent of total variability explained by each component, per draw
  exp.vars <- eigen.chains / rowSums(eigen.chains) * 100
  # side-by-side boxplots; restore the caller's par() settings on exit
  old.par <- par(mfrow = c(1, 2))
  on.exit(par(old.par), add = TRUE)
  boxplot(eigen.chains, ylab = "Eigenvalue", xlab = "PCA axis",
          col = "grey", outline = FALSE)
  boxplot(exp.vars, ylab = "Explained variability [% of total]", xlab = "PCA axis",
          col = "grey", outline = FALSE, ylim = c(0, 100))
  results <- list(Eigenvalues = summary(eigen.chains),
                  Exp.var = summary(exp.vars))
  return(results)
}
# ------------------------------------------------------------------------------
plot.classicPCA <- function(data, axes.to.plot=1:2, scale=1, xlim, ylim)
{
  # Biplot of an ordinary (non-Bayesian) PCA of `data`, for comparison with
  # the Bayesian biplots from biplots.bPCA().
  #   data         : data frame / matrix of numeric variables
  #   axes.to.plot : which two principal components to display
  #   scale        : eigenvalue exponent applied to the loadings
  #   xlim, ylim   : optional axis limits forwarded to biplot()
  eig <- eigen(cov(data))
  # Scale each COLUMN (component) of the eigenvector matrix by its
  # eigenvalue^scale.  The original code computed
  #   t(t(eig$vectors)) * (eig$values^scale)
  # which recycles the eigenvalues down the rows and therefore scaled each
  # VARIABLE by the wrong eigenvalue; this form matches biplots.bPCA().
  loadings <- t(t(eig$vectors) * (eig$values^scale))
  row.names(loadings) <- names(data)
  # scores = centred data projected onto the (unscaled) eigenvectors
  # (the scale() function is found despite the numeric `scale` argument)
  centered <- scale(data, scale = FALSE)
  scores <- centered %*% eig$vectors
  scores <- scores[, axes.to.plot]
  loadings <- loadings[, axes.to.plot]
  biplot(x = scores, y = loadings, main = "Classic PCA",
         xlab = paste("Comp.", axes.to.plot[1]),
         ylab = paste("Comp.", axes.to.plot[2]),
         ylim = ylim, xlim = xlim)
  # dashed grey reference lines through the origin
  abline(h = 0, lty = 2, col = "grey"); abline(v = 0, lty = 2, col = "grey")
}
# ------------------------------------------------------------------------------
# SIMPLE BIPLOTS OF THE BAYESIAN PCA
# Three biplots of the Bayesian PCA side by side, one per posterior summary
# statistic (2.5%, 50% and 97.5%) of the covariance matrix and means.
#   bPCA.fitted  : object returned by sim.bPCA()
#   data         : the original data
#   axes.to.plot : which two principal components to display
#   scale        : eigenvalue exponent applied to the loadings
#   xlim, ylim   : NOTE(review): accepted but never forwarded to biplot(),
#                  so they currently have no effect
# NOTE(review): par(mfrow = ...) is changed but not restored on exit.
biplots.bPCA <- function(bPCA.fitted, data, axes.to.plot=1:2, scale=1,xlim,ylim)
{
# number of variables (columns) in the data
V = length(data[1,])
# posterior summary columns of the R2jags summary table to display
summ.stats <- c("2.5%", "50%", "97.5%")
par(mfrow=c(1, length(summ.stats)))
for(summ.stat in summ.stats)
{
# covariance matrix and mean vector at this summary statistic: the first
# V^2 rows of the summary table are the covariance entries, the next V
# rows are the means
covm = matrix(bPCA.fitted$BUGSoutput$summary[1:(V^2), summ.stat], V, V)
mu = bPCA.fitted$BUGSoutput$summary[((V^2)+1):((V^2)+V), summ.stat]
eig=eigen(covm)
# scale each component's loadings (columns) by its eigenvalue^scale
loadings <- t(t(eig$vectors)*(eig$values^scale))
row.names(loadings) <- names(data)
# centre on this summary's means and project onto the eigenvectors
centered <- scale(data, center=mu, scale=FALSE)
scores <- centered %*% eig$vectors
biplot(x=scores[,axes.to.plot],
y=loadings[,axes.to.plot],
main=paste(summ.stat, "of Bayesian PCA"),
xlab=paste("Comp.", axes.to.plot[1]),
ylab=paste("Comp.", axes.to.plot[2]))
abline(h=0, lty=2, col="grey"); abline(v=0, lty=2, col="grey")
}
}
# ------------------------------------------------------------------------------
# FAST LOOP-AVOIDING FUNCTION THAT EXTRACTS THE CHAINS OF THE LOADINGS
get.loadings.chain.bPCA <- function(bPCA.fitted, data)
{
  # Extract the MCMC chain of PCA loadings (covariance eigenvectors) from a
  # fitted bPCA model, without explicit loops.
  #   bPCA.fitted : object returned by sim.bPCA()
  #   data        : original data (supplies variable count and names)
  # Returns an unnamed list with one V x V loadings matrix per posterior
  # draw (rows named after the variables, columns "Comp.1" .. "Comp.V").
  sims <- bPCA.fitted$BUGSoutput$sims.matrix
  V <- ncol(data)
  # drop = FALSE keeps matrix structure even for a single posterior draw
  # (without it a one-row chain collapses to a vector and split() fails)
  sims.cov <- sims[, 1:(V^2), drop = FALSE]
  # split the draws matrix row-wise into a list of vectorised covariances
  sims.cov <- split(sims.cov, seq(nrow(sims.cov)))
  names(sims.cov) <- NULL
  # reshape one draw into a covariance matrix and take its eigenvectors
  load.extract <- function(cov, V, data)
  {
    covm = matrix(cov, V, V)
    loadings <- eigen(covm)$vectors
    row.names(loadings) <- names(data)
    colnames(loadings) <- paste("Comp.", 1:V, sep = "")
    return(loadings)
  }
  # loop-avoiding lapply applying load.extract() to every draw
  loadings.chain <- lapply(X = sims.cov, FUN = load.extract, V = V, data = data)
  return(loadings.chain)
}
# ------------------------------------------------------------------------------
# FAST FUNCTION THAT SUMMARIZES THE CHAINS FOR THE LOADINGS BY QUANTILES
# AND BY LATTICE HISTOGRAMS
# plots and summarizes loadings
# Summarise and plot a chain of loadings matrices (see
# get.loadings.chain.bPCA): lattice histograms of the posterior of each
# retained loading, plus quantile summaries.
#   loadings.chain : list of V x V loadings matrices, one per draw
#   vars.to.get    : variable (row) indices to keep; default first min(V, 5)
#   axes.to.get    : component (column) indices to keep; default first min(V, 5)
#   quantiles      : probabilities at which to summarise each loading
# Returns a list (one element per quantile, named by probability) of
# matrices holding that quantile of every retained loading.
# NOTE(review): uses require() for reshape/lattice; a missing package makes
# melt()/histogram() fail later rather than erroring here.
summary.loadings.bPCA <- function(loadings.chain,
vars.to.get,
axes.to.get,
quantiles=c(0.025, 0.5, 0.975))
{
require(reshape)
require(lattice)
V <- nrow(loadings.chain[[1]])
# if not specified, keep all (but at most 5) variables and axes
if(missing(vars.to.get)) vars.to.get <- 1:min(V, 5)
if(missing(axes.to.get)) axes.to.get <- 1:min(V, 5)
# trimmer() drops the unwanted variables (rows) and axes (columns)
trimmer <- function(M, vars.to.get, axes.to.get){
M[vars.to.get, axes.to.get]
}
# apply trimmer() to every draw in the chain (loop avoidance)
loadings.trimmed <- lapply(loadings.chain,
FUN=trimmer, vars.to.get, axes.to.get)
# "melt" the chain into a long data frame (value, component X2, variable X1)
melted <- melt(loadings.trimmed)[,1:3]
# one histogram panel per variable/component combination
print(histogram(~ value | X2 * X1, data=melted,
panel = function(x, ...){
panel.histogram(x, ...)
}))
# summarise the retained loadings at each requested quantile
output <- vector(mode="list", length=length(quantiles))
names(output) <- as.character(quantiles)
for(i in 1:length(quantiles))
{
# elementwise quantile over the draw dimension of the stacked array
output[[i]] <- apply(simplify2array(loadings.trimmed), 1:2,
FUN=quantile,
probs=quantiles[i])
}
return(output)
}
# ------------------------------------------------------------------------------
# FAST LOOP-AVOIDING FUNCTION THAT EXTRACTS THE CHAINS FOR THE SCORES
get.scores.chain.bPCA <- function(bPCA.fitted, data)
{
  # Extract the MCMC chain of PCA scores: for every posterior draw, centre
  # the data on that draw's means and project it onto that draw's covariance
  # eigenvectors.  Returns an unnamed list with one N x V score matrix per
  # draw (rows named after the observations, columns "Comp.1" .. "Comp.V").
  draws <- bPCA.fitted$BUGSoutput$sims.matrix
  n.vars <- ncol(data)
  # one list element per posterior draw (row of the sims matrix)
  draw.list <- split(draws, seq(nrow(draws)))
  names(draw.list) <- NULL
  # compute the score matrix for a single draw
  one.draw.scores <- function(draw)
  {
    cov.draw <- matrix(draw[1:(n.vars^2)], n.vars, n.vars)
    mu.draw <- draw[(n.vars^2 + 1):(n.vars^2 + n.vars)]
    # centre on this draw's means, then project onto its eigenvectors
    projected <- scale(data, center = mu.draw, scale = FALSE) %*%
      eigen(cov.draw)$vectors
    dimnames(projected) <- list(row.names(data),
                                paste("Comp.", 1:n.vars, sep = ""))
    projected
  }
  lapply(draw.list, one.draw.scores)
}
# ------------------------------------------------------------------------------
# THIS FUNCTION SUMMARIZES THE CHAIN OF THE SCORES BY QUANTILES,
# NOTHING IS PLOTTED.
summary.scores.bPCA <- function(scores.chain, axes.to.get,
                                quantiles = c(0.025, 0.5, 0.975))
{
  # Summarise a chain of score matrices by elementwise posterior quantiles;
  # nothing is plotted.
  #   scores.chain : list of score matrices (see get.scores.chain.bPCA)
  #   axes.to.get  : which principal components (columns) to keep
  #   quantiles    : probabilities at which to summarise each score
  # Returns a list (one element per quantile, named by probability) of
  # matrices holding that quantile of every retained score.
  # keep only the requested component columns, preserving matrix shape
  keep.axes <- function(M, axes.to.get){
    kept <- as.matrix(M[, axes.to.get])
    colnames(kept) <- paste("Comp.", axes.to.get, sep = "")
    kept
  }
  trimmed <- lapply(scores.chain, FUN = keep.axes, axes.to.get)
  # stack the draws into a 3-d array and take quantiles over the draw axis
  stacked <- simplify2array(trimmed)
  output <- lapply(quantiles, function(p) {
    apply(stacked, 1:2, FUN = quantile, probs = p)
  })
  names(output) <- as.character(quantiles)
  return(output)
}
# summarizing the first axis
| /R/bPCA_functions.r | no_license | petrkeil/bPCA | R | false | false | 9,687 | r |
sim.bPCA <- function(data,
#priors specification
covmat.prior,
covmat.prior.DF,
mu.prior,
mu.prior.cov,
#MCMC specication
n.chains,
n.iter,
n.burnin){
# requirements
require(R2jags)
require(MASS)
require(Matrix)
require(coda)
# dataset dimensions
N = nrow(data)
V = ncol(data)
# defaults
if(missing(covmat.prior)) covmat.prior=as.matrix(Diagonal(V,1/1000))
if(missing(covmat.prior.DF)) covmat.prior.DF=V
if(missing(mu.prior)) mu.prior=rep(0,V)
if(missing(mu.prior.cov)) mu.prior.cov=as.matrix(Diagonal(V,1000))
if(missing(n.chains)) n.chains=3
if(missing(n.iter)) n.iter=5000
if(missing(n.burnin)) n.burnin=4500
# makes precisions from covariances
mu.prior.prec=ginv(mu.prior.cov)
# puts data into list
listdata = list(Y=as.matrix(data),
N=N,
V=V,
covmat.prior=covmat.prior,
mu.prior=mu.prior,
covmat.prior.DF=covmat.prior.DF,
mu.prior.prec=mu.prior.prec)
# defines the model in JAGS language
cat("
model
{
# priors on the vector of multinormal means
mu[1:V] ~ dmnorm(mu.prior[], mu.prior.prec[,])
# priors on the covariance matrix
prec[1:V,1:V] ~ dwish(covmat.prior[,], covmat.prior.DF)
# makes covariance from precision
cov[1:V,1:V] <- inverse(prec[,])
# likelihood
for (i in 1:N)
{
Y[i,1:V] ~ dmnorm(mu[], prec[,])
}
}
", file="PCA.bugs")
# jags model to estimate covariance matrix distribution
pcabay <- jags(data=listdata,
model.file="PCA.bugs",
parameters.to.save=c("cov", "mu"),
n.chains=n.chains,
n.iter=n.iter,
n.burnin=n.burnin,
DIC=FALSE)
return(pcabay)
}
# ------------------------------------------------------------------------------
# CONVERGENCE DIAGNOSTICS
# Convergence diagnostics for a fitted bPCA model.
#   bPCA.fitted : object returned by sim.bPCA() (an R2jags fit)
# Relies on coda's as.mcmc()/plot() methods (coda is loaded by sim.bPCA).
# Called for its plotting side effect; the return value is not used.
convergenceplots.bPCA <- function(bPCA.fitted)
{
# trace/density plots for the monitored parameters
plot(as.mcmc(bPCA.fitted))
}
# ------------------------------------------------------------------------------
# BOXPLOTS OF THE "STABILITY" OF THE EIGENVALUES AND EXPLAINED VARIANCES
eigenvalplots.bPCA <- function(bPCA.fitted, data)
{
  # Posterior distributions of the covariance-matrix eigenvalues (and the
  # corresponding percentages of explained variance), shown as boxplots with
  # one box per principal component.
  #   bPCA.fitted : object returned by sim.bPCA()
  #   data        : the original data (used only for its number of columns)
  # Returns a list of summary() tables: Eigenvalues and Exp.var.
  V <- ncol(data)
  sims <- bPCA.fitted$BUGSoutput$sims.matrix
  # First V*V columns of the sims matrix hold the vectorised covariance
  # matrix; drop = FALSE keeps matrix structure even for a single draw.
  sims <- sims[, 1:(V*V), drop = FALSE]
  # one row of eigenvalues per posterior draw
  eigen.chains <- matrix(nrow = nrow(sims), ncol = V)
  for(i in seq_len(nrow(sims)))
  {
    covm <- matrix(sims[i, ], V, V)
    eigen.chains[i, ] <- eigen(covm)$values
  }
  # percent of total variability explained by each component, per draw
  exp.vars <- eigen.chains / rowSums(eigen.chains) * 100
  # side-by-side boxplots; restore the caller's par() settings on exit
  old.par <- par(mfrow = c(1, 2))
  on.exit(par(old.par), add = TRUE)
  boxplot(eigen.chains, ylab = "Eigenvalue", xlab = "PCA axis",
          col = "grey", outline = FALSE)
  boxplot(exp.vars, ylab = "Explained variability [% of total]", xlab = "PCA axis",
          col = "grey", outline = FALSE, ylim = c(0, 100))
  results <- list(Eigenvalues = summary(eigen.chains),
                  Exp.var = summary(exp.vars))
  return(results)
}
# ------------------------------------------------------------------------------
plot.classicPCA <- function(data, axes.to.plot=1:2, scale=1, xlim, ylim)
{
  # Biplot of an ordinary (non-Bayesian) PCA of `data`, for comparison with
  # the Bayesian biplots from biplots.bPCA().
  #   data         : data frame / matrix of numeric variables
  #   axes.to.plot : which two principal components to display
  #   scale        : eigenvalue exponent applied to the loadings
  #   xlim, ylim   : optional axis limits forwarded to biplot()
  eig <- eigen(cov(data))
  # Scale each COLUMN (component) of the eigenvector matrix by its
  # eigenvalue^scale.  The original code computed
  #   t(t(eig$vectors)) * (eig$values^scale)
  # which recycles the eigenvalues down the rows and therefore scaled each
  # VARIABLE by the wrong eigenvalue; this form matches biplots.bPCA().
  loadings <- t(t(eig$vectors) * (eig$values^scale))
  row.names(loadings) <- names(data)
  # scores = centred data projected onto the (unscaled) eigenvectors
  # (the scale() function is found despite the numeric `scale` argument)
  centered <- scale(data, scale = FALSE)
  scores <- centered %*% eig$vectors
  scores <- scores[, axes.to.plot]
  loadings <- loadings[, axes.to.plot]
  biplot(x = scores, y = loadings, main = "Classic PCA",
         xlab = paste("Comp.", axes.to.plot[1]),
         ylab = paste("Comp.", axes.to.plot[2]),
         ylim = ylim, xlim = xlim)
  # dashed grey reference lines through the origin
  abline(h = 0, lty = 2, col = "grey"); abline(v = 0, lty = 2, col = "grey")
}
# ------------------------------------------------------------------------------
# SIMPLE BIPLOTS OF THE BAYESIAN PCA
# Three biplots of the Bayesian PCA side by side, one per posterior summary
# statistic (2.5%, 50% and 97.5%) of the covariance matrix and means.
#   bPCA.fitted  : object returned by sim.bPCA()
#   data         : the original data
#   axes.to.plot : which two principal components to display
#   scale        : eigenvalue exponent applied to the loadings
#   xlim, ylim   : NOTE(review): accepted but never forwarded to biplot(),
#                  so they currently have no effect
# NOTE(review): par(mfrow = ...) is changed but not restored on exit.
biplots.bPCA <- function(bPCA.fitted, data, axes.to.plot=1:2, scale=1,xlim,ylim)
{
# number of variables (columns) in the data
V = length(data[1,])
# posterior summary columns of the R2jags summary table to display
summ.stats <- c("2.5%", "50%", "97.5%")
par(mfrow=c(1, length(summ.stats)))
for(summ.stat in summ.stats)
{
# covariance matrix and mean vector at this summary statistic: the first
# V^2 rows of the summary table are the covariance entries, the next V
# rows are the means
covm = matrix(bPCA.fitted$BUGSoutput$summary[1:(V^2), summ.stat], V, V)
mu = bPCA.fitted$BUGSoutput$summary[((V^2)+1):((V^2)+V), summ.stat]
eig=eigen(covm)
# scale each component's loadings (columns) by its eigenvalue^scale
loadings <- t(t(eig$vectors)*(eig$values^scale))
row.names(loadings) <- names(data)
# centre on this summary's means and project onto the eigenvectors
centered <- scale(data, center=mu, scale=FALSE)
scores <- centered %*% eig$vectors
biplot(x=scores[,axes.to.plot],
y=loadings[,axes.to.plot],
main=paste(summ.stat, "of Bayesian PCA"),
xlab=paste("Comp.", axes.to.plot[1]),
ylab=paste("Comp.", axes.to.plot[2]))
abline(h=0, lty=2, col="grey"); abline(v=0, lty=2, col="grey")
}
}
# ------------------------------------------------------------------------------
# FAST LOOP-AVOIDING FUNCTION THAT EXTRACTS THE CHAINS OF THE LOADINGS
get.loadings.chain.bPCA <- function(bPCA.fitted, data)
{
  # Extract the MCMC chain of PCA loadings (covariance eigenvectors) from a
  # fitted bPCA model, without explicit loops.
  #   bPCA.fitted : object returned by sim.bPCA()
  #   data        : original data (supplies variable count and names)
  # Returns an unnamed list with one V x V loadings matrix per posterior
  # draw (rows named after the variables, columns "Comp.1" .. "Comp.V").
  sims <- bPCA.fitted$BUGSoutput$sims.matrix
  V <- ncol(data)
  # drop = FALSE keeps matrix structure even for a single posterior draw
  # (without it a one-row chain collapses to a vector and split() fails)
  sims.cov <- sims[, 1:(V^2), drop = FALSE]
  # split the draws matrix row-wise into a list of vectorised covariances
  sims.cov <- split(sims.cov, seq(nrow(sims.cov)))
  names(sims.cov) <- NULL
  # reshape one draw into a covariance matrix and take its eigenvectors
  load.extract <- function(cov, V, data)
  {
    covm = matrix(cov, V, V)
    loadings <- eigen(covm)$vectors
    row.names(loadings) <- names(data)
    colnames(loadings) <- paste("Comp.", 1:V, sep = "")
    return(loadings)
  }
  # loop-avoiding lapply applying load.extract() to every draw
  loadings.chain <- lapply(X = sims.cov, FUN = load.extract, V = V, data = data)
  return(loadings.chain)
}
# ------------------------------------------------------------------------------
# FAST FUNCTION THAT SUMMARIZES THE CHAINS FOR THE LOADINGS BY QUANTILES
# AND BY LATTICE HISTOGRAMS
# plots and summarizes loadings
# Summarise and plot a chain of loadings matrices (see
# get.loadings.chain.bPCA): lattice histograms of the posterior of each
# retained loading, plus quantile summaries.
#   loadings.chain : list of V x V loadings matrices, one per draw
#   vars.to.get    : variable (row) indices to keep; default first min(V, 5)
#   axes.to.get    : component (column) indices to keep; default first min(V, 5)
#   quantiles      : probabilities at which to summarise each loading
# Returns a list (one element per quantile, named by probability) of
# matrices holding that quantile of every retained loading.
# NOTE(review): uses require() for reshape/lattice; a missing package makes
# melt()/histogram() fail later rather than erroring here.
summary.loadings.bPCA <- function(loadings.chain,
vars.to.get,
axes.to.get,
quantiles=c(0.025, 0.5, 0.975))
{
require(reshape)
require(lattice)
V <- nrow(loadings.chain[[1]])
# if not specified, keep all (but at most 5) variables and axes
if(missing(vars.to.get)) vars.to.get <- 1:min(V, 5)
if(missing(axes.to.get)) axes.to.get <- 1:min(V, 5)
# trimmer() drops the unwanted variables (rows) and axes (columns)
trimmer <- function(M, vars.to.get, axes.to.get){
M[vars.to.get, axes.to.get]
}
# apply trimmer() to every draw in the chain (loop avoidance)
loadings.trimmed <- lapply(loadings.chain,
FUN=trimmer, vars.to.get, axes.to.get)
# "melt" the chain into a long data frame (value, component X2, variable X1)
melted <- melt(loadings.trimmed)[,1:3]
# one histogram panel per variable/component combination
print(histogram(~ value | X2 * X1, data=melted,
panel = function(x, ...){
panel.histogram(x, ...)
}))
# summarise the retained loadings at each requested quantile
output <- vector(mode="list", length=length(quantiles))
names(output) <- as.character(quantiles)
for(i in 1:length(quantiles))
{
# elementwise quantile over the draw dimension of the stacked array
output[[i]] <- apply(simplify2array(loadings.trimmed), 1:2,
FUN=quantile,
probs=quantiles[i])
}
return(output)
}
# ------------------------------------------------------------------------------
# FAST LOOP-AVOIDING FUNCTION THAT EXTRACTS THE CHAINS FOR THE SCORES
get.scores.chain.bPCA <- function(bPCA.fitted, data)
{
  # Extract the MCMC chain of PCA scores: for every posterior draw, centre
  # the data on that draw's means and project it onto that draw's covariance
  # eigenvectors.  Returns an unnamed list with one N x V score matrix per
  # draw (rows named after the observations, columns "Comp.1" .. "Comp.V").
  draws <- bPCA.fitted$BUGSoutput$sims.matrix
  n.vars <- ncol(data)
  # one list element per posterior draw (row of the sims matrix)
  draw.list <- split(draws, seq(nrow(draws)))
  names(draw.list) <- NULL
  # compute the score matrix for a single draw
  one.draw.scores <- function(draw)
  {
    cov.draw <- matrix(draw[1:(n.vars^2)], n.vars, n.vars)
    mu.draw <- draw[(n.vars^2 + 1):(n.vars^2 + n.vars)]
    # centre on this draw's means, then project onto its eigenvectors
    projected <- scale(data, center = mu.draw, scale = FALSE) %*%
      eigen(cov.draw)$vectors
    dimnames(projected) <- list(row.names(data),
                                paste("Comp.", 1:n.vars, sep = ""))
    projected
  }
  lapply(draw.list, one.draw.scores)
}
# ------------------------------------------------------------------------------
# THIS FUNCTION SUMMARIZES THE CHAIN OF THE SCORES BY QUANTILES,
# NOTHING IS PLOTTED.
summary.scores.bPCA <- function(scores.chain, axes.to.get,
                                quantiles = c(0.025, 0.5, 0.975))
{
  # Summarise a chain of score matrices by elementwise posterior quantiles;
  # nothing is plotted.
  #   scores.chain : list of score matrices (see get.scores.chain.bPCA)
  #   axes.to.get  : which principal components (columns) to keep
  #   quantiles    : probabilities at which to summarise each score
  # Returns a list (one element per quantile, named by probability) of
  # matrices holding that quantile of every retained score.
  # keep only the requested component columns, preserving matrix shape
  keep.axes <- function(M, axes.to.get){
    kept <- as.matrix(M[, axes.to.get])
    colnames(kept) <- paste("Comp.", axes.to.get, sep = "")
    kept
  }
  trimmed <- lapply(scores.chain, FUN = keep.axes, axes.to.get)
  # stack the draws into a 3-d array and take quantiles over the draw axis
  stacked <- simplify2array(trimmed)
  output <- lapply(quantiles, function(p) {
    apply(stacked, 1:2, FUN = quantile, probs = p)
  })
  names(output) <- as.character(quantiles)
  return(output)
}
# summarizing the first axis
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/difORD.R
\name{coef.difORD}
\alias{coef.difORD}
\alias{coefficients.difORD}
\title{Extract model coefficients from an object of \code{"difORD"} class.}
\usage{
\method{coef}{difORD}(object, SE = FALSE, simplify = FALSE, IRTpars = TRUE, CI = 0.95, ...)
}
\arguments{
\item{object}{an object of \code{"difORD"} class.}
\item{SE}{logical: should the standard errors of the estimated
parameters also be returned? (default is \code{FALSE}).}
\item{simplify}{logical: should the estimated parameters be
simplified to a matrix? (default is \code{FALSE}).}
\item{IRTpars}{logical: should the estimated parameters be returned
in IRT parameterization? (default is \code{TRUE}).}
\item{CI}{numeric: level of confidence interval for parameters,
default is \code{0.95} for 95\% confidence interval.}
\item{...}{other generic parameters for \code{coef()} function.}
}
\description{
S3 method for extracting estimated model coefficients
from an object of \code{"difORD"} class.
}
\examples{
\dontrun{
# loading data
data(Anxiety, package = "ShinyItemAnalysis")
Data <- Anxiety[, paste0("R", 1:29)] # items
group <- Anxiety[, "gender"] # group membership variable
# testing both DIF effects with adjacent category logit model
(x <- difORD(Data, group, focal.name = 1, model = "adjacent"))
# estimated parameters
coef(x)
# includes standard errors
coef(x, SE = TRUE)
# includes standard errors and simplifies to matrix
coef(x, SE = TRUE, simplify = TRUE)
# intercept-slope parameterization
coef(x, IRTpars = FALSE)
# intercept-slope parameterization, simplifies to matrix, turn off confidence intervals
coef(x, IRTpars = FALSE, simplify = TRUE, CI = 0)
}
}
\seealso{
\code{\link[difNLR]{difORD}} for DIF detection among ordinal data. \cr
\code{\link[stats]{coef}} for generic function extracting model coefficients.
}
\author{
Adela Hladka (nee Drabinova) \cr
Institute of Computer Science of the Czech Academy of Sciences \cr
Faculty of Mathematics and Physics, Charles University \cr
\email{hladka@cs.cas.cz} \cr
Patricia Martinkova \cr
Institute of Computer Science of the Czech Academy of Sciences \cr
\email{martinkova@cs.cas.cz} \cr
}
| /man/coef.difORD.Rd | no_license | cran/difNLR | R | false | true | 2,274 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/difORD.R
\name{coef.difORD}
\alias{coef.difORD}
\alias{coefficients.difORD}
\title{Extract model coefficients from an object of \code{"difORD"} class.}
\usage{
\method{coef}{difORD}(object, SE = FALSE, simplify = FALSE, IRTpars = TRUE, CI = 0.95, ...)
}
\arguments{
\item{object}{an object of \code{"difORD"} class.}
\item{SE}{logical: should the standard errors of estimated
parameters be also returned? (default is \code{FALSE}).}
\item{simplify}{logical: should the estimated parameters be
simplified to a matrix? (default is \code{FALSE}).}
\item{IRTpars}{logical: should the estimated parameters be returned
in IRT parameterization? (default is \code{TRUE}).}
\item{CI}{numeric: level of confidence interval for parameters,
default is \code{0.95} for 95\% confidence interval.}
\item{...}{other generic parameters for \code{coef()} function.}
}
\description{
S3 method for extracting estimated model coefficients
from an object of \code{"difORD"} class.
}
\examples{
\dontrun{
# loading data
data(Anxiety, package = "ShinyItemAnalysis")
Data <- Anxiety[, paste0("R", 1:29)] # items
group <- Anxiety[, "gender"] # group membership variable
# testing both DIF effects with adjacent category logit model
(x <- difORD(Data, group, focal.name = 1, model = "adjacent"))
# estimated parameters
coef(x)
# includes standard errors
coef(x, SE = TRUE)
# includes standard errors and simplifies to matrix
coef(x, SE = TRUE, simplify = TRUE)
# intercept-slope parameterization
coef(x, IRTpars = FALSE)
# intercept-slope parameterization, simplifies to matrix, turn off confidence intervals
coef(x, IRTpars = FALSE, simplify = TRUE, CI = 0)
}
}
\seealso{
\code{\link[difNLR]{difORD}} for DIF detection among ordinal data. \cr
\code{\link[stats]{coef}} for generic function extracting model coefficients.
}
\author{
Adela Hladka (nee Drabinova) \cr
Institute of Computer Science of the Czech Academy of Sciences \cr
Faculty of Mathematics and Physics, Charles University \cr
\email{hladka@cs.cas.cz} \cr
Patricia Martinkova \cr
Institute of Computer Science of the Czech Academy of Sciences \cr
\email{martinkova@cs.cas.cz} \cr
}
|
\name{boundsdata}
\docType{data}
\alias{boundsdata}
\title{Example Data for the Design Functions}
\description{
A random subsample of the simulated data used in Imai, Tingley, Yamamoto (2012). The data contains 1000 rows and 7 columns with no missing values.
}
\usage{boundsdata}
\format{A data frame containing the following variables, which are interpreted as results from a hypothetical randomized trial. See the source for a full description.
\describe{
\item{out:}{ The binary outcome variable under the parallel design.}
\item{out.enc:}{ The binary outcome variable under the parallel encouragement design.}
\item{med:}{ The binary mediator under the parallel design.}
\item{med.enc:}{ The binary mediator under the parallel encouragement design.}
\item{ttt:}{ The binary treatment variable.}
\item{manip:}{ The design indicator, or the variable indicating whether the mediator is manipulated under the parallel design.}
\item{enc:}{ The trichotomous encouragement variable under the parallel encouragement design. Equals 0 if subject received no encouragement; 1 if encouraged for the mediator value of 1; and -1 if encouraged for the mediator value of 0.}
}
}
\details{
Conditioning on 'manip' = 0 will simulate a randomized trial under the single experiment design, where 'out' and 'med' equal observed outcome and mediator values, respectively.
Unconditionally, using 'out', 'med', 'ttt' and 'manip' will simulate an experiment under the parallel design.
The 'out.enc' and 'med.enc' variables represent the outcome and mediator values observed when subjects received the encouragement indicated in 'enc'. Therefore, using 'out.enc', 'med.enc', 'ttt' and 'enc' will simulate an experiment under the parallel encouragement design.
Note that all the observed responses are generated from an underlying distribution of potential outcomes and mediators (not shown in this dataset) satisfying the assumptions described in Imai, Tingley and Yamamoto (2012). The full simulation code is available as a companion replication archive for the article.
}
\source{
Imai, K., Tingley, D. and Yamamoto, T. (2012) Experimental Designs for Identifying Causal Mechanisms. Journal of the Royal Statistical Society, Series A (Statistics in Society)
}
\keyword{datasets}
| /man/boundsdata.Rd | no_license | InterestingProgarmsInHealthScience/mediation | R | false | false | 2,334 | rd | \name{boundsdata}
\docType{data}
\alias{boundsdata}
\title{Example Data for the Design Functions}
\description{
A random subsample of the simulated data used in Imai, Tingley, Yamamoto (2012). The data contains 1000 rows and 7 columns with no missing values.
}
\usage{boundsdata}
\format{A data frame containing the following variables, which are interpreted as results from a hypothetical randomized trial. See the source for a full description.
\describe{
\item{out:}{ The binary outcome variable under the parallel design.}
\item{out.enc:}{ The binary outcome variable under the parallel encouragement design.}
\item{med:}{ The binary mediator under the parallel design.}
\item{med.enc:}{ The binary mediator under the parallel encouragement design.}
\item{ttt:}{ The binary treatment variable.}
\item{manip:}{ The design indicator, or the variable indicating whether the mediator is manipulated under the parallel design.}
\item{enc:}{ The trichotomous encouragement variable under the parallel encouragement design. Equals 0 if subject received no encouragement; 1 if encouraged for the mediator value of 1; and -1 if encouraged for the mediator value of 0.}
}
}
\details{
Conditioning on 'manip' = 0 will simulate a randomized trial under the single experiment design, where 'out' and 'med' equal observed outcome and mediator values, respectively.
Unconditionally, using 'out', 'med', 'ttt' and 'manip' will simulate an experiment under the parallel design.
The 'out.enc' and 'med.enc' variables represent the outcome and mediator values observed when subjects received the encouragement indicated in 'enc'. Therefore, using 'out.enc', 'med.enc', 'ttt' and 'enc' will simulate an experiment under the parallel encouragement design.
Note that all the observed responses are generated from an underlying distribution of potential outcomes and mediators (not shown in this dataset) satisfying the assumptions described in Imai, Tingley and Yamamoto (2012). The full simulation code is available as a companion replication archive for the article.
}
\source{
Imai, K., Tingley, D. and Yamamoto, T. (2012) Experimental Designs for Identifying Causal Mechanisms. Journal of the Royal Statistical Society, Series A (Statistics in Society)
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2017 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Installs R packages.
#
# The script is called with one or more arguments, where each argument is a
# package name to install.
# Collect the package names passed on the command line (trailing args only):
pkgs <- commandArgs( trailingOnly = TRUE );

# Abort when no package name was supplied...
if ( length( pkgs ) == 0 ) {
	stop( "Must provide at least one package to install.", call. = FALSE );
}
# Install the requested packages one at a time...
for ( pkg in pkgs ) {
	install.packages( pkg, repos = "http://lib.stat.cmu.edu/R/CRAN/" );
}
| /tools/scripts/install_r_pkgs.R | permissive | stdlib-js/stdlib | R | false | false | 1,152 | r | #!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2017 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Installs R packages.
#
# The script is called with one or more arguments, where each argument is a
# package name to install.
# Get only the trailing command-line arguments:
args <- commandArgs( trailingOnly = TRUE );
# Check that at least one package name has been provided...
n <- length( args );
if ( n == 0 ) {
stop( "Must provide at least one package to install.", call. = FALSE );
}
# Install each package...
for ( i in 1:n ) {
install.packages( args[ i ], repos = "http://lib.stat.cmu.edu/R/CRAN/" );
}
|
# Tests for sim_margins(). Everything here depends on the optional
# 'margins' package, so the whole file is a no-op when it is missing.
# Deprecated expect_is() calls have been replaced with expect_s3_class()
# (testthat 3rd edition), used consistently throughout.
if (requireNamespace("margins")) {

  context("sim_margins lm")

  # --- shared fixtures -------------------------------------------------
  # state.x77 with the life-expectancy split recoded several ways so the
  # moderator can be tested as numeric, factor, logical, and character.
  states <- as.data.frame(state.x77)
  states$HSGrad <- states$`HS Grad`
  states$o70 <- 0
  states$o70[states$`Life Exp` > 70] <- 1
  states$o70n <- states$o70
  states$o70 <- factor(states$o70)
  states$o70l <- states$`Life Exp` > 70
  states$o70c <- ifelse(states$o70l, yes = "yes", no = "no")
  set.seed(3)  # reproducible random case weights
  states$wts <- runif(50, 0, 3)

  # Models covering numeric, factor, logical, and character moderators,
  # plus a weighted fit. fit2/fit2n are currently unused but kept as
  # fixtures.
  fit <- lm(Income ~ HSGrad*Murder*Illiteracy + o70 + Area, data = states)
  fit2 <- lm(Income ~ HSGrad*o70, data = states)
  fit2n <- lm(Income ~ HSGrad*o70n, data = states)
  fitw <- lm(Income ~ HSGrad*Murder*Illiteracy + o70 + Area, data = states,
             weights = wts)
  fitl <- lm(Income ~ HSGrad*o70l, data = states)
  fitc <- lm(Income ~ HSGrad*Murder + o70c, data = states)

  if (requireNamespace("survey")) {
    suppressMessages(library(survey, quietly = TRUE))
    data(api)
    dstrat <- svydesign(id = ~1, strata = ~stype, weights = ~pw,
                        data = apistrat, fpc = ~fpc)
    regmodel <- svyglm(api00 ~ ell * meals * both + sch.wide, design = dstrat)
  }

  # Each vce method (default delta, bootstrap, simulation) should return
  # a "sim_margins" object.
  test_that("sim_margins works for lm", {
    expect_s3_class(sim_margins(model = fit, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fit, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad,
                                vce = "bootstrap", iterations = 50),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fit, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad,
                                vce = "simulation", iterations = 50),
                    "sim_margins")
  })

  test_that("sim_margins works for weighted lm", {
    expect_s3_class(sim_margins(model = fitw, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad,
                                modx.values = c(1.0, 1.5, 2.0)),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitw, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad,
                                vce = "bootstrap",
                                modx.values = c(1.0, 1.5, 2.0),
                                iterations = 50),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitw, pred = Murder,
                                modx = Illiteracy, mod2 = HSGrad,
                                vce = "simulation",
                                modx.values = c(1.0, 1.5, 2.0),
                                iterations = 50),
                    "sim_margins")
  })

  test_that("sim_margins works for lm w/ logical", {
    expect_s3_class(sim_margins(model = fitl, pred = HSGrad, modx = o70l),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitl, pred = HSGrad, modx = o70l,
                                vce = "bootstrap", iterations = 50),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitl, pred = HSGrad, modx = o70l,
                                vce = "simulation", iterations = 50),
                    "sim_margins")
  })

  test_that("sim_margins works for lm w/ non-focal character", {
    expect_s3_class(sim_margins(model = fitc, pred = HSGrad, modx = Murder),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitc, pred = HSGrad, modx = Murder,
                                vce = "bootstrap", iterations = 50),
                    "sim_margins")
    expect_s3_class(sim_margins(model = fitc, pred = HSGrad, modx = Murder,
                                vce = "simulation", iterations = 50),
                    "sim_margins")
  })

  context("sim_margins methods")

  if (requireNamespace("huxtable") && requireNamespace("broom")) {
    test_that("as_huxtable.sim_margins works", {
      # With and without a second moderator, for each vce method.
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy)
      expect_s3_class(as_huxtable.sim_margins(ss3), "huxtable")
      expect_s3_class(as_huxtable.sim_margins(ss), "huxtable")
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad, vce = "bootstrap", iterations = 50)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                        vce = "bootstrap", iterations = 50)
      expect_s3_class(as_huxtable.sim_margins(ss3), "huxtable")
      expect_s3_class(as_huxtable.sim_margins(ss), "huxtable")
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad, vce = "simulation", iterations = 50)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                        vce = "simulation", iterations = 50)
      expect_s3_class(as_huxtable.sim_margins(ss3), "huxtable")
      expect_s3_class(as_huxtable.sim_margins(ss), "huxtable")
    })
  }

  if (requireNamespace("ggstance") && requireNamespace("broom")) {
    test_that("plot.sim_margins works", {
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy)
      expect_s3_class(plot(ss3), "ggplot")
      expect_s3_class(plot(ss), "ggplot")
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad, vce = "bootstrap", iterations = 50)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                        vce = "bootstrap", iterations = 50)
      expect_s3_class(plot(ss3), "ggplot")
      expect_s3_class(plot(ss), "ggplot")
      ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                         mod2 = HSGrad, vce = "simulation", iterations = 50)
      ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
                        vce = "simulation", iterations = 50)
      expect_s3_class(plot(ss3), "ggplot")
      expect_s3_class(plot(ss), "ggplot")
    })
  }

  context("sim_margins svyglm")

  if (requireNamespace("survey")) {
    test_that("sim_margins works for svyglm", {
      expect_s3_class(sim_margins(regmodel, pred = ell, modx = meals,
                                  mod2 = both),
                      "sim_margins")
      # margins bug: the bootstrap and simulation vce variants are
      # disabled until the upstream issue is fixed.
      # expect_s3_class(sim_margins(regmodel, pred = ell, modx = meals,
      #                             mod2 = both, vce = "bootstrap",
      #                             iterations = 50),
      #                 "sim_margins")
      # expect_s3_class(sim_margins(regmodel, pred = ell, modx = meals,
      #                             mod2 = both, vce = "simulation",
      #                             iterations = 50),
      #                 "sim_margins")
    })
  }

  context("sim_margins merMod")

  if (requireNamespace("lme4")) {
    library(lme4, quietly = TRUE)
    data(VerbAgg)
    # Binomial GLMM (nAGQ = 0L for speed) and a linear mixed model fit to
    # the same response.
    fmVA0 <- glmer(r2 ~ Anger * Gender + btype + situ + (1|id) + (1|item),
                   family = binomial, data = VerbAgg, nAGQ = 0L)
    lmVA0 <- lmer(as.numeric(r2 == "Y") ~ Anger * Gender + btype + situ +
                    (1|id) + (1|item), data = VerbAgg)
    test_that("sim_margins works for lme4", {
      expect_s3_class(sim_margins(lmVA0, pred = Anger, modx = Gender),
                      "sim_margins")
      expect_s3_class(sim_margins(fmVA0, pred = Anger, modx = Gender),
                      "sim_margins")
    })
  }
}
| /tests/testthat/test_sim_margins.R | permissive | jacob-long/interactions | R | false | false | 7,740 | r | if (requireNamespace("margins")) {
context("sim_margins lm")
states <- as.data.frame(state.x77)
states$HSGrad <- states$`HS Grad`
states$o70 <- 0
states$o70[states$`Life Exp` > 70] <- 1
states$o70n <- states$o70
states$o70 <- factor(states$o70)
states$o70l <- states$`Life Exp` > 70
states$o70c <- ifelse(states$o70l, yes = "yes", no = "no")
set.seed(3)
states$wts <- runif(50, 0, 3)
fit <- lm(Income ~ HSGrad*Murder*Illiteracy + o70 + Area, data = states)
fit2 <- lm(Income ~ HSGrad*o70, data = states)
fit2n <- lm(Income ~ HSGrad*o70n, data = states)
fitw <- lm(Income ~ HSGrad*Murder*Illiteracy + o70 + Area, data = states,
weights = wts)
fitl <- lm(Income ~ HSGrad*o70l, data = states)
fitc <- lm(Income ~ HSGrad*Murder + o70c, data = states)
if (requireNamespace("survey")) {
suppressMessages(library(survey, quietly = TRUE))
data(api)
dstrat <- svydesign(id = ~1, strata = ~stype, weights = ~pw, data = apistrat,
fpc = ~fpc)
regmodel <- svyglm(api00 ~ ell * meals * both + sch.wide, design = dstrat)
}
test_that("sim_margins works for lm", {
expect_s3_class(sim_margins(model = fit,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad), "sim_margins")
expect_s3_class(sim_margins(model = fit,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad,
vce = "bootstrap",
iterations = 50), "sim_margins")
expect_s3_class(sim_margins(model = fit,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad,
vce = "simulation",
iterations = 50), "sim_margins")
})
test_that("sim_margins works for weighted lm", {
expect_s3_class(sim_margins(model = fitw,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad,
modx.values = c(1.0, 1.5, 2.0)), class = "sim_margins")
expect_s3_class(sim_margins(model = fitw,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad,
vce = "bootstrap",
modx.values = c(1.0, 1.5, 2.0),
iterations = 50), class = "sim_margins")
expect_s3_class(sim_margins(model = fitw,
pred = Murder,
modx = Illiteracy,
mod2 = HSGrad,
vce = "simulation",
modx.values = c(1.0, 1.5, 2.0),
iterations = 50), class = "sim_margins")
})
test_that("sim_margins works for lm w/ logical", {
expect_s3_class(sim_margins(model = fitl,
pred = HSGrad,
modx = o70l), "sim_margins")
expect_s3_class(sim_margins(model = fitl,
pred = HSGrad,
modx = o70l,
vce = "bootstrap",
iterations = 50), "sim_margins")
expect_s3_class(sim_margins(model = fitl,
pred = HSGrad,
modx = o70l,
vce = "simulation",
iterations = 50), "sim_margins")
})
test_that("sim_margins works for lm w/ non-focal character", {
expect_s3_class(sim_margins(model = fitc,
pred = HSGrad,
modx = Murder), "sim_margins")
expect_s3_class(sim_margins(model = fitc,
pred = HSGrad,
modx = Murder,
vce = "bootstrap",
iterations = 50), "sim_margins")
expect_s3_class(sim_margins(model = fitc,
pred = HSGrad,
modx = Murder,
vce = "simulation",
iterations = 50), "sim_margins")
})
context("sim_margins methods")
if (requireNamespace("huxtable") && requireNamespace("broom")) {
test_that("as_huxtable.sim_margins works", {
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy)
expect_is(as_huxtable.sim_margins(ss3), "huxtable")
expect_is(as_huxtable.sim_margins(ss), "huxtable")
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad, vce = "bootstrap", iterations = 50)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
vce = "bootstrap", iterations = 50)
expect_is(as_huxtable.sim_margins(ss3), "huxtable")
expect_is(as_huxtable.sim_margins(ss), "huxtable")
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad, vce = "simulation", iterations = 50)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
vce = "simulation", iterations = 50)
expect_is(as_huxtable.sim_margins(ss3), "huxtable")
expect_is(as_huxtable.sim_margins(ss), "huxtable")
})
}
if (requireNamespace("ggstance") && requireNamespace("broom")) {
test_that("plot.sim_margins works", {
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy)
expect_is(plot(ss3), "ggplot")
expect_is(plot(ss), "ggplot")
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad, vce = "bootstrap", iterations = 50)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
vce = "bootstrap", iterations = 50)
expect_is(plot(ss3), "ggplot")
expect_is(plot(ss), "ggplot")
ss3 <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
mod2 = HSGrad, vce = "simulation", iterations = 50)
ss <- sim_margins(model = fit, pred = Murder, modx = Illiteracy,
vce = "simulation", iterations = 50)
expect_is(plot(ss3), "ggplot")
expect_is(plot(ss), "ggplot")
})
}
context("sim_margins svyglm")
if (requireNamespace("survey")) {
test_that("sim_margins works for svyglm", {
expect_is(sim_margins(regmodel, pred = ell, modx = meals, mod2 = both),
"sim_margins")
# margins bug
# expect_is(sim_margins(regmodel, pred = ell, modx = meals, mod2 = both,
# vce = "bootstrap", iterations = 50),
# "sim_margins")
# expect_is(sim_margins(regmodel, pred = ell, modx = meals, mod2 = both,
# vce = "simulation", iterations = 50),
# "sim_margins")
})
}
context("sim_margins merMod")
if (requireNamespace("lme4")) {
library(lme4, quietly = TRUE)
data(VerbAgg)
fmVA0 <- glmer(r2 ~ Anger * Gender + btype + situ + (1|id) + (1|item),
family = binomial, data = VerbAgg, nAGQ=0L)
lmVA0 <- lmer(as.numeric(r2 == "Y") ~ Anger * Gender + btype + situ +
(1|id) + (1|item), data = VerbAgg)
test_that("sim_margins works for lme4", {
expect_is(sim_margins(lmVA0, pred = Anger, modx = Gender), "sim_margins")
expect_is(sim_margins(fmVA0, pred = Anger, modx = Gender), "sim_margins")
})
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wl8_diagram.R
\name{wl8_diagram}
\alias{wl8_diagram}
\title{Generate Win Loss Diagram}
\usage{
wl8_diagram(data, topline, colour = TRUE)
}
\arguments{
\item{data}{Data frame with columns \code{endpoint}, \code{wins}, \code{ties}, \code{losses}.
The data frame should have exactly 8 rows.}
\item{topline}{Character string to be included in the top box.}
\item{colour}{Logical default = TRUE. If TRUE the diagram will be coloured.}
}
\value{
grViz object. Refer to DiagrammeR package for details.
}
\description{
Uses the DiagrammeR package to return a win-loss diagram.
Currently only supports exactly 8 rows.
}
\examples{
data <- data.frame(endpoint = c(paste0("endpoint", 1:8)),
wins = c(5,5,5,5,5,5,5,5),
ties = c(80,70,60,50,40,30,20,10),
losses = c(5,5,5,5,5,5,5,5))
wl8_diagram(data, topline="N=10*9 = 90")
wl8_diagram(data, topline="N=10*9 = 90", colour = FALSE)
}
| /man/wl8_diagram.Rd | no_license | kismet303/iDiagrams | R | false | true | 933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wl8_diagram.R
\name{wl8_diagram}
\alias{wl8_diagram}
\title{Generate Win Loss Diagram}
\usage{
wl8_diagram(data, topline, colour = TRUE)
}
\arguments{
\item{data}{Data frame with columns `endpoint`, ` 'wins', 'ties', 'losses'`.
The dataframe should have exactly 8 rows.}
\item{topline}{Character string to be included in the top box.}
\item{colour}{Logical default = TRUE. If TRUE the diagram will be coloured.}
}
\value{
grViz object. Refer to DiagrammeR package for details.
}
\description{
Uses the DiagrammeR package to return a win-loss diagram.
Currently only supports exactly 8 rows.
}
\examples{
data <- data.frame(endpoint = c(paste0("endpoint", 1:8)),
wins = c(5,5,5,5,5,5,5,5),
ties = c(80,70,60,50,40,30,20,10),
losses = c(5,5,5,5,5,5,5,5))
wl8_diagram(data, topline="N=10*9 = 90")
wl8_diagram(data, topline="N=10*9 = 90", colour = FALSE)
}
|
library(tidyverse)
library(readxl)
library(lubridate)
library(openxlsx)
library(zoo)
# Setup -------------------------------------------------------------------
# Clean out the existing environment so stale objects from a previous run
# are not accidentally reused.
# NOTE(review): rm(list = ls()) wipes the user's entire workspace when this
# script is sourced interactively -- consider relying on a fresh R session
# instead.
rm(list = ls())
options(scipen=999)  # suppress scientific notation in printed/written output
# Input directory of continuous data exports; only file names containing
# "wq" are processed below.
path <- "A:/Integrated_Report/DataSources/NationalEstuarineResearch/331891/FilesforAWQMS/"
in_fnames <- list.files(path, full.names = TRUE)
in_fnames <- in_fnames[grep("wq", in_fnames)]
for(h in 1:length(in_fnames)){
print(paste("Starting file", h, "of",length(in_fnames) ))
filepath = in_fnames[h]
data_import <- read.csv(filepath, stringsAsFactors = FALSE) %>%
select(StationCode, DateTimeStamp, Temp, F_Temp, DO_Pct, F_DO_Pct, DO_mgl, F_DO_mgl ) %>%
mutate(DateTimeStamp = mdy_hm(DateTimeStamp))
#tz(data_import$DateTimeStamp) <- "America/Los_Angeles"
data_long <- data_import %>%
gather(key = 'parameter', value = 'result', -StationCode, -DateTimeStamp, -F_Temp, -F_DO_Pct, -F_DO_mgl ) %>%
mutate(qual = ifelse(parameter == "Temp", F_Temp,
ifelse(parameter == "DO_mgl", F_DO_mgl,
ifelse(parameter == "DO_Pct", F_DO_Pct, "ERROR" ))),
date = as.Date(DateTimeStamp)) %>%
select(-F_Temp, -F_DO_mgl, -F_DO_Pct) %>%
# Filter out suspect and other bad data, keep only 0,2,3,4,and 5 labeled data
filter(grepl("<0>|<2>|<3>|<4>|<5>", qual))
# get unique list of characteristics to run for loop through
unique_characteritics <- unique(data_long$parameter)
#create list for getting data out of loop
monloc_do_list <- list()
sumstatlist <- list()
# For loop for summary statistics -----------------------------------------
# Loop goes through each characteristc and generates summary stats
# After loop, data gets pushed inot single table
for (i in 1:length(unique_characteritics)){
print(paste("Begin", unique_characteritics[i], "- characteristic", i, "of", length(unique_characteritics)))
# Characteristic for this loop iteration
char <- unique_characteritics[i]
# Filter so table only contains single characteristic
results_data_char <- data_long %>%
filter(parameter == char) %>%
# generare unique hour field for hourly values and stats
mutate(hr = format(DateTimeStamp, "%Y-%j-%H"))
# Simplify to hourly values and Stats
hrsum <- results_data_char %>%
group_by(StationCode, hr) %>%
summarise(date = mean(date),
hrDTmin = min(DateTimeStamp),
hrDTmax = max(DateTimeStamp),
hrN = sum(!is.na(result)),
hrMean = mean(result, na.rm=TRUE),
hrMin = min(result, na.rm=TRUE),
hrMax = max(result, na.rm=TRUE))
# For each date, how many hours have hrN > 0
# remove rows with zero records in an hour.
hrdat<- hrsum[which(hrsum$hrN >0),]
# Summarise to daily statistics
daydat <- hrdat %>%
group_by(StationCode, date) %>%
summarise( dDTmin = min(hrDTmin),
dDTmax = max(hrDTmax),
hrNday = length(hrN),
dyN = sum(hrN),
dyMean = mean(hrMean, na.rm=TRUE),
dyMin = min(hrMin, na.rm=TRUE),
dyMax = max(hrMax, na.rm=TRUE))
daydat <- daydat %>%
rowwise() %>%
mutate(ResultStatusID = ifelse(hrNday >= 22, 'Final', "Rejected")) %>%
mutate(cmnt =ifelse(hrNday >= 22, "Generated by ORDEQ", ifelse(hrNday <= 22 & hrNday >= 20,
paste0("Generated by ORDEQ; Estimated - ", as.character(hrNday), ' hrs with valid data in day' ),
paste0("Generated by ORDEQ; Rejected - ", as.character(hrNday), ' hrs with valid data in day' )) )) %>%
mutate(ma.mean7 = as.numeric(""),
ma.min7 = as.numeric(""),
ma.mean30 = as.numeric(""),
ma.max7 = as.numeric(""))
#Deal with DO Results
if (grepl("DO", char)) {
#monitoring location loop
for(j in 1:length(unique(daydat$StationCode))){
print(paste("Station", j, "of", length(unique(daydat$StationCode))))
station <- unique(daydat$StationCode)[j]
#Filter dataset to only look at 1 monitoring location at a time
daydat_station <- daydat %>%
filter(StationCode == station) %>%
mutate(startdate7 = as.Date(date) - 6,
startdate30 = as.Date(date) -30)
# 7 day loop
# Loops throough each row in the monitoring location dataset
# And pulls out records that are within the preceding 7 day window
# If there are at least 6 values, then calculate 7 day min and mean
# Assigns data back to daydat_station
print("Begin 7 day moving averages")
pb <- txtProgressBar(min = 0, max = nrow(daydat_station), style = 3)
for(k in 1:nrow(daydat_station)){
start7 <- daydat_station$startdate7[k]
end7 <- daydat_station$date[k]
station_7day <- daydat_station %>%
filter(date <= end7 & date >= start7) %>%
filter(hrNday >= 22)
ma.mean7 <- ifelse(length(unique(station_7day$date)) >= 6, mean(station_7day$dyMean), NA )
ma.min7 <- ifelse(length(unique(station_7day$date)) >= 6, mean(station_7day$dyMin), NA )
daydat_station[k,"ma.mean7"] <- ifelse(k >=7, ma.mean7, NA)
daydat_station[k, "ma.min7"] <- ifelse(k >=7, ma.min7, NA)
setTxtProgressBar(pb, k)
} #end of 7day loop
close(pb)
# 30 day loop
# Loops throough each row in the monitoring location dataset
# And pulls out records that are within the preceding 30 day window
# If there are at least 29 values, then calculate 30 day mean
# Assigns data back to daydat_station
print("Begin 30 day moving averages" )
pb <- txtProgressBar(min = 0, max = nrow(daydat_station), style = 3)
for(l in 1:nrow(daydat_station)){
start30 <- daydat_station$startdate30[l]
end30 <- daydat_station$date[l]
station_30day <- daydat_station %>%
filter(date <= end30 & date >= start30) %>%
filter(hrNday >= 22)
ma.mean30 <- ifelse(length(unique(station_30day$date)) >= 29, mean(station_30day$dyMean), NA )
daydat_station[l,"ma.mean30"] <- ifelse(l >= 30, ma.mean30, NA)
setTxtProgressBar(pb, l)
} #end of 30day loop
close(pb)
# Assign dataset filtered to 1 monitoring location to a list for combining outside of for loop
monloc_do_list[[j]] <- daydat_station
} # end of monitoring location for loop
# Combine list to single dataframe
sum_stats <- bind_rows(monloc_do_list)
} # end of DO if statement
## TEMPERATURE
if (char == 'Temp' ) {
# Temperature is much easier to calculate, since it needs a complete 7 day record to calculate the 7day moving average
# This can happen with a simple grouping
sum_stats <- daydat %>%
arrange(StationCode, date) %>%
group_by(StationCode) %>%
mutate(startdate7 = lag(date, 6, order_by = date),
macmt = paste(lag(ResultStatusID, 6),
lag(ResultStatusID, 5),
lag(ResultStatusID, 4),
lag(ResultStatusID, 3),
lag(ResultStatusID, 2),
lag(ResultStatusID, 1),
ResultStatusID),
# flag out which result gets a moving average calculated
calc7ma = ifelse(startdate7 == (as.Date(date) - 6) & (!grepl("Rejected",macmt )), 1, 0 ))%>%
mutate(ma.max7 = ifelse(calc7ma == 1 ,round(rollmean(x = dyMax, 7, align = "right", fill = NA),2) , NA )) %>%
select(-startdate7, -calc7ma, -macmt )
} #end of temp if statement
## Other - just set sum_stats to daydat, since no moving averages need to be generated.
if (char != 'Temp' & !grepl("DO", char) ) {
sum_stats <- daydat
} #end of not DO or temp statement
#Assign the char ID to the dataset
sum_stats <- sum_stats %>%
mutate(charID = char)
#Set to list for getting out of for loop
sumstatlist[[i]] <- sum_stats
} # end of characteristics for loop
sumstat <- bind_rows(sumstatlist)
#Gather summary statistics from wide format into long format
#rename summary statistics to match AWQMS Import Configuration
sumstat_long <- sumstat %>%
rename("Daily Maximum" = dyMax,
"Daily Minimum" = dyMin,
"Daily Mean" = dyMean,
"7DMADMin" = ma.min7,
"7DMADMean" = ma.mean7,
"7DMADMax" = ma.max7,
"30DMADMean" = ma.mean30) %>%
gather(
"Daily Maximum",
"Daily Minimum",
"Daily Mean",
"7DMADMin",
"7DMADMean",
"7DMADMax",
"30DMADMean",
key = "StatisticalBasis",
value = "Result",
na.rm = TRUE
) %>%
arrange(StationCode, date) %>%
mutate(Equipment = "ContinuousPrb")
AQWMS_sum_stat <- sumstat_long %>%
mutate(r_units = ifelse(charID == "Temp", "deg C",
ifelse(charID == "DO_mgl", "mg/l",
ifelse(charID == "DO_Pct", "% saturatn", "ERROR" ))),
charID = ifelse(charID == "Temp", "Temperature, water",
ifelse(charID == "DO_mgl", "Dissolved oxygen (DO)",
ifelse(charID == "DO_Pct", "Dissolved oxygen saturation", "ERROR" ))),
RsltTimeBasis = ifelse(StatisticalBasis == "7DMADMin" |
StatisticalBasis == "7DMADMean" |
StatisticalBasis == "7DMADMax", "7 Day",
ifelse(StatisticalBasis == "30DMADMean", "30 Day", "1 Day" )),
ActivityType = "FMC",
Result.Analytical.Method.ID = ifelse(charID == "Conductivity", "120.1",
ifelse(charID == "Dissolved oxygen (DO)", "NFM 6.2.1-LUM",
ifelse(charID == "pH","150.1",
ifelse(charID == "Temperature, water", "170.1",
ifelse(charID == "Turbidity", "180.1",
ifelse(charID == "Dissolved oxygen saturation", "NFM 6.2.1-LUM", "ERROR" )))))),
SmplColMthd = "ContinuousPrb",
SmplColEquip = "Probe/Sensor",
SmplDepth = "",
SmplDepthUnit = "",
SmplColEquipComment = "",
Samplers = "",
# Project = Project.ID,
AnaStartDate = "",
AnaStartTime = "",
AnaEndDate = "",
AnaEndTime = "",
ActStartDate = format(dDTmax, "%Y-%m-%d"),
ActStartTime = "0:00",
ActEndDate = format(dDTmax, "%Y-%m-%d"),
ActEndTime = "23:59",
RsltType = "Calculated",
ActStartTimeZone = "PST",
ActEndTimeZone = "PST",
AnaStartTimeZone = "",
AnaEndTimeZone = "",
Result = round(Result, digits = 2)
) %>%
select(charID,
Result,
r_units,
Result.Analytical.Method.ID,
RsltType,
ResultStatusID,
StatisticalBasis,
RsltTimeBasis,
cmnt,
ActivityType,
StationCode,
SmplColMthd,
SmplColEquip,
SmplDepth,
SmplDepthUnit,
SmplColEquipComment,
Samplers,
Equipment,
#Project,
ActStartDate,
ActStartTime,
ActStartTimeZone,
ActEndDate,
ActEndTime,
ActEndTimeZone,
AnaStartDate,
AnaStartTime,
AnaStartTimeZone,
AnaEndDate,
AnaEndTime,
AnaEndTimeZone)
openxlsx::write.xlsx(AQWMS_sum_stat, paste0(tools::file_path_sans_ext(filepath),"-statsum_4_AWQMS.xlsx"))
} | /File Processing/National_Estuary_sum_stat.R | no_license | TravisPritchardODEQ/IR2018 | R | false | false | 12,145 | r | library(tidyverse)
library(readxl)
library(lubridate)
library(openxlsx)
library(zoo)
# Setup -------------------------------------------------------------------
# clean out exisiting environment
# helps to avoid overwriting
rm(list = ls())
options(scipen=999)
path <- "A:/Integrated_Report/DataSources/NationalEstuarineResearch/331891/FilesforAWQMS/"
in_fnames <- list.files(path, full.names = TRUE)
in_fnames <- in_fnames[grep("wq", in_fnames)]
for(h in 1:length(in_fnames)){
print(paste("Starting file", h, "of",length(in_fnames) ))
filepath = in_fnames[h]
data_import <- read.csv(filepath, stringsAsFactors = FALSE) %>%
select(StationCode, DateTimeStamp, Temp, F_Temp, DO_Pct, F_DO_Pct, DO_mgl, F_DO_mgl ) %>%
mutate(DateTimeStamp = mdy_hm(DateTimeStamp))
#tz(data_import$DateTimeStamp) <- "America/Los_Angeles"
data_long <- data_import %>%
gather(key = 'parameter', value = 'result', -StationCode, -DateTimeStamp, -F_Temp, -F_DO_Pct, -F_DO_mgl ) %>%
mutate(qual = ifelse(parameter == "Temp", F_Temp,
ifelse(parameter == "DO_mgl", F_DO_mgl,
ifelse(parameter == "DO_Pct", F_DO_Pct, "ERROR" ))),
date = as.Date(DateTimeStamp)) %>%
select(-F_Temp, -F_DO_mgl, -F_DO_Pct) %>%
# Filter out suspect and other bad data, keep only 0,2,3,4,and 5 labeled data
filter(grepl("<0>|<2>|<3>|<4>|<5>", qual))
# get unique list of characteristics to run for loop through
unique_characteritics <- unique(data_long$parameter)
#create list for getting data out of loop
monloc_do_list <- list()
sumstatlist <- list()
# For loop for summary statistics -----------------------------------------
# Loop goes through each characteristc and generates summary stats
# After loop, data gets pushed inot single table
for (i in 1:length(unique_characteritics)){
print(paste("Begin", unique_characteritics[i], "- characteristic", i, "of", length(unique_characteritics)))
# Characteristic for this loop iteration
char <- unique_characteritics[i]
# Filter so table only contains single characteristic
results_data_char <- data_long %>%
filter(parameter == char) %>%
# generare unique hour field for hourly values and stats
mutate(hr = format(DateTimeStamp, "%Y-%j-%H"))
# Simplify to hourly values and Stats
hrsum <- results_data_char %>%
group_by(StationCode, hr) %>%
summarise(date = mean(date),
hrDTmin = min(DateTimeStamp),
hrDTmax = max(DateTimeStamp),
hrN = sum(!is.na(result)),
hrMean = mean(result, na.rm=TRUE),
hrMin = min(result, na.rm=TRUE),
hrMax = max(result, na.rm=TRUE))
# For each date, how many hours have hrN > 0
# remove rows with zero records in an hour.
hrdat<- hrsum[which(hrsum$hrN >0),]
# Summarise to daily statistics
daydat <- hrdat %>%
group_by(StationCode, date) %>%
summarise( dDTmin = min(hrDTmin),
dDTmax = max(hrDTmax),
hrNday = length(hrN),
dyN = sum(hrN),
dyMean = mean(hrMean, na.rm=TRUE),
dyMin = min(hrMin, na.rm=TRUE),
dyMax = max(hrMax, na.rm=TRUE))
daydat <- daydat %>%
rowwise() %>%
mutate(ResultStatusID = ifelse(hrNday >= 22, 'Final', "Rejected")) %>%
mutate(cmnt =ifelse(hrNday >= 22, "Generated by ORDEQ", ifelse(hrNday <= 22 & hrNday >= 20,
paste0("Generated by ORDEQ; Estimated - ", as.character(hrNday), ' hrs with valid data in day' ),
paste0("Generated by ORDEQ; Rejected - ", as.character(hrNday), ' hrs with valid data in day' )) )) %>%
mutate(ma.mean7 = as.numeric(""),
ma.min7 = as.numeric(""),
ma.mean30 = as.numeric(""),
ma.max7 = as.numeric(""))
#Deal with DO Results
if (grepl("DO", char)) {
#monitoring location loop
for(j in 1:length(unique(daydat$StationCode))){
print(paste("Station", j, "of", length(unique(daydat$StationCode))))
station <- unique(daydat$StationCode)[j]
#Filter dataset to only look at 1 monitoring location at a time
daydat_station <- daydat %>%
filter(StationCode == station) %>%
mutate(startdate7 = as.Date(date) - 6,
startdate30 = as.Date(date) -30)
# 7 day loop
# Loops through each row in the monitoring location dataset
# And pulls out records that are within the preceding 7 day window
# If there are at least 6 values, then calculate 7 day min and mean
# Assigns data back to daydat_station
print("Begin 7 day moving averages")
pb <- txtProgressBar(min = 0, max = nrow(daydat_station), style = 3)
for(k in 1:nrow(daydat_station)){
start7 <- daydat_station$startdate7[k]
end7 <- daydat_station$date[k]
station_7day <- daydat_station %>%
filter(date <= end7 & date >= start7) %>%
filter(hrNday >= 22)
ma.mean7 <- ifelse(length(unique(station_7day$date)) >= 6, mean(station_7day$dyMean), NA )
ma.min7 <- ifelse(length(unique(station_7day$date)) >= 6, mean(station_7day$dyMin), NA )
daydat_station[k,"ma.mean7"] <- ifelse(k >=7, ma.mean7, NA)
daydat_station[k, "ma.min7"] <- ifelse(k >=7, ma.min7, NA)
setTxtProgressBar(pb, k)
} #end of 7day loop
close(pb)
# 30 day loop
# Loops through each row in the monitoring location dataset
# And pulls out records that are within the preceding 30 day window
# If there are at least 29 values, then calculate 30 day mean
# Assigns data back to daydat_station
print("Begin 30 day moving averages" )
pb <- txtProgressBar(min = 0, max = nrow(daydat_station), style = 3)
for(l in 1:nrow(daydat_station)){
start30 <- daydat_station$startdate30[l]
end30 <- daydat_station$date[l]
station_30day <- daydat_station %>%
filter(date <= end30 & date >= start30) %>%
filter(hrNday >= 22)
ma.mean30 <- ifelse(length(unique(station_30day$date)) >= 29, mean(station_30day$dyMean), NA )
daydat_station[l,"ma.mean30"] <- ifelse(l >= 30, ma.mean30, NA)
setTxtProgressBar(pb, l)
} #end of 30day loop
close(pb)
# Assign dataset filtered to 1 monitoring location to a list for combining outside of for loop
monloc_do_list[[j]] <- daydat_station
} # end of monitoring location for loop
# Combine list to single dataframe
sum_stats <- bind_rows(monloc_do_list)
} # end of DO if statement
## TEMPERATURE
if (char == 'Temp' ) {
# Temperature is much easier to calculate, since it needs a complete 7 day record to calculate the 7day moving average
# This can happen with a simple grouping
sum_stats <- daydat %>%
arrange(StationCode, date) %>%
group_by(StationCode) %>%
mutate(startdate7 = lag(date, 6, order_by = date),
macmt = paste(lag(ResultStatusID, 6),
lag(ResultStatusID, 5),
lag(ResultStatusID, 4),
lag(ResultStatusID, 3),
lag(ResultStatusID, 2),
lag(ResultStatusID, 1),
ResultStatusID),
# flag out which result gets a moving average calculated
calc7ma = ifelse(startdate7 == (as.Date(date) - 6) & (!grepl("Rejected",macmt )), 1, 0 ))%>%
mutate(ma.max7 = ifelse(calc7ma == 1 ,round(rollmean(x = dyMax, 7, align = "right", fill = NA),2) , NA )) %>%
select(-startdate7, -calc7ma, -macmt )
} #end of temp if statement
## Other - just set sum_stats to daydat, since no moving averages need to be generated.
if (char != 'Temp' & !grepl("DO", char) ) {
sum_stats <- daydat
} #end of not DO or temp statement
#Assign the char ID to the dataset
sum_stats <- sum_stats %>%
mutate(charID = char)
#Set to list for getting out of for loop
sumstatlist[[i]] <- sum_stats
} # end of characteristics for loop
sumstat <- bind_rows(sumstatlist)
#Gather summary statistics from wide format into long format
#rename summary statistics to match AWQMS Import Configuration
sumstat_long <- sumstat %>%
rename("Daily Maximum" = dyMax,
"Daily Minimum" = dyMin,
"Daily Mean" = dyMean,
"7DMADMin" = ma.min7,
"7DMADMean" = ma.mean7,
"7DMADMax" = ma.max7,
"30DMADMean" = ma.mean30) %>%
gather(
"Daily Maximum",
"Daily Minimum",
"Daily Mean",
"7DMADMin",
"7DMADMean",
"7DMADMax",
"30DMADMean",
key = "StatisticalBasis",
value = "Result",
na.rm = TRUE
) %>%
arrange(StationCode, date) %>%
mutate(Equipment = "ContinuousPrb")
AQWMS_sum_stat <- sumstat_long %>%
mutate(r_units = ifelse(charID == "Temp", "deg C",
ifelse(charID == "DO_mgl", "mg/l",
ifelse(charID == "DO_Pct", "% saturatn", "ERROR" ))),
charID = ifelse(charID == "Temp", "Temperature, water",
ifelse(charID == "DO_mgl", "Dissolved oxygen (DO)",
ifelse(charID == "DO_Pct", "Dissolved oxygen saturation", "ERROR" ))),
RsltTimeBasis = ifelse(StatisticalBasis == "7DMADMin" |
StatisticalBasis == "7DMADMean" |
StatisticalBasis == "7DMADMax", "7 Day",
ifelse(StatisticalBasis == "30DMADMean", "30 Day", "1 Day" )),
ActivityType = "FMC",
Result.Analytical.Method.ID = ifelse(charID == "Conductivity", "120.1",
ifelse(charID == "Dissolved oxygen (DO)", "NFM 6.2.1-LUM",
ifelse(charID == "pH","150.1",
ifelse(charID == "Temperature, water", "170.1",
ifelse(charID == "Turbidity", "180.1",
ifelse(charID == "Dissolved oxygen saturation", "NFM 6.2.1-LUM", "ERROR" )))))),
SmplColMthd = "ContinuousPrb",
SmplColEquip = "Probe/Sensor",
SmplDepth = "",
SmplDepthUnit = "",
SmplColEquipComment = "",
Samplers = "",
# Project = Project.ID,
AnaStartDate = "",
AnaStartTime = "",
AnaEndDate = "",
AnaEndTime = "",
ActStartDate = format(dDTmax, "%Y-%m-%d"),
ActStartTime = "0:00",
ActEndDate = format(dDTmax, "%Y-%m-%d"),
ActEndTime = "23:59",
RsltType = "Calculated",
ActStartTimeZone = "PST",
ActEndTimeZone = "PST",
AnaStartTimeZone = "",
AnaEndTimeZone = "",
Result = round(Result, digits = 2)
) %>%
select(charID,
Result,
r_units,
Result.Analytical.Method.ID,
RsltType,
ResultStatusID,
StatisticalBasis,
RsltTimeBasis,
cmnt,
ActivityType,
StationCode,
SmplColMthd,
SmplColEquip,
SmplDepth,
SmplDepthUnit,
SmplColEquipComment,
Samplers,
Equipment,
#Project,
ActStartDate,
ActStartTime,
ActStartTimeZone,
ActEndDate,
ActEndTime,
ActEndTimeZone,
AnaStartDate,
AnaStartTime,
AnaStartTimeZone,
AnaEndDate,
AnaEndTime,
AnaEndTimeZone)
openxlsx::write.xlsx(AQWMS_sum_stat, paste0(tools::file_path_sans_ext(filepath),"-statsum_4_AWQMS.xlsx"))
} |
# Mortality-adjusted strategy performance comparison.
# Simulates terminal wealth under a CPPI strategy and an alternative strategy
# with mortality included, then plots the final-wealth densities: overall and
# conditional on ruin (X_T <= 0).
library(tidyverse)
library(data.table)
library(viridis)
library(MASS)
library(evir)
library(ercv)
library(Rcpp)

source("functions.R")  # expected to provide generate_all_data() -- confirm
sourceCpp("cppi.cpp")

# Create Data------------------------------------------------------
alpha <- 0.0343 # Expected return of the risky market
sigma <- 0.1544 # Expected volatility of the risky market
a <- 10 # Factor 'a'
years <- 60 # Total time
nsim <- 1e6 # Number of simulations
pi <- 0.5 # Constant proportion for risky investment (NOTE: shadows base::pi)
K <- 42
A <- 0.5

all_data <- generate_all_data(alpha = alpha,
                              sigma = sigma,
                              a = a,
                              years = years,
                              nsim = nsim,
                              pi = pi,
                              K = K,
                              A = A,  # was a hard-coded 0.5; use the variable defined above
                              include.mortality = TRUE)

# Density of final wealth for the two mortality-adjusted strategies.
all_data %>%
  filter(model %in% c("cppi-mort", "alt-mort")) %>%
  ggplot(aes(x = X_T, fill = model)) +
  geom_density(alpha = 0.5) +
  theme_bw() +
  theme(legend.position = "none") +
  xlab("Final Wealth") +
  ylab("")

# Same density, restricted to ruined paths (non-positive final wealth).
all_data %>%
  filter(model %in% c("cppi-mort", "alt-mort")) %>%
  filter(X_T <= 0) %>%
  ggplot(aes(x = X_T, fill = model)) +
  geom_density(alpha = 0.5) +
  theme_bw() +
  theme(legend.position = "none") +
  xlab("Final Wealth") +
  ylab("")
| /mort-performance.R | no_license | aljrico/savings-optimisation | R | false | false | 1,213 | r | library(tidyverse)
library(data.table)
library(viridis)
library(MASS)
library(evir)
library(ercv)
library(Rcpp)
source("functions.R")
sourceCpp("cppi.cpp")
# Create Data------------------------------------------------------
alpha <- 0.0343 # Expected return of the risky market
sigma <- 0.1544 # Expected volatility of the risky market
a <- 10 # Factor 'a'
years <- 60 # Total time
nsim <- 1e6 # Number of simulations
pi <- 0.5 # Constant proportion for risky investment
K <- 42
A <- 0.5
all_data <- generate_all_data(alpha = alpha,
sigma = sigma,
a = a,
years = years,
nsim = nsim,
pi = pi,
K = K,
A = 0.5,
include.mortality = TRUE)
all_data %>%
filter(model %in% c("cppi-mort", "alt-mort")) %>%
ggplot(aes(x = X_T, fill = model)) +
geom_density(alpha = 0.5) +
theme_bw() +
theme(legend.position = "none") +
xlab("Final Wealth") +
ylab("")
all_data %>%
filter(model %in% c("cppi-mort", "alt-mort")) %>%
filter(X_T <= 0) %>%
ggplot(aes(x = X_T, fill = model)) +
geom_density(alpha = 0.5) +
theme_bw() +
theme(legend.position = "none") +
xlab("Final Wealth") +
ylab("")
|
library(ConsRank)
### Name: Tau_X
### Title: TauX (tau extension) rank correlation coefficient
### Aliases: Tau_X
### Keywords: TauX coefficient correlation rank
### ** Examples
data(BU)
RD=BU[,1:3]
Tau=Tau_X(RD)
Tau1_3=Tau_X(RD[1,],RD[3,])
| /data/genthat_extracted_code/ConsRank/examples/Tau_X.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 250 | r | library(ConsRank)
### Name: Tau_X
### Title: TauX (tau extension) rank correlation coefficient
### Aliases: Tau_X
### Keywords: TauX coefficient correlation rank
### ** Examples
data(BU)
RD=BU[,1:3]
Tau=Tau_X(RD)
Tau1_3=Tau_X(RD[1,],RD[3,])
|
#' A cost-effectiveness object
#'
#' An object that summarizes simulated measures of clinical effectiveness and costs from a simulation model for use in a cost-effectiveness analysis.
#'
#'
#' @format
#' A list containing two elements:
#' \itemize{
#' \item{costs}{ Total (discounted) costs by category.}
#' \item{QALYs}{ (Discounted) quality-adjusted life-years.}
#' }
#'
#' @section Costs:
#' A 'costs' \code{\link{data.table}} contains the following columns:
#' \describe{
#' \item{category}{The cost category.}
#' \item{dr}{The discount rate.}
#' \item{sample}{A randomly sampled parameter set from the probabilistic sensitivity analysis (PSA)}
#' \item{strategy_id}{The treatment strategy ID.}
#' \item{grp}{An optional column denoting a subgroup. If not included, it is assumed that a single subgroup is being analyzed.}
#' \item{costs}{Costs.}
#' }
#'
#' @section Quality-adjusted life-years:
#' A 'qalys' \code{\link{data.table}} contains the following columns:
#' \describe{
#' \item{dr}{The discount rate.}
#' \item{sample}{A randomly sampled parameter set from the probabilistic sensitivity analysis (PSA)}
#' \item{strategy_id}{The treatment strategy ID.}
#' \item{grp}{An optional column denoting a subgroup. If not included, it is assumed that a single subgroup is being analyzed.}
#' \item{qalys}{Quality-adjusted life-years}
#' }
#'
#' @name ce
NULL
#' Individualized cost-effectiveness analysis
#'
#' Conduct Bayesian cost-effectiveness analysis (e.g., summarize a probabilistic
#' sensitivity analysis (PSA)) by subgroup.
#' \itemize{
#' \item \code{icea()} computes the probability that
#' each treatment is most cost-effective, the expected value of perfect
#' information, and the net monetary benefit for each treatment.
#' \item \code{icea_pw()} compares interventions to a comparator. Computed
#' quantities include the incremental cost-effectiveness ratio, the
#' incremental net monetary benefit, output for a cost-effectiveness plane,
#' and output for a cost-effectiveness acceptability curve.
#' }
#'
#'
#' @param x An object of simulation output characterizing the probability distribution
#' of clinical effectiveness and costs. If the default method is used, then \code{x}
#' must be a \code{data.frame} or \code{data.table} containing columns of
#' mean costs and clinical effectiveness where each row denotes a randomly sampled parameter set
#' and treatment strategy.
#' @param k Vector of willingness to pay values.
#' @param comparator Name of the comparator strategy in x.
#' @param sample Character name of column from \code{x} denoting a randomly sampled parameter set.
#' @param strategy Character name of column from \code{x} denoting treatment strategy.
#' @param grp Character name of column from \code{x} denoting subgroup. If \code{NULL}, then
#' it is assumed that there is only one group.
#' @param e Character name of column from \code{x} denoting clinical effectiveness.
#' @param c Character name of column from \code{x} denoting costs.
#' @param ... Further arguments passed to or from other methods. Currently unused.
#' @return \code{icea} returns a list containing four \code{data.table}s:
#'
#' \describe{
#' \item{summary}{A \code{data.table} of the mean, 2.5\% quantile, and 97.5\%
#' quantile by strategy and group for clinical effectiveness and costs.}
#' \item{mce}{The probability that each strategy is the most effective treatment
#' for each group for the range of specified willingness to pay values.}
#' \item{evpi}{The expected value of perfect information by group for the range
#' of specified willingness to pay values.}
#' \item{nmb}{The mean, 2.5\% quantile, and 97.5\% quantile of (monetary) net benefits
#' for the range of specified willingness to pay values.}
#' }
#'
#' \code{icea_pw} also returns a list containing four data.tables:
#' \describe{
#' \item{summary}{A data.table of the mean, 2.5\% quantile, and 97.5\%
#' quantile by strategy and group for clinical effectiveness and costs.}
#' \item{delta}{Incremental effectiveness and incremental cost for each simulated
#' parameter set by strategy and group. Can be used to plot a cost-effectiveness plane. }
#' \item{ceac}{Values needed to plot a cost-effectiveness acceptability curve by
#' group. In other words, the probability that each strategy is more cost-effective than
#' the comparator for the specified willingness to pay values.}
#' \item{inmb}{The mean, 2.5\% quantile, and 97.5\% quantile of (monetary)
#' incremental net benefits for the range of specified willingness to pay values.}
#' }
#' @name icea
#' @examples
#' # simulation output
#' n_samples <- 100
#' sim <- data.frame(sample = rep(seq(n_samples), 4),
#' c = c(rlnorm(n_samples, 5, .1), rlnorm(n_samples, 5, .1),
#' rlnorm(n_samples, 11, .1), rlnorm(n_samples, 11, .1)),
#' e = c(rnorm(n_samples, 8, .2), rnorm(n_samples, 8.5, .1),
#' rnorm(n_samples, 11, .6), rnorm(n_samples, 11.5, .6)),
#' strategy = rep(paste0("Strategy ", seq(1, 2)),
#' each = n_samples * 2),
#' grp = rep(rep(c("Group 1", "Group 2"),
#' each = n_samples), 2)
#')
#'
#' # icea
#' icea <- icea(sim, k = seq(0, 200000, 500), sample = "sample", strategy = "strategy",
#' grp = "grp", e = "e", c = "c")
#' names(icea)
#' # The probability that each strategy is the most cost-effective
#' # in each group with a willingness to pay of 20,000
#' library("data.table")
#' icea$mce[k == 20000]
#'
#' # icea_pw
#' icea_pw <- icea_pw(sim, k = seq(0, 200000, 500), comparator = "Strategy 1",
#' sample = "sample", strategy = "strategy", grp = "grp",
#' e = "e", c = "c")
#' names(icea_pw)
#' # cost-effectiveness acceptability curve
#' head(icea_pw$ceac[k >= 20000])
#' icer_tbl(icea_pw)
#' @export
icea <- function(x, ...) {
# S3 generic; dispatches to icea.default() or icea.ce() defined below.
UseMethod("icea")
}
#' @export
#' @rdname icea
icea_pw <- function(x, ...) {
# S3 generic; dispatches to icea_pw.default() or icea_pw.ce() defined below.
UseMethod("icea_pw")
}
check_grp <- function(x, grp) {
  # Ensure a subgroup column exists. When `grp` is NULL, create a default
  # column named "grp" holding the single pseudo-group "1" (added by
  # reference on the data.table `x`) and return that name; otherwise return
  # the supplied name unchanged.
  if (!is.null(grp)) {
    return(grp)
  }
  if ("grp" %in% colnames(x)) {
    x[, ("grp") := NULL]  # drop any pre-existing column so it is rebuilt cleanly
  }
  x[, ("grp") := "1"]
  "grp"
}
#' @export
#' @rdname icea
# Default icea() method: summarize a PSA given a long table `x` with one row
# per (sample, strategy, group). Column names are passed as strings via
# `sample`, `strategy`, `grp`, `e` (effectiveness), and `c` (costs).
icea.default <- function(x, k = seq(0, 200000, 500), sample, strategy,
grp = NULL, e, c, ...){
if (!is.data.table(x)){
x <- data.table(x)
}
# Copy so that check_grp()'s by-reference edits never touch the caller's data.
x <- copy(x)
grp <- check_grp(x, grp)
n_samples <- length(unique(x[[sample]]))
n_strategies <- length(unique(x[[strategy]]))
n_grps <- length(unique(x[[grp]]))
# Row order (grp, sample, strategy) must match the layout the compiled
# helpers (C_mce, C_enmbpi) iterate over.
setorderv(x, c(grp, sample, strategy))
# estimates
nmb <- nmb_summary(x, k, strategy, grp, e, c)
mce <- mce(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps)
evpi <- evpi(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps, nmb)
summary_table <- cea_table(x, strategy, grp, e, c)
# Standardize the summary column names to e_*/c_* regardless of the
# user-supplied effectiveness/cost column names.
setnames(summary_table,
c(paste0(e, c("_mean", "_lower", "_upper")),
paste0(c, c("_mean", "_lower", "_upper"))),
c(paste0("e", c("_mean", "_lower", "_upper")),
paste0("c", c("_mean", "_lower", "_upper")))
)
# Classed return value; the attributes are consumed by icer_tbl() and plots.
l <- list(summary = summary_table, mce = mce, evpi = evpi, nmb = nmb)
class(l) <- "icea"
attr(l, "strategy") <- strategy
attr(l, "grp") <- grp
return(l)
}
#' @export
#' @rdname icea
# Default icea_pw() method: pairwise comparison of each strategy in `x`
# against `comparator`. Produces incremental outcomes (delta), a CEAC,
# incremental NMB, and a summary table with ICERs.
icea_pw.default <- function(x, k = seq(0, 200000, 500), comparator,
sample, strategy,
grp = NULL, e, c, ...){
if (!is.data.table(x)){
x <- data.table(x)
}
# Copy so by-reference edits below never touch the caller's data.
x <- copy(x)
grp <- check_grp(x, grp)
# Row order (grp, strategy, sample) must match the layout expected by
# calc_incr_effect() and C_ceac().
setorderv(x, c(grp, strategy, sample))
if (!comparator %in% unique(x[[strategy]])){
stop("Chosen comparator strategy is not in 'x'.",
call. = FALSE)
}
# treatment strategies vs comparators
indx_comparator <- which(x[[strategy]] == comparator)
indx_treat <- which(x[[strategy]] != comparator)
sim_comparator <- x[indx_comparator]
sim_treat <- x[indx_treat]
# Counts exclude the comparator: there are n_strategies comparisons.
n_strategies <- length(unique(sim_treat[[strategy]]))
n_samples <- length(unique(sim_treat[[sample]]))
n_grps <- length(unique(sim_treat[[grp]]))
# estimates
outcomes <- c(e, c)
delta <- calc_incr_effect(sim_treat, sim_comparator, sample, strategy, grp, outcomes,
n_samples, n_strategies, n_grps)
# Standardize incremental column names to 'ie'/'ic'.
setnames(delta, paste0("i", e), "ie")
setnames(delta, paste0("i", c), "ic")
ceac <- ceac(delta, k, strategy, grp, e = "ie", c = "ic",
n_samples, n_strategies, n_grps)
inmb <- inmb_summary(delta, k, strategy, grp, e = "ie", c = "ic")
summary_table <- cea_table(delta, strategy, grp, e = "ie", c = "ic", icer = TRUE)
l <- list(summary = summary_table, delta = delta, ceac = ceac, inmb = inmb)
class(l) <- "icea_pw"
attr(l, "strategy") <- strategy
attr(l, "grp") <- grp
attr(l, "comparator") <- comparator
# Record where the comparator sits in the ordered strategy list; used by
# icer_tbl() to place the comparator column in the output matrix.
if (is.factor(x[[strategy]])){
comp_pos <- which(levels(x[[strategy]]) == comparator)
} else {
comp_pos <- which(sort(unique(x[[strategy]])) == comparator)
}
attr(l, "comparator_pos") <- comp_pos
return(l)
}
#' @export
#' @rdname icea
#' @param dr Discount rate.
icea.ce <- function(x, k = seq(0, 200000, 500), dr, ...) {
  # icea() method for 'ce' objects: combine total discounted costs and QALYs
  # at discount rate `dr` into one sample-by-strategy table, then delegate to
  # icea.default().
  category <- NULL  # silence R CMD check NOTE for the NSE column name
  dr_env <- dr      # distinct name so `dr == dr_env` compares column to argument
  total_costs <- x$costs[category == "total" & dr == dr_env,
                         c("sample", "strategy_id", "costs")]
  qalys_tbl <- x$qalys[dr == dr_env, "qalys", with = FALSE]
  sim <- cbind(total_costs, qalys_tbl)
  icea(sim, k = k, sample = "sample", strategy = "strategy_id",
       e = "qalys", c = "costs")
}
#' @export
#' @rdname icea
icea_pw.ce <- function(x, k = seq(0, 200000, 500), comparator, dr, ...) {
  # icea_pw() method for 'ce' objects: combine total discounted costs and
  # QALYs at discount rate `dr`, then run the pairwise comparison against
  # `comparator` via icea_pw.default().
  category <- NULL  # silence R CMD check NOTE for the NSE column name
  dr_env <- dr      # distinct name so `dr == dr_env` compares column to argument
  total_costs <- x$costs[category == "total" & dr == dr_env,
                         c("sample", "strategy_id", "costs")]
  qalys_tbl <- x$qalys[dr == dr_env, "qalys", with = FALSE]
  sim <- cbind(total_costs, qalys_tbl)
  icea_pw(sim, k = k, comparator = comparator, sample = "sample",
          strategy = "strategy_id",
          e = "qalys", c = "costs")
}
# Probability of being most cost-effective
mce <- function(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps) {
  # Probability that each strategy is the most cost-effective, by group and
  # willingness-to-pay value. The index columns are replicated k-major, then
  # group, then strategy, to align with the probability vector returned by
  # the compiled helper C_mce().
  probs <- C_mce(k, x[[e]], x[[c]], n_samples, n_strategies, n_grps)
  out <- data.table(
    rep(k, each = n_strategies * n_grps),
    rep(unique(x[[strategy]]), times = length(k) * n_grps),
    rep(rep(unique(x[[grp]]), each = n_strategies), length(k)),
    probs
  )
  setnames(out, c("k", strategy, grp, "prob"))
  out
}
# Cost effectiveness acceptability curve
ceac <- function(delta, k, strategy, grp, e, c, n_samples, n_strategies, n_grps) {
  # Cost-effectiveness acceptability curve: probability that each strategy is
  # more cost-effective than the comparator, computed from the incremental
  # outcomes in `delta`. Index columns are replicated k-major, then group,
  # then strategy, to align with the vector returned by C_ceac().
  probs <- C_ceac(k, delta[[e]], delta[[c]],
                  n_samples, n_strategies, n_grps)
  out <- data.table(
    rep(k, each = n_strategies * n_grps),
    rep(unique(delta[[strategy]]), times = length(k) * n_grps),
    rep(rep(unique(delta[[grp]]), each = n_strategies), length(k)),
    probs
  )
  setnames(out, c("k", strategy, grp, "prob"))
  out
}
# net benefits summary statistics
# Net monetary benefit summary: NMB = k * e - c for every row of `x` and
# every willingness-to-pay value in `k`, summarized as the mean ('enmb') and
# 2.5%/97.5% quantiles ('lnmb'/'unmb') by strategy, group, and k.
nmb_summary <- function(x, k, strategy, grp, e, c){
nmb <- NULL # Avoid CRAN warning for global undefined variable
# Expand x once per WTP value so NMB is computed in a single vectorized step.
nmb_dt <- data.table(strategy = rep(x[[strategy]], times = length(k)),
grp = rep(x[[grp]], times = length(k)),
k = rep(k, each = nrow(x)),
e = rep(x[[e]], times = length(k)),
c = rep(x[[c]], times = length(k)))
nmb_dt[, "nmb" := k * e - c]
nmb_summary <- nmb_dt[, list("enmb" = mean(nmb),
"lnmb" = stats::quantile(nmb, .025),
"unmb" = stats::quantile(nmb, .975)),
by = c("strategy", "grp", "k")]
# Restore the caller-supplied strategy/group column names.
setnames(nmb_summary, old = c("strategy", "grp"), new = c(strategy, grp))
return(nmb_summary)
}
# incremental benefit summary statistics
# Incremental NMB summary: same computation as nmb_summary() applied to the
# incremental outcomes `ix`, with summary columns renamed einmb/linmb/uinmb.
inmb_summary <- function(ix, k, strategy, grp, e, c){
inmb <- nmb_summary(ix, k, strategy, grp, e, c)
setnames(inmb, colnames(inmb), c(strategy, grp, "k", "einmb", "linmb", "uinmb"))
return(inmb)
}
# Expected value of perfect information
# Expected value of perfect information by group and WTP value.
# As computed below, EVPI = E[max_j NMB_j] (from C_enmbpi) minus
# max_j E[NMB_j] (row maxima of the expected-NMB table). `nmb` is the
# summary table from nmb_summary() and must contain column 'enmb'.
evpi <- function(x, k, strategy, grp, e, c,
n_samples, n_strategies, n_grps, nmb){
# Choose treatment by maximum expected benefit
x_nmb = copy(nmb)
# Cast long -> wide: one row per (k, grp), one expected-NMB column per strategy.
f <- stats::as.formula(paste0("k", "+", grp, "~", strategy))
x_enmb <- dcast(x_nmb, f, value.var = "enmb")
# Row-wise maximum expected NMB; index is 0-based from C++, hence the +1.
mu <- C_rowmax(as.matrix(x_enmb[, -c(1:2), with = FALSE]))
mu_ind <- c(C_rowmax_index(as.matrix(x_enmb[, -c(1:2), with = FALSE]))) + 1
# calculate expected value of perfect information
enmbpi <- C_enmbpi(k, x[[e]], x[[c]], n_samples, n_strategies, n_grps)
evpi <- enmbpi - c(mu)
dt <- data.table(k = rep(k, each = n_grps),
grp = rep(unique(x[[grp]]), times = length(k)),
evpi = evpi, enmbpi = enmbpi, enmb = c(mu), best = mu_ind)
setnames(dt, "grp", grp)
return(dt)
}
# CEA summary table
# Summary table of the mean and 95% credible interval for the effectiveness
# and cost columns (`e`, `c`) by strategy and group. When icer = TRUE an
# ICER column is appended, with dominance flagged in place of a ratio:
# negative mean incremental cost with non-negative incremental effectiveness
# -> "Dominates"; the reverse -> "Dominated".
cea_table <- function(x, strategy, grp, e, c, icer = FALSE){
FUN <- function (x){
return(list(mean = mean(x), quant = stats::quantile(x, c(.025, .975))))
}
# unlist() flattens mean + the two quantiles into three values per column.
ret <- x[, as.list(unlist(lapply(.SD, FUN))),
by = c(strategy, grp), .SDcols = c(e, c)]
setnames(ret, colnames(ret), c(strategy, grp,
paste0(e, c("_mean", "_lower", "_upper")),
paste0(c, c("_mean", "_lower", "_upper"))))
if (icer == TRUE){
ie_mean <- paste0(e, "_mean")
ic_mean <- paste0(c, "_mean")
# ICER = mean incremental cost / mean incremental effectiveness.
# NOTE(review): the RHS is a one-column data.table, not a vector; this
# works here but `ret[[ic_mean]]` would be the cleaner extraction.
ret$icer <- ret[, ic_mean, with = FALSE]/ret[, ie_mean, with = FALSE]
# ifelse() coerces the column to character only when a dominance label
# is actually assigned.
ret$icer <- ifelse(ret[, ic_mean, with = FALSE] < 0 & ret[, ie_mean, with = FALSE] >= 0, "Dominates",
ret$icer)
ret$icer <- ifelse(ret[, ic_mean, with = FALSE] > 0 & ret[, ie_mean, with = FALSE] <= 0, "Dominated",
ret$icer)
}
return(ret)
}
format_costs <- function(x, digits){
  # Fixed-notation currency formatting with a comma thousands separator,
  # e.g. 1234.567 with digits = 0 -> "1,235".
  fixed <- formatC(x, format = "f", digits = digits)
  prettyNum(fixed, big.mark = ",")
}
format_qalys <- function(x, digits){
  # Fixed-notation formatting (no thousands separator) for QALY values,
  # e.g. 1.2345 with digits = 2 -> "1.23".
  sprintf(paste0("%.", digits, "f"), x)
}
format_cri <- function(est, lower, upper, costs = TRUE, digits){
  # Append a formatted credible interval to an already-formatted estimate,
  # e.g. "5 (1,234, 2,346)". Costs get thousands separators; QALYs do not.
  fmt <- if (costs) format_costs else format_qalys
  paste0(est, " (", fmt(lower, digits = digits), ", ",
         fmt(upper, digits = digits), ")")
}
#' ICER table
#'
#' Generate a table of incremental cost-effectiveness ratios given output from
#' \code{\link{icea_pw}}.
#'
#' @param x An object of class "icea_pw" returned by \code{\link{icea_pw}}.
#' @param k Willingness to pay.
#' @param cri If \code{TRUE}, credible intervals are computed; otherwise
#' they are not.
#' @param prob A numeric scalar in the interval \code{(0,1)} giving the credible interval.
#' Default is 0.95 for a 95 percent credible interval.
#' @param digits_qalys Number of digits to use to report QALYs.
#' @param digits_costs Number of digits to use to report costs.
#' @param output Should output be a \code{data.table} or a list of matrices for
#' each group.
#' @param rownames Row names for matrices when \code{output = "matrix"}.
#' @param colnames Column names for matrices when \code{output = "matrix"}.
#' @param drop If \code{TRUE}, then the result is coerced to the lowest possible dimension.
#' Relevant if \code{output = "matrix"} and there is one group, in which case a single
#' matrix will be returned if \code{drop = TRUE} and a list of length 1 will be returned
#' if \code{drop = FALSE}.
#' @seealso \code{\link{icea_pw}}
#' @return If \code{output = "matrix"}, then a list of matrices (or a matrix if
#' \code{drop = TRUE}) reporting incremental cost-effectiveness ratios (ICERs)
#' by group. Specifically, each matrix contains five rows for: (i)
#' incremental quality-adjusted life-years (QALYs), (ii) incremental costs,
#' (iii) the incremental net monetary benefit (NMB), (iv) the ICER,
#' and (v) a conclusion stating whether each strategy is cost-effective relative
#' to a comparator. The number of columns is equal to the
#' number of strategies (including the comparator).
#'
#' If \code{output = "data.table"}, then the results are reported as a \code{data.table},
#' with one row for each strategy and group combination.
#' @export
icer_tbl <- function(x, k = 50000, cri = TRUE, prob = 0.95,
digits_qalys = 2,
digits_costs = 0, output = c("matrix", "data.table"),
rownames = NULL, colnames = NULL,
drop = TRUE){
# Validate inputs before touching any data.
if (!inherits(x, "icea_pw")){
stop("'x' must be an object of class 'icea_pw'",
call. = FALSE)
}
if (prob > 1 | prob < 0){
stop("'prob' must be in the interval (0,1)",
call. = FALSE)
}
strategy <- attributes(x)$strategy
grp <- attributes(x)$grp
output <- match.arg(output)
# Work on a copy so the caller's summary table is not modified by reference.
tbl <- copy(x$summary)
tbl[, "icer_numeric" := get("ic_mean")/get("ie_mean")]
tbl[, "inmb" := k * get("ie_mean") - get("ic_mean")]
# Formatting
tbl[, "iqalys" := format_qalys(get("ie_mean"), digits = digits_qalys)]
tbl[, "icosts" := format_costs(get("ic_mean"), digits = digits_costs)]
tbl[, "icer" := format_costs(get("icer_numeric"), digits = digits_costs)]
tbl[, "icer" := ifelse(get("ic_mean") < 0 & get("ie_mean") >= 0, "Dominates", get("icer"))]
tbl[, "icer" := ifelse(get("ic_mean") > 0 & get("ie_mean") <= 0, "Dominated", get("icer"))]
tbl[, "inmb" := format_costs(get("inmb"), digits = digits_costs)]
if(cri){
prob_lower <- (1 - prob)/2
prob_upper <- 1 - prob_lower
# NOTE(review): this adds 'inmb' to x$delta by reference (removed again
# below), so the caller's object is temporarily mutated.
x$delta[, "inmb" := k * get("ie") - get("ic")]
if (prob == 0.95){
# 95% intervals for QALYs/costs are precomputed in the summary table;
# only the iNMB interval must be derived from the PSA draws.
tbl[, "iqalys" := format_cri(get("iqalys"), get("ie_lower"), get("ie_upper"),
costs = FALSE,
digits = digits_qalys)]
tbl[, "icosts" := format_cri(get("icosts"), get("ic_lower"), get("ic_upper"),
costs = TRUE,
digits = digits_costs)]
inmb_dt <- x$delta[, list(mean = mean(get("inmb")),
lower = stats::quantile(get("inmb"), prob_lower),
upper = stats::quantile(get("inmb"), prob_upper)),
by = c(strategy, grp)]
tbl[, "inmb" := format_cri(get("inmb"), inmb_dt$lower, inmb_dt$upper,
costs = TRUE,
digits = digits_costs)]
} else {
# Non-default interval width: recompute all quantiles from the PSA draws.
cri_dt <- x$delta[, list(iqalys_lower = stats::quantile(get("ie"), prob_lower),
iqalys_upper = stats::quantile(get("ie"), prob_upper),
icosts_lower = stats::quantile(get("ic"), prob_lower),
icosts_upper = stats::quantile(get("ic"), prob_upper),
inmb_lower = stats::quantile(get("inmb"), prob_lower),
inmb_upper = stats::quantile(get("inmb"), prob_upper)),
by = c(strategy, grp)]
tbl[, "iqalys" := format_cri(get("iqalys"), cri_dt$iqalys_lower,
cri_dt$iqalys_upper, costs = FALSE,
digits = digits_qalys)]
tbl[, "icosts" := format_cri(get("icosts"), cri_dt$icosts_lower,
cri_dt$icosts_upper, costs = TRUE,
digits = digits_costs)]
tbl[, "inmb" := format_cri(get("inmb"), cri_dt$inmb_lower,
cri_dt$inmb_upper, costs = TRUE,
digits = digits_costs)]
}
x$delta[, "inmb" := NULL]
} # end credible interval calculations
# A strategy is deemed cost-effective when 0 <= ICER <= k, or when it
# dominates the comparator.
tbl[, "conclusion" := ifelse((get("icer_numeric") >= 0 & get("icer_numeric") <= k) |
get("icer") == "Dominates",
"Cost-effective", "Not cost-effective")]
tbl <- tbl[, c(strategy, grp, "iqalys", "icosts", "inmb", "icer", "conclusion"),
with = FALSE]
if (output == "matrix"){
# One 5-row matrix per group: rows are QALYs/costs/NMB/ICER/conclusion,
# columns are the strategies plus the comparator (filled with "-").
tbl_list <- split(tbl, by = grp)
mat_list <- vector(mode = "list", length = length(tbl_list))
names(mat_list) <- names(tbl_list)
n_strategies <- length(unique(tbl[[strategy]]))
mat <- matrix(NA, nrow = 5, ncol = n_strategies + 1)
if(is.null(rownames)){
rownames(mat) <- c("Incremental QALYs", "Incremental costs",
"Incremental NMB", "ICER", "Conclusion")
} else{
rownames(mat) <- rownames
}
comp_pos <- attributes(x)$comparator_pos
if (is.null(colnames)){
# NOTE(review): column names are taken from the first group's strategy
# order; assumes all groups share the same strategy ordering -- confirm.
strategy_names <- rep(NA, ncol(mat))
strategy_names[comp_pos] <- attributes(x)$comparator
strategy_names[-comp_pos] <- as.character(tbl_list[[1]][[strategy]])
colnames(mat) <- strategy_names
} else{
colnames(mat) <- colnames
}
for (i in 1:length(mat_list)){
mat[1, -comp_pos] <- tbl_list[[i]]$iqalys
mat[2, -comp_pos] <- tbl_list[[i]]$icosts
mat[3, -comp_pos] <- tbl_list[[i]]$inmb
mat[4, -comp_pos] <- tbl_list[[i]]$icer
mat[5, -comp_pos] <- tbl_list[[i]]$conclusion
mat[, comp_pos] <- "-"
mat_list[[i]] <- mat
}
if (drop){
if(length(mat_list) == 1){
mat_list <- mat_list[[1]]
}
}
return(mat_list)
} else{
return(tbl)
}
}
| /R/cea.R | no_license | DustinBrett/hesim | R | false | false | 21,579 | r | #' A cost-effectiveness object
#'
#' An object that summarizes simulated measures of clinical effectiveness and costs from a simulation model for use in a cost-effectiveness analysis.
#'
#'
#' @format
#' A list containing two elements:
#' \itemize{
#' \item{costs}{ Total (discounted) costs by category.}
#' \item{QALYs}{ (Discounted) quality-adjusted life-years.}
#' }
#'
#' @section Costs:
#' A 'costs' \code{\link{data.table}} contains the following columns:
#' \describe{
#' \item{category}{The cost category.}
#' \item{dr}{The discount rate.}
#' \item{sample}{A randomly sampled parameter set from the probabilistic sensitivity analysis (PSA)}
#' \item{strategy_id}{The treatment strategy ID.}
#' \item{grp}{An optional column denoting a subgroup. If not included, it is assumed that a single subgroup is being analyzed.}
#' \item{costs}{Costs.}
#' }
#'
#' @section Quality-adjusted life-years:
#' A 'qalys' \code{\link{data.table}} contains the following columns:
#' \describe{
#' \item{dr}{The discount rate.}
#' \item{sample}{A randomly sampled parameter set from the probabilistic sensitivity analysis (PSA)}
#' \item{strategy_id}{The treatment strategy ID.}
#' \item{grp}{An optional column denoting a subgroup. If not included, it is assumed that a single subgroup is being analyzed.}
#' \item{qalys}{Quality-adjusted life-years}
#' }
#'
#' @name ce
NULL
#' Individualized cost-effectiveness analysis
#'
#' Conduct Bayesian cost-effectiveness analysis (e.g., summarize a probabilistic
#' sensitivity analysis (PSA)) by subgroup.
#' \itemize{
#' \item \code{icea()} computes the probability that
#' each treatment is most cost-effective, the expected value of perfect
#' information, and the net monetary benefit for each treatment.
#' \item \code{icea_pw()} compares interventions to a comparator. Computed
#' quantities include the incremental cost-effectiveness ratio, the
#' incremental net monetary benefit, output for a cost-effectiveness plane,
#' and output for a cost-effectiveness acceptability curve.
#' }
#'
#'
#' @param x An object of simulation output characterizing the probability distribution
#' of clinical effectiveness and costs. If the default method is used, then \code{x}
#' must be a \code{data.frame} or \code{data.table} containing columns of
#' mean costs and clinical effectiveness where each row denotes a randomly sampled parameter set
#' and treatment strategy.
#' @param k Vector of willingness to pay values.
#' @param comparator Name of the comparator strategy in x.
#' @param sample Character name of column from \code{x} denoting a randomly sampled parameter set.
#' @param strategy Character name of column from \code{x} denoting treatment strategy.
#' @param grp Character name of column from \code{x} denoting subgroup. If \code{NULL}, then
#' it is assumed that there is only one group.
#' @param e Character name of column from \code{x} denoting clinical effectiveness.
#' @param c Character name of column from \code{x} denoting costs.
#' @param ... Further arguments passed to or from other methods. Currently unused.
#' @return \code{icea} returns a list containing four \code{data.table}s:
#'
#' \describe{
#' \item{summary}{A \code{data.table} of the mean, 2.5\% quantile, and 97.5\%
#' quantile by strategy and group for clinical effectiveness and costs.}
#' \item{mce}{The probability that each strategy is the most effective treatment
#' for each group for the range of specified willingness to pay values.}
#' \item{evpi}{The expected value of perfect information by group for the range
#' of specified willingness to pay values.}
#' \item{nmb}{The mean, 2.5\% quantile, and 97.5\% quantile of (monetary) net benefits
#' for the range of specified willingness to pay values.}
#' }
#'
#' \code{icea_pw} also returns a list containing four data.tables:
#' \describe{
#' \item{summary}{A data.table of the mean, 2.5\% quantile, and 97.5\%
#' quantile by strategy and group for clinical effectiveness and costs.}
#' \item{delta}{Incremental effectiveness and incremental cost for each simulated
#' parameter set by strategy and group. Can be used to plot a cost-effectiveness plane. }
#' \item{ceac}{Values needed to plot a cost-effectiveness acceptability curve by
#' group. In other words, the probability that each strategy is more cost-effective than
#' the comparator for the specified willingness to pay values.}
#' \item{inmb}{The mean, 2.5\% quantile, and 97.5\% quantile of (monetary)
#' incremental net benefits for the range of specified willingness to pay values.}
#' }
#' @name icea
#' @examples
#' # simulation output
#' n_samples <- 100
#' sim <- data.frame(sample = rep(seq(n_samples), 4),
#' c = c(rlnorm(n_samples, 5, .1), rlnorm(n_samples, 5, .1),
#' rlnorm(n_samples, 11, .1), rlnorm(n_samples, 11, .1)),
#' e = c(rnorm(n_samples, 8, .2), rnorm(n_samples, 8.5, .1),
#' rnorm(n_samples, 11, .6), rnorm(n_samples, 11.5, .6)),
#' strategy = rep(paste0("Strategy ", seq(1, 2)),
#' each = n_samples * 2),
#' grp = rep(rep(c("Group 1", "Group 2"),
#' each = n_samples), 2)
#')
#'
#' # icea
#' icea <- icea(sim, k = seq(0, 200000, 500), sample = "sample", strategy = "strategy",
#' grp = "grp", e = "e", c = "c")
#' names(icea)
#' # The probability that each strategy is the most cost-effective
#' # in each group with a willingness to pay of 20,000
#' library("data.table")
#' icea$mce[k == 20000]
#'
#' # icea_pw
#' icea_pw <- icea_pw(sim, k = seq(0, 200000, 500), comparator = "Strategy 1",
#' sample = "sample", strategy = "strategy", grp = "grp",
#' e = "e", c = "c")
#' names(icea_pw)
#' # cost-effectiveness acceptability curve
#' head(icea_pw$ceac[k >= 20000])
#' icer_tbl(icea_pw)
#' @export
# S3 generic for individualized cost-effectiveness analysis; dispatches to
# icea.default (raw PSA data) or icea.ce (a "ce" object).
icea <- function(x, ...) {
  UseMethod("icea")
}
#' @export
#' @rdname icea
# S3 generic for pairwise (vs. comparator) cost-effectiveness analysis;
# dispatches to icea_pw.default or icea_pw.ce.
icea_pw <- function(x, ...) {
  UseMethod("icea_pw")
}
# Ensure a subgroup column name is available. When 'grp' is NULL, a
# single-group column named "grp" with value "1" is added to 'x' by reference
# (replacing any pre-existing "grp" column) and "grp" is returned; otherwise
# the supplied name is returned untouched.
check_grp <- function(x, grp){
  if (!is.null(grp)) {
    return(grp)
  }
  grp <- "grp"
  if ("grp" %in% colnames(x)) {
    x[, ("grp") := NULL]  # drop the stale column before recreating it
  }
  x[, (grp) := "1"]
  grp
}
#' @export
#' @rdname icea
# Default icea() method for a data.frame/data.table of PSA output.
#
# Args:
#   x: PSA output; one row per (sample, strategy, group) with mean
#      effectiveness and cost columns.
#   k: vector of willingness-to-pay values.
#   sample, strategy, grp, e, c: column names in 'x'; grp may be NULL for a
#     single-group analysis.
#
# Returns: object of class "icea" — list(summary, mce, evpi, nmb) with the
#   strategy/group column names stored as attributes.
icea.default <- function(x, k = seq(0, 200000, 500), sample, strategy,
                         grp = NULL, e, c, ...){
  if (!is.data.table(x)){
    x <- data.table(x)
  }
  x <- copy(x)  # avoid mutating the caller's table via := / setorderv
  grp <- check_grp(x, grp)  # inserts a single-group column when grp is NULL
  n_samples <- length(unique(x[[sample]]))
  n_strategies <- length(unique(x[[strategy]]))
  n_grps <- length(unique(x[[grp]]))
  # NOTE(review): this (grp, sample, strategy) ordering appears to be relied
  # on by the compiled C_ helpers downstream — confirm before changing.
  setorderv(x, c(grp, sample, strategy))
  # estimates
  nmb <- nmb_summary(x, k, strategy, grp, e, c)
  mce <- mce(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps)
  evpi <- evpi(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps, nmb)
  summary_table <- cea_table(x, strategy, grp, e, c)
  # Standardize the summary column names to e_*/c_*.
  setnames(summary_table,
           c(paste0(e, c("_mean", "_lower", "_upper")),
             paste0(c, c("_mean", "_lower", "_upper"))),
           c(paste0("e", c("_mean", "_lower", "_upper")),
             paste0("c", c("_mean", "_lower", "_upper")))
  )
  l <- list(summary = summary_table, mce = mce, evpi = evpi, nmb = nmb)
  class(l) <- "icea"
  # Stash column names for downstream consumers (plots, tables).
  attr(l, "strategy") <- strategy
  attr(l, "grp") <- grp
  return(l)
}
#' @export
#' @rdname icea
# Default icea_pw() method: pairwise comparison of every strategy against a
# chosen comparator on raw PSA output.
#
# Args mirror icea.default, plus:
#   comparator: value of the strategy column that all other strategies are
#               compared against.
#
# Returns: object of class "icea_pw" — list(summary, delta, ceac, inmb) —
#   with strategy/grp/comparator metadata stored as attributes.
icea_pw.default <- function(x, k = seq(0, 200000, 500), comparator,
                            sample, strategy,
                            grp = NULL, e, c, ...){
  if (!is.data.table(x)){
    x <- data.table(x)
  }
  x <- copy(x)  # avoid mutating the caller's table
  grp <- check_grp(x, grp)
  setorderv(x, c(grp, strategy, sample))
  if (!comparator %in% unique(x[[strategy]])){
    stop("Chosen comparator strategy is not in 'x'.",
         call. = FALSE)
  }
  # treatment strategies vs comparators
  indx_comparator <- which(x[[strategy]] == comparator)
  indx_treat <- which(x[[strategy]] != comparator)
  sim_comparator <- x[indx_comparator]
  sim_treat <- x[indx_treat]
  n_strategies <- length(unique(sim_treat[[strategy]]))
  n_samples <- length(unique(sim_treat[[sample]]))
  n_grps <- length(unique(sim_treat[[grp]]))
  # Per-draw incremental effectiveness and cost relative to the comparator.
  outcomes <- c(e, c)
  delta <- calc_incr_effect(sim_treat, sim_comparator, sample, strategy, grp, outcomes,
                            n_samples, n_strategies, n_grps)
  # Standardize incremental column names to "ie"/"ic".
  setnames(delta, paste0("i", e), "ie")
  setnames(delta, paste0("i", c), "ic")
  ceac <- ceac(delta, k, strategy, grp, e = "ie", c = "ic",
               n_samples, n_strategies, n_grps)
  inmb <- inmb_summary(delta, k, strategy, grp, e = "ie", c = "ic")
  summary_table <- cea_table(delta, strategy, grp, e = "ie", c = "ic", icer = TRUE)
  l <- list(summary = summary_table, delta = delta, ceac = ceac, inmb = inmb)
  class(l) <- "icea_pw"
  attr(l, "strategy") <- strategy
  attr(l, "grp") <- grp
  attr(l, "comparator") <- comparator
  # Column position of the comparator in tabular output: factor level order
  # when the strategy column is a factor, sorted unique values otherwise.
  if (is.factor(x[[strategy]])){
    comp_pos <- which(levels(x[[strategy]]) == comparator)
  } else {
    comp_pos <- which(sort(unique(x[[strategy]])) == comparator)
  }
  attr(l, "comparator_pos") <- comp_pos
  return(l)
}
#' @export
#' @rdname icea
#' @param dr Discount rate.
# icea() method for a "ce" object (total discounted costs plus QALYs).
#
# Args:
#   x: "ce" object holding data.tables x$costs and x$qalys.
#   k: vector of willingness-to-pay values.
#   dr: discount rate used to filter both component tables.
icea.ce <- function(x, k = seq(0, 200000, 500), dr, ...){
  category <- NULL  # silence R CMD check note for data.table NSE
  dr_env <- dr  # distinct name so the 'dr' column is not shadowed inside [ ]
  # Pair total costs with QALYs at the chosen discount rate; rows are assumed
  # to be aligned by (sample, strategy_id) across the two tables.
  sim <- cbind(x$costs[category == "total" & dr == dr_env,
                       c("sample", "strategy_id", "costs")],
               x$qalys[dr == dr_env, "qalys", with = FALSE])
  res <- icea(sim, k = k, sample = "sample", strategy = "strategy_id",
              e = "qalys", c = "costs")
  return(res)
}
#' @export
#' @rdname icea
# icea_pw() method for a "ce" object; same data preparation as icea.ce but
# delegates to the pairwise analysis against 'comparator'.
icea_pw.ce <- function(x, k = seq(0, 200000, 500), comparator, dr, ...){
  category <- NULL  # silence R CMD check note for data.table NSE
  dr_env <- dr  # distinct name so the 'dr' column is not shadowed inside [ ]
  # Pair total costs with QALYs at the chosen discount rate; rows are assumed
  # to be aligned by (sample, strategy_id) across the two tables.
  sim <- cbind(x$costs[category == "total" & dr == dr_env,
                       c("sample", "strategy_id", "costs")],
               x$qalys[dr == dr_env, "qalys", with = FALSE])
  res <- icea_pw(sim, k = k, comparator = comparator, sample = "sample",
                 strategy = "strategy_id",
                 e = "qalys", c = "costs")
  return(res)
}
# Probability of being most cost-effective
# Probability that each strategy is the most cost-effective, for every
# willingness-to-pay value and group. The computation runs in compiled code
# (C_mce); the R side builds the (k, strategy, grp) index columns in the
# matching order.
mce <- function(x, k, strategy, grp, e, c, n_samples, n_strategies, n_grps){
  k_rep <- rep(k, each = n_strategies * n_grps)
  strategy_rep <- rep(unique(x[[strategy]]), times = length(k) * n_grps)
  grp_rep <- rep(rep(unique(x[[grp]]), each = n_strategies), length(k))
  prob_vec <- C_mce(k, x[[e]], x[[c]], n_samples, n_strategies, n_grps)
  prob <- data.table(k_rep, strategy_rep, grp_rep, prob_vec)
  setnames(prob, c("k", strategy, grp, "prob"))
  return(prob)
}
# Cost effectiveness acceptability curve
# Cost-effectiveness acceptability curve: probability that each strategy is
# more cost-effective than the comparator, computed from the incremental PSA
# draws in 'delta' for each willingness-to-pay value and group (via C_ceac).
ceac <- function(delta, k, strategy, grp, e, c, n_samples, n_strategies, n_grps){
  k_rep <- rep(k, each = n_strategies * n_grps)
  strategy_rep <- rep(unique(delta[[strategy]]), times = length(k) * n_grps)
  grp_rep <- rep(rep(unique(delta[[grp]]), each = n_strategies), length(k))
  prob_vec <- C_ceac(k, delta[[e]], delta[[c]],
                     n_samples, n_strategies, n_grps)
  prob <- data.table(k_rep, strategy_rep, grp_rep, prob_vec)
  setnames(prob, c("k", strategy, grp, "prob"))
  return(prob)
}
# net benefits summary statistics
# Net monetary benefit (NMB = k * effectiveness - costs) summary statistics:
# mean and 2.5%/97.5% quantiles for every (strategy, group, k) combination.
nmb_summary <- function(x, k, strategy, grp, e, c){
  nmb <- NULL # Avoid CRAN warning for global undefined variable
  # Long table replicating the PSA draws once per willingness-to-pay value.
  nmb_dt <- data.table(strategy = rep(x[[strategy]], times = length(k)),
                       grp = rep(x[[grp]], times = length(k)),
                       k = rep(k, each = nrow(x)),
                       e = rep(x[[e]], times = length(k)),
                       c = rep(x[[c]], times = length(k)))
  nmb_dt[, "nmb" := k * e - c]
  nmb_summary <- nmb_dt[, list("enmb" = mean(nmb),
                               "lnmb" = stats::quantile(nmb, .025),
                               "unmb" = stats::quantile(nmb, .975)),
                        by = c("strategy", "grp", "k")]
  # Restore the caller's strategy/group column names.
  setnames(nmb_summary, old = c("strategy", "grp"), new = c(strategy, grp))
  return(nmb_summary)
}
# incremental benefit summary statistics
# Summarize incremental net monetary benefits: delegates to nmb_summary() on
# the incremental data 'ix' and relabels the summary columns with the
# incremental naming convention (einmb / linmb / uinmb).
inmb_summary <- function(ix, k, strategy, grp, e, c){
  out <- nmb_summary(ix, k, strategy, grp, e, c)
  setnames(out, colnames(out), c(strategy, grp, "k", "einmb", "linmb", "uinmb"))
  out
}
# Expected value of perfect information
# Expected value of perfect information (EVPI) by group and willingness to
# pay: expected NMB under perfect information (C_enmbpi) minus the maximum
# expected NMB under current information.
#
# 'nmb' is the summary produced by nmb_summary(); 'best' in the result is the
# 1-based index of the strategy with the highest expected NMB.
evpi <- function(x, k, strategy, grp, e, c,
                 n_samples, n_strategies, n_grps, nmb){
  # Choose treatment by maximum expected benefit
  x_nmb = copy(nmb)
  # Cast to wide: one row per (k, grp), one column per strategy.
  f <- stats::as.formula(paste0("k", "+", grp, "~", strategy))
  x_enmb <- dcast(x_nmb, f, value.var = "enmb")
  mu <- C_rowmax(as.matrix(x_enmb[, -c(1:2), with = FALSE]))
  mu_ind <- c(C_rowmax_index(as.matrix(x_enmb[, -c(1:2), with = FALSE]))) + 1  # C side is 0-based
  # calculate expected value of perfect information
  enmbpi <- C_enmbpi(k, x[[e]], x[[c]], n_samples, n_strategies, n_grps)
  evpi <- enmbpi - c(mu)
  dt <- data.table(k = rep(k, each = n_grps),
                   grp = rep(unique(x[[grp]]), times = length(k)),
                   evpi = evpi, enmbpi = enmbpi, enmb = c(mu), best = mu_ind)
  setnames(dt, "grp", grp)
  return(dt)
}
# CEA summary table
# Summary table of the mean and 2.5%/97.5% quantiles for the effectiveness
# and cost columns, by strategy and group. When icer = TRUE an ICER column
# (mean incremental cost / mean incremental effect) with dominance labels is
# appended; the inputs are then expected to be incremental quantities.
cea_table <- function(x, strategy, grp, e, c, icer = FALSE){
  # Mean plus 2.5%/97.5% quantiles for a single numeric column.
  FUN <- function (x){
    return(list(mean = mean(x), quant = stats::quantile(x, c(.025, .975))))
  }
  ret <- x[, as.list(unlist(lapply(.SD, FUN))),
           by = c(strategy, grp), .SDcols = c(e, c)]
  setnames(ret, colnames(ret), c(strategy, grp,
                                 paste0(e, c("_mean", "_lower", "_upper")),
                                 paste0(c, c("_mean", "_lower", "_upper"))))
  if (icer == TRUE){
    ie_mean <- paste0(e, "_mean")
    ic_mean <- paste0(c, "_mean")
    ret$icer <- ret[, ic_mean, with = FALSE]/ret[, ie_mean, with = FALSE]
    # Dominance overrides the numeric ratio; note this coerces the column to
    # character when any strategy dominates or is dominated.
    ret$icer <- ifelse(ret[, ic_mean, with = FALSE] < 0 & ret[, ie_mean, with = FALSE] >= 0, "Dominates",
                       ret$icer)
    ret$icer <- ifelse(ret[, ic_mean, with = FALSE] > 0 & ret[, ie_mean, with = FALSE] <= 0, "Dominated",
                       ret$icer)
  }
  return(ret)
}
# Format monetary values in fixed-point notation with a comma thousands
# separator (e.g. 1234.5 -> "1,235" when digits = 0).
format_costs <- function(x, digits){
  formatC(x, big.mark = ",", digits = digits, format = "f")
}
# Format QALY values in fixed-point notation with the requested precision.
format_qalys <- function(x, digits){
  formatC(x, digits = digits, format = "f")
}
# Append a credible interval to an already-formatted point estimate, e.g.
# "10 (8, 12)". The bounds are formatted as costs (comma-separated) when
# costs = TRUE and as QALYs otherwise.
format_cri <- function(est, lower, upper, costs = TRUE, digits){
  fmt <- if (costs) format_costs else format_qalys
  paste0(est, " (", fmt(lower, digits = digits), ", ", fmt(upper, digits = digits), ")")
}
#' ICER table
#'
#' Generate a table of incremental cost-effectiveness ratios given output from
#' \code{\link{icea_pw}}.
#'
#' @param x An object of class "icea_pw" returned by \code{\link{icea_pw}}.
#' @param k Willingness to pay.
#' @param cri If \code{TRUE}, credible intervals are computed; otherwise
#' they are not.
#' @param prob A numeric scalar in the interval \code{(0,1)} giving the credible interval.
#' Default is 0.95 for a 95 percent credible interval.
#' @param digits_qalys Number of digits to use to report QALYs.
#' @param digits_costs Number of digits to use to report costs.
#' @param output Should output be a \code{data.table} or a list of matrices for
#' each group.
#' @param rownames Row names for matrices when \code{output = "matrix"}.
#' @param colnames Column names for matrices when \code{output = "matrix"}.
#' @param drop If \code{TRUE}, then the result is coerced to the lowest possible dimension.
#' Relevant if \code{output = "matrix"} and there is one group, in which case a single
#' matrix will be returned if \code{drop = TRUE} and a list of length 1 will be returned
#' if \code{drop = FALSE}.
#' @seealso \code{\link{icea_pw}}
#' @return If \code{output = "matrix"}, then a list of matrices (or a matrix if
#' \code{drop = TRUE}) reporting incremental cost-effectiveness ratios (ICERs)
#' by group. Specifically, each matrix contains five rows for: (i)
#' incremental quality-adjusted life-years (QALYs), (ii) incremental costs,
#' (iii) the incremental net monetary benefit (NMB), (iv) the ICER,
#' and (v) a conclusion stating whether each strategy is cost-effective relative
#' to a comparator. The number of columns is equal to the
#' number of strategies (including the comparator).
#'
#' If \code{output = "data.table"}, then the results are reported as a \code{data.table},
#' with one row for each strategy and group combination.
#' @export
# Build an ICER results table from pairwise CEA output.
#
# Args:
#   x: object of class "icea_pw" returned by icea_pw(); x$summary supplies the
#      point estimates and bounds, x$delta the per-draw PSA increments.
#   k: willingness to pay used for the incremental NMB and the conclusion row.
#   cri: append credible intervals to the QALY, cost, and NMB cells?
#   prob: credible-interval mass, in (0, 1).
#   digits_qalys, digits_costs: decimal places for the formatted strings.
#   output: "matrix" -> list of per-group character matrices; "data.table" ->
#           one formatted table with a row per strategy/group combination.
#   rownames, colnames: optional dimnames overriding the defaults (matrix output).
#   drop: with "matrix" output and a single group, return the bare matrix
#         instead of a length-1 list.
#
# Returns: a list of character matrices, a single matrix, or a data.table,
#   depending on 'output' and 'drop'.
icer_tbl <- function(x, k = 50000, cri = TRUE, prob = 0.95,
                     digits_qalys = 2,
                     digits_costs = 0, output = c("matrix", "data.table"),
                     rownames = NULL, colnames = NULL,
                     drop = TRUE){
  if (!inherits(x, "icea_pw")){
    stop("'x' must be an object of class 'icea_pw'",
         call. = FALSE)
  }
  if (prob > 1 | prob < 0){
    stop("'prob' must be in the interval (0,1)",
         call. = FALSE)
  }
  # Strategy/group column names were stored as attributes by icea_pw().
  strategy <- attributes(x)$strategy
  grp <- attributes(x)$grp
  output <- match.arg(output)
  tbl <- copy(x$summary)  # copy so the := updates below leave x$summary intact
  tbl[, "icer_numeric" := get("ic_mean")/get("ie_mean")]
  tbl[, "inmb" := k * get("ie_mean") - get("ic_mean")]
  # Formatting; dominance labels overwrite the numeric ICER string.
  tbl[, "iqalys" := format_qalys(get("ie_mean"), digits = digits_qalys)]
  tbl[, "icosts" := format_costs(get("ic_mean"), digits = digits_costs)]
  tbl[, "icer" := format_costs(get("icer_numeric"), digits = digits_costs)]
  tbl[, "icer" := ifelse(get("ic_mean") < 0 & get("ie_mean") >= 0, "Dominates", get("icer"))]
  tbl[, "icer" := ifelse(get("ic_mean") > 0 & get("ie_mean") <= 0, "Dominated", get("icer"))]
  tbl[, "inmb" := format_costs(get("inmb"), digits = digits_costs)]
  if(cri){
    prob_lower <- (1 - prob)/2
    prob_upper <- 1 - prob_lower
    # Per-draw incremental NMB; temporary column, removed after summarizing.
    x$delta[, "inmb" := k * get("ie") - get("ic")]
    if (prob == 0.95){
      # Default interval: 95% bounds for QALYs/costs are already stored in
      # the summary table, so only the NMB bounds must be computed.
      tbl[, "iqalys" := format_cri(get("iqalys"), get("ie_lower"), get("ie_upper"),
                                   costs = FALSE,
                                   digits = digits_qalys)]
      tbl[, "icosts" := format_cri(get("icosts"), get("ic_lower"), get("ic_upper"),
                                   costs = TRUE,
                                   digits = digits_costs)]
      inmb_dt <- x$delta[, list(mean = mean(get("inmb")),
                                lower = stats::quantile(get("inmb"), prob_lower),
                                upper = stats::quantile(get("inmb"), prob_upper)),
                         by = c(strategy, grp)]
      tbl[, "inmb" := format_cri(get("inmb"), inmb_dt$lower, inmb_dt$upper,
                                 costs = TRUE,
                                 digits = digits_costs)]
    } else {
      # Non-default interval: recompute every bound from the PSA draws.
      cri_dt <- x$delta[, list(iqalys_lower = stats::quantile(get("ie"), prob_lower),
                               iqalys_upper = stats::quantile(get("ie"), prob_upper),
                               icosts_lower = stats::quantile(get("ic"), prob_lower),
                               icosts_upper = stats::quantile(get("ic"), prob_upper),
                               inmb_lower = stats::quantile(get("inmb"), prob_lower),
                               inmb_upper = stats::quantile(get("inmb"), prob_upper)),
                        by = c(strategy, grp)]
      tbl[, "iqalys" := format_cri(get("iqalys"), cri_dt$iqalys_lower,
                                   cri_dt$iqalys_upper, costs = FALSE,
                                   digits = digits_qalys)]
      tbl[, "icosts" := format_cri(get("icosts"), cri_dt$icosts_lower,
                                   cri_dt$icosts_upper, costs = TRUE,
                                   digits = digits_costs)]
      tbl[, "inmb" := format_cri(get("inmb"), cri_dt$inmb_lower,
                                 cri_dt$inmb_upper, costs = TRUE,
                                 digits = digits_costs)]
    }
    x$delta[, "inmb" := NULL]  # drop the temporary column added above
  } # end credible interval calculations
  # Cost-effective when the ICER is nonnegative and at most k, or when the
  # strategy dominates the comparator.
  tbl[, "conclusion" := ifelse((get("icer_numeric") >= 0 & get("icer_numeric") <= k) |
                                 get("icer") == "Dominates",
                               "Cost-effective", "Not cost-effective")]
  tbl <- tbl[, c(strategy, grp, "iqalys", "icosts", "inmb", "icer", "conclusion"),
             with = FALSE]
  if (output == "matrix"){
    # One 5-row matrix per group; the comparator column is filled with "-".
    tbl_list <- split(tbl, by = grp)
    mat_list <- vector(mode = "list", length = length(tbl_list))
    names(mat_list) <- names(tbl_list)
    n_strategies <- length(unique(tbl[[strategy]]))
    mat <- matrix(NA, nrow = 5, ncol = n_strategies + 1)
    if(is.null(rownames)){
      rownames(mat) <- c("Incremental QALYs", "Incremental costs",
                         "Incremental NMB", "ICER", "Conclusion")
    } else{
      rownames(mat) <- rownames
    }
    comp_pos <- attributes(x)$comparator_pos
    if (is.null(colnames)){
      # Strategy labels come from the first group's rows; the groups are
      # assumed to share the same set of strategies.
      strategy_names <- rep(NA, ncol(mat))
      strategy_names[comp_pos] <- attributes(x)$comparator
      strategy_names[-comp_pos] <- as.character(tbl_list[[1]][[strategy]])
      colnames(mat) <- strategy_names
    } else{
      colnames(mat) <- colnames
    }
    for (i in 1:length(mat_list)){
      mat[1, -comp_pos] <- tbl_list[[i]]$iqalys
      mat[2, -comp_pos] <- tbl_list[[i]]$icosts
      mat[3, -comp_pos] <- tbl_list[[i]]$inmb
      mat[4, -comp_pos] <- tbl_list[[i]]$icer
      mat[5, -comp_pos] <- tbl_list[[i]]$conclusion
      mat[, comp_pos] <- "-"
      mat_list[[i]] <- mat
    }
    if (drop){
      if(length(mat_list) == 1){
        mat_list <- mat_list[[1]]
      }
    }
    return(mat_list)
  } else{
    return(tbl)
  }
}
|
# Integration test: invert the PROSPECT-5 leaf radiative-transfer model with
# BayesianTools and check that the known ("true") parameters are recovered
# from pseudo-data. Long-running; only executed interactively.
# devtools::load_all('.')
library(PEcAnRTM)
library(testthat)
context('Inversion using BayesianTools')
skip_on_travis()
skip_if_not(
  interactive(),
  "Long-running tests. Only run in interactive mode."
)
# Fixed seed so the pseudo-data and MCMC runs are reproducible.
set.seed(12345678)
true_prospect <- defparam('prospect_5')
true_params <- c(true_prospect, residual = 0.01)
# Model under inversion: PROSPECT-5, keeping the first output column.
model <- function(x) prospect(x, 5)[,1]
true_model <- model(true_prospect)
# Pseudo-observations = model output + Gaussian noise at the true residual SD.
noise <- rnorm(length(true_model), 0, true_params['residual'])
observed <- true_model + noise
if (interactive()) {
  plot(400:2500, observed, type = 'l')
  lines(400:2500, true_model, col = 'red')
  legend("topright", c('observation', 'pseudo-data'), col = c('black', 'red'), lty = 'solid')
}
# Alternate test, using observed spectra
# data("testspec")
# observed <- testspec_ACRU[,2]
prior <- prospect_bt_prior(5)
threshold <- 1.1  # Gelman-Rubin convergence threshold
custom_settings <- list(init = list(iterations = 2000),
                        loop = list(iterations = 1000),
                        other = list(threshold = threshold,
                                     verbose_loglike = FALSE))
samples <- invert_bt(observed = observed, model = model, prior = prior,
                     custom_settings = custom_settings)
samples_mcmc <- BayesianTools::getSample(samples, coda = TRUE)
# Discard burn-in automatically based on the Gelman-Rubin diagnostic.
samples_burned <- PEcAn.assim.batch::autoburnin(samples_mcmc, method = 'gelman.plot', threshold = threshold)
mean_estimates <- do.call(cbind, summary(samples_burned, quantiles = c(0.01, 0.5, 0.99))[c('statistics', 'quantiles')])
test_that(
  'True values are within 95% confidence interval',
  {
    expect_true(all(true_params > mean_estimates[,'1%']))
    expect_true(all(true_params < mean_estimates[,'99%']))
  }
)
test_that(
  'Mean estimates are within 10% of true values',
  expect_equal(true_params, mean_estimates[names(true_params), 'Mean'], tol = 0.1)
)
# Compare observation with predicted interval
samp_mat <- as.matrix(samples_burned)
nsamp <- 2500
prosp_mat <- matrix(0.0, nsamp, 2101)  # one column per wavelength (400:2500 nm)
message('Generating PROSPECT confidence interval')
pb <- txtProgressBar(style = 3)
# Posterior predictive draws: random posterior parameter set plus residual
# noise per draw. Element 6 is assumed to be the residual SD (5 PROSPECT
# parameters + residual).
for (i in seq_len(nsamp)) {
  setTxtProgressBar(pb, i/nsamp)
  samp_param <- samp_mat[sample.int(nrow(samp_mat), 1),]
  prosp_mat[i,] <- rnorm(2101, model(samp_param[-6]), samp_param[6])
}
close(pb)
mid <- colMeans(prosp_mat)
lo <- apply(prosp_mat, 2, quantile, 0.025)
hi <- apply(prosp_mat, 2, quantile, 0.975)
# Polygon coordinates tracing the lower bound forward and upper bound back.
pi_y <- c(lo, rev(hi))
pi_x <- c(seq_along(lo), rev(seq_along(hi)))
outside <- which(observed < lo | observed > hi)
test_that(
  '95% predictive interval overlaps around 95% of data',
  expect_lt(100 * length(outside) / length(true_model), 7.5)
)
if (interactive()) {
  par(mfrow = c(1,1))
  plot(observed, type = 'l')
  lines(mid, col = 'red')
  polygon(pi_x, pi_y, col = rgb(1, 0, 0, 0.2), border = 'red', lty = 'dashed')
  legend(
    'topright',
    c('observed', 'mean prediction', 'predictive interval'),
    lty = c('solid', 'solid', 'dashed'),
    col = c('black', 'red', 'red')
  )
}
| /modules/rtm/tests/testthat/test.invert_bayestools.R | permissive | yan130/pecan | R | false | false | 2,946 | r | # devtools::load_all('.')
# Integration test: invert the PROSPECT-5 leaf radiative-transfer model with
# BayesianTools and check that the known ("true") parameters are recovered
# from pseudo-data. Long-running; only executed interactively.
library(PEcAnRTM)
library(testthat)
context('Inversion using BayesianTools')
skip_on_travis()
skip_if_not(
  interactive(),
  "Long-running tests. Only run in interactive mode."
)
# Fixed seed so the pseudo-data and MCMC runs are reproducible.
set.seed(12345678)
true_prospect <- defparam('prospect_5')
true_params <- c(true_prospect, residual = 0.01)
# Model under inversion: PROSPECT-5, keeping the first output column.
model <- function(x) prospect(x, 5)[,1]
true_model <- model(true_prospect)
# Pseudo-observations = model output + Gaussian noise at the true residual SD.
noise <- rnorm(length(true_model), 0, true_params['residual'])
observed <- true_model + noise
if (interactive()) {
  plot(400:2500, observed, type = 'l')
  lines(400:2500, true_model, col = 'red')
  legend("topright", c('observation', 'pseudo-data'), col = c('black', 'red'), lty = 'solid')
}
# Alternate test, using observed spectra
# data("testspec")
# observed <- testspec_ACRU[,2]
prior <- prospect_bt_prior(5)
threshold <- 1.1  # Gelman-Rubin convergence threshold
custom_settings <- list(init = list(iterations = 2000),
                        loop = list(iterations = 1000),
                        other = list(threshold = threshold,
                                     verbose_loglike = FALSE))
samples <- invert_bt(observed = observed, model = model, prior = prior,
                     custom_settings = custom_settings)
samples_mcmc <- BayesianTools::getSample(samples, coda = TRUE)
# Discard burn-in automatically based on the Gelman-Rubin diagnostic.
samples_burned <- PEcAn.assim.batch::autoburnin(samples_mcmc, method = 'gelman.plot', threshold = threshold)
mean_estimates <- do.call(cbind, summary(samples_burned, quantiles = c(0.01, 0.5, 0.99))[c('statistics', 'quantiles')])
test_that(
  'True values are within 95% confidence interval',
  {
    expect_true(all(true_params > mean_estimates[,'1%']))
    expect_true(all(true_params < mean_estimates[,'99%']))
  }
)
test_that(
  'Mean estimates are within 10% of true values',
  expect_equal(true_params, mean_estimates[names(true_params), 'Mean'], tol = 0.1)
)
# Compare observation with predicted interval
samp_mat <- as.matrix(samples_burned)
nsamp <- 2500
prosp_mat <- matrix(0.0, nsamp, 2101)  # one column per wavelength (400:2500 nm)
message('Generating PROSPECT confidence interval')
pb <- txtProgressBar(style = 3)
# Posterior predictive draws: random posterior parameter set plus residual
# noise per draw. Element 6 is assumed to be the residual SD (5 PROSPECT
# parameters + residual).
for (i in seq_len(nsamp)) {
  setTxtProgressBar(pb, i/nsamp)
  samp_param <- samp_mat[sample.int(nrow(samp_mat), 1),]
  prosp_mat[i,] <- rnorm(2101, model(samp_param[-6]), samp_param[6])
}
close(pb)
mid <- colMeans(prosp_mat)
lo <- apply(prosp_mat, 2, quantile, 0.025)
hi <- apply(prosp_mat, 2, quantile, 0.975)
# Polygon coordinates tracing the lower bound forward and upper bound back.
pi_y <- c(lo, rev(hi))
pi_x <- c(seq_along(lo), rev(seq_along(hi)))
outside <- which(observed < lo | observed > hi)
test_that(
  '95% predictive interval overlaps around 95% of data',
  expect_lt(100 * length(outside) / length(true_model), 7.5)
)
if (interactive()) {
  par(mfrow = c(1,1))
  plot(observed, type = 'l')
  lines(mid, col = 'red')
  polygon(pi_x, pi_y, col = rgb(1, 0, 0, 0.2), border = 'red', lty = 'dashed')
  legend(
    'topright',
    c('observed', 'mean prediction', 'predictive interval'),
    lty = c('solid', 'solid', 'dashed'),
    col = c('black', 'red', 'red')
  )
}
|
# Regression tests around the 64-leaf boundaries: trees with 63/64/65 and
# 127/128/129 tips — presumably where split bitmasks cross a 64-bit word
# boundary (TODO confirm against the package internals).
context("64-bit Split handling")
library("TreeTools")
test_that("64-bit splits handled ok", {
  # Shared phylogenetic information between a balanced and a pectinate tree
  # with nTip leaves, normalized with pmin.
  SPI <- function (nTip) SharedPhylogeneticInfo(BalancedTree(nTip),
                                                PectinateTree(nTip),
                                                normalize = pmin)
  # The statistic should remain monotone in nTip across each boundary.
  expect_gt(SPI(64), SPI(63))
  expect_lt(SPI(64), SPI(65))
  expect_gt(SPI(128), SPI(127))
  expect_lt(SPI(128), SPI(129))
})
| /fuzzedpackages/TreeDist/tests/testthat/test-64.R | no_license | akhikolla/testpackages | R | false | false | 442 | r | context("64-bit Split handling")
# Regression tests around the 64-leaf boundaries: trees with 63/64/65 and
# 127/128/129 tips — presumably where split bitmasks cross a 64-bit word
# boundary (TODO confirm against the package internals).
library("TreeTools")
test_that("64-bit splits handled ok", {
  # Shared phylogenetic information between a balanced and a pectinate tree
  # with nTip leaves, normalized with pmin.
  SPI <- function (nTip) SharedPhylogeneticInfo(BalancedTree(nTip),
                                                PectinateTree(nTip),
                                                normalize = pmin)
  # The statistic should remain monotone in nTip across each boundary.
  expect_gt(SPI(64), SPI(63))
  expect_lt(SPI(64), SPI(65))
  expect_gt(SPI(128), SPI(127))
  expect_lt(SPI(128), SPI(129))
})
# Extracted example code for trajr::TrajGenerate (see ?TrajGenerate).
library(trajr)
### Name: TrajGenerate
### Title: Generate a random trajectory
### Aliases: TrajGenerate
### ** Examples
# Generate a 1000 step correlated random walk (the function defaults)
trj <- TrajGenerate()
plot(trj, main = "Correlated walk")
# Generate a 1000 step Levy flight - path lengths follow a Cauchy distribution
trj <- TrajGenerate(linearErrorDist = rcauchy)
plot(trj, main = "Levy flight")
# Generate a short directed trajectory (random = FALSE)
trj <- TrajGenerate(n = 20, random = FALSE)
plot(trj, main = "Directed walk")
# Generate an uncorrelated random walk: turning angles uniform on [-pi, pi]
trj <- TrajGenerate(500, angularErrorDist = function(n) runif(n, -pi, pi))
plot(trj, main = "Uncorrelated walk")
| /data/genthat_extracted_code/trajr/examples/TrajGenerate.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 663 | r | library(trajr)
# Extracted example code for trajr::TrajGenerate (see ?TrajGenerate).
### Name: TrajGenerate
### Title: Generate a random trajectory
### Aliases: TrajGenerate
### ** Examples
# Generate a 1000 step correlated random walk (the function defaults)
trj <- TrajGenerate()
plot(trj, main = "Correlated walk")
# Generate a 1000 step Levy flight - path lengths follow a Cauchy distribution
trj <- TrajGenerate(linearErrorDist = rcauchy)
plot(trj, main = "Levy flight")
# Generate a short directed trajectory (random = FALSE)
trj <- TrajGenerate(n = 20, random = FALSE)
plot(trj, main = "Directed walk")
# Generate an uncorrelated random walk: turning angles uniform on [-pi, pi]
trj <- TrajGenerate(500, angularErrorDist = function(n) runif(n, -pi, pi))
plot(trj, main = "Uncorrelated walk")
|
# Data Investigation
# Last Modified: 12/10/18
# Checking for visitor ids that occur more than once ----
# NOTE(review): assumes data frames 'te' and 'tr' (with a fullVisitorId
# column) are already loaded in the session — confirm against the loader.
# Rows of 'te' whose visitor id is duplicated within 'te'.
duplicats <- te[duplicated(te$fullVisitorId),]
# Frequency table of visitor ids in 'tr'.
x <- data.frame(table(tr$fullVisitorId))
x <- x[x$Freq > 1,]  # keep ids seen more than once
# Rows of 'te' whose visitor id also repeats in 'tr'
# (overwrites the 'duplicats' computed above).
duplicats <- te[te$fullVisitorId %in% x$Var1,]
x <- unique(tr$fullVisitorId)
test | /data_exploration.R | no_license | DataMonsterBoy/gacrp | R | false | false | 273 | r | # Data Invesitgation
# Last Modified: 12/10/18
# Checking for visitor ids that occur more than once ----
# NOTE(review): assumes data frames 'te' and 'tr' (with a fullVisitorId
# column) are already loaded in the session — confirm against the loader.
# Rows of 'te' whose visitor id is duplicated within 'te'.
duplicats <- te[duplicated(te$fullVisitorId),]
# Frequency table of visitor ids in 'tr'.
x <- data.frame(table(tr$fullVisitorId))
x <- x[x$Freq > 1,]  # keep ids seen more than once
# Rows of 'te' whose visitor id also repeats in 'tr'
# (overwrites the 'duplicats' computed above).
duplicats <- te[te$fullVisitorId %in% x$Var1,]
test |
#=======================================================#
#-------------------------------------------------------#
# #
# epi_scan_impute.R #
# #
# Scan for 4 and 8df models (epistasis) on the #
# Imputed blocks #
# #
# joseph.powell@uq.edu.au #
# V1. Feb.2013 #
# #
#-------------------------------------------------------#
#=======================================================#
#=======================================================#
# CONVERT THE PED FORMT TO A 0, 1, 2, FORMAT #
#=======================================================#
# Convert a PLINK .ped/.map pair into a 0/1/2 genotype dosage matrix.
#
# Args:
#   ped: data frame in PLINK ped layout; columns 1-6 are sample metadata
#        (family id, individual id, ...), followed by two allele columns per
#        SNP coded 1/2, with 0 denoting a missing allele.
#   map: data frame in PLINK map layout; column V2 holds the SNP identifiers.
#
# Returns: numeric matrix (individuals x SNPs) with 0 for a 1/1 homozygote,
#   1 for a heterozygote, 2 for a 2/2 homozygote, and NA for genotypes with
#   a missing allele. Row names are individual ids (ped column V2); column
#   names are SNP ids (map column V2).
plink_to_012.fun <- function(
  ped,  # ped file
  map)  # map file
{
  ids <- ped[, 1:6]
  nid <- nrow(ids)
  ped <- ped[, -(1:6)]
  # Allele columns come in pairs; 'index' marks the first allele of each SNP.
  index <- seq(1, ncol(ped), 2)
  geno <- matrix(NA_real_, nid, length(index))
  for (i in seq_along(index)) {
    a1 <- ped[[index[i]]]
    a2 <- ped[[index[i] + 1]]
    # which() drops non-matches, including missing ("0") alleles, so those
    # genotypes stay NA. The previous logical-subscript assignment raised
    # "NAs are not allowed in subscripted assignments" whenever a genotype
    # was missing.
    x <- rep(NA_real_, nid)
    x[which(a1 == 1 & a2 == 1)] <- 0
    x[which((a1 == 1 & a2 == 2) | (a1 == 2 & a2 == 1))] <- 1
    x[which(a1 == 2 & a2 == 2)] <- 2
    geno[, i] <- x
  }
  colnames(geno) <- map$V2
  rownames(geno) <- ids$V2
  geno
}
#=======================================================#
# .FUN FOR THE SNP BY SNP PAIRWISE 4DF AND 8DF MODELS #
#=======================================================#
# Pairwise epistasis scan: for every SNP pair (one SNP from each block), fit
# the full interaction model and the additive model for the probe phenotype
# and record the fit statistics.
#
# Args:
#   block1, block2: 0/1/2 genotype matrices (individuals x SNPs) with SNP ids
#                   as column names; rows assumed aligned with 'probe'.
#   probe: numeric phenotype, one value per individual.
#
# Returns: data frame with one row per SNP pair: the snp ids, genotype
#   correlation (rsq), the full-model F-test degrees of freedom, -log10 p of
#   the full model (fullP), -log10 p of the interaction-only nested test
#   (intP), and the number and minimum size of the two-locus genotype classes.
epi_scan.fun <- function(
  block1, # SNP1 block
  block2, # SNP2 block
  probe # matched probe phenotype
  ) {
  # One output row per SNP pair; 9 statistics per row.
  out <- array(NA, c(ncol(block1)*ncol(block2), 9))
  c <- 0  # running row counter into 'out'
  for(i in 1:ncol(block1)) {
    snpi <- block1[,i]
    for(k in 1:ncol(block2)) {
      c <- c+1
      snpk <- block2[,k]
      # Skip self-pairs: the same SNP may occur in both blocks.
      if(colnames(block1)[i]==colnames(block2)[k]) {
        # print("matching snp ids")
        out[c,] <- "NA"
      }
      else {
        # Pair identifiers
        out[c,1] <- colnames(block1)[i]
        out[c,2] <- colnames(block2)[k]
        # Genotype correlation between the two SNPs
        out[c,3] <- round(cor(snpi, snpk), 4)
        # Full model (main effects + interaction) vs reduced (main effects)
        fullmod <- lm(probe ~ as.factor(snpi) + as.factor(snpk) + as.factor(snpi):as.factor(snpk))
        redmod <- lm(probe ~ as.factor(snpi) + as.factor(snpk))
        # This is the interaction terms on their own (nested test)
        intmod <- anova(redmod, fullmod)
        # Extract statistics
        tmp <- summary(fullmod)$fstatistic
        out[c,4] <- tmp[2]  # numerator df
        out[c,5] <- tmp[3]  # denominator df
        out[c,6] <- round(-log10(pf(tmp[1], tmp[2], tmp[3], low=F)),4)
        out[c,7] <- round(-log10(intmod$Pr[2]), 4)
        # Two-locus genotype classes: snpi + 3*snpk uniquely encodes the
        # 9 possible 0/1/2 x 0/1/2 combinations.
        out[c,8] <- length(table(snpi + 3*snpk))
        out[c,9] <- min(table(snpi + 3*snpk))
      }
    }
    print(i)  # progress: one line per column of block1
  }
  out <- as.data.frame(out)
  names(out) <- c("snp1", "snp2", "rsq", "df1", "df2", "fullP", "intP", "nclass", "minclass")
  return(out)
}
#=======================================================#
# READ IN THE DATA AND ARGS #
#=======================================================#
# Parse the command line once (was five separate `commandArgs(T)` calls,
# with `T` partially matching the `trailingOnly` argument).
args <- commandArgs(trailingOnly = TRUE)
n <- as.numeric(args[1])  # row index into set3 selecting the SNP pair / probe
pheno <- args[2]          # .RData file providing `resphen` (phenotype matrix)
set3 <- args[3]           # .RData file providing `set3` (pair/probe table)
blockdir <- args[4]       # directory holding the imputed block .ped/.map files
outdir <- args[5]         # output directory
load(pheno)
load(set3)
#=======================================================#
# GET THE PHENOTYPE AND GENOTYPE DATA FOR N #
#=======================================================#
ped1 <- read.table(paste0(blockdir, as.character(set3$snp1[n]), ".ped"), header = FALSE)
ped2 <- read.table(paste0(blockdir, as.character(set3$snp2[n]), ".ped"), header = FALSE)
map1 <- read.table(paste0(blockdir, as.character(set3$snp1[n]), ".map"), header = FALSE)
map2 <- read.table(paste0(blockdir, as.character(set3$snp2[n]), ".map"), header = FALSE)
block1 <- plink_to_012.fun(ped1, map1)
block2 <- plink_to_012.fun(ped2, map2)
# Phenotype column matching the probe name of this combination.
pheno <- resphen[, which(colnames(resphen) == as.character(set3$probename[n]))]
#=======================================================#
# RUN EPI_SCAN.FUN #
#=======================================================#
epi_scan_out <- epi_scan.fun(block1, block2, pheno)
#=======================================================#
# WRITE OUT THE OUTPUT INTO THE OUTDIR #
#=======================================================#
write.table(epi_scan_out, paste0(outdir, "epi_impute_scan_", as.character(set3$probename[n]), "_", as.character(set3$snp1[n]), "_", as.character(set3$snp2[n]), ".txt"), quote = FALSE, row.names = FALSE)
| /simulations/epi_scan_impute.R | no_license | explodecomputer/eQTL-2D | R | false | false | 4,427 | r | #=======================================================#
#-------------------------------------------------------#
# #
# epi_scan_impute.R #
# #
# Scan for 4 and 8df models (epistasis) on the #
# Imputed blocks #
# #
# joseph.powell@uq.edu.au #
# V1. Feb.2013 #
# #
#-------------------------------------------------------#
#=======================================================#
#=======================================================#
# CONVERT THE PED FORMT TO A 0, 1, 2, FORMAT #
#=======================================================#
plink_to_012.fun <- function(
  ped, # PLINK ped table: 6 id columns followed by two allele columns per SNP
  map) { # PLINK map table: column V2 holds the SNP identifiers
  # Convert a PLINK ped/map pair into a samples x SNPs genotype matrix coded
  # 0/1/2 (count of allele 2). Alleles are coded 1/2 in the ped file; a "0"
  # allele marks a missing call and yields NA for that genotype.
  ids <- ped[, 1:6]
  nid <- nrow(ids)
  ped <- ped[, -c(1:6)]
  # Each SNP occupies two adjacent allele columns; index of the first allele.
  first_allele <- seq(1, ncol(ped), 2)
  geno <- matrix(0, nid, length(first_allele))
  # Convert to 0, 1, 2 format, one SNP (allele-column pair) at a time.
  for (i in seq_along(first_allele)) {
    snp <- ped[, c(first_allele[i], first_allele[i] + 1)]
    x <- rep(NA_real_, nid)
    snp[snp == "0"] <- NA # "0" codes a missing genotype call
    i0 <- snp[, 1] == 1 & snp[, 2] == 1 # homozygous allele 1
    i2 <- snp[, 1] == 2 & snp[, 2] == 2 # homozygous allele 2
    i1 <- (snp[, 1] == 1 & snp[, 2] == 2) | (snp[, 1] == 2 & snp[, 2] == 1) # heterozygous
    # NA entries in the logical masks are skipped by the length-1 assignments,
    # so missing genotypes stay NA.
    x[i0] <- 0
    x[i1] <- 1
    x[i2] <- 2
    geno[, i] <- x
  }
  colnames(geno) <- map$V2 # SNP ids
  rownames(geno) <- ids$V2 # individual ids
  return(geno)
}
#=======================================================#
# .FUN FOR THE SNP BY SNP PAIRWISE 4DF AND 8DF MODELS #
#=======================================================#
epi_scan.fun <- function(
  block1, # SNP1 block: samples x SNPs genotype matrix (0/1/2) with named columns
  block2, # SNP2 block: samples x SNPs genotype matrix (0/1/2) with named columns
  probe   # matched probe phenotype (numeric, same sample order as the blocks)
  ) {
  # Pairwise epistasis scan over all SNP pairs (one SNP from each block).
  # For each pair, fit the full genotype-class model (main effects plus
  # interaction, the "8df" test) and the reduced model (main effects only,
  # "4df"), and record -log10 p-values together with genotype class sizes.
  # NOTE(review): assumes the sample ids of block1/block2/probe already
  # match -- the original comment said "Check the sample ids match" but no
  # check is performed; confirm upstream.
  out <- array(NA, c(ncol(block1) * ncol(block2), 9))
  row_idx <- 0 # output row counter (renamed from `c`, which shadowed base::c)
  for (i in seq_len(ncol(block1))) {
    snpi <- block1[, i]
    for (k in seq_len(ncol(block2))) {
      row_idx <- row_idx + 1
      snpk <- block2[, k]
      # The same SNP in both blocks gives a degenerate model: leave the row NA.
      if (colnames(block1)[i] == colnames(block2)[k]) {
        out[row_idx, ] <- NA # was the string "NA", which polluted numeric columns
      }
      else {
        # Pair identifiers
        out[row_idx, 1] <- colnames(block1)[i]
        out[row_idx, 2] <- colnames(block2)[k]
        # Pairwise genotype correlation
        out[row_idx, 3] <- round(cor(snpi, snpk), 4)
        # Full (interaction) and reduced (additive) genotype-class models
        fullmod <- lm(probe ~ as.factor(snpi) + as.factor(snpk) + as.factor(snpi):as.factor(snpk))
        redmod <- lm(probe ~ as.factor(snpi) + as.factor(snpk))
        # This is the interaction terms on their own (nested test)
        intmod <- anova(redmod, fullmod)
        # Overall F statistic of the full model: (value, df1, df2)
        tmp <- summary(fullmod)$fstatistic
        out[row_idx, 4] <- tmp[2]
        out[row_idx, 5] <- tmp[3]
        # Spell out lower.tail (was the partially matched `low = F`).
        out[row_idx, 6] <- round(-log10(pf(tmp[1], tmp[2], tmp[3], lower.tail = FALSE)), 4)
        # Exact column name (was the partially matched `intmod$Pr`).
        out[row_idx, 7] <- round(-log10(intmod[["Pr(>F)"]][2]), 4)
        # Number of occupied joint genotype classes and size of the smallest one
        out[row_idx, 8] <- length(table(snpi + 3 * snpk))
        out[row_idx, 9] <- min(table(snpi + 3 * snpk))
      }
    }
    print(i) # progress: one line per block1 SNP
  }
  out <- as.data.frame(out)
  names(out) <- c("snp1", "snp2", "rsq", "df1", "df2", "fullP", "intP", "nclass", "minclass")
  return(out)
}
#=======================================================#
# READ IN THE DATA AND ARGS #
#=======================================================#
# Parse the command line once (was five separate `commandArgs(T)` calls,
# with `T` partially matching the `trailingOnly` argument).
args <- commandArgs(trailingOnly = TRUE)
n <- as.numeric(args[1])  # row index into set3 selecting the SNP pair / probe
pheno <- args[2]          # .RData file providing `resphen` (phenotype matrix)
set3 <- args[3]           # .RData file providing `set3` (pair/probe table)
blockdir <- args[4]       # directory holding the imputed block .ped/.map files
outdir <- args[5]         # output directory
load(pheno)
load(set3)
#=======================================================#
# GET THE PHENOTYPE AND GENOTYPE DATA FOR N #
#=======================================================#
ped1 <- read.table(paste0(blockdir, as.character(set3$snp1[n]), ".ped"), header = FALSE)
ped2 <- read.table(paste0(blockdir, as.character(set3$snp2[n]), ".ped"), header = FALSE)
map1 <- read.table(paste0(blockdir, as.character(set3$snp1[n]), ".map"), header = FALSE)
map2 <- read.table(paste0(blockdir, as.character(set3$snp2[n]), ".map"), header = FALSE)
block1 <- plink_to_012.fun(ped1, map1)
block2 <- plink_to_012.fun(ped2, map2)
# Phenotype column matching the probe name of this combination.
pheno <- resphen[, which(colnames(resphen) == as.character(set3$probename[n]))]
#=======================================================#
# RUN EPI_SCAN.FUN #
#=======================================================#
epi_scan_out <- epi_scan.fun(block1, block2, pheno)
#=======================================================#
# WRITE OUT THE OUTPUT INTO THE OUTDIR #
#=======================================================#
write.table(epi_scan_out, paste0(outdir, "epi_impute_scan_", as.character(set3$probename[n]), "_", as.character(set3$snp1[n]), "_", as.character(set3$snp2[n]), ".txt"), quote = FALSE, row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_crop_template.R
\docType{data}
\name{Evaluation_Forms}
\alias{Evaluation_Forms}
\title{A list with evaluation and forms used in Participatory Varietal Selection}
\format{A list used in potato and sweetpotato traits}
\source{
International Potato Center, potato and sweetpotato experimental data.
}
\usage{
Evaluation_Forms
}
\description{
This list contains all forms and traits used in Participatory Varietal Selection. Users can pick elements from this list through ShinyTree
}
\references{
This data is related to HiDAP crop template
}
| /man/Evaluation_Forms.Rd | no_license | CIPTOOLS/fbdesign | R | false | true | 621 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_crop_template.R
\docType{data}
\name{Evaluation_Forms}
\alias{Evaluation_Forms}
\title{A list with evaluation and forms used in Participatory Varietal Selection}
\format{A list used in potato and sweetpotato traits}
\source{
International Potato Center, potato and sweetpotato experimental data.
}
\usage{
Evaluation_Forms
}
\description{
This list contains all forms and traits used in Participatory Varietal Selection. Users can pick elements from this list through ShinyTree
}
\references{
This data is related to HiDAP crop template
}
|
# Libraries for data wrangling and plotting.
library(dplyr)
library(ggplot2)
library(tidyr)
library(viridis)
# Project helpers: data simulation and benchmark plotting utilities.
source("../../R/bm-sim-data.R")
source("../../R/helper.R")
# Register the TeX Gyre Bonum font so figure text matches the manuscript.
# NOTE(review): the font paths are machine-specific (TeX Live install) -- adjust locally.
sysfonts::font_add("Gyre Bonum",
regular = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-regular.otf",
bold = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-bold.otf")
showtext::showtext_auto()
# Global multiplier applied to all font sizes in the figures below.
font_scale = 3
## memory
## ----------------------------------------------
# Load local data:
# -------------------
# Read every memory-benchmark result file, re-simulate the data set used by
# that run (to subtract its own size from the measured heap), and collect one
# row per method (linear / binary / ridge) into df_cat_memory.
files = list.files("memory", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
mem_setup = 50  # fixed setup overhead (assumed MB) subtracted from the heap measurement
k = 1
for (fn in files) {
  load(fn)  # provides `bm_extract`
  cat("Read", k, "/", length(files), "\n")
  # Re-simulate the exact data of this run via its stored seed.
  set.seed(bm_extract$data_seed)
  dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$cls_config$ncls[1], ncnoise = bm_extract$cls_config$nic[1])
  cnames = colnames(dat$data)
  # BUG FIX: the inner loop previously reused `fn` as its loop variable,
  # clobbering the file name reported by the cat() message below.
  for (cn in cnames[cnames != "y"]) {
    dat$data[[cn]] = as.character(dat$data[[cn]])
  }
  dat_noise = dat$data
  # Memory (MB) occupied by the simulated data itself.
  mem_rsim = sum(c(object.size(dat), object.size(dat_noise))) / 1024^2
  if (is.null(bm_extract$ms_extract_ridge)) {
    cat(fn, " does not have ridge memory heap size\n")
  } else {
    ll_rows[[k]] = data.frame(
      date = bm_extract$date,
      data_seed = bm_extract$data_seed,
      nrows = bm_extract$config$n,
      ncols = bm_extract$config$p,
      sn_ratio = bm_extract$config$sn_ratio,
      nclasses = bm_extract$cls_config["ncls"][1,1],
      nnoninfocls = bm_extract$cls_config["nic"][1,1],
      #rep = bm_extract$config$rep, # rep is always 1 for memory
      ncolsnoise = bm_extract$config$pnoise,
      # Final heap size per method, net of setup overhead and the data itself.
      mem = c(last(bm_extract$ms_extract_linear$mem_heap_B), last(bm_extract$ms_extract_binary$mem_heap_B), last(bm_extract$ms_extract_ridge$mem_heap_B)) - mem_setup - mem_rsim,
      unit = c(last(bm_extract$ms_extract_linear$unit), last(bm_extract$ms_extract_binary$unit), last(bm_extract$ms_extract_ridge$unit)),
      method = c("linear", "binary", "ridge")
    )
  }
  k = k+1
}
df_cat_memory = do.call("rbind", ll_rows)
# Plot used memory (proportional):
# --------------------------------
# Median memory per configuration and method, then the linear (no-binning)
# memory relative to the binary and ridge binning variants. Values > 1 mean
# binning saves memory; ratios below 1 are dropped by the filter.
df_plt_mem = df_cat_memory %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mem = median(mem)) %>%
group_by(nrows, ncols, ncolsnoise, nclasses) %>%
summarize(rel_mem_bin = mem[method == "linear"] / mem[method == "binary"], rel_mem_ridge = mem[method == "linear"] / mem[method == "ridge"], ptotal = ncols[1] + ncolsnoise[1]) %>%
gather(key = "method", value = "rel_mem", starts_with("rel_mem")) %>%
filter(rel_mem >= 1)
# Human-readable labels for plotting.
df_plt_mem$ptotal = factor(df_plt_mem$ptotal, levels = as.character(sort(unique(df_plt_mem$ptotal))))
df_plt_mem$method[df_plt_mem$method == "rel_mem_bin"] = "Binary"
df_plt_mem$method[df_plt_mem$method == "rel_mem_ridge"] = "Ridge"
df_plt_mem$ncls_cat = factor(paste0(df_plt_mem$nclasses, " classes"), levels = paste0(sort(unique(df_plt_mem$nclasses)), " classes"))
# Relative memory vs. number of rows, faceted by class count x method.
gg = ggplot() +
geom_hline(yintercept = 1, col = "dark red", lty = 2) +
geom_line(data = df_plt_mem, aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
geom_point(data = df_plt_mem, aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
geom_text(data = data.frame(x = 100000, y = 1, label = "Baseline (used memory is equal)", method = "Ridge", ptotal = 250, ncls_cat = factor("20 classes", levels = paste0(c(5, 10, 20), " classes"))),
aes(x = x, y = y, label = label), color = "dark red", vjust = 1.5, hjust = 1, show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = sort(unique(df_cat_memory$nrows)), trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab("Relative Deviation of the\nAllocated Memory in MB\nCompared to Using No Binning") +
labs(color = "Number of\nFeatures") +
coord_cartesian(clip = 'off') +
facet_grid(ncls_cat ~ method, scales= "free_y")
# DIN A4 width in mm, scaled with the global font factor.
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_memory_rel_lines.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.5, units = "mm")
# LaTeX table of median memory per method for the extreme configurations:
# per (nrows, ptotal, nclasses) keep the run with maximal linear memory,
# restrict to 5/20 classes, and per nrows keep the smallest/largest ptotal.
tmp = df_cat_memory %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mean_mem = median(mem)) %>%
select(nrows, ncols, ncolsnoise, mean_mem, method, nclasses) %>%
pivot_wider(names_from = method, values_from = mean_mem) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
group_by(nrows, ptotal, nclasses) %>%
select(nrows, ptotal, linear, binary, ridge, nclasses) %>%
filter(linear == max(linear), nclasses %in% c(5, 20)) %>%
group_by(nrows) %>%
filter(ptotal %in% c(min(ptotal), max(ptotal))) %>%
arrange(nrows, ptotal, nclasses) #%>%
#mutate(rel = nobinning / binning)
knitr::kable(round(tmp, 2), format = "latex")
## runtime
## ----------------------------------------------
# Read every runtime-benchmark result file and collect one row per method
# (linear / binary / ridge) into df_cat_runtime.
files = list.files("runtime", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
for (fn in files) {
  load(fn)  # provides `bm_extract`
  cat("Read", k, "/", length(files), "\n")
  # Number of classes is encoded in the file name: ...nclasses<K>-informative...
  ncls = as.integer(strsplit(x = strsplit(x = fn, split = "nclasses")[[1]][2], split = "-informative")[[1]][1])
  # (A `nic` value parsed from the file name was removed here: it was never
  # used -- the non-informative class count is taken from bm_extract below.)
  ll_rows[[k]] = data.frame(
    date = bm_extract$date,
    data_seed = bm_extract$data_seed,
    nrows = bm_extract$config$n,
    ncols = bm_extract$config$p,
    sn_ratio = bm_extract$config$sn_ratio,
    nclasses = ncls,
    nnoninfocls = bm_extract$config_classes["nic"][1,1],
    rep = bm_extract$config$rep,
    ncolsnoise = bm_extract$config$pnoise,
    # Total elapsed time per method.
    time = c(sum(bm_extract$time_linear), sum(bm_extract$time_binary), sum(bm_extract$time_ridge)),
    method = c("linear", "binary", "ridge")
  )
  k = k+1
}
df_cat_runtime = do.call("rbind", ll_rows)
# Runtime of the linear (no-binning) method relative to the binary and ridge
# binning variants, per configuration and repetition; values > 1 mean binning
# is faster.
df_plt_run = df_cat_runtime %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses) %>%
summarize(
rel_time_binary = time[method == "linear"] / time[method == "binary"],
rel_time_ridge = time[method == "linear"] / time[method == "ridge"],
ptotal = ncols[1] + ncolsnoise[1]
) %>%
gather(key = "method", value = "rel_time", starts_with("rel_time"))
# Human-readable labels for plotting.
df_plt_run$ptotal = factor(df_plt_run$ptotal, levels = as.character(sort(unique(df_plt_run$ptotal))))
df_plt_run$method[df_plt_run$method == "rel_time_binary"] = "Binary"
df_plt_run$method[df_plt_run$method == "rel_time_ridge"] = "Ridge"
df_plt_run$ncls_cat = factor(paste0(df_plt_run$nclasses, " classes"), levels = paste0(sort(unique(df_plt_run$nclasses)), " classes"))
# Violin plot of the relative runtime per number of rows, faceted by
# class count x method.
gg = ggplot() +
geom_hline(yintercept = 1, lty = 2, col = "dark red") +
geom_violin(data = df_plt_run, aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)), alpha = 0.5) +
geom_text(data = data.frame(x = 6, y = 1, label = "Baseline (runtime is equal)", method = "Ridge", ptotal = 250, ncls_cat = factor("20 classes", levels = paste0(c(5, 10, 20), " classes"))),
aes(x = x, y = y, label = label), color = "dark red", vjust = 1.5, hjust = 1, show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows") +
ylab("Relative Deviation of the\nRuntime in Seconds\nCompared to Using No Binning") +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
coord_cartesian(clip = 'off') +
facet_grid(ncls_cat ~ method, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_runtime_rel_violines.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.5, units = "mm")
# LaTeX table of median runtime per method (analogous to the memory table):
# keep the slowest linear run per configuration, 5/20 classes only, and the
# smallest/largest feature count per number of rows.
tmp = df_cat_runtime %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mean_mem = median(time)) %>%
select(nrows, ncols, ncolsnoise, mean_mem, method, nclasses) %>%
pivot_wider(names_from = method, values_from = mean_mem) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
group_by(nrows, ptotal, nclasses) %>%
select(nrows, ptotal, linear, binary, ridge, nclasses) %>%
filter(linear == max(linear), nclasses %in% c(5, 20)) %>%
group_by(nrows) %>%
filter(ptotal %in% c(min(ptotal), max(ptotal))) %>%
arrange(nrows, ptotal, nclasses) #%>%
#mutate(rel = nobinning / binning)
# Convert seconds to minutes for the table.
tmp$binary = tmp$binary / 60
tmp$linear = tmp$linear / 60
tmp$ridge = tmp$ridge / 60
knitr::kable(round(tmp, 2), format = "latex")
## -----------------------------------------------------------------
## performance
## -----------------------------------------------------------------
# Read the performance-benchmark results: per file, extract the selected
# base-learner traces, count informative vs. noise selections, re-simulate
# the data to get the intercept (null-model) out-of-bag risk, and collect
# one row per method (binary / ridge) into df_cat.
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
# NOTE(review): only files beyond index 20000 are processed here -- presumably
# the earlier chunk was handled in a previous session; this errors if fewer
# than 20001 files exist. Confirm before re-running from scratch.
for (fn in files[20001:length(files)]) {
cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
load(fn)
# Base-learner names are "<feature>_<suffix>"; keep the feature part.
selected_feat_bin = unlist(lapply(strsplit(x = bm_extract$trace_binary, split = "_"), function (x) x[1]))
selected_feat_ridge = unlist(lapply(strsplit(x = bm_extract$trace_ridge, split = "_"), function (x) x[1]))
# Number of classes encoded in the file name: ...nclasses<K>-informative...
ncls = as.integer(strsplit(x = strsplit(x = fn, split = "nclasses")[[1]][2], split = "-informative")[[1]][1])
# Noise features carry "noise" in their name.
n_noise_bin = sum(grepl(x = selected_feat_bin, pattern = "noise"))
n_feat_bin = length(selected_feat_bin) - n_noise_bin
n_noise_ridge = sum(grepl(x = selected_feat_ridge, pattern = "noise"))
n_feat_ridge = length(selected_feat_ridge) - n_noise_ridge
# Re-simulate the run's data to compute the intercept-model risk.
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
oob_int = mean((mean(dat$data$y) - dat$data$y)^2)
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
nclasses = ncls,
nnoninfocls = bm_extract$config_classes["nic"][1,1],
time_init = c(bm_extract$time_binary["init.elapsed"], bm_extract$time_ridge["init.elapsed"]),
time_fit = c(bm_extract$time_binary["fit.elapsed"], bm_extract$time_ridge["fit.elapsed"]),
method = c("binary", "ridge"),
iterations = c(length(selected_feat_bin), length(selected_feat_ridge)),
oob_risk_int = oob_int,
oob_risk_min = c(min(bm_extract$log_binary$logger_data[,2]), min(bm_extract$log_ridge$logger_data[,2])),
n_selected = c(n_feat_bin, n_feat_ridge),
n_noise = c(n_noise_bin, n_noise_ridge)
)
k = k+1
}
# Checkpoint the collected rows; the load() restores the same object so later
# sections can resume from the checkpoint without re-reading the files.
save(ll_rows, file = "ll_rows_cat.Rda")
load("ll_rows_cat.Rda")
df_cat = do.call("rbind", ll_rows)
# Selected noise in fitting process:
# ------------------------------------------
# Share of boosting iterations that picked an informative feature, summarized
# as median +/- one standard deviation per configuration, and plotted against
# the number of rows (faceted by noise ratio x number of features).
gg_sel = df_cat %>%
pivot_longer(names_to = "feat_type", values_to = "selected", cols = starts_with("n_")) %>%
mutate(
rel_selected = selected / iterations,
ft = ifelse(feat_type == "n_selected", "Informative", "Noise")
) %>%
#group_by(nrows, ncols, ncolsnoise, ft, method, sn_ratio, nclasses) %>%
group_by(nrows, ncols, ncolsnoise, ft, method, sn_ratio) %>%
filter(ft == "Informative") %>%
#summarize(rel = median(rel_selected), min_rel = min(rel_selected), max_rel = max(rel_selected)) %>%
summarize(
rel = median(rel_selected),
min_rel = median(rel_selected) - sd(rel_selected),
max_rel = median(rel_selected) + sd(rel_selected),
pn_rel = ncolsnoise[1] / ncols[1]
) %>%
mutate(pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel)) %>%
#filter(ncols == 50, nclasses == 20) %>%
ggplot(aes(x = nrows, y = rel, linetype = method, color = as.factor(sn_ratio))) +
geom_linerange(aes(ymin = min_rel, ymax = max_rel), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("Relative Amount of Selected\nInformative Feature\nDuring the Fitting Process") +
labs(linetype = "Method", fill = "Method", color = "Signal to Noise\nRatio") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat$nrows), trans = "log10") +
facet_grid(pn_rel ~ ncols)#, scales = "free_y")
gg_sel
dinA4width = 210 * font_scale
ggsave(plot = gg_sel, filename = "categorical_selection_full.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Trace how base-learner are selected:
# ------------------------------------------
# Example run: selection traces of the ridge and binary encodings for one
# specific benchmark file, stacked on top of each other.
load("performance/xxx-n100000-p10-pnoise20-snr1-rep1-nclasses10-informative-classes0.Rda")
# Keep the feature part of the base-learner name ("<feature>_<suffix>").
bl_ridge= vapply(bm_extract$trace_ridge, FUN.VALUE = character(1L), FUN = function (b) strsplit(x = b, split = "_")[[1]][1])
gg1 = plotBlearnerTraces(bl_ridge, n_legend = 10L) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE)
bl_binary = vapply(bm_extract$trace_binary, FUN.VALUE = character(1L), FUN = function (b) strsplit(x = b, split = "_")[[1]][1])
gg2 = plotBlearnerTraces(bl_binary, n_legend = 10L) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE)
gridExtra::grid.arrange(gg1, gg2)
# Comparison of explained risk:
# -----------------------------------------
# Per-run differences between the binary and ridge variants: iteration counts,
# minimal out-of-bag risk, and the share of intercept risk explained.
df_plt = df_cat %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls) %>%
summarize(
ptotal = ncols[1] + ncolsnoise[1],
diffiter = (iterations[method == "binary"] - iterations[method == "ridge"]) / iterations[method == "binary"],
diffoob = (oob_risk_min[method == "binary"] - oob_risk_min[method == "ridge"]) / oob_risk_min[method == "binary"],
diffiter_t = (iterations[method == "binary"] - iterations[method == "ridge"]),
diffoob_t = (oob_risk_min[method == "binary"] - oob_risk_min[method == "ridge"]),
diffoob_int = (oob_risk_int[method == "binary"] - oob_risk_int[method == "ridge"]) / oob_risk_int[method == "binary"],
diffoob_int_t = oob_risk_int[method == "binary"] - oob_risk_int[method == "ridge"],
range_cod = oob_risk_int[method == "binary"],
iter_cod = iterations[method == "binary"],
range_agbm = oob_risk_int[method == "ridge"],
risk_explained = ((oob_risk_int[method == "ridge"] - oob_risk_min[method == "ridge"]) - (oob_risk_int[method == "binary"] - oob_risk_min[method == "binary"])) / oob_risk_int[method == "ridge"],
iters_bin = iterations[method == "binary"],
iters_ridge = iterations[method == "ridge"]
)
# Scatter plot: difference in explained risk vs. difference in iterations,
# faceted by number of classes x number of features.
gg_oob = df_plt %>%
mutate(ncolsf = factor(paste0("# Features: ", ncols), levels = paste0("# Features: ", c(5,10,20,50)))) %>%
group_by(nrows, ncolsf, sn_ratio, ncolsnoise, nclasses, nnoninfocls) %>%
#filter(iters_bin < 20000, iters_ridge < 20000) %>%
summarize(risk_explained = mean(risk_explained), diffiter = mean(diffiter_t), ptotal = ptotal[1]) %>%
ggplot(aes(x = risk_explained, y = diffiter, color = factor(sn_ratio), shape = factor(nrows))) +
geom_point(alpha = 0.5, size = 3) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
#geom_smooth(method = "lm", se = FALSE)+
#scale_color_brewer(palette = "Set1") +
xlab("Difference in Explained Risk") +
ylab("Iterations(Binary) - Iterations(Ridge)") +
labs(color = "Signal-to-Noise Ratio", fill = "Signal-to-Noise Ratio", shape = "Number of Rows") +
theme_minimal(base_family = "Gyre Bonum") +
#theme_minimal() +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)#,
#legend.position = c(.95, .95),
#legend.justification = c("right", "top")
) +
facet_grid(factor(paste0("# Classes: ", nclasses), levels = paste0("# Classes: ", c(5,10,20))) ~ ncolsf)
#gg_oob = ggExtra::ggMarginal(gg_oob, type = "density", groupFill = TRUE, groupColour = TRUE, margins = "y")
dinA4width = 210 * font_scale
ggsave(plot = gg_oob, filename = "categorical_oob.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Dependency between Iters(Ridge) ~ Iters(Binary):
# -----------------------------------------------------------
# Reshape to one row per run with binary/ridge iteration counts side by side;
# runs hitting the 20000-iteration cap are excluded.
df_cat_iter = df_cat %>%
select(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls, method, iterations) %>%
pivot_wider(names_from = "method", values_from = "iterations") %>%
mutate(ptotal = ncols + ncolsnoise) %>%
filter(binary < 20000)
# For every (ptotal, nrows) cell, regress binary on ridge iterations through
# the origin with a class-count interaction, and keep the per-class slope as
# a label placed at the run with the largest ridge iteration count.
ll_temp = list()
k = 1
for (p in unique(df_cat_iter$ptotal)) {
for (n in unique(df_cat_iter$nrows)) {
temp = df_cat_iter %>% filter(ptotal == p, nrows == n) %>% mutate(nclasses = as.factor(nclasses)) %>% select(binary, ridge, nclasses, ptotal, nrows)
mod = lm(binary ~ 0 + ridge*nclasses, data = temp)
params = coef(mod)
temp_max = temp %>% group_by(nclasses) %>% filter(ridge == max(ridge))
ll_empty = list()
for (i in seq_len(nrow(temp_max))) {
pred = predict(mod, temp_max[i,])
if (temp_max[i,"nclasses",drop = TRUE] %in% mod$xlevels$nclasses) {
# Slope for this class level = base ridge slope + its interaction term
# (when the interaction coefficient exists in the fit).
ie = paste0("ridge:nclasses", temp_max[i,"nclasses",drop=TRUE])
if (ie %in% names(params)) {
ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"] + params[ie])
#if (is.na(coef(summary(mod))[,"Pr(>|t|)"][ie])) {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"])
#} else {
#if (coef(summary(mod))[,"Pr(>|t|)"][ie] < 0.05) {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"] + params[ie])
#} else {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = NA)
#}
#}
} else {
ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"])
}
}
}
preds = do.call(rbind, ll_empty)
preds$labels = as.character(round(preds$label, 2))
ll_temp[[k]] = preds
k = k + 1
}
}
df_labels = do.call(rbind, ll_temp)
# Scatter of binary vs. ridge iteration counts with per-class regression
# lines and the fitted slope labels from df_labels, faceted by number of
# rows x total number of features.
gg_iter = df_cat_iter %>%
ggplot(aes(x = ridge, y = binary, color = as.factor(nclasses))) +
geom_point(alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE) +
geom_abline(intercept = 0, slope = 1, color = "dark red", linetype = "dashed", alpha = 0.5) +
ggrepel::geom_label_repel(data = df_labels, aes(x = ridge, y = binary, fill = factor(nclasses, levels = c("5", "10", "20")), label = labels),
colour = "white", fontface = "bold", show.legend = FALSE) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
#scale_color_brewer(palette = "Set1") +
#scale_fill_brewer(palette = "Set1") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 9 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
xlab("Iterations (Ridge)") +
ylab("Iterations (Binary)") +
labs(color = "Number of\nclasses\nper feature") +
scale_x_continuous(breaks = c(0, 8000)) +
facet_grid(factor(paste0("# Rows:\n", nrows), levels = paste0("# Rows:\n", c(5000,10000,20000,50000,100000))) ~ factor(paste0("# p:\n", ptotal), levels = paste0("# p:\n", sort(unique(df_labels$ptotal)))))
dinA4width = 210 * font_scale
ggsave(plot = gg_iter, filename = "categorical_iters.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Density of the fitted per-class slopes (Iterations(Binary) ~
# Iterations(Ridge)) across configurations, faceted by feature count.
gg_iter_meta = df_labels %>%
ggplot(aes(x = label, color = nclasses, fill = nclasses)) +
geom_density(alpha = 0.5) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
#scale_color_brewer(palette = "Set1") +
#scale_fill_brewer(palette = "Set1") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 9 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
xlab("Slope: Iterations(Binary) ~ Iterations(Ridge)") +
ylab("Density") +
labs(color = "Number of\nclasses\nper feature", fill = "Number of\nclasses\nper feature") +
facet_wrap(. ~ factor(paste0("# Features: ", ptotal), levels = paste0("# Features: ", sort(unique(df_labels$ptotal)))))
dinA4width = 210 * font_scale
ggsave(plot = gg_iter_meta, filename = "categorical_iters_meta.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Check how well noise categories are "NOT" estimated
# ---------------------------------------------------
#load("performance/xxx-n100000-p5-pnoise5-snr10-rep15-nclasses20-informative-classes10.Rda")
#set.seed(bm_extract$data_seed)
#dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
#real_params = dat$cat_param
#est_params = bm_extract$coef_ridge[[2]]
#names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
#est_params = transformRidgeToParam(est_params, dat$data)
#getNoiseMSE(real_params, est_params)
#est_params = bm_extract$coef_binary[[2]]
#est_params = transformBinaryToParam(est_params)
#getNoiseMSE(real_params, est_params)
# ---------------------------------------------------------------------------
# Collect noise-class MSEs from all benchmark extracts in performance/.
# For every extract the training data is re-simulated with the stored seed,
# the fitted coefficients are transformed back to parameter scale, and
# getNoiseMSE() compares them to the true parameters. The three calls per
# method appear to correspond to: all classes / without noise classes /
# noise classes only (inferred from the result columns below -- TODO confirm
# against getNoiseMSE() in ../../R/helper.R).
# ---------------------------------------------------------------------------
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]

# Set estimated class means with |mean| <= cutoff to zero; emulates class
# selection for the ridge estimates. (This lapply was previously duplicated
# inline for each of the three cutoffs.)
applyRidgeCutoff = function (est, cutoff) {
  lapply(est, function (p) {
    p$means = ifelse(abs(p$means) > cutoff, p$means, 0)
    p
  })
}

ll_rows = list()
k = 1
#for (fn in sample(files, 200, FALSE)) {
for (fn in files) {
  cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
  # Runs without non-informative classes carry no noise to evaluate.
  if (! grepl(pattern = "informative-classes0", x = fn)) {
    load(fn)  # provides `bm_extract`

    # Recreate the exact data set of this benchmark run.
    set.seed(bm_extract$data_seed)
    dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
    real_params = dat$cat_param

    # Ridge coefficients: strip the "_<class>" suffix from the names, then
    # transform back to the parameter representation.
    est_params = bm_extract$coef_ridge[[2]]
    names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
    est_params = transformRidgeToParam(est_params, dat$data)
    mse_ridge = getNoiseMSE(real_params, est_params, FALSE)
    mse_ridge_wn = getNoiseMSE(real_params, est_params, TRUE)
    mse_ridge_just_wn = getNoiseMSE(real_params, est_params, TRUE, TRUE)

    # Same three MSE flavors after hard-thresholding small ridge means.
    est_params_ridge_cutoff001 = applyRidgeCutoff(est_params, 0.01)
    mse_ridge_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, FALSE)
    mse_ridge_wn_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, TRUE)
    mse_ridge_just_wn_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, TRUE, TRUE)

    est_params_ridge_cutoff05 = applyRidgeCutoff(est_params, 0.5)
    mse_ridge_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, FALSE)
    mse_ridge_wn_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, TRUE)
    mse_ridge_just_wn_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, TRUE, TRUE)

    est_params_ridge_cutoff1 = applyRidgeCutoff(est_params, 1)
    mse_ridge_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, FALSE)
    mse_ridge_wn_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, TRUE)
    mse_ridge_just_wn_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, TRUE, TRUE)

    # Binary encoding: direct back-transformation, same three MSE flavors.
    est_params = bm_extract$coef_binary[[2]]
    est_params = transformBinaryToParam(est_params)
    mse_binary = getNoiseMSE(real_params, est_params, FALSE)
    mse_binary_wn = getNoiseMSE(real_params, est_params, TRUE)
    mse_binary_just_wn = getNoiseMSE(real_params, est_params, TRUE, TRUE)

    # One row per method; ridge timings are recycled over the cutoff variants
    # since thresholding is a post-processing step of the same fit.
    ll_rows[[k]] = data.frame(
      date = bm_extract$date,
      data_seed = bm_extract$data_seed,
      nrows = bm_extract$config$n,
      ncols = bm_extract$config$p,
      sn_ratio = bm_extract$config$sn_ratio,
      rep = bm_extract$config$rep,
      ncolsnoise = bm_extract$config$pnoise,
      nclasses = bm_extract$config_classes$ncls[1],
      nnoninfocls = bm_extract$config_classes["nic"][1,1],
      time_init = c(bm_extract$time_binary["init.elapsed"], rep(bm_extract$time_ridge["init.elapsed"], 4)),
      time_fit = c(bm_extract$time_binary["fit.elapsed"], rep(bm_extract$time_ridge["fit.elapsed"], 4)),
      method = c("binary", "ridge", "ridge_cutoff001", "ridge_cutoff05", "ridge_cutoff1"),
      mse = c(mse_binary_wn$mean, mse_ridge_wn$mean, mse_ridge_wn_cutoff001$mean, mse_ridge_wn_cutoff05$mean, mse_ridge_wn_cutoff1$mean),
      mse_with_noise = c(mse_binary$mean, mse_ridge$mean, mse_ridge_cutoff001$mean, mse_ridge_cutoff05$mean, mse_ridge_cutoff1$mean),
      mse_noise = c(mse_binary_just_wn$mean, mse_ridge_just_wn$mean, mse_ridge_just_wn_cutoff001$mean, mse_ridge_just_wn_cutoff05$mean, mse_ridge_just_wn_cutoff1$mean),
      nnotselected = c(mse_binary$n_not_sel, mse_ridge$n_not_sel, mse_ridge_cutoff001$n_not_sel, mse_ridge_cutoff05$n_not_sel, mse_ridge_cutoff1$n_not_sel),
      nwrongnotselected = c(mse_binary$n_wrong_not_sel, mse_ridge$n_wrong_not_sel, mse_ridge_cutoff001$n_wrong_not_sel, mse_ridge_cutoff05$n_wrong_not_sel, mse_ridge_cutoff1$n_wrong_not_sel)
    )
  }
  # k counts every file (also skipped ones); the resulting NULL gaps in
  # ll_rows are dropped later by do.call(rbind, ...).
  k = k+1
}
# Persist the collected rows, then assemble one long data.frame.
save(ll_rows, file = "ll_rows_cat_mses2.Rda")
load("ll_rows_cat_mses2.Rda")
df_cat_mses = do.call(rbind, ll_rows)
# Relabel methods for plotting; relies on the alphabetical factor-level
# order: binary, ridge, ridge_cutoff001, ridge_cutoff05, ridge_cutoff1.
df_cat_mses$method = factor(df_cat_mses$method)
levels(df_cat_mses$method) = c("Binary", "Ridge", "Ridge (cutoff <0.01)", "Ridge (cutoff <0.5)", "Ridge (cutoff <1)")
# Long format for the boxplots: one row per (run, MSE flavor).
df_bp = df_cat_mses %>%
pivot_longer(cols = starts_with("mse")) %>%
mutate(sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))) %>%
mutate(mse = factor(name))
# Alphabetical level order is: mse, mse_noise, mse_with_noise.
# NOTE(review): this maps "mse_with_noise" to "MSE of informative classes" --
# verify the column/label pairing is intended.
levels(df_bp$mse) = c("MSE", "MSE of\nnoise classes", "MSE of\ninformative classes")
# Boxplots of the three MSE flavors per method, faceted by SNR.
gg = df_bp %>%
ggplot(aes(x = mse, y = value, fill = method, color = method)) +
geom_boxplot(alpha = 0.2) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("") +
ylab("MSE") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
facet_grid(sn_ratiof ~ .) #, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_mse.pdf", width = dinA4width * 2/3 * 0.7, height = dinA4width * 2/3 * 0.5, units = "mm")
head(df_cat_mses)
# Median selection rates per configuration, relative to the number of
# non-informative classes (NaN where nnoninfocls == 0; filtered downstream).
df_plt_cat_mses = df_cat_mses %>%
mutate(
rel_notselected = nnotselected / nnoninfocls,
rel_nwrongnotselected = nwrongnotselected / nnoninfocls
) %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses, sn_ratio) %>%
summarize(
rel_notselected = median(rel_notselected, na.rm = TRUE),
rel_nwrongnotselected = median(rel_nwrongnotselected, na.rm = TRUE)
) %>%
mutate(
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
)
dim(df_plt_cat_mses)
# Convex-hull exploration of the selection rates (kept for reference).
# BUGFIX: the `filter(...)` line in the second variant below was the only
# live line in this otherwise commented-out block; it started a dangling
# `%>%` pipe that broke parsing of the following `tmp = ...` assignment.
# It is now commented out like the rest of the block.
#hull = df_cat_mses %>%
#mutate(
#rel_notselected = nnotselected / nnoninfocls,
#rel_nwrongnotselected = nwrongnotselected / nnoninfocls,
#sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
#) %>%
#filter(!is.nan(rel_nwrongnotselected), !is.nan(rel_notselected)) %>%
#group_by(sn_ratiof, method) %>%
#slice(chull(rel_nwrongnotselected, rel_notselected))
#hull = df_plt_cat_mses %>%
#filter(!is.nan(rel_nwrongnotselected), !is.nan(rel_notselected)) %>%
#group_by(sn_ratiof, method) %>%
#slice(chull(rel_nwrongnotselected, rel_notselected))
# Median selection rates for the three competitive methods; input for the
# kernel-density contours below. Non-finite and zero rates are dropped so
# ks::kde() only sees valid, strictly positive observations.
tmp = df_cat_mses %>%
filter(method %in% c("Binary", "Ridge (cutoff <0.5)", "Ridge (cutoff <1)")) %>%
mutate(
rel_notselected = nnotselected / nnoninfocls,
rel_nwrongnotselected = nwrongnotselected / nnoninfocls,
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses, sn_ratiof) %>%
summarize(
rel_notselected = median(rel_notselected, na.rm = TRUE),
rel_nwrongnotselected = median(rel_nwrongnotselected, na.rm = TRUE)
) %>%
select(rel_nwrongnotselected, rel_notselected, method, sn_ratiof) %>%
filter(is.finite(rel_nwrongnotselected), is.finite(rel_notselected), rel_nwrongnotselected > 0, rel_notselected > 0) %>%
na.omit()
# Estimate a 2d kernel density per (method, SNR) cell and extract its 5%
# probability contour as a polygon for plotting.
ll_dens = list()
k = 1
for (m in unique(tmp$method)) {
for (snr in unique(tmp$sn_ratiof)) {
# First kde() call is only used to obtain a pilot bandwidth H; the second
# call doubles it for smoother contours.
kd = ks::kde(tmp[(tmp$method == m) & (tmp$sn_ratiof == snr), c("rel_nwrongnotselected", "rel_notselected")],
compute.cont=TRUE)
kd = ks::kde(tmp[(tmp$method == m) & (tmp$sn_ratiof == snr), c("rel_nwrongnotselected", "rel_notselected")],
compute.cont=TRUE, H = kd$H * 2)
# `cont["5%"]` inside with() reads kd$cont (computed via compute.cont).
# NOTE(review): `bgridsize` here is an extra argument to with(), not to
# contourLines()/kde(), so it is silently ignored -- it was presumably
# meant for the ks::kde() calls above; confirm and move if so.
cont = with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]],
z=estimate, levels=cont["5%"])[[1]], bgridsize=c(151,151))
ll_dens[[k]] = data.frame(cont, method = m, sn_ratiof = snr)
# Rename contour coordinates to match the plotting aesthetics.
ll_dens[[k]]$rel_nwrongnotselected = ll_dens[[k]]$x
ll_dens[[k]]$rel_notselected = ll_dens[[k]]$y
k = k + 1
}
}
df_dens = do.call(rbind, ll_dens)
# Scatter plot of the median selection rates with the 5% density contour of
# the raw replications drawn as a shaded polygon per method.
gg = ggplot(mapping = aes(x = rel_nwrongnotselected, y = rel_notselected, shape = method, color = method, fill = method)) +
#ggplot(mapping = aes(x = rel_nwrongnotselected, y = rel_notselected, shape = method, color = method, fill = method)) +
geom_polygon(data = df_dens, alpha = 0.2, size = 0.1) +
geom_point(data = df_plt_cat_mses) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("Fraction of wrongly\nnot selected classes (FPR)") +
ylab("Fraction of correctly\nnot selected classes (TPR)") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
# BUGFIX: xlim()/ylim() followed by scale_x/y_continuous(breaks = ...) let
# the later scale silently replace the earlier one, dropping the limits.
# Breaks and limits are now combined in a single scale per axis.
scale_x_continuous(breaks = seq(0, 1, 0.2), limits = c(min(df_dens$rel_nwrongnotselected), max(df_dens$rel_nwrongnotselected))) +
scale_y_continuous(breaks = seq(0, 1, 0.2), limits = c(min(df_dens$rel_notselected), max(df_dens$rel_notselected))) +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
dinA4width = 210 * font_scale
# NOTE(review): this file name is reused by the gg_cat_selected plot further
# down, which overwrites this PDF -- confirm which plot should keep it.
ggsave(plot = gg, filename = "categorical_noninfo_count.pdf", width = dinA4width * 2/3 * 0.7, height = dinA4width * 2/3 * 0.5, units = "mm")
# Median count of not-selected non-informative classes over the number of
# rows, per method and number of classes, faceted by SNR. Error bars are
# median +/- one standard deviation.
gg_cat_selected = df_cat_mses %>%
#df_cat_mses %>%
group_by(nrows, method, nclasses, sn_ratio) %>%
summarize(
rel = median(nnotselected, na.rm = TRUE),
min_rel = median(nnotselected, na.rm = TRUE) - sd(nnotselected, na.rm = TRUE),
max_rel = median(nnotselected, na.rm = TRUE) + sd(nnotselected, na.rm = TRUE)
#pn_rel = ncolsnoise[1] / ncols[1],
) %>%
mutate(
#pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel),
#ncolsf = factor(paste0("# p: ", ncols), levels = paste0("# p: ", c(5, 10, 20, 50))),
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
#filter(ncols == 50, nclasses == 20) %>%
# NOTE(review): the `/ nclasses * 2` rescaling is undocumented -- presumably
# because half of the classes per feature are non-informative; confirm.
ggplot(aes(x = nrows, y = rel / nclasses * 2, linetype = as.factor(method), color = as.factor(nclasses))) +
geom_linerange(aes(ymin = min_rel / nclasses * 2, ymax = max_rel / nclasses * 2), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of rows\n(log10 Scale)") +
ylab("Fraction of not selected\nnon-informative classes") +
labs(linetype = "Method", fill = "Method", color = "Number of classes\nper feature") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat_mses$nrows), trans = "log10") +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
dinA4width = 210 * font_scale
# NOTE(review): this overwrites the scatter/contour plot saved above under
# the same file name "categorical_noninfo_count.pdf" -- confirm intent.
ggsave(plot = gg_cat_selected, filename = "categorical_noninfo_count.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Difference of the non-informative-class MSE between ridge and binary over
# the number of rows.
# NOTE(review): this pipeline looks broken as written: (1) `noninfo_mse` is
# not a column of df_cat_mses (available: mse, mse_with_noise, mse_noise),
# and (2) `method == "ridge"/"binary"` compares against the lowercase names
# although the levels were renamed to "Binary"/"Ridge" above, so the
# subsets would be empty. TODO: confirm the intended column (presumably
# mse_noise) and use the renamed method levels.
gg_cat_mse = df_cat_mses %>%
group_by(nrows, ncols, ncolsnoise, nclasses, sn_ratio, rep, nnoninfocls) %>%
summarize(
pn_rel = ncolsnoise[1] / ncols[1],
mse_diff = median(noninfo_mse[method == "ridge"], na.rm = TRUE) - median(noninfo_mse[method == "binary"], na.rm = TRUE)
) %>%
# Collapse the 0.4 ratio onto the 0.5 display bucket.
mutate(pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel)) %>%
mutate(
ncolsf = factor(paste0("# p: ", ncols), levels = paste0("# p: ", c(5, 10, 20, 50))),
pn_relf = factor(paste0("rel p\nnoise:\n", pn_rel), levels = paste0("rel p\nnoise:\n", c(0.5, 1, 2, 5))),
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
group_by(nrows, ncolsf, pn_rel, nclasses, sn_ratiof, nnoninfocls) %>%
summarize(
mmse_diff = median(mse_diff),
min_rel = median(mse_diff) - sd(mse_diff),
max_rel = median(mse_diff) + sd(mse_diff)
) %>%
ggplot(aes(x = nrows, y = mmse_diff, linetype = as.factor(pn_rel), color = as.factor(nclasses))) +
geom_linerange(aes(ymin = min_rel, ymax = max_rel), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#geom_smooth(se = FALSE) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("MSE(Ridge) - MSE(Binary)\nof Non-Informative Classes") +
labs(linetype = "Relative number\nof noise classes", fill = "Method", color = "# Classes") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat_mses$nrows), trans = "log10") +
facet_grid(sn_ratiof ~ ncolsf, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg_cat_mse, filename = "categorical_noninfo_mse.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Calculate MSE of estimated parameter:
# ---------------------------------------------------
# Collect the full parameter MSE (via getCategoricalMSE) for every extract:
# re-simulate the data with the stored seed, back-transform the fitted
# coefficients, and compare to the true parameters for both encodings.
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
for (fn in files) {
cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
load(fn)
# Recreate the exact data set of this benchmark run.
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
real_params = dat$cat_param
# Ridge: strip the "_<class>" suffix, transform back to parameter scale.
est_params = bm_extract$coef_ridge[[2]]
names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
est_params = transformRidgeToParam(est_params, dat$data)
mse_ridge = getCategoricalMSE(real_params, est_params, TRUE)
# Binary encoding: direct back-transformation.
est_params = bm_extract$coef_binary[[2]]
est_params = transformBinaryToParam(est_params)
mse_binary = getCategoricalMSE(real_params, est_params, TRUE)
# One row per method.
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
nclasses = bm_extract$config_classes$ncls[1],
nnoninfocls = bm_extract$config_classes["nic"][1,1],
time_init = c(bm_extract$time_binary["init.elapsed"], bm_extract$time_ridge["init.elapsed"]),
time_fit = c(bm_extract$time_binary["fit.elapsed"], bm_extract$time_ridge["fit.elapsed"]),
method = c("binary", "ridge"),
mse = c(mse_binary, mse_ridge)
)
k = k+1
}
# Persist and reload the full-MSE rows, then plot the ridge-vs-binary
# difference as boxplots over the number of rows.
save(ll_rows, file = "ll_rows_cat_mses_full.Rda")
load("ll_rows_cat_mses_full.Rda")
df_mses = do.call(rbind, ll_rows)
df_mses %>%
# NOTE(review): ad-hoc outlier cut -- confirm the 300 threshold.
filter(mse < 300) %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls) %>%
# NOTE(review): mutate() after group_by() keeps every row, so mse_diff is
# repeated within each group (the boxplot then sees duplicated values) --
# summarize() semantics may have been intended here; confirm.
mutate(
pn_rel = ncolsnoise[1] / ncols[1],
clsn_rel = nnoninfocls[1] / nclasses[1],
mse_diff = median(mse[method == "ridge"], na.rm = TRUE) - median(mse[method == "binary"], na.rm = TRUE)
) %>%
# Collapse the 0.4 ratios onto the 0.5 display bucket.
mutate(
pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel),
clsn_rel = ifelse(clsn_rel == 0.4, 0.5, clsn_rel)
) %>%
ggplot(aes(x = as.factor(nrows), y = mse_diff, fill = as.factor(pn_rel))) +
geom_boxplot() +
scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
#scale_fill_brewer(palette = "Set1") +
#scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("MSE(Ridge) - MSE(Binary)\nof Non-Informative Classes") +
labs(linetype = "Relative number\nof noise classes", fill = "Method", color = "# Classes") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
#facet_grid(sn_ratio + nnoninfocls ~ ncols + ncolsnoise, scales = "free_y")
facet_grid(paste0("SNR: ", sn_ratio) ~ nclasses, scales = "free_y")
# /bm-scripts/categorical/analyse.R (repo schalkdaniel/bm-CompAspCboost, R, no_license, 41962 bytes) -- dataset metadata row left over from extraction, not R code
library(ggplot2)
library(tidyr)
library(viridis)
source("../../R/bm-sim-data.R")
source("../../R/helper.R")
sysfonts::font_add("Gyre Bonum",
regular = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-regular.otf",
bold = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-bold.otf")
showtext::showtext_auto()
font_scale = 3
## memory
## ----------------------------------------------
# Load local data:
# -------------------
# Load the memory-profiling extracts and assemble one data.frame with the
# final heap size per method (linear = no-binning baseline, binary, ridge).
files = list.files("memory", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
# NOTE(review): presumably the base session overhead subtracted from the
# measured heap, in the same unit as `mem` -- confirm.
mem_setup = 50
k = 1
for (fn in files) {
load(fn)  # provides `bm_extract`
cat("Read", k, "/", length(files), "\n")
# Re-simulate this run's data to measure how much memory the raw data
# itself occupies; that amount is subtracted from the measured heap below.
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$cls_config$ncls[1], ncnoise = bm_extract$cls_config$nic[1])
cnames = colnames(dat$data)
# BUGFIX: the inner loop originally reused `fn` as its loop variable,
# clobbering the outer file name that is printed in the warning below.
# A separate variable is used for the column names now.
for (cln in cnames[cnames != "y"]) {
dat$data[[cln]] = as.character(dat$data[[cln]])
}
dat_noise = dat$data
mem_rsim = sum(c(object.size(dat), object.size(dat_noise))) / 1024^2  # MB
if(is.null(bm_extract$ms_extract_ridge)) {
cat(fn, " does not have ridge memory heap size\n")
} else {
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
nclasses = bm_extract$cls_config["ncls"][1,1],
nnoninfocls = bm_extract$cls_config["nic"][1,1],
#rep = bm_extract$config$rep, # rep is always 1 for memory
ncolsnoise = bm_extract$config$pnoise,
mem = c(last(bm_extract$ms_extract_linear$mem_heap_B), last(bm_extract$ms_extract_binary$mem_heap_B), last(bm_extract$ms_extract_ridge$mem_heap_B)) - mem_setup - mem_rsim,
unit = c(last(bm_extract$ms_extract_linear$unit), last(bm_extract$ms_extract_binary$unit), last(bm_extract$ms_extract_ridge$unit)),
method = c("linear", "binary", "ridge")
)
}
k = k+1
}
df_cat_memory = do.call("rbind", ll_rows)
# Plot used memory (proportional):
# --------------------------------
# Relative memory usage: baseline (linear / no binning) divided by the
# binned variants; ratios > 1 mean binning saves memory.
df_plt_mem = df_cat_memory %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mem = median(mem)) %>%
group_by(nrows, ncols, ncolsnoise, nclasses) %>%
summarize(rel_mem_bin = mem[method == "linear"] / mem[method == "binary"], rel_mem_ridge = mem[method == "linear"] / mem[method == "ridge"], ptotal = ncols[1] + ncolsnoise[1]) %>%
gather(key = "method", value = "rel_mem", starts_with("rel_mem")) %>%
filter(rel_mem >= 1)
# Pretty labels for the legend and facets.
df_plt_mem$ptotal = factor(df_plt_mem$ptotal, levels = as.character(sort(unique(df_plt_mem$ptotal))))
df_plt_mem$method[df_plt_mem$method == "rel_mem_bin"] = "Binary"
df_plt_mem$method[df_plt_mem$method == "rel_mem_ridge"] = "Ridge"
df_plt_mem$ncls_cat = factor(paste0(df_plt_mem$nclasses, " classes"), levels = paste0(sort(unique(df_plt_mem$nclasses)), " classes"))
gg = ggplot() +
geom_hline(yintercept = 1, col = "dark red", lty = 2) +
geom_line(data = df_plt_mem, aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
geom_point(data = df_plt_mem, aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
# Annotation placed into one specific facet via the matching factor levels.
geom_text(data = data.frame(x = 100000, y = 1, label = "Baseline (used memory is equal)", method = "Ridge", ptotal = 250, ncls_cat = factor("20 classes", levels = paste0(c(5, 10, 20), " classes"))),
aes(x = x, y = y, label = label), color = "dark red", vjust = 1.5, hjust = 1, show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = sort(unique(df_cat_memory$nrows)), trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab("Relative Deviation of the\nAllocated Memory in MB\nCompared to Using No Binning") +
labs(color = "Number of\nFeatures") +
coord_cartesian(clip = 'off') +
facet_grid(ncls_cat ~ method, scales= "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_memory_rel_lines.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.5, units = "mm")
# LaTeX table with the extreme configurations (largest data per (n, p)) for
# the paper appendix.
tmp = df_cat_memory %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mean_mem = median(mem)) %>%
select(nrows, ncols, ncolsnoise, mean_mem, method, nclasses) %>%
pivot_wider(names_from = method, values_from = mean_mem) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
group_by(nrows, ptotal, nclasses) %>%
select(nrows, ptotal, linear, binary, ridge, nclasses) %>%
filter(linear == max(linear), nclasses %in% c(5, 20)) %>%
group_by(nrows) %>%
filter(ptotal %in% c(min(ptotal), max(ptotal))) %>%
arrange(nrows, ptotal, nclasses) #%>%
#mutate(rel = nobinning / binning)
knitr::kable(round(tmp, 2), format = "latex")
## runtime
## ----------------------------------------------
# Load the runtime extracts; total runtime per method is the sum over the
# recorded timing phases.
files = list.files("runtime", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
for (fn in files) {
load(fn)
cat("Read", k, "/", length(files), "\n")
# Parse the number of classes from the file name, e.g.
# "...-nclasses10-informative-classes0.Rda".
# NOTE(review): split = ".Rda" is a regex ("." = any char) -- works here but
# fragile; `nic` is parsed below yet unused (nnoninfocls comes from the
# stored config instead). Confirm whether it can be removed.
ncls = as.integer(strsplit(x = strsplit(x = fn, split = "nclasses")[[1]][2], split = "-informative")[[1]][1])
nic = as.integer(strsplit(x = strsplit(x = fn, split = "informative-classes")[[1]][2], split = ".Rda")[[1]][1])
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
nclasses = ncls,
nnoninfocls = bm_extract$config_classes["nic"][1,1],
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
time = c(sum(bm_extract$time_linear), sum(bm_extract$time_binary), sum(bm_extract$time_ridge)),
method = c("linear", "binary", "ridge")
)
k = k+1
}
df_cat_runtime = do.call("rbind", ll_rows)
# Relative runtime: baseline (linear / no binning) divided by the binned
# variants; ratios > 1 mean binning is faster.
df_plt_run = df_cat_runtime %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses) %>%
summarize(
rel_time_binary = time[method == "linear"] / time[method == "binary"],
rel_time_ridge = time[method == "linear"] / time[method == "ridge"],
ptotal = ncols[1] + ncolsnoise[1]
) %>%
gather(key = "method", value = "rel_time", starts_with("rel_time"))
# Pretty labels for the legend and facets.
df_plt_run$ptotal = factor(df_plt_run$ptotal, levels = as.character(sort(unique(df_plt_run$ptotal))))
df_plt_run$method[df_plt_run$method == "rel_time_binary"] = "Binary"
df_plt_run$method[df_plt_run$method == "rel_time_ridge"] = "Ridge"
df_plt_run$ncls_cat = factor(paste0(df_plt_run$nclasses, " classes"), levels = paste0(sort(unique(df_plt_run$nclasses)), " classes"))
gg = ggplot() +
geom_hline(yintercept = 1, lty = 2, col = "dark red") +
geom_violin(data = df_plt_run, aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)), alpha = 0.5) +
# Annotation placed into one specific facet via the matching factor levels.
geom_text(data = data.frame(x = 6, y = 1, label = "Baseline (runtime is equal)", method = "Ridge", ptotal = 250, ncls_cat = factor("20 classes", levels = paste0(c(5, 10, 20), " classes"))),
aes(x = x, y = y, label = label), color = "dark red", vjust = 1.5, hjust = 1, show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows") +
ylab("Relative Deviation of the\nRuntime in Seconds\nCompared to Using No Binning") +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
coord_cartesian(clip = 'off') +
facet_grid(ncls_cat ~ method, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_runtime_rel_violines.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.5, units = "mm")
# LaTeX table of median runtimes (converted to minutes below) for the
# extreme configurations per (n, p).
tmp = df_cat_runtime %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mean_mem = median(time)) %>%
select(nrows, ncols, ncolsnoise, mean_mem, method, nclasses) %>%
pivot_wider(names_from = method, values_from = mean_mem) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
group_by(nrows, ptotal, nclasses) %>%
select(nrows, ptotal, linear, binary, ridge, nclasses) %>%
filter(linear == max(linear), nclasses %in% c(5, 20)) %>%
group_by(nrows) %>%
filter(ptotal %in% c(min(ptotal), max(ptotal))) %>%
arrange(nrows, ptotal, nclasses) #%>%
#mutate(rel = nobinning / binning)
# Seconds -> minutes for the table.
tmp$binary = tmp$binary / 60
tmp$linear = tmp$linear / 60
tmp$ridge = tmp$ridge / 60
knitr::kable(round(tmp, 2), format = "latex")
## -----------------------------------------------------------------
## performance
## -----------------------------------------------------------------
# Collect per-run performance metrics: selected (noise vs. informative)
# base-learners from the boosting traces, iteration counts, and the
# out-of-bag risk range.
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
# NOTE(review): hard-coded resume index (skips the first 20000 files,
# presumably processed in an earlier session); the progress message still
# reports length(files). Confirm before re-running from scratch.
for (fn in files[20001:length(files)]) {
cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
load(fn)
# Feature name = part of the base-learner id before the first "_".
selected_feat_bin = unlist(lapply(strsplit(x = bm_extract$trace_binary, split = "_"), function (x) x[1]))
selected_feat_ridge = unlist(lapply(strsplit(x = bm_extract$trace_ridge, split = "_"), function (x) x[1]))
ncls = as.integer(strsplit(x = strsplit(x = fn, split = "nclasses")[[1]][2], split = "-informative")[[1]][1])
# Count selections of noise features (name contains "noise") vs. the rest.
n_noise_bin = sum(grepl(x = selected_feat_bin, pattern = "noise"))
n_feat_bin = length(selected_feat_bin) - n_noise_bin
n_noise_ridge = sum(grepl(x = selected_feat_ridge, pattern = "noise"))
n_feat_ridge = length(selected_feat_ridge) - n_noise_ridge
# Re-simulate the data to compute the intercept (null-model) risk.
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
oob_int = mean((mean(dat$data$y) - dat$data$y)^2)
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
nclasses = ncls,
nnoninfocls = bm_extract$config_classes["nic"][1,1],
time_init = c(bm_extract$time_binary["init.elapsed"], bm_extract$time_ridge["init.elapsed"]),
time_fit = c(bm_extract$time_binary["fit.elapsed"], bm_extract$time_ridge["fit.elapsed"]),
method = c("binary", "ridge"),
iterations = c(length(selected_feat_bin), length(selected_feat_ridge)),
oob_risk_int = oob_int,
oob_risk_min = c(min(bm_extract$log_binary$logger_data[,2]), min(bm_extract$log_ridge$logger_data[,2])),
n_selected = c(n_feat_bin, n_feat_ridge),
n_noise = c(n_noise_bin, n_noise_ridge)
)
k = k+1
}
save(ll_rows, file = "ll_rows_cat.Rda")
load("ll_rows_cat.Rda")
df_cat = do.call("rbind", ll_rows)
# Selected noise in fitting process:
# ------------------------------------------
# Fraction of boosting iterations that picked an informative feature,
# per method and SNR, over the number of rows (error bars: median +/- sd).
gg_sel = df_cat %>%
pivot_longer(names_to = "feat_type", values_to = "selected", cols = starts_with("n_")) %>%
mutate(
rel_selected = selected / iterations,
ft = ifelse(feat_type == "n_selected", "Informative", "Noise")
) %>%
#group_by(nrows, ncols, ncolsnoise, ft, method, sn_ratio, nclasses) %>%
group_by(nrows, ncols, ncolsnoise, ft, method, sn_ratio) %>%
filter(ft == "Informative") %>%
#summarize(rel = median(rel_selected), min_rel = min(rel_selected), max_rel = max(rel_selected)) %>%
summarize(
rel = median(rel_selected),
min_rel = median(rel_selected) - sd(rel_selected),
max_rel = median(rel_selected) + sd(rel_selected),
pn_rel = ncolsnoise[1] / ncols[1]
) %>%
# Collapse the 0.4 ratio onto the 0.5 display bucket.
mutate(pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel)) %>%
#filter(ncols == 50, nclasses == 20) %>%
ggplot(aes(x = nrows, y = rel, linetype = method, color = as.factor(sn_ratio))) +
geom_linerange(aes(ymin = min_rel, ymax = max_rel), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("Relative Amount of Selected\nInformative Feature\nDuring the Fitting Process") +
labs(linetype = "Method", fill = "Method", color = "Signal to Noise\nRatio") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat$nrows), trans = "log10") +
facet_grid(pn_rel ~ ncols)#, scales = "free_y")
gg_sel
dinA4width = 210 * font_scale
ggsave(plot = gg_sel, filename = "categorical_selection_full.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Trace how base-learner are selected:
# ------------------------------------------
# Interactive check: base-learner selection traces of one example run,
# ridge (top) vs. binary (bottom).
load("performance/xxx-n100000-p10-pnoise20-snr1-rep1-nclasses10-informative-classes0.Rda")
# Feature name = part of the base-learner id before the first "_".
bl_ridge= vapply(bm_extract$trace_ridge, FUN.VALUE = character(1L), FUN = function (b) strsplit(x = b, split = "_")[[1]][1])
gg1 = plotBlearnerTraces(bl_ridge, n_legend = 10L) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE)
bl_binary = vapply(bm_extract$trace_binary, FUN.VALUE = character(1L), FUN = function (b) strsplit(x = b, split = "_")[[1]][1])
gg2 = plotBlearnerTraces(bl_binary, n_legend = 10L) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE)
gridExtra::grid.arrange(gg1, gg2)
# Comparison of explained risk:
# -----------------------------------------
# Binary-vs-ridge comparison per run: iteration and out-of-bag-risk
# differences, both absolute (_t suffix) and relative to the binary values.
df_plt = df_cat %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls) %>%
summarize(
ptotal = ncols[1] + ncolsnoise[1],
diffiter = (iterations[method == "binary"] - iterations[method == "ridge"]) / iterations[method == "binary"],
diffoob = (oob_risk_min[method == "binary"] - oob_risk_min[method == "ridge"]) / oob_risk_min[method == "binary"],
diffiter_t = (iterations[method == "binary"] - iterations[method == "ridge"]),
diffoob_t = (oob_risk_min[method == "binary"] - oob_risk_min[method == "ridge"]),
diffoob_int = (oob_risk_int[method == "binary"] - oob_risk_int[method == "ridge"]) / oob_risk_int[method == "binary"],
diffoob_int_t = oob_risk_int[method == "binary"] - oob_risk_int[method == "ridge"],
range_cod = oob_risk_int[method == "binary"],
iter_cod = iterations[method == "binary"],
range_agbm = oob_risk_int[method == "ridge"],
# Difference in risk explained (intercept risk minus minimal risk),
# normalized by the ridge intercept risk.
risk_explained = ((oob_risk_int[method == "ridge"] - oob_risk_min[method == "ridge"]) - (oob_risk_int[method == "binary"] - oob_risk_min[method == "binary"])) / oob_risk_int[method == "ridge"],
iters_bin = iterations[method == "binary"],
iters_ridge = iterations[method == "ridge"]
)
# Scatter: difference in explained risk vs. absolute iteration difference,
# faceted by number of classes and number of features.
gg_oob = df_plt %>%
mutate(ncolsf = factor(paste0("# Features: ", ncols), levels = paste0("# Features: ", c(5,10,20,50)))) %>%
group_by(nrows, ncolsf, sn_ratio, ncolsnoise, nclasses, nnoninfocls) %>%
#filter(iters_bin < 20000, iters_ridge < 20000) %>%
summarize(risk_explained = mean(risk_explained), diffiter = mean(diffiter_t), ptotal = ptotal[1]) %>%
ggplot(aes(x = risk_explained, y = diffiter, color = factor(sn_ratio), shape = factor(nrows))) +
geom_point(alpha = 0.5, size = 3) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
#geom_smooth(method = "lm", se = FALSE)+
#scale_color_brewer(palette = "Set1") +
xlab("Difference in Explained Risk") +
ylab("Iterations(Binary) - Iterations(Ridge)") +
labs(color = "Signal-to-Noise Ratio", fill = "Signal-to-Noise Ratio", shape = "Number of Rows") +
theme_minimal(base_family = "Gyre Bonum") +
#theme_minimal() +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)#,
#legend.position = c(.95, .95),
#legend.justification = c("right", "top")
) +
facet_grid(factor(paste0("# Classes: ", nclasses), levels = paste0("# Classes: ", c(5,10,20))) ~ ncolsf)
#gg_oob = ggExtra::ggMarginal(gg_oob, type = "density", groupFill = TRUE, groupColour = TRUE, margins = "y")
dinA4width = 210 * font_scale
ggsave(plot = gg_oob, filename = "categorical_oob.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Dependency between Iters(Ridge) ~ Iters(Binary):
# -----------------------------------------------------------
# Wide table with one iteration column per method; runs where the binary
# encoding hit the 20000-iteration cap are dropped.
df_cat_iter = df_cat %>%
select(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls, method, iterations) %>%
pivot_wider(names_from = "method", values_from = "iterations") %>%
mutate(ptotal = ncols + ncolsnoise) %>%
filter(binary < 20000)
# For every (ptotal, nrows) cell: fit a no-intercept regression
# binary ~ ridge * nclasses, then keep -- per class count -- the observation
# with the largest ridge iteration count together with the fitted slope.
# These become the labels drawn into the scatter facets below.
ll_temp = list()
k = 1
for (p in unique(df_cat_iter$ptotal)) {
for (n in unique(df_cat_iter$nrows)) {
temp = df_cat_iter %>% filter(ptotal == p, nrows == n) %>% mutate(nclasses = as.factor(nclasses)) %>% select(binary, ridge, nclasses, ptotal, nrows)
mod = lm(binary ~ 0 + ridge*nclasses, data = temp)
params = coef(mod)
# Right-most point per class count: anchor position for the slope label.
temp_max = temp %>% group_by(nclasses) %>% filter(ridge == max(ridge))
ll_empty = list()
for (i in seq_len(nrow(temp_max))) {
pred = predict(mod, temp_max[i,])
if (temp_max[i,"nclasses",drop = TRUE] %in% mod$xlevels$nclasses) {
# Name of the interaction coefficient for this class level.
ie = paste0("ridge:nclasses", temp_max[i,"nclasses",drop=TRUE])
if (ie %in% names(params)) {
# Slope for a non-reference level = base slope + interaction term.
ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"] + params[ie])
#if (is.na(coef(summary(mod))[,"Pr(>|t|)"][ie])) {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"])
#} else {
#if (coef(summary(mod))[,"Pr(>|t|)"][ie] < 0.05) {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"] + params[ie])
#} else {
#ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = NA)
#}
#}
} else {
# Reference level: slope is the base coefficient alone.
ll_empty[[i]] = cbind(temp_max[i,], pred = pred, label = params["ridge"])
}
}
}
preds = do.call(rbind, ll_empty)
preds$labels = as.character(round(preds$label, 2))
ll_temp[[k]] = preds
k = k + 1
}
}
df_labels = do.call(rbind, ll_temp)
# Scatter of binary vs. ridge iterations with per-class regression lines,
# the identity line for reference, and the slope labels from df_labels.
gg_iter = df_cat_iter %>%
ggplot(aes(x = ridge, y = binary, color = as.factor(nclasses))) +
geom_point(alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE) +
# Identity line: points above it need more iterations with binary encoding.
geom_abline(intercept = 0, slope = 1, color = "dark red", linetype = "dashed", alpha = 0.5) +
ggrepel::geom_label_repel(data = df_labels, aes(x = ridge, y = binary, fill = factor(nclasses, levels = c("5", "10", "20")), label = labels),
colour = "white", fontface = "bold", show.legend = FALSE) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
#scale_color_brewer(palette = "Set1") +
#scale_fill_brewer(palette = "Set1") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 9 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
xlab("Iterations (Ridge)") +
ylab("Iterations (Binary)") +
labs(color = "Number of\nclasses\nper feature") +
scale_x_continuous(breaks = c(0, 8000)) +
facet_grid(factor(paste0("# Rows:\n", nrows), levels = paste0("# Rows:\n", c(5000,10000,20000,50000,100000))) ~ factor(paste0("# p:\n", ptotal), levels = paste0("# p:\n", sort(unique(df_labels$ptotal)))))
dinA4width = 210 * font_scale
ggsave(plot = gg_iter, filename = "categorical_iters.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Meta view: density of the fitted slopes (binary ~ ridge iterations) per
# number of classes, faceted by the total number of features.
gg_iter_meta = df_labels %>%
ggplot(aes(x = label, color = nclasses, fill = nclasses)) +
geom_density(alpha = 0.5) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
#scale_color_brewer(palette = "Set1") +
#scale_fill_brewer(palette = "Set1") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 9 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
xlab("Slope: Iterations(Binary) ~ Iterations(Ridge)") +
ylab("Density") +
labs(color = "Number of\nclasses\nper feature", fill = "Number of\nclasses\nper feature") +
facet_wrap(. ~ factor(paste0("# Features: ", ptotal), levels = paste0("# Features: ", sort(unique(df_labels$ptotal)))))
dinA4width = 210 * font_scale
ggsave(plot = gg_iter_meta, filename = "categorical_iters_meta.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Check how well noise categories are "NOT" estimated
# ---------------------------------------------------
# For every benchmark result file: re-simulate the data (seeded), transform
# the stored coefficients of both encodings back to category parameters and
# compute noise-class MSEs, additionally hard-thresholding the ridge
# estimates at cutoffs 0.01, 0.5 and 1.
#load("performance/xxx-n100000-p5-pnoise5-snr10-rep15-nclasses20-informative-classes10.Rda")
#set.seed(bm_extract$data_seed)
#dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
#real_params = dat$cat_param
#est_params = bm_extract$coef_ridge[[2]]
#names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
#est_params = transformRidgeToParam(est_params, dat$data)
#getNoiseMSE(real_params, est_params)
#est_params = bm_extract$coef_binary[[2]]
#est_params = transformBinaryToParam(est_params)
#getNoiseMSE(real_params, est_params)
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
#for (fn in sample(files, 200, FALSE)) {
for (fn in files) {
cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
# Skip runs without non-informative classes (nothing to "not select").
# Note: k advances for every file, so skipped files leave NULL entries in
# ll_rows; do.call(rbind, ...) below silently drops them.
if(! grepl(pattern = "informative-classes0", x = fn)) {
load(fn)
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
real_params = dat$cat_param
est_params = bm_extract$coef_ridge[[2]]
names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
est_params = transformRidgeToParam(est_params, dat$data)
# Three MSE variants per estimate; the two logical flags select which
# classes enter the MSE -- presumably (with noise / only noise); TODO
# confirm against the getNoiseMSE() definition.
mse_ridge = getNoiseMSE(real_params, est_params, FALSE)
mse_ridge_wn = getNoiseMSE(real_params, est_params, TRUE)
mse_ridge_just_wn = getNoiseMSE(real_params, est_params, TRUE, TRUE)
# Hard-threshold ridge estimates: set effects below the cutoff to zero.
ridge_cutoff = 0.01
est_params_ridge_cutoff001 = lapply(est_params, function (p) {
p$means = ifelse (abs(p$means) > ridge_cutoff, p$means, 0)
p
})
mse_ridge_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, FALSE)
mse_ridge_wn_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, TRUE)
mse_ridge_just_wn_cutoff001 = getNoiseMSE(real_params, est_params_ridge_cutoff001, TRUE, TRUE)
ridge_cutoff = 0.5
est_params_ridge_cutoff05 = lapply(est_params, function (p) {
p$means = ifelse (abs(p$means) > ridge_cutoff, p$means, 0)
p
})
mse_ridge_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, FALSE)
mse_ridge_wn_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, TRUE)
mse_ridge_just_wn_cutoff05 = getNoiseMSE(real_params, est_params_ridge_cutoff05, TRUE, TRUE)
ridge_cutoff = 1
est_params_ridge_cutoff1 = lapply(est_params, function (p) {
p$means = ifelse (abs(p$means) > ridge_cutoff, p$means, 0)
p
})
mse_ridge_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, FALSE)
mse_ridge_wn_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, TRUE)
mse_ridge_just_wn_cutoff1 = getNoiseMSE(real_params, est_params_ridge_cutoff1, TRUE, TRUE)
# Same metrics for the binary encoding (no cutoff variants).
est_params = bm_extract$coef_binary[[2]]
est_params = transformBinaryToParam(est_params)
mse_binary = getNoiseMSE(real_params, est_params, FALSE)
mse_binary_wn = getNoiseMSE(real_params, est_params, TRUE)
mse_binary_just_wn = getNoiseMSE(real_params, est_params, TRUE, TRUE)
# One row per method (binary, ridge, three ridge cutoffs), so the ridge
# timings are repeated for the cutoff variants.
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
nclasses = bm_extract$config_classes$ncls[1],
nnoninfocls = bm_extract$config_classes["nic"][1,1],
time_init = c(bm_extract$time_binary["init.elapsed"], rep(bm_extract$time_ridge["init.elapsed"], 4)),
time_fit = c(bm_extract$time_binary["fit.elapsed"], rep(bm_extract$time_ridge["fit.elapsed"], 4)),
method = c("binary", "ridge", "ridge_cutoff001", "ridge_cutoff05", "ridge_cutoff1"),
mse = c(mse_binary_wn$mean, mse_ridge_wn$mean, mse_ridge_wn_cutoff001$mean, mse_ridge_wn_cutoff05$mean, mse_ridge_wn_cutoff1$mean),
mse_with_noise = c(mse_binary$mean, mse_ridge$mean, mse_ridge_cutoff001$mean, mse_ridge_cutoff05$mean, mse_ridge_cutoff1$mean),
mse_noise = c(mse_binary_just_wn$mean, mse_ridge_just_wn$mean, mse_ridge_just_wn_cutoff001$mean, mse_ridge_just_wn_cutoff05$mean, mse_ridge_just_wn_cutoff1$mean),
nnotselected = c(mse_binary$n_not_sel, mse_ridge$n_not_sel, mse_ridge_cutoff001$n_not_sel, mse_ridge_cutoff05$n_not_sel, mse_ridge_cutoff1$n_not_sel),
nwrongnotselected = c(mse_binary$n_wrong_not_sel, mse_ridge$n_wrong_not_sel, mse_ridge_cutoff001$n_wrong_not_sel, mse_ridge_cutoff05$n_wrong_not_sel, mse_ridge_cutoff1$n_wrong_not_sel)
)
}
k = k+1
}
# Cache the extraction so the expensive loop can be skipped on re-runs.
save(ll_rows, file = "ll_rows_cat_mses2.Rda")
load("ll_rows_cat_mses2.Rda")
df_cat_mses = do.call(rbind, ll_rows)
df_cat_mses$method = factor(df_cat_mses$method)
# Relabel methods; this relies on the alphabetical level order of the
# original lowercase names ("binary", "ridge", "ridge_cutoff001", ...).
levels(df_cat_mses$method) = c("Binary", "Ridge", "Ridge (cutoff <0.01)", "Ridge (cutoff <0.5)", "Ridge (cutoff <1)")
# Long format over the three mse* columns for a grouped boxplot.
df_bp = df_cat_mses %>%
pivot_longer(cols = starts_with("mse")) %>%
mutate(sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))) %>%
mutate(mse = factor(name))
# Levels sort as "mse", "mse_noise", "mse_with_noise".
# NOTE(review): verify the label mapping -- "MSE of\ninformative classes" is
# assigned to the column named mse_with_noise; confirm this matches the
# intended semantics of the extraction above.
levels(df_bp$mse) = c("MSE", "MSE of\nnoise classes", "MSE of\ninformative classes")
gg = df_bp %>%
ggplot(aes(x = mse, y = value, fill = method, color = method)) +
geom_boxplot(alpha = 0.2) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("") +
ylab("MSE") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
facet_grid(sn_ratiof ~ .) #, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg, filename = "categorical_mse.pdf", width = dinA4width * 2/3 * 0.7, height = dinA4width * 2/3 * 0.5, units = "mm")
# Interactive sanity check of the extracted table (leftover debug output).
head(df_cat_mses)
# Median fraction of (correctly / wrongly) not selected noise classes per
# configuration; fractions are relative to the number of non-informative
# classes (NaN/Inf where nnoninfocls is 0).
df_plt_cat_mses = df_cat_mses %>%
mutate(
rel_notselected = nnotselected / nnoninfocls,
rel_nwrongnotselected = nwrongnotselected / nnoninfocls
) %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses, sn_ratio) %>%
summarize(
rel_notselected = median(rel_notselected, na.rm = TRUE),
rel_nwrongnotselected = median(rel_nwrongnotselected, na.rm = TRUE)
) %>%
mutate(
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
)
dim(df_plt_cat_mses)
# Convex hulls around the (FPR, TPR) points were explored but abandoned;
# the code is kept fully commented out for reference. (Previously one
# filter() line in the second variant was left uncommented, leaving a
# dangling `filter(...) %>%` pipe that broke the script -- now commented.)
#hull = df_cat_mses %>%
#mutate(
#rel_notselected = nnotselected / nnoninfocls,
#rel_nwrongnotselected = nwrongnotselected / nnoninfocls,
#sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
#) %>%
#filter(!is.nan(rel_nwrongnotselected), !is.nan(rel_notselected)) %>%
#group_by(sn_ratiof, method) %>%
#slice(chull(rel_nwrongnotselected, rel_notselected))
#hull = df_plt_cat_mses %>%
#filter(!is.nan(rel_nwrongnotselected), !is.nan(rel_notselected)) %>%
#group_by(sn_ratiof, method) %>%
#slice(chull(rel_nwrongnotselected, rel_notselected))
# Per-configuration medians of the not-selected fractions for three selected
# methods; zeros and non-finite values are dropped so the kernel density
# estimate below is well defined.
tmp = df_cat_mses %>%
filter(method %in% c("Binary", "Ridge (cutoff <0.5)", "Ridge (cutoff <1)")) %>%
mutate(
rel_notselected = nnotselected / nnoninfocls,
rel_nwrongnotselected = nwrongnotselected / nnoninfocls,
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses, sn_ratiof) %>%
summarize(
rel_notselected = median(rel_notselected, na.rm = TRUE),
rel_nwrongnotselected = median(rel_nwrongnotselected, na.rm = TRUE)
) %>%
select(rel_nwrongnotselected, rel_notselected, method, sn_ratiof) %>%
filter(is.finite(rel_nwrongnotselected), is.finite(rel_notselected), rel_nwrongnotselected > 0, rel_notselected > 0) %>%
na.omit()
# For each method x SNR cell: 2d kernel density, refitted with a doubled
# bandwidth for a smoother surface, then the 5%-level contour polygon.
ll_dens = list()
k = 1
for (m in unique(tmp$method)) {
for (snr in unique(tmp$sn_ratiof)) {
kd = ks::kde(tmp[(tmp$method == m) & (tmp$sn_ratiof == snr), c("rel_nwrongnotselected", "rel_notselected")],
compute.cont=TRUE)
# Second fit reuses the pilot bandwidth matrix, doubled.
kd = ks::kde(tmp[(tmp$method == m) & (tmp$sn_ratiof == snr), c("rel_nwrongnotselected", "rel_notselected")],
compute.cont=TRUE, H = kd$H * 2)
# NOTE(review): `bgridsize` here is passed to with() (where it is
# ignored), not to ks::kde -- presumably it was meant as a kde argument;
# confirm and move it if a finer grid is intended.
cont = with(kd, contourLines(x=eval.points[[1]], y=eval.points[[2]],
z=estimate, levels=cont["5%"])[[1]], bgridsize=c(151,151))
ll_dens[[k]] = data.frame(cont, method = m, sn_ratiof = snr)
# Rename contour coordinates to match the plotting aesthetics.
ll_dens[[k]]$rel_nwrongnotselected = ll_dens[[k]]$x
ll_dens[[k]]$rel_notselected = ll_dens[[k]]$y
k = k + 1
}
}
df_dens = do.call(rbind, ll_dens)
# FPR/TPR-style scatter of not-selected noise classes with the 5%-level
# density contour polygons per method underneath, faceted by SNR.
gg = ggplot(mapping = aes(x = rel_nwrongnotselected, y = rel_notselected, shape = method, color = method, fill = method)) +
#ggplot(mapping = aes(x = rel_nwrongnotselected, y = rel_notselected, shape = method, color = method, fill = method)) +
geom_polygon(data = df_dens, alpha = 0.2, size = 0.1) +
geom_point(data = df_plt_cat_mses) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("Fraction of wrongly\nnot selected classes (FPR)") +
ylab("Fraction of correctly\nnot selected classes (TPR)") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
# Limits live inside the scales: previously xlim()/ylim() were added first
# and then silently replaced by the later scale_x/y_continuous() calls
# (ggplot2 keeps only the last scale per aesthetic), so the requested
# limits were dropped.
scale_x_continuous(breaks = seq(0, 1, 0.2), limits = c(min(df_dens$rel_nwrongnotselected), max(df_dens$rel_nwrongnotselected))) +
scale_y_continuous(breaks = seq(0, 1, 0.2), limits = c(min(df_dens$rel_notselected), max(df_dens$rel_notselected))) +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
dinA4width = 210 * font_scale
# NOTE(review): this file is overwritten by a later ggsave() that uses the
# same filename "categorical_noninfo_count.pdf".
ggsave(plot = gg, filename = "categorical_noninfo_count.pdf", width = dinA4width * 2/3 * 0.7, height = dinA4width * 2/3 * 0.5, units = "mm")
# Median number of not selected noise classes (with +/- one SD range) as a
# function of the number of rows, per method and class count.
gg_cat_selected = df_cat_mses %>%
#df_cat_mses %>%
group_by(nrows, method, nclasses, sn_ratio) %>%
summarize(
rel = median(nnotselected, na.rm = TRUE),
min_rel = median(nnotselected, na.rm = TRUE) - sd(nnotselected, na.rm = TRUE),
max_rel = median(nnotselected, na.rm = TRUE) + sd(nnotselected, na.rm = TRUE)
#pn_rel = ncolsnoise[1] / ncols[1],
) %>%
mutate(
#pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel),
#ncolsf = factor(paste0("# p: ", ncols), levels = paste0("# p: ", c(5, 10, 20, 50))),
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
#filter(ncols == 50, nclasses == 20) %>%
# NOTE(review): `rel / nclasses * 2` presumably normalizes by the number of
# non-informative classes assuming it is nclasses / 2 -- confirm; otherwise
# dividing by nnoninfocls directly would be clearer.
ggplot(aes(x = nrows, y = rel / nclasses * 2, linetype = as.factor(method), color = as.factor(nclasses))) +
geom_linerange(aes(ymin = min_rel / nclasses * 2, ymax = max_rel / nclasses * 2), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of rows\n(log10 Scale)") +
ylab("Fraction of not selected\nnon-informative classes") +
labs(linetype = "Method", fill = "Method", color = "Number of classes\nper feature") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat_mses$nrows), trans = "log10") +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
dinA4width = 210 * font_scale
# NOTE(review): same filename as the FPR/TPR figure saved above -- this call
# overwrites it; consider renaming one of the two outputs.
ggsave(plot = gg_cat_selected, filename = "categorical_noninfo_count.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Difference of the noise-class MSE between ridge and binary encoding as a
# function of the number of rows, per relative noise-feature share and
# class count, faceted by SNR and number of features.
gg_cat_mse = df_cat_mses %>%
group_by(nrows, ncols, ncolsnoise, nclasses, sn_ratio, rep, nnoninfocls) %>%
summarize(
pn_rel = ncolsnoise[1] / ncols[1],
# Fixed: the original referenced a non-existent column `noninfo_mse`
# (the table carries `mse_noise`) and the lowercase labels
# "ridge"/"binary", which no longer exist after the levels of `method`
# were renamed to "Binary", "Ridge", ... -- both made this summarize
# fail or yield all-NA.
mse_diff = median(mse_noise[method == "Ridge"], na.rm = TRUE) - median(mse_noise[method == "Binary"], na.rm = TRUE)
) %>%
# Harmonize the two encodings of "half as many noise features".
mutate(pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel)) %>%
mutate(
ncolsf = factor(paste0("# p: ", ncols), levels = paste0("# p: ", c(5, 10, 20, 50))),
pn_relf = factor(paste0("rel p\nnoise:\n", pn_rel), levels = paste0("rel p\nnoise:\n", c(0.5, 1, 2, 5))),
sn_ratiof = factor(paste0("SNR: ", sn_ratio), levels = paste0("SNR: ", c(0.1, 1, 10)))
) %>%
group_by(nrows, ncolsf, pn_rel, nclasses, sn_ratiof, nnoninfocls) %>%
summarize(
mmse_diff = median(mse_diff),
min_rel = median(mse_diff) - sd(mse_diff),
max_rel = median(mse_diff) + sd(mse_diff)
) %>%
ggplot(aes(x = nrows, y = mmse_diff, linetype = as.factor(pn_rel), color = as.factor(nclasses))) +
geom_linerange(aes(ymin = min_rel, ymax = max_rel), alpha = 0.5, position = position_dodge(width = 0.05)) +
geom_line(position = position_dodge(width = 0.05)) +
#geom_smooth(se = FALSE) +
#scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("MSE(Ridge) - MSE(Binary)\nof Non-Informative Classes") +
labs(linetype = "Relative number\nof noise classes", fill = "Method", color = "# Classes") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = unique(df_cat_mses$nrows), trans = "log10") +
facet_grid(sn_ratiof ~ ncolsf, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(plot = gg_cat_mse, filename = "categorical_noninfo_mse.pdf", width = dinA4width * 2/3, height = dinA4width * 2/3 * 0.6, units = "mm")
# Calculate MSE of estimated parameter:
# ---------------------------------------------------
# Second extraction pass: full parameter MSE (all classes, not only noise
# classes) for every result file, for both encodings.
files = list.files("performance", full.names = TRUE)
files = files[grep("xxx", files)]
ll_rows = list()
k = 1
for (fn in files) {
cat(as.character(Sys.time()), "Read: ", k , "/", length(files), "\n")
load(fn)
# Re-simulate the data with the stored seed to recover the true
# category parameters.
set.seed(bm_extract$data_seed)
dat = simCategoricalData(bm_extract$config$n, bm_extract$config$p, bm_extract$config$pnoise, nclasses = bm_extract$config_classes$ncls[1], ncnoise = bm_extract$config_classes$nic[1])
real_params = dat$cat_param
est_params = bm_extract$coef_ridge[[2]]
names(est_params) = vapply(names(est_params), FUN.VALUE = character(1L), FUN = function (nm) strsplit(nm, split = "_")[[1]][1])
est_params = transformRidgeToParam(est_params, dat$data)
mse_ridge = getCategoricalMSE(real_params, est_params, TRUE)
est_params = bm_extract$coef_binary[[2]]
est_params = transformBinaryToParam(est_params)
mse_binary = getCategoricalMSE(real_params, est_params, TRUE)
# Two rows per file: one for each method, in the same order as `mse`.
ll_rows[[k]] = data.frame(
date = bm_extract$date,
data_seed = bm_extract$data_seed,
nrows = bm_extract$config$n,
ncols = bm_extract$config$p,
sn_ratio = bm_extract$config$sn_ratio,
rep = bm_extract$config$rep,
ncolsnoise = bm_extract$config$pnoise,
nclasses = bm_extract$config_classes$ncls[1],
nnoninfocls = bm_extract$config_classes["nic"][1,1],
time_init = c(bm_extract$time_binary["init.elapsed"], bm_extract$time_ridge["init.elapsed"]),
time_fit = c(bm_extract$time_binary["fit.elapsed"], bm_extract$time_ridge["fit.elapsed"]),
method = c("binary", "ridge"),
mse = c(mse_binary, mse_ridge)
)
k = k+1
}
# Cache the (expensive) extraction.
save(ll_rows, file = "ll_rows_cat_mses_full.Rda")
load("ll_rows_cat_mses_full.Rda")
df_mses = do.call(rbind, ll_rows)
# Boxplot of the per-configuration MSE difference; the pipeline result is
# not assigned, so the plot prints when run interactively.
df_mses %>%
# Drop extreme outliers before plotting.
filter(mse < 300) %>%
group_by(nrows, ncols, sn_ratio, rep, ncolsnoise, nclasses, nnoninfocls) %>%
mutate(
pn_rel = ncolsnoise[1] / ncols[1],
clsn_rel = nnoninfocls[1] / nclasses[1],
mse_diff = median(mse[method == "ridge"], na.rm = TRUE) - median(mse[method == "binary"], na.rm = TRUE)
) %>%
mutate(
pn_rel = ifelse(pn_rel == 0.4, 0.5, pn_rel),
clsn_rel = ifelse(clsn_rel == 0.4, 0.5, clsn_rel)
) %>%
ggplot(aes(x = as.factor(nrows), y = mse_diff, fill = as.factor(pn_rel))) +
geom_boxplot() +
scale_fill_viridis(discrete = TRUE) +
#scale_color_viridis(discrete = TRUE) +
#scale_fill_brewer(palette = "Set1") +
#scale_color_brewer(palette = "Set1") +
xlab("Number of Rows\n(log10 Scale)") +
ylab("MSE(Ridge) - MSE(Binary)\nof Non-Informative Classes") +
labs(linetype = "Relative number\nof noise classes", fill = "Method", color = "# Classes") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
#facet_grid(sn_ratio + nnoninfocls ~ ncols + ncolsnoise, scales = "free_y")
facet_grid(paste0("SNR: ", sn_ratio) ~ nclasses, scales = "free_y")
|
# Bead-level luminex data container.
# `blum` does not extend ExpressionSet because its exprs slot holds a
# data.table (one row per bead), not the matrix ExpressionSet requires.
setClass("blum",
representation=representation( #cannot be ExpressionSet because exprs is not a matrix
## Contains information about samples
phenoData="AnnotatedDataFrame",
## contains information about analytes
featureData="AnnotatedDataFrame",
## list of bead level data
## Stored as samples -> analytes
exprs="data.table")
)
# Summarized luminex data: an ExpressionSet extended with the measurement
# unit, the standard-curve formula, its inverse function and fit results.
setClass("slum",
contains="ExpressionSet",
representation(unit="character", formula="formula", inv="function", fit="data.frame"))
# Virtual class union so methods can dispatch on either representation.
setClassUnion("blumORslum", members=c("blum", "slum"))
| /LumiR/R/AllClasses.R | no_license | RGLab/luminex_R | R | false | false | 665 | r | # #Bead level information
# Bead-level luminex data container (duplicate copy of the definitions
# earlier in this file). `blum` does not extend ExpressionSet because its
# exprs slot holds a data.table (one row per bead), not a matrix.
setClass("blum",
representation=representation( #cannot be ExpressionSet because exprs is not a matrix
## Contains information about samples
phenoData="AnnotatedDataFrame",
## contains information about analytes
featureData="AnnotatedDataFrame",
## list of bead level data
## Stored as samples -> analytes
exprs="data.table")
)
# Summarized luminex data: ExpressionSet plus unit, standard-curve formula,
# its inverse and the fit results.
setClass("slum",
contains="ExpressionSet",
representation(unit="character", formula="formula", inv="function", fit="data.frame"))
# Virtual union for dispatching on either representation.
setClassUnion("blumORslum", members=c("blum", "slum"))
#' Nonparametric MLE of a log-concave density via a constrained Newton
#' method.
#'
#' @param x Numeric vector of observations.
#' @param lcd Optional starting estimate (an object as built by new.lcd());
#'   defaults to the flat density (alpha = 0) on the data range.
#' @param maxit Maximum number of Newton iterations.
#' @param tol Stop once the log-likelihood gain falls below this value.
#' @return List with the fitted `lcd`, its log-likelihood `ll`, the number
#'   of iterations, the maximal gradient at termination and a convergence
#'   flag (0 = converged, 1 = iteration limit reached).
#'
#' Uses helpers defined elsewhere in the project (x.weight, new.lcd,
#' logLik.lcd, maxima.gradient, line.lcd, simplify.lcd).
#' Calls into the 'lsei' package are namespaced (lsei::indx, lsei::pnnls)
#' instead of attaching it with library() inside the function, which would
#' modify the caller's search path as a side effect; this also matches the
#' style of the sibling functions in this file.
cnmlcd = function (x, lcd, maxit = 100, tol = 1e-06) {
  # Relative eigenvalue cutoff for the reduced-rank Hessian factorization.
  lambda = 1e-15
  n = length(x)
  # Collapse duplicated observations into unique values plus weights.
  xw = x.weight(x)
  x = xw$x
  w = xw$w
  nx = length(x)
  lower = x[1]
  upper = x[nx]
  # Cumulative terms reused when evaluating the gradient at the knots.
  attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
  if (missing(lcd)) lcd = new.lcd(alpha = 0, lower = lower, upper = upper)
  ll = logLik.lcd(lcd, x, w)
  convergence = 1
  ll.old = -Inf
  for (i in 1:maxit) {
    # Converged once the log-likelihood improvement drops below tol.
    if (ll <= ll.old + tol) {
      convergence = 0
      break
    }
    lcd.old = lcd
    ll.old = ll
    # Local maxima of the gradient become candidate new knots (with zero
    # initial mass).
    g = maxima.gradient(lcd, x, w = w)
    if (length(g$theta) != 0) {
      nsp = g$theta
      nsl = length(nsp)
      if (nsl >= 1) lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
    }
    knots = c(lcd$lower, lcd$theta)
    nk = length(knots)
    cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
    mu = cpkr[2, ] - cpkr[1, ] * knots
    grad = n * mu + attr(x, "xx")[lsei::indx(knots, x)]
    mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) *
      cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
    mm[upper.tri(mm)] = 0
    mm = mm + t(mm)
    diag(mm) = diag(mm)/2
    # Factor the (negative) Hessian as R'R, keeping only the eigen
    # directions above lambda times the largest eigenvalue.
    H = mm - tcrossprod(mu)
    e = eigen(H)
    v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
    kr = length(v2)
    R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
    p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
    b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
    # Least squares with the first coefficient (alpha) unconstrained and
    # the knot masses pi constrained non-negative.
    r1 = lsei::pnnls(R, b, 1)
    lcd1 = lcd
    lcd1$alpha = -r1$x[1]
    lcd1$pi = r1$x[-1]
    # Line search between the current and the proposed estimate.
    r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
    lcd = r$lcd
    ll = r$ll
    # Remove knots whose mass shrank to exactly zero.
    if (any(lcd$pi == 0)) lcd = simplify.lcd(lcd)
  }
  list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence)
}
# Log-concave density MLE with the mode constrained to lie at `m`.
# x: observations; w: optional weights (computed from ties if missing);
# m: prescribed mode location; maxit/tol: Newton iteration controls.
# Returns the same list structure as cnmlcd().
# Relies on project helpers: x.weight, new.lcd, logLik.lcd,
# maxima.gradient, line.lcd, simplify.lcd.
cnmlcd.mode = function (x, w, m=0, lcd, maxit=100, tol=1e-10) {
# Relative eigenvalue cutoff for the reduced-rank Hessian factorization.
lambda = 1e-15
n = length(x)
if (missing(w)) {
xw = x.weight(x)
x = xw$x
w = xw$w
}
# Ensure m is a data point (weight 0) so it can serve as a knot location.
if (!(m %in% x)) {
x = c(x, m)
w = c(w, 0)
o = order(x)
x = x[o]
w = w[o]
}
nx = length(x)
lower = x[1]
upper = x[nx]
# Degenerate case (<= 2 distinct points): return the flat density.
if (nx <= 2) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
return(list(lcd = lcd, ll = logLik.lcd(lcd, x, w), num.iterations = 0, max.gradient = 0, convergence = 0))
}
# Cumulative terms reused in the gradient evaluation.
attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
# Starting value: flat density, with an extra knot at 0 when the mode is
# interior.
if (missing(lcd)) {
if (m %in% c(lower, upper)) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
} else {
lcd = new.lcd(alpha=0, theta=0, pi=0, lower=lower, upper=upper)
}
}
ll = logLik.lcd(lcd, x, w)
convergence = 1
ll.old = -Inf
for (i in 1:maxit) {
if (ll <= ll.old + tol) {
convergence = 0
break
}
lcd.old = lcd
ll.old = ll
# Candidate new knots at local maxima of the gradient.
g = maxima.gradient(lcd, x, w = w)
if (length(g$theta) != 0) {
nsp = g$theta
nsl = length(nsp)
if (nsl >= 1) {
lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
}
}
knots = c(lcd$lower, lcd$theta)
nk = length(knots)
cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
mu = cpkr[2, ] - cpkr[1, ] * knots
grad = n * mu + attr(x, "xx")[lsei::indx(knots, x)]
mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) * cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
mm[upper.tri(mm)] = 0
mm = mm + t(mm)
diag(mm) = diag(mm)/2
# H is the (negative) Hessian; factor as R'R via truncated eigendecomp.
H = mm - tcrossprod(mu)
e = eigen(H)
v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
kr = length(v2)
R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
# Inequality constraints: g1 keeps each pi >= 0 (note diag(rep(nk-1))
# is equivalent to diag(nk-1), the (nk-1)-identity); g2 encodes the
# mode restriction via sign constraints on the knots around m.
g1 = cbind(0, diag(rep(nk-1)))
knot_m = sum(knots < m)
if (sum(knots %in% m) == 0) {
g2 = matrix(0, nrow=1, ncol=nk)
g2[1, 1:(knot_m-1)] = -1
} else {
g2 = matrix(0, nrow=2, ncol=nk)
g2[1, knots < m] = -1
g2[2, 1:(knot_m +1)] = 1
}
G = rbind(g1, g2)
# NOTE(review): H is reused here as the constraint right-hand side,
# shadowing the Hessian above (no longer needed at this point).
H = rep(0, nrow(G))
lcd1 = lcd
# Try limSolve's inequality-constrained solver first; on error, the
# handler flips `flag` via <<- and we fall back to lsei::lsei with
# equality constraints.
flag = TRUE
r1 = tryCatch(limSolve::lsei(A=R, B=b, G=G, H=H, type=2), error = function(e) flag <<- FALSE)
if (!flag) {r2 = lsei::lsei(a=R, b=b, e=G, f=H)} else {r2 =NULL}
if (flag) {lcd1$alpha = -r1$X[1];lcd1$pi = r1$X[-1]} else {lcd1$alpha = -r2[1];lcd1$pi = r2[-1]}
# Line search between current and proposed estimate.
r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
lcd = r$lcd
ll = r$ll
if (any(lcd$pi == 0))
lcd = simplify.lcd(lcd)
}
# if (! m %in% c(lower, lcd$theta, upper))
# lcd = new.lcd(lcd$alpha, theta = c(lcd$theta, m), pi = c(lcd$pi, 0), lower = lcd$lower, upper = lcd$upper)
return(list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence))
}
# Log-concave density MLE that is symmetric about `m`: the fit is done on
# the folded data |x| and mirrored afterwards. Arguments and return value
# mirror cnmlcd.mode(). Relies on the same project helpers.
cnmlcd.symm = function (x, w, m=0, lcd, maxit=100, tol=1e-10) {
lambda = 1e-15
n = length(x)
# Fold the sample onto the positive half line.
x = abs(x)
if (missing(w)) {
xw = x.weight(x)
x = xw$x
w = xw$w
}
# Ensure m is a data point (weight 0) so it can serve as a knot location.
if (!(m %in% x)) {
x = c(x, m)
w = c(w, 0)
o = order(x)
x = x[o]
w = w[o]
}
nx = length(x)
lower = x[1]
upper = x[nx]
# Degenerate case (<= 2 distinct points): return the flat density.
if (nx <= 2) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
return(list(lcd = lcd, ll = logLik.lcd(lcd, x, w), num.iterations = 0, max.gradient = 0, convergence = 0))
}
attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
if (missing(lcd)) {
if (m %in% c(lower, upper)) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
} else {
lcd = new.lcd(alpha=0, theta=0, pi=0, lower=lower, upper=upper)
}
}
ll = logLik.lcd(lcd, x, w)
convergence = 1
ll.old = -Inf
for (i in 1:maxit) {
if (ll <= ll.old + tol) {
convergence = 0
break
}
lcd.old = lcd
ll.old = ll
# Candidate new knots at local maxima of the gradient.
g = maxima.gradient(lcd, x, w = w)
if (length(g$theta) != 0) {
nsp = g$theta
nsl = length(nsp)
if (nsl >= 1) {
lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
}
}
knots = c(lcd$lower, lcd$theta)
nk = length(knots)
cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
mu = cpkr[2, ] - cpkr[1, ] * knots
grad = n * mu + attr(x, "xx")[lsei::indx(knots, x)]
mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) * cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
mm[upper.tri(mm)] = 0
mm = mm + t(mm)
diag(mm) = diag(mm)/2
H = mm - tcrossprod(mu)
e = eigen(H)
v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
kr = length(v2)
R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
########
# r1 = pnnls(R, b, 1)
# Inequality constraints: C1 keeps each pi >= 0 (diag(rep(nk-1)) is the
# (nk-1)-identity); C2 constrains the knots around the mode m.
C1 = cbind(0, diag(rep(nk-1)))
C2 = matrix(0, nrow=2, ncol=nk)
C2[1, knots < m] = -1
C2[2, knots <= m] = 1
C = rbind(C1, C2)
D = rep(0, nrow(C))
r1 = limSolve::lsei(A=R, B=b, G=C, H=D, type=2)
########
lcd1 = lcd
lcd1$alpha = -r1$X[1]
lcd1$pi = r1$X[-1]
# Line search between current and proposed estimate.
r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
lcd = r$lcd
ll = r$ll
if (any(lcd$pi == 0))
lcd = simplify.lcd(lcd)
}
# Make sure the mode itself is a knot (with zero mass) before mirroring.
if (! m %in% c(lower, lcd$theta, upper))
lcd = new.lcd(lcd$alpha, theta = c(lcd$theta, m), pi = c(lcd$pi, 0), lower = lcd$lower, upper = lcd$upper)
# symmetric constraint added
# Mirror the half-line fit around 0 to obtain the symmetric density and
# correct the log-likelihood for the folding (density halves on each side).
lcd$alpha = sum(lcd$pi)
lcd$theta = sort(c(-lcd$theta,lcd$theta))
lcd$pi = c(rev(lcd$pi),lcd$pi)
lcd$C = 2*lcd$C
lcd$lower = -lcd$upper
c0 = c(rev(lcd$coef[1,-1]),lcd$coef[1,])
c1 = c(-rev(lcd$coef[2,-1]),lcd$coef[2,])
lcd$coef = matrix(c(c0,c1),nrow=2,ncol=2*dim(lcd$coef)[2]-1,byrow=T,dimnames=dimnames(lcd$coef))
ll = ll - n*log(2)
return(list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence))
}
| /modlr_lc/codes/cnmlcd.R | no_license | checkitsoso/modlr_lc | R | false | false | 8,000 | r | cnmlcd = function (x, lcd, maxit = 100, tol = 1e-06) {
# Interior of a duplicate copy of cnmlcd() (the opening line of this
# definition is fused into the preceding dataset-delimiter row).
# NOTE(review): library() inside a function body attaches 'lsei' as a side
# effect of the call; namespaced calls (lsei::pnnls, lsei::indx) as used by
# the other copies in this file would be preferable.
library(lsei)
lambda = 1e-15
n = length(x)
# Collapse duplicated observations into unique values plus weights.
xw = x.weight(x)
x = xw$x
w = xw$w
nx = length(x)
lower = x[1]
upper = x[nx]
# Cumulative terms reused in the gradient evaluation.
attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
if (missing(lcd)) lcd = new.lcd(alpha = 0, lower = lower, upper = upper)
ll = logLik.lcd(lcd, x, w)
convergence = 1
ll.old = -Inf
for (i in 1:maxit) {
# Converged once the log-likelihood gain drops below tol.
if (ll <= ll.old + tol) {
convergence = 0
break
}
lcd.old = lcd
ll.old = ll
# Candidate new knots at local maxima of the gradient.
g = maxima.gradient(lcd, x, w = w)
if (length(g$theta) != 0) {
nsp = g$theta
nsl = length(nsp)
if (nsl >= 1) lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
}
knots = c(lcd$lower, lcd$theta)
nk = length(knots)
cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
mu = cpkr[2, ] - cpkr[1, ] * knots
grad = n * mu + attr(x, "xx")[indx(knots, x)]
mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) *
cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
mm[upper.tri(mm)] = 0
mm = mm + t(mm)
diag(mm) = diag(mm)/2
# Reduced-rank factorization of the (negative) Hessian.
H = mm - tcrossprod(mu)
e = eigen(H)
v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
kr = length(v2)
R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
# Alpha unconstrained, knot masses non-negative.
r1 = pnnls(R, b, 1)
lcd1 = lcd
lcd1$alpha = -r1$x[1]
lcd1$pi = r1$x[-1]
# Line search between current and proposed estimate.
r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
lcd = r$lcd
ll = r$ll
if (any(lcd$pi == 0)) lcd = simplify.lcd(lcd)
}
list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence)
}
# Mode-constrained variant of cnmlcd(): ML estimation of a log-concave
# density with the mode restricted to the point m (default 0).  Same
# knot-insertion / quadratic-subproblem scheme as cnmlcd(), but the
# subproblem is solved under linear inequality constraints encoding the
# mode restriction.
cnmlcd.mode = function (x, w, m=0, lcd, maxit=100, tol=1e-10) {
# Relative eigenvalue cutoff for dropping numerically-zero Hessian directions.
lambda = 1e-15
n = length(x)
# If no weights are supplied, collapse x into unique points with weights
# (presumably what x.weight() does -- confirm).
if (missing(w)) {
xw = x.weight(x)
x = xw$x
w = xw$w
}
# Ensure the mode m is among the support points (with zero weight), so the
# constraint construction below can locate it.
if (!(m %in% x)) {
x = c(x, m)
w = c(w, 0)
o = order(x)
x = x[o]
w = w[o]
}
nx = length(x)
lower = x[1]
upper = x[nx]
# Degenerate input: return the uniform density without iterating.
if (nx <= 2) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
return(list(lcd = lcd, ll = logLik.lcd(lcd, x, w), num.iterations = 0, max.gradient = 0, convergence = 0))
}
# Precomputed cumulative quantity used by the gradient at candidate knots.
attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
# Default start: uniform; if m is interior, start with a knot at 0.
if (missing(lcd)) {
if (m %in% c(lower, upper)) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
} else {
lcd = new.lcd(alpha=0, theta=0, pi=0, lower=lower, upper=upper)
}
}
ll = logLik.lcd(lcd, x, w)
convergence = 1  # 1 = not converged; set to 0 on successful termination
ll.old = -Inf
for (i in 1:maxit) {
# Stop once the log-likelihood improvement falls below tol.
if (ll <= ll.old + tol) {
convergence = 0
break
}
lcd.old = lcd
ll.old = ll
# Insert new knots at positive local maxima of the gradient.
g = maxima.gradient(lcd, x, w = w)
if (length(g$theta) != 0) {
nsp = g$theta
nsl = length(nsp)
if (nsl >= 1) {
lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
}
}
knots = c(lcd$lower, lcd$theta)
nk = length(knots)
# Per-knot increments of the cumulative moment matrix cpk.
cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
mu = cpkr[2, ] - cpkr[1, ] * knots
grad = n * mu + attr(x, "xx")[lsei::indx(knots, x)]
mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) * cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
mm[upper.tri(mm)] = 0
mm = mm + t(mm)
diag(mm) = diag(mm)/2
H = mm - tcrossprod(mu)
# Factor H = R'R via the eigendecomposition, dropping tiny eigenvalues.
e = eigen(H)
v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
kr = length(v2)
R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
# Inequality constraints G x >= H for the subproblem:
# g1 = [0 | I] keeps the pi coefficients nonnegative (diag(rep(nk-1))
# builds an (nk-1)-identity; the leading zero column is for alpha);
# g2 presumably encodes the mode restriction at m via sign constraints on
# the coefficients on each side of m -- confirm against the paper/package.
g1 = cbind(0, diag(rep(nk-1)))
knot_m = sum(knots < m)
if (sum(knots %in% m) == 0) {
g2 = matrix(0, nrow=1, ncol=nk)
# NOTE(review): when knot_m <= 1, 1:(knot_m-1) is NOT empty in R
# (1:0 is c(1, 0)) -- verify this branch cannot be reached with knot_m <= 1.
g2[1, 1:(knot_m-1)] = -1
} else {
g2 = matrix(0, nrow=2, ncol=nk)
g2[1, knots < m] = -1
g2[2, 1:(knot_m +1)] = 1
}
G = rbind(g1, g2)
# NOTE(review): this reuses the name H (previously the Hessian-like matrix)
# for the constraint right-hand side passed to lsei below.
H = rep(0, nrow(G))
lcd1 = lcd
flag = TRUE
# Solve the inequality-constrained LS problem; on failure fall back to
# lsei::lsei.  NOTE(review): in the lsei package, e/f are EQUALITY
# constraints, unlike the inequality G/H used by limSolve -- confirm the
# fallback is intended to be stricter.
r1 = tryCatch(limSolve::lsei(A=R, B=b, G=G, H=H, type=2), error = function(e) flag <<- FALSE)
if (!flag) {r2 = lsei::lsei(a=R, b=b, e=G, f=H)} else {r2 =NULL}
if (flag) {lcd1$alpha = -r1$X[1];lcd1$pi = r1$X[-1]} else {lcd1$alpha = -r2[1];lcd1$pi = r2[-1]}
# Line search between the current and proposed estimates.
r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
lcd = r$lcd
ll = r$ll
# Remove knots whose mass became exactly zero.
if (any(lcd$pi == 0))
lcd = simplify.lcd(lcd)
}
# if (! m %in% c(lower, lcd$theta, upper))
# lcd = new.lcd(lcd$alpha, theta = c(lcd$theta, m), pi = c(lcd$pi, 0), lower = lcd$lower, upper = lcd$upper)
return(list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence))
}
# Symmetric variant of cnmlcd(): fits a log-concave density symmetric about
# m by folding the data onto |x|, fitting a one-sided constrained estimate,
# and finally reflecting the fitted object about m.
# NOTE(review): the reflection step at the end assumes m == 0 (it negates
# theta/upper directly) even though m is a parameter -- confirm m != 0 is
# supported before relying on it.
cnmlcd.symm = function (x, w, m=0, lcd, maxit=100, tol=1e-10) {
# Relative eigenvalue cutoff for dropping numerically-zero Hessian directions.
lambda = 1e-15
n = length(x)
# Fold the sample onto the positive half-line.
x = abs(x)
# If no weights are supplied, collapse x into unique points with weights
# (presumably what x.weight() does -- confirm).
if (missing(w)) {
xw = x.weight(x)
x = xw$x
w = xw$w
}
# Ensure the mode m is among the support points (with zero weight).
if (!(m %in% x)) {
x = c(x, m)
w = c(w, 0)
o = order(x)
x = x[o]
w = w[o]
}
nx = length(x)
lower = x[1]
upper = x[nx]
# Degenerate input: return the uniform density without iterating.
if (nx <= 2) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
return(list(lcd = lcd, ll = logLik.lcd(lcd, x, w), num.iterations = 0, max.gradient = 0, convergence = 0))
}
# Precomputed cumulative quantity used by the gradient at candidate knots.
attr(x, "xx") = rev(cumsum(rev(w))) * x - rev(cumsum(rev(x * w)))
# Default start: uniform; if m is interior, start with a knot at 0.
if (missing(lcd)) {
if (m %in% c(lower, upper)) {
lcd = new.lcd(alpha=0, lower=lower, upper=upper)
} else {
lcd = new.lcd(alpha=0, theta=0, pi=0, lower=lower, upper=upper)
}
}
ll = logLik.lcd(lcd, x, w)
convergence = 1  # 1 = not converged; set to 0 on successful termination
ll.old = -Inf
for (i in 1:maxit) {
# Stop once the log-likelihood improvement falls below tol.
if (ll <= ll.old + tol) {
convergence = 0
break
}
lcd.old = lcd
ll.old = ll
# Insert new knots at positive local maxima of the gradient.
g = maxima.gradient(lcd, x, w = w)
if (length(g$theta) != 0) {
nsp = g$theta
nsl = length(nsp)
if (nsl >= 1) {
lcd = new.lcd(lcd$alpha, c(lcd$theta, nsp), c(lcd$pi, double(nsl)), lcd$lower, lcd$upper)
}
}
knots = c(lcd$lower, lcd$theta)
nk = length(knots)
# Per-knot increments of the cumulative moment matrix cpk.
cpkr = lcd$cpk[, nk] - cbind(0, lcd$cpk[, -nk, drop = FALSE])
mu = cpkr[2, ] - cpkr[1, ] * knots
grad = n * mu + attr(x, "xx")[lsei::indx(knots, x)]
mm = cpkr[3, ] - (knots + rep(knots, rep.int(nk, nk))) * cpkr[2, ] + tcrossprod(knots) * cpkr[1, ]
mm[upper.tri(mm)] = 0
mm = mm + t(mm)
diag(mm) = diag(mm)/2
H = mm - tcrossprod(mu)
# Factor H = R'R via the eigendecomposition, dropping tiny eigenvalues.
e = eigen(H)
v2 = sqrt(e$values[e$values >= e$values[1] * lambda])
kr = length(v2)
R = t(e$vectors[, 1:kr, drop = FALSE]) * v2
p = grad/n + drop(c(-lcd$alpha, lcd$pi) %*% H)
b = drop(crossprod(e$vectors[, 1:kr, drop = FALSE], p))/v2
########
# r1 = pnnls(R, b, 1)
# Constrained replacement for the pnnls() call above:
# C1 = [0 | I] keeps the pi coefficients nonnegative; C2 presumably encodes
# the mode/symmetry restriction around m -- confirm against the paper.
C1 = cbind(0, diag(rep(nk-1)))
C2 = matrix(0, nrow=2, ncol=nk)
C2[1, knots < m] = -1
C2[2, knots <= m] = 1
C = rbind(C1, C2)
D = rep(0, nrow(C))
r1 = limSolve::lsei(A=R, B=b, G=C, H=D, type=2)
########
lcd1 = lcd
lcd1$alpha = -r1$X[1]
lcd1$pi = r1$X[-1]
# Line search between the current and proposed estimates.
r = line.lcd(lcd, lcd1, x, w = w, ll0 = ll.old)
lcd = r$lcd
ll = r$ll
# Remove knots whose mass became exactly zero.
if (any(lcd$pi == 0))
lcd = simplify.lcd(lcd)
}
# Make sure m itself is a knot of the one-sided solution before reflecting.
if (! m %in% c(lower, lcd$theta, upper))
lcd = new.lcd(lcd$alpha, theta = c(lcd$theta, m), pi = c(lcd$pi, 0), lower = lcd$lower, upper = lcd$upper)
# symmetric constraint added
# Reflect the one-sided fit about the mode: mirror knots, masses and the
# piecewise-linear coefficients, double the normalizing constant, and
# shift the log-likelihood by -n*log(2) for the folding of the data.
lcd$alpha = sum(lcd$pi)
lcd$theta = sort(c(-lcd$theta,lcd$theta))
lcd$pi = c(rev(lcd$pi),lcd$pi)
lcd$C = 2*lcd$C
lcd$lower = -lcd$upper
c0 = c(rev(lcd$coef[1,-1]),lcd$coef[1,])
c1 = c(-rev(lcd$coef[2,-1]),lcd$coef[2,])
lcd$coef = matrix(c(c0,c1),nrow=2,ncol=2*dim(lcd$coef)[2]-1,byrow=T,dimnames=dimnames(lcd$coef))
ll = ll - n*log(2)
return(list(lcd = lcd, ll = ll, num.iterations = i, max.gradient = g$gmax, convergence = convergence))
}
|
#' Lays out metricsgraphics widgets into a "grid", similar to
#' \code{grid.arrange} from \code{gridExtra}
#'
#' @param ... either individual \code{metricsgraphics} objects or a mixture of
#' individual \code{metricsgraphics} objects
#' and \code{list}s of \code{metricsgraphics} objects.
#' @param ncol how many columns in the grid
#' @param nrow how many rows in the grid
#' @param widths widths of the cells per row
#' @return \code{htmltools} tag with wrapped \code{metricsgraphics} objects suitable
#' for knitting with \code{echo=FALSE} & \code{results="asis"} or
#' rendering in Viewer with \code{html_print}
#' @note \code{mjs_grid} does not work in a Shiny context
#' @importFrom grDevices n2mfrow
#' @export
mjs_grid <- function(..., ncol=1, nrow=1, widths=1) {
  # Capture the unevaluated arguments so callers can pass individual
  # widgets, lists of widgets, or a mixture; flatten everything into one
  # list `params`.
  input_list <- as.list(substitute(list(...)))[-1L]
  params <- list()
  for (i in seq_along(input_list)) {   # seq_along(): safe when no args given
    x <- eval.parent(input_list[[i]])
    if (inherits(x, "list")) {
      # A list of widgets: append each element individually.
      for (j in seq_along(x)) {
        params[[length(params)+1]] <- eval(x[[j]])
      }
    } else {
      params[[length(params)+1]] <- x
    }
  }
  # Reject anything that is not a metricsgraphics htmlwidget.
  # vapply() (rather than sapply()) guarantees a logical result even for
  # empty input.
  if (!all(vapply(params, function(x) {
    inherits(x, c("metricsgraphics", "htmlwidget"))
  }, logical(1)))) {
    stop("All parameters must be metricsgraphics objects")
  }
  # No explicit grid shape requested: pick one that fits every widget.
  # Scalar condition, so use && rather than elementwise &.
  if (ncol == 1 && nrow == 1) {
    nm <- n2mfrow(length(params))
    nrow <- nm[1]
    ncol <- nm[2]
  }
  # Fall back to equal column widths when too few widths were supplied.
  if (length(widths) < ncol) widths <- rep(1/ncol, ncol)
  # FIX: compare the flattened widget count (params) against the grid
  # capacity; the old check used length(input_list), so a single over-long
  # list of widgets silently overflowed the grid.
  if (length(params) > ncol*nrow) {
    stop("Number of metricsgraphics objects > available grid slots")
  }
  max_width <- "100%"
  # Lay the widgets out row by row in an HTML table; cells past the end of
  # `params` error inside try() and are filled with a blank placeholder.
  GRID <- TABLE(lapply(seq(1, nrow*ncol, ncol), function(idx){
    TR(width="100%", style="width:100%", lapply(seq(idx, idx+ncol-1, 1), function(cell) {
      cell_val <- try(eval(params[[cell]]), silent=TRUE)
      if (!inherits(cell_val, c("metricsgraphics", "htmlwidget"))) {
        cell_val <- HTML(" ")
      }
      TD(style=sprintf("width:%3.2f%%", 100*widths[cell-idx+1]),
         width=sprintf("%3.2f%%", 100*widths[cell-idx+1]),
         cell_val)
    }))
  }), style=sprintf("width:%s", max_width), width=max_width)
  return(GRID)
}
# Minimal HTML tag helpers built on tag()/tags (presumably from htmltools --
# confirm).  Note each line also overwrites the corresponding entry of the
# `tags` list, which is a side effect on that shared object.
TABLE <- tags$table <- function(...) tag("table", list(...))
TR <- tags$tr <- function(...) tag("tr", list(...))
TD <- tags$td <- function(...) tag("td", list(...))
| /R/mjs_grid.r | no_license | cran/metricsgraphics | R | false | false | 2,329 | r | #' Lays out metricsgraphics widgets into a "grid", similar to
#' \code{grid.arrange} from \code{gridExtra}
#'
#' @param ... either individual \code{metricsgraphics} objects or a mixture of
#' individual \code{metricsgraphics} objects
#' and \code{list}s of \code{metricsgrahics} objects.
#' @param ncol how many columns in the grid
#' @param nrow how many rows in the grid
#' @param widths widths of the cells per row
#' @return \code{htmltools} tag with wrapped \code{metricsgraphics} objects suitable
#' for knitting with \code{echo=FALSE} & \code{results="asis"} or
#' rendering in Viewer with \code{html_print}
#' @note \code{mjs_grid} does not work in a Shiny context
#' @importFrom grDevices n2mfrow
#' @export
mjs_grid <- function(..., ncol=1, nrow=1, widths=1) {
  # Capture the unevaluated arguments so callers can pass individual
  # widgets, lists of widgets, or a mixture; flatten everything into one
  # list `params`.
  input_list <- as.list(substitute(list(...)))[-1L]
  params <- list()
  for (i in seq_along(input_list)) {   # seq_along(): safe when no args given
    x <- eval.parent(input_list[[i]])
    if (inherits(x, "list")) {
      # A list of widgets: append each element individually.
      for (j in seq_along(x)) {
        params[[length(params)+1]] <- eval(x[[j]])
      }
    } else {
      params[[length(params)+1]] <- x
    }
  }
  # Reject anything that is not a metricsgraphics htmlwidget.
  # vapply() (rather than sapply()) guarantees a logical result even for
  # empty input.
  if (!all(vapply(params, function(x) {
    inherits(x, c("metricsgraphics", "htmlwidget"))
  }, logical(1)))) {
    stop("All parameters must be metricsgraphics objects")
  }
  # No explicit grid shape requested: pick one that fits every widget.
  # Scalar condition, so use && rather than elementwise &.
  if (ncol == 1 && nrow == 1) {
    nm <- n2mfrow(length(params))
    nrow <- nm[1]
    ncol <- nm[2]
  }
  # Fall back to equal column widths when too few widths were supplied.
  if (length(widths) < ncol) widths <- rep(1/ncol, ncol)
  # FIX: compare the flattened widget count (params) against the grid
  # capacity; the old check used length(input_list), so a single over-long
  # list of widgets silently overflowed the grid.
  if (length(params) > ncol*nrow) {
    stop("Number of metricsgraphics objects > available grid slots")
  }
  max_width <- "100%"
  # Lay the widgets out row by row in an HTML table; cells past the end of
  # `params` error inside try() and are filled with a blank placeholder.
  GRID <- TABLE(lapply(seq(1, nrow*ncol, ncol), function(idx){
    TR(width="100%", style="width:100%", lapply(seq(idx, idx+ncol-1, 1), function(cell) {
      cell_val <- try(eval(params[[cell]]), silent=TRUE)
      if (!inherits(cell_val, c("metricsgraphics", "htmlwidget"))) {
        cell_val <- HTML(" ")
      }
      TD(style=sprintf("width:%3.2f%%", 100*widths[cell-idx+1]),
         width=sprintf("%3.2f%%", 100*widths[cell-idx+1]),
         cell_val)
    }))
  }), style=sprintf("width:%s", max_width), width=max_width)
  return(GRID)
}
# Minimal HTML tag helpers built on tag()/tags (presumably from htmltools --
# confirm).  Note each line also overwrites the corresponding entry of the
# `tags` list, which is a side effect on that shared object.
TABLE <- tags$table <- function(...) tag("table", list(...))
TR <- tags$tr <- function(...) tag("tr", list(...))
TD <- tags$td <- function(...) tag("td", list(...))
|
############################################################################
# plot2.R
#
# Reads household power consumption for 2007-02-01 and 2007-02-02 and saves
# a line chart of Global Active Power over time to plot2.png.
############################################################################

# ----- load data -----

# Read only the header row so the real column names can be reattached after
# the data rows are read with `skip` below.
household_names <- read.table("household_power_consumption.txt",
                              na.strings = "?",
                              nrows = 1,
                              sep = ";",
                              stringsAsFactors = FALSE
                              )

# Read only the rows for 2007-02-01 and 2007-02-02: the first of those dates
# starts 66,636 data rows into the file (hence skip = header + 66636), and
# two full days at one reading per minute is 2*24*60 rows.
household <- read.table("household_power_consumption.txt",
                        na.strings = "?",
                        skip = 1+66636,
                        nrows = 2*24*60,
                        sep = ";",
                        stringsAsFactors = FALSE
                        )

# assign column names to data.frame
colnames(household) <- household_names[1, ]

# ----- Convert Date and Time to R Date/Time class -----
library(lubridate)

# make a new column of date/time objects from the Date and Time strings
household$DateTime <- dmy_hms(paste(household$Date,
                                    household$Time, sep = " "))

# drop the Date and Time columns since they are no longer needed
household <- household[ , 3:length(colnames(household))]

# ----- Make plot -----
# open a new PNG device
png(filename = "plot2.png",
    width = 480,
    height = 480,
    units = "px")

# create the plot using base graphics
# (FIX: y-axis label typo "Gloabal" corrected to "Global")
plot(household$DateTime, household$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")

# close the device
dev.off() | /plot2.R | no_license | lutzmagio/ExData_Plotting1 | R | false | false | 1,544 | r | ############################################################################
# plot2.R
#
# Reads household power consumption for 2007-02-01 and 2007-02-02 and saves
# a line chart of Global Active Power over time to plot2.png.
############################################################################

# ----- load data -----

# Read only the header row so the real column names can be reattached after
# the data rows are read with `skip` below.
household_names <- read.table("household_power_consumption.txt",
                              na.strings = "?",
                              nrows = 1,
                              sep = ";",
                              stringsAsFactors = FALSE
                              )

# Read only the rows for 2007-02-01 and 2007-02-02: the first of those dates
# starts 66,636 data rows into the file (hence skip = header + 66636), and
# two full days at one reading per minute is 2*24*60 rows.
household <- read.table("household_power_consumption.txt",
                        na.strings = "?",
                        skip = 1+66636,
                        nrows = 2*24*60,
                        sep = ";",
                        stringsAsFactors = FALSE
                        )

# assign column names to data.frame
colnames(household) <- household_names[1, ]

# ----- Convert Date and Time to R Date/Time class -----
library(lubridate)

# make a new column of date/time objects from the Date and Time strings
household$DateTime <- dmy_hms(paste(household$Date,
                                    household$Time, sep = " "))

# drop the Date and Time columns since they are no longer needed
household <- household[ , 3:length(colnames(household))]

# ----- Make plot -----
# open a new PNG device
png(filename = "plot2.png",
    width = 480,
    height = 480,
    units = "px")

# create the plot using base graphics
# (FIX: y-axis label typo "Gloabal" corrected to "Global")
plot(household$DateTime, household$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")

# close the device
dev.off() |
print.lbreg <- function(x, ...) {
  # S3 print method for "lbreg" objects: show the originating call followed
  # by the fitted coefficients.  Returns the coefficients invisibly (the
  # value of the final print()).
  cat("Call:\n")
  print(x$call)
  cat("\nCoefficients:\n")
  coefs <- x$coefficients
  print(coefs)
}
| /R/print.lbreg.R | no_license | cran/lbreg | R | false | false | 127 | r | print.lbreg <- function(x, ...)
{
# S3 print method body for "lbreg" objects (the function header is on the
# preceding line).  First show the call that produced the fit...
cat("Call:\n")
print(x$call)
# ...then the estimated coefficients.
cat("\nCoefficients:\n")
print(x$coefficients)
}
|
# Machine-extracted example for ciAS() from the `proportion` package:
# arcsine (angular-transformation) confidence interval for a binomial
# proportion.
library(proportion)
### Name: ciAS
### Title: ArcSine method of CI estimation
### Aliases: ciAS
### ** Examples
# n is presumably the number of trials and alp the significance level
# (so a 95% interval here) -- confirm in ?ciAS.
n=5; alp=0.05
# Compute the arcsine-method confidence interval.
ciAS(n,alp)
| /data/genthat_extracted_code/proportion/examples/ciAS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 145 | r | library(proportion)
### Name: ciAS
### Title: ArcSine method of CI estimation
### Aliases: ciAS
### ** Examples
# n is presumably the number of trials and alp the significance level
# (so a 95% interval here) -- confirm in ?ciAS.
n=5; alp=0.05
# Compute the arcsine-method confidence interval.
ciAS(n,alp)
|
\name{panel.errbars}
\alias{panel.errbars}
\title{Panel Functions for Error Bars}
\description{
\code{panel.errbars} plots points and draws a line supplemented
with error bars.
}
\usage{
panel.errbars(x,y,\dots,panel.xy=panel.xyplot,
make.grid=c("horizontal","vertical","both","none"),ewidth=0)
}
\arguments{
\item{x,y}{numeric values, the points around which error bars are plotted.
\code{x} is a numeric vector, \code{y} is a matrix with three columns,
the values, the lower and the upper ends of the error bars.
}
\item{\dots}{graphical parameters passed to \code{panel.xy}}
\item{panel.xy}{panel function to produce the plot to which error bars are added}
\item{make.grid}{character string, determines the direction of grid lines added to the plot}
\item{ewidth}{numerical value, width of the whiskers of the error bars}
}
\seealso{\code{\link[lattice]{panel.xyplot}}}
\examples{
library(lattice)
library(grid)
\dontshow{
if(interactive())
old.prompt <- grid::grid.prompt(TRUE)
}
applications <- Aggregate(percent(Dept,weight=Freq,ci=TRUE)~Gender,
data=UCBAdmissions)
admissions <- Aggregate(
percent(Admit=="Admitted",weight=Freq,ci=TRUE)~Dept+Gender,
data=UCBAdmissions)
xyplot(cbind(Percentage,lower,upper)~Gender|Dept,data=admissions,
panel=panel.errbars,
ewidth=.2,pch=19,
ylab="Percentage applicants admitted by Department")
xyplot(cbind(Percentage,lower,upper)~Gender|Dept,data=applications,
panel=panel.errbars,
ewidth=.2,pch=19,
ylab="Percentage applications to the Department")
\dontshow{
if(interactive())
grid::grid.prompt(old.prompt)
}
}
\keyword{hplot} | /man/errbars.Rd | no_license | msaidf/memisc | R | false | false | 1,738 | rd | \name{panel.errbars}
\alias{panel.errbars}
\title{Panel Functions for Error Bars}
\description{
\code{panel.errbars} plots points and draws a line supplemented
with error bars.
}
\usage{
panel.errbars(x,y,\dots,panel.xy=panel.xyplot,
make.grid=c("horizontal","vertical","both","none"),ewidth=0)
}
\arguments{
\item{x,y}{numeric values, the points around which error bars are plotted.
\code{x} is a numeric vector, \code{y} is a matrix with three columns,
the values, the lower and the upper ends of the error bars.
}
\item{\dots}{graphical parameters passed to \code{panel.xy}}
\item{panel.xy}{panel function to produce the plot to which error bars are added}
\item{make.grid}{character string, determines the direction of grid lines added to the plot}
\item{ewidth}{numerical value, width of the whiskers of the error bars}
}
\seealso{\code{\link[lattice]{panel.xyplot}}}
\examples{
library(lattice)
library(grid)
\dontshow{
if(interactive())
old.prompt <- grid::grid.prompt(TRUE)
}
applications <- Aggregate(percent(Dept,weight=Freq,ci=TRUE)~Gender,
data=UCBAdmissions)
admissions <- Aggregate(
percent(Admit=="Admitted",weight=Freq,ci=TRUE)~Dept+Gender,
data=UCBAdmissions)
xyplot(cbind(Percentage,lower,upper)~Gender|Dept,data=admissions,
panel=panel.errbars,
ewidth=.2,pch=19,
ylab="Percentage applicants admitted by Department")
xyplot(cbind(Percentage,lower,upper)~Gender|Dept,data=applications,
panel=panel.errbars,
ewidth=.2,pch=19,
ylab="Percentage applications to the Department")
\dontshow{
if(interactive())
grid::grid.prompt(old.prompt)
}
}
\keyword{hplot} |
##### The For-Loop #####

# Build a small gradebook: a vector of student names and a vector of grades,
# combined into a data frame.  (NB: `class` shadows base::class() here; the
# name is kept for continuity with the lesson, but avoid it in real code.)
class <- c('vijay', 'ashley', 'stephen', 'john', 'abanti', 'raj') # defining a vector
grades <- c(90, 52, 89, 88, 100, 92) # defining ANOTHER vector
gpa.sheet <- data.frame(class, grades) # defining a dataframe

# paste() merges multiple values into a single string
paste("student X's grade was", gpa.sheet[4, 'grades']) # fourth row of the grades column
dim(gpa.sheet) # dimension command: number of rows and columns

# general form of for-loop:
# for(variable in list.of.values) {
#   (perform operations with variable)
# }
# the for loop runs once for each value of the given vector
count <- 0
for(student in gpa.sheet[, 'class']) {
  # use {} around any multi-statement loop body
  count <- count + 1 # keeps track of how many times the loop has run
  tmp <- paste(student, 'had a grade of', gpa.sheet$grades[count])
  print(tmp) # R cycles through the list; `student` is updated on each pass
}

# Use TRUE rather than T: T and F are ordinary variables that can be
# reassigned, so relying on them is unsafe.
sort(gpa.sheet$grades, decreasing = TRUE)

##### The While-loop #####

# general form of while loop:
# while (logical.statement) {
#   <do this action>
# }
# while keeps repeating until the condition is no longer true
count <- 0
while(count < 100) {
  count <- count + 1
  print(count)
}
| /Tutorial_2.R | no_license | abantitagore/R-Lessons | R | false | false | 1,486 | r | ##### The For-Loop #####
# Build a small gradebook: a vector of student names and a vector of grades,
# combined into a data frame.  (NB: `class` shadows base::class() here; the
# name is kept for continuity with the lesson, but avoid it in real code.)
class <- c('vijay', 'ashley', 'stephen', 'john', 'abanti', 'raj') # defining a vector
grades <- c(90, 52, 89, 88, 100, 92) # defining ANOTHER vector
gpa.sheet <- data.frame(class, grades) # defining a dataframe

# paste() merges multiple values into a single string
paste("student X's grade was", gpa.sheet[4, 'grades']) # fourth row of the grades column
dim(gpa.sheet) # dimension command: number of rows and columns

# general form of for-loop:
# for(variable in list.of.values) {
#   (perform operations with variable)
# }
# the for loop runs once for each value of the given vector
count <- 0
for(student in gpa.sheet[, 'class']) {
  # use {} around any multi-statement loop body
  count <- count + 1 # keeps track of how many times the loop has run
  tmp <- paste(student, 'had a grade of', gpa.sheet$grades[count])
  print(tmp) # R cycles through the list; `student` is updated on each pass
}

# Use TRUE rather than T: T and F are ordinary variables that can be
# reassigned, so relying on them is unsafe.
sort(gpa.sheet$grades, decreasing = TRUE)

##### The While-loop #####

# general form of while loop:
# while (logical.statement) {
#   <do this action>
# }
# while keeps repeating until the condition is no longer true
count <- 0
while(count < 100) {
  count <- count + 1
  print(count)
}
|
#' BART for estimating TATE
#'
#' @param outcome variable name denoting outcome
#' @param treatment variable name denoting binary treatment assignment (ok if only available in trial, not population)
#' @param trial variable name denoting binary trial participation (1 = trial participant, 0 = not trial participant)
#' @param selection_covariates vector of covariate names in data set that predict trial participation
#' @param data data frame comprised of "stacked" trial and target population data
#' @param is_data_disjoint logical. If TRUE, then trial and population data are considered independent.
#' @param seed numeric. By default, the seed is set to 13783, otherwise can be specified (such as for simulation purposes).
#' @return \code{generalize_bart} returns a list of the TATE estimate, standard error, and 95\% CI bounds
#' @import stats
generalize_bart <- function(outcome, treatment, trial, selection_covariates, data, is_data_disjoint = TRUE, seed) {
  # Estimate the target-average treatment effect (TATE) with BART: fit the
  # outcome model on trial participants, then predict each target-population
  # unit under treatment and control and average the posterior contrasts.

  # Fixed default seed for reproducibility unless the caller supplies one.
  if (missing(seed)) {
    seed <- 13783
  }
  set.seed(seed)

  # Keep only rows with complete trial/covariate information, restricted to
  # the columns this function uses.
  complete_rows <- rownames(na.omit(data[, c(trial, selection_covariates)]))
  data <- data[complete_rows, c(outcome, treatment, trial, selection_covariates)]

  # Test set: target-population covariates (all rows when trial and
  # population overlap), duplicated so every unit is scored under both
  # treatment (1) and control (0).
  if (is_data_disjoint == FALSE) {
    xtest <- data[, selection_covariates]
  } else {
    xtest <- data[which(data[, trial] == 0), selection_covariates]
  }
  xtest <- rbind(xtest, xtest)
  xtest[, treatment] <- rep(c(1, 0), each = nrow(xtest)/2)

  # Training set: trial participants with an observed outcome.
  in_trial <- which(data[, trial] == 1 & !is.na(data[, outcome]))
  xtrain <- data[in_trial, c(treatment, selection_covariates)]
  ytrain <- data[in_trial, outcome]

  bart.out <- BayesTree::bart(x.train = xtrain,
                              y.train = ytrain,
                              keeptrainfits = FALSE,
                              x.test = xtest,
                              verbose = FALSE)

  # Per-draw treatment/control contrast: the first half of the test columns
  # is the treated copy, the second half the control copy.  Binary outcomes
  # go through the probit link (pnorm); continuous outcomes are differenced
  # on the raw scale.
  treated_cols <- 1:(nrow(xtest)/2)
  control_cols <- (1 + (nrow(xtest)/2)):nrow(xtest)
  if (dim(table(data[, outcome])) == 2) {
    bart.fits <- pnorm(bart.out$yhat.test[, treated_cols]) -
      pnorm(bart.out$yhat.test[, control_cols])
  } else {
    bart.fits <- bart.out$yhat.test[, treated_cols] -
      bart.out$yhat.test[, control_cols]
  }

  # Posterior distribution of the TATE: one mean per MCMC draw; summarise it
  # with its mean, spread, and a normal-approximation 95% interval.
  draw_means <- apply(bart.fits, 1, mean)
  TATE_est <- mean(draw_means)
  TATE_se <- sd(draw_means)

  TATE <- list(estimate = TATE_est,
               se = TATE_se,
               CI_l = TATE_est - 1.96 * TATE_se,
               CI_u = TATE_est + 1.96 * TATE_se)
  return(list(TATE = TATE))
}
| /R/generalize_bart.R | no_license | muschellij2/generalize | R | false | false | 3,126 | r | #' BART for estimating TATE
#'
#' @param outcome variable name denoting outcome
#' @param treatment variable name denoting binary treatment assignment (ok if only available in trial, not population)
#' @param trial variable name denoting binary trial participation (1 = trial participant, 0 = not trial participant)
#' @param selection_covariates vector of covariate names in data set that predict trial participation
#' @param data data frame comprised of "stacked" trial and target population data
#' @param is_data_disjoint logical. If TRUE, then trial and population data are considered independent.
#' @param seed numeric. By default, the seed is set to 13783, otherwise can be specified (such as for simulation purposes).
#' @return \code{generalize_bart} returns a list of the TATE estimate, standard error, and 95\% CI bounds
#' @import stats
generalize_bart <- function(outcome, treatment, trial, selection_covariates, data, is_data_disjoint = TRUE, seed) {
  # Estimate the target-average treatment effect (TATE) with BART: fit the
  # outcome model on trial participants, then predict each target-population
  # unit under treatment and control and average the posterior contrasts.

  # Fixed default seed for reproducibility unless the caller supplies one.
  if (missing(seed)) {
    seed <- 13783
  }
  set.seed(seed)

  # Keep only rows with complete trial/covariate information, restricted to
  # the columns this function uses.
  complete_rows <- rownames(na.omit(data[, c(trial, selection_covariates)]))
  data <- data[complete_rows, c(outcome, treatment, trial, selection_covariates)]

  # Test set: target-population covariates (all rows when trial and
  # population overlap), duplicated so every unit is scored under both
  # treatment (1) and control (0).
  if (is_data_disjoint == FALSE) {
    xtest <- data[, selection_covariates]
  } else {
    xtest <- data[which(data[, trial] == 0), selection_covariates]
  }
  xtest <- rbind(xtest, xtest)
  xtest[, treatment] <- rep(c(1, 0), each = nrow(xtest)/2)

  # Training set: trial participants with an observed outcome.
  in_trial <- which(data[, trial] == 1 & !is.na(data[, outcome]))
  xtrain <- data[in_trial, c(treatment, selection_covariates)]
  ytrain <- data[in_trial, outcome]

  bart.out <- BayesTree::bart(x.train = xtrain,
                              y.train = ytrain,
                              keeptrainfits = FALSE,
                              x.test = xtest,
                              verbose = FALSE)

  # Per-draw treatment/control contrast: the first half of the test columns
  # is the treated copy, the second half the control copy.  Binary outcomes
  # go through the probit link (pnorm); continuous outcomes are differenced
  # on the raw scale.
  treated_cols <- 1:(nrow(xtest)/2)
  control_cols <- (1 + (nrow(xtest)/2)):nrow(xtest)
  if (dim(table(data[, outcome])) == 2) {
    bart.fits <- pnorm(bart.out$yhat.test[, treated_cols]) -
      pnorm(bart.out$yhat.test[, control_cols])
  } else {
    bart.fits <- bart.out$yhat.test[, treated_cols] -
      bart.out$yhat.test[, control_cols]
  }

  # Posterior distribution of the TATE: one mean per MCMC draw; summarise it
  # with its mean, spread, and a normal-approximation 95% interval.
  draw_means <- apply(bart.fits, 1, mean)
  TATE_est <- mean(draw_means)
  TATE_se <- sd(draw_means)

  TATE <- list(estimate = TATE_est,
               se = TATE_se,
               CI_l = TATE_est - 1.96 * TATE_se,
               CI_u = TATE_est + 1.96 * TATE_se)
  return(list(TATE = TATE))
}
|
#' @import ggplot2
#' @importFrom rlang .data
# Adjust sale prices and assessed values in `df` for housing inflation using
# the CPIHOSNS index series in the package-internal `fred` data (sysdata.rda).
# Adds SALE_PRICE_ADJ and ASSESSED_VALUE_ADJ columns and drops the helper
# multiplier.  Assumes df has numeric SALE_YEAR, SALE_PRICE and
# ASSESSED_VALUE columns -- TODO confirm against callers.
adj_for_inflation <- function(df) {
# Split DATE into Year/Month/Day and average the index to one value per year.
fred <- #within sysdata.rda
fred %>% tidyr::separate(.data$DATE, c("Year", "Month", "Day")) %>%
dplyr::mutate_all(as.numeric) %>%
dplyr::group_by(.data$Year) %>%
dplyr::summarize(CPIHOSNS = mean(.data$CPIHOSNS))
# Adjust to the most recent year covered by both the data and the index.
max_yr <- max(df$SALE_YEAR)
if (max_yr > max(fred$Year)) {
max_yr <- max(fred$Year)
}
print(paste0("Inflation adjusted to ", max_yr))
# Index value of the target year; each year's multiplier is target/current.
final_indx_value <-
fred %>% dplyr::filter(.data$Year == max_yr) %>% dplyr::select(.data$CPIHOSNS)
fred <-
fred %>% dplyr::mutate(percent_adj = as.numeric(final_indx_value) / .data$CPIHOSNS) %>%
dplyr::group_by(.data$Year) %>%
dplyr::summarize(percent_adj = mean(.data$percent_adj))
# Attach the per-year multiplier to every row of the input data.
df <- df %>% dplyr::left_join(fred, by = c("SALE_YEAR" = "Year"))
#for when data is more recent than fred: treat the multiplier as 1
df <- df %>% tidyr::replace_na(list(percent_adj = 1))
df["SALE_PRICE_ADJ"] <- df["SALE_PRICE"] * df["percent_adj"]
df["ASSESSED_VALUE_ADJ"] <- df["ASSESSED_VALUE"] * df["percent_adj"]
# Drop the helper column before returning.
df <- df %>% dplyr::select(-.data$percent_adj)
return(df)
}
# Shared ggplot2 theme: classic look with gray major grid lines and a gray
# x-axis line; the y-axis line is removed.
my_theme <- theme_classic() + theme(panel.grid.major = element_line(color = "gray"), axis.line.x = element_line(color = "gray"),
axis.line.y = element_blank())
# Same theme with the x-axis tick labels rotated 45 degrees (for long labels).
my_theme_rotated <- theme_classic() + theme(panel.grid.major = element_line(color = "gray"),
axis.line.x = element_line(color = "gray"), axis.line.y = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1))
| /R/helper_fxns.R | permissive | jrockower/cmfproperty | R | false | false | 1,638 | r | #' @import ggplot2
#' @importFrom rlang .data
# Adjust sale prices and assessed values in `df` for housing inflation using
# the CPIHOSNS index series in the package-internal `fred` data (sysdata.rda).
# Adds SALE_PRICE_ADJ and ASSESSED_VALUE_ADJ columns and drops the helper
# multiplier.  Assumes df has numeric SALE_YEAR, SALE_PRICE and
# ASSESSED_VALUE columns -- TODO confirm against callers.
adj_for_inflation <- function(df) {
# Split DATE into Year/Month/Day and average the index to one value per year.
fred <- #within sysdata.rda
fred %>% tidyr::separate(.data$DATE, c("Year", "Month", "Day")) %>%
dplyr::mutate_all(as.numeric) %>%
dplyr::group_by(.data$Year) %>%
dplyr::summarize(CPIHOSNS = mean(.data$CPIHOSNS))
# Adjust to the most recent year covered by both the data and the index.
max_yr <- max(df$SALE_YEAR)
if (max_yr > max(fred$Year)) {
max_yr <- max(fred$Year)
}
print(paste0("Inflation adjusted to ", max_yr))
# Index value of the target year; each year's multiplier is target/current.
final_indx_value <-
fred %>% dplyr::filter(.data$Year == max_yr) %>% dplyr::select(.data$CPIHOSNS)
fred <-
fred %>% dplyr::mutate(percent_adj = as.numeric(final_indx_value) / .data$CPIHOSNS) %>%
dplyr::group_by(.data$Year) %>%
dplyr::summarize(percent_adj = mean(.data$percent_adj))
# Attach the per-year multiplier to every row of the input data.
df <- df %>% dplyr::left_join(fred, by = c("SALE_YEAR" = "Year"))
#for when data is more recent than fred: treat the multiplier as 1
df <- df %>% tidyr::replace_na(list(percent_adj = 1))
df["SALE_PRICE_ADJ"] <- df["SALE_PRICE"] * df["percent_adj"]
df["ASSESSED_VALUE_ADJ"] <- df["ASSESSED_VALUE"] * df["percent_adj"]
# Drop the helper column before returning.
df <- df %>% dplyr::select(-.data$percent_adj)
return(df)
}
# Shared ggplot2 theme: classic look with gray major grid lines and a gray
# x-axis line; the y-axis line is removed.
my_theme <- theme_classic() + theme(panel.grid.major = element_line(color = "gray"), axis.line.x = element_line(color = "gray"),
axis.line.y = element_blank())
# Same theme with the x-axis tick labels rotated 45 degrees (for long labels).
my_theme_rotated <- theme_classic() + theme(panel.grid.major = element_line(color = "gray"),
axis.line.x = element_line(color = "gray"), axis.line.y = element_blank(),
axis.text.x = element_text(angle = 45, hjust = 1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIEN.R
\name{BIEN_list_spatialpolygons}
\alias{BIEN_list_spatialpolygons}
\title{Extract a list of species within a given spatialpolygons.}
\usage{
BIEN_list_spatialpolygons(spatialpolygons, cultivated = FALSE,
only.new.world = FALSE, ...)
}
\arguments{
\item{spatialpolygons}{An object of class SpatialPolygonsDataFrame. Note that the object must be in WGS84.}
\item{cultivated}{Return cultivated records as well? Default is FALSE.}
\item{only.new.world}{Return only records from the New World? Default is FALSE.}
\item{...}{Additional arguments passed to internal functions.}
}
\value{
Dataframe containing a list of all species with occurrences in the supplied SpatialPolygons object.
}
\description{
BIEN_list_spatialpolygons produces a list of all species with occurrence records falling within a user-supplied SpatialPolygons or SpatialPolygonsDataFrame.
}
\note{
We recommend using \code{\link[rgdal]{readOGR}} to load spatial data. Other methods may cause problems related to handling holes in polygons.
}
\examples{
\dontrun{
BIEN_ranges_species("Carnegiea gigantea")#saves ranges to the current working directory
shape<-readOGR(dsn = ".",layer = "Carnegiea_gigantea")
#spatialpolygons should be read with readOGR(), see note.
species_list<-BIEN_list_spatialpolygons(spatialpolygons=shape)}
}
\seealso{
Other list functions: \code{\link{BIEN_list_all}},
\code{\link{BIEN_list_country}},
\code{\link{BIEN_list_county}},
\code{\link{BIEN_list_state}}
}
| /BIEN/man/BIEN_list_spatialpolygons.Rd | no_license | chlorophilia/RBIEN | R | false | true | 1,550 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BIEN.R
\name{BIEN_list_spatialpolygons}
\alias{BIEN_list_spatialpolygons}
\title{Extract a list of species within a given spatialpolygons.}
\usage{
BIEN_list_spatialpolygons(spatialpolygons, cultivated = FALSE,
only.new.world = FALSE, ...)
}
\arguments{
\item{spatialpolygons}{An object of class SpatialPolygonsDataFrame. Note that the object must be in WGS84.}
\item{cultivated}{Return cultivated records as well? Default is FALSE.}
\item{only.new.world}{Return only records from the New World? Default is FALSE.}
\item{...}{Additional arguments passed to internal functions.}
}
\value{
Dataframe containing a list of all species with occurrences in the supplied SpatialPolygons object.
}
\description{
BIEN_list_spatialpolygons produces a list of all species with occurrence records falling within a user-supplied SpatialPolygons or SpatialPolygonsDataFrame.
}
\note{
We recommend using \code{\link[rgdal]{readOGR}} to load spatial data. Other methods may cause problems related to handling holes in polygons.
}
\examples{
\dontrun{
BIEN_ranges_species("Carnegiea gigantea")#saves ranges to the current working directory
shape<-readOGR(dsn = ".",layer = "Carnegiea_gigantea")
#spatialpolygons should be read with readOGR(), see note.
species_list<-BIEN_list_spatialpolygons(spatialpolygons=shape)}
}
\seealso{
Other list functions: \code{\link{BIEN_list_all}},
\code{\link{BIEN_list_country}},
\code{\link{BIEN_list_county}},
\code{\link{BIEN_list_state}}
}
|
library(queueing)

### Name: VTq.o_MMC
### Title: Returns the variance of the time spent in queue in the M/M/c
###   queueing model
### Aliases: VTq.o_MMC
### Keywords: M/M/c

### ** Examples

## Define the input parameters: M/M/c queue with arrival rate 5,
## service rate 10 and two servers
mmc_input <- NewInput.MMC(lambda = 5, mu = 10, c = 2, n = 0, method = 0)

## Fit the queueing model from those inputs
mmc_model <- QueueingModel(mmc_input)

## Variance of the time spent waiting in queue
VTq(mmc_model)
| /data/genthat_extracted_code/queueing/examples/VTq.o_MMC.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 397 | r | library(queueing)
### Name: VTq.o_MMC
### Title: Returns the variance of the time spent in queue in the M/M/c
###   queueing model
### Aliases: VTq.o_MMC
### Keywords: M/M/c

### ** Examples

## Build the queue description: M/M/c with lambda = 5, mu = 10, c = 2
queue_spec <- NewInput.MMC(lambda = 5, mu = 10, c = 2, n = 0, method = 0)

## Solve the queueing model
queue_fit <- QueueingModel(queue_spec)

## Report the variance of the time spent in queue
VTq(queue_fit)
|
\name{mmer}
\alias{mmer}
\title{
\strong{m}ixed \strong{m}odel \strong{e}quations in \strong{R}
}
\description{
Sommer is a structural multivariate-univariate linear mixed model solver for multiple random effects allowing the specification and/or estimation of variance covariance structures. REML estimates can be obtained using the Direct-Inversion Newton-Raphson, Average Information and Efficient Mixed Model Association algorithms coded in C++ using the Armadillo library to optimize dense matrix operations common in genomic selection models. Sommer was designed for genomic prediction and genome wide association studies (GWAS), but also functions as a regular mixed model program. These algorithms are \strong{intended to be used for problems of the type p > n or dense matrices}. For problems with sparse covariance structures, or problems of the type n > p, the MME-based algorithms are faster and we recommend to shift to the use of such software (i.e. lme4, breedR, or asreml-R).
\strong{For tutorials} on how to perform different analysis with sommer please look at the vignettes by typing in the terminal:
\strong{vignette("v1.sommer.quick.start")}
\strong{vignette("v2.sommer.changes.and.faqs")}
\strong{vignette("v3.sommer.qg")}
\strong{vignette("v4.sommer.gxe")}
}
\usage{
mmer(fixed, random, rcov, data, weights, iters=20, tolpar = 1e-03,
tolparinv = 1e-06, init=NULL, constraints=NULL,method="NR", getPEV=TRUE,
na.method.X="exclude", na.method.Y="exclude",return.param=FALSE,
date.warning=TRUE,verbose=TRUE, reshape.output=TRUE)
}
\arguments{
\item{fixed}{A formula specifying the \strong{response variable(s)} \strong{and fixed effects}, i.e:
\emph{response ~ covariate} for univariate models
\emph{cbind(response.i,response.j) ~ covariate} for multivariate models
The \code{\link{fcm}()} function can be used to constrain fixed effects in multi-response models.
}
\item{random}{A formula specifying the name of the \strong{random effects}, i.e. \emph{random= ~ genotype + year}.
Useful functions can be used to fit heterogeneous variances and other special models (\emph{see 'Special Functions' in the Details section for more information}):
\code{\link{vs}(...,Gu,Gt,Gtc)} is the main function to specify variance models and special structures for random effects. On the ... argument you provide the unknown variance-covariance structures (i.e. us,ds,at,cs) and the random effect where such covariance structure will be used (the random effect of interest). Gu is used to provide known covariance matrices among the levels of the random effect, Gt initial values and Gtc for constraints. Auxiliary functions for building the variance models are:
** \code{\link{ds}(x)}, \code{\link{us}(x)}, \code{\link{cs}(x)} and \code{\link{at}(x,levs)} can be used to specify unknown diagonal, unstructured and customized unstructured and diagonal covariance structures to be estimated by REML.
** \code{\link{unsm}(x)}, \code{\link{uncm}(x)}, \code{\link{fixm}(x)} and \code{\link{diag}(x)} can be used to build easily matrices to specify constraints in the Gtc argument of the \code{\link{vs}()} function.
** \code{\link{overlay}()}, \code{\link{spl2D}()}, and \code{\link{leg}()} functions can be used to specify overlayed of design matrices of random effects, two dimensional spline and random regression models within the \code{\link{vs}()} function.
}
\item{rcov}{A formula specifying the name of the \strong{error term}, i.e. \emph{rcov= ~ units}.
Special heterogeneous and special variance models and constraints for the residual part are the same used on the random term but the name of the random effect is always "units" which can be thought as a column with as many levels as rows in the data, i.e. \emph{rcov=~vs(ds(covariate),units)}
}
\item{data}{A data frame containing the variables specified in the formulas for response, fixed, and random effects.}
\item{weights}{Name of the covariate for weights. To be used for the product R = Wsi*R*Wsi, where * is the matrix product, Wsi is the square root of the inverse of W and R is the residual matrix.}
\item{iters}{Maximum number of iterations allowed.}
\item{tolpar}{Convergence criteria for the change in log-likelihood.}
\item{tolparinv}{Tolerance parameter for matrix inverse used when singularities are encountered in the estimation procedure.}
\item{init}{Initial values for the variance components. By default this is NULL and initial values for the variance components are provided by the algorithm, but in case the user want to provide initial values for ALL var-cov components this argument is functional. It has to be provided as a list, where each list element corresponds to one random effect (1x1 matrix) and if multitrait model is pursued each element of the list is a matrix of variance covariance components among traits for such random effect. Initial values can also be provided in the Gt argument of the \link{vs} function. Is highly encouraged to use the Gt and Gtc arguments of the \link{vs} function instead of this argument, but these argument can be used to provide all initial values at once}
\item{constraints}{When initial values are provided these have to be accompanied by their constraints. See the \link{vs} function for more details on the constraints. Is highly encouraged to use the Gt and Gtc arguments of the \link{vs} function instead of this argument but these argument can be used to provide all constraints at once.}
\item{method}{This refers to the method or algorithm to be used for estimating variance components. Direct-inversion Newton-Raphson \strong{NR} and Average Information \strong{AI} (Tunnicliffe 1989; Gilmour et al. 1995; Lee et al. 2015).}
\item{getPEV}{A TRUE/FALSE value indicating if the program should return the predicted error variance and variance for random effects. This option is provided since this can take a long time for certain models where p is > n by a big extent.}
\item{na.method.X}{One of the two possible values; "include" or "exclude". If "include" is selected then the function will impute the X matrices for fixed effects with the median value. If "exclude" is selected it will get rid of all rows with missing values for the X (fixed) covariates. The default is "exclude". The "include" option should be used carefully.}
\item{na.method.Y}{One of the three possible values; "include", "include2" or "exclude" (default) to treat the observations in response variable to be used in the estimation of variance components. The first option "include" will impute the response variables for all rows with the median value, whereas "include2" imputes the responses only for rows where there is observation(s) for at least one of the responses (only available in the multi-response models). If "exclude" is selected (default) it will get rid of rows in response(s) where missing values are present for at least one of the responses.}
\item{return.param}{A TRUE/FALSE value to indicate if the program should return the parameters to be used for fitting the model instead of fitting the model.}
\item{date.warning}{A TRUE/FALSE value to indicate if the program should warn you when is time to update the sommer package.}
\item{verbose}{A TRUE/FALSE value to indicate if the program should return the progress of the iterative algorithm.}
\item{reshape.output}{A TRUE/FALSE value to indicate if the output should be reshaped to be easier to interpret for the user, some information is missing from the multivariate models for an easy interpretation.}
}
\details{
The use of this function requires a good understanding of mixed models. Please review the 'sommer.quick.start' vignette and pay attention to details like format of your random and fixed variables (i.e. character and factor variables have different properties when returning BLUEs or BLUPs, please see the 'sommer.changes.and.faqs' vignette).
\strong{Citation}
Type \emph{citation("sommer")} to know how to cite the sommer package in your publications.
\strong{Special variance structures}
\code{\link{vs}(\link{at}(x,levels),y)}
can be used to specify heterogeneous variance for the "y" covariate at specific levels of the covariate "x", i.e. \emph{random=~vs(at(Location,c("A","B")),ID)} fits a variance component for ID at levels A and B of the covariate Location.
\code{\link{vs}(\link{ds}(x),y)}
can be used to specify a diagonal covariance structure for the "y" covariate for all levels of the covariate "x", i.e. \emph{random=~vs(ds(Location),ID)} fits a variance component for ID at all levels of the covariate Location.
\code{\link{vs}(\link{us}(x),y)}
can be used to specify an unstructured covariance structure for the "y" covariate for all levels of the covariate "x", i.e. \emph{random=~vs(us(Location),ID)} fits variance and covariance components for ID at all levels of the covariate Location.
\code{\link{vs}(\link{overlay}(...,rlist=NULL,prefix=NULL))}
can be used to specify overlay of design matrices between consecutive random effects specified, i.e. \emph{random=~vs(overlay(male,female))} overlays (overlaps) the incidence matrices for the male and female random effects to obtain a single variance component for both effects. The `rlist` argument is a list with each element being a numeric value that multiplies the incidence matrix to be overlayed. See \code{\link{overlay}} for details.Can be combined with vs().
\code{\link{vs}(\link{spl2D}(x.coord, y.coord, at, at.levels))}
can be used to fit a 2-dimensional spline (i.e. spatial modeling) using coordinates \code{x.coord} and \code{y.coord} (in numeric class). The 2D spline can be fitted at specific levels using the \code{at} and \code{at.levels} arguments. For example \emph{random=~vs(spl2D(x.coord=Row,y.coord=Range,at=FIELD))}.
\code{\link{vs}(\link{leg}(x,n),y)}
can be used to fit a random regression model using a numerical variable \code{x} that marks the trajectory for the random effect \code{y}. The leg function can be combined with the special functions \code{ds}, \code{us}, \code{at} and \code{cs}. For example \emph{random=~vs(leg(x,1),y)} or \emph{random=~vs(us(leg(x,1)),y)}.
\code{\link{vs}(x,Gtc=\link{fcm}(v))}
can be used to constrain fixed effects in the multi-response mixed models. This is a vector that specifies if the fixed effect is to be estimated for such trait. For example \emph{fixed=cbind(response.i, response.j)~vs(Rowf, Gtc=fcm(c(1,0)))} means that the fixed effect Rowf should only be estimated for the first response and the second should only have the intercept.
\strong{S3 methods}{
S3 methods are available for some parameter extraction such as \code{\link{fitted.mmer}}, \code{\link{residuals.mmer}}, \code{\link{summary.mmer}}, \code{\link{randef}}, \code{\link{coef.mmer}}, \code{\link{anova.mmer}}, \code{\link{plot.mmer}}, and \code{\link{predict.mmer}} to obtain adjusted means. In addition, the \code{\link{vpredict}} function can be used to estimate standard errors for linear combinations of variance components (i.e. ratios like h2).
}
\strong{Additional Functions}
Additional functions for genetic analysis have been included such as relationship matrix building (\code{\link{A.mat}}, \code{\link{D.mat}}, \code{\link{E.mat}}, \code{\link{H.mat}}), heritability (\code{\link{h2.fun}}), build a genotypic hybrid marker matrix (\code{\link{build.HMM}}), plot of genetic maps (\code{\link{map.plot}}), and manhattan plots (\code{\link{manhattan}}). If you need to build a pedigree-based relationship matrix use the \code{getA} function from the pedigreemm package.
\strong{Bug report and contact}{
If you have any technical questions or suggestions please post it in https://stackoverflow.com or https://stats.stackexchange.com and send me an email with the link at cova_ruber@live.com.mx
If you have any bug report please go to https://github.com/covaruber/sommer or send me an email to address it asap, just make sure you have read the vignettes carefully before sending your question.
}
\strong{Example Datasets}
The package has been equipped with several datasets to learn how to use the sommer package:
* \code{\link{DT_halfdiallel}}, \code{\link{DT_fulldiallel}} and \code{\link{DT_mohring}} datasets have examples to fit half and full diallel designs.
* \code{\link{DT_h2}} to calculate heritability
* \code{\link{DT_cornhybrids}} and \code{\link{DT_technow}} datasets to perform genomic prediction in hybrid single crosses
* \code{\link{DT_wheat}} dataset to do genomic prediction in single crosses in species displaying only additive effects.
* \code{\link{DT_cpdata}} dataset to fit genomic prediction models within a biparental population coming from 2 highly heterozygous parents including additive, dominance and epistatic effects.
* \code{\link{DT_polyploid}} to fit genomic prediction and GWAS analysis in polyploids.
* \code{\link{DT_gryphon}} data contains an example of an animal model including pedigree information.
* \code{\link{DT_btdata}} dataset contains an animal (birds) model.
* \code{\link{DT_legendre}} simulated dataset for random regression model.
* \code{\link{DT_sleepstudy}} dataset to know how to translate lme4 models to sommer models.
\strong{Models Enabled}
For details about the models enabled and more information about the covariance structures please check the help page of the package (\code{\link{sommer}}).
}
\value{
If all parameters are correctly indicated the program will return a list with the following information:
\item{Vi}{the inverse of the phenotypic variance matrix V^- = (ZGZ+R)^-1}
\item{sigma}{a list with the values of the variance-covariance components with one list element for each random effect.}
\item{sigma_scaled}{a list with the values of the scaled variance-covariance components with one list element for each random effect.}
\item{sigmaSE}{Hessian matrix containing the variance-covariance for the variance components. SE's can be obtained taking the square root of the diagonal values of the Hessian.}
\item{Beta}{a data frame for trait BLUEs (fixed effects).}
\item{VarBeta}{a variance-covariance matrix for trait BLUEs}
\item{U}{a list (one element for each random effect) with a data frame for trait BLUPs.}
\item{VarU}{a list (one element for each random effect) with the variance-covariance matrix for trait BLUPs.}
\item{PevU}{a list (one element for each random effect) with the predicted error variance matrix for trait BLUPs.}
\item{fitted}{ Fitted values y.hat=XB}
\item{residuals}{Residual values e = Y - XB}
\item{AIC}{Akaike information criterion}
\item{BIC}{Bayesian information criterion}
\item{convergence}{a TRUE/FALSE statement indicating if the model converged.}
\item{monitor}{The values of log-likelihood and variance-covariance components across iterations during the REML estimation.}
\item{method}{The method for estimation of variance components specified by the user.}
\item{call}{Formula for fixed, random and rcov used.}
\item{constraints}{constraints used in the mixed models for the random effects.}
\item{constraintsF}{constraints used in the mixed models for the fixed effects.}
\item{data}{dataset used in the model.}
\item{sigmaVector}{a vectorized version of the sigma element (variance-covariance components) to match easily the standard errors of the var-cov components stored in the element sigmaSE.}
}
\references{
Covarrubias-Pazaran G. Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 2016, 11(6): doi:10.1371/journal.pone.0156744
Covarrubias-Pazaran G. 2018. Software update: Moving the R package sommer to multivariate mixed models for genome-assisted prediction. doi: https://doi.org/10.1101/354639
Bernardo Rex. 2010. Breeding for quantitative traits in plants. Second edition. Stemma Press. 390 pp.
Gilmour et al. 1995. Average Information REML: An efficient algorithm for variance parameter estimation in linear mixed models. Biometrics 51(4):1440-1450.
Kang et al. 2008. Efficient control of population structure in model organism association mapping. Genetics 178:1709-1723.
Lee, D.-J., Durban, M., and Eilers, P.H.C. (2013). Efficient two-dimensional smoothing with P-spline ANOVA mixed models and nested bases. Computational Statistics and Data Analysis, 61, 22 - 37.
Lee et al. 2015. MTG2: An efficient algorithm for multivariate linear mixed model analysis based on genomic information. Cold Spring Harbor. doi: http://dx.doi.org/10.1101/027201.
Maier et al. 2015. Joint analysis of psychiatric disorders increases accuracy of risk prediction for schizophrenia, bipolar disorder, and major depressive disorder. Am J Hum Genet; 96(2):283-294.
Rodriguez-Alvarez, Maria Xose, et al. Correcting for spatial heterogeneity in plant breeding experiments with P-splines. Spatial Statistics 23 (2018): 52-71.
Searle. 1993. Applying the EM algorithm to calculating ML and REML estimates of variance components. Paper invited for the 1993 American Statistical Association Meeting, San Francisco.
Yu et al. 2006. A unified mixed-model method for association mapping that accounts for multiple levels of relatedness. Genetics 38:203-208.
Tunnicliffe W. 1989. On the use of marginal likelihood in time series model estimation. JRSS 51(1):15-27.
Zhang et al. 2010. Mixed linear model approach adapted for genome-wide association studies. Nat. Genet. 42:355-360.
}
\author{
Giovanny Covarrubias-Pazaran
}
\examples{
####=========================================####
#### For CRAN time limitations most lines in the
#### examples are silenced with one '#' mark,
#### remove them and run the examples
####=========================================####
####=========================================####
#### EXAMPLES
#### Different models with sommer
####=========================================####
data(DT_example)
DT <- DT_example
head(DT)
####=========================================####
#### Univariate homogeneous variance models ####
####=========================================####
## Compound simmetry (CS) model
ans1 <- mmer(Yield~Env,
random= ~ Name + Env:Name,
rcov= ~ units,
data=DT)
summary(ans1)
####===========================================####
#### Univariate heterogeneous variance models ####
####===========================================####
## Compound simmetry (CS) + Diagonal (DIAG) model
ans2 <- mmer(Yield~Env,
random= ~Name + vs(ds(Env),Name),
rcov= ~ vs(ds(Env),units),
data=DT)
summary(ans2)
####===========================================####
#### Univariate unstructured variance models ####
####===========================================####
ans3 <- mmer(Yield~Env,
random=~ vs(us(Env),Name),
rcov=~vs(us(Env),units),
data=DT)
summary(ans3)
# ####==========================================####
# #### Multivariate homogeneous variance models ####
# ####==========================================####
#
# ## Multivariate Compound simmetry (CS) model
# DT$EnvName <- paste(DT$Env,DT$Name)
# ans4 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(Name, Gtc = unsm(2)) + vs(EnvName,Gtc = unsm(2)),
# rcov= ~ vs(units, Gtc = unsm(2)),
# data=DT)
# summary(ans4)
#
# ####=============================================####
# #### Multivariate heterogeneous variance models ####
# ####=============================================####
#
# ## Multivariate Compound simmetry (CS) + Diagonal (DIAG) model
# ans5 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(Name, Gtc = unsm(2)) + vs(ds(Env),Name, Gtc = unsm(2)),
# rcov= ~ vs(ds(Env),units, Gtc = unsm(2)),
# data=DT)
# summary(ans5)
#
# ####===========================================####
# #### Multivariate unstructured variance models ####
# ####===========================================####
#
# ans6 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(us(Env),Name, Gtc = unsm(2)),
# rcov= ~ vs(ds(Env),units, Gtc = unsm(2)),
# data=DT)
# summary(ans6)
#
# ####=========================================####
# ####=========================================####
# #### EXAMPLE SET 2
# #### 2 variance components
# #### one random effect with variance covariance structure
# ####=========================================####
# ####=========================================####
#
# data("DT_cpdata")
# DT <- DT_cpdata
# GT <- GT_cpdata
# MP <- MP_cpdata
# head(DT)
# GT[1:4,1:4]
# #### create the variance-covariance matrix
# A <- A.mat(GT)
# #### look at the data and fit the model
# mix1 <- mmer(Yield~1,
# random=~vs(id, Gu=A) + Rowf,
# rcov=~units,
# data=DT)
# summary(mix1)$varcomp
# #### calculate heritability
# vpredict(mix1, h1 ~ V1/(V1+V3) )
# #### multi trait example
# mix2 <- mmer(cbind(Yield,color)~1,
# random = ~ vs(id, Gu=A, Gtc = unsm(2)) + # unstructured at trait level
# vs(Rowf, Gtc=diag(2)) + # diagonal structure at trait level
# vs(Colf, Gtc=diag(2)), # diagonal structure at trait level
# rcov = ~ vs(units, Gtc = unsm(2)), # unstructured at trait level
# data=DT)
# summary(mix2)
#
# ####=========================================####
# #### EXAMPLE SET 3
# #### comparison with lmer, install 'lme4'
# #### and run the code below
# ####=========================================####
#
# #### lmer cannot use var-cov matrices so we will not
# #### use them in this comparison example
#
# library(lme4)
# library(sommer)
# data("DT_cornhybrids")
# DT <- DT_cornhybrids
# DTi <- DTi_cornhybrids
# GT <- GT_cornhybrids
#
# fm1 <- lmer(Yield ~ Location + (1|GCA1) + (1|GCA2) + (1|SCA),
# data=DT )
# out <- mmer(Yield ~ Location,
# random = ~ GCA1 + GCA2 + SCA,
# rcov = ~ units,
# data=DT)
# summary(fm1)
# summary(out)
# ### same BLUPs for GCA1, GCA2, SCA than lme4
# plot(out$U$GCA1$Yield, ranef(fm1)$GCA1[,1])
# plot(out$U$GCA2$Yield, ranef(fm1)$GCA2[,1])
# vv=which(abs(out$U$SCA$Yield) > 0)
# plot(out$U$SCA$Yield[vv], ranef(fm1)$SCA[,1])
#
# ### a more complex model specifying which locations
# head(DT)
# out2 <- mmer(Yield ~ Location,
# random = ~ vs(at(Location,c("3","4")),GCA2) +
# vs(at(Location,c("3","4")),SCA),
# rcov = ~ vs(ds(Location),units),
# data=DT)
# summary(out2)
} | /man/mmer.Rd | no_license | startrekor/sommer | R | false | false | 22,688 | rd | \name{mmer}
\alias{mmer}
\title{
\strong{m}ixed \strong{m}odel \strong{e}quations in \strong{R}
}
\description{
Sommer is a structural multivariate-univariate linear mixed model solver for multiple random effects allowing the specification and/or estimation of variance covariance structures. REML estimates can be obtained using the Direct-Inversion Newton-Raphson, Average Information and Efficient Mixed Model Association algorithms coded in C++ using the Armadillo library to optimize dense matrix operations common in genomic selection models. Sommer was designed for genomic prediction and genome wide association studies (GWAS), but also functions as a regular mixed model program. These algorithms are \strong{intended to be used for problems of the type p > n or dense matrices}. For problems with sparse covariance structures, or problems of the type n > p, the MME-based algorithms are faster and we recommend to shift to the use of such software (i.e. lme4, breedR, or asreml-R).
\strong{For tutorials} on how to perform different analysis with sommer please look at the vignettes by typing in the terminal:
\strong{vignette("v1.sommer.quick.start")}
\strong{vignette("v2.sommer.changes.and.faqs")}
\strong{vignette("v3.sommer.qg")}
\strong{vignette("v4.sommer.gxe")}
}
\usage{
mmer(fixed, random, rcov, data, weights, iters=20, tolpar = 1e-03,
tolparinv = 1e-06, init=NULL, constraints=NULL,method="NR", getPEV=TRUE,
na.method.X="exclude", na.method.Y="exclude",return.param=FALSE,
date.warning=TRUE,verbose=TRUE, reshape.output=TRUE)
}
\arguments{
\item{fixed}{A formula specifying the \strong{response variable(s)} \strong{and fixed effects}, i.e:
\emph{response ~ covariate} for univariate models
\emph{cbind(response.i,response.j) ~ covariate} for multivariate models
The \code{\link{fcm}()} function can be used to constrain fixed effects in multi-response models.
}
\item{random}{A formula specifying the name of the \strong{random effects}, i.e. \emph{random= ~ genotype + year}.
Useful functions can be used to fit heterogeneous variances and other special models (\emph{see 'Special Functions' in the Details section for more information}):
\code{\link{vs}(...,Gu,Gt,Gtc)} is the main function to specify variance models and special structures for random effects. On the ... argument you provide the unknown variance-covariance structures (i.e. us,ds,at,cs) and the random effect where such covariance structure will be used (the random effect of interest). Gu is used to provide known covariance matrices among the levels of the random effect, Gt initial values and Gtc for constraints. Auxiliar functions for building the variance models are:
** \code{\link{ds}(x)}, \code{\link{us}(x)}, \code{\link{cs}(x)} and \code{\link{at}(x,levs)} can be used to specify unknown diagonal, unstructured and customized unstructured and diagonal covariance structures to be estimated by REML.
** \code{\link{unsm}(x)}, \code{\link{uncm}(x)}, \code{\link{fixm}(x)} and \code{\link{diag}(x)} can be used to build easily matrices to specify constraints in the Gtc argument of the \code{\link{vs}()} function.
** \code{\link{overlay}()}, \code{\link{spl2D}()}, and \code{\link{leg}()} functions can be used to specify overlayed of design matrices of random effects, two dimensional spline and random regression models within the \code{\link{vs}()} function.
}
\item{rcov}{A formula specifying the name of the \strong{error term}, i.e. \emph{rcov= ~ units}.
Special heterogeneous and special variance models and constraints for the residual part are the same used on the random term but the name of the random effect is always "units" which can be thought as a column with as many levels as rows in the data, i.e. \emph{rcov=~vs(ds(covariate),units)}
}
\item{data}{A data frame containing the variables specified in the formulas for response, fixed, and random effects.}
\item{weights}{Name of the covariate for weights. To be used for the product R = Wsi*R*Wsi, where * is the matrix product, Wsi is the square root of the inverse of W and R is the residual matrix.}
\item{iters}{Maximum number of iterations allowed.}
\item{tolpar}{Convergence criteria for the change in log-likelihood.}
\item{tolparinv}{Tolerance parameter for matrix inverse used when singularities are encountered in the estimation procedure.}
\item{init}{Initial values for the variance components. By default this is NULL and initial values for the variance components are provided by the algorithm, but in case the user want to provide initial values for ALL var-cov components this argument is functional. It has to be provided as a list, where each list element corresponds to one random effect (1x1 matrix) and if multitrait model is pursued each element of the list is a matrix of variance covariance components among traits for such random effect. Initial values can also be provided in the Gt argument of the \link{vs} function. Is highly encouraged to use the Gt and Gtc arguments of the \link{vs} function instead of this argument, but these argument can be used to provide all initial values at once}
\item{constraints}{When initial values are provided these have to be accompanied by their constraints. See the \link{vs} function for more details on the constraints. Is highly encouraged to use the Gt and Gtc arguments of the \link{vs} function instead of this argument but these argument can be used to provide all constraints at once.}
\item{method}{This refers to the method or algorithm to be used for estimating variance components. Direct-inversion Newton-Raphson \strong{NR} and Average Information \strong{AI} (Tunnicliffe 1989; Gilmour et al. 1995; Lee et al. 2015).}
\item{getPEV}{A TRUE/FALSE value indicating if the program should return the predicted error variance and variance for random effects. This option is provided since this can take a long time for certain models where p is > n by a big extent.}
\item{na.method.X}{One of the two possible values; "include" or "exclude". If "include" is selected then the function will impute the X matrices for fixed effects with the median value. If "exclude" is selected it will get rid of all rows with missing values for the X (fixed) covariates. The default is "exclude". The "include" option should be used carefully.}
\item{na.method.Y}{One of the three possible values; "include", "include2" or "exclude" (default) to treat the observations in response variable to be used in the estimation of variance components. The first option "include" will impute the response variables for all rows with the median value, whereas "include2" imputes the responses only for rows where there is observation(s) for at least one of the responses (only available in the multi-response models). If "exclude" is selected (default) it will get rid of rows in response(s) where missing values are present for at least one of the responses.}
\item{return.param}{A TRUE/FALSE value to indicate if the program should return the parameters to be used for fitting the model instead of fitting the model.}
\item{date.warning}{A TRUE/FALSE value to indicate if the program should warn you when is time to update the sommer package.}
\item{verbose}{A TRUE/FALSE value to indicate if the program should return the progress of the iterative algorithm.}
\item{reshape.output}{A TRUE/FALSE value to indicate if the output should be reshaped to be easier to interpret for the user, some information is missing from the multivariate models for an easy interpretation.}
}
\details{
The use of this function requires a good understanding of mixed models. Please review the 'sommer.quick.start' vignette and pay attention to details like format of your random and fixed variables (i.e. character and factor variables have different properties when returning BLUEs or BLUPs, please see the 'sommer.changes.and.faqs' vignette).
\strong{Citation}
Type \emph{citation("sommer")} to know how to cite the sommer package in your publications.
\strong{Special variance structures}
\code{\link{vs}(\link{at}(x,levels),y)}
can be used to specify heterogeneous variance for the "y" covariate at specific levels of the covariate "x", i.e. \emph{random=~vs(at(Location,c("A","B")),ID)} fits a variance component for ID at levels A and B of the covariate Location.
\code{\link{vs}(\link{ds}(x),y)}
can be used to specify a diagonal covariance structure for the "y" covariate for all levels of the covariate "x", i.e. \emph{random=~vs(ds(Location),ID)} fits a variance component for ID at all levels of the covariate Location.
\code{\link{vs}(\link{us}(x),y)}
can be used to specify an unstructured covariance structure for the "y" covariate for all levels of the covariate "x", i.e. \emph{random=~vs(us(Location),ID)} fits variance and covariance components for ID at all levels of the covariate Location.
\code{\link{vs}(\link{overlay}(...,rlist=NULL,prefix=NULL))}
can be used to specify overlay of design matrices between consecutive random effects specified, i.e. \emph{random=~vs(overlay(male,female))} overlays (overlaps) the incidence matrices for the male and female random effects to obtain a single variance component for both effects. The `rlist` argument is a list with each element being a numeric value that multiplies the incidence matrix to be overlaid. See \code{\link{overlay}} for details. Can be combined with vs().
\code{\link{vs}(\link{spl2D}(x.coord, y.coord, at, at.levels))}
can be used to fit a 2-dimensional spline (i.e. spatial modeling) using coordinates \code{x.coord} and \code{y.coord} (in numeric class). The 2D spline can be fitted at specific levels using the \code{at} and \code{at.levels} arguments. For example \emph{random=~vs(spl2D(x.coord=Row,y.coord=Range,at=FIELD))}.
\code{\link{vs}(\link{leg}(x,n),y)}
can be used to fit a random regression model using a numerical variable \code{x} that marks the trajectory for the random effect \code{y}. The leg function can be combined with the special functions \code{ds}, \code{us}, \code{at} and \code{cs}. For example \emph{random=~vs(leg(x,1),y)} or \emph{random=~vs(us(leg(x,1)),y)}.
\code{\link{vs}(x,Gtc=\link{fcm}(v))}
can be used to constrain fixed effects in the multi-response mixed models. This is a vector that specifies if the fixed effect is to be estimated for such trait. For example \emph{fixed=cbind(response.i, response.j)~vs(Rowf, Gtc=fcm(c(1,0)))} means that the fixed effect Rowf should only be estimated for the first response and the second should only have the intercept.
\strong{S3 methods}{
S3 methods are available for some parameter extraction such as \code{\link{fitted.mmer}}, \code{\link{residuals.mmer}}, \code{\link{summary.mmer}}, \code{\link{randef}}, \code{\link{coef.mmer}}, \code{\link{anova.mmer}}, \code{\link{plot.mmer}}, and \code{\link{predict.mmer}} to obtain adjusted means. In addition, the \code{\link{vpredict}} function can be used to estimate standard errors for linear combinations of variance components (i.e. ratios like h2).
}
\strong{Additional Functions}
Additional functions for genetic analysis have been included such as relationship matrix building (\code{\link{A.mat}}, \code{\link{D.mat}}, \code{\link{E.mat}}, \code{\link{H.mat}}), heritability (\code{\link{h2.fun}}), build a genotypic hybrid marker matrix (\code{\link{build.HMM}}), plot of genetic maps (\code{\link{map.plot}}), and manhattan plots (\code{\link{manhattan}}). If you need to build a pedigree-based relationship matrix use the \code{getA} function from the pedigreemm package.
\strong{Bug report and contact}{
If you have any technical questions or suggestions please post it in https://stackoverflow.com or https://stats.stackexchange.com and send me an email with the link at cova_ruber@live.com.mx
If you have any bug report please go to https://github.com/covaruber/sommer or send me an email to address it asap, just make sure you have read the vignettes carefully before sending your question.
}
\strong{Example Datasets}
The package has been equipped with several datasets to learn how to use the sommer package:
* \code{\link{DT_halfdiallel}}, \code{\link{DT_fulldiallel}} and \code{\link{DT_mohring}} datasets have examples to fit half and full diallel designs.
* \code{\link{DT_h2}} to calculate heritability
* \code{\link{DT_cornhybrids}} and \code{\link{DT_technow}} datasets to perform genomic prediction in hybrid single crosses
* \code{\link{DT_wheat}} dataset to do genomic prediction in single crosses in species displaying only additive effects.
* \code{\link{DT_cpdata}} dataset to fit genomic prediction models within a biparental population coming from 2 highly heterozygous parents including additive, dominance and epistatic effects.
* \code{\link{DT_polyploid}} to fit genomic prediction and GWAS analysis in polyploids.
* \code{\link{DT_gryphon}} data contains an example of an animal model including pedigree information.
* \code{\link{DT_btdata}} dataset contains an animal (birds) model.
* \code{\link{DT_legendre}} simulated dataset for random regression model.
* \code{\link{DT_sleepstudy}} dataset to know how to translate lme4 models to sommer models.
\strong{Models Enabled}
For details about the models enabled and more information about the covariance structures please check the help page of the package (\code{\link{sommer}}).
}
\value{
If all parameters are correctly indicated the program will return a list with the following information:
\item{Vi}{the inverse of the phenotypic variance matrix V^- = (ZGZ+R)^-1}
\item{sigma}{a list with the values of the variance-covariance components with one list element for each random effect.}
\item{sigma_scaled}{a list with the values of the scaled variance-covariance components with one list element for each random effect.}
\item{sigmaSE}{Hessian matrix containing the variance-covariance for the variance components. SE's can be obtained taking the square root of the diagonal values of the Hessian.}
\item{Beta}{a data frame for trait BLUEs (fixed effects).}
\item{VarBeta}{a variance-covariance matrix for trait BLUEs}
\item{U}{a list (one element for each random effect) with a data frame for trait BLUPs.}
\item{VarU}{a list (one element for each random effect) with the variance-covariance matrix for trait BLUPs.}
\item{PevU}{a list (one element for each random effect) with the predicted error variance matrix for trait BLUPs.}
\item{fitted}{ Fitted values y.hat=XB}
\item{residuals}{Residual values e = Y - XB}
\item{AIC}{Akaike information criterion}
\item{BIC}{Bayesian information criterion}
\item{convergence}{a TRUE/FALSE statement indicating if the model converged.}
\item{monitor}{The values of log-likelihood and variance-covariance components across iterations during the REML estimation.}
\item{method}{The method for estimation of variance components specified by the user.}
\item{call}{Formula for fixed, random and rcov used.}
\item{constraints}{constraints used in the mixed models for the random effects.}
\item{constraintsF}{constraints used in the mixed models for the fixed effects.}
\item{data}{dataset used in the model.}
\item{sigmaVector}{a vectorized version of the sigma element (variance-covariance components) to match easily the standard errors of the var-cov components stored in the element sigmaSE.}
}
\references{
Covarrubias-Pazaran G. Genome assisted prediction of quantitative traits using the R package sommer. PLoS ONE 2016, 11(6): doi:10.1371/journal.pone.0156744
Covarrubias-Pazaran G. 2018. Software update: Moving the R package sommer to multivariate mixed models for genome-assisted prediction. doi: https://doi.org/10.1101/354639
Bernardo Rex. 2010. Breeding for quantitative traits in plants. Second edition. Stemma Press. 390 pp.
Gilmour et al. 1995. Average Information REML: An efficient algorithm for variance parameter estimation in linear mixed models. Biometrics 51(4):1440-1450.
Kang et al. 2008. Efficient control of population structure in model organism association mapping. Genetics 178:1709-1723.
Lee, D.-J., Durban, M., and Eilers, P.H.C. (2013). Efficient two-dimensional smoothing with P-spline ANOVA mixed models and nested bases. Computational Statistics and Data Analysis, 61, 22 - 37.
Lee et al. 2015. MTG2: An efficient algorithm for multivariate linear mixed model analysis based on genomic information. Cold Spring Harbor. doi: http://dx.doi.org/10.1101/027201.
Maier et al. 2015. Joint analysis of psychiatric disorders increases accuracy of risk prediction for schizophrenia, bipolar disorder, and major depressive disorder. Am J Hum Genet; 96(2):283-294.
Rodriguez-Alvarez, Maria Xose, et al. Correcting for spatial heterogeneity in plant breeding experiments with P-splines. Spatial Statistics 23 (2018): 52-71.
Searle. 1993. Applying the EM algorithm to calculating ML and REML estimates of variance components. Paper invited for the 1993 American Statistical Association Meeting, San Francisco.
Yu et al. 2006. A unified mixed-model method for association mapping that accounts for multiple levels of relatedness. Genetics 38:203-208.
Tunnicliffe W. 1989. On the use of marginal likelihood in time series model estimation. JRSS 51(1):15-27.
Zhang et al. 2010. Mixed linear model approach adapted for genome-wide association studies. Nat. Genet. 42:355-360.
}
\author{
Giovanny Covarrubias-Pazaran
}
\examples{
####=========================================####
#### For CRAN time limitations most lines in the
#### examples are silenced with one '#' mark,
#### remove them and run the examples
####=========================================####
####=========================================####
#### EXAMPLES
#### Different models with sommer
####=========================================####
data(DT_example)
DT <- DT_example
head(DT)
####=========================================####
#### Univariate homogeneous variance models ####
####=========================================####
## Compound symmetry (CS) model
ans1 <- mmer(Yield~Env,
random= ~ Name + Env:Name,
rcov= ~ units,
data=DT)
summary(ans1)
####===========================================####
#### Univariate heterogeneous variance models ####
####===========================================####
## Compound symmetry (CS) + Diagonal (DIAG) model
ans2 <- mmer(Yield~Env,
random= ~Name + vs(ds(Env),Name),
rcov= ~ vs(ds(Env),units),
data=DT)
summary(ans2)
####===========================================####
#### Univariate unstructured variance models ####
####===========================================####
ans3 <- mmer(Yield~Env,
random=~ vs(us(Env),Name),
rcov=~vs(us(Env),units),
data=DT)
summary(ans3)
# ####==========================================####
# #### Multivariate homogeneous variance models ####
# ####==========================================####
#
# ## Multivariate Compound simmetry (CS) model
# DT$EnvName <- paste(DT$Env,DT$Name)
# ans4 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(Name, Gtc = unsm(2)) + vs(EnvName,Gtc = unsm(2)),
# rcov= ~ vs(units, Gtc = unsm(2)),
# data=DT)
# summary(ans4)
#
# ####=============================================####
# #### Multivariate heterogeneous variance models ####
# ####=============================================####
#
# ## Multivariate Compound simmetry (CS) + Diagonal (DIAG) model
# ans5 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(Name, Gtc = unsm(2)) + vs(ds(Env),Name, Gtc = unsm(2)),
# rcov= ~ vs(ds(Env),units, Gtc = unsm(2)),
# data=DT)
# summary(ans5)
#
# ####===========================================####
# #### Multivariate unstructured variance models ####
# ####===========================================####
#
# ans6 <- mmer(cbind(Yield, Weight) ~ Env,
# random= ~ vs(us(Env),Name, Gtc = unsm(2)),
# rcov= ~ vs(ds(Env),units, Gtc = unsm(2)),
# data=DT)
# summary(ans6)
#
# ####=========================================####
# ####=========================================####
# #### EXAMPLE SET 2
# #### 2 variance components
# #### one random effect with variance covariance structure
# ####=========================================####
# ####=========================================####
#
# data("DT_cpdata")
# DT <- DT_cpdata
# GT <- GT_cpdata
# MP <- MP_cpdata
# head(DT)
# GT[1:4,1:4]
# #### create the variance-covariance matrix
# A <- A.mat(GT)
# #### look at the data and fit the model
# mix1 <- mmer(Yield~1,
# random=~vs(id, Gu=A) + Rowf,
# rcov=~units,
# data=DT)
# summary(mix1)$varcomp
# #### calculate heritability
# vpredict(mix1, h1 ~ V1/(V1+V3) )
# #### multi trait example
# mix2 <- mmer(cbind(Yield,color)~1,
# random = ~ vs(id, Gu=A, Gtc = unsm(2)) + # unstructured at trait level
# vs(Rowf, Gtc=diag(2)) + # diagonal structure at trait level
# vs(Colf, Gtc=diag(2)), # diagonal structure at trait level
# rcov = ~ vs(units, Gtc = unsm(2)), # unstructured at trait level
# data=DT)
# summary(mix2)
#
# ####=========================================####
# #### EXAMPLE SET 3
# #### comparison with lmer, install 'lme4'
# #### and run the code below
# ####=========================================####
#
# #### lmer cannot use var-cov matrices so we will not
# #### use them in this comparison example
#
# library(lme4)
# library(sommer)
# data("DT_cornhybrids")
# DT <- DT_cornhybrids
# DTi <- DTi_cornhybrids
# GT <- GT_cornhybrids
#
# fm1 <- lmer(Yield ~ Location + (1|GCA1) + (1|GCA2) + (1|SCA),
# data=DT )
# out <- mmer(Yield ~ Location,
# random = ~ GCA1 + GCA2 + SCA,
# rcov = ~ units,
# data=DT)
# summary(fm1)
# summary(out)
# ### same BLUPs for GCA1, GCA2, SCA than lme4
# plot(out$U$GCA1$Yield, ranef(fm1)$GCA1[,1])
# plot(out$U$GCA2$Yield, ranef(fm1)$GCA2[,1])
# vv=which(abs(out$U$SCA$Yield) > 0)
# plot(out$U$SCA$Yield[vv], ranef(fm1)$SCA[,1])
#
# ### a more complex model specifying which locations
# head(DT)
# out2 <- mmer(Yield ~ Location,
# random = ~ vs(at(Location,c("3","4")),GCA2) +
# vs(at(Location,c("3","4")),SCA),
# rcov = ~ vs(ds(Location),units),
# data=DT)
# summary(out2)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_put_records}
\alias{kinesis_put_records}
\title{Writes multiple data records into a Kinesis data stream in a single call
(also referred to as a PutRecords request)}
\usage{
kinesis_put_records(Records, StreamName = NULL, StreamARN = NULL)
}
\arguments{
\item{Records}{[required] The records associated with the request.}
\item{StreamName}{The stream name associated with the request.}
\item{StreamARN}{The ARN of the stream.}
}
\description{
Writes multiple data records into a Kinesis data stream in a single call (also referred to as a \code{\link[=kinesis_put_records]{put_records}} request). Use this operation to send data into the stream for data ingestion and processing.
See \url{https://www.paws-r-sdk.com/docs/kinesis_put_records/} for full documentation.
}
\keyword{internal}
| /cran/paws.analytics/man/kinesis_put_records.Rd | permissive | paws-r/paws | R | false | true | 903 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_put_records}
\alias{kinesis_put_records}
\title{Writes multiple data records into a Kinesis data stream in a single call
(also referred to as a PutRecords request)}
\usage{
kinesis_put_records(Records, StreamName = NULL, StreamARN = NULL)
}
\arguments{
\item{Records}{[required] The records associated with the request.}
\item{StreamName}{The stream name associated with the request.}
\item{StreamARN}{The ARN of the stream.}
}
\description{
Writes multiple data records into a Kinesis data stream in a single call (also referred to as a \code{\link[=kinesis_put_records]{put_records}} request). Use this operation to send data into the stream for data ingestion and processing.
See \url{https://www.paws-r-sdk.com/docs/kinesis_put_records/} for full documentation.
}
\keyword{internal}
|
# Load the slider-analysis helper scripts, build the cleaned y-profile dataset
# for the files found in `directory`, and draw the per-series line plot
# (variant 2, plot_lines_2).
#
# NOTE(review): the source() calls use hard-coded absolute paths ('M:/...'),
# so this only runs on machines with that drive mapping - confirm before reuse.
characterization_lines <- function(directory) {
  # Helper definitions live in external scripts; they are re-sourced on each call.
  source('M:/Fanuc & Slider/sliderDataToComputer.r')
  source('M:/Fanuc & Slider/datasetReturn.r')
  source('M:/Fanuc & Slider/plots.r')
  # Read the raw y-profile data, attach the series labels, then plot.
  plot_lines_2(datasetwithseries(sliderdata_yprofile(directory)))
}
characterization <- function(directory)
{
source('M:/Fanuc & Slider/sliderDataToComputer.r')
source('M:/Fanuc & Slider/datasetReturn.r')
source('M:/Fanuc & Slider/plots.r')
oldset <- sliderdata_yprofile(directory)
cleanset <- datasetwithseries(oldset)
plot_lines(cleanset)
} | /Explatory Analysis/characterization.r | no_license | adic26/datasciencecoursera | R | false | false | 590 | r | characterization_lines <- function(directory)
{
source('M:/Fanuc & Slider/sliderDataToComputer.r')
source('M:/Fanuc & Slider/datasetReturn.r')
source('M:/Fanuc & Slider/plots.r')
oldset <- sliderdata_yprofile(directory)
cleanset <- datasetwithseries(oldset)
plot_lines_2(cleanset)
}
characterization <- function(directory)
{
source('M:/Fanuc & Slider/sliderDataToComputer.r')
source('M:/Fanuc & Slider/datasetReturn.r')
source('M:/Fanuc & Slider/plots.r')
oldset <- sliderdata_yprofile(directory)
cleanset <- datasetwithseries(oldset)
plot_lines(cleanset)
} |
# Tabulate complete-case sample sizes (rows with a non-missing phenotype) for
# every trait x ancestry combination, separately for the tuning and validation
# splits, and write the summary table to disk.
#
# NOTE(review): rm(list = ls()) clears the calling workspace; retained from the
# original script, but consider dropping it when sourcing interactively.
rm(list=ls())
library(bigreadr)
library(readr)
library(dplyr)

eth_all <- c("AFR", "AMR", "EAS", "EUR", "SAS")
trait_all <- c("bmi", "height", "HDL", "LDL", "logTG", "nonHDL", "TC", "TG")

# Root of the UKBB phenotype directory tree used by all reads and the write.
pheno_root <- "/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/"

# Number of rows with a non-missing phenotype for one trait/ancestry/split.
# `split` is "tuning" or "validation". The covariate join mirrors the original
# code even though only the phenotype column (y) decides completeness.
count_complete_cases <- function(trait, ethnic, split) {
  pheno <- fread2(paste0(pheno_root, trait, "/tuning+validation/", ethnic, "_", split, ".txt"))
  pheno <- pheno[, 1:2]
  covar <- read_tsv(paste0(pheno_root, "covariates/tuning+validation/", ethnic, "_", split, ".txt"))
  pheno <- left_join(pheno, covar)
  colnames(pheno) <- c("id", "y", "sex", "age", paste0("pc", 1:10))
  # Equivalent to subsetting on complete.cases(pheno$y) and taking nrow().
  sum(complete.cases(pheno$y))
}

res <- tibble()
for (trait in trait_all) {
  # One row per ancestry; columns hold the tuning and validation counts.
  res_t <- matrix(nrow = length(eth_all), ncol = 2)
  for (i in seq_along(eth_all)) {
    res_t[i, 1] <- count_complete_cases(trait, eth_all[i], "tuning")
    res_t[i, 2] <- count_complete_cases(trait, eth_all[i], "validation")
  }
  res_t <- data.frame(ethnic = eth_all, trait = trait, res_t)
  colnames(res_t) <- c("ethnic", "trait", "tuning", "validation")
  res <- rbind(res, res_t)
}
write_tsv(res, paste0(pheno_root, "complete_case_sample_size.txt"))
| /code/Jingning/UKBB_ancestry_prediction/phenotype/3_sample_size_by_pheno.R | no_license | andrewhaoyu/multi_ethnic | R | false | false | 1,474 | r |
rm(list=ls())
library(bigreadr)
library(readr)
library(dplyr)
eth_all <- c("AFR","AMR","EAS","EUR","SAS")
trait_all <- c("bmi","height","HDL","LDL","logTG","nonHDL","TC","TG")
res <- tibble()
for (t in 1:length(trait_all)){
trait <- trait_all[t]
res_t <- matrix(nrow=5,ncol=2)
for (i in 1:length(eth_all)){
ethnic <- eth_all[i]
pheno <- fread2(paste0("/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/",trait,"/tuning+validation/",ethnic,"_tuning.txt"))
pheno = pheno[,1:2]
covar <- read_tsv(paste0("/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/covariates/tuning+validation/",ethnic,"_tuning.txt"))
pheno <- left_join(pheno, covar)
colnames(pheno) = c('id','y','sex','age',paste0('pc',1:10))
pheno = pheno[complete.cases(pheno$y),]
res_t[i,1] <- nrow(pheno)
pheno <- fread2(paste0("/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/",trait,"/tuning+validation/",ethnic,"_validation.txt"))
pheno = pheno[,1:2]
covar <- read_tsv(paste0("/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/covariates/tuning+validation/",ethnic,"_validation.txt"))
pheno <- left_join(pheno, covar)
colnames(pheno) = c('id','y','sex','age',paste0('pc',1:10))
pheno = pheno[complete.cases(pheno$y),]
res_t[i,2] <- nrow(pheno)
}
res_t <- data.frame(ethnic=eth_all,trait=trait, res_t)
colnames(res_t) <- c("ethnic","trait","tuning","validation")
res <- rbind(res, res_t)
}
write_tsv(res, "/dcs04/nilanjan/data/jzhang2/UKBB/phenotype/complete_case_sample_size.txt")
|
# Plot 3: the three energy sub-metering channels for 2007-02-01/02 from the
# UCI household power consumption data, saved to plot3.png.
#
# NOTE(review): setwd() with a hard-coded home-relative path makes this script
# machine-specific - confirm the data file location before running elsewhere.
setwd("~/Exploratory Data Analysis")

# Read the raw data; "?" marks missing values in this dataset.
power_data <- read.table(
  file = "./household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?"
)

# Keep only the two target days (dates are stored as d/m/Y strings).
two_days <- subset(power_data, Date %in% c("1/2/2007", "2/2/2007"))

# Combine the Date and Time columns into a single POSIXct timestamp.
stamp_text <- paste(as.Date(two_days$Date, format = "%d/%m/%Y"), two_days$Time)
two_days$Date_Time <- as.POSIXct(stamp_text)

# Draw the three sub-metering series on one set of axes.
with(two_days, {
  plot(Date_Time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(Date_Time, Sub_metering_2, col = "red")
  lines(Date_Time, Sub_metering_3, col = "blue")
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the on-screen device to a PNG file, then close the PNG device.
dev.copy(png, file = "plot3.png")
dev.off()
| /plot3.R | no_license | WittyShiba/ExData_Plotting1 | R | false | false | 924 | r | # Plot 3
# Recreate the sub-metering figure: black/red/blue line traces for
# Sub_metering_1..3 over 2007-02-01 and 2007-02-02, exported to plot3.png.
# NOTE(review): relies on setwd() to a user-specific folder - verify the data
# file path before running on another machine.
setwd("~/Exploratory Data Analysis")

raw <- read.table("./household_power_consumption.txt",
                  header = TRUE, sep = ";", na.strings = "?")

# Restrict to the two days of interest; Date is a d/m/Y character column.
feb_days <- subset(raw, Date %in% c("1/2/2007", "2/2/2007"))

# Build a proper timestamp from the separate Date and Time columns.
feb_days$Date_Time <- as.POSIXct(
  paste(as.Date(feb_days$Date, format = "%d/%m/%Y"), feb_days$Time)
)

# All three series share the same axes; legend matches the line colors.
with(feb_days, {
  plot(Date_Time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(Date_Time, Sub_metering_2, col = "red")
  lines(Date_Time, Sub_metering_3, col = "blue")
})
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Snapshot the screen device into a PNG file and close it.
dev.copy(png, file = "plot3.png")
dev.off()
|
library(shiny)

# Render the name of every HTML tag helper that shiny exposes via `tags`
# as an unordered list, so the available tag names can be browsed in the UI.
app_ui <- fluidPage(
  tags$ul(
    lapply(names(tags), tags$li)
  )
)

# The page is static, so no server-side logic is required.
app_server <- function(input, output, session) {}

shinyApp(ui = app_ui, server = app_server)
| /ui/html/tags/App.R | no_license | ReneNyffenegger/temp-shiny | R | false | false | 276 | r | library(shiny)
shinyApp(
ui = fluidPage (
#
# Show all defined HTML tags that can be
# used in shiny in an unordered list:
#
tags$ul(
lapply(names(tags), tags$li)
)
),
server = function(input, output, session) {}
);
|
# Auto-generated example runner for ORCME::resampleORCME, extracted from the
# package's Rd examples; exercises the resampling-based heterogeneity estimate.
library(ORCME)
### Name: resampleORCME
### Title: Estimation of the proportion of the heterogeneity in the
### observed data for clustering
### Aliases: resampleORCME
### Keywords: cluster
### ** Examples
# Example dose-response and gene-expression datasets shipped with ORCME.
data(doseData)
data(geneData)
# monotoneDirection() classifies genes by trend direction; `incData` is
# presumably the increasing-direction subset - confirm in the ORCME docs.
dirData <- monotoneDirection(geneData = geneData,doseData = doseData)
incData <- as.data.frame(dirData$incData)
# Candidate lambda penalty values to resample over.
lambdaVector <- c(0.05,0.50,0.95)
## No test:
resampleORCME(clusteringData=incData, lambdaVector=lambdaVector, robust=FALSE)
## End(No test)
| /data/genthat_extracted_code/ORCME/examples/resampleORCME.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 521 | r | library(ORCME)
### Name: resampleORCME
### Title: Estimation of the proportion of the heterogeneity in the
### observed data for clustering
### Aliases: resampleORCME
### Keywords: cluster
### ** Examples
data(doseData)
data(geneData)
dirData <- monotoneDirection(geneData = geneData,doseData = doseData)
incData <- as.data.frame(dirData$incData)
lambdaVector <- c(0.05,0.50,0.95)
## No test:
resampleORCME(clusteringData=incData, lambdaVector=lambdaVector, robust=FALSE)
## End(No test)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Skip this whole test file when the Arrow build lacks dataset support.
skip_if_not_available("dataset")
library(dplyr, warn.conflicts = FALSE)
suppressPackageStartupMessages(library(bit64))
# Shared fixture: a small example data frame supplied by the test helpers.
tbl <- example_data
test_that("explicit type conversions with cast()", {
  small_int <- 12L
  big_int <- bit64::as.integer64(10)

  # Every destination type that cast() should reach from an integer input.
  target_types <- c(
    int8(), int16(), int32(), int64(),
    uint8(), uint16(), uint32(), uint64(),
    float32(), float64(),
    double(), # not actually a type, a base R function but should be alias for float64
    string()
  )

  # Casting `value` to `to_type` must yield a column of exactly that type.
  expect_casts_to <- function(value, to_type) {
    casted <- Table$create(x = value) %>%
      transmute(x = cast(x, to_type)) %>%
      compute()
    expect_type_equal(casted$schema[[1]]$type, as_type(to_type))
  }

  for (to_type in target_types) {
    expect_casts_to(small_int, to_type)
    expect_casts_to(big_int, to_type)
  }

  # Arrow errors when truncating floats...
  expect_error(
    expect_type_equal(
      object = {
        t1 <- Table$create(pi = pi) %>%
          transmute(three = cast(pi, int32())) %>%
          compute()
        t1$schema[[1]]$type
      },
      int32()
    ),
    "truncated"
  )
  # ... unless safe = FALSE (or allow_float_truncate = TRUE)
  expect_type_equal(
    object = {
      t1 <- Table$create(pi = pi) %>%
        transmute(three = cast(pi, int32(), safe = FALSE)) %>%
        compute()
      t1$schema[[1]]$type
    },
    int32()
  )
})
test_that("explicit type conversions with as.*()", {
# bit64 provides as.integer64(), exercised below.
library(bit64)
# Integer and double columns converted via as.character/double/integer/numeric
# must match dplyr's results on the shared `tbl` fixture.
compare_dplyr_binding(
.input %>%
transmute(
int2chr = as.character(int),
int2dbl = as.double(int),
int2int = as.integer(int),
int2num = as.numeric(int),
dbl2chr = as.character(dbl),
dbl2dbl = as.double(dbl),
dbl2int = as.integer(dbl),
dbl2num = as.numeric(dbl),
) %>%
collect(),
tbl
)
# Character columns holding numeric text parse to each numeric flavor.
compare_dplyr_binding(
.input %>%
transmute(
chr2chr = as.character(chr),
chr2dbl = as.double(chr),
chr2int = as.integer(chr),
chr2num = as.numeric(chr)
) %>%
collect(),
tibble(chr = c("1", "2", "3"))
)
# 64-bit integer conversions from character, double, and integer64 inputs.
compare_dplyr_binding(
.input %>%
transmute(
chr2i64 = as.integer64(chr),
dbl2i64 = as.integer64(dbl),
i642i64 = as.integer64(i64),
) %>%
collect(),
tibble(chr = "10000000000", dbl = 10000000000, i64 = as.integer64(1e10))
)
# as.logical() accepts "TRUE"/"FALSE" (any case) and nonzero numbers.
compare_dplyr_binding(
.input %>%
transmute(
chr2lgl = as.logical(chr),
dbl2lgl = as.logical(dbl),
int2lgl = as.logical(int)
) %>%
collect(),
tibble(
chr = c("TRUE", "FALSE", "true", "false"),
dbl = c(1, 0, -99, 0),
int = c(1L, 0L, -99L, 0L)
)
)
# NA values must survive every conversion; logical-to-character needs case
# normalization because Arrow spells the values "true"/"false".
compare_dplyr_binding(
.input %>%
transmute(
dbl2chr = as.character(dbl),
dbl2dbl = as.double(dbl),
dbl2int = as.integer(dbl),
dbl2lgl = as.logical(dbl),
int2chr = as.character(int),
int2dbl = as.double(int),
int2int = as.integer(int),
int2lgl = as.logical(int),
lgl2chr = as.character(lgl), # Arrow returns "true", "false" here ...
lgl2dbl = as.double(lgl),
lgl2int = as.integer(lgl),
lgl2lgl = as.logical(lgl)
) %>%
collect() %>%
# need to use toupper() *after* collect() or else skip if utf8proc not available
mutate(lgl2chr = toupper(lgl2chr)), # ... but we need "TRUE", "FALSE"
tibble(
dbl = c(1, 0, NA_real_),
int = c(1L, 0L, NA_integer_),
lgl = c(TRUE, FALSE, NA)
)
)
})
test_that("is.finite(), is.infinite(), is.nan()", {
  # Edge cases: denormal minimum, double maximum, zero, NA, NaN, both infinities.
  edge_cases <- tibble(
    x = c(
      -4.94065645841246544e-324, 1.79769313486231570e+308, 0,
      NA_real_, NaN, Inf, -Inf
    )
  )
  compare_dplyr_binding(
    .input %>%
      transmute(
        is_fin = is.finite(x),
        is_inf = is.infinite(x)
      ) %>%
      collect(),
    edge_cases
  )
  # is.nan() evaluates to FALSE on NA_real_ (ARROW-12850)
  compare_dplyr_binding(
    .input %>%
      transmute(
        is_nan = is.nan(x)
      ) %>%
      collect(),
    edge_cases
  )
})
test_that("is.na() evaluates to TRUE on NaN (ARROW-12055)", {
  # Unlike is.nan(), is.na() must flag both NA_real_ and NaN entries.
  values_with_missing <- tibble(x = c(1.1, 2.2, NA_real_, 4.4, NaN, 6.6, 7.7))
  compare_dplyr_binding(
    .input %>%
      transmute(is_na = is.na(x)) %>%
      collect(),
    values_with_missing
  )
})
test_that("type checks with is() giving Arrow types", {
  # Each column of the created table is checked with is() against every
  # candidate Arrow type; the flattened logical matrix below is the
  # expected result, listed column-group by column-group.
  # with class2=DataType
  expect_equal(
    Table$create(
      i32 = Array$create(1, int32()),
      dec = Array$create(pi)$cast(decimal(3, 2)),
      dec128 = Array$create(pi)$cast(decimal128(3, 2)),
      dec256 = Array$create(pi)$cast(decimal256(3, 2)),
      f64 = Array$create(1.1, float64()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      i32_is_i32 = is(i32, int32()),
      i32_is_dec = is(i32, decimal(3, 2)),
      i32_is_dec128 = is(i32, decimal128(3, 2)),
      i32_is_dec256 = is(i32, decimal256(3, 2)),
      i32_is_i64 = is(i32, float64()),
      i32_is_str = is(i32, arrow::string()),
      dec_is_i32 = is(dec, int32()),
      dec_is_dec = is(dec, decimal(3, 2)),
      dec_is_dec128 = is(dec, decimal128(3, 2)),
      dec_is_dec256 = is(dec, decimal256(3, 2)),
      dec_is_i64 = is(dec, float64()),
      dec_is_str = is(dec, arrow::string()),
      dec128_is_i32 = is(dec128, int32()),
      dec128_is_dec128 = is(dec128, decimal128(3, 2)),
      dec128_is_dec256 = is(dec128, decimal256(3, 2)),
      dec128_is_i64 = is(dec128, float64()),
      dec128_is_str = is(dec128, arrow::string()),
      # FIX: these five columns previously re-tested `dec128` instead of
      # `dec256`, so the dec256 column was never actually checked.
      dec256_is_i32 = is(dec256, int32()),
      dec256_is_dec128 = is(dec256, decimal128(3, 2)),
      dec256_is_dec256 = is(dec256, decimal256(3, 2)),
      dec256_is_i64 = is(dec256, float64()),
      dec256_is_str = is(dec256, arrow::string()),
      f64_is_i32 = is(f64, int32()),
      f64_is_dec = is(f64, decimal(3, 2)),
      f64_is_dec128 = is(f64, decimal128(3, 2)),
      f64_is_dec256 = is(f64, decimal256(3, 2)),
      f64_is_i64 = is(f64, float64()),
      f64_is_str = is(f64, arrow::string()),
      str_is_i32 = is(str, int32()),
      str_is_dec128 = is(str, decimal128(3, 2)),
      str_is_dec256 = is(str, decimal256(3, 2)),
      str_is_i64 = is(str, float64()),
      str_is_str = is(str, arrow::string())
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    c(
      TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, # i32
      FALSE, TRUE, TRUE, FALSE, FALSE, FALSE, # dec: decimal() matches decimal128()
      FALSE, TRUE, FALSE, FALSE, FALSE, # dec128
      FALSE, FALSE, TRUE, FALSE, FALSE, # dec256 (updated alongside the fix above)
      FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, # f64
      FALSE, FALSE, FALSE, FALSE, TRUE # str
    )
  )
  # with class2=string
  expect_equal(
    Table$create(
      i32 = Array$create(1, int32()),
      f64 = Array$create(1.1, float64()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      i32_is_i32 = is(i32, "int32"),
      i32_is_i64 = is(i32, "double"),
      i32_is_str = is(i32, "string"),
      f64_is_i32 = is(f64, "int32"),
      f64_is_i64 = is(f64, "double"),
      f64_is_str = is(f64, "string"),
      str_is_i32 = is(str, "int32"),
      str_is_i64 = is(str, "double"),
      str_is_str = is(str, "string")
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    c(TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE)
  )
  # with class2=string alias; expected matrix is the 5x5 identity since each
  # column matches exactly its own alias.
  expect_equal(
    Table$create(
      f16 = Array$create(NA_real_, halffloat()),
      f32 = Array$create(1.1, float()),
      f64 = Array$create(2.2, float64()),
      lgl = Array$create(TRUE, bool()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      f16_is_f16 = is(f16, "float16"),
      f16_is_f32 = is(f16, "float32"),
      f16_is_f64 = is(f16, "float64"),
      f16_is_lgl = is(f16, "boolean"),
      f16_is_str = is(f16, "utf8"),
      f32_is_f16 = is(f32, "float16"),
      f32_is_f32 = is(f32, "float32"),
      f32_is_f64 = is(f32, "float64"),
      f32_is_lgl = is(f32, "boolean"),
      f32_is_str = is(f32, "utf8"),
      f64_is_f16 = is(f64, "float16"),
      f64_is_f32 = is(f64, "float32"),
      f64_is_f64 = is(f64, "float64"),
      f64_is_lgl = is(f64, "boolean"),
      f64_is_str = is(f64, "utf8"),
      lgl_is_f16 = is(lgl, "float16"),
      lgl_is_f32 = is(lgl, "float32"),
      lgl_is_f64 = is(lgl, "float64"),
      lgl_is_lgl = is(lgl, "boolean"),
      lgl_is_str = is(lgl, "utf8"),
      str_is_f16 = is(str, "float16"),
      str_is_f32 = is(str, "float32"),
      str_is_f64 = is(str, "float64"),
      str_is_lgl = is(str, "boolean"),
      str_is_str = is(str, "utf8")
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    c(
      TRUE, FALSE, FALSE, FALSE, FALSE,
      FALSE, TRUE, FALSE, FALSE, FALSE,
      FALSE, FALSE, TRUE, FALSE, FALSE,
      FALSE, FALSE, FALSE, TRUE, FALSE,
      FALSE, FALSE, FALSE, FALSE, TRUE
    )
  )
})
test_that("type checks with is() giving R types", {
# bit64 supplies the "integer64" class checked below.
library(bit64)
# Every chr/dbl/fct/int/lgl column in `tbl` is tested against each candidate
# R class name; Arrow's is() must agree with dplyr's answer column by column.
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is(chr, "character"),
chr_is_fct = is(chr, "factor"),
chr_is_int = is(chr, "integer"),
chr_is_i64 = is(chr, "integer64"),
chr_is_lst = is(chr, "list"),
chr_is_lgl = is(chr, "logical"),
chr_is_num = is(chr, "numeric"),
dbl_is_chr = is(dbl, "character"),
dbl_is_fct = is(dbl, "factor"),
dbl_is_int = is(dbl, "integer"),
dbl_is_i64 = is(dbl, "integer64"),
dbl_is_lst = is(dbl, "list"),
dbl_is_lgl = is(dbl, "logical"),
dbl_is_num = is(dbl, "numeric"),
fct_is_chr = is(fct, "character"),
fct_is_fct = is(fct, "factor"),
fct_is_int = is(fct, "integer"),
fct_is_i64 = is(fct, "integer64"),
fct_is_lst = is(fct, "list"),
fct_is_lgl = is(fct, "logical"),
fct_is_num = is(fct, "numeric"),
int_is_chr = is(int, "character"),
int_is_fct = is(int, "factor"),
int_is_int = is(int, "integer"),
int_is_i64 = is(int, "integer64"),
int_is_lst = is(int, "list"),
int_is_lgl = is(int, "logical"),
int_is_num = is(int, "numeric"),
lgl_is_chr = is(lgl, "character"),
lgl_is_fct = is(lgl, "factor"),
lgl_is_int = is(lgl, "integer"),
lgl_is_i64 = is(lgl, "integer64"),
lgl_is_lst = is(lgl, "list"),
lgl_is_lgl = is(lgl, "logical"),
lgl_is_num = is(lgl, "numeric")
) %>%
collect(),
tbl
)
# integer64 and list columns need their own fixture; the commented-out checks
# record cases where Arrow's desired answer differs from bit64's.
compare_dplyr_binding(
.input %>%
transmute(
i64_is_chr = is(i64, "character"),
i64_is_fct = is(i64, "factor"),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_int = is(i64, "integer"),
i64_is_i64 = is(i64, "integer64"),
i64_is_lst = is(i64, "list"),
i64_is_lgl = is(i64, "logical"),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_num = is(i64, "numeric"),
lst_is_chr = is(lst, "character"),
lst_is_fct = is(lst, "factor"),
lst_is_int = is(lst, "integer"),
lst_is_i64 = is(lst, "integer64"),
lst_is_lst = is(lst, "list"),
lst_is_lgl = is(lst, "logical"),
lst_is_num = is(lst, "numeric")
) %>%
collect(),
tibble(
i64 = as.integer64(1:3),
lst = list(c("a", "b"), c("d", "e"), c("f", "g"))
)
)
})
test_that("type checks with is.*()", {
library(bit64)
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is.character(chr),
chr_is_dbl = is.double(chr),
chr_is_fct = is.factor(chr),
chr_is_int = is.integer(chr),
chr_is_i64 = is.integer64(chr),
chr_is_lst = is.list(chr),
chr_is_lgl = is.logical(chr),
chr_is_num = is.numeric(chr),
dbl_is_chr = is.character(dbl),
dbl_is_dbl = is.double(dbl),
dbl_is_fct = is.factor(dbl),
dbl_is_int = is.integer(dbl),
dbl_is_i64 = is.integer64(dbl),
dbl_is_lst = is.list(dbl),
dbl_is_lgl = is.logical(dbl),
dbl_is_num = is.numeric(dbl),
fct_is_chr = is.character(fct),
fct_is_dbl = is.double(fct),
fct_is_fct = is.factor(fct),
fct_is_int = is.integer(fct),
fct_is_i64 = is.integer64(fct),
fct_is_lst = is.list(fct),
fct_is_lgl = is.logical(fct),
fct_is_num = is.numeric(fct),
int_is_chr = is.character(int),
int_is_dbl = is.double(int),
int_is_fct = is.factor(int),
int_is_int = is.integer(int),
int_is_i64 = is.integer64(int),
int_is_lst = is.list(int),
int_is_lgl = is.logical(int),
int_is_num = is.numeric(int),
lgl_is_chr = is.character(lgl),
lgl_is_dbl = is.double(lgl),
lgl_is_fct = is.factor(lgl),
lgl_is_int = is.integer(lgl),
lgl_is_i64 = is.integer64(lgl),
lgl_is_lst = is.list(lgl),
lgl_is_lgl = is.logical(lgl),
lgl_is_num = is.numeric(lgl)
) %>%
collect(),
tbl
)
compare_dplyr_binding(
.input %>%
transmute(
i64_is_chr = is.character(i64),
# TODO: investigate why this is not matching when testthat runs it
# i64_is_dbl = is.double(i64),
i64_is_fct = is.factor(i64),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_int = is.integer(i64),
i64_is_i64 = is.integer64(i64),
i64_is_lst = is.list(i64),
i64_is_lgl = is.logical(i64),
i64_is_num = is.numeric(i64),
lst_is_chr = is.character(lst),
lst_is_dbl = is.double(lst),
lst_is_fct = is.factor(lst),
lst_is_int = is.integer(lst),
lst_is_i64 = is.integer64(lst),
lst_is_lst = is.list(lst),
lst_is_lgl = is.logical(lst),
lst_is_num = is.numeric(lst)
) %>%
collect(),
tibble(
i64 = as.integer64(1:3),
lst = list(c("a", "b"), c("d", "e"), c("f", "g"))
)
)
})
# rlang's type predicates (is_character(), is_double(), is_integer(), is_list(),
# is_logical()) must evaluate to the same values inside an Arrow dplyr pipeline
# as they do on a plain R data frame; compare_dplyr_binding() runs the pipeline
# both ways and compares the collected results against `tbl`.
test_that("type checks with is_*()", {
  library(rlang, warn.conflicts = FALSE)
  compare_dplyr_binding(
    .input %>%
      transmute(
        # one output column per (input column type) x (predicate) combination
        chr_is_chr = is_character(chr),
        chr_is_dbl = is_double(chr),
        chr_is_int = is_integer(chr),
        chr_is_lst = is_list(chr),
        chr_is_lgl = is_logical(chr),
        dbl_is_chr = is_character(dbl),
        dbl_is_dbl = is_double(dbl),
        dbl_is_int = is_integer(dbl),
        dbl_is_lst = is_list(dbl),
        dbl_is_lgl = is_logical(dbl),
        int_is_chr = is_character(int),
        int_is_dbl = is_double(int),
        int_is_int = is_integer(int),
        int_is_lst = is_list(int),
        int_is_lgl = is_logical(int),
        lgl_is_chr = is_character(lgl),
        lgl_is_dbl = is_double(lgl),
        lgl_is_int = is_integer(lgl),
        lgl_is_lst = is_list(lgl),
        lgl_is_lgl = is_logical(lgl)
      ) %>%
      collect(),
    tbl
  )
})
# Type checks must also work when applied to computed expressions (casts,
# arithmetic, comparisons) rather than bare column references.
test_that("type checks on expressions", {
  compare_dplyr_binding(
    .input %>%
      transmute(
        a = is.character(as.character(int)),
        b = is.integer(as.character(int)),
        c = is.integer(int + int),
        d = is.double(int + dbl),
        e = is.logical(dbl > pi)
      ) %>%
      collect(),
    tbl
  )
  # the code in the expectation below depends on RE2
  skip_if_not_available("re2")
  compare_dplyr_binding(
    .input %>%
      transmute(
        a = is.logical(grepl("[def]", chr))
      ) %>%
      collect(),
    tbl
  )
})
# Type-check bindings must also evaluate correctly on R scalar literals written
# directly in the dplyr expression (not on columns of the data).
test_that("type checks on R scalar literals", {
  compare_dplyr_binding(
    .input %>%
      transmute(
        chr_is_chr = is.character("foo"),
        int_is_chr = is.character(42L),
        int_is_int = is.integer(42L),
        chr_is_int = is.integer("foo"),
        dbl_is_num = is.numeric(3.14159),
        int_is_num = is.numeric(42L),
        chr_is_num = is.numeric("foo"),
        dbl_is_dbl = is.double(3.14159),
        chr_is_dbl = is.double("foo"),
        lgl_is_lgl = is.logical(TRUE),
        chr_is_lgl = is.logical("foo"),
        fct_is_fct = is.factor(factor("foo", levels = c("foo", "bar", "baz"))),
        chr_is_fct = is.factor("foo"),
        lst_is_lst = is.list(list(c(a = "foo", b = "bar"))),
        chr_is_lst = is.list("foo")
      ) %>%
      collect(),
    tbl
  )
})
test_that("as.factor()/dictionary_encode()", {
skip("ARROW-12632: ExecuteScalarExpression cannot Execute non-scalar expression")
df1 <- tibble(x = c("C", "D", "B", NA, "D", "B", "S", "A", "B", "Z", "B"))
df2 <- tibble(x = c(5, 5, 5, NA, 2, 3, 6, 8))
compare_dplyr_binding(
.input %>%
transmute(x = as.factor(x)) %>%
collect(),
df1
)
expect_warning(
compare_dplyr_binding(
.input %>%
transmute(x = as.factor(x)) %>%
collect(),
df2
),
"Coercing dictionary values to R character factor levels"
)
# dictionary values with default null encoding behavior ("mask") omits
# nulls from the dictionary values
expect_equal(
object = {
rb1 <- df1 %>%
record_batch() %>%
transmute(x = dictionary_encode(x)) %>%
compute()
dict <- rb1$x$dictionary()
as.vector(dict$Take(dict$SortIndices()))
},
sort(unique(df1$x), na.last = NA)
)
# dictionary values with "encode" null encoding behavior includes nulls in
# the dictionary values
expect_equal(
object = {
rb1 <- df1 %>%
record_batch() %>%
transmute(x = dictionary_encode(x, null_encoding_behavior = "encode")) %>%
compute()
dict <- rb1$x$dictionary()
as.vector(dict$Take(dict$SortIndices()))
},
sort(unique(df1$x), na.last = TRUE)
)
})
# Conversions where Arrow's behavior intentionally differs from base R.
# Each compare_dplyr_binding() below is expected to FAIL (hence expect_error()):
# the Arrow result deliberately does not match what base R produces.
test_that("bad explicit type conversions with as.*()", {
  # Arrow returns lowercase "true", "false" (instead of "TRUE", "FALSE" like R)
  expect_error(
    compare_dplyr_binding(
      .input %>%
        transmute(lgl2chr = as.character(lgl)) %>%
        collect(),
      tibble(lgl = c(TRUE, FALSE, NA))
    )
  )
  # Arrow fails to parse these strings as numbers (instead of returning NAs with
  # a warning like R does)
  expect_error(
    expect_warning(
      compare_dplyr_binding(
        .input %>%
          transmute(chr2num = as.numeric(chr)) %>%
          collect(),
        tibble(chr = c("l.O", "S.S", ""))
      )
    )
  )
  # Arrow fails to parse these strings as Booleans (instead of returning NAs
  # like R does)
  expect_error(
    compare_dplyr_binding(
      .input %>%
        transmute(chr2lgl = as.logical(chr)) %>%
        collect(),
      tibble(chr = c("TRU", "FAX", ""))
    )
  )
})
test_that("structs/nested data frames/tibbles can be created", {
df <- tibble(regular_col1 = 1L, regular_col2 = "a")
compare_dplyr_binding(
.input %>%
transmute(
df_col = tibble(
regular_col1 = regular_col1,
regular_col2 = regular_col2
)
) %>%
collect(),
df
)
# check auto column naming
compare_dplyr_binding(
.input %>%
transmute(
df_col = tibble(regular_col1, regular_col2)
) %>%
collect(),
df
)
# ...and that other arguments are not supported
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = tibble(char_col, .rows = 1L)),
".rows not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = tibble(char_col, .name_repair = "universal")),
".name_repair not supported in Arrow"
)
# check that data.frame is mapped too
# stringsAsFactors default is TRUE in R 3.6, which is still tested on CI
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col2, stringsAsFactors = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# check with fix.empty.names = FALSE
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, fix.empty.names = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# check with check.names = TRUE and FALSE
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col1, check.names = TRUE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col1, check.names = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# ...and that other arguments are not supported
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, stringsAsFactors = TRUE)),
"stringsAsFactors = TRUE not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, row.names = 1L)),
"row.names not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, check.rows = TRUE)),
"check.rows not supported in Arrow"
)
})
# A struct column can be created from a scalar tibble literal, or from an
# existing data frame captured from the calling environment; either way it is
# broadcast to the number of rows in the input (here 2).
test_that("nested structs can be created from scalars and existing data frames", {
  compare_dplyr_binding(
    .input %>%
      transmute(
        df_col = tibble(b = 3)
      ) %>%
      collect(),
    tibble(a = 1:2)
  )
  # technically this is handled by Scalar$create() since there is no
  # call to data.frame or tibble() within a dplyr verb
  existing_data_frame <- tibble(b = 3)
  compare_dplyr_binding(
    .input %>%
      transmute(
        df_col = existing_data_frame
      ) %>%
      collect(),
    tibble(a = 1:2)
  )
})
| /r/tests/testthat/test-dplyr-funcs-type.R | permissive | mbrobbel/arrow | R | false | false | 22,508 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
skip_if_not_available("dataset")
library(dplyr, warn.conflicts = FALSE)
suppressPackageStartupMessages(library(bit64))
tbl <- example_data
test_that("explicit type conversions with cast()", {
num_int32 <- 12L
num_int64 <- bit64::as.integer64(10)
int_types <- c(int8(), int16(), int32(), int64())
uint_types <- c(uint8(), uint16(), uint32(), uint64())
float_types <- c(float32(), float64())
types <- c(
int_types,
uint_types,
float_types,
double(), # not actually a type, a base R function but should be alias for float64
string()
)
for (type in types) {
expect_type_equal(
object = {
t1 <- Table$create(x = num_int32) %>%
transmute(x = cast(x, type)) %>%
compute()
t1$schema[[1]]$type
},
as_type(type)
)
expect_type_equal(
object = {
t1 <- Table$create(x = num_int64) %>%
transmute(x = cast(x, type)) %>%
compute()
t1$schema[[1]]$type
},
as_type(type)
)
}
# Arrow errors when truncating floats...
expect_error(
expect_type_equal(
object = {
t1 <- Table$create(pi = pi) %>%
transmute(three = cast(pi, int32())) %>%
compute()
t1$schema[[1]]$type
},
int32()
),
"truncated"
)
# ... unless safe = FALSE (or allow_float_truncate = TRUE)
expect_type_equal(
object = {
t1 <- Table$create(pi = pi) %>%
transmute(three = cast(pi, int32(), safe = FALSE)) %>%
compute()
t1$schema[[1]]$type
},
int32()
)
})
test_that("explicit type conversions with as.*()", {
library(bit64)
compare_dplyr_binding(
.input %>%
transmute(
int2chr = as.character(int),
int2dbl = as.double(int),
int2int = as.integer(int),
int2num = as.numeric(int),
dbl2chr = as.character(dbl),
dbl2dbl = as.double(dbl),
dbl2int = as.integer(dbl),
dbl2num = as.numeric(dbl),
) %>%
collect(),
tbl
)
compare_dplyr_binding(
.input %>%
transmute(
chr2chr = as.character(chr),
chr2dbl = as.double(chr),
chr2int = as.integer(chr),
chr2num = as.numeric(chr)
) %>%
collect(),
tibble(chr = c("1", "2", "3"))
)
compare_dplyr_binding(
.input %>%
transmute(
chr2i64 = as.integer64(chr),
dbl2i64 = as.integer64(dbl),
i642i64 = as.integer64(i64),
) %>%
collect(),
tibble(chr = "10000000000", dbl = 10000000000, i64 = as.integer64(1e10))
)
compare_dplyr_binding(
.input %>%
transmute(
chr2lgl = as.logical(chr),
dbl2lgl = as.logical(dbl),
int2lgl = as.logical(int)
) %>%
collect(),
tibble(
chr = c("TRUE", "FALSE", "true", "false"),
dbl = c(1, 0, -99, 0),
int = c(1L, 0L, -99L, 0L)
)
)
compare_dplyr_binding(
.input %>%
transmute(
dbl2chr = as.character(dbl),
dbl2dbl = as.double(dbl),
dbl2int = as.integer(dbl),
dbl2lgl = as.logical(dbl),
int2chr = as.character(int),
int2dbl = as.double(int),
int2int = as.integer(int),
int2lgl = as.logical(int),
lgl2chr = as.character(lgl), # Arrow returns "true", "false" here ...
lgl2dbl = as.double(lgl),
lgl2int = as.integer(lgl),
lgl2lgl = as.logical(lgl)
) %>%
collect() %>%
# need to use toupper() *after* collect() or else skip if utf8proc not available
mutate(lgl2chr = toupper(lgl2chr)), # ... but we need "TRUE", "FALSE"
tibble(
dbl = c(1, 0, NA_real_),
int = c(1L, 0L, NA_integer_),
lgl = c(TRUE, FALSE, NA)
)
)
})
# Floating-point classification helpers must agree with base R across the
# extreme values of double precision: the smallest subnormal, the largest
# finite double, zero, NA, NaN and both infinities.
test_that("is.finite(), is.infinite(), is.nan()", {
  df <- tibble(x = c(
    -4.94065645841246544e-324, 1.79769313486231570e+308, 0,
    NA_real_, NaN, Inf, -Inf
  ))
  compare_dplyr_binding(
    .input %>%
      transmute(
        is_fin = is.finite(x),
        is_inf = is.infinite(x)
      ) %>%
      collect(),
    df
  )
  # is.nan() evaluates to FALSE on NA_real_ (ARROW-12850)
  compare_dplyr_binding(
    .input %>%
      transmute(
        is_nan = is.nan(x)
      ) %>%
      collect(),
    df
  )
})
# Regression test for ARROW-12055: Arrow's is.na() binding must treat NaN as
# missing, matching base R, where is.na(NaN) is TRUE.
test_that("is.na() evaluates to TRUE on NaN (ARROW-12055)", {
  mixed_vals <- tibble(x = c(1.1, 2.2, NA_real_, 4.4, NaN, 6.6, 7.7))
  compare_dplyr_binding(
    .input %>% transmute(is_na = is.na(x)) %>% collect(),
    mixed_vals
  )
})
# is() must work inside Arrow dplyr pipelines with class2 given as an Arrow
# DataType object, as a type-name string, or as a type-name alias. Each result
# table is collected, transposed and flattened so all cells can be compared
# against a single logical vector (row-major over the transmute outputs).
test_that("type checks with is() giving Arrow types", {
  # with class2=DataType
  expect_equal(
    Table$create(
      i32 = Array$create(1, int32()),
      dec = Array$create(pi)$cast(decimal(3, 2)),
      dec128 = Array$create(pi)$cast(decimal128(3, 2)),
      dec256 = Array$create(pi)$cast(decimal256(3, 2)),
      f64 = Array$create(1.1, float64()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      i32_is_i32 = is(i32, int32()),
      i32_is_dec = is(i32, decimal(3, 2)),
      i32_is_dec128 = is(i32, decimal128(3, 2)),
      i32_is_dec256 = is(i32, decimal256(3, 2)),
      i32_is_i64 = is(i32, float64()),
      i32_is_str = is(i32, arrow::string()),
      dec_is_i32 = is(dec, int32()),
      dec_is_dec = is(dec, decimal(3, 2)),
      dec_is_dec128 = is(dec, decimal128(3, 2)),
      dec_is_dec256 = is(dec, decimal256(3, 2)),
      dec_is_i64 = is(dec, float64()),
      dec_is_str = is(dec, arrow::string()),
      dec128_is_i32 = is(dec128, int32()),
      dec128_is_dec128 = is(dec128, decimal128(3, 2)),
      dec128_is_dec256 = is(dec128, decimal256(3, 2)),
      dec128_is_i64 = is(dec128, float64()),
      dec128_is_str = is(dec128, arrow::string()),
      # BUG FIX: the five dec256_* checks previously tested the dec128 column
      # (copy-paste slip), so the dec256 column was never actually exercised.
      # They now test dec256, and the expected vector below is updated: a
      # decimal256 column is decimal256, not decimal128.
      dec256_is_i32 = is(dec256, int32()),
      dec256_is_dec128 = is(dec256, decimal128(3, 2)),
      dec256_is_dec256 = is(dec256, decimal256(3, 2)),
      dec256_is_i64 = is(dec256, float64()),
      dec256_is_str = is(dec256, arrow::string()),
      f64_is_i32 = is(f64, int32()),
      f64_is_dec = is(f64, decimal(3, 2)),
      f64_is_dec128 = is(f64, decimal128(3, 2)),
      f64_is_dec256 = is(f64, decimal256(3, 2)),
      f64_is_i64 = is(f64, float64()),
      f64_is_str = is(f64, arrow::string()),
      str_is_i32 = is(str, int32()),
      str_is_dec128 = is(str, decimal128(3, 2)),
      str_is_dec256 = is(str, decimal256(3, 2)),
      str_is_i64 = is(str, float64()),
      str_is_str = is(str, arrow::string())
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    c(
      TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
      FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE,
      FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE
    )
  )
  # with class2=string
  expect_equal(
    Table$create(
      i32 = Array$create(1, int32()),
      f64 = Array$create(1.1, float64()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      i32_is_i32 = is(i32, "int32"),
      i32_is_i64 = is(i32, "double"),
      i32_is_str = is(i32, "string"),
      f64_is_i32 = is(f64, "int32"),
      f64_is_i64 = is(f64, "double"),
      f64_is_str = is(f64, "string"),
      str_is_i32 = is(str, "int32"),
      str_is_i64 = is(str, "double"),
      str_is_str = is(str, "string")
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    # identity-matrix pattern: each column matches only its own type
    c(TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE)
  )
  # with class2=string alias
  expect_equal(
    Table$create(
      f16 = Array$create(NA_real_, halffloat()),
      f32 = Array$create(1.1, float()),
      f64 = Array$create(2.2, float64()),
      lgl = Array$create(TRUE, bool()),
      str = Array$create("a", arrow::string())
    ) %>% transmute(
      f16_is_f16 = is(f16, "float16"),
      f16_is_f32 = is(f16, "float32"),
      f16_is_f64 = is(f16, "float64"),
      f16_is_lgl = is(f16, "boolean"),
      f16_is_str = is(f16, "utf8"),
      f32_is_f16 = is(f32, "float16"),
      f32_is_f32 = is(f32, "float32"),
      f32_is_f64 = is(f32, "float64"),
      f32_is_lgl = is(f32, "boolean"),
      f32_is_str = is(f32, "utf8"),
      f64_is_f16 = is(f64, "float16"),
      f64_is_f32 = is(f64, "float32"),
      f64_is_f64 = is(f64, "float64"),
      f64_is_lgl = is(f64, "boolean"),
      f64_is_str = is(f64, "utf8"),
      lgl_is_f16 = is(lgl, "float16"),
      lgl_is_f32 = is(lgl, "float32"),
      lgl_is_f64 = is(lgl, "float64"),
      lgl_is_lgl = is(lgl, "boolean"),
      lgl_is_str = is(lgl, "utf8"),
      str_is_f16 = is(str, "float16"),
      str_is_f32 = is(str, "float32"),
      str_is_f64 = is(str, "float64"),
      str_is_lgl = is(str, "boolean"),
      str_is_str = is(str, "utf8")
    ) %>%
      collect() %>%
      t() %>%
      as.vector(),
    c(
      TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE,
      FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE,
      FALSE, FALSE, TRUE
    )
  )
})
test_that("type checks with is() giving R types", {
library(bit64)
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is(chr, "character"),
chr_is_fct = is(chr, "factor"),
chr_is_int = is(chr, "integer"),
chr_is_i64 = is(chr, "integer64"),
chr_is_lst = is(chr, "list"),
chr_is_lgl = is(chr, "logical"),
chr_is_num = is(chr, "numeric"),
dbl_is_chr = is(dbl, "character"),
dbl_is_fct = is(dbl, "factor"),
dbl_is_int = is(dbl, "integer"),
dbl_is_i64 = is(dbl, "integer64"),
dbl_is_lst = is(dbl, "list"),
dbl_is_lgl = is(dbl, "logical"),
dbl_is_num = is(dbl, "numeric"),
fct_is_chr = is(fct, "character"),
fct_is_fct = is(fct, "factor"),
fct_is_int = is(fct, "integer"),
fct_is_i64 = is(fct, "integer64"),
fct_is_lst = is(fct, "list"),
fct_is_lgl = is(fct, "logical"),
fct_is_num = is(fct, "numeric"),
int_is_chr = is(int, "character"),
int_is_fct = is(int, "factor"),
int_is_int = is(int, "integer"),
int_is_i64 = is(int, "integer64"),
int_is_lst = is(int, "list"),
int_is_lgl = is(int, "logical"),
int_is_num = is(int, "numeric"),
lgl_is_chr = is(lgl, "character"),
lgl_is_fct = is(lgl, "factor"),
lgl_is_int = is(lgl, "integer"),
lgl_is_i64 = is(lgl, "integer64"),
lgl_is_lst = is(lgl, "list"),
lgl_is_lgl = is(lgl, "logical"),
lgl_is_num = is(lgl, "numeric")
) %>%
collect(),
tbl
)
compare_dplyr_binding(
.input %>%
transmute(
i64_is_chr = is(i64, "character"),
i64_is_fct = is(i64, "factor"),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_int = is(i64, "integer"),
i64_is_i64 = is(i64, "integer64"),
i64_is_lst = is(i64, "list"),
i64_is_lgl = is(i64, "logical"),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_num = is(i64, "numeric"),
lst_is_chr = is(lst, "character"),
lst_is_fct = is(lst, "factor"),
lst_is_int = is(lst, "integer"),
lst_is_i64 = is(lst, "integer64"),
lst_is_lst = is(lst, "list"),
lst_is_lgl = is(lst, "logical"),
lst_is_num = is(lst, "numeric")
) %>%
collect(),
tibble(
i64 = as.integer64(1:3),
lst = list(c("a", "b"), c("d", "e"), c("f", "g"))
)
)
})
test_that("type checks with is.*()", {
library(bit64)
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is.character(chr),
chr_is_dbl = is.double(chr),
chr_is_fct = is.factor(chr),
chr_is_int = is.integer(chr),
chr_is_i64 = is.integer64(chr),
chr_is_lst = is.list(chr),
chr_is_lgl = is.logical(chr),
chr_is_num = is.numeric(chr),
dbl_is_chr = is.character(dbl),
dbl_is_dbl = is.double(dbl),
dbl_is_fct = is.factor(dbl),
dbl_is_int = is.integer(dbl),
dbl_is_i64 = is.integer64(dbl),
dbl_is_lst = is.list(dbl),
dbl_is_lgl = is.logical(dbl),
dbl_is_num = is.numeric(dbl),
fct_is_chr = is.character(fct),
fct_is_dbl = is.double(fct),
fct_is_fct = is.factor(fct),
fct_is_int = is.integer(fct),
fct_is_i64 = is.integer64(fct),
fct_is_lst = is.list(fct),
fct_is_lgl = is.logical(fct),
fct_is_num = is.numeric(fct),
int_is_chr = is.character(int),
int_is_dbl = is.double(int),
int_is_fct = is.factor(int),
int_is_int = is.integer(int),
int_is_i64 = is.integer64(int),
int_is_lst = is.list(int),
int_is_lgl = is.logical(int),
int_is_num = is.numeric(int),
lgl_is_chr = is.character(lgl),
lgl_is_dbl = is.double(lgl),
lgl_is_fct = is.factor(lgl),
lgl_is_int = is.integer(lgl),
lgl_is_i64 = is.integer64(lgl),
lgl_is_lst = is.list(lgl),
lgl_is_lgl = is.logical(lgl),
lgl_is_num = is.numeric(lgl)
) %>%
collect(),
tbl
)
compare_dplyr_binding(
.input %>%
transmute(
i64_is_chr = is.character(i64),
# TODO: investigate why this is not matching when testthat runs it
# i64_is_dbl = is.double(i64),
i64_is_fct = is.factor(i64),
# we want Arrow to return TRUE, but bit64 returns FALSE
# i64_is_int = is.integer(i64),
i64_is_i64 = is.integer64(i64),
i64_is_lst = is.list(i64),
i64_is_lgl = is.logical(i64),
i64_is_num = is.numeric(i64),
lst_is_chr = is.character(lst),
lst_is_dbl = is.double(lst),
lst_is_fct = is.factor(lst),
lst_is_int = is.integer(lst),
lst_is_i64 = is.integer64(lst),
lst_is_lst = is.list(lst),
lst_is_lgl = is.logical(lst),
lst_is_num = is.numeric(lst)
) %>%
collect(),
tibble(
i64 = as.integer64(1:3),
lst = list(c("a", "b"), c("d", "e"), c("f", "g"))
)
)
})
test_that("type checks with is_*()", {
library(rlang, warn.conflicts = FALSE)
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is_character(chr),
chr_is_dbl = is_double(chr),
chr_is_int = is_integer(chr),
chr_is_lst = is_list(chr),
chr_is_lgl = is_logical(chr),
dbl_is_chr = is_character(dbl),
dbl_is_dbl = is_double(dbl),
dbl_is_int = is_integer(dbl),
dbl_is_lst = is_list(dbl),
dbl_is_lgl = is_logical(dbl),
int_is_chr = is_character(int),
int_is_dbl = is_double(int),
int_is_int = is_integer(int),
int_is_lst = is_list(int),
int_is_lgl = is_logical(int),
lgl_is_chr = is_character(lgl),
lgl_is_dbl = is_double(lgl),
lgl_is_int = is_integer(lgl),
lgl_is_lst = is_list(lgl),
lgl_is_lgl = is_logical(lgl)
) %>%
collect(),
tbl
)
})
test_that("type checks on expressions", {
compare_dplyr_binding(
.input %>%
transmute(
a = is.character(as.character(int)),
b = is.integer(as.character(int)),
c = is.integer(int + int),
d = is.double(int + dbl),
e = is.logical(dbl > pi)
) %>%
collect(),
tbl
)
# the code in the expectation below depends on RE2
skip_if_not_available("re2")
compare_dplyr_binding(
.input %>%
transmute(
a = is.logical(grepl("[def]", chr))
) %>%
collect(),
tbl
)
})
test_that("type checks on R scalar literals", {
compare_dplyr_binding(
.input %>%
transmute(
chr_is_chr = is.character("foo"),
int_is_chr = is.character(42L),
int_is_int = is.integer(42L),
chr_is_int = is.integer("foo"),
dbl_is_num = is.numeric(3.14159),
int_is_num = is.numeric(42L),
chr_is_num = is.numeric("foo"),
dbl_is_dbl = is.double(3.14159),
chr_is_dbl = is.double("foo"),
lgl_is_lgl = is.logical(TRUE),
chr_is_lgl = is.logical("foo"),
fct_is_fct = is.factor(factor("foo", levels = c("foo", "bar", "baz"))),
chr_is_fct = is.factor("foo"),
lst_is_lst = is.list(list(c(a = "foo", b = "bar"))),
chr_is_lst = is.list("foo")
) %>%
collect(),
tbl
)
})
test_that("as.factor()/dictionary_encode()", {
skip("ARROW-12632: ExecuteScalarExpression cannot Execute non-scalar expression")
df1 <- tibble(x = c("C", "D", "B", NA, "D", "B", "S", "A", "B", "Z", "B"))
df2 <- tibble(x = c(5, 5, 5, NA, 2, 3, 6, 8))
compare_dplyr_binding(
.input %>%
transmute(x = as.factor(x)) %>%
collect(),
df1
)
expect_warning(
compare_dplyr_binding(
.input %>%
transmute(x = as.factor(x)) %>%
collect(),
df2
),
"Coercing dictionary values to R character factor levels"
)
# dictionary values with default null encoding behavior ("mask") omits
# nulls from the dictionary values
expect_equal(
object = {
rb1 <- df1 %>%
record_batch() %>%
transmute(x = dictionary_encode(x)) %>%
compute()
dict <- rb1$x$dictionary()
as.vector(dict$Take(dict$SortIndices()))
},
sort(unique(df1$x), na.last = NA)
)
# dictionary values with "encode" null encoding behavior includes nulls in
# the dictionary values
expect_equal(
object = {
rb1 <- df1 %>%
record_batch() %>%
transmute(x = dictionary_encode(x, null_encoding_behavior = "encode")) %>%
compute()
dict <- rb1$x$dictionary()
as.vector(dict$Take(dict$SortIndices()))
},
sort(unique(df1$x), na.last = TRUE)
)
})
test_that("bad explicit type conversions with as.*()", {
# Arrow returns lowercase "true", "false" (instead of "TRUE", "FALSE" like R)
expect_error(
compare_dplyr_binding(
.input %>%
transmute(lgl2chr = as.character(lgl)) %>%
collect(),
tibble(lgl = c(TRUE, FALSE, NA))
)
)
# Arrow fails to parse these strings as numbers (instead of returning NAs with
# a warning like R does)
expect_error(
expect_warning(
compare_dplyr_binding(
.input %>%
transmute(chr2num = as.numeric(chr)) %>%
collect(),
tibble(chr = c("l.O", "S.S", ""))
)
)
)
# Arrow fails to parse these strings as Booleans (instead of returning NAs
# like R does)
expect_error(
compare_dplyr_binding(
.input %>%
transmute(chr2lgl = as.logical(chr)) %>%
collect(),
tibble(chr = c("TRU", "FAX", ""))
)
)
})
test_that("structs/nested data frames/tibbles can be created", {
df <- tibble(regular_col1 = 1L, regular_col2 = "a")
compare_dplyr_binding(
.input %>%
transmute(
df_col = tibble(
regular_col1 = regular_col1,
regular_col2 = regular_col2
)
) %>%
collect(),
df
)
# check auto column naming
compare_dplyr_binding(
.input %>%
transmute(
df_col = tibble(regular_col1, regular_col2)
) %>%
collect(),
df
)
# ...and that other arguments are not supported
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = tibble(char_col, .rows = 1L)),
".rows not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = tibble(char_col, .name_repair = "universal")),
".name_repair not supported in Arrow"
)
# check that data.frame is mapped too
# stringsAsFactors default is TRUE in R 3.6, which is still tested on CI
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col2, stringsAsFactors = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# check with fix.empty.names = FALSE
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, fix.empty.names = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# check with check.names = TRUE and FALSE
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col1, check.names = TRUE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
compare_dplyr_binding(
.input %>%
transmute(
df_col = data.frame(regular_col1, regular_col1, check.names = FALSE)
) %>%
collect() %>%
mutate(df_col = as.data.frame(df_col)),
df
)
# ...and that other arguments are not supported
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, stringsAsFactors = TRUE)),
"stringsAsFactors = TRUE not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, row.names = 1L)),
"row.names not supported in Arrow"
)
expect_warning(
record_batch(char_col = "a") %>%
mutate(df_col = data.frame(char_col, check.rows = TRUE)),
"check.rows not supported in Arrow"
)
})
test_that("nested structs can be created from scalars and existing data frames", {
compare_dplyr_binding(
.input %>%
transmute(
df_col = tibble(b = 3)
) %>%
collect(),
tibble(a = 1:2)
)
# technically this is handled by Scalar$create() since there is no
# call to data.frame or tibble() within a dplyr verb
existing_data_frame <- tibble(b = 3)
compare_dplyr_binding(
.input %>%
transmute(
df_col = existing_data_frame
) %>%
collect(),
tibble(a = 1:2)
)
})
|
# Fit a 10-fold cross-validated elastic-net model (alpha = 0.4, MAE loss) on
# the skin training set and append the fitted glmnet path to skin_051.txt.
library(glmnet)

# header = TRUE spelled out: the original used head=T, relying on partial
# argument matching and on T (reassignable) instead of TRUE.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/skin.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column

set.seed(123)  # reproducible fold assignment for cv.glmnet
# renamed from `glm`, which shadowed stats::glm
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.4,
                    family = "gaussian", standardize = TRUE)

# capture.output() writes the same text as sink()+print()+sink(), but cannot
# leave the sink open if print() errors mid-way.
capture.output(print(cv_fit$glmnet.fit), file = "./skin_051.txt", append = TRUE)
| /Model/EN/Lasso/skin/skin_051.R | no_license | esbgkannan/QSMART | R | false | false | 339 | r | library(glmnet)
# Duplicate rendition of the skin_051 elastic-net script (dataset text column);
# same fixes applied: explicit header = TRUE (was head=T, partial matching plus
# T instead of TRUE), <- assignment, no shadowing of stats::glm, and
# capture.output() so the output file cannot be left sunk on error.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/skin.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y <- as.matrix(mydata[, 1])               # response: first column
set.seed(123)  # reproducible fold assignment for cv.glmnet
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.4,
                    family = "gaussian", standardize = TRUE)
capture.output(print(cv_fit$glmnet.fit), file = "./skin_051.txt", append = TRUE)
|
#' Conversion table of Datastream to ISO currency codes
#' @format A data frame with 161 rows and 4 variables:
#' \describe{
#' \item{dsCode}{the datastream code}
#' \item{isoCode}{the ISO code for the currency}
#' \item{primeCode}{primaryCode for currency or alternative}
#' \item{Multiplier}{the units of the currency}
#' }
"currencyDS2ISO" | /R/data.R | no_license | cran/DatastreamDSWS2R | R | false | false | 358 | r | #' Conversion table of Datastream to ISO currency codes
#' @format A data frame with 161 rows and 4 variables:
#' \describe{
#' \item{dsCode}{the datastream code}
#' \item{isoCode}{the ISO code for the currency}
#' \item{primeCode}{primaryCode for currency or alternative}
#' \item{Multiplier}{the units of the currency}
#' }
"currencyDS2ISO" |
## 1. The MNIST data set ------

# Guard the install so it only runs when keras is actually missing:
# an unconditional install.packages() on every run is slow and fails offline.
# (dependencies spelled out: `dep` relied on partial argument matching.)
if (!requireNamespace("keras", quietly = TRUE)) {
  install.packages("keras", dependencies = TRUE)
}
# library() errors immediately when a package is missing, unlike require(),
# which returns FALSE and lets the script fail later with a cryptic error.
library(e1071)
library(MLmetrics)
library(keras)
library(rlist)
library(caret)
library(factoextra)
library(ModelMetrics)

# NOTE(review): setwd() to an absolute path makes the script non-portable;
# kept so the relative data path below keeps resolving as before.
setwd(dir = "P:/Cours/Master/apprentissage")
# (the original rm(list = ls()) was removed: wiping the user's whole
# environment is an anti-pattern and nothing below depends on it)

mnist <- list.load("mnist.rdata")
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
# Recode digit labels 0-9 as factors with syntactic level names (Y0..Y9),
# needed later by caret's trainControl(classProbs = TRUE).
y_train <- factor(mnist$train$y, labels = c("Y0", "Y1", "Y2", "Y3", "Y4", "Y5", "Y6", "Y7", "Y8", "Y9"))
y_test <- factor(mnist$test$y, labels = c("Y0", "Y1", "Y2", "Y3", "Y4", "Y5", "Y6", "Y7", "Y8", "Y9"))

# visualize the first 36 digits in a 6x6 grid
par(mfcol = c(6, 6))
par(mar = c(0, 0, 3, 0), xaxs = 'i', yaxs = 'i')
for (idx in 1:36) {
  im <- x_train[idx, , ]
  im <- t(apply(im, 2, rev))  # rotate so the digit displays upright
  image(1:28, 1:28, im, col = gray((0:255) / 255),
        xaxt = 'n', main = paste(y_train[idx]))
}

# quick look at the data
str(x_train)
str(y_train)
summary(x_train)
summary(y_train)
## 2. Data preparation ----- (grader: 1.5 + 0.75 = 2.25)
# 2.1 reshape: flatten each 28x28 image into a 784-long row vector
x_train <- keras::array_reshape(x_train, c(nrow(x_train), 784)) # grader: 0.5
x_test <- keras::array_reshape(x_test, c(nrow(x_test), 784)) # grader: 0.5
# drop near-zero-variance columns (pixels carrying no information);
# computed on the training set only, then applied to both sets
# -- grader flagged this step as "careful, dangerous": 0.5/1
x_train_var_0 <- nearZeroVar(x_train)
x_train <- x_train[,-x_train_var_0]
x_test <- x_test[,-x_train_var_0]
# 2.2 dimensionality reduction: PCA reduces the number of variables and
# removes collinearity; keep 95% of the variance
preproc <- preProcess(data.frame(x_train), thresh = 0.95, method = "pca") # grader: "no need to take everything": 0.75/1
x_train_pred <- predict(preproc, data.frame(x_train))
x_test_pred <- predict(preproc, data.frame(x_test))
# put the results in data frames for the linear SVM
train <- data.frame(x_train_pred,y_train)
test <- data.frame(x_test_pred,y_test)
# after reduction, 90 principal components remain
##3. Apprentissage par SVM lin?aire----- # ... total = 2.5
# Utiliser une validation crois?e ? 5 folds r?p?t?e deux fois2.
control <- caret::trainControl(method="repeatedcv", number = 5, repeats=2, summaryFunction=multiClassSummary, classProbs=TRUE) # ... 0.5
# Pour le param?tre C, on propose de tester la s?quence 10**(-3:0).
grid <- data.frame(C = 10**(-3:0)) # ... 0.5
modelFit <- train(y_train ~ . , data = train, method="svmLinear", metric="Accuracy", trControl=control, tuneGrid=grid) # ... 0.5
predictTest <- predict(object=modelFit,newdata = x_test_pred) # ... 0.5
# Donner le taux de bon classement du jeu de donn?es test.
# 59 % d'images bien class?es ......................................0.5
ratePredict <- mean(predictTest == y_test)
##4. Apprentissage par r?seaux de neurones artificiels-----
##5. Impl?mentation d'un RNA avec une seule couche cach?e avec keras----- 4.5 + 0.5 = 5
require(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
# reshape
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
# a) on a besoin de 10 neurones (y train et test ont 10 modalit?s -> 1 neurone par modalit?) ... 0.5
# b)... 4.5
model <- keras_model_sequential()
model %>%
layer_dense(units = 784, activation = "relu", input_shape = c(784)) %>%
layer_dense(units = 10, activation = "softmax") # ... 0.5
model %>%
compile(loss = "categorical_crossentropy", optimizer = optimizer_adam(), metrics = c("accuracy"))
history <- model %>%
fit(x_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2) #
plot(history)# la meilleure performance est pour la 10?me it?ration
history$metrics# 97.8 % de pr?cision pour cette 10?me it?ration
model %>% evaluate(x_test, y_test) # 98.06% de pr?cision sur l'ensemble du test
##6. Apprentissage par r?seaux de neurones convolutifs-----
##7. Impl?mentation d'un r?seau de neurones convolutifs-----
require(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
model <- keras_model_sequential()
model %>% # 0 + (9*0.25) + 1 + 1 + 0.5 + 0.5 = 5.25
# 1. la couche de convolution sort des images de dimension 28*28 sur une seule couleur avec un noyau de convolution de 5*5 ... NON
layer_conv_2d(filters = 30, kernel_size = c(5,5), activation = "relu", input_shape = c(28,28,1)) %>% #... 0.25
# 2. la couche de maxpooling r?duit la dimension (ici de moiti? en largeur et longueur)# ... 1
layer_max_pooling_2d(pool_size = c(2,2)) %>% # ... 0.25
# avec 3 et 4 qui r?p?tent 1 et 2, on continue ? compresser l'image
# 3. convolution
layer_conv_2d(filters = 15, kernel_size = c(3,3), activation = "relu") %>% # ... 0.25
# 4. maxpooling
layer_max_pooling_2d(pool_size = c(2,2)) %>% # ... 0.25
# 5. dropout pour éviter le surapprentissage #... NON on peut expliquer le fonctionnement
layer_dropout(rate = 0.3) %>% # ... 0.25
# 6. on applatit la matrice de pixels dans le même but qu'au début du tp # ... 1
layer_flatten() %>% # ... 0.25
# 7. on fait un réseau de neurones pour terminer avec deux "relu" qui vont accéler la convergence ## ... 0.5/1
layer_dense(units = 128, activation = "relu") %>% #... 0.25
# 8.
layer_dense(units = 50, activation = "relu") %>% #... 0.25
# 9. et une logistique avec softmax pour finir # ... 0.5
layer_dense(units = 10, activation = "softmax") #... 0.25
model %>% compile( loss = "categorical_crossentropy", optimizer = optimizer_adam(), metrics = c("accuracy"))
x_train <- array(x_train, dim = c(60000, 28, 28, 1))
history <- model %>% fit( x_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2)
plot(history)# la meilleure performance est pour la 10?me it?ration
history$metrics# 99.1 % de pr?cision pour la 10?me it?ration
| /copies_MSP_ES/tp_wilczynski.R | no_license | masedki/ensai2019 | R | false | false | 6,211 | r | ## 1.Le jeu de donn?es MNIST------
install.packages("keras", dep=TRUE)
#install.packages("caret")
#install.packages("MLmetrics")
#install.packages("e1071")
require(e1071)
require(MLmetrics)
require(keras)
require(rlist)
require(caret)
require(factoextra)
require(ModelMetrics)
setwd(dir ="P:/Cours/Master/apprentissage")
rm(list=ls())
mnist <- list.load("mnist.rdata")
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
y_train <- factor(mnist$train$y,labels = c("Y0","Y1","Y2","Y3","Y4","Y5","Y6","Y7","Y8","Y9"))
y_test <- factor(mnist$test$y,labels = c("Y0","Y1","Y2","Y3","Y4","Y5","Y6","Y7","Y8","Y9"))
# visualize the digits
par(mfcol=c(6,6))
par(mar=c(0, 0, 3, 0), xaxs='i', yaxs='i')
for (idx in 1:36) {
im <- x_train[idx,,]
im <- t(apply(im, 2, rev))
image(1:28, 1:28, im, col=gray((0:255)/255),
xaxt='n', main=paste(y_train[idx]))
}
# je jette un oeil aux donn?es
str(x_train)
str(y_train)
summary(x_train)
summary(y_train)
##2.Préparation des données----- 1.5 + 0.75 = 2.25
#2.1 reshape
x_train <- keras::array_reshape(x_train, c(nrow(x_train), 784)) #.... 0.5
x_test <- keras::array_reshape(x_test, c(nrow(x_test), 784)) #.... 0.5
# on supprime les colonnes n'apportant pas d'information
x_train_var_0 <- nearZeroVar(x_train) # .... attention dangeureux : 0.5/1
x_train <- x_train[,-x_train_var_0]
x_test <- x_test[,-x_train_var_0]
#2.2 r?duction de dimensions : on peut faire une acp qui va r?duire les variables et enlever la colin?arit?
# on conserve 95% de la variance
preproc <- preProcess(data.frame(x_train), thresh = 0.95, method = "pca") # .... inutile de prendre tout : 0.75/1
x_train_pred <- predict(preproc, data.frame(x_train))
x_test_pred <- predict(preproc, data.frame(x_test))
# dans des data frame pour passer le svm lin?aire
train <- data.frame(x_train_pred,y_train)
test <- data.frame(x_test_pred,y_test)
# apr?s r?duction, il reste 90 composantes factorielles
##3. Apprentissage par SVM lin?aire----- # ... total = 2.5
# Utiliser une validation crois?e ? 5 folds r?p?t?e deux fois2.
control <- caret::trainControl(method="repeatedcv", number = 5, repeats=2, summaryFunction=multiClassSummary, classProbs=TRUE) # ... 0.5
# Pour le param?tre C, on propose de tester la s?quence 10**(-3:0).
grid <- data.frame(C = 10**(-3:0)) # ... 0.5
modelFit <- train(y_train ~ . , data = train, method="svmLinear", metric="Accuracy", trControl=control, tuneGrid=grid) # ... 0.5
predictTest <- predict(object=modelFit,newdata = x_test_pred) # ... 0.5
# Donner le taux de bon classement du jeu de donn?es test.
# 59 % d'images bien class?es ......................................0.5
ratePredict <- mean(predictTest == y_test)
##4. Apprentissage par r?seaux de neurones artificiels-----
##5. Impl?mentation d'un RNA avec une seule couche cach?e avec keras----- 4.5 + 0.5 = 5
require(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
# reshape
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
# a) on a besoin de 10 neurones (y train et test ont 10 modalit?s -> 1 neurone par modalit?) ... 0.5
# b)... 4.5
model <- keras_model_sequential()
model %>%
layer_dense(units = 784, activation = "relu", input_shape = c(784)) %>%
layer_dense(units = 10, activation = "softmax") # ... 0.5
model %>%
compile(loss = "categorical_crossentropy", optimizer = optimizer_adam(), metrics = c("accuracy"))
history <- model %>%
fit(x_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2) #
plot(history)# la meilleure performance est pour la 10?me it?ration
history$metrics# 97.8 % de pr?cision pour cette 10?me it?ration
model %>% evaluate(x_test, y_test) # 98.06% de pr?cision sur l'ensemble du test
##6. Apprentissage par r?seaux de neurones convolutifs-----
##7. Impl?mentation d'un r?seau de neurones convolutifs-----
require(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
model <- keras_model_sequential()
model %>% # 0 + (9*0.25) + 1 + 1 + 0.5 + 0.5 = 5.25
# 1. la couche de convolution sort des images de dimension 28*28 sur une seule couleur avec un noyau de convolution de 5*5 ... NON
layer_conv_2d(filters = 30, kernel_size = c(5,5), activation = "relu", input_shape = c(28,28,1)) %>% #... 0.25
# 2. la couche de maxpooling r?duit la dimension (ici de moiti? en largeur et longueur)# ... 1
layer_max_pooling_2d(pool_size = c(2,2)) %>% # ... 0.25
# avec 3 et 4 qui r?p?tent 1 et 2, on continue ? compresser l'image
# 3. convolution
layer_conv_2d(filters = 15, kernel_size = c(3,3), activation = "relu") %>% # ... 0.25
# 4. maxpooling
layer_max_pooling_2d(pool_size = c(2,2)) %>% # ... 0.25
# 5. dropout pour éviter le surapprentissage #... NON on peut expliquer le fonctionnement
layer_dropout(rate = 0.3) %>% # ... 0.25
# 6. on applatit la matrice de pixels dans le même but qu'au début du tp # ... 1
layer_flatten() %>% # ... 0.25
# 7. on fait un réseau de neurones pour terminer avec deux "relu" qui vont accéler la convergence ## ... 0.5/1
layer_dense(units = 128, activation = "relu") %>% #... 0.25
# 8.
layer_dense(units = 50, activation = "relu") %>% #... 0.25
# 9. et une logistique avec softmax pour finir # ... 0.5
layer_dense(units = 10, activation = "softmax") #... 0.25
model %>% compile( loss = "categorical_crossentropy", optimizer = optimizer_adam(), metrics = c("accuracy"))
x_train <- array(x_train, dim = c(60000, 28, 28, 1))
history <- model %>% fit( x_train, y_train, epochs = 10, batch_size = 200, validation_split = 0.2)
plot(history)# la meilleure performance est pour la 10?me it?ration
history$metrics# 99.1 % de pr?cision pour la 10?me it?ration
|
/TextMining/Lab/01-2020-disaster-or-not/disaster-or-not.r | no_license | lukaszksiezak/DataScience-Studies | R | false | false | 8,291 | r | ||
### Use a sinusoidal approximation to estimate the number of Growing
### Degree-Days above a given threshold, using daily minimum and
### maximum temperatures.
above.threshold <- function(mins, maxs, threshold) {
  ## Model each day's temperature as a sinusoid with the day's mean and
  ## half-range, and integrate the portion of the curve above `threshold`.
  ## Returns the summed degree-days across all days.
  daily.mean <- (mins + maxs) / 2
  half.range <- (maxs - mins) / 2
  ## Fractions of the day at which the sinusoid crosses the threshold
  ## going up (t.up) and coming back down (t.down).  asin() yields NaN
  ## for days that never cross; those entries are overwritten below.
  t.up <- asin((threshold - daily.mean) / half.range) / (2 * pi)
  t.down <- 0.5 - t.up
  ## Days entirely above the threshold integrate the whole day [0, 1];
  ## days entirely below contribute nothing [0, 0].
  always.above <- mins > threshold
  never.above <- maxs < threshold
  t.up[always.above] <- 0
  t.down[always.above] <- 1
  t.up[never.above] <- 0
  t.down[never.above] <- 0
  ## Antiderivative of the sinusoid, evaluated at both crossing times.
  antideriv <- function(t) -half.range * cos(2 * pi * t) / (2 * pi) + daily.mean * t
  degree.days <- antideriv(t.down) - antideriv(t.up) - threshold * (t.down - t.up)
  sum(degree.days)
}
### Get the Growing Degree-Days, as degree-days between gdd.start and
### kdd.start, and Killing Degree-Days, as the degree-days above
### kdd.start.
get.gddkdd <- function(mins, maxs, gdd.start, kdd.start) {
  ## Degree-days above each of the two thresholds: GDD is the band
  ## between gdd.start and kdd.start, KDD is everything above kdd.start.
  ## Returns c(GDD, KDD).
  dd.above.lower <- above.threshold(mins, maxs, gdd.start)
  kdd <- above.threshold(mins, maxs, kdd.start)
  gdd <- dd.above.lower - kdd
  c(gdd, kdd)
}
| /R/gdd.R | permissive | px00001/research-common | R | false | false | 1,332 | r | ### Use a sinusoidal approximation to estimate the number of Growing
### Degree-Days above a given threshold, using daily minimum and
### maximum temperatures.
above.threshold <- function(mins, maxs, threshold) {
## Determine crossing points, as a fraction of the day
plus.over.2 = (mins + maxs)/2
minus.over.2 = (maxs - mins)/2
two.pi = 2*pi
## d0s is the times of crossing above; d1s is when cross below
d0s = asin((threshold - plus.over.2) / minus.over.2) / two.pi
d1s = .5 - d0s
## If always above or below threshold, set crossings accordingly
aboves = mins > threshold
belows = maxs < threshold
d0s[aboves] = 0
d1s[aboves] = 1
d0s[belows] = 0
d1s[belows] = 0
## Calculate integral
F1s = -minus.over.2 * cos(2*pi*d1s) / two.pi + plus.over.2 * d1s
F0s = -minus.over.2 * cos(2*pi*d0s) / two.pi + plus.over.2 * d0s
return(sum(F1s - F0s - threshold * (d1s - d0s)))
}
### Get the Growing Degree-Days, as degree-days between gdd.start and
### kdd.start, and Killing Degree-Days, as the degree-days above
### kdd.start.
get.gddkdd <- function(mins, maxs, gdd.start, kdd.start) {
dd.lowup = above.threshold(mins, maxs, gdd.start)
dd.above = above.threshold(mins, maxs, kdd.start)
dd.lower = dd.lowup - dd.above
return(c(dd.lower, dd.above))
}
|
rm(list=ls(all=TRUE))
install_github("Rd2roxygen", "yihui")
| /utility/install-packages-in-development.R | no_license | nlsy-links/NlsyLinks | R | false | false | 60 | r | rm(list=ls(all=TRUE))
install_github("Rd2roxygen", "yihui")
|
context("test-get_speech.R")
# Integration test: scrapes one presidential-speech page from pa.go.kr
# and checks a known field.  NOTE(review): this hits the live network,
# so it will fail offline or if the page changes — consider
# skip_if_offline()/skip_on_cran() gating.
test_that("get speech text data", {
dat <- get_speech("http://www.pa.go.kr/research/contents/speech/index.jsp?spMode=view&catid=c_pa02062&artid=1311235")
# The second scraped value is expected to be the speech date.
expect_equal(dat$value[2], "1957.07.04")
})
| /tests/testthat/test-get_speech.R | no_license | Charlie13/presidentSpeechKr | R | false | false | 232 | r | context("test-get_speech.R")
test_that("get speech text data", {
dat <- get_speech("http://www.pa.go.kr/research/contents/speech/index.jsp?spMode=view&catid=c_pa02062&artid=1311235")
expect_equal(dat$value[2], "1957.07.04")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImportMethods.R
\name{import.CTSS}
\alias{import.CTSS}
\title{import.CTSS}
\usage{
import.CTSS(filepath)
}
\arguments{
\item{filepath}{The path to the "CTSS" file.
Note that the format of the "CTSS" files handled in this function is not
the same as the FANTOM5 "CTSS" files (which are plain BED).}
}
\description{
Imports a "CTSS" file in a \link{GPos} object
}
\examples{
CAGEr:::import.CTSS(system.file("extdata", "Zf.high.chr17.ctss", package = "CAGEr"))
}
\seealso{
Other loadFileIntoGPos: \code{\link{bam2CTSS}},
\code{\link{import.bam.ctss}}, \code{\link{import.bam}},
\code{\link{import.bedCTSS}},
\code{\link{import.bedScore}},
\code{\link{import.bedmolecule}},
\code{\link{loadFileIntoGPos}},
\code{\link{moleculesGR2CTSS}}
}
\concept{loadFileIntoGPos}
| /man/import.CTSS.Rd | no_license | clarapereira/CAGEr | R | false | true | 853 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImportMethods.R
\name{import.CTSS}
\alias{import.CTSS}
\title{import.CTSS}
\usage{
import.CTSS(filepath)
}
\arguments{
\item{filepath}{The path to the "CTSS" file.
Note that the format of the "CTSS" files handled in this function is not
the same as the FANTOM5 "CTSS" files (which are plain BED).}
}
\description{
Imports a "CTSS" file in a \link{GPos} object
}
\examples{
CAGEr:::import.CTSS(system.file("extdata", "Zf.high.chr17.ctss", package = "CAGEr"))
}
\seealso{
Other loadFileIntoGPos: \code{\link{bam2CTSS}},
\code{\link{import.bam.ctss}}, \code{\link{import.bam}},
\code{\link{import.bedCTSS}},
\code{\link{import.bedScore}},
\code{\link{import.bedmolecule}},
\code{\link{loadFileIntoGPos}},
\code{\link{moleculesGR2CTSS}}
}
\concept{loadFileIntoGPos}
|
COL.BEST.RET <- "Retention Time"
COL.FWHM <- "Full Width at Half Maximum"
COL.TOTAL.AREA <- "Total Peak Area"
COL.PEAK.ASS <- "Peak assymetry"
#########################################################################################
# here we put a selection of most column names that users use. The first element of each vector should be the best name that
# we suggest users to use and which our code is based on. for example "Retention Time" and "Full Width at Half Maximum" which are the first element
# of each vector in the list, are our suggestion so we wrote them in the fisrt place.
best_colnames <- list(
c("AcquiredTime","Acquired.Time","time","creation date"),
#c("Retention time","BestRetentionTime" ,"Best.RT","best retention time", "retention time","rt","best ret time","intensity","Best RT"),
#c("Full width at half maximum","MaxFWHM","fwhm","max.fwhm", "Max FWHM"),
#c("Total peak area","Total Area","TotalArea","total area","TA","T.Area"),
c("MinStartTime","min start time","Min Start Time"),
c("MaxEndTime", "max end time","Max End Time"),
c("Precursor","PeptideSequence"),
c("Annotations","anotations","anotation")
)
#### camelCaseSplit function ##############################################################################################
camelCaseSplit <- function(x) {
  # Split a camelCase word at every lower->upper boundary, lower-casing
  # the letter that started the new word (the perl `\L` replacement
  # escape).  Ex: camelCaseSplit("myComputerIsHere") --> "my computer is here"
  gsub(pattern = "([a-z])([A-Z])", replacement = "\\1 \\L\\2", x = x, perl = TRUE)
}
#### punc_remove function #################################################################################################
punc_remove <- function(x){
  # Replace every punctuation character (the class also lists '/', the
  # quote, and the space, some redundantly) with a single space.
  # Ex1: punc_remove("Best.RT") --> "Best RT"
  # Ex2: punc_remove("Best_RT") --> "Best RT"
  gsub(pattern = "[[:punct:]///' ]", replacement = " ", x = x)
}
#### clearString function ###############################################################################################
clearString <- function(x){
  # Normalise a string for fuzzy column-name matching: split camelCase,
  # turn punctuation into spaces, then lower-case everything.
  # Ex: clearString("myName_isSara.Taheri") --> "my name is sara taheri"
  split.and.stripped <- punc_remove(camelCaseSplit(x))
  tolower(split.and.stripped)
}
#### guessColumnName function ###########################################################################################
# This function receives the data and check the column names of data and changes the column names if it is not the
# same names as our suggested sample data to fit our suggested sample data
guessColumnName <- function(x){
  # Map a user-supplied column name onto our canonical column name.
  # The input is normalised with clearString(), compared against every
  # known synonym via levenshteinSim() (RecordLinkage package), and the
  # canonical (first) name of the best-matching synonym group is
  # returned when similarity clears 0.6; otherwise the original name is
  # returned unchanged.  Ties keep the earliest group, as before
  # (strict `>` comparison).
  cleaned <- clearString(x)
  best.group <- 0
  best.sim <- -1
  for (group.idx in seq_along(best_colnames)) {
    synonyms <- best_colnames[[group.idx]]
    for (syn in synonyms) {
      current.sim <- levenshteinSim(cleaned, clearString(syn))
      if (current.sim > best.sim) {
        best.sim <- current.sim
        best.group <- group.idx
      }
    }
  }
  if (best.sim > 0.6) {
    best_colnames[[best.group]][1]
  } else {
    x
  }
}
#############################################################################################################
input.sanity.check <- function(prodata, processout, finalfile) {
# Validate and normalise a raw MSstatsQC input data frame.
#   prodata    - data frame read from the user's upload
#   processout - log matrix (not referenced in this body)
#   finalfile  - log file path (not referenced in this body)
# Returns the cleaned data frame on success, or a character error
# message when validation fails -- callers must check the return type.
error_message <- ""
null_columns <- c()
# get the column names and change them to the column names that we want (For example we want Retention Time but a user might use RT, this function auotomatically change RT to Retention Time)
colnames(prodata) <- unlist(lapply(colnames(prodata), function(x)guessColumnName(x)))
### conditions
# check that the data includes all the requiered columns and if not tell user what column is missing
# required_column_names <- c("Precursor","Retention Time","Full Width at Half Maximum","Total Peak Area","MinStartTime"
# ,"MaxEndTime")
required_column_names <- c("Precursor","Annotations")
# If no Annotations column survived the renaming, add an all-NA one and
# record an error so the user is told to restructure their file.
if(!("Annotations" %in% colnames(prodata))) {
prodata[,"Annotations"] <- NA
error_message <- paste(error_message, "Please create a column named Annotation and put all your metrics after this column.To see an example of a sample data click on the {Run with example data} button.\n\n")
}
provided_column_names <- colnames(prodata)
# if(!all(required_column_names %in% provided_column_names)) {
# missedInput <- which(!(required_column_names %in% provided_column_names))
# error_message <- paste("ERROR : The required input(inputs) : ",
# paste(required_column_names[missedInput], collapse = ", "),
# " is(are) not provided in data set. Please add it to your data and try again.\n\n")
# }
# check that all columns other than Precursor and Acquired Time and Annotations are numeric.
# Every column to the right of Annotations is treated as a metric and
# must be numeric.
AfterannoColNum <- (which(colnames(prodata)=="Annotations")) + 1
if(AfterannoColNum < ncol(prodata)) {
#colNames <- colnames(prodata)
for(i in AfterannoColNum:ncol(prodata)) {
if(is.numeric(prodata[,i]) == FALSE) {
error_message <- paste(error_message, "All the values of", colnames(prodata)[i], "should be numeric and positive.\n\n")
}
#if(sum(is.na(prodata[,i])) > 0) {
#null_columns <- c(null_columns,colNames[i])
#}
}
}
# Any accumulated validation error aborts here with a message instead
# of a data frame.
if(error_message != "") {
#return(paste(error_message, "Please check the values to make sure all the inputs are numeric and positive and then try again."))
return(paste(error_message))
}
# for custom metrics we are checking them to be numeric in QCMetrics in "find_custom_metrics" function and only accepting numeric columns after Annotation
# if there is any missing value in data replace it with NA
prodata[prodata==""] <- NA
# Fill missing annotations with an explicit "Not Available" level.
levels(prodata$Annotations) = c(levels(prodata$Annotations), "Not Available")
prodata["Annotations"][is.na(prodata["Annotations"])] <- "Not Available"
# some times numeric values of some users are like 333,222 which is not acceptable and we convert it to 333222 by replacing "," to ""
# prodata[,"Full Width at Half Maximum"] <- as.numeric(gsub(",","",prodata[,"Full Width at Half Maximum"]))
# prodata[,"Total Peak Area"] <- as.numeric(gsub(",","",prodata[,"Total Peak Area"]))
# prodata[,"Retention Time"] <- as.numeric(gsub(",","",prodata[,"Retention Time"]))
# prodata$MaxEndTime <- as.numeric(gsub(",","",prodata$MaxEndTime))
# prodata$MinStartTime <- as.numeric(gsub(",","",prodata$MinStartTime))
# some data migh have annotation column, some might not have. If it doesn't, we create an empty "Annotation" column at the very end column of the data
# Define peak assymetry
# Derived metric: peak asymmetry = 2 * start / (start + end), appended
# as a new column when both time columns are present.
if("MinStartTime" %in% provided_column_names && "MaxEndTime" %in% provided_column_names) {
peakAss <- 2*prodata$MinStartTime/(prodata$MaxEndTime+prodata$MinStartTime)
# locate a new column named "Peak Assymetry" right after the column named "Annotation"
#prodata.first <- prodata[,1:which(colnames(prodata)=="Annotations")]
#prodata.first[,"Peak Assymetry"]<- peakAss
#prodata <- cbind(prodata.first, prodata[,(which(colnames(prodata)=="MaxEndTime")+1):ncol(prodata), drop = FALSE])
prodata[,"Peak assymetry"] <- peakAss
}
return(prodata)
}
### Input_checking function #########################################################################################
input_checking <- function(data){
# Entry point for input validation.  Side effects: writes session info
# to ./log/sessionInfo.txt, writes a msstatsqc log file in the working
# directory, and assumes a ./log directory already exists -- TODO
# confirm that callers guarantee this.
# Returns the sanitised data frame restricted to complete cases (or,
# per input.sanity.check, a character error message).
## save process output in each step #### creating a log file ########### from Meena's code
allfiles <- list.files()
num <- 0
filenaming <- "./log/msstatsqc"
finalfile <- "msstatsqc.log"
# Pick a log name that does not clash with an existing file:
# msstatsqc.log, then msstatsqc-1.log, msstatsqc-2.log, ...
while(is.element(finalfile,allfiles)) {
num <- num+1
finalfile <- paste(paste(filenaming,num,sep="-"),".log",sep="")
}
# Capture sessionInfo() into a text file, then read it back as the
# first block of the process log.
session <- sessionInfo()
sink("./log/sessionInfo.txt")
print(session)
sink()
processout <- as.matrix(read.table("./log/sessionInfo.txt", header=T, sep="\t"))
write.table(processout, file=finalfile, row.names=FALSE)
processout <- rbind(processout, as.matrix(c(" "," ","MSstatsqc - dataProcess function"," "),ncol=1))
data <- input.sanity.check(data, processout, finalfile)
data <- data[complete.cases(data),] #work with complete cases
return(data)
}
| /data-validation.R | no_license | srtaheri/msstats-qc | R | false | false | 8,127 | r | COL.BEST.RET <- "Retention Time"
COL.FWHM <- "Full Width at Half Maximum"
COL.TOTAL.AREA <- "Total Peak Area"
COL.PEAK.ASS <- "Peak assymetry"
#########################################################################################
# here we put a selection of most column names that users use. The first element of each vector should be the best name that
# we suggest users to use and which our code is based on. for example "Retention Time" and "Full Width at Half Maximum" which are the first element
# of each vector in the list, are our suggestion so we wrote them in the fisrt place.
best_colnames <- list(
c("AcquiredTime","Acquired.Time","time","creation date"),
#c("Retention time","BestRetentionTime" ,"Best.RT","best retention time", "retention time","rt","best ret time","intensity","Best RT"),
#c("Full width at half maximum","MaxFWHM","fwhm","max.fwhm", "Max FWHM"),
#c("Total peak area","Total Area","TotalArea","total area","TA","T.Area"),
c("MinStartTime","min start time","Min Start Time"),
c("MaxEndTime", "max end time","Max End Time"),
c("Precursor","PeptideSequence"),
c("Annotations","anotations","anotation")
)
#### camelCaseSplit function ##############################################################################################
camelCaseSplit <- function(x) {
# This function get a camelCase word and splits it.
# Ex : camelCaseSplit("myComputerIsHere") ---> my Computer Is Here
return(gsub("([a-z])([A-Z])", "\\1 \\L\\2", x, perl = TRUE))
}
#### punc_remove function #################################################################################################
punc_remove <- function(x){
# This function removes any existing punctuation in your sentence or word and transfer it to space.
# Ex1: punc_remove(Best.RT) --> Best RT #Ex2: punc_remove(Best_RT) --> Best RT
return(gsub("[[:punct:]///' ]", " ", x))
}
#### clearString function ###############################################################################################
clearString <- function(x){
# This function, gets a word or setence, Splits it (if it is a camelCase), removes any existing punctuations, and transfer
# all Upper Case letters to lower case letters.
# Ex: clearString("myName_isSara.Taheri") --> my name is sara taheri
return(tolower(punc_remove(camelCaseSplit(x))))
}
#### guessColumnName function ###########################################################################################
# This function receives the data and check the column names of data and changes the column names if it is not the
# same names as our suggested sample data to fit our suggested sample data
guessColumnName <- function(x){
a <- clearString(x)
max_index <- 0
max <- -1
for(i in 1:length(best_colnames)){
col <- best_colnames[[i]]
for(j in 1:length(col)){
sim <- levenshteinSim(a,clearString(col[j]))
if(sim > max){
max <- sim
max_index <- i
}
}
}
if (max > 0.6) {
return(best_colnames[[max_index]][1])
}
else {
return(x)
}
}
#############################################################################################################
input.sanity.check <- function(prodata, processout, finalfile) {
error_message <- ""
null_columns <- c()
# get the column names and change them to the column names that we want (For example we want Retention Time but a user might use RT, this function auotomatically change RT to Retention Time)
colnames(prodata) <- unlist(lapply(colnames(prodata), function(x)guessColumnName(x)))
### conditions
# check that the data includes all the requiered columns and if not tell user what column is missing
# required_column_names <- c("Precursor","Retention Time","Full Width at Half Maximum","Total Peak Area","MinStartTime"
# ,"MaxEndTime")
required_column_names <- c("Precursor","Annotations")
if(!("Annotations" %in% colnames(prodata))) {
prodata[,"Annotations"] <- NA
error_message <- paste(error_message, "Please create a column named Annotation and put all your metrics after this column.To see an example of a sample data click on the {Run with example data} button.\n\n")
}
provided_column_names <- colnames(prodata)
# if(!all(required_column_names %in% provided_column_names)) {
# missedInput <- which(!(required_column_names %in% provided_column_names))
# error_message <- paste("ERROR : The required input(inputs) : ",
# paste(required_column_names[missedInput], collapse = ", "),
# " is(are) not provided in data set. Please add it to your data and try again.\n\n")
# }
# check that all columns other than Precursor and Acquired Time and Annotations are numeric.
AfterannoColNum <- (which(colnames(prodata)=="Annotations")) + 1
if(AfterannoColNum < ncol(prodata)) {
#colNames <- colnames(prodata)
for(i in AfterannoColNum:ncol(prodata)) {
if(is.numeric(prodata[,i]) == FALSE) {
error_message <- paste(error_message, "All the values of", colnames(prodata)[i], "should be numeric and positive.\n\n")
}
#if(sum(is.na(prodata[,i])) > 0) {
#null_columns <- c(null_columns,colNames[i])
#}
}
}
if(error_message != "") {
#return(paste(error_message, "Please check the values to make sure all the inputs are numeric and positive and then try again."))
return(paste(error_message))
}
# for custom metrics we are checking them to be numeric in QCMetrics in "find_custom_metrics" function and only accepting numeric columns after Annotation
# if there is any missing value in data replace it with NA
prodata[prodata==""] <- NA
levels(prodata$Annotations) = c(levels(prodata$Annotations), "Not Available")
prodata["Annotations"][is.na(prodata["Annotations"])] <- "Not Available"
# some times numeric values of some users are like 333,222 which is not acceptable and we convert it to 333222 by replacing "," to ""
# prodata[,"Full Width at Half Maximum"] <- as.numeric(gsub(",","",prodata[,"Full Width at Half Maximum"]))
# prodata[,"Total Peak Area"] <- as.numeric(gsub(",","",prodata[,"Total Peak Area"]))
# prodata[,"Retention Time"] <- as.numeric(gsub(",","",prodata[,"Retention Time"]))
# prodata$MaxEndTime <- as.numeric(gsub(",","",prodata$MaxEndTime))
# prodata$MinStartTime <- as.numeric(gsub(",","",prodata$MinStartTime))
# some data migh have annotation column, some might not have. If it doesn't, we create an empty "Annotation" column at the very end column of the data
# Define peak assymetry
if("MinStartTime" %in% provided_column_names && "MaxEndTime" %in% provided_column_names) {
peakAss <- 2*prodata$MinStartTime/(prodata$MaxEndTime+prodata$MinStartTime)
# locate a new column named "Peak Assymetry" right after the column named "Annotation"
#prodata.first <- prodata[,1:which(colnames(prodata)=="Annotations")]
#prodata.first[,"Peak Assymetry"]<- peakAss
#prodata <- cbind(prodata.first, prodata[,(which(colnames(prodata)=="MaxEndTime")+1):ncol(prodata), drop = FALSE])
prodata[,"Peak assymetry"] <- peakAss
}
return(prodata)
}
### Input_checking function #########################################################################################
# Validate an MSstatsQC input data frame and record a processing log.
#
# Side effects: writes ./log/sessionInfo.txt and a uniquely numbered
# msstatsqc log file in the working directory; the ./log directory is
# assumed to exist and be writable.
#
# Returns the sanity-checked data with incomplete rows removed.
input_checking <- function(data){
## save process output in each step #### creating a log file ########### from Meena's code
## choose the first log file name not already present in the working
## directory: "msstatsqc.log", then "./log/msstatsqc-1.log", "-2", ...
allfiles <- list.files()
num <- 0
filenaming <- "./log/msstatsqc"
finalfile <- "msstatsqc.log"
while(is.element(finalfile,allfiles)) {
num <- num+1
finalfile <- paste(paste(filenaming,num,sep="-"),".log",sep="")
}
## capture sessionInfo() into a text file, then reread it as the log header
session <- sessionInfo()
sink("./log/sessionInfo.txt")
print(session)
sink()
processout <- as.matrix(read.table("./log/sessionInfo.txt", header=T, sep="\t"))
write.table(processout, file=finalfile, row.names=FALSE)
## append a banner row; NOTE(review): the appended rows are only passed on to
## input.sanity.check() -- finalfile itself is not rewritten here. The ncol=1
## argument is silently ignored by as.matrix(); presumably matrix() was meant.
processout <- rbind(processout, as.matrix(c(" "," ","MSstatsqc - dataProcess function"," "),ncol=1))
data <- input.sanity.check(data, processout, finalfile)
data <- data[complete.cases(data),] #work with complete cases
return(data)
}
|
library(doAzureBatch)
generatePoolConfig("batch_config.json") | /createBatchConfig.r | no_license | tanewill/R_Finance_AzureBatch | R | false | false | 61 | r | library(doAzureBatch)
generatePoolConfig("batch_config.json") |
# package-level documentation
#' @importFrom tools file_path_sans_ext
NULL
| /R/workflow.R | no_license | southwick-associates/workflow | R | false | false | 76 | r | # package-level documentation
#' @importFrom tools file_path_sans_ext
NULL
|
#' Plot a histogram of Global Active Power from the UCI household power
#' consumption data set into 'plot1.png'.
#'
#' Downloads and unzips the data if the text file is absent, reads the two
#' days of interest (1/2/2007 and 2/2/2007), and writes the histogram as a
#' transparent-background PNG in the working directory.
#'
#' @return Invisibly, the result of closing the PNG device (via on.exit).
plot1 <- function () {
  datafile <- 'household_power_consumption.txt'
  ## Download and unzip the data set if it is not present in the current
  ## directory.
  if (!file.exists(datafile)) {
    download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "assignment1.zip", method = "curl")
    unzip('./assignment1.zip')
  }
  ## Install data.table if needed, then attach it. requireNamespace() checks
  ## availability without the side effect of attaching (unlike require()).
  if (!requireNamespace("data.table", quietly = TRUE)) {
    install.packages("data.table")
  }
  library(data.table)
  ## Read the observations for 1/2/2007 and 2/2/2007 (2880 minutes) as
  ## character columns, and restore the original column names from the
  ## file header.
  x <- fread(datafile, skip = '1/2/2007', nrows = 2880, na.strings = '?', colClasses = rep('character', 9))
  y <- colnames(fread(datafile, nrows = 0))
  setnames(x, y)
  ## Everything was read as character; convert the plotted column.
  x$Global_active_power <- as.numeric(x$Global_active_power)
  ## Plot Global_active_power as a histogram into 'plot1.png'; on.exit()
  ## guarantees the device is closed even if hist() fails.
  png(filename = 'plot1.png', bg = 'transparent')
  on.exit(dev.off(), add = TRUE)
  hist(x$Global_active_power, col = 'red', main = 'Global Active Power', xlab = 'Global Active Power (kilowatts)')
} | /plot1.R | no_license | zenra/ExData_Plotting1 | R | false | false | 1,339 | r | plot1 <- function () {
datafile <- 'household_power_consumption.txt'
## Checks if the data file exists in the current directory and downloads it
## if it does not.
if (!file.exists(datafile)) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "assignment1.zip", method = "curl")
unzip('./assignment1.zip')
}
## Checks if the package data.table is loaded and installs and loads it
## if it is not.
## NOTE(review): require() both checks and attaches; requireNamespace()
## would be the side-effect-free availability check.
if (!require(data.table)) {
install.packages("data.table")
}
library(data.table)
## Reads data for 1/2/2007 and 2/2/2007 (2880 minutes) into data table with
## original column names recovered from the file header. All columns are
## read as character; '?' marks missing values in this data set.
x <- fread('./household_power_consumption.txt', skip = '1/2/2007', nrows=2880, na.strings = '?', colClasses = c(rep('character', 9)))
y <- colnames(fread('household_power_consumption.txt', nrows=0))
setnames(x,y)
## Converts Global_active_power column values to numeric
x$Global_active_power <- as.numeric(x$Global_active_power)
## Plots Global_active_power as histogram in a PNG file 'plot1.png'
## (transparent background, default 480x480 device size)
png(filename = 'plot1.png', bg = 'transparent')
hist(x$Global_active_power, col = 'red', main = 'Global Active Power', xlab = 'Global Active Power (kilowatts)')
dev.off()
} |
# Yige Wu @WashU Jun 2021
# Cox proportional-hazards survival analysis of CPTAC ccRCC bulk RNA-seq:
# stepwise model selection over tumor-specific surface-marker genes,
# followed by hazard-ratio forest plots for the selected models.
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
library(survival)
library(survminer)
library(My.stepwise)
## set run id (date + manually bumped version number; keys the output dir)
version_tmp <- 5
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input bulk tumor mRNA expression (FPKM-UQ, log2-transformed; genes x samples)
exp_df <- fread(data.table = F, input = "./Resources/Bulk_Processed_Data/mRNA/CPTAC_ccRCC_discovery_tumor_mRNA_FPKM_UQ_log2_v1.0.tsv")
## input bulk meta data
metadata_bulk_df <- fread("./Resources/Bulk_Processed_Data/Case_ID/CPTAC_ccRCC_discovery_caseID_v1.0.tsv")
## input survival data (event status + follow-up time per case)
survival_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/clinical/extract_cptac_discovery_ccRCC_survival_time/20210920.v1/CPTAC_Discovery_ccRCC_Survival_Time20210920.v1.tsv")
## input marker genes (tumor-specific surface markers from snRNA DEG overlap)
genes_process_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/tumor_specific_markers/overlap_tumor_vs_pt_DEGs_w_tumor_vs_other_DEGs/20210824.v1/ccRCC_markers.Surface.20210824.v1.tsv")
# make combined data and test ------------------------------------------------------
## keep ccRCC cases only; tumor sample columns in exp_df are named "<CASE_ID>-T"
metadata_filtered_df <- metadata_bulk_df %>%
filter(Histologic_Type == "Clear cell renal cell carcinoma") %>%
mutate(Sample_ID = paste0(CASE_ID, "-T"))
## subset expression data to the ccRCC tumor samples
exp_data_df <- exp_df[, metadata_filtered_df$Sample_ID]
## rename columns from sample IDs to case IDs (for merging with survival data)
colnames(exp_data_df) <- metadata_filtered_df$CASE_ID
# specify gene to test ----------------------------------------------------
genes_process <- genes_process_df$Gene
## manually excluded genes (dropped in earlier model-selection rounds;
## SPIRE1 is listed twice, which is harmless for %in%)
genes_process <- genes_process[!(genes_process %in% c("DPP6", "CPNE8", "EFNA5", "MGLL", "SPIRE1", "SPIRE1", "PLCB1", "OSMR", "SORBS1", "ANO6", "EPB41", "PAM", "RHEX"))]
# pre-process -----------------------------------------------------------------
## transpose to cases-in-rows / genes-in-columns, then attach survival data
exp_test_wide_df <- exp_data_df[exp_df$gene_name %in% genes_process,]
testdata_df <- data.frame(t(exp_test_wide_df))
colnames(testdata_df) <- exp_df$gene_name[exp_df$gene_name %in% genes_process]
testdata_df$CASE_ID <- colnames(exp_test_wide_df)
testdata_df <- merge(x = testdata_df, y = survival_df, by = c("CASE_ID"), all.x = T)
## EFS_censor: event = case recorded as "With Tumor"; EFS: follow-up in years.
## NOTE(review): the +9-day offset is assumed to be an intentional correction
## carried over from the clinical-data extraction -- confirm upstream.
testdata_df <- testdata_df %>%
dplyr::mutate(EFS_censor = (with_new_event == "With Tumor")) %>%
dplyr::mutate(EFS = (survival_time + 9)/365) %>%
arrange(CASE_ID) %>%
filter(!is.na(EFS_censor) & !is.na(EFS))
genes_test <- exp_df$gene_name[exp_df$gene_name %in% genes_process]
# process expression only -------------------------------------------------
## stepwise Cox model selection on expression covariates alone; sink()
## redirects the verbose selection trace of My.stepwise.coxph() into a log
file2write <- paste0(dir_out, "Expressiononly.Stepwise.Cox.", run_id, ".txt")
sink(file2write)
# cat(paste0("sle: 0.15; sls: 0.15\n"))
cat(paste0("sle: 0.25; sls: 0.25\n"))
My.stepwise.coxph(Time = "EFS", Status = "EFS_censor", variable.list = genes_test, data = testdata_df, sle = 0.25, sls = 0.25)
sink()
# process expression + basic patient info -------------------------------------------------
## repeat the selection with age/sex/stage/grade added to the candidate list
## (looser entry/stay thresholds: sle = sls = 0.3)
file2write <- paste0(dir_out, "Expression_plusAgeSexetc.Stepwise.Cox.", run_id, ".txt")
sink(file2write)
My.stepwise.coxph(Time = "EFS", Status = "EFS_censor", variable.list = c("age", "sex.ismale", "stage.numeric", "grade.numeric", genes_test), data = testdata_df, sle = 0.3, sls = 0.3)
sink()
# evaluate V1 -------------------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ CP + SHISA9, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "CP_SHISA9.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + grade.numeric + sex.ismale + age + CP + SHISA9, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Stage_Grade_Sex_Age.CP_SHISA9.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# # manually test best model for V2 ------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + ABCC3 + NDRG1 + SEMA6A + EPHA6 + KCTD3 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.ExpressionOnly.DropPLIN2",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + EPHA6 + KCTD3 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "2.ExpressionOnly.DropPLIN2_ABCC3",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + EPHA6 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "3.ExpressionOnly.DropPLIN2_ABCC3_KTCD3",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "4.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "5.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "6.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2_SEMA6A",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + grade.numeric + sex.ismale + age + SHISA9 + NDRG1 + stage.numeric , data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "7.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2_SEMA6A",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# ## drop PHKA2
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Drop_PHKA2.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# ## drop PHKA2
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.Drop_PHKA2_PLEKHA1.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Final.9Genes.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# # manually test best model for V3 ------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1 + PHKA2 + TGFA, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "0.Drop_PLIN21.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1 + PHKA2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.Drop_PLIN21_TGFA.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Final.9Genes.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Stage_Sex.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
# plot --------------------------------------------------------------------
## For each candidate Cox model, draw a hazard-ratio forest plot (ggforest)
## into its own PDF under dir_out.
## full model: stage + sex + 9-gene expression signature
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
## full model minus CP
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.noCP.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
## single-gene model: CP only
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.CPonly.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
## full model minus stage
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.noStage.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
## expression-only model (no clinical covariates)
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.ExpressionOnly.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
## clinical-only baseline: stage + sex
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.Stage_Sex.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
| /clinical_association/survival/coxph_PFS_stepwise_by_CPTAC_bulkRNA_continuous.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 10,589 | r | # Yige Wu @WashU Jun 2021
# set up libraries and output directory -----------------------------------
## set working directory
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
library(survival)
library(survminer)
library(My.stepwise)
## set run id
version_tmp <- 5
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input protein data
exp_df <- fread(data.table = F, input = "./Resources/Bulk_Processed_Data/mRNA/CPTAC_ccRCC_discovery_tumor_mRNA_FPKM_UQ_log2_v1.0.tsv")
## input bulk meta data
metadata_bulk_df <- fread("./Resources/Bulk_Processed_Data/Case_ID/CPTAC_ccRCC_discovery_caseID_v1.0.tsv")
## input survival ddata
survival_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/clinical/extract_cptac_discovery_ccRCC_survival_time/20210920.v1/CPTAC_Discovery_ccRCC_Survival_Time20210920.v1.tsv")
## input marker gene
genes_process_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/tumor_specific_markers/overlap_tumor_vs_pt_DEGs_w_tumor_vs_other_DEGs/20210824.v1/ccRCC_markers.Surface.20210824.v1.tsv")
# make combined data and test ------------------------------------------------------
metadata_filtered_df <- metadata_bulk_df %>%
filter(Histologic_Type == "Clear cell renal cell carcinoma") %>%
mutate(Sample_ID = paste0(CASE_ID, "-T"))
## subset data
exp_data_df <- exp_df[, metadata_filtered_df$Sample_ID]
## rename columns
colnames(exp_data_df) <- metadata_filtered_df$CASE_ID
# specify gene to test ----------------------------------------------------
genes_process <- genes_process_df$Gene
genes_process <- genes_process[!(genes_process %in% c("DPP6", "CPNE8", "EFNA5", "MGLL", "SPIRE1", "SPIRE1", "PLCB1", "OSMR", "SORBS1", "ANO6", "EPB41", "PAM", "RHEX"))]
# pre-process -----------------------------------------------------------------
exp_test_wide_df <- exp_data_df[exp_df$gene_name %in% genes_process,]
testdata_df <- data.frame(t(exp_test_wide_df))
colnames(testdata_df) <- exp_df$gene_name[exp_df$gene_name %in% genes_process]
testdata_df$CASE_ID <- colnames(exp_test_wide_df)
testdata_df <- merge(x = testdata_df, y = survival_df, by = c("CASE_ID"), all.x = T)
testdata_df <- testdata_df %>%
dplyr::mutate(EFS_censor = (with_new_event == "With Tumor")) %>%
dplyr::mutate(EFS = (survival_time + 9)/365) %>%
arrange(CASE_ID) %>%
filter(!is.na(EFS_censor) & !is.na(EFS))
genes_test <- exp_df$gene_name[exp_df$gene_name %in% genes_process]
# process expression only -------------------------------------------------
file2write <- paste0(dir_out, "Expressiononly.Stepwise.Cox.", run_id, ".txt")
sink(file2write)
# cat(paste0("sle: 0.15; sls: 0.15\n"))
cat(paste0("sle: 0.25; sls: 0.25\n"))
My.stepwise.coxph(Time = "EFS", Status = "EFS_censor", variable.list = genes_test, data = testdata_df, sle = 0.25, sls = 0.25)
sink()
# process expression + basic patient info -------------------------------------------------
file2write <- paste0(dir_out, "Expression_plusAgeSexetc.Stepwise.Cox.", run_id, ".txt")
sink(file2write)
My.stepwise.coxph(Time = "EFS", Status = "EFS_censor", variable.list = c("age", "sex.ismale", "stage.numeric", "grade.numeric", genes_test), data = testdata_df, sle = 0.3, sls = 0.3)
sink()
# evaluate V1 -------------------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ CP + SHISA9, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "CP_SHISA9.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + grade.numeric + sex.ismale + age + CP + SHISA9, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Stage_Grade_Sex_Age.CP_SHISA9.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# # manually test best model for V2 ------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + ABCC3 + NDRG1 + SEMA6A + EPHA6 + KCTD3 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.ExpressionOnly.DropPLIN2",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + EPHA6 + KCTD3 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "2.ExpressionOnly.DropPLIN2_ABCC3",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + EPHA6 + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "3.ExpressionOnly.DropPLIN2_ABCC3_KTCD3",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A + UBE2D2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "4.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1 + SEMA6A, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "5.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ SHISA9 + NDRG1, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "6.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2_SEMA6A",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + grade.numeric + sex.ismale + age + SHISA9 + NDRG1 + stage.numeric , data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "7.ExpressionOnly.DropPLIN2_ABCC3_KTCD3_EPHA6_UBE2D2_SEMA6A",".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# ## drop PHKA2
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Drop_PHKA2.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# ## drop PHKA2
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.Drop_PHKA2_PLEKHA1.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Final.9Genes.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# # manually test best model for V3 ------------------------------------------------
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1 + PHKA2 + TGFA, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "0.Drop_PLIN21.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + sex.ismale + EGFR + CP + PLEKHA1 + PHKA2, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "1.Drop_PLIN21_TGFA.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Final.9Genes.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
#
# fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale, data = testdata_df)
# fit_efs_sum <- summary(fit_efs)
# file2write <- paste0(dir_out, "Stage_Sex.Cox.", run_id, ".txt")
# sink(file2write)
# fit_efs_sum
# sink()
# plot --------------------------------------------------------------------
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.noCP.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.CPonly.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ sex.ismale + EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.noStage.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ EPHA6 + ABCC3 + FTO + COL23A1 + CA9 + SEMA6A + NDRG1 + EGFR + CP, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.ExpressionOnly.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
fit_efs <- coxph(formula = Surv(EFS, EFS_censor) ~ stage.numeric + sex.ismale, data = testdata_df)
file2write <- paste0(dir_out, "Hazard_Ratio.MultiGeneModel.Stage_Sex.pdf")
pdf(file2write, width = 5, height = 4, useDingbats = F)
p <- ggforest(model = fit_efs, data = testdata_df)
print(p)
dev.off()
|
# Test runner: execute the full testthat suite for the breathtestcore package.
library(testthat)
# Uncomment to run only a single test file during development:
#test_check("breathtestcore", filter = "coef_diff_by_group")
test_check("breathtestcore")
| /tests/test-all.R | no_license | histopathology/breathtestcore | R | false | false | 109 | r | library(testthat)
#test_check("breathtestcore", filter = "coef_diff_by_group")
test_check("breathtestcore")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getScore.R
\name{getScore}
\alias{getScore}
\title{Interval score of prediction intervals}
\usage{
getScore(jumptimes, predInts, alpha)
}
\arguments{
\item{jumptimes}{a vector of length N storing the times that are to be
predicted}
\item{predInts}{a N by 2 matrix in which \code{predInts[n,]} denotes the
prediction interval for \code{jumptimes[n]}}
\item{alpha}{the alpha-error chosen for the prediction intervals}
}
\value{
the average interval score of the prediction intervals
}
\description{
Computes the interval score of Gneiting and Raftery (2007) for the
prediction intervals supplied as the matrix \code{predInts}, evaluated
against the observed values supplied as the vector \code{jumptimes}.
}
| /man/getScore.Rd | no_license | KLeckey/loadshare | R | false | true | 784 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getScore.R
\name{getScore}
\alias{getScore}
\title{Interval score of prediction intervals}
\usage{
getScore(jumptimes, predInts, alpha)
}
\arguments{
\item{jumptimes}{a vector of length N storing the times that are to be
predicted}
\item{predInts}{a N by 2 matrix in which \code{predInts[n,]} denotes the
prediction interval for \code{jumptimes[n]}}
\item{alpha}{the alpha-error chosen for the prediction intervals}
}
\value{
the average interval score of the prediction intervals
}
\description{
A computation of the interval score by Gneiting and Raftery (2007) of
prediction intervals given as a
matrix \code{predInts}. The values that need to be predicted are given as
a vector \code{jumptimes}.
}
|
# Fit the outcome model for y ~ x twice -- unadjusted and adjusted for
# covar -- and summarize both fits side by side.
#
# Args:
#   df:     data frame holding the analysis variables
#   y, x:   outcome and predictor, passed through to fitModel()
#   covar:  covariates added for the adjusted model
#   linfct: linear hypothesis specification passed to getGtest()
#
# Returns a list of three data frames, each stacking an "Unadjusted" block
# over an "Adjusted" block:
#   oddsratios: exponentiated coefficients (broom::tidy, estimate renamed)
#   Ftests:     joint F test of linfct (via getGtest)
#   fit:        model fit statistics (broom::glance)
#
# NOTE(review): fitModel() and getGtest() are project helpers not visible
# here; this documentation assumes fitModel() returns a list with a
# "modelObject" element and getGtest() returns fstat/df/pvalue, consistent
# with the usage below -- confirm against their definitions.
wrapperRQ2 <- function (df, y, x, covar, linfct) {
M0 <- fitModel(df, y, x)
M <- fitModel(df, y, x, covar)
# joint tests of linfct on each model
tmp0 <- M0[["modelObject"]] %>% getGtest(linfct)
tmp <- M[["modelObject"]] %>% getGtest(linfct)
list(oddsratios = bind_rows(M0[["modelObject"]] %>%
tidy(exponentiate = TRUE) %>%
mutate(model = "Unadjusted") %>%
select(model, everything()),
M[["modelObject"]] %>%
tidy(exponentiate = TRUE) %>%
mutate(model = "Adjusted") %>%
select(model, everything())) %>%
rename(oddsratio = estimate),
Ftests = bind_rows(data.frame(model = "Unadjusted",
F = tmp0$fstat, df1 = tmp0$df[1], df2 = tmp0$df[2], pValue = tmp0$pvalue,
stringsAsFactors = FALSE),
data.frame(model = "Adjusted",
F = tmp$fstat, df1 = tmp$df[1], df2 = tmp$df[2], pValue = tmp$pvalue,
stringsAsFactors = FALSE)),
fit = bind_rows(M0[["modelObject"]] %>%
glance() %>%
mutate(model = "Unadjusted") %>%
select(model, everything()),
M[["modelObject"]] %>%
glance() %>%
mutate(model = "Adjusted") %>%
select(model, everything())))
}
| /lib/wrapperRQ2.R | no_license | benjamin-chan/TeoPsychiatricSymptoms | R | false | false | 1,621 | r | wrapperRQ2 <- function (df, y, x, covar, linfct) {
M0 <- fitModel(df, y, x)
M <- fitModel(df, y, x, covar)
tmp0 <- M0[["modelObject"]] %>% getGtest(linfct)
tmp <- M[["modelObject"]] %>% getGtest(linfct)
list(oddsratios = bind_rows(M0[["modelObject"]] %>%
tidy(exponentiate = TRUE) %>%
mutate(model = "Unadjusted") %>%
select(model, everything()),
M[["modelObject"]] %>%
tidy(exponentiate = TRUE) %>%
mutate(model = "Adjusted") %>%
select(model, everything())) %>%
rename(oddsratio = estimate),
Ftests = bind_rows(data.frame(model = "Unadjusted",
F = tmp0$fstat, df1 = tmp0$df[1], df2 = tmp0$df[2], pValue = tmp0$pvalue,
stringsAsFactors = FALSE),
data.frame(model = "Adjusted",
F = tmp$fstat, df1 = tmp$df[1], df2 = tmp$df[2], pValue = tmp$pvalue,
stringsAsFactors = FALSE)),
fit = bind_rows(M0[["modelObject"]] %>%
glance() %>%
mutate(model = "Unadjusted") %>%
select(model, everything()),
M[["modelObject"]] %>%
glance() %>%
mutate(model = "Adjusted") %>%
select(model, everything())))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.