content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Batch driver: fit the Gaussian-kernel spatial model twice, once for the
# "current" period and once for the "future" period.  The workspace is
# cleared between runs so the two fits are completely independent.

# ---- Run 1: current period ------------------------------------------------
rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)  # TRUE, not T (T is reassignable)
# Number of bases: 5, 10, 15, 20
process <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
time    <- "current"  # current or future
L       <- 15         # number of knots to use for the basis functions
cv      <- 1          # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev  # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions
source(file = "./fitmodel.R")

# ---- Run 2: future period -------------------------------------------------
rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
time    <- "future"   # only difference from run 1
L       <- 15         # number of knots to use for the basis functions
cv      <- 1          # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev  # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions (sourced on the following line)
source(file = "./fitmodel.R") | /markdown/precipitation/fit-gsk-15-1.R | permissive | sammorris81/extreme-decomp | R | false | false | 1,114 | r | rm(list=ls())
# Batch driver: fit the Gaussian-kernel spatial model twice, once for the
# "current" period and once for the "future" period.  The workspace is
# cleared between runs so the two fits are completely independent.

# ---- Run 1: current period ------------------------------------------------
rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)  # TRUE, not T (T is reassignable)
# Number of bases: 5, 10, 15, 20
process <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
time    <- "current"  # current or future
L       <- 15         # number of knots to use for the basis functions
cv      <- 1          # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev  # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions
source(file = "./fitmodel.R")

# ---- Run 2: future period -------------------------------------------------
rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"      # ebf: empirical basis functions, gsk: gaussian kernels
time    <- "future"   # only difference from run 1
L       <- 15         # number of knots to use for the basis functions
cv      <- 1          # which cross-validation set to use
loc.fun <- scale.fun <- ~ time + elev  # + B1 + B2 + B3 + B4 + B5 + 0
# fit the model and get predictions
source(file = "./fitmodel.R")
# __________________________________________________________________
# //////////////////////////////////////////////////////////////////
#
# Author - Anupama Rajaram
#
# Program Description - Program for AirBnB Kaggle competition.
#                       Uses ctree() on the subset of training users whose
#                       destination is neither "US", "NDF" nor "other" to
#                       build a conditional-inference tree, then predicts
#                       destinations for the test users.
#
# Note: anything commented as 'Data Exploration' is purely for
# debugging purposes, and can be deleted without affecting this
# script.
#
# NOTE(review): ctree() is called but party/partykit is never loaded here;
# the script assumes the package is already attached -- confirm.
# __________________________________________________________________
# //////////////////////////////////////////////////////////////////

# Clean up the memory of the current R session (kept from the original
# script; avoid rm(list = ls()) in reusable code).
rm(list = ls(all = TRUE))

#===================================================================#
#=========== Section 1: Data Management ============================#

# read.csv() is read.delim() with sep = "," and dec = "." already set.
data <- read.csv(file = "train_users_2.csv", header = TRUE)
test <- read.csv(file = "test_users.csv", header = TRUE)

# Subset: keep only travelers NOT headed to a destination in [US, NDF, other].
age3140 <- subset(data, (country_destination != "NDF" & country_destination != "US"
                         & country_destination != "other"))
test_age3140 <- test

# ========== Recode factor variables so the train subset and the test set
# share identical level sets; this keeps predict() from failing on
# mismatched factor levels.  The level vectors are defined once and applied
# to both data frames, instead of duplicating each factor() call. ========== #
factor.levels <- list(
  gender = c("FEMALE", "MALE", "OTHER", "-unknown-"),
  signup_method = c("basic", "facebook", "google"),
  language = c("ca", "cs", "da", "de", "el", "en", "es", "fi", "fr", "hu",
               "id", "it", "ja", "ko", "nl", "no", "pl", "pt", "ru", "sv",
               "th", "tr", "zh"),
  affiliate_channel = c("content", "direct", "other", "remarketing",
                        "sem-brand", "sem-non-brand", "seo"),
  affiliate_provider = c("baidu", "bing", "craigslist", "daum", "direct",
                         "email-marketing", "facebook", "facebook-open-graph",
                         "google", "gsp", "meetup", "naver", "other",
                         "padmapper", "vast", "yahoo", "yandex"),
  first_affiliate_tracked = c("linked", "local ops", "marketing", "omg",
                              "product", "tracked-other", "untracked")
)
for (col in names(factor.levels)) {
  age3140[[col]]      <- factor(age3140[[col]],      levels = factor.levels[[col]])
  test_age3140[[col]] <- factor(test_age3140[[col]], levels = factor.levels[[col]])
}

# Data exploration - display the data after transformation:
head(test_age3140)

#============= Recoding incorrect values ==============#
# Number of NAs for each variable:
sapply(test_age3140, function(x) sum(is.na(x)))
sapply(age3140, function(x) sum(is.na(x)))
# Number of unique values under each variable:
sapply(age3140, function(x) length(unique(x)))
sapply(test_age3140, function(x) length(unique(x)))

# Recode implausible ages as missing (many users have age = 2014):
age3140$age[age3140$age >= 100] <- NA
test_age3140$age[test_age3140$age >= 100] <- NA

# ================================================ #
# ======== Section 2: Predictions ================ #
# Conditional-inference tree fitted on the non-US, non-NDF travelers:
fml3 <- country_destination ~ gender + signup_method + language + affiliate_channel +
  affiliate_provider + first_device_type + first_affiliate_tracked + signup_flow +
  signup_app
tr_age3140 <- ctree(fml3, data = age3140)
plot(tr_age3140)
# awesome plot! :-)

# Data exploration:
summary(tr_age3140)

# Forecast values and write to file.
# (Adding "first_browser" to the formula throws a factor-level error.)
Pred_age40 <- predict(tr_age3140, test_age3140)
submit_tstage40 <- data.frame(id = test_age3140$id,
                              country = Pred_age40)
# Unfortunately this gives only options = US, NDF, so of limited use.
write.csv(submit_tstage40, file = "jan9_nonUS_traveler_tree.csv", row.names = FALSE)
| /airbnb-nonUS-travelers2.R | no_license | anurajaram/kaggle-airbnb | R | false | false | 6,879 | r |
# __________________________________________________________________
# //////////////////////////////////////////////////////////////////
#
# Author - Anupama Rajaram
#
# Program Description - Program for AirBnB Kaggle competition.
#                       Uses ctree() on the subset of training users whose
#                       destination is neither "US", "NDF" nor "other" to
#                       build a conditional-inference tree, then predicts
#                       destinations for the test users.
#
# Note: anything commented as 'Data Exploration' is purely for
# debugging purposes, and can be deleted without affecting this
# script.
#
# NOTE(review): ctree() is called but party/partykit is never loaded here;
# the script assumes the package is already attached -- confirm.
# __________________________________________________________________
# //////////////////////////////////////////////////////////////////

# Clean up the memory of the current R session (kept from the original
# script; avoid rm(list = ls()) in reusable code).
rm(list = ls(all = TRUE))

#===================================================================#
#=========== Section 1: Data Management ============================#

# read.csv() is read.delim() with sep = "," and dec = "." already set.
data <- read.csv(file = "train_users_2.csv", header = TRUE)
test <- read.csv(file = "test_users.csv", header = TRUE)

# Subset: keep only travelers NOT headed to a destination in [US, NDF, other].
age3140 <- subset(data, (country_destination != "NDF" & country_destination != "US"
                         & country_destination != "other"))
test_age3140 <- test

# ========== Recode factor variables so the train subset and the test set
# share identical level sets; this keeps predict() from failing on
# mismatched factor levels.  The level vectors are defined once and applied
# to both data frames, instead of duplicating each factor() call. ========== #
factor.levels <- list(
  gender = c("FEMALE", "MALE", "OTHER", "-unknown-"),
  signup_method = c("basic", "facebook", "google"),
  language = c("ca", "cs", "da", "de", "el", "en", "es", "fi", "fr", "hu",
               "id", "it", "ja", "ko", "nl", "no", "pl", "pt", "ru", "sv",
               "th", "tr", "zh"),
  affiliate_channel = c("content", "direct", "other", "remarketing",
                        "sem-brand", "sem-non-brand", "seo"),
  affiliate_provider = c("baidu", "bing", "craigslist", "daum", "direct",
                         "email-marketing", "facebook", "facebook-open-graph",
                         "google", "gsp", "meetup", "naver", "other",
                         "padmapper", "vast", "yahoo", "yandex"),
  first_affiliate_tracked = c("linked", "local ops", "marketing", "omg",
                              "product", "tracked-other", "untracked")
)
for (col in names(factor.levels)) {
  age3140[[col]]      <- factor(age3140[[col]],      levels = factor.levels[[col]])
  test_age3140[[col]] <- factor(test_age3140[[col]], levels = factor.levels[[col]])
}

# Data exploration - display the data after transformation:
head(test_age3140)

#============= Recoding incorrect values ==============#
# Number of NAs for each variable:
sapply(test_age3140, function(x) sum(is.na(x)))
sapply(age3140, function(x) sum(is.na(x)))
# Number of unique values under each variable:
sapply(age3140, function(x) length(unique(x)))
sapply(test_age3140, function(x) length(unique(x)))

# Recode implausible ages as missing (many users have age = 2014):
age3140$age[age3140$age >= 100] <- NA
test_age3140$age[test_age3140$age >= 100] <- NA

# ================================================ #
# ======== Section 2: Predictions ================ #
# Conditional-inference tree fitted on the non-US, non-NDF travelers:
fml3 <- country_destination ~ gender + signup_method + language + affiliate_channel +
  affiliate_provider + first_device_type + first_affiliate_tracked + signup_flow +
  signup_app
tr_age3140 <- ctree(fml3, data = age3140)
plot(tr_age3140)
# awesome plot! :-)

# Data exploration:
summary(tr_age3140)

# Forecast values and write to file.
# (Adding "first_browser" to the formula throws a factor-level error.)
Pred_age40 <- predict(tr_age3140, test_age3140)
submit_tstage40 <- data.frame(id = test_age3140$id,
                              country = Pred_age40)
# Unfortunately this gives only options = US, NDF, so of limited use.
write.csv(submit_tstage40, file = "jan9_nonUS_traveler_tree.csv", row.names = FALSE)
|
# ์ ์ ์.
# train : 131209
# test : 75000
# all : 206209
# ์ ์ ์ (test+train = prior) ํ์ธ
# ์ ์ ๋ฆฌ์คํธ
train_users <- orders %>% filter(eval_set=="train") %>% distinct(user_id) %>% data.frame()
test_users <- orders %>% filter(eval_set=="test") %>% distinct(user_id) %>% data.frame()
all_users <- orders %>% filter(eval_set=="prior") %>% distinct(user_id) %>% data.frame()
# train : 131209
# not NA : 122607
data_reorder_users <- data %>% filter(!is.na(reordered)) %>%
select(user_id) %>% distinct(user_id) %>% data.frame()
# test : 75000
# NA : 205888
data_na_users <- data %>% filter(is.na(reordered)) %>%
select(user_id) %>% distinct(user_id) %>% data.frame()
# NA๋ train ์ ์ ์ธ๋ฐ ์ด๋ฒ ์ฃผ๋ฌธ(ordert)์์
# ํด๋น product๋ฅผ ์์๊ธฐ ๋๋ฌธ์ ordert์ ์ ๋ณด๊ฐ ์๋ ๊ฒ๋ค
# ์ฆ ๋ง์ train ์ ์ ๊ฐ ํฌํจ๋์ด ์๋ค.
# ์ฌ๊ธฐ์ test์ ์ ์ ๊ฒ๋ค์ ๋น์ฐํ ๋ค NA
# NA์ธ ์ ์ ๋ค์ test์ ์ ๋ค์ด ๋ชจ๋ ํฌํจ๋์๋ ๊ฒ์ ๋ณผ ์ ์๋ค.
sum(data_na_users[[1]] %in% test_users[[1]]) # 75000
# ๋ฆฌ์ค๋ ์ ๋ณด๊ฐ ์๋ ์ ์ ๋ค์ ๋ชจ๋ train์ ์ํจ
nrow(data_reorder_users) # 122607
sum(data_reorder_users[[1]] %in% train_users[[1]]) # 122607
# NA์ธ train ์ ์ ๋ค
nrow(train_users) # 131209
sum(data_na_users[[1]] %in% train_users[[1]]) # 130888
# NA์ ์ ์ฒด ์ ์ ์
nrow(data %>% filter(is.na(reordered)) %>% distinct(user_id)) # 205888
# NA์ธ train์ ์ ๋ค + NA์ธ test ์ ์ ๋ค : 130888+75000 = 205888
head(iris %>% group_by(Sepal.Width) %>% summarise(m=mean(Sepal.Length)), n=150)
| /confirming_code.R | no_license | FingerDrill/kaggle_instacart | R | false | false | 1,582 | r | # ์ ์ ์.
# User counts: train : 131209
# test : 75000
# all : 206209
# Verify the user counts (test + train = prior).
# User lists:
train_users <- orders %>% filter(eval_set=="train") %>% distinct(user_id) %>% data.frame()
test_users <- orders %>% filter(eval_set=="test") %>% distinct(user_id) %>% data.frame()
all_users <- orders %>% filter(eval_set=="prior") %>% distinct(user_id) %>% data.frame()
# train : 131209
# not NA : 122607
data_reorder_users <- data %>% filter(!is.na(reordered)) %>%
  select(user_id) %>% distinct(user_id) %>% data.frame()
# test : 75000
# NA : 205888
data_na_users <- data %>% filter(is.na(reordered)) %>%
  select(user_id) %>% distinct(user_id) %>% data.frame()
# NA rows belong to train users who did not buy the given product in the
# current order (ordert), so ordert carries no information for them;
# i.e. many train users are included among the NA rows.
# The test users here are, of course, all NA as well.
# We can confirm that the NA users include every test user:
sum(data_na_users[[1]] %in% test_users[[1]]) # 75000
# Users with reorder information all belong to the train set:
nrow(data_reorder_users) # 122607
sum(data_reorder_users[[1]] %in% train_users[[1]]) # 122607
# Train users that are NA:
nrow(train_users) # 131209
sum(data_na_users[[1]] %in% train_users[[1]]) # 130888
# Total number of NA users:
nrow(data %>% filter(is.na(reordered)) %>% distinct(user_id)) # 205888
# NA train users + NA test users : 130888 + 75000 = 205888
head(iris %>% group_by(Sepal.Width) %>% summarise(m=mean(Sepal.Length)), n=150)
|
# Pin the library search path to its second entry so packages are loaded
# from that library rather than the first (user) library.
.libPaths(.libPaths()[2])
library(dada2)
library(ggplot2)
# Read-quality profile of the input fastq, subsampling 5e4 reads; the input
# path comes from the enclosing Snakemake rule's 'fastq' slot.
plot <- plotQualityProfile(snakemake@input[["fastq"]], n=5e4)
ggsave(filename=snakemake@output[["plot"]], plot=plot) | /amplicon_processing/scripts/99.dada2_plotQualityProfile.R | no_license | polzlab/VanInsberghe_2019_Cdiff_colonization | R | false | false | 175 | r | .libPaths(.libPaths()[2])
# Load packages, build a read-quality profile for the input fastq
# (subsampling 5e4 reads) and save it; paths come from the enclosing
# Snakemake rule.
library(dada2)
library(ggplot2)
plot <- plotQualityProfile(snakemake@input[["fastq"]], n=5e4)
ggsave(filename=snakemake@output[["plot"]], plot=plot)
#' Fit Statistical Models
#'
#' @description Functions to fit statistical models to data.
#'
#' @param x Data object or numeric vector representing a morphometric predictor variable.
#' @param y Numeric vector representing a morphometric response variable.
#' @param z Binary classification vector classifying morphometric data into discrete maturity groups.
#' @param n Number of observations for each (x,y) pair (optional).
#' @param sex Biological sex which specifies the type of analysis or initial values to be applied.
#' @param theta Parameter vector.
#' @param discrete Logical value specifying whether to treat observations as discrete rounded values.
#' @param print Logical value specifying whether to print information about progress during model fitting.
#' @param trace Optimization control output (see \code{\link[stats]{optim}}).
#' @param model Character string specifying the model type.
#' @param nugget Logical value specifying whether a variogram model contains a nugget semi-variance parameter.
#' @param distance.exponent Numeric value specifying the exponent to be applied in the distance metric.
#'
#' @export
fit <- function(x, ...) UseMethod("fit")
#' @describeIn fit Generic morphometric model fit method.
#' @export fit.morphometry
fit.morphometry <- function(x, ...) UseMethod("fit.morphometry")
#' @describeIn fit Fit a model to empirical variogram data.
#' @export fit.variogram
#' @export
fit.variogram <- function(x, model = "spherical", nugget = TRUE, distance.exponent = 0, inits, ...){
   # Fits a parametric variogram (exponential, gaussian or spherical) to the
   # empirical semi-variances in 'x$empirical' by weighted least squares and
   # attaches the fitted nugget/sill/range parameters, the model name and
   # the model function to 'x', which is returned.

   # Parse input arguments:
   model <- match.arg(tolower(model), c("exponential", "spherical", "gaussian"))

   # Define various variogram models.  Each 'vfun(h, nugget, range, sill)'
   # returns the model semi-variance at lag distance(s) 'h', preserving the
   # dimensions of 'h':
   if (model == "exponential"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         v <- (sill - nugget) * (1 - exp(-(3*h)/range)) + nugget
         return(v)
      }
   }
   if (model == "gaussian"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         v <- (sill - nugget) * (1 - exp(-(3*(h^2))/(range^2))) + nugget
         return(v)
      }
   }
   if (model == "spherical"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         # Spherical model reaches the sill exactly at h == range:
         index <- h < range
         v[index] <- (sill - nugget) * (((3 * h[index]) / (2* range)) - (h[index] ^ 3) / (2 * (range ^ 3))) + nugget
         return(v)
      }
   }

   # Extract empirical variogram values:
   res <- x$empirical

   # Find initial values for variogram parameters:
   # range defaults to half the maximum distance; sill to the mean
   # semi-variance over the far half of the empirical variogram.
   if (missing(inits)) inits <- NULL
   if ("range" %in% names(inits)) range <- inits$range else range <- x$max.distance / 2
   names(range) <- "range"
   sill <- mean(res$semi.variance[round(nrow(res)/2):nrow(res)])
   names(sill) <- "sill"
   if (nugget){
      # Initial nugget = intercept of a weighted linear fit of semi-variance
      # over short lags (< 0.75 * initial range):
      if (!is.null(x$lag)){
         # Binned variogram: use bin mid-points as the distance covariate.
         index <- ((res$start.distance + res$end.distance) / 2) < (0.75 * range)
         tmp <- ((res$start.distance[index] + res$end.distance[index]) / 2)
         nugget <- coef(lm(res$semi.variance[index] ~ tmp, weights = res$n[index]))[1]
      }else{
         index <- res$h < (0.75 * range)
         nugget <- coef(lm(res$semi.variance[index] ~ res$h[index]))[1]
      }
      # Keep the nugget strictly positive:
      nugget <- max(c(0.0000001, nugget))
      names(nugget) <- "nugget"
   }else{
      nugget <- NULL
   }

   # Switch nugget and sill parameters if they are inverted, then
   # reparameterize: 'sill' becomes the partial sill (sill - nugget):
   if (!is.null(nugget)){
      if (sill < nugget){
         tmp <- sill
         sill <- nugget
         nugget <- tmp
      }
      sill <- abs(sill - nugget)
      nugget <- abs(nugget)
   }else{
      sill <- abs(sill)
   }
   range <- abs(range)

   # Catenate parameter vector:
   theta <- c(nugget, range, sill)

   # Define objective function: weighted sum of squared deviations between
   # empirical and model semi-variances.  abs() makes the objective invariant
   # to parameter sign so optim can search unconstrained.
   ss <- function(theta, lag, semi.variance, weight, distance.exponent = 0){
      if ("nugget" %in% names(theta)) nugget <- abs(theta["nugget"]) else nugget <- 0
      sill <- nugget + abs(theta["sill"])
      range <- abs(theta["range"])
      mu <- vfun(lag, nugget = nugget, range = range, sill = sill)
      if (missing(weight)) weight <- 1
      if (distance.exponent != 0) weight <- weight / (lag ^ distance.exponent) # Add lag distance-weighted component.
      v <- sum(weight * (semi.variance - mu)^2)
      return(v)
   }

   # Estimate parameters: one initial fit plus five warm restarts to reduce
   # sensitivity to local minima.
   # NOTE(review): the objective is always evaluated at lag = res$h, even
   # when initial values above were derived from start/end.distance bins --
   # confirm res$h is populated in the binned case.
   p <- optim(theta, ss, lag = res$h, semi.variance = res$semi.variance, weight = res$n , distance.exponent = distance.exponent, control = list(maxit = 4000, trace = 0))
   for (i in 1:5) p <- optim(p$par, ss, lag = res$h, semi.variance = res$semi.variance, weight = res$n , distance.exponent = distance.exponent, control = list(maxit = 4000, trace = 0))
   theta <- p$par

   # Parse parameter vector back to the natural scale:
   if ("nugget" %in% names(theta)) nugget <- abs(theta["nugget"]) else nugget <- 0
   sill <- nugget + abs(theta["sill"])
   range <- abs(theta["range"])

   # Add parameters and model to object:
   x$nugget <- nugget
   x$sill <- sill
   x$range <- range
   x$model <- model
   x$vfun <- vfun

   return(x)
}
#' @describeIn fit Fit a morphometric model to snow crab morphometric data.
#' @export fit.morphometry.scsbio
#' @rawNamespace S3method(fit.morphometry,scsbio)
fit.morphometry.scsbio <- function(x, y, z, n = 1, sex, theta, discrete = FALSE, print = FALSE, trace = 0){
   # Maximum-likelihood fit of the snow crab morphometric mixture model.
   # Parameters are estimated in stages (mature proportions, kurtosis,
   # immature regression, remaining coefficients), each stage holding the
   # other parameters fixed, followed by a final joint fit.  Returns the
   # fitted parameter vector.

   # Default initial parameter values by sex when none are supplied:
   if (!missing(sex) && missing(theta)){   # scalar condition: '&&', not '&'
      if (sex == 1){
         # Male morphometry initial values:
         theta <- c(beta_immature = c(-2.03, 1.116, -0.06026, 0.0114), # Log-scale immature morphometric coefficients.
                    beta_mature = c(-2.858, 1.312), # Log-scale mature morphometric coefficients.
                    log_sigma = -3.3, # Log-scale standard error.
                    log_sigma_kurtotic = 0, # Log-scale extra standard error for kurtotic observations.
                    logit_p_kurtotic = -2, # Logit-scale proportion of kurtotic observations.
                    p_alpha = -11, # Logit-scale splm intercept parameter for mature proportions.
                    p_beta = c(0.25, 0.015, 0.25), # Logit-scale splm slope parameters for mature proportions.
                    p_transition = c(45, 95), # Logit-scale transition parameters for mature proportions.
                    p_window = 2.0) # Logit-scale splm window width parameter(s) for mature proportions.
      }
      if (sex == 2){
         # Female morphometry initial values:
         theta <- c(beta_immature = c(-2.72, 1.228), # Log-scale immature morphometric coefficients.
                    beta_mature = c(-2.80, 1.30), # Log-scale mature morphometric coefficients.
                    log_sigma = -3, # Log-scale standard error.
                    log_sigma_kurtotic = 2, # Log-scale extra standard error for kurtotic observations.
                    logit_p_kurtotic = -5.7, # Logit-scale proportion of kurtotic observations.
                    p_alpha = -10.4, # Logit-scale splm intercept parameter for mature proportions.
                    p_beta = c(0.16, 0.015, 0.29), # Logit-scale splm slope parameters for mature proportions.
                    p_transition = c(58.8, 101.1), # Logit-scale transition parameters for mature proportions.
                    p_window = 1.45) # Logit-scale splm window width parameter(s) for mature proportions.
      }
   }

   # Negative log-likelihood; 'fixed' carries the parameters that are held
   # constant at the current fitting stage.
   loglike <- function(theta, x, y, z, n = 1, fixed, discrete = FALSE){
      if (!missing(fixed)) theta <- c(theta, fixed)
      v <- -sum(n * morphometry.scsbio(x, y, z, theta = theta, discrete = discrete)$loglike)
      return(v)
   }

   # Define optimization controls:
   control <- list(trace = trace, maxit = 1000)

   # Stage 1: fit mature proportion parameters (names starting with 'p_'):
   if (print) cat("Fitting mature proportion parameters.\n")
   fixed <- theta[-grep("^p_", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 2: fit kurtosis parameters:
   if (print) cat("Fitting kurtosis parameters.\n")
   fixed <- theta[-grep("kurtotic", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 3: fit immature regression coefficients:
   if (print) cat("Fitting immature regression coefficients.\n")
   fixed <- theta[-grep("immature", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 4: fit non-regression coefficients (regression coefficients fixed):
   if (print) cat("Fitting non-regression coefficients.\n")
   fixed <- theta[grep("mature", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Final stage: jointly refit the complete model:
   if (print) cat("Fitting complete model.\n")
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, discrete = discrete, control = control)$par

   return(theta)
}
| /R/fit.R | no_license | TobieSurette/gulf.stats | R | false | false | 9,432 | r | #' Fit Statistical Models
#'
#' @description Functions to fit statistical models to data.
#'
#' @param x Data object or numeric vector representing a morphometric predictor variable.
#' @param y Numeric vector representing a morphometric response variable.
#' @param z Binary classification vector classifying morphometric data into discrete maturity groups.
#' @param n Number of observations for each (x,y) pair (optional).
#' @param sex Biological sex which specifies the type of analysis or initial values to be applied.
#' @param theta Parameter vector.
#' @param discrete Logical value specifying whether to treat observations as discrete rounded values.
#' @param print Logical value specifying whether to print information about progress during model fitting.
#' @param trace Optimization control output (see \code{\link[stats]{optim}}).
#' @param model Character string specifying the model type.
#' @param nugget Logical value specifying whether a variogram model contains a nugget semi-variance parameter.
#' @param distance.exponent Numeric value specifying the exponent to be applied in the distance metric.
#'
#' @export
fit <- function(x, ...) UseMethod("fit")
#' @describeIn fit Generic morphometric model fit method.
#' @export fit.morphometry
fit.morphometry <- function(x, ...) UseMethod("fit.morphometry")
#' @describeIn fit Fit a model to empirical variogram data.
#' @export fit.variogram
#' @export
fit.variogram <- function(x, model = "spherical", nugget = TRUE, distance.exponent = 0, inits, ...){
   # Fits a parametric variogram (exponential, gaussian or spherical) to the
   # empirical semi-variances in 'x$empirical' by weighted least squares and
   # attaches the fitted nugget/sill/range parameters, the model name and
   # the model function to 'x', which is returned.

   # Parse input arguments:
   model <- match.arg(tolower(model), c("exponential", "spherical", "gaussian"))

   # Define various variogram models.  Each 'vfun(h, nugget, range, sill)'
   # returns the model semi-variance at lag distance(s) 'h', preserving the
   # dimensions of 'h':
   if (model == "exponential"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         v <- (sill - nugget) * (1 - exp(-(3*h)/range)) + nugget
         return(v)
      }
   }
   if (model == "gaussian"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         v <- (sill - nugget) * (1 - exp(-(3*(h^2))/(range^2))) + nugget
         return(v)
      }
   }
   if (model == "spherical"){
      vfun <- function(h, nugget = 0, range = 1, sill = 1){
         v <- rep(sill, length(h))
         dim(v) <- dim(h)
         # Spherical model reaches the sill exactly at h == range:
         index <- h < range
         v[index] <- (sill - nugget) * (((3 * h[index]) / (2* range)) - (h[index] ^ 3) / (2 * (range ^ 3))) + nugget
         return(v)
      }
   }

   # Extract empirical variogram values:
   res <- x$empirical

   # Find initial values for variogram parameters:
   # range defaults to half the maximum distance; sill to the mean
   # semi-variance over the far half of the empirical variogram.
   if (missing(inits)) inits <- NULL
   if ("range" %in% names(inits)) range <- inits$range else range <- x$max.distance / 2
   names(range) <- "range"
   sill <- mean(res$semi.variance[round(nrow(res)/2):nrow(res)])
   names(sill) <- "sill"
   if (nugget){
      # Initial nugget = intercept of a weighted linear fit of semi-variance
      # over short lags (< 0.75 * initial range):
      if (!is.null(x$lag)){
         # Binned variogram: use bin mid-points as the distance covariate.
         index <- ((res$start.distance + res$end.distance) / 2) < (0.75 * range)
         tmp <- ((res$start.distance[index] + res$end.distance[index]) / 2)
         nugget <- coef(lm(res$semi.variance[index] ~ tmp, weights = res$n[index]))[1]
      }else{
         index <- res$h < (0.75 * range)
         nugget <- coef(lm(res$semi.variance[index] ~ res$h[index]))[1]
      }
      # Keep the nugget strictly positive:
      nugget <- max(c(0.0000001, nugget))
      names(nugget) <- "nugget"
   }else{
      nugget <- NULL
   }

   # Switch nugget and sill parameters if they are inverted, then
   # reparameterize: 'sill' becomes the partial sill (sill - nugget):
   if (!is.null(nugget)){
      if (sill < nugget){
         tmp <- sill
         sill <- nugget
         nugget <- tmp
      }
      sill <- abs(sill - nugget)
      nugget <- abs(nugget)
   }else{
      sill <- abs(sill)
   }
   range <- abs(range)

   # Catenate parameter vector:
   theta <- c(nugget, range, sill)

   # Define objective function: weighted sum of squared deviations between
   # empirical and model semi-variances.  abs() makes the objective invariant
   # to parameter sign so optim can search unconstrained.
   ss <- function(theta, lag, semi.variance, weight, distance.exponent = 0){
      if ("nugget" %in% names(theta)) nugget <- abs(theta["nugget"]) else nugget <- 0
      sill <- nugget + abs(theta["sill"])
      range <- abs(theta["range"])
      mu <- vfun(lag, nugget = nugget, range = range, sill = sill)
      if (missing(weight)) weight <- 1
      if (distance.exponent != 0) weight <- weight / (lag ^ distance.exponent) # Add lag distance-weighted component.
      v <- sum(weight * (semi.variance - mu)^2)
      return(v)
   }

   # Estimate parameters: one initial fit plus five warm restarts to reduce
   # sensitivity to local minima.
   # NOTE(review): the objective is always evaluated at lag = res$h, even
   # when initial values above were derived from start/end.distance bins --
   # confirm res$h is populated in the binned case.
   p <- optim(theta, ss, lag = res$h, semi.variance = res$semi.variance, weight = res$n , distance.exponent = distance.exponent, control = list(maxit = 4000, trace = 0))
   for (i in 1:5) p <- optim(p$par, ss, lag = res$h, semi.variance = res$semi.variance, weight = res$n , distance.exponent = distance.exponent, control = list(maxit = 4000, trace = 0))
   theta <- p$par

   # Parse parameter vector back to the natural scale:
   if ("nugget" %in% names(theta)) nugget <- abs(theta["nugget"]) else nugget <- 0
   sill <- nugget + abs(theta["sill"])
   range <- abs(theta["range"])

   # Add parameters and model to object:
   x$nugget <- nugget
   x$sill <- sill
   x$range <- range
   x$model <- model
   x$vfun <- vfun

   return(x)
}
#' @describeIn fit Fit a morphometric model to snow crab morphometric data.
#' @export fit.morphometry.scsbio
#' @rawNamespace S3method(fit.morphometry,scsbio)
fit.morphometry.scsbio <- function(x, y, z, n = 1, sex, theta, discrete = FALSE, print = FALSE, trace = 0){
   # Maximum-likelihood fit of the snow crab morphometric mixture model.
   # Parameters are estimated in stages (mature proportions, kurtosis,
   # immature regression, remaining coefficients), each stage holding the
   # other parameters fixed, followed by a final joint fit.  Returns the
   # fitted parameter vector.

   # Default initial parameter values by sex when none are supplied:
   if (!missing(sex) && missing(theta)){   # scalar condition: '&&', not '&'
      if (sex == 1){
         # Male morphometry initial values:
         theta <- c(beta_immature = c(-2.03, 1.116, -0.06026, 0.0114), # Log-scale immature morphometric coefficients.
                    beta_mature = c(-2.858, 1.312), # Log-scale mature morphometric coefficients.
                    log_sigma = -3.3, # Log-scale standard error.
                    log_sigma_kurtotic = 0, # Log-scale extra standard error for kurtotic observations.
                    logit_p_kurtotic = -2, # Logit-scale proportion of kurtotic observations.
                    p_alpha = -11, # Logit-scale splm intercept parameter for mature proportions.
                    p_beta = c(0.25, 0.015, 0.25), # Logit-scale splm slope parameters for mature proportions.
                    p_transition = c(45, 95), # Logit-scale transition parameters for mature proportions.
                    p_window = 2.0) # Logit-scale splm window width parameter(s) for mature proportions.
      }
      if (sex == 2){
         # Female morphometry initial values:
         theta <- c(beta_immature = c(-2.72, 1.228), # Log-scale immature morphometric coefficients.
                    beta_mature = c(-2.80, 1.30), # Log-scale mature morphometric coefficients.
                    log_sigma = -3, # Log-scale standard error.
                    log_sigma_kurtotic = 2, # Log-scale extra standard error for kurtotic observations.
                    logit_p_kurtotic = -5.7, # Logit-scale proportion of kurtotic observations.
                    p_alpha = -10.4, # Logit-scale splm intercept parameter for mature proportions.
                    p_beta = c(0.16, 0.015, 0.29), # Logit-scale splm slope parameters for mature proportions.
                    p_transition = c(58.8, 101.1), # Logit-scale transition parameters for mature proportions.
                    p_window = 1.45) # Logit-scale splm window width parameter(s) for mature proportions.
      }
   }

   # Negative log-likelihood; 'fixed' carries the parameters that are held
   # constant at the current fitting stage.
   loglike <- function(theta, x, y, z, n = 1, fixed, discrete = FALSE){
      if (!missing(fixed)) theta <- c(theta, fixed)
      v <- -sum(n * morphometry.scsbio(x, y, z, theta = theta, discrete = discrete)$loglike)
      return(v)
   }

   # Define optimization controls:
   control <- list(trace = trace, maxit = 1000)

   # Stage 1: fit mature proportion parameters (names starting with 'p_'):
   if (print) cat("Fitting mature proportion parameters.\n")
   fixed <- theta[-grep("^p_", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 2: fit kurtosis parameters:
   if (print) cat("Fitting kurtosis parameters.\n")
   fixed <- theta[-grep("kurtotic", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 3: fit immature regression coefficients:
   if (print) cat("Fitting immature regression coefficients.\n")
   fixed <- theta[-grep("immature", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Stage 4: fit non-regression coefficients (regression coefficients fixed):
   if (print) cat("Fitting non-regression coefficients.\n")
   fixed <- theta[grep("mature", names(theta))]
   theta <- theta[setdiff(names(theta), names(fixed))]
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, fixed = fixed, discrete = discrete, control = control)$par
   theta <- c(theta, fixed)

   # Final stage: jointly refit the complete model:
   if (print) cat("Fitting complete model.\n")
   theta <- optim(theta, loglike, x = x, y = y, z = z, n = n, discrete = discrete, control = control)$par

   return(theta)
}
|
# Simulation preamble: global settings shared by the replication runs below.
library(parallel)
simRep <- 5000 # Number of replications in one simulation
pvalue.true <- .05 # Nominal type I error of the test
b.var <- c(0.0225) # The set of variances of random covariates b as random slope
smooth <- 1 # Measurement error is added to M if smooth = 0; none if smooth = 1
cores <- 1 # Number of worker cores (for use with the parallel package)
r.sim <- b.var # Alias: random-slope variance used inside the simulation
# Run one Monte Carlo replicate of the heterogeneous-power simulation:
# simulate functional covariates with subject-specific random slopes on the
# true FPC scores, estimate FPCs with fpca.face, rotate each subject's data so
# the tested random slopes reduce to a single diagonal random effect, and test
# that variance component with an exact restricted likelihood ratio test.
#
# Args:
#   iter: replicate index; also used as the RNG seed so replicates are reproducible.
# Returns:
#   list(realTau, pvalues.bonf, Merror.Var, smooth, npc, tests2)
# NOTE: reads `r.sim` (true slope variance) and `smooth` (measurement-error
# switch) from the calling environment; they are exported to cluster workers
# via clusterExport() before parLapply().
run_one_sample <- function(iter){
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter)
  ## ---- True data-generating parameters ----
  D <- 80 # grid number total
  nSubj <- 50 # 200 # I the number of curves
  nRep <- 50 # 20 # datasets for each covariance function
  totalN <- nSubj * nRep
  thetaK.true <- 2 # mean of the subject-specific slopes on the FPC scores
  timeGrid <- (1:D)/D
  npc.true <- 3 # number of true principal components
  percent <- 0.95 # pve threshold handed to fpca.face
  SNR <- 3 # 5, signal to noise ratio
  sd.epsilon <- 1 # or 0.5; residual SD of Y
  delta.true <- 0.5 # fixed intercept
  a.mean <- 0 # mean of the true FPC scores
  gamma.true <- 2 # mean of the subject-specific group effects
  gammaVar.true <- 1 # variance of the subject-specific group effects
  ## ---- Subject-level group effects, replicated to one row per observation ----
  # mapply() with scalar arguments makes a single rnorm(nSubj, ...) draw and
  # returns an nSubj x 1 matrix, which is what the nrow()/rep() expansion needs.
  # hot
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # warm
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # per-observation group indicator
  ## ---- Functional covariates built from a rank-3 Fourier basis ----
  lambda.sim <- function(degree) {
    # Eigenvalue of component `degree`: 1, 0.5, 0.25, ...
    return(0.5^(degree - 1))
  }
  psi.fourier <- function(t, degree) {
    # Orthonormal Fourier eigenfunctions on [0, 1].
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1:npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific slopes on the FPC scores; their variances (r.sim, r.sim/2,
  # r.sim/4) are the variance components under test.
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- thetaIK.true * ascore.true
  betaM.true <- rowSums(betaM.true)
  # NOTE(review): (dummyX - 1) makes the second group effect enter with a
  # negative sign for the reference group -- presumably an intentional
  # contrast; verify against the study design.
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
  ## ---- FPCA on the observed (possibly noisy) curves ----
  t <- (1:D)/D
  knots <- 5 # previous setting 10
  p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
  npc <- results$npc
  score <- results$scores
  ascore <- score[, 1:npc]/sqrt(D) # rescale scores to the continuous-time norm
  # (assumes npc >= 2 so `ascore` keeps its matrix shape for the rotation below)
  # plot(results$efunctions[,2]*sqrt(D))
  # lines(1:80, psi.fourier(timeGrid, 2)) #match very well
  # to compare lambda: results$evalues/(D))
  # to compare estimated M, Mt.hat, Mt.true
  # a<-results$scores %*% t(results$efunctions)
  # plot(M[300,]) #Mt.hat
  # lines(a[300,]+results$mu,col="red") # estimated M
  # lines(Mt.true[300,], col="blue") #true Mt
  ###########################################################################
  ## ---- Per-subject orthogonal rotation: collapses the npc random slopes
  ## into a single diagonal random effect with design vector z.sim.uni ----
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni <- c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for(k in 1:nSubj){
    rows.k <- ((k-1)*nRep+1):(k*nRep) # observation rows of subject k
    sv <- svd(ascore[rows.k, ] %*% t(ascore[rows.k, ])) #SVD on A_i A_i'
    u.tra <- t(sv$v)
    d <- (sv$d)[1:npc] # leading singular values; A_i A_i' has rank npc
    Y[rows.k] <- u.tra %*% Y[rows.k]
    dummyX[rows.k, ] <- u.tra %*% dummyX[rows.k, ]
    ascore[rows.k, ] <- rbind(u.tra[1:npc, ] %*% ascore[rows.k, ],
                              matrix(0,
                                     nrow = nRep - npc,
                                     ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  # data.frame() expands the ascore matrix into columns ascore.1 ... ascore.npc
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  ## ---- lmer fits and exact RLRT for the tested variance component ----
  designMatrix.lmm <- designMatrix
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  # Full model: fixed FPC effects, random group slopes, and the tested
  # random effect (0 + z.sim.uni | ID.uni).
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model containing ONLY the tested random effect (required by exactRLRT).
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: the full model minus the tested random effect.
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# ---- Run replicates in parallel and summarize empirical power ----
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
#clusterSetRNGStream(cluster, 20170822)
# Reproducibility comes from set.seed(iter) inside run_one_sample, not from
# the (commented-out) cluster RNG stream.
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed1_grp50-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                       pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# One logical per replicate: TRUE when the RLRT p-value is significant at
# pvalue.true; the mean of these is the empirical power.
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
#      main = "Histogram of p-value for lme model",
#      xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
#      main = "Histogram of p-value for lmer model",
#      xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
#      breaks = (0:110)/10,
#      main = "Histogram of test-statistic for lme model",
#      xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
#      breaks = (0:100)/10,
#      main = "Histogram of test-statistic for lmer model",
#      xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) | /full simulation/summer/hetero_power/variance0.0225/seed1/heter_power_0.0225_pca_s_seed1_50_50.R | no_license | wma9/FMRI-project | R | false | false | 8,902 | r | library(parallel)
# ---- Simulation configuration (heterogeneous-power setting, slope variance 0.0225) ----
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error (nominal significance level)
b.var <- c(0.0225) # The set of variance of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if smooth = 1
cores <- 1 # number of worker processes for the PSOCK cluster below
r.sim <- b.var # true random-slope variance read by run_one_sample (exported to workers)
# Run one Monte Carlo replicate of the heterogeneous-power simulation:
# simulate functional covariates with subject-specific random slopes on the
# true FPC scores, estimate FPCs with fpca.face, rotate each subject's data so
# the tested random slopes reduce to a single diagonal random effect, and test
# that variance component with an exact restricted likelihood ratio test.
#
# Args:
#   iter: replicate index; also used as the RNG seed so replicates are reproducible.
# Returns:
#   list(realTau, pvalues.bonf, Merror.Var, smooth, npc, tests2)
# NOTE: reads `r.sim` (true slope variance) and `smooth` (measurement-error
# switch) from the calling environment; they are exported to cluster workers
# via clusterExport() before parLapply().
run_one_sample <- function(iter){
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter)
  ## ---- True data-generating parameters ----
  D <- 80 # grid number total
  nSubj <- 50 # 200 # I the number of curves
  nRep <- 50 # 20 # datasets for each covariance function
  totalN <- nSubj * nRep
  thetaK.true <- 2 # mean of the subject-specific slopes on the FPC scores
  timeGrid <- (1:D)/D
  npc.true <- 3 # number of true principal components
  percent <- 0.95 # pve threshold handed to fpca.face
  SNR <- 3 # 5, signal to noise ratio
  sd.epsilon <- 1 # or 0.5; residual SD of Y
  delta.true <- 0.5 # fixed intercept
  a.mean <- 0 # mean of the true FPC scores
  gamma.true <- 2 # mean of the subject-specific group effects
  gammaVar.true <- 1 # variance of the subject-specific group effects
  ## ---- Subject-level group effects, replicated to one row per observation ----
  # mapply() with scalar arguments makes a single rnorm(nSubj, ...) draw and
  # returns an nSubj x 1 matrix, which is what the nrow()/rep() expansion needs.
  # hot
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # warm
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # per-observation group indicator
  ## ---- Functional covariates built from a rank-3 Fourier basis ----
  lambda.sim <- function(degree) {
    # Eigenvalue of component `degree`: 1, 0.5, 0.25, ...
    return(0.5^(degree - 1))
  }
  psi.fourier <- function(t, degree) {
    # Orthonormal Fourier eigenfunctions on [0, 1].
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1:npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific slopes on the FPC scores; their variances (r.sim, r.sim/2,
  # r.sim/4) are the variance components under test.
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- thetaIK.true * ascore.true
  betaM.true <- rowSums(betaM.true)
  # NOTE(review): (dummyX - 1) makes the second group effect enter with a
  # negative sign for the reference group -- presumably an intentional
  # contrast; verify against the study design.
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
  ## ---- FPCA on the observed (possibly noisy) curves ----
  t <- (1:D)/D
  knots <- 5 # previous setting 10
  p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
  npc <- results$npc
  score <- results$scores
  ascore <- score[, 1:npc]/sqrt(D) # rescale scores to the continuous-time norm
  # (assumes npc >= 2 so `ascore` keeps its matrix shape for the rotation below)
  # plot(results$efunctions[,2]*sqrt(D))
  # lines(1:80, psi.fourier(timeGrid, 2)) #match very well
  # to compare lambda: results$evalues/(D))
  # to compare estimated M, Mt.hat, Mt.true
  # a<-results$scores %*% t(results$efunctions)
  # plot(M[300,]) #Mt.hat
  # lines(a[300,]+results$mu,col="red") # estimated M
  # lines(Mt.true[300,], col="blue") #true Mt
  ###########################################################################
  ## ---- Per-subject orthogonal rotation: collapses the npc random slopes
  ## into a single diagonal random effect with design vector z.sim.uni ----
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni <- c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for(k in 1:nSubj){
    rows.k <- ((k-1)*nRep+1):(k*nRep) # observation rows of subject k
    sv <- svd(ascore[rows.k, ] %*% t(ascore[rows.k, ])) #SVD on A_i A_i'
    u.tra <- t(sv$v)
    d <- (sv$d)[1:npc] # leading singular values; A_i A_i' has rank npc
    Y[rows.k] <- u.tra %*% Y[rows.k]
    dummyX[rows.k, ] <- u.tra %*% dummyX[rows.k, ]
    ascore[rows.k, ] <- rbind(u.tra[1:npc, ] %*% ascore[rows.k, ],
                              matrix(0,
                                     nrow = nRep - npc,
                                     ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  # data.frame() expands the ascore matrix into columns ascore.1 ... ascore.npc
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  ## ---- lmer fits and exact RLRT for the tested variance component ----
  designMatrix.lmm <- designMatrix
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  # Full model: fixed FPC effects, random group slopes, and the tested
  # random effect (0 + z.sim.uni | ID.uni).
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model containing ONLY the tested random effect (required by exactRLRT).
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: the full model minus the tested random effect.
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# ---- Run replicates in parallel and summarize empirical power ----
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
#clusterSetRNGStream(cluster, 20170822)
# Reproducibility comes from set.seed(iter) inside run_one_sample, not from
# the (commented-out) cluster RNG stream.
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed1_grp50-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                       pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# One logical per replicate: TRUE when the RLRT p-value is significant at
# pvalue.true; the mean of these is the empirical power.
table2.sim <- sapply(node_results, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
#      main = "Histogram of p-value for lme model",
#      xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
#      main = "Histogram of p-value for lmer model",
#      xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
#      breaks = (0:110)/10,
#      main = "Histogram of test-statistic for lme model",
#      xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
#      breaks = (0:100)/10,
#      main = "Histogram of test-statistic for lmer model",
#      xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) |
#Load packages
# NOTE: MASS is attached after tidyverse, so MASS::select masks dplyr::select;
# downstream code uses explicit dplyr:: prefixes where it matters.
library(tidyverse) # safe to ignore conflicts with filter() and lag()
library(MASS)
library(popbio)
#library(mpmtools) # not at CRAN;
# install.packages("devtools")
#devtools::install_github("BruceKendall/mpmtools")
library(ggpubr)
# The rjags package is just an interface to the JAGS library
# Make sure you have installed JAGS-4.x.y.exe (for any x >=0, y>=0) from
# http://www.sourceforge.net/projects/mcmc-jags/files
# library(rjags)
# library(R2jags)
# library(jagsUI)
library(Rlab)
library(runjags)
library(postpack)
library(coda)
rm(list=ls()) # NOTE(review): clears the whole workspace; fine for batch runs, risky when sourcing interactively
#source("functions/Dovi_IBS_SB_test.assign.conformity_mat12.R") #All biennial breeders reproduce for the first time at age 12
# Presumably defines simulate.pop() used in the main loop below -- verify.
source("./01_Data.generating.model/functions/Dovi_IBS_SB_test.assign.conformity_mat12OR13_liz.R") #Half of biennial breeders reproduce for the first time at age 12; the other half at age 13.
#################### Set output file locations and labels ####################
temp_location <- "output/"
output.location <- "output/"
parents_prefix <- "parents.breakdown"
sample_prefix <- "sample.info"
pop.size.prefix <- "pop.size"
truth.prefix <- "truth"
#################### Simulation parameters ####################
init.adult.pop.size <- 1000 # CHANGED FROM 3000; Initial adult population size
init.prop.female <- 0.5 # proportion of the initial population size that is female
birth.sex.ratio <- c(0.5,0.5) # The probability that each baby is F:M - has to add up to 1
YOY.survival <- 0.7 # CHANGED FROM 0.8; young of year survival
juvenile.survival <- 0.8 # CHANGED FROM 0.9; juvenile survival
Adult.survival <- 0.825 # CHANGED FROM 0.825; Adult survival
repro.age <- 12 # set age of reproductive maturity
max.age <- maxAge <- 50 #set the maximum age allowed in the simulation (chained assignment creates both names)
num.mates <- c(1:3) #1 #c(1:3) #vector of potential number of mates per mating
f <- (1-Adult.survival)/(YOY.survival * juvenile.survival^11) # adult fecundity at equilibrium if no age truncation
#################### Breeding schedule ######################
#------------------------------ Annual ------------------------------
# breeding.schedule <- "annual.breeding"
# mating.periodicity <- 1 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder.
# non.conformists <- 0
#------------------------------ Biennial ------------------------------
mating.periodicity <- 2 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder.
#============================== psi 1 ==============================
breeding.schedule <- "biennial.breeding_psi1"
non.conformists <- 0
#============================== psi 0.90 ==============================
# breeding.schedule <- "biennial.breeding_psi0.90"
# non.conformists <- 0.10 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
#============================== psi 0.75 ==============================
#breeding.schedule <- "biennial.breeding_psi0.75"
#non.conformists <- 0.25 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
#============================== psi 0.50 ==============================
#breeding.schedule <- "biennial.breeding_psi0.50"
#non.conformists <- 0.50 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
# Adjust fecundity ==============================================================
## for effective number of breeders each year, mating cycle, number of mates ====
#(from liz) ====
# psi = probability of conforming to the mating.periodicity schedule.
psi <- 1-non.conformists
# Per-female, per-mating fecundity: scales f by the breeding cycle/conformity,
# then divides by the female fraction and the mean number of mates per mating.
ff <- mating.periodicity/(mating.periodicity-psi*mating.periodicity+psi)*f/init.prop.female/mean(num.mates)
ff # bare name auto-prints the adjusted fecundity when run interactively
# ====
#################### Population growth ####################
# Exactly one population.growth scenario should be left uncommented.
#------------------------------Stable------------------------------
population.growth <- "lambda.1"
#------------------------------Slight increase------------------------------
# population.growth <- "lambda.slight.increase"
# ff.shift <- ff+0.5 #Increase fecundity to slightly increase population growth - only works for annual breeding
#------------------------------Slight decrease------------------------------
# population.growth <- "lambda.slight.decrease"
# ff.shift <- ff-0.5 #Decrease fecundity to slightly decrease population growth - only works for annual breeding
#------------------------------Substantial decrease------------------------------
#population.growth <- "lambda.extreme"
#################### Sampling scheme ######################
# Exactly one sampling.scheme should be left uncommented; it selects the
# age-filtering branch inside the sampling loop below.
#============================== target YOY ==============================
#sampling.scheme <- "target.YOY"
#============================== sample all juveniles ==============================
#sampling.scheme <- "sample.all.juvenile.ages"
#============================== sample all ages ==============================
sampling.scheme <- "sample.ALL.ages"
#-------------------Set date and load seeds----------------------------
today <- format(Sys.Date(), "%d%b%Y") # Store date for use in file name
date.of.simulation <- today
# Pre-generated seeds loaded from disk so iterations are reproducible across runs.
rseeds <- readRDS("rseeds_2022.04.15.rda")
seeds <- "Seeds2022.04.15"
# create some seeds:
#rseeds <- trunc(1529375630*runif(1000, min=0, max=1 ) )
#rseeds[1] <- 746160703
#----------------------- DATA-GENERATING MODEL --------------------
# Note on sequencing: Birthdays happen at beginning of each year, followed by mating, then death (i.e. a female who dies in year 10 can still give birth and have surviving pups in year 10)
#Stable age distribution
# props[y] holds the relative abundance of age y-1 (index 1 = age 0).
props <- rep(NA, max.age+1)
props[1] <- f
props[2] <- f * YOY.survival
for (y in 3:(repro.age+1)) props[y] <- props[y-1] * juvenile.survival #+1 because of age 0 individuals
for (y in (repro.age+2):(max.age+1)) props[y] <- props[y-1] * Adult.survival #+2 because of age 0 individuals
prop.Adult <- sum(props[(repro.age+1):(max.age+1)])/sum(props)
# props[-1] drops age 0; scaled so the resulting age vector yields roughly
# init.adult.pop.size adults (the adult portion of props sums to ~1) -- verify.
Nages <- round(props[-1] * init.adult.pop.size)
init.pop.size <- sum(Nages) # all ages except YOYs
#Set length of simulation and estimation year
burn.in <- 40 # number of years to use as simulation burn in period
Num.years <- 50 # The number of years to run in the simulation beyond the burn in
n_yrs <- burn.in + Num.years #Total number of simulation years
estimation.year <- n_yrs - 5 # Set year of estimation for truth calculations
#--------------------- Sampling parameters ---------------------
sample.years <- c(n_yrs - c(3:0)) #For four years of sampling (the last four simulated years)
sample.vec.prop <- c(.5, 1, 1.5, 2) # sampling intensities, in PERCENT of population size (divided by 100 below)
####-------------- Prep simulation ----------------------
# Moved sampling below so extract different sample sizes from same population
iterations <- 2 # 1 just to look at output 500 #Number of iterations to loop over
# Initialize accumulators that the iteration loop appends to
results <- NULL
sims.list.1 <- NULL
sims.list.2 <- NULL
sims.list.3 <- NULL
sims.list.4 <- NULL
sample.info <- NULL
parents.tibble_all <- NULL
pop.size.tibble_all <- NULL
mom.comps.tibble <- NULL
dad.comps.tibble <- NULL
truth.all <- NULL
# Labels for each sampling intensity, used in output file names
sim.samples.1 <- paste0(sample.vec.prop[1], "prop.sampled")
sim.samples.2 <- paste0(sample.vec.prop[2], "prop.sampled")
sim.samples.3 <- paste0(sample.vec.prop[3], "prop.sampled")
sim.samples.4 <- paste0(sample.vec.prop[4], "prop.sampled")
#
#---------------------Initialize array from previous checkpoint--------------------------
#Results
# results <- read_csv(paste0(results_location, results_prefix, "_", date.of.simulation, "_", seeds, "_", purpose, "_iter_", iter, ".csv"))
# #
# # #Model output for diagnostics
# sims.list.1 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.1, "_", MCMC.settings, "_", purpose))
# #
# sims.list.2 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.2, "_", MCMC.settings, "_", purpose))
# #
# sims.list.3 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.3, "_", MCMC.settings, "_", purpose))
#
# # Detailed info on samples and parents to examine in more detail
# sample.info <- readRDS(file = paste0(temp_location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", purpose))
####-------------- Start simulation loop ----------------------
# Main loop: one individual-based population simulation per iteration, then
# genetic-style sampling at each intensity in sample.vec.prop.
for(iter in 1:iterations) {
if(population.growth == "lambda.extreme"){
juvenile.survival <- 0.8 # Reset at the beginning of each simulation
Adult.survival <- 0.825 # Reset at the beginning of each simulation
(Ma <- -log(Adult.survival)) #Mortality of adults
(Mj <- -log(juvenile.survival)) #Mortality of juveniles
(Sa <- exp(-Ma)) #Survival of adults
(Sj <- exp(-Mj)) #Survival of juveniles
set.seed(rseeds[iter])
Mx <- runif(1, min = 0.05, max = 0.1) #Extra mortality
# NOTE(review): Sa/Sj (extra-mortality survivals) are computed here but the
# simulate.pop() call below still receives the unchanged juvenile.survival /
# Adult.survival -- unless simulate.pop() reads Sa/Sj from the global
# environment, the extra mortality never takes effect. Verify (branch is
# inactive under population.growth == "lambda.1").
(Sa <- exp(-Ma - Mx)) #Survival of adults
(Sj <- exp(-Mj - Mx)) #Survival of juveniles
}
sim.start <- Sys.time()
rseed <- rseeds[iter]
set.seed(rseed)
#Run individual based simulation
out <- simulate.pop(init.pop.size = init.pop.size,
                    init.prop.female = init.prop.female,
                    Nages = Nages,
                    mating.periodicity = mating.periodicity,
                    repro.age = repro.age,
                    YOY.survival = YOY.survival,
                    juvenile.survival = juvenile.survival,
                    Adult.survival = Adult.survival,
                    max.age = max.age,
                    num.mates = num.mates,
                    ff = ff,
                    burn.in = burn.in,
                    Num.years = Num.years)
#Save simulation output as objects
loopy.list <- out[[1]] #List of dataframes for each year of simulation
pop.size.tibble <- out[[2]] %>% #population parameters for each year of simulation
  as_tibble() %>%
  mutate(seed = rseed, iteration = iter)
parents.tibble <- out[[3]] %>%
  dplyr::filter(year >= 50) %>% #Tibble for each parent for each year to check the distribution later
  mutate(seed = rseed, iteration = iter)
#organize results and calculate summary statistics from the simulation
source("./01_Data.generating.model/functions/query_results_PopSim.R")
#-----------------------Collect samples-------------------------
#Loop over sample sizes stored in sample.vec.prop
for(samps in 1:length(sample.vec.prop)){
sample.prop <- sample.vec.prop[samps]
#Initialize sample dataframes
sample.df_all.info <- NULL
sample.df_temp <- NULL
#Sample population each year in sample.years and make dataframe of samples with all metadata
# Same seed for every sampling intensity so samples are comparable across intensities.
set.seed(rseed)
for(i in sample.years){
#Extract the relevant row from the pop size dataframe
pop.size.yr <- pop.size.tibble %>% dplyr::filter(year == i)
# sample.prop is a percentage (0.5-2), hence the /100
sample.size <- pop.size.yr %>%
  mutate(sample.size = round(population_size*(sample.prop/100), 0)) %>%
  pull(sample.size)
if(sampling.scheme == "target.YOY"){ #If targeting YOY for juvenile samples
#Set number of samples to a specific proportion of the population
#Sample YOY only for half-sib analysis
sample.df_temp <- loopy.list[[i]] %>% mutate(capture.year = i) %>%
  dplyr::filter(age.x == 0) %>%
  dplyr::slice_sample(n = sample.size) # #Sample each year WITHOUT replacement
} else if(sampling.scheme == "sample.all.juvenile.ages"){ #If sampling juveniles
sample.df_temp <- loopy.list[[i]] %>% dplyr::filter(age.x < repro.age & age.x > 0) %>%
  mutate(capture.year = i) %>%
  dplyr::slice_sample(n = sample.size) #Sample each year WITHOUT replacement (doesn't affect cross-year sampling since it's in a loop)
} else if(sampling.scheme == "sample.ALL.ages"){
sample.df_temp <- loopy.list[[i]] %>% mutate(capture.year = i) %>%
  dplyr::slice_sample(n = sample.size) #Sample each year WITHOUT replacement (doesn't affect cross-year sampling since it's in a loop)
}
#Combine samples from all years
sample.df_all.info <- rbind(sample.df_all.info, sample.df_temp)
}
sampled.mothers <- unique(sample.df_all.info$mother.x)
sampled.fathers <- unique(sample.df_all.info$father.x)
#Compile results and summary statistics from simulation to compare estimates
# Presumably creates `true.values` used below -- verify against the sourced script.
source("./01_Data.generating.model/functions/PopSim_truth.R")
#Save info for samples to examine in more detail
sample.df_all.info <- sample.df_all.info %>%
  mutate(sample.size.yr = sample.size,
         sampling.scheme = sampling.scheme,
         iteration = iter,
         seed = rseed,
         sample.prop = sample.prop) %>%
  mutate(sample.size.total = sample.size.yr * length(sample.years))
sample.info <- rbind(sample.info, sample.df_all.info) %>%
  as_tibble()
} # end loop over sample sizes
#-----------------Save output files iteratively--------------------
sim.samples.1 <- paste0(sample.vec.prop[1], "prop.sampled")
sim.samples.2 <- paste0(sample.vec.prop[2], "prop.sampled")
sim.samples.3 <- paste0(sample.vec.prop[3], "prop.sampled")
sim.samples.4 <- paste0(sample.vec.prop[4], "prop.sampled")
#Save parents tibble
parents.tibble_all <- bind_rows(parents.tibble_all, parents.tibble)
# saveRDS(parents.tibble_all, file = paste0(temp_location, parents_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
# Detailed info on population size
pop.size.tibble_all <- bind_rows(pop.size.tibble_all, pop.size.tibble)
# saveRDS(pop.size.tibble_all, file = paste0(temp_location, pop.size.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
#True values
truth.all <- bind_rows(truth.all, true.values)
# saveRDS(truth.all, file = paste0(temp_location, truth.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
# Detailed info on samples and parents to examine in more detail
# saveRDS(sample.info, file = paste0(temp_location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter, "_", sampling.scheme))
sim.end <- Sys.time()
iter.time <- round(as.numeric(difftime(sim.end, sim.start, units = "mins")), 1)
cat(paste0("Finished iteration ", iter, ". \n Took ", iter.time, " minutes"))
} # end loop over iterations
#-----------------------------Save major output files---------------------------------------------
# File names encode date, seed label, growth scenario, breeding schedule and sampling scheme.
#Save detailed info about samples from population
saveRDS(sample.info, file = paste0(output.location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
#Save parents tibble
saveRDS(parents.tibble_all, file = paste0(output.location, parents_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
# Detailed info on population size
saveRDS(pop.size.tibble_all, file = paste0(output.location, pop.size.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
# Truth
saveRDS(truth.all, file = paste0(output.location, truth.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme)) | /01_Data.generating.model/ForCluster/z_old/population_simulation_SB_psi1_sample.ALL.ages.R | no_license | JDSwenson/LemonSharkCKMR | R | false | false | 15,742 | r | #Load packages
# NOTE(review): second copy of the script preamble (repeated dataset row);
# content mirrors the preamble earlier in this file.
library(tidyverse) # safe to ignore conflicts with filter() and lag()
library(MASS)
library(popbio)
#library(mpmtools) # not at CRAN;
# install.packages("devtools")
#devtools::install_github("BruceKendall/mpmtools")
library(ggpubr)
# The rjags package is just an interface to the JAGS library
# Make sure you have installed JAGS-4.x.y.exe (for any x >=0, y>=0) from
# http://www.sourceforge.net/projects/mcmc-jags/files
# library(rjags)
# library(R2jags)
# library(jagsUI)
library(Rlab)
library(runjags)
library(postpack)
library(coda)
rm(list=ls())
#source("functions/Dovi_IBS_SB_test.assign.conformity_mat12.R") #All biennial breeders reproduce for the first time at age 12
source("./01_Data.generating.model/functions/Dovi_IBS_SB_test.assign.conformity_mat12OR13_liz.R") #Half of biennial breeders reproduce for the first time at age 12; the other half at age 13.
#################### Set output file locations and labels ####################
temp_location <- "output/"
output.location <- "output/"
parents_prefix <- "parents.breakdown"
sample_prefix <- "sample.info"
pop.size.prefix <- "pop.size"
truth.prefix <- "truth"
#################### Simulation parameters ####################
init.adult.pop.size <- 1000 # CHANGED FROM 3000; Initial adult population size
init.prop.female <- 0.5 # proportion of the initial population size that is female
birth.sex.ratio <- c(0.5,0.5) # The probability that each baby is F:M - has to add up to 1
YOY.survival <- 0.7 # CHANGED FROM 0.8; young of year survival
juvenile.survival <- 0.8 # CHANGED FROM 0.9; juvenile survival
Adult.survival <- 0.825 # CHANGED FROM 0.825; Adult survival
repro.age <- 12 # set age of reproductive maturity
max.age <- maxAge <- 50 #set the maximum age allowed in the simulation
num.mates <- c(1:3) #1 #c(1:3) #vector of potential number of mates per mating
f <- (1-Adult.survival)/(YOY.survival * juvenile.survival^11) # adult fecundity at equilibrium if no age truncation
#################### Breeding schedule ######################
#------------------------------ Annual ------------------------------
# breeding.schedule <- "annual.breeding"
# mating.periodicity <- 1 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder.
# non.conformists <- 0
#------------------------------ Biennial ------------------------------
mating.periodicity <- 2 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder.
#============================== psi 1 ==============================
breeding.schedule <- "biennial.breeding_psi1"
non.conformists <- 0
#============================== psi 0.90 ==============================
# breeding.schedule <- "biennial.breeding_psi0.90"
# non.conformists <- 0.10 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
#============================== psi 0.75 ==============================
#breeding.schedule <- "biennial.breeding_psi0.75"
#non.conformists <- 0.25 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
#============================== psi 0.50 ==============================
#breeding.schedule <- "biennial.breeding_psi0.50"
#non.conformists <- 0.50 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists
# Adjust fecundity ==============================================================
## for effective number of breeders each year, mating cycle, number of mates ====
#(from liz) ====
psi <- 1-non.conformists
ff <- mating.periodicity/(mating.periodicity-psi*mating.periodicity+psi)*f/init.prop.female/mean(num.mates)
ff
# ====
#################### Population growth ####################
#------------------------------Stable------------------------------
population.growth <- "lambda.1"
#------------------------------Slight increase------------------------------
# population.growth <- "lambda.slight.increase"
# ff.shift <- ff+0.5 #Increase fecundity to slightly increase population growth - only works for annual breeding
#------------------------------Slight decrease------------------------------
# population.growth <- "lambda.slight.decrease"
# ff.shift <- ff-0.5 #Decrease fecundity to slightly decrease population growth - only works for annual breeding
#------------------------------Substantial decrease------------------------------
#population.growth <- "lambda.extreme"
#################### Sampling scheme ######################
#============================== target YOY ==============================
#sampling.scheme <- "target.YOY"
#============================== sample all juveniles ==============================
#sampling.scheme <- "sample.all.juvenile.ages"
#============================== sample all ages ==============================
sampling.scheme <- "sample.ALL.ages"
#-------------------Set date and load seeds----------------------------
today <- format(Sys.Date(), "%d%b%Y") # Store date for use in file name
date.of.simulation <- today
rseeds <- readRDS("rseeds_2022.04.15.rda")
seeds <- "Seeds2022.04.15"
# create some seeds:
#rseeds <- trunc(1529375630*runif(1000, min=0, max=1 ) )
#rseeds[1] <- 746160703
#----------------------- DATA-GENERATING MODEL --------------------
# Note on sequencing: Birthdays happen at beginning of each year, followed by mating, then death (i.e. a female who dies in year 10 can still give birth and have surviving pups in year 10)
#Stable age distribution
# props[k] holds the relative abundance of age k-1 (index 1 = age 0), so the
# vector has max.age+1 entries. Each age class is the previous one scaled by
# the survival rate of the stage it passed through.
props <- rep(NA, max.age+1)
props[1] <- f # age-0 (YOY) class, produced at the equilibrium fecundity f
props[2] <- f * YOY.survival # age 1: YOYs that survived their first year
for (y in 3:(repro.age+1)) props[y] <- props[y-1] * juvenile.survival #+1 because of age 0 individuals
for (y in (repro.age+2):(max.age+1)) props[y] <- props[y-1] * Adult.survival #+2 because of age 0 individuals
prop.Adult <- sum(props[(repro.age+1):(max.age+1)])/sum(props) # fraction of population at or above repro.age
# NOTE(review): props is not normalized to sum to 1, so Nages is props (an
# unscaled stable-age vector) times init.adult.pop.size -- confirm this is the
# intended scaling rather than proportions of init.adult.pop.size.
Nages <- round(props[-1] * init.adult.pop.size) # expected count per age class, ages 1..max.age
init.pop.size <- sum(Nages) # all ages except YOYs
#Set length of simulation and estimation year
burn.in <- 40 # number of years to use as simulation burn in period
Num.years <- 50 # The number of years to run in the simulation beyond the burn in
n_yrs <- burn.in + Num.years #Total number of simulation years
estimation.year <- n_yrs - 5 # Set year of estimation for truth calculations
#--------------------- Sampling parameters ---------------------
sample.years <- c(n_yrs - c(3:0)) #For four years of sampling
sample.vec.prop <- c(.5, 1, 1.5, 2)
####-------------- Prep simulation ----------------------
# Sampling is done below (outside the simulation call) so that different sample sizes can be drawn from the same simulated population
iterations <- 2 # 1 just to look at output 500 #Number of iterations to loop over
# Initialize arrays for saving results
results <- NULL
sims.list.1 <- NULL
sims.list.2 <- NULL
sims.list.3 <- NULL
sims.list.4 <- NULL
sample.info <- NULL
parents.tibble_all <- NULL
pop.size.tibble_all <- NULL
mom.comps.tibble <- NULL
dad.comps.tibble <- NULL
truth.all <- NULL
sim.samples.1 <- paste0(sample.vec.prop[1], "prop.sampled")
sim.samples.2 <- paste0(sample.vec.prop[2], "prop.sampled")
sim.samples.3 <- paste0(sample.vec.prop[3], "prop.sampled")
sim.samples.4 <- paste0(sample.vec.prop[4], "prop.sampled")
#
#---------------------Initialize array from previous checkpoint--------------------------
#Results
# results <- read_csv(paste0(results_location, results_prefix, "_", date.of.simulation, "_", seeds, "_", purpose, "_iter_", iter, ".csv"))
# #
# # #Model output for diagnostics
# sims.list.1 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.1, "_", MCMC.settings, "_", purpose))
# #
# sims.list.2 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.2, "_", MCMC.settings, "_", purpose))
# #
# sims.list.3 <- readRDS(file = paste0(temp_location, MCMC_prefix, "_", date.of.simulation, "_", seeds, "_", sim.samples.3, "_", MCMC.settings, "_", purpose))
#
# # Detailed info on samples and parents to examine in more detail
# sample.info <- readRDS(file = paste0(temp_location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", purpose))
####-------------- Start simulation loop ----------------------
for(iter in 1:iterations) {
if(population.growth == "lambda.extreme"){
juvenile.survival <- 0.8 # Reset at the beginning of each simulation
Adult.survival <- 0.825 # Reset at the beginning of each simulation
(Ma <- -log(Adult.survival)) #Mortality of adults
(Mj <- -log(juvenile.survival)) #Mortality of juveniles
(Sa <- exp(-Ma)) #Survival of adults
(Sj <- exp(-Mj)) #Survival of juveniles
set.seed(rseeds[iter])
Mx <- runif(1, min = 0.05, max = 0.1) #Extra mortality
(Sa <- exp(-Ma - Mx)) #Survival of adults
(Sj <- exp(-Mj - Mx)) #Survival of juveniles
}
sim.start <- Sys.time()
rseed <- rseeds[iter]
set.seed(rseed)
#Run individual based simulation
out <- simulate.pop(init.pop.size = init.pop.size,
init.prop.female = init.prop.female,
Nages = Nages,
mating.periodicity = mating.periodicity,
repro.age = repro.age,
YOY.survival = YOY.survival,
juvenile.survival = juvenile.survival,
Adult.survival = Adult.survival,
max.age = max.age,
num.mates = num.mates,
ff = ff,
burn.in = burn.in,
Num.years = Num.years)
#Save simulation output as objects
loopy.list <- out[[1]] #List of dataframes for each year of simulation
pop.size.tibble <- out[[2]] %>% #population parameters for each year of simulation
as_tibble() %>%
mutate(seed = rseed, iteration = iter)
parents.tibble <- out[[3]] %>%
dplyr::filter(year >= 50) %>% #Tibble for each parent for each year to check the distribution later
mutate(seed = rseed, iteration = iter)
#organize results and calculate summary statistics from the simulation
source("./01_Data.generating.model/functions/query_results_PopSim.R")
#-----------------------Collect samples-------------------------
#Loop over sample sizes stored in sample.vec
for(samps in 1:length(sample.vec.prop)){
sample.prop <- sample.vec.prop[samps]
#Initialize sample dataframes
sample.df_all.info <- NULL
sample.df_temp <- NULL
#Sample population each year in sample.years and make dataframe of samples with all metadata
set.seed(rseed)
for(i in sample.years){
#Extract the relevant row from the pop size dataframe
pop.size.yr <- pop.size.tibble %>% dplyr::filter(year == i)
sample.size <- pop.size.yr %>%
mutate(sample.size = round(population_size*(sample.prop/100), 0)) %>%
pull(sample.size)
if(sampling.scheme == "target.YOY"){ #If targeting YOY for juvenile samples
#Set number of samples to a specific proportion of the population
#Sample YOY only for half-sib analysis
sample.df_temp <- loopy.list[[i]] %>% mutate(capture.year = i) %>%
dplyr::filter(age.x == 0) %>%
dplyr::slice_sample(n = sample.size) # #Sample each year WITHOUT replacement
} else if(sampling.scheme == "sample.all.juvenile.ages"){ #If sampling juveniles
sample.df_temp <- loopy.list[[i]] %>% dplyr::filter(age.x < repro.age & age.x > 0) %>%
mutate(capture.year = i) %>%
dplyr::slice_sample(n = sample.size) #Sample each year WITHOUT replacement (doesn't affect cross-year sampling since it's in a loop)
} else if(sampling.scheme == "sample.ALL.ages"){
sample.df_temp <- loopy.list[[i]] %>% mutate(capture.year = i) %>%
dplyr::slice_sample(n = sample.size) #Sample each year WITHOUT replacement (doesn't affect cross-year sampling since it's in a loop)
}
#Combine samples from all years
sample.df_all.info <- rbind(sample.df_all.info, sample.df_temp)
}
sampled.mothers <- unique(sample.df_all.info$mother.x)
sampled.fathers <- unique(sample.df_all.info$father.x)
#Compile results and summary statistics from simulation to compare estimates
source("./01_Data.generating.model/functions/PopSim_truth.R")
#Save info for samples to examine in more detail
sample.df_all.info <- sample.df_all.info %>%
mutate(sample.size.yr = sample.size,
sampling.scheme = sampling.scheme,
iteration = iter,
seed = rseed,
sample.prop = sample.prop) %>%
mutate(sample.size.total = sample.size.yr * length(sample.years))
sample.info <- rbind(sample.info, sample.df_all.info) %>%
as_tibble()
} # end loop over sample sizes
#-----------------Save output files iteratively--------------------
sim.samples.1 <- paste0(sample.vec.prop[1], "prop.sampled")
sim.samples.2 <- paste0(sample.vec.prop[2], "prop.sampled")
sim.samples.3 <- paste0(sample.vec.prop[3], "prop.sampled")
sim.samples.4 <- paste0(sample.vec.prop[4], "prop.sampled")
#Save parents tibble
parents.tibble_all <- bind_rows(parents.tibble_all, parents.tibble)
# saveRDS(parents.tibble_all, file = paste0(temp_location, parents_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
# Detailed info on population size
pop.size.tibble_all <- bind_rows(pop.size.tibble_all, pop.size.tibble)
# saveRDS(pop.size.tibble_all, file = paste0(temp_location, pop.size.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
#True values
truth.all <- bind_rows(truth.all, true.values)
# saveRDS(truth.all, file = paste0(temp_location, truth.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter))
# Detailed info on samples and parents to examine in more detail
# saveRDS(sample.info, file = paste0(temp_location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_iter_", iter, "_", sampling.scheme))
sim.end <- Sys.time()
iter.time <- round(as.numeric(difftime(sim.end, sim.start, units = "mins")), 1)
cat(paste0("Finished iteration ", iter, ". \n Took ", iter.time, " minutes"))
} # end loop over iterations
#-----------------------------Save major output files---------------------------------------------
#Save detailed info about samples from population
saveRDS(sample.info, file = paste0(output.location, sample_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
#Save parents tibble
saveRDS(parents.tibble_all, file = paste0(output.location, parents_prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
# Detailed info on population size
saveRDS(pop.size.tibble_all, file = paste0(output.location, pop.size.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme))
# Truth
saveRDS(truth.all, file = paste0(output.location, truth.prefix, "_", date.of.simulation, "_", seeds, "_", population.growth, "_", breeding.schedule, "_", sampling.scheme)) |
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.34142584663862e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615831144-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.34142584663862e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
# Inspired by https://github.com/allanino/DNA/blob/master/dna/
# Based on the technology described by Goldman et al (2013), Nature
# The terminology (S0, S1, S4, ...) is taken from the supplementary material (V2)
# https://www.ebi.ac.uk/sites/ebi.ac.uk/files/groups/goldman/file2features_2.0.pdf
# Important note: I will use a simplified version of the procedure.
# This means that the resulting DNA sequences can not be used for physical printing
# Main functions
## Convert S0 to S1
## Encode plain text (S0) into its base-3 representation (S1) using the
## Huffman dictionary `huff` (defined elsewhere; maps byte values 0-255 to
## 5- or 6-trit codewords in column `base3`).
## NOTE(review): bytes() is presumably pryr::bytes(), returning the text as a
## space-separated string of hex byte values -- confirm which package supplies it.
S0_to_S1 <- function(S0) {
  # Convert text to bytes and bytes to 256 number rep
  bytes <- bytes(as.character(S0)) %>%
    strsplit(., " ") %>%
    .[[1]] %>% # All steps above are parsing steps
    strtoi(base = 16) # Convert to the numerical representation of bytes
  # Convert bytes to base3 using the Huffman Dictionary
  S1 <- tibble(bytes) %>%
    left_join(huff) %>% # Find the corresponding base3 representation in the huffman dictionary
    pull(base3) %>%
    paste(collapse = '') # Parse
  # Return output
  return(S1)
}
## Convert S1 to S5 without the intermediate steps
## Encode a base-3 string (trits "0"/"1"/"2") as a DNA string, choosing each
## base from the lookup table `DNAcode` (defined elsewhere) keyed on the
## previous base so that no base repeats -- the Goldman et al. (2013) rotation
## code. The first base is chosen as if the previous base were "A".
S1_to_S5 <- function(S1) {
  # First base: row of DNAcode for previous base "A", column named after trit 1.
  S5 <- DNAcode %>%
    filter(prevBase == "A") %>%
    .[, str_sub(S1, 1, 1) %>% as.character()] %>%
    pull()
  # seq_len(n)[-1] is empty when nchar(S1) == 1; the original 2:nchar(S1)
  # would have iterated over c(2, 1) for a single-trit input and failed.
  for (char in seq_len(nchar(S1))[-1]) {
    newBase <- DNAcode %>%
      filter(prevBase == str_sub(S5, char - 1, char - 1)) %>%
      .[, str_sub(S1, char, char) %>% as.character()] %>%
      pull()
    S5 <- str_c(S5, newBase, sep = "")
  }
  return(S5)
}
## Convert S5 to S1
## Decode a DNA string back to its base-3 representation by inverting the
## DNAcode rotation: for each base, find which trit-named column holds it in
## the row keyed by the previous base ("A" for the first position).
S5_to_S1 <- function(S5) {
  S1 <- DNAcode %>%
    filter(prevBase == "A") %>%
    .[ , which(. == str_sub(S5, 1, 1))] %>%
    names()
  # seq_len(n)[-1] is empty for a single-base input; the original 2:nchar(S5)
  # would have wrongly iterated over c(2, 1) in that case.
  for (char in seq_len(nchar(S5))[-1]) {
    newTrit <- DNAcode %>%
      filter(prevBase == str_sub(S5, char - 1, char - 1)) %>%
      .[ , which(. == str_sub(S5, char, char))] %>%
      names()
    S1 <- str_c(S1, newTrit, sep = "")
  }
  return(S1)
}
## Convert S1 to S0
## Decode a base-3 string back to text. Huffman codewords in `huff` are 5 or 6
## trits long: try the 5-trit prefix first, otherwise consume 6 trits.
## NOTE(review): assumes S1 is a valid concatenation of codewords; a malformed
## tail would yield an empty lookup and a zero-length `byte` -- confirm
## upstream guarantees well-formed input.
S1_to_S0 <- function(S1) {
  S0 <- ""
  i <- 1 # position of the next undecoded trit
  while (i < nchar(S1)) {
    if (str_sub(S1, i, i+4) %in% huff$base3){ # 5-trit codeword match?
      byte <- huff %>%
        filter(base3 == str_sub(S1, i, i+4)) %>%
        pull(bytes)
      S0 <- rawToChar(as.raw(byte)) %>% # byte value -> character, appended to the output
        str_c(S0, ., sep = '')
      i <- i + 5
    } else { # otherwise it must be a 6-trit codeword
      byte <- huff %>%
        filter(base3 == str_sub(S1, i, i+5)) %>%
        pull(bytes)
      S0 <- rawToChar(as.raw(byte)) %>%
        str_c(S0, ., sep = '')
      i <- i + 6
    }
  }
  return(S0)
}
| /functions.R | permissive | swuyts/textToDNA | R | false | false | 2,472 | r | # Inspired by https://github.com/allanino/DNA/blob/master/dna/
# Based on the technology described by Goldman et al (2013), Nature
# The terminology (S0, S1, S4, ...) is taken from the supplementary material (V2)
# https://www.ebi.ac.uk/sites/ebi.ac.uk/files/groups/goldman/file2features_2.0.pdf
# Important note: I will use a simplified version of the procedure.
# This means that the resulting DNA sequences can not be used for physical printing
# Main functions
## Convert S0 to S1
S0_to_S1 <- function(S0) {
# Convert text to bytes and bytes to 256 number rep
bytes <- bytes(as.character(S0)) %>%
strsplit(., " ") %>%
.[[1]] %>% # All steps above are parsing steps
strtoi(base = 16) # Convert to the numerical representation of bytes
# Convert bytes to base3 using the Huffman Dictionary
S1 <- tibble(bytes) %>%
left_join(huff) %>% # Find the corresponding base3 representation in the huffman dictionary
pull(base3) %>%
paste(collapse = '') # Parse
# Return output
return(S1)
}
## Convert S1 to S5 without the intermediate steps
S1_to_S5 <- function(S1) {
S5 <- DNAcode %>%
filter(prevBase == "A") %>%
.[,str_sub(S1, 1, 1) %>% as.character()] %>%
pull()
for (char in 2:nchar(S1)){
newBase <- DNAcode %>%
filter(prevBase == str_sub(S5, char - 1, char - 1)) %>%
.[,str_sub(S1, char, char) %>% as.character()] %>%
pull()
S5 <- str_c(S5, newBase, sep = "")
}
return(S5)
}
## Covert S5 to S1
S5_to_S1 <- function(S5) {
S1 <- DNAcode %>%
filter(prevBase == "A") %>%
.[ , which(. == str_sub(S5, 1, 1))] %>%
names()
for (char in 2:nchar(S5)){
newTrit <- DNAcode %>%
filter(prevBase == str_sub(S5, char - 1, char - 1)) %>%
.[ , which(. == str_sub(S5, char, char))] %>%
names()
S1 <- str_c(S1, newTrit, sep = "")
}
return(S1)
}
## Convert S1 to S0
S1_to_S0 <- function(S1) {
S0 <- ""
i <- 1
while (i < nchar(S1)) {
if (str_sub(S1, i, i+4) %in% huff$base3){
byte <- huff %>%
filter(base3 == str_sub(S1, i, i+4)) %>%
pull(bytes)
S0 <- rawToChar(as.raw(byte)) %>%
str_c(S0, ., sep = '')
i <- i + 5
} else {
byte <- huff %>%
filter(base3 == str_sub(S1, i, i+5)) %>%
pull(bytes)
S0 <- rawToChar(as.raw(byte)) %>%
str_c(S0, ., sep = '')
i <- i + 6
}
}
return(S0)
}
|
## Force every panel of a ggplot to a fixed physical size.
## Accepts either a ggplot (`p`) or an already-built gtable (`g`); sets the
## width of every panel column and the height of every panel row, optionally
## saves the result to `file` at the exact resulting physical size, and
## returns the modified gtable.
set_panel_size <- function(p = NULL, g = ggplot2::ggplotGrob(p), file = NULL,
                           margin = unit(1, "mm"), width = unit(4, "cm"),
                           height = unit(4, "cm")) {
  panel_cells <- grep("panel", g$layout$name)
  panel_cols <- unique(g$layout$l[panel_cells])
  panel_rows <- unique(g$layout$t[panel_cells])
  g$widths[panel_cols] <- rep(width, length(panel_cols))
  g$heights[panel_rows] <- rep(height, length(panel_rows))
  if (!is.null(file)) {
    out_width <- grid::convertWidth(sum(g$widths) + margin,
                                    unitTo = "in", valueOnly = TRUE)
    out_height <- grid::convertHeight(sum(g$heights) + margin,
                                      unitTo = "in", valueOnly = TRUE)
    ggplot2::ggsave(file, g, width = out_width, height = out_height)
  }
  g
}
| /R/set_panel_size.R | no_license | jamesjiadazhan/xmsPANDA | R | false | false | 804 | r | set_panel_size <-
function(p = NULL, g = ggplot2::ggplotGrob(p), file = NULL,
margin = unit(1, "mm"), width = unit(4, "cm"),
height = unit(4, "cm")) {
panels <- grep("panel", g$layout$name)
panel_index_w <- unique(g$layout$l[panels])
panel_index_h <- unique(g$layout$t[panels])
nw <- length(panel_index_w)
nh <- length(panel_index_h)
g$widths[panel_index_w] <- rep(width, nw)
g$heights[panel_index_h] <- rep(height, nh)
if (!is.null(file)) {
ggplot2::ggsave(file, g, width = grid::convertWidth(sum(g$widths) +
margin, unitTo = "in", valueOnly = TRUE), height = grid::convertHeight(sum(g$heights) +
margin, unitTo = "in", valueOnly = TRUE))
}
g
}
|
\name{AffineTransformation-class}
\Rdversion{1.1}
\docType{class}
\alias{AffineTransformation-class}
\title{Class "AffineTransformation"}
\description{Class to define affine transformations to be applied on sp objects.
Affine transformations can rotate, shift, scale (even applying different
factors on each axis) and skew geometries.}
\section{Objects from the Class}{
Objects can be created by calls to the function \link{AffineTransformation}.
}
\section{Slots}{
\describe{
\item{\code{controlPoints}:}{Object of class \code{"data.frame"} containing
control point coordinates arranged in four (4) columns: X source,
Y source, X target, Y target. }
\item{\code{parameters}:}{Object of class \code{"numeric"}. A vector of six
(6) parameters for representing the transformation, namely: a, b, c, d,
e and f, where \cr
x' = ax + by + c \cr
y' = dx + ey + f }
\item{\code{residuals}:}{Object of class \code{"matrix"}. Only set if Least
Squares is applied, i.e., if more than three (3) control points were
provided. Residuals are the difference between transformed source
coordinates and target coordinates of control points. }
\item{\code{rmse}:}{Object of class \code{"numericOrNULL"}. Only set if
Least Squares is applied, i.e., if more than three (3) control points
were provided. Root Mean Square Error, useful when comparing two
transformations. It measures the general deviation of transformed source
coordinates with respect to target coordinates of control points. It has
the same units as the coordinates, usually meters.}
}
}
\section{Extends}{
Class \code{"\linkS4class{Cartesian2DCoordinateTransformation}"}, directly.
}
\section{Methods}{
\describe{
\item{calculateParameters}{\code{signature(object = "AffineTransformation")}:\cr
Calculate transformation parameters from control points.}
}
}
\references{
Iliffe, J. and Lott, R. Datums and map projections: For remote sensing,
GIS and surveying. Section 4.5.4. pp.115-117, 2008.
}
\author{German Carrillo}
\seealso{\code{\link{AffineTransformation}}}
\examples{
showClass("AffineTransformation")
}
\keyword{classes}
\keyword{spatial}
| /man/AffineTransformation-class.Rd | no_license | gacarrillor/vec2dtransf | R | false | false | 2,280 | rd | \name{AffineTransformation-class}
\Rdversion{1.1}
\docType{class}
\alias{AffineTransformation-class}
\title{Class "AffineTransformation"}
\description{Class to define affine transformations to be applied on sp objects.
Affine transformations can rotate, shift, scale (even applying different
factors on each axis) and skew geometries.}
\section{Objects from the Class}{
Objects can be created by calls to the function \link{AffineTransformation}.
}
\section{Slots}{
\describe{
\item{\code{controlPoints}:}{Object of class \code{"data.frame"} containing
control point coordinates arranged in four (4) columns: X source,
Y source, X target, Y target. }
\item{\code{parameters}:}{Object of class \code{"numeric"}. A vector of six
(6) parameters for representing the transformation, namely: a, b, c, d,
e and f, where \cr
x' = ax + by + c \cr
y' = dx + ey + f }
\item{\code{residuals}:}{Object of class \code{"matrix"}. Only set if Least
Squares is applied, i.e., if more than three (3) control points were
provided. Residuals are the difference between transformed source
coordinates and target coordinates of control points. }
\item{\code{rmse}:}{Object of class \code{"numericOrNULL"}. Only set if
Least Squares is applied, i.e., if more than three (3) control points
were provided. Root Mean Square Error, useful when comparing two
transformations. It measures the general deviation of transformed source
coordinates with respect to target coordinates of control points. It has
the same units as the coordinates, usually meters.}
}
}
\section{Extends}{
Class \code{"\linkS4class{Cartesian2DCoordinateTransformation}"}, directly.
}
\section{Methods}{
\describe{
\item{calculateParameters}{\code{signature(object = "AffineTransformation")}:\cr
Calculate transformation parameters from control points.}
}
}
\references{
Iliffe, J. and Lott, R. Datums and map projections: For remote sensing,
GIS and surveying. Section 4.5.4. pp.115-117, 2008.
}
\author{German Carrillo}
\seealso{\code{\link{AffineTransformation}}}
\examples{
showClass("AffineTransformation")
}
\keyword{classes}
\keyword{spatial}
|
###
### So Young Park
### SODA 501
### Final Project
###
### Created 4/10/18 for gravity model analysis
###
library(readr)
library(stringr)
library(tidyverse)
library(stringr)
library(xtable)
library(splitstackshape)
library(gravity)
#first load population and income data
#population
population <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/population2016.csv")
head(population) # population is city level 2015 Estimate value
#unlike our Twitter edgelist data, the State names are not abbreviated. Let's change it.
colnames(population)[1] <- "city"
colnames(population)[2] <- "name"
colnames(population)[3] <- "population"
abbrev <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/state_abbrev.csv")
head(abbrev)
population <- full_join(population, abbrev, by = "name") #merge
population$name <- NULL
head(population)
population$city <- str_sub(population$city, 1, str_length(population$city)-5) #remove " city", " town"
population <- population[-c(1),] #remove Alabama
#income
income <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/medianhouseholdincome.csv")
head(income)
income[, 1:3] <- NULL
colnames(income)[1] <- "county"
colnames(income)[2] <- "income"
#income is county level 2015 median household income.
# Separate county and state
income <- cSplit(income, "county", "(") #divide into two columns
income$county_2 <- str_sub(income$county_2, 1, str_length(income$county_2)-1) #remove ")"
colnames(income)[2] <- "county"
colnames(income)[3] <- "state"
income$county <- str_sub(income$county, 1, str_length(income$county)-7) #remove " county"
income <- income[-c(1,2),] #remove Alabama and US
#need to match city and county to add income.
city2county <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/zip_codes_states.csv")
head(city2county)
income <- full_join(income, city2county, by = c("county" , "state"), all=TRUE) #merge
head(income)
#merge income and population
incomePop <- full_join(income, population, by = c("city" , "state"))
head(incomePop)
#Match with edgelist
load("~/Box Sync/2018 Spring/SoDA 501/FinalProject/agg_elist.RData")
edges <- agg_elist
edges <- cSplit(edges, "origin", ",")
edges <- cSplit(edges, "dest", ",")
head(edges)
colnames(edges)[1] <- "volume"
colnames(edges)[2] <- "distance"
colnames(edges)[3] <- "originC"
colnames(edges)[4] <- "stateC"
colnames(edges)[5] <- "destinC"
colnames(edges)[6] <- "stateD"
regressionData <- merge(incomePop, edges, by.x = c("city", "state"),
by.y = c("originC", "stateC"), all=TRUE)
regressionData1 <- filter(regressionData, volume!="NA")
regressionData1[,4:7] <- NULL
colnames(regressionData1)[1] <- "originC"
colnames(regressionData1)[2] <- "stateO"
colnames(regressionData1)[3] <- "incomeO"
colnames(regressionData1)[4] <- "populationO"
regressionData1 <- select(regressionData1, originC, stateO, incomeO, populationO, destinC, stateD)
head(regressionData1)
#add destination information
regressionData2 <- merge(incomePop, regressionData1, by.x = c("city", "state"),
by.y = c("destinC", "stateD"), all=TRUE)
head(regressionData2)
regressionData3 <- filter(regressionData2, originC!="NA")
regressionData3[,4:7] <- NULL
colnames(regressionData3)[1] <- "destinC"
colnames(regressionData3)[2] <- "stateD"
colnames(regressionData3)[3] <- "incomeD"
colnames(regressionData3)[4] <- "populationD"
regressionData3 <- filter(regressionData3, incomeD!="NA")
head(regressionData3)
#merge with origin data
regressionData4 <- filter(regressionData, volume!="NA")
colnames(regressionData4)[1] <- "originC"
regressionData4[,2:8] <- NULL
regressionData4[,5] <- NULL
head(regressionData4)
head(regressionData3)
regData3 <- distinct(regressionData3)
regData4 <- distinct(regressionData4)
regressionData <- full_join(regData3, regData4, by=c("destinC", "originC"))
regressionData <- filter(regressionData, volume!="NA")
head(regressionData)
glimpse(regressionData) #oops income has $ sign and commas
regressionData$incomeO2 = gsub("\\$|,", "", regressionData$incomeO)
regressionData$incomeD2 = gsub("\\$|,", "", regressionData$incomeD)
regressionData$incomeO <- as.numeric(regressionData$incomeO2)
regressionData$incomeD <- as.numeric(regressionData$incomeD2)
regressionData$incomeO2 <- NULL
regressionData$incomeD2 <- NULL
glimpse(regressionData)
save(regressionData, "Regression.RData")
#gravity model
regressionData$lincomeO <- log(regressionData$incomeO) #income and population need to be in the log form
regressionData$lincomeD <- log(regressionData$incomeD)
regressionData$lpopulationO <- log(regressionData$populationO)
regressionData$lpopulationD <- log(regressionData$populationD)
#PPML estimates gravity models in their multiplicative form via Poisson Pseudo Maximum Likelihood.
# NOTE(review): the log-transformed columns created just above (lincome*/
# lpopulation*) are never used -- PPML() is fitted on the raw columns. Confirm
# whether gravity::PPML() logs covariates internally or whether x should list
# the l* names instead.
fit <- PPML(y="volume", dist="distance", x=c("populationO","populationD","incomeO","incomeD"),
            vce_robust=TRUE, data=regressionData)
#okay many missing income and population data...
sum(is.na(regressionData$populationO))
sum(is.na(regressionData$incomeO))
sum(is.na(regressionData$populationD))
sum(is.na(regressionData$incomeD))
options(digits=4)
summary(fit) # display results
exp(coef(fit))
table <- xtable(fit)
print.xtable(table, type = "html", file = "table1.html", digits = 4)
regressionData$predict <- predict(fit) | /src/07_gravity_model.R | no_license | ckelling/Travel_Patterns_SODA_501 | R | false | false | 5,284 | r | ###
### So Young Park
### SODA 501
### Final Project
###
### Created 4/10/18 for gravity model analysis
###
library(readr)
library(stringr)
library(tidyverse)
library(stringr)
library(xtable)
library(splitstackshape)
library(gravity)
#first load population and income data
#population
population <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/population2016.csv")
head(population) # population is city level 2015 Estimate value
#unlike our Twitter edgelist data, the State names are not abbreviated. Let's change it.
colnames(population)[1] <- "city"
colnames(population)[2] <- "name"
colnames(population)[3] <- "population"
abbrev <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/state_abbrev.csv")
head(abbrev)
population <- full_join(population, abbrev, by = "name") #merge
population$name <- NULL
head(population)
population$city <- str_sub(population$city, 1, str_length(population$city)-5) #remove " city", " town"
population <- population[-c(1),] #remove Alabama
#income
income <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/medianhouseholdincome.csv")
head(income)
income[, 1:3] <- NULL
colnames(income)[1] <- "county"
colnames(income)[2] <- "income"
#income is county level 2015 median household income.
#seperate county and state
income <- cSplit(income, "county", "(") #divide into two columns
income$county_2 <- str_sub(income$county_2, 1, str_length(income$county_2)-1) #remove ")"
colnames(income)[2] <- "county"
colnames(income)[3] <- "state"
income$county <- str_sub(income$county, 1, str_length(income$county)-7) #remove " county"
income <- income[-c(1,2),] #remove Alabama and US
#need to match city and county to add income.
city2county <- read_csv("Box Sync/2018 Spring/SoDA 501/FinalProject/zip_codes_states.csv")
head(city2county)
income <- full_join(income, city2county, by = c("county" , "state"), all=TRUE) #merge
head(income)
#merge income and population
incomePop <- full_join(income, population, by = c("city" , "state"))
head(incomePop)
#Match with edgelist
load("~/Box Sync/2018 Spring/SoDA 501/FinalProject/agg_elist.RData")
edges <- agg_elist
edges <- cSplit(edges, "origin", ",")
edges <- cSplit(edges, "dest", ",")
head(edges)
colnames(edges)[1] <- "volume"
colnames(edges)[2] <- "distance"
colnames(edges)[3] <- "originC"
colnames(edges)[4] <- "stateC"
colnames(edges)[5] <- "destinC"
colnames(edges)[6] <- "stateD"
regressionData <- merge(incomePop, edges, by.x = c("city", "state"),
by.y = c("originC", "stateC"), all=TRUE)
regressionData1 <- filter(regressionData, volume!="NA")
regressionData1[,4:7] <- NULL
colnames(regressionData1)[1] <- "originC"
colnames(regressionData1)[2] <- "stateO"
colnames(regressionData1)[3] <- "incomeO"
colnames(regressionData1)[4] <- "populationO"
regressionData1 <- select(regressionData1, originC, stateO, incomeO, populationO, destinC, stateD)
head(regressionData1)
#add destination information
regressionData2 <- merge(incomePop, regressionData1, by.x = c("city", "state"),
by.y = c("destinC", "stateD"), all=TRUE)
head(regressionData2)
regressionData3 <- filter(regressionData2, originC!="NA")
regressionData3[,4:7] <- NULL
colnames(regressionData3)[1] <- "destinC"
colnames(regressionData3)[2] <- "stateD"
colnames(regressionData3)[3] <- "incomeD"
colnames(regressionData3)[4] <- "populationD"
regressionData3 <- filter(regressionData3, incomeD!="NA")
head(regressionData3)
#merge with origin data
regressionData4 <- filter(regressionData, volume!="NA")
colnames(regressionData4)[1] <- "originC"
regressionData4[,2:8] <- NULL
regressionData4[,5] <- NULL
head(regressionData4)
head(regressionData3)
regData3 <- distinct(regressionData3)
regData4 <- distinct(regressionData4)
regressionData <- full_join(regData3, regData4, by=c("destinC", "originC"))
regressionData <- filter(regressionData, volume!="NA")
head(regressionData)
glimpse(regressionData) #oops income has $ sign and commas
regressionData$incomeO2 = gsub("\\$|,", "", regressionData$incomeO)
regressionData$incomeD2 = gsub("\\$|,", "", regressionData$incomeD)
regressionData$incomeO <- as.numeric(regressionData$incomeO2)
regressionData$incomeD <- as.numeric(regressionData$incomeD2)
regressionData$incomeO2 <- NULL
regressionData$incomeD2 <- NULL
glimpse(regressionData)
save(regressionData, "Regression.RData")
#gravity model
regressionData$lincomeO <- log(regressionData$incomeO) #income and population need to be in the log form
regressionData$lincomeD <- log(regressionData$incomeD)
regressionData$lpopulationO <- log(regressionData$populationO)
regressionData$lpopulationD <- log(regressionData$populationD)
#PPML estimates gravity models in their multiplicative form via Poisson Pseudo Maximum Likelihood.
# NOTE(review): the log-transformed columns created above (lincomeO, ...) are
# never used -- PPML below is fitted on the raw columns. Confirm which form
# was intended (PPML conventionally takes regressors in logs).
fit <- PPML(y="volume", dist="distance", x=c("populationO","populationD","incomeO","incomeD"),
            vce_robust=TRUE, data=regressionData)
#okay many missing income and population data...
# count missing covariate values to gauge how many rows PPML silently drops
sum(is.na(regressionData$populationO))
sum(is.na(regressionData$incomeO))
sum(is.na(regressionData$populationD))
sum(is.na(regressionData$incomeD))
options(digits=4)
summary(fit) # display results
exp(coef(fit))  # exponentiated coefficients (multiplicative effects)
table <- xtable(fit)
# write the coefficient table to an HTML file for the report
print.xtable(table, type = "html", file = "table1.html", digits = 4)
regressionData$predict <- predict(fit) |
#' A new function to visualize and analyze the age of the first diagnosis
#' given a list of disorders
#'
#' @param input data.frame with one row per diagnosis event, holding at least
#'        the columns PATIENT_NUM, SEX_CD, code, BIRTH_DATE and START_DATE
#'        (dates as Date objects or "YYYY-MM-DD" strings).
#' @param querypatients object whose \code{qresult} slot is a data.frame with
#'        a \code{patient_id} column; the analysis is limited to these patients.
#' @param diseaseList character vector of diagnosis codes of interest.
#' @param sex a single sex code (e.g. "F"), or a length-2 vector to compare
#'        two sexes.
#' @param cutOff minimum prevalence (in %) for an age to be kept. Default 5.
#' @param visualization if TRUE, return a ggplot2 bar chart instead of a
#'        data.frame. Only ages 0-18 appear on the plot.
#' @param fisherTest (two-sex case only) if TRUE, compute a Fisher exact test
#'        per age and keep only ages with p-value <= fisherCutOff.
#' @param fisherCutOff p-value threshold applied when fisherTest is TRUE.
#' @return a data.frame with columns Var1 (age), Freq and prevalence (plus,
#'         for two sexes, variable and optionally fisher), or a ggplot object
#'         when visualization = TRUE.
#' @export ageFirstDiagnosis
ageFirstDiagnosis <- function( input, querypatients, diseaseList, sex, cutOff = 5, visualization = FALSE, fisherTest = TRUE, fisherCutOff = 0.001 ){
  # keep only the queried patients and the diagnoses of interest
  querypatients <- querypatients@qresult
  input <- input[ input$PATIENT_NUM %in% querypatients$patient_id, ]
  input <- input[, c("PATIENT_NUM","SEX_CD", "code", "BIRTH_DATE", "START_DATE")]
  input <- input[ input$code %in% diseaseList, ]
  input <- input[ ! duplicated( input ), ]
  # age in whole years at each event (365-day years; leap days ignored)
  input$AGE <- as.numeric((as.Date(input$START_DATE) - as.Date(input$BIRTH_DATE)))%/%365
  # keep only the earliest record per patient (first diagnosis)
  sortTable <- input[order(input$PATIENT_NUM,input$START_DATE,decreasing=FALSE),]
  sortTable <- sortTable[! duplicated( sortTable$PATIENT_NUM), ]
  if( length( sex ) == 1){
    # single-sex analysis: prevalence of each age of first diagnosis
    subset <- sortTable[ sortTable$SEX_CD == sex, ]
    prevalenceData <- as.data.frame( table( subset$AGE ) )
    prevalenceData$prevalence <- round((as.numeric( prevalenceData$Freq)/length(unique(subset$PATIENT_NUM)))*100, 2)
    selection <- prevalenceData[ prevalenceData$prevalence >= cutOff, ]
    if( visualization == TRUE){
      # ages outside 0-18 become NA factor levels and are dropped from the plot
      ord <- as.factor( 0 : 18 )
      selection$Var1 <- factor( selection$Var1, levels= as.factor( ord ) )
      p <- ggplot2::ggplot ( selection, ggplot2::aes ( x = Var1, y = prevalence ), order = ord ) +
        ggplot2::geom_bar ( stat = "identity", fill = "darkcyan" ) +
        ggplot2::labs ( title = "Age when patients first diagnosed with ASD" , x = "Age", y = "prevalence (%)")
      p <- p + ggplot2::theme_classic( ) +
        ggplot2::theme( plot.margin = ggplot2::unit ( x = c ( 5, 15, 5, 15 ), units = "mm" ),
                        axis.line = ggplot2::element_line ( size = 1, color = "black" ), text = ggplot2::element_text ( size = 12 ) ,
                        axis.text.x = ggplot2::element_text ( angle = 45, size = 12, hjust = 1 ))
      return( p )
    }
    return( selection )
  }else{
    # two-sex comparison: build one prevalence table per sex
    inputA <- sortTable[ sortTable$SEX_CD == sex[1], ]
    inputB <- sortTable[ sortTable$SEX_CD == sex[2], ]
    inputAprevalence <- as.data.frame( table( inputA$AGE))
    inputAprevalence$prevalence <- round( (as.numeric( inputAprevalence$Freq)/length(unique(inputA$PATIENT_NUM)))*100, 2)
    selectionA <- inputAprevalence[ inputAprevalence$prevalence >= cutOff, ]
    selectionA$variable <- sex[1]
    inputBprevalence <- as.data.frame( table( inputB$AGE))
    inputBprevalence$prevalence <- round((as.numeric( inputBprevalence$Freq)/length(unique(inputB$PATIENT_NUM)))*100,2)
    selectionB <- inputBprevalence[ inputBprevalence$prevalence >= cutOff, ]
    selectionB$variable <- sex[2]
    # keep only the ages that pass the cutOff in BOTH sexes
    selectionA <- selectionA[ selectionA$Var1 %in% selectionB$Var1, ]
    selectionB <- selectionB[ selectionB$Var1 %in% selectionA$Var1, ]
    finalDataSet <- rbind( selectionA, selectionB)
    finalDataSet$variable <- as.factor( finalDataSet$variable )
    if( fisherTest == TRUE){
      finalDataSet$fisher <- NA
      # seq_len() (rather than 1:nrow()) is safe when finalDataSet is empty
      for( i in seq_len( nrow( finalDataSet ) ) ){
        selection <- finalDataSet[ finalDataSet$Var1 %in% finalDataSet$Var1[ i ], ]
        subsetA <- selection[ selection$variable == sex[1], "Freq"]
        subsetB <- selection[ selection$variable == sex[2], "Freq"]
        # 2x2 table: first-diagnosed at this age vs not, for each sex
        statistic <- matrix(c( subsetA, length(unique(inputA$PATIENT_NUM))-subsetA, subsetB, length(unique(inputB$PATIENT_NUM))-subsetB), nrow=2 )
        finalDataSet$fisher[i] <- fisher.test(statistic)$p.value
      }
      finalDataSet <- finalDataSet[ finalDataSet$fisher <= fisherCutOff, ]
    }
    if( visualization == TRUE){
      ord <- as.factor( 0 : 18 )
      finalDataSet$Var1 <- factor( finalDataSet$Var1, levels= as.factor( ord ) )
      p <- ggplot2::ggplot(finalDataSet, ggplot2::aes(Var1, prevalence), order = ord ) +
        ggplot2::geom_bar(ggplot2::aes(fill = variable),
                          position = "dodge",
                          stat="identity",
                          colour = "black")
      p <- p + ggplot2::scale_fill_manual(values=c("darkblue", "orange"))
      p <- p + ggplot2::theme_classic( ) + ggplot2::theme( plot.margin = ggplot2::unit ( x = c ( 5, 15, 5, 15 ), units = "mm" ),
                                                           axis.line = ggplot2::element_line ( size = 0.7, color = "black" ), text = ggplot2::element_text ( size = 14 ) ,
                                                           axis.text.x = ggplot2::element_text ( angle = 45, size = 10, hjust = 1 ))
      return ( p )
    }
    return ( finalDataSet)
  }
}
| /R/ageFirstDiagnosisEstimation.R | permissive | aGutierrezSacristan/comorbidityAdaptedFunctions | R | false | false | 4,562 | r | #' An new function to visualize and analyze the age of the first diagnosis given a list of disorders
#' A new function to visualize and analyze the age of the first diagnosis
#' given a list of disorders
#'
#' @param input data.frame with one row per diagnosis event, holding at least
#'        the columns PATIENT_NUM, SEX_CD, code, BIRTH_DATE and START_DATE
#'        (dates as Date objects or "YYYY-MM-DD" strings).
#' @param querypatients object whose \code{qresult} slot is a data.frame with
#'        a \code{patient_id} column; the analysis is limited to these patients.
#' @param diseaseList character vector of diagnosis codes of interest.
#' @param sex a single sex code (e.g. "F"), or a length-2 vector to compare
#'        two sexes.
#' @param cutOff minimum prevalence (in %) for an age to be kept. Default 5.
#' @param visualization if TRUE, return a ggplot2 bar chart instead of a
#'        data.frame. Only ages 0-18 appear on the plot.
#' @param fisherTest (two-sex case only) if TRUE, compute a Fisher exact test
#'        per age and keep only ages with p-value <= fisherCutOff.
#' @param fisherCutOff p-value threshold applied when fisherTest is TRUE.
#' @return a data.frame with columns Var1 (age), Freq and prevalence (plus,
#'         for two sexes, variable and optionally fisher), or a ggplot object
#'         when visualization = TRUE.
#' @export ageFirstDiagnosis
ageFirstDiagnosis <- function( input, querypatients, diseaseList, sex, cutOff = 5, visualization = FALSE, fisherTest = TRUE, fisherCutOff = 0.001 ){
  # keep only the queried patients and the diagnoses of interest
  querypatients <- querypatients@qresult
  input <- input[ input$PATIENT_NUM %in% querypatients$patient_id, ]
  input <- input[, c("PATIENT_NUM","SEX_CD", "code", "BIRTH_DATE", "START_DATE")]
  input <- input[ input$code %in% diseaseList, ]
  input <- input[ ! duplicated( input ), ]
  # age in whole years at each event (365-day years; leap days ignored)
  input$AGE <- as.numeric((as.Date(input$START_DATE) - as.Date(input$BIRTH_DATE)))%/%365
  # keep only the earliest record per patient (first diagnosis)
  sortTable <- input[order(input$PATIENT_NUM,input$START_DATE,decreasing=FALSE),]
  sortTable <- sortTable[! duplicated( sortTable$PATIENT_NUM), ]
  if( length( sex ) == 1){
    # single-sex analysis: prevalence of each age of first diagnosis
    subset <- sortTable[ sortTable$SEX_CD == sex, ]
    prevalenceData <- as.data.frame( table( subset$AGE ) )
    prevalenceData$prevalence <- round((as.numeric( prevalenceData$Freq)/length(unique(subset$PATIENT_NUM)))*100, 2)
    selection <- prevalenceData[ prevalenceData$prevalence >= cutOff, ]
    if( visualization == TRUE){
      # ages outside 0-18 become NA factor levels and are dropped from the plot
      ord <- as.factor( 0 : 18 )
      selection$Var1 <- factor( selection$Var1, levels= as.factor( ord ) )
      p <- ggplot2::ggplot ( selection, ggplot2::aes ( x = Var1, y = prevalence ), order = ord ) +
        ggplot2::geom_bar ( stat = "identity", fill = "darkcyan" ) +
        ggplot2::labs ( title = "Age when patients first diagnosed with ASD" , x = "Age", y = "prevalence (%)")
      p <- p + ggplot2::theme_classic( ) +
        ggplot2::theme( plot.margin = ggplot2::unit ( x = c ( 5, 15, 5, 15 ), units = "mm" ),
                        axis.line = ggplot2::element_line ( size = 1, color = "black" ), text = ggplot2::element_text ( size = 12 ) ,
                        axis.text.x = ggplot2::element_text ( angle = 45, size = 12, hjust = 1 ))
      return( p )
    }
    return( selection )
  }else{
    # two-sex comparison: build one prevalence table per sex
    inputA <- sortTable[ sortTable$SEX_CD == sex[1], ]
    inputB <- sortTable[ sortTable$SEX_CD == sex[2], ]
    inputAprevalence <- as.data.frame( table( inputA$AGE))
    inputAprevalence$prevalence <- round( (as.numeric( inputAprevalence$Freq)/length(unique(inputA$PATIENT_NUM)))*100, 2)
    selectionA <- inputAprevalence[ inputAprevalence$prevalence >= cutOff, ]
    selectionA$variable <- sex[1]
    inputBprevalence <- as.data.frame( table( inputB$AGE))
    inputBprevalence$prevalence <- round((as.numeric( inputBprevalence$Freq)/length(unique(inputB$PATIENT_NUM)))*100,2)
    selectionB <- inputBprevalence[ inputBprevalence$prevalence >= cutOff, ]
    selectionB$variable <- sex[2]
    # keep only the ages that pass the cutOff in BOTH sexes
    selectionA <- selectionA[ selectionA$Var1 %in% selectionB$Var1, ]
    selectionB <- selectionB[ selectionB$Var1 %in% selectionA$Var1, ]
    finalDataSet <- rbind( selectionA, selectionB)
    finalDataSet$variable <- as.factor( finalDataSet$variable )
    if( fisherTest == TRUE){
      finalDataSet$fisher <- NA
      # seq_len() (rather than 1:nrow()) is safe when finalDataSet is empty
      for( i in seq_len( nrow( finalDataSet ) ) ){
        selection <- finalDataSet[ finalDataSet$Var1 %in% finalDataSet$Var1[ i ], ]
        subsetA <- selection[ selection$variable == sex[1], "Freq"]
        subsetB <- selection[ selection$variable == sex[2], "Freq"]
        # 2x2 table: first-diagnosed at this age vs not, for each sex
        statistic <- matrix(c( subsetA, length(unique(inputA$PATIENT_NUM))-subsetA, subsetB, length(unique(inputB$PATIENT_NUM))-subsetB), nrow=2 )
        finalDataSet$fisher[i] <- fisher.test(statistic)$p.value
      }
      finalDataSet <- finalDataSet[ finalDataSet$fisher <= fisherCutOff, ]
    }
    if( visualization == TRUE){
      ord <- as.factor( 0 : 18 )
      finalDataSet$Var1 <- factor( finalDataSet$Var1, levels= as.factor( ord ) )
      p <- ggplot2::ggplot(finalDataSet, ggplot2::aes(Var1, prevalence), order = ord ) +
        ggplot2::geom_bar(ggplot2::aes(fill = variable),
                          position = "dodge",
                          stat="identity",
                          colour = "black")
      p <- p + ggplot2::scale_fill_manual(values=c("darkblue", "orange"))
      p <- p + ggplot2::theme_classic( ) + ggplot2::theme( plot.margin = ggplot2::unit ( x = c ( 5, 15, 5, 15 ), units = "mm" ),
                                                           axis.line = ggplot2::element_line ( size = 0.7, color = "black" ), text = ggplot2::element_text ( size = 14 ) ,
                                                           axis.text.x = ggplot2::element_text ( angle = 45, size = 10, hjust = 1 ))
      return ( p )
    }
    return ( finalDataSet)
  }
}
|
###
# The following page lists salaries for Data Scientists:
#
# https://www.glassdoor.com.mx/Sueldos/data-scientist-sueldo-SRCH_KO0,14.htm
#
# Perform the actions below:
###

# 1. Extract the table from the HTML
library(rvest)

url <- "https://www.glassdoor.com.mx/Sueldos/data-scientist-sueldo-SRCH_KO0,14.htm"
page <- read_html(url)
salary_nodes <- html_nodes(page, "table")
parsed_tables <- html_table(salary_nodes[1], fill = TRUE)
tabla <- na.omit(as.data.frame(parsed_tables))
tabla

# 2. Strip everything that is not part of the monthly amount from the salary
#    column (patterns are removed sequentially, in the same order as before)
class(tabla)
sueldos <- tabla$Sueldo
for (pattern in c('MXN', ',', '/mes', '\\$')) {
  sueldos <- gsub(pattern, '', sueldos)
}

# 3. Coerce the cleaned column to numeric so we can compute with it
sueldos <- as.numeric(sueldos)

# 4. Now we can answer: which company pays the most, and which the least?
result <- cbind(tabla, sueldos)
result

# Highest salary
max.sueldo <- which.max(result$sueldos)
result[max.sueldo,]

# Lowest salary
min.sueldo <- which.min(result$sueldos)
result[min.sueldo,]
| /Reto2.R | no_license | EmilyMaPe/Reto_Sesion_7 | R | false | false | 1,294 | r | ###
# The following page lists salaries for Data Scientists:
#
# https://www.glassdoor.com.mx/Sueldos/data-scientist-sueldo-SRCH_KO0,14.htm
#
# Perform the following actions:
###
# 1. Extract the table from the HTML
# NOTE(review): this scrape depends on the live Glassdoor page structure
# (first <table> node) -- it may break if the site layout changes.
library(rvest)
url <- "https://www.glassdoor.com.mx/Sueldos/data-scientist-sueldo-SRCH_KO0,14.htm"
file <- read_html(url)
tables <- html_nodes(file, "table")
table1 <- html_table(tables[1], fill = TRUE)
tabla <- na.omit(as.data.frame(table1))
tabla
# 2. Remove the unneeded characters from the salary column (everything that
#    is not a digit), keeping only the monthly amount (Hint: gsub is useful)
class(tabla)
sueldos <- tabla$Sueldo
sueldos <- gsub('MXN','',sueldos)
sueldos <- gsub(',','',sueldos)
sueldos <- gsub('/mes','',sueldos)
sueldos <- gsub('\\$','',sueldos)
# 3. Coerce this column to numeric so we can run computations on it
sueldos <- as.numeric(sueldos)
# 4. Now we can answer: which company pays the most, and which the least?
result <- cbind(tabla, sueldos)
result
# Highest salary
max.sueldo <- which.max(result$sueldos)
result[max.sueldo,]
# Lowest salary
min.sueldo <- which.min(result$sueldos)
result[min.sueldo,]
|
# lav_start.R: provide starting values for model parameters
#
# YR 30/11/2010: initial version
# YR 08/06/2011: add fabin3 start values for factor loadings
# YR 14 Jan 2014: moved to lav_start.R
# fill in the 'ustart' column in a User data.frame with reasonable
# starting values, using the sample data
# Compute starting values for all parameters in the parameter table, using
# the sample statistics.
#
# Arguments:
#   start.method   : "default", "simple", "lavaan" or "mplus"; alternatively a
#                    partable-like list, or a fitted lavaan object, whose 'est'
#                    (or 'start'/'ustart') column supplies user starting values
#   lavpartable    : flattened parameter table (lhs/op/rhs/group/free/ustart)
#   lavsamplestats : lavSampleStats object with per-group sample moments
#   model.type     : "sem", "cfa", "growth" or "unrestricted"
#   mimic          : which program's default starting values to mimic
#   debug          : if TRUE, print the start vector before returning
#
# Returns a numeric vector with one starting value per parameter-table row.
lav_start <- function(start.method   = "default",
                      lavpartable    = NULL,
                      lavsamplestats = NULL,
                      model.type     = "sem",
                      mimic          = "lavaan",
                      debug          = FALSE) {

    # check arguments
    stopifnot(is.list(lavpartable))

    # categorical?
    categorical <- any(lavpartable$op == "|")
    #ord.names <- unique(lavpartable$lhs[ lavpartable$op == "|" ])

    # shortcut for 'simple': fixed constants only (1.0 for loadings, scaling
    # factors and non-ordinal variances), then user-provided values on top
    if(identical(start.method, "simple")) {
        start <- numeric( length(lavpartable$ustart) )
        start[ which(lavpartable$op == "=~") ] <- 1.0
        start[ which(lavpartable$op == "~*~") ] <- 1.0
        ov.names.ord <- vnames(lavpartable, "ov.ord")
        var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs &
                         !(lavpartable$lhs %in% ov.names.ord))
        start[var.idx] <- 1.0
        user.idx <- which(!is.na(lavpartable$ustart))
        start[user.idx] <- lavpartable$ustart[user.idx]
        return(start)
    }

    # check start.method
    if(mimic == "lavaan") {
        start.initial <- "lavaan"
    } else if(mimic == "Mplus") {
        start.initial <- "mplus"
    } else {
        # FIXME: use LISREL/EQS/AMOS/.... schemes
        start.initial <- "lavaan"
    }
    start.user <- NULL
    if(is.character(start.method)) {
        start.method. <- tolower(start.method)
        if(start.method. == "default") {
            # nothing to do
        } else if(start.method. %in% c("simple", "lavaan", "mplus")) {
            start.initial <- start.method.
        } else {
            stop("lavaan ERROR: unknown value for start argument")
        }
    } else if(is.list(start.method)) {
        start.user <- start.method
    } else if(inherits(start.method, "lavaan")) {
        start.user <- parTable(start.method)
    }

    # check model list elements, if provided
    if(!is.null(start.user)) {
        if(is.null(start.user$lhs) ||
           is.null(start.user$op)  ||
           is.null(start.user$rhs)) {
            stop("lavaan ERROR: problem with start argument: model list does not contain all elements: lhs/op/rhs")
        }
        if(!is.null(start.user$est)) {
            # excellent, we got an est column; nothing to do
        } else if(!is.null(start.user$start)) {
            # no est column, but we use the start column
            start.user$est <- start.user$start
        } else if(!is.null(start.user$ustart)) {
            # no ideal, but better than nothing
            start.user$est <- start.user$ustart
        } else {
            stop("lavaan ERROR: problem with start argument: could not find est/start column in model list")
        }
    }

    # global settings
    # 0. everything is zero
    start <- numeric( length(lavpartable$ustart) )

    # 1. =~ factor loadings:
    if(categorical) {
        # if std.lv=TRUE, more likely initial Sigma.hat is positive definite
        # 0.8 is too large
        start[ which(lavpartable$op == "=~") ] <- 0.7
    } else {
        start[ which(lavpartable$op == "=~") ] <- 1.0
    }

    # 2. residual lv variances for latent variables
    lv.names <- vnames(lavpartable, "lv") # all groups
    lv.var.idx <- which(lavpartable$op == "~~"        &
                        lavpartable$lhs %in% lv.names &
                        lavpartable$lhs == lavpartable$rhs)
    start[lv.var.idx] <- 0.05

    # 3. latent response scales (if any)
    delta.idx <- which(lavpartable$op == "~*~")
    start[delta.idx] <- 1.0

    # group-specific settings
    ngroups <- lavsamplestats@ngroups
    for(g in seq_len(ngroups)) {   # seq_len() avoids the 1:0 trap of 1:ngroups

        # info from user model for this group
        if(categorical) {
            ov.names     <- vnames(lavpartable, "ov.nox", group=g)
            ov.names.num <- vnames(lavpartable, "ov.num", group=g)
            ov.names.ord <- vnames(lavpartable, "ov.ord", group=g)
        } else {
            ov.names.num <- ov.names <- vnames(lavpartable, "ov", group=g)
        }
        lv.names   <- vnames(lavpartable, "lv",   group=g)
        ov.names.x <- vnames(lavpartable, "ov.x", group=g)

        # g1) factor loadings
        if(start.initial %in% c("lavaan", "mplus") &&
           model.type %in% c("sem", "cfa") &&
           #!categorical &&
           sum( lavpartable$ustart[ lavpartable$op == "=~" & lavpartable$group == g],
                na.rm=TRUE) == length(lv.names) ) {
            # only if all latent variables have a reference item,
            # we use the fabin3 estimator (2sls) of Hagglund (1982)
            # per factor
            # 9 Okt 2013: if only 2 indicators, we use the regression
            # coefficient (y=marker, x=2nd indicator)
            for(f in lv.names) {
                free.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~"
                                                        & lavpartable$group == g
                                                        & lavpartable$free > 0L)
                user.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~"
                                                        & lavpartable$group == g )
                # no second order
                if(any(lavpartable$rhs[user.idx] %in% lv.names)) next

                # get observed indicators for this latent variable
                ov.idx <- match(lavpartable$rhs[user.idx], ov.names)
                if(length(ov.idx) > 2L && !any(is.na(ov.idx))) {
                    if(lavsamplestats@missing.flag) {
                        COV <- lavsamplestats@missing.h1[[g]]$sigma[ov.idx,ov.idx]
                    } else {
                        COV <- lavsamplestats@cov[[g]][ov.idx,ov.idx]
                    }
                    start[user.idx] <- fabin3.uni(COV)
                } else if(length(free.idx) == 1L && length(ov.idx) == 2L) {
                    # simple regression coefficient (y = marker, x = 2nd item)
                    REG2 <- ( lavsamplestats@cov[[g]][ov.idx[1],ov.idx[2]] /
                              lavsamplestats@cov[[g]][ov.idx[1],ov.idx[1]] )
                    start[free.idx] <- REG2
                }

                # standardized? (factor variance fixed to 1)
                var.f.idx <- which(lavpartable$lhs == f & lavpartable$op == "~~" &
                                   lavpartable$rhs == f)
                if(length(var.f.idx) > 0L &&
                   lavpartable$free[var.f.idx] == 0 &&
                   lavpartable$ustart[var.f.idx] == 1) {
                    # make sure factor loadings are between -0.7 and 0.7
                    x <- start[user.idx]
                    start[user.idx] <- (x / max(abs(x))) * 0.7
                }
            }
        }

        if(model.type == "unrestricted") {
            # fill in 'covariances' from lavsamplestats
            cov.idx <- which(lavpartable$group == g    &
                             lavpartable$op    == "~~" &
                             lavpartable$lhs != lavpartable$rhs)
            lhs.idx <- match(lavpartable$lhs[cov.idx], ov.names)
            rhs.idx <- match(lavpartable$rhs[cov.idx], ov.names)
            start[cov.idx] <- lavsamplestats@cov[[g]][ cbind(lhs.idx, rhs.idx) ]
        }

        # 2g) residual ov variances (including exo, to be overriden)
        ov.var.idx <- which(lavpartable$group == g            &
                            lavpartable$op    == "~~"         &
                            lavpartable$lhs %in% ov.names.num &
                            lavpartable$lhs == lavpartable$rhs)
        sample.var.idx <- match(lavpartable$lhs[ov.var.idx], ov.names)
        if(model.type == "unrestricted") {
            start[ov.var.idx] <- diag(lavsamplestats@cov[[g]])[sample.var.idx]
        } else {
            if(start.initial == "mplus") {
                # NOTE(review): uses the variances of the FIRST group for every
                # group -- presumably deliberate Mplus mimicry; confirm.
                start[ov.var.idx] <-
                    (1.0 - 0.50)*lavsamplestats@var[[1L]][sample.var.idx]
            } else {
                # start[ov.var.idx] <-
                #     (1.0 - 0.50)*lavsamplestats@var[[g]][sample.var.idx]
                start[ov.var.idx] <-
                    (1.0 - 0.50)*diag(lavsamplestats@cov[[g]])[sample.var.idx]
            }
        }

        # variances of ordinal variables - set to 1.0
        if(categorical) {
            ov.var.ord.idx <- which(lavpartable$group == g            &
                                    lavpartable$op    == "~~"         &
                                    lavpartable$lhs %in% ov.names.ord &
                                    lavpartable$lhs == lavpartable$rhs)
            start[ov.var.ord.idx] <- 1.0
        }

        # 3g) intercepts/means
        ov.int.idx <- which(lavpartable$group == g    &
                            lavpartable$op    == "~1" &
                            lavpartable$lhs %in% ov.names)
        sample.int.idx <- match(lavpartable$lhs[ov.int.idx], ov.names)
        if(lavsamplestats@missing.flag) {
            start[ov.int.idx] <- lavsamplestats@missing.h1[[g]]$mu[sample.int.idx]
        } else {
            start[ov.int.idx] <- lavsamplestats@mean[[g]][sample.int.idx]
        }

        # 4g) thresholds
        th.idx <- which(lavpartable$group == g & lavpartable$op == "|")
        if(length(th.idx) > 0L) {
            th.names.lavpartable <- paste0(lavpartable$lhs[th.idx], "|",
                                           lavpartable$rhs[th.idx])
            th.names.sample <-
                lavsamplestats@th.names[[g]][ lavsamplestats@th.idx[[g]] > 0L ]
            # th.names.sample should be identical to
            # vnames(lavpartable, "th", group = g)
            th.values <- lavsamplestats@th.nox[[g]][ lavsamplestats@th.idx[[g]] > 0L ]
            start[th.idx] <- th.values[match(th.names.lavpartable,
                                             th.names.sample)]
        }

        # 5g) exogenous `fixed.x' covariates
        if(!categorical && length(ov.names.x) > 0) {
            exo.idx <- which(lavpartable$group == g          &
                             lavpartable$op    == "~~"       &
                             lavpartable$lhs %in% ov.names.x &
                             lavpartable$rhs %in% ov.names.x)
            row.idx <- match(lavpartable$lhs[exo.idx], ov.names)
            col.idx <- match(lavpartable$rhs[exo.idx], ov.names)
            if(lavsamplestats@missing.flag) {
                start[exo.idx] <-
                    lavsamplestats@missing.h1[[g]]$sigma[cbind(row.idx,col.idx)]
            } else {
                start[exo.idx] <- lavsamplestats@cov[[g]][cbind(row.idx,col.idx)]
            }
        }

        # 6g) regressions "~"
    }

    # group weights
    group.idx <- which(lavpartable$lhs == "group" &
                       lavpartable$op  == "%")
    if(length(group.idx) > 0L) {
        #prop <- rep(1/ngroups, ngroups)
        # use last group as reference
        #start[group.idx] <- log(prop/prop[ngroups])
        # poisson version
        start[group.idx] <- log( rep(lavsamplestats@ntotal/ngroups, ngroups) )
    }

    # growth models:
    # - compute starting values for mean latent variables
    # - compute starting values for variance latent variables
    if(start.initial %in% c("lavaan", "mplus") &&
       model.type == "growth") {
        ### DEBUG ONLY
        #lv.var.idx <- which(lavpartable$op == "~~" &
        #                lavpartable$lhs %in% lv.names &
        #                lavpartable$lhs == lavpartable$rhs)
        #start[lv.var.idx] <- c(2.369511, 0.7026852)
        ### DEBUG ONLY
        #lv.int.idx <- which(lavpartable$op == "~1" &
        #                    lavpartable$lhs %in% lv.names)
        #start[lv.int.idx] <- c(0.617156788, 1.005192793)
    }

    # override if a user list with starting values is provided
    # we only look at the 'est' column for now
    if(!is.null(start.user)) {
        if(is.null(lavpartable$group)) {
            lavpartable$group <- rep(1L, length(lavpartable$lhs))
        }
        if(is.null(start.user$group)) {
            start.user$group <- rep(1L, length(start.user$lhs))
        }
        # FIXME: avoid for loop!!!
        # seq_along() (rather than 1:length()) is safe for an empty table
        for(i in seq_along(lavpartable$lhs)) {
            # find corresponding parameters
            lhs <- lavpartable$lhs[i]
            op  <- lavpartable$op[i]
            rhs <- lavpartable$rhs[i]
            grp <- lavpartable$group[i]
            start.user.idx <- which(start.user$lhs == lhs &
                                    start.user$op  ==  op &
                                    start.user$rhs == rhs &
                                    start.user$group == grp)
            if(length(start.user.idx) == 1L &&
               is.finite(start.user$est[start.user.idx])) {
                start[i] <- start.user$est[start.user.idx]
            }
        }
    }

    # override if the model syntax contains explicit starting values
    user.idx <- which(!is.na(lavpartable$ustart))
    start[user.idx] <- lavpartable$ustart[user.idx]

    if(debug) {
        cat("lavaan DEBUG: lavaanStart\n")
        print( start )
    }

    start
}
# backwards compatibility
# StartingValues <- lav_start
| /lavaan/R/lav_start.R | no_license | ingted/R-Examples | R | false | false | 13,468 | r | # lav_start.R: provide starting values for model parameters
#
# YR 30/11/2010: initial version
# YR 08/06/2011: add fabin3 start values for factor loadings
# YR 14 Jan 2014: moved to lav_start.R
# fill in the 'ustart' column in a User data.frame with reasonable
# starting values, using the sample data
# Compute starting values for all parameters in the parameter table, using
# the sample statistics.
#
# Arguments:
#   start.method   : "default", "simple", "lavaan" or "mplus"; alternatively a
#                    partable-like list, or a fitted lavaan object, whose 'est'
#                    (or 'start'/'ustart') column supplies user starting values
#   lavpartable    : flattened parameter table (lhs/op/rhs/group/free/ustart)
#   lavsamplestats : lavSampleStats object with per-group sample moments
#   model.type     : "sem", "cfa", "growth" or "unrestricted"
#   mimic          : which program's default starting values to mimic
#   debug          : if TRUE, print the start vector before returning
#
# Returns a numeric vector with one starting value per parameter-table row.
lav_start <- function(start.method   = "default",
                      lavpartable    = NULL,
                      lavsamplestats = NULL,
                      model.type     = "sem",
                      mimic          = "lavaan",
                      debug          = FALSE) {

    # check arguments
    stopifnot(is.list(lavpartable))

    # categorical?
    categorical <- any(lavpartable$op == "|")
    #ord.names <- unique(lavpartable$lhs[ lavpartable$op == "|" ])

    # shortcut for 'simple': fixed constants only (1.0 for loadings, scaling
    # factors and non-ordinal variances), then user-provided values on top
    if(identical(start.method, "simple")) {
        start <- numeric( length(lavpartable$ustart) )
        start[ which(lavpartable$op == "=~") ] <- 1.0
        start[ which(lavpartable$op == "~*~") ] <- 1.0
        ov.names.ord <- vnames(lavpartable, "ov.ord")
        var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs &
                         !(lavpartable$lhs %in% ov.names.ord))
        start[var.idx] <- 1.0
        user.idx <- which(!is.na(lavpartable$ustart))
        start[user.idx] <- lavpartable$ustart[user.idx]
        return(start)
    }

    # check start.method
    if(mimic == "lavaan") {
        start.initial <- "lavaan"
    } else if(mimic == "Mplus") {
        start.initial <- "mplus"
    } else {
        # FIXME: use LISREL/EQS/AMOS/.... schemes
        start.initial <- "lavaan"
    }
    start.user <- NULL
    if(is.character(start.method)) {
        start.method. <- tolower(start.method)
        if(start.method. == "default") {
            # nothing to do
        } else if(start.method. %in% c("simple", "lavaan", "mplus")) {
            start.initial <- start.method.
        } else {
            stop("lavaan ERROR: unknown value for start argument")
        }
    } else if(is.list(start.method)) {
        start.user <- start.method
    } else if(inherits(start.method, "lavaan")) {
        start.user <- parTable(start.method)
    }

    # check model list elements, if provided
    if(!is.null(start.user)) {
        if(is.null(start.user$lhs) ||
           is.null(start.user$op)  ||
           is.null(start.user$rhs)) {
            stop("lavaan ERROR: problem with start argument: model list does not contain all elements: lhs/op/rhs")
        }
        if(!is.null(start.user$est)) {
            # excellent, we got an est column; nothing to do
        } else if(!is.null(start.user$start)) {
            # no est column, but we use the start column
            start.user$est <- start.user$start
        } else if(!is.null(start.user$ustart)) {
            # no ideal, but better than nothing
            start.user$est <- start.user$ustart
        } else {
            stop("lavaan ERROR: problem with start argument: could not find est/start column in model list")
        }
    }

    # global settings
    # 0. everything is zero
    start <- numeric( length(lavpartable$ustart) )

    # 1. =~ factor loadings:
    if(categorical) {
        # if std.lv=TRUE, more likely initial Sigma.hat is positive definite
        # 0.8 is too large
        start[ which(lavpartable$op == "=~") ] <- 0.7
    } else {
        start[ which(lavpartable$op == "=~") ] <- 1.0
    }

    # 2. residual lv variances for latent variables
    lv.names <- vnames(lavpartable, "lv") # all groups
    lv.var.idx <- which(lavpartable$op == "~~"        &
                        lavpartable$lhs %in% lv.names &
                        lavpartable$lhs == lavpartable$rhs)
    start[lv.var.idx] <- 0.05

    # 3. latent response scales (if any)
    delta.idx <- which(lavpartable$op == "~*~")
    start[delta.idx] <- 1.0

    # group-specific settings
    ngroups <- lavsamplestats@ngroups
    for(g in seq_len(ngroups)) {   # seq_len() avoids the 1:0 trap of 1:ngroups

        # info from user model for this group
        if(categorical) {
            ov.names     <- vnames(lavpartable, "ov.nox", group=g)
            ov.names.num <- vnames(lavpartable, "ov.num", group=g)
            ov.names.ord <- vnames(lavpartable, "ov.ord", group=g)
        } else {
            ov.names.num <- ov.names <- vnames(lavpartable, "ov", group=g)
        }
        lv.names   <- vnames(lavpartable, "lv",   group=g)
        ov.names.x <- vnames(lavpartable, "ov.x", group=g)

        # g1) factor loadings
        if(start.initial %in% c("lavaan", "mplus") &&
           model.type %in% c("sem", "cfa") &&
           #!categorical &&
           sum( lavpartable$ustart[ lavpartable$op == "=~" & lavpartable$group == g],
                na.rm=TRUE) == length(lv.names) ) {
            # only if all latent variables have a reference item,
            # we use the fabin3 estimator (2sls) of Hagglund (1982)
            # per factor
            # 9 Okt 2013: if only 2 indicators, we use the regression
            # coefficient (y=marker, x=2nd indicator)
            for(f in lv.names) {
                free.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~"
                                                        & lavpartable$group == g
                                                        & lavpartable$free > 0L)
                user.idx <- which( lavpartable$lhs == f & lavpartable$op == "=~"
                                                        & lavpartable$group == g )
                # no second order
                if(any(lavpartable$rhs[user.idx] %in% lv.names)) next

                # get observed indicators for this latent variable
                ov.idx <- match(lavpartable$rhs[user.idx], ov.names)
                if(length(ov.idx) > 2L && !any(is.na(ov.idx))) {
                    if(lavsamplestats@missing.flag) {
                        COV <- lavsamplestats@missing.h1[[g]]$sigma[ov.idx,ov.idx]
                    } else {
                        COV <- lavsamplestats@cov[[g]][ov.idx,ov.idx]
                    }
                    start[user.idx] <- fabin3.uni(COV)
                } else if(length(free.idx) == 1L && length(ov.idx) == 2L) {
                    # simple regression coefficient (y = marker, x = 2nd item)
                    REG2 <- ( lavsamplestats@cov[[g]][ov.idx[1],ov.idx[2]] /
                              lavsamplestats@cov[[g]][ov.idx[1],ov.idx[1]] )
                    start[free.idx] <- REG2
                }

                # standardized? (factor variance fixed to 1)
                var.f.idx <- which(lavpartable$lhs == f & lavpartable$op == "~~" &
                                   lavpartable$rhs == f)
                if(length(var.f.idx) > 0L &&
                   lavpartable$free[var.f.idx] == 0 &&
                   lavpartable$ustart[var.f.idx] == 1) {
                    # make sure factor loadings are between -0.7 and 0.7
                    x <- start[user.idx]
                    start[user.idx] <- (x / max(abs(x))) * 0.7
                }
            }
        }

        if(model.type == "unrestricted") {
            # fill in 'covariances' from lavsamplestats
            cov.idx <- which(lavpartable$group == g    &
                             lavpartable$op    == "~~" &
                             lavpartable$lhs != lavpartable$rhs)
            lhs.idx <- match(lavpartable$lhs[cov.idx], ov.names)
            rhs.idx <- match(lavpartable$rhs[cov.idx], ov.names)
            start[cov.idx] <- lavsamplestats@cov[[g]][ cbind(lhs.idx, rhs.idx) ]
        }

        # 2g) residual ov variances (including exo, to be overriden)
        ov.var.idx <- which(lavpartable$group == g            &
                            lavpartable$op    == "~~"         &
                            lavpartable$lhs %in% ov.names.num &
                            lavpartable$lhs == lavpartable$rhs)
        sample.var.idx <- match(lavpartable$lhs[ov.var.idx], ov.names)
        if(model.type == "unrestricted") {
            start[ov.var.idx] <- diag(lavsamplestats@cov[[g]])[sample.var.idx]
        } else {
            if(start.initial == "mplus") {
                # NOTE(review): uses the variances of the FIRST group for every
                # group -- presumably deliberate Mplus mimicry; confirm.
                start[ov.var.idx] <-
                    (1.0 - 0.50)*lavsamplestats@var[[1L]][sample.var.idx]
            } else {
                # start[ov.var.idx] <-
                #     (1.0 - 0.50)*lavsamplestats@var[[g]][sample.var.idx]
                start[ov.var.idx] <-
                    (1.0 - 0.50)*diag(lavsamplestats@cov[[g]])[sample.var.idx]
            }
        }

        # variances of ordinal variables - set to 1.0
        if(categorical) {
            ov.var.ord.idx <- which(lavpartable$group == g            &
                                    lavpartable$op    == "~~"         &
                                    lavpartable$lhs %in% ov.names.ord &
                                    lavpartable$lhs == lavpartable$rhs)
            start[ov.var.ord.idx] <- 1.0
        }

        # 3g) intercepts/means
        ov.int.idx <- which(lavpartable$group == g    &
                            lavpartable$op    == "~1" &
                            lavpartable$lhs %in% ov.names)
        sample.int.idx <- match(lavpartable$lhs[ov.int.idx], ov.names)
        if(lavsamplestats@missing.flag) {
            start[ov.int.idx] <- lavsamplestats@missing.h1[[g]]$mu[sample.int.idx]
        } else {
            start[ov.int.idx] <- lavsamplestats@mean[[g]][sample.int.idx]
        }

        # 4g) thresholds
        th.idx <- which(lavpartable$group == g & lavpartable$op == "|")
        if(length(th.idx) > 0L) {
            th.names.lavpartable <- paste0(lavpartable$lhs[th.idx], "|",
                                           lavpartable$rhs[th.idx])
            th.names.sample <-
                lavsamplestats@th.names[[g]][ lavsamplestats@th.idx[[g]] > 0L ]
            # th.names.sample should be identical to
            # vnames(lavpartable, "th", group = g)
            th.values <- lavsamplestats@th.nox[[g]][ lavsamplestats@th.idx[[g]] > 0L ]
            start[th.idx] <- th.values[match(th.names.lavpartable,
                                             th.names.sample)]
        }

        # 5g) exogenous `fixed.x' covariates
        if(!categorical && length(ov.names.x) > 0) {
            exo.idx <- which(lavpartable$group == g          &
                             lavpartable$op    == "~~"       &
                             lavpartable$lhs %in% ov.names.x &
                             lavpartable$rhs %in% ov.names.x)
            row.idx <- match(lavpartable$lhs[exo.idx], ov.names)
            col.idx <- match(lavpartable$rhs[exo.idx], ov.names)
            if(lavsamplestats@missing.flag) {
                start[exo.idx] <-
                    lavsamplestats@missing.h1[[g]]$sigma[cbind(row.idx,col.idx)]
            } else {
                start[exo.idx] <- lavsamplestats@cov[[g]][cbind(row.idx,col.idx)]
            }
        }

        # 6g) regressions "~"
    }

    # group weights
    group.idx <- which(lavpartable$lhs == "group" &
                       lavpartable$op  == "%")
    if(length(group.idx) > 0L) {
        #prop <- rep(1/ngroups, ngroups)
        # use last group as reference
        #start[group.idx] <- log(prop/prop[ngroups])
        # poisson version
        start[group.idx] <- log( rep(lavsamplestats@ntotal/ngroups, ngroups) )
    }

    # growth models:
    # - compute starting values for mean latent variables
    # - compute starting values for variance latent variables
    if(start.initial %in% c("lavaan", "mplus") &&
       model.type == "growth") {
        ### DEBUG ONLY
        #lv.var.idx <- which(lavpartable$op == "~~" &
        #                lavpartable$lhs %in% lv.names &
        #                lavpartable$lhs == lavpartable$rhs)
        #start[lv.var.idx] <- c(2.369511, 0.7026852)
        ### DEBUG ONLY
        #lv.int.idx <- which(lavpartable$op == "~1" &
        #                    lavpartable$lhs %in% lv.names)
        #start[lv.int.idx] <- c(0.617156788, 1.005192793)
    }

    # override if a user list with starting values is provided
    # we only look at the 'est' column for now
    if(!is.null(start.user)) {
        if(is.null(lavpartable$group)) {
            lavpartable$group <- rep(1L, length(lavpartable$lhs))
        }
        if(is.null(start.user$group)) {
            start.user$group <- rep(1L, length(start.user$lhs))
        }
        # FIXME: avoid for loop!!!
        # seq_along() (rather than 1:length()) is safe for an empty table
        for(i in seq_along(lavpartable$lhs)) {
            # find corresponding parameters
            lhs <- lavpartable$lhs[i]
            op  <- lavpartable$op[i]
            rhs <- lavpartable$rhs[i]
            grp <- lavpartable$group[i]
            start.user.idx <- which(start.user$lhs == lhs &
                                    start.user$op  ==  op &
                                    start.user$rhs == rhs &
                                    start.user$group == grp)
            if(length(start.user.idx) == 1L &&
               is.finite(start.user$est[start.user.idx])) {
                start[i] <- start.user$est[start.user.idx]
            }
        }
    }

    # override if the model syntax contains explicit starting values
    user.idx <- which(!is.na(lavpartable$ustart))
    start[user.idx] <- lavpartable$ustart[user.idx]

    if(debug) {
        cat("lavaan DEBUG: lavaanStart\n")
        print( start )
    }

    start
}
# backwards compatibility
# StartingValues <- lav_start
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ft_tree.R
\name{ft_tree}
\alias{ft_tree}
\title{Load the Fern Tree of Life (FTOL)}
\usage{
ft_tree(
branch_len = "ultra",
consensus = TRUE,
rooted = TRUE,
backbone = FALSE,
drop_og = FALSE,
label_ages = FALSE,
decimals = NULL
)
}
\arguments{
\item{branch_len}{Character vector of length 1; how to treat branch lengths.
Must choose from \code{"ultra"} (ultrametric tree, branchlengths in units of
time), \code{"raw"} (raw branchlengths in units of genetic change), or \code{"clado"}
(cladogram, no branchlengths). Default \code{"ultra"}.}
\item{consensus}{Logical vector of length 1; if \code{TRUE}, the majority-rule
extended consensus phylogeny will be returned; otherwise returns the
maximum-likelihood tree. Default \code{TRUE}.}
\item{rooted}{Logical vector of length 1; if \code{TRUE}, the phylogeny will be
rooted on bryophytes; otherwise the phylogeny is unrooted. Default \code{TRUE}.}
\item{backbone}{Logical vector of length 1; if \code{TRUE}, the backbone phylogeny
(only species with complete plastomes available) will be returned;
otherwise the phylogeny will include all species. Default \code{FALSE}.}
\item{drop_og}{Logical vector of length 1; if \code{TRUE}, the outgroup
(non-ferns) will be excluded; otherwise the outgroup is included. Default
\code{FALSE}.}
\item{label_ages}{Logical vector of length 1; if \code{TRUE}, internal nodes will
be labeled with ages. Only works if \code{branch_len} is \code{"ultra"}.
Default \code{FALSE}.}
\item{decimals}{Numeric vector of length 1; number of decimals for rounding
node labels if \code{label_ages} is \code{TRUE}; \code{NULL} (default) does no rounding.}
}
\value{
List of class "phylo"; a phylogenetic tree.
}
\description{
For details on methods used to infer the tree, see Nitta et al. 2022.
}
\details{
Not all combinations are possible. For example, \code{branch_len = "ultra"} is
only available if \code{backbone = FALSE}.
}
\examples{
# Default is the consensus tree with branchlengths in units of time
ft_tree()
}
\references{
Nitta JH, Schuettpelz E, Ramírez-Barahona S, Iwasaki W. (2022) An
open and continuously updated fern tree of life. https://doi.org/10.3389/fpls.2022.909768
}
| /man/ft_tree.Rd | permissive | fernphy/ftolr | R | false | true | 2,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ft_tree.R
\name{ft_tree}
\alias{ft_tree}
\title{Load the Fern Tree of Life (FTOL)}
\usage{
ft_tree(
branch_len = "ultra",
consensus = TRUE,
rooted = TRUE,
backbone = FALSE,
drop_og = FALSE,
label_ages = FALSE,
decimals = NULL
)
}
\arguments{
\item{branch_len}{Character vector of length 1; how to treat branch lengths.
Must choose from \code{"ultra"} (ultrametric tree, branchlengths in units of
time), \code{"raw"} (raw branchlengths in units of genetic change), or \code{"clado"}
(cladogram, no branchlengths). Default \code{"ultra"}.}
\item{consensus}{Logical vector of length 1; if \code{TRUE}, the majority-rule
extended consensus phylogeny will be returned; otherwise returns the
maximum-likelihood tree. Default \code{TRUE}.}
\item{rooted}{Logical vector of length 1; if \code{TRUE}, the phylogeny will be
rooted on bryophytes; otherwise the phylogeny is unrooted. Default \code{TRUE}.}
\item{backbone}{Logical vector of length 1; if \code{TRUE}, the backbone phylogeny
(only species with complete plastomes available) will be returned;
otherwise the phylogeny will include all species. Default \code{FALSE}.}
\item{drop_og}{Logical vector of length 1; if \code{TRUE}, the outgroup
(non-ferns) will be excluded; otherwise the outgroup is included. Default
\code{FALSE}.}
\item{label_ages}{Logical vector of length 1; if \code{TRUE}, internal nodes will
be labeled with ages. Only works if \code{branch_len} is \code{"ultra"}.
Default \code{FALSE}.}
\item{decimals}{Numeric vector of length 1; number of decimals for rounding
node labels if \code{label_ages} is \code{TRUE}; \code{NULL} (default) does no rounding.}
}
\value{
List of class "phylo"; a phylogenetic tree.
}
\description{
For details on methods used to infer the tree, see Nitta et al. 2022.
}
\details{
Not all combinations are possible. For example, \code{branch_len = "ultra"} is
only available if \code{backbone = FALSE}.
}
\examples{
# Default is the consensus tree with branchlengths in units of time
ft_tree()
}
\references{
Nitta JH, Schuettpelz E, Ramírez-Barahona S, Iwasaki W. (2022) An
open and continuously updated fern tree of life. https://doi.org/10.3389/fpls.2022.909768
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/terrain_analysis.R
\name{wbt_remove_off_terrain_objects}
\alias{wbt_remove_off_terrain_objects}
\title{Remove off terrain objects}
\usage{
wbt_remove_off_terrain_objects(dem, output, filter = 11, slope = 15,
verbose_mode = FALSE)
}
\arguments{
\item{dem}{Input raster DEM file.}
\item{output}{Output raster file.}
\item{filter}{Filter size (cells).}
\item{slope}{Slope threshold value.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Removes off-terrain objects from a raster digital elevation model (DEM).
}
| /man/wbt_remove_off_terrain_objects.Rd | permissive | Remote-Sensing-Forks/whiteboxR | R | false | true | 707 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/terrain_analysis.R
\name{wbt_remove_off_terrain_objects}
\alias{wbt_remove_off_terrain_objects}
\title{Remove off terrain objects}
\usage{
wbt_remove_off_terrain_objects(dem, output, filter = 11, slope = 15,
verbose_mode = FALSE)
}
\arguments{
\item{dem}{Input raster DEM file.}
\item{output}{Output raster file.}
\item{filter}{Filter size (cells).}
\item{slope}{Slope threshold value.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Removes off-terrain objects from a raster digital elevation model (DEM).
}
|
# --- Setup: load packages and the raw case spreadsheet -----------------------
# NOTE(review): rm(list=ls()) and setwd() inside a script are anti-patterns
# (they clobber the caller's workspace and hard-code a personal path);
# kept as-is to preserve behavior.
rm(list=ls())
library(readxl)
library(ggplot2)
library(scales)
library(dplyr)
library(stringr)
library(RColorBrewer)
setwd("~/Documents/OnCampusJob/propuesta-graficos-covid-19/Analyze-Data-R")
# Read the "BD" sheet of the COVID-positive employees workbook and coerce the
# tibble returned by read_excel() to a plain data.frame.
d <- read_excel("Colaboradores Covid positivos 13_01_2021.xlsm", sheet = "BD")
d2 <- as.data.frame(d)
# Column names (Spanish): payroll id, name, gender, institution, campus,
# state, position, email, phone, employee type, age, age range, symptom-onset
# date, contagion week, contagion type, attended campus, care type, diagnosis,
# comorbidities, discharged flag, discharge date, death date.
colnames(d2) <- c("nomina", "nombre", "genero", "institucion", "campus", "estado", "posicion", "corre", "celular", "tipo", "edad", "rangoedad", "inicio", "semanaContagio", "tipoContagio", "asistidoCampus", "tipoAtencion", "diagnostico", "morbilidades", "alta", "fechaAlta", "fechaFallecimiento")
# Clean contagion-type labels: trim surrounding whitespace, then collapse the
# one known spelling variant ("1= Local") into the canonical "1=Local".
# which() drops NA positions, so missing values are left untouched.
d2$tipoContagio <- str_trim(d2$tipoContagio)
contagio_variant <- which(d2$tipoContagio == "1= Local")
d2$tipoContagio[contagio_variant] <- "1=Local"
# Clean campus names: trim whitespace, then map every observed spelling
# variant to its canonical form via a lookup table (replaces a long
# ifelse() chain; no rule's target is another rule's key, so a single
# pass is equivalent to the original sequential chain).
# BUG FIX: the last three rules of the original chain were applied to
# `data$Campus` — `data` is not defined at this point in the script
# (it is created much later from a different workbook) and the column
# here is `campus`, so the script crashed. They now operate on
# d2$campus like the rest. The duplicate "Santa Fe" rule was dropped.
d2$campus <- str_trim(d2$campus)
campus_fix <- c(
  "Areas de Apoyo"        = "รreas de apoyo",
  "รreas de Apoyo"      = "รreas de apoyo",
  "C CM"                  = "Ciudad de Mรฉxico",
  "CCM"                   = "Ciudad de Mรฉxico",
  "C. Querรฉtaro"         = "Querรฉtaro",
  "C. Mty"                = "Monterrey",
  "C MTY"                 = "Monterrey",
  "Of. Monterrey"         = "Monterrey",
  "C Sinaloa"             = "Sinaloa",
  "C Veracruz"            = "Veracruz",
  "C Santa Fe"            = "Santa Fรฉ",
  "C Laguna"              = "Laguna",
  "C Ferrerรญa"           = "Ferrerรญa",
  "C Guadalajara"         = "Guadalajara",
  "CULIACAN"              = "Culiacรกn",
  "Campus Monterrey"      = "Monterrey",
  "Eugenio Garza Laguera" = "EGL",
  "Prog. en lรญnea"       = "Prog. En lรญnea",
  "Guarderia TEC"         = "Guarderรญa Tec",
  "Guarderia Tec"         = "Guarderรญa Tec",
  "O Mazatlรกn"           = "Mazatlรกn",
  "O Mรฉxico"             = "Ciudad de Mรฉxico",
  "O Monterrey"           = "Monterrey",
  "R. Tec. Mty"           = "R. Tec Mty",
  "Santa Fe"              = "Santa Fรฉ",
  "Central de Veracruz"   = "Veracruz",
  "Sonora Norte"          = "Sonora"
)
campus_idx <- which(d2$campus %in% names(campus_fix))
d2$campus[campus_idx] <- campus_fix[d2$campus[campus_idx]]
# Clean state names: trim whitespace, then normalize capitalization/accents
# and fold city names (Torreรณn, Toluca, Monterrey) into their states.
# NOTE(review): the last rule maps "Veracruz" to "Veracrรบz" (with accent),
# which is not the standard spelling — presumably done to match other data
# in this workbook; confirm before changing.
d2$estado <- str_trim(d2$estado)
d2$estado <- ifelse(d2$estado == "CDMX", "Ciudad de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Torreรณn", "Coahuila", d2$estado)
d2$estado <- ifelse(d2$estado == "jalisco", "Jalisco", d2$estado)
d2$estado <- ifelse(d2$estado == "Mexico", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Mรฉxico", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Toluca", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Monterrey", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo Lรฉon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo Leon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo leon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "nuevo Leรณn", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "SINALOA", "Sinaloa", d2$estado)
d2$estado <- ifelse(d2$estado == "sinaloa", "Sinaloa", d2$estado)
d2$estado <- ifelse(d2$estado == "San Luis Potosi", "San Luis Potosรญ", d2$estado)
d2$estado <- ifelse(d2$estado == "Veracruz", "Veracrรบz", d2$estado)
# Clean institution names: normalize the single all-caps variant in place
# (which() drops NA positions, so missing values stay NA).
d2$institucion[which(d2$institucion == "TECMILENIO")] <- "Tecmilenio"
# Clean contagion-week labels: trim whitespace and map every observed
# spelling variant to the canonical "<ordinal> <month>" form
# ("1ra"/"2da"/"3ra"/"4ta" plus a lowercase month name). A lookup table
# replaces the original ifelse() chain; no target is also a key, so a
# single pass is equivalent.
# FIX: the original chain applied the "3er marzo" rule twice; the
# redundant duplicate has been removed (identical result).
d2$semanaContagio <- str_trim(d2$semanaContagio)
semana_fix <- c(
  "1er abril"        = "1ra abril",
  "1er marzo"        = "1ra marzo",
  "1er mayo"         = "1ra mayo",
  "1er agosto"       = "1ra agosto",
  "1era julio"       = "1ra julio",
  "1er julio"        = "1ra julio",
  "2da Julio"        = "2da julio",
  "3er marzo"        = "3ra marzo",
  "3er abril"        = "3ra abril",
  "3er mayo"         = "3ra mayo",
  "3era junio"       = "3ra junio",
  "3er junio"        = "3ra junio",
  "3er julio"        = "3ra julio",
  "3er agosto"       = "3ra agosto",
  "4ta Agosto"       = "4ta agosto",
  "4ta Julio"        = "4ta julio",
  "1er septiembre"   = "1ra septiembre",
  "3er septiembre"   = "3ra septiembre",
  "1er octubre"      = "1ra octubre",
  "3er Octubre"      = "3ra octubre",
  "4ta Octubre"      = "4ta octubre",
  "1er noviembre"    = "1ra noviembre",
  "1er Noviembre"    = "1ra noviembre",
  "3er noviembre"    = "3ra noviembre",
  "1er de Diciembre" = "1ra diciembre",
  "1er Diciembre"    = "1ra diciembre",
  "3er diciembre"    = "3ra diciembre",
  "1er enero"        = "1ra enero"
)
semana_idx <- which(d2$semanaContagio %in% names(semana_fix))
d2$semanaContagio[semana_idx] <- semana_fix[d2$semanaContagio[semana_idx]]
# Clean employee type: strip the "N=" prefix from the coded categories.
d2$tipo <- ifelse(d2$tipo == "1=Acadรฉmico", "Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "2=Apoyo", "Apoyo", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "3=Apoyo acadรฉmico", "Apoyo Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "3=Apoyo Acadรฉmico", "Apoyo Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "4=Operativo", "Operativo", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "5=Clรญnico", "Clรญnico", d2$tipo)
# Clean diagnosis: strip the numeric prefix (outpatient vs hospitalized).
d2$diagnostico <- ifelse(d2$diagnostico == "1=Ambulatorio", "Ambulatorio", d2$diagnostico)
d2$diagnostico <- ifelse(d2$diagnostico == "2=Hospitalizado", "Hospitalizado", d2$diagnostico)
# Clean gender: normalize capitalization.
d2$genero <- ifelse(d2$genero == "femenino", "Femenino", d2$genero)
d2$genero <- ifelse(d2$genero == "FEMENINO", "Femenino", d2$genero)
d2$genero <- ifelse(d2$genero == "masculino", "Masculino", d2$genero)
# Clean medical-discharge flag: normalize "NO"/"no" to "No".
d2$alta <- ifelse(d2$alta == "NO", "No", d2$alta)
d2$alta <- ifelse(d2$alta == "no", "No", d2$alta)
# Colour-palette factory: interpolates the 9-step Brewer "Blues" scale so
# later plots can request an arbitrary number of ordered blue shades.
getPalette <- colorRampPalette(brewer.pal(9, "Blues"))
# Bar plot: case counts per institution.
# Institutions present in the data:
# -> Sorteos Tec
# -> Tecmilenio
# -> Tecnologico de Monterrey
# -> TecSalud
inst <- data.frame(table(d2$institucion))
colnames(inst) <- c("Instituciรณn","Casos")
ggplot(data=inst, aes(x=Instituciรณn, y=Casos, fill=Instituciรณn)) +
  geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
  geom_text(aes(label = Casos), hjust = -0.2, size=5) +
  ggtitle("Nรบmero de casos Covid-19 por instituciรณn") +
  xlab("Instituciรณn") + ylab("Nรบmero de casos") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + theme(legend.position="none") +
  scale_fill_brewer()
# Pie chart: percentage of cases per institution.
# ypos places each percentage label at the angular midpoint of its slice
# (cumulative proportion minus half the slice).
inst2 <- data.frame(table(d2$institucion), data.frame(prop.table(table(d2$institucion)))$Freq*100)
colnames(inst2) <- c("Instituciรณn","Casos", "prop")
inst2 <- inst2 %>%
  arrange(desc(Instituciรณn)) %>%
  mutate(ypos = cumsum(prop) - 0.5*prop)
ggplot(inst2, aes(x="", y=prop, fill=Instituciรณn)) +
  geom_bar(stat="identity") +
  coord_polar("y", start=0) +
  geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=6) +
  theme_void() +
  ggtitle("Porcentaje de casos Covid-19 por instituciรณn") +
  guides(fill=guide_legend(title="Instituciรณn")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=20)) +
  scale_fill_brewer()
# Bar plot: case counts per employee type.
# (The original comment said "Piechart", but this draws a horizontal bar
# chart; the `prop` column is computed but not used by this plot.)
# Employee types: Acadรฉmico, Apoyo, Apoyo Acadรฉmico, Clรญnico, Operativo.
t_colab <- data.frame(table(d2$tipo), data.frame(prop.table(table(d2$tipo)))$Freq*100)
colnames(t_colab) <- c("Tipo", "Casos", "prop")
ggplot(data=t_colab, aes(x=Tipo, y=Casos, fill=Tipo)) +
  geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
  geom_text(aes(label = Casos), hjust = -0.2, size=5) +
  ggtitle("Nรบmero de casos Covid-19 por tipo de colaborador") +
  xlab("Colaborador") + ylab("Nรบmero de casos") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer()
# Bar plot: cumulative deaths by date.
# NOTE: only rows with a non-missing death date are counted (table() drops NA).
# The original hand-rolled accumulation loop (which also used the unsafe
# 1:length() pattern) is replaced by the equivalent vectorized cumsum().
fallecimiento <- data.frame(table(d2$fechaFallecimiento))
colnames(fallecimiento) <- c("Fecha", "Casos")
# Running total of deaths up to and including each date.
fallecimiento$Acum <- cumsum(fallecimiento$Casos)
ggplot(data=fallecimiento, aes(x=Fecha, y=Acum, fill=Fecha)) +
  geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(fallecimiento$Fecha))) +
  geom_text(aes(label = Acum), vjust = -0.2, size=5) +
  ggtitle("Nรบmero de fallecimientos Covid-19 por fecha") +
  xlab("Fecha") + ylab("Acumulaciรณn de casos") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15), axis.text.x = element_text(angle=75, hjust=1)) + theme(legend.position="none")
# Bar plot: case counts per age range.
# Age ranges in the data: 0-19, 20-29, 30-39, 40-49, 50-59, 60-69, 70-79.
r_edad <- data.frame(table(d2$rangoedad))
colnames(r_edad) <- c("Rango","Casos")
ggplot(data=r_edad, aes(x=Rango, y=Casos, fill=Rango)) +
  geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
  geom_text(aes(label = Casos), hjust = -0.05, size=5) +
  xlab("Rango de edad") + ylab("Nรบmero de casos") +
  ggtitle("Nรบmero de casos Covid-19 por rango de edad") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer()
# Pie chart: percentage of cases by gender (Femenino / Masculino).
# (The original section comment said "rango de edad"; this is gender.)
# ypos centers each label inside its slice.
genero <- data.frame(table(d2$genero), data.frame(prop.table(table(d2$genero)))$Freq*100)
colnames(genero) <- c("Genero", "Casos", "prop")
genero <- genero %>%
  arrange(desc(Genero)) %>%
  mutate(ypos = cumsum(prop) - 0.5*prop)
ggplot(genero, aes(x="", y=prop, fill=Genero)) +
  geom_bar(stat="identity", width=1, color="white") +
  coord_polar("y", start=0) +
  geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=10) +
  theme_void() +
  ggtitle("Porcentaje de casos Covid-19 por gรฉnero") +
  guides(fill=guide_legend(title="Gรฉnero")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer()
# Bar plot: case counts by diagnosis (Ambulatorio / Hospitalizado).
# (The original section comment said "rango de edad"; this is diagnosis.)
diag <- data.frame(table(d2$diagnostico))
colnames(diag) <- c("Diagnostico", "Casos")
ggplot(data=diag, aes(x=Diagnostico, y=Casos, fill=Diagnostico)) +
  geom_bar(stat="identity", width=0.7, color="white") +
  geom_text(aes(label = Casos), vjust = -0.2, size=5) +
  xlab("Diagnรณstico") + ylab("Nรบmero de casos") +
  ggtitle("Nรบmero de casos Covid-19 por diagnรณstico") +
  guides(fill=guide_legend(title="Diagnรณstico")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer()
# Pie chart: percentage of cases by medical-discharge status (No / Sรญ).
# ypos centers each label inside its slice.
# FIX: removed the dead `alta2` data frame — it duplicated `alta` with
# hard-coded counts (50/526) and was never plotted or referenced again.
alta <- data.frame(table(d2$alta), data.frame(prop.table(table(d2$alta)))$Freq*100)
colnames(alta) <- c("Alta_Mรฉdica", "Casos", "prop")
alta <- alta %>%
  arrange(desc(Alta_Mรฉdica)) %>%
  mutate(ypos = cumsum(prop) - 0.5*prop)
ggplot(alta, aes(x="", y=prop, fill=Alta_Mรฉdica)) +
  geom_bar(stat="identity", width=1, color="white") +
  coord_polar("y", start=0) +
  geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=10) +
  theme_void() +
  guides(fill=guide_legend(title="Alta mรฉdica")) +
  ggtitle("Porcentaje de casos Covid-19 por alta mรฉdica") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer(direction = -1)
# Bar plot: case counts per contagion week, ordered chronologically.
# Labels are truncated to "<ord> <mon>" (7 chars) and the year is appended
# from the symptom-onset date; a "5ta dic" week whose onset falls in January
# belongs to the PREVIOUS year, hence the special case in the loop.
d2$semanaContagio <- substr(d2$semanaContagio,1,7)
for(i in 1:length(d2$semanaContagio)){
  num = ifelse(d2$semanaContagio[i] == "5ta dic" && substr(d2$inicio[i],6,7) == "01", toString(as.numeric(substr(d2$inicio[i],1,4))-1), substr(d2$inicio[i],1,4))
  d2$semanaContagio[i] <- paste(d2$semanaContagio[i], num);
}
#d2$semanaContagio <- ifelse(d2$semanaContagio == "5ta dic" && substr(d2$inicio,6,7) == "01", paste(d2$semanaContagio,toString(as.numeric(substr(d2$inicio,1,4))-1)), paste(d2$semanaContagio, substr(d2$inicio,1,4)))
# Factor levels used to sort week labels chronologically.
semana_num <- c("1ra", "2da", "3ra", "4ta", "5ta") # week ordinals
meses <- c("ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep", "oct", "nov", "dic") # month abbreviations
semana <- data.frame(table(d2$semanaContagio))
colnames(semana) <- c("Semana", "Casos")
# Split "1ra abr 2020" back into ordinal / month / year for ordering.
semana$semana_num <- substr(semana$Semana,1,3)
semana$mes <- str_trim(substring(semana$Semana,4,7))
semana$year <- substring(semana$Semana,8)
semana$year <- factor(semana$year)
semana$semana_num <- factor(semana$semana_num, levels = semana_num)
semana$mes <- factor(semana$mes, levels = meses)
semana <- data.frame(semana[order(semana$year,semana$mes,semana$semana_num),])
semana$Semana <- factor(semana$Semana, levels = rev(semana$Semana))
# Generate a CSV with historical cumulative cases per symptom-onset week.
# NOTE(review): `<-` inside data.frame() also creates global objects
# `Semana`/`Casos` as a side effect; `=` was probably intended (the column
# names are overwritten on the next line anyway) — confirm before changing.
semanaP <- data.frame(
  Semana <- semana$Semana,
  Casos <- semana$Casos
)
colnames(semanaP) <- c("Semana de Contagio", "Casos")
#write.csv(semanaP, file = "historicos.csv")
ggplot(data=semana, aes(x=Casos, y=Semana, fill=Semana)) +
  geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(semana$Semana))) +
  geom_text(aes(label = Casos), hjust = -0.2, size=5) +
  xlab("Nรบmero de casos") + ylab("Semana") +
  ggtitle("Nรบmero de casos Covid-19 por semana de contagio") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + theme(legend.position="none")
# Stacked bar plot: case counts per contagion week, split by employee type.
# table() here has two dimensions (week x type), so each week yields one row
# per type; ordering mirrors the previous block.
semana2 <- data.frame(table(d2$semanaContagio, d2$tipo))
colnames(semana2) <- c("Semana", "Colaborador", "Casos")
semana2$semana_num <- substr(semana2$Semana,1,3)
# NOTE(review): substring start is 5 here vs 4 in the previous block; both
# yield the month after str_trim() because position 4 is a space.
semana2$mes <- str_trim(substring(semana2$Semana,5,7))
semana2$year <- substring(semana2$Semana,8)
semana2$semana_num <- factor(semana2$semana_num, levels = semana_num)
semana2$mes <- factor(semana2$mes, levels = meses)
semana2$year <- factor(semana2$year)
semana2 <- data.frame(semana2[order(semana2$year,semana2$mes, semana2$semana_num),])
semana2$Semana <- factor(semana2$Semana, levels = unique(rev(semana2$Semana)))
# geom_text uses stat='summary' with fun=sum to print the per-week total at
# the end of each stacked bar.
ggplot(data = semana2, aes(x = Casos, y = Semana, fill = Colaborador)) +
  geom_bar(stat = "identity", width=0.7, color="white") +
  xlab("Nรบmero de casos") + ylab("Semana") +
  ggtitle("Nรบmero de casos Covid-19 por semana de contagio contra tipo de colaborador") +
  geom_text(aes(label = stat(x), group = Semana), stat = 'summary', fun = sum, hjust = -0.2, size=5) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  scale_fill_brewer()
# Line/area plot: cumulative contagions over time.
# Reads the "Contagios" sheet, which holds per-day columns:
# -> date (Excel serial number), daily contagions, cumulative total.
# FIX: corrected the y-axis label typo "Coantagios" -> "Contagios".
casos <- as.data.frame(read_excel("Colaboradores Covid positivos 13_01_2021.xlsm", sheet = "Contagios"))
colnames(casos) <- c("Fecha", "Contagios", "Acumulados")
# Grand total = last cumulative value; used to pad the y-axis limit below.
casosTotalAcum <- casos$Acumulados[length(casos$Acumulados)]
# origin="1899-12-30" converts Excel serial dates to R Dates.
ggplot(casos, aes(x=as.Date(Fecha, origin="1899-12-30"), y=Acumulados)) +
  geom_area( fill="#1769a8", alpha=0.5) +
  geom_line(color="#184363", size=2) +
  geom_point(size=3, color="#184363") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  xlab("Fecha") + ylab("Contagios acumulados") +
  ggtitle("Nรบmero de casos Covid-19 acumulados") +
  coord_cartesian(ylim = c(-10, casosTotalAcum + 50)) +
  scale_color_gradient()
# Bar plot: daily contagions by symptom-onset date, with a smoothed trend
# line (stat_smooth). Uses the `casos` data frame loaded above.
ggplot(data=casos, aes(x=as.Date(Fecha, origin="1899-12-30"), y=Contagios, fill=Contagios)) +
  geom_bar(stat = "identity", position="identity") +
  ggtitle("Nรบmero de casos Covid-19 por fecha de inicio de sรญntomas") +
  xlab("Fecha de inicio de sรญntomas") + ylab("Nรบmero de casos") +
  scale_x_date(labels=date_format("%b %d"), breaks=date_breaks("2 weeks")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + stat_smooth(colour="#1769a8") +
  scale_color_gradient()
# Bar plot: case counts per state, sorted ascending.
# NOTE(review): fill is set as a static argument via getPalette(), so the
# scale_fill_viridis_d() layer has no visible effect here.
estado <- data.frame(sort(table(d2$estado), decreasing = FALSE))
colnames(estado) <- c("Estado", "Casos")
ggplot(data=estado, aes(x=Estado, y=Casos, fill=Estado)) +
  geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(estado$Estado))) + coord_flip() +
  geom_text(aes(label = Casos), hjust = -0.2) +
  xlab("Estado") + ylab("Nรบmero de casos") +
  scale_fill_viridis_d() +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + theme(legend.position="none") +
  ggtitle("Nรบmero de casos Covid-19 por estado")
#<----------------------- Cross-tabulated charts ----------------------->
# Stacked bar plot: employee type vs campus, restricted to campuses with
# more than 5 total cases.
emp_camp2 <- data.frame(table(d2$tipo, d2$campus))
colnames(emp_camp2) <- c("Empleado", "Campus", "Casos")
campuss <- data.frame(table(d2$campus))
campuss <- campuss[campuss$Freq>5,]
emp_camp2 <- emp_camp2[emp_camp2$Campus %in% campuss$Var1,]
# NOTE(review): two fill scales are added; ggplot2 keeps only the last one,
# so scale_fill_brewer(direction = -1) wins over scale_fill_viridis_d().
ggplot(emp_camp2, aes(fill=Empleado, y=Campus, x=Casos, label = Casos)) +
  geom_bar(position="stack", stat="identity") +
  xlab("Nรบmero de casos") + ylab("Campus") +
  geom_text(data=subset(emp_camp2, Casos>0), size = 5, position = position_stack(vjust = 0.5), check_overlap = FALSE, colour="black", fontface = "bold") +
  scale_fill_viridis_d(begin = 0.1, end = 0.9) +
  guides(fill=guide_legend(title="Colaborador")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  ggtitle("Nรบmero de casos Covid-19 por tipo de colaborador contra campus (total mayor que 5)") +
  scale_fill_brewer(direction = -1)
# Stacked bar plot: age range vs campus, restricted to campuses with more
# than 5 total cases.
# FIX: removed the unused `Colors` column (assigned with `=` and never
# referenced — the labels use a fixed colour="black").
# NOTE(review): two fill scales are added; ggplot2 keeps only the last one,
# so scale_fill_brewer(direction = -1) wins over scale_fill_viridis_d().
edad_camp2 <- data.frame(table(d2$rangoedad, d2$campus))
colnames(edad_camp2) <- c("Rango_Edad", "Campus", "Casos")
campus <- data.frame(table(d2$campus))
campus <- campus[campus$Freq>5,]
edad_camp2 <- edad_camp2[edad_camp2$Campus %in% campus$Var1,]
ggplot(edad_camp2, aes(fill=Rango_Edad, y=Campus, x=Casos, label = Casos)) +
  geom_bar(position="stack", stat="identity") +
  xlab("Nรบmero de casos") + ylab("Campus") +
  geom_text(data=subset(edad_camp2, Casos>0), size = 5, position = position_stack(vjust = 0.5), check_overlap = FALSE, colour="black", fontface = "bold") +
  scale_fill_viridis_d(begin = 0.1, end = 0.9) +
  guides(fill=guide_legend(title="Rango de edad")) +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  ggtitle("Nรบmero de casos Covid-19 por rango de edad contra campus (total mayor que 5)") +
  scale_fill_brewer(direction = -1)
# Stacked bar plot: within-institution proportion of each employee type
# (prop.table margin 2 normalizes each institution column to sum to 1).
# NOTE(review): scale_fill_brewer() (last) replaces scale_fill_viridis_d().
inst_colab <- data.frame(prop.table(table(d2$tipo, d2$institucion),2))
colnames(inst_colab) <- c("Colaborador", "Institucion", "Casos")
ggplot(inst_colab, aes(fill=Colaborador, y=Institucion, x=Casos)) +
  geom_bar(position="stack", stat="identity") +
  scale_fill_viridis_d() +
  xlab("Porcentaje de casos") + ylab("Instituciรณn") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) +
  ggtitle("Porcentaje de casos Covid-19 por instituciรณn contra tipo de colaborador") +
  scale_fill_brewer()
#<----------------------- Filtered charts ----------------------->
# Bar plot: hospitalized cases per institution.
inst_hos <- data.frame(table(d2$institucion[d2$diagnostico == "Hospitalizado"]))
colnames(inst_hos) <- c("Institucion", "Casos")
ggplot(inst_hos, aes(fill=Institucion, y=Casos, x=Institucion)) +
  geom_bar(position="stack", stat="identity", fill = getPalette(length(inst_hos$Institucion))) + coord_flip() +
  xlab("Instituciรณn") + ylab("Nรบmero de casos hospitalizados") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + theme(legend.position="none") +
  geom_text(aes(label = Casos), hjust = -0.2, size=5) +
  ggtitle("Nรบmero de casos Covid-19 hospitalizados por instituciรณn")
# Bar plot: hospitalized cases per campus.
campus_hos <- data.frame(table(d2$campus[d2$diagnostico == "Hospitalizado"]))
colnames(campus_hos) <- c("Campus", "Casos")
ggplot(campus_hos, aes(fill=Campus, y=Casos, x=Campus)) +
  geom_bar(position="stack", stat="identity", fill = getPalette(length(campus_hos$Campus))) + coord_flip() +
  xlab("Campus") + ylab("Nรบmero de casos hospitalizados") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=15)) + theme(legend.position="none") +
  geom_text(aes(label = Casos), hjust = -0.2, size=5) +
  ggtitle("Nรบmero de casos Covid-19 hospitalizados por campus")
# Bar plot of general figures: hospitalization %, death %, and overall
# contagion % relative to the total employee population.
# Loads the population workbook; its TOTAL.Colaboradores column gives the
# headcount used as the denominator for the overall contagion percentage.
# NOTE(review): naming this object `data` shadows base::data.
data <- data.frame(read_excel("Tasas_y_Poblacion_Tec_16072020.xlsx", sheet = "Poblaciรณn Tec"))
general_data2 <- data.frame(
  Dato = c("Porcentaje de hospitalizaciรณn", "Porcentaje de fallecimiento", "Porcentaje de contagios (total)"),
  Casos = c(dim(data.frame(d2[d2$diagnostico == "Hospitalizado",]))[1]/dim(d2)[1]*100, dim(d2[!is.na(d2$fechaFallecimiento),])[1]/dim(d2)[1]*100, dim(d2)[1]/sum(data$TOTAL.Colaboradores)*100),
  stringsAsFactors = FALSE
)
ggplot(general_data2, aes(fill=Dato, y=Casos, x=Dato)) +
  geom_bar(position="stack", stat="identity", fill = getPalette(3)) +
  ylab("") + xlab("") +
  theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
  theme(text = element_text(size=17)) + theme(legend.position="none") +
  geom_text(aes(label = paste(round(Casos,2), "%", sep="")), vjust = -0.2, size=8) +
  ggtitle("Porcentajes de datos generales Covid-19")
#BarPlot semanas con menor y mayor cantidad de contagios, semana anterior, semana actual y la diferencia entre las รบltimas dos
# Min/max weekly counts, previous vs. current week, and their difference.
# Relies on `semana` (weekly counts, chronologically ordered) built earlier.
general_data3 <- data.frame(
Semana = c("Semana min", "Semana max", "Semana anterior", "Semana actual", "Diferencia"),
Casos = c(min(semana$Casos), max(semana$Casos), semana$Casos[dim(semana)[1]-1], semana$Casos[dim(semana)[1]], semana$Casos[dim(semana)[1]]-semana$Casos[dim(semana)[1]-1]),
stringsAsFactors = FALSE
)
# Fix the display order of the bars (factor levels, not alphabetical).
semanas <- c("Semana min", "Semana max", "Semana anterior", "Semana actual", "Diferencia")
general_data3$Semana <- factor(general_data3$Semana, levels = semanas)
ggplot(general_data3, aes(fill=Semana, y=Casos, x=Semana)) +
geom_bar(position="stack", stat="identity", fill = getPalette(5)) +
ylab("") + xlab("") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = Casos), vjust = ifelse(general_data3$Semana == "Diferencia", -5.2, -0.2), size=8) +
ggtitle("Datos respecto a contagios semanales Covid-19")
#BarPlot de casos totales en el campus e instituciones con mayor cantidad de casos, asi como los casos totales
# Campus and institution with the most cases, plus the overall total.
max_campus <- data.frame(table(d2$campus))
max_campus <- max_campus[order(max_campus$Freq),]
max_inst <- data.frame(table(d2$institucion))
max_inst <- max_inst[order(max_inst$Freq),]
# NOTE(fix): the original indexed with `dim(max_campus)` (a length-2 vector,
# c(nrow, ncol)) and then took [1]; it only produced the right value by
# accident. nrow() is the intended index of the last (largest, after the
# ascending sort) row.
general_data4 <- data.frame(
Dato = c(paste("Campus max (", max_campus$Var1[nrow(max_campus)], ")", sep=""), paste("Instituciรณn max (", max_inst$Var1[nrow(max_inst)], ")", sep=""), "Casos totales"),
Casos = c(max_campus$Freq[nrow(max_campus)], max_inst$Freq[nrow(max_inst)], dim(d2)[1]),
stringsAsFactors = FALSE
)
# Preserve this bar order in the plot via factor levels.
data4_order <- c(paste("Campus max (", max_campus$Var1[nrow(max_campus)], ")", sep=""), paste("Instituciรณn max (", max_inst$Var1[nrow(max_inst)], ")", sep=""), "Casos totales")
general_data4$Dato <- factor(general_data4$Dato, data4_order)
ggplot(general_data4, aes(fill=Dato, y=Casos, x=Dato)) +
geom_bar(position="stack", stat="identity", fill = getPalette(3)) +
ylab("") + xlab("") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = Casos), vjust = -0.2, size=8) +
ggtitle("Datos de casos totales Covid-19")
#<-------------------------graficas porcentuales------------------------------->
#Barplot de tasa de contagio por cada campus con respecto a su poblacion total de colaboradores
# Contagion rate per campus = cases / total staff on that campus.
campus_comp <- d2[d2$campus %in% data$Campus,]
campus_comp <- data.frame(table(campus_comp$campus))
campus_totales <- data[data$Campus %in% campus_comp$Var1,]
campus_totales <- data.frame(campus_totales$Campus, campus_totales$TOTAL.Colaboradores)
# NOTE(fix): the original sorted `campus_totales` by name and relied on that
# row order matching the (locale-dependent) level order produced by table().
# match() joins each campus to its total explicitly and is robust to ordering.
campus_comp$total <- campus_totales$campus_totales.TOTAL.Colaboradores[match(campus_comp$Var1, campus_totales$campus_totales.Campus)]
colnames(campus_comp) <- c("Campus", "Casos", "Total")
colourCount = length(campus_comp$Campus)
getPalette = colorRampPalette(brewer.pal(9, "Blues"))
ggplot(campus_comp, aes(fill=Campus, y=(Casos/Total*100), x=Campus)) +
geom_bar(position="stack", stat="identity", fill = getPalette(colourCount)) +
ylab("") + xlab("") + coord_flip() +
xlab("Campus") + ylab("Porcentaje de tasa de contagio") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = paste(round(Casos/Total*100,2), "%", sep = "")), hjust = -0.05, size=5) +
ggtitle("Tasa de contagio por campus Covid-19")
#<-------------------------Tabla csv--------------------------------->
#Genera table con los casos de cada tipo de colaborador por campus
# Cross-tabulation (campus x collaborator type) exported as CSV.
camp_colab <- table(d2$campus, d2$tipo)
#filter table
write.csv(camp_colab, file = "campus_contra_colaborador.csv")
| /script.R | no_license | Abrahamcepedao/Analyze-Data-R | R | false | false | 29,573 | r | rm(list=ls())
# Dependencies: Excel I/O, plotting, axis scales, data wrangling, strings, palettes.
library(readxl)
library(ggplot2)
library(scales)
library(dplyr)
library(stringr)
library(RColorBrewer)
# NOTE(review): an absolute, user-specific setwd() makes the script
# non-portable; prefer running it from the project directory.
setwd("~/Documents/OnCampusJob/propuesta-graficos-covid-19/Analyze-Data-R")
# Raw case registry: one row per confirmed collaborator case.
d <- read_excel("Colaboradores Covid positivos 13_01_2021.xlsm", sheet = "BD")
d2 <- as.data.frame(d)
#Nombres de columnas
# Canonical column names; every later section indexes d2 by these.
colnames(d2) <- c("nomina", "nombre", "genero", "institucion", "campus", "estado", "posicion", "corre", "celular", "tipo", "edad", "rangoedad", "inicio", "semanaContagio", "tipoContagio", "asistidoCampus", "tipoAtencion", "diagnostico", "morbilidades", "alta", "fechaAlta", "fechaFallecimiento")
#Limpiar datos tipo de contagio
# Normalize contagion-type labels: trim whitespace, collapse spelling variants.
d2$tipoContagio <- str_trim(d2$tipoContagio)
d2$tipoContagio <- ifelse(d2$tipoContagio == "1= Local", "1=Local", d2$tipoContagio)
#Limpiar datos de campus
# Normalize campus names: trim whitespace, then collapse the many free-text
# spelling variants into one canonical label per campus.
d2$campus <- str_trim(d2$campus)
d2$campus <- ifelse(d2$campus == "Areas de Apoyo", "รreas de apoyo", d2$campus)
d2$campus <- ifelse(d2$campus == "รreas de Apoyo", "รreas de apoyo", d2$campus)
d2$campus <- ifelse(d2$campus == "C CM", "Ciudad de Mรฉxico", d2$campus)
d2$campus <- ifelse(d2$campus == "CCM", "Ciudad de Mรฉxico", d2$campus)
d2$campus <- ifelse(d2$campus == "C. Querรฉtaro", "Querรฉtaro", d2$campus)
d2$campus <- ifelse(d2$campus == "C. Mty", "Monterrey", d2$campus)
d2$campus <- ifelse(d2$campus == "C MTY", "Monterrey", d2$campus)
d2$campus <- ifelse(d2$campus == "Of. Monterrey", "Monterrey", d2$campus)
d2$campus <- ifelse(d2$campus == "C Sinaloa", "Sinaloa", d2$campus)
d2$campus <- ifelse(d2$campus == "C Veracruz", "Veracruz", d2$campus)
d2$campus <- ifelse(d2$campus == "C Santa Fe", "Santa Fรฉ", d2$campus)
d2$campus <- ifelse(d2$campus == "C Laguna", "Laguna", d2$campus)
d2$campus <- ifelse(d2$campus == "C Ferrerรญa", "Ferrerรญa", d2$campus)
d2$campus <- ifelse(d2$campus == "C Guadalajara", "Guadalajara", d2$campus)
d2$campus <- ifelse(d2$campus == "CULIACAN", "Culiacรกn", d2$campus)
d2$campus <- ifelse(d2$campus == "Campus Monterrey", "Monterrey", d2$campus)
d2$campus <- ifelse(d2$campus == "Eugenio Garza Laguera", "EGL", d2$campus)
d2$campus <- ifelse(d2$campus == "Prog. en lรญnea", "Prog. En lรญnea", d2$campus)
d2$campus <- ifelse(d2$campus == "Guarderia TEC", "Guarderรญa Tec", d2$campus)
d2$campus <- ifelse(d2$campus == "Guarderia Tec", "Guarderรญa Tec", d2$campus)
d2$campus <- ifelse(d2$campus == "O Mazatlรกn", "Mazatlรกn", d2$campus)
d2$campus <- ifelse(d2$campus == "O Mรฉxico", "Ciudad de Mรฉxico", d2$campus)
d2$campus <- ifelse(d2$campus == "O Monterrey", "Monterrey", d2$campus)
d2$campus <- ifelse(d2$campus == "R. Tec. Mty", "R. Tec Mty", d2$campus)
d2$campus <- ifelse(d2$campus == "Santa Fe", "Santa Fรฉ", d2$campus)
# Normalize campus names in the staff-population table so they join with d2$campus.
# NOTE(fix): `data` was referenced here before being defined (in this script it
# is only read much later), which made this section error out at runtime. Load
# the sheet first; the later re-read should ideally be removed or re-normalized.
data <- data.frame(read_excel("Tasas_y_Poblacion_Tec_16072020.xlsx", sheet = "Poblaciรณn Tec"))
data$Campus <- ifelse(data$Campus == "Central de Veracruz", "Veracruz", data$Campus)
data$Campus <- ifelse(data$Campus == "Santa Fe", "Santa Fรฉ", data$Campus)
data$Campus <- ifelse(data$Campus == "Sonora Norte", "Sonora", data$Campus)
#Limpiar datos de estado
# Normalize state names: trim whitespace, map cities/typos to canonical states.
d2$estado <- str_trim(d2$estado)
d2$estado <- ifelse(d2$estado == "CDMX", "Ciudad de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Torreรณn", "Coahuila", d2$estado)
d2$estado <- ifelse(d2$estado == "jalisco", "Jalisco", d2$estado)
d2$estado <- ifelse(d2$estado == "Mexico", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Mรฉxico", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Toluca", "Estado de Mรฉxico", d2$estado)
d2$estado <- ifelse(d2$estado == "Monterrey", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo Lรฉon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo Leon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "Nuevo leon", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "nuevo Leรณn", "Nuevo Leรณn", d2$estado)
d2$estado <- ifelse(d2$estado == "SINALOA", "Sinaloa", d2$estado)
d2$estado <- ifelse(d2$estado == "sinaloa", "Sinaloa", d2$estado)
d2$estado <- ifelse(d2$estado == "San Luis Potosi", "San Luis Potosรญ", d2$estado)
d2$estado <- ifelse(d2$estado == "Veracruz", "Veracrรบz", d2$estado)
#Limpiar datos de institucion
d2$institucion <- ifelse(d2$institucion == "TECMILENIO", "Tecmilenio", d2$institucion)
#Limpiar datos de semana de contagio
# Normalize week-of-contagion labels to the "1ra/2da/3ra/4ta/5ta <month>"
# pattern (the later week-parsing code depends on this exact format).
d2$semanaContagio <- str_trim(d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er abril", "1ra abril", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er marzo", "1ra marzo", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er mayo", "1ra mayo", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er agosto", "1ra agosto", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1era julio", "1ra julio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er julio", "1ra julio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "2da Julio", "2da julio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er marzo", "3ra marzo", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er abril", "3ra abril", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er mayo", "3ra mayo", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er marzo", "3ra marzo", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3era junio", "3ra junio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er junio", "3ra junio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er julio", "3ra julio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er agosto", "3ra agosto", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "4ta Agosto", "4ta agosto", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "4ta Julio", "4ta julio", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er septiembre", "1ra septiembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er septiembre", "3ra septiembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er octubre", "1ra octubre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er Octubre", "3ra octubre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "4ta Octubre", "4ta octubre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er noviembre", "1ra noviembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er Noviembre", "1ra noviembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er noviembre", "3ra noviembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er de Diciembre", "1ra diciembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er Diciembre", "1ra diciembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "3er diciembre", "3ra diciembre", d2$semanaContagio)
d2$semanaContagio <- ifelse(d2$semanaContagio == "1er enero", "1ra enero", d2$semanaContagio)
#Limpiar datos tipo colaborador
# Drop the "N=" code prefixes from collaborator-type labels.
d2$tipo <- ifelse(d2$tipo == "1=Acadรฉmico", "Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "2=Apoyo", "Apoyo", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "3=Apoyo acadรฉmico", "Apoyo Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "3=Apoyo Acadรฉmico", "Apoyo Acadรฉmico", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "4=Operativo", "Operativo", d2$tipo)
d2$tipo <- ifelse(d2$tipo == "5=Clรญnico", "Clรญnico", d2$tipo)
#Limpiar datos tipo de diagnostico
d2$diagnostico <- ifelse(d2$diagnostico == "1=Ambulatorio", "Ambulatorio", d2$diagnostico)
d2$diagnostico <- ifelse(d2$diagnostico == "2=Hospitalizado", "Hospitalizado", d2$diagnostico)
#Limpiar datos genero
d2$genero <- ifelse(d2$genero == "femenino", "Femenino", d2$genero)
d2$genero <- ifelse(d2$genero == "FEMENINO", "Femenino", d2$genero)
d2$genero <- ifelse(d2$genero == "masculino", "Masculino", d2$genero)
#Limpiar datos de tipo de alta
d2$alta <- ifelse(d2$alta == "NO", "No", d2$alta)
d2$alta <- ifelse(d2$alta == "no", "No", d2$alta)
#Color palette config
# Blues ramp reused by most plots below; several sections re-create it with the
# exact number of colors they need.
getPalette = colorRampPalette(brewer.pal(9, "Blues"))
#BarPlot por institucion
#institucion:
# -> Sorteos tec
# -> Tecmilenio
# -> Tecnologico de monterrey
# -> TecSalud
# Horizontal bar chart: confirmed cases per institution.
inst <- data.frame(table(d2$institucion))
colnames(inst) <- c("Instituciรณn","Casos")
ggplot(data=inst, aes(x=Instituciรณn, y=Casos, fill=Instituciรณn)) +
geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
geom_text(aes(label = Casos), hjust = -0.2, size=5) +
ggtitle("Nรบmero de casos Covid-19 por instituciรณn") +
xlab("Instituciรณn") + ylab("Nรบmero de casos") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + theme(legend.position="none") +
scale_fill_brewer()
#Piechart por institucion
#institucion:
# -> Sorteos tec
# -> Tecmilenio
# -> Tecnologico de monterrey
# -> TecSalud
# Pie chart of case share per institution; `prop` is the percentage and
# `ypos` is the cumulative midpoint used to place each slice label.
inst2 <- data.frame(table(d2$institucion), data.frame(prop.table(table(d2$institucion)))$Freq*100)
colnames(inst2) <- c("Instituciรณn","Casos", "prop")
inst2 <- inst2 %>%
arrange(desc(Instituciรณn)) %>%
mutate(ypos = cumsum(prop) - 0.5*prop)
ggplot(inst2, aes(x="", y=prop, fill=Instituciรณn)) +
geom_bar(stat="identity") +
coord_polar("y", start=0) +
geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=6) +
theme_void() +
ggtitle("Porcentaje de casos Covid-19 por instituciรณn") +
guides(fill=guide_legend(title="Instituciรณn")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=20)) +
scale_fill_brewer()
#Piechart por Tipo de colaborador
# NOTE(review): despite the header above this section draws a horizontal bar
# chart, not a pie; the computed `prop` column is never used by the plot.
#tipo de colaborador:
# -> Academico
# -> Apoyo
# -> Apoyo Academico
# -> Clinico
# -> Operativo
t_colab <- data.frame(table(d2$tipo), data.frame(prop.table(table(d2$tipo)))$Freq*100)
colnames(t_colab) <- c("Tipo", "Casos", "prop")
ggplot(data=t_colab, aes(x=Tipo, y=Casos, fill=Tipo)) +
geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
geom_text(aes(label = Casos), hjust = -0.2, size=5) +
ggtitle("Nรบmero de casos Covid-19 por tipo de colaborador") +
xlab("Colaborador") + ylab("Nรบmero de casos") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer()
#BarPlot por acumulacion de fallecimiento por fecha
#NOTA: solo toma los que tienen fecha
# Cumulative deaths by recorded death date.
fallecimiento <- data.frame(table(d2$fechaFallecimiento))
colnames(fallecimiento) <- c("Fecha", "Casos")
# NOTE(fix): the running total is just a cumulative sum; cumsum() replaces the
# original manual accumulator loop (same values, vectorized).
fallecimiento$Acum <- cumsum(fallecimiento$Casos)
ggplot(data=fallecimiento, aes(x=Fecha, y=Acum, fill=Fecha)) +
geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(fallecimiento$Fecha))) +
geom_text(aes(label = Acum), vjust = -0.2, size=5) +
ggtitle("Nรบmero de fallecimientos Covid-19 por fecha") +
xlab("Fecha") + ylab("Acumulaciรณn de casos") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15), axis.text.x = element_text(angle=75, hjust=1)) + theme(legend.position="none")
#BarPlot por rango de edad
#rango de edad:
# -> 0-19
# -> 20-29
# -> 30-39
# -> 40-49
# -> 50-59
# -> 60-69
# -> 70-79
# Horizontal bar chart: confirmed cases per age bracket (bracket labels come
# pre-computed in the `rangoedad` column).
r_edad <- data.frame(table(d2$rangoedad))
colnames(r_edad) <- c("Rango","Casos")
ggplot(data=r_edad, aes(x=Rango, y=Casos, fill=Rango)) +
geom_bar(stat="identity", width=0.7, color="white") + coord_flip() +
geom_text(aes(label = Casos), hjust = -0.05, size=5) +
xlab("Rango de edad") + ylab("Nรบmero de casos") +
ggtitle("Nรบmero de casos Covid-19 por rango de edad") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer()
#Piechart por genero
#rango de edad:
# NOTE(review): the header above ("rango de edad") is a copy-paste leftover;
# the actual categories are the two genders listed below.
# -> Femenino
# -> Masculino
genero <- data.frame(table(d2$genero), data.frame(prop.table(table(d2$genero)))$Freq*100)
colnames(genero) <- c("Genero", "Casos", "prop")
# `ypos` = cumulative midpoint of each slice, used to position its % label.
genero <- genero %>%
arrange(desc(Genero)) %>%
mutate(ypos = cumsum(prop) - 0.5*prop)
ggplot(genero, aes(x="", y=prop, fill=Genero)) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0) +
geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=10) +
theme_void() +
ggtitle("Porcentaje de casos Covid-19 por gรฉnero") +
guides(fill=guide_legend(title="Gรฉnero")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer()
#BarPlot por diagnostico
#rango de edad:
# NOTE(review): header leftover again; the categories are the diagnoses below.
# -> Ambulatorio
# -> Hospitalizado
# NOTE(review): `diag` shadows base R's diag(); harmless here but worth renaming.
diag <- data.frame(table(d2$diagnostico))
colnames(diag) <- c("Diagnostico", "Casos")
ggplot(data=diag, aes(x=Diagnostico, y=Casos, fill=Diagnostico)) +
geom_bar(stat="identity", width=0.7, color="white") +
geom_text(aes(label = Casos), vjust = -0.2, size=5) +
xlab("Diagnรณstico") + ylab("Nรบmero de casos") +
ggtitle("Nรบmero de casos Covid-19 por diagnรณstico") +
guides(fill=guide_legend(title="Diagnรณstico")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer()
#Piechart por alta medica
#alta mรฉdica:
# -> No
# -> Sรญ
# Pie chart of cases by medical discharge status; `ypos` positions slice labels.
alta <- data.frame(table(d2$alta), data.frame(prop.table(table(d2$alta)))$Freq*100)
colnames(alta) <- c("Alta_Mรฉdica", "Casos", "prop")
alta <- alta %>%
arrange(desc(Alta_Mรฉdica)) %>%
mutate(ypos = cumsum(prop) - 0.5*prop)
# NOTE(fix): removed the unused `alta2` data.frame. It duplicated this table
# with hard-coded, stale counts (50 / 526), was never plotted, and its
# `data.frame(x <- ...)` construction also leaked temporary global variables.
ggplot(alta, aes(x="", y=prop, fill=Alta_Mรฉdica )) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0) +
geom_text(aes(y = ypos, label = paste(format(round(prop, 1), nsmall = 1), "%", sep = "")), color = "black", size=10) +
theme_void() +
guides(fill=guide_legend(title="Alta mรฉdica")) +
ggtitle("Porcentaje de casos Covid-19 por alta mรฉdica") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer(direction = -1)
#BarPlot por semana de contagio
#semana de contagio:
# -> Semanda y cantidad de contagios en la semana
# Truncate each label to the "Nxx mon" prefix (7 chars), then append the year
# taken from the symptom-onset date in `inicio` (format assumed "YYYY-MM-...").
d2$semanaContagio <- substr(d2$semanaContagio,1,7)
for(i in 1:length(d2$semanaContagio)){
# A "5ta dic" week whose onset date falls in January belongs to the PREVIOUS
# year's December, hence the year-1 adjustment.
num = ifelse(d2$semanaContagio[i] == "5ta dic" && substr(d2$inicio[i],6,7) == "01", toString(as.numeric(substr(d2$inicio[i],1,4))-1), substr(d2$inicio[i],1,4))
d2$semanaContagio[i] <- paste(d2$semanaContagio[i], num);
}
#d2$semanaContagio <- ifelse(d2$semanaContagio == "5ta dic" && substr(d2$inicio,6,7) == "01", paste(d2$semanaContagio,toString(as.numeric(substr(d2$inicio,1,4))-1)), paste(d2$semanaContagio, substr(d2$inicio,1,4)))
semana_num <- c("1ra", "2da", "3ra", "4ta", "5ta") # numero de semanas
meses <- c("ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep", "oct", "nov", "dic") # meses
# Split each "Nxx mon YYYY" label into ordered factors so rows sort chronologically.
semana <- data.frame(table(d2$semanaContagio))
colnames(semana) <- c("Semana", "Casos")
semana$semana_num <- substr(semana$Semana,1,3)
semana$mes <- str_trim(substring(semana$Semana,4,7))
semana$year <- substring(semana$Semana,8)
semana$year <- factor(semana$year)
semana$semana_num <- factor(semana$semana_num, levels = semana_num)
semana$mes <- factor(semana$mes, levels = meses)
semana <- data.frame(semana[order(semana$year,semana$mes,semana$semana_num),])
# rev() so the earliest week ends up at the bottom of the horizontal chart.
semana$Semana <- factor(semana$Semana, levels = rev(semana$Semana))
#Generar csv con datos historicos de casos acumulados por semana de inicio de sรญntomas
semanaP <- data.frame(
Semana <- semana$Semana,
Casos <- semana$Casos
)
colnames(semanaP) <- c("Semana de Contagio", "Casos")
#write.csv(semanaP, file = "historicos.csv")
ggplot(data=semana, aes(x=Casos, y=Semana, fill=Semana)) +
geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(semana$Semana))) +
geom_text(aes(label = Casos), hjust = -0.2, size=5) +
xlab("Nรบmero de casos") + ylab("Semana") +
ggtitle("Nรบmero de casos Covid-19 por semana de contagio") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + theme(legend.position="none")
#BarPlot por semana de contagio por tipo de colaborados
#semana de contagio:
# -> Semanda y cantidad de contagios en la semana
# -> Tipo de colaborador
# Stacked bars: weekly cases broken down by collaborator type; the summed total
# per week is printed at the end of each stack.
semana2 <- data.frame(table(d2$semanaContagio, d2$tipo))
colnames(semana2) <- c("Semana", "Colaborador", "Casos")
semana2$semana_num <- substr(semana2$Semana,1,3)
semana2$mes <- str_trim(substring(semana2$Semana,5,7))
semana2$year <- substring(semana2$Semana,8)
semana2$semana_num <- factor(semana2$semana_num, levels = semana_num)
semana2$mes <- factor(semana2$mes, levels = meses)
semana2$year <- factor(semana2$year)
semana2 <- data.frame(semana2[order(semana2$year,semana2$mes, semana2$semana_num),])
# unique() because each week now appears once per collaborator type.
semana2$Semana <- factor(semana2$Semana, levels = unique(rev(semana2$Semana)))
ggplot(data = semana2, aes(x = Casos, y = Semana, fill = Colaborador)) +
geom_bar(stat = "identity", width=0.7, color="white") +
xlab("Nรบmero de casos") + ylab("Semana") +
ggtitle("Nรบmero de casos Covid-19 por semana de contagio contra tipo de colaborador") +
geom_text(aes(label = stat(x), group = Semana), stat = 'summary', fun = sum, hjust = -0.2, size=5) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
scale_fill_brewer()
#LinePlot por contagios acumulados
# -> lee una pestaรฑa de excel que cuenta con:
# -> Fecha (dia)
# -> Numero de contagios en esa fecha
# -> Acumlado hasta la fecha
# -> Acumulador por cada dรญa
# Area + line plot of cumulative confirmed cases over time; dates come as
# Excel serial numbers, hence origin = "1899-12-30".
casos <- as.data.frame(read_excel("Colaboradores Covid positivos 13_01_2021.xlsm", sheet = "Contagios"))
colnames(casos) <- c("Fecha", "Contagios", "Acumulados")
casosTotalAcum <- casos$Acumulados[length(casos$Acumulados)]
ggplot(casos, aes(x=as.Date(Fecha, origin="1899-12-30"), y=Acumulados)) +
geom_area( fill="#1769a8", alpha=0.5) +
geom_line(color="#184363", size=2) +
geom_point(size=3, color="#184363") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
xlab("Fecha") + ylab("Contagios acumulados") + # NOTE(fix): axis-label typo "Coantagios"
ggtitle("Nรบmero de casos Covid-19 acumulados") +
coord_cartesian(ylim = c(-10, casosTotalAcum + 50)) +
scale_color_gradient()
#BarPlot por contagios diarios con lรญnea de tendencia
# Daily new cases by symptom-onset date with a smoothed trend (stat_smooth).
ggplot(data=casos, aes(x=as.Date(Fecha, origin="1899-12-30"), y=Contagios, fill=Contagios)) +
geom_bar(stat = "identity", position="identity") +
ggtitle("Nรบmero de casos Covid-19 por fecha de inicio de sรญntomas") +
xlab("Fecha de inicio de sรญntomas") + ylab("Nรบmero de casos") +
scale_x_date(labels=date_format("%b %d"), breaks=date_breaks("2 weeks")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + stat_smooth(colour="#1769a8") +
scale_color_gradient()
#BarPlot por estado
# -> Contagios por estado
# Horizontal bar chart: cases per state, sorted ascending.
estado <- data.frame(sort(table(d2$estado), decreasing = FALSE))
colnames(estado) <- c("Estado", "Casos")
ggplot(data=estado, aes(x=Estado, y=Casos, fill=Estado)) +
# NOTE(review): the static `fill = getPalette(...)` below overrides the
# scale_fill_viridis_d() added later in this chain, which has no effect.
geom_bar(stat="identity", width=0.7, color="white", fill = getPalette(length(estado$Estado))) + coord_flip() +
geom_text(aes(label = Casos), hjust = -0.2) +
xlab("Estado") + ylab("Nรบmero de casos") +
scale_fill_viridis_d() +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + theme(legend.position="none") +
ggtitle("Nรบmero de casos Covid-19 por estado")
#<-------------------------Cruzadas------------------------------->
#BarPlot tipo de empleado contra el campus
# -> Cruza el numero de colaboradores de cada tipo por campus
# Stacked bars: collaborator type per campus, restricted to campuses with more
# than 5 total cases.
emp_camp2 <- data.frame(table(d2$tipo, d2$campus))
colnames(emp_camp2) <- c("Empleado", "Campus", "Casos")
campuss <- data.frame(table(d2$campus))
campuss <- campuss[campuss$Freq>5,]
emp_camp2 <- emp_camp2[emp_camp2$Campus %in% campuss$Var1,]
ggplot(emp_camp2, aes(fill=Empleado, y=Campus, x=Casos, label = Casos)) +
geom_bar(position="stack", stat="identity") +
xlab("Nรบmero de casos") + ylab("Campus") +
geom_text(data=subset(emp_camp2, Casos>0), size = 5, position = position_stack(vjust = 0.5), check_overlap = FALSE, colour="black", fontface = "bold") +
scale_fill_viridis_d(begin = 0.1, end = 0.9) +
guides(fill=guide_legend(title="Colaborador")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
ggtitle("Nรบmero de casos Covid-19 por tipo de colaborador contra campus (total mayor que 5)") +
scale_fill_brewer(direction = -1)
#BarPlot rango de edad contra campus
# -> Cruza el numero de colaboradores de cada rango de edad por campus
# Stacked bars: age bracket per campus, restricted to campuses with more than
# 5 total cases.
edad_camp2 <- data.frame(table(d2$rangoedad, d2$campus))
colnames(edad_camp2) <- c("Rango_Edad", "Campus", "Casos")
campus <- data.frame(table(d2$campus))
campus <- campus[campus$Freq>5,]
edad_camp2 <- edad_camp2[edad_camp2$Campus %in% campus$Var1,]
# NOTE(fix): removed the unused `Colors` column; the plot below hard-codes
# colour="black" in geom_text and never referenced it.
ggplot(edad_camp2, aes(fill=Rango_Edad, y=Campus, x=Casos, label = Casos)) +
geom_bar(position="stack", stat="identity") +
xlab("Nรบmero de casos") + ylab("Campus") +
geom_text(data=subset(edad_camp2, Casos>0), size = 5, position = position_stack(vjust = 0.5), check_overlap = FALSE, colour="black", fontface = "bold") +
scale_fill_viridis_d(begin = 0.1, end = 0.9) +
guides(fill=guide_legend(title="Rango de edad")) +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
ggtitle("Nรบmero de casos Covid-19 por rango de edad contra campus (total mayor que 5)") +
scale_fill_brewer(direction = -1)
#BarPlot institucion contra tipo de colaborador
# -> Cruza el numero de colaboradores de cada tipo por institucion
# Column-wise proportions (prop.table margin 2): each institution's bar sums to 1.
inst_colab <- data.frame(prop.table(table(d2$tipo, d2$institucion),2))
colnames(inst_colab) <- c("Colaborador", "Institucion", "Casos")
ggplot(inst_colab, aes(fill=Colaborador, y=Institucion, x=Casos)) +
geom_bar(position="stack", stat="identity") +
scale_fill_viridis_d() +
xlab("Porcentaje de casos") + ylab("Instituciรณn") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) +
ggtitle("Porcentaje de casos Covid-19 por instituciรณn contra tipo de colaborador") +
scale_fill_brewer()
#<-------------------------graficas filtradas------------------------------->
#BarPlot hospitalizados por instituciรณn
# -> Numero de colaboradores hospitalizados por cada institucion
# Horizontal bar chart: hospitalized cases per institution.
inst_hos <- data.frame(table(d2$institucion[d2$diagnostico == "Hospitalizado"]))
colnames(inst_hos) <- c("Institucion", "Casos")
ggplot(inst_hos, aes(fill=Institucion, y=Casos, x=Institucion)) +
geom_bar(position="stack", stat="identity", fill = getPalette(length(inst_hos$Institucion))) + coord_flip() +
xlab("Instituciรณn") + ylab("Nรบmero de casos hospitalizados") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + theme(legend.position="none") +
geom_text(aes(label = Casos), hjust = -0.2, size=5) +
ggtitle("Nรบmero de casos Covid-19 hospitalizados por instituciรณn")
#BarPlot hospitalizados por campus
# -> Numero de colaboradores hospitalizados por cada campus
# Horizontal bar chart: hospitalized cases per campus, bars labelled with counts.
campus_hos <- data.frame(table(d2$campus[d2$diagnostico == "Hospitalizado"]))
colnames(campus_hos) <- c("Campus", "Casos")
ggplot(campus_hos, aes(fill=Campus, y=Casos, x=Campus)) +
geom_bar(position="stack", stat="identity", fill = getPalette(length(campus_hos$Campus))) + coord_flip() +
xlab("Campus") + ylab("Nรบmero de casos hospitalizados") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=15)) + theme(legend.position="none") +
geom_text(aes(label = Casos), hjust = -0.2, size=5) +
ggtitle("Nรบmero de casos Covid-19 hospitalizados por campus")
#Bar Plot dato generales (casos totales, %de casos hospitalizados, diferencia de con semana anterior, positividad general)
data <- data.frame(read_excel("Tasas_y_Poblacion_Tec_16072020.xlsx", sheet = "Poblaciรณn Tec"))
# NOTE(fix): re-reading the sheet discards the Campus-name normalization done
# earlier in the script, so re-apply it here; later sections join on
# data$Campus and would otherwise miss e.g. "Santa Fe" vs "Santa Fรฉ".
data$Campus <- ifelse(data$Campus == "Central de Veracruz", "Veracruz", data$Campus)
data$Campus <- ifelse(data$Campus == "Santa Fe", "Santa Fรฉ", data$Campus)
data$Campus <- ifelse(data$Campus == "Sonora Norte", "Sonora", data$Campus)
#BarPlot de porcentage de colaboradores hospitalizados, fallecidos y contagiados con respecto a la poblacion total de colabadores
# Percentages: hospitalized and deceased over confirmed cases, and confirmed
# cases over the total staff population.
general_data2 <- data.frame(
Dato = c("Porcentaje de hospitalizaciรณn", "Porcentaje de fallecimiento", "Porcentaje de contagios (total)"),
Casos = c(dim(data.frame(d2[d2$diagnostico == "Hospitalizado",]))[1]/dim(d2)[1]*100, dim(d2[!is.na(d2$fechaFallecimiento),])[1]/dim(d2)[1]*100, dim(d2)[1]/sum(data$TOTAL.Colaboradores)*100),
stringsAsFactors = FALSE
)
ggplot(general_data2, aes(fill=Dato, y=Casos, x=Dato)) +
geom_bar(position="stack", stat="identity", fill = getPalette(3)) +
ylab("") + xlab("") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = paste(round(Casos,2), "%", sep="")), vjust = -0.2, size=8) +
ggtitle("Porcentajes de datos generales Covid-19")
#BarPlot semanas con menor y mayor cantidad de contagios, semana anterior, semana actual y la diferencia entre las รบltimas dos
# Min/max weekly counts, previous vs. current week, and their difference.
# Relies on `semana` (weekly counts, chronologically ordered) built earlier.
general_data3 <- data.frame(
Semana = c("Semana min", "Semana max", "Semana anterior", "Semana actual", "Diferencia"),
Casos = c(min(semana$Casos), max(semana$Casos), semana$Casos[dim(semana)[1]-1], semana$Casos[dim(semana)[1]], semana$Casos[dim(semana)[1]]-semana$Casos[dim(semana)[1]-1]),
stringsAsFactors = FALSE
)
# Fix the display order of the bars (factor levels, not alphabetical).
semanas <- c("Semana min", "Semana max", "Semana anterior", "Semana actual", "Diferencia")
general_data3$Semana <- factor(general_data3$Semana, levels = semanas)
ggplot(general_data3, aes(fill=Semana, y=Casos, x=Semana)) +
geom_bar(position="stack", stat="identity", fill = getPalette(5)) +
ylab("") + xlab("") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = Casos), vjust = ifelse(general_data3$Semana == "Diferencia", -5.2, -0.2), size=8) +
ggtitle("Datos respecto a contagios semanales Covid-19")
#BarPlot de casos totales en el campus e instituciones con mayor cantidad de casos, asi como los casos totales
# Campus and institution with the most cases, plus the overall total.
max_campus <- data.frame(table(d2$campus))
max_campus <- max_campus[order(max_campus$Freq),]
max_inst <- data.frame(table(d2$institucion))
max_inst <- max_inst[order(max_inst$Freq),]
# NOTE(fix): the original indexed with `dim(max_campus)` (a length-2 vector,
# c(nrow, ncol)) and then took [1]; it only produced the right value by
# accident. nrow() is the intended index of the last (largest, after the
# ascending sort) row.
general_data4 <- data.frame(
Dato = c(paste("Campus max (", max_campus$Var1[nrow(max_campus)], ")", sep=""), paste("Instituciรณn max (", max_inst$Var1[nrow(max_inst)], ")", sep=""), "Casos totales"),
Casos = c(max_campus$Freq[nrow(max_campus)], max_inst$Freq[nrow(max_inst)], dim(d2)[1]),
stringsAsFactors = FALSE
)
# Preserve this bar order in the plot via factor levels.
data4_order <- c(paste("Campus max (", max_campus$Var1[nrow(max_campus)], ")", sep=""), paste("Instituciรณn max (", max_inst$Var1[nrow(max_inst)], ")", sep=""), "Casos totales")
general_data4$Dato <- factor(general_data4$Dato, data4_order)
ggplot(general_data4, aes(fill=Dato, y=Casos, x=Dato)) +
geom_bar(position="stack", stat="identity", fill = getPalette(3)) +
ylab("") + xlab("") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = Casos), vjust = -0.2, size=8) +
ggtitle("Datos de casos totales Covid-19")
#<-------------------------graficas porcentuales------------------------------->
#Barplot de tasa de contagio por cada campus con respecto a su poblacion total de colaboradores
# Contagion rate per campus = cases / total staff on that campus.
campus_comp <- d2[d2$campus %in% data$Campus,]
campus_comp <- data.frame(table(campus_comp$campus))
campus_totales <- data[data$Campus %in% campus_comp$Var1,]
campus_totales <- data.frame(campus_totales$Campus, campus_totales$TOTAL.Colaboradores)
# NOTE(fix): the original sorted `campus_totales` by name and relied on that
# row order matching the (locale-dependent) level order produced by table().
# match() joins each campus to its total explicitly and is robust to ordering.
campus_comp$total <- campus_totales$campus_totales.TOTAL.Colaboradores[match(campus_comp$Var1, campus_totales$campus_totales.Campus)]
colnames(campus_comp) <- c("Campus", "Casos", "Total")
colourCount = length(campus_comp$Campus)
getPalette = colorRampPalette(brewer.pal(9, "Blues"))
ggplot(campus_comp, aes(fill=Campus, y=(Casos/Total*100), x=Campus)) +
geom_bar(position="stack", stat="identity", fill = getPalette(colourCount)) +
ylab("") + xlab("") + coord_flip() +
xlab("Campus") + ylab("Porcentaje de tasa de contagio") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 18)) +
theme(text = element_text(size=17)) + theme(legend.position="none") +
geom_text(aes(label = paste(round(Casos/Total*100,2), "%", sep = "")), hjust = -0.05, size=5) +
ggtitle("Tasa de contagio por campus Covid-19")
#<-------------------------CSV table--------------------------------->
# Generate a table of case counts per collaborator type per campus and write
# it out as CSV for downstream use.
camp_colab <- table(d2$campus, d2$tipo)
write.csv(camp_colab, file = "campus_contra_colaborador.csv")
|
#' Format a numeric proportion column as percentage strings
#'
#' Takes a data.frame column (or plain numeric vector) with proportions
#' (possibly from \code{prop.table}) and returns a character vector with the
#' values rounded and, optionally, a percent sign appended.
#'
#' @param column numeric vector (or data.frame column) of proportions
#' @param percent_symbol logical; append the percent symbol? (default TRUE)
#' @param round_to number of decimal places to round to (default 2)
#' @param sep separator between the number and the percent symbol
#' @return character vector of formatted values
#' @export
procentuj <- function(column, percent_symbol=TRUE, round_to=2, sep=" "){
  # Bug fix: `percent_symbol` was previously ignored and "%" was always
  # appended; honour it so percent_symbol = FALSE returns bare numbers.
  rounded <- round(column, round_to)
  if (!percent_symbol) {
    return(as.character(rounded))
  }
  paste(rounded, "%", sep = sep)
}
#'
#' Format a numeric proportion column as percentage strings
#'
#' @param column numeric vector or data.frame column of proportions
#' @param percent_symbol logical; append the percent symbol? (default TRUE)
#' @param round_to number of decimal places for rounding (default 2)
#' @param sep separator between the number and the percent symbol
#' @return character vector with the formatted values
#' @export
procentuj <- function(column, percent_symbol=TRUE, round_to=2, sep=" "){
  # Bug fix: honour `percent_symbol` (it used to be silently ignored and "%"
  # was always appended). Default arguments preserve the old behaviour.
  suffix <- if (percent_symbol) "%" else ""
  separator <- if (percent_symbol) sep else ""
  paste(round(column, round_to), suffix, sep = separator)
}
# find target lake in distribution
# NOTE(review): this WBIC id is never read below -- each plotting function
# hard-codes its own `target = '1835300'`; keep the two in sync (or have the
# functions read this variable) if the target lake ever changes.
WBIC <- '1835300'
# Renders publication Figure 07 to ../Figure_07.pdf: two time-series panels
# (July surface temperature (a), stratification onset (c)) plus two boxplots
# (b, d) of every other lake's coherence with the target lake.
#
# years: simulation years for the onset panel.
# corrs: optional list(corr.july=, corr.onset=) of precomputed correlations;
#        when missing they are recomputed inside the panel helpers (slow).
# Returns list(corr.july=, corr.onset=) so later calls can reuse them.
#
# NOTE(review): several layout/style values are assigned with `<<-`
# (plot_colors, lab.perc, par.mgp, plot.order, line.wd, pan.h, pan.w) because
# the panel helpers below read them as globals.
plot.fig9.GCB <- function(years,corrs){
  # text/axis scaling and tick definitions shared by both panels
  cex.box = 1
  cex.ttl = 1
  tck <- 0.02
  plt.rng.x <- c(1978.8,2011.2)
  plt.rng.y <- data.frame("onset"=c(83,154),"july"=c(18.5,28.5))
  tick.x <- seq(1960,2020,5)
  tick.y <- data.frame("onset"=c(NA, 80,100,120,140,160,NA),"july"=c(18,20,22,24,26,28,30))
  plot_colors <<- c("grey30", "grey80","black","firebrick")
  lab.perc <<- 12 # label (e.g., (a) spacing percent from corner)
  par.mgp <<- data.frame(x=c(1.2,.1,0),y=c(1.3,.1,0),b=c(.9,.1,0))
  plot.order <<- c("medium","small","large")
  line.wd <<- 2 # plot line width
  # figure geometry (inches)
  font.size = 10
  fig.w <- 3.14961
  pan.h <<- 1.1
  v.spc <- 0.05 # inches of vertical space to separate panels (top only)
  h.spc <- 0.05 # inches of horizontal space to separate panels (right only)
  l.mar <- 0.0
  r.mar <- 0.0
  t.mar <- 0.00
  b.mar <- 0.1
  left.spc <- 0.3 #0.1
  pan.w <<- (fig.w-left.spc*2-h.spc*2)/3
  # NOTE(review): this immediately overwrites the pan.w computed above.
  pan.w <<- fig.w-left.spc-h.spc
  fig.h <- pan.h*2+v.spc*2+b.mar+t.mar
  pdf(file = "../Figure_07.pdf",title='Read et al. figure 07',
      width=fig.w, height=fig.h)
  # Build the layout() matrix: tall columns for the two time-series panels,
  # a narrower right-hand column for the two boxplots.
  divs <- 9
  panels = NULL
  for (j in 1:2){
    for (i in 1:divs){
      panels <- rbind(panels,c(j,j,j,j+2))
    }
  }
  panels[divs+1,4]=3
  panels <- rbind(panels,c(j,j,j,j+2))# last one!
  panels <- rbind(panels,c(j,j,j,j+2))# last one!
  panels  # no-op inside a function (no auto-print); left for interactive use
  layout(panels)
  par(mai=c(0,left.spc, v.spc, 0),mgp=par.mgp$x,omi=c(0,l.mar,t.mar,r.mar),ps=font.size)
  # Panel (a): recompute coherence only when no cached `corrs` was supplied.
  # NOTE(review): the undefined names col/label/tick.x.lab passed below are
  # never forced by the helpers, so lazy evaluation keeps this from erroring.
  if (missing(corrs)){
    corr.july <- plot.july(years=seq(1979,2011,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=T)
  } else {
    corr.july= corrs$corr.july
    corr.onset = corrs$corr.onset
    plot.july(years=seq(1979,2011,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F)
  }
  par(mai=c(fig.h/(dim(panels)[1])*2,left.spc, v.spc, 0))
  # Panel (c): stratification onset
  if (missing(corrs)){
    corr.onset <- plot.onset(years=years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=T)
  } else {
    plot.onset(years=years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F)
  }
  # Panels (b) and (d): coherence boxplots
  par(mgp=par.mgp$b,mai=c(fig.h/(dim(panels)[1])*2-v.spc,left.spc*.9, v.spc, h.spc))
  plot.box(data.frame(data=corr.july),tck,cex.box,cex.ttl,xlab='July temp.',lab='(b)')
  par(mgp=par.mgp$b,mai=c(fig.h/(dim(panels)[1])*2,left.spc*.9, 0, h.spc))
  plot.box(data.frame(data=corr.onset),tck,cex.box,cex.ttl,xlab='Strat. onset',lab='(d)')
  dev.off()
  return(list(corr.july=corr.july,corr.onset=corr.onset))
}
# Draws one coherence boxplot panel (panels b/d of the figure).
# box.data: data.frame with a single column of correlation coefficients.
# tck/cex.box/cex.ttl: tick length and text scaling passed by the caller.
# xlab/lab: x-axis caption and the panel letter (e.g. '(b)').
# Side effects: draws on the current device and cat()s how many lakes fall
# below the significance threshold.
plot.box <- function(box.data,tck,cex.box,cex.ttl,xlab,lab){
  t.crit <- 0.42 # ONE-tailed critical at 0.01 (testing positive correlation)
  ylabel = expression(paste("Coherence (",italic(rho),")"))
  boxplot(box.data,ylab=ylabel, axes=F,
          ylim=c(0,1),xlim=c(0,2),
          outline=F,width=.45,
          range=1,
          cex.lab=cex.ttl)#xlab=names(box.data))
  tck = tck*2 # box panels use double-length ticks
  par(mgp=c(0,.1,0))
  title(xlab=xlab,cex.lab=cex.ttl)
  # axes 1/3 are drawn off-range so only the frame shows; 2/4 carry ticks
  axis(1,las=1, at=c(-100,100),cex.axis=cex.box, tck=1e-9,labels=NA)
  axis(3,las=1, at=c(-100,100),cex.axis=cex.box, tck=1e-9,labels=NA)
  axis(2,las=1, at=seq(-1,2,.2),cex.axis=cex.box, tck=tck)
  axis(4,las=1, at=seq(-1,2,.2),cex.axis=cex.box, tck=tck,labels=NA)
  abline(h=t.crit,lty="1342",lwd=0.65)# ONE-tailed critical at 0.01 (testing positive correlation)
  # Consistency fix: use the named constant t.crit here instead of repeating
  # the literal 0.42 (the two could silently drift apart).
  cat(sum(box.data<t.crit)); cat(' of '); cat(length(box.data[[1]])); cat(' below t.crit\n')
  label.loc <- get.text.location(par(),h=1,w=.45)
  text(label.loc[1],label.loc[2],lab)
}
# Draws panel (a): July mean surface temperature through time.
# Plots the inter-quartile ribbon and the all-lake median, plus the target
# lake's own trace.  When corr=TRUE also computes, for every other lake, the
# Pearson correlation of its July series with the target lake's (slow).
# Returns the correlation vector, or NA when corr=FALSE.
# NOTE(review): col, label and tick.x.lab are accepted but never used; lazy
# evaluation is what makes it safe for the caller to pass undefined names.
plot.july <- function(years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F){
  #source('Libraries/GLM.functions.R')
  target = '1835300' # WBIC of the target lake
  #par(mgp=c(.9,.06,0))
  # empty plot frame; series are layered on below
  # NOTE(review): "ยฐC" in the y label looks like a mis-encoded degree sign;
  # verify the file encoding before changing it.
  plot(c(0,1),c(0,1), type="l", col=NA,
       axes=F,
       ylim=plt.rng.y$july, xlim=plt.rng.x,
       ylab="July temperature (ยฐC)",
       xlab=NA,
       xaxs="i", yaxs="i",cex.lab=cex.ttl)
  sens.table <- read.delim("../supporting files/omg.huge.output.tsv",sep='\t',header=T)
  names(sens.table)
  x.vals = years
  y.vals = years*NA
  y.vals.1 = y.vals
  y.vals.2 = y.vals
  target.lake = y.vals
  #other.lakes
  # per-year median, inter-quartile bounds, and the target lake's value
  for (i in 1:length(x.vals)){
    use.i = sens.table$year==years[i]
    use.lk = sens.table$year==years[i] & sens.table$lakeid==target
    y.vals[i] <- median(sens.table$mean_surf_jul[use.i],na.rm=T)
    y.vals.1[i] <- quantile(x=sens.table$mean_surf_jul[use.i],probs=c(.25,.75),na.rm=T)[[1]]
    y.vals.2[i] <- quantile(x=sens.table$mean_surf_jul[use.i],probs=c(.25,.75),na.rm=T)[[2]]
    target.lake[i] <- sens.table$mean_surf_jul[use.lk]
  }
  if (corr){
    # Pearson correlation of every other lake's series against the target.
    other.lakes = unique(sens.table$lakeid[sens.table$lakeid!=target])
    corr = vector(length=length(other.lakes))
    for (i in 1:length(other.lakes)){
      lke = other.lakes[i]
      vals.comp = vector(length=length(years))
      for (j in 1:length(x.vals)){
        use.lk = sens.table$year==years[j] & sens.table$lakeid==lke
        if (any(use.lk)){
          vals.comp[j] = sens.table$mean_surf_jul[use.lk]
        } else {
          vals.comp[j] = NA
        }
      }
      print(i) # progress indicator -- this loop is slow
      corr[i] = cor.test(y=vals.comp,x=target.lake, method = "pearson")$estimate[[1]]#summary(lm(vals.comp~target.lake))$r.squared
    }
  } else {
    corr= NA
  }
  # IQR ribbon, all-lake median (dashed) and the target lake (solid)
  polygon(x=c(x.vals,rev(x.vals)), y=c(y.vals.1,rev(y.vals.2)),
          col = plot_colors[2],border=NA)
  #lines(x.vals,y.vals.1,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  #lines(x.vals,y.vals.2,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  lines(x.vals,y.vals,col=plot_colors[1],type='l',lwd=1.2,lty="longdash")
  lines(x.vals,target.lake,col=plot_colors[3],type='l',lwd=line.wd)
  label.loc <- get.text.location(par(),h=1,w=2)
  text(label.loc[1],label.loc[2],'(a)')
  par(mgp=par.mgp$y)
  # top panel: suppress x tick labels (shared axis with panel (c) below)
  axis(1,las=1, at=tick.x,cex.axis=cex.box, tck=tck,labels=NA)
  axis(3,at=tick.x,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  par(mgp=par.mgp$x)
  axis(2,las=1, at=tick.y$july,cex.axis=cex.box, tck=tck)
  axis(4,at=tick.y$july,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  return(corr)
}
# Draws panel (c): stratification onset (day of year) through time.
# Reads one strat.onset<year>.tsv file per year, plots the inter-quartile
# ribbon and all-lake median plus the target lake's trace, and (when
# corr=TRUE) computes each other lake's Pearson correlation with the target.
# Returns the correlation vector, or NA when corr=FALSE.
# NOTE(review): col, label and tick.x.lab are accepted but never used
# (safe only because of lazy evaluation at the call site).
plot.onset <- function(years=seq(1979,1988,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F){
  #source('Libraries/GLM.functions.R')
  target = '1835300' # WBIC of the target lake
  #par(mgp=c(.9,.06,0))
  # empty plot frame; series are layered on below
  plot(c(0,1),c(0,1), type="l", col=NA,
       axes=F,
       ylim=plt.rng.y$onset, xlim=plt.rng.x,
       ylab="Stratification onset (DoY)",
       xlab=NA,
       xaxs="i", yaxs="i",cex.lab=cex.ttl)
  x.vals = years
  y.vals = years*NA
  y.vals.1 = y.vals
  y.vals.2 = y.vals
  target.lake = y.vals
  # build first year -- used only to enumerate the other lakes present
  file.name = '../supporting files/strat.onset1979.tsv'
  sens.table <- read.delim(file.name,sep='\t',header=T)
  other.lakes <- sens.table$WBIC[sens.table$WBIC!=target & !is.na(sens.table$strat.onset.DoY)]
  other.vals <- matrix(nrow=length(x.vals),ncol=length(other.lakes))
  # one TSV per year: collect median, IQR, target value and (optionally) the
  # per-lake values needed for the correlations
  for (i in 1:length(x.vals)){
    file.name = paste('../supporting files/strat.onset',years[i],'.tsv',sep='')
    sens.table <- read.delim(file.name,sep='\t',header=T)
    use.lk = sens.table$WBIC==target
    y.vals[i] <- median(sens.table$strat.onset.DoY,na.rm=T)
    y.vals.1[i] <- quantile(x=sens.table$strat.onset.DoY,probs=c(.25,.75),na.rm=T)[[1]]
    y.vals.2[i] <- quantile(x=sens.table$strat.onset.DoY,probs=c(.25,.75),na.rm=T)[[2]]
    target.lake[i] <- sens.table$strat.onset.DoY[use.lk]
    if (corr){
      for (j in 1:length(other.lakes)){
        use.i = sens.table$WBIC==other.lakes[j]
        if (any(use.i)){
          other.vals[i,j] <- sens.table$strat.onset.DoY[use.i]
        } else {
          other.vals[i,j] = NA
        }
      }
    }
  }
  if (corr){
    corr = vector(length=length(other.lakes))
    for (i in 1:length(other.lakes)){
      print(i) # progress indicator -- this loop is slow
      corr[i] = cor.test(y=other.vals[,i],x=target.lake, method = "pearson")$estimate[[1]]#summary(lm(other.vals[,i]~target.lake))$r.squared
    }
  } else {
    corr= NA
  }
  # IQR ribbon, all-lake median (dashed) and the target lake (solid)
  polygon(x=c(x.vals,rev(x.vals)), y=c(y.vals.1,rev(y.vals.2)),
          col = plot_colors[2],border=NA)
  #lines(x.vals,y.vals.1,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  #lines(x.vals,y.vals.2,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  lines(x.vals,y.vals,col=plot_colors[1],type='l',lwd=1.2,lty="longdash")
  lines(x.vals,target.lake,col=plot_colors[3],type='l',lwd=line.wd)
  label.loc <- get.text.location(par(),h=1,w=2)
  text(label.loc[1],label.loc[2],'(c)')
  par(mgp=par.mgp$y)
  # bottom panel: x tick labels shown here (suppressed in panel (a))
  axis(1,las=1, at=tick.x,cex.axis=cex.box, tck=tck)
  axis(3,at=tick.x,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  par(mgp=par.mgp$x)
  axis(2,las=1, at=tick.y$onset,cex.axis=cex.box, tck=tck)
  axis(4,at=tick.y$onset,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  return(corr)
}
# Computes plot coordinates for a panel label (e.g. "(a)") offset `perc`
# percent in from the upper-left corner of the current plot region.
# par: a par()-style list whose $usr gives c(xmin, xmax, ymin, ymax).
# h/w: panel height/width used to keep the x offset visually comparable
#      across differently shaped panels.
# Returns c(x, y).
get.text.location <- function(par, perc = 9, h = 1, w = 1) {
  usr <- par$usr
  span.x <- usr[2] - usr[1]
  span.y <- usr[4] - usr[3]
  # keep the original left-to-right arithmetic order for bit-identical output
  x.pos <- usr[1] + span.x * perc / 100 * (h / w)
  y.pos <- usr[4] - span.y * perc / 100
  c(x.pos, y.pos)
}
# Build Figure 07.  Passing a previously computed `corrs` skips the slow
# correlation loops.  On a fresh session `corrs` does not exist yet, and the
# unguarded call `plot.fig9.GCB(..., corrs = corrs)` would fail as soon as
# the argument is forced inside the function; guard with exists() so the
# first run computes the correlations and later runs reuse them.
if (exists("corrs")) {
  corrs <- plot.fig9.GCB(years = seq(1979, 2011, 1), corrs = corrs)
} else {
  corrs <- plot.fig9.GCB(years = seq(1979, 2011, 1))
}
# find target lake in distribution
# NOTE(review): WBIC of the target lake; never read below -- the plotting
# functions hard-code the same id in a local `target`.
WBIC <- '1835300'
# Renders publication Figure 07 to ../Figure_07.pdf (two time-series panels
# plus two coherence boxplots); returns list(corr.july=, corr.onset=).
# NOTE(review): this concatenated dump contains the same definition twice --
# keep both copies in sync.
plot.fig9.GCB <- function(years,corrs){
  cex.box = 1
  cex.ttl = 1
  tck <- 0.02
  plt.rng.x <- c(1978.8,2011.2)
  plt.rng.y <- data.frame("onset"=c(83,154),"july"=c(18.5,28.5))
  tick.x <- seq(1960,2020,5)
  tick.y <- data.frame("onset"=c(NA, 80,100,120,140,160,NA),"july"=c(18,20,22,24,26,28,30))
  # globals (<<-) read by the panel helpers below
  plot_colors <<- c("grey30", "grey80","black","firebrick")
  lab.perc <<- 12 # label (e.g., (a) spacing percent from corner)
  par.mgp <<- data.frame(x=c(1.2,.1,0),y=c(1.3,.1,0),b=c(.9,.1,0))
  plot.order <<- c("medium","small","large")
  line.wd <<- 2 # plot line width
  font.size = 10
  fig.w <- 3.14961
  pan.h <<- 1.1
  v.spc <- 0.05 # inches of vertical space to separate panels (top only)
  h.spc <- 0.05 # inches of horizontal space to separate panels (right only)
  l.mar <- 0.0
  r.mar <- 0.0
  t.mar <- 0.00
  b.mar <- 0.1
  left.spc <- 0.3 #0.1
  pan.w <<- (fig.w-left.spc*2-h.spc*2)/3
  pan.w <<- fig.w-left.spc-h.spc # NOTE(review): overwrites the line above
  fig.h <- pan.h*2+v.spc*2+b.mar+t.mar
  pdf(file = "../Figure_07.pdf",title='Read et al. figure 07',
      width=fig.w, height=fig.h)
  # build the layout() matrix for the four panels
  divs <- 9
  panels = NULL
  for (j in 1:2){
    for (i in 1:divs){
      panels <- rbind(panels,c(j,j,j,j+2))
    }
  }
  panels[divs+1,4]=3
  panels <- rbind(panels,c(j,j,j,j+2))# last one!
  panels <- rbind(panels,c(j,j,j,j+2))# last one!
  panels
  layout(panels)
  par(mai=c(0,left.spc, v.spc, 0),mgp=par.mgp$x,omi=c(0,l.mar,t.mar,r.mar),ps=font.size)
  # recompute correlations only when no cached `corrs` was supplied
  if (missing(corrs)){
    corr.july <- plot.july(years=seq(1979,2011,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=T)
  } else {
    corr.july= corrs$corr.july
    corr.onset = corrs$corr.onset
    plot.july(years=seq(1979,2011,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F)
  }
  par(mai=c(fig.h/(dim(panels)[1])*2,left.spc, v.spc, 0))
  if (missing(corrs)){
    corr.onset <- plot.onset(years=years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=T)
  } else {
    plot.onset(years=years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F)
  }
  par(mgp=par.mgp$b,mai=c(fig.h/(dim(panels)[1])*2-v.spc,left.spc*.9, v.spc, h.spc))
  plot.box(data.frame(data=corr.july),tck,cex.box,cex.ttl,xlab='July temp.',lab='(b)')
  par(mgp=par.mgp$b,mai=c(fig.h/(dim(panels)[1])*2,left.spc*.9, 0, h.spc))
  plot.box(data.frame(data=corr.onset),tck,cex.box,cex.ttl,xlab='Strat. onset',lab='(d)')
  dev.off()
  return(list(corr.july=corr.july,corr.onset=corr.onset))
}
# Draws one coherence boxplot panel (panels b/d).  Side effects: draws on the
# current device and cat()s how many lakes fall below the threshold.
# NOTE(review): this concatenated dump contains the same definition twice --
# keep both copies in sync.
plot.box <- function(box.data,tck,cex.box,cex.ttl,xlab,lab){
  t.crit <- 0.42 # ONE-tailed critical at 0.01 (testing positive correlation)
  ylabel = expression(paste("Coherence (",italic(rho),")"))
  boxplot(box.data,ylab=ylabel, axes=F,
          ylim=c(0,1),xlim=c(0,2),
          outline=F,width=.45,
          range=1,
          cex.lab=cex.ttl)#xlab=names(box.data))
  tck = tck*2
  par(mgp=c(0,.1,0))
  title(xlab=xlab,cex.lab=cex.ttl)
  axis(1,las=1, at=c(-100,100),cex.axis=cex.box, tck=1e-9,labels=NA)
  axis(3,las=1, at=c(-100,100),cex.axis=cex.box, tck=1e-9,labels=NA)
  axis(2,las=1, at=seq(-1,2,.2),cex.axis=cex.box, tck=tck)
  axis(4,las=1, at=seq(-1,2,.2),cex.axis=cex.box, tck=tck,labels=NA)
  abline(h=t.crit,lty="1342",lwd=0.65)# ONE-tailed critical at 0.01 (testing positive correlation)
  # Consistency fix: use the named constant t.crit instead of repeating 0.42.
  cat(sum(box.data<t.crit)); cat(' of '); cat(length(box.data[[1]])); cat(' below t.crit\n')
  label.loc <- get.text.location(par(),h=1,w=.45)
  text(label.loc[1],label.loc[2],lab)
}
# Draws panel (a): July mean surface temperature (ribbon = IQR across lakes,
# dashed = median, solid = target lake); optionally computes each other
# lake's Pearson correlation with the target.  Returns correlations or NA.
# NOTE(review): duplicate definition (this dump contains the file twice).
plot.july <- function(years,col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F){
  #source('Libraries/GLM.functions.R')
  target = '1835300'
  #par(mgp=c(.9,.06,0))
  plot(c(0,1),c(0,1), type="l", col=NA,
       axes=F,
       ylim=plt.rng.y$july, xlim=plt.rng.x,
       ylab="July temperature (ยฐC)",
       xlab=NA,
       xaxs="i", yaxs="i",cex.lab=cex.ttl)
  sens.table <- read.delim("../supporting files/omg.huge.output.tsv",sep='\t',header=T)
  names(sens.table)
  x.vals = years
  y.vals = years*NA
  y.vals.1 = y.vals
  y.vals.2 = y.vals
  target.lake = y.vals
  #other.lakes
  for (i in 1:length(x.vals)){
    use.i = sens.table$year==years[i]
    use.lk = sens.table$year==years[i] & sens.table$lakeid==target
    y.vals[i] <- median(sens.table$mean_surf_jul[use.i],na.rm=T)
    y.vals.1[i] <- quantile(x=sens.table$mean_surf_jul[use.i],probs=c(.25,.75),na.rm=T)[[1]]
    y.vals.2[i] <- quantile(x=sens.table$mean_surf_jul[use.i],probs=c(.25,.75),na.rm=T)[[2]]
    target.lake[i] <- sens.table$mean_surf_jul[use.lk]
  }
  if (corr){
    other.lakes = unique(sens.table$lakeid[sens.table$lakeid!=target])
    corr = vector(length=length(other.lakes))
    for (i in 1:length(other.lakes)){
      lke = other.lakes[i]
      vals.comp = vector(length=length(years))
      for (j in 1:length(x.vals)){
        use.lk = sens.table$year==years[j] & sens.table$lakeid==lke
        if (any(use.lk)){
          vals.comp[j] = sens.table$mean_surf_jul[use.lk]
        } else {
          vals.comp[j] = NA
        }
      }
      print(i)
      corr[i] = cor.test(y=vals.comp,x=target.lake, method = "pearson")$estimate[[1]]#summary(lm(vals.comp~target.lake))$r.squared
    }
  } else {
    corr= NA
  }
  polygon(x=c(x.vals,rev(x.vals)), y=c(y.vals.1,rev(y.vals.2)),
          col = plot_colors[2],border=NA)
  #lines(x.vals,y.vals.1,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  #lines(x.vals,y.vals.2,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  lines(x.vals,y.vals,col=plot_colors[1],type='l',lwd=1.2,lty="longdash")
  lines(x.vals,target.lake,col=plot_colors[3],type='l',lwd=line.wd)
  label.loc <- get.text.location(par(),h=1,w=2)
  text(label.loc[1],label.loc[2],'(a)')
  par(mgp=par.mgp$y)
  axis(1,las=1, at=tick.x,cex.axis=cex.box, tck=tck,labels=NA)
  axis(3,at=tick.x,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  par(mgp=par.mgp$x)
  axis(2,las=1, at=tick.y$july,cex.axis=cex.box, tck=tck)
  axis(4,at=tick.y$july,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  return(corr)
}
# Draws panel (c): stratification onset (DoY); one strat.onset<year>.tsv per
# year.  Optionally computes each other lake's Pearson correlation with the
# target.  Returns correlations or NA.
# NOTE(review): duplicate definition (this dump contains the file twice).
plot.onset <- function(years=seq(1979,1988,1),col,plt.rng.x,plt.rng.y,cex.ttl,cex.box,tick.x,tick.y,label,tck,tick.x.lab,corr=F){
  #source('Libraries/GLM.functions.R')
  target = '1835300'
  #par(mgp=c(.9,.06,0))
  plot(c(0,1),c(0,1), type="l", col=NA,
       axes=F,
       ylim=plt.rng.y$onset, xlim=plt.rng.x,
       ylab="Stratification onset (DoY)",
       xlab=NA,
       xaxs="i", yaxs="i",cex.lab=cex.ttl)
  x.vals = years
  y.vals = years*NA
  y.vals.1 = y.vals
  y.vals.2 = y.vals
  target.lake = y.vals
  # build first year
  file.name = '../supporting files/strat.onset1979.tsv'
  sens.table <- read.delim(file.name,sep='\t',header=T)
  other.lakes <- sens.table$WBIC[sens.table$WBIC!=target & !is.na(sens.table$strat.onset.DoY)]
  other.vals <- matrix(nrow=length(x.vals),ncol=length(other.lakes))
  for (i in 1:length(x.vals)){
    file.name = paste('../supporting files/strat.onset',years[i],'.tsv',sep='')
    sens.table <- read.delim(file.name,sep='\t',header=T)
    use.lk = sens.table$WBIC==target
    y.vals[i] <- median(sens.table$strat.onset.DoY,na.rm=T)
    y.vals.1[i] <- quantile(x=sens.table$strat.onset.DoY,probs=c(.25,.75),na.rm=T)[[1]]
    y.vals.2[i] <- quantile(x=sens.table$strat.onset.DoY,probs=c(.25,.75),na.rm=T)[[2]]
    target.lake[i] <- sens.table$strat.onset.DoY[use.lk]
    if (corr){
      for (j in 1:length(other.lakes)){
        use.i = sens.table$WBIC==other.lakes[j]
        if (any(use.i)){
          other.vals[i,j] <- sens.table$strat.onset.DoY[use.i]
        } else {
          other.vals[i,j] = NA
        }
      }
    }
  }
  if (corr){
    corr = vector(length=length(other.lakes))
    for (i in 1:length(other.lakes)){
      print(i)
      corr[i] = cor.test(y=other.vals[,i],x=target.lake, method = "pearson")$estimate[[1]]#summary(lm(other.vals[,i]~target.lake))$r.squared
    }
  } else {
    corr= NA
  }
  polygon(x=c(x.vals,rev(x.vals)), y=c(y.vals.1,rev(y.vals.2)),
          col = plot_colors[2],border=NA)
  #lines(x.vals,y.vals.1,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  #lines(x.vals,y.vals.2,col=plot_colors[2],type='l',lwd=1.2,lty="dotted")
  lines(x.vals,y.vals,col=plot_colors[1],type='l',lwd=1.2,lty="longdash")
  lines(x.vals,target.lake,col=plot_colors[3],type='l',lwd=line.wd)
  label.loc <- get.text.location(par(),h=1,w=2)
  text(label.loc[1],label.loc[2],'(c)')
  par(mgp=par.mgp$y)
  axis(1,las=1, at=tick.x,cex.axis=cex.box, tck=tck)
  axis(3,at=tick.x,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  par(mgp=par.mgp$x)
  axis(2,las=1, at=tick.y$onset,cex.axis=cex.box, tck=tck)
  axis(4,at=tick.y$onset,las=1, cex.axis=cex.box, tck=tck,labels=NA)
  return(corr)
}
# Computes plot coordinates for a panel label offset `perc` percent in from
# the upper-left corner of the current plot region.
# par: a par()-style list whose $usr gives c(xmin, xmax, ymin, ymax);
# h/w scale the x offset for differently shaped panels.  Returns c(x, y).
get.text.location <- function(par, perc = 9, h = 1, w = 1) {
  usr <- par$usr
  span.x <- usr[2] - usr[1]
  span.y <- usr[4] - usr[3]
  # keep the original left-to-right arithmetic order for bit-identical output
  x.pos <- usr[1] + span.x * perc / 100 * (h / w)
  y.pos <- usr[4] - span.y * perc / 100
  c(x.pos, y.pos)
}
# Build Figure 07.  On a fresh session `corrs` does not exist yet and the
# unguarded `plot.fig9.GCB(..., corrs = corrs)` fails when the argument is
# forced; guard with exists() so the first run computes the correlations and
# later runs reuse them.
if (exists("corrs")) {
  corrs <- plot.fig9.GCB(years = seq(1979, 2011, 1), corrs = corrs)
} else {
  corrs <- plot.fig9.GCB(years = seq(1979, 2011, 1))
}
### Custom function to calculate protected lakes in each NARS ecoregion
# Counts protected/unprotected lakes per NARS ecoregion and expresses them as
# proportions of all lakes in that ecoregion.
#
# NHD_pts_lakes_PADUS: SpatialPointsDataFrame of NHD lake centroids merged
#   with PADUS data.
# NARS_regions: SpatialPolygonsDataFrame of NARS ecoregions, same crs as NHD.
# Returns a data.frame keyed by Ecoregion with counts and Prop* columns.
#
# NOTE(review): relies on several globals defined elsewhere
# (protected_GAPS12_COMIDs_ctr, protected_GAP3only_COMIDS_ctr,
# protected_GAPS12_COMIDs_100, protected_GAP3only_COMIDs_100,
# unprotected_COMIDs) plus sp/rgeos (gContains) and dplyr -- confirm they are
# loaded before calling.
protected_lakes_by_NARS <- function(NHD_pts_lakes_PADUS, NARS_regions){
  NARS_names <- NARS_regions@data$WSA9
  # data frame of lake IDs (row name -> COMID), used to join attributes back
  # after sp::over() (which drops point attributes)
  NHD_pts_lakes_PADUS@data$rowID <- rownames(NHD_pts_lakes_PADUS@data)
  rowid_NHD_df <- data.frame(rowID=NHD_pts_lakes_PADUS@data$rowID, COMID=NHD_pts_lakes_PADUS@data$COMID)
  # assign each lake point to the ecoregion polygon that contains it
  NARS_COMID <- sp::over(NHD_pts_lakes_PADUS, NARS_regions, returnList = F)#warning: slow with large polygons/lots of pts
  NARS_COMID$joinID <- rownames(NARS_COMID)
  NARS_COMID <- merge(NARS_COMID, rowid_NHD_df, by.x='joinID', by.y='rowID')
  # get rid of factor; would cause problems later
  NARS_COMID$COMID <- as.numeric(levels(NARS_COMID$COMID))[NARS_COMID$COMID]
  # split lakes by protection class, defined by the global COMID sets
  protected_lakes_gap12_ctr <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAPS12_COMIDs_ctr)
  protected_lakes_gap3_ctr <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAP3only_COMIDS_ctr)
  protected_lakes_gap12_Cat100 <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAPS12_COMIDs_100)
  protected_lakes_gap3_Cat100 <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAP3only_COMIDs_100)
  unprotected_lakes <- subset(NHD_pts_lakes_PADUS, COMID %in% unprotected_COMIDs)
  # number of protected lakes by ecoregion
  NARS_lakes_byNARS_gap12_ctr <- colSums(gContains(NARS_regions, protected_lakes_gap12_ctr, byid = T))
  NARS_lakes_byNARS_gap3_ctr <- colSums(gContains(NARS_regions, protected_lakes_gap3_ctr, byid = T))
  NARS_lakes_byNARS_gap12_Cat100 <- colSums(gContains(NARS_regions, protected_lakes_gap12_Cat100, byid = T))
  NARS_lakes_byNARS_gap3_Cat100 <- colSums(gContains(NARS_regions, protected_lakes_gap3_Cat100, byid = T))
  NARS_lakes_byNARS_unprotected <- colSums(gContains(NARS_regions, unprotected_lakes, byid=T))
  NARS_protected_lakes_DF <- data.frame(Ecoregion=NARS_names, ProtectedLakes_gap12_ctr=NARS_lakes_byNARS_gap12_ctr,
                                        ProtectedLakes_gap3_ctr=NARS_lakes_byNARS_gap3_ctr,
                                        ProtectedLakes_gap12_Cat100=NARS_lakes_byNARS_gap12_Cat100,
                                        ProtectedLakes_gap3_Cat100=NARS_lakes_byNARS_gap3_Cat100,
                                        unprotected_lakes=NARS_lakes_byNARS_unprotected)
  # total lakes per ecoregion, then proportions protected/unprotected
  lake_countz_NARS <- NARS_COMID %>%
    group_by(WSA9) %>%
    tally()
  colnames(lake_countz_NARS) <- c("Ecoregion","nLakes")
  lake_countz_protected_NARS <- merge(lake_countz_NARS, NARS_protected_lakes_DF, by="Ecoregion", all.x=F)
  lake_countz_protected_NARS$PropProtected_gap12_ctr <- lake_countz_protected_NARS$ProtectedLakes_gap12_ctr/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap3_ctr <- lake_countz_protected_NARS$ProtectedLakes_gap3_ctr/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap12_Cat100 <- lake_countz_protected_NARS$ProtectedLakes_gap12_Cat100/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap3_Cat100 <- lake_countz_protected_NARS$ProtectedLakes_gap3_Cat100/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropUnprotected <- lake_countz_protected_NARS$unprotected_lakes/lake_countz_protected_NARS$nLakes
  # (The original nulled out every local here "for the next iteration"; that
  # was dead code -- locals are discarded when the function returns.)
  return(lake_countz_protected_NARS)
}
| /Rcode/functions/protected_lakes_by_NARS.R | no_license | cont-limno/FreshwaterConservation | R | false | false | 4,438 | r | ### Custom function to calculate protected lakes in each NARS ecoregion
# Counts protected/unprotected lakes per NARS ecoregion and expresses them as
# proportions of all lakes in that ecoregion.
# NOTE(review): duplicate definition (this concatenated dump contains the
# same function twice); relies on globals (protected_GAPS12_COMIDs_ctr etc.)
# plus sp/rgeos/dplyr loaded elsewhere.
protected_lakes_by_NARS <- function(NHD_pts_lakes_PADUS, NARS_regions){
  #NHD_pts_lakes_PADUS: NHD lake centroids, merged with PADUS data
  #NARS_regions: NARS ecoregions polygons, same crs as NHD
  NARS_names <- NARS_regions@data$WSA9
  # data frame of lake IDs (typo "fraome" in the original comment)
  NHD_pts_lakes_PADUS@data$rowID <- rownames(NHD_pts_lakes_PADUS@data)
  rowid_NHD_df <- data.frame(rowID=NHD_pts_lakes_PADUS@data$rowID, COMID=NHD_pts_lakes_PADUS@data$COMID)
  # number of lakes per ecoregion:
  # subset points that fall in each ecoregion polygon
  # sp::over doesn't retain attribute data from points, so create data frame to join those data back later based on rowid
  NARS_COMID <- sp::over(NHD_pts_lakes_PADUS, NARS_regions, returnList = F)#warning: slow with large polygons/lots of pts
  NARS_COMID$joinID <- rownames(NARS_COMID)
  NARS_COMID <- merge(NARS_COMID, rowid_NHD_df, by.x='joinID', by.y='rowID')
  # get rid of factor; would cause problems later
  NARS_COMID$COMID <- as.numeric(levels(NARS_COMID$COMID))[NARS_COMID$COMID]
  # define protected lakes based on % Ws and Cat protected
  protected_lakes_gap12_ctr <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAPS12_COMIDs_ctr)
  protected_lakes_gap3_ctr <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAP3only_COMIDS_ctr)
  protected_lakes_gap12_Cat100 <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAPS12_COMIDs_100)
  protected_lakes_gap3_Cat100 <- subset(NHD_pts_lakes_PADUS, COMID %in% protected_GAP3only_COMIDs_100)
  unprotected_lakes <- subset(NHD_pts_lakes_PADUS, COMID %in% unprotected_COMIDs)
  # number of protected lakes by ecoregion
  NARS_lakes_byNARS_gap12_ctr <- colSums(gContains(NARS_regions, protected_lakes_gap12_ctr, byid = T))
  NARS_lakes_byNARS_gap3_ctr <- colSums(gContains(NARS_regions, protected_lakes_gap3_ctr, byid = T))
  NARS_lakes_byNARS_gap12_Cat100 <- colSums(gContains(NARS_regions, protected_lakes_gap12_Cat100, byid = T))
  NARS_lakes_byNARS_gap3_Cat100 <- colSums(gContains(NARS_regions, protected_lakes_gap3_Cat100, byid = T))
  NARS_lakes_byNARS_unprotected <- colSums(gContains(NARS_regions, unprotected_lakes, byid=T))
  #setNames(NARS_lakes_byNARS, NARS_names)
  NARS_protected_lakes_DF <- data.frame(Ecoregion=NARS_names, ProtectedLakes_gap12_ctr=NARS_lakes_byNARS_gap12_ctr,
                                        ProtectedLakes_gap3_ctr=NARS_lakes_byNARS_gap3_ctr,
                                        ProtectedLakes_gap12_Cat100=NARS_lakes_byNARS_gap12_Cat100,
                                        ProtectedLakes_gap3_Cat100=NARS_lakes_byNARS_gap3_Cat100,
                                        unprotected_lakes=NARS_lakes_byNARS_unprotected)
  # proportion of protected lakes by ecoregion (out of total lakes in each ecoregion)
  # count number of rows (COMIDs, therefore lakes) per unique ecoregion
  lake_countz_NARS <- NARS_COMID %>%
    group_by(WSA9) %>%
    tally()
  colnames(lake_countz_NARS) <- c("Ecoregion","nLakes")
  lake_countz_protected_NARS <- merge(lake_countz_NARS, NARS_protected_lakes_DF, by="Ecoregion", all.x=F)
  lake_countz_protected_NARS$PropProtected_gap12_ctr <- lake_countz_protected_NARS$ProtectedLakes_gap12_ctr/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap3_ctr <- lake_countz_protected_NARS$ProtectedLakes_gap3_ctr/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap12_Cat100 <- lake_countz_protected_NARS$ProtectedLakes_gap12_Cat100/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropProtected_gap3_Cat100 <- lake_countz_protected_NARS$ProtectedLakes_gap3_Cat100/lake_countz_protected_NARS$nLakes
  lake_countz_protected_NARS$PropUnprotected <- lake_countz_protected_NARS$unprotected_lakes/lake_countz_protected_NARS$nLakes
  # clean up for next iteration
  # NOTE(review): these NULL assignments are dead code -- locals vanish when
  # the function returns.
  protected_lakes_gap12_ctr <- NULL
  protected_lakes_gap3_ctr <- NULL
  protected_lakes_gap12_Cat100 <- NULL
  protected_lakes_gap3_Cat100 <- NULL
  unprotected_lakes <- NULL
  lake_countz_NARS <- NULL
  NARS_protected_lakes_DF <- NULL
  NARS_lakes_byNARS_gap12_ctr <- NULL
  NARS_lakes_byNARS_gap3_ctr <- NULL
  NARS_lakes_byNARS_gap12_Cat100 <- NULL
  NARS_lakes_byNARS_gap3_Cat100 <- NULL
  NARS_lakes_byNARS_unprotected <- NULL
  NARS_COMID <- NULL
  rowid_NHD_df <- NULL
  NARS_names <- NULL
  rowid_NHD_df <- NULL
  return(lake_countz_protected_NARS)
}
|
### Data Cleanup Script
###
### This script should read the train and test sets
## and produce a clean "train" and "test" datasets
## as well as labels and id variables.
## This includes feature engineering, inputing missing
## values and all the usual tricks
## You usually spend a lot of time on this script
## The data needs not to be scaled, we'll do that in
## the next script
# Load train and test datasets
train<-read.csv("train.csv",stringsAsFactors=FALSE)
test<-read.csv("test.csv",stringsAsFactors=FALSE)
# Add label to test set
# (placeholder value -- the real test labels are unknown; the column only
# exists so train and test have identical columns for the rbind below)
test$Survived <- 0
# Combine train and test sets so feature engineering applies consistently
combi <- rbind(train,test)
# --- Title feature: extract the honorific from "Surname, Title. Given" ---
combi$Name <- as.character(combi$Name)
combi$Title <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][2]})
# sub() replaces only the first match, i.e. the single leading space
combi$Title <- sub(' ', '', combi$Title)
# Collapse rare titles into broader buckets
combi$Title[combi$Title %in% c('Mme', 'Mlle')] <- 'Mlle'
combi$Title[combi$Title %in% c('Capt', 'Don', 'Major', 'Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona', 'Lady', 'the Countess', 'Jonkheer')] <- 'Lady'
combi$Title <- factor(combi$Title)
combi$Sex<-factor(combi$Sex)
# --- Fare2: bucket fares into four coarse bands ---
combi$Fare2 <- '30+'
combi$Fare2[combi$Fare < 30 & combi$Fare >= 20] <- '20-30'
combi$Fare2[combi$Fare < 20 & combi$Fare >= 10] <- '10-20'
combi$Fare2[combi$Fare < 10] <- '00-10'
combi$Fare2<-factor(combi$Fare2)
# --- Family features ---
combi$FamilySize <- combi$SibSp + combi$Parch + 1
combi$Surname <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][1]})
# FamilyID = family size + surname; families of size <=2, and ids that occur
# at most twice, are collapsed into a single 'Small' level
combi$FamilyID <- paste(as.character(combi$FamilySize), combi$Surname, sep="")
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'
famIDs <- data.frame(table(combi$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2,]
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)
# Synthetize missing ages
# Impute missing Age with a regression tree fitted on the complete cases.
Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title + FamilySize,
                data=combi[!is.na(combi$Age),], method="anova")
combi$Age[is.na(combi$Age)] <- predict(Agefit, combi[is.na(combi$Age),])
# Cleanup missing embarking port
# NOTE(review): rows 62 and 830 are hard-coded for this exact Titanic CSV;
# this silently corrupts other rows if the input files ever change.
combi$Embarked[c(62,830)] = "S"
combi$Embarked <- factor(combi$Embarked)
# Fix missing fare (row 1044 hard-coded -- same caveat as above)
combi$Fare[1044] <- median(combi$Fare, na.rm=TRUE)
# Fix Maximum of 32 levels per factor
# (presumably for a downstream model that limits factor levels -- confirm;
# FamilyID2 additionally collapses families of size <= 3)
combi$FamilyID2 <- combi$FamilyID
combi$FamilyID2 <- as.character(combi$FamilyID2)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
combi$FamilyID2 <- factor(combi$FamilyID2)
# Now the fun begins
# --- Cabin-derived features ---
combi$CabinLetter<-substr(combi$Cabin,1,1)  # deck letter (first character)
combi$CabinLetter<-factor(combi$CabinLetter)
combi$CabinLetter<-as.character(combi$CabinLetter)
combi$CabinLetter[combi$CabinLetter==""]<-"U"  # "U" = unknown cabin
combi$CabinLetter<-factor(combi$CabinLetter)
# Create Cabin Number (digits from the first 4 chars of Cabin; 0 = unknown)
combi$CabinNum<-gsub('[^0-9]',"",substr(combi$Cabin,1,4))
combi$CabinNum[combi$CabinNum==""]<-0
# Create Cabin Type: even/odd cabin number, "unknown" when no number.
# (The first assignment creates the column; "unknown" overrides the "even"
# initially set for the 0 placeholder.)
combi$Cabintype[(as.numeric(combi$CabinNum) %% 2) == 0] <- "even"
combi$Cabintype[(as.numeric(combi$CabinNum) %% 2) == 1] <- "odd"
combi$Cabintype[(as.numeric(combi$CabinNum) ==0) ] <- "unknown"
combi$Cabintype<-factor(combi$Cabintype)
# Add cabinnum2: cabin number bucketed by tens
combi$Cabinnum2<-as.numeric(combi$CabinNum) %/% 10
combi$Cabinnum2<-factor(combi$Cabinnum2)
# Fix Mrs Anderson
# NOTE(review): rows 69 and 1106 hard-coded for this exact dataset.
combi$SibSp[69]<-0
combi$Parch[69]<-0
combi$SibSp[1106]<-0
combi$Parch[1106]<-0
combi$HasFamily<-as.numeric(combi$FamilySize)>1
combi$HasFamily<-factor(combi$HasFamily)
# more than one cabin listed (raw Cabin string longer than 3 characters)
combi$multicabin<-nchar(combi$Cabin)>3
combi$multicabin<-factor(combi$multicabin)
# "(" in the name -- presumably marks an alternate name in parentheses
combi$hascharge<-grepl("\\(",combi$Name)
combi$hascharge<-factor(combi$hascharge)
# --- Drop helper objects/columns not used for modelling ---
rm(famIDs)
combi$Name<-NULL
combi$Ticket<-NULL
combi$Cabin<-NULL
combi$Surname<-NULL
combi$CabinNum<-NULL
combi$PassengerId<-NULL
combi$SibSp<-as.factor(combi$SibSp)
combi$Parch<-as.factor(combi$Parch)
combi$Pclass<-as.factor(combi$Pclass)
# Get Ids
# (`test` here is still the raw test set read above, so PassengerId exists;
# the engineered `test` created below has the column removed)
id<-test$PassengerId
# Move label to first column
# NOTE(review): the cbind that actually moved the label is commented out
# below -- labels are kept as a separate data.frame instead.
labels<-as.data.frame(train$Survived)
names(labels)<-"label"
combi$Survived<-NULL
#combi<-cbind(labels,combi)
#names(combi)[1]<-"label"
#combi$label<-as.factor(combi$label)
rm(Agefit)
# Create new prediction
# Split the engineered frame back into train/test.
# NOTE(review): 891/1309 are hard-coded Titanic row counts.
train <- combi[1:891,]
test <- combi[892:1309,]
rm(combi)
str(train)
| /data_cleanup.R | no_license | lrargerich/predictionMachine | R | false | false | 4,063 | r | ### Data Cleanup Script
###
### This script should read the train and test sets
## and produce a clean "train" and "test" datasets
## as well as labels and id variables.
## This includes feature engineering, inputing missing
## values and all the usual tricks
## You usually spend a lot of time on this script
## The data needs not to be scaled, we'll do that in
## the next script
# Load train and test datasets
train<-read.csv("train.csv",stringsAsFactors=FALSE)
test<-read.csv("test.csv",stringsAsFactors=FALSE)
# Add label to test set
test$Survived <- 0
# Combine train and test sets
combi <- rbind(train,test)
combi$Name <- as.character(combi$Name)
combi$Title <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][2]})
combi$Title <- sub(' ', '', combi$Title)
combi$Title[combi$Title %in% c('Mme', 'Mlle')] <- 'Mlle'
combi$Title[combi$Title %in% c('Capt', 'Don', 'Major', 'Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona', 'Lady', 'the Countess', 'Jonkheer')] <- 'Lady'
combi$Title <- factor(combi$Title)
combi$Sex<-factor(combi$Sex)
combi$Fare2 <- '30+'
combi$Fare2[combi$Fare < 30 & combi$Fare >= 20] <- '20-30'
combi$Fare2[combi$Fare < 20 & combi$Fare >= 10] <- '10-20'
combi$Fare2[combi$Fare < 10] <- '00-10'
combi$Fare2<-factor(combi$Fare2)
combi$FamilySize <- combi$SibSp + combi$Parch + 1
combi$Surname <- sapply(combi$Name, FUN=function(x) {strsplit(x, split='[,.]')[[1]][1]})
combi$FamilyID <- paste(as.character(combi$FamilySize), combi$Surname, sep="")
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'
famIDs <- data.frame(table(combi$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2,]
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)
# Synthetize missing ages
Agefit <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title + FamilySize,
data=combi[!is.na(combi$Age),], method="anova")
combi$Age[is.na(combi$Age)] <- predict(Agefit, combi[is.na(combi$Age),])
# Cleanup missing embarking port
combi$Embarked[c(62,830)] = "S"
combi$Embarked <- factor(combi$Embarked)
# Fix missing fare
combi$Fare[1044] <- median(combi$Fare, na.rm=TRUE)
# Fix Maximum of 32 levels per factor
combi$FamilyID2 <- combi$FamilyID
combi$FamilyID2 <- as.character(combi$FamilyID2)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
combi$FamilyID2 <- factor(combi$FamilyID2)
# Now the fun begins
combi$CabinLetter<-substr(combi$Cabin,1,1)
combi$CabinLetter<-factor(combi$CabinLetter)
combi$CabinLetter<-as.character(combi$CabinLetter)
combi$CabinLetter[combi$CabinLetter==""]<-"U"
combi$CabinLetter<-factor(combi$CabinLetter)
# Create Cabin Number
combi$CabinNum<-gsub('[^0-9]',"",substr(combi$Cabin,1,4))
combi$CabinNum[combi$CabinNum==""]<-0
# Create Cabin Type
combi$Cabintype[(as.numeric(combi$CabinNum) %% 2) == 0] <- "even"
combi$Cabintype[(as.numeric(combi$CabinNum) %% 2) == 1] <- "odd"
combi$Cabintype[(as.numeric(combi$CabinNum) ==0) ] <- "unknown"
combi$Cabintype<-factor(combi$Cabintype)
# Add cabinnum2
combi$Cabinnum2<-as.numeric(combi$CabinNum) %/% 10
combi$Cabinnum2<-factor(combi$Cabinnum2)
# Fix Mrs Anderson
combi$SibSp[69]<-0
combi$Parch[69]<-0
combi$SibSp[1106]<-0
combi$Parch[1106]<-0
combi$HasFamily<-as.numeric(combi$FamilySize)>1
combi$HasFamily<-factor(combi$HasFamily)
combi$multicabin<-nchar(combi$Cabin)>3
combi$multicabin<-factor(combi$multicabin)
combi$hascharge<-grepl("\\(",combi$Name)
combi$hascharge<-factor(combi$hascharge)
rm(famIDs)
combi$Name<-NULL
combi$Ticket<-NULL
combi$Cabin<-NULL
combi$Surname<-NULL
combi$CabinNum<-NULL
combi$PassengerId<-NULL
combi$SibSp<-as.factor(combi$SibSp)
combi$Parch<-as.factor(combi$Parch)
combi$Pclass<-as.factor(combi$Pclass)
# Get Ids
id<-test$PassengerId
# Move label to first column
labels<-as.data.frame(train$Survived)
names(labels)<-"label"
combi$Survived<-NULL
#combi<-cbind(labels,combi)
#names(combi)[1]<-"label"
#combi$label<-as.factor(combi$label)
rm(Agefit)
# Create new prediction
train <- combi[1:891,]
test <- combi[892:1309,]
rm(combi)
str(train)
|
# Read previous scripts
# source("R/dm01_utils.R")
# source("R/dm02_pkg.r")
# If simulated data
# source("R/dm03_sim_data.R")
# Time-to-event data
#
# Number of subjects at start of follow-up stratified by covariates
# Number of person-years
# Number of events
# Age at event (mean, SD, median, range)
# fbc <- df_all # If with other colnames
fbc <- df_tot
# Table 1 -----------------------------------------------------------------
# Scaled grades
fbc_tab <-
fbc %>%
mutate_at(c(vars_avera, vars_grade), compose(c, scale)) %>%
mutate_at(
vars(starts_with("time")),
~ ((d_start - 1) %--% .) / years(1)
) %>%
mutate(
all = "all",
# For simulation data
Average_c = cut(Average, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_1_c = cut(subject_1, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
subject_2_c = cut(subject_2, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_3_c = cut(subject_3, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
subject_4_c = cut(subject_4, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_5_c = cut(subject_5, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_6_c = cut(subject_6, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
Average_c2 = cut(Average, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_1_c2 = cut(subject_1, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
subject_2_c2 = cut(subject_2, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_3_c2 = cut(subject_3, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
subject_4_c2 = cut(subject_4, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_5_c2 = cut(subject_5, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_6_c2 = cut(subject_6, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F)
# For real data
# Literature_c = cut(Literature, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
# Mathematics_c = cut(Mathematics, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Phys_Edu_c = cut(Phys_Edu, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
# Handicrafts_c = cut(Handicrafts, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Arts_c = cut(Arts, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Music_c = cut(Music, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
#
# Average_c2 = cut(Average, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Literature_c2 = cut(Literature, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
# Mathematics_c2 = cut(Mathematics, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Phys_Edu_c2 = cut(Phys_Edu, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
# Handicrafts_c2 = cut(Handicrafts, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Arts_c2 = cut(Arts, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Music_c2 = cut(Music, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F)
)
strata <- c("all", vars_covar,
"Average_c",
paste0(vars_grade, "_c"),
"Average_c2",
paste0(vars_grade, "_c2"))
# With original grades (non-scaled)
# fbc_tab_orig <-
# fbc %>%
# mutate_at(
# vars(starts_with("time")),
# ~ ((d_start - 1) %--% .) / years(1)
# ) %>%
# mutate(
# all = "all" ,
# Average_int = as.integer(Average),
# Average_round = round(Average, 0),
# Average_c = cut(Average, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Average_c2 = cut(Average, breaks = c(-Inf, 4:10, Inf)) ,
# Literature_c = cut(Literature, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Mathematics_c = cut(Mathematics, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Phys_Edu_c = cut(Phys_Edu, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Handicrafts_c = cut(Handicrafts, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Arts_c = cut(Arts, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Music_c = cut(Music, breaks = c(-Inf, 6, 7, 8, 9, Inf))
# )
#
# strata <- c("all",
# vars_covar,
# "Average_c",
# "Average_c2",
# "Average_int",
# "Average_round",
# vars_grade,
# paste0(vars_grade, "_c"))
# Same for scaled and non-scaled
fbc_long <-
fbc_tab %>% # or fbc_tab_orig if non-scaled
pivot_longer(
cols = matches("^(event|time)_"),
names_sep = "_",
names_to = c(".value", "outcome")
)
#' @param x a survfit object
cumulative_incidence <- function(x, at = Inf) {
broom::tidy(x) %>%
filter(time <= !!at) %>%
slice(n()) %>%
select(estimate, conf.high, conf.low) %>%
mutate_all(~ (1 - .) * 100)
}
options(dplyr.summarise.inform = FALSE)
table_1_data <-
map_df(strata, ~ {
fbc_long %>%
group_by(outcome, chracteristic = .x, value = !!sym(.x)) %>%
summarise(
n = n(),
n_risk = sum(time > 0, na.rm = T),
pyears = sum(time, na.rm = T),
events = sum(event, na.rm = T),
km_fit = list(survfit(Surv(time, event) ~ 1))
) %>%
mutate(ci = map(km_fit, cumulative_incidence)) %>%
unnest_wider(ci, names_sep = "_") %>%
mutate(value = as.character(value))
})
# openxlsx::write.xlsx(
# table_1_data %>% select(-km_fit),
# paste0(
# "figures/table_1_data_",
# Sys.Date(),
# ".xlsx"
# )
# )
# Mean age ----------------------------------------------------------------
fbc_age <-
fbc %>%
filter(fas == 1, fas_dg != "none") %>%
mutate_at(
vars(starts_with("time")),
~ ((dob) %--% .) / years(1)
) %>%
select(
contains("time"))
summary_age <-
bind_rows(
fbc_age %>%
summarise_all( ~ (mean(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (sd(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (min(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (median(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (max(., na.rm = T)))
) %>%
mutate(
measure = c(
"mean",
"sd",
"min",
"median",
"max"
)
)
# openxlsx::write.xlsx(
# summary_age,
# paste0(
# "figures/tab_summary_age_",
# Sys.Date(),
# ".xlsx"
# )
# )
# openxlsx::write.xlsx(
# summary(fbc_age),
# paste0(
# "figures/tab_summary_age2_",
# Sys.Date(),
# ".xlsx"
# )
# )
| /R/mv01_descr_time_event.r | permissive | davgyl/school_sim | R | false | false | 6,555 | r | # Read previous scripts
# source("R/dm01_utils.R")
# source("R/dm02_pkg.r")
# If simulated data
# source("R/dm03_sim_data.R")
# Time-to-event data
#
# Number of subjects at start of follow-up stratified by covariates
# Number of person-years
# Number of events
# Age at event (mean, SD, median, range)
# fbc <- df_all # If with other colnames
fbc <- df_tot
# Table 1 -----------------------------------------------------------------
# Scaled grades
fbc_tab <-
fbc %>%
mutate_at(c(vars_avera, vars_grade), compose(c, scale)) %>%
mutate_at(
vars(starts_with("time")),
~ ((d_start - 1) %--% .) / years(1)
) %>%
mutate(
all = "all",
# For simulation data
Average_c = cut(Average, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_1_c = cut(subject_1, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
subject_2_c = cut(subject_2, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_3_c = cut(subject_3, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
subject_4_c = cut(subject_4, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_5_c = cut(subject_5, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
subject_6_c = cut(subject_6, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
Average_c2 = cut(Average, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_1_c2 = cut(subject_1, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
subject_2_c2 = cut(subject_2, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_3_c2 = cut(subject_3, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
subject_4_c2 = cut(subject_4, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_5_c2 = cut(subject_5, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
subject_6_c2 = cut(subject_6, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F)
# For real data
# Literature_c = cut(Literature, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
# Mathematics_c = cut(Mathematics, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Phys_Edu_c = cut(Phys_Edu, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F) ,
# Handicrafts_c = cut(Handicrafts, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Arts_c = cut(Arts, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
# Music_c = cut(Music, breaks = c(-Inf, -1.5, -1, -0.5, 0.5, 1, 1.5, Inf), right = F),
#
# Average_c2 = cut(Average, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Literature_c2 = cut(Literature, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
# Mathematics_c2 = cut(Mathematics, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Phys_Edu_c2 = cut(Phys_Edu, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F) ,
# Handicrafts_c2 = cut(Handicrafts, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Arts_c2 = cut(Arts, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F),
# Music_c2 = cut(Music, breaks = c(-Inf, -1.5, -0.5, 0.5, 1.5, Inf), right = F)
)
strata <- c("all", vars_covar,
"Average_c",
paste0(vars_grade, "_c"),
"Average_c2",
paste0(vars_grade, "_c2"))
# With original grades (non-scaled)
# fbc_tab_orig <-
# fbc %>%
# mutate_at(
# vars(starts_with("time")),
# ~ ((d_start - 1) %--% .) / years(1)
# ) %>%
# mutate(
# all = "all" ,
# Average_int = as.integer(Average),
# Average_round = round(Average, 0),
# Average_c = cut(Average, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Average_c2 = cut(Average, breaks = c(-Inf, 4:10, Inf)) ,
# Literature_c = cut(Literature, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Mathematics_c = cut(Mathematics, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Phys_Edu_c = cut(Phys_Edu, breaks = c(-Inf, 6, 7, 8, 9, Inf)) ,
# Handicrafts_c = cut(Handicrafts, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Arts_c = cut(Arts, breaks = c(-Inf, 6, 7, 8, 9, Inf)),
# Music_c = cut(Music, breaks = c(-Inf, 6, 7, 8, 9, Inf))
# )
#
# strata <- c("all",
# vars_covar,
# "Average_c",
# "Average_c2",
# "Average_int",
# "Average_round",
# vars_grade,
# paste0(vars_grade, "_c"))
# Same for scaled and non-scaled
fbc_long <-
fbc_tab %>% # or fbc_tab_orig if non-scaled
pivot_longer(
cols = matches("^(event|time)_"),
names_sep = "_",
names_to = c(".value", "outcome")
)
#' @param x a survfit object
cumulative_incidence <- function(x, at = Inf) {
broom::tidy(x) %>%
filter(time <= !!at) %>%
slice(n()) %>%
select(estimate, conf.high, conf.low) %>%
mutate_all(~ (1 - .) * 100)
}
options(dplyr.summarise.inform = FALSE)
table_1_data <-
map_df(strata, ~ {
fbc_long %>%
group_by(outcome, chracteristic = .x, value = !!sym(.x)) %>%
summarise(
n = n(),
n_risk = sum(time > 0, na.rm = T),
pyears = sum(time, na.rm = T),
events = sum(event, na.rm = T),
km_fit = list(survfit(Surv(time, event) ~ 1))
) %>%
mutate(ci = map(km_fit, cumulative_incidence)) %>%
unnest_wider(ci, names_sep = "_") %>%
mutate(value = as.character(value))
})
# openxlsx::write.xlsx(
# table_1_data %>% select(-km_fit),
# paste0(
# "figures/table_1_data_",
# Sys.Date(),
# ".xlsx"
# )
# )
# Mean age ----------------------------------------------------------------
fbc_age <-
fbc %>%
filter(fas == 1, fas_dg != "none") %>%
mutate_at(
vars(starts_with("time")),
~ ((dob) %--% .) / years(1)
) %>%
select(
contains("time"))
summary_age <-
bind_rows(
fbc_age %>%
summarise_all( ~ (mean(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (sd(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (min(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (median(., na.rm = T))),
fbc_age %>%
summarise_all( ~ (max(., na.rm = T)))
) %>%
mutate(
measure = c(
"mean",
"sd",
"min",
"median",
"max"
)
)
# openxlsx::write.xlsx(
# summary_age,
# paste0(
# "figures/tab_summary_age_",
# Sys.Date(),
# ".xlsx"
# )
# )
# openxlsx::write.xlsx(
# summary(fbc_age),
# paste0(
# "figures/tab_summary_age2_",
# Sys.Date(),
# ".xlsx"
# )
# )
|
#!/usr/bin/env Rscript
err.cat <- function(x) cat(x, '\n', file=stderr())
# Filtering of variants based on annotation
suppressPackageStartupMessages(library(optparse))
suppressPackageStartupMessages(library(tools))
suppressPackageStartupMessages(library(xtable))
suppressPackageStartupMessages(library(data.table))
###
message('*** CADD FILTERING ***')
d <- as.data.frame(fread('file:///dev/stdin'))
option_list <- list(
make_option(c('--cadd.thresh'), default=20, help='CADD score threshold'),
make_option(c('--csq.filter'), default='start|stop|splice|frameshift|stop_gained', help='csq field'),
make_option(c('--carol.filter'), default='Deleterious', help='CAROL'),
make_option(c('--condel.filter'), default='deleterious', help='CAROL'),
make_option(c('--out'), help='outfile')
)
option.parser <- OptionParser(option_list=option_list)
opt <- parse_args(option.parser)
message('samples')
err.cat(length(samples <- grep('geno\\.',colnames(d), value=TRUE)))
# if there is a stop or indel then CADD score is NA
if (!is.null(opt$cadd.thresh)) {
cadd.thresh <- opt$cadd.thresh
message(sprintf('CADD score > %d',cadd.thresh))
err.cat(table( cadd.filter <- (d$CADD > cadd.thresh) ))
#d <- d[cadd.filter,]
}
# Filter on consequence, these need to ORed rather than ANDed
# missense
# We are interested in damaging variants:
# frameshift, stop or splice
if (!is.null(opt$csq.filter)) {
csq.filter <- opt$csq.filter
message( sprintf('%s in consequence field',opt$csq.filter) )
err.cat(table(csq.filter <- grepl(opt$csq.filter, d$Consequence)))
#d <- d[csq.filter,]
}
# CAROL Deleterious
if (!is.null(opt$carol.filter)) {
carol.filter <- opt$carol.filter
message( sprintf('CAROL %s',opt$carol.filter) )
err.cat(table(carol.filter <- grepl(opt$carol.filter,d$CAROL)))
#d <- d[carol.filter,]
}
# Condel deleterious
if (!is.null(opt$condel.filter)) {
condel.filter <- opt$condel.filter
message(sprintf('Condel %s',opt$condel.filter))
err.cat(table(condel.filter <- grepl(opt$condel.filter, d$Condel)))
#d <- d[condel.filter,]
}
f1 <- csq.filter
# missense and (carol or condel deleterious)
f2 <- ( grepl('missense_variant',d$Consequence) & (carol.filter|condel.filter|cadd.filter) )
d <- d[ f1 | f2, ]
write.csv( d[order(d$CADD,decreasing=TRUE),] , quote=FALSE, file=opt$out, row.names=FALSE)
| /annotation/filters/csq-filter.R | no_license | plagnollab/DNASeq_pipeline | R | false | false | 2,382 | r | #!/usr/bin/env Rscript
err.cat <- function(x) cat(x, '\n', file=stderr())
# Filtering of variants based on annotation
suppressPackageStartupMessages(library(optparse))
suppressPackageStartupMessages(library(tools))
suppressPackageStartupMessages(library(xtable))
suppressPackageStartupMessages(library(data.table))
###
message('*** CADD FILTERING ***')
d <- as.data.frame(fread('file:///dev/stdin'))
option_list <- list(
make_option(c('--cadd.thresh'), default=20, help='CADD score threshold'),
make_option(c('--csq.filter'), default='start|stop|splice|frameshift|stop_gained', help='csq field'),
make_option(c('--carol.filter'), default='Deleterious', help='CAROL'),
make_option(c('--condel.filter'), default='deleterious', help='CAROL'),
make_option(c('--out'), help='outfile')
)
option.parser <- OptionParser(option_list=option_list)
opt <- parse_args(option.parser)
message('samples')
err.cat(length(samples <- grep('geno\\.',colnames(d), value=TRUE)))
# if there is a stop or indel then CADD score is NA
if (!is.null(opt$cadd.thresh)) {
cadd.thresh <- opt$cadd.thresh
message(sprintf('CADD score > %d',cadd.thresh))
err.cat(table( cadd.filter <- (d$CADD > cadd.thresh) ))
#d <- d[cadd.filter,]
}
# Filter on consequence, these need to ORed rather than ANDed
# missense
# We are interested in damaging variants:
# frameshift, stop or splice
if (!is.null(opt$csq.filter)) {
csq.filter <- opt$csq.filter
message( sprintf('%s in consequence field',opt$csq.filter) )
err.cat(table(csq.filter <- grepl(opt$csq.filter, d$Consequence)))
#d <- d[csq.filter,]
}
# CAROL Deleterious
if (!is.null(opt$carol.filter)) {
carol.filter <- opt$carol.filter
message( sprintf('CAROL %s',opt$carol.filter) )
err.cat(table(carol.filter <- grepl(opt$carol.filter,d$CAROL)))
#d <- d[carol.filter,]
}
# Condel deleterious
if (!is.null(opt$condel.filter)) {
condel.filter <- opt$condel.filter
message(sprintf('Condel %s',opt$condel.filter))
err.cat(table(condel.filter <- grepl(opt$condel.filter, d$Condel)))
#d <- d[condel.filter,]
}
f1 <- csq.filter
# missense and (carol or condel deleterious)
f2 <- ( grepl('missense_variant',d$Consequence) & (carol.filter|condel.filter|cadd.filter) )
d <- d[ f1 | f2, ]
write.csv( d[order(d$CADD,decreasing=TRUE),] , quote=FALSE, file=opt$out, row.names=FALSE)
|
# Web UI for Shiny
# v0.8 (dashboard version)
# HELPER FUNCTIONS ------------------------------
# Add an HTML id attribute to the element generated by a shinydashboard
# menuItem(), so the rendered sidebar entry can be targeted by CSS/JS
# selectors (used below for introjs tours and shinyjs calls).
#
# Args:
#   menuitem: tag object returned by shinydashboard::menuItem(); its first
#             child is the element whose attributes are modified.
#   id:       character scalar assigned as that child's id attribute.
#
# Returns: the modified menuitem tag object.
menuItemAddId <- function( menuitem, id){
  menuitem$children[[1]]$attribs$id <- id
  menuitem
}
# UI COMPONENTS -------------------------------
# Header: top stripe of the dashboard showing the application name and the
# full version string (major.minor.revision, read from app-level globals).
header <- dashboardHeader(
  title = sprintf("DreamTK v%s.%s.%s",
                  app.version.major, app.version.minor, app.version.revision)
)
# Sidebar: the expandable navigation menu on the left edge of the dashboard.
# Each top-level entry is wrapped in menuItemAddId() so the generated list
# item carries an id (li_*) that can be targeted by selector from JS/tours.
sidebar <- dashboardSidebar(
  # Custom CSS to hide the default logout panel
  tags$head(tags$style(HTML('.shiny-server-account { display: none; }'))),
  # The dynamically-generated user panel (rendered server-side)
  uiOutput("userpanel"),
  div(id = "sidebardiv",
  sidebarMenu(id = "sidebar",
    # Main navigation tabs; each tabName must match a tabItem in the body
    menuItemAddId( menuItem("Home", tabName = "hometab", icon = icon("home")), id="li_home" ),
    menuItemAddId( menuItem("Search", tabName = "searchtab", icon = icon("search")), id="li_search" ),
    menuItemAddId( menuItem("Analysis", tabName = "analysistab", icon = icon("pie-chart")), id="li_analysis" ),
    menuItemAddId( menuItem("MFA", tabName = "mfatab", icon = icon("line-chart")), id="li_mfa" ),
    menuItemAddId( menuItem("BER Analysis", tabName = "bertab", icon = icon("user", lib = "glyphicon")), id="li_ber" ),
    menuItemAddId( menuItem("Save/Load", tabName = "savetab", icon = icon("floppy-o")), id="li_save" ),
    # Help: expandable submenu of actionLinks that launch the help/tutorial
    # overlays (same handlers as the buttons on the Home tab)
    menuItemAddId( menuItem("Help", icon = icon("question-circle"), startExpanded = FALSE,
                            actionLink(inputId = "link_help_overview", label = "Interface Help"),
                            actionLink(inputId = "link_help_search", label = "Search Help"),
                            actionLink(inputId = "link_help_save", label = "Data Backup Help"),
                            actionLink(inputId = "link_help_analysis", label = "Analysis Help")
                            ),
                   id = "li_help" ),
    # Quick Options: global display and database settings embedded in the menu
    menuItemAddId( menuItem("Quick Options", icon = icon("cog"), startExpanded = FALSE,
                            # How chemicals are labelled in list controls
                            radioButtons(inputId = "radio_listtype", label = "Chemical display options:", selected = "name",
                                         choices = list("By CASN" = "casn",
                                                        "By Name" = "name") ),
                            #radioButtons(inputID = "radio_plotorientation", label = "Plot orientation options:", selected = "vertical",
                            #             choices = list("Horizontal" ="horizontal",
                            #                            "Vertical" = "vertical")),
                            # Backing data source; status indicator rendered server-side
                            radioButtons(inputId = "radio_dbtype", label = "Database options:", selected = "rdata", width = "100%",
                                         choices = list("MySQL: dreamtk_db (v0.7, based on httk-1.7, tcpl-v2)" = "mysql",
                                                        "RData: DreamTKv0.8.RData (based on httk-1.7, tcpl-v2)" = "rdata") ),
                            uiOutput(outputId="ui_database_status")
                            ),
                   id="li_quickoptions" )
  ))
);
#body is the main dashboard are with all the required tabs
body <- dashboardBody(
shinyjs::useShinyjs(),
includeCSS( "./www/progressbar.css"),
introjsUI(),
tabItems(
tabItem(tabName = "hometab",
h2("Home"),
fluidRow( div(id="welcomeboxdiv",
box(status = "primary", title = "Welcome", collapsible = TRUE, width = 6,
h5("Welcome to DreamTK, an R application which facilitates toxicokinetic analysis of a variety of chemicals."),
h5("ToxCast Pipeline for High-Throughput Screening Data (tcpl v2.0) (Filer et al., 2016, US EPA) is the primary chemical and assay database used by this application."),
h5("High-Throughput Toxicokinetics (httk v1.8) (Pearce et al., 2018) database is used to obtain the necessary toxicokinetic constants."),
h5("Only Assays that hit their cutoffs are considered for analysis.")
))
),
fluidRow(
box(status = "primary", title = "How To", collapsible = TRUE, width = 6,
h5("The following tutorials can help you familiarize with the application. These are also available from the Help tab on the Navigation menu."),
actionButton(inputId = "button_tour_overview", label = "Interface Tutorial"),
actionButton(inputId = "button_tour_search", label = "Search Tutorial"),
actionButton(inputId = "button_tour_save", label = "Data Backup Tutorial"),
actionButton(inputId = "button_tour_analysis", label = "Analysis Tutorial")
)
),
fluidRow(
box(status = "primary", title = "News and Announcements", collapsible = TRUE, width = 6,
includeHTML("./www/news.html")
)
)
),
tabItem(tabName = "searchtab",
h3("Search Chemical Database"),
fluidRow(
tabBox(title = tagList(shiny::icon("compass")), id = "searchtabset",
#search panel
tabPanel("Search", icon = icon("search"), fluid = TRUE,
fluidRow(
column(12,
div(
style = "display: inline-block;vertical-align:baseline; width: 90%;",
selectizeInput(inputId = "field_search", label = "", width = "100%",
options = list(placeholder = "Enter one or more chemicals separated by spaces",
create = TRUE, createOnBlur = TRUE, createFilter = "^[\\w-,()\\[\\]]+$", persist = FALSE,
maxOptions = 1000, loadThrottle = 800, openOnFocus = FALSE,
delimiter = " ", hideSelected = TRUE, closeAfterSelect = TRUE,
plugins = list("restore_on_backspace", "remove_button")),
multiple = TRUE, choices = NULL)
),
div(
style = "display: inline-block;vertical-align:70%; width: 9%;",
actionButton(inputId = "button_search", label = "", icon = icon("search"), style="color:#0e76b7"),
bsTooltip(id="button_search", "Search", placement = "bottom", trigger = "hover", options = NULL)
),
radioButtons(inputId = "radio_searchtype", label = "Search options:", inline = TRUE, selected = "casn",
choices = list("By CASN" = "casn",
"By Name" = "name") )
)
)
),
#custom file panel
tabPanel("Custom File", icon = icon("table"), fluid = TRUE,
fluidRow(
column(10,
fileInput(inputId = "file_customchem", label = "Choose CSV File", accept = c("csv", ".csv"), width = "100%"),
uiOutput(outputId="ui_customchems_status")
),
column(2,
br(),
actionButton(inputId = "button_load_customchems", label = "Parse file", icon = icon("folder-open-o"),
style="padding-left:10px; padding-right:10px; padding-top:10px; padding-bottom:10px; white-space: normal;")
)
),
fluidRow(
column(12,
br(),
actionLink(inputId = "link_customchem_hint", label = "File requirements hint", icon = icon("hand-o-up")),
hidden(fluidRow(id="panelCustomchemHint",
column(12,
h4("Template:"),
downloadLink(outputId = "button_customchem_template",label = tagList(shiny::icon("table"), "Download CSV Template"),
style="padding-left:10px; padding-right:10px; width:100%; white-space: normal; font-size:20px"),
HTML(
str_c( "<p>Comma-separated value (.csv) file with first row containing column names. ",
"<strong>'casn'</strong> column is mandatory. Other columns are optional, and if missing, will be looked up in the database.</p>",
"<strong>Table columns:</strong>",
"<blockquote style = 'font-size:14px; border-left: 10px solid #fff;'><strong>casn:</strong> CAS Registry Number",
"<br><strong>name:</strong> Chemical name",
"<br><strong>cytotoxicity_um:</strong> Cytotoxic concentration (<strong>Units:</strong> uM)",
"<br><strong>cytotoxic:</strong> is the chemical considered cytotoxic at above concentration? Y/N flag",
"<br><strong>mw:</strong> Molecular weight (<strong>Units:</strong> g/mol)",
"<br><strong>human_funbound_plasma:</strong> Unbound fraction of chemical in blood (<strong>Units:</strong> none)",
"<br><strong>human_clint:</strong> Intrinsic invitro hepatic clearance (<strong>Units:</strong> uL/min/10<sup>6</sup> cells)",
"<br><strong>human_rblood2plasma:</strong> Blood to plasma concentration ratio (<strong>Units:</strong> none)",
"<br><strong>log_kow:</strong> Octanol:Water partition coefficient at 25*C (<strong>Units:</strong> LOG10 value)",
"<br><strong>pka:</strong> Equivalent chemical ionization constant (<strong>Units:</strong> - LOG10 value)</blockquote>"
)
)
)
))
)
)
)
)
),
fluidRow(
box(status = "primary", title = "Select and examine chemicals and chemical assays", collapsible = TRUE, width = 12, id = "chemlistbox",
fluidRow(
column(2,
#if for some reason we change the colours primary won't work and we will have to figure a way to put colours in those switchtes. Style is not useable. https://rdrr.io/cran/shinyWidgets/src/R/input-pretty.R Gabriel
prettySwitch(inputId = "select_hit", label = "Only Active Assays", value = TRUE, status = "primary",
fill = TRUE, bigger = TRUE, inline = TRUE,
width = NULL)
),
column(2,
prettySwitch(inputId = "select_background", label = "Include Background Measurements", value = TRUE, status = "primary",
fill = TRUE, bigger = TRUE, inline = TRUE,
width = NULL)
)
),
fluidRow(
column(12,
div(
style = "display: inline-block;vertical-align:bottom; width: 33%;",
selectInput(inputId = "select_chemical", label = "Searched chemical list", selectize = FALSE, size = 10,
choices = NULL )
),
div(
style = "display: inline-block;vertical-align:bottom; width: 33%;",
selectInput(inputId = "select_assay", label = "Chemical assay list", selectize = FALSE, size = 10,
choices = NULL )
),
div(
style = "display: inline-block;vertical-align:bottom; width: 33%;",
selectInput(inputId = "select_assay_comp", label = "Assay component list", selectize = FALSE, size = 10,
choices = NULL )
)
)
),
fluidRow(
column(1,
actionButton(inputId = "button_delete_chem", label = "", icon = icon("trash-o"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
bsTooltip(id="button_delete_chem", "Delete selected chemical", placement = "bottom", trigger = "hover", options = NULL)
),
column(1,
actionButton(inputId = "button_delete_missing", label = "*", icon = icon("trash"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
bsTooltip(id="button_delete_missing", "Delete missing chemicals", placement = "bottom", trigger = "hover", options = NULL)
),
column(1,
actionButton(inputId = "button_list_missing", label = "*", icon = icon("list-ul"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
bsTooltip(id="button_list_missing", "List missing chemicals", placement = "bottom", trigger = "hover", options = NULL)
),
column(1,
actionButton(inputId = "button_clear_list", label = "", icon = icon("times"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
bsTooltip(id="button_clear_list", "Clear chemical list", placement = "bottom", trigger = "hover", options = NULL)
),
column(2,
downloadButton(outputId = "button_savechems", label = "Chem CSV", icon = icon("table"),
style="padding-left:10px; padding-right:10px; width:95%; white-space: normal;"),
bsTooltip(id="button_savechems", "Save information for all listed chemicals as a CSV file", placement = "bottom", trigger = "hover", options = NULL)
),
column(2,
downloadButton(outputId = "button_saveassays", label = "Assays CSV", icon = icon("table"), style="padding-left:10px; padding-right:10px; width:95%; white-space: normal;"),
bsTooltip(id="button_saveassays", "Save assay information for the selected chemical as a CSV file", placement = "bottom", trigger = "hover", options = NULL)
)
)
)
),
fluidRow(
#chemical and assay information panels
box(status = "primary", title = "Chemical info", collapsible = TRUE, width = 6,
htmlOutput(outputId = "html_chemicalinfo")
),
box(status = "primary", title = "Assay info", collapsible = TRUE, width = 6,
htmlOutput(outputId = "html_assayinfo")
)
)
),
tabItem(tabName = "analysistab",
h3("Analyse chemicals"),
#chemical selection panel
fluidRow(
box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 12,
column(6,
wellPanel(id = "stats_selectcontrol",
selectizeInput(inputId = "select_chemical_stats", label = "Selected chemicals",
options = list(placeholder = "Click me to select chemicals",
maxOptions = 10000, loadThrottle = 800,
delimiter = " ", hideSelected = TRUE,
plugins = list("restore_on_backspace", "remove_button")),
multiple = TRUE, choices = NULL),
fluidRow(
column(3,
actionButton(inputId = "button_stats_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;overflow:hidden")
),
column(3,
actionButton(inputId = "button_stats_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;overflow:hidden")
)
)
),
prettySwitch(inputId = "analyse_background", label = "Include Background Measurements", value = TRUE, status = "primary",
fill = TRUE, bigger = TRUE, inline = TRUE,
width = NULL)
),
column(6,
wellPanel(id = "stats_optionscontrol",
checkboxGroupInput(inputId = "checkbox_stats", label = "Select statistics",
choices = c("Target Family Counts and ac50 values" = "tfcounts",
"Hierarchical cluster heatmap of Target Subfamily activities" = "tfhm",
"Hierarchical cluster heatmap of Assay Endpoint activities" = "assayhm",
"Ac50 vs ScalarTop" = "scalartop_ac50",
"OED vs ScalarTop" = "scalartop_oed",
"Burst Assay vs Not Burst Assay" = "ac50_box",
"Chemical ToxPI Plots (Individual)" = "toxpi",
"Chemical ToxPI Plots (Cytotoxicity)" = "toxpi2",
"Chemical ToxPI Plots (Grouped)" = "toxpigroup"),
selected = NULL),
#those are magic numbers. Do not change them.
fluidRow(
column(4,
actionButton(inputId = "button_select_all_stats", label = "Select/Deselect all", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; white-space: normal; width:100%; display:inline-block; overflow:hidden")
),
column(3, offset = 0.5,
actionButton(inputId = "button_stats_run", label = "Run Stats", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; white-space: normal; display:inline-block; overflow:hidden")
)
),
busyIndicator(text="Working...")
)
)
)
),
fluidRow(
box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
uiOutput(outputId = "ui_stats")
)
)
),
tabItem(tabName = "mfatab",
h3("Multiple Factor Analysis"),
#chemical selection panel
fluidRow(
box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 6,
wellPanel(id = "mfa_selectcontrol",
selectizeInput(inputId = "select_chemical_mfa", label = "Selected chemicals",
options = list(placeholder = "Click me to select chemicals",
maxOptions = 10000, loadThrottle = 800,
delimiter = " ", hideSelected = TRUE,
plugins = list("restore_on_backspace", "remove_button")),
multiple = TRUE, choices = NULL),
fluidRow(
column(3,
actionButton(inputId = "button_mfa_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(3,
actionButton(inputId = "button_mfa_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(3,
actionButton(inputId = "button_mfa_run", label = "Run MFA", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(1,
busyIndicator(text="Working...")
)
)
)
)
),
fluidRow(
box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
uiOutput(outputId = "ui_mfa")
)
)
),
tabItem(tabName = "bertab",
h3("Biological Exposure Ratio Analysis"),
#chemical selection panel
fluidRow(
box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 6,
wellPanel(id = "ber_selectcontrol",
selectizeInput(inputId = "select_chemical_ber", label = "Selected chemicals",
options = list(placeholder = "Click me to select chemicals",
maxOptions = 10000, loadThrottle = 800,
delimiter = " ", hideSelected = TRUE,
plugins = list("restore_on_backspace", "remove_button")),
multiple = TRUE, choices = NULL),
fluidRow(
column(3,
actionButton(inputId = "button_ber_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(3,
actionButton(inputId = "button_ber_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(3,
actionButton(inputId = "button_ber_run", label = "Run BER analysis", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
),
column(1,
busyIndicator(text="Working...")
)
)
)
),
box(status = "primary", title = "Extra Information and Assumptions", collapsible = TRUE, width = 6, height = 250,
p("The calculations are based on the", a( href = "https://pubs.acs.org/doi/10.1021/es502513w", "SHEDS-HT", style = "color: blue;", target = "_blank", rel = "noopener noreferrer"), " exposure model."),
h4("Assumptions"),
tags$ul(tags$li("Physical activity index = 1.75"),
tags$li("Basal Alveolar Ventilation Rate = 15.7 m",tags$sup("3"),"/day"),
tags$li("Vapor pressure = 0.876 Pa")
),
h4("Warnings"),
tags$ul(tags$li("Ac50 values are used as a surrogate for OED values as the vast majority of chemicals do not have any values for OED.")))
),
fluidRow(
box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
uiOutput(outputId = "ui_ber")
)
)
),
tabItem(tabName = "savetab",
h3("Save/Load workspace"),
box(status = "primary", title = "Save", width = 3,
radioButtons(inputId = "radio_savetype", label = "Save options:", selected = "cm",
choices = list("R Chemical object list" = "cm") ),
downloadButton(outputId = "button_savefile", label = "Save", style="padding-left:10px; padding-right:10px; white-space: normal;")
),
box(status = "primary", title = "Load", width = 4,
radioButtons(inputId = "radio_loadtype", label = "Load options:", selected = "cm",
choices = list("R Chemical object list" = "cm") ),
fileInput(inputId = "file_load", label = "Choose File"),
actionButton(inputId = "button_loadfile", label = "Load", style="padding-left:10px; padding-right:10px; white-space: normal;"),
busyIndicator(text="Working..."),
uiOutput(outputId="ui_load_status")
)
)
)
);
# main ui function
ui <- dashboardPage( header, sidebar, body, skin = "blue" ); | /app/app_ui.R | no_license | NongCT230/DREAMTK.0.8.1 | R | false | false | 26,821 | r | # Web UI for shiny
#v0.8
#dashboard version
# HELPER FUNCTIONS ------------------------------
# Attach an HTML id to a shinydashboard menuItem so the generated <li>
# element can be targeted by CSS/JS selectors (presumably by the intro.js
# tours and shinyjs calls wired up elsewhere in the app — confirm in server).
#
# Args:
#   menuitem: a menuItem() tag structure (a shiny.tag whose `children` list
#             holds the rendered <a>/<li> content).
#   id:       character scalar; id attribute to set on the first child.
#
# Returns:
#   A modified copy of `menuitem` (R copy-on-modify semantics: the caller's
#   object is left untouched) whose first child carries the given id.
menuItemAddId <- function(menuitem, id) {
  menuitem$children[[1]]$attribs$id <- id
  menuitem
}
# UI COMPONENTS -------------------------------
# header is the top stripe panel of the dashboard.
# The title string embeds the running app version, assembled from the
# app.version.major/.minor/.revision globals (defined elsewhere in the app).
header <- dashboardHeader(title = paste0("DreamTK v",app.version.major,".",app.version.minor, ".", app.version.revision));
# sidebar is the expandable menu bar on the left side.
# Each top-level entry is wrapped in menuItemAddId() so the rendered <li>
# carries a stable id (li_home, li_search, ...) for selector-based targeting.
sidebar <- dashboardSidebar(
  # Custom CSS to hide the default logout panel
  tags$head(tags$style(HTML('.shiny-server-account { display: none; }'))),
  # The dynamically-generated user panel (rendered server-side)
  uiOutput("userpanel"),
  div(id = "sidebardiv",
  sidebarMenu(id = "sidebar",
    menuItemAddId( menuItem("Home", tabName = "hometab", icon = icon("home")), id="li_home" ),
    menuItemAddId( menuItem("Search", tabName = "searchtab", icon = icon("search")), id="li_search" ),
    menuItemAddId( menuItem("Analysis", tabName = "analysistab", icon = icon("pie-chart")), id="li_analysis" ),
    menuItemAddId( menuItem("MFA", tabName = "mfatab", icon = icon("line-chart")), id="li_mfa" ),
    menuItemAddId( menuItem("BER Analysis", tabName = "bertab", icon = icon("user", lib = "glyphicon")), id="li_ber" ),
    menuItemAddId( menuItem("Save/Load", tabName = "savetab", icon = icon("floppy-o")), id="li_save" ),
    # Help entry has no tabName: it only expands to action links handled server-side.
    menuItemAddId( menuItem("Help", icon = icon("question-circle"), startExpanded = FALSE,
             actionLink(inputId = "link_help_overview", label = "Interface Help"),
             actionLink(inputId = "link_help_search", label = "Search Help"),
             actionLink(inputId = "link_help_save", label = "Data Backup Help"),
             actionLink(inputId = "link_help_analysis", label = "Analysis Help")
    ),
    id = "li_help" ),
    menuItemAddId( menuItem("Quick Options", icon = icon("cog"), startExpanded = FALSE,
              radioButtons(inputId = "radio_listtype", label = "Chemical display options:", selected = "name",
              choices = list("By CASN" = "casn",
                             "By Name" = "name") ),
              # Plot-orientation option below is intentionally disabled (commented out).
              #radioButtons(inputID = "radio_plotorientation", label = "Plot orientation options:", selected = "vertical",
              #             choices = list("Horizontal" ="horizontal",
              #                            "Vertical" = "vertical")),
              radioButtons(inputId = "radio_dbtype", label = "Database options:", selected = "rdata", width = "100%",
              choices = list("MySQL: dreamtk_db (v0.7, based on httk-1.7, tcpl-v2)" = "mysql",
                             "RData: DreamTKv0.8.RData (based on httk-1.7, tcpl-v2)" = "rdata") ),
              uiOutput(outputId="ui_database_status")
    ),
    id="li_quickoptions" )
  ))
);
# body is the main dashboard area with all the required tabs.
# One tabItem per sidebar entry: home, search, analysis, MFA, BER, save/load.
body <- dashboardBody(
  shinyjs::useShinyjs(),
  includeCSS( "./www/progressbar.css"),
  introjsUI(),
  tabItems(
    # ---- Home tab: welcome text, tutorial launchers, and news ----
    tabItem(tabName = "hometab",
            h2("Home"),
            fluidRow( div(id="welcomeboxdiv",
                          box(status = "primary", title = "Welcome", collapsible = TRUE, width = 6,
                              h5("Welcome to DreamTK, an R application which facilitates toxicokinetic analysis of a variety of chemicals."),
                              h5("ToxCast Pipeline for High-Throughput Screening Data (tcpl v2.0) (Filer et al., 2016, US EPA) is the primary chemical and assay database used by this application."),
                              h5("High-Throughput Toxicokinetics (httk v1.8) (Pearce et al., 2018) database is used to obtain the necessary toxicokinetic constants."),
                              h5("Only Assays that hit their cutoffs are considered for analysis.")
                          ))
            ),
            fluidRow(
              box(status = "primary", title = "How To", collapsible = TRUE, width = 6,
                  h5("The following tutorials can help you familiarize with the application. These are also available from the Help tab on the Navigation menu."),
                  actionButton(inputId = "button_tour_overview", label = "Interface Tutorial"),
                  actionButton(inputId = "button_tour_search", label = "Search Tutorial"),
                  actionButton(inputId = "button_tour_save", label = "Data Backup Tutorial"),
                  actionButton(inputId = "button_tour_analysis", label = "Analysis Tutorial")
              )
            ),
            fluidRow(
              box(status = "primary", title = "News and Announcements", collapsible = TRUE, width = 6,
                  includeHTML("./www/news.html")
              )
            )
    ),
    # ---- Search tab: database search, custom CSV upload, chemical/assay browser ----
    tabItem(tabName = "searchtab",
            h3("Search Chemical Database"),
            fluidRow(
              tabBox(title = tagList(shiny::icon("compass")), id = "searchtabset",
                #search panel
                tabPanel("Search", icon = icon("search"), fluid = TRUE,
                  fluidRow(
                    column(12,
                      div(
                        style = "display: inline-block;vertical-align:baseline; width: 90%;",
                        selectizeInput(inputId = "field_search", label = "", width = "100%",
                                       options = list(placeholder = "Enter one or more chemicals separated by spaces",
                                                      create = TRUE, createOnBlur = TRUE, createFilter = "^[\\w-,()\\[\\]]+$", persist = FALSE,
                                                      maxOptions = 1000, loadThrottle = 800, openOnFocus = FALSE,
                                                      delimiter = " ", hideSelected = TRUE, closeAfterSelect = TRUE,
                                                      plugins = list("restore_on_backspace", "remove_button")),
                                       multiple = TRUE, choices = NULL)
                      ),
                      div(
                        style = "display: inline-block;vertical-align:70%; width: 9%;",
                        actionButton(inputId = "button_search", label = "", icon = icon("search"), style="color:#0e76b7"),
                        bsTooltip(id="button_search", "Search", placement = "bottom", trigger = "hover", options = NULL)
                      ),
                      radioButtons(inputId = "radio_searchtype", label = "Search options:", inline = TRUE, selected = "casn",
                                   choices = list("By CASN" = "casn",
                                                  "By Name" = "name") )
                    )
                  )
                ),
                #custom file panel
                tabPanel("Custom File", icon = icon("table"), fluid = TRUE,
                  fluidRow(
                    column(10,
                      fileInput(inputId = "file_customchem", label = "Choose CSV File", accept = c("csv", ".csv"), width = "100%"),
                      uiOutput(outputId="ui_customchems_status")
                    ),
                    column(2,
                      br(),
                      actionButton(inputId = "button_load_customchems", label = "Parse file", icon = icon("folder-open-o"),
                                   style="padding-left:10px; padding-right:10px; padding-top:10px; padding-bottom:10px; white-space: normal;")
                    )
                  ),
                  fluidRow(
                    column(12,
                      br(),
                      actionLink(inputId = "link_customchem_hint", label = "File requirements hint", icon = icon("hand-o-up")),
                      # Hint panel starts hidden; toggled via shinyjs from the server side.
                      hidden(fluidRow(id="panelCustomchemHint",
                        column(12,
                          h4("Template:"),
                          downloadLink(outputId = "button_customchem_template",label = tagList(shiny::icon("table"), "Download CSV Template"),
                                       style="padding-left:10px; padding-right:10px; width:100%; white-space: normal; font-size:20px"),
                          HTML(
                            str_c( "<p>Comma-separated value (.csv) file with first row containing column names. ",
                                   "<strong>'casn'</strong> column is mandatory. Other columns are optional, and if missing, will be looked up in the database.</p>",
                                   "<strong>Table columns:</strong>",
                                   "<blockquote style = 'font-size:14px; border-left: 10px solid #fff;'><strong>casn:</strong> CAS Registry Number",
                                   "<br><strong>name:</strong> Chemical name",
                                   "<br><strong>cytotoxicity_um:</strong> Cytotoxic concentration (<strong>Units:</strong> uM)",
                                   "<br><strong>cytotoxic:</strong> is the chemical considered cytotoxic at above concentration? Y/N flag",
                                   "<br><strong>mw:</strong> Molecular weight (<strong>Units:</strong> g/mol)",
                                   "<br><strong>human_funbound_plasma:</strong> Unbound fraction of chemical in blood (<strong>Units:</strong> none)",
                                   "<br><strong>human_clint:</strong> Intrinsic invitro hepatic clearance (<strong>Units:</strong> uL/min/10<sup>6</sup> cells)",
                                   "<br><strong>human_rblood2plasma:</strong> Blood to plasma concentration ratio (<strong>Units:</strong> none)",
                                   "<br><strong>log_kow:</strong> Octanol:Water partition coefficient at 25*C (<strong>Units:</strong> LOG10 value)",
                                   "<br><strong>pka:</strong> Equivalent chemical ionization constant (<strong>Units:</strong> - LOG10 value)</blockquote>"
                            )
                          )
                        )
                      ))
                    )
                  )
                )
              )
            ),
            fluidRow(
              box(status = "primary", title = "Select and examine chemicals and chemical assays", collapsible = TRUE, width = 12, id = "chemlistbox",
                  fluidRow(
                    column(2,
                           #if for some reason we change the colours primary won't work and we will have to figure a way to put colours in those switchtes. Style is not useable. https://rdrr.io/cran/shinyWidgets/src/R/input-pretty.R Gabriel
                           prettySwitch(inputId = "select_hit", label = "Only Active Assays", value = TRUE, status = "primary",
                                        fill = TRUE, bigger = TRUE, inline = TRUE,
                                        width = NULL)
                    ),
                    column(2,
                           prettySwitch(inputId = "select_background", label = "Include Background Measurements", value = TRUE, status = "primary",
                                        fill = TRUE, bigger = TRUE, inline = TRUE,
                                        width = NULL)
                    )
                  ),
                  # Three linked selection lists: chemical -> assay -> assay component.
                  fluidRow(
                    column(12,
                           div(
                             style = "display: inline-block;vertical-align:bottom; width: 33%;",
                             selectInput(inputId = "select_chemical", label = "Searched chemical list", selectize = FALSE, size = 10,
                                         choices = NULL )
                           ),
                           div(
                             style = "display: inline-block;vertical-align:bottom; width: 33%;",
                             selectInput(inputId = "select_assay", label = "Chemical assay list", selectize = FALSE, size = 10,
                                         choices = NULL )
                           ),
                           div(
                             style = "display: inline-block;vertical-align:bottom; width: 33%;",
                             selectInput(inputId = "select_assay_comp", label = "Assay component list", selectize = FALSE, size = 10,
                                         choices = NULL )
                           )
                    )
                  ),
                  fluidRow(
                    column(1,
                           actionButton(inputId = "button_delete_chem", label = "", icon = icon("trash-o"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
                           bsTooltip(id="button_delete_chem", "Delete selected chemical", placement = "bottom", trigger = "hover", options = NULL)
                    ),
                    column(1,
                           actionButton(inputId = "button_delete_missing", label = "*", icon = icon("trash"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
                           bsTooltip(id="button_delete_missing", "Delete missing chemicals", placement = "bottom", trigger = "hover", options = NULL)
                    ),
                    column(1,
                           actionButton(inputId = "button_list_missing", label = "*", icon = icon("list-ul"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
                           bsTooltip(id="button_list_missing", "List missing chemicals", placement = "bottom", trigger = "hover", options = NULL)
                    ),
                    column(1,
                           actionButton(inputId = "button_clear_list", label = "", icon = icon("times"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;"),
                           bsTooltip(id="button_clear_list", "Clear chemical list", placement = "bottom", trigger = "hover", options = NULL)
                    ),
                    column(2,
                           downloadButton(outputId = "button_savechems", label = "Chem CSV", icon = icon("table"),
                                          style="padding-left:10px; padding-right:10px; width:95%; white-space: normal;"),
                           bsTooltip(id="button_savechems", "Save information for all listed chemicals as a CSV file", placement = "bottom", trigger = "hover", options = NULL)
                    ),
                    column(2,
                           downloadButton(outputId = "button_saveassays", label = "Assays CSV", icon = icon("table"), style="padding-left:10px; padding-right:10px; width:95%; white-space: normal;"),
                           bsTooltip(id="button_saveassays", "Save assay information for the selected chemical as a CSV file", placement = "bottom", trigger = "hover", options = NULL)
                    )
                  )
              )
            ),
            fluidRow(
              #chemical and assay information panels
              box(status = "primary", title = "Chemical info", collapsible = TRUE, width = 6,
                  htmlOutput(outputId = "html_chemicalinfo")
              ),
              box(status = "primary", title = "Assay info", collapsible = TRUE, width = 6,
                  htmlOutput(outputId = "html_assayinfo")
              )
            )
    ),
    # ---- Analysis tab: chemical selection plus statistics/plot options ----
    tabItem(tabName = "analysistab",
            h3("Analyse chemicals"),
            #chemical selection panel
            fluidRow(
              box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 12,
                  column(6,
                         wellPanel(id = "stats_selectcontrol",
                                   selectizeInput(inputId = "select_chemical_stats", label = "Selected chemicals",
                                                  options = list(placeholder = "Click me to select chemicals",
                                                                 maxOptions = 10000, loadThrottle = 800,
                                                                 delimiter = " ", hideSelected = TRUE,
                                                                 plugins = list("restore_on_backspace", "remove_button")),
                                                  multiple = TRUE, choices = NULL),
                                   fluidRow(
                                     column(3,
                                            actionButton(inputId = "button_stats_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;overflow:hidden")
                                     ),
                                     column(3,
                                            actionButton(inputId = "button_stats_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;overflow:hidden")
                                     )
                                   )
                         ),
                         prettySwitch(inputId = "analyse_background", label = "Include Background Measurements", value = TRUE, status = "primary",
                                      fill = TRUE, bigger = TRUE, inline = TRUE,
                                      width = NULL)
                  ),
                  column(6,
                         wellPanel(id = "stats_optionscontrol",
                                   checkboxGroupInput(inputId = "checkbox_stats", label = "Select statistics",
                                                      choices = c("Target Family Counts and ac50 values" = "tfcounts",
                                                                  "Hierarchical cluster heatmap of Target Subfamily activities" = "tfhm",
                                                                  "Hierarchical cluster heatmap of Assay Endpoint activities" = "assayhm",
                                                                  "Ac50 vs ScalarTop" = "scalartop_ac50",
                                                                  "OED vs ScalarTop" = "scalartop_oed",
                                                                  "Burst Assay vs Not Burst Assay" = "ac50_box",
                                                                  "Chemical ToxPI Plots (Individual)" = "toxpi",
                                                                  "Chemical ToxPI Plots (Cytotoxicity)" = "toxpi2",
                                                                  "Chemical ToxPI Plots (Grouped)" = "toxpigroup"),
                                                      selected = NULL),
                                   #those are magic numbers. Do not change them.
                                   fluidRow(
                                     column(4,
                                            actionButton(inputId = "button_select_all_stats", label = "Select/Deselect all", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; white-space: normal; width:100%; display:inline-block; overflow:hidden")
                                     ),
                                     column(3, offset = 0.5,
                                            actionButton(inputId = "button_stats_run", label = "Run Stats", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; white-space: normal; display:inline-block; overflow:hidden")
                                     )
                                   ),
                                   busyIndicator(text="Working...")
                         )
                  )
              )
            ),
            fluidRow(
              box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
                  uiOutput(outputId = "ui_stats")
              )
            )
    ),
    # ---- MFA tab: multiple factor analysis over selected chemicals ----
    tabItem(tabName = "mfatab",
            h3("Multiple Factor Analysis"),
            #chemical selection panel
            fluidRow(
              box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 6,
                  wellPanel(id = "mfa_selectcontrol",
                            selectizeInput(inputId = "select_chemical_mfa", label = "Selected chemicals",
                                           options = list(placeholder = "Click me to select chemicals",
                                                          maxOptions = 10000, loadThrottle = 800,
                                                          delimiter = " ", hideSelected = TRUE,
                                                          plugins = list("restore_on_backspace", "remove_button")),
                                           multiple = TRUE, choices = NULL),
                            fluidRow(
                              column(3,
                                     actionButton(inputId = "button_mfa_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(3,
                                     actionButton(inputId = "button_mfa_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(3,
                                     actionButton(inputId = "button_mfa_run", label = "Run MFA", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(1,
                                     busyIndicator(text="Working...")
                              )
                            )
                  )
              )
            ),
            fluidRow(
              box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
                  uiOutput(outputId = "ui_mfa")
              )
            )
    ),
    # ---- BER tab: biological exposure ratio analysis ----
    tabItem(tabName = "bertab",
            h3("Biological Exposure Ratio Analysis"),
            #chemical selection panel
            fluidRow(
              box(status = "primary", title = "Select chemicals and desired analysis", collapsible = TRUE, width = 6,
                  wellPanel(id = "ber_selectcontrol",
                            selectizeInput(inputId = "select_chemical_ber", label = "Selected chemicals",
                                           options = list(placeholder = "Click me to select chemicals",
                                                          maxOptions = 10000, loadThrottle = 800,
                                                          delimiter = " ", hideSelected = TRUE,
                                                          plugins = list("restore_on_backspace", "remove_button")),
                                           multiple = TRUE, choices = NULL),
                            fluidRow(
                              column(3,
                                     actionButton(inputId = "button_ber_selectall", label = "Select All", icon = icon("mouse-pointer"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(3,
                                     actionButton(inputId = "button_ber_deselectall", label = "Deselect All", icon = icon("ban"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(3,
                                     actionButton(inputId = "button_ber_run", label = "Run BER analysis", icon = icon("bar-chart"), style="padding-left:10px; padding-right:10px; width:100%; white-space: normal;")
                              ),
                              column(1,
                                     busyIndicator(text="Working...")
                              )
                            )
                  )
              ),
              box(status = "primary", title = "Extra Information and Assumptions", collapsible = TRUE, width = 6, height = 250,
                  p("The calculations are based on the", a( href = "https://pubs.acs.org/doi/10.1021/es502513w", "SHEDS-HT", style = "color: blue;", target = "_blank", rel = "noopener noreferrer"), " exposure model."),
                  h4("Assumptions"),
                  tags$ul(tags$li("Physical activity index = 1.75"),
                          tags$li("Basal Alveolar Ventilation Rate = 15.7 m",tags$sup("3"),"/day"),
                          tags$li("Vapor pressure = 0.876 Pa")
                  ),
                  h4("Warnings"),
                  tags$ul(tags$li("Ac50 values are used as a surrogate for OED values as the vast majority of chemicals do not have any values for OED.")))
            ),
            fluidRow(
              box(status = "primary", title = "Analysis results", collapsible = TRUE, width = 12,
                  uiOutput(outputId = "ui_ber")
              )
            )
    ),
    # ---- Save/Load tab: workspace backup and restore ----
    tabItem(tabName = "savetab",
            h3("Save/Load workspace"),
            box(status = "primary", title = "Save", width = 3,
                radioButtons(inputId = "radio_savetype", label = "Save options:", selected = "cm",
                             choices = list("R Chemical object list" = "cm") ),
                downloadButton(outputId = "button_savefile", label = "Save", style="padding-left:10px; padding-right:10px; white-space: normal;")
            ),
            box(status = "primary", title = "Load", width = 4,
                radioButtons(inputId = "radio_loadtype", label = "Load options:", selected = "cm",
                             choices = list("R Chemical object list" = "cm") ),
                fileInput(inputId = "file_load", label = "Choose File"),
                actionButton(inputId = "button_loadfile", label = "Load", style="padding-left:10px; padding-right:10px; white-space: normal;"),
                busyIndicator(text="Working..."),
                uiOutput(outputId="ui_load_status")
            )
    )
  )
);
# main ui object: assemble the complete dashboard page from the header,
# sidebar and body components defined above (presumably consumed by
# shinyApp()/runApp() elsewhere in the project — confirm in app entry point).
ui <- dashboardPage( header, sidebar, body, skin = "blue" );
\name{write.screen.template}
\alias{write.screen.template}
\alias{screen.template}
\alias{All}
\alias{screen.randomForest}
\alias{screen.SIS}
\alias{screen.ttest}
\alias{screen.corP}
\alias{screen.corRank}
\alias{screen.glmnet}
\title{
screening algorithms for SuperLearner
}
\description{
Screening algorithms for \code{SuperLearner} to be used with \code{SL.library}.
}
\usage{
write.screen.template(file = "", ...)
}
\arguments{
\item{file}{
A connection, or a character string naming a file to print to. Passed to \code{\link{cat}}.
}
\item{\dots}{
Additional arguments passed to \code{\link{cat}}
}
}
\details{
A screening algorithm is a function that takes (at least) the outcome \code{Y}, the predictor data frame \code{X}, the model \code{family}, observation weights \code{obsWeights}, and cluster identifiers \code{id}, and returns a logical vector \code{whichVariable} of length equal to \code{ncol(X)} indicating which columns of \code{X} should be passed on to the prediction algorithms. Run \code{write.screen.template()} to view the expected structure.
}
\value{
\item{whichVariable}{
A logical vector with the length equal to the number of columns in \code{X}. TRUE indicates the variable (column of X) should be included.
}
}
\author{ Eric C Polley \email{polley.eric@mayo.edu} }
\seealso{
\code{\link{SuperLearner}}
}
\examples{
write.screen.template(file = '')
}
\keyword{utilities}
| /man/write.screen.template.Rd | no_license | ecpolley/SuperLearner | R | false | false | 1,017 | rd | \name{write.screen.template}
\alias{write.screen.template}
\alias{screen.template}
\alias{All}
\alias{screen.randomForest}
\alias{screen.SIS}
\alias{screen.ttest}
\alias{screen.corP}
\alias{screen.corRank}
\alias{screen.glmnet}
\title{
screening algorithms for SuperLearner
}
\description{
Screening algorithms for \code{SuperLearner} to be used with \code{SL.library}.
}
\usage{
write.screen.template(file = "", ...)
}
\arguments{
\item{file}{
A connection, or a character string naming a file to print to. Passed to \code{\link{cat}}.
}
\item{\dots}{
Additional arguments passed to \code{\link{cat}}
}
}
\details{
A screening algorithm is a function that takes (at least) the outcome \code{Y}, the predictor data frame \code{X}, the model \code{family}, observation weights \code{obsWeights}, and cluster identifiers \code{id}, and returns a logical vector \code{whichVariable} of length equal to \code{ncol(X)} indicating which columns of \code{X} should be passed on to the prediction algorithms. Run \code{write.screen.template()} to view the expected structure.
}
\value{
\item{whichVariable}{
A logical vector with the length equal to the number of columns in \code{X}. TRUE indicates the variable (column of X) should be included.
}
}
\author{ Eric C Polley \email{polley.eric@mayo.edu} }
\seealso{
\code{\link{SuperLearner}}
}
\examples{
write.screen.template(file = '')
}
\keyword{utilities}
|
# liver_module.R — one-off analysis script: identify tumor-specific gene
# modules in MC38 liver tumor cells via nonsmooth NMF.  Reads an AnnData
# (.h5ad) file through reticulate/scanpy, normalizes counts, then writes
# gene-module lists to disk.
# NOTE(review): script has heavy side effects (setwd, runtime package
# installation, file appends) and hard-coded local paths — not portable as-is.
# load packages
library("reticulate")
# NOTE(review): hard-coded local Python path; `required = T` uses the
# reassignable alias T instead of TRUE.
use_python("/Applications/anaconda3/bin/python3", required = T)
sc <- import("scanpy")
library(tidyverse)
library(NMF)
# NOTE(review): install.extras() downloads/installs packages on every run —
# this belongs in a one-time setup step, not an analysis script.
install.extras('NMF')
# load liver data (decontaminated and preprocessed)
# NOTE(review): setwd() in a script is fragile; prefer project-relative paths.
setwd("~/Documents/projects/spatial-genomics")
liver_h5ad <- sc$read_h5ad("./data/all_batches_mouse_only_raw_proximities.h5ad")
liver_h5ad
liver_mat_raw <- liver_h5ad$X
dim(liver_mat_raw) # 5434 cells by 55287 genes (human + mouse genes)
liver_cell_names <- liver_h5ad$obs_names$to_list()
rownames(liver_mat_raw) <- liver_cell_names <- make.names(liver_cell_names, allow_ = F)
colnames(liver_mat_raw) <- liver_gene_names <- liver_h5ad$var_names$to_list()
liver_obs <- liver_h5ad$obs
liver_obs$cell_iden <- rownames(liver_obs)
table(liver_obs$CellType)
# keep mouse (mm10-prefixed) genes only
liver_mat <- liver_mat_raw[, grep("mm10", colnames(liver_mat_raw))]
# filter out human transcripts
dim(liver_mat) # 5434 cells, 24917 genes
# load location info
liver_obs$location <- paste0(liver_obs$batch, "_", liver_obs$X, "_", liver_obs$Y)
table(liver_obs$CellType)
# sum(rownames(liver_mat) == liver_obs$cell_iden)
# subset to the mc38 tumor-cell population only
liver_tumor_mat <- liver_mat[liver_obs$CellType == "mc38", ]
dim(liver_tumor_mat) # 2256 mc38 cells and 24917 genes
# normalization
# multiplied each entry by (median / current_sum) so that the sum across the row is equal to the median.
# NOTE(review): median is taken over n_counts of ALL cells (liver_obs), not
# just the mc38 subset — the commented alternative below would use the subset.
med_sum <- median(liver_obs$n_counts)
# med_sum <- median(rowSums(liver_tumor_mat)) #median of the sum of counts across the cells
mat_normalized_t <- apply(t(liver_tumor_mat), 2, function(i) i * med_sum / sum(i))
liver_tumor_mat_norm <- t(mat_normalized_t)
# sum(rowSums(mat_normalized) == med_sum)
dim(liver_tumor_mat_norm) # 2256 cells by 24917 genes (normalized across all the cells)
# 2. identify tumor-specific gene modules using NMF
# - center the expression matrix individually by removing the mean expression for each gene.
# - set negative values to zero.
# - perform sparse nonsmooth NMF using nsNMF function in NMF package (rank = 20).
# liver-NMF
liver_tumor_mat_center <- liver_tumor_mat_norm - colMeans(liver_tumor_mat_norm)[col(liver_tumor_mat_norm)]
# range(colMeans(liver_tumor_mat_center))
liver_tumor_mat_nn <- liver_tumor_mat_center
liver_tumor_mat_nn[liver_tumor_mat_nn < 0] <- 0
dim(liver_tumor_mat_nn) # 2256 cells and 24917 genes
# drop genes that are all-zero after centering/clipping (NMF cannot use them)
liver_tumor_mat_nn_sel <- liver_tumor_mat_nn[, colSums(liver_tumor_mat_nn) != 0]
dim(liver_tumor_mat_nn_sel) # 2256 cells by 20787 genes
# TEST CASE:
# sample_raw <- liver_tumor_mat_nn_sel[1:1000, 1:2000]
# sample <- sample_raw[rowSums(sample_raw) != 0, colSums(sample_raw) != 0]
# dim(sample)
# # estim.r <- nmf(x = t(sample),
# #                rank = 2:5, nrun=11, seed=123456)
# # png(file = "./estim.png", width = 1024, height = 768)
# # plot(estim.r)
# # dev.off()
# fit1 <- estim.r$fit$`2`
# s_max_fit1 <- extractFeatures(fit1, method = "max")
# s_kim_fit1 <- extractFeatures(fit1)
# lapply(s_max_fit1, write, "./liver_module_max_fit1.txt", append = TRUE, ncolumns=1000)
# lapply(s_kim_fit1, write, "./liver_module_kim_fit1.txt", append = TRUE, ncolumns=1000)
# fit2 <- nmf(x = t(sample), # feature by sample
#             rank = 10, method = "nsNMF")
# s_max_fit2 <- extractFeatures(fit2, method = "max")
# s_kim_fit2 <- extractFeatures(fit2)
# lapply(s_max_fit2, write, "./liver_module_max_fit2.txt", append = TRUE, ncolumns=1000)
# lapply(s_kim_fit2, write, "./liver_module_kim_fit2.txt", append = TRUE, ncolumns=1000)
# NOTE(review): the section header above says rank = 20, but rank = 10 is
# used here — confirm which rank was intended.
nmf_res_liver <- nmf(x = t(liver_tumor_mat_nn_sel), # feature by sample
                     rank = 10, method = "nsNMF")
nmf_res_liver@fit
# <Object of class:NMFns>
# features: 764
# basis/rank: 10
# samples: 2256
# theta: 0.5
s_max <- extractFeatures(nmf_res_liver, method = "max")
s_kim <- extractFeatures(nmf_res_liver)
# NOTE(review): both lapply calls below append module contents to the SAME
# file (write, then cat with a trailing newline), so each module's gene list
# appears to be written twice — confirm the intended output format.
lapply(s_max, write, "./liver_module_max.txt", append = TRUE, ncolumns=5000)
lapply(s_max, cat, "\n", file = "./liver_module_max.txt", append = TRUE)
# lapply(s_kim, write, "./liver_module_kim.txt", append = TRUE, ncolumns=5000)
library("reticulate")
use_python("/Applications/anaconda3/bin/python3", required = T)
sc <- import("scanpy")
library(tidyverse)
library(NMF)
install.extras('NMF')
# load liver data (decontaminated and preprocessed)
setwd("~/Documents/projects/spatial-genomics")
liver_h5ad <- sc$read_h5ad("./data/all_batches_mouse_only_raw_proximities.h5ad")
liver_h5ad
liver_mat_raw <- liver_h5ad$X
dim(liver_mat_raw) # 5434 cells by 55287 genes (human + mouse genes)
liver_cell_names <- liver_h5ad$obs_names$to_list()
rownames(liver_mat_raw) <- liver_cell_names <- make.names(liver_cell_names, allow_ = F)
colnames(liver_mat_raw) <- liver_gene_names <- liver_h5ad$var_names$to_list()
liver_obs <- liver_h5ad$obs
liver_obs$cell_iden <- rownames(liver_obs)
table(liver_obs$CellType)
liver_mat <- liver_mat_raw[, grep("mm10", colnames(liver_mat_raw))]
# filter out human transcripts
dim(liver_mat) # 5434 cells, 24917 genes
# load location info
liver_obs$location <- paste0(liver_obs$batch, "_", liver_obs$X, "_", liver_obs$Y)
table(liver_obs$CellType)
# sum(rownames(liver_mat) == liver_obs$cell_iden)
liver_tumor_mat <- liver_mat[liver_obs$CellType == "mc38", ]
dim(liver_tumor_mat) # 2256 mc38 cells and 24917 genes
# normalization
# multiplied each entry by (median / current_sum) so that the sum across the row is equal to the median.
med_sum <- median(liver_obs$n_counts)
# med_sum <- median(rowSums(liver_tumor_mat)) #median of the sum of counts across the cells
mat_normalized_t <- apply(t(liver_tumor_mat), 2, function(i) i * med_sum / sum(i))
liver_tumor_mat_norm <- t(mat_normalized_t)
# sum(rowSums(mat_normalized) == med_sum)
dim(liver_tumor_mat_norm) # 2256 cells by 24917 genes (normalized across all the cells)
# 2. identify tumor-specific gene modules using NMF
# - center the expression matrix individually by removing the mean expression for each gene.
# - set negative values to zero.
# - perform sparse nonsmooth NMF using nsNMF function in NMF package (rank = 20).
# liver-NMF
liver_tumor_mat_center <- liver_tumor_mat_norm - colMeans(liver_tumor_mat_norm)[col(liver_tumor_mat_norm)]
# range(colMeans(liver_tumor_mat_center))
liver_tumor_mat_nn <- liver_tumor_mat_center
liver_tumor_mat_nn[liver_tumor_mat_nn < 0] <- 0
dim(liver_tumor_mat_nn) # 2256 cells and 24917 genes
liver_tumor_mat_nn_sel <- liver_tumor_mat_nn[, colSums(liver_tumor_mat_nn) != 0]
dim(liver_tumor_mat_nn_sel) # 2256 cells by 20787 genes
# TEST CASE:
# sample_raw <- liver_tumor_mat_nn_sel[1:1000, 1:2000]
# sample <- sample_raw[rowSums(sample_raw) != 0, colSums(sample_raw) != 0]
# dim(sample)
# # estim.r <- nmf(x = t(sample),
# # rank = 2:5, nrun=11, seed=123456)
# # png(file = "./estim.png", width = 1024, height = 768)
# # plot(estim.r)
# # dev.off()
# fit1 <- estim.r$fit$`2`
# s_max_fit1 <- extractFeatures(fit1, method = "max")
# s_kim_fit1 <- extractFeatures(fit1)
# lapply(s_max_fit1, write, "./liver_module_max_fit1.txt", append = TRUE, ncolumns=1000)
# lapply(s_kim_fit1, write, "./liver_module_kim_fit1.txt", append = TRUE, ncolumns=1000)
# fit2 <- nmf(x = t(sample), # feature by sample
# rank = 10, method = "nsNMF")
# s_max_fit2 <- extractFeatures(fit2, method = "max")
# s_kim_fit2 <- extractFeatures(fit2)
# lapply(s_max_fit2, write, "./liver_module_max_fit2.txt", append = TRUE, ncolumns=1000)
# lapply(s_kim_fit2, write, "./liver_module_kim_fit2.txt", append = TRUE, ncolumns=1000)
nmf_res_liver <- nmf(x = t(liver_tumor_mat_nn_sel), # feature by sample
rank = 10, method = "nsNMF")
nmf_res_liver@fit
# <Object of class:NMFns>
# features: 764
# basis/rank: 10
# samples: 2256
# theta: 0.5
s_max <- extractFeatures(nmf_res_liver, method = "max")
s_kim <- extractFeatures(nmf_res_liver)
lapply(s_max, write, "./liver_module_max.txt", append = TRUE, ncolumns=5000)
lapply(s_max, cat, "\n", file = "./liver_module_max.txt", append = TRUE)
# lapply(s_kim, write, "./liver_module_kim.txt", append = TRUE, ncolumns=5000)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_functions.R
\name{management.uploads.get}
\alias{management.uploads.get}
\title{List uploads to which the user has access.}
\usage{
management.uploads.get(accountId, webPropertyId, customDataSourceId, uploadId)
}
\arguments{
\item{accountId}{Account Id for the upload to retrieve}
\item{webPropertyId}{Web property Id for the upload to retrieve}
\item{customDataSourceId}{Custom data source Id for upload to retrieve}
\item{uploadId}{Upload Id to retrieve}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/analytics
\item https://www.googleapis.com/auth/analytics.edit
\item https://www.googleapis.com/auth/analytics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/analytics", "https://www.googleapis.com/auth/analytics.edit", "https://www.googleapis.com/auth/analytics.readonly"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/analytics/}{Google Documentation}
}
| /googleanalyticsv3.auto/man/management.uploads.get.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,256 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_functions.R
\name{management.uploads.get}
\alias{management.uploads.get}
\title{List uploads to which the user has access.}
\usage{
management.uploads.get(accountId, webPropertyId, customDataSourceId, uploadId)
}
\arguments{
\item{accountId}{Account Id for the upload to retrieve}
\item{webPropertyId}{Web property Id for the upload to retrieve}
\item{customDataSourceId}{Custom data source Id for upload to retrieve}
\item{uploadId}{Upload Id to retrieve}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/analytics
\item https://www.googleapis.com/auth/analytics.edit
\item https://www.googleapis.com/auth/analytics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/analytics", "https://www.googleapis.com/auth/analytics.edit", "https://www.googleapis.com/auth/analytics.readonly"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/analytics/}{Google Documentation}
}
|
#' Scale_fill colour scheme
#'
#' `grafify` internally includes colour-blind compatible schemes for fill and colour/color aesthetics.
#' Note that these **only** work for categorical variables. Use the brewer or viridis packages for numeric gradient scales.
#'
#' The default for \code{scale_fill_grafify()}, \code{scale_colour_grafify()} or \code{scale_color_grafify()} is a list of 55 colours as part of \code{palette = "all_grafify"}.
#'
#' Obviously, it is not recommended to use so many colours, but implementing this was easiest to prevent errors when using a lot of categorical variables.
#'
#' There are eight palettes with 5-10 colours each, which are recommended. These can be called by naming the colour scheme using \code{palette = } argument.
#' Additional options include "okabe_ito", "vibrant", "bright", "pale", "muted", "dark", "light", and "contrast". These are taken from [Paul Tol](https://personal.sron.nl/~pault/#sec:qualitative), [Mike Mol](https://mikemol.github.io/technique/colorblind/2018/02/11/color-safe-palette.html) and [Okabe Ito](http://jfly.iam.u-tokyo.ac.jp/color/#pallet).
#' `scale_fill_grafify2` and `scale_colour_grafify2` are identical except that when the number of categorical variables is fewer than the total number of colour shades in the palette (e.g. if you have 3 groups and the "okabe_ito" palette has 7 colours), these functions will pick the most 'distant' colours from the scheme rather than going sequentially. If you want colours assigned sequentially, use `scale_fill_grafify` or `scale_colour_grafify` instead.
#'
#' @param palette Name of the colour scheme. Default set to \code{palette = "all_grafify"}. Provide names as above in quotes.
#' @param reverse Whether the colour order should be reversed.
#' @param ... Additional parameters for `scale_fill` or `scale_colour`
#'
#' @return ggplot scale_fill function
#' @export scale_fill_grafify2
#' @import ggplot2
#'
#' @examples
#' #add a grafify2 fill scheme to ggplot
#' ggplot(neuralgia, aes(x = Treatment, y = Duration))+
#' geom_point(aes(fill = Sex), shape = 21, size = 3)+
#' scale_fill_grafify2(palette = "muted")
#'
scale_fill_grafify2 <- function(palette = "all_grafify", reverse = FALSE, ...) {
  # Build the palette-generating function for the requested scheme, then
  # register it with ggplot2 as a discrete fill scale under a "graf_"-prefixed
  # scale name. Extra arguments are forwarded to discrete_scale().
  palette_fun <- graf_col_palette_default(palette = palette, reverse = reverse)
  scale_name <- paste0("graf_", palette)
  discrete_scale("fill", scale_name, palette = palette_fun, ...)
}
| /R/scale_fill_grafify2.R | no_license | EpiSuRgeon/grafify | R | false | false | 2,350 | r | #' Scale_fill colour scheme
#'
#' `grafify` internally includes colour-blind compatible schemes for fill and colour/color aesthetics.
#' Note that these **only** work for categorical variables. Use the brewer or viridis packages for numeric gradient scales.
#'
#' The default for \code{scale_fill_grafify()}, \code{scale_colour_grafify()} or \code{scale_color_grafify()} is a list of 55 colours as part of \code{palette = "all_grafify"}.
#'
#' Obviously, it is not recommended to use so many colours, but implementing this was easiest to prevent errors when using a lot of categorical variables.
#'
#' There are eight palettes with 5-10 colours each, which are recommended. These can be called by naming the colour scheme using \code{palette = } argument.
#' Additional options include "okabe_ito", "vibrant", "bright", "pale", "muted", "dark", "light", and "contrast". These are taken from [Paul Tol](https://personal.sron.nl/~pault/#sec:qualitative), [Mike Mol](https://mikemol.github.io/technique/colorblind/2018/02/11/color-safe-palette.html) and [Okabe Ito](http://jfly.iam.u-tokyo.ac.jp/color/#pallet).
#' `scale_fill_grafify2` and `scale_colour_grafify2` are identical except that when the number of categorical variables is fewer than the total number of colour shades in the palette (e.g. if you have 3 groups and the "okabe_ito" palette has 7 colours), these functions will pick the most 'distant' colours from the scheme rather than going sequentially. If you want colours assigned sequentially, use `scale_fill_grafify` or `scale_colour_grafify` instead.
#'
#' @param palette Name of the colour scheme. Default set to \code{palette = "all_grafify"}. Provide names as above in quotes.
#' @param reverse Whether the colour order should be reversed.
#' @param ... Additional parameters for `scale_fill` or `scale_colour`
#'
#' @return ggplot scale_fill function
#' @export scale_fill_grafify2
#' @import ggplot2
#'
#' @examples
#' #add a grafify2 fill scheme to ggplot
#' ggplot(neuralgia, aes(x = Treatment, y = Duration))+
#' geom_point(aes(fill = Sex), shape = 21, size = 3)+
#' scale_fill_grafify2(palette = "muted")
#'
scale_fill_grafify2 <- function(palette = "all_grafify", reverse = FALSE, ...) {
  # Build the palette-generating function for the requested scheme, then
  # register it with ggplot2 as a discrete fill scale under a "graf_"-prefixed
  # scale name. Extra arguments are forwarded to discrete_scale().
  palette_fun <- graf_col_palette_default(palette = palette, reverse = reverse)
  scale_name <- paste0("graf_", palette)
  discrete_scale("fill", scale_name, palette = palette_fun, ...)
}
|
# k-fold cross-validated xgboost (random-forest mode, num_parallel_tree trees,
# a single boosting round) on the raw feature set. Produces out-of-fold
# predictions for the training data, the fold-averaged predictions for the
# test data and the mean AUC, saved together under ./cache/<folder>/.
# NOTE(review): this script assumes `seed`, `k.fold`, `param2`, `folder`,
# `train.raw`, `train.raw.y`, `test.raw` and `dtest.raw` are defined by the
# caller (presumably the sourced param/data scripts below) -- confirm against
# the driver script.
library(dplyr)
library(caret)
library(xgboost)
library(verification)
#source("../param/param.R")
#source("../data/prepare_data.R")
set.seed(seed)
# stratified fold indices over the training labels
flds <- createFolds(train.raw.y, k = k.fold)
info <- list()
model.name <- paste0("rf_xgboost_raw","_trees",param2$num_parallel_tree)
print(paste("#--------------",model.name,"--------------#"))
for(i in 1:length(flds)){
  #------ split into train and test folds ------#
  test.index <- flds[[i]]
  train.index <- unlist(flds[-i])
  names(train.index) <- NULL
  # fold i is held out; all other folds form the training set
  cv.train <- train.raw[train.index,]
  cv.test <- train.raw[test.index,]
  cv.train.y <- train.raw.y[train.index]
  cv.test.y <- train.raw.y[test.index]
  dcvtrain <- xgb.DMatrix(data=as.matrix(cv.train), label=cv.train.y)
  dcvtest <- xgb.DMatrix(data=as.matrix(cv.test))
  #------------- train on the cv folds --------------#
  # nrounds = 1 with num_parallel_tree > 1 in param2 makes this behave
  # like a random forest rather than boosting
  xg.cv <- xgb.train( params = param2,
                      data = dcvtrain,
                      nrounds = 1,
                      verbose = F,
                      maximize = FALSE
  )
  Pred.cv <- predict(xg.cv, dcvtest)
  result <- roc.area(cv.test.y, Pred.cv)$A
  print(paste("___ AUC =", result))
  #------------- predict on the held-out fold and on the test set ------------#
  Pred.test <- predict(xg.cv, dtest.raw)
  #-------- store fold info --------#
  fold_info <- list()
  fold_info[["fold"]] <- i
  fold_info[["seed"]] <- seed
  fold_info[["test.index"]] <- test.index
  fold_info[["train.index"]] <- train.index
  fold_info[["AUC"]] <- result
  fold_info[["prediction.cv"]] <- Pred.cv
  fold_info[["prediction.test"]] <- Pred.test
  info[[i]] <- fold_info
  #-------- store fold info --------#
}
# stitch the out-of-fold predictions back into original row order
Predictions <- unlist(lapply(info, `[`, "prediction.cv"))
index <- unlist(lapply(info, `[`, "test.index"))
auc <- unlist(lapply(info, `[`, "AUC"))
auc <- mean(auc)
train.df <- data_frame(index=index, cv.pred=Predictions)
train.df <- arrange(train.df, index)
# average the k per-fold test predictions column-wise
mean.df <- data.frame(matrix(NA,nrow=(nrow(test.raw))))
for(p in 1:length(info)){
  Predictions2 <-info[[p]]$prediction.test
  mean.df <- cbind(mean.df, Predictions2)
}
mean.df[,1]<-NULL
mean.df$mean <- rowMeans(mean.df)
# bundle mean AUC, oof train predictions and averaged test predictions
result <- list()
result[["seed"]]<-seed
result[["AUC"]]<- auc
result[["cv.train"]]<-train.df$cv.pred
result[["test"]]<-mean.df$mean
saveRDS(result, paste0("./cache/",folder,"/",model.name,"_",auc,".RData"))
print(paste(model.name, "___", auc))
| /crossvalidater/rf_xgboost_raw.R | no_license | danielspringt/santander-stacker-pipeline | R | false | false | 2,582 | r | library(dplyr)
library(caret)
library(xgboost)
library(verification)
#source("../param/param.R")
#source("../data/prepare_data.R")
set.seed(seed)
flds <- createFolds(train.raw.y, k = k.fold)
info <- list()
model.name <- paste0("rf_xgboost_raw","_trees",param2$num_parallel_tree)
print(paste("#--------------",model.name,"--------------#"))
for(i in 1:length(flds)){
#------ split in train und test ------#
test.index <- flds[[i]]
train.index <- unlist(flds[-i])
names(train.index) <- NULL
#------ split in train und test ------#
cv.train <- train.raw[train.index,]
cv.test <- train.raw[test.index,]
cv.train.y <- train.raw.y[train.index]
cv.test.y <- train.raw.y[test.index]
dcvtrain <- xgb.DMatrix(data=as.matrix(cv.train), label=cv.train.y)
dcvtest <- xgb.DMatrix(data=as.matrix(cv.test))
#------------- Training cv--------------#
xg.cv <- xgb.train( params = param2,
data = dcvtrain,
nrounds = 1,
verbose = F,
maximize = FALSE
)
Pred.cv <- predict(xg.cv, dcvtest)
result <- roc.area(cv.test.y, Pred.cv)$A
print(paste("___ AUC =", result))
#------------- Training cv--------------#
#------------- Training test------------#
Pred.test <- predict(xg.cv, dtest.raw)
#------------- Training test------------#
#-------- speichere fold info --------#
fold_info <- list()
fold_info[["fold"]] <- i
fold_info[["seed"]] <- seed
fold_info[["test.index"]] <- test.index
fold_info[["train.index"]] <- train.index
fold_info[["AUC"]] <- result
fold_info[["prediction.cv"]] <- Pred.cv
fold_info[["prediction.test"]] <- Pred.test
info[[i]] <- fold_info
#-------- speichere fold info --------#
}
Predictions <- unlist(lapply(info, `[`, "prediction.cv"))
index <- unlist(lapply(info, `[`, "test.index"))
auc <- unlist(lapply(info, `[`, "AUC"))
auc <- mean(auc)
train.df <- data_frame(index=index, cv.pred=Predictions)
train.df <- arrange(train.df, index)
mean.df <- data.frame(matrix(NA,nrow=(nrow(test.raw))))
for(p in 1:length(info)){
Predictions2 <-info[[p]]$prediction.test
mean.df <- cbind(mean.df, Predictions2)
}
mean.df[,1]<-NULL
mean.df$mean <- rowMeans(mean.df)
#mean auc
result <- list()
result[["seed"]]<-seed
result[["AUC"]]<- auc
result[["cv.train"]]<-train.df$cv.pred
result[["test"]]<-mean.df$mean
saveRDS(result, paste0("./cache/",folder,"/",model.name,"_",auc,".RData"))
print(paste(model.name, "___", auc))
|
library(tidyverse)
library(rvest)
# Scrape the Wikipedia page listing best-selling music artists and bind the
# matched wikitable(s) into a single data frame.
webtable <- read_html("https://en.wikipedia.org/wiki/List_of_best-selling_music_artists")%>%
  # select the specific wikitable by its CSS position on the page
  # NOTE(review): nth-child(14) is brittle -- it breaks whenever the page
  # layout changes; consider selecting by caption instead
  html_nodes("table.wikitable:nth-child(14)")%>%
  html_table()%>%
  rbind.data.frame()
webtable | /Webscraping using rvest.R | no_license | katti97/learningdatascience | R | false | false | 362 | r | library(tidyverse)
library(rvest)
#importing from the website using the website link
webtable <- read_html("https://en.wikipedia.org/wiki/List_of_best-selling_music_artists")%>%
#importing the table's style that contains the required table
html_nodes("table.wikitable:nth-child(14)")%>%
html_table()%>%
rbind.data.frame()
webtable |
# Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#options
# Package-wide option store: a private environment holding the active backend
# and related settings, read and written through rmr.options() below.
rmr.options.env = new.env(parent=emptyenv())
rmr.options.env$backend = "hadoop"
rmr.options.env$profile.nodes = "off"
rmr.options.env$hdfs.tempdir = "/tmp" #can't check it exists here
rmr.options.env$exclude.objects = NULL
rmr.options.env$backend.parameters = list()
# Chain `action` onto the global .Last hook so it runs at R shutdown,
# preserving any previously installed .Last handler.
add.last =
  function(action) {
    old.Last = {
      if (exists(".Last"))
        .Last
      else
        function() NULL}
    .Last <<-
      function() {
        action()
        # restore the previous handler and invoke it, so repeated
        # add.last() calls unwind like a stack
        .Last <<- old.Last
        .Last()}}
# Get or set package options.
#
# Named arguments set options; unnamed character arguments read them back
# (a single name returns the bare value, several return a named list,
# no arguments returns NULL).
#
# Bug fixed: when profile.nodes was supplied as a logical it was normalized
# to "calls"/"off" but, because of a stray `else`, never actually stored.
# The assignment now always runs after normalization.
rmr.options =
  function(
    backend = c("hadoop", "local"),
    profile.nodes = c("off", "calls", "memory", "both"),
    hdfs.tempdir = "/tmp",
    exclude.objects = NULL,
    backend.parameters = list()) {
    opt.assign = Curry(assign, envir = rmr.options.env)
    # sys.call() exposes only the arguments the caller actually supplied,
    # letting us tell "set" (named) apart from "read" (unnamed) usage
    args = as.list(sys.call())[-1]
    is.named.arg = function(x) is.element(x, names(args))
    if(is.named.arg("backend"))
      opt.assign("backend", match.arg(backend))
    if(is.named.arg("profile.nodes")) {
      # accept TRUE/FALSE as shorthand for "calls"/"off"
      if (is.logical(profile.nodes)) {
        profile.nodes = {
          if(profile.nodes)
            "calls"
          else
            "off"}}
      opt.assign("profile.nodes", match.arg(profile.nodes))}
    if(is.named.arg("hdfs.tempdir")) {
      if(!dfs.exists(hdfs.tempdir)) {
        # create it now and remove it at shutdown (unless running in a task)
        hdfs.mkdir(hdfs.tempdir)
        add.last(function() if(!in.a.task()) hdfs.rmr(hdfs.tempdir))}
      opt.assign("hdfs.tempdir", hdfs.tempdir)}
    if(is.named.arg("backend.parameters"))
      opt.assign("backend.parameters", backend.parameters)
    if(is.named.arg("exclude.objects"))
      opt.assign("exclude.objects", exclude.objects)
    if (rmr.options.env$backend == "hadoop")
      if(!hdfs.exists(hdfs.tempdir)) #can't do this at package load time
        warning("Please set an HDFS temp directory with rmr.options(hdfs.tempdir = ...)")
    # unnamed arguments are option names to read back
    read.args = {
      if(is.null(names(args)))
        args
      else
        named.slice(args, "")}
    if(length(read.args) > 0) {
      read.args = simplify2array(read.args)
      retval = as.list(rmr.options.env)[read.args]
      if (length(retval) == 1) retval[[1]] else retval}
    else NULL }
## map and reduce function generation
# Lift ordinary functions into the (k, v) signature expected by map steps:
# with one function it receives the whole keyval pair; with two, the first
# transforms the keys and the second the values.
to.map =
  function(fun1, fun2 = identity) {
    whole.pair = missing(fun2)
    if (whole.pair)
      function(k, v) fun1(keyval(k, v))
    else
      function(k, v) keyval(fun1(k), fun2(v))}
# reducers have the same signature, so the same adapter applies
to.reduce = to.map
## mapred combinators
# Compose a mapred stage with a following map stage: run `mapred` first and
# feed its keys/values to `map`, propagating NULL (empty output) unchanged.
compose.mapred =
  function(mapred, map)
    function(k, v) {
      stage.out = mapred(k, v)
      if (is.null(stage.out))
        NULL
      else
        map(keys(stage.out), values(stage.out))}
# Fan the same input into two mapred functions and concatenate their keyvals.
union.mapred =
  function(mr1, mr2)
    function(k, v) c.keyval(mr1(k, v), mr2(k, v))
# backend independent dfs section
# A dfs entry is "hidden" (metadata, not data) when its basename starts with
# a dot or an underscore -- e.g. Hadoop's _SUCCESS, _logs and .crc files.
is.hidden.file =
  function(fname)
    grepl("^[._]", basename(fname))
# Enumerate the concrete data files behind a dfs path: the path itself on the
# local backend or when it is a plain file; for an hdfs directory, the
# non-hidden part files it contains.
part.list =
  function(fname) {
    fname = to.dfs.path(fname)
    if(rmr.options('backend') == "local") fname
    else {
      if(dfs.is.dir(fname)) {
        du = hdfs.ls(fname)
        du[!is.hidden.file(du[,"path"]),"path"]}
      else fname}}
# Backend-dispatching dfs helpers: each resolves its argument through
# to.dfs.path(), then routes to the hdfs.* wrappers on the "hadoop" backend
# or to plain file-system calls on the "local" backend.
# TRUE if the dfs path exists on the active backend.
dfs.exists =
  function(fname) {
    fname = to.dfs.path(fname)
    if (rmr.options('backend') == 'hadoop')
      hdfs.exists(fname)
    else file.exists(fname)}
# Recursively delete a dfs path; returns NULL.
dfs.rmr =
  function(fname) {
    fname = to.dfs.path(fname)
    if(rmr.options('backend') == 'hadoop')
      hdfs.rmr(fname)
    else stopifnot(unlink(fname, recursive = TRUE) == 0)
    NULL}
# TRUE if the dfs path is a directory.
dfs.is.dir =
  function(fname) {
    fname = to.dfs.path(fname)
    if (rmr.options('backend') == 'hadoop')
      hdfs.isdir(fname)
    else file.info(fname)[["isdir"]]}
# TRUE if the dataset holds no key-value pairs; anything over 1000 bytes is
# assumed non-empty without reading it back.
dfs.empty =
  function(fname) {
    if(dfs.size(fname) > 1000) #size heuristic
      FALSE
    else
      length.keyval(from.dfs(fname)) == 0}
# Total size in bytes of the data files at a dfs path (hidden part files
# such as _SUCCESS are excluded on hadoop; 0 if the path is absent).
dfs.size =
  function(fname) {
    fname = to.dfs.path(fname)
    if(rmr.options('backend') == 'hadoop') {
      du = hdfs.ls(fname)
      if(is.null(du)) 0
      else
        sum(du[!is.hidden.file(du[["path"]]), "size"])}
    else file.info(fname)[1, "size"] }
# Move/rename a dfs path; returns NULL.
dfs.mv =
  function(from, to) {
    fname = to.dfs.path(from)
    if(rmr.options('backend') == 'hadoop')
      hdfs.mv(fname, to)
    else
      stopifnot(file.rename(fname, to))
    NULL}
# Create a dfs directory; returns NULL.
dfs.mkdir =
  function(fname) {
    fname = to.dfs.path(fname)
    if (rmr.options('backend') == 'hadoop')
      hdfs.mkdir(fname)
    else
      stopifnot(all(dir.create(fname)))
    NULL}
# dfs bridge
to.dfs.path =
  function(input) {
    # A big-data object is either a path (character) or a zero-argument
    # function returning one, as produced by dfs.tempfile(); anything else
    # yields NULL.
    if (is.character(input))
      return(input)
    if (is.function(input))
      return(input())
    invisible(NULL)}
# Shell out to the hadoop streaming jar to load a local typedbytes file
# `inf` into hdfs at `outf`.
loadtb =
  function(inf, outf)
    system(paste(hadoop.streaming(), "loadtb", outf, "<", inf))
# Write a key-value object to the dfs and return `output` (by default a
# managed tempfile closure). The data is first serialized to a local temp
# file in the requested output format, then moved to the dfs: binary formats
# go through the streaming jar's loadtb, text through hdfs.put; on the local
# backend the file is simply copied.
to.dfs =
  function(
    kv,
    output = dfs.tempfile(),
    format = "native") {
    kv = as.keyval(kv)
    tmp = tempfile()
    dfs.output = to.dfs.path(output)
    if(is.character(format))
      format = make.output.format(format)
    keyval.writer = make.keyval.writer(tmp, format)
    keyval.writer(kv)
    # the writer keeps its connection(s) as `con` in its closure; reach in
    # and close them so the temp file is flushed before the upload
    eval(
      quote(
        if(length(con) == 1)
          close(con)
        else lapply(con, close)),
      envir=environment(keyval.writer))
    if(rmr.options('backend') == 'hadoop') {
      if(format$mode == "binary")
        loadtb(tmp, dfs.output)
      else #text
        hdfs.put(tmp, dfs.output)}
    else { #local
      if(file.exists(dfs.output))
        stop("Can't overwrite ", dfs.output)
      file.copy(tmp, dfs.output)}
    unlink(tmp, recursive=TRUE)
    output}
# Read a dfs dataset back into memory as a single keyval object.
# On hadoop the part files are first merged into a local temp file (binary
# formats via the streaming jar's dumptb, text via hdfs.get + concatenation),
# then deserialized with the input format's reader; on the local backend the
# file is read in place.
from.dfs = function(input, format = "native") {
  # read all key-value records from one local file and concatenate them
  read.file = function(fname) {
    keyval.reader =
      make.keyval.reader(fname, format)
    retval = make.fast.list()
    kv = keyval.reader()
    while(!is.null(kv)) {
      retval(list(kv))
      kv = keyval.reader()}
    # close the connection held in the reader's closure
    eval(
      quote(close(con)),
      envir = environment(keyval.reader))
    c.keyval(retval())}
  # append each typedbytes part file to `dest` via the streaming jar
  dumptb = function(src, dest){
    lapply(src, function(x) system(paste(hadoop.streaming(), "dumptb", x, ">>", dest)))}
  # fetch each text part file locally and append it to `dest`
  # (Windows needs `type` through COMSPEC instead of `cat`)
  getmerge = function(src, dest) {
    on.exit(unlink(tmp))
    tmp = tempfile()
    lapply(
      src,
      function(x) {
        hdfs.get(as.character(x), tmp)
        if(.Platform$OS.type == "windows") {
          cmd = paste('type', tmp, '>>' , dest)
          system(paste(Sys.getenv("COMSPEC"),"/c",cmd))}
        else {
          system(paste('cat', tmp, '>>' , dest))}
        unlink(tmp)})
    dest}
  fname = to.dfs.path(input)
  if(is.character(format)) format = make.input.format(format)
  if(rmr.options("backend") == "hadoop") {
    tmp = tempfile()
    if(format$mode == "binary")
      dumptb(part.list(fname), tmp)
    else getmerge(part.list(fname), tmp)}
  else
    tmp = fname
  retval = read.file(tmp)
  if(rmr.options("backend") == "hadoop") unlink(tmp)
  retval}
# mapreduce
# Normalize a path or file: URL to a canonical absolute path: on Windows
# backslashes become forward slashes first; parse_url strips any scheme/host
# prefix, then repeated slashes are collapsed and a trailing slash removed.
rmr.normalize.path =
  function(url.or.path) {
    if(.Platform$OS.type == "windows")
      url.or.path = gsub("\\\\","/", url.or.path)
    gsub(
      "/$",
      "",
      gsub(
        "/+",
        "/",
        paste(
          "/",
          parse_url(url.or.path)$path,
          sep = "")))}
# Path of the input split being processed by the current map task, read from
# the streaming environment (newer mapreduce_* variable first, then the
# legacy name); NULL when not running inside a task.
current.input =
  function() {
    fname =
      default(
        Sys.getenv("mapreduce_map_input_file"),
        Sys.getenv("map_input_file"),
        "")
    if (fname == "") NULL
    else rmr.normalize.path(fname)}
# Create a managed dfs temp path: returns a zero-argument closure yielding
# the path. A finalizer on the closure's environment deletes the path when
# the closure is garbage collected -- skipped inside tasks, so workers don't
# delete data owned by the driver.
dfs.tempfile =
  function(
    pattern = "file",
    tmpdir = {
      if(rmr.options("backend") == "hadoop")
        rmr.options("hdfs.tempdir")
      else
        tempdir()}) {
    fname = tempfile(pattern, tmpdir)
    # tempfile() may prefix a drive/scheme like "C:"; keep the path part only
    subfname = strsplit(fname, ":")
    if(length(subfname[[1]]) > 1) fname = subfname[[1]][2]
    namefun = function() {fname}
    reg.finalizer(
      environment(namefun),
      function(e) {
        fname = eval(expression(fname), envir = e)
        if(!in.a.task() && dfs.exists(fname)) dfs.rmr(fname)
      },
      onexit = TRUE)
    namefun}
# Deterministic dfs path for a call: hashes the evaluated call arguments
# under managed.dir, so identical calls map to the same file.
dfs.managed.file = function(call, managed.dir = rmr.options('managed.dir')) {
  file.path(managed.dir, digest(lapply(call, eval)))}
# Run a mapreduce job -- the package's main entry point.
# `input`/`output` are dfs paths (or big-data objects); when `output` is
# NULL the result lands in a managed tempfile whose handle is returned, so
# jobs can be chained. Dispatches to the streaming backend (rmr.stream) or
# the in-process simulator (mr.local) per rmr.options("backend").
mapreduce = function(
  input,
  output = NULL,
  map = to.map(identity),
  reduce = NULL,
  vectorized.reduce = FALSE,
  combine = NULL,
  in.memory.combine = FALSE,
  input.format = "native",
  output.format = "native",
  backend.parameters = list(),
  verbose = TRUE) {
  on.exit(expr = gc(), add = TRUE) #this is here to trigger cleanup of tempfiles
  if (is.null(output))
    output = dfs.tempfile()
  # format names given as strings are expanded to full format objects
  if(is.character(input.format)) input.format = make.input.format(input.format)
  if(is.character(output.format)) output.format = make.output.format(output.format)
  if(!missing(backend.parameters)) warning("backend.parameters is deprecated.")
  backend = rmr.options('backend')
  mr = switch(backend,
              hadoop = rmr.stream,
              local = mr.local,
              stop("Unsupported backend: ", backend))
  mr(map = map,
     reduce = reduce,
     combine = combine,
     vectorized.reduce,
     in.folder = if(is.list(input)) {lapply(input, to.dfs.path)} else to.dfs.path(input),
     out.folder = to.dfs.path(output),
     input.format = input.format,
     output.format = output.format,
     in.memory.combine = in.memory.combine,
     backend.parameters = backend.parameters[[backend]],
     verbose = verbose)
  output
  }
##special jobs
## a sort of relational join very useful in a variety of map reduce algorithms
## to.dfs(lapply(1:10, function(i) keyval(i, i^2)), "/tmp/reljoin.left")
## to.dfs(lapply(1:10, function(i) keyval(i, i^3)), "/tmp/reljoin.right")
## equijoin(left.input="/tmp/reljoin.left", right.input="/tmp/reljoin.right", output = "/tmp/reljoin.out")
## from.dfs("/tmp/reljoin.out")
# Default per-key combiner for equijoin: plain lists are just paired up;
# data-frame-like sides are merged as a cartesian product with columns
# suffixed .l/.r, and an all-NA side (outer-join padding) yields the other
# side alone.
reduce.default =
  function(k, vl, vr) {
    plain.list = function(x) is.list(x) && !is.data.frame(x)
    if (plain.list(vl) || plain.list(vr))
      return(list(left = vl, right = vr))
    left = as.data.frame(vl)
    right = as.data.frame(vr)
    names(left) = paste(names(left), "l", sep = ".")
    names(right) = paste(names(right), "r", sep = ".")
    if (all(is.na(left)))
      right
    else if (all(is.na(right)))
      left
    else
      merge(left, right, by = NULL)}
# Relational equijoin of two keyval datasets in a single mapreduce job.
# Map phase tags each record with its side -- either by applying both map
# functions when a single combined `input` is given, or by prefix-matching
# the current task's input file against the two input paths. Records are
# grouped by key and combined per key by `reduce` (cartesian product of the
# two sides by default); `outer` pads a missing side with NA to obtain
# left/right/full outer-join semantics.
# Fix: removed a stray trailing comma in the final mapreduce() call, which
# passed a spurious empty argument that was only silently absorbed because
# the next unmatched formal has a default.
equijoin =
  function(
    left.input = NULL,
    right.input = NULL,
    input = NULL,
    output = NULL,
    input.format = "native",
    output.format = "native",
    outer = c("", "left", "right", "full"),
    map.left = to.map(identity),
    map.right = to.map(identity),
    reduce = reduce.default) {
    # either both sides are given separately, or one combined input is
    stopifnot(
      xor(
        !is.null(left.input), !is.null(input) &&
          (is.null(left.input) == is.null(right.input))))
    outer = match.arg(outer)
    left.outer = outer == "left"
    right.outer = outer == "right"
    full.outer = outer == "full"
    if (is.null(left.input)) {
      left.input = input}
    # wrap every value as list(val, is.left) so the reducer can split sides
    mark.side =
      function(kv, is.left) {
        kv = split.keyval(kv)
        keyval(keys(kv),
               lapply(values(kv),
                      function(v) {
                        list(val = v, is.left = is.left)}))}
    # index of the first differing character between two strings
    prefix.cmp =
      function(l,r)
        suppressWarnings(
          min(
            which(!(strsplit(l,split="")[[1]] == strsplit(r, split = "")[[1]]))))
    # decide which input the current task is reading by longest path prefix
    is.left.side =
      function(left.input, right.input) {
        li = rmr.normalize.path(to.dfs.path(left.input))
        ri = rmr.normalize.path(to.dfs.path(right.input))
        ci = rmr.normalize.path(current.input())
        prefix.cmp(ci, li) > prefix.cmp(ci, ri)}
    # split the tagged values back into left/right groups of raw values
    reduce.split =
      function(vv) {
        tapply(
          vv,
          sapply(vv, function(v) v$is.left),
          function(v) lapply(v, function(x)x$val),
          simplify = FALSE)}
    # substitute NA for an absent side when outer-join padding applies
    pad.side =
      function(vv, outer)
        if (length(vv) == 0 && (outer)) c(NA) else c.or.rbind(vv)
    map =
      if (is.null(input)) {
        function(k, v) {
          ils = is.left.side(left.input, right.input)
          mark.side(if(ils) map.left(k, v) else map.right(k, v), ils)}}
      else {
        function(k, v) {
          c.keyval(mark.side(map.left(k, v), TRUE),
                   mark.side(map.right(k, v), FALSE))}}
    eqj.reduce =
      function(k, vv) {
        rs = reduce.split(vv)
        left.side = pad.side(rs$`TRUE`, right.outer || full.outer)
        right.side = pad.side(rs$`FALSE`, left.outer || full.outer)
        if(!is.null(left.side) && !is.null(right.side))
          reduce(k[[1]], left.side, right.side)}
    mapreduce(
      map = map,
      reduce = eqj.reduce,
      input = c(left.input, right.input),
      output = output,
      input.format = input.format,
      output.format = output.format)}
# Report task status to the Hadoop streaming framework, which scrapes
# "reporter:status:..." lines from a task's standard error.
status = function(value) {
  msg = sprintf("reporter:status:%s\n", value)
  cat(msg, file = stderr())
}
# Bump a Hadoop job counter by writing the streaming protocol's
# "reporter:counter:group,counter,increment" line to standard error.
increment.counter =
  function(group, counter, increment = 1) {
    payload = paste(group, counter, increment, sep = ",")
    cat(sprintf("reporter:counter:%s\n", payload), file = stderr())
  }
| /pkg/R/mapreduce.R | no_license | justin2061/rmr2 | R | false | false | 13,433 | r | # Copyright 2011 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#options
rmr.options.env = new.env(parent=emptyenv())
rmr.options.env$backend = "hadoop"
rmr.options.env$profile.nodes = "off"
rmr.options.env$hdfs.tempdir = "/tmp" #can't check it exists here
rmr.options.env$exclude.objects = NULL
rmr.options.env$backend.parameters = list()
add.last =
function(action) {
old.Last = {
if (exists(".Last"))
.Last
else
function() NULL}
.Last <<-
function() {
action()
.Last <<- old.Last
.Last()}}
rmr.options =
function(
backend = c("hadoop", "local"),
profile.nodes = c("off", "calls", "memory", "both"),
hdfs.tempdir = "/tmp",
exclude.objects = NULL,
backend.parameters = list()) {
opt.assign = Curry(assign, envir = rmr.options.env)
args = as.list(sys.call())[-1]
is.named.arg = function(x) is.element(x, names(args))
if(is.named.arg("backend"))
opt.assign("backend", match.arg(backend))
if(is.named.arg("profile.nodes")) {
if (is.logical(profile.nodes)) {
profile.nodes = {
if(profile.nodes)
"calls"
else
"off"}}
else
opt.assign("profile.nodes", match.arg(profile.nodes))}
if(is.named.arg("hdfs.tempdir")) {
if(!dfs.exists(hdfs.tempdir)) {
hdfs.mkdir(hdfs.tempdir)
add.last(function() if(!in.a.task()) hdfs.rmr(hdfs.tempdir))}
opt.assign("hdfs.tempdir", hdfs.tempdir)}
if(is.named.arg("backend.parameters"))
opt.assign("backend.parameters", backend.parameters)
if(is.named.arg("exclude.objects"))
opt.assign("exclude.objects", exclude.objects)
if (rmr.options.env$backend == "hadoop")
if(!hdfs.exists(hdfs.tempdir)) #can't do this at package load time
warning("Please set an HDFS temp directory with rmr.options(hdfs.tempdir = ...)")
read.args = {
if(is.null(names(args)))
args
else
named.slice(args, "")}
if(length(read.args) > 0) {
read.args = simplify2array(read.args)
retval = as.list(rmr.options.env)[read.args]
if (length(retval) == 1) retval[[1]] else retval}
else NULL }
## map and reduce function generation
to.map =
function(fun1, fun2 = identity) {
if (missing(fun2)) {
function(k, v) fun1(keyval(k, v))}
else {
function(k, v) keyval(fun1(k), fun2(v))}}
to.reduce = to.map
## mapred combinators
compose.mapred =
function(mapred, map)
function(k, v) {
out = mapred(k, v)
if (is.null(out)) NULL
else map(keys(out), values(out))}
union.mapred =
function(mr1, mr2) function(k, v) {
c.keyval(mr1(k, v), mr2(k, v))}
# backend independent dfs section
is.hidden.file =
function(fname)
regexpr("[\\._]", basename(fname)) == 1
part.list =
function(fname) {
fname = to.dfs.path(fname)
if(rmr.options('backend') == "local") fname
else {
if(dfs.is.dir(fname)) {
du = hdfs.ls(fname)
du[!is.hidden.file(du[,"path"]),"path"]}
else fname}}
dfs.exists =
function(fname) {
fname = to.dfs.path(fname)
if (rmr.options('backend') == 'hadoop')
hdfs.exists(fname)
else file.exists(fname)}
dfs.rmr =
function(fname) {
fname = to.dfs.path(fname)
if(rmr.options('backend') == 'hadoop')
hdfs.rmr(fname)
else stopifnot(unlink(fname, recursive = TRUE) == 0)
NULL}
dfs.is.dir =
function(fname) {
fname = to.dfs.path(fname)
if (rmr.options('backend') == 'hadoop')
hdfs.isdir(fname)
else file.info(fname)[["isdir"]]}
dfs.empty =
function(fname) {
if(dfs.size(fname) > 1000) #size heuristic
FALSE
else
length.keyval(from.dfs(fname)) == 0}
dfs.size =
function(fname) {
fname = to.dfs.path(fname)
if(rmr.options('backend') == 'hadoop') {
du = hdfs.ls(fname)
if(is.null(du)) 0
else
sum(du[!is.hidden.file(du[["path"]]), "size"])}
else file.info(fname)[1, "size"] }
dfs.mv =
function(from, to) {
fname = to.dfs.path(from)
if(rmr.options('backend') == 'hadoop')
hdfs.mv(fname, to)
else
stopifnot(file.rename(fname, to))
NULL}
dfs.mkdir =
function(fname) {
fname = to.dfs.path(fname)
if (rmr.options('backend') == 'hadoop')
hdfs.mkdir(fname)
else
stopifnot(all(dir.create(fname)))
NULL}
# dfs bridge
to.dfs.path =
function(input) {
if (is.character(input)) {
input}
else {
if(is.function(input)) {
input()}}}
loadtb =
function(inf, outf)
system(paste(hadoop.streaming(), "loadtb", outf, "<", inf))
to.dfs =
function(
kv,
output = dfs.tempfile(),
format = "native") {
kv = as.keyval(kv)
tmp = tempfile()
dfs.output = to.dfs.path(output)
if(is.character(format))
format = make.output.format(format)
keyval.writer = make.keyval.writer(tmp, format)
keyval.writer(kv)
eval(
quote(
if(length(con) == 1)
close(con)
else lapply(con, close)),
envir=environment(keyval.writer))
if(rmr.options('backend') == 'hadoop') {
if(format$mode == "binary")
loadtb(tmp, dfs.output)
else #text
hdfs.put(tmp, dfs.output)}
else { #local
if(file.exists(dfs.output))
stop("Can't overwrite ", dfs.output)
file.copy(tmp, dfs.output)}
unlink(tmp, recursive=TRUE)
output}
from.dfs = function(input, format = "native") {
read.file = function(fname) {
keyval.reader =
make.keyval.reader(fname, format)
retval = make.fast.list()
kv = keyval.reader()
while(!is.null(kv)) {
retval(list(kv))
kv = keyval.reader()}
eval(
quote(close(con)),
envir = environment(keyval.reader))
c.keyval(retval())}
dumptb = function(src, dest){
lapply(src, function(x) system(paste(hadoop.streaming(), "dumptb", x, ">>", dest)))}
getmerge = function(src, dest) {
on.exit(unlink(tmp))
tmp = tempfile()
lapply(
src,
function(x) {
hdfs.get(as.character(x), tmp)
if(.Platform$OS.type == "windows") {
cmd = paste('type', tmp, '>>' , dest)
system(paste(Sys.getenv("COMSPEC"),"/c",cmd))}
else {
system(paste('cat', tmp, '>>' , dest))}
unlink(tmp)})
dest}
fname = to.dfs.path(input)
if(is.character(format)) format = make.input.format(format)
if(rmr.options("backend") == "hadoop") {
tmp = tempfile()
if(format$mode == "binary")
dumptb(part.list(fname), tmp)
else getmerge(part.list(fname), tmp)}
else
tmp = fname
retval = read.file(tmp)
if(rmr.options("backend") == "hadoop") unlink(tmp)
retval}
# mapreduce
rmr.normalize.path =
function(url.or.path) {
if(.Platform$OS.type == "windows")
url.or.path = gsub("\\\\","/", url.or.path)
gsub(
"/$",
"",
gsub(
"/+",
"/",
paste(
"/",
parse_url(url.or.path)$path,
sep = "")))}
current.input =
function() {
fname =
default(
Sys.getenv("mapreduce_map_input_file"),
Sys.getenv("map_input_file"),
"")
if (fname == "") NULL
else rmr.normalize.path(fname)}
dfs.tempfile =
function(
pattern = "file",
tmpdir = {
if(rmr.options("backend") == "hadoop")
rmr.options("hdfs.tempdir")
else
tempdir()}) {
fname = tempfile(pattern, tmpdir)
subfname = strsplit(fname, ":")
if(length(subfname[[1]]) > 1) fname = subfname[[1]][2]
namefun = function() {fname}
reg.finalizer(
environment(namefun),
function(e) {
fname = eval(expression(fname), envir = e)
if(!in.a.task() && dfs.exists(fname)) dfs.rmr(fname)
},
onexit = TRUE)
namefun}
dfs.managed.file = function(call, managed.dir = rmr.options('managed.dir')) {
file.path(managed.dir, digest(lapply(call, eval)))}
mapreduce = function(
input,
output = NULL,
map = to.map(identity),
reduce = NULL,
vectorized.reduce = FALSE,
combine = NULL,
in.memory.combine = FALSE,
input.format = "native",
output.format = "native",
backend.parameters = list(),
verbose = TRUE) {
on.exit(expr = gc(), add = TRUE) #this is here to trigger cleanup of tempfiles
if (is.null(output))
output = dfs.tempfile()
if(is.character(input.format)) input.format = make.input.format(input.format)
if(is.character(output.format)) output.format = make.output.format(output.format)
if(!missing(backend.parameters)) warning("backend.parameters is deprecated.")
backend = rmr.options('backend')
mr = switch(backend,
hadoop = rmr.stream,
local = mr.local,
stop("Unsupported backend: ", backend))
mr(map = map,
reduce = reduce,
combine = combine,
vectorized.reduce,
in.folder = if(is.list(input)) {lapply(input, to.dfs.path)} else to.dfs.path(input),
out.folder = to.dfs.path(output),
input.format = input.format,
output.format = output.format,
in.memory.combine = in.memory.combine,
backend.parameters = backend.parameters[[backend]],
verbose = verbose)
output
}
##special jobs
## a sort of relational join very useful in a variety of map reduce algorithms
## to.dfs(lapply(1:10, function(i) keyval(i, i^2)), "/tmp/reljoin.left")
## to.dfs(lapply(1:10, function(i) keyval(i, i^3)), "/tmp/reljoin.right")
## equijoin(left.input="/tmp/reljoin.left", right.input="/tmp/reljoin.right", output = "/tmp/reljoin.out")
## from.dfs("/tmp/reljoin.out")
reduce.default =
function(k, vl, vr) {
if((is.list(vl) && !is.data.frame(vl)) ||
(is.list(vr) && !is.data.frame(vr)))
list(left = vl, right = vr)
else{
vl = as.data.frame(vl)
vr = as.data.frame(vr)
names(vl) = paste(names(vl), "l", sep = ".")
names(vr) = paste(names(vr), "r", sep = ".")
if(all(is.na(vl))) vr
else {
if(all(is.na(vr))) vl
else
merge(vl, vr, by = NULL)}}}
equijoin =
function(
left.input = NULL,
right.input = NULL,
input = NULL,
output = NULL,
input.format = "native",
output.format = "native",
outer = c("", "left", "right", "full"),
map.left = to.map(identity),
map.right = to.map(identity),
reduce = reduce.default) {
stopifnot(
xor(
!is.null(left.input), !is.null(input) &&
(is.null(left.input) == is.null(right.input))))
outer = match.arg(outer)
left.outer = outer == "left"
right.outer = outer == "right"
full.outer = outer == "full"
if (is.null(left.input)) {
left.input = input}
mark.side =
function(kv, is.left) {
kv = split.keyval(kv)
keyval(keys(kv),
lapply(values(kv),
function(v) {
list(val = v, is.left = is.left)}))}
prefix.cmp =
function(l,r)
suppressWarnings(
min(
which(!(strsplit(l,split="")[[1]] == strsplit(r, split = "")[[1]]))))
is.left.side =
function(left.input, right.input) {
li = rmr.normalize.path(to.dfs.path(left.input))
ri = rmr.normalize.path(to.dfs.path(right.input))
ci = rmr.normalize.path(current.input())
prefix.cmp(ci, li) > prefix.cmp(ci, ri)}
reduce.split =
function(vv) {
tapply(
vv,
sapply(vv, function(v) v$is.left),
function(v) lapply(v, function(x)x$val),
simplify = FALSE)}
pad.side =
function(vv, outer)
if (length(vv) == 0 && (outer)) c(NA) else c.or.rbind(vv)
map =
if (is.null(input)) {
function(k, v) {
ils = is.left.side(left.input, right.input)
mark.side(if(ils) map.left(k, v) else map.right(k, v), ils)}}
else {
function(k, v) {
c.keyval(mark.side(map.left(k, v), TRUE),
mark.side(map.right(k, v), FALSE))}}
eqj.reduce =
function(k, vv) {
rs = reduce.split(vv)
left.side = pad.side(rs$`TRUE`, right.outer || full.outer)
right.side = pad.side(rs$`FALSE`, left.outer || full.outer)
if(!is.null(left.side) && !is.null(right.side))
reduce(k[[1]], left.side, right.side)}
mapreduce(
map = map,
reduce = eqj.reduce,
input = c(left.input, right.input),
output = output,
input.format = input.format,
output.format = output.format,)}
status = function(value)
cat(
sprintf(
"reporter:status:%s\n",
value),
file = stderr())
increment.counter =
function(group, counter, increment = 1)
cat(
sprintf(
"reporter:counter:%s\n",
paste(group, counter, increment, sep=",")),
file = stderr())
|
#########################################################################################
setwd("D:/MATFilesALL")
rename_jpg_to_JPG <- function(subdir = ".", recursive = FALSE, message = TRUE,
text_to_find=".jpg.mat", new_extension = ".JPG.mat", ...) {
wd <- getwd()
dir_to_work_on <- file.path(wd, subdir)
# on.exit(setwd(old_wd))
files <- list.files(dir_to_work_on)
# Remove dirs!
isdir <- file.info(file.path(dir_to_work_on,files))$isdir
files <- files[!isdir]
# find files with .r
files_without_ext <- substr(files, 1, nchar(files)-8)
files_with_R <- paste(files_without_ext, new_extension, sep = "")
ss_r <- grepl(text_to_find, files)
file.rename(from = file.path(dir_to_work_on,files)[ss_r],
to = file.path(dir_to_work_on,files_with_R)[ss_r])
n_changes <- sum(ss_r)
if(message) cat("We renamed ", n_changes, " files from ",text_to_find," to ",new_extension,"\n\n")
if(message & n_changes>0) cat("We renamed: \n", paste(files[ss_r], collapse = "\n"))
if(recursive) {
all_dirs <- list.dirs(full.names = FALSE, recursive = TRUE)
fo <- function(x,...) rename_r_to_R(subdir = x, text_to_find=text_to_find, new_extension=new_extension ,...)
n_changes_dirs <- sapply(all_dirs, fo, message = message)
n_changes <- n_changes + n_changes_dirs
}
return(invisible(n_changes))
}
| /Change jpg to JPG.R | no_license | mighster/Data_Wrangling | R | false | false | 1,439 | r | #########################################################################################
setwd("D:/MATFilesALL")
rename_jpg_to_JPG <- function(subdir = ".", recursive = FALSE, message = TRUE,
text_to_find=".jpg.mat", new_extension = ".JPG.mat", ...) {
wd <- getwd()
dir_to_work_on <- file.path(wd, subdir)
# on.exit(setwd(old_wd))
files <- list.files(dir_to_work_on)
# Remove dirs!
isdir <- file.info(file.path(dir_to_work_on,files))$isdir
files <- files[!isdir]
# find files with .r
files_without_ext <- substr(files, 1, nchar(files)-8)
files_with_R <- paste(files_without_ext, new_extension, sep = "")
ss_r <- grepl(text_to_find, files)
file.rename(from = file.path(dir_to_work_on,files)[ss_r],
to = file.path(dir_to_work_on,files_with_R)[ss_r])
n_changes <- sum(ss_r)
if(message) cat("We renamed ", n_changes, " files from ",text_to_find," to ",new_extension,"\n\n")
if(message & n_changes>0) cat("We renamed: \n", paste(files[ss_r], collapse = "\n"))
if(recursive) {
all_dirs <- list.dirs(full.names = FALSE, recursive = TRUE)
fo <- function(x,...) rename_r_to_R(subdir = x, text_to_find=text_to_find, new_extension=new_extension ,...)
n_changes_dirs <- sapply(all_dirs, fo, message = message)
n_changes <- n_changes + n_changes_dirs
}
return(invisible(n_changes))
}
|
# Author: Alfredo Sanchez-Tojar, MPIO (Seewiesen) and ICL (Silwood Park), alfredo.tojar@gmail.com
# Github profile: https://github.com/ASanchez-Tojar
# Script created on the 6th October, 2016
########################################################################################################
# Description of script and Instructions
########################################################################################################
# This script is to plot the results on dominance status and tarsus
########################################################################################################
# Packages needed
########################################################################################################
# packages needed to be loaded for this script
library(lme4)
library(arm)
library(blmeco)
# Clear memory and get to know where you are
rm(list=ls())
#getwd()
# loading the clean databases from Lundy with all the data needed
#rank.TLandM.VB.fitness <- read.table("finaldatabases/rank.TLandM.VB.fitness.csv",header=TRUE,sep=",")
rank.TLandM.VB.fitness <- read.table("finaldatabases/rank.TLandM.VB.fitness.9int.csv",header=TRUE,sep=",")
VB.TLandM.age.fitness <- read.table("finaldatabases/VB.TLandM.age.fitness.csv",header=TRUE,sep=",")
# males subset (females and unknown excluded)
rank.TLandM.VB.fitness.2 <- rank.TLandM.VB.fitness[!(is.na(rank.TLandM.VB.fitness$sex)),]
################################################################
# WILD MODEL
################################################################
# subsetting only the necessary for the plotting of each model.
data.plot1 <- rank.TLandM.VB.fitness.2[!(is.na(rank.TLandM.VB.fitness.2$age)) &
rank.TLandM.VB.fitness.2$age>0 &
!(is.na(rank.TLandM.VB.fitness.2$tarsus)) &
!(is.na(rank.TLandM.VB.fitness.2$season)),]
data.plot1$sex <- as.factor(data.plot1$sex)
#data.plot1$season <- as.factor(data.plot1$season)
mod.rank.age <- lmer(StElo~age*
sex+
tarsus+
season+
(1|BirdID)+
(1|eventSW),
data=data.plot1)
#simulating a posterior distribution with 5000 draws
smod.rank.age<-sim(mod.rank.age,5000)
# Generating a database with what is run in the model. The model estimates
# calculated and presented in the plot correspond to those for a mean value
# of tarsus and age (from this database), and a mean fictious season of 0.5
# (remember that season was coded as 0: non-breeding, and 1: breeding)
newdat<-expand.grid(age=mean(data.plot1$age,na.rm = TRUE),
sex=levels(data.plot1$sex),
tarsus = seq(min(data.plot1$tarsus,na.rm = TRUE),
max(data.plot1$tarsus,na.rm = TRUE),
0.001),
season=0.5)
xmat<-model.matrix(~age*
sex+
tarsus+
season,
data=newdat)
fitmatboth <- matrix(NA,
ncol = nrow(smod.rank.age@fixef),
nrow = nrow(newdat))
for(i in 1:nrow(smod.rank.age@fixef)) {
fitmatboth[,i] <- xmat%*%smod.rank.age@fixef[i,]
}
# finally estimating the mean and the credible intervals for each
# value of bib length. This is what I will plot later on.
newdat$fit<-apply(fitmatboth, 1, mean) # 1= row , 2 = colum
newdat$lower<-apply(fitmatboth, 1, quantile, prob= 0.025)
newdat$upper<-apply(fitmatboth, 1, quantile, prob= 0.975)
################################################################
# PLOTTING
################################################################
# vector needed to obtain the final rgb colours
rgbing <- c(255,255,255)
# few colours in rb
darkblue <- c(31,120,180)/rgbing
chocolate1 <- c(255,127,36)/rgbing
data.plot1.m <- data.plot1[data.plot1$sex==1,]
data.plot1.f <- data.plot1[data.plot1$sex==0,]
# PLOT saved as .tiff
# tiff("plots/talks/Figure11_Status_and_tarsus_both_sexes.tiff", height=20, width=20,
# units='cm', compression="lzw", res=300)
tiff("plots/talks/9interactions/Figure11_Status_and_tarsus_both_sexes_9int.tiff", height=20, width=20,
units='cm', compression="lzw", res=300)
#par(mar=c(5, 5, 1, 1))
par(mar=c(6, 7, 1, 1))
plot(data.plot1$tarsus,
data.plot1$StElo,
type="n",
# xlab="Bib length (mm)",
# ylab= "Standardized Elo-rating",
xlab="",
ylab="",
#cex.lab=1.7,
cex.lab=2.4,
xaxt="n",yaxt="n",xlim=c(15.5,20.5),ylim=c(0,1),
family="serif",
frame.plot = FALSE)
title(xlab="tarsus length (mm)", line=4, cex.lab=3.0, family="serif")
title(ylab="Standardized Elo-rating", line=4.5, cex.lab=3.0, family="serif")
axis(1,at=seq(15.5,20.5,by=1),
#cex.axis=1.3,
cex.axis=1.8,
family="serif")
axis(2,at=seq(0,1,by=0.2),
las=2,
#cex.axis=1.3,
cex.axis=1.8,
family="serif")
points(data.plot1.m$tarsus,
data.plot1.m$StElo,
pch = 19, col=rgb(darkblue[1],darkblue[2],darkblue[3],0.4),
cex = 2.0)
points(data.plot1.f$tarsus,
data.plot1.f$StElo,
pch = 19, col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.4),
cex = 2.0)
index.m<-newdat$sex=="1" # only calls the plot but not the points yet
polygon(c(newdat$tarsus[index.m],rev(newdat$tarsus[index.m])),
c(newdat$lower[index.m],rev(newdat$upper[index.m])),
border=NA,col=rgb(darkblue[1],darkblue[2],darkblue[3], 0.15))
#index.f<-newdat$sex=="0"
#summary(data.plot1.f$tarsus)
newdat.f6 <- newdat[newdat$tarsus<19.89 & newdat$tarsus>16.31,]
index.f6<-newdat.f6$sex=="0"
polygon(c(newdat.f6$tarsus[index.f6],rev(newdat.f6$tarsus[index.f6])),
c(newdat.f6$lower[index.f6],rev(newdat.f6$upper[index.f6])),
border=NA,col=rgb(chocolate1[1], chocolate1[2], chocolate1[3], 0.15))
lines(newdat$tarsus[index.m], newdat$fit[index.m], lwd=3.5,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat$tarsus[index.m], newdat$lower[index.m], lty=2, lwd=2,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat$tarsus[index.m], newdat$upper[index.m], lty=2, lwd=2,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat.f6$tarsus[index.f6], newdat.f6$fit[index.f6], lwd=3.5,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
lines(newdat.f6$tarsus[index.f6], newdat.f6$lower[index.f6], lty=2, lwd=2,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
lines(newdat.f6$tarsus[index.f6], newdat.f6$upper[index.f6], lty=2, lwd=2,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
op <- par(family = "serif")
#par(op)
# legend(57,1.02,
# legend=c("captive","wild"),
# pch=19,
# col=c(rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8),
# rgb(darkblue[1],darkblue[2],darkblue[3],0.8)),
# pt.cex=1.9,
# cex=1.1)
dev.off()
| /009_Figure_11_Status_and_tarsus_both_sexes.R | no_license | ASanchez-Tojar/dominance | R | false | false | 7,096 | r | # Author: Alfredo Sanchez-Tojar, MPIO (Seewiesen) and ICL (Silwood Park), alfredo.tojar@gmail.com
# Github profile: https://github.com/ASanchez-Tojar
# Script created on the 6th October, 2016
########################################################################################################
# Description of script and Instructions
########################################################################################################
# This script is to plot the results on dominance status and tarsus
########################################################################################################
# Packages needed
########################################################################################################
# packages needed to be loaded for this script
library(lme4)
library(arm)
library(blmeco)
# Clear memory and get to know where you are
rm(list=ls())
#getwd()
# loading the clean databases from Lundy with all the data needed
#rank.TLandM.VB.fitness <- read.table("finaldatabases/rank.TLandM.VB.fitness.csv",header=TRUE,sep=",")
rank.TLandM.VB.fitness <- read.table("finaldatabases/rank.TLandM.VB.fitness.9int.csv",header=TRUE,sep=",")
VB.TLandM.age.fitness <- read.table("finaldatabases/VB.TLandM.age.fitness.csv",header=TRUE,sep=",")
# males subset (females and unknown excluded)
rank.TLandM.VB.fitness.2 <- rank.TLandM.VB.fitness[!(is.na(rank.TLandM.VB.fitness$sex)),]
################################################################
# WILD MODEL
################################################################
# subsetting only the necessary for the plotting of each model.
data.plot1 <- rank.TLandM.VB.fitness.2[!(is.na(rank.TLandM.VB.fitness.2$age)) &
rank.TLandM.VB.fitness.2$age>0 &
!(is.na(rank.TLandM.VB.fitness.2$tarsus)) &
!(is.na(rank.TLandM.VB.fitness.2$season)),]
data.plot1$sex <- as.factor(data.plot1$sex)
#data.plot1$season <- as.factor(data.plot1$season)
mod.rank.age <- lmer(StElo~age*
sex+
tarsus+
season+
(1|BirdID)+
(1|eventSW),
data=data.plot1)
#simulating a posterior distribution with 5000 draws
smod.rank.age<-sim(mod.rank.age,5000)
# Generating a database with what is run in the model. The model estimates
# calculated and presented in the plot correspond to those for a mean value
# of tarsus and age (from this database), and a mean fictious season of 0.5
# (remember that season was coded as 0: non-breeding, and 1: breeding)
newdat<-expand.grid(age=mean(data.plot1$age,na.rm = TRUE),
sex=levels(data.plot1$sex),
tarsus = seq(min(data.plot1$tarsus,na.rm = TRUE),
max(data.plot1$tarsus,na.rm = TRUE),
0.001),
season=0.5)
xmat<-model.matrix(~age*
sex+
tarsus+
season,
data=newdat)
fitmatboth <- matrix(NA,
ncol = nrow(smod.rank.age@fixef),
nrow = nrow(newdat))
for(i in 1:nrow(smod.rank.age@fixef)) {
fitmatboth[,i] <- xmat%*%smod.rank.age@fixef[i,]
}
# finally estimating the mean and the credible intervals for each
# value of bib length. This is what I will plot later on.
newdat$fit<-apply(fitmatboth, 1, mean) # 1= row , 2 = colum
newdat$lower<-apply(fitmatboth, 1, quantile, prob= 0.025)
newdat$upper<-apply(fitmatboth, 1, quantile, prob= 0.975)
################################################################
# PLOTTING
################################################################
# vector needed to obtain the final rgb colours
rgbing <- c(255,255,255)
# few colours in rb
darkblue <- c(31,120,180)/rgbing
chocolate1 <- c(255,127,36)/rgbing
data.plot1.m <- data.plot1[data.plot1$sex==1,]
data.plot1.f <- data.plot1[data.plot1$sex==0,]
# PLOT saved as .tiff
# tiff("plots/talks/Figure11_Status_and_tarsus_both_sexes.tiff", height=20, width=20,
# units='cm', compression="lzw", res=300)
tiff("plots/talks/9interactions/Figure11_Status_and_tarsus_both_sexes_9int.tiff", height=20, width=20,
units='cm', compression="lzw", res=300)
#par(mar=c(5, 5, 1, 1))
par(mar=c(6, 7, 1, 1))
plot(data.plot1$tarsus,
data.plot1$StElo,
type="n",
# xlab="Bib length (mm)",
# ylab= "Standardized Elo-rating",
xlab="",
ylab="",
#cex.lab=1.7,
cex.lab=2.4,
xaxt="n",yaxt="n",xlim=c(15.5,20.5),ylim=c(0,1),
family="serif",
frame.plot = FALSE)
title(xlab="tarsus length (mm)", line=4, cex.lab=3.0, family="serif")
title(ylab="Standardized Elo-rating", line=4.5, cex.lab=3.0, family="serif")
axis(1,at=seq(15.5,20.5,by=1),
#cex.axis=1.3,
cex.axis=1.8,
family="serif")
axis(2,at=seq(0,1,by=0.2),
las=2,
#cex.axis=1.3,
cex.axis=1.8,
family="serif")
points(data.plot1.m$tarsus,
data.plot1.m$StElo,
pch = 19, col=rgb(darkblue[1],darkblue[2],darkblue[3],0.4),
cex = 2.0)
points(data.plot1.f$tarsus,
data.plot1.f$StElo,
pch = 19, col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.4),
cex = 2.0)
index.m<-newdat$sex=="1" # only calls the plot but not the points yet
polygon(c(newdat$tarsus[index.m],rev(newdat$tarsus[index.m])),
c(newdat$lower[index.m],rev(newdat$upper[index.m])),
border=NA,col=rgb(darkblue[1],darkblue[2],darkblue[3], 0.15))
#index.f<-newdat$sex=="0"
#summary(data.plot1.f$tarsus)
newdat.f6 <- newdat[newdat$tarsus<19.89 & newdat$tarsus>16.31,]
index.f6<-newdat.f6$sex=="0"
polygon(c(newdat.f6$tarsus[index.f6],rev(newdat.f6$tarsus[index.f6])),
c(newdat.f6$lower[index.f6],rev(newdat.f6$upper[index.f6])),
border=NA,col=rgb(chocolate1[1], chocolate1[2], chocolate1[3], 0.15))
lines(newdat$tarsus[index.m], newdat$fit[index.m], lwd=3.5,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat$tarsus[index.m], newdat$lower[index.m], lty=2, lwd=2,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat$tarsus[index.m], newdat$upper[index.m], lty=2, lwd=2,
col=rgb(darkblue[1],darkblue[2],darkblue[3],0.65))
lines(newdat.f6$tarsus[index.f6], newdat.f6$fit[index.f6], lwd=3.5,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
lines(newdat.f6$tarsus[index.f6], newdat.f6$lower[index.f6], lty=2, lwd=2,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
lines(newdat.f6$tarsus[index.f6], newdat.f6$upper[index.f6], lty=2, lwd=2,
col=rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8))
op <- par(family = "serif")
#par(op)
# legend(57,1.02,
# legend=c("captive","wild"),
# pch=19,
# col=c(rgb(chocolate1[1], chocolate1[2], chocolate1[3],0.8),
# rgb(darkblue[1],darkblue[2],darkblue[3],0.8)),
# pt.cex=1.9,
# cex=1.1)
dev.off()
|
\name{abic.logis.rayleigh}
\alias{abic.logis.rayleigh}
\title{Akaike information criterion (AIC) and Bayesian information criterion (BIC)
for Logistic-Rayleigh(LR) distribution}
\description{
The function \code{abic.logis.rayleigh()} gives the \code{loglikelihood}, \code{AIC} and \code{BIC} values
assuming an Logistic-Rayleigh(LR) distribution with parameters alpha and lambda.}
\usage{abic.logis.rayleigh(x, alpha.est, lambda.est)}
\arguments{
\item{x}{vector of observations}
\item{alpha.est}{estimate of the parameter alpha}
\item{lambda.est}{estimate of the parameter lambda}
}
\value{
The function \code{abic.logis.rayleigh()} gives the \code{loglikelihood}, \code{AIC} and \code{BIC} values.
}
\references{
Akaike, H. (1978).
\emph{A new look at the Bayes procedure}, Biometrika, 65, 53-59.
Claeskens, G. and Hjort, N. L. (2008).
\emph{Model Selection and Model Averaging}, Cambridge University Press, London.
Konishi., S. and Kitagawa, G.(2008).
\emph{Information Criteria and Statistical Modeling}, Springer Science+Business Media, LLC.
Schwarz, S. (1978).
\emph{Estimating the dimension of the model}, Annals of Statistics, 6, 461-464.
Spiegelhalter, D. J., Best, N. G., Carlin, B. P. and van der Linde, A. (2002).
\emph{Bayesian measures of complexity and fit}, Journal of the Royal Statistical Society Series B 64, 1-34.
}
\seealso{
\code{\link{pp.logis.rayleigh}} for \code{PP} plot and \code{\link{qq.logis.rayleigh}} for \code{QQ} plot
}
\examples{
## Load data sets
data(stress)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(stress)
## Estimates of alpha & lambda using 'maxLik' package
## alpha.est = 1.4779388, lambda.est = 0.2141343
## Values of AIC, BIC and LogLik for the data(stress)
abic.logis.rayleigh(stress, 1.4779388, 0.2141343)
}
\keyword{models}
| /man/abic.logis.rayleigh.Rd | no_license | statwonk/reliaR | R | false | false | 1,877 | rd | \name{abic.logis.rayleigh}
\alias{abic.logis.rayleigh}
\title{Akaike information criterion (AIC) and Bayesian information criterion (BIC)
for Logistic-Rayleigh(LR) distribution}
\description{
The function \code{abic.logis.rayleigh()} gives the \code{loglikelihood}, \code{AIC} and \code{BIC} values
assuming an Logistic-Rayleigh(LR) distribution with parameters alpha and lambda.}
\usage{abic.logis.rayleigh(x, alpha.est, lambda.est)}
\arguments{
\item{x}{vector of observations}
\item{alpha.est}{estimate of the parameter alpha}
\item{lambda.est}{estimate of the parameter lambda}
}
\value{
The function \code{abic.logis.rayleigh()} gives the \code{loglikelihood}, \code{AIC} and \code{BIC} values.
}
\references{
Akaike, H. (1978).
\emph{A new look at the Bayes procedure}, Biometrika, 65, 53-59.
Claeskens, G. and Hjort, N. L. (2008).
\emph{Model Selection and Model Averaging}, Cambridge University Press, London.
Konishi., S. and Kitagawa, G.(2008).
\emph{Information Criteria and Statistical Modeling}, Springer Science+Business Media, LLC.
Schwarz, S. (1978).
\emph{Estimating the dimension of the model}, Annals of Statistics, 6, 461-464.
Spiegelhalter, D. J., Best, N. G., Carlin, B. P. and van der Linde, A. (2002).
\emph{Bayesian measures of complexity and fit}, Journal of the Royal Statistical Society Series B 64, 1-34.
}
\seealso{
\code{\link{pp.logis.rayleigh}} for \code{PP} plot and \code{\link{qq.logis.rayleigh}} for \code{QQ} plot
}
\examples{
## Load data sets
data(stress)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(stress)
## Estimates of alpha & lambda using 'maxLik' package
## alpha.est = 1.4779388, lambda.est = 0.2141343
## Values of AIC, BIC and LogLik for the data(stress)
abic.logis.rayleigh(stress, 1.4779388, 0.2141343)
}
\keyword{models}
|
library(RPostgreSQL)
library(ggplot2)
library(scales)
query <- function(){
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user = "mabotech",password = "mabouser", port = 6432, dbname = "maboss")
res <- dbSendQuery(con, statement = paste(
"SELECT station, state_start, state_stop, duration from public.engine_test where station = 'TestZone6_TC10' and state_start>'2010-12-20 19:35:00' limit 20"))
# we now fetch the first 100 records from the resultSet into a data.frame
data1 <- fetch(res, n = -1)
#print(dim(data1))
#print(data1)
#dbHasCompleted(res)
# let's get all remaining records
# data2 <- fetch(res, n = -1)
#return(data2)
return(data1)
}
df1<- query()
print(df1)
plt <- ggplot(data = df1 ) +
geom_point(aes(x=state_start,y = log(duration)), color="red", shape=0) +
geom_line( aes(x=state_start,y = log(duration)) , color="red")
#geom_point(aes(x=state_stop,y = log(duration)), color="blue", shape=0) +
#geom_line( aes(x=state_stop,y = log(duration)), color="blue" )
# scale_x_datetime( format = "%H:%M:%S")
ggsave("output/pg01.png", width=8, height=2)
| /pg01.R | permissive | mabotech/mabo.data | R | false | false | 1,242 | r |
library(RPostgreSQL)
library(ggplot2)
library(scales)
query <- function(){
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, user = "mabotech",password = "mabouser", port = 6432, dbname = "maboss")
res <- dbSendQuery(con, statement = paste(
"SELECT station, state_start, state_stop, duration from public.engine_test where station = 'TestZone6_TC10' and state_start>'2010-12-20 19:35:00' limit 20"))
# we now fetch the first 100 records from the resultSet into a data.frame
data1 <- fetch(res, n = -1)
#print(dim(data1))
#print(data1)
#dbHasCompleted(res)
# let's get all remaining records
# data2 <- fetch(res, n = -1)
#return(data2)
return(data1)
}
df1<- query()
print(df1)
plt <- ggplot(data = df1 ) +
geom_point(aes(x=state_start,y = log(duration)), color="red", shape=0) +
geom_line( aes(x=state_start,y = log(duration)) , color="red")
#geom_point(aes(x=state_stop,y = log(duration)), color="blue", shape=0) +
#geom_line( aes(x=state_stop,y = log(duration)), color="blue" )
# scale_x_datetime( format = "%H:%M:%S")
ggsave("output/pg01.png", width=8, height=2)
|
dataset = read.csv('wine.csv')
dim(dataset)
str(dataset) #Compactly display the internal structure
head(dataset) #Returns the first or last parts
summary(dataset)
unique(is.na(dataset))
data_1=dataset %>% filter(dataset$Customer_Segment == 1)
data_2=dataset %>% filter(dataset$Customer_Segment == 2)
data_3=dataset %>% filter(dataset$Customer_Segment == 3)
a=table(dataset$Alcohol)
barplot(a,main="Using BarPlot to display alcohol Content of all wines",
ylab="Count",
xlab="alcohol",
col='red',
legend=rownames(a))
hist(dataset$Alcohol,
col="blue",
main="Histogram to display range of alcohol Content of all wines",
xlab="alcohol",
ylab="Count",
labels=TRUE)
boxplot(dataset$Alcohol,
col="pink",
main="Boxplot for Descriptive Analysis of alcohol Content of all wines")
sd(dataset$Alcohol)
sd(data_1$Alcohol)
sd(data_2$Alcohol)
sd(data_3$Alcohol)
a=table(dataset$Customer_Segment)
barplot(a,main="Using BarPlot to display frequency of wines",
ylab="Count",
xlab="wines",
col=rainbow(3),
legend=rownames(a))
pct=round(a/sum(a)*100)
lbs=paste(c("Winery 1","Winery 2","Winery 3")," ",pct,"%",sep=" ")
library(plotrix)
pie(a,labels=lbs,main="Pie Chart Depicting Ratio of wines")
plot(density(dataset$Alcohol),
main="Density Plot for alcohol content",
xlab="alcohol content",ylab="Density")
polygon(density(dataset$Alcohol),col="#ccff66")
#pca
library(caTools)
set.seed(123)
split = sample.split(dataset$Customer_Segment, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-14] = scale(training_set[-14])
test_set[-14] = scale(test_set[-14])
library(caret)
library(e1071)
pca = preProcess(x = training_set[-14], method = 'pca', pcaComp = 2)
training_set = predict(pca, training_set)
training_set = training_set[c(2, 3, 1)]
test_set = predict(pca, test_set)
test_set = test_set[c(2, 3, 1)]
# Fitting SVM to the Training set
classifier = svm(formula = Customer_Segment ~ .,
data = training_set,
type = 'C-classification',
kernel = 'linear')
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
cm
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = 'SVM (Training set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set,
pch = '.',
col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set, pch = 21,
bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'SVM (Test set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set,
pch = '.',
col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set,
pch = 21,
bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
| /wine.R | no_license | zeearo/PCA-analysis-for-Wine-dataset | R | false | false | 4,004 | r | dataset = read.csv('wine.csv')
dim(dataset)
str(dataset) #Compactly display the internal structure
head(dataset) #Returns the first or last parts
summary(dataset)
unique(is.na(dataset))
data_1=dataset %>% filter(dataset$Customer_Segment == 1)
data_2=dataset %>% filter(dataset$Customer_Segment == 2)
data_3=dataset %>% filter(dataset$Customer_Segment == 3)
a=table(dataset$Alcohol)
barplot(a,main="Using BarPlot to display alcohol Content of all wines",
ylab="Count",
xlab="alcohol",
col='red',
legend=rownames(a))
hist(dataset$Alcohol,
col="blue",
main="Histogram to display range of alcohol Content of all wines",
xlab="alcohol",
ylab="Count",
labels=TRUE)
boxplot(dataset$Alcohol,
col="pink",
main="Boxplot for Descriptive Analysis of alcohol Content of all wines")
sd(dataset$Alcohol)
sd(data_1$Alcohol)
sd(data_2$Alcohol)
sd(data_3$Alcohol)
a=table(dataset$Customer_Segment)
barplot(a,main="Using BarPlot to display frequency of wines",
ylab="Count",
xlab="wines",
col=rainbow(3),
legend=rownames(a))
pct=round(a/sum(a)*100)
lbs=paste(c("Winery 1","Winery 2","Winery 3")," ",pct,"%",sep=" ")
library(plotrix)
pie(a,labels=lbs,main="Pie Chart Depicting Ratio of wines")
plot(density(dataset$Alcohol),
main="Density Plot for alcohol content",
xlab="alcohol content",ylab="Density")
polygon(density(dataset$Alcohol),col="#ccff66")
#pca
# Train/test split (80/20), stratified on the class label by caTools.
library(caTools)
set.seed(123)  # reproducible split
split = sample.split(dataset$Customer_Segment, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling (column 14 is the class label and is left untouched)
training_set[-14] = scale(training_set[-14])
test_set[-14] = scale(test_set[-14])
# PCA: project the 13 features onto the first two principal components.
# The test set is transformed with the loadings learned on the training set.
library(caret)
library(e1071)
pca = preProcess(x = training_set[-14], method = 'pca', pcaComp = 2)
training_set = predict(pca, training_set)
training_set = training_set[c(2, 3, 1)]  # reorder to PC1, PC2, Customer_Segment
test_set = predict(pca, test_set)
test_set = test_set[c(2, 3, 1)]
# Fitting SVM to the Training set (linear kernel, hard classification)
classifier = svm(formula = Customer_Segment ~ .,
                 data = training_set,
                 type = 'C-classification',
                 kernel = 'linear')
# Predicting the Test set results (column 3 is the true label, excluded)
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix: rows = true class, columns = prediction
cm = table(test_set[, 3], y_pred)
cm
# Visualising the Training set results: shade the SVM decision regions over a
# dense PC1/PC2 grid and overlay the observed points coloured by true class.
# NOTE(review): ElemStatLearn has been archived from CRAN; the plotting below
# uses only base graphics, so the library() call may be droppable -- confirm.
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)  # predicted class at every grid point
plot(set[, -3],
     main = 'SVM (Training set)',
     xlab = 'PC1', ylab = 'PC2',
     xlim = range(X1), ylim = range(X2))
# Decision boundaries as contours of the predicted class surface.
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set,
       pch = '.',
       col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set, pch = 21,
       bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
# Visualising the Test set results (same construction on the held-out data)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'SVM (Test set)',
     xlab = 'PC1', ylab = 'PC2',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set,
       pch = '.',
       col = ifelse(y_grid == 2, 'deepskyblue', ifelse(y_grid == 1, 'springgreen3', 'tomato')))
points(set,
       pch = 21,
       bg = ifelse(set[, 3] == 2, 'blue3', ifelse(set[, 3] == 1, 'green4', 'red3')))
|
load("MyData/EDA.df.total.RData")
# Based on article: https://www.ncbi.nlm.nih.gov/pubmed/18195335
####### Framingham model ####################
## Interpret variables on parental hypertension
BPHigMothEv <-df.total$BPHigMoth2== "Mor - h\xf8yt BT"
BPHigMothEv[is.na(df.total$BPHigMoth2)] <- FALSE
BPHigFathEv <-df.total$BPHigFath2== "Far - h\xf8yt BT"
BPHigFathEv[is.na(df.total$BPHigFath2)] <- FALSE
## Implementation of pseudocode to find 4 year risk from Figure 2 in Framingham paper
## Applied to our data set
# Computes each participant's 4-year hypertension risk from the Framingham
# point score (Parikh et al. 2008; PubMed link above). Reads the globals
# df.total, BPHigMothEv and BPHigFathEv, and returns a numeric vector on the
# 0-1 scale (the published table is in percent, hence the final /100).
# NOTE(review): every if() below errors ("missing value where TRUE/FALSE
# needed") if a row has NA in SystolicBP2, Sex, BMI2, BirthYear,
# DiastolicBP2 or Smoking2 -- confirm df.total is complete in these columns.
fram.risk.fun <- function(){
  score <- rep(0,length(df.total$PID))  # accumulated point score per participant
  risk <- rep(0,length(df.total$PID))   # 4-year risk in percent, filled below
  for(i in 1: length(df.total$PID)){
    # Step 1: points for systolic blood pressure (higher SBP -> more points).
    if(df.total$SystolicBP2[i]<110){
      score[i]=score[i]-4
    } else if(df.total$SystolicBP2[i]<114){
      score[i]=score[i]
    }else if(df.total$SystolicBP2[i]<119){
      score[i]=score[i]+2
    }else if(df.total$SystolicBP2[i]<124){
      score[i]=score[i]+4
    }else if(df.total$SystolicBP2[i]<129){
      score[i]=score[i]+6
    }else if(df.total$SystolicBP2[i]<134){
      score[i]=score[i]+8
    }else{
      score[i]=score[i]+10
    }
    # Step 2: women receive one extra point.
    if(df.total$Sex[i]=="Female"){score[i]= score[i]+1}
    # Step 3: points for body-mass index (overweight +1, obese +3).
    if(df.total$BMI2[i]<25){
      score[i]=score[i]
    } else if(df.total$BMI2[i]<30){
      score[i]=score[i]+1
    }else{
      score[i]=score[i]+3
    }
    # Step 4: points for diastolic BP, banded by birth decade (a proxy for
    # age): the same DBP earns more points for older participants.
    if(df.total$BirthYear[i]>=1967){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-8
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]-3
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+3
      }else{
        score[i]=score[i]+6
      }
    } else if(df.total$BirthYear[i]>=1957){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-5
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+2
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+5
      }else{
        score[i]=score[i]+7
      }
    } else if(df.total$BirthYear[i]>=1947){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-1
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+3
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+5
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+6
      }else{
        score[i]=score[i]+8
      }
    } else if(df.total$BirthYear[i]>=1937){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+3
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+5
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+7
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+8
      }else{
        score[i]=score[i]+9
      }
    } else if(df.total$BirthYear[i]>=1927){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+6
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+8
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+9
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+10
      }else{
        score[i]=score[i]+10
      }
    } else{
      # Oldest band (born before 1927): DBP barely matters, score is 10-11.
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+10
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+11
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+11
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+11
      }else{
        score[i]=score[i]+11
      }
    }
    # Step 5: current smokers receive one extra point.
    if(df.total$Smoking2[i]=="Current"){score[i]= score[i]+1}
    # Step 6: +1 point per hypertensive parent (so +2 when both).
    if(BPHigFathEv[i] & BPHigMothEv[i]){
      score[i]=score[i]+2
    } else if(BPHigFathEv[i]){
      score[i]=score[i]+1
    } else if(BPHigMothEv[i]){
      score[i]=score[i]+1
    }else{
      score[i]=score[i]
    }
    # Map the total score to 4-year risk (%) via the published table. The
    # attainable range is exactly -12 (= -4 - 8) to 28 (= 10+1+3+11+1+2), so
    # the switch always matches; an out-of-range score would return NULL and
    # make this assignment fail with a length-zero replacement error.
    # NOTE(review): "7"=4.53 breaks the otherwise monotone progression
    # (6 -> 4.61, 8 -> 6.40); this looks like a transcription typo of the
    # article's table -- confirm against Parikh et al. 2008, Figure 2.
    risk[i] <- switch(toString(score[i]), "-12"=0.22, "-11"=0.27, "-10"=0.31, "-9"=0.37,
                      "-8"=0.44,"-7"=0.52,"-6"=0.62,"-5"=0.73,"-4"=0.86,"-3"=1.02,"-2"=1.21,"-1"=1.43,"0"=1.69,
                      "1"=2.00,"2"=2.37,"3"=2.80,"4"=3.31,"5"=3.90,"6"=4.61,"7"=4.53,"8"=6.40,"9"=7.53,"10"=8.86,
                      "11"=10.40,"12"=12.20,"13"=14.28,"14"=16.68,"15"=19.43,"16"=22.58,"17"=26.14,"18"=30.16,"19"=34.63,"20"=39.55,
                      "21"=44.91,"22"=50.64,"23"=56.66,"24"=62.85,"25"=69.05,"26"=75.06,"27"=80.69,"28"=85.74)
  }
  return(risk/100)  # convert percent to a 0-1 proportion
}
fram.risk <- fram.risk.fun()
mean(fram.risk)  # average predicted 4-year risk across the cohort
hist(fram.risk)  # distribution of predicted risks
######## Modified Framingham model #######
## Implementation of pseudocode to find 4 year risk from Figure 2 in Framingham paper
## Applied to our data set
# Modified the age by adding 7 years
# Identical to fram.risk.fun() except Step 4: BirthYear is shifted by -7
# (i.e. everyone is scored as if 7 years older), which moves participants
# toward the older, higher-scoring diastolic-BP bands.
# The same NOTE(review) caveats as fram.risk.fun() apply (NA fragility and
# the suspect "7"=4.53 table entry).
fram.risk.ad.age.fun <- function(){
  score <- rep(0,length(df.total$PID))  # accumulated point score per participant
  risk <- rep(0,length(df.total$PID))   # 4-year risk in percent, filled below
  for(i in 1: length(df.total$PID)){
    # Step 1: points for systolic blood pressure (higher SBP -> more points).
    if(df.total$SystolicBP2[i]<110){
      score[i]=score[i]-4
    } else if(df.total$SystolicBP2[i]<114){
      score[i]=score[i]
    }else if(df.total$SystolicBP2[i]<119){
      score[i]=score[i]+2
    }else if(df.total$SystolicBP2[i]<124){
      score[i]=score[i]+4
    }else if(df.total$SystolicBP2[i]<129){
      score[i]=score[i]+6
    }else if(df.total$SystolicBP2[i]<134){
      score[i]=score[i]+8
    }else{
      score[i]=score[i]+10
    }
    # Step 2: women receive one extra point.
    if(df.total$Sex[i]=="Female"){score[i]= score[i]+1}
    # Step 3: points for body-mass index (overweight +1, obese +3).
    if(df.total$BMI2[i]<25){
      score[i]=score[i]
    } else if(df.total$BMI2[i]<30){
      score[i]=score[i]+1
    }else{
      score[i]=score[i]+3
    }
    # Step 4, MODIFIED: birth year shifted by -7 so each participant falls
    # into the age band of someone 7 years older.
    if((df.total$BirthYear[i]-7)>=1967){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-8
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]-3
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+3
      }else{
        score[i]=score[i]+6
      }
    } else if((df.total$BirthYear[i]-7)>=1957){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-5
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+2
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+5
      }else{
        score[i]=score[i]+7
      }
    } else if((df.total$BirthYear[i]-7)>=1947){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]-1
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+3
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+5
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+6
      }else{
        score[i]=score[i]+8
      }
    } else if((df.total$BirthYear[i]-7)>=1937){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+3
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+5
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+7
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+8
      }else{
        score[i]=score[i]+9
      }
    } else if((df.total$BirthYear[i]-7)>=1927){
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+6
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+8
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+9
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+10
      }else{
        score[i]=score[i]+10
      }
    } else{
      # Oldest band: DBP barely matters, score is 10-11.
      if(df.total$DiastolicBP2[i]<70){
        score[i]=score[i]+10
      } else if(df.total$DiastolicBP2[i]<74){
        score[i]=score[i]+11
      }else if(df.total$DiastolicBP2[i]<79){
        score[i]=score[i]+11
      }else if(df.total$DiastolicBP2[i]<84){
        score[i]=score[i]+11
      }else{
        score[i]=score[i]+11
      }
    }
    # Step 5: current smokers receive one extra point.
    if(df.total$Smoking2[i]=="Current"){score[i]= score[i]+1}
    # Step 6: +1 point per hypertensive parent (so +2 when both).
    if(BPHigFathEv[i] & BPHigMothEv[i]){
      score[i]=score[i]+2
    } else if(BPHigFathEv[i]){
      score[i]=score[i]+1
    } else if(BPHigMothEv[i]){
      score[i]=score[i]+1
    }else{
      score[i]=score[i]
    }
    # Map the total score to 4-year risk (%) via the published table; the
    # attainable score range (-12..28) is fully covered by the switch.
    risk[i] <- switch(toString(score[i]), "-12"=0.22, "-11"=0.27, "-10"=0.31, "-9"=0.37,
                      "-8"=0.44,"-7"=0.52,"-6"=0.62,"-5"=0.73,"-4"=0.86,"-3"=1.02,"-2"=1.21,"-1"=1.43,"0"=1.69,
                      "1"=2.00,"2"=2.37,"3"=2.80,"4"=3.31,"5"=3.90,"6"=4.61,"7"=4.53,"8"=6.40,"9"=7.53,"10"=8.86,
                      "11"=10.40,"12"=12.20,"13"=14.28,"14"=16.68,"15"=19.43,"16"=22.58,"17"=26.14,"18"=30.16,"19"=34.63,"20"=39.55,
                      "21"=44.91,"22"=50.64,"23"=56.66,"24"=62.85,"25"=69.05,"26"=75.06,"27"=80.69,"28"=85.74)
  }
  return(risk/100)  # convert percent to a 0-1 proportion
}
fram.risk.ad.age <- fram.risk.ad.age.fun()
mean(fram.risk.ad.age)  # average predicted risk under the age-shifted model
hist(fram.risk.ad.age)  # distribution of predicted risks
####### SAVE ######
save(fram.risk.ad.age, fram.risk, file="MyData/Framingham.RData")
| /Framingham.R | no_license | fridentnu/masterthesis | R | false | false | 9,115 | r | load("MyData/EDA.df.total.RData")
# Based on article: https://www.ncbi.nlm.nih.gov/pubmed/18195335
####### Framingham model ####################
## Interpret variables on parental hypertension
BPHigMothEv <-df.total$BPHigMoth2== "Mor - h\xf8yt BT"
BPHigMothEv[is.na(df.total$BPHigMoth2)] <- FALSE
BPHigFathEv <-df.total$BPHigFath2== "Far - h\xf8yt BT"
BPHigFathEv[is.na(df.total$BPHigFath2)] <- FALSE
## Implementation of pseudocode to find 4 year risk from Figure 2 in Framingham paper
## Applied to our data set
fram.risk.fun <- function(){
score <- rep(0,length(df.total$PID))
risk <- rep(0,length(df.total$PID))
for(i in 1: length(df.total$PID)){
# Step 1
if(df.total$SystolicBP2[i]<110){
score[i]=score[i]-4
} else if(df.total$SystolicBP2[i]<114){
score[i]=score[i]
}else if(df.total$SystolicBP2[i]<119){
score[i]=score[i]+2
}else if(df.total$SystolicBP2[i]<124){
score[i]=score[i]+4
}else if(df.total$SystolicBP2[i]<129){
score[i]=score[i]+6
}else if(df.total$SystolicBP2[i]<134){
score[i]=score[i]+8
}else{
score[i]=score[i]+10
}
# Step 2
if(df.total$Sex[i]=="Female"){score[i]= score[i]+1}
# Step 3
if(df.total$BMI2[i]<25){
score[i]=score[i]
} else if(df.total$BMI2[i]<30){
score[i]=score[i]+1
}else{
score[i]=score[i]+3
}
# Step 4
if(df.total$BirthYear[i]>=1967){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-8
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]-3
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+3
}else{
score[i]=score[i]+6
}
} else if(df.total$BirthYear[i]>=1957){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-5
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+2
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+5
}else{
score[i]=score[i]+7
}
} else if(df.total$BirthYear[i]>=1947){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-1
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+3
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+5
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+6
}else{
score[i]=score[i]+8
}
} else if(df.total$BirthYear[i]>=1937){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+3
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+5
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+7
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+8
}else{
score[i]=score[i]+9
}
} else if(df.total$BirthYear[i]>=1927){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+6
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+8
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+9
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+10
}else{
score[i]=score[i]+10
}
} else{
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+10
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+11
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+11
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+11
}else{
score[i]=score[i]+11
}
}
# Step 5
if(df.total$Smoking2[i]=="Current"){score[i]= score[i]+1}
# Step 6
if(BPHigFathEv[i] & BPHigMothEv[i]){
score[i]=score[i]+2
} else if(BPHigFathEv[i]){
score[i]=score[i]+1
} else if(BPHigMothEv[i]){
score[i]=score[i]+1
}else{
score[i]=score[i]
}
risk[i] <- switch(toString(score[i]), "-12"=0.22, "-11"=0.27, "-10"=0.31, "-9"=0.37,
"-8"=0.44,"-7"=0.52,"-6"=0.62,"-5"=0.73,"-4"=0.86,"-3"=1.02,"-2"=1.21,"-1"=1.43,"0"=1.69,
"1"=2.00,"2"=2.37,"3"=2.80,"4"=3.31,"5"=3.90,"6"=4.61,"7"=4.53,"8"=6.40,"9"=7.53,"10"=8.86,
"11"=10.40,"12"=12.20,"13"=14.28,"14"=16.68,"15"=19.43,"16"=22.58,"17"=26.14,"18"=30.16,"19"=34.63,"20"=39.55,
"21"=44.91,"22"=50.64,"23"=56.66,"24"=62.85,"25"=69.05,"26"=75.06,"27"=80.69,"28"=85.74)
}
return(risk/100)
}
fram.risk <- fram.risk.fun()
mean(fram.risk)
hist(fram.risk)
######## Modified Framingham model #######
## Implementation of pseudocode to find 4 year risk from Figure 2 in Framingham paper
## Applied to our data set
# Modified the age by adding 7 years
fram.risk.ad.age.fun <- function(){
score <- rep(0,length(df.total$PID))
risk <- rep(0,length(df.total$PID))
for(i in 1: length(df.total$PID)){
# Step 1
if(df.total$SystolicBP2[i]<110){
score[i]=score[i]-4
} else if(df.total$SystolicBP2[i]<114){
score[i]=score[i]
}else if(df.total$SystolicBP2[i]<119){
score[i]=score[i]+2
}else if(df.total$SystolicBP2[i]<124){
score[i]=score[i]+4
}else if(df.total$SystolicBP2[i]<129){
score[i]=score[i]+6
}else if(df.total$SystolicBP2[i]<134){
score[i]=score[i]+8
}else{
score[i]=score[i]+10
}
# Step 2
if(df.total$Sex[i]=="Female"){score[i]= score[i]+1}
# Step 3
if(df.total$BMI2[i]<25){
score[i]=score[i]
} else if(df.total$BMI2[i]<30){
score[i]=score[i]+1
}else{
score[i]=score[i]+3
}
# Step 4, MODIFIED
if((df.total$BirthYear[i]-7)>=1967){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-8
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]-3
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+3
}else{
score[i]=score[i]+6
}
} else if((df.total$BirthYear[i]-7)>=1957){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-5
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+2
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+5
}else{
score[i]=score[i]+7
}
} else if((df.total$BirthYear[i]-7)>=1947){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]-1
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+3
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+5
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+6
}else{
score[i]=score[i]+8
}
} else if((df.total$BirthYear[i]-7)>=1937){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+3
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+5
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+7
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+8
}else{
score[i]=score[i]+9
}
} else if((df.total$BirthYear[i]-7)>=1927){
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+6
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+8
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+9
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+10
}else{
score[i]=score[i]+10
}
} else{
if(df.total$DiastolicBP2[i]<70){
score[i]=score[i]+10
} else if(df.total$DiastolicBP2[i]<74){
score[i]=score[i]+11
}else if(df.total$DiastolicBP2[i]<79){
score[i]=score[i]+11
}else if(df.total$DiastolicBP2[i]<84){
score[i]=score[i]+11
}else{
score[i]=score[i]+11
}
}
# Step 5
if(df.total$Smoking2[i]=="Current"){score[i]= score[i]+1}
# Step 6
if(BPHigFathEv[i] & BPHigMothEv[i]){
score[i]=score[i]+2
} else if(BPHigFathEv[i]){
score[i]=score[i]+1
} else if(BPHigMothEv[i]){
score[i]=score[i]+1
}else{
score[i]=score[i]
}
risk[i] <- switch(toString(score[i]), "-12"=0.22, "-11"=0.27, "-10"=0.31, "-9"=0.37,
"-8"=0.44,"-7"=0.52,"-6"=0.62,"-5"=0.73,"-4"=0.86,"-3"=1.02,"-2"=1.21,"-1"=1.43,"0"=1.69,
"1"=2.00,"2"=2.37,"3"=2.80,"4"=3.31,"5"=3.90,"6"=4.61,"7"=4.53,"8"=6.40,"9"=7.53,"10"=8.86,
"11"=10.40,"12"=12.20,"13"=14.28,"14"=16.68,"15"=19.43,"16"=22.58,"17"=26.14,"18"=30.16,"19"=34.63,"20"=39.55,
"21"=44.91,"22"=50.64,"23"=56.66,"24"=62.85,"25"=69.05,"26"=75.06,"27"=80.69,"28"=85.74)
}
return(risk/100)
}
fram.risk.ad.age <- fram.risk.ad.age.fun()
mean(fram.risk.ad.age)
hist(fram.risk.ad.age)
####### SAVE ######
save(fram.risk.ad.age, fram.risk, file="MyData/Framingham.RData")
|
# Libraries
library(tidyverse)
library(caret)
library(parallel)
library(tictoc)
library(doParallel)
library(haven)
library(RANN)
# Data Import and Cleaning
# Read the 2006 GSS extract, keep the ten Big Five items plus self-rated
# health, and coerce the labelled SPSS columns to plain numerics.
gss <- read_sav("GSS2006.sav") %>% # read from the same folder
  select(BIG5A1,BIG5B1,BIG5C1,BIG5D1,BIG5E1,BIG5A2,BIG5B2,BIG5C2,BIG5D2,BIG5E2,HEALTH) %>%
  mutate_all(as.numeric)
# Keep rows that answered at least one Big Five item (columns 1:10) and have
# a non-missing HEALTH value; remaining item-level NAs are imputed in train().
gss_tbl <- gss %>%
  filter(rowSums(is.na(gss[,1:10]))!=ncol(gss[,1:10])) %>%
  filter(!is.na(HEALTH))
# Data Analysis--ML xgbLinear Model
# HEALTH ~ .^3 expands all main effects plus two- and three-way interactions;
# knnImpute fills the remaining NAs, hence na.action = na.pass.
## Non-parallel
tic()
xgb_model <- train(
  HEALTH~.^3,
  data=gss_tbl,
  method="xgbLinear",
  tuneLength=3,
  trControl=trainControl(method="cv",number=10,verboseIter=T),
  preProcess=c("center","scale","zv","knnImpute"),
  na.action=na.pass
)
exec_time_np <- toc()
## Parallel
# The train() call is deliberately identical: the speed-up comes purely from
# the registered doParallel backend, which caret uses for the CV folds.
local_cluster <- makeCluster(2)
registerDoParallel(local_cluster)
tic()
xgb_model_p <- train(
  HEALTH~.^3,
  data=gss_tbl,
  method="xgbLinear",
  tuneLength=3,
  trControl=trainControl(method="cv",number=10,verboseIter=T),
  preProcess=c("center","scale","zv","knnImpute"),
  na.action=na.pass
)
exec_time_p <- toc()
stopCluster(local_cluster)
# Timing log: the sequential run used 1 processor and took 203.3 seconds; the
# parallel run took 132.3 seconds (71 seconds faster). NOTE(review): the
# original note said "3 preprocessors", but makeCluster(2) above registers 2
# workers -- confirm which configuration produced these numbers.
# In MSI, the first run took 471 seconds and parallel processing took 239 seconds.
# Save Execution times (tictoc returns $tic/$toc timestamps in seconds)
interactive <- data.frame("run"=c("non-parallel","parallel"),
                          "time"=c(exec_time_np$toc-exec_time_np$tic,exec_time_p$toc-exec_time_p$tic))
write_csv(interactive,"interactive.csv")
| /msi/interactive.R | no_license | zhan5449/week-12-project | R | false | false | 1,561 | r | # Libraries
library(tidyverse)
library(caret)
library(parallel)
library(tictoc)
library(doParallel)
library(haven)
library(RANN)
# Data Import and Cleaning
gss <- read_sav("GSS2006.sav") %>% # read from the same folder
select(BIG5A1,BIG5B1,BIG5C1,BIG5D1,BIG5E1,BIG5A2,BIG5B2,BIG5C2,BIG5D2,BIG5E2,HEALTH) %>%
mutate_all(as.numeric)
gss_tbl <- gss %>%
filter(rowSums(is.na(gss[,1:10]))!=ncol(gss[,1:10])) %>%
filter(!is.na(HEALTH))
# Data Analysis--ML xgbLinear Model
## Non-parallel
tic()
xgb_model <- train(
HEALTH~.^3,
data=gss_tbl,
method="xgbLinear",
tuneLength=3,
trControl=trainControl(method="cv",number=10,verboseIter=T),
preProcess=c("center","scale","zv","knnImpute"),
na.action=na.pass
)
exec_time_np <- toc()
## Parallel
local_cluster <- makeCluster(2)
registerDoParallel(local_cluster)
tic()
xgb_model_p <- train(
HEALTH~.^3,
data=gss_tbl,
method="xgbLinear",
tuneLength=3,
trControl=trainControl(method="cv",number=10,verboseIter=T),
preProcess=c("center","scale","zv","knnImpute"),
na.action=na.pass
)
exec_time_p <- toc()
stopCluster(local_cluster)
# The first run used 1 processor and took 203.3 seconds. Parallel processing used 3 preprocessors and took 132.3 seconds (71 seconds faster).
# In MSI, the first run took 471 seconds and parallel processing took 239 seconds.
# Save Execution times
interactive <- data.frame("run"=c("non-parallel","parallel"),
"time"=c(exec_time_np$toc-exec_time_np$tic,exec_time_p$toc-exec_time_p$tic))
write_csv(interactive,"interactive.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/threads.R
\name{extract_thread}
\alias{extract_thread}
\title{Extract a thread from a path}
\usage{
extract_thread(path, replace_limit = NULL)
}
\arguments{
\item{path}{a character string that starts with /r/, which you obtain using either the download_sub_urls() or the download_keyword_urls() functions}
\item{replace_limit}{either `NULL` (default) or a positive integer. This corresponds to the `limit` argument in PRAW's `replace_more()` method; see https://praw.readthedocs.io/en/latest/code_overview/other/commentforest.html#praw.models.comment_forest.CommentForest.replace_more Resolving the "load more comments" placeholders governed by this argument is currently what makes retrieval slow for large threads.}
}
\value{
A list with two tibbles: 1. information on nodes and 2. an edge list
}
\description{
Extract a thread from a path
}
\examples{
\dontrun{
df <- extract_thread("/r/rstats/comments/f5bxyk/r_will_use_stringsasfactors_false_by_default_in/")
}
}
| /man/extract_thread.Rd | permissive | acastroaraujo/rawrr | R | false | true | 991 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/threads.R
\name{extract_thread}
\alias{extract_thread}
\title{Extract a thread from a path}
\usage{
extract_thread(path, replace_limit = NULL)
}
\arguments{
\item{path}{a character string that starts with /r/, which you obtain using either the download_sub_urls() or the download_keyword_urls() functions}
\item{replace_limit}{either `NULL` (default) or a positive integer. This corresponds to the `limit` argument in PRAW's `replace_more()` method; see https://praw.readthedocs.io/en/latest/code_overview/other/commentforest.html#praw.models.comment_forest.CommentForest.replace_more Resolving the "load more comments" placeholders governed by this argument is currently what makes retrieval slow for large threads.}
}
\value{
A list with two tibbles: 1. information on nodes and 2. an edge list
}
\description{
Extract a thread from a path
}
\examples{
\dontrun{
df <- extract_thread("/r/rstats/comments/f5bxyk/r_will_use_stringsasfactors_false_by_default_in/")
}
}
|
#' Begin a capture group.
#'
#' @details Capture groups are used to extract data from within the regular
#'   expression match for further processing.
#'
#' @param .data Expression to append, typically pulled from the pipe \code{\%>\%}
#'
#' @export
rx_begin_capture <- function(.data = NULL) {
  # Append an opening parenthesis; paste0() drops a NULL .data cleanly.
  opened <- paste0(.data, "(")
  new_rx(opened)
}
#' End a capture group.
#'
#' @details Closes a group previously opened with \code{rx_begin_capture()};
#'   capture groups are used to extract data from within the regular
#'   expression match for further processing.
#'
#' @param .data Expression to append, typically pulled from the pipe \code{\%>\%}
#'
#' @export
rx_end_capture <- function(.data = NULL) {
  # Append a closing parenthesis; paste0() drops a NULL .data cleanly.
  closed <- paste0(.data, ")")
  new_rx(closed)
}
| /R/capture_groups.R | permissive | crsky1023/RVerbalExpressions | R | false | false | 648 | r | #' Begin a capture group.
#'
#' @details Capture groups are used to extract data from within the regular
#' expression match for further processing.
#'
#' @param .data Expression to append, typically pulled from the pipe \code{ \%>\% }
#'
#' @export
rx_begin_capture <- function(.data = NULL) {
new_rx(paste0(.data, "("))
}
#' End a capture group.
#'
#' @details Capture groups are used to extract data from within the regular
#' expression match for further processing.
#'
#' @param .data Expression to append, typically pulled from the pipe \code{\%>\%}
#'
#' @export
rx_end_capture <- function(.data = NULL) {
new_rx(paste0(.data, ")"))
}
|
library(data.table)
library(tidyverse)
library(stringr)
library(car)
library(anchors)
library(GGally)
library(h2o)
require(MASS)
require(dplyr)
library(magrittr)
library(caret)
#directory path
# NOTE(review): a hard-coded absolute path plus setwd() makes this script
# machine-specific; consider relative paths or here::here().
path <- "/home/sanaz/Desktop/DataMining/projectlab"
#set working directory
setwd(path)
#load train and test file
train <- read.csv("train.csv")
test <- read.csv("test.csv")
# Work on the first 100k rows only to keep the analysis tractable
# (the 0 index in 0:100000 is silently dropped when subsetting).
train1<- train[0:100000,]
test1<- test[0:100000,]
str(train1)
summary(train1)
head(train1)
# Duplicate-row check. NOTE(review): this prints 100,000 logicals;
# any(duplicated(train1)) was probably intended.
duplicated(train1)
# In this data set -1 is the missing-value sentinel;
#to convert these -1's to NA
train1[train1== -1] <- NA
test1[test1==-1]<- NA
#Number of missing values
sum(is.na(train1)) #141891
sum(is.na(test1)) #142288
#Target Feature Analysis
#https://www.kaggle.com/captcalculator/a-very-extensive-porto-exploratory-analysis
# First let's look at the target variable.
#How many positives (files claimed) are there?
library(ggplot2)
ggplot(data=train1, aes(x = as.factor(target))) +
  geom_bar(fill = '#84a5a3')+
  labs(title ='Distribution of Target')
# Raw class counts (heavily imbalanced).
# NOTE: `c` shadows base::c in this session; harmless here but best avoided.
c <- table(train1$target)
print(c)
#Missing Data
# Share of -1 sentinel values per feature, computed on the untouched `train`
# (train1 already had its -1s recoded to NA above).
# Fix: the original chained the data.frame into ggplot() with `+`, which is
# invalid -- data flows into ggplot() via the pipe, `+` only adds layers --
# and geom_bar() was given the nonexistent `colors=` argument (`color=`).
data.frame(feature = names(train),
           miss_val = map_dbl(train, function(x) { sum(x == -1) / length(x) })) %>%
  ggplot(aes(x = reorder(feature, -miss_val), y = miss_val)) +
  geom_bar(stat = 'identity', color = 'white', fill = '#5a64cd') +
  labs(x = '', y = '% missing', title = 'Missing Values by Feature') +
  scale_y_continuous(labels = scales::percent)
# Missingness map of the raw training data.
# NOTE(review): `train` still encodes missing as -1; missmap(train1) would
# show the recoded NAs -- confirm which view was intended.
library(Amelia)
library(mlbench)
missmap(train, col=c("black", "grey"), legend=FALSE)
###################################################################
library(corrplot)
# Continuous/ordinal features: everything not flagged categorical or binary.
cont_vars <- names(train1)[!grepl("_cat|_bin", names(train1))]
# Fix: train1 contains NAs here (the -1 sentinels were recoded above), and
# cor()'s default use = "everything" would produce NA entries that corrplot
# cannot draw; pairwise complete observations keep the matrix numeric.
corrplot(cor(train1[, cont_vars][3:length(cont_vars)],
             use = "pairwise.complete.obs"),
         type = 'lower',
         col = colorRampPalette(c('#feeb8c', '#5a64cd'))(50),
         tl.col = 'grey40',
         mar = c(0,0,1,0),
         title = 'Correlation Matrix of Continuous Features')
#################################################################
# Boxplots of the first continuous feature groups.
cont_vars <- names(train)[!grepl("_cat|_bin", names(train))]
traind <- train[, cont_vars][3:length(cont_vars)]  # drop id and target
# For ind features
ind_vars <- c('target', names(traind)[grepl('_ind_[0-9]{2}$', names(traind))])
train_ind <- train[, ind_vars]
# Fix: the original loop plotted column 4 five times (hard-coded [,4]);
# iterate over i, guarding against fewer than 5 columns.
for (i in seq_len(min(5, ncol(train_ind)))) {
  boxplot(train_ind[, i], main = names(train_ind)[i])
}
# For reg features
# Fix: the original reused the '_ind_' pattern here (copy-paste), selecting
# the ind features again; '_reg_' matches ps_reg_01..03 as intended (the
# correlation section below uses the same pattern).
reg_vars <- c('target', names(traind)[grepl('_reg_[0-9]{2}$', names(traind))])
train_reg <- train[, reg_vars]
# Correlations:Get features names that are not binary or categorical(ind,car,reg,calc)
#https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
library(corrplot)
cont_vars <- names(train1)[!grepl("_cat|_bin", names(train1))]
correlation <- cor(train1[,cont_vars][3:length(cont_vars)])
corrplot(correlation, method = "circle", title = 'Correlation Matrix of Continuous Features')
#Positive correlations are displayed in blue and negative correlations in red color
#Letโs break the correlations down by feature group:
#for ind:
ind_vars<- c('target', names(train1)[grepl('_ind_[0-9]{2}$', names(train1))])
correlation <- cor(train[,ind_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#for reg:
reg_vars<- c('target', names(train)[grepl('_reg_[0-9]{2}$', names(train))])
correlation <- cor(train[,reg_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#For car:
car_vars<- c('target', names(train)[grepl('_car_[0-9]{2}$', names(train))])
correlation <- cor(train[,car_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#Categorical features
cat_vars <- names(train)[grepl("_cat|_bin", names(train))]
correlation <- cor(train[,cat_vars])
corrplot(correlation,order = "hclust", addrect = 3, col = heat.colors(100))
###########################################################DONE###############################
##############################################################################################
#####################################PCA################################################
#Add a column
# Give the test slice a target placeholder so both frames share columns.
test1$target <- 1
#combine the data set (so imputations below use a common median)
comb <- rbind(train1, test1)
sapply(train, function(x) sum(is.na(x))) # missing values in each column
sapply(test, function(x) sum(is.na(x))) # missing values in each column
#impute missing values with median
# Refactor: the original repeated the same assignment line for each of the
# 13 affected columns; a loop is equivalent and easier to audit.
na_cols <- c("ps_ind_05_cat", "ps_ind_02_cat", "ps_ind_04_cat",
             "ps_car_01_cat", "ps_car_02_cat", "ps_car_03_cat",
             "ps_car_05_cat", "ps_car_07_cat", "ps_car_09_cat",
             "ps_reg_03", "ps_car_11", "ps_car_12", "ps_car_14")
for (col in na_cols) {
  comb[[col]][is.na(comb[[col]])] <- median(comb[[col]], na.rm = TRUE)
}
Train <- comb[1:nrow(train1),]
# Fix: the original wrote comb[1:nrow(-train1),]; nrow(-train1) equals
# nrow(train1) (unary minus keeps dimensions), so Test was a copy of Train.
Test <- comb[-(1:nrow(train1)),]
sum(is.na(Train)) # 0
sum(is.na(Test))  # 0  (fix: the original checked Train twice)
my_data <- subset(comb, select = -c(target,id))
str(my_data)
sum(is.na(my_data))#0 Now we don't have missing values
pca.train <- my_data[1:nrow(train1),]
pca.test <- my_data[-(1:nrow(train1)),]
#principal component analysis
# prcomp on the imputed training features; scale. = T standardises each
# column to unit variance before rotation (style note: prefer TRUE over T).
prin_comp <- prcomp(pca.train, scale. = T)
names(prin_comp) #"sdev" "rotation" "center" "scale" "x"
#outputs the mean of variables
prin_comp$center
prin_comp$rotation  # loadings: one column per principal component
prin_comp$rotation[1:10,1:4]
dim(prin_comp$x) #[1] 100000 57
# Let's plot the resultant principal components:
biplot(prin_comp, scale = 0)
#compute standard deviation of each principal component
std_dev <- prin_comp$sdev
#compute variance
pr_var <- std_dev^2
#check variance of first 10 components
pr_var[1:10]
#proportion of variance explained
prop_varex <- pr_var/sum(pr_var)
prop_varex[1:20]
#scree plot: A scree plot is used to assess which components
#or factors explain most of the variability in the data.
plot(prop_varex, xlab = "Principal Component",
     ylab = "Proportion of Variance",
     type = "b")
#cumulative scree plot (how many PCs are needed for a given coverage)
plot(cumsum(prop_varex), xlab = "Principal Component",
     ylab = "Cumulative Proportion of Variance",
     type = "b")
#########Predictive Modeling with PCA Components##########
#add a training set with principal components
train.data <- data.frame(target = train1$target, prin_comp$x)
#we are interested in first 20 PCAs (column 1 is the target, hence 1:21)
train.data <- train.data[,1:21]
# Project the held-out rows onto the loadings learned on the training rows.
test.data <- predict(prin_comp, newdata = pca.test)
test.data <- as.data.frame(test.data)
test.data <- test.data[,1:20]
test.data$predicted<-NULL  # no-op: test.data has no `predicted` column
gc()
# used (Mb) gc trigger (Mb) max used (Mb)
#Ncells 48196522 2574.0 62761417 3351.9 49091534 2621.8
#Vcells 499957106 3814.4 825531125 6298.4 811662218 6192.5
# Logistic regression on a hand-picked subset of the first 20 PCs.
# NOTE(review): the response is taken from the global train1$target rather
# than train.data$target (same values here, but fragile if train.data is
# ever filtered); subset(test.data) with no condition is an identity call.
model_log <- glm(as.factor(train1$target) ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 +
                   PC7 + PC8 + PC9 + PC10 + PC11 + PC14 + PC15 + PC18 + PC19 +
                   PC20 ,family=binomial(link='logit'),data=train.data)
results <- predict(model_log,newdata=subset(test.data),type='response')
result <- data.frame(id = test1$id,target = results)
# NOTE(review): `res` pairs TEST-set predictions with TRAINING targets; the
# rows do not correspond to the same observations -- confirm this was
# intentional.
res <- data.frame(pred = results, actual=train.data$target)
#####################run a decision tree###################DONE!
library(rpart)
n <- nrow(train1)
Data <- comb[seq_len(n), ]  # train-sized slice of the combined data (unused below)
# Hold-out split: half the row indices for training, the rest for testing.
# Fix: the original sampled from 0:n, which includes the invalid row index 0.
tr <- sort(sample(seq_len(n), floor(n / 2)))
Train_data <- train1[tr, ]
Test_data <- train1[-tr, ]
# Classification tree on all predictors; columns 2:59 drop the `id` column.
# Fix: the original wrote train1[2:59, ] -- ROWS 2-59 (58 rows) instead of
# columns -- so the tree was fit on 58 rows and `subset = tr` pointed past
# the data.
rp <- rpart(target ~ ., data = train1[, 2:59], subset = tr, method = "class")
pred.rpt <- predict(rp, newdata = Test_data, type = "class")
pred.rpt
# Confusion matrix: actual target vs predicted class on the hold-out half.
table(train1[-tr, ]$target, pred.rpt)
# Example output from the original (pre-fix) run -- majority class only:
# pred.rpt
#       0 1
#   0 48175 0
#   1  1826 0
# How many examples are well predicted? 48175
| /Assurance.R | no_license | sanazafshari/porto-seguro-safe-driver-prediction-Kaggle-Competition- | R | false | false | 8,179 | r | library(data.table)
library(tidyverse)
library(stringr)
library(car)
library(anchors)
library(GGally)
library(h2o)
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(MASS)
require(dplyr)
library(magrittr)
library(caret)
# directory path (machine-specific; the script expects train.csv/test.csv here)
path <- "/home/sanaz/Desktop/DataMining/projectlab"
#set working directory
setwd(path)
#load train and test file
train <- read.csv("train.csv")
test <- read.csv("test.csv")
# Work on the first 100000 rows of each set to keep the analysis tractable.
# (0:100000 selects the same rows as 1:100000 -- index 0 is ignored by R.)
train1<- train[0:100000,]
test1<- test[0:100000,]
# Quick structural overview of the training subset.
str(train1)
summary(train1)
head(train1)
# Check for duplicated rows (prints a logical vector; all FALSE expected).
duplicated(train1)
# In this dataset missing values are coded as -1; convert them to NA.
train1[train1== -1] <- NA
test1[test1==-1]<- NA
#Number of missing values
sum(is.na(train1)) #141891
sum(is.na(test1)) #142288
#Target Feature Analysis
#https://www.kaggle.com/captcalculator/a-very-extensive-porto-exploratory-analysis
# First let's look at the target variable.
# How many positives (files claimed) are there?
library(ggplot2)
ggplot(data=train1, aes(x = as.factor(target))) +
geom_bar(fill = '#84a5a3')+
labs(title ='Distribution of Target')
# NOTE(review): `c` shadows the base function c() from here on; a different
# name (e.g. target_counts) would be safer.
c <- table(train1$target)
print(c)
# Missing Data: share of -1 sentinel values per feature in the raw train set.
# BUG FIX: the original chained a data.frame into ggplot() with `+`, which is
# an error -- `+` only combines ggplot layers. Build the summary frame first
# and pass it to ggplot() as data; also `colors=` is not a geom_bar argument
# (the correct spelling is `color`).
miss_by_feature <- data.frame(
  feature  = names(train),
  miss_val = map_dbl(train, function(x) { sum(x == -1) / length(x) })
)
ggplot(miss_by_feature, aes(x = reorder(feature, -miss_val), y = miss_val)) +
  geom_bar(stat = 'identity', color = 'white', fill = '#5a64cd') +
  labs(x = '', y = '% missing', title = 'Missing Values by Feature') +
  scale_y_continuous(labels = scales::percent)
library(Amelia)
library(mlbench)
# Visual map of missing cells. missmap() flags NA values only, so `train`
# (still coded with -1) shows nothing missing -- use train1 for a true map.
missmap(train, col=c("black", "grey"), legend=FALSE)
###################################################################
# Correlation heat map (lower triangle) of the continuous features.
library(corrplot)
cont_vars <- names(train1)[!grepl("_cat|_bin", names(train1))]
# NOTE(review): train1 now contains NAs (converted from -1 above); cor() with
# its default use = "everything" yields NA entries for affected columns --
# consider use = "pairwise.complete.obs".
corrplot(cor(train1[, cont_vars][3:length(cont_vars)]),
type = 'lower',
col = colorRampPalette(c('#feeb8c', '#5a64cd'))(50),
tl.col = 'grey40',
mar = c(0,0,1,0),
title = 'Correlation Matrix of Continuous Features')
# Continuous (non-categorical, non-binary) feature names of the full train set;
# drop the first two (id, target) to keep only predictors.
cont_vars <- names(train)[!grepl("_cat|_bin", names(train))]
traind <- train[, cont_vars][3:length(cont_vars)]
# For ind features
ind_vars <- c('target', names(traind)[grepl('_ind_[0-9]{2}$', names(traind))])
train_ind <- train[, ind_vars]
# BUG FIX: the loop plotted column 4 five times; use the loop index so each
# iteration shows a different column.
for (i in seq_len(5)) {
  boxplot(train_ind[, i], main = names(train_ind)[i])
}
# For reg features
# BUG FIX: the pattern matched '_ind_' features again; reg features end in
# '_reg_NN'.
reg_vars <- c('target', names(traind)[grepl('_reg_[0-9]{2}$', names(traind))])
train_reg <- train[, reg_vars]
# Correlations:Get features names that are not binary or categorical(ind,car,reg,calc)
#https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
library(corrplot)
cont_vars <- names(train1)[!grepl("_cat|_bin", names(train1))]
correlation <- cor(train1[,cont_vars][3:length(cont_vars)])
corrplot(correlation, method = "circle", title = 'Correlation Matrix of Continuous Features')
#Positive correlations are displayed in blue and negative correlations in red color
#Letโs break the correlations down by feature group:
#for ind:
ind_vars<- c('target', names(train1)[grepl('_ind_[0-9]{2}$', names(train1))])
correlation <- cor(train[,ind_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#for reg:
reg_vars<- c('target', names(train)[grepl('_reg_[0-9]{2}$', names(train))])
correlation <- cor(train[,reg_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#For car:
car_vars<- c('target', names(train)[grepl('_car_[0-9]{2}$', names(train))])
correlation <- cor(train[,car_vars])
corrplot.mixed(correlation, lower.col = "black", number.cex = .7)
#Categorical features
cat_vars <- names(train)[grepl("_cat|_bin", names(train))]
correlation <- cor(train[,cat_vars])
corrplot(correlation,order = "hclust", addrect = 3, col = heat.colors(100))
###########################################################DONE###############################
##############################################################################################
##################################### PCA ################################################
# Give the test subset a dummy target so train and test can be row-bound and
# imputed together in one pass.
test1$target <- 1
comb <- rbind(train1, test1)
# NOTE(review): these count NAs in the FULL train/test frames, where missing
# values are still coded as -1 -- so both sums are 0. train1/test1 were likely
# intended here.
sapply(train, function(x) sum(is.na(x))) # missing values in each column
sapply(test, function(x) sum(is.na(x))) # missing values in each column
# Impute every column that contained -1 sentinels with its median
# (same column list as before, now handled in one loop instead of 13
# copy-pasted statements).
na_cols <- c("ps_ind_05_cat", "ps_ind_02_cat", "ps_ind_04_cat",
             "ps_car_01_cat", "ps_car_02_cat", "ps_car_03_cat",
             "ps_car_05_cat", "ps_car_07_cat", "ps_car_09_cat",
             "ps_reg_03", "ps_car_11", "ps_car_12", "ps_car_14")
for (col in na_cols) {
  comb[[col]][is.na(comb[[col]])] <- median(comb[[col]], na.rm = TRUE)
}
Train <- comb[1:nrow(train1), ]
# BUG FIX: `comb[1:nrow(-train1), ]` selected the TRAIN rows again, since
# nrow() of a negated data frame equals nrow() of the original. The test
# split is everything EXCEPT the first nrow(train1) rows.
Test <- comb[-(1:nrow(train1)), ]
sum(is.na(Train)) # 0
# BUG FIX: the second check re-tested Train; verify the test split instead.
sum(is.na(Test)) # 0
# Feature matrix for PCA: drop the response and the row identifier.
my_data <- subset(comb, select = -c(target, id))
str(my_data)
sum(is.na(my_data)) # 0 -- no missing values remain
pca.train <- my_data[1:nrow(train1), ]
pca.test <- my_data[-(1:nrow(train1)), ]
# Principal component analysis on the imputed training features.
# scale. = TRUE standardizes the columns (T is the risky shorthand for TRUE --
# TRUE would be safer since T can be reassigned).
prin_comp <- prcomp(pca.train, scale. = T)
names(prin_comp) #"sdev" "rotation" "center" "scale" "x"
#outputs the mean of variables
prin_comp$center
# Loadings matrix: one column of weights per principal component.
prin_comp$rotation
prin_comp$rotation[1:10,1:4]
dim(prin_comp$x) #[1] 100000 57
# Let's plot the resultant principal components:
biplot(prin_comp, scale = 0)
#compute standard deviation of each principal component
std_dev <- prin_comp$sdev
#compute variance
pr_var <- std_dev^2
#check variance of first 10 components
pr_var[1:10]
# Proportion of total variance explained by each component.
prop_varex <- pr_var/sum(pr_var)
prop_varex[1:20]
# Scree plot: assess how much of the variability each principal component explains.
plot(prop_varex, xlab = "Principal Component",
ylab = "Proportion of Variance",
type = "b")
# Cumulative scree plot: how many components are needed to cover most variance.
plot(cumsum(prop_varex), xlab = "Principal Component",
ylab = "Cumulative Proportion of Variance",
type = "b")
######### Predictive Modeling with PCA Components ##########
# Training set = target + all principal-component scores.
train.data <- data.frame(target = train1$target, prin_comp$x)
# Keep the first 20 PCs (column 1 is the target, hence 1:21).
train.data <- train.data[,1:21]
# Project the held-out rows onto the SAME rotation fitted on pca.train.
test.data <- predict(prin_comp, newdata = pca.test)
test.data <- as.data.frame(test.data)
test.data <- test.data[,1:20]
test.data$predicted<-NULL
gc()
# used (Mb) gc trigger (Mb) max used (Mb)
#Ncells 48196522 2574.0 62761417 3351.9 49091534 2621.8
#Vcells 499957106 3814.4 825531125 6298.4 811662218 6192.5
# Logistic regression on a manually chosen subset of the first 20 PCs.
model_log <- glm(as.factor(train1$target) ~ PC1 + PC2 + PC3 + PC4 + PC5 + PC6 +
PC7 + PC8 + PC9 + PC10 + PC11 + PC14 + PC15 + PC18 + PC19 +
PC20 ,family=binomial(link='logit'),data=train.data)
results <- predict(model_log,newdata=subset(test.data),type='response')
result <- data.frame(id = test1$id,target = results)
# NOTE(review): `results` are TEST-set predictions while `actual` comes from the
# TRAIN targets -- the rows only pair up because both subsets have 100000 rows.
# Confirm this comparison is intended.
res <- data.frame(pred = results, actual=train.data$target)
##################### run a decision tree #####################
# Fit an rpart classification tree on a random half of train1 and evaluate it
# on the held-out half via a confusion matrix.
library(rpart)
n <- nrow(train1)
Data <- comb[seq_len(n), ]  # first n rows of the combined frame (not used below)
# BUG FIX: sampling from 0:n could draw the invalid row index 0 (silently
# ignored when subsetting), leaving the training split one row short.
tr <- sort(sample(n, floor(n / 2)))
Train_data <- train1[tr, ]
Test_data <- train1[-tr, ]
# BUG FIX: train1[2:59, ] selected ROWS 2:59 (only 58 observations); the intent
# is to model `target` on the feature COLUMNS, dropping the id column (col 1).
rp <- rpart(target ~ ., data = train1[, 2:59], subset = tr, method = "class")
pred.rpt <- predict(rp, newdata = Test_data, type = "class")
pred.rpt
# Confusion matrix: held-out targets vs. tree predictions.
table(train1[-tr, ]$target, pred.rpt)
# pred.rpt
#0 1
#0 48175 0
#1 1826 0
#How many examples are well predicted?48175
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.Holdings.bulk}
\alias{sql.Holdings.bulk}
\title{sql.Holdings.bulk}
\usage{
sql.Holdings.bulk(x, y, n, w, h)
}
\arguments{
\item{x}{= name of temp table with holdings}
\item{y}{= columns of <x> (in order)}
\item{n}{= the holdings date in YYYYMMDD}
\item{w}{= unused temp table name for benchmark holdings}
\item{h}{= unused temp table name for benchmark AUM}
}
\description{
query to bulk data with known benchmark holdings
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocD}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.regr}},
\code{\link{sql.tbl}}, \code{\link{sql.ui}},
\code{\link{sql.unbracket}}, \code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.Holdings.bulk}
| /man/sql.Holdings.bulk.Rd | no_license | vsrimurthy/EPFR | R | false | true | 5,030 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{sql.Holdings.bulk}
\alias{sql.Holdings.bulk}
\title{sql.Holdings.bulk}
\usage{
sql.Holdings.bulk(x, y, n, w, h)
}
\arguments{
\item{x}{= name of temp table with holdings}
\item{y}{= columns of <x> (in order)}
\item{n}{= the holdings date in YYYYMMDD}
\item{w}{= unused temp table name for benchmark holdings}
\item{h}{= unused temp table name for benchmark AUM}
}
\description{
query to bulk data with known benchmark holdings
}
\seealso{
Other sql: \code{\link{sql.1dActWtTrend.Alloc}},
\code{\link{sql.1dActWtTrend.Final}},
\code{\link{sql.1dActWtTrend.Flow}},
\code{\link{sql.1dActWtTrend.select}},
\code{\link{sql.1dActWtTrend.topline.from}},
\code{\link{sql.1dActWtTrend.topline}},
\code{\link{sql.1dActWtTrend.underlying.basic}},
\code{\link{sql.1dActWtTrend.underlying}},
\code{\link{sql.1dActWtTrend}},
\code{\link{sql.1dFloMo.CountryId.List}},
\code{\link{sql.1dFloMo.FI}},
\code{\link{sql.1dFloMo.Rgn}},
\code{\link{sql.1dFloMo.Sec.topline}},
\code{\link{sql.1dFloMo.filter}},
\code{\link{sql.1dFloMo.grp}},
\code{\link{sql.1dFloMo.select.wrapper}},
\code{\link{sql.1dFloMo.select}},
\code{\link{sql.1dFloMo.underlying}},
\code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}},
\code{\link{sql.1dFloTrend.Alloc.data}},
\code{\link{sql.1dFloTrend.Alloc.fetch}},
\code{\link{sql.1dFloTrend.Alloc.final}},
\code{\link{sql.1dFloTrend.Alloc.from}},
\code{\link{sql.1dFloTrend.Alloc.purge}},
\code{\link{sql.1dFloTrend.Alloc}},
\code{\link{sql.1dFloTrend.select}},
\code{\link{sql.1dFloTrend.underlying}},
\code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}},
\code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}},
\code{\link{sql.1mActWt.underlying}},
\code{\link{sql.1mActWtIncrPct}},
\code{\link{sql.1mActWtTrend.underlying}},
\code{\link{sql.1mActWtTrend}},
\code{\link{sql.1mActWt}},
\code{\link{sql.1mAllocD.from}},
\code{\link{sql.1mAllocD.select}},
\code{\link{sql.1mAllocD.topline.from}},
\code{\link{sql.1mAllocD}},
\code{\link{sql.1mAllocMo.select}},
\code{\link{sql.1mAllocMo.underlying.from}},
\code{\link{sql.1mAllocMo.underlying.pre}},
\code{\link{sql.1mAllocMo}},
\code{\link{sql.1mAllocSkew.topline.from}},
\code{\link{sql.1mAllocSkew}},
\code{\link{sql.1mBullish.Alloc}},
\code{\link{sql.1mBullish.Final}},
\code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}},
\code{\link{sql.1mFloTrend.underlying}},
\code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}},
\code{\link{sql.1mHoldAum}},
\code{\link{sql.1mSRIAdvisorPct}},
\code{\link{sql.1wFlow.Corp}},
\code{\link{sql.ActWtDiff2}},
\code{\link{sql.Allocation.Sec.FinsExREst}},
\code{\link{sql.Allocation.Sec}},
\code{\link{sql.Allocations.bulk.EqWtAvg}},
\code{\link{sql.Allocations.bulk.Single}},
\code{\link{sql.Allocation}},
\code{\link{sql.BenchIndex.duplication}},
\code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}},
\code{\link{sql.Diff}}, \code{\link{sql.Dispersion}},
\code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}},
\code{\link{sql.Foreign}},
\code{\link{sql.FundHistory.macro}},
\code{\link{sql.FundHistory.sf}},
\code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}},
\code{\link{sql.HerdingLSV}},
\code{\link{sql.Holdings.bulk.wrapper}},
\code{\link{sql.Holdings}}, \code{\link{sql.ION}},
\code{\link{sql.MonthlyAlloc}},
\code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}},
\code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}},
\code{\link{sql.ReportDate}}, \code{\link{sql.SRI}},
\code{\link{sql.ShareClass}},
\code{\link{sql.TopDownAllocs.items}},
\code{\link{sql.TopDownAllocs.underlying}},
\code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}},
\code{\link{sql.and}}, \code{\link{sql.arguments}},
\code{\link{sql.bcp}}, \code{\link{sql.breakdown}},
\code{\link{sql.case}}, \code{\link{sql.close}},
\code{\link{sql.connect.wrapper}},
\code{\link{sql.connect}},
\code{\link{sql.cross.border}},
\code{\link{sql.datediff}}, \code{\link{sql.declare}},
\code{\link{sql.delete}}, \code{\link{sql.drop}},
\code{\link{sql.exists}},
\code{\link{sql.extra.domicile}},
\code{\link{sql.index}}, \code{\link{sql.into}},
\code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}},
\code{\link{sql.label}}, \code{\link{sql.map.classif}},
\code{\link{sql.mat.cofactor}},
\code{\link{sql.mat.crossprod.vector}},
\code{\link{sql.mat.crossprod}},
\code{\link{sql.mat.determinant}},
\code{\link{sql.mat.flip}},
\code{\link{sql.mat.multiply}}, \code{\link{sql.median}},
\code{\link{sql.nonneg}},
\code{\link{sql.query.underlying}},
\code{\link{sql.query}}, \code{\link{sql.regr}},
\code{\link{sql.tbl}}, \code{\link{sql.ui}},
\code{\link{sql.unbracket}}, \code{\link{sql.update}},
\code{\link{sql.yield.curve.1dFloMo}},
\code{\link{sql.yield.curve}},
\code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}}
}
\keyword{sql.Holdings.bulk}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alt_posterior.R
\name{alt_posterior}
\alias{alt_posterior}
\title{Compute the posterior probability of the alternative hypothesis}
\usage{
alt_posterior(p, model)
}
\arguments{
\item{p}{A numeric vector of p-values}
\item{model}{The beta mixture model represented as a named list of distribution parameters.}
}
\description{
Maps a vector of p-values to a vector of posterior probabilities
given a beta mixture model.
}
| /man/alt_posterior.Rd | permissive | stevehoang/pbayes | R | false | true | 498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alt_posterior.R
\name{alt_posterior}
\alias{alt_posterior}
\title{Compute the posterior probability of the alternative hypothesis}
\usage{
alt_posterior(p, model)
}
\arguments{
\item{p}{A numeric vector of p-values}
\item{model}{The beta mixture model represented as a named list of distribution parameters.}
}
\description{
Maps a vector of p-values to a vector of posterior probabilities
given a beta mixture model.
}
|
# makeReadPileup: for a set of Ensembl gene IDs, draw per-sample read-coverage
# tracks (from BAM files) together with each gene's transcript models, one page
# per gene, into genes.pdf.
library(ggbio)
library(GenomicRanges)
library(GenomicFeatures)
library(GenomicAlignments)
library(biovizBase)
library(Rsamtools)
library(org.Hs.eg.db)
library (TxDb.Hsapiens.UCSC.hg19.knownGene)
library(biomaRt)
# Use the archived Ensembl release (Feb 2014) so the annotation is reproducible.
ensembl=useMart(biomart="ENSEMBL_MART_ENSEMBL",host="feb2014.archive.ensembl.org",path="/biomart/martservice",archive=FALSE)
ensembl=useDataset("hsapiens_gene_ensembl",mart=ensembl)
# Genes of interest.
gene_ids <- c(
"ENSG00000186205",
"ENSG00000133636",
"ENSG00000105723",
"ENSG00000148408",
"ENSG00000131471",
"ENSG00000092421"
)
genes.annotation=getBM(attributes=c("ensembl_gene_id", "ensembl_transcript_id","external_gene_id"),filters=c("ensembl_gene_id"),values=gene_ids, mart= ensembl) # fetch gene ids, transcript ids and gene names from the database
transcripts = genes.annotation$ensembl_transcript_id
# Lookup table: Ensembl gene id -> external gene symbol (rownames = gene id).
genes2id <- unique(genes.annotation[,c(1,3)])
rownames(genes2id) <- genes2id[,1]
txdb=makeTxDbFromBiomart(transcript_ids=transcripts,biomart="ENSEMBL_MART_ENSEMBL",host="feb2014.archive.ensembl.org")
genes <- genes(txdb)
# Prefix numeric chromosome names with "chr" so they match the BAM seqlevels.
seqlevels(genes) <- sapply(seqlevels(genes),function(x) { if (any(grep("^\\d",x))) { paste("chr",x,sep="") } else { x } })
exons <- exonsBy(txdb, "gene")
#wh.genes <- genes(txdb,vals=list(gene_id=gene_ids))
wh.transcripts <- transcriptsBy(txdb ,by="gene")
# One BAM per condition, in the same order as bfn and norm below.
bfl <- c(
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/DiseaseUntreated.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/DiseaseEpi.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/ControlUntreated.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/ControlEpi.bam"
)
bfn <- c(
"Disease Untreated",
"Disease Epi",
"Control Untreated",
"Control Epi"
)
# Normalize sizes: total reads per library -- presumably mapped-read counts,
# TODO confirm. Reads are downsampled so every sample matches the smallest
# library.
norm <- c(
135765722,
134176076,
89563374,
85207016
)
norm <- min(norm)/norm
pdf("genes.pdf")
for (i in 1:length(gene_ids)) {
n <- gene_ids[i]
#p <- ScanBamParam(which = genesymbol[n])
# Restrict BAM reading to the current gene's region.
p <- ScanBamParam(which = genes[n])
wh <- wh.transcripts[n][[1]]
# Transcript-model track for this gene (built BEFORE renaming seqlevels,
# since autoplot(txdb, ...) needs the original Ensembl names).
z <- tracks(autoplot(txdb, which = wh, gap.geom = "chevron"),xlim=reduce(ranges(wh)),scale.height=2)
seqlevels(wh) <- sapply(seqlevels(wh),function(x) { if (any(grep("^\\d",x))) { paste("chr",x,sep="") } else { x } })
for (j in 1:length(bfl)) {
ga <- readGAlignments(bfl[j], param=p,use.names=TRUE)
# Downsample reads for library-size normalization (see norm above).
ga <- ga[sample(length(ga),length(ga) * norm[j])]
# Keep only reads lying strictly inside the gene span.
ga2 <- ga[(start(ga) > start(reduce(ranges(wh)))) & (end(ga) < end(reduce(ranges(wh))))]
x <- tracks(autoplot(ga2, stat = "coverage",ylab=genes2id[n,2]))
if (j == 1) {
y <- x
} else {
y <- y + x
}
}
# Stack all coverage tracks above the transcript models and print the page.
print(y + z + theme_alignment() )
}
dev.off()
| /makeReadPileup.R | no_license | bdownie/analysis | R | false | false | 2,681 | r | library(ggbio)
library(GenomicRanges)
library(GenomicFeatures)
library(GenomicAlignments)
library(biovizBase)
library(Rsamtools)
library(org.Hs.eg.db)
library (TxDb.Hsapiens.UCSC.hg19.knownGene)
library(biomaRt)
ensembl=useMart(biomart="ENSEMBL_MART_ENSEMBL",host="feb2014.archive.ensembl.org",path="/biomart/martservice",archive=FALSE)
ensembl=useDataset("hsapiens_gene_ensembl",mart=ensembl)
gene_ids <- c(
"ENSG00000186205",
"ENSG00000133636",
"ENSG00000105723",
"ENSG00000148408",
"ENSG00000131471",
"ENSG00000092421"
)
genes.annotation=getBM(attributes=c("ensembl_gene_id", "ensembl_transcript_id","external_gene_id"),filters=c("ensembl_gene_id"),values=gene_ids, mart= ensembl) # fuction to get gene id's and gene name from data base
transcripts = genes.annotation$ensembl_transcript_id
genes2id <- unique(genes.annotation[,c(1,3)])
rownames(genes2id) <- genes2id[,1]
txdb=makeTxDbFromBiomart(transcript_ids=transcripts,biomart="ENSEMBL_MART_ENSEMBL",host="feb2014.archive.ensembl.org")
genes <- genes(txdb)
seqlevels(genes) <- sapply(seqlevels(genes),function(x) { if (any(grep("^\\d",x))) { paste("chr",x,sep="") } else { x } })
exons <- exonsBy(txdb, "gene")
#wh.genes <- genes(txdb,vals=list(gene_id=gene_ids))
wh.transcripts <- transcriptsBy(txdb ,by="gene")
bfl <- c(
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/DiseaseUntreated.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/DiseaseEpi.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/ControlUntreated.bam",
"/ngsdata/intern/150409_SN933_0186_AC6TN4ACXX/Aligned/Boemecke_909/bam/ControlEpi.bam"
)
bfn <- c(
"Disease Untreated",
"Disease Epi",
"Control Untreated",
"Control Epi"
)
# Normalize sizes
norm <- c(
135765722,
134176076,
89563374,
85207016
)
norm <- min(norm)/norm
pdf("genes.pdf")
for (i in 1:length(gene_ids)) {
n <- gene_ids[i]
#p <- ScanBamParam(which = genesymbol[n])
p <- ScanBamParam(which = genes[n])
wh <- wh.transcripts[n][[1]]
z <- tracks(autoplot(txdb, which = wh, gap.geom = "chevron"),xlim=reduce(ranges(wh)),scale.height=2)
seqlevels(wh) <- sapply(seqlevels(wh),function(x) { if (any(grep("^\\d",x))) { paste("chr",x,sep="") } else { x } })
for (j in 1:length(bfl)) {
ga <- readGAlignments(bfl[j], param=p,use.names=TRUE)
ga <- ga[sample(length(ga),length(ga) * norm[j])]
ga2 <- ga[(start(ga) > start(reduce(ranges(wh)))) & (end(ga) < end(reduce(ranges(wh))))]
x <- tracks(autoplot(ga2, stat = "coverage",ylab=genes2id[n,2]))
if (j == 1) {
y <- x
} else {
y <- y + x
}
}
print(y + z + theme_alignment() )
}
dev.off()
|
context("marker accessors")

test_that("simple marker getters work", {
  # Each getter must agree whether called on the marker object itself,
  # on the pedigree by marker index, or on the pedigree by marker name.
  ped = nuclearPed(1)
  mk = marker(ped, name = "m1", chrom = 1, posMb = 1e7)
  ped = setMarkers(ped, mk)

  expect_equal(name(mk), "m1")
  expect_equal(name(ped, 1), "m1")

  expect_equal(chrom(mk), "1")
  expect_equal(chrom(ped, markers = 1), "1")
  expect_equal(chrom(ped, markers = "m1"), "1")

  expect_equal(posMb(mk), 1e7)
  expect_equal(posMb(ped, markers = 1), 1e7)
  expect_equal(posMb(ped, markers = "m1"), 1e7)

  # No centiMorgan position was set, so posCm() returns NA.
  expect_equal(posCm(mk), NA_real_)
  expect_equal(posCm(ped, markers = 1), NA_real_)
  expect_equal(posCm(ped, markers = "m1"), NA_real_)
})
test_that("alleles() accessor works", {
  ped = nuclearPed(1)
  letterAls = c("p","e","d")
  numMarker = marker(ped, alleles = 1:3, name = "m1")
  letterMarker = marker(ped, alleles = letterAls, name = "m2")
  ped = setMarkers(ped, list(numMarker, letterMarker))

  # Numeric alleles come back as characters, in their natural order.
  expectedNum = as.character(1:3)
  expect_equal(alleles(numMarker), expectedNum)
  expect_equal(alleles(ped, marker = 1), expectedNum)
  expect_equal(alleles(ped, marker = "m1"), expectedNum)

  # Letter alleles are returned alphabetically sorted.
  expectedLetters = sort(letterAls)
  expect_equal(alleles(letterMarker), expectedLetters)
  expect_equal(alleles(ped, marker = 2), expectedLetters)
  expect_equal(alleles(ped, marker = "m2"), expectedLetters)
})
test_that("afreq() accessor works", {
x = nuclearPed(1)
afr = c(.2,.3,.5)
# m1 gets the default alleles; m2 and m3 share frequencies but list their
# alleles in opposite order.
m1 = marker(x, name="m1")
m2 = marker(x, alleles=1:3, afreq=afr, name="m2")
m3 = marker(x, alleles=3:1, afreq=afr, name="m3")
x = setMarkers(x, list(m1,m2,m3))
# Default marker: two alleles with equal frequencies.
ans1 = c('1'=0.5, '2'=0.5)
expect_equal(afreq(m1), ans1)
expect_equal(afreq(x, marker=1), ans1)
expect_equal(afreq(x, marker="m1"), ans1)
names(afr) = 1:3
expect_equal(afreq(m2), afr)
expect_equal(afreq(x, marker=2), afr)
# m3's alleles were given as 3:1, so after sorting by allele name the
# frequency vector comes back reversed.
afr_rev = rev(afr); names(afr_rev) = 1:3
expect_equal(afreq(m3), afr_rev)
expect_equal(afreq(x, marker=3), afr_rev)
})
test_that("afreq replacement works", {
  ped = nuclearPed(1)
  ped = setMarkers(ped, list(marker(ped, alleles = c("a", "b"), name = "m1")))
  freqs = c(a = .1, b = .9)

  # Assign by marker name, read back by index.
  afreq(ped, "m1") = freqs
  expect_equal(afreq(ped, 1), freqs)

  # Assignment order does not matter: frequencies are matched by allele name,
  # so a reversed vector reads back in the canonical order.
  afreq(ped, 1) = rev(freqs)
  expect_equal(afreq(ped, "m1"), freqs)
})
test_that("afreq replacement gives correct error messages", {
x = nuclearPed(1)
m = marker(x, alleles=c("a"), name="m1")
x = setMarkers(x, list(m))
# Invalid markers, unnamed/misnamed/incomplete frequency vectors and
# frequencies not summing to 1 must each fail with a specific message.
expect_error({afreq(x, "m2") = c(a=1)}, "Unknown marker name: m2")
expect_error({afreq(x, 2) = c(a=1)}, "Marker index out of range: 2")
expect_error({afreq(x, 1:2) = c(a=1)}, "Frequency replacement can only be done for a single marker")
expect_error({afreq(x, "m1") = 1}, "Frequency vector must be named")
expect_error({afreq(x, "m1") = c(b=1)}, "Unknown allele: b")
expect_error({afreq(x, "m1") = c(a=1)[0]}, "Alleles missing from frequency vector: a")
expect_error({afreq(x, "m1") = c(a=0.1)}, "Frequencies must sum to 1")
})
test_that("genotype() works", {
x = nuclearPed(children="boy") # labels are 1,2,boy
m1 = marker(x, name="m1")
m2 = marker(x, boy=1:2, name="m2")
m3 = marker(x, '1'=17.2, name="m3") # homoz for STR allele
x = setMarkers(x, list(m1,m2,m3))
# An untyped individual returns a pair of NA alleles.
genoNA = c(NA_character_, NA_character_)
expect_equal(genotype(m1, "boy"), genoNA)
expect_equal(genotype(x, marker=1, id="boy"), genoNA)
expect_equal(genotype(x, marker="m1", id="boy"), genoNA)
# A heterozygous genotype given as 1:2 comes back as character alleles.
genoHet = as.character(1:2)
expect_equal(genotype(m2, id="boy"), genoHet)
expect_equal(genotype(x, marker=2, id="boy"), genoHet)
# A single STR allele means homozygous; id may be numeric or character.
genoSTR = c("17.2", "17.2")
expect_equal(genotype(m3, 1), genoSTR)
expect_equal(genotype(m3, "1"), genoSTR)
expect_equal(genotype(x, marker="m3", id=1), genoSTR)
})
test_that("genotype replacement works", {
  ped = nuclearPed(father = 101, mother = 102, children = "boy")
  ped = setMarkers(ped, list(marker(ped, name = "m1", alleles = 1:2),
                             marker(ped, name = "m2", alleles = c("a", "b"))))

  # Numeric-allele marker: a single allele means a homozygous genotype;
  # markers may be addressed by index or name, individuals by id.
  genotype(ped, 1, id = 101) = 2
  genotype(ped, "m1", "boy") = 2:1
  expect_equal(genotype(ped, "m1", 101), c("2", "2"))
  expect_equal(genotype(ped, 1, "boy"), c("2", "1"))

  # Letter-allele marker behaves the same way.
  genotype(ped, 2, id = 101) = 'b'
  genotype(ped, "m2", "boy") = c('b', 'a')
  expect_equal(genotype(ped, "m2", 101), c("b", "b"))
  expect_equal(genotype(ped, 2, "boy"), c("b", "a"))
})
test_that("genotype replacement gives correct error messages", {
x = nuclearPed(father=101, mother=102, children="boy")
m1 = marker(x, name="m1", alleles=1:2)
x = setMarkers(x, m1)
# Invalid markers, individuals, alleles and genotype lengths must each fail
# with a specific, informative message.
expect_error({genotype(x, "m2", 101) = 3}, "Unknown marker name: m2")
expect_error({genotype(x, 2, 101) = 3}, "Marker index out of range: 2")
expect_error({genotype(x, 1:2, 101) = 3}, "Genotype replacement can only be done for a single marker")
expect_error({genotype(x, "m1", 100) = 3}, "Unknown ID label: 100")
expect_error({genotype(x, "m1", "girl") = 3}, "Unknown ID label: girl")
expect_error({genotype(x, "m1", 101) = 3}, "Unknown allele for this marker: 3")
expect_error({genotype(x, "m1", 101) = 1:3}, "Length of genotype vector must be 1 or 2")
})
test_that("genotype replacement works with partial genotypes", {
x = nuclearPed(father=101, mother=102, children=1:2)
m1 = marker(x, name="m1", alleles=c('a','b'))
x = setMarkers(x, m1)
# NA, "", 0 and "-" should all be accepted as the "missing" second allele.
genotype(x, "m1", id=101) = c("a", NA)
genotype(x, "m1", id=102) = c("a", "")
genotype(x, "m1", id=1) = c("b", 0)
genotype(x, "m1", id=2) = c("b", "-")
# Internally the missing allele is stored as 0 ...
expect_equal(x$markerdata[[1]][,1], c(1,1,2,2))
expect_equal(x$markerdata[[1]][,2], c(0,0,0,0))
# ... and reported as NA by the genotype() accessor.
expect_equal(genotype(x, 1, 101), c("a", NA_character_))
expect_equal(genotype(x, 1, 102), c("a", NA_character_))
expect_equal(genotype(x, 1, 1), c("b", NA_character_))
expect_equal(genotype(x, 1, 2), c("b", NA_character_))
})
| /tests/testthat/test-marker-accessors.R | no_license | luansheng/pedtools | R | false | false | 5,467 | r | context("marker accessors")
test_that("simple marker getters work", {
x = nuclearPed(1)
m = marker(x, name="m1", chrom=1, posMb=1e7)
x = setMarkers(x, m)
expect_equal(name(m), "m1")
expect_equal(name(x, 1), "m1")
expect_equal(chrom(m), "1")
expect_equal(chrom(x, markers=1), "1")
expect_equal(chrom(x, markers="m1"), "1")
expect_equal(posMb(m), 1e7)
expect_equal(posMb(x, markers=1), 1e7)
expect_equal(posMb(x, markers="m1"), 1e7)
expect_equal(posCm(m), NA_real_)
expect_equal(posCm(x, markers=1), NA_real_)
expect_equal(posCm(x, markers="m1"), NA_real_)
})
test_that("alleles() accessor works", {
x = nuclearPed(1)
als = c("p","e","d")
m1 = marker(x, alleles=1:3, name="m1")
m2 = marker(x, alleles=als, name="m2")
x = setMarkers(x, list(m1,m2))
expect_equal(alleles(m1), as.character(1:3))
expect_equal(alleles(x, marker=1), as.character(1:3))
expect_equal(alleles(x, marker="m1"), as.character(1:3))
expect_equal(alleles(m2), sort(als))
expect_equal(alleles(x, marker=2), sort(als))
expect_equal(alleles(x, marker="m2"), sort(als))
})
test_that("afreq() accessor works", {
x = nuclearPed(1)
afr = c(.2,.3,.5)
m1 = marker(x, name="m1")
m2 = marker(x, alleles=1:3, afreq=afr, name="m2")
m3 = marker(x, alleles=3:1, afreq=afr, name="m3")
x = setMarkers(x, list(m1,m2,m3))
ans1 = c('1'=0.5, '2'=0.5)
expect_equal(afreq(m1), ans1)
expect_equal(afreq(x, marker=1), ans1)
expect_equal(afreq(x, marker="m1"), ans1)
names(afr) = 1:3
expect_equal(afreq(m2), afr)
expect_equal(afreq(x, marker=2), afr)
afr_rev = rev(afr); names(afr_rev) = 1:3
expect_equal(afreq(m3), afr_rev)
expect_equal(afreq(x, marker=3), afr_rev)
})
test_that("afreq replacement works", {
x = nuclearPed(1)
m = marker(x, alleles=c("a", "b"), name="m1")
x = setMarkers(x, list(m))
afr = c(a=.1, b=.9)
afreq(x, "m1") = afr
expect_equal(afreq(x, 1), afr)
afreq(x, 1) = rev(afr)
expect_equal(afreq(x, "m1"), afr)
})
test_that("afreq replacement gives correct error messages", {
x = nuclearPed(1)
m = marker(x, alleles=c("a"), name="m1")
x = setMarkers(x, list(m))
expect_error({afreq(x, "m2") = c(a=1)}, "Unknown marker name: m2")
expect_error({afreq(x, 2) = c(a=1)}, "Marker index out of range: 2")
expect_error({afreq(x, 1:2) = c(a=1)}, "Frequency replacement can only be done for a single marker")
expect_error({afreq(x, "m1") = 1}, "Frequency vector must be named")
expect_error({afreq(x, "m1") = c(b=1)}, "Unknown allele: b")
expect_error({afreq(x, "m1") = c(a=1)[0]}, "Alleles missing from frequency vector: a")
expect_error({afreq(x, "m1") = c(a=0.1)}, "Frequencies must sum to 1")
})
test_that("genotype() works", {
  ped = nuclearPed(children = "boy")  # member labels: 1, 2, boy
  mEmpty = marker(ped, name = "m1")
  mHet = marker(ped, boy = 1:2, name = "m2")
  mStr = marker(ped, '1' = 17.2, name = "m3")  # homozygous STR allele
  ped = setMarkers(ped, list(mEmpty, mHet, mStr))

  # Untyped individual: genotype is a pair of character NAs
  genoNA = c(NA_character_, NA_character_)
  expect_equal(genotype(mEmpty, "boy"), genoNA)
  expect_equal(genotype(ped, marker = 1, id = "boy"), genoNA)
  expect_equal(genotype(ped, marker = "m1", id = "boy"), genoNA)

  # Heterozygous genotype is returned as a character vector
  expect_equal(genotype(mHet, id = "boy"), as.character(1:2))
  expect_equal(genotype(ped, marker = 2, id = "boy"), as.character(1:2))

  # Numeric and character individual labels are interchangeable
  genoSTR = c("17.2", "17.2")
  expect_equal(genotype(mStr, 1), genoSTR)
  expect_equal(genotype(mStr, "1"), genoSTR)
  expect_equal(genotype(ped, marker = "m3", id = 1), genoSTR)
})
test_that("genotype replacement works", {
  ped = nuclearPed(father = 101, mother = 102, children = "boy")
  ped = setMarkers(ped, list(marker(ped, name = "m1", alleles = 1:2),
                             marker(ped, name = "m2", alleles = c("a", "b"))))

  # Numeric-allele marker: a scalar assignment means homozygous
  genotype(ped, 1, id = 101) = 2
  genotype(ped, "m1", "boy") = 2:1
  expect_equal(genotype(ped, "m1", 101), c("2", "2"))
  expect_equal(genotype(ped, 1, "boy"), c("2", "1"))

  # Character-allele marker behaves the same way
  genotype(ped, 2, id = 101) = 'b'
  genotype(ped, "m2", "boy") = c('b','a')
  expect_equal(genotype(ped, "m2", 101), c("b", "b"))
  expect_equal(genotype(ped, 2, "boy"), c("b", "a"))
})
test_that("genotype replacement gives correct error messages", {
  ped = nuclearPed(father = 101, mother = 102, children = "boy")
  ped = setMarkers(ped, marker(ped, name = "m1", alleles = 1:2))

  # Invalid marker selectors
  expect_error({genotype(ped, "m2", 101) = 3}, "Unknown marker name: m2")
  expect_error({genotype(ped, 2, 101) = 3}, "Marker index out of range: 2")
  expect_error({genotype(ped, 1:2, 101) = 3},
               "Genotype replacement can only be done for a single marker")

  # Invalid individuals
  expect_error({genotype(ped, "m1", 100) = 3}, "Unknown ID label: 100")
  expect_error({genotype(ped, "m1", "girl") = 3}, "Unknown ID label: girl")

  # Invalid genotype values
  expect_error({genotype(ped, "m1", 101) = 3}, "Unknown allele for this marker: 3")
  expect_error({genotype(ped, "m1", 101) = 1:3}, "Length of genotype vector must be 1 or 2")
})
test_that("genotype replacement works with partial genotypes", {
  ped = nuclearPed(father = 101, mother = 102, children = 1:2)
  ped = setMarkers(ped, marker(ped, name = "m1", alleles = c('a','b')))

  # NA, "", 0 and "-" should all be accepted as codes for a missing allele
  genotype(ped, "m1", id = 101) = c("a", NA)
  genotype(ped, "m1", id = 102) = c("a", "")
  genotype(ped, "m1", id = 1) = c("b", 0)
  genotype(ped, "m1", id = 2) = c("b", "-")

  # Internal storage holds allele indices, with 0 marking a missing allele
  expect_equal(ped$markerdata[[1]][,1], c(1,1,2,2))
  expect_equal(ped$markerdata[[1]][,2], c(0,0,0,0))

  # The accessor reports each missing allele as NA
  expect_equal(genotype(ped, 1, 101), c("a", NA_character_))
  expect_equal(genotype(ped, 1, 102), c("a", NA_character_))
  expect_equal(genotype(ped, 1, 1), c("b", NA_character_))
  expect_equal(genotype(ped, 1, 2), c("b", NA_character_))
})
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{heatmap3}
\alias{heatmap3}
\title{heatmap3}
\usage{
heatmap3(x, Rowv = NULL, Colv = if (symm) "Rowv" else NULL,
distfun = function(x) as.dist(1 - cor(t(x), use = "pa")), distfunC,
distfunR, balanceColor = F, ColSideLabs, RowSideLabs, showColDendro = T,
showRowDendro = T, col = colorRampPalette(c("navy", "white",
"firebrick3"))(1024), legendfun, method = "complete", ColAxisColors = 0,
RowAxisColors = 0, hclustfun = hclust, reorderfun = function(d, w)
reorder(d, w), add.expr, symm = FALSE, revC = identical(Colv, "Rowv"),
scale = c("row", "column", "none"), na.rm = TRUE, ColSideFun, ColSideAnn,
ColSideWidth = 0.4, ColSideCut, colorCell, highlightCell,
file = "heatmap3.pdf", topN = NA, filterFun = sd, margins = c(5, 5),
ColSideColors, RowSideColors, cexRow = 0.2 + 1/log10(nrow(x)),
cexCol = 0.2 + 1/log10(ncol(x)), lasRow = 2, lasCol = 2,
labRow = NULL, labCol = NULL, main = NULL, xlab = NULL, ylab = NULL,
keep.dendro = FALSE, verbose = getOption("verbose"), useRaster = if
(ncol(x) * nrow(x) >= 50000) TRUE else FALSE, ...)
}
\arguments{
\item{x}{numeric matrix of the values to be plotted. }
\item{Rowv}{determines if and how the \emph{row} dendrogram should be
computed and reordered. Either a \code{\link{dendrogram}} or a
vector of values used to reorder the row dendrogram or
\code{\link{NA}} to suppress any row dendrogram (and reordering) or
by default, \code{\link{NULL}}, see \sQuote{Details} below.}
\item{Colv}{determines if and how the \emph{column} dendrogram should be
reordered. Has the same options as the \code{Rowv} argument above and
\emph{additionally} when \code{x} is a square matrix, \code{Colv =
"Rowv"} means that columns should be treated identically to the
rows (and so if there is to be no row dendrogram there will not be a
column one either).}
\item{distfun}{function used to compute the distance (dissimilarity)
between both rows and columns. Defaults to \code{\link{dist}}.}
\item{distfunC}{function used to compute the distance (dissimilarity) between columns. Will be the same as distfun if not specified.}
\item{distfunR}{function used to compute the distance (dissimilarity) between rows. Will be the same as distfun if not specified.}
\item{balanceColor}{logical indicating if the colors need to be balanced so that the median color will represent the 0 value. The default value is F.}
\item{ColSideLabs}{label for ColSideColors}
\item{RowSideLabs}{label for RowSideColors}
\item{showColDendro}{logical indicating if the column dendrogram should be plotted (when Colv isn't NA).}
\item{showRowDendro}{logical indicating if the row dendrogram should be plotted (when Rowv isn't NA).}
\item{col}{specifying the colors, used in \code{\link{image}} function.}
\item{legendfun}{function used to generate legend in top left of the figure. If not specified, the color bar will be plotted. The users can use any plot functions to generate their own legend. Or a function \code{\link{showLegend}} is also provided as an example.}
\item{method}{the agglomeration method to be used by \code{\link{hclust}} function. This should be (an unambiguous abbreviation of) one of "ward", "single", "complete", "average", "mcquitty", "median" or "centroid".}
\item{ColAxisColors}{integer indicating which column of ColSideColors will be used as colors for labels in the column axis. The default value is 0, which means all column labels will be in black color.}
\item{RowAxisColors}{integer indicating which column of RowSideColors will be used as colors for labels in the row axis. The default value is 0, which means all row labels will be in black color.}
\item{hclustfun}{function used to compute the hierarchical clustering
when \code{Rowv} or \code{Colv} are not dendrograms. Defaults to
\code{\link{hclust}}. Should take as argument a result of \code{distfun}
and return an object to which \code{\link{as.dendrogram}} can be applied.}
\item{reorderfun}{\code{function(d, w)} of dendrogram and weights for
reordering the row and column dendrograms. The default uses
\code{\link{reorder.dendrogram}}.}
\item{add.expr}{expression that will be evaluated after the call to
\code{image}. Can be used to add components to the plot.}
\item{symm}{logical indicating if \code{x} should be treated
\bold{symm}etrically; can only be true when \code{x} is a square matrix.}
\item{revC}{logical indicating if the column order should be
\code{\link{rev}}ersed for plotting, such that e.g., for the
symmetric case, the symmetry axis is as usual.}
\item{scale}{character indicating if the values should be centered and
scaled in either the row direction or the column direction, or
none. The default is \code{"row"} if \code{symm} false, and
\code{"none"} otherwise.}
\item{na.rm}{logical indicating whether \code{NA}'s should be removed.}
\item{ColSideFun}{function used to generate annotation and labeling figure in column side. The users can use any plot functions to generate their own figure. And a function \code{\link{showAnn}} is also provided as an example.}
\item{ColSideAnn}{data frame with continuous and factor variables as annotation information. This parameter will be sorted by the column dendrogram and then passed to ColSideFun.}
\item{ColSideWidth}{numeric the height of column side area, which can be used by ColSideFun function.}
\item{ColSideCut}{numeric the value to be used in cutting the column dendrogram. The dendrogram and annotation will be divided into different parts and labeled respectively.}
\item{colorCell}{A data.frame with 3 columns, indicating which cells will be colored by specific colors. The first column is row index, second column is column index, and the third column is color.}
\item{highlightCell}{A data.frame with 3 or 4 columns, indicating which cells will be highlighted by rectangles with specific colors. The first column is row index, second column is column index, the third column is color for rectangle border, and the optional fourth column is width for rectangle border.}
\item{file}{pdf file name, only works when topN was used.}
\item{topN}{vector a list of numbers. topN genes will be used to generate the heatmaps.}
\item{filterFun}{function used to filter genes, such as sd, mean, sum. It will be used in an apply function to calculate a value for each row.}
\item{margins}{numeric vector of length 2 containing the margins
(see \code{\link{par}(mar = *)}) for column and row names, respectively.}
\item{ColSideColors}{(optional) character vector of length \code{ncol(x)}
containing the color names for a horizontal side bar that may be used to
annotate the columns of \code{x}.}
\item{RowSideColors}{(optional) character vector of length \code{nrow(x)}
containing the color names for a vertical side bar that may be used to
annotate the rows of \code{x}.}
\item{cexRow,cexCol}{positive numbers, used as cex.axis in for the row or column axis labeling. The defaults currently only use number of rows or columns, respectively.}
\item{lasRow,lasCol}{the style of row or column axis labels.}
\item{labRow,labCol}{character vectors with row and column labels to use; these default to rownames(x) or colnames(x), respectively.}
\item{main,xlab,ylab}{main, x- and y-axis titles; defaults to none.}
\item{keep.dendro}{logical indicating if the dendrogram(s) should be
kept as part of the result (when \code{Rowv} and/or \code{Colv} are
not NA).}
\item{verbose}{logical indicating if information should be printed.}
\item{useRaster}{logical; if TRUE a bitmap raster is used to plot the image instead of polygons. The grid must be regular in that case, otherwise an error is raised.}
\item{...}{additional arguments passed on to \code{\link{image}}.}
}
\value{
The same return value as \code{\link{hclust}} function.
}
\description{
The function heatmap3 is completely compatible with the original R function heatmap, and provides more new features.
}
\examples{
#generate data
set.seed(123456789)
rnormData<-matrix(rnorm(1000), 40, 25)
rnormData[1:15, seq(6, 25, 2)] = rnormData[1:15, seq(6, 25, 2)] + 2
rnormData[16:40, seq(7, 25, 2)] = rnormData[16:40, seq(7, 25, 2)] + 4
colnames(rnormData)<-c(paste("Control", 1:5, sep = ""), paste(c("TrtA", "TrtB"),
rep(1:10,each=2), sep = ""))
rownames(rnormData)<-paste("Probe", 1:40, sep = "")
ColSideColors<-cbind(Group1=c(rep("steelblue2",5), rep(c("brown1", "mediumpurple2"),10)),
Group2=sample(c("steelblue2","brown1", "mediumpurple2"),25,replace=TRUE))
colorCell<-data.frame(row=c(1,3,5),col=c(2,4,6),color=c("green4","black","orange2"),
stringsAsFactors=FALSE)
highlightCell<-data.frame(row=c(2,4,6),col=c(1,3,5),color=c("black","green4","orange2"),
lwd=1:3,stringsAsFactors=FALSE)
#A simple example
heatmap3(rnormData,ColSideColors=ColSideColors,showRowDendro=FALSE,colorCell=colorCell,
highlightCell=highlightCell)
#A more detail example
ColSideAnn<-data.frame(Information=rnorm(25),Group=c(rep("Control",5), rep(c("TrtA", "TrtB"),10)))
row.names(ColSideAnn)<-colnames(rnormData)
RowSideColors<-colorRampPalette(c("chartreuse4", "white", "firebrick"))(40)
result<-heatmap3(rnormData,ColSideCut=1.2,ColSideAnn=ColSideAnn,ColSideFun=function(x)
showAnn(x),ColSideWidth=0.8,RowSideColors=RowSideColors,col=colorRampPalette(c("green",
"black", "red"))(1024),RowAxisColors=1,legendfun=function() showLegend(legend=c("Low",
"High"),col=c("chartreuse4","firebrick")),verbose=TRUE)
#annotations distribution in different clusters and the result of statistic tests
result$cutTable
}
| /man/heatmap3.Rd | no_license | Maddocent/heatmap3 | R | false | false | 9,820 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{heatmap3}
\alias{heatmap3}
\title{heatmap3}
\usage{
heatmap3(x, Rowv = NULL, Colv = if (symm) "Rowv" else NULL,
distfun = function(x) as.dist(1 - cor(t(x), use = "pa")), distfunC,
distfunR, balanceColor = F, ColSideLabs, RowSideLabs, showColDendro = T,
showRowDendro = T, col = colorRampPalette(c("navy", "white",
"firebrick3"))(1024), legendfun, method = "complete", ColAxisColors = 0,
RowAxisColors = 0, hclustfun = hclust, reorderfun = function(d, w)
reorder(d, w), add.expr, symm = FALSE, revC = identical(Colv, "Rowv"),
scale = c("row", "column", "none"), na.rm = TRUE, ColSideFun, ColSideAnn,
ColSideWidth = 0.4, ColSideCut, colorCell, highlightCell,
file = "heatmap3.pdf", topN = NA, filterFun = sd, margins = c(5, 5),
ColSideColors, RowSideColors, cexRow = 0.2 + 1/log10(nrow(x)),
cexCol = 0.2 + 1/log10(ncol(x)), lasRow = 2, lasCol = 2,
labRow = NULL, labCol = NULL, main = NULL, xlab = NULL, ylab = NULL,
keep.dendro = FALSE, verbose = getOption("verbose"), useRaster = if
(ncol(x) * nrow(x) >= 50000) TRUE else FALSE, ...)
}
\arguments{
\item{x}{numeric matrix of the values to be plotted. }
\item{Rowv}{determines if and how the \emph{row} dendrogram should be
computed and reordered. Either a \code{\link{dendrogram}} or a
vector of values used to reorder the row dendrogram or
\code{\link{NA}} to suppress any row dendrogram (and reordering) or
by default, \code{\link{NULL}}, see \sQuote{Details} below.}
\item{Colv}{determines if and how the \emph{column} dendrogram should be
reordered. Has the same options as the \code{Rowv} argument above and
\emph{additionally} when \code{x} is a square matrix, \code{Colv =
"Rowv"} means that columns should be treated identically to the
rows (and so if there is to be no row dendrogram there will not be a
column one either).}
\item{distfun}{function used to compute the distance (dissimilarity)
between both rows and columns. Defaults to \code{\link{dist}}.}
\item{distfunC}{function used to compute the distance (dissimilarity) between columns. Will be the same as distfun if not specified.}
\item{distfunR}{function used to compute the distance (dissimilarity) between rows. Will be the same as distfun if not specified.}
\item{balanceColor}{logical indicating if the colors need to be balanced so that the median color will represent the 0 value. The default value is F.}
\item{ColSideLabs}{label for ColSideColors}
\item{RowSideLabs}{label for RowSideColors}
\item{showColDendro}{logical indicating if the coloum dendrogram should be plotted (when Colv isn't NA).}
\item{showRowDendro}{logical indicating if the row dendrogram should be plotted (when Rowv isn't NA).}
\item{col}{specifying the colors, used in \code{\link{image}} function.}
\item{legendfun}{function used to generate legend in top left of the figure. If not specified, the color bar will be plotted. The users can use any plot functions to generate their own legend. Or a function \code{\link{showLegend}} is also provided as a example.}
\item{method}{the agglomeration method to be used by \code{\link{hclust}} function. This should be (an unambiguous abbreviation of) one of "ward", "single", "complete", "average", "mcquitty", "median" or "centroid".}
\item{ColAxisColors}{integer indicating which coloum of ColSideColors will be used as colors for labels in coloum axis. The default value is 0, which means all coloum labels will be in black color.}
\item{RowAxisColors}{integer indicating which coloum of RowSideColors will be used as colors for labels in row axis. The default value is 0, which means all row labels will be in black color.}
\item{hclustfun}{function used to compute the hierarchical clustering
when \code{Rowv} or \code{Colv} are not dendrograms. Defaults to
\code{\link{hclust}}. Should take as argument a result of \code{distfun}
and return an object to which \code{\link{as.dendrogram}} can be applied.}
\item{reorderfun}{\code{function(d, w)} of dendrogram and weights for
reordering the row and column dendrograms. The default uses
\code{\link{reorder.dendrogram}}.}
\item{add.expr}{expression that will be evaluated after the call to
\code{image}. Can be used to add components to the plot.}
\item{symm}{logical indicating if \code{x} should be treated
\bold{symm}etrically; can only be true when \code{x} is a square matrix.}
\item{revC}{logical indicating if the column order should be
\code{\link{rev}}ersed for plotting, such that e.g., for the
symmetric case, the symmetry axis is as usual.}
\item{scale}{character indicating if the values should be centered and
scaled in either the row direction or the column direction, or
none. The default is \code{"row"} if \code{symm} false, and
\code{"none"} otherwise.}
\item{na.rm}{logical indicating whether \code{NA}'s should be removed.}
\item{ColSideFun}{function used to generate annotation and labeling figure in column side. The users can use any plot functions to generate their own figure. And a function \code{\link{showAnn}} is also provided as a example.}
\item{ColSideAnn}{data frame with continuous and factor variables as annotation information. This parameter will be sorted by coloum dendrogram and then passed to ColSideFun.}
\item{ColSideWidth}{numeric the height of column side area, which can be used by ColSideFun function.}
\item{ColSideCut}{numeric the value to be used in cutting coloum dendrogram. The dendrogram and annotation will be divided into different parts and labeled respectively.}
\item{colorCell}{A data.frame with 3 columns, indicating which cells will be colored by specific colors. The first column is row index, second column is column index, and the third column is color.}
\item{highlightCell}{A data.frame with 3 or 4 columns, indicating which cells will be highlighted by rectangles with specific colors. The first column is row index, second column is column index, the third column is color for rectangle border, and the optional fourth column is width for rectangle border.}
\item{file}{pdf file name, only works when topN was used.}
\item{topN}{vector a list of numbers. topN genes will be used to generate the heatmaps.}
\item{filterFun}{function used to filter genes, such as sd, mean, sum. It will be used in an apply function to calculate a value for each row.}
\item{margins}{numeric vector of length 2 containing the margins
(see \code{\link{par}(mar = *)}) for column and row names, respectively.}
\item{ColSideColors}{(optional) character vector of length \code{ncol(x)}
containing the color names for a horizontal side bar that may be used to
annotate the columns of \code{x}.}
\item{RowSideColors}{(optional) character vector of length \code{nrow(x)}
containing the color names for a vertical side bar that may be used to
annotate the rows of \code{x}.}
\item{cexRow,cexCol}{positive numbers, used as cex.axis in for the row or column axis labeling. The defaults currently only use number of rows or columns, respectively.}
\item{lasRow,lasCol}{the style of row or column axis labels.}
\item{labRow,labCol}{character vectors with row and column labels to use; these default to rownames(x) or colnames(x), respectively.}
\item{main,xlab,ylab}{main, x- and y-axis titles; defaults to none.}
\item{keep.dendro}{logical indicating if the dendrogram(s) should be
kept as part of the result (when \code{Rowv} and/or \code{Colv} are
not NA).}
\item{verbose}{logical indicating if information should be printed.}
\item{useRaster}{logical; if TRUE a bitmap raster is used to plot the image instead of polygons. The grid must be regular in that case, otherwise an error is raised.}
\item{...}{additional arguments passed on to \code{\link{image}}.}
}
\value{
The same return value as \code{\link{hclust}} function.
}
\description{
The function heatmap3 is completely compatible with the original R function heatmap, and provides more new features.
}
\examples{
#generate data
set.seed(123456789)
rnormData<-matrix(rnorm(1000), 40, 25)
rnormData[1:15, seq(6, 25, 2)] = rnormData[1:15, seq(6, 25, 2)] + 2
rnormData[16:40, seq(7, 25, 2)] = rnormData[16:40, seq(7, 25, 2)] + 4
colnames(rnormData)<-c(paste("Control", 1:5, sep = ""), paste(c("TrtA", "TrtB"),
rep(1:10,each=2), sep = ""))
rownames(rnormData)<-paste("Probe", 1:40, sep = "")
ColSideColors<-cbind(Group1=c(rep("steelblue2",5), rep(c("brown1", "mediumpurple2"),10)),
Group2=sample(c("steelblue2","brown1", "mediumpurple2"),25,replace=TRUE))
colorCell<-data.frame(row=c(1,3,5),col=c(2,4,6),color=c("green4","black","orange2"),
stringsAsFactors=FALSE)
highlightCell<-data.frame(row=c(2,4,6),col=c(1,3,5),color=c("black","green4","orange2"),
lwd=1:3,stringsAsFactors=FALSE)
#A simple example
heatmap3(rnormData,ColSideColors=ColSideColors,showRowDendro=FALSE,colorCell=colorCell,
highlightCell=highlightCell)
#A more detail example
ColSideAnn<-data.frame(Information=rnorm(25),Group=c(rep("Control",5), rep(c("TrtA", "TrtB"),10)))
row.names(ColSideAnn)<-colnames(rnormData)
RowSideColors<-colorRampPalette(c("chartreuse4", "white", "firebrick"))(40)
result<-heatmap3(rnormData,ColSideCut=1.2,ColSideAnn=ColSideAnn,ColSideFun=function(x)
showAnn(x),ColSideWidth=0.8,RowSideColors=RowSideColors,col=colorRampPalette(c("green",
"black", "red"))(1024),RowAxisColors=1,legendfun=function() showLegend(legend=c("Low",
"High"),col=c("chartreuse4","firebrick")),verbose=TRUE)
#annotations distribution in different clusters and the result of statistic tests
result$cutTable
}
|
/library(caret)&๋ฐ์ดํฐ๋ถํ .R | no_license | Thing-ji/R.Library-Datamining | R | false | false | 374 | r | ||
# Auto-generated fuzzer regression case: call palm:::euc_distances via
# do.call with empty x1/x2/y2 coordinate vectors and a y1 vector mixing
# very small doubles (including one subnormal) and zeros, to exercise
# edge-case numeric inputs.
testlist <- list(x1 = numeric(0), x2 = numeric(0), y1 = c(5.38037779566735e-202, 2.8177692137361e-202, 2.81776900841821e-202, 2.81024070102441e-202, 2.81776900886378e-202, 3.06530172927542e-304, 9.88131291682493e-324, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) | /palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968536-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 492 | r | testlist <- list(x1 = numeric(0), x2 = numeric(0), y1 = c(5.38037779566735e-202, 2.8177692137361e-202, 2.81776900841821e-202, 2.81024070102441e-202, 2.81776900886378e-202, 3.06530172927542e-304, 9.88131291682493e-324, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result) |
# Asymptotic power of the median-based test over 64 studies: the statistic
# is treated as normal with mean 64*n*delta^2/4 and variance four times
# that mean; power is the upper-tail probability beyond 155.4047.
power_median <- function(n, delta) {
  ncp <- 64 * n * (1/4) * delta^2
  1 - pnorm(155.4047, mean = ncp, sd = sqrt(4 * ncp))
}
# Plot power of the median test against the number of studies (1..1000),
# one panel each for small (0.2), medium (0.5) and large (0.8) effects
par(mfrow=c(3,1))
plot(power_median(1:1000,0.2), type='l')
plot(power_median(1:1000,0.5), type='l')
plot(power_median(1:1000,0.8), type='l')
# Asymptotic power of the combined test when the first M[j] of the studies
# carry a true standardized effect of size `delta`.
#
# n:     vector of per-study sample sizes
# delta: standardized effect size shared by the non-null studies
# M:     vector of counts of non-null studies; one power value is returned
#        per element of M
#
# Fix over the original version: the per-study noncentrality terms are
# computed locally and vectorized, instead of growing the global vectors
# `s` and `Power` inside the function. The old code failed when those
# globals were absent and silently reused stale entries of `s` whenever
# M was not nondecreasing.
power <- function(n, delta, M) {
  vapply(M, function(m) {
    s <- n[seq_len(m)] * (1/4) * delta^2   # noncentrality per non-null study
    mu <- sum(s) / 2
    sigma <- sqrt(sum(s))
    # Two-sided rejection region at the critical value 155.4047
    1 - pnorm(155.4047, mu, sigma) + pnorm(-155.4047, mu, sigma)
  }, numeric(1))
}
n <- rep(76, 64)
M <- c(1, 2, 3, 4, 5, 10, 32, 64)  # vector of numbers of non-null effects

# Power for small, medium and large effects. The original calls omitted
# the required third argument M, which made them error out.
power(n, 0.2, M)
power(n, 0.5, M)
power(n, 0.8, M)

# Same computation with empirical sample sizes; `large_n_used` is defined
# elsewhere, so only run this when it is available.
if (exists("large_n_used")) {
  x1 <- power(large_n_used[, 1], 0.2, M)
  x2 <- power(large_n_used[, 2], 0.5, M)
  x3 <- power(large_n_used[, 3], 0.8, M)
}
| /Fishers_method/asympotic power.R | no_license | j3schaue/replication_metrics | R | false | false | 719 | r | power_median<-function(n,delta){
1-pnorm(155.4047, mean=64*n*(1/4)*(delta^2), sqrt(64*4*n*(1/4)*(delta^2)))
}
# Plot power of the median test against the number of studies (1..1000),
# one panel each for small (0.2), medium (0.5) and large (0.8) effects
par(mfrow=c(3,1))
plot(power_median(1:1000,0.2), type='l')
plot(power_median(1:1000,0.5), type='l')
plot(power_median(1:1000,0.8), type='l')
# Asymptotic power of the combined test when the first M[j] of the studies
# carry a true standardized effect of size `delta`.
#
# n:     vector of per-study sample sizes
# delta: standardized effect size shared by the non-null studies
# M:     vector of counts of non-null studies; one power value is returned
#        per element of M
#
# Fix over the original version: the per-study noncentrality terms are
# computed locally and vectorized, instead of growing the global vectors
# `s` and `Power` inside the function. The old code failed when those
# globals were absent and silently reused stale entries of `s` whenever
# M was not nondecreasing.
power <- function(n, delta, M) {
  vapply(M, function(m) {
    s <- n[seq_len(m)] * (1/4) * delta^2   # noncentrality per non-null study
    mu <- sum(s) / 2
    sigma <- sqrt(sum(s))
    # Two-sided rejection region at the critical value 155.4047
    1 - pnorm(155.4047, mu, sigma) + pnorm(-155.4047, mu, sigma)
  }, numeric(1))
}
n <- rep(76, 64)
M <- c(1, 2, 3, 4, 5, 10, 32, 64)  # vector of numbers of non-null effects

# Power for small, medium and large effects. The original calls omitted
# the required third argument M, which made them error out.
power(n, 0.2, M)
power(n, 0.5, M)
power(n, 0.8, M)

# Same computation with empirical sample sizes; `large_n_used` is defined
# elsewhere, so only run this when it is available.
if (exists("large_n_used")) {
  x1 <- power(large_n_used[, 1], 0.2, M)
  x2 <- power(large_n_used[, 2], 0.5, M)
  x3 <- power(large_n_used[, 3], 0.8, M)
}
|
#' Draw a banner-like box in the console
#'
#' @details
#'
#' ## Defaults
#'
#' ```{asciicast box-default}
#' boxx("Hello there!")
#' ```
#'
#' ## Change border style
#'
#' ```{asciicast box-border}
#' boxx("Hello there!", border_style = "double")
#' ```
#'
#' ## Multiple lines
#'
#' ```{asciicast box-lines}
#' boxx(c("Hello", "there!"), padding = 1)
#' ```
#'
#' ## Padding
#'
#' ```{asciicast box-padding}
#' boxx("Hello there!", padding = 1)
#' boxx("Hello there!", padding = c(1, 5, 1, 5))
#' ```
#'
#' ## Floating
#'
#' ```{asciicast box-float}
#' boxx("Hello there!", padding = 1, float = "center")
#' boxx("Hello there!", padding = 1, float = "right")
#' ```
#'
#' ## Text color
#'
#' ```{asciicast box-text-color}
#' boxx(col_cyan("Hello there!"), padding = 1, float = "center")
#' ```
#'
#' ## Background color
#'
#' ```{asciicast box-bg-color}
#' boxx("Hello there!", padding = 1, background_col = "brown")
#' boxx("Hello there!", padding = 1, background_col = bg_red)
#' ```
#'
#' ## Border color
#'
#' ```{asciicast box-border-color}
#' boxx("Hello there!", padding = 1, border_col = "green")
#' boxx("Hello there!", padding = 1, border_col = col_red)
#' ```
#'
#' ## Label alignment
#'
#' ```{asciicast box-label-align}
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "left")
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "center")
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "right")
#' ```
#'
#' ## A very customized box
#'
#' ```{asciicast box-custom}
#' star <- symbol$star
#' label <- c(paste(star, "Hello", star), " there!")
#' boxx(
#' col_white(label),
#' border_style="round",
#' padding = 1,
#' float = "center",
#' border_col = "tomato3",
#' background_col="darkolivegreen"
#' )
#' ```
#'
#' @param label Label to show, a character vector. Each element will be
#' in a new line. You can color it using the `col_*`, `bg_*` and
#' `style_*` functions, see [ansi-styles] and the examples below.
#' @param header Text to show on top border of the box. If too long,
#' it will be cut.
#' @param footer Text to show on the bottom border of the box. If too long,
#' it will be cut.
#' @param border_style String that specifies the border style.
#' `list_border_styles` lists all current styles.
#' @param padding Padding within the box. Either an integer vector of
#' four numbers (bottom, left, top, right), or a single number `x`, which
#' is interpreted as `c(x, 3*x, x, 3*x)`.
#' @param margin Margin around the box. Either an integer vector of four
#' numbers (bottom, left, top, right), or a single number `x`, which is
#' interpreted as `c(x, 3*x, x, 3*x)`.
#' @param float Whether to display the box on the `"left"`, `"center"`, or
#' the `"right"` of the screen.
#' @param background_col Background color of the inside of the box.
#' Either a style function (see [ansi-styles]), or a color name which
#' will be used in [make_ansi_style()] to create a *background* style
#' (i.e. `bg = TRUE` is used).
#' @param col Color of text, and default border color. Either a style
#' function (see [ansi-styles]) or a color name that is passed to
#' [make_ansi_style()].
#' @param border_col Color of the border. Either a style function
#' (see [ansi-styles]) or a color name that is passed to
#' [make_ansi_style()].
#' @param align Alignment of the label within the box: `"left"`,
#' `"center"`, or `"right"`.
#' @param width Width of the screen, defaults to [console_width()].
#'
#' @section About fonts and terminal settings:
#' The boxes might or might not look great in your terminal, depending
#' on the box style you use and the font the terminal uses. We found that
#' the Menlo font looks nice in most terminals an also in Emacs.
#'
#' RStudio currently has a line height greater than one for console output,
#' which makes the boxes ugly.
#'
#' @export
boxx <- function(label, header = "", footer = "",
                 border_style = "single", padding = 1, margin = 0,
                 float = c("left", "center", "right"),
                 col = NULL, background_col = NULL, border_col = col,
                 align = c("left", "center", "right"),
                 width = console_width()) {
  # Color the label first, then measure the widest line with the
  # ANSI-aware width function (escape sequences are not counted).
  label <- apply_style(as.character(label), col)
  widest <- max(ansi_nchar(label, "width"), 0)
  stopifnot(
    is_border_style(border_style),
    is_padding_or_margin(padding),
    is_padding_or_margin(margin)
  )
  float <- match.arg(float)
  align <- match.arg(align)
  # A scalar padding/margin x expands to c(x, 3x, x, 3x), i.e.
  # (bottom, left, top, right): three times as much horizontal space,
  # since terminal cells are taller than they are wide.
  if (length(padding) == 1) {
    padding <- c(padding, padding * 3, padding, padding * 3)
  }
  if (length(margin) == 1) {
    margin <- c(margin, margin * 3, margin, margin * 3)
  }
  label <- ansi_align(label, align = align, width = widest)
  # Inner width of the box: widest label line plus left/right padding
  content_width <- widest + padding[2] + padding[4]
  # Left margin implements `float`; for the default left float the
  # user-supplied left margin is used directly.
  mar_left <- if (float == "center") {
    make_space((width - content_width) / 2)
  } else if (float == "right") {
    make_space(max(width - content_width - 2, 0))
  } else {
    make_space(margin[2])
  }
  color_border <- function(x) apply_style(x, border_col)
  color_content <- function(x) apply_style(x, background_col, bg = TRUE)
  # Vertical padding: blank lines above (padding[3]) and below (padding[1])
  label <- c(rep("", padding[3]), label, rep("", padding[1]))
  # Border drawing characters for the requested style
  chars <- box_styles()[border_style, ]
  pad_left <- make_space(padding[2])
  # Per-line right padding so every content line reaches content_width
  pad_right <- make_space(
    content_width - ansi_nchar(label, "width") - padding[2]
  )
  # Header/footer text is trimmed to fit the border and wrapped in single
  # spaces; an empty string means no text on that border.
  if (header != "") {
    header <- paste0(" ", ansi_strtrim(header, content_width - 2), " ")
  }
  hdw <- ansi_nchar(header, "width")
  if (footer != "") {
    footer <- paste0(" ", ansi_strtrim(footer, content_width - 2), " ")
  }
  ftw <- ansi_nchar(footer, "width")
  # Top border: header text first, then horizontal fill to the corner
  hdline <- paste0(header, strrep(chars$horizontal, content_width - hdw))
  top <- color_border(paste0(
    strrep("\n", margin[3]),
    mar_left, chars$top_left, hdline, chars$top_right
  ))
  # Bottom border: horizontal fill first, footer text at the right end
  ftline <- paste0(strrep(chars$horizontal, content_width - ftw), footer)
  bottom <- color_border(paste0(
    mar_left, chars$bottom_left, ftline, chars$bottom_right,
    strrep("\n", margin[1])
  ))
  side <- color_border(chars$vertical)
  middle <- paste0(mar_left, side,
    color_content(paste0(pad_left, label, pad_right)), side)
  box <- paste0(top, "\n", paste0(middle, collapse = "\n"), "\n", bottom)
  # Class "boxx" enables the print method below; "character" is kept so
  # the result still behaves as an ordinary character scalar.
  class(box) <- unique(c("boxx", class(box), "character"))
  box
}
# Register the S3 class combination with the methods package so "boxx"
# objects can appear in S4 method signatures.
methods::setOldClass(c("boxx", "character"))
#' Print a boxx object
#'
#' Emits the box with `cat()` and returns the object invisibly, so the box
#' is not echoed twice and the call composes in pipes.
#'
#' @param x A `boxx` object (character scalar).
#' @param ... Additional arguments passed on to `cat()`.
#' @param sep Separator passed to `cat()`; defaults to a newline.
#' @export
print.boxx <- function(x, ..., sep = "\n") {
  cat(x, ..., sep = sep)
  invisible(x)
}
| /R/boxes.R | permissive | isabella232/cli-12 | R | false | false | 6,452 | r |
#' Draw a banner-like box in the console
#'
#' @details
#'
#' ## Defaults
#'
#' ```{asciicast box-default}
#' boxx("Hello there!")
#' ```
#'
#' ## Change border style
#'
#' ```{asciicast box-border}
#' boxx("Hello there!", border_style = "double")
#' ```
#'
#' ## Multiple lines
#'
#' ```{asciicast box-lines}
#' boxx(c("Hello", "there!"), padding = 1)
#' ```
#'
#' ## Padding
#'
#' ```{asciicast box-padding}
#' boxx("Hello there!", padding = 1)
#' boxx("Hello there!", padding = c(1, 5, 1, 5))
#' ```
#'
#' ## Floating
#'
#' ```{asciicast box-float}
#' boxx("Hello there!", padding = 1, float = "center")
#' boxx("Hello there!", padding = 1, float = "right")
#' ```
#'
#' ## Text color
#'
#' ```{asciicast box-text-color}
#' boxx(col_cyan("Hello there!"), padding = 1, float = "center")
#' ```
#'
#' ## Background color
#'
#' ```{asciicast box-bg-color}
#' boxx("Hello there!", padding = 1, background_col = "brown")
#' boxx("Hello there!", padding = 1, background_col = bg_red)
#' ```
#'
#' ## Border color
#'
#' ```{asciicast box-border-color}
#' boxx("Hello there!", padding = 1, border_col = "green")
#' boxx("Hello there!", padding = 1, border_col = col_red)
#' ```
#'
#' ## Label alignment
#'
#' ```{asciicast box-label-align}
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "left")
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "center")
#' boxx(c("Hi", "there", "you!"), padding = 1, align = "right")
#' ```
#'
#' ## A very customized box
#'
#' ```{asciicast box-custom}
#' star <- symbol$star
#' label <- c(paste(star, "Hello", star), " there!")
#' boxx(
#' col_white(label),
#' border_style="round",
#' padding = 1,
#' float = "center",
#' border_col = "tomato3",
#' background_col="darkolivegreen"
#' )
#' ```
#'
#' @param label Label to show, a character vector. Each element will be
#' in a new line. You can color it using the `col_*`, `bg_*` and
#' `style_*` functions, see [ansi-styles] and the examples below.
#' @param header Text to show on top border of the box. If too long,
#' it will be cut.
#' @param footer Text to show on the bottom border of the box. If too long,
#' it will be cut.
#' @param border_style String that specifies the border style.
#' `list_border_styles` lists all current styles.
#' @param padding Padding within the box. Either an integer vector of
#' four numbers (bottom, left, top, right), or a single number `x`, which
#' is interpreted as `c(x, 3*x, x, 3*x)`.
#' @param margin Margin around the box. Either an integer vector of four
#' numbers (bottom, left, top, right), or a single number `x`, which is
#' interpreted as `c(x, 3*x, x, 3*x)`.
#' @param float Whether to display the box on the `"left"`, `"center"`, or
#' the `"right"` of the screen.
#' @param background_col Background color of the inside of the box.
#' Either a style function (see [ansi-styles]), or a color name which
#' will be used in [make_ansi_style()] to create a *background* style
#' (i.e. `bg = TRUE` is used).
#' @param col Color of text, and default border color. Either a style
#' function (see [ansi-styles]) or a color name that is passed to
#' [make_ansi_style()].
#' @param border_col Color of the border. Either a style function
#' (see [ansi-styles]) or a color name that is passed to
#' [make_ansi_style()].
#' @param align Alignment of the label within the box: `"left"`,
#' `"center"`, or `"right"`.
#' @param width Width of the screen, defaults to [console_width()].
#'
#' @section About fonts and terminal settings:
#' The boxes might or might not look great in your terminal, depending
#' on the box style you use and the font the terminal uses. We found that
#' the Menlo font looks nice in most terminals an also in Emacs.
#'
#' RStudio currently has a line height greater than one for console output,
#' which makes the boxes ugly.
#'
#' @export
boxx <- function(label, header = "", footer = "",
                 border_style = "single", padding = 1, margin = 0,
                 float = c("left", "center", "right"),
                 col = NULL, background_col = NULL, border_col = col,
                 align = c("left", "center", "right"),
                 width = console_width()) {
  # Style the label first so its ANSI-aware display width can be measured;
  # `widest` is the display width of the widest label line (0 if empty).
  label <- apply_style(as.character(label), col)
  widest <- max(ansi_nchar(label, "width"), 0)
  stopifnot(
    is_border_style(border_style),
    is_padding_or_margin(padding),
    is_padding_or_margin(margin)
  )
  float <- match.arg(float)
  align <- match.arg(align)
  # Per the roxygen docs above, a scalar padding/margin value x expands to
  # c(x, 3*x, x, 3*x), interpreted as (bottom, left, top, right).
  if (length(padding) == 1) {
    padding <- c(padding, padding * 3, padding, padding * 3)
  }
  if (length(margin) == 1) {
    margin <- c(margin, margin * 3, margin, margin * 3)
  }
  label <- ansi_align(label, align = align, width = widest)
  # Inner width of the box: widest label line plus left (padding[2]) and
  # right (padding[4]) padding.
  content_width <- widest + padding[2] + padding[4]
  # Left offset implementing `float`; for "left" it is simply the left margin.
  mar_left <- if (float == "center") {
    make_space((width - content_width) / 2)
  } else if (float == "right") {
    make_space(max(width - content_width - 2, 0))
  } else {
    make_space(margin[2])
  }
  color_border <- function(x) apply_style(x, border_col)
  color_content <- function(x) apply_style(x, background_col, bg = TRUE)
  # Blank lines above/below the label rows provide top/bottom padding.
  label <- c(rep("", padding[3]), label, rep("", padding[1]))
  # Border-drawing characters for the requested style.
  chars <- box_styles()[border_style, ]
  pad_left <- make_space(padding[2])
  pad_right <- make_space(
    content_width - ansi_nchar(label, "width") - padding[2]
  )
  # Header/footer are trimmed to fit inside the horizontal border, with one
  # space of breathing room on each side.
  if (header != "") {
    header <- paste0(" ", ansi_strtrim(header, content_width - 2), " ")
  }
  hdw <- ansi_nchar(header, "width")
  if (footer != "") {
    footer <- paste0(" ", ansi_strtrim(footer, content_width - 2), " ")
  }
  ftw <- ansi_nchar(footer, "width")
  # Top border: header text followed by horizontal border characters; the
  # top margin is rendered as leading newlines.
  hdline <- paste0(header, strrep(chars$horizontal, content_width - hdw))
  top <- color_border(paste0(
    strrep("\n", margin[3]),
    mar_left, chars$top_left, hdline, chars$top_right
  ))
  # Bottom border: horizontal border characters followed by footer text.
  ftline <- paste0(strrep(chars$horizontal, content_width - ftw), footer)
  bottom <- color_border(paste0(
    mar_left, chars$bottom_left, ftline, chars$bottom_right,
    strrep("\n", marg1n <- margin[1])
  ))
  side <- color_border(chars$vertical)
  # Each content row: left offset, border, padded/colored label line, border.
  middle <- paste0(mar_left, side,
    color_content(paste0(pad_left, label, pad_right)), side)
  box <- paste0(top, "\n", paste0(middle, collapse = "\n"), "\n", bottom)
  class(box) <- unique(c("boxx", class(box), "character"))
  box
}
# Register the S3 "boxx" class with the S4 system so it can appear in S4
# method signatures and slots.
methods::setOldClass(c("boxx", "character"))
#' Print a boxx object
#'
#' Writes the rendered box to the console and hands the object back
#' invisibly, so it can keep flowing through a pipeline.
#'
#' @param x A `boxx` object.
#' @param ... Further arguments passed on to [cat()].
#' @param sep Separator passed on to [cat()], a newline by default.
#' @export
print.boxx <- function(x, ..., sep = "\n") {
  # Side effect only: emit the box, then return `x` unseen.
  cat(x, ..., sep = sep)
  invisible(x)
}
|
# These tests read fixture files relative to this script, so they cannot run
# under devtools_shims; detect that situation and flag every test to skip.
skipping <- "devtools_shims" %in% search()
if (!skipping) {
  message("Running tests in test-wflow_update.R")
  if (!file.exists("test-wflow_update.R"))
    stop("Tests must be manually run in same working directory as this file.",
         call. = FALSE)
  library("testthat")
  library("workflowr")
}
context("wflow_update")
# Test wflow_update ------------------------------------------------------------
# End-to-end check: wflow_update upgrades a v0.3.0 fixture project to the
# v0.4.0 layout when the project is not under Git version control.
test_that("wflow_update can update from v0.3.0 to v0.4.0 with no Git", {
  if (skipping)
    skip("Must be run manually.")
  # Create a temporary directory with v0.3.0 files
  tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
  dir.create(tmp_dir_v0.3.0, recursive = TRUE)
  on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
  tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = tmp_dir_v0.3.0, recursive = TRUE)
  # dry_run = TRUE: reports the files that would change without touching them
  expect_message(files_updated <- wflow_update(log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update in dry run mode")
  files_expected <- c(file.path(tmp_dir_v0.3.0, "v0.3.0.Rproj"),
                      file.path(tmp_dir_v0.3.0, "analysis",
                                c("chunks.R", "ex1.Rmd", "ex2.Rmd")))
  files_expected <- workflowr:::relative(files_expected)
  expect_identical(sort(files_updated), sort(files_expected))
  # dry_run = FALSE: actually performs the update
  expect_message(wflow_update(dry_run = FALSE, log_open = FALSE,
                              project = tmp_dir_v0.3.0),
                 "Running wflow_update")
  # Compare the updated project line-by-line against the v0.4.0 fixture.
  files_expected <- list.files("files/test-wflow_update/v0.4.0/",
                               full.names = TRUE, recursive = TRUE)
  files_actual <- list.files(tmp_dir_v0.3.0,
                             full.names = TRUE, recursive = TRUE)
  files_expected_lines <- unlist(Map(readLines, files_expected))
  files_actual_lines <- unlist(Map(readLines, files_actual))
  expect_true(all(files_actual_lines == files_expected_lines))
  # The tests below pass locally but fail on AppVeyor. The problem is due to
  # difference in line endings, and I wasn't able to determine why the line
  # endings are different on AppVeyor versus my local Windows 10 machine.
  skip_on_appveyor()
  # Stricter byte-for-byte comparison via MD5 checksums.
  files_expected_md5 <- tools::md5sum(files_expected)
  files_actual_md5 <- tools::md5sum(files_actual)
  expect_true(all(files_actual_md5 == files_expected_md5))
})
# Same upgrade path as above, but with the project under Git: the update
# must leave the history untouched in dry-run mode and add exactly one
# commit containing the updated files otherwise.
test_that("wflow_update can update from v0.3.0 to v0.4.0 with Git", {
  if (skipping)
    skip("Must be run manually.")
  # Create a temporary directory with v0.3.0 files
  tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
  dir.create(tmp_dir_v0.3.0, recursive = TRUE)
  on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
  tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = tmp_dir_v0.3.0, recursive = TRUE)
  # Initialize Git repo and commit everything
  git2r::init(tmp_dir_v0.3.0)
  r <- git2r::repository(tmp_dir_v0.3.0)
  git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
  git2r::commit(r, "commit project")
  commit_last <- git2r::commits(r)[[1]]
  # dry_run = TRUE: must not create a commit
  expect_message(files_updated <- wflow_update(log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update in dry run mode")
  expect_identical(git2r::commits(r)[[1]], commit_last)
  # dry_run = FALSE: performs the update and commits it
  expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update")
  commit_update <- git2r::commits(r)[[1]]
  # NOTE(review): `@message`/`@sha` use S4 slot access on git2r commit
  # objects; newer git2r versions return list-like objects accessed with
  # `$` — confirm the pinned git2r version supports S4 slots.
  expect_identical(commit_update@message,
                   sprintf("Update workflowr project with wflow_update (version %s).",
                           as.character(utils::packageVersion("workflowr"))))
  files_expected <- workflowr:::obtain_files_in_commit(r, commit_update)
  files_expected <- file.path(tmp_dir_v0.3.0, files_expected)
  files_expected <- workflowr:::relative(files_expected)
  expect_false(commit_update@sha == commit_last@sha)
  expect_identical(sort(files_updated),
                   sort(files_expected))
})
# Files whose names start with "_" are exempt from updating.
test_that("wflow_update ignores Rmd files starting with _", {
  if (skipping)
    skip("Must be run manually.")
  # Stand up a scratch copy of the v0.3.0 fixture project.
  proj <- tempfile("v0.3.0-")
  dir.create(proj, recursive = TRUE)
  on.exit(unlink(proj, recursive = TRUE, force = TRUE))
  proj <- workflowr:::absolute(proj)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = proj, recursive = TRUE)
  # Drop in an Rmd whose leading underscore should exempt it.
  underscored <- file.path(proj, "analysis", "_ignore.Rmd")
  file.create(underscored)
  underscored <- workflowr:::relative(underscored)
  expect_message(
    updated <- wflow_update(dry_run = FALSE, log_open = FALSE, project = proj),
    "Running wflow_update"
  )
  # Other files were updated, but the underscored one was left alone.
  expect_true(length(updated) > 0)
  expect_false(underscored %in% updated)
})
# Untracked files may be updated on disk, but must not be swept into the
# automatic update commit.
test_that("wflow_update only commits tracked files", {
  if (skipping)
    skip("Must be run manually.")
  # Create a temporary directory with v0.3.0 files
  tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
  dir.create(tmp_dir_v0.3.0, recursive = TRUE)
  on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
  tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = tmp_dir_v0.3.0, recursive = TRUE)
  # Initialize Git repo and commit everything
  git2r::init(tmp_dir_v0.3.0)
  r <- git2r::repository(tmp_dir_v0.3.0)
  git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
  git2r::commit(r, "commit project")
  # Create an untracked Rmd file (a copy of a tracked one, never added)
  rmd_untracked <- file.path(tmp_dir_v0.3.0, "analysis", "untracked.Rmd")
  file.copy(file.path(tmp_dir_v0.3.0, "analysis", "ex1.Rmd"),
            rmd_untracked)
  rmd_untracked <- workflowr:::relative(rmd_untracked)
  expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update")
  commit_update <- git2r::commits(r)[[1]]
  files_committed <- workflowr:::obtain_files_in_commit(r, commit_update)
  files_committed <- file.path(tmp_dir_v0.3.0, files_committed)
  files_committed <- workflowr:::relative(files_committed)
  # The untracked file was updated on disk but excluded from the commit.
  expect_true(rmd_untracked %in% files_updated)
  expect_false(rmd_untracked %in% files_committed)
})
# Idempotence: a second run on an already-updated project must change
# nothing and create no new commit.
test_that("wflow_update does nothing if everything is up-to-date", {
  if (skipping)
    skip("Must be run manually.")
  # Create a temporary directory with v0.3.0 files
  tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
  dir.create(tmp_dir_v0.3.0, recursive = TRUE)
  on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
  tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = tmp_dir_v0.3.0, recursive = TRUE)
  # Initialize Git repo and commit everything
  git2r::init(tmp_dir_v0.3.0)
  r <- git2r::repository(tmp_dir_v0.3.0)
  git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
  git2r::commit(r, "commit project")
  commit_last <- git2r::commits(r)[[1]]
  # First update: creates a new commit
  # NOTE(review): `@sha` uses S4 slot access on git2r commit objects; newer
  # git2r versions use `$sha` — confirm the pinned git2r version.
  expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update")
  commit_update <- git2r::commits(r)[[1]]
  expect_false(commit_update@sha == commit_last@sha)
  # Run a second time: no new commit, no files reported
  expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
                                               project = tmp_dir_v0.3.0),
                 "Running wflow_update")
  commit_update_2 <- git2r::commits(r)[[1]]
  expect_true(commit_update@sha == commit_update_2@sha)
  expect_true(length(files_updated) == 0)
})
# Test error handling ----------------------------------------------------------
test_that("wflow_update fails early if files in staging area", {
  if (skipping)
    skip("Must be run manually.")
  # Stand up a scratch copy of the v0.3.0 fixture project.
  proj <- tempfile("v0.3.0-")
  dir.create(proj, recursive = TRUE)
  on.exit(unlink(proj, recursive = TRUE, force = TRUE))
  proj <- workflowr:::absolute(proj)
  file.copy(from = "files/test-wflow_update/v0.3.0/.",
            to = proj, recursive = TRUE)
  # Stage everything without committing; wflow_update must refuse to run.
  git2r::init(proj)
  repo <- git2r::repository(proj)
  git2r::add(repo, Sys.glob(file.path(proj, "*")))
  expect_error(wflow_update(log_open = FALSE, project = proj),
               "You have added files to the Git staging area.")
})
| /tests/testthat/test-wflow_update.R | permissive | jdblischak/workflowrBeta | R | false | false | 9,123 | r | # Can't run with devtools_shims b/c need to access package files
if ("devtools_shims" %in% search()) {
skipping <- TRUE
} else {
skipping <- FALSE
message("Running tests in test-wflow_update.R")
if (!file.exists("test-wflow_update.R"))
stop("Tests must be manually run in same working directory as this file.",
call. = FALSE)
library("testthat")
library("workflowr")
}
context("wflow_update")
# Test wflow_update ------------------------------------------------------------
test_that("wflow_update can update from v0.3.0 to v0.4.0 with no Git", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# dry_run = TRUE
expect_message(files_updated <- wflow_update(log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update in dry run mode")
files_expected <- c(file.path(tmp_dir_v0.3.0, "v0.3.0.Rproj"),
file.path(tmp_dir_v0.3.0, "analysis",
c("chunks.R", "ex1.Rmd", "ex2.Rmd")))
files_expected <- workflowr:::relative(files_expected)
expect_identical(sort(files_updated), sort(files_expected))
# dry_run = FALSE
expect_message(wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
files_expected <- list.files("files/test-wflow_update/v0.4.0/",
full.names = TRUE, recursive = TRUE)
files_actual <- list.files(tmp_dir_v0.3.0,
full.names = TRUE, recursive = TRUE)
files_expected_lines <- unlist(Map(readLines, files_expected))
files_actual_lines <- unlist(Map(readLines, files_actual))
expect_true(all(files_actual_lines == files_expected_lines))
# The tests below pass locally but fail on AppVeyor. The problem is due to
# difference in line endings, and I wasn't able to determine why the line
# endings are different on AppVeyor versus my local Windows 10 machine.
skip_on_appveyor()
files_expected_md5 <- tools::md5sum(files_expected)
files_actual_md5 <- tools::md5sum(files_actual)
expect_true(all(files_actual_md5 == files_expected_md5))
})
test_that("wflow_update can update from v0.3.0 to v0.4.0 with Git", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# Initilize Git repo and commit everything
git2r::init(tmp_dir_v0.3.0)
r <- git2r::repository(tmp_dir_v0.3.0)
git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
git2r::commit(r, "commit project")
commit_last <- git2r::commits(r)[[1]]
# dry_run = TRUE
expect_message(files_updated <- wflow_update(log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update in dry run mode")
expect_identical(git2r::commits(r)[[1]], commit_last)
# dry_run = FALSE
expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
commit_update <- git2r::commits(r)[[1]]
expect_identical(commit_update@message,
sprintf("Update workflowr project with wflow_update (version %s).",
as.character(utils::packageVersion("workflowr"))))
files_expected <- workflowr:::obtain_files_in_commit(r, commit_update)
files_expected <- file.path(tmp_dir_v0.3.0, files_expected)
files_expected <- workflowr:::relative(files_expected)
expect_false(commit_update@sha == commit_last@sha)
expect_identical(sort(files_updated),
sort(files_expected))
})
test_that("wflow_update ignores Rmd files starting with _", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# Add an Rmd file starting with an underscore
rmd_ignore <- file.path(tmp_dir_v0.3.0, "analysis", "_ignore.Rmd")
file.create(rmd_ignore)
rmd_ignore <- workflowr:::relative(rmd_ignore)
expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
expect_true(length(files_updated) > 0)
expect_false(rmd_ignore %in% files_updated)
})
test_that("wflow_update only commits tracked files", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# Initilize Git repo and commit everything
git2r::init(tmp_dir_v0.3.0)
r <- git2r::repository(tmp_dir_v0.3.0)
git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
git2r::commit(r, "commit project")
# Create an untracked Rmd file
rmd_untracked <- file.path(tmp_dir_v0.3.0, "analysis", "untracked.Rmd")
file.copy(file.path(tmp_dir_v0.3.0, "analysis", "ex1.Rmd"),
rmd_untracked)
rmd_untracked <- workflowr:::relative(rmd_untracked)
expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
commit_update <- git2r::commits(r)[[1]]
files_committed <- workflowr:::obtain_files_in_commit(r, commit_update)
files_committed <- file.path(tmp_dir_v0.3.0, files_committed)
files_committed <- workflowr:::relative(files_committed)
expect_true(rmd_untracked %in% files_updated)
expect_false(rmd_untracked %in% files_committed)
})
test_that("wflow_update does nothing if everything is up-to-date", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# Initilize Git repo and commit everything
git2r::init(tmp_dir_v0.3.0)
r <- git2r::repository(tmp_dir_v0.3.0)
git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
git2r::commit(r, "commit project")
commit_last <- git2r::commits(r)[[1]]
# Update
expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
commit_update <- git2r::commits(r)[[1]]
expect_false(commit_update@sha == commit_last@sha)
# Run a second time
expect_message(files_updated <- wflow_update(dry_run = FALSE, log_open = FALSE,
project = tmp_dir_v0.3.0),
"Running wflow_update")
commit_update_2 <- git2r::commits(r)[[1]]
expect_true(commit_update@sha == commit_update_2@sha)
expect_true(length(files_updated) == 0)
})
# Test error handling ----------------------------------------------------------
test_that("wflow_update fails early if files in staging area", {
if (skipping)
skip("Must be run manually.")
# Create a temporary directory with v0.3.0 files
tmp_dir_v0.3.0 <- tempfile("v0.3.0-")
dir.create(tmp_dir_v0.3.0, recursive = TRUE)
on.exit(unlink(tmp_dir_v0.3.0, recursive = TRUE, force = TRUE))
tmp_dir_v0.3.0 <- workflowr:::absolute(tmp_dir_v0.3.0)
file.copy(from = "files/test-wflow_update/v0.3.0/.",
to = tmp_dir_v0.3.0, recursive = TRUE)
# Initilize Git repo and add everything
git2r::init(tmp_dir_v0.3.0)
r <- git2r::repository(tmp_dir_v0.3.0)
git2r::add(r, Sys.glob(file.path(tmp_dir_v0.3.0, "*")))
# Expect early error
expect_error(wflow_update(log_open = FALSE, project = tmp_dir_v0.3.0),
"You have added files to the Git staging area.")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImputePower.R
\name{ImputePower}
\alias{ImputePower}
\title{Estimate the power of imputation methods based on input data}
\usage{
ImputePower(
RawData,
GroupAsize,
GroupBsize,
DERatio = 0.2,
logFC = 1,
ReTimes = NULL,
Pair,
MNARratio,
ImputeMethod = c("kNN_fill", "SVD_fill", "MLE_fill", "Mean_fill", "QRILC_fill",
"LLS_fill", "BPCA_fill", "Min_fill")
)
}
\arguments{
\item{RawData}{A two-dimensional matrix with missing values}
\item{GroupAsize}{Sample size of group A}
\item{GroupBsize}{Sample size of group B}
\item{DERatio}{Differential expression ratio in simulation matrix}
\item{logFC}{Fold Change after log2 conversion for simulation}
\item{ReTimes}{Number of repetitions used to average out random error. Larger values give more accurate results but are more time-consuming. You can enter NULL and the repeat times will be calculated automatically based on the matrix size}
\item{Pair}{Paired experimental design or not}
\item{MNARratio}{MNAR ratio of the input matrix}
\item{ImputeMethod}{Names of the imputation methods to evaluate and compare (imputation functions not included in this package can also be used; just enter the function name)}
}
\value{
A list containing the following components:
\item{Total}{ Overall result}
\item{NRMSE}{ NRMSE of each imputation method}
\item{BLCI}{ BLCI of each imputation method}
}
\description{
By establishing simulation matrices, the effectiveness of each imputation method is evaluated at two levels: NRMSE and BLCI (TPR + TNR - 1). First, a simulation matrix is generated from the input matrix and its dataset features. Then, after missing values are introduced, each imputation method under evaluation is used to impute them. The original simulation matrix is compared with the imputed one to obtain the result, and the procedure is repeated to eliminate random error.
}
\note{
Require \code{\link{imputeLCMD}}
}
\examples{
##################################### a simulated example ###################
rm(list = ls())
library('imputeLCMD')
set.seed(2020)
a <- matrix(data = rnorm(100000), ncol = 100) # Generate random matrix
a[sample(length(a), 10000)] <- NA # Generate missing values randomly
rownames(a) <- seq(1, 1000, 1) # Matrix must have row names
res <- ImputePower(RawData = a, GroupAsize = 50, GroupBsize = 50, DERatio = 0.2, logFC = 1, ReTimes = 30, Pair = T, MNARratio = 0,
ImputeMethod = c('kNN_fill', 'SVD_fill', 'MLE_fill', 'Mean_fill', 'QRILC_fill', 'LLS_fill', 'BPCA_fill', 'Min_fill')) # Evaluate input Imputation methods
}
| /MIMO/man/ImputePower.Rd | no_license | Li-Lab-SJTU/R_package | R | false | true | 2,604 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImputePower.R
\name{ImputePower}
\alias{ImputePower}
\title{Estimate the power of imputation methods based on input data}
\usage{
ImputePower(
RawData,
GroupAsize,
GroupBsize,
DERatio = 0.2,
logFC = 1,
ReTimes = NULL,
Pair,
MNARratio,
ImputeMethod = c("kNN_fill", "SVD_fill", "MLE_fill", "Mean_fill", "QRILC_fill",
"LLS_fill", "BPCA_fill", "Min_fill")
)
}
\arguments{
\item{RawData}{A two-dimensional matrix with missing values}
\item{GroupAsize}{Sample size of group A}
\item{GroupBsize}{Sample size of group B}
\item{DERatio}{Differential expression ratio in simulation matrix}
\item{logFC}{Fold Change after log2 conversion for simulation}
\item{ReTimes}{Repeat times. To eliminate random error. The smaller the more accurate but more time consuming. You can enter NULL and we will automatically calculate the repeat times based on the matrix size}
\item{Pair}{Paired experimental design or not}
\item{MNARratio}{MNAR ratio of the input matrix}
\item{ImputeMethod}{Name of imputation methods for evaluation and comparison (You can use imputation functions not included in this package,just enter the name)}
}
\value{
A list containing the following components:
\item{Total}{ Overall result}
\item{NRMSE}{ NRMSE of each imputation method}
\item{BLCI}{ BLCI of each imputation method}
}
\description{
By establishing simulation matrices, the effectiveness of each Imputation method is evaluated from the two levels of NRMSE and BLCI (TPR + TNR - 1). First generate the corresponding simulation matrix based on the input matrix and dataset features. Then after generating the missing values, use each Imputation method to be evaluated to impute the missing values. Compare the initial simulation matrix with the filled simulation matrix to get the result. Repeat to eliminate random errors
}
\note{
Require \code{\link{imputeLCMD}}
}
\examples{
##################################### a simulated example ###################
rm(list = ls())
library('imputeLCMD')
set.seed(2020)
a <- matrix(data = rnorm(100000), ncol = 100) # Generate random matrix
a[sample(length(a), 10000)] <- NA # Generate missing values randomly
rownames(a) <- seq(1, 1000, 1) # Matrix must have row names
res <- ImputePower(RawData = a, GroupAsize = 50, GroupBsize = 50, DERatio = 0.2, logFC = 1, ReTimes = 30, Pair = T, MNARratio = 0,
ImputeMethod = c('kNN_fill', 'SVD_fill', 'MLE_fill', 'Mean_fill', 'QRILC_fill', 'LLS_fill', 'BPCA_fill', 'Min_fill')) # Evaluate input Imputation methods
}
|
# Plot 3: the three energy sub-metering series for 1-2 Feb 2007 from the
# UCI household power consumption data set.

# Read in the data; "?" marks missing values in this file.
to_plot <- read.csv('/Users/brodyvogel/Desktop/household_power_consumption.txt', header=T, sep=';', na.strings="?")

# Keep only the two target days (dates are day/month/year strings here).
to_plot_1 <- subset(to_plot, Date %in% c("1/2/2007","2/2/2007"))

# Combine Date and Time into a single POSIXct timestamp for the x-axis.
to_plot_1$Date <- as.Date(to_plot_1$Date, format="%d/%m/%Y")
new_date <- paste(as.Date(to_plot_1$Date), to_plot_1$Time)
to_plot_1$New_Date <- as.POSIXct(new_date)

# Plot the three sub-metering series on shared axes.
# Fix: the y-axis shows sub-metering readings, so label it accordingly
# (the old label, "Global Active Power (kilowatts)", described a
# different variable).
plot(Sub_metering_1 ~ New_Date, type="l", data = to_plot_1,
     ylab="Energy sub metering", xlab="")
lines(Sub_metering_2 ~ New_Date, col='Red', data = to_plot_1)
lines(Sub_metering_3 ~ New_Date, col='Blue', data = to_plot_1)
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = .5)
to_plot <- read.csv('/Users/brodyvogel/Desktop/household_power_consumption.txt', header=T, sep=';', na.strings="?")
# subset it
to_plot_1 <- subset(to_plot, Date %in% c("1/2/2007","2/2/2007"))
# reformat the date
to_plot_1$Date <- as.Date(to_plot_1$Date, format="%d/%m/%Y")
new_date <- paste(as.Date(to_plot_1$Date), to_plot_1$Time)
to_plot_1$New_Date <- as.POSIXct(new_date)
# and plot it
plot(Sub_metering_1 ~ New_Date, type="l", data = to_plot_1,
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2 ~ New_Date, col='Red', data = to_plot_1)
lines(Sub_metering_3 ~ New_Date, col='Blue', data = to_plot_1)
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = .5) |
# Naive Bayes classifier for the diabetes data set (dia.csv), with a
# stratified train/test split and a confusion-matrix evaluation.
dataset = read.csv('dia.csv')

# Quick look at the structure and summary statistics.
str(dataset)
ncol(dataset)
lapply(dataset, class)
summary(dataset)

library(caret)
library(caTools)
library(e1071)

# Encode the target as a factor so naiveBayes treats this as classification.
dataset$Outcome = factor(dataset$Outcome, levels = c(0, 1), labels = c(0, 1))

# Fix: caTools::sample.split() expects the outcome vector Y (it stratifies
# on class labels and returns one logical per observation); passing the
# whole data frame produces an incorrectly sized, unstratified split.
# A seed makes the split reproducible.
set.seed(123)
split_set = sample.split(dataset$Outcome, SplitRatio = 2/3)
train_set = subset(dataset, split_set == TRUE)
test_set = subset(dataset, split_set == FALSE)

# Train on the predictors (all columns except 9, the Outcome).
classifier = naiveBayes(x = train_set[-9], y = train_set$Outcome)
classifier

# Predict on the held-out set and evaluate.
predicted_data = predict(classifier, newdata = test_set[-9])
predicted_data
cf = confusionMatrix(predicted_data, test_set$Outcome)
cf
str(dataset)
ncol(dataset)
lapply(dataset, class)
summary(dataset)
library(caret)
library(caTools)
library(e1071)
dataset$Outcome = factor(dataset$Outcome, levels = c(0, 1), labels = c(0, 1))
split_set = sample.split(dataset, SplitRatio = 2/3)
train_set = subset(dataset, split_set == TRUE)
test_set = subset(dataset, split_set == FALSE)
classifier = naiveBayes(x = train_set[-9], y = train_set$Outcome)
classifier
predicted_data = predict(classifier, newdata = test_set[-9])
predicted_data
cf = confusionMatrix(predicted_data, test_set$Outcome)
cf |
# Sets working directory
# NOTE(review): setwd("") fails at run time with "cannot change working
# directory" — fill in the analysis directory path before sourcing.
setwd("")
# Inverse-logit (logistic) transform: maps logit-scale values back to
# proportions; used to back-transform model means and SE bounds.
inv.logit <- function(x) {
  1 / (1 + exp(-x))
}
# Reads in data: one row per replicate with counts for Treatment and Control
UVvB<-read.csv("Blue 715 vs continuous stats.csv")
# Creates response variable: 2-column matrix of (successes, failures) as
# required by binomial-family glm()
resp<-with(UVvB,cbind(Treatment, Control))
# Intercept-only model: on the logit scale the intercept is the mean
# proportion, and logit(50%) = 0, so a significant intercept means the
# proportion differs from 50%
model.1<-glm(resp~1, quasibinomial)
# Null model without an intercept (proportion fixed at 50%)
model.2<-glm(resp~0, quasibinomial)
# Significance test of the intercept comparing the two models; report this
# F-value and p-value
anova(model.1,model.2, test="F")
# Model summary
summary(model.1)
# Assumption checks via the standard glm diagnostic plots
par(mfrow=c(2,2))
plot(model.1)
# Extract the treatment proportion (still on the logit scale) from the model
treat<-model.1$coefficients
# Extract the SE of the treatment proportion (still on the logit scale)
se<-summary(model.1)$coefficients[,2]
# Vector of (mean, mean - SE, mean + SE) on the logit scale
t.props<-c(treat, treat-se, treat+se)
# Back-transforms the logit values to proportions
t.props<-inv.logit(t.props)
# Builds the result table; control values are the complement of the
# treatment values (control = 1 - treatment).
# NOTE(review): t.props[2] = inv.logit(treat - se) is the LOWER bound of
# the treatment proportion (inv.logit is increasing) but the upper bound
# of the control; the "Upper SE"/"Lower SE" labels are therefore only
# correct for the control row — confirm the intended orientation.
data.frame( "Proportion"=c(t.props[1], 1-t.props[1]),
            "Upper SE"=c(t.props[2], 1-t.props[2]),
            "Lower SE"=c(t.props[3], 1-t.props[3]),
            row.names=c("treatment", "control"))
#
# Proportion test for mosquito choice data: fits an intercept-only
# quasibinomial GLM to paired treatment/control counts and tests whether the
# treatment proportion differs from 50% (logit 0).
setwd("")  # NOTE(review): empty path placeholder -- set to the data directory before running
#Function to take the inverse of logit to backtransform means and SE
inv.logit<-function(x) 1/(1+1/(exp(x)))
#Reads in Data
UVvB<-read.csv("Blue 715 vs continuous stats.csv")
#Creates response variable, 2 column object with treatment and control
resp<-with(UVvB,cbind(Treatment, Control))
#Model with intercept, intercept here indicates the mean value in the
#logit space is not 0, and the logit of 50% is 0.
#So if the intercept is significant the proportion is not 50%
model.1<-glm(resp~1, quasibinomial)
#Model without an intercept
model.2<-glm(resp~0, quasibinomial)
#Significance test of intercept comparing the two models, report this
#F-value and p-value
# NOTE(review): if this script is run via source(), anova()/summary() values
# are only displayed when wrapped in print() or sourced with print.eval=TRUE.
anova(model.1,model.2, test="F")
#model summary
summary(model.1)
#Assumption tests, just checking normality
par(mfrow=c(2,2))
plot(model.1)
#extract the treatment proportion (still a logit) from model
treat<-model.1$coefficients
#extract the SE of treatment proportion (still a logit) from model
se<-summary(model.1)$coefficients[,2]
#creates a vector with treatment proportion and the upper/lower SE bar values
t.props<-c(treat, treat-se, treat+se)
#Back-transforms the logit values to proportions
t.props<-inv.logit(t.props)
#Creates table from back-transformed data and calculates
#control values from treatment values (control=1-treatment)
# NOTE(review): t.props[2] back-transforms (treat - se) and t.props[3]
# back-transforms (treat + se); since inv.logit is increasing, for the
# treatment row the "Upper SE" column actually holds the LOWER value and
# vice versa. The labels are only correct for the control row (which is
# 1 - treatment). Confirm the intended meaning of the columns.
data.frame( "Proportion"=c(t.props[1], 1-t.props[1]),
           "Upper SE"=c(t.props[2], 1-t.props[2]),
           "Lower SE"=c(t.props[3], 1-t.props[3]),
           row.names=c("treatment", "control"))
# |
# NOTE(review): the @format text below looks internally inconsistent -- it
# claims 8 rows of "individual series" and 11 variables, but the description
# says one row per US season (us_season runs 1-5) and only four variables are
# itemised. Confirm against the actual seasons_raw data before release.
#' Data about each season aired in the US (raw)
#'
#' This data has one row per season aired in the US as 'The Great British
#' Baking Show'.
#'
#' @format A data frame with 8 rows representing individual series and 11
#' variables:
#' \describe{
#' \item{series}{an integer denoting UK series (`1`-`8`)}
#' \item{episode}{an integer denoting total number of episodes within series}
#' \item{us_season}{an integer denoting US season (`1`-`5`)}
#' \item{us_airdate}{a date denoting original airdate of episode in the US,
#' according to
#' [pbs.org](https://www.pbs.org/food/shows/great-british-baking-show/)}
#' }
#'
#' @source US airdates manually recorded from
#' \url{https://www.pbs.org/food/shows/great-british-baking-show/}
#' @examples
#' if (require('tibble')) {
#' seasons_raw
#' }
#' head(seasons_raw)
"seasons_raw"
| /R/seasons_raw.R | permissive | apreshill/bakeoff | R | false | false | 845 | r | #' Data about each season aired in the US (raw)
# NOTE(review): the @format text below looks internally inconsistent -- it
# claims 8 rows of "individual series" and 11 variables, but the description
# says one row per US season (us_season runs 1-5) and only four variables are
# itemised. Confirm against the actual seasons_raw data before release.
#'
#' This data has one row per season aired in the US as 'The Great British
#' Baking Show'.
#'
#' @format A data frame with 8 rows representing individual series and 11
#' variables:
#' \describe{
#' \item{series}{an integer denoting UK series (`1`-`8`)}
#' \item{episode}{an integer denoting total number of episodes within series}
#' \item{us_season}{an integer denoting US season (`1`-`5`)}
#' \item{us_airdate}{a date denoting original airdate of episode in the US,
#' according to
#' [pbs.org](https://www.pbs.org/food/shows/great-british-baking-show/)}
#' }
#'
#' @source US airdates manually recorded from
#' \url{https://www.pbs.org/food/shows/great-british-baking-show/}
#' @examples
#' if (require('tibble')) {
#' seasons_raw
#' }
#' head(seasons_raw)
"seasons_raw"
|
#' Individual Risk computation
#'
#' Estimation of the risk for each observation. After the risk is computed one
#' can use e.g. the function localSuppr() for the protection of values of high
#' risk. Further details can be found at the link given below.
#'
#' S4 class sdcMicro objects are only supported by function \emph{measure_risk}
#' that also estimates the individual risk with the same method.
#'
#' @param x object from class freqCalc
#' @param method approx (default) or exact
#' @param qual final correction factor
#' @param survey TRUE, if we have survey data and FALSE if we deal with a population.
#' @return
#' \itemize{
#' \item{rk: }{ base individual risk }
#' \item{method: }{method}
#' \item{qual: }{final correction factor}
#' \item{fk: }{frequency count}
#' \item{knames: }{colnames of the key variables}}
#' @note The base individual risk method was developed by Benedetti,
#' Capobianchi and Franconi
#' @author Matthias Templ. Bug in method \dQuote{exact} fixed since version
#' 2.6.5. by Youri Baeyens.
#' @references
#' Templ, M. and Kowarik, A. and Meindl, B.
#' Statistical Disclosure Control for Micro-Data Using the R Package sdcMicro.
#' \emph{Journal of Statistical Software}, \strong{67} (4), 1--36, 2015. \doi{10.18637/jss.v067.i04}
#' @seealso \code{\link{measure_risk}}, \code{\link{freqCalc}}
#' @references Franconi, L. and Polettini, S. (2004) \emph{Individual risk
#' estimation in mu-Argus: a review}. Privacy in Statistical Databases, Lecture
#' Notes in Computer Science, 262--272. Springer
#'
#' Machanavajjhala, A. and Kifer, D. and Gehrke, J. and Venkitasubramaniam, M.
#' (2007) \emph{l-Diversity: Privacy Beyond k-Anonymity}. ACM Trans. Knowl.
#' Discov. Data, 1(1)
#'
#' additionally, have a look at the vignettes of sdcMicro for further reading.
#' @keywords manip
#' @export
#' @examples
#'
#' ## example from Capobianchi, Polettini and Lucarelli:
#' data(francdat)
#' f <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
#' f
#' f$fk
#' f$Fk
#' ## individual risk calculation:
#' indivf <- indivRisk(f)
#' indivf$rk
#'
indivRisk <- function(x, method = "approx", qual = 1, survey = TRUE) {
  ## x ... object from freqCalc
  # Resolve the names of the key variables, which may be supplied either as
  # column indices or as column names; the names are carried through to the
  # result purely for labelling.
  if (length(colnames(x$freqCalc)) > 0) {
    if (all(is.numeric(x$keyVars))) {
      knames <- colnames(x$freqCalc)[x$keyVars]
    } else if (all(is.character(x$keyVars))) {
      knames <- x$keyVars
    } else {
      stop("in 'indivRisk' all keyVars must be defined in the same way:\n as column index or as column name.")
    }
  } else {
    knames <- NULL
  }
  if (survey == TRUE) {
    # Survey data: the base individual risk depends on the sample frequency
    # (fk) and the estimated population frequency (Fk) of each key.
    P <- ncol(x$freqCalc)  # NOTE(review): P is computed but never used
    N <- dim(x$freqCalc)[1]
    fk <- x$fk
    Fk <- x$Fk
    # pk: estimated sampling fraction of each cell (sample count over
    # estimated population count).
    pk <- fk/Fk
    # pk = pk-0.0001
    rk <- rep(0, N)
    if (method == "exact") {
      # 'Exact' method: closed-form evaluation of the risk, with separate
      # formulas for cells of size fk == 1, fk == 2 and fk > 2 (the fk > 2
      # case uses the alternating product series BB below).
      A <- (pk^(1 - fk) - 1)/(fk - 1)
      B <- function(fk, pk, i) {
        (fk - 1 - i)^2/((i + 1) * (fk - 2 - i)) * (pk^(i + 2 - fk) - 1)/(pk^(i + 1 -
          fk) - 1)
      }
      # BB: alternating sum of cumulative products of B terms, used in the
      # fk > 2 branch of the exact formula.
      BB <- function(fk, pk) {
        bb <- 0
        for (m in 0:(fk - 3)) {
          b <- 1
          for (m2 in 0:m) {
            b <- b * B(fk, pk, m2)
          }
          bb <- bb + (-1)^(m + 1) * b
        }
        bb
      }
      # r <- (pk/(1-pk)) * (A * (1 + (-1)^1 * B(0) ))
      eins <- (pk/(1 - pk))^fk
      drei <- (-1)^fk * log(pk)
      rk <- rep(0, N)
      # NOTE(review): 1:N would iterate over c(1, 0) if N were ever 0;
      # seq_len(N) would be safer, left unchanged here.
      for (k in 1:N) {
        if (fk[k] > 2) {
          rk[k] <- eins[k] * ((A[k] * (1 + BB(fk[k], pk[k]))) + drei[k])
        }
        if (fk[k] == 2) {
          rk[k] <- (pk[k]/(1 - pk[k])) - (((pk[k]/(1 - pk[k]))^2) * log(1/pk[k]))
        }
        if (fk[k] == 1) {
          rk[k] <- (pk[k]/(1 - pk[k])) * log(1/pk[k])
        }
      }
    }
    if (method == "approx") {
      # Approximate method: simple ratio for fk > 2; the fk == 1 and fk == 2
      # cases use the same expressions as the exact method.
      rk <- rep(0, N)
      for (k in 1:N) {
        if (fk[k] > 2) {
          rk[k] <- pk[k]/(fk[k] - (1 - pk[k]))
        }
        if (fk[k] == 2) {
          rk[k] <- (pk[k]/(1 - pk[k])) - (((pk[k]/(1 - pk[k]))^2) * log(1/pk[k]))
        }
        if (fk[k] == 1) {
          rk[k] <- (pk[k]/(1 - pk[k])) * log(1/pk[k])
        }
      }
    }
    # Cells that are fully enumerated (sample count equals population count)
    # get the deterministic re-identification risk 1/fk.
    TF <- fk == Fk
    if (any(TF)) {
      rk[TF] <- 1/fk[TF]
    }
    # Apply the final correction factor.
    rk <- rk * qual
    rk <- list(rk = rk, method = method, qual = qual, fk = x$fk, knames = knames)
  }
  if (survey == FALSE) {
    # Population data: re-identification risk is simply 1/fk.
    rk <- list(rk = 1/x$fk, method = NA, qual = NA, fk = x$fk, knames = knames)
  }
  class(rk) <- "indivRisk"
  # Returned invisibly; use print() or assignment to inspect the result.
  invisible(rk)
}
#' Print method for objects from class indivRisk
#'
#' Print method for objects from class indivRisk
#'
#' @param x object from class indivRisk
#' @param \dots Additional arguments passed through.
#' @return few information about the method and the final correction factor for
#' objects of class \sQuote{indivRisk}.
#' @author Matthias Templ
#' @seealso \code{\link{indivRisk}}
#' @keywords print
#' @method print indivRisk
#' @export
#' @examples
#'
#' ## example from Capobianchi, Polettini and Lucarelli:
#' data(francdat)
#' f1 <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
#' data.frame(fk=f1$fk, Fk=f1$Fk)
#' ## individual risk calculation:
#' indivRisk(f1)
#'
print.indivRisk <- function(x, ...) {
  # Header line: estimation method and final correction factor.
  cat(paste0("method=", x$method, ", qual=", x$qual))
  cat("\n --------------------------- \n")
  # Count observations flagged as high risk: outliers (more than three MADs
  # above the median risk) that also exceed the absolute cutoff of 0.1.
  outlier_cut <- median(x$rk) + 3 * mad(x$rk)
  n_high <- sum(x$rk > outlier_cut & x$rk > 0.1)
  cat(paste(n_high, "obs. with high risk"))
}
| /R/indivRisk.R | no_license | avivaprins/sdcMicro | R | false | false | 5,314 | r | #' Individual Risk computation
#'
#' Estimation of the risk for each observation. After the risk is computed one
#' can use e.g. the function localSuppr() for the protection of values of high
#' risk. Further details can be found at the link given below.
#'
#' S4 class sdcMicro objects are only supported by function \emph{measure_risk}
#' that also estimates the individual risk with the same method.
#'
#' @param x object from class freqCalc
#' @param method approx (default) or exact
#' @param qual final correction factor
#' @param survey TRUE, if we have survey data and FALSE if we deal with a population.
#' @return
#' \itemize{
#' \item{rk: }{ base individual risk }
#' \item{method: }{method}
#' \item{qual: }{final correction factor}
#' \item{fk: }{frequency count}
#' \item{knames: }{colnames of the key variables}}
#' @note The base individual risk method was developed by Benedetti,
#' Capobianchi and Franconi
#' @author Matthias Templ. Bug in method \dQuote{exact} fixed since version
#' 2.6.5. by Youri Baeyens.
#' @references
#' Templ, M. and Kowarik, A. and Meindl, B.
#' Statistical Disclosure Control for Micro-Data Using the R Package sdcMicro.
#' \emph{Journal of Statistical Software}, \strong{67} (4), 1--36, 2015. \doi{10.18637/jss.v067.i04}
#' @seealso \code{\link{measure_risk}}, \code{\link{freqCalc}}
#' @references Franconi, L. and Polettini, S. (2004) \emph{Individual risk
#' estimation in mu-Argus: a review}. Privacy in Statistical Databases, Lecture
#' Notes in Computer Science, 262--272. Springer
#'
#' Machanavajjhala, A. and Kifer, D. and Gehrke, J. and Venkitasubramaniam, M.
#' (2007) \emph{l-Diversity: Privacy Beyond k-Anonymity}. ACM Trans. Knowl.
#' Discov. Data, 1(1)
#'
#' additionally, have a look at the vignettes of sdcMicro for further reading.
#' @keywords manip
#' @export
#' @examples
#'
#' ## example from Capobianchi, Polettini and Lucarelli:
#' data(francdat)
#' f <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
#' f
#' f$fk
#' f$Fk
#' ## individual risk calculation:
#' indivf <- indivRisk(f)
#' indivf$rk
#'
indivRisk <- function(x, method = "approx", qual = 1, survey = TRUE) {
  ## x ... object from freqCalc
  # Resolve the names of the key variables, which may be supplied either as
  # column indices or as column names; the names are carried through to the
  # result purely for labelling.
  if (length(colnames(x$freqCalc)) > 0) {
    if (all(is.numeric(x$keyVars))) {
      knames <- colnames(x$freqCalc)[x$keyVars]
    } else if (all(is.character(x$keyVars))) {
      knames <- x$keyVars
    } else {
      stop("in 'indivRisk' all keyVars must be defined in the same way:\n as column index or as column name.")
    }
  } else {
    knames <- NULL
  }
  if (survey == TRUE) {
    # Survey data: the base individual risk depends on the sample frequency
    # (fk) and the estimated population frequency (Fk) of each key.
    P <- ncol(x$freqCalc)  # NOTE(review): P is computed but never used
    N <- dim(x$freqCalc)[1]
    fk <- x$fk
    Fk <- x$Fk
    # pk: estimated sampling fraction of each cell (sample count over
    # estimated population count).
    pk <- fk/Fk
    # pk = pk-0.0001
    rk <- rep(0, N)
    if (method == "exact") {
      # 'Exact' method: closed-form evaluation of the risk, with separate
      # formulas for cells of size fk == 1, fk == 2 and fk > 2 (the fk > 2
      # case uses the alternating product series BB below).
      A <- (pk^(1 - fk) - 1)/(fk - 1)
      B <- function(fk, pk, i) {
        (fk - 1 - i)^2/((i + 1) * (fk - 2 - i)) * (pk^(i + 2 - fk) - 1)/(pk^(i + 1 -
          fk) - 1)
      }
      # BB: alternating sum of cumulative products of B terms, used in the
      # fk > 2 branch of the exact formula.
      BB <- function(fk, pk) {
        bb <- 0
        for (m in 0:(fk - 3)) {
          b <- 1
          for (m2 in 0:m) {
            b <- b * B(fk, pk, m2)
          }
          bb <- bb + (-1)^(m + 1) * b
        }
        bb
      }
      # r <- (pk/(1-pk)) * (A * (1 + (-1)^1 * B(0) ))
      eins <- (pk/(1 - pk))^fk
      drei <- (-1)^fk * log(pk)
      rk <- rep(0, N)
      # NOTE(review): 1:N would iterate over c(1, 0) if N were ever 0;
      # seq_len(N) would be safer, left unchanged here.
      for (k in 1:N) {
        if (fk[k] > 2) {
          rk[k] <- eins[k] * ((A[k] * (1 + BB(fk[k], pk[k]))) + drei[k])
        }
        if (fk[k] == 2) {
          rk[k] <- (pk[k]/(1 - pk[k])) - (((pk[k]/(1 - pk[k]))^2) * log(1/pk[k]))
        }
        if (fk[k] == 1) {
          rk[k] <- (pk[k]/(1 - pk[k])) * log(1/pk[k])
        }
      }
    }
    if (method == "approx") {
      # Approximate method: simple ratio for fk > 2; the fk == 1 and fk == 2
      # cases use the same expressions as the exact method.
      rk <- rep(0, N)
      for (k in 1:N) {
        if (fk[k] > 2) {
          rk[k] <- pk[k]/(fk[k] - (1 - pk[k]))
        }
        if (fk[k] == 2) {
          rk[k] <- (pk[k]/(1 - pk[k])) - (((pk[k]/(1 - pk[k]))^2) * log(1/pk[k]))
        }
        if (fk[k] == 1) {
          rk[k] <- (pk[k]/(1 - pk[k])) * log(1/pk[k])
        }
      }
    }
    # Cells that are fully enumerated (sample count equals population count)
    # get the deterministic re-identification risk 1/fk.
    TF <- fk == Fk
    if (any(TF)) {
      rk[TF] <- 1/fk[TF]
    }
    # Apply the final correction factor.
    rk <- rk * qual
    rk <- list(rk = rk, method = method, qual = qual, fk = x$fk, knames = knames)
  }
  if (survey == FALSE) {
    # Population data: re-identification risk is simply 1/fk.
    rk <- list(rk = 1/x$fk, method = NA, qual = NA, fk = x$fk, knames = knames)
  }
  class(rk) <- "indivRisk"
  # Returned invisibly; use print() or assignment to inspect the result.
  invisible(rk)
}
#' Print method for objects from class indivRisk
#'
#' Print method for objects from class indivRisk
#'
#' @param x object from class indivRisk
#' @param \dots Additional arguments passed through.
#' @return few information about the method and the final correction factor for
#' objects of class \sQuote{indivRisk}.
#' @author Matthias Templ
#' @seealso \code{\link{indivRisk}}
#' @keywords print
#' @method print indivRisk
#' @export
#' @examples
#'
#' ## example from Capobianchi, Polettini and Lucarelli:
#' data(francdat)
#' f1 <- freqCalc(francdat, keyVars=c(2,4,5,6),w=8)
#' data.frame(fk=f1$fk, Fk=f1$Fk)
#' ## individual risk calculation:
#' indivRisk(f1)
#'
print.indivRisk <- function(x, ...) {
  # Header line: estimation method and final correction factor.
  cat(paste0("method=", x$method, ", qual=", x$qual))
  cat("\n --------------------------- \n")
  # Count observations flagged as high risk: outliers (more than three MADs
  # above the median risk) that also exceed the absolute cutoff of 0.1.
  outlier_cut <- median(x$rk) + 3 * mad(x$rk)
  n_high <- sum(x$rk > outlier_cut & x$rk > 0.1)
  cat(paste(n_high, "obs. with high risk"))
}
|
#' S3 class sfpca model with number of pc, knot, sampling data from rstan,
#' log-likelihood, and leave-one-out cross-validation (LOO) information criterion
sfpcaClass <- function(Nsamples = NULL, Nchains = NULL, pc=NULL, knot=NULL,
                       sa=NULL, log_lik=NULL, looic=NULL, basis=NULL){
  # Bundle all model components into a named list; fields default to NULL
  # and are typically filled in after construction.
  model <- list(
    Nsamples = Nsamples,
    Nchains = Nchains,
    pc = pc,
    knot = knot,
    sa = sa,
    log_lik = log_lik,
    looic = looic,
    basis = basis
  )
  # Tag the list with the S3 class, keeping its existing class first.
  class(model) <- c(class(model), "sfpcaClass")
  model
}
#' generate sfpca models with different parameters
#'
#' @param sfpca_data: The prepared data list from prepare_data() function
#' @param Nsamples: Number of objects sampling from rstan
#' @param Nchains: Number of Markov chain using in rstan model
#' @param Ncores: Number of cores using in rstan model
#' @param PC_range: A vector of pc number
#' @param nknot_range: A vector of knot number
#' @return A list of sfpca classes with different pc and knot numbers
#' @import loo
#' @import Rcpp
#' @import methods
#' @importFrom rstan sampling
#' @useDynLib BayesTime, .registration = TRUE
#' @export
stan_fit <- function(sfpca_data, Nsamples, Nchains, Ncores=NULL,
                     PC_range, nknot_range, seed=NULL){
  # Resolve loop-invariant defaults once, instead of on every iteration.
  if (is.null(Ncores)) Ncores <- getOption("mc.cores", 1L)
  if (is.null(seed)) seed <- 31
  # Preallocate the result list: one entry per (pc, knot) combination.
  stan_results <- vector("list", length(PC_range) * length(nknot_range))
  i <- 0
  for (k in PC_range) {
    for (d in nknot_range) {
      i <- i + 1
      sfpca <- sfpcaClass()
      sfpca$pc <- k
      sfpca$knot <- d
      print(paste('index i is:', i, 'number of PC:', k, 'number of knots:', d))
      # Orthogonal spline basis for the current number of knots.
      results_basis <- basis_setup_sparse(sfpca_data = sfpca_data,
                                          nknots = d, orth = TRUE)
      sfpca$basis <- results_basis
      pca_data <- list(N = sfpca_data$num_subjects,
                       K = k,
                       # Q = nknots + 4: basis dimension; presumably a cubic
                       # spline basis -- confirm against basis_setup_sparse.
                       Q = d + 4,
                       Y = sfpca_data$response.list,
                       V = sfpca_data$visits.vector,
                       subject_starts = sfpca_data$visits.start,
                       subject_stops = sfpca_data$visits.stop,
                       cov_starts = sfpca_data$cov.start,
                       cov_stops = sfpca_data$cov.stop,
                       cov_size = sfpca_data$cov.size,
                       B = results_basis$orth_spline_basis_sparse_stacked)
      # Reset the seed before every fit so each (pc, knot) model is
      # reproducible independently of iteration order.
      set.seed(seed)
      sa <- rstan::sampling(stanmodels$sfpca, data = pca_data, iter = Nsamples,
                            chains = Nchains, cores = Ncores, init = "random")
      sfpca$sa <- sa
      sfpca$log_lik <- rstan::extract(sa,"log_lik_marg")[[1]]
      # Leave-one-out CV information criterion, used for model comparison.
      sfpca$looic <- loo::loo(sfpca$log_lik)
      sfpca$Nsamples <- Nsamples
      sfpca$Nchains <- Nchains
      stan_results[[i]] <- sfpca
      print("######### SFPCA ###############")
    }
  }
  return(stan_results)
}
| /R/stan_fit.R | no_license | biocore/bayestime | R | false | false | 2,931 | r | #' S3 class sfpca model with number of pc, knot, sampling data from rstan,
#' log-likelihood, and leave-one-out cross-validation (LOO) information criterion
sfpcaClass <- function(Nsamples = NULL, Nchains = NULL, pc=NULL, knot=NULL,
                       sa=NULL, log_lik=NULL, looic=NULL, basis=NULL){
  # Bundle all model components into a named list; fields default to NULL
  # and are typically filled in after construction.
  model <- list(
    Nsamples = Nsamples,
    Nchains = Nchains,
    pc = pc,
    knot = knot,
    sa = sa,
    log_lik = log_lik,
    looic = looic,
    basis = basis
  )
  # Tag the list with the S3 class, keeping its existing class first.
  class(model) <- c(class(model), "sfpcaClass")
  model
}
#' generate sfpca models with different parameters
#'
#' @param sfpca_data: The prepared data list from prepare_data() function
#' @param Nsamples: Number of objects sampling from rstan
#' @param Nchains: Number of Markov chain using in rstan model
#' @param Ncores: Number of cores using in rstan model
#' @param PC_range: A vector of pc number
#' @param nknot_range: A vector of knot number
#' @return A list of sfpca classes with different pc and knot numbers
#' @import loo
#' @import Rcpp
#' @import methods
#' @importFrom rstan sampling
#' @useDynLib BayesTime, .registration = TRUE
#' @export
stan_fit <- function(sfpca_data, Nsamples, Nchains, Ncores=NULL,
                     PC_range, nknot_range, seed=NULL){
  # Resolve loop-invariant defaults once, instead of on every iteration.
  if (is.null(Ncores)) Ncores <- getOption("mc.cores", 1L)
  if (is.null(seed)) seed <- 31
  # Preallocate the result list: one entry per (pc, knot) combination.
  stan_results <- vector("list", length(PC_range) * length(nknot_range))
  i <- 0
  for (k in PC_range) {
    for (d in nknot_range) {
      i <- i + 1
      sfpca <- sfpcaClass()
      sfpca$pc <- k
      sfpca$knot <- d
      print(paste('index i is:', i, 'number of PC:', k, 'number of knots:', d))
      # Orthogonal spline basis for the current number of knots.
      results_basis <- basis_setup_sparse(sfpca_data = sfpca_data,
                                          nknots = d, orth = TRUE)
      sfpca$basis <- results_basis
      pca_data <- list(N = sfpca_data$num_subjects,
                       K = k,
                       # Q = nknots + 4: basis dimension; presumably a cubic
                       # spline basis -- confirm against basis_setup_sparse.
                       Q = d + 4,
                       Y = sfpca_data$response.list,
                       V = sfpca_data$visits.vector,
                       subject_starts = sfpca_data$visits.start,
                       subject_stops = sfpca_data$visits.stop,
                       cov_starts = sfpca_data$cov.start,
                       cov_stops = sfpca_data$cov.stop,
                       cov_size = sfpca_data$cov.size,
                       B = results_basis$orth_spline_basis_sparse_stacked)
      # Reset the seed before every fit so each (pc, knot) model is
      # reproducible independently of iteration order.
      set.seed(seed)
      sa <- rstan::sampling(stanmodels$sfpca, data = pca_data, iter = Nsamples,
                            chains = Nchains, cores = Ncores, init = "random")
      sfpca$sa <- sa
      sfpca$log_lik <- rstan::extract(sa,"log_lik_marg")[[1]]
      # Leave-one-out CV information criterion, used for model comparison.
      sfpca$looic <- loo::loo(sfpca$log_lik)
      sfpca$Nsamples <- Nsamples
      sfpca$Nchains <- Nchains
      stan_results[[i]] <- sfpca
      print("######### SFPCA ###############")
    }
  }
  return(stan_results)
}
|
#Code to create ScotPHO's Shiny profile platform
# This script includes the user-interface definition of the app.
###############################################.
## Header ----
###############################################.
tagList( #needed for shinyjs
useShinyjs(), # Include shinyjs
introjsUI(), # Required to enable introjs scripts
navbarPage(id = "intabset", #needed for landing page
title = div(tags$a(img(src="scotpho_reduced.png", height=40), href= "http://www.scotpho.org.uk/"),
style = "position: relative; top: -5px;"), # Navigation bar
windowTitle = "ScotPHO profiles", #title for browser tab
theme = shinytheme("cerulean"), #Theme of the app (blue navbar)
collapsible = TRUE, #tab panels collapse into menu in small screens
header =
tags$head( #CSS styles
cookie_box, ##Cookie box
tags$link(rel="shortcut icon", href="favicon_scotpho.ico"), #Icon for browser tab
#Including Google analytics and Cookie control
includeScript("google-analytics.js"),
# HTML('<script src="https://cc.cdn.civiccomputing.com/8/cookieControl-8.x.min.js"></script>'),
# includeScript("cookie-control.js"),
includeCSS("www/styles.css"),
HTML("<base target='_blank'>") # to make external links open a new tab
),
###############################################.
## Landing page ----
###############################################.
tabPanel(
title = " Home", icon = icon("home"),
mainPanel(width = 11, style="margin-left:4%; margin-right:4%",
introBox(
fluidRow(column(7,(h3("Welcome to the ScotPHO profiles", style="margin-top:0px;"))),
(column(4,actionButton("btn_landing",label="Help: Take tour of the tool",icon=icon('question-circle'),class="down")))),
data.step = 1,
data.intro =(p(h4("Welcome to the ScotPHO Profiles Tool"),
h5("This interactive tool provides access to a range of public
health related indicators at different geographies including NHS boards, council areas and health and
social care partnerships."),
br(),
h5("There are different ways to navigate around the tool."),
h5("Different visualisations can be opened using the menu bar (the blue strip) at the top of the screen."),
img(src='introjs_tabset_panel.PNG',width=300),
br(),
h5("The 'Home' option in the menu bar will return to the profiles tool homepage."),
style = "color:0E3E5D; font-size:20px")),
data.position = "left"),
fluidRow(
#Summary box
column(6, class="landing-page-column",br(), #spacing
introBox(
lp_main_box(image_name= "landing_button_heatmap_2",
button_name = 'jump_to_summary', title_box = "Profile summary",
description = 'A high level view of an area across a set of indicators'),
data.step = 2,
data.intro = h5("The profile summary allows you to look at multiple indicators within an area at the same time"),
data.position = "bottom-right-aligned")),
#Table box
column(6, class="landing-page-column",
br(), #spacing
introBox( # tour of the tool
lp_main_box(image_name= "landing_button_data_table",
button_name = 'jump_to_table', title_box = "Data",
description = 'View and download the data behind the tool'),
data.step = 6,
data.intro = h5("The 'Data' window can be used to filter and download profiles data")))),
#2nd row of boxes
fluidRow(
br(), #spacing
column(8, style = "padding-left: 0px; padding-right: 0px;",
introBox( #tour of the rank and trend tabs
data.step = 3,
data.intro = h5("The trend and rank charts allow detailed exploration of one indicator at a time."),
#Trend plot box
column(6, class="landing-page-column",
lp_main_box(image_name= "landing_button_time_trend",
button_name = 'jump_to_trend', title_box = "Trend",
description = 'Look at how an indicator changes over time')),
#Rank/map plot box
column(6, class="landing-page-column",
lp_main_box(image_name= "landing_button_maprank",
button_name = 'jump_to_rank', title_box = "Rank",
description = 'Compare geographical variation for an indicator'))
)),#introBox 3 close
#Inequalities box
column(4, class="landing-page-column",
introBox(
data.step = 7,
data.intro = h5("The inequalities module allows exploration of deprivation effects for a selection of indicators from the main profiles tool."),
lp_main_box(image_name= "landing_button_health_inequality",
button_name = 'jump_to_ineq', title_box = "Health inequalities",
description = 'Explore how an indicator varies with deprivation'))
) #introBox 7 close
), # fluid row close
# end of landing page second row
# third row of landing page
fluidRow(
introBox(data.step=8, # tour around the tool
data.intro =h5("There are also options to find out information such as detailed descriptions of the profile indicators, indicator update schedules and links to evidence for action briefings"),
#About box
column(4, class="landing-page-column",
lp_about_box(image_name= "landing_button_about_2", button_name = 'jump_to_about',
title_box = "About", description = 'About ScotPHO Profiles'),
#Evidence box
div(class="landing-page-box-about",
div("Evidence for action",title="Links to briefing documents containing practical actions for improvement", class = "landing-page-box-title" ),
div(class = "landing-page-about-icon", div(img(src="landing_button_other_profile.png",class="centerabout"))),
actionButton('jump_to_efa', 'Links to ScotPHO evidence for action briefings',
onclick ="window.open('https://www.scotpho.org.uk/comparative-health/profiles/resources/evidence-for-action/', '_blank')",
class="landing-page-button",
icon = icon("arrow-circle-right", "icon-lp")))),
column(4, class="landing-page-column",
#Indicator updates
lp_about_box(image_name= "landing_button_calendar", button_name = 'btn_indicator_updates',
title_box = "Indicator updates",
description = 'Find out which indicators have been updated in the last 60 days'),
#Resources box
lp_about_box(image_name= "landing_button_resources", button_name = 'jump_to_resources',
title_box = "Resources",
description = 'Find technical information about the ScotPHO profile definitions and methodology')),
column(4, class="landing-page-column",
#Definitions
lp_about_box(image_name= "landing_button_technical_resources",
button_name = 'jump_to_definitions', title_box = "Definitions",
description = 'Find out about indicator definitions and data sources'),
#Other profiles
lp_about_box(image_name= "landing_button_related_links", button_name = 'jump_to_others',
title_box = "Other profiles", description = 'Links to alternative profiling tools'))
) #Close IntroBox
)#Fluidrow bracket
) #main Panel bracket
),# tab panel bracket
###############################################.
## Summary ----
###############################################.
tabPanel("Summary", icon = icon("list-ul"), value = "summary",
introBox(
wellPanel(fluidRow( #Filter options
column(3,
div(title="Select a geography level first, then select the are you want from the list. You can click in the box, hit backspace and start to type if you want to start searching.",
p(tags$b("Step 1. Select a geography level and then an area of interest.")),
selectInput("geotype_summary", label = NULL, choices=areatype_list,
selected = "Health board"),
conditionalPanel(#Conditional panel for extra dropdown for localities & IZ
condition = "input.geotype_summary== 'HSC locality' | input.geotype_summary == 'Intermediate zone' ",
div(title="This option restricts the HSC locality or IZ options below to only areas within a parent geography",
selectInput("loc_iz_summary", label = "Step 1b. Select a region for localities or intermediate zones",
choices = partnership_name))
),
uiOutput("geoname_ui_summary"))
),
column(3,
div(title="Select the profile you are interested in. Not all profiles are available for all geographies",
p(tags$b("Step 2. Select a profile ")),
div(id= "summary_div", uiOutput("profile_ui_summary")),
# domain if spine selected
conditionalPanel(condition = 'input.chart_summary == "Spine"',
uiOutput("topic_ui_spine")))
),
column(3,
div(title="Compare against another area (e.g. Scotland) or against a previous period to see the evolution of the area",
p(tags$b("Step 3. Select to compare by ")),
awesomeRadio("comp_summary", label = NULL,
choices = list("Area or" = 1, "Time" = 2),
selected = 1, inline=TRUE, checkbox = TRUE),
uiOutput("comp_ui_summary")) # comparator options
),
column(3,
actionButton("help_summary",label="Help", icon= icon('question-circle'), class ="down"),
actionButton("defs_summary",label="Definitions", icon= icon('info'), class ="down"),
downloadButton('download_summary', 'Download data', class = "down"),
uiOutput("save_chart_ui"))),
fluidRow(column(12,
column(3),#empty column to replicate offset and center content
column(6,
p(tags$b("Step 4. Select what type of summary you want to see:"),
" snapshot is a comparison with the latest data available,
trend will show how things are changing over time, and
spine compares indicators with the rest of areas of the same level."),
radioGroupButtons("chart_summary", status = "primary", justified = TRUE,
choices = c("Snapshot", "Trend", "Spine"), label=NULL )),
column(3) #empty column to replicate offset and center content
)) # column and row brackets
), #well panel bracket
data.step = 4,
data.intro =(p(h5("Throughout the tool use the dropdown menus to change which indicators or geographies are displayed in the charts."),
br(),
h5("While using dropdown menus mouse click within a dropdown menu and press backspace on your keyboard ('<-') then start typing a word to quickly find the options you are looking for"),
img(src='introjs_how_to_select.png')))
), #introbox bracket
mainPanel(width = 12,
shiny::hr(),
bsModal("mod_defs_summary", "Definitions", "defs_summary",
htmlOutput('defs_text_summary')),
fluidRow(column(4,
h4(textOutput("summary_title"), style="color: black; text-align: left"),
h5(textOutput("summary_subtitle"), style="color: black; text-align: left")
),
column(3,
br(),
br(),
p(img(src='signif_better.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Better than comparator", br(),
img(src='non_signif.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Not different to comparator", br(),
img(src='signif_worse.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Worse than comparator", br(),
img(src='signif_nocalc.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"No differences can be calculated")),
conditionalPanel(condition = 'input.chart_summary == "Spine"',
column(5,
br(),
br(),
uiOutput("ui_spine_legend_selected"),
uiOutput("ui_spine_legend_areatype"),
uiOutput("ui_spine_legend_comparator")))),
# Depending what users selects different visualizations
uiOutput("summary_expl_text"),
uiOutput("summary_ui_plots")
)
), #Tab panel bracket
###############################################.
## Time trend ----
###############################################.
tabPanel("Trend", icon = icon("area-chart"), value = "trend",
sidebarPanel(width=4,
column(6,
actionButton("help_trend",label="Help", icon= icon('question-circle'), class ="down")),
column(6,
actionButton("defs_trend", label="Definitions", icon= icon('info'), class ="down")),
column(12,
shiny::hr(),
div(title="Select an indicator to see trend information. Click in this box, hit backspace and start to type if you want to quickly find an indicator.",
selectInput("indic_trend", shiny::HTML("<p>Step 1. Select an indicator <br/> <span style='font-weight: 400'>(hit backspace and start typing to search for an indicator)</span></p>"),
choices=indicator_list, selected = "Alcohol-related hospital admissions")),
shiny::hr(),
div(title="Use the options below to add geographies to the trend chart, remember some indicators may not be available for all geography types. See technical information to find out which geographies indicators are available for.",
p(tags$b("Step 2. Select areas to plot."),
p("(You can select multiple areas of any geography type)."))),
awesomeCheckbox("scotname_trend", tags$b("Scotland"), value=TRUE)),
column(6,
selectizeInput("hbname_trend", "Health board", choices = c("Select health boards" = "", paste(hb_name)),
multiple=TRUE, selected = ""),
selectizeInput("partname_trend", "HSC partnership", choices = c("Select partnerships" = "", paste(partnership_name)),
multiple=TRUE, selected = "")),
column(6,
selectizeInput("caname_trend", "Council area", choices = c("Select council areas" = "", paste(la_name)),
multiple=TRUE, selected = ""),
selectizeInput("adpname_trend", "Alcohol & drug partnership", choices = c("Select partnerships" = "", paste(adp_name)),
multiple=TRUE, selected = "")),
div(title="This option restricts the HSC locality or IZ options below to only areas within a parent geography",
selectInput("loc_iz_trend", "To choose a locality or intermediate zone, first
select an HSC partnership", choices = partnership_name)),
column(6,div(title="If greyed out locality data not available",uiOutput("loc_ui_trend"))),
column(6,div(title="If greyed out IZ data not available",uiOutput("iz_ui_trend"))),
column(12,
shiny::hr(),
div(tags$b("Step 3. Decide how to present data in the chart.")),
div(title= "Display the rate/percentage data or the raw numbers.",
awesomeRadio("var_plot_trend", label =NULL, inline = TRUE,
choices = c("Rate/Percentage" = "measure",
"Numerator" = "numerator"))),
div(title="Show or hide the 95% confidence intervals for the data selected.", # tooltip
awesomeCheckbox("ci_trend", label = "95% confidence intervals", value = FALSE)),
downloadButton('download_trend', 'Download data', class = "down"),
savechart_button('download_trendplot', 'Save chart', class = "down", disabled=TRUE))),
mainPanel(width = 8, #Main panel
bsModal("mod_defs_trend", "Definitions", "defs_trend", htmlOutput('defs_text_trend')),
h4(textOutput("title_trend"), style="color: black; text-align: left"),
h5(textOutput("subtitle_trend"), style="color: black; text-align: left"),
withSpinner(plotlyOutput("trend_plot"))
)
), #Tab panel bracket
###############################################.
## Rank and map ----
###############################################.
tabPanel("Rank", icon = icon("signal"), value = "rank",
wellPanel(#Filter options
column(width = 4,
div(title="Select an indicator to see comparative information. Click in this box, hit backspace and start to type if you want to quickly find an indicator.",
selectInput("indic_rank", shiny::HTML("<p>Step 1. Select an indicator <span style='font-weight: 400'> <br/>
(hit backspace and start typing to search for an indicator)</span></p>"),
choices=indicator_list, selected = "Alcohol-related hospital admissions")),
div(title="Use this option to change the type of geography displayed in the chart.
Some indicators are not be available for all geography types.
See the indicator definitions tab to find out which geographies indicators are available for.",
uiOutput("geotype_ui_rank")),
div(title="There are too many hscp localities or IZs to show in the rank chart a
selection must be made to limit localities or IZs to only those within a parent area",
conditionalPanel( #Conditional panel for extra dropdown for localities & IZ
condition = "input.geotype_rank == 'HSC locality' | input.geotype_rank == 'Intermediate zone' ",
selectInput("loc_iz_rank", "Step 2b. Select a region for localities or intermediate zones",
choices = partnership_name)))
),
column(width = 3,
div(title="This option will change whether the chart compares areas to another area (e.g. the Scotland average) or against a different time period (e.g. figures for the year 2017 compared to the year 2010).",
awesomeRadio("comp_rank", label =shiny::HTML("<p>Step 3. Select to compare by:<br/><br/></p>"), #br required to try and keep alignment across columns
choices = list("Area or"= 1, "Time" = 2),
selected = 1, inline=TRUE, checkbox=TRUE)),
conditionalPanel(condition = "input.comp_rank == 1 ",
div(title="Use this option to change which area is the comparator (red line in barchart)",
selectInput("geocomp_rank", "Step 3b. Select comparator area", choices = comparator_list,
selectize=TRUE, selected = "Scotland")),
div(tags$b("Step 3c. Decide how to present data in the chart.")),
div(title="Show or hide the 95% confidence intervals on chart.",
awesomeCheckbox("ci_rank", label = "95% confidence intervals", value = FALSE))),
conditionalPanel(condition = "input.comp_rank == 2 ",
uiOutput("yearcomp_ui_rank"))
),
column(width = 3,
div(title="Use this option to change the time period presented in the chart and map",
uiOutput("year_ui_rank"))),
column(width = 2,
introBox(
actionButton("rank_help",label="Help", icon= icon('question-circle'), class ="down"),
actionButton("defs_rank", label="Definitions", icon= icon('info'), class ="down"),
downloadButton('download_rank', 'Download data', class = "down"),
savechart_button('download_rankplot', 'Save chart', class = "down", disabled=TRUE),
savechart_button('download_mapplot', 'Save map', class = "down"),
data.step = 5,
data.intro =(p(h5("Throughout the tool look out for options in each window that provide"),
tags$li("indicator definitions or help to interpret a visualisation,",style="color: #007ba7"),
tags$li("data download options for individual charts,",style="color: #007ba7"),
tags$li("image downloads for individual charts.",style="color: #007ba7")))))
), #well pannel bracket
mainPanel(width = 12, #Main panel
bsModal("mod_defs_rank", "Definitions", "defs_rank", htmlOutput('defs_text_rank')),
uiOutput("rank_summary"), #description of the charts
shiny::hr(), #header row
column(width = 7, #rank bar
h4(textOutput("rank_title"), style="color: black; text-align: left"),
h5(textOutput("rank_subtitle"), style="color: black; text-align: left"),
withSpinner(plotlyOutput("rank_plot"))),
column(width = 5, #map
uiOutput("rank_legend"),
uiOutput("map_ui"))
) #main panel bracket
), #Tab panel bracket
###############################################.
## Health inequalities ----
###############################################.
tabPanel("Inequalities", icon = icon("balance-scale"), value = "ineq",
sidebarPanel(width = 3, #Filter options
actionButton("help_simd", label="Help",
icon= icon('question-circle'), class ="down"),
actionButton("defs_simd",label="Definitions", icon= icon('info'), class ="down"),
div(style = "margin-top: 30px",
selectInput("geotype_simd", label = "Step 1 - Select a geography level and an area",
choices = areatype_depr_list, selected = "Scotland")),
uiOutput("geoname_ui_simd"),
selectInput("indic_simd", label = "Step 2 - Choose an indicator (type to search)",
choices = ind_depr_list),
uiOutput("year_ui_simd"),
div(title="Select what aspect of inequality you want to explore.", # tooltip
style = "margin-top: 10px; margin-bottom: 20px;",
radioGroupButtons("measure_simd",
label= "Step 4 - Select what aspect of inequality you want to explore.",
choices = depr_measure_types, status = "primary",
justified = TRUE
)),
awesomeCheckbox("ci_simd", label = "Show/hide 95% confidence intervals", value = F),
tags$div(title="Select if you want to use local or national quintiles", # tooltip
awesomeRadio("quint_type", label= "Local/Scotland quintiles",
choices = c("Local", "Scotland"), inline=TRUE, checkbox = TRUE)),
downloadButton(outputId = 'download_simd',
"Download data", class = "down"),
savechart_button('report_simd', 'Save charts', class = "down", disabled=TRUE)
),
mainPanel(width = 9, #Main panel
bsModal("mod_defs_simd", "Definitions", "defs_simd", htmlOutput('defs_text_simd')),
#Overview: trend and bar chart
div(class= "depr-text-box",
div(class= "title", textOutput("simd_nutshell_title")),
div(class= "content", htmlOutput("simd_text"))),
conditionalPanel("input.measure_simd == 'Trend'",
column(6,
htmlOutput("simd_barplot_title"),
withSpinner(plotlyOutput("simd_bar_plot"))),
column(6,
htmlOutput("simd_trendplot_title"),
withSpinner(plotlyOutput("simd_trend_plot"))),
column(12, align="center", #legend
style= "padding-bottom: 40px;",
p(column(1),
column(2, img(src="quintile1.png", height = "16px"), "1 - most deprived"),
column(1, img(src="quintile2.png", height = "16px"), "2"),
column(1, img(src="quintile3.png", height = "16px"), "3"),
column(1, img(src="quintile4.png", height = "16px"), "4"),
column(2, img(src="quintile5.png", height = "16px"), "5 - least deprived"),
column(2, img(src="simd_overall.png", height = "8px"), "Average"),
column(1)))
),#trend minitab bracket
#Absolute and relative inequality
conditionalPanel("input.measure_simd == 'Gap'",
column(6, htmlOutput("title_sii"), br(),
withSpinner(plotlyOutput("simd_sii_plot"))),
column(6,
htmlOutput("title_rii"),
withSpinner(plotlyOutput("simd_rii_plot")))
),
#Population attributable risk
conditionalPanel("input.measure_simd == 'Risk'",
column(6,
htmlOutput("simd_par_barplot_title"),
withSpinner(plotlyOutput("simd_par_barplot")),
p(img(src= "signif_worse.png", height = "16px"),
"Attributable to inequality",
style= "text-align: center; padding-bottom: 40px")),
column(6,
htmlOutput("simd_par_trendplot_title"),
withSpinner(plotlyOutput("simd_par_trendplot")))
)
)
), #Tab panel bracket
###############################################.
## Data ----
###############################################.
tabPanel("Data", icon = icon("table"), value = "table",
#Sidepanel for filtering data
mainPanel(
width = 12, style="margin-left:0.5%; margin-right:0.5%",
#Row 1 for intro
fluidRow(
p("Download the data used in the tool",
style = "font-weight: bold; color: black;"),
p("Use the filters below to select the data you want to download. ",
"To delete choices use backspace or select item and delete"),
br()
),
#Row 2 for selections
fluidRow(
column(3,
p("Select what data you want", style = "font-weight: bold; color: black;"),
div("All available indicators will be displayed for
selected geography if none specified"),
awesomeRadio("product_filter", label=NULL, choices = c("Indicator", "Domain", "Profile"), selected = NULL, inline = FALSE,
status = "primary", checkbox = TRUE),
conditionalPanel(condition="input.product_filter=='Indicator'",
selectizeInput("indicator_filter", label = NULL,
choices = indicator_list, selected = NULL,
multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type indicators to filter by"))
),
conditionalPanel(condition="input.product_filter=='Domain'",
selectizeInput("topic_filter", label = NULL,
choices = topic_list, selected = NULL,
multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type domains to filter by"))
),
conditionalPanel(condition="input.product_filter=='Profile'",
selectizeInput("profile_filter", label = NULL,
choices = profile_list, selected = NULL,
multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type profiles to filter by"))
)
),# column bracket
column(3,
p("Select what areas you want", style = "font-weight: bold; color: black;"),
# Scotland selections
awesomeCheckbox("scotland",label = "Scotland", value = FALSE),
# Panel for health board selections
awesomeCheckbox("hb",label = "Health board", value = FALSE),
conditionalPanel(
condition = "input.hb == true",
selectInput("hb_true", label = NULL,
choices = hb_name, selected = NULL, multiple=TRUE)),
# Panel for council area selections
awesomeCheckbox("la", label = "Council area", value = FALSE),
conditionalPanel(condition = "input.la == true",
selectizeInput("la_true", label = NULL,
choices = la_name, selected = NULL, multiple=TRUE,
options = list(placeholder = "Select or type council area of interest"))),
# Panel for ADP selections
awesomeCheckbox("adp",label = "Alcohol & drug partnership", value = FALSE),
conditionalPanel(condition = "input.adp == true",
selectizeInput("adp_true", label = NULL,
choices = adp_name, selected = NULL, multiple=TRUE,
options = list(placeholder = "Select or type ADP of interest")))
), # column bracket
column(3,
br(),
# Panel for HSC partnership selections
awesomeCheckbox("hscp",label = "Health & social care partnership", value = FALSE),
conditionalPanel(
condition = "input.hscp == true",
selectInput("hscp_true", label = NULL, choices = partnership_name,
selected = NULL, multiple=TRUE)),
# Panel for locality selections
awesomeCheckbox("hscl",label = "Health & social care locality", value = FALSE),
conditionalPanel(condition = "input.hscl == true",
selectizeInput("hscl_parent", label = "Filter locality list by HSC partnership",
choices = parent_geo_list,
selected = "Show all", multiple=FALSE),
# if they haven't selected all, show tickbox so they can select all localities of parent area
conditionalPanel(condition = "input.hscl_parent != 'Show all'",
checkboxInput("hscl_parent_all",label = "Select all HSC localities in this area",
value = FALSE)),
uiOutput("hscl_filtered")),
# Panel for intermediate zone selections
awesomeCheckbox("iz",label = "Intermediate zone", value = FALSE),
conditionalPanel(condition = "input.iz == true",
selectizeInput("iz_parent", label = "Filter intermediate zone list by HSC partnership",
choices = parent_geo_list, selected = "Show all", multiple=FALSE),
# if they haven't selected all, show tickbox so they can select all izs of parent area
conditionalPanel(condition = "input.iz_parent != 'Show all'",
checkboxInput("iz_parent_all",label = "Select all intermediate zones in this area", value = FALSE)),
uiOutput("iz_filtered")),
# To select all available geographies
awesomeCheckbox("all_data",label = "All available geographies", value = FALSE),
# to search by code
selectizeInput("code", label = NULL, choices = code_list,
options = list(placeholder = 'Or search by area code'),
multiple=TRUE, selected = "")
), #column bracket
column(3, style = "width:20%",
p("Select the time period", style = "font-weight: bold; color: black;"),
sliderInput("date_from",label = NULL, min = min_year,
max = max_year, value = c(min_year,max_year),
step = 1, sep="", round = TRUE,
ticks = TRUE, dragRange = FALSE),
br(),
actionButton("clear", label = "Clear all filters", icon ("eraser"), class = "down"),
downloadButton("download_table_csv", 'Download data', class = "down")
) #column bracket
), #filters fluid row bracket
#Row 3- Table
fluidRow(
column(12, div(DT::dataTableOutput("table_filtered"),
style = "font-size: 98%; width: 98%"))
)
) # main panel bracket
), #Tab panel bracket
###############################################.
##############NavBar Menu----
###############################################.
#Starting navbarMenu to have tab with dropdown list
navbarMenu("Info", icon = icon("info-circle"),
###############################################.
## About ----
###############################################.
# "About" tab: static descriptive text about the ScotPHO collaboration and contact details.
tabPanel("About", value = "about",
sidebarPanel(width=1),  # empty sidebar used purely as a left margin/spacer
mainPanel(width=8,
h4("About", style = "color:black;"),
p("ScotPHO's profiles tool allows users to explore the various different profiles
produced by the ", tags$a(href="http://www.scotpho.org.uk/about-us/about-scotpho/", "ScotPHO collaboration.",
class="externallink")),
p("The profiles are intended to increase understanding of local health issues
and to prompt further investigation, rather than to be used as a performance
management tool. The information needs to be interpreted within a local
framework; an indicator may be higher or lower in one area compared to another,
but local knowledge is needed to understand and interpret differences."),
p("The Scottish Public Health Observatory (ScotPHO) collaboration is led
by Public Health Scotland, and includes Glasgow Centre for Population Health, National Records of Scotland,
the MRC/CSO Social and Public Health Sciences Unit and the Scottish Learning Disabilities Observatory."),
p("We aim to provide a clear picture of the health of the Scottish population and the factors
that affect it. We contribute to improved collection and use of routine data on health,
risk factors, behaviours and wider health determinants. We take a lead in determining
Scotland's future public health information needs, develop innovations in public health
information and provide a focus for new routine public health information development
where gaps exist."),
# Contact e-mail link rendered bold inside the closing paragraph
p("If you have any trouble accessing any information on this site or have
any further questions or feedback relating to the data or the tool, then please contact us at: ",
tags$b(tags$a(href="mailto:phs.scotpho@phs.scot", "phs.scotpho@phs.scot", class="externallink")),
"and we will be happy to help.")),
br()
),#Tab panel
###############################################.
## Indicator definitions ----
###############################################.
tabPanel("Indicator definitions", value = "definitions",
#Sidepanel for filtering data
fluidRow(style = "width:60%; margin-left: 2%; min-width: 350px",
h4("Indicator definitions and technical information", style = "color:black;"),
h5(style = "color:black",
"ScotPHO Profiles are made up of a collection of indicators related to a specific theme
e.g. 'Alcohol' or 'Drugs'. Profiles are further divided into topic areas to group similar indicators together.
This page allows users to see available indicators and geographies as well as finding detailed technical information
about how indicators are created."),
br(),
div(title="Choose if you want to see a list of all available indicators or all the details for a specific indicator",
radioGroupButtons("techdoc_selection", status = "primary",
choices = c("List of available indicators", "Detailed information about single indicator"),
label= "Step 1. Select what you want to see:" )),
br(),
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator"',
uiOutput("indicator_choices"),
br()
),
uiOutput("profile_picked_ui"),
br(),
#conditional panel for profile summary
conditionalPanel(condition = 'input.techdoc_selection == "List of available indicators"',
uiOutput("tecdoc_geographies"),
downloadButton("download_techdoc1_csv",'Download indicator summary (.csv)', class = "down")),
#conditional panel for single indicator
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator"',
div(style="display:inline-block",
title="Filter indicator list from step 2 selecting only indicators from a specific domain",
selectizeInput("topic_defined", label = "Step 3b. Filter indicator list selecting a domain within a particular profile (optional)",
width = "100%", choices = topic_list_filter,
selected = "Show all", multiple=FALSE)),
downloadButton("download_detailtechdoc_csv",'Download selected definition', class = "down"),
downloadButton("download_alltechdoc_csv",'Download all indicator definitions', class = "down")
)),
wellPanel(width = 11,
# display flextable
conditionalPanel(condition = 'input.techdoc_selection == "List of available indicators"',
br(),
br(),
uiOutput("techdoc_display")),
#techdoc single indicator
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator" & input.indicator_selection != null',
useShinydashboard(),
valueBoxOutput("indicator", width=12),
column(5,
ind_def_box("Definition", "definition"),
ind_def_box("Data source", "source"),
ind_def_box("Numerator", "numerator"),
ind_def_box("Measure", "measure"),
ind_def_box("Rounding and imputation", "rounding"),
ind_def_box("Year type", "year"),
ind_def_box("Trends from", "trends_from"),
ind_def_box("Geographies available", "geos"),
ind_def_box("Notes,caveats and other info", "notes"),
ind_def_box("Date last updated", "last_updated")),
column(5,
ind_def_box("Rationale for inclusion", "rationale"),
ind_def_box("Diagnostic codes & position", "diagnosis"),
ind_def_box("Denominator", "denominator"),
ind_def_box("Disclosure control", "disclosure"),
ind_def_box("Age group", "age"),
ind_def_box("Sex", "sex"),
ind_def_box("Aggregation", "aggregation"),
ind_def_box("Frequency of update", "update_frequency"),
ind_def_box("Confidence interval method", "confidence_interval"),
ind_def_box("Links to supporting information", "supporting_info"),
ind_def_box("Next update due", "next_update") ))
) # well panel
), #tab panel
###############################################.
##############Resources----
###############################################.
# "Resources" tab: bullet list of external links (guides, code repositories, lookups).
# Fixes: the geography-codes href had a leading space (broken link target) and the
# statistics-guide link carried a copy-pasted "#Link to user guide" comment.
tabPanel("Resources", value = "resources",
sidebarPanel(width=1),  # empty sidebar used purely as a left margin/spacer
mainPanel(
h4("Resources", style = "color:black;"),
p("We list a number of resources that help you to understand better the profiles or to
carry out similar analysis to ours"),
tags$ul(
#Link to user guide
tags$li(class= "li-custom", tags$a(href="https://www.scotpho.org.uk/media/1880/scotpho-profiles-quick-reference-guide-sep2019.docx",
"User quick reference guide", class="externallink"),
" - Learn how to use and get the most out of the tool"),
#Link to overview reports
tags$li(class= "li-custom", tags$a(href="http://www.scotpho.org.uk/comparative-health/profiles/resources/",
"Overview reports", class="externallink"),
" - These provide context, narrative and analysis for each profile"),
#Link to statistics guide
tags$li(class= "li-custom", tags$a(href="http://www.scotpho.org.uk/media/1026/explanation-of-statistics-used-in-profiles-v2.pptx",
"Statistics of the profiles", class="externallink"),
" - A guide and explanation of the statistics used in the profiles"),
#Link to timetable of updates
tags$li(class= "li-custom", tags$a(href="https://docs.google.com/spreadsheets/d/e/2PACX-1vQUQMORMqe9RrMnS9WJSu51Q6ef0rubiF1M-QN3BYZIBueErtTvvbRe_kTZbWmnupiO_Uie80BoZCnK/pubhtml",
"Timetable of updates", class="externallink"),
"- List of available indicators, date of last update and expected next update"),
#Link to Github repositories
tags$li(class= "li-custom", tags$a(href="https://github.com/ScotPHO/indicator-production",
"Indicator production code", class="externallink"),
" and ",
tags$a(href="https://github.com/ScotPHO/scotpho-profiles-tool",
"Profile tool code", class="externallink"),
"- Access the code used to produce the indicator data and this tool"),
#Link to population lookups
tags$li(class= "li-custom", tags$a(href="https://www.opendata.nhs.scot/dataset/population-estimates",
"Population estimate", class="externallink"), " and ",
# leading space removed from href: " https://..." is not a valid URL
tags$a(href="https://www.opendata.nhs.scot/dataset/geography-codes-and-labels",
"geography names and codes", class="externallink"),
"- Where you can find the files with the populations and geographies
used for the analysis"),
#Link to shapefiles
tags$li(class= "li-custom", tags$a(href="https://data.gov.uk/publisher/scottish-government-spatial-data-infrastructure",
"Shapefiles", class="externallink"),
"- Where you can find the shapefiles used for the map")
), #Bullet point list bracket
br()
) # mainPanel bracket
), #Tab panel bracket
###############################################.
##############Evidence for action----
###############################################.
# Tab that is really an external hyperlink: clicking the tab label opens the
# evidence-for-action page in a new browser tab instead of switching panels.
tabPanel(a("Evidence for action", href="https://www.scotpho.org.uk/comparative-health/profiles/resources/evidence-for-action/", target="_blank")
), #tabPanel bracket
###############################################.
############## Tour of the tool----
###############################################.
tabPanel("Tour of the tool", value = "tour",
sidebarPanel(width=1),
mainPanel(width=10,
fluidRow(p(h4("Welcome to the ScotPHO Profiles Tool"),
h5("This interactive tool provides access to a range of public
health related indicators at different geographies including NHS boards, council areas and health and
social care partnerships.", style = "color:black;"),
h5("There are different ways to navigate around the tool.", style = "color:black;"),
h5("Different visualisations can be opened using the menu bar (the blue strip) at the top of the screen.",
style = "color:black;"),
img(src='introjs_tabset_panel.PNG',width=300),
br(),
h5("The 'Home' option in the menu bar will return to the profiles tool homepage.",
style = "color:black;"),
style = "font-size:20px")),
hr(),
fluidRow(column(6,
h5("The profile summary allows you to look at multiple indicators within an area at the same time.",
style = "color:black;")),
column(6, img(src='tour_summary1.PNG'))),
hr(),
fluidRow(column(3,
h5("The trend and rank charts allow detailed exploration of one indicator at a time.",
style = "color:black;")),
column(9, img(src='tour_trendrank1.PNG'))),
hr(),
fluidRow(p(h5("Throughout the tool use the dropdown menus to change which indicators or geographies are displayed in the charts.",
style = "color:black;"),
img(src='tour_summary2.png', style = "vertical-align: middle; border-style: solid; border-color: black; border-width: 1px"),
column(6, h5("While using dropdown menus mouse click within a dropdown menu and press backspace on your keyboard ('<-') then start typing a word to quickly find the options you are looking for",
style = "color:black;")),
column(6, img(src='introjs_how_to_select.png')))),
hr(),
br(),
fluidRow(column(8,
p(h5("Throughout the tool look out for options in each window that provide",
style = "color:black;"),
tags$ul( tags$li("indicator definitions or help to interpret a visualisation,"),
tags$li("data download options for individual charts,"),
tags$li("image downloads for individual charts.")))),
column(4, img(src='tour_rankmap2.PNG'))),
hr(),
br(),
fluidRow(column(6,
h5("The 'Data' window can be used to filter and download profiles data.",
style = "color:black;")),
column(6, img(src='tour_data1.PNG'))),
hr(),
br(),
fluidRow(column(6,
h5("The inequalities module allows exploration of deprivation effects for a selection of indicators from the main profiles tool.",
style = "color:black;")),
column(6, img(src='tour_ineq1.png'))),
hr(),
br(),
fluidRow(h5("There are also options to find out information such as detailed descriptions of the profile indicators, indicator update schedules and links to evidence for action briefings.",
style = "color:black;"),
img(src='tour_about1.PNG', width="100%"))
)#main panel bracket
), #tab panel bracket
###############################################.
##############Other profiles----
###############################################.
# "Other profiles" tab: links to third-party profiling tools. Comment fixes only:
# the first link is NSS Discovery (was mislabelled "GCPH") and the last is
# Scotland's Environment Hub (was mislabelled "Scottish nation").
tabPanel("Other profiles", value = "others",
sidebarPanel(width=1),  # empty sidebar used purely as a left margin/spacer
mainPanel(
h4("Alternative profiles & resources", style = "color:black;"),
p("There are a number of organisations that provide local information relating to the wider determinants of health in Scotland.
Below are links to some of alternative profiling products."),
tags$ul(
#Link to NSS Discovery
tags$li(class= "li-custom", tags$a(href="http://www.nssdiscovery.scot.nhs.uk/",
"NSS Discovery", class="externallink")),
#Link to GCPH
tags$li(class= "li-custom", tags$a(href="http://www.understandingglasgow.com/",
"Glasgow Centre for Population Health (GCPH)", class="externallink")),
#Link to Fife
tags$li(class= "li-custom", tags$a(href="https://knowfife.fife.gov.uk/",
"KnowFife Dataset", class="externallink")),
#Link to IS
tags$li(class= "li-custom", tags$a(href="http://www.improvementservice.org.uk/community-planning-outcomes-profile.html",
"Improvement Service (IS) - Community planning outcomes profile (CPOP)", class="externallink")),
#Link to NRS
tags$li(class= "li-custom", tags$a(href="https://www.nrscotland.gov.uk/statistics-and-data/statistics/stats-at-a-glance/council-area-profiles",
"National Records of Scotland (NRS) Council Area Profiles", class="externallink")),
#Link to stats.gov.scot
tags$li(class= "li-custom", tags$a(href="http://statistics.gov.scot/home",
"Statistics.gov.scot", class="externallink")),
#Link to Scotland's Environment Hub
tags$li(class= "li-custom", tags$a(href="http://www.environment.gov.scot/",
"Scotland's Environment Hub", class="externallink"))
), #Bullet point list bracket
br()
) # mainPanel bracket
) #tabPanel bracket
)# NavbarMenu bracket
), #Bracket navbarPage
div(style = "margin-bottom: 30px;"), # this adds breathing space between content and footer
###############################################.
##############Footer----
###############################################.
# Site-wide footer, fixed to the bottom of the viewport.
# Fixes: (1) mojibake "ยฉ" (UTF-8 "©" misdecoded as windows-874) restored to "©";
# (2) the tweet-intent URL used bare "%" where "%20" was intended, producing
# invalid percent-escapes ("%ou", "%Sc", ...) that mangled the shared text.
#Copyright warning
tags$footer(column(6, "© Scottish Public Health Observatory v2.0 2018"),
column(2, tags$a(href="mailto:phs.scotpho@phs.scot", tags$b("Contact us!"),
class="externallink", style = "color: white; text-decoration: none")),
column(3, tags$a(href="https://www.scotpho.org.uk/about-us/scotpho-website-policies-and-statements/privacy-and-cookies", tags$b("Privacy & cookies"),
class="externallink", style = "color: white; text-decoration: none")),
# Twitter Web Intent: spaces in query parameters must be percent-encoded (%20)
column(1, actionLink("twitter_share", label = "Share", icon = icon("twitter"),
style= "color:white;", onclick = sprintf("window.open('%s')",
"https://twitter.com/intent/tweet?text=Check%20out%20ScotPHO's%20profile%20tool&url=https://scotland.shinyapps.io/ScotPHO_profiles_tool/"))),
style = "
position:fixed;
text-align:center;
left: 0;
bottom:0;
width:100%;
z-index:1000;
height:30px; /* Height of the footer */
color: white;
padding: 10px;
font-weight: bold;
background-color: #1995dc"
)
################################################.
) #bracket tagList
###END
| /shiny_app/ui.R | no_license | fortune417/scotpho-profiles-tool | R | false | false | 60,424 | r | #Code to create ScotPHO's Shiny profile platform
# This script includes the user-interface definition of the app.
###############################################.
## Header ----
###############################################.
tagList( #needed for shinyjs
useShinyjs(), # Include shinyjs
introjsUI(), # Required to enable introjs scripts
navbarPage(id = "intabset", #needed for landing page
title = div(tags$a(img(src="scotpho_reduced.png", height=40), href= "http://www.scotpho.org.uk/"),
style = "position: relative; top: -5px;"), # Navigation bar
windowTitle = "ScotPHO profiles", #title for browser tab
theme = shinytheme("cerulean"), #Theme of the app (blue navbar)
collapsible = TRUE, #tab panels collapse into menu in small screens
header =
tags$head( #CSS styles
cookie_box, ##Cookie box
tags$link(rel="shortcut icon", href="favicon_scotpho.ico"), #Icon for browser tab
#Including Google analytics and Cookie control
includeScript("google-analytics.js"),
# HTML('<script src="https://cc.cdn.civiccomputing.com/8/cookieControl-8.x.min.js"></script>'),
# includeScript("cookie-control.js"),
includeCSS("www/styles.css"),
HTML("<base target='_blank'>") # to make external links open a new tab
),
###############################################.
## Landing page ----
###############################################.
tabPanel(
title = " Home", icon = icon("home"),
mainPanel(width = 11, style="margin-left:4%; margin-right:4%",
introBox(
fluidRow(column(7,(h3("Welcome to the ScotPHO profiles", style="margin-top:0px;"))),
(column(4,actionButton("btn_landing",label="Help: Take tour of the tool",icon=icon('question-circle'),class="down")))),
data.step = 1,
data.intro =(p(h4("Welcome to the ScotPHO Profiles Tool"),
h5("This interactive tool provides access to a range of public
health related indicators at different geographies including NHS boards, council areas and health and
social care partnerships."),
br(),
h5("There are different ways to navigate around the tool."),
h5("Different visualisations can be opened using the menu bar (the blue strip) at the top of the screen."),
img(src='introjs_tabset_panel.PNG',width=300),
br(),
h5("The 'Home' option in the menu bar will return to the profiles tool homepage."),
style = "color:0E3E5D; font-size:20px")),
data.position = "left"),
fluidRow(
#Summary box
column(6, class="landing-page-column",br(), #spacing
introBox(
lp_main_box(image_name= "landing_button_heatmap_2",
button_name = 'jump_to_summary', title_box = "Profile summary",
description = 'A high level view of an area across a set of indicators'),
data.step = 2,
data.intro = h5("The profile summary allows you to look at multiple indicators within an area at the same time"),
data.position = "bottom-right-aligned")),
#Table box
column(6, class="landing-page-column",
br(), #spacing
introBox( # tour of the tool
lp_main_box(image_name= "landing_button_data_table",
button_name = 'jump_to_table', title_box = "Data",
description = 'View and download the data behind the tool'),
data.step = 6,
data.intro = h5("The 'Data' window can be used to filter and download profiles data")))),
#2nd row of boxes
fluidRow(
br(), #spacing
column(8, style = "padding-left: 0px; padding-right: 0px;",
introBox( #tour of the rank and trend tabs
data.step = 3,
data.intro = h5("The trend and rank charts allow detailed exploration of one indicator at a time."),
#Trend plot box
column(6, class="landing-page-column",
lp_main_box(image_name= "landing_button_time_trend",
button_name = 'jump_to_trend', title_box = "Trend",
description = 'Look at how an indicator changes over time')),
#Rank/map plot box
column(6, class="landing-page-column",
lp_main_box(image_name= "landing_button_maprank",
button_name = 'jump_to_rank', title_box = "Rank",
description = 'Compare geographical variation for an indicator'))
)),#introBox 3 close
#Inequalities box
column(4, class="landing-page-column",
introBox(
data.step = 7,
data.intro = h5("The inequalities module allows exploration of deprivation effects for a selection of indicators from the main profiles tool."),
lp_main_box(image_name= "landing_button_health_inequality",
button_name = 'jump_to_ineq', title_box = "Health inequalities",
description = 'Explore how an indicator varies with deprivation'))
) #introBox 7 close
), # fluid row close
# end of landing page second row
# third row of landing page
                    fluidRow(
                      # Third row of landing-page boxes: About / Evidence / Indicator updates /
                      # Resources / Definitions / Other profiles. Step 8 of the guided tour
                      # (introjs) highlights this whole row.
                      introBox(data.step=8, # tour around the tool
                               data.intro =h5("There are also options to find out information such as detailed descriptions of the profile indicators, indicator update schedules and links to evidence for action briefings"),
                               #About box
                               column(4, class="landing-page-column",
                                      lp_about_box(image_name= "landing_button_about_2", button_name = 'jump_to_about',
                                                   title_box = "About", description = 'About ScotPHO Profiles'),
                                      #Evidence box - hand-built div (not lp_about_box): its actionButton
                                      #opens an external URL in a new tab via the onclick handler
                                      div(class="landing-page-box-about",
                                          div("Evidence for action",title="Links to briefing documents containing practical actions for improvement", class = "landing-page-box-title" ),
                                          div(class = "landing-page-about-icon", div(img(src="landing_button_other_profile.png",class="centerabout"))),
                                          actionButton('jump_to_efa', 'Links to ScotPHO evidence for action briefings',
                                                       onclick ="window.open('https://www.scotpho.org.uk/comparative-health/profiles/resources/evidence-for-action/', '_blank')",
                                                       class="landing-page-button",
                                                       icon = icon("arrow-circle-right", "icon-lp")))),
                               column(4, class="landing-page-column",
                                      #Indicator updates
                                      lp_about_box(image_name= "landing_button_calendar", button_name = 'btn_indicator_updates',
                                                   title_box = "Indicator updates",
                                                   description = 'Find out which indicators have been updated in the last 60 days'),
                                      #Resources box
                                      lp_about_box(image_name= "landing_button_resources", button_name = 'jump_to_resources',
                                                   title_box = "Resources",
                                                   description = 'Find technical information about the ScotPHO profile definitions and methodology')),
                               column(4, class="landing-page-column",
                                      #Definitions
                                      lp_about_box(image_name= "landing_button_technical_resources",
                                                   button_name = 'jump_to_definitions', title_box = "Definitions",
                                                   description = 'Find out about indicator definitions and data sources'),
                                      #Other profiles
                                      lp_about_box(image_name= "landing_button_related_links", button_name = 'jump_to_others',
                                                   title_box = "Other profiles", description = 'Links to alternative profiling tools'))
                      ) #Close IntroBox
                    )#Fluidrow bracket
) #main Panel bracket
),# tab panel bracket
###############################################.
## Summary ----
###############################################.
tabPanel("Summary", icon = icon("list-ul"), value = "summary",
introBox(
wellPanel(fluidRow( #Filter options
column(3,
div(title="Select a geography level first, then select the are you want from the list. You can click in the box, hit backspace and start to type if you want to start searching.",
p(tags$b("Step 1. Select a geography level and then an area of interest.")),
selectInput("geotype_summary", label = NULL, choices=areatype_list,
selected = "Health board"),
conditionalPanel(#Conditional panel for extra dropdown for localities & IZ
condition = "input.geotype_summary== 'HSC locality' | input.geotype_summary == 'Intermediate zone' ",
div(title="This option restricts the HSC locality or IZ options below to only areas within a parent geography",
selectInput("loc_iz_summary", label = "Step 1b. Select a region for localities or intermediate zones",
choices = partnership_name))
),
uiOutput("geoname_ui_summary"))
),
column(3,
div(title="Select the profile you are interested in. Not all profiles are available for all geographies",
p(tags$b("Step 2. Select a profile ")),
div(id= "summary_div", uiOutput("profile_ui_summary")),
# domain if spine selected
conditionalPanel(condition = 'input.chart_summary == "Spine"',
uiOutput("topic_ui_spine")))
),
column(3,
div(title="Compare against another area (e.g. Scotland) or against a previous period to see the evolution of the area",
p(tags$b("Step 3. Select to compare by ")),
awesomeRadio("comp_summary", label = NULL,
choices = list("Area or" = 1, "Time" = 2),
selected = 1, inline=TRUE, checkbox = TRUE),
uiOutput("comp_ui_summary")) # comparator options
),
column(3,
actionButton("help_summary",label="Help", icon= icon('question-circle'), class ="down"),
actionButton("defs_summary",label="Definitions", icon= icon('info'), class ="down"),
downloadButton('download_summary', 'Download data', class = "down"),
uiOutput("save_chart_ui"))),
fluidRow(column(12,
column(3),#empty column to replicate offset and center content
column(6,
p(tags$b("Step 4. Select what type of summary you want to see:"),
" snapshot is a comparison with the latest data available,
trend will show how things are changing over time, and
spine compares indicators with the rest of areas of the same level."),
radioGroupButtons("chart_summary", status = "primary", justified = TRUE,
choices = c("Snapshot", "Trend", "Spine"), label=NULL )),
column(3) #empty column to replicate offset and center content
)) # column and row brackets
), #well panel bracket
data.step = 4,
data.intro =(p(h5("Throughout the tool use the dropdown menus to change which indicators or geographies are displayed in the charts."),
br(),
h5("While using dropdown menus mouse click within a dropdown menu and press backspace on your keyboard ('<-') then start typing a word to quickly find the options you are looking for"),
img(src='introjs_how_to_select.png')))
), #introbox bracket
mainPanel(width = 12,
shiny::hr(),
bsModal("mod_defs_summary", "Definitions", "defs_summary",
htmlOutput('defs_text_summary')),
fluidRow(column(4,
h4(textOutput("summary_title"), style="color: black; text-align: left"),
h5(textOutput("summary_subtitle"), style="color: black; text-align: left")
),
column(3,
br(),
br(),
p(img(src='signif_better.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Better than comparator", br(),
img(src='non_signif.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Not different to comparator", br(),
img(src='signif_worse.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"Worse than comparator", br(),
img(src='signif_nocalc.png', height=18, style="padding-right: 2px; vertical-align:middle"),
"No differences can be calculated")),
conditionalPanel(condition = 'input.chart_summary == "Spine"',
column(5,
br(),
br(),
uiOutput("ui_spine_legend_selected"),
uiOutput("ui_spine_legend_areatype"),
uiOutput("ui_spine_legend_comparator")))),
# Depending what users selects different visualizations
uiOutput("summary_expl_text"),
uiOutput("summary_ui_plots")
)
), #Tab panel bracket
###############################################.
## Time trend ----
###############################################.
           # Trend tab: plot one indicator over time for any mix of geographies.
           # Sidebar holds all filter controls; main panel shows the plotly trend chart.
           tabPanel("Trend", icon = icon("area-chart"), value = "trend",
                    sidebarPanel(width=4,
                                 column(6,
                                        actionButton("help_trend",label="Help", icon= icon('question-circle'), class ="down")),
                                 column(6,
                                        actionButton("defs_trend", label="Definitions", icon= icon('info'), class ="down")),
                                 column(12,
                                        shiny::hr(),
                                        div(title="Select an indicator to see trend information. Click in this box, hit backspace and start to type if you want to quickly find an indicator.",
                                            selectInput("indic_trend", shiny::HTML("<p>Step 1. Select an indicator <br/> <span style='font-weight: 400'>(hit backspace and start typing to search for an indicator)</span></p>"),
                                                        choices=indicator_list, selected = "Alcohol-related hospital admissions")),
                                        shiny::hr(),
                                        div(title="Use the options below to add geographies to the trend chart, remember some indicators may not be available for all geography types. See technical information to find out which geographies indicators are available for.",
                                            p(tags$b("Step 2. Select areas to plot."),
                                              p("(You can select multiple areas of any geography type)."))),
                                        awesomeCheckbox("scotname_trend", tags$b("Scotland"), value=TRUE)),
                                 # One multi-select per geography type; empty string = nothing selected
                                 column(6,
                                        selectizeInput("hbname_trend", "Health board", choices = c("Select health boards" = "", paste(hb_name)),
                                                       multiple=TRUE, selected = ""),
                                        selectizeInput("partname_trend", "HSC partnership", choices = c("Select partnerships" = "", paste(partnership_name)),
                                                       multiple=TRUE, selected = "")),
                                 column(6,
                                        selectizeInput("caname_trend", "Council area", choices = c("Select council areas" = "", paste(la_name)),
                                                       multiple=TRUE, selected = ""),
                                        selectizeInput("adpname_trend", "Alcohol & drug partnership", choices = c("Select partnerships" = "", paste(adp_name)),
                                                       multiple=TRUE, selected = "")),
                                 div(title="This option restricts the HSC locality or IZ options below to only areas within a parent geography",
                                     selectInput("loc_iz_trend", "To choose a locality or intermediate zone, first
                     select an HSC partnership", choices = partnership_name)),
                                 # Locality/IZ dropdowns are rendered server-side so they can be
                                 # greyed out when no data exists for the selected indicator
                                 column(6,div(title="If greyed out locality data not available",uiOutput("loc_ui_trend"))),
                                 column(6,div(title="If greyed out IZ data not available",uiOutput("iz_ui_trend"))),
                                 column(12,
                                        shiny::hr(),
                                        div(tags$b("Step 3. Decide how to present data in the chart.")),
                                        div(title= "Display the rate/percentage data or the raw numbers.",
                                            awesomeRadio("var_plot_trend", label =NULL, inline = TRUE,
                                                         choices = c("Rate/Percentage" = "measure",
                                                                     "Numerator" = "numerator"))),
                                        div(title="Show or hide the 95% confidence intervals for the data selected.", # tooltip
                                            awesomeCheckbox("ci_trend", label = "95% confidence intervals", value = FALSE)),
                                        downloadButton('download_trend', 'Download data', class = "down"),
                                        savechart_button('download_trendplot', 'Save chart', class = "down", disabled=TRUE))),
                    mainPanel(width = 8, #Main panel
                              bsModal("mod_defs_trend", "Definitions", "defs_trend", htmlOutput('defs_text_trend')),
                              h4(textOutput("title_trend"), style="color: black; text-align: left"),
                              h5(textOutput("subtitle_trend"), style="color: black; text-align: left"),
                              withSpinner(plotlyOutput("trend_plot"))
                    )
           ), #Tab panel bracket
###############################################.
## Rank and map ----
###############################################.
tabPanel("Rank", icon = icon("signal"), value = "rank",
wellPanel(#Filter options
column(width = 4,
div(title="Select an indicator to see comparative information. Click in this box, hit backspace and start to type if you want to quickly find an indicator.",
selectInput("indic_rank", shiny::HTML("<p>Step 1. Select an indicator <span style='font-weight: 400'> <br/>
(hit backspace and start typing to search for an indicator)</span></p>"),
choices=indicator_list, selected = "Alcohol-related hospital admissions")),
div(title="Use this option to change the type of geography displayed in the chart.
Some indicators are not be available for all geography types.
See the indicator definitions tab to find out which geographies indicators are available for.",
uiOutput("geotype_ui_rank")),
div(title="There are too many hscp localities or IZs to show in the rank chart a
selection must be made to limit localities or IZs to only those within a parent area",
conditionalPanel( #Conditional panel for extra dropdown for localities & IZ
condition = "input.geotype_rank == 'HSC locality' | input.geotype_rank == 'Intermediate zone' ",
selectInput("loc_iz_rank", "Step 2b. Select a region for localities or intermediate zones",
choices = partnership_name)))
),
column(width = 3,
div(title="This option will change whether the chart compares areas to another area (e.g. the Scotland average) or against a different time period (e.g. figures for the year 2017 compared to the year 2010).",
awesomeRadio("comp_rank", label =shiny::HTML("<p>Step 3. Select to compare by:<br/><br/></p>"), #br required to try and keep alignment across columns
choices = list("Area or"= 1, "Time" = 2),
selected = 1, inline=TRUE, checkbox=TRUE)),
conditionalPanel(condition = "input.comp_rank == 1 ",
div(title="Use this option to change which area is the comparator (red line in barchart)",
selectInput("geocomp_rank", "Step 3b. Select comparator area", choices = comparator_list,
selectize=TRUE, selected = "Scotland")),
div(tags$b("Step 3c. Decide how to present data in the chart.")),
div(title="Show or hide the 95% confidence intervals on chart.",
awesomeCheckbox("ci_rank", label = "95% confidence intervals", value = FALSE))),
conditionalPanel(condition = "input.comp_rank == 2 ",
uiOutput("yearcomp_ui_rank"))
),
column(width = 3,
div(title="Use this option to change the time period presented in the chart and map",
uiOutput("year_ui_rank"))),
column(width = 2,
introBox(
actionButton("rank_help",label="Help", icon= icon('question-circle'), class ="down"),
actionButton("defs_rank", label="Definitions", icon= icon('info'), class ="down"),
downloadButton('download_rank', 'Download data', class = "down"),
savechart_button('download_rankplot', 'Save chart', class = "down", disabled=TRUE),
savechart_button('download_mapplot', 'Save map', class = "down"),
data.step = 5,
data.intro =(p(h5("Throughout the tool look out for options in each window that provide"),
tags$li("indicator definitions or help to interpret a visualisation,",style="color: #007ba7"),
tags$li("data download options for individual charts,",style="color: #007ba7"),
tags$li("image downloads for individual charts.",style="color: #007ba7")))))
), #well pannel bracket
mainPanel(width = 12, #Main panel
bsModal("mod_defs_rank", "Definitions", "defs_rank", htmlOutput('defs_text_rank')),
uiOutput("rank_summary"), #description of the charts
shiny::hr(), #header row
column(width = 7, #rank bar
h4(textOutput("rank_title"), style="color: black; text-align: left"),
h5(textOutput("rank_subtitle"), style="color: black; text-align: left"),
withSpinner(plotlyOutput("rank_plot"))),
column(width = 5, #map
uiOutput("rank_legend"),
uiOutput("map_ui"))
) #main panel bracket
), #Tab panel bracket
###############################################.
## Health inequalities ----
###############################################.
           # Inequalities tab: deprivation (SIMD quintile) analysis for a subset of
           # indicators. Three sub-views driven by measure_simd: Trend, Gap (SII/RII),
           # and Risk (population attributable risk).
           tabPanel("Inequalities", icon = icon("balance-scale"), value = "ineq",
                    sidebarPanel(width = 3, #Filter options
                                 actionButton("help_simd", label="Help",
                                              icon= icon('question-circle'), class ="down"),
                                 actionButton("defs_simd",label="Definitions", icon= icon('info'), class ="down"),
                                 div(style = "margin-top: 30px",
                                     selectInput("geotype_simd", label = "Step 1 - Select a geography level and an area",
                                                 choices = areatype_depr_list, selected = "Scotland")),
                                 uiOutput("geoname_ui_simd"),
                                 selectInput("indic_simd", label = "Step 2 - Choose an indicator (type to search)",
                                             choices = ind_depr_list),
                                 uiOutput("year_ui_simd"),
                                 div(title="Select what aspect of inequality you want to explore.", # tooltip
                                     style = "margin-top: 10px; margin-bottom: 20px;",
                                     radioGroupButtons("measure_simd",
                                                       label= "Step 4 - Select what aspect of inequality you want to explore.",
                                                       choices = depr_measure_types, status = "primary",
                                                       justified = TRUE
                                     )),
                                 awesomeCheckbox("ci_simd", label = "Show/hide 95% confidence intervals", value = F), # NOTE(review): prefer FALSE over F
                                 tags$div(title="Select if you want to use local or national quintiles", # tooltip
                                          awesomeRadio("quint_type", label= "Local/Scotland quintiles",
                                                       choices = c("Local", "Scotland"), inline=TRUE, checkbox = TRUE)),
                                 downloadButton(outputId = 'download_simd',
                                                "Download data", class = "down"),
                                 savechart_button('report_simd', 'Save charts', class = "down", disabled=TRUE)
                    ),
                    mainPanel(width = 9, #Main panel
                              bsModal("mod_defs_simd", "Definitions", "defs_simd", htmlOutput('defs_text_simd')),
                              #Overview: narrative summary box, then trend and bar chart
                              div(class= "depr-text-box",
                                  div(class= "title", textOutput("simd_nutshell_title")),
                                  div(class= "content", htmlOutput("simd_text"))),
                              conditionalPanel("input.measure_simd == 'Trend'",
                                               column(6,
                                                      htmlOutput("simd_barplot_title"),
                                                      withSpinner(plotlyOutput("simd_bar_plot"))),
                                               column(6,
                                                      htmlOutput("simd_trendplot_title"),
                                                      withSpinner(plotlyOutput("simd_trend_plot"))),
                                               # Shared static legend for the quintile colours
                                               column(12, align="center", #legend
                                                      style= "padding-bottom: 40px;",
                                                      p(column(1),
                                                        column(2, img(src="quintile1.png", height = "16px"), "1 - most deprived"),
                                                        column(1, img(src="quintile2.png", height = "16px"), "2"),
                                                        column(1, img(src="quintile3.png", height = "16px"), "3"),
                                                        column(1, img(src="quintile4.png", height = "16px"), "4"),
                                                        column(2, img(src="quintile5.png", height = "16px"), "5 - least deprived"),
                                                        column(2, img(src="simd_overall.png", height = "8px"), "Average"),
                                                        column(1)))
                              ),#trend minitab bracket
                              #Absolute and relative inequality (SII and RII charts)
                              conditionalPanel("input.measure_simd == 'Gap'",
                                               column(6, htmlOutput("title_sii"), br(),
                                                      withSpinner(plotlyOutput("simd_sii_plot"))),
                                               column(6,
                                                      htmlOutput("title_rii"),
                                                      withSpinner(plotlyOutput("simd_rii_plot")))
                              ),
                              #Population attributable risk
                              conditionalPanel("input.measure_simd == 'Risk'",
                                               column(6,
                                                      htmlOutput("simd_par_barplot_title"),
                                                      withSpinner(plotlyOutput("simd_par_barplot")),
                                                      p(img(src= "signif_worse.png", height = "16px"),
                                                        "Attributable to inequality",
                                                        style= "text-align: center; padding-bottom: 40px")),
                                               column(6,
                                                      htmlOutput("simd_par_trendplot_title"),
                                                      withSpinner(plotlyOutput("simd_par_trendplot")))
                              )
                    )
           ), #Tab panel bracket
###############################################.
## Data ----
###############################################.
           # Data tab: filter the full dataset by indicator/domain/profile, geography
           # and time period, preview it in a DT table and download as CSV.
           tabPanel("Data", icon = icon("table"), value = "table",
                    #Sidepanel for filtering data
                    mainPanel(
                      width = 12, style="margin-left:0.5%; margin-right:0.5%",
                      #Row 1 for intro
                      fluidRow(
                        p("Download the data used in the tool",
                          style = "font-weight: bold; color: black;"),
                        p("Use the filters below to select the data you want to download. ",
                          "To delete choices use backspace or select item and delete"),
                        br()
                      ),
                      #Row 2 for selections
                      fluidRow(
                        # Column 1: choose whether to filter by indicator, domain or profile;
                        # only the selector matching the chosen radio option is displayed
                        column(3,
                               p("Select what data you want", style = "font-weight: bold; color: black;"),
                               div("All available indicators will be displayed for
                                   selected geography if none specified"),
                               awesomeRadio("product_filter", label=NULL, choices = c("Indicator", "Domain", "Profile"), selected = NULL, inline = FALSE,
                                            status = "primary", checkbox = TRUE),
                               conditionalPanel(condition="input.product_filter=='Indicator'",
                                                selectizeInput("indicator_filter", label = NULL,
                                                               choices = indicator_list, selected = NULL,
                                                               multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type indicators to filter by"))
                               ),
                               conditionalPanel(condition="input.product_filter=='Domain'",
                                                selectizeInput("topic_filter", label = NULL,
                                                               choices = topic_list, selected = NULL,
                                                               multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type domains to filter by"))
                               ),
                               conditionalPanel(condition="input.product_filter=='Profile'",
                                                selectizeInput("profile_filter", label = NULL,
                                                               choices = profile_list, selected = NULL,
                                                               multiple=TRUE, options = list(maxOptions = 1000, placeholder = "Select or type profiles to filter by"))
                               )
                        ),# column bracket
                        # Column 2: geography selection - one checkbox per geography type,
                        # each revealing an area picker when ticked
                        column(3,
                               p("Select what areas you want", style = "font-weight: bold; color: black;"),
                               # Scotland selections
                               awesomeCheckbox("scotland",label = "Scotland", value = FALSE),
                               # Panel for health board selections
                               awesomeCheckbox("hb",label = "Health board", value = FALSE),
                               conditionalPanel(
                                 condition = "input.hb == true",
                                 selectInput("hb_true", label = NULL,
                                             choices = hb_name, selected = NULL, multiple=TRUE)),
                               # Panel for council area selections
                               awesomeCheckbox("la", label = "Council area", value = FALSE),
                               conditionalPanel(condition = "input.la == true",
                                                selectizeInput("la_true", label = NULL,
                                                               choices = la_name, selected = NULL, multiple=TRUE,
                                                               options = list(placeholder = "Select or type council area of interest"))),
                               # Panel for ADP selections
                               awesomeCheckbox("adp",label = "Alcohol & drug partnership", value = FALSE),
                               conditionalPanel(condition = "input.adp == true",
                                                selectizeInput("adp_true", label = NULL,
                                                               choices = adp_name, selected = NULL, multiple=TRUE,
                                                               options = list(placeholder = "Select or type ADP of interest")))
                        ), # column bracket
                        # Column 3: small-area geographies (localities/IZs are filtered by a
                        # parent HSC partnership first), plus all-geographies and code search
                        column(3,
                               br(),
                               # Panel for HSC partnership selections
                               awesomeCheckbox("hscp",label = "Health & social care partnership", value = FALSE),
                               conditionalPanel(
                                 condition = "input.hscp == true",
                                 selectInput("hscp_true", label = NULL, choices = partnership_name,
                                             selected = NULL, multiple=TRUE)),
                               # Panel for locality selections
                               awesomeCheckbox("hscl",label = "Health & social care locality", value = FALSE),
                               conditionalPanel(condition = "input.hscl == true",
                                                selectizeInput("hscl_parent", label = "Filter locality list by HSC partnership",
                                                               choices = parent_geo_list,
                                                               selected = "Show all", multiple=FALSE),
                                                # if they haven't selected all, show tickbox so they can select all localities of parent area
                                                conditionalPanel(condition = "input.hscl_parent != 'Show all'",
                                                                 checkboxInput("hscl_parent_all",label = "Select all HSC localities in this area",
                                                                               value = FALSE)),
                                                uiOutput("hscl_filtered")),
                               # Panel for intermediate zone selections
                               awesomeCheckbox("iz",label = "Intermediate zone", value = FALSE),
                               conditionalPanel(condition = "input.iz == true",
                                                selectizeInput("iz_parent", label = "Filter intermediate zone list by HSC partnership",
                                                               choices = parent_geo_list, selected = "Show all", multiple=FALSE),
                                                # if they haven't selected all, show tickbox so they can select all izs of parent area
                                                conditionalPanel(condition = "input.iz_parent != 'Show all'",
                                                                 checkboxInput("iz_parent_all",label = "Select all intermediate zones in this area", value = FALSE)),
                                                uiOutput("iz_filtered")),
                               # To select all available geographies
                               awesomeCheckbox("all_data",label = "All available geographies", value = FALSE),
                               # to search by code
                               selectizeInput("code", label = NULL, choices = code_list,
                                              options = list(placeholder = 'Or search by area code'),
                                              multiple=TRUE, selected = "")
                        ), #column bracket
                        # Column 4: time period slider plus clear/download actions
                        column(3, style = "width:20%",
                               p("Select the time period", style = "font-weight: bold; color: black;"),
                               sliderInput("date_from",label = NULL, min = min_year,
                                           max = max_year, value = c(min_year,max_year),
                                           step = 1, sep="", round = TRUE,
                                           ticks = TRUE, dragRange = FALSE),
                               br(),
                               actionButton("clear", label = "Clear all filters", icon ("eraser"), class = "down"),
                               downloadButton("download_table_csv", 'Download data', class = "down")
                        ) #column bracket
                      ), #filters fluid row bracket
                      #Row 3- Table preview of the filtered data
                      fluidRow(
                        column(12, div(DT::dataTableOutput("table_filtered"),
                                       style = "font-size: 98%; width: 98%"))
                      )
                    ) # main panel bracket
           ), #Tab panel bracket
###############################################.
##############NavBar Menu----
###############################################.
#Starting navbarMenu to have tab with dropdown list
navbarMenu("Info", icon = icon("info-circle"),
###############################################.
## About ----
###############################################.
           # About tab: static informational text about the tool and the
           # ScotPHO collaboration, with a contact email link.
           tabPanel("About", value = "about",
                    sidebarPanel(width=1),
                    mainPanel(width=8,
                              h4("About", style = "color:black;"),
                              p("ScotPHO's profiles tool allows users to explore the various different profiles
                                produced by the ", tags$a(href="http://www.scotpho.org.uk/about-us/about-scotpho/", "ScotPHO collaboration.",
                                                          class="externallink")),
                              p("The profiles are intended to increase understanding of local health issues
                                and to prompt further investigation, rather than to be used as a performance
                                management tool. The information needs to be interpreted within a local
                                framework; an indicator may be higher or lower in one area compared to another,
                                but local knowledge is needed to understand and interpret differences."),
                              p("The Scottish Public Health Observatory (ScotPHO) collaboration is led
                                by Public Health Scotland, and includes Glasgow Centre for Population Health, National Records of Scotland,
                                the MRC/CSO Social and Public Health Sciences Unit and the Scottish Learning Disabilities Observatory."),
                              p("We aim to provide a clear picture of the health of the Scottish population and the factors
                                that affect it. We contribute to improved collection and use of routine data on health,
                                risk factors, behaviours and wider health determinants. We take a lead in determining
                                Scotland's future public health information needs, develop innovations in public health
                                information and provide a focus for new routine public health information development
                                where gaps exist."),
                              p("If you have any trouble accessing any information on this site or have
                                any further questions or feedback relating to the data or the tool, then please contact us at: ",
                                tags$b(tags$a(href="mailto:phs.scotpho@phs.scot", "phs.scotpho@phs.scot", class="externallink")),
                                "and we will be happy to help.")),
                    br()
           ),#Tab panel
###############################################.
## Indicator definitions ----
###############################################.
tabPanel("Indicator definitions", value = "definitions",
#Sidepanel for filtering data
fluidRow(style = "width:60%; margin-left: 2%; min-width: 350px",
h4("Indicator definitions and technical information", style = "color:black;"),
h5(style = "color:black",
"ScotPHO Profiles are made up of a collection of indicators related to a specific theme
e.g. 'Alcohol' or 'Drugs'. Profiles are further divided into topic areas to group similar indicators together.
This page allows users to see available indicators and geographies as well as finding detailed technical information
about how indicators are created."),
br(),
div(title="Choose if you want to see a list of all available indicators or all the details for a specific indicator",
radioGroupButtons("techdoc_selection", status = "primary",
choices = c("List of available indicators", "Detailed information about single indicator"),
label= "Step 1. Select what you want to see:" )),
br(),
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator"',
uiOutput("indicator_choices"),
br()
),
uiOutput("profile_picked_ui"),
br(),
#conditional panel for profile summary
conditionalPanel(condition = 'input.techdoc_selection == "List of available indicators"',
uiOutput("tecdoc_geographies"),
downloadButton("download_techdoc1_csv",'Download indicator summary (.csv)', class = "down")),
#conditional panel for single indicator
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator"',
div(style="display:inline-block",
title="Filter indicator list from step 2 selecting only indicators from a specific domain",
selectizeInput("topic_defined", label = "Step 3b. Filter indicator list selecting a domain within a particular profile (optional)",
width = "100%", choices = topic_list_filter,
selected = "Show all", multiple=FALSE)),
downloadButton("download_detailtechdoc_csv",'Download selected definition', class = "down"),
downloadButton("download_alltechdoc_csv",'Download all indicator definitions', class = "down")
)),
wellPanel(width = 11,
# display flextable
conditionalPanel(condition = 'input.techdoc_selection == "List of available indicators"',
br(),
br(),
uiOutput("techdoc_display")),
#techdoc single indicator
conditionalPanel(condition = 'input.techdoc_selection == "Detailed information about single indicator" & input.indicator_selection != null',
useShinydashboard(),
valueBoxOutput("indicator", width=12),
column(5,
ind_def_box("Definition", "definition"),
ind_def_box("Data source", "source"),
ind_def_box("Numerator", "numerator"),
ind_def_box("Measure", "measure"),
ind_def_box("Rounding and imputation", "rounding"),
ind_def_box("Year type", "year"),
ind_def_box("Trends from", "trends_from"),
ind_def_box("Geographies available", "geos"),
ind_def_box("Notes,caveats and other info", "notes"),
ind_def_box("Date last updated", "last_updated")),
column(5,
ind_def_box("Rationale for inclusion", "rationale"),
ind_def_box("Diagnostic codes & position", "diagnosis"),
ind_def_box("Denominator", "denominator"),
ind_def_box("Disclosure control", "disclosure"),
ind_def_box("Age group", "age"),
ind_def_box("Sex", "sex"),
ind_def_box("Aggregation", "aggregation"),
ind_def_box("Frequency of update", "update_frequency"),
ind_def_box("Confidence interval method", "confidence_interval"),
ind_def_box("Links to supporting information", "supporting_info"),
ind_def_box("Next update due", "next_update") ))
) # well panel
), #tab panel
###############################################.
##############Resources----
###############################################.
tabPanel("Resources", value = "resources",
sidebarPanel(width=1),
mainPanel(
h4("Resources", style = "color:black;"),
p("We list a number of resources that help you to understand better the profiles or to
carry out similar analysis to ours"),
tags$ul(
#Link to user guide
tags$li(class= "li-custom", tags$a(href="https://www.scotpho.org.uk/media/1880/scotpho-profiles-quick-reference-guide-sep2019.docx",
"User quick reference guide", class="externallink"),
" - Learn how to use and get the most out of the tool"),
#Link to overview reports
tags$li(class= "li-custom", tags$a(href="http://www.scotpho.org.uk/comparative-health/profiles/resources/",
"Overview reports", class="externallink"),
" - These provide context, narrative and analysis for each profile"),
#Link to user guide
tags$li(class= "li-custom", tags$a(href="http://www.scotpho.org.uk/media/1026/explanation-of-statistics-used-in-profiles-v2.pptx",
"Statistics of the profiles", class="externallink"),
" - A guide and explanation of the statistics used in the profiles"),
#Link to timetable of updates
tags$li(class= "li-custom", tags$a(href="https://docs.google.com/spreadsheets/d/e/2PACX-1vQUQMORMqe9RrMnS9WJSu51Q6ef0rubiF1M-QN3BYZIBueErtTvvbRe_kTZbWmnupiO_Uie80BoZCnK/pubhtml",
"Timetable of updates", class="externallink"),
"- List of available indicators, date of last update and expected next update"),
#Link to Github repositories
tags$li(class= "li-custom", tags$a(href="https://github.com/ScotPHO/indicator-production",
"Indicator production code", class="externallink"),
" and ",
tags$a(href="https://github.com/ScotPHO/scotpho-profiles-tool",
"Profile tool code", class="externallink"),
"- Access the code used to produce the indicator data and this tool"),
#Link to population lookups
tags$li(class= "li-custom", tags$a(href="https://www.opendata.nhs.scot/dataset/population-estimates",
"Population estimate", class="externallink"), " and ",
tags$a(href=" https://www.opendata.nhs.scot/dataset/geography-codes-and-labels",
"geography names and codes", class="externallink"),
"- Where you can find the files with the populations and geographies
used for the analysis"),
#Link to shapefiles
tags$li(class= "li-custom", tags$a(href="https://data.gov.uk/publisher/scottish-government-spatial-data-infrastructure",
"Shapefiles", class="externallink"),
"- Where you can find the shapefiles used for the map")
), #Bullet point list bracket
br()
) # mainPanel bracket
), #Tab panel bracket
###############################################.
##############Evidence for action----
###############################################.
           # External link styled as a tab: the label itself is an <a> tag, so clicking
           # opens the ScotPHO evidence-for-action page in a new browser tab rather than
           # switching to in-app content.
           tabPanel(a("Evidence for action", href="https://www.scotpho.org.uk/comparative-health/profiles/resources/evidence-for-action/", target="_blank")
           ), #tabPanel bracket
###############################################.
############## Tour of the tool----
###############################################.
           # Tour tab: a static, illustrated walkthrough of the tool mirroring the
           # interactive introjs tour (screenshots + captions, separated by hr()).
           tabPanel("Tour of the tool", value = "tour",
                    sidebarPanel(width=1),
                    mainPanel(width=10,
                              fluidRow(p(h4("Welcome to the ScotPHO Profiles Tool"),
                                         h5("This interactive tool provides access to a range of public
                                            health related indicators at different geographies including NHS boards, council areas and health and
                                            social care partnerships.", style = "color:black;"),
                                         h5("There are different ways to navigate around the tool.", style = "color:black;"),
                                         h5("Different visualisations can be opened using the menu bar (the blue strip) at the top of the screen.",
                                            style = "color:black;"),
                                         img(src='introjs_tabset_panel.PNG',width=300),
                                         br(),
                                         h5("The 'Home' option in the menu bar will return to the profiles tool homepage.",
                                            style = "color:black;"),
                                         style = "font-size:20px")),
                              hr(),
                              fluidRow(column(6,
                                              h5("The profile summary allows you to look at multiple indicators within an area at the same time.",
                                                 style = "color:black;")),
                                       column(6, img(src='tour_summary1.PNG'))),
                              hr(),
                              fluidRow(column(3,
                                              h5("The trend and rank charts allow detailed exploration of one indicator at a time.",
                                                 style = "color:black;")),
                                       column(9, img(src='tour_trendrank1.PNG'))),
                              hr(),
                              fluidRow(p(h5("Throughout the tool use the dropdown menus to change which indicators or geographies are displayed in the charts.",
                                            style = "color:black;"),
                                         img(src='tour_summary2.png', style = "vertical-align: middle; border-style: solid; border-color: black; border-width: 1px"),
                                         column(6, h5("While using dropdown menus mouse click within a dropdown menu and press backspace on your keyboard ('<-') then start typing a word to quickly find the options you are looking for",
                                                      style = "color:black;")),
                                         column(6, img(src='introjs_how_to_select.png')))),
                              hr(),
                              br(),
                              fluidRow(column(8,
                                              p(h5("Throughout the tool look out for options in each window that provide",
                                                   style = "color:black;"),
                                                tags$ul( tags$li("indicator definitions or help to interpret a visualisation,"),
                                                         tags$li("data download options for individual charts,"),
                                                         tags$li("image downloads for individual charts.")))),
                                       column(4, img(src='tour_rankmap2.PNG'))),
                              hr(),
                              br(),
                              fluidRow(column(6,
                                              h5("The 'Data' window can be used to filter and download profiles data.",
                                                 style = "color:black;")),
                                       column(6, img(src='tour_data1.PNG'))),
                              hr(),
                              br(),
                              fluidRow(column(6,
                                              h5("The inequalities module allows exploration of deprivation effects for a selection of indicators from the main profiles tool.",
                                                 style = "color:black;")),
                                       column(6, img(src='tour_ineq1.png'))),
                              hr(),
                              br(),
                              fluidRow(h5("There are also options to find out information such as detailed descriptions of the profile indicators, indicator update schedules and links to evidence for action briefings.",
                                          style = "color:black;"),
                                       img(src='tour_about1.PNG', width="100%"))
                    )#main panel bracket
           ), #tab panel bracket
###############################################.
##############Other profiles----
###############################################.
tabPanel("Other profiles", value = "others",
sidebarPanel(width=1),
mainPanel(
h4("Alternative profiles & resources", style = "color:black;"),
p("There are a number of organisations that provide local information relating to the wider determinants of health in Scotland.
Below are links to some of alternative profiling products."),
tags$ul(
#Link to GCPH
tags$li(class= "li-custom", tags$a(href="http://www.nssdiscovery.scot.nhs.uk/",
"NSS Discovery", class="externallink")),
#Link to GCPH
tags$li(class= "li-custom", tags$a(href="http://www.understandingglasgow.com/",
"Glasgow Centre for Population Health (GCPH)", class="externallink")),
#Link to Fife
tags$li(class= "li-custom", tags$a(href="https://knowfife.fife.gov.uk/",
"KnowFife Dataset", class="externallink")),
#Link to IS
tags$li(class= "li-custom", tags$a(href="http://www.improvementservice.org.uk/community-planning-outcomes-profile.html",
"Improvement Service (IS) - Community planning outcomes profile (CPOP)", class="externallink")),
#Link to NRS
tags$li(class= "li-custom", tags$a(href="https://www.nrscotland.gov.uk/statistics-and-data/statistics/stats-at-a-glance/council-area-profiles",
"National Records of Scotland (NRS) Council Area Profiles", class="externallink")),
#Link to stats.gov.scot
tags$li(class= "li-custom", tags$a(href="http://statistics.gov.scot/home",
"Statistics.gov.scot", class="externallink")),
#Link to Scottish nation
tags$li(class= "li-custom", tags$a(href="http://www.environment.gov.scot/",
"Scotland's Environment Hub", class="externallink"))
), #Bullet point list bracket
br()
) # mainPanel bracket
) #tabPanel bracket
)# NavbarMenu bracket
), #Bracket navbarPage
div(style = "margin-bottom: 30px;"), # this adds breathing space between content and footer
###############################################.
##############Footer----
###############################################.
#Copyright warning
tags$footer(column(6, "ยฉ Scottish Public Health Observatory v2.0 2018"),
column(2, tags$a(href="mailto:phs.scotpho@phs.scot", tags$b("Contact us!"),
class="externallink", style = "color: white; text-decoration: none")),
column(3, tags$a(href="https://www.scotpho.org.uk/about-us/scotpho-website-policies-and-statements/privacy-and-cookies", tags$b("Privacy & cookies"),
class="externallink", style = "color: white; text-decoration: none")),
column(1, actionLink("twitter_share", label = "Share", icon = icon("twitter"),
style= "color:white;", onclick = sprintf("window.open('%s')",
"https://twitter.com/intent/tweet?text=Check%out%ScotPHO's%profile%tool&url=https://scotland.shinyapps.io/ScotPHO_profiles_tool/"))),
style = "
position:fixed;
text-align:center;
left: 0;
bottom:0;
width:100%;
z-index:1000;
height:30px; /* Height of the footer */
color: white;
padding: 10px;
font-weight: bold;
background-color: #1995dc"
)
################################################.
) #bracket tagList
###END
|
# ---- Load iperf interval data: 3 Vegas trials + 3 Reno trials --------------
cnames <- c("time", "srcip", "srcport", "dstip", "dstport", "id", "interval", "data", "tput")

# Read one trial's CSV output and tag it with its experimental conditions.
# path: CSV file of per-interval measurements; tcp: congestion-control
# variant ("vegas"/"reno"); trial: trial number; capacity/rtt: link settings.
load_trial <- function(path, tcp, trial, capacity = 2, rtt = 2) {
  dat <- read.csv(path)
  names(dat) <- cnames
  dat$tcp <- tcp
  dat$capacity <- capacity
  dat$rtt <- rtt
  dat$trial <- trial
  dat
}

# 1. load 3 trials vegas data; 2. load 3 trials reno data
# (same order as the original hand-unrolled rbind chain)
vegas_files <- sprintf("dxl2015-el6383-project/output04%d_4.txt", 1:3)
reno_files  <- sprintf("dxl2015-el6383-project/output15%d_4.txt", 1:3)
LFN1 <- do.call(rbind, unname(c(
  Map(load_trial, vegas_files, "vegas", 1:3),
  Map(load_trial, reno_files, "reno", 1:3)
)))

LFN1$trial <- as.factor(LFN1$trial)
LFN1$tcp <- as.factor(LFN1$tcp)

# Split the "begin-end" interval strings into numeric begin/end columns.
library(reshape2)
LFN1 <- transform(LFN1, interval = colsplit(interval, "-", names = c("begin", "end")))
LFN1$interval.begin <- LFN1$interval$begin
LFN1$interval.end <- LFN1$interval$end
LFN1$interval <- NULL

# Rows spanning the whole run (begin <= 1 and end >= 10) are iperf's
# per-trial summary lines; everything else is per-interval detail used below.
totals <- LFN1[LFN1$interval.begin <= 1 & LFN1$interval.end >= 10, ]
details <- LFN1[!(LFN1$interval.begin <= 1 & LFN1$interval.end >= 10), ]
# ---- Plots -----------------------------------------------------------------
library(ggplot2)

# Line plot: per-interval throughput over time, one line per (tcp, trial).
q <- ggplot(details)
q <- q + scale_x_discrete("interval.begin")
q <- q + scale_y_continuous("Throughput (bps)")
q <- q + ggtitle("Lineplot of Throughput Vegas vs.Reno in Buffer 87380BYTE")
q <- q + geom_point(aes(x = interval.begin, y = tput, colour = tcp))
q <- q + geom_line(aes(x = interval.begin, y = tput, colour = tcp, linetype = trial))
q
ggsave("Exper4_lineplot.png", plot = q)

# Violin plot of the throughput distribution for each TCP variant.
q <- ggplot(details)
# Fix: the x aesthetic below is `tcp`; the old label was a typo
# ("interbal.begin") copied from the line plot — label the axis correctly.
q <- q + scale_x_discrete("tcp")
q <- q + scale_y_continuous("Throughput (bps)")
q <- q + ggtitle("Violinplot of Throughput Vegas vs.Reno in Buffer 87380BYTE")
q <- q + geom_violin(aes(x = tcp, y = tput))
q
ggsave("Exper4_violinplot.png", plot = q)
| /review/projects/0019/R script/Experiment4Buffer87380.R | no_license | Roshni-Natarajan/HSN-Lab | R | false | false | 2,221 | r | # 1.load 3 trials vegas data
dat <- read.csv("dxl2015-el6383-project/output041_4.txt")
cnames <- c("time", "srcip", "srcport", "dstip", "dstport", "id", "interval", "data", "tput")
names(dat) <- cnames
dat$tcp <- "vegas"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 1
LFN1 <- dat
dat <- read.csv("dxl2015-el6383-project/output042_4.txt")
names(dat) <- cnames
dat$tcp <- "vegas"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 2
LFN1 <- rbind(LFN1, dat)
dat <- read.csv("dxl2015-el6383-project/output043_4.txt")
names(dat) <- cnames
dat$tcp <- "vegas"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 3
LFN1 <- rbind(LFN1, dat)
# 2.load 3 trials reno data
dat <- read.csv("dxl2015-el6383-project/output151_4.txt")
names(dat) <- cnames
dat$tcp <- "reno"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 1
LFN1 <- rbind(LFN1, dat)
dat <- read.csv("dxl2015-el6383-project/output152_4.txt")
names(dat) <- cnames
dat$tcp <- "reno"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 2
LFN1 <- rbind(LFN1, dat)
dat <- read.csv("dxl2015-el6383-project/output153_4.txt")
names(dat) <- cnames
dat$tcp <- "reno"
dat$capacity <- 2
dat$rtt <- 2
dat$trial <- 3
LFN1 <- rbind(LFN1, dat)
LFN1$trial <- as.factor(LFN1$trial)
LFN1$tcp <- as.factor(LFN1$tcp)
library(reshape2)
LFN1 <- transform(LFN1, interval = colsplit(interval,"-", names = c('begin', 'end')))
LFN1$interval.begin <- LFN1$interval$begin
LFN1$interval.end <- LFN1$interval$end
LFN1$interval <- NULL
totals <- LFN1[LFN1$interval.begin<=1 & LFN1$interval.end>=10,]
details <- LFN1[!(LFN1$interval.begin<=1 & LFN1$interval.end>=10),]
library(ggplot2)
q <- ggplot(details)
q <- q + scale_x_discrete("interval.begin")
q <- q + scale_y_continuous("Throughput (bps)")
q <- q + ggtitle("Lineplot of Throughput Vegas vs.Reno in Buffer 87380BYTE")
q <- q + geom_point(aes(x=interval.begin, y=tput, colour=tcp))
q <- q + geom_line(aes(x=interval.begin, y=tput, colour=tcp, linetype=trial))
q
ggsave("Exper4_lineplot.png", plot=q)
q <- ggplot(details)
q <- q + scale_x_discrete("interbal.begin")
q <- q + scale_y_continuous("Throughput (bps)")
q <- q + ggtitle("Violinplot of Throughput Vegas vs.Reno in Buffer 87380BYTE")
q <- q + geom_violin(aes(x=tcp, y=tput))
q
ggsave("Exper4_violinplot.png", plot=q)
|
%
% Copyright 2007-2020 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{mxGetExpected}
\alias{mxGetExpected}
\alias{imxGetExpectationComponent}
\title{Extract the component from a model's expectation}
\description{
This function extracts the expected means, covariance, or thresholds from a model.
}
\usage{
mxGetExpected(model, component, defvar.row=1, subname=model$name)
imxGetExpectationComponent(model, component, defvar.row=1, subname=model$name)
}
\arguments{
\item{model}{MxModel object from which to extract the expectation component.}
\item{component}{Character vector. The name(s) of the component(s) to extract. Recognized names are \dQuote{covariance}, \dQuote{means}, and \dQuote{thresholds}.}
\item{defvar.row}{A row index. Which row to load for definition variables.}
\item{subname}{Name of the submodel to evaluate.}
}
\details{
The expected means, covariance, or thresholds can be extracted from
Normal (\link{mxExpectationNormal}), RAM (\link{mxExpectationRAM}), and
LISREL (\link{mxExpectationLISREL}) models. When more than one component
is requested, the components will be returned as a list.
If component 'vector' is requested then the non-redundant coefficients
of the expected manifest distribution will be returned as a vector.
If component 'standVector' is requested then the same parameter structure as
'vector' is returned, but it is standardized. For Normal expectations the covariances
are returned as correlations, the means are returned as zeros, and the
thresholds are returned as z-scores. For the thresholds the z-scores
are computed by using the model-implied means and variances.
Note that capitalization is ignored for the 'standVector' option, so 'standvector'
is also acceptable.
}
\value{
See details.
}
\references{
The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation.
}
\examples{
# ===============================================
# = Build a 1-factor CFA, with bad start values =
# ===============================================
require(OpenMx)
manifests = paste("x", 1:5, sep="")
latents = c("G")
factorModel = mxModel("One Factor", type="RAM",
manifestVars = manifests,
latentVars = latents,
mxPath(from = latents, to = manifests),
mxPath(from = manifests, arrows = 2),
mxPath(from = latents, arrows = 2, free = FALSE, values = 1.0),
mxPath(from = 'one', to = manifests),
mxData(demoOneFactor, type = "raw")
)
# ============================================================================
# = What do our starting values indicate about the expected data covariance? =
# ============================================================================
mxGetExpected(factorModel, "covariance")
# Oops. Starting values indicate an expected zero-covariance matrix.
# The model likely won't run from these start values.
# Let's adjust them:
factorModel = mxModel("One Factor", type = "RAM",
manifestVars = manifests, latentVars = latents,
# Reasonable start VALUES
mxPath(from = latents, to = manifests, values = .2),
mxPath(from = manifests, arrows = 2),
mxPath(from = latents, arrows = 2, free = FALSE, values = 1.0),
mxPath(from = 'one', to = manifests),
mxData(demoOneFactor, type = "raw")
)
mxGetExpected(factorModel, "covariance")
# x1 x2 x3 x4 x5
# x1 0.04 0.04 0.04 0.04 0.04
# x2 0.04 0.04 0.04 0.04 0.04
# x3 0.04 0.04 0.04 0.04 0.04
# x4 0.04 0.04 0.04 0.04 0.04
# x5 0.04 0.04 0.04 0.04 0.04
# And this version will run:
factorModel = mxRun(factorModel)
}
| /man/mxGetExpected.Rd | no_license | khusmann/OpenMx | R | false | false | 4,148 | rd | %
% Copyright 2007-2020 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{mxGetExpected}
\alias{mxGetExpected}
\alias{imxGetExpectationComponent}
\title{Extract the component from a model's expectation}
\description{
This function extracts the expected means, covariance, or thresholds from a model.
}
\usage{
mxGetExpected(model, component, defvar.row=1, subname=model$name)
imxGetExpectationComponent(model, component, defvar.row=1, subname=model$name)
}
\arguments{
\item{model}{MxModel object from which to extract the expectation component.}
\item{component}{Character vector. The name(s) of the component(s) to extract. Recognized names are \dQuote{covariance}, \dQuote{means}, and \dQuote{thresholds}.}
\item{defvar.row}{A row index. Which row to load for definition variables.}
\item{subname}{Name of the submodel to evaluate.}
}
\details{
The expected means, covariance, or thresholds can be extracted from
Normal (\link{mxExpectationNormal}), RAM (\link{mxExpectationRAM}), and
LISREL (\link{mxExpectationLISREL}) models. When more than one component
is requested, the components will be returned as a list.
If component 'vector' is requested then the non-redundant coefficients
of the expected manifest distribution will be returned as a vector.
If component 'standVector' is requested then the same parameter structure as
'vector' is returned, but it is standardized. For Normal expectations the covariances
are returned as correlations, the means are returned as zeros, and the
thresholds are returned as z-scores. For the thresholds the z-scores
are computed by using the model-implied means and variances.
Note that capitalization is ignored for the 'standVector' option, so 'standvector'
is also acceptable.
}
\value{
See details.
}
\references{
The OpenMx User's guide can be found at http://openmx.ssri.psu.edu/documentation.
}
\examples{
# ===============================================
# = Build a 1-factor CFA, with bad start values =
# ===============================================
require(OpenMx)
manifests = paste("x", 1:5, sep="")
latents = c("G")
factorModel = mxModel("One Factor", type="RAM",
manifestVars = manifests,
latentVars = latents,
mxPath(from = latents, to = manifests),
mxPath(from = manifests, arrows = 2),
mxPath(from = latents, arrows = 2, free = FALSE, values = 1.0),
mxPath(from = 'one', to = manifests),
mxData(demoOneFactor, type = "raw")
)
# ============================================================================
# = What do our starting values indicate about the expected data covariance? =
# ============================================================================
mxGetExpected(factorModel, "covariance")
# Oops. Starting values indicate an expected zero-covariance matrix.
# The model likely won't run from these start values.
# Let's adjust them:
factorModel = mxModel("One Factor", type = "RAM",
manifestVars = manifests, latentVars = latents,
# Reasonable start VALUES
mxPath(from = latents, to = manifests, values = .2),
mxPath(from = manifests, arrows = 2),
mxPath(from = latents, arrows = 2, free = FALSE, values = 1.0),
mxPath(from = 'one', to = manifests),
mxData(demoOneFactor, type = "raw")
)
mxGetExpected(factorModel, "covariance")
# x1 x2 x3 x4 x5
# x1 0.04 0.04 0.04 0.04 0.04
# x2 0.04 0.04 0.04 0.04 0.04
# x3 0.04 0.04 0.04 0.04 0.04
# x4 0.04 0.04 0.04 0.04 0.04
# x5 0.04 0.04 0.04 0.04 0.04
# And this version will run:
factorModel = mxRun(factorModel)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rainfall.R
\name{rainfall}
\alias{rainfall}
\title{rainfall}
\usage{
rainfall(..., snv_data = NULL, write = FALSE, title = NULL,
exclude_chroms = c("4", "Y"))
}
\description{
Plot log10 distances between snvs as rainfall plot
}
\keyword{rainfall}
| /man/rainfall.Rd | no_license | nriddiford/mutationProfiles | R | false | true | 327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rainfall.R
\name{rainfall}
\alias{rainfall}
\title{rainfall}
\usage{
rainfall(..., snv_data = NULL, write = FALSE, title = NULL,
exclude_chroms = c("4", "Y"))
}
\description{
Plot log10 distances between snvs as rainfall plot
}
\keyword{rainfall}
|
# Five-number summary of Old Faithful eruption times: overall, then by year.
# NOTE(review): the formula interface to fivenum() is presumably supplied by
# the mosaic package (base R's fivenum() takes a plain numeric vector) — confirm.
fivenum(~time, data = OldFaithful)
fivenum(time ~ year, data = OldFaithful)
| /inst/snippets/Example6.1.R | no_license | rpruim/ISIwithR | R | false | false | 77 | r | fivenum(~time, data = OldFaithful)
fivenum(time ~ year, data = OldFaithful)
|
# Load metadata.
# Interactively pick the tab-separated metadata file; the readline() is only
# a pause so the user is ready for the file.choose() dialog.
DummyVar <- readline("Ready to select Metadata file? (you don't have to say 'y,' just hit return) ")
cat('\n')
file <- file.choose()
# NOTE(review): header = TRUE yet columns are accessed as V1..V9 below; this
# only works if the file's first row literally contains "V1".."V9" — confirm.
Metadata <- read.table(file, header = TRUE, stringsAsFactors=FALSE, sep = '\t', fill = TRUE, nrows = 6400, quote = '"')
# Per-document lookup vectors, all keyed (named) by document ID (column V1).
Documents <- Metadata$V1
TotalsPerDoc <- as.integer(Metadata$V8)  # apparently total words per document (used later to normalize Theta)
names(TotalsPerDoc) <- Documents
Titles <- paste(substr(Metadata$V9, 1, 25), Documents)  # truncated title + doc ID
names(Titles) <- Documents
LongTitles <- Metadata$V9
names(LongTitles) <- Documents
Authors <- Metadata$V3
names(Authors) <- Documents
DocDates <- as.numeric(Metadata$V4)  # publication year (numeric)
names(DocDates) <- Documents
Genres <- Metadata$V5  # genre codes, e.g. "poe", "fic", "dra" (see plot legend below)
names(Genres) <- Documents
# ---- Load Phi: each topic's word list --------------------------------------
# Expected file layout: first line is the number of topics; then, per topic,
# a "Topic ..." header, the topic's words one per line, and a "-----"
# separator line between topics.
DummyVar <- readline("Ready to select Phi (topic distributions over words)? ")
cat('\n')
file <- file.choose()
FileLines <- readLines(con = file, n = -1, encoding = "UTF-8")
TopicCount <- as.integer(FileLines[1])
FileLines <- FileLines[-1]
Phi <- vector("list", TopicCount)
Topic <- 1
for (Line in FileLines) {
  Prefix <- substr(Line, 1, 5)
  if (Prefix == "Topic") {
    # Header line: ignore.
  } else if (Prefix == "-----") {
    # Separator: start collecting the next topic's words.
    Topic <- Topic + 1
  } else {
    Phi[[Topic]] <- c(Phi[[Topic]], Line)
  }
}
# Full vocabulary: every distinct word across all topics, in first-seen order.
AllWords <- unique(as.character(unlist(Phi)))
# ---- Load KL: for each topic, the indices of its most-related topics -------
# File format: "Topic N" header lines followed by 0-based topic indices, one
# per line; a topic's reference to itself is dropped.
DummyVar <- readline("Ready to select KL file (topic relations to each other)? ")
cat('\n')
file <- file.choose()
FileLines <- readLines(con = file, n = -1, encoding = "UTF-8")
KL <- vector("list", TopicCount)
Topic <- 0
for (Line in FileLines) {
  if (substr(Line, 1, 5) == "Topic") {
    # Header: start the next topic's block.
    Topic <- Topic + 1
    next
  }
  Connection <- as.integer(Line)
  # Indices in the file are 0-based; skip the self-reference.
  if (Connection != (Topic - 1)) {
    KL[[Topic]] <- c(KL[[Topic]], Connection)
  }
}

# ---- Load Theta: topic-by-document count matrix (comma-separated) ----------
DummyVar <- readline("Ready to select Theta file (topic relations to documents)? ")
cat('\n')
file <- file.choose()
Theta <- as.matrix(read.table(file, sep = ","))
cat('Theta read in. Now processing document information; may take 5 min.\n\n')
# ---- Topic sizes and ranks -------------------------------------------------
# Total word mass assigned to each topic = row sums of the topic x doc matrix
# (replaces the original per-row sum() loop).
TopicSize <- rowSums(Theta)
TopicBulk <- TopicSize
names(TopicSize) <- seq_len(TopicCount)
TopicSize <- sort(TopicSize, decreasing = TRUE)
# Rank of each topic in the size-sorted order (1 = largest); equivalent to
# the original which(names == i) loop, since the names are unique.
TopicRanks <- match(as.character(seq_len(TopicCount)), names(TopicSize))

NumDocs <- length(Documents)
MinDate <- min(DocDates)
MaxDate <- max(DocDates)
Timespan <- (MaxDate - MinDate) + 1

# Per-year word totals across all documents; floored at 1 so the division
# below can never hit zero.
TotalsPerYear <- integer(Timespan)
for (i in seq_len(NumDocs)) {
  DateIndex <- (DocDates[i] - MinDate) + 1
  TotalsPerYear[DateIndex] <- TotalsPerYear[DateIndex] + TotalsPerDoc[i]
}
TotalsPerYear[TotalsPerYear < 1] <- 1

# Per-year topic mass, normalized to a proportion of that year's words.
# Exact zeros become NA so they drop out of the time-series smoothing later.
ThetaSum <- array(data = 0, dim = c(TopicCount, Timespan))
for (i in seq_len(NumDocs)) {
  DateIndex <- (DocDates[i] - MinDate) + 1
  ThetaSum[, DateIndex] <- ThetaSum[, DateIndex] + Theta[, i]
}
ThetaSum <- sweep(ThetaSum, 2, TotalsPerYear, "/")
ThetaSum[ThetaSum == 0] <- NA

# This turns absolute occurrences into proportions per document
# (each column j of Theta divided by that document's word total).
Dimensions <- dim(Theta)
Theta <- sweep(Theta, 2, TotalsPerDoc, "/")
# Print the top documents for one topic, ranked by the topic's proportion
# within each document. Reads globals: Theta, Dimensions, Documents, Authors,
# DocDates, Genres, TotalsPerDoc, LongTitles.
#
# TopicNum: 1-based topic index.
# n: how many documents to list (default 30 preserves the old behavior).
display.topic <- function(TopicNum, n = 30) {
  Freqs <- Theta[TopicNum, ]
  names(Freqs) <- seq_len(Dimensions[2])
  Freqs <- sort(Freqs, decreasing = TRUE)
  # Guard against asking for more documents than exist (the original
  # hard-coded [1:30] would yield NA indices for small corpora).
  n <- min(n, length(Freqs))
  TopDocs <- as.integer(names(Freqs)[seq_len(n)])
  for (DocNum in TopDocs) {
    cat(Documents[DocNum], Authors[DocNum], DocDates[DocNum], Genres[DocNum],
        TotalsPerDoc[DocNum], '\n')
    cat(LongTitles[DocNum], '\n', '\n')
  }
}
# ---- Interactive browsing loop (runs until the user interrupts) ------------
# Each pass: prompt for a word / document ID / topic number, resolve it to a
# topic, then print that topic's top documents and plot its usage over time.
repeat {
  Proceed = FALSE
  while (!Proceed) {
    Word <- readline('Enter a word, a DocID, or a topic# (*topic# to see a longer doc list): ')
    # "*NN" is a shortcut: print a long document list for topic NN, re-prompt.
    if (substr(Word,1,1) == "*") {
      display.topic(as.integer(substr(Word,2,6)))
      next
    }
    TopNum <- suppressWarnings(as.integer(Word))
    if (!is.na(TopNum) | Word %in% AllWords | Word %in% Documents) Proceed = TRUE
    else cat("That wasn't a valid entry, perhaps because we don't have that word.", '\n')
  }
  # The following section deals with the case where the user has
  # entered a word to look up.
  if (Word %in% AllWords) {
    # Find every topic containing the word; Hits records the word's rank
    # within each matching topic's word list, named by topic index.
    Hits <- numeric(0)
    NumHits <- 0
    Indices <- numeric(0)
    for (i in 1: TopicCount) {
      if (Word %in% Phi[[i]]) {
        NumHits <- NumHits + 1
        Hits <- c(Hits, which(Phi[[i]] == Word))
        Indices <- c(Indices, i)
      }
    }
    names(Hits) <- Indices
    Hits <- sort(Hits, decreasing = FALSE)
    cat('\n')
    # Show at most the 5 topics where the word ranks highest, then let the
    # user pick one (bad input defaults to topic 1).
    if (NumHits > 5) NumHits <- 5
    for (i in 1: NumHits) {
      Top <- as.integer(names(Hits[i]))
      cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
    }
    User <- readline('Which of these topics do you select? ')
    TopNum <- as.integer(User)
    if (is.na(TopNum)) TopNum <- 1
  }
  else if (Word %in% Documents) {
    # A document ID: show the document's 10 strongest topics and pick one.
    DocIndex <- which(Documents == Word)
    TopicVector <- Theta[ , DocIndex]
    names(TopicVector) <- 1:TopicCount
    TopicVector <- sort(TopicVector, decreasing = TRUE)
    Top10 <- as.integer(names(TopicVector[1:10]))
    for (Top in Top10) {
      cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
    }
    User <- readline('Which of these topics do you select? ')
    TopNum <- as.integer(User)
    if (is.na(TopNum)) TopNum <- 1
  }
  # Clamp the chosen topic number into the valid range.
  if (TopNum < 1) TopNum <- 1
  if (TopNum > TopicCount) TopNum <- TopicCount
  # By this point we presumably have a valid TopNum.
  cat('\n')
  # Top 10 documents for the chosen topic, by within-document proportion.
  Freqs <- Theta[TopNum, ]
  names(Freqs) <- 1:Dimensions[2]
  Freqs <- sort(Freqs, decreasing = TRUE)
  Top10 <- as.integer(names(Freqs[1:10]))
  # Generate smoothed curve.
  # (11-point moving average of the topic's yearly proportion, rescaled onto
  # the document-proportion axis, then loess-smoothed for plotting.)
  Smoothed <- numeric(Timespan)
  for (i in 1: Timespan) {
    Start = i-5
    End = i + 5
    if (Start < 1) Start = 1
    if (End > Timespan) End = Timespan
    Smoothed[i] = mean(ThetaSum[TopNum, Start:End], na.rm = TRUE)
  }
  Ratio <- max(Theta[TopNum, ])/max(Smoothed)
  Smoothed <- (Smoothed * Ratio)
  Range <- 1: Timespan
  Loess.Smoothed <- loess(Smoothed ~ Range, span = 0.7)
  Predict.Smoothed <- predict(Loess.Smoothed)
  # Scatter only documents in the top quartile of this topic's proportion,
  # coloured by genre and shaped by top-10 membership (x) / letters (triangle).
  Selected <- Theta[TopNum, ]
  Index <- which(Selected > quantile(Selected)[4])
  Selected <- Selected[Index]
  SelectLen <- length(Selected)
  Colours <- character(SelectLen)
  Shapes <- integer(SelectLen)
  for (i in 1: SelectLen) {
    Ind <- Index[i]
    if (Ind %in% Top10) Shapes[i] <- 4
    else Shapes[i] <- 1
    Colours[i] <- "gray42"
    if (Genres[Ind] == "poe") Colours[i] <- "mediumorchid2"
    if (Genres[Ind] == "bio") Colours[i] <- "gray3"
    if (Genres[Ind] == "fic") Colours[i] <- "dodgerblue3"
    if (Genres[Ind] == "dra") Colours[i] <- "olivedrab3"
    if (Genres[Ind] == "juv") Colours[i] <- "gold1"
    if (Genres[Ind] == "non") Colours[i] <- "tan4"
    if (Genres[Ind] == "let" | Genres[Ind] == "ora") {
      Colours[i] <- "salmon3"
      Shapes[i] <- 2
    }
  }
  PlotDates <- DocDates[Index]
  plot(PlotDates, Selected, col = Colours, pch = Shapes, xlab = "Blue/fic, purple/poe, green/drama, black/bio, brown/nonfic, triangle/letters or orations.", ylab = "Freq of topic in doc.", main = paste('Topic', TopNum, ':', Phi[[TopNum]][1], Phi[[TopNum]][2], Phi[[TopNum]][3], Phi[[TopNum]][4]))
  # Overlay the smoothed trend line on a second (unlabelled) set of axes.
  par(new=TRUE)
  plot(Predict.Smoothed*.7, type = 'l', lwd = 2, col = "gray75", axes = FALSE, ylab = "", xlab = "")
  for (DocNum in Top10) {
    cat(Documents[DocNum], Authors[DocNum], DocDates[DocNum], '\n')
    cat(LongTitles[DocNum],'\n','\n')
  }
  # Topic summary: top 50 words, size rank, and the 5 most-related topics
  # (KL holds 0-based topic indices, hence the +1).
  cat('TOPIC', TopNum,':', Phi[[TopNum]][1:50], '\n')
  cat('OF', TopicCount, 'TOPICS this is #',TopicRanks[TopNum], 'in desc order, with', TopicBulk[TopNum], 'words. Related topics: \n')
  for (i in 1:5) {
    Top <- KL[[TopNum]][i] + 1
    cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
  }
  cat('\n')
}
| /R Tutorials/text mining/BrowseLDA6.R | no_license | chengjun/Research | R | false | false | 7,685 | r | # Load metadata.
DummyVar <- readline("Ready to select Metadata file? (you don't have to say 'y,' just hit return) ")
cat('\n')
file <- file.choose()
Metadata <- read.table(file, header = TRUE, stringsAsFactors=FALSE, sep = '\t', fill = TRUE, nrows = 6400, quote = '"')
Documents <- Metadata$V1
TotalsPerDoc <- as.integer(Metadata$V8)
names(TotalsPerDoc) <- Documents
Titles <- paste(substr(Metadata$V9, 1, 25), Documents)
names(Titles) <- Documents
LongTitles <- Metadata$V9
names(LongTitles) <- Documents
Authors <- Metadata$V3
names(Authors) <- Documents
DocDates <- as.numeric(Metadata$V4)
names(DocDates) <- Documents
Genres <- Metadata$V5
names(Genres) <- Documents
DummyVar <- readline("Ready to select Phi (topic distributions over words)? ")
cat('\n')
file <- file.choose()
FileLines <- readLines(con = file, n = -1, encoding = "UTF-8")
TopicCount <- as.integer(FileLines[1])
FileLines <- FileLines[-1]
Topic = 1
Phi <- vector("list", TopicCount)
for (Line in FileLines) {
Prefix <- substr(Line, 1, 5)
if (Prefix == "Topic") next
if (Prefix == "-----") {
Topic = Topic + 1
next
}
Phi[[Topic]] <- c(Phi[[Topic]], Line)
}
AllWords <- character(0)
for (i in 1: TopicCount) {
AllWords <- union(AllWords, Phi[[i]])
}
DummyVar <- readline("Ready to select KL file (topic relations to each other)? ")
cat('\n')
file <- file.choose()
FileLines <- readLines(con = file, n = -1, encoding = "UTF-8")
KL <- vector("list", TopicCount)
Topic = 0
for (Line in FileLines) {
Prefix <- substr(Line, 1, 5)
if (Prefix == "Topic") {
Topic = Topic + 1
next
}
Connection <- as.integer(Line)
if (Connection == (Topic - 1) ) next
else KL[[Topic]] <- c(KL[[Topic]], Connection)
}
DummyVar <- readline("Ready to select Theta file (topic relations to documents)? ")
cat('\n')
file <- file.choose()
Theta <- as.matrix(read.table(file, sep = ","))
cat('Theta read in. Now processing document information; may take 5 min.\n\n')
# Create topic sizes.
TopicSize <- integer(TopicCount)
for (i in 1: TopicCount) {
TopicSize[i] <- sum(Theta[i, ])
}
# Rank topics
TopicBulk <- TopicSize
TopicRanks <- integer(TopicCount)
names(TopicSize) <- 1:TopicCount
TopicSize <- sort(TopicSize, decreasing = TRUE)
for (i in 1: TopicCount) {
TopicRanks[i] <- which(names(TopicSize) == as.character(i))
}
NumDocs <- length(Documents)
MinDate = min(DocDates)
MaxDate = max(DocDates)
Timespan = (MaxDate - MinDate) + 1
TotalsPerYear <- integer(Timespan)
for (i in 1: NumDocs) {
DateIndex = (DocDates[i] - MinDate) + 1
TotalsPerYear[DateIndex] = TotalsPerYear[DateIndex] + TotalsPerDoc[i]
}
TotalsPerYear[TotalsPerYear < 1] = 1
ThetaSum <- array(data=0, dim = c(TopicCount, Timespan))
for (i in 1: NumDocs) {
DateIndex = (DocDates[i] - MinDate) + 1
ThetaSum[ , DateIndex] = ThetaSum[ , DateIndex] + Theta[ , i]
}
for (i in 1: TopicCount) {
HoldVector = ThetaSum[i ,] / TotalsPerYear
HoldVector[HoldVector == 0] <- NA
ThetaSum[i ,] <- HoldVector
}
# This turns absolute occurrences into proportions per document.
Dimensions <- dim(Theta)
for (i in 1: TopicCount) {
Theta[i, ] <- Theta[i, ] / TotalsPerDoc
}
display.topic <- function(TopicNum) {
Freqs <- Theta[TopicNum, ]
names(Freqs) <- 1:Dimensions[2]
Freqs <- sort(Freqs, decreasing = TRUE)
Top30 <- as.integer(names(Freqs[1:30]))
for (DocNum in Top30) {
cat(Documents[DocNum], Authors[DocNum], DocDates[DocNum], Genres[DocNum], TotalsPerDoc[DocNum], '\n')
cat(LongTitles[DocNum],'\n','\n')
}
}
repeat {
Proceed = FALSE
while (!Proceed) {
Word <- readline('Enter a word, a DocID, or a topic# (*topic# to see a longer doc list): ')
if (substr(Word,1,1) == "*") {
display.topic(as.integer(substr(Word,2,6)))
next
}
TopNum <- suppressWarnings(as.integer(Word))
if (!is.na(TopNum) | Word %in% AllWords | Word %in% Documents) Proceed = TRUE
else cat("That wasn't a valid entry, perhaps because we don't have that word.", '\n')
}
# The following section deals with the case where the user has
# entered a word to look up.
if (Word %in% AllWords) {
Hits <- numeric(0)
NumHits <- 0
Indices <- numeric(0)
for (i in 1: TopicCount) {
if (Word %in% Phi[[i]]) {
NumHits <- NumHits + 1
Hits <- c(Hits, which(Phi[[i]] == Word))
Indices <- c(Indices, i)
}
}
names(Hits) <- Indices
Hits <- sort(Hits, decreasing = FALSE)
cat('\n')
if (NumHits > 5) NumHits <- 5
for (i in 1: NumHits) {
Top <- as.integer(names(Hits[i]))
cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
}
User <- readline('Which of these topics do you select? ')
TopNum <- as.integer(User)
if (is.na(TopNum)) TopNum <- 1
}
else if (Word %in% Documents) {
DocIndex <- which(Documents == Word)
TopicVector <- Theta[ , DocIndex]
names(TopicVector) <- 1:TopicCount
TopicVector <- sort(TopicVector, decreasing = TRUE)
Top10 <- as.integer(names(TopicVector[1:10]))
for (Top in Top10) {
cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
}
User <- readline('Which of these topics do you select? ')
TopNum <- as.integer(User)
if (is.na(TopNum)) TopNum <- 1
}
if (TopNum < 1) TopNum <- 1
if (TopNum > TopicCount) TopNum <- TopicCount
# By this point we presumably have a valid TopNum.
cat('\n')
Freqs <- Theta[TopNum, ]
names(Freqs) <- 1:Dimensions[2]
Freqs <- sort(Freqs, decreasing = TRUE)
Top10 <- as.integer(names(Freqs[1:10]))
# Generate smoothed curve.
Smoothed <- numeric(Timespan)
for (i in 1: Timespan) {
Start = i-5
End = i + 5
if (Start < 1) Start = 1
if (End > Timespan) End = Timespan
Smoothed[i] = mean(ThetaSum[TopNum, Start:End], na.rm = TRUE)
}
Ratio <- max(Theta[TopNum, ])/max(Smoothed)
Smoothed <- (Smoothed * Ratio)
Range <- 1: Timespan
Loess.Smoothed <- loess(Smoothed ~ Range, span = 0.7)
Predict.Smoothed <- predict(Loess.Smoothed)
Selected <- Theta[TopNum, ]
Index <- which(Selected > quantile(Selected)[4])
Selected <- Selected[Index]
SelectLen <- length(Selected)
Colours <- character(SelectLen)
Shapes <- integer(SelectLen)
for (i in 1: SelectLen) {
Ind <- Index[i]
if (Ind %in% Top10) Shapes[i] <- 4
else Shapes[i] <- 1
Colours[i] <- "gray42"
if (Genres[Ind] == "poe") Colours[i] <- "mediumorchid2"
if (Genres[Ind] == "bio") Colours[i] <- "gray3"
if (Genres[Ind] == "fic") Colours[i] <- "dodgerblue3"
if (Genres[Ind] == "dra") Colours[i] <- "olivedrab3"
if (Genres[Ind] == "juv") Colours[i] <- "gold1"
if (Genres[Ind] == "non") Colours[i] <- "tan4"
if (Genres[Ind] == "let" | Genres[Ind] == "ora") {
Colours[i] <- "salmon3"
Shapes[i] <- 2
}
}
PlotDates <- DocDates[Index]
plot(PlotDates, Selected, col = Colours, pch = Shapes, xlab = "Blue/fic, purple/poe, green/drama, black/bio, brown/nonfic, triangle/letters or orations.", ylab = "Freq of topic in doc.", main = paste('Topic', TopNum, ':', Phi[[TopNum]][1], Phi[[TopNum]][2], Phi[[TopNum]][3], Phi[[TopNum]][4]))
par(new=TRUE)
plot(Predict.Smoothed*.7, type = 'l', lwd = 2, col = "gray75", axes = FALSE, ylab = "", xlab = "")
for (DocNum in Top10) {
cat(Documents[DocNum], Authors[DocNum], DocDates[DocNum], '\n')
cat(LongTitles[DocNum],'\n','\n')
}
cat('TOPIC', TopNum,':', Phi[[TopNum]][1:50], '\n')
cat('OF', TopicCount, 'TOPICS this is #',TopicRanks[TopNum], 'in desc order, with', TopicBulk[TopNum], 'words. Related topics: \n')
for (i in 1:5) {
Top <- KL[[TopNum]][i] + 1
cat("Topic", Top, ":", Phi[[Top]][1], Phi[[Top]][2], Phi[[Top]][3], Phi[[Top]][4], Phi[[Top]][5], Phi[[Top]][6], '\n')
}
cat('\n')
}
|
/rabbit/wcgna.R | no_license | x-nm/R | R | false | false | 5,247 | r | ||
# S3 predict method for "inbagg" objects (indirect bagging).
# Collects one class vote per bootstrap model for each row of `newdata`
# and returns the majority-vote class as a factor.
predict.inbagg <- function(object, newdata, ...) {
  if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
  # Drop any intermediate-variable columns already present in newdata so
  # they can be re-predicted by each model's bfct.
  if(any(names(object$W) %in% names(newdata))) {
    newdata <- newdata[!(names(newdata) %in% names(object$W))]
  }
  n.obs <- nrow(newdata)
  classes <- levels(object$y)
  vote <- matrix(0, nrow = n.obs, ncol = length(classes))
  for(mtree in object$mtrees) {
    intermed <- mtree$bfct(newdata)
    btree <- mtree$btree
    if(!is.null(btree$fixed.function)) {
      # Strip the ".<digit>" suffix that data.frame() appends to duplicated
      # column names before handing the frame to the fixed function.
      names(intermed) <- sub(".[0-9]$", "", names(intermed))
      XX <- data.frame(newdata, intermed)
      res <- btree$fixed.function(XX)
    } else {
      XX <- data.frame(newdata, intermed)
      if(is.null(btree$predict)) {
        res <- try(predict(btree$model, newdata = XX, ...))
      } else {
        res <- try(btree$predict(btree$model, newdata = XX, ...))
      }
    }
    # res is presumably one class index per observation; pair it with the
    # row numbers and bump the (row, class) cells via matrix indexing.
    res <- cbind(1:n.obs, res)
    vote[res] <- vote[res] + 1
  }
  factor(classes[apply(vote, 1, uwhich.max)])
}
| /R/predict.inbagg.R | no_license | cran/ipred | R | false | false | 1,226 | r | predict.inbagg <- function(object, newdata, ...) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
if(any(names(object$W) %in% names(newdata))) newdata <- newdata[!(names(newdata) %in% names(object$W))]
NBAGG <- length(object$mtrees)
N <- nrow(newdata)
classes <- levels(object$y)
vote <- matrix(0, nrow=N, ncol=length(classes))
for(i in 1:NBAGG) {
intermed <- object$mtrees[[i]]$bfct(newdata)
# XX <- data.frame(newdata, intermed)
if(!is.null(object$mtrees[[i]]$btree$fixed.function)) {
names(intermed) <- sub(".[0-9]$", "", names(intermed))
XX <- data.frame(newdata, intermed)
# names(XX)[(ncol(XX)-ncol(intermed)+1):ncol(XX)] <- sub(".[0-9]$", "", names(intermed))
res <- object$mtrees[[i]]$btree$fixed.function(XX)
} else {
XX <- data.frame(newdata, intermed)
if(is.null(object$mtrees[[i]]$btree$predict)) {
res <- try(predict(object$mtrees[[i]]$btree$model, newdata = XX, ...))
} else {
res <- try(object$mtrees[[i]]$btree$predict(object$mtrees[[i]]$btree$model, newdata = XX, ...))
}
}
res <- cbind(1:N, res)
vote[res] <- vote[res] +1
}
RET <- factor(classes[apply(vote, 1, uwhich.max)])
RET
}
|
# Order the rows of data frame `d` by the ranks of the columns named in
# `cols`: descending rank of the first column, ties broken by ascending rank
# of the later columns (this mirrors the original behavior exactly, where
# the minus sign applied only to the first rank() term).
#
# Rewritten without eval(parse(text = ...)): the same multi-key ordering is
# built with do.call(order, ...) on a list of rank vectors.
#
# NOTE(review): if every column was meant to sort descending, negate each
# element of `keys` below — the original code only negated the first.
xdesc <- function (d, cols) {
  keys <- lapply(cols, function(cl) rank(d[[cl]]))
  keys[[1]] <- -keys[[1]]
  d[do.call(order, keys), ]
}
s <- paste("with(d, order(-", paste("rank", "(", cols, ")", sep="", collapse=","),
"))", sep = "")
d[eval(parse(text = s)), ]
} |
# Package test entry point: executed by R CMD check to run all testthat
# unit tests under tests/testthat/ for the mlr3learners.gss package.
library(testthat)
library(mlr3learners.gss)
test_check("mlr3learners.gss")
| /tests/testthat.R | no_license | mlr3learners/mlr3learners.gss | R | false | false | 76 | r | library(testthat)
library(mlr3learners.gss)
test_check("mlr3learners.gss")
|
\name{list_attributes}
\alias{list_attributes}
\title{List attributes}
\usage{
list_attributes(mart, dataset)
}
\arguments{
\item{mart}{mart object}
\item{dataset}{a dataset}
}
\value{
list of attributes
}
\description{
List attributes in a dataset
}
| /man/list_attributes.Rd | no_license | c5sire/biomart2 | R | false | false | 257 | rd | \name{list_attributes}
\alias{list_attributes}
\title{List attributes}
\usage{
list_attributes(mart, dataset)
}
\arguments{
\item{mart}{mart object}
\item{dataset}{a dataset}
}
\value{
list of attributes
}
\description{
List attributes in a dataset
}
|
# Exploratory Data Analysis — Plot 1.
# Data: UCI household power consumption; download and unzip it from
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
library(data.table)
# Read the full data set (kept as `DT` because the SQL below refers to it).
DT <- fread("../household_power_consumption.txt")
library(sqldf)
# Keep only the two days of interest: 1-2 February 2007.
feb_days <- sqldf("select * from DT where Date='1/2/2007' or Date='2/2/2007'")
# Red histogram of global active power over those two days.
# (as.numeric() turns the "?" missing-value markers into NA.)
hist(as.numeric(feb_days$Global_active_power),
     col = "red",
     main = paste("Global Active Power"),
     ylim = range(0:1200),
     xlab = "Global Active Power (kilowatts)")
# Copy the screen device to a PNG file, then close that device.
dev.copy(png, file = "../figure/plot1.png")
dev.off()
dev.off() | /code/plot1.R | no_license | gitcoursera/ExData_Plotting1 | R | false | false | 489 | r | # download and unzip it https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
library(data.table)
DT = fread("../household_power_consumption.txt")
library(sqldf)
res <- sqldf("select * from DT where Date='1/2/2007' or Date='2/2/2007'")
hist(as.numeric(res$Global_active_power), col="red",
main=paste("Global Active Power"),
ylim=range(0:1200),
xlab="Global Active Power (kilowatts)")
dev.copy(png, file = "../figure/plot1.png")
dev.off() |
library(shiny)
library(janitor)
#module1 for 2nd tab
clean_filterUI <- function(id1) {
  ns <- NS(id1)

  # Panel 1: cleaning controls (clean names / separate / unite) plus a
  # preview of the transformed table.
  cleaning_panel <- sidebarLayout(
    sidebarPanel(
      checkboxInput(ns("clean"), "Clean Names"),
      checkboxInput(ns("separate"), "Separate", FALSE),
      uiOutput(ns("sep_by")),
      checkboxInput(ns("unite"), "Unite", FALSE),
      uiOutput(ns("unite_by"))
    ),
    mainPanel(
      h3("Preview"),
      tableOutput(ns("data"))
    )
  )

  # Panel 2: dynamically generated per-column filters and the filtered table.
  filtering_panel <- sidebarLayout(
    sidebarPanel(
      h3("FILTER"),
      uiOutput(ns("filters"))
    ),
    mainPanel(tableOutput(ns("ftable")))
  )

  list(cleaning_panel, filtering_panel)
}
clean_filterServer <- function(input, output, session, tidied) {
  # Module server for the clean/filter tab.
  #
  # Args:
  #   input, output, session: standard Shiny module server arguments.
  #   tidied: the data frame to clean and filter.
  #
  # `raw()` applies every requested cleaning step (janitor name cleaning,
  # tidyr::separate, tidyr::unite) to `tidied`; the preview, the dynamic
  # filter widgets and the filtered table all derive from it.
  raw <- reactive({
    if (input$clean) {
      names(tidied) <- janitor::make_clean_names(names(tidied))
    }
    if (input$separate) {
      req(input$sep_col)
      req(input$sep)
      req(input$n_parts)
      tidied <- separate(
        tidied, !!input$sep_col,
        into = paste0("x", seq_len(input$n_parts)), sep = input$sep)
    }
    if (input$unite) {
      req(input$col_unite)
      tidied <- unite(tidied, !!input$new_col, !!input$col_unite)
    }
    tidied
  })
  ns <- session$ns
  # Separate controls are only rendered while the "Separate" box is ticked.
  output$sep_by <- renderUI({
    if (input$separate) {
      return(fluidRow(
        column(4, selectInput(ns("sep_col"), "Sep_Col", choices = names(raw()))),
        column(4, numericInput(ns("n_parts"), "Parts", NULL)),
        column(4, textInput(ns("sep"), "Separator"))))
    }
  })
  output$unite_by <- renderUI({unite_ui(ns, input$unite, names(raw()))})
  # BUG FIX: the original re-applied separate()/unite() here via
  # `raw() <- ...`, which is invalid R (a reactive has no `raw<-`
  # replacement function, so evaluating it errored at runtime) and would
  # have double-applied transforms that `raw()` already performs. The
  # filtered table therefore simply reuses `raw()`.
  sep_union <- reactive(raw())
  output$data <- renderTable(head(raw()))
  vars <- reactive(names(raw()))
  # One filter widget per column of the transformed data.
  output$filters <- renderUI({
    map(vars(), ~filter_UI(raw()[[.x]], .x))
  })
  filtered <- reactive({
    selected <- sep_union()
    map(vars(), ~filter_Server(selected[[.x]], input[[.x]], selected))
  })
  output$ftable <- renderTable(filtered())
}
| /ui2.R | no_license | hilljairus/dqa | R | false | false | 2,421 | r | library(shiny)
library(janitor)
#module1 for 2nd tab
clean_filterUI<-function(id1){
ns<-NS(id1)
list(sidebarLayout(
sidebarPanel(
checkboxInput(ns("clean"),"Clean Names"),
checkboxInput(ns("separate"),"Separate",FALSE),
uiOutput(ns("sep_by")),
checkboxInput(ns("unite"),"Unite",FALSE),
uiOutput(ns("unite_by"))
),
mainPanel(h3("Preview"),
tableOutput(ns("data"))
)
),
sidebarLayout(
sidebarPanel(
h3("FILTER"),
uiOutput(ns("filters"))),
mainPanel(tableOutput(ns("ftable")))
)
)
}
clean_filterServer <- function(input, output, session, tidied) {
  # Module server for the clean/filter tab.
  #
  # Args:
  #   input, output, session: standard Shiny module server arguments.
  #   tidied: the data frame to clean and filter.
  #
  # `raw()` applies every requested cleaning step (janitor name cleaning,
  # tidyr::separate, tidyr::unite) to `tidied`; the preview, the dynamic
  # filter widgets and the filtered table all derive from it.
  raw <- reactive({
    if (input$clean) {
      names(tidied) <- janitor::make_clean_names(names(tidied))
    }
    if (input$separate) {
      req(input$sep_col)
      req(input$sep)
      req(input$n_parts)
      tidied <- separate(
        tidied, !!input$sep_col,
        into = paste0("x", seq_len(input$n_parts)), sep = input$sep)
    }
    if (input$unite) {
      req(input$col_unite)
      tidied <- unite(tidied, !!input$new_col, !!input$col_unite)
    }
    tidied
  })
  ns <- session$ns
  # Separate controls are only rendered while the "Separate" box is ticked.
  output$sep_by <- renderUI({
    if (input$separate) {
      return(fluidRow(
        column(4, selectInput(ns("sep_col"), "Sep_Col", choices = names(raw()))),
        column(4, numericInput(ns("n_parts"), "Parts", NULL)),
        column(4, textInput(ns("sep"), "Separator"))))
    }
  })
  output$unite_by <- renderUI({unite_ui(ns, input$unite, names(raw()))})
  # BUG FIX: the original re-applied separate()/unite() here via
  # `raw() <- ...`, which is invalid R (a reactive has no `raw<-`
  # replacement function, so evaluating it errored at runtime) and would
  # have double-applied transforms that `raw()` already performs. The
  # filtered table therefore simply reuses `raw()`.
  sep_union <- reactive(raw())
  output$data <- renderTable(head(raw()))
  vars <- reactive(names(raw()))
  # One filter widget per column of the transformed data.
  output$filters <- renderUI({
    map(vars(), ~filter_UI(raw()[[.x]], .x))
  })
  filtered <- reactive({
    selected <- sep_union()
    map(vars(), ~filter_Server(selected[[.x]], input[[.x]], selected))
  })
  output$ftable <- renderTable(filtered())
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\docType{data}
\name{grattan_yellow}
\alias{grattan_yellow}
\title{Hex code for the colour: Grattan yellow (butternut pumpkin soup)}
\format{An object of class \code{character} of length 1.}
\usage{
grattan_yellow
}
\description{
#FFC35A
}
\keyword{datasets}
| /man/grattan_yellow.Rd | no_license | jonathananolan/grattantheme | R | false | true | 347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\docType{data}
\name{grattan_yellow}
\alias{grattan_yellow}
\title{Hex code for the colour: Grattan yellow (butternut pumpkin soup)}
\format{An object of class \code{character} of length 1.}
\usage{
grattan_yellow
}
\description{
#FFC35A
}
\keyword{datasets}
|
##########################################################
# Script for univariate analysis of lipidomics data
# This script runs a non-parametric rank-sum test (Wilcoxon / Mann-Whitney U)
# Furthermore visualizes data with a volcano plot
##########################################################
# Clear workspace
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace;
# prefer running the script in a fresh R session.
rm(list = ls())
# Load packages
library(tidyverse)   # read_csv, dplyr, stringr, ggplot2
library(patchwork)   # combining the two volcano ggplots with `+` below
library(limma)       # moderated t-statistics (lmFit / eBayes / topTable)
# Load data
lipidomics_data <- read_csv("data/02_lipidomics_data_tidy.csv")
lipidomics_info <- read_csv("data/02_lipidomics_data_info.csv")
# Variables: Mann Whitney & Limma thresholds (log fold change and adjusted pvalue)
log_thres_MW <- 0
pval_thres_MW <- 0.001
log_thres_limma <- 0
pval_thres_limma <- 0.001
# Pre-filter data for HIVnomets and HIVmets -------------------------------
# Per-group subsets (used for the fold-change ratio later) and the combined
# HIV-only data set with the control ("Ctrl") group removed.
HIV_NoMetS <- lipidomics_data %>%
  filter(str_detect(ID_Condition, "HIV_NoMetS"))
HIV_MetS <- lipidomics_data %>%
  filter(str_detect(ID_Condition, "HIV_MetS"))
HIVnomets_HIVmets <- lipidomics_data %>%
  filter(!str_detect(Condition, "Ctrl"))
# Mann Whitney ------------------------------------------------------------
# Check difference in lipid abundances between HIV-NoMetS vs. HIV-MetS:
# one two-sample Wilcoxon rank-sum test per lipid column (columns 4+).
# NOTE(review): sapply() yields a named numeric vector; piping it into
# as.data.frame() produces a single column literally named ".", which the
# `pval_HIVnomets_HIVmets$.` access below relies on.
pval_HIVnomets_HIVmets <- sapply(HIVnomets_HIVmets[,4:ncol(HIVnomets_HIVmets)], function(x) wilcox.test(x ~ HIVnomets_HIVmets$Condition)$p.value) %>%
  as.data.frame()
# Abundance ratio (log2 fold change): mean(MetS) / mean(NoMetS) per lipid
fc_HIVnomets_HIVmets <- apply(HIV_MetS[,4:ncol(HIVnomets_HIVmets)], 2, FUN=mean)/apply(HIV_NoMetS[,4:ncol(HIVnomets_HIVmets)], 2, FUN=mean)
fclog2_HIVnomets_HIVmets <- log2(fc_HIVnomets_HIVmets)
# Correct for multiple testing by Benjamini-Hochberg to determine FDR
pval_BH_HIVnomets_HIVmets <- p.adjust(pval_HIVnomets_HIVmets$., method = "BH")
# Overview of significant lipids between HIVnomets and HIVmets, ordered by FDR adjusted pvalue
stats_HIVnomets_HIVmets <- data.frame(pval_HIVnomets_HIVmets,
                                      pval_BH_HIVnomets_HIVmets,
                                      fclog2_HIVnomets_HIVmets)[order(pval_BH_HIVnomets_HIVmets),]
# Save statistics from the Mann-Whitney test, merged with the
# lipidomics_info table on the Biochemicals names.
stats_HIV_MW <- stats_HIVnomets_HIVmets %>%
  mutate(Biochemicals = row.names(stats_HIVnomets_HIVmets)) %>%
  select(., Biochemicals, pvalue = ., log2FC = fclog2_HIVnomets_HIVmets, adj_pvalue = pval_BH_HIVnomets_HIVmets)
stats_HIV_MW <- merge(x = stats_HIV_MW,
                      y = as.data.frame(lipidomics_info))
# Write MW stats to file
write_csv(stats_HIV_MW, "data/06_MWtest_HIV.csv")
# Mann Whitney: Volcano plot ------------------------------------------------------------
# Define regulated lipid abundances: a lipid is "Up"/"Down" when its
# FDR-adjusted p-value clears pval_thres_MW, split by the sign of log2FC.
stats_HIV_MW$diffexpressed <- "No"
stats_HIV_MW$diffexpressed[stats_HIV_MW$log2FC > log_thres_MW & stats_HIV_MW$adj_pvalue<pval_thres_MW] <- "Up"
stats_HIV_MW$diffexpressed[stats_HIV_MW$log2FC < log_thres_MW & stats_HIV_MW$adj_pvalue<pval_thres_MW] <- "Down"
# Volcano plot with ggplot illustrating the significant abundance between HIVnomets vs. HIVmets
volcano_mw <- ggplot(stats_HIV_MW, aes(x = log2FC,
                                       y = -log10(adj_pvalue),
                                       col = diffexpressed)) +
  geom_point() +
  xlim(-1.5, 1.5) +
  ylim(0, 16) +
  xlab(expression("Fold Change, Log"[2]*"")) +
  ylab(expression("FDR adjusted p-value, Log"[10]*"")) +
  # Dotted horizontal line marks the significance threshold.
  geom_hline(
    yintercept = c(-log10(pval_thres_MW),-log10(pval_thres_MW)),
    col = "black",
    linetype = "dotted",
    size = 1) +
  theme(legend.position = "none")+
  scale_colour_manual(values = c("red","grey", "forestgreen")) +
  labs(title = "Volcano plot: Mann Whitney U test")
ggsave("results/06_1_volcano_HIV_MW.png", plot = volcano_mw, device = "png")
# Limma test -------------------------------------------------------------------
# Check difference in lipid abundances between HIV-NoMetS vs. HIV-MetS using
# limma's moderated t-statistics on the log2-transformed lipid matrix.
# Transpose so lipids sit in rows and samples in columns (colnames = IDs).
lipid_df <- HIVnomets_HIVmets %>%
  select(everything(), -c(GENDER, Condition, ID_Condition)) %>%
  log2() %>%
  t()
colnames(lipid_df) <- HIVnomets_HIVmets$ID_Condition
lipid_df <- as.data.frame(lipid_df)
# Two groups in the lipidomics data, the control group is excluded
# (0 = HIV_NoMetS, 1 = HIV_MetS).
group <- HIVnomets_HIVmets %>%
  mutate(HIV_binary = case_when(Condition == "HIV_NoMetS" ~ "0",
                                Condition == "HIV_MetS" ~ "1",
                                TRUE ~ Condition))
design <- cbind(Group = as.numeric(group$HIV_binary))
# Fit model and compute moderated t-statistics
fit <- lmFit(lipid_df, design)
fit <- eBayes(fit)
# Sorting by raw pvalue between lipid concentration from HIV_NoMets vs. HIV_MetS
stats_HIV_limma <- topTable(fit, sort.by = "P", n = Inf)
# Write toptable to file, merged with the lipidomics_info table
stats_HIV_limma_df <- rownames_to_column(stats_HIV_limma, "Biochemicals")
stats_HIV_limma_df <- merge(x = stats_HIV_limma_df,
                            y = as.data.frame(lipidomics_info))
write_csv(stats_HIV_limma_df, "data/06_limmatest_HIV.csv")
# Limma test: Volcano plot -------------------------------------------------------------
# Define regulated lipid abundances (same Up/Down rule as the MW plot,
# using limma's logFC / adj.P.Val columns).
stats_HIV_limma$diffexpressed <- "No"
stats_HIV_limma$diffexpressed[stats_HIV_limma$logFC > log_thres_limma & stats_HIV_limma$adj.P.Val<pval_thres_limma] <- "Up"
stats_HIV_limma$diffexpressed[stats_HIV_limma$logFC < log_thres_limma & stats_HIV_limma$adj.P.Val<pval_thres_limma] <- "Down"
# Volcano plot with ggplot illustrating the significant abundance between HIVnomets vs. HIVmets
volcano_limma <- ggplot(stats_HIV_limma, aes(x = logFC,
                                             y = -log10(adj.P.Val),
                                             col = diffexpressed)) +
  geom_point() +
  xlim(-1.5, 1.5) +
  ylim(0, 16) +
  xlab(expression("Fold Change, Log"[2]*"")) +
  ylab(expression("FDR adjusted p-value, Log"[10]*"")) +
  # Dotted horizontal line marks the significance threshold.
  geom_hline(
    yintercept = c(-log10(pval_thres_limma),-log10(pval_thres_limma)),
    col = "black",
    linetype = "dotted",
    size = 1) +
  theme(legend.position = "right")+
  scale_colour_manual(values = c("red","grey", "forestgreen")) +
  labs(title = "Volcano plot: Limma test", col = "Significant \ndifferential \nabundance")
ggsave("results/06_2_volcano_HIV_limma.png", plot = volcano_limma, device = "png") #, width = 6.17, height = 3.1)
# Combine volcano plots Limma test + Mann whitney test (lipidomics);
# patchwork supplies the `+` composition of the two ggplots.
ggsave("results/06_3_volcano_plots.png", plot = volcano_mw + volcano_limma, device = "png", width = 15) #, width = 6.17, height = 3.1)
# Significant lipids ------------------------------------------------------
# Names of lipids passing the FDR threshold under each method.
sign_lipids_MW <- subset(stats_HIV_MW, stats_HIV_MW$adj_pvalue <= pval_thres_MW )
sign_lipids_limma <- subset(stats_HIV_limma_df, stats_HIV_limma_df$adj.P.Val <= pval_thres_limma)
method_list_univariate <- list('Mann Whitney U Test' = sign_lipids_MW$Biochemicals,
                               'Limma test with "limma"' = sign_lipids_limma$Biochemicals)
# Save list to a file
save(method_list_univariate, file="data/06_methods_univariate.RData") | /R/06_univariate_analysis_lipidomics.R | no_license | neogilab/COCOMO_lipidomics | R | false | false | 6,956 | r | ##########################################################
# Script for univariate analysis of lipidomics data
# This script runs a non-parametric rank-sum test (Wilcoxon / Mann-Whitney U)
# Furthermore visualizes data with a volcano plot
##########################################################
# Clear workspace
rm(list = ls())
# Load packages
library(tidyverse)
library(patchwork)
library(limma)
# Load data
lipidomics_data <- read_csv("data/02_lipidomics_data_tidy.csv")
lipidomics_info <- read_csv("data/02_lipidomics_data_info.csv")
# Variables: Mann Whitney & Limma thresholds (log fold change and adjusted pvalue)
log_thres_MW <- 0
pval_thres_MW <- 0.001
log_thres_limma <- 0
pval_thres_limma <- 0.001
# Pre-filter data for HIVnomets and HIVmets -------------------------------
HIV_NoMetS <- lipidomics_data %>%
filter(str_detect(ID_Condition, "HIV_NoMetS"))
HIV_MetS <- lipidomics_data %>%
filter(str_detect(ID_Condition, "HIV_MetS"))
HIVnomets_HIVmets <- lipidomics_data %>%
filter(!str_detect(Condition, "Ctrl"))
# Mann Whitney ------------------------------------------------------------
# Check difference in lipid abundances between HIV-NoMetS vs. HIV-MetS
pval_HIVnomets_HIVmets <- sapply(HIVnomets_HIVmets[,4:ncol(HIVnomets_HIVmets)], function(x) wilcox.test(x ~ HIVnomets_HIVmets$Condition)$p.value) %>%
as.data.frame()
# Abundance ratio (log2 fold change)
fc_HIVnomets_HIVmets <- apply(HIV_MetS[,4:ncol(HIVnomets_HIVmets)], 2, FUN=mean)/apply(HIV_NoMetS[,4:ncol(HIVnomets_HIVmets)], 2, FUN=mean)
fclog2_HIVnomets_HIVmets <- log2(fc_HIVnomets_HIVmets)
# Correct for multiple testing by Benjamini-Hochberg to determine FDR
pval_BH_HIVnomets_HIVmets <- p.adjust(pval_HIVnomets_HIVmets$., method = "BH")
# Overview of significant lipids between HIVnomets and HIVmets, ordered by FDR adjusted pvalue
stats_HIVnomets_HIVmets <- data.frame(pval_HIVnomets_HIVmets,
pval_BH_HIVnomets_HIVmets,
fclog2_HIVnomets_HIVmets)[order(pval_BH_HIVnomets_HIVmets),]
# Save statistics from Mann-Whitney test
stats_HIV_MW <- stats_HIVnomets_HIVmets %>%
mutate(Biochemicals = row.names(stats_HIVnomets_HIVmets)) %>%
select(., Biochemicals, pvalue = ., log2FC = fclog2_HIVnomets_HIVmets, adj_pvalue = pval_BH_HIVnomets_HIVmets)
stats_HIV_MW <- merge(x = stats_HIV_MW,
y = as.data.frame(lipidomics_info))
# Write MW stats to file
write_csv(stats_HIV_MW, "data/06_MWtest_HIV.csv")
# Mann Whitney: Volcano plot ------------------------------------------------------------
# Define regulated lipid abundances
stats_HIV_MW$diffexpressed <- "No"
stats_HIV_MW$diffexpressed[stats_HIV_MW$log2FC > log_thres_MW & stats_HIV_MW$adj_pvalue<pval_thres_MW] <- "Up"
stats_HIV_MW$diffexpressed[stats_HIV_MW$log2FC < log_thres_MW & stats_HIV_MW$adj_pvalue<pval_thres_MW] <- "Down"
# Volcano plot with ggplot illustrating the significant abundance between HIVnomets vs. HIVmets
volcano_mw <- ggplot(stats_HIV_MW, aes(x = log2FC,
y = -log10(adj_pvalue),
col = diffexpressed)) +
geom_point() +
xlim(-1.5, 1.5) +
ylim(0, 16) +
xlab(expression("Fold Change, Log"[2]*"")) +
ylab(expression("FDR adjusted p-value, Log"[10]*"")) +
geom_hline(
yintercept = c(-log10(pval_thres_MW),-log10(pval_thres_MW)),
col = "black",
linetype = "dotted",
size = 1) +
theme(legend.position = "none")+
scale_colour_manual(values = c("red","grey", "forestgreen")) +
labs(title = "Volcano plot: Mann Whitney U test")
ggsave("results/06_1_volcano_HIV_MW.png", plot = volcano_mw, device = "png")
# Limma test -------------------------------------------------------------------
# Check difference in lipid abundances between HIV-NoMetS vs. HIV-MetS
lipid_df <- HIVnomets_HIVmets %>%
select(everything(), -c(GENDER, Condition, ID_Condition)) %>%
log2() %>%
t()
colnames(lipid_df) <- HIVnomets_HIVmets$ID_Condition
lipid_df <- as.data.frame(lipid_df)
# Two groups in the lipidomics data, the control group is excluded
group <- HIVnomets_HIVmets %>%
mutate(HIV_binary = case_when(Condition == "HIV_NoMetS" ~ "0",
Condition == "HIV_MetS" ~ "1",
TRUE ~ Condition))
design <- cbind(Group = as.numeric(group$HIV_binary))
# Fit model and compute moderated t-statistics
fit <- lmFit(lipid_df, design)
fit <- eBayes(fit)
# Sorting by raw pvalue between lipid concentration from HIV_NoMets vs. HIV_MetS
stats_HIV_limma <- topTable(fit, sort.by = "P", n = Inf)
# Write toptable to file
stats_HIV_limma_df <- rownames_to_column(stats_HIV_limma, "Biochemicals")
stats_HIV_limma_df <- merge(x = stats_HIV_limma_df,
y = as.data.frame(lipidomics_info))
write_csv(stats_HIV_limma_df, "data/06_limmatest_HIV.csv")
# Limma test: Volcano plot -------------------------------------------------------------
# Define regulated lipid abundances
stats_HIV_limma$diffexpressed <- "No"
stats_HIV_limma$diffexpressed[stats_HIV_limma$logFC > log_thres_limma & stats_HIV_limma$adj.P.Val<pval_thres_limma] <- "Up"
stats_HIV_limma$diffexpressed[stats_HIV_limma$logFC < log_thres_limma & stats_HIV_limma$adj.P.Val<pval_thres_limma] <- "Down"
# Volcano plot with ggplot illustrating the significant abundance between HIVnomets vs. HIVmets
volcano_limma <- ggplot(stats_HIV_limma, aes(x = logFC,
y = -log10(adj.P.Val),
col = diffexpressed)) +
geom_point() +
xlim(-1.5, 1.5) +
ylim(0, 16) +
xlab(expression("Fold Change, Log"[2]*"")) +
ylab(expression("FDR adjusted p-value, Log"[10]*"")) +
geom_hline(
yintercept = c(-log10(pval_thres_limma),-log10(pval_thres_limma)),
col = "black",
linetype = "dotted",
size = 1) +
theme(legend.position = "right")+
scale_colour_manual(values = c("red","grey", "forestgreen")) +
labs(title = "Volcano plot: Limma test", col = "Significant \ndifferential \nabundance")
ggsave("results/06_2_volcano_HIV_limma.png", plot = volcano_limma, device = "png") #, width = 6.17, height = 3.1)
# Combine volcano plots Limma test + Mann whitney test (lipidomics)
ggsave("results/06_3_volcano_plots.png", plot = volcano_mw + volcano_limma, device = "png", width = 15) #, width = 6.17, height = 3.1)
# Significant lipids ------------------------------------------------------
sign_lipids_MW <- subset(stats_HIV_MW, stats_HIV_MW$adj_pvalue <= pval_thres_MW )
sign_lipids_limma <- subset(stats_HIV_limma_df, stats_HIV_limma_df$adj.P.Val <= pval_thres_limma)
method_list_univariate <- list('Mann Whitney U Test' = sign_lipids_MW$Biochemicals,
'Limma test with "limma"' = sign_lipids_limma$Biochemicals)
# Save list to a file
save(method_list_univariate, file="data/06_methods_univariate.RData") |
# CA vs OH house-price comparison (prices in thousands, per axis labels).
# The CSVs have no header row, so the single column is read as V1.
ca <- read.csv("CA_house_prices.csv", header = FALSE)
oh <- read.csv("OH_house_prices.csv", header = FALSE)
# Side-by-side boxplots of the two price distributions.
boxplot(ca$V1, oh$V1, main="Shireesha's Graph",
        col="red",
        names=c("California", "Ohio"),
        ylab="Prices")
# Histogram of CA prices in bins of width 500.
hist(ca[, 1], breaks = seq(0, 3500, by = 500), col = "orange", xlab = "CA House Prices (in Thousands)",
     ylab = "Frequency", main = "Shireesha's Graph")
# Empirical CDFs of both states overlaid on one plot.
CAecdf <- ecdf(ca[, 1])
OHecdf <- ecdf(oh[, 1])
plot(CAecdf, pch = 1, xlab = "House Prices (in Thousands)", ylab = "Cumulative Percent",
     main = "Shireesha")
lines(OHecdf, col = "blue", pch = 3)
# NOTE(review): the continuation line below passes lwd = 1:3 for only two
# legend entries; a scalar or length-2 lwd is what is intended.
legend(2000, 0.6, legend = c("California", "Ohio"), pch = c(1, 3),
col = c("black", "blue"), lwd = 1:3) | /Data Mining/DM Assignment2/assg-2qes-7.R | no_license | CH-Shireesha/DataScience_2019501092 | R | false | false | 694 | r | ca <- read.csv("CA_house_prices.csv",header = F)
oh <- read.csv("OH_house_prices.csv",header = F)
boxplot(ca$V1, oh$V1, main="Shireesha's Graph",
col="red",
names=c("California", "Ohio"),
ylab="Prices")
hist(ca[, 1], breaks = seq(0, 3500, by = 500), col = "orange", xlab = "CA House Prices (in Thousands)",
ylab = "Frequency", main = "Shireesha's Graph")
CAecdf <- ecdf(ca[, 1])
OHecdf <- ecdf(oh[, 1])
plot(CAecdf, pch = 1, xlab = "House Prices (in Thousands)", ylab = "Cumulative Percent",
main = "Shireesha")
lines(OHecdf, col = "blue", pch = 3)
legend(2000, 0.6, legend = c("California", "Ohio"), pch = c(1, 3),
col = c("black", "blue"), lwd = 1:3) |
# Read the data; "?" marks missing values in this file, so declare it up
# front. This keeps Global_active_power numeric — the original
# as.numeric(levels(x))[x] factor trick silently breaks on R >= 4.0, where
# read.table no longer converts strings to factors (levels() returns NULL).
w <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                na.strings = "?")
# Keep 2007-02-01 and 2007-02-02 (dates are stored as d/m/Y strings).
obs_date <- as.Date(w$Date, format = "%d/%m/%Y")
sub3 <- w[obs_date >= as.Date("2007-02-01") & obs_date <= as.Date("2007-02-02"), ]
k <- as.numeric(sub3$Global_active_power)
# Render straight into the PNG device: dev.copy() of a screen device can
# clip or rescale the plot. The dev.off() on the next line closes it.
png("plot2.png")
plot.ts(k, xlab = "", ylab = "Global Active Power (kilowatts)", main = "")
dev.off() | /plot2.R | no_license | sgkamal/ExData_Plotting1 | R | false | false | 401 | r | w <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
sub3 <- w[as.Date(w$Date, format = "%d/%m/%Y") >= as.Date("2007-02-01") & as.Date(w$Date, format = "%d/%m/%Y") <= as.Date("2007-02-02"),]
k <- as.numeric(levels(sub3$Global_active_power))[sub3$Global_active_power]
plot.ts(k,xlab = "", ylab = "Global Active Power (kilowatts)", main = "")
dev.copy(png, "plot2.png")
dev.off() |
#' Train a Poisson Gaussian process factor analysis model
#'
#' This function trains a Poisson Gaussian process factor analysis model with a locally periodic kernel in Stan.
#'
#' @export
#' @param X data frame. The training data set, consisting of counts. Columns represent variables, rows represent observations.
#' @param ts vector. Time-points of observations in X.
#' @param gp vector. Groups for each observation.
#' @param nfac numeric. Number of factors.
#' @param nit numeric. Number of iterations.
#' @param nchain numeric. Number of chains.
#' @param period_length numeric. Length of the period.
#' @param prior_r2 vector. Parameters (2) for the inverse-gamma prior distribution for the lengthscale of the squared exponential
#' part of the locally periodic kernel.
#' @param prior_r3 vector. Parameters (2) for the inverse-gamma prior distribution for the lengthscale of the periodic
#' part of the locally periodic kernel.
#' @param transform_t function. Function for the transformation of the time-series (for example if we want it on a specific interval).
#' @param tseed numeric. Seed for sampling. Defaults to 1.
#' @param ... Further arguments passed on to \code{rstan::sampling}.
#' @return A list.
#' \item{data}{Data that goes into sampling.}
#' \item{samps}{An object of S4 class \code{stanfit}. Samples.}
#' \item{gmap}{Mapping of groups to indexes.}
train_gPGPFA_LP <- function (X, ts, gp, nfac, nit, nchain, period_length,
                             prior_r2, prior_r3,
                             transform_t, tseed = NULL, ...) {
  # Order observations by group so the per-group counts below line up with
  # the group-to-index mapping.
  tmp <- bind_cols(as.data.frame(X), ts = ts, group = gp) %>%
    arrange(group)
  n <- nrow(X)
  m <- ncol(X)
  p <- nfac
  X_ar <- dplyr::select(tmp, - c(ts, group)) %>%
    as.matrix()
  # Number of observations per group, in group order.
  g <- count(tmp, group) %>%
    dplyr::select(n) %>%
    unlist() %>%
    as.vector()
  gmap <- count(tmp, group) %>%
    bind_cols(ind = 1:length(g))
  # rescale time-series
  t_ar <- transform_t(tmp$ts)
  stan_data <- list(
    N = n,
    NG = length(g),
    M = m,
    P = p,
    t = t_ar,
    G = as.array(g),
    X = t(X_ar),
    p_dist = c(1, 1, 0),
    p_val = c(prior_r2, prior_r3, 0),
    period_length = period_length
  )
  # Default the seed once instead of duplicating the whole sampling call:
  # the original if/else branches differed only in `seed`.
  if (is.null(tseed)) {
    tseed <- 1
  }
  samps <- rstan::sampling(stanmodels$gPGPFA_LP,
                           stan_data,
                           seed = tseed,
                           chains = nchain,
                           iter = nit,
                           ...)
  out_list <- list(data = stan_data,
                   samps = samps,
                   gmap = gmap)
  return(out_list)
}
| /R/train_gPGPFA_LP.R | no_license | bstatcomp/gfac | R | false | false | 2,893 | r | #' Train a Poisson Gaussian process factor analysis model
#'
#' This function trains a Poisson Gaussian process factor analysis model with a locally periodic kernel in Stan.
#'
#' @export
#' @param X data frame. The training data set, consisting of counts. Columns represent variables, rows represent observations.
#' @param ts vector. Time-points of observations in X.
#' @param gp vector. Groups for each observation.
#' @param nfac numeric. Number of factors.
#' @param nit numeric. Number of iterations.
#' @param nchain numeric. Number of chains.
#' @param period_length numeric. Length of the period.
#' @param prior_r2 vector. Parameters (2) for the inverse-gamma prior distribution for the lengthscale of the squared exponential
#' part of the locally periodic kernel.
#' @param prior_r3 vector. Parameters (2) for the inverse-gamma prior distribution for the lengthscale of the periodic
#' part of the locally periodic kernel.
#' @param transform_t function. Function for the transformation of the time-series (for example if we want it on a specific interval).
#' @param tseed numeric. Seed for sampling Defaults to 1.
#' @return A list.
#' \item{data}{Data that goes into sampling.}
#' \item{samps}{An object of S4 class \code{stanfit}. Samples.}
#' \item{gmap}{Mapping of groups to indexes.}
train_gPGPFA_LP <- function (X, ts, gp, nfac, nit, nchain, period_length,
prior_r2, prior_r3,
transform_t, tseed = NULL,...) {
tmp <- bind_cols(as.data.frame(X), ts = ts, group = gp) %>%
arrange(group)
n <- nrow(X)
m <- ncol(X)
p <- nfac
X_ar <- dplyr::select(tmp, - c(ts, group)) %>%
as.matrix()
g <- count(tmp, group) %>%
dplyr::select(n) %>%
unlist() %>%
as.vector()
gmap <- count(tmp, group) %>%
bind_cols(ind = 1:length(g))
# rescale time-series
t_ar <- transform_t(tmp$ts)
stan_data <- list(
N = n,
NG = length(g),
M = m,
P = p,
t = t_ar,
G = as.array(g),
X = t(X_ar),
p_dist = c(1, 1, 0),
p_val = c(prior_r2, prior_r3, 0),
period_length = period_length
)
if (!is.null(tseed)) {
samps <- rstan::sampling(stanmodels$gPGPFA_LP,
stan_data,
seed = tseed,
chains = nchain,
iter = nit,
...)
} else {
samps <- rstan::sampling(stanmodels$gPGPFA_LP,
stan_data,
seed = 1,
chains = nchain,
iter = nit,
...)
}
out_list <- list(data = stan_data,
samps = samps,
gmap = gmap)
return(out_list)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssh_functions.R
\name{get_function_list}
\alias{get_function_list}
\title{Get function list}
\usage{
get_function_list(project_name, my_github = "Giappo")
}
\value{
function list
}
\description{
NOT WORKING YET
}
\author{
Giovanni Laudanno
}
| /man/get_function_list.Rd | no_license | TheoPannetier/jap | R | false | true | 320 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssh_functions.R
\name{get_function_list}
\alias{get_function_list}
\title{Get function list}
\usage{
get_function_list(project_name, my_github = "Giappo")
}
\value{
function list
}
\description{
NOT WORKING YET
}
\author{
Giovanni Laudanno
}
|
/Introduction to R/R_vector.R | no_license | jkoooong/stat_jkoooong | R | false | false | 1,552 | r | ||
#'Main SMVCIR function
#'
#'Build a Sliced Mean Variance Covariance Inverse Regression model.
#'
#'@param group A character string specifying the name of the class variable of interest in your dataset.
#'@param data A data frame (including your group variable).
#'@param pdt Percentage of SMVCIR discrimination desired from dimension
#'@param level Level of dimensionality test
#'@param test Types of tests, can use either, neither, or both.
#'@param empsimss Number of draws to use in performing each dimensionality test.
#'@param scree_plot If TRUE, a scree plot of cumulative percentage of variation explained by discriminant dimensions is produced.
#'
#'@examples
#'
#'
#'library(caret)
#'train<-createDataPartition(pima$diabetes, p = .8, list = FALSE)
#'pim.smv<-smvcir("diabetes", data = pima[train,], test = T) ###Build smvcir model on training set
#'summary(pim.smv)
#'
#'@export
smvcir<-function (group, data, pdt = 100, level = 0.05, test = FALSE, scree_plot=FALSE)
{
empTest = FALSE
apempTest = FALSE
if (test == TRUE) {
apempTest = TRUE
}
compcases <- data.frame(complete.cases(data))
compcases <- cbind(compcases, t(t(1:nrow(data))))
data_nu <- na.omit(data)
namelist <- attr(data, "names")
groupindex <- 0
for (i in 1:length(namelist)) {
if (namelist[i] == group) {
groupindex <- i
break
}
}
if (groupindex == 0) {
return(NAN)
}
if (groupindex == dim(data_nu)[2]) {
stage1readydat <- data.matrix(data_nu)
} else if (groupindex == 1) {
stage1readydat <- data.matrix(data_nu[, c(2:dim(data_nu)[2],
1)])
} else {
stage1readydat <- data.matrix(data_nu[, c(1:(groupindex -
1), (groupindex + 1):dim(data_nu)[2], groupindex)])
}
rm(data_nu)
n <- dim(stage1readydat)[1]
k <- dim(stage1readydat)[2] - 1
g <- max(stage1readydat[, k + 1])
class_labels<-levels(data[,groupindex]) #####class_labels
standat <- cbind(scale(stage1readydat[, 1:k], center = TRUE,
scale = TRUE), stage1readydat[, k + 1])
xbar <- t(t(colMeans(stage1readydat[, 1:k])))
sigmaminusoh <- matrix(0, nrow = k, ncol = k)
diag(sigmaminusoh) <- t(apply(stage1readydat[, 1:k], 2, sd))^(-1)
ind <- stage1readydat[, (k + 1)]
nupart = matrix(rep(0, k), ncol = 1)
deltapart = matrix(rep(0, k), ncol = 1)
storesigzi = matrix(rep(0, k), ncol = 1)
storecapdel = matrix(rep(0, k), ncol = 1)
storecapdel0 = matrix(rep(0, k), ncol = 1)
storevecd = matrix(rep(0, k), ncol = 1)
ni = rep(0, g)
for (i in 1:g) {
xg = stage1readydat[ind == i, 1:k]
ni[i] = length(xg[, 1])
xbari = t(t(colMeans(xg)))
zbari = sigmaminusoh %*% (xbari - xbar)
sigzi <- sigmaminusoh %*% var(xg) %*% sigmaminusoh
storesigzi = cbind(storesigzi, sigzi)
vecd = diag(sigzi)
storevecd = cbind(storevecd, vecd)
nui = sqrt(ni[i]/n) * zbari
nupart = cbind(nupart, nui)
}
nupart = nupart[, 2:(g + 1)]
storevecd = storevecd[, 2:(g + 1)]
storesigzi = storesigzi[, 2:(k * g + 1)]
sigzbar = matrix(rep(0, k^2), ncol = k)
ib = 1
ie = k
for (i in 1:g) {
sigzbar = sigzbar + (ni[i]/n) * storesigzi[, ib:ie]
ib = ib + k
ie = ie + k
}
sigzbard = diag(sigzbar)
ib = 1
ie = k
for (i in 1:g) {
deli = sqrt(ni[i]/n) * (storevecd[, i] - sigzbard)
deltapart = cbind(deltapart, deli)
capdeli = sqrt(ni[i]/n) * (storesigzi[, ib:ie] - sigzbar)
capdeli0 = capdeli - diag(diag(capdeli))
storecapdel <- cbind(storecapdel, capdeli)
storecapdel0 <- cbind(storecapdel0, capdeli0)
ib = ib + k
ie = ie + k
}
deltapart <- deltapart[, 2:(g + 1)]
storecapdel <- storecapdel[, 2:(k * g + 1)]
storecapdel0 <- storecapdel0[, 2:(k * g + 1)]
spansetF = cbind(storecapdel0, deltapart, nupart)
kernelF = spansetF %*% t(spansetF)
if (empTest == TRUE | apempTest == TRUE) {
estclt <- feststdclt1(g, stage1readydat)
estgrpmom <- eststddelta1(list(eu = estclt$eu, gprop = estclt$gprop,
g = estclt$g, k = estclt$k))
gprop = estclt$gprop
estvar <- eststddelta2(list(ef = estgrpmom$ef, g = g,
k = k, gprop = gprop))
eststd <- eststddelta3(list(ef = estvar$ef, g = g, k = k,
gprop = gprop))
estcentvar <- eststddelta4(list(ef = eststd$ef, g = g,
k = k, gprop = gprop))
estpropwt <- eststddelta5(list(ef = estcentvar$ef, g = g,
k = k, gprop = gprop))
eststackvarint <- eststackvar(list(ef = estpropwt$ef,
g = g, k = k, gprop = gprop))
vals <- eigen(kernelF, symmetric = TRUE)$values
or <- rev(order(abs(vals)))
evalues <- vals[or]
esteigensmvcir1 <- evalues
}
tmp = svd(spansetF)
sumsing = rep(0, k)
for (j in 1:k) {
sumsing[j] = sum(tmp$d[1:j])
}
sumsing = (100/sumsing[k]) * sumsing
scree = cbind(1:k, sumsing)
dpDIM = -1
for (j in 1:k) {
if (sumsing[j] >= pdt & dpDIM == -1) {
dpDIM = j
}
}
if (dpDIM == -1) {
dpDIM = k
}
if (scree_plot == TRUE) {
x11()
plot(c(0, scree[, 1]), c(0, scree[, 2]), xlab = "Singular value",
ylab = "Percentage", ylim = c(0, 100), type = "l",
main = "Scree Plot SV %")
abline(h = pdt)
}
emppvals <- matrix(NA, nrow = k, ncol = 1)
apemppvals <- matrix(NA, nrow = k, ncol = 1)
if (empTest == TRUE | apempTest == TRUE) {
vf <- matrix(0, nrow = g * k + g * k * k + g * k, ncol = g *
k + g * k * k + g * k)
vf[(1:(g * k + g * k * k)), (1:(g * k + g * k * k))] <- (estpropwt$d5 %*%
estcentvar$d4 %*% eststd$d3 %*% estvar$d2 %*% estgrpmom$d1 %*%
estclt$vu %*% t(estgrpmom$d1) %*% t(estvar$d2) %*%
t(eststd$d3) %*% t(estcentvar$d4) %*% t(estpropwt$d5))
for (i in 1:k) {
tstat <- n * sum(esteigensmvcir1[(i):length(esteigensmvcir1)])
tsmvcird <- i - 1
bigK <- kunstack(k, as.matrix(eststackvarint$ef))
sv <- svd(bigK, nu = nrow(bigK), nv = ncol(bigK))
gammanot <- sv$u[, (tsmvcird + 1):(ncol(sv$u))]
psinot <- sv$v[, (tsmvcird + 1):ncol(sv$v)]
dcmat <- ((t(psinot) %x% t(gammanot)) %*% eststackvarint$Perm %*%
vf %*% t(eststackvarint$Perm) %*% (psinot %x%
gammanot))
if (empTest == TRUE) {
devalues <- eigen(dcmat, symmetric = TRUE)$values
exceed = 0
for (j in 1:empsimss) {
realz <- t(devalues) %*% t(t(rchisq(length(devalues),
1)))
if (realz >= tstat) {
exceed <- exceed + 1
}
}
pvalue <- exceed/empsimss
emppvals[i, 1] <- pvalue
}
if (apempTest == TRUE) {
trdcmat <- sum(eigen(dcmat, symmetric = TRUE)$values)
trdcmat2 <- sum(eigen(dcmat %*% t(dcmat), symmetric = TRUE)$values)
d_num <- round((trdcmat^2)/trdcmat2)
scalecorrectstat <- tstat * ((trdcmat/d_num)^(-1))
pvalue <- 1 - pchisq(scalecorrectstat, d_num)
apemppvals[i, 1] <- pvalue
}
}
}
tmp.eig = eigen(kernelF)
wmati = standat[, 1:k] %*% tmp.eig$vectors
wmati = cbind(wmati, standat[, k + 1])
stdcoeffmat <- matrix(NA, nrow = k, ncol = k)
x <- cbind(scale(stage1readydat[, 1:k], center = TRUE, scale = TRUE),
rep(1, nrow(stage1readydat[, 1:k])))
for (i in 1:k) {
stdcoeffmat[1:k, i] <- t(t(lm.fit(y = wmati[, i], x = x)$coefficients[1:k]))[1:k,
1]
norm <- sum(stdcoeffmat[1:k, i] * stdcoeffmat[1:k, i])
stdcoeffmat[1:k, i] <- stdcoeffmat[1:k, i]/sqrt(norm)
}
if (empTest == TRUE | apempTest == TRUE) {
if (empTest == TRUE) {
empDIM = -1
for (i in 1:k) {
if (emppvals[i] >= level & empDIM == -1) {
empDIM = i - 1
}
}
if (empDIM == -1) {
empDIM = k
}
}
else {
empDIM = -1
}
if (apempTest == TRUE) {
apempDIM = -1
for (i in 1:k) {
if (apemppvals[i] >= level & apempDIM == -1) {
apempDIM = i - 1
}
}
if (apempDIM == -1) {
apempDIM = k
}
}
else {
apempDIM = -1
}
chosenDIM = max(empDIM, apempDIM)
}
printit1 <- matrix("", 7, 1)
printit1[1, 1] <- "SMVCIR"
printit1[2, 1] <- paste("# Groups: ", g, sep = "")
printit1[3, 1] <- paste("# Predictors: ", k, sep = "")
printit1[4, 1] <- paste("Observations used: ", nrow(standat),
sep = "")
printit1[5, 1] <- paste("Total Observations: ", nrow(data),
sep = "")
if (empTest == TRUE | apempTest == TRUE) {
printit1[6, 1] <- paste("Dimension: ", chosenDIM, ", at level: ",
level, sep = "")
}
printit1[7, 1] <- paste("Dimension: ", dpDIM, ", provides ",
round(sumsing[dpDIM], 0), "% Discrimination", sep = "")
rownames(printit1) <- rep("", 7)
colnames(printit1) <- rep("", 1)
#print(printit1, quote = FALSE) ##First output for summary function
pvalmat<-NULL
if (empTest == TRUE | apempTest == TRUE) {
printit2 <- matrix("", 1, 1)
printit2[1, 1] <- "Dimensionality Test P-Values"
rownames(printit2) <- c("")
colnames(printit2) <- c("")
#print(printit2, quote = FALSE)
if (apempTest == TRUE & empTest == TRUE) {
pvalmat <- round(cbind(emppvals, apemppvals), 3)
colnames(pvalmat) <- c("Empirical", "Approximate Empirical")
}
else if (apempTest == TRUE) {
pvalmat <- round(t(t(apemppvals)), 3)
colnames(pvalmat) <- c("Approximate Empirical")
}
else {
pvalmat <- round(t(t(emppvals)), 3)
colnames(pvalmat) <- c("Empirical")
}
rownamesIt <- 0:(k - 1)
if (empDIM > -1) {
if (empDIM < k) {
rownamesIt[empDIM + 1] <- paste("E ", rownamesIt[empDIM +
1], sep = "")
}
}
if (apempDIM > -1) {
if (apempDIM < k) {
rownamesIt[apempDIM + 1] <- paste("AE ", rownamesIt[apempDIM +
1], sep = "")
}
}
rownames(pvalmat) <- rownamesIt
#print(pvalmat, quote = FALSE)
}
prednames <- rep("", k)
i = 1
for (j in 1:(k + 1)) {
if (names(data)[j] != group) {
prednames[i] <- names(data)[j]
i = i + 1
}
}
rownames(stdcoeffmat) <- prednames
colnames(stdcoeffmat) <- paste("D", 1:k, sep = "")
#printit <- matrix("", 1, 1)
#printit[1, 1] <- "Standardized Coefficients"
#rownames(printit) <- c("")
#colnames(printit) <- c("")
#print(printit, quote = FALSE)
# if (empTest == TRUE | apempTest == TRUE) {
# print(round(stdcoeffmat[, 1:max(min(chosenDIM, dpDIM),
# 1)], 3), quote = FALSE)
#}
#else {
# print(round(stdcoeffmat[, 1:(max(dpDIM, 1))], 3), quote = FALSE)
#}
colnames(stage1readydat) <- c(prednames, group)
colnames(standat) <- c(prednames, group)
transdat <- data.frame(wmati)
names(transdat) <- c(paste("D", rep(1:k), sep = ""), group)
spanFnames <- character(length = ncol(spansetF))
nb <- ncol(spansetF)
for (p in 1:nb) {
if (p <= g * k) {
if (floor(p/k) < p/k) {
tempgroup <- floor(p/k) + 1
covcolm <- p - floor(p/k) * k
}
else {
tempgroup <- floor(p/k)
covcolm = k
}
spanFnames[p] <- paste("C", tempgroup, ".", covcolm,
sep = "")
}
else if (p <= g * k + g) {
tempgroup <- p - g * k
spanFnames[p] <- paste("V", tempgroup, sep = "")
}
else {
tempgroup <- p - g * k - g
spanFnames[p] <- paste("M", tempgroup, sep = "")
}
}
colnames(spansetF) <- spanFnames
if (empTest == TRUE | apempTest == TRUE) {
if (min(chosenDIM, dpDIM) > 1) {
TRANScm <- cor(transdat[, 1:(max(min(chosenDIM, dpDIM),
1))])
maxit <- max(TRANScm - diag(diag(TRANScm)))
minit <- min(TRANScm - diag(diag(TRANScm)))
if (abs(minit) > maxit) {
maxTRANScm <- minit
}
else {
maxTRANScm <- maxit
}
}
else {
TRANScm <- 0
maxTRANScm <- 0
}
}
else {
if (dpDIM > 1) {
TRANScm <- cor(transdat[, 1:dpDIM])
maxit <- max(TRANScm - diag(diag(TRANScm)))
minit <- min(TRANScm - diag(diag(TRANScm)))
if (abs(minit) > maxit) {
maxTRANScm <- minit
}
else {
maxTRANScm <- maxit
}
}
else {
TRANScm <- 0
maxTRANScm <- 0
}
}
printit <- matrix("SMVCIR dimensions should have low correlations.",
1, 1)
rownames(printit) <- c("")
colnames(printit) <- c("")
#print(printit, quote = FALSE)
printit <- matrix(paste("Maximum SMVCIR dimension correlation: ",
maxTRANScm, sep = ""), 1, 1)
rownames(printit) <- c("")
colnames(printit) <- c("")
#print(printit, quote = FALSE)
printit <- matrix("SMVCIR correlations.", 1, 1)
rownames(printit) <- c("")
colnames(printit) <- c("")
#print(printit, quote = FALSE)
#print(TRANScm, quote = FALSE) remove printing of discrim coords (too large)
if (empTest == FALSE & apempTest == FALSE) {
chosenDIM = NA
}
c_mat<-tmp.eig$vectors
muhat_ls<-aggregate(scale(stage1readydat[,1:k]), list(stage1readydat[,k+1]), mean)
muhat_ls<-matrix(NA, nrow = g, ncol = k)
for(i in 1:g){
muhat_ls[i,1:k] <- colMeans(standat[ind==i,1:k])
}
rownames(muhat_ls)<-levels(data[,which(names(data)%in%group)])
colnames(muhat_ls)<-prednames
muhat_z<-muhat_ls%*%c_mat
rownames(muhat_z)<-rownames(muhat_ls)
colnames(muhat_z)<-paste("D", 1:k, sep = "")
sighatx<-cov(standat[,1:k])
sighatz<-t(c_mat)%*%sighatx%*%c_mat
class.props<-matrix(NA, ncol = g)
for(i in 1:g){
class.props[1,i]<-mean(standat[,k+1]==i)
}
colnames(class.props)<-class_labels
colnames(sighatz)<-rownames(sighatz)<-c(paste("D", rep(1:k), sep = ""))
#if (plot == TRUE) {
# if (GL) {
# plot(smv, dimensions = 1:3, GL = GL)
# }
# else {
# if (empTest == TRUE | apempTest == TRUE) {
# plot(smv, dimensions = 1:max(min(chosenDIM, dpDIM),
# 1), GL = GL)
# }
# else {
# plot(smv, dimensions = 1:max(dpDIM, 1), GL = GL)
# }
# }
#}#
transdat[,k+1]<-factor(class_labels[transdat[,k+1]])
if(is.null(pvalmat)){pvalmat<-"No dimensionality test performed"}
smv <-list(groups = g, predictors = k, statdim = chosenDIM,
sighatz = sighatz, muhat_z = muhat_z, groupindex = groupindex,
class.props=class.props,
muhat_ls = muhat_ls, xbar = xbar, sighatx = sighatx,
dimCorr = TRANScm, maxTRANScm = maxTRANScm,
direct = transdat, compcases = compcases, spansetF = spansetF,
call = match.call(), coefficients = stdcoeffmat, originalx = stage1readydat[,1:k],
kernel = kernelF, summary1 = noquote(printit1), pvalmat = pvalmat)###switch kernel & kernelF
attr(smv, "class") <- "smvcir"
smv
}
| /R/mainfuncv3.R | no_license | danno11/SMVCIR | R | false | false | 15,510 | r | #'Main SMVCIR function
#'
#'Build a Sliced Mean Variance Covariance Inverse Regression model.
#'
#'@param group A character string specifying the name of the class variable of interest in your dataset.
#'@param data A data frame (including your group variable).
#'@param pdt Percentage of SMVCIR discrimination desired from the reported dimension (0-100).
#'@param level Significance level for the dimensionality test.
#'@param test Logical; if TRUE, the approximate empirical dimensionality test is performed.
#'@param empsimss Number of draws used by the empirical dimensionality test (note: not currently a formal argument of smvcir()).
#'@param scree_plot If TRUE, a scree plot of cumulative percentage of variation explained by discriminant dimensions is produced.
#'
#'@examples
#'
#'
#'library(caret)
#'train<-createDataPartition(pima$diabetes, p = .8, list = FALSE)
#'pim.smv<-smvcir("diabetes", data = pima[train,], test = T) ###Build smvcir model on training set
#'summary(pim.smv)
#'
#'@export
smvcir<-function (group, data, pdt = 100, level = 0.05, test = FALSE, scree_plot=FALSE)
{
    # Builds a Sliced Mean Variance Covariance Inverse Regression (SMVCIR) model.
    # Outline:
    #   1. drop incomplete cases and move the grouping column to the last slot
    #   2. standardize predictors; build the SMVCIR span set (covariance,
    #      variance and mean blocks) and its kernel
    #   3. optionally run the dimensionality-test machinery
    #   4. pick a dimension (test and/or percent-discrimination), compute the
    #      discriminant coordinates and assemble the 'smvcir' object
    # NOTE(review): test = TRUE enables only the approximate empirical test;
    # the full empirical test (empTest) is never switched on in this version.
    empTest = FALSE
    apempTest = FALSE
    if (test == TRUE) {
        apempTest = TRUE
    }
    # record which rows are complete; the model itself uses complete cases only
    compcases <- data.frame(complete.cases(data))
    compcases <- cbind(compcases, t(t(1:nrow(data))))
    data_nu <- na.omit(data)
    # locate the grouping column by name
    namelist <- attr(data, "names")
    groupindex <- 0
    for (i in 1:length(namelist)) {
        if (namelist[i] == group) {
            groupindex <- i
            break
        }
    }
    if (groupindex == 0) {
        # BUG(review): 'NAN' is not an R literal (the literal is NaN); if
        # 'group' does not name a column this errors with
        # "object 'NAN' not found".  A stop() with a clear message would be
        # preferable.
        return(NAN)
    }
    # reorder columns so the grouping variable is last
    if (groupindex == dim(data_nu)[2]) {
        stage1readydat <- data.matrix(data_nu)
    } else if (groupindex == 1) {
        stage1readydat <- data.matrix(data_nu[, c(2:dim(data_nu)[2],
            1)])
    } else {
        stage1readydat <- data.matrix(data_nu[, c(1:(groupindex -
            1), (groupindex + 1):dim(data_nu)[2], groupindex)])
    }
    rm(data_nu)
    n <- dim(stage1readydat)[1]        # observations used
    k <- dim(stage1readydat)[2] - 1    # number of predictors
    g <- max(stage1readydat[, k + 1])  # number of groups (integer codes 1..g)
    class_labels<-levels(data[,groupindex]) # labels of the grouping factor
    # standardized predictors plus the group code in the last column
    standat <- cbind(scale(stage1readydat[, 1:k], center = TRUE,
        scale = TRUE), stage1readydat[, k + 1])
    xbar <- t(t(colMeans(stage1readydat[, 1:k])))
    # diag(1/sd): marginal standardization matrix
    sigmaminusoh <- matrix(0, nrow = k, ncol = k)
    diag(sigmaminusoh) <- t(apply(stage1readydat[, 1:k], 2, sd))^(-1)
    ind <- stage1readydat[, (k + 1)]
    # accumulators start with a dummy zero column that is stripped afterwards
    nupart = matrix(rep(0, k), ncol = 1)
    deltapart = matrix(rep(0, k), ncol = 1)
    storesigzi = matrix(rep(0, k), ncol = 1)
    storecapdel = matrix(rep(0, k), ncol = 1)
    storecapdel0 = matrix(rep(0, k), ncol = 1)
    storevecd = matrix(rep(0, k), ncol = 1)
    ni = rep(0, g)
    # per-group standardized means (nu), covariances and their diagonals
    for (i in 1:g) {
        xg = stage1readydat[ind == i, 1:k]
        ni[i] = length(xg[, 1])
        xbari = t(t(colMeans(xg)))
        zbari = sigmaminusoh %*% (xbari - xbar)
        sigzi <- sigmaminusoh %*% var(xg) %*% sigmaminusoh
        storesigzi = cbind(storesigzi, sigzi)
        vecd = diag(sigzi)
        storevecd = cbind(storevecd, vecd)
        nui = sqrt(ni[i]/n) * zbari
        nupart = cbind(nupart, nui)
    }
    # drop the dummy first columns
    nupart = nupart[, 2:(g + 1)]
    storevecd = storevecd[, 2:(g + 1)]
    storesigzi = storesigzi[, 2:(k * g + 1)]
    # pooled (weighted) standardized covariance
    sigzbar = matrix(rep(0, k^2), ncol = k)
    ib = 1
    ie = k
    for (i in 1:g) {
        sigzbar = sigzbar + (ni[i]/n) * storesigzi[, ib:ie]
        ib = ib + k
        ie = ie + k
    }
    sigzbard = diag(sigzbar)
    ib = 1
    ie = k
    # centered variance (deli) and covariance (capdeli) contrasts per group;
    # capdeli0 zeroes the diagonal so variance and covariance blocks stay separate
    for (i in 1:g) {
        deli = sqrt(ni[i]/n) * (storevecd[, i] - sigzbard)
        deltapart = cbind(deltapart, deli)
        capdeli = sqrt(ni[i]/n) * (storesigzi[, ib:ie] - sigzbar)
        capdeli0 = capdeli - diag(diag(capdeli))
        storecapdel <- cbind(storecapdel, capdeli)
        storecapdel0 <- cbind(storecapdel0, capdeli0)
        ib = ib + k
        ie = ie + k
    }
    deltapart <- deltapart[, 2:(g + 1)]
    storecapdel <- storecapdel[, 2:(k * g + 1)]
    storecapdel0 <- storecapdel0[, 2:(k * g + 1)]
    # SMVCIR span set (covariance | variance | mean blocks) and its kernel
    spansetF = cbind(storecapdel0, deltapart, nupart)
    kernelF = spansetF %*% t(spansetF)
    if (empTest == TRUE | apempTest == TRUE) {
        # asymptotic covariance machinery used by the dimensionality tests;
        # feststdclt1/eststddelta*/eststackvar are package-internal helpers
        estclt <- feststdclt1(g, stage1readydat)
        estgrpmom <- eststddelta1(list(eu = estclt$eu, gprop = estclt$gprop,
            g = estclt$g, k = estclt$k))
        gprop = estclt$gprop
        estvar <- eststddelta2(list(ef = estgrpmom$ef, g = g,
            k = k, gprop = gprop))
        eststd <- eststddelta3(list(ef = estvar$ef, g = g, k = k,
            gprop = gprop))
        estcentvar <- eststddelta4(list(ef = eststd$ef, g = g,
            k = k, gprop = gprop))
        estpropwt <- eststddelta5(list(ef = estcentvar$ef, g = g,
            k = k, gprop = gprop))
        eststackvarint <- eststackvar(list(ef = estpropwt$ef,
            g = g, k = k, gprop = gprop))
        # kernel eigenvalues sorted by decreasing magnitude
        vals <- eigen(kernelF, symmetric = TRUE)$values
        or <- rev(order(abs(vals)))
        evalues <- vals[or]
        esteigensmvcir1 <- evalues
    }
    # cumulative percentage of discrimination captured by the leading
    # singular values of the span set (scree information)
    tmp = svd(spansetF)
    sumsing = rep(0, k)
    for (j in 1:k) {
        sumsing[j] = sum(tmp$d[1:j])
    }
    sumsing = (100/sumsing[k]) * sumsing
    scree = cbind(1:k, sumsing)
    # smallest dimension reaching the requested percentage 'pdt'
    dpDIM = -1
    for (j in 1:k) {
        if (sumsing[j] >= pdt & dpDIM == -1) {
            dpDIM = j
        }
    }
    if (dpDIM == -1) {
        dpDIM = k
    }
    if (scree_plot == TRUE) {
        # NOTE(review): x11() is platform-specific; dev.new() would be portable
        x11()
        plot(c(0, scree[, 1]), c(0, scree[, 2]), xlab = "Singular value",
            ylab = "Percentage", ylim = c(0, 100), type = "l",
            main = "Scree Plot SV %")
        abline(h = pdt)
    }
    emppvals <- matrix(NA, nrow = k, ncol = 1)
    apemppvals <- matrix(NA, nrow = k, ncol = 1)
    if (empTest == TRUE | apempTest == TRUE) {
        # asymptotic covariance of the stacked span set
        vf <- matrix(0, nrow = g * k + g * k * k + g * k, ncol = g *
            k + g * k * k + g * k)
        vf[(1:(g * k + g * k * k)), (1:(g * k + g * k * k))] <- (estpropwt$d5 %*%
            estcentvar$d4 %*% eststd$d3 %*% estvar$d2 %*% estgrpmom$d1 %*%
            estclt$vu %*% t(estgrpmom$d1) %*% t(estvar$d2) %*%
            t(eststd$d3) %*% t(estcentvar$d4) %*% t(estpropwt$d5))
        # sequential tests of H0: dimension = i - 1
        for (i in 1:k) {
            tstat <- n * sum(esteigensmvcir1[(i):length(esteigensmvcir1)])
            tsmvcird <- i - 1
            bigK <- kunstack(k, as.matrix(eststackvarint$ef))
            sv <- svd(bigK, nu = nrow(bigK), nv = ncol(bigK))
            gammanot <- sv$u[, (tsmvcird + 1):(ncol(sv$u))]
            psinot <- sv$v[, (tsmvcird + 1):ncol(sv$v)]
            dcmat <- ((t(psinot) %x% t(gammanot)) %*% eststackvarint$Perm %*%
                vf %*% t(eststackvarint$Perm) %*% (psinot %x%
                gammanot))
            if (empTest == TRUE) {
                # Monte Carlo p-value from a weighted chi-square mixture.
                # NOTE(review): 'empsimss' is documented but is NOT a formal
                # argument of smvcir(); this branch is also unreachable because
                # empTest is never set to TRUE above.
                devalues <- eigen(dcmat, symmetric = TRUE)$values
                exceed = 0
                for (j in 1:empsimss) {
                    realz <- t(devalues) %*% t(t(rchisq(length(devalues),
                        1)))
                    if (realz >= tstat) {
                        exceed <- exceed + 1
                    }
                }
                pvalue <- exceed/empsimss
                emppvals[i, 1] <- pvalue
            }
            if (apempTest == TRUE) {
                # scaled chi-square (moment-matched) approximation
                trdcmat <- sum(eigen(dcmat, symmetric = TRUE)$values)
                trdcmat2 <- sum(eigen(dcmat %*% t(dcmat), symmetric = TRUE)$values)
                d_num <- round((trdcmat^2)/trdcmat2)
                scalecorrectstat <- tstat * ((trdcmat/d_num)^(-1))
                pvalue <- 1 - pchisq(scalecorrectstat, d_num)
                apemppvals[i, 1] <- pvalue
            }
        }
    }
    # discriminant coordinates: project standardized data on kernel eigenvectors
    tmp.eig = eigen(kernelF)
    wmati = standat[, 1:k] %*% tmp.eig$vectors
    wmati = cbind(wmati, standat[, k + 1])
    # standardized coefficients: least squares of each coordinate on the
    # standardized predictors, normalized to unit length
    stdcoeffmat <- matrix(NA, nrow = k, ncol = k)
    x <- cbind(scale(stage1readydat[, 1:k], center = TRUE, scale = TRUE),
        rep(1, nrow(stage1readydat[, 1:k])))
    for (i in 1:k) {
        stdcoeffmat[1:k, i] <- t(t(lm.fit(y = wmati[, i], x = x)$coefficients[1:k]))[1:k,
            1]
        norm <- sum(stdcoeffmat[1:k, i] * stdcoeffmat[1:k, i])
        stdcoeffmat[1:k, i] <- stdcoeffmat[1:k, i]/sqrt(norm)
    }
    # first dimension whose test p-value exceeds 'level' is the test-based
    # dimension (empDIM/apempDIM); -1 encodes "test not run"
    if (empTest == TRUE | apempTest == TRUE) {
        if (empTest == TRUE) {
            empDIM = -1
            for (i in 1:k) {
                if (emppvals[i] >= level & empDIM == -1) {
                    empDIM = i - 1
                }
            }
            if (empDIM == -1) {
                empDIM = k
            }
        }
        else {
            empDIM = -1
        }
        if (apempTest == TRUE) {
            apempDIM = -1
            for (i in 1:k) {
                if (apemppvals[i] >= level & apempDIM == -1) {
                    apempDIM = i - 1
                }
            }
            if (apempDIM == -1) {
                apempDIM = k
            }
        }
        else {
            apempDIM = -1
        }
        chosenDIM = max(empDIM, apempDIM)
    }
    # assemble the text summary later shown by the summary() method
    printit1 <- matrix("", 7, 1)
    printit1[1, 1] <- "SMVCIR"
    printit1[2, 1] <- paste("# Groups: ", g, sep = "")
    printit1[3, 1] <- paste("# Predictors: ", k, sep = "")
    printit1[4, 1] <- paste("Observations used: ", nrow(standat),
        sep = "")
    printit1[5, 1] <- paste("Total Observations: ", nrow(data),
        sep = "")
    if (empTest == TRUE | apempTest == TRUE) {
        printit1[6, 1] <- paste("Dimension: ", chosenDIM, ", at level: ",
            level, sep = "")
    }
    printit1[7, 1] <- paste("Dimension: ", dpDIM, ", provides ",
        round(sumsing[dpDIM], 0), "% Discrimination", sep = "")
    rownames(printit1) <- rep("", 7)
    colnames(printit1) <- rep("", 1)
    #print(printit1, quote = FALSE) ##First output for summary function
    pvalmat<-NULL
    if (empTest == TRUE | apempTest == TRUE) {
        printit2 <- matrix("", 1, 1)
        printit2[1, 1] <- "Dimensionality Test P-Values"
        rownames(printit2) <- c("")
        colnames(printit2) <- c("")
        #print(printit2, quote = FALSE)
        # p-value table; row labels flag the chosen dimensions (E/AE prefixes)
        if (apempTest == TRUE & empTest == TRUE) {
            pvalmat <- round(cbind(emppvals, apemppvals), 3)
            colnames(pvalmat) <- c("Empirical", "Approximate Empirical")
        }
        else if (apempTest == TRUE) {
            pvalmat <- round(t(t(apemppvals)), 3)
            colnames(pvalmat) <- c("Approximate Empirical")
        }
        else {
            pvalmat <- round(t(t(emppvals)), 3)
            colnames(pvalmat) <- c("Empirical")
        }
        rownamesIt <- 0:(k - 1)
        if (empDIM > -1) {
            if (empDIM < k) {
                rownamesIt[empDIM + 1] <- paste("E ", rownamesIt[empDIM +
                    1], sep = "")
            }
        }
        if (apempDIM > -1) {
            if (apempDIM < k) {
                rownamesIt[apempDIM + 1] <- paste("AE ", rownamesIt[apempDIM +
                    1], sep = "")
            }
        }
        rownames(pvalmat) <- rownamesIt
        #print(pvalmat, quote = FALSE)
    }
    # predictor names in their post-reordering order (group column excluded)
    prednames <- rep("", k)
    i = 1
    for (j in 1:(k + 1)) {
        if (names(data)[j] != group) {
            prednames[i] <- names(data)[j]
            i = i + 1
        }
    }
    rownames(stdcoeffmat) <- prednames
    colnames(stdcoeffmat) <- paste("D", 1:k, sep = "")
    #printit <- matrix("", 1, 1)
    #printit[1, 1] <- "Standardized Coefficients"
    #rownames(printit) <- c("")
    #colnames(printit) <- c("")
    #print(printit, quote = FALSE)
    # if (empTest == TRUE | apempTest == TRUE) {
    #    print(round(stdcoeffmat[, 1:max(min(chosenDIM, dpDIM),
    #       1)], 3), quote = FALSE)
    #}
    #else {
    #    print(round(stdcoeffmat[, 1:(max(dpDIM, 1))], 3), quote = FALSE)
    #}
    colnames(stage1readydat) <- c(prednames, group)
    colnames(standat) <- c(prednames, group)
    transdat <- data.frame(wmati)
    names(transdat) <- c(paste("D", rep(1:k), sep = ""), group)
    # label span-set columns: C<g>.<col> covariance, V<g> variance, M<g> mean
    spanFnames <- character(length = ncol(spansetF))
    nb <- ncol(spansetF)
    for (p in 1:nb) {
        if (p <= g * k) {
            if (floor(p/k) < p/k) {
                tempgroup <- floor(p/k) + 1
                covcolm <- p - floor(p/k) * k
            }
            else {
                tempgroup <- floor(p/k)
                covcolm = k
            }
            spanFnames[p] <- paste("C", tempgroup, ".", covcolm,
                sep = "")
        }
        else if (p <= g * k + g) {
            tempgroup <- p - g * k
            spanFnames[p] <- paste("V", tempgroup, sep = "")
        }
        else {
            tempgroup <- p - g * k - g
            spanFnames[p] <- paste("M", tempgroup, sep = "")
        }
    }
    colnames(spansetF) <- spanFnames
    # largest off-diagonal correlation among the retained coordinates
    if (empTest == TRUE | apempTest == TRUE) {
        if (min(chosenDIM, dpDIM) > 1) {
            TRANScm <- cor(transdat[, 1:(max(min(chosenDIM, dpDIM),
                1))])
            maxit <- max(TRANScm - diag(diag(TRANScm)))
            minit <- min(TRANScm - diag(diag(TRANScm)))
            if (abs(minit) > maxit) {
                maxTRANScm <- minit
            }
            else {
                maxTRANScm <- maxit
            }
        }
        else {
            TRANScm <- 0
            maxTRANScm <- 0
        }
    }
    else {
        if (dpDIM > 1) {
            TRANScm <- cor(transdat[, 1:dpDIM])
            maxit <- max(TRANScm - diag(diag(TRANScm)))
            minit <- min(TRANScm - diag(diag(TRANScm)))
            if (abs(minit) > maxit) {
                maxTRANScm <- minit
            }
            else {
                maxTRANScm <- maxit
            }
        }
        else {
            TRANScm <- 0
            maxTRANScm <- 0
        }
    }
    printit <- matrix("SMVCIR dimensions should have low correlations.",
        1, 1)
    rownames(printit) <- c("")
    colnames(printit) <- c("")
    #print(printit, quote = FALSE)
    printit <- matrix(paste("Maximum SMVCIR dimension correlation: ",
        maxTRANScm, sep = ""), 1, 1)
    rownames(printit) <- c("")
    colnames(printit) <- c("")
    #print(printit, quote = FALSE)
    printit <- matrix("SMVCIR correlations.", 1, 1)
    rownames(printit) <- c("")
    colnames(printit) <- c("")
    #print(printit, quote = FALSE)
    #print(TRANScm, quote = FALSE) remove printing of discrim coords (too large)
    if (empTest == FALSE & apempTest == FALSE) {
        chosenDIM = NA
    }
    # group means/covariances in both standardized (ls) and discriminant (z)
    # coordinates
    c_mat<-tmp.eig$vectors
    # NOTE(review): the aggregate() result below is immediately overwritten by
    # the loop that follows (dead code).
    muhat_ls<-aggregate(scale(stage1readydat[,1:k]), list(stage1readydat[,k+1]), mean)
    muhat_ls<-matrix(NA, nrow = g, ncol = k)
    for(i in 1:g){
        muhat_ls[i,1:k] <- colMeans(standat[ind==i,1:k])
    }
    rownames(muhat_ls)<-levels(data[,which(names(data)%in%group)])
    colnames(muhat_ls)<-prednames
    muhat_z<-muhat_ls%*%c_mat
    rownames(muhat_z)<-rownames(muhat_ls)
    colnames(muhat_z)<-paste("D", 1:k, sep = "")
    sighatx<-cov(standat[,1:k])
    sighatz<-t(c_mat)%*%sighatx%*%c_mat
    # sample class proportions
    class.props<-matrix(NA, ncol = g)
    for(i in 1:g){
        class.props[1,i]<-mean(standat[,k+1]==i)
    }
    colnames(class.props)<-class_labels
    colnames(sighatz)<-rownames(sighatz)<-c(paste("D", rep(1:k), sep = ""))
    #if (plot == TRUE) {
    #    if (GL) {
    #        plot(smv, dimensions = 1:3, GL = GL)
    #    }
    #    else {
    #        if (empTest == TRUE | apempTest == TRUE) {
    #            plot(smv, dimensions = 1:max(min(chosenDIM, dpDIM),
    #              1), GL = GL)
    #        }
    #        else {
    #            plot(smv, dimensions = 1:max(dpDIM, 1), GL = GL)
    #        }
    #    }
    #}#
    # recode the group column of the coordinates back to factor labels
    transdat[,k+1]<-factor(class_labels[transdat[,k+1]])
    if(is.null(pvalmat)){pvalmat<-"No dimensionality test performed"}
    smv <-list(groups = g, predictors = k, statdim = chosenDIM,
        sighatz = sighatz, muhat_z = muhat_z, groupindex = groupindex,
        class.props=class.props,
        muhat_ls = muhat_ls, xbar = xbar, sighatx = sighatx,
        dimCorr = TRANScm, maxTRANScm = maxTRANScm,
        direct = transdat, compcases = compcases, spansetF = spansetF,
        call = match.call(), coefficients = stdcoeffmat, originalx = stage1readydat[,1:k],
        kernel = kernelF, summary1 = noquote(printit1), pvalmat = pvalmat)###switch kernel & kernelF
    attr(smv, "class") <- "smvcir"
    smv
}
|
# Run script: fit the LDA-abundance model to the fake dataset and inspect the
# fitted community proportions (theta).  Clears the workspace, loads the Gibbs
# sampler helpers and compiled C++ kernels, runs the sampler, then plots
# convergence diagnostics.
rm(list=ls(all=TRUE))
library('Rcpp')
set.seed(4)
# get functions: Gibbs helpers, the main wrapper, and the compiled C++ kernels
setwd('U:\\GIT_models\\git_LDA_abundance')
source('gibbs functions.R')
source('LDA.abundance main function.R')
sourceCpp('aux1.cpp')
# response matrix: drop the row-id column 'X'
dat=read.csv('fake data5.csv',as.is=T)
ind=which(colnames(dat)=='X')
y=data.matrix(dat[,-ind]); dim(y)
# MCMC settings and prior hyperparameters
ncomm=30
ngibbs=1000
nburn=ngibbs/2
psi=0.01
gamma=0.1
res=LDA.abundance(y=y,ncomm=ncomm,ngibbs=ngibbs,nburn=nburn,psi=psi,gamma=gamma)
# trace of the log-likelihood to eyeball convergence
plot(res$llk,type='l')
# posterior mean of theta over the retained iterations
# NOTE(review): 250 is hard-coded while nburn = ngibbs/2 = 500 -- confirm
# which burn-in is intended.
seq1=250:nrow(res$theta)
theta=colMeans(res$theta[seq1,])
theta1=matrix(theta,nrow(y),ncomm)
boxplot(theta1)
# one line per community, over sites
plot(NA,xlim=c(0,nrow(y)),ylim=c(0,1))
for (i in 1:ncomm){
  lines(theta1[,i],col=i)
} | /run LDA abundance main function.R | no_license | drvalle1/git_LDA_abundance | R | false | false | 660 | r | rm(list=ls(all=TRUE))
# (duplicate copy of the LDA-abundance run script; the leading rm() call sits
# on the preceding dataset-metadata line)
library('Rcpp')
set.seed(4)
# get functions: Gibbs helpers, the main wrapper, and the compiled C++ kernels
setwd('U:\\GIT_models\\git_LDA_abundance')
source('gibbs functions.R')
source('LDA.abundance main function.R')
sourceCpp('aux1.cpp')
# response matrix: drop the row-id column 'X'
dat=read.csv('fake data5.csv',as.is=T)
ind=which(colnames(dat)=='X')
y=data.matrix(dat[,-ind]); dim(y)
# MCMC settings and prior hyperparameters
ncomm=30
ngibbs=1000
nburn=ngibbs/2
psi=0.01
gamma=0.1
res=LDA.abundance(y=y,ncomm=ncomm,ngibbs=ngibbs,nburn=nburn,psi=psi,gamma=gamma)
# log-likelihood trace for convergence checking
plot(res$llk,type='l')
# posterior mean of theta (NOTE(review): 250 is hard-coded vs nburn = 500)
seq1=250:nrow(res$theta)
theta=colMeans(res$theta[seq1,])
theta1=matrix(theta,nrow(y),ncomm)
boxplot(theta1)
plot(NA,xlim=c(0,nrow(y)),ylim=c(0,1))
for (i in 1:ncomm){
  lines(theta1[,i],col=i)
} |
# Support Vector Machine (SVM)
# Classify 'Purchased' from Age and EstimatedSalary with a linear SVM, then
# visualise the decision boundary on the training and test sets.

# Importing the dataset (keep columns 3-5: Age, EstimatedSalary, Purchased)
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]

# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))

# Splitting the dataset into the Training set and Test set (75/25, stratified)
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)

# Feature Scaling (SVM is sensitive to predictor scale)
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])

# Fitting a linear SVM classifier to the Training set
library(e1071)
classifier = svm(formula = Purchased ~ .,
                 data = training_set,
                 type = 'C-classification', # SVM could be used for classification and regression
                 kernel = 'linear') # 'linear' is the most basic SVM method

# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])

# Making the Confusion Matrix (rows = truth, columns = prediction)
cm = table(test_set[, 3], y_pred)

# Visualising the Training set results: predict on a fine grid of the two
# scaled predictors and colour the decision regions
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
     main = 'SVM (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))

# Visualising the Test set results (same grid procedure)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'SVM (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) | /svm_exercise.R | no_license | lixuanhong/Data-Science | R | false | false | 2,458 | r | # Support Vector Machine (SVM)
# (second copy of the SVM exercise script; its title comment sits on the
# preceding dataset-metadata line)

# Importing the dataset (keep Age, EstimatedSalary, Purchased)
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]

# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))

# Splitting the dataset into the Training set and Test set (75/25, stratified)
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)

# Feature Scaling (SVM is sensitive to predictor scale)
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])

# Fitting a linear SVM classifier to the Training set
library(e1071)
classifier = svm(formula = Purchased ~ .,
                 data = training_set,
                 type = 'C-classification', # SVM could be used for classification and regression
                 kernel = 'linear') # 'linear' is the most basic SVM method

# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])

# Making the Confusion Matrix (rows = truth, columns = prediction)
cm = table(test_set[, 3], y_pred)

# Visualising the Training set results on a fine grid of the two predictors
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
     main = 'SVM (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))

# Visualising the Test set results (same grid procedure)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'SVM (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) |
##############################################################################=
### ohiprep/globalprep/SPP_ICO/data_prep_ICO.R
###
### GOAL: Obtain iconics data for global
### Jun 19, 2015 - CCO. Updating ICO to automatically gather country info
###   from scraped IUCN red list pages, and to incorporate parent/subpop
###   extinction risk categories.
##############################################################################=
library(readr) # for read_csv()
library(XML)
setwd('~/github/ohiprep')
source('src/R/common.R')
# directory layout: annex (large data), git (code/outputs), raw-data sources
goal <- 'globalprep/SPP_ICO'
scenario <- 'v2015'
dir_anx <- file.path(dir_neptune_data, 'git-annex', goal)
dir_git <- file.path('~/github/ohiprep', goal)
dir_data_am <- file.path(dir_neptune_data, 'git-annex/globalprep/_raw_data', 'aquamaps/v2014')
dir_data_iucn <- file.path(dir_neptune_data, 'git-annex/globalprep/_raw_data', 'iucn_spp')
source(file.path(dir_git, 'R/spp_fxn.R'))
source(file.path(dir_git, 'R/ico_fxn.R'))
# SPP-specific and ICO-specific functions
##############################################################################=
### get master list of Iconic Species -----
##############################################################################=
ico_list <- get_ico_list()
### comname | sciname | ico_gl | category | iucn_sid | trend | parent_sid | subpop_sid | ico_rgn_id
###   * ico_rgn_id: rgn_id in which species is iconic by regional/national lists, separately from other lists.
##############################################################################=
### Find ICO regions for all species on ICO list -----
##############################################################################=
### NOTE: since this is pulling country lists from IUCN site, all ICO species
### must have an IUCN SID. Not a problem from 2011 list so far.
ico_spp_list <- ico_list %>%
  select(sciname, iucn_sid) %>%
  unique()
ico_rgn_list <- get_ico_details_all(ico_spp_list, reload = TRUE)
### | sid | rgn_name | rgn_type | rgn_id
ico_rgn_all <- ico_list %>%
  left_join(ico_rgn_list,
            by = c('iucn_sid' = 'sid'))
### Deal with the regionally and possibly extinct - mutate the category here.
### Regionally extinct are essentially counted as an extinct subpop.
### Possibly extinct are considered functionally extinct in this, so same as regionally extinct.
ico_rgn_all <- ico_rgn_all %>%
  mutate(category = ifelse(rgn_type == 'possibly extinct', 'EX', category),
         category = ifelse(rgn_type == 'regionally extinct', 'EX', category),
         trend = ifelse(rgn_type == 'possibly extinct', NA, trend),
         trend = ifelse(rgn_type == 'regionally extinct', NA, trend))
# NOTE(review): '&' binds tighter than '|', so this keeps rows that are either
# (globally iconic AND not regionally listed) OR (regionally iconic in this
# region) -- confirm that grouping is intended.
ico_rgn_all <- ico_rgn_all %>%
  filter((ico_gl == TRUE & is.na(ico_rgn_id) | ico_rgn_id == rgn_id))
write_csv(ico_rgn_all, file.path(dir_anx, scenario, 'intermediate/ico_rgn_all.csv'))
##############################################################################=
### Report and summarize regional iconic species status -----
##############################################################################=
ico_rgn_all <- read.csv(file.path(dir_anx, scenario, 'intermediate/ico_rgn_all.csv'), stringsAsFactors = FALSE)
# Report out for toolbox format (rgn_id | sciname | category or popn_trend for each species within a region).
# Note: in toolbox, group_by(rgn_id, sciname) and then summarize(category = mean(category)) to
#   average any parent/subpop species listings before aggregating to overall average per region.
ico_status <- ico_rgn_all %>%
  select(rgn_id, sciname, category) %>%
  arrange(rgn_id, sciname)
ico_trend <- ico_rgn_all %>%
  select(rgn_id, sciname, popn_trend = trend) %>%
  arrange(rgn_id, sciname)
write_csv(ico_status, file.path(dir_git, scenario, 'data/ico_status.csv'))
write_csv(ico_trend, file.path(dir_git, scenario, 'data/ico_trend.csv'))
# Report out for finalized status and trend values per region.
ico_rgn_sum <- process_ico_rgn(ico_rgn_all)
### rgn_id | mean_cat | mean_trend | status
ico_status_sum <- ico_rgn_sum %>%
  select(rgn_id, score = mean_cat) %>%
  arrange(rgn_id)
ico_trend_sum <- ico_rgn_sum %>%
  select(rgn_id, score = mean_trend) %>%
  arrange(rgn_id)
write_csv(ico_status_sum, file.path(dir_git, scenario, 'data/ico_status_sum.csv'))
write_csv(ico_trend_sum, file.path(dir_git, scenario, 'data/ico_trend_sum.csv'))
| /globalprep/spp_ico/v2015/data_prep_ICO.R | no_license | OHI-Science/ohiprep_v2017 | R | false | false | 4,360 | r | ##############################################################################=
### ohiprep/globalprep/SPP_ICO/data_prep_ICO.R
### (second copy of the ICO data-prep script)
###
### GOAL: Obtain iconics data for global
### Jun 19, 2015 - CCO. Updating ICO to automatically gather country info
###   from scraped IUCN red list pages, and to incorporate parent/subpop
###   extinction risk categories.
##############################################################################=
library(readr) # for read_csv()
library(XML)
setwd('~/github/ohiprep')
source('src/R/common.R')
# directory layout: annex (large data), git (code/outputs), raw-data sources
goal <- 'globalprep/SPP_ICO'
scenario <- 'v2015'
dir_anx <- file.path(dir_neptune_data, 'git-annex', goal)
dir_git <- file.path('~/github/ohiprep', goal)
dir_data_am <- file.path(dir_neptune_data, 'git-annex/globalprep/_raw_data', 'aquamaps/v2014')
dir_data_iucn <- file.path(dir_neptune_data, 'git-annex/globalprep/_raw_data', 'iucn_spp')
source(file.path(dir_git, 'R/spp_fxn.R'))
source(file.path(dir_git, 'R/ico_fxn.R'))
# SPP-specific and ICO-specific functions
##############################################################################=
### get master list of Iconic Species -----
##############################################################################=
ico_list <- get_ico_list()
### comname | sciname | ico_gl | category | iucn_sid | trend | parent_sid | subpop_sid | ico_rgn_id
###   * ico_rgn_id: rgn_id in which species is iconic by regional/national lists, separately from other lists.
##############################################################################=
### Find ICO regions for all species on ICO list -----
##############################################################################=
### NOTE: since this is pulling country lists from IUCN site, all ICO species
### must have an IUCN SID. Not a problem from 2011 list so far.
ico_spp_list <- ico_list %>%
  select(sciname, iucn_sid) %>%
  unique()
ico_rgn_list <- get_ico_details_all(ico_spp_list, reload = TRUE)
### | sid | rgn_name | rgn_type | rgn_id
ico_rgn_all <- ico_list %>%
  left_join(ico_rgn_list,
            by = c('iucn_sid' = 'sid'))
### Deal with the regionally and possibly extinct - mutate the category here.
### Regionally extinct are essentially counted as an extinct subpop.
### Possibly extinct are considered functionally extinct in this, so same as regionally extinct.
ico_rgn_all <- ico_rgn_all %>%
  mutate(category = ifelse(rgn_type == 'possibly extinct', 'EX', category),
         category = ifelse(rgn_type == 'regionally extinct', 'EX', category),
         trend = ifelse(rgn_type == 'possibly extinct', NA, trend),
         trend = ifelse(rgn_type == 'regionally extinct', NA, trend))
# NOTE(review): '&' binds tighter than '|' in the filter below -- confirm the
# intended grouping of the condition.
ico_rgn_all <- ico_rgn_all %>%
  filter((ico_gl == TRUE & is.na(ico_rgn_id) | ico_rgn_id == rgn_id))
write_csv(ico_rgn_all, file.path(dir_anx, scenario, 'intermediate/ico_rgn_all.csv'))
##############################################################################=
### Report and summarize regional iconic species status -----
##############################################################################=
ico_rgn_all <- read.csv(file.path(dir_anx, scenario, 'intermediate/ico_rgn_all.csv'), stringsAsFactors = FALSE)
# Report out for toolbox format (rgn_id | sciname | category or popn_trend for each species within a region).
# Note: in toolbox, group_by(rgn_id, sciname) and then summarize(category = mean(category)) to
#   average any parent/subpop species listings before aggregating to overall average per region.
ico_status <- ico_rgn_all %>%
  select(rgn_id, sciname, category) %>%
  arrange(rgn_id, sciname)
ico_trend <- ico_rgn_all %>%
  select(rgn_id, sciname, popn_trend = trend) %>%
  arrange(rgn_id, sciname)
write_csv(ico_status, file.path(dir_git, scenario, 'data/ico_status.csv'))
write_csv(ico_trend, file.path(dir_git, scenario, 'data/ico_trend.csv'))
# Report out for finalized status and trend values per region.
ico_rgn_sum <- process_ico_rgn(ico_rgn_all)
### rgn_id | mean_cat | mean_trend | status
ico_status_sum <- ico_rgn_sum %>%
  select(rgn_id, score = mean_cat) %>%
  arrange(rgn_id)
ico_trend_sum <- ico_rgn_sum %>%
  select(rgn_id, score = mean_trend) %>%
  arrange(rgn_id)
write_csv(ico_status_sum, file.path(dir_git, scenario, 'data/ico_status_sum.csv'))
write_csv(ico_trend_sum, file.path(dir_git, scenario, 'data/ico_trend_sum.csv'))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decoupling.r
\name{longwave.conductance}
\alias{longwave.conductance}
\title{Longwave Radiative Transfer Conductance of the Canopy}
\usage{
longwave.conductance(Tair, LAI, constants = bigleaf.constants())
}
\arguments{
\item{Tair}{Air temperature (deg C)}
\item{LAI}{Leaf area index (m2 m-2)}
\item{constants}{Kelvin - conversion degree Celsius to Kelvin \cr
sigma - Stefan-Boltzmann constant (W m-2 K-4) \cr
cp - specific heat of air for constant pressure (J K-1 kg-1)}
}
\value{
\item{Gr -}{longwave radiative transfer conductance of the canopy (m s-1)}
}
\description{
Longwave Radiative Transfer Conductance of the Canopy
}
\details{
The following formula is used (Martin, 1989):
\deqn{Gr = 4 \sigma Tair^3 LAI / cp}
}
\examples{
longwave.conductance(25,seq(1,8,1))
}
\references{
Martin P., 1989: The significance of radiative coupling between
vegetation and the atmosphere. Agricultural and Forest Meteorology 49, 45-53.
}
| /man/longwave.conductance.Rd | no_license | cran/bigleaf | R | false | true | 1,061 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decoupling.r
\name{longwave.conductance}
\alias{longwave.conductance}
\title{Longwave Radiative Transfer Conductance of the Canopy}
\usage{
longwave.conductance(Tair, LAI, constants = bigleaf.constants())
}
\arguments{
\item{Tair}{Air temperature (deg C)}
\item{LAI}{Leaf area index (m2 m-2)}
\item{constants}{Kelvin - conversion degree Celsius to Kelvin \cr
sigma - Stefan-Boltzmann constant (W m-2 K-4) \cr
cp - specific heat of air for constant pressure (J K-1 kg-1)}
}
\value{
\item{Gr -}{longwave radiative transfer conductance of the canopy (m s-1)}
}
\description{
Longwave Radiative Transfer Conductance of the Canopy
}
\details{
the following formula is used (Martin, 1989):
\deqn{Gr = 4 \sigma Tair^3 LAI / cp}
}
\examples{
longwave.conductance(25,seq(1,8,1))
}
\references{
Martin P., 1989: The significance of radiative coupling between
vegetation and the atmosphere. Agricultural and Forest Meteorology 49, 45-53.
}
|
# Exploratory plot 2: Global Active Power over time for 1-2 Feb 2007,
# written to plot2.png.

# Load complete file ('?' encodes missing values; columns 3-9 are numeric)
raw <- read.delim('data/household_power_consumption.txt', sep = ';', na.strings = '?',
                  colClasses = c('character', 'character',
                                 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric'))
# Grab just the two dates we need (dates are in D/M/Y format)
power <- raw[(raw$Date == '1/2/2007' | raw$Date == '2/2/2007'), ]
# Save some memory
remove(raw)
# Combine Date and Time into a POSIXlt timestamp for the x-axis
power$DateTime <- strptime(paste(power$Date, power$Time), '%d/%m/%Y %T')
# Render the time series to plot2.png
png('plot2.png')
plot(power$DateTime, power$Global_active_power,
     type = 'l', xlab = '',
     ylab = 'Global Active Power (kilowatts)')
dev.off()
| /plot2.R | no_license | codemonkeyjim/ExData_Plotting1 | R | false | false | 688 | r | # Load complete file
raw <- read.delim('data/household_power_consumption.txt', sep = ';', na.strings = '?',
colClasses = c('character', 'character',
'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric', 'numeric'))
# Grab just the two dates we need (dates are in D/M/Y format)
power <- raw[(raw$Date == '1/2/2007' | raw$Date == '2/2/2007'), ]
# Save some memory
remove(raw)
# Clean up column types
power$DateTime <- strptime(paste(power$Date, power$Time), '%d/%m/%Y %T')
png('plot2.png')
plot(power$DateTime, power$Global_active_power,
type = 'l', xlab = '',
ylab = 'Global Active Power (kilowatts)')
dev.off()
|
\name{print.gp}
\alias{print.gp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Spectral GP default print statement}
\description{
This is the default print statement for a spectral GP object. If you need
a list of everything that is part of a spectral GP object,
use 'names()'.
}
\references{Type 'citation("spectralGP")' for references.}
\author{Christopher Paciorek \email{paciorek@alumni.cmu.edu} }
\seealso{\code{\link{gp}}, \code{\link{names.gp}}}
\keyword{models }% at least one, from doc/KEYWORDS
\keyword{smooth}% __ONLY ONE__ keyword per line
\keyword{spatial}% __ONLY ONE__ keyword per line
| /man/print.gp.Rd | no_license | cran/spectralGP | R | false | false | 640 | rd | \name{print.gp}
\alias{print.gp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Spectral GP default print statement}
\description{
This is the default print statement for a spectral GP object. If you need
a list of everything that is part of a spectral GP object,
use 'names()'.
}
\references{Type 'citation("spectralGP")' for references.}
\author{Christopher Paciorek \email{paciorek@alumni.cmu.edu} }
\seealso{\code{\link{gp}}, \code{\link{names.gp}}}
\keyword{models }% at least one, from doc/KEYWORDS
\keyword{smooth}% __ONLY ONE__ keyword per line
\keyword{spatial}% __ONLY ONE__ keyword per line
|
#' @include utilities.R ggpar.R
NULL
#' Stripcharts
#' @description Create a stripchart, also known as one dimensional scatter
#' plots. These plots are suitable compared to box plots when sample sizes are
#' small.
#' @inheritParams ggboxplot
#' @param x,y x and y variables for drawing.
#' @param color,fill outline and fill colors.
#' @param shape point shape
#' @param position position adjustment, either as a string, or the result of a
#' call to a position adjustment function. Used to adjust position for
#' multiple groups.
#' @param ... other arguments to be passed to geom_jitter.
#' @details The plot can be easily customized using the function ggpar(). Read
#' ?ggpar for changing: \itemize{ \item main title and axis labels: main,
#' xlab, ylab \item axis limits: xlim, ylim (e.g.: ylim = c(0, 30)) \item axis
#' scales: xscale, yscale (e.g.: yscale = "log2") \item color palettes:
#' palette = "Dark2" or palette = c("gray", "blue", "red") \item legend title,
#' labels and position: legend = "right" \item plot orientation : orientation
#' = c("vertical", "horizontal", "reverse") }
#' @seealso \code{\link{ggpar}}
#' @examples
#' # Load data
#' data("ToothGrowth")
#' df <- ToothGrowth
#'
#' # Basic plot with summary statistics: mean_se
#' # +++++++++++++++++++++++++++
#' # Change point shapes by groups: "dose"
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", size = 3,
#' add = "mean_se")
#'
#' # Use mean_sd
#' # Change error.plot to "crossbar"
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", size = 3,
#' add = "mean_sd", add.params = list(width = 0.5),
#' error.plot = "crossbar")
#'
#'
#'
#' # Add summary statistics
#' # ++++++++++++++++++++++++++
#'
#' # Add box plot
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", add = "boxplot")
#'
#' # Add violin + mean_sd
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", add = c("violin", "mean_sd"))
#'
#'
#' # Change colors
#' # +++++++++++++++++++++++++++
#' # Change colors by groups: dose
#' # Use custom color palette
#' ggstripchart(df, "dose", "len", shape = "dose",
#' color = "dose", palette = c("#00AFBB", "#E7B800", "#FC4E07"),
#' add = "mean_sd")
#'
#'
#'
#' # Plot with multiple groups
#' # +++++++++++++++++++++
#' # Change shape and color by a second group : "supp"
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"))
#'
#' # Adjust point position
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' position = position_dodge(0.8) )
#'
#' # You can also use position_jitterdodge()
#' # but fill aesthetic is required
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' position = position_jitterdodge() )
#'
#' # Add boxplot
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' add = "boxplot", add.params = list(color = "black") )
#'
#' @export
# Build the stripchart (one-dimensional jittered scatter) documented in the
# roxygen block above. `add` may request extra layers ("boxplot", "violin",
# or summary statistics such as "mean_se"); distribution layers are drawn
# beneath the jittered points, summary layers on top.
ggstripchart <- function(data, x, y,
                      color = "black", fill = "white", palette = NULL,
                      shape = 19, size = 2,
                      select = NULL, order = NULL,
                      add = "mean_se",
                      add.params = list(),
                      error.plot = "pointrange",
                      position = position_jitter(0.4),
                      ggtheme = theme_pubr(),
                      ...)
{
  # x is always treated as a discrete grouping axis
  data[, x] <- factor(data[, x])
  p <- ggplot(data, aes_string(x, y))
  # Collapse any vector containing "none" to the single "none" sentinel
  if("none" %in% add) add <- "none"
  # NOTE(review): .check_add.params, .add and .geom_exec are internal helpers
  # defined elsewhere in this package (see @include utilities.R) -- confirm
  # their contracts there.
  add.params <- .check_add.params(add, add.params, error.plot, data, color, fill, ...)
  # plot boxplot | violin | crossbar before jitter so the points stay visible
  if( any( c("boxplot", "violin") %in% add)){
    p <- .add(p, add = intersect(add, c("boxplot", "violin")),
              add.params = add.params, data = data)
  }
  # crossbar error bars are also drawn underneath the points
  if(error.plot == "crossbar"){
    p <- .add(p, add = setdiff(add, c("boxplot", "violin", "jitter")),
              add.params = add.params, data = data, error.plot = error.plot)
  }
  # Plot jitter (the stripchart points themselves)
  p <- p +
    .geom_exec(geom_jitter, data = data,
               color = color, fill = fill, shape = shape,
               position = position, size = size, ...)
  # Add error layers on top -- unless already drawn as crossbars above
  if(error.plot == "crossbar"){}
  else p <- .add(p, add = setdiff(add, c("boxplot", "violin", "jitter")),
           add.params = add.params, error.plot = error.plot)
  # Select and order x-axis categories; `order` acts as a fallback for `select`
  if(is.null(select)) select <- order
  if (!is.null(select) | !is.null(order))
    p <- p + scale_x_discrete(limits = as.character(select))
  # Apply palette, theme and remaining graphical parameters
  p <- ggpar(p, palette = palette, ggtheme = ggtheme, ...)
  p
}
| /R/ggstripchart.R | no_license | jinyancool/ggpubr | R | false | false | 4,724 | r | #' @include utilities.R ggpar.R
NULL
#' Stripcharts
#' @description Create a stripchart, also known as one dimensional scatter
#' plots. These plots are suitable compared to box plots when sample sizes are
#' small.
#' @inheritParams ggboxplot
#' @param x,y x and y variables for drawing.
#' @param color,fill outline and fill colors.
#' @param shape point shape
#' @param position position adjustment, either as a string, or the result of a
#' call to a position adjustment function. Used to adjust position for
#' multiple groups.
#' @param ... other arguments to be passed to geom_jitter.
#' @details The plot can be easily customized using the function ggpar(). Read
#' ?ggpar for changing: \itemize{ \item main title and axis labels: main,
#' xlab, ylab \item axis limits: xlim, ylim (e.g.: ylim = c(0, 30)) \item axis
#' scales: xscale, yscale (e.g.: yscale = "log2") \item color palettes:
#' palette = "Dark2" or palette = c("gray", "blue", "red") \item legend title,
#' labels and position: legend = "right" \item plot orientation : orientation
#' = c("vertical", "horizontal", "reverse") }
#' @seealso \code{\link{ggpar}}
#' @examples
#' # Load data
#' data("ToothGrowth")
#' df <- ToothGrowth
#'
#' # Basic plot with summary statistics: mean_se
#' # +++++++++++++++++++++++++++
#' # Change point shapes by groups: "dose"
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", size = 3,
#' add = "mean_se")
#'
#' # Use mean_sd
#' # Change error.plot to "crossbar"
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", size = 3,
#' add = "mean_sd", add.params = list(width = 0.5),
#' error.plot = "crossbar")
#'
#'
#'
#' # Add summary statistics
#' # ++++++++++++++++++++++++++
#'
#' # Add box plot
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", add = "boxplot")
#'
#' # Add violin + mean_sd
#' ggstripchart(df, x = "dose", y = "len",
#' shape = "dose", add = c("violin", "mean_sd"))
#'
#'
#' # Change colors
#' # +++++++++++++++++++++++++++
#' # Change colors by groups: dose
#' # Use custom color palette
#' ggstripchart(df, "dose", "len", shape = "dose",
#' color = "dose", palette = c("#00AFBB", "#E7B800", "#FC4E07"),
#' add = "mean_sd")
#'
#'
#'
#' # Plot with multiple groups
#' # +++++++++++++++++++++
#' # Change shape and color by a second group : "supp"
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"))
#'
#' # Adjust point position
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' position = position_dodge(0.8) )
#'
#' # You can also use position_jitterdodge()
#' # but fill aesthetic is required
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' position = position_jitterdodge() )
#'
#' # Add boxplot
#' ggstripchart(df, "dose", "len", shape = "supp",
#' color = "supp", palette = c("#00AFBB", "#E7B800"),
#' add = "boxplot", add.params = list(color = "black") )
#'
#' @export
# Stripchart builder (see the roxygen block above for the user-facing docs).
# Layer order: distribution layers ("boxplot"/"violin") first, then crossbar
# error bars if requested, then the jittered points, then remaining summary
# layers, so that points are never hidden under later layers.
ggstripchart <- function(data, x, y,
                      color = "black", fill = "white", palette = NULL,
                      shape = 19, size = 2,
                      select = NULL, order = NULL,
                      add = "mean_se",
                      add.params = list(),
                      error.plot = "pointrange",
                      position = position_jitter(0.4),
                      ggtheme = theme_pubr(),
                      ...)
{
  # The x variable always acts as a discrete grouping axis
  data[, x] <- factor(data[, x])
  p <- ggplot(data, aes_string(x, y))
  # A vector containing "none" collapses to the single "none" sentinel
  if ("none" %in% add) add <- "none"
  add.params <- .check_add.params(add, add.params, error.plot, data, color, fill, ...)
  # Distribution layers go underneath the points
  dist_layers <- intersect(add, c("boxplot", "violin"))
  if (length(dist_layers) > 0)
    p <- .add(p, add = dist_layers, add.params = add.params, data = data)
  # Summary-statistic layers (everything except the distribution/jitter ones)
  summary_layers <- setdiff(add, c("boxplot", "violin", "jitter"))
  # Crossbar error bars are also drawn before (i.e. underneath) the points
  if (error.plot == "crossbar")
    p <- .add(p, add = summary_layers, add.params = add.params,
              data = data, error.plot = error.plot)
  # The jittered points themselves
  p <- p + .geom_exec(geom_jitter, data = data,
                      color = color, fill = fill, shape = shape,
                      position = position, size = size, ...)
  # Non-crossbar error layers sit on top of the points
  if (error.plot != "crossbar")
    p <- .add(p, add = summary_layers, add.params = add.params,
              error.plot = error.plot)
  # Category selection/ordering; `order` is the fallback for `select`
  if (is.null(select)) select <- order
  if (!is.null(select) || !is.null(order))
    p <- p + scale_x_discrete(limits = as.character(select))
  # Apply palette, theme and remaining graphical parameters, then return
  ggpar(p, palette = palette, ggtheme = ggtheme, ...)
}
|
ppt = load(('~/Rutgers/Data Analytics 16.137.550.01/Project/Terrrorism Analysis/ppt/ppt_data.RData'))
| /R Code/Terrorist_organizations.r | no_license | chandansaha2014/GTD-Predictive-Modeling | R | false | false | 103 | r | ppt = load(('~/Rutgers/Data Analytics 16.137.550.01/Project/Terrrorism Analysis/ppt/ppt_data.RData'))
|
RDoc ๅฝขๅผใฎใใญใฅใกใณใใใจในใฑใผใใทใผใฑใณในใงๅคชๅญใใขใณใใผใฉใคใณใฎๅน
ๆใๆใใใใใใซ่ฆใใใตใใฉใคใใฉใชใงใใ
#@until 1.9.3
require 'rdoc/markup/formatter'
#@end
require 'rdoc/markup/to_bs'
h = RDoc::Markup::ToBs.new
puts h.convert(input_string)
ๅคๆใใ็ตๆใฏๆๅญๅใงๅๅพใงใใพใใๅฎ้ใซๅนๆใ็ขบ่ชใใใใใซใฏใใผใธใฃ
ใชใฉใงๅบๅใ็ขบ่ชใใพใใ
ใใญในใไธญใฎไปฅไธใฎใใผใฏใขใใ่จๆณใใจในใฑใผใใทใผใฑใณในใฎ \b ใจ็ตใฟๅ
ใใใไบใซใใฃใฆ่ฆใ็ฎใๅคๆดใใพใใ
* ใคใฟใชใใฏไฝ(_word_): word ใซใขใณใใผใฉใคใณใไปใใฆ่กจ็คบใใ("w\b_o\b_r\b_d\b_" ใซๅคๆใใใพใ)
* ใใผใซใไฝ(*word*): word ใๅคชๅญใง่กจ็คบใใ("w\bwo\bor\brd\bd" ใซๅคๆใใใพใ)
= class RDoc::Markup::ToBs < RDoc::Markup::ToRdoc
RDoc ๅฝขๅผใฎใใญใฅใกใณใใใจในใฑใผใใทใผใฑใณในใงๅคชๅญใใขใณใใผใฉใคใณใฎๅน
ๆใๆใใใใใใซ่ฆใใใฏใฉในใงใใ
== Class Methods
#@since 1.9.3
--- new(markup = nil) -> RDoc::Markup::ToBs
#@else
--- new -> RDoc::Markup::ToBs
#@end
๊ฐ์ฒด๋ฅผ ์ด๊ธฐํํฉ๋๋ค.
#@since 1.9.3
@param markup [[c:RDoc::Markup]] ๊ฐ์ฒด๋ฅผ ์ง์ ํฉ๋๋ค.์ง์ ๋์ง ์์ผ๋ฉด
์๋ก์ด ๊ฐ์ฒด๋ฅผ ์์ฑํฉ๋๋ค.
#@end
| /target/rubydoc/refm/api/src/rdoc/markup/to_bs.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 1,371 | rd | RDoc ๅฝขๅผใฎใใญใฅใกใณใใใจในใฑใผใใทใผใฑใณในใงๅคชๅญใใขใณใใผใฉใคใณใฎๅน
ๆใๆใใใใใใซ่ฆใใใตใใฉใคใใฉใชใงใใ
#@until 1.9.3
require 'rdoc/markup/formatter'
#@end
require 'rdoc/markup/to_bs'
h = RDoc::Markup::ToBs.new
puts h.convert(input_string)
ๅคๆใใ็ตๆใฏๆๅญๅใงๅๅพใงใใพใใๅฎ้ใซๅนๆใ็ขบ่ชใใใใใซใฏใใผใธใฃ
ใชใฉใงๅบๅใ็ขบ่ชใใพใใ
ใใญในใไธญใฎไปฅไธใฎใใผใฏใขใใ่จๆณใใจในใฑใผใใทใผใฑใณในใฎ \b ใจ็ตใฟๅ
ใใใไบใซใใฃใฆ่ฆใ็ฎใๅคๆดใใพใใ
* ใคใฟใชใใฏไฝ(_word_): word ใซใขใณใใผใฉใคใณใไปใใฆ่กจ็คบใใ("w\b_o\b_r\b_d\b_" ใซๅคๆใใใพใ)
* ใใผใซใไฝ(*word*): word ใๅคชๅญใง่กจ็คบใใ("w\bwo\bor\brd\bd" ใซๅคๆใใใพใ)
= class RDoc::Markup::ToBs < RDoc::Markup::ToRdoc
RDoc ๅฝขๅผใฎใใญใฅใกใณใใใจในใฑใผใใทใผใฑใณในใงๅคชๅญใใขใณใใผใฉใคใณใฎๅน
ๆใๆใใใใใใซ่ฆใใใฏใฉในใงใใ
== Class Methods
#@since 1.9.3
--- new(markup = nil) -> RDoc::Markup::ToBs
#@else
--- new -> RDoc::Markup::ToBs
#@end
๊ฐ์ฒด๋ฅผ ์ด๊ธฐํํฉ๋๋ค.
#@since 1.9.3
@param markup [[c:RDoc::Markup]] ๊ฐ์ฒด๋ฅผ ์ง์ ํฉ๋๋ค.์ง์ ๋์ง ์์ผ๋ฉด
์๋ก์ด ๊ฐ์ฒด๋ฅผ ์์ฑํฉ๋๋ค.
#@end
|
# Continuous-data review helper: joins a temperature-logger series with tide
# (water-level) data and serves an interactive plotly chart via shiny so a
# reviewer can spot tidal influence on the readings.
library(tidyverse)
library(ggplot2)
library(plotly)
library(shiny)
library(lubridate)
# Read all tide data here. Dates must be pre-formatted as yyyy/mm/dd HH:MM
tidedata <- read.csv("June2016.csv")
# Logger data file (saved by the Check_shinyapp workflow) to review
fname1 <- "0097_40024_10938353_TEMP_20160623_God Wants You Slough 2 (mid)_0_.Rdata"
###################################################################################################################
###################################################################################################################
### LOCATION OF DATA FILES TO BE PROCESSED (This shouldn't change)
shiny_path <- "//deqlab1/wqm/Volunteer Monitoring/datamanagement/R/ContinuousDataReview/Check_shinyapp/data/"
# Load site data from shiny folder as dataframe
# NOTE(review): assumes the .Rdata file defines an object named `tmp_data` -- confirm.
load(paste0(shiny_path, fname1))
site1 <- tmp_data
# Parse timestamps on both tables so they can be joined on DATETIME
site1$DATETIME <- as.POSIXct(site1$DATETIME, format = "%y%m%d %H:%M")
tidedata$DATETIME <- as.POSIXct(tidedata$DATETIME)
# Attach the tide reading (if any) to each logger timestamp
sitetide <- left_join(site1, tidedata, by = "DATETIME")
##################################
### Start of shiny app graphing###
##################################
ui <- fluidPage(
  plotlyOutput("plot"),
  verbatimTextOutput("hover"),
  verbatimTextOutput("click"),
  verbatimTextOutput("brush"),
  verbatimTextOutput("zoom")
)
server <- function(input, output, session) {
  output$plot <- renderPlotly({
    # use the key aesthetic/argument to help uniquely identify selected observations
    key <- sitetide$DATETIME
    # NOTE(review): plots column `r` colored by `water_level`; both must exist
    # in the joined data -- verify against the logger file and June2016.csv.
    plot_ly(
      sitetide,
      x = ~ DATETIME,
      y = ~ r,
      color = ~ water_level,
      colors = "Blues",
      key = ~ key,
      type = 'scatter'
    ) %>%
      layout(
        dragmode = "zoom",
        legend = list(
          x = 0,
          y = -0.7,
          orientation = 'h'
        ),
        height = 900
      )
  })
  # output$click <- renderPrint({
  #   d <- event_data("plotly_click")
  #   if (is.null(d)) "Click events appear here (double-click to clear)" else d
  # })
  #
  #
  # output$zoom <- renderPrint({
  #   d <- event_data("plotly_relayout")
  #   if (is.null(d)) "Relayout (i.e., zoom) events appear here" else d
  # })
}
shinyApp(ui, server)
| /tidecheck.R | no_license | TravisPritchardODEQ/Cont-data-review | R | false | false | 2,320 | r | library(tidyverse)
library(ggplot2)
library(plotly)
library(shiny)
library(lubridate)
#read all tide data here. Be sure to reformat dates to yyyy/mm/dd HH:MM
tidedata <- read.csv("June2016.csv")
#load date from shiny
fname1 <- "0097_40024_10938353_TEMP_20160623_God Wants You Slough 2 (mid)_0_.Rdata"
###################################################################################################################
###################################################################################################################
### LOCATION OF DATA FILES TO BE PROCESSED (This shouldn't change)
shiny_path <- "//deqlab1/wqm/Volunteer Monitoring/datamanagement/R/ContinuousDataReview/Check_shinyapp/data/"
#Load site data from shiny folder as dataframe
load(paste0(shiny_path, fname1))
site1 <- tmp_data
#Sort out some dates
site1$DATETIME <- as.POSIXct(site1$DATETIME, format = "%y%m%d %H:%M")
tidedata$DATETIME <- as.POSIXct(tidedata$DATETIME)
#add tide info
sitetide <- left_join(site1, tidedata, by = "DATETIME")
##################################
### Start of shiny app graphing###
##################################
ui <- fluidPage(
plotlyOutput("plot"),
verbatimTextOutput("hover"),
verbatimTextOutput("click"),
verbatimTextOutput("brush"),
verbatimTextOutput("zoom")
)
server <- function(input, output, session) {
output$plot <- renderPlotly({
# use the key aesthetic/argument to help uniquely identify selected observations
key <- sitetide$DATETIME
plot_ly(
sitetide,
x = ~ DATETIME,
y = ~ r,
color = ~ water_level,
colors = "Blues",
key = ~ key,
type = 'scatter'
) %>%
layout(
dragmode = "zoom",
legend = list(
x = 0,
y = -0.7,
orientation = 'h'
),
height = 900
)
})
# output$click <- renderPrint({
# d <- event_data("plotly_click")
# if (is.null(d)) "Click events appear here (double-click to clear)" else d
# })
#
#
# output$zoom <- renderPrint({
# d <- event_data("plotly_relayout")
# if (is.null(d)) "Relayout (i.e., zoom) events appear here" else d
# })
}
shinyApp(ui, server)
|
# Expected fantasy points (xFP) boxplots, per game, for receivers (WR/TE)
# and running backs, built from nflfastR play-by-play data and ordered by
# ADP or expected points.
library(tidyverse)
library(ggrepel)
library(ggimage)
library(nflfastR)
library(dplyr)
library(ggplot2)
# NOTE(review): ggrepel is loaded twice (also above); harmless but redundant.
library(ggrepel)
library(stringr)
options(scipen = 9999)
# Project helpers: calculate_rush_xfp_by_game, add_xyac_to_pbp,
# calculate_rec_xfp_by_game come from this file -- see it for their contracts.
source("utils/nfl_utils.R")
### Generate boxplots of expected FP ###
# TODO - Split this into RB and WR files
### Background Work ###
# Define variables
SEASON_TO_ANALYZE <- 2020
START_WEEK <- 1
# TODO - Remember that now seasons have 18 weeks
END_WEEK <- 17
PTS_PER_RECEPTION <- 1
# Load ADP data (gsis_id is the join key used throughout)
adp_data <- read.csv(file = "helpful_csvs/2021_clean_adp_data.csv") %>%
  select(gsis_id, adp)
# Grab the rosters for use in filtering by position
players <- nflfastR::fast_scraper_roster(SEASON_TO_ANALYZE) %>%
  subset(select = c(team, position, first_name, last_name, gsis_id))
# Load annual PBP Data, regular season only, within the chosen week window
pbp_df <- load_pbp(SEASON_TO_ANALYZE) %>%
  filter(season_type == "REG", week >= START_WEEK, week <= END_WEEK)
### Expected points from rushing, grouped by game ###
xfp_rushes <- calculate_rush_xfp_by_game(pbp_df)
### Expected points from receiving ###
# Add xyac data to pbp
pbp_with_xyac <- add_xyac_to_pbp(pbp_df)
# Calculate xfp using xyac data
xfp_targets <- calculate_rec_xfp_by_game(pbp_with_xyac, PTS_PER_RECEPTION)
# Prune the dataframe only to what's necessary
concise_xfp_targets <- xfp_targets %>%
  select(
    game_id,
    player = receiver,
    gsis_id = receiver_id,
    rec_games = games,
    exp_rec_pts = exp_pts,
    actual_rec_pts = pts
  )
## Boxplots for receivers
# Grab only the receivers above a certain season points threshold
relevant_players <- concise_xfp_targets %>%
  group_by(gsis_id) %>%
  summarize(total_xfp = sum(exp_rec_pts)) %>%
  filter(total_xfp > 50)
# Filter by receiver type if you wish (currently tight ends only)
relevant_receivers <- merge(relevant_players, players) %>%
  filter(position == "TE")
# Create list of season-long/other data to merge with the game-by-game data
relevant_receivers_with_adp <- merge(relevant_receivers, adp_data)
# Create a df of all the games by relevant receivers
receivers_to_plot = merge(concise_xfp_targets, relevant_receivers_with_adp)
# Plot
# To order by avg. xfp per game use reorder(player, -exP-rec_pts)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, exp_rec_pts, IQR)
# To order by ADP use reorder(player, adp)
ggplot(receivers_to_plot, aes(x=reorder(player, adp), y=exp_rec_pts, label=player)) +
  geom_boxplot() +
  theme(axis.text.x = element_text(angle = -90)) +
  labs(x = "Player",
       y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
       title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
       caption = "Via nflFastR"
  )
## Boxplots for RBs
# Prune the dataframe only to what's necessary
concise_xfp_rushes <- xfp_rushes %>%
  select(
    player = rusher,
    gsis_id,
    game_id,
    rush_games = games,
    exp_rush_pts,
    actual_rush_pts
  )
# Get the total (season-long) combined rush/rec xfp for players (for use in determining relevant players and graph ordering)
combined_xfp_aggregate <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
  group_by(gsis_id, player) %>%
  summarise(total_xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE))
# Capture only players above a certain threshold for eventual graphing
players_meeting_points_threshold <- combined_xfp_aggregate %>%
  filter(total_xfp > 125) %>%
  select(player, total_xfp)
# Create list of season-long/other data to merge with the game-by-game data
rbs_to_merge <- merge(players_meeting_points_threshold, adp_data)
# Build a list of each player's combined rush/rec xfp on a game-by-game basis
combined_xfp_by_game <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
  group_by(gsis_id, player, game_id) %>%
  summarise(
    xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE)
  )
# Combine a list of all running back with a list of all players meeting the graphing threshold
# to produce a list of all running backs that will be graphed
relevant_rbs <- merge(rbs_to_merge, players) %>%
  filter(position == "RB") %>%
  select(gsis_id, player, total_xfp, adp)
# Then merge the above list with the list of all games to get all games played by relevant RBs
rb_xfp_by_game <- merge(combined_xfp_by_game, relevant_rbs)
# Plot
# To order by avg. xfp per game use reorder(player, -xfp)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, xfp, IQR)
# To order by ADP use reorder(player, adp)
ggplot(rb_xfp_by_game, aes(x=reorder(player, -xfp), y=xfp, label=player)) +
  geom_boxplot() +
  theme(axis.text.x = element_text(angle = -90)) +
  labs(x = "Player",
       y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
       title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
       caption = "Via nflFastR"
  )
| /adp_analysis/xfp_boxplot.R | no_license | lbuckheit/nfl | R | false | false | 4,845 | r | library(tidyverse)
library(ggrepel)
library(ggimage)
library(nflfastR)
library(dplyr)
library(ggplot2)
library(ggrepel)
library(stringr)
options(scipen = 9999)
source("utils/nfl_utils.R")
### Generate boxplots of expected FP ###
# TODO - Split this into RB and WR files
### Background Work ###
# Define variables
SEASON_TO_ANALYZE <- 2020
START_WEEK <- 1
# TODO - Remember that now seasons have 18 weeks
END_WEEK <- 17
PTS_PER_RECEPTION <- 1
# Load ADP data
adp_data <- read.csv(file = "helpful_csvs/2021_clean_adp_data.csv") %>%
select(gsis_id, adp)
# Grab the rosters for use in filtering by position
players <- nflfastR::fast_scraper_roster(SEASON_TO_ANALYZE) %>%
subset(select = c(team, position, first_name, last_name, gsis_id))
# Load annual PBP Data
pbp_df <- load_pbp(SEASON_TO_ANALYZE) %>%
filter(season_type == "REG", week >= START_WEEK, week <= END_WEEK)
### Expected points from rushing, grouped by game ###
xfp_rushes <- calculate_rush_xfp_by_game(pbp_df)
### Expected points from receiving ###
# Add xyac data to pbp
pbp_with_xyac <- add_xyac_to_pbp(pbp_df)
# Calculate xfp using xyac data
xfp_targets <- calculate_rec_xfp_by_game(pbp_with_xyac, PTS_PER_RECEPTION)
# Prune the dataframe only to what's necessary
concise_xfp_targets <- xfp_targets %>%
select(
game_id,
player = receiver,
gsis_id = receiver_id,
rec_games = games,
exp_rec_pts = exp_pts,
actual_rec_pts = pts
)
## Boxplots for receivers
# Grab only the receivers above a certain season points threshold
relevant_players <- concise_xfp_targets %>%
group_by(gsis_id) %>%
summarize(total_xfp = sum(exp_rec_pts)) %>%
filter(total_xfp > 50)
# Filter by receiver type if you wish
relevant_receivers <- merge(relevant_players, players) %>%
filter(position == "TE")
# Create list of season-long/other data to merge with the game-by-game data
relevant_receivers_with_adp <- merge(relevant_receivers, adp_data)
# Create a df of all the games by relevant receivers
receivers_to_plot = merge(concise_xfp_targets, relevant_receivers_with_adp)
# Plot
# To order by avg. xfp per game use reorder(player, -exP-rec_pts)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, exp_rec_pts, IQR)
# To order by ADP use reorder(player, adp)
ggplot(receivers_to_plot, aes(x=reorder(player, adp), y=exp_rec_pts, label=player)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = -90)) +
labs(x = "Player",
y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
caption = "Via nflFastR"
)
## Boxplots for RBs
# Prune the dataframe only to what's necessary
concise_xfp_rushes <- xfp_rushes %>%
select(
player = rusher,
gsis_id,
game_id,
rush_games = games,
exp_rush_pts,
actual_rush_pts
)
# Get the total (season-long) combined rush/rec xfp for players (for use in determining relevant players and graph ordering)
combined_xfp_aggregate <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
group_by(gsis_id, player) %>%
summarise(total_xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE))
# Capture only players above a certain threshold for eventual graphing
players_meeting_points_threshold <- combined_xfp_aggregate %>%
filter(total_xfp > 125) %>%
select(player, total_xfp)
# Create list of season-long/other data to merge with the game-by-game data
rbs_to_merge <- merge(players_meeting_points_threshold, adp_data)
# Build a list of each player's combined rush/rec xfp on a game-by-game basis
combined_xfp_by_game <- dplyr::bind_rows(concise_xfp_rushes, concise_xfp_targets) %>%
group_by(gsis_id, player, game_id) %>%
summarise(
xfp = sum(exp_rec_pts, exp_rush_pts, na.rm=TRUE)
)
# Combine a list of all running back with a list of all players meeting the graphing threshold
# to produce a list of all running backs that will be graphed
relevant_rbs <- merge(rbs_to_merge, players) %>%
filter(position == "RB") %>%
select(gsis_id, player, total_xfp, adp)
# Then merge the above list with the list of all games to get all games played by relevant RBs
rb_xfp_by_game <- merge(combined_xfp_by_game, relevant_rbs)
# Plot
# To order by avg. xfp per game use reorder(player, -xfp)
# To order by total season xfp, use reorder(player, -total_xfp)
# To order by IQR size use reorder(player, xfp, IQR)
# To order by ADP use reorder(player, adp)
ggplot(rb_xfp_by_game, aes(x=reorder(player, -xfp), y=xfp, label=player)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = -90)) +
labs(x = "Player",
y = str_glue("Exp.{PTS_PER_RECEPTION}PPR Pts."),
title = str_glue("{SEASON_TO_ANALYZE} Expected {PTS_PER_RECEPTION}PPR Pts. Boxplots"),
caption = "Via nflFastR"
)
|
# Wage regression exercise on the NLSW88 extract: hand-computed OLS slope,
# simple/dummy/multiple regressions, and an F test of one linear restriction.
# NOTE(review): rm(list = ls()) wipes the calling environment; acceptable only
# because this runs as a stand-alone script.
# Load data
rm(list = ls())
library(car)
nlsw88 <- read.csv('nlsw88.csv')
# Slope by hand: beta1_hat = cov(y, x) / var(x)
cov_yx <- cov(nlsw88$lwage, nlsw88$yrs_school)
var_x <- var(nlsw88$yrs_school)
hatbeta1_0 <- cov_yx / var_x
print(hatbeta1_0)
# Simple linear regression of log wage on years of schooling
single <- lm(lwage ~ yrs_school, data = nlsw88)
summary(single) # show results
coefficients(single) # model coefficients
ci <- confint(single, level=0.9)
ci
resid <- residuals(single) # residuals
sum(resid) # numerically zero: OLS residuals sum to 0 with an intercept
# Dummy variable: the coefficient on `black` equals the difference in group means
meanother <- mean(nlsw88$lwage[nlsw88$black == 0])
meanblack <- mean(nlsw88$lwage[nlsw88$black == 1])
meanother
meanblack - meanother
dummymodel <- lm(lwage ~ black, data = nlsw88)
summary(dummymodel)
# Multivariable regression (unrestricted model)
multi <- lm(lwage ~ yrs_school + ttl_exp, data = nlsw88)
summary(multi) # show results
anova_unrest <- anova(multi)
# Restricted model imposing beta_school = 2 * beta_exp via the combined regressor
nlsw88$newvar <- nlsw88$yrs_school + 2*nlsw88$ttl_exp
restricted <- lm(lwage ~ newvar, data = nlsw88)
summary(restricted) # show results
anova_rest <- anova(restricted)
# F statistic for the single restriction: ((SSR_r - SSR_u)/1) / (SSR_u/df_u)
statistic_test <- (((anova_rest$`Sum Sq`[2]-anova_unrest$`Sum Sq`[3])/1)
                   /((anova_unrest$`Sum Sq`[3])/anova_unrest$Df[3]))
statistic_test
# Bug fix: the p-value of an F test is the upper-tail probability pf(...),
# not the density df(...) that was computed originally.
pvalue <- pf(statistic_test, 1, anova_unrest$Df[3], lower.tail = FALSE)
pvalue
# Cross-check with car::linearHypothesis (tests R %*% beta = 0)
matrixR <- c(0, -2, 1)
linearHypothesis(multi, matrixR)
| /Wage_vs_yrsschool/Wage_vs_yrsschool.R | no_license | tylerjlang93/R_JPAL | R | false | false | 1,286 | r | # Load data
rm(list = ls())
library(car)
nlsw88 <- read.csv('nlsw88.csv')
#covariance
cov_yx <- cov(nlsw88$lwage, nlsw88$yrs_school)
var_x <- var(nlsw88$yrs_school)
hatbeta1_0 <- cov_yx / var_x
print(hatbeta1_0)
#simple linear regression
single <- lm(lwage ~ yrs_school, data = nlsw88)
summary(single) # show results
coefficients(single) # model coefficients
ci <- confint(single, level=0.9)
ci
resid <- residuals(single) # residuals
sum(resid)
#dummy variables
meanother <- mean(nlsw88$lwage[nlsw88$black == 0])
meanblack <- mean(nlsw88$lwage[nlsw88$black == 1])
meanother
meanblack - meanother
dummymodel <- lm(lwage ~ black, data = nlsw88)
summary(dummymodel)
#multivariable regression
multi <- lm(lwage ~ yrs_school + ttl_exp, data = nlsw88)
summary(multi) # show results
anova_unrest <- anova(multi)
#Restricted model
nlsw88$newvar <- nlsw88$yrs_school + 2*nlsw88$ttl_exp
restricted <- lm(lwage ~ newvar, data = nlsw88)
summary(restricted) # show results
anova_rest <- anova(restricted)
#Test
statistic_test <- (((anova_rest$`Sum Sq`[2]-anova_unrest$`Sum Sq`[3])/1)
/((anova_unrest$`Sum Sq`[3])/anova_unrest$Df[3]))
statistic_test
pvalue <- df(statistic_test, 1, anova_unrest$Df[3])
pvalue
matrixR <- c(0, -2, 1)
linearHypothesis(multi, matrixR)
|
## Output format producing an HTML *fragment* suitable for embedding in an
## existing web page: a bookdown::html_document2 whose output is
## post-processed to inject author affiliations, append a "References"
## header, demote headers one level, convert footnotes/captions to
## sidenotes, and optionally add section navigation links.
html_fragment <- function(...,
                          toc = TRUE,
                          number_sections = TRUE,
                          fig_width = 5.67,
                          fig_height = fig_width,
                          fig_align = "right",
                          fig_crop = TRUE,
                          theme = NULL,
                          highlight = NULL,
                          navlinks = TRUE,
                          navlinks_top = TRUE,
                          navlinks_level = 3) {
  ## enable BiocStyle macros
  require(BiocStyle, quietly = TRUE)
  knitr <- knitr_options(opts_knit = list(width = 80L),
                         # remove figure margins
                         opts_chunk = list(crop = isTRUE(fig_crop),
                                           fig.align = fig_align),
                         knit_hooks = list(crop = hook_pdfcrop))
  ## First post-processing pass; runs BEFORE bookdown's own processing (see
  ## the wrapper installed further below). Operates on the rendered HTML.
  post_processor <- function(metadata, input, output, clean, verbose) {
    lines <- readUTF8(output)
    ## add author affiliations
    lines <- modifyLines(lines, from='<!-- AUTH AFFIL -->', insert=auth_affil_html(metadata))
    ## append "References" section header
    if ( !is.null(metadata$bibliography) )
      lines <- add_refs_header(lines)
    ## move all headers one level down (for proper formatting when embedded in the website)
    lines <- process_headers(lines)
    writeUTF8(lines, output)
    output
  }
  config <- rmarkdown::output_format(
    knitr = knitr,
    pandoc = NULL,
    post_processor = post_processor,
    base_format = bookdown::html_document2(
      toc = toc,
      number_sections = number_sections,
      fig_width = fig_width,
      fig_height = fig_height,
      theme = theme,
      highlight = highlight,
      ...
    ))
  ## swap template afterwards in order to retain original mathjax functionality
  template <- system.file("resources", "fragment.html", package="BiocStyle")
  idx <- which(config$pandoc$args=="--template") + 1L
  config$pandoc$args[idx] <- template
  ## one needs to run the post processor processing headers before bookdown, but
  ## process captions only after bookdown
  ## (so wrap the post processor set above: run it first, then the
  ## caption/footnote/navigation steps on its output)
  post = config$post_processor
  config$post_processor <- function(metadata, input, output, clean, verbose) {
    if (is.function(post)) output = post(metadata, input, output, clean, verbose)
    lines <- readUTF8(output)
    ## replace footnotes with sidenotes
    lines = process_footnotes(lines)
    ## set captions as sidenotes
    lines = process_captions(lines)
    ## add navigation
    if (isTRUE(navlinks))
      lines <- add_navigation(lines, navlinks_top, navlinks_level)
    writeUTF8(lines, output)
    output
  }
  config
}
## Insert an <h1>References</h1> header immediately before the
## pandoc-citeproc reference list, unless a header already precedes it.
## `lines`: character vector of HTML lines; returns the (possibly) modified
## vector.
add_refs_header = function(lines) {
  refs <- grep('^<div id="refs" class="references">$', lines)
  ## Robustness fix: the original indexed with an empty/multiple match and
  ## errored (lines[1:integer(0)]) or mangled output; leave such input alone.
  if (length(refs) != 1L)
    return(lines)
  ## Skip insertion when the preceding line already carries an <h1> header
  if ( !isTRUE(grepl('<h1>.*</h1>', lines[refs - 1L])) )
    ## append() also handles the edge case where the div is the last line
    ## (the original's (refs+1):length(lines) indexing corrupted that case)
    lines <- append(lines, '<h1>References</h1>', after = refs)
  lines
}
## Demote every <h1>..<h5> header in `lines` by one level (h1 -> h2, ...,
## h5 -> h6) so the fragment nests correctly when embedded in a page that
## already owns the top-level headings.
## NOTE: assumes each header occupies its line's matched region entirely
## (as pandoc emits them) -- the matched span is replaced with the whole
## transformed line, mirroring the original implementation.
process_headers = function(lines) {
  tpl <- '<h%s>%s</h%s>'
  pat <- sprintf(tpl, '([1-5])', '(.*)', '\\1')
  hits <- regexpr(pat, lines)
  full <- grep(pat, lines, value = TRUE)
  lvls <- as.integer(gsub(pat, '\\1', full)) + 1L
  regmatches(lines, hits) <- vapply(
    seq_along(full),
    function(i) sub(pat, sprintf(tpl, lvls[i], '\\2', lvls[i]), full[i]),
    character(1))
  lines
}
## Insert per-section navigation sidenotes (TOC / next / previous / up / down
## links) after each section <div> of the rendered HTML.
## `lines`: the document split into lines; `top`: add a virtual "Top" anchor
## above all sections; `level`: deepest header level that gets navigation.
add_navigation = function(lines, top, level) {
  ## match to all section div's (one opening div per numbered section)
  pattern <- sprintf('^<div id="(.*)" class="section level([1-%d]).*">$', level)
  idx <- grep(pattern, lines)
  sections <- lines[idx]
  sections_length <- length(sections)
  ## extract section metadata: anchor ids, nesting levels, display titles
  section_ids <- sub(pattern, '#\\1', sections)
  section_levels <- as.integer(sub(pattern, '\\2', sections))
  ## the header line directly follows the opening div; strip the optional
  ## section-number <span> to recover the bare title
  section_names <- sub('<h([1-6])>(?:<span class="header-section-number">[0-9.]*</span> )?(.*)</h\\1>', '\\2', lines[idx+1L])
  seq_indices = seq_along(sections)
  ## add virtual top section at level 0 so level-1 sections get an "up" link
  if (isTRUE(top)) {
    section_ids <- c("#top", section_ids)
    section_levels <- c(0L, section_levels)
    section_names <- c("Top", section_names)
    seq_indices <- seq_indices + 1L  # entry 1 is now the virtual top
  }
  ## index of previous section on the same level (NA when there is none)
  section_prev <- sapply(seq_indices, function(i) {
    level = section_levels[i]
    neighbor <- which(section_levels==level)
    level_up <- which(section_levels==level-1L)
    level_up <- max(level_up[level_up<i], 0L)  # nearest enclosing parent
    neighbor <- neighbor[neighbor<i & neighbor>level_up]  # same parent only
    neighbor[length(neighbor)][1L]
  })
  ## index of next section on the same level (bounded by the next parent)
  section_next <- sapply(seq_indices, function(i) {
    level = section_levels[i]
    neighbor <- which(section_levels==level)
    level_up <- which(section_levels==level-1L)
    level_up <- min(level_up[level_up>i], sections_length+1L)
    neighbor[neighbor>i & neighbor<level_up][1L]
  })
  ## index of parent section (last shallower section before this one)
  section_up <- sapply(seq_indices, function(i) {
    level = section_levels[i]
    level_up <- which(section_levels[1:i]==level-1L)
    level_up[length(level_up)][1L]
  })
  ## join navigation for sections and their immediate subsections: a child
  ## div opening two lines after its parent shares the parent's nav block.
  ## NOTE(review): when section_fuse is empty, `idx[-section_fuse]` and
  ## `[-section_fuse]` below select NOTHING (empty negative index gotcha) --
  ## confirm inputs always contain at least one fused section.
  section_fuse <- which(c(FALSE, idx[-length(idx)]+2L == idx[-1L]))
  section_ids[section_fuse + isTRUE(top)] <- section_ids[section_fuse + isTRUE(top) - 1L]
  idx <- idx[-section_fuse]
  ## move links to next subsections from fused sections up to their parents
  section_down <- rep(NA_integer_, sections_length)
  section_down[section_fuse -1L] <- section_next[section_fuse]
  ## create links; NA indices render as empty strings
  create_link <- function(v=0L, id=section_ids[v], name=section_names[v], icon)
    ifelse(is.na(v), "", sprintf('<span class="nav-icon">%s</span> <a href="%s">%s</a><br/>', icon, id, name))
  links <- paste0(
    create_link(id="#TOC", name="Table of Contents", icon="&#8771;"),
    create_link(section_next, icon="&#9656;"),
    create_link(section_prev, icon="&#9666;"),
    create_link(section_up, icon="&#9652;"),
    create_link(section_down, icon="&#9662;")
  )[-section_fuse]
  ## preallocate the results vector and populate it with original lines
  idx_length <- length(idx)
  res <- vector(mode = "character", length = length(lines)+idx_length)
  idx <- idx + seq_len(idx_length)  # shift insertion points past earlier inserts
  res[-idx] <- lines
  ## insert links as sidenote paragraphs right after each section div
  res[idx] <- sprintf('<p class="sidenote">%s</p>', links)
  res
}
process_captions = function(lines) {
  ## Rewrite table captions: move each <caption> that directly follows a
  ## <table> tag in front of the table as a <p class="caption"> element,
  ## then tag every caption paragraph with the 'sidenote' CSS class.
  cap_re <- '(^<caption>)(.*)(</caption>$)'
  tab_at <- grep('^<table', lines)
  tab_at <- tab_at[grepl(cap_re, lines[tab_at + 1L])]
  for (i in tab_at) {
    caption <- lines[i + 1L]
    lines[i + 1L] <- lines[i]  # table tag moves down one line
    lines[i] <- gsub(cap_re, '<p class="caption">\\2</p>', caption)
  }
  ## append 'sidenote' inside the class attribute of figure/table captions
  lines = gsub('(?<=^<p class="caption)(?=">)', ' sidenote', lines, perl=TRUE)
  lines
}
| /R/html_fragment.R | no_license | mtmorgan/BiocStyle | R | false | false | 7,010 | r | html_fragment <- function(...,
toc = TRUE,
number_sections = TRUE,
fig_width = 5.67,
fig_height = fig_width,
fig_align = "right",
fig_crop = TRUE,
theme = NULL,
highlight = NULL,
navlinks = TRUE,
navlinks_top = TRUE,
navlinks_level = 3) {
## enable BiocStyle macros
require(BiocStyle, quietly = TRUE)
knitr <- knitr_options(opts_knit = list(width = 80L),
# remove figure margins
opts_chunk = list(crop = isTRUE(fig_crop),
fig.align = fig_align),
knit_hooks = list(crop = hook_pdfcrop))
post_processor <- function(metadata, input, output, clean, verbose) {
lines <- readUTF8(output)
## add author affiliations
lines <- modifyLines(lines, from='<!-- AUTH AFFIL -->', insert=auth_affil_html(metadata))
## append "References" section header
if ( !is.null(metadata$bibliography) )
lines <- add_refs_header(lines)
## move all headers one level down (for proper formatting when embedded in the website)
lines <- process_headers(lines)
writeUTF8(lines, output)
output
}
config <- rmarkdown::output_format(
knitr = knitr,
pandoc = NULL,
post_processor = post_processor,
base_format = bookdown::html_document2(
toc = toc,
number_sections = number_sections,
fig_width = fig_width,
fig_height = fig_height,
theme = theme,
highlight = highlight,
...
))
## swap template afterwards in order to retain original mathjax functionality
template <- system.file("resources", "fragment.html", package="BiocStyle")
idx <- which(config$pandoc$args=="--template") + 1L
config$pandoc$args[idx] <- template
## one needs to run the post processor processing headers before bookdown, but
## process captions only after bookdown
post = config$post_processor
config$post_processor <- function(metadata, input, output, clean, verbose) {
if (is.function(post)) output = post(metadata, input, output, clean, verbose)
lines <- readUTF8(output)
## replace footnotes with sidenotes
lines = process_footnotes(lines)
## set captions as sidenotes
lines = process_captions(lines)
## add navigation
if (isTRUE(navlinks))
lines <- add_navigation(lines, navlinks_top, navlinks_level)
writeUTF8(lines, output)
output
}
config
}
add_refs_header = function(lines) {
refs <- grep('^<div id="refs" class="references">$', lines)
if ( !isTRUE(grepl('<h1>.*</h1>', lines[refs-1L])) )
lines <- c(lines[1:refs],
'<h1>References</h1>',
lines[(refs+1L):length(lines)])
lines
}
process_headers = function(lines) {
template <- '<h%s>%s</h%s>'
pattern <- sprintf(template, '([1-5])', '(.*)', '\\1')
headers <- grep(pattern, lines, value=TRUE)
levels <- as.integer(gsub(pattern, '\\1', headers))
matches <- regexpr(pattern, lines)
f <- function(header, level) {
sub(pattern, sprintf(template, level, '\\2', level), header)
}
regmatches(lines, matches) <- mapply(f, headers, levels+1L, USE.NAMES = FALSE)
lines
}
add_navigation = function(lines, top, level) {
## match to all section div's
pattern <- sprintf('^<div id="(.*)" class="section level([1-%d]).*">$', level)
idx <- grep(pattern, lines)
sections <- lines[idx]
sections_length <- length(sections)
## extract section metadata
section_ids <- sub(pattern, '#\\1', sections)
section_levels <- as.integer(sub(pattern, '\\2', sections))
section_names <- sub('<h([1-6])>(?:<span class="header-section-number">[0-9.]*</span> )?(.*)</h\\1>', '\\2', lines[idx+1L])
seq_indices = seq_along(sections)
## add virtual top section at level 0
if (isTRUE(top)) {
section_ids <- c("#top", section_ids)
section_levels <- c(0L, section_levels)
section_names <- c("Top", section_names)
seq_indices <- seq_indices + 1L
}
## index of previous section on the same level
section_prev <- sapply(seq_indices, function(i) {
level = section_levels[i]
neighbor <- which(section_levels==level)
level_up <- which(section_levels==level-1L)
level_up <- max(level_up[level_up<i], 0L)
neighbor <- neighbor[neighbor<i & neighbor>level_up]
neighbor[length(neighbor)][1L]
})
## index of next section on the same level
section_next <- sapply(seq_indices, function(i) {
level = section_levels[i]
neighbor <- which(section_levels==level)
level_up <- which(section_levels==level-1L)
level_up <- min(level_up[level_up>i], sections_length+1L)
neighbor[neighbor>i & neighbor<level_up][1L]
})
## index of parent section
section_up <- sapply(seq_indices, function(i) {
level = section_levels[i]
level_up <- which(section_levels[1:i]==level-1L)
level_up[length(level_up)][1L]
})
## join navigation for sections and their immediate subsections
section_fuse <- which(c(FALSE, idx[-length(idx)]+2L == idx[-1L]))
section_ids[section_fuse + isTRUE(top)] <- section_ids[section_fuse + isTRUE(top) - 1L]
idx <- idx[-section_fuse]
## move links to next subsections from fused sections up to their parents
section_down <- rep(NA_integer_, sections_length)
section_down[section_fuse -1L] <- section_next[section_fuse]
## create links
create_link <- function(v=0L, id=section_ids[v], name=section_names[v], icon)
ifelse(is.na(v), "", sprintf('<span class="nav-icon">%s</span> <a href="%s">%s</a><br/>', icon, id, name))
links <- paste0(
create_link(id="#TOC", name="Table of Contents", icon="≣"),
create_link(section_next, icon="▸"),
create_link(section_prev, icon="◂"),
create_link(section_up, icon="▴"),
create_link(section_down, icon="▾")
)[-section_fuse]
## preallocate the results vector and populate it with original lines
idx_length <- length(idx)
res <- vector(mode = "character", length = length(lines)+idx_length)
idx <- idx + seq_len(idx_length)
res[-idx] <- lines
## insert links
res[idx] <- sprintf('<p class="sidenote">%s</p>', links)
res
}
process_captions = function(lines) {
## convert table captions enlosed in <caption> tags into <p class="caption">
pattern <- '(^<caption>)(.*)(</caption>$)'
idx <- grep('^<table', lines)
idx <- idx[grepl(pattern, lines[idx+1L])]
captions <- lines[idx+1L]
lines[idx+1L] <- lines[idx]
lines[idx] <- gsub(pattern, '<p class="caption">\\2</p>', captions)
## append 'sidenote' CSS class to figure and table captions
lines = gsub('(?<=^<p class="caption)(?=">)', ' sidenote', lines, perl=TRUE)
lines
}
|
## Demo: validating e-mail addresses and collapsing whitespace with regexps.
emails = c("juan.sosa@gmail.com", "juan.sosa@gmailcom", "juan@sosa@gmail.com", "juan.sosa@111gmail.com",
           "juan.sosa@gmail.com.", "JUAN.SOSA@GMAIL.COM")
## local part: >=4 chars of [a-z0-9._]; domain: >=4 letters plus at least one
## dot-separated TLD of >=2 letters
regex1 = "^[a-z0-9\\._]{4,}@[a-z]{4,}(\\.[a-z]{2,})+$"
grepl(regex1, emails)                      # logical vector, one entry per address
grep(pattern = regex1, x = emails)         # indices of the matching addresses
## (removed a stray `grep()` call with no arguments -- it errors at runtime)
grepl(regex1, emails, ignore.case = TRUE)  # also accept upper-case addresses
text = "Hola como estas hoy?"
regex2 = "\\s+"                            # one or more whitespace characters
sub(regex2, " ", text)    # replaces only the first whitespace run
gsub(regex2, " ", text)   # replaces every whitespace run
| /r17-regex.R | no_license | juancsosap/rtraining | R | false | false | 446 | r | emails = c("juan.sosa@gmail.com", "juan.sosa@gmailcom", "juan@sosa@gmail.com", "juan.sosa@111gmail.com",
"juan.sosa@gmail.com.", "JUAN.SOSA@GMAIL.COM")
regex1 = "^[a-z0-9\\._]{4,}@[a-z]{4,}(\\.[a-z]{2,})+$"
grepl(regex1, emails)
grep(pattern = regex1, x = emails)
grep()
grepl(regex1, emails, ignore.case = TRUE)
text = "Hola como estas hoy?"
regex2 = "\\s+"
sub(regex2, " ", text)
gsub(regex2, " ", text)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arealMethods.R
\name{as.stppp}
\alias{as.stppp}
\title{as.stppp function}
\usage{
as.stppp(obj, ...)
}
\arguments{
\item{obj}{an object}
\item{...}{additional arguments}
}
\value{
method as.stppp
}
\description{
Generic function for converting to stppp objects
}
| /man/as.stppp.Rd | no_license | goldingn/lgcp | R | false | true | 342 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arealMethods.R
\name{as.stppp}
\alias{as.stppp}
\title{as.stppp function}
\usage{
as.stppp(obj, ...)
}
\arguments{
\item{obj}{an object}
\item{...}{additional arguments}
}
\value{
method as.stppp
}
\description{
Generic function for converting to stppp objects
}
|
setwd("../..")

test_that("Test convertAttributeTypes",{
  project_dir <- "workspace/project_my_first_experiment"
  performance_evaluator <- PerformanceEvaluator$new()
  ## load the example dataset with characters left as characters
  baboons <- read.csv("workspace/datasets_repo/baboon_mating.csv",
                      header = TRUE, sep = ",", stringsAsFactors = FALSE)
  file_manipulator <- FileManipulator$new()
  data_prepare <- DataPrepare$new()
  dictionary <- file_manipulator$loadOrderedDictionary()
  ## append two dummy attributes that should be recognised as ordinal
  baboons$height <- as.character(sample(0:1, nrow(baboons), replace = TRUE))
  baboons$dummy_ordered <- rep("high", nrow(baboons))
  baboons <- data_prepare$convertAttributeTypes(dataset = baboons, dictionary = dictionary)
  ## character columns should have been converted to factors ...
  expect_true(!all(sapply(baboons, class) == "character"))
  ## ... and the ordinal ones flagged as ordered factors
  expect_true(is.ordered(baboons$height))
  expect_true(is.ordered(baboons$dummy_ordered))
})

test_that("Test disposeRareLevels",{
  project_dir <- "workspace/project_my_first_experiment"
  performance_evaluator <- PerformanceEvaluator$new()
  ## load the example dataset and coerce its character columns to factors
  baboons <- read.csv("workspace/datasets_repo/baboon_mating.csv",
                      header = TRUE, sep = ",", stringsAsFactors = FALSE)
  char_cols <- names(baboons[sapply(baboons, class) == "character"])
  baboons[, (names(baboons) %in% char_cols)] <- lapply(baboons[, (names(baboons) %in% char_cols)], as.factor)
  factor_dataset <- baboons[, (names(baboons) %in% char_cols)]
  file_manipulator <- FileManipulator$new()
  data_prepare <- DataPrepare$new()
  factor_dataset <- data_prepare$disposeRareLevels(dataset = factor_dataset)
  factor_threshold <- data_prepare$getFactorThreshold()
  ## TODO(review): no expectation here yet -- decide what to assert
  #str(as.vector(lapply(factor_dataset, function(x) nlevels(x))) < 49)
})

test_that("Test partitionData ",{
  project_dir <- "workspace/project_my_first_experiment"
  performance_evaluator <- PerformanceEvaluator$new()
  ## load the example dataset and coerce its character columns to factors
  baboons <- read.csv("workspace/datasets_repo/baboon_mating.csv",
                      header = TRUE, sep = ",", stringsAsFactors = FALSE)
  char_cols <- names(baboons[sapply(baboons, class) == "character"])
  baboons[, (names(baboons) %in% char_cols)] <- lapply(baboons[, (names(baboons) %in% char_cols)], as.factor)
  factor_dataset <- baboons[, (names(baboons) %in% char_cols)]
  file_manipulator <- FileManipulator$new()
  data_prepare <- DataPrepare$new()
  holdout <- data_prepare$partitionData(baboons, technique = list(name = "holdout", ratio = 0.7))
  kfold <- data_prepare$partitionData(baboons, technique = list(name = "kfold", ratio = 0.8))
  loocv <- data_prepare$partitionData(baboons, technique = list(name = "loocv"))
  ## holdout keeps 70% of the rows
  expect_equal(length(holdout), ceiling(0.7 * nrow(baboons)))
  ## kfold: one column per fold, each fold holding 80% of the rows
  n_folds <- 1/(1-0.8)
  expect_equal(ncol(kfold), n_folds)
  expect_equal(nrow(kfold), ceiling(0.8 * nrow(baboons)))
  ## loocv: leave-one-out -> n columns of n-1 rows each
  expect_equal(nrow(loocv), nrow(baboons)-1)
  expect_equal(ncol(loocv), nrow(baboons))
})
| /preprocessor/testing/test_DataPrepare.R | no_license | elefthcn/automl | R | false | false | 3,362 | r | setwd("../..")
test_that("Test convertAttributeTypes",{
project_dir <- "workspace/project_my_first_experiment"
performance_evaluator = PerformanceEvaluator$new()
# load and prepare test dataset
test_dataset <- read.csv("workspace/datasets_repo/baboon_mating.csv",
header = TRUE, sep=",", stringsAsFactors=FALSE)
file_manipulator <- FileManipulator$new()
data_prepare <- DataPrepare$new()
test_dictionary <- file_manipulator$loadOrderedDictionary()
# add dummy ordered attributes
test_dataset$height <- as.character(sample(0:1, nrow(test_dataset), replace = TRUE))
test_dataset$dummy_ordered <- rep("high", nrow(test_dataset))
test_dataset <- data_prepare$convertAttributeTypes(dataset = test_dataset, dictionary = test_dictionary)
# test that characters are converted to factors
expect_true( !all(sapply(test_dataset,class) == "character"))
# test that ordinal factors are recognized
expect_true(is.ordered(test_dataset$height))
expect_true(is.ordered(test_dataset$dummy_ordered))
})
test_that("Test disposeRareLevels",{
project_dir <- "workspace/project_my_first_experiment"
performance_evaluator = PerformanceEvaluator$new()
# load and prepare test dataset
test_dataset <- read.csv("workspace/datasets_repo/baboon_mating.csv",
header = TRUE, sep=",", stringsAsFactors=FALSE)
variables <- names(test_dataset[sapply(test_dataset,class) == "character"])
test_dataset[, (names(test_dataset) %in% variables)] <- lapply(test_dataset[, (names(test_dataset) %in% variables)], as.factor)
factor_dataset <- test_dataset[, (names(test_dataset) %in% variables)]
file_manipulator <- FileManipulator$new()
data_prepare <- DataPrepare$new()
factor_dataset <- data_prepare$disposeRareLevels(dataset = factor_dataset)
factor_threshold <- data_prepare$getFactorThreshold()
# not sure what kind of test to apply
#str(as.vector(lapply(factor_dataset, function(x) nlevels(x))) < 49)
})
test_that("Test partitionData ",{
project_dir <- "workspace/project_my_first_experiment"
performance_evaluator = PerformanceEvaluator$new()
# load and prepare test dataset
test_dataset <- read.csv("workspace/datasets_repo/baboon_mating.csv",
header = TRUE, sep=",", stringsAsFactors=FALSE)
variables <- names(test_dataset[sapply(test_dataset,class) == "character"])
test_dataset[, (names(test_dataset) %in% variables)] <- lapply(test_dataset[, (names(test_dataset) %in% variables)], as.factor)
factor_dataset <- test_dataset[, (names(test_dataset) %in% variables)]
file_manipulator <- FileManipulator$new()
data_prepare <- DataPrepare$new()
partition1 <- data_prepare$partitionData(test_dataset, technique = list(name = "holdout", ratio = 0.7))
partition2 <- data_prepare$partitionData(test_dataset, technique = list(name = "kfold", ratio = 0.8))
partition3 <- data_prepare$partitionData(test_dataset, technique = list(name = "loocv"))
# check holdout size
expect_equal(length(partition1), ceiling(0.7 * nrow(test_dataset)))
# check kfold size
nr_folds <- 1/(1-0.8)
expect_equal(ncol(partition2), nr_folds)
expect_equal(nrow(partition2), ceiling(0.8 * nrow(test_dataset)))
# check loocv size
expect_equal(nrow(partition3), nrow(test_dataset)-1)
expect_equal(ncol(partition3), nrow(test_dataset))
})
|
\name{tr.wgaim}
\alias{tr}
\alias{tr.wgaim}
\title{Display diagnostic information about the QTL detected.}
\description{Displays diagnostic information about QTL detection and
significance for the sequence of models generated in a \code{wgaim}
analysis.
}
\usage{
\method{tr}{wgaim}(object, iter = 1:length(object$QTL$effects),
lik.out = TRUE, \ldots)
}
\arguments{
\item{object}{an object of class \code{"wgaim"}}
\item{iter}{a vector of integers specifying what rows of the p-value matrix to display}
\item{lik.out}{logical value. If \code{TRUE} then diagnostic information
about the testing of the genetic variance is given for all iterations.}
\item{\ldots}{arguments passed to \code{print.default} for displaying
of information}
}
\details{
By default the printing of the objects occur with arguments \code{quote = FALSE}
and \code{right = TRUE}. Users should avoid using these arguments.
}
\value{For the selected QTL, a probability value matrix is displayed
with rows specified by \code{iter}. If \code{lik.out =
TRUE} then a matrix with rows consisting of the likelihood with
additive genetic variance, the likelihood without additive genetic
variance (NULL model), the test statistic and the p-value for the statistic.
}
\references{
Verbyla, A. P & Taylor, J. D, Verbyla, K. L (2012). RWGAIM: An efficient high
dimensional random whole genome average (QTL) interval mapping approach.
\emph{Genetics Research}. \bold{94}, 291-306.
Julian Taylor, Arunas Verbyla (2011). R Package wgaim: QTL Analysis in
Bi-Parental Populations Using Linear Mixed Models. \emph{Journal of
Statistical Software}, \bold{40}(7), 1-18. URL \url{http://www.jstatsoft.org/v40/i07/}.
}
\author{Julian Taylor}
\seealso{\code{\link{wgaim}}}
\examples{
\dontrun{
# read in data
data(phenoRxK, package = "wgaim")
data(genoRxK, package = "wgaim")
# subset linkage map and convert to "interval" object
genoRxK <- subset(genoRxK, chr = c("1A", "2D1", "2D2", "3B"))
genoRxK <- cross2int(genoRxK, impute = "Martinez", id = "Genotype")
# base model
rkyld.asf <- asreml(yld ~ lrow, random = ~ Genotype + Range,
residual = ~ ar1(Range):ar1(Row), data = phenoRxK)
# find QTL
rkyld.qtl <- wgaim(rkyld.asf, intervalObj = genoRxK, merge.by = "Genotype",
trace = "trace.txt", na.action = na.method(x = "include"))
# diagnostic check
tr(rkyld.qtl, digits = 4)
}
}
\keyword{regression}
| /man/tr.wgaim.Rd | no_license | DrJ001/wgaim | R | false | false | 2,419 | rd | \name{tr.wgaim}
\alias{tr}
\alias{tr.wgaim}
\title{Display diagnostic information about the QTL detected.}
\description{Displays diagnostic infomation about QTL detection and
significance for the sequence of models generated in a \code{wgaim}
analysis.
}
\usage{
\method{tr}{wgaim}(object, iter = 1:length(object$QTL$effects),
lik.out = TRUE, \ldots)
}
\arguments{
\item{object}{an object of class \code{"wgaim"}}
\item{iter}{a vector of integers specifying what rows of the p-value matrix to display}
\item{lik.out}{logical value. If \code{TRUE} then diagnostic information
about the testing of the genetic variance is given for all iterations.}
\item{\ldots}{arguments passed to \code{print.default} for displaying
of information}
}
\details{
By default the printing of the objects occur with arguments \code{quote = FALSE}
and \code{right = TRUE}. Users should avoid using these arguments.
}
\value{For the selected QTL, a probability value matrix is displayed
with rows specified by \code{iter}. If \code{lik.out =
TRUE} then a matrix with rows consisting of the likelihood with
additive genetic variance, the likelihood without additive genetic
variance (NULL model), the test statistic and the p-value for the statistic.
}
\references{
Verbyla, A. P & Taylor, J. D, Verbyla, K. L (2012). RWGAIM: An efficient high
dimensional random whole genome average (QTL) interval mapping approach.
\emph{Genetics Research}. \bold{94}, 291-306.
Julian Taylor, Arunas Vebyla (2011). R Package wgaim: QTL Analysis in
Bi-Parental Populations Using Linear Mixed Models. \emph{Journal of
Statistical Software}, \bold{40}(7), 1-18. URL \url{http://www.jstatsoft.org/v40/i07/}.
}
\author{Julian Taylor}
\seealso{\code{\link{wgaim}}}
\examples{
\dontrun{
# read in data
data(phenoRxK, package = "wgaim")
data(genoRxK, package = "wgaim")
# subset linkage map and convert to "interval" object
genoRxK <- subset(genoRxK, chr = c("1A", "2D1", "2D2", "3B"))
genoRxK <- cross2int(genoRxK, impute = "Martinez", id = "Genotype")
# base model
rkyld.asf <- asreml(yld ~ lrow, random = ~ Genotype + Range,
residual = ~ ar1(Range):ar1(Row), data = phenoRxK)
# find QTL
rkyld.qtl <- wgaim(rkyld.asf, intervalObj = genoRxK, merge.by = "Genotype",
trace = "trace.txt", na.action = na.method(x = "include"))
# diagnostic check
tr(rkyld.qtl, digits = 4)
}
}
\keyword{regression}
|
library(tidyverse)

## Join the per-chromosome admixfrog bin files into one genome-wide table and
## write it back out as CSV.
## NOTE(review): chromosome 21 is absent from the inputs -- confirm that the
## admixfrog run really produced no chr21 file.
## BUG FIX: the bound table was assigned to `bound_deni4_bin` but written out
## as `bound_deni8_bin` (an undefined object); the name is now consistent.
chromosomes <- c(1:20, 22)

## read each per-chromosome bin file into a list of data frames
per_chr <- lapply(chromosomes, function(chr) {
  read_csv(paste0("deni8chr", chr, "example1.bin.xz"))
})

## stack the chromosomes row-wise; bind_rows() matches columns by name
bound_deni8_bin <- bind_rows(per_chr)

write_csv(bound_deni8_bin, "bound_deni8_bin.csv")
#writes out to a csv file, which i think can be transformed into an .xz file if needed
| /binding_frames.R | no_license | hemr3/admixfrog_work | R | false | false | 1,401 | r | library(tidyverse)
#joining chromosomes together.
deni1=read_csv("deni8chr1example1.bin.xz")
deni2=read_csv("deni8chr2example1.bin.xz")
deni3=read_csv("deni8chr3example1.bin.xz")
deni4=read_csv("deni8chr4example1.bin.xz")
deni5=read_csv("deni8chr5example1.bin.xz")
deni6=read_csv("deni8chr6example1.bin.xz")
deni7=read_csv("deni8chr7example1.bin.xz")
deni8=read_csv("deni8chr8example1.bin.xz")
deni9=read_csv("deni8chr9example1.bin.xz")
deni10=read_csv("deni8chr10example1.bin.xz")
deni11=read_csv("deni8chr11example1.bin.xz")
deni12=read_csv("deni8chr12example1.bin.xz")
deni13=read_csv("deni8chr13example1.bin.xz")
deni14=read_csv("deni8chr14example1.bin.xz")
deni15=read_csv("deni8chr15example1.bin.xz")
deni16=read_csv("deni8chr16example1.bin.xz")
deni17=read_csv("deni8chr17example1.bin.xz")
deni18=read_csv("deni8chr18example1.bin.xz")
deni19=read_csv("deni8chr19example1.bin.xz")
deni20=read_csv("deni8chr20example1.bin.xz")
deni22=read_csv("deni8chr22example1.bin.xz")
# reading in the data in prep for binding.
bound_deni4_bin=rbind.data.frame(deni1, deni2, deni3, deni4, deni5, deni6, deni7, deni8, deni9, deni10, deni11, deni12, deni13,
deni14, deni15, deni16, deni17, deni18, deni19, deni20, deni22,
make.row.names = T)
write_csv(bound_deni8_bin, "bound_deni8_bin.csv")
#writes out to a csv file, which i think can be transformed into an .xz file if needed
|
## These functions avoid recalculating an inverse matrix if it had been already calculated. This saves running time.
## makeCacheMatrix creates a matrix object and sets and gets the inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix in a closure that can memoise its inverse.
  ## Returns a list of accessors: set/get for the matrix itself and
  ## setInverse/getInverse for the cached inverse.
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## cacheSolve retrieves a cached inverse matrix if one is available, or calculates it if it doesn't
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  ## object), reusing the cached inverse when one is available.
  inv <- x$getInverse()
  if (is.null(inv)) {
    ## cache miss: compute the inverse and store it for next time
    inv <- solve(x$get())
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
| /cachematrix.R | no_license | solved1/ProgrammingAssignment2 | R | false | false | 917 | r | ## These functions avoid recalculating an inverse matrix if it had been already calculated. This saves running time.
## makeCacheMatrix creates a matrix object and sets and gets the inverse
makeCacheMatrix <- function(x = matrix()) {
cachedInverse <-NULL
set <- function(y) {
x <<- y
cachedInverse <<- NULL
}
get <- function() x
setInverse <- function(inverse) cachedInverse <<- inverse
getInverse <- function() cachedInverse
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve retrieves a cached inverse matrix if one is available, or calculates it if it doesn't
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
cachedInverse <- x$getInverse()
if(!is.null(cachedInverse)) {
message("getting cached data")
return(cachedInverse)
}
else {
inverse <- solve(x$get())
x$setInverse(inverse)
}
inverse
}
|
#############################################
## dataProcess
#############################################
#' @export dataProcess
#' @import survival
#' @import preprocessCore
#' @import statmod
#' @importFrom reshape2 dcast melt
#' @importFrom stats medpolish aggregate t.test lm summary.lm fitted resid p.adjust
#' @importFrom stats C approx coef cor dist formula loess median na.omit
#' @importFrom stats predict pt qnorm qt quantile reshape rnorm runif sd var vcov xtabs
#' @importFrom utils head read.table sessionInfo setTxtProgressBar txtProgressBar write.csv write.table
#' @importFrom methods validObject
#' @importFrom doSNOW registerDoSNOW
#' @importFrom snow makeCluster
#' @importFrom foreach foreach %dopar%
#' @importFrom dplyr filter n
#' @importFrom tidyr gather
dataProcess <- function(raw,
logTrans=2,
normalization="equalizeMedians",
nameStandards=NULL,
address="",
fillIncompleteRows=TRUE,
featureSubset="all",
remove_uninformative_feature_outlier=FALSE,
n_top_feature=3,
summaryMethod="TMP",
equalFeatureVar=TRUE,
censoredInt="NA",
cutoffCensored="minFeature",
MBimpute=TRUE,
remove50missing=FALSE,
maxQuantileforCensored=0.999,
clusters=NULL) {
## save process output in each step
allfiles <- list.files()
num <- 0
filenaming <- "msstats"
finalfile <- "msstats.log"
while(is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".log")
}
session <- sessionInfo()
sink("sessionInfo.txt")
print(session)
sink()
processout <- as.matrix(read.table("sessionInfo.txt", header=TRUE, sep="\t"))
write.table(processout, file=finalfile, row.names=FALSE)
processout <- rbind(processout, as.matrix(c(" "," ","MSstats - dataProcess function"," "), ncol=1))
## make case-insensitive for function options
## ------------------------------------------
normalization <- toupper(normalization)
## Check correct option or input
## check right column in input
requiredinput <- c("ProteinName", "PeptideSequence", "PrecursorCharge",
"FragmentIon", "ProductCharge", "IsotopeLabelType",
"Condition", "BioReplicate", "Run", "Intensity")
## [THT: disambiguation for PeptideSequence & PeptideModifiedSequence - begin]
## PeptideModifiedSequence is also allowed.
requiredInputUpper <- toupper(requiredinput)
providedInputUpper <- toupper(colnames(raw))
if (all(requiredInputUpper %in% providedInputUpper)) {
processout <- rbind(processout, c("The required input : provided - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
} else if (all(setdiff(requiredInputUpper, "PEPTIDESEQUENCE") %in% providedInputUpper) && "PEPTIDEMODIFIEDSEQUENCE" %in% providedInputUpper) {
processout <- rbind(processout, c("The required input : provided - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
# if PeptideModifiedSequence is provided instead of PeptideSequence,
# change the column name as PeptideSequence
colnames(raw)[which(providedInputUpper == "PEPTIDEMODIFIEDSEQUENCE")] <- "PeptideSequence"
} else {
missedInput <- which(!(requiredInputUpper %in% providedInputUpper))
processout <- rbind(processout, c(paste("ERROR : The required input : ",
paste(requiredinput[missedInput], collapse = ", "),
" are not provided in input - stop")))
write.table(processout, file = finalfile, row.names = FALSE)
stop("Please check the required input. The required input needs (ProteinName, PeptideSequence (or PeptideModifiedSequence), PrecursorCharge, FragmentIon, ProductCharge, IsotopeLabelType, Condition, BioReplicate, Run, Intensity)")
}
## [THT: disambiguation for PeptideSequence & PeptideModifiedSequence - end]
## check logTrans is 2,10 or not
if (logTrans!=2 & logTrans!=10) {
processout <- rbind(processout,c("ERROR : Logarithm transformation : log2 or log10 only - stop"))
write.table(processout, file=finalfile,row.names=FALSE)
stop("Only log2 or log10 are posssible.\n")
}
## check no row for some feature : balanced structure or not
if (!(fillIncompleteRows==TRUE | fillIncompleteRows==FALSE) | !is.logical(fillIncompleteRows)) {
processout <- rbind(processout, c(paste("The required input - fillIncompleteRows : 'fillIncompleteRows' value is wrong. It should be either TRUE or FALSE. - stop")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'fillIncompleteRows' must be one of TRUE or FALSE as a logical value.")
}
## check input for summaryMethod
## only 'linear' and 'TMP' are supported; the accepted value is echoed to the log file
if (sum(summaryMethod == c("linear", "TMP")) == 0) {
processout <- rbind(processout,c("The required input - summaryMethod : 'summaryMethod' value is wrong. It should be one of 'TMP' or 'linear'. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'summaryMethod' value is wrong. It should be one of 'TMP' or 'linear'.")
} else {
processout <- rbind(processout, c(paste("summaryMethod : ", as.character(summaryMethod), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for cutoffCensored
## the rule used to pick the censoring cutoff must be one of three supported options
if (sum(cutoffCensored==c("minFeature","minRun","minFeatureNRun"))==0) {
processout <- rbind(processout,c("The required input - cutoffCensored : 'cutoffCensored' value is wrong. It should be one of 'minFeature','minRun','minFeatureNRun'. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'cutoffCensored' value is wrong. It should be one of 'minFeature','minRun','minFeatureNRun'.")
} else {
processout <- rbind(processout,c(paste("cutoffCensored : ",as.character(cutoffCensored), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for censoredInt
## censoredInt declares how censored missing values are encoded in the input:
## "0", "NA", or NULL (meaning the dataset has no censored values at all)
if (sum(censoredInt == c("0", "NA")) == 0 & !is.null(censoredInt)) {
processout <- rbind(processout,c("The required input - censoredInt : 'censoredInt' value is wrong.
It should be one of '0','NA', NULL. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'censoredInt' value is wrong. It should be one of '0','NA',NULL.")
} else {
processout <- rbind(processout, c(paste("censoredInt : ", as.character(censoredInt), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for censoredInt and MBimpute
## TMP summarization with model-based imputation needs censored values to be
## flagged; censoredInt=NULL declares there are none, so MBimpute=TRUE is a
## contradiction and we stop early with an explanation.
if ( summaryMethod == 'TMP' & MBimpute & is.null(censoredInt) ) {
  ## fixed garbled log text ("The rcombination of equired input")
  processout <- rbind(processout, c("The combination of required input -
censoredInt and MBimpute : 'censoredInt=NULL' has no censored missing values.
Imputation will not be performed.- stop"))
  write.table(processout, file=finalfile, row.names=FALSE)
  stop("'censoredInt=NULL' means that dataset has no censored missing value and MSstats will not impute.
But, 'MBimpute=TRUE' is selected. Please replace by 'MBimpute=FALSE' or censoredInt='NA' or '0'")
}
## [THT: if (!all(normalization %in% c("NONE", "FALSE", "EQUALIZEMEDIANS", "QUANTILE", "GLOBALSTANDARDS")))]
## [THT: send a warning message if the user mixes "NONE" with any of the last three choices]
## 'normalization' has been upper-cased upstream; it must match one of five options
if (!(normalization=="NONE" | normalization=="FALSE" |
normalization=="EQUALIZEMEDIANS" | normalization=="QUANTILE" |
normalization=="GLOBALSTANDARDS")) {
processout <- rbind(processout,c(paste("The required input - normalization : 'normalization' value is wrong. - stop")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'normalization' must be one of \"None\", \"FALSE\", \"equalizeMedians\",
\"quantile\", or \"globalStandards\". Please assign 'normalization' again.")
}
## need the names of global standards
## normalization by global standards cannot proceed without the standards' names
if (!is.element("NONE",normalization) &
!is.element("FALSE",normalization) &
is.element("GLOBALSTANDARDS",normalization) &
is.null(nameStandards)) {
processout <- rbind(processout, c("ERROR : For normalization with global standards,
the names of global standards are needed. Please add 'nameStandards' input."))
write.table(processout, file=finalfile,row.names=FALSE)
stop ("For normalization with global standards, the names of global standards are needed.
Please add 'nameStandards' input." )
}
## check whether class of intensity is factor or character; if yes, need to convert it to numeric
if (is.factor(raw$Intensity) | is.character(raw$Intensity)) {
## entries that cannot be parsed become NA; coercion warnings are deliberately silenced
suppressWarnings(raw$Intensity <- as.numeric(as.character(raw$Intensity)))
}
## check whether the intensity has 0 value or negative value
## (kept for reference: this validation is currently disabled; values < 1 are
## floored to 1 further below instead)
# if (length(which(raw$Intensity<=0))>0 & !skylineReport) {
# if (is.null(censoredInt)) {
# processout <- rbind(processout,c("ERROR : There are some intensities which are zero or negative values. need to change them. - stop"))
# write.table(processout, file=finalfile,row.names=FALSE)
# stop("Intensity has 0 or negative values. Please check these intensities and change them. \n")
# } else if (censoredInt=="NA") {
# processout <- rbind(processout,c("ERROR : There are some intensities which are zero or negative values. need to change them. - stop"))
# write.table(processout, file=finalfile,row.names=FALSE)
# stop("Intensity has 0 or negative values. Please check these intensities and change them. \n")
# }
#}
## here, need to get standard protein name
## column name : standardtype..
## what values it has, normalization, unique(proteinname)
## if normalization == "standard" & no normalization selection, error message
## annotation information : Run / BioReplicate / Condition must be complete (no NA)
if ( any(is.na(raw$Run)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'Run' column. Please check 'Run' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'Run' column. Please check 'Run' column." )
}
if ( any(is.na(raw$BioReplicate)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'BioReplicate' column.
Please check 'BioReplicate' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'BioReplicate' column. Please check 'BioReplicate' column." )
}
if ( any(is.na(raw$Condition)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'Condition' column.
Please check 'Condition' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'Condition' column. Please check 'Condition' column." )
}
## make letters case-insensitive: all column names are upper-cased from here on
colnames(raw) <- toupper(colnames(raw))
## Optional columns: keep the (upper-cased) column name when present, otherwise
## NULL so that c() further below simply drops it from the required-column list.
fraction <- if ("FRACTION" %in% colnames(raw)) "FRACTION" else NULL
tech.rep <- if ("TECHREPLICATE" %in% colnames(raw)) "TECHREPLICATE" else NULL
## keep only the columns needed downstream (optional ones are NULL and drop out)
require.col <- c("PROTEINNAME", "PEPTIDESEQUENCE", "PRECURSORCHARGE",
"FRAGMENTION", "PRODUCTCHARGE", "ISOTOPELABELTYPE",
"CONDITION", "BIOREPLICATE", "RUN", "INTENSITY", fraction, tech.rep)
raw.temp <- raw[, require.col]
## before remove, get PeptideSequence and combination of PeptideSequence and precursorcharge for global standard normalization
tempPeptide <- unique(raw[, c("PEPTIDESEQUENCE", "PRECURSORCHARGE")])
tempPeptide$PEPTIDE <- paste(tempPeptide$PEPTIDESEQUENCE, tempPeptide$PRECURSORCHARGE, sep="_")
rm(raw)
## assign peptide (sequence_charge) and transition (fragment_charge) identifiers
raw.temp <- data.frame(raw.temp,
PEPTIDE=paste(raw.temp$PEPTIDESEQUENCE, raw.temp$PRECURSORCHARGE, sep="_"),
TRANSITION=paste(raw.temp$FRAGMENTION, raw.temp$PRODUCTCHARGE, sep="_"))
if (length(unique(raw.temp$ISOTOPELABELTYPE)) > 2) {
processout <- rbind(processout, c("ERROR : There are more than two levels of labeling.
So far, only label-free or reference-labeled experiment are supported. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("Statistical tools in MSstats are only proper for label-free or with reference peptide experiments.")
}
## change light, heavy -> L,H
## [THT: should check if users really provide light/heavy, L/H, l/h, or something else ]
## [THT: should also check if users provide only H (instead of L)]
## NOTE(review): the relabeling below relies on factor(level) sort order, i.e.
## it assumes "heavy" sorts before "light" alphabetically - confirm inputs use
## those spellings, otherwise H/L could be swapped.
raw.temp$ISOTOPELABELTYPE <- factor(raw.temp$ISOTOPELABELTYPE)
if (nlevels(raw.temp$ISOTOPELABELTYPE) == 2) {
levels(raw.temp$ISOTOPELABELTYPE) <- c("H", "L")
}
if (nlevels(raw.temp$ISOTOPELABELTYPE) == 1) {
levels(raw.temp$ISOTOPELABELTYPE) <- c("L")
}
## Re-detect the optional columns on the trimmed table (same contract as the
## earlier detection on 'raw'): column name when present, NULL otherwise.
fraction <- if ("FRACTION" %in% colnames(raw.temp)) "FRACTION" else NULL
tech.rep <- if ("TECHREPLICATE" %in% colnames(raw.temp)) "TECHREPLICATE" else NULL
## reorder/keep the identifier columns and rename them to the internal scheme
require.col <- c("PROTEINNAME", "PEPTIDE", "TRANSITION", "ISOTOPELABELTYPE",
"CONDITION", "BIOREPLICATE", "RUN", "INTENSITY",
fraction, tech.rep)
raw.temp <- raw.temp[, require.col]
## NOTE(review): if TECHREPLICATE is present but FRACTION is not, ncol is 9 and
## the final else assigns only 8 names, which errors - confirm that combination
## cannot reach this point.
if( ncol(raw.temp) == 10 &
any(is.element(colnames(raw.temp), 'FRACTION')) &
any(is.element(colnames(raw.temp), 'TECHREPLICATE'))) {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity", 'Fraction', 'TechReplicate')
} else if( ncol(raw.temp) == 9 &
any(is.element(colnames(raw.temp), 'FRACTION')) ) {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity", 'Fraction')
} else {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity")
}
## create work data for quant analysis
## -----------------------------------
## rows without a protein identifier are useless downstream and are dropped
raw.temp <- raw.temp[!is.na(raw.temp$Protein), ]
raw.temp <- raw.temp[raw.temp$Protein != '', ]
work <- data.frame(PROTEIN=raw.temp$Protein,
PEPTIDE=raw.temp$Peptide,
TRANSITION=raw.temp$Transition,
FEATURE=paste(raw.temp$Peptide, raw.temp$Transition, sep="_"),
LABEL=raw.temp$Label,
GROUP_ORIGINAL=raw.temp$Condition,
SUBJECT_ORIGINAL=raw.temp$Sample,
RUN=raw.temp$Run,
GROUP=0,
SUBJECT=0,
INTENSITY=raw.temp$Intensity)
work$GROUP_ORIGINAL <- factor(work$GROUP_ORIGINAL)
work$SUBJECT_ORIGINAL <- factor(work$SUBJECT_ORIGINAL, levels=unique(work$SUBJECT_ORIGINAL))
work$LABEL <- factor(work$LABEL, levels=levels(work$LABEL))
## only light (endogenous) rows get real group/subject codes; heavy reference rows keep 0
work[work$LABEL=="L", "GROUP"] <- work[work$LABEL=="L", "GROUP_ORIGINAL"]
work[work$LABEL=="L", "SUBJECT"] <- work[work$LABEL=="L", "SUBJECT_ORIGINAL"]
work <- data.frame(work, SUBJECT_NESTED=paste(work$GROUP, work$SUBJECT, sep="."))
if( any(is.element(colnames(raw.temp), 'Fraction')) ) {
work <- data.frame(work, FRACTION = raw.temp$Fraction)
}
if( any(is.element(colnames(raw.temp), 'TechReplicate')) ) {
work <- data.frame(work, TECHREPLICATE = raw.temp$TechReplicate)
}
processout <- rbind(processout, c("New input format : made new columns for analysis - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## 2016. 08.29 : replace intensities < 1 with 1 before log transformation
## (values in (0,1) would give negative log-abundances; 0 would give -Inf).
## The count is computed once instead of three times, and the process-log text
## now matches the actual condition (zero OR less than 1), like the message().
n.small <- length(which(!is.na(work$INTENSITY) & work$INTENSITY < 1))
if ( n.small > 0 ) {
  processout <- rbind(processout, c(paste0("** There are ", n.small,
                                           " intensities which are zero or less than 1. These intensities are replaced with 1.")))
  write.table(processout, file=finalfile, row.names=FALSE)
  message(paste0("** There are ", n.small,
                 " intensities which are zero or less than 1. These intensities are replaced with 1."))
  work[!is.na(work$INTENSITY) & work$INTENSITY < 1, 'INTENSITY'] <- 1
}
## log transformation
## ABUNDANCE holds the (log-)transformed values used for modeling
work$ABUNDANCE <- work$INTENSITY
## now, INTENSITY keeps original values.
## NA means no observation. assume that spectral tools do not report anything when there is no observation. zero means detected but zero.
## intensities < 1 were already floored to 1 above
## work[!is.na(work$ABUNDANCE) & work$ABUNDANCE==0,"ABUNDANCE"] <- 1
## based on logTrans option, assign log transformation
## remove log2 or log10 intensity
### [THT: add one more condition to have the program complain if a user
### provides an unexpected value for logTrans]
if (logTrans == 2) {
work$ABUNDANCE <- log2(work$ABUNDANCE)
} else if (logTrans == 10) {
work$ABUNDANCE <- log10(work$ABUNDANCE)
}
processout <- rbind(processout,
c(paste0("Logarithm transformation: log", logTrans,
" transformation is done - okay")))
write.table(processout, file=finalfile, row.names=FALSE)
## Check multi-method or not : multiple run for a replicate
work$RUN <- factor(work$RUN)
checkMultirun <- .countMultiRun(work)
if ( checkMultirun$is.risky ){
## cannot disambiguate fractionation from technical replication: warn and stop
stop('** MSstats suspects that there are fractionations and potentially technical replicates too. Please add Fraction column in the input.')
} else if ( checkMultirun$out ) {
if ( any(is.element(colnames(work), 'FRACTION')) ){
processout <- rbind(processout,
c(paste("Multiple fractionations are existed : ",
length(unique(work$FRACTION)),
"fractionations per MS replicate.")))
write.table(processout, file=finalfile, row.names=FALSE)
} else {
## need to make new column 'Fraction'
## each sample has no technical replicate, all runs are fractionated MS runs.
work$FRACTION <- NA
info <- unique(work[, c('GROUP_ORIGINAL', 'SUBJECT_ORIGINAL', 'RUN')])
info$condition <- paste(info$GROUP_ORIGINAL, info$SUBJECT_ORIGINAL, sep="_")
tmp <- work[!is.na(work$ABUNDANCE), ]
## take one sample first as the reference for fraction numbering
info.sample1 <- info[info$condition == unique(info$condition)[1], ]
## assign fraction info first
info.sample1$FRACTION <- seq(1, nrow(info.sample1))
for(k in 1:length(unique(info.sample1$RUN))){
## then find the same fraction for the next sample
unique.feature <- unique( tmp[tmp$RUN %in% info.sample1$RUN[k], 'FEATURE'] )
tmptmp <- tmp[which(tmp$FEATURE %in% unique.feature), ]
tmptmp$condition <- paste(tmptmp$GROUP_ORIGINAL, tmptmp$SUBJECT_ORIGINAL, sep="_")
count.feature <- reshape2::dcast(RUN ~ GROUP_ORIGINAL + SUBJECT_ORIGINAL,
data=tmptmp, fun.aggregate=length, value.var='ABUNDANCE')
## !! get one run which has maximum overlapped feature by each sample
same.frac <- apply(count.feature[,-which(colnames(count.feature) %in% c('RUN'))], 2,
function(x) count.feature[which.max(x), 'RUN'])
work[ which(work$RUN %in% same.frac), 'FRACTION'] <- info.sample1[ which(info.sample1$RUN %in% info.sample1$RUN[k]), 'FRACTION']
}
rm(tmp)
## final check up : any run left without an assigned fraction means the matching failed
checkup <- sum( is.na(unique(work$FRACTION)) ) > 0
if ( !checkup ){
processout <- rbind(processout, c(paste("Multiple fractions are existed : ",
length(unique(work$FRACTION)), "fractions per MS replicate.")))
write.table(processout, file=finalfile, row.names=FALSE)
} else {
processout <- rbind(processout, c('** It is hard to find the same fractionation across sample, due to lots of overlapped features between fractionations.
Please add Fraction column in input.'))
write.table(processout, file=finalfile, row.names=FALSE)
stop("** It is hard to find the same fractionation across sample, due to lots of overlapped features between fractionations.
Please add Fraction column in input.")
}
}
################################################
## need additional step that removes overlapped features across several fractions
################################################
if ( length(unique(work$FRACTION)) > 1 ){
## extra key combining feature and fraction, used to censor specific cells below
work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
## feature x fraction table of measurement counts
count.feature <- reshape2::dcast(FEATURE ~ FRACTION,
data=tmp,
fun.aggregate=length,
value.var='ABUNDANCE')
rm(tmp)
## 1. first, keep features which are measured in one fraction
count.fraction <- apply(count.feature[, -which(colnames(count.feature) %in% c('FEATURE'))],
1,
function(x) sum(x>0))
# keep.feature <- count.feature[count.fraction == 1, 'FEATURE']
## 2. second, if features are measured in multiple fractionations,
## use the fractionation with maximum number of measurements.
## if there are multiple maximum number of measurements, remove features completely.
## count.feature1 : features that are measured in multiple fractions
count.feature1 <- count.feature[count.fraction > 1, ]
if( nrow(count.feature1) > 0 ){
## how many fractions have maximum number of measurements?
count.fraction <- apply(count.feature1[, -which(colnames(count.feature1) %in% c('FEATURE'))],
1,
function(x) sum(x == max(x)))
## 2.1 count.fraction == 1 means that there is one fraction that have one maximum # measurements.
## count.feature2 : features that measured in multiple fractions.
## however, it has one fraction with max number of measurements across fractions.
count.feature2 <- count.feature1[count.fraction == 1, ]
count.feature2$FEATURE <- as.character(count.feature2$FEATURE)
if( nrow(count.feature2) > 0 ){
  #remove.fraction <- apply(count.feature2, 1,
  #          function(x) paste(x[1], names(x[-1])[x[-1] != max(x[-1]) & x[-1] != 0], sep="_") )
  #remove.fraction <- unlist(remove.fraction)
  ## For each feature, censor every fraction that has measurements but is not
  ## the (unique) fraction with the maximum number of measurements.
  remove.fraction <- gather(count.feature2, 'Fraction', 'ncount', 2:ncol(count.feature2))
  remove.fraction <- remove.fraction %>% group_by(FEATURE) %>% filter(ncount != max(ncount))
  remove.fraction <- remove.fraction %>% filter(ncount != 0)
  remove.fraction$tmp <- paste(remove.fraction$FEATURE, remove.fraction$Fraction, sep="_")
  work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
  work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
  ## FIX: remove.fraction only exists inside this branch; removing it here avoids
  ## the "object 'remove.fraction' not found" warning rm() used to raise whenever
  ## count.feature2 had zero rows.
  rm(remove.fraction)
}
rm(count.feature2)
## 2.2 count.fraction > 1 means that there are multiple fractions have the same # measurements.
## Then check whether there are multiple maximum number of measurements across fractionation
count.feature3 <- count.feature1[count.fraction > 1, ]
if( nrow(count.feature3) > 0 ){
## 2.2.1 : maximum number of measurement / fraction == 1, remove that feature
max.feature <- apply(count.feature3[, -which(colnames(count.feature3) %in% c('FEATURE'))],
1,
function(x) max(x))
max.feature.1 <- count.feature3[max.feature == 1, 'FEATURE']
if (length(max.feature.1) > 0){
work <- work[-which(work$FEATURE %in% max.feature.1), ]
count.feature3 <- count.feature3[-which(count.feature3$FEATURE %in% max.feature.1), ]
}
if ( nrow(count.feature3) > 0 ) {
###############
## 2.2.2 : remove fractionations which have not maximum number of measurements
remove.fraction <- gather(count.feature3, 'Fraction', 'ncount', 2:ncol(count.feature3))
remove.fraction <- remove.fraction %>% group_by(FEATURE) %>% filter(ncount != max(ncount))
remove.fraction <- remove.fraction %>% filter(ncount != 0)
remove.fraction$tmp <- paste(remove.fraction$FEATURE, remove.fraction$Fraction, sep="_")
work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
rm(remove.fraction)
###############
## 2.2.3 : among the tied fractionations, keep the one with the maximum mean abundance
tmptmp <- work[which(work$FEATURE %in% count.feature3$FEATURE), ]
tmptmp <- tmptmp[!is.na(tmptmp$ABUNDANCE), ]
mean.frac.feature <- tmptmp %>% group_by(FEATURE, tmp) %>% summarise(mean=mean(ABUNDANCE))
remove.fraction <- mean.frac.feature %>% group_by(FEATURE) %>% filter(mean != max(mean))
work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
rm(remove.fraction)
rm(tmptmp)
}
}
}
## drop the temporary feature_fraction key
work <- work[, -which(colnames(work) %in% c('tmp'))]
}
} else { ## no fractionation
work$FRACTION <- 1
}
## check missingness for multirun
## check no value for some feature : balanced structure or not
## need to separate label-free or label-based
processout <- rbind(processout, c(paste("fillIncompleteRows = ", fillIncompleteRows,sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## [THT: better to write a function for single method, and call that function
## here and for the case with multiple methods]
## only 1 method
if ( !checkMultirun$out | length(unique(work$FRACTION)) == 1 ) {
## label-free experiments
if (nlevels(work$LABEL) == 1) {
## get feature by Run count of data (cells: #rows per feature/run; NA = absent)
structure = tapply ( work$ABUNDANCE, list ( work$FEATURE, work$RUN ) , function ( x ) length ( x ) )
## structure value should be 1 for label-free, if not there are missingness. if more there are duplicates.
flagmissing = sum(is.na(structure)) > 0
flagduplicate = sum(structure[!is.na(structure)]>1) > 0
### if there is missing rows
if ( flagmissing ) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows.
If missing peaks occur they should be included in the dataset as separate rows,
and the missing intensity values should be indicated with 'NA'.
The incomplete rows are listed below."))
write.table(processout, file=finalfile,row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows.
If missing peaks occur they should be included in the dataset as separate rows,
and the missing intensity values should be indicated with 'NA'.
The incomplete rows are listed below.")
## first, which run has missing
runstructure <- apply ( structure, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## accumulator for the missing rows; must exist before the loop below
missingwork <- NULL
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL","GROUP_ORIGINAL",
"GROUP","SUBJECT","SUBJECT_NESTED",
"RUN","FRACTION")])
## get feature ID
featureID <- structure[,colnames(structure)==runID[j]]
## get feature IDs which have no measurement in this run
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has incomplete rows for some features (",
paste(names(finalfeatureID), collapse=", "), ")"))
## save in process file.
processout <- rbind(processout, c(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has incomplete rows for some features (",
paste(names(featureID[is.na(featureID)]), collapse=", "), ")")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work[which(work$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE information
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format (INTENSITY/ABUNDANCE stay NA)
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## accumulate into the temporary container, missingwork
missingwork <- rbind(missingwork, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
## [THT: this part can probably be merged into the above.
## Also, it might be better to check fillIncompleteRows earlier
## and terminate the process when it's FALSE]
if (fillIncompleteRows) {
## merge the accumulated NA rows back into work
## in future, use rbindlist?? rbindlist(list(work, missingwork))
work <- rbind(work, missingwork)
## print message
message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
## save in process file.
processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
write.table(processout, file=finalfile, row.names=FALSE)
} else {
## save in process file.
processout <- rbind(processout,"Please check whether features in the list are generated from spectral processing tool.
Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
write.table(processout, file=finalfile,row.names=FALSE)
stop("Please check whether features in the list are generated from spectral processing tool or not.
Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
} # end for flag missing
## if there are duplicate measurements: report per run which features are
## duplicated, then stop (duplicates must be removed upstream).
if (flagduplicate) {
  ## first, which run has duplicates
  runstructure <- apply ( structure, 2, function ( x ) sum (x[!is.na(x)] > 1 ) > 0 )
  runID <- names(runstructure[runstructure==TRUE])
  ## then for each run, which features have duplicates,
  for(j in 1:length(runID)) {
    nameID <- unique(work[work$RUN == runID[j], c("SUBJECT_ORIGINAL", "GROUP_ORIGINAL",
                                                  "GROUP","SUBJECT", "SUBJECT_NESTED",
                                                  "RUN", "FRACTION")])
    featureID <- structure[, colnames(structure)==runID[j]]
    ## features with more than one row in this run
    finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
    message(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
                   ", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
                   " has multiple rows (duplicate rows) for some features (",
                   paste(names(finalfeatureID), collapse=", "), ")"))
    ## save in process file.
    ## FIX: this log line previously listed names(featureID[is.na(featureID)])
    ## (the MISSING features) instead of the duplicated ones reported by the
    ## message() above; both now list the same duplicated features.
    processout <- rbind(processout, c(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
                                             ", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
                                             " has multiple rows (duplicate rows) for some features (",
                                             paste(names(finalfeatureID), collapse=", "), ")")))
    write.table(processout, file=finalfile, row.names=FALSE)
  }
  ## save in process file.
  processout <- rbind(processout,"Please remove duplicate rows in the list above. ")
  write.table(processout, file=finalfile,row.names=FALSE)
  stop("Please remove duplicate rows in the list above.\n")
} # end flag duplicate
## no missing and no duplicates : structure is balanced, just log it
if (!flagmissing & !flagduplicate) {
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
## end label-free
} else {
## label-based experiment
## count the reference and endogenous separately
work.l <- work[work$LABEL == "L", ]
work.h <- work[work$LABEL == "H", ]
## get feature by Run count of data
structure.l <- tapply(work.l$ABUNDANCE, list(work.l$FEATURE, work.l$RUN), function (x) length (x) )
structure.h <- tapply(work.h$ABUNDANCE, list(work.h$FEATURE, work.h$RUN), function (x) length (x) )
## first, check some features which are completely missing across all runs
missingcomplete.l <- NULL
missingcomplete.h <- NULL
## 1. reference peptides
## per-feature count of runs with no measurement
featurestructure.h <- apply(structure.h, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.h <- names(featurestructure.h[featurestructure.h == ncol(structure.h)])
if (length(featureID.h) > 0) {
## print message
message(paste0("CAUTION : some REFERENCE features have missing intensities in all the runs.
The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n"))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some REFERENCE features have missing intensities in all the runs.
The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool.", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL", "GROUP_ORIGINAL", "GROUP", "SUBJECT", "SUBJECT_NESTED", "RUN", "FRACTION")])
## get PROTEIN and FEATURE information
## here use whole work dataset
tempTogetfeature <- work[which(work$FEATURE %in% featureID.h), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
#for(j in 1:nrow(nameID)) {
# ## merge feature info and run info as 'work' format
# tempmissingwork <- data.frame(tempfeatureID, LABEL="H",GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j], SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j], RUN=nameID$RUN[j], GROUP=nameID$GROUP[j], SUBJECT=nameID$SUBJECT[j], SUBJECT_NESTED=nameID$SUBJECT_NESTED[j], INTENSITY=NA, ABUNDANCE=NA, METHOD=nameID$METHOD[j])
# ## merge with tempary space, missingwork
# missingcomplete.h <- rbind(missingcomplete.h, tempmissingwork)
#}
# MC : 2016.04.21 : use merge for simplicity (by=NULL gives the cartesian product run x feature)
tmp <- merge(nameID, tempfeatureID, by=NULL)
missingcomplete.h <- data.frame(PROTEIN=tmp$PROTEIN,
PEPTIDE=tmp$PEPTIDE,
TRANSITION=tmp$TRANSITION,
FEATURE=tmp$FEATURE,
LABEL="H",
GROUP_ORIGINAL=tmp$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=tmp$SUBJECT_ORIGINAL,
RUN=tmp$RUN,
GROUP=tmp$GROUP,
SUBJECT=tmp$SUBJECT,
SUBJECT_NESTED=tmp$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=tmp$FRACTION)
rm(tmp)
} # end fillIncompleteRows option
} # end for reference peptides
## 2. endogenous peptides (same treatment as the reference peptides above)
featurestructure.l <- apply(structure.l, 1, function (x) sum(is.na(x)))
## get feature ID of endogenous features which are completely missing across runs
featureID.l <- names(featurestructure.l[featurestructure.l == ncol(structure.l)])
if (length(featureID.l) > 0) {
## print message
message(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs.
The completely missing ENDOGENOUS features are ", paste(featureID.l, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs.
The completely missing ENDOGENOUS features are ",
paste(featureID.l, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
nameID <- unique(work.l[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get PROTEIN and FEATURE information
## here use whole work dataset
tempTogetfeature <- work[which(work$FEATURE %in% featureID.l), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
#for (j in 1:nrow(nameID)) {
# ## merge feature info and run info as 'work' format
# tempmissingwork <- data.frame(tempfeatureID, LABEL="L",GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j], SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j], RUN=nameID$RUN[j], GROUP=nameID$GROUP[j], SUBJECT=nameID$SUBJECT[j], SUBJECT_NESTED=nameID$SUBJECT_NESTED[j], INTENSITY=NA, ABUNDANCE=NA, METHOD=nameID$METHOD[j])
# ## merge with tempary space, missingwork
# missingcomplete.l <- rbind(missingcomplete.l, tempmissingwork)
#}
# MC : 2016.04.21 : use merge for simplicity (by=NULL gives the cartesian product run x feature)
tmp <- merge(nameID, tempfeatureID, by=NULL)
missingcomplete.l <- data.frame(PROTEIN=tmp$PROTEIN,
PEPTIDE=tmp$PEPTIDE,
TRANSITION=tmp$TRANSITION,
FEATURE=tmp$FEATURE,
LABEL="L",
GROUP_ORIGINAL=tmp$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=tmp$SUBJECT_ORIGINAL,
RUN=tmp$RUN,
GROUP=tmp$GROUP,
SUBJECT=tmp$SUBJECT,
SUBJECT_NESTED=tmp$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=tmp$FRACTION)
rm(tmp)
} # end fillIncompleteRows option
} # end endogenous peptides
## second, check other (partial) missingness
## accumulators must exist before looping; they are needed in either case, with missingness or not
missingwork.l <- NULL
missingwork.h <- NULL
## structure value should be 1 for reference and endogenous separately, if not there are missingness. if more there are duplicates.
## "partially missing" = NA count is neither zero nor the full run count
## (completely-missing features were already handled above)
missing.l <- names(featurestructure.l[featurestructure.l != ncol(structure.l) & featurestructure.l != 0])
missing.h <- names(featurestructure.h[featurestructure.h != ncol(structure.h) & featurestructure.h != 0])
flagmissing.l = length(missing.l) > 0
flagmissing.h = length(missing.h) > 0
## structure value is greater than 1, there are duplicates
flagduplicate.l = sum(structure.l[!is.na(structure.l)] > 1) > 0
flagduplicate.h = sum(structure.h[!is.na(structure.h)] > 1) > 0
## if there are missing rows for endogenous or reference
if ( flagmissing.l | flagmissing.h ) {
processout <- rbind(processout,c("CAUTION: the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below.")
## endogenous intensities
if (flagmissing.l) {
## which runs have missing endogenous features
if (length(missing.l) > 1){
runstructure <- apply ( structure.l[which(rownames(structure.l) %in% missing.l), ], 2, function ( x ) sum ( is.na ( x ) ) ) > 0
} else if (length(missing.l) == 1) {
runstructure <- is.na ( structure.l[which(rownames(structure.l) %in% missing.l), ]) > 0
}
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.l[work.l$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
# MC : 2016/04/21. if there is one row, can't catch up data.frame
## get feature ID (single-feature case handled separately: subsetting one row drops to a vector)
if (length(missing.l) > 1){
featureID <- structure.l[which(rownames(structure.l) %in% missing.l), colnames(structure.l) == runID[j]]
## get feature IDs which have no measurement in this run
finalfeatureID <- names(featureID[is.na(featureID)])
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(finalfeatureID, collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(finalfeatureID, collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
} else if (length(missing.l) == 1) {
finalfeatureID <- missing.l
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", finalfeatureID,")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", finalfeatureID,")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.l[which(work.l$FEATURE %in% finalfeatureID), ]
## get PROTEIN and FEATURE information
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format (INTENSITY/ABUNDANCE stay NA)
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## accumulate into the temporary container, missingwork.l
missingwork.l <- rbind(missingwork.l,tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
## reference intensities
## Reference (heavy) rows: for each run flagged with partially-missing
## REFERENCE features, report the missing features (message + process log)
## and, when fillIncompleteRows=TRUE, queue NA rows into missingwork.h.
if (flagmissing.h) {
## first, which run has missing
if (length(missing.h) > 1){
runstructure <- apply ( structure.h[which(rownames(structure.h) %in% missing.h), ], 2,
function ( x ) sum ( is.na ( x ) ) ) > 0
} else if (length(missing.h) == 1) {
## single feature: subsetting drops to a vector, so apply() cannot be used
runstructure <- is.na ( structure.h[which(rownames(structure.h) %in% missing.h), ]) > 0
}
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.h[work.h$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
# MC : 2016/04/21. if there is one row, can't catch up data.frame
## get feature ID
if (length(missing.h) > 1){
featureID <- structure.h[which(rownames(structure.h) %in% missing.h), colnames(structure.h) == runID[j] ]
## get feature ID which has no measuremnt.
finalfeatureID <- names(featureID[is.na(featureID)])
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(finalfeatureID, collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(finalfeatureID, collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
} else if (length(missing.h) == 1) {
## single missing feature: its name is missing.h itself
finalfeatureID <- missing.h
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", finalfeatureID,")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", finalfeatureID,")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.h[which(work.h$FEATURE %in% finalfeatureID), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format; LABEL "H" = heavy reference
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.h <- rbind(missingwork.h, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for reference
} # end for flag missing
## merge missing rows if fillIncompleteRows=TRUE or message.
## Merge the collected missing-row records into 'work' when
## fillIncompleteRows=TRUE; otherwise stop with guidance if any incomplete
## rows were detected in either label.
if (fillIncompleteRows) {
## merge with work
## in future, use rbindlist?? rbindlist(list(work, missingwork))
work <- rbind(work,missingcomplete.l, missingcomplete.h, missingwork.l, missingwork.h)
## print message
message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
## save in process file.
processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
write.table(processout, file=finalfile, row.names=FALSE)
} else if (!is.null(missingcomplete.l) |
!is.null(missingcomplete.h) |
!is.null(missingwork.l) |
## BUG FIX: the fourth condition previously re-tested missingwork.l, so a
## dataset whose only incomplete rows were REFERENCE (heavy) features was
## silently accepted instead of triggering the stop() below.
!is.null(missingwork.h) ) {
## save in process file.
processout <- rbind(processout,
"Please check whether features in the list are generated from spectral processing tool.
Or the option, fillIncompleteRows=TRUE,
will add incomplete rows for missing peaks with intensity=NA.")
write.table(processout, file=finalfile, row.names=FALSE)
stop("Please check whether features in the list are generated from spectral processing tool or not. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
## if there are duplicates measurements
## If any REFERENCE (heavy) feature has multiple rows within a run, report
## every affected run with its duplicated features and abort: duplicates make
## the feature-by-run abundance table ambiguous.
if (flagduplicate.h) {
## first, which run has duplicates
runstructure <- apply ( structure.h, 2, function ( x ) sum ( x[!is.na(x)] > 1 )>0 )
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features have duplicates,
for(j in 1:length(runID)) {
nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
featureID <- structure.h[, colnames(structure.h)==runID[j]]
## features with count > 1 in this run are the duplicated ones
finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "), ")", sep="" ))
## save in process file.
## BUG FIX: the log previously listed names(featureID[is.na(featureID)])
## (the MISSING features) instead of the duplicated features shown by message().
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## save in process file.
processout <- rbind(processout,"Please remove duplicate rows in the list above. ")
write.table(processout, file=finalfile, row.names=FALSE)
stop("Please remove duplicate rows in the list above.\n")
} # end flag duplicate for reference
## Same duplicate check for ENDOGENOUS (light) features: report every run
## with duplicated feature rows and abort.
if (flagduplicate.l) {
## first, which run has duplicates
runstructure <- apply ( structure.l, 2, function ( x ) sum ( x[!is.na(x)] > 1 )>0 )
runID <- names(runstructure[runstructure == TRUE])
## then for each run, which features have duplicates,
for (j in 1:length(runID)) {
nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
featureID <- structure.l[, colnames(structure.l)==runID[j]]
## features with count > 1 in this run are the duplicated ones
finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
## BUG FIX: the log previously listed names(featureID[is.na(featureID)])
## (the MISSING features) instead of the duplicated features shown by message().
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## save in process file.
processout <- rbind(processout,"ERROR : Please remove duplicate rows in the list above. ")
write.table(processout, file=finalfile, row.names=FALSE)
stop("ERROR : Please remove duplicate rows in the list above.\n")
} # end flag duplicate for endogenous
## no missing and no duplicates
## Nothing missing and nothing duplicated in either label: record that the
## data already arrive in balanced format.
if (!(flagmissing.h | flagmissing.l | flagduplicate.h | flagduplicate.l)) {
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
}
} # end 1 method
} else { # multiple fractionations
allflagmissing <- NULL
allflagduplicate <- NULL
## check each method
## Per-fraction completeness check: build the FEATURE x RUN count table for
## each fraction and flag (a) missing feature/run combinations and
## (b) duplicated rows. One logical flag per fraction is accumulated in
## allflagmissing / allflagduplicate.
for (k in 1:length(unique(work$FRACTION))) {
worktemp <- work[work$FRACTION == k, ]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
## counts per FEATURE x RUN cell; NA means the combination is absent
structure <- tapply ( worktemp$ABUNDANCE, list ( worktemp$FEATURE, worktemp$RUN ) , function ( x ) length ( x ) )
## structure value should be 2 for labeled, 1 for label-free, if not there are missingness
if (nlevels(worktemp$LABEL) == 2) { ## label-based
flag = sum(is.na(structure)) > 0 | sum(structure[!is.na(structure)] < 2) > 0
} else { ## label-free
flag = sum(is.na(structure)) > 0
}
allflagmissing <- c(allflagmissing,flag)
## for duplicate
if (nlevels(worktemp$LABEL) == 2) { # label-based
worktemp.h <- worktemp[worktemp$LABEL == "H", ]
worktemp.l <- worktemp[worktemp$LABEL == "L", ]
structure.h <- tapply ( worktemp.h$ABUNDANCE, list ( worktemp.h$FEATURE, worktemp.h$RUN ) , function ( x ) length ( x ) )
structure.l <- tapply ( worktemp.l$ABUNDANCE, list ( worktemp.l$FEATURE, worktemp.l$RUN ) , function ( x ) length ( x ) )
flagduplicate <- sum(structure.h[!is.na(structure.h)] > 1) > 0 | sum(structure.l[!is.na(structure.l)] > 1) > 0
} else { # label-free
flagduplicate <- sum(structure[!is.na(structure)]>1) > 0
}
## BUG FIX: previously appended 'flag' (the missingness flag) here, so
## 'flagduplicate' was computed but never used and duplicate detection for
## multi-fraction data simply mirrored the missingness result.
allflagduplicate <- c(allflagduplicate, flagduplicate)
} # end to check any flag among methods
if ( sum(allflagmissing) != 0 ) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows. Missing feature intensities should be present in the dataset, and their intensities should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. Missing feature intensities should be present in the dataset, and their intensities should be indicated with 'NA'. The incomplete rows are listed below.")
## for missign row, need to assign before looping
missingwork <- NULL
missingcomplete.h <- NULL
missingcomplete.l <- NULL
missingwork.h <- NULL
missingwork.l <- NULL
for (k in 1:length(unique(work$FRACTION))) {
## see which method has missing rows
if (allflagmissing[k]) {
worktemp <- work[work$FRACTION==k, ]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
if (nlevels(worktemp$LABEL) == 1) { ## label-free
structure = tapply ( worktemp$ABUNDANCE, list ( worktemp$FEATURE, worktemp$RUN ) , function ( x ) length ( x ) )
## first, which run has missing
runstructure <- apply ( structure, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
## Label-free: for each run with incomplete rows, report which features are
## missing and (optionally) queue NA rows into 'missingwork'.
for (j in 1:length(runID)) {
nameID <- unique(worktemp[worktemp$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- structure[, colnames(structure)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## note: the lookup is against the whole 'work' table, not worktemp
tempTogetfeature <- work[which(work$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork <- rbind(missingwork, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run
} else { # end label-free
## label-based
## count the reference and endobenous separately
work.l <- worktemp[worktemp$LABEL=="L", ]
work.h <- worktemp[worktemp$LABEL=="H", ]
## get feature by Run count of data
structure.l <- tapply ( work.l$ABUNDANCE, list(work.l$FEATURE, work.l$RUN), function (x) length (x) )
structure.h <- tapply ( work.h$ABUNDANCE, list(work.h$FEATURE, work.h$RUN), function (x) length (x) )
## 1. reference peptides
featurestructure.h <- apply(structure.h, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.h <- names(featurestructure.h[featurestructure.h==ncol(structure.h)])
## Reference (heavy) features missing in ALL runs of this fraction: warn,
## log, and when fillIncompleteRows=TRUE append NA rows for every run so the
## data stay balanced.
if (length(featureID.h) > 0) {
## print message
message(paste("CAUTION : some REFERENCE features have missing intensities in all the runs. The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some REFERENCE features have missing intensities in all the runs. The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool.", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## this fraction has no heavy rows at all: take run info from the whole
## dataset's heavy rows and force FRACTION to the current fraction k
if( nrow(work.h) == 0 ){
work.h <- work[work$LABEL=="H", ]
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
nameID$FRACTION <- k
} else {
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
}
## get PROTEIN and FEATURE information
## here use whole worktemp dataset
tempTogetfeature <- worktemp[which(worktemp$FEATURE %in% featureID.h), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
## one block of NA rows per run (one row of nameID per run)
for (j in 1:nrow(nameID)) {
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j],
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j],
RUN=nameID$RUN[j],
GROUP=nameID$GROUP[j],
SUBJECT=nameID$SUBJECT[j],
SUBJECT_NESTED=nameID$SUBJECT_NESTED[j],
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION[j])
## merge with tempary space, missingwork
missingcomplete.h <- rbind(missingcomplete.h, tempmissingwork)
}
} # end fillIncompleteRows option
} # end for reference peptides
## 2. endogenous peptides
featurestructure.l <- apply(structure.l, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.l <- names(featurestructure.l[featurestructure.l==ncol(structure.l)])
## Endogenous (light) features missing in ALL runs of this fraction: warn,
## log, and when fillIncompleteRows=TRUE append NA rows for every run.
if (length(featureID.l) > 0) {
## print message
message(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs. The completely missing ENDOGENOUS features are ", paste(featureID.l, collapse=", "), ". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
## BUG FIX: the logged text previously misspelled "ENDOGENOUS" as "ENCOGENOUS".
processout <- rbind(processout, c(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs. The completely missing ENDOGENOUS features are ", paste(featureID.l, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
## NOTE(review): unlike the reference branch above there is no guard for
## nrow(work.l) == 0 here -- presumably endogenous rows always exist in
## every fraction; verify against real multi-fraction inputs.
nameID <- unique(work.l[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get PROTEIN and FEATURE information
## here use whole worktemp dataset
tempTogetfeature <- worktemp[which(worktemp$FEATURE %in% featureID.l), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness, one NA block per run
for(j in 1:nrow(nameID)) {
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j],
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j],
RUN=nameID$RUN[j],
GROUP=nameID$GROUP[j],
SUBJECT=nameID$SUBJECT[j],
SUBJECT_NESTED=nameID$SUBJECT_NESTED[j],
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION[j])
## merge with tempary space, missingwork
missingcomplete.l <- rbind(missingcomplete.l, tempmissingwork)
}
} # end fillIncompleteRows option
} # end endogenous peptides
## second, check other some missingness
## structure value should be 1 for reference and endogenous separately, if not there are missingness. if more there are duplicates.
## if count of NA is not zero and not number of run (excluding complete missingness across runs)
missing.l <- names(featurestructure.l[featurestructure.l!=ncol(structure.l) & featurestructure.l != 0])
missing.h <- names(featurestructure.h[featurestructure.h!=ncol(structure.h) & featurestructure.h != 0])
flagmissing.l <- length(missing.l) > 0
flagmissing.h <- length(missing.h) > 0
## structure value is greater than 1, there are duplicates
flagduplicate.l <- sum(structure.l[!is.na(structure.l)] > 1) > 0
flagduplicate.h <- sum(structure.h[!is.na(structure.h)] > 1) > 0
## if there is missing rows for endogenous
## Handle PARTIALLY missing features (present in some runs, absent in others)
## for this fraction, separately for endogenous (L) and reference (H) rows.
## Completely-missing features (featureID.l / featureID.h) were reported above
## and are excluded before locating runs with incomplete rows.
if (flagmissing.l | flagmissing.h) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below.")
## endogenous intensities
if (flagmissing.l) {
## first, which run has missing
## BUG FIX: the original indexed with -which(rownames(...) %in% featureID.l),
## which selects ZERO rows when featureID.l is empty (negative indexing with
## integer(0)) and collapses the matrix to a vector (breaking apply) when
## exactly one row remains. Logical indexing with drop=FALSE handles both.
sub.structure.l <- structure.l[!(rownames(structure.l) %in% featureID.l), , drop=FALSE]
runstructure <- apply ( sub.structure.l, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
## (seq_along avoids iterating over c(1, 0) when runID is empty)
for (j in seq_along(runID)) {
## get subject, group information for this run
nameID <- unique(work.l[work.l$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- sub.structure.l[, colnames(sub.structure.l)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[, "GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.l[which(work.l$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.l <- rbind(missingwork.l, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
## reference intensities
if (flagmissing.h) {
## first, which run has missing (same edge-case-safe subsetting as above)
sub.structure.h <- structure.h[!(rownames(structure.h) %in% featureID.h), , drop=FALSE]
runstructure <- apply ( sub.structure.h, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for (j in seq_along(runID)) {
## get subject, group information for this run
nameID <- unique(work.h[work.h$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- sub.structure.h[, colnames(sub.structure.h)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.h[which(work.h$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.h <- rbind(missingwork.h, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for reference
} # end any missingness
} # end label-based
} # if only any flag for method
} # end loop for methods
## After scanning every fraction: merge the accumulated missing-row records
## into 'work', or stop with guidance when fillIncompleteRows=FALSE but
## incomplete rows were found.
if (fillIncompleteRows) {
## merge with work
## in future, use rbindlist?? rbindlist(list(work, missingwork))
## NOTE(review): 'worktemp' here is whatever the last fraction iteration left
## behind; it decides label-free vs label-based merging -- confirm that all
## fractions share the same labelling.
if (nlevels(worktemp$LABEL) == 1) {
work <- rbind(work, missingwork)
} else {
work <- rbind(work, missingcomplete.l, missingcomplete.h, missingwork.l, missingwork.h)
}
## print message
message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
## save in process file.
processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
write.table(processout, file=finalfile,row.names=FALSE)
## BUG FIX: the condition below previously tested missingwork.l twice and
## never missingwork.h, so reference-only incomplete rows escaped the stop().
} else if (!is.null(missingcomplete.l) | !is.null(missingcomplete.h) | !is.null(missingwork.l) | !is.null(missingwork.h) | !is.null(missingwork)) {
## save in process file.
processout <- rbind(processout, "Please check whether features in the list are generated from spectral processing tool. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
write.table(processout, file=finalfile, row.names=FALSE)
stop("Please check whether features in the list are generated from spectral processing tool. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
} else {
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
## for duplicate, in future
} # end multiple method
## factorize GROUP, SUBJECT, GROUP_ORIGINAL, SUBJECT_ORIGINAL, SUBJECT_ORIGINAL_NESTED, FEATURE, RUN
## -------------------------------------------------------------------------------------------------
## Factorize identifier columns and fix the factor level order.
## NOTE: the sort below is load-bearing -- the later
## factor(..., levels=unique(...)) calls rely on rows already being ordered
## by LABEL/GROUP/SUBJECT/RUN/feature, so keep these statements in order.
work$PROTEIN <- factor(work$PROTEIN)
work$PEPTIDE <- factor(work$PEPTIDE)
work$TRANSITION <- factor(work$TRANSITION)
work <- work[with(work, order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, PROTEIN, PEPTIDE, TRANSITION)),]
work$GROUP <- factor(work$GROUP)
work$SUBJECT <- factor(work$SUBJECT)
## SUBJECT_ORIGINAL_NESTED will sorted as GROUP_ORIGINAL, SUBJECT_ORIGINAL
work$SUBJECT_NESTED <- factor(work$SUBJECT_NESTED, levels=unique(work$SUBJECT_NESTED))
## FEATURE will sorted as PROTEIN, PEPTIDE, TRANSITION
work$FEATURE <- factor(work$FEATURE, levels=unique(work$FEATURE))
## RUN will sorted as GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN
## keep the original run identifier before recoding RUN to 1..n
work$originalRUN <- work$RUN
work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
processout <- rbind(processout, c("Factorize in columns(GROUP, SUBJECT, GROUP_ORIGINAL,
SUBJECT_ORIGINAL, SUBJECT_ORIGINAL_NESTED, FEATURE, RUN) - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## Normalization ##
## ------------- ##
## Normalization : option 0. none
## Normalization, option 0: "NONE"/"FALSE" leaves ABUNDANCE untouched and
## only records the decision. (After toupper(), a logical FALSE arrives here
## as the string "FALSE".)
if (any(c("NONE", "FALSE") %in% normalization)) {
processout <- rbind(processout, c("Normalization : no normalization - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
}
## Normalization : option 1. constant normalization , equalize medians ##
## -------------------------------------------------------------------
## Normalization, option 1: constant normalization (equalize medians).
## Each run's ABUNDANCE is shifted so run medians agree within a fraction:
## label-free data estimate run medians from all rows, label-based data from
## the heavy-standard rows only (but shift ALL rows of the run).
if (!is.element("NONE", normalization) &
!is.element("FALSE", normalization) &
is.element("EQUALIZEMEDIANS", normalization)) {
if (nlevels(work$LABEL) == 1) {
## Constant normalization by endogenous per method
## [MC : use median of medians]
median.run.method <- aggregate(ABUNDANCE ~ RUN + FRACTION, data = work, median, na.rm = TRUE)
median.method <- tapply(median.run.method$ABUNDANCE, median.run.method$FRACTION, median, na.rm = TRUE)
nmethod <- unique(work$FRACTION)
## NOTE(review): median.method is ordered by sorted FRACTION values (tapply)
## while nmethod follows order of appearance (unique); median.method[j]
## assumes the two orders coincide -- confirm FRACTION is always 1..k.
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j], "RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized: subtract the run median, add the fraction median
namerun.idx <- which(work$RUN == namerun[i])
work[namerun.idx, "ABUNDANCE"] <- work[namerun.idx, "ABUNDANCE"] - median.run.method[median.run.method$RUN == namerun[i], "ABUNDANCE"] + median.method[j]
}
}
}
if (nlevels(work$LABEL) == 2 ) {
## Constant normalization by heavy standard per method
h <- work[work$LABEL == "H", ]
## [MC : use median of medians]
median.run.method <- aggregate(ABUNDANCE ~ RUN + FRACTION, data = h, median, na.rm = TRUE)
median.method <- tapply(median.run.method$ABUNDANCE, median.run.method$FRACTION, median, na.rm = TRUE)
nmethod <- unique(work$FRACTION)
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION==nmethod[j],"RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized (both H and L rows of the run are shifted)
namerun.idx <- which(work$RUN == namerun[i])
work[namerun.idx, "ABUNDANCE"] <- work[namerun.idx, "ABUNDANCE"] - median.run.method[median.run.method$RUN == namerun[i], "ABUNDANCE"] + median.method[j]
}
} # end loop method
} # for labe-based
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : Constant normalization (equalize medians) - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : Constant normalization (equalize medians) per fraction - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
} ## end equaliemedian normalization
## Normalization : option 2. quantile normalization ##
## ------------------------------------------------ ##
if (!is.element("NONE", normalization) &
!is.element("FALSE", normalization) &
is.element("QUANTILE", normalization)) {
## Label-free quantile normalization, done per fraction on the FEATURE x RUN
## abundance matrix via preprocessCore::normalize.quantiles.
if (nlevels(work$LABEL) == 1) {
## for label-free, just use endogenous
nmethod <- unique(work$FRACTION)
quantileall <- NULL
## ABUNDANCE=0 replace with 1, in order to distinguish later.
## (true zeros would otherwise be indistinguishable from the xtabs fill value)
work[!is.na(work$ABUNDANCE) & work$ABUNDANCE == 0, 'ABUNDANCE'] <- 1
for (j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j],"RUN"])
worktemp <- work[which(work$RUN %in% namerun & !is.na(work$INTENSITY)),]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
## xtabs fills absent FEATURE/RUN cells with 0
quantiletemp <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp))
## need to put NA for missing value in endogenous
quantiletemp[quantiletemp == 0] <- NA
## using preprocessCore library
quantiledone <- normalize.quantiles(quantiletemp)
rownames(quantiledone) <- rownames(quantiletemp)
colnames(quantiledone) <- colnames(quantiletemp)
## get quantiled to long format for apply difference endogenous
quantilelong <- melt(quantiledone, id=rownames(quantiledone))
colnames(quantilelong) <- c("FEATURE", "RUN", "ABUNDANCE_quantile")
rm(quantiledone)
## quantileall <- rbindlist(list(quantileall,quantilelong))
quantileall <- rbind(quantileall, quantilelong)
rm(quantilelong)
}
work <- merge(work, quantileall, by=c("FEATURE", "RUN"))
rm(quantileall)
## reorder: rebuild 'work' with ABUNDANCE replaced by the quantiled values
work <- data.frame("PROTEIN"=work$PROTEIN,
"PEPTIDE"=work$PEPTIDE,
"TRANSITION"=work$TRANSITION,
"FEATURE"=work$FEATURE,
"LABEL"=work$LABEL,
"GROUP_ORIGINAL"=work$GROUP_ORIGINAL,
"SUBJECT_ORIGINAL"=work$SUBJECT_ORIGINAL,
"RUN"=work$RUN,
"GROUP"=work$GROUP,
"SUBJECT"=work$SUBJECT,
"SUBJECT_NESTED"=work$SUBJECT_NESTED,
"INTENSITY"=work$INTENSITY,
"ABUNDANCE"=work$ABUNDANCE_quantile,
"FRACTION"=work$FRACTION,
"originalRUN"=work$originalRUN)
work <- work[with(work, order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, PROTEIN, PEPTIDE, TRANSITION)), ]
## for skyline case, separate 1 and zero
## (presumably ABUNDANCE = log(INTENSITY), so INTENSITY==1 marks the rows
## whose abundance was 0 before the substitution above -- TODO confirm)
work[!is.na(work$INTENSITY) & work$INTENSITY == 1, 'ABUNDANCE'] <- 0
}
## Label-based quantile normalization, per fraction: quantile-normalize the
## heavy (reference) FEATURE x RUN matrix, then shift the light (endogenous)
## matrix by the same per-cell correction (quantiletemp - quantiledone).
if (nlevels(work$LABEL) == 2) {
nmethod <- unique(work$FRACTION)
quantileall <- NULL
for (j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j], "RUN"])
## for label-based, make quantile normalization for reference
##worktemp <- work[which(work$RUN %in% namerun & work$LABEL=="H" & !is.na(work$INTENSITY)),] ## because for sparse of reference
worktemp <- work[which(work$RUN %in% namerun & work$LABEL == "H"),]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
quantiletemp <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp))
rm(worktemp)
## need to put NA for missing value in endogenous
quantiletemp[quantiletemp==0] <- NA
## using preprocessCore library
quantiledone <- normalize.quantiles(quantiletemp)
rownames(quantiledone) <- rownames(quantiletemp)
colnames(quantiledone) <- colnames(quantiletemp)
## get quantiled to long format for apply difference endogenous
quantilelong.h <- melt(quantiledone, id=rownames(quantiledone))
colnames(quantilelong.h) <- c("FEATURE","RUN","ABUNDANCE_quantile")
quantilelong.h <- data.frame(quantilelong.h, LABEL="H")
## endogenous, in order to applying
##worktemp.l <- work[which(work$RUN %in% namerun & work$LABEL=="L" & !is.na(work$INTENSITY)),] ## because for sparse of reference
worktemp.l <- work[which(work$RUN %in% namerun & work$LABEL=="L"),]
worktemp.l$RUN <- factor(worktemp.l$RUN)
worktemp.l$FEATURE <- factor(worktemp.l$FEATURE)
quantiletemp.l <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp.l))
rm(worktemp.l)
## need to put NA for missing value in endogenous
quantiletemp.l[quantiletemp.l==0] <- NA
## apply the difference from reference
## NOTE(review): this subtraction assumes the light and heavy matrices share
## identical FEATURE/RUN dimensions and ordering -- verify for unbalanced data.
quantiledone.l <- quantiletemp.l-(quantiletemp-quantiledone)
## get quantiled to long format for apply difference endogenous
quantilelong.l <- melt(quantiledone.l, id=rownames(quantiledone.l))
colnames(quantilelong.l) <- c("FEATURE", "RUN", "ABUNDANCE_quantile")
quantilelong.l <- data.frame(quantilelong.l, LABEL="L")
rm(quantiletemp)
rm(quantiledone)
rm(quantiletemp.l)
rm(quantiledone.l)
# quantileall <- rbindlist(list(quantileall,quantilelong.h, quantilelong.l))
quantileall <- rbind(quantileall,quantilelong.h, quantilelong.l)
}
## merge with original data
work <- merge(work,quantileall, by=c("FEATURE","RUN","LABEL"))
## reorder: rebuild 'work' with ABUNDANCE replaced by the quantiled values
work <- data.frame("PROTEIN"=work$PROTEIN,
"PEPTIDE"=work$PEPTIDE,
"TRANSITION"=work$TRANSITION,
"FEATURE"=work$FEATURE,
"LABEL"=work$LABEL,
"GROUP_ORIGINAL"=work$GROUP_ORIGINAL,
"SUBJECT_ORIGINAL"=work$SUBJECT_ORIGINAL,
"RUN"=work$RUN,
"GROUP"=work$GROUP,
"SUBJECT"=work$SUBJECT,
"SUBJECT_NESTED"=work$SUBJECT_NESTED,
"INTENSITY"=work$INTENSITY,
"ABUNDANCE"=work$ABUNDANCE_quantile,
"FRACTION"=work$FRACTION,
"originalRUN" = work$originalRUN)
work <- work[with(work,order(LABEL,GROUP_ORIGINAL,SUBJECT_ORIGINAL,RUN,PROTEIN,PEPTIDE,TRANSITION)),]
}
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : Quantile normalization - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : Quantile normalization per fraction - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
}
## Normalization : option 3. global standards - for endogenous ##
## ----------------------------------------------------------- ##
## Shift each run's endogenous (light) abundances so that the mean abundance
## of user-supplied standard peptides/proteins is equalized across runs
## (per fraction, centered at the fraction median).
if (!is.element("NONE", normalization) &
    !is.element("FALSE", normalization) &
    is.element("GLOBALSTANDARDS", normalization)) {
work$RUN <- factor(work$RUN)
## `combine` collects one meanStandard column per requested standard
combine <- data.frame(RUN=levels(work$RUN))
allPeptide <- unique(work$PEPTIDE)
allProtein <- unique(work$PROTEIN)
for (i in 1:length(nameStandards)) {
## if Peptides
## namePeptide <- allPeptide[grep(nameStandards[i],allPeptide)] ## cannot grep for modified peptide sequence, [,],+ sign
## NOTE(review): `tempPeptide` is built above this view — presumably a
## PEPTIDESEQUENCE -> PEPTIDE lookup; verify against the earlier code.
namePeptide <- tempPeptide[tempPeptide$PEPTIDESEQUENCE == nameStandards[i], "PEPTIDE"]
if (length(namePeptide)!=0) {
tempStandard <- work[work$PEPTIDE == namePeptide,]
} else {
## if Proteins
nameProtein <- allProtein[allProtein == nameStandards[i]] # if we use 'grep', can' find the proteins name with some symbol, such as 'sp|P30153|2AAA_HUMAN'
if (length(nameProtein)!=0) {
tempStandard <- work[work$PROTEIN==nameProtein,]
} else {
## unknown standard name: log and abort with an actionable message
processout <- rbind(processout,c(paste("global standard peptides or proteins, ",nameStandards[i] ,", is not in dataset. Please check whether 'nameStandards' input is correct or not.")))
write.table(processout, file=finalfile,row.names=FALSE)
stop(paste("global standard peptides or proteins, ",nameStandards[i] ,", is not in dataset. Please check whether 'nameStandards' input is correct or not."))
}
}
## here, by RUN, but need to check !!!
## drop GROUP=="0" rows (reference-only runs) and missing abundances before averaging
tempStandard <- tempStandard[tempStandard$GROUP!="0",]
tempStandard$RUN <- factor(tempStandard$RUN)
tempStandard <- tempStandard[!is.na(tempStandard$ABUNDANCE),]
meanStandard <- tapply(tempStandard$ABUNDANCE, tempStandard$RUN, function(x) mean(x, na.rm=TRUE))
meanStandard <- data.frame(RUN=names(meanStandard),meanStandard)
combine <- merge(combine, meanStandard, by="RUN", all=TRUE)
colnames(combine)[i+1] <- paste("meanStandard",i,sep="")
}
rownames(combine) <- combine$RUN
combine <- subset(combine, select=-c(RUN))
## get mean among global standards
allmean <- apply(combine,1, function(x) mean(x, na.rm=TRUE))
## allmean[is.na(allmean)] <- 0
allmeantemp <- data.frame(RUN=names(allmean),allmean)
allrun <- unique(work[,c("RUN","FRACTION")])
allmeantemp <- merge(allmeantemp, allrun,by="RUN")
## per-fraction median of the per-run standard means: the centering target
median.all <- tapply(allmeantemp$allmean, allmeantemp$FRACTION, function(x) median(x,na.rm=TRUE))
## adjust
nmethod <- unique(work$FRACTION)
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION==nmethod[j], "RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized
## only endogenous (LABEL=="L") rows are shifted; runs with no standard mean are skipped
if (!is.na(allmean[names(allmean)==namerun[i]])) work[work$RUN==namerun[i] & work$LABEL=="L","ABUNDANCE"] <- work[work$RUN==namerun[i] & work$LABEL=="L","ABUNDANCE"]-allmean[names(allmean)==namerun[i]]+median.all[j]
}
} # end loop method
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : normalization with global standards protein - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : normalization with global standards protein - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
}
## ----------------------------------------------------------- ##
## after normalization, zero intensity could be negative
## if abundance became less than zero, after normalization, clamp to zero
work[!is.na(work$ABUNDANCE) & work$ABUNDANCE < 0, "ABUNDANCE"] <- 0
## if abundance become greater than zero, after normalization.
## hard to know how much higher, so, use intensity value, which is not used for normalization:
## rows that started as INTENSITY==1 (originally zero signal) are forced back to 0
work[!is.na(work$INTENSITY) & work$INTENSITY == 1, "ABUNDANCE"] <- 0
## ----------------------------------------------------------- ##
## if there are multiple method, need to merge after normalization + before feature selection ##
## Merge runs that come from different fractions of the same sample into one
## logical run, so that downstream summarization sees one run per sample.
if ( length(unique(work$FRACTION)) > 1 ){
## check any features measured across all runs.
## use the subset of data without missing values
## here 'INTENSITY' is used, instead of 'ABUNDANCE'
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
check.multiple.run <- xtabs(~ FEATURE + FRACTION, tmp)
check.multiple.run.TF <- check.multiple.run != 0
check.multiple.run.feature <- apply(check.multiple.run.TF, 1, sum)
## each feature should be measured only in one method
overlap.feature <- names(check.multiple.run.feature[check.multiple.run.feature > 1 ])
## It should be zero overlap.feature.
## however, this is for double-check.
## If there are overlapped feature, it means something not works well above filtering.
if( length(overlap.feature) > 0 ){
message(paste0("** Please check the listed featurues (",
               paste(overlap.feature, collapse=", "),
               ") \n Those features are measured across all fractionations."))
processout <- rbind(processout,
                    c( paste0("** Please check the listed featurues (",
                              paste(overlap.feature, collapse=", "),
                              ") Those features are measured across all fractionations.
                              Please keep only one intensity of listed features among fractionations from one sample.")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("Please keep only one intensity of listed features among fractinations from one sample. \n")
}
## ----------------------------------------------------------- ##
## merge ##
## get which Run id should be merged
## decide which two runs should be merged
## Path 1: technical replicates present — match runs on GROUP/SUBJECT/TECHREPLICATE.
if( any(is.element(colnames(work), 'TECHREPLICATE')) ) {
runid.multiple <- unique(work[, c('GROUP_ORIGINAL',
                                  'SUBJECT_ORIGINAL',
                                  'RUN',
                                  'originalRUN',
                                  'FRACTION',
                                  'TECHREPLICATE')])
## if there are technical replicates from the same group and subject, can't match.
## dcast yields one row per sample with one originalRUN column per fraction
run.match <- try(reshape2::dcast(GROUP_ORIGINAL + SUBJECT_ORIGINAL + TECHREPLICATE ~ FRACTION,
                                 data=runid.multiple, value.var = 'originalRUN'), silent=TRUE)
if (class(run.match) == "try-error") {
processout <- rbind(processout,c( "*** error : can't figure out which multiple runs come from the same sample."))
write.table(processout, file=finalfile, row.names=FALSE)
stop("*** error : can't figure out which multiple runs come from the same sample.")
} else {
work$newRun <- NA
run.match$GROUP_ORIGINAL <- as.character(run.match$GROUP_ORIGINAL)
run.match$SUBJECT_ORIGINAL <- as.character(run.match$SUBJECT_ORIGINAL)
## assign one merged run id (GROUP_SUBJECT_TECHREP_..._merged) to all runs of a sample
for(k in 1:nrow(run.match)){
work[which(work$originalRUN %in%
           run.match[k, 4:ncol(run.match)]), 'newRun'] <- paste(paste(run.match[k, 1:4], collapse = "_"), 'merged', sep="_")
}
## remove extra run NAs
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
na.count <- reshape2::dcast(FEATURE ~ FRACTION, data=tmp, fun.aggregate=length, value.var='ABUNDANCE')
na.count.long <- melt(na.count, id.vars=c('FEATURE'))
na.count.long <- na.count.long[na.count.long$value == length(unique(work$newRun)), ]
na.count.long$tmp <- paste(na.count.long$FEATURE, na.count.long$variable, sep="_")
work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
work <- work[-which(work$tmp %in% na.count.long$tmp), ]
##
work$originalRUN <- work$newRun
## update RUN based on new originalRUN
work$RUN <- work$originalRUN
work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
work <- work[, -which(colnames(work) %in% c('tmp','newRun'))]
}
} else { ## Fraction, but no tech replicate
## Path 2: no technical replicates — match runs on GROUP/SUBJECT only.
runid.multiple <- unique(work[, c('GROUP_ORIGINAL',
                                  'SUBJECT_ORIGINAL',
                                  'RUN',
                                  'originalRUN',
                                  'FRACTION')])
## if there are technical replicates from the same group and subject, can't match.
run.match <- try(reshape2::dcast(GROUP_ORIGINAL + SUBJECT_ORIGINAL ~ FRACTION,
                                 data=runid.multiple, value.var = 'originalRUN'), silent=TRUE)
if (class(run.match) == "try-error") {
processout <- rbind(processout,
                    c( "*** error : can't figure out which multiple runs come from the same sample."))
write.table(processout, file=finalfile, row.names=FALSE)
stop("*** error : can't figure out which multiple runs come from the same sample.")
} else {
work$newRun <- NA
run.match$GROUP_ORIGINAL <- as.character(run.match$GROUP_ORIGINAL)
run.match$SUBJECT_ORIGINAL <- as.character(run.match$SUBJECT_ORIGINAL)
for(k in 1:nrow(run.match)){
work[which(work$originalRUN %in%
           run.match[k, 3:ncol(run.match)]), 'newRun'] <- paste(paste(run.match[k, 1:3],
                                                                      collapse = "_"), 'merged', sep="_")
}
## remove extra run NAs or less than zero
## because the goal is to find the one fraction should be used for each feature.
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
## find which fraction should be used for each feature
select.fraction <- tmp %>% group_by(FEATURE, FRACTION) %>% summarise(ncount = n())
## check : test <- select.fraction %>% group_by(FEATURE) %>% summarise(nfeature = n())
## it can be less than # of runs, if there are any missing
## just in case that there are zero runs, let's check and remove.
select.fraction <- select.fraction %>% filter(ncount != 0)
select.fraction$tmp <- paste(select.fraction$FEATURE, select.fraction$FRACTION, sep="_")
## then keep one fraction for each feature
work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
work <- work[which(work$tmp %in% select.fraction$tmp), ]
## new run has merged run id
## original run id can be different by fraction
## now fraction information from run will be removed.
work$originalRUN <- work$newRun
## update RUN based on new originalRUN
work$RUN <- work$originalRUN
work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
work <- work[, -which(colnames(work) %in% c('tmp','newRun'))]
}
}
}
#Below two lines were merely for in-house testing and comparisons when needed
#work.NoImpute <- work
#AbundanceAfterImpute <- .Imputation(work, cutoffCensored, censoredInt, remove50missing, MBimpute, original_scale)
## ------------- ##
## how to decide censored or not
## ------------- ##
## Flags low/missing abundances as 'censored' (left-censored, i.e. below the
## detection limit) so the TMP summarization can impute them with an AFT model.
### If imputation=TRUE and there is any value for maxQuantileforCensored, apply cutoff for censored missing
if ( summaryMethod == "TMP" & MBimpute ) {
work$LABEL <- factor(work$LABEL)
label <- nlevels(work$LABEL)==2
work$censored <- FALSE
## if intensity = 1, but abundance > cutoff after normalization, it also should be censored.
if( !is.null(maxQuantileforCensored) ) {
### label-free
if( !label ){
### calculate outlier cutoff
## only consider intensity > 1
tmp <- work[!is.na(work$INTENSITY) & work$INTENSITY > 1, 'ABUNDANCE']
## or
#tmp <- work[!is.na(work$INTENSITY), 'ABUNDANCE']
## cutoff = Q1 - multiplier*IQR, where the multiplier mirrors the spread of
## the upper tail (maxQuantileforCensored quantile vs Q3) relative to the IQR
log2int.prime.quant <- quantile(tmp, prob=c(0.01, 0.25, 0.5, 0.75, maxQuantileforCensored), na.rm = TRUE)
iqr <- log2int.prime.quant[4] - log2int.prime.quant[2]
### need to decide the multiplier from high intensities
multiplier <- (log2int.prime.quant[5] - log2int.prime.quant[4])/iqr
cutoff.lower <- (log2int.prime.quant[2] - multiplier * iqr)
work[!is.na(work$INTENSITY) &
     work$ABUNDANCE < cutoff.lower, 'censored'] <- TRUE
message(paste('** Log2 intensities under cutoff =',
              format(cutoff.lower, digits=5),
              ' were considered as censored missing values.'))
processout <- rbind(processout,
                    c(paste('** Log2 intensities under cutoff =',
                            format(cutoff.lower, digits=5),
                            ' were considered as censored missing values.')))
write.table(processout, file=finalfile, row.names=FALSE)
## if censoredInt == '0, and cutoff is negative, still zero should becensored
if ( cutoff.lower <= 0 & !is.null(censoredInt) & censoredInt == "0" ) {
work[!is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
work[!is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
message(paste('** Log2 intensities = 0 were considered as censored missing values.'))
processout <- rbind(processout,
                    c(paste('** Log2 intensities = 0 were considered as censored missing values.')))
write.table(processout, file=finalfile, row.names=FALSE)
}
## if censoredInt == NA, original NA also shoule be 'censored'
if (!is.null(censoredInt) & censoredInt == "NA") {
work[is.na(work$INTENSITY), 'censored'] <- TRUE
message(paste('** Log2 intensities = NA were considered as censored missing values.'))
processout <- rbind(processout, c('** Log2 intensities = NA were considered as censored missing values.'))
write.table(processout, file=finalfile, row.names=FALSE)
}
}
### labeled : only consider light. Assume that missing in heavy is random.
if( label ){
work.tmp <- work[which(work$LABEL %in% 'L'), ]
### calculate outlier cutoff
## only consider intensity > 1
tmp <- work.tmp[!is.na(work.tmp$INTENSITY) & work.tmp$INTENSITY > 1, 'ABUNDANCE']
log2int.prime.quant <- quantile(tmp, prob=c(0.01, 0.25, 0.5, 0.75, maxQuantileforCensored), na.rm = TRUE)
iqr <- log2int.prime.quant[4] - log2int.prime.quant[2]
### need to decide the multiplier from high intensities
multiplier <- (log2int.prime.quant[5] - log2int.prime.quant[4])/iqr
cutoff.lower <- (log2int.prime.quant[2] - multiplier * iqr)
#work$censored <- FALSE
## same rule as label-free, restricted to endogenous (light) rows
work[work$LABEL == 'L' &
     !is.na(work$INTENSITY) &
     work$ABUNDANCE < cutoff.lower, 'censored'] <- TRUE
message(paste('** Log2 endogenous intensities under cutoff =',
              format(cutoff.lower, digits=5),
              ' were considered as censored missing values.'))
processout <- rbind(processout,
                    c(paste('** Log2 endogenous intensities under cutoff =',
                            format(cutoff.lower, digits=5),
                            ' were considered as censored missing values.')))
write.table(processout, file=finalfile, row.names=FALSE)
## if censoredInt == '0, and cutoff is negative, still zero should becensored
if ( cutoff.lower <= 0 & !is.null(censoredInt) & censoredInt == "0" ) {
work[work$LABEL == 'L' &
     !is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
work[work$LABEL == 'L' &
     !is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
message(paste('** Log2 endogenous intensities = 0 were considered as censored missing values.'))
processout <- rbind(processout,
                    c(paste('** Log2 endogenous intensities = 0 were considered as censored missing values.')))
write.table(processout, file=finalfile, row.names=FALSE)
}
## if censoredInt == NA, original NA also shoule be 'censored'
if (!is.null(censoredInt) & censoredInt == "NA") {
work[work$LABEL == 'L' &
     is.na(work$INTENSITY), 'censored'] <- TRUE
message(paste('** Log2 endogenous intensities = NA were considered as censored missing values.'))
processout <- rbind(processout,
                    c(paste('** Log2 endogenous intensities = NA were considered as censored missing values.')))
write.table(processout, file=finalfile, row.names=FALSE)
}
}
} else { ## will MBimpute, but not apply algorithm for cutoff
## no quantile cutoff requested: only zero/NA abundances become censored
if(censoredInt == '0'){
work[work$LABEL == 'L' & !is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
work[work$LABEL == 'L' & !is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
}
if(censoredInt == 'NA'){
work[work$LABEL == 'L' & is.na(work$ABUNDANCE), 'censored'] <- TRUE
}
}
}
## ------------- ##
## featureSubset ##
## ------------- ##
## Selects which features carry into summarization:
##   "all"         : keep everything;
##   "highQuality" : flag uninformative features/outliers (handled downstream);
##   "top3"/"topN" : keep the features with the highest mean log2 intensity
##                   per protein, marking the rest via a `remove` column.
## !! need to decide how to present : keep original all data and make new column to mark, or just present selected subset
if (featureSubset == "all") {
message("** Use all features that the dataset origianally has.")
processout <- rbind(processout, c("** Use all features that the dataset origianally has."))
write.table(processout, file=finalfile, row.names=FALSE)
}
if (featureSubset == "highQuality") {
### v3.15.2 (2019/04/28) : by Tsung-Heng
message("** Flag uninformative feature and outliers by feature selection algorithm.")
processout <- rbind(processout, c("** Flag uninformative feature and outliers by feature selection algorithm."))
write.table(processout, file=finalfile, row.names=FALSE)
work <- flag_noninf_data_nbftr(work)
# work <- flag_noninf_data(work)
#if(remove_uninformative_feature_outlier){
### for heavy outlier, always need to replace with NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'H', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'H', 'ABUNDANCE'] <- NA
### replace with censored missing
#if (!is.null(censoredInt) & censoredInt == "0") {
### [TEST] ###
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- 0
# work[work$is_outlier & work$LABEL == 'L', 'censored'] <- TRUE
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- 0
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- 0
### [TEST] ###
#} else { ## if censoredInt= NA or null, replace with NA
### [TEST] ###
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'censored'] <- TRUE
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
### [TEST] ###
#}
# message("** Filtered out noninformative feature and outliers.")
# processout <- rbind(processout, c("** Filtered out noninformative feature and outliers."))
# write.table(processout, file=finalfile, row.names=FALSE)
#}
### end : v3.15.2 (2019/04/28) : by Tsung-Heng
}
if (featureSubset == "top3") {
message("** Use top3 features that have highest average of log2(intensity) across runs.")
processout <- rbind(processout, c("** Use top3 features that have highest average of log2(intensity) across runs."))
write.table(processout, file=finalfile, row.names=FALSE)
## INTENSITY vs ABUNDANCE? [THT: make more sense to use ABUNDANCE]
## how to decide top3 for DIA?
work$remove <- FALSE
worktemp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE != 0, ]
## updated on 2019.08.09, due to big memory consumption for lapply and unlist
#temp1 <- aggregate(INTENSITY~PROTEIN+FEATURE, data=work, function(x) mean(x, na.rm=TRUE))
#temp2 <- split(temp1, temp1$PROTEIN)
#temp3 <- lapply(tmp2, function(x) {
# x <- x[order(x$INTENSITY, decreasing=TRUE), ]
# x <- x$FEATURE[1:3]
# })
#selectfeature <- unlist(temp3, use.names=FALSE)
## rank features within each protein by mean intensity and keep the top 3
temp1 <- worktemp %>% group_by(PROTEIN, FEATURE) %>%
         summarize(mean = mean(INTENSITY, na.rm = TRUE)) %>%
         group_by(PROTEIN) %>%
         filter(row_number(desc(mean)) <= 3) ## updated on 2019.08.15, in order to get first row if there are ties.
#top_n(3)
selectfeature <- temp1$FEATURE
selectfeature <- selectfeature[!is.na(selectfeature)]
## end 2019.08.09
## get subset
work[-which(work$FEATURE %in% selectfeature), 'remove'] <- TRUE
}
if (featureSubset == "topN") {
## check whether there is the input for 'N'
## same algorithm as "top3" but with a user-supplied `n_top_feature`
message(paste0("** Use top", n_top_feature, " features that have highest average of log2(intensity) across runs."))
processout <- rbind(processout, c(paste0("** Use top", n_top_feature,
                                         " features that have highest average of log2(intensity) across runs.")))
write.table(processout, file=finalfile, row.names=FALSE)
## INTENSITY vs ABUNDANCE? [THT: make more sense to use ABUNDANCE]
## how to decide top3 for DIA?
work$remove <- FALSE
worktemp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE != 0, ]
## updated on 2019.08.09, due to big memory consumption for lapply and unlist
#temp1 <- aggregate(INTENSITY ~ PROTEIN+FEATURE, data=worktemp, function(x) mean(x, na.rm=TRUE))
#temp2 <- split(temp1, temp1$PROTEIN)
#temp3 <- lapply(temp2, function(x) {
# x <- x[order(x$INTENSITY, decreasing=TRUE), ]
# x <- x$FEATURE[1:n_top_feature]
#})
#selectfeature <- unlist(temp3, use.names=FALSE)
temp1 <- worktemp %>% group_by(PROTEIN, FEATURE) %>%
         summarize(mean = mean(INTENSITY, na.rm = TRUE)) %>%
         group_by(PROTEIN) %>%
         filter(row_number(desc(mean)) <= n_top_feature) ## updated on 2019.08.15, in order to get first row if there are ties.
#top_n(n_top_feature)
selectfeature <- temp1$FEATURE
selectfeature <- selectfeature[!is.na(selectfeature)]
## end 2019.08.09
## get subset
work[-which(work$FEATURE %in% selectfeature), 'remove'] <- TRUE
}
## check missingness
## transitions are completely missing in at least one of the condition : missingness ##
## `final.decision` ends up as a per-FEATURE indicator (>0 means the feature is
## completely missing in at least one condition); used for the summary report below.
if (nlevels(work$LABEL) == 1) {
#Use the data frame before imputation to summarize the missingness
all.work <- work
## per (condition, feature): count of NA abundances vs total observations
test <- tapply(is.na(work[, "ABUNDANCE"]), work[, c("GROUP_ORIGINAL", "FEATURE")], function(x) sum(x, na.rm=TRUE))
numObs <- tapply(work[, "ABUNDANCE"], work[, c("GROUP_ORIGINAL", "FEATURE")], function(x) length(x))
## TRUE where every observation in a condition is NA
test1 <- test == numObs
test2 <- apply(test1, 2, function(x) sum(x, na.rm=TRUE))
filterList <- names(test2)[test2 > 0]
final.decision <- ifelse(test2>0, 1, 0)
}
if (nlevels(work$LABEL) == 2) {
#Use the data frame before imputation to summarize the missingness
## first, remove NA
all.work <- work # with all NA observations
work.miss <- na.omit(work)
## draw table
light <- subset(work.miss, LABEL == "L")
heavy <- subset(work.miss, LABEL == "H")
## use FEATURE because the name of transition can be used in other peptide
count.light <- xtabs(~FEATURE+GROUP_ORIGINAL, light)
count.heavy <- xtabs(~FEATURE+GROUP_ORIGINAL, heavy)
count.light <- count.light==0
count.heavy <- count.heavy==0
count.light <- as.data.frame(count.light)
count.heavy <- as.data.frame(count.heavy)
## summary of missingness
decision <- count.light
decision[] <- 0
for (i in 1:ncol(decision)) {
for (j in 1:nrow(decision)) {
## either light or heavy has no obs -> subject to filter
if (count.light[j,i]==TRUE || count.heavy[j,i]==TRUE) {
decision[j,i] <- 1
}
}
}
final.decision <- apply(decision, 1, sum)
## assign "subject to filter" column
work <- data.frame(work, "SuggestToFilter"=0)
for(i in 1:length(final.decision)) {
## assign subject_to_filter=1 for entire transition
if (final.decision[i] != 0) {
work[work$FEATURE == names(final.decision[i]), "SuggestToFilter"] <- 1
}
}
}
## output : summary ##
## ---------------- ##
## Prints dataset summaries to the console and appends them to the processing
## log (`processout`, persisted to `finalfile` after each section).
## output for label
processout <- rbind(processout, c(paste0(length(unique(work$LABEL)),
                                         " level of Isotope type labeling in this experiment")))
write.table(processout, file=finalfile, row.names=FALSE)
temp <- data.frame("Summary of Features :")
colnames(temp) <- " "
rownames(temp) <- " "
print(temp)
## feature summary: protein count, peptides/protein range, transitions/peptide range
summary.f <- matrix(NA,nrow=3)
summary.f[1] <- nlevels(work$PROTEIN)
temp <- unique(work[, c("PROTEIN", "PEPTIDE")])
temp1 <- xtabs(~PROTEIN, data=temp)
temp2 <- summary(as.numeric(temp1))
summary.f[2] <- paste(temp2["Min."], temp2["Max."], sep="-")
temp <- unique(work[, c("PEPTIDE", "FEATURE")])
temp1 <- xtabs(~PEPTIDE, data=temp)
temp2 <- summary(as.numeric(temp1))
summary.f[3] <- paste(temp2["Min."], temp2["Max."], sep="-")
colnames(summary.f) <- "count"
rownames(summary.f) <- c("# of Protein", "# of Peptides/Protein", "# of Transitions/Peptide")
print(as.data.frame(summary.f))
## output for process
processout <- rbind(processout, c("Summary of Features :"))
processout <- rbind(processout, c(paste(rownames(summary.f)[1]," : ", summary.f[1], sep="")))
processout <- rbind(processout, c(paste(rownames(summary.f)[2]," : ", summary.f[2], sep="")))
processout <- rbind(processout, c(paste(rownames(summary.f)[3]," : ", summary.f[3], sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## protein list with 1 feature
temp <- unique(work[, c("PROTEIN", "FEATURE")])
temp1 <- xtabs(~PROTEIN, data=temp)
temp2 <- as.data.frame(temp1[temp1 == 1])
if (nrow(temp2) > 0) {
if(nrow(temp2) > 1){
## show at most 10 protein names in the warning
npro <- min(c(nrow(temp2), 10))
message("\n","** " , nrow(temp2),
        " Proteins have only single transition : Consider excluding this protein from the dataset. (",
        paste(temp2$PROTEIN[1:npro], collapse = ", "), " ...) \n")
} else {
message("\n","** " , nrow(temp2),
        " Proteins have only single transition : Consider excluding this protein from the dataset. (",
        rownames(temp2), ") \n")
}
}
temp <- data.frame("Summary of Samples :")
colnames(temp) <- " "
rownames(temp) <- " "
print(temp)
## sample summary per condition: runs, biological replicates, technical replicates
summary.s <- matrix(NA,ncol=nlevels(work$GROUP_ORIGINAL), nrow=3)
## # of MS runs
temp <- unique(work[, c("GROUP_ORIGINAL", "RUN")])
temp1 <- xtabs(~GROUP_ORIGINAL, data=temp)
summary.s[1,] <- temp1
## # of biological replicates
temp <- unique(work[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL")])
temp1 <- xtabs(~GROUP_ORIGINAL, data=temp)
summary.s[2,] <- temp1
## # of technical replicates
c.tech <- round(summary.s[1,] / (summary.s[2,] * length(unique(work$FRACTION))))
##summary.s[3,] <- ifelse(c.tech==1,0,c.tech)
summary.s[3,] <- c.tech
colnames(summary.s) <- unique(work$GROUP_ORIGINAL)
rownames(summary.s) <- c("# of MS runs","# of Biological Replicates", "# of Technical Replicates")
print(summary.s)
message("\n Summary of Missingness :\n" )
message(" # transitions are completely missing in at least one of the conditions : ", sum(final.decision!=0), "\n")
if (sum(final.decision!=0)!=0) {
tmp.final <- final.decision[final.decision != 0]
if( length(tmp.final) > 5 ){
message(" -> ", paste(names(tmp.final[1:5]),collapse = ", "), " ...")
} else {
message(" -> ", paste(names(tmp.final),collapse = ", "), " ...")
}
rm(tmp.final)
}
## per-run completeness ratio (observed rows / rows before NA handling)
without <- xtabs(~RUN, work)
withall <- xtabs(~RUN, all.work)
run.missing <- without / withall
message("\n # run with 75% missing observations: ", sum(run.missing<0.25), "\n")
if (sum(run.missing<0.25)!=0) {
message(" -> ", paste("RUN", names(without[run.missing<0.25]), sep=" "))
}
## output process
processout <- rbind(processout, c("Summary of Missingness :"))
processout <- rbind(processout, c(paste0(" # transitions are completely missing in at least one of the conditions : ",
                                         sum(final.decision!=0))))
if (sum(final.decision!=0)!=0){
tmp.final <- final.decision[final.decision != 0]
if( length(tmp.final) > 5 ){
processout <- rbind(processout," -> ", paste(names(tmp.final[1:5]), collapse = ", "), " ...")
} else {
processout <- rbind(processout," -> ", paste(names(tmp.final), collapse = ", "), " ...")
}
rm(tmp.final)
}
processout <- rbind(processout, c(paste0(" # run with 75% missing observations: ", sum(run.missing < 0.25))))
if (sum(run.missing < 0.25) != 0) {
processout <- rbind(processout, " -> ", paste("RUN", names(without[run.missing < 0.25]), sep=" "))
}
write.table(processout, file=finalfile, row.names=FALSE)
## check any protein has only light for labeled-experiment
if (nlevels(work$LABEL) == 2) {
temp <- unique(work[, c("PROTEIN", "LABEL")])
temp1 <- xtabs(~PROTEIN, data=temp)
if (any(temp1 != 2)) {
## check that is L or H
namepro <- names(temp1[temp1!=2])
for(j in 1:length(namepro)) {
if (unique(work[work$PROTEIN == namepro[j], "LABEL"]) == "L") {
message("\n *** ", namepro[j],
        " has only endogeneous intensities in label-based experiment. Please check this protein or remove it.")
}
if (unique(work[work$PROTEIN == namepro[j], "LABEL"]) == "H") {
message("\n *** ", namepro[j],
        " has only reference intensities in label-based experiment. Please check this protein or remove it.")
}
}
}
}
processout <- rbind(processout, c("Processing data for analysis is done. - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## get the summarization per subplot (per RUN)
## -------------------------------------------
## Run the protein-level summarization; on failure, return NULL components so
## the caller still receives a well-formed result list.
message("\n == Start the summarization per subplot...")
rqresult <- try(.runQuantification(work, summaryMethod, equalFeatureVar,
                                   cutoffCensored, censoredInt, remove50missing, MBimpute,
                                   original_scale=FALSE, logsum=FALSE, featureSubset,
                                   remove_uninformative_feature_outlier,
                                   message.show=FALSE, clusters=clusters), silent=TRUE)
if (class(rqresult) == "try-error") {
message("*** error : can't summarize per subplot with ", summaryMethod, ".")
processout <- rbind(processout, c(paste0("error : can't summarize per subplot with ", summaryMethod, ".")))
write.table(processout, file=finalfile, row.names=FALSE)
rqall <- NULL
rqmodelqc <- NULL
workpred <- NULL
} else {
label <- nlevels(work$LABEL) == 2
## attach design columns (GROUP/SUBJECT ids) to the run-level summaries;
## merge key depends on whether the result is per-run or per-subject
if (sum(is.element(colnames(rqresult$rqdata), "RUN")) == 0) {
## logsum is summarization per subject
lab <- unique(work[, c("GROUP", "GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "SUBJECT_NESTED", "SUBJECT")])
if (label) {
## drop reference-only rows (GROUP==0) for labeled experiments
lab <- lab[lab$GROUP != 0, ]
}
rqall <- merge(rqresult$rqdata, lab, by="SUBJECT_ORIGINAL")
} else {
lab <- unique(work[, c("RUN", "originalRUN", "GROUP", "GROUP_ORIGINAL",
                       "SUBJECT_ORIGINAL", "SUBJECT_NESTED", "SUBJECT")])
if (label) {
lab <- lab[lab$GROUP != 0, ]
}
rqall <- merge(rqresult$rqdata, lab, by="RUN")
}
rqall$GROUP <- factor(rqall$GROUP)
rqall$Protein <- factor(rqall$Protein)
rqmodelqc <- rqresult$ModelQC
#MC : can't use this predicted value.
#workpred <- rqresult$PredictedBySurvival
workpred <- NULL
message("\n == the summarization per subplot is done.")
processout <- rbind(processout, c(paste0("the summarization per subplot is done.- okay : ", summaryMethod)))
write.table(processout, file=finalfile, row.names=FALSE)
}
## return work data.frame and run quantification
#Align the run quantification data
if (any(is.element(colnames(rqall), "RUN"))) {
rqall <- rqall[order(rqall$Protein, as.numeric(as.character(rqall$RUN))), ]
rownames(rqall) <- NULL
}
#Mike: Below is for in-house verification occasionally
#processedquant <- list(ProcessedData=work.NoImpute, RunlevelData=rqall, SummaryMethod=summaryMethod, ModelQC=rqmodelqc, PredictBySurvival=workpred, ImputedData=AbundanceAfterImpute)
processedquant <- list(ProcessedData=work,
                       RunlevelData=rqall,
                       SummaryMethod=summaryMethod,
                       ModelQC=rqmodelqc,
                       PredictBySurvival=workpred)
return(processedquant)
}
########################################################
# Manual function allowing foreach to return a list of multiple variables
## Combine per-iteration results (e.g. from a foreach .combine step) into
## aligned lists: given a primary list `x` and any number of extra lists of
## the same length, the i-th output element is x[[i]] with the i-th element
## of each extra list appended.
resultsAsLists <- function(x, ...) {
  extras <- list(...)
  combined <- vector("list", length(x))
  for (idx in seq_along(x)) {
    appended <- lapply(extras, function(extra) extra[[idx]])
    combined[[idx]] <- c(x[[idx]], appended)
  }
  combined
}
########################################################
## ------------------------------------------------------------------------- ##
## .runQuantification : internal per-protein, per-run summarization.
## `data` is the processed feature-level frame (PROTEIN, FEATURE, RUN, LABEL,
## ABUNDANCE, ...). The visible portion handles setup (reference/endogenous
## split, feature removal, uninformative-feature filtering) and the
## summaryMethod == "linear" path (one linear model per protein via
## .fit.quantification.run). NOTE(review): the function body continues beyond
## this view; other summary methods are handled further down.
.runQuantification <- function(data, summaryMethod,
                               equalFeatureVar,
                               cutoffCensored, censoredInt,
                               remove50missing, MBimpute,
                               original_scale, logsum,
                               featureSubset,
                               remove_uninformative_feature_outlier,
                               message.show, clusters) {
##Since the imputation has been done before feature selection, delete the columns of censoring indicator to avoid imputing the same intensity again
#if(featureSubset == "highQuality") {
# data$cen <- NULL; data$pred <- NULL; data$INTENSITY <- 2^data$ABUNDANCE
#}
##If we want to impute again after the feature selection
#if(featureSubset == "highQuality" & ImputeAgain==TRUE) {
# data$ABUNDANCE <- data$ABUNDANCE.O
#}
data$LABEL <- factor(data$LABEL)
label <- nlevels(data$LABEL) == 2
# set ref which is distinguish reference and endogenous. any reference=0. endogenous is the same as RUN
if ( label ) {
data$ref <- 0
data$ref[data$LABEL != "H"] <- data$RUN[data$LABEL != "H"]
data$ref <- factor(data$ref)
}
## if there is 'remove' column (for topN or top3), remove TRUE
## v3.16.1 had error : no remove for remove column
## v3.16.2 fixes this but
if( any(is.element(colnames(data), 'remove')) ) {
data <- data[!data$remove, ]
}
### v3.15.2 (2019/04/29) by Meena
## blank out abundances flagged by the highQuality feature selection so they
## do not contribute to the per-protein models below
if( remove_uninformative_feature_outlier & any(is.element(colnames(data), 'feature_quality')) ) {
### v3.15.2 (2019/04/28) by Tsung-Heng
data[data$feature_quality == 'Uninformative', 'ABUNDANCE'] <- NA
data[data$is_outlier, 'ABUNDANCE'] <- NA
#data <- data[!(data$is_outlier | data$feature_quality == 'Noninformative'), ]
### end : v3.15.2 (2019/04/28) by Tsung-Heng
message("** Filtered out uninformative feature and outliers.")
}
### end : v3.15.2 (2019/04/29) by Meena
# for saving predicting value for impute option
predAbundance <- NULL
###################################
## method 1 : model based summarization
if (summaryMethod == "linear" & is.null(censoredInt)) {
data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
result <- NULL
dataafterfit <- NULL
## fit one model per protein; `sub.result` pre-filled with NA LogIntensities
## serves as the fallback row set when the fit fails
for(i in 1: nlevels(data$PROTEIN)) {
sub <- data[data$PROTEIN==levels(data$PROTEIN)[i],]
sub$SUBJECT_NESTED <- factor(sub$SUBJECT_NESTED)
sub$FEATURE <- factor(sub$FEATURE)
sub$RUN <- factor(sub$RUN)
if (!label) {
temp <- data.frame(xtabs(~RUN, data=sub))
sub.result <- data.frame(Protein=rep(unique(sub$PROTEIN),
                                     each=nlevels(sub$RUN)),
                         RUN=rep(c(levels(sub$RUN)),1),
                         LogIntensities=NA,
                         NumFeature=length(unique(sub$FEATURE)),
                         NumPeaks=temp$Freq)
} else {
sub$ref <- factor(sub$ref)
temp <- data.frame(xtabs(~ref, data=sub))
sub.result <- data.frame(Protein=rep(levels(data$PROTEIN)[i],each=nlevels(sub$ref)),RUN=rep(c(levels(sub$ref)[-1],"Ref"),1),LogIntensities=NA, NumFeature=length(unique(sub$FEATURE)),NumPeaks=c(temp[-1,"Freq"],temp[1,"Freq"]))
}
## data-shape flags drive the model formula inside .fit.quantification.run
singleFeature <- .checkSingleFeature(sub)
singleSubject <- .checkSingleSubject(sub)
TechReplicate <- .checkTechReplicate(sub) ## use for label-free model
##### fit the model
#if (message.show) {
message(paste("Getting the summarization per subplot for protein ",unique(sub$PROTEIN), "(",i," of ",length(unique(data$PROTEIN)),")"))
#}
fit <- try(.fit.quantification.run(sub, singleFeature, singleSubject, TechReplicate, labeled=label, equalFeatureVar), silent=TRUE)
if (class(fit)=="try-error") {
message("*** error : can't fit the model for ", levels(data$PROTEIN)[i])
result <- rbind(result, sub.result)
if (nrow(sub)!=0) {
sub$residuals <- NA
sub$fitted <- NA
}
} else {
if (class(fit)=="lm") {
cf <- summary(fit)$coefficients
}else{
cf <- fixef(fit)
}
# calculate sample quantification for all levels of sample
a=1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification(fit,contrast.matrix,sub, labeled=label)
if (class(fit)=="lm") {
sub.result[a,3] <- .estimableFixedQuantification(cf,contrast)
} else {
sub.result[a,3] <- .estimableRandomQuantification(cf,contrast)
}
a=a+1
}
## for label-based case, need reference quantification
if (label) {
contrast <- .make.contrast.run.quantification.reference(fit,contrast.matrix,sub)
if (class(fit)=="lm") {
sub.result[a, 3] <- .estimableFixedQuantification(cf,contrast)
}else{
sub.result[a, 3] <- .estimableRandomQuantification(cf,contrast)
}
}
result <- rbind(result, sub.result)
if (class(fit)=="lm") { ### lm model
sub$residuals <- fit$residuals
sub$fitted <- fit$fitted.values
} else { ### lmer model
sub$residuals <- resid(fit)
sub$fitted <- fitted(fit)
}
dataafterfit <- rbind(dataafterfit,sub)
}
} ## end-loop for each protein
} ## for linear model summary
###################################
## Method 2 : Tukey Median Polish
if (summaryMethod == "TMP") {
#data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
result <- NULL
## if cluster available,
if(!is.null(clusters)){
## create cluster for paralleled workflow
message(paste0("Cluster Size: ", clusters,"\n"))
registerDoSNOW(makeCluster(clusters, type = "SOCK"))
# for(i in 1: nlevels(data$PROTEIN)) {
pb <- txtProgressBar(max = nlevels(data$PROTEIN), style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
MS_results <- foreach(i=1: nlevels(data$PROTEIN),
.combine='resultsAsLists',
.options.snow = opts,
.multicombine=TRUE,
.init=list(list(), list())) %dopar% {
sub <- data[data$PROTEIN==levels(data$PROTEIN)[i], ]
sub.pro.id <- levels(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization by Tukey's median polish per subplot for protein ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
##### how to decide censored or not
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- 0
sub$cen <- ifelse(sub$censored, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- NA
sub$cen <- ifelse(sub$censored, 0, 1)
}
}
}
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
}
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
### 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## remove features which have only 1 measurement.
namefeature1 <- names(countfeature)[countfeature == 1]
if (length(namefeature1) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature1), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because features have only one measurement across MS runs."))
# next()
return(NULL)
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## check one more time
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE ==0)) ) {
message(paste("After removing features which has only 1 measurement, Can't summarize for ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
}
## remove run which has no measurement at all
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
### 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
count <- aggregate(ABUNDANCE ~ RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (remove50missing) {
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
}
}
numFea <- xtabs(~RUN, subtemp) ## RUN or run.label?
numFea <- numFea/length(unique(subtemp$FEATURE))
numFea <- numFea <= 0.5
removerunid <- names(numFea)[numFea]
## if all measurements are NA,
if (length(removerunid)==length(numFea)) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all runs have more than 50% NAs and are removed with the option, remove50missing=TRUE."))
# next()
return(NULL)
}
}
### check whether we need to impute or not.
if (sum(sub$cen == 0) > 0) {
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0,]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE~run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
if (MBimpute) {
if(!label){ ## label-free
if (nrow(sub[sub$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub[!is.na(sub$ABUNDANCE),]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
}else{
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
}else{
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
# get predicted value from survival
predicted <- predict(fittest, newdata=sub, type="response")
sub <- data.frame(sub, pred=ifelse(sub$censored & sub$LABEL == "L", predicted, NA))
# the replace censored value with predicted value
sub[sub$cen == 0, "ABUNDANCE"] <- sub[sub$cen == 0, "pred"]
# save predicted value
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub, type="response"))
}
} else { ## label-based
# only endogenous will be imputed
sub.h <- sub[sub$LABEL == 'H', ]
sub.l <- sub[sub$LABEL == 'L', ]
if (nrow(sub.l[sub.l$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub.l[!is.na(sub.l$ABUNDANCE),]
countdf <- nrow(subtemp)<(length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub.l$FEATURE))==1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
}else{
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
}else{
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub.l, dist='gaussian')
}
}
# get predicted value from survival
# sub.l <- data.frame(sub.l, pred=predict(fittest, newdata=sub.l, type="response"))
predicted <- predict(fittest, newdata=sub.l, type="response")
sub.l <- data.frame(sub.l, pred=ifelse(sub.l$censored & sub.l$LABEL == "L", predicted, NA))
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub.l, type="response"))
# the replace censored value with predicted value
sub.l[sub.l$cen == 0, "ABUNDANCE"] <- sub.l[sub.l$cen == 0, "pred"]
sub.h$pred <- NA
## for label-based, need to merge again
sub <- rbind(sub.h, sub.l)
}
}
}
}
## then, finally remove NA in abundance
sub <- sub[!is.na(sub$ABUNDANCE), ]
if (nlevels(sub$FEATURE) > 1) { ## for more than 1 features
if (!label) { ## label-free
data_w <- reshape2::dcast(RUN ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$RUN
data_w <- data_w[, -1]
data_w[data_w == 1] <- NA
if (!original_scale) {
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
## if fractionated sample, need to get per sample run
## ?? if there are technical replicates, how to match sample and MS run for different fractionation??
#if( length(unique(sub$METHOD)) > 1 ) {
# runinfo <- unique(sub[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "RUN", "METHOD")])
# runinfo$uniquesub <- paste(runinfo$GROUP_ORIGINAL, runinfo$SUBJECT_ORIGINAL, sep="_")
#}
} else { # original_scale
data_w <- 2^(data_w)
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
tmpresult <- log2(tmpresult)
}
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[!is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
subtemp$RUN <- factor(subtemp$RUN, levels = rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
subtempimpute$RUN <- factor(subtempimpute$RUN, levels = rownames(data_w))
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = tmpresult,
RUN = names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtemp$RUN <- factor(subtemp$RUN, levels =rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
LogIntensities=tmpresult,
RUN=names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF)
}
# result <- rbind(result, sub.result)
} else { ## labeled
data_w = reshape2::dcast(run.label ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$run.label
data_w <- data_w[, -1]
#data_w[data_w==1] <- NA
meddata <- medpolish(data_w, na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
reformresult <- data.frame(tmpresult)
end <- nchar(rownames(reformresult))
reformresult$LABEL <- substr(rownames(reformresult), end, end)
reformresult$RUN <- substr(rownames(reformresult), 1, end-2)
colnames(reformresult)[1] <- "ABUNDANCE"
## now single feature, adjust reference feature difference
h <- reformresult[reformresult$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
reformresult.logical <- reformresult$RUN == unique(h$RUN)[k]
reformresult.idx <- which(reformresult.logical)
reformresult[reformresult.idx, "ABUNDANCE"] <- reformresult[reformresult.idx, "ABUNDANCE"]-reformresult[reformresult.logical & reformresult$LABEL=="H","ABUNDANCE"]+allmed
}
reformresult <- reformresult[reformresult$LABEL == "L", ]
subtemp <- reformresult[!is.na(reformresult$ABUNDANCE), ]
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
subtempimpute <- sub[sub$LABEL == "L" & is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[subtemp$LABEL == "L" & !is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF)
}
# result <- rbind(result, sub.result)
}
} else { ## single feature
if (label) { ## label-based
## single feature, adjust reference feature difference
h <- sub[sub$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
subrun.logical <- sub$RUN == unique(h$RUN)[k]
subrun.idx <- which(subrun.logical)
sub[subrun.idx, "ABUNDANCE"] <- sub[subrun.idx, "ABUNDANCE"] - sub[subrun.logical & sub$LABEL == "H", "ABUNDANCE"]+allmed
}
sub <- sub[sub$LABEL == "L", ]
}
## single feature, use original values
subtemp <- sub[!is.na(sub$ABUNDANCE),]
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtempcount <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtempcount <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtempcount <- subtempcount[!is.na(subtempcount$ABUNDANCE) & subtempcount$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein=subtemp$PROTEIN,
LogIntensities=subtemp$ABUNDANCE,
RUN=subtemp$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtempcount <- subtemp
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=subtemp$PROTEIN,
LogIntensities=subtemp$ABUNDANCE,
RUN=subtemp$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF)
}
# result <- rbind(result, sub.result)
}
return(list(sub.result, predAbundance))
#return(list(sub.result))
} ## loop for proteins
close(pb)
#stopCluster(cl) # foreach autocloses
## Clean up the parallelized results
results.list <- list()
predAbundance.list <- list()
for(j in 1:length(MS_results[[1]])){
# deal with the "results" first
results.list[[j]] <- MS_results[[1]][[j]]
predAbundance.list[[j]] <- MS_results[[2]][[j]]
}
result <- do.call(rbind, results.list)
predAbundance <- do.call(c, predAbundance.list)
#predAbundance <- predAbundance[-which(duplicated(predAbundance))] # remove duplicates
dataafterfit <- NULL
} else {
##################
## no cluster
pb <- txtProgressBar(max = nlevels(data$PROTEIN), style = 3)
for(i in 1: nlevels(data$PROTEIN)) {
sub <- data[data$PROTEIN == levels(data$PROTEIN)[i], ]
sub.pro.id <- levels(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization by Tukey's median polish per subplot for protein ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
### how to decide censored or not
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- 0
sub$cen <- ifelse(sub$censored, 0, 1)
}
## 2. all censored missing
if (censoredInt == "NA") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- NA
sub$cen <- ifelse(sub$censored, 0, 1)
}
}
}
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
## 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## remove features which have only 1 measurement.
namefeature1 <- names(countfeature)[countfeature == 1]
if (length(namefeature1) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature1), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because features have only one measurement across MS runs."))
next()
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## check one more time
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("After removing features which has only 1 measurement, Can't summarize for ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
## 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
count <- aggregate(ABUNDANCE ~ RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (remove50missing) {
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
}
}
numFea <- xtabs(~RUN, subtemp) ## RUN or run.label?
numFea <- numFea/length(unique(subtemp$FEATURE))
numFea <- numFea <= 0.5
removerunid <- names(numFea)[numFea]
## if all measurements are NA,
if (length(removerunid)==length(numFea)) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all runs have more than 50% NAs and are removed with the option, remove50missing=TRUE."))
next()
}
}
## check whether we need to impute or not.
if (sum(sub$cen == 0) > 0) {
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
# sub[is.na(sub$ABUNDANCE) &
# sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE~run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
if (MBimpute) {
if(!label){ ## label-free
if (nrow(sub[sub$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub[!is.na(sub$ABUNDANCE),]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
# get predicted value from survival
#sub <- data.frame(sub, pred=predict(fittest, newdata=sub, type="response"))
predicted <- predict(fittest, newdata=sub, type="response")
sub <- data.frame(sub, pred=ifelse(sub$censored & sub$LABEL == "L", predicted, NA))
          # then replace the censored values with the predicted values
sub[sub$cen == 0, "ABUNDANCE"] <- sub[sub$cen == 0, "pred"]
# save predicted value
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub, type="response"))
}
} else { ## label-based
# only endogenous will be imputed
sub.h <- sub[sub$LABEL == 'H', ]
sub.l <- sub[sub$LABEL == 'L', ]
if (nrow(sub.l[sub.l$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub.l[!is.na(sub.l$ABUNDANCE),]
countdf <- nrow(subtemp)<(length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub.l$FEATURE))==1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub.l, dist='gaussian')
}
}
# get predicted value from survival
# sub.l <- data.frame(sub.l, pred=predict(fittest, newdata=sub.l, type="response"))
predicted <- predict(fittest, newdata=sub.l, type="response")
sub.l <- data.frame(sub.l, pred=ifelse(sub.l$censored & sub.l$LABEL == "L", predicted, NA))
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub.l, type="response"))
          # then replace the censored values with the predicted values
sub.l[sub.l$cen == 0, "ABUNDANCE"] <- sub.l[sub.l$cen == 0, "pred"]
sub.h$pred <- NA
## for label-based, need to merge again
sub <- rbind(sub.h, sub.l)
}
}
}
}
## then, finally remove NA in abundance
sub <- sub[!is.na(sub$ABUNDANCE), ]
if (nlevels(sub$FEATURE) > 1) { ## for more than 1 features
if (!label) { ## label-free
data_w <- reshape2::dcast(RUN ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$RUN
data_w <- data_w[, -1]
data_w[data_w == 1] <- NA
if (!original_scale) {
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
## if fractionated sample, need to get per sample run
## ?? if there are technical replicates, how to match sample and MS run for different fractionation??
#if( length(unique(sub$METHOD)) > 1 ) {
# runinfo <- unique(sub[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "RUN", "METHOD")])
# runinfo$uniquesub <- paste(runinfo$GROUP_ORIGINAL, runinfo$SUBJECT_ORIGINAL, sep="_")
#}
} else { # original_scale
data_w <- 2^(data_w)
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
tmpresult <- log2(tmpresult)
}
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[!is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
subtemp$RUN <- factor(subtemp$RUN, levels = rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
subtempimpute$RUN <- factor(subtempimpute$RUN, levels = rownames(data_w))
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = tmpresult,
RUN = names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtemp$RUN <- factor(subtemp$RUN, levels =rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
LogIntensities=tmpresult,
RUN=names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF)
}
result <- rbind(result, sub.result)
} else { ## labeled
data_w <- reshape2::dcast(run.label ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$run.label
data_w <- data_w[, -1]
#data_w[data_w==1] <- NA
meddata <- medpolish(data_w, na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
reformresult <- data.frame(tmpresult)
end <- nchar(rownames(reformresult))
reformresult$LABEL <- substr(rownames(reformresult), end, end)
reformresult$RUN <- substr(rownames(reformresult), 1, end-2)
colnames(reformresult)[1] <- "ABUNDANCE"
## now single feature, adjust reference feature difference
h <- reformresult[reformresult$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
reformresult.logical <- reformresult$RUN == unique(h$RUN)[k]
reformresult.idx <- which(reformresult.logical)
reformresult[reformresult.idx, "ABUNDANCE"] <- reformresult[reformresult.idx, "ABUNDANCE"]-reformresult[reformresult.logical & reformresult$LABEL=="H","ABUNDANCE"]+allmed
}
reformresult <- reformresult[reformresult$LABEL == "L", ]
subtemp <- reformresult[!is.na(reformresult$ABUNDANCE), ]
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
subtempimpute <- sub[sub$LABEL == "L" & is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[subtemp$LABEL == "L" & !is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF)
}
result <- rbind(result, sub.result)
}
} else { ## single feature
if (label) { ## label-based
## single feature, adjust reference feature difference
h <- sub[sub$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
subrun.logical <- sub$RUN == unique(h$RUN)[k]
subrun.idx <- which(subrun.logical)
sub[subrun.idx, "ABUNDANCE"] <- sub[subrun.idx, "ABUNDANCE"] - sub[subrun.logical & sub$LABEL == "H", "ABUNDANCE"]+allmed
}
sub <- sub[sub$LABEL == "L", ]
}
## single feature, use original values
subtemp <- sub[!is.na(sub$ABUNDANCE),]
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtempcount <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtempcount <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtempcount <- subtempcount[!is.na(subtempcount$ABUNDANCE) & subtempcount$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein=subtemp$PROTEIN,
LogIntensities=subtemp$ABUNDANCE,
RUN=subtemp$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtempcount <- subtemp
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=subtemp$PROTEIN,
LogIntensities=subtemp$ABUNDANCE,
RUN=subtemp$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF)
}
result <- rbind(result, sub.result)
}
## progress
setTxtProgressBar(pb, i)
} ## loop for proteins
close(pb)
dataafterfit <- NULL
}
}
###################################
## Method 3 : log sum
## retired on Aug 2 2016
###################################
## method 4 : survival model for censored missing values
if (summaryMethod == "linear" & !is.null(censoredInt)) {
#data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
if (label) {
result <- NULL
for(i in 1:length(unique(data$PROTEIN))) {
sub <- data[data$PROTEIN==unique(data$PROTEIN)[i],]
sub.pro.id <- unique(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization for censored missing values per subplot for protein ",
sub.pro.id, "(", i, " of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
## if all measurements are NA,
if (nrow(sub)==sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
count <- aggregate(ABUNDANCE~RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (length(unique(sub$RUN)) == 1) {
message(paste("* Only 1 MS run in ", levels(data$PROTEIN)[i],
" has measurement. Can't summarize with censored intensities."))
next()
}
## remove features which are completely NAs or zero
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
sub$FEATURE <- factor(sub$FEATURE)
}
##### how to decide censored or not
## 1. censored
if (censoredInt == "0") {
sub$cen <- ifelse(!is.na(sub$INTENSITY) & sub$INTENSITY == 0, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub$cen <- ifelse(is.na(sub$INTENSITY), 0, 1)
}
##### cutoffCensored
## 1. put minimum in protein level to NA
#if (cutoffCensored=="minEachProtein") {
# if (censoredInt=="NA") {
# cut <- min(sub$ABUNDANCE, na.rm=TRUE)
# sub[is.na(sub$INTENSITY),"ABUNDANCE"] <- cut
# }
# if (censoredInt=="0") {
# cut <- min(sub[!is.na(sub$INTENSITY) & sub$INTENSITY!=0,"ABUNDANCE"])
# sub[!is.na(sub$INTENSITY) & sub$INTENSITY==0,"ABUNDANCE"] <- cut
# }
#}
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$INTENSITY) & sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$INTENSITY) & sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$feature.label)) > 1) {
for(j in 1:length(unique(sub$feature.label))) {
for(k in 1:length(unique(sub$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$INTENSITY) & sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
          ## remove runs which have more than 50% missing values
          ## before removing, these runs still need to contribute to the min-feature cutoff calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$feature.label)) > 1) {
for(j in 1:length(unique(sub$feature.label))) {
for(k in 1:length(unique(sub$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
## when number of measurement is less than df, error for fitting
subtemp <- sub[!is.na(sub$ABUNDANCE), ]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
# with single feature, not converge, wrong intercept
# need to check
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN+ref,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN+ref,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN+ref,
data=sub, dist='gaussian')
}
}
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
RUN=rep(c(levels(sub$RUN)), 1),
LogIntensities=NA)
# get the parameters
cf <- summary(fittest)$coefficients
# calculate sample quantification for all levels of sample
a <- 1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification.Survival(fittest, contrast.matrix,sub, labeled=TRUE)
sub.result[a, 3] <- .estimableFixedQuantificationSurvival(cf, contrast)
a <- a+1
}
result <- rbind(result, sub.result)
}
datamat <- reshape2::dcast( Protein ~ RUN, data=result, value.var='LogIntensities', keep=TRUE)
datamat <- melt(datamat, id.vars=c('Protein'))
colnames(datamat) <- c('Protein', 'RUN', 'LogIntensities')
result <- datamat
} else {
result <- NULL
for(i in 1:length(unique(data$PROTEIN))) {
sub <- data[data$PROTEIN == unique(data$PROTEIN)[i], ]
sub.pro.id <- unique(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization for censored missing values per subplot for protein ",
sub.pro.id, "(", i, " of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
subtemp <- sub[!is.na(sub$INTENSITY), ]
count <- aggregate(ABUNDANCE~RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN)))) != 0) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (length(unique(sub$RUN)) == 1) {
message(paste("* Only 1 MS run in ", levels(data$PROTEIN)[i],
" has measurement. Can't summarize with censored intensities."))
next()
}
## remove features which are (completely NAs or zero)
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
sub$FEATURE <- factor(sub$FEATURE)
}
if (nrow(sub) == 0) {
message(paste("* All measurements are NAs or only one measurement per feature in ",
levels(data$PROTEIN)[i], ". Can't summarize with censored intensities."))
next()
}
##### how to decide censored or not
## 1. censored
if (censoredInt == "0") {
sub$cen <- ifelse(!is.na(sub$INTENSITY) & sub$INTENSITY == 0, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub$cen <- ifelse(is.na(sub$INTENSITY), 0, 1)
}
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ FEATURE, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$FEATURE))) {
sub[is.na(sub$INTENSITY) & sub$FEATURE == cut$FEATURE[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ FEATURE, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$FEATURE))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$FEATURE == cut$FEATURE[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE~RUN, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$RUN))) {
sub[is.na(sub$INTENSITY) & sub$RUN == cut$RUN[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE~RUN, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$RUN))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 & sub$RUN==cut$RUN[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ FEATURE, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ RUN, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$FEATURE)) > 1) {
for(j in 1:length(unique(sub$FEATURE))) {
for(k in 1:length(unique(sub$RUN))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[is.na(sub$INTENSITY) & sub$FEATURE == cut.fea$FEATURE[j] &
sub$RUN == cut.run$RUN[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ FEATURE, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
cut.run <- aggregate(ABUNDANCE ~ RUN, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$FEATURE)) > 1) {
for(j in 1:length(unique(sub$FEATURE))) {
for(k in 1:length(unique(sub$RUN))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$FEATURE == cut.fea$FEATURE[j] & sub$RUN == cut.run$RUN[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
## when number of measurement is less than df, error for fitting
subtemp <- sub[!is.na(sub$ABUNDANCE), ]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
RUN=rep(c(levels(sub$RUN)), 1),
LogIntensities=NA)
# get the parameters
cf <- summary(fittest)$coefficients
# calculate sample quantification for all levels of sample
a <- 1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification.Survival(fittest, contrast.matrix,sub, labeled=FALSE)
sub.result[a, 3] <- .estimableFixedQuantificationSurvival(cf, contrast)
a <- a+1
}
result <- rbind(result, sub.result)
}
datamat <- reshape2::dcast( Protein ~ RUN, data=result, value.var='LogIntensities', keep=TRUE)
datamat <- melt(datamat, id.vars=c('Protein'))
colnames(datamat) <- c('Protein','RUN','LogIntensities')
result <- datamat
}
dataafterfit <- NULL
}
###################################
## final result
finalout <- list(rqdata=result, ModelQC=dataafterfit, PredictedBySurvival=predAbundance)
return(finalout)
}
##########################################################################################
## updated v3
.fit.quantification.run <- function(sub, singleFeature, singleSubject, TechReplicate, labeled, equalFeatureVar) {
  ## Fit the per-protein run-quantification linear model.
  ##
  ## sub             : data for one protein with columns ABUNDANCE, FEATURE, RUN
  ##                   (and ref for label-based experiments)
  ## singleFeature   : TRUE when the protein has only one feature, so the
  ##                   FEATURE term is dropped from the model
  ## singleSubject, TechReplicate : accepted for interface compatibility;
  ##                   not used by this fit (presumably kept for callers) — TODO confirm
  ## labeled         : TRUE for label-based acquisition; adds the ref term
  ## equalFeatureVar : when FALSE, refit with iterative weighted least squares
  ##                   to allow feature-specific variances
  ##
  ## Returns the fitted lm object.
  if (!labeled) {
    ## label-free: model abundance by run, plus feature when there are several
    if (singleFeature) {
      ## single feature: the observed value itself drives the run estimate
      fitted.model <- lm(ABUNDANCE ~ RUN , data = sub)
    } else {
      fitted.model <- lm(ABUNDANCE ~ FEATURE + RUN , data = sub)
    }
  } else {
    ## label-based: include the reference-channel term 'ref'
    if (singleFeature) {
      fitted.model <- lm(ABUNDANCE ~ RUN+ref , data = sub)
    } else {
      fitted.model <- lm(ABUNDANCE ~ FEATURE+RUN+ref , data = sub)
    }
  }
  ## unequal feature variance: one round of iteratively re-weighted least squares
  if (!equalFeatureVar) {
    fitted.model <- .iter.wls.fit.model(data=sub, fit=fitted.model, nrepeats=1)
  }
  fitted.model
}
#############################################
# check whether there are multiple runs for a replicate
# if yes, normalization should be different way.
#############################################
.countMultiRun <- function(data) {
  ## Decide whether each biological sample was acquired over multiple MS runs
  ## (fractionation) rather than a single run per sample.
  ##
  ## With a balanced design (NA rows filled in), fractionation would show up as
  ## different numbers of measurements per run; however, after the converter
  ## functions produce a balanced design, fractionation can no longer be
  ## detected that way (note from 2017-05-24), so a feature-overlap heuristic
  ## is used instead.
  ##
  ## Returns list(out = TRUE when multiple runs per sample are detected,
  ##              is.risky = TRUE when the heuristic cannot decide and
  ##                         explicit fraction/replicate info is needed)
  ambiguous <- FALSE
  if ("FRACTION" %in% colnames(data)) {
    ## fraction annotation already provided: multiple runs by definition
    multi.run <- TRUE
  } else {
    ## no fraction info; first check whether any condition*bioreplicate
    ## combination has more than one run at all
    run.info <- unique(data[, c('GROUP_ORIGINAL', 'SUBJECT_ORIGINAL', 'RUN')])
    run.info$condition <- paste(run.info$GROUP_ORIGINAL, run.info$SUBJECT_ORIGINAL, sep="_")
    runs.per.sample <- xtabs(~ condition, run.info)
    if ( !any(runs.per.sample > 1) ){
      ## exactly one run per condition*bioreplicate: no technical replicates
      ## and no fractionation
      multi.run <- FALSE
    } else {
      ## several runs per sample: distinguish technical replicates (runs share
      ## most features) from fractionation (runs measure mostly disjoint
      ## features), using the first sample as representative
      observed <- data[!is.na(data$ABUNDANCE), ]
      first.sample <- run.info[run.info$condition == unique(run.info$condition)[1], ]
      sample.data <- observed[observed$GROUP_ORIGINAL == unique(first.sample$GROUP_ORIGINAL) &
                                observed$SUBJECT_ORIGINAL == unique(first.sample$SUBJECT_ORIGINAL), ]
      ## features seen in the first run serve as the reference feature set
      reference.features <- unique(sample.data[sample.data$RUN == unique(sample.data$RUN[1]),
                                               "FEATURE"])
      sample.data$RUN <- factor(sample.data$RUN)
      ## per run: count how many of its features fall in the reference set
      overlap.count <- tapply (sample.data$FEATURE,
                               sample.data$RUN,
                               function ( x ) length(intersect(unique(x), reference.features)) )
      overlap.fraction <- (overlap.count)[-1] / max(unique(overlap.count))
      if ( all( overlap.fraction > 0.5 ) ){
        ## every run shares more than half of the reference features:
        ## technical replicates, not fractionation
        multi.run <- FALSE
      } else if ( all( overlap.fraction < 0.5 ) ) {
        ## runs measure mostly different features: fractionation
        multi.run <- TRUE
      } else {
        ## mixed overlap: cannot automatically separate fractionation from
        ## technical replication; explicit fraction (and technical-replicate)
        ## information would be required
        multi.run <- FALSE
        ambiguous <- TRUE
      }
    }
  }
  result <- list(out = multi.run,
                 is.risky = ambiguous)
  return(result)
}
| /R/DataProcess.R | no_license | bpolacco/MSstats | R | false | false | 234,312 | r |
#############################################
## dataProcess
#############################################
#' @export dataProcess
#' @import survival
#' @import preprocessCore
#' @import statmod
#' @importFrom reshape2 dcast melt
#' @importFrom stats medpolish aggregate t.test lm summary.lm fitted resid p.adjust
#' @importFrom stats C approx coef cor dist formula loess median na.omit
#' @importFrom stats predict pt qnorm qt quantile reshape rnorm runif sd var vcov xtabs
#' @importFrom utils head read.table sessionInfo setTxtProgressBar txtProgressBar write.csv write.table
#' @importFrom methods validObject
#' @importFrom doSNOW registerDoSNOW
#' @importFrom snow makeCluster
#' @importFrom foreach foreach %dopar%
#' @importFrom dplyr filter n
#' @importFrom tidyr gather
dataProcess <- function(raw,
logTrans=2,
normalization="equalizeMedians",
nameStandards=NULL,
address="",
fillIncompleteRows=TRUE,
featureSubset="all",
remove_uninformative_feature_outlier=FALSE,
n_top_feature=3,
summaryMethod="TMP",
equalFeatureVar=TRUE,
censoredInt="NA",
cutoffCensored="minFeature",
MBimpute=TRUE,
remove50missing=FALSE,
maxQuantileforCensored=0.999,
clusters=NULL) {
## save process output in each step
allfiles <- list.files()
num <- 0
filenaming <- "msstats"
finalfile <- "msstats.log"
while(is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".log")
}
session <- sessionInfo()
sink("sessionInfo.txt")
print(session)
sink()
processout <- as.matrix(read.table("sessionInfo.txt", header=TRUE, sep="\t"))
write.table(processout, file=finalfile, row.names=FALSE)
processout <- rbind(processout, as.matrix(c(" "," ","MSstats - dataProcess function"," "), ncol=1))
## make case-insensitive for function options
## ------------------------------------------
normalization <- toupper(normalization)
## Check correct option or input
## check right column in input
requiredinput <- c("ProteinName", "PeptideSequence", "PrecursorCharge",
"FragmentIon", "ProductCharge", "IsotopeLabelType",
"Condition", "BioReplicate", "Run", "Intensity")
## [THT: disambiguation for PeptideSequence & PeptideModifiedSequence - begin]
## PeptideModifiedSequence is also allowed.
requiredInputUpper <- toupper(requiredinput)
providedInputUpper <- toupper(colnames(raw))
if (all(requiredInputUpper %in% providedInputUpper)) {
processout <- rbind(processout, c("The required input : provided - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
} else if (all(setdiff(requiredInputUpper, "PEPTIDESEQUENCE") %in% providedInputUpper) && "PEPTIDEMODIFIEDSEQUENCE" %in% providedInputUpper) {
processout <- rbind(processout, c("The required input : provided - okay"))
write.table(processout, file = finalfile, row.names = FALSE)
# if PeptideModifiedSequence is provided instead of PeptideSequence,
# change the column name as PeptideSequence
colnames(raw)[which(providedInputUpper == "PEPTIDEMODIFIEDSEQUENCE")] <- "PeptideSequence"
} else {
missedInput <- which(!(requiredInputUpper %in% providedInputUpper))
processout <- rbind(processout, c(paste("ERROR : The required input : ",
paste(requiredinput[missedInput], collapse = ", "),
" are not provided in input - stop")))
write.table(processout, file = finalfile, row.names = FALSE)
stop("Please check the required input. The required input needs (ProteinName, PeptideSequence (or PeptideModifiedSequence), PrecursorCharge, FragmentIon, ProductCharge, IsotopeLabelType, Condition, BioReplicate, Run, Intensity)")
}
## [THT: disambiguation for PeptideSequence & PeptideModifiedSequence - end]
## check logTrans is 2,10 or not
if (logTrans!=2 & logTrans!=10) {
processout <- rbind(processout,c("ERROR : Logarithm transformation : log2 or log10 only - stop"))
write.table(processout, file=finalfile,row.names=FALSE)
stop("Only log2 or log10 are posssible.\n")
}
## check no row for some feature : balanced structure or not
if (!(fillIncompleteRows==TRUE | fillIncompleteRows==FALSE) | !is.logical(fillIncompleteRows)) {
processout <- rbind(processout, c(paste("The required input - fillIncompleteRows : 'fillIncompleteRows' value is wrong. It should be either TRUE or FALSE. - stop")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'fillIncompleteRows' must be one of TRUE or FALSE as a logical value.")
}
## check input for summaryMethod
if (sum(summaryMethod == c("linear", "TMP")) == 0) {
processout <- rbind(processout,c("The required input - summaryMethod : 'summaryMethod' value is wrong. It should be one of 'TMP' or 'linear'. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'summaryMethod' value is wrong. It should be one of 'TMP' or 'linear'.")
} else {
processout <- rbind(processout, c(paste("summaryMethod : ", as.character(summaryMethod), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for cutoffCensored
if (sum(cutoffCensored==c("minFeature","minRun","minFeatureNRun"))==0) {
processout <- rbind(processout,c("The required input - cutoffCensored : 'cutoffCensored' value is wrong. It should be one of 'minFeature','minRun','minFeatureNRun'. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'cutoffCensored' value is wrong. It should be one of 'minFeature','minRun','minFeatureNRun'.")
} else {
processout <- rbind(processout,c(paste("cutoffCensored : ",as.character(cutoffCensored), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for censoredInt
if (sum(censoredInt == c("0", "NA")) == 0 & !is.null(censoredInt)) {
processout <- rbind(processout,c("The required input - censoredInt : 'censoredInt' value is wrong.
It should be one of '0','NA', NULL. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'censoredInt' value is wrong. It should be one of '0','NA',NULL.")
} else {
processout <- rbind(processout, c(paste("censoredInt : ", as.character(censoredInt), sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## check input for censoredInt and MBimpute
if ( summaryMethod == 'TMP' & MBimpute & is.null(censoredInt) ) {
processout <- rbind(processout, c("The rcombination of equired input -
censoredInt and MBimpute : 'censoredInt=NULL' has no censored missing values.
Imputation will not be performed.- stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'censoredInt=NULL' means that dataset has no censored missing value and MSstats will not impute.
But, 'MBimpute=TRUE' is selected. Please replace by 'MBimpute=FALSE' or censoredInt='NA' or '0'")
}
## [THT: if (!all(normalization %in% c("NONE", "FALSE", "EQUALIZEMEDIANS", "QUANTILE", "GLOBALSTANDARDS")))]
## [THT: send a warning message if the user mixes "NONE" with any of the last three choices]
if (!(normalization=="NONE" | normalization=="FALSE" |
normalization=="EQUALIZEMEDIANS" | normalization=="QUANTILE" |
normalization=="GLOBALSTANDARDS")) {
processout <- rbind(processout,c(paste("The required input - normalization : 'normalization' value is wrong. - stop")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("'normalization' must be one of \"None\", \"FALSE\", \"equalizeMedians\",
\"quantile\", or \"globalStandards\". Please assign 'normalization' again.")
}
## need the names of global standards
if (!is.element("NONE",normalization) &
!is.element("FALSE",normalization) &
is.element("GLOBALSTANDARDS",normalization) &
is.null(nameStandards)) {
processout <- rbind(processout, c("ERROR : For normalization with global standards,
the names of global standards are needed. Please add 'nameStandards' input."))
write.table(processout, file=finalfile,row.names=FALSE)
stop ("For normalization with global standards, the names of global standards are needed.
Please add 'nameStandards' input." )
}
## check whether class of intensity is factor or chaterer, if yes, neec to chage as numeric
if (is.factor(raw$Intensity) | is.character(raw$Intensity)) {
suppressWarnings(raw$Intensity <- as.numeric(as.character(raw$Intensity)))
}
## check whether the intensity has 0 value or negative value
# if (length(which(raw$Intensity<=0))>0 & !skylineReport) {
# if (is.null(censoredInt)) {
# processout <- rbind(processout,c("ERROR : There are some intensities which are zero or negative values. need to change them. - stop"))
# write.table(processout, file=finalfile,row.names=FALSE)
# stop("Intensity has 0 or negative values. Please check these intensities and change them. \n")
# } else if (censoredInt=="NA") {
# processout <- rbind(processout,c("ERROR : There are some intensities which are zero or negative values. need to change them. - stop"))
# write.table(processout, file=finalfile,row.names=FALSE)
# stop("Intensity has 0 or negative values. Please check these intensities and change them. \n")
# }
#}
## here, need to get standard protein name
## column name : standardtype..
## what value it has, normzalition, unique(proteinname)
## if normalition== "standard" & no normalizaion selection, error message
## annotation information :
if ( any(is.na(raw$Run)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'Run' column. Please check 'Run' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'Run' column. Please check 'Run' column." )
}
if ( any(is.na(raw$BioReplicate)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'BioReplicate' column.
Please check 'BioReplicate' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'BioReplicate' column. Please check 'BioReplicate' column." )
}
if ( any(is.na(raw$Condition)) ) {
processout <- rbind(processout, c("ERROR : There is missing information in 'Condition' column.
Please check 'Condition' column."))
write.table(processout, file=finalfile, row.names=FALSE)
stop ("There is missing information in 'Condition' column. Please check 'Condition' column." )
}
## make letters case-insensitive
colnames(raw) <- toupper(colnames(raw))
if( any(is.element(colnames(raw), 'FRACTION')) ) {
fraction <- 'FRACTION'
} else {
fraction <- NULL
}
if( any(is.element(colnames(raw), 'TECHREPLICATE')) ) {
tech.rep <- 'TECHREPLICATE'
} else {
tech.rep <- NULL
}
require.col <- c("PROTEINNAME", "PEPTIDESEQUENCE", "PRECURSORCHARGE",
"FRAGMENTION", "PRODUCTCHARGE", "ISOTOPELABELTYPE",
"CONDITION", "BIOREPLICATE", "RUN", "INTENSITY", fraction, tech.rep)
raw.temp <- raw[, require.col]
## before remove, get PeptideSequence and combination of PeptideSequence and precursorcharge for global standard normalization
tempPeptide <- unique(raw[, c("PEPTIDESEQUENCE", "PRECURSORCHARGE")])
tempPeptide$PEPTIDE <- paste(tempPeptide$PEPTIDESEQUENCE, tempPeptide$PRECURSORCHARGE, sep="_")
rm(raw)
## assign peptide, transition
raw.temp <- data.frame(raw.temp,
PEPTIDE=paste(raw.temp$PEPTIDESEQUENCE, raw.temp$PRECURSORCHARGE, sep="_"),
TRANSITION=paste(raw.temp$FRAGMENTION, raw.temp$PRODUCTCHARGE, sep="_"))
if (length(unique(raw.temp$ISOTOPELABELTYPE)) > 2) {
processout <- rbind(processout, c("ERROR : There are more than two levels of labeling.
So far, only label-free or reference-labeled experiment are supported. - stop"))
write.table(processout, file=finalfile, row.names=FALSE)
stop("Statistical tools in MSstats are only proper for label-free or with reference peptide experiments.")
}
## change light, heavy -> L,H
## [THT: should check if users really provide light/heavy, L/H, l/h, or something else ]
## [THT: should also check if users provide only H (instead of L)]
raw.temp$ISOTOPELABELTYPE <- factor(raw.temp$ISOTOPELABELTYPE)
if (nlevels(raw.temp$ISOTOPELABELTYPE) == 2) {
levels(raw.temp$ISOTOPELABELTYPE) <- c("H", "L")
}
if (nlevels(raw.temp$ISOTOPELABELTYPE) == 1) {
levels(raw.temp$ISOTOPELABELTYPE) <- c("L")
}
if( any(is.element(colnames(raw.temp), 'FRACTION')) ) {
fraction <- 'FRACTION'
} else {
fraction <- NULL
}
if( any(is.element(colnames(raw.temp), 'TECHREPLICATE')) ) {
tech.rep <- 'TECHREPLICATE'
} else {
tech.rep <- NULL
}
require.col <- c("PROTEINNAME", "PEPTIDE", "TRANSITION", "ISOTOPELABELTYPE",
"CONDITION", "BIOREPLICATE", "RUN", "INTENSITY",
fraction, tech.rep)
raw.temp <- raw.temp[, require.col]
if( ncol(raw.temp) == 10 &
any(is.element(colnames(raw.temp), 'FRACTION')) &
any(is.element(colnames(raw.temp), 'TECHREPLICATE'))) {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity", 'Fraction', 'TechReplicate')
} else if( ncol(raw.temp) == 9 &
any(is.element(colnames(raw.temp), 'FRACTION')) ) {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity", 'Fraction')
} else {
colnames(raw.temp) <- c("Protein", "Peptide", "Transition", "Label",
"Condition", "Sample", "Run", "Intensity")
}
## create work data for quant analysis
## -----------------------------------
raw.temp <- raw.temp[!is.na(raw.temp$Protein), ]
raw.temp <- raw.temp[raw.temp$Protein != '', ]
work <- data.frame(PROTEIN=raw.temp$Protein,
PEPTIDE=raw.temp$Peptide,
TRANSITION=raw.temp$Transition,
FEATURE=paste(raw.temp$Peptide, raw.temp$Transition, sep="_"),
LABEL=raw.temp$Label,
GROUP_ORIGINAL=raw.temp$Condition,
SUBJECT_ORIGINAL=raw.temp$Sample,
RUN=raw.temp$Run,
GROUP=0,
SUBJECT=0,
INTENSITY=raw.temp$Intensity)
work$GROUP_ORIGINAL <- factor(work$GROUP_ORIGINAL)
work$SUBJECT_ORIGINAL <- factor(work$SUBJECT_ORIGINAL, levels=unique(work$SUBJECT_ORIGINAL))
work$LABEL <- factor(work$LABEL, levels=levels(work$LABEL))
work[work$LABEL=="L", "GROUP"] <- work[work$LABEL=="L", "GROUP_ORIGINAL"]
work[work$LABEL=="L", "SUBJECT"] <- work[work$LABEL=="L", "SUBJECT_ORIGINAL"]
work <- data.frame(work, SUBJECT_NESTED=paste(work$GROUP, work$SUBJECT, sep="."))
if( any(is.element(colnames(raw.temp), 'Fraction')) ) {
work <- data.frame(work, FRACTION = raw.temp$Fraction)
}
if( any(is.element(colnames(raw.temp), 'TechReplicate')) ) {
work <- data.frame(work, TECHREPLICATE = raw.temp$TechReplicate)
}
processout <- rbind(processout, c("New input format : made new columns for analysis - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## 2016. 08.29 : replace <1 with zero for log2(intensity)
if ( length(which(!is.na(work$INTENSITY) & work$INTENSITY < 1)) > 0 ) {
processout <- rbind(processout, c(paste0("** There are ",
length(which(!is.na(work$INTENSITY) & work$INTENSITY < 1)),
" intensities which are zero. These intensities are replaced with 1.")))
write.table(processout, file=finalfile, row.names=FALSE)
message(paste0("** There are ", length(which(!is.na(work$INTENSITY) & work$INTENSITY < 1)),
" intensities which are zero or less than 1. These intensities are replaced with 1."))
work[!is.na(work$INTENSITY) & work$INTENSITY < 1, 'INTENSITY'] <- 1
}
## log transformation
work$ABUNDANCE <- work$INTENSITY
## now, INTENSITY keeps original values.
## NA means no observation. assume that spectral tools are not report if no observation. zero means detected but zero.
## considered intenseity <1 -> intensity = 1
## work[!is.na(work$ABUNDANCE) & work$ABUNDANCE==0,"ABUNDANCE"] <- 1
## based on logTrans option, assign log transformation
## remove log2 or log10 intensity
### [THT: add one more conidtion to have the program complain if a user
### provide unexpected value for logTrans]
if (logTrans == 2) {
work$ABUNDANCE <- log2(work$ABUNDANCE)
} else if (logTrans == 10) {
work$ABUNDANCE <- log10(work$ABUNDANCE)
}
processout <- rbind(processout,
c(paste0("Logarithm transformation: log", logTrans,
" transformation is done - okay")))
write.table(processout, file=finalfile, row.names=FALSE)
## Check multi-method or not : multiple run for a replicate
work$RUN <- factor(work$RUN)
checkMultirun <- .countMultiRun(work)
if ( checkMultirun$is.risky ){
## if can't matching fractionation, make warning and stop it.
stop('** MSstats suspects that there are fractionations and potentially technical replicates too. Please add Fraction column in the input.')
} else if ( checkMultirun$out ) {
if ( any(is.element(colnames(work), 'FRACTION')) ){
processout <- rbind(processout,
c(paste("Multiple fractionations are existed : ",
length(unique(work$FRACTION)),
"fractionations per MS replicate.")))
write.table(processout, file=finalfile, row.names=FALSE)
} else {
## need to make new column 'Fraction'
## each sample has no technical replicate, all runs are fractionated MS runs.
work$FRACTION <- NA
info <- unique(work[, c('GROUP_ORIGINAL', 'SUBJECT_ORIGINAL', 'RUN')])
info$condition <- paste(info$GROUP_ORIGINAL, info$SUBJECT_ORIGINAL, sep="_")
tmp <- work[!is.na(work$ABUNDANCE), ]
## get on one sample first
info.sample1 <- info[info$condition == unique(info$condition)[1], ]
## assign fraction info first
info.sample1$FRACTION <- seq(1, nrow(info.sample1))
for(k in 1:length(unique(info.sample1$RUN))){
## then fine the same fraction for next sample
unique.feature <- unique( tmp[tmp$RUN %in% info.sample1$RUN[k], 'FEATURE'] )
tmptmp <- tmp[which(tmp$FEATURE %in% unique.feature), ]
tmptmp$condition <- paste(tmptmp$GROUP_ORIGINAL, tmptmp$SUBJECT_ORIGINAL, sep="_")
count.feature <- reshape2::dcast(RUN ~ GROUP_ORIGINAL + SUBJECT_ORIGINAL,
data=tmptmp, fun.aggregate=length, value.var='ABUNDANCE')
## !! get one run which has maximum overlapped feature by each sample
same.frac <- apply(count.feature[,-which(colnames(count.feature) %in% c('RUN'))], 2,
function(x) count.feature[which.max(x), 'RUN'])
work[ which(work$RUN %in% same.frac), 'FRACTION'] <- info.sample1[ which(info.sample1$RUN %in% info.sample1$RUN[k]), 'FRACTION']
}
rm(tmp)
## final check up
checkup <- sum( is.na(unique(work$FRACTION)) ) > 0
if ( !checkup ){
processout <- rbind(processout, c(paste("Multiple fractions are existed : ",
length(unique(work$FRACTION)), "fractions per MS replicate.")))
write.table(processout, file=finalfile, row.names=FALSE)
} else {
processout <- rbind(processout, c('** It is hard to find the same fractionation across sample, due to lots of overlapped features between fractionations.
Please add Fraction column in input.'))
write.table(processout, file=finalfile, row.names=FALSE)
stop("** It is hard to find the same fractionation across sample, due to lots of overlapped features between fractionations.
Please add Fraction column in input.")
}
}
################################################
## need additional step that remove overlapped features across several fraction
################################################
if ( length(unique(work$FRACTION)) > 1 ){
## extra info for feature and fraction
work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
count.feature <- reshape2::dcast(FEATURE ~ FRACTION,
data=tmp,
fun.aggregate=length,
value.var='ABUNDANCE')
rm(tmp)
## 1. first, keep features which are measured in one fraction
count.fraction <- apply(count.feature[, -which(colnames(count.feature) %in% c('FEATURE'))],
1,
function(x) sum(x>0))
# keep.feature <- count.feature[count.fraction == 1, 'FEATURE']
## 2. second, if features are measured in multiple fractionations,
## use the fractionation with maximum number of measurements.
## if there are multiple maximum number of measurements, remove features completely.
## count.feature1 : features that are measured in multiple fractions
count.feature1 <- count.feature[count.fraction > 1, ]
if( nrow(count.feature1) > 0 ){
## how many fractions have maximum number of measurements?
count.fraction <- apply(count.feature1[, -which(colnames(count.feature1) %in% c('FEATURE'))],
1,
function(x) sum(x == max(x)))
## 2.1 count.fraction == 1 means that there is one fraction that have one maximum # measurements.
## count.feature2 : features that measured in multiple fractions.
## however, it has one fraction with max number of measurements across fractions.
count.feature2 <- count.feature1[count.fraction == 1, ]
count.feature2$FEATURE <- as.character(count.feature2$FEATURE)
if( nrow(count.feature2) > 0 ){
#remove.fraction <- apply(count.feature2, 1,
# function(x) paste(x[1], names(x[-1])[x[-1] != max(x[-1]) & x[-1] != 0], sep="_") )
#remove.fraction <- unlist(remove.fraction)
remove.fraction <- gather(count.feature2, 'Fraction', 'ncount', 2:ncol(count.feature2))
remove.fraction <- remove.fraction %>% group_by(FEATURE) %>% filter(ncount != max(ncount))
remove.fraction <- remove.fraction %>% filter(ncount != 0)
remove.fraction$tmp <- paste(remove.fraction$FEATURE, remove.fraction$Fraction, sep="_")
work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
}
rm(count.feature2)
rm(remove.fraction)
## 2.2 count.fraction > 1 means that there are multiple fractions have the same # measurements.
## Then check whether there are multiple maximum number of measurements across fractionation
count.feature3 <- count.feature1[count.fraction > 1, ]
if( nrow(count.feature3) > 0 ){
## 2.2.1 : maximum number of measurement / fraction == 1, remove that feature
max.feature <- apply(count.feature3[, -which(colnames(count.feature3) %in% c('FEATURE'))],
1,
function(x) max(x))
max.feature.1 <- count.feature3[max.feature == 1, 'FEATURE']
if (length(max.feature.1) > 0){
work <- work[-which(work$FEATURE %in% max.feature.1), ]
count.feature3 <- count.feature3[-which(count.feature3$FEATURE %in% max.feature.1), ]
}
if ( nrow(count.feature3) > 0 ) {
###############
## 2.2.2 : remove fractionations which have not maximum number of measurements
remove.fraction <- gather(count.feature3, 'Fraction', 'ncount', 2:ncol(count.feature3))
remove.fraction <- remove.fraction %>% group_by(FEATURE) %>% filter(ncount != max(ncount))
remove.fraction <- remove.fraction %>% filter(ncount != 0)
remove.fraction$tmp <- paste(remove.fraction$FEATURE, remove.fraction$Fraction, sep="_")
work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
rm(remove.fraction)
###############
## 2.2.3 : among fractionations, keep one fractionation which has maximum average
tmptmp <- work[which(work$FEATURE %in% count.feature3$FEATURE), ]
tmptmp <- tmptmp[!is.na(tmptmp$ABUNDANCE), ]
mean.frac.feature <- tmptmp %>% group_by(FEATURE, tmp) %>% summarise(mean=mean(ABUNDANCE))
remove.fraction <- mean.frac.feature %>% group_by(FEATURE) %>% filter(mean != max(mean))
work[work$tmp %in% remove.fraction$tmp, 'INTENSITY'] <- NA
work[work$tmp %in% remove.fraction$tmp, 'ABUNDANCE'] <- NA
rm(remove.fraction)
rm(tmptmp)
}
}
}
work <- work[, -which(colnames(work) %in% c('tmp'))]
}
} else { ## no fractionation
work$FRACTION <- 1
}
## check messingness for multirun
## check no value for some feature : balanced structure or not
## need to separate label-free or label-based
processout <- rbind(processout, c(paste("fillIncompleteRows = ", fillIncompleteRows,sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## [THT: better to write a function for single method, and call that function
## here and for the case with multuple methods]
## only 1 method
if ( !checkMultirun$out | length(unique(work$FRACTION)) == 1 ) {
## label-free experiments
if (nlevels(work$LABEL) == 1) {
## get feature by Run count of data
structure = tapply ( work$ABUNDANCE, list ( work$FEATURE, work$RUN ) , function ( x ) length ( x ) )
## structure value should be 1 for label-free, if not there are missingness. if more there are duplicates.
flagmissing = sum(is.na(structure)) > 0
flagduplicate = sum(structure[!is.na(structure)]>1) > 0
### if there is missing rows
if ( flagmissing ) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows.
If missing peaks occur they should be included in the dataset as separate rows,
and the missing intensity values should be indicated with 'NA'.
The incomplete rows are listed below."))
write.table(processout, file=finalfile,row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows.
If missing peaks occur they should be included in the dataset as separate rows,
and the missing intensity values should be indicated with 'NA'.
The incomplete rows are listed below.")
## first, which run has missing
runstructure <- apply ( structure, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## for missign row, need to assign before looping
missingwork <- NULL
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL","GROUP_ORIGINAL",
"GROUP","SUBJECT","SUBJECT_NESTED",
"RUN","FRACTION")])
## get feature ID
featureID <- structure[,colnames(structure)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has incomplete rows for some features (",
paste(names(finalfeatureID), collapse=", "), ")"))
## save in process file.
processout <- rbind(processout, c(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has incomplete rows for some features (",
paste(names(featureID[is.na(featureID)]), collapse=", "), ")")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work[which(work$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork <- rbind(missingwork, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
## [THT: this part can probably be merged into the above.
## Also, it might be better to check fillIncompleteRows earlier
## and terminate the process when it's FALSE]
if (fillIncompleteRows) {
## merge with work
## in future, use rbindlist?? rbindlist(list(work, missingwork))
work <- rbind(work, missingwork)
## print message
message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
## save in process file.
processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
write.table(processout, file=finalfile, row.names=FALSE)
} else {
## save in process file.
processout <- rbind(processout,"Please check whether features in the list are generated from spectral processing tool.
Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
write.table(processout, file=finalfile,row.names=FALSE)
stop("Please check whether features in the list are generated from spectral processing tool or not.
Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
} # end for flag missing
## if there are duplicates measurements
if (flagduplicate) {
## first, which run has duplicates
runstructure <- apply ( structure, 2, function ( x ) sum (x[!is.na(x)] > 1 ) > 0 )
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features have duplicates,
for(j in 1:length(runID)) {
nameID <- unique(work[work$RUN == runID[j], c("SUBJECT_ORIGINAL", "GROUP_ORIGINAL",
"GROUP","SUBJECT", "SUBJECT_NESTED",
"RUN", "FRACTION")])
featureID <- structure[, colnames(structure)==runID[j]]
finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
message(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has multiple rows (duplicate rows) for some features (",
paste(names(finalfeatureID), collapse=", "), ")"))
## save in process file.
processout <- rbind(processout, c(paste0("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]),
", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]),
" has multiple rows (duplicate rows) for some features (",
paste(names(featureID[is.na(featureID)]), collapse=", "), ")")))
write.table(processout, file=finalfile, row.names=FALSE)
}
## save in process file.
processout <- rbind(processout,"Please remove duplicate rows in the list above. ")
write.table(processout, file=finalfile,row.names=FALSE)
stop("Please remove duplicate rows in the list above.\n")
} # end flag duplicate
## no missing and no duplicates
if (!flagmissing & !flagduplicate) {
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
## end label-free
} else {
## label-based experiment
## count the reference and endobenous separately
work.l <- work[work$LABEL == "L", ]
work.h <- work[work$LABEL == "H", ]
## get feature by Run count of data
structure.l <- tapply(work.l$ABUNDANCE, list(work.l$FEATURE, work.l$RUN), function (x) length (x) )
structure.h <- tapply(work.h$ABUNDANCE, list(work.h$FEATURE, work.h$RUN), function (x) length (x) )
## first, check some features which completely missing across run
missingcomplete.l <- NULL
missingcomplete.h <- NULL
## 1. reference peptides
featurestructure.h <- apply(structure.h, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.h <- names(featurestructure.h[featurestructure.h == ncol(structure.h)])
if (length(featureID.h) > 0) {
## print message
message(paste0("CAUTION : some REFERENCE features have missing intensities in all the runs.
The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n"))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some REFERENCE features have missing intensities in all the runs.
The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool.", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL", "GROUP_ORIGINAL", "GROUP", "SUBJECT", "SUBJECT_NESTED", "RUN", "FRACTION")])
## get PROTEIN and FEATURE information
## here use whole work dataset
tempTogetfeature <- work[which(work$FEATURE %in% featureID.h), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
#for(j in 1:nrow(nameID)) {
# ## merge feature info and run info as 'work' format
# tempmissingwork <- data.frame(tempfeatureID, LABEL="H",GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j], SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j], RUN=nameID$RUN[j], GROUP=nameID$GROUP[j], SUBJECT=nameID$SUBJECT[j], SUBJECT_NESTED=nameID$SUBJECT_NESTED[j], INTENSITY=NA, ABUNDANCE=NA, METHOD=nameID$METHOD[j])
# ## merge with tempary space, missingwork
# missingcomplete.h <- rbind(missingcomplete.h, tempmissingwork)
#}
# MC : 2016.04.21 : use merge for simplicity
tmp <- merge(nameID, tempfeatureID, by=NULL)
missingcomplete.h <- data.frame(PROTEIN=tmp$PROTEIN,
PEPTIDE=tmp$PEPTIDE,
TRANSITION=tmp$TRANSITION,
FEATURE=tmp$FEATURE,
LABEL="H",
GROUP_ORIGINAL=tmp$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=tmp$SUBJECT_ORIGINAL,
RUN=tmp$RUN,
GROUP=tmp$GROUP,
SUBJECT=tmp$SUBJECT,
SUBJECT_NESTED=tmp$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=tmp$FRACTION)
rm(tmp)
} # end fillIncompleteRows option
} # end for reference peptides
## 2. endogenous peptides
featurestructure.l <- apply(structure.l, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.l <- names(featurestructure.l[featurestructure.l == ncol(structure.l)])
if (length(featureID.l) > 0) {
## print message
message(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs.
The completely missing ENDOGENOUS features are ", paste(featureID.l, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs.
The completely missing ENDOGENOUS features are ",
paste(featureID.l, collapse=", "),
". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
nameID <- unique(work.l[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get PROTEIN and FEATURE information
## here use whole work dataset
tempTogetfeature <- work[which(work$FEATURE %in% featureID.l), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
#for (j in 1:nrow(nameID)) {
# ## merge feature info and run info as 'work' format
# tempmissingwork <- data.frame(tempfeatureID, LABEL="L",GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j], SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j], RUN=nameID$RUN[j], GROUP=nameID$GROUP[j], SUBJECT=nameID$SUBJECT[j], SUBJECT_NESTED=nameID$SUBJECT_NESTED[j], INTENSITY=NA, ABUNDANCE=NA, METHOD=nameID$METHOD[j])
# ## merge with tempary space, missingwork
# missingcomplete.l <- rbind(missingcomplete.l, tempmissingwork)
#}
# MC : 2016.04.21 : use merge for simplicity
tmp <- merge(nameID, tempfeatureID, by=NULL)
missingcomplete.l <- data.frame(PROTEIN=tmp$PROTEIN,
PEPTIDE=tmp$PEPTIDE,
TRANSITION=tmp$TRANSITION,
FEATURE=tmp$FEATURE,
LABEL="L",
GROUP_ORIGINAL=tmp$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=tmp$SUBJECT_ORIGINAL,
RUN=tmp$RUN,
GROUP=tmp$GROUP,
SUBJECT=tmp$SUBJECT,
SUBJECT_NESTED=tmp$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=tmp$FRACTION)
rm(tmp)
} # end fillIncompleteRows option
} # end endogenous peptides
## second, check for any other (partial) missingness
## for missing rows, assign before looping; it must be assigned at the beginning because it is needed in either case, with or without missingness
missingwork.l <- NULL
missingwork.h <- NULL
## structure value should be 1 for reference and endogenous separately, if not there are missingness. if more there are duplicates.
## if count of NA is not zero and not number of run (excluding complete missingness across runs)
missing.l <- names(featurestructure.l[featurestructure.l != ncol(structure.l) & featurestructure.l != 0])
missing.h <- names(featurestructure.h[featurestructure.h != ncol(structure.h) & featurestructure.h != 0])
flagmissing.l = length(missing.l) > 0
flagmissing.h = length(missing.h) > 0
## structure value is greater than 1, there are duplicates
flagduplicate.l = sum(structure.l[!is.na(structure.l)] > 1) > 0
flagduplicate.h = sum(structure.h[!is.na(structure.h)] > 1) > 0
## if there is missing rows for endogenous
if ( flagmissing.l | flagmissing.h ) {
processout <- rbind(processout,c("CAUTION: the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below.")
## endogenous intensities
if (flagmissing.l) {
if (length(missing.l) > 1){
runstructure <- apply ( structure.l[which(rownames(structure.l) %in% missing.l), ], 2, function ( x ) sum ( is.na ( x ) ) ) > 0
} else if (length(missing.l) == 1) {
runstructure <- is.na ( structure.l[which(rownames(structure.l) %in% missing.l), ]) > 0
}
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.l[work.l$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
# MC : 2016/04/21. if there is one row, can't catch up data.frame
## get feature ID
if (length(missing.l) > 1){
featureID <- structure.l[which(rownames(structure.l) %in% missing.l), colnames(structure.l) == runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- names(featureID[is.na(featureID)])
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(finalfeatureID, collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(finalfeatureID, collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
} else if (length(missing.l) == 1) {
finalfeatureID <- missing.l
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", finalfeatureID,")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", finalfeatureID,")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.l[which(work.l$FEATURE %in% finalfeatureID), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.l <- rbind(missingwork.l,tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
## reference intensities
if (flagmissing.h) {
## first, which run has missing
if (length(missing.h) > 1){
runstructure <- apply ( structure.h[which(rownames(structure.h) %in% missing.h), ], 2,
function ( x ) sum ( is.na ( x ) ) ) > 0
} else if (length(missing.h) == 1) {
runstructure <- is.na ( structure.h[which(rownames(structure.h) %in% missing.h), ]) > 0
}
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for(j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.h[work.h$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
# MC : 2016/04/21. if there is one row, can't catch up data.frame
## get feature ID
if (length(missing.h) > 1){
featureID <- structure.h[which(rownames(structure.h) %in% missing.h), colnames(structure.h) == runID[j] ]
## get feature ID which has no measuremnt.
finalfeatureID <- names(featureID[is.na(featureID)])
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(finalfeatureID, collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(finalfeatureID, collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
} else if (length(missing.h) == 1) {
finalfeatureID <- missing.h
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", finalfeatureID,")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", finalfeatureID,")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
}
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.h[which(work.h$FEATURE %in% finalfeatureID), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.h <- rbind(missingwork.h, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
} # end for flag missing
## merge missing rows if fillIncompleteRows=TRUE or message.
## Either merge the NA-filled rows for missing peaks back into 'work', or
## abort with instructions when the user did not request automatic filling.
if (fillIncompleteRows) {
  ## merge with work
  ## in future, use rbindlist?? rbindlist(list(work, missingwork))
  ## NULL components are silently dropped by rbind, so categories with no
  ## missingness are harmless here.
  work <- rbind(work,missingcomplete.l, missingcomplete.h, missingwork.l, missingwork.h)
  ## print message
  message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
  ## save in process file.
  processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
  write.table(processout, file=finalfile, row.names=FALSE)
} else if (!is.null(missingcomplete.l) |
           !is.null(missingcomplete.h) |
           !is.null(missingwork.l) |
           !is.null(missingwork.h) ) {
  ## BUG FIX: the fourth condition previously re-tested missingwork.l, so
  ## incomplete rows found only in the REFERENCE (heavy) channel did not
  ## trigger this error path. It now checks missingwork.h.
  ## save in process file.
  processout <- rbind(processout,
                      "Please check whether features in the list are generated from spectral processing tool.
                       Or the option, fillIncompleteRows=TRUE,
                       will add incomplete rows for missing peaks with intensity=NA.")
  write.table(processout, file=finalfile, row.names=FALSE)
  stop("Please check whether features in the list are generated from spectral processing tool or not. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
## if there are duplicates measurements
## Report duplicate REFERENCE (heavy-label) measurements per run, then abort:
## duplicates make the feature-by-run count ambiguous and must be removed by
## the user before processing can continue.
if (flagduplicate.h) {
  ## first, which run has duplicates (any feature counted more than once)
  runstructure <- apply ( structure.h, 2, function ( x ) sum ( x[!is.na(x)] > 1 )>0 )
  runID <- names(runstructure[runstructure==TRUE])
  ## then for each run, which features have duplicates,
  for(j in 1:length(runID)) {
    nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL",
                                                "GROUP_ORIGINAL",
                                                "GROUP",
                                                "SUBJECT",
                                                "SUBJECT_NESTED",
                                                "RUN",
                                                "FRACTION")])
    featureID <- structure.h[, colnames(structure.h)==runID[j]]
    ## features observed more than once in this run
    finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
    message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "), ")", sep="" ))
    ## save in process file.
    ## BUG FIX: this line previously listed names(featureID[is.na(featureID)])
    ## (the *missing* features, copy-pasted from the missingness section);
    ## it now lists the duplicated features, matching the console message.
    processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "),")", sep="" )))
    write.table(processout, file=finalfile,row.names=FALSE)
  }
  ## save in process file.
  processout <- rbind(processout,"Please remove duplicate rows in the list above. ")
  write.table(processout, file=finalfile, row.names=FALSE)
  stop("Please remove duplicate rows in the list above.\n")
} # end flag duplicate for reference
## Report duplicate ENDOGENOUS (light-label) measurements per run, then abort:
## duplicates make the feature-by-run count ambiguous and must be removed by
## the user before processing can continue.
if (flagduplicate.l) {
  ## first, which run has duplicates (any feature counted more than once)
  runstructure <- apply ( structure.l, 2, function ( x ) sum ( x[!is.na(x)] > 1 )>0 )
  runID <- names(runstructure[runstructure == TRUE])
  ## then for each run, which features have duplicates,
  for (j in 1:length(runID)) {
    nameID <- unique(work[work$RUN==runID[j], c("SUBJECT_ORIGINAL",
                                                "GROUP_ORIGINAL",
                                                "GROUP",
                                                "SUBJECT",
                                                "SUBJECT_NESTED",
                                                "RUN",
                                                "FRACTION")])
    featureID <- structure.l[, colnames(structure.l)==runID[j]]
    ## features observed more than once in this run
    finalfeatureID <- featureID[!is.na(featureID) & featureID > 1]
    message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
    ## save in process file.
    ## BUG FIX: this line previously listed names(featureID[is.na(featureID)])
    ## (the *missing* features, copy-pasted from the missingness section);
    ## it now lists the duplicated features, matching the console message.
    processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has multiple rows (duplicate rows) for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" )))
    write.table(processout, file=finalfile,row.names=FALSE)
  }
  ## save in process file.
  processout <- rbind(processout,"ERROR : Please remove duplicate rows in the list above. ")
  write.table(processout, file=finalfile, row.names=FALSE)
  stop("ERROR : Please remove duplicate rows in the list above.\n")
} # end flag duplicate for endogenous
## no missing and no duplicates
if (!flagmissing.h & !flagmissing.l & !flagduplicate.h & !flagduplicate.l) {
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
} # end 1 method
} else { # multiple fractionations
## Multi-fraction experiments: scan each fraction once, recording one
## missingness flag and one duplicate flag per fraction for use below.
allflagmissing <- NULL
allflagduplicate <- NULL
## check each fraction
for (k in 1:length(unique(work$FRACTION))) {
  worktemp <- work[work$FRACTION == k, ]
  worktemp$RUN <- factor(worktemp$RUN)
  worktemp$FEATURE <- factor(worktemp$FEATURE)
  ## feature-by-run count of observations within this fraction
  structure <- tapply ( worktemp$ABUNDANCE, list ( worktemp$FEATURE, worktemp$RUN ) , function ( x ) length ( x ) )
  ## structure value should be 2 for labeled, 1 for label-free, if not there are missingness
  if (nlevels(worktemp$LABEL) == 2) { ## label-based
    flag = sum(is.na(structure)) > 0 | sum(structure[!is.na(structure)] < 2) > 0
  } else { ## label-free
    flag = sum(is.na(structure)) > 0
  }
  allflagmissing <- c(allflagmissing,flag)
  ## for duplicate : count heavy and light channels separately for labeled data
  if (nlevels(worktemp$LABEL) == 2) { # label-based
    worktemp.h <- worktemp[worktemp$LABEL == "H", ]
    worktemp.l <- worktemp[worktemp$LABEL == "L", ]
    structure.h <- tapply ( worktemp.h$ABUNDANCE, list ( worktemp.h$FEATURE, worktemp.h$RUN ) , function ( x ) length ( x ) )
    structure.l <- tapply ( worktemp.l$ABUNDANCE, list ( worktemp.l$FEATURE, worktemp.l$RUN ) , function ( x ) length ( x ) )
    flagduplicate <- sum(structure.h[!is.na(structure.h)] > 1) > 0 | sum(structure.l[!is.na(structure.l)] > 1) > 0
  } else { # label-free
    flagduplicate <- sum(structure[!is.na(structure)]>1) > 0
  }
  ## BUG FIX: previously appended 'flag' (the missingness flag) instead of
  ## 'flagduplicate', so per-fraction duplicate detection was never recorded.
  allflagduplicate <- c(allflagduplicate, flagduplicate)
} # end to check any flag among methods
if ( sum(allflagmissing) != 0 ) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows. Missing feature intensities should be present in the dataset, and their intensities should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. Missing feature intensities should be present in the dataset, and their intensities should be indicated with 'NA'. The incomplete rows are listed below.")
## for missing rows, need to assign before looping
missingwork <- NULL
missingcomplete.h <- NULL
missingcomplete.l <- NULL
missingwork.h <- NULL
missingwork.l <- NULL
for (k in 1:length(unique(work$FRACTION))) {
## see which method has missing rows
if (allflagmissing[k]) {
worktemp <- work[work$FRACTION==k, ]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
if (nlevels(worktemp$LABEL) == 1) { ## label-free
structure = tapply ( worktemp$ABUNDANCE, list ( worktemp$FEATURE, worktemp$RUN ) , function ( x ) length ( x ) )
## first, which run has missing
runstructure <- apply ( structure, 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for (j in 1:length(runID)) {
nameID <- unique(worktemp[worktemp$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- structure[, colnames(structure)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work[which(work$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork <- rbind(missingwork, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run
} else { # end label-free
## label-based
## count the reference and endogenous peptides separately
work.l <- worktemp[worktemp$LABEL=="L", ]
work.h <- worktemp[worktemp$LABEL=="H", ]
## get feature by Run count of data
structure.l <- tapply ( work.l$ABUNDANCE, list(work.l$FEATURE, work.l$RUN), function (x) length (x) )
structure.h <- tapply ( work.h$ABUNDANCE, list(work.h$FEATURE, work.h$RUN), function (x) length (x) )
## 1. reference peptides
featurestructure.h <- apply(structure.h, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.h <- names(featurestructure.h[featurestructure.h==ncol(structure.h)])
if (length(featureID.h) > 0) {
## print message
message(paste("CAUTION : some REFERENCE features have missing intensities in all the runs. The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
processout <- rbind(processout,c(paste("CAUTION : some REFERENCE features have missing intensities in all the runs. The completely missing REFERENCE features are ", paste(featureID.h, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool.", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
if( nrow(work.h) == 0 ){
work.h <- work[work$LABEL=="H", ]
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
nameID$FRACTION <- k
} else {
## get unique Run information
nameID <- unique(work.h[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
}
## get PROTEIN and FEATURE information
## here use whole worktemp dataset
tempTogetfeature <- worktemp[which(worktemp$FEATURE %in% featureID.h), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
for (j in 1:nrow(nameID)) {
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j],
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j],
RUN=nameID$RUN[j],
GROUP=nameID$GROUP[j],
SUBJECT=nameID$SUBJECT[j],
SUBJECT_NESTED=nameID$SUBJECT_NESTED[j],
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION[j])
## merge with tempary space, missingwork
missingcomplete.h <- rbind(missingcomplete.h, tempmissingwork)
}
} # end fillIncompleteRows option
} # end for reference peptides
## 2. endogenous peptides
featurestructure.l <- apply(structure.l, 1, function (x) sum(is.na(x)))
## get feature ID of reference which are completely missing across run
featureID.l <- names(featurestructure.l[featurestructure.l==ncol(structure.l)])
if (length(featureID.l) > 0) {
## print message
message(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs. The completely missing ENDOGENOUS features are ", paste(featureID.l, collapse=", "), ". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep=""))
## save in process file.
processout <- rbind(processout, c(paste("CAUTION : some ENDOGENOUS features have missing intensities in all the runs. The completely missing ENCOGENOUS features are ", paste(featureID.l, collapse=", "),". Please check whether features in the list are correctly generated from spectral processing tool. \n", sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
## get unique Run information
nameID <- unique(work.l[, c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get PROTEIN and FEATURE information
## here use whole worktemp dataset
tempTogetfeature <- worktemp[which(worktemp$FEATURE %in% featureID.l), ]
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## then generate data.frame for missingness,
for(j in 1:nrow(nameID)) {
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL[j],
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL[j],
RUN=nameID$RUN[j],
GROUP=nameID$GROUP[j],
SUBJECT=nameID$SUBJECT[j],
SUBJECT_NESTED=nameID$SUBJECT_NESTED[j],
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION[j])
## merge with tempary space, missingwork
missingcomplete.l <- rbind(missingcomplete.l, tempmissingwork)
}
} # end fillIncompleteRows option
} # end endogenous peptides
## second, check for any other (partial) missingness
## structure value should be 1 for reference and endogenous separately, if not there are missingness. if more there are duplicates.
## if count of NA is not zero and not number of run (excluding complete missingness across runs)
missing.l <- names(featurestructure.l[featurestructure.l!=ncol(structure.l) & featurestructure.l != 0])
missing.h <- names(featurestructure.h[featurestructure.h!=ncol(structure.h) & featurestructure.h != 0])
flagmissing.l <- length(missing.l) > 0
flagmissing.h <- length(missing.h) > 0
## structure value is greater than 1, there are duplicates
flagduplicate.l <- sum(structure.l[!is.na(structure.l)] > 1) > 0
flagduplicate.h <- sum(structure.h[!is.na(structure.h)] > 1) > 0
## if there is missing rows for endogenous
if (flagmissing.l | flagmissing.h) {
processout <- rbind(processout, c("CAUTION: the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below."))
write.table(processout, file=finalfile, row.names=FALSE)
message("CAUTION : the input dataset has incomplete rows. If missing peaks occur they should be included in the dataset as separate rows, and the missing intensity values should be indicated with 'NA'. The incomplete rows are listed below.")
## endogenous intensities
if (flagmissing.l) {
## first, which run has missing
runstructure <- apply ( structure.l[-which(rownames(structure.l) %in% featureID.l),], 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for (j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.l[work.l$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- structure.l[-which(rownames(structure.l) %in% featureID.l), colnames(structure.l)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[, "GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some ENDOGENOUS features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile, row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.l[which(work.l$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="L",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.l <- rbind(missingwork.l, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
## reference intensities
if (flagmissing.h) {
## first, which run has missing
runstructure <- apply ( structure.h[-which(rownames(structure.h) %in% featureID.h),], 2, function ( x ) sum ( is.na ( x ) ) ) > 0
## get the name of Run
runID <- names(runstructure[runstructure==TRUE])
## then for each run, which features are missing,
for (j in 1:length(runID)) {
## get subject, group information for this run
nameID <- unique(work.h[work.h$RUN==runID[j], c("SUBJECT_ORIGINAL",
"GROUP_ORIGINAL",
"GROUP",
"SUBJECT",
"SUBJECT_NESTED",
"RUN",
"FRACTION")])
## get feature ID
featureID <- structure.h[-which(rownames(structure.h) %in% featureID.h), colnames(structure.h)==runID[j]]
## get feature ID which has no measuremnt.
finalfeatureID <- featureID[is.na(featureID)]
## print features ID
message(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(names(finalfeatureID), collapse=", "),")", sep="" ))
## save in process file.
processout <- rbind(processout,c(paste("*** Subject : ", as.character(nameID[,"SUBJECT_ORIGINAL"]) ,", Condition : ", as.character(nameID[,"GROUP_ORIGINAL"]), " has incomplete rows for some REFERENCE features (", paste(names(featureID[is.na(featureID)]), collapse=", "),")", sep="" )))
write.table(processout, file=finalfile,row.names=FALSE)
## add missing rows if option is TRUE
if (fillIncompleteRows) {
tempTogetfeature <- work.h[which(work.h$FEATURE %in% names(finalfeatureID)), ]
## get PROTEIN and FEATURE infomation
tempfeatureID <- unique(tempTogetfeature[, c("PROTEIN", "PEPTIDE", "TRANSITION", "FEATURE")])
## merge feature info and run info as 'work' format
tempmissingwork <- data.frame(tempfeatureID,
LABEL="H",
GROUP_ORIGINAL=nameID$GROUP_ORIGINAL,
SUBJECT_ORIGINAL=nameID$SUBJECT_ORIGINAL,
RUN=nameID$RUN,
GROUP=nameID$GROUP,
SUBJECT=nameID$SUBJECT,
SUBJECT_NESTED=nameID$SUBJECT_NESTED,
INTENSITY=NA,
ABUNDANCE=NA,
FRACTION=nameID$FRACTION)
## merge with tempary space, missingwork
missingwork.h <- rbind(missingwork.h, tempmissingwork)
} # end fillIncompleteRows options
} # end loop for run ID
} # end for endogenous
} # end any missingness
} # end label-based
} # if only any flag for method
} # end loop for methods
## Either merge the NA-filled rows for missing peaks back into 'work', or
## abort with instructions when the user did not request automatic filling.
if (fillIncompleteRows) {
  ## merge with work
  ## in future, use rbindlist?? rbindlist(list(work, missingwork))
  ## NOTE(review): 'worktemp' here is whatever the last fraction-loop
  ## iteration left behind; this assumes the label type (nlevels of LABEL)
  ## is the same across fractions - TODO confirm.
  if (nlevels(worktemp$LABEL) == 1) {
    work <- rbind(work, missingwork)
  } else {
    work <- rbind(work, missingcomplete.l, missingcomplete.h, missingwork.l, missingwork.h)
  }
  ## print message
  message("\n DONE : Incomplete rows for missing peaks are added with intensity values=NA. \n")
  ## save in process file.
  processout <- rbind(processout, "Incomplete rows for missing peaks are added with intensity values=NA. - done, Okay")
  write.table(processout, file=finalfile,row.names=FALSE)
} else if (!is.null(missingcomplete.l) | !is.null(missingcomplete.h) | !is.null(missingwork.l) | !is.null(missingwork.h) | !is.null(missingwork)) {
  ## BUG FIX: missingwork.h was previously never tested (missingwork.l was
  ## repeated), so reference-channel-only missingness could slip through
  ## without raising this error.
  ## save in process file.
  processout <- rbind(processout, "Please check whether features in the list are generated from spectral processing tool. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
  write.table(processout, file=finalfile, row.names=FALSE)
  stop("Please check whether features in the list are generated from spectral processing tool. Or the option, fillIncompleteRows=TRUE, will add incomplete rows for missing peaks with intensity=NA.")
}
} else {
## Data were already balanced; just log it.
processout <- rbind(processout, c("Balanced data format with NA for missing feature intensities - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
## for duplicate, in future
} # end multiple method
## factorize GROUP, SUBJECT, GROUP_ORIGINAL, SUBJECT_ORIGINAL, SUBJECT_ORIGINAL_NESTED, FEATURE, RUN
## -------------------------------------------------------------------------------------------------
## IMPORTANT: the data frame is sorted FIRST, then factors are built with
## levels=unique(...), so the factor level order follows the sort order below.
## Do not reorder these statements.
work$PROTEIN <- factor(work$PROTEIN)
work$PEPTIDE <- factor(work$PEPTIDE)
work$TRANSITION <- factor(work$TRANSITION)
work <- work[with(work, order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, PROTEIN, PEPTIDE, TRANSITION)),]
work$GROUP <- factor(work$GROUP)
work$SUBJECT <- factor(work$SUBJECT)
## SUBJECT_ORIGINAL_NESTED will sorted as GROUP_ORIGINAL, SUBJECT_ORIGINAL
work$SUBJECT_NESTED <- factor(work$SUBJECT_NESTED, levels=unique(work$SUBJECT_NESTED))
## FEATURE will sorted as PROTEIN, PEPTIDE, TRANSITION
work$FEATURE <- factor(work$FEATURE, levels=unique(work$FEATURE))
## RUN will sorted as GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN
## Keep the original run id; RUN itself becomes a 1..n integer-labelled factor.
work$originalRUN <- work$RUN
work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
processout <- rbind(processout, c("Factorize in columns(GROUP, SUBJECT, GROUP_ORIGINAL,
SUBJECT_ORIGINAL, SUBJECT_ORIGINAL_NESTED, FEATURE, RUN) - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## Normalization ##
## ------------- ##
## Normalization : option 0. none
if (is.element("NONE",normalization) | is.element("FALSE",normalization)) { # after 'toupper', FALSE becomes character.
processout <- rbind(processout, c("Normalization : no normalization - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
}
## Normalization : option 1. constant normalization , equalize medians ##
## -------------------------------------------------------------------
## Shift each run so its median ABUNDANCE equals the per-fraction median of
## run medians. Label-based data use only the heavy (reference) channel to
## estimate the shift, but the shift is applied to all rows of the run.
if (!is.element("NONE", normalization) &
!is.element("FALSE", normalization) &
is.element("EQUALIZEMEDIANS", normalization)) {
if (nlevels(work$LABEL) == 1) {
## Constant normalization by endogenous per method
## [MC : use median of medians]
median.run.method <- aggregate(ABUNDANCE ~ RUN + FRACTION, data = work, median, na.rm = TRUE)
median.method <- tapply(median.run.method$ABUNDANCE, median.run.method$FRACTION, median, na.rm = TRUE)
nmethod <- unique(work$FRACTION)
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j], "RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized
namerun.idx <- which(work$RUN == namerun[i])
work[namerun.idx, "ABUNDANCE"] <- work[namerun.idx, "ABUNDANCE"] - median.run.method[median.run.method$RUN == namerun[i], "ABUNDANCE"] + median.method[j]
}
}
}
if (nlevels(work$LABEL) == 2 ) {
## Constant normalization by heavy standard per method
h <- work[work$LABEL == "H", ]
## [MC : use median of medians]
median.run.method <- aggregate(ABUNDANCE ~ RUN + FRACTION, data = h, median, na.rm = TRUE)
median.method <- tapply(median.run.method$ABUNDANCE, median.run.method$FRACTION, median, na.rm = TRUE)
nmethod <- unique(work$FRACTION)
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION==nmethod[j],"RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized
namerun.idx <- which(work$RUN == namerun[i])
work[namerun.idx, "ABUNDANCE"] <- work[namerun.idx, "ABUNDANCE"] - median.run.method[median.run.method$RUN == namerun[i], "ABUNDANCE"] + median.method[j]
}
} # end loop method
} # for labe-based
## NOTE(review): 'nmethod' is set inside the branches above; one of the two
## nlevels(LABEL) branches is always taken, so it is defined here.
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : Constant normalization (equalize medians) - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : Constant normalization (equalize medians) per fraction - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
} ## end equaliemedian normalization
## Normalization : option 2. quantile normalization ##
## ------------------------------------------------ ##
## Quantile-normalize ABUNDANCE per fraction using preprocessCore.
## Label-free: normalize the endogenous signal directly.
## Label-based: quantile-normalize the heavy (reference) channel, then apply
## the same per-cell adjustment (quantiletemp - quantiledone) to the light
## channel, preserving the light/heavy relationship within each run.
if (!is.element("NONE", normalization) &
!is.element("FALSE", normalization) &
is.element("QUANTILE", normalization)) {
if (nlevels(work$LABEL) == 1) {
## for label-free, just use endogenous
nmethod <- unique(work$FRACTION)
quantileall <- NULL
## ABUNDANCE=0 replace with 1, in order to distinguish later.
## (xtabs() fills absent cells with 0, so true zeros must be disambiguated
## from missing; they are restored to 0 after normalization below.)
work[!is.na(work$ABUNDANCE) & work$ABUNDANCE == 0, 'ABUNDANCE'] <- 1
for (j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j],"RUN"])
worktemp <- work[which(work$RUN %in% namerun & !is.na(work$INTENSITY)),]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
quantiletemp <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp))
## need to put NA for missing value in endogenous
quantiletemp[quantiletemp == 0] <- NA
## using preprocessCore library
quantiledone <- normalize.quantiles(quantiletemp)
rownames(quantiledone) <- rownames(quantiletemp)
colnames(quantiledone) <- colnames(quantiletemp)
## get quantiled to long format for apply difference endogenous
quantilelong <- melt(quantiledone, id=rownames(quantiledone))
colnames(quantilelong) <- c("FEATURE", "RUN", "ABUNDANCE_quantile")
rm(quantiledone)
## quantileall <- rbindlist(list(quantileall,quantilelong))
quantileall <- rbind(quantileall, quantilelong)
rm(quantilelong)
}
work <- merge(work, quantileall, by=c("FEATURE", "RUN"))
rm(quantileall)
## reorder
## Rebuild the data frame with ABUNDANCE replaced by the normalized values.
work <- data.frame("PROTEIN"=work$PROTEIN,
"PEPTIDE"=work$PEPTIDE,
"TRANSITION"=work$TRANSITION,
"FEATURE"=work$FEATURE,
"LABEL"=work$LABEL,
"GROUP_ORIGINAL"=work$GROUP_ORIGINAL,
"SUBJECT_ORIGINAL"=work$SUBJECT_ORIGINAL,
"RUN"=work$RUN,
"GROUP"=work$GROUP,
"SUBJECT"=work$SUBJECT,
"SUBJECT_NESTED"=work$SUBJECT_NESTED,
"INTENSITY"=work$INTENSITY,
"ABUNDANCE"=work$ABUNDANCE_quantile,
"FRACTION"=work$FRACTION,
"originalRUN"=work$originalRUN)
work <- work[with(work, order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL, RUN, PROTEIN, PEPTIDE, TRANSITION)), ]
## for skyline case, separate 1 and zero
work[!is.na(work$INTENSITY) & work$INTENSITY == 1, 'ABUNDANCE'] <- 0
}
if (nlevels(work$LABEL) == 2) {
nmethod <- unique(work$FRACTION)
quantileall <- NULL
for (j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION == nmethod[j], "RUN"])
## for label-based, make quantile normalization for reference
##worktemp <- work[which(work$RUN %in% namerun & work$LABEL=="H" & !is.na(work$INTENSITY)),] ## because for sparse of reference
worktemp <- work[which(work$RUN %in% namerun & work$LABEL == "H"),]
worktemp$RUN <- factor(worktemp$RUN)
worktemp$FEATURE <- factor(worktemp$FEATURE)
quantiletemp <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp))
rm(worktemp)
## need to put NA for missing value in endogenous
quantiletemp[quantiletemp==0] <- NA
## using preprocessCore library
quantiledone <- normalize.quantiles(quantiletemp)
rownames(quantiledone) <- rownames(quantiletemp)
colnames(quantiledone) <- colnames(quantiletemp)
## get quantiled to long format for apply difference endogenous
quantilelong.h <- melt(quantiledone, id=rownames(quantiledone))
colnames(quantilelong.h) <- c("FEATURE","RUN","ABUNDANCE_quantile")
quantilelong.h <- data.frame(quantilelong.h, LABEL="H")
## endogenous, in order to applying
##worktemp.l <- work[which(work$RUN %in% namerun & work$LABEL=="L" & !is.na(work$INTENSITY)),] ## because for sparse of reference
worktemp.l <- work[which(work$RUN %in% namerun & work$LABEL=="L"),]
worktemp.l$RUN <- factor(worktemp.l$RUN)
worktemp.l$FEATURE <- factor(worktemp.l$FEATURE)
quantiletemp.l <- as.matrix(xtabs(ABUNDANCE~FEATURE+RUN, data=worktemp.l))
rm(worktemp.l)
## need to put NA for missing value in endogenous
quantiletemp.l[quantiletemp.l==0] <- NA
## apply the difference from reference
## NOTE: assumes the light and heavy xtabs have identical dimensions
## (same FEATURE x RUN grid), which holds because FEATURE/RUN are factors.
quantiledone.l <- quantiletemp.l-(quantiletemp-quantiledone)
## get quantiled to long format for apply difference endogenous
quantilelong.l <- melt(quantiledone.l, id=rownames(quantiledone.l))
colnames(quantilelong.l) <- c("FEATURE", "RUN", "ABUNDANCE_quantile")
quantilelong.l <- data.frame(quantilelong.l, LABEL="L")
rm(quantiletemp)
rm(quantiledone)
rm(quantiletemp.l)
rm(quantiledone.l)
# quantileall <- rbindlist(list(quantileall,quantilelong.h, quantilelong.l))
quantileall <- rbind(quantileall,quantilelong.h, quantilelong.l)
}
## merge with original data
work <- merge(work,quantileall, by=c("FEATURE","RUN","LABEL"))
## reorder
work <- data.frame("PROTEIN"=work$PROTEIN,
"PEPTIDE"=work$PEPTIDE,
"TRANSITION"=work$TRANSITION,
"FEATURE"=work$FEATURE,
"LABEL"=work$LABEL,
"GROUP_ORIGINAL"=work$GROUP_ORIGINAL,
"SUBJECT_ORIGINAL"=work$SUBJECT_ORIGINAL,
"RUN"=work$RUN,
"GROUP"=work$GROUP,
"SUBJECT"=work$SUBJECT,
"SUBJECT_NESTED"=work$SUBJECT_NESTED,
"INTENSITY"=work$INTENSITY,
"ABUNDANCE"=work$ABUNDANCE_quantile,
"FRACTION"=work$FRACTION,
"originalRUN" = work$originalRUN)
work <- work[with(work,order(LABEL,GROUP_ORIGINAL,SUBJECT_ORIGINAL,RUN,PROTEIN,PEPTIDE,TRANSITION)),]
}
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : Quantile normalization - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : Quantile normalization per fraction - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
}
## Normalization : option 3. global standards - for endogenous ##
## ----------------------------------------------------------- ##
## Shift each run's endogenous (light) abundances so the mean of the
## user-specified standard peptides/proteins is aligned to the per-fraction
## median across runs. Only LABEL=="L" rows are adjusted.
if (!is.element("NONE", normalization) &
!is.element("FALSE", normalization) &
is.element("GLOBALSTANDARDS", normalization)) {
work$RUN <- factor(work$RUN)
combine <- data.frame(RUN=levels(work$RUN))
allPeptide <- unique(work$PEPTIDE)
allProtein <- unique(work$PROTEIN)
for (i in 1:length(nameStandards)) {
## if Peptides
## namePeptide <- allPeptide[grep(nameStandards[i],allPeptide)] ## cannot grep for modified peptide sequence, [,],+ sign
## NOTE(review): 'tempPeptide' is built earlier, outside this view.
## If namePeptide has length > 1, 'work$PEPTIDE == namePeptide' recycles;
## presumably each sequence maps to one PEPTIDE id -- verify upstream.
namePeptide <- tempPeptide[tempPeptide$PEPTIDESEQUENCE == nameStandards[i], "PEPTIDE"]
if (length(namePeptide)!=0) {
tempStandard <- work[work$PEPTIDE == namePeptide,]
} else {
## if Proteins
nameProtein <- allProtein[allProtein == nameStandards[i]] # if we use 'grep', can' find the proteins name with some symbol, such as 'sp|P30153|2AAA_HUMAN'
if (length(nameProtein)!=0) {
tempStandard <- work[work$PROTEIN==nameProtein,]
} else {
processout <- rbind(processout,c(paste("global standard peptides or proteins, ",nameStandards[i] ,", is not in dataset. Please check whether 'nameStandards' input is correct or not.")))
write.table(processout, file=finalfile,row.names=FALSE)
stop(paste("global standard peptides or proteins, ",nameStandards[i] ,", is not in dataset. Please check whether 'nameStandards' input is correct or not."))
}
}
## here, by RUN, but need to check !!!
tempStandard <- tempStandard[tempStandard$GROUP!="0",]
tempStandard$RUN <- factor(tempStandard$RUN)
tempStandard <- tempStandard[!is.na(tempStandard$ABUNDANCE),]
meanStandard <- tapply(tempStandard$ABUNDANCE, tempStandard$RUN, function(x) mean(x, na.rm=TRUE))
meanStandard <- data.frame(RUN=names(meanStandard),meanStandard)
combine <- merge(combine, meanStandard, by="RUN", all=TRUE)
colnames(combine)[i+1] <- paste("meanStandard",i,sep="")
}
rownames(combine) <- combine$RUN
combine <- subset(combine, select=-c(RUN))
## get mean among global standards
allmean <- apply(combine,1, function(x) mean(x, na.rm=TRUE))
## allmean[is.na(allmean)] <- 0
allmeantemp <- data.frame(RUN=names(allmean),allmean)
allrun <- unique(work[,c("RUN","FRACTION")])
allmeantemp <- merge(allmeantemp, allrun,by="RUN")
median.all <- tapply(allmeantemp$allmean, allmeantemp$FRACTION, function(x) median(x,na.rm=TRUE))
## adjust
nmethod <- unique(work$FRACTION)
for(j in 1:length(nmethod)) {
namerun <- unique(work[work$FRACTION==nmethod[j], "RUN"])
for (i in 1:length(namerun)) {
## ABUNDANCE is normalized
if (!is.na(allmean[names(allmean)==namerun[i]])) work[work$RUN==namerun[i] & work$LABEL=="L","ABUNDANCE"] <- work[work$RUN==namerun[i] & work$LABEL=="L","ABUNDANCE"]-allmean[names(allmean)==namerun[i]]+median.all[j]
}
} # end loop method
## NOTE(review): both branches below log the same message; the >1 branch was
## presumably meant to say "per fraction" like the other normalizations --
## confirm intent before changing the logged string.
if(length(nmethod) == 1) {
processout <- rbind(processout, c("Normalization : normalization with global standards protein - okay"))
} else if (length(nmethod) >1) {
## if there are fractions, report addition information.
processout <- rbind(processout, c("Normalization : normalization with global standards protein - okay"))
}
write.table(processout, file=finalfile, row.names=FALSE)
}
## ----------------------------------------------------------- ##
## after normalization, zero intensity could be negative
## if abundance became less than zero, after normalization
## Clamp negative normalized abundances to zero.
work[!is.na(work$ABUNDANCE) & work$ABUNDANCE < 0, "ABUNDANCE"] <- 0
## if abundance become greater than zero, after normalization.
## hard to know how much higher, so, use intensity value, which is not used for noramlization
## INTENSITY==1 marks original zeros (see the quantile branch above), so force
## their ABUNDANCE back to 0.
work[!is.na(work$INTENSITY) & work$INTENSITY == 1, "ABUNDANCE"] <- 0
## ----------------------------------------------------------- ##
## if there are multiple method, need to merge after normalization + before feature selection ##
if ( length(unique(work$FRACTION)) > 1 ){
## check any features measured across all runs.
## use the subset of data without missing values
## here 'INTENSITY' is used, instead of 'ABUNDANCE'
tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
check.multiple.run <- xtabs(~ FEATURE + FRACTION, tmp)
check.multiple.run.TF <- check.multiple.run != 0
## Count, per feature, in how many fractions it was observed.
check.multiple.run.feature <- apply(check.multiple.run.TF, 1, sum)
## each feature should be measured only in one method
overlap.feature <- names(check.multiple.run.feature[check.multiple.run.feature > 1 ])
## It should be zero overlap.feature.
## however, this is for double-check.
## If there are overlapped feature, it means something not works well above filtering.
if( length(overlap.feature) > 0 ){
message(paste0("** Please check the listed featurues (",
paste(overlap.feature, collapse=", "),
") \n Those features are measured across all fractionations."))
processout <- rbind(processout,
c( paste0("** Please check the listed featurues (",
paste(overlap.feature, collapse=", "),
") Those features are measured across all fractionations.
Please keep only one intensity of listed features among fractionations from one sample.")))
write.table(processout, file=finalfile, row.names=FALSE)
stop("Please keep only one intensity of listed features among fractinations from one sample. \n")
}
## ----------------------------------------------------------- ##
## merge ##
## get which Run id should be merged
## decide which two runs should be merged
## With technical replicates, runs from the same sample are matched by
## (group, subject, tech replicate) across fractions.
if( any(is.element(colnames(work), 'TECHREPLICATE')) ) {
runid.multiple <- unique(work[, c('GROUP_ORIGINAL',
'SUBJECT_ORIGINAL',
'RUN',
'originalRUN',
'FRACTION',
'TECHREPLICATE')])
## if there are technical replicates from the same group and subject, can't match.
## dcast() errors (caught via try) when the cast is not uniquely valued.
run.match <- try(reshape2::dcast(GROUP_ORIGINAL + SUBJECT_ORIGINAL + TECHREPLICATE ~ FRACTION,
data=runid.multiple, value.var = 'originalRUN'), silent=TRUE)
## Assign a merged run id per sample and drop redundant fraction rows.
## Idiom fix: test try() failures with inherits(), not class() == (class()
## can be length > 1, e.g. data.frame results from dcast).
if (inherits(run.match, "try-error")) {
    processout <- rbind(processout,c( "*** error : can't figure out which multiple runs come from the same sample."))
    write.table(processout, file=finalfile, row.names=FALSE)
    stop("*** error : can't figure out which multiple runs come from the same sample.")
} else {
    work$newRun <- NA
    run.match$GROUP_ORIGINAL <- as.character(run.match$GROUP_ORIGINAL)
    run.match$SUBJECT_ORIGINAL <- as.character(run.match$SUBJECT_ORIGINAL)
    ## One merged run id per matched sample: "<group>_<subject>_<techrep>_<run>_merged"
    for (k in seq_len(nrow(run.match))) {
        work[which(work$originalRUN %in%
                       run.match[k, 4:ncol(run.match)]), 'newRun'] <- paste(paste(run.match[k, 1:4], collapse = "_"), 'merged', sep="_")
    }
    ## remove extra run NAs
    tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
    na.count <- reshape2::dcast(FEATURE ~ FRACTION, data=tmp, fun.aggregate=length, value.var='ABUNDANCE')
    na.count.long <- melt(na.count, id.vars=c('FEATURE'))
    na.count.long <- na.count.long[na.count.long$value == length(unique(work$newRun)), ]
    na.count.long$tmp <- paste(na.count.long$FEATURE, na.count.long$variable, sep="_")
    work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
    ## BUGFIX: the original 'work[-which(...), ]' drops EVERY row when which()
    ## returns integer(0) (i.e. nothing to remove); a negated logical mask is
    ## equivalent when matches exist and keeps all rows when none do.
    work <- work[!(work$tmp %in% na.count.long$tmp), ]
    ##
    work$originalRUN <- work$newRun
    ## update RUN based on new originalRUN
    work$RUN <- work$originalRUN
    work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
    ## drop the helper columns (logical mask for the same empty-index safety)
    work <- work[, !(colnames(work) %in% c('tmp','newRun'))]
}
} else { ## Fraction, but no tech replicate
## Runs from the same sample are matched by (group, subject) across fractions.
runid.multiple <- unique(work[, c('GROUP_ORIGINAL',
'SUBJECT_ORIGINAL',
'RUN',
'originalRUN',
'FRACTION')])
## if there are technical replicates from the same group and subject, can't match.
## dcast() errors (caught via try) when the cast is not uniquely valued.
run.match <- try(reshape2::dcast(GROUP_ORIGINAL + SUBJECT_ORIGINAL ~ FRACTION,
data=runid.multiple, value.var = 'originalRUN'), silent=TRUE)
## Assign a merged run id per sample and keep, for each feature, only the
## fraction in which it was actually observed.
## Idiom fix: test try() failures with inherits(), not class() ==.
if (inherits(run.match, "try-error")) {
    processout <- rbind(processout,
                        c( "*** error : can't figure out which multiple runs come from the same sample."))
    write.table(processout, file=finalfile, row.names=FALSE)
    stop("*** error : can't figure out which multiple runs come from the same sample.")
} else {
    work$newRun <- NA
    run.match$GROUP_ORIGINAL <- as.character(run.match$GROUP_ORIGINAL)
    run.match$SUBJECT_ORIGINAL <- as.character(run.match$SUBJECT_ORIGINAL)
    ## One merged run id per matched sample: "<group>_<subject>_<run>_merged"
    for (k in seq_len(nrow(run.match))) {
        work[which(work$originalRUN %in%
                       run.match[k, 3:ncol(run.match)]), 'newRun'] <- paste(paste(run.match[k, 1:3],
                                                                                 collapse = "_"), 'merged', sep="_")
    }
    ## remove extra run NAs or less than zero
    ## because the goal is to find the one fraction should be used for each feature.
    tmp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE > 0, ]
    ## find which fraction should be used for each feature
    select.fraction <- tmp %>% group_by(FEATURE, FRACTION) %>% summarise(ncount = n())
    ## check : test <- select.fraction %>% group_by(FEATURE) %>% summarise(nfeature = n())
    ## it can be less than # of runs, if there are any missing
    ## just in case that there are zero runs, let's check and remove.
    select.fraction <- select.fraction %>% filter(ncount != 0)
    select.fraction$tmp <- paste(select.fraction$FEATURE, select.fraction$FRACTION, sep="_")
    ## then keep one fraction for each feature
    work$tmp <- paste(work$FEATURE, work$FRACTION, sep="_")
    work <- work[which(work$tmp %in% select.fraction$tmp), ]
    ## new run has merged run id
    ## original run id can be different by fraction
    ## now fraction information from run will be removed.
    work$originalRUN <- work$newRun
    ## update RUN based on new originalRUN
    work$RUN <- work$originalRUN
    work$RUN <- factor(work$RUN, levels=unique(work$RUN), labels=seq(1, length(unique(work$RUN))))
    ## drop helper columns; logical mask avoids the empty negative-index pitfall
    work <- work[, !(colnames(work) %in% c('tmp','newRun'))]
}
}
}
#Below two lines were merely for in-house testing and comparisons when needed
#work.NoImpute <- work
#AbundanceAfterImpute <- .Imputation(work, cutoffCensored, censoredInt, remove50missing, MBimpute, original_scale)
## ------------- ##
## how to decide censored or not
## ------------- ##
### If imputation=TRUE and there is any value for maxQuantileforCensored, apply cutoff for censored missing
## Mark low/zero/NA intensities as censored (left-censored missing) so the TMP
## summarization can impute them. When maxQuantileforCensored is given, a
## data-driven lower cutoff is derived from the abundance distribution;
## otherwise only the censoredInt convention ('0' or 'NA') is applied.
## BUGFIX throughout: scalar conditions in 'if' used vector '&'. With
## censoredInt = NULL, '!is.null(censoredInt) & censoredInt == "0"' evaluates
## to logical(0) and if() errors ("argument is of length zero"); '&&'
## short-circuits on the NULL check. Same for the bare 'if(censoredInt==...)'
## tests in the else branch below. All runtime strings are unchanged.
if ( summaryMethod == "TMP" && MBimpute ) {
    work$LABEL <- factor(work$LABEL)
    label <- nlevels(work$LABEL)==2
    work$censored <- FALSE
    ## if intensity = 1, but abundance > cutoff after normalization, it also should be censored.
    if( !is.null(maxQuantileforCensored) ) {
        ### label-free
        if( !label ){
            ### calculate outlier cutoff
            ## only consider intensity > 1
            tmp <- work[!is.na(work$INTENSITY) & work$INTENSITY > 1, 'ABUNDANCE']
            ## or
            #tmp <- work[!is.na(work$INTENSITY), 'ABUNDANCE']
            log2int.prime.quant <- quantile(tmp, prob=c(0.01, 0.25, 0.5, 0.75, maxQuantileforCensored), na.rm = TRUE)
            iqr <- log2int.prime.quant[4] - log2int.prime.quant[2]
            ### need to decide the multiplier from high intensities
            multiplier <- (log2int.prime.quant[5] - log2int.prime.quant[4])/iqr
            cutoff.lower <- (log2int.prime.quant[2] - multiplier * iqr)
            work[!is.na(work$INTENSITY) &
                     work$ABUNDANCE < cutoff.lower, 'censored'] <- TRUE
            message(paste('** Log2 intensities under cutoff =',
                          format(cutoff.lower, digits=5),
                          ' were considered as censored missing values.'))
            processout <- rbind(processout,
                                c(paste('** Log2 intensities under cutoff =',
                                        format(cutoff.lower, digits=5),
                                        ' were considered as censored missing values.')))
            write.table(processout, file=finalfile, row.names=FALSE)
            ## if censoredInt == '0, and cutoff is negative, still zero should becensored
            if ( !is.null(censoredInt) && censoredInt == "0" && cutoff.lower <= 0 ) {
                work[!is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
                work[!is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
                message(paste('** Log2 intensities = 0 were considered as censored missing values.'))
                processout <- rbind(processout,
                                    c(paste('** Log2 intensities = 0 were considered as censored missing values.')))
                write.table(processout, file=finalfile, row.names=FALSE)
            }
            ## if censoredInt == NA, original NA also shoule be 'censored'
            if (!is.null(censoredInt) && censoredInt == "NA") {
                work[is.na(work$INTENSITY), 'censored'] <- TRUE
                message(paste('** Log2 intensities = NA were considered as censored missing values.'))
                processout <- rbind(processout, c('** Log2 intensities = NA were considered as censored missing values.'))
                write.table(processout, file=finalfile, row.names=FALSE)
            }
        }
        ### labeled : only consider light. Assume that missing in heavy is random.
        if( label ){
            work.tmp <- work[which(work$LABEL %in% 'L'), ]
            ### calculate outlier cutoff
            ## only consider intensity > 1
            tmp <- work.tmp[!is.na(work.tmp$INTENSITY) & work.tmp$INTENSITY > 1, 'ABUNDANCE']
            log2int.prime.quant <- quantile(tmp, prob=c(0.01, 0.25, 0.5, 0.75, maxQuantileforCensored), na.rm = TRUE)
            iqr <- log2int.prime.quant[4] - log2int.prime.quant[2]
            ### need to decide the multiplier from high intensities
            multiplier <- (log2int.prime.quant[5] - log2int.prime.quant[4])/iqr
            cutoff.lower <- (log2int.prime.quant[2] - multiplier * iqr)
            #work$censored <- FALSE
            work[work$LABEL == 'L' &
                     !is.na(work$INTENSITY) &
                     work$ABUNDANCE < cutoff.lower, 'censored'] <- TRUE
            message(paste('** Log2 endogenous intensities under cutoff =',
                          format(cutoff.lower, digits=5),
                          ' were considered as censored missing values.'))
            processout <- rbind(processout,
                                c(paste('** Log2 endogenous intensities under cutoff =',
                                        format(cutoff.lower, digits=5),
                                        ' were considered as censored missing values.')))
            write.table(processout, file=finalfile, row.names=FALSE)
            ## if censoredInt == '0, and cutoff is negative, still zero should becensored
            if ( !is.null(censoredInt) && censoredInt == "0" && cutoff.lower <= 0 ) {
                work[work$LABEL == 'L' &
                         !is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
                work[work$LABEL == 'L' &
                         !is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
                message(paste('** Log2 endogenous intensities = 0 were considered as censored missing values.'))
                processout <- rbind(processout,
                                    c(paste('** Log2 endogenous intensities = 0 were considered as censored missing values.')))
                write.table(processout, file=finalfile, row.names=FALSE)
            }
            ## if censoredInt == NA, original NA also shoule be 'censored'
            if (!is.null(censoredInt) && censoredInt == "NA") {
                work[work$LABEL == 'L' &
                         is.na(work$INTENSITY), 'censored'] <- TRUE
                message(paste('** Log2 endogenous intensities = NA were considered as censored missing values.'))
                processout <- rbind(processout,
                                    c(paste('** Log2 endogenous intensities = NA were considered as censored missing values.')))
                write.table(processout, file=finalfile, row.names=FALSE)
            }
        }
    } else { ## will MBimpute, but not apply algorithm for cutoff
        ## BUGFIX: bare 'if(censoredInt == ...)' crashed when censoredInt=NULL.
        if (!is.null(censoredInt) && censoredInt == '0') {
            work[work$LABEL == 'L' & !is.na(work$INTENSITY) & work$INTENSITY == 1, 'censored'] <- TRUE
            work[work$LABEL == 'L' & !is.na(work$ABUNDANCE) & work$ABUNDANCE <= 0, 'censored'] <- TRUE
        }
        if (!is.null(censoredInt) && censoredInt == 'NA') {
            work[work$LABEL == 'L' & is.na(work$ABUNDANCE), 'censored'] <- TRUE
        }
    }
}
## ------------- ##
## featureSubset ##
## ------------- ##
## !! need to decide how to present : keep original all data and make new column to mark, or just present selected subset
if (featureSubset == "all") {
## No feature filtering; just log the choice.
message("** Use all features that the dataset origianally has.")
processout <- rbind(processout, c("** Use all features that the dataset origianally has."))
write.table(processout, file=finalfile, row.names=FALSE)
}
if (featureSubset == "highQuality") {
### v3.15.2 (2019/04/28) : by Tsung-Heng
message("** Flag uninformative feature and outliers by feature selection algorithm.")
processout <- rbind(processout, c("** Flag uninformative feature and outliers by feature selection algorithm."))
write.table(processout, file=finalfile, row.names=FALSE)
## flag_noninf_data_nbftr() is defined elsewhere in the package; it annotates
## 'work' (feature_quality / is_outlier) rather than removing rows here.
work <- flag_noninf_data_nbftr(work)
# work <- flag_noninf_data(work)
#if(remove_uninformative_feature_outlier){
### for heavy outlier, always need to replace with NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'H', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'H', 'ABUNDANCE'] <- NA
### replace with censored missing
#if (!is.null(censoredInt) & censoredInt == "0") {
### [TEST] ###
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- 0
# work[work$is_outlier & work$LABEL == 'L', 'censored'] <- TRUE
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- 0
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- 0
### [TEST] ###
#} else { ## if censoredInt= NA or null, replace with NA
### [TEST] ###
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'censored'] <- TRUE
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$feature_quality == 'Noninformative' & work$LABEL == 'L', 'ABUNDANCE'] <- NA
# work[work$is_outlier & work$LABEL == 'L', 'ABUNDANCE'] <- NA
### [TEST] ###
#}
# message("** Filtered out noninformative feature and outliers.")
# processout <- rbind(processout, c("** Filtered out noninformative feature and outliers."))
# write.table(processout, file=finalfile, row.names=FALSE)
#}
### end : v3.15.2 (2019/04/28) : by Tsung-Heng
}
## Keep only the 3 features with the highest mean log2-intensity per protein;
## other features are flagged via work$remove (not deleted here).
if (featureSubset == "top3") {
    message("** Use top3 features that have highest average of log2(intensity) across runs.")
    processout <- rbind(processout, c("** Use top3 features that have highest average of log2(intensity) across runs."))
    write.table(processout, file=finalfile, row.names=FALSE)
    ## INTENSITY vs ABUNDANCE? [THT: make more sense to use ABUNDANCE]
    ## how to decide top3 for DIA?
    work$remove <- FALSE
    worktemp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE != 0, ]
    ## updated on 2019.08.09, due to big memory consumption for lapply and unlist
    #temp1 <- aggregate(INTENSITY~PROTEIN+FEATURE, data=work, function(x) mean(x, na.rm=TRUE))
    #temp2 <- split(temp1, temp1$PROTEIN)
    #temp3 <- lapply(tmp2, function(x) {
    # x <- x[order(x$INTENSITY, decreasing=TRUE), ]
    # x <- x$FEATURE[1:3]
    # })
    #selectfeature <- unlist(temp3, use.names=FALSE)
    temp1 <- worktemp %>% group_by(PROTEIN, FEATURE) %>%
        summarize(mean = mean(INTENSITY, na.rm = TRUE)) %>%
        group_by(PROTEIN) %>%
        filter(row_number(desc(mean)) <= 3) ## updated on 2019.08.15, in order to get first row if there are ties.
    #top_n(3)
    selectfeature <- temp1$FEATURE
    selectfeature <- selectfeature[!is.na(selectfeature)]
    ## end 2019.08.09
    ## get subset
    ## BUGFIX: 'work[-which(...), "remove"] <- TRUE' flags NO rows when which()
    ## returns integer(0) (selectfeature empty); the logical mask flags all
    ## non-selected rows in every case and is equivalent otherwise.
    work[!(work$FEATURE %in% selectfeature), 'remove'] <- TRUE
}
## Keep only the n_top_feature features with the highest mean log2-intensity
## per protein; other features are flagged via work$remove (not deleted here).
if (featureSubset == "topN") {
    ## check whether there is the input for 'N'
    message(paste0("** Use top", n_top_feature, " features that have highest average of log2(intensity) across runs."))
    processout <- rbind(processout, c(paste0("** Use top", n_top_feature,
                                             " features that have highest average of log2(intensity) across runs.")))
    write.table(processout, file=finalfile, row.names=FALSE)
    ## INTENSITY vs ABUNDANCE? [THT: make more sense to use ABUNDANCE]
    ## how to decide top3 for DIA?
    work$remove <- FALSE
    worktemp <- work[!is.na(work$ABUNDANCE) & work$ABUNDANCE != 0, ]
    ## updated on 2019.08.09, due to big memory consumption for lapply and unlist
    #temp1 <- aggregate(INTENSITY ~ PROTEIN+FEATURE, data=worktemp, function(x) mean(x, na.rm=TRUE))
    #temp2 <- split(temp1, temp1$PROTEIN)
    #temp3 <- lapply(temp2, function(x) {
    # x <- x[order(x$INTENSITY, decreasing=TRUE), ]
    # x <- x$FEATURE[1:n_top_feature]
    #})
    #selectfeature <- unlist(temp3, use.names=FALSE)
    temp1 <- worktemp %>% group_by(PROTEIN, FEATURE) %>%
        summarize(mean = mean(INTENSITY, na.rm = TRUE)) %>%
        group_by(PROTEIN) %>%
        filter(row_number(desc(mean)) <= n_top_feature) ## updated on 2019.08.15, in order to get first row if there are ties.
    #top_n(n_top_feature)
    selectfeature <- temp1$FEATURE
    selectfeature <- selectfeature[!is.na(selectfeature)]
    ## end 2019.08.09
    ## get subset
    ## BUGFIX: 'work[-which(...), "remove"] <- TRUE' flags NO rows when which()
    ## returns integer(0); use a logical mask (equivalent otherwise).
    work[!(work$FEATURE %in% selectfeature), 'remove'] <- TRUE
}
## check missingness
## transitions are completely missing in at least one of the condition : missingness ##
if (nlevels(work$LABEL) == 1) {
#Use the data frame before imputation to summarize the missingness
all.work <- work
## test[g, f]  = # of NA abundances for feature f in condition g
## numObs[g, f] = total # of observations for feature f in condition g
test <- tapply(is.na(work[, "ABUNDANCE"]), work[, c("GROUP_ORIGINAL", "FEATURE")], function(x) sum(x, na.rm=TRUE))
numObs <- tapply(work[, "ABUNDANCE"], work[, c("GROUP_ORIGINAL", "FEATURE")], function(x) length(x))
## TRUE where a feature is entirely missing in a condition
test1 <- test == numObs
test2 <- apply(test1, 2, function(x) sum(x, na.rm=TRUE))
filterList <- names(test2)[test2 > 0]
final.decision <- ifelse(test2>0, 1, 0)
## NOTE(review): unlike the label-based branch below, this branch never
## assigns work$SuggestToFilter -- confirm whether that is handled downstream
## (outside this view) or is an omission.
}
## Label-based missingness summary: a feature is "subject to filter" in a
## condition when either its light or heavy channel has zero observations
## there. Flagged features get work$SuggestToFilter = 1 on all their rows.
if (nlevels(work$LABEL) == 2) {
    #Use the data frame before imputation to summarize the missingness
    ## first, remove NA
    all.work <- work # with all NA observations
    work.miss <- na.omit(work)
    ## draw table
    light <- subset(work.miss, LABEL == "L")
    heavy <- subset(work.miss, LABEL == "H")
    ## use FEATURE because the name of transition can be used in other peptide
    ## FEATURE/GROUP_ORIGINAL are factors, so both tables share the same
    ## (feature x condition) grid even when a channel lacks some features.
    count.light <- xtabs(~FEATURE+GROUP_ORIGINAL, light)
    count.heavy <- xtabs(~FEATURE+GROUP_ORIGINAL, heavy)
    count.light <- count.light==0
    count.heavy <- count.heavy==0
    count.light <- as.data.frame(count.light)
    count.heavy <- as.data.frame(count.heavy)
    ## summary of missingness
    ## decision[f, g] = 1 when either channel has no obs for feature f in
    ## condition g. Vectorized replacement of the original per-cell double
    ## loop (identical result, O(cells) without scalar indexing).
    decision <- count.light
    decision[] <- 0
    decision[as.matrix(count.light) | as.matrix(count.heavy)] <- 1
    final.decision <- apply(decision, 1, sum)
    ## assign "subject to filter" column
    work <- data.frame(work, "SuggestToFilter"=0)
    ## assign subject_to_filter=1 for entire transition (all rows of each
    ## flagged feature); vectorized equivalent of the original per-feature loop
    flagged.features <- names(final.decision)[final.decision != 0]
    work[work$FEATURE %in% flagged.features, "SuggestToFilter"] <- 1
}
## output : summary ##
## ---------------- ##
## output for label
processout <- rbind(processout, c(paste0(length(unique(work$LABEL)),
" level of Isotope type labeling in this experiment")))
write.table(processout, file=finalfile, row.names=FALSE)
temp <- data.frame("Summary of Features :")
colnames(temp) <- " "
rownames(temp) <- " "
print(temp)
summary.f <- matrix(NA,nrow=3)
summary.f[1] <- nlevels(work$PROTEIN)
temp <- unique(work[, c("PROTEIN", "PEPTIDE")])
temp1 <- xtabs(~PROTEIN, data=temp)
temp2 <- summary(as.numeric(temp1))
summary.f[2] <- paste(temp2["Min."], temp2["Max."], sep="-")
temp <- unique(work[, c("PEPTIDE", "FEATURE")])
temp1 <- xtabs(~PEPTIDE, data=temp)
temp2 <- summary(as.numeric(temp1))
summary.f[3] <- paste(temp2["Min."], temp2["Max."], sep="-")
colnames(summary.f) <- "count"
rownames(summary.f) <- c("# of Protein", "# of Peptides/Protein", "# of Transitions/Peptide")
print(as.data.frame(summary.f))
## output for process
processout <- rbind(processout, c("Summary of Features :"))
processout <- rbind(processout, c(paste(rownames(summary.f)[1]," : ", summary.f[1], sep="")))
processout <- rbind(processout, c(paste(rownames(summary.f)[2]," : ", summary.f[2], sep="")))
processout <- rbind(processout, c(paste(rownames(summary.f)[3]," : ", summary.f[3], sep="")))
write.table(processout, file=finalfile, row.names=FALSE)
## protein list with 1 feature
temp <- unique(work[, c("PROTEIN", "FEATURE")])
temp1 <- xtabs(~PROTEIN, data=temp)
temp2 <- as.data.frame(temp1[temp1 == 1])
if (nrow(temp2) > 0) {
if(nrow(temp2) > 1){
npro <- min(c(nrow(temp2), 10))
message("\n","** " , nrow(temp2),
" Proteins have only single transition : Consider excluding this protein from the dataset. (",
paste(temp2$PROTEIN[1:npro], collapse = ", "), " ...) \n")
} else {
message("\n","** " , nrow(temp2),
" Proteins have only single transition : Consider excluding this protein from the dataset. (",
rownames(temp2), ") \n")
}
}
temp <- data.frame("Summary of Samples :")
colnames(temp) <- " "
rownames(temp) <- " "
print(temp)
summary.s <- matrix(NA,ncol=nlevels(work$GROUP_ORIGINAL), nrow=3)
## # of MS runs
temp <- unique(work[, c("GROUP_ORIGINAL", "RUN")])
temp1 <- xtabs(~GROUP_ORIGINAL, data=temp)
summary.s[1,] <- temp1
## # of biological replicates
temp <- unique(work[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL")])
temp1 <- xtabs(~GROUP_ORIGINAL, data=temp)
summary.s[2,] <- temp1
## # of technical replicates
c.tech <- round(summary.s[1,] / (summary.s[2,] * length(unique(work$FRACTION))))
##summary.s[3,] <- ifelse(c.tech==1,0,c.tech)
summary.s[3,] <- c.tech
colnames(summary.s) <- unique(work$GROUP_ORIGINAL)
rownames(summary.s) <- c("# of MS runs","# of Biological Replicates", "# of Technical Replicates")
print(summary.s)
message("\n Summary of Missingness :\n" )
message(" # transitions are completely missing in at least one of the conditions : ", sum(final.decision!=0), "\n")
if (sum(final.decision!=0)!=0) {
tmp.final <- final.decision[final.decision != 0]
if( length(tmp.final) > 5 ){
message(" -> ", paste(names(tmp.final[1:5]),collapse = ", "), " ...")
} else {
message(" -> ", paste(names(tmp.final),collapse = ", "), " ...")
}
rm(tmp.final)
}
without <- xtabs(~RUN, work)
withall <- xtabs(~RUN, all.work)
run.missing <- without / withall
message("\n # run with 75% missing observations: ", sum(run.missing<0.25), "\n")
if (sum(run.missing<0.25)!=0) {
message(" -> ", paste("RUN", names(without[run.missing<0.25]), sep=" "))
}
## output process
processout <- rbind(processout, c("Summary of Missingness :"))
processout <- rbind(processout, c(paste0(" # transitions are completely missing in at least one of the conditions : ",
sum(final.decision!=0))))
if (sum(final.decision!=0)!=0){
tmp.final <- final.decision[final.decision != 0]
if( length(tmp.final) > 5 ){
processout <- rbind(processout," -> ", paste(names(tmp.final[1:5]), collapse = ", "), " ...")
} else {
processout <- rbind(processout," -> ", paste(names(tmp.final), collapse = ", "), " ...")
}
rm(tmp.final)
}
processout <- rbind(processout, c(paste0(" # run with 75% missing observations: ", sum(run.missing < 0.25))))
if (sum(run.missing < 0.25) != 0) {
processout <- rbind(processout, " -> ", paste("RUN", names(without[run.missing < 0.25]), sep=" "))
}
write.table(processout, file=finalfile, row.names=FALSE)
## check any protein has only light for labeled-experiment
if (nlevels(work$LABEL) == 2) {
temp <- unique(work[, c("PROTEIN", "LABEL")])
temp1 <- xtabs(~PROTEIN, data=temp)
if (any(temp1 != 2)) {
## check that is L or H
namepro <- names(temp1[temp1!=2])
for(j in 1:length(namepro)) {
if (unique(work[work$PROTEIN == namepro[j], "LABEL"]) == "L") {
message("\n *** ", namepro[j],
" has only endogeneous intensities in label-based experiment. Please check this protein or remove it.")
}
if (unique(work[work$PROTEIN == namepro[j], "LABEL"]) == "H") {
message("\n *** ", namepro[j],
" has only reference intensities in label-based experiment. Please check this protein or remove it.")
}
}
}
}
processout <- rbind(processout, c("Processing data for analysis is done. - okay"))
write.table(processout, file=finalfile, row.names=FALSE)
## get the summarization per subplot (per RUN)
## -------------------------------------------
message("\n == Start the summarization per subplot...")
rqresult <- try(.runQuantification(work, summaryMethod, equalFeatureVar,
cutoffCensored, censoredInt, remove50missing, MBimpute,
original_scale=FALSE, logsum=FALSE, featureSubset,
remove_uninformative_feature_outlier,
message.show=FALSE, clusters=clusters), silent=TRUE)
if (class(rqresult) == "try-error") {
message("*** error : can't summarize per subplot with ", summaryMethod, ".")
processout <- rbind(processout, c(paste0("error : can't summarize per subplot with ", summaryMethod, ".")))
write.table(processout, file=finalfile, row.names=FALSE)
rqall <- NULL
rqmodelqc <- NULL
workpred <- NULL
} else {
label <- nlevels(work$LABEL) == 2
if (sum(is.element(colnames(rqresult$rqdata), "RUN")) == 0) {
## logsum is summarization per subject
lab <- unique(work[, c("GROUP", "GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "SUBJECT_NESTED", "SUBJECT")])
if (label) {
lab <- lab[lab$GROUP != 0, ]
}
rqall <- merge(rqresult$rqdata, lab, by="SUBJECT_ORIGINAL")
} else {
lab <- unique(work[, c("RUN", "originalRUN", "GROUP", "GROUP_ORIGINAL",
"SUBJECT_ORIGINAL", "SUBJECT_NESTED", "SUBJECT")])
if (label) {
lab <- lab[lab$GROUP != 0, ]
}
rqall <- merge(rqresult$rqdata, lab, by="RUN")
}
rqall$GROUP <- factor(rqall$GROUP)
rqall$Protein <- factor(rqall$Protein)
rqmodelqc <- rqresult$ModelQC
#MC : can't use this predicted value.
#workpred <- rqresult$PredictedBySurvival
workpred <- NULL
message("\n == the summarization per subplot is done.")
processout <- rbind(processout, c(paste0("the summarization per subplot is done.- okay : ", summaryMethod)))
write.table(processout, file=finalfile, row.names=FALSE)
}
## return work data.frame and run quantification
#Align the run quantification data
if (any(is.element(colnames(rqall), "RUN"))) {
rqall <- rqall[order(rqall$Protein, as.numeric(as.character(rqall$RUN))), ]
rownames(rqall) <- NULL
}
#Mike: Below is for in-house verification occasionally
#processedquant <- list(ProcessedData=work.NoImpute, RunlevelData=rqall, SummaryMethod=summaryMethod, ModelQC=rqmodelqc, PredictBySurvival=workpred, ImputedData=AbundanceAfterImpute)
processedquant <- list(ProcessedData=work,
RunlevelData=rqall,
SummaryMethod=summaryMethod,
ModelQC=rqmodelqc,
PredictBySurvival=workpred)
return(processedquant)
}
########################################################
# Helper .combine function that lets each foreach iteration return multiple variables as parallel lists
# Combine corresponding elements of several lists, index by index.
#
# Intended as a custom `.combine` for foreach: `x` is the accumulated
# list-of-lists and every argument in `...` is another list whose i-th
# element is appended to the i-th element of `x`. Returns an unnamed list
# the same length as `x`.
resultsAsLists <- function(x, ...) {
  extras <- list(...)
  combined <- vector("list", length(x))
  for (idx in seq_along(x)) {
    combined[[idx]] <- c(x[[idx]], lapply(extras, function(item) item[[idx]]))
  }
  combined
}
########################################################
.runQuantification <- function(data, summaryMethod,
equalFeatureVar,
cutoffCensored, censoredInt,
remove50missing, MBimpute,
original_scale, logsum,
featureSubset,
remove_uninformative_feature_outlier,
message.show, clusters) {
##Since the imputation has been done before feature selection, delete the columns of censoring indicator to avoid imputing the same intensity again
#if(featureSubset == "highQuality") {
# data$cen <- NULL; data$pred <- NULL; data$INTENSITY <- 2^data$ABUNDANCE
#}
##If we want to impute again after the feature selection
#if(featureSubset == "highQuality" & ImputeAgain==TRUE) {
# data$ABUNDANCE <- data$ABUNDANCE.O
#}
data$LABEL <- factor(data$LABEL)
label <- nlevels(data$LABEL) == 2
# set ref which is distinguish reference and endogenous. any reference=0. endogenous is the same as RUN
if ( label ) {
data$ref <- 0
data$ref[data$LABEL != "H"] <- data$RUN[data$LABEL != "H"]
data$ref <- factor(data$ref)
}
## if there is 'remove' column (for topN or top3), remove TRUE
## v3.16.1 had error : no remove for remove column
## v3.16.2 fixes this but
if( any(is.element(colnames(data), 'remove')) ) {
data <- data[!data$remove, ]
}
### v3.15.2 (2019/04/29) by Meena
if( remove_uninformative_feature_outlier & any(is.element(colnames(data), 'feature_quality')) ) {
### v3.15.2 (2019/04/28) by Tsung-Heng
data[data$feature_quality == 'Uninformative', 'ABUNDANCE'] <- NA
data[data$is_outlier, 'ABUNDANCE'] <- NA
#data <- data[!(data$is_outlier | data$feature_quality == 'Noninformative'), ]
### end : v3.15.2 (2019/04/28) by Tsung-Heng
message("** Filtered out uninformative feature and outliers.")
}
### end : v3.15.2 (2019/04/29) by Meena
# for saving predicting value for impute option
predAbundance <- NULL
###################################
## method 1 : model based summarization
if (summaryMethod == "linear" & is.null(censoredInt)) {
data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
result <- NULL
dataafterfit <- NULL
for(i in 1: nlevels(data$PROTEIN)) {
sub <- data[data$PROTEIN==levels(data$PROTEIN)[i],]
sub$SUBJECT_NESTED <- factor(sub$SUBJECT_NESTED)
sub$FEATURE <- factor(sub$FEATURE)
sub$RUN <- factor(sub$RUN)
if (!label) {
temp <- data.frame(xtabs(~RUN, data=sub))
sub.result <- data.frame(Protein=rep(unique(sub$PROTEIN),
each=nlevels(sub$RUN)),
RUN=rep(c(levels(sub$RUN)),1),
LogIntensities=NA,
NumFeature=length(unique(sub$FEATURE)),
NumPeaks=temp$Freq)
} else {
sub$ref <- factor(sub$ref)
temp <- data.frame(xtabs(~ref, data=sub))
sub.result <- data.frame(Protein=rep(levels(data$PROTEIN)[i],each=nlevels(sub$ref)),RUN=rep(c(levels(sub$ref)[-1],"Ref"),1),LogIntensities=NA, NumFeature=length(unique(sub$FEATURE)),NumPeaks=c(temp[-1,"Freq"],temp[1,"Freq"]))
}
singleFeature <- .checkSingleFeature(sub)
singleSubject <- .checkSingleSubject(sub)
TechReplicate <- .checkTechReplicate(sub) ## use for label-free model
##### fit the model
#if (message.show) {
message(paste("Getting the summarization per subplot for protein ",unique(sub$PROTEIN), "(",i," of ",length(unique(data$PROTEIN)),")"))
#}
fit <- try(.fit.quantification.run(sub, singleFeature, singleSubject, TechReplicate, labeled=label, equalFeatureVar), silent=TRUE)
if (class(fit)=="try-error") {
message("*** error : can't fit the model for ", levels(data$PROTEIN)[i])
result <- rbind(result, sub.result)
if (nrow(sub)!=0) {
sub$residuals <- NA
sub$fitted <- NA
}
} else {
if (class(fit)=="lm") {
cf <- summary(fit)$coefficients
}else{
cf <- fixef(fit)
}
# calculate sample quantification for all levels of sample
a=1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification(fit,contrast.matrix,sub, labeled=label)
if (class(fit)=="lm") {
sub.result[a,3] <- .estimableFixedQuantification(cf,contrast)
} else {
sub.result[a,3] <- .estimableRandomQuantification(cf,contrast)
}
a=a+1
}
## for label-based case, need reference quantification
if (label) {
contrast <- .make.contrast.run.quantification.reference(fit,contrast.matrix,sub)
if (class(fit)=="lm") {
sub.result[a, 3] <- .estimableFixedQuantification(cf,contrast)
}else{
sub.result[a, 3] <- .estimableRandomQuantification(cf,contrast)
}
}
result <- rbind(result, sub.result)
if (class(fit)=="lm") { ### lm model
sub$residuals <- fit$residuals
sub$fitted <- fit$fitted.values
} else { ### lmer model
sub$residuals <- resid(fit)
sub$fitted <- fitted(fit)
}
dataafterfit <- rbind(dataafterfit,sub)
}
} ## end-loop for each protein
} ## for linear model summary
###################################
## Method 2 : Tukey Median Polish
if (summaryMethod == "TMP") {
#data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
result <- NULL
## if cluster available,
if(!is.null(clusters)){
## create cluster for paralleled workflow
message(paste0("Cluster Size: ", clusters,"\n"))
registerDoSNOW(makeCluster(clusters, type = "SOCK"))
# for(i in 1: nlevels(data$PROTEIN)) {
pb <- txtProgressBar(max = nlevels(data$PROTEIN), style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
MS_results <- foreach(i=1: nlevels(data$PROTEIN),
.combine='resultsAsLists',
.options.snow = opts,
.multicombine=TRUE,
.init=list(list(), list())) %dopar% {
sub <- data[data$PROTEIN==levels(data$PROTEIN)[i], ]
sub.pro.id <- levels(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization by Tukey's median polish per subplot for protein ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
##### how to decide censored or not
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- 0
sub$cen <- ifelse(sub$censored, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- NA
sub$cen <- ifelse(sub$censored, 0, 1)
}
}
}
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
}
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
### 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## remove features which have only 1 measurement.
namefeature1 <- names(countfeature)[countfeature == 1]
if (length(namefeature1) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature1), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because features have only one measurement across MS runs."))
# next()
return(NULL)
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## check one more time
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE ==0)) ) {
message(paste("After removing features which has only 1 measurement, Can't summarize for ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
# next()
return(NULL)
}
## remove run which has no measurement at all
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
### 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
count <- aggregate(ABUNDANCE ~ RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (remove50missing) {
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
}
}
numFea <- xtabs(~RUN, subtemp) ## RUN or run.label?
numFea <- numFea/length(unique(subtemp$FEATURE))
numFea <- numFea <= 0.5
removerunid <- names(numFea)[numFea]
## if all measurements are NA,
if (length(removerunid)==length(numFea)) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all runs have more than 50% NAs and are removed with the option, remove50missing=TRUE."))
# next()
return(NULL)
}
}
### check whether we need to impute or not.
if (sum(sub$cen == 0) > 0) {
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0,]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE~run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
if (MBimpute) {
if(!label){ ## label-free
if (nrow(sub[sub$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub[!is.na(sub$ABUNDANCE),]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
}else{
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
}else{
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
# get predicted value from survival
predicted <- predict(fittest, newdata=sub, type="response")
sub <- data.frame(sub, pred=ifelse(sub$censored & sub$LABEL == "L", predicted, NA))
# the replace censored value with predicted value
sub[sub$cen == 0, "ABUNDANCE"] <- sub[sub$cen == 0, "pred"]
# save predicted value
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub, type="response"))
}
} else { ## label-based
# only endogenous will be imputed
sub.h <- sub[sub$LABEL == 'H', ]
sub.l <- sub[sub$LABEL == 'L', ]
if (nrow(sub.l[sub.l$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub.l[!is.na(sub.l$ABUNDANCE),]
countdf <- nrow(subtemp)<(length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub.l$FEATURE))==1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
}else{
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
}else{
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub.l, dist='gaussian')
}
}
# get predicted value from survival
# sub.l <- data.frame(sub.l, pred=predict(fittest, newdata=sub.l, type="response"))
predicted <- predict(fittest, newdata=sub.l, type="response")
sub.l <- data.frame(sub.l, pred=ifelse(sub.l$censored & sub.l$LABEL == "L", predicted, NA))
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub.l, type="response"))
# the replace censored value with predicted value
sub.l[sub.l$cen == 0, "ABUNDANCE"] <- sub.l[sub.l$cen == 0, "pred"]
sub.h$pred <- NA
## for label-based, need to merge again
sub <- rbind(sub.h, sub.l)
}
}
}
}
## then, finally remove NA in abundance
sub <- sub[!is.na(sub$ABUNDANCE), ]
if (nlevels(sub$FEATURE) > 1) { ## for more than 1 features
if (!label) { ## label-free
data_w <- reshape2::dcast(RUN ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$RUN
data_w <- data_w[, -1]
data_w[data_w == 1] <- NA
if (!original_scale) {
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
## if fractionated sample, need to get per sample run
## ?? if there are technical replicates, how to match sample and MS run for different fractionation??
#if( length(unique(sub$METHOD)) > 1 ) {
# runinfo <- unique(sub[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "RUN", "METHOD")])
# runinfo$uniquesub <- paste(runinfo$GROUP_ORIGINAL, runinfo$SUBJECT_ORIGINAL, sep="_")
#}
} else { # original_scale
data_w <- 2^(data_w)
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
tmpresult <- log2(tmpresult)
}
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[!is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
subtemp$RUN <- factor(subtemp$RUN, levels = rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
subtempimpute$RUN <- factor(subtempimpute$RUN, levels = rownames(data_w))
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = tmpresult,
RUN = names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtemp$RUN <- factor(subtemp$RUN, levels =rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
LogIntensities=tmpresult,
RUN=names(tmpresult),
NumMeasuredFeature = as.vector(numFea),
MissingPercentage=as.vector(numFeaPercentage),
more50missing=numFeaTF)
}
# result <- rbind(result, sub.result)
} else { ## labeled
data_w = reshape2::dcast(run.label ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$run.label
data_w <- data_w[, -1]
#data_w[data_w==1] <- NA
meddata <- medpolish(data_w, na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
reformresult <- data.frame(tmpresult)
end <- nchar(rownames(reformresult))
reformresult$LABEL <- substr(rownames(reformresult), end, end)
reformresult$RUN <- substr(rownames(reformresult), 1, end-2)
colnames(reformresult)[1] <- "ABUNDANCE"
## now single feature, adjust reference feature difference
h <- reformresult[reformresult$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
reformresult.logical <- reformresult$RUN == unique(h$RUN)[k]
reformresult.idx <- which(reformresult.logical)
reformresult[reformresult.idx, "ABUNDANCE"] <- reformresult[reformresult.idx, "ABUNDANCE"]-reformresult[reformresult.logical & reformresult$LABEL=="H","ABUNDANCE"]+allmed
}
reformresult <- reformresult[reformresult$LABEL == "L", ]
subtemp <- reformresult[!is.na(reformresult$ABUNDANCE), ]
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
subtempimpute <- sub[sub$LABEL == "L" & is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[subtemp$LABEL == "L" & !is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF,
NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
LogIntensities = reformresult$ABUNDANCE,
RUN = reformresult$RUN,
NumMeasuredFeature = as.vector(numFea),
MissingPercentage = as.vector(numFeaPercentage),
more50missing = numFeaTF)
}
# --- Tail of the parallelized (foreach) per-protein summarization body ---
# Single-feature proteins skip median polish: the observed ABUNDANCE values are
# reported directly as the run-level summaries.
# result <- rbind(result, sub.result)
}
} else { ## single feature
if (label) { ## label-based
## single feature, adjust reference feature difference
## Shift each run's abundances so the heavy (reference) channel equals the
## overall median of the heavy channel; then keep only the light channel.
h <- sub[sub$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
subrun.logical <- sub$RUN == unique(h$RUN)[k]
subrun.idx <- which(subrun.logical)
sub[subrun.idx, "ABUNDANCE"] <- sub[subrun.idx, "ABUNDANCE"] - sub[subrun.logical & sub$LABEL == "H", "ABUNDANCE"]+allmed
}
sub <- sub[sub$LABEL == "L", ]
}
## single feature, use original values
subtemp <- sub[!is.na(sub$ABUNDANCE),]
if (!is.null(censoredInt)) {
## Per-run bookkeeping: subtempcount = rows with a real measurement,
## subtempimpute = rows whose reported value was imputed for a censored intensity.
if (censoredInt == "NA") {
subtempcount <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtempcount <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtempcount <- subtempcount[!is.na(subtempcount$ABUNDANCE) & subtempcount$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtempcount)
## Fraction of this protein's features missing in each run (>= 0.5 flags the run).
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein=subtemp$PROTEIN,
                         LogIntensities=subtemp$ABUNDANCE,
                         RUN=subtemp$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage=as.vector(numFeaPercentage),
                         more50missing=numFeaTF,
                         NumImputedFeature = as.vector(numimpute))
} else {
## No censoring requested: report counts only (no NumImputedFeature column).
subtempcount <- subtemp
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=subtemp$PROTEIN,
                         LogIntensities=subtemp$ABUNDANCE,
                         RUN=subtemp$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage=as.vector(numFeaPercentage),
                         more50missing=numFeaTF)
}
# result <- rbind(result, sub.result)
}
## Each foreach iteration returns both the per-run summary table and the
## predicted (imputed) abundances so they can be merged after the cluster run.
return(list(sub.result, predAbundance))
#return(list(sub.result))
} ## loop for proteins
close(pb)
#stopCluster(cl) # foreach autocloses
## Clean up the parallelized results
## MS_results[[1]] holds per-protein summary data.frames, MS_results[[2]] the
## corresponding predicted-abundance vectors; stitch both back together.
results.list <- list()
predAbundance.list <- list()
for(j in 1:length(MS_results[[1]])){
# deal with the "results" first
results.list[[j]] <- MS_results[[1]][[j]]
predAbundance.list[[j]] <- MS_results[[2]][[j]]
}
result <- do.call(rbind, results.list)
predAbundance <- do.call(c, predAbundance.list)
#predAbundance <- predAbundance[-which(duplicated(predAbundance))] # remove duplicates
dataafterfit <- NULL
} else {
##################
## no cluster
## --- Serial TMP branch: loop over proteins one at a time with a progress bar ---
pb <- txtProgressBar(max = nlevels(data$PROTEIN), style = 3)
for(i in 1: nlevels(data$PROTEIN)) {
sub <- data[data$PROTEIN == levels(data$PROTEIN)[i], ]
sub.pro.id <- levels(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization by Tukey's median polish per subplot for protein ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
## Composite keys used later for per-feature / per-run cutoff imputation.
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
### how to decide censored or not
## cen: survreg censoring indicator (0 = left-censored, 1 = observed).
## Censored abundances are reset to the censoring sentinel (0 or NA) so the
## cutoff / model-based imputation below starts from a clean state.
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- 0
sub$cen <- ifelse(sub$censored, 0, 1)
}
## 2. all censored missing
if (censoredInt == "NA") {
sub[sub$censored == TRUE, 'ABUNDANCE'] <- NA
sub$cen <- ifelse(sub$censored, 0, 1)
}
}
}
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove features which are completely NAs
## subtemp = observed (non-censored) light-channel rows, used to count
## measurements per feature.
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
## 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## remove features which have only 1 measurement.
namefeature1 <- names(countfeature)[countfeature == 1]
if (length(namefeature1) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature1), ]
if (nrow(sub) == 0) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because features have only one measurement across MS runs."))
next()
} else {
sub$FEATURE <- factor(sub$FEATURE)
}
}
## check one more time
## if all measurements are NA,
if ( nrow(sub) == (sum(is.na(sub$ABUNDANCE)) + sum(!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0)) ) {
message(paste("After removing features which has only 1 measurement, Can't summarize for ",
sub.pro.id, "(", i," of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
## remove features which are completely NAs
if ( MBimpute ) {
if (!is.null(censoredInt)) {
## 1. censored
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
## 2. all censored missing
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE), ]
}
}
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
}
count <- aggregate(ABUNDANCE ~ RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
## NOTE(review): scalar `if` condition built with `&` and a bare length() as
## truthiness; the parallel code path at the "linear" branch spells it
## `length(...) != 0` — behavior is the same, but confirm and unify style.
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (remove50missing) {
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
}
}
numFea <- xtabs(~RUN, subtemp) ## RUN or run.label?
numFea <- numFea/length(unique(subtemp$FEATURE))
## NOTE(review): `<= 0.5` removes runs with 50% or more features missing,
## i.e. the boundary case (exactly half observed) is removed too — confirm
## against the documented remove50missing contract.
numFea <- numFea <= 0.5
removerunid <- names(numFea)[numFea]
## if all measurements are NA,
if (length(removerunid)==length(numFea)) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all runs have more than 50% NAs and are removed with the option, remove50missing=TRUE."))
next()
}
}
## check whether we need to impute or not.
## --- cutoffCensored: replace censored values with a cutoff slightly below
## (0.99x) the minimum observed abundance, computed per feature, per run, or
## the smaller of the two. These cutoffs seed the survreg imputation below.
if (sum(sub$cen == 0) > 0) {
## 2. put minimum in feature level to NA
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each feature is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
## remove runs which has more than 50% missing values
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
## remove runs which has more than 50% missing values
## (removed BEFORE computing run minima, unlike minFeature above)
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
# sub[is.na(sub$ABUNDANCE) &
#    sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
## Uses the smaller of the per-feature and per-run cutoffs for each cell.
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$ABUNDANCE) & sub$censored &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE~run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(cut.fea$feature.label)) > 1) {
for(j in 1:length(unique(cut.fea$feature.label))) {
for(k in 1:length(unique(cut.run$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$ABUNDANCE) &
sub$ABUNDANCE == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$ABUNDANCE) & sub$ABUNDANCE == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
## --- Model-based imputation (MBimpute): fit a left-censored accelerated
## failure time model (survival::survreg, gaussian) and replace censored
## abundances with the model's fitted values. The cutoffs assigned above act
## as the censoring limits in Surv(..., type='left').
if (MBimpute) {
if(!label){ ## label-free
if (nrow(sub[sub$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub[!is.na(sub$ABUNDANCE),]
## Too few observations for FEATURE+RUN fixed effects -> drop FEATURE term.
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
## Fixed seed for reproducible imputation across calls.
set.seed(100)
### fit the model
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
# get predicted value from survival
#sub <- data.frame(sub, pred=predict(fittest, newdata=sub, type="response"))
## pred is kept only for censored light-channel rows; NA elsewhere.
predicted <- predict(fittest, newdata=sub, type="response")
sub <- data.frame(sub, pred=ifelse(sub$censored & sub$LABEL == "L", predicted, NA))
# the replace censored value with predicted value
sub[sub$cen == 0, "ABUNDANCE"] <- sub[sub$cen == 0, "pred"]
# save predicted value
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub, type="response"))
}
} else { ## label-based
# only endogenous will be imputed
## Heavy (reference) rows are held out of the fit; only the light channel
## gets model-based imputation, then the two are re-merged.
sub.h <- sub[sub$LABEL == 'H', ]
sub.l <- sub[sub$LABEL == 'L', ]
if (nrow(sub.l[sub.l$cen == 0, ]) > 0) {
## impute by survival model
subtemp <- sub.l[!is.na(sub.l$ABUNDANCE),]
countdf <- nrow(subtemp)<(length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
if (length(unique(sub.l$FEATURE))==1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub.l, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub.l, dist='gaussian')
}
}
# get predicted value from survival
# sub.l <- data.frame(sub.l, pred=predict(fittest, newdata=sub.l, type="response"))
predicted <- predict(fittest, newdata=sub.l, type="response")
sub.l <- data.frame(sub.l, pred=ifelse(sub.l$censored & sub.l$LABEL == "L", predicted, NA))
# predAbundance <- c(predAbundance,predict(fittest, newdata=sub, type="response"))
#predAbundance <- c(predict(fittest, newdata=sub.l, type="response"))
# the replace censored value with predicted value
sub.l[sub.l$cen == 0, "ABUNDANCE"] <- sub.l[sub.l$cen == 0, "pred"]
sub.h$pred <- NA
## for label-based, need to merge again
sub <- rbind(sub.h, sub.l)
}
}
}
}
## then, finally remove NA in abundance
sub <- sub[!is.na(sub$ABUNDANCE), ]
## --- Run-level summarization by Tukey's median polish (multi-feature case).
## Rows = runs, columns = features; the run summary is overall + row effect.
if (nlevels(sub$FEATURE) > 1) { ## for more than 1 features
if (!label) { ## label-free
data_w <- reshape2::dcast(RUN ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$RUN
data_w <- data_w[, -1]
## NOTE(review): abundance exactly equal to 1 is treated as missing here —
## presumably a sentinel from an earlier transformation step; confirm.
data_w[data_w == 1] <- NA
if (!original_scale) {
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
## if fractionated sample, need to get per sample run
## ?? if there are technical replicates, how to match sample and MS run for different fractionation??
#if( length(unique(sub$METHOD)) > 1 ) {
#    runinfo <- unique(sub[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "RUN", "METHOD")])
#    runinfo$uniquesub <- paste(runinfo$GROUP_ORIGINAL, runinfo$SUBJECT_ORIGINAL, sep="_")
#}
} else { # original_scale
## Polish on the intensity scale (2^abundance), then log2 the summary back.
data_w <- 2^(data_w)
meddata <- medpolish(data_w,na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
tmpresult <- log2(tmpresult)
}
# count # feature per run
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[!is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
## Re-level RUN to the wide-table rows so xtabs counts align with tmpresult.
subtemp$RUN <- factor(subtemp$RUN, levels = rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
subtempimpute$RUN <- factor(subtempimpute$RUN, levels = rownames(data_w))
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
                         LogIntensities = tmpresult,
                         RUN = names(tmpresult),
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage = as.vector(numFeaPercentage),
                         more50missing = numFeaTF,
                         NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[!is.na(sub$INTENSITY), ]
subtemp$RUN <- factor(subtemp$RUN, levels =rownames(data_w))
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
                         LogIntensities=tmpresult,
                         RUN=names(tmpresult),
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage=as.vector(numFeaPercentage),
                         more50missing=numFeaTF)
}
result <- rbind(result, sub.result)
} else { ## labeled
## Polish over run-label cells (run x channel), then subtract the heavy
## (reference) summary per run, re-centered at the overall heavy median.
data_w <- reshape2::dcast(run.label ~ FEATURE, data=sub, value.var='ABUNDANCE', keep=TRUE)
rownames(data_w) <- data_w$run.label
data_w <- data_w[, -1]
#data_w[data_w==1] <- NA
meddata <- medpolish(data_w, na.rm=TRUE, trace.iter = FALSE)
tmpresult <- meddata$overall + meddata$row
reformresult <- data.frame(tmpresult)
## run.label is "<RUN>_<LABEL>": the last character is the channel, the
## prefix (minus the "_") is the run id.
end <- nchar(rownames(reformresult))
reformresult$LABEL <- substr(rownames(reformresult), end, end)
reformresult$RUN <- substr(rownames(reformresult), 1, end-2)
colnames(reformresult)[1] <- "ABUNDANCE"
## now single feature, adjust reference feature difference
h <- reformresult[reformresult$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
reformresult.logical <- reformresult$RUN == unique(h$RUN)[k]
reformresult.idx <- which(reformresult.logical)
reformresult[reformresult.idx, "ABUNDANCE"] <- reformresult[reformresult.idx, "ABUNDANCE"]-reformresult[reformresult.logical & reformresult$LABEL=="H","ABUNDANCE"]+allmed
}
reformresult <- reformresult[reformresult$LABEL == "L", ]
subtemp <- reformresult[!is.na(reformresult$ABUNDANCE), ]
# count # feature per run
## NOTE(review): subtemp is immediately recomputed from `sub` below,
## overwriting the reformresult-based subtemp assigned just above — confirm
## the line above is intentionally dead.
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
subtempimpute <- sub[sub$LABEL == "L" & is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtemp <- subtemp[subtemp$LABEL == "L" & !is.na(subtemp$ABUNDANCE) & subtemp$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
                         LogIntensities = reformresult$ABUNDANCE,
                         RUN = reformresult$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage = as.vector(numFeaPercentage),
                         more50missing = numFeaTF,
                         NumImputedFeature = as.vector(numimpute))
} else {
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
numFea <- xtabs(~RUN, subtemp)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein = unique(sub$PROTEIN),
                         LogIntensities = reformresult$ABUNDANCE,
                         RUN = reformresult$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage = as.vector(numFeaPercentage),
                         more50missing = numFeaTF)
}
result <- rbind(result, sub.result)
}
## --- Single-feature case (serial branch): no median polish; the observed
## abundances are the run-level summaries. Mirrors the parallel-branch tail.
} else { ## single feature
if (label) { ## label-based
## single feature, adjust reference feature difference
## Re-center each run on the heavy channel's overall median, keep light only.
h <- sub[sub$LABEL == "H", ]
allmed <- median(h$ABUNDANCE, na.rm=TRUE)
for (k in 1:length(unique(h$RUN))) {
## ABUNDANCE is normalized
subrun.logical <- sub$RUN == unique(h$RUN)[k]
subrun.idx <- which(subrun.logical)
sub[subrun.idx, "ABUNDANCE"] <- sub[subrun.idx, "ABUNDANCE"] - sub[subrun.logical & sub$LABEL == "H", "ABUNDANCE"]+allmed
}
sub <- sub[sub$LABEL == "L", ]
}
## single feature, use original values
subtemp <- sub[!is.na(sub$ABUNDANCE),]
if (!is.null(censoredInt)) {
if (censoredInt == "NA") {
subtempcount <- sub[!is.na(sub$INTENSITY), ]
subtempimpute <- sub[is.na(sub$INTENSITY), ]
subtempimpute <- subtempimpute[!is.na(subtempimpute$ABUNDANCE), ]
}
if (censoredInt == "0") {
subtempcount <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY > 1, ] ## change at 2019. 10. 25
subtempcount <- subtempcount[!is.na(subtempcount$ABUNDANCE) & subtempcount$ABUNDANCE > 0, ] ## change at 2019. 10. 25
subtempimpute <- sub[!is.na(sub$INTENSITY) & sub$censored, ] ## change at 2019. 10. 25
}
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
numimpute <- xtabs(~RUN, subtempimpute)
sub.result <- data.frame(Protein=subtemp$PROTEIN,
                         LogIntensities=subtemp$ABUNDANCE,
                         RUN=subtemp$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage=as.vector(numFeaPercentage),
                         more50missing=numFeaTF,
                         NumImputedFeature = as.vector(numimpute))
} else {
subtempcount <- subtemp
numFea <- xtabs(~RUN, subtempcount)
numFeaPercentage <- 1 - numFea / length(unique(subtemp$FEATURE))
numFeaTF <- numFeaPercentage >= 0.5
sub.result <- data.frame(Protein=subtemp$PROTEIN,
                         LogIntensities=subtemp$ABUNDANCE,
                         RUN=subtemp$RUN,
                         NumMeasuredFeature = as.vector(numFea),
                         MissingPercentage=as.vector(numFeaPercentage),
                         more50missing=numFeaTF)
}
result <- rbind(result, sub.result)
}
## progress
setTxtProgressBar(pb, i)
} ## loop for proteins
close(pb)
dataafterfit <- NULL
}
}
###################################
## Method 3 : log sum
## retired on Aug 2 2016
###################################
## method 4 : survival model for censored missing values
## --- "linear" summarization with censoring: per protein, fit a left-censored
## gaussian survreg and compute run-level quantifications via contrasts.
## This first branch handles labeled (H/L) experiments; the model includes a
## `ref` term for the reference channel.
if (summaryMethod == "linear" & !is.null(censoredInt)) {
#data <- data[!is.na(data$ABUNDANCE),]
data$PROTEIN <- factor(data$PROTEIN)
data$RUN <- factor(data$RUN)
if (label) {
result <- NULL
for(i in 1:length(unique(data$PROTEIN))) {
sub <- data[data$PROTEIN==unique(data$PROTEIN)[i],]
sub.pro.id <- unique(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization for censored missing values per subplot for protein ",
sub.pro.id, "(", i, " of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
sub$feature.label <- paste(sub$FEATURE, sub$LABEL, sep="_")
sub$run.label <- paste(sub$RUN, sub$LABEL, sep="_")
## if all measurements are NA,
if (nrow(sub)==sum(is.na(sub$ABUNDANCE))) {
## NOTE(review): this message counts `datafeature$PROTEIN` while every
## sibling message uses `data$PROTEIN` — `datafeature` must come from the
## enclosing function's scope; confirm it exists and is the intended count.
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY), ]
count <- aggregate(ABUNDANCE~RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN))))) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (length(unique(sub$RUN)) == 1) {
message(paste("* Only 1 MS run in ", levels(data$PROTEIN)[i],
" has measurement. Can't summarize with censored intensities."))
next()
}
## remove features which are completely NAs or zero
subtemp <- sub[sub$LABEL == "L" & !is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
sub$FEATURE <- factor(sub$FEATURE)
}
##### how to decide censored or not
## cen: survreg indicator (0 = left-censored, 1 = observed), derived from
## the raw INTENSITY column here (not from a precomputed `censored` flag).
## 1. censored
if (censoredInt == "0") {
sub$cen <- ifelse(!is.na(sub$INTENSITY) & sub$INTENSITY == 0, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub$cen <- ifelse(is.na(sub$INTENSITY), 0, 1)
}
##### cutoffCensored
## 1. put minimum in protein level to NA
#if (cutoffCensored=="minEachProtein") {
#    if (censoredInt=="NA") {
#        cut <- min(sub$ABUNDANCE, na.rm=TRUE)
#        sub[is.na(sub$INTENSITY),"ABUNDANCE"] <- cut
#    }
#    if (censoredInt=="0") {
#        cut <- min(sub[!is.na(sub$INTENSITY) & sub$INTENSITY!=0,"ABUNDANCE"])
#        sub[!is.na(sub$INTENSITY) & sub$INTENSITY==0,"ABUNDANCE"] <- cut
#    }
#}
## 2. put minimum in feature level to NA
## Same 0.99*min cutoff scheme as the TMP branch, keyed on feature.label.
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$feature.label))) {
sub[is.na(sub$INTENSITY) & sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$feature.label))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$feature.label == cut$feature.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[is.na(sub$INTENSITY) & sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$run.label))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$run.label == cut$run.label[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ run.label, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$feature.label)) > 1) {
for(j in 1:length(unique(sub$feature.label))) {
for(k in 1:length(unique(sub$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j],cut.run$ABUNDANCE[k])
sub[is.na(sub$INTENSITY) & sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ feature.label, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## remove runs which has more than 50% missing values
## before removing, need to contribute min feature calculation
## NOTE(review): `removerunid` is only defined inside `remove50missing`
## blocks earlier in this function — confirm it is in scope when this
## branch runs with remove50missing=TRUE.
if (remove50missing) {
if (length(removerunid) != 0) {
sub <- sub[-which(sub$RUN %in% removerunid), ]
sub$RUN <- factor(sub$RUN)
}
}
cut.run <- aggregate(ABUNDANCE ~ run.label, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$feature.label)) > 1) {
for(j in 1:length(unique(sub$feature.label))) {
for(k in 1:length(unique(sub$run.label))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$feature.label == cut.fea$feature.label[j] &
sub$run.label == cut.run$run.label[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
## when number of measurement is less than df, error for fitting
subtemp <- sub[!is.na(sub$ABUNDANCE), ]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
set.seed(100)
### fit the model
## Labeled design: model includes `ref` (reference-channel term) alongside
## RUN, and FEATURE when there are enough observations.
if (length(unique(sub$FEATURE)) == 1) {
# with single feature, not converge, wrong intercept
# need to check
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN+ref,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN+ref,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN+ref,
data=sub, dist='gaussian')
}
}
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
                         RUN=rep(c(levels(sub$RUN)), 1),
                         LogIntensities=NA)
# get the parameters
cf <- summary(fittest)$coefficients
# calculate sample quantification for all levels of sample
## One indicator contrast per run; the quantification is the estimable
## linear combination of the survreg coefficients for that run.
a <- 1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification.Survival(fittest, contrast.matrix,sub, labeled=TRUE)
sub.result[a, 3] <- .estimableFixedQuantificationSurvival(cf, contrast)
a <- a+1
}
result <- rbind(result, sub.result)
}
## Reshape wide then back to long so every Protein x RUN cell exists
## (missing combinations become NA rows).
datamat <- reshape2::dcast( Protein ~ RUN, data=result, value.var='LogIntensities', keep=TRUE)
datamat <- melt(datamat, id.vars=c('Protein'))
colnames(datamat) <- c('Protein', 'RUN', 'LogIntensities')
result <- datamat
## --- Label-free branch of the "linear" censored summarization: same survreg
## scheme as the labeled branch, but keyed on FEATURE/RUN directly (no
## feature.label/run.label composites and no `ref` term in the model).
} else {
result <- NULL
for(i in 1:length(unique(data$PROTEIN))) {
sub <- data[data$PROTEIN == unique(data$PROTEIN)[i], ]
sub.pro.id <- unique(data$PROTEIN)[i]
if (message.show) {
message(paste("Getting the summarization for censored missing values per subplot for protein ",
sub.pro.id, "(", i, " of ", length(unique(data$PROTEIN)), ")"))
}
sub$FEATURE <- factor(sub$FEATURE)
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't summarize for ", sub.pro.id,
"(", i, " of ", length(unique(data$PROTEIN)),
") because all measurements are NAs."))
next()
}
## remove run which has no measurement at all
subtemp <- sub[!is.na(sub$INTENSITY), ]
count <- aggregate(ABUNDANCE~RUN, data=subtemp, length)
norun <- setdiff(unique(data$RUN), count$RUN)
if (length(norun) != 0 & length(intersect(norun, as.character(unique(sub$RUN)))) != 0) {
# removed NA rows already, if there is no overlapped run, error
sub <- sub[-which(sub$RUN %in% norun), ]
sub$RUN <- factor(sub$RUN)
}
if (length(unique(sub$RUN)) == 1) {
message(paste("* Only 1 MS run in ", levels(data$PROTEIN)[i],
" has measurement. Can't summarize with censored intensities."))
next()
}
## remove features which are (completely NAs or zero)
subtemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
countfeature <- xtabs(~FEATURE, subtemp)
namefeature <- names(countfeature)[countfeature == 0]
if (length(namefeature) != 0) {
sub <- sub[-which(sub$FEATURE %in% namefeature), ]
sub$FEATURE <- factor(sub$FEATURE)
}
if (nrow(sub) == 0) {
message(paste("* All measurements are NAs or only one measurement per feature in ",
levels(data$PROTEIN)[i], ". Can't summarize with censored intensities."))
next()
}
##### how to decide censored or not
## cen: survreg indicator (0 = left-censored, 1 = observed).
## 1. censored
if (censoredInt == "0") {
sub$cen <- ifelse(!is.na(sub$INTENSITY) & sub$INTENSITY == 0, 0, 1)
}
### 2. all censored missing
if (censoredInt == "NA") {
sub$cen <- ifelse(is.na(sub$INTENSITY), 0, 1)
}
## 2. put minimum in feature level to NA
## Same 0.99*min cutoff scheme as elsewhere, keyed on FEATURE / RUN.
if (cutoffCensored == "minFeature") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE ~ FEATURE, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$FEATURE))) {
sub[is.na(sub$INTENSITY) & sub$FEATURE == cut$FEATURE[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE ~ FEATURE, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$FEATURE))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$FEATURE == cut$FEATURE[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 3. put minimum in RUN to NA
if (cutoffCensored == "minRun") {
if (censoredInt == "NA") {
cut <- aggregate(ABUNDANCE~RUN, data=sub, function(x) min(x, na.rm=TRUE))
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$RUN))) {
sub[is.na(sub$INTENSITY) & sub$RUN == cut$RUN[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut <- aggregate(ABUNDANCE~RUN, data=subtemptemp, FUN=min)
## cutoff for each Run is little less than minimum abundance in a run.
cut$ABUNDANCE <- 0.99*cut$ABUNDANCE
for(j in 1:length(unique(cut$RUN))) {
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 & sub$RUN==cut$RUN[j], "ABUNDANCE"] <- cut$ABUNDANCE[j]
}
}
}
## 20150829 : 4. put minimum RUN and FEATURE
if (cutoffCensored == "minFeatureNRun") {
if (censoredInt == "NA") {
## cutoff for each feature is little less than minimum abundance in a run.
cut.fea <- aggregate(ABUNDANCE ~ FEATURE, data=sub, function(x) min(x, na.rm=TRUE))
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
## cutoff for each Run is little less than minimum abundance in a run.
cut.run <- aggregate(ABUNDANCE ~ RUN, data=sub, function(x) min(x, na.rm=TRUE))
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$FEATURE)) > 1) {
for(j in 1:length(unique(sub$FEATURE))) {
for(k in 1:length(unique(sub$RUN))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[is.na(sub$INTENSITY) & sub$FEATURE == cut.fea$FEATURE[j] &
sub$RUN == cut.run$RUN[k], "ABUNDANCE"] <- finalcut
}
}
}
# if single feature, not impute
}
if (censoredInt == "0") {
subtemptemp <- sub[!is.na(sub$INTENSITY) & sub$INTENSITY != 0, ]
cut.fea <- aggregate(ABUNDANCE ~ FEATURE, data=subtemptemp, FUN=min)
cut.fea$ABUNDANCE <- 0.99*cut.fea$ABUNDANCE
cut.run <- aggregate(ABUNDANCE ~ RUN, data=subtemptemp, FUN=min)
cut.run$ABUNDANCE <- 0.99*cut.run$ABUNDANCE
if (length(unique(sub$FEATURE)) > 1) {
for(j in 1:length(unique(sub$FEATURE))) {
for(k in 1:length(unique(sub$RUN))) {
# get smaller value for min Run and min Feature
finalcut <- min(cut.fea$ABUNDANCE[j], cut.run$ABUNDANCE[k])
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0 &
sub$FEATURE == cut.fea$FEATURE[j] & sub$RUN == cut.run$RUN[k], "ABUNDANCE"] <- finalcut
}
}
} else { # single feature
sub[!is.na(sub$INTENSITY) & sub$INTENSITY == 0, "ABUNDANCE"] <- cut.fea$ABUNDANCE
}
}
}
## when number of measurement is less than df, error for fitting
subtemp <- sub[!is.na(sub$ABUNDANCE), ]
countdf <- nrow(subtemp) < (length(unique(subtemp$FEATURE))+length(unique(subtemp$RUN))-1)
### fit the model
## NOTE(review): unlike the labeled branch and the MBimpute path, no
## set.seed(100) precedes this fit — confirm whether reproducibility of the
## fit matters here.
if (length(unique(sub$FEATURE)) == 1) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
if (countdf) {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ RUN,
data=sub, dist='gaussian')
} else {
fittest <- survival::survreg(survival::Surv(ABUNDANCE, cen, type='left') ~ FEATURE+RUN,
data=sub, dist='gaussian')
}
}
sub.result <- data.frame(Protein=unique(sub$PROTEIN),
                         RUN=rep(c(levels(sub$RUN)), 1),
                         LogIntensities=NA)
# get the parameters
cf <- summary(fittest)$coefficients
# calculate sample quantification for all levels of sample
## One indicator contrast per run, evaluated against the fitted coefficients.
a <- 1
for(j in 1:nlevels(sub$RUN)) {
contrast.matrix <- rep(0, nlevels(sub$RUN))
contrast.matrix[j] <- 1
contrast <- .make.contrast.run.quantification.Survival(fittest, contrast.matrix,sub, labeled=FALSE)
sub.result[a, 3] <- .estimableFixedQuantificationSurvival(cf, contrast)
a <- a+1
}
result <- rbind(result, sub.result)
}
## Widen to Protein x RUN so missing combinations materialize as NA cells.
datamat <- reshape2::dcast( Protein ~ RUN, data=result, value.var='LogIntensities', keep=TRUE)
datamat <- melt(datamat, id.vars=c('Protein'))
colnames(datamat) <- c('Protein','RUN','LogIntensities')
result <- datamat
}
dataafterfit <- NULL
}
###################################
## final result
finalout <- list(rqdata=result, ModelQC=dataafterfit, PredictedBySurvival=predAbundance)
return(finalout)
}
##########################################################################################
## updated v3
.fit.quantification.run <- function(sub, singleFeature, singleSubject, TechReplicate, labeled, equalFeatureVar) {
  ## Fit the run-level quantification model for one protein.
  ## The fixed-effect structure depends on (a) whether a reference channel is
  ## present (labeled experiment) and (b) whether there is more than one feature.
  ## Returns the fitted lm object, used downstream for run summarization.
  ## (singleSubject / TechReplicate are accepted for interface compatibility
  ## but do not affect the model chosen here.)
  if (labeled) {
    ## labeled-based case: include the reference-channel term
    if (singleFeature) {
      fit <- lm(ABUNDANCE ~ RUN + ref, data = sub)
    } else {
      fit <- lm(ABUNDANCE ~ FEATURE + RUN + ref, data = sub)
    }
  } else {
    ## label-free case; with a single feature the observed values are
    ## effectively the run quantification themselves
    if (singleFeature) {
      fit <- lm(ABUNDANCE ~ RUN, data = sub)
    } else {
      fit <- lm(ABUNDANCE ~ FEATURE + RUN, data = sub)
    }
  }
  ## optionally relax the equal-variance-per-feature assumption via one round
  ## of iteratively reweighted least squares (helper defined elsewhere in file)
  if (!equalFeatureVar) {
    fit <- .iter.wls.fit.model(data = sub, fit = fit, nrepeats = 1)
  }
  return(fit)
}
#############################################
# check whether there are multiple runs for a replicate
# if yes, normalization should be different way.
#############################################
.countMultiRun <- function(data) {
  ## Decide whether the runs in `data` come from fractionation (multiple MS
  ## runs per biological sample) rather than plain technical replication.
  ## Returns list(out = TRUE if multiple runs/fractions detected,
  ##              is.risky = TRUE if the overlap heuristic was inconclusive).
  ##
  ## NOTE: with a fully balanced design (NA filled in for every missing
  ## feature) fractionation cannot be detected from feature overlap alone,
  ## which is why an explicit FRACTION column takes precedence below.
  is.risky <- FALSE
  if ("FRACTION" %in% colnames(data)) {
    ## fraction information is supplied explicitly: multiple runs per sample
    multiple.run <- TRUE
  } else {
    ## no fraction info: first check whether any sample has more than one run
    info <- unique(data[, c("GROUP_ORIGINAL", "SUBJECT_ORIGINAL", "RUN")])
    info$condition <- paste(info$GROUP_ORIGINAL, info$SUBJECT_ORIGINAL, sep = "_")
    runs.per.sample <- xtabs(~ condition, info)
    if (!any(runs.per.sample > 1)) {
      ## one run per condition*bioreplicate: no tech replicates, no fractions
      multiple.run <- FALSE
    } else {
      ## distinguish technical replicates from fractionation:
      ## tech replicates re-measure (mostly) the same features across runs,
      ## fractions measure (mostly) disjoint feature sets.
      observed <- data[!is.na(data$ABUNDANCE), ]
      ## restrict attention to the first sample only
      ## NOTE(review): this assumes the first sample itself has multiple runs;
      ## if only a later sample does, the overlap vector below is empty and the
      ## function reports no multiple runs -- confirm against expected designs.
      first.sample <- info[info$condition == unique(info$condition)[1], ]
      sample.data <- observed[observed$GROUP_ORIGINAL == unique(first.sample$GROUP_ORIGINAL) &
                                observed$SUBJECT_ORIGINAL == unique(first.sample$SUBJECT_ORIGINAL), ]
      ## features seen in this sample's first run serve as the reference set
      reference.features <- unique(sample.data[sample.data$RUN == sample.data$RUN[1],
                                               "FEATURE"])
      sample.data$RUN <- factor(sample.data$RUN)
      ## per run: how many of its features overlap the reference set
      overlap.count <- vapply(split(sample.data$FEATURE, sample.data$RUN),
                              function(feats) length(intersect(unique(feats), reference.features)),
                              integer(1))
      ## proportion of shared features for every run after the first
      overlap.ratio <- overlap.count[-1] / max(overlap.count)
      if (all(overlap.ratio > 0.5)) {
        ## every run shares most features with the first run -> tech replicates
        multiple.run <- FALSE
      } else if (all(overlap.ratio < 0.5)) {
        ## runs share few features -> fractionation
        multiple.run <- TRUE
      } else {
        ## mixed overlap: fractionation plus technical replicates cannot be
        ## separated automatically; caller must supply fraction (and possibly
        ## technical-replicate) information
        multiple.run <- FALSE
        is.risky <- TRUE
      }
    }
  }
  return(list(out = multiple.run,
              is.risky = is.risky))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.