content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#' Get a summary of the logistic glm model considering reference levels
#'
#' @param model A glm logistic model
#'
#' @examples
#'
#' data("german_credit")
#'
#' model <- glm(
#'   good_bad ~ purpose + present_employment_since + credit_history,
#'   data = german_credit, family = binomial
#' )
#'
#' model_summary(model)
#'
#' @export
model_summary <- function(model) {
  # Response column name, with any factor() wrapper in the formula stripped.
  response <- model %>%
    stats::as.formula() %>%
    as.character() %>%
    .[2] %>%
    stringr::str_remove_all("factor\\(|\\)")
  # Predictor names from the RHS of the formula.
  # NOTE(review): assumes main effects only ("a + b + c"); interaction or
  # in-formula transformation terms would not be split correctly.
  variables <- model %>%
    stats::as.formula() %>%
    as.character() %>%
    .[3] %>%
    stringr::str_split(" \\+ ") %>%
    unlist()
  # Per (variable, category) pair: marginal share `p` and mean response.
  # Deprecated select_() and bare character vectors in tidyselect contexts
  # replaced with select(all_of()); syms()/sym()/n() namespaced.
  dmod <- model$data %>%
    dplyr::select(dplyr::all_of(c(response, variables))) %>%
    dplyr::mutate_if(is.factor, as.character) %>%
    tidyr::gather("key", "value", dplyr::all_of(variables)) %>%
    dplyr::group_by(!!!rlang::syms(c("key", "value"))) %>%
    dplyr::summarise(
      p = dplyr::n(),
      target_rate = mean(!!rlang::sym(response)),
      .groups = "drop_last"
    ) %>%
    dplyr::group_by(!!rlang::sym("key")) %>%
    dplyr::mutate(p = p / sum(p)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(
      term = paste0(key, value),
      key = factor(key, levels = variables)
    )
  # Join the model coefficients (broom) onto the category summary;
  # tbl_df() is deprecated in favour of as_tibble().
  dmod1 <- dplyr::as_tibble(broom::tidy(model))
  dmod <- dplyr::full_join(dmod1, dmod, by = "term") %>%
    dplyr::mutate(
      key = as.character(key),
      key = ifelse(term == "(Intercept)", "(Intercept)", key),
      key = forcats::fct_inorder(key)
    ) %>%
    dplyr::select(
      term, variable = key, category = value, estimate, std.error,
      p.value, marginal_percent = p, target_rate
    )
  # Full set of term labels (variable + level), including reference levels
  # that carry no coefficient, used only to order the output rows.
  # data_frame() is defunct in current dplyr; replaced with tibble().
  lvls <- purrr::map2(
    names(model$xlevels),
    model$xlevels,
    ~ dplyr::tibble(variable = .x, category = .y)
  ) %>%
    purrr::reduce(dplyr::bind_rows) %>%
    dplyr::bind_rows(dplyr::tibble(variable = "(Intercept)")) %>%
    tidyr::unite("term", "variable", "category", sep = "") %>%
    dplyr::pull()
  # Order rows by the term levels, then drop the helper column.
  dmod <- dmod %>%
    dplyr::mutate(t2 = factor(!!rlang::sym("term"), levels = lvls)) %>%
    dplyr::arrange(!!rlang::sym("t2")) %>%
    dplyr::select(-!!rlang::sym("t2"))
  dmod
}
#' Creating a scorecard from logistic model
#'
#' The defaults are given by the Siddiqi example.
#'
#' @param model A glm logistic model
#' @param pdo points to double the odds, default 20
#' @param score0 anchor score, default 600
#' @param pdo0 anchor odds at score0, default 50/1
#' @param turn.orientation change the orientation of the scorecard points
#'
#' @examples
#'
#' data("german_credit")
#'
#' model <- glm(
#'   good_bad ~ purpose + present_employment_since + credit_history,
#'   data = german_credit, family = binomial
#' )
#'
#' scorecard(model)
#'
#' @export
scorecard <- function(model, pdo = 20, score0 = 600, pdo0 = 50/1, turn.orientation = FALSE) {
  if (turn.orientation) {
    # Refit the model with the response reversed so the points flip direction.
    fmla <- stats::as.formula(model)
    response_expr <- as.character(fmla)[2]
    # Random column name to avoid clashing with existing data columns.
    response_name <- stringi::stri_rand_strings(1, length = 10, pattern = "[A-Za-z]")
    # Evaluate the response expression against the model data; it may be a
    # call such as "factor(good_bad)", not just a bare column name.
    # Replaces the deprecated dplyr::mutate_() + pull() round-trip.
    response <- rlang::eval_tidy(rlang::parse_expr(response_expr), data = model$data)
    if (is.numeric(response)) {
      response <- 1 - response
    } else {
      response <- forcats::fct_rev(factor(response))
    }
    model$data[[response_name]] <- response
    fmla2 <- stats::as.formula(paste(response_name, " ~ ", as.character(fmla)[3]))
    model <- stats::glm(fmla2, data = model$data, family = stats::binomial(link = logit))
  }
  mod <- model_summary(model)
  b0 <- model$coefficients[1]
  # Siddiqi scaling: `a` points double the odds; `b` anchors score0 at odds pdo0.
  a <- pdo / log(2)
  b <- score0 - (a * log(pdo0))
  # NOTE(review): `b` is computed but never used below -- `pb` uses score0
  # directly. Confirm whether the intended offset is (b + a * b0) / k.
  k <- length(model$xlevels)
  # Intercept points, spread evenly across the k characteristics.
  pb <- (score0 + a * b0) / k
  # Deprecated mutate_() replaced with mutate(). NA estimates (reference
  # levels) contribute 0 points before the intercept share is added.
  modscorecard <- mod %>%
    dplyr::select(!!!rlang::syms(c("term", "estimate"))) %>%
    dplyr::mutate(
      score = as.integer(floor(a * ifelse(is.na(estimate), 0, estimate) + pb))
    )
  modscorecard
}
# Placeholder model validation hook; currently accepts every model.
# TODO(review): implement real checks (e.g. family is binomial, convergence).
model_check <- function(model) {
  TRUE
}
#' Check that every non-intercept coefficient is significant
#'
#' @param model A glm logistic model
#' @param sig.level significance threshold for p-values, default 0.1
#'
#' @return TRUE when no non-intercept term has p.value >= sig.level.
model_check_significant_coefficients <- function(model, sig.level = 0.1) {
  # Fixed: the body previously referenced the undefined object `modelo`,
  # so the function could never run against its own argument.
  model %>%
    broom::tidy() %>%
    dplyr::filter(term != "(Intercept)") %>%
    dplyr::filter(p.value >= sig.level) %>%
    nrow() == 0
}
|
/R/model.R
|
permissive
|
miission/irks
|
R
| false
| false
| 4,386
|
r
|
#' Get a summary of the logistic glm model considering reference levels
#'
#' @param model A glm logistic model
#'
#' @examples
#'
#' data("german_credit")
#'
#' model <- glm(
#'   good_bad ~ purpose + present_employment_since + credit_history,
#'   data = german_credit, family = binomial
#' )
#'
#' model_summary(model)
#'
#' @export
model_summary <- function(model) {
  # Response column name, with any factor() wrapper in the formula stripped.
  response <- model %>%
    stats::as.formula() %>%
    as.character() %>%
    .[2] %>%
    stringr::str_remove_all("factor\\(|\\)")
  # Predictor names from the RHS of the formula.
  # NOTE(review): assumes main effects only ("a + b + c"); interaction or
  # in-formula transformation terms would not be split correctly.
  variables <- model %>%
    stats::as.formula() %>%
    as.character() %>%
    .[3] %>%
    stringr::str_split(" \\+ ") %>%
    unlist()
  # Per (variable, category) pair: marginal share `p` and mean response.
  # Deprecated select_() and bare character vectors in tidyselect contexts
  # replaced with select(all_of()); syms()/sym()/n() namespaced.
  dmod <- model$data %>%
    dplyr::select(dplyr::all_of(c(response, variables))) %>%
    dplyr::mutate_if(is.factor, as.character) %>%
    tidyr::gather("key", "value", dplyr::all_of(variables)) %>%
    dplyr::group_by(!!!rlang::syms(c("key", "value"))) %>%
    dplyr::summarise(
      p = dplyr::n(),
      target_rate = mean(!!rlang::sym(response)),
      .groups = "drop_last"
    ) %>%
    dplyr::group_by(!!rlang::sym("key")) %>%
    dplyr::mutate(p = p / sum(p)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(
      term = paste0(key, value),
      key = factor(key, levels = variables)
    )
  # Join the model coefficients (broom) onto the category summary;
  # tbl_df() is deprecated in favour of as_tibble().
  dmod1 <- dplyr::as_tibble(broom::tidy(model))
  dmod <- dplyr::full_join(dmod1, dmod, by = "term") %>%
    dplyr::mutate(
      key = as.character(key),
      key = ifelse(term == "(Intercept)", "(Intercept)", key),
      key = forcats::fct_inorder(key)
    ) %>%
    dplyr::select(
      term, variable = key, category = value, estimate, std.error,
      p.value, marginal_percent = p, target_rate
    )
  # Full set of term labels (variable + level), including reference levels
  # that carry no coefficient, used only to order the output rows.
  # data_frame() is defunct in current dplyr; replaced with tibble().
  lvls <- purrr::map2(
    names(model$xlevels),
    model$xlevels,
    ~ dplyr::tibble(variable = .x, category = .y)
  ) %>%
    purrr::reduce(dplyr::bind_rows) %>%
    dplyr::bind_rows(dplyr::tibble(variable = "(Intercept)")) %>%
    tidyr::unite("term", "variable", "category", sep = "") %>%
    dplyr::pull()
  # Order rows by the term levels, then drop the helper column.
  dmod <- dmod %>%
    dplyr::mutate(t2 = factor(!!rlang::sym("term"), levels = lvls)) %>%
    dplyr::arrange(!!rlang::sym("t2")) %>%
    dplyr::select(-!!rlang::sym("t2"))
  dmod
}
#' Creating a scorecard from logistic model
#'
#' The defaults are given by the Siddiqi example.
#'
#' @param model A glm logistic model
#' @param pdo points to double the odds, default 20
#' @param score0 anchor score, default 600
#' @param pdo0 anchor odds at score0, default 50/1
#' @param turn.orientation change the orientation of the scorecard points
#'
#' @examples
#'
#' data("german_credit")
#'
#' model <- glm(
#'   good_bad ~ purpose + present_employment_since + credit_history,
#'   data = german_credit, family = binomial
#' )
#'
#' scorecard(model)
#'
#' @export
scorecard <- function(model, pdo = 20, score0 = 600, pdo0 = 50/1, turn.orientation = FALSE) {
  if (turn.orientation) {
    # Refit the model with the response reversed so the points flip direction.
    fmla <- stats::as.formula(model)
    response_expr <- as.character(fmla)[2]
    # Random column name to avoid clashing with existing data columns.
    response_name <- stringi::stri_rand_strings(1, length = 10, pattern = "[A-Za-z]")
    # Evaluate the response expression against the model data; it may be a
    # call such as "factor(good_bad)", not just a bare column name.
    # Replaces the deprecated dplyr::mutate_() + pull() round-trip.
    response <- rlang::eval_tidy(rlang::parse_expr(response_expr), data = model$data)
    if (is.numeric(response)) {
      response <- 1 - response
    } else {
      response <- forcats::fct_rev(factor(response))
    }
    model$data[[response_name]] <- response
    fmla2 <- stats::as.formula(paste(response_name, " ~ ", as.character(fmla)[3]))
    model <- stats::glm(fmla2, data = model$data, family = stats::binomial(link = logit))
  }
  mod <- model_summary(model)
  b0 <- model$coefficients[1]
  # Siddiqi scaling: `a` points double the odds; `b` anchors score0 at odds pdo0.
  a <- pdo / log(2)
  b <- score0 - (a * log(pdo0))
  # NOTE(review): `b` is computed but never used below -- `pb` uses score0
  # directly. Confirm whether the intended offset is (b + a * b0) / k.
  k <- length(model$xlevels)
  # Intercept points, spread evenly across the k characteristics.
  pb <- (score0 + a * b0) / k
  # Deprecated mutate_() replaced with mutate(). NA estimates (reference
  # levels) contribute 0 points before the intercept share is added.
  modscorecard <- mod %>%
    dplyr::select(!!!rlang::syms(c("term", "estimate"))) %>%
    dplyr::mutate(
      score = as.integer(floor(a * ifelse(is.na(estimate), 0, estimate) + pb))
    )
  modscorecard
}
# Placeholder model validation hook; currently accepts every model.
# TODO(review): implement real checks (e.g. family is binomial, convergence).
model_check <- function(model) {
  TRUE
}
#' Check that every non-intercept coefficient is significant
#'
#' @param model A glm logistic model
#' @param sig.level significance threshold for p-values, default 0.1
#'
#' @return TRUE when no non-intercept term has p.value >= sig.level.
model_check_significant_coefficients <- function(model, sig.level = 0.1) {
  # Fixed: the body previously referenced the undefined object `modelo`,
  # so the function could never run against its own argument.
  model %>%
    broom::tidy() %>%
    dplyr::filter(term != "(Intercept)") %>%
    dplyr::filter(p.value >= sig.level) %>%
    nrow() == 0
}
|
# Tests for ml_chisquare_test(): Pearson chi-square independence test run on
# a Spark DataFrame. Requires a live test Spark connection and Spark 2.2+.
context("ml stat - chisq")
sc <- testthat_spark_connection()
# Small random two-column categorical dataset copied into Spark.
# Values are random on purpose: only schema and error behavior are asserted.
df_tbl <- sdf_copy_to(sc, data.frame(
  gender = sample(c("F", "M"), 200, replace = TRUE),
  party = sample(c("D", "I", "R"), 200, replace = TRUE),
  stringsAsFactors = FALSE
), overwrite = TRUE)
test_that("ml_chisquare_test() works", {
  test_requires_version("2.2.0", "chisquare test supported in spark 2.2+")
  # The result must expose exactly these columns, in this order.
  expect_identical(
    df_tbl %>%
      ml_chisquare_test(features = "gender",
                        label = "party") %>%
      names(),
    c("feature", "label", "p_value",
      "degrees_of_freedom", "statistic")
  )
})
test_that("ml_chisquare_test() errors on bad column spec", {
  test_requires_version("2.2.0", "chisquare test supported in spark 2.2+")
  # Missing columns should produce an informative error naming each of them.
  expect_error(
    df_tbl %>%
      ml_chisquare_test(features = "foo",
                        label = "bar"),
    "All columns specified must be in x\\. Failed to find foo, bar\\."
  )
})
|
/tests/testthat/test-ml-stat-chisq.R
|
permissive
|
zslajchrt/sparklyr
|
R
| false
| false
| 920
|
r
|
# Tests for ml_chisquare_test(): Pearson chi-square independence test run on
# a Spark DataFrame. Requires a live test Spark connection and Spark 2.2+.
context("ml stat - chisq")
sc <- testthat_spark_connection()
# Small random two-column categorical dataset copied into Spark.
# Values are random on purpose: only schema and error behavior are asserted.
df_tbl <- sdf_copy_to(sc, data.frame(
  gender = sample(c("F", "M"), 200, replace = TRUE),
  party = sample(c("D", "I", "R"), 200, replace = TRUE),
  stringsAsFactors = FALSE
), overwrite = TRUE)
test_that("ml_chisquare_test() works", {
  test_requires_version("2.2.0", "chisquare test supported in spark 2.2+")
  # The result must expose exactly these columns, in this order.
  expect_identical(
    df_tbl %>%
      ml_chisquare_test(features = "gender",
                        label = "party") %>%
      names(),
    c("feature", "label", "p_value",
      "degrees_of_freedom", "statistic")
  )
})
test_that("ml_chisquare_test() errors on bad column spec", {
  test_requires_version("2.2.0", "chisquare test supported in spark 2.2+")
  # Missing columns should produce an informative error naming each of them.
  expect_error(
    df_tbl %>%
      ml_chisquare_test(features = "foo",
                        label = "bar"),
    "All columns specified must be in x\\. Failed to find foo, bar\\."
  )
})
|
# Extracted example code for semTools::parcelAllocation (generated from the
# package .Rd file). Demonstrates random allocation of items to parcels in
# structural equation models, using the simParcel dataset shipped with semTools.
library(semTools)
### Name: parcelAllocation
### Title: Random Allocation of Items to Parcels in a Structural Equation
### Model
### Aliases: parcelAllocation
### ** Examples
## Fit 2-factor CFA to simulated data. Each factor has 9 indicators.
## Specify the item-level model (if NO parcels were created)
item.syntax <- c(paste0("f1 =~ f1item", 1:9),
                 paste0("f2 =~ f2item", 1:9))
cat(item.syntax, sep = "\n")
## Below, we reduce the size of this same model by
## applying different parceling schemes
## 3-indicator parcels
mod.parcels <- '
f1 =~ par1 + par2 + par3
f2 =~ par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
## Not run:
##D parcelAllocation(mod.parcels, data = simParcel, parcel.names, item.syntax,
##D                  nAlloc = 20, std.lv = TRUE, parallel = "snow", iseed = 12345)
## End(Not run)
## multigroup example
simParcel$group <- 0:1 # arbitrary groups for example
mod.mg <- '
f1 =~ par1 + c(L2, L2)*par2 + par3
f2 =~ par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
set.seed(12345)
parcelAllocation(mod.mg, data = simParcel, parcel.names, item.syntax,
                 std.lv = TRUE, group = "group", group.equal = "loadings",
                 nAlloc = 20, show.progress = TRUE)
## parcels for first factor, items for second factor
mod.items <- '
f1 =~ par1 + par2 + par3
f2 =~ f2item2 + f2item7 + f2item8
'
## names of parcels
(parcel.names <- paste0("par", 1:3))
set.seed(12345)
parcelAllocation(mod.items, data = simParcel, parcel.names, item.syntax,
                 nAlloc = 20, std.lv = TRUE)
## mixture of 1- and 3-indicator parcels for second factor
mod.mix <- '
f1 =~ par1 + par2 + par3
f2 =~ f2item2 + f2item7 + f2item8 + par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
set.seed(12345)
parcelAllocation(mod.mix, data = simParcel, parcel.names, item.syntax,
                 nAlloc = 20, std.lv = TRUE)
|
/data/genthat_extracted_code/semTools/examples/parcelAllocation.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,966
|
r
|
# Extracted example code for semTools::parcelAllocation (generated from the
# package .Rd file). Demonstrates random allocation of items to parcels in
# structural equation models, using the simParcel dataset shipped with semTools.
library(semTools)
### Name: parcelAllocation
### Title: Random Allocation of Items to Parcels in a Structural Equation
### Model
### Aliases: parcelAllocation
### ** Examples
## Fit 2-factor CFA to simulated data. Each factor has 9 indicators.
## Specify the item-level model (if NO parcels were created)
item.syntax <- c(paste0("f1 =~ f1item", 1:9),
                 paste0("f2 =~ f2item", 1:9))
cat(item.syntax, sep = "\n")
## Below, we reduce the size of this same model by
## applying different parceling schemes
## 3-indicator parcels
mod.parcels <- '
f1 =~ par1 + par2 + par3
f2 =~ par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
## Not run:
##D parcelAllocation(mod.parcels, data = simParcel, parcel.names, item.syntax,
##D                  nAlloc = 20, std.lv = TRUE, parallel = "snow", iseed = 12345)
## End(Not run)
## multigroup example
simParcel$group <- 0:1 # arbitrary groups for example
mod.mg <- '
f1 =~ par1 + c(L2, L2)*par2 + par3
f2 =~ par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
set.seed(12345)
parcelAllocation(mod.mg, data = simParcel, parcel.names, item.syntax,
                 std.lv = TRUE, group = "group", group.equal = "loadings",
                 nAlloc = 20, show.progress = TRUE)
## parcels for first factor, items for second factor
mod.items <- '
f1 =~ par1 + par2 + par3
f2 =~ f2item2 + f2item7 + f2item8
'
## names of parcels
(parcel.names <- paste0("par", 1:3))
set.seed(12345)
parcelAllocation(mod.items, data = simParcel, parcel.names, item.syntax,
                 nAlloc = 20, std.lv = TRUE)
## mixture of 1- and 3-indicator parcels for second factor
mod.mix <- '
f1 =~ par1 + par2 + par3
f2 =~ f2item2 + f2item7 + f2item8 + par4 + par5 + par6
'
## names of parcels
(parcel.names <- paste0("par", 1:6))
set.seed(12345)
parcelAllocation(mod.mix, data = simParcel, parcel.names, item.syntax,
                 nAlloc = 20, std.lv = TRUE)
|
#' Browse
#'
#' Browse to different locations on Connect via
#' utils::browseURL
#'
#' @param content A R6 Content object
#' @param connect A R6 Connect object
#'
#' @return The url that is opened in the browser
#'
#' @rdname browse
#' @export
browse_solo <- function(content) {
  validate_R6_class("Content", content)
  # Open the content's standalone URL in the default browser.
  target_url <- content$get_content()$url
  utils::browseURL(target_url)
  target_url
}
#' @rdname browse
#' @export
browse_dashboard <- function(content) {
  validate_R6_class("Content", content)
  # Open the content's dashboard view in the default browser.
  dash_url <- content$get_dashboard_url()
  utils::browseURL(dash_url)
  dash_url
}
#' @rdname browse
#' @export
browse_api_docs <- function(connect) {
  validate_R6_class("Connect", connect)
  # Fetch the docs URL without letting the client open it, then browse to it.
  docs_url <- connect$docs("api", browse = FALSE)
  utils::browseURL(docs_url)
  docs_url
}
|
/R/browse.R
|
no_license
|
tbradley1013/connectapi
|
R
| false
| false
| 780
|
r
|
#' Browse
#'
#' Browse to different locations on Connect via
#' utils::browseURL
#'
#' @param content A R6 Content object
#' @param connect A R6 Connect object
#'
#' @return The url that is opened in the browser
#'
#' @rdname browse
#' @export
browse_solo <- function(content) {
  validate_R6_class("Content", content)
  # Open the content's standalone URL in the default browser.
  target_url <- content$get_content()$url
  utils::browseURL(target_url)
  target_url
}
#' @rdname browse
#' @export
browse_dashboard <- function(content) {
  validate_R6_class("Content", content)
  # Open the content's dashboard view in the default browser.
  dash_url <- content$get_dashboard_url()
  utils::browseURL(dash_url)
  dash_url
}
#' @rdname browse
#' @export
browse_api_docs <- function(connect) {
  validate_R6_class("Connect", connect)
  # Fetch the docs URL without letting the client open it, then browse to it.
  docs_url <- connect$docs("api", browse = FALSE)
  utils::browseURL(docs_url)
  docs_url
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Other.R
\name{dataKNMI}
\alias{dataKNMI}
\title{Wind speeds in the Netherlands.}
\format{
dataKNMI$data is a matrix with 672 rows and 22 columns, dataKNMI$loc is a matrix with 22 rows
and 2 columns.
}
\source{
KNMI
}
\description{
Daily maximal speeds of wind gusts, measured in 0.1 m/s. The data are observed at
22 inland weather stations in the Netherlands. Only the summer months are presented
here (June, July, August). Also included are the Euclidean coordinates of the 22
weather stations, where a distance of 1 corresponds to 100 kilometers.
}
\examples{
data(dataKNMI)
n <- nrow(dataKNMI$data)
locations <- dataKNMI$loc
x <- apply(dataKNMI$data, 2, function(i) n/(n + 0.5 - rank(i)))
indices <- selectGrid(cst = c(0,1), d = 22, locations = locations, maxDistance = 0.5)
EstimationBR(x, locations, indices, k = 60, method = "Mestimator", isotropic = TRUE,
covMat = FALSE)$theta
}
\references{
Einmahl, J.H.J., Kiriliouk, A., Krajina, A., and Segers, J. (2016). An M-estimator of spatial tail dependence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(1), 275-298.
}
|
/man/dataKNMI.Rd
|
no_license
|
cran/tailDepFun
|
R
| false
| true
| 1,184
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Other.R
\name{dataKNMI}
\alias{dataKNMI}
\title{Wind speeds in the Netherlands.}
\format{
dataKNMI$data is a matrix with 672 rows and 22 columns, dataKNMI$loc is a matrix with 22 rows
and 2 columns.
}
\source{
KNMI
}
\description{
Daily maximal speeds of wind gusts, measured in 0.1 m/s. The data are observed at
22 inland weather stations in the Netherlands. Only the summer months are presented
here (June, July, August). Also included are the Euclidean coordinates of the 22
weather stations, where a distance of 1 corresponds to 100 kilometers.
}
\examples{
data(dataKNMI)
n <- nrow(dataKNMI$data)
locations <- dataKNMI$loc
x <- apply(dataKNMI$data, 2, function(i) n/(n + 0.5 - rank(i)))
indices <- selectGrid(cst = c(0,1), d = 22, locations = locations, maxDistance = 0.5)
EstimationBR(x, locations, indices, k = 60, method = "Mestimator", isotropic = TRUE,
covMat = FALSE)$theta
}
\references{
Einmahl, J.H.J., Kiriliouk, A., Krajina, A., and Segers, J. (2016). An M-estimator of spatial tail dependence. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 78(1), 275-298.
}
|
# Read Marwan's data, subset and rename 5 columns ------------------------------
# Load Experiment I. Gravel feed is set at 4.33 g/ms,
# while the sand feed starts at 0 g/ms and increases to 13.0 g/ms. The
# experiment was run for 95 hrs with a data point every 15 min.
# NOTE(review): this script uses select() (dplyr), ssa() (Rssa), merge.zoo()
# (zoo) and boot() (boot) without any library() calls here -- presumably the
# caller attaches them; verify before running standalone.
expI_15min <- read.csv("ExpI-SievedData.csv", header=TRUE)
expI_15min_sub <- expI_15min[,c(3,10,13,12,14)]
colnames(expI_15min_sub) <- c("time","gravel_rate","gravel_feed","sand_rate","sand_feed")
head(expI_15min_sub)
# Keep only fully-observed rows.
good <- complete.cases(expI_15min_sub)
expI_15min_sub <- expI_15min_sub[good,]
# Mask sand rates recorded while the sand feed was off.
# NOTE(review): the next two statements are identical duplicates; harmless,
# but one can be removed.
expI_15min_sub$sand_rate <- ifelse(expI_15min_sub$sand_feed==0,NA,expI_15min_sub$sand_rate)
expI_15min_sub$sand_rate <- ifelse(expI_15min_sub$sand_feed == 0, NA, expI_15min_sub$sand_rate)
head(expI_15min_sub)
plot(expI_15min_sub$time,expI_15min_sub$gravel_rate,type="b")
plot(expI_15min_sub$time,expI_15min_sub$sand_rate,type="b")
# Standardize data by feed rate:
gravel_std <- expI_15min_sub$gravel_rate / expI_15min_sub$gravel_feed
gravel_std <- ifelse(is.finite(gravel_std)==TRUE,gravel_std,NA)  # NA out Inf/NaN from zero feeds
expI_15min_sub$gravel_std <- gravel_std
sand_std <- expI_15min_sub$sand_rate / expI_15min_sub$sand_feed
sand_std <- ifelse(is.nan(sand_std)==FALSE,sand_std,NA)
expI_15min_sub$sand_std <- sand_std
# Re-filter after standardization introduced new NAs.
good <- complete.cases(expI_15min_sub)
expI_15min_sub <- expI_15min_sub[good,]
plot(expI_15min_sub$time,expI_15min_sub$gravel_std,type="b")
plot(expI_15min_sub$time,expI_15min_sub$sand_std,type="b")
expI_dat_n <- select(expI_15min_sub,gravel_std,sand_std)
# Create the timeseries:
# frequency = 4 -> four 15-min observations per hour; time axis is in hours.
expI_ts <- ts(data = expI_dat_n, start = 42.25, end = 95, frequency = 4)
#expI_ts <- ts(data = expI_dat_n, start = 0, end = 52.75, frequency = 4)
head(expI_ts)
tail(expI_ts)
plot(expI_ts)
# Set window length (see Golyandina, N., Korobeynikov, A., Shlemov, A., Usevich,
# K., 2013. Multivariate and 2D Extensions of Singular Spectrum Analysis with the
# Rssa Package. arXiv. doi:10.18637/jss.v067.i02):
N_expI <- length(expI_ts[,1])  # number of observations
s_expI <- length(expI_ts[1,])  # number of series
window_length <- round(s_expI*(N_expI+1)/(s_expI+1))
print(window_length)
# Basic MSSA -------------------------------------------------------------------
# NOTE(review): this reassigns s_expI (previously the series count) to the SSA
# object; a distinct name would avoid the shadowing.
s_expI <- ssa(expI_ts, L=window_length, kind="mssa") # Create an SSA object
s_expI # Look inside the SSA object for relevant details
plot(s_expI) # These are the eigenvalues
plot(s_expI, type = "vectors", idx = 1:14) # These are the first eigenvectors given by the vector indices
plot(s_expI, type = "paired", idx = 1:14, plot.contrib = FALSE) # These are the pairs of eigenvectors -- the 'groups' argument specifies the grouping plot
plot(wcor(s_expI)) # This produces a w-correlation matrix
grouping.auto(s_expI, grouping.method = c("wcor"))
# Stop and determine the eigenvectors that represent the trend:
s_expI_recon <- reconstruct(s_expI, groups = list(1)) #list the eigenvectors that represent the trend -- "s_expI_recon" contains the residuals
expI_trend <- s_expI_recon$F1 #feature vector 1 (F1) represents eigenvector 1, while F2 represents eigenvector 2... add additional vectors as needed if the trend is represented by multiple eigenvectors (e.g. s_expI_recon$F1 + s_expI_recon$F2...)
plot(expI_trend) #plot the trend
plot(expI_ts)
# Stop and determine the eigenvectors that represent the seasonal component:
s_expI_res <- residuals(s_expI_recon) # Extract the residuals
plot(s_expI_res) # Plot the results and look for cyclical behavior in the data
period_expI <- spec.pgram(s_expI_res[21:212,], spans=5, detrend = FALSE, log = "no") # Plot a periodogram and look for periods in the data
period_expI # Looks like 38.4 hour period for gravel and 7.4 hours for sand
# Stop and determine the best choice for L:
s2_expI <- ssa(s_expI_res, L=window_length, kind="mssa") # for better separability, set "L" to the maximum value of "N/2" and evenly divisible by the period
plot(s2_expI) #these are the eigenvalues
plot(s2_expI, type = "vectors", idx = 1:12)
plot(s2_expI, type = "paired", idx =1:12, plot.contrib = FALSE)
plot(wcor(s2_expI, groups = as.list(1:16)))
grouping.auto(s2_expI, grouping.method = c("wcor"))
# Stop and determine the eigenvectors that represent the seasonal component:
# NOTE(review): two groups are reconstructed here (-> components F1 and F2),
# but the output section below also references seasons_expI$F3 -- confirm
# whether a third group was intended.
seasons_expI <- reconstruct(s2_expI, groups = list(9:10,12:13))
parestimate(s2_expI, groups = list(9:10,12:13),method = "esprit")
plot(seasons_expI)
saveRDS(seasons_expI, "output/seasons_expI.rds")
# For convenience, repeat the reconstruction in one step:
# NOTE(review): these group indices (10:11, 13:14) differ from the ones used
# for seasons_expI above (9:10, 12:13) -- confirm which set is correct.
recon_expI <- reconstruct(s_expI,
                          groups = list(Trend = 1, Seasonality = c(10:11,13:14)))
recon_expI_final <- plot(recon_expI, plot.method = "xyplot", superpose = FALSE,
                         auto.key = list(columns = 3),
                         col = c("blue", "green", "red", "violet"),
                         lty = c(rep(1, 4), rep(2, 4), rep(3, 4)))
print(recon_expI_final)
saveRDS(recon_expI_final, "output/recon_expI_final.rds")
# Examine the noise ------------------------------------------------------------
# Noise Envelope: SSA on the squared residuals; the sqrt of its first
# reconstructed component gives a slowly-varying residual std-dev envelope.
res_expI <- residuals(recon_expI) #extract the residuals
env_expI <- ssa(res_expI^2, L=12)
rsd_expI <- sqrt(reconstruct(env_expI, groups=list(1))$F1)
par(mfrow=c(2,1))
# NOTE(review): g.noise.plot()/s.noise.plot() are not defined in this file;
# presumably sourced elsewhere in the project -- verify.
g.noise.plot(res_expI,rsd_expI)
s.noise.plot(res_expI,rsd_expI)
saveRDS(res_expI, "output/res_expI.rds")
saveRDS(rsd_expI, "output/rsd_expI.rds")
# Test for white noise
parestimate(s_expI, groups = list(Trend = 1:2, Seasonality = c(3:4,5:6,7:8)),method = "esprit")
# https://robjhyndman.com/hyndsight/ljung-box-test/ For seasonal time series, use h = min(2m,T/5) where T = length of record; m = period of seasonality; h = no. lags to test
Box.test(res_expI[,1],type="Ljung",lag=42)
# Reject the null hypothesis...The gravel data are not independently distributed; they exhibit serial correlation
Box.test(res_expI[,2],type="Ljung",lag=42)
# Reject the null hypothesis...The sand data are not independently distributed; they exhibit serial correlation
# Bootstrap the mean absolute residual for each series (requires the boot pkg).
boot.mean <- function(x,i){boot.mean <- mean(x[i],na.rm=T)}
gres_boot_expI <- boot(abs(res_expI[,1]), boot.mean, R = 10000)
print(gres_boot_expI)
sres_boot_expI <- boot(abs(res_expI[,2]), boot.mean, R = 10000)
print(sres_boot_expI)
# Transform data to orginal units ----------------------------------------------
# NOTE(review): seasons_expI was reconstructed from only two groups above, so
# seasons_expI$F3 may not exist; and norm_expI_dat is not defined anywhere in
# this file (presumably the per-series normalization factors) -- verify both
# before running.
gravel_signal_n <- data.frame(gravel_signal = expI_trend[,1] + seasons_expI$F1[,1] + seasons_expI$F2[,1] + seasons_expI$F3[,1])
gravel_signal <- gravel_signal_n * norm_expI_dat[1]
sand_signal_n <- data.frame(sand_signal = expI_trend[,2] + seasons_expI$F1[,2] + seasons_expI$F2[,2] + seasons_expI$F3[,2])
sand_signal <- sand_signal_n * norm_expI_dat[2]
gravel_noise_n <- data.frame(gravel_noise = rsd_expI[,1])
gravel_noise <- gravel_noise_n * norm_expI_dat[1]
sand_noise_n <- data.frame(sand_noise = rsd_expI[,2])
sand_noise <- sand_noise_n * norm_expI_dat[2]
gravel_trend_n <- data.frame(gravel_trend = expI_trend[,1])
gravel_trend <- gravel_trend_n * norm_expI_dat[1]
sand_trend_n <- data.frame(sand_trend = expI_trend[,2])
sand_trend <- sand_trend_n * norm_expI_dat[2]
# Align everything on a common time index (merge.zoo is from the zoo package).
expI_15min_output <- merge.zoo(gravel_signal,sand_signal,gravel_noise,sand_noise,expI_15min_sub$gravel_rate,expI_15min_sub$sand_rate,expI_15min_sub$gravel_feed,expI_15min_sub$sand_feed)
names(expI_15min_output) <- c("gravel_signal","sand_signal","gravel_noise","sand_noise","gravel_rate","sand_rate","gravel_feed","sand_feed")
# Signal +/- noise envelope bands for plotting.
expI_15min_output$lower_gravel <- expI_15min_output[,1] - expI_15min_output[,3]
expI_15min_output$upper_gravel <- expI_15min_output[,1] + expI_15min_output[,3]
expI_15min_output$lower_sand <- expI_15min_output[,2] - expI_15min_output[,4]
expI_15min_output$upper_sand <- expI_15min_output[,2] + expI_15min_output[,4]
# Replace NAs with 0 purely to compute plot limits.
plot_lims <- ifelse(is.na(expI_15min_output),0,expI_15min_output)
plot(expI_15min_output$gravel_signal, ylim = c(min(plot_lims),max(plot_lims)), ylab = "Rate (g/ms)", xlab = "Time (hrs)",type = "l",lwd=2,main="Exp I")
lines(expI_15min_output$sand_signal,lwd=2,col="red")
lines(expI_15min_output$upper_gravel,lty=2)
lines(expI_15min_output$lower_gravel,lty=2)
points(expI_15min_output$gravel_rate,pch=1,cex=.5)
lines(expI_15min_output$gravel_feed,type="l",lty=3)
lines(expI_15min_output$upper_sand,lty=2,col="red")
lines(expI_15min_output$lower_sand,lty=2,col="red")
points(expI_15min_output$sand_rate,pch=1,cex=.5,col="red")
lines(expI_15min_output$sand_feed,type="l",lty=3,col="red")
# Output data
write.csv(expI_15min_output,"expI_15min_out.csv")
# Mean signal-to-noise ratio for gravel.
mean(expI_15min_output$gravel_rate / expI_15min_output$gravel_noise)
|
/scripts/s_g_15min_EXP_I_std.R
|
no_license
|
stephen-bird/magic_sand
|
R
| false
| false
| 8,483
|
r
|
# Read Marwan's data, subset and rename 5 columns ------------------------------
# Load Experiment I. Gravel feed is set at 4.33 g/ms,
# while the sand feed starts at 0 g/ms and increases to 13.0 g/ms. The
# experiment was run for 95 hrs with a data point every 15 min.
expI_15min <- read.csv("ExpI-SievedData.csv", header=TRUE)
expI_15min_sub <- expI_15min[,c(3,10,13,12,14)]
colnames(expI_15min_sub) <- c("time","gravel_rate","gravel_feed","sand_rate","sand_feed")
head(expI_15min_sub)
good <- complete.cases(expI_15min_sub)
expI_15min_sub <- expI_15min_sub[good,]
expI_15min_sub$sand_rate <- ifelse(expI_15min_sub$sand_feed==0,NA,expI_15min_sub$sand_rate)
expI_15min_sub$sand_rate <- ifelse(expI_15min_sub$sand_feed == 0, NA, expI_15min_sub$sand_rate)
head(expI_15min_sub)
plot(expI_15min_sub$time,expI_15min_sub$gravel_rate,type="b")
plot(expI_15min_sub$time,expI_15min_sub$sand_rate,type="b")
# Standardize data by feed rate:
gravel_std <- expI_15min_sub$gravel_rate / expI_15min_sub$gravel_feed
gravel_std <- ifelse(is.finite(gravel_std)==TRUE,gravel_std,NA)
expI_15min_sub$gravel_std <- gravel_std
sand_std <- expI_15min_sub$sand_rate / expI_15min_sub$sand_feed
sand_std <- ifelse(is.nan(sand_std)==FALSE,sand_std,NA)
expI_15min_sub$sand_std <- sand_std
good <- complete.cases(expI_15min_sub)
expI_15min_sub <- expI_15min_sub[good,]
plot(expI_15min_sub$time,expI_15min_sub$gravel_std,type="b")
plot(expI_15min_sub$time,expI_15min_sub$sand_std,type="b")
# NOTE(review): select() requires dplyr, which is not loaded in this script --
# presumably attached earlier in the session; confirm library(dplyr).
expI_dat_n <- select(expI_15min_sub,gravel_std,sand_std)
# Create the timeseries:
# Quarter-hourly sampling => frequency = 4 per hour. The series starts at
# 42.25 h -- TODO confirm why earlier data are excluded (commented
# alternative below keeps the first 52.75 h instead).
expI_ts <- ts(data = expI_dat_n, start = 42.25, end = 95, frequency = 4)
#expI_ts <- ts(data = expI_dat_n, start = 0, end = 52.75, frequency = 4)
head(expI_ts)
tail(expI_ts)
plot(expI_ts)
# Set window length (see Golyandina, N., Korobeynikov, A., Shlemov, A., Usevich,
# K., 2013. Multivariate and 2D Extensions of Singular Spectrum Analysis with the
# Rssa Package. arXiv. doi:10.18637/jss.v067.i02):
# Recommended MSSA window: L = s(N+1)/(s+1), where N = series length and
# s = number of series (here 2: gravel_std and sand_std).
N_expI <- length(expI_ts[,1])
s_expI <- length(expI_ts[1,])
window_length <- round(s_expI*(N_expI+1)/(s_expI+1))
print(window_length)
# Basic MSSA -------------------------------------------------------------------
# NOTE(review): ssa()/wcor()/reconstruct()/parestimate() come from the Rssa
# package, not loaded in this script -- confirm library(Rssa) in the session.
# NOTE(review): 's_expI' below overwrites the series-count scalar computed in
# the previous section; harmless here because window_length is already set,
# but the name reuse is confusing.
s_expI <- ssa(expI_ts, L=window_length, kind="mssa") # Create an SSA object
s_expI # Look inside the SSA object for relevant details
plot(s_expI) # These are the eignevalues
plot(s_expI, type = "vectors", idx = 1:14) # These are the first eigenvectors given by the vector indicies
plot(s_expI, type = "paired", idx = 1:14, plot.contrib = FALSE) # These are the pairs of eigenvectors -- the 'groups' arguement specifies the grouping plot
plot(wcor(s_expI)) # This produces a w-correlation matrix
grouping.auto(s_expI, grouping.method = c("wcor"))
# Stop an determine the eigenvectors that represent the trend:
s_expI_recon <- reconstruct(s_expI, groups = list(1)) #list the eigenvectors that represent the trend -- "s_expI_recon" contains the residuals
expI_trend <- s_expI_recon$F1 #feature vector 1 (F1) represents eignevector 1, while F2 represents eigenvector 2... add additional vectors as needed if the trend is represented by multiple eigenvectos (e.g. s_expI_recon$F1 + s_expI_recon$F2...)
plot(expI_trend) #plot the trend
plot(expI_ts)
# Stop an determine the eigenvectors that represent the trend:
s_expI_res <- residuals(s_expI_recon) # Extract the residuals
plot(s_expI_res) # Plot the results and look for cyclical behavoir in the data
# NOTE(review): the row range 21:212 is hard-coded -- presumably trims edge
# effects of the reconstruction; verify it still matches the series length.
period_expI <- spec.pgram(s_expI_res[21:212,], spans=5, detrend = FALSE, log = "no") # Plot a periodogram and look for periods in the data
period_expI # Looks like 38.4 hour period for gravel and 7.4 hours for sand
# Stop and determine the best choice for L:
s2_expI <- ssa(s_expI_res, L=window_length, kind="mssa") # for better seperatebility, set "L" to the maximium value of "N/2" and evenly divisible by the period
plot(s2_expI) #these are the eignevalues
plot(s2_expI, type = "vectors", idx = 1:12)
plot(s2_expI, type = "paired", idx =1:12, plot.contrib = FALSE)
plot(wcor(s2_expI, groups = as.list(1:16)))
grouping.auto(s2_expI, grouping.method = c("wcor"))
# Stop an determine the eigenvectors that represent the seasonal component:
# Groups 9:10 and 12:13 were chosen by eye from the paired-eigenvector plots
# above; revisit them if the input data change.
seasons_expI <- reconstruct(s2_expI, groups = list(9:10,12:13))
parestimate(s2_expI, groups = list(9:10,12:13),method = "esprit")
plot(seasons_expI)
saveRDS(seasons_expI, "output/seasons_expI.rds")
# For convience, repeat the reconstruction in one step:
# NOTE(review): the seasonal groups here (10:11, 13:14) differ from those used
# on the residual decomposition above (9:10, 12:13) -- presumably shifted
# because this acts on s_expI where component 1 is the trend; confirm.
recon_expI <- reconstruct(s_expI,
groups = list(Trend = 1, Seasonality = c(10:11,13:14)))
recon_expI_final <- plot(recon_expI, plot.method = "xyplot", superpose = FALSE,
auto.key = list(columns = 3),
col = c("blue", "green", "red", "violet"),
lty = c(rep(1, 4), rep(2, 4), rep(3, 4)))
print(recon_expI_final)
saveRDS(recon_expI_final, "output/recon_expI_final.rds")
# Examine the noise ------------------------------------------------------------
# Noise Envelope:
# SSA on the squared residuals; the first component's square root gives a
# slowly-varying noise amplitude (envelope).
res_expI <- residuals(recon_expI) #extract the residuals
env_expI <- ssa(res_expI^2, L=12)
rsd_expI <- sqrt(reconstruct(env_expI, groups=list(1))$F1)
par(mfrow=c(2,1))
# NOTE(review): g.noise.plot()/s.noise.plot() are not defined in this script --
# presumably custom helpers sourced elsewhere; verify before running.
g.noise.plot(res_expI,rsd_expI)
s.noise.plot(res_expI,rsd_expI)
saveRDS(res_expI, "output/res_expI.rds")
saveRDS(rsd_expI, "output/rsd_expI.rds")
# Test for white noise
parestimate(s_expI, groups = list(Trend = 1:2, Seasonality = c(3:4,5:6,7:8)),method = "esprit")
# https://robjhyndman.com/hyndsight/ljung-box-test/ For seasonal time series, use h = min(2m,T/5) where T = length of record; m = period of seasonality; h = no. lags to test
Box.test(res_expI[,1],type="Ljung",lag=42)
# Reject the null hypothesis...The gravel data are not independently distributed; they exhibit serial correlation
Box.test(res_expI[,2],type="Ljung",lag=42)
# Reject the null hypothesis...The sand data are not independently distributed; they exhibit serial correlation
# Bootstrap the mean absolute residual (requires the 'boot' package).
boot.mean <- function(x,i){boot.mean <- mean(x[i],na.rm=T)}
gres_boot_expI <- boot(abs(res_expI[,1]), boot.mean, R = 10000)
print(gres_boot_expI)
sres_boot_expI <- boot(abs(res_expI[,2]), boot.mean, R = 10000)
print(sres_boot_expI)
# Transform data to orginal units ----------------------------------------------
# NOTE(review): 'norm_expI_dat' is never defined in this script -- presumably
# a vector of normalization factors from an earlier (removed?) normalization
# step; this section will fail without it. TODO restore its definition.
# NOTE(review): seasons_expI was reconstructed from TWO groups above, which
# yields only $F1 and $F2; referencing $F3 here would be NULL -- verify the
# grouping used when this was last run.
gravel_signal_n <- data.frame(gravel_signal = expI_trend[,1] + seasons_expI$F1[,1] + seasons_expI$F2[,1] + seasons_expI$F3[,1])
gravel_signal <- gravel_signal_n * norm_expI_dat[1]
sand_signal_n <- data.frame(sand_signal = expI_trend[,2] + seasons_expI$F1[,2] + seasons_expI$F2[,2] + seasons_expI$F3[,2])
sand_signal <- sand_signal_n * norm_expI_dat[2]
gravel_noise_n <- data.frame(gravel_noise = rsd_expI[,1])
gravel_noise <- gravel_noise_n * norm_expI_dat[1]
sand_noise_n <- data.frame(sand_noise = rsd_expI[,2])
sand_noise <- sand_noise_n * norm_expI_dat[2]
gravel_trend_n <- data.frame(gravel_trend = expI_trend[,1])
gravel_trend <- gravel_trend_n * norm_expI_dat[1]
sand_trend_n <- data.frame(sand_trend = expI_trend[,2])
sand_trend <- sand_trend_n * norm_expI_dat[2]
# Combine signal, noise envelope, raw rates and feeds into one zoo object
# (merge.zoo requires the 'zoo' package).
expI_15min_output <- merge.zoo(gravel_signal,sand_signal,gravel_noise,sand_noise,expI_15min_sub$gravel_rate,expI_15min_sub$sand_rate,expI_15min_sub$gravel_feed,expI_15min_sub$sand_feed)
names(expI_15min_output) <- c("gravel_signal","sand_signal","gravel_noise","sand_noise","gravel_rate","sand_rate","gravel_feed","sand_feed")
# Signal +/- noise envelope bounds for plotting
expI_15min_output$lower_gravel <- expI_15min_output[,1] - expI_15min_output[,3]
expI_15min_output$upper_gravel <- expI_15min_output[,1] + expI_15min_output[,3]
expI_15min_output$lower_sand <- expI_15min_output[,2] - expI_15min_output[,4]
expI_15min_output$upper_sand <- expI_15min_output[,2] + expI_15min_output[,4]
# Replace NAs with 0 only for computing the y-axis limits
plot_lims <- ifelse(is.na(expI_15min_output),0,expI_15min_output)
plot(expI_15min_output$gravel_signal, ylim = c(min(plot_lims),max(plot_lims)), ylab = "Rate (g/ms)", xlab = "Time (hrs)",type = "l",lwd=2,main="Exp I")
lines(expI_15min_output$sand_signal,lwd=2,col="red")
lines(expI_15min_output$upper_gravel,lty=2)
lines(expI_15min_output$lower_gravel,lty=2)
points(expI_15min_output$gravel_rate,pch=1,cex=.5)
lines(expI_15min_output$gravel_feed,type="l",lty=3)
lines(expI_15min_output$upper_sand,lty=2,col="red")
lines(expI_15min_output$lower_sand,lty=2,col="red")
points(expI_15min_output$sand_rate,pch=1,cex=.5,col="red")
lines(expI_15min_output$sand_feed,type="l",lty=3,col="red")
# Output data
write.csv(expI_15min_output,"expI_15min_out.csv")
mean(expI_15min_output$gravel_rate / expI_15min_output$gravel_noise)
|
##### Orden Datos SII #####
## NOTE(review): the original setwd() used single backslashes
## ("C:\Users\..."), which is a parse error in R string literals ("\U"
## starts a Unicode escape). Rewritten with forward slashes.
setwd("C:/Users/o/Documents/GitHub/Info_SII/SII")
library(readr)
##### Carga de Datos y Orden #####
## Semesters of SII cadastral extracts to process.
anos = c("2015_1","2015_2","2016_1","2016_2","2017_1","2017_2","2018_1","2018_2","2019_1")
an = anos[9]
for (an in anos) {
  ## Raw pipe-delimited extracts: N = parcels, NL = construction lines per
  ## parcel (presumably -- verify against the SII file documentation).
  N <- read_delim(paste0("txt/BRORGA2441N_NAC_", an, ".txt"),
                  "|", escape_double = FALSE, col_names = FALSE,
                  trim_ws = TRUE)
  NL <- read_delim(paste0("txt/BRORGA2441NL_NAC_", an, ".txt"),
                   "|", escape_double = FALSE, col_names = FALSE,
                   trim_ws = TRUE)
  ## N
  ## FIX(review): the names(N) assignment was truncated in the original
  ## source (only its tail survived). The list below is reconstructed from
  ## that tail plus the columns converted with as.numeric() further down --
  ## verify the count and order against the raw txt layout before trusting
  ## downstream results.
  names(N) = c("comuna","manzana","predio","avaluo","contribucion_sem",
               "exento","comuna_comun_1","manzana_comun_1","predio_comun_1",
               "comuna_comun_2","manzana_comun_2","predio_comun_2",
               "superficie_terreno")
  attr(N,"spec") = NULL
  N$comuna = as.numeric(N$comuna)
  N$manzana = as.numeric(N$manzana)
  N$predio = as.numeric(N$predio)
  N$avaluo = as.numeric(N$avaluo)
  N$contribucion_sem = as.numeric(N$contribucion_sem)
  N$exento = as.numeric(N$exento)
  N$comuna_comun_1 = as.numeric(N$comuna_comun_1)
  N$manzana_comun_1 = as.numeric(N$manzana_comun_1)
  N$predio_comun_1 = as.numeric(N$predio_comun_1)
  N$comuna_comun_2 = as.numeric(N$comuna_comun_2)
  N$manzana_comun_2 = as.numeric(N$manzana_comun_2)
  N$predio_comun_2 = as.numeric(N$predio_comun_2)
  N$superficie_terreno = as.numeric(N$superficie_terreno)
  ## region code = thousands digit(s) of the comuna code
  N$reg = N$region = floor(N$comuna/1000)
  ## collapse region codes above 13 into 13 -- presumably folding the
  ## post-2007 regions back for comparability; confirm.
  N$reg[N$reg > 13] = 13
  ## NL
  names(NL) = c("comuna","manzana","predial","linea","material","calidad","ano","superficie_construccion","uso","condicion")
  attr(NL,"spec") = NULL
  NL$comuna = as.numeric(NL$comuna)
  NL$manzana = as.numeric(NL$manzana)
  NL$predial = as.numeric(NL$predial)
  NL$linea = as.numeric(NL$linea)
  NL$calidad = as.numeric(NL$calidad)
  NL$ano = as.numeric(NL$ano)
  NL$superficie_construccion = as.numeric(NL$superficie_construccion)
  NL$reg = NL$region = floor(NL$comuna/1000)
  NL$reg[NL$reg > 13] = 13
  ## Save one RDS per region (1..13) per table, then the full tables.
  for (reg in 1:13) {
    N_reg = N[N$reg == reg, ]
    NL_reg = NL[NL$reg == reg, ]
    saveRDS(N_reg, paste0("N_", an, "_r", reg, ".rds"))
    saveRDS(NL_reg, paste0("NL_", an, "_r", reg, ".rds"))
    print(reg)
  }
  saveRDS(N, paste0("N_", an, ".rds"))
  saveRDS(NL, paste0("NL_", an, ".rds"))
  print(an)
}
##### Compilacion Bases para Conteo de Metros #####
# A base compiling all years is built to avoid the "underestimation bias in
# past years" for 2015-2018, but it is weak at the end: the base is not well
# enough consolidated with respect to construction years.
anos = c("2015_1","2015_2","2016_1","2016_2","2017_1","2017_2","2018_1","2018_2","2019_1")
regiones = 1:13
an = anos[1]
reg = 11
for (reg in regiones) {
  ## NOTE(review): backslash path fixed as above.
  setwd("C:/Users/o/Documents/GitHub/Info_SII")
  ## Start from the most recent semester, then walk backwards adding any
  ## construction line (id = comuna-manzana-predial-linea-ano) not yet seen.
  lineas = readRDS(paste0("SII/NL_", anos[length(anos)], "_r", reg, ".rds"))
  lineas$id = paste(lineas$comuna, lineas$manzana, lineas$predial, lineas$linea, lineas$ano, sep = "-")
  lineas_n = data.frame()
  an = anos[length(anos) - 1]
  for (an in rev(anos[-length(anos)])) {
    lineas_rep = readRDS(paste0("SII/NL_", an, "_r", reg, ".rds"))
    lineas_rep$id = paste(lineas_rep$comuna, lineas_rep$manzana, lineas_rep$predial, lineas_rep$linea, lineas_rep$ano, sep = "-")
    nuevas = lineas_rep[!lineas_rep$id %in% lineas$id, ]
    lineas_n = rbind(lineas_n, nuevas)
  }
  lineas = rbind(lineas, lineas_n)
  saveRDS(lineas, paste0("SII/NL_comp_r", reg, ".rds"))
  print(reg)
  beepr::beep(2)
}
beepr::beep(5)
|
/1_Orden_SII.R
|
no_license
|
agubas/Info_SII
|
R
| false
| false
| 3,228
|
r
|
##### Orden Datos SII #####
## NOTE(review): the original setwd() used single backslashes
## ("C:\Users\..."), which is a parse error in R string literals ("\U"
## starts a Unicode escape). Rewritten with forward slashes.
setwd("C:/Users/o/Documents/GitHub/Info_SII/SII")
library(readr)
##### Carga de Datos y Orden #####
## Semesters of SII cadastral extracts to process.
anos = c("2015_1","2015_2","2016_1","2016_2","2017_1","2017_2","2018_1","2018_2","2019_1")
an = anos[9]
for (an in anos) {
  ## Raw pipe-delimited extracts: N = parcels, NL = construction lines per
  ## parcel (presumably -- verify against the SII file documentation).
  N <- read_delim(paste0("txt/BRORGA2441N_NAC_", an, ".txt"),
                  "|", escape_double = FALSE, col_names = FALSE,
                  trim_ws = TRUE)
  NL <- read_delim(paste0("txt/BRORGA2441NL_NAC_", an, ".txt"),
                   "|", escape_double = FALSE, col_names = FALSE,
                   trim_ws = TRUE)
  ## N
  ## FIX(review): the names(N) assignment was truncated in the original
  ## source (only its tail survived). The list below is reconstructed from
  ## that tail plus the columns converted with as.numeric() further down --
  ## verify the count and order against the raw txt layout before trusting
  ## downstream results.
  names(N) = c("comuna","manzana","predio","avaluo","contribucion_sem",
               "exento","comuna_comun_1","manzana_comun_1","predio_comun_1",
               "comuna_comun_2","manzana_comun_2","predio_comun_2",
               "superficie_terreno")
  attr(N,"spec") = NULL
  N$comuna = as.numeric(N$comuna)
  N$manzana = as.numeric(N$manzana)
  N$predio = as.numeric(N$predio)
  N$avaluo = as.numeric(N$avaluo)
  N$contribucion_sem = as.numeric(N$contribucion_sem)
  N$exento = as.numeric(N$exento)
  N$comuna_comun_1 = as.numeric(N$comuna_comun_1)
  N$manzana_comun_1 = as.numeric(N$manzana_comun_1)
  N$predio_comun_1 = as.numeric(N$predio_comun_1)
  N$comuna_comun_2 = as.numeric(N$comuna_comun_2)
  N$manzana_comun_2 = as.numeric(N$manzana_comun_2)
  N$predio_comun_2 = as.numeric(N$predio_comun_2)
  N$superficie_terreno = as.numeric(N$superficie_terreno)
  ## region code = thousands digit(s) of the comuna code
  N$reg = N$region = floor(N$comuna/1000)
  ## collapse region codes above 13 into 13 -- presumably folding the
  ## post-2007 regions back for comparability; confirm.
  N$reg[N$reg > 13] = 13
  ## NL
  names(NL) = c("comuna","manzana","predial","linea","material","calidad","ano","superficie_construccion","uso","condicion")
  attr(NL,"spec") = NULL
  NL$comuna = as.numeric(NL$comuna)
  NL$manzana = as.numeric(NL$manzana)
  NL$predial = as.numeric(NL$predial)
  NL$linea = as.numeric(NL$linea)
  NL$calidad = as.numeric(NL$calidad)
  NL$ano = as.numeric(NL$ano)
  NL$superficie_construccion = as.numeric(NL$superficie_construccion)
  NL$reg = NL$region = floor(NL$comuna/1000)
  NL$reg[NL$reg > 13] = 13
  ## Save one RDS per region (1..13) per table, then the full tables.
  for (reg in 1:13) {
    N_reg = N[N$reg == reg, ]
    NL_reg = NL[NL$reg == reg, ]
    saveRDS(N_reg, paste0("N_", an, "_r", reg, ".rds"))
    saveRDS(NL_reg, paste0("NL_", an, "_r", reg, ".rds"))
    print(reg)
  }
  saveRDS(N, paste0("N_", an, ".rds"))
  saveRDS(NL, paste0("NL_", an, ".rds"))
  print(an)
}
##### Compilacion Bases para Conteo de Metros #####
# A base compiling all years is built to avoid the "underestimation bias in
# past years" for 2015-2018, but it is weak at the end: the base is not well
# enough consolidated with respect to construction years.
anos = c("2015_1","2015_2","2016_1","2016_2","2017_1","2017_2","2018_1","2018_2","2019_1")
regiones = 1:13
an = anos[1]
reg = 11
for (reg in regiones) {
  ## NOTE(review): backslash path fixed as above.
  setwd("C:/Users/o/Documents/GitHub/Info_SII")
  ## Start from the most recent semester, then walk backwards adding any
  ## construction line (id = comuna-manzana-predial-linea-ano) not yet seen.
  lineas = readRDS(paste0("SII/NL_", anos[length(anos)], "_r", reg, ".rds"))
  lineas$id = paste(lineas$comuna, lineas$manzana, lineas$predial, lineas$linea, lineas$ano, sep = "-")
  lineas_n = data.frame()
  an = anos[length(anos) - 1]
  for (an in rev(anos[-length(anos)])) {
    lineas_rep = readRDS(paste0("SII/NL_", an, "_r", reg, ".rds"))
    lineas_rep$id = paste(lineas_rep$comuna, lineas_rep$manzana, lineas_rep$predial, lineas_rep$linea, lineas_rep$ano, sep = "-")
    nuevas = lineas_rep[!lineas_rep$id %in% lineas$id, ]
    lineas_n = rbind(lineas_n, nuevas)
  }
  lineas = rbind(lineas, lineas_n)
  saveRDS(lineas, paste0("SII/NL_comp_r", reg, ".rds"))
  print(reg)
  beepr::beep(2)
}
beepr::beep(5)
|
#' Maximum-likelihood genetic clustering using EM algorithm
#'
#' This function implements the fast maximum-likelihood genetic clustering
#' approach described in Beugin et al (2018). The underlying model is very close
#' to the model implemented by STRUCTURE, but allows for much faster estimation
#' of genetic clusters thanks to the use of the Expectation-Maximization (EM)
#' algorithm. Optionally, the model can explicitely account for hybridization
#' and detect different types of hybrids (see \code{hybrids} and
#' \code{hybrid.coef} arguments). The method is fully documented in a dedicated
#' tutorial which can be accessed using \code{adegenetTutorial("snapclust")}.
#'
#' @details The method is described in Beugin et al (2018) A fast likelihood
#' solution to the genetic clustering problem. Methods in Ecology and
#' Evolution \doi{10.1111/2041-210X.12968}. A dedicated
#' tutorial is available by typing \code{adegenetTutorial("snapclust")}.
#'
#' @seealso The function \code{\link{snapclust.choose.k}} to investigate the optimal
#' value number of clusters 'k'.
#'
#' @author Thibaut Jombart \email{thibautjombart@@gmail.com} and Marie-Pauline
#' Beugin
#'
#' @export
#'
#' @rdname snapclust
#'
#' @param x a \linkS4class{genind} object
#'
#' @param k the number of clusters to look for
#'
#' @param pop.ini parameter indicating how the initial group membership should
#' be found. If \code{NULL}, groups are chosen at random, and the algorithm
#' will be run \code{n.start times}. If "kmeans", then the function
#' \code{find.clusters} is used to define initial groups using the K-means
#' algorithm. If "ward", then the function \code{find.clusters} is used to
#' define initial groups using the Ward algorithm. Alternatively, a factor
#' defining the initial cluster configuration can be provided.
#'
#' @param max.iter the maximum number of iteration of the EM algorithm
#'
#' @param n.start the number of times the EM algorithm is run, each time with
#' different random starting conditions
#'
#' @param n.start.kmeans the number of times the K-means algorithm is run to
#' define the starting point of the ML-EM algorithm, each time with
#' different random starting conditions
#'
#' @param hybrids a logical indicating if hybrids should be modelled
#' explicitely; this is currently implemented for 2 groups only.
#'
#' @param dim.ini the number of PCA axes to retain in the dimension reduction
#' step for \code{\link{find.clusters}}, if this method is used to define
#' initial group memberships (see argument \code{pop.ini}).
#'
#' @param hybrid.coef a vector of hybridization coefficients, defining the
#' proportion of hybrid gene pool coming from the first parental population;
#' this is symmetrized around 0.5, so that e.g. c(0.25, 0.5) will be
#' converted to c(0.25, 0.5, 0.75)
#'
#' @param parent.lab a vector of 2 character strings used to label the two
#' parental populations; only used if hybrids are detected (see argument
#' \code{hybrids})
#'
#' @param ... further arguments passed on to \code{\link{find.clusters}}
#'
#' @return
#'
#' The function \code{snapclust} returns a list with the following
#' components:
#' \itemize{
#'
#' \item \code{$group} a factor indicating the maximum-likelihood assignment of
#' individuals to groups; if identified, hybrids are labelled after
#' hybridization coefficients, e.g. 0.5_A - 0.5_B for F1, 0.75_A - 0.25_B for
#' backcross F1 / A, etc.
#'
#' \item \code{$ll}: the log-likelihood of the model
#'
#' \item \code{$proba}: a matrix of group membership probabilities, with
#' individuals in rows and groups in columns; each value correspond to the
#' probability that a given individual genotype was generated under a given
#' group, under Hardy-Weinberg hypotheses.
#'
#' \item \code{$converged} a logical indicating if the algorithm converged; if
#' FALSE, it is doubtful that the result is an actual Maximum Likelihood
#' estimate.
#'
#' \item \code{$n.iter} an integer indicating the number of iterations the EM
#' algorithm was run for.
#'
#' }
#'
#' @examples
#' \dontrun{
#' data(microbov)
#'
#' ## try function using k-means initialization
#' grp.ini <- find.clusters(microbov, n.clust=15, n.pca=150)
#'
#' ## run EM algo
#' res <- snapclust(microbov, 15, pop.ini = grp.ini$grp)
#' names(res)
#' res$converged
#' res$n.iter
#'
#' ## plot result
#' compoplot(res)
#'
#' ## flag potential hybrids
#' to.flag <- apply(res$proba,1,max)<.9
#' compoplot(res, subset=to.flag, show.lab=TRUE,
#' posi="bottomleft", bg="white")
#'
#'
#' ## Simulate hybrids F1
#' zebu <- microbov[pop="Zebu"]
#' salers <- microbov[pop="Salers"]
#' hyb <- hybridize(zebu, salers, n=30)
#' x <- repool(zebu, salers, hyb)
#'
#' ## method without hybrids
#' res.no.hyb <- snapclust(x, k=2, hybrids=FALSE)
#' compoplot(res.no.hyb, col.pal=spectral, n.col=2)
#'
#' ## method with hybrids
#' res.hyb <- snapclust(x, k=2, hybrids=TRUE)
#' compoplot(res.hyb, col.pal =
#' hybridpal(col.pal = spectral), n.col = 2)
#'
#'
#' ## Simulate hybrids backcross (F1 / parental)
#' f1.zebu <- hybridize(hyb, zebu, 20, pop = "f1.zebu")
#' f1.salers <- hybridize(hyb, salers, 25, pop = "f1.salers")
#' y <- repool(x, f1.zebu, f1.salers)
#'
#' ## method without hybrids
#' res2.no.hyb <- snapclust(y, k = 2, hybrids = FALSE)
#' compoplot(res2.no.hyb, col.pal = hybridpal(), n.col = 2)
#'
#' ## method with hybrids F1 only
#' res2.hyb <- snapclust(y, k = 2, hybrids = TRUE)
#' compoplot(res2.hyb, col.pal = hybridpal(), n.col = 2)
#'
#' ## method with back-cross
#' res2.back <- snapclust(y, k = 2, hybrids = TRUE, hybrid.coef = c(.25,.5))
#' compoplot(res2.back, col.pal = hybridpal(), n.col = 2)
#'
#' }
snapclust <- function(x, k, pop.ini = "ward", max.iter = 100, n.start = 10,
                      n.start.kmeans = 50, hybrids = FALSE, dim.ini = 100,
                      hybrid.coef = NULL, parent.lab = c('A', 'B'), ...) {

    ## --- input checks ------------------------------------------------------
    if (!is.genind(x)) {
        stop("x is not a valid genind object")
    }
    if (any(ploidy(x) > 2)) {
        stop("snapclust not currently implemented for ploidy > 2")
    }

    ## pick the likelihood engine matching the (uniform) ploidy
    if (all(ploidy(x) == 1)) {
        .ll.genotype <- .ll.genotype.haploid
    } else if (all(ploidy(x) == 2)) {
        .ll.genotype <- .ll.genotype.diploid
    } else {
        stop("snapclust not currently implemented for varying ploidy")
    }

    ## This function uses the EM algorithm to find ML group assignment of a set
    ## of genotypes stored in a genind object into 'k' clusters. We need an
    ## initial cluster definition to start with. The rest of the algorithm
    ## consists of:
    ## i) compute the matrix of allele frequencies
    ## ii) compute the likelihood of each genotype for each group
    ## iii) assign genotypes to the group for which they have the highest
    ## likelihood
    ## iv) go back to i) until convergence

    ## Disable multiple starts if the initial condition is not random
    use.random.start <- is.null(pop.ini)
    if (!use.random.start) {
        n.start <- 1L
    }

    if (n.start < 1L) {
        ## FIX(review): the original message said "using n.start=1" although
        ## the function stops; the message now matches the actual behaviour.
        stop(sprintf(
            "n.start is less than 1 (%d)", n.start))
    }

    ## hybrid detection is only defined for two parental groups
    if (hybrids && k > 2) {
        warning(sprintf(
            "forcing k=2 for hybrid mode (requested k is %d)", k))
        k <- 2
    }

    ## Handle hybrid coefficients; these values reflect the contribution of the
    ## first parental population to the allele frequencies of the hybrid
    ## group. For instance, a value of 0.75 indicates that 'a' contributes to
    ## 75%, and 'b' 25% of the allele frequencies of the hybrid - a typical
    ## backcross F1 / a.
    if (hybrids) {
        if (is.null(hybrid.coef)) {
            hybrid.coef <- 0.5
        }
        hybrid.coef <- .tidy.hybrid.coef(hybrid.coef)
    }

    ## Initialisation using 'find.clusters' (kmeans or ward); any other
    ## non-NULL pop.ini is used as-is as a factor of initial memberships.
    if (!is.null(pop.ini)) {
        if (tolower(pop.ini)[1] %in% c("kmeans", "k-means")) {
            pop.ini <- find.clusters(x, n.clust = k, n.pca = dim.ini,
                                     n.start = n.start.kmeans,
                                     method = "kmeans", ...)$grp
        } else if (tolower(pop.ini)[1] %in% c("ward")) {
            pop.ini <- find.clusters(x, n.clust = k, n.pca = dim.ini,
                                     method = "ward", ...)$grp
        }
    }

    ## There is one run of the EM algo for each of the n.start random initial
    ## conditions.
    ll <- -Inf # this will be the total loglike of the best run

    for (i in seq_len(n.start)) {

        ## Set initial conditions: if initial pop is NULL, we create a random
        ## group definition (each cluster has the same probability)
        if (use.random.start) {
            pop.ini <- sample(seq_len(k), nInd(x), replace=TRUE)
        }

        ## process initial population, store levels
        pop.ini <- factor(pop.ini)
        lev.ini <- levels(pop.ini)[1:k] # k+1 would be hybrids

        ## ensure 'pop.ini' matches 'k'
        if (! (length(levels(pop.ini)) %in% c(k, k + length(hybrid.coef))) ) {
            stop("pop.ini does not have k clusters")
        }

        ## initialisation
        group <- factor(as.integer(pop.ini)) # set levels to 1:k (or k+1)
        genotypes <- tab(x)
        n.loc <- nLoc(x)
        counter <- 0L
        converged <- FALSE

        ## This is the actual EM algorithm
        while(!converged && counter<=max.iter) {

            ## get table of allele frequencies (columns) by population (rows);
            ## these are stored as 'pop.freq'; note that it will include extra
            ## rows for different types of hybrids too.
            if (hybrids) {
                pop(x) <- group
                id.parents <- .find.parents(x)
                x.parents <- x[id.parents]
                pop.freq <- tab(genind2genpop(x.parents, quiet=TRUE),
                                freq=TRUE)
                pop.freq <- rbind(pop.freq, # parents
                                  .find.freq.hyb(pop.freq, hybrid.coef)) # hybrids
            } else {
                pop.freq <- tab(genind2genpop(x, pop=group, quiet=TRUE),
                                freq=TRUE)
            }

            ## ensures no allele frequency is exactly zero
            pop.freq <- .tidy.pop.freq(pop.freq, locFac(x))

            ## get likelihoods of genotypes in every pop
            ll.mat <- apply(genotypes, 1, .ll.genotype, pop.freq, n.loc)

            ## assign individuals to most likely cluster
            previous.group <- group
            group <- apply(ll.mat, 2, which.max)

            ## check convergence on the total log-likelihood
            old.ll <- .global.ll(previous.group, ll.mat)
            new.ll <- .global.ll(group, ll.mat)
            if (!is.finite(new.ll)) {
                ## check intentionally disabled; kept as a placeholder
                ## stop(sprintf("log-likelihood at iteration %d is not finite (%f)",
                ##              counter, new.ll))
            }
            converged <- abs(old.ll - new.ll) < 1e-14
            counter <- counter + 1L
        }

        ## keep the best run so far (highest total log-likelihood)
        if (new.ll > ll || i == 1L) {
            ## store results
            ll <- new.ll
            out <- list(group = group, ll = ll)

            ## group membership probability; log-likelihoods are rescaled
            ## first so exp() stays finite and strictly positive
            rescaled.ll.mat <- .rescale.ll.mat(ll.mat)
            out$proba <- prop.table(t(exp(rescaled.ll.mat)), 1)
            out$converged <- converged
            out$n.iter <- counter
        }

    } # end of the for loop

    ## restore labels of groups
    out$group <- factor(out$group)
    if (hybrids) {
        if (!is.null(parent.lab)) {
            lev.ini <- parent.lab
        }
        hybrid.labels <- paste0(hybrid.coef, "_", lev.ini[1], "-",
                                1 - hybrid.coef, "_", lev.ini[2])
        lev.ini <- c(lev.ini, hybrid.labels)
    }
    levels(out$group) <- lev.ini
    colnames(out$proba) <- lev.ini

    ## compute the number of parameters; it is defined as the number of 'free'
    ## allele frequencies, multiplied by the number of groups
    out$n.param <- (ncol(genotypes) - n.loc) * length(lev.ini)

    class(out) <- c("snapclust", "list")
    return(out)
}
## Non-exported function which computes the log-likelihood of a genotype in
## every population. For now only works for diploid individuals. 'x' is a vector
## of allele counts; 'pop.freq' is a matrix of group allele frequencies, with
## groups in rows and alleles in columns.
## TODO: extend this to various ploidy levels, possibly optimizing procedures
## for haploids.
## Non-exported function computing the log-likelihood of one diploid genotype
## under each group's allele frequencies (Hardy-Weinberg). 'x' is a vector of
## allele counts (0/1/2); 'pop.freq' has groups in rows and alleles in
## columns; 'n.loc' is unused here but kept for interface compatibility with
## the haploid counterpart.
.ll.genotype.diploid <- function(x, pop.freq, n.loc){
    ## masks over alleles; computed once, shared across groups
    homoz <- x == 2L
    heteroz <- x == 1L
    ## a heterozygous locus contributes two '1' entries, hence / 2
    n.heter <- sum(heteroz, na.rm = TRUE) / 2

    ll.one.group <- function(f) {
        ## homozygote: p(AA) = f(A)^2  => log p = 2 log f(A)
        ## heterozygote: p(AB) = 2 f(A) f(B)
        ##   => log p = log f(A) + log f(B) + log(2) per heterozygous locus
        sum(log(f[homoz]), na.rm = TRUE) * 2 +
            sum(log(f[heteroz]), na.rm = TRUE) + n.heter * log(2)
    }
    apply(pop.freq, 1, ll.one.group)
}
## Non-exported haploid counterpart of .ll.genotype.diploid: for a haploid
## genotype, p(A) = f(A) at each locus, so the log-likelihood in a group is
## just the sum of log frequencies of the carried alleles.
.ll.genotype.haploid <- function(x, pop.freq, n.loc){
    carried <- x == 1L
    apply(pop.freq, 1, function(f) sum(log(f[carried]), na.rm = TRUE))
}
## Non-exported function computing the total log-likelihood of the model given a vector of group
## assignments and a table of ll of genotypes in each group
.global.ll <- function(group, ll){
    ## Total log-likelihood of an assignment: 'll' has groups in rows and
    ## individuals in columns; pick each individual's assigned-group ll via
    ## matrix indexing (row = group, col = individual) and sum, ignoring NAs.
    pick <- cbind(as.integer(group), seq_along(group))
    sum(ll[pick], na.rm = TRUE)
}
## Non-exported function making a tidy vector of weights for allele frequencies
## of parental populations. It ensures that given any input vector of weights
## 'w' defining the types of hybrids, the output has the following properties:
## - strictly on ]0,1[
## - symmetric around 0.5, e.g. c(.25, .5) gives c(.25, .5, .75)
## - sorted by decreasing values (i.e. hybrid types are sorted by decreasing
## proximity to the first parental population.
.tidy.hybrid.coef <- function(w) {
    ## Normalise hybridization coefficients: keep only values strictly inside
    ## ]0,1[, symmetrise around 0.5 (e.g. 0.25 implies 0.75), round to 4
    ## decimals, deduplicate, and sort by decreasing proximity to the first
    ## parental population.
    inside <- w[w > 0 & w < 1]
    symmetric <- round(c(inside, 1 - inside), 4)
    sort(unique(symmetric), decreasing = TRUE)
}
## Non-exported function determining vectors of allele frequencies in hybrids
## from 2 parental populations. Different types of hybrids are determined by
## weights given to the allele frequencies of the parental populations. Only one
## such value is provided and taken to be the weight of the 1st parental
## population; the complementary frequency is derived for the second parental
## population.
## Parameters are:
## - x: matrix of allele frequencies for population 'a' (first row) and 'b'
## (second row), where allele are in columns.
## - w: a vector of weights for 'a' and 'b', each value determining a type of
## hybrid. For instance, 0.5 is for F1, 0.25 for backcrosses F1/parental, 0.125
## for 2nd backcross F1/parental, etc.
## The output is a matrix of allele frequencies with hybrid types in rows and
## alleles in columns.
.find.freq.hyb <- function(x, w) {
    ## Hybrid allele frequencies as weighted mixtures of the two parental
    ## rows of 'x': each weight in 'w' is the first parent's contribution,
    ## the second parent's being its complement. One output row per weight,
    ## labelled by the weight itself.
    weights <- cbind(w, 1 - w)
    hyb.freq <- weights %*% x
    rownames(hyb.freq) <- w
    hyb.freq
}
## Non-exported function trying to find the two parental populations in a genind
## object containing 'k' clusters. The parental populations are defined as the
## two most distant clusters. The other clusters are deemed to be various types
## of hybrids. The output is a vector of indices identifying the individuals
## from the parental populations.
## Identify the individuals belonging to the two "parental" clusters of a
## genind object, defined as the pair of clusters with the largest pairwise
## Nei's distance; all other clusters are presumed hybrid classes.
## Returns integer indices into the individuals of 'x'.
.find.parents <- function(x) {
## matrix of pairwise distances between clusters, using Nei's distance
D <- as.matrix(dist.genpop(genind2genpop(x, quiet = TRUE), method = 1))
## locate the maximum with a tolerance (float-safe); D is symmetric so the
## maximum appears (at least) twice -- [1,] keeps one (row, col) pair, i.e.
## the two parental cluster indices
parents <- which(abs(max(D)-D) < 1e-14, TRUE)[1,]
## indices of individuals whose population is one of the two parents
out <- which(as.integer(pop(x)) %in% parents)
out
}
## Non-exported function enforcing a minimum allele frequency in a table of
## allele frequency. As we are not accounting for the uncertainty in allele
## frequencies, we need to allow for genotypes to be generated from a population
## which does not have the genotype's allele represented, even if this is at a
## low probability. The transformation is ad-hoc, and has the form:
##
## g(f_i) = (a + f_i / \sum(a + f_i))
## where f_i is the i-th frequency in a given locus. However, this ensures that
## the output has two important properties:
## - it sums to 1
## - it contains no zero
## By default, we set 'a' to 0.01.
## Function inputs are:
## - 'pop.freq': matrix of allele frequencies, with groups in rows and alleles in
## columns
## - 'loc.fac': a factor indicating which alleles belong to which locus, as
## returned by 'locFac([a genind])'
## Additive smoothing of a group x allele frequency table, applied locus by
## locus: g(f_i) = (a + f_i) / sum(a + f_i) with pseudo-frequency a = 0.01.
## Each locus still sums to 1 per group, and no frequency is exactly zero.
## 'loc.fac' maps each allele (column) to its locus, as from locFac().
.tidy.pop.freq <- function(pop.freq, loc.fac) {
    g <- function(f, a = .01) {
        (a + f) / sum(a + f)
    }
    ## smooth each group (row) locus-wise, then reassemble the matrix
    smoothed.rows <- lapply(seq_len(nrow(pop.freq)), function(i) {
        unlist(tapply(pop.freq[i, ], loc.fac, g), use.names = FALSE)
    })
    out <- do.call(rbind, smoothed.rows)
    dimnames(out) <- dimnames(pop.freq)
    return(out)
}
## This function rescales log-likelihood values prior to the computation of
## group membership probabilities.
## issue reported: prop.table(t(exp(ll.mat)), 1) can cause some numerical
## approximation problems; if numbers are large, exp(...) will return Inf
## and the group membership probabilities cannot be computed
##
## Solution: rather than use p_a = exp(ll_a) / (exp(ll_a) + exp(ll_b))
## we can use p_a = exp(ll_a - C) / (exp(ll_a - C) + exp(ll_b - C))
## where 'C' is a sufficiently large constant so that exp(ll_i + C) is
## computable; naively we could use C = max(ll.mat), but the problem is this
## scaling can cause -Inf likelihoods too. In practice, we need to allow
## different scaling for each individual.
##out$proba <-
## prop.table(t(exp(ll.mat)), 1)
.rescale.ll.mat <- function(ll.mat) {
    ## Rescale log-likelihoods (groups in rows, individuals in columns) so
    ## that exp() of every value is finite and strictly positive, allowing
    ## group membership probabilities to be computed. Each individual
    ## (column) is shifted independently; within-column differences are
    ## preserved unless they exceed machine precision, in which case an
    ## approximation is used and a message is emitted.

    ## smallest ll such that exp(ll) is strictly > 0 (machine dependent)
    neg.grid <- 0:-1000
    ll.floor <- neg.grid[max(which(exp(neg.grid) > 0))]
    ## largest ll such that exp(ll) is strictly less than +Inf
    pos.grid <- 1:1000
    ll.ceil <- pos.grid[max(which(exp(pos.grid) < Inf))]

    counter <- 0

    ## rescale the ll values of one individual (one column)
    scale.one.indiv <- function(v) {
        ## anchor the column minimum at ll.floor
        v <- v - min(v) + ll.floor
        if (sum(v) > ll.ceil) {
            ## spread too large for exact shifting: squash into range
            counter <<- counter + 1
            v <- v - min(v)                          # reset min to 0
            v <- ll.floor + (v / sum(v)) * ll.ceil   # range: floor..ceil
        }
        return(v)
    }

    out <- apply(ll.mat, 2, scale.one.indiv)

    if (counter > 0) {
        msg <- paste("Large dataset syndrome:\n",
                     "for", counter, "individuals,",
                     "differences in log-likelihoods exceed computer precision;\n",
                     "group membership probabilities are approximated\n",
                     "(only trust clear-cut values)")
        message(msg)
    }
    return(out)
}
|
/R/snapclust.R
|
no_license
|
thibautjombart/adegenet
|
R
| false
| false
| 18,712
|
r
|
#' Maximum-likelihood genetic clustering using EM algorithm
#'
#' This function implements the fast maximum-likelihood genetic clustering
#' approach described in Beugin et al (2018). The underlying model is very close
#' to the model implemented by STRUCTURE, but allows for much faster estimation
#' of genetic clusters thanks to the use of the Expectation-Maximization (EM)
#' algorithm. Optionally, the model can explicitely account for hybridization
#' and detect different types of hybrids (see \code{hybrids} and
#' \code{hybrid.coef} arguments). The method is fully documented in a dedicated
#' tutorial which can be accessed using \code{adegenetTutorial("snapclust")}.
#'
#' @details The method is described in Beugin et al (2018) A fast likelihood
#' solution to the genetic clustering problem. Methods in Ecology and
#' Evolution \doi{10.1111/2041-210X.12968}. A dedicated
#' tutorial is available by typing \code{adegenetTutorial("snapclust")}.
#'
#' @seealso The function \code{\link{snapclust.choose.k}} to investigate the optimal
#' value number of clusters 'k'.
#'
#' @author Thibaut Jombart \email{thibautjombart@@gmail.com} and Marie-Pauline
#' Beugin
#'
#' @export
#'
#' @rdname snapclust
#'
#' @param x a \linkS4class{genind} object
#'
#' @param k the number of clusters to look for
#'
#' @param pop.ini parameter indicating how the initial group membership should
#' be found. If \code{NULL}, groups are chosen at random, and the algorithm
#' will be run \code{n.start times}. If "kmeans", then the function
#' \code{find.clusters} is used to define initial groups using the K-means
#' algorithm. If "ward", then the function \code{find.clusters} is used to
#' define initial groups using the Ward algorithm. Alternatively, a factor
#' defining the initial cluster configuration can be provided.
#'
#' @param max.iter the maximum number of iteration of the EM algorithm
#'
#' @param n.start the number of times the EM algorithm is run, each time with
#' different random starting conditions
#'
#' @param n.start.kmeans the number of times the K-means algorithm is run to
#' define the starting point of the ML-EM algorithm, each time with
#' different random starting conditions
#'
#' @param hybrids a logical indicating if hybrids should be modelled
#' explicitly; this is currently implemented for 2 groups only.
#'
#' @param dim.ini the number of PCA axes to retain in the dimension reduction
#' step for \code{\link{find.clusters}}, if this method is used to define
#' initial group memberships (see argument \code{pop.ini}).
#'
#' @param hybrid.coef a vector of hybridization coefficients, defining the
#' proportion of hybrid gene pool coming from the first parental population;
#' this is symmetrized around 0.5, so that e.g. c(0.25, 0.5) will be
#' converted to c(0.25, 0.5, 0.75)
#'
#' @param parent.lab a vector of 2 character strings used to label the two
#' parental populations; only used if hybrids are detected (see argument
#' \code{hybrids})
#'
#' @param ... further arguments passed on to \code{\link{find.clusters}}
#'
#' @return
#'
#' The function \code{snapclust} returns a list with the following
#' components:
#' \itemize{
#'
#' \item \code{$group} a factor indicating the maximum-likelihood assignment of
#' individuals to groups; if identified, hybrids are labelled after
#' hybridization coefficients, e.g. 0.5_A - 0.5_B for F1, 0.75_A - 0.25_B for
#' backcross F1 / A, etc.
#'
#' \item \code{$ll}: the log-likelihood of the model
#'
#' \item \code{$proba}: a matrix of group membership probabilities, with
#' individuals in rows and groups in columns; each value corresponds to the
#' probability that a given individual genotype was generated under a given
#' group, under Hardy-Weinberg hypotheses.
#'
#' \item \code{$converged} a logical indicating if the algorithm converged; if
#' FALSE, it is doubtful that the result is an actual Maximum Likelihood
#' estimate.
#'
#' \item \code{$n.iter} an integer indicating the number of iterations the EM
#' algorithm was run for.
#'
#' }
#'
#' @examples
#' \dontrun{
#' data(microbov)
#'
#' ## try function using k-means initialization
#' grp.ini <- find.clusters(microbov, n.clust=15, n.pca=150)
#'
#' ## run EM algo
#' res <- snapclust(microbov, 15, pop.ini = grp.ini$grp)
#' names(res)
#' res$converged
#' res$n.iter
#'
#' ## plot result
#' compoplot(res)
#'
#' ## flag potential hybrids
#' to.flag <- apply(res$proba,1,max)<.9
#' compoplot(res, subset=to.flag, show.lab=TRUE,
#' posi="bottomleft", bg="white")
#'
#'
#' ## Simulate hybrids F1
#' zebu <- microbov[pop="Zebu"]
#' salers <- microbov[pop="Salers"]
#' hyb <- hybridize(zebu, salers, n=30)
#' x <- repool(zebu, salers, hyb)
#'
#' ## method without hybrids
#' res.no.hyb <- snapclust(x, k=2, hybrids=FALSE)
#' compoplot(res.no.hyb, col.pal=spectral, n.col=2)
#'
#' ## method with hybrids
#' res.hyb <- snapclust(x, k=2, hybrids=TRUE)
#' compoplot(res.hyb, col.pal =
#' hybridpal(col.pal = spectral), n.col = 2)
#'
#'
#' ## Simulate hybrids backcross (F1 / parental)
#' f1.zebu <- hybridize(hyb, zebu, 20, pop = "f1.zebu")
#' f1.salers <- hybridize(hyb, salers, 25, pop = "f1.salers")
#' y <- repool(x, f1.zebu, f1.salers)
#'
#' ## method without hybrids
#' res2.no.hyb <- snapclust(y, k = 2, hybrids = FALSE)
#' compoplot(res2.no.hyb, col.pal = hybridpal(), n.col = 2)
#'
#' ## method with hybrids F1 only
#' res2.hyb <- snapclust(y, k = 2, hybrids = TRUE)
#' compoplot(res2.hyb, col.pal = hybridpal(), n.col = 2)
#'
#' ## method with back-cross
#' res2.back <- snapclust(y, k = 2, hybrids = TRUE, hybrid.coef = c(.25,.5))
#' compoplot(res2.back, col.pal = hybridpal(), n.col = 2)
#'
#' }
snapclust <- function(x, k, pop.ini = "ward", max.iter = 100, n.start = 10,
                      n.start.kmeans = 50, hybrids = FALSE, dim.ini = 100,
                      hybrid.coef = NULL, parent.lab = c('A', 'B'), ...) {
    ## Input checks: only genind objects of uniform ploidy 1 or 2 are
    ## supported; pick the matching per-genotype log-likelihood function.
    if (!is.genind(x)) {
        stop("x is not a valid genind object")
    }
    if (any(ploidy(x) > 2)) {
        stop("snapclust not currently implemented for ploidy > 2")
    }
    if (all(ploidy(x) == 1)) {
        .ll.genotype <- .ll.genotype.haploid
    } else if (all(ploidy(x) == 2)) {
        .ll.genotype <- .ll.genotype.diploid
    } else {
        stop("snapclust not currently implemented for varying ploidy")
    }

    ## This function uses the EM algorithm to find ML group assignment of a set
    ## of genotypes stored in a genind object into 'k' clusters. We need an
    ## initial cluster definition to start with. The rest of the algorithm
    ## consists of:
    ## i) compute the matrix of allele frequencies
    ## ii) compute the likelihood of each genotype for each group
    ## iii) assign genotypes to the group for which they have the highest
    ## likelihood
    ## iv) go back to i) until convergence

    ## Disable multiple starts if the initial condition is not random
    use.random.start <- is.null(pop.ini)
    if (!use.random.start) {
        n.start <- 1L
    }
    if (n.start < 1L) {
        ## BUG FIX: the previous message claimed "using n.start=1" although
        ## execution stops here; the wording now matches the actual behaviour.
        stop(sprintf("n.start must be at least 1 (got %d)", n.start))
    }
    if (hybrids && k > 2) {
        warning(sprintf(
            "forcing k=2 for hybrid mode (requested k is %d)", k))
        k <- 2
    }

    ## Handle hybrid coefficients; these values reflect the contribution of the
    ## first parental population to the allele frequencies of the hybrid
    ## group. For instance, a value of 0.75 indicates that 'a' contributes to
    ## 75%, and 'b' 25% of the allele frequencies of the hybrid - a typical
    ## backcross F1 / a.
    if (hybrids) {
        if (is.null(hybrid.coef)) {
            hybrid.coef <- 0.5
        }
        hybrid.coef <- .tidy.hybrid.coef(hybrid.coef)
    }

    ## Initialisation using 'find.clusters' when pop.ini names a method;
    ## otherwise pop.ini is taken as a factor of initial group memberships.
    if (!is.null(pop.ini)) {
        if (tolower(pop.ini)[1] %in% c("kmeans", "k-means")) {
            pop.ini <- find.clusters(x, n.clust = k, n.pca = dim.ini,
                                     n.start = n.start.kmeans,
                                     method = "kmeans", ...)$grp
        } else if (tolower(pop.ini)[1] %in% c("ward")) {
            pop.ini <- find.clusters(x, n.clust = k, n.pca = dim.ini,
                                     method = "ward", ...)$grp
        }
    }

    ## There is one run of the EM algo for each of the n.start random initial
    ## conditions.
    ll <- -Inf # this will be the total loglike
    for (i in seq_len(n.start)) {

        ## Set initial conditions: if initial pop is NULL, we create a random
        ## group definition (each cluster has the same probability)
        if (use.random.start) {
            pop.ini <- sample(seq_len(k), nInd(x), replace = TRUE)
        }

        ## process initial population, store levels
        pop.ini <- factor(pop.ini)
        lev.ini <- levels(pop.ini)[1:k] # k+1 would be hybrids

        ## ensure 'pop.ini' matches 'k'
        if (! (length(levels(pop.ini)) %in% c(k, k + length(hybrid.coef))) ) {
            stop("pop.ini does not have k clusters")
        }

        ## initialisation
        group <- factor(as.integer(pop.ini)) # set levels to 1:k (or k+1)
        genotypes <- tab(x)
        n.loc <- nLoc(x)
        counter <- 0L
        converged <- FALSE

        ## This is the actual EM algorithm
        while (!converged && counter <= max.iter) {

            ## get table of allele frequencies (columns) by population (rows);
            ## these are stored as 'pop.freq'; note that it will include extra
            ## rows for different types of hybrids too.
            if (hybrids) {
                pop(x) <- group
                id.parents <- .find.parents(x)
                x.parents <- x[id.parents]
                pop.freq <- tab(genind2genpop(x.parents, quiet = TRUE),
                                freq = TRUE)
                pop.freq <- rbind(pop.freq, # parents
                                  .find.freq.hyb(pop.freq, hybrid.coef)) # hybrids
            } else {
                pop.freq <- tab(genind2genpop(x, pop = group, quiet = TRUE),
                                freq = TRUE)
            }

            ## ensures no allele frequency is exactly zero
            pop.freq <- .tidy.pop.freq(pop.freq, locFac(x))

            ## get likelihoods of genotypes in every pop
            ll.mat <- apply(genotypes, 1, .ll.genotype, pop.freq, n.loc)

            ## assign individuals to most likely cluster
            previous.group <- group
            group <- apply(ll.mat, 2, which.max)

            ## check convergence: stop when the global log-likelihood no
            ## longer improves within numerical precision
            old.ll <- .global.ll(previous.group, ll.mat)
            new.ll <- .global.ll(group, ll.mat)
            converged <- abs(old.ll - new.ll) < 1e-14
            counter <- counter + 1L
        }

        ## keep the best run so far (the first run is always stored so that
        ## 'out' exists even if all runs have new.ll == -Inf)
        if (new.ll > ll || i == 1L) {
            ## store results
            ll <- new.ll
            out <- list(group = group, ll = ll)

            ## group membership probability; log-likelihoods are rescaled
            ## before exponentiation to avoid numerical under/overflow
            rescaled.ll.mat <- .rescale.ll.mat(ll.mat)
            out$proba <- prop.table(t(exp(rescaled.ll.mat)), 1)
            out$converged <- converged
            out$n.iter <- counter
        }
    } # end of the for loop

    ## restore labels of groups
    out$group <- factor(out$group)
    if (hybrids) {
        if (!is.null(parent.lab)) {
            lev.ini <- parent.lab
        }
        hybrid.labels <- paste0(hybrid.coef, "_", lev.ini[1], "-",
                                1 - hybrid.coef, "_", lev.ini[2])
        lev.ini <- c(lev.ini, hybrid.labels)
    }
    levels(out$group) <- lev.ini
    colnames(out$proba) <- lev.ini

    ## compute the number of parameters; it is defined as the number of 'free'
    ## allele frequencies, multiplied by the number of groups
    out$n.param <- (ncol(genotypes) - n.loc) * length(lev.ini)

    class(out) <- c("snapclust", "list")
    return(out)
}
## Non-exported function which computes the log-likelihood of a genotype in
## every population. For now only works for diploid individuals. 'x' is a vector
## of allele counts; 'pop.freq' is a matrix of group allele frequencies, with
## groups in rows and alleles in columns.
## TODO: extend this to various ploidy levels, possibly optimizing procedures
## for haploids.
.ll.genotype.diploid <- function(x, pop.freq, n.loc){
    ## Log-likelihood of one diploid genotype under each group's allele
    ## frequencies (Hardy-Weinberg). 'x' is a vector of allele counts
    ## (0/1/2 per allele column); 'pop.freq' has groups in rows and alleles
    ## in columns. 'n.loc' is unused here but kept so the signature matches
    ## the haploid counterpart.
    is.homoz <- !is.na(x) & x == 2L
    is.heter <- !is.na(x) & x == 1L
    ## each heterozygous locus contributes two '1' allele entries, hence /2;
    ## every such locus adds a log(2) multiplicity term (p(AB) = 2 f(A) f(B))
    n.heter.loci <- sum(is.heter) / 2
    ll.one.group <- function(f) {
        ## homozygote loci: log(p(AA)) = 2 * log(f(A))
        2 * sum(log(f[is.homoz]), na.rm = TRUE) +
            ## heterozygote loci: log(f(A)) + log(f(B)) + log(2)
            sum(log(f[is.heter]), na.rm = TRUE) +
            n.heter.loci * log(2)
    }
    apply(pop.freq, 1, ll.one.group)
}
.ll.genotype.haploid <- function(x, pop.freq, n.loc){
    ## Log-likelihood of one haploid genotype under each group's allele
    ## frequencies: p(A) = f(A) at each locus, so the log-likelihood is the
    ## sum of the log frequencies of the carried alleles. 'n.loc' is unused
    ## but kept for signature compatibility with the diploid version.
    carried <- !is.na(x) & x == 1L
    apply(pop.freq, 1, function(f) sum(log(f[carried]), na.rm = TRUE))
}
## Non-exported function computing the total log-likelihood of the model given a vector of group
## assignments and a table of ll of genotypes in each group
.global.ll <- function(group, ll){
    ## Total model log-likelihood: for each individual (rows of t(ll)), pick
    ## the log-likelihood of its assigned group, then sum over individuals.
    ll.by.indiv <- t(ll)
    pick <- cbind(seq_along(group), as.integer(group))
    sum(ll.by.indiv[pick], na.rm = TRUE)
}
## Non-exported function making a tidy vector of weights for allele frequencies
## of parental populations. It ensures that given any input vector of weights
## 'w' defining the types of hybrids, the output has the following properties:
## - strictly on ]0,1[
## - symmetric around 0.5, e.g. c(.25, .5) gives c(.25, .5, .75)
## - sorted by decreasing values (i.e. hybrid types are sorted by decreasing
## proximity to the first parental population.
.tidy.hybrid.coef <- function(w) {
    ## Normalise a vector of hybridization coefficients: keep values strictly
    ## inside (0, 1), symmetrise around 0.5 (e.g. c(.25, .5) -> c(.25, .5,
    ## .75)), round to 4 digits, de-duplicate, and sort by decreasing
    ## proximity to the first parental population.
    inside <- w[w > 0 & w < 1]
    symmetric <- round(c(inside, 1 - inside), 4)
    sort(unique(symmetric), decreasing = TRUE)
}
## Non-exported function determining vectors of allele frequencies in hybrids
## from 2 parental populations. Different types of hybrids are determined by
## weights given to the allele frequencies of the parental populations. Only one
## such value is provided and taken to be the weight of the 1st parental
## population; the complementary frequency is derived for the second parental
## population.
## Parameters are:
## - x: matrix of allele frequencies for population 'a' (first row) and 'b'
## (second row), where allele are in columns.
## - w: a vector of weights for 'a' and 'b', each value determining a type of
## hybrid. For instance, 0.5 is for F1, 0.25 for backcrosses F1/parental, 0.125
## for 2nd backcross F1/parental, etc.
## The output is a matrix of allele frequencies with hybrid types in rows and
## alleles in columns.
.find.freq.hyb <- function(x, w) {
    ## Allele frequencies of hybrid classes as weighted mixtures of the two
    ## parental rows of 'x': row i of the result is
    ## w[i] * freq(parent a) + (1 - w[i]) * freq(parent b).
    ## Rows are named after the corresponding weight.
    mix <- cbind(w, 1 - w)
    res <- mix %*% x
    rownames(res) <- w
    res
}
## Non-exported function trying to find the two parental populations in a genind
## object containing 'k' clusters. The parental populations are defined as the
## two most distant clusters. The other clusters are deemed to be various types
## of hybrids. The output is a vector of indices identifying the individuals
## from the parental populations.
.find.parents <- function(x) {
    ## Identify the two parental clusters in 'x' (a genind object whose
    ## populations are set): the parents are taken to be the pair of clusters
    ## with maximum pairwise genetic distance; any other clusters are assumed
    ## to be hybrid classes. Returns the indices of the individuals belonging
    ## to either parental cluster.
    ## matrix of pairwise distances between clusters, using Nei's distance
    D <- as.matrix(dist.genpop(genind2genpop(x, quiet = TRUE), method = 1))
    ## take the first (row, col) pair attaining the maximum distance; the
    ## 1e-14 tolerance avoids exact floating-point equality with max(D)
    parents <- which(abs(max(D)-D) < 1e-14, TRUE)[1,]
    ## individuals whose population index is one of the two parental clusters
    out <- which(as.integer(pop(x)) %in% parents)
    out
}
## Non-exported function enforcing a minimum allele frequency in a table of
## allele frequency. As we are not accounting for the uncertainty in allele
## frequencies, we need to allow for genotypes to be generated from a population
## which does not have the genotype's allele represented, even if this is at a
## low probability. The transformation is ad-hoc, and has the form:
##
## g(f_i) = (a + f_i / \sum(a + f_i))
## where f_i is the i-th frequency in a given locus. However, this ensures that
## the output has two important properties:
## - it sums to 1
## - it contains no zero
## By default, we set 'a' to 0.01.
## Function inputs are:
## - 'pop.freq': matrix of allele frequencies, with groups in rows and alleles in
## columns
## - 'loc.fac': a factor indicating which alleles belong to which locus, as
## returned by 'locFac([a genind])'
.tidy.pop.freq <- function(pop.freq, loc.fac) {
    ## Shrink allele frequencies towards uniform within each locus so that no
    ## allele has frequency exactly 0, using g(f_i) = (a + f_i) / sum(a + f_j)
    ## with a = 0.01. The result still sums to 1 per locus and contains no
    ## zeros. 'pop.freq' has groups in rows, alleles in columns; 'loc.fac'
    ## maps alleles to loci (as from locFac() on a genind).
    smooth.locus <- function(f, a = 0.01) (f + a) / sum(f + a)
    per.group <- apply(pop.freq, 1, tapply, loc.fac, smooth.locus)
    res <- matrix(unlist(per.group, use.names = FALSE),
                  nrow = nrow(pop.freq), byrow = TRUE)
    dimnames(res) <- dimnames(pop.freq)
    res
}
## This function rescales log-likelihood values prior to the computation of
## group membership probabilities.
## issue reported: prop.table(t(exp(ll.mat)), 1) can cause some numerical
## approximation problems; if numbers are large, exp(...) will return Inf
## and the group membership probabilities cannot be computed
##
## Solution: rather than use p_a = exp(ll_a) / (exp(ll_a) + exp(ll_b))
## we can use p_a = exp(ll_a - C) / (exp(ll_a - C) + exp(ll_b - C))
## where 'C' is a sufficiently large constant so that exp(ll_i + C) is
## computable; naively we could use C = max(ll.mat), but the problem is this
## scaling can cause -Inf likelihoods too. In practice, we need to allow
## different scaling for each individual.
##out$proba <-
## prop.table(t(exp(ll.mat)), 1)
.rescale.ll.mat <- function(ll.mat) {
    ## Rescale per-individual log-likelihoods so that exp() of them neither
    ## underflows to 0 nor overflows to +Inf before group membership
    ## probabilities are computed. 'll.mat' has groups in rows and
    ## individuals in columns; each column is rescaled independently.
    ## we first compute ad-hoc minimum and maximum values of log-likelihood; these
    ## will be computer dependent; this is a quick fix, but better alternatives
    ## can be found.
    ## smallest ll such that exp(ll) is strictly > 0
    new_min <- (0:-1000)[max(which(exp(0:-1000) > 0))]
    ## largest ll such that exp(ll) is strictly less than +Inf
    new_max <- (1:1000)[max(which(exp(1:1000) < Inf))]
    ## counts the individuals whose values had to be compressed (lossy step)
    counter <- 0
    ## find rescaling for a single individual;
    ## x: vector of ll values
    rescale.ll.indiv <- function(x) {
        ## set minimum to new_min
        x <- x - min(x) + new_min
        ## set sum to the maximum; this compression loses information, so we
        ## count how many individuals it affects (closure writes 'counter')
        if (sum(x) > new_max) {
            counter <<- counter + 1
            x <- x - min(x) # reset min to 0
            x <- new_min + (x / sum(x)) * new_max # range: new_min to new_max
        }
        return(x)
    }
    out <- apply(ll.mat, 2, rescale.ll.indiv)
    ## emit a single message for the whole matrix rather than one per column
    if (counter > 0) {
        msg <- paste("Large dataset syndrome:\n",
                     "for", counter, "individuals,",
                     "differences in log-likelihoods exceed computer precision;\n",
                     "group membership probabilities are approximated\n",
                     "(only trust clear-cut values)")
        message(msg)
    }
    return(out)
}
|
test_io_check_exists <- function()
{
    ## .io_check_exists() must be silent (return NULL) for an existing local
    ## file, for remote URLs (which cannot be verified locally) and for NA,
    ## but must raise an error for a missing local path.
    .io_check_exists <- Rsamtools:::.io_check_exists
    paths <- c(tempfile(),                 # created below
               tempfile(),                 # deliberately never created
               "ftp://some.where/file",
               "http://some.where/file",
               "https://some.where/file",
               NA)
    file.create(paths[1])
    checkTrue(is.null(.io_check_exists(paths[1])))
    checkException(.io_check_exists(paths[2]), silent = TRUE)
    checkTrue(is.null(.io_check_exists(paths[3:6])))
}
test_catch_samtools <- function()
{
    ## Indexing an unsorted BAM must surface as an R error, not merely a
    ## samtools warning: we record whether each condition type was seen.
    fl <- system.file("unitTests", "cases", "ex1_unsort.bam",
                      package="Rsamtools")
    saw.error <- FALSE
    saw.warning <- FALSE
    tryCatch(
        suppressWarnings(
            withCallingHandlers(
                indexBam(fl),
                warning = function(msg) saw.warning <<- TRUE)),
        error = function(msg) saw.error <<- TRUE)
    checkTrue(isFALSE(saw.warning))
    checkTrue(saw.error)
}
test_catch_samtools_504 <- function()
{
    ## An HTTP 504 from a remote BAM URL must raise an error whose message
    ## starts with "failed to open BamFile:". The handler argument is named
    ## 'cond' (the original shadowed the outer flag with 'err').
    got.expected.error <- FALSE
    tryCatch(
        scanBam("http://httpbin.org/status/504"),
        error = function(cond) {
            got.expected.error <<- startsWith(conditionMessage(cond),
                                              "failed to open BamFile:")
        })
    checkTrue(got.expected.error)
}
test_normalizePath <- function()
{
    ## .normalizePath() must leave a non-existing path unchanged and must
    ## coerce a factor input to the same character path.
    .normalizePath <- Rsamtools:::.normalizePath
    path <- tempfile()
    checkIdentical(path, .normalizePath(path))
    checkIdentical(path, .normalizePath(factor(path)))
}
|
/Rsamtools/unitTests/test_utilities.R
|
permissive
|
solgenomics/R_libs
|
R
| false
| false
| 1,321
|
r
|
test_io_check_exists <- function()
{
    ## .io_check_exists() should return NULL for files that exist, for remote
    ## URLs (not verifiable locally) and for NA, but error for a missing
    ## local path.
    .io_check_exists <- Rsamtools:::.io_check_exists
    fls <- c(
        tempfile(), tempfile(),  # first is created below, second is not
        "ftp://some.where/file",
        "http://some.where/file",
        "https://some.where/file",
        NA
    )
    file.create(fls[1])
    checkTrue(is.null(.io_check_exists(fls[1])))
    ## missing local file must raise, not warn
    checkException(.io_check_exists(fls[2]), silent = TRUE)
    ## remote URLs and NA are accepted without local checks
    checkTrue(is.null(.io_check_exists(fls[3:6])))
}
test_catch_samtools <- function()
{
    ## Indexing an unsorted BAM should produce an R error, not a warning.
    fl <- system.file("unitTests", "cases", "ex1_unsort.bam",
                      package="Rsamtools")
    err <- warn <- FALSE
    ## record whether a warning and/or an error condition is signalled;
    ## withCallingHandlers intercepts warnings without unwinding, tryCatch
    ## catches the error
    tryCatch(suppressWarnings(withCallingHandlers({
        indexBam(fl)
    }, warning=function(msg) {
        warn <<- TRUE
    })), error=function(msg) {
        err <<- TRUE
    })
    checkTrue(isFALSE(warn))
    checkTrue(err)
}
test_catch_samtools_504 <- function()
{
    ## An HTTP 504 must surface as a "failed to open BamFile:" error.
    err <- FALSE
    tryCatch({
        scanBam("http://httpbin.org/status/504")
    }, error=function(err) {
        ## NOTE: the handler parameter 'err' shadows the outer flag; the
        ## superassignment below still reaches the outer 'err' binding
        txt <- "failed to open BamFile:"
        err <<- startsWith(conditionMessage(err), txt)
    })
    checkTrue(err)
}
test_normalizePath <- function()
{
    ## .normalizePath should leave a non-existing path untouched and coerce
    ## a factor input back to the same character path.
    .normalizePath <- Rsamtools:::.normalizePath
    fl <- tempfile()
    checkIdentical(fl, .normalizePath(fl))
    checkIdentical(fl, .normalizePath(factor(fl)))
}
|
# context("add SE and RSE")
# library(GenomicRanges)
# library(SummarizedExperiment)
#
#
# # SummarizedExperiment ####
# test_that("add SummarizedExperiment", {
# multi <- createMultiDataSet()
#
# nrows <- 10; ncols <- 6
# counts <- matrix(runif(nrows * ncols, 1, 1e4), nrows)
# colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
# row.names=LETTERS[1:6])
# rowData <- DataFrame(chr = "chr1", start = 1:10, end = 11:20)
# se0 <- SummarizedExperiment(assays=SimpleList(counts=counts),
# colData=colData, rowData = rowData)
#
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx"), "MultiDataSet")
# expect_equal(names(multi2), "seEx")
# expect_s4_class(multi2[["seEx"]], "SummarizedExperiment")
#
#
# # Check Overwrite ####
# expect_error(add_se(multi2, se0, "seEx"), "There is already an object in this slot. Set overwrite = TRUE to overwrite the previous set.")
# expect_warning(add_se(multi2, se0, "seEx", overwrite = TRUE), "Slot 'seEx' is already set in 'MultiDataSet'. Previous content will be overwritten.")
#
# # Check GRanges
# gr <- makeGRangesFromDataFrame(rowData)
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx", GRanges = gr), "MultiDataSet")
#
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx", GRanges = NA), "MultiDataSet")
#
# expect_error(multi2 <- add_se(multi, se0, "seEx", GRanges = "cot"), "GRanges should be a GenomicRanges or NA.")
# })
#
#
# # RangedSummarizedExperiment ####
# test_that("add RangedSummarizedExperiment", {
# multi <- createMultiDataSet()
#
# counts <- matrix(runif(200 * 6, 1, 1e4), 200)
# rowRanges <- GRanges(rep(c("chr1", "chr2"), c(50, 150)),
# IRanges(floor(runif(200, 1e5, 1e6)), width=100),
# strand=sample(c("+", "-"), 200, TRUE),
# feature_id=sprintf("ID%03d", 1:200))
# colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
# row.names=LETTERS[1:6], id = LETTERS[1:6])
# names(rowRanges) <- 1:200
# rse <- SummarizedExperiment(assays=SimpleList(counts=counts),
# rowRanges=rowRanges, colData=colData)
#
# multi <- add_rse(multi, rse, "rseEx")
# expect_equal(names(multi), "rseEx")
#
# expect_error(add_rse(multi, rse, "rseEx"), "There is already an object in this slot. Set overwrite = TRUE to overwrite the previous set.")
# expect_warning(add_rse(multi, rse, "rseEx", overwrite = TRUE), "Slot 'rseEx' is already set in 'MultiDataSet'. Previous content will be overwritten.")
#
#
#
# expect_s4_class(multi[["rseEx"]], "RangedSummarizedExperiment")
#
# library(minfiData)
# data("MsetEx")
# GRset <- mapToGenome(MsetEx[1:10, 1:2])
#
# expect_warning(multi2 <- add_rse(multi, GRset, "GRSet"), "No id column found in colData. The id will be equal to the sampleNames")
# expect_equal(sampleNames(multi2[["GRSet"]]), c("5723646052_R02C02", "5723646052_R04C01"))
# expect_equal(multi2[["GRSet"]]$id, c("5723646052_R02C02", "5723646052_R04C01"))
#
# colData(GRset)$id <- letters[1:2]
# multi2 <- add_rse(multi, GRset, "GRSet")
# expect_is(multi2[["GRSet"]], "GenomicMethylSet")
# expect_equal(sampleNames(multi2[["GRSet"]]), c("5723646052_R02C02", "5723646052_R04C01"))
# expect_equal(multi2[["GRSet"]]$id, c("a", "b"))
# })
|
/testsOld/testthat/test_04addrse.R
|
permissive
|
isglobal-brge/MultiDataSet
|
R
| false
| false
| 3,450
|
r
|
# context("add SE and RSE")
# library(GenomicRanges)
# library(SummarizedExperiment)
#
#
# # SummarizedExperiment ####
# test_that("add SummarizedExperiment", {
# multi <- createMultiDataSet()
#
# nrows <- 10; ncols <- 6
# counts <- matrix(runif(nrows * ncols, 1, 1e4), nrows)
# colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
# row.names=LETTERS[1:6])
# rowData <- DataFrame(chr = "chr1", start = 1:10, end = 11:20)
# se0 <- SummarizedExperiment(assays=SimpleList(counts=counts),
# colData=colData, rowData = rowData)
#
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx"), "MultiDataSet")
# expect_equal(names(multi2), "seEx")
# expect_s4_class(multi2[["seEx"]], "SummarizedExperiment")
#
#
# # Check Overwrite ####
# expect_error(add_se(multi2, se0, "seEx"), "There is already an object in this slot. Set overwrite = TRUE to overwrite the previous set.")
# expect_warning(add_se(multi2, se0, "seEx", overwrite = TRUE), "Slot 'seEx' is already set in 'MultiDataSet'. Previous content will be overwritten.")
#
# # Check GRanges
# gr <- makeGRangesFromDataFrame(rowData)
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx", GRanges = gr), "MultiDataSet")
#
# expect_s4_class(multi2 <- add_se(multi, se0, "seEx", GRanges = NA), "MultiDataSet")
#
# expect_error(multi2 <- add_se(multi, se0, "seEx", GRanges = "cot"), "GRanges should be a GenomicRanges or NA.")
# })
#
#
# # RangedSummarizedExperiment ####
# test_that("add RangedSummarizedExperiment", {
# multi <- createMultiDataSet()
#
# counts <- matrix(runif(200 * 6, 1, 1e4), 200)
# rowRanges <- GRanges(rep(c("chr1", "chr2"), c(50, 150)),
# IRanges(floor(runif(200, 1e5, 1e6)), width=100),
# strand=sample(c("+", "-"), 200, TRUE),
# feature_id=sprintf("ID%03d", 1:200))
# colData <- DataFrame(Treatment=rep(c("ChIP", "Input"), 3),
# row.names=LETTERS[1:6], id = LETTERS[1:6])
# names(rowRanges) <- 1:200
# rse <- SummarizedExperiment(assays=SimpleList(counts=counts),
# rowRanges=rowRanges, colData=colData)
#
# multi <- add_rse(multi, rse, "rseEx")
# expect_equal(names(multi), "rseEx")
#
# expect_error(add_rse(multi, rse, "rseEx"), "There is already an object in this slot. Set overwrite = TRUE to overwrite the previous set.")
# expect_warning(add_rse(multi, rse, "rseEx", overwrite = TRUE), "Slot 'rseEx' is already set in 'MultiDataSet'. Previous content will be overwritten.")
#
#
#
# expect_s4_class(multi[["rseEx"]], "RangedSummarizedExperiment")
#
# library(minfiData)
# data("MsetEx")
# GRset <- mapToGenome(MsetEx[1:10, 1:2])
#
# expect_warning(multi2 <- add_rse(multi, GRset, "GRSet"), "No id column found in colData. The id will be equal to the sampleNames")
# expect_equal(sampleNames(multi2[["GRSet"]]), c("5723646052_R02C02", "5723646052_R04C01"))
# expect_equal(multi2[["GRSet"]]$id, c("5723646052_R02C02", "5723646052_R04C01"))
#
# colData(GRset)$id <- letters[1:2]
# multi2 <- add_rse(multi, GRset, "GRSet")
# expect_is(multi2[["GRSet"]], "GenomicMethylSet")
# expect_equal(sampleNames(multi2[["GRSet"]]), c("5723646052_R02C02", "5723646052_R04C01"))
# expect_equal(multi2[["GRSet"]]$id, c("a", "b"))
# })
|
## Backtest script: top-down forecast-proportion ("tdfp") forecasts for the
## hierarchical trade series, by category ('cat') and by region ('reg').
## NOTE(review): 'fdate', 'fmethods', 'horizon', 'cores_l' and
## 'tradehts_reduced' are expected to be defined by the sourced script below
## (along with the doParallel setup) -- confirm against cls2/1_pars.R.
rm(list = ls())
source("cls2/1_pars.R")
# create cluster for parallel processing
cl <- makeCluster(cores_l)
registerDoParallel(cl)
## one list element per forecast origin in 'fdate'; within each, one
## forecast per candidate base method in 'fmethods'
tdfp_cat <- foreach(n = 1:length(fdate), .packages = c("hts","forecast")) %dopar% {
  dx <- fdate[n]
  lapply(fmethods, function(fx){
    # Run forecasting methods
    ## train on data up to the month before the origin (dx - 1/12 in
    ## decimal years, i.e. one monthly step back)
    forecast(window(tradehts_reduced$cat, end = dx-1/12),
             h = horizon,
             method = "tdfp",
             fmethod = fx)
  })
}
## same backtest for the regional hierarchy
tdfp_reg <- foreach(n = 1:length(fdate), .packages = c("hts","forecast")) %dopar% {
  dx <- fdate[n]
  lapply(fmethods, function(fx){
    # Run forecasting methods
    forecast(window(tradehts_reduced$reg, end = dx-1/12),
             h = horizon,
             method = "tdfp",
             fmethod = fx)
  })
}
stopCluster(cl)
## label results by their forecast origin and persist them
names(tdfp_cat) <- fdate
names(tdfp_reg) <- fdate
save(tdfp_cat, file = "out/forecasts/forecasts_tdfp_cat.Rdata")
save(tdfp_reg, file = "out/forecasts/forecasts_tdfp_reg.Rdata")
|
/cls2/2_bt_tdfp.R
|
no_license
|
GalElgavish/hierarch_bayes
|
R
| false
| false
| 993
|
r
|
## Backtest script: top-down forecast-proportion ("tdfp") forecasts for the
## hierarchical trade series, by category ('cat') and by region ('reg').
## NOTE(review): 'fdate', 'fmethods', 'horizon', 'cores_l' and
## 'tradehts_reduced' are expected to be defined by the sourced script below
## (along with the doParallel setup) -- confirm against cls2/1_pars.R.
rm(list = ls())
source("cls2/1_pars.R")
# create cluster for parallel processing
cl <- makeCluster(cores_l)
registerDoParallel(cl)
## one list element per forecast origin in 'fdate'; within each, one
## forecast per candidate base method in 'fmethods'
tdfp_cat <- foreach(n = 1:length(fdate), .packages = c("hts","forecast")) %dopar% {
  dx <- fdate[n]
  lapply(fmethods, function(fx){
    # Run forecasting methods
    ## train on data up to the month before the origin (dx - 1/12 in
    ## decimal years, i.e. one monthly step back)
    forecast(window(tradehts_reduced$cat, end = dx-1/12),
             h = horizon,
             method = "tdfp",
             fmethod = fx)
  })
}
## same backtest for the regional hierarchy
tdfp_reg <- foreach(n = 1:length(fdate), .packages = c("hts","forecast")) %dopar% {
  dx <- fdate[n]
  lapply(fmethods, function(fx){
    # Run forecasting methods
    forecast(window(tradehts_reduced$reg, end = dx-1/12),
             h = horizon,
             method = "tdfp",
             fmethod = fx)
  })
}
stopCluster(cl)
## label results by their forecast origin and persist them
names(tdfp_cat) <- fdate
names(tdfp_reg) <- fdate
save(tdfp_cat, file = "out/forecasts/forecasts_tdfp_cat.Rdata")
save(tdfp_reg, file = "out/forecasts/forecasts_tdfp_reg.Rdata")
|
## Put comments here that give an overall description of what your
## functions do
## Added by Antonio Tavera: Pair of functions that cache the inverse of a matrix
## Write a short comment describing this function
## Added by Antonio Tavera:
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
    ## Build a special "matrix": a list of closures that share 'x' (the
    ## matrix) and 'm' (its cached inverse, NULL until computed).
    m <- NULL
    list(
        set = function(y) {
            ## replacing the matrix invalidates any cached inverse
            x <<- y
            m <<- NULL
        },
        get = function() x,
        ## note: the parameter is named 'solve' in the original; the name is
        ## kept for interface compatibility even though it shadows base::solve
        setinverse = function(solve) m <<- solve,
        getinverse = function() m
    )
}
## Write a short comment describing this function
## Added by Antonio Tavera:
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return the inverse of the special "matrix" 'x' created by
    ## makeCacheMatrix, computing it at most once and caching the result in
    ## 'x'. On a cache hit a message is emitted and the cached value returned.
    ##
    ## x   : list with get/set/getinverse/setinverse closures
    ## ... : further arguments forwarded to solve() (BUG FIX: these were
    ##       previously accepted but silently ignored)
    m <- x$getinverse()
    if(!is.null(m)) {
        message("getting cached data")
        return(m)
    }
    data <- x$get()
    m <- solve(data, ...)
    x$setinverse(m)
    m
}
|
/cachematrix.R
|
no_license
|
tavera509/ProgrammingAssignment2
|
R
| false
| false
| 1,338
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Added by Antonio Tavera: Pair of functions that cache the inverse of a matrix
## Write a short comment describing this function
## Added by Antonio Tavera:
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
    ## Creates a special "matrix": a list of closures sharing 'x' (the
    ## matrix) and 'm' (its cached inverse, NULL until computed).
    m <- NULL
    set <- function(y) {
        ## replacing the matrix invalidates any cached inverse
        x <<- y
        m <<- NULL
    }
    get <- function() x
    ## NOTE: the parameter name 'solve' shadows base::solve inside this
    ## one-liner; harmless here, but easy to misread
    setinverse <- function(solve) m <<- solve
    getinverse <- function() m
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## Write a short comment describing this function
## Added by Antonio Tavera:
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x' (a special "matrix" list
    ## from makeCacheMatrix), using the cached inverse when available.
    ## NOTE(review): '...' is accepted but not forwarded to solve() below.
    m <- x$getinverse()
    if(!is.null(m)) {
        ## cache hit: announce and return without recomputing
        message("getting cached data")
        return(m)
    }
    data <- x$get()
    m <- solve(data)
    x$setinverse(m)  # store for subsequent calls
    m
}
|
corr <- function(directory, threshold = 0) {
    ## Compute the correlation between sulfate and nitrate for every monitor
    ## file in 'directory' whose number of complete cases exceeds 'threshold'.
    ##
    ## directory : path containing the per-monitor CSV files
    ## threshold : minimum number of completely observed rows required
    ## Returns a numeric vector of correlations named by file (NULL when no
    ## file qualifies, matching the original behaviour).
    cor.one.file <- function(doc) {
        data <- read.csv(file.path(directory, doc))
        nobs <- sum(complete.cases(data))
        if (nobs > threshold) {
            ## use only rows where both measurements are present
            cor(data$nitrate, data$sulfate, use = "complete.obs")
        } else {
            NULL  # filtered out below
        }
    }
    ## lapply instead of sapply: sapply's return type depends on its input
    ## (list vs vector), which is fragile in non-interactive code
    files <- list.files(directory)
    results <- lapply(files, cor.one.file)
    names(results) <- files
    unlist(results[!vapply(results, is.null, logical(1))])
}
|
/corr.R
|
no_license
|
MattPath/datasciencecoursera
|
R
| false
| false
| 362
|
r
|
corr <- function(directory, threshold = 0) {
    ## Correlation between sulfate and nitrate for every monitor file in
    ## 'directory' whose complete-case count exceeds 'threshold'. Returns a
    ## numeric vector named by file (NULL when no file qualifies).
    form1 <- function(doc) {
        data <- read.csv(file.path(directory, doc))
        nobs <- sum(complete.cases(data))
        if (nobs > threshold) {
            ## pairwise-complete correlation of the two measurement columns
            return (cor(data$nitrate, data$sulfate, use="complete.obs"))
        }
        ## implicit NULL return for files below the threshold
    }
    form2 <- sapply(list.files(directory), form1)
    ## drop the NULL entries of files that did not pass the threshold
    form2 <- unlist(form2[!sapply(form2, is.null)])
    return (form2)
}
|
#' Write data in 10X format i.e. directory with barcodes.tsv, genes.tsv, matrix.mtx
#' If object is Seurat v2, output cellranger2-like format
#' If object is Seurat v3, output cellranger3-like format
#'
#' @param obj Seurat object to print
#' @param dir Character. Directory in which to place 10X-like files
#' @export
#' @importFrom assertthat assert_that
#' @importFrom Matrix writeMM
#' @importFrom R.utils gzip
#' @examples
#' Write10X(obj, dir)
Write10X <- function(obj, dir) {
    ## Write 'obj' in 10X format into 'dir': Seurat v2 objects produce
    ## cellranger2-like files (barcodes.tsv, genes.tsv, matrix.mtx), Seurat
    ## v3 objects produce cellranger3-like gzipped files.
    ##
    ## BUG FIX: the original compared class(obj) directly ("class(obj) %in%"
    ## and "class(obj) == ..."), which breaks when class() returns a vector
    ## of length > 1 (common for S4 objects); inherits() handles that case.
    assert_that(inherits(obj, c("seurat", "Seurat")))
    if (!dir.exists(dir)) {
        stop(paste0(dir, " does not exist. Stopping!"))
    }
    if (inherits(obj, "seurat")) {
        # Seurat v2. Will output in cellranger2-like format i.e. barcodes.tsv,
        # genes.tsv, matrix.mtx
        cat(obj@cell.names, file = paste0(dir, "/barcodes.tsv"), sep = "\n")
        # Genes. Seurat doesn't store ENS ID. As a hack, put ENS_ID for all genes
        df <- data.frame(ID = "ENS_ID", symbol = rownames(obj@raw.data))
        write.table(df, row.names = F, col.names = F, sep = "\t", quote = F, file = paste0(dir,
            "/genes.tsv"))
        mat <- obj@raw.data[, obj@cell.names]
        writeMM(mat, file = paste0(dir, "/matrix.mtx"))
    } else if (inherits(obj, "Seurat")) {
        # Seurat v3. Will output in cellranger3-like format i.e. barcodes.tsv.gz
        # features.tsv.gz matrix.mtx.gz
        gz1 <- gzfile(paste0(dir, "/barcodes.tsv.gz"), "w")
        cat(colnames(obj), file = gz1, sep = "\n")
        close(gz1)
        # Genes. Seurat doesn't store ENS ID. As a hack, put ENS_ID for all genes
        df <- data.frame(ID = "ENS_ID", symbol = rownames(obj))
        gz2 <- gzfile(paste0(dir, "/features.tsv.gz"), "w")
        write.table(df, row.names = F, col.names = F, sep = "\t", quote = F, file = gz2)
        close(gz2)
        mat <- GetAssayData(obj, slot = "counts")[, colnames(obj)]
        assert_that(nrow(mat) > 0 && ncol(mat) > 0, msg = "counts matrix is not present!")
        writeMM(mat, file = paste0(dir, "/matrix.mtx"))
        gzip(filename = paste0(dir, "/matrix.mtx"))
    } else {
        ## unreachable given the assertion above; kept as a defensive guard
        stop("Should not get here")
    }
    return()
}
|
/R/Write10X.R
|
no_license
|
daskelly/earlycross
|
R
| false
| false
| 2,177
|
r
|
#' Write data in 10X format i.e. directory with barcodes.tsv, genes.tsv, matrix.mtx
#' If object is Seurat v2, output cellranger2-like format
#' If object is Seurat v3, output cellranger3-like format
#'
#' @param obj Seurat object to print
#' @param dir Character. Directory in which to place 10X-like files
#' @export
#' @importFrom assertthat assert_that
#' @importFrom Matrix writeMM
#' @importFrom R.utils gzip
#' @examples
#' Write10X(obj, dir)
Write10X <- function(obj, dir) {
    ## Write 'obj' in 10X format into 'dir': Seurat v2 objects produce
    ## cellranger2-like files, Seurat v3 objects cellranger3-like gzipped
    ## files. NOTE(review): comparing class(obj) directly can break when
    ## class() returns a vector of length > 1; inherits() would be safer.
    assert_that(class(obj) %in% c("seurat", "Seurat"))
    if (!dir.exists(dir)) {
        stop(paste0(dir, " does not exist. Stopping!"))
    }
    if (class(obj) == "seurat") {
        # Seurat v2. Will output in cellranger2-like format i.e. barcodes.tsv,
        # genes.tsv, matrix.mtx
        cat(obj@cell.names, file = paste0(dir, "/barcodes.tsv"), sep = "\n")
        # Genes. Seurat doesn't store ENS ID. As a hack, put ENS_ID for all genes
        df <- data.frame(ID = "ENS_ID", symbol = rownames(obj@raw.data))
        write.table(df, row.names = F, col.names = F, sep = "\t", quote = F, file = paste0(dir,
            "/genes.tsv"))
        mat <- obj@raw.data[, obj@cell.names]
        writeMM(mat, file = paste0(dir, "/matrix.mtx"))
    } else if (class(obj) == "Seurat") {
        # Seurat v3. Will output in cellranger3-like format i.e. barcodes.tsv.gz
        # features.tsv.gz matrix.mtx.gz
        gz1 <- gzfile(paste0(dir, "/barcodes.tsv.gz"), "w")
        cat(colnames(obj), file = gz1, sep = "\n")
        close(gz1)
        # Genes. Seurat doesn't store ENS ID. As a hack, put ENS_ID for all genes
        df <- data.frame(ID = "ENS_ID", symbol = rownames(obj))
        gz2 <- gzfile(paste0(dir, "/features.tsv.gz"), "w")
        write.table(df, row.names = F, col.names = F, sep = "\t", quote = F, file = gz2)
        close(gz2)
        mat <- GetAssayData(obj, slot = "counts")[, colnames(obj)]
        assert_that(nrow(mat) > 0 && ncol(mat) > 0, msg = "counts matrix is not present!")
        writeMM(mat, file = paste0(dir, "/matrix.mtx"))
        gzip(filename = paste0(dir, "/matrix.mtx"))
    } else {
        ## unreachable given the assertion above; defensive guard
        stop("Should not get here")
    }
    return()
}
|
## Fuzzer-generated regression input for mcga:::ByteCodeMutation (captured
## valgrind reproduction case): 'bytes1' is a byte-code vector encoded as
## integers, 'pmutation' the mutation probability (0 = no mutation applied).
testlist <- list(bytes1 = c(168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430113L, -1L, -402653184L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
## replay the captured arguments against the internal routine
result <- do.call(mcga:::ByteCodeMutation,testlist)
## inspect the structure of the returned value
str(result)
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802087-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 669
|
r
|
## Duplicate of the fuzzer regression input above: replay the captured
## argument list against mcga:::ByteCodeMutation and print the result shape.
testlist <- list(bytes1 = c(168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430090L, 168430113L, -1L, -402653184L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
# load dependencies
require("carenR")
source("LRAR/caren.R")
source("LRAR/predictLRAR.R")
source("edira/edira.R")
#default rank
# Default ranking: rank the labels (columns of `ys`) by their column means.
# If all column means are tied, drop the first row and recompute, until the
# ranks are no longer all equal.
#
# @param ys numeric matrix; rows are observed rankings, columns are labels.
# @return numeric vector of ranks, one per column of `ys`.
def.rank <- function(ys)
{
  ready <- FALSE
  while (ready == FALSE)
  {
    # guard: if every row has been dropped there is no way to break the tie
    if (nrow(ys) == 0)
      stop("def.rank: no rows left to break the tie in column means")
    r <- rank(colMeans(ys))
    if (length(unique(r))==1 )
    {
      # drop = FALSE keeps `ys` a matrix when only one row remains;
      # the original `ys[-1,]` collapsed it to a vector and broke colMeans()
      ys <- ys[-1, , drop = FALSE]
    } else {
      ready <- TRUE
    }
  }
  r
}
# _goodmank <- function(x,y)
# {
# a <- outer(x,x,function(u,v) sign(v-u))
# b <- outer(y,y,function(u,v) sign(v-u))
# b[b==0] <- NA
# comp <- a==b
# diag(comp) <- NA
# return((sum(comp, na.rm = TRUE)-sum(!comp, na.rm = TRUE))/sum((comp)>=0, na.rm = T))
# }
# Goodman-Kruskal gamma between a ranking `x` and a precomputed pairwise sign
# matrix `b` (as built by outer(y, y, function(u, v) sign(v - u))).
# Returns (concordant - discordant) / compared pairs; the diagonal is ignored.
goodmank <- function(x,b)
{
  pairwise <- outer(x, x, function(u, v) sign(v - u))
  agree <- pairwise == b
  diag(agree) <- NA
  n_agree <- sum(agree, na.rm = TRUE)
  n_disagree <- sum(!agree, na.rm = TRUE)
  # `agree >= 0` is TRUE for both TRUE and FALSE, so this counts all
  # non-NA (i.e. off-diagonal, comparable) entries
  n_compared <- sum(agree >= 0, na.rm = TRUE)
  (n_agree - n_disagree) / n_compared
}
# _accuracy <- function(x,y)
# {
# a <- outer(x,x,function(u,v) sign(v-u))
# b <- outer(y,y,function(u,v) sign(v-u))
# b[b==0] <- NA
# comp <- a==b
# diag(comp) <- NA
# return(sum(!comp, na.rm = TRUE) == 0)
# }
# Exact-match accuracy: TRUE iff the ranking `x` induces exactly the same
# pairwise order as the precomputed sign matrix `b` (diagonal ignored).
accuracy <- function(x,b)
{
  pairwise <- outer(x, x, function(u, v) sign(v - u))
  agree <- pairwise == b
  diag(agree) <- NA
  sum(!agree, na.rm = TRUE) == 0
}
# Equal-frequency fallback discretisation.
#
# Columns of `x` flagged FALSE in `indexesDiscretized` were not split by the
# MDLP step; overwrite their column in DISC$Disc.data with a 4-bin
# equal-frequency coding: values are ranked and mapped, in rank order, onto
# the bin labels 1..4 (each bin covers ~nrow/4 consecutive ranks).
#
# @param x numeric training matrix (rows must match DISC$Disc.data).
# @param indexesDiscretized logical per column; FALSE = needs the fallback.
# @param DISC discretisation result list holding a $Disc.data matrix.
# @return DISC with the fallback columns overwritten in $Disc.data.
equalFrequencyDisc <- function(x, indexesDiscretized, DISC)
{
  # bin label for each rank position: 1,1,...,2,2,...,3,3,...,4,4,...
  discretizedList <- 1:nrow(DISC$Disc.data)
  chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
  splits <- chunk2(discretizedList, 4)
  for(t in 1:4){
    # (the original `discretizedList <<- discretizedList` superassignment
    # leaked the local vector into the calling environment; removed)
    discretizedList[splits[[t]] ] <- t
  }
  # drop = FALSE keeps a single-column selection as a matrix so apply() works
  noDisc <- x[,!indexesDiscretized, drop = FALSE]
  noDisc <- apply(noDisc, 2, function(col){
    orderedInd <- order(col)
    for(o in 1:length(orderedInd)){
      col[orderedInd[o]] <- discretizedList[o]
    }
    col
  })
  DISC$Disc.data[,!indexesDiscretized] <- noDisc
  DISC
}
# Equal-width fallback discretisation: recodes the columns that MDLP left
# unsplit into `nbins` equal-width intervals via discretize() (arules-style),
# storing both the recoded data and the interior cut points back into DISC.
equalWidthDisc <- function(x, indexesDiscretized, DISC)
{
  fallback <- list()
  nbins <- 4
  undisc <- as.matrix(x[,!indexesDiscretized])
  fallback$Disc.data <- apply(undisc, 2, discretize, "interval", nbins, 1:nbins)
  fallback$Disc.data <- apply(fallback$Disc.data, 2, as.integer)
  # keep only the interior cut points: strip the two outer boundaries
  fallback$cutp <- lapply(1:ncol(undisc), function(j){
    discretize(undisc[,j], "interval", nbins, 1:nbins, onlycuts=TRUE)[-c(1,(nbins+1))]
  })
  DISC$Disc.data[,!indexesDiscretized] <- fallback$Disc.data
  DISC$cutp[!indexesDiscretized] <- fallback$cutp
  DISC
}
# Mine label-ranking association rules (LRARs) over `Kfolds` cross-validation
# folds, predict rankings for the held-out fold and print evaluation metrics
# (Goodman-Kruskal gamma / Kendall tau, accuracy, baselines).
#
# X: feature matrix; Y: ranking matrix (one ranking per row).
# is2Years: when truthy, train on all of X/Y and test on the supplied xs/ys.
# folds: per-instance fold assignment (used when is2Years is falsy).
# Kfolds: number of folds; xs/ys: external test features / rankings.
#
# NOTE(review): this function depends on many globals (isPairwise, minSupport,
# minConfidence, minLift, minImprovment, theta, confThreshold, m2, dataset,
# maxPairs) and appears to expect crankPairwise() to populate
# completenessList / defRankUsageList via <<- side effects — confirm before
# reusing outside the original driver script.
mineRules<-function(X, Y, is2Years = 0, folds = NULL, Kfolds = 1, xs = NULL, ys = NULL){
  print("minerules enter")
  # per-fold metric accumulators, deliberately shared globally via <<-
  tauList <<- list()
  accuracyList <<- list()
  baselineList <<- list()
  completenessList <<- list()
  defRankUsageList <<- list()
  rulzList <<- list()
  for(i in 1:Kfolds){
    print("minerules fold")
    if(is2Years){
      x <- X
      y <- Y
    } else {
      # Segement your data by fold
      testIndexes <- which(folds==i)
      train_ind <- -testIndexes
      x <- X[train_ind, ]
      xs <- X[-train_ind, ]
      y <- Y[train_ind, ]
      ys <- Y[-train_ind, ]
    }
    # discretize TRAIN
    DISC <- mdlp.rank(x, y, method = "kendall")
    # a column counts as discretised when MDLP produced at least a 2nd bin
    indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    if(sum(!indexesDiscretized) > 0){
      # equal frequency
      # indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
      # DISC <- equalFrequencyDisc(x, indexesDiscretized, DISC)
      # equal width
      DISC <- equalWidthDisc(x, indexesDiscretized, DISC)
      indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    }
    #prune columns with no partitions
    DISC$Disc.data <- DISC$Disc.data[,indexesDiscretized, drop = FALSE]
    if (ncol(DISC$Disc.data) == 0)
    {
      print("Not discretized")
      xs <- NULL
    } else {
      # map the test features onto the train-set cut points
      xs <- sapply(which(indexesDiscretized), function(j){
        findInterval(xs[,j], c(-Inf, DISC$cutp[[j]], Inf) )
      })
    }
    # transform in unique attributes
    # shift each column's codes so attribute values are unique across columns
    unique.attributes <- function(train.matrix, class, test.matrix, npart)
    {
      mx <- c(0, cumsum(apply(train.matrix, 2, max))[-ncol(train.matrix)])
      list( cbind( t(t(train.matrix) + as.vector(mx)), class ), t(t(test.matrix) + mx) , npart = npart)
    }
    if(!is.null(xs)){
      npart <- mean(apply(DISC$Disc.data, 2, function(c) length(unique(c))))
      # NOTE(review): `class` has no local binding here, so it resolves to
      # base::class — the label/class matrix was probably intended; confirm.
      res <- unique.attributes(DISC$Disc.data, class, xs, npart)
    } else {
      res <- NULL
    }
    # NOTE(review): when res is NULL these indexings error; confirm that the
    # "Not discretized" path is expected to abort the fold.
    xs <-res[[2]]
    xd <-res[[1]]
    # mine LR rules
    if(isPairwise) {
      rulz <- aflrC7Pairwise(xd, y, msup = minSupport, mconf = minConfidence, mlift = minLift, mimp = minImprovment,
                             theta = theta, Xmx = "2000M", confThreshold = confThreshold)
      if(is.na(rulz)) {
        stop("no rules")
      }
      # else if (minSupport != 1 && length(rulz) < 5000) {
      # stop("few rules")
      # }
    } else {
      rulz <- aflrC7(xd, y, msup = minSupport, mconf = minConfidence, mlift = minLift, mimp = minImprovment, theta = 0, Xmx = "2000M")
    }
    # predict LRARs
    # std is the default rank used when no rule fires for a test instance
    std <- def.rank(y)
    mt <- 0
    if(isPairwise){
      yp <- crankPairwise(rulz, xs, ys, std, m2, mt, kfold = i)
    } else {
      yp <- crank(rulz, xs, ys, std, m2, mt)
    }
    # evaluate rules
    if(is.null(ys)){
      # 2017 predictions
      yp <- t(yp)
      colnames(yp) <- colnames(y)
      saveRDS(yp, "predictions.rds")
      saveRDS(y, "results.rds")
      print("2017 prediction saved")
      print("baseline w/ last year's results")
      # NOTE(review): this line indexes ys (NULL in this branch) and uses
      # `baseline` before it is assigned — likely dead/broken code; confirm.
      print(mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall"))))
    } else if(isPairwise){
      #browser()
      # yp <- t(yp)
      # colnames(yp) <- colnames(y)
      # saveRDS(yp, "predictions2013_17.rds")
      # saveRDS(y, "results2013_17.rds")
      # print("2013_17 prediction saved")
      # browser()
      # mean Goodman-Kruskal gamma over test instances (NaN -> 0)
      gamma <- mean(sapply(1:nrow(ys), function(j) {
        g <- goodmank(ys[j,], yp[[j]])
        if (is.nan(g)){
          g <- 0
        }
        g
      } ))
      acc <- mean(sapply(1:nrow(ys), function(j) accuracy(ys[j,], yp[[j]]) ))
      tauList[[i]] <- gamma
      accuracyList[[i]] <- acc
      #rulzList[[i]] <- length(rulz)
      if (defRankUsageList[[i]] != 0 && minSupport > 1) {
        stop("non zero def.rank")
      }
      print(paste("fold", i, ": gamma=", gamma," | completeness=", completenessList[[i]]," | accuracy=", acc," | %defrank=", defRankUsageList[[i]]))
      # baseline: predict the mean ranking of the training set for everyone
      baseline <- rank(colMeans(y))
      baselineTau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall")))
      baselineList[[i]] <- baselineTau
      print(paste("baseline tau=", baselineTau))
      print(paste("smart baseline",mean(sapply(1:nrow(ys), function(j) cor(y[j,], ys[j,], method="kendall")))))
      # after the last fold, print the aggregate report block
      if(i == Kfolds){
        if(!is.null(dataset)){
          title <- paste("############### ",dataset," ###############")
        } else {
          title <- paste("############### output ###############")
        }
        cat("\n")
        cat(title,"\n")
        cat("# confidence:",minConfidence/100,"\n")
        cat("# max pairs:",maxPairs,"\n")
        cat("# calculated support:",minSupport/100,"\n")
        cat("#\n")
        gamma <- round(sum(as.numeric(tauList))/Kfolds, 2)
        cat("# gamma:", gamma,"\n")
        completeness <- round(sum(as.numeric(completenessList))/Kfolds, 2)
        cat("# completeness:", completeness,"\n")
        accuracy <- round(sum(as.numeric(accuracyList))/Kfolds, 2)
        cat("# accuracy:", accuracy,"\n")
        defrank <- round(sum(as.numeric(defRankUsageList))/Kfolds, 2)
        cat("# def.rank:", defrank,"\n")
        rules <- round(sum(as.numeric(rulzList))/Kfolds)
        cat("# rules:", rules,"\n")
        cat("#\n")
        baseline <- round(sum(as.numeric(baselineList))/Kfolds, 2)
        cat("# baseline (gamma):", baseline,"\n")
        # machine-readable one-line summary for downstream parsing
        cat("%RFORMAT%",dataset,"%",minConfidence/100,"%",maxPairs,"%",minSupport/100,"%",gamma,"%",completeness,"%",accuracy,"%",defrank,"%",rules,"%",baseline,"%",minImprovment,"\n", sep = "")
        cat(paste(replicate(nchar(title), "#"), collapse = ""),"\n")
      }
    } else {
      # non-pairwise path: evaluate with Kendall tau only
      tau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], yp[,j], method="kendall")))
      tauList[[i]] <- tau
      print(paste("fold", i, ": tau=", tau))
      baseline <- rank(colMeans(y))
      baselineTau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall")))
      baselineList[[i]] <- baselineTau
      print(paste("baseline tau=", baselineTau))
      print(paste("smart baseline",mean(sapply(1:nrow(ys), function(j) cor(y[j,], ys[j,], method="kendall")))))
      if(i == Kfolds){
        tau <- round(sum(as.numeric(tauList))/Kfolds,2)
        baseline <- round(sum(as.numeric(baselineList))/Kfolds,2)
        print(paste("final tau=", tau))
        print(paste("final baseline tau=", baseline))
        cat("%RFORMAT%",dataset,"%",minConfidence/100,"%","NA","%",minSupport/100,"%",tau,"%","NA","%","NA","%","NA","%","NA","%",baseline,"%",minImprovment,"\n", sep = "")
      }
    }
  }
}
# Dry-run variant of mineRules(): performs only the fold split and the
# discretisation steps (no rule mining, no prediction), printing a pass
# marker per fold. Shares the same global accumulators and parameters.
mineRulesTest<-function(X, Y, is2Years = 0, folds = NULL, Kfolds = 1, xs = NULL, ys = NULL){
  tauList <<- list()
  accuracyList <<- list()
  baselineList <<- list()
  completenessList <<- list()
  defRankUsageList <<- list()
  rulzList <<- list()
  for(i in 1:Kfolds){
    if(is2Years){
      x <- X
      y <- Y
    } else {
      # Segement your data by fold
      testIndexes <- which(folds==i)
      train_ind <- -testIndexes
      x <- X[train_ind, ]
      xs <- X[-train_ind, ]
      y <- Y[train_ind, ]
      ys <- Y[-train_ind, ]
    }
    # discretize TRAIN
    DISC <- mdlp.rank(x, y, method = "kendall")
    indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    if(sum(!indexesDiscretized) > 0){
      # equal frequency
      # indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
      # DISC <- equalFrequencyDisc(x, indexesDiscretized, DISC)
      # equal width
      DISC <- equalWidthDisc(x, indexesDiscretized, DISC)
      indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    }
    #prune columns with no partitions
    DISC$Disc.data <- DISC$Disc.data[,indexesDiscretized, drop = FALSE]
    if (ncol(DISC$Disc.data) == 0)
    {
      print("Not discretized")
      xs <- NULL
    } else {
      # map the test features onto the train-set cut points
      xs <- sapply(which(indexesDiscretized), function(j){
        findInterval(xs[,j], c(-Inf, DISC$cutp[[j]], Inf) )
      })
    }
    # `maxPairs` is a global set by the driver script
    cat("\n","pair",maxPairs,"fold",i,"pass","\n")
  }
}
|
/src/miner.R
|
no_license
|
mpgon/master-dissertation
|
R
| false
| false
| 10,593
|
r
|
# load dependencies
require("carenR")
source("LRAR/caren.R")
source("LRAR/predictLRAR.R")
source("edira/edira.R")
#default rank
# Default ranking: rank the labels (columns of `ys`) by their column means.
# If all column means are tied, drop the first row and recompute, until the
# ranks are no longer all equal.
#
# @param ys numeric matrix; rows are observed rankings, columns are labels.
# @return numeric vector of ranks, one per column of `ys`.
def.rank <- function(ys)
{
  ready <- FALSE
  while (ready == FALSE)
  {
    # guard: if every row has been dropped there is no way to break the tie
    if (nrow(ys) == 0)
      stop("def.rank: no rows left to break the tie in column means")
    r <- rank(colMeans(ys))
    if (length(unique(r))==1 )
    {
      # drop = FALSE keeps `ys` a matrix when only one row remains;
      # the original `ys[-1,]` collapsed it to a vector and broke colMeans()
      ys <- ys[-1, , drop = FALSE]
    } else {
      ready <- TRUE
    }
  }
  r
}
# _goodmank <- function(x,y)
# {
# a <- outer(x,x,function(u,v) sign(v-u))
# b <- outer(y,y,function(u,v) sign(v-u))
# b[b==0] <- NA
# comp <- a==b
# diag(comp) <- NA
# return((sum(comp, na.rm = TRUE)-sum(!comp, na.rm = TRUE))/sum((comp)>=0, na.rm = T))
# }
# Goodman-Kruskal gamma between a ranking `x` and a precomputed pairwise sign
# matrix `b` (as built by outer(y, y, function(u, v) sign(v - u))).
# Returns (concordant - discordant) / compared pairs; the diagonal is ignored.
goodmank <- function(x,b)
{
  pairwise <- outer(x, x, function(u, v) sign(v - u))
  agree <- pairwise == b
  diag(agree) <- NA
  n_agree <- sum(agree, na.rm = TRUE)
  n_disagree <- sum(!agree, na.rm = TRUE)
  # `agree >= 0` is TRUE for both TRUE and FALSE, so this counts all
  # non-NA (i.e. off-diagonal, comparable) entries
  n_compared <- sum(agree >= 0, na.rm = TRUE)
  (n_agree - n_disagree) / n_compared
}
# _accuracy <- function(x,y)
# {
# a <- outer(x,x,function(u,v) sign(v-u))
# b <- outer(y,y,function(u,v) sign(v-u))
# b[b==0] <- NA
# comp <- a==b
# diag(comp) <- NA
# return(sum(!comp, na.rm = TRUE) == 0)
# }
# Exact-match accuracy: TRUE iff the ranking `x` induces exactly the same
# pairwise order as the precomputed sign matrix `b` (diagonal ignored).
accuracy <- function(x,b)
{
  pairwise <- outer(x, x, function(u, v) sign(v - u))
  agree <- pairwise == b
  diag(agree) <- NA
  sum(!agree, na.rm = TRUE) == 0
}
# Equal-frequency fallback discretisation.
#
# Columns of `x` flagged FALSE in `indexesDiscretized` were not split by the
# MDLP step; overwrite their column in DISC$Disc.data with a 4-bin
# equal-frequency coding: values are ranked and mapped, in rank order, onto
# the bin labels 1..4 (each bin covers ~nrow/4 consecutive ranks).
#
# @param x numeric training matrix (rows must match DISC$Disc.data).
# @param indexesDiscretized logical per column; FALSE = needs the fallback.
# @param DISC discretisation result list holding a $Disc.data matrix.
# @return DISC with the fallback columns overwritten in $Disc.data.
equalFrequencyDisc <- function(x, indexesDiscretized, DISC)
{
  # bin label for each rank position: 1,1,...,2,2,...,3,3,...,4,4,...
  discretizedList <- 1:nrow(DISC$Disc.data)
  chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
  splits <- chunk2(discretizedList, 4)
  for(t in 1:4){
    # (the original `discretizedList <<- discretizedList` superassignment
    # leaked the local vector into the calling environment; removed)
    discretizedList[splits[[t]] ] <- t
  }
  # drop = FALSE keeps a single-column selection as a matrix so apply() works
  noDisc <- x[,!indexesDiscretized, drop = FALSE]
  noDisc <- apply(noDisc, 2, function(col){
    orderedInd <- order(col)
    for(o in 1:length(orderedInd)){
      col[orderedInd[o]] <- discretizedList[o]
    }
    col
  })
  DISC$Disc.data[,!indexesDiscretized] <- noDisc
  DISC
}
# Equal-width fallback discretisation: recodes the columns that MDLP left
# unsplit into `nbins` equal-width intervals via discretize() (arules-style),
# storing both the recoded data and the interior cut points back into DISC.
equalWidthDisc <- function(x, indexesDiscretized, DISC)
{
  fallback <- list()
  nbins <- 4
  undisc <- as.matrix(x[,!indexesDiscretized])
  fallback$Disc.data <- apply(undisc, 2, discretize, "interval", nbins, 1:nbins)
  fallback$Disc.data <- apply(fallback$Disc.data, 2, as.integer)
  # keep only the interior cut points: strip the two outer boundaries
  fallback$cutp <- lapply(1:ncol(undisc), function(j){
    discretize(undisc[,j], "interval", nbins, 1:nbins, onlycuts=TRUE)[-c(1,(nbins+1))]
  })
  DISC$Disc.data[,!indexesDiscretized] <- fallback$Disc.data
  DISC$cutp[!indexesDiscretized] <- fallback$cutp
  DISC
}
# Mine label-ranking association rules (LRARs) over `Kfolds` cross-validation
# folds, predict rankings for the held-out fold and print evaluation metrics
# (Goodman-Kruskal gamma / Kendall tau, accuracy, baselines).
#
# X: feature matrix; Y: ranking matrix (one ranking per row).
# is2Years: when truthy, train on all of X/Y and test on the supplied xs/ys.
# folds: per-instance fold assignment (used when is2Years is falsy).
# Kfolds: number of folds; xs/ys: external test features / rankings.
#
# NOTE(review): this function depends on many globals (isPairwise, minSupport,
# minConfidence, minLift, minImprovment, theta, confThreshold, m2, dataset,
# maxPairs) and appears to expect crankPairwise() to populate
# completenessList / defRankUsageList via <<- side effects — confirm before
# reusing outside the original driver script.
mineRules<-function(X, Y, is2Years = 0, folds = NULL, Kfolds = 1, xs = NULL, ys = NULL){
  print("minerules enter")
  # per-fold metric accumulators, deliberately shared globally via <<-
  tauList <<- list()
  accuracyList <<- list()
  baselineList <<- list()
  completenessList <<- list()
  defRankUsageList <<- list()
  rulzList <<- list()
  for(i in 1:Kfolds){
    print("minerules fold")
    if(is2Years){
      x <- X
      y <- Y
    } else {
      # Segement your data by fold
      testIndexes <- which(folds==i)
      train_ind <- -testIndexes
      x <- X[train_ind, ]
      xs <- X[-train_ind, ]
      y <- Y[train_ind, ]
      ys <- Y[-train_ind, ]
    }
    # discretize TRAIN
    DISC <- mdlp.rank(x, y, method = "kendall")
    # a column counts as discretised when MDLP produced at least a 2nd bin
    indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    if(sum(!indexesDiscretized) > 0){
      # equal frequency
      # indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
      # DISC <- equalFrequencyDisc(x, indexesDiscretized, DISC)
      # equal width
      DISC <- equalWidthDisc(x, indexesDiscretized, DISC)
      indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    }
    #prune columns with no partitions
    DISC$Disc.data <- DISC$Disc.data[,indexesDiscretized, drop = FALSE]
    if (ncol(DISC$Disc.data) == 0)
    {
      print("Not discretized")
      xs <- NULL
    } else {
      # map the test features onto the train-set cut points
      xs <- sapply(which(indexesDiscretized), function(j){
        findInterval(xs[,j], c(-Inf, DISC$cutp[[j]], Inf) )
      })
    }
    # transform in unique attributes
    # shift each column's codes so attribute values are unique across columns
    unique.attributes <- function(train.matrix, class, test.matrix, npart)
    {
      mx <- c(0, cumsum(apply(train.matrix, 2, max))[-ncol(train.matrix)])
      list( cbind( t(t(train.matrix) + as.vector(mx)), class ), t(t(test.matrix) + mx) , npart = npart)
    }
    if(!is.null(xs)){
      npart <- mean(apply(DISC$Disc.data, 2, function(c) length(unique(c))))
      # NOTE(review): `class` has no local binding here, so it resolves to
      # base::class — the label/class matrix was probably intended; confirm.
      res <- unique.attributes(DISC$Disc.data, class, xs, npart)
    } else {
      res <- NULL
    }
    # NOTE(review): when res is NULL these indexings error; confirm that the
    # "Not discretized" path is expected to abort the fold.
    xs <-res[[2]]
    xd <-res[[1]]
    # mine LR rules
    if(isPairwise) {
      rulz <- aflrC7Pairwise(xd, y, msup = minSupport, mconf = minConfidence, mlift = minLift, mimp = minImprovment,
                             theta = theta, Xmx = "2000M", confThreshold = confThreshold)
      if(is.na(rulz)) {
        stop("no rules")
      }
      # else if (minSupport != 1 && length(rulz) < 5000) {
      # stop("few rules")
      # }
    } else {
      rulz <- aflrC7(xd, y, msup = minSupport, mconf = minConfidence, mlift = minLift, mimp = minImprovment, theta = 0, Xmx = "2000M")
    }
    # predict LRARs
    # std is the default rank used when no rule fires for a test instance
    std <- def.rank(y)
    mt <- 0
    if(isPairwise){
      yp <- crankPairwise(rulz, xs, ys, std, m2, mt, kfold = i)
    } else {
      yp <- crank(rulz, xs, ys, std, m2, mt)
    }
    # evaluate rules
    if(is.null(ys)){
      # 2017 predictions
      yp <- t(yp)
      colnames(yp) <- colnames(y)
      saveRDS(yp, "predictions.rds")
      saveRDS(y, "results.rds")
      print("2017 prediction saved")
      print("baseline w/ last year's results")
      # NOTE(review): this line indexes ys (NULL in this branch) and uses
      # `baseline` before it is assigned — likely dead/broken code; confirm.
      print(mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall"))))
    } else if(isPairwise){
      #browser()
      # yp <- t(yp)
      # colnames(yp) <- colnames(y)
      # saveRDS(yp, "predictions2013_17.rds")
      # saveRDS(y, "results2013_17.rds")
      # print("2013_17 prediction saved")
      # browser()
      # mean Goodman-Kruskal gamma over test instances (NaN -> 0)
      gamma <- mean(sapply(1:nrow(ys), function(j) {
        g <- goodmank(ys[j,], yp[[j]])
        if (is.nan(g)){
          g <- 0
        }
        g
      } ))
      acc <- mean(sapply(1:nrow(ys), function(j) accuracy(ys[j,], yp[[j]]) ))
      tauList[[i]] <- gamma
      accuracyList[[i]] <- acc
      #rulzList[[i]] <- length(rulz)
      if (defRankUsageList[[i]] != 0 && minSupport > 1) {
        stop("non zero def.rank")
      }
      print(paste("fold", i, ": gamma=", gamma," | completeness=", completenessList[[i]]," | accuracy=", acc," | %defrank=", defRankUsageList[[i]]))
      # baseline: predict the mean ranking of the training set for everyone
      baseline <- rank(colMeans(y))
      baselineTau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall")))
      baselineList[[i]] <- baselineTau
      print(paste("baseline tau=", baselineTau))
      print(paste("smart baseline",mean(sapply(1:nrow(ys), function(j) cor(y[j,], ys[j,], method="kendall")))))
      # after the last fold, print the aggregate report block
      if(i == Kfolds){
        if(!is.null(dataset)){
          title <- paste("############### ",dataset," ###############")
        } else {
          title <- paste("############### output ###############")
        }
        cat("\n")
        cat(title,"\n")
        cat("# confidence:",minConfidence/100,"\n")
        cat("# max pairs:",maxPairs,"\n")
        cat("# calculated support:",minSupport/100,"\n")
        cat("#\n")
        gamma <- round(sum(as.numeric(tauList))/Kfolds, 2)
        cat("# gamma:", gamma,"\n")
        completeness <- round(sum(as.numeric(completenessList))/Kfolds, 2)
        cat("# completeness:", completeness,"\n")
        accuracy <- round(sum(as.numeric(accuracyList))/Kfolds, 2)
        cat("# accuracy:", accuracy,"\n")
        defrank <- round(sum(as.numeric(defRankUsageList))/Kfolds, 2)
        cat("# def.rank:", defrank,"\n")
        rules <- round(sum(as.numeric(rulzList))/Kfolds)
        cat("# rules:", rules,"\n")
        cat("#\n")
        baseline <- round(sum(as.numeric(baselineList))/Kfolds, 2)
        cat("# baseline (gamma):", baseline,"\n")
        # machine-readable one-line summary for downstream parsing
        cat("%RFORMAT%",dataset,"%",minConfidence/100,"%",maxPairs,"%",minSupport/100,"%",gamma,"%",completeness,"%",accuracy,"%",defrank,"%",rules,"%",baseline,"%",minImprovment,"\n", sep = "")
        cat(paste(replicate(nchar(title), "#"), collapse = ""),"\n")
      }
    } else {
      # non-pairwise path: evaluate with Kendall tau only
      tau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], yp[,j], method="kendall")))
      tauList[[i]] <- tau
      print(paste("fold", i, ": tau=", tau))
      baseline <- rank(colMeans(y))
      baselineTau <- mean(sapply(1:nrow(ys), function(j) cor(ys[j,], baseline, method="kendall")))
      baselineList[[i]] <- baselineTau
      print(paste("baseline tau=", baselineTau))
      print(paste("smart baseline",mean(sapply(1:nrow(ys), function(j) cor(y[j,], ys[j,], method="kendall")))))
      if(i == Kfolds){
        tau <- round(sum(as.numeric(tauList))/Kfolds,2)
        baseline <- round(sum(as.numeric(baselineList))/Kfolds,2)
        print(paste("final tau=", tau))
        print(paste("final baseline tau=", baseline))
        cat("%RFORMAT%",dataset,"%",minConfidence/100,"%","NA","%",minSupport/100,"%",tau,"%","NA","%","NA","%","NA","%","NA","%",baseline,"%",minImprovment,"\n", sep = "")
      }
    }
  }
}
# Dry-run variant of mineRules(): performs only the fold split and the
# discretisation steps (no rule mining, no prediction), printing a pass
# marker per fold. Shares the same global accumulators and parameters.
mineRulesTest<-function(X, Y, is2Years = 0, folds = NULL, Kfolds = 1, xs = NULL, ys = NULL){
  tauList <<- list()
  accuracyList <<- list()
  baselineList <<- list()
  completenessList <<- list()
  defRankUsageList <<- list()
  rulzList <<- list()
  for(i in 1:Kfolds){
    if(is2Years){
      x <- X
      y <- Y
    } else {
      # Segement your data by fold
      testIndexes <- which(folds==i)
      train_ind <- -testIndexes
      x <- X[train_ind, ]
      xs <- X[-train_ind, ]
      y <- Y[train_ind, ]
      ys <- Y[-train_ind, ]
    }
    # discretize TRAIN
    DISC <- mdlp.rank(x, y, method = "kendall")
    indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    if(sum(!indexesDiscretized) > 0){
      # equal frequency
      # indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
      # DISC <- equalFrequencyDisc(x, indexesDiscretized, DISC)
      # equal width
      DISC <- equalWidthDisc(x, indexesDiscretized, DISC)
      indexesDiscretized <- apply(DISC$Disc.data, 2, function(dc) any(dc==2, na.rm=TRUE))
    }
    #prune columns with no partitions
    DISC$Disc.data <- DISC$Disc.data[,indexesDiscretized, drop = FALSE]
    if (ncol(DISC$Disc.data) == 0)
    {
      print("Not discretized")
      xs <- NULL
    } else {
      # map the test features onto the train-set cut points
      xs <- sapply(which(indexesDiscretized), function(j){
        findInterval(xs[,j], c(-Inf, DISC$cutp[[j]], Inf) )
      })
    }
    # `maxPairs` is a global set by the driver script
    cat("\n","pair",maxPairs,"fold",i,"pass","\n")
  }
}
|
# Plot 3: line chart of the three energy sub-metering series over time,
# written to plot3.png (480 x 480 px).
source("giveData.R")
df <- giveData("household_power_consumption.txt")
png("plot3.png", width = 480, height = 480)
meter_labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
with(df, {
  # empty canvas first, then one coloured line per sub-meter
  plot(Date_Time, Sub_metering_1, type = "n",
       ylab = "Energy sub metering", xlab = "")
  lines(Date_Time, Sub_metering_1, col = "black")
  lines(Date_Time, Sub_metering_2, col = "red")
  lines(Date_Time, Sub_metering_3, col = "blue")
})
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"),
       legend = meter_labels)
dev.off()
|
/plot3.R
|
no_license
|
AnikMallick/ExData_Plotting1
|
R
| false
| false
| 571
|
r
|
# Plot 3: line chart of the three energy sub-metering series over time,
# written to plot3.png (480 x 480 px).
source("giveData.R")
df <- giveData("household_power_consumption.txt")
png("plot3.png", width = 480, height = 480)
meter_labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
with(df, {
  # empty canvas first, then one coloured line per sub-meter
  plot(Date_Time, Sub_metering_1, type = "n",
       ylab = "Energy sub metering", xlab = "")
  lines(Date_Time, Sub_metering_1, col = "black")
  lines(Date_Time, Sub_metering_2, col = "red")
  lines(Date_Time, Sub_metering_3, col = "blue")
})
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"),
       legend = meter_labels)
dev.off()
|
# Test helper functions
# testthat unit tests for the package's special-function and matrix helpers.
test_that("Special mathematics functions work as expected", {
  # lmvgamma(x, p): log multivariate gamma. Invalid domains must error
  # (non-positive x, dimension p < 1).
  expect_error(lmvgamma(-1, 2))
  expect_error(lmvgamma(2, 0))
  # p = 1 reduces to lgamma: lgamma(2) = 0, lgamma(3) = log(2) = 0.693147
  expect_equal(lmvgamma(2, 1), 0)
  expect_equal(lmvgamma(3, 1), 0.693147, tolerance = 1e-5)
  expect_equal(lmvgamma(3, 2), 1.550195, tolerance = 1e-5)
  expect_equal(lmvgamma(4, 3), 5.402975, tolerance = 1e-5)
  # mvdigamma(x, p): multivariate digamma; same domain restrictions
  expect_error(mvdigamma(-1, 2))
  expect_error(mvdigamma(2, 0))
  # p = 1 reduces to digamma; p = 2 adds the half-shifted term digamma(1.5)
  expect_equal(mvdigamma(2, 1), digamma(2))
  expect_equal(mvdigamma(2, 2), digamma(2) + digamma(1.5))
  expect_equal(mvdigamma(4, 3), 3.282059, tolerance = 1e-5)
})
test_that("Matrix operators work as expected", {
  # blockDiag() lays matrices out block-diagonally with zero fill:
  # a 2x2 block and a 1x2 block yield a 3x4 result (column-major expected).
  M1 <- matrix(c(1, 2, 3, 4), 2, 2)
  M2 <- matrix(c(1, 1), 1, 2)
  expect_equal(
    blockDiag(list(M1, M2)),
    matrix(c(1, 2, 0, 3, 4, 0, 0, 0, 1, 0, 0, 1), 3, 4)
  )
})
|
/tests/testthat/test-helpers.R
|
no_license
|
jatotterdell/varapproxr
|
R
| false
| false
| 818
|
r
|
# Test helper functions
# testthat unit tests for the package's special-function and matrix helpers.
test_that("Special mathematics functions work as expected", {
  # lmvgamma(x, p): log multivariate gamma. Invalid domains must error
  # (non-positive x, dimension p < 1).
  expect_error(lmvgamma(-1, 2))
  expect_error(lmvgamma(2, 0))
  # p = 1 reduces to lgamma: lgamma(2) = 0, lgamma(3) = log(2) = 0.693147
  expect_equal(lmvgamma(2, 1), 0)
  expect_equal(lmvgamma(3, 1), 0.693147, tolerance = 1e-5)
  expect_equal(lmvgamma(3, 2), 1.550195, tolerance = 1e-5)
  expect_equal(lmvgamma(4, 3), 5.402975, tolerance = 1e-5)
  # mvdigamma(x, p): multivariate digamma; same domain restrictions
  expect_error(mvdigamma(-1, 2))
  expect_error(mvdigamma(2, 0))
  # p = 1 reduces to digamma; p = 2 adds the half-shifted term digamma(1.5)
  expect_equal(mvdigamma(2, 1), digamma(2))
  expect_equal(mvdigamma(2, 2), digamma(2) + digamma(1.5))
  expect_equal(mvdigamma(4, 3), 3.282059, tolerance = 1e-5)
})
test_that("Matrix operators work as expected", {
  # blockDiag() lays matrices out block-diagonally with zero fill:
  # a 2x2 block and a 1x2 block yield a 3x4 result (column-major expected).
  M1 <- matrix(c(1, 2, 3, 4), 2, 2)
  M2 <- matrix(c(1, 1), 1, 2)
  expect_equal(
    blockDiag(list(M1, M2)),
    matrix(c(1, 2, 0, 3, 4, 0, 0, 0, 1, 0, 0, 1), 3, 4)
  )
})
|
##
## save.R
## Saving functions for package Hipathia
##
## Written by Marta R. Hidalgo, Jose Carbonell-Caballero
##
## Code style by Hadley Wickham (http://r-pkgs.had.co.nz/style.html)
## https://www.bioconductor.org/developers/how-to/coding-style/
##
##################################
# Save results
##################################
#' Save results to folder
#'
#' Saves results to a folder. In particular, it saves the matrix of subpathway
#' values, a table with the results of the provided comparison,
#' the accuracy of the results and the .SIF and attributes of the pathways.
#'
#' @param results Results object as returned by the \code{hipathia} function.
#' @param comp Comparison as returned by the \code{do_wilcoxon} function.
#' @param metaginfo Pathways object
#' @param output_folder Name of the folder in which the results will be stored.
#' @param path Absolute path to the parent directory in which `output_folder`
#' will be saved. If it is not provided, it will be created in a temp folder.
#'
#' @return Creates a folder in disk in which all the information to browse the
#' pathway results is stored, and returns the path to that folder.
#'
#' @examples
#' data(results)
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' save_results(results, comp, pathways, "output_results")
#'
#' @export
#'
save_results <- function(results, comp, metaginfo, output_folder = NULL,
                         path = NULL){
    # default parent directory: session temp folder
    if(is.null(path))
        path <- tempdir()
    # auto-number the output folder when no name is given:
    # hipathia_results_1, hipathia_results_2, ...
    if(is.null(output_folder)){
        n <- length(list.files(path, pattern = "hipathia_results")) + 1
        output_folder <- paste0("hipathia_results_", n)
    }
    output_folder <- paste0(path, "/", output_folder)
    # dir.exists() rather than file.exists(): a regular file with the same
    # name must not suppress the directory creation below
    if(!dir.exists(output_folder))
        dir.create(output_folder)
    # Write files
    # subpathway activation values, one row per path
    utils::write.table(results$all$path.vals,
                       file = paste0(output_folder, "/all_path_vals.txt"),
                       col.names = TRUE,
                       row.names = TRUE,
                       quote = FALSE,
                       sep = "\t")
    # comparison statistics, annotated with human-readable path names
    comp$path.name <- get_path_names(metaginfo, rownames(comp))
    utils::write.table(comp,
                       file = paste0(output_folder, "/all_path_stats.txt"),
                       col.names = TRUE,
                       row.names = TRUE,
                       quote = FALSE,
                       sep = "\t")
    # accuracy summary, only when the results object carries one
    if(!is.null(results$all$accuracy)){
        accu <- c(results$all$accuracy$total, results$all$accuracy$percent,
                  results$all$accuracy$by.path)
        names(accu) <- c("Accuracy", "Percent", names(accu)[3:length(accu)])
        utils::write.table(accu,
                           file = paste0(output_folder, "/accuracy.txt"),
                           col.names = TRUE,
                           row.names = TRUE,
                           quote = FALSE,
                           sep = "\t")
    }
    return(output_folder)
}
# Build the SIF interaction table plus node/edge attribute tables for one
# pathway and dump each as a tab-separated file next to `prefix`
# (<prefix>.sif, <prefix>.natt, <prefix>.eatt).
write_attributes <- function(this_comp, pathway, metaginfo, prefix,
                             moreatts_pathway = NULL, conf = 0.05,
                             reverse_xref = NULL, exp = NULL){
    atts <- create_node_and_edge_attributes(this_comp, pathway, metaginfo,
                                            moreatts_pathway = moreatts_pathway,
                                            conf = conf,
                                            reverse_xref = reverse_xref,
                                            exp = exp)
    # shared writer: unquoted, tab-separated, no row names
    write_tsv <- function(tab, ext, header){
        utils::write.table(tab, file = paste0(prefix, ext),
                           row.names = FALSE,
                           col.names = header,
                           quote = FALSE,
                           sep = "\t")
    }
    write_tsv(atts$sif, ".sif", FALSE)       # interactions, headerless
    write_tsv(atts$node_att, ".natt", TRUE)  # node attributes
    write_tsv(atts$edge_att, ".eatt", TRUE)  # edge attributes
}
# Build the .sif interaction list plus node (natt) and edge (eatt) attribute
# tables for one pathway, colouring edges from the subpath comparison `comp`
# ("#1f78b4"/"#e31a1c" when FDRp.value < conf, darkgrey otherwise) and
# attaching one 0/1 membership column per subpath. Optionally appends
# translated gene ids (reverse_xref) and expression values (exp) to the
# node table.
create_node_and_edge_attributes <- function(comp, pathway, metaginfo,
                                            moreatts_pathway = NULL, conf=0.05,
                                            reverse_xref = NULL, exp = NULL){
    pathigraphs <- metaginfo$pathigraphs
    # effector mode when the first comp rowname splits into 3 "-" tokens
    effector <- length(unlist(strsplit(rownames(comp)[1], split="-"))) == 3
    if(effector == TRUE){
        s <- pathigraphs[[pathway]]$effector.subgraphs
    }else{
        s <- pathigraphs[[pathway]]$subgraphs
    }
    ig <- pathigraphs[[pathway]]$graph
    # fill in default visual attributes for vertices that lack them
    if(is.null(V(ig)$type)) V(ig)$type <- "node"
    if(is.null(V(ig)$width)) V(ig)$width <- 15
    if(is.null(V(ig)$height)) V(ig)$height <- 5
    if(is.null(V(ig)$label.color)) V(ig)$label.color <- "black"
    if(is.null(V(ig)$label.cex)) V(ig)$label.cex <- 0.7
    # V(ig)$stroke.color <- find_node_colors(comp, s, ig, conf)[V(ig)$name]
    V(ig)$stroke.color <- "lightgrey"
    #V(ig)$stroke.color[grepl("func",V(ig)$name)] <- "white"
    V(ig)$stroke.size <- 2
    #V(ig)$stroke.size[grepl("func",V(ig)$name)] <- 0
    V(ig)$color <- "white"
    V(ig)$width[V(ig)$shape=="circle"] <- 15
    V(ig)$width[V(ig)$shape!="circle"] <- 22
    V(ig)$shape[V(ig)$shape=="rectangle" & !grepl("func", V(ig)$name)] <-
        "ellipse"
    V(ig)$shape[V(ig)$shape=="rectangle" & grepl("func", V(ig)$name)] <-
        "rectangle"
    V(ig)$width[grepl("func",V(ig)$name)] <- -1
    # node attribute table: one row per vertex, character matrix
    natt <- cbind(V(ig)$name,
                  V(ig)$label,
                  10,
                  V(ig)$nodeX,
                  V(ig)$nodeY,
                  V(ig)$color,
                  V(ig)$stroke.color,
                  V(ig)$stroke.size,
                  V(ig)$shape,
                  V(ig)$type,
                  V(ig)$label.cex,
                  V(ig)$label.color,
                  V(ig)$width,
                  sapply(V(ig)$genesList, paste, collapse=","),
                  V(ig)$tooltip)
    colnames(natt) <- c("ID",
                        "label",
                        "labelSize",
                        "X",
                        "Y",
                        "color",
                        "strokeColor",
                        "strokeSize",
                        "shape",
                        "type",
                        "labelCex",
                        "labelColor",
                        "size",
                        "genesList",
                        "tooltip")
    rownames(natt) <- natt[,1]
    # truncate multi-line labels to the first line plus ellipsis
    natt[,"label"] <- sapply(natt[,"label"], function(x){
        ul <- unlist(strsplit(x, split="\n"))
        if(length(ul) > 1){
            paste0(ul[1], ", ...")
        }else{
            ul[1]
        }
    })
    # Add
    # merge user-supplied node attributes: overwrite shared columns,
    # append the rest
    if(!is.null(moreatts_pathway)){
        common_col_idx <- colnames(moreatts_pathway) %in% colnames(natt)
        common_col <- colnames(moreatts_pathway)[common_col_idx]
        not_common_col_idx <- !colnames(moreatts_pathway) %in% colnames(natt)
        not_common_col <- colnames(moreatts_pathway)[not_common_col_idx]
        for(col in common_col)
            natt[,col] <- moreatts_pathway[,col]
        if(!"strokeColor" %in% common_col){
            natt[,"strokeColor"] <- natt[,"color"]
            natt[natt[,"strokeColor"] == "white","strokeColor"] <- "lightgrey"
        }
        natt <- cbind(natt, moreatts_pathway[,not_common_col])
    }
    # one 0/1 membership column per subpath, filled in the loop below
    node_path_assoc <- matrix(0, nrow = nrow(natt), ncol = length(s))
    colnames(node_path_assoc) <- names(s)
    natt <- cbind(natt, node_path_assoc)
    sif <- c()
    eatt <- c()
    epath_assoc <- c()
    # accumulate edges and edge attributes across every subpath graph
    for(i in seq_along(s)){
        # get subgraph
        subgraph <- s[[i]]
        name <- names(s)[i]
        pname <- get_path_names(metaginfo, name)
        # sif
        raw_edges <- get.edgelist(subgraph)
        type <- c("activation","inhibition")[(E(subgraph)$relation == -1) + 1]
        edges <- cbind(raw_edges[,1], type, raw_edges[,2])
        sif <- rbind(sif, edges)
        # edge attributes
        eids <- apply(edges, 1, function(x) paste0(x, collapse = "_"))
        status <- comp[name,"UP/DOWN"]
        if("color" %in% colnames(comp)){
            color <- comp[name,"color"]
        } else {
            if( comp[name,"FDRp.value"] < conf){
                color <- c("#1f78b4","#e31a1c")[(status == "UP") + 1]
            } else {
                color <- "darkgrey"
            }
        }
        path_assoc <- matrix(0, nrow = nrow(edges), ncol = length(s))
        colnames(path_assoc) <- names(s)
        path_assoc[,name] <- 1
        edges_atts <- cbind(id = eids,
                            status = status,
                            color = color,
                            name = name,
                            pname = pname,
                            pvalue = comp[name,"p.value"],
                            adj.pvalue = comp[name,"FDRp.value"])
        eatt <- rbind(eatt, edges_atts)
        epath_assoc <- rbind(epath_assoc, path_assoc)
        # node attributes
        natt[get.vertex.attribute(subgraph, "name"), name] <- 1
    }
    # melt multi path interactions
    # collapse edges shared by several subpaths into at most three variants:
    # ".up" (significant UP), ".down" (significant DOWN), unsuffixed (n.s.)
    unique_edges <- unique(eatt[,1])
    def_eatt <- c()
    def_sif <- c()
    def_epath_assoc <- c()
    for(ue in unique_edges){
        indexes <- which(eatt[,1] == ue)
        subeatt <- eatt[indexes,,drop = FALSE]
        subepath_assoc <- epath_assoc[indexes,,drop = FALSE]
        subsif <- sif[indexes,,drop = FALSE]
        # up regulated
        upsig <- which(subeatt[,"status"] == "UP" &
                           as.numeric(subeatt[,"adj.pvalue"]) < conf)
        if(length(upsig) > 0){
            selected_subsif <- subsif[1,]
            selected_subsif[2] <- paste0(selected_subsif[2], ".up")
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[upsig,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1, c("id", "status", "color",
                                                  "pvalue", "adj.pvalue")]
            selected_subeatt["id"] <- paste(selected_subsif, collapse = "_")
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[upsig,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
        # down regulated
        downsig <- which(subeatt[,"status"] == "DOWN" &
                             as.numeric(subeatt[,"adj.pvalue"]) < conf)
        if(length(downsig) > 0){
            selected_subsif <- subsif[1,]
            selected_subsif[2] <- paste0(selected_subsif[2], ".down")
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[downsig,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1,c("id",
                                                 "status",
                                                 "color",
                                                 "pvalue",
                                                 "adj.pvalue")]
            selected_subeatt["id"] <- paste(selected_subsif, collapse = "_")
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[downsig,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
        # no sigs
        nosigs <- which(as.numeric(subeatt[,"adj.pvalue"]) >= conf)
        if(length(nosigs) > 0){
            selected_subsif <- subsif[1,]
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[nosigs,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1,c("id",
                                                 "status",
                                                 "color",
                                                 "pvalue",
                                                 "adj.pvalue")]
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[nosigs,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
    }
    rownames(def_eatt) <- NULL
    def_eatt <- as.data.frame(def_eatt, stringsAsFactors = FALSE)
    def_epath_assoc <- as.data.frame(def_epath_assoc, stringsAsFactors = FALSE)
    rownames(def_sif) <- NULL
    def_sif <- as.data.frame(def_sif, stringsAsFactors = FALSE)
    def_eatt$shape <- c("inhibited", "directed")[grepl("activation",
                                                       def_sif[,2]) + 1]
    def_eatt <- cbind(def_eatt, (def_epath_assoc == TRUE) + 0)
    natt[,"label"] <- gsub("\\*", "", natt[,"label"])
    # Add functions
    #---------------------
    # append the edges that link effector nodes to their "func" annotation
    # nodes, with neutral (darkgrey / DOWN) styling
    left <- which(grepl("func", get.edgelist(ig)[,2]))
    if(length(left) > 0 ){
        if(length(left) == 1){
            ids <- paste(get.edgelist(ig)[left,1], "activation",
                         get.edgelist(ig)[left,2], sep = "_")
        }else{
            ids <- apply(get.edgelist(ig)[left,], 1, function(x){
                paste(x[1], "activation", x[2], sep = "_")
            })
        }
        funejes <- as.data.frame(matrix(0,
                                        nrow = length(ids),
                                        ncol = ncol(def_eatt)),
                                 stringsAsFactors = FALSE)
        colnames(funejes) <- colnames(def_eatt)
        rownames(funejes) <- ids
        funejes$id <- ids
        funejes$status <- "DOWN"
        funejes$color <- "darkgrey"
        # NOTE(review): pvalue receives the id strings and adj.pvalue the
        # literal "DOWN" — these look like placeholder/copy-paste values;
        # confirm the intended contents.
        if("pvalue" %in% colnames(funejes))
            funejes$pvalue <- ids
        if("adj.pvalue" %in% colnames(funejes))
            funejes$adj.pvalue <- "DOWN"
        funejes$shape <- "directed"
        nods <- get.edgelist(ig)[left,1]
        names(nods) <- ids
        names(ids) <- nods
        # mark the subpath membership columns whose effector node matches
        # the source node of each function edge
        funs <- t(apply(funejes, 1, function(x){
            lastnodes <- sapply(colnames(funejes), get_effnode_id)
            if(any(lastnodes == nods[x[[1]]])){
                x[lastnodes == nods[x[[1]]]] <- 1
                x
            }else{
                x
            }
        }))
        funs <- as.data.frame(funs, stringsAsFactors = FALSE)
        sif_funs <- data.frame(V1 = get.edgelist(ig)[left,1],
                               type = rep("activation", times = length(left)),
                               V3 = get.edgelist(ig)[left,2],
                               stringsAsFactors = FALSE)
        def_sif <- rbind(def_sif, sif_funs)
        def_eatt <- rbind(def_eatt, funs)
    }
    # associate each "_func" node with its corresponding subpath column
    fun_indexes <- grep("_func", rownames(natt))
    fun_names <- rownames(natt)[fun_indexes]
    if(length(fun_indexes) > 0){
        for(i in seq_along(fun_names)){
            pp <- gsub("N", "P", gsub("_func", "", fun_names[i]))
            if(effector == TRUE){
                natt[fun_names[i], pp] <- 1
            } else {
                natt[fun_names[i], grep(paste0("- ", pp), colnames(natt))] <- 1
            }
        }
    }
    # optional: translate gene ids via the reverse cross-reference map
    if(!is.null(reverse_xref)){
        sids <- strsplit(as.character(natt[,"genesList"]), split = ",")
        translate_ids <- function(ids){
            if(length(ids) > 0){
                ids <- setdiff(ids, "/")
                tids <- sapply(reverse_xref[ids],function(x){
                    if(is.null(x)){
                        return("?")
                    } else {
                        return(x)
                    }})
                return(paste(tids, collapse = ","))
            } else {
                return("?")
            }
        }
        natt <- cbind(natt, tids = sapply(sids, translate_ids))
    }
    # optional: attach expression values for each node's gene list
    if(!is.null(exp)){
        sids <- strsplit(as.character(natt[,"genesList"]), split = ",")
        ids_list <- as.list(seq_len(nrow(exp)))
        names(ids_list) <- rownames(exp)
        get_expr_ids <- function(ids){
            if(length(ids) > 0){
                ids <- setdiff(ids, "/")
                exp_values <- sapply(ids_list[ids],function(x){
                    if(is.null(x)){
                        return("?")
                    }else{
                        return(exp[x,])
                    }})
                return(paste(exp_values, collapse = ","))
            } else {
                return("?")
            }
        }
        natt <- cbind(natt, exp_values = sapply(sids, get_expr_ids))
    }
    return(list(sif = def_sif,
                edge_att = def_eatt,
                node_att = natt))
}
# Build the JSON string written to "path_info.json", summarising the
# comparison results per pathway for the HTML report. For each pathway it
# emits pathway-level flags (any subpathway significant / up / down /
# changed) followed by one entry per subpathway.
#
# all_comp:  comparison data.frame whose rownames are subpathway ids; must
#            contain columns FDRp.value, status, has_changed and color.
# metaginfo: pathways object; metaginfo$pathigraphs holds the graphs.
#
# Returns a single character string with a JSON array (one object per
# pathway).
create_path_info <- function(all_comp, metaginfo){
    fpgs <- metaginfo$pathigraphs
    # Effector subpathway ids have 3 "-"-separated tokens, decomposed ones 4.
    effector <- length(unlist(strsplit(rownames(all_comp)[1], split="-"))) == 3
    # Split the comparison rows by the pathway they belong to.
    path_info <- lapply(fpgs, function(fpg){
        if(effector == TRUE){
            all_comp[names(fpg$effector.subgraphs),]
        }else{
            all_comp[names(fpg$subgraphs),]
        }
    })
    path_json_list <- lapply(names(path_info),function(x){
        out <- paste0("{\n\t\"id\":\"", x, "\",\n")
        out <- paste0(out, "\t\"name\":\"", fpgs[[x]]$path.name, "\",\n")
        # Pathway-level summary flags aggregated over all its subpathways.
        anysig <- FALSE
        anyup <- FALSE
        anydown <- FALSE
        anysigup <- FALSE
        anysigdown <- FALSE
        anychanged <- FALSE
        # NOTE(review): the significance threshold is hard-coded to 0.05
        # here (with `<=`), while the per-subpathway "sig" field below uses
        # `< 0.05` -- confirm whether both should use the report's `conf`.
        for(i in seq_len(nrow(path_info[[x]]))){
            if(path_info[[x]]$has_changed[i] == TRUE)
                anychanged <- TRUE
            if(path_info[[x]]$FDRp.value[i] <= 0.05) {
                anysig <- TRUE
                if(path_info[[x]]$status[i] == "UP")
                    anysigup <- TRUE
                if(path_info[[x]]$status[i] == "DOWN")
                    anysigdown <- TRUE
            }
            if(path_info[[x]]$status[i] == "UP")
                anyup <- TRUE
            if(path_info[[x]]$status[i] == "DOWN")
                anydown <- TRUE
        }
        # tolower() turns R's TRUE/FALSE into JSON's true/false.
        out <- paste0(out, "\t\"haschanged\":", tolower(anychanged), ",\n")
        out <- paste0(out, "\t\"sig\":", tolower(anysig), ",\n")
        out <- paste0(out, "\t\"up\":", tolower(anyup), ",\n")
        out <- paste0(out, "\t\"down\":", tolower(anydown), ",\n")
        out <- paste0(out, "\t\"upsig\":", tolower(anysigup), ",\n")
        out <- paste0(out, "\t\"downsig\":", tolower(anysigdown), ",\n")
        out <- paste0(out, "\t\"paths\":[\n")
        # One JSON object per subpathway of this pathway.
        for(i in seq_len(nrow(path_info[[x]]))){
            out <- paste0(out, "\t\t{")
            out <- paste0(out, "\"id\":\"", rownames(path_info[[x]])[i], "\", ")
            out <- paste0(out, "\"name\":\"",
                          get_path_names(metaginfo,
                                         rownames(path_info[[x]])[i]), "\", ")
            # When grouped by pathways, the short name is the part after the
            # "pathway: subpath" colon, with "*" markers stripped.
            if(metaginfo$group.by == "pathways"){
                out <- paste0(out, "\"shortname\":\"" ,
                              gsub("\\*", "", strsplit(get_path_names(
                                  metaginfo,
                                  rownames(path_info[[x]])[i]),": ")[[1]][2]),
                              "\", ")
            }else{
                out <- paste0(out, "\"shortname\":\"",
                              get_path_names(metaginfo,
                                             rownames(path_info[[x]])[i]),
                              "\", ")
            }
            out <- paste0(out, "\"pvalue\":", path_info[[x]]$FDRp.value[i],
                          ", ")
            out <- paste0(out, "\"status\":\"", path_info[[x]]$status[i],
                          "\", ")
            out <- paste0(out, "\"sig\":\"",
                          tolower(path_info[[x]]$FDRp.value[i] < 0.05), "\", ")
            out <- paste0(out, "\"haschanged\":",
                          tolower(path_info[[x]]$has_changed[i]), ", ")
            out <- paste0(out, "\"up\":",
                          tolower(path_info[[x]]$status[i] == "UP"), ", ")
            out <- paste0(out, "\"down\":",
                          tolower(path_info[[x]]$status[i] == "DOWN"), ", ")
            out <- paste0(out, "\"upsig\":",
                          tolower(path_info[[x]]$status[i] == "UP" &
                                      path_info[[x]]$FDRp.value[i] < 0.05),
                          ", ")
            out <- paste0(out, "\"downsig\":",
                          tolower(path_info[[x]]$status[i] == "DOWN" &
                                      path_info[[x]]$FDRp.value[i] < 0.05),
                          ", ")
            out <- paste0(out, "\"color\":\"", path_info[[x]]$color[i], "\"")
            out <- paste0(out, "}")
            # Comma between entries, none after the last one.
            if(i == nrow(path_info[[x]])){
                out <- paste0(out, "\n")
            } else {
                out <- paste0(out, ",\n")
            }
        }
        out <- paste0(out, "\t]\n")
        out <- paste0(out, "}")
        out
    })
    path_json <- paste0("[\n", paste(path_json_list, collapse = ","), "\n]")
    return(path_json)
}
# Copy the static report assets (the pathway-viewer app and the report PNG
# files) into `output_folder`, optionally removing a previous report first.
#
# output_folder:    destination directory (must exist).
# home:             directory holding "pathway-viewer/" and "report-files/".
# clean_out_folder: if TRUE, delete a previous "pathway-viewer" folder and
#                   "index.html" before copying.
create_report_folders <- function(output_folder, home, clean_out_folder = TRUE){
    pv_folder <- paste0(output_folder, "/pathway-viewer")
    # `&&` (scalar, short-circuit) instead of `&`: single-condition `if`.
    if(clean_out_folder == TRUE && file.exists(pv_folder)){
        unlink(pv_folder, recursive = TRUE)
        unlink(paste0(output_folder, "/index.html"), recursive = TRUE)
    }
    file.copy(paste0(home, "/pathway-viewer/"), output_folder, recursive = TRUE)
    report_path <- paste0(home, "/report-files/")
    # Anchor the pattern: ".png" is a regex where "." matches any character,
    # so files like "x_png.txt" would wrongly be copied as well.
    png_files_copy <- list.files(path = report_path, pattern = "\\.png$")
    png_files_copy <- paste0(report_path, png_files_copy)
    file.copy(png_files_copy, pv_folder)
}
# Write per-pathway attribute files (.sif/.natt/.eatt) and the global
# "path_info.json" under <output_folder>/pathway-viewer/pathways/.
#
# output_folder: report root folder.
# metaginfo:     pathways object (metaginfo$pathigraphs).
# comp:          comparison data.frame (needs an "UP/DOWN" column).
# moreatts:      per-pathway extra node attributes, or NULL.
# conf:          significance level forwarded to write_attributes().
# verbose:       if TRUE, print each pathway id as it is processed.
create_pathways_folder <- function(output_folder, metaginfo, comp, moreatts,
                                   conf, verbose = FALSE){
    pathways_folder <- paste0(output_folder, "/pathway-viewer/pathways/")
    if(!file.exists(pathways_folder))
        dir.create(pathways_folder)
    for(pid in names(metaginfo$pathigraphs)){
        if(verbose == TRUE)
            cat(pid)
        write_attributes(comp, pid, metaginfo,
                         paste0(pathways_folder, pid),
                         moreatts_pathway = moreatts[[pid]],
                         conf = conf)
    }
    # create_path_info() expects `status` and `has_changed` columns.
    comp$status <- comp$"UP/DOWN"
    comp$has_changed <- TRUE
    json_string <- create_path_info(comp, metaginfo)
    write(json_string,
          file = paste0(output_folder,
                        "/pathway-viewer/pathways/path_info.json"))
}
# Generate the report's HTML index from a template, replacing the
# PUT_HERE_YOUR_ELEMENTS placeholder with the <pathway-viewer> element.
# The result is written to <output_folder>/pathway-viewer/<output_name>.
#
# home:          directory containing "report-files/<template_name>".
# output_folder: report root folder.
create_html_index <- function(home, output_folder,
                              template_name = "index_template.html",
                              output_name = "index.html"){
    template_lines <- scan(paste0(home, '/report-files/', template_name),
                           comment.char = "", sep = "\n", what = "character",
                           quiet = TRUE)
    viewer_tag <- paste0("<pathway-viewer id='pathway-viewer'",
                         " path-type='url' path='pathways'>",
                         "</pathway-viewer>")
    filled <- gsub("PUT_HERE_YOUR_ELEMENTS", viewer_tag, template_lines)
    write(paste(filled, collapse = "\n"),
          file = paste0(output_folder, "/pathway-viewer/", output_name))
}
#' Create visualization HTML
#'
#' Saves the results of a Wilcoxon comparison for the Hipathia pathway values
#' into a folder, and creates a HTML from which to visualize the results on
#' top of the pathways. The results are stored into the specified folder.
#' If this folder does not exist, it will be created. The parent folder must
#' exist.
#'
#' @examples
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' report <- create_report(comp, pathways, "save_results")
#'
#' \dontrun{
#' data(results)
#' data(brca)
#' sample_group <- colData(brca)[,1]
#' colors_de <- node_color_per_de(results, pathways,
#' sample_group, "Tumor", "Normal")
#' report_colors <- create_report(comp, pathways, "save_results",
#' node_colors = colors_de)
#'}
#'
#' @param comp Comparison object as given by the \code{do_wilcoxon} function
#' @param metaginfo Pathways object as returned by the \code{load_pathways}
#' function
#' @param output_folder Name of the folder in which the report will be stored.
#' @param path Absolute path to the parent directory in which `output_folder`
#' will be saved. If it is not provided, it will be created in a temp folder.
#' @param node_colors List of colors with which to paint the nodes of the
#' pathways, as returned by the
#' \code{node_color_per_de} function. Default is white.
#' @param group_by How to group the subpathways to be visualized. By default
#' they are grouped by the pathway to which they belong. Available groupings
#' include "uniprot", to group subpathways by their annotated Uniprot functions,
#' "GO", to group subpathways by their annotated GO terms, and "genes", to group
#' subpathways by the genes they include. Default is set to "pathway".
#' @param conf Level of significance. By default 0.05.
#' @param verbose Boolean, whether to show details about the results of the
#' execution
#'
#' @return Saves the results and creates a report to visualize them through
#' a server in the specified \code{output_folder}. Returns the folder where
#' the report has been stored.
#'
#' @export
#'
create_report <- function(comp, metaginfo, output_folder = NULL, path = NULL,
                          node_colors = NULL,
                          group_by = "pathway", conf = 0.05, verbose = FALSE){
    # Decomposed (non-effector) subpathway ids have 4 "-"-separated tokens;
    # alternative groupings are only defined for effector subpathways.
    # `&&` (scalar, short-circuit) instead of `&`: this is a single-condition
    # `if`, and it skips the strsplit() when group_by is "pathway".
    if(group_by != "pathway" &&
       length(unlist(strsplit(rownames(comp)[1], split = "-"))) == 4)
        stop("Grouping only available for effector subgraphs")
    if(!is.null(node_colors)){
        if(node_colors$group_by != group_by)
            stop("Grouping in node.colors must agree with group_by")
        moreatts <- summarize_atts(list(node_colors$colors), c("color"))
    }else{
        moreatts <- NULL
    }
    # Swap the pathways object for the pseudo meta-graph-info when grouping
    # by Uniprot functions, GO terms or genes.
    if(group_by != "pathway"){
        message("Creating groupings by ", group_by, "...")
        metaginfo <- get_pseudo_metaginfo(metaginfo, group_by = group_by)
    }
    # Default to a temp folder, auto-numbering the report folder name.
    if(is.null(path))
        path <- tempdir()
    if(is.null(output_folder)){
        n <- length(list.files(path, pattern = "hipathia_report")) + 1
        output_folder <- paste0("hipathia_report_", n)
    }
    output_folder <- paste0(path, "/", output_folder)
    if(!file.exists(output_folder))
        dir.create(output_folder)
    # Static viewer assets shipped with the package.
    pv_path <- paste0(system.file("extdata", package="hipathia"))
    message("Creating report folders...")
    create_report_folders(output_folder, pv_path, clean_out_folder = FALSE)
    message("Creating pathways folder...")
    create_pathways_folder(output_folder, metaginfo, comp, moreatts, conf,
                           verbose)
    message("Creating HTML index...")
    create_html_index(pv_path,
                      output_folder,
                      template_name = "index_template.html",
                      output_name = "index.html")
    return(output_folder)
}
# Combine per-pathway attribute vectors from several attribute lists into
# one matrix per pathway, with one named column per attribute list.
#
# att_list:  list of attribute lists; each element maps pathway -> vector.
# att_names: column names, one per element of att_list.
#
# Returns a list mapping each pathway to its attribute matrix (NULL when
# the first attribute list names no pathways, as before).
summarize_atts <- function(att_list, att_names){
    pathway_ids <- names(att_list[[1]])
    if(length(pathway_ids) == 0)
        return(NULL)
    per_pathway <- lapply(pathway_ids, function(pw){
        mat <- sapply(att_list, function(atts) atts[[pw]])
        colnames(mat) <- att_names
        mat
    })
    names(per_pathway) <- pathway_ids
    return(per_pathway)
}
#'
#' Visualize a HiPathia report
#'
#' @param output_folder Folder in which results to visualize are stored
#' @param port Port to use
#'
#' @return The instructions to visualize a HiPathia report in a web browser
#'
#' @examples
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' report <- create_report(comp, pathways, "save_results")
#' visualize_report(report)
#'
#' \dontrun{
#' data(results)
#' data(brca)
#' sample_group <- colData(brca)[,1]
#' colors_de <- node_color_per_de(results, pathways,
#' sample_group, "Tumor", "Normal")
#' report <- create_report(comp, pathways, "save_results",
#' node_colors = colors_de)
#' visualize_report(report)
#' visualize_report(report, port = 5000)
#' }
#' \dontshow{servr::daemon_stop()}
#'
#' @import servr
#' @export
#'
# Serve an already-created HiPathia report over HTTP (daemonized servr
# instance) and tell the user where to point the browser.
visualize_report <- function(output_folder, port = 4000){
    report_dir <- paste0(output_folder, "/pathway-viewer")
    servr::httd(report_dir, port = port, browser = FALSE, daemon = TRUE)
    cat(paste0("Open a web browser and go to URL http://127.0.0.1:", port,
               "\n"))
}
###########################################
# PSEUDO META_GRAPH_INFORMATION
# Load the pseudo meta-graph-info for a grouping ("uniprot", "GO", "genes")
# and, if it covers pathways not present in `pathways`, restrict it to the
# available ones via filter_pseudo_mgi().
get_pseudo_metaginfo <- function(pathways, group_by){
    pseudo <- load_pseudo_mgi(pathways$species, group_by)
    rownames(pseudo$all.labelids) <- pseudo$all.labelids[,1]
    available <- names(pathways$pathigraphs)
    covered <- unique(pseudo$all.labelids[,"path.id"])
    if(!all(covered %in% available))
        pseudo <- filter_pseudo_mgi(pseudo, available)
    return(pseudo)
}
# Restrict a pseudo meta-graph-info object to the given pathway ids.
# Keeps only the grouping terms (pathigraphs) that contain at least one
# node matching `pathways_list`, prunes each kept term's graph to those
# nodes, and filters the label-id table accordingly.
#
# pseudo_meta:   pseudo meta-graph-info as returned by load_pseudo_mgi().
# pathways_list: character vector of pathway ids to keep.
#
# Returns the filtered object (list: pathigraphs, species, all.labelids).
filter_pseudo_mgi <- function(pseudo_meta, pathways_list){
    # Per grouping term, count nodes whose name matches any requested
    # pathway id (each id is used as a grep pattern on the node names).
    num_nodes <- sapply(names(pseudo_meta$pathigraphs), function(term){
        graph <- pseudo_meta$pathigraphs[[term]]$graph
        idx <- unlist(lapply(pathways_list, grep, V(graph)$name))
        vs <- V(graph)[idx]
        length(vs)
    })
    # Keep only the terms that still contain at least one node.
    tofilter <- names(pseudo_meta$pathigraphs)[num_nodes >= 1]
    mini_pathigraphs <- lapply(pseudo_meta$pathigraphs[tofilter],
                               function(pg){
        # Built up field by field; assigning into NULL turns minipg into
        # a list.
        minipg <- NULL
        graph <- pg$graph
        idx <- unlist(lapply(pathways_list, grep, V(graph)$name))
        vs <- V(graph)[idx]
        # Induced subgraph over the matching vertices only.
        minipg$graph <- igraph::induced_subgraph(graph, vs)
        minipg$path.name <- pg$path.name
        minipg$path.id <- pg$path.id
        # Same id-based filtering for the effector subgraph list.
        es_ind <- unlist(lapply(pathways_list, grep, pg$effector.subgraphs))
        minipg$effector.subgraphs <- pg$effector.subgraphs[es_ind]
        minipg
    })
    names(mini_pathigraphs) <- tofilter
    # Keep only label-id rows whose path.id is among the requested ones.
    all_labels <- pseudo_meta$all.labelids
    lab_in_pl <- all_labels[,"path.id"] %in% pathways_list
    filter_labelids <- all_labels[lab_in_pl,]
    mini_pseudo <- NULL
    mini_pseudo$pathigraphs <- mini_pathigraphs
    mini_pseudo$species <- pseudo_meta$species
    mini_pseudo$all.labelids <- filter_labelids
    return(mini_pseudo)
}
|
/R/save.R
|
no_license
|
mialaalv/hipathia
|
R
| false
| false
| 30,417
|
r
|
##
## save.R
## Saving functions for package Hipathia
##
## Written by Marta R. Hidalgo, Jose Carbonell-Caballero
##
## Code style by Hadley Wickham (http://r-pkgs.had.co.nz/style.html)
## https://www.bioconductor.org/developers/how-to/coding-style/
##
##################################
# Save results
##################################
#' Save results to folder
#'
#' Saves results to a folder. In particular, it saves the matrix of subpathway
#' values, a table with the results of the provided comparison,
#' the accuracy of the results and the .SIF and attributes of the pathways.
#'
#' @param results Results object as returned by the \code{hipathia} function.
#' @param comp Comparison as returned by the \code{do_wilcoxon} function.
#' @param metaginfo Pathways object
#' @param output_folder Name of the folder in which the results will be stored.
#' @param path Absolute path to the parent directory in which `output_folder`
#' will be saved. If it is not provided, it will be created in a temp folder.
#'
#' @return Creates a folder in disk in which all the information to browse the
#' pathway results is stored.
#'
#' @examples
#' data(results)
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' save_results(results, comp, pathways, "output_results")
#'
#' @export
#'
save_results <- function(results, comp, metaginfo, output_folder = NULL,
                         path = NULL){
    # Default to a temp folder, auto-numbering the results folder name.
    if(is.null(path))
        path <- tempdir()
    if(is.null(output_folder)){
        n <- length(list.files(path, pattern = "hipathia_results")) + 1
        output_folder <- paste0("hipathia_results_", n)
    }
    output_folder <- paste0(path, "/", output_folder)
    if(!file.exists(output_folder))
        dir.create(output_folder)
    # Subpathway activity values.
    utils::write.table(results$all$path.vals,
                       file = paste0(output_folder,"/all_path_vals.txt"),
                       col.names = TRUE,
                       row.names = TRUE,
                       quote = FALSE,
                       sep = "\t")
    # Comparison statistics, with human-readable pathway names appended.
    comp$path.name <- get_path_names(metaginfo, rownames(comp))
    utils::write.table(comp,
                       file = paste0(output_folder,"/all_path_stats.txt"),
                       col.names = TRUE,
                       row.names = TRUE,
                       quote = FALSE,
                       sep = "\t")
    if(!is.null( results$all$accuracy )){
        accu <- c(results$all$accuracy$total, results$all$accuracy$percent,
                  results$all$accuracy$by.path)
        # Rename only the first two entries, keeping the per-path names.
        # The previous `names(accu)[3:length(accu)]` broke when `by.path`
        # was empty, because 3:2 counts backwards in R.
        names(accu)[1:2] <- c("Accuracy", "Percent")
        utils::write.table(accu,
                           file = paste0(output_folder,"/accuracy.txt"),
                           col.names = TRUE,
                           row.names = TRUE,
                           quote = FALSE,
                           sep = "\t")
    }
    return(output_folder)
}
# Compute node/edge attributes for one pathway and dump them as the three
# files consumed by the viewer: <prefix>.sif (no header), <prefix>.natt
# and <prefix>.eatt (with header).
write_attributes <- function(this_comp, pathway, metaginfo, prefix,
                             moreatts_pathway = NULL, conf = 0.05,
                             reverse_xref = NULL, exp = NULL){
    atts <- create_node_and_edge_attributes(this_comp, pathway, metaginfo,
                                            moreatts_pathway = moreatts_pathway,
                                            conf = conf,
                                            reverse_xref = reverse_xref,
                                            exp = exp)
    # Small helper so the three write.table() calls share their settings.
    dump_table <- function(x, suffix, with_header){
        utils::write.table(x, file = paste0(prefix, suffix),
                           row.names = FALSE,
                           col.names = with_header,
                           quote = FALSE,
                           sep = "\t")
    }
    dump_table(atts$sif, ".sif", FALSE)
    dump_table(atts$node_att, ".natt", TRUE)
    dump_table(atts$edge_att, ".eatt", TRUE)
}
# Build the tables the pathway viewer consumes for one pathway: a SIF edge
# list plus node (.natt) and edge (.eatt) attribute tables, merging the
# pathway graph layout with the comparison results in `comp`.
#
# comp:             comparison data.frame (rownames = subpathway ids) with
#                   at least "UP/DOWN", "p.value" and "FDRp.value" columns;
#                   an optional "color" column overrides edge colors.
# pathway:          id of the pathway to process.
# metaginfo:        pathways object (metaginfo$pathigraphs).
# moreatts_pathway: optional extra node attributes (e.g. node colors).
# conf:             significance level used to color and split edges.
# reverse_xref:     optional list translating gene ids; adds a "tids" col.
# exp:              optional expression matrix; adds an "exp_values" col.
#
# Returns list(sif = ..., edge_att = ..., node_att = ...).
create_node_and_edge_attributes <- function(comp, pathway, metaginfo,
                                            moreatts_pathway = NULL, conf=0.05,
                                            reverse_xref = NULL, exp = NULL){
    pathigraphs <- metaginfo$pathigraphs
    # Effector subpathway ids have 3 "-"-separated tokens, decomposed 4.
    effector <- length(unlist(strsplit(rownames(comp)[1], split="-"))) == 3
    if(effector == TRUE){
        s <- pathigraphs[[pathway]]$effector.subgraphs
    }else{
        s <- pathigraphs[[pathway]]$subgraphs
    }
    ig <- pathigraphs[[pathway]]$graph
    # Fill in visual defaults for any vertex attribute the graph lacks.
    if(is.null(V(ig)$type)) V(ig)$type <- "node"
    if(is.null(V(ig)$width)) V(ig)$width <- 15
    if(is.null(V(ig)$height)) V(ig)$height <- 5
    if(is.null(V(ig)$label.color)) V(ig)$label.color <- "black"
    if(is.null(V(ig)$label.cex)) V(ig)$label.cex <- 0.7
    # V(ig)$stroke.color <- find_node_colors(comp, s, ig, conf)[V(ig)$name]
    V(ig)$stroke.color <- "lightgrey"
    #V(ig)$stroke.color[grepl("func",V(ig)$name)] <- "white"
    V(ig)$stroke.size <- 2
    #V(ig)$stroke.size[grepl("func",V(ig)$name)] <- 0
    V(ig)$color <- "white"
    V(ig)$width[V(ig)$shape=="circle"] <- 15
    V(ig)$width[V(ig)$shape!="circle"] <- 22
    # Rectangles are rendered as ellipses, except function nodes (names
    # containing "func"), which stay rectangular and get width -1.
    V(ig)$shape[V(ig)$shape=="rectangle" & !grepl("func", V(ig)$name)] <-
        "ellipse"
    V(ig)$shape[V(ig)$shape=="rectangle" & grepl("func", V(ig)$name)] <-
        "rectangle"
    V(ig)$width[grepl("func",V(ig)$name)] <- -1
    # Node attribute table, one row per vertex (character matrix).
    natt <- cbind(V(ig)$name,
                  V(ig)$label,
                  10,
                  V(ig)$nodeX,
                  V(ig)$nodeY,
                  V(ig)$color,
                  V(ig)$stroke.color,
                  V(ig)$stroke.size,
                  V(ig)$shape,
                  V(ig)$type,
                  V(ig)$label.cex,
                  V(ig)$label.color,
                  V(ig)$width,
                  sapply(V(ig)$genesList, paste, collapse=","),
                  V(ig)$tooltip)
    colnames(natt) <- c("ID",
                        "label",
                        "labelSize",
                        "X",
                        "Y",
                        "color",
                        "strokeColor",
                        "strokeSize",
                        "shape",
                        "type",
                        "labelCex",
                        "labelColor",
                        "size",
                        "genesList",
                        "tooltip")
    rownames(natt) <- natt[,1]
    # Truncate multi-line labels to their first line plus ", ...".
    natt[,"label"] <- sapply(natt[,"label"], function(x){
        ul <- unlist(strsplit(x, split="\n"))
        if(length(ul) > 1){
            paste0(ul[1], ", ...")
        }else{
            ul[1]
        }
    })
    # Merge in caller-supplied node attributes: shared columns overwrite
    # the defaults, new columns are appended.
    if(!is.null(moreatts_pathway)){
        common_col_idx <- colnames(moreatts_pathway) %in% colnames(natt)
        common_col <- colnames(moreatts_pathway)[common_col_idx]
        not_common_col_idx <- !colnames(moreatts_pathway) %in% colnames(natt)
        not_common_col <- colnames(moreatts_pathway)[not_common_col_idx]
        for(col in common_col)
            natt[,col] <- moreatts_pathway[,col]
        if(!"strokeColor" %in% common_col){
            natt[,"strokeColor"] <- natt[,"color"]
            natt[natt[,"strokeColor"] == "white","strokeColor"] <- "lightgrey"
        }
        natt <- cbind(natt, moreatts_pathway[,not_common_col])
    }
    # 0/1 node-to-subpathway membership columns, filled in the loop below.
    node_path_assoc <- matrix(0, nrow = nrow(natt), ncol = length(s))
    colnames(node_path_assoc) <- names(s)
    natt <- cbind(natt, node_path_assoc)
    sif <- c()
    eatt <- c()
    epath_assoc <- c()
    # Collect edges and edge attributes per subpathway. An edge shared by
    # several subpathways appears once per subpathway at this stage; the
    # "melt" loop below deduplicates.
    for(i in seq_along(s)){
        # get subgraph
        subgraph <- s[[i]]
        name <- names(s)[i]
        pname <- get_path_names(metaginfo, name)
        # sif
        # relation == -1 marks an inhibition edge.
        raw_edges <- get.edgelist(subgraph)
        type <- c("activation","inhibition")[(E(subgraph)$relation == -1) + 1]
        edges <- cbind(raw_edges[,1], type, raw_edges[,2])
        sif <- rbind(sif, edges)
        # edge attributes
        eids <- apply(edges, 1, function(x) paste0(x, collapse = "_"))
        status <- comp[name,"UP/DOWN"]
        if("color" %in% colnames(comp)){
            color <- comp[name,"color"]
        } else {
            # Red (#e31a1c) when significantly UP, blue (#1f78b4) when
            # significant but not UP, grey when not significant.
            if( comp[name,"FDRp.value"] < conf){
                color <- c("#1f78b4","#e31a1c")[(status == "UP") + 1]
            } else {
                color <- "darkgrey"
            }
        }
        path_assoc <- matrix(0, nrow = nrow(edges), ncol = length(s))
        colnames(path_assoc) <- names(s)
        path_assoc[,name] <- 1
        edges_atts <- cbind(id = eids,
                            status = status,
                            color = color,
                            name = name,
                            pname = pname,
                            pvalue = comp[name,"p.value"],
                            adj.pvalue = comp[name,"FDRp.value"])
        eatt <- rbind(eatt, edges_atts)
        epath_assoc <- rbind(epath_assoc, path_assoc)
        # node attributes
        natt[get.vertex.attribute(subgraph, "name"), name] <- 1
    }
    # melt multi path interactions
    # Each unique edge is re-emitted up to three times: once for the
    # significantly up-regulated subpathways crossing it (type suffixed
    # ".up"), once for the significantly down-regulated ones (".down"),
    # and once for the non-significant ones (type unchanged).
    unique_edges <- unique(eatt[,1])
    def_eatt <- c()
    def_sif <- c()
    def_epath_assoc <- c()
    for(ue in unique_edges){
        indexes <- which(eatt[,1] == ue)
        subeatt <- eatt[indexes,,drop = FALSE]
        subepath_assoc <- epath_assoc[indexes,,drop = FALSE]
        subsif <- sif[indexes,,drop = FALSE]
        # up regulated
        upsig <- which(subeatt[,"status"] == "UP" &
                           as.numeric(subeatt[,"adj.pvalue"]) < conf)
        if(length(upsig) > 0){
            selected_subsif <- subsif[1,]
            selected_subsif[2] <- paste0(selected_subsif[2], ".up")
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[upsig,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1, c("id", "status", "color",
                                                  "pvalue", "adj.pvalue")]
            selected_subeatt["id"] <- paste(selected_subsif, collapse = "_")
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[upsig,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
        # down regulated
        downsig <- which(subeatt[,"status"] == "DOWN" &
                             as.numeric(subeatt[,"adj.pvalue"]) < conf)
        if(length(downsig) > 0){
            selected_subsif <- subsif[1,]
            selected_subsif[2] <- paste0(selected_subsif[2], ".down")
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[downsig,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1,c("id",
                                                 "status",
                                                 "color",
                                                 "pvalue",
                                                 "adj.pvalue")]
            selected_subeatt["id"] <- paste(selected_subsif, collapse = "_")
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[downsig,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
        # no sigs
        # NOTE(review): unlike the two branches above, this branch keeps
        # the edge id from the first collected row instead of rebuilding
        # it from selected_subsif -- confirm the asymmetry is intended.
        nosigs <- which(as.numeric(subeatt[,"adj.pvalue"]) >= conf)
        if(length(nosigs) > 0){
            selected_subsif <- subsif[1,]
            def_sif <- rbind(def_sif, selected_subsif)
            mini_subeatt <- subeatt[nosigs,,drop = FALSE]
            selected_subeatt <- mini_subeatt[1,c("id",
                                                 "status",
                                                 "color",
                                                 "pvalue",
                                                 "adj.pvalue")]
            def_eatt <- rbind(def_eatt, selected_subeatt)
            selected_subepath_assoc <- subepath_assoc[nosigs,,drop = FALSE]
            def_epath_assoc <- rbind(def_epath_assoc,
                                     colSums(selected_subepath_assoc) > 0)
        }
    }
    rownames(def_eatt) <- NULL
    def_eatt <- as.data.frame(def_eatt, stringsAsFactors = FALSE)
    def_epath_assoc <- as.data.frame(def_epath_assoc, stringsAsFactors = FALSE)
    rownames(def_sif) <- NULL
    def_sif <- as.data.frame(def_sif, stringsAsFactors = FALSE)
    # Arrow style: "directed" for activation edges, "inhibited" otherwise.
    def_eatt$shape <- c("inhibited", "directed")[grepl("activation",
                                                       def_sif[,2]) + 1]
    # Append the 0/1 edge-to-subpathway membership columns.
    def_eatt <- cbind(def_eatt, (def_epath_assoc == TRUE) + 0)
    natt[,"label"] <- gsub("\\*", "", natt[,"label"])
    # Add functions
    #---------------------
    # Edges pointing at function nodes are not part of any subpathway, so
    # they are appended here as plain "activation" edges.
    left <- which(grepl("func", get.edgelist(ig)[,2]))
    if(length(left) > 0 ){
        # apply() would drop to a vector for a single row, hence the split.
        if(length(left) == 1){
            ids <- paste(get.edgelist(ig)[left,1], "activation",
                         get.edgelist(ig)[left,2], sep = "_")
        }else{
            ids <- apply(get.edgelist(ig)[left,], 1, function(x){
                paste(x[1], "activation", x[2], sep = "_")
            })
        }
        funejes <- as.data.frame(matrix(0,
                                        nrow = length(ids),
                                        ncol = ncol(def_eatt)),
                                 stringsAsFactors = FALSE)
        colnames(funejes) <- colnames(def_eatt)
        rownames(funejes) <- ids
        funejes$id <- ids
        funejes$status <- "DOWN"
        funejes$color <- "darkgrey"
        # NOTE(review): filling `pvalue` with the edge ids and `adj.pvalue`
        # with "DOWN" looks like a copy-paste placeholder -- confirm what
        # these fields should hold for function edges.
        if("pvalue" %in% colnames(funejes))
            funejes$pvalue <- ids
        if("adj.pvalue" %in% colnames(funejes))
            funejes$adj.pvalue <- "DOWN"
        funejes$shape <- "directed"
        nods <- get.edgelist(ig)[left,1]
        names(nods) <- ids
        names(ids) <- nods
        # Flag each function edge in the membership column of the
        # subpathway whose effector node feeds it (x[[1]] is the edge id).
        funs <- t(apply(funejes, 1, function(x){
            lastnodes <- sapply(colnames(funejes), get_effnode_id)
            if(any(lastnodes == nods[x[[1]]])){
                x[lastnodes == nods[x[[1]]]] <- 1
                x
            }else{
                x
            }
        }))
        funs <- as.data.frame(funs, stringsAsFactors = FALSE)
        sif_funs <- data.frame(V1 = get.edgelist(ig)[left,1],
                               type = rep("activation", times = length(left)),
                               V3 = get.edgelist(ig)[left,2],
                               stringsAsFactors = FALSE)
        def_sif <- rbind(def_sif, sif_funs)
        def_eatt <- rbind(def_eatt, funs)
    }
    # Associate each function node with its subpathway membership column
    # ("N..." node ids map to "P..." subpathway ids).
    fun_indexes <- grep("_func", rownames(natt))
    fun_names <- rownames(natt)[fun_indexes]
    if(length(fun_indexes) > 0){
        for(i in seq_along(fun_names)){
            pp <- gsub("N", "P", gsub("_func", "", fun_names[i]))
            if(effector == TRUE){
                natt[fun_names[i], pp] <- 1
            } else {
                natt[fun_names[i], grep(paste0("- ", pp), colnames(natt))] <- 1
            }
        }
    }
    # Optionally translate the per-node gene lists through reverse_xref,
    # appending a "tids" column ("?" when no translation exists).
    if(!is.null(reverse_xref)){
        sids <- strsplit(as.character(natt[,"genesList"]), split = ",")
        translate_ids <- function(ids){
            if(length(ids) > 0){
                ids <- setdiff(ids, "/")
                tids <- sapply(reverse_xref[ids],function(x){
                    if(is.null(x)){
                        return("?")
                    } else {
                        return(x)
                    }})
                return(paste(tids, collapse = ","))
            } else {
                return("?")
            }
        }
        natt <- cbind(natt, tids = sapply(sids, translate_ids))
    }
    # Optionally attach expression values per gene, appending an
    # "exp_values" column.
    if(!is.null(exp)){
        sids <- strsplit(as.character(natt[,"genesList"]), split = ",")
        ids_list <- as.list(seq_len(nrow(exp)))
        names(ids_list) <- rownames(exp)
        get_expr_ids <- function(ids){
            if(length(ids) > 0){
                ids <- setdiff(ids, "/")
                exp_values <- sapply(ids_list[ids],function(x){
                    if(is.null(x)){
                        return("?")
                    }else{
                        return(exp[x,])
                    }})
                return(paste(exp_values, collapse = ","))
            } else {
                return("?")
            }
        }
        natt <- cbind(natt, exp_values = sapply(sids, get_expr_ids))
    }
    return(list(sif = def_sif,
                edge_att = def_eatt,
                node_att = natt))
}
# Build the JSON string written to "path_info.json", summarising the
# comparison results per pathway for the HTML report. For each pathway it
# emits pathway-level flags (any subpathway significant / up / down /
# changed) followed by one entry per subpathway.
#
# all_comp:  comparison data.frame whose rownames are subpathway ids; must
#            contain columns FDRp.value, status, has_changed and color.
# metaginfo: pathways object; metaginfo$pathigraphs holds the graphs.
#
# Returns a single character string with a JSON array (one object per
# pathway).
create_path_info <- function(all_comp, metaginfo){
    fpgs <- metaginfo$pathigraphs
    # Effector subpathway ids have 3 "-"-separated tokens, decomposed ones 4.
    effector <- length(unlist(strsplit(rownames(all_comp)[1], split="-"))) == 3
    # Split the comparison rows by the pathway they belong to.
    path_info <- lapply(fpgs, function(fpg){
        if(effector == TRUE){
            all_comp[names(fpg$effector.subgraphs),]
        }else{
            all_comp[names(fpg$subgraphs),]
        }
    })
    path_json_list <- lapply(names(path_info),function(x){
        out <- paste0("{\n\t\"id\":\"", x, "\",\n")
        out <- paste0(out, "\t\"name\":\"", fpgs[[x]]$path.name, "\",\n")
        # Pathway-level summary flags aggregated over all its subpathways.
        anysig <- FALSE
        anyup <- FALSE
        anydown <- FALSE
        anysigup <- FALSE
        anysigdown <- FALSE
        anychanged <- FALSE
        # NOTE(review): the significance threshold is hard-coded to 0.05
        # here (with `<=`), while the per-subpathway "sig" field below uses
        # `< 0.05` -- confirm whether both should use the report's `conf`.
        for(i in seq_len(nrow(path_info[[x]]))){
            if(path_info[[x]]$has_changed[i] == TRUE)
                anychanged <- TRUE
            if(path_info[[x]]$FDRp.value[i] <= 0.05) {
                anysig <- TRUE
                if(path_info[[x]]$status[i] == "UP")
                    anysigup <- TRUE
                if(path_info[[x]]$status[i] == "DOWN")
                    anysigdown <- TRUE
            }
            if(path_info[[x]]$status[i] == "UP")
                anyup <- TRUE
            if(path_info[[x]]$status[i] == "DOWN")
                anydown <- TRUE
        }
        # tolower() turns R's TRUE/FALSE into JSON's true/false.
        out <- paste0(out, "\t\"haschanged\":", tolower(anychanged), ",\n")
        out <- paste0(out, "\t\"sig\":", tolower(anysig), ",\n")
        out <- paste0(out, "\t\"up\":", tolower(anyup), ",\n")
        out <- paste0(out, "\t\"down\":", tolower(anydown), ",\n")
        out <- paste0(out, "\t\"upsig\":", tolower(anysigup), ",\n")
        out <- paste0(out, "\t\"downsig\":", tolower(anysigdown), ",\n")
        out <- paste0(out, "\t\"paths\":[\n")
        # One JSON object per subpathway of this pathway.
        for(i in seq_len(nrow(path_info[[x]]))){
            out <- paste0(out, "\t\t{")
            out <- paste0(out, "\"id\":\"", rownames(path_info[[x]])[i], "\", ")
            out <- paste0(out, "\"name\":\"",
                          get_path_names(metaginfo,
                                         rownames(path_info[[x]])[i]), "\", ")
            # When grouped by pathways, the short name is the part after the
            # "pathway: subpath" colon, with "*" markers stripped.
            if(metaginfo$group.by == "pathways"){
                out <- paste0(out, "\"shortname\":\"" ,
                              gsub("\\*", "", strsplit(get_path_names(
                                  metaginfo,
                                  rownames(path_info[[x]])[i]),": ")[[1]][2]),
                              "\", ")
            }else{
                out <- paste0(out, "\"shortname\":\"",
                              get_path_names(metaginfo,
                                             rownames(path_info[[x]])[i]),
                              "\", ")
            }
            out <- paste0(out, "\"pvalue\":", path_info[[x]]$FDRp.value[i],
                          ", ")
            out <- paste0(out, "\"status\":\"", path_info[[x]]$status[i],
                          "\", ")
            out <- paste0(out, "\"sig\":\"",
                          tolower(path_info[[x]]$FDRp.value[i] < 0.05), "\", ")
            out <- paste0(out, "\"haschanged\":",
                          tolower(path_info[[x]]$has_changed[i]), ", ")
            out <- paste0(out, "\"up\":",
                          tolower(path_info[[x]]$status[i] == "UP"), ", ")
            out <- paste0(out, "\"down\":",
                          tolower(path_info[[x]]$status[i] == "DOWN"), ", ")
            out <- paste0(out, "\"upsig\":",
                          tolower(path_info[[x]]$status[i] == "UP" &
                                      path_info[[x]]$FDRp.value[i] < 0.05),
                          ", ")
            out <- paste0(out, "\"downsig\":",
                          tolower(path_info[[x]]$status[i] == "DOWN" &
                                      path_info[[x]]$FDRp.value[i] < 0.05),
                          ", ")
            out <- paste0(out, "\"color\":\"", path_info[[x]]$color[i], "\"")
            out <- paste0(out, "}")
            # Comma between entries, none after the last one.
            if(i == nrow(path_info[[x]])){
                out <- paste0(out, "\n")
            } else {
                out <- paste0(out, ",\n")
            }
        }
        out <- paste0(out, "\t]\n")
        out <- paste0(out, "}")
        out
    })
    path_json <- paste0("[\n", paste(path_json_list, collapse = ","), "\n]")
    return(path_json)
}
# Copy the static report assets (the pathway-viewer app and the report PNG
# files) into `output_folder`, optionally removing a previous report first.
#
# output_folder:    destination directory (must exist).
# home:             directory holding "pathway-viewer/" and "report-files/".
# clean_out_folder: if TRUE, delete a previous "pathway-viewer" folder and
#                   "index.html" before copying.
create_report_folders <- function(output_folder, home, clean_out_folder = TRUE){
    pv_folder <- paste0(output_folder, "/pathway-viewer")
    # `&&` (scalar, short-circuit) instead of `&`: single-condition `if`.
    if(clean_out_folder == TRUE && file.exists(pv_folder)){
        unlink(pv_folder, recursive = TRUE)
        unlink(paste0(output_folder, "/index.html"), recursive = TRUE)
    }
    file.copy(paste0(home, "/pathway-viewer/"), output_folder, recursive = TRUE)
    report_path <- paste0(home, "/report-files/")
    # Anchor the pattern: ".png" is a regex where "." matches any character,
    # so files like "x_png.txt" would wrongly be copied as well.
    png_files_copy <- list.files(path = report_path, pattern = "\\.png$")
    png_files_copy <- paste0(report_path, png_files_copy)
    file.copy(png_files_copy, pv_folder)
}
# Write per-pathway attribute files (.sif/.natt/.eatt) and the global
# "path_info.json" under <output_folder>/pathway-viewer/pathways/.
#
# output_folder: report root folder.
# metaginfo:     pathways object (metaginfo$pathigraphs).
# comp:          comparison data.frame (needs an "UP/DOWN" column).
# moreatts:      per-pathway extra node attributes, or NULL.
# conf:          significance level forwarded to write_attributes().
# verbose:       if TRUE, print each pathway id as it is processed.
create_pathways_folder <- function(output_folder, metaginfo, comp, moreatts,
                                   conf, verbose = FALSE){
    pathways_folder <- paste0(output_folder, "/pathway-viewer/pathways/")
    if(!file.exists(pathways_folder))
        dir.create(pathways_folder)
    for(pid in names(metaginfo$pathigraphs)){
        if(verbose == TRUE)
            cat(pid)
        write_attributes(comp, pid, metaginfo,
                         paste0(pathways_folder, pid),
                         moreatts_pathway = moreatts[[pid]],
                         conf = conf)
    }
    # create_path_info() expects `status` and `has_changed` columns.
    comp$status <- comp$"UP/DOWN"
    comp$has_changed <- TRUE
    json_string <- create_path_info(comp, metaginfo)
    write(json_string,
          file = paste0(output_folder,
                        "/pathway-viewer/pathways/path_info.json"))
}
# Generate the report's HTML index from a template, replacing the
# PUT_HERE_YOUR_ELEMENTS placeholder with the <pathway-viewer> element.
# The result is written to <output_folder>/pathway-viewer/<output_name>.
#
# home:          directory containing "report-files/<template_name>".
# output_folder: report root folder.
create_html_index <- function(home, output_folder,
                              template_name = "index_template.html",
                              output_name = "index.html"){
    template_lines <- scan(paste0(home, '/report-files/', template_name),
                           comment.char = "", sep = "\n", what = "character",
                           quiet = TRUE)
    viewer_tag <- paste0("<pathway-viewer id='pathway-viewer'",
                         " path-type='url' path='pathways'>",
                         "</pathway-viewer>")
    filled <- gsub("PUT_HERE_YOUR_ELEMENTS", viewer_tag, template_lines)
    write(paste(filled, collapse = "\n"),
          file = paste0(output_folder, "/pathway-viewer/", output_name))
}
#' Create visualization HTML
#'
#' Saves the results of a Wilcoxon comparison for the Hipathia pathway values
#' into a folder, and creates a HTML from which to visualize the results on
#' top of the pathways. The results are stored into the specified folder.
#' If this folder does not exist, it will be created. The parent folder must
#' exist.
#'
#' @examples
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' report <- create_report(comp, pathways, "save_results")
#'
#' \dontrun{
#' data(results)
#' data(brca)
#' sample_group <- colData(brca)[,1]
#' colors_de <- node_color_per_de(results, pathways,
#' sample_group, "Tumor", "Normal")
#' report_colors <- create_report(comp, pathways, "save_results",
#' node_colors = colors_de)
#'}
#'
#' @param comp Comparison object as given by the \code{do_wilcoxon} function
#' @param metaginfo Pathways object as returned by the \code{load_pathways}
#' function
#' @param output_folder Name of the folder in which the report will be stored.
#' @param path Absolute path to the parent directory in which `output_folder`
#' will be saved. If it is not provided, it will be created in a temp folder.
#' @param node_colors List of colors with which to paint the nodes of the
#' pathways, as returned by the
#' \code{node_color_per_de} function. Default is white.
#' @param group_by How to group the subpathways to be visualized. By default
#' they are grouped by the pathway to which they belong. Available groupings
#' include "uniprot", to group subpathways by their annotated Uniprot functions,
#' "GO", to group subpathways by their annotated GO terms, and "genes", to group
#' subpathways by the genes they include. Default is set to "pathway".
#' @param conf Level of significance. By default 0.05.
#' @param verbose Boolean, whether to show details about the results of the
#' execution
#'
#' @return Saves the results and creates a report to visualize them through
#' a server in the specified \code{output_folder}. Returns the folder where
#' the report has been stored.
#'
#' @export
#'
create_report <- function(comp, metaginfo, output_folder = NULL, path = NULL,
                          node_colors = NULL,
                          group_by = "pathway", conf = 0.05, verbose = FALSE){
    # Decomposed (non-effector) subpathway ids have 4 "-"-separated tokens;
    # alternative groupings are only defined for effector subpathways.
    # `&&` (scalar, short-circuit) instead of `&`: this is a single-condition
    # `if`, and it skips the strsplit() when group_by is "pathway".
    if(group_by != "pathway" &&
       length(unlist(strsplit(rownames(comp)[1], split = "-"))) == 4)
        stop("Grouping only available for effector subgraphs")
    if(!is.null(node_colors)){
        if(node_colors$group_by != group_by)
            stop("Grouping in node.colors must agree with group_by")
        moreatts <- summarize_atts(list(node_colors$colors), c("color"))
    }else{
        moreatts <- NULL
    }
    # Swap the pathways object for the pseudo meta-graph-info when grouping
    # by Uniprot functions, GO terms or genes.
    if(group_by != "pathway"){
        message("Creating groupings by ", group_by, "...")
        metaginfo <- get_pseudo_metaginfo(metaginfo, group_by = group_by)
    }
    # Default to a temp folder, auto-numbering the report folder name.
    if(is.null(path))
        path <- tempdir()
    if(is.null(output_folder)){
        n <- length(list.files(path, pattern = "hipathia_report")) + 1
        output_folder <- paste0("hipathia_report_", n)
    }
    output_folder <- paste0(path, "/", output_folder)
    if(!file.exists(output_folder))
        dir.create(output_folder)
    # Static viewer assets shipped with the package.
    pv_path <- paste0(system.file("extdata", package="hipathia"))
    message("Creating report folders...")
    create_report_folders(output_folder, pv_path, clean_out_folder = FALSE)
    message("Creating pathways folder...")
    create_pathways_folder(output_folder, metaginfo, comp, moreatts, conf,
                           verbose)
    message("Creating HTML index...")
    create_html_index(pv_path,
                      output_folder,
                      template_name = "index_template.html",
                      output_name = "index.html")
    return(output_folder)
}
# Combine per-pathway attribute vectors from several attribute lists into one
# matrix per pathway, with one named column per attribute.
summarize_atts <- function(att_list, att_names){
    pathway_ids <- names(att_list[[1]])
    summaries <- lapply(pathway_ids, function(pw){
        att_mat <- sapply(att_list, function(atts) atts[[pw]])
        colnames(att_mat) <- att_names
        att_mat
    })
    names(summaries) <- pathway_ids
    return(summaries)
}
#'
#' Visualize a HiPathia report
#'
#' @param output_folder Folder in which results to visualize are stored
#' @param port Port to use
#'
#' @return The instructions to visualize a HiPathia report in a web browser
#'
#' @examples
#' data(comp)
#' pathways <- load_pathways(species = "hsa", pathways_list = c("hsa03320",
#' "hsa04012"))
#' report <- create_report(comp, pathways, "save_results")
#' visualize_report(report)
#'
#' \dontrun{
#' data(results)
#' data(brca)
#' sample_group <- colData(brca)[,1]
#' colors_de <- node_color_per_de(results, pathways,
#' sample_group, "Tumor", "Normal")
#' report <- create_report(comp, pathways, "save_results",
#' node_colors = colors_de)
#' visualize_report(report)
#' visualize_report(report, port = 5000)
#' }
#' \dontshow{servr::daemon_stop()}
#'
#' @import servr
#' @export
#'
visualize_report <- function(output_folder, port = 4000){
    # Serve the generated report from its "pathway-viewer" subfolder as a
    # non-blocking background daemon, then tell the user where to browse.
    viewer_dir <- paste0(output_folder, "/pathway-viewer")
    servr::httd(viewer_dir, port = port, browser = FALSE, daemon = TRUE)
    cat("Open a web browser and go to URL http://127.0.0.1:", port, "\n",
        sep = "")
}
###########################################
# PSEUDO META_GRAPH_INFORMATION
# Load the pseudo meta-graph-info that groups effector subpathways by
# annotated function ("uniprot", "GO") or by gene, restricted to the
# pathways present in the input `pathways` object.
get_pseudo_metaginfo <- function(pathways, group_by){
    pseudo <- load_pseudo_mgi(pathways$species, group_by)
    # Index the label table by its first column for direct row lookup
    # (presumably the label IDs — confirm against load_pseudo_mgi's output).
    rownames(pseudo$all.labelids) <- pseudo$all.labelids[,1]
    pathways_list <- names(pathways$pathigraphs)
    # If the pre-computed pseudo-MGI covers pathways the user did not load,
    # drop the extra ones so results match the requested pathway set.
    if(!all(unique(pseudo$all.labelids[,"path.id"]) %in% pathways_list))
        pseudo <- filter_pseudo_mgi(pseudo, pathways_list)
    return(pseudo)
}
# Restrict a pseudo meta-graph-info object to a list of pathway IDs:
# keep only grouping terms whose graphs contain at least one node from
# those pathways, prune each term's graph to those nodes, and filter the
# label-ID table accordingly.
filter_pseudo_mgi <- function(pseudo_meta, pathways_list){
    # Count, per grouping term, how many graph nodes match a kept pathway
    # (node names appear to embed the pathway ID — grep-based match;
    # confirm naming scheme against the pseudo-MGI builder).
    num_nodes <- sapply(names(pseudo_meta$pathigraphs), function(term){
        graph <- pseudo_meta$pathigraphs[[term]]$graph
        idx <- unlist(lapply(pathways_list, grep, V(graph)$name))
        vs <- V(graph)[idx]
        length(vs)
    })
    # Retain only terms with at least one matching node.
    tofilter <- names(pseudo_meta$pathigraphs)[num_nodes >= 1]
    mini_pathigraphs <- lapply(pseudo_meta$pathigraphs[tofilter],
                               function(pg){
                                   minipg <- NULL
                                   graph <- pg$graph
                                   idx <- unlist(lapply(pathways_list, grep, V(graph)$name))
                                   vs <- V(graph)[idx]
                                   # Subgraph induced by the kept nodes only.
                                   minipg$graph <- igraph::induced_subgraph(graph, vs)
                                   minipg$path.name <- pg$path.name
                                   minipg$path.id <- pg$path.id
                                   # Keep effector subgraphs matching a kept pathway
                                   # (grep over the subgraph collection — presumably
                                   # matches pathway IDs; verify).
                                   es_ind <- unlist(lapply(pathways_list, grep, pg$effector.subgraphs))
                                   minipg$effector.subgraphs <- pg$effector.subgraphs[es_ind]
                                   minipg
                               })
    names(mini_pathigraphs) <- tofilter
    # Keep only label rows belonging to the requested pathways.
    all_labels <- pseudo_meta$all.labelids
    lab_in_pl <- all_labels[,"path.id"] %in% pathways_list
    filter_labelids <- all_labels[lab_in_pl,]
    # Reassemble the filtered pseudo meta-graph-info object.
    mini_pseudo <- NULL
    mini_pseudo$pathigraphs <- mini_pathigraphs
    mini_pseudo$species <- pseudo_meta$species
    mini_pseudo$all.labelids <- filter_labelids
    return(mini_pseudo)
}
|
# --------------------------------------------------- #
# Author: Marius D. Pascariu
# License: MIT
# Last update: Wed Jun 05 14:33:49 2019
# --------------------------------------------------- #
#' Compute Life Tables from Mortality Data
#'
#' Construct either a full or abridged life table with various input choices like:
#' death counts and mid-interval population estimates \code{(Dx, Ex)} or
#' age-specific death rates \code{(mx)} or death probabilities \code{(qx)}
#' or survivorship curve \code{(lx)} or a distribution of deaths \code{(dx)}.
#' If one of these options is specified, the other can be ignored. The input
#' data can be an object of class: numerical \code{vector}, \code{matrix} or
#' \code{data.frame}.
#'
#' @details
#' The "life table" is also called "mortality table" or "actuarial table".
#' This shows, for each age, what the probability is that a person of that
#' age will die before his or her next birthday, the expectation of life across
#' different age ranges or the survivorship of people from a certain population.
#' @usage
#' LifeTable(x, Dx = NULL, Ex = NULL,
#' mx = NULL,
#' qx = NULL,
#' lx = NULL,
#' dx = NULL,
#' sex = NULL,
#' lx0 = 1e5,
#' ax = NULL)
#' @param x Vector of ages at the beginning of the age interval.
#' @param Dx Object containing death counts. An element of the \code{Dx} object
#' represents the number of deaths during the year to persons aged x to x+n.
#' @param Ex Exposure in the period. \code{Ex} can be approximated by the
#' mid-year population aged x to x+n.
#' @param mx Life table death rate in age interval [x, x+n).
#' @param qx Probability of dying in age interval [x, x+n).
#' @param lx Probability of survival up until exact age x (if l(0) = 1), or
#' the number of survivors at exact age x, assuming l(0) > 1.
#' @param dx Deaths by life-table population in the age interval [x, x+n).
#' @param sex Sex of the population considered here. Default: \code{NULL}.
#' This argument affects the first two values in the life table ax column.
#' If sex is specified the values are computed based on the Coale-Demeny method
#' and are slightly different for males than for females.
#' Options: \code{NULL, male, female, total}.
#' @param lx0 Radix. Default: 100 000.
#' @param ax Numeric scalar. Subject-time alive in age-interval for those who
#' die in the same interval. If \code{NULL} this will be estimated. A common
#' assumption is \code{ax = 0.5}, i.e. the deaths occur in the middle of
#' the interval. Default: \code{NULL}.
#' @return The output is of the \code{"LifeTable"} class with the components:
#' \item{lt}{Computed life table;}
#' \item{call}{\code{Call} in which all of the specified arguments are
#' specified by their full names;}
#' \item{process_date}{Time stamp.}
#' @seealso
#' \code{\link{LawTable}}
#' \code{\link{convertFx}}
#' @author Marius D. Pascariu
#' @examples
#' # Example 1 --- Full life tables with different inputs ---
#'
#' y <- 1900
#' x <- as.numeric(rownames(ahmd$mx))
#' Dx <- ahmd$Dx[, paste(y)]
#' Ex <- ahmd$Ex[, paste(y)]
#'
#' LT1 <- LifeTable(x, Dx = Dx, Ex = Ex)
#' LT2 <- LifeTable(x, mx = LT1$lt$mx)
#' LT3 <- LifeTable(x, qx = LT1$lt$qx)
#' LT4 <- LifeTable(x, lx = LT1$lt$lx)
#' LT5 <- LifeTable(x, dx = LT1$lt$dx)
#'
#' LT1
#' LT5
#' ls(LT5)
#'
#' # Example 2 --- Compute multiple life tables at once ---
#'
#' LTs = LifeTable(x, mx = ahmd$mx)
#' LTs
#' # A warning is printed if the input contains missing values.
#' # Some of the missing values can be handled by the function.
#'
#' # Example 3 --- Abridged life table ------------
#'
#' x <- c(0, 1, seq(5, 110, by = 5))
#' mx <- c(.053, .005, .001, .0012, .0018, .002, .003, .004,
#' .004, .005, .006, .0093, .0129, .019, .031, .049,
#' .084, .129, .180, .2354, .3085, .390, .478, .551)
#' LT6 <- LifeTable(x, mx = mx, sex = "female")
#' LT6
#'
#' # Example 4 --- Abridged life table using my own 'ax' ------------
#' # In this example we are using the ages (x) and death rates (mx) from
#' # example 3. Note that 'ax' must have the same length as the 'x' vector
#' # otherwise an error message will be returned.
#'
#' my_ax <- c(0.1, 1.5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
#' 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1)
#'
#' LT7 <- LifeTable(x = x, mx = mx, ax = my_ax)
#'
#' @export
LifeTable <- function(x,
                      Dx = NULL,
                      Ex = NULL,
                      mx = NULL,
                      qx = NULL,
                      lx = NULL,
                      dx = NULL,
                      sex = NULL,
                      lx0 = 1e5,
                      ax = NULL){
  # Capture all arguments, validate/clean them and identify the input case.
  input <- c(as.list(environment()))
  X <- LifeTable.check(input)
  # X$iclass[1] keeps the condition scalar even if class() returned more
  # than one element (matrices are c("matrix", "array") since R 4.0).
  if (X$iclass[1] == "numeric") {
    # Single vector input -> a single life table.
    LT <- with(X, LifeTable.core(x, Dx, Ex, mx, qx, lx, dx, sex, lx0, ax))
  } else {
    # Matrix/data.frame input -> one life table per column. Collect the
    # pieces in a pre-allocated list and bind once at the end, instead of
    # growing the result with rbind() inside the loop (quadratic copying).
    pieces <- vector("list", X$nLT)
    for (i in seq_len(X$nLT)) {
      LTi <- with(X, LifeTable.core(x,
                                    Dx = Dx[, i],
                                    Ex = Ex[, i],
                                    mx = mx[, i],
                                    qx = qx[, i],
                                    lx = lx[, i],
                                    dx = dx[, i],
                                    sex = sex,
                                    lx0 = lx0,
                                    ax = ax))
      # Label each table with its column name, falling back to its index.
      N <- X$LTnames
      LTn <- if (is.na(N[i])) i else N[i]
      pieces[[i]] <- cbind(LT = LTn, LTi)
    }
    LT <- do.call(rbind, pieces)
  }
  out <- list(lt = LT,
              call = match.call(),
              process_date = date())
  out <- structure(class = "LifeTable", out)
  return(out)
}
#' LifeTable.core
#' @inheritParams LifeTable
#' @keywords internal
LifeTable.core <- function(x, Dx, Ex, mx, qx, lx, dx, sex, lx0, ax){
  # Build one life table from whichever single input the caller supplied
  # (Dx+Ex, mx, qx, lx or dx); all remaining columns are derived from it.
  my.case <- find.my.case(Dx, Ex, mx, qx, lx, dx)$case
  # Readable age-interval labels, e.g. "[0,1)", ..., "[110,+)".
  gr_names <- paste0("[", x, ",", c(x[-1], "+"), ")")
  N <- length(x)
  df <- diff(x)
  # Interval widths; the open-ended last interval reuses the previous width.
  nx <- c(df, df[N - 1])
  if (my.case == "C1_DxEx") {
    Dx <- as.numeric(Dx)
    Ex <- as.numeric(Ex)
    # Central death rate = observed deaths over exposure.
    mx <- Dx/Ex
    mx <- uxAbove100(x, mx)
    qx <- mx_qx(x, nx, mx, out = "qx")
    # Survivorship: radix times cumulative survival probabilities.
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C2_mx") {
    mx <- as.numeric(mx)
    qx <- mx_qx(x, nx, mx, out = "qx")
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C3_qx") {
    qx <- as.numeric(qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C4_lx") {
    lx <- as.numeric(lx)
    # Rescale lx so that l(0) equals the requested radix.
    lx <- lx * lx0/lx[1]
    dx <- dx_lx(lx, out = "dx")
    qx <- dx/lx
    qx <- uxAbove100(x, qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
  }
  if (my.case == "C5_dx") {
    dx <- as.numeric(dx)
    # Rescale dx so that total deaths equal the radix.
    dx <- dx * lx0/sum(dx)
    lx <- dx_lx(dx, out = "lx")
    qx <- dx/lx
    qx <- uxAbove100(x, qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
  }
  if (is.null(ax)) {
    ax <- compute.ax(x, mx, qx)
    # Refine ax for the first two age groups with Coale-Demeny coefficients
    # when the sex is known.
    if (!is.null(sex)) ax <- coale.demeny.ax(x, mx, ax, sex)
  } else if (length(ax) == 1){
    # Scalar ax: apply the same value to every interval.
    ax <- rep(ax, N)
  } else if (length(ax) == N){
    ax <- as.numeric(ax)
  }
  # Person-years lived in each interval; the last (open) interval is ax * dx.
  Lx <- nx * lx - (nx - ax) * dx
  Lx[N] <- ax[N] * dx[N]
  Lx[is.na(Lx)] <- 0
  # Person-years remaining above exact age x, and life expectancy.
  Tx <- rev(cumsum(rev(Lx)))
  ex <- Tx/lx
  ex[is.na(ex)] <- 0
  ex[N] <- if (ex[N - 1] == 0) 0 else ax[N]
  # If mx is entirely degenerate (all NA/NaN/Inf/zero), blank the whole table.
  last_check = all(is.na(mx)) | all(is.nan(mx)) | all(is.infinite(mx)) | all(mx == 0)
  if (last_check) mx = qx = ax = lx = dx = Lx = Tx = ex <- NA
  out <- data.frame(x.int = gr_names,
                    x = x,
                    mx = mx,
                    qx = qx,
                    ax = ax,
                    lx = lx,
                    dx = dx,
                    Lx = Lx,
                    Tx = Tx,
                    ex = ex)
  return(out)
}
#' Function that identifies the case/problem we have to solve
#' @inheritParams LifeTable
#' @keywords internal
#' Identify, from which arguments are non-NULL, which of the five supported
#' input combinations (Dx+Ex, mx, qx, lx or dx) the caller supplied, plus
#' the input's class and, for matrix-like input, the number/names of the
#' life tables to build.
find.my.case <- function(Dx = NULL,
                         Ex = NULL,
                         mx = NULL,
                         qx = NULL,
                         lx = NULL,
                         dx = NULL) {
  input <- c(as.list(environment()))
  # Matrix of possible cases: each row marks which inputs identify a case.
  # (TRUE/FALSE spelled out — T/F are reassignable and unsafe.)
  rn <- c("C1_DxEx", "C2_mx", "C3_qx", "C4_lx", "C5_dx")
  cn <- c("Dx", "Ex", "mx", "qx", "lx", "dx")
  mat <- matrix(ncol = 6, byrow = TRUE, dimnames = list(rn, cn),
                data = c(TRUE,  TRUE,  FALSE, FALSE, FALSE, FALSE,
                         FALSE, FALSE, TRUE,  FALSE, FALSE, FALSE,
                         FALSE, FALSE, FALSE, TRUE,  FALSE, FALSE,
                         FALSE, FALSE, FALSE, FALSE, TRUE,  FALSE,
                         FALSE, FALSE, FALSE, FALSE, FALSE, TRUE))
  # ----------------------------------------------
  # L1: which arguments were actually provided; L2: which case row matches.
  L1 <- !vapply(input, is.null, logical(1))
  L2 <- apply(mat, 1, function(z) all(L1 == z))
  my_case <- rn[L2]
  if (sum(L1[c(1, 2)]) == 1) {
    stop("If you input 'Dx' you must input 'Ex' as well, and viceversa",
         call. = FALSE)
  }
  if (!any(L2)) {
    stop("The input is not specified correctly. Check again the function ",
         "arguments and make sure the input data is added properly.",
         call. = FALSE)
  }
  X <- input[L1][[1]]
  nLT <- 1
  LTnames <- NA
  if (!is.vector(X)) {
    nLT <- ncol(X)        # number of LTs to be created
    LTnames <- colnames(X) # the names to be assigned to LTs
  }
  out <- list(case = my_case,
              # class(X)[1] so matrices (class c("matrix", "array") since
              # R 4.0) yield a scalar, keeping downstream `if` comparisons
              # such as `X$iclass == "numeric"` valid.
              iclass = class(X)[1],
              nLT = nLT,
              LTnames = LTnames)
  return(out)
}
#' mx to qx
#'
#' Function to convert mx into qx and back, using the constant force of
#' mortality assumption (CFM).
#' @inheritParams LifeTable
#' @param nx Length of the age-intervals.
#' @param ux A vector of mx or qx.
#' @param out Type of the output: mx or qx.
#' @keywords internal
mx_qx <- function(x, nx, ux, out = c("qx", "mx")){
  # Convert death rates to death probabilities (or the reverse) under the
  # constant-force-of-mortality assumption within each interval.
  out <- match.arg(out)
  if (out == "qx") {
    # qx = 1 - exp(-n * mx) under CFM.
    eta <- 1 - exp(-nx * ux)
    eta[length(nx)] <- 1 # The life table should always close with q[x] = 1
  } else {
    # mx = -log(1 - qx) / n; suppress the warning produced when qx = 1.
    eta <- suppressWarnings(-log(1 - ux)/nx)
    # If qx[last-age] = 1 then mx[last-age] = Inf. Not nice to have Inf's; they
    # distort the results in the subsequent processes.
    # We apply a simple extrapolation method of the last mx:
    # continue the geometric trend of the two preceding rates.
    N <- length(x)
    eta[N] <- eta[N - 1]^2 / eta[N - 2]
  }
  # Clean up NA/Inf/zero values at very old ages.
  eta <- uxAbove100(x, eta)
  return(eta)
}
#' Educate mx or qx on how to behave above age 100 if it gets in trouble
#' (with NA's, zero's and Inf)
#' @inheritParams LifeTable
#' @inheritParams mx_qx
#' @param omega Threshold age. Default: 100.
#' @param verbose A logical value. Set \code{verbose = FALSE} to silent
#' the process that take place inside the function and avoid progress messages.
#' @keywords internal
uxAbove100 <- function(x,
                       ux,
                       omega = 100,
                       verbose = FALSE) {
  # Replace degenerate rates (NA, Inf or zero) at ages >= omega with the
  # maximum well-defined rate, so old-age tails do not break the table.
  if (is.vector(ux)) {
    # Bug fix: the threshold used to be hard-coded to 100, silently
    # ignoring the `omega` argument.
    L <- x >= omega & (is.na(ux) | is.infinite(ux) | ux == 0)
    if (any(L)) {
      mux <- max(ux[!L])
      ux[L] <- mux
      if (verbose)
        warning("The input data contains NA's, Inf or zero's over the age of ",
                omega, ". These have been replaced with maximum observed value: ",
                round(mux, 4), call. = FALSE)
    }
  } else {
    # Matrices / data frames: clean each column independently.
    for (i in seq_len(ncol(ux))) ux[, i] <- uxAbove100(x, ux[, i], omega, verbose)
  }
  return(ux)
}
#' dx to lx
#'
#' Function to convert dx into lx and back
#' @param ux A vector of dx or lx data.
#' @param out Type of the output: dx or lx.
#' @keywords internal
dx_lx <- function(ux, out = c("dx", "lx")) {
  # Convert a survivorship column (lx) into life-table deaths (dx), or back.
  direction <- match.arg(out)
  if (direction == "dx") {
    # Deaths in each closed interval are the drops in survivorship; the
    # survivors entering the last (open) interval all die there.
    deaths <- -diff(ux)
    eta <- c(deaths, ux[1] - sum(deaths))
  } else {
    # lx at age x is the sum of all deaths occurring at x and beyond.
    eta <- rev(cumsum(rev(ux)))
  }
  return(eta)
}
#' Find ax indicator
#'
#' @inheritParams LifeTable
#' @return \code{ax} - the point in the age interval where 50% of the deaths
#' have already occurred
#' @keywords internal
#' Estimate ax (average person-years lived within the interval by those who
#' die in it) from the identity linking mx, qx and the interval widths.
compute.ax <- function(x, mx, qx) {
  nx <- c(diff(x), Inf)
  N <- length(x)
  ax <- nx + 1/mx - nx/qx
  # Carry the last well-defined value forward over Inf/NA entries (the
  # open-ended last age group yields Inf - Inf = NaN).
  # Bug fix: seq_len(N - 1) instead of 1:(N - 1) — for N == 1 the old
  # `1:0` iterated over c(1, 0) and crashed on the zero index.
  for (i in seq_len(N - 1)) {
    if (is.infinite(ax[i + 1]) | is.na(ax[i + 1])) ax[i + 1] <- ax[i]
  }
  return(ax)
}
#' Find ax[1:2] indicators using Coale-Demeny coefficients
#' Here we adjust the first two values of ax to account for infant
#' mortality more accurately
#' @inheritParams LifeTable
#' @keywords internal
coale.demeny.ax <- function(x, mx, ax, sex) {
  # Replace ax in the first two age groups (ages 0 and 1-4) with
  # Coale-Demeny model values driven by infant mortality m(0).
  if (mx[1] < 0) stop("'m[1]' must be greater than 0", call. = FALSE)
  m0 <- mx[1]
  # Above the m(0) threshold of 0.107 fixed separation factors apply;
  # below it they are linear functions of m(0).
  high <- m0 >= 0.107
  aM <- c(if (high) 0.330 else 0.045 + 2.684 * m0,
          if (high) 0.330 else 1.651 - 2.816 * m0)
  aF <- c(if (high) 0.350 else 0.053 + 2.800 * m0,
          if (high) 0.350 else 1.522 - 1.518 * m0)
  aT <- (aM + aF) / 2
  # Rescale to the actual widths of the first two intervals (the model
  # assumes standard abridged widths of 1 and 4 years).
  f <- c(diff(x), Inf)[1:2] / c(1, 4)
  if (sex == "male") ax[1:2] <- aM * f
  if (sex == "female") ax[1:2] <- aF * f
  if (sex == "total") ax[1:2] <- aT * f
  return(ax)
}
#' Check LifeTable input
#' @param input A list containing the input arguments of the LifeTable functions.
#' @keywords internal
LifeTable.check <- function(input) {
  # Validate and clean the LifeTable() arguments; returns them together with
  # the identified input case, class and table count/names.
  with(input, {
    # ----------------------------------------------
    K <- find.my.case(Dx, Ex, mx, qx, lx, dx)
    C <- K$case
    # (Dropped a stray NULL that c() silently discarded anyway.)
    valid_classes <- c("numeric", "matrix", "data.frame")
    # any() keeps the condition scalar even when class() has length > 1
    # (e.g. c("matrix", "array") since R 4.0).
    if (!any(K$iclass %in% valid_classes)) {
      stop(paste0("The class of the input should be: ",
                  paste(valid_classes, collapse = ", ")), call. = FALSE)
    }
    # ----------------------------------------------
    SMS <- "contains missing values. These have been replaced with "
    if (!is.null(sex)) {
      if (!(sex %in% c("male", "female", "total")))
        stop("'sex' should be: 'male', 'female', 'total' or 'NULL'.",
             call. = FALSE)
    }
    if (C == "C1_DxEx") {
      # Bug fix: warn BEFORE imputing — the old order replaced the NAs
      # first, so the warnings could never trigger.
      if (any(is.na(Dx))) warning("'Dx'", SMS, 0, call. = FALSE)
      if (any(is.na(Ex))) warning("'Ex'", SMS, 0.01, call. = FALSE)
      Dx[is.na(Dx)] <- 0
      Ex[is.na(Ex) | Ex == 0] <- 0.01
    }
    if (C == "C2_mx") {
      mx <- uxAbove100(x, mx)
    }
    if (C == "C3_qx") {
      qx <- uxAbove100(x, qx)
    }
    if (C == "C4_lx") {
      # Missing survivors at very old ages are assumed extinct; NAs at
      # younger ages are only warned about, not imputed.
      lx[is.na(lx) & x >= 100] <- 0
      if (any(is.na(lx))) warning("'lx'", SMS, 0, call. = FALSE)
    }
    if (C == "C5_dx") {
      # Bug fix: warn before imputation (same dead-code issue as C1).
      if (any(is.na(dx))) warning("'dx'", SMS, 0, call. = FALSE)
      dx[is.na(dx)] <- 0
    }
    if (!is.null(ax)) {
      if (!is.numeric(ax))
        stop("'ax' must be a numeric scalar (or NULL)", call. = FALSE)
      if (!any(length(ax) %in% c(1, length(x))))
        stop("'ax' must be a scalar of length 1 or a ",
             "vector of the same dimension as 'x'",
             call. = FALSE)
    }
    out <- list(x = x, Dx = Dx, Ex = Ex, mx = mx, qx = qx,
                lx = lx, dx = dx, sex = sex, lx0 = lx0, ax = ax,
                iclass = K$iclass, nLT = K$nLT, LTnames = K$LTnames)
    return(out)
  })
}
#' Print LifeTable
#' @param x An object of class \code{"LifeTable"}
#' @param ... Further arguments passed to or from other methods.
#' @keywords internal
#' @export
print.LifeTable <- function(x, ...){
  # Compact console display: rounded columns, a head/tail preview and a
  # short structural summary of the stored life table(s).
  LT <- x$lt
  lt <- with(LT, data.frame(x.int = x.int,
                            x = x,
                            mx = round(mx, 6),
                            qx = round(qx, 6),
                            ax = round(ax, 2),
                            lx = round(lx),
                            dx = round(dx),
                            Lx = round(Lx),
                            Tx = round(Tx),
                            ex = round(ex, 2)))
  # Multi-table objects carry an extra leading "LT" identifier column.
  if (colnames(LT)[1] == "LT") lt <- data.frame(LT = LT$LT, lt)
  dimnames(lt) <- dimnames(LT)
  # Number of distinct ages, hence the number of stacked life tables.
  nx <- length(unique(LT$x))
  nlt <- nrow(LT) / nx
  out <- head_tail(lt, hlength = 6, tlength = 3, ...)
  # Positive age steps tell whether the table is single-year ("Full")
  # or grouped ("Abridged").
  step <- diff(LT$x)
  step <- step[step > 0]
  type1 <- if (all(step == 1)) "Full" else "Abridged"
  type2 <- if (nlt == 1) "Life Table" else "Life Tables"
  cat("\n", type1, " ", type2, "\n\n", sep = "")
  cat("Number of life tables:", nlt, "\n")
  cat("Dimension:", nrow(LT), "x", ncol(LT), "\n")
  cat("Age intervals:", head_tail(lt$x.int, hlength = 3, tlength = 3), "\n\n")
  print(out, row.names = FALSE)
}
|
/R/LifeTable.R
|
permissive
|
SheilaDesanze/MortalityLaws
|
R
| false
| false
| 16,249
|
r
|
# --------------------------------------------------- #
# Author: Marius D. Pascariu
# License: MIT
# Last update: Wed Jun 05 14:33:49 2019
# --------------------------------------------------- #
#' Compute Life Tables from Mortality Data
#'
#' Construct either a full or abridged life table with various input choices like:
#' death counts and mid-interval population estimates \code{(Dx, Ex)} or
#' age-specific death rates \code{(mx)} or death probabilities \code{(qx)}
#' or survivorship curve \code{(lx)} or a distribution of deaths \code{(dx)}.
#' If one of these options is specified, the other can be ignored. The input
#' data can be an object of class: numerical \code{vector}, \code{matrix} or
#' \code{data.frame}.
#'
#' @details
#' The "life table" is also called "mortality table" or "actuarial table".
#' This shows, for each age, what the probability is that a person of that
#' age will die before his or her next birthday, the expectation of life across
#' different age ranges or the survivorship of people from a certain population.
#' @usage
#' LifeTable(x, Dx = NULL, Ex = NULL,
#' mx = NULL,
#' qx = NULL,
#' lx = NULL,
#' dx = NULL,
#' sex = NULL,
#' lx0 = 1e5,
#' ax = NULL)
#' @param x Vector of ages at the beginning of the age interval.
#' @param Dx Object containing death counts. An element of the \code{Dx} object
#' represents the number of deaths during the year to persons aged x to x+n.
#' @param Ex Exposure in the period. \code{Ex} can be approximated by the
#' mid-year population aged x to x+n.
#' @param mx Life table death rate in age interval [x, x+n).
#' @param qx Probability of dying in age interval [x, x+n).
#' @param lx Probability of survival up until exact age x (if l(0) = 1), or
#' the number of survivors at exact age x, assuming l(0) > 1.
#' @param dx Deaths by life-table population in the age interval [x, x+n).
#' @param sex Sex of the population considered here. Default: \code{NULL}.
#' This argument affects the first two values in the life table ax column.
#' If sex is specified the values are computed based on the Coale-Demeny method
#' and are slightly different for males than for females.
#' Options: \code{NULL, male, female, total}.
#' @param lx0 Radix. Default: 100 000.
#' @param ax Numeric scalar. Subject-time alive in age-interval for those who
#' die in the same interval. If \code{NULL} this will be estimated. A common
#' assumption is \code{ax = 0.5}, i.e. the deaths occur in the middle of
#' the interval. Default: \code{NULL}.
#' @return The output is of the \code{"LifeTable"} class with the components:
#' \item{lt}{Computed life table;}
#' \item{call}{\code{Call} in which all of the specified arguments are
#' specified by their full names;}
#' \item{process_date}{Time stamp.}
#' @seealso
#' \code{\link{LawTable}}
#' \code{\link{convertFx}}
#' @author Marius D. Pascariu
#' @examples
#' # Example 1 --- Full life tables with different inputs ---
#'
#' y <- 1900
#' x <- as.numeric(rownames(ahmd$mx))
#' Dx <- ahmd$Dx[, paste(y)]
#' Ex <- ahmd$Ex[, paste(y)]
#'
#' LT1 <- LifeTable(x, Dx = Dx, Ex = Ex)
#' LT2 <- LifeTable(x, mx = LT1$lt$mx)
#' LT3 <- LifeTable(x, qx = LT1$lt$qx)
#' LT4 <- LifeTable(x, lx = LT1$lt$lx)
#' LT5 <- LifeTable(x, dx = LT1$lt$dx)
#'
#' LT1
#' LT5
#' ls(LT5)
#'
#' # Example 2 --- Compute multiple life tables at once ---
#'
#' LTs = LifeTable(x, mx = ahmd$mx)
#' LTs
#' # A warning is printed if the input contains missing values.
#' # Some of the missing values can be handled by the function.
#'
#' # Example 3 --- Abridged life table ------------
#'
#' x <- c(0, 1, seq(5, 110, by = 5))
#' mx <- c(.053, .005, .001, .0012, .0018, .002, .003, .004,
#' .004, .005, .006, .0093, .0129, .019, .031, .049,
#' .084, .129, .180, .2354, .3085, .390, .478, .551)
#' LT6 <- LifeTable(x, mx = mx, sex = "female")
#' LT6
#'
#' # Example 4 --- Abridged life table using my own 'ax' ------------
#' # In this example we are using the ages (x) and death rates (mx) from
#' # example 3. Note that 'ax' must have the same length as the 'x' vector
#' # otherwise an error message will be returned.
#'
#' my_ax <- c(0.1, 1.5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
#' 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1)
#'
#' LT7 <- LifeTable(x = x, mx = mx, ax = my_ax)
#'
#' @export
LifeTable <- function(x,
                      Dx = NULL,
                      Ex = NULL,
                      mx = NULL,
                      qx = NULL,
                      lx = NULL,
                      dx = NULL,
                      sex = NULL,
                      lx0 = 1e5,
                      ax = NULL){
  # Capture all arguments, validate/clean them and identify the input case.
  input <- c(as.list(environment()))
  X <- LifeTable.check(input)
  # X$iclass[1] keeps the condition scalar even if class() returned more
  # than one element (matrices are c("matrix", "array") since R 4.0).
  if (X$iclass[1] == "numeric") {
    # Single vector input -> a single life table.
    LT <- with(X, LifeTable.core(x, Dx, Ex, mx, qx, lx, dx, sex, lx0, ax))
  } else {
    # Matrix/data.frame input -> one life table per column. Collect the
    # pieces in a pre-allocated list and bind once at the end, instead of
    # growing the result with rbind() inside the loop (quadratic copying).
    pieces <- vector("list", X$nLT)
    for (i in seq_len(X$nLT)) {
      LTi <- with(X, LifeTable.core(x,
                                    Dx = Dx[, i],
                                    Ex = Ex[, i],
                                    mx = mx[, i],
                                    qx = qx[, i],
                                    lx = lx[, i],
                                    dx = dx[, i],
                                    sex = sex,
                                    lx0 = lx0,
                                    ax = ax))
      # Label each table with its column name, falling back to its index.
      N <- X$LTnames
      LTn <- if (is.na(N[i])) i else N[i]
      pieces[[i]] <- cbind(LT = LTn, LTi)
    }
    LT <- do.call(rbind, pieces)
  }
  out <- list(lt = LT,
              call = match.call(),
              process_date = date())
  out <- structure(class = "LifeTable", out)
  return(out)
}
#' LifeTable.core
#' @inheritParams LifeTable
#' @keywords internal
LifeTable.core <- function(x, Dx, Ex, mx, qx, lx, dx, sex, lx0, ax){
  # Build one life table from whichever single input the caller supplied
  # (Dx+Ex, mx, qx, lx or dx); all remaining columns are derived from it.
  my.case <- find.my.case(Dx, Ex, mx, qx, lx, dx)$case
  # Readable age-interval labels, e.g. "[0,1)", ..., "[110,+)".
  gr_names <- paste0("[", x, ",", c(x[-1], "+"), ")")
  N <- length(x)
  df <- diff(x)
  # Interval widths; the open-ended last interval reuses the previous width.
  nx <- c(df, df[N - 1])
  if (my.case == "C1_DxEx") {
    Dx <- as.numeric(Dx)
    Ex <- as.numeric(Ex)
    # Central death rate = observed deaths over exposure.
    mx <- Dx/Ex
    mx <- uxAbove100(x, mx)
    qx <- mx_qx(x, nx, mx, out = "qx")
    # Survivorship: radix times cumulative survival probabilities.
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C2_mx") {
    mx <- as.numeric(mx)
    qx <- mx_qx(x, nx, mx, out = "qx")
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C3_qx") {
    qx <- as.numeric(qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
    lx <- lx0 * c(1, cumprod(1 - qx)[1:(N - 1)])
    dx <- dx_lx(lx, out = "dx")
  }
  if (my.case == "C4_lx") {
    lx <- as.numeric(lx)
    # Rescale lx so that l(0) equals the requested radix.
    lx <- lx * lx0/lx[1]
    dx <- dx_lx(lx, out = "dx")
    qx <- dx/lx
    qx <- uxAbove100(x, qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
  }
  if (my.case == "C5_dx") {
    dx <- as.numeric(dx)
    # Rescale dx so that total deaths equal the radix.
    dx <- dx * lx0/sum(dx)
    lx <- dx_lx(dx, out = "lx")
    qx <- dx/lx
    qx <- uxAbove100(x, qx)
    mx <- mx_qx(x, nx, qx, out = "mx")
  }
  if (is.null(ax)) {
    ax <- compute.ax(x, mx, qx)
    # Refine ax for the first two age groups with Coale-Demeny coefficients
    # when the sex is known.
    if (!is.null(sex)) ax <- coale.demeny.ax(x, mx, ax, sex)
  } else if (length(ax) == 1){
    # Scalar ax: apply the same value to every interval.
    ax <- rep(ax, N)
  } else if (length(ax) == N){
    ax <- as.numeric(ax)
  }
  # Person-years lived in each interval; the last (open) interval is ax * dx.
  Lx <- nx * lx - (nx - ax) * dx
  Lx[N] <- ax[N] * dx[N]
  Lx[is.na(Lx)] <- 0
  # Person-years remaining above exact age x, and life expectancy.
  Tx <- rev(cumsum(rev(Lx)))
  ex <- Tx/lx
  ex[is.na(ex)] <- 0
  ex[N] <- if (ex[N - 1] == 0) 0 else ax[N]
  # If mx is entirely degenerate (all NA/NaN/Inf/zero), blank the whole table.
  last_check = all(is.na(mx)) | all(is.nan(mx)) | all(is.infinite(mx)) | all(mx == 0)
  if (last_check) mx = qx = ax = lx = dx = Lx = Tx = ex <- NA
  out <- data.frame(x.int = gr_names,
                    x = x,
                    mx = mx,
                    qx = qx,
                    ax = ax,
                    lx = lx,
                    dx = dx,
                    Lx = Lx,
                    Tx = Tx,
                    ex = ex)
  return(out)
}
#' Function that identifies the case/problem we have to solve
#' @inheritParams LifeTable
#' @keywords internal
#' Identify, from which arguments are non-NULL, which of the five supported
#' input combinations (Dx+Ex, mx, qx, lx or dx) the caller supplied, plus
#' the input's class and, for matrix-like input, the number/names of the
#' life tables to build.
find.my.case <- function(Dx = NULL,
                         Ex = NULL,
                         mx = NULL,
                         qx = NULL,
                         lx = NULL,
                         dx = NULL) {
  input <- c(as.list(environment()))
  # Matrix of possible cases: each row marks which inputs identify a case.
  # (TRUE/FALSE spelled out — T/F are reassignable and unsafe.)
  rn <- c("C1_DxEx", "C2_mx", "C3_qx", "C4_lx", "C5_dx")
  cn <- c("Dx", "Ex", "mx", "qx", "lx", "dx")
  mat <- matrix(ncol = 6, byrow = TRUE, dimnames = list(rn, cn),
                data = c(TRUE,  TRUE,  FALSE, FALSE, FALSE, FALSE,
                         FALSE, FALSE, TRUE,  FALSE, FALSE, FALSE,
                         FALSE, FALSE, FALSE, TRUE,  FALSE, FALSE,
                         FALSE, FALSE, FALSE, FALSE, TRUE,  FALSE,
                         FALSE, FALSE, FALSE, FALSE, FALSE, TRUE))
  # ----------------------------------------------
  # L1: which arguments were actually provided; L2: which case row matches.
  L1 <- !vapply(input, is.null, logical(1))
  L2 <- apply(mat, 1, function(z) all(L1 == z))
  my_case <- rn[L2]
  if (sum(L1[c(1, 2)]) == 1) {
    stop("If you input 'Dx' you must input 'Ex' as well, and viceversa",
         call. = FALSE)
  }
  if (!any(L2)) {
    stop("The input is not specified correctly. Check again the function ",
         "arguments and make sure the input data is added properly.",
         call. = FALSE)
  }
  X <- input[L1][[1]]
  nLT <- 1
  LTnames <- NA
  if (!is.vector(X)) {
    nLT <- ncol(X)        # number of LTs to be created
    LTnames <- colnames(X) # the names to be assigned to LTs
  }
  out <- list(case = my_case,
              # class(X)[1] so matrices (class c("matrix", "array") since
              # R 4.0) yield a scalar, keeping downstream `if` comparisons
              # such as `X$iclass == "numeric"` valid.
              iclass = class(X)[1],
              nLT = nLT,
              LTnames = LTnames)
  return(out)
}
#' mx to qx
#'
#' Function to convert mx into qx and back, using the constant force of
#' mortality assumption (CFM).
#' @inheritParams LifeTable
#' @param nx Length of the age-intervals.
#' @param ux A vector of mx or qx.
#' @param out Type of the output: mx or qx.
#' @keywords internal
mx_qx <- function(x, nx, ux, out = c("qx", "mx")){
  # Convert death rates to death probabilities (or the reverse) under the
  # constant-force-of-mortality assumption within each interval.
  out <- match.arg(out)
  if (out == "qx") {
    # qx = 1 - exp(-n * mx) under CFM.
    eta <- 1 - exp(-nx * ux)
    eta[length(nx)] <- 1 # The life table should always close with q[x] = 1
  } else {
    # mx = -log(1 - qx) / n; suppress the warning produced when qx = 1.
    eta <- suppressWarnings(-log(1 - ux)/nx)
    # If qx[last-age] = 1 then mx[last-age] = Inf. Not nice to have Inf's; they
    # distort the results in the subsequent processes.
    # We apply a simple extrapolation method of the last mx:
    # continue the geometric trend of the two preceding rates.
    N <- length(x)
    eta[N] <- eta[N - 1]^2 / eta[N - 2]
  }
  # Clean up NA/Inf/zero values at very old ages.
  eta <- uxAbove100(x, eta)
  return(eta)
}
#' Educate mx or qx on how to behave above age 100 if it gets in trouble
#' (with NA's, zero's and Inf)
#' @inheritParams LifeTable
#' @inheritParams mx_qx
#' @param omega Threshold age. Default: 100.
#' @param verbose A logical value. Set \code{verbose = FALSE} to silent
#' the process that take place inside the function and avoid progress messages.
#' @keywords internal
uxAbove100 <- function(x,
                       ux,
                       omega = 100,
                       verbose = FALSE) {
  # Replace degenerate rates (NA, Inf or zero) at ages >= omega with the
  # maximum well-defined rate, so old-age tails do not break the table.
  if (is.vector(ux)) {
    # Bug fix: the threshold used to be hard-coded to 100, silently
    # ignoring the `omega` argument.
    L <- x >= omega & (is.na(ux) | is.infinite(ux) | ux == 0)
    if (any(L)) {
      mux <- max(ux[!L])
      ux[L] <- mux
      if (verbose)
        warning("The input data contains NA's, Inf or zero's over the age of ",
                omega, ". These have been replaced with maximum observed value: ",
                round(mux, 4), call. = FALSE)
    }
  } else {
    # Matrices / data frames: clean each column independently.
    for (i in seq_len(ncol(ux))) ux[, i] <- uxAbove100(x, ux[, i], omega, verbose)
  }
  return(ux)
}
#' dx to lx
#'
#' Function to convert dx into lx and back
#' @param ux A vector of dx or lx data.
#' @param out Type of the output: dx or lx.
#' @keywords internal
dx_lx <- function(ux, out = c("dx", "lx")) {
  # Convert a survivorship column (lx) into life-table deaths (dx), or back.
  direction <- match.arg(out)
  if (direction == "dx") {
    # Deaths in each closed interval are the drops in survivorship; the
    # survivors entering the last (open) interval all die there.
    deaths <- -diff(ux)
    eta <- c(deaths, ux[1] - sum(deaths))
  } else {
    # lx at age x is the sum of all deaths occurring at x and beyond.
    eta <- rev(cumsum(rev(ux)))
  }
  return(eta)
}
#' Find ax indicator
#'
#' @inheritParams LifeTable
#' @return \code{ax} - the point in the age interval where 50% of the deaths
#' have already occurred
#' @keywords internal
#' Estimate ax (average person-years lived within the interval by those who
#' die in it) from the identity linking mx, qx and the interval widths.
compute.ax <- function(x, mx, qx) {
  nx <- c(diff(x), Inf)
  N <- length(x)
  ax <- nx + 1/mx - nx/qx
  # Carry the last well-defined value forward over Inf/NA entries (the
  # open-ended last age group yields Inf - Inf = NaN).
  # Bug fix: seq_len(N - 1) instead of 1:(N - 1) — for N == 1 the old
  # `1:0` iterated over c(1, 0) and crashed on the zero index.
  for (i in seq_len(N - 1)) {
    if (is.infinite(ax[i + 1]) | is.na(ax[i + 1])) ax[i + 1] <- ax[i]
  }
  return(ax)
}
#' Find ax[1:2] indicators using Coale-Demeny coefficients
#' Here we adjust the first two values of ax to account for infant
#' mortality more accurately
#' @inheritParams LifeTable
#' @keywords internal
coale.demeny.ax <- function(x, mx, ax, sex) {
  # Replace ax in the first two age groups (ages 0 and 1-4) with
  # Coale-Demeny model values driven by infant mortality m(0).
  if (mx[1] < 0) stop("'m[1]' must be greater than 0", call. = FALSE)
  m0 <- mx[1]
  # Above the m(0) threshold of 0.107 fixed separation factors apply;
  # below it they are linear functions of m(0).
  high <- m0 >= 0.107
  aM <- c(if (high) 0.330 else 0.045 + 2.684 * m0,
          if (high) 0.330 else 1.651 - 2.816 * m0)
  aF <- c(if (high) 0.350 else 0.053 + 2.800 * m0,
          if (high) 0.350 else 1.522 - 1.518 * m0)
  aT <- (aM + aF) / 2
  # Rescale to the actual widths of the first two intervals (the model
  # assumes standard abridged widths of 1 and 4 years).
  f <- c(diff(x), Inf)[1:2] / c(1, 4)
  if (sex == "male") ax[1:2] <- aM * f
  if (sex == "female") ax[1:2] <- aF * f
  if (sex == "total") ax[1:2] <- aT * f
  return(ax)
}
#' Check LifeTable input
#' @param input A list containing the input arguments of the LifeTable functions.
#' @keywords internal
LifeTable.check <- function(input) {
  # Validate and clean the LifeTable() arguments; returns them together with
  # the identified input case, class and table count/names.
  with(input, {
    # ----------------------------------------------
    K <- find.my.case(Dx, Ex, mx, qx, lx, dx)
    C <- K$case
    # (Dropped a stray NULL that c() silently discarded anyway.)
    valid_classes <- c("numeric", "matrix", "data.frame")
    # any() keeps the condition scalar even when class() has length > 1
    # (e.g. c("matrix", "array") since R 4.0).
    if (!any(K$iclass %in% valid_classes)) {
      stop(paste0("The class of the input should be: ",
                  paste(valid_classes, collapse = ", ")), call. = FALSE)
    }
    # ----------------------------------------------
    SMS <- "contains missing values. These have been replaced with "
    if (!is.null(sex)) {
      if (!(sex %in% c("male", "female", "total")))
        stop("'sex' should be: 'male', 'female', 'total' or 'NULL'.",
             call. = FALSE)
    }
    if (C == "C1_DxEx") {
      # Bug fix: warn BEFORE imputing — the old order replaced the NAs
      # first, so the warnings could never trigger.
      if (any(is.na(Dx))) warning("'Dx'", SMS, 0, call. = FALSE)
      if (any(is.na(Ex))) warning("'Ex'", SMS, 0.01, call. = FALSE)
      Dx[is.na(Dx)] <- 0
      Ex[is.na(Ex) | Ex == 0] <- 0.01
    }
    if (C == "C2_mx") {
      mx <- uxAbove100(x, mx)
    }
    if (C == "C3_qx") {
      qx <- uxAbove100(x, qx)
    }
    if (C == "C4_lx") {
      # Missing survivors at very old ages are assumed extinct; NAs at
      # younger ages are only warned about, not imputed.
      lx[is.na(lx) & x >= 100] <- 0
      if (any(is.na(lx))) warning("'lx'", SMS, 0, call. = FALSE)
    }
    if (C == "C5_dx") {
      # Bug fix: warn before imputation (same dead-code issue as C1).
      if (any(is.na(dx))) warning("'dx'", SMS, 0, call. = FALSE)
      dx[is.na(dx)] <- 0
    }
    if (!is.null(ax)) {
      if (!is.numeric(ax))
        stop("'ax' must be a numeric scalar (or NULL)", call. = FALSE)
      if (!any(length(ax) %in% c(1, length(x))))
        stop("'ax' must be a scalar of length 1 or a ",
             "vector of the same dimension as 'x'",
             call. = FALSE)
    }
    out <- list(x = x, Dx = Dx, Ex = Ex, mx = mx, qx = qx,
                lx = lx, dx = dx, sex = sex, lx0 = lx0, ax = ax,
                iclass = K$iclass, nLT = K$nLT, LTnames = K$LTnames)
    return(out)
  })
}
#' Print LifeTable
#' @param x An object of class \code{"LifeTable"}
#' @param ... Further arguments passed to or from other methods.
#' @keywords internal
#' @export
print.LifeTable <- function(x, ...){
  # Compact console display: rounded columns, a head/tail preview and a
  # short structural summary of the stored life table(s).
  LT <- x$lt
  lt <- with(LT, data.frame(x.int = x.int,
                            x = x,
                            mx = round(mx, 6),
                            qx = round(qx, 6),
                            ax = round(ax, 2),
                            lx = round(lx),
                            dx = round(dx),
                            Lx = round(Lx),
                            Tx = round(Tx),
                            ex = round(ex, 2)))
  # Multi-table objects carry an extra leading "LT" identifier column.
  if (colnames(LT)[1] == "LT") lt <- data.frame(LT = LT$LT, lt)
  dimnames(lt) <- dimnames(LT)
  # Number of distinct ages, hence the number of stacked life tables.
  nx <- length(unique(LT$x))
  nlt <- nrow(LT) / nx
  out <- head_tail(lt, hlength = 6, tlength = 3, ...)
  # Positive age steps tell whether the table is single-year ("Full")
  # or grouped ("Abridged").
  step <- diff(LT$x)
  step <- step[step > 0]
  type1 <- if (all(step == 1)) "Full" else "Abridged"
  type2 <- if (nlt == 1) "Life Table" else "Life Tables"
  cat("\n", type1, " ", type2, "\n\n", sep = "")
  cat("Number of life tables:", nlt, "\n")
  cat("Dimension:", nrow(LT), "x", ncol(LT), "\n")
  cat("Age intervals:", head_tail(lt$x.int, hlength = 3, tlength = 3), "\n\n")
  print(out, row.names = FALSE)
}
|
# Kaggle Higgs Boson challenge: fit a bagged-tree ("treebag") classifier,
# predict in memory-friendly chunks, and build the submission file.
# NOTE(review): setwd() with an absolute local path makes this script
# machine-specific.
setwd("F:/Introduction to Data Science/Week5/Assessment/Kaggle_Higgs_Boson_Machine_Learning_Challenge")
getwd()
#load the updated dataset
training.elimNA <- read.csv("trainingElimNA.csv")
#discard weights
Label <- training.elimNA$Label
predictors <- training.elimNA[, 1:25]
tidyData <- cbind(predictors, Label)
dim(tidyData)
View(tidyData)
print(object.size(tidyData), units = "Mb")
#load the updated test data
testData <- read.csv("testElimNA.csv")
dim(testData)
View(testData)
names(testData)
print(object.size(testData), units = "Mb")
# Model fitting
library(caret)
library(kernlab)
library(ipred)
library(plyr)
set.seed(100)
# Stratified 20% sample keeps the treebag fit tractable.
partionData <- createDataPartition(y = tidyData$Label,
                                   p = 0.2,
                                   list = FALSE)
print(object.size(partionData), units = "Mb")
trainData <- tidyData[partionData, ]
dim(trainData)
names(trainData)
# NOTE(review): the hold-out rows come from training.elimNA (which still
# carries the Weight column), not tidyData -- presumably intentional so AMS
# can use the weights later; verify.
testData.train <- training.elimNA[-partionData, ]
dim(testData.train)
View(trainData)
names(trainData)
print(object.size(trainData), units = "Mb")
#Fit a model: bagged trees with 5-fold cross-validation
ctrl <- trainControl(method = "cv",
                     number = 5,
                     allowParallel = TRUE
                     )
modelFit.treebag <- train(Label~.,
                          method = "treebag",
                          data = trainData,
                          preProcess = c("scale", "center"),
                          tuneLength = 15, #how many candidate are evaluated
                          trControl = ctrl
                          )
modelFit.treebag
#predicting on training data
# Predictions are done in 50k-row slices to limit memory use.
treebag.predict.train1 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[1:50000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train2 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[50001:100000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train3 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[100001:150000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train4 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[150001:200000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train5 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[200001:250000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
#create a vector of the predicted class
# NOTE(review): c() on factors yields their integer codes (1/2); the
# comparisons against as.numeric(Label) below rely on that.
treebag.predict.train <- c(treebag.predict.train1,
                           treebag.predict.train2,
                           treebag.predict.train3,
                           treebag.predict.train4,
                           treebag.predict.train5
                           )
#Accuracy measure
L <- as.numeric(training.elimNA$Label)
accuracy.train <- sum(treebag.predict.train == L) / length(treebag.predict.train)
confusionMatrix(treebag.predict.train, L)
#predicting on the training set test data
treebag.predict.train.test <- predict(modelFit.treebag,
                                      #training.elimNA[1:90000, ]
                                      testData.train[1:50000, ]
                                      #type = "prob" #compute class probabilities from the model
                                      )
treebag.predict.train.test2 <- predict(modelFit.treebag,
                                       #training.elimNA[1:90000, ]
                                       testData.train[50001:100000, ]
                                       #type = "prob" #compute class probabilities from the model
                                       )
treebag.predict.train.test3 <- predict(modelFit.treebag,
                                       #training.elimNA[1:90000, ]
                                       testData.train[100001:149999, ]
                                       #type = "prob" #compute class probabilities from the model
                                       )
treebag.predict.train.test <- c(treebag.predict.train.test,
                                treebag.predict.train.test2,
                                treebag.predict.train.test3
                                )
#Accuracy measure
L <- as.numeric(testData.train$Label)
accuracy.train.test <- sum(treebag.predict.train.test == L) / length(treebag.predict.train.test)
confusionMatrix(treebag.predict.train.test, L)
#predicting on test data and create submission file
treebag.predict <- predict(modelFit.treebag,
                           #training.elimNA[1:90000, ]
                           testData[1:50000, ]
                           #type = "prob" #compute class probabilities from the model
                           )
treebag.predict2 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[50001:100000, ] #actually test.elimNA
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict3 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[100001:150000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict4 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[150001:200000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict5 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[200001:250000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict6 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[250001:300000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict7 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[300001:350000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict8 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[350001:400000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict9 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[400001:450000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict10 <- predict(modelFit.treebag,
                             #training.elimNA[1:90000, ]
                             testData[450001:500000, ]
                             #type = "prob" #compute class probabilities from the model
                             )
treebag.predict11 <- predict(modelFit.treebag,
                             #training.elimNA[1:90000, ]
                             testData[500001:550000, ]
                             #type = "prob" #compute class probabilities from the model
                             )
#create column vector of the predicted class
treebag.predict.test <- c(treebag.predict,
                          treebag.predict2,
                          treebag.predict3,
                          treebag.predict4,
                          treebag.predict5,
                          treebag.predict6,
                          treebag.predict7,
                          treebag.predict8,
                          treebag.predict9,
                          treebag.predict10,
                          treebag.predict11
                          )
# Threshold to set results
threshold = 0.002
# Shift factor codes (1/2) to 0/1 so the threshold below selects class "s".
treebag.predict.test <- treebag.predict.test - 1
predicted=rep("b",550000)
predicted[treebag.predict.test>=threshold]="s"
weightRank = rank(treebag.predict.test, ties.method= "random")
submission = data.frame(EventId = testData$EventId, RankOrder = weightRank, Class = predicted)
write.csv(submission, "submission_version1.csv", row.names=FALSE)
# ROC/AMS evaluation. The three assignments below were swapped in and out
# across runs; only the last one takes effect.
train_ams <- as.numeric(training.elimNA$Label) - 1
train_ams <- as.numeric(testData.train$Label) - 1
train_ams <- L - 1
predict_ams <- as.numeric(treebag.predict.train) - 1
library(pROC)
auc = roc(train_ams, predict_ams)
plot(auc, print.thres=TRUE)
#Area under the curve: 0.8117 gbm [train 75000]
#Area under the curve: 0.8354 treebag [train 50000]
# Threshold to set results
threshold = 0.002
table(train_ams, predict_ams >= threshold)
# train_ams FALSE TRUE
# 0 143900 20433
# 1 27199 58468
#Reference - treebag
#Prediction b s
#b 148811 20111
#s 15522 65556
# This code creates a field for predicted response so you can calculate AMS
# Alternative lengths were used for different evaluation sets; only the last
# assignment takes effect.
predicted <- rep("b",250000)
predicted <- rep("b",199999)
predicted <- rep("b",50000)
predicted[predict_ams >= threshold] <- "s"
AMS(pred=predicted,real=training.elimNA$Label,weight=training.elimNA$Weight)
AMS(pred=predicted,
    real=testData.train$Label,
    weight=testData.train$Weight)
AMS(pred=predicted,
    real=testData[1:50000, 25],
    weight=testData[1:50000, 24])
# 2.61082 svm
# [1] 2.636187 gbm
# 2.846631 gbm[train 50000]
# 2.87835 gbm[train 75000]
# 3.184704 treebag [50000]
# 3.563175 treebag [125000]
# Approximate Median Significance (the Higgs challenge scoring metric).
# pred/real are vectors of class labels "s"/"b"; weight gives event weights.
AMS <- function(pred, real, weight) {
  sig_pred <- which(pred == "s")  # indices predicted as signal
  sig_real <- which(real == "s")  # indices that truly are signal
  bkg_real <- which(real == "b")  # indices that truly are background
  # Weighted sums of true positives (s) and false positives (b).
  s <- sum(weight[intersect(sig_pred, sig_real)])
  b <- sum(weight[intersect(sig_pred, bkg_real)])
  b_tau <- 10  # regularisation term
  sqrt(2 * ((s + b + b_tau) * log(1 + s / (b + b_tau)) - s))
}
#===================
# Variable importance diagnostics (result tables kept as comments below).
# Model-free, ROC-based importance for each predictor.
ROC.imp <- filterVarImp(x = trainData[, -ncol(trainData)],
                        y = trainData$Label)
# ROC.imp
# b s
# X 0.5016555 0.5016555
# EventId 0.5016555 0.5016555
# DER_mass_MMC 0.6048648 0.6048648
# DER_mass_transverse_met_lep 0.7402352 0.7402352
# DER_mass_vis 0.5881139 0.5881139
# DER_pt_h 0.6188535 0.6188535
# DER_deltar_tau_lep 0.5133194 0.5133194
# DER_pt_tot 0.5111608 0.5111608
# DER_sum_pt 0.6324012 0.6324012
# DER_pt_ratio_lep_tau 0.6639560 0.6639560
# DER_met_phi_centrality 0.6385535 0.6385535
# PRI_tau_pt 0.6951879 0.6951879
# PRI_tau_eta 0.5013400 0.5013400
# PRI_tau_phi 0.5054810 0.5054810
# PRI_lep_pt 0.5261457 0.5261457
# PRI_lep_eta 0.5019938 0.5019938
# PRI_lep_phi 0.5042687 0.5042687
# PRI_met 0.5332799 0.5332799
# PRI_met_phi 0.5020346 0.5020346
# PRI_met_sumet 0.6082765 0.6082765
# PRI_jet_num 0.5899707 0.5899707
# PRI_jet_leading_pt 0.5414440 0.5414440
# PRI_jet_leading_eta 0.4976433 0.4976433
# PRI_jet_leading_phi 0.4999396 0.4999396
# PRI_jet_all_pt 0.6108971 0.6108971
# Model-based importance from the fitted bagged-tree model.
treebag.imp <- varImp(modelFit.treebag,
                      scale = FALSE)
# treebag.imp
# treebag variable importance
#
# only 20 most important variables shown (out of 25)
#
# Overall
# DER_mass_MMC 9102
# DER_mass_transverse_met_lep 6923
# PRI_tau_pt 6752
# DER_mass_vis 5813
# DER_met_phi_centrality 4571
# DER_pt_ratio_lep_tau 4503
# DER_deltar_tau_lep 3990
# DER_pt_h 3287
# DER_sum_pt 3101
# DER_pt_tot 2699
# X 2652
# EventId 2383
# PRI_met 2375
# PRI_lep_eta 2095
# PRI_tau_eta 2044
# PRI_jet_leading_eta 1948
# PRI_tau_phi 1869
# PRI_met_sumet 1827
# PRI_lep_phi 1721
# PRI_met_phi 1700
|
/Week5/TreeBag.R
|
no_license
|
nasimulhasan/Data_Science
|
R
| false
| false
| 13,101
|
r
|
# Kaggle Higgs Boson challenge: fit a bagged-tree ("treebag") classifier,
# predict in memory-friendly chunks, and build the submission file.
# NOTE(review): setwd() with an absolute local path makes this script
# machine-specific.
setwd("F:/Introduction to Data Science/Week5/Assessment/Kaggle_Higgs_Boson_Machine_Learning_Challenge")
getwd()
#load the updated dataset
training.elimNA <- read.csv("trainingElimNA.csv")
#discard weights
Label <- training.elimNA$Label
predictors <- training.elimNA[, 1:25]
tidyData <- cbind(predictors, Label)
dim(tidyData)
View(tidyData)
print(object.size(tidyData), units = "Mb")
#load the updated test data
testData <- read.csv("testElimNA.csv")
dim(testData)
View(testData)
names(testData)
print(object.size(testData), units = "Mb")
# Model fitting
library(caret)
library(kernlab)
library(ipred)
library(plyr)
set.seed(100)
# Stratified 20% sample keeps the treebag fit tractable.
partionData <- createDataPartition(y = tidyData$Label,
                                   p = 0.2,
                                   list = FALSE)
print(object.size(partionData), units = "Mb")
trainData <- tidyData[partionData, ]
dim(trainData)
names(trainData)
# NOTE(review): the hold-out rows come from training.elimNA (which still
# carries the Weight column), not tidyData -- presumably intentional so AMS
# can use the weights later; verify.
testData.train <- training.elimNA[-partionData, ]
dim(testData.train)
View(trainData)
names(trainData)
print(object.size(trainData), units = "Mb")
#Fit a model: bagged trees with 5-fold cross-validation
ctrl <- trainControl(method = "cv",
                     number = 5,
                     allowParallel = TRUE
                     )
modelFit.treebag <- train(Label~.,
                          method = "treebag",
                          data = trainData,
                          preProcess = c("scale", "center"),
                          tuneLength = 15, #how many candidate are evaluated
                          trControl = ctrl
                          )
modelFit.treebag
#predicting on training data
# Predictions are done in 50k-row slices to limit memory use.
treebag.predict.train1 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[1:50000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train2 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[50001:100000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train3 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[100001:150000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train4 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[150001:200000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
treebag.predict.train5 <- predict(modelFit.treebag,
                                  #training.elimNA[1:90000, ]
                                  training.elimNA[200001:250000, ]
                                  #type = "prob" #compute class probabilities from the model
                                  )
#create a vector of the predicted class
# NOTE(review): c() on factors yields their integer codes (1/2); the
# comparisons against as.numeric(Label) below rely on that.
treebag.predict.train <- c(treebag.predict.train1,
                           treebag.predict.train2,
                           treebag.predict.train3,
                           treebag.predict.train4,
                           treebag.predict.train5
                           )
#Accuracy measure
L <- as.numeric(training.elimNA$Label)
accuracy.train <- sum(treebag.predict.train == L) / length(treebag.predict.train)
confusionMatrix(treebag.predict.train, L)
#predicting on the training set test data
treebag.predict.train.test <- predict(modelFit.treebag,
                                      #training.elimNA[1:90000, ]
                                      testData.train[1:50000, ]
                                      #type = "prob" #compute class probabilities from the model
                                      )
treebag.predict.train.test2 <- predict(modelFit.treebag,
                                       #training.elimNA[1:90000, ]
                                       testData.train[50001:100000, ]
                                       #type = "prob" #compute class probabilities from the model
                                       )
treebag.predict.train.test3 <- predict(modelFit.treebag,
                                       #training.elimNA[1:90000, ]
                                       testData.train[100001:149999, ]
                                       #type = "prob" #compute class probabilities from the model
                                       )
treebag.predict.train.test <- c(treebag.predict.train.test,
                                treebag.predict.train.test2,
                                treebag.predict.train.test3
                                )
#Accuracy measure
L <- as.numeric(testData.train$Label)
accuracy.train.test <- sum(treebag.predict.train.test == L) / length(treebag.predict.train.test)
confusionMatrix(treebag.predict.train.test, L)
#predicting on test data and create submission file
treebag.predict <- predict(modelFit.treebag,
                           #training.elimNA[1:90000, ]
                           testData[1:50000, ]
                           #type = "prob" #compute class probabilities from the model
                           )
treebag.predict2 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[50001:100000, ] #actually test.elimNA
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict3 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[100001:150000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict4 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[150001:200000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict5 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[200001:250000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict6 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[250001:300000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict7 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[300001:350000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict8 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[350001:400000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict9 <- predict(modelFit.treebag,
                            #training.elimNA[1:90000, ]
                            testData[400001:450000, ]
                            #type = "prob" #compute class probabilities from the model
                            )
treebag.predict10 <- predict(modelFit.treebag,
                             #training.elimNA[1:90000, ]
                             testData[450001:500000, ]
                             #type = "prob" #compute class probabilities from the model
                             )
treebag.predict11 <- predict(modelFit.treebag,
                             #training.elimNA[1:90000, ]
                             testData[500001:550000, ]
                             #type = "prob" #compute class probabilities from the model
                             )
#create column vector of the predicted class
treebag.predict.test <- c(treebag.predict,
                          treebag.predict2,
                          treebag.predict3,
                          treebag.predict4,
                          treebag.predict5,
                          treebag.predict6,
                          treebag.predict7,
                          treebag.predict8,
                          treebag.predict9,
                          treebag.predict10,
                          treebag.predict11
                          )
# Threshold to set results
threshold = 0.002
# Shift factor codes (1/2) to 0/1 so the threshold below selects class "s".
treebag.predict.test <- treebag.predict.test - 1
predicted=rep("b",550000)
predicted[treebag.predict.test>=threshold]="s"
weightRank = rank(treebag.predict.test, ties.method= "random")
submission = data.frame(EventId = testData$EventId, RankOrder = weightRank, Class = predicted)
write.csv(submission, "submission_version1.csv", row.names=FALSE)
# ROC/AMS evaluation. The three assignments below were swapped in and out
# across runs; only the last one takes effect.
train_ams <- as.numeric(training.elimNA$Label) - 1
train_ams <- as.numeric(testData.train$Label) - 1
train_ams <- L - 1
predict_ams <- as.numeric(treebag.predict.train) - 1
library(pROC)
auc = roc(train_ams, predict_ams)
plot(auc, print.thres=TRUE)
#Area under the curve: 0.8117 gbm [train 75000]
#Area under the curve: 0.8354 treebag [train 50000]
# Threshold to set results
threshold = 0.002
table(train_ams, predict_ams >= threshold)
# train_ams FALSE TRUE
# 0 143900 20433
# 1 27199 58468
#Reference - treebag
#Prediction b s
#b 148811 20111
#s 15522 65556
# This code creates a field for predicted response so you can calculate AMS
# Alternative lengths were used for different evaluation sets; only the last
# assignment takes effect.
predicted <- rep("b",250000)
predicted <- rep("b",199999)
predicted <- rep("b",50000)
predicted[predict_ams >= threshold] <- "s"
AMS(pred=predicted,real=training.elimNA$Label,weight=training.elimNA$Weight)
AMS(pred=predicted,
    real=testData.train$Label,
    weight=testData.train$Weight)
AMS(pred=predicted,
    real=testData[1:50000, 25],
    weight=testData[1:50000, 24])
# 2.61082 svm
# [1] 2.636187 gbm
# 2.846631 gbm[train 50000]
# 2.87835 gbm[train 75000]
# 3.184704 treebag [50000]
# 3.563175 treebag [125000]
# Approximate Median Significance (the Higgs challenge scoring metric).
# pred/real are vectors of class labels "s"/"b"; weight gives event weights.
AMS <- function(pred, real, weight) {
  sig_pred <- which(pred == "s")  # indices predicted as signal
  sig_real <- which(real == "s")  # indices that truly are signal
  bkg_real <- which(real == "b")  # indices that truly are background
  # Weighted sums of true positives (s) and false positives (b).
  s <- sum(weight[intersect(sig_pred, sig_real)])
  b <- sum(weight[intersect(sig_pred, bkg_real)])
  b_tau <- 10  # regularisation term
  sqrt(2 * ((s + b + b_tau) * log(1 + s / (b + b_tau)) - s))
}
#===================
# Variable importance diagnostics (result tables kept as comments below).
# Model-free, ROC-based importance for each predictor.
ROC.imp <- filterVarImp(x = trainData[, -ncol(trainData)],
                        y = trainData$Label)
# ROC.imp
# b s
# X 0.5016555 0.5016555
# EventId 0.5016555 0.5016555
# DER_mass_MMC 0.6048648 0.6048648
# DER_mass_transverse_met_lep 0.7402352 0.7402352
# DER_mass_vis 0.5881139 0.5881139
# DER_pt_h 0.6188535 0.6188535
# DER_deltar_tau_lep 0.5133194 0.5133194
# DER_pt_tot 0.5111608 0.5111608
# DER_sum_pt 0.6324012 0.6324012
# DER_pt_ratio_lep_tau 0.6639560 0.6639560
# DER_met_phi_centrality 0.6385535 0.6385535
# PRI_tau_pt 0.6951879 0.6951879
# PRI_tau_eta 0.5013400 0.5013400
# PRI_tau_phi 0.5054810 0.5054810
# PRI_lep_pt 0.5261457 0.5261457
# PRI_lep_eta 0.5019938 0.5019938
# PRI_lep_phi 0.5042687 0.5042687
# PRI_met 0.5332799 0.5332799
# PRI_met_phi 0.5020346 0.5020346
# PRI_met_sumet 0.6082765 0.6082765
# PRI_jet_num 0.5899707 0.5899707
# PRI_jet_leading_pt 0.5414440 0.5414440
# PRI_jet_leading_eta 0.4976433 0.4976433
# PRI_jet_leading_phi 0.4999396 0.4999396
# PRI_jet_all_pt 0.6108971 0.6108971
# Model-based importance from the fitted bagged-tree model.
treebag.imp <- varImp(modelFit.treebag,
                      scale = FALSE)
# treebag.imp
# treebag variable importance
#
# only 20 most important variables shown (out of 25)
#
# Overall
# DER_mass_MMC 9102
# DER_mass_transverse_met_lep 6923
# PRI_tau_pt 6752
# DER_mass_vis 5813
# DER_met_phi_centrality 4571
# DER_pt_ratio_lep_tau 4503
# DER_deltar_tau_lep 3990
# DER_pt_h 3287
# DER_sum_pt 3101
# DER_pt_tot 2699
# X 2652
# EventId 2383
# PRI_met 2375
# PRI_lep_eta 2095
# PRI_tau_eta 2044
# PRI_jet_leading_eta 1948
# PRI_tau_phi 1869
# PRI_met_sumet 1827
# PRI_lep_phi 1721
# PRI_met_phi 1700
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggRaster.R
\name{aggRaster}
\alias{aggRaster}
\title{aggRaster}
\usage{
aggRaster(x, func = "mean", inmem = FALSE, outdir = getwd(), ...)
}
\arguments{
\item{x, }{Raster* object}
\item{func, }{character, 'mean' or 'sum'}
\item{inmem, }{logical, if FALSE the result will be stored as a netCDF file}
\item{outdir, }{character, directory path where the output will be stored if inmem=FALSE}
\item{...}{additional arguments as for writeRaster, must include varnam, longname, and varunit}
}
\value{
Rasterbrick object with monthly z time dimension
}
\description{
Aggregate raster bricks from daily to monthly in parallel
}
\examples{
# optional: run beginCluster() first, for parallel computing
aggRaster()
}
\keyword{aggregation}
\keyword{monthly}
|
/man/aggRaster.Rd
|
no_license
|
dsval/splashTools
|
R
| false
| true
| 827
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aggRaster.R
\name{aggRaster}
\alias{aggRaster}
\title{aggRaster}
\usage{
aggRaster(x, func = "mean", inmem = FALSE, outdir = getwd(), ...)
}
\arguments{
\item{x, }{Raster* object}
\item{func, }{character, 'mean' or 'sum'}
\item{inmem, }{logical, if FALSE the result will be stored as a netCDF file}
\item{outdir, }{character, directory path where the output will be stored if inmem=FALSE}
\item{...}{additional arguments as for writeRaster, must include varnam, longname, and varunit}
}
\value{
Rasterbrick object with monthly z time dimension
}
\description{
Aggregate raster bricks from daily to monthly in parallel
}
\examples{
# optional: run beginCluster() first, for parallel computing
aggRaster()
}
\keyword{aggregation}
\keyword{monthly}
|
#' Calculate the filtering distribution for a specified set of parents and a fixed delta.
#'
#' Forward-filters a dynamic linear model (West & Harrison style) with an
#' unknown observational precision phi and a discount factor delta controlling
#' the evolution variance.
#'
#' @param Yt the vector of observed time series, length T
#' @param Ft the matrix of covariates, dim = number of thetas (p) x number of time points (T), usually a column of 1s to represent an intercept and the time series of the parent nodes.
#' @param delta discount factor (scalar).
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return
#' mt the vector or matrix of the posterior mean (location parameter), dim = p x T.
#' Ct the posterior scale matrix, dim = p x p x T, Ct = CSt * S, where S is a point estimate for the observational variance phi^-1.
#' CSt the posterior scale matrix, dim = p x p x T, Ct = CSt * S, where S is a point estimate for the observational variance phi^-1.
#' Rt the prior scale matrix, dim = p x p x T. Rt = RSt * S_{t-1}, where S_{t-1} is a point estimate for the observational variance phi^-1 at the previous time point.
#' RSt the prior scale matrix, dim = p X p X T. Rt = RSt * S_{t-1}, where S_{t-1} is a point estimate for the observational variance phi^-1 at the previous time point.
#' nt and dt the vectors of the hyperparameters for the precision phi with length T.
#' S the vector of the point estimate for the observational variance phi^-1 with length T.
#' ft the vector of the one-step forecast location parameter with length T.
#' Qt the vector of the one-step forecast scale parameter with length T.
#' ets the vector of the standardised residuals with length T
#' lpl the vector of the Log Predictive Likelihood with length T.
#'
#' @export
dlm.filt.rh <- function(Yt, Ft, delta, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001){
  # Expand the scalar priors to the dimension implied by Ft (p thetas).
  CS0 = CS0*diag(nrow(Ft))
  m0 = rep(m0,nrow(Ft))
  Nt = length(Yt)+1 # the length of the time series + t0
  p = nrow(Ft) # the number of parents and one for an intercept (i.e. the number of thetas)
  # Prepend a dummy time 0 so that array index t refers to observation t-1.
  Y = numeric(Nt)
  Y[2:Nt] = Yt
  F1 = array(0, dim=c(p,Nt))
  F1[,2:Nt] = Ft
  # Set up allocation matrices, including the priors
  mt = array(0,dim=c(p,Nt))
  mt[,1]=m0
  Ct = array(0,dim=c(p,p,Nt))
  Ct[,,1] = CS0
  CSt = array(0,dim=c(p,p,Nt))
  Rt = array(0,dim=c(p,p,Nt))
  RSt = array(0,dim=c(p,p,Nt))
  nt = numeric(Nt)
  nt[1]=n0
  dt = numeric(Nt)
  dt[1]=d0
  S = numeric(Nt)
  S[1]=dt[1]/nt[1]
  ft = numeric(Nt)
  Qt = numeric(Nt)
  ets = numeric(Nt)
  lpl = numeric(Nt)
  # Filtering
  for (t in 2:Nt){
    # Posterior at {t-1}: (theta_{t-1}|y_{t-1}) ~ T_{n_{t-1}}[m_{t-1}, C_{t-1} = C*_{t-1} x d_{t-1}/n_{t-1}]
    # Prior at {t}: (theta_{t}|y_{t-1}) ~ T_{n_{t-1}}[m_{t-1}, R_{t}]
    # RSt ~ C*_{t-1}/delta  (discounting inflates the prior scale)
    RSt[,,t] = Ct[,,(t-1)] / (S[(t-1)]*delta)
    Rt[,,t] = RSt[,,t] * (S[(t-1)])
    # One-step forecast: (Y_{t}|y_{t-1}) ~ T_{n_{t-1}}[f_{t}, Q_{t}]
    ft[t] = t(F1[,t]) %*% mt[,(t-1)]
    QSt = as.vector(1 + t(F1[,t]) %*% RSt[,,t] %*% F1[,t])
    Qt[t] = QSt * S[(t-1)]
    et = Y[t] - ft[t]   # one-step forecast error
    ets[t] = et / sqrt(Qt[t])
    # Posterior at t: (theta_{t}|y_{t}) ~ T_{n_{t}}[m_{t}, C_{t}]
    At = (RSt[,,t] %*% F1[,t])/QSt   # adaptive (gain) vector
    mt[,t] = mt[,(t-1)] + (At*et)
    nt[t] = nt[(t-1)] + 1
    dt[t] = dt[(t-1)] + (et^2)/QSt
    S[t]=dt[t]/nt[t]
    CSt[,,t] = RSt[,,t] - (At %*% t(At))*QSt
    Ct[,,t] = S[t]*CSt[,,t]
    # Log Predictive Likelihood (log density of the Student-t one-step forecast)
    lpl[t] = lgamma((nt[(t-1)]+1)/2)-lgamma(nt[(t-1)]/2)-0.5*log(pi*nt[(t-1)]*Qt[t])-((nt[(t-1)]+1)/2)*log(1+(1/nt[(t-1)])*et^2/Qt[t])
  }
  # Drop the dummy t0 slice before returning.
  mt = mt[,2:Nt]; Ct = Ct[,,2:Nt]; CSt = CSt[,,2:Nt]; Rt = Rt[,,2:Nt]; RSt = RSt[,,2:Nt]
  nt = nt[2:Nt]; dt = dt[2:Nt]; S = S[2:Nt]; ft = ft[2:Nt]; Qt = Qt[2:Nt]; ets = ets[2:Nt]; lpl = lpl[2:Nt]
  filt.output <- list(mt=mt,Ct=Ct,CSt=CSt,Rt=Rt,RSt=RSt,nt=nt,dt=dt,S=S,ft=ft,Qt=Qt,ets=ets,lpl=lpl)
  return(filt.output)
}
#' A function to generate all the possible models.
#'
#' @param Nn number of nodes; the number of columns of the dataset can be used.
#' @param node the node of interest (i.e., the node to find parents for).
#'
#' @return
#' output.model = a matrix with dimensions (nn-1) x number of models, where number of models = 2^(nn-1).
#'
#' @export
model.generator<-function(Nn,node){
  # Create the model 'no parents' (the first column of the matrix is all zeros)
  empt=rep(0,(Nn-1))
  for (k in 1:(Nn-1)) {
    # Calculate all combinations when number of parents = k
    #m=combn(c(1:Nn)[-node],k)
    # Special case: for Nn==2 and node==1 the candidate parent set is the
    # single node 2, and combn(2, 1) would wrongly expand the scalar 2 to
    # seq_len(2) -- so the result is hard-coded here.
    # NOTE(review): for Nn==2 and node==2 the scalar falls through to combn(1, 1),
    # which happens to give the right answer; verify if more special cases are needed.
    if (Nn==2 & node==1) {
      model = matrix(c(0,2),1,2)
    } else {
      m=combn(c(1:Nn)[-node],k)
      # Expand the array so that unconnected edges are represented by zeros
      empt.new=array(0,dim=c((Nn-1),ncol(m)))
      empt.new[1:k,]=m
      # Bind the matrices together; the next set of models are added to this matrix
      model=cbind(empt,empt.new)
      empt=model
    }
  }
  colnames(model)=NULL
  output.model<-model
  return(output.model)
}
#' A function for an exhaustive search, calculates the optimum value of the discount factor.
#'
#' For every possible parent set of `node`, filters the series with each
#' candidate discount factor and keeps the delta maximising the log
#' predictive likelihood (LPL) over the post-burn-in window.
#'
#' @param Data Dataset with dimension number of time points Nt x Number of nodes Nn.
#' @param node node of interest.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return
#' model.store a matrix with the model, LPL and chosen discount factor for all possible models.
#' runtime an estimate of the run time of the function, using proc.time().
#' @export
exhaustive.search <- function(Data, node, nbf=15, delta=seq(0.5,1,0.01), cpp=TRUE, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  ptm=proc.time()
  Nn=ncol(Data) # the number of nodes
  Nm=2^(Nn-1) # the number of models per node
  M=model.generator(Nn,node) # Generate all the possible models
  models=rbind(1:Nm,M) # Label each model with a 'model number'
  Yt=Data[,node] # the time series of the node we wish to find parents for
  Nt=length(Yt) # the number of time points
  nd=length(delta) # the number of deltas
  # Create empty arrays for the lpl scores and the optimum deltas
  lpldet=array(NA,c(Nm,nd))
  lplmax=rep(NA,Nm)
  DF.hat=rep(NA,Nm)
  # Now create Ft: an intercept column plus the parents' time series.
  for (z in 1:Nm) {
    pars=models[(2:Nn),z]
    pars=pars[pars!=0]
    Ft=array(1,dim=c(Nt,length(pars)+1))
    if (ncol(Ft)>1) {
      Ft[,2:ncol(Ft)]=Data[,pars] # selects parents
    }
    # Calculate the log predictive likelihood, for each value of delta,
    # summing only from the burn-in point nbf onwards.
    for (j in 1:nd) {
      if (cpp) {
        # new C++ implementation
        lpl=c(dlmFiltCpp(Yt, t(Ft), delta[j], m0, CS0, n0, d0))
        lpldet[z,j]=sum(lpl[nbf:Nt])
      } else {
        # original native R
        a=dlm.filt.rh(Yt, t(Ft), delta=delta[j], m0=m0, CS0=CS0, n0=n0, d0=d0)
        lpldet[z,j]=sum(a$lpl[nbf:Nt])
      }
    }
    lplmax[z]=max(lpldet[z,],na.rm=TRUE)
    # BUGFIX: use which.max so that ties between deltas yield a single value
    # (the logical-subset form delta[lpldet[z,]==max(...)] returned multiple
    # values on ties, breaking the scalar assignment). which.max picks the
    # first maximum and ignores NAs.
    DF.hat[z]=delta[which.max(lpldet[z,])]
  }
  # Output model.store
  model.store=rbind(models,lplmax,DF.hat)
  rownames(model.store)=NULL
  runtime=(proc.time()-ptm)
  output<-list(model.store=model.store,runtime=runtime)
  return(output)
}
#' Mean centers timeseries in a 2D array timeseries x nodes,
#' i.e. each timeseries of each node has mean of zero.
#'
#' @param X 2D array with dimensions timeseries x nodes.
#'
#' @return M 2D array of the same dimensions, columns centered to mean zero.
#' @export
center <- function(X) {
  n_node <- ncol(X)
  out <- matrix(NA, nrow(X), n_node)
  # Center each node's timeseries independently (no scaling).
  for (j in seq_len(n_node)) {
    out[, j] <- scale(X[, j], center = TRUE, scale = FALSE)
  }
  return(out)
}
#' Estimate subject's full network: runs exhaustive search on every node.
#'
#' @param X array with dimensions timeseries x nodes.
#' @param id subject ID. If set, results are saved to a txt file.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return store list with results (models, winner, adj, thr).
#' @export
subject <- function(X, id=NULL, nbf=15, delta=seq(0.5,1,0.01), cpp=TRUE, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  n_nodes <- ncol(X)
  n_models <- 2^(n_nodes - 1)  # candidate parent sets per node
  models <- array(0, dim = c(n_nodes + 2, n_models, n_nodes))
  for (k in seq_len(n_nodes)) {
    res <- exhaustive.search(X, k, nbf = nbf, delta = delta, cpp = cpp,
                             m0 = m0, CS0 = CS0, n0 = n0, d0 = d0)
    models[, , k] <- res$model.store
    if (!is.null(id)) {
      # Persist this node's full model table for later re-assembly.
      write(t(models[, , k]), file = sprintf("%s_node_%03d.txt", id, k),
            ncolumns = n_models)
    }
  }
  store <- list()
  store$models <- models
  store$winner <- getWinner(models, n_nodes)
  store$adj <- getAdjacency(store$winner, n_nodes)
  store$thr <- getThreshAdj(store$adj, store$models, store$winner)
  return(store)
}
#' Runs exhaustive search on a single node and saves results in txt file.
#'
#' @param X array with dimensions timeseries x nodes.
#' @param n node number.
#' @param id subject ID. If set, results are saved to a txt file.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return store list with results.
#' @export
node <- function(X, n, id=NULL, nbf=15, delta=seq(0.5,1,0.01), cpp=TRUE, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  n_models <- 2^(ncol(X) - 1)  # candidate parent sets for this node
  store <- exhaustive.search(X, n, nbf = nbf, delta = delta, cpp = cpp,
                             m0 = m0, CS0 = CS0, n0 = n0, d0 = d0)
  if (!is.null(id)) {
    # Persist the node's full model table alongside other nodes of this subject.
    write(t(store$model.store), file = sprintf("%s_node_%03d.txt", id, n),
          ncolumns = n_models)
  }
  return(store)
}
#' Reads single subject's network from txt files.
#'
#' @param id identifier to select all subjects' nodes, e.g. pattern containing subject ID and session number.
#' @param nodes number of nodes.
#'
#' @return store list with results (models, winner, adj, thr).
#' @export
read.subject <- function(id, nodes) {
  n_models <- 2^(nodes - 1)
  models <- array(0, dim = c(nodes + 2, n_models, nodes))
  for (k in seq_len(nodes)) {
    # Match by glob so any session suffix between id and the node tag resolves.
    fname <- list.files(pattern = glob2rx(sprintf("%s*_node_%03d.txt", id, k)))
    models[, , k] <- as.matrix(read.table(fname))
  }
  store <- list()
  store$models <- models
  store$winner <- getWinner(models, nodes)
  store$adj <- getAdjacency(store$winner, nodes)
  store$thr <- getThreshAdj(store$adj, store$models, store$winner)
  return(store)
}
#' Select the winning model(s) by maximizing the log predictive likelihood.
#'
#' Row \code{nodes + 1} of a model table holds the LPL score; the column with
#' the highest score wins. Accepts either a single model table (2D) or one
#' table per node (3D).
#'
#' @param models 2D matrix, or 3D models x node.
#' @param nodes number of nodes.
#'
#' @return winner array with highest scored model(s).
#' @export
getWinner <- function(models, nodes) {
  lpl_row <- nodes + 1  # row index of the LPL score
  n_dims <- length(dim(models))
  if (n_dims == 2) {
    winner <- models[, which.max(models[lpl_row, ])]
  } else if (n_dims == 3) {
    # one winning column per node
    winner <- array(0, dim = c(nodes + 2, nodes))
    for (nd in seq_len(nodes)) {
      winner[, nd] <- models[, which.max(models[lpl_row, , nd]), nd]
    }
  }
  return(winner)
}
#' Build the adjacency matrix plus per-edge LPL and discount factors of
#' the winning models.
#'
#' Column n of \code{winner} encodes node n's winning model: rows 2..nodes are
#' parent indices (0 = unused slot), row nodes+1 the LPL, row nodes+2 the
#' discount factor.
#'
#' @param winner, 2D matrix.
#' @param nodes number of nodes.
#'
#' @return adj, 2D adjacency matrix.
#' @export
getAdjacency <- function(winner, nodes) {
  am <- matrix(0, nodes, nodes)
  lpl <- matrix(NA, nodes, nodes)
  df <- matrix(NA, nodes, nodes)
  for (child in seq_len(nodes)) {
    parents <- winner[2:nodes, child]
    parents <- parents[parents > 0]  # drop empty parent slots
    am[parents, child] <- 1
    lpl[parents, child] <- winner[nodes + 1, child]
    df[parents, child] <- winner[nodes + 2, child]
  }
  return(list(am = am, lpl = lpl, df = df))
}
#' Plots network as graph.
#'
#' @param adj 2D adjacency matrix.
#'
#' @export
plotNet <- function(adj) {
  # requires the igraph package; graph.adjacency()/plot.igraph() are the
  # legacy igraph names -- NOTE(review): superseded by
  # graph_from_adjacency_matrix() in newer igraph releases, confirm version
  plot.igraph(graph.adjacency(adj, mode="directed", weighted=T, diag=F))
}
#' Plots network as adjacency matrix.
#'
#' @param adj 2D adjacency matrix.
#' @param col color palette.
#' @param lab labels as character array.
#' @param lim vector with two min and max values for color scaling.
#' @param diag true or false, if true showing values on the diagonal line.
#'
#' @export
plotMat <- function(adj, col=heat.colors(12), lab=NULL, lim = c(0,1), diag=FALSE) {
  if (!diag) {
    # blank out self-edges so the diagonal is not drawn
    adj[row(adj) == col(adj)]=NA
  }
  n=nrow(adj)
  # transpose + reverse columns so row 1 of adj is rendered at the top
  adj_ = t(apply(adj, 2, rev))
  par(mai=c(1,1,0.5,1.1))
  image(adj_, col=col, axes=F, zlim=lim)
  # axis labels: left side runs top-to-bottom, bottom side left-to-right
  mtext(text=rev(lab), side=2, line=0.3, at=seq(0,1,1/(n-1)), las=1, cex=0.8)
  mtext(text=lab, side=1, line=0.3, at=seq(0,1,1/(n-1)), las=2, cex=0.8)
  # image.plot() comes from the 'fields' package; draws the color legend only
  image.plot(adj_, legend.only=T, col=col, zlim=lim)
  grid(n, n, lwd = 1)
}
#' Performs a binomial test with FDR correction for network edges in an
#' adjacency matrix.
#'
#' Tests, for every edge position, whether the observed edge frequency across
#' subjects (and runs) deviates from the overall edge probability p0.
#'
#' @param adj adjacency matrix, nodes x nodes x subj, or nodes x nodes x runs x subj.
#' @param alter type of binomial test, "two.sided" (default), "less", or "greater"
#'
#' @return store list with results: p0 (H0 edge probability), p (raw p-values),
#'   p_fdr (FDR-adjusted p-values), adj (edge proportions), adj_fdr
#'   (proportions, NA where not significant at FDR 0.05).
#' @export
binom.nettest <- function(adj, alter="two.sided") {
  mydim=dim(adj)
  # guard: previously a 2D (or other) input fell through both branches and
  # left N / N_Comp / adj_ undefined, failing later with a confusing error
  if (!length(mydim) %in% c(3, 4)) {
    stop("adj must be a 3D (nodes x nodes x subjects) or 4D (nodes x nodes x runs x subjects) array",
         call. = FALSE)
  }
  M = sum(adj) # total edges over all N subjects, all R(R-1) edges
  if (length(mydim) == 3) { # without runs
    N=mydim[3] # No. of subjects
    N_Comp=mydim[1]
    adj_ = apply(adj, c(1,2), sum)
  } else { # with runs
    N=mydim[4] # No. of subjects
    N_runs=mydim[3]
    N_Comp=mydim[1]
    N=N*N_runs # adjust N for no. of runs
    adj_ = apply(adj, c(1,2), sum) # sum across subjects and runs
  }
  # binomial test for every edge occurrence; the diagonal is tested too,
  # which is harmless since self-edges are structurally always zero
  p0 = M/N/N_Comp/(N_Comp-1) # H0 edge probability
  p = array(NA, dim=c(N_Comp,N_Comp))
  for (i in 1:N_Comp) {
    for (j in 1:N_Comp) {
      tmp=binom.test(adj_[i,j],N,p=p0, alternative=alter)
      p[i,j]=tmp$p.value
    }
  }
  # FDR correction across all edge tests
  p_fdr=matrix(p.adjust(p, method = "fdr"),N_Comp,N_Comp)
  adj_fdr=adj_
  adj_fdr[p_fdr>=0.05]=NA
  store=list()
  store$p0=p0
  store$p=p
  store$p_fdr=p_fdr
  store$adj=adj_/N
  store$adj_fdr=adj_fdr/N # significant proportions
  return(store)
}
#' Reshape a 2D concatenated time series into 3D by subject.
#'
#' The input stacks all subjects vertically (subject 1's V volumes, then
#' subject 2's, ...); the output separates them along a third dimension.
#'
#' @param ts a 2D time series volumes x nodes.
#' @param N No. of subjects.
#' @param V No. of volumes.
#'
#' @return M 3D matrix, time series x nodes x subjects.
#' @export
reshapeTs <- function(ts, N, V) {
  n_nodes <- ncol(ts)
  out <- array(NA, dim = c(V, n_nodes, N))
  for (subj in seq_len(N)) {
    rows <- seq.int(from = (subj - 1) * V + 1, length.out = V)
    out[, , subj] <- ts[rows, ]
  }
  return(out)
}
#' Mean correlation matrix of multi-subject time series.
#'
#' Computes each subject's node-by-node correlation matrix and averages the
#' matrices element-wise across subjects.
#'
#' @param ts a 3D time series time series x nodes x subjects.
#'
#' @return M correlation matrix.
#' @export
corTs <- function(ts) {
  dims <- dim(ts)
  n_subj <- dims[3]
  n_nodes <- dims[2]
  R <- array(NA, dim = c(n_nodes, n_nodes, n_subj))
  for (i in seq_len(n_subj)) {
    R[, , i] <- cor(ts[, , i])
  }
  # element-wise mean across the subject dimension
  M <- apply(R, c(1, 2), mean)
  return(M)
}
#' Get a specific parent model from the full model table.
#'
#' Finds the column of \code{models} whose parent slots (rows 2..Nn) match
#' \code{parents}, padded with trailing zeros. Matching is positional, i.e.
#' parents must be given in the same (ascending) order used by
#' \code{model.generator()}.
#'
#' @param models a 2D model matrix.
#' @param parents a vector with parent nodes.
#'
#' @return mod specific parent model (the matching column of \code{models}).
#' @export
getModel <- function(models, parents) {
  Nn = nrow(models) - 2 # No. of nodes
  Nm = ncol(models)     # No. of models
  # guard: previously rep() with a negative count errored cryptically here
  if (length(parents) > Nn - 1) {
    stop("more parents supplied than the model space allows", call. = FALSE)
  }
  parents = c(parents, rep(0, (Nn - 1) - length(parents))) # pad with zeros
  for (m in seq_len(Nm)) {
    if (all(models[2:Nn, m] == parents)) {
      # model columns are unique, so return as soon as we find the match
      return(models[, m])
    }
  }
  # previously a missing match surfaced as "object 'mod' not found"
  stop("no model with the requested parent set was found", call. = FALSE)
}
#' Restructure per-subject results into group-level arrays.
#'
#' Stacks each subject's adjacency, LPL, discount-factor and thresholded
#' matrices along a final subject dimension for easier group analysis.
#'
#' @param subj a list of subjects.
#'
#' @return group a list.
#' @export
group <- function(subj) {
  n_nodes <- ncol(subj[[1]]$adj$am)
  n_subj <- length(subj)
  blank <- array(NA, dim = c(n_nodes, n_nodes, n_subj))
  am <- lpl <- df <- tam <- tbi <- blank
  tlpls <- array(NA, dim = c(n_nodes, n_nodes, 2, n_subj))
  for (s in seq_len(n_subj)) {
    am[, , s]  <- subj[[s]]$adj$am
    lpl[, , s] <- subj[[s]]$adj$lpl
    df[, , s]  <- subj[[s]]$adj$df
    # thresholded measures
    tam[, , s] <- subj[[s]]$thr$am
    tbi[, , s] <- subj[[s]]$thr$bi
    tlpls[, , , s] <- subj[[s]]$thr$lpls
  }
  return(list(am = am, lpl = lpl, df = df, tam = tam, tbi = tbi, tlpls = tlpls))
}
#' Get thresholded adjacency network.
#'
#' Resolves bidirectional edges (i <-> j) by comparing the joint model's LPL
#' against the better unidirectional alternative; if the bidirectional model
#' does not win by a Bayes-factor margin, only the stronger direction is kept.
#'
#' @param adj list with network adjacency from getAdjacency().
#' @param models matrix 3D with full model estimates.
#' @param winner matrix 2D with winning models.
#'
#' @return thr list with thresholded network adjacency.
#' @export
getThreshAdj <- function(adj, models, winner) {
  Nn = ncol(adj$am)
  # determine bidirectional edges
  bi = array(0, dim=c(Nn, Nn))
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      if (adj$am[i,j] == 1 & adj$am[j,i] == 1) {
        bi[i,j] = 1
      }
    }
  }
  # Calculate models: each bidirectional pair is handled once (upper triangle)
  B=bi*upper.tri(bi)
  # lpls[,,1]: LPL of the bidirectional model; lpls[,,2]: unidirectional LPLs
  lpls=array(NA, dim=c(Nn, Nn, 2))
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      # if bidirectional, calculate 3 models
      # A+B:i<>j, A:i>j, B:i<j
      # A+B: LPLj + LPLi
      # A  : LPLj + LPLi-j
      # B  : LPLi + LPLj-i
      if (B[i,j] == 1) {
        # bidirectional LPL
        lpls[i,j,1] = lpls[j,i,1] = adj$lpl[i,j] + adj$lpl[j,i]
        # uni i->j: re-score node i's winner with node j removed from parents
        p = winner[,i][2:Nn]
        p = p[p != j & p!= 0] # remove node j
        lpls[i,j,2] = adj$lpl[i,j] + getModel(models[,,i], p)[Nn+1]
        # uni j->i: re-score node j's winner with node i removed from parents
        p = winner[,j][2:Nn]
        p = p[p != i & p!= 0] # remove node i
        lpls[j,i,2] = adj$lpl[j,i] + getModel(models[,,j], p)[Nn+1]
      }
    }
  }
  # am matrix: keep only the stronger direction unless the bidirectional
  # model beats the best unidirectional one by more than BF log-units
  am=adj$am
  BF=20 # bayes factor threshold (on the log scale)
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      if (B[i,j] == 1) {
        if (lpls[i,j,1] - BF <= max(lpls[i,j,2], lpls[j,i,2]) ) {
          if (lpls[i,j,2] > lpls[j,i,2]) {
            am[i,j] = 1; am[j,i] = 0
          } else {
            am[i,j] = 0; am[j,i] = 1
          }
        }
      }
    }
  }
  thr=list()
  thr$bi=bi # bidirectional edges
  thr$lpls=lpls # lpls
  thr$am=am # adjacency matrix (thresholded)
  return(thr)
}
#' Classification performance of estimated networks against ground truth.
#'
#' Returns, per subject, the vector (TPR, specificity, PPV, NPV, FPR, FNR,
#' FDR, accuracy). True negatives exclude the diagonal (self-edges).
#'
#' @param x estimated binary network matrix.
#' @param xtrue, true binary network matrix.
#'
#' @return perf vector (one row of 8 measures per subject).
#' @export
perf <- function(x, xtrue) {
  dims <- dim(x)
  n_nodes <- dims[1]
  if (length(dims) == 3) {
    n_subj <- dims[3]
  } else if (length(dims) == 2) {
    # promote a single matrix to a one-subject array
    x <- array(x, dim = c(n_nodes, n_nodes, 1))
    n_subj <- 1
  }
  out <- array(NA, dim = c(n_subj, 8))
  for (s in seq_len(n_subj)) {
    # see https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    est <- x[, , s]
    TP <- sum(est & xtrue)
    FP <- sum((est - xtrue) == 1)
    FN <- sum((xtrue - est) == 1)
    TN <- sum(!est & !xtrue) - ncol(est)  # exclude the diagonal
    out[s, ] <- c(
      TP / (TP + FN),              # 1 sensitivity / TPR
      TN / (TN + FP),              # 2 specificity
      TP / (TP + FP),              # 3 PPV / precision
      TN / (TN + FN),              # 4 NPV
      FP / (FP + TN),              # 5 FPR
      FN / (TP + FN),              # 6 FNR
      FP / (TP + FP),              # 7 FDR
      (TP + TN) / (TP + FP + FN + TN)  # 8 accuracy
    )
  }
  return(out)
}
|
/R/mdm.R
|
no_license
|
AlirezaDanesh1350/mdm
|
R
| false
| false
| 20,905
|
r
|
#' Calculate the filtering distribution for a specified set of parents and a fixed delta.
#'
#' Forward-filtering recursion for a dynamic linear model with an unknown
#' observational precision phi and a discount-factor evolution of the state
#' covariance (West & Harrison style DLM updating).
#'
#' @param Yt the vector of observed time series, length T
#' @param Ft the matrix of covariates, dim = number of thetas (p) x number of time points (T), usually a column of 1s to represent an intercept and the time series of the parent nodes.
#' @param delta discount factor (scalar).
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return
#' mt the vector or matrix of the posterior mean (location parameter), dim = p x T.
#' Ct the posterior scale matrix, dim = p x p x T, Ct = CSt * S, where S is a point estimate for the observational variance phi^-1.
#' CSt the posterior scale matrix, dim = p x p x T, Ct = CSt * S, where S is a point estimate for the observational variance phi^-1.
#' Rt the prior scale matrix, dim = p x p x T. Rt = RSt * S_{t-1}, where S_{t-1} is a point estimate for the observational variance phi^-1 at the previous time point.
#' RSt the prior scale matrix, dim = p X p X T. Rt = RSt * S_{t-1}, where S_{t-1} is a point estimate for the observational variance phi^-1 at the previous time point.
#' nt and dt the vectors of the hyperparameters for the precision phi with length T.
#' S the vector of the point estimate for the observational variance phi^-1 with length T.
#' ft the vector of the one-step forecast location parameter with length T.
#' Qt the vector of the one-step forecast scale parameter with length T.
#' ets the vector of the standardised residuals with length T
#' lpl the vector of the Log Predictive Likelihood with length T.
#'
#' @export
dlm.filt.rh <- function(Yt, Ft, delta, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001){
  # expand the scalar prior hyperparameters to the state dimension p
  CS0 = CS0*diag(nrow(Ft))
  m0 = rep(m0,nrow(Ft))
  Nt = length(Yt)+1 # the length of the time series + t0
  p = nrow(Ft) # the number of parents and one for an intercept (i.e. the number of thetas)
  # zero-pad observations and covariates so index 1 corresponds to t=0
  Y = numeric(Nt)
  Y[2:Nt] = Yt
  F1 = array(0, dim=c(p,Nt))
  F1[,2:Nt] = Ft
  # Set up allocation matrices, including the priors
  mt = array(0,dim=c(p,Nt))
  mt[,1]=m0
  Ct = array(0,dim=c(p,p,Nt))
  Ct[,,1] = CS0
  CSt = array(0,dim=c(p,p,Nt))
  Rt = array(0,dim=c(p,p,Nt))
  RSt = array(0,dim=c(p,p,Nt))
  nt = numeric(Nt)
  nt[1]=n0
  dt = numeric(Nt)
  dt[1]=d0
  S = numeric(Nt)
  S[1]=dt[1]/nt[1]
  ft = numeric(Nt)
  Qt = numeric(Nt)
  ets = numeric(Nt)
  lpl = numeric(Nt)
  # Filtering: the statement order inside this loop is essential --
  # each quantity feeds the next update equation
  for (t in 2:Nt){
    # Posterior at {t-1}: (theta_{t-1}|y_{t-1}) ~ T_{n_{t-1}}[m_{t-1}, C_{t-1} = C*_{t-1} x d_{t-1}/n_{t-1}]
    # Prior at {t}: (theta_{t}|y_{t-1}) ~ T_{n_{t-1}}[m_{t-1}, R_{t}]
    # RSt ~ C*_{t-1}/delta
    RSt[,,t] = Ct[,,(t-1)] / (S[(t-1)]*delta)
    Rt[,,t] = RSt[,,t] * (S[(t-1)])
    # One-step forecast: (Y_{t}|y_{t-1}) ~ T_{n_{t-1}}[f_{t}, Q_{t}]
    ft[t] = t(F1[,t]) %*% mt[,(t-1)]
    QSt = as.vector(1 + t(F1[,t]) %*% RSt[,,t] %*% F1[,t])
    Qt[t] = QSt * S[(t-1)]
    et = Y[t] - ft[t]
    ets[t] = et / sqrt(Qt[t])
    # Posterior at t: (theta_{t}|y_{t}) ~ T_{n_{t}}[m_{t}, C_{t}]
    At = (RSt[,,t] %*% F1[,t])/QSt
    mt[,t] = mt[,(t-1)] + (At*et)
    nt[t] = nt[(t-1)] + 1
    dt[t] = dt[(t-1)] + (et^2)/QSt
    S[t]=dt[t]/nt[t]
    CSt[,,t] = RSt[,,t] - (At %*% t(At))*QSt
    Ct[,,t] = S[t]*CSt[,,t]
    # Log Predictive Likelihood: log density of the Student-t one-step forecast
    lpl[t] = lgamma((nt[(t-1)]+1)/2)-lgamma(nt[(t-1)]/2)-0.5*log(pi*nt[(t-1)]*Qt[t])-((nt[(t-1)]+1)/2)*log(1+(1/nt[(t-1)])*et^2/Qt[t])
  }
  # drop the t=0 slot so outputs align with the observed time series
  mt = mt[,2:Nt]; Ct = Ct[,,2:Nt]; CSt = CSt[,,2:Nt]; Rt = Rt[,,2:Nt]; RSt = RSt[,,2:Nt]
  nt = nt[2:Nt]; dt = dt[2:Nt]; S = S[2:Nt]; ft = ft[2:Nt]; Qt = Qt[2:Nt]; ets = ets[2:Nt]; lpl = lpl[2:Nt]
  filt.output <- list(mt=mt,Ct=Ct,CSt=CSt,Rt=Rt,RSt=RSt,nt=nt,dt=dt,S=S,ft=ft,Qt=Qt,ets=ets,lpl=lpl)
  return(filt.output)
}
#' Generate all possible parent-set models for one node.
#'
#' Enumerates every subset of the other nodes as a candidate parent set.
#' Column 1 is the empty ("no parents") model; unused parent slots are zero.
#'
#' @param Nn number of nodes; the number of columns of the dataset can be used.
#' @param node the node of interest (i.e., the node to find parents for).
#'
#' @return
#' output.model = a matrix with dimensions (nn-1) x number of models, where number of models = 2^(nn-1).
#'
#' @export
model.generator <- function(Nn, node) {
  # start from the "no parents" model: one all-zero column
  accum <- rep(0, (Nn - 1))
  for (n_parents in 1:(Nn - 1)) {
    if (Nn == 2 & node == 1) {
      # special case: combn(2, 1) would expand the scalar 2 into 1:2,
      # so build the two-model matrix explicitly
      accum <- matrix(c(0, 2), 1, 2)
    } else {
      # all parent combinations of size n_parents
      combos <- combn(c(1:Nn)[-node], n_parents)
      # pad with zero rows so every model column has Nn-1 parent slots
      padded <- array(0, dim = c((Nn - 1), ncol(combos)))
      padded[1:n_parents, ] <- combos
      accum <- cbind(accum, padded)
    }
  }
  colnames(accum) <- NULL
  return(accum)
}
#' Exhaustive search over all parent sets with discount-factor optimization.
#'
#' For every candidate parent set of \code{node}, runs the DLM filter over a
#' grid of discount factors and records the best Log Predictive Likelihood
#' and the delta that achieved it.
#'
#' @param Data Dataset with dimension number of time points Nt x Number of nodes Nn.
#' @param node node of interest.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return
#' model.store a matrix with the model, LPL and chosen discount factor for all possible models.
#' runtime an estimate of the run time of the function, using proc.time().
#' @export
exhaustive.search <- function(Data, node, nbf=15, delta=seq(0.5,1,0.01), cpp=TRUE, m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  ptm=proc.time()
  Nn=ncol(Data)        # the number of nodes
  Nm=2^(Nn-1)          # the number of models per node
  M=model.generator(Nn,node) # Generate all the possible models
  models=rbind(1:Nm,M) # Label each model with a 'model number'
  Yt=Data[,node]       # the time series of the node we wish to find parents for
  Nt=length(Yt)        # the number of time points
  nd=length(delta)     # the number of deltas
  # Create empty arrays for the lpl scores and the optimum deltas
  lpldet=array(NA,c(Nm,nd))
  lplmax=rep(NA,Nm)
  DF.hat=rep(NA,Nm)
  for (z in seq_len(Nm)) {
    pars=models[(2:Nn),z]
    pars=pars[pars!=0]
    # design matrix Ft: intercept column plus the parents' time series
    Ft=array(1,dim=c(Nt,length(pars)+1))
    if (ncol(Ft)>1) {
      Ft[,2:ncol(Ft)]=Data[,pars] # selects parents
    }
    # Calculate the log predictive likelihood, for each value of delta
    for (j in seq_len(nd)) {
      if (cpp) {
        # fast C++ implementation
        lpl=c(dlmFiltCpp(Yt, t(Ft), delta[j], m0, CS0, n0, d0))
        lpldet[z,j]=sum(lpl[nbf:Nt])
      } else {
        # original native R
        a=dlm.filt.rh(Yt, t(Ft), delta=delta[j], m0=m0, CS0=CS0, n0=n0, d0=d0)
        lpldet[z,j]=sum(a$lpl[nbf:Nt])
      }
    }
    lplmax[z]=max(lpldet[z,],na.rm=TRUE)
    # BUGFIX: which.max() yields a single index (the first maximum, NAs
    # ignored); the previous logical indexing returned several deltas on
    # ties, triggering a partial-assignment warning with an arbitrary result
    DF.hat[z]=delta[which.max(lpldet[z,])]
  }
  # Output model.store: model number, parent slots, best LPL, best delta
  model.store=rbind(models,lplmax,DF.hat)
  rownames(model.store)=NULL
  runtime=(proc.time()-ptm)
  output<-list(model.store=model.store,runtime=runtime)
  return(output)
}
#' Mean-center each column of a 2D time series array.
#'
#' After centering, every node's time series has mean zero.
#'
#' @param X 2D array with dimensions timeseries x nodes.
#'
#' @return M 2D array.
#' @export
center <- function(X) {
  dims <- dim(X)
  out <- matrix(NA, dims[1], dims[2])
  for (node in seq_len(dims[2])) {
    # subtract the column mean; scale() returns a column matrix,
    # which drops cleanly into the output column
    out[, node] <- scale(X[, node], center = TRUE, scale = FALSE)
  }
  return(out)
}
#' Estimate a subject's full network: exhaustive search on every node.
#'
#' Runs \code{exhaustive.search()} per node, optionally writing each node's
#' model table to \code{<id>_node_<nnn>.txt}, then derives the winning models,
#' adjacency and thresholded adjacency.
#'
#' @param X array with dimensions timeseries x nodes.
#' @param id subject ID. If set, results are saved to a txt file.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return store list with results.
#' @export
subject <- function(X, id = NULL, nbf = 15, delta = seq(0.5, 1, 0.01), cpp = TRUE,
                    m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  n_nodes <- ncol(X)
  n_models <- 2^(n_nodes - 1)
  models <- array(0, dim = c(n_nodes + 2, n_models, n_nodes))
  for (nd in seq_len(n_nodes)) {
    res <- exhaustive.search(X, nd, nbf = nbf, delta = delta, cpp = cpp,
                             m0 = m0, CS0 = CS0, n0 = n0, d0 = d0)
    models[, , nd] <- res$model.store
    if (!is.null(id)) {
      write(t(models[, , nd]),
            file = sprintf("%s_node_%03d.txt", id, nd),
            ncolumns = n_models)
    }
  }
  store <- list()
  store$models <- models
  store$winner <- getWinner(models, n_nodes)
  store$adj <- getAdjacency(store$winner, n_nodes)
  store$thr <- getThreshAdj(store$adj, store$models, store$winner)
  return(store)
}
#' Run an exhaustive parent-set search for a single node.
#'
#' Thin wrapper around \code{exhaustive.search()} that optionally writes the
#' resulting model table to a text file named \code{<id>_node_<nnn>.txt}.
#'
#' @param X array with dimensions timeseries x nodes.
#' @param n node number.
#' @param id subject ID. If set, results are saved to a txt file.
#' @param nbf Log Predictive Likelihood will be calculated from (and including) this time point.
#' @param delta a vector of potential values for the discount factor.
#' @param cpp boolean true (default): fast C++ implementation, false: native R code.
#' @param m0 the vector of the prior mean at time t=0, length p. The default is a non-informative prior with zero mean. (theta0 | y0, phi) ~ N(m0,CS0*phi^-1).
#' @param CS0 the prior scale matrix at time t=0, dim = p x p. The default is a non-informative prior, 3 * identity matrix.
#' @param n0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001. n0 has to be higher than 0.
#' @param d0 prior hyperparameter of precision phi ~ G(n0/2; d0/2). The default is a non-informative prior, with a value of 0.001.
#'
#' @return store list with results.
#' @export
node <- function(X, n, id = NULL, nbf = 15, delta = seq(0.5, 1, 0.01), cpp = TRUE,
                 m0 = 0, CS0 = 3, n0 = 0.001, d0 = 0.001) {
  n_nodes <- ncol(X)
  n_models <- 2^(n_nodes - 1)  # number of candidate parent sets per node
  store <- exhaustive.search(X, n, nbf = nbf, delta = delta, cpp = cpp,
                             m0 = m0, CS0 = CS0, n0 = n0, d0 = d0)
  if (!is.null(id)) {
    # one row per model: model id, parents, LPL, discount factor
    write(t(store$model.store),
          file = sprintf("%s_node_%03d.txt", id, n),
          ncolumns = n_models)
  }
  return(store)
}
#' Reads single subject's network from txt files.
#'
#' @param id identifier to select all subjects' nodes, e.g. pattern containing subject ID and session number.
#' @param nodes number of nodes.
#'
#' @return store list with results.
#' @export
read.subject <- function(id, nodes) {
  # models: (nodes + 2) rows (model id, parent slots, LPL, discount factor)
  # x 2^(nodes - 1) candidate models x nodes -- same layout written by node()
  models = array(0,dim=c(nodes+2,2^(nodes-1),nodes))
  for (n in 1:nodes) {
    #file=sprintf("%s_node_%03d.txt", id, n)
    # NOTE(review): assumes exactly one file in the working directory matches
    # the glob for each node; multiple matches would break read.table -- confirm
    file=list.files(pattern=glob2rx(sprintf("%s*_node_%03d.txt", id, n)))
    models[,,n] = as.matrix(read.table(file))
  }
  # rebuild winner/adjacency/thresholded network from the raw model tables
  store=list()
  store$models=models
  store$winner=getWinner(models,nodes)
  store$adj=getAdjacency(store$winner,nodes)
  store$thr=getThreshAdj(store$adj, store$models, store$winner)
  return(store)
}
#' Select the winning model(s) by maximizing the log predictive likelihood.
#'
#' Row \code{nodes + 1} of a model table holds the LPL score; the column with
#' the highest score wins. Accepts either a single model table (2D) or one
#' table per node (3D).
#'
#' @param models 2D matrix, or 3D models x node.
#' @param nodes number of nodes.
#'
#' @return winner array with highest scored model(s).
#' @export
getWinner <- function(models, nodes) {
  lpl_row <- nodes + 1  # row index of the LPL score
  n_dims <- length(dim(models))
  if (n_dims == 2) {
    winner <- models[, which.max(models[lpl_row, ])]
  } else if (n_dims == 3) {
    # one winning column per node
    winner <- array(0, dim = c(nodes + 2, nodes))
    for (nd in seq_len(nodes)) {
      winner[, nd] <- models[, which.max(models[lpl_row, , nd]), nd]
    }
  }
  return(winner)
}
#' Build the adjacency matrix plus per-edge LPL and discount factors of
#' the winning models.
#'
#' Column n of \code{winner} encodes node n's winning model: rows 2..nodes are
#' parent indices (0 = unused slot), row nodes+1 the LPL, row nodes+2 the
#' discount factor.
#'
#' @param winner, 2D matrix.
#' @param nodes number of nodes.
#'
#' @return adj, 2D adjacency matrix.
#' @export
getAdjacency <- function(winner, nodes) {
  am <- matrix(0, nodes, nodes)
  lpl <- matrix(NA, nodes, nodes)
  df <- matrix(NA, nodes, nodes)
  for (child in seq_len(nodes)) {
    parents <- winner[2:nodes, child]
    parents <- parents[parents > 0]  # drop empty parent slots
    am[parents, child] <- 1
    lpl[parents, child] <- winner[nodes + 1, child]
    df[parents, child] <- winner[nodes + 2, child]
  }
  return(list(am = am, lpl = lpl, df = df))
}
#' Plots network as graph.
#'
#' @param adj 2D adjacency matrix.
#'
#' @export
plotNet <- function(adj) {
  # requires the igraph package; graph.adjacency()/plot.igraph() are the
  # legacy igraph names -- NOTE(review): superseded by
  # graph_from_adjacency_matrix() in newer igraph releases, confirm version
  plot.igraph(graph.adjacency(adj, mode="directed", weighted=T, diag=F))
}
#' Plots network as adjacency matrix.
#'
#' @param adj 2D adjacency matrix.
#' @param col color palette.
#' @param lab labels as character array.
#' @param lim vector with two min and max values for color scaling.
#' @param diag true or false, if true showing values on the diagonal line.
#'
#' @export
plotMat <- function(adj, col=heat.colors(12), lab=NULL, lim = c(0,1), diag=FALSE) {
  if (!diag) {
    # blank out self-edges so the diagonal is not drawn
    adj[row(adj) == col(adj)]=NA
  }
  n=nrow(adj)
  # transpose + reverse columns so row 1 of adj is rendered at the top
  adj_ = t(apply(adj, 2, rev))
  par(mai=c(1,1,0.5,1.1))
  image(adj_, col=col, axes=F, zlim=lim)
  # axis labels: left side runs top-to-bottom, bottom side left-to-right
  mtext(text=rev(lab), side=2, line=0.3, at=seq(0,1,1/(n-1)), las=1, cex=0.8)
  mtext(text=lab, side=1, line=0.3, at=seq(0,1,1/(n-1)), las=2, cex=0.8)
  # image.plot() comes from the 'fields' package; draws the color legend only
  image.plot(adj_, legend.only=T, col=col, zlim=lim)
  grid(n, n, lwd = 1)
}
#' Performs a binomial test with FDR correction for network edges in an
#' adjacency matrix.
#'
#' Tests, for every edge position, whether the observed edge frequency across
#' subjects (and runs) deviates from the overall edge probability p0.
#'
#' @param adj adjacency matrix, nodes x nodes x subj, or nodes x nodes x runs x subj.
#' @param alter type of binomial test, "two.sided" (default), "less", or "greater"
#'
#' @return store list with results: p0 (H0 edge probability), p (raw p-values),
#'   p_fdr (FDR-adjusted p-values), adj (edge proportions), adj_fdr
#'   (proportions, NA where not significant at FDR 0.05).
#' @export
binom.nettest <- function(adj, alter="two.sided") {
  mydim=dim(adj)
  # guard: previously a 2D (or other) input fell through both branches and
  # left N / N_Comp / adj_ undefined, failing later with a confusing error
  if (!length(mydim) %in% c(3, 4)) {
    stop("adj must be a 3D (nodes x nodes x subjects) or 4D (nodes x nodes x runs x subjects) array",
         call. = FALSE)
  }
  M = sum(adj) # total edges over all N subjects, all R(R-1) edges
  if (length(mydim) == 3) { # without runs
    N=mydim[3] # No. of subjects
    N_Comp=mydim[1]
    adj_ = apply(adj, c(1,2), sum)
  } else { # with runs
    N=mydim[4] # No. of subjects
    N_runs=mydim[3]
    N_Comp=mydim[1]
    N=N*N_runs # adjust N for no. of runs
    adj_ = apply(adj, c(1,2), sum) # sum across subjects and runs
  }
  # binomial test for every edge occurrence; the diagonal is tested too,
  # which is harmless since self-edges are structurally always zero
  p0 = M/N/N_Comp/(N_Comp-1) # H0 edge probability
  p = array(NA, dim=c(N_Comp,N_Comp))
  for (i in 1:N_Comp) {
    for (j in 1:N_Comp) {
      tmp=binom.test(adj_[i,j],N,p=p0, alternative=alter)
      p[i,j]=tmp$p.value
    }
  }
  # FDR correction across all edge tests
  p_fdr=matrix(p.adjust(p, method = "fdr"),N_Comp,N_Comp)
  adj_fdr=adj_
  adj_fdr[p_fdr>=0.05]=NA
  store=list()
  store$p0=p0
  store$p=p
  store$p_fdr=p_fdr
  store$adj=adj_/N
  store$adj_fdr=adj_fdr/N # significant proportions
  return(store)
}
#' Reshape a 2D concatenated time series into 3D by subject.
#'
#' The input stacks all subjects vertically (subject 1's V volumes, then
#' subject 2's, ...); the output separates them along a third dimension.
#'
#' @param ts a 2D time series volumes x nodes.
#' @param N No. of subjects.
#' @param V No. of volumes.
#'
#' @return M 3D matrix, time series x nodes x subjects.
#' @export
reshapeTs <- function(ts, N, V) {
  n_nodes <- ncol(ts)
  out <- array(NA, dim = c(V, n_nodes, N))
  for (subj in seq_len(N)) {
    rows <- seq.int(from = (subj - 1) * V + 1, length.out = V)
    out[, , subj] <- ts[rows, ]
  }
  return(out)
}
#' Mean correlation matrix of multi-subject time series.
#'
#' Computes each subject's node-by-node correlation matrix and averages the
#' matrices element-wise across subjects.
#'
#' @param ts a 3D time series time series x nodes x subjects.
#'
#' @return M correlation matrix.
#' @export
corTs <- function(ts) {
  dims <- dim(ts)
  n_subj <- dims[3]
  n_nodes <- dims[2]
  R <- array(NA, dim = c(n_nodes, n_nodes, n_subj))
  for (i in seq_len(n_subj)) {
    R[, , i] <- cor(ts[, , i])
  }
  # element-wise mean across the subject dimension
  M <- apply(R, c(1, 2), mean)
  return(M)
}
#' Get a specific parent model from the full model table.
#'
#' Finds the column of \code{models} whose parent slots (rows 2..Nn) match
#' \code{parents}, padded with trailing zeros. Matching is positional, i.e.
#' parents must be given in the same (ascending) order used by
#' \code{model.generator()}.
#'
#' @param models a 2D model matrix.
#' @param parents a vector with parent nodes.
#'
#' @return mod specific parent model (the matching column of \code{models}).
#' @export
getModel <- function(models, parents) {
  Nn = nrow(models) - 2 # No. of nodes
  Nm = ncol(models)     # No. of models
  # guard: previously rep() with a negative count errored cryptically here
  if (length(parents) > Nn - 1) {
    stop("more parents supplied than the model space allows", call. = FALSE)
  }
  parents = c(parents, rep(0, (Nn - 1) - length(parents))) # pad with zeros
  for (m in seq_len(Nm)) {
    if (all(models[2:Nn, m] == parents)) {
      # model columns are unique, so return as soon as we find the match
      return(models[, m])
    }
  }
  # previously a missing match surfaced as "object 'mod' not found"
  stop("no model with the requested parent set was found", call. = FALSE)
}
#' Restructure per-subject results into group-level arrays.
#'
#' Stacks each subject's adjacency, LPL, discount-factor and thresholded
#' matrices along a final subject dimension for easier group analysis.
#'
#' @param subj a list of subjects.
#'
#' @return group a list.
#' @export
group <- function(subj) {
  n_nodes <- ncol(subj[[1]]$adj$am)
  n_subj <- length(subj)
  blank <- array(NA, dim = c(n_nodes, n_nodes, n_subj))
  am <- lpl <- df <- tam <- tbi <- blank
  tlpls <- array(NA, dim = c(n_nodes, n_nodes, 2, n_subj))
  for (s in seq_len(n_subj)) {
    am[, , s]  <- subj[[s]]$adj$am
    lpl[, , s] <- subj[[s]]$adj$lpl
    df[, , s]  <- subj[[s]]$adj$df
    # thresholded measures
    tam[, , s] <- subj[[s]]$thr$am
    tbi[, , s] <- subj[[s]]$thr$bi
    tlpls[, , , s] <- subj[[s]]$thr$lpls
  }
  return(list(am = am, lpl = lpl, df = df, tam = tam, tbi = tbi, tlpls = tlpls))
}
#' Get thresholded adjacency network.
#'
#' Resolves bidirectional edges (i <-> j) by comparing the joint model's LPL
#' against the better unidirectional alternative; if the bidirectional model
#' does not win by a Bayes-factor margin, only the stronger direction is kept.
#'
#' @param adj list with network adjacency from getAdjacency().
#' @param models matrix 3D with full model estimates.
#' @param winner matrix 2D with winning models.
#'
#' @return thr list with thresholded network adjacency.
#' @export
getThreshAdj <- function(adj, models, winner) {
  Nn = ncol(adj$am)
  # determine bidirectional edges
  bi = array(0, dim=c(Nn, Nn))
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      if (adj$am[i,j] == 1 & adj$am[j,i] == 1) {
        bi[i,j] = 1
      }
    }
  }
  # Calculate models: each bidirectional pair is handled once (upper triangle)
  B=bi*upper.tri(bi)
  # lpls[,,1]: LPL of the bidirectional model; lpls[,,2]: unidirectional LPLs
  lpls=array(NA, dim=c(Nn, Nn, 2))
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      # if bidirectional, calculate 3 models
      # A+B:i<>j, A:i>j, B:i<j
      # A+B: LPLj + LPLi
      # A  : LPLj + LPLi-j
      # B  : LPLi + LPLj-i
      if (B[i,j] == 1) {
        # bidirectional LPL
        lpls[i,j,1] = lpls[j,i,1] = adj$lpl[i,j] + adj$lpl[j,i]
        # uni i->j: re-score node i's winner with node j removed from parents
        p = winner[,i][2:Nn]
        p = p[p != j & p!= 0] # remove node j
        lpls[i,j,2] = adj$lpl[i,j] + getModel(models[,,i], p)[Nn+1]
        # uni j->i: re-score node j's winner with node i removed from parents
        p = winner[,j][2:Nn]
        p = p[p != i & p!= 0] # remove node i
        lpls[j,i,2] = adj$lpl[j,i] + getModel(models[,,j], p)[Nn+1]
      }
    }
  }
  # am matrix: keep only the stronger direction unless the bidirectional
  # model beats the best unidirectional one by more than BF log-units
  am=adj$am
  BF=20 # bayes factor threshold (on the log scale)
  for (i in 1:Nn) {
    for (j in 1:Nn) {
      if (B[i,j] == 1) {
        if (lpls[i,j,1] - BF <= max(lpls[i,j,2], lpls[j,i,2]) ) {
          if (lpls[i,j,2] > lpls[j,i,2]) {
            am[i,j] = 1; am[j,i] = 0
          } else {
            am[i,j] = 0; am[j,i] = 1
          }
        }
      }
    }
  }
  thr=list()
  thr$bi=bi # bidirectional edges
  thr$lpls=lpls # lpls
  thr$am=am # adjacency matrix (thresholded)
  return(thr)
}
#' Classification performance of estimated networks against ground truth.
#'
#' Returns, per subject, the vector (TPR, specificity, PPV, NPV, FPR, FNR,
#' FDR, accuracy). True negatives exclude the diagonal (self-edges).
#'
#' @param x estimated binary network matrix.
#' @param xtrue, true binary network matrix.
#'
#' @return perf vector (one row of 8 measures per subject).
#' @export
perf <- function(x, xtrue) {
  dims <- dim(x)
  n_nodes <- dims[1]
  if (length(dims) == 3) {
    n_subj <- dims[3]
  } else if (length(dims) == 2) {
    # promote a single matrix to a one-subject array
    x <- array(x, dim = c(n_nodes, n_nodes, 1))
    n_subj <- 1
  }
  out <- array(NA, dim = c(n_subj, 8))
  for (s in seq_len(n_subj)) {
    # see https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    est <- x[, , s]
    TP <- sum(est & xtrue)
    FP <- sum((est - xtrue) == 1)
    FN <- sum((xtrue - est) == 1)
    TN <- sum(!est & !xtrue) - ncol(est)  # exclude the diagonal
    out[s, ] <- c(
      TP / (TP + FN),              # 1 sensitivity / TPR
      TN / (TN + FP),              # 2 specificity
      TP / (TP + FP),              # 3 PPV / precision
      TN / (TN + FN),              # 4 NPV
      FP / (FP + TN),              # 5 FPR
      FN / (TP + FN),              # 6 FNR
      FP / (TP + FP),              # 7 FDR
      (TP + TN) / (TP + FP + FN + TN)  # 8 accuracy
    )
  }
  return(out)
}
|
#' Download ncov 2019 data from
#' https://github.com/BlankerL/DXY-COVID-19-Data/ (csv) or
#' https://github.com/yiluheihei/nCoV-2019-Data (RDS)
#'
#' @param latest logical, download the latest or all time-series ncov data,
#'   default \code{TRUE}
#' @param method character, data source: "ncov" (RDS files) or "api"
#'   (DXY JSON/CSV files)
#'
#' @return a data frame of class \code{ncov} with attributes \code{type}
#'   and \code{from}
#' @export
get_ncov2 <- function(latest = TRUE, method = c("ncov", "api")) {
  method <- match.arg(method)
  # pick the raw.githubusercontent-style base URL for the chosen source
  from <- ifelse(
    method == "ncov",
    "https://github.com/yiluheihei/nCoV-2019-Data",
    "https://github.com/BlankerL/DXY-COVID-19-Data"
  )
  if (method == "ncov") {
    # pre-aggregated RDS snapshots; gzcon() because the file is gzip-compressed
    file <- ifelse(latest, "ncov_latest.RDS", "ncov.RDS")
    ncov <- readRDS(gzcon(url(file.path(from, "raw", "master", file))))
  } else {
    if (latest) {
      ncov <- jsonlite::fromJSON(
        file.path(from, "raw", "master", "json", "DXYArea.json")
      )
      ncov <- ncov$results
      # unnest cities: one row per city instead of nested city data frames
      ncov <- purrr::map_df(
        1:nrow(ncov),
        ~ unnest_province_ncov(ncov[.x, ])
      )
    } else {
      # full time series is only published as CSV
      file <- "DXYArea.csv"
      ncov <- readr::read_csv(file.path(from, "raw", "master", "csv", file))
    }
  }
  # tag the object so print.ncov / `[.ncov` can dispatch on it
  ncov <- structure(
    ncov,
    class = c("ncov", "data.frame"),
    type = "All",
    from = from
  )
  ncov
}
# Unnest one province row's nested city data into a flat data frame,
# keeping the layout consistent with the csv data in
# https://github.com/BlankerL/DXY-COVID-19-Data.
# Province-level counts are prefixed "province_", city-level counts "city_".
unnest_province_ncov <- function(x) {
  counts_vars <- c(
    "confirmedCount", "suspectedCount",
    "curedCount", "deadCount"
  )
  province_dat <- select(
    x,
    continentName:countryEnglishName,
    provinceName, provinceEnglishName,
    province_zipCode = locationId,
    one_of(counts_vars)
  )
  # rename province count columns with the "province_" prefix
  province_dat <- rename_with(
    province_dat,
    ~ paste0("province_", .x),
    one_of(counts_vars)
  )
  # no cities data, such as taiwan, foreign countries:
  # emit a single row with NA city columns so bind_rows stays rectangular
  if (length(x$cities[[1]]) == 0) {
    city_vars <- c(
      "cityName", "cityEnglishName",
      paste0("city_", counts_vars)
    )
    province_dat[city_vars] <- NA
    province_dat$city_zipCode <- NA
    # conv_time() is a package-internal helper -- presumably converts the
    # epoch-millisecond updateTime to POSIXct; confirm in the package source
    province_dat$updateTime <- conv_time(x$updateTime)
    return(province_dat)
  } else {
    c_ncov <- x$cities[[1]]
    city_dat <-select(
      c_ncov,
      cityName, cityEnglishName,
      one_of(counts_vars)
    ) %>%
      rename_with(~ paste0("city_", .x), one_of(counts_vars))
    city_dat$city_zipCode <- c_ncov$locationId
    city_dat$updateTime <- conv_time(x$updateTime)
    # repeat the province columns for every city row
    unnest_cities_dat <- bind_cols(province_dat, city_dat)
  }
  unnest_cities_dat
}
#' Print method for ncov objects.
#'
#' Shows the subset type, the first update timestamp, and the data source.
#'
#' @param x a ncov object
#' @param ... extra arguments
#' @export
print.ncov <- function(x, ...) {
  data_type <- attr(x, "type")
  cat(data_type, "COVID 2019 Data\n")
  cat("Updated at", as.character(x$updateTime[1]), "\n")
  cat("From", attr(x, "from"), "\n")
}
#' Subset ncov data
#'
#' Subset world, china, province, and other countries ncov data
#'
#' @param x ncov data, return from `get_ncov()`
#' @param i a keyword: "world"/"World", "china"/"China", or one or more
#'   province names (Chinese or English)
#' @param j columns to keep; all columns when missing
#' @param latest logical, keep only the latest observation per region
#'
#' @export
`[.ncov` <- function(x, i, j, latest = TRUE) {
  if (length(i) == 1) {
    if (i %in% c("world", "World")) {
      res <- subset_world_ncov(x, latest = latest)
      type <- "World"
    } else if (i %in% c("China", "china")) {
      res <- subset_china_ncov(x, latest)
      type <- "China"
    } else {
      # BUGFIX: validate against `x` (the object being subset); previously
      # this read a global variable `ncov` that may not exist at call time
      if (!i %in% c(x$provinceName, x$provinceEnglishName)) {
        stop("not contain ncov data of ", i, " province")
      }
      res <- subset_province_ncov(x, i, latest)
      type <- res$provinceEnglishName[1]
    }
  } else {
    # multiple provinces requested
    # BUGFIX: same global-variable issue fixed for this branch
    ind <- which(!i %in% c(x$provinceName, x$provinceEnglishName))
    if (length(ind)) {
      stop("not contain ncov data of ", i[ind], " province")
    }
    res <- purrr::map_df(
      i,
      ~ subset_province_ncov(x, .x, latest)
    )
    type <- paste(unique(res$provinceEnglishName), collapse = ", ")
  }
  # column selection; a missing `j` keeps all columns
  res <- res[, j, drop = FALSE]
  structure(
    res,
    class = c("ncov", "data.frame"),
    type = type,
    from = attr(x, "from")
  )
}
#' Subset china ncov
#' @noRd
subset_china_ncov <- function(ncov, latest = TRUE) {
  # Strip the "ncov" class so dplyr verbs see a plain data.frame.
  dat <- data.frame(ncov)
  res <- filter(dat, countryEnglishName == "China")
  if (latest) {
    # Records are stored newest-first, so the first row within each
    # province group is its most recent report.
    res <- res %>%
      group_by(provinceName) %>%
      group_modify(~ head(.x, 1L)) %>%
      ungroup()
  }
  arrange(res, desc(updateTime))
}
#' Subset province ncov, as well as foreign country
#' @noRd
subset_province_ncov <- function(ncov, i, latest = TRUE) {
  # `i` may be either the Chinese or the English province name.
  dat <- data.frame(ncov)
  res <- filter(dat, provinceName == i | provinceEnglishName == i)
  if (latest) {
    # Keep rows with city detail reported at the most recent update
    # time of the whole province subset (computed before the
    # cityName filter, as in the original).
    newest <- max(res$updateTime)
    res <- filter(res, !is.na(cityName), updateTime == newest)
  }
  res
}
#' Subset world ncov
#' @noRd
subset_world_ncov <- function(ncov, latest = TRUE) {
  # ncov in other countries except china
  ncov <- data.frame(ncov)
  other_ncov <- filter(
    ncov,
    countryEnglishName != "China"
  )
  # China records are aggregated separately (one row per province).
  china_ncov <- subset_china_ncov(ncov, latest)
  world_ncov <- bind_rows(china_ncov, other_ncov)
  if (latest) {
    # Keep the most recent record per country (rows are newest-first).
    world_ncov <- group_by(world_ncov, countryEnglishName) %>%
      group_modify(~ head(.x, 1L)) %>%
      ungroup()
    # Correct china ncov: replace whichever per-province China row
    # survived above with the country-level "China" summary row.
    # BUG FIX: the original indexed with `-(logical)`; the coerced
    # 0/-1 vector removed the *first* row instead of the China rows.
    world_ncov <- world_ncov[!(world_ncov$countryEnglishName %in% "China"), ]
    world_ncov <- bind_rows(
      world_ncov,
      filter(china_ncov, provinceEnglishName == "China")
    )
  }
  world_ncov
}
#' Download ncov 2019 area and overall data from 163
#'
#' @param country foreign country name
#' @export
get_foreign_ncov <- function(country) {
  # 163 (NetEase) live feed; `areaTree` holds one row per country.
  wy_ncov <- jsonlite::fromJSON("https://c.m.163.com/ug/api/wuhan/app/data/list-total")
  area_tree <- wy_ncov$data$areaTree
  foreign_ncov <- area_tree[area_tree$name == country, ]
  if (!"children" %in% names(foreign_ncov)) {
    # BUG FIX: the original message lacked the space before the
    # country name ("...data ofItaly").
    stop("No province/city ncov data of ", country, call. = FALSE)
  }
  # Flatten the per-province counts into a plain data.frame.
  child <- foreign_ncov$children[[1]]
  data.frame(
    confirmedCount = child$total$confirm,
    suspectedCount = child$total$suspect,
    curedCount = child$total$heal,
    deadCount = child$total$dead,
    provinceName = child$name,
    updateTime = child$lastUpdateTime,
    stringsAsFactors = FALSE
  )
}
|
/R/ncov-data.R
|
permissive
|
yiluheihei/ncovmap
|
R
| false
| false
| 6,689
|
r
|
#' Download ncov 2019 data from
#' https://github.com/BlankerL/DXY-COVID-19-Data/ (csv) or
#' https://github.com/yiluheihei/nCoV-2019-Data (RDS)
#'
#' @param latest logical, download the latest or all time-series ncov data,
#' default \code{TRUE}
#' @param method character, data source: "ncov" (pre-built RDS from
#'   GitHub, default) or "api" (DXY-COVID-19-Data JSON/CSV)
#'
#' @export
get_ncov2 <- function(latest = TRUE, method = c("ncov", "api")) {
  method <- match.arg(method)
  from <- ifelse(
    method == "ncov",
    "https://github.com/yiluheihei/nCoV-2019-Data",
    "https://github.com/BlankerL/DXY-COVID-19-Data"
  )
  if (method == "ncov") {
    # Pre-built RDS snapshots hosted on GitHub.
    file <- ifelse(latest, "ncov_latest.RDS", "ncov.RDS")
    ncov <- readRDS(gzcon(url(file.path(from, "raw", "master", file))))
  } else {
    if (latest) {
      ncov <- jsonlite::fromJSON(
        file.path(from, "raw", "master", "json", "DXYArea.json")
      )
      ncov <- ncov$results
      # Unnest cities row by row. BUG FIX: seq_len() replaces
      # 1:nrow(ncov), which would iterate over c(1, 0) if the
      # download ever yielded zero rows.
      ncov <- purrr::map_df(
        seq_len(nrow(ncov)),
        ~ unnest_province_ncov(ncov[.x, ])
      )
    } else {
      file <- "DXYArea.csv"
      ncov <- readr::read_csv(file.path(from, "raw", "master", "csv", file))
    }
  }
  # Tag the result so print.ncov / `[.ncov` can dispatch on it.
  structure(
    ncov,
    class = c("ncov", "data.frame"),
    type = "All",
    from = from
  )
}
# unnest the cities data, keeping the result consistent with the csv
# data in https://github.com/BlankerL/DXY-COVID-19-Data
unnest_province_ncov <- function(x) {
  # Flatten one province row whose `cities` column holds a nested
  # data frame, producing one output row per city (or a single row
  # of NA city columns when no city breakdown exists).
  counts_vars <- c(
    "confirmedCount", "suspectedCount",
    "curedCount", "deadCount"
  )
  province_dat <- x %>%
    select(
      continentName:countryEnglishName,
      provinceName, provinceEnglishName,
      province_zipCode = locationId,
      one_of(counts_vars)
    ) %>%
    rename_with(~ paste0("province_", .x), one_of(counts_vars))
  cities <- x$cities[[1]]
  if (length(cities) == 0) {
    # No city-level data (e.g. Taiwan, foreign countries): fill the
    # city columns with NA so every row shares one schema.
    province_dat[c("cityName", "cityEnglishName", paste0("city_", counts_vars))] <- NA
    province_dat$city_zipCode <- NA
    province_dat$updateTime <- conv_time(x$updateTime)
    return(province_dat)
  }
  city_dat <- cities %>%
    select(cityName, cityEnglishName, one_of(counts_vars)) %>%
    rename_with(~ paste0("city_", .x), one_of(counts_vars))
  city_dat$city_zipCode <- cities$locationId
  city_dat$updateTime <- conv_time(x$updateTime)
  # Province columns are recycled across the city rows.
  bind_cols(province_dat, city_dat)
}
#' Show ncov
#' @param x a ncov object
#' @param ... extra arguments
#' @export
print.ncov <- function(x, ...) {
  # Header: data scope recorded at creation ("World", "China",
  # a province name, or "All").
  cat(attr(x, "type"), "COVID 2019 Data\n")
  # Timestamp of the first record (data is stored newest-first).
  cat("Updated at", as.character(x$updateTime[1]), "\n")
  # Download source URL stashed in the "from" attribute.
  cat("From", attr(x, "from"), "\n")
}
#' Subset ncov data
#'
#' Subset world, china, province, and other countries ncov data
#'
#' @param x ncov data, return from `get_ncov()`
#' @param i word
#' @param j not used now
#' @param latest logical; if \code{TRUE}, subset only the latest records,
#'   otherwise the full time-series ncov data.
#'
#' @export
`[.ncov` <- function(x, i, j, latest = TRUE) {
  # BUG FIX: the original validated province names against (and, for
  # multiple provinces, subset from) the *global* `ncov` object rather
  # than the `x` being subset; all lookups now use `x`.
  if (length(i) == 1) {
    if (i %in% c("world", "World")) {
      res <- subset_world_ncov(x, latest = latest)
      type <- "World"
    } else if (i %in% c("China", "china")) {
      res <- subset_china_ncov(x, latest)
      type <- "China"
    } else {
      if (!i %in% c(x$provinceName, x$provinceEnglishName)) {
        stop("not contain ncov data of ", i, " province")
      }
      res <- subset_province_ncov(x, i, latest)
      type <- res$provinceEnglishName[1]
    }
  } else {
    # Multiple provinces requested: validate every name up front.
    ind <- which(!i %in% c(x$provinceName, x$provinceEnglishName))
    if (length(ind)) {
      stop("not contain ncov data of ", i[ind], " province")
    }
    res <- purrr::map_df(
      i,
      ~ subset_province_ncov(x, .x, latest)
    )
    type <- paste(unique(res$provinceEnglishName), collapse = ", ")
  }
  # `j` may be missing, in which case all columns are retained.
  res <- res[, j, drop = FALSE]
  structure(
    res,
    class = c("ncov", "data.frame"),
    type = type,
    from = attr(x, "from")
  )
}
#' Subset china ncov
#' @noRd
subset_china_ncov <- function(ncov, latest = TRUE) {
  # Strip the "ncov" class so dplyr verbs see a plain data.frame.
  dat <- data.frame(ncov)
  res <- filter(dat, countryEnglishName == "China")
  if (latest) {
    # Records are stored newest-first, so the first row within each
    # province group is its most recent report.
    res <- res %>%
      group_by(provinceName) %>%
      group_modify(~ head(.x, 1L)) %>%
      ungroup()
  }
  arrange(res, desc(updateTime))
}
#' Subset province ncov, as well as foreign country
#' @noRd
subset_province_ncov <- function(ncov, i, latest = TRUE) {
  # `i` may be either the Chinese or the English province name.
  dat <- data.frame(ncov)
  res <- filter(dat, provinceName == i | provinceEnglishName == i)
  if (latest) {
    # Keep rows with city detail reported at the most recent update
    # time of the whole province subset (computed before the
    # cityName filter, as in the original).
    newest <- max(res$updateTime)
    res <- filter(res, !is.na(cityName), updateTime == newest)
  }
  res
}
#' Subset world ncov
#' @noRd
subset_world_ncov <- function(ncov, latest = TRUE) {
  # ncov in other countries except china
  ncov <- data.frame(ncov)
  other_ncov <- filter(
    ncov,
    countryEnglishName != "China"
  )
  # China records are aggregated separately (one row per province).
  china_ncov <- subset_china_ncov(ncov, latest)
  world_ncov <- bind_rows(china_ncov, other_ncov)
  if (latest) {
    # Keep the most recent record per country (rows are newest-first).
    world_ncov <- group_by(world_ncov, countryEnglishName) %>%
      group_modify(~ head(.x, 1L)) %>%
      ungroup()
    # Correct china ncov: replace whichever per-province China row
    # survived above with the country-level "China" summary row.
    # BUG FIX: the original indexed with `-(logical)`; the coerced
    # 0/-1 vector removed the *first* row instead of the China rows.
    world_ncov <- world_ncov[!(world_ncov$countryEnglishName %in% "China"), ]
    world_ncov <- bind_rows(
      world_ncov,
      filter(china_ncov, provinceEnglishName == "China")
    )
  }
  world_ncov
}
#' Download ncov 2019 area and overall data from 163
#'
#' @param country foreign country name
#' @export
get_foreign_ncov <- function(country) {
  # 163 (NetEase) live feed; `areaTree` holds one row per country.
  wy_ncov <- jsonlite::fromJSON("https://c.m.163.com/ug/api/wuhan/app/data/list-total")
  area_tree <- wy_ncov$data$areaTree
  foreign_ncov <- area_tree[area_tree$name == country, ]
  if (!"children" %in% names(foreign_ncov)) {
    # BUG FIX: the original message lacked the space before the
    # country name ("...data ofItaly").
    stop("No province/city ncov data of ", country, call. = FALSE)
  }
  # Flatten the per-province counts into a plain data.frame.
  child <- foreign_ncov$children[[1]]
  data.frame(
    confirmedCount = child$total$confirm,
    suspectedCount = child$total$suspect,
    curedCount = child$total$heal,
    deadCount = child$total$dead,
    provinceName = child$name,
    updateTime = child$lastUpdateTime,
    stringsAsFactors = FALSE
  )
}
|
#' Calculates the weighted correlation given a data set and a set of weights.
#'
#' This is a copy of corr function from the boot package. It calculates the correlation coefficient in weighted form.
#' @param d a matrix with two columns corresponding to the two variables whose correlation we wish to calculate.
#' @param w a vector of weights to be applied to each pair of observations. The default is equal weights for each pair. Normalization takes place within the function so sum(w) need not equal 1.
#' @return the correlation coefficient between d[,1] and d[,2].
#' @keywords correlation
corr <- function (d, w = rep(1, nrow(d))/nrow(d)) {
  # Weighted Pearson correlation of d[, 1] and d[, 2]; weights are
  # normalised internally so sum(w) need not be 1.
  wsum <- sum(w)
  # Weighted means of the two columns.
  mu_x <- sum(d[, 1L] * w) / wsum
  mu_y <- sum(d[, 2L] * w) / wsum
  # Weighted (population-style) covariance and variances.
  cov_xy <- sum(d[, 1L] * d[, 2L] * w) / wsum - mu_x * mu_y
  var_x <- sum(d[, 1L]^2 * w) / wsum - mu_x^2
  var_y <- sum(d[, 2L]^2 * w) / wsum - mu_y^2
  cov_xy / sqrt(var_x * var_y)
}
|
/R/corr.R
|
no_license
|
cran/PResiduals
|
R
| false
| false
| 834
|
r
|
#' Calculates the weighted correlation given a data set and a set of weights.
#'
#' This is a copy of corr function from the boot package. It calculates the correlation coefficient in weighted form.
#' @param d a matrix with two columns corresponding to the two variables whose correlation we wish to calculate.
#' @param w a vector of weights to be applied to each pair of observations. The default is equal weights for each pair. Normalization takes place within the function so sum(w) need not equal 1.
#' @return the correlation coefficient between d[,1] and d[,2].
#' @keywords correlation
corr <- function (d, w = rep(1, nrow(d))/nrow(d)) {
  # Weighted Pearson correlation of d[, 1] and d[, 2]; weights are
  # normalised internally so sum(w) need not be 1.
  wsum <- sum(w)
  # Weighted means of the two columns.
  mu_x <- sum(d[, 1L] * w) / wsum
  mu_y <- sum(d[, 2L] * w) / wsum
  # Weighted (population-style) covariance and variances.
  cov_xy <- sum(d[, 1L] * d[, 2L] * w) / wsum - mu_x * mu_y
  var_x <- sum(d[, 1L]^2 * w) / wsum - mu_x^2
  var_y <- sum(d[, 2L]^2 * w) / wsum - mu_y^2
  cov_xy / sqrt(var_x * var_y)
}
|
# Reference class bundling a binary classifier with its training/test
# data and ROC-based performance measures. Relies on the pROC package
# (roc(), ci()); subclasses are expected to implement buildClassifier()
# and predict(). rr/or and their CI bounds are declared but not
# computed here -- presumably filled in by subclasses (verify).
Classifier <- setRefClass("Classifier", fields=list(
                                           x = "data.frame",
                                           y = "factor",
                                           x.test = "data.frame",
                                           y.test = "factor",
                                           predicted.class = "factor",
                                           predicted.value = "numeric",
                                           optimalCutoff = "numeric",
                                           sens = "numeric",#based on youden index
                                           spec = "numeric",#based on youden index
                                           rr = "numeric",
                                           or = "numeric",
                                           rr.l = "numeric",
                                           rr.u = "numeric",
                                           or.l = "numeric",
                                           or.u = "numeric",
                                           auc = "numeric",
                                           auc.l = "numeric",
                                           auc.u = "numeric",
                                           contingency = "table",
                                           model = "ANY",
                                           name = "character",
                                           roc = "list" # list of class "roc" from pROC package
                                           ),
                                         methods = list(
                                           # Delegates construction to the S4 generic below so the
                                           # same class supports several construction signatures.
                                           initialize = function(...) {
                                             createClassifier(.self,...)
                                           },
                                           ### AUC, CI(AUC), p-value of difference between AUC
                                           ### sensitivity, specificity, relative risk, odds ratio (ci of all)
                                           ###
                                           setName = function(name) {
                                             .self$name <- name
                                           },
                                           buildClassifier = function() {
                                             ### this is the method which all the inherited classes
                                             ### should implement
                                             callNextMethod()
                                           },
                                           predict = function() {
                                             callNextMethod()
                                           },
                                           # The fitted ROC object is kept as the sole element of
                                           # the `roc` list field.
                                           getROC = function() {
                                             .self$roc[[1]]
                                           },
                                           # Fit the ROC curve from observed classes and predicted
                                           # scores (requires predict() to have been run).
                                           computeROC = function() {
                                             .self$roc[[1]] <- roc(y, predicted.value)
                                           },
                                           plotROC = function(...) {
                                             plot(getROC(), print.auc=T, print.thres=optimalCutoff, ...)
                                           },
                                           # AUC with its confidence interval; pROC's ci() returns
                                           # c(lower, point estimate, upper).
                                           computeAUC = function() {
                                             CI <- ci(.self$getROC())
                                             .self$auc.l <- CI[[1]]
                                             .self$auc <- CI[[2]]
                                             .self$auc.u <- CI[[3]]
                                           },
                                           # NOTE(review): the maximised quantity is
                                           # sens*spec/(sens+spec) (harmonic-mean-like), not the
                                           # classical Youden index sens+spec-1 the comments
                                           # mention -- confirm this is intended.
                                           computeoptimalCutoff = function() {
                                             .self$optimalCutoff <- .self$getROC()$thresholds[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           computeSensitivity = function() {
                                             ## Sensitivity at the Youden's Index
                                             .self$sens <- .self$getROC()$sensitivities[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           computeSpecificity = function() {
                                             ## Specificity at the Youden's Index
                                             .self$spec <- .self$getROC()$specificities[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           # Console summary of the computed performance measures.
                                           printStatistics = function() {
                                             print(data.frame("AUC CIL"=auc.l, "AUC"=auc, "AUC CIU"=auc.u,
                                                              "Optimal cutoff"=optimalCutoff,
                                                              "Sensitivity"=sens, "Specificity"=spec))
                                           }
                                         ))
# S4 constructor generic dispatched on which data arguments are
# supplied; called from the RefClass initialize() above.
setGeneric("createClassifier", def=function(object, x, y, x.test, y.test, selectedVariables)
           standardGeneric("createClassifier"))
# No data supplied: return the empty object unchanged.
setMethod("createClassifier", signature=c("Classifier", "missing", "missing", "missing", "missing", "missing"),
          function(object) {
            object
          })
# Training data only: `x` is a Dataset (exprs()/sampleNames() API),
# `y` names a sample-metadata column, `selectedVariables` picks the
# feature rows to keep.
setMethod("createClassifier", signature=c("Classifier", "Dataset", "character", "missing", "missing", "character"),
          function(object, x, y, selectedVariables) {
            object$x <- data.frame(exprs(x)[selectedVariables,], check.names=F)
            object$y <- factor(getSampleMetaData(x,y))
            names(object$y) <- sampleNames(x)
            object
          })
# Training and test data, same feature selection applied to both.
setMethod("createClassifier", signature=c("Classifier", "Dataset", "character", "Dataset", "character", "character"),
          function(object, x, y, x.test, y.test, selectedVariables) {
            object$x <- data.frame(exprs(x)[selectedVariables,], check.names=F)
            object$y <- factor(getSampleMetaData(x,y))
            names(object$y) <- sampleNames(x)
            object$x.test <- data.frame(exprs(x.test)[selectedVariables,], check.names=F)
            object$y.test <- factor(getSampleMetaData(x.test,y.test))
            names(object$y.test) <- sampleNames(x.test)
            object
          })
|
/R/Classifier.R
|
no_license
|
zzxxyui/metadarclean
|
R
| false
| false
| 4,003
|
r
|
# Reference class bundling a binary classifier with its training/test
# data and ROC-based performance measures. Relies on the pROC package
# (roc(), ci()); subclasses are expected to implement buildClassifier()
# and predict(). rr/or and their CI bounds are declared but not
# computed here -- presumably filled in by subclasses (verify).
Classifier <- setRefClass("Classifier", fields=list(
                                           x = "data.frame",
                                           y = "factor",
                                           x.test = "data.frame",
                                           y.test = "factor",
                                           predicted.class = "factor",
                                           predicted.value = "numeric",
                                           optimalCutoff = "numeric",
                                           sens = "numeric",#based on youden index
                                           spec = "numeric",#based on youden index
                                           rr = "numeric",
                                           or = "numeric",
                                           rr.l = "numeric",
                                           rr.u = "numeric",
                                           or.l = "numeric",
                                           or.u = "numeric",
                                           auc = "numeric",
                                           auc.l = "numeric",
                                           auc.u = "numeric",
                                           contingency = "table",
                                           model = "ANY",
                                           name = "character",
                                           roc = "list" # list of class "roc" from pROC package
                                           ),
                                         methods = list(
                                           # Delegates construction to the S4 generic below so the
                                           # same class supports several construction signatures.
                                           initialize = function(...) {
                                             createClassifier(.self,...)
                                           },
                                           ### AUC, CI(AUC), p-value of difference between AUC
                                           ### sensitivity, specificity, relative risk, odds ratio (ci of all)
                                           ###
                                           setName = function(name) {
                                             .self$name <- name
                                           },
                                           buildClassifier = function() {
                                             ### this is the method which all the inherited classes
                                             ### should implement
                                             callNextMethod()
                                           },
                                           predict = function() {
                                             callNextMethod()
                                           },
                                           # The fitted ROC object is kept as the sole element of
                                           # the `roc` list field.
                                           getROC = function() {
                                             .self$roc[[1]]
                                           },
                                           # Fit the ROC curve from observed classes and predicted
                                           # scores (requires predict() to have been run).
                                           computeROC = function() {
                                             .self$roc[[1]] <- roc(y, predicted.value)
                                           },
                                           plotROC = function(...) {
                                             plot(getROC(), print.auc=T, print.thres=optimalCutoff, ...)
                                           },
                                           # AUC with its confidence interval; pROC's ci() returns
                                           # c(lower, point estimate, upper).
                                           computeAUC = function() {
                                             CI <- ci(.self$getROC())
                                             .self$auc.l <- CI[[1]]
                                             .self$auc <- CI[[2]]
                                             .self$auc.u <- CI[[3]]
                                           },
                                           # NOTE(review): the maximised quantity is
                                           # sens*spec/(sens+spec) (harmonic-mean-like), not the
                                           # classical Youden index sens+spec-1 the comments
                                           # mention -- confirm this is intended.
                                           computeoptimalCutoff = function() {
                                             .self$optimalCutoff <- .self$getROC()$thresholds[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           computeSensitivity = function() {
                                             ## Sensitivity at the Youden's Index
                                             .self$sens <- .self$getROC()$sensitivities[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           computeSpecificity = function() {
                                             ## Specificity at the Youden's Index
                                             .self$spec <- .self$getROC()$specificities[which.max((.self$getROC()$sensitivities * .self$getROC()$specificities)/(.self$getROC()$sensitivities + .self$getROC()$specificities))]
                                           },
                                           # Console summary of the computed performance measures.
                                           printStatistics = function() {
                                             print(data.frame("AUC CIL"=auc.l, "AUC"=auc, "AUC CIU"=auc.u,
                                                              "Optimal cutoff"=optimalCutoff,
                                                              "Sensitivity"=sens, "Specificity"=spec))
                                           }
                                         ))
# S4 constructor generic dispatched on which data arguments are
# supplied; called from the RefClass initialize() above.
setGeneric("createClassifier", def=function(object, x, y, x.test, y.test, selectedVariables)
           standardGeneric("createClassifier"))
# No data supplied: return the empty object unchanged.
setMethod("createClassifier", signature=c("Classifier", "missing", "missing", "missing", "missing", "missing"),
          function(object) {
            object
          })
# Training data only: `x` is a Dataset (exprs()/sampleNames() API),
# `y` names a sample-metadata column, `selectedVariables` picks the
# feature rows to keep.
setMethod("createClassifier", signature=c("Classifier", "Dataset", "character", "missing", "missing", "character"),
          function(object, x, y, selectedVariables) {
            object$x <- data.frame(exprs(x)[selectedVariables,], check.names=F)
            object$y <- factor(getSampleMetaData(x,y))
            names(object$y) <- sampleNames(x)
            object
          })
# Training and test data, same feature selection applied to both.
setMethod("createClassifier", signature=c("Classifier", "Dataset", "character", "Dataset", "character", "character"),
          function(object, x, y, x.test, y.test, selectedVariables) {
            object$x <- data.frame(exprs(x)[selectedVariables,], check.names=F)
            object$y <- factor(getSampleMetaData(x,y))
            names(object$y) <- sampleNames(x)
            object$x.test <- data.frame(exprs(x.test)[selectedVariables,], check.names=F)
            object$y.test <- factor(getSampleMetaData(x.test,y.test))
            names(object$y.test) <- sampleNames(x.test)
            object
          })
|
# One-time setup: installs rgl (comment out after the first run).
install.packages('rgl')
library(rgl)
# BUG FIX: `iris <- data('iris')` assigned the dataset *name* (the
# character string "iris") to `iris`, so every `iris$...` call below
# failed; data() loads the dataset into the workspace without
# assignment.
data(iris)
open3d()
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width)
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width, type = 's')
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width, type = 's',
       col = as.integer(iris$Species))
|
/plot3d Example.r
|
no_license
|
leocalnan1/Rcode
|
R
| false
| false
| 323
|
r
|
# One-time setup: installs rgl (comment out after the first run).
install.packages('rgl')
library(rgl)
# BUG FIX: `iris <- data('iris')` assigned the dataset *name* (the
# character string "iris") to `iris`, so every `iris$...` call below
# failed; data() loads the dataset into the workspace without
# assignment.
data(iris)
open3d()
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width)
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width, type = 's')
plot3d(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Width, type = 's',
       col = as.integer(iris$Species))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvlars.R
\name{plot.HDcvlars}
\alias{plot.HDcvlars}
\title{plot cross validation mean square error}
\usage{
\method{plot}{HDcvlars}(x, ...)
}
\arguments{
\item{x}{Output from HDcvlars function.}
\item{...}{graphical parameters}
}
\description{
plot cross validation mean square error
}
\examples{
dataset=simul(50,10000,0.4,10,50,matrix(c(0.1,0.8,0.02,0.02),nrow=2))
result=HDcvlars(dataset$data,dataset$response,5)
plot(result)
}
\author{
Quentin Grimonprez
}
|
/man/plot.HDcvlars.Rd
|
no_license
|
Ivis4ml/HDPenReg
|
R
| false
| true
| 540
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvlars.R
\name{plot.HDcvlars}
\alias{plot.HDcvlars}
\title{plot cross validation mean square error}
\usage{
\method{plot}{HDcvlars}(x, ...)
}
\arguments{
\item{x}{Output from HDcvlars function.}
\item{...}{graphical parameters}
}
\description{
plot cross validation mean square error
}
\examples{
dataset=simul(50,10000,0.4,10,50,matrix(c(0.1,0.8,0.02,0.02),nrow=2))
result=HDcvlars(dataset$data,dataset$response,5)
plot(result)
}
\author{
Quentin Grimonprez
}
|
% File src/library/grDevices/man/n2mfrow.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2007 R Core Team
% Distributed under GPL 2 or later
\name{n2mfrow}
\alias{n2mfrow}
\title{Compute Default mfrow From Number of Plots}
\description{
Easy setup for plotting multiple figures (in a rectangular layout) on
one page. This computes a sensible default for
\code{\link{par}(mfrow)}.
}
\usage{
n2mfrow(nr.plots)
}
\arguments{
\item{nr.plots}{integer; the number of plot figures you'll want to draw.}
}
\value{
A length two integer vector \code{nr, nc} giving the number of rows
and columns, fulfilling \code{nr >= nc >= 1} and \code{nr * nc >= nr.plots}.
}
\author{Martin Maechler}
\seealso{\code{\link{par}}, \code{\link{layout}}.}
\examples{
require(graphics)
n2mfrow(8) # 3 x 3
n <- 5 ; x <- seq(-2, 2, len = 51)
## suppose now that 'n' is not known {inside function}
op <- par(mfrow = n2mfrow(n))
for (j in 1:n)
plot(x, x^j, main = substitute(x^ exp, list(exp = j)), type = "l",
col = "blue")
sapply(1:10, n2mfrow)
}
\keyword{dplot}
\keyword{utilities}
|
/bin/R-3.5.1/src/library/grDevices/man/n2mfrow.Rd
|
permissive
|
lifebit-ai/exomedepth
|
R
| false
| false
| 1,099
|
rd
|
% File src/library/grDevices/man/n2mfrow.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2007 R Core Team
% Distributed under GPL 2 or later
\name{n2mfrow}
\alias{n2mfrow}
\title{Compute Default mfrow From Number of Plots}
\description{
Easy setup for plotting multiple figures (in a rectangular layout) on
one page. This computes a sensible default for
\code{\link{par}(mfrow)}.
}
\usage{
n2mfrow(nr.plots)
}
\arguments{
\item{nr.plots}{integer; the number of plot figures you'll want to draw.}
}
\value{
A length two integer vector \code{nr, nc} giving the number of rows
and columns, fulfilling \code{nr >= nc >= 1} and \code{nr * nc >= nr.plots}.
}
\author{Martin Maechler}
\seealso{\code{\link{par}}, \code{\link{layout}}.}
\examples{
require(graphics)
n2mfrow(8) # 3 x 3
n <- 5 ; x <- seq(-2, 2, len = 51)
## suppose now that 'n' is not known {inside function}
op <- par(mfrow = n2mfrow(n))
for (j in 1:n)
plot(x, x^j, main = substitute(x^ exp, list(exp = j)), type = "l",
col = "blue")
sapply(1:10, n2mfrow)
}
\keyword{dplot}
\keyword{utilities}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{iqsiblings}
\alias{iqsiblings}
\title{IQs of Siblings}
\format{A data frame with 80 observations on the following 2 variables.
\describe{
\item{First}{IQ of the older sibling.}
\item{Second}{IQ of the younger sibling.}
}}
\source{
William Harris, Georgetown College
}
\description{
IQs of pairs of siblings.
}
\keyword{datasets}
|
/man/iqsiblings.Rd
|
no_license
|
nishantsbi/tigerstats
|
R
| false
| false
| 444
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{iqsiblings}
\alias{iqsiblings}
\title{IQs of Siblings}
\format{A data frame with 80 observations on the following 2 variables.
\describe{
\item{First}{IQ of the older sibling.}
\item{Second}{IQ of the younger sibling.}
}}
\source{
William Harris, Georgetown College
}
\description{
IQs of pairs of siblings.
}
\keyword{datasets}
|
############## AF function for a clogit object #####################
#' @title Attributable fraction estimation based on a conditional logistic regression model as a \code{clogit} object (commonly used for matched case-control sampling designs).
#' @description \code{AFclogit} estimates the model-based adjusted attributable fraction from a conditional logistic regression model in form of a \code{\link[survival]{clogit}} object. This model is model is commonly used for data from matched case-control sampling designs.
#' @param object a fitted conditional logistic regression model object of class "\code{\link[survival]{clogit}}".
#' @param data an optional data frame, list or environment (or object coercible by \code{as.data.frame} to a data frame) containing the variables in the model. If not found in \code{data}, the variables are taken from environment (\code{formula}), typically the environment from which the function is called.
#' @param exposure the name of the exposure variable as a string. The exposure must be binary (0/1) where unexposed is coded as 0.
#' @param clusterid the name of the cluster identifier variable as a string. Because conditional logistic regression is only used for clustered data, this argument must be supplied.
#' @return \item{AF.est}{estimated attributable fraction.}
#' @return \item{AF.var}{estimated variance of \code{AF.est}. The variance is obtained by combining the delta methods with the sandwich formula.}
#' @return \item{log.or}{a vector of the estimated log odds ratio for every individual. \code{log.or} contains the estimated coefficient for the exposure variable \code{X} for every level of the confounder \code{Z} as specified by the user in the formula. If the model to be estimated is
#' \deqn{logit\{Pr(Y=1|X,Z)\} = \alpha+\beta{X}+\gamma{Z}}{logit {Pr(Y=1|X,Z)} = \alpha + \beta X + \gamma Z}
#' then \code{log.or} is the estimate of \eqn{\beta}.
#' If the model to be estimated is
#' \deqn{logit\{Pr(Y=1|X,Z)\}=\alpha+\beta{X}+\gamma{Z}+\psi{XZ}}{logit{Pr(Y=1|X,Z)} = \alpha + \beta X +\gamma Z +\psi XZ}
#' then \code{log.odds} is the estimate of
#' \eqn{\beta + \psi{Z}}{\beta + \psi Z}.}
#' @details \code{AFclogit} estimates the attributable fraction for a binary outcome \code{Y}
#' under the hypothetical scenario where a binary exposure \code{X} is eliminated from the population.
#' The estimate is adjusted for confounders \code{Z} by conditional logistic regression.
#' The estimation assumes that the outcome is rare so that the risk ratio can be approximated by the odds ratio, for details see Bruzzi et. al.
#' Let the AF be defined as
#' \deqn{AF = 1 - \frac{Pr(Y_0=1)}{Pr(Y = 1)}}{AF = 1 - Pr(Y0 = 1) / Pr(Y = 1)}
#' where \eqn{Pr(Y_0=1)}{Pr(Y0 = 1)} denotes the counterfactual probability of the outcome if
#' the exposure would have been eliminated from the population. If \code{Z} is sufficient for confounding control then the probability \eqn{Pr(Y_0=1)}{Pr(Y0 = 1)} can be expressed as
#' \deqn{Pr(Y_0=1)=E_Z\{Pr(Y=1\mid{X}=0,Z)\}.}{Pr(Y0=1) = E_z{Pr(Y = 1 | X = 0, Z)}.}
#' Using Bayes' theorem this implies that the AF can be expressed as
#' \deqn{AF = 1-\frac{E_Z\{Pr(Y=1\mid X=0,Z)\}}{Pr(Y=1)}=1-E_Z\{RR^{-X}(Z)\mid{Y = 1}\}}{
#' AF = 1 - E_z{Pr( Y = 1 | X = 0, Z)} / Pr(Y = 1) = 1 - E_z{RR^{-X} (Z) | Y = 1}}
#' where \eqn{RR(Z)} is the risk ratio \deqn{\frac{Pr(Y=1\mid{X=1,Z})}{Pr(Y=1\mid{X=0,Z})}.}{Pr(Y = 1 | X = 1,Z)/Pr(Y=1 | X = 0, Z).}
#' Moreover, the risk ratio can be approximated by the odds ratio if the outcome is rare. Thus,
#' \deqn{ AF \approx 1 - E_Z\{OR^{-X}(Z)\mid{Y = 1}\}.}{AF is approximately 1 - E_z{OR^{-X}(Z) | Y = 1}.}
#' The odds ratio is estimated by conditional logistic regression.
#' The function \code{\link[drgee]{gee}} in the \code{drgee} package is used to get the score contributions for each cluster and the hessian.
#' A clustered sandwich formula is used in the variance calculation.
#' @author Elisabeth Dahlqwist, Arvid \enc{Sjölander}{Sjolander}
#' @seealso \code{\link[survival]{clogit}} used for fitting the conditional logistic regression model for matched case-control designs. For non-matched case-control designs see \code{\link[AF]{AFglm}}.
#' @references Bruzzi, P., Green, S. B., Byar, D., Brinton, L. A., and Schairer, C. (1985). Estimating the population attributable risk for multiple risk factors using case-control data. \emph{American Journal of Epidemiology} \bold{122}, 904-914.
#' @examples
#' expit <- function(x) 1 / (1 + exp( - x))
#' NN <- 1000000
#' n <- 500
#'
#' # Example 1: matched case-control
#' # Duplicate observations in order to create a matched data sample
#' # Create an unobserved confounder U common for each pair of individuals
#' intercept <- -6
#' U <- rnorm(n = NN)
#' Z1 <- rnorm(n = NN)
#' Z2 <- rnorm(n = NN)
#' X1 <- rbinom(n = NN, size = 1, prob = expit(U + Z1))
#' X2 <- rbinom(n = NN, size = 1, prob = expit(U + Z2))
#' Y1 <- rbinom(n = NN, size = 1, prob = expit(intercept + U + Z1 + X1))
#' Y2 <- rbinom(n = NN, size = 1, prob = expit(intercept + U + Z2 + X2))
#' # Select discordant pairs
#' discordant <- which(Y1!=Y2)
#' id <- rep(1:n, 2)
#' # Sample from discordant pairs
#' incl <- sample(x = discordant, size = n, replace = TRUE)
#' data <- data.frame(id = id, Y = c(Y1[incl], Y2[incl]), X = c(X1[incl], X2[incl]),
#' Z = c(Z1[incl], Z2[incl]))
#'
#' # Fit a clogit object
#' fit <- clogit(formula = Y ~ X + Z + X * Z + strata(id), data = data)
#'
#' # Estimate the attributable fraction from the fitted conditional logistic regression
#' AFclogit_est <- AFclogit(fit, data, exposure = "X", clusterid="id")
#' summary(AFclogit_est)
#' @import survival drgee data.table
#' @export
AFclogit<-function(object, data, exposure, clusterid){
  call <- match.call()
  # Warning if the object is not a clogit object
  objectcall <- object$userCall[1]
  if(!(class(object)[1])=="clogit")
    stop("The object is not a clogit object", call. = FALSE)
  if(missing(clusterid))
    stop("Argument 'clusterid' must be provided by the user", call. = FALSE)
  #### Preparation of variables ####
  formula <- object$formula
  npar <- length(object$coef)
  ## Delete rows with missing on variables in the model ##
  # model.matrix() silently drops incomplete rows; the surviving
  # rownames identify the complete cases to keep.
  rownames(data) <- 1:nrow(data)
  m <- model.matrix(object = formula, data = data)
  complete <- as.numeric(rownames(m))
  data <- data[complete, ]
  # For a clogit fit the LHS of the formula is a Surv() call; element
  # [3] of the deparsed LHS is the outcome name -- TODO confirm this
  # holds for all clogit call forms.
  outcome <- as.character(terms(formula)[[2]])[3]
  variables <- attr(object$coefficients, "names")
  ## Create a formula which can be used to create a design matrix
  formula.model <- as.formula(paste(outcome, "~", paste(variables, collapse=" + ")))
  # Count distinct non-NA outcome values per cluster: concordant
  # clusters (all cases or all controls) contribute nothing to the
  # conditional likelihood and are removed below.
  ni.vals <- ave(as.vector(data[, outcome]), data[, clusterid], FUN = function(y) {
    length(unique(y[which(!is.na(y))]))
  })
  compl.rows <- (ni.vals > 1)
  data <- data[compl.rows, ]
  ## Checks ##
  if(is.binary(data[, outcome]) == FALSE)
    stop("Only binary outcome (0/1) is accepted.", call. = FALSE)
  if(is.binary(data[, exposure]) == FALSE)
    stop("Only binary exposure (0/1) is accepted.", call. = FALSE)
  if(max(all.vars(formula[[3]]) == exposure) == 0)
    stop("The exposure variable is not included in the formula.", call. = FALSE)
  #### Methods for non-matched or matched sampling designs ####
  n <- nrow(data)
  n.cases <- sum(data[, outcome])
  n.cluster <- length(unique(data[, clusterid]))
  data <- data[order(data[, clusterid]), ]
  # Create dataset data0 for the counterfactual scenario X = 0
  data0 <- data
  data0[, exposure] <- 0
  clusters <- data[, clusterid]
  ## Design matrices ##
  # The intercept column is dropped ([, -1]); clogit has no intercept.
  design <- model.matrix(object = formula.model, data = data)[, - 1, drop = FALSE]
  design0 <- model.matrix(object = formula.model, data = data0)[, - 1, drop = FALSE]
  ## Create linear predictors to estimate the log odds ratio ##
  diff.design <- design0 - design
  linearpredictor <- design %*% coef(object)
  linearpredictor0 <- design0 %*% coef(object)
  #log odds ratio#
  log.or <- linearpredictor - linearpredictor0
  ## Estimate approximate AF ##
  # AF ~= 1 - E{OR^(-X) | Y = 1} (rare-outcome approximation of
  # Bruzzi et al. 1985); the expectation is the average over cases.
  AF.est <- 1 - sum(data[, outcome] * exp( - log.or)) / sum(data[, outcome])
  #### Meat: score equations ####
  ## Score equation 1 ## individual estimating equations of the estimate of AF
  score.AF <- data[, outcome] * (exp( - log.or) - AF.est)
  ## Score equation 2 ## individual estimating equations from conditional logistic reg.
  pred.diff <- getScoreResidualsFromClogit(fit = object,
                                           y = data[, outcome],
                                           x = design,
                                           id = clusters)
  # NOTE(review): missing() is documented only for formal arguments;
  # `pred.diff` is a local variable that has just been assigned, so
  # this guard likely never fires as intended -- verify against the
  # targeted drgee version check.
  if(missing(pred.diff)) warning("Use the latest version of package 'drgee'", call. = FALSE)
  score.beta <- pred.diff$U
  score.equations <- cbind(score.AF, score.beta)
  # Sum estimating equations within each cluster; the sandwich "meat"
  # is the empirical variance of the cluster-level scores.
  score.equations <- aggr(x = score.equations, clusters = clusters)
  meat <- var(score.equations, na.rm=TRUE)
  #### Bread: hessian of score equations ####
  ### Hessian of score equation 1 ##
  #### Estimating variance using Sandwich estimator ####
  ### Aggregate data ###
  hessian.AF1 <- - data[, outcome]
  hessian.AF1 <- aggr(x = hessian.AF1, clusters = clusters)
  hessian.AF2 <- cbind(as.matrix((design0 - design) * as.vector(data[, outcome] * exp( - log.or))))
  hessian.AF2 <- aggr(x = hessian.AF2, clusters = clusters)
  hessian.AF <- cbind(mean(hessian.AF1), t(colMeans(hessian.AF2)))
  hessian.beta <- cbind(matrix(rep(0, npar), nrow = npar, ncol = 1), pred.diff$dU.sum / n)
  ### Bread ###
  bread <- rbind(hessian.AF, hessian.beta)
  #### Sandwich ####
  # Delta method + clustered sandwich: Var(AF.est) is the (1, 1)
  # element of B^{-1} M B^{-T}, scaled by the number of clusters.
  sandwich <- (solve (bread) %*% meat %*% t(solve (bread)) * n.cluster/ n ^ 2 )
  AF.var <- sandwich[1, 1]
  #### Output ####
  out <- c(list(hessian.beta = hessian.beta, hessian.AF = hessian.AF, clusterid = clusterid,
                score.equations = score.equations, hessian.beta = hessian.beta, bread = bread,
                meat = meat, AF.est = AF.est, AF.var = AF.var, log.or = log.or,
                objectcall = objectcall, call = call, exposure = exposure, outcome = outcome,
                object = object, sandwich = sandwich, formula = formula, n = n, n.cases = n.cases,
                n.cluster = n.cluster))
  class(out) <- "AF"
  return(out)
}
|
/R/AFclogit.R
|
no_license
|
ElisabethDahlqwist/AF
|
R
| false
| false
| 10,343
|
r
|
############## AF function for a clogit object #####################
#' @title Attributable fraction estimation based on a conditional logistic regression model as a \code{clogit} object (commonly used for matched case-control sampling designs).
#' @description \code{AFclogit} estimates the model-based adjusted attributable fraction from a conditional logistic regression model in form of a \code{\link[survival]{clogit}} object. This model is model is commonly used for data from matched case-control sampling designs.
#' @param object a fitted conditional logistic regression model object of class "\code{\link[survival]{clogit}}".
#' @param data an optional data frame, list or environment (or object coercible by \code{as.data.frame} to a data frame) containing the variables in the model. If not found in \code{data}, the variables are taken from environment (\code{formula}), typically the environment from which the function is called.
#' @param exposure the name of the exposure variable as a string. The exposure must be binary (0/1) where unexposed is coded as 0.
#' @param clusterid the name of the cluster identifier variable as a string. Because conditional logistic regression is only used for clustered data, this argument must be supplied.
#' @return \item{AF.est}{estimated attributable fraction.}
#' @return \item{AF.var}{estimated variance of \code{AF.est}. The variance is obtained by combining the delta methods with the sandwich formula.}
#' @return \item{log.or}{a vector of the estimated log odds ratio for every individual. \code{log.or} contains the estimated coefficient for the exposure variable \code{X} for every level of the confounder \code{Z} as specified by the user in the formula. If the model to be estimated is
#' \deqn{logit\{Pr(Y=1|X,Z)\} = \alpha+\beta{X}+\gamma{Z}}{logit {Pr(Y=1|X,Z)} = \alpha + \beta X + \gamma Z}
#' then \code{log.or} is the estimate of \eqn{\beta}.
#' If the model to be estimated is
#' \deqn{logit\{Pr(Y=1|X,Z)\}=\alpha+\beta{X}+\gamma{Z}+\psi{XZ}}{logit{Pr(Y=1|X,Z)} = \alpha + \beta X +\gamma Z +\psi XZ}
#' then \code{log.odds} is the estimate of
#' \eqn{\beta + \psi{Z}}{\beta + \psi Z}.}
#' @details \code{AFclogit} estimates the attributable fraction for a binary outcome \code{Y}
#' under the hypothetical scenario where a binary exposure \code{X} is eliminated from the population.
#' The estimate is adjusted for confounders \code{Z} by conditional logistic regression.
#' The estimation assumes that the outcome is rare so that the risk ratio can be approximated by the odds ratio, for details see Bruzzi et. al.
#' Let the AF be defined as
#' \deqn{AF = 1 - \frac{Pr(Y_0=1)}{Pr(Y = 1)}}{AF = 1 - Pr(Y0 = 1) / Pr(Y = 1)}
#' where \eqn{Pr(Y_0=1)}{Pr(Y0 = 1)} denotes the counterfactual probability of the outcome if
#' the exposure would have been eliminated from the population. If \code{Z} is sufficient for confounding control then the probability \eqn{Pr(Y_0=1)}{Pr(Y0 = 1)} can be expressed as
#' \deqn{Pr(Y_0=1)=E_Z\{Pr(Y=1\mid{X}=0,Z)\}.}{Pr(Y0=1) = E_z{Pr(Y = 1 | X = 0, Z)}.}
#' Using Bayes' theorem this implies that the AF can be expressed as
#' \deqn{AF = 1-\frac{E_Z\{Pr(Y=1\mid X=0,Z)\}}{Pr(Y=1)}=1-E_Z\{RR^{-X}(Z)\mid{Y = 1}\}}{
#' AF = 1 - E_z{Pr( Y = 1 | X = 0, Z)} / Pr(Y = 1) = 1 - E_z{RR^{-X} (Z) | Y = 1}}
#' where \eqn{RR(Z)} is the risk ratio \deqn{\frac{Pr(Y=1\mid{X=1,Z})}{Pr(Y=1\mid{X=0,Z})}.}{Pr(Y = 1 | X = 1,Z)/Pr(Y=1 | X = 0, Z).}
#' Moreover, the risk ratio can be approximated by the odds ratio if the outcome is rare. Thus,
#' \deqn{ AF \approx 1 - E_Z\{OR^{-X}(Z)\mid{Y = 1}\}.}{AF is approximately 1 - E_z{OR^{-X}(Z) | Y = 1}.}
#' The odds ratio is estimated by conditional logistic regression.
#' The function \code{\link[drgee]{gee}} in the \code{drgee} package is used to get the score contributions for each cluster and the hessian.
#' A clustered sandwich formula is used in the variance calculation.
#' @author Elisabeth Dahlqwist, Arvid \enc{Sjölander}{Sjolander}
#' @seealso \code{\link[survival]{clogit}} used for fitting the conditional logistic regression model for matched case-control designs. For non-matched case-control designs see \code{\link[AF]{AFglm}}.
#' @references Bruzzi, P., Green, S. B., Byar, D., Brinton, L. A., and Schairer, C. (1985). Estimating the population attributable risk for multiple risk factors using case-control data. \emph{American Journal of Epidemiology} \bold{122}, 904-914.
#' @examples
#' expit <- function(x) 1 / (1 + exp( - x))
#' NN <- 1000000
#' n <- 500
#'
#' # Example 1: matched case-control
#' # Duplicate observations in order to create a matched data sample
#' # Create an unobserved confounder U common for each pair of individuals
#' intercept <- -6
#' U <- rnorm(n = NN)
#' Z1 <- rnorm(n = NN)
#' Z2 <- rnorm(n = NN)
#' X1 <- rbinom(n = NN, size = 1, prob = expit(U + Z1))
#' X2 <- rbinom(n = NN, size = 1, prob = expit(U + Z2))
#' Y1 <- rbinom(n = NN, size = 1, prob = expit(intercept + U + Z1 + X1))
#' Y2 <- rbinom(n = NN, size = 1, prob = expit(intercept + U + Z2 + X2))
#' # Select discordant pairs
#' discordant <- which(Y1!=Y2)
#' id <- rep(1:n, 2)
#' # Sample from discordant pairs
#' incl <- sample(x = discordant, size = n, replace = TRUE)
#' data <- data.frame(id = id, Y = c(Y1[incl], Y2[incl]), X = c(X1[incl], X2[incl]),
#' Z = c(Z1[incl], Z2[incl]))
#'
#' # Fit a clogit object
#' fit <- clogit(formula = Y ~ X + Z + X * Z + strata(id), data = data)
#'
#' # Estimate the attributable fraction from the fitted conditional logistic regression
#' AFclogit_est <- AFclogit(fit, data, exposure = "X", clusterid="id")
#' summary(AFclogit_est)
#' @import survival drgee data.table
#' @export
AFclogit<-function(object, data, exposure, clusterid){
  ## Record the call for printing/summary of the returned "AF" object.
  call <- match.call()
  # Warning if the object is not a clogit object
  objectcall <- object$userCall[1]
  ## FIX: test the class with inherits() rather than comparing
  ## class(object)[1] to a string; robust to multi-class objects.
  if(!inherits(object, "clogit"))
    stop("The object is not a clogit object", call. = FALSE)
  if(missing(clusterid))
    stop("Argument 'clusterid' must be provided by the user", call. = FALSE)
  #### Preparation of variables ####
  formula <- object$formula
  npar <- length(object$coef)
  ## Delete rows with missing on variables in the model ##
  rownames(data) <- 1:nrow(data)
  m <- model.matrix(object = formula, data = data)
  complete <- as.numeric(rownames(m))
  data <- data[complete, ]
  ## terms(formula)[[2]] is the Surv(...) call of the clogit fit, so element
  ## [3] of its character form is the status (outcome) variable name.
  outcome <- as.character(terms(formula)[[2]])[3]
  variables <- attr(object$coefficients, "names")
  ## Create a formula which can be used to create a design matrix
  formula.model <- as.formula(paste(outcome, "~", paste(variables, collapse=" + ")))
  ## Keep only informative clusters, i.e. clusters discordant on the outcome.
  ni.vals <- ave(as.vector(data[, outcome]), data[, clusterid], FUN = function(y) {
    length(unique(y[which(!is.na(y))]))
  })
  compl.rows <- (ni.vals > 1)
  data <- data[compl.rows, ]
  ## Checks ##
  if(is.binary(data[, outcome]) == FALSE)
    stop("Only binary outcome (0/1) is accepted.", call. = FALSE)
  if(is.binary(data[, exposure]) == FALSE)
    stop("Only binary exposure (0/1) is accepted.", call. = FALSE)
  if(max(all.vars(formula[[3]]) == exposure) == 0)
    stop("The exposure variable is not included in the formula.", call. = FALSE)
  #### Methods for non-matched or matched sampling designs ####
  n <- nrow(data)
  n.cases <- sum(data[, outcome])
  n.cluster <- length(unique(data[, clusterid]))
  data <- data[order(data[, clusterid]), ]
  # Create dataset data0 for counterfactual X = 0s
  data0 <- data
  data0[, exposure] <- 0
  clusters <- data[, clusterid]
  ## Design matrices (intercept column dropped) ##
  design <- model.matrix(object = formula.model, data = data)[, - 1, drop = FALSE]
  design0 <- model.matrix(object = formula.model, data = data0)[, - 1, drop = FALSE]
  ## Create linear predictors to estimate the log odds ratio ##
  diff.design <- design0 - design
  linearpredictor <- design %*% coef(object)
  linearpredictor0 <- design0 %*% coef(object)
  # Log odds ratio: observed exposure versus the unexposed counterfactual.
  log.or <- linearpredictor - linearpredictor0
  ## Estimate approximate AF: AF = 1 - E{OR^-X | Y = 1} ##
  AF.est <- 1 - sum(data[, outcome] * exp( - log.or)) / sum(data[, outcome])
  #### Meat: score equations ####
  ## Score equation 1 ## individual estimating equations of the estimate of AF
  score.AF <- data[, outcome] * (exp( - log.or) - AF.est)
  ## Score equation 2 ## individual estimating equations from conditional logistic reg.
  pred.diff <- getScoreResidualsFromClogit(fit = object,
                                           y = data[, outcome],
                                           x = design,
                                           id = clusters)
  ## FIX: missing() is only valid for formal arguments and errors on a local
  ## variable; test the helper's return value with is.null() instead.
  if(is.null(pred.diff)) warning("Use the latest version of package 'drgee'", call. = FALSE)
  score.beta <- pred.diff$U
  score.equations <- cbind(score.AF, score.beta)
  ## Aggregate the estimating equations to the cluster level before taking
  ## the empirical variance (clustered sandwich "meat").
  score.equations <- aggr(x = score.equations, clusters = clusters)
  meat <- var(score.equations, na.rm=TRUE)
  #### Bread: hessian of score equations ####
  ### Hessian of score equation 1 ##
  hessian.AF1 <- - data[, outcome]
  hessian.AF1 <- aggr(x = hessian.AF1, clusters = clusters)
  ## Reuse diff.design (= design0 - design) computed above.
  hessian.AF2 <- cbind(as.matrix(diff.design * as.vector(data[, outcome] * exp( - log.or))))
  hessian.AF2 <- aggr(x = hessian.AF2, clusters = clusters)
  hessian.AF <- cbind(mean(hessian.AF1), t(colMeans(hessian.AF2)))
  hessian.beta <- cbind(matrix(rep(0, npar), nrow = npar, ncol = 1), pred.diff$dU.sum / n)
  ### Bread ###
  bread <- rbind(hessian.AF, hessian.beta)
  #### Sandwich variance estimator ####
  sandwich <- (solve (bread) %*% meat %*% t(solve (bread)) * n.cluster/ n ^ 2 )
  AF.var <- sandwich[1, 1]
  #### Output ####
  ## FIX: removed the duplicated 'hessian.beta' entry from the result list
  ## (it appeared twice; only the first copy is reachable via $hessian.beta).
  out <- list(hessian.beta = hessian.beta, hessian.AF = hessian.AF, clusterid = clusterid,
              score.equations = score.equations, bread = bread,
              meat = meat, AF.est = AF.est, AF.var = AF.var, log.or = log.or,
              objectcall = objectcall, call = call, exposure = exposure, outcome = outcome,
              object = object, sandwich = sandwich, formula = formula, n = n, n.cases = n.cases,
              n.cluster = n.cluster)
  class(out) <- "AF"
  return(out)
}
|
## Course project plot 3: energy sub-metering for 1-2 Feb 2007, written to
## plot3.png (480x480).
powerFile <- "/Users/shriarao/GitHub/datasciencecoursera/Coursera/household_power_consumption.txt"
powerData <- read.table(powerFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Keep only the two days of interest (dates are stored as d/m/Y strings).
subSetData <- powerData[powerData$Date %in% c("1/2/2007", "2/2/2007"), ]
#str(subSetData)
## Combine the date and time columns into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)  # parsed but unused in this plot
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
## Draw the three sub-metering series and a legend into the PNG device.
png("plot3.png", width = 480, height = 480)
plot(datetime, subMetering1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(datetime, subMetering2, col = "red")
lines(datetime, subMetering3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
/Plot3.R
|
no_license
|
meerao1/ExData_Plotting1
|
R
| false
| false
| 932
|
r
|
## Course project plot 3 (NOTE(review): duplicate of the script above).
dataFile <- "/Users/shriarao/GitHub/datasciencecoursera/Coursera/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Keep only 1-2 Feb 2007 (dates stored as d/m/Y strings).
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
## Combine date and time into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)  # parsed but unused here
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
## Draw the three sub-metering series and a legend into a 480x480 PNG.
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
### =========================================================================
### MulticoreParam objects
### -------------------------------------------------------------------------
## Default worker count for MulticoreParam, delegated to the package's
## "multicore" core-count heuristic.
multicoreWorkers <- function() {
    .snowCores("multicore")
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
## MulticoreParam reuses the SnowParam prototype unchanged; the reference
## class adds no fields or methods of its own and exists for dispatch and
## constructor defaults only.
.MulticoreParam_prototype <- .SnowParam_prototype
.MulticoreParam <- setRefClass("MulticoreParam",
    contains="SnowParam",
    fields=list(),
    methods=list()
)
## Construct a MulticoreParam: a fork-based ("FORK") parallel back-end.
## Resolves manager host/port when not supplied, builds the field prototype
## and instantiates the reference class, enforcing the requested workers.
MulticoreParam <- function(workers=multicoreWorkers(), tasks=0L,
                           stop.on.error=TRUE,
                           progressbar=FALSE, RNGseed=NULL, timeout= WORKER_TIMEOUT,
                           exportglobals=TRUE,
                           log=FALSE, threshold="INFO", logdir=NA_character_,
                           resultdir=NA_character_, jobname = "BPJOB",
                           force.GC = FALSE,
                           fallback = TRUE,
                           manager.hostname=NA_character_, manager.port=NA_integer_, ...)
{
    ## Forked workers are unavailable on Windows; degrade to a single worker.
    if (.Platform$OS.type == "windows") {
        warning("MulticoreParam() not supported on Windows, use SnowParam()")
        workers <- 1L
    }
    ## A progress bar is only meaningful with per-element tasks.
    if (progressbar && missing(tasks))
        tasks <- TASKS_MAXIMUM
    cargs <- c(list(spec=workers, type="FORK"), list(...))
    ## Resolve the manager hostname unless the caller supplied one.
    if (is.na(manager.hostname)) {
        is.local <- (cargs$type == "FORK") || is.numeric(cargs$spec)
        manager.hostname <- .snowHost(is.local)
    } else {
        manager.hostname <- as.character(manager.hostname)
    }
    ## Resolve the manager port unless the caller supplied one.
    if (is.na(manager.port)) {
        manager.port <- .snowPort()
    } else {
        manager.port <- as.integer(manager.port)
    }
    if (!is.null(RNGseed))
        RNGseed <- as.integer(RNGseed)
    ## Merge the constructor arguments into the class prototype.
    proto <- .prototype_update(
        .MulticoreParam_prototype,
        .clusterargs=cargs, cluster=.NULLcluster(),
        .controlled=TRUE, workers=as.integer(workers),
        tasks=as.integer(tasks),
        stop.on.error=stop.on.error,
        progressbar=progressbar,
        RNGseed=RNGseed, timeout=as.integer(timeout),
        exportglobals=exportglobals,
        exportvariables=FALSE,
        log=log, threshold=threshold,
        logdir=logdir, resultdir=resultdir, jobname=jobname,
        force.GC = force.GC,
        fallback = fallback,
        hostname=manager.hostname, port=manager.port,
        ...
    )
    result <- do.call(.MulticoreParam, proto)
    bpworkers(result) <- workers # enforce worker number
    validObject(result)
    result
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Methods - control
###
## Update the worker count on an existing MulticoreParam, clamping it to
## what the platform allows for this cluster type.
setReplaceMethod("bpworkers", c("MulticoreParam", "numeric"),
    function(x, value)
{
    n <- .enforceWorkers(as.integer(value), x$.clusterargs$type)
    x$.clusterargs$spec <- n
    x$workers <- n
    x
})
## bpschedule(): MulticoreParam can schedule jobs on every platform except
## Windows, where fork-based clusters are unsupported.
setMethod("bpschedule", "MulticoreParam",
    function(x)
{
    ## Idiom: return the boolean directly instead of if/else TRUE/FALSE.
    .Platform$OS.type != "windows"
})
|
/R/MulticoreParam-class.R
|
no_license
|
Bioconductor/BiocParallel
|
R
| false
| false
| 2,922
|
r
|
### =========================================================================
### MulticoreParam objects
### -------------------------------------------------------------------------
## NOTE(review): this whole section is a byte-for-byte duplicate of the
## MulticoreParam definitions earlier in this file; kept unchanged.
## Default worker count, delegated to the "multicore" core heuristic.
multicoreWorkers <- function()
    .snowCores("multicore")
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
## The class reuses the SnowParam prototype and adds no fields/methods.
.MulticoreParam_prototype <- .SnowParam_prototype
.MulticoreParam <- setRefClass("MulticoreParam",
    contains="SnowParam",
    fields=list(),
    methods=list()
)
## Construct a fork-based ("FORK") parallel back-end parameter object.
MulticoreParam <- function(workers=multicoreWorkers(), tasks=0L,
                           stop.on.error=TRUE,
                           progressbar=FALSE, RNGseed=NULL, timeout= WORKER_TIMEOUT,
                           exportglobals=TRUE,
                           log=FALSE, threshold="INFO", logdir=NA_character_,
                           resultdir=NA_character_, jobname = "BPJOB",
                           force.GC = FALSE,
                           fallback = TRUE,
                           manager.hostname=NA_character_, manager.port=NA_integer_, ...)
{
    ## Forked workers are unavailable on Windows; degrade to one worker.
    if (.Platform$OS.type == "windows") {
        warning("MulticoreParam() not supported on Windows, use SnowParam()")
        workers = 1L
    }
    if (progressbar && missing(tasks))
        tasks <- TASKS_MAXIMUM
    clusterargs <- c(list(spec=workers, type="FORK"), list(...))
    ## Resolve manager host/port unless supplied by the caller.
    manager.hostname <-
        if (is.na(manager.hostname)) {
            local <- (clusterargs$type == "FORK") ||
                is.numeric(clusterargs$spec)
            manager.hostname <- .snowHost(local)
        } else as.character(manager.hostname)
    manager.port <-
        if (is.na(manager.port)) {
            .snowPort()
        } else as.integer(manager.port)
    if (!is.null(RNGseed))
        RNGseed <- as.integer(RNGseed)
    prototype <- .prototype_update(
        .MulticoreParam_prototype,
        .clusterargs=clusterargs, cluster=.NULLcluster(),
        .controlled=TRUE, workers=as.integer(workers),
        tasks=as.integer(tasks),
        stop.on.error=stop.on.error,
        progressbar=progressbar,
        RNGseed=RNGseed, timeout=as.integer(timeout),
        exportglobals=exportglobals,
        exportvariables=FALSE,
        log=log, threshold=threshold,
        logdir=logdir, resultdir=resultdir, jobname=jobname,
        force.GC = force.GC,
        fallback = fallback,
        hostname=manager.hostname, port=manager.port,
        ...
    )
    param <- do.call(.MulticoreParam, prototype)
    bpworkers(param) <- workers # enforce worker number
    validObject(param)
    param
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Methods - control
###
## Replace the worker count, clamped via .enforceWorkers().
setReplaceMethod("bpworkers", c("MulticoreParam", "numeric"),
    function(x, value)
{
    value <- as.integer(value)
    nworkers <- .enforceWorkers(value, x$.clusterargs$type)
    x$workers <- x$.clusterargs$spec <- nworkers
    x
})
## Scheduling is disabled on Windows (no fork support).
setMethod("bpschedule", "MulticoreParam",
    function(x)
{
    if (.Platform$OS.type == "windows")
        FALSE
    else
        TRUE
})
|
library(BAT)
### Name: beta.accum
### Title: Beta diversity accumulation curves.
### Aliases: beta.accum
### ** Examples
## Two 4-site x 5-species abundance matrices, built row by row.
comm1 <- rbind(c(2, 2, 0, 0, 0),
               c(1, 1, 0, 0, 0),
               c(0, 2, 2, 0, 0),
               c(0, 0, 0, 2, 2))
comm2 <- rbind(c(1, 1, 0, 0, 0),
               c(0, 2, 1, 0, 0),
               c(0, 0, 2, 1, 0),
               c(0, 0, 0, 2, 1))
## Dummy functional tree over the five species.
tree <- hclust(dist(1:5, method = "euclidean"), method = "average")
beta.accum(comm1, comm2)
beta.accum(comm1, comm2, func = "Soerensen")
beta.accum(comm1, comm2, tree)
beta.accum(comm1, comm2, abund = TRUE)
beta.accum(comm1, comm2, tree, TRUE)
|
/data/genthat_extracted_code/BAT/examples/beta.accum.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 540
|
r
|
library(BAT)
### Name: beta.accum
### Title: Beta diversity accumulation curves.
### Aliases: beta.accum
### ** Examples
## Two 4-site x 5-species abundance matrices (NOTE(review): duplicate of
## the example block above).
comm1 <- matrix(c(2,2,0,0,0,1,1,0,0,0,0,2,2,0,0,0,0,0,2,2), nrow = 4, byrow = TRUE)
comm2 <- matrix(c(1,1,0,0,0,0,2,1,0,0,0,0,2,1,0,0,0,0,2,1), nrow = 4, byrow = TRUE)
## Dummy functional tree over the five species.
tree <- hclust(dist(c(1:5), method="euclidean"), method="average")
beta.accum(comm1, comm2)
beta.accum(comm1, comm2, func = "Soerensen")
beta.accum(comm1, comm2, tree)
beta.accum(comm1, comm2, abund = TRUE)
beta.accum(comm1, comm2, tree, TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDimRed.R
\name{plotDimRed}
\alias{plotDimRed}
\title{Plot results either on already run results of reduced dimensions data.}
\usage{
plotDimRed(inSCE, colorBy = "No Color", shape = "No Shape",
reducedDimName = NULL, useAssay = "logcounts", comp1 = NULL,
comp2 = NULL, pcX = NULL, pcY = NULL)
}
\arguments{
\item{inSCE}{Input SCtkExperiment object with saved dimension reduction components
or a variable with saved results. Required}
\item{colorBy}{Color by a condition (any column of the annotation data).}
\item{shape}{Add shapes to each condition.}
\item{reducedDimName}{saved dimension reduction name in the SCtkExperiment object. Required.}
\item{useAssay}{Indicate which assay to use. The default is "logcounts"}
\item{comp1}{label for x-axis}
\item{comp2}{label for y-axis}
\item{pcX}{PCA component to be used for plotting (if applicable).
Default is the first PCA component for PCA data and NULL otherwise.}
\item{pcY}{PCA component to be used for plotting (if applicable).
Default is the second PCA component for PCA data and NULL otherwise.}
}
\value{
a ggplot of the reduced dimensions.
}
\description{
Plot results either on already run results of reduced dimensions data.
}
\examples{
plotDimRed(inSCE = mouseBrainSubsetSCE, colorBy = "No Color", shape = "No Shape",
reducedDimName = "TSNE_counts", useAssay = "counts",
comp1 = "tSNE1", comp2 = "tSNE2")
}
|
/man/plotDimRed.Rd
|
permissive
|
zhangyuqing/singleCellTK
|
R
| false
| true
| 1,474
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDimRed.R
\name{plotDimRed}
\alias{plotDimRed}
\title{Plot results either on already run results of reduced dimensions data.}
\usage{
plotDimRed(inSCE, colorBy = "No Color", shape = "No Shape",
reducedDimName = NULL, useAssay = "logcounts", comp1 = NULL,
comp2 = NULL, pcX = NULL, pcY = NULL)
}
\arguments{
\item{inSCE}{Input SCtkExperiment object with saved dimension reduction components
or a variable with saved results. Required}
\item{colorBy}{Color by a condition (any column of the annotation data).}
\item{shape}{Add shapes to each condition.}
\item{reducedDimName}{saved dimension reduction name in the SCtkExperiment object. Required.}
\item{useAssay}{Indicate which assay to use. The default is "logcounts"}
\item{comp1}{label for x-axis}
\item{comp2}{label for y-axis}
\item{pcX}{PCA component to be used for plotting (if applicable).
Default is the first PCA component for PCA data and NULL otherwise.}
\item{pcY}{PCA component to be used for plotting (if applicable).
Default is the second PCA component for PCA data and NULL otherwise.}
}
\value{
a ggplot of the reduced dimensions.
}
\description{
Plot results either on already run results of reduced dimensions data.
}
\examples{
plotDimRed(inSCE = mouseBrainSubsetSCE, colorBy = "No Color", shape = "No Shape",
reducedDimName = "TSNE_counts", useAssay = "counts",
comp1 = "tSNE1", comp2 = "tSNE2")
}
|
# FUNCTIONS FOR CLEANING RAW DATA FILES
#### efficacy_function cleans raw efficacy data in Shiny app
library(dplyr)
library(tidyr)
## Clean raw efficacy data: select the outcome/treatment columns, rename
## them to the names the app expects, log10-transform the CFU counts, and
## recode the treatment-interval factor levels into "days_interval" form.
efficacy_function <- function(efficacy_df){
  efficacy_clean <- efficacy_df %>%
    select(Protocol_Animal, Compound, Group, Drug_Dose, Days_Treatment,
           Treatment_Interval,Elung,Espleen) %>%
    rename(lung_efficacy = Elung,
           spleen_efficacy = Espleen,
           dosage = Drug_Dose,
           days_treatment = Days_Treatment,
           dose_interval = Treatment_Interval,
           drug = Compound) %>%
    mutate(lung_efficacy = as.numeric(lung_efficacy)) %>%
    mutate(spleen_efficacy = as.numeric(spleen_efficacy)) %>%
    mutate(dose_interval = as.factor(dose_interval)) %>%
    mutate(days_treatment = as.factor(days_treatment)) %>%
    group_by(Protocol_Animal, drug, Group, dosage, days_treatment, dose_interval) %>%
    ## NOTE(review): summarize() is applied with non-aggregating log10()
    ## calls; this relies on each group having one row -- confirm upstream.
    summarize(lung_efficacy_log = log10(lung_efficacy),
              spleen_efficacy_log = log10(spleen_efficacy))
  ## Recode interval levels; the "_"/"20_"/"40_" prefixes are split below
  ## into the days and interval parts around the underscore.
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="Pre Rx 9 week"] <- "_Baseline"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="M-F"] <- "_QD"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="4 wk"] <- "20_Control"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="8 wk"] <- "40_Control"
  levels(efficacy_clean$drug)[levels(efficacy_clean$drug)==""] <- "Baseline"
  ## unite()/separate() are tidyr functions: merge days+interval, then split
  ## on "_" back into the two cleaned columns.
  efficacy_clean <- efficacy_clean %>%
    unite(days_dose, days_treatment, dose_interval, sep = "") %>%
    separate(days_dose, c("days", "dose"), sep = "_") %>%
    rename("days_treatment" = days,
           "dose_interval" = dose) %>%
    mutate(days_treatment = as.numeric(days_treatment))
  return(efficacy_clean)
}
#### plasma_function cleans raw plasma data in Shiny app
#Function Title: Cleaning Plasma Dataframe
#This function
## Clean raw plasma PK data: keep the identifying and measurement columns,
## rename them to app-friendly names, and coerce Group to character.
plasma_function <- function(plasma_df){
  plasma_df %>%
    select(MouseID, Compound, Group, Protocol_Animal,
           Dosing, Timepoint, Plasma_Parent) %>%
    rename(drug = Compound,
           mouse_number = MouseID,
           plasma_concentration = Plasma_Parent) %>%
    mutate(Group = as.character(Group))
}
##### Clean the tissue laser data into a tidy format
#
## Tidy the laser-dissection tissue data: drop bookkeeping columns, assign a
## mouse id to each group of four compartment rows, and spread compartments
## (ULU/RIM/OCS/ICS) into numeric columns.
tissue_laser_function <- function(tissue_laser_df) {
  trimmed <- tissue_laser_df %>%
    rename(`Parent [ng/ml]` = Parent) %>%
    select(-StudyID, -Metabolite, - Units, - Collection, - `Sample ID`)
  ## Rows come in groups of four (one per compartment per mouse).
  ids <- rep(c(1:(nrow(trimmed)/4)), each = 4)
  trimmed %>%
    mutate(MouseID = ids) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(ULU = `uninvolved lung`, RIM = rim,
           OCS = `outer caseum`, ICS = `inner caseum`) %>%
    mutate(ULU = as.numeric(ULU), RIM = as.numeric(RIM),
           OCS = as.numeric(OCS), ICS = as.numeric(ICS))
}
##### tissue_std_pk_function cleans raw tissue std pk data in Shiny app
## Tidy the standard tissue PK data: pair up Lung/Lesion rows per mouse and
## spread them into numeric SLU/SLE columns.
tissue_std_pk_function <- function(tissue_std_pk_df){
  n <- nrow(tissue_std_pk_df)
  ## NOTE(review): assumes rows come in Lung/Lesion pairs, i.e. n is even --
  ## confirm upstream data shape.
  mice_ids <- rep(c(1:(n/2)), each = 2)
  tissue_std_pk_clean <- tissue_std_pk_df %>%
    mutate(mouse_number = mice_ids) %>%
    select(Compound, mouse_number, Group, Protocol_Animal, Dosing, Timepoint, Compartment, Parent) %>%
    rename(drug = Compound,
           `Parent [ng/ml]` = Parent) %>%
    ## spread() (tidyr) turns the Compartment rows into one column per
    ## compartment, keyed by mouse.
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(SLU = Lung,
           SLE = Lesion) %>%
    mutate(SLU = as.numeric(SLU),
           SLE = as.numeric(SLE))
  return(tissue_std_pk_clean)
}
###### in_vitro_function cleans raw in_vitro data in Shiny app
## Placeholder: the in vitro data currently needs no cleaning, so the input
## is passed through unchanged.
in_vitro_function <- function(in_vitro_df){
  in_vitro_df
}
|
/Shiny_App/group_1_functions.R
|
no_license
|
KatieKey/input_output_shiny_group
|
R
| false
| false
| 3,932
|
r
|
# FUNCTIONS FOR CLEANING RAW DATA FILES
## NOTE(review): this section is a byte-for-byte duplicate of the cleaning
## functions defined earlier in this file; kept unchanged.
## NOTE(review): unite()/separate()/spread() below are tidyr functions but
## only dplyr is attached here -- confirm tidyr is loaded by the app.
#### efficacy_function cleans raw efficacy data in Shiny app
library(dplyr)
## Select/rename efficacy columns, log10-transform CFU counts, recode
## treatment-interval levels, then split "days_interval" back apart.
efficacy_function <- function(efficacy_df){
  efficacy_clean <- efficacy_df %>%
    select(Protocol_Animal, Compound, Group, Drug_Dose, Days_Treatment,
           Treatment_Interval,Elung,Espleen) %>%
    rename(lung_efficacy = Elung,
           spleen_efficacy = Espleen,
           dosage = Drug_Dose,
           days_treatment = Days_Treatment,
           dose_interval = Treatment_Interval,
           drug = Compound) %>%
    mutate(lung_efficacy = as.numeric(lung_efficacy)) %>%
    mutate(spleen_efficacy = as.numeric(spleen_efficacy)) %>%
    mutate(dose_interval = as.factor(dose_interval)) %>%
    mutate(days_treatment = as.factor(days_treatment)) %>%
    group_by(Protocol_Animal, drug, Group, dosage, days_treatment, dose_interval) %>%
    summarize(lung_efficacy_log = log10(lung_efficacy),
              spleen_efficacy_log = log10(spleen_efficacy))
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="Pre Rx 9 week"] <- "_Baseline"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="M-F"] <- "_QD"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="4 wk"] <- "20_Control"
  levels(efficacy_clean$dose_interval)[levels(efficacy_clean$dose_interval)=="8 wk"] <- "40_Control"
  levels(efficacy_clean$drug)[levels(efficacy_clean$drug)==""] <- "Baseline"
  efficacy_clean <- efficacy_clean %>%
    unite(days_dose, days_treatment, dose_interval, sep = "") %>%
    separate(days_dose, c("days", "dose"), sep = "_") %>%
    rename("days_treatment" = days,
           "dose_interval" = dose) %>%
    mutate(days_treatment = as.numeric(days_treatment))
  return(efficacy_clean)
}
#### plasma_function cleans raw plasma data in Shiny app
#Function Title: Cleaning Plasma Dataframe
#This function
## Select/rename plasma PK columns and coerce Group to character.
plasma_function <- function(plasma_df){
  plasma_clean <- plasma_df %>%
    select(MouseID,
           Compound,
           Group,
           Protocol_Animal,
           Dosing,
           Timepoint,
           Plasma_Parent) %>%
    rename(drug = Compound,
           mouse_number = MouseID,
           plasma_concentration = Plasma_Parent) %>%
    mutate(Group = as.character(Group))
  return(plasma_clean)
}
##### Clean the tissue laser data into a tidy format
#
## Assign mouse ids to groups of four compartment rows and spread the
## compartments into numeric ULU/RIM/OCS/ICS columns.
tissue_laser_function <- function(tissue_laser_df) {
  tissue_laser_clean <- tissue_laser_df %>%
    rename(`Parent [ng/ml]` = Parent) %>%
    select(-StudyID, -Metabolite, - Units, - Collection, - `Sample ID`)
  n <- nrow(tissue_laser_clean)
  mice_ids <- rep(c(1:(n/4)), each = 4)
  tissue_laser_clean <- mutate(tissue_laser_clean, MouseID = mice_ids) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(ULU = `uninvolved lung`, RIM = rim,
           OCS = `outer caseum`, ICS = `inner caseum`) %>%
    mutate(ULU = as.numeric(ULU), RIM = as.numeric(RIM),
           OCS = as.numeric(OCS), ICS = as.numeric(ICS))
  return(tissue_laser_clean)
}
##### tissue_std_pk_function cleans raw tissue std pk data in Shiny app
## Pair up Lung/Lesion rows per mouse and spread into SLU/SLE columns.
tissue_std_pk_function <- function(tissue_std_pk_df){
  n <- nrow(tissue_std_pk_df)
  mice_ids <- rep(c(1:(n/2)), each = 2)
  tissue_std_pk_clean <- tissue_std_pk_df %>%
    mutate(mouse_number = mice_ids) %>%
    select(Compound, mouse_number, Group, Protocol_Animal, Dosing, Timepoint, Compartment, Parent) %>%
    rename(drug = Compound,
           `Parent [ng/ml]` = Parent) %>%
    spread(key = Compartment, value = `Parent [ng/ml]`) %>%
    rename(SLU = Lung,
           SLE = Lesion) %>%
    mutate(SLU = as.numeric(SLU),
           SLE = as.numeric(SLE))
  return(tissue_std_pk_clean)
}
###### in_vitro_function cleans raw in_vitro data in Shiny app
## Placeholder: passes the in vitro data through unchanged.
in_vitro_function <- function(in_vitro_df){
  in_vitro_clean <- in_vitro_df
  return(in_vitro_clean)
}
|
## Ad-hoc test driver for the DrugsInPeds study package.
## NOTE(review): the connection settings below are assigned in four blocks
## that overwrite each other; only the last (pdw) block is in effect when
## createConnectionDetails() runs. setwd() in a script is an anti-pattern
## but appears intentional in this scratch file.
library(DrugsInPeds)
setwd('s:/temp')
password <- NULL
user <- NULL
oracleTempSchema <- NULL
## Block 1: local PostgreSQL (overridden below).
dbms <- "postgresql"
server <- "localhost/ohdsi"
cdmDatabaseSchema <- "cdm4_sim"
port <- NULL
cdmVersion <- "4"
user <- "postgres"
password <- Sys.getenv("pwPostgres")
## Block 2: SQL Server (overridden below).
dbms <- "sql server"
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm4_sim.dbo"
port <- NULL
cdmVersion <- "4"
## Block 3: SQL Server, JMDC (overridden below).
dbms <- "sql server"
server <- "RNDUSRDHIT06.jnj.com"
cdmDatabaseSchema <- "cdm_jmdc.dbo"
port <- NULL
cdmVersion <- "4"
## Block 4: PDW -- these are the settings actually used.
dbms <- "pdw"
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_jmdc_v512.dbo"
port <- 17001
cdmVersion <- "5"
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
                                                                server = server,
                                                                user = user,
                                                                password = password,
                                                                port = port)
## Run the study and produce outputs under the given folder.
execute(connectionDetails,
        cdmDatabaseSchema = cdmDatabaseSchema,
        oracleTempSchema = oracleTempSchema,
        cdmVersion = cdmVersion,
        folder = "s:/temp/DrugsInPeds")
createFiguresAndTables(connectionDetails,
                       cdmDatabaseSchema = cdmDatabaseSchema,
                       oracleTempSchema = oracleTempSchema,
                       cdmVersion = cdmVersion,
                       folder = "s:/temp/DrugsInPeds")
#OhdsiSharing::generateKeyPair("s:/temp/public.key","s:/temp/private.key")
#OhdsiSharing::decryptAndDecompressFolder("s:/temp/DrugsInPeds/StudyResults.zip.enc","s:/temp/test","s:/temp/private.key")
|
/DrugsInPeds/extras/TestCode.R
|
no_license
|
jingjingz/StudyProtocols
|
R
| false
| false
| 1,682
|
r
|
## Ad-hoc test driver (NOTE(review): duplicate of the script above; the
## repeated connection-setting blocks overwrite each other -- only the last,
## pdw, block is in effect).
library(DrugsInPeds)
setwd('s:/temp')
password <- NULL
user <- NULL
oracleTempSchema <- NULL
dbms <- "postgresql"
server <- "localhost/ohdsi"
cdmDatabaseSchema <- "cdm4_sim"
port <- NULL
cdmVersion <- "4"
user <- "postgres"
password <- Sys.getenv("pwPostgres")
dbms <- "sql server"
server <- "RNDUSRDHIT07.jnj.com"
cdmDatabaseSchema <- "cdm4_sim.dbo"
port <- NULL
cdmVersion <- "4"
dbms <- "sql server"
server <- "RNDUSRDHIT06.jnj.com"
cdmDatabaseSchema <- "cdm_jmdc.dbo"
port <- NULL
cdmVersion <- "4"
dbms <- "pdw"
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "cdm_jmdc_v512.dbo"
port <- 17001
cdmVersion <- "5"
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
                                                                server = server,
                                                                user = user,
                                                                password = password,
                                                                port = port)
## Run the study and produce outputs under the given folder.
execute(connectionDetails,
        cdmDatabaseSchema = cdmDatabaseSchema,
        oracleTempSchema = oracleTempSchema,
        cdmVersion = cdmVersion,
        folder = "s:/temp/DrugsInPeds")
createFiguresAndTables(connectionDetails,
                       cdmDatabaseSchema = cdmDatabaseSchema,
                       oracleTempSchema = oracleTempSchema,
                       cdmVersion = cdmVersion,
                       folder = "s:/temp/DrugsInPeds")
#OhdsiSharing::generateKeyPair("s:/temp/public.key","s:/temp/private.key")
#OhdsiSharing::decryptAndDecompressFolder("s:/temp/DrugsInPeds/StudyResults.zip.enc","s:/temp/test","s:/temp/private.key")
|
##' QGIS Algorithm provided by GRASS i.smap (grass7:i.smap)
##'
##' @title QGIS algorithm i.smap
##'
##' @param input `multilayer` - Input rasters. .
##' @param signaturefile `file` - Name of input file containing signatures. Path to a file.
##' @param blocksize `number` - Size of submatrix to process at one time. A numeric value.
##' @param .m `boolean` - Use maximum likelihood estimation (instead of smap). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -m.
##' @param output `rasterDestination` - Classification. Path for new raster layer.
##' @param goodness `rasterDestination` - Goodness_of_fit. Path for new raster layer.
##' @param GRASS_REGION_PARAMETER `extent` - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..
##' @param GRASS_REGION_CELLSIZE_PARAMETER `number` - GRASS GIS 7 region cellsize (leave 0 for default). A numeric value.
##' @param GRASS_RASTER_FORMAT_OPT `string` - Output Rasters format options (createopt). String value.
##' @param GRASS_RASTER_FORMAT_META `string` - Output Rasters format metadata options (metaopt). String value.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying whether the complete output of `qgisprocess::qgis_run_algorithm()` should be returned (`TRUE`) or only the first output (most likely the main one) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * output - outputRaster - Classification
##' * goodness - outputRaster - Goodness_of_fit
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
grass7_i_smap <- function(input = qgisprocess::qgis_default_value(), signaturefile = qgisprocess::qgis_default_value(), blocksize = qgisprocess::qgis_default_value(), .m = qgisprocess::qgis_default_value(), output = qgisprocess::qgis_default_value(), goodness = qgisprocess::qgis_default_value(), GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(), GRASS_REGION_CELLSIZE_PARAMETER = qgisprocess::qgis_default_value(), GRASS_RASTER_FORMAT_OPT = qgisprocess::qgis_default_value(), GRASS_RASTER_FORMAT_META = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Fail early if QGIS / the GRASS provider is not available.
  check_algorithm_necessities("grass7:i.smap")
  # Run the algorithm, forwarding each parameter under its original QGIS
  # name (the boolean flag keeps its backtick-quoted `-m` name).
  result <- qgisprocess::qgis_run_algorithm("grass7:i.smap", `input` = input, `signaturefile` = signaturefile, `blocksize` = blocksize, `-m` = .m, `output` = output, `goodness` = goodness, `GRASS_REGION_PARAMETER` = GRASS_REGION_PARAMETER, `GRASS_REGION_CELLSIZE_PARAMETER` = GRASS_REGION_CELLSIZE_PARAMETER, `GRASS_RASTER_FORMAT_OPT` = GRASS_RASTER_FORMAT_OPT, `GRASS_RASTER_FORMAT_META` = GRASS_RASTER_FORMAT_META, ...)
  if (!.complete_output) {
    # Only the primary "output" raster is wanted.
    return(qgisprocess::qgis_output(result, "output"))
  }
  result
}
|
/R/grass7_i_smap.R
|
permissive
|
VB6Hobbyst7/r_package_qgis
|
R
| false
| false
| 2,872
|
r
|
##' QGIS Algorithm provided by GRASS i.smap (grass7:i.smap)
##'
##' @title QGIS algorithm i.smap
##'
##' @param input `multilayer` - Input rasters. .
##' @param signaturefile `file` - Name of input file containing signatures. Path to a file.
##' @param blocksize `number` - Size of submatrix to process at one time. A numeric value.
##' @param .m `boolean` - Use maximum likelihood estimation (instead of smap). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -m.
##' @param output `rasterDestination` - Classification. Path for new raster layer.
##' @param goodness `rasterDestination` - Goodness_of_fit. Path for new raster layer.
##' @param GRASS_REGION_PARAMETER `extent` - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..
##' @param GRASS_REGION_CELLSIZE_PARAMETER `number` - GRASS GIS 7 region cellsize (leave 0 for default). A numeric value.
##' @param GRASS_RASTER_FORMAT_OPT `string` - Output Rasters format options (createopt). String value.
##' @param GRASS_RASTER_FORMAT_META `string` - Output Rasters format metadata options (metaopt). String value.
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifying whether the complete output of `qgisprocess::qgis_run_algorithm()` should be returned (`TRUE`) or only the first output (most likely the main one) should be read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * output - outputRaster - Classification
##' * goodness - outputRaster - Goodness_of_fit
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
grass7_i_smap <- function(input = qgisprocess::qgis_default_value(), signaturefile = qgisprocess::qgis_default_value(), blocksize = qgisprocess::qgis_default_value(), .m = qgisprocess::qgis_default_value(), output = qgisprocess::qgis_default_value(), goodness = qgisprocess::qgis_default_value(), GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(), GRASS_REGION_CELLSIZE_PARAMETER = qgisprocess::qgis_default_value(), GRASS_RASTER_FORMAT_OPT = qgisprocess::qgis_default_value(), GRASS_RASTER_FORMAT_META = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Fail early if QGIS / the GRASS provider is not available.
  check_algorithm_necessities("grass7:i.smap")
  # Run the algorithm, forwarding each parameter under its original QGIS
  # name (the boolean flag keeps its backtick-quoted `-m` name).
  result <- qgisprocess::qgis_run_algorithm("grass7:i.smap", `input` = input, `signaturefile` = signaturefile, `blocksize` = blocksize, `-m` = .m, `output` = output, `goodness` = goodness, `GRASS_REGION_PARAMETER` = GRASS_REGION_PARAMETER, `GRASS_REGION_CELLSIZE_PARAMETER` = GRASS_REGION_CELLSIZE_PARAMETER, `GRASS_RASTER_FORMAT_OPT` = GRASS_RASTER_FORMAT_OPT, `GRASS_RASTER_FORMAT_META` = GRASS_RASTER_FORMAT_META, ...)
  if (!.complete_output) {
    # Only the primary "output" raster is wanted.
    return(qgisprocess::qgis_output(result, "output"))
  }
  result
}
|
#' Write data to a Praat TextGrid file
#'
#' Serialize TextGrid data to a Praat-formatted TextGrid file on disk.
#'
#' @param tg TextGrid data, returned from readTextGrid() or createTextGrid() functions
#' @param file a character string of the filename to write to
#' @export
#' @examples
#' # Write to the current working directory
#' writeTextGrid(tg_data, "my_tg.TextGrid")
#'
#' # Write to specific file path
#' writeTextGrid(tg_data, "path/to/textgrids/thisfile.TextGrid")
writeTextGrid <- function(tg, file) {
  # Convert the TextGrid data to Praat text via the bundled JS helper,
  # executed in an embedded V8 context.
  ctx <- v8()
  ctx$source(system.file("js/JSONtoTextGrid.min.js",
                         package = "phonpack"))
  tg_str <- ctx$call("JSONtoTextGrid", tg)
  write_lines(tg_str, file)
}
|
/R/writeTextGrid.R
|
no_license
|
fauxneticien/phonpack
|
R
| false
| false
| 670
|
r
|
#' Write data to a Praat TextGrid file
#'
#' Serialize TextGrid data to a Praat-formatted TextGrid file on disk.
#'
#' @param tg TextGrid data, returned from readTextGrid() or createTextGrid() functions
#' @param file a character string of the filename to write to
#' @export
#' @examples
#' # Write to the current working directory
#' writeTextGrid(tg_data, "my_tg.TextGrid")
#'
#' # Write to specific file path
#' writeTextGrid(tg_data, "path/to/textgrids/thisfile.TextGrid")
writeTextGrid <- function(tg, file) {
  # Convert the TextGrid data to Praat text via the bundled JS helper,
  # executed in an embedded V8 context.
  ctx <- v8()
  ctx$source(system.file("js/JSONtoTextGrid.min.js",
                         package = "phonpack"))
  tg_str <- ctx$call("JSONtoTextGrid", tg)
  write_lines(tg_str, file)
}
|
###################################################
### code chunk number 6: Cs04_model-default
###################################################
# Fit the default MARSS state-space model to `dat`
# (`dat` is defined in an earlier chunk of the user guide).
kem <- MARSS(dat)
|
/inst/userguide/figures/QE--Cs04_model-default.R
|
permissive
|
nwfsc-timeseries/MARSS
|
R
| false
| false
| 168
|
r
|
###################################################
### code chunk number 6: Cs04_model-default
###################################################
# Fit the default MARSS state-space model to `dat`
# (`dat` is defined in an earlier chunk of the user guide).
kem <- MARSS(dat)
|
# Identify epigenetic genes and epigenetic-complex membership.
# (Header previously said "TF genes and TF families"; this script in fact
# processes the EpiGenes data.)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)

#### Get epigenetic data
epi <- read.csv("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/EpiGenes_main_clean.csv", header = TRUE, stringsAsFactors = FALSE); head(epi)
epi <- epi %>% select(Id, UniProt_ID, Function) %>% filter(Function != "#")
head(epi)

# Regroup epi data into fewer categories.
epi %>% group_by(Function) %>% summarise(count = n()) %>% arrange(-count) %>% nrow
epi_new <- epi %>%
  mutate(is_chromatin_remodelling = ifelse(grepl(x = Function, pattern = "Chromatin remodelling"), 1, 0)) %>%
  mutate(is_histone_modification = ifelse(grepl(x = Function, pattern = "Histone modification"), 1, 0)) %>%
  mutate(is_other_epigenetic = ifelse(is_chromatin_remodelling == 0 & is_histone_modification == 0, 1, 0)) %>%
  # NOTE(review): mutate_each()/funs() are deprecated in modern dplyr;
  # kept for compatibility with the dplyr version this script targets.
  mutate_each(funs(as.factor))

#### Epigenetic complexes
epi_complexes <- read.csv("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/EpiGenes_complexes_clean.csv", header = TRUE); head(epi_complexes)
# Split on comma OR the bar character (the bar must be escaped since it
# normally means logical OR in a regex).
epi_complexes_split <- epi_complexes %>% select(Id, UniProt_ID) %>%
  separate(into = c(seq(1:37)), col = UniProt_ID, sep = ",|\\|") %>%
  gather(key = complex_member, value = is_present, -Id, na.rm = TRUE) %>%
  arrange(Id) %>% rename(UniProt_ID = is_present)
# Accessions are still messy: strip spaces, question marks, parentheses
# and plus signs.
epi_complexes_split$UniProt_ID <- gsub(pattern = "\\s*|\\?|\\(|\\)|\\+", replacement = "", epi_complexes_split$UniProt_ID)

# These IDs are a bit non-standard; Ensembl BioMart won't convert them,
# so http://www.uniprot.org/uploadlists/ is used instead.
# Collect the IDs that need mapping.
unmapped_IDs <- rbind(epi_new %>% select(UniProt_ID),
                      epi_complexes_split %>% select(UniProt_ID)) %>% distinct
write.table(unmapped_IDs, "~/Projects/fusion_ML/data/gene_identity_data/epigenetic/uniprot_IDs_to_map.csv", quote = FALSE, row.names = FALSE, col.names = FALSE)

# Read back the ID conversions.
ensg_uniprot_species <- read.table("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/uniprot_species_ensg_mapping", sep = "\t", header = TRUE, stringsAsFactors = FALSE); head(ensg_uniprot_species)

# Write the epigenetic gene table.
epi_ensg <- inner_join(ensg_uniprot_species, epi_new, by = "UniProt_ID"); head(epi_ensg)
# BUG FIX: the indicator columns created above are named with an "is_"
# prefix; the previous range `chromatin_remodelling:other_epigenetic`
# referenced non-existent columns, making this select() fail.
write.table(epi_ensg %>% select(ensg, is_chromatin_remodelling:is_other_epigenetic) %>% distinct,
            "~/Projects/fusion_ML/features/epigenetic_gene.csv", sep = ",", quote = FALSE, row.names = FALSE)

# Write the epigenetic complex table.
epi_complexes_ensg <- inner_join(ensg_uniprot_species, epi_complexes_split, by = "UniProt_ID")
write.table(epi_complexes_ensg %>% select(ensg) %>% mutate(is_epigenetic_complex = 1) %>% distinct,
            "~/Projects/fusion_ML/features/epigenetic_complexes.csv", sep = ",", quote = FALSE, row.names = FALSE)
|
/identify_epigenetic_genes.R
|
no_license
|
wangdi2014/cancer_ML
|
R
| false
| false
| 2,961
|
r
|
# Identify epigenetic genes and epigenetic-complex membership.
# (Header previously said "TF genes and TF families"; this script in fact
# processes the EpiGenes data.)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)

#### Get epigenetic data
epi <- read.csv("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/EpiGenes_main_clean.csv", header = TRUE, stringsAsFactors = FALSE); head(epi)
epi <- epi %>% select(Id, UniProt_ID, Function) %>% filter(Function != "#")
head(epi)

# Regroup epi data into fewer categories.
epi %>% group_by(Function) %>% summarise(count = n()) %>% arrange(-count) %>% nrow
epi_new <- epi %>%
  mutate(is_chromatin_remodelling = ifelse(grepl(x = Function, pattern = "Chromatin remodelling"), 1, 0)) %>%
  mutate(is_histone_modification = ifelse(grepl(x = Function, pattern = "Histone modification"), 1, 0)) %>%
  mutate(is_other_epigenetic = ifelse(is_chromatin_remodelling == 0 & is_histone_modification == 0, 1, 0)) %>%
  # NOTE(review): mutate_each()/funs() are deprecated in modern dplyr;
  # kept for compatibility with the dplyr version this script targets.
  mutate_each(funs(as.factor))

#### Epigenetic complexes
epi_complexes <- read.csv("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/EpiGenes_complexes_clean.csv", header = TRUE); head(epi_complexes)
# Split on comma OR the bar character (the bar must be escaped since it
# normally means logical OR in a regex).
epi_complexes_split <- epi_complexes %>% select(Id, UniProt_ID) %>%
  separate(into = c(seq(1:37)), col = UniProt_ID, sep = ",|\\|") %>%
  gather(key = complex_member, value = is_present, -Id, na.rm = TRUE) %>%
  arrange(Id) %>% rename(UniProt_ID = is_present)
# Accessions are still messy: strip spaces, question marks, parentheses
# and plus signs.
epi_complexes_split$UniProt_ID <- gsub(pattern = "\\s*|\\?|\\(|\\)|\\+", replacement = "", epi_complexes_split$UniProt_ID)

# These IDs are a bit non-standard; Ensembl BioMart won't convert them,
# so http://www.uniprot.org/uploadlists/ is used instead.
# Collect the IDs that need mapping.
unmapped_IDs <- rbind(epi_new %>% select(UniProt_ID),
                      epi_complexes_split %>% select(UniProt_ID)) %>% distinct
write.table(unmapped_IDs, "~/Projects/fusion_ML/data/gene_identity_data/epigenetic/uniprot_IDs_to_map.csv", quote = FALSE, row.names = FALSE, col.names = FALSE)

# Read back the ID conversions.
ensg_uniprot_species <- read.table("~/Projects/fusion_ML/data/gene_identity_data/epigenetic/uniprot_species_ensg_mapping", sep = "\t", header = TRUE, stringsAsFactors = FALSE); head(ensg_uniprot_species)

# Write the epigenetic gene table.
epi_ensg <- inner_join(ensg_uniprot_species, epi_new, by = "UniProt_ID"); head(epi_ensg)
# BUG FIX: the indicator columns created above are named with an "is_"
# prefix; the previous range `chromatin_remodelling:other_epigenetic`
# referenced non-existent columns, making this select() fail.
write.table(epi_ensg %>% select(ensg, is_chromatin_remodelling:is_other_epigenetic) %>% distinct,
            "~/Projects/fusion_ML/features/epigenetic_gene.csv", sep = ",", quote = FALSE, row.names = FALSE)

# Write the epigenetic complex table.
epi_complexes_ensg <- inner_join(ensg_uniprot_species, epi_complexes_split, by = "UniProt_ID")
write.table(epi_complexes_ensg %>% select(ensg) %>% mutate(is_epigenetic_complex = 1) %>% distinct,
            "~/Projects/fusion_ML/features/epigenetic_complexes.csv", sep = ",", quote = FALSE, row.names = FALSE)
|
library(RUnit)
library(SttrDataPackage)
library(DEMOdz)
options(stringsAsFactors=FALSE)
#----------------------------------------------------------------------------------------------------
# Run the full unit-test suite for SttrDataPackage / DEMOdz.
# NOTE(review): test.loadTables is defined below but not invoked here,
# presumably because it needs a live ODBC connection -- confirm.
runTests <- function()
{
testConstructor();
test.loadFiles()
test.getPatientList()
test.getPatientTable()
test.getGeneSets()
} # runTests
#----------------------------------------------------------------------------------------------------
# Verify that a default-constructed SttrDataPackage is empty:
# no matrices, an empty manifest, and a zero-event PatientHistoryClass.
testConstructor <- function()
{
print("--- testConstructor")
dp <- SttrDataPackage();
checkEquals(length(matrices(dp)), 0)
checkEquals(nrow(manifest(dp)), 0)
checkEquals(class(history(dp))[1], "PatientHistoryClass")
checkEquals(eventCount(history(dp)), 0)
} # testConstructor
#----------------------------------------------------------------------------------------------------
# Exercise SttrDataPackage:::.loadFiles() against the DEMOdz extdata
# directory, checking both gross structure and a few spot values.
test.loadFiles <- function()
{
print("--- test.loadFiles")
dir <- system.file(package="DEMOdz", "extdata")
checkTrue(file.exists(dir))
checkTrue(file.exists(file.path(dir, "manifest.tsv")))
x <- SttrDataPackage:::.loadFiles(dir)
# check some gross features. some knowledge of DEMOdz's actual data is used
expected.names <- c("data.frames", "genesets", "history", "manifest", "matrices", "networks")
checkTrue(all(expected.names %in% names(x)))
checkEquals(ncol(x$manifest), 11)
checkTrue(nrow(x$manifest) >= 10)
# more will likely be added over time
expected.variables <- c("mtx.mrna.ueArray", "mtx.mrna.bc", "mtx.mut", "mtx.cn", "history",
"tbl.ptHistory", "mtx.prot", "mtx.meth", "genesets", "g.markers.json")
checkTrue(all(expected.variables %in% x$manifest$variable))
expected.classes <- c("data.frame", "list", "matrix", "character")
checkTrue(all(x$manifest$class %in% expected.classes))
checkTrue(length(x$matrices) > 4)
checkTrue(length(x$data.frames) > 0)
checkTrue(eventCount(x$history) > 100);
checkTrue(length(x$genesets) > 1)
# now spot check some finer details. first, the expression matrix
checkEquals(x$manifest$variable[1], "mtx.mrna.ueArray")
checkEquals(head(rownames(x$matrices[[1]]), n=3), c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028"))
checkEquals(head(colnames(x$matrices[[1]]), n=3), c("EDIL3", "EED", "EEF2"))
checkEqualsNumeric(x$manifest$minValue[1], min(x$matrices[[1]]), tolerance=1e-6)
checkEqualsNumeric(x$manifest$maxValue[1], max(x$matrices[[1]]), tolerance=1e-6)
# now the patient history (the ">100 events" check above suggests the
# "111 events" figure once quoted here is stale -- confirm)
checkTrue("history" %in% x$manifest$variable)
# an arbitrary choice of history event 12
checkEquals(geteventList(x$history)[[12]], list(PatientID="TCGA.06.0201", PtNum=369, study="TCGAgbm", Name="Birth", Fields=list(date="12/11/1943", gender="female", race="white", ethnicity="not hispanic or latino")))
} # test.loadFiles
#----------------------------------------------------------------------------------------------------
# Exercise SttrDataPackage:::.loadTables() against a live "iDEMOdz" ODBC
# database.  Not called from runTests(), presumably because it requires
# database credentials and a running server -- confirm.
test.loadTables <- function()
{
  print("--- test.loadTables")   # BUG FIX: banner previously said test.loadFiles
  dir <- system.file(package="iDEMOdz", "extdata")
  checkTrue(file.exists(dir))
  checkTrue(file.exists(file.path(dir, "manifest.tsv")))
  # NOTE(review): hard-coded credentials; consider Sys.getenv() instead.
  credentials <- list(user="oncotest", password="password")
  stopifnot(all(c("user", "password") %in% names(credentials)))
  db <- odbcConnect("iDEMOdz", uid = credentials$user, pwd = credentials$password)
  x <- SttrDataPackage:::.loadTables(dir, db)
  # Check some gross features; some knowledge of DEMOdz's actual data is used.
  checkEquals(sort(names(x)),
              c("data.frames", "genesets", "history", "manifest", "matrices", "networks"))
  # NOTE(review): dim() below expects 10 manifest rows, yet 12 variable
  # names are listed afterwards -- confirm against the database.
  checkEquals(dim(x$manifest), c(10,11))
  checkEquals(x$manifest$variable,
              c("mtx.mrna.ueArray", "mtx.mrna.bc", "mtx.mut", "mtx.cn", "history", "ptList", "catList",
                "tbl.ptHistory", "mtx.prot", "mtx.meth", "genesets", "g.markers.json"))
  checkEquals(x$manifest$class,
              c("matrix", "matrix", "matrix", "matrix", "list", "list", "list", "data.frame", "matrix",
                "matrix", "list", "character"))
  checkEquals(length(x$matrices), 6)
  checkEquals(length(x$data.frames), 1)
  checkEquals(eventCount(x$history), 201)
  checkEquals(length(x$genesets), 3)
  # Spot-check finer details: first, the expression matrix.
  checkEquals(x$manifest$variable[1], "mtx.mrna.ueArray")
  checkEquals(head(rownames(x$matrices[[1]]), n=3), c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028"))
  checkEquals(head(colnames(x$matrices[[1]]), n=3), c("EDIL3", "EED", "EEF2"))
  checkEqualsNumeric(x$manifest$minValue[1], min(x$matrices[[1]]), tolerance=1e-6)
  checkEqualsNumeric(x$manifest$maxValue[1], max(x$matrices[[1]]), tolerance=1e-6)
  # Patient history must be present in the manifest.
  checkTrue("history" %in% x$manifest$variable)
  # An arbitrary choice of history event 12.
  checkEquals(geteventList(x$history)[[12]], list(PatientID="TCGA.06.0201", PtNum=369, study="TCGAgbm", Name="Birth", Fields=list(date="12/11/1943", gender="female", race="white", ethnicity="not hispanic or latino")))
} # test.loadTables
#----------------------------------------------------------------------------------------------------
# Check getPatientList() on the DEMOdz demo package: 20 patients, and
# patient 12 carries the expected dated and undated events.
test.getPatientList <- function()
{
print("--- test.getPatientList")
dp <- DEMOdz();
x <- getPatientList(dp)
checkEquals(class(x), "list")
checkEquals(length(x), 20)
# Normalize row names before comparing the date-event table.
dateEvents = x[[12]]$dateEvents; rownames(dateEvents) = 1:4
print(dateEvents)
checkEquals(dateEvents$name,c("Birth", "Diagnosis", "Pathology", "Status"))
checkEquals(dateEvents$date,as.Date(c("1943-12-11", "1995-01-01", "1995-01-01", "1995-01-13"), format="%Y-%m-%d"))
checkEquals(dateEvents$eventOrder,c("single","single","single","single"))
checkEquals(dateEvents$eventID,c("event12", "event71", "event182", "event93"))
checkEquals(x[[12]]$noDateEvents,data.frame(name=c("Encounter", "Encounter"), eventID=c("event151", "event152")))
} # test.getPatientList
#----------------------------------------------------------------------------------------------------
# Check getPatientTable() on the DEMOdz demo package: a 20-row data.frame.
test.getPatientTable <- function()
{
print("--- test.getPatientTable")
dp <- DEMOdz();
x <- getPatientTable(dp)
checkEquals(class(x), "data.frame")
checkEquals(nrow(x), 20)
} # test.getPatientTable
#----------------------------------------------------------------------------------------------------
# Check the gene-set accessors on the DEMOdz demo package.
test.getGeneSets <- function()
{
print("--- test.getGeneSets")
dz <- DEMOdz();
names <- getGeneSetNames(dz)
checkEquals(sort(names), c("random.24", "random.40", "test4"))
# NOTE(review): genes.24 is fetched but never asserted on; this only
# verifies that the call does not error -- consider adding a check.
genes.24 <- getGeneSetGenes(dz, "random.24")
} # test.getGeneSets
#----------------------------------------------------------------------------------------------------
# Execute the suite automatically when run non-interactively (e.g. via
# Rscript), but not when the file is sourced in an interactive session.
if(!interactive())
runTests()
|
/r_modules/dataPackages/SttrDataPackage/inst/unitTests/test_SttrDataPackage.R
|
permissive
|
pablopunk/Oncoscape
|
R
| false
| false
| 6,788
|
r
|
library(RUnit)
library(SttrDataPackage)
library(DEMOdz)
options(stringsAsFactors=FALSE)
#----------------------------------------------------------------------------------------------------
runTests <- function()
{
testConstructor();
test.loadFiles()
test.getPatientList()
test.getPatientTable()
test.getGeneSets()
} # runTests
#----------------------------------------------------------------------------------------------------
testConstructor <- function()
{
print("--- testConstructor")
dp <- SttrDataPackage();
checkEquals(length(matrices(dp)), 0)
checkEquals(nrow(manifest(dp)), 0)
checkEquals(class(history(dp))[1], "PatientHistoryClass")
checkEquals(eventCount(history(dp)), 0)
} # testConstructor
#----------------------------------------------------------------------------------------------------
test.loadFiles <- function()
{
print("--- test.loadFiles")
dir <- system.file(package="DEMOdz", "extdata")
checkTrue(file.exists(dir))
checkTrue(file.exists(file.path(dir, "manifest.tsv")))
x <- SttrDataPackage:::.loadFiles(dir)
# check some gross features. some knowledge of DEMOdz's actual data is used
expected.names <- c("data.frames", "genesets", "history", "manifest", "matrices", "networks")
checkTrue(all(expected.names %in% names(x)))
checkEquals(ncol(x$manifest), 11)
checkTrue(nrow(x$manifest) >= 10)
# more will likely be added over time
expected.variables <- c("mtx.mrna.ueArray", "mtx.mrna.bc", "mtx.mut", "mtx.cn", "history",
"tbl.ptHistory", "mtx.prot", "mtx.meth", "genesets", "g.markers.json")
checkTrue(all(expected.variables %in% x$manifest$variable))
expected.classes <- c("data.frame", "list", "matrix", "character")
checkTrue(all(x$manifest$class %in% expected.classes))
checkTrue(length(x$matrices) > 4)
checkTrue(length(x$data.frames) > 0)
checkTrue(eventCount(x$history) > 100);
checkTrue(length(x$genesets) > 1)
# now spot check some finer details. first, the expression matrix
checkEquals(x$manifest$variable[1], "mtx.mrna.ueArray")
checkEquals(head(rownames(x$matrices[[1]]), n=3), c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028"))
checkEquals(head(colnames(x$matrices[[1]]), n=3), c("EDIL3", "EED", "EEF2"))
checkEqualsNumeric(x$manifest$minValue[1], min(x$matrices[[1]]), tolerance=1e-6)
checkEqualsNumeric(x$manifest$maxValue[1], max(x$matrices[[1]]), tolerance=1e-6)
# now the patient history, a list of 111 events
checkTrue("history" %in% x$manifest$variable)
# an arbitrary choice of history event 12
checkEquals(geteventList(x$history)[[12]], list(PatientID="TCGA.06.0201", PtNum=369, study="TCGAgbm", Name="Birth", Fields=list(date="12/11/1943", gender="female", race="white", ethnicity="not hispanic or latino")))
} # test.loadFiles
#----------------------------------------------------------------------------------------------------
# Exercise SttrDataPackage:::.loadTables() against a live "iDEMOdz" ODBC
# database.  Not called from runTests(), presumably because it requires
# database credentials and a running server -- confirm.
test.loadTables <- function()
{
  print("--- test.loadTables")   # BUG FIX: banner previously said test.loadFiles
  dir <- system.file(package="iDEMOdz", "extdata")
  checkTrue(file.exists(dir))
  checkTrue(file.exists(file.path(dir, "manifest.tsv")))
  # NOTE(review): hard-coded credentials; consider Sys.getenv() instead.
  credentials <- list(user="oncotest", password="password")
  stopifnot(all(c("user", "password") %in% names(credentials)))
  db <- odbcConnect("iDEMOdz", uid = credentials$user, pwd = credentials$password)
  x <- SttrDataPackage:::.loadTables(dir, db)
  # Check some gross features; some knowledge of DEMOdz's actual data is used.
  checkEquals(sort(names(x)),
              c("data.frames", "genesets", "history", "manifest", "matrices", "networks"))
  # NOTE(review): dim() below expects 10 manifest rows, yet 12 variable
  # names are listed afterwards -- confirm against the database.
  checkEquals(dim(x$manifest), c(10,11))
  checkEquals(x$manifest$variable,
              c("mtx.mrna.ueArray", "mtx.mrna.bc", "mtx.mut", "mtx.cn", "history", "ptList", "catList",
                "tbl.ptHistory", "mtx.prot", "mtx.meth", "genesets", "g.markers.json"))
  checkEquals(x$manifest$class,
              c("matrix", "matrix", "matrix", "matrix", "list", "list", "list", "data.frame", "matrix",
                "matrix", "list", "character"))
  checkEquals(length(x$matrices), 6)
  checkEquals(length(x$data.frames), 1)
  checkEquals(eventCount(x$history), 201)
  checkEquals(length(x$genesets), 3)
  # Spot-check finer details: first, the expression matrix.
  checkEquals(x$manifest$variable[1], "mtx.mrna.ueArray")
  checkEquals(head(rownames(x$matrices[[1]]), n=3), c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028"))
  checkEquals(head(colnames(x$matrices[[1]]), n=3), c("EDIL3", "EED", "EEF2"))
  checkEqualsNumeric(x$manifest$minValue[1], min(x$matrices[[1]]), tolerance=1e-6)
  checkEqualsNumeric(x$manifest$maxValue[1], max(x$matrices[[1]]), tolerance=1e-6)
  # Patient history must be present in the manifest.
  checkTrue("history" %in% x$manifest$variable)
  # An arbitrary choice of history event 12.
  checkEquals(geteventList(x$history)[[12]], list(PatientID="TCGA.06.0201", PtNum=369, study="TCGAgbm", Name="Birth", Fields=list(date="12/11/1943", gender="female", race="white", ethnicity="not hispanic or latino")))
} # test.loadTables
#----------------------------------------------------------------------------------------------------
test.getPatientList <- function()
{
print("--- test.getPatientList")
dp <- DEMOdz();
x <- getPatientList(dp)
checkEquals(class(x), "list")
checkEquals(length(x), 20)
dateEvents = x[[12]]$dateEvents; rownames(dateEvents) = 1:4
print(dateEvents)
checkEquals(dateEvents$name,c("Birth", "Diagnosis", "Pathology", "Status"))
checkEquals(dateEvents$date,as.Date(c("1943-12-11", "1995-01-01", "1995-01-01", "1995-01-13"), format="%Y-%m-%d"))
checkEquals(dateEvents$eventOrder,c("single","single","single","single"))
checkEquals(dateEvents$eventID,c("event12", "event71", "event182", "event93"))
checkEquals(x[[12]]$noDateEvents,data.frame(name=c("Encounter", "Encounter"), eventID=c("event151", "event152")))
} # test.getPatientList
#----------------------------------------------------------------------------------------------------
test.getPatientTable <- function()
{
print("--- test.getPatientTable")
dp <- DEMOdz();
x <- getPatientTable(dp)
checkEquals(class(x), "data.frame")
checkEquals(nrow(x), 20)
} # test.getPatientTable
#----------------------------------------------------------------------------------------------------
# Check the gene-set accessors on the DEMOdz demo package.
test.getGeneSets <- function()
{
print("--- test.getGeneSets")
dz <- DEMOdz();
names <- getGeneSetNames(dz)
checkEquals(sort(names), c("random.24", "random.40", "test4"))
# NOTE(review): genes.24 is fetched but never asserted on; this only
# verifies that the call does not error -- consider adding a check.
genes.24 <- getGeneSetGenes(dz, "random.24")
} # test.getGeneSets
#----------------------------------------------------------------------------------------------------
if(!interactive())
runTests()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSLangmuir2analysis.R
\name{SSLangmuir2analysis}
\alias{SSLangmuir2analysis}
\title{Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir Second Linear Model}
\usage{
SSLangmuir2analysis(Ce, Qe)
}
\arguments{
\item{Ce}{the numerical value for the equilibrium capacity}
\item{Qe}{the numerical value for the adsorbed capacity}
}
\value{
the nonlinear regression via selfStart, initial starting values for parameters
based on Langmuir second linear model, predicted parameter values, and
model error analysis
}
\description{
The Langmuir isotherm is described to be the most useful and
simplest isotherm for both chemical adsorption and physical adsorption. It
assumes that there is uniform adsorption energy onto the monolayer surface
and that there would be no interaction between the adsorbate and the surface.
}
\examples{
Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
SSLangmuir2analysis(Ce,Qe)
}
\references{
Langmuir, I. (1918) <doi:10.1021/ja01269a066> The adsorption of
gases on plane surfaces of glass, mica and platinum. Journal of the American
Chemical Society, 1361-1403.
}
\author{
Paul Angelo C. Manlapaz
Chester C. Deocaris
}
|
/man/SSLangmuir2analysis.Rd
|
no_license
|
cran/PUPAIM
|
R
| false
| true
| 1,362
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSLangmuir2analysis.R
\name{SSLangmuir2analysis}
\alias{SSLangmuir2analysis}
\title{Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir Second Linear Model}
\usage{
SSLangmuir2analysis(Ce, Qe)
}
\arguments{
\item{Ce}{the numerical value for the equilibrium capacity}
\item{Qe}{the numerical value for the adsorbed capacity}
}
\value{
the nonlinear regression via selfStart, initial starting values for parameters
based on Langmuir second linear model, predicted parameter values, and
model error analysis
}
\description{
The Langmuir isotherm is described to be the most useful and
simplest isotherm for both chemical adsorption and physical adsorption. It
assumes that there is uniform adsorption energy onto the monolayer surface
and that there would be no interaction between the adsorbate and the surface.
}
\examples{
Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
SSLangmuir2analysis(Ce,Qe)
}
\references{
Langmuir, I. (1918) <doi:10.1021/ja01269a066> The adsorption of
gases on plane surfaces of glass, mica and platinum. Journal of the American
Chemical Society, 1361-1403.
}
\author{
Paul Angelo C. Manlapaz
Chester C. Deocaris
}
|
# Exploratory Data Analysis -- plot 2: Global Active Power over time for
# 2007-02-01 and 2007-02-02, saved as plot2.png (480x480).
# na.strings = "?": the raw file encodes missing measurements as "?";
# reading them as NA avoids coercion warnings in as.numeric() below.
household_power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Keep only the two target dates (dates are in d/m/yyyy text form).
household_power <- household_power[household_power$Date %in% c("1/2/2007", "2/2/2007"), ]
Global_active_power <- as.numeric(household_power$Global_active_power)
# Combine the Date and Time columns into timestamps for the x axis.
datetime <- strptime(paste(household_power$Date, household_power$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
nickfranciose/ExData_Plotting1
|
R
| false
| false
| 534
|
r
|
# Exploratory Data Analysis -- plot 2: Global Active Power over time for
# 2007-02-01 and 2007-02-02, saved as plot2.png (480x480).
# na.strings = "?": the raw file encodes missing measurements as "?";
# reading them as NA avoids coercion warnings in as.numeric() below.
household_power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Keep only the two target dates (dates are in d/m/yyyy text form).
household_power <- household_power[household_power$Date %in% c("1/2/2007", "2/2/2007"), ]
Global_active_power <- as.numeric(household_power$Global_active_power)
# Combine the Date and Time columns into timestamps for the x axis.
datetime <- strptime(paste(household_power$Date, household_power$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
## These functions enable the caching of a matrix's
## inverse to prevent costly recalculation. It assumes
## that the matrices passed in are invertible so no
## error-checking is done.
## makeCacheMatrix - create a matrix object that is
## able to cache its inverse.
## It has getter and setter methods for the matrix and
## the inverse.
## Input : matrix 'x'
## Output : cache matrix
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv <- NULL
  # Replace the stored matrix and invalidate the cached inverse, since
  # it no longer corresponds to the new matrix.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a precomputed inverse.
  # FIX: parameter renamed from `solve` to `inverse` so it no longer
  # shadows base::solve; call sites in this file use it positionally,
  # so behavior is unchanged.
  setInv <- function(inverse) inv <<- inverse
  # Return the cached inverse, or NULL if none has been stored.
  getInv <- function() inv
  # Expose the four accessors as a named list.
  list(set = set, get = get,
       setInv = setInv,
       getInv = getInv)
}
## This function uses a cached matrix object created by
## the above function to get the inverse of a matrix,
## either by calculating it or retrieving it from a cached
## value if it has already been calculated.
## Input : cache matrix 'x'
## Output : inverse of x (plus will cache value for later access)
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  # A non-NULL cached value can be returned immediately.
  if (!is.null(inv)) {
    message("Retrieving cached data.")
    return(inv)
  }
  # Not cached yet: compute the inverse, cache it, and return it.
  # FIX: forward `...` to solve() (the signature accepts extra arguments
  # but the original dropped them); local renamed from `matrix` to `m`
  # to stop shadowing base::matrix.
  m <- x$get()
  inv <- solve(m, ...)
  x$setInv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
Eureka25/ProgrammingAssignment2
|
R
| false
| false
| 1,642
|
r
|
## These functions enable the caching of a matrix's
## inverse to prevent costly recalculation. It assumes
## that the matrices passed in are invertible so no
## error-checking is done.
## makeCacheMatrix - create a matrix object that is
## able to cache its inverse.
## It has getter and setter methods for the matrix and
## the inverse.
## Input : matrix 'x'
## Output : cache matrix
## Constructs a cache-aware matrix wrapper: stores a matrix together with a
## lazily filled cached inverse, exposed through getter/setter closures.
## No invertibility checking is performed on 'x'.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # inverse not computed yet
  set <- function(y) {
    # Swapping the matrix invalidates any previously cached inverse
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setInv <- function(value) {
    cached <<- value
  }
  getInv <- function() {
    cached
  }
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## This function uses a cached matrix object created by
## the above function to get the inverse of a matrix,
## either by calculating it or retrieving it from a cached
## value if it has already been calculated.
## Input : cache matrix 'x'
## Output : inverse of x (plus will cache value for later access)
## Return the inverse of the cache-matrix 'x' created by makeCacheMatrix.
## The inverse is computed with solve() on first use and retrieved from the
## cache on subsequent calls.
##
## Input : cache matrix 'x'; '...' is forwarded to solve()
## Output: inverse of x (cached for later access)
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  # Cached value present: skip recomputation entirely.
  if (!is.null(inv)) {
    message("Retrieving cached data.")
    return(inv)
  }
  # Cache miss: compute, store via setInv, and return the inverse.
  # Fix: forward '...' to solve() (it was silently dropped before) and
  # avoid shadowing base::matrix with a local of the same name.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInv(inv)
  inv
}
|
# Fuzzer-generated regression input (libFuzzer/valgrind reproduction case)
# for the non-exported palm:::pbc_distances. The extreme/denormal doubles
# and NaNs are deliberate fuzzer payloads; requires the 'palm' package.
testlist <- list(lims = structure(c(1.24163114000614e-77, 4.77948086237667e-299, 7.09454157885966e-304, 1.3906712444562e-309, 2.40380942855972e-265, 7.2911185379979e-304, 5.59680502928481e-275, 7.29112201963189e-304, 5.60525372017014e-228, 1.40825002853428e+135, 5.48612408923109e+303, 1.12126408607488e-308), .Dim = c(2L, 6L)), points = structure(c(NaN, NaN, 9.73486789063095e-309, 1.63016845484697e+68), .Dim = c(2L, 2L)))
# Invoke the target with the stored arguments and print the structure of
# the result (crash-reproduction harness, not a unit test).
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987573-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 491
|
r
|
testlist <- list(lims = structure(c(1.24163114000614e-77, 4.77948086237667e-299, 7.09454157885966e-304, 1.3906712444562e-309, 2.40380942855972e-265, 7.2911185379979e-304, 5.59680502928481e-275, 7.29112201963189e-304, 5.60525372017014e-228, 1.40825002853428e+135, 5.48612408923109e+303, 1.12126408607488e-308), .Dim = c(2L, 6L)), points = structure(c(NaN, NaN, 9.73486789063095e-309, 1.63016845484697e+68), .Dim = c(2L, 2L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
# Fuzzer-generated regression input (libFuzzer/valgrind reproduction case)
# for the non-exported grattan:::anyOutside; requires the 'grattan' package.
testlist <- list(a = 458342399L, b = -1L, x = -1L)
# Run the target with the stored arguments and print the result structure.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055480-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 111
|
r
|
testlist <- list(a = 458342399L, b = -1L, x = -1L)
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
# Titanic survival prediction with a single rpart classification tree.
# Reads Kaggle train/test CSVs, fits a tree on the training data, predicts
# survival for the test set and writes a submission file.
# install packages (one-time setup; comment out after first run)
install.packages('rpart')
install.packages('rpart.plot')   # fix: rpart.plot is loaded below but was never installed
install.packages('rattle')
install.packages('RColorBrewer')
library(rpart)                   # fix: rpart() is called directly; attach rpart explicitly
library(rattle)
library(rpart.plot)
library(RColorBrewer)
## read .csv files
test <- read.csv("test.csv", stringsAsFactors = FALSE)
train <- read.csv("train.csv", stringsAsFactors = FALSE)
## create "Survived" column for the test dataset, filled with 0
test$Survived <- rep(0, nrow(test))  # generalized: was hard-coded to 418 rows
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
## fit a classification tree for survival on the training data
fit <- rpart(Survived ~ Pclass +
               Sex + Age +
               SibSp + Parch +
               Fare + Embarked,
             data = train, method = "class")
fancyRpartPlot(fit)
## apply the fitted tree to the test data
prediction <- predict(fit, test, type = "class")
submit <- data.frame(PassengerId = test$PassengerId, Survived = prediction)
## save the predictions as a new .csv file
write.csv(submit, file = "first_decision_tree_outcome.csv", row.names = FALSE)
|
/titanic1.R
|
no_license
|
cwang0129/datasciencecoursera
|
R
| false
| false
| 1,028
|
r
|
#install packages
install.packages('rpart')
install.packages('rattle')
install.packages('RColorBrewer')
library(rattle)
library(rpart.plot)
library(RColorBrewer)
## read .csv files
test <- read.csv("test.csv", stringsAsFactors = FALSE)
train <- read.csv("train.csv", stringsAsFactors = FALSE)
##create "Survived" column for test dataset with 0 in each rows
test$Survived <- rep(0,418)
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
##using train dataset to run the % of survival rate in different varible combination
fit <- rpart(Survived ~ Pclass +
Sex + Age +
SibSp + Parch +
Fare + Embarked,
data=train, method="class")
fancyRpartPlot(fit)
##apply the % of survival rate on test data
prediction <- predict(fit, test,type = "class")
submit <- data.frame(PassengerId = test$PassengerId, Survived = prediction)
##save the table as new .csv file
write.csv(submit, file = "first_decision_tree_outcome.csv", row.names = FALSE)
|
#STEPS FOR CREATING AN R PACKAGE ON THE FLY
#SOURCE: https://hilaryparker.com/2014/04/29/writing-an-r-package-from-scratch/
# 1. INSTALL DEVTOOLS AND LOAD IT IN ORDER TO BE ABLE TO DOWNLOAD PACKAGES FROM GITHUB
#install.packages("devtools")
library(devtools)
# 2. INSTALL ROXYGEN FROM REPO AND LOAD IT
#install.packages("roxygen2")
library(roxygen2)
# 3. CREATE A DIRECTORY FOR THE PACKAGE
setwd("somelocation/Desktop")
create("packagename")
# 4. CREATE AN .R SCRIPT WITH THE FUNCTION THAT YOU DESIRE TO MAKE INTO A PACKAGE
# 5. ADD DOCUMENTATION AT THE TOP OF THE .R SCRIPT AS FOLLOWS:
#'A title for the package
#'
#'Some comment about the package and what it does
#'
#'@param ParamA Comment about ParamA
#'@param ParamB Comment about ParamB
#'@keywords Keyword about the function
#'@export Marks the function as exported, i.e. visible to package users via NAMESPACE
#'@examples
#'examples to make it clear to the user on how to use the parameters vis-a-vis the function with a toy example
# 6. PLACE THE .R SCRIPT IN THE FOLDER THAT WAS JUST CREATED INTO THE FOLDER CALLED "R"
# 7. SETWD TO THE PACKAGE FOLDER AND USE DOCUMENT() TO CREATE DOCUMENTS NECESSARY FOR THE PACKAGE
setwd("somelocation/Desktop/GitHub/packagename") #points to the parent location where the function is located. If this is a change in an existing function, this should point to the GIT clone folder
document() #creates/edits the .Rd function inside the "man" folder. Do this every time there is a change in the function or the package will not update when it is uploaded to GIT.
# 8. TEST THE PACKAGE LOCALLY - LOCATE THE DIRECTORY WHERE THE PACKAGE FOLDER LIVES - INSTALL
setwd("..")
install("packagename")
# 9. UPLOAD TO GIT USING RSTUDIO (This is only if it is a new package you built locally)
# Depending on the situation, refer to chapter 16, 17 or 18 here: http://happygitwithr.com
|
/pkgsteps.R
|
no_license
|
faffr/pkgsteps
|
R
| false
| false
| 1,879
|
r
|
#STEP FOR CREATING A R PACKAGE ON THE FLY
#SOURCE: https://hilaryparker.com/2014/04/29/writing-an-r-package-from-scratch/
# 1. INSTALL DEVTOOLS AND LOAD IT IN ORDER TO BE ABLE TO DOWNLOAD PACKAGES FROM GITHUB
#install.packages("devtools")
library(devtools)
# 2. INSTALL ROXYGEN FROM REPO AND LOAD IT
#install.packages("roxygen2")
library(roxygen2)
# 3. CREATE A DIRECTORY FOR THE PACKAGE
setwd("somelocation/Desktop")
create("packagename")
# 4. CREATE AN .R SCRIPT WITH THE FUNCTION THAT YOU DESIRE TO MAKE INTO A PACKAGE
# 5. ADD DOCUMENTATION AT THE TOP OF THE .R SCRIPT AS FOLLOWS:
#'A title for the package
#'
#'Some comment about the package and what it does
#'
#'@param ParamA Comment about ParamA
#'@param ParamB Comment about ParamB
#'@keywords Keyword about the function
#'@export Not sure what this is
#'@examples
#'examples to make it clear to the user on how to use the parameters vis-a-vis the function with a toy example
# 6. PLACE THE .R SCRIPT IN THE FOLDER THAT WAS JUST CREATED INTO THE FOLDER CALLED "R"
# 7. SETWD TO THE PACKAGE FOLDER AND USE DOCUMENT() TO CREATE DOCUMENTS NECESSARY FOR THE PACKAGE
setwd("somelocation/Desktop/GitHub/packagename") #points to the parent location where the function is located. If this is a change in an existing function, this should point to the GIT clone folder
document() #creates/edits the .Rd function inside the "man" folder. Do this every time there is a change in the function or the package will not update when it is uploaded to GIT.
# 8. TEST THE PACKAGE LOCALLY - LOCATE THE DIRECTORY WHERE THE PACKAGE FOLDER LIVES - INSTALL
setwd("..")
install("packagename")
# 9. UPLOAD TO GIT USING THE RSTUDIO (This is only if it is a new package you built locally)
# Depending on the situation, refer to chapter 16, 17 or 18 here: http://happygitwithr.com
|
###############################
### ###
### WIFI LOCATION ###
### ###
### ###
### ###
###############################
# Normalizes the WAP (wireless access point) signal columns of the UJI
# indoor-location datasets row-wise to [0, 1] and writes the results out.
library("tidyr")
library("dplyr")
library("taRifx")
library("lubridate")
library("rpart")
library("caret")
WD <- "/Users/sediaz/Documents/Ubiqum/Curso online Primavera 2017/R/Course3Task3"
setwd(WD)
source("Functions.R")
WapsDatasetCut <- read.csv("data/OnlyWapCut.csv", sep=",", header=TRUE, stringsAsFactors=FALSE)
WapsDataset <- read.csv("data/OnlyWap.csv", sep=",", header=TRUE, stringsAsFactors=FALSE)
set.seed(123)
# The first 520 columns are assumed to be the WAP signal readings --
# TODO confirm against the source CSVs
OnlyWap <- WapsDataset[,1:520]
OnlyWapCut <- WapsDatasetCut[,1:520]
# Row-wise min-max normalization across the WAP columns.
# NOTE(review): rows where max(x) == min(x) divide by zero and yield
# NaN/Inf -- confirm such rows cannot occur or handle them explicitly.
Normalized_Waps <- t(apply(OnlyWap, 1, function(x)(x-min(x))/(max(x)-min(x))))
Normalized_WapsCut <- t(apply(OnlyWapCut, 1, function(x)(x-min(x))/(max(x)-min(x))))
# Write the normalized columns back and persist both datasets
WapsDataset[,1:520] <- Normalized_Waps
WapsDatasetCut[,1:520] <- Normalized_WapsCut
write.csv(WapsDataset,file="data/OnlyWapNorm.csv",row.names=FALSE)
write.csv(WapsDatasetCut,file="data/OnlyWapNormCut.csv",row.names=FALSE)
|
/NormalizeDataset.R
|
no_license
|
MachinelearningBcn/UJILocation
|
R
| false
| false
| 1,111
|
r
|
###############################
### ###
### WIFI LOCATION ###
### ###
### ###
### ###
###############################
library("tidyr")
library("dplyr")
library("taRifx")
library("lubridate")
library("rpart")
library("caret")
WD <- "/Users/sediaz/Documents/Ubiqum/Curso online Primavera 2017/R/Course3Task3"
setwd(WD)
source("Functions.R")
WapsDatasetCut <- read.csv("data/OnlyWapCut.csv", sep=",", header=TRUE, stringsAsFactors=FALSE)
WapsDataset <- read.csv("data/OnlyWap.csv", sep=",", header=TRUE, stringsAsFactors=FALSE)
set.seed(123)
OnlyWap <- WapsDataset[,1:520]
OnlyWapCut <- WapsDatasetCut[,1:520]
Normalized_Waps <- t(apply(OnlyWap, 1, function(x)(x-min(x))/(max(x)-min(x))))
Normalized_WapsCut <- t(apply(OnlyWapCut, 1, function(x)(x-min(x))/(max(x)-min(x))))
WapsDataset[,1:520] <- Normalized_Waps
WapsDatasetCut[,1:520] <- Normalized_WapsCut
write.csv(WapsDataset,file="data/OnlyWapNorm.csv",row.names=FALSE)
write.csv(WapsDatasetCut,file="data/OnlyWapNormCut.csv",row.names=FALSE)
|
# Merge 2015 NRI (Networked Readiness Index) scores with UNSD energy-supply
# data and run simple bivariate regressions of energy supply on NRI.
library(tidyverse)
NRI_data_2015 <- read_csv2("Hauptseminararbeit/Data/Raw/NRI/NRI_SH3.csv") %>%
  drop_na()
# The raw UNSD export arrives as one comma-packed column; split it into
# fields, re-join the decimal halves of 'value', keep relevant columns,
# and restrict to the year 2015.
EC_data_2015_raw <- read_csv2("Hauptseminararbeit/Data/Raw/UNSD_Data/07_2020_UNSD__production_trade_energy_supply.csv")%>%
  janitor::clean_names('snake') %>%
  separate(number_region_country_area_year_series_value_footnotes_source, c("number", "country", "year", "series",
           "value", "value2", "footnotes", "source"), sep = ",") %>%
  unite(value, value2, col = "value", sep = ",", , remove = TRUE, na.rm = FALSE) %>%
  select(number, country, year, value, series) %>%
  filter(year == 2015)
write.csv(EC_data_2015_raw, "Hauptseminararbeit/Data/Raw/EC/EC_2015_data_raw.csv")
# a manual intermediate step in Excel happened here
EC_data_2015 <- read_csv2("Hauptseminararbeit/Data/Raw/EC/EC_2015_data_raw.csv") %>%
  filter(number >= 31) %>%
  select( -number, -year) %>%
  pivot_wider(names_from = series, values_from = value) %>%
  janitor::clean_names('snake') %>%
  rename( total_supply = total_supply_petajoules, supply_per_capita = supply_per_capita_gigajoules) %>%
  select(country, total_supply, supply_per_capita)
write.csv(EC_data_2015, "Hauptseminararbeit/Data/Wrangled/SH3/EC_2015_data.csv")
NRI_EC_merge <- left_join(NRI_data_2015, EC_data_2015)
write.csv(NRI_EC_merge, "Hauptseminararbeit/Data/Wrangled/SH3/NRI_EC_merge.csv")
# total_supply
NRI_EC_scatterplot_1 <- ggplot(data = NRI_EC_merge) +
  geom_point(aes(x = NRI_value, y = total_supply))
NRI_EC_scatterplot_1 # --> experiment with the axis scaling
NRI_EC_scatterplot_2 <- ggplot(data = NRI_EC_merge, aes(x = NRI_value, y = total_supply)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)
NRI_EC_scatterplot_2
cor(x = NRI_EC_merge$NRI_value, y = NRI_EC_merge$total_supply,
    use = "pairwise.complete.obs")
bivar_reg_NRI_EC <- lm(total_supply ~ NRI_value, data = NRI_EC_merge)
summary(bivar_reg_NRI_EC)
# per_capita
NRI_EC_scatterplot_1 <- ggplot(data = NRI_EC_merge) +
  geom_point(aes(x = NRI_value, y = supply_per_capita))
NRI_EC_scatterplot_1 # --> experiment with the axis scaling
NRI_EC_scatterplot_2 <- ggplot(data = NRI_EC_merge, aes(x = NRI_value, y = supply_per_capita)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)
NRI_EC_scatterplot_2
cor(x = NRI_EC_merge$NRI_value, y = NRI_EC_merge$supply_per_capita,
    use = "pairwise.complete.obs")
bivar_reg_NRI_EC <- lm(supply_per_capita ~ NRI_value, data = NRI_EC_merge)
summary(bivar_reg_NRI_EC)
# weak results pointing in the direction that the hypothesis could hold
# per-capita supply is consistently the more informative measure
# WHEN USING THIS, REMEMBER THE PETAJOULE UNIT CONVERSION
# In the text, discuss both total and per-capita supply, and also check
# whether energy consumption is skewed by foreign usage, e.g. server farms
|
/Hauptseminararbeit/SRC/code_first_try_SH3.R
|
no_license
|
BergJakob/Hauptseminar
|
R
| false
| false
| 2,830
|
r
|
library(tidyverse)
NRI_data_2015 <- read_csv2("Hauptseminararbeit/Data/Raw/NRI/NRI_SH3.csv") %>%
drop_na()
EC_data_2015_raw <- read_csv2("Hauptseminararbeit/Data/Raw/UNSD_Data/07_2020_UNSD__production_trade_energy_supply.csv")%>%
janitor::clean_names('snake') %>%
separate(number_region_country_area_year_series_value_footnotes_source, c("number", "country", "year", "series",
"value", "value2", "footnotes", "source"), sep = ",") %>%
unite(value, value2, col = "value", sep = ",", , remove = TRUE, na.rm = FALSE) %>%
select(number, country, year, value, series) %>%
filter(year == 2015)
write.csv(EC_data_2015_raw, "Hauptseminararbeit/Data/Raw/EC/EC_2015_data_raw.csv")
#hier war Zwischenschritt in Excel
EC_data_2015 <- read_csv2("Hauptseminararbeit/Data/Raw/EC/EC_2015_data_raw.csv") %>%
filter(number >= 31) %>%
select( -number, -year) %>%
pivot_wider(names_from = series, values_from = value) %>%
janitor::clean_names('snake') %>%
rename( total_supply = total_supply_petajoules, supply_per_capita = supply_per_capita_gigajoules) %>%
select(country, total_supply, supply_per_capita)
write.csv(EC_data_2015, "Hauptseminararbeit/Data/Wrangled/SH3/EC_2015_data.csv")
NRI_EC_merge <- left_join(NRI_data_2015, EC_data_2015)
write.csv(NRI_EC_merge, "Hauptseminararbeit/Data/Wrangled/SH3/NRI_EC_merge.csv")
#total_supply
NRI_EC_scatterplot_1 <- ggplot(data = NRI_EC_merge) +
geom_point(aes(x = NRI_value, y = total_supply))
NRI_EC_scatterplot_1 #--> mit Skalierung spielen
NRI_EC_scatterplot_2 <- ggplot(data = NRI_EC_merge, aes(x = NRI_value, y = total_supply)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE)
NRI_EC_scatterplot_2
cor(x = NRI_EC_merge$NRI_value, y = NRI_EC_merge$total_supply,
use = "pairwise.complete.obs")
bivar_reg_NRI_EC <- lm(total_supply ~ NRI_value, data = NRI_EC_merge)
summary(bivar_reg_NRI_EC)
# per_capita
NRI_EC_scatterplot_1 <- ggplot(data = NRI_EC_merge) +
geom_point(aes(x = NRI_value, y = supply_per_capita))
NRI_EC_scatterplot_1 #--> mit Skalierung spielen
NRI_EC_scatterplot_2 <- ggplot(data = NRI_EC_merge, aes(x = NRI_value, y = supply_per_capita)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE)
NRI_EC_scatterplot_2
cor(x = NRI_EC_merge$NRI_value, y = NRI_EC_merge$supply_per_capita,
use = "pairwise.complete.obs")
bivar_reg_NRI_EC <- lm(supply_per_capita ~ NRI_value, data = NRI_EC_merge)
summary(bivar_reg_NRI_EC)
#leichte Ergebnisse in Richtung, dass hypo stimmen könnte
#per_capita immer ausagekräftiger
#BEI VERWENDEN UMRECHNUNG PETAJOULE DRANDENKEN
#Im Text auf total und capita supply eingehen und auch prüfen ob bei energieverbrauch der vllt ausländische
#Verbrauch von Serverfarmen usw betroffen ist
|
#' Run R CMD check from R and Capture Results
#'
#' Run R CMD check from R programmatically, and capture the results of the
#' individual checks.
#'
#' @docType package
#' @name rcmdcheck
NULL
#' Run `R CMD check` on a package or a directory
#'
#' @param path Path to a package tarball or a directory.
#' @param quiet Whether to print check output during checking.
#' @param args Character vector of arguments to pass to
#' `R CMD check`. (Note that instead of the `--output` option you
#' should use the `check_dir` argument, because `--output` cannot
#' deal with spaces and other special characters on Windows.)
#' @param build_args Character vector of arguments to pass to
#' `R CMD build`
#' @param check_dir Path to a directory where the check is performed.
#' If this is `NULL`, then a temporary directory is used, which
#' is cleaned up when the returned object is garbage collected.
#' @param libpath The library path to set for the check.
#' The default uses the current library path.
#' @param repos The `repos` option to set for the check.
#' This is needed for cyclic dependency checks if you use the
#' `--as-cran` argument. The default uses the current value.
#' @param timeout Timeout for the check, in seconds, or as a
#' [base::difftime] object. If it is not finished before this, it will be
#' killed. `Inf` means no timeout. If the check is timed out,
#' that is added as an extra error to the result object.
#' @param error_on Whether to throw an error on `R CMD check` failures.
#' Note that the check is always completed (unless a timeout happens),
#' and the error is only thrown after completion. If `"never"`, then
#' no errors are thrown. If `"error"`, then only `ERROR` failures
#' generate errors. If `"warning"`, then `WARNING` failures generate
#' errors as well. If `"note"`, then any check failure generates an
#' error.
#' @return An S3 object (list) with fields `errors`,
#' `warnings` and `notes`. These are all character
#' vectors containing the output for the failed check.
#'
#' @export
#' @importFrom rprojroot find_package_root_file
#' @importFrom withr with_dir
#' @importFrom callr rcmd_safe
#' @importFrom desc desc
# Build the package, run `R CMD check` on the resulting tarball, parse the
# output into a structured object, and optionally escalate failures to R
# errors according to `error_on` (see roxygen block above for parameters).
rcmdcheck <- function(path = ".", quiet = FALSE, args = character(),
                      build_args = character(), check_dir = NULL,
                      libpath = .libPaths(), repos = getOption("repos"),
                      timeout = Inf, error_on =
                        c("never", "error", "warning", "note")) {
  error_on <- match.arg(error_on)
  # Accept either a package source directory or a pre-built tarball
  if (file.info(path)$isdir) {
    path <- find_package_root_file(path = path)
  } else {
    path <- normalizePath(path)
  }
  # Without an explicit check_dir, check inside a temp dir and arrange for
  # cleanup when the result object is garbage collected (see below)
  if (is.null(check_dir)) {
    check_dir <- tempfile()
    cleanup <- TRUE
  } else {
    cleanup <- FALSE
  }
  targz <- build_package(path, check_dir, build_args = build_args,
                         libpath = libpath, quiet = quiet)
  start_time <- Sys.time()
  desc <- desc(targz)
  # Run the check from the tarball's own directory so relative paths and
  # the --output location behave predictably
  out <- with_dir(
    dirname(targz),
    do_check(targz,
             package = desc$get("Package")[[1]],
             args = args,
             libpath = libpath,
             repos = repos,
             quiet = quiet,
             timeout = timeout
    )
  )
  on.exit(unlink(out$session_info, recursive = TRUE), add = TRUE)
  if (isTRUE(out$timeout)) message("R CMD check timed out")
  # Parse the raw check output into the structured rcmdcheck result
  res <- new_rcmdcheck(
    stdout = out$result$stdout,
    stderr = out$result$stderr,
    description = desc,
    status = out$result$status,
    duration = duration(start_time),
    timeout = out$result$timeout,
    session_info = out$session_info
  )
  # Automatically delete temporary files when this object disappears
  if (cleanup) res$cleaner <- auto_clean(check_dir)
  # May stop(), depending on error_on and the check outcome
  handle_error_on(res, error_on)
  res
}
#' @importFrom withr with_envvar
# Internal worker: run `R CMD check` on the built tarball in a subprocess
# (via callr), using a fake user profile that captures session information
# into a temp file. Returns both the check result and that file's path.
do_check <- function(targz, package, args, libpath, repos,
                     quiet, timeout) {
  # File the fake profile writes session information into
  session_output <- tempfile()
  profile <- make_fake_profile(session_output = session_output)
  on.exit(unlink(profile), add = TRUE)
  if (!quiet) cat_head("R CMD check")
  # Point the subprocess at our profile and library path;
  # fail_on_status = FALSE so check failures are reported, not thrown here
  res <- with_envvar(
    c(R_PROFILE_USER = profile,
      R_LIBS_USER = paste(libpath, collapse = .Platform$path.sep)),
    rcmd_safe(
      "check",
      cmdargs = c(basename(targz), args),
      user_profile = TRUE,
      repos = repos,
      block_callback = if (!quiet) block_callback(),
      spinner = !quiet,
      timeout = timeout,
      fail_on_status = FALSE
    )
  )
  list(result = res, session_info = session_output)
}
# Escalate check failures to R errors according to the user's `error_on`
# choice. A timeout always errors; otherwise ERRORs, WARNINGs and NOTEs
# error only at increasingly permissive thresholds. The result is printed
# before the error is signalled.
handle_error_on <- function(res, error_on) {
  # Map the error_on choice onto an escalation level: a higher level means
  # more failure categories are promoted to R errors.
  level <- c(never = 0, error = 1, warning = 2, note = 3)[error_on]
  fail <- function(msg) {
    print(res)
    stop(make_error(res, msg))
  }
  if (isTRUE(res$timeout)) {
    fail("R CMD check timed out")
  } else if (level >= 1 && length(res$errors)) {
    fail("R CMD check found ERRORs")
  } else if (level >= 2 && length(res$warnings)) {
    fail("R CMD check found WARNINGs")
  } else if (level >= 3 && length(res$notes)) {
    fail("R CMD check found NOTEs")
  }
}
# Build a classed R condition describing a failed check. The class vector
# records which failure categories occurred (timeout/error/warning/note),
# always ending in "rcmdcheck_failure", "error", "condition" so the object
# can be thrown with stop() and caught by class.
make_error <- function(res, msg) {
  categories <- character(0)
  if (isTRUE(res$timeout))  categories <- c(categories, "rcmdcheck_timeout")
  if (length(res$errors))   categories <- c(categories, "rcmdcheck_error")
  if (length(res$warnings)) categories <- c(categories, "rcmdcheck_warning")
  if (length(res$notes))    categories <- c(categories, "rcmdcheck_note")
  structure(
    list(result = res, message = msg, call = NULL),
    class = c(categories, "rcmdcheck_failure", "error", "condition")
  )
}
|
/R/package.R
|
no_license
|
DavisVaughan/rcmdcheck
|
R
| false
| false
| 5,460
|
r
|
#' Run R CMD check from R and Capture Results
#'
#' Run R CMD check form R programatically, and capture the results of the
#' individual checks.
#'
#' @docType package
#' @name rcmdcheck
NULL
#' Run `R CMD check` on a package or a directory
#'
#' @param path Path to a package tarball or a directory.
#' @param quiet Whether to print check output during checking.
#' @param args Character vector of arguments to pass to
#' `R CMD check`. (Note that instead of the `--output` option you
#' should use the `check_dir` argument, because `--output` cannot
#' deal with spaces and other special characters on Windows.
#' @param build_args Character vector of arguments to pass to
#' `R CMD build`
#' @param check_dir Path to a directory where the check is performed.
#' If this is not `NULL`, then the a temporary directory is used, that
#' is cleaned up when the returned object is garbage collected.
#' @param libpath The library path to set for the check.
#' The default uses the current library path.
#' @param repos The `repos` option to set for the check.
#' This is needed for cyclic dependency checks if you use the
#' `--as-cran` argument. The default uses the current value.
#' @param timeout Timeout for the check, in seconds, or as a
#' [base::difftime] object. If it is not finished before this, it will be
#' killed. `Inf` means no timeout. If the check is timed out,
#' that is added as an extra error to the result object.
#' @param error_on Whether to throw an error on `R CMD check` failures.
#' Note that the check is always completed (unless a timeout happens),
#' and the error is only thrown after completion. If `"never"`, then
#' no errors are thrown. If `"error"`, then only `ERROR` failures
#' generate errors. If `"warning"`, then `WARNING` failures generate
#' errors as well. If `"note"`, then any check failure generated an
#' error.
#' @return An S3 object (list) with fields `errors`,
#' `warnings` and `notes`. These are all character
#' vectors containing the output for the failed check.
#'
#' @export
#' @importFrom rprojroot find_package_root_file
#' @importFrom withr with_dir
#' @importFrom callr rcmd_safe
#' @importFrom desc desc
rcmdcheck <- function(path = ".", quiet = FALSE, args = character(),
build_args = character(), check_dir = NULL,
libpath = .libPaths(), repos = getOption("repos"),
timeout = Inf, error_on =
c("never", "error", "warning", "note")) {
error_on <- match.arg(error_on)
if (file.info(path)$isdir) {
path <- find_package_root_file(path = path)
} else {
path <- normalizePath(path)
}
if (is.null(check_dir)) {
check_dir <- tempfile()
cleanup <- TRUE
} else {
cleanup <- FALSE
}
targz <- build_package(path, check_dir, build_args = build_args,
libpath = libpath, quiet = quiet)
start_time <- Sys.time()
desc <- desc(targz)
out <- with_dir(
dirname(targz),
do_check(targz,
package = desc$get("Package")[[1]],
args = args,
libpath = libpath,
repos = repos,
quiet = quiet,
timeout = timeout
)
)
on.exit(unlink(out$session_info, recursive = TRUE), add = TRUE)
if (isTRUE(out$timeout)) message("R CMD check timed out")
res <- new_rcmdcheck(
stdout = out$result$stdout,
stderr = out$result$stderr,
description = desc,
status = out$result$status,
duration = duration(start_time),
timeout = out$result$timeout,
session_info = out$session_info
)
# Automatically delete temporary files when this object disappears
if (cleanup) res$cleaner <- auto_clean(check_dir)
handle_error_on(res, error_on)
res
}
#' @importFrom withr with_envvar
do_check <- function(targz, package, args, libpath, repos,
quiet, timeout) {
session_output <- tempfile()
profile <- make_fake_profile(session_output = session_output)
on.exit(unlink(profile), add = TRUE)
if (!quiet) cat_head("R CMD check")
res <- with_envvar(
c(R_PROFILE_USER = profile,
R_LIBS_USER = paste(libpath, collapse = .Platform$path.sep)),
rcmd_safe(
"check",
cmdargs = c(basename(targz), args),
user_profile = TRUE,
repos = repos,
block_callback = if (!quiet) block_callback(),
spinner = !quiet,
timeout = timeout,
fail_on_status = FALSE
)
)
list(result = res, session_info = session_output)
}
handle_error_on <- function(res, error_on) {
level <- c(never = 0, error = 1, warning = 2, note = 3)[error_on]
if (isTRUE(res$timeout)) {
print(res)
stop(make_error(res, "R CMD check timed out"))
} else if (length(res$errors) && level >= 1) {
print(res)
stop(make_error(res, "R CMD check found ERRORs"))
} else if (length(res$warnings) && level >= 2) {
print(res)
stop(make_error(res, "R CMD check found WARNINGs"))
} else if (length(res$notes) && level >= 3) {
print(res)
stop(make_error(res, "R CMD check found NOTEs"))
}
}
make_error <- function(res, msg) {
structure(
list(result = res, message = msg, call = NULL),
class = c(
if (isTRUE(res$timeout)) "rcmdcheck_timeout",
if (length(res$errors)) "rcmdcheck_error",
if (length(res$warnings)) "rcmdcheck_warning",
if (length(res$notes)) "rcmdcheck_note",
"rcmdcheck_failure",
"error",
"condition"
)
)
}
|
# reading in data
crabdata <- read.csv("https://raw.githubusercontent.com/RobertsLab/project-crab/master/data/goodsamples.csv")
crabdata
write.csv(crabdata, file = "data/crabdata.csv")
library(dplyr)
# is there a difference in RNA concentration based on whether the crab was
# in the cold-water treatment versus warm water?
# NOTE(review): this pipeline looks broken and will not run as written --
# the first summarize() applies no aggregation function to
# Original_sample_conc, and the second summarize() references
# 'surveys_by_species' and 'weight', neither of which is defined anywhere
# in this script. Confirm the intended aggregation before relying on it.
Mygoodsamples <- group_by(crabdata, temperature_treatment) %>%
  summarize(average = Original_sample_conc) %>%
  summarize(surveys_by_species, avg_weight = mean(weight))
|
/scripts/0515.R
|
no_license
|
fish497-2018/Roberts-crab
|
R
| false
| false
| 499
|
r
|
#reading in data
crabdata <- read.csv("https://raw.githubusercontent.com/RobertsLab/project-crab/master/data/goodsamples.csv")
crabdata
write.csv(crabdata, file = "data/crabdata.csv")
library(dplyr)
#is there a difference in RNA concentration based on when grab was in cold water treatment versus warmwater?
Mygoodsamples <- group_by(crabdata, temperature_treatment) %>%
summarize(average = Original_sample_conc) %>%
summarize(surveys_by_species, avg_weight = mean(weight))
|
#' Get a List Element
#'
#' Extract the single element of a named list whose name matches a regular
#' expression. Errors if the list is unnamed, or if the pattern matches
#' zero or more than one name.
#'
#' @param .list a named list object.
#' @param .pattern a regular expression to be matched to a character string.
#'
#' @return a single element of a list.
#'
#' @examples
#' get_element(.list = list(first_element = 1:3, second_element = 4:6), .pattern = "second")
get_element <- function(.list, .pattern) {
  # Unnamed lists cannot be matched by name
  if (is.null(names(.list))) stop(".list must contain named elements.")
  # Names whose text matches the pattern (Perl-style regex)
  matched_names <- grep(.pattern, names(.list), perl = TRUE, value = TRUE)
  # Exactly one matching name is required for an unambiguous extraction
  if (length(matched_names) != 1) {
    stop(
      "Matched list names must be length 1. \n",
      "\t Pattern:", .pattern, "\n",
      "\t Matches:", paste(matched_names, collapse = ", \n\t\t")
    )
  }
  .list[[matched_names]]
}
|
/R/helpers.R
|
permissive
|
BWAM/validator
|
R
| false
| false
| 985
|
r
|
#' Get a List Element
#'
#' @param .list a named list object.
#' @param .pattern a regular expression to be matched to a character string.
#'
#' @return a single element of a list.
#'
#' @examples
#' get_element(.list = list(first_element = 1:3, second_element = 4:6), .pattern = "second")
get_element <- function(.list, .pattern) {
# A named list is required
if (is.null(names(.list))) stop(".list must contain named elements.")
# Identify the list names that match the provided pattern
pattern_vec <- grepl(pattern = .pattern,
x = names(.list),
perl = TRUE)
# Identify the matched names
name_vec <- names(.list)[pattern_vec]
# Ensure that only one name was matched
if (length(name_vec) != 1) {
stop(
"Matched list names must be length 1. \n",
"\t Pattern:", .pattern, "\n",
"\t Matches:", paste(name_vec, collapse = ", \n\t\t")
)
}
# Extract the element from the list
.list[[name_vec]]
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/metricsgraphics-package.R
\docType{package}
\name{metricsgraphics}
\alias{metricsgraphics}
\alias{metricsgraphics-package}
\title{An \code{htmlwidget} interface to the
\href{http://metricsgraphicsjs.org/}{MetricsGraphics.js} D3 chart library}
\description{
An \code{htmlwidget} interface to the
\href{http://metricsgraphicsjs.org/}{MetricsGraphics.js} D3 chart library
}
\author{
Bob Rudis (@hrbrmstr)
}
|
/man/metricsgraphics.Rd
|
no_license
|
mostlyeconmix/metricsgraphics
|
R
| false
| false
| 491
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/metricsgraphics-package.R
\docType{package}
\name{metricsgraphics}
\alias{metricsgraphics}
\alias{metricsgraphics-package}
\title{An \code{htmlwidget} interface to the
\href{http://metricsgraphicsjs.org/}{MetricsGraphics.js} D3 chart library}
\description{
An \code{htmlwidget} interface to the
\href{http://metricsgraphicsjs.org/}{MetricsGraphics.js} D3 chart library
}
\author{
Bob Rudis (@hrbrmstr)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icc2x3.r
\name{icc3.nointer.fn}
\alias{icc3.nointer.fn}
\title{Intraclass Correlation Coefficients ICC(3,1) and ICCa(3,1) under ANOVA Model 3 without interaction.}
\usage{
icc3.nointer.fn(dfra)
}
\arguments{
\item{dfra}{This is a data frame containing 3 columns or more. The first column contains subject numbers (some duplicates are expected,
as some subject are assumed to have assigned multiple ratings) and each of the remaining columns is associated with a particular rater and
contains its numeric ratings.}
}
\value{
This function returns a list containing the following values:\cr
1. sig2s: the subject variance component.\cr
2. sig2e: the error variance component.\cr
3. icc2r: ICC as a measure of inter-rater reliability.\cr
4. icc2a: ICC as a measure of intra-rater reliability.\cr
5. n: the number of subjects.\cr
6. r: the number of raters.\cr
7. max.rep: the maximum number of ratings per subject.\cr
8. min.rep: the minimum number of ratings per subject.\cr
9. M: the total number of ratings for all subjects and raters.\cr
10. ov.mean: the overall mean rating.
}
\description{
This function computes 2 Intraclass Correlation Coefficients ICC(3,1) and ICCa(3,1) under the mixed factorial ANOVA model
(Model 3) without any subject-rater interaction. ICC(3,1) is formulated as a measure of inter-rater reliability and ICCa(3,1)
as a measure of intra-rater reliability.
}
\examples{
#iccdata1 is a small dataset that comes with the package. Use it as follows:
library(irrICC)
iccdata1 #see what the iccdata1 dataset looks like
icc3.nointer.fn(iccdata1)
coeff <- icc3.nointer.fn(iccdata1)$icc2r #this only gives you the ICC coefficient
coeff
}
\references{
Gwet, K.L. (2014): \emph{Handbook of Inter-Rater Reliability - 4th ed.} - Equation 10.2.16 of chapter 10,
Advanced Analytics, LLC.
}
|
/man/icc3.nointer.fn.Rd
|
no_license
|
cran/irrICC
|
R
| false
| true
| 1,924
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icc2x3.r
\name{icc3.nointer.fn}
\alias{icc3.nointer.fn}
\title{Intraclass Correlation Coefficients ICC(3,1) and ICCa(3,1) under ANOVA Model 3 without interaction.}
\usage{
icc3.nointer.fn(dfra)
}
\arguments{
\item{dfra}{This is a data frame containing 3 columns or more. The first column contains subject numbers (some duplicates are expected,
as some subject are assumed to have assigned multiple ratings) and each of the remaining columns is associated with a particular rater and
contains its numeric ratings.}
}
\value{
This function returns a list containing the following 11 values:\cr
1. sig2s: the subject variance component.\cr
2. sig2e: the error variance component.\cr
3. icc2r: ICC as a measure of inter-rater relliability.\cr
4. icc2a: ICC as a measure of intra-rater reliability.\cr
5. n: the number of subjects. 6. r: the number of raters.\cr
7. max.rep: the maximum number of ratings per subject.\cr
8. min.rep: the minimum number of ratings per subjects.\cr
9. M: the total number of ratings for all subjects and raters.\cr
10. ov.mean: the overall mean rating.
}
\description{
This function computes 2 Intraclass Correlation Coefficients ICC(3,1) and ICCa(3,1) under the mixed factorial ANOVA model
(Model 3) without any subject-rater interaction. ICC(3,1) is formulated as a measure of inter-rater reliability and ICCa(3,1)
as a measure of intra-rater reliability.
}
\examples{
#iccdata1 is a small dataset that comes with the package. Use it as follows:
library(irrICC)
iccdata1 #see what the iccdata1 dataset looks like
icc3.nointer.fn(iccdata1)
coeff <- icc3.nointer.fn(iccdata1)$icc2r #this only gives you the ICC coefficient
coeff
}
\references{
Gwet, K.L. (2014): \emph{Handbook of Inter-Rater Reliability - 4th ed.} - Equation 10.2.16 of chapter 10,
Advanced Analytics, LLC.
}
|
# ------------------------------------------------------------------
# Contributed by Michel Lang, TU Dortmund
# ------------------------------------------------------------------
# L1-penalized logistic regression using the glmnet package with default parameters
# USEAGE: Rscript [scriptfile] [problem-number] [number of replications]
# Output: Misclassification rate
library(glmnet)
type <- "classification"
args <- commandArgs(TRUE)
if (length(args)) {
num <- as.integer(args[1])
repls <- as.integer(args[2])
}
load(file.path("problems", sprintf("%s_%02i.RData", type, num)))
mcrs <- numeric(repls)
for (repl in seq_len(repls)) {
set.seed(repl)
train <- sample(nrow(problem)) < floor(2/3 * nrow(problem))
mod <- cv.glmnet(x = as.matrix(subset(problem, train, select=-y)), y = problem$y[train], family="binomial")
predicted = predict(mod, newx = as.matrix(subset(problem, !train, select=-y)), type="class")
mcrs[repl] <- mean(problem$y[!train] == predicted)
}
message(round(mean(mcrs), 4))
|
/MachineLearningAlg/main_functions/glmnet_classification.R
|
no_license
|
ishandutta2007/benchR
|
R
| false
| false
| 1,010
|
r
|
# ------------------------------------------------------------------
# Contributed by Michel Lang, TU Dortmund
# ------------------------------------------------------------------
# L1-penalized logistic regression using the glmnet package with default parameters
# USEAGE: Rscript [scriptfile] [problem-number] [number of replications]
# Output: Misclassification rate
library(glmnet)
type <- "classification"
args <- commandArgs(TRUE)
if (length(args)) {
num <- as.integer(args[1])
repls <- as.integer(args[2])
}
load(file.path("problems", sprintf("%s_%02i.RData", type, num)))
mcrs <- numeric(repls)
for (repl in seq_len(repls)) {
set.seed(repl)
train <- sample(nrow(problem)) < floor(2/3 * nrow(problem))
mod <- cv.glmnet(x = as.matrix(subset(problem, train, select=-y)), y = problem$y[train], family="binomial")
predicted = predict(mod, newx = as.matrix(subset(problem, !train, select=-y)), type="class")
mcrs[repl] <- mean(problem$y[!train] == predicted)
}
message(round(mean(mcrs), 4))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IPDFilecheck.R
\name{represent_numerical_data_forsubgroups}
\alias{represent_numerical_data_forsubgroups}
\title{Function to find the number and percentages of categories}
\usage{
represent_numerical_data_forsubgroups(data, variable1, variable2, nrcode = NA)
}
\arguments{
\item{data, }{a data frame}
\item{variable1}{the column name of the variable to be grouped based on
(categorical column)}
\item{variable2}{the column name of the variable to represented
(numerical data)}
\item{nrcode}{non response code for the variable2}
}
\value{
the subgroup
}
\description{
Function to find the number and percentages of categories
}
\examples{
this.df <- data.frame(c(11, 78,22), c("m", "f", "f"), c(1,2,2),
stringsAsFactors = FALSE)
colnames(this.df) <- c("mark", "gender", "group")
represent_numerical_data_forsubgroups(this.df, "group", "mark", NA)
}
|
/man/represent_numerical_data_forsubgroups.Rd
|
no_license
|
sheejamk/IPDFileCheck
|
R
| false
| true
| 929
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IPDFilecheck.R
\name{represent_numerical_data_forsubgroups}
\alias{represent_numerical_data_forsubgroups}
\title{Function to find the number and percentages of categories}
\usage{
represent_numerical_data_forsubgroups(data, variable1, variable2, nrcode = NA)
}
\arguments{
\item{data, }{a data frame}
\item{variable1}{the column name of the variable to be grouped based on
(categorical column)}
\item{variable2}{the column name of the variable to represented
(numerical data)}
\item{nrcode}{non response code for the variable2}
}
\value{
the subgroup
}
\description{
Function to find the number and percentages of categories
}
\examples{
this.df <- data.frame(c(11, 78,22), c("m", "f", "f"), c(1,2,2),
stringsAsFactors = FALSE)
colnames(this.df) <- c("mark", "gender", "group")
represent_numerical_data_forsubgroups(this.df, "group", "mark", NA)
}
|
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Question 1
total<-aggregate(NEI$Emissions,by=list(Category=NEI$year),FUN=sum)
png(file="Q1.png",width=480,height=480)
plot(total$Category,total$x,type="o",xlab="year",ylab="Emissions",main="Total PM 2.5 emissions")
dev.off()
#reg<-lm(x~Category,data=total)
#abline(reg)
|
/plot1.R
|
no_license
|
TheCode2017/Analysis-of-fine-particulate-matter-PM-2.5-
|
R
| false
| false
| 422
|
r
|
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Question 1
total<-aggregate(NEI$Emissions,by=list(Category=NEI$year),FUN=sum)
png(file="Q1.png",width=480,height=480)
plot(total$Category,total$x,type="o",xlab="year",ylab="Emissions",main="Total PM 2.5 emissions")
dev.off()
#reg<-lm(x~Category,data=total)
#abline(reg)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{gq_points_weights}
\alias{gq_points_weights}
\title{Gaussian Quadrature points and weights}
\usage{
gq_points_weights(dimensions = 1, mu = rep(0, dimensions),
sigma = diag(1, dimensions), settings = settings.gq())
}
\arguments{
\item{dimensions}{Number of dimensions}
\item{mu}{Mean of the normal distribution}
\item{sigma}{Variance covariance matrix of the normal distribution}
\item{settings}{Settings}
}
\value{
List with entries weights and grid_points
}
\description{
The function returns the grid points and weights for the calculation of means (or variances)
of a function w.r.t. a normal distribution.
}
\examples{
# points and weights for 2 dimensional integration with 3 grid points per dimension
gq_points_weights(2, settings = settings.gq(quad_points = 3))
}
|
/man/gq_points_weights.Rd
|
permissive
|
sebastianueckert/funsu
|
R
| false
| true
| 875
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{gq_points_weights}
\alias{gq_points_weights}
\title{Gaussian Quadrature points and weights}
\usage{
gq_points_weights(dimensions = 1, mu = rep(0, dimensions),
sigma = diag(1, dimensions), settings = settings.gq())
}
\arguments{
\item{dimensions}{Number of dimensions}
\item{mu}{Mean of the normal distribution}
\item{sigma}{Variance covariance matrix of the normal distribution}
\item{settings}{Settings}
}
\value{
List with entries weights and grid_points
}
\description{
The function returns the grid points and weights for the calculation of means (or variances)
of a function w.r.t. a normal distribution.
}
\examples{
# points and weights for 2 dimensional integration with 3 grid points per dimension
gq_points_weights(2, settings = settings.gq(quad_points = 3))
}
|
# Code to run in class
Help()
help()
# Source code
help
h el p ()
help ( topic = "mean")
`+`(1, 3)
1 + 3
## Assignment with arrow "<-"
a <- c(1, 3, 4, 6)
b <- c("marcel", "ramos")
## Assignment with equals sign "="
a = c(1, 3, 4, 7)
## Operation works on each value in `a`
a + 1
object.size(a)
?mean
help("mean")
?reserved
?rnorm()
b <- rnorm(20)
mean(b)
# Arguments
?cor
3 < 5
cc <- rnorm(20)
# Run functions with explicit arguments
cor(x = b, y = cc)
# Run cor function without explicit arguments
cor(b, cc)
# Value section
?mean
# File paths (what are they?)
getwd()
## Change your current working directory
setwd("C:/Users/mramos/Documents/IntroR/IntroR/Data")
## list all the contents in a directory
list.files("~/IntroR/IntroR/Data/")
## shortcut to home directory '~'
normalizePath("~")
## read a text file from your current working directory
babies <- read.table("babies.txt")
head(babies)
## use headers = TRUE
babies <- read.table("babies.txt", header = TRUE)
head(babies)
# install.packages()
# library()
|
/InClass/inClass_S1.R
|
no_license
|
gitter-badger/IntroR
|
R
| false
| false
| 1,051
|
r
|
# Code to run in class
Help()
help()
# Source code
help
h el p ()
help ( topic = "mean")
`+`(1, 3)
1 + 3
## Assignment with arrow "<-"
a <- c(1, 3, 4, 6)
b <- c("marcel", "ramos")
## Assignment with equals sign "="
a = c(1, 3, 4, 7)
## Operation works on each value in `a`
a + 1
object.size(a)
?mean
help("mean")
?reserved
?rnorm()
b <- rnorm(20)
mean(b)
# Arguments
?cor
3 < 5
cc <- rnorm(20)
# Run functions with explicit arguments
cor(x = b, y = cc)
# Run cor function without explicit arguments
cor(b, cc)
# Value section
?mean
# File paths (what are they?)
getwd()
## Change your current working directory
setwd("C:/Users/mramos/Documents/IntroR/IntroR/Data")
## list all the contents in a directory
list.files("~/IntroR/IntroR/Data/")
## shortcut to home directory '~'
normalizePath("~")
## read a text file from your current working directory
babies <- read.table("babies.txt")
head(babies)
## use headers = TRUE
babies <- read.table("babies.txt", header = TRUE)
head(babies)
# install.packages()
# library()
|
library(tidyverse)
library(modelr)
library(scatterplot3d)
library(readr,dplyr)
library(ggplot2)
library(caret)
library(rsample)
library(ISLR)
library(rpart)
library(rpart.plot)
Analysis <- read_csv("breast_cancer_data2.csv")
# TODO data cleanup
Analysis$diagnosis <- as.factor(Analysis$diagnosis) # convert diagnosis from chr to fct
Analysis %>%
ggplot(aes(mean_radius, mean_area, color=diagnosis, shape=diagnosis)) + geom_point() # graph displaying correclation between radius and area
Analysis %>%
ggplot(aes(mean_perimeter, mean_texture, color=diagnosis, shape=diagnosis)) + geom_point() + geom_smooth() # graph displaying correclation between perimeter and texture
Analysis %>%
ggplot(aes(mean_texture, mean_smoothness, color=diagnosis, shape=diagnosis)) + geom_point() + geom_boxplot() # graph displaying correclation between radius and perimeter
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_area, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for area corelation with diagnostic
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_smoothness, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for smoothness corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_radius, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for radius corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_texture, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for texture corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_perimeter, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for perimeter corelation with diagnostisc
by_diagnosis <- group_by(Analysis, diagnosis)
summarize(by_diagnosis, count = n()) # count occurences in the dataset
# model de regresie logistica, diagnosis in functie de mean_perimeter
mod_perimeter <- glm(data = Analysis, diagnosis ~ mean_perimeter, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_perimeter)
grid <- Analysis %>%
data_grid(mean_perimeter = seq_range(mean_perimeter, 100)) %>% # facem o prezicere punand un range de 100 de valori al lui mean_parameter din dataSet
add_predictions(mod_perimeter, "prob_default", type = "response")
ggplot() +
geom_line(data = grid, aes(mean_perimeter, prob_default), color = "red", size = 2) # graficul care ne arata predictul
nd <- tribble(~mean_perimeter, 40, 120) # predicted pentru 2 valori
predicted <- predict(mod_perimeter, newdata = nd, type = "response")
predicted
#aici am inserat eu test pentru texture @kiki
mod_texture <- glm(data = Analysis, diagnosis ~ mean_texture, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_texture)
# model de regreie logistica, diagnosis in functie de mean_smoothness
mod_smoothness <- glm(data = Analysis, diagnosis ~ mean_smoothness, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_smoothness)
#de aici am inceput eu @Kiki
grid <- Analysis %>%
data_grid(mean_smoothness = seq_range(mean_smoothness, 100)) %>%
add_predictions(mod_smoothness, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_smoothness, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_smoothness, 0.05, 0.150)
predicted <- predict(mod_smoothness, newdata = nd, type = "response")
# model de regreie logistica, diagnosis in functie de mean_area
mod_area <- glm(data = Analysis, diagnosis ~ mean_area, family = binomial) #
summary(mod_area)
grid <- Analysis %>%
data_grid(mean_area = seq_range(mean_area, 100)) %>%
add_predictions(mod_area, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_area, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_area, 500, 1000)
predicted <- predict(mod_area, newdata = nd, type = "response")
# model de regreie logistica, diagnosis in functie de diagnosis
mod_radius <- glm(data = Analysis, diagnosis ~ mean_radius, family = binomial)
summary(mod_radius)
grid <- Analysis %>%
data_grid(mean_radius = seq_range(mean_radius, 100)) %>%
add_predictions(mod_radius, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_radius, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_radius, 10, 20)
predicted <- predict(mod_radius, newdata = nd, type = "response")
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_texture + mean_smoothness, family = binomial)
summary(mod_all) #aici am inserat eu modd all pentru doar perimeter, texture si smoothness @kiki
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_smoothness + mean_area + mean_radius + mean_texture, family = binomial)
summary(mod_all) #mod all pt toate
#scoatem mean_area, ca are cel mai mic P si facem un model nou @kiki
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_smoothness + mean_radius + mean_texture, family = binomial)
summary(mod_all)
mod_all <- glm(data = Analysis, diagnosis ~ mean_smoothness + mean_texture, family = binomial)
summary(mod_all)
#facem un box plot pt cele mai bune variabile @kiki
ggplot(Analysis) +
geom_boxplot(aes(x = mean_smoothness, y = mean_texture, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for area corelation with diagnostic
# perform clasification
set.seed(123)
split <- initial_split(Analysis, prop = 0.7, strata = "diagnosis")
train <- training(split)
test <- testing(split)
mod_area_train <- glm(data = train, diagnosis ~ mean_perimeter + mean_area, family = binomial)
summary(mod_area_train) #aici trebuie schimbat in texture si smoothness, am schimbat mai jos
mod_area_train <- glm(data = train, diagnosis ~ mean_texture + mean_smoothness, family = binomial)
summary(mod_area_train) #aici am introdus eu@kiki
pred_test <- predict(mod_area_train, newdata = test, type = "response")
table(pred_test > 0.5, test$diagnosis)
# performa clasification analysis with caret
set.seed(123)
split <- initial_split(Analysis, prop = 0.7, strata = "diagnosis")
train <- training(split)
test <- testing(split)
table(test$diagnosis)
table(train$diagnosis)
features <- setdiff(names(Analysis), "diagnosis")
x <- train[, features]
y <- train$diagnosis
fitControl <- trainControl(
method = "cv",
number = 10
)
modGLM_all <- train(
x=x,
y=y,
method = "glm",
family = "binomial",
trControl = fitControl
)
modGLM_all
confusionMatrix(modGLM_all)
pred_all = predict(modGLM_all, newdata = test, type = "raw")
confusionMatrix(pred_all, test$diagnosis)
summary(modGLM_all) #aici am mai adaugat eu @kiki
xNoMean_area <- x %>% select(-mean_area)
modGLM_NoMean_area <- train(
x=xNoMean_area,
y=y,
method = "glm",
family = "binomial",
trControl = fitControl
)
confusionMatrix(modGLM_NoMean_area)
summary(modGLM_NoMean_area)
pred_noMean_area <- predict(modGLM_NoMean_area, test)
confusionMatrix(pred_noMean_area, test$diagnosis)
|
/proiect Final.R
|
no_license
|
bpastiu/bigData
|
R
| false
| false
| 7,444
|
r
|
library(tidyverse)
library(modelr)
library(scatterplot3d)
library(readr,dplyr)
library(ggplot2)
library(caret)
library(rsample)
library(ISLR)
library(rpart)
library(rpart.plot)
Analysis <- read_csv("breast_cancer_data2.csv")
# TODO data cleanup
Analysis$diagnosis <- as.factor(Analysis$diagnosis) # convert diagnosis from chr to fct
Analysis %>%
ggplot(aes(mean_radius, mean_area, color=diagnosis, shape=diagnosis)) + geom_point() # graph displaying correclation between radius and area
Analysis %>%
ggplot(aes(mean_perimeter, mean_texture, color=diagnosis, shape=diagnosis)) + geom_point() + geom_smooth() # graph displaying correclation between perimeter and texture
Analysis %>%
ggplot(aes(mean_texture, mean_smoothness, color=diagnosis, shape=diagnosis)) + geom_point() + geom_boxplot() # graph displaying correclation between radius and perimeter
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_area, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for area corelation with diagnostic
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_smoothness, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for smoothness corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_radius, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for radius corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_texture, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for texture corelation with diagnostisc
ggplot(Analysis) +
geom_boxplot(aes(x = diagnosis, y = mean_perimeter, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for perimeter corelation with diagnostisc
by_diagnosis <- group_by(Analysis, diagnosis)
summarize(by_diagnosis, count = n()) # count occurences in the dataset
# model de regresie logistica, diagnosis in functie de mean_perimeter
mod_perimeter <- glm(data = Analysis, diagnosis ~ mean_perimeter, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_perimeter)
grid <- Analysis %>%
data_grid(mean_perimeter = seq_range(mean_perimeter, 100)) %>% # facem o prezicere punand un range de 100 de valori al lui mean_parameter din dataSet
add_predictions(mod_perimeter, "prob_default", type = "response")
ggplot() +
geom_line(data = grid, aes(mean_perimeter, prob_default), color = "red", size = 2) # graficul care ne arata predictul
nd <- tribble(~mean_perimeter, 40, 120) # predicted pentru 2 valori
predicted <- predict(mod_perimeter, newdata = nd, type = "response")
predicted
#aici am inserat eu test pentru texture @kiki
mod_texture <- glm(data = Analysis, diagnosis ~ mean_texture, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_texture)
# model de regreie logistica, diagnosis in functie de mean_smoothness
mod_smoothness <- glm(data = Analysis, diagnosis ~ mean_smoothness, family = binomial) # cu cat mean_parameter e mai mic cu atata sansa e sa fie Benign si cu cat creste sansa e sa fie Malign
summary(mod_smoothness)
#de aici am inceput eu @Kiki
grid <- Analysis %>%
data_grid(mean_smoothness = seq_range(mean_smoothness, 100)) %>%
add_predictions(mod_smoothness, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_smoothness, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_smoothness, 0.05, 0.150)
predicted <- predict(mod_smoothness, newdata = nd, type = "response")
# model de regreie logistica, diagnosis in functie de mean_area
mod_area <- glm(data = Analysis, diagnosis ~ mean_area, family = binomial) #
summary(mod_area)
grid <- Analysis %>%
data_grid(mean_area = seq_range(mean_area, 100)) %>%
add_predictions(mod_area, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_area, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_area, 500, 1000)
predicted <- predict(mod_area, newdata = nd, type = "response")
# model de regreie logistica, diagnosis in functie de diagnosis
mod_radius <- glm(data = Analysis, diagnosis ~ mean_radius, family = binomial)
summary(mod_radius)
grid <- Analysis %>%
data_grid(mean_radius = seq_range(mean_radius, 100)) %>%
add_predictions(mod_radius, "prob_diagnosis", type="response")
ggplot() +
geom_line(data = grid, aes(mean_radius, prob_diagnosis), color = "red", size = 2)
nd <- tribble(~mean_radius, 10, 20)
predicted <- predict(mod_radius, newdata = nd, type = "response")
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_texture + mean_smoothness, family = binomial)
summary(mod_all) #aici am inserat eu modd all pentru doar perimeter, texture si smoothness @kiki
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_smoothness + mean_area + mean_radius + mean_texture, family = binomial)
summary(mod_all) #mod all pt toate
#scoatem mean_area, ca are cel mai mic P si facem un model nou @kiki
mod_all <- glm(data = Analysis, diagnosis ~ mean_perimeter + mean_smoothness + mean_radius + mean_texture, family = binomial)
summary(mod_all)
mod_all <- glm(data = Analysis, diagnosis ~ mean_smoothness + mean_texture, family = binomial)
summary(mod_all)
#facem un box plot pt cele mai bune variabile @kiki
ggplot(Analysis) +
geom_boxplot(aes(x = mean_smoothness, y = mean_texture, fill = diagnosis)) + theme(text = element_text(size=20)) # boxplot graph for area corelation with diagnostic
# perform clasification
set.seed(123)
split <- initial_split(Analysis, prop = 0.7, strata = "diagnosis")
train <- training(split)
test <- testing(split)
mod_area_train <- glm(data = train, diagnosis ~ mean_perimeter + mean_area, family = binomial)
summary(mod_area_train) #aici trebuie schimbat in texture si smoothness, am schimbat mai jos
mod_area_train <- glm(data = train, diagnosis ~ mean_texture + mean_smoothness, family = binomial)
summary(mod_area_train) #aici am introdus eu@kiki
pred_test <- predict(mod_area_train, newdata = test, type = "response")
table(pred_test > 0.5, test$diagnosis)
# performa clasification analysis with caret
set.seed(123)
split <- initial_split(Analysis, prop = 0.7, strata = "diagnosis")
train <- training(split)
test <- testing(split)
table(test$diagnosis)
table(train$diagnosis)
features <- setdiff(names(Analysis), "diagnosis")
x <- train[, features]
y <- train$diagnosis
fitControl <- trainControl(
method = "cv",
number = 10
)
modGLM_all <- train(
x=x,
y=y,
method = "glm",
family = "binomial",
trControl = fitControl
)
modGLM_all
confusionMatrix(modGLM_all)
pred_all = predict(modGLM_all, newdata = test, type = "raw")
confusionMatrix(pred_all, test$diagnosis)
summary(modGLM_all) #aici am mai adaugat eu @kiki
xNoMean_area <- x %>% select(-mean_area)
modGLM_NoMean_area <- train(
x=xNoMean_area,
y=y,
method = "glm",
family = "binomial",
trControl = fitControl
)
confusionMatrix(modGLM_NoMean_area)
summary(modGLM_NoMean_area)
pred_noMean_area <- predict(modGLM_NoMean_area, test)
confusionMatrix(pred_noMean_area, test$diagnosis)
|
setwd("C:/Users/KSGI/Desktop/Data Science/Programming Assignement 3")
best <- function(state, outcome){
out <- read.csv("outcome-of-care-measures.csv", h=T, sep=",")
if(outcome=="heart attack") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[11]))]
} else if (outcome=="heart failure") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[17]))]
} else if (outcome=="pneumonia") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[23]))]
} else {
stop ("invalid outcome")
}
'%notin%' <- Negate('%in%')
if (state %notin% unique(hosp$State)) {
stop("invalid state")
} else {
hosp2 <- subset(hosp, State==state)
hosp2[,3] <-as.numeric(hosp2[,3])
#If we want to suppress warning on NAs introduced by coercion#
#suppressWarnings( hosp2[,3] <- as.numeric(hosp2[,3]))
hosp2 <- na.omit(hosp2)
hosp3 <- hosp2[with(hosp2, order(hosp2[,3], hosp2[,1])),]
colnames(hosp3) <- c("Hospital_Name", "State", outcome)
best.h <- hosp3$Hospital_Name[1]
}
best.h
}
|
/Programming Assignement 3/best.R
|
no_license
|
Kgiabou/Data-Science
|
R
| false
| false
| 1,387
|
r
|
setwd("C:/Users/KSGI/Desktop/Data Science/Programming Assignement 3")
best <- function(state, outcome){
out <- read.csv("outcome-of-care-measures.csv", h=T, sep=",")
if(outcome=="heart attack") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[11]))]
} else if (outcome=="heart failure") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[17]))]
} else if (outcome=="pneumonia") {
hosp <- out[,c(which(colnames(out)=="Hospital.Name"),
which(colnames(out)=="State"),
which(colnames(out)==names(out)[23]))]
} else {
stop ("invalid outcome")
}
'%notin%' <- Negate('%in%')
if (state %notin% unique(hosp$State)) {
stop("invalid state")
} else {
hosp2 <- subset(hosp, State==state)
hosp2[,3] <-as.numeric(hosp2[,3])
#If we want to suppress warning on NAs introduced by coercion#
#suppressWarnings( hosp2[,3] <- as.numeric(hosp2[,3]))
hosp2 <- na.omit(hosp2)
hosp3 <- hosp2[with(hosp2, order(hosp2[,3], hosp2[,1])),]
colnames(hosp3) <- c("Hospital_Name", "State", outcome)
best.h <- hosp3$Hospital_Name[1]
}
best.h
}
|
## makeCacheMatrix creates a cacheable matrix type.
## A cacheable matrix is a list with named elements that are methods that get/set the matrix
## and its inverse
## The matrix and the inverse are saved in the closure
## Arguments to makeCacheMatrix() are the same as those for matrix()
makeCacheMatrix <- function(...) {
inv <- NULL
x <- matrix(...)
set <- function(...) {
## <<- accesses the symbol defined in the closure
x <<- matrix(...)
# Reset the inverse when the matrix changes
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve takes a cacheable matrix, and returns its inverse
## When called for the first time, it computes the inverse and caches it
## on subsequent calls, it will return the cached value.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached inverse matrix")
return(inv)
}
x$setInverse(solve(x$get(), ...))
}
|
/cachematrix.R
|
no_license
|
ankanban/ProgrammingAssignment2
|
R
| false
| false
| 1,205
|
r
|
## makeCacheMatrix creates a cacheable matrix type.
## A cacheable matrix is a list with named elements that are methods that get/set the matrix
## and its inverse
## The matrix and the inverse are saved in the closure
## Arguments to makeCacheMatrix() are the same as those for matrix()
makeCacheMatrix <- function(...) {
inv <- NULL
x <- matrix(...)
set <- function(...) {
## <<- accesses the symbol defined in the closure
x <<- matrix(...)
# Reset the inverse when the matrix changes
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve takes a cacheable matrix, and returns its inverse
## When called for the first time, it computes the inverse and caches it
## on subsequent calls, it will return the cached value.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached inverse matrix")
return(inv)
}
x$setInverse(solve(x$get(), ...))
}
|
library(shinyWidgets)
### Name: downloadBttn
### Title: Create a download bttn
### Aliases: downloadBttn
### ** Examples
## Not run:
##D
##D if (interactive()) {
##D
##D library(shiny)
##D library(shinyWidgets)
##D
##D ui <- fluidPage(
##D tags$h2("Download bttn"),
##D downloadBttn(
##D outputId = "downloadData",
##D style = "bordered",
##D color = "primary"
##D )
##D )
##D
##D server <- function(input, output, session) {
##D
##D output$downloadData <- downloadHandler(
##D filename = function() {
##D paste('data-', Sys.Date(), '.csv', sep='')
##D },
##D content = function(con) {
##D write.csv(mtcars, con)
##D }
##D )
##D
##D }
##D
##D shinyApp(ui, server)
##D
##D }
##D
## End(Not run)
|
/data/genthat_extracted_code/shinyWidgets/examples/downloadBttn.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 761
|
r
|
library(shinyWidgets)
### Name: downloadBttn
### Title: Create a download bttn
### Aliases: downloadBttn
### ** Examples
## Not run:
##D
##D if (interactive()) {
##D
##D library(shiny)
##D library(shinyWidgets)
##D
##D ui <- fluidPage(
##D tags$h2("Download bttn"),
##D downloadBttn(
##D outputId = "downloadData",
##D style = "bordered",
##D color = "primary"
##D )
##D )
##D
##D server <- function(input, output, session) {
##D
##D output$downloadData <- downloadHandler(
##D filename = function() {
##D paste('data-', Sys.Date(), '.csv', sep='')
##D },
##D content = function(con) {
##D write.csv(mtcars, con)
##D }
##D )
##D
##D }
##D
##D shinyApp(ui, server)
##D
##D }
##D
## End(Not run)
|
library(leaflet)
library(ShinyDash)
shinyUI(fluidPage(
tags$head(tags$link(rel='stylesheet', type='text/css', href='styles.css')),
leafletMap(
"map", "100%", 400,
initialTileLayer = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
initialTileLayerAttribution = HTML('Maps by <a href="http://www.mapbox.com/">Mapbox</a>'),
options=list(
center = c(37.45, -93.85),
zoom = 4,
maxBounds = list(list(17, -180), list(59, 180))
)
),
fluidRow(
column(8, offset=3,
h2('Population of US cities'),
htmlWidgetOutput(
outputId = 'desc',
HTML(paste(
'The map is centered at <span id="lat"></span>, <span id="lng"></span>',
'with a zoom level of <span id="zoom"></span>.<br/>',
'Top <span id="shownCities"></span> out of <span id="totalCities"></span> visible cities are displayed.'
))
)
)
),
hr(),
fluidRow(
column(3,
selectInput('year', 'Year', c(2000:2010), 2010),
selectInput('maxCities', 'Maximum cities to display', choices=c(
5, 25, 50, 100, 200, 500, 2000, 5000, 10000, All = 100000
), selected = 100)
),
column(4,
h4('Visible cities'),
tableOutput('data')
),
column(5,
h4(id='cityTimeSeriesLabel', class='shiny-text-output'),
plotOutput('cityTimeSeries', width='100%', height='250px')
)
)
))
|
/R-Portable/library/leaflet/legacy/examples/population/ui.R
|
permissive
|
ksasso/Electron_ShinyApp_Deployment
|
R
| false
| false
| 1,410
|
r
|
library(leaflet)
library(ShinyDash)
shinyUI(fluidPage(
tags$head(tags$link(rel='stylesheet', type='text/css', href='styles.css')),
leafletMap(
"map", "100%", 400,
initialTileLayer = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
initialTileLayerAttribution = HTML('Maps by <a href="http://www.mapbox.com/">Mapbox</a>'),
options=list(
center = c(37.45, -93.85),
zoom = 4,
maxBounds = list(list(17, -180), list(59, 180))
)
),
fluidRow(
column(8, offset=3,
h2('Population of US cities'),
htmlWidgetOutput(
outputId = 'desc',
HTML(paste(
'The map is centered at <span id="lat"></span>, <span id="lng"></span>',
'with a zoom level of <span id="zoom"></span>.<br/>',
'Top <span id="shownCities"></span> out of <span id="totalCities"></span> visible cities are displayed.'
))
)
)
),
hr(),
fluidRow(
column(3,
selectInput('year', 'Year', c(2000:2010), 2010),
selectInput('maxCities', 'Maximum cities to display', choices=c(
5, 25, 50, 100, 200, 500, 2000, 5000, 10000, All = 100000
), selected = 100)
),
column(4,
h4('Visible cities'),
tableOutput('data')
),
column(5,
h4(id='cityTimeSeriesLabel', class='shiny-text-output'),
plotOutput('cityTimeSeries', width='100%', height='250px')
)
)
))
|
##########################################
#### GAM MODELS FOR T1 BIFACTOR STUDY ####
##########################################
## Fits one GAM per Jacobian (JAC) NMF component; then, for each bifactor
## psychopathology score, extracts its p-value across components, applies
## FDR correction, and collects the components that survive.
#Load data
data.JAC <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1239_T1_subjData_NoPsychMeds.rds")
#Load library
library(mgcv)
#Get JAC variable names (columns whose name contains "newJacobian_Nmf18")
jacComponents <- names(data.JAC)[grep("newJacobian_Nmf18",names(data.JAC))]
#Run gam models. NOTE(review): the original comment said "GAM without TBV",
#but total brain volume (mprage_antsCT_vol_TBV) IS a covariate below.
JacModels <- lapply(jacComponents, function(x) {
#substitute() builds the formula with the current component as the response.
gam(substitute(i ~ s(age) + sex + averageManualRating + mprage_antsCT_vol_TBV + medu1 + mood_4factorv2 + psychosis_4factorv2 + externalizing_4factorv2 + phobias_4factorv2 + overall_psychopathology_4factorv2, list(i = as.name(x))), method="REML", data = data.JAC)
})
#Look at model summaries
models <- lapply(JacModels, summary)
#NOTE(review): the p.table row indices used below (6..10) are assumed to map
#to mood, psychosis, externalizing, phobias and overall psychopathology, in
#the order the covariates appear in the formula -- confirm against summary().
######################
#### MOOD RESULTS ####
######################
#Pull p-values (parametric-term table: row 6 = mood, column 4 = p-value)
p_mood <- sapply(JacModels, function(v) summary(v)$p.table[6,4])
#Convert to data frame
p_mood <- as.data.frame(p_mood)
#Print original p-values to three decimal places
p_mood_round <- round(p_mood,3)
#FDR correct p-values
p_mood_fdr <- p.adjust(p_mood[,1],method="fdr")
#Convert to data frame
p_mood_fdr <- as.data.frame(p_mood_fdr)
#To print fdr-corrected p-values to three decimal places
p_mood_fdr_round <- round(p_mood_fdr,3)
#List the JAC components that survive FDR correction
Jac_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_mood_fdr_names <- jacComponents[as.numeric(Jac_mood_fdr)]
#To check direction of coefficient estimates
mood_coeff <- models[as.numeric(Jac_mood_fdr)]
###########################
#### PSYCHOSIS RESULTS ####
###########################
#Pull p-values (row 7 = psychosis)
p_psy <- sapply(JacModels, function(v) summary(v)$p.table[7,4])
#Convert to data frame
p_psy <- as.data.frame(p_psy)
#Print original p-values to three decimal places
p_psy_round <- round(p_psy,3)
#FDR correct p-values
p_psy_fdr <- p.adjust(p_psy[,1],method="fdr")
#Convert to data frame
p_psy_fdr <- as.data.frame(p_psy_fdr)
#To print fdr-corrected p-values to three decimal places
p_psy_fdr_round <- round(p_psy_fdr,3)
#List the JAC components that survive FDR correction
Jac_psy_fdr <- row.names(p_psy_fdr)[p_psy_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_psy_fdr_names <- jacComponents[as.numeric(Jac_psy_fdr)]
#To check direction of coefficient estimates
psy_coeff <- models[as.numeric(Jac_psy_fdr)]
########################################
#### EXTERNALIZING BEHAVIOR RESULTS ####
########################################
#Pull p-values (row 8 = externalizing)
p_ext <- sapply(JacModels, function(v) summary(v)$p.table[8,4])
#Convert to data frame
p_ext <- as.data.frame(p_ext)
#Print original p-values to three decimal places
p_ext_round <- round(p_ext,3)
#FDR correct p-values
p_ext_fdr <- p.adjust(p_ext[,1],method="fdr")
#Convert to data frame
p_ext_fdr <- as.data.frame(p_ext_fdr)
#To print fdr-corrected p-values to three decimal places
p_ext_fdr_round <- round(p_ext_fdr,3)
#List the JAC components that survive FDR correction
Jac_ext_fdr <- row.names(p_ext_fdr)[p_ext_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_ext_fdr_names <- jacComponents[as.numeric(Jac_ext_fdr)]
#To check direction of coefficient estimates
ext_coeff <- models[as.numeric(Jac_ext_fdr)]
##############################
#### PHOBIA(FEAR) RESULTS ####
##############################
#Pull p-values (row 9 = phobias)
p_fear <- sapply(JacModels, function(v) summary(v)$p.table[9,4])
#Convert to data frame
p_fear <- as.data.frame(p_fear)
#Print original p-values to three decimal places
p_fear_round <- round(p_fear,3)
#FDR correct p-values
p_fear_fdr <- p.adjust(p_fear[,1],method="fdr")
#Convert to data frame
p_fear_fdr <- as.data.frame(p_fear_fdr)
#To print fdr-corrected p-values to three decimal places
p_fear_fdr_round <- round(p_fear_fdr,3)
#List the JAC components that survive FDR correction
Jac_fear_fdr <- row.names(p_fear_fdr)[p_fear_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_fear_fdr_names <- jacComponents[as.numeric(Jac_fear_fdr)]
#To check direction of coefficient estimates
fear_coeff <- models[as.numeric(Jac_fear_fdr)]
#########################################
#### OVERALL PSYCHOPATHOLOGY RESULTS ####
#########################################
#Pull p-values (row 10 = overall psychopathology)
p_overall <- sapply(JacModels, function(v) summary(v)$p.table[10,4])
#Convert to data frame
p_overall <- as.data.frame(p_overall)
#Print original p-values to three decimal places
p_overall_round <- round(p_overall,3)
#FDR correct p-values
p_overall_fdr <- p.adjust(p_overall[,1],method="fdr")
#Convert to data frame
p_overall_fdr <- as.data.frame(p_overall_fdr)
#To print fdr-corrected p-values to three decimal places
p_overall_fdr_round <- round(p_overall_fdr,3)
#List the JAC components that survive FDR correction
Jac_overall_fdr <- row.names(p_overall_fdr)[p_overall_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_overall_fdr_names <- jacComponents[as.numeric(Jac_overall_fdr)]
#To check direction of coefficient estimates
overall_coeff <- models[as.numeric(Jac_overall_fdr)]
|
/NewJacobian/GamAnalyses_T1Bifactors_NewJacobian_medu1_TBV_noPsychMed.R
|
no_license
|
PennBBL/pncT1Bifactors
|
R
| false
| false
| 5,183
|
r
|
##########################################
#### GAM MODELS FOR T1 BIFACTOR STUDY ####
##########################################
## Fits one GAM per Jacobian (JAC) NMF component; then, for each bifactor
## psychopathology score, extracts its p-value across components, applies
## FDR correction, and collects the components that survive.
#Load data
data.JAC <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1239_T1_subjData_NoPsychMeds.rds")
#Load library
library(mgcv)
#Get JAC variable names (columns whose name contains "newJacobian_Nmf18")
jacComponents <- names(data.JAC)[grep("newJacobian_Nmf18",names(data.JAC))]
#Run gam models. NOTE(review): the original comment said "GAM without TBV",
#but total brain volume (mprage_antsCT_vol_TBV) IS a covariate below.
JacModels <- lapply(jacComponents, function(x) {
#substitute() builds the formula with the current component as the response.
gam(substitute(i ~ s(age) + sex + averageManualRating + mprage_antsCT_vol_TBV + medu1 + mood_4factorv2 + psychosis_4factorv2 + externalizing_4factorv2 + phobias_4factorv2 + overall_psychopathology_4factorv2, list(i = as.name(x))), method="REML", data = data.JAC)
})
#Look at model summaries
models <- lapply(JacModels, summary)
#NOTE(review): the p.table row indices used below (6..10) are assumed to map
#to mood, psychosis, externalizing, phobias and overall psychopathology, in
#the order the covariates appear in the formula -- confirm against summary().
######################
#### MOOD RESULTS ####
######################
#Pull p-values (parametric-term table: row 6 = mood, column 4 = p-value)
p_mood <- sapply(JacModels, function(v) summary(v)$p.table[6,4])
#Convert to data frame
p_mood <- as.data.frame(p_mood)
#Print original p-values to three decimal places
p_mood_round <- round(p_mood,3)
#FDR correct p-values
p_mood_fdr <- p.adjust(p_mood[,1],method="fdr")
#Convert to data frame
p_mood_fdr <- as.data.frame(p_mood_fdr)
#To print fdr-corrected p-values to three decimal places
p_mood_fdr_round <- round(p_mood_fdr,3)
#List the JAC components that survive FDR correction
Jac_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_mood_fdr_names <- jacComponents[as.numeric(Jac_mood_fdr)]
#To check direction of coefficient estimates
mood_coeff <- models[as.numeric(Jac_mood_fdr)]
###########################
#### PSYCHOSIS RESULTS ####
###########################
#Pull p-values (row 7 = psychosis)
p_psy <- sapply(JacModels, function(v) summary(v)$p.table[7,4])
#Convert to data frame
p_psy <- as.data.frame(p_psy)
#Print original p-values to three decimal places
p_psy_round <- round(p_psy,3)
#FDR correct p-values
p_psy_fdr <- p.adjust(p_psy[,1],method="fdr")
#Convert to data frame
p_psy_fdr <- as.data.frame(p_psy_fdr)
#To print fdr-corrected p-values to three decimal places
p_psy_fdr_round <- round(p_psy_fdr,3)
#List the JAC components that survive FDR correction
Jac_psy_fdr <- row.names(p_psy_fdr)[p_psy_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_psy_fdr_names <- jacComponents[as.numeric(Jac_psy_fdr)]
#To check direction of coefficient estimates
psy_coeff <- models[as.numeric(Jac_psy_fdr)]
########################################
#### EXTERNALIZING BEHAVIOR RESULTS ####
########################################
#Pull p-values (row 8 = externalizing)
p_ext <- sapply(JacModels, function(v) summary(v)$p.table[8,4])
#Convert to data frame
p_ext <- as.data.frame(p_ext)
#Print original p-values to three decimal places
p_ext_round <- round(p_ext,3)
#FDR correct p-values
p_ext_fdr <- p.adjust(p_ext[,1],method="fdr")
#Convert to data frame
p_ext_fdr <- as.data.frame(p_ext_fdr)
#To print fdr-corrected p-values to three decimal places
p_ext_fdr_round <- round(p_ext_fdr,3)
#List the JAC components that survive FDR correction
Jac_ext_fdr <- row.names(p_ext_fdr)[p_ext_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_ext_fdr_names <- jacComponents[as.numeric(Jac_ext_fdr)]
#To check direction of coefficient estimates
ext_coeff <- models[as.numeric(Jac_ext_fdr)]
##############################
#### PHOBIA(FEAR) RESULTS ####
##############################
#Pull p-values (row 9 = phobias)
p_fear <- sapply(JacModels, function(v) summary(v)$p.table[9,4])
#Convert to data frame
p_fear <- as.data.frame(p_fear)
#Print original p-values to three decimal places
p_fear_round <- round(p_fear,3)
#FDR correct p-values
p_fear_fdr <- p.adjust(p_fear[,1],method="fdr")
#Convert to data frame
p_fear_fdr <- as.data.frame(p_fear_fdr)
#To print fdr-corrected p-values to three decimal places
p_fear_fdr_round <- round(p_fear_fdr,3)
#List the JAC components that survive FDR correction
Jac_fear_fdr <- row.names(p_fear_fdr)[p_fear_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_fear_fdr_names <- jacComponents[as.numeric(Jac_fear_fdr)]
#To check direction of coefficient estimates
fear_coeff <- models[as.numeric(Jac_fear_fdr)]
#########################################
#### OVERALL PSYCHOPATHOLOGY RESULTS ####
#########################################
#Pull p-values (row 10 = overall psychopathology)
p_overall <- sapply(JacModels, function(v) summary(v)$p.table[10,4])
#Convert to data frame
p_overall <- as.data.frame(p_overall)
#Print original p-values to three decimal places
p_overall_round <- round(p_overall,3)
#FDR correct p-values
p_overall_fdr <- p.adjust(p_overall[,1],method="fdr")
#Convert to data frame
p_overall_fdr <- as.data.frame(p_overall_fdr)
#To print fdr-corrected p-values to three decimal places
p_overall_fdr_round <- round(p_overall_fdr,3)
#List the JAC components that survive FDR correction
Jac_overall_fdr <- row.names(p_overall_fdr)[p_overall_fdr<0.05]
#Name of the JAC components that survive FDR correction
Jac_overall_fdr_names <- jacComponents[as.numeric(Jac_overall_fdr)]
#To check direction of coefficient estimates
overall_coeff <- models[as.numeric(Jac_overall_fdr)]
|
## Train and evaluate random forests on image (reduced MNIST) data, then on
## the DNA splice-junction data.
## Fixes over the original: the script used randomForest() and data(DNA)
## without ever loading the packages that provide them; `=` assignment,
## `replace = F` and `1:n` replaced with idiomatic forms. Korean comments
## translated to English.
library(randomForest)
library(mlbench) # provides the DNA dataset used below
## Download reduced MNIST train/test sets (CSV over HTTPS).
test <- read.csv('https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples/master/r_samples/public_lib/jp/mnist_reproduced/short_prac_test.csv')
train <- read.csv('https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples/master/r_samples/public_lib/jp/mnist_reproduced/short_prac_train.csv')
str(train) # 'label' is read as int -> must be converted to a factor (class variable)
train1 <- train
test1 <- test
train1$label <- factor(train$label)
test1$label <- factor(test$label)
## Fit a random forest on the raw pixel values and compute test accuracy.
r2 <- randomForest(label ~ ., train1)
pred2 <- predict(r2, newdata = test1)
t2 <- table(test1$label, pred2)
sum(diag(t2)) / sum(t2) # overall test-set accuracy
###########################################################################################
## Binarize the pixel columns (round(x/255) gives 0/1) and refit, timing training.
train2 <- train1
test2 <- test1
train2[, -1] <- round(train2[, -1] / 255, 0)
test2[, -1] <- round(test2[, -1] / 255, 0)
start1 <- Sys.time()
r3 <- randomForest(label ~ ., train2)
interval1 <- Sys.time() - start1 # training time on binarized pixels
pred3 <- predict(r3, newdata = test2)
t3 <- table(test2$label, pred3)
sum(diag(t3)) / sum(t3) # accuracy on binarized pixels
##########################################################################################
## DNA splice-junction data (from 'mlbench'): random 70/30 train/test split.
data(DNA)
View(DNA)
summary(DNA)
str(DNA)
n <- nrow(DNA)
ok <- sample(seq_len(n), n * 0.7, replace = FALSE)
train3 <- DNA[ok, ]
test3 <- DNA[-ok, ]
start2 <- Sys.time()
r4 <- randomForest(Class ~ ., train3)
interval2 <- Sys.time() - start2
pred4 <- predict(r4, newdata = test3)
t4 <- table(test3$Class, pred4)
sum(diag(t4)) / sum(t4)
############################################################################################
##
|
/code/2주차/170713_eun3.R
|
no_license
|
keuns/bigdata
|
R
| false
| false
| 1,558
|
r
|
## Train and test a random forest on image (reduced MNIST) data
## NOTE(review): this script never loads its dependencies -- randomForest()
## needs library(randomForest), and the DNA dataset used below ships with
## the 'mlbench' package; both must be attached beforehand for it to run.
test <- read.csv('https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples/master/r_samples/public_lib/jp/mnist_reproduced/short_prac_test.csv')
train <- read.csv('https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples/master/r_samples/public_lib/jp/mnist_reproduced/short_prac_train.csv')
str(train) # 'label' is read as int -> must be converted to a factor (class variable)
train1 = train
test1 = test
train1$label = factor(train$label)
test1$label = factor(test$label)
# Fit on raw pixel values and compute test-set accuracy
r2 = randomForest(label~., train1)
pred2 = predict(r2, newdata = test1)
t2 = table(test1$label, pred2)
sum(diag(t2))/sum(t2) # overall accuracy
###########################################################################################
# Binarize the pixel columns (round(x/255) gives 0/1), refit, time training
train2 = train1
test2= test1
train2[,-1] = round(train2[, -1]/255, 0)
test2[,-1] = round(test2[, -1]/255, 0)
start1 = Sys.time()
r3 = randomForest(label~., train2)
interval1 = Sys.time()-start1
pred3 = predict(r3, newdata = test2)
t3 = table(test2$label, pred3)
sum(diag(t3))/sum(t3) # accuracy on binarized pixels
##########################################################################################
# DNA splice-junction data: random 70/30 train/test split
data(DNA)
View(DNA)
summary(DNA)
str(DNA)
n = nrow(DNA)
ok = sample(1:n, n*0.7, replace = F)
train3 = DNA[ok, ]
test3 = DNA[-ok, ]
start2 = Sys.time()
r4 = randomForest(Class~., train3)
interval2 = Sys.time()-start2
pred4 = predict(r4, newdata = test3)
t4 = table(test3$Class, pred4)
sum(diag(t4))/sum(t4)
############################################################################################
##
|
## Tue May 21 02:04:46 2013
## Original file Copyright 2013 A.C. Guidoum
## This file is part of the R package kedd.
## Arsalane Chouaib GUIDOUM <acguidoum@usthb.dz> and <starsalane@gmail.com>
## Department of Probabilities-Statistics
## Faculty of Mathematics
## University of Science and Technology Houari Boumediene
## BP 32 El-Alia, U.S.T.H.B, Algeris
## Algeria
##############################################################################
## Biased Cross-Validation (BCV)
h.bcv <- function(x, ...) UseMethod("h.bcv")
## Biased cross-validation (BCV) bandwidth selector for estimating the r-th
## derivative of a density (default method).
##
## Arguments:
##   x           numeric vector of observations (>= 3 data points).
##   whichbcv    1 or 2: which BCV criterion variant to minimise.
##   deriv.order non-negative integer derivative order r.
##   lower,upper bandwidth search interval; the defaults are scaled from the
##               oversmoothed bandwidth 'hos' (lazy evaluation: they are only
##               forced after 'hos' is computed below).
##   tol         convergence tolerance passed to optimize().
##   kernel      kernel name.
## Value: an object of class "h.bcv" with the selected bandwidth 'h' and the
##   attained criterion minimum, or h = NA when the required kernel
##   derivative is identically zero for the chosen kernel/order.
## Fixes over the original: the fallback assignments after stop() were
## unreachable dead code, and the error messages falsely claimed that default
## boundaries had been substituted; scalar '|' replaced by '||'.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
h.bcv.default <- function(x,whichbcv = 1,deriv.order=0,lower=0.1*hos,upper=2*hos,tol=0.1 * lower,
                          kernel=c("gaussian","epanechnikov","triweight","tricube",
                                   "biweight","cosine"),...)
{
  if (!is.numeric(x) || length(dim(x)) >= 1 || length(x) < 3L)
    stop("argument 'x' must be numeric and need at least 3 data points")
  if (any(deriv.order < 0 || deriv.order != round(deriv.order)))
    stop("argument 'deriv.order' is non-negative integers")
  if (missing(kernel)) kernel <- "gaussian"
  r <- deriv.order
  name <- deparse(substitute(x))
  x <- x[!is.na(x)]
  x <- sort(x)
  n <- length(x)
  # For the compact-support polynomial kernels the (r+2)-th (BCV1) or the
  # 2*(r+2)-th (BCV2) kernel derivative vanishes identically, making the
  # criterion degenerate: return an "h.bcv" object with h = NA.
  if (whichbcv == 1) {
    degenerate <- switch(kernel,
                         "epanechnikov" = r + 2 >= 3,
                         "triweight"    = r + 2 >= 7,
                         "biweight"     = r + 2 >= 5,
                         "tricube"      = r + 2 >= 10,
                         FALSE)
    if (degenerate)
      return(structure(list(x = x, data.name = name, n = n, kernel = kernel,
                            deriv.order = r, whichbcv = whichbcv, h = NA,
                            min.bcv1 = NA), class = "h.bcv"))
  } else if (whichbcv == 2) {
    degenerate <- switch(kernel,
                         "epanechnikov" = 2 * (r + 2) >= 3,
                         "triweight"    = 2 * (r + 2) >= 7,
                         "biweight"     = 2 * (r + 2) >= 5,
                         "tricube"      = 2 * (r + 2) >= 10,
                         FALSE)
    if (degenerate)
      return(structure(list(x = x, data.name = name, n = n, kernel = kernel,
                            deriv.order = r, whichbcv = whichbcv, h = NA,
                            min.bcv2 = NA), class = "h.bcv"))
  }
  # Oversmoothed bandwidth; must be computed before 'lower'/'upper' are
  # forced, since their defaults reference it.
  hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
  if (!is.numeric(upper))
    stop("argument 'upper' must be numeric")
  if (!is.numeric(lower))
    stop("argument 'lower' must be numeric")
  if (lower < 0 || lower >= upper)
    stop("the boundaries must be positive and 'lower' must be smaller than 'upper'")
  R_Kr1 <- A3_kMr(kernel,r)
  R_Kr2 <- A3_kMr(kernel,r+2)
  # BCV1 objective: plug-in MISE estimate using the convolution kernel
  # derivative of order r+2 (diagonal terms excluded).
  fbcv1 <- function(h)
  {
    D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
    diag(D1) <- 0
    D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
    D <- mean(D1)
    (1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
  }
  # BCV2 objective: same structure, but uses the plain kernel derivative of
  # order 2*(r+2).
  fbcv2 <- function(h)
  {
    D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
    diag(D1) <- 0
    D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
    D <- mean(D1)
    (1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
  }
  # Minimise the selected criterion over [lower, upper].
  if (whichbcv == 1) {
    obj <- optimize(fbcv1, c(lower, upper), tol = tol)
    structure(list(x = x, data.name = name, n = n, kernel = kernel,
                   deriv.order = r, whichbcv = whichbcv, h = obj$minimum,
                   min.bcv1 = obj$objective), class = "h.bcv")
  } else {
    obj <- optimize(fbcv2, c(lower, upper), tol = tol)
    structure(list(x = x, data.name = name, n = n, kernel = kernel,
                   deriv.order = r, whichbcv = whichbcv, h = obj$minimum,
                   min.bcv2 = obj$objective), class = "h.bcv")
  }
}
######
# S3 print method for "h.bcv" objects.
# Writes a short human-readable report (criterion variant, derivative order,
# data summary, kernel, criterion minimum and selected bandwidth) to the
# console and returns the object invisibly.
print.h.bcv <- function(x, digits = NULL, ...)
{
  class(x) <- "h.bcv"
  # The two BCV variants differ only in the printed label and in which
  # minimum component (min.bcv1 / min.bcv2) is reported.
  variant <- if (x$whichbcv == 1) "1" else "2"
  crit <- if (x$whichbcv == 1) x$min.bcv1 else x$min.bcv2
  cat("\nCall:\t", "\tBiased Cross-Validation ", variant, "\n",
      "\nDerivative order = ", x$deriv.order,
      "\nData: ", x$data.name, " (", x$n, " obs.);", "\tKernel: ", x$kernel,
      "\nMin BCV = ", format(crit, digits = digits), ";",
      "\tBandwidth 'h' = ", format(x$h, digits = digits), "\n\n", sep = "")
  invisible(x)
}
######
## Plot the BCV criterion as a function of the bandwidth for an object 'f'
## returned by h.bcv(). Returns a list with the bandwidth grid and the
## evaluated criterion values.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
plot.bcv <- function(f,seq.bws=NULL,main=NULL,sub = NULL, xlab=NULL, ylab=NULL,
type="l",las=1,lwd=1,...)
{
class(f) <- "h.bcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
## Reject kernel/order combinations for which the required kernel derivative
## is identically zero (criterion undefined); mirrors h.bcv.default.
if(f$whichbcv == 1){
if (kernel=="epanechnikov" && r+2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for 'order + 2 >= 3' ")
else if (kernel=="triweight" && r+2 >= 7) stop(" 'triweight kernel derivative = 0' for 'order + 2 >= 7' ")
else if (kernel=="biweight" && r+2 >= 5) stop(" 'biweight kernel derivative = 0' for 'order + 2 >= 5' ")
else if (kernel=="tricube" && r+2 >= 10) stop(" 'tricube kernel derivative = 0' for 'order + 2 >= 10' ")
} else if (f$whichbcv == 2){
if (kernel=="epanechnikov" && 2*(r+2) >= 3) stop(" 'epanechnikov kernel derivative = 0' for '2 * (order + 2) >= 3' ")
else if (kernel=="triweight" && 2*(r+2) >= 7) stop(" 'triweight kernel derivative = 0' for '2 * (order + 2) >= 7' ")
else if (kernel=="biweight" && 2*(r+2) >= 5) stop(" 'biweight kernel derivative = 0' for '2 * (order + 2) >= 5' ")
else if (kernel=="tricube" && 2*(r+2) >= 10) stop(" 'tricube kernel derivative = 0' for '2 * (order + 2) >= 10' ")
}
## Default plot annotations, chosen by criterion variant and order.
if(is.null(xlab)) xlab <- "Bandwidths"
if(is.null(ylab)) ylab <- bquote(BCV~(h[(.(r))]))
if(is.null(main)){
if(f$whichbcv == 1){
if(r !=0) {main <- "Biased Cross-Validation (1) function for \nBandwidth Choice for Density Derivative"}else{
main <- "Biased Cross-Validation (1) function for \nBandwidth Choice for Density Function"}}else{
if(r !=0) {main <- "Biased Cross-Validation (2) function for \nBandwidth Choice for Density Derivative"}else{
main <- "Biased Cross-Validation (2) function for \nBandwidth Choice for Density Function"}
}
}
if(is.null(sub)) sub <- paste("Kernel",kernel,";","Derivative order = ",r)
## Default bandwidth grid: 50 points spanning a fraction of the
## oversmoothed bandwidth up to twice it.
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
R_Kr2 <- A3_kMr(kernel,r+2)
## BCV1 objective (convolution kernel derivative), as in h.bcv.default.
fbcv1 <- function(h)
{
D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
##(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * (D - (1/((n-1)*h^(2*r+5))) * R_Kr2 )
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## BCV2 objective (plain kernel derivative of order 2*(r+2)).
fbcv2 <- function(h)
{
D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## Evaluate the selected criterion over the grid and draw the curve.
if(f$whichbcv == 1){
D <- lapply(1:length(seq.bws), function(i) fbcv1(seq.bws[i]))}else{
D <- lapply(1:length(seq.bws), function(i) fbcv2(seq.bws[i]))}
Minf <- c(do.call("rbind",D))
plot.default(seq.bws,Minf,type=type,las=las,lwd=lwd,xlab=xlab,ylab=ylab,
main=main,sub=sub,font.main=2,cex.main=0.9,font.sub=2,cex.sub=0.7,...)
return(list(kernel=kernel,deriv.order=r,seq.bws=seq.bws, bcv=Minf))
}
plot.h.bcv <- function(x,seq.bws=NULL,...) plot.bcv(x,seq.bws,...)
#####
## Add the BCV criterion curve for 'f' (an h.bcv object) to an existing plot.
## Same computation as plot.bcv(), but draws with lines.default() and
## returns NULL invisibly.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
lines.bcv <- function(f,seq.bws=NULL,...)
{
class(f) <- "h.bcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
## Reject kernel/order combinations with identically-zero kernel derivative.
if(f$whichbcv == 1){
if (kernel=="epanechnikov" && r+2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for 'order + 2 >= 3' ")
else if (kernel=="triweight" && r+2 >= 7) stop(" 'triweight kernel derivative = 0' for 'order + 2 >= 7' ")
else if (kernel=="biweight" && r+2 >= 5) stop(" 'biweight kernel derivative = 0' for 'order + 2 >= 5' ")
else if (kernel=="tricube" && r+2 >= 10) stop(" 'tricube kernel derivative = 0' for 'order + 2 >= 10' ")
} else if (f$whichbcv == 2){
if (kernel=="epanechnikov" && 2*(r+2) >= 3) stop(" 'epanechnikov kernel derivative = 0' for '2 * (order + 2) >= 3' ")
else if (kernel=="triweight" && 2*(r+2) >= 7) stop(" 'triweight kernel derivative = 0' for '2 * (order + 2) >= 7' ")
else if (kernel=="biweight" && 2*(r+2) >= 5) stop(" 'biweight kernel derivative = 0' for '2 * (order + 2) >= 5' ")
else if (kernel=="tricube" && 2*(r+2) >= 10) stop(" 'tricube kernel derivative = 0' for '2 * (order + 2) >= 10' ")
}
## Default bandwidth grid, as in plot.bcv().
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
R_Kr2 <- A3_kMr(kernel,r+2)
## BCV1 objective (convolution kernel derivative).
fbcv1 <- function(h)
{
D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
##(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * (D - (1/((n-1)*h^(2*r+5))) * R_Kr2 )
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## BCV2 objective (plain kernel derivative of order 2*(r+2)).
fbcv2 <- function(h)
{
D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## Evaluate over the grid and add the curve to the current plot.
if(f$whichbcv == 1){
D <- lapply(1:length(seq.bws), function(i) fbcv1(seq.bws[i]))}else{
D <- lapply(1:length(seq.bws), function(i) fbcv2(seq.bws[i]))}
Minf <- c(do.call("rbind",D))
lines.default(seq.bws,Minf,...)
invisible(NULL)
}
lines.h.bcv <- function(x,seq.bws=NULL,...) lines.bcv(x,seq.bws,...)
|
/kedd/R/BCV.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 11,539
|
r
|
## Tue May 21 02:04:46 2013
## Original file Copyright 2013 A.C. Guidoum
## This file is part of the R package kedd.
## Arsalane Chouaib GUIDOUM <acguidoum@usthb.dz> and <starsalane@gmail.com>
## Department of Probabilities-Statistics
## Faculty of Mathematics
## University of Science and Technology Houari Boumediene
## BP 32 El-Alia, U.S.T.H.B, Algeris
## Algeria
##############################################################################
## Biased Cross-Validation (BCV)
h.bcv <- function(x, ...) UseMethod("h.bcv")
## Biased cross-validation (BCV) bandwidth selector for estimating the r-th
## derivative of a density (default method).
##
## Arguments:
##   x           numeric vector of observations (>= 3 data points).
##   whichbcv    1 or 2: which BCV criterion variant to minimise.
##   deriv.order non-negative integer derivative order r.
##   lower,upper bandwidth search interval; the defaults are scaled from the
##               oversmoothed bandwidth 'hos' (lazy evaluation: they are only
##               forced after 'hos' is computed below).
##   tol         convergence tolerance passed to optimize().
##   kernel      kernel name.
## Value: an object of class "h.bcv" with the selected bandwidth 'h' and the
##   attained criterion minimum, or h = NA when the required kernel
##   derivative is identically zero for the chosen kernel/order.
## Fixes over the original: the fallback assignments after stop() were
## unreachable dead code, and the error messages falsely claimed that default
## boundaries had been substituted; scalar '|' replaced by '||'.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
h.bcv.default <- function(x,whichbcv = 1,deriv.order=0,lower=0.1*hos,upper=2*hos,tol=0.1 * lower,
                          kernel=c("gaussian","epanechnikov","triweight","tricube",
                                   "biweight","cosine"),...)
{
  if (!is.numeric(x) || length(dim(x)) >= 1 || length(x) < 3L)
    stop("argument 'x' must be numeric and need at least 3 data points")
  if (any(deriv.order < 0 || deriv.order != round(deriv.order)))
    stop("argument 'deriv.order' is non-negative integers")
  if (missing(kernel)) kernel <- "gaussian"
  r <- deriv.order
  name <- deparse(substitute(x))
  x <- x[!is.na(x)]
  x <- sort(x)
  n <- length(x)
  # For the compact-support polynomial kernels the (r+2)-th (BCV1) or the
  # 2*(r+2)-th (BCV2) kernel derivative vanishes identically, making the
  # criterion degenerate: return an "h.bcv" object with h = NA.
  if (whichbcv == 1) {
    degenerate <- switch(kernel,
                         "epanechnikov" = r + 2 >= 3,
                         "triweight"    = r + 2 >= 7,
                         "biweight"     = r + 2 >= 5,
                         "tricube"      = r + 2 >= 10,
                         FALSE)
    if (degenerate)
      return(structure(list(x = x, data.name = name, n = n, kernel = kernel,
                            deriv.order = r, whichbcv = whichbcv, h = NA,
                            min.bcv1 = NA), class = "h.bcv"))
  } else if (whichbcv == 2) {
    degenerate <- switch(kernel,
                         "epanechnikov" = 2 * (r + 2) >= 3,
                         "triweight"    = 2 * (r + 2) >= 7,
                         "biweight"     = 2 * (r + 2) >= 5,
                         "tricube"      = 2 * (r + 2) >= 10,
                         FALSE)
    if (degenerate)
      return(structure(list(x = x, data.name = name, n = n, kernel = kernel,
                            deriv.order = r, whichbcv = whichbcv, h = NA,
                            min.bcv2 = NA), class = "h.bcv"))
  }
  # Oversmoothed bandwidth; must be computed before 'lower'/'upper' are
  # forced, since their defaults reference it.
  hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
  if (!is.numeric(upper))
    stop("argument 'upper' must be numeric")
  if (!is.numeric(lower))
    stop("argument 'lower' must be numeric")
  if (lower < 0 || lower >= upper)
    stop("the boundaries must be positive and 'lower' must be smaller than 'upper'")
  R_Kr1 <- A3_kMr(kernel,r)
  R_Kr2 <- A3_kMr(kernel,r+2)
  # BCV1 objective: plug-in MISE estimate using the convolution kernel
  # derivative of order r+2 (diagonal terms excluded).
  fbcv1 <- function(h)
  {
    D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
    diag(D1) <- 0
    D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
    D <- mean(D1)
    (1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
  }
  # BCV2 objective: same structure, but uses the plain kernel derivative of
  # order 2*(r+2).
  fbcv2 <- function(h)
  {
    D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
    diag(D1) <- 0
    D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
    D <- mean(D1)
    (1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
  }
  # Minimise the selected criterion over [lower, upper].
  if (whichbcv == 1) {
    obj <- optimize(fbcv1, c(lower, upper), tol = tol)
    structure(list(x = x, data.name = name, n = n, kernel = kernel,
                   deriv.order = r, whichbcv = whichbcv, h = obj$minimum,
                   min.bcv1 = obj$objective), class = "h.bcv")
  } else {
    obj <- optimize(fbcv2, c(lower, upper), tol = tol)
    structure(list(x = x, data.name = name, n = n, kernel = kernel,
                   deriv.order = r, whichbcv = whichbcv, h = obj$minimum,
                   min.bcv2 = obj$objective), class = "h.bcv")
  }
}
######
# S3 print method for "h.bcv" objects.
# Writes a short human-readable report (criterion variant, derivative order,
# data summary, kernel, criterion minimum and selected bandwidth) to the
# console and returns the object invisibly.
print.h.bcv <- function(x, digits = NULL, ...)
{
  class(x) <- "h.bcv"
  # The two BCV variants differ only in the printed label and in which
  # minimum component (min.bcv1 / min.bcv2) is reported.
  variant <- if (x$whichbcv == 1) "1" else "2"
  crit <- if (x$whichbcv == 1) x$min.bcv1 else x$min.bcv2
  cat("\nCall:\t", "\tBiased Cross-Validation ", variant, "\n",
      "\nDerivative order = ", x$deriv.order,
      "\nData: ", x$data.name, " (", x$n, " obs.);", "\tKernel: ", x$kernel,
      "\nMin BCV = ", format(crit, digits = digits), ";",
      "\tBandwidth 'h' = ", format(x$h, digits = digits), "\n\n", sep = "")
  invisible(x)
}
######
## Plot the BCV criterion as a function of the bandwidth for an object 'f'
## returned by h.bcv(). Returns a list with the bandwidth grid and the
## evaluated criterion values.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
plot.bcv <- function(f,seq.bws=NULL,main=NULL,sub = NULL, xlab=NULL, ylab=NULL,
type="l",las=1,lwd=1,...)
{
class(f) <- "h.bcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
## Reject kernel/order combinations for which the required kernel derivative
## is identically zero (criterion undefined); mirrors h.bcv.default.
if(f$whichbcv == 1){
if (kernel=="epanechnikov" && r+2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for 'order + 2 >= 3' ")
else if (kernel=="triweight" && r+2 >= 7) stop(" 'triweight kernel derivative = 0' for 'order + 2 >= 7' ")
else if (kernel=="biweight" && r+2 >= 5) stop(" 'biweight kernel derivative = 0' for 'order + 2 >= 5' ")
else if (kernel=="tricube" && r+2 >= 10) stop(" 'tricube kernel derivative = 0' for 'order + 2 >= 10' ")
} else if (f$whichbcv == 2){
if (kernel=="epanechnikov" && 2*(r+2) >= 3) stop(" 'epanechnikov kernel derivative = 0' for '2 * (order + 2) >= 3' ")
else if (kernel=="triweight" && 2*(r+2) >= 7) stop(" 'triweight kernel derivative = 0' for '2 * (order + 2) >= 7' ")
else if (kernel=="biweight" && 2*(r+2) >= 5) stop(" 'biweight kernel derivative = 0' for '2 * (order + 2) >= 5' ")
else if (kernel=="tricube" && 2*(r+2) >= 10) stop(" 'tricube kernel derivative = 0' for '2 * (order + 2) >= 10' ")
}
## Default plot annotations, chosen by criterion variant and order.
if(is.null(xlab)) xlab <- "Bandwidths"
if(is.null(ylab)) ylab <- bquote(BCV~(h[(.(r))]))
if(is.null(main)){
if(f$whichbcv == 1){
if(r !=0) {main <- "Biased Cross-Validation (1) function for \nBandwidth Choice for Density Derivative"}else{
main <- "Biased Cross-Validation (1) function for \nBandwidth Choice for Density Function"}}else{
if(r !=0) {main <- "Biased Cross-Validation (2) function for \nBandwidth Choice for Density Derivative"}else{
main <- "Biased Cross-Validation (2) function for \nBandwidth Choice for Density Function"}
}
}
if(is.null(sub)) sub <- paste("Kernel",kernel,";","Derivative order = ",r)
## Default bandwidth grid: 50 points spanning a fraction of the
## oversmoothed bandwidth up to twice it.
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
R_Kr2 <- A3_kMr(kernel,r+2)
## BCV1 objective (convolution kernel derivative), as in h.bcv.default.
fbcv1 <- function(h)
{
D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
##(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * (D - (1/((n-1)*h^(2*r+5))) * R_Kr2 )
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## BCV2 objective (plain kernel derivative of order 2*(r+2)).
fbcv2 <- function(h)
{
D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## Evaluate the selected criterion over the grid and draw the curve.
if(f$whichbcv == 1){
D <- lapply(1:length(seq.bws), function(i) fbcv1(seq.bws[i]))}else{
D <- lapply(1:length(seq.bws), function(i) fbcv2(seq.bws[i]))}
Minf <- c(do.call("rbind",D))
plot.default(seq.bws,Minf,type=type,las=las,lwd=lwd,xlab=xlab,ylab=ylab,
main=main,sub=sub,font.main=2,cex.main=0.9,font.sub=2,cex.sub=0.7,...)
return(list(kernel=kernel,deriv.order=r,seq.bws=seq.bws, bcv=Minf))
}
plot.h.bcv <- function(x,seq.bws=NULL,...) plot.bcv(x,seq.bws,...)
#####
## Add the BCV criterion curve for 'f' (an h.bcv object) to an existing plot.
## Same computation as plot.bcv(), but draws with lines.default() and
## returns NULL invisibly.
## NOTE(review): relies on kedd-internal helpers A3_kMr, A2_kM,
## kernel_fun_conv and kernel_fun_der being in scope.
lines.bcv <- function(f,seq.bws=NULL,...)
{
class(f) <- "h.bcv"
r <- f$deriv.order
n <- f$n
kernel <- f$kernel
x <- sort(f$x)
## Reject kernel/order combinations with identically-zero kernel derivative.
if(f$whichbcv == 1){
if (kernel=="epanechnikov" && r+2 >= 3) stop(" 'epanechnikov kernel derivative = 0' for 'order + 2 >= 3' ")
else if (kernel=="triweight" && r+2 >= 7) stop(" 'triweight kernel derivative = 0' for 'order + 2 >= 7' ")
else if (kernel=="biweight" && r+2 >= 5) stop(" 'biweight kernel derivative = 0' for 'order + 2 >= 5' ")
else if (kernel=="tricube" && r+2 >= 10) stop(" 'tricube kernel derivative = 0' for 'order + 2 >= 10' ")
} else if (f$whichbcv == 2){
if (kernel=="epanechnikov" && 2*(r+2) >= 3) stop(" 'epanechnikov kernel derivative = 0' for '2 * (order + 2) >= 3' ")
else if (kernel=="triweight" && 2*(r+2) >= 7) stop(" 'triweight kernel derivative = 0' for '2 * (order + 2) >= 7' ")
else if (kernel=="biweight" && 2*(r+2) >= 5) stop(" 'biweight kernel derivative = 0' for '2 * (order + 2) >= 5' ")
else if (kernel=="tricube" && 2*(r+2) >= 10) stop(" 'tricube kernel derivative = 0' for '2 * (order + 2) >= 10' ")
}
## Default bandwidth grid, as in plot.bcv().
if(is.null(seq.bws)){
hos <- ((243 *(2*r+1)*A3_kMr(kernel,r))/(35* A2_kM(kernel)^2))^(1/(2*r+5)) * sd(x,na.rm = TRUE) * n^(-1/(2*r+5))
seq.bws <- seq(0.15*hos,2*hos,length=50)
}
R_Kr1 <- A3_kMr(kernel,r)
R_Kr2 <- A3_kMr(kernel,r+2)
## BCV1 objective (convolution kernel derivative).
fbcv1 <- function(h)
{
D1 <- kernel_fun_conv(kernel,outer(x,x,"-")/h,deriv.order=r+2)
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
##(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * (D - (1/((n-1)*h^(2*r+5))) * R_Kr2 )
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## BCV2 objective (plain kernel derivative of order 2*(r+2)).
fbcv2 <- function(h)
{
D1 <- kernel_fun_der(kernel,outer(x,x,"-")/h,deriv.order=2*(r+2))
diag(D1) <- 0
D1 <- ((-1)^(r+2)/((n-1)*h^(2*r+5)))* colSums(D1)
D <- mean(D1)
(1/(n*h^(2*r+1)))* R_Kr1 + (0.25*h^4)*(A2_kM(kernel))^2 * D
}
## Evaluate over the grid and add the curve to the current plot.
if(f$whichbcv == 1){
D <- lapply(1:length(seq.bws), function(i) fbcv1(seq.bws[i]))}else{
D <- lapply(1:length(seq.bws), function(i) fbcv2(seq.bws[i]))}
Minf <- c(do.call("rbind",D))
lines.default(seq.bws,Minf,...)
invisible(NULL)
}
lines.h.bcv <- function(x,seq.bws=NULL,...) lines.bcv(x,seq.bws,...)
|
## Put comments here that give an overall description of what your
## functions do
## The first function, makeCacheMatrix, creates a special "matrix", which is really a list containing functions to: set the value of the matrix, get the value of the matrix,
## set the value of the Inverse
## get the value of the Inverse
## Build a matrix wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)        replace the stored matrix and drop any cached inverse
##   get()         return the stored matrix
##   setinv(solve) store a computed inverse in the cache
##   getinv()      return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(solve) cached_inverse <<- solve,
    getinv = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If a cached inverse exists it is returned (with a message); otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
##
## x   : object created by makeCacheMatrix()
## ... : further arguments forwarded to solve() -- the original accepted
##       '...' but silently discarded it (bug fix).
cacheSolve <- function(x, ...) {
  k <- x$getinv()
  if (!is.null(k)) {
    message("getting cached data")
    return(k)
  }
  data <- x$get()
  k <- solve(data, ...)  # forward '...' so callers can tune solve()
  x$setinv(k)
  k
}
|
/cachematrix.R
|
no_license
|
Ankurkaus029/ProgrammingAssignment2
|
R
| false
| false
| 1,012
|
r
|
## Put comments here that give an overall description of what your
## functions do
## The first function, makeCacheMatrix, creates a special "matrix", which is really a list containing functions to: set the value of the matrix, get the value of the matrix,
## set the value of the Inverse
## get the value of the Inverse
## Build a matrix wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)        replace the stored matrix and drop any cached inverse
##   get()         return the stored matrix
##   setinv(solve) store a computed inverse in the cache
##   getinv()      return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(solve) cached_inverse <<- solve,
    getinv = function() cached_inverse
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve retrieves the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If a cached inverse exists it is returned (with a message); otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
##
## x   : object created by makeCacheMatrix()
## ... : further arguments forwarded to solve() -- the original accepted
##       '...' but silently discarded it (bug fix).
cacheSolve <- function(x, ...) {
  k <- x$getinv()
  if (!is.null(k)) {
    message("getting cached data")
    return(k)
  }
  data <- x$get()
  k <- solve(data, ...)  # forward '...' so callers can tune solve()
  x$setinv(k)
  k
}
|
## run_analysis.R -- builds a tidy summary of the UCI HAR (smartphone
## accelerometer) data set: downloads the raw data, merges the test/train
## sets, keeps only mean/std measurements, labels activities, and writes the
## per-subject, per-activity averages to tidy_data.txt.
## Install and require the necessary libraries
#install.packages("readr")
library(readr)
#install.packages("dplyr")
library(dplyr)
## Download the data set and unzip the file:
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./data.zip")
unzip(zipfile = "./data.zip")
## Import the features vector and extract names vector
features <- read.table("./UCI HAR Dataset/features.txt")
feature.names <- features$V2
## Import the test and train data sets
feat.test <- read.table("./UCI HAR Dataset/test/X_test.txt")
feat.train <- read.table("./UCI HAR Dataset/train/X_train.txt")
## Merge the test and train data sets (rows: test first, then train)
dat <- rbind(feat.test, feat.train)
## Naming the features
names(dat) <- feature.names
## Importing the test and train activities
act.test <- read.table("./UCI HAR Dataset/test/y_test.txt")
act.train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# Merging the test and train activities (same row order as `dat` above)
activity <- rbind(act.test, act.train)
activity <- factor(activity$V1)
## Importing labels for activities
act.labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
## Replace the numeric activity codes with their descriptive level names
levels(activity) <- act.labels$V2
## Adding labels to data
dat <- cbind(activity, dat)
## Import subjects
sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## Merging subjects
subject <- rbind(sub_test, sub_train)
subject <- subject$V1
dat <- cbind(subject, dat)
## Selecting measurements on mean and standard deviation
# NOTE(review): in R's TRE regexes "\\m" escapes a plain character, so this
# pattern matches the literal substrings "mean()" or "std()"; names such as
# meanFreq() (no "()" right after "mean") are excluded -- confirm intended.
sel <- grepl("\\mean\\()|std\\()", feature.names)
sel <- feature.names[sel]
dat <- dat[,c("subject", "activity", sel)]
## Strip the "()" suffix from the selected column names
names(dat) <- gsub("\\()", "", names(dat))
# install.packages("stringr")
library(stringr)
#install.packages("plyr")
library(plyr)
## Calculate the mean for the each of the records using aggregate function
## (grouping key is the "activity subject" string pasted together)
new_dat <- aggregate(dat[,3:ncol(dat)], list(paste(dat$activity, dat$subject)), mean)
## Splitting the group name to form two new columns
groups <- str_split_fixed(new_dat$Group.1, " ", 2)
## Adding the two columns to the data set
new_dat <- cbind(groups, new_dat)
## Removing the unnecessary column (the pasted Group.1 key)
new_dat <- new_dat[,-3]
## Renaming the columns to reflect their actual purpose
# NOTE(review): plyr is attached after dplyr, so this resolves to
# plyr::rename, whose second argument is a c(old = new) replacement vector.
new_dat <- rename(new_dat, c("1" = "activity", "2"="subject"))
## Writing the file into a table
write.table(new_dat, file="./tidy_data.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
SouradeepSaha/Getting-and-cleaning-data
|
R
| false
| false
| 2,443
|
r
|
## run_analysis.R -- builds a tidy summary of the UCI HAR (smartphone
## accelerometer) data set: downloads the raw data, merges the test/train
## sets, keeps only mean/std measurements, labels activities, and writes the
## per-subject, per-activity averages to tidy_data.txt.
## Install and require the necessary libraries
#install.packages("readr")
library(readr)
#install.packages("dplyr")
library(dplyr)
## Download the data set and unzip the file:
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./data.zip")
unzip(zipfile = "./data.zip")
## Import the features vector and extract names vector
features <- read.table("./UCI HAR Dataset/features.txt")
feature.names <- features$V2
## Import the test and train data sets
feat.test <- read.table("./UCI HAR Dataset/test/X_test.txt")
feat.train <- read.table("./UCI HAR Dataset/train/X_train.txt")
## Merge the test and train data sets (rows: test first, then train)
dat <- rbind(feat.test, feat.train)
## Naming the features
names(dat) <- feature.names
## Importing the test and train activities
act.test <- read.table("./UCI HAR Dataset/test/y_test.txt")
act.train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# Merging the test and train activities (same row order as `dat` above)
activity <- rbind(act.test, act.train)
activity <- factor(activity$V1)
## Importing labels for activities
act.labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
## Replace the numeric activity codes with their descriptive level names
levels(activity) <- act.labels$V2
## Adding labels to data
dat <- cbind(activity, dat)
## Import subjects
sub_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
sub_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## Merging subjects
subject <- rbind(sub_test, sub_train)
subject <- subject$V1
dat <- cbind(subject, dat)
## Selecting measurements on mean and standard deviation
# NOTE(review): in R's TRE regexes "\\m" escapes a plain character, so this
# pattern matches the literal substrings "mean()" or "std()"; names such as
# meanFreq() (no "()" right after "mean") are excluded -- confirm intended.
sel <- grepl("\\mean\\()|std\\()", feature.names)
sel <- feature.names[sel]
dat <- dat[,c("subject", "activity", sel)]
## Strip the "()" suffix from the selected column names
names(dat) <- gsub("\\()", "", names(dat))
# install.packages("stringr")
library(stringr)
#install.packages("plyr")
library(plyr)
## Calculate the mean for the each of the records using aggregate function
## (grouping key is the "activity subject" string pasted together)
new_dat <- aggregate(dat[,3:ncol(dat)], list(paste(dat$activity, dat$subject)), mean)
## Splitting the group name to form two new columns
groups <- str_split_fixed(new_dat$Group.1, " ", 2)
## Adding the two columns to the data set
new_dat <- cbind(groups, new_dat)
## Removing the unnecessary column (the pasted Group.1 key)
new_dat <- new_dat[,-3]
## Renaming the columns to reflect their actual purpose
# NOTE(review): plyr is attached after dplyr, so this resolves to
# plyr::rename, whose second argument is a c(old = new) replacement vector.
new_dat <- rename(new_dat, c("1" = "activity", "2"="subject"))
## Writing the file into a table
write.table(new_dat, file="./tidy_data.txt", row.names = FALSE)
|
# Worked example for the nsRFA GENPAR functions: fit a three-parameter
# generalized Pareto distribution to one station's annual flows via
# L-moments, then exercise the density/CDF/quantile/random-number helpers.
library(nsRFA)
### Name: GENPAR
### Title: Three parameter generalized Pareto distribution and L-moments
### Aliases: GENPAR f.genpar F.genpar invF.genpar Lmom.genpar par.genpar
### rand.genpar
### Keywords: distribution
### ** Examples
data(hydroSIMN)
annualflows
summary(annualflows)
x <- annualflows["dato"][,]
fac <- factor(annualflows["cod"][,])
split(x,fac)
# Annual-flow sample for the station with code "45"
camp <- split(x,fac)$"45"
ll <- Lmoments(camp)
# ll[1], ll[2], ll[4] are presumably the sample L-mean, L-scale and
# L-skewness expected by par.genpar -- verify against nsRFA::Lmoments ordering
parameters <- par.genpar(ll[1],ll[2],ll[4])
f.genpar(1800,parameters$xi,parameters$alfa,parameters$k)
F.genpar(1800,parameters$xi,parameters$alfa,parameters$k)
invF.genpar(0.7161775,parameters$xi,parameters$alfa,parameters$k)
Lmom.genpar(parameters$xi,parameters$alfa,parameters$k)
rand.genpar(100,parameters$xi,parameters$alfa,parameters$k)
# Same fit repeated with regional (pooled across stations) L-moments
Rll <- regionalLmoments(x,fac); Rll
parameters <- par.genpar(Rll[1],Rll[2],Rll[4])
Lmom.genpar(parameters$xi,parameters$alfa,parameters$k)
|
/data/genthat_extracted_code/nsRFA/examples/GENPAR.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 903
|
r
|
# Worked example for the nsRFA GENPAR functions: fit a three-parameter
# generalized Pareto distribution to one station's annual flows via
# L-moments, then exercise the density/CDF/quantile/random-number helpers.
library(nsRFA)
### Name: GENPAR
### Title: Three parameter generalized Pareto distribution and L-moments
### Aliases: GENPAR f.genpar F.genpar invF.genpar Lmom.genpar par.genpar
### rand.genpar
### Keywords: distribution
### ** Examples
data(hydroSIMN)
annualflows
summary(annualflows)
x <- annualflows["dato"][,]
fac <- factor(annualflows["cod"][,])
split(x,fac)
# Annual-flow sample for the station with code "45"
camp <- split(x,fac)$"45"
ll <- Lmoments(camp)
# ll[1], ll[2], ll[4] are presumably the sample L-mean, L-scale and
# L-skewness expected by par.genpar -- verify against nsRFA::Lmoments ordering
parameters <- par.genpar(ll[1],ll[2],ll[4])
f.genpar(1800,parameters$xi,parameters$alfa,parameters$k)
F.genpar(1800,parameters$xi,parameters$alfa,parameters$k)
invF.genpar(0.7161775,parameters$xi,parameters$alfa,parameters$k)
Lmom.genpar(parameters$xi,parameters$alfa,parameters$k)
rand.genpar(100,parameters$xi,parameters$alfa,parameters$k)
# Same fit repeated with regional (pooled across stations) L-moments
Rll <- regionalLmoments(x,fac); Rll
parameters <- par.genpar(Rll[1],Rll[2],Rll[4])
Lmom.genpar(parameters$xi,parameters$alfa,parameters$k)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/groan.R
\name{groan}
\alias{groan}
\title{Function to return random dad joke}
\usage{
groan(sting = TRUE)
}
\arguments{
\item{sting}{Plays a joke sting after the joke. Sound from
\url{https://archive.org/details/Rimshot_254}}
}
\value{
Invisibly returns a two item list with the first item holding the
joke as a character and the second item containing the joke id as a
character.
}
\description{
This function returns a random dad joke(s) from
\url{https://icanhazdadjoke.com} as a message and invisibly as a character.
}
\examples{
groan(sting = FALSE)
}
|
/man/groan.Rd
|
no_license
|
jhollist/dadjokeapi
|
R
| false
| true
| 654
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/groan.R
\name{groan}
\alias{groan}
\title{Function to return random dad joke}
\usage{
groan(sting = TRUE)
}
\arguments{
\item{sting}{Plays a joke sting after the joke. Sound from
\url{https://archive.org/details/Rimshot_254}}
}
\value{
Invisibly returns a two item list with the first item holding the
joke as a character and the second item containing the joke id as a
character.
}
\description{
This function returns a random dad joke(s) from
\url{https://icanhazdadjoke.com} as a message and invisibly as a character.
}
\examples{
groan(sting = FALSE)
}
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Capr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
##################
#save component function
#########################
# saveState() converts Capr's S4 cohort-definition objects into plain R
# lists (S3) that serialize cleanly to JSON; one method per S4 class below.
setGeneric("saveState", function(x){standardGeneric("saveState")})
#' Save State for components
#'
#' These functions coerce s4 Capr objects to s3 so that they are in a json save state
#'
#' @param x a criteria class object in s4
#' @return the object converted to s3 to be saved as a json object
#' @rdname saveState-method
#' @aliases saveState
#' @aliases saveState,Concept-method
setMethod("saveState", "Concept",
function(x){
# Generic slot dump: build a named list mapping each slot name to its value.
nm <- methods::slotNames(methods::is(x))
concept <- lapply(nm, slot, object = x)
names(concept) <- nm
return(concept)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptSetItem-method
setMethod("saveState", "ConceptSetItem",
function(x){
list('Concept' = saveState(x@Concept),
'isExcluded' = x@isExcluded,
'includeDescendants' = x@includeDescendants,
'includeMapped' = x@includeMapped)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptSetExpression-method
setMethod("saveState", "ConceptSetExpression",
function(x){
list('id' = x@id,
'Name' = x@Name,
'Expression' = lapply(x@Expression, saveState))
})
#' @rdname saveState-method
#' @aliases saveState,OpAttribute-method
setMethod("saveState", "OpAttribute",
function(x){
list('Name' = x@Name,
'Op' = x@Op,
'Contents' = x@Contents)
})
#' @rdname saveState-method
#' @aliases saveState,SourceConceptAttribute-method
setMethod("saveState", "SourceConceptAttribute",
function(x){
list('Name' = x@Name,
'SourceCodesetId' = x@SourceCodesetId)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptAttribute-method
setMethod("saveState", "ConceptAttribute",
function(x){
list('Name' = x@Name,
'Concepts' = x@Concepts)
})
#' @rdname saveState-method
#' @aliases saveState,CorrelatedCriteriaAttribute-method
setMethod("saveState", "CorrelatedCriteriaAttribute",
function(x){
list('Name' = x@Name,
'Group' = saveState(x@Group))
})
#' @rdname saveState-method
#' @aliases saveState,LogicAttribute-method
setMethod("saveState", "LogicAttribute",
function(x){
list('Name' = x@Name,
'Logic' = x@Logic)
})
#' @rdname saveState-method
#' @aliases saveState,Window-method
setMethod("saveState", "Window",
function(x){
list('Event' = x@Event,
'Start' = x@Start,
'End' = x@End,
'Index' = x@Index)
})
#' @rdname saveState-method
#' @aliases saveState,Timeline-method
setMethod("saveState", "Timeline",
function(x){
list('StartWindow' = saveState(x@StartWindow),
'EndWindow' = saveState(x@EndWindow),
'RestrictVisit' = x@RestrictVisit,
'IgnoreObservationPeriod' = x@IgnoreObservationPeriod)
})
#' @rdname saveState-method
#' @aliases saveState,Occurrence-method
setMethod("saveState", "Occurrence",
function(x){
list('Type' = x@Type,
'Count' = x@Count,
'isDistinct' = x@isDistinct)
})
#' @rdname saveState-method
#' @aliases saveState,ExpressionType-method
setMethod("saveState", "ExpressionType",
function(x){
list('Type' = x@Type,
'Count' = x@Count)
})
#' @rdname saveState-method
#' @aliases saveState,ObservationWindow-method
setMethod("saveState", "ObservationWindow",
function(x){
list('PriorDays' = x@PriorDays,
'PostDays' = x@PostDays)
})
#' @rdname saveState-method
#' @aliases saveState,Limit-method
setMethod("saveState", "Limit",
function(x){
list('Type' = x@Type)
})
#' @rdname saveState-method
#' @aliases saveState,Query-method
setMethod("saveState", "Query", function(x){
ll <- list('Domain' = x@Domain,
'CodesetId' = x@CodesetId,
'Attributes' = lapply(x@Attributes, saveState))
# An unset CodesetId slot has length 0; serialize it as NA instead.
if(length(ll$CodesetId) == 0){
ll$CodesetId <- NA_character_
}
return(ll)
})
#' @rdname saveState-method
#' @aliases saveState,Count-method
setMethod("saveState", "Count",
function(x){
list('Criteria' = saveState(x@Criteria),
'Timeline' = saveState(x@Timeline),
'Occurrence' =saveState(x@Occurrence))
})
#' @rdname saveState-method
#' @aliases saveState,Group-method
setMethod("saveState", "Group",
function(x){
# Recursively serialize nested criteria, demographic criteria and sub-groups.
list('Type' = saveState(x@Type),
'CriteriaList' = lapply(x@CriteriaList, saveState),
'DemographicCriteriaList' = lapply(x@DemographicCriteriaList, saveState),
'Groups' = lapply(x@Groups, saveState))
})
#' @rdname saveState-method
#' @aliases saveState,MetaData-method
setMethod("saveState", "MetaData",
function(x){
list('ComponentType' = x@ComponentType,
'Name' = x@Name,
'Description' = x@Description)
})
#' @rdname saveState-method
#' @aliases saveState,DateOffsetEndStrategy-method
setMethod("saveState", "DateOffsetEndStrategy",
function(x){
list('DateField' = x@DateField,
'Offset' = x@Offset)
})
#' @rdname saveState-method
#' @aliases saveState,CustomEraEndStrategy-method
setMethod("saveState", "CustomEraEndStrategy",
function(x){
list('DrugCodesetId' = x@DrugCodesetId,
'GapDays' = x@GapDays,
'Offset' = x@Offset)
})
#' @rdname saveState-method
#' @aliases saveState,EndOfCtsObsEndStrategy-method
setMethod("saveState", "EndOfCtsObsEndStrategy",
function(x){
list('EndOfContinuousObservation' = x@EndOfContinuousObservation)
})
#' @rdname saveState-method
#' @aliases saveState,CollapseSettings-method
setMethod("saveState", "CollapseSettings",
function(x){
list('CollapseType' = x@CollapseType,
'EraPad' = x@EraPad)
})
#' @rdname saveState-method
#' @aliases saveState,CensorWindow-method
setMethod("saveState", "CensorWindow",
function(x){
list('StartDate' = x@StartDate,
'EndDate' = x@EndDate)
})
#' @rdname saveState-method
#' @aliases saveState,Component-method
setMethod("saveState", "Component",
function(x){
if(x@MetaData@ComponentType == "PrimaryCriteria"){
# PrimaryCriteria: CriteriaExpression holds a CriteriaList plus an
# ObservationWindow, each of which must be serialized separately.
cl <- lapply(x@CriteriaExpression$CriteriaList, saveState)
ow <- saveState(x@CriteriaExpression$ObservationWindow)
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = list('CriteriaList' = cl,
'ObservationWindow' = ow),
'Limit' = lapply(x@Limit, saveState),
'ConceptSetExpression' = lapply(x@ConceptSetExpression, saveState))
} else if(x@MetaData@ComponentType == "CohortEra"){
# CohortEra: only collapse/censor settings; Limit and concept sets are
# emitted as empty lists.
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = list('CollapseSettings' = saveState(x@CriteriaExpression$CollapseSettings),
'CensorWindow' = saveState(x@CriteriaExpression$CensorWindow)),
'Limit' = list(),
'ConceptSetExpression' = list())
} else{
# All other component types: serialize every part generically.
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = lapply(x@CriteriaExpression, saveState),
'Limit' = lapply(x@Limit, saveState),
'ConceptSetExpression' = lapply(x@ConceptSetExpression, saveState))
}
return(ll)
})
|
/R/LowLevelSaveFn.R
|
permissive
|
rfherrerac/Capr
|
R
| false
| false
| 8,873
|
r
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Capr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
##################
#save component function
#########################
# saveState() converts Capr's S4 cohort-definition objects into plain R
# lists (S3) that serialize cleanly to JSON; one method per S4 class below.
setGeneric("saveState", function(x){standardGeneric("saveState")})
#' Save State for components
#'
#' These functions coerce s4 Capr objects to s3 so that they are in a json save state
#'
#' @param x a criteria class object in s4
#' @return the object converted to s3 to be saved as a json object
#' @rdname saveState-method
#' @aliases saveState
#' @aliases saveState,Concept-method
setMethod("saveState", "Concept",
function(x){
# Generic slot dump: build a named list mapping each slot name to its value.
nm <- methods::slotNames(methods::is(x))
concept <- lapply(nm, slot, object = x)
names(concept) <- nm
return(concept)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptSetItem-method
setMethod("saveState", "ConceptSetItem",
function(x){
list('Concept' = saveState(x@Concept),
'isExcluded' = x@isExcluded,
'includeDescendants' = x@includeDescendants,
'includeMapped' = x@includeMapped)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptSetExpression-method
setMethod("saveState", "ConceptSetExpression",
function(x){
list('id' = x@id,
'Name' = x@Name,
'Expression' = lapply(x@Expression, saveState))
})
#' @rdname saveState-method
#' @aliases saveState,OpAttribute-method
setMethod("saveState", "OpAttribute",
function(x){
list('Name' = x@Name,
'Op' = x@Op,
'Contents' = x@Contents)
})
#' @rdname saveState-method
#' @aliases saveState,SourceConceptAttribute-method
setMethod("saveState", "SourceConceptAttribute",
function(x){
list('Name' = x@Name,
'SourceCodesetId' = x@SourceCodesetId)
})
#' @rdname saveState-method
#' @aliases saveState,ConceptAttribute-method
setMethod("saveState", "ConceptAttribute",
function(x){
list('Name' = x@Name,
'Concepts' = x@Concepts)
})
#' @rdname saveState-method
#' @aliases saveState,CorrelatedCriteriaAttribute-method
setMethod("saveState", "CorrelatedCriteriaAttribute",
function(x){
list('Name' = x@Name,
'Group' = saveState(x@Group))
})
#' @rdname saveState-method
#' @aliases saveState,LogicAttribute-method
setMethod("saveState", "LogicAttribute",
function(x){
list('Name' = x@Name,
'Logic' = x@Logic)
})
#' @rdname saveState-method
#' @aliases saveState,Window-method
setMethod("saveState", "Window",
function(x){
list('Event' = x@Event,
'Start' = x@Start,
'End' = x@End,
'Index' = x@Index)
})
#' @rdname saveState-method
#' @aliases saveState,Timeline-method
setMethod("saveState", "Timeline",
function(x){
list('StartWindow' = saveState(x@StartWindow),
'EndWindow' = saveState(x@EndWindow),
'RestrictVisit' = x@RestrictVisit,
'IgnoreObservationPeriod' = x@IgnoreObservationPeriod)
})
#' @rdname saveState-method
#' @aliases saveState,Occurrence-method
setMethod("saveState", "Occurrence",
function(x){
list('Type' = x@Type,
'Count' = x@Count,
'isDistinct' = x@isDistinct)
})
#' @rdname saveState-method
#' @aliases saveState,ExpressionType-method
setMethod("saveState", "ExpressionType",
function(x){
list('Type' = x@Type,
'Count' = x@Count)
})
#' @rdname saveState-method
#' @aliases saveState,ObservationWindow-method
setMethod("saveState", "ObservationWindow",
function(x){
list('PriorDays' = x@PriorDays,
'PostDays' = x@PostDays)
})
#' @rdname saveState-method
#' @aliases saveState,Limit-method
setMethod("saveState", "Limit",
function(x){
list('Type' = x@Type)
})
#' @rdname saveState-method
#' @aliases saveState,Query-method
setMethod("saveState", "Query", function(x){
ll <- list('Domain' = x@Domain,
'CodesetId' = x@CodesetId,
'Attributes' = lapply(x@Attributes, saveState))
# An unset CodesetId slot has length 0; serialize it as NA instead.
if(length(ll$CodesetId) == 0){
ll$CodesetId <- NA_character_
}
return(ll)
})
#' @rdname saveState-method
#' @aliases saveState,Count-method
setMethod("saveState", "Count",
function(x){
list('Criteria' = saveState(x@Criteria),
'Timeline' = saveState(x@Timeline),
'Occurrence' =saveState(x@Occurrence))
})
#' @rdname saveState-method
#' @aliases saveState,Group-method
setMethod("saveState", "Group",
function(x){
# Recursively serialize nested criteria, demographic criteria and sub-groups.
list('Type' = saveState(x@Type),
'CriteriaList' = lapply(x@CriteriaList, saveState),
'DemographicCriteriaList' = lapply(x@DemographicCriteriaList, saveState),
'Groups' = lapply(x@Groups, saveState))
})
#' @rdname saveState-method
#' @aliases saveState,MetaData-method
setMethod("saveState", "MetaData",
function(x){
list('ComponentType' = x@ComponentType,
'Name' = x@Name,
'Description' = x@Description)
})
#' @rdname saveState-method
#' @aliases saveState,DateOffsetEndStrategy-method
setMethod("saveState", "DateOffsetEndStrategy",
function(x){
list('DateField' = x@DateField,
'Offset' = x@Offset)
})
#' @rdname saveState-method
#' @aliases saveState,CustomEraEndStrategy-method
setMethod("saveState", "CustomEraEndStrategy",
function(x){
list('DrugCodesetId' = x@DrugCodesetId,
'GapDays' = x@GapDays,
'Offset' = x@Offset)
})
#' @rdname saveState-method
#' @aliases saveState,EndOfCtsObsEndStrategy-method
setMethod("saveState", "EndOfCtsObsEndStrategy",
function(x){
list('EndOfContinuousObservation' = x@EndOfContinuousObservation)
})
#' @rdname saveState-method
#' @aliases saveState,CollapseSettings-method
setMethod("saveState", "CollapseSettings",
function(x){
list('CollapseType' = x@CollapseType,
'EraPad' = x@EraPad)
})
#' @rdname saveState-method
#' @aliases saveState,CensorWindow-method
setMethod("saveState", "CensorWindow",
function(x){
list('StartDate' = x@StartDate,
'EndDate' = x@EndDate)
})
#' @rdname saveState-method
#' @aliases saveState,Component-method
setMethod("saveState", "Component",
function(x){
if(x@MetaData@ComponentType == "PrimaryCriteria"){
# PrimaryCriteria: CriteriaExpression holds a CriteriaList plus an
# ObservationWindow, each of which must be serialized separately.
cl <- lapply(x@CriteriaExpression$CriteriaList, saveState)
ow <- saveState(x@CriteriaExpression$ObservationWindow)
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = list('CriteriaList' = cl,
'ObservationWindow' = ow),
'Limit' = lapply(x@Limit, saveState),
'ConceptSetExpression' = lapply(x@ConceptSetExpression, saveState))
} else if(x@MetaData@ComponentType == "CohortEra"){
# CohortEra: only collapse/censor settings; Limit and concept sets are
# emitted as empty lists.
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = list('CollapseSettings' = saveState(x@CriteriaExpression$CollapseSettings),
'CensorWindow' = saveState(x@CriteriaExpression$CensorWindow)),
'Limit' = list(),
'ConceptSetExpression' = list())
} else{
# All other component types: serialize every part generically.
ll <- list('MetaData' = saveState(x@MetaData),
'CriteriaExpression' = lapply(x@CriteriaExpression, saveState),
'Limit' = lapply(x@Limit, saveState),
'ConceptSetExpression' = lapply(x@ConceptSetExpression, saveState))
}
return(ll)
})
|
## DSC520 assignment: explore the relationship between number of children
## (CHILDS) and number of siblings (SIBS) in the GSS 2016 survey, then fit
## and use a simple linear regression CHILDS ~ SIBS.
library(ggplot2)
library(stats)
library(GGally)
library(scatterplot3d)
getwd()
# NOTE(review): setwd(".\\") resets the working directory to itself -- a
# no-op; consider removing (scripts should avoid setwd altogether).
setwd(".\\")
getwd()
survey <- read.csv("gss-2016.csv")
survey[1:15,c('CHILDS','SIBS','SEX')]
# Assuming 1 for males and 2 for females.
males =subset(survey,SEX==1)
females = subset(survey,SEX==2)
males[1:10,c('CHILDS','SIBS','SEX')]
females[1:10,c('CHILDS','SIBS','SEX')]
## Scatter Plot with best-fit linear regression line
ggplot(data = survey,aes(x = CHILDS, y = SIBS)) +
geom_point(na.rm = TRUE) +
stat_smooth(method = "lm",na.rm = TRUE, col = "dodgerblue3") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Linear Model Fitted to Data")
## Same plot with the axes swapped (SIBS as predictor)
ggplot(data = survey,aes(x = SIBS , y = CHILDS)) +
geom_point(na.rm = TRUE) +
stat_smooth(method = "lm",na.rm = TRUE, col = "dodgerblue3") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Linear Model Fitted to Data")
mean(survey$SIBS,na.rm=TRUE)
## Pairs plot plus three correlation measures
### Correlation between CHILDS and SIBS
GGally::ggpairs(data=survey,columns=c('CHILDS','SIBS','SEX'),title="survey data")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="pearson")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="spearman")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="kendall")
#c('pearson','spearman','kendall')
survey.lm = lm(survey$CHILDS ~survey$SIBS,data=survey)
summary(survey.lm)$r.squared
fit_1 <- lm(CHILDS ~ SIBS,data=survey)
summary(fit_1)
fit_1$coefficients
## Predicted (rounded) number of children for selected sibling counts
round(predict(fit_1, data.frame(SIBS = 1)))
round(predict(fit_1, data.frame(SIBS = 2)))
round(predict(fit_1, data.frame(SIBS = 3)))
round(predict(fit_1, data.frame(SIBS = 5)))
round(predict(fit_1, data.frame(SIBS = 7)))
|
/Bellevue University/Courses/DSC520/Week6/Assignment 6 - GSS_2016_Survey.R
|
no_license
|
safarie1103/Safarie1103
|
R
| false
| false
| 1,896
|
r
|
## DSC520 assignment: explore the relationship between number of children
## (CHILDS) and number of siblings (SIBS) in the GSS 2016 survey, then fit
## and use a simple linear regression CHILDS ~ SIBS.
library(ggplot2)
library(stats)
library(GGally)
library(scatterplot3d)
getwd()
# NOTE(review): setwd(".\\") resets the working directory to itself -- a
# no-op; consider removing (scripts should avoid setwd altogether).
setwd(".\\")
getwd()
survey <- read.csv("gss-2016.csv")
survey[1:15,c('CHILDS','SIBS','SEX')]
# Assuming 1 for males and 2 for females.
males =subset(survey,SEX==1)
females = subset(survey,SEX==2)
males[1:10,c('CHILDS','SIBS','SEX')]
females[1:10,c('CHILDS','SIBS','SEX')]
## Scatter Plot with best-fit linear regression line
ggplot(data = survey,aes(x = CHILDS, y = SIBS)) +
geom_point(na.rm = TRUE) +
stat_smooth(method = "lm",na.rm = TRUE, col = "dodgerblue3") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Linear Model Fitted to Data")
## Same plot with the axes swapped (SIBS as predictor)
ggplot(data = survey,aes(x = SIBS , y = CHILDS)) +
geom_point(na.rm = TRUE) +
stat_smooth(method = "lm",na.rm = TRUE, col = "dodgerblue3") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Linear Model Fitted to Data")
mean(survey$SIBS,na.rm=TRUE)
## Pairs plot plus three correlation measures
### Correlation between CHILDS and SIBS
GGally::ggpairs(data=survey,columns=c('CHILDS','SIBS','SEX'),title="survey data")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="pearson")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="spearman")
cor(survey$CHILDS,survey$SIBS,use="complete.obs",method="kendall")
#c('pearson','spearman','kendall')
survey.lm = lm(survey$CHILDS ~survey$SIBS,data=survey)
summary(survey.lm)$r.squared
fit_1 <- lm(CHILDS ~ SIBS,data=survey)
summary(fit_1)
fit_1$coefficients
## Predicted (rounded) number of children for selected sibling counts
round(predict(fit_1, data.frame(SIBS = 1)))
round(predict(fit_1, data.frame(SIBS = 2)))
round(predict(fit_1, data.frame(SIBS = 3)))
round(predict(fit_1, data.frame(SIBS = 5)))
round(predict(fit_1, data.frame(SIBS = 7)))
|
## ---- CFCall_benchmark ----
## Compare the speed of C and Fortran interfaces with R
dyn.load(file.path("R_Fortran", "01_convolve", paste0("convolve", .Platform$dynlib.ext))) # Platform independence
dyn.load(file.path("R_C", "01_convolve", paste0("convolveC", .Platform$dynlib.ext)))
dyn.load(file.path("R_C", "01_convolve", paste0("convolveCall", .Platform$dynlib.ext)))
# Pure-R reference implementation: O(nx * ny) double loop.
convolveR <- function(x, y){
nx <- length(x)
ny <- length(y)
z <- double(nx + ny -1)
for(i in 1:nx)
for (j in 1:ny)
z[i + j - 1] = z[i + j-1] + x[i]*y[j]
z
}
# .Fortran interface: returns the full argument list; the convolution is the
# 5th element (the preallocated output buffer).
convolveF <- function(x, nx, y, ny){
.Fortran("convolvef", as.double(x), as.integer(nx), as.double(y), as.integer(ny), double(nx + ny - 1))
}
# .C interface: same calling convention as the Fortran version.
convolveC <- function(x, nx, y, ny){
.C("convolveC", as.double(x), as.integer(nx), as.double(y), as.integer(ny), double(nx + ny - 1))
}
# .Call interface: lengths come from the SEXPs, so only x and y are passed.
convolveCall <- function(x, y){
.Call("convolveCall", x, y)
}
library(rbenchmark)
set.seed(1204)
x = rnorm(1000); y = rnorm(1000)
nx = length(x); ny = length(y)
benchmark(convolveF = convolveF(x, nx, y, ny),
convolveC = convolveC(x, nx, y ,ny), convolveCall = convolveCall(x, y),
replications=1000, columns=c('test', 'replications', 'elapsed', 'relative'))
# The pure-R version is timed once only -- too slow for 1000 replications.
system.time(convolveR(x, y))
dyn.unload(file.path("R_Fortran", "01_convolve", paste0("convolve", .Platform$dynlib.ext))) # Platform independence
dyn.unload(file.path("R_C", "01_convolve", paste0("convolveC", .Platform$dynlib.ext)))
dyn.unload(file.path("R_C", "01_convolve", paste0("convolveCall", .Platform$dynlib.ext)))
## ---- Rcpp_benchmark ----
## basic R function
# Naive doubly-recursive Fibonacci (exponential time) -- intentionally slow.
fibR <- function(n) {
if (n == 0) return(0)
if (n == 1) return(1)
return (fibR(n - 1) + fibR(n - 2))
}
library(Rcpp)
library(inline)
## we need a pure C/C++ function here
incltxt <- '
int fibonacci(const int x) {
if (x == 0) return(0);
if (x == 1) return(1);
return (fibonacci(x - 1)) + fibonacci(x - 2);
}'
## Rcpp version of Fibonacci (compiled at runtime via inline::cxxfunction)
fibRcpp <- cxxfunction(signature(xs="int"),
plugin="Rcpp",
incl=incltxt, body='
int x = Rcpp::as<int>(xs);
return Rcpp::wrap( fibonacci(x) );
')
library(rbenchmark)
N = 20
res <- benchmark(fibR(N), fibRcpp(N),
columns=c("test", "replications", "elapsed",
"relative", "user.self", "sys.self"),
order="relative", replications=100)
res
|
/benchmark.R
|
no_license
|
mingsnu/R_Fortran_Test_Notes
|
R
| false
| false
| 2,452
|
r
|
## ---- CFCall_benchmark ----
## Compare the speed of C and Fortran interfaces with R
dyn.load(file.path("R_Fortran", "01_convolve", paste0("convolve", .Platform$dynlib.ext))) # Platform independence
dyn.load(file.path("R_C", "01_convolve", paste0("convolveC", .Platform$dynlib.ext)))
dyn.load(file.path("R_C", "01_convolve", paste0("convolveCall", .Platform$dynlib.ext)))
# Pure-R reference implementation: O(nx * ny) double loop.
convolveR <- function(x, y){
nx <- length(x)
ny <- length(y)
z <- double(nx + ny -1)
for(i in 1:nx)
for (j in 1:ny)
z[i + j - 1] = z[i + j-1] + x[i]*y[j]
z
}
# .Fortran interface: returns the full argument list; the convolution is the
# 5th element (the preallocated output buffer).
convolveF <- function(x, nx, y, ny){
.Fortran("convolvef", as.double(x), as.integer(nx), as.double(y), as.integer(ny), double(nx + ny - 1))
}
# .C interface: same calling convention as the Fortran version.
convolveC <- function(x, nx, y, ny){
.C("convolveC", as.double(x), as.integer(nx), as.double(y), as.integer(ny), double(nx + ny - 1))
}
# .Call interface: lengths come from the SEXPs, so only x and y are passed.
convolveCall <- function(x, y){
.Call("convolveCall", x, y)
}
library(rbenchmark)
set.seed(1204)
x = rnorm(1000); y = rnorm(1000)
nx = length(x); ny = length(y)
benchmark(convolveF = convolveF(x, nx, y, ny),
convolveC = convolveC(x, nx, y ,ny), convolveCall = convolveCall(x, y),
replications=1000, columns=c('test', 'replications', 'elapsed', 'relative'))
# The pure-R version is timed once only -- too slow for 1000 replications.
system.time(convolveR(x, y))
dyn.unload(file.path("R_Fortran", "01_convolve", paste0("convolve", .Platform$dynlib.ext))) # Platform independence
dyn.unload(file.path("R_C", "01_convolve", paste0("convolveC", .Platform$dynlib.ext)))
dyn.unload(file.path("R_C", "01_convolve", paste0("convolveCall", .Platform$dynlib.ext)))
## ---- Rcpp_benchmark ----
## basic R function
# Naive doubly-recursive Fibonacci (exponential time) -- intentionally slow.
fibR <- function(n) {
if (n == 0) return(0)
if (n == 1) return(1)
return (fibR(n - 1) + fibR(n - 2))
}
library(Rcpp)
library(inline)
## we need a pure C/C++ function here
incltxt <- '
int fibonacci(const int x) {
if (x == 0) return(0);
if (x == 1) return(1);
return (fibonacci(x - 1)) + fibonacci(x - 2);
}'
## Rcpp version of Fibonacci (compiled at runtime via inline::cxxfunction)
fibRcpp <- cxxfunction(signature(xs="int"),
plugin="Rcpp",
incl=incltxt, body='
int x = Rcpp::as<int>(xs);
return Rcpp::wrap( fibonacci(x) );
')
library(rbenchmark)
N = 20
res <- benchmark(fibR(N), fibRcpp(N),
columns=c("test", "replications", "elapsed",
"relative", "user.self", "sys.self"),
order="relative", replications=100)
res
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EarlyStopping.R
\name{Scaling}
\alias{Scaling}
\title{scaling input matrix}
\usage{
Scaling(X.mat)
}
\description{
scaling input matrix
}
|
/project-2-linear-models/linearModels.Rcheck/00_pkg_src/linearModels/man/Scaling.Rd
|
no_license
|
nhanderson/cs499-spring2019
|
R
| false
| true
| 214
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EarlyStopping.R
\name{Scaling}
\alias{Scaling}
\title{scaling input matrix}
\usage{
Scaling(X.mat)
}
\description{
scaling input matrix
}
|
## Fuse Networks
# Combine molecule-molecule edges, FGNEM knockout edges and knockout->protein
# edges into a single node/edge table pair suitable for Cytoscape.
# Load things
library( "dplyr" )
annotated_molecule_edges <- readRDS( "processed-data/annotated_molecule_edges.rds" )
fgnem_ko_edges <- readRDS( "processed-data/fgnem_ko_edges.rds" )
molecule_ids <- readRDS( "processed-data/molecule-ids.rds" )
# Assume kos loaded
# Get the KO-protein map: for each knockout gene, select molecules whose
# `Protein Gene` field contains that gene as a ;-separated token.
ko_proteins <- bind_rows( lapply(
  kos,
  function( ko ) {
    molecule_ids %>%
      filter( grepl( paste0( "(^|;)", ko, "(;|$)" ), `Protein Gene` ) ) %>%
      transmute( `Molecule ID`, `Protein Gene`, Knockout = ko )
  }
))
# Make ko-protein edges
ko_protein_links <-
  ko_proteins %>%
  transmute( source = Knockout, target = `Molecule ID`, interaction = "gene product" )
# Put all edges in one DF
cy_edges <- union_all( annotated_molecule_edges, fgnem_ko_edges ) %>%
  union_all( ko_protein_links ) %>%
  as.data.frame()
# Make nodes df: every molecule appearing as a source or target
molecule_nodes <- union(
  distinct( annotated_molecule_edges, `Molecule ID` = source ),
  distinct( annotated_molecule_edges, `Molecule ID` = target ) )
# BUG FIX: "left_joiin" was a typo for left_join, and `Knockout` (backticks)
# referenced a nonexistent object where the string "Knockout" was intended.
# left_join uses the shared `Molecule ID` column as the key (natural join).
cy_nodes <- molecule_nodes %>% left_join( molecule_ids ) %>%
  select( -id ) %>%
  select( id = `Molecule ID`, full_name = `Molecule Name`, type = `Molecule Type` ) %>%
  union( tibble( id = kos, full_name = id, type = "Knockout" ) ) %>%
  as.data.frame()
# Save objects
destination <- "results/fused-network"
dir.create( destination, recursive = TRUE, showWarnings = FALSE )
# BUG FIX: paste0() concatenated the directory and file name without a
# separator, writing "results/fused-networkedges.rds"; file.path() adds "/".
saveRDS( cy_edges, file.path( destination, "edges.rds" ) )
saveRDS( cy_nodes, file.path( destination, "nodes.rds" ) )
# Run Cytoscape
if ( askYesNo( "Make Cytoscape network?" ) ){
  # Create cytoscape network with RCy3
  net_id <- RCy3::createNetworkFromDataFrames(
    nodes = cy_nodes,
    edges = cy_edges,
    title = paste( "Annotated Cosine Similarity Network", date() ),
    collection = "H3K Networks" )
  # Visual style
  # TODO
}
|
/R/fuse-networks.R
|
permissive
|
sverchkov/metabolite-and-protein-network-from-knockouts
|
R
| false
| false
| 1,801
|
r
|
## Fuse Networks
# Combine molecule-molecule edges, FGNEM knockout edges and knockout->protein
# edges into a single node/edge table pair suitable for Cytoscape.
# Load things
library( "dplyr" )
annotated_molecule_edges <- readRDS( "processed-data/annotated_molecule_edges.rds" )
fgnem_ko_edges <- readRDS( "processed-data/fgnem_ko_edges.rds" )
molecule_ids <- readRDS( "processed-data/molecule-ids.rds" )
# Assume kos loaded
# Get the KO-protein map: for each knockout gene, select molecules whose
# `Protein Gene` field contains that gene as a ;-separated token.
ko_proteins <- bind_rows( lapply(
  kos,
  function( ko ) {
    molecule_ids %>%
      filter( grepl( paste0( "(^|;)", ko, "(;|$)" ), `Protein Gene` ) ) %>%
      transmute( `Molecule ID`, `Protein Gene`, Knockout = ko )
  }
))
# Make ko-protein edges
ko_protein_links <-
  ko_proteins %>%
  transmute( source = Knockout, target = `Molecule ID`, interaction = "gene product" )
# Put all edges in one DF
cy_edges <- union_all( annotated_molecule_edges, fgnem_ko_edges ) %>%
  union_all( ko_protein_links ) %>%
  as.data.frame()
# Make nodes df: every molecule appearing as a source or target
molecule_nodes <- union(
  distinct( annotated_molecule_edges, `Molecule ID` = source ),
  distinct( annotated_molecule_edges, `Molecule ID` = target ) )
# BUG FIX: "left_joiin" was a typo for left_join, and `Knockout` (backticks)
# referenced a nonexistent object where the string "Knockout" was intended.
# left_join uses the shared `Molecule ID` column as the key (natural join).
cy_nodes <- molecule_nodes %>% left_join( molecule_ids ) %>%
  select( -id ) %>%
  select( id = `Molecule ID`, full_name = `Molecule Name`, type = `Molecule Type` ) %>%
  union( tibble( id = kos, full_name = id, type = "Knockout" ) ) %>%
  as.data.frame()
# Save objects
destination <- "results/fused-network"
dir.create( destination, recursive = TRUE, showWarnings = FALSE )
# BUG FIX: paste0() concatenated the directory and file name without a
# separator, writing "results/fused-networkedges.rds"; file.path() adds "/".
saveRDS( cy_edges, file.path( destination, "edges.rds" ) )
saveRDS( cy_nodes, file.path( destination, "nodes.rds" ) )
# Run Cytoscape
if ( askYesNo( "Make Cytoscape network?" ) ){
  # Create cytoscape network with RCy3
  net_id <- RCy3::createNetworkFromDataFrames(
    nodes = cy_nodes,
    edges = cy_edges,
    title = paste( "Annotated Cosine Similarity Network", date() ),
    collection = "H3K Networks" )
  # Visual style
  # TODO
}
|
library(shiny)
library(DT)
# UI for the tags-classification app: upload a CSV of keywords on the
# "Data" tab, then compute and inspect bi-/tri-/four-grams on the other
# tabs. (Labels are in Russian; "Посчитать" means "Compute".)
shinyUI(fluidPage(
  navbarPage(
    title = "Tags classification",
    # Data tab: CSV upload plus one "compute" button per n-gram size
    tabPanel("Data",
             sidebarLayout(
               sidebarPanel(
                 fileInput("file1", "Choose CSV File",
                           multiple = FALSE,
                           accept = c("text/csv",
                                      "text/comma-separated-values,text/plain",
                                      ".csv")),
                 hr(),
                 h4(strong("Bigrams")),
                 actionButton("calc_bigrams", "Посчитать"),
                 br(),
                 hr(),
                 h4(strong("Trigrams")),
                 actionButton("calc_trigrams", "Посчитать"),
                 br(),
                 hr(),
                 h4(strong("Fourgrams")),
                 actionButton("calc_fourgrams", "Посчитать")
               ),
               # The uploaded file is rendered as an interactive DT table
               mainPanel(DT::dataTableOutput("uploaded_table"))
             )),
    # Each n-gram tab: left column lists the n-grams found in the keywords;
    # right column lists the keywords containing the selected n-gram.
    tabPanel("Bigrams", column(6, h3("Список биграмов, присутствующих в кейвордах"), br() , DT::dataTableOutput("bigrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный биграм"), br(), DT::dataTableOutput("additional_b"))),
    tabPanel("Trigrams", column(6, h3("Список триграмов, присутствующих в кейвордах"), br() , DT::dataTableOutput("trigrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный триграм"), br(), DT::dataTableOutput("additional_t"))),
    tabPanel("Fourgrams", column(6, h3("Список 4-грамов, присутствующих в кейвордах"), br() , DT::dataTableOutput("fourgrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный 4-грам"), br(), DT::dataTableOutput("additional_f")))
  )
))
|
/ui.R
|
no_license
|
SergKramarev/tags-classification
|
R
| false
| false
| 2,331
|
r
|
library(shiny)
library(DT)
# UI for the tags-classification app: upload a CSV of keywords on the
# "Data" tab, then compute and inspect bi-/tri-/four-grams on the other
# tabs. (Labels are in Russian; "Посчитать" means "Compute".)
shinyUI(fluidPage(
  navbarPage(
    title = "Tags classification",
    # Data tab: CSV upload plus one "compute" button per n-gram size
    tabPanel("Data",
             sidebarLayout(
               sidebarPanel(
                 fileInput("file1", "Choose CSV File",
                           multiple = FALSE,
                           accept = c("text/csv",
                                      "text/comma-separated-values,text/plain",
                                      ".csv")),
                 hr(),
                 h4(strong("Bigrams")),
                 actionButton("calc_bigrams", "Посчитать"),
                 br(),
                 hr(),
                 h4(strong("Trigrams")),
                 actionButton("calc_trigrams", "Посчитать"),
                 br(),
                 hr(),
                 h4(strong("Fourgrams")),
                 actionButton("calc_fourgrams", "Посчитать")
               ),
               # The uploaded file is rendered as an interactive DT table
               mainPanel(DT::dataTableOutput("uploaded_table"))
             )),
    # Each n-gram tab: left column lists the n-grams found in the keywords;
    # right column lists the keywords containing the selected n-gram.
    tabPanel("Bigrams", column(6, h3("Список биграмов, присутствующих в кейвордах"), br() , DT::dataTableOutput("bigrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный биграм"), br(), DT::dataTableOutput("additional_b"))),
    tabPanel("Trigrams", column(6, h3("Список триграмов, присутствующих в кейвордах"), br() , DT::dataTableOutput("trigrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный триграм"), br(), DT::dataTableOutput("additional_t"))),
    tabPanel("Fourgrams", column(6, h3("Список 4-грамов, присутствующих в кейвордах"), br() , DT::dataTableOutput("fourgrams")),
             column(6, h3("Список кейвордов в которых присутствует выбранный 4-грам"), br(), DT::dataTableOutput("additional_f")))
  )
))
|
# Principal Component Analysis of Irish industry survey data:
# builds industry-level and year-level totals from an Excel workbook,
# runs prcomp(), and explores the components with factoextra.
install.packages("readxl")
install.packages("matrixStats")
install.packages("factoextra")
library(matrixStats)
library(reshape2)
library("readxl")
workingdic <- getwd()
workingdic <- trimws(workingdic) # Trim any spaces at either end
print(workingdic)
setwd(workingdic)
###################################################### Total of the data for a particular industry over the years to give totals for all tabs,
###################################################### copied to the same dataframe
xl_data <- "InputFiles/Irish Industries in Ireland.xlsx"
sheets <- excel_sheets(xl_data)
no_of_sheets <- length( excel_sheets( xl_data ) )
# BUG FIX: the original initialised economy_CBIC_ind from `economy_data` and
# `exceldata`, neither of which existed at this point. Read the first sheet
# to obtain the two identifier columns instead.
exceldata <- read_xlsx(xl_data, sheet = sheets[1])
economy_CBIC_ind <- exceldata[, 1:2]
for(i in 1:no_of_sheets)
{
  exceldata <- read_xlsx(xl_data, sheet = sheets[i])
  sheetname <- sheets[i]
  # Row totals over the numeric columns: one per-industry value per sheet
  economy_CBIC_ind[, ncol(economy_CBIC_ind) + 1] <- rowSums(Filter(is.numeric, exceldata))
  names(economy_CBIC_ind)[ncol(economy_CBIC_ind)] <- paste0(sheetname)
}
Econony_industry_basis <- economy_CBIC_ind
head(Econony_industry_basis)
###################################################### Adding the data of all industries to give yearly numbers for all tabs,
###################################################### copied to the same dataframe
economy_CBIC_year <- data.frame(c(2000:2017))
economy_column_name <- c("Years")
colnames(economy_CBIC_year) <- economy_column_name
for(i in 1:no_of_sheets)
{
  # (readxl is already attached above; no need to reload it per iteration)
  sheetname <- sheets[i]
  exceldata <- read_xlsx(xl_data, sheet = sheetname)
  # Column totals over the numeric columns: one per-year value per sheet
  economy_CBIC_year[, ncol(economy_CBIC_year) + 1] <- colSums(Filter(is.numeric, exceldata))
  names(economy_CBIC_year)[ncol(economy_CBIC_year)] <- paste0(sheetname)
}
Econony_yearly_basis <- economy_CBIC_year
head(Econony_yearly_basis)
########################## Only the numerical data can be used for PCA
########################## The analysis below is carried out on the industry-level totals
data_numeric_variables <- sapply(Econony_industry_basis, is.numeric)
data_numeric_variables
######################### Removing the non numeric values
data_file_adjusted <- Econony_industry_basis[, data_numeric_variables]
###################### Numeric data passed into prcomp() with center and scale set to TRUE
# Peek at the PCA object via summary().
pca <- prcomp(data_file_adjusted, center = TRUE, scale. = TRUE)
summary(pca)
# 18 principal components are identified, named PC1-PC18.
str(pca)
######################################## Eigenvalues / Variances ###################################################
library("factoextra")
eig_values <- get_eigenvalue(pca)
eig_values
# Scree plot of the percentage of explained variance per component
fviz_eig(pca, addlabels = TRUE, ylim = c(0, 75))
# The graph shows that the majority of the information is available from the
# first five components, so stopping at the fifth component is sufficient.
pca_for_variables <- get_pca_var(pca)
pca_for_variables
# -----------------------------------------------------------------------
# Using `Correlation` plot
# -----------------------------------------------------------------------
library("corrplot")
corrplot(pca_for_variables$cos2, is.corr = FALSE)
fviz_pca_var(pca, col.var = "blue")
# -----------------------------------------------------------------------
# Cos2 - quality of representation
# -----------------------------------------------------------------------
head(pca_for_variables$cos2, 10)
################################################## Plotting the cos2 values as a bar graph using fviz_cos2
fviz_cos2(pca, choice = "var", axes = 1:2)
# -----------------------------------------------------------------------
# Biplot
# -----------------------------------------------------------------------
# Colour by cos2 values: quality on the factor map
fviz_pca_var(pca, col.var = "cos2", gradient.cols = c(rainbow(10)), repel = TRUE)
# Contribution of variables to each PC:
# the larger the contribution, the more the variable contributes to the component.
head(pca_for_variables$contrib, 20)
# Highlight the most contributing variables on the correlation plot
# (BUG FIX: a trailing comma passed an empty argument to fviz_pca_var)
fviz_pca_var(pca, col.var = "contrib", gradient.cols = c("red", "Blue", "Green"))
# Bar plots of variable contributions via fviz_contrib() from factoextra;
# only the top contributors are displayed.
library(factoextra)
fviz_contrib(pca, choice = "var", axes = 1, top = 15)
# Contributions of variables to PC2
fviz_contrib(pca, choice = "var", axes = 2, top = 15)
# Contributions of variables to PC3
fviz_contrib(pca, choice = "var", axes = 3, top = 15)
# Contribution to PC3 - PC5
fviz_contrib(pca, choice = "var", axes = 3:5, top = 15)
# The red dashed line on the graphs indicates the expected
# average contribution.
# "Total Percentage of sales", "Irish Source service", "Payroll cost per
# employment" and "Irish source to Total Material" contribute most to PC 1;
# the same variables plus "Sales per employment" contribute most to PC 2.
fviz_pca_ind(pca,
             axes = c(1, 2),
             geom.ind = "point", # show points only (but not "text values")
             col.ind = economy_CBIC_ind$`Industry Type`, # colour by groups
             palette = c(rainbow(3)),
             addEllipses = TRUE, # Concentration ellipses
             legend.title = "Industry_types"
)
# Graphical parameters can be changed with ggpar() from the ggpubr package.
biplot <- fviz_pca_ind(pca, geom = "point", col.ind = economy_CBIC_ind$`Industry Type`)
ggpubr::ggpar(biplot,
              title = "Principal Component Analysis",
              subtitle = "Business sector in Ireland",
              xlab = "PC 1", ylab = "PC 2",
              legend.title = "Industry_types", legend.position = "top",
              ggtheme = theme_gray(), palette = "jco")
# Let's see how PC 3 and PC 4 represent the business data.
biplot <- fviz_pca_ind(pca,
                       axes = c(3, 4),
                       geom = "point",
                       col.ind = economy_CBIC_ind$`Industry Type`)
# BUG FIX: a doubled comma after the subtitle passed an empty argument
ggpubr::ggpar(biplot,
              title = "Principal Component Analysis",
              subtitle = "Business sector in Ireland",
              xlab = "PC 3", ylab = "PC 4",
              legend.title = "Industry_types", legend.position = "top",
              ggtheme = theme_gray(), palette = "jco")
|
/CA3/PCA.R
|
no_license
|
murthysn18/Data_Science
|
R
| false
| false
| 7,071
|
r
|
# Principal Component Analysis of Irish industry survey data:
# builds industry-level and year-level totals from an Excel workbook,
# runs prcomp(), and explores the components with factoextra.
install.packages("readxl")
install.packages("matrixStats")
install.packages("factoextra")
library(matrixStats)
library(reshape2)
library("readxl")
workingdic <- getwd()
workingdic <- trimws(workingdic) # Trim any spaces at either end
print(workingdic)
setwd(workingdic)
###################################################### Total of the data for a particular industry over the years to give totals for all tabs,
###################################################### copied to the same dataframe
xl_data <- "InputFiles/Irish Industries in Ireland.xlsx"
sheets <- excel_sheets(xl_data)
no_of_sheets <- length( excel_sheets( xl_data ) )
# BUG FIX: the original initialised economy_CBIC_ind from `economy_data` and
# `exceldata`, neither of which existed at this point. Read the first sheet
# to obtain the two identifier columns instead.
exceldata <- read_xlsx(xl_data, sheet = sheets[1])
economy_CBIC_ind <- exceldata[, 1:2]
for(i in 1:no_of_sheets)
{
  exceldata <- read_xlsx(xl_data, sheet = sheets[i])
  sheetname <- sheets[i]
  # Row totals over the numeric columns: one per-industry value per sheet
  economy_CBIC_ind[, ncol(economy_CBIC_ind) + 1] <- rowSums(Filter(is.numeric, exceldata))
  names(economy_CBIC_ind)[ncol(economy_CBIC_ind)] <- paste0(sheetname)
}
Econony_industry_basis <- economy_CBIC_ind
head(Econony_industry_basis)
###################################################### Adding the data of all industries to give yearly numbers for all tabs,
###################################################### copied to the same dataframe
economy_CBIC_year <- data.frame(c(2000:2017))
economy_column_name <- c("Years")
colnames(economy_CBIC_year) <- economy_column_name
for(i in 1:no_of_sheets)
{
  # (readxl is already attached above; no need to reload it per iteration)
  sheetname <- sheets[i]
  exceldata <- read_xlsx(xl_data, sheet = sheetname)
  # Column totals over the numeric columns: one per-year value per sheet
  economy_CBIC_year[, ncol(economy_CBIC_year) + 1] <- colSums(Filter(is.numeric, exceldata))
  names(economy_CBIC_year)[ncol(economy_CBIC_year)] <- paste0(sheetname)
}
Econony_yearly_basis <- economy_CBIC_year
head(Econony_yearly_basis)
########################## Only the numerical data can be used for PCA
########################## The analysis below is carried out on the industry-level totals
data_numeric_variables <- sapply(Econony_industry_basis, is.numeric)
data_numeric_variables
######################### Removing the non numeric values
data_file_adjusted <- Econony_industry_basis[, data_numeric_variables]
###################### Numeric data passed into prcomp() with center and scale set to TRUE
# Peek at the PCA object via summary().
pca <- prcomp(data_file_adjusted, center = TRUE, scale. = TRUE)
summary(pca)
# 18 principal components are identified, named PC1-PC18.
str(pca)
######################################## Eigenvalues / Variances ###################################################
library("factoextra")
eig_values <- get_eigenvalue(pca)
eig_values
# Scree plot of the percentage of explained variance per component
fviz_eig(pca, addlabels = TRUE, ylim = c(0, 75))
# The graph shows that the majority of the information is available from the
# first five components, so stopping at the fifth component is sufficient.
pca_for_variables <- get_pca_var(pca)
pca_for_variables
# -----------------------------------------------------------------------
# Using `Correlation` plot
# -----------------------------------------------------------------------
library("corrplot")
corrplot(pca_for_variables$cos2, is.corr = FALSE)
fviz_pca_var(pca, col.var = "blue")
# -----------------------------------------------------------------------
# Cos2 - quality of representation
# -----------------------------------------------------------------------
head(pca_for_variables$cos2, 10)
################################################## Plotting the cos2 values as a bar graph using fviz_cos2
fviz_cos2(pca, choice = "var", axes = 1:2)
# -----------------------------------------------------------------------
# Biplot
# -----------------------------------------------------------------------
# Colour by cos2 values: quality on the factor map
fviz_pca_var(pca, col.var = "cos2", gradient.cols = c(rainbow(10)), repel = TRUE)
# Contribution of variables to each PC:
# the larger the contribution, the more the variable contributes to the component.
head(pca_for_variables$contrib, 20)
# Highlight the most contributing variables on the correlation plot
# (BUG FIX: a trailing comma passed an empty argument to fviz_pca_var)
fviz_pca_var(pca, col.var = "contrib", gradient.cols = c("red", "Blue", "Green"))
# Bar plots of variable contributions via fviz_contrib() from factoextra;
# only the top contributors are displayed.
library(factoextra)
fviz_contrib(pca, choice = "var", axes = 1, top = 15)
# Contributions of variables to PC2
fviz_contrib(pca, choice = "var", axes = 2, top = 15)
# Contributions of variables to PC3
fviz_contrib(pca, choice = "var", axes = 3, top = 15)
# Contribution to PC3 - PC5
fviz_contrib(pca, choice = "var", axes = 3:5, top = 15)
# The red dashed line on the graphs indicates the expected
# average contribution.
# "Total Percentage of sales", "Irish Source service", "Payroll cost per
# employment" and "Irish source to Total Material" contribute most to PC 1;
# the same variables plus "Sales per employment" contribute most to PC 2.
fviz_pca_ind(pca,
             axes = c(1, 2),
             geom.ind = "point", # show points only (but not "text values")
             col.ind = economy_CBIC_ind$`Industry Type`, # colour by groups
             palette = c(rainbow(3)),
             addEllipses = TRUE, # Concentration ellipses
             legend.title = "Industry_types"
)
# Graphical parameters can be changed with ggpar() from the ggpubr package.
biplot <- fviz_pca_ind(pca, geom = "point", col.ind = economy_CBIC_ind$`Industry Type`)
ggpubr::ggpar(biplot,
              title = "Principal Component Analysis",
              subtitle = "Business sector in Ireland",
              xlab = "PC 1", ylab = "PC 2",
              legend.title = "Industry_types", legend.position = "top",
              ggtheme = theme_gray(), palette = "jco")
# Let's see how PC 3 and PC 4 represent the business data.
biplot <- fviz_pca_ind(pca,
                       axes = c(3, 4),
                       geom = "point",
                       col.ind = economy_CBIC_ind$`Industry Type`)
# BUG FIX: a doubled comma after the subtitle passed an empty argument
ggpubr::ggpar(biplot,
              title = "Principal Component Analysis",
              subtitle = "Business sector in Ireland",
              xlab = "PC 3", ylab = "PC 4",
              legend.title = "Industry_types", legend.position = "top",
              ggtheme = theme_gray(), palette = "jco")
|
library(data.table)
# Collect every distinct finite location id appearing in a movement table:
# column 2 plus all columns whose name contains "Location".
# Non-finite entries (NA / NaN / Inf) are dropped from the result.
all_movement_locations <- function(location_dt) {
  loc_cols <- c(2, grep("Location", colnames(location_dt)))
  # `with = FALSE` selects columns by the integer vector, equivalent to
  # the data.table `..loc_cols` prefix form.
  loc_values <- as.vector(as.matrix(location_dt[, loc_cols, with = FALSE]))
  distinct_locs <- unique(loc_values)
  distinct_locs[is.finite(distinct_locs)]
}
# Checks that bld_bloodmeal_process() returns a two-element list of
# (mosquito infections, human infections) with the expected dimensions
# and column names, given randomly sampled fixtures.
test_that("bloodmeal density assigns bites at location", {
  params <- list(
    human_cnt = 8L,
    location_cnt = 5L,
    duration = 10,
    day_duration = 1,
    dispersion = 1.5,
    day_cnt = 10,
    biting_weight = 0.5
  )
  day_start <- 1
  # Sampled fixtures: health trajectories, human movement, mosquito M/Y/Z
  health_dt <- sample_health_infection_status(params$human_cnt, params$duration)
  bite_weight <- runif(params$human_cnt, 0.2, 0.8)
  movement_dt <- sample_move_location(
    params$human_cnt, params$location_cnt, params$duration)
  mosquito_dt <- sample_mosquito_myz(params$location_cnt, params$duration)
  response = bld_bloodmeal_process(
    health_dt, movement_dt, mosquito_dt, day_start, params)
  mosquito_infections <- response[[1]]
  human_infections <- response[[2]]
  # One row per (day, location) pair is expected for mosquito infections
  expect_equal(nrow(mosquito_infections), params$day_cnt * params$location_cnt)
  expect_equal(colnames(human_infections), c("times", "human"))
})
# End-to-end check of the bloodmeal module API: build the module, run one
# mash_step over the sampled inputs, and read the infections back out
# through the path accessor functions.
test_that("bloodmeal density processes sample data", {
  params <- list(
    human_cnt = 8L,
    location_cnt = 5L,
    duration = 10,
    day_duration = 1,
    day_start = 1,
    dispersion = 1.5,
    day_cnt = 10,
    biting_weight = 0.5
  )
  # Sampled fixtures: health trajectories, human movement, mosquito M/Y/Z
  health_dt <- sample_health_infection_status(params$human_cnt, params$duration)
  bite_weight <- runif(params$human_cnt, 0.2, 0.8)
  movement_dt <- sample_move_location(
    params$human_cnt, params$location_cnt, params$duration)
  mosquito_dt <- sample_mosquito_myz(params$location_cnt, params$duration)
  bloodmeal <- bloodmeal_density_module(params)
  time_id <- list(time = 0, duration = params$duration)
  bloodmeal <- mash_step(
    bloodmeal, time_id, health_dt, movement_dt, mosquito_dt)
  human_infections <- infects_human_path(bloodmeal)
  mosquito_infections <- infects_mosquito_path(bloodmeal)
  # One row per (day, location) pair is expected for mosquito infections
  expect_equal(nrow(mosquito_infections), params$day_cnt * params$location_cnt)
  expect_equal(colnames(human_infections), c("times", "human"))
})
|
/macro/tests/testthat/test-bloodmeal_density.R
|
no_license
|
dd-harp/MASH
|
R
| false
| false
| 2,166
|
r
|
library(data.table)
# Collect every distinct finite location id appearing in a movement table:
# column 2 plus all columns whose name contains "Location".
# Non-finite entries (NA / NaN / Inf) are dropped from the result.
all_movement_locations <- function(location_dt) {
  loc_cols <- c(2, grep("Location", colnames(location_dt)))
  # `with = FALSE` selects columns by the integer vector, equivalent to
  # the data.table `..loc_cols` prefix form.
  loc_values <- as.vector(as.matrix(location_dt[, loc_cols, with = FALSE]))
  distinct_locs <- unique(loc_values)
  distinct_locs[is.finite(distinct_locs)]
}
# Checks that bld_bloodmeal_process() returns a two-element list of
# (mosquito infections, human infections) with the expected dimensions
# and column names, given randomly sampled fixtures.
test_that("bloodmeal density assigns bites at location", {
  params <- list(
    human_cnt = 8L,
    location_cnt = 5L,
    duration = 10,
    day_duration = 1,
    dispersion = 1.5,
    day_cnt = 10,
    biting_weight = 0.5
  )
  day_start <- 1
  # Sampled fixtures: health trajectories, human movement, mosquito M/Y/Z
  health_dt <- sample_health_infection_status(params$human_cnt, params$duration)
  bite_weight <- runif(params$human_cnt, 0.2, 0.8)
  movement_dt <- sample_move_location(
    params$human_cnt, params$location_cnt, params$duration)
  mosquito_dt <- sample_mosquito_myz(params$location_cnt, params$duration)
  response = bld_bloodmeal_process(
    health_dt, movement_dt, mosquito_dt, day_start, params)
  mosquito_infections <- response[[1]]
  human_infections <- response[[2]]
  # One row per (day, location) pair is expected for mosquito infections
  expect_equal(nrow(mosquito_infections), params$day_cnt * params$location_cnt)
  expect_equal(colnames(human_infections), c("times", "human"))
})
# End-to-end check of the bloodmeal module API: build the module, run one
# mash_step over the sampled inputs, and read the infections back out
# through the path accessor functions.
test_that("bloodmeal density processes sample data", {
  params <- list(
    human_cnt = 8L,
    location_cnt = 5L,
    duration = 10,
    day_duration = 1,
    day_start = 1,
    dispersion = 1.5,
    day_cnt = 10,
    biting_weight = 0.5
  )
  # Sampled fixtures: health trajectories, human movement, mosquito M/Y/Z
  health_dt <- sample_health_infection_status(params$human_cnt, params$duration)
  bite_weight <- runif(params$human_cnt, 0.2, 0.8)
  movement_dt <- sample_move_location(
    params$human_cnt, params$location_cnt, params$duration)
  mosquito_dt <- sample_mosquito_myz(params$location_cnt, params$duration)
  bloodmeal <- bloodmeal_density_module(params)
  time_id <- list(time = 0, duration = params$duration)
  bloodmeal <- mash_step(
    bloodmeal, time_id, health_dt, movement_dt, mosquito_dt)
  human_infections <- infects_human_path(bloodmeal)
  mosquito_infections <- infects_mosquito_path(bloodmeal)
  # One row per (day, location) pair is expected for mosquito infections
  expect_equal(nrow(mosquito_infections), params$day_cnt * params$location_cnt)
  expect_equal(colnames(human_infections), c("times", "human"))
})
|
#' Plotting predicted risk
#'
#' Show predicted risk obtained by a risk prediction model as a function of
#' time.
#'
#' @aliases plot.riskRegression
#' @usage
#' \method{plot}{riskRegression}(x,
##' cause,
##' newdata,
##' xlab,
##' ylab,
##' xlim,
##' ylim,
##' lwd,
##' col,
##' lty,
##' axes=TRUE,
##' percent=TRUE,
##' legend=TRUE,
##' add=FALSE,
##' ...)
#' @param x Fitted object obtained with one of \code{ARR}, \code{LRR},
#' \code{riskRegression}.
#' @param cause For CauseSpecificCox models the cause of interest.
#' @param newdata A data frame containing predictor variable combinations for
#' which to compute predicted risk.
#' @param xlim See \code{plot}
#' @param ylim See \code{plot}
#' @param xlab See \code{plot}
#' @param ylab See \code{plot}
#' @param lwd A vector of line thicknesses for the regression coefficients.
#' @param col A vector of colors for the regression coefficients.
#' @param lty A vector of line types for the regression coefficients.
#' @param axes Logical. If \code{FALSE} then do not draw axes.
#' @param percent If true the y-axis is labeled in percent.
#' @param legend If true draw a legend.
#' @param add Logical. If \code{TRUE} then add lines to an existing plot.
#' @param \dots Used for transclusion of smart arguments for \code{plot},
#' \code{lines}, \code{axis} and \code{background}. See function
#' \code{\link{SmartControl}} from prodlim.
#' @author Thomas Alexander Gerds <tag@@biostat.ku.dk>
#' @keywords survival
##' @examples
##'
##' library(survival)
##' library(prodlim)
##' data(Melanoma)
##' fit.arr <- ARR(Hist(time,status)~invasion+age+strata(sex),data=Melanoma,cause=1)
##' plot(fit.arr,xlim=c(500,3000))
##'
##'
#' @export
plot.riskRegression <- function(x,
                                cause,
                                newdata,
                                xlab,
                                ylab,
                                xlim,
                                ylim,
                                lwd,
                                col,
                                lty,
                                axes=TRUE,
                                percent=TRUE,
                                legend=TRUE,
                                add=FALSE,
                                ...){
    # {{{ getting predicted risk
    ## Time grid: event times for cause-specific Cox models, otherwise the
    ## time points at which the time-varying coefficients were estimated.
    if (inherits(x,"CauseSpecificCox"))
        plot.times <- x$eventTimes
    else
        plot.times <- x$timeVaryingEffects$coef[,"time"]
    ## FIX: use inherits() instead of class(x)=="predictedRisk" -- comparing
    ## with == misbehaves when an object carries more than one class.
    if (inherits(x,"predictedRisk"))
        Y <- split(x$risk,1:NROW(x$risk))
    else{
        if (missing(newdata)){
            ff <- eval(x$call$formula)
            xdat <- unique(eval(x$call$data)[all.vars(update(ff,NULL~.))])
            if (NROW(xdat)<5){
                if (inherits(x,"CauseSpecificCox")){
                    p1 <- predictRisk(x,newdata=xdat,times=plot.times,cause=cause)
                }
                else{
                    p1 <- stats::predict(x,newdata=xdat,times=plot.times)$risk}
                rownames(p1) <- paste("id",1:NROW(xdat))
            }
            else{
                if (inherits(x,"CauseSpecificCox")){
                    P1 <- predictRisk(x,newdata=eval(x$call$data),times=plot.times,cause=cause)
                }
                else{
                    P1 <- stats::predict(x,
                                         newdata=eval(x$call$data),
                                         times=plot.times)$risk
                }
                ## order subjects by risk at the median time and display only
                ## the min/q25/median/q75/max risk curves
                medianP1 <- P1[,prodlim::sindex(plot.times,median(plot.times))]
                P1 <- P1[order(medianP1),]
                p1 <- P1[round(quantile(1:NROW(P1))),]
                rownames(p1) <- paste("Predicted risk",c("Min","q25","Median","q75","Max"),sep="=")
                warning("Argument newdata is missing.\n",
                        "Shown are the cumulative incidence curves from the original data set.\nSelected are curves based on individual risk (min,q25,median,q75,max) at the median time:",
                        median(plot.times))
            }
        }
        else{
            ## FIX: argument is spelled `times` (the former `time` only worked
            ## through partial argument matching)
            p1 <- stats::predict(x,newdata=newdata,times=plot.times)$risk
        }
        ## one list element per curve (row of the prediction matrix)
        Y <- lapply(1:NROW(p1),function(i){p1[i,]})
        if (!is.null(rownames(p1)))
            names(Y) <- rownames(p1)
    }
    nlines <- NROW(Y)
    # }}}
    # {{{ labels, limits etc
    if (missing(ylab)) ylab <- "Cumulative incidence"
    if (missing(xlab)) xlab <- "Time"
    if (missing(xlim)) xlim <- c(0, max(plot.times))
    if (missing(ylim)) ylim <- c(0, 1)
    if (missing(lwd)) lwd <- rep(3,nlines)
    if (missing(col)) col <- 1:nlines
    if (missing(lty)) lty <- rep(1, nlines)
    ## recycle the graphical parameters to one value per curve
    if (length(lwd) < nlines) lwd <- rep(lwd, nlines)
    if (length(lty) < nlines) lty <- rep(lty, nlines)
    if (length(col) < nlines) col <- rep(col, nlines)
    # }}}
    # {{{ defaults
    plot.DefaultArgs <- list(x=0,y=0,type = "n",ylim = ylim,xlim = xlim,xlab = xlab,ylab = ylab)
    lines.DefaultArgs <- list(type="s")
    axis1.DefaultArgs <- list()
    axis2.DefaultArgs <- list(at=seq(0,1,.25),side=2)
    legend.DefaultArgs <- list(legend=names(Y),
                               lwd=lwd,
                               col=col,
                               lty=lty,
                               cex=1.5,
                               bty="n",
                               y.intersp=1.3,
                               x="topleft",
                               trimnames=TRUE)
    # }}}
    # {{{ smart control
    ## Merge user-supplied ... arguments with the defaults above for each
    ## plotting sub-call (plot, lines, axes, legend).
    smartA <- prodlim::SmartControl(call= list(...),
                                    keys=c("plot","lines","legend","conf.int","marktime","axis1","axis2"),
                                    ignore=c("x","type","cause","newdata","add","col","lty","lwd","ylim","xlim","xlab","ylab","legend","marktime","conf.int","automar","atrisk","timeOrigin","percent","axes","atrisk.args","conf.int.args","legend.args"),
                                    ignore.case=TRUE,
                                    defaults=list("plot"=plot.DefaultArgs,
                                                  "axis1"=axis1.DefaultArgs,
                                                  "axis2"=axis2.DefaultArgs,
                                                  "legend"=legend.DefaultArgs,
                                                  "lines"=lines.DefaultArgs),
                                    forced=list("plot"=list(axes=FALSE),
                                                "axis1"=list(side=1)),
                                    verbose=TRUE)
    # }}}
    # {{{ empty plot
    if (!add) {
        do.call("plot",smartA$plot)
        if (axes){
            do.call("axis",smartA$axis1)
            if (percent & is.null(smartA$axis1$labels))
                smartA$axis2$labels <- paste(100*smartA$axis2$at,"%")
            do.call("axis",smartA$axis2)
        }
    }
    # }}}
    # {{{ adding the lines
    lines.type <- smartA$lines$type
    nix <- lapply(1:nlines, function(s) {
        lines(x = plot.times,
              y = Y[[s]],
              type = lines.type,
              col = col[s],
              lty = lty[s],
              lwd = lwd[s])
    })
    # }}}
    # {{{ legend
    if(legend[[1]]==TRUE && !add[[1]] && !is.null(names(Y))){
        if (all(smartA$legend$trimnames==TRUE) && all(sapply((nlist <- strsplit(names(Y),"=")),function(x)length(x))==2)){
            ## names look like "label=value": show only the values in the
            ## legend and promote the common label to the legend title
            smartA$legend$legend <- sapply(nlist,function(x)x[[2]])
            smartA$legend$title <- unique(sapply(nlist,function(x)x[[1]]))
        }
        smartA$legend <- smartA$legend[-match("trimnames",names(smartA$legend))]
        ## temporarily allow drawing outside the plot region, then restore
        save.xpd <- par()$xpd
        par(xpd=TRUE)
        do.call("legend",smartA$legend)
        par(xpd=save.xpd)
    }
    # }}}
}
|
/R/plot.riskRegression.R
|
no_license
|
LoSerigne/riskRegression
|
R
| false
| false
| 7,673
|
r
|
#' Plotting predicted risk
#'
#' Show predicted risk obtained by a risk prediction model as a function of
#' time.
#'
#' @aliases plot.riskRegression
#' @usage
#' \method{plot}{riskRegression}(x,
##' cause,
##' newdata,
##' xlab,
##' ylab,
##' xlim,
##' ylim,
##' lwd,
##' col,
##' lty,
##' axes=TRUE,
##' percent=TRUE,
##' legend=TRUE,
##' add=FALSE,
##' ...)
#' @param x Fitted object obtained with one of \code{ARR}, \code{LRR},
#' \code{riskRegression}.
#' @param cause For CauseSpecificCox models the cause of interest.
#' @param newdata A data frame containing predictor variable combinations for
#' which to compute predicted risk.
#' @param xlim See \code{plot}
#' @param ylim See \code{plot}
#' @param xlab See \code{plot}
#' @param ylab See \code{plot}
#' @param lwd A vector of line thicknesses for the regression coefficients.
#' @param col A vector of colors for the regression coefficients.
#' @param lty A vector of line types for the regression coefficients.
#' @param axes Logical. If \code{FALSE} then do not draw axes.
#' @param percent If true the y-axis is labeled in percent.
#' @param legend If true draw a legend.
#' @param add Logical. If \code{TRUE} then add lines to an existing plot.
#' @param \dots Used for transclusion of smart arguments for \code{plot},
#' \code{lines}, \code{axis} and \code{background}. See function
#' \code{\link{SmartControl}} from prodlim.
#' @author Thomas Alexander Gerds <tag@@biostat.ku.dk>
#' @keywords survival
##' @examples
##'
##' library(survival)
##' library(prodlim)
##' data(Melanoma)
##' fit.arr <- ARR(Hist(time,status)~invasion+age+strata(sex),data=Melanoma,cause=1)
##' plot(fit.arr,xlim=c(500,3000))
##'
##'
#' @export
plot.riskRegression <- function(x,
                                cause,
                                newdata,
                                xlab,
                                ylab,
                                xlim,
                                ylim,
                                lwd,
                                col,
                                lty,
                                axes=TRUE,
                                percent=TRUE,
                                legend=TRUE,
                                add=FALSE,
                                ...){
    # {{{ getting predicted risk
    # Time grid for plotting: event times for cause-specific Cox models,
    # otherwise the time points of the time-varying coefficients.
    if ("CauseSpecificCox"%in%class(x))
        plot.times <- x$eventTimes
    else
        plot.times <- x$timeVaryingEffects$coef[,"time"]
    # inherits() instead of class(x)=="...": class() may return a vector of
    # length > 1, for which "==" is not a class-membership test.
    if (inherits(x,"predictedRisk"))
        Y <- split(x$risk,1:NROW(x$risk))
    else{
        if (missing(newdata)){
            # No newdata given: predict for the unique covariate combinations
            # of the original data, or -- when there are 5 or more -- for five
            # representative individuals at the risk quantiles.
            ff <- eval(x$call$formula)
            xdat <- unique(eval(x$call$data)[all.vars(update(ff,NULL~.))])
            if (NROW(xdat)<5){
                if ("CauseSpecificCox"%in%class(x)){
                    p1 <- predictRisk(x,newdata=xdat,times=plot.times,cause=cause)
                }
                else{
                    p1 <- stats::predict(x,newdata=xdat,times=plot.times)$risk}
                rownames(p1) <- paste("id",1:NROW(xdat))
            }
            else{
                if ("CauseSpecificCox"%in%class(x)){
                    P1 <- predictRisk(x,newdata=eval(x$call$data),times=plot.times,cause=cause)
                }
                else{
                    P1 <- stats::predict(x,
                                         newdata=eval(x$call$data),
                                         times=plot.times)$risk
                }
                # Rank subjects by their predicted risk at the median time
                # point and keep the min/q25/median/q75/max curves.
                medianP1 <- P1[,prodlim::sindex(plot.times,median(plot.times))]
                P1 <- P1[order(medianP1),]
                p1 <- P1[round(quantile(1:NROW(P1))),]
                rownames(p1) <- paste("Predicted risk",c("Min","q25","Median","q75","Max"),sep="=")
                warning("Argument newdata is missing.\n",
                        "Shown are the cumulative incidence curves from the original data set.\nSelected are curves based on individual risk (min,q25,median,q75,max) at the median time:",
                        median(plot.times))
            }
        }
        else{
            # Spell out 'times' instead of relying on partial matching of
            # 'time', consistent with the predict calls above.
            p1 <- stats::predict(x,newdata=newdata,times=plot.times)$risk
        }
        # One list element per individual: the row of predicted risks.
        Y <- lapply(1:NROW(p1),function(i){p1[i,]})
        if (!is.null(rownames(p1)))
            names(Y) <- rownames(p1)
    }
    nlines <- NROW(Y)
    # }}}
    # {{{ labels, limits etc
    if (missing(ylab)) ylab <- "Cumulative incidence"
    if (missing(xlab)) xlab <- "Time"
    if (missing(xlim)) xlim <- c(0, max(plot.times))
    if (missing(ylim)) ylim <- c(0, 1)
    if (missing(lwd)) lwd <- rep(3,nlines)
    if (missing(col)) col <- 1:nlines
    if (missing(lty)) lty <- rep(1, nlines)
    # Recycle user-supplied graphical parameters to one value per curve.
    if (length(lwd) < nlines) lwd <- rep(lwd, nlines)
    if (length(lty) < nlines) lty <- rep(lty, nlines)
    if (length(col) < nlines) col <- rep(col, nlines)
    # }}}
    # {{{ defaults
    plot.DefaultArgs <- list(x=0,y=0,type = "n",ylim = ylim,xlim = xlim,xlab = xlab,ylab = ylab)
    lines.DefaultArgs <- list(type="s")
    axis1.DefaultArgs <- list()
    axis2.DefaultArgs <- list(at=seq(0,1,.25),side=2)
    legend.DefaultArgs <- list(legend=names(Y),
                               lwd=lwd,
                               col=col,
                               lty=lty,
                               cex=1.5,
                               bty="n",
                               y.intersp=1.3,
                               x="topleft",
                               trimnames=TRUE)
    # }}}
    # {{{ smart control
    # Dispatch ... to plot/lines/axis/legend via prodlim's smart arguments.
    smartA <- prodlim::SmartControl(call= list(...),
                                    keys=c("plot","lines","legend","conf.int","marktime","axis1","axis2"),
                                    ignore=c("x","type","cause","newdata","add","col","lty","lwd","ylim","xlim","xlab","ylab","legend","marktime","conf.int","automar","atrisk","timeOrigin","percent","axes","atrisk.args","conf.int.args","legend.args"),
                                    ignore.case=TRUE,
                                    defaults=list("plot"=plot.DefaultArgs,
                                                  "axis1"=axis1.DefaultArgs,
                                                  "axis2"=axis2.DefaultArgs,
                                                  "legend"=legend.DefaultArgs,
                                                  "lines"=lines.DefaultArgs),
                                    forced=list("plot"=list(axes=FALSE),
                                                "axis1"=list(side=1)),
                                    verbose=TRUE)
    # }}}
    # {{{ empty plot
    if (!add) {
        do.call("plot",smartA$plot)
        if (axes){
            do.call("axis",smartA$axis1)
            # '&&' (scalar) instead of '&': both operands are length one here.
            # NOTE(review): the condition inspects axis1$labels but sets
            # axis2$labels -- kept as in the original; confirm intent.
            if (percent && is.null(smartA$axis1$labels))
                smartA$axis2$labels <- paste(100*smartA$axis2$at,"%")
            do.call("axis",smartA$axis2)
        }
    }
    # }}}
    # {{{ adding the lines
    lines.type <- smartA$lines$type
    nix <- lapply(1:nlines, function(s) {
        lines(x = plot.times,
              y = Y[[s]],
              type = lines.type,
              col = col[s],
              lty = lty[s],
              lwd = lwd[s])
    })
    # }}}
    # {{{ legend
    if(legend[[1]]==TRUE && !add[[1]] && !is.null(names(Y))){
        # When all curve names look like "label=value", show only the values
        # and use the shared label as the legend title.
        if (all(smartA$legend$trimnames==TRUE) && all(sapply((nlist <- strsplit(names(Y),"=")),function(x)length(x))==2)){
            smartA$legend$legend <- sapply(nlist,function(x)x[[2]])
            smartA$legend$title <- unique(sapply(nlist,function(x)x[[1]]))
        }
        # 'trimnames' is not a legend() argument; drop it before the call.
        smartA$legend <- smartA$legend[-match("trimnames",names(smartA$legend))]
        # Allow the legend to spill into the margin; restore par() afterwards.
        save.xpd <- par()$xpd
        par(xpd=TRUE)
        do.call("legend",smartA$legend)
        par(xpd=save.xpd)
    }
    # }}}
}
|
# Load the training and test splits of the income data set.
# NOTE(review): absolute Windows paths make this script non-portable;
# consider reading paths from a config file or command-line arguments.
training = read.csv("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/income_tr.csv")
testing = read.csv("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/income_te.csv")
# Working directory for the result CSV written at the bottom of the script.
setwd("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/")
# Uncomment to run on a small subset while debugging.
#training=training[1:100,]
#testing=testing[1:100,]
# Return the most frequent value in x; on a tie the value that appears
# first in x wins (same tie-breaking as tabulate/which.max).
Mode <- function(x) {
  distinct_vals <- unique(x)
  counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(counts)]
}
# euclid_income: k-nearest-neighbour search using a mixed Euclidean distance.
#
# Args:
#   k     - number of nearest neighbours to keep per test row
#   test  - test data frame; column 1 is the transaction ID, last column the class
#   train - training data frame with the same layout; column 1 is the ID
#
# Returns a data frame with one row per test record: "Transaction ID", then k
# pairs of columns ("ID i", "Prox i") holding the i-th nearest training ID and
# its distance, and finally the actual "Class" of the test row.
#
# Missing values (NA or " ?") in train are imputed first; the four ratio
# attributes are then min-max normalized using the training min/max.
euclid_income <- function(k, test, train) {
final=data.frame()
# NOTE(review): 'ncols' is computed but never used below.
ncols = ncol(test)-1
# imputing the mean for the attributes 'age' and 'hours_per_week'.
# imputing the mode for the 2 ratio attributes 'capital_gain' and 'capital_loss'
# since its almost always zero. Hence taking the mean for those would not be the right thing.
# Imputing the Mode for all the nominal and ordinal attributes
for(i in 1:nrow(train))
{
for( j in 2:(ncol(train)-1))
{
if(is.na(train[i,j]) || train[i,j]==" ?"){
if(colnames(train)[j]== "age" || colnames(train)[j]== "hour_per_week"){
# NOTE(review): if this column ever contains " ?" strings it is character,
# so mean() would return NA with a warning -- confirm these columns are numeric.
train[i,j] = mean(train[,j])
}else{
train[i,j] = as.character( Mode(train[,j]) )
}
}
}
}
#min max normalization for the 4 ratio attributes
maxage = as.numeric(max(train$age))
minage = as.numeric(min(train$age))
diffage = maxage - minage
maxgain = as.numeric(max(train$capital_gain))
mingain = as.numeric(min(train$capital_gain))
diffgain = maxgain - mingain
maxloss = as.numeric(max(train$capital_loss))
minloss = as.numeric(min(train$capital_loss))
diffloss = maxloss - minloss
maxhour = as.numeric(max(train$hour_per_week))
minhour = as.numeric(min(train$hour_per_week))
diffhour = maxhour - minhour
for(i in 1:nrow(train))
{
train[i,"age"] = (train[i,"age"]-minage) / diffage
train[i,"capital_gain"] = (train[i,"capital_gain"]-mingain) / diffgain
train[i,"capital_loss"] = (train[i,"capital_loss"]-minloss) / diffloss
train[i,"hour_per_week"] = (train[i,"hour_per_week"]-minhour) / diffhour
}
#normalizing the test data
# Test values are normalized with the TRAINING min/max; " ?" entries are left as-is.
for(i in 1:nrow(test))
{
if(test[i,"age"] != " ?") test[i,"age"] = (test[i,"age"]-minage) / diffage
if(test[i,"capital_gain"] != " ?") test[i,"capital_gain"] = (test[i,"capital_gain"]-mingain) / diffgain
if(test[i,"capital_loss"] != " ?") test[i,"capital_loss"] = (test[i,"capital_loss"]-minloss) / diffloss
if(test[i,"hour_per_week"] != " ?") test[i,"hour_per_week"] = (test[i,"hour_per_week"]-minhour) / diffhour
}
# Distance of every test row i to every training row j.
for (i in 1:nrow(test))
{
# NOTE(review): growing 'distance' and 'final' cell by cell copies the data
# frame repeatedly (O(n^2)); preallocating would be much faster.
distance=data.frame()
for(j in 1:nrow(train))
{
#caclualte distance for nominal data
# Simple matching: 0 if equal (or test value missing), 1 otherwise.
if((as.character(test$workclass[i]) == as.character(train$workclass[j])) || (as.character(test$workclass[i])== " ?"))
workclassdiff = 0
else workclassdiff = 1
if((as.character(test$marital_status[i]) == as.character(train$marital_status[j])) || (as.character(test$marital_status[i]) == " ?"))
marital_statusdiff = 0
else marital_statusdiff = 1
if((as.character(test$occupation[i]) == as.character(train$occupation[j])) || (as.character(test$occupation[i]) == " ?"))
occupationdiff = 0
else occupationdiff = 1
if((as.character(test$relationship[i]) == as.character(train$relationship[j])) || (as.character(test$relationship[i]) == " ?"))
relationshipdiff = 0
else relationshipdiff = 1
if((as.character(test$race[i]) == as.character(train$race[j])) || (as.character(test$race[i]) == " ?"))
racediff = 0
else racediff = 1
if((as.character(test$gender[i]) == as.character(train$gender[j])) || (as.character(test$gender[i]) == " ?"))
genderdiff = 0
else genderdiff = 1
if((as.character(test$native_country[i]) == as.character(train$native_country[j])) || (as.character(test$native_country[i]) == " ?"))
native_countrydiff = 0
else native_countrydiff = 1
# Average mismatch over the 7 nominal attributes.
ndist=(workclassdiff+marital_statusdiff+occupationdiff+
relationshipdiff+racediff+genderdiff+native_countrydiff)/7
#Calculate Euclidean Distance for ratio data
# Missing (" ?") test values contribute 0 to the ratio distance.
if(test$age[i] != " ?"){
rdist = (test$age[i]-train$age[j])^2
}else rdist = 0
if(test$capital_gain[i] != " ?"){
rdist = rdist + (test$capital_gain[i]-train$capital_gain[j])^2
}
if(test$capital_loss[i] != " ?"){
rdist = rdist + (test$capital_loss[i]-train$capital_loss[j])^2
}
if(test$hour_per_week[i] != " ?"){
rdist = rdist + (test$hour_per_week[i]-train$hour_per_week[j])^2
}
rdist=sqrt(rdist)
#Calculate distance for ordinal data
# education_cat is assumed to range over 1..16, hence the /15 scaling.
if(test$education_cat[i] != " ?")
odist=abs(test$education_cat[i]-train$education_cat[j])/15
else odist =0;
#Final distance is the average of the ordinal, nominal and ratio distances
dist=(ndist+rdist+odist)/3
#Write this distance to the distance matrix(data frame)
distance[j,1] = test[i,1]
distance[j,2]=train[j,1]
distance[j,3]=dist
}
#sort the distance matrix by distance
distance = distance[order(distance[,3]),]
#pick the top k rows with least distances and write it to the final data frame in required format
# NOTE(review): assumes k <= nrow(train); otherwise NA rows are selected.
distance=distance[1:k,]
final[i,1]=distance[1,1]
count=2
for (s in 1:k)
{
for (t in 2:3)
{
final[i,count]=distance[s,t]
count=count+1
}
}
}
#rename the columns of the final data frame
colnames(final)[1]="Transaction ID"
count=2
for (c in 1:k)
{
colnames(final)[count]=paste("ID", as.character(c))
count=count+1
colnames(final)[count]=paste("Prox", as.character(c))
count=count+1
}
# Append the actual class of each test row as the last column.
final[,ncol(final)+1]=test$class
colnames(final)[ncol(final)]="Class"
#write.csv(final,"result_income_euclid.csv")
final
}
# manhat_income: k-nearest-neighbour search using a mixed Manhattan distance.
# Identical to euclid_income except the ratio-attribute distance is a sum of
# absolute differences (no squaring / square root).
#
# Args:
#   k     - number of nearest neighbours to keep per test row
#   test  - test data frame; column 1 is the transaction ID, last column the class
#   train - training data frame with the same layout; column 1 is the ID
#
# Returns a data frame with one row per test record: "Transaction ID", then k
# pairs of columns ("ID i", "Prox i"), and finally the actual "Class".
manhat_income <- function(k, test, train) {
final=data.frame()
# NOTE(review): 'ncols' is computed but never used below.
ncols = ncol(test)-1
# imputing the mean for the attributes 'age' and 'hours_per_week'.
# imputing the mode for the 2 ratio attributes 'capital_gain' and 'capital_loss'
# since its almost always zero. Hence taking the mean for those would not be the right thing.
# Imputing the Mode for all the nominal and ordinal attributes
for(i in 1:nrow(train))
{
for( j in 2:(ncol(train)-1))
{
if(is.na(train[i,j]) || train[i,j]==" ?"){
if(colnames(train)[j]== "age" || colnames(train)[j]== "hour_per_week"){
# NOTE(review): if this column ever contains " ?" strings it is character,
# so mean() would return NA with a warning -- confirm these columns are numeric.
train[i,j] = mean(train[,j])
}else{
train[i,j] = as.character( Mode(train[,j]) )
}
}
}
}
#min max normalization for the 4 ratio attributes
maxage = as.numeric(max(train$age))
minage = as.numeric(min(train$age))
diffage = maxage - minage
maxgain = as.numeric(max(train$capital_gain))
mingain = as.numeric(min(train$capital_gain))
diffgain = maxgain - mingain
maxloss = as.numeric(max(train$capital_loss))
minloss = as.numeric(min(train$capital_loss))
diffloss = maxloss - minloss
maxhour = as.numeric(max(train$hour_per_week))
minhour = as.numeric(min(train$hour_per_week))
diffhour = maxhour - minhour
for(i in 1:nrow(train))
{
train[i,"age"] = (train[i,"age"]-minage) / diffage
train[i,"capital_gain"] = (train[i,"capital_gain"]-mingain) / diffgain
train[i,"capital_loss"] = (train[i,"capital_loss"]-minloss) / diffloss
train[i,"hour_per_week"] = (train[i,"hour_per_week"]-minhour) / diffhour
}
#normalizing the test data
# Test values are normalized with the TRAINING min/max; " ?" entries are left as-is.
for(i in 1:nrow(test))
{
if(test[i,"age"] != " ?") test[i,"age"] = (test[i,"age"]-minage) / diffage
if(test[i,"capital_gain"] != " ?") test[i,"capital_gain"] = (test[i,"capital_gain"]-mingain) / diffgain
if(test[i,"capital_loss"] != " ?") test[i,"capital_loss"] = (test[i,"capital_loss"]-minloss) / diffloss
if(test[i,"hour_per_week"] != " ?") test[i,"hour_per_week"] = (test[i,"hour_per_week"]-minhour) / diffhour
}
# Distance of every test row i to every training row j.
for (i in 1:nrow(test))
{
# NOTE(review): growing 'distance' and 'final' cell by cell copies the data
# frame repeatedly (O(n^2)); preallocating would be much faster.
distance=data.frame()
for(j in 1:nrow(train))
{
#caclualte distance for nominal data
# Simple matching: 0 if equal (or test value missing), 1 otherwise.
if((as.character(test$workclass[i]) == as.character(train$workclass[j])) || (as.character(test$workclass[i])== " ?"))
workclassdiff = 0
else workclassdiff = 1
if((as.character(test$marital_status[i]) == as.character(train$marital_status[j])) || (as.character(test$marital_status[i]) == " ?"))
marital_statusdiff = 0
else marital_statusdiff = 1
if((as.character(test$occupation[i]) == as.character(train$occupation[j])) || (as.character(test$occupation[i]) == " ?"))
occupationdiff = 0
else occupationdiff = 1
if((as.character(test$relationship[i]) == as.character(train$relationship[j])) || (as.character(test$relationship[i]) == " ?"))
relationshipdiff = 0
else relationshipdiff = 1
if((as.character(test$race[i]) == as.character(train$race[j])) || (as.character(test$race[i]) == " ?"))
racediff = 0
else racediff = 1
if((as.character(test$gender[i]) == as.character(train$gender[j])) || (as.character(test$gender[i]) == " ?"))
genderdiff = 0
else genderdiff = 1
if((as.character(test$native_country[i]) == as.character(train$native_country[j])) || (as.character(test$native_country[i]) == " ?"))
native_countrydiff = 0
else native_countrydiff = 1
# Average mismatch over the 7 nominal attributes.
ndist=(workclassdiff+marital_statusdiff+occupationdiff+
relationshipdiff+racediff+genderdiff+native_countrydiff)/7
#Calculate Manhattan Distance for ratio data
# Missing (" ?") test values contribute 0 to the ratio distance.
if(test$age[i] != " ?"){
rdist = abs(test$age[i]-train$age[j])
}else rdist = 0
if(test$capital_gain[i] != " ?"){
rdist = rdist + abs(test$capital_gain[i]-train$capital_gain[j])
}
if(test$capital_loss[i] != " ?"){
rdist = rdist + abs(test$capital_loss[i]-train$capital_loss[j])
}
if(test$hour_per_week[i] != " ?"){
rdist = rdist + abs(test$hour_per_week[i]-train$hour_per_week[j])
}
#Calculate distance for ordinal data
# education_cat is assumed to range over 1..16, hence the /15 scaling.
if(test$education_cat[i] != " ?")
odist=abs(test$education_cat[i]-train$education_cat[j])/15
else odist =0;
#Final distance is the average of the ordinal, nominal and ratio distances
dist=(ndist+rdist+odist)/3
#Write this distance to the distance matrix(data frame)
distance[j,1] = test[i,1]
distance[j,2]=train[j,1]
distance[j,3]=dist
}
#sort the distance matrix by distance
distance = distance[order(distance[,3]),]
#pick the top k rows with least distances and write it to the final data frame in required format
# NOTE(review): assumes k <= nrow(train); otherwise NA rows are selected.
distance=distance[1:k,]
final[i,1]=distance[1,1]
count=2
for (s in 1:k)
{
for (t in 2:3)
{
final[i,count]=distance[s,t]
count=count+1
}
}
}
#rename the columns of the final data frame
colnames(final)[1]="Transaction ID"
count=2
for (c in 1:k)
{
colnames(final)[count]=paste("ID", as.character(c))
count=count+1
colnames(final)[count]=paste("Prox", as.character(c))
count=count+1
}
# Append the actual class of each test row as the last column.
final[,ncol(final)+1]=test$class
colnames(final)[ncol(final)]="Class"
#write.csv(final,"result_income_euclid.csv")
final
}
#function that predicts the class of the test dataset using knn
# knn_income: classify each test row by majority vote among its k nearest
# training neighbours.
#
# Args:
#   k         - number of neighbours
#   test      - test data frame (column 1 = ID, last column = actual class)
#   train     - training data frame; NOTE(review): column 16 is assumed to
#               hold the class label (" <=50K" / " >50K")
#   proximity - distance measure, "euclid" or "manhat"
#
# Returns a data frame with columns "Transaction ID", "Actual Class",
# "Predicted Class" and "Probability" (the winning vote share).
knn_income <- function(k, test, train, proximity) {
  # Fail fast on an unknown proximity measure instead of printing a message
  # and returning the printed string (the original behaviour).
  if (!(proximity %in% c("euclid", "manhat"))) {
    stop("Error! Enter either euclid or manhat for proximity", call. = FALSE)
  }
  # Delegate the distance computation to the requested measure.
  if (proximity == "euclid") {
    final <- euclid_income(k, test, train)
  } else {
    final <- manhat_income(k, test, train)
  }
  results <- data.frame()
  # Count the votes of the k nearest neighbours for each test row.
  for (i in seq_len(nrow(final))) {
    countLess <- 0
    countMore <- 0
    for (j in seq_len(k)) {
      # Neighbour IDs sit in the even columns of 'final' ("ID 1", "ID 2", ...).
      row <- final[i, j * 2]
      neighbour_class <- as.character(train[train$ID == row, 16])
      if (neighbour_class == " <=50K") countLess <- countLess + 1
      if (neighbour_class == " >50K") countMore <- countMore + 1
    }
    # Vote shares; ties go to " <=50K", matching the original max() logic.
    probLess <- countLess / k
    probMore <- countMore / k
    if (probLess >= probMore) {
      predClass <- " <=50K"
    } else {
      predClass <- " >50K"
    }
    # One output row per test record, in the required format.
    results[i, 1] <- final$`Transaction ID`[i]
    results[i, 2] <- final$Class[i]
    results[i, 3] <- predClass
    results[i, 4] <- max(probLess, probMore)
  }
  colnames(results) <- c("Transaction ID", "Actual Class", "Predicted Class", "Probability")
  results
}
#(void main equivalent)
# Entry point: classify the test set with 3-NN using the Euclidean measure and
# write the predictions to result_income.csv in the setwd() directory above.
results=knn_income(3, testing, training, "euclid")
#results=knn_income(3, testing, training, "manhat")
#results=knn_income(3, testing, training, "abc")
write.csv(results,"result_income.csv")
|
/income_class_combined.R
|
no_license
|
arjhunh/kNNClassifier
|
R
| false
| false
| 13,547
|
r
|
# Load the training and test splits of the income data set.
# NOTE(review): absolute Windows paths make this script non-portable;
# consider reading paths from a config file or command-line arguments.
training = read.csv("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/income_tr.csv")
testing = read.csv("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/income_te.csv")
# Working directory for the result CSV written at the bottom of the script.
setwd("C:/Users/Avinash Vallur/Desktop/Arjhun/Data Mining/Datasets/")
# Uncomment to run on a small subset while debugging.
#training=training[1:100,]
#testing=testing[1:100,]
# Return the most frequent value in x; on a tie the value that appears
# first in x wins (same tie-breaking as tabulate/which.max).
Mode <- function(x) {
  distinct_vals <- unique(x)
  counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(counts)]
}
# euclid_income: k-nearest-neighbour search using a mixed Euclidean distance.
#
# Args:
#   k     - number of nearest neighbours to keep per test row
#   test  - test data frame; column 1 is the transaction ID, last column the class
#   train - training data frame with the same layout; column 1 is the ID
#
# Returns a data frame with one row per test record: "Transaction ID", then k
# pairs of columns ("ID i", "Prox i") holding the i-th nearest training ID and
# its distance, and finally the actual "Class" of the test row.
#
# Missing values (NA or " ?") in train are imputed first; the four ratio
# attributes are then min-max normalized using the training min/max.
euclid_income <- function(k, test, train) {
final=data.frame()
# NOTE(review): 'ncols' is computed but never used below.
ncols = ncol(test)-1
# imputing the mean for the attributes 'age' and 'hours_per_week'.
# imputing the mode for the 2 ratio attributes 'capital_gain' and 'capital_loss'
# since its almost always zero. Hence taking the mean for those would not be the right thing.
# Imputing the Mode for all the nominal and ordinal attributes
for(i in 1:nrow(train))
{
for( j in 2:(ncol(train)-1))
{
if(is.na(train[i,j]) || train[i,j]==" ?"){
if(colnames(train)[j]== "age" || colnames(train)[j]== "hour_per_week"){
# NOTE(review): if this column ever contains " ?" strings it is character,
# so mean() would return NA with a warning -- confirm these columns are numeric.
train[i,j] = mean(train[,j])
}else{
train[i,j] = as.character( Mode(train[,j]) )
}
}
}
}
#min max normalization for the 4 ratio attributes
maxage = as.numeric(max(train$age))
minage = as.numeric(min(train$age))
diffage = maxage - minage
maxgain = as.numeric(max(train$capital_gain))
mingain = as.numeric(min(train$capital_gain))
diffgain = maxgain - mingain
maxloss = as.numeric(max(train$capital_loss))
minloss = as.numeric(min(train$capital_loss))
diffloss = maxloss - minloss
maxhour = as.numeric(max(train$hour_per_week))
minhour = as.numeric(min(train$hour_per_week))
diffhour = maxhour - minhour
for(i in 1:nrow(train))
{
train[i,"age"] = (train[i,"age"]-minage) / diffage
train[i,"capital_gain"] = (train[i,"capital_gain"]-mingain) / diffgain
train[i,"capital_loss"] = (train[i,"capital_loss"]-minloss) / diffloss
train[i,"hour_per_week"] = (train[i,"hour_per_week"]-minhour) / diffhour
}
#normalizing the test data
# Test values are normalized with the TRAINING min/max; " ?" entries are left as-is.
for(i in 1:nrow(test))
{
if(test[i,"age"] != " ?") test[i,"age"] = (test[i,"age"]-minage) / diffage
if(test[i,"capital_gain"] != " ?") test[i,"capital_gain"] = (test[i,"capital_gain"]-mingain) / diffgain
if(test[i,"capital_loss"] != " ?") test[i,"capital_loss"] = (test[i,"capital_loss"]-minloss) / diffloss
if(test[i,"hour_per_week"] != " ?") test[i,"hour_per_week"] = (test[i,"hour_per_week"]-minhour) / diffhour
}
# Distance of every test row i to every training row j.
for (i in 1:nrow(test))
{
# NOTE(review): growing 'distance' and 'final' cell by cell copies the data
# frame repeatedly (O(n^2)); preallocating would be much faster.
distance=data.frame()
for(j in 1:nrow(train))
{
#caclualte distance for nominal data
# Simple matching: 0 if equal (or test value missing), 1 otherwise.
if((as.character(test$workclass[i]) == as.character(train$workclass[j])) || (as.character(test$workclass[i])== " ?"))
workclassdiff = 0
else workclassdiff = 1
if((as.character(test$marital_status[i]) == as.character(train$marital_status[j])) || (as.character(test$marital_status[i]) == " ?"))
marital_statusdiff = 0
else marital_statusdiff = 1
if((as.character(test$occupation[i]) == as.character(train$occupation[j])) || (as.character(test$occupation[i]) == " ?"))
occupationdiff = 0
else occupationdiff = 1
if((as.character(test$relationship[i]) == as.character(train$relationship[j])) || (as.character(test$relationship[i]) == " ?"))
relationshipdiff = 0
else relationshipdiff = 1
if((as.character(test$race[i]) == as.character(train$race[j])) || (as.character(test$race[i]) == " ?"))
racediff = 0
else racediff = 1
if((as.character(test$gender[i]) == as.character(train$gender[j])) || (as.character(test$gender[i]) == " ?"))
genderdiff = 0
else genderdiff = 1
if((as.character(test$native_country[i]) == as.character(train$native_country[j])) || (as.character(test$native_country[i]) == " ?"))
native_countrydiff = 0
else native_countrydiff = 1
# Average mismatch over the 7 nominal attributes.
ndist=(workclassdiff+marital_statusdiff+occupationdiff+
relationshipdiff+racediff+genderdiff+native_countrydiff)/7
#Calculate Euclidean Distance for ratio data
# Missing (" ?") test values contribute 0 to the ratio distance.
if(test$age[i] != " ?"){
rdist = (test$age[i]-train$age[j])^2
}else rdist = 0
if(test$capital_gain[i] != " ?"){
rdist = rdist + (test$capital_gain[i]-train$capital_gain[j])^2
}
if(test$capital_loss[i] != " ?"){
rdist = rdist + (test$capital_loss[i]-train$capital_loss[j])^2
}
if(test$hour_per_week[i] != " ?"){
rdist = rdist + (test$hour_per_week[i]-train$hour_per_week[j])^2
}
rdist=sqrt(rdist)
#Calculate distance for ordinal data
# education_cat is assumed to range over 1..16, hence the /15 scaling.
if(test$education_cat[i] != " ?")
odist=abs(test$education_cat[i]-train$education_cat[j])/15
else odist =0;
#Final distance is the average of the ordinal, nominal and ratio distances
dist=(ndist+rdist+odist)/3
#Write this distance to the distance matrix(data frame)
distance[j,1] = test[i,1]
distance[j,2]=train[j,1]
distance[j,3]=dist
}
#sort the distance matrix by distance
distance = distance[order(distance[,3]),]
#pick the top k rows with least distances and write it to the final data frame in required format
# NOTE(review): assumes k <= nrow(train); otherwise NA rows are selected.
distance=distance[1:k,]
final[i,1]=distance[1,1]
count=2
for (s in 1:k)
{
for (t in 2:3)
{
final[i,count]=distance[s,t]
count=count+1
}
}
}
#rename the columns of the final data frame
colnames(final)[1]="Transaction ID"
count=2
for (c in 1:k)
{
colnames(final)[count]=paste("ID", as.character(c))
count=count+1
colnames(final)[count]=paste("Prox", as.character(c))
count=count+1
}
# Append the actual class of each test row as the last column.
final[,ncol(final)+1]=test$class
colnames(final)[ncol(final)]="Class"
#write.csv(final,"result_income_euclid.csv")
final
}
# manhat_income: k-nearest-neighbour search using a mixed Manhattan distance.
# Identical to euclid_income except the ratio-attribute distance is a sum of
# absolute differences (no squaring / square root).
#
# Args:
#   k     - number of nearest neighbours to keep per test row
#   test  - test data frame; column 1 is the transaction ID, last column the class
#   train - training data frame with the same layout; column 1 is the ID
#
# Returns a data frame with one row per test record: "Transaction ID", then k
# pairs of columns ("ID i", "Prox i"), and finally the actual "Class".
manhat_income <- function(k, test, train) {
final=data.frame()
# NOTE(review): 'ncols' is computed but never used below.
ncols = ncol(test)-1
# imputing the mean for the attributes 'age' and 'hours_per_week'.
# imputing the mode for the 2 ratio attributes 'capital_gain' and 'capital_loss'
# since its almost always zero. Hence taking the mean for those would not be the right thing.
# Imputing the Mode for all the nominal and ordinal attributes
for(i in 1:nrow(train))
{
for( j in 2:(ncol(train)-1))
{
if(is.na(train[i,j]) || train[i,j]==" ?"){
if(colnames(train)[j]== "age" || colnames(train)[j]== "hour_per_week"){
# NOTE(review): if this column ever contains " ?" strings it is character,
# so mean() would return NA with a warning -- confirm these columns are numeric.
train[i,j] = mean(train[,j])
}else{
train[i,j] = as.character( Mode(train[,j]) )
}
}
}
}
#min max normalization for the 4 ratio attributes
maxage = as.numeric(max(train$age))
minage = as.numeric(min(train$age))
diffage = maxage - minage
maxgain = as.numeric(max(train$capital_gain))
mingain = as.numeric(min(train$capital_gain))
diffgain = maxgain - mingain
maxloss = as.numeric(max(train$capital_loss))
minloss = as.numeric(min(train$capital_loss))
diffloss = maxloss - minloss
maxhour = as.numeric(max(train$hour_per_week))
minhour = as.numeric(min(train$hour_per_week))
diffhour = maxhour - minhour
for(i in 1:nrow(train))
{
train[i,"age"] = (train[i,"age"]-minage) / diffage
train[i,"capital_gain"] = (train[i,"capital_gain"]-mingain) / diffgain
train[i,"capital_loss"] = (train[i,"capital_loss"]-minloss) / diffloss
train[i,"hour_per_week"] = (train[i,"hour_per_week"]-minhour) / diffhour
}
#normalizing the test data
# Test values are normalized with the TRAINING min/max; " ?" entries are left as-is.
for(i in 1:nrow(test))
{
if(test[i,"age"] != " ?") test[i,"age"] = (test[i,"age"]-minage) / diffage
if(test[i,"capital_gain"] != " ?") test[i,"capital_gain"] = (test[i,"capital_gain"]-mingain) / diffgain
if(test[i,"capital_loss"] != " ?") test[i,"capital_loss"] = (test[i,"capital_loss"]-minloss) / diffloss
if(test[i,"hour_per_week"] != " ?") test[i,"hour_per_week"] = (test[i,"hour_per_week"]-minhour) / diffhour
}
# Distance of every test row i to every training row j.
for (i in 1:nrow(test))
{
# NOTE(review): growing 'distance' and 'final' cell by cell copies the data
# frame repeatedly (O(n^2)); preallocating would be much faster.
distance=data.frame()
for(j in 1:nrow(train))
{
#caclualte distance for nominal data
# Simple matching: 0 if equal (or test value missing), 1 otherwise.
if((as.character(test$workclass[i]) == as.character(train$workclass[j])) || (as.character(test$workclass[i])== " ?"))
workclassdiff = 0
else workclassdiff = 1
if((as.character(test$marital_status[i]) == as.character(train$marital_status[j])) || (as.character(test$marital_status[i]) == " ?"))
marital_statusdiff = 0
else marital_statusdiff = 1
if((as.character(test$occupation[i]) == as.character(train$occupation[j])) || (as.character(test$occupation[i]) == " ?"))
occupationdiff = 0
else occupationdiff = 1
if((as.character(test$relationship[i]) == as.character(train$relationship[j])) || (as.character(test$relationship[i]) == " ?"))
relationshipdiff = 0
else relationshipdiff = 1
if((as.character(test$race[i]) == as.character(train$race[j])) || (as.character(test$race[i]) == " ?"))
racediff = 0
else racediff = 1
if((as.character(test$gender[i]) == as.character(train$gender[j])) || (as.character(test$gender[i]) == " ?"))
genderdiff = 0
else genderdiff = 1
if((as.character(test$native_country[i]) == as.character(train$native_country[j])) || (as.character(test$native_country[i]) == " ?"))
native_countrydiff = 0
else native_countrydiff = 1
# Average mismatch over the 7 nominal attributes.
ndist=(workclassdiff+marital_statusdiff+occupationdiff+
relationshipdiff+racediff+genderdiff+native_countrydiff)/7
#Calculate Manhattan Distance for ratio data
# Missing (" ?") test values contribute 0 to the ratio distance.
if(test$age[i] != " ?"){
rdist = abs(test$age[i]-train$age[j])
}else rdist = 0
if(test$capital_gain[i] != " ?"){
rdist = rdist + abs(test$capital_gain[i]-train$capital_gain[j])
}
if(test$capital_loss[i] != " ?"){
rdist = rdist + abs(test$capital_loss[i]-train$capital_loss[j])
}
if(test$hour_per_week[i] != " ?"){
rdist = rdist + abs(test$hour_per_week[i]-train$hour_per_week[j])
}
#Calculate distance for ordinal data
# education_cat is assumed to range over 1..16, hence the /15 scaling.
if(test$education_cat[i] != " ?")
odist=abs(test$education_cat[i]-train$education_cat[j])/15
else odist =0;
#Final distance is the average of the ordinal, nominal and ratio distances
dist=(ndist+rdist+odist)/3
#Write this distance to the distance matrix(data frame)
distance[j,1] = test[i,1]
distance[j,2]=train[j,1]
distance[j,3]=dist
}
#sort the distance matrix by distance
distance = distance[order(distance[,3]),]
#pick the top k rows with least distances and write it to the final data frame in required format
# NOTE(review): assumes k <= nrow(train); otherwise NA rows are selected.
distance=distance[1:k,]
final[i,1]=distance[1,1]
count=2
for (s in 1:k)
{
for (t in 2:3)
{
final[i,count]=distance[s,t]
count=count+1
}
}
}
#rename the columns of the final data frame
colnames(final)[1]="Transaction ID"
count=2
for (c in 1:k)
{
colnames(final)[count]=paste("ID", as.character(c))
count=count+1
colnames(final)[count]=paste("Prox", as.character(c))
count=count+1
}
# Append the actual class of each test row as the last column.
final[,ncol(final)+1]=test$class
colnames(final)[ncol(final)]="Class"
#write.csv(final,"result_income_euclid.csv")
final
}
#function that predicts the class of the test dataset using knn
# knn_income: classify each test row by majority vote among its k nearest
# training neighbours.
#
# Args:
#   k         - number of neighbours
#   test      - test data frame (column 1 = ID, last column = actual class)
#   train     - training data frame; NOTE(review): column 16 is assumed to
#               hold the class label (" <=50K" / " >50K")
#   proximity - distance measure, "euclid" or "manhat"
#
# Returns a data frame with columns "Transaction ID", "Actual Class",
# "Predicted Class" and "Probability" (the winning vote share).
knn_income <- function(k, test, train, proximity) {
  # Fail fast on an unknown proximity measure instead of printing a message
  # and returning the printed string (the original behaviour).
  if (!(proximity %in% c("euclid", "manhat"))) {
    stop("Error! Enter either euclid or manhat for proximity", call. = FALSE)
  }
  # Delegate the distance computation to the requested measure.
  if (proximity == "euclid") {
    final <- euclid_income(k, test, train)
  } else {
    final <- manhat_income(k, test, train)
  }
  results <- data.frame()
  # Count the votes of the k nearest neighbours for each test row.
  for (i in seq_len(nrow(final))) {
    countLess <- 0
    countMore <- 0
    for (j in seq_len(k)) {
      # Neighbour IDs sit in the even columns of 'final' ("ID 1", "ID 2", ...).
      row <- final[i, j * 2]
      neighbour_class <- as.character(train[train$ID == row, 16])
      if (neighbour_class == " <=50K") countLess <- countLess + 1
      if (neighbour_class == " >50K") countMore <- countMore + 1
    }
    # Vote shares; ties go to " <=50K", matching the original max() logic.
    probLess <- countLess / k
    probMore <- countMore / k
    if (probLess >= probMore) {
      predClass <- " <=50K"
    } else {
      predClass <- " >50K"
    }
    # One output row per test record, in the required format.
    results[i, 1] <- final$`Transaction ID`[i]
    results[i, 2] <- final$Class[i]
    results[i, 3] <- predClass
    results[i, 4] <- max(probLess, probMore)
  }
  colnames(results) <- c("Transaction ID", "Actual Class", "Predicted Class", "Probability")
  results
}
#(void main equivalent)
# Entry point: classify the test set with 3-NN using the Euclidean measure and
# write the predictions to result_income.csv in the setwd() directory above.
results=knn_income(3, testing, training, "euclid")
#results=knn_income(3, testing, training, "manhat")
#results=knn_income(3, testing, training, "abc")
write.csv(results,"result_income.csv")
|
#' Variable influence of BRT (gbm)
#'
#' Fits a boosted regression tree on the training rows of `variabledf` and
#' returns the relative influence of each predictor, estimated with gbm's
#' permutation test.
#'
#' @param variabledf data frame holding the response and candidate predictors.
#' @param opti if TRUE, tune the number of trees with (dismo) `gbm.step`;
#'   otherwise fit a plain `gbm` with `ntree` trees.
#' @param ntree number of boosting trees for the non-optimised fit.
#' @param y_varname name of the response column (a single name is expected at
#'   call time -- TODO confirm; the default lists the candidate responses).
#' @param interaction.depth,bag.fraction,shrinkage gbm hyper-parameters.
#' @param training row indices (or logical mask) selecting the training set.
#' @param test unused in this function body; kept for interface compatibility.
#' @param grepstring regular expression used by `subset_grep` to pick the
#'   predictor columns.
#' @param ... unused.
#' @return a one-column data frame `rel.inf` with variable names as row names.
#' @export
Brt_imp <- function(variabledf, opti = FALSE, ntree = 1000, y_varname = c("day_value", "night_value", "value_mean"), interaction.depth = 6, bag.fraction = 0.5, shrinkage = 0.01, training, test, grepstring, ...) {
  # Keep the response plus every predictor column matching grepstring.
  prenres <- paste(y_varname, "|", grepstring, sep = "")
  pre_mat <- subset_grep(variabledf[training, ], prenres)
  if (opti) {
    # Let dismo::gbm.step search for the optimal number of trees.
    Xmat <- subset_grep(variabledf[training, ], grepstring)
    rf3 <- gbm.step(data = pre_mat, gbm.x = names(Xmat), gbm.y = y_varname, family = "gaussian", n.trees = ntree, tree.complexity = interaction.depth, shrinkage = shrinkage, bag.fraction = bag.fraction)
    ntree <- rf3$gbm.call$best.trees
  } else {
    formu <- as.formula(paste(y_varname, "~.", sep = ""))
    rf3 <- gbm(formula = formu, data = pre_mat, distribution = "gaussian", n.trees = ntree, interaction.depth = interaction.depth, shrinkage = shrinkage, bag.fraction = bag.fraction)
  }
  # Permutation test gives the relative influence of each predictor.
  m <- summary(rf3, method = permutation.test.gbm, plotit = FALSE)
  rownames(m) <- m$var
  # Return the influence table visibly (the original ended in an assignment,
  # which made the return value invisible).
  m %>% select(rel.inf)
}
|
/R/Brt_imp.R
|
no_license
|
mengluchu/APMtools
|
R
| false
| false
| 1,086
|
r
|
#' variable influence of BRT (gbm)
#'
#' Fits a boosted regression tree on the training rows of `variabledf` and
#' returns the permutation-based relative influence of each predictor as a
#' one-column data frame (`rel.inf`, row names = variable names). Note the
#' final statement is an assignment, so the value is returned invisibly.
#'
#' @param variabledf data frame holding the response and candidate predictors.
#' @param opti if TRUE, tune the tree count with (dismo) gbm.step; otherwise
#'   fit a plain gbm with `ntree` trees.
#' @param ntree number of boosting trees for the non-optimised fit.
#' @param y_varname name of the response column (a single name is expected at
#'   call time -- TODO confirm; the default lists the candidate responses).
#' @param interaction.depth,bag.fraction,shrinkage gbm hyper-parameters.
#' @param training row indices (or logical mask) selecting the training set.
#' @param test unused in this function body -- TODO confirm intent.
#' @param grepstring regex used by subset_grep to pick predictor columns.
#' @param ... unused.
#' @export
Brt_imp = function(variabledf, opti = F, ntree = 1000, y_varname = c("day_value", "night_value", "value_mean"), interaction.depth = 6,bag.fraction = 0.5,shrinkage= 0.01, training, test, grepstring, ...) {
# Keep the response plus every predictor column matching grepstring.
prenres = paste(y_varname, "|", grepstring, sep = "")
pre_mat = subset_grep(variabledf[training, ], prenres)
if (opti) {
# Let dismo::gbm.step search for the optimal number of trees.
Xmat = subset_grep(variabledf[training, ], grepstring)
rf3 <- gbm.step(data = pre_mat, gbm.x = names(Xmat), gbm.y = y_varname, family = "gaussian", n.trees=ntree, tree.complexity = interaction.depth, shrinkage = shrinkage, bag.fraction = bag.fraction)
ntree = rf3$gbm.call$best.trees
} else {
formu = as.formula(paste(y_varname, "~.", sep = ""))
rf3 = gbm(formula = formu, data = pre_mat, distribution = "gaussian", n.trees = ntree, interaction.depth = interaction.depth, shrinkage = shrinkage, bag.fraction = bag.fraction)
}
# Permutation test gives the relative influence of each predictor.
m = summary(rf3, method = permutation.test.gbm, plotit = F)
rownames(m) = m$var
m = m %>% select(rel.inf)
}
|
##############################################################################
############### calculate governance score for each fisheries stock ##########
##############################################################################
## ReadMe : ##################################################################
## assigns a resilience score to each stock, based on the fisheries governance score
## of EEZs and high seas where the stock is caught, weighted by the relative proportion
## with which it is caught in each jurisdiction (using mean catch since 1980).
## For stocks that are exclusively coastal, it is simply based on the governance score of the respective EEZs.
## For stocks overlapping the high seas, it is based on the score of the RFMOs responsible for that HS area.
## If none of the RFMOs overlapping a stock do not explicitly manage that species,
## the stock is assumed to be unmanaged and gets a management score of 0.
## The resulting score is used to decide whether to assess that stock with the CMSY model
## that uses uniform priors (managed stock), or constrained priors (unmanaged stock).
## In 2014 the cutoff point for a stock to be considered managed was a score equal or greater than 0.6
##############################################################################
## NOTE: this requires SAUP fisheries data where coastal(=eez) and high seas are combined (distinguished by saup_id == 0 for high seas) ##
## NOTE(review): all read.csv paths assume the working directory is the repo
## root with ../ohiprep checked out as a sibling directory -- confirm before running.
source('../ohiprep/src/R/common.R') # set dir_neptune_data
library(tidyr)
#################################
## Formatting new SAUP catch data
#################################
## Identify and select the stocks that b/bmsy was calculated for (cmsy method):
stocks <- read.csv('globalprep/SAUP_FIS/v2015/tmp/b_bmsy_v16072015.csv') %>%
select(stock_id) %>%
unique()
meanCatch <- read.csv('globalprep/SAUP_FIS/v2015/tmp/mean_catch_saup_fao.csv')
meanCatch <- meanCatch %>%
filter(EEZID != 274) %>% #remove Gaza strip
mutate(stock_id = paste(TaxonKey, FAOAreaID, sep="_")) %>%
filter(stock_id %in% stocks$stock_id) %>%
select(stock_id, EEZID, FAOAreaID, TaxonKey, mean_catch) %>%
unique()
# calculate relative catch within each eez/hs region for each stock
# (rel_ct sums to 1 within each TaxonKey/FAO-area stock)
meanCatch <- meanCatch %>%
mutate(stock_saup_id = paste(stock_id, EEZID, sep="_")) %>%
group_by(TaxonKey, FAOAreaID) %>%
mutate (rel_ct = mean_catch/sum(mean_catch)) %>%
dplyr::select(stock_saup_id, stock_id, saup_id=EEZID, fao_id=FAOAreaID, TaxonKey, mean_catch, rel_ct) %>%
ungroup()
################################################################
## converting spatial scale of Mora scores (ohi 2013 regions) to saup regions
################################################################
# file to convert saup regions to ohi regions
saup_ohi <- read.csv(file.path('../ohiprep/src/LookupTables/new_saup_to_ohi_rgn.csv'), stringsAsFactors = F) %>%
select (saup_id, rgn_id = ohi_id_2013) ; head(saup_ohi)
# fisheries governance by EEZ from OHI2013 Mora scores for fis governance (layer unchanged)
eez_r <- read.csv('globalprep/Fisheries_stock_gov_score/raw/r_fishing_v2_eez_2013a_wMontenegro.csv', stringsAsFactors = F) %>%
select(rgn_id, Score=resilience.score); head(eez_r)
eez_r <- eez_r %>%
left_join(saup_ohi) %>% #N=287
group_by(saup_id) %>%
summarize(Score = mean(Score)) #N=278, average score for cases where several ohi regions are represented by one saup region
head(eez_r)
eez_r[duplicated(eez_r$saup_id),] #should be no duplicates
# EEZ (coastal) stocks: the whole within-EEZ catch counts, so proparea = 1;
# rfmo = 0 flags "no RFMO involved".
res_eez <- meanCatch %>%
filter(saup_id > 0) %>%
left_join(eez_r) %>%
mutate(proparea = 1, rfmo = 0) %>%
select(fao_id, saup_id, rfmo, proparea, stock_saup_id, Score)
head(res_eez); summary(res_eez)
#N=15763
################################################################
## High seas: determining resilience scores (saup_id=0)
################################################################
#########################
## dealing with species:
# get the list of stocks protected by each rfmo: rfmo_id, TaxonName
rfmo_sp <- read.csv('../ohiprep/HighSeas/tmp/rfmo_species.csv', stringsAsFactors = FALSE, check.names = FALSE) %>%
select(TaxonName, rfmo) %>%
unique()
# rename some of the taxa that have different names for the same species in the saup data
rfmo_sp$TaxonName[rfmo_sp$TaxonName =="Longfin mako"] = "Isurus paucus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Shortfin mako" ] = "Isurus oxyrinchus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Anguilla anguilla anguilla"] = "Anguilla anguilla"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Loligo vulgaris vulgaris"] = "Loligo vulgaris"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Mullus barbatus"] = "Mullus barbatus barbatus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Lepidonotothen kempi"] = "Lepidonotothen squamifrons"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Caranx ruber"] = "Carangoides ruber"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Alectis alexandrina"] = "Alectis alexandrinus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Kajikia audax"] = "Tetrapturus audax"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Istiompax indica"] = "Makaira indica"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Clupea Harengus"] = "Clupea harengus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Raja alba"] = "Rostroraja alba"
# get taxon key based on TaxonName
tax <- read.csv(file.path(dir_neptune_data, 'git-annex/globalprep/SAUP_FIS_data/v2015/raw/ohi_taxon.csv')) %>%
dplyr::select(Taxonkey=taxonkey, TaxonName=scientific.name); head(tax)
sort(setdiff(tax$TaxonName, rfmo_sp$TaxonName)) #lots of names on the taxon list that aren't in the RFMO protected species, not surprising
setdiff(rfmo_sp$TaxonName, tax$TaxonName) # some names that aren't in species list as catch
# drop RFMO species that never appear in the catch taxon list
rfmo_sp <- rfmo_sp %>%
left_join(tax) %>%
filter(!(is.na(Taxonkey)))
head(rfmo_sp)
#############################################
## Scores for rfmos for protectiveness:
### get the rfmo score and join: rfmo_id, TaxonName, rfmo score
rfmo_sc <- read.csv('HighSeas/HS_Resilience_v2014/RFMO/tmp/RFMOscores.csv', stringsAsFactors = F) ; head(rfmo_sc)
# remove capital letters from rfmo names and rows with NAs
rfmo_sc <- rfmo_sc %>%
mutate (rfmo = tolower(rfmo_sc$RFMO)) %>%
select (rfmo, Score) %>%
filter (!is.na(Score))
# Coverage of each rfmo in each fao_rgn
# need table to translate fao_rgn_id to fao_rgn
fao_id_rgn <- read.csv('../ohiprep/HighSeas/HS_other_v2014/FAOregions.csv', stringsAsFactors = F) %>%
select(rgn_id_2013, fao_id); head(fao_id_rgn)
# get the prop rfmo area in fao_id and join by region ID
rfmo_fao <- read.csv('HighSeas/HS_Resilience_v2014/RFMO/tmp/RFMOperFAO.csv', stringsAsFactors = F) ; head(rfmo_fao)
rfmo_fao <- rfmo_fao %>%
left_join(fao_id_rgn, by=c("rgn_id" = "rgn_id_2013")) ; rfmo_fao # Joining by: "rgn_id"
# reshape wide RFMO columns to long format: one row per (region, rfmo)
rfmo_fao <- gather(rfmo_fao, key = rfmo, value = proparea, -rgn_id, -rgn_name, -total, -fao_id)
rfmo_fao$rfmo <- as.character(rfmo_fao$rfmo)
head(rfmo_fao)
# add Antarctica:
# (ccamlr fully covers the three Antarctic FAO areas 48/58/88, so proparea = 1)
Ant_rgn <- data.frame( cbind ('rgn_id' = as.numeric(c(268, 271, 278)),
'rgn_name' = c('Antarctic', 'Antarctic', 'Antarctic'),
'total' = as.numeric(c(1, 1, 1)),
'fao_id' = as.numeric(c(48, 58, 88)),
'rfmo' = c('ccamlr', 'ccamlr', 'ccamlr'),
'proparea' = as.numeric(c(1, 1, 1)) ), stringsAsFactors = F)
# cbind coerced everything to character, so restore the numeric columns
Ant_rgn$proparea <- as.numeric(as.character(Ant_rgn$proparea))
Ant_rgn$total <- as.numeric(as.character(Ant_rgn$total))
Ant_rgn$fao_id <- as.numeric(as.character(Ant_rgn$fao_id))
rfmo_fao <- rbind(Ant_rgn, rfmo_fao)
head(rfmo_fao)
res_hs <- rfmo_fao %>%
# mutate(relarea = proparea/total) %>% # rel area will be NaN if the RFMO doesn't overlap the FAO region, MRF: don't think this is needed
left_join (rfmo_sc) %>% # add in rfmo resilience scores, joining by: "rfmo"
left_join (rfmo_sp)
head(res_hs)
res_hs <- res_hs %>%
mutate (saup_id = 0,
stock_saup_id = paste(Taxonkey, fao_id, saup_id, sep = '_')) %>% # make the stock_id and add the saup_id
select(fao_id, saup_id, rfmo, proparea, stock_saup_id, Score)
head(res_hs)
###############################
## Join scores with catch data
# rbind high seas with eez resilience scores
r_c <- rbind(res_hs, res_eez) # unique(r_c$saup_id[is.na(r_c$Score)]) # check
FinalRes <- meanCatch %>%
left_join(r_c) %>%
# dups <- FinalRes$stock_saup_id[duplicated(FinalRes$stock_saup_id)]
# tmp2 <- FinalRes[FinalRes$stock_saup_id %in% dups, ]
# tmp2[tmp2$stock_saup_id=="600094_47_0", ]
mutate(part_score = rel_ct * proparea * Score) %>% # NAs are stocks not protected by any rfmo, these get a 0 score.
group_by(stock_saup_id, stock_id, TaxonKey, fao_id, saup_id) %>%
summarize(part_score = max(part_score, na.rm=TRUE)) %>% # when a stock is protected by >1 rfmo within an FAO, select the highest score
ungroup()
#NAs result for high seas taxa that are not protected by any RFMO
# sum jurisdiction-level partial scores to one score per stock, then apply
# the 0.6 managed/unmanaged cutoff (unif_prior = 1 means "use uniform priors")
FinalRes <- FinalRes %>%
group_by(stock_id) %>%
summarize(final_score=sum(part_score, na.rm=TRUE)) %>%
mutate (unif_prior = ifelse( final_score > 0.6, 1, 0))
head(FinalRes)
summary(FinalRes)
hist(FinalRes$final_score)
write.csv(FinalRes, 'globalprep/SAUP_FIS/v2015/tmp/stock_resil_06cutoff_2015.csv', row.names=FALSE)
|
/globalprep/fis/v2015/Stock_resilience.R
|
no_license
|
OHI-Science/ohiprep_v2018
|
R
| false
| false
| 9,317
|
r
|
##############################################################################
############### calculate governance score for each fisheries stock ##########
##############################################################################
## ReadMe : ##################################################################
## assigns a resilience score to each stock, based on the fisheries governance score
## of EEZs and high seas where the stock is caught, weighted by the relative proportion
## with which it is caught in each jurisdiction (using mean catch since 1980).
## For stocks that are exclusively coastal, it is simply based on the governance score of the respective EEZs.
## For stocks overlapping the high seas, it is based on the score of the RFMOs responsible for that HS area.
## If none of the RFMOs overlapping a stock do not explicitly manage that species,
## the stock is assumed to be unmanaged and gets a management score of 0.
## The resulting score is used to decide whether to assess that stock with the CMSY model
## that uses uniform priors (managed stock), or constrained priors (unmanaged stock).
## In 2014 the cutoff point for a stock to be considered managed was a score equal or greater than 0.6
##############################################################################
## NOTE: this requires SAUP fisheries data where castal(=eez) and high seas are combined (distinguished by saup_id == 0 for high seas) ##
source('../ohiprep/src/R/common.R') # set dir_neptune_data
library(tidyr)
#################################
## Formatting new SAUP catch data
#################################
## Identify and select the stocks that b/bmsy was calculated for (cmsy method):
stocks <- read.csv('globalprep/SAUP_FIS/v2015/tmp/b_bmsy_v16072015.csv') %>%
select(stock_id) %>%
unique()
meanCatch <- read.csv('globalprep/SAUP_FIS/v2015/tmp/mean_catch_saup_fao.csv')
meanCatch <- meanCatch %>%
filter(EEZID != 274) %>% #remove Gaza strip
mutate(stock_id = paste(TaxonKey, FAOAreaID, sep="_")) %>%
filter(stock_id %in% stocks$stock_id) %>%
select(stock_id, EEZID, FAOAreaID, TaxonKey, mean_catch) %>%
unique()
# calculate relative catch within each eez/hs region for each stock
meanCatch <- meanCatch %>%
mutate(stock_saup_id = paste(stock_id, EEZID, sep="_")) %>%
group_by(TaxonKey, FAOAreaID) %>%
mutate (rel_ct = mean_catch/sum(mean_catch)) %>%
dplyr::select(stock_saup_id, stock_id, saup_id=EEZID, fao_id=FAOAreaID, TaxonKey, mean_catch, rel_ct) %>%
ungroup()
################################################################
## converting spatial scale of Mora scores (ohi 2013 regions) to saup regions
################################################################
# file to convert saup regions to ohi regions
saup_ohi <- read.csv(file.path('../ohiprep/src/LookupTables/new_saup_to_ohi_rgn.csv'), stringsAsFactors = F) %>%
select (saup_id, rgn_id = ohi_id_2013) ; head(saup_ohi)
# fisheries governance by EEZ from OHI2013 Mora scores for fis governance (layer unchanged)
eez_r <- read.csv('globalprep/Fisheries_stock_gov_score/raw/r_fishing_v2_eez_2013a_wMontenegro.csv', stringsAsFactors = F) %>%
select(rgn_id, Score=resilience.score); head(eez_r)
eez_r <- eez_r %>%
left_join(saup_ohi) %>% #N=287
group_by(saup_id) %>%
summarize(Score = mean(Score)) #N=278, average score for cases where several ohi regions are represented by one saup region
head(eez_r)
eez_r[duplicated(eez_r$saup_id),] #should be no duplicates
res_eez <- meanCatch %>%
filter(saup_id > 0) %>%
left_join(eez_r) %>%
mutate(proparea = 1, rfmo = 0) %>%
select(fao_id, saup_id, rfmo, proparea, stock_saup_id, Score)
head(res_eez); summary(res_eez)
#N=15763
################################################################
## High seas: determining resilience scores (saup_id=0)
################################################################
#########################
## dealing with species:
# get the list of stocks protected by each rfmo: rfmo_id, TaxonName
rfmo_sp <- read.csv('../ohiprep/HighSeas/tmp/rfmo_species.csv', stringsAsFactors = FALSE, check.names = FALSE) %>%
select(TaxonName, rfmo) %>%
unique()
# rename some of the taxa that have different names for the same species in the saup data
rfmo_sp$TaxonName[rfmo_sp$TaxonName =="Longfin mako"] = "Isurus paucus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Shortfin mako" ] = "Isurus oxyrinchus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Anguilla anguilla anguilla"] = "Anguilla anguilla"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Loligo vulgaris vulgaris"] = "Loligo vulgaris"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Mullus barbatus"] = "Mullus barbatus barbatus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Lepidonotothen kempi"] = "Lepidonotothen squamifrons"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Caranx ruber"] = "Carangoides ruber"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Alectis alexandrina"] = "Alectis alexandrinus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Kajikia audax"] = "Tetrapturus audax"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Istiompax indica"] = "Makaira indica"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Clupea Harengus"] = "Clupea harengus"
rfmo_sp$TaxonName[rfmo_sp$TaxonName == "Raja alba"] = "Rostroraja alba"
# get taxon key based on TaxonName
tax <- read.csv(file.path(dir_neptune_data, 'git-annex/globalprep/SAUP_FIS_data/v2015/raw/ohi_taxon.csv')) %>%
dplyr::select(Taxonkey=taxonkey, TaxonName=scientific.name); head(tax)
sort(setdiff(tax$TaxonName, rfmo_sp$TaxonName)) #lots of names on the taxon list that aren't in the RFMO protected species, not surprising
setdiff(rfmo_sp$TaxonName, tax$TaxonName) # some names that aren't in species list as catch
rfmo_sp <- rfmo_sp %>%
left_join(tax) %>%
filter(!(is.na(Taxonkey)))
head(rfmo_sp)
#############################################
## Scores for rfmos for protectiveness:
### get the rfmo score and join: rfmo_id, TaxonName, rfmo score
rfmo_sc <- read.csv('HighSeas/HS_Resilience_v2014/RFMO/tmp/RFMOscores.csv', stringsAsFactors = F) ; head(rfmo_sc)
# remove capital letters from rfmo names and rows with NAs
rfmo_sc <- rfmo_sc %>%
mutate (rfmo = tolower(rfmo_sc$RFMO)) %>%
select (rfmo, Score) %>%
filter (!is.na(Score))
# Coverage of each rfmo in each fao_rgn
# need table to translate fao_rgn_id to fao_rgn
fao_id_rgn <- read.csv('../ohiprep/HighSeas/HS_other_v2014/FAOregions.csv', stringsAsFactors = F) %>%
select(rgn_id_2013, fao_id); head(fao_id_rgn)
# get the prop rfmo area in fao_id and join by region ID
rfmo_fao <- read.csv('HighSeas/HS_Resilience_v2014/RFMO/tmp/RFMOperFAO.csv', stringsAsFactors = F) ; head(rfmo_fao)
rfmo_fao <- rfmo_fao %>%
left_join(fao_id_rgn, by=c("rgn_id" = "rgn_id_2013")) ; rfmo_fao # Joining by: "rgn_id"
rfmo_fao <- gather(rfmo_fao, key = rfmo, value = proparea, -rgn_id, -rgn_name, -total, -fao_id)
rfmo_fao$rfmo <- as.character(rfmo_fao$rfmo)
head(rfmo_fao)
# add Antarctica:
Ant_rgn <- data.frame( cbind ('rgn_id' = as.numeric(c(268, 271, 278)),
'rgn_name' = c('Antarctic', 'Antarctic', 'Antarctic'),
'total' = as.numeric(c(1, 1, 1)),
'fao_id' = as.numeric(c(48, 58, 88)),
'rfmo' = c('ccamlr', 'ccamlr', 'ccamlr'),
'proparea' = as.numeric(c(1, 1, 1)) ), stringsAsFactors = F)
Ant_rgn$proparea <- as.numeric(as.character(Ant_rgn$proparea))
Ant_rgn$total <- as.numeric(as.character(Ant_rgn$total))
Ant_rgn$fao_id <- as.numeric(as.character(Ant_rgn$fao_id))
rfmo_fao <- rbind(Ant_rgn, rfmo_fao)
head(rfmo_fao)
res_hs <- rfmo_fao %>%
# mutate(relarea = proparea/total) %>% # rel area will be NaN if the RFMO doesn't overlap the FAO region, MRF: don't think this is needed
left_join (rfmo_sc) %>% # add in rfmo resilience scores, joining by: "rfmo"
left_join (rfmo_sp)
head(res_hs)
res_hs <- res_hs %>%
mutate (saup_id = 0,
stock_saup_id = paste(Taxonkey, fao_id, saup_id, sep = '_')) %>% # make the stock_id and add the saup_id
select(fao_id, saup_id, rfmo, proparea, stock_saup_id, Score)
head(res_hs)
###############################
## Join scores with catch data
# rbind high seas with eez resilience scores
r_c <- rbind(res_hs, res_eez) # unique(r_c$saup_id[is.na(r_c$Score)]) # check
FinalRes <- meanCatch %>%
left_join(r_c) %>%
# dups <- FinalRes$stock_saup_id[duplicated(FinalRes$stock_saup_id)]
# tmp2 <- FinalRes[FinalRes$stock_saup_id %in% dups, ]
# tmp2[tmp2$stock_saup_id=="600094_47_0", ]
mutate(part_score = rel_ct * proparea * Score) %>% # NAs are stocks not protected by any rfmo, these get a 0 score.
group_by(stock_saup_id, stock_id, TaxonKey, fao_id, saup_id) %>%
summarize(part_score = max(part_score, na.rm=TRUE)) %>% # when a stock is protected by >1 rfmo within an FAO, select the highest score
ungroup()
#NAs result for high seas taxa that are not protected by any RFMO
FinalRes <- FinalRes %>%
group_by(stock_id) %>%
summarize(final_score=sum(part_score, na.rm=TRUE)) %>%
mutate (unif_prior = ifelse( final_score > 0.6, 1, 0))
head(FinalRes)
summary(FinalRes)
hist(FinalRes$final_score)
write.csv(FinalRes, 'globalprep/SAUP_FIS/v2015/tmp/stock_resil_06cutoff_2015.csv', row.names=FALSE)
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Scaffold tests for the UserInfoOut model: one test_that() block per model
# property. The generator leaves every expectation commented out; uncomment
# and replace EXPECTED_RESULT with a concrete value to activate a test.
context("Test UserInfoOut")
model.instance <- UserInfoOut$new()
test_that("uid", {
# tests for the property `uid` (character)
# uncomment below to test the property
#expect_equal(model.instance$`uid`, "EXPECTED_RESULT")
})
test_that("email", {
# tests for the property `email` (character)
# uncomment below to test the property
#expect_equal(model.instance$`email`, "EXPECTED_RESULT")
})
test_that("phone_number", {
# tests for the property `phone_number` (character)
# uncomment below to test the property
#expect_equal(model.instance$`phone_number`, "EXPECTED_RESULT")
})
test_that("email_verified", {
# tests for the property `email_verified` (character)
# uncomment below to test the property
#expect_equal(model.instance$`email_verified`, "EXPECTED_RESULT")
})
test_that("display_name", {
# tests for the property `display_name` (character)
# uncomment below to test the property
#expect_equal(model.instance$`display_name`, "EXPECTED_RESULT")
})
test_that("photo_url", {
# tests for the property `photo_url` (character)
# uncomment below to test the property
#expect_equal(model.instance$`photo_url`, "EXPECTED_RESULT")
})
test_that("disabled", {
# tests for the property `disabled` (character)
# uncomment below to test the property
#expect_equal(model.instance$`disabled`, "EXPECTED_RESULT")
})
test_that("first_known_ip_address", {
# tests for the property `first_known_ip_address` (character)
# uncomment below to test the property
#expect_equal(model.instance$`first_known_ip_address`, "EXPECTED_RESULT")
})
test_that("provider_id", {
# tests for the property `provider_id` (character)
# uncomment below to test the property
#expect_equal(model.instance$`provider_id`, "EXPECTED_RESULT")
})
test_that("time_stamp", {
# tests for the property `time_stamp` (integer)
# uncomment below to test the property
#expect_equal(model.instance$`time_stamp`, "EXPECTED_RESULT")
})
test_that("verify_token", {
# tests for the property `verify_token` (character)
# uncomment below to test the property
#expect_equal(model.instance$`verify_token`, "EXPECTED_RESULT")
})
test_that("api_key", {
# tests for the property `api_key` (character)
# uncomment below to test the property
#expect_equal(model.instance$`api_key`, "EXPECTED_RESULT")
})
test_that("stripe_perishable_key", {
# tests for the property `stripe_perishable_key` (character)
# uncomment below to test the property
#expect_equal(model.instance$`stripe_perishable_key`, "EXPECTED_RESULT")
})
test_that("stripe_customer_id", {
# tests for the property `stripe_customer_id` (character)
# uncomment below to test the property
#expect_equal(model.instance$`stripe_customer_id`, "EXPECTED_RESULT")
})
test_that("other_infos", {
# tests for the property `other_infos` (UserInfoOut)
# uncomment below to test the property
#expect_equal(model.instance$`other_infos`, "EXPECTED_RESULT")
})
|
/tests/testthat/test_user_info_out.R
|
no_license
|
namsor/namsor-r-sdk2
|
R
| false
| false
| 3,096
|
r
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test UserInfoOut")
model.instance <- UserInfoOut$new()
test_that("uid", {
# tests for the property `uid` (character)
# uncomment below to test the property
#expect_equal(model.instance$`uid`, "EXPECTED_RESULT")
})
test_that("email", {
# tests for the property `email` (character)
# uncomment below to test the property
#expect_equal(model.instance$`email`, "EXPECTED_RESULT")
})
test_that("phone_number", {
# tests for the property `phone_number` (character)
# uncomment below to test the property
#expect_equal(model.instance$`phone_number`, "EXPECTED_RESULT")
})
test_that("email_verified", {
# tests for the property `email_verified` (character)
# uncomment below to test the property
#expect_equal(model.instance$`email_verified`, "EXPECTED_RESULT")
})
test_that("display_name", {
# tests for the property `display_name` (character)
# uncomment below to test the property
#expect_equal(model.instance$`display_name`, "EXPECTED_RESULT")
})
test_that("photo_url", {
# tests for the property `photo_url` (character)
# uncomment below to test the property
#expect_equal(model.instance$`photo_url`, "EXPECTED_RESULT")
})
test_that("disabled", {
# tests for the property `disabled` (character)
# uncomment below to test the property
#expect_equal(model.instance$`disabled`, "EXPECTED_RESULT")
})
test_that("first_known_ip_address", {
# tests for the property `first_known_ip_address` (character)
# uncomment below to test the property
#expect_equal(model.instance$`first_known_ip_address`, "EXPECTED_RESULT")
})
test_that("provider_id", {
# tests for the property `provider_id` (character)
# uncomment below to test the property
#expect_equal(model.instance$`provider_id`, "EXPECTED_RESULT")
})
test_that("time_stamp", {
# tests for the property `time_stamp` (integer)
# uncomment below to test the property
#expect_equal(model.instance$`time_stamp`, "EXPECTED_RESULT")
})
test_that("verify_token", {
# tests for the property `verify_token` (character)
# uncomment below to test the property
#expect_equal(model.instance$`verify_token`, "EXPECTED_RESULT")
})
test_that("api_key", {
# tests for the property `api_key` (character)
# uncomment below to test the property
#expect_equal(model.instance$`api_key`, "EXPECTED_RESULT")
})
test_that("stripe_perishable_key", {
# tests for the property `stripe_perishable_key` (character)
# uncomment below to test the property
#expect_equal(model.instance$`stripe_perishable_key`, "EXPECTED_RESULT")
})
test_that("stripe_customer_id", {
# tests for the property `stripe_customer_id` (character)
# uncomment below to test the property
#expect_equal(model.instance$`stripe_customer_id`, "EXPECTED_RESULT")
})
test_that("other_infos", {
# tests for the property `other_infos` (UserInfoOut)
# uncomment below to test the property
#expect_equal(model.instance$`other_infos`, "EXPECTED_RESULT")
})
|
# Auto-generated valgrind/fuzz regression input for multivariance:::match_rows:
# replays a stored argument list (A: 5x1 numeric matrix with extreme values,
# B: 1x1 zero matrix) and prints the structure of the result.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12389708290749e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613114544-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 251
|
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12389708290749e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Database.R
\name{get_label}
\alias{get_label}
\title{Get label information}
\usage{
get_label(
label_id,
token = NA
)
}
\arguments{
\item{label_id}{Integer value representing a valid label ID}
\item{token}{(optional) Token object obtained from authorize() or a string containing your personal access token}
}
\description{
Get label information
}
\examples{
# Retrieve label information
label <- get_label(1)
}
|
/man/get_label.Rd
|
no_license
|
Pascallio/discogsAPI
|
R
| false
| true
| 494
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Database.R
\name{get_label}
\alias{get_label}
\title{Get label information}
\usage{
get_label(
label_id,
token = NA
)
}
\arguments{
\item{label_id}{Integer value representing a valid label ID}
\item{token}{(optional) Token object obtained from authorize() or a string containing your personal access token}
}
\description{
Get label information
}
\examples{
# Retrieve label information
label <- get_label(1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetBranchPolygon.R
\name{do_some_cal}
\alias{do_some_cal}
\title{Simple function to test}
\usage{
do_some_cal(x)
}
\arguments{
\item{x}{some number}
}
\value{
some number
}
\description{
Simple function to test
}
\examples{
do_some_cal(5)
}
|
/man/do_some_cal.Rd
|
no_license
|
onehungano1/maptools
|
R
| false
| true
| 319
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetBranchPolygon.R
\name{do_some_cal}
\alias{do_some_cal}
\title{Simple function to test}
\usage{
do_some_cal(x)
}
\arguments{
\item{x}{some number}
}
\value{
some number
}
\description{
Simple function to test
}
\examples{
do_some_cal(5)
}
|
## Pair of functions that like the examples given calculate the
## inverse of a matrix, or gets the value from the cache
## Makes a special "vector", which is really a list containing
##a function to
##set the value of the matrix
##get the value of the matrix
##set the value of the inverse
##get the value of the inverse
# Build a matrix wrapper that can memoise its inverse. Returns a list of four
# closures sharing one environment: set/get replace and read the matrix,
# setinverse/getinverse store and read the cached inverse. Replacing the
# matrix discards any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # stale inverse must be discarded
  }
  fetch <- function() x
  store_inverse <- function(inverse) cached_inverse <<- inverse
  fetch_inverse <- function() cached_inverse
  list(
    set = replace,
    get = fetch,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
## Gets the inverse from the cache or caluates it if the value
## isn't stored
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix wrapper),
  ## reusing the cached copy when one is available; extra arguments are
  ## forwarded to solve().
  memoised <- x$getinverse()
  if (is.null(memoised)) {
    # Cache miss: compute the inverse and remember it for next time.
    source_matrix <- x$get()
    memoised <- solve(source_matrix, ...)
    x$setinverse(memoised)
  } else {
    message("getting cached data")
  }
  memoised
}
|
/cachematrix.R
|
no_license
|
EthanAskew/ProgrammingAssignment2
|
R
| false
| false
| 1,024
|
r
|
## Pair of functions that like the examples given calculate the
## inverse of a matrix, or gets the value from the cache
## Makes a special "vector", which is really a list containing
##a function to
##set the value of the matrix
##get the value of the matrix
##set the value of the inverse
##get the value of the inverse
# Constructor for a cache-aware matrix: returns a list of four closures that
# share the enclosing environment, where `x` holds the matrix and `m` the
# cached inverse (NULL until setinverse() is called).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
# Named accessors form the object's public interface.
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Gets the inverse from the cache or caluates it if the value
## isn't stored
# Inverse-with-memoisation: `x` is a makeCacheMatrix wrapper; extra arguments
# are forwarded to solve(). On a cache hit the stored inverse is returned
# (with a message); on a miss the inverse is computed and cached.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops-function.R
\name{register_native_user_function}
\alias{register_native_user_function}
\title{Register Native UserFunction}
\usage{
register_native_user_function(op_id, module_name, factory_method_name)
}
\arguments{
\item{op_id}{Identifier of the operation to register}

\item{module_name}{Name of the native module that exports the factory method}

\item{factory_method_name}{Name of the factory method used to instantiate the user-defined Function}
}
\description{
Registers a native user-defined Function that can be subsequently
instantiated using the ‘native_user_function’ method.
}
|
/man/register_native_user_function.Rd
|
permissive
|
Bhaskers-Blu-Org2/CNTK-R
|
R
| false
| true
| 469
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops-function.R
\name{register_native_user_function}
\alias{register_native_user_function}
\title{Register Native UserFunction}
\usage{
register_native_user_function(op_id, module_name, factory_method_name)
}
\arguments{
\item{op_id}{Identifier of the operation to register}

\item{module_name}{Name of the native module that exports the factory method}

\item{factory_method_name}{Name of the factory method used to instantiate the user-defined Function}
}
\description{
Registers a native user-defined Function that can be subsequently
instantiated using the ‘native_user_function’ method.
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gram_mat.R
\name{gram_mat}
\alias{gram_mat}
\title{Gram Matrix}
\usage{
gram_mat(mydat, sigma)
}
\arguments{
\item{mydat}{is the training data.}
\item{sigma}{is the scale parameter for use in the gaussian kernel.}
}
\description{
This function calculates the NxN gram matrix -- Kernel(X, X)
}
\examples{
trainingData <- dataTrain(10, 5)
gram_mat(trainingData, 1.5)
}
|
/man/gram_mat.Rd
|
no_license
|
waitalone/svdd
|
R
| false
| false
| 455
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gram_mat.R
\name{gram_mat}
\alias{gram_mat}
\title{Gram Matrix}
\usage{
gram_mat(mydat, sigma)
}
\arguments{
\item{mydat}{is the training data.}
\item{sigma}{is the scale parameter for use in the gaussian kernel.}
}
\description{
This function calculates the NxN gram matrix -- Kernel(X, X)
}
\examples{
trainingData <- dataTrain(10, 5)
gram_mat(trainingData, 1.5)
}
|
## Getting & Cleaning Data course project (run_analysis.R).
## Merges the UCI HAR test/train sets, extracts mean/std measurements,
## attaches descriptive activity names, and writes a tidy data set with the
## average of each variable per subject and activity.

# Loading required libraries
library(dplyr)
library(reshape2)

# Metadata: activity id -> label lookup, and the feature (column) names
activity_labels <- read.table("./activity_labels.txt", quote = "\"")
names(activity_labels) <- c("activity_id", "activity_label")
features <- read.table("./features.txt", quote = "\"")

# Getting the test data
subject_test <- read.table("./test/subject_test.txt", quote = "\"")
X_test <- read.table("./test/X_test.txt", quote = "\"")
y_test <- read.table("./test/y_test.txt", quote = "\"")
# Combining subject, y and X test data column-wise
syX_test <- cbind(subject_test, y_test, X_test)

# Getting the train data
subject_train <- read.table("./train/subject_train.txt", quote = "\"")
X_train <- read.table("./train/X_train.txt", quote = "\"")
y_train <- read.table("./train/y_train.txt", quote = "\"")
# Combining subject, y and X train data column-wise
syX_train <- cbind(subject_train, y_train, X_train)

# Step 1: merge the test and train sets
CombinedTestTrain <- rbind(syX_test, syX_train)

# Step 4 (performed before steps 2 and 3): assign descriptive variable names
names(CombinedTestTrain) <- c("subject", "activity_id", as.character(features$V2))
# Drop duplicated feature columns so select() below does not fail on them
duplicateCols <- duplicated(names(CombinedTestTrain))
CleanData <- CombinedTestTrain[, !duplicateCols]

# Step 2: keep only mean/std measurements (plus the subject/activity keys)
CleanDataMeanStd <- select(CleanData, matches("(subject|activity|std|mean)", ignore.case = TRUE))

# Step 3: use descriptive activity names instead of numeric ids
CleanDataMeanStd_withActivityLabels <- inner_join(activity_labels, CleanDataMeanStd, by = "activity_id")
CleanDataMeanStd_withActivityLabels <- select(CleanDataMeanStd_withActivityLabels, -contains("activity_id"))

# Step 5: independent tidy data set with the average of each variable for
# each activity and each subject
meltData <- melt(CleanDataMeanStd_withActivityLabels, id = c("subject", "activity_label"))
castedData <- dcast(meltData, subject + activity_label ~ variable, mean)
# FIX: write.table's argument is spelled `row.names`; the original `row.name`
# only worked through partial argument matching
write.table(castedData, "tidy.csv", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
nikan/GCDProject
|
R
| false
| false
| 2,173
|
r
|
## Getting & Cleaning Data course project: merge the UCI HAR test/train
## sets, keep mean/std measurements, label activities, and write a tidy
## per-subject/per-activity average data set.
# Loading required libraries
library(dplyr)
library(reshape2)
# Metadata: activity id -> label lookup, and the feature (column) names
activity_labels <- read.table("./activity_labels.txt", quote="\"")
names(activity_labels) <- c("activity_id","activity_label")
features <- read.table("./features.txt", quote="\"")
# Getting the test data
subject_test <- read.table("./test/subject_test.txt", quote="\"")
X_test <- read.table("./test/X_test.txt", quote="\"")
y_test <- read.table("./test/y_test.txt", quote="\"")
# Combining subject, y and X test data column-wise
syX_test <- cbind(subject_test, y_test, X_test)
# Getting the train data
subject_train <- read.table("./train/subject_train.txt", quote="\"")
X_train <- read.table("./train/X_train.txt", quote="\"")
y_train <- read.table("./train/y_train.txt", quote="\"")
# Combining subject, y and X train data column-wise
syX_train <- cbind(subject_train,y_train,X_train)
# First required step
# Merging test and train data
CombinedTestTrain <- rbind(syX_test,syX_train)
# Fourth step, performed before the second and third
# Assigning descriptive variable names
names(CombinedTestTrain) <- c("subject", "activity_id", as.character(features$V2))
# Cleaning duplicate columns (duplicated feature names break select() below)
duplicateCols <- duplicated(names(CombinedTestTrain))
CleanData <- CombinedTestTrain[,!duplicateCols]
# Second step: extracts only the measurements on the mean and standard deviation for each measurement.
# Retrieving only cols with mean or std in their labels along with the later needed subject and activity
CleanDataMeanStd <- select(CleanData, matches("(subject|activity|std|mean)", ignore.case=TRUE))
# Third step: uses descriptive activity names to name the activities in the data set
CleanDataMeanStd_withActivityLabels <- inner_join(activity_labels,CleanDataMeanStd, by="activity_id")
CleanDataMeanStd_withActivityLabels <- select(CleanDataMeanStd_withActivityLabels, -contains("activity_id"))
# Fifth step: from the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject
meltData <- melt(CleanDataMeanStd_withActivityLabels, id=c("subject","activity_label"))
castedData <- dcast(meltData, subject + activity_label ~ variable, mean)
# NOTE(review): `row.name` relies on partial matching of `row.names` — spell it out
write.table(castedData, "tidy.csv", row.name=FALSE)
|
# WebPPL/JavaScript source held as an R string (presumably injected into a
# webppl model elsewhere). For each level of `age` ("month") it builds
# item-keyed dictionaries of utility scores (PAC/PAT associative and
# phonological, t2) plus a boolean "learned" indicator, and collects them
# into a single `learning` dictionary keyed by month.
# NOTE(review): assumes a `data` array with fields age/item/PAC_assoc/
# PAT_assoc/PAC_phono_t2/PAT_phono_t2/learned — confirm against the caller.
# The string body is runtime data and must not be edited here.
helper <- '
var levels = function(a, lvl){ return _.uniq(_.pluck(a, lvl)) }
var months = levels(data, "age");
var newData = map(function(month){
//Select the data that correspond to each month
var itemInfo = {age: month}
var itemData = _.where(data, itemInfo)
//prepare data for a dictionary with the utilies (freq, length, Sem_PAC, Phono_PAC)
//frequency
//var value_freq = map(function(single){
//return _.object([single.item], [single.freq])
//}, itemData)
//Length
//var value_len = map(function(single){
//return _.object([single.item], [single.length])
//}, itemData)
//Sem PAC
var value_PAC_assoc = map(function(single){
return _.object([single.item], [single.PAC_assoc])
}, itemData)
//Sem PAT
var value_PAT_assoc = map(function(single){
return _.object([single.item], [single.PAT_assoc])
}, itemData)
//Phono PAC t2
var value_PAC_phono = map(function(single){
return _.object([single.item], [single.PAC_phono_t2])
}, itemData)
//Phono PAT t2
var value_PAT_phono = map(function(single){
return _.object([single.item], [single.PAT_phono_t2])
}, itemData)
//Also prepare data for a dictionary with "definitions" as keys and boolean "learned" as values
var learned_pair = map(function(single){
return _.object([single.item], [single.learned])
}, itemData)
//now get the items
var word = map(function(w){_.keys(w)[0]}, value_PAC_assoc)
//get the utilities scores
//var util_freq = map(function(w){w[_.keys(w)[0]]}, value_freq)
//var util_len = map(function(w){w[_.keys(w)[0]]}, value_len)
var util_PAC_assoc = map(function(w){w[_.keys(w)[0]]}, value_PAC_assoc)
var util_PAC_phono = map(function(w){w[_.keys(w)[0]]}, value_PAC_phono)
var util_PAT_assoc = map(function(w){w[_.keys(w)[0]]}, value_PAT_assoc)
var util_PAT_phono = map(function(w){w[_.keys(w)[0]]}, value_PAT_phono)
//get "if learned" boolean score
var learned_score = map(function(w){w[_.keys(w)[0]]}, learned_pair)
//output the utility dictionary
//var frequency= _.object(word, util_freq)
//var length= _.object(word, util_len)
var PAC_assoc= _.object(word, util_PAC_assoc)
var PAC_phono= _.object(word, util_PAC_phono)
var PAT_assoc= _.object(word, util_PAT_assoc)
var PAT_phono= _.object(word, util_PAT_phono)
//output the "if learned" dictionary
var isLearnt = _.object(word, learned_score)
//Combine everything
//var month_sub_dict = _.object(["frequency","length","PAC_assoc","PAC_phono", "PAT_assoc", "PAT_phono", "isLearnt"],
//[frequency, length, PAC_assoc, PAC_phono, PAT_assoc, PAT_phono, isLearnt])
var month_sub_dict = _.object(["PAC_assoc","PAC_phono", "PAT_assoc", "PAT_phono", "isLearnt"],
[PAC_assoc, PAC_phono, PAT_assoc, PAT_phono, isLearnt])
var month_dict = _.object([month], [month_sub_dict])
return month_dict
}, months)
var month_keys = map(function(w){_.keys(w)[0]}, newData )
var month_values = map(function(w){w[_.keys(w)[0]]}, newData)
var learning = _.object(month_keys, month_values)
'
|
/models/helpers.R
|
no_license
|
afourtassi/networks
|
R
| false
| false
| 2,928
|
r
|
# Embedded WebPPL/underscore.js code stored as an R string. Per age group it
# maps each item to its PAC/PAT associative and phonological (t2) utility
# scores and its learned/not-learned flag, producing the nested `learning`
# dictionary (month -> score dictionaries).
# NOTE(review): the commented-out blocks for frequency/length suggest those
# utilities were dropped deliberately — verify before re-enabling.
# The string content is data consumed at runtime; do not modify it here.
helper <- '
var levels = function(a, lvl){ return _.uniq(_.pluck(a, lvl)) }
var months = levels(data, "age");
var newData = map(function(month){
//Select the data that correspond to each month
var itemInfo = {age: month}
var itemData = _.where(data, itemInfo)
//prepare data for a dictionary with the utilies (freq, length, Sem_PAC, Phono_PAC)
//frequency
//var value_freq = map(function(single){
//return _.object([single.item], [single.freq])
//}, itemData)
//Length
//var value_len = map(function(single){
//return _.object([single.item], [single.length])
//}, itemData)
//Sem PAC
var value_PAC_assoc = map(function(single){
return _.object([single.item], [single.PAC_assoc])
}, itemData)
//Sem PAT
var value_PAT_assoc = map(function(single){
return _.object([single.item], [single.PAT_assoc])
}, itemData)
//Phono PAC t2
var value_PAC_phono = map(function(single){
return _.object([single.item], [single.PAC_phono_t2])
}, itemData)
//Phono PAT t2
var value_PAT_phono = map(function(single){
return _.object([single.item], [single.PAT_phono_t2])
}, itemData)
//Also prepare data for a dictionary with "definitions" as keys and boolean "learned" as values
var learned_pair = map(function(single){
return _.object([single.item], [single.learned])
}, itemData)
//now get the items
var word = map(function(w){_.keys(w)[0]}, value_PAC_assoc)
//get the utilities scores
//var util_freq = map(function(w){w[_.keys(w)[0]]}, value_freq)
//var util_len = map(function(w){w[_.keys(w)[0]]}, value_len)
var util_PAC_assoc = map(function(w){w[_.keys(w)[0]]}, value_PAC_assoc)
var util_PAC_phono = map(function(w){w[_.keys(w)[0]]}, value_PAC_phono)
var util_PAT_assoc = map(function(w){w[_.keys(w)[0]]}, value_PAT_assoc)
var util_PAT_phono = map(function(w){w[_.keys(w)[0]]}, value_PAT_phono)
//get "if learned" boolean score
var learned_score = map(function(w){w[_.keys(w)[0]]}, learned_pair)
//output the utility dictionary
//var frequency= _.object(word, util_freq)
//var length= _.object(word, util_len)
var PAC_assoc= _.object(word, util_PAC_assoc)
var PAC_phono= _.object(word, util_PAC_phono)
var PAT_assoc= _.object(word, util_PAT_assoc)
var PAT_phono= _.object(word, util_PAT_phono)
//output the "if learned" dictionary
var isLearnt = _.object(word, learned_score)
//Combine everything
//var month_sub_dict = _.object(["frequency","length","PAC_assoc","PAC_phono", "PAT_assoc", "PAT_phono", "isLearnt"],
//[frequency, length, PAC_assoc, PAC_phono, PAT_assoc, PAT_phono, isLearnt])
var month_sub_dict = _.object(["PAC_assoc","PAC_phono", "PAT_assoc", "PAT_phono", "isLearnt"],
[PAC_assoc, PAC_phono, PAT_assoc, PAT_phono, isLearnt])
var month_dict = _.object([month], [month_sub_dict])
return month_dict
}, months)
var month_keys = map(function(w){_.keys(w)[0]}, newData )
var month_values = map(function(w){w[_.keys(w)[0]]}, newData)
var learning = _.object(month_keys, month_values)
'
|
# Fuzzer-generated regression input (libFuzzer/valgrind harness) for the
# internal routine diffrprojects:::dist_mat_absolute. `y` is deliberately
# empty (integer(0)) to probe zero-length handling; the extreme values in
# `x` came from the fuzzer, not from any real use case.
testlist <- list(x = c(16777217L, -452984832L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the structure so the harness log shows what (if anything) came back
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962065-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 417
|
r
|
# Auto-generated fuzzing test case: calls the unexported (:::) function
# dist_mat_absolute with a fuzzer-chosen integer vector `x` and an empty
# `y`, then dumps the result structure for the valgrind/libFuzzer log.
testlist <- list(x = c(16777217L, -452984832L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
## Exploratory Data Analysis - Plot 2.
## Restricts the household power data to 2007-02-01 and 2007-02-02 and plots
## Global Active Power against time as a line chart (plot2.png).
setwd("C:/COURSERA/Exploratory_data_analysis/Project1/ExData_Plotting1")
library(dplyr)
library(data.table)
# "?" marks missing values in the raw file
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Remove rows with any NA
check_na <- complete.cases(data)
data <- data[check_na, ]
# Check class of each variable
sapply(data, class)
# Combine Date and Time into a single datetime column; convert to POSIXct
# because POSIXlt columns behave badly inside data frames
data$Date <- paste(data$Date, data$Time)
data$Date <- as.POSIXct(strptime(data$Date, "%d/%m/%Y %H:%M:%S"))
data$Time <- NULL  # now stored in Date
# Filter the two selected dates into test1.
# BUG FIX: the original filtered on the undefined object `test` (should have
# been `test1`), and overwrote Date with as.Date() before plotting, which
# would have collapsed the time-of-day axis.
test1 <- data
mydates <- as.Date(test1$Date) %in% as.Date(c("2007-02-01", "2007-02-02"))
test1 <- test1[mydates, ]
# Plot 2
png("plot2.png", width = 480, height = 480)
plot(test1$Date, test1$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
NickMaki/ExData_Plotting1
|
R
| false
| false
| 1,065
|
r
|
## Exploratory Data Analysis - Plot 2 (duplicate copy).
## Loads household power consumption data, keeps 2007-02-01/02, and writes a
## line chart of Global Active Power vs. datetime to plot2.png.
setwd("C:/COURSERA/Exploratory_data_analysis/Project1/ExData_Plotting1")
library(dplyr)
library(data.table)
# The raw file uses "?" for missing values
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Drop incomplete rows
check_na <- complete.cases(data)
data <- data[check_na, ]
# Check class of each variable
sapply(data, class)
# Merge Date and Time into one POSIXct datetime (POSIXlt is unsafe in
# data-frame columns)
data$Date <- paste(data$Date, data$Time)
data$Date <- as.POSIXct(strptime(data$Date, "%d/%m/%Y %H:%M:%S"))
data$Time <- NULL  # Time is folded into Date
# BUG FIX: the original built the date masks from the undefined object
# `test` instead of `test1`, and destroyed the time component by replacing
# Date with as.Date() before plotting.
test1 <- data
mydates <- as.Date(test1$Date) %in% as.Date(c("2007-02-01", "2007-02-02"))
test1 <- test1[mydates, ]
# Plot 2
png("plot2.png", width = 480, height = 480)
plot(test1$Date, test1$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)")
dev.off()
|
\name{ghkvec}
\alias{ghkvec}
\concept{multivariate normal distribution}
\concept{GHK method}
\concept{integral}
\title{Compute GHK approximation to Multivariate Normal Integrals}
\description{
\code{ghkvec} computes the GHK approximation to the integral of a multivariate normal density over a half plane defined by a set of truncation points.
}
\usage{ghkvec(L, trunpt, above, r, HALTON=TRUE, pn)}
\arguments{
\item{L }{ lower triangular Cholesky root of covariance matrix }
\item{trunpt}{ vector of truncation points}
\item{above }{ vector of indicators for truncation above(1) or below(0) }
\item{r }{ number of draws to use in GHK }
\item{HALTON}{ if \code{TRUE}, uses Halton sequence. If \code{FALSE}, uses \code{R::runif} random number generator (def: \code{TRUE})}
\item{pn }{ prime number used for Halton sequence (def: the smallest prime numbers, i.e. 2, 3, 5, ...)}
}
\value{Approximation to integral}
\note{
\code{ghkvec} can accept a vector of truncations and compute more than one integral. That is, \code{length(trunpt)/length(above)} number of different integrals, each with the same variance and mean 0 but different truncation points. See 'examples' below for an example with two integrals at different truncation points.
The user can choose between two random number generators for the numerical integration: pseudo-random numbers by \code{R::runif} or quasi-random numbers by a Halton sequence. Generally, the quasi-random (Halton) sequence is more uniformly distributed within the domain, so it shows lower error and improved convergence than the pseudo-random (\code{runif}) sequence (Morokoff and Caflisch, 1995).
For the prime numbers generating Halton sequence, we suggest to use the first smallest prime numbers. Halton (1960) and Kocis and Whiten (1997) prove that their discrepancy measures (how uniformly the sample points are distributed) have the upper bounds, which decrease as the generating prime number decreases.
Note: For a high-dimensional integration (10 or more dimensions), we suggest use of the pseudo-random number generator (\code{R::runif}) because, according to Kocis and Whiten (1997), Halton sequences may be highly correlated when the dimension is 10 or more.
}
\author{
Peter Rossi, Anderson School, UCLA, \email{perossichi@gmail.com}.\cr
Keunwoo Kim, Anderson School, UCLA, \email{keunwoo.kim@gmail.com}.
}
\references{
For further discussion, see \emph{Bayesian Statistics and Marketing} by Rossi, Allenby, and McCulloch, Chapter 2. \cr \url{http://www.perossi.org/home/bsm-1}
For Halton sequence, see Halton (1960, Numerische Mathematik), Morokoff and Caflisch (1995, Journal of Computational Physics), and Kocis and Whiten (1997, ACM Transactions on Mathematical Software).
}
\examples{
Sigma = matrix(c(1, 0.5, 0.5, 1), ncol=2)
L = t(chol(Sigma))
trunpt = c(0,0,1,1)
above = c(1,1)
# drawn by Halton sequence
ghkvec(L, trunpt, above, r=100)
# use prime number 11 and 13
ghkvec(L, trunpt, above, r=100, HALTON=TRUE, pn=c(11,13))
# drawn by R::runif
ghkvec(L, trunpt, above, r=100, HALTON=FALSE)
}
\keyword{distribution}
|
/man/ghkvec.Rd
|
no_license
|
nicoaramayo/bayesm
|
R
| false
| false
| 3,123
|
rd
|
\name{ghkvec}
\alias{ghkvec}
\concept{multivariate normal distribution}
\concept{GHK method}
\concept{integral}
\title{Compute GHK approximation to Multivariate Normal Integrals}
\description{
\code{ghkvec} computes the GHK approximation to the integral of a multivariate normal density over a half plane defined by a set of truncation points.
}
\usage{ghkvec(L, trunpt, above, r, HALTON=TRUE, pn)}
\arguments{
\item{L }{ lower triangular Cholesky root of covariance matrix }
\item{trunpt}{ vector of truncation points}
\item{above }{ vector of indicators for truncation above(1) or below(0) }
\item{r }{ number of draws to use in GHK }
\item{HALTON}{ if \code{TRUE}, uses Halton sequence. If \code{FALSE}, uses \code{R::runif} random number generator (def: \code{TRUE})}
\item{pn }{ prime number used for Halton sequence (def: the smallest prime numbers, i.e. 2, 3, 5, ...)}
}
\value{Approximation to integral}
\note{
\code{ghkvec} can accept a vector of truncations and compute more than one integral. That is, \code{length(trunpt)/length(above)} number of different integrals, each with the same variance and mean 0 but different truncation points. See 'examples' below for an example with two integrals at different truncation points.
The user can choose between two random number generators for the numerical integration: pseudo-random numbers by \code{R::runif} or quasi-random numbers by a Halton sequence. Generally, the quasi-random (Halton) sequence is more uniformly distributed within the domain, so it shows lower error and improved convergence than the pseudo-random (\code{runif}) sequence (Morokoff and Caflisch, 1995).
For the prime numbers generating Halton sequence, we suggest to use the first smallest prime numbers. Halton (1960) and Kocis and Whiten (1997) prove that their discrepancy measures (how uniformly the sample points are distributed) have the upper bounds, which decrease as the generating prime number decreases.
Note: For a high-dimensional integration (10 or more dimensions), we suggest use of the pseudo-random number generator (\code{R::runif}) because, according to Kocis and Whiten (1997), Halton sequences may be highly correlated when the dimension is 10 or more.
}
\author{
Peter Rossi, Anderson School, UCLA, \email{perossichi@gmail.com}.\cr
Keunwoo Kim, Anderson School, UCLA, \email{keunwoo.kim@gmail.com}.
}
\references{
For further discussion, see \emph{Bayesian Statistics and Marketing} by Rossi, Allenby, and McCulloch, Chapter 2. \cr \url{http://www.perossi.org/home/bsm-1}
For Halton sequence, see Halton (1960, Numerische Mathematik), Morokoff and Caflisch (1995, Journal of Computational Physics), and Kocis and Whiten (1997, ACM Transactions on Mathematical Software).
}
\examples{
Sigma = matrix(c(1, 0.5, 0.5, 1), ncol=2)
L = t(chol(Sigma))
trunpt = c(0,0,1,1)
above = c(1,1)
# drawn by Halton sequence
ghkvec(L, trunpt, above, r=100)
# use prime number 11 and 13
ghkvec(L, trunpt, above, r=100, HALTON=TRUE, pn=c(11,13))
# drawn by R::runif
ghkvec(L, trunpt, above, r=100, HALTON=FALSE)
}
\keyword{distribution}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_tlo_contacts}
\alias{get_tlo_contacts}
\title{Get TFE and OPS Contacts}
\usage{
get_tlo_contacts(conn, org_ids)
}
\arguments{
\item{conn}{connection to the data warehouse}
\item{org_ids}{list of organisation ids}
}
\value{
tibble
}
\description{
One needs to provide both organisation ids and names
because the main accounts table contains organisation ids but only
tlo names.
}
\details{
Get a list of top-level and organisational contacts for a list
of organisations.
}
\examples{
\dontrun{
conn <- connect_dw()
dt <- get_all_ncs(conn)
get_tlo_contacts(conn, dt$org_id)
}
}
|
/man/get_tlo_contacts.Rd
|
permissive
|
koad7/anahita
|
R
| false
| true
| 675
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_tlo_contacts}
\alias{get_tlo_contacts}
\title{Get TFE and OPS Contacts}
\usage{
get_tlo_contacts(conn, org_ids)
}
\arguments{
\item{conn}{connection to the data warehouse}
\item{org_ids}{list of organisation ids}
}
\value{
tibble
}
\description{
One needs to provide both organisation ids and names
because the main accounts table contains organisation ids but only
tlo names.
}
\details{
Get a list of top-level and organisational contacts for a list
of organisations.
}
\examples{
\dontrun{
conn <- connect_dw()
dt <- get_all_ncs(conn)
get_tlo_contacts(conn, dt$org_id)
}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{drexplorer-package}
\alias{drexplorer}
\alias{drexplorer-package}
\title{Dose-response Explorer}
\description{
There is a great variety of models developed for dose-response
data, many of which have been implemented in the drc and DoseFinding
packages. drexplorer combines both packages to aid the user to visually
examine and compare how existing models perform on the data. Another
important feature for drexplorer is to allow the user to identify outlier
measurements and visually examine how these outliers affect the fitted
model.
}
\author{
Pan Tong, Kevin R Coombes
}
\seealso{
\code{\link{NewmanTest}, \link{drOutlier}, \link{drModels}, \link{drFit}}
}
|
/man/drexplorer-package.Rd
|
no_license
|
hjanime/drexplorer
|
R
| false
| false
| 757
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{drexplorer-package}
\alias{drexplorer}
\alias{drexplorer-package}
\title{Dose-response Explorer}
\description{
There is a great variety of models developed for dose-response
data, many of which have been implemented in the drc and DoseFinding
packages. drexplorer combines both packages to aid the user to visually
examine and compare how existing models perform on the data. Another
important feature for drexplorer is to allow the user to identify outlier
measurements and visually examine how these outliers affect the fitted
model.
}
\author{
Pan Tong, Kevin R Coombes
}
\seealso{
\code{\link{NewmanTest}, \link{drOutlier}, \link{drModels}, \link{drFit}}
}
|
## We will first import the data, clean it up some, and then subset it down
## to what we want to analyze (2007-02-01 and 2007-02-02), and finally draw a
## histogram of Global Active Power to plot1.png.
library(dplyr)
# BUG FIX: the original referenced undefined objects `a` (intended header
# row) and `C` (intended data frame). Reading with header = TRUE and
# na.strings = "?" replaces the manual header surgery and makes the power
# column parse directly as numeric.
electricdata <- read.table("household_power_consumption.txt", sep = ";",
                           header = TRUE, na.strings = "?")
# Keep only the two dates of interest (dates are dd/mm/yyyy in the raw file)
electricdata_subset <- subset(electricdata, Date == "2/2/2007" | Date == "1/2/2007")
# Defensive coercion in case the column was read as a factor/character
electricdata_subset$Global_active_power <- as.numeric(as.character(electricdata_subset$Global_active_power))
## Now we make the graph.
# BUG FIX: the original histogrammed the FULL data set (the subset was never
# used) and copied the screen device with dev.copy; draw straight to the
# PNG device instead.
png("plot1.png", width = 480, height = 480)
hist(electricdata_subset$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     main = "Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
totokamari/Exploratory_Data_Analysis
|
R
| false
| false
| 684
|
r
|
## Plot 1 (duplicate copy): import the household power data, subset it to
## 2007-02-01/02, and write a histogram of Global Active Power to plot1.png.
library(dplyr)
# BUG FIX: the original used the undefined objects `a` (header row) and `C`
# (data frame). Reading with header = TRUE and na.strings = "?" gets correct
# column names and a numeric power column in one step.
electricdata <- read.table("household_power_consumption.txt", sep = ";",
                           header = TRUE, na.strings = "?")
# Restrict to the two target dates (raw file stores them as d/m/yyyy)
electricdata_subset <- subset(electricdata, Date == "2/2/2007" | Date == "1/2/2007")
# Coerce defensively in case the column came in as factor/character
electricdata_subset$Global_active_power <- as.numeric(as.character(electricdata_subset$Global_active_power))
# BUG FIX: plot the subset (previously unused) and render directly to the
# PNG device rather than dev.copy-ing the screen device.
png("plot1.png", width = 480, height = 480)
hist(electricdata_subset$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     main = "Global Active Power")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.