content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
\name{biSBM-package}
\alias{biSBM-package}
\alias{biSBM}
\docType{package}
\title{
\packageTitle{biSBM}
}
\description{
\packageDescription{biSBM}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{biSBM}
\packageIndices{biSBM}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{biSBM}
Maintainer: \packageMaintainer{biSBM}
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation ~~
% ~~ directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /biSBM/man/biSBM-package.Rd | no_license | amalag-19/bipartite_C | R | false | false | 775 | rd | \name{biSBM-package}
\alias{biSBM-package}
\alias{biSBM}
\docType{package}
\title{
\packageTitle{biSBM}
}
\description{
\packageDescription{biSBM}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{biSBM}
\packageIndices{biSBM}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{biSBM}
Maintainer: \packageMaintainer{biSBM}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation ~~
~~ directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
# Test setup: load the package under test and the shared fixtures.
# library() replaces require(): if xgboost is missing we want an immediate,
# loud error here rather than a warning followed by confusing failures below.
library(xgboost)
context("basic functions")

# Mushroom (agaricus) binary-classification data shipped with xgboost;
# $data is a sparse feature matrix, $label a 0/1 vector.
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test

# Fix the RNG so stochastic steps (subsampling, CV folds) are reproducible.
set.seed(1994)
test_that("train and predict binary classification", {
  # `<-` (not `=`) for local assignment, per R style conventions.
  nrounds <- 2
  # Training should print a "train-error" line for each boosting round.
  expect_output(
    bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
                   eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
  , "train-error")
  expect_equal(class(bst), "xgb.Booster")
  expect_equal(bst$niter, nrounds)

  # The evaluation log must hold exactly one row per boosting round.
  expect_false(is.null(bst$evaluation_log))
  expect_equal(nrow(bst$evaluation_log), nrounds)
  expect_lt(bst$evaluation_log[, min(train_error)], 0.03)

  # Prediction returns one probability per row of the input matrix.
  pred <- predict(bst, test$data)
  expect_length(pred, 1611)

  # Restricting prediction to the first tree still yields one value per row.
  pred1 <- predict(bst, train$data, ntreelimit = 1)
  expect_length(pred1, 6513)

  # Error computed by hand from first-tree predictions must match the first
  # entry of the training log (tolerance 1e-5; written "10e-6" originally).
  err_pred1 <- sum((pred1 > 0.5) != train$label) / length(train$label)
  err_log <- bst$evaluation_log[1, train_error]
  expect_lt(abs(err_pred1 - err_log), 1e-5)
})
test_that("train and predict softprob", {
  # 0-based class labels, as required by xgboost multi-class objectives.
  lb <- as.numeric(iris$Species) - 1
  set.seed(11)
  # Training should report the multi-class training error each round.
  expect_output(
    bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                   max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
                   objective = "multi:softprob", num_class=3)
  , "train-merror")
  expect_false(is.null(bst$evaluation_log))
  expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
  # One tree per class per boosting round: niter * num_class trees in total.
  expect_equal(bst$niter * 3, xgb.ntree(bst))
  # softprob returns a flat vector of per-class probabilities
  # (nrow(iris) * num_class values).
  pred <- predict(bst, as.matrix(iris[, -5]))
  expect_length(pred, nrow(iris) * 3)
  # row sums add up to total probability of 1:
  expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
  # manually calculate error at the last iteration:
  # reshape = TRUE yields an nrow x num_class matrix instead of the flat vector.
  mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
  expect_equal(as.numeric(t(mpred)), pred)
  pred_labels <- max.col(mpred) - 1
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
  # manually calculate error at the 1st iteration:
  mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1)
  pred_labels <- max.col(mpred) - 1
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
})
test_that("train and predict softmax", {
  # Class labels as 0-based integers, as required by multi-class objectives.
  class_labels <- as.numeric(iris$Species) - 1
  features <- as.matrix(iris[, -5])
  set.seed(11)
  # Training must report the multi-class error metric on the training set.
  expect_output(
    model <- xgboost(data = features, label = class_labels,
                     max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
                     objective = "multi:softmax", num_class = 3)
  , "train-merror")
  expect_false(is.null(model$evaluation_log))
  expect_lt(model$evaluation_log[, min(train_merror)], 0.025)
  # One tree per class per round: 5 rounds x 3 classes.
  expect_equal(model$niter * 3, xgb.ntree(model))
  # softmax yields a hard class prediction per observation (not probabilities).
  predicted <- predict(model, features)
  expect_length(predicted, nrow(iris))
  # The manually computed error rate must agree with the final log entry.
  manual_err <- mean(predicted != class_labels)
  expect_equal(model$evaluation_log[5, train_merror], manual_err, tolerance = 5e-6)
})
test_that("train and predict RF", {
  set.seed(11)
  lb <- train$label
  # single iteration
  # num_parallel_tree = 20 with row/column subsampling makes this single
  # boosting round behave like a 20-tree random forest.
  bst <- xgboost(data = train$data, label = lb, max_depth = 5,
                 nthread = 2, nrounds = 1, objective = "binary:logistic",
                 num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
  expect_equal(bst$niter, 1)
  expect_equal(xgb.ntree(bst), 20)
  # Training-set error computed from predictions must match the logged error.
  pred <- predict(bst, train$data)
  pred_err <- sum((pred > 0.5) != lb)/length(lb)
  expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
  #expect_lt(pred_err, 0.03)
  # Limiting prediction to all 20 trees is a no-op: same error as above.
  pred <- predict(bst, train$data, ntreelimit = 20)
  pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
  expect_equal(pred_err_20, pred_err)
  #pred <- predict(bst, train$data, ntreelimit = 1)
  #pred_err_1 <- sum((pred > 0.5) != lb)/length(lb)
  #expect_lt(pred_err, pred_err_1)
  #expect_lt(pred_err, 0.08)
})
test_that("train and predict RF with softprob", {
  # Boosted random forest for multi-class: each round grows
  # num_parallel_tree trees per class, so the total tree count is
  # nrounds * num_class * num_parallel_tree.
  lb <- as.numeric(iris$Species) - 1
  nrounds <- 15
  set.seed(11)
  bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
                 max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
                 objective = "multi:softprob", num_class=3,
                 num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
  expect_equal(bst$niter, 15)
  expect_equal(xgb.ntree(bst), 15*3*4)
  # predict for all iterations:
  pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE)
  expect_equal(dim(pred), c(nrow(iris), 3))
  pred_labels <- max.col(pred) - 1
  err <- sum(pred_labels != lb)/length(lb)
  expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
  # predict for 7 iterations and adjust for 4 parallel trees per iteration
  # (ntreelimit counts parallel trees, hence 7 * 4).
  pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4)
  err <- sum((max.col(pred) - 1) != lb)/length(lb)
  expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6)
})
test_that("use of multiple eval metrics works", {
  # Passing eval_metric repeatedly registers all three metrics; each round of
  # training output must then mention them, in registration order.
  expect_output(
    booster <- xgboost(data = train$data, label = train$label,
                       max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
                       objective = "binary:logistic",
                       eval_metric = "error", eval_metric = "auc",
                       eval_metric = "logloss")
  , "train-error.*train-auc.*train-logloss")
  # The log gains one column per metric plus the iteration counter:
  # 2 rounds x (iter + 3 metrics).
  expect_false(is.null(booster$evaluation_log))
  expect_equal(dim(booster$evaluation_log), c(2, 4))
  expect_equal(colnames(booster$evaluation_log),
               c("iter", "train_error", "train_auc", "train_logloss"))
})
test_that("training continuation works", {
  dtrain <- xgb.DMatrix(train$data, label = train$label)
  # `<-` (not `=`) for assignment, per R style conventions.
  watchlist <- list(train = dtrain)
  param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)

  # Reference model: 4 boosting iterations in a single call.
  set.seed(11)
  bst <- xgb.train(param, dtrain, nrounds = 4, watchlist)

  # Same seed, but train 2 iterations now and continue for 2 more; the
  # continued model must be byte-identical to the one-shot reference.
  set.seed(11)
  bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist)
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1)
  expect_equal(bst$raw, bst2$raw)
  # Continuation from an xgb.Booster appends to the evaluation log (4 rows).
  expect_false(is.null(bst2$evaluation_log))
  expect_equal(dim(bst2$evaluation_log), c(4, 2))
  expect_equal(bst2$evaluation_log, bst$evaluation_log)

  # Continuing from raw model data: the log restarts (2 rows only).
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1$raw)
  expect_equal(bst$raw, bst2$raw)
  expect_equal(dim(bst2$evaluation_log), c(2, 2))

  # Continuing from a model saved to disk. The original leaked the file;
  # on.exit() removes it even if an expectation below fails.
  xgb.save(bst1, "xgboost.model")
  on.exit(unlink("xgboost.model"), add = TRUE)
  bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = "xgboost.model")
  expect_equal(bst$raw, bst2$raw)
  expect_equal(dim(bst2$evaluation_log), c(2, 2))
})
test_that("xgb.cv works", {
  set.seed(11)
  # Two rounds of 5-fold cross-validation on the agaricus training data.
  cv_res <- xgb.cv(data = train$data, label = train$label,
                   nfold = 5, nrounds = 2,
                   max_depth = 2, eta = 1, nthread = 2,
                   objective = "binary:logistic", verbose = TRUE)
  expect_is(cv_res, "xgb.cv.synchronous")
  # The log tracks held-out error; both its mean and spread should be small.
  expect_false(is.null(cv_res$evaluation_log))
  expect_lt(cv_res$evaluation_log[, min(test_error_mean)], 0.03)
  expect_lt(cv_res$evaluation_log[, min(test_error_std)], 0.004)
  expect_equal(cv_res$niter, 2)
  # Bookkeeping fields returned by xgb.cv.
  expect_false(is.null(cv_res$folds) && is.list(cv_res$folds))
  expect_length(cv_res$folds, 5)
  expect_false(is.null(cv_res$params) && is.list(cv_res$params))
  expect_false(is.null(cv_res$callbacks))
  expect_false(is.null(cv_res$call))
})
| /R-package/tests/testthat/test_basic.R | permissive | zoucuomiao/xgboost | R | false | false | 7,316 | r | require(xgboost)
context("basic functions")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
set.seed(1994)
test_that("train and predict binary classification", {
nrounds = 2
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = nrounds, objective = "binary:logistic")
, "train-error")
expect_equal(class(bst), "xgb.Booster")
expect_equal(bst$niter, nrounds)
expect_false(is.null(bst$evaluation_log))
expect_equal(nrow(bst$evaluation_log), nrounds)
expect_lt(bst$evaluation_log[, min(train_error)], 0.03)
pred <- predict(bst, test$data)
expect_length(pred, 1611)
pred1 <- predict(bst, train$data, ntreelimit = 1)
expect_length(pred1, 6513)
err_pred1 <- sum((pred1 > 0.5) != train$label)/length(train$label)
err_log <- bst$evaluation_log[1, train_error]
expect_lt(abs(err_pred1 - err_log), 10e-6)
})
test_that("train and predict softprob", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softprob", num_class=3)
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris) * 3)
# row sums add up to total probability of 1:
expect_equal(rowSums(matrix(pred, ncol=3, byrow=TRUE)), rep(1, nrow(iris)), tolerance = 1e-7)
# manually calculate error at the last iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE)
expect_equal(as.numeric(t(mpred)), pred)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
# manually calculate error at the 1st iteration:
mpred <- predict(bst, as.matrix(iris[, -5]), reshape = TRUE, ntreelimit = 1)
pred_labels <- max.col(mpred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[1, train_merror], err, tolerance = 5e-6)
})
test_that("train and predict softmax", {
lb <- as.numeric(iris$Species) - 1
set.seed(11)
expect_output(
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.5, nthread = 2, nrounds = 5,
objective = "multi:softmax", num_class=3)
, "train-merror")
expect_false(is.null(bst$evaluation_log))
expect_lt(bst$evaluation_log[, min(train_merror)], 0.025)
expect_equal(bst$niter * 3, xgb.ntree(bst))
pred <- predict(bst, as.matrix(iris[, -5]))
expect_length(pred, nrow(iris))
err <- sum(pred != lb)/length(lb)
expect_equal(bst$evaluation_log[5, train_merror], err, tolerance = 5e-6)
})
test_that("train and predict RF", {
set.seed(11)
lb <- train$label
# single iteration
bst <- xgboost(data = train$data, label = lb, max_depth = 5,
nthread = 2, nrounds = 1, objective = "binary:logistic",
num_parallel_tree = 20, subsample = 0.6, colsample_bytree = 0.1)
expect_equal(bst$niter, 1)
expect_equal(xgb.ntree(bst), 20)
pred <- predict(bst, train$data)
pred_err <- sum((pred > 0.5) != lb)/length(lb)
expect_lt(abs(bst$evaluation_log[1, train_error] - pred_err), 10e-6)
#expect_lt(pred_err, 0.03)
pred <- predict(bst, train$data, ntreelimit = 20)
pred_err_20 <- sum((pred > 0.5) != lb)/length(lb)
expect_equal(pred_err_20, pred_err)
#pred <- predict(bst, train$data, ntreelimit = 1)
#pred_err_1 <- sum((pred > 0.5) != lb)/length(lb)
#expect_lt(pred_err, pred_err_1)
#expect_lt(pred_err, 0.08)
})
test_that("train and predict RF with softprob", {
lb <- as.numeric(iris$Species) - 1
nrounds <- 15
set.seed(11)
bst <- xgboost(data = as.matrix(iris[, -5]), label = lb,
max_depth = 3, eta = 0.9, nthread = 2, nrounds = nrounds,
objective = "multi:softprob", num_class=3,
num_parallel_tree = 4, subsample = 0.5, colsample_bytree = 0.5)
expect_equal(bst$niter, 15)
expect_equal(xgb.ntree(bst), 15*3*4)
# predict for all iterations:
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE)
expect_equal(dim(pred), c(nrow(iris), 3))
pred_labels <- max.col(pred) - 1
err <- sum(pred_labels != lb)/length(lb)
expect_equal(bst$evaluation_log[nrounds, train_merror], err, tolerance = 5e-6)
# predict for 7 iterations and adjust for 4 parallel trees per iteration
pred <- predict(bst, as.matrix(iris[, -5]), reshape=TRUE, ntreelimit = 7 * 4)
err <- sum((max.col(pred) - 1) != lb)/length(lb)
expect_equal(bst$evaluation_log[7, train_merror], err, tolerance = 5e-6)
})
test_that("use of multiple eval metrics works", {
expect_output(
bst <- xgboost(data = train$data, label = train$label, max_depth = 2,
eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic",
eval_metric = 'error', eval_metric = 'auc', eval_metric = "logloss")
, "train-error.*train-auc.*train-logloss")
expect_false(is.null(bst$evaluation_log))
expect_equal(dim(bst$evaluation_log), c(2, 4))
expect_equal(colnames(bst$evaluation_log), c("iter", "train_error", "train_auc", "train_logloss"))
})
test_that("training continuation works", {
dtrain <- xgb.DMatrix(train$data, label = train$label)
watchlist = list(train=dtrain)
param <- list(objective = "binary:logistic", max_depth = 2, eta = 1, nthread = 2)
# for the reference, use 4 iterations at once:
set.seed(11)
bst <- xgb.train(param, dtrain, nrounds = 4, watchlist)
# first two iterations:
set.seed(11)
bst1 <- xgb.train(param, dtrain, nrounds = 2, watchlist)
# continue for two more:
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1)
expect_equal(bst$raw, bst2$raw)
expect_false(is.null(bst2$evaluation_log))
expect_equal(dim(bst2$evaluation_log), c(4, 2))
expect_equal(bst2$evaluation_log, bst$evaluation_log)
# test continuing from raw model data
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = bst1$raw)
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
# test continuing from a model in file
xgb.save(bst1, "xgboost.model")
bst2 <- xgb.train(param, dtrain, nrounds = 2, watchlist, xgb_model = "xgboost.model")
expect_equal(bst$raw, bst2$raw)
expect_equal(dim(bst2$evaluation_log), c(2, 2))
})
test_that("xgb.cv works", {
set.seed(11)
cv <- xgb.cv(data = train$data, label = train$label, max_depth = 2, nfold = 5,
eta = 1., nthread = 2, nrounds = 2, objective = "binary:logistic",
verbose=TRUE)
expect_is(cv, 'xgb.cv.synchronous')
expect_false(is.null(cv$evaluation_log))
expect_lt(cv$evaluation_log[, min(test_error_mean)], 0.03)
expect_lt(cv$evaluation_log[, min(test_error_std)], 0.004)
expect_equal(cv$niter, 2)
expect_false(is.null(cv$folds) && is.list(cv$folds))
expect_length(cv$folds, 5)
expect_false(is.null(cv$params) && is.list(cv$params))
expect_false(is.null(cv$callbacks))
expect_false(is.null(cv$call))
})
|
# Load packages
library(tidyverse)
library(extrafont)
library(ggtext)
library(Cairo)

# Get the data (TidyTuesday 2021, week 8: the Du Bois challenge).
tuesdata <- tidytuesdayR::tt_load(2021, week = 8)
freed_slaves_wide <- tuesdata$freed_slaves

# Long-format data used to make the plot later.
# Left-assignment (instead of the trailing `->` in the original) puts the
# pipeline's target up front, per tidyverse style.
# The 1800 "Slave" value is bumped by 1 -- presumably so Slave + Free sums
# to 100 for that year; TODO confirm against the raw data.
freed_slaves_long <- freed_slaves_wide %>%
  mutate(Slave = case_when(Year == 1800 ~ Slave + 1,
                           TRUE ~ Slave)) %>%
  pivot_longer(2:3,
               names_to = "status",
               values_to = "percentage")
# Percentage labels: a vertical position for each year's "xx%" annotation,
# with the Free percentage formatted as a string.
# Left-assignment replaces the original trailing `->` so the target is
# visible up front, per tidyverse style.
text_label <- freed_slaves_wide %>%
  mutate(position = case_when(Year == 1800 ~ 91.25,
                              Free != 100 ~ Slave + 2,
                              TRUE ~ 91),
         Free = paste0(Free, "%"))

# Vertical segment coordinates between decades; the first and last years
# (1790, 1870) sit at the plot edges and get no segment.
h_segments <- text_label %>%
  filter(!Year %in% c(1790, 1870)) %>%
  mutate(y = position - 3.25,
         yend = 100)

# Colors
dubois_colors <- c("#278251", "black")  # fills for the two status areas
background <- "#DFD4C7"                 # parchment background color
# Plot
# The first mutate shifts the boundary between the two areas by +/- 5
# percentage points purely to widen the black band so its annotations fit;
# the true values are still displayed via text_label (see comment below).
freed_slaves_long %>%
  mutate(percentage = case_when(status == "Slave" ~ percentage - 5,
                                status == "Free" ~ percentage + 5),
         percentage = case_when(percentage == -5 ~ 0,
                                percentage == 105 ~ 100,
                                TRUE ~ percentage)) %>%
  #The mutate call alters the percentages values in order to allow for more space in which to put annotations. Without it the space is too narrow. The real percentage values are stated by the text_label.
  ggplot() +
  # Stacked areas filled by status (colors come from dubois_colors below).
  geom_area(aes(Year, percentage,
                fill = status)) +
  # Percentage labels near the boundary between the two areas.
  geom_text(text_label,
            mapping = aes(Year, position - 5,
                          label = Free),
            fontface = "bold") +
  # Year labels along the top edge of the plot (y = 102.75, above the areas).
  geom_text(text_label,
            mapping = aes(Year, 102.75,
                          label = Year),
            fontface = "bold",
            family = "B52-ULCW00-ULC",
            size = 5) +
  # Bilingual region annotations, centered horizontally at 1830.
  annotate('text',
           x = 1830,
           y = 95,
           label = "FREE - LIBRE",
           size = 7,
           fontface = "bold",
           family = "B52-ULCW00-ULC") +
  # The SLAVES/ESCLAVES labels use the background color so they read as
  # cut-outs against the dark area.
  annotate('text',
           x = 1830,
           y = 60,
           label = "SLAVES",
           size = 7,
           fontface = "bold",
           color = background,
           family = "B52-ULCW00-ULC") +
  annotate('text',
           x = 1830,
           y = 56,
           label = "ESCLAVES",
           size = 7,
           fontface = "bold",
           color = background,
           family = "B52-ULCW00-ULC") +
  # Vertical separator lines between decades (coordinates from h_segments).
  geom_segment(h_segments,
               mapping = aes(x = Year,
                             xend = Year,
                             y = y,
                             yend = yend)) +
  # Short green segment under the "FREE - LIBRE" label at 1830.
  geom_segment(aes(x = 1830,
                   xend = 1830,
                   y = 92,
                   yend = 97),
               color = "#278251") +
  scale_fill_manual(values = dubois_colors) +
  labs(y = NULL, x = NULL,
       title = "PROPORTION OF FREEMEN AND SLAVES AMONG AMERICAN NEGROES .\n\nPROPORTION DES NÈGRES LIBRES ET DES ESCLAVES EN AMÈRIQUE .",
       subtitle = "DONE BY ATLANTA UNIVERSITY .",
       caption = "\n\nVISUALIZATION: **@LUISFREII** | SOURCE: **#DUBOISCHALLENGE**") +
  # Strip axes and grid, and match backgrounds to the parchment color.
  theme(axis.text.y = element_blank(),
        axis.text.x = element_blank(),
        axis.ticks = element_blank(),
        panel.background = element_rect(background),
        plot.background = element_rect(background),
        legend.position = "none",
        plot.title = element_text(size = 16,
                                  hjust = 0.5,
                                  family = "B52-ULCW00-ULC"),
        plot.subtitle = element_text(size = 11,
                                     hjust = 0.5,
                                     margin = margin(1, 0, 2, 0, unit = "cm"),
                                     family = "B52-ULCW00-ULC"),
        # element_markdown (ggtext) renders the **bold** markdown in the caption.
        plot.caption = element_markdown(size = 8,
                                        hjust = 0.5),
        panel.grid = element_blank()
  )
#Code to save the plot
# ggsave("dubois.png",
# width = 19.5,
# height = 25,
# units = "cm",
# dpi = 320,
# type = "cairo-png")
| /2021/W08_Dubois_Challenge/W08_Dubois_Challenge.R | no_license | dimbage/R_Tidytuesday | R | false | false | 4,196 | r | #Load packages
library(tidyverse)
library(extrafont)
library(ggtext)
library(Cairo)
#Get the data
tuesdata <- tidytuesdayR::tt_load(2021, week = 8)
freed_slaves_wide <- tuesdata$freed_slaves
#Long format data used to make the plot later
freed_slaves_wide %>%
mutate(Slave = case_when(Year == 1800 ~ Slave + 1,
TRUE ~ Slave)) %>%
pivot_longer(2:3,
names_to = "status",
values_to = "percentage") -> freed_slaves_long
#Percentage labels
freed_slaves_wide %>%
mutate(position = case_when(Year == 1800 ~ 91.25,
Free != 100 ~ Slave + 2,
TRUE ~ 91),
Free = paste0(Free, "%")) -> text_label
#Create df with values for the segments.
text_label %>%
filter(!Year %in% c(1790, 1870)) %>%
mutate(y = position - 3.25,
yend = 100) -> h_segments
#Colors
dubois_colors <- c("#278251", "black")
background <- "#DFD4C7"
#Plot
freed_slaves_long %>%
mutate(percentage = case_when(status == "Slave" ~ percentage - 5,
status == "Free" ~ percentage + 5),
percentage = case_when(percentage == -5 ~ 0,
percentage == 105 ~ 100,
TRUE ~ percentage)) %>%
#The mutate call alters the percentages values in order to allow for more space in which to put annotations. Without it the space is too narrow. The real percentage values are stated by the text_label.
ggplot() +
geom_area(aes(Year, percentage,
fill = status)) +
geom_text(text_label,
mapping = aes(Year, position - 5,
label = Free),
fontface = "bold") +
geom_text(text_label,
mapping = aes(Year, 102.75,
label = Year),
fontface = "bold",
family = "B52-ULCW00-ULC",
size = 5) +
annotate('text',
x = 1830,
y = 95,
label = "FREE - LIBRE",
size = 7,
fontface = "bold",
family = "B52-ULCW00-ULC") +
annotate('text',
x = 1830,
y = 60,
label = "SLAVES",
size = 7,
fontface = "bold",
color = background,
family = "B52-ULCW00-ULC") +
annotate('text',
x = 1830,
y = 56,
label = "ESCLAVES",
size = 7,
fontface = "bold",
color = background,
family = "B52-ULCW00-ULC") +
geom_segment(h_segments,
mapping = aes(x = Year,
xend = Year,
y = y,
yend = yend)) +
geom_segment(aes(x = 1830,
xend = 1830,
y = 92,
yend = 97),
color = "#278251") +
scale_fill_manual(values = dubois_colors) +
labs(y = NULL, x = NULL,
title = "PROPORTION OF FREEMEN AND SLAVES AMONG AMERICAN NEGROES .\n\nPROPORTION DES NÈGRES LIBRES ET DES ESCLAVES EN AMÈRIQUE .",
subtitle = "DONE BY ATLANTA UNIVERSITY .",
caption = "\n\nVISUALIZATION: **@LUISFREII** | SOURCE: **#DUBOISCHALLENGE**") +
theme(axis.text.y = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank(),
panel.background = element_rect(background),
plot.background = element_rect(background),
legend.position = "none",
plot.title = element_text(size = 16,
hjust = 0.5,
family = "B52-ULCW00-ULC"),
plot.subtitle = element_text(size = 11,
hjust = 0.5,
margin = margin(1, 0, 2, 0, unit = "cm"),
family = "B52-ULCW00-ULC"),
plot.caption = element_markdown(size = 8,
hjust = 0.5),
panel.grid = element_blank()
)
#Code to save the plot
# ggsave("dubois.png",
# width = 19.5,
# height = 25,
# units = "cm",
# dpi = 320,
# type = "cairo-png")
|
% FILE rqapp/man/rqa.Rd
\name{rqa}
\alias{rqa}
\title{
Recurrence Quantification Analysis
}
\description{
This function performs recurrence quantification analysis and its bivariate
extension, cross recurrence quantification analysis.
}
\usage{
rqa(ts1, ts2, embed, delay, normalize, rescale, mindiagline, minvertline,
t_win, radius, whiteline, recpt)
}
\arguments{
\item{ts1}{a numerical time series}
\item{ts2}{a numerical time series}
\item{embed}{embedding dimension}
\item{delay}{optimal time delay}
\item{normalize}{should time series be normalized? (0 = no, 1 = unit
interval, 2 = z-score)}
\item{rescale}{should distance matrix be rescaled? (0 = no, 1 = max
norm, 2 = min norm)}
\item{mindiagline}{smallest number of diagonal points to be considered
a line}
\item{minvertline}{smallest number of vertical points to be considered
a line}
\item{t_win}{theiler window}
\item{radius}{minimum distance within which points are considered
recurrent}
\item{whiteline}{not implemented}
\item{recpt}{should recurrence plot be returned? Not recommended for long
series.}
}
\details{
This function performs recurrence quantification analysis (RQA) and its
bivariate extension, cross recurrence quantification analysis (CRQA) on
time series data that have (potentially) been embedded in higher dimension
than the originating series. A common approach for univariate series
involves several steps: First, identify the optimal time \code{delay} as
either the first zero crossing of the autocorrelation function or the first
minimum of the average mutual information function. Second, the time series
is unfolded into \code{embed} dimensions by creating time-delayed copies of
the original series. One method for determining the number of dimensions is
by the method of False Nearest Neighbors. Third, a distance matrix is
computed among the embedded points of the series. A recurrence plot is
constructed by passing the distance matrix through a Heaviside function:
distances less than or equal to the chosen \code{radius} are marked as 1
(recurrent); distances falling outside the radius are marked as 0 (not
recurrent).
The bivariate case involves completing steps one and two on each series. By
convention, the time series with the most extreme parameters (e.g., longer
delay and higher dimension) determines the parameters used in CRQA as both
time series are embedded in the same phase space. Note that, in the case
of categorical data, both series usually have the same parameters:
\code{delay = 0} and \code{embed = 1}. Once delay and embedding parameters
have been selected, CRQA computes a distance matrix between the two embedded
series. As in the univariate case, distances less than or equal to the
chosen radius are marked as recurrent (i.e. 1) or not (i.e. 0).
After constructing the recurrence plot, a number of measures are computed
to characterize recurrent structure in the time series. These measures
and their interpretation are well documented in the literature. We
provide simple definitions for each recurrence metric below. In addition,
we provide references to standard readings including a very readable
introduction to RQA (i.e., Webber & Zbilut, 2005; Marwan et al., 2007).
}
\value{
This function returns conventional RQA variables and (optionally) the
recurrence plot.
\item{rr}{recurrence rate: the overall percentage of recurrent points}
\item{det}{determinism: the percentage of recurrent points that fall on a
line}
\item{div}{divergence: inverse of determinism i.e. 1/det}
\item{nrline}{number of lines: total number of lines in the upper triangle}
\item{ratio}{ratio: percent determinism/percent recurrence i.e det/rr}
\item{maxline}{longest line: the number points in the longest diagonal line}
\item{meanline}{average line: average length of diagonal lines}
\item{lam}{laminarity: percentage of points that fall on vertical lines}
\item{tt}{trapping time: average length of vertical lines}
\item{vmax}{longest vertical line: the number of points in the longest
vertical line}
\item{entropy}{Shannon entropy: based on distribution of line lengths}
\item{rentropy}{relative entropy: Shannon entropy normalized by number of
lines}
}
\references{
Webber, C. L., & Zbilut, J. P. (2005). Recurrence quantification analysis
of nonlinear dynamical time series. In S. Riley and G. C. Van Orden
(eds). \emph{Tutorials in contemporary nonlinear methods for the
behavioral sciences}.
Marwan, N., Romano, M. C. Theil, M., & Kurths, J. (2007). Recurrence plots
for the analysis of complex systems. \emph{Physics Reports}, \emph{438},
237-329.
}
\examples{
\dontrun{
x <- sample(0:2, 100, replace = TRUE)
x.recpt <- rqa(x, x, 1, 1, 0, 1, 2, 2, 0, .0001, 0, 1)
recurrence_plot(x.recpt$rp)
}
}
| /man/rqa.Rd | no_license | aaronlikens/rqapp | R | false | false | 4,889 | rd | % FILE rqapp/man/rqa.Rd
\name{rqa}
\alias{rqa}
\title{
Recurrence Quantification Analysis
}
\description{
This function performs recurrence quantification anlaysis and its bivariate
extension, cross recurrence quantification analysis.
}
\usage{
rqa(ts1, ts2, embed, delay, normalize, rescale, mindiagline, minvertline,
t_win, radius, whiteline, recpt)
}
\arguments{
\item{ts1}{a numerical time series}
\item{ts2}{a numerical time series}
\item{embed}{embedding dimension}
\item{delay}{optimal time delay}
\item{normalize}{should time series be normalized? (0 = no, 1 = unit
interval, 2 = z-score)}
\item{rescale}{should distance matrix be rescaled? (0 = no, 1 = max
norm, 2 = min norm)}
\item{mindiagline}{smallest number of diagonal points to be considered
a line}
\item{minvertline}{smallest number of vertical points to be considered
a line}
\item{t_win}{theiler window}
\item{radius}{minimum distance within which points are considered
recurrent}
\item{whiteline}{not implemented}
\item{recpt}{should recurrence plot be returned? Not recommended for long
series.}
}
\details{
This function performs recurrence quantification anlaysis (RQA) and its
bivariate extension, cross recurrence quantification analysis (CRQA) on
time series data that have (potentially) been embedded in higher dimension
than the originating series. A common approach for univariate series
involves several steps: First, identify the optimal time \code{delay} as
either the first zero crossing of the autocorrelation function or the first
minimum of the average mutual information function. Second, the time series
is unfolded into \code{embed} dimensions by creating time-delayed copies of
the original series. One method for determining the number of dimensions is
by the method of False Nearest Neighbors. Third, a distance matrix is
computed among the embedded points of the series. A recurrence plot is
constructed by passing the distance matrix through a heavyside function:
distances less than or equal to the chosen \code{radius} are marked as 1
(recurrent); distances falling outside the radius are marked as 0 (not
recurrent).
The bivariate case involves completing steps one and two on each series. By
convention, the time series with the most extreme parameters (e.g., longer
delay and higher dimension) determines the parameters used in CRQA as both
time series are embedded in the same phase space. Note that, in the case
of categorical data, both series usually have the same parameters:
\code{delay = 0} and \code{embed = 1}. Once delay and embeeding parameters
have been selected, CRQA computes a distance matrix between the two embedded
series. As in the univariate case, distances less than or equal to the
chosen radius are marked as recurrent (i.e. 1) or not (i.e. 0).
After constructing the recurrence plot, a number of measures are computed
to characterize recurrent stucture in the time series. These measures
and their interpretation are well documented in the literature. We
provide simple definitions for each recurrence metric below. In addition,
we provide references to standard readings including a very readable
introduction to RQA (i.e., Webber & Zbilut, 2005; Marwan et al., 2007).
}
\value{
This function returns conventional RQA variables and (optionally) the
recurrence plot.
\item{rr}{recurrence rate: the overall percentage of recurrent points}
\item{det}{determinism: the percentage of recurrent points that fall on a
line}
\item{div}{divergenc: inverse of determinism i.e. 1/det}
\item{nrline}{number of lines: total number of lines in the upper triangle}
\item{ratio}{ratio: percent determinism/percent recurrence i.e det/rr}
\item{maxline}{longest line: the number points in the longest diagonal line}
\item{meanline}{average line: average length of diagonal lines}
\item{lam}{laminarity: perecentage of points that fall on vertical lines}
\item{tt}{trapping time: average length of vertical lines}
\item{vmax}{longest vertical line: the number of points in the longest
vertical line}
\item{entropy}{Shannon entropy: based on distribution of line lengths}
\item{rentropy}{relative entropy: Shannon entropy normalized by number of
lines}
}
\references{
Webber, C. L., & Zbilut, J. P. (2005). Recurrence quantification analysis
of nonlinear dynamical time series. In S. Riley and G. C. Van Orden
(eds). \emph{Tutorials in contemporary nonlinear methods for the
behavioral sciences}.
Marwan, N., Romano, M. C. Theil, M., & Kurths, J. (2007). Recurrence plots
for the analysis of complex systems. \emph{Physics Reports}, \emph{438},
237-329.
}
\examples{
\dontrun{
x <- sample(0:2, 100)
x.recpt <- rqa(x, x, 1, 1, 0, 1, 2, 2, 0, .0001, 0, 1)
recurrence_lot(x.recpt$rp)
}
}
|
# NOTE(review): the lines below mutate global state (clear the workspace,
# attach seven packages, change the working directory). Acceptable for a
# standalone analysis script run top-to-bottom, but do not source() this
# file from other code.
rm( list=ls() )

library(lme4) #1.1-21
library(boot) #1.3-22
library(ggplot2) #3.2.1
library(LaplacesDemon) #16.1.1
library(plyr) #1.8.4
library(devtools) #2.2.1
library(piecewiseSEM) #2.1.0

# Set the working directory to this script's folder (requires RStudio;
# fails when run with plain Rscript -- TODO confirm intended usage).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

#####################
### DATA INIT ###
#####################

#to obtain the same random sequence
set.seed(12345)

#load data
# NOTE(review): path is relative to the directory set above; assumes the file
# has six unnamed columns in the order listed below -- verify against the data.
expData <- read.table( "../data/dataForR.dat", as.is=TRUE )
names(expData) <- c('obs','trialNum','condition','subCondition','latency','selection')
latencyScalar <- 150

#ensure proper data-types and ranges
expData$obs <- as.factor(expData$obs)
expData$condition <- as.factor(expData$condition)
expData$trialNum <- as.double(expData$trialNum)
# Latencies are divided by 150 so the predictor is on a smaller scale.
expData$latency <- (as.double(expData$latency))/latencyScalar #to avoid scaling warning
expData$selection <- as.factor(expData$selection)
# Recode selection to a 0/1 factor with 0 as the reference level.
expData$selection <- mapvalues(expData$selection, from = c("1", "3"), to = c("1", "0")) #recode to 1 and 0 (1=TARGET,3=DISTRACTOR)
expData$selection <- relevel(expData$selection,ref="0")

#####################
### MODEL FIT ###
#####################

#complete model: fixed effects of latency, condition, and their interaction,
#with a random intercept per observer
expData.completeModel <- glmer(selection ~ latency + condition + latency * condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
summary(expData.completeModel)

###comparisons fixed effects
#Each anova() call compares two nested models fitted above.
#leave out interaction
expData.withoutInteraction <- glmer(selection ~ latency + condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutInteraction,expData.completeModel)
#leave out condition
expData.withoutCondition <- glmer(selection ~ latency + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutCondition,expData.withoutInteraction)
#leave out latency
expData.withoutLatency <- glmer(selection ~ condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutLatency,expData.withoutInteraction)

#maximal model: by-observer random slopes for all fixed effects and the interaction
expData.variabilityOnAll <- glmer(selection ~ latency + condition + latency * condition + (1 + latency + condition + latency*condition|obs), data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
summary(expData.variabilityOnAll)
| /Exp2/glmR/mixedModelSummary.R | no_license | Nian-Jingqing/ior | R | false | false | 2,316 | r | rm( list=ls() )
library(lme4) #1.1-21
library(boot) #1.3-22
library(ggplot2) #3.2.1
library(LaplacesDemon) #16.1.1
library(plyr) #1.8.4
library(devtools) #2.2.1
library(piecewiseSEM) #2.1.0
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
#####################
### DATA INIT ###
#####################
#to obtain the same random sequence
set.seed(12345)
#load data
expData <- read.table( "../data/dataForR.dat", as.is=TRUE )
names(expData) <- c('obs','trialNum','condition','subCondition','latency','selection')
latencyScalar <- 150
#ensure proper data-types and ranges
expData$obs <- as.factor(expData$obs)
expData$condition <- as.factor(expData$condition)
expData$trialNum <- as.double(expData$trialNum)
expData$latency <- (as.double(expData$latency))/latencyScalar #to avoid scaling warning
expData$selection <- as.factor(expData$selection)
expData$selection <- mapvalues(expData$selection, from = c("1", "3"), to = c("1", "0")) #recode to 1 and 0 (1=TARGET,3=DISTRACTOR)
expData$selection <- relevel(expData$selection,ref="0")
#####################
### MODEL FIT ###
#####################
#complete model
expData.completeModel <- glmer(selection ~ latency + condition + latency * condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
summary(expData.completeModel)
###comparisons fixed effects
#leave out interaction
expData.withoutInteraction <- glmer(selection ~ latency + condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutInteraction,expData.completeModel)
#leave out condition
expData.withoutCondition <- glmer(selection ~ latency + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutCondition,expData.withoutInteraction)
#leave out latency
expData.withoutLatency <- glmer(selection ~ condition + (1|obs),data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
anova(expData.withoutLatency,expData.withoutInteraction)
expData.variabilityOnAll <- glmer(selection ~ latency + condition + latency * condition + (1 + latency + condition + latency*condition|obs), data=expData,family="binomial", control=glmerControl(optimizer="bobyqa"))
summary(expData.variabilityOnAll)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{startprofiles}
\alias{startprofiles}
\title{Initialize profile list object}
\usage{
startprofiles(logfile, frac = FALSE, sets = FALSE, progbar = FALSE,
ion_mode = "positive", until = FALSE)
}
\arguments{
\item{logfile}{logfile object of an enviMass project.}
\item{frac}{Numerical, \code{0<frac<=1}. Fraction of files to use; oldest are omitted.}
\item{sets}{Integer. Number of latest files to include.}
\item{progbar}{Logical. Should a progress bar be shown? Only for Windows.}
\item{ion_mode}{Character string, either "positive" or "negative".}
\item{until}{Integer, ID of file. All peaks of files up to the date of this file will be included.}
}
\value{
profile list
}
\description{
\code{startprofiles} initializes a list object containing all peaks from the files in an enviMass project, associated metadata and placeholder.
}
\details{
enviMass workflow function
}
\seealso{
\code{agglomer}, \code{partcluster}
}
| /man/startprofiles.Rd | no_license | dutchjes/enviMass | R | false | false | 986 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{startprofiles}
\alias{startprofiles}
\title{Initialize profile list object}
\usage{
startprofiles(logfile, frac = FALSE, sets = FALSE, progbar = FALSE,
ion_mode = "positive", until = FALSE)
}
\arguments{
\item{logfile}{logfile object of an enviMass project.}
\item{frac}{Numerical, \code{0<frac<=1}. Fraction of files to use; oldest are omitted.}
\item{sets}{Integer. Number of latest files to include.}
\item{progbar}{Logical. Should a progress bar be shown? Only for Windows.}
\item{ion_mode}{Character string, either "positive" or "negative".}
\item{until}{Integer, ID of file. All peaks of files up to the date of this file will be included.}
}
\value{
profile list
}
\description{
\code{startprofiles} initializes a list object containing all peaks from the files in an enviMass project, associated metadata and placeholder.
}
\details{
enviMass workflow function
}
\seealso{
\code{agglomer}, \code{partcluster}
}
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.4483337846829e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615766957-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,802 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.4483337846829e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
#' @title Compute vector of loglikelihood for fitted mash object on new data.
#'
#' @param g A mash object.
#'
#' @param data A set of data on which to compute the loglikelihood.
#'
#' @param algorithm.version Indicate R or Rcpp version
#'
#' @return The vector of log-likelihoods for each data point computed using g.
#'
#' @details The log-likelihood for each element is \eqn{p(Bhat_j |
#' Shat_j,g,\alpha)} where \eqn{Bhat_j | B_j, Shat_j \sim N(B_j,
#' Shat_j)} and \eqn{B_j/Shat_j^\alpha | Shat_j \sim g} Here the value
#' of \eqn{\alpha} is set when setting up the data object in
#' `mash_set_data`. If g is a mash object (safest!) then the function
#' will check that this value matches the \eqn{\alpha} used when
#' fitting `mash`. Note: as a convenience, this function can also be
#' called with g a mixture distribution with same structure as the
#' fitted_g from a mash object. This is mostly useful when doing
#' simulations, where you might want to compute the likelihood under
#' the "true" g. When used in this way the user is responsible for
#' making sure that the g makes sense with the alpha set in data.
#'
#' @export
#'
mash_compute_vloglik = function(g,data, algorithm.version= c("Rcpp","R")){
if(class(g)=="mash"){
alpha = g$alpha
g = g$fitted_g
if(alpha != data$alpha){
stop('The alpha in data does not match the one used to fit the mash model.')
}
}
else{
message('Warning: Please make sure the alpha in data is consistent with the `alpha` used to compute g.')
}
xUlist = expand_cov(g$Ulist,g$grid,g$usepointmass)
lm_res = calc_relative_lik_matrix(data,xUlist,algorithm.version=algorithm.version)
return(log(exp(lm_res$loglik_matrix) %*% g$pi) + lm_res$lfactors - rowSums(log(data$Shat_alpha)))
}
#' @title Compute loglikelihood for fitted mash object on new data.
#'
#' @param g A mash object or the fitted_g from a mash object.
#'
#' @param data A set of data on which to compute the loglikelihood.
#'
#' @param algorithm.version Indicate R or Rcpp version
#'
#' @return The log-likelihood for data computed using g.
#'
#' @details The log-likelihood for each element is \eqn{p(Bhat_j |
#' Shat_j,g,\alpha)} where \eqn{Bhat_j | B_j, Shat_j \sim N(B_j,
#' Shat_j)} and \eqn{B_j/Shat_j^\alpha | Shat_j \sim g}.
#'
#' @export
#'
mash_compute_loglik = function(g,data, algorithm.version=c("Rcpp", "R")){
return( sum( mash_compute_vloglik(g,data, algorithm.version = algorithm.version) ) )
}
#' Compute vector of alternative loglikelihoods from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions, with first element corresponding to null
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' whose first column corresponds to null
#' @param Shat_alpha matrix of Shat^alpha
compute_alt_loglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(log(exp(lm$loglik_matrix[,-1,drop=FALSE]) %*% (pi_s[-1]/(1-pi_s[1])))+lm$lfactors-rowSums(log(Shat_alpha)))
}
#' Compute vector of null loglikelihoods from a matrix of log-likelihoods
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' whose first column corresponds to null
#' @param Shat_alpha matrix of Shat^alpha
compute_null_loglik_from_matrix = function(lm,Shat_alpha){
lm$loglik_matrix[,1]+lm$lfactors-rowSums(log(Shat_alpha))
}
#' Compute vector of loglikelihoods from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' @param Shat_alpha matrix of Shat^alpha
compute_vloglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(log(exp(lm$loglik_matrix) %*% pi_s)+lm$lfactors-rowSums(log(Shat_alpha)))
}
#' Compute total loglikelihood from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' @param Shat_alpha matrix of Shat^alpha
compute_loglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(sum(compute_vloglik_from_matrix_and_pi(pi_s,lm,Shat_alpha)))
}
| /R/likelihoods_origdata.R | permissive | surbut/mashr | R | false | false | 4,222 | r | #' @title Compute vector of loglikelihood for fitted mash object on new data.
#'
#' @param g A mash object.
#'
#' @param data A set of data on which to compute the loglikelihood.
#'
#' @param algorithm.version Indicate R or Rcpp version
#'
#' @return The vector of log-likelihoods for each data point computed using g.
#'
#' @details The log-likelihood for each element is \eqn{p(Bhat_j |
#' Shat_j,g,\alpha)} where \eqn{Bhat_j | B_j, Shat_j \sim N(B_j,
#' Shat_j)} and \eqn{B_j/Shat_j^\alpha | Shat_j \sim g} Here the value
#' of \eqn{\alpha} is set when setting up the data object in
#' `mash_set_data`. If g is a mash object (safest!) then the function
#' will check that this value matches the \eqn{\alpha} used when
#' fitting `mash`. Note: as a convenience, this function can also be
#' called with g a mixture distribution with same structure as the
#' fitted_g from a mash object. This is mostly useful when doing
#' simulations, where you might want to compute the likelihood under
#' the "true" g. When used in this way the user is responsible for
#' making sure that the g makes sense with the alpha set in data.
#'
#' @export
#'
mash_compute_vloglik = function(g,data, algorithm.version= c("Rcpp","R")){
if(class(g)=="mash"){
alpha = g$alpha
g = g$fitted_g
if(alpha != data$alpha){
stop('The alpha in data does not match the one used to fit the mash model.')
}
}
else{
message('Warning: Please make sure the alpha in data is consistent with the `alpha` used to compute g.')
}
xUlist = expand_cov(g$Ulist,g$grid,g$usepointmass)
lm_res = calc_relative_lik_matrix(data,xUlist,algorithm.version=algorithm.version)
return(log(exp(lm_res$loglik_matrix) %*% g$pi) + lm_res$lfactors - rowSums(log(data$Shat_alpha)))
}
#' @title Compute loglikelihood for fitted mash object on new data.
#'
#' @param g A mash object or the fitted_g from a mash object.
#'
#' @param data A set of data on which to compute the loglikelihood.
#'
#' @param algorithm.version Indicate R or Rcpp version
#'
#' @return The log-likelihood for data computed using g.
#'
#' @details The log-likelihood for each element is \eqn{p(Bhat_j |
#' Shat_j,g,\alpha)} where \eqn{Bhat_j | B_j, Shat_j \sim N(B_j,
#' Shat_j)} and \eqn{B_j/Shat_j^\alpha | Shat_j \sim g}.
#'
#' @export
#'
mash_compute_loglik = function(g,data, algorithm.version=c("Rcpp", "R")){
return( sum( mash_compute_vloglik(g,data, algorithm.version = algorithm.version) ) )
}
#' Compute vector of alternative loglikelihoods from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions, with first element corresponding to null
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' whose first column corresponds to null
#' @param Shat_alpha matrix of Shat^alpha
compute_alt_loglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(log(exp(lm$loglik_matrix[,-1,drop=FALSE]) %*% (pi_s[-1]/(1-pi_s[1])))+lm$lfactors-rowSums(log(Shat_alpha)))
}
#' Compute vector of null loglikelihoods from a matrix of log-likelihoods
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' whose first column corresponds to null
#' @param Shat_alpha matrix of Shat^alpha
compute_null_loglik_from_matrix = function(lm,Shat_alpha){
lm$loglik_matrix[,1]+lm$lfactors-rowSums(log(Shat_alpha))
}
#' Compute vector of loglikelihoods from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' @param Shat_alpha matrix of Shat^alpha
compute_vloglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(log(exp(lm$loglik_matrix) %*% pi_s)+lm$lfactors-rowSums(log(Shat_alpha)))
}
#' Compute total loglikelihood from a matrix of log-likelihoods and fitted pi
#' @param pi_s the vector of mixture proportions
#' @param lm the results of a likelihood matrix calculation from \code{calc_relative_lik_matrix}
#' @param Shat_alpha matrix of Shat^alpha
compute_loglik_from_matrix_and_pi = function(pi_s,lm,Shat_alpha){
return(sum(compute_vloglik_from_matrix_and_pi(pi_s,lm,Shat_alpha)))
}
|
# Turn off double evaluation to make things faster
AUTO_DETECT_NEWVAR <- FALSE
match_call <- function(correct_call = NULL) {
e <- get("e", parent.frame())
# Trivial case
if(is.null(correct_call)) return(TRUE)
# Get full correct call
full_correct_call <- expand_call(correct_call)
# Expand user's expression
expr <- deparse(e$expr)
full_user_expr <- expand_call(expr)
# Compare function calls with full arg names
identical(full_correct_call, full_user_expr)
}
# Utility function for match_call answer test
# Fills out a function call with full argument names
expand_call <- function(call_string) {
# Quote expression
qcall <- parse(text=call_string)[[1]]
# If expression is not greater than length 1...
if(length(qcall) <= 1) return(qcall)
# See if it's an assignment
is_assign <- is(qcall, "<-")
# If assignment, process righthandside
if(is_assign) {
# Get righthand side
rhs <- qcall[[3]]
# If righthand side is not a call, can't use match.fun()
if(!is.call(rhs)) return(qcall)
# Get function from function name
fun <- match.fun(rhs[[1]])
# match.call() does not support primitive functions
if(is.primitive(fun)) return(qcall)
# Get expanded call
full_rhs <- match.call(fun, rhs)
# Full call
qcall[[3]] <- full_rhs
} else { # If not assignment, process whole thing
# Get function from function name
fun <- match.fun(qcall[[1]])
# match.call() does not support primitive functions
if(is.primitive(fun)) return(qcall)
# Full call
qcall <- match.call(fun, qcall)
}
# Return expanded function call
qcall
}
# Get the swirl state
getState <- function(){
# Whenever swirl is running, its callback is at the top of its call stack.
# Swirl's state, named e, is stored in the environment of the callback.
environment(sys.function(1))$e
}
# Get the value which a user either entered directly or was computed
# by the command he or she entered.
getVal <- function(){
getState()$val
}
# Get the last expression which the user entered at the R console.
getExpr <- function(){
getState()$expr
}
coursera_on_demand <- function(){
selection <- getState()$val
if(selection == "Yes"){
email <- readline("What is your email address? ")
token <- readline("What is your assignment token? ")
payload <- sprintf('{
"assignmentKey": "avj0Oq8hEeW1-RKql4-XpQ",
"submitterEmail": "%s",
"secret": "%s",
"parts": {
"p3Hz9": {
"output": "correct"
}
}
}', email, token)
url <- 'https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1'
respone <- httr::POST(url, body = payload)
if(respone$status_code >= 200 && respone$status_code < 300){
message("Grade submission succeeded!")
} else {
message("Grade submission failed.")
message("Press ESC if you want to exit this lesson and you")
message("want to try to submit your grade at a later time.")
return(FALSE)
}
}
TRUE
}
# Get the swirl state
getState <- function(){
# Whenever swirl is running, its callback is at the top of its call stack.
# Swirl's state, named e, is stored in the environment of the callback.
environment(sys.function(1))$e
}
#Submit to Google Forms
# Retrieve the log from swirl's state
getLog <- function(){
getState()$log
}
submit_log <- function(){
# Please edit the link below
pre_fill_link <- "https://docs.google.com/forms/d/e/1FAIpQLSdhjDPCpFasFN6YLBqUi5ekmK_2t2eqXTUL8xnNoWDgdI6c-w/viewform?usp=pp_url&entry.822553640"
# Do not edit the code below
if(!grepl("=$", pre_fill_link)){
pre_fill_link <- paste0(pre_fill_link, "=")
}
p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
temp <- tempfile()
log_ <- getLog()
nrow_ <- max(unlist(lapply(log_, length)))
log_tbl <- data.frame(user = rep(log_$user, nrow_),
course_name = rep(log_$course_name, nrow_),
lesson_name = rep(log_$lesson_name, nrow_),
question_number = p(log_$question_number, nrow_, NA),
correct = p(log_$correct, nrow_, NA),
attempt = p(log_$attempt, nrow_, NA),
skipped = p(log_$skipped, nrow_, NA),
datetime = p(log_$datetime, nrow_, NA),
stringsAsFactors = FALSE)
write.csv(log_tbl, file = temp, row.names = FALSE)
encoded_log <- base64encode(temp)
browseURL(paste0(pre_fill_link, encoded_log))
}
| /Unit 2 - Data Sources & Manipulation/Manipulating_Data_with_dplyr/customTests.R | no_license | covenwu/RinEduApp | R | false | false | 4,728 | r | # Turn off double evaluation to make things faster
AUTO_DETECT_NEWVAR <- FALSE
match_call <- function(correct_call = NULL) {
e <- get("e", parent.frame())
# Trivial case
if(is.null(correct_call)) return(TRUE)
# Get full correct call
full_correct_call <- expand_call(correct_call)
# Expand user's expression
expr <- deparse(e$expr)
full_user_expr <- expand_call(expr)
# Compare function calls with full arg names
identical(full_correct_call, full_user_expr)
}
# Utility function for match_call answer test
# Fills out a function call with full argument names
expand_call <- function(call_string) {
# Quote expression
qcall <- parse(text=call_string)[[1]]
# If expression is not greater than length 1...
if(length(qcall) <= 1) return(qcall)
# See if it's an assignment
is_assign <- is(qcall, "<-")
# If assignment, process righthandside
if(is_assign) {
# Get righthand side
rhs <- qcall[[3]]
# If righthand side is not a call, can't use match.fun()
if(!is.call(rhs)) return(qcall)
# Get function from function name
fun <- match.fun(rhs[[1]])
# match.call() does not support primitive functions
if(is.primitive(fun)) return(qcall)
# Get expanded call
full_rhs <- match.call(fun, rhs)
# Full call
qcall[[3]] <- full_rhs
} else { # If not assignment, process whole thing
# Get function from function name
fun <- match.fun(qcall[[1]])
# match.call() does not support primitive functions
if(is.primitive(fun)) return(qcall)
# Full call
qcall <- match.call(fun, qcall)
}
# Return expanded function call
qcall
}
# Get the swirl state
getState <- function(){
# Whenever swirl is running, its callback is at the top of its call stack.
# Swirl's state, named e, is stored in the environment of the callback.
environment(sys.function(1))$e
}
# Get the value which a user either entered directly or was computed
# by the command he or she entered.
getVal <- function(){
getState()$val
}
# Get the last expression which the user entered at the R console.
getExpr <- function(){
getState()$expr
}
coursera_on_demand <- function(){
selection <- getState()$val
if(selection == "Yes"){
email <- readline("What is your email address? ")
token <- readline("What is your assignment token? ")
payload <- sprintf('{
"assignmentKey": "avj0Oq8hEeW1-RKql4-XpQ",
"submitterEmail": "%s",
"secret": "%s",
"parts": {
"p3Hz9": {
"output": "correct"
}
}
}', email, token)
url <- 'https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1'
respone <- httr::POST(url, body = payload)
if(respone$status_code >= 200 && respone$status_code < 300){
message("Grade submission succeeded!")
} else {
message("Grade submission failed.")
message("Press ESC if you want to exit this lesson and you")
message("want to try to submit your grade at a later time.")
return(FALSE)
}
}
TRUE
}
# Get the swirl state
getState <- function(){
# Whenever swirl is running, its callback is at the top of its call stack.
# Swirl's state, named e, is stored in the environment of the callback.
environment(sys.function(1))$e
}
#Submit to Google Forms
# Retrieve the log from swirl's state
getLog <- function(){
getState()$log
}
submit_log <- function(){
# Please edit the link below
pre_fill_link <- "https://docs.google.com/forms/d/e/1FAIpQLSdhjDPCpFasFN6YLBqUi5ekmK_2t2eqXTUL8xnNoWDgdI6c-w/viewform?usp=pp_url&entry.822553640"
# Do not edit the code below
if(!grepl("=$", pre_fill_link)){
pre_fill_link <- paste0(pre_fill_link, "=")
}
p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
temp <- tempfile()
log_ <- getLog()
nrow_ <- max(unlist(lapply(log_, length)))
log_tbl <- data.frame(user = rep(log_$user, nrow_),
course_name = rep(log_$course_name, nrow_),
lesson_name = rep(log_$lesson_name, nrow_),
question_number = p(log_$question_number, nrow_, NA),
correct = p(log_$correct, nrow_, NA),
attempt = p(log_$attempt, nrow_, NA),
skipped = p(log_$skipped, nrow_, NA),
datetime = p(log_$datetime, nrow_, NA),
stringsAsFactors = FALSE)
write.csv(log_tbl, file = temp, row.names = FALSE)
encoded_log <- base64encode(temp)
browseURL(paste0(pre_fill_link, encoded_log))
}
|
# IODS final project
# Agata Dominowska
# 12.12.2017
# This file contains the R code for IODS 2017: final project
# The package used in this project comes from FactoMineR.
# The goal of the project is to perform Multiple Correspondence Analysis on the pre-processed data set.
# Accessing the necessary libraries.
library(FactoMineR)
library(dplyr)
# Importing the data set, having a look at it
data("hobbies")
glimpse(hobbies)
# There are 8403 observations and 23 variables in the data.
# The data will be explored in more detail on the final project website.
# Removing the data points without value attached - this way only complete cases are left in the data set.
complete_hobbies <- filter(hobbies, complete.cases(hobbies) == TRUE)
glimpse(complete_hobbies)
# The number of observations is now diminished to 6905.
# Renaming the two-factor levels, which are either 1 or 0, to 'yes' and 'no', in order to use them for labels later on.
# TV is the only one with more than 2 levels.
levels(complete_hobbies$Reading) <- c('No','Yes')
levels(complete_hobbies$`Listening music`) <- c('No','Yes')
levels(complete_hobbies$Cinema) <- c('No','Yes')
levels(complete_hobbies$Show) <- c('No','Yes')
levels(complete_hobbies$Exhibition) <- c('No','Yes')
levels(complete_hobbies$Computer) <- c('No','Yes')
levels(complete_hobbies$Sport) <- c('No','Yes')
levels(complete_hobbies$Walking) <- c('No','Yes')
levels(complete_hobbies$Travelling) <- c('No','Yes')
levels(complete_hobbies$`Playing music`) <- c('No','Yes')
levels(complete_hobbies$Collecting) <- c('No','Yes')
levels(complete_hobbies$Volunteering) <- c('No','Yes')
levels(complete_hobbies$Mechanic) <- c('No','Yes')
levels(complete_hobbies$Gardening) <- c('No','Yes')
levels(complete_hobbies$Knitting) <- c('No','Yes')
levels(complete_hobbies$Cooking) <- c('No','Yes')
levels(complete_hobbies$Fishing) <- c('No','Yes')
# The columns to be kept in the data. I am interested in the most of the variables, but I leave out music-related data
# (not very interesting from the point of view of my hypotheses) and the number of activities.
keep_columns <- c("Reading", "Cinema", "Show", "Exhibition", "Computer", "Sport", "Walking", "Travelling", "Collecting", "Volunteering",
"Mechanic", "Gardening", "Knitting", "Cooking", "Fishing", "Sex", "Age", "Marital status", "Profession")
complete_hobbies <-select(complete_hobbies, one_of(keep_columns))
summary(complete_hobbies)
# Saving the data set to a .csv file.
setwd("~\\GitHub\\IODS-final")
write.csv(complete_hobbies, file = "hobbies.csv", row.names=TRUE)
| /IODS-final-data-wrangling.R | no_license | aadomino/IODS-final | R | false | false | 2,577 | r | # IODS final project
# Agata Dominowska
# 12.12.2017
# This file contains the R code for IODS 2017: final project
# The package used in this project comes from FactoMineR.
# The goal of the project is to perform Multiple Correspondence Analysis on the pre-processed data set.
# Accessing the necessary libraries.
library(FactoMineR)
library(dplyr)
# Importing the data set, having a look at it
data("hobbies")
glimpse(hobbies)
# There are 8403 observations and 23 variables in the data.
# The data will be explored in more detail on the final project website.
# Removing the data points without value attached - this way only complete cases are left in the data set.
complete_hobbies <- filter(hobbies, complete.cases(hobbies) == TRUE)
glimpse(complete_hobbies)
# The number of observations is now diminished to 6905.
# Renaming the fwo-factor levels, which are either 1 or 0, to 'yes' and 'no', in order to use them for labels later on.
# TV is the only one with more than 2 levels.
levels(complete_hobbies$Reading) <- c('No','Yes')
levels(complete_hobbies$`Listening music`) <- c('No','Yes')
levels(complete_hobbies$Cinema) <- c('No','Yes')
levels(complete_hobbies$Show) <- c('No','Yes')
levels(complete_hobbies$Exhibition) <- c('No','Yes')
levels(complete_hobbies$Computer) <- c('No','Yes')
levels(complete_hobbies$Sport) <- c('No','Yes')
levels(complete_hobbies$Walking) <- c('No','Yes')
levels(complete_hobbies$Travelling) <- c('No','Yes')
levels(complete_hobbies$`Playing music`) <- c('No','Yes')
levels(complete_hobbies$Collecting) <- c('No','Yes')
levels(complete_hobbies$Volunteering) <- c('No','Yes')
levels(complete_hobbies$Mechanic) <- c('No','Yes')
levels(complete_hobbies$Gardening) <- c('No','Yes')
levels(complete_hobbies$Knitting) <- c('No','Yes')
levels(complete_hobbies$Cooking) <- c('No','Yes')
levels(complete_hobbies$Fishing) <- c('No','Yes')
# The columns to be kept in the data. I am interested in the most of the variables, but I leave out music-related data
# (not very interesting from the point of view of my hypotheses) and the number of activities.
keep_columns <- c("Reading", "Cinema", "Show", "Exhibition", "Computer", "Sport", "Walking", "Travelling", "Collecting", "Volunteering",
"Mechanic", "Gardening", "Knitting", "Cooking", "Fishing", "Sex", "Age", "Marital status", "Profession")
complete_hobbies <-select(complete_hobbies, one_of(keep_columns))
summary(complete_hobbies)
# Saving the data set to a .csv file.
setwd("~\\GitHub\\IODS-final")
write.csv(complete_hobbies, file = "hobbies.csv", row.names=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClusterAgnesSpearman.R
\name{ClusterAgnesSpearman}
\alias{ClusterAgnesSpearman}
\title{Cluster Agnes /Spearman}
\usage{
ClusterAgnesSpearman(expressionData)
}
\value{
[[data.frame]]
}
\description{
Cluster Agnes /Spearman
}
| /man/ClusterAgnesSpearman.Rd | no_license | roderickslieker/CONQUER | R | false | true | 302 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClusterAgnesSpearman.R
\name{ClusterAgnesSpearman}
\alias{ClusterAgnesSpearman}
\title{Cluster Agnes /Spearman}
\usage{
ClusterAgnesSpearman(expressionData)
}
\value{
[[data.frame]]
}
\description{
Cluster Agnes /Spearman
}
|
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setInverse/getInverse for the cached inverse (NULL until it is stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  # Parameter kept named `solve` for call compatibility, although it
  # shadows base::solve inside this function.
  setInverse <- function(solve) {
    cached_inverse <<- solve
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## The inverse is computed at most once: if a cached value exists it is
## returned immediately (announced via a message); otherwise it is computed
## with solve(), stored in the cache, and returned. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  # Early exit on a cache hit
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
| /cachematrix.R | no_license | reche025/ProgrammingAssignment2 | R | false | false | 733 | r |
makeCacheMatrix <- function(x = matrix()) {
#Processes matrix object and
#sets list of functions to be passed to "cacheSolve" function
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
cacheSolve <- function(x, ...) {
# Return a matrix that is the inverse of x
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
else{
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
return(m)
}
}
|
#"Discrete predictors in linear models"
#' ## Anthelmintics and goat growth
#'
#' How is goat weight gain related to an animal's initial weight and to the intensity of prophylaxis against parasitic diseases?
#'
#'
#' - `Treatment` - anthelmintic treatment (standard, intensive)
#' - `Weightgain` - weight gain, kg
#' - `Initial.wt` - initial weight, kg
#' Example from the data library
#' http://www.statlab.uni-heidelberg.de/data/ancova/goats.story.html</div>
#'
#' ## Read the data and get to know it
library(readxl)
goat <- read_excel("data/goats.xlsx", sheet = 1)
head(goat)
str(goat)
# Count missing values per column
colSums(is.na(goat))
# Rename the variables for brevity
colnames(goat) <- c("Treatment", "Wt", "Init")
# Sample sizes per treatment group
table(goat$Treatment)
goat$Treatment <- factor(goat$Treatment)
#' ## Are there any outliers?
library(ggplot2)
# Cleveland dot plots: each observation plotted against its row number
gg_dot <- ggplot(goat, aes(y = 1:nrow(goat))) + geom_point()
gg_dot + aes(x = Wt)
gg_dot + aes(x = Init)
## Build the model #####
MG <- lm(Wt ~ Init + Treatment, data = goat)
#'
#' In this model we tacitly assume that the relationship between gain and initial weight is the same in both groups (no interaction between predictors). But! This must be checked explicitly (more on that later)
#'
## Check the applicability conditions #####
#' ## Is there collinearity between initial weight and treatment?
# FIX: vif() comes from the car package, which was previously loaded only
# further down -- load it before the first call so the script runs top to bottom.
library(car)
vif(MG)
ggplot(goat, aes(x = Treatment, y = Init)) + geom_boxplot()
# Diagnostic data frame: fitted values, standardized residuals, Cook's distances
MG_diag <- fortify(MG)
library(gridExtra)
# Four diagnostic panels side by side
grid.arrange(
  ggplot(MG_diag, aes(x = seq_len(nrow(MG_diag)), y = .cooksd)) +
    geom_bar(stat = "identity"),
  ggplot(data = MG_diag, aes(x = .fitted, y = .stdresid)) +
    geom_point() + geom_hline(yintercept = 0),
  ggplot(data = MG_diag, aes(x = Init, y = .stdresid)) +
    geom_point() + geom_hline(yintercept = 0),
  ggplot(data = MG_diag, aes(x = Treatment, y = .stdresid)) +
    geom_boxplot(),
  nrow = 1)
#' Everything looks fine
#' ## Normality of the residual distribution
# car is already loaded above; qqPlot() comes from it
qqPlot(MG)
#' ## Plot of the model
gg_g <- ggplot(data = goat, aes(y = Wt, x = Init, colour = Treatment)) +
  geom_point() +
  labs(x = "Начальный вес, кг",
       y = "Привес, кг") +
  scale_colour_discrete("Способ обработки",
                        breaks = c("intensive", "standard"),
                        labels = c("Интенсивный", "Стандартный"))
gg_g + geom_smooth(method = "lm")
#' ## Results #####
#'
summary(MG)
#'
#' ## Changing the base level
#'
#' This is a purely formal procedure that changes nothing in essence, but it is sometimes needed for more convenient visualization
goat$Treatment <- relevel(goat$Treatment, ref = "standard")
levels(goat$Treatment)
MG1 <- lm(Wt ~ Init + Treatment, data = goat)
summary(MG1)
#'
library(car)
# NOTE(review): Anova() is applied to MG (fitted before releveling), not MG1;
# the type III table is the same either way, but confirm this is intended.
Anova(MG, type = 3)
#'
#' ## Does the length of service at cadmium-producing plants affect vital lung capacity?
#'
#' Example taken from the book:
#' P. Armitage and G. Berry (1987), Statistical Methods in Medical Research, 2nd ed., Blackwell, p.286.
#'
#' The data are available in the `ISwR` package
#'
#' Variables:
#'
#' `group` - Group 1: more than 10 years in the industry; Group 2 - less than 10 years; Group 3 - not exposed.
#'
#' `age` - age
#'
#' `vital.capacity` - lung volume (l).
#'
## Load the data #####
vit <- read.table("data/vitcap2.csv", header = TRUE, sep = ";")
#' ## Slightly transform the original dataset
# Recode the numeric group codes into readable labels
vit$Group [vit$group == 1] <- "Long exposed"
vit$Group [vit$group == 2] <- "Short exposed"
vit$Group [vit$group == 3] <- "Not exposed"
#' ## Change the order of the levels
vit$Group <- factor(vit$Group, levels = c("Not exposed", "Short exposed", "Long exposed"))
levels(vit$Group)
M1 <- lm(vital.capacity ~ Group, data = vit)
library(car)
Anova(M1, type = 3)
#' ## Geometric interpretation of a model with a discrete predictor
#'
#' This will be a plot showing the mean values of the dependent variable computed for each level of the discrete factor
#'
MyData <- data.frame(Group = levels(vit$Group))
MyData$Group <- factor(MyData$Group, levels = c("Not exposed", "Short exposed", "Long exposed"))
# Model predictions and their standard errors, one per group level
MyData$Predicted <- predict(M1, newdata = MyData, se.fit = TRUE)$fit
MyData$SE <- predict(M1, newdata = MyData, se.fit = TRUE)$se.fit
library(ggplot2)
ggplot(MyData, aes(x = Group, y = Predicted)) + geom_bar(stat = "identity", aes(fill = Group)) + geom_errorbar(aes(ymin = Predicted - SE, ymax = Predicted + SE), width = 0.2)
summary(M1)
#'
#' Where did one level of the factor go?
#'
#' >- The value "Not exposed" is used as the base level of the `Group` predictor
#'
#'
#'
#' ## Exercise
#' 1. Change the base level of the `Group` variable to "Long exposed"
#' 2. Build a model analogous to `M1`
#' 3. Compute the model-predicted values for each level of the `Group` factor
#'
#'
#'
#' Can the obtained results be trusted?
#'
M1_diag <- fortify(M1)
qplot(vit$age, M1_diag$.stdresid) + geom_smooth(method = "lm")
#'
#' An obvious pattern in the residuals!
#'
#' One more variable must be included - a **`covariate`**
#'
#' ## Analysis of covariance (ANCOVA)
#'
#' ### Changing the model
M3 <- lm(vital.capacity ~ Group + age , data = vit)
#'
#' ## Model diagnostics
#'
M3_diag <- fortify(M3)
qplot(vit$age, M3_diag$.stdresid) + geom_smooth(method = "lm")
#'
#' The pattern is gone!
#'
summary(M3)
anova(M3)
#'
#' A contradiction with the `summary()` results!
#'
#' ## Let's change the order of the predictors
anova(lm(formula = vital.capacity ~ age + Group, data = vit))
#'
#' The test result depends on the order of the predictors.
#' Why?
#'
#'
#' ## Option 1. Sequential testing (SS type I)
#'
#' The factors are tested in the order they enter the model. The result depends on the order of inclusion.
anova(lm(formula = vital.capacity ~ Group + age, data = vit))
anova(lm(formula = vital.capacity ~ age + Group, data = vit))
#'
#' ## Option 2. Hierarchical testing (SS type III)
#'
#' Each factor is tested against the model without it but with all the others included.
#' Needed when there are many factors and samples of unequal size. Then the result does not depend on the order in which factors enter the model.
library(car)
Anova(M3, type = 3)
| /5.1_categorical-no_interactions_code.R | no_license | varmara/glmintro | R | false | false | 8,072 | r | #"Дискретные предикторы в линейных моделях"
#' ## Глистогонные и рост коз
#'
#' Как связан прирост массы коз с начальным весом животного и интенсивностью профилактики паразитарных заболеваний?
#'
#'
#' - `Treatment` - обработка от глистов (стандартная, интенсивная)
#' - `Weightgain` - привес, кг
#' - `Initial.wt` - начальный вес, кг
#'Пример из библиотеки данных
#' http://www.statlab.uni-heidelberg.de/data/ancova/goats.story.html</div>
#'
#' ## Читаем данные и знакомимся с ними
library(readxl)
goat <- read_excel("data/goats.xlsx", sheet = 1)
head(goat)
str(goat)
colSums(is.na(goat))
# переименуем переменные для краткости
colnames(goat) <- c("Treatment", "Wt", "Init")
# объемы выборок
table(goat$Treatment)
goat$Treatment <- factor(goat$Treatment)
#' ## Есть ли выбросы?
library(ggplot2)
gg_dot <- ggplot(goat, aes(y = 1:nrow(goat))) + geom_point()
gg_dot + aes(x = Wt)
gg_dot + aes(x = Init)
##Строим модель#####
MG <- lm(Wt ~ Init + Treatment, data = goat)
#'
#' В этой модели мы молчаливо считаем, что характер связи прироста коз с начальным весом будет одинаковым (нет взаимодействия предикторов). Но! Это надо специально проверять (об этом далее)
#'
##Проверяем условия применимости #####
#' ## Нет ли колинеарности между начальным весом и тритментом
vif(MG)
ggplot(goat, aes(x = Treatment, y = Init)) + geom_boxplot()
MG_diag <- fortify(MG)
library(gridExtra)
grid.arrange(
ggplot(MG_diag, aes(x = 1:nrow(MG_diag), y = .cooksd)) +
geom_bar(stat = "identity"),
ggplot(data = MG_diag, aes(x = .fitted, y = .stdresid)) +
geom_point() + geom_hline(yintercept = 0),
ggplot(data = MG_diag, aes(x = Init, y = .stdresid)) +
geom_point() + geom_hline(yintercept = 0),
ggplot(data = MG_diag, aes(x = Treatment, y = .stdresid)) +
geom_boxplot(),
nrow = 1)
#' Все хорошо
#' ## Нормальнсть распределения остатков
library(car)
qqPlot(MG)
#' ## График модели
gg_g <- ggplot(data = goat, aes(y = Wt, x = Init, colour = Treatment)) +
geom_point() +
labs(x = "Начальный вес, кг",
y = "Привес, кг") +
scale_colour_discrete("Способ обработки",
breaks = c("intensive", "standard"),
labels = c("Интенсивный", "Стандартный"))
gg_g + geom_smooth(method = "lm")
#' ##Результаты #####
#'
summary(MG)
#'
#' ##Меняем базовый уровень
#'
#' Это чисто формальная процедура от которой ничего не измеяется по сути, но это иногда необходимо для более удобной визуализации
goat$Treatment <- relevel(goat$Treatment, ref = "standard")
levels(goat$Treatment)
MG1 <- lm(Wt ~ Init + Treatment, data = goat)
summary(MG1)
#'
library(car)
Anova(MG, type = 3)
#'
#' ## Влияет ли стаж работы на предприятиях, вырабатывающих кадмий, на жизненнй объем легких?
#'
#' Пример взят из книги:
#' P. Armitage and G. Berry (1987), Statistical Methods in Medical Research, 2nd ed., Blackwell, p.286.
#'
#' Данные представлены в пакете `ISwR`
#'
#' Переменные:
#'
#' `group` - Группа 1: Более 10 лет в отрасли; Группа 2 - менее 10 лет; Группа 3 - не подвергались воздействию.
#'
#' `age` - возраст
#'
#' `vital.capacity` - объем легких (л).
#'
## Загружаем данные #####
vit <- read.table("data/vitcap2.csv", header = TRUE, sep = ";")
#' ##Немного преобразуем исходный датасет
vit$Group [vit$group == 1] <- "Long exposed"
vit$Group [vit$group == 2] <- "Short exposed"
vit$Group [vit$group == 3] <- "Not exposed"
#' ## Меняем порядок уровней
vit$Group <- factor(vit$Group, levels = c("Not exposed", "Short exposed", "Long exposed"))
levels(vit$Group)
M1 <- lm(vital.capacity ~ Group, data = vit)
library(car)
Anova(M1, type = 3)
#' ## Геометрическая интерпретация модели с дискретным предиктором
#'
#' Это будет график, отражающий средние значения зависимой переменной, вычисленные для каждой градации дискретного фактора
#'
MyData <- data.frame(Group = levels(vit$Group))
MyData$Group <- factor(MyData$Group, levels = c("Not exposed", "Short exposed", "Long exposed"))
MyData$Predicted <- predict(M1, newdata = MyData, se.fit = TRUE)$fit
MyData$SE <- predict(M1, newdata = MyData, se.fit = TRUE)$se.fit
library(ggplot2)
ggplot(MyData, aes(x = Group, y = Predicted)) + geom_bar(stat = "identity", aes(fill = Group)) + geom_errorbar(aes(ymin = Predicted - SE, ymax = Predicted + SE), width = 0.2)
summary(M1)
#'
#' Куда делась одна градация фактора?
#'
#' >- В качестве базового уровня предиктора `Group` взято значение "Not exposed"
#'
#'
#'
#' ## Задание
#' 1. Измените базовый уровень переменной `Group` на "Long exposed"
#' 2. Постройте модель, аналогичную `M1`
#' 3. Вычислите предсказанные моделью значения для каждой градации фактора `Group`
#'
#'
#'
#' Можно ли доверять полученным результатам?
#'
M1_diag <- fortify(M1)
qplot(vit$age, M1_diag$.stdresid) + geom_smooth(method = "lm")
#'
#' Очевидный паттерн в остатках!
#'
#' Необходимо включать еще одну переменную - **`ковариату`**
#'
#' ## Analysis of covariance (ANCOVA)
#'
#' ###Меняем модель
M3 <- lm(vital.capacity ~ Group + age , data = vit)
#'
#' ##Диагностика модели
#'
M3_diag <- fortify(M3)
qplot(vit$age, M3_diag$.stdresid) + geom_smooth(method = "lm")
#'
#' Паттерн исчез!
#'
summary(M3)
anova(M3)
#'
#' Противоречие с результатами `summary()`!
#'
#' ## Поменяем порядок предикторов
anova(lm(formula = vital.capacity ~ age + Group, data = vit))
#' `
#' Результат тестирования зависит от порядка предикторов.
#' Почему?
#'
#'
#' ## Вариант 1. Последовательное тестирование (SS type I)
#'
#' Факторы тестируются в порядке включения в модель. Результат тестирования зависит от порядка включения.
anova(lm(formula = vital.capacity ~ Group + age, data = vit))
anova(lm(formula = vital.capacity ~ age + Group, data = vit))
#'
#' ## Вариант 2. Иерархическое тестирование (SS type III)
#'
#' Каждый из факторов по отношению к модели только без него, но со всеми остальными.
#' Нужно, если много факторов и выборки разного размера. Тогда результат не будет зависеть от порядка включения факторов в модель.
library(car)
Anova(M3, type = 3)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/files.R
\name{GetDataDir}
\alias{GetDataDir}
\title{Find the local directory containing ISD-lite records}
\usage{
GetDataDir()
}
\value{
string - path of directory containing local ISD data.
}
\description{
There is no usable web version of ISD, so we need to work
with a local copy. Set the environment variable
ISD.dir on each system to identify this.
}
\details{
If the variable is not set - data won't be available.
}
| /man/GetDataDir.Rd | permissive | oldweather/ISDlite | R | false | false | 512 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/files.R
\name{GetDataDir}
\alias{GetDataDir}
\title{Find the local directory containing ISD-lite records}
\usage{
GetDataDir()
}
\value{
string - path of directory containing local ISD data.
}
\description{
There is no useable web version of ISD, so we need to work
with a local copy. Set the environment variable
ISD.dir on each system to identify this.
}
\details{
If the variable is not set - data won't be available.
}
|
#Dear Student,
#
#Welcome to the dataset for the homework exercise.
#
#Instructions for this dataset:
#     You have only been supplied vectors. You will need
#     to create the matrices yourself.
#     Matrices:
#      - FreeThrows
#      - FreeThrowAttempts
#
#Sincerely,
#Kirill Eremenko
#www.superdatascience.com
#Copyright: These datasets were prepared using publicly available data.
#           However, theses scripts are subject to Copyright Laws.
#           If you wish to use these R scripts outside of the R Programming Course
#           by Kirill Eremenko, you may do so by referencing www.superdatascience.com in your work.
#Comments:
#Seasons are labeled based on the first year in the season
#E.g. the 2012-2013 season is preseneted as simply 2012
#Notes and Corrections to the data:
#Kevin Durant: 2006 - College Data Used
#Kevin Durant: 2005 - Proxied With 2006 Data
#Derrick Rose: 2012 - Did Not Play
#Derrick Rose: 2007 - College Data Used
#Derrick Rose: 2006 - Proxied With 2007 Data
#Derrick Rose: 2005 - Proxied With 2007 Data
#Seasons
# Column labels: one entry per season, 2005-06 through 2014-15
Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014")
#Players
# Row labels: one entry per player, in the same order as the vectors below
Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade")
#Free Throws
# One vector per player: free throws made, aligned with Seasons
KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196)
JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141)
LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375)
CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189)
DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143)
ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179)
ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289)
KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146)
DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152)
DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284)
#Matrix
# Rows = players, columns = seasons; rbind's default row names
# (e.g. "KobeBryant_FT") are replaced by the Players vector below
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
colnames(FreeThrows) <- Seasons
rownames(FreeThrows) <- Players
FreeThrows
#Free Throw Attempts
# Same layout as the FT vectors: free throws attempted per season
KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241)
JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176)
LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528)
CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237)
DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271)
ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232)
ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321)
KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171)
DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187)
DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370)
# Attempts matrix, same shape and labels as FreeThrows
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
colnames(FreeThrowAttempts) <- Seasons
rownames(FreeThrowAttempts) <- Players
FreeThrowAttempts
| /R Stuff/Section4-Homework-Data.R | no_license | brett-davi5/R-Studio-and-GGPlotMap | R | false | false | 3,169 | r | #Dear Student,
#
#Welcome to the dataset for the homework exercise.
#
#Instructions for this dataset:
# You have only been supplied vectors. You will need
# to create the matrices yourself.
# Matrices:
# - FreeThrows
# - FreeThrowAttempts
#
#Sincerely,
#Kirill Eremenko
#www.superdatascience.com
#Copyright: These datasets were prepared using publicly available data.
# However, theses scripts are subject to Copyright Laws.
# If you wish to use these R scripts outside of the R Programming Course
# by Kirill Eremenko, you may do so by referencing www.superdatascience.com in your work.
#Comments:
#Seasons are labeled based on the first year in the season
#E.g. the 2012-2013 season is preseneted as simply 2012
#Notes and Corrections to the data:
#Kevin Durant: 2006 - College Data Used
#Kevin Durant: 2005 - Proxied With 2006 Data
#Derrick Rose: 2012 - Did Not Play
#Derrick Rose: 2007 - College Data Used
#Derrick Rose: 2006 - Proxied With 2007 Data
#Derrick Rose: 2005 - Proxied With 2007 Data
#Seasons
Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014")
#Players
Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade")
#Free Throws
KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196)
JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141)
LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375)
CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189)
DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143)
ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179)
ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289)
KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146)
DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152)
DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284)
#Matrix
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
colnames(FreeThrows) <- Seasons
rownames(FreeThrows) <- Players
FreeThrows
#Free Throw Attempts
KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241)
JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176)
LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528)
CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237)
DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271)
ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232)
ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321)
KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171)
DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187)
DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370)
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
colnames(FreeThrowAttempts) <- Seasons
rownames(FreeThrowAttempts) <- Players
FreeThrowAttempts
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/editfile.R
\name{editfile}
\alias{editfile}
\title{Read edits edits from free-form textfile}
\usage{
editfile(file, type = c("all", "num", "cat", "mix"), ...)
}
\arguments{
\item{file}{name of text file to read in}
\item{type}{type of edits to extract. Currently, only 'num' (numerical), 'cat' (categorical) and 'all' are implemented.}
\item{...}{extra parameters that are currently ignored}
}
\value{
\code{\link{editset}} with all edits if \code{type=all}, \code{\link{editarray}} if \code{type='cat'},
\code{\link{editmatrix}} if \code{type='num'}, \code{\link{editset}} with conditional edits if \code{type='mix'}.
If the return value is a \code{list}, the elements are named \code{numedits} and \code{catedits}.
}
\description{
This utility function allows for free editrule definition in a file. One can extract
only the numerical (\code{type='num'}), only the categorical (\code{type='cat'}),
only the conditional (\code{type='mix'}) or all edits (default), in which case an \code{\link{editset}} is returned.
The function first parses all assignments in the file, so it is possible to compute or read
a list of categories defining a datamodel for example.
}
| /pkg/man/editfile.Rd | no_license | jenifferYingyiWu/editrules | R | false | false | 1,220 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/editfile.R
\name{editfile}
\alias{editfile}
\title{Read edits edits from free-form textfile}
\usage{
editfile(file, type = c("all", "num", "cat", "mix"), ...)
}
\arguments{
\item{file}{name of text file to read in}
\item{type}{type of edits to extract. Currently, only 'num' (numerical), 'cat' (categorical) and 'all' are implemented.}
\item{...}{extra parameters that are currently ignored}
}
\value{
\code{\link{editset}} with all edits if \code{type=all}, \code{\link{editarray}} if \code{type='cat'},
\code{\link{editmatrix}} if \code{type='num'}, \code{\link{editset}} with conditional edits if \code{type='mix'}.
If the return value is a \code{list}, the elements are named \code{numedits} and \code{catedits}.
}
\description{
This utility function allows for free editrule definition in a file. One can extract
only the numerical (\code{type='num'}), only the categorical (\code{type='cat'}) or all
edits (default) in which case an \code{\link{editset}} is returned.
The function first parses all assignments in the file, so it is possible to compute or read
a list of categories defining a datamodel for example.
}
|
#' p-norm of a numeric vector (default: the Fibonacci prefix c(1, 1, 2, 3, 5, 8))
#'
#' Generalized in two backward-compatible ways: the data vector is now a
#' parameter (defaulting to the original hard-coded values), and `p` may be
#' a vector, in which case one norm per element of `p` is returned.
#' Previously a vector `p` silently produced a single wrong number because
#' the powers were mixed inside one sum() call.
#'
#' @param p norm order(s); numeric scalar or vector of positive values.
#' @param x numeric vector to take the norm of.
#' @return numeric vector with one p-norm per element of `p`.
fab_norm <- function(p, x = c(1, 1, 2, 3, 5, 8)) {
  vapply(p, function(q) (sum(abs(x)^q))^(1 / q), numeric(1))
}
# fab_norm(c(1, 2)) now returns c(fab_norm(1), fab_norm(2)) directly,
# so the old workarounds are no longer needed:
# c(fab_norm(1), fab_norm(2))
# map_dbl(c(1,2), fab_norm)
x <- c(1, 1, 2, 3, 5, 8)
(sum(abs(x)^p))^(1/p)
}
# fab_norm(c(1, 2)) # gives wrong result
# possible workaround
# c(fab_norm(1), fab_norm(2))
# map_dbl(c(1,2), fab_norm)
|
#' Plot coefficient paths for each covariate of a penalized paired-comparison model.
#'
#' One panel per covariate; each of the m lines traces that covariate's
#' reparameterized object-specific coefficient along the penalty path. The
#' x-axis is the L1 norm of all pairwise coefficient differences, rescaled
#' to [0, 1]. If the model carries deviances, a dashed red line marks the
#' norm at the minimum deviance.
#'
#' @param model fitted model object; must carry coefs, X, acoefs, m,
#'   n.theta, labels, coefs.repar and optionally deviances.
#' @param colors optional vector of m line colors (default: all black).
#' @param equal.ranges if TRUE, one common y-range is used for all panels.
#' @param subs optional vector of panel subtitles, one per covariate.
singlepaths <- function(model, colors = NULL, equal.ranges = FALSE, subs = NULL){
  coefs <- model$coefs
  covar <- colnames(model$X)
  acoefs <- model$acoefs
  # Penalty norm per path step, rescaled to [0, 1]
  norm <- rowSums(abs(coefs %*% acoefs))
  norm <- norm / max(norm)
  m <- model$m
  n.theta <- model$n.theta
  labels <- model$labels
  coefs <- model$coefs.repar
  # Skip the n.theta threshold columns and the m intercepts; keep only the
  # covariate effects. (The original also extracted theta/intercepts into
  # locals but never used them -- removed.)
  gamma <- coefs[, (n.theta + m + 1):ncol(coefs)]
  p <- ncol(gamma) / m
  # Close-to-square panel grid
  cols <- floor(sqrt(p))
  rows <- ceiling(p / cols)
  layout(matrix(1:(rows * cols), nrow = rows, byrow = TRUE))
  on.exit(layout(1), add = TRUE)  # restore the layout even if plotting fails
  y.range <- range(gamma)
  if (!is.null(model$deviances)) {
    x.axis.min <- norm[which.min(model$deviances)]
  }
  if (is.null(colors)) {
    colors <- rep(1, m)
  }
  index <- 1
  for (i in seq_len(p)) {
    # Per-panel y-range unless a common range was requested
    if (!equal.ranges) {
      y.range <- range(gamma[, index:(index + m - 1)])
    }
    plot(norm, gamma[, index], ylim = y.range, type = "l", main = covar[i], ylab = "",
         xlab = expression(sum(sum(abs(beta["rj"]-beta["sj"]),"r<s"),"j")/max(sum(sum(abs(beta["rj"]-beta["sj"]),"r<s"),"j"))),
         col = colors[1])
    for (u in seq_len(m - 1)) {
      lines(norm, gamma[, index + u], col = colors[u + 1])
    }
    # Label the line endpoints on the right-hand axis
    axis(4, at = gamma[nrow(gamma), index:(index + m - 1)], labels = labels, las = 2)
    mtext(subs[i], side = 3, line = 0.5, cex = par()$cex)
    if (!is.null(model$deviances)) {
      abline(v = x.axis.min, lty = 2, col = 2)
    }
    index <- index + m
  }
}
coefs <- model$coefs
covar <- colnames(model$X)
acoefs <- model$acoefs
norm <- rowSums(abs(coefs%*%acoefs))
norm <- norm/max(norm)
m <- model$m
n.theta <- model$n.theta
labels <- model$labels
coefs <- model$coefs.repar
if(n.theta>0){
theta <- coefs[,1:n.theta,drop=FALSE]
}
intercepts <- coefs[,(n.theta+1):(n.theta+m)]
gamma <- coefs[,(n.theta+m+1):ncol(coefs)]
p <- ncol(gamma)/(m)
cols <- floor(sqrt(p))
rows <- ceiling((p)/cols)
layout(matrix(1:(rows*cols),nrow=rows,byrow=TRUE))
y.range <- range(gamma)
if(!is.null(model$deviances)){
x.axis.min <-norm[which.min(model$deviances)]
}
if(is.null(colors)){colors <- rep(1,m)}
index <- 1
for(i in 1:p){
if(!equal.ranges){y.range <- range(gamma[,index:(index+m-1)])}
plot(norm, gamma[,index],ylim=y.range,type="l",main=covar[i],ylab="",
xlab=expression(sum(sum(abs(beta["rj"]-beta["sj"]),"r<s"),"j")/max(sum(sum(abs(beta["rj"]-beta["sj"]),"r<s"),"j"))),
col=colors[1])
for(u in 1:(m-1)){
lines(norm,gamma[,index+u],col=colors[u+1])
}
axis(4,at=gamma[nrow(gamma),index:(index+m-1)],labels = labels, las=2)
mtext(subs[i],side=3,line=0.5,cex=par()$cex)
if(!is.null(model$deviances)){
abline(v=x.axis.min,lty=2,col=2)
}
index <- index+m
}
layout(1)
} |
#' Collect pages grouped by their tags
#'
#' Thin wrapper around get_pages_by_groups() with the grouping field
#' fixed to 'tags'.
#'
#' @noRd
get_tags <- function(pages) get_pages_by_groups(pages, gby = 'tags')
#' Collect pages grouped by their categories
#'
#' Thin wrapper around get_pages_by_groups() with the grouping field
#' fixed to 'categories'.
#'
#' @noRd
get_categories <- function(pages) get_pages_by_groups(pages, gby = 'categories')
#' Group pages by a metadata field
#'
#' For every distinct value of the `gby` field found across `pages`
#' (default 'tags'), builds a record with the matching pages (each trimmed
#' to title/file/date/link), the group name and the page count. Returns a
#' named list keyed by group name, plus an `all` element holding every group.
#'
#' @noRd
get_pages_by_groups <- function(pages, gby = 'tags'){
  get_pages_by_group = function(g){
    # vapply (not sapply) so the mask is always a logical vector; with
    # zero pages sapply returned list(), which is an invalid subscript.
    keep = vapply(pages, function(page) g %in% page[[gby]], logical(1))
    p = pages[keep]
    p = lapply(p, '[', c('title', 'file', 'date', 'link'))
    list(pages = p, name = g, count = length(p))
  }
  get_all_groups <- function(){
    # Union of the gby field over all pages = all distinct group values
    g = lapply(pages, '[[', gby)
    Reduce('union', g)
  }
  x = lapply(get_all_groups(), get_pages_by_group)
  y = setNames(x, lapply(x, pluck('name')))
  y$all = x
  y
}
| /R/bloghelpers.R | no_license | fusuma/slidify | R | false | false | 741 | r | #' Get pages by tags
#'
#' @noRd
get_tags <- function(pages){
get_pages_by_groups(pages, gby = 'tags')
}
#' Get pages by categoris
#'
#' @noRd
get_categories <- function(pages){
get_pages_by_groups(pages, gby = 'categories')
}
#' Get pages by group
#'
#' @noRd
get_pages_by_groups <- function(pages, gby = 'tags'){
get_pages_by_group = function(g){
p = pages[sapply(pages, function(page) g %in% page[[gby]])]
p = lapply(p, '[', c('title', 'file', 'date', 'link'))
list(pages = p, name = g, count = length(p))
}
get_all_groups <- function(){
g = lapply(pages, '[[', gby)
Reduce('union', g)
}
x = lapply(get_all_groups(), get_pages_by_group)
y = setNames(x, lapply(x, pluck('name')))
y$all = x
y
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modules-sc-utils.R
\name{load_scseq_qs}
\alias{load_scseq_qs}
\title{Load SingleCellExperiment from qs file}
\usage{
load_scseq_qs(
dataset_dir,
meta = NULL,
groups = NULL,
with_logs = FALSE,
with_counts = FALSE
)
}
\arguments{
\item{dataset_dir}{Path to folder with scseq.qs file}
}
\value{
SingleCellExperiment
}
\description{
Also attaches clusters from last applied leiden resolution and stores resolution.
}
\keyword{internal}
| /man/load_scseq_qs.Rd | permissive | saisaitian/dseqr | R | false | true | 520 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modules-sc-utils.R
\name{load_scseq_qs}
\alias{load_scseq_qs}
\title{Load SingleCellExperiment from qs file}
\usage{
load_scseq_qs(
dataset_dir,
meta = NULL,
groups = NULL,
with_logs = FALSE,
with_counts = FALSE
)
}
\arguments{
\item{dataset_dir}{Path to folder with scseq.qs file}
}
\value{
SingleCellExperiment
}
\description{
Also attaches clusters from last applied leiden resolution and stores resolution.
}
\keyword{internal}
|
# Import
# dat_long <- readxl::read_excel("personal_tt.xlsx")

# Reshape the raw time-tracking sheet (activities in rows, dates in columns)
# into one tidy row per day with one column per activity, typed and with
# calendar roll-up columns added.
# NOTE(review): the function body ends in an assignment, so the result is
# returned invisibly; callers relying on auto-printing should be aware.
clean_data <- function(dat) {
  dat_long <-
    dat %>%
    # Everything to character first so pivot_longer can mix value types
    mutate_all(~ as.character(.)) %>%
    select(-starts_with("WEEK")) %>%
    # Columns 3 onward are assumed to be date headers in "m/d/y" form -- TODO confirm
    pivot_longer(
      col = 3:ncol(.),
      names_to = "date_ins",
      values_to = "value"
    ) %>%
    mutate(date_ins = mdy(date_ins)) %>%
    filter(!is.na(date_ins), !is.na(activity)) %>%
    select(-unit) %>%
    # Back to wide: one column per activity, one row per date
    pivot_wider(names_from = activity, values_from = value) %>%
    # Wake/sleep times are Excel day fractions; convert to POSIXct
    # (Excel's epoch is 1899-12-30)
    mutate_at(
      c("get_up", "bed_time"),
      ~ as.POSIXct(as.numeric(.) * 60 * 60 * 24, origin = "1899-12-30", tz = "GMT")
    ) %>%
    # Every remaining column except the time/date and social-contact ones
    # holds numeric values stored as text
    mutate_at(
      vars(
        -get_up,
        -bed_time,
        -date_ins,
        -parents,
        -partner,
        -brother,
        -family_others,
        -friends
      ),
      as.numeric) %>%
    clean_names() %>%
    mutate(
      # Ordered 0-5 rating kept as a factor with all six levels present
      sleep_quality = factor(sleep_quality, levels = 0:5),
      # Calendar roll-ups for later aggregation (weeks start on Monday)
      month_year_ins = floor_date(date_ins, "month"),
      week_year_ins = floor_date(date_ins, "week", week_start = 1),
      day_ins = lubridate::wday(date_ins, label = TRUE, week_start = 1)
    )
}
| /o_functions/tidy_data.R | no_license | c1au6i0/time_racoon | R | false | false | 1,123 | r | # Import
# dat_long <- readxl::read_excel("personal_tt.xlsx")
clean_data <- function(dat) {
dat_long <-
dat %>%
mutate_all(~ as.character(.)) %>%
select(-starts_with("WEEK")) %>%
pivot_longer(
col = 3:ncol(.),
names_to = "date_ins",
values_to = "value"
) %>%
mutate(date_ins = mdy(date_ins)) %>%
filter(!is.na(date_ins), !is.na(activity)) %>%
select(-unit) %>%
pivot_wider(names_from = activity, values_from = value) %>%
mutate_at(
c("get_up", "bed_time"),
~ as.POSIXct(as.numeric(.) * 60 * 60 * 24, origin = "1899-12-30", tz = "GMT")
) %>%
mutate_at(
vars(
-get_up,
-bed_time,
-date_ins,
-parents,
-partner,
-brother,
-family_others,
-friends
),
as.numeric) %>%
clean_names() %>%
mutate(
sleep_quality = factor(sleep_quality, levels = 0:5),
month_year_ins = floor_date(date_ins, "month"),
week_year_ins = floor_date(date_ins, "week", week_start = 1),
day_ins = lubridate::wday(date_ins, label = TRUE, week_start = 1)
)
}
|
library(tidyverse)
# Script to read in cQ slopes for the full record, baseflow only, individual
# stormflow events, and bulk stormflow events.
# All four files were created in Code/cQ_Slope_Calculations.R.
# read_rds() is readr's RDS reader (attached via tidyverse).
all_full <- read_rds("Data/cQ_slopes/fullRecord.rds")
all_baseflow <- read_rds("Data/cQ_slopes/baseflow.rds")
all_individual_events <- read_rds("Data/cQ_slopes/individualEvents.rds")
all_bulkstorm <- read_rds("Data/cQ_slopes/bulkStormflow.rds")
| /Data/cQ_slopes/call_cQslope_datasets.R | no_license | LinneaRock/MendotaWatershed_SalinityRegimes | R | false | false | 433 | r | library(tidyverse)
#script to call in cQ slopes for full record, baseflow only, individual stormflow events, and bulk stormflow events.
#all were created in Code/cQ_Slope_Calculations.R
all_full <- read_rds("Data/cQ_slopes/fullRecord.rds")
all_baseflow <- read_rds("Data/cQ_slopes/baseflow.rds")
all_individual_events <- read_rds("Data/cQ_slopes/individualEvents.rds")
all_bulkstorm <- read_rds("Data/cQ_slopes/bulkStormflow.rds")
|
#' @title Write Sequoia Output to File
#'
#' @description The various list elements returned by \code{sequoia} are each
#' written to text files in the specified folder, or to separate sheets in a
#' single excel file (requires library \pkg{openxlsx}).
#'
#' @details The text files can be used as input for the stand-alone Fortran
#' version of sequoia, e.g. when the genotype data is too large for R. See
#' \code{vignette('sequoia')} for further details.
#'
#' @param SeqList list returned by \code{\link{sequoia}}, to be written out.
#' @param GenoM matrix with genetic data (optional). Ignored if
#' OutFormat='xls', as the resulting file could become too large for excel.
#' @param MaybeRel list with results from \code{\link{GetMaybeRel}} (optional).
#' @param PedComp list with results from \code{\link{PedCompare}} (optional).
#' \code{SeqList$DummyIDs} is combined with \code{PedComp$DummyMatch} if both
#' are provided.
#' @param OutFormat 'xls' or 'txt'.
#' @param folder the directory where the text files will be written; will be
#' created if it does not already exists. Relative to the current working
#' directory, or NULL for current working directory. Ignored if
#' \code{OutFormat='xls'}.
#' @param file the name of the excel file to write to, ignored if
#' \code{OutFormat='txt'}.
#' @param ForVersion choose '1' for back-compatibility with stand-alone sequoia
#' versions 1.x
#' @param quiet suppress messages.
#'
#' @seealso \code{\link{writeColumns}} to write to a text file, using white
#' space padding to keep columns aligned.
#'
#' @examples
#' \dontrun{
#' writeSeq(SeqList, OutFormat="xls", file="MyFile.xlsx")
#'
#' # add additional sheet to the excel file:
#' library(openxlsx)
#' wb <- loadWorkbook("MyFile.xlsx")
#' addWorksheet(wb, sheetName = "ExtraData")
#' writeData(wb, sheet = "ExtraData", MyData, rowNames=FALSE)
#' saveWorkbook(wb, "MyFile.xlsx", overwrite=TRUE, returnValue=TRUE)
#'
#' # or: (package requires java & is trickier to install)
#' xlsx::write.xlsx(MyData, file = "MyFile.xlsx", sheetName="ExtraData",
#' col.names=TRUE, row.names=FALSE, append=TRUE, showNA=FALSE)
#' }
#'
#' @export
writeSeq <- function(SeqList,
                     GenoM = NULL,
                     MaybeRel = NULL,
                     PedComp = NULL,
                     OutFormat = "txt",
                     folder = "Sequoia-OUT",
                     file = "Sequoia-OUT.xlsx",
                     ForVersion = 2,
                     quiet = FALSE) {
  if (!OutFormat %in% c("xls", "xlsx", "txt")) stop("Invalid OutFormat")
  if (!inherits(SeqList, 'list')) stop("SeqList should be a list")

  # Fold GetMaybeRel() output into SeqList so it is written out with the rest.
  if (!is.null(MaybeRel)) {
    for (maybe in c("MaybeRel", "MaybeParent", "MaybeTrio")) {
      if (maybe %in% names(MaybeRel)) {
        SeqList[[maybe]] <- MaybeRel[[maybe]]
      }
    }
  }

  # Consistency checks: GenoM must line up with the pedigree, life history
  # and run specs stored in SeqList.
  if (!is.null(GenoM)) {
    if (!is.matrix(GenoM) && !is.data.frame(GenoM)) stop("GenoM should be a matrix")
    if ("PedigreePar" %in% names(SeqList)) {
      if (!all(rownames(GenoM) %in% SeqList$PedigreePar$id)) {
        stop("Not all ids in 'GenoM' occur in SeqList$PedigreePar")
      } else if (!all(rownames(GenoM) == SeqList$PedigreePar$id)) {
        stop("rownames of 'GenoM' do not match order of ids in SeqList$PedigreePar")
      }
    }
    if ("LifeHist" %in% names(SeqList)) {
      if (length(intersect(SeqList[["LifeHist"]]$ID, rownames(GenoM))) == 0) {
        stop("rownames of 'GenoM' shares no common IDs with 'LifeHist' in SeqList")
      }
    }
    if (nrow(GenoM) != SeqList$Specs$NumberIndivGenotyped) {
      # A mismatch may be intentional (e.g. after subsetting); let the user decide.
      ANS <- readline(prompt = paste("Number of individuals according to Specs differs",
          " from number of rows in GenoM (", SeqList$Specs$NumberIndivGenotyped, "/", nrow(GenoM),
          ").\n Press Y to continue and fix manually in `SequoiaSpecs.txt' "))
      if (!substr(ANS, 1, 1) %in% c("Y", "y")) {
        stop(call. = FALSE)
      }
    }
    if (ncol(GenoM) != SeqList$Specs$NumberSnps) {
      # (message fixed: this compares the number of *columns* of GenoM)
      stop(paste("Number of SNPs according to Specs differs from number of columns in GenoM (",
                 SeqList$Specs$NumberSnps, "vs", ncol(GenoM), ")"))
    }
  } else if (OutFormat == "txt") {
    warning("No GenoM specified")
  }

  # write excel file ----
  if (OutFormat == "xlsx") OutFormat <- "xls"
  if (grepl('xls', file) && !grepl('Sequoia-OUT', file)) OutFormat <- "xls"
  if (OutFormat == "xls") {
    if (!requireNamespace("openxlsx", quietly = TRUE)) {
      if (interactive() && !quiet) {
        ANS <- readline(prompt = paste("library 'openxlsx' not found. Install Y/N? "))
        if (!substr(ANS, 1, 1) %in% c("Y", "y")) stop(call.=FALSE)
      }
      utils::install.packages("openxlsx")
    }
    write.seq.xls(SeqList, file=file, PedComp=PedComp, quiet=quiet)

  # txt: check if folder & files exist ----
  } else if (OutFormat == "txt") {
    curdir <- getwd()
    if (is.null(folder)) folder = curdir
    dir.create(folder, showWarnings = FALSE)
    # Resolve the full path while still in the original working directory,
    # for the closing message below.
    folder_full <- normalizePath(folder, winslash = "/")
    setwd(folder)
    # Restore the working directory even if any write below fails or the
    # user aborts (previously an error mid-write left the session in 'folder').
    on.exit(setwd(curdir), add = TRUE)
    if (any(file.exists("Geno.txt", "Specs.txt", "AgePriors.txt", "Parents.txt",
                        "Readme.txt")) &&
        interactive() && !quiet) {
      ANS <- readline(prompt = paste("Writing data to '", folder,
                  "' will overwrite existing file(s). Continue Y/N? "))
      if (!substr(ANS, 1, 1) %in% c("Y", "y", "J", "j", "")) {
        stop(call. = FALSE)   # on.exit() restores the working directory
      }
    }

    # prep specs ----
    # Re-assemble Specs in the order & numeric coding expected by the
    # stand-alone Fortran version of sequoia.
    SpecsOUT <- cbind("Genofile" = "Geno.txt",
                      "LHfile" = "LifeHist.txt",
                      "nIndLH" = nrow(SeqList[["LifeHist"]]),
                      SeqList$Specs)
    SpecsOUT$Complexity <- c("mono"=0, "simp"=1, "full"=2)[SpecsOUT$Complexity]
    SpecsOUT$Herm <- c("no"=0, "A"=1, "B"=2)[SpecsOUT$Herm]
    SpecsOUT$UseAge <- c("extra"=2, "yes"=1, "no"=0)[SpecsOUT$UseAge]
    if ('FindMaybeRel' %in% names(SpecsOUT)) { # dropped from version 2.4
      SpecsOUT$FindMaybeRel <- as.numeric(SpecsOUT$FindMaybeRel)
    }
    SpecsOUT$CalcLLR <- as.numeric(SpecsOUT$CalcLLR)
    for (x in c("SequoiaVersion", "TimeStart", "TimeEnd")) {
      if (!x %in% names(SpecsOUT)) next  # if SeqList from version 1.x
      SpecsOUT[[x]] <- as.character(SpecsOUT[[x]])
    }
    if (ForVersion == 1) {
      SpecsOUT <- SpecsOUT[!names(SpecsOUT) %in% c("MaxMismatchOH", "MaxMismatchME")]
      if (SpecsOUT$MaxSibIter <= 0) SpecsOUT$MaxSibIter <- 10
    } else {
      SpecsOUT$MaxSibIter <- 42
    }
    SpecsOUT <- SpecsOUT[!names(SpecsOUT) %in% "Module"]  # run-time option for stand-alone sequoia.

    # write text files ----
    OPT <- options()
    on.exit(options(OPT), add = TRUE)  # restore user's options even on error
    options(scipen = 10)               # avoid scientific notation in the files
    write(paste("These files were created by R package sequoia on ", date()),
          file="README.txt")
    if (!is.null(GenoM)) utils::write.table(GenoM, file="Geno.txt",
              quote=FALSE, row.names=TRUE, col.names=FALSE)
    utils::write.table(as.data.frame(t(SpecsOUT)), file="SequoiaSpecs.txt",
                       sep = "\t,\t", quote = FALSE, col.names = FALSE)
    writeColumns(SeqList$AgePriors, "AgePriors.txt", row.names=FALSE)
    # keep at most the first 6 life-history columns (extra columns presumably
    # not understood by the stand-alone program)
    if (ncol(SeqList[["LifeHist"]])>6) {
      writeColumns(SeqList[["LifeHist"]][,1:6], "LifeHist.txt", row.names=FALSE)
    } else {
      writeColumns(SeqList[["LifeHist"]], "LifeHist.txt", row.names=FALSE)
    }
    if ("PedigreePar" %in% names(SeqList)) {
      write.parents(ParentDF = SeqList$PedigreePar, LifeHistData = SeqList[["LifeHist"]],
                    GenoM = GenoM)
      # compatible to be read in by stand-alone sequoia
    }
    if ("Pedigree" %in% names(SeqList)) {
      writeColumns(SeqList[["Pedigree"]], "Pedigree.txt", row.names=FALSE)
    }
    if ("DummyIDs" %in% names(SeqList)) {
      # combine with real-vs-dummy matches from PedCompare(), if supplied
      if ("DummyMatch" %in% names(PedComp)) {
        Dummies <- merge(PedComp$DummyMatch, SeqList$DummyIDs)
      } else {
        Dummies <- SeqList$DummyIDs
      }
      writeColumns(Dummies, "DummyIDs.txt", row.names=FALSE)
    }
    # write candidate pairs under whichever name is present (MaybeRel preferred)
    if ("MaybeRel" %in% names(SeqList)) {
      MaybePairs <- SeqList$MaybeRel
    } else if ("MaybeParent" %in% names(SeqList)) {
      MaybePairs <- SeqList$MaybeParent
    } else {
      MaybePairs <- NULL
    }
    if (!is.null(MaybePairs)) {
      writeColumns(MaybePairs, "MaybePairs.txt", row.names=FALSE)
    }
    if (!is.null(SeqList$MaybeTrio)) {
      writeColumns(SeqList$MaybeTrio, "MaybeTrios.txt", row.names=FALSE)
    }
    if (!is.null(PedComp)) {
      # stack the 'dam' and 'sire' slices of the counts array (7 categories each)
      Counts.out <- cbind(Parent = rep(c("dam", "sire"), each=7),
                          Cat = rep(dimnames(PedComp$Counts)[[1]], 2))
      Counts.out <- cbind(Counts.out, rbind(PedComp$Counts[,,"dam"],
                                            PedComp$Counts[,,"sire"]))
      writeColumns(Counts.out, "PedCompare-Counts.txt", row.names=FALSE)
      writeColumns(PedComp$MergedPed, "PedCompare-MergedPed.txt",
                   row.names=FALSE)
    }
    if(!quiet) message(paste("Output written to", folder_full))

  } else {
    # unreachable given the OutFormat check at the top; kept as a safety net
    stop("OutFormat not supported.")
  }
}
#==========================================================================
# Write 'Parents.txt' in the layout read by the stand-alone sequoia program:
# missing LLRs coded as 999, missing OH/ME counts as -9, and ids translated
# to 1-based row numbers in the genotype file (0 = not genotyped / missing).
# NOTE(review): 'LifeHistData' is renamed here but not otherwise used in this
# function -- presumably a leftover; confirm before removing the argument.
write.parents <- function(ParentDF, LifeHistData, GenoM=NULL, file="Parents.txt") {
  names(ParentDF)[1:3] <- c("id", "dam", "sire")
  names(LifeHistData) <- c("ID", "Sex", "BirthYear")
  # Output row order must follow the genotype matrix, if one is provided.
  if (!is.null(GenoM)) {
    gID <- rownames(GenoM)
  } else {
    gID <- ParentDF$id
  }
  # MergeFill() is a package-internal helper; assumed to behave like merge()
  # while aligning/filling the listed columns -- TODO confirm.
  Par <- MergeFill(ParentDF,
                   data.frame(id = gID,
                              LLRdam = NA, LLRsire = NA, LLRpair = NA,
                              OHdam = NA, OHsire = NA, MEpair = NA,
                              rowid = NA, rowdam = NA, rowsire = NA,
                              stringsAsFactors=FALSE),
                   by = "id", all = TRUE)
  rownames(Par) <- as.character(Par$id)
  Par <- Par[gID, ]  # merge ignores sort=FALSE
  # Recode missing values to the sentinels expected by the file format.
  for (x in c("dam", "sire")) Par[is.na(Par[, x]), x] <- "NA"
  for (x in c("LLRdam", "LLRsire", "LLRpair")) {
    Par[is.na(Par[, x]), x] <- 999
  }
  for (x in c("OHdam", "OHsire", "MEpair")) {
    Par[is.na(Par[, x]), x] <- -9
  }
  # Translate ids to their row number in gID; 0 when not present.
  for (x in c("id", "dam", "sire")) {
    Par[, paste0("row", x)] <- as.numeric(factor(Par[,x], levels=gID))
    Par[is.na(Par[, paste0("row", x)]), paste0("row", x)] <- 0
  }
  # After the gID reindexing above, an NA id means an id was absent from gID.
  if (any(is.na(Par$id))) {
    if (!is.null(GenoM)) {
      stop("Some id's in PedigreePar do not occur in GenoM!")
    } else {
      stop("Some id's in PedigreePar are <NA>")
    }
  }
  writeColumns(Par, file = file, row.names=FALSE)
}
#==========================================================================
# Write all SeqList elements to a single .xlsx workbook, one sheet per
# element. Called by writeSeq() when OutFormat = 'xls'; needs 'openxlsx'.
write.seq.xls <- function(SeqList, file, PedComp=NULL, quiet) {
  if (!requireNamespace("openxlsx", quietly = TRUE)) {
    stop("library 'openxlsx' not found")
  }
  # check if file exists; ask before overwriting (interactive sessions only)
  if (file.exists(file) & interactive() & !quiet) {
    ANS <- readline(prompt = paste("Writing data to '", file,
                "' will overwrite existing file. Continue Y/N? "))
    if (!substr(ANS, 1, 1) %in% c("Y", "y")) stop(call.=FALSE)
  }

  # Collect one named entry per sheet; pedigree/parents first so they are
  # the leading sheets in the workbook.
  out_list <- list() # do in wb straight away (?)
  if ("Pedigree" %in% names(SeqList)) {
    out_list[['Pedigree']] <- SeqList[["Pedigree"]]
  }
  if ('PedigreePar' %in% names(SeqList)) {
    out_list[['Parents']] <- SeqList[['PedigreePar']]
  }
  if (!any(c("Pedigree", "PedigreePar") %in% names(SeqList))) {
    out_list[['Parents']] <- 'None'   # placeholder sheet
  }
  out_list[['Run parameters']] <- t(SeqList$Specs)
  Names <- c("LifeHist", "DupGenotype", "DupLifeHistID",
             "AgePriors", "DummyIDs", "TotLikPar", "TotLikSib",
             "MaybeRel", "MaybeParent", "MaybeTrio")
  # Add each remaining element that is a non-empty table.
  for (x in Names) {
    if (!x %in% names(SeqList)) next
    if (is.null(dim(SeqList[[x]]))) next   # skip non-tabular elements
    if (nrow(SeqList[[x]])==0) next
    if (x == "DummyIDs") {
      # combine with real-vs-dummy matches from PedCompare(), if supplied
      if ("DummyMatch" %in% names(PedComp)) {
        SeqList$DummyIDs <- merge(PedComp$DummyMatch, SeqList$DummyIDs)
      }
    }
    out_list[[x]] <- SeqList[[x]]
  }
  if (!is.null(PedComp)) { # pedcompare output
    # stack the 'dam' and 'sire' slices of the counts array (7 categories each)
    Counts.out <- cbind(Parent = rep(c("dam", "sire"), each=7),
                        Cat = rep(dimnames(PedComp$Counts)[[1]], 2))
    Counts.out <- cbind(Counts.out, rbind(PedComp$Counts[,,"dam"],
                                          PedComp$Counts[,,"sire"]))
    rownames(Counts.out) <- 1:nrow(Counts.out) # not used, but otherwise warning
    out_list[['PedCompare-Counts']] <- Counts.out
    out_list[['PedCompare-MergedPed']] <- PedComp$MergedPed
  }

  # Only these two sheets get row names.
  # NOTE(review): if 'AgePriors' is absent from out_list, this assignment
  # appends a new element to ShowRowName instead of flipping an existing
  # one (named-vector assignment semantics) -- confirm openxlsx tolerates
  # the resulting length mismatch.
  ShowRowName <- rep(FALSE, length(out_list))
  names(ShowRowName) <- names(out_list)
  ShowRowName[c('Run parameters', 'AgePriors')] <- TRUE

  # openxlsx::write.xlsx does not give error if xlsx exists & is open
  # --> turn into workbook & use saveWorkbook instead
  wb <- openxlsx::buildWorkbook(out_list,
                                colNames = TRUE, na.string = '',
                                rowNames = ShowRowName,
                                headerStyle = openxlsx::createStyle(textDecoration = 'bold'))
  WriteSuccess <- openxlsx::saveWorkbook(wb, file,
                  overwrite=TRUE, # overwrite warning generated by writeSeq()
                  returnValue=TRUE)
  if (isTRUE(WriteSuccess)) {
    if(!quiet) message(paste("Output written to", file))
  } else {
    stop(WriteSuccess$message, call.=FALSE)
  }
}
#======================================================================
#' @title Write Data to a File Column-wise
#'
#' @description Write data.frame or matrix to a text file, using white
#' space padding to keep columns aligned as in \code{print}.
#'
#' @param x the object to be written, preferably a matrix or data frame.
#' If not, it is attempted to coerce x to a matrix.
#' @param file a character string naming a file.
#' @param row.names a logical value indicating whether the row names of x are
#' to be written along with x.
#' @param col.names a logical value indicating whether the column names of x
#' are to be written along with x.
#'
#' @export
writeColumns <- function(x, file="", row.names=TRUE,
                         col.names=TRUE) {
  # Work on a character matrix so format() pads every cell to equal width.
  out <- as.matrix(x)
  if (col.names) {
    out <- rbind(colnames(x), out)
  }
  if (row.names) {
    # Leading column of row labels; blank corner cell when a header row exists.
    first_col <- if (col.names) c("", rownames(x)) else rownames(x)
    out <- cbind(first_col, out, deparse.level = 0)
  }
  # format() pads to a common width; transpose so write() emits row by row.
  write(t(format(out)), ncolumns = ncol(out), file = file)
}
| /R/WriteToFiles.R | no_license | JiscaH/sequoia | R | false | false | 14,176 | r | #' @title Write Sequoia Output to File
#'
#' @description The various list elements returned by \code{sequoia} are each
#' written to text files in the specified folder, or to separate sheets in a
#' single excel file (requires library \pkg{openxlsx}).
#'
#' @details The text files can be used as input for the stand-alone Fortran
#' version of sequoia, e.g. when the genotype data is too large for R. See
#' \code{vignette('sequoia')} for further details.
#'
#' @param SeqList list returned by \code{\link{sequoia}}, to be written out.
#' @param GenoM matrix with genetic data (optional). Ignored if
#' OutFormat='xls', as the resulting file could become too large for excel.
#' @param MaybeRel list with results from \code{\link{GetMaybeRel}} (optional).
#' @param PedComp list with results from \code{\link{PedCompare}} (optional).
#' \code{SeqList$DummyIDs} is combined with \code{PedComp$DummyMatch} if both
#' are provided.
#' @param OutFormat 'xls' or 'txt'.
#' @param folder the directory where the text files will be written; will be
#' created if it does not already exists. Relative to the current working
#' directory, or NULL for current working directory. Ignored if
#' \code{OutFormat='xls'}.
#' @param file the name of the excel file to write to, ignored if
#' \code{OutFormat='txt'}.
#' @param ForVersion choose '1' for back-compatibility with stand-alone sequoia
#' versions 1.x
#' @param quiet suppress messages.
#'
#' @seealso \code{\link{writeColumns}} to write to a text file, using white
#' space padding to keep columns aligned.
#'
#' @examples
#' \dontrun{
#' writeSeq(SeqList, OutFormat="xls", file="MyFile.xlsx")
#'
#' # add additional sheet to the excel file:
#' library(openxlsx)
#' wb <- loadWorkbook("MyFile.xlsx")
#' addWorksheet(wb, sheetName = "ExtraData")
#' writeData(wb, sheet = "ExtraData", MyData, rowNames=FALSE)
#' saveWorkbook(wb, "MyFile.xlsx", overwrite=TRUE, returnValue=TRUE)
#'
#' # or: (package requires java & is trickier to install)
#' xlsx::write.xlsx(MyData, file = "MyFile.xlsx", sheetName="ExtraData",
#' col.names=TRUE, row.names=FALSE, append=TRUE, showNA=FALSE)
#' }
#'
#' @export
writeSeq <- function(SeqList,
GenoM = NULL,
MaybeRel = NULL,
PedComp = NULL,
OutFormat = "txt",
folder = "Sequoia-OUT",
file = "Sequoia-OUT.xlsx",
ForVersion = 2,
quiet = FALSE) {
if (!OutFormat %in% c("xls", "xlsx", "txt")) stop("Invalid OutFormat")
if (!inherits(SeqList, 'list')) stop("SeqList should be a list")
if (!is.null(MaybeRel)) {
for (maybe in c("MaybeRel", "MaybeParent", "MaybeTrio")) {
if (maybe %in% names(MaybeRel)) {
SeqList[[maybe]] <- MaybeRel[[maybe]]
}
}
}
if (!is.null(GenoM)) {
if (!is.matrix(GenoM) & !is.data.frame(GenoM)) stop("GenoM should be a matrix")
if ("PedigreePar" %in% names(SeqList)) {
if (!all(rownames(GenoM) %in% SeqList$PedigreePar$id)) {
stop("Not all ids in 'GenoM' occur in SeqList$PedigreePar")
} else if (!all(rownames(GenoM) == SeqList$PedigreePar$id)) {
stop("rownames of 'GenoM' do not match order of ids in SeqList$PedigreePar")
}
}
if ("LifeHist" %in% names(SeqList)) {
if (length(intersect(SeqList[["LifeHist"]]$ID, rownames(GenoM))) == 0) {
stop("rownames of 'GenoM' shares no common IDs with 'LifeHist' in SeqList")
}
}
if(nrow(GenoM)!= SeqList$Specs$NumberIndivGenotyped) {
ANS <- readline(prompt = paste("Number of individuals according to Specs differs",
" from number of rows in GenoM (", SeqList$Specs$NumberIndivGenotyped, "/", nrow(GenoM),
").\n Press Y to continue and fix manually in `SequoiaSpecs.txt' "))
if (!substr(ANS, 1, 1) %in% c("Y", "y")) {
stop(call.=FALSE)
}
}
if(ncol(GenoM)!= SeqList$Specs$NumberSnps) {
stop(paste("Number of SNPs according to Specs differs from number of rows in GenoM (",
SeqList$Specs$NumberSnps, "vs", ncol(GenoM), ")"))
}
} else if (OutFormat == "txt") {
warning("No GenoM specified")
}
# write excel file ----
if (OutFormat == "xlsx") OutFormat <- "xls"
if (grepl('xls', file) & !grepl('Sequoia-OUT', file)) OutFormat <- "xls"
if (OutFormat == "xls") {
if (!requireNamespace("openxlsx", quietly = TRUE)) {
if (interactive() & !quiet) {
ANS <- readline(prompt = paste("library 'openxlsx' not found. Install Y/N? "))
if (!substr(ANS, 1, 1) %in% c("Y", "y")) stop(call.=FALSE)
}
utils::install.packages("openxlsx")
}
write.seq.xls(SeqList, file=file, PedComp=PedComp, quiet=quiet)
# txt: check if folder & files exist ----
} else if (OutFormat == "txt") {
curdir <- getwd()
if (is.null(folder)) folder = curdir
dir.create(folder, showWarnings = FALSE)
setwd(folder)
if (any(file.exists("Geno.txt", "Specs.txt", "AgePriors.txt", "Parents.txt",
"Readme.txt")) &
interactive() & !quiet) {
ANS <- readline(prompt = paste("Writing data to '", folder,
"' will overwrite existing file(s). Continue Y/N? "))
if (!substr(ANS, 1, 1) %in% c("Y", "y", "J", "j", "")) {
setwd(curdir)
stop(call.=FALSE)
}
}
# prep specs ----
SpecsOUT <- cbind("Genofile" = "Geno.txt",
"LHfile" = "LifeHist.txt",
"nIndLH" = nrow(SeqList[["LifeHist"]]),
SeqList$Specs)
SpecsOUT$Complexity <- c("mono"=0, "simp"=1, "full"=2)[SpecsOUT$Complexity]
SpecsOUT$Herm <- c("no"=0, "A"=1, "B"=2)[SpecsOUT$Herm]
SpecsOUT$UseAge <- c("extra"=2, "yes"=1, "no"=0)[SpecsOUT$UseAge]
if ('FindMaybeRel' %in% names(SpecsOUT)) { # dropped from version 2.4
SpecsOUT$FindMaybeRel <- as.numeric(SpecsOUT$FindMaybeRel)
}
SpecsOUT$CalcLLR <- as.numeric(SpecsOUT$CalcLLR)
for (x in c("SequoiaVersion", "TimeStart", "TimeEnd")) {
if (!x %in% names(SpecsOUT)) next # if SeqList from version 1.x
SpecsOUT[[x]] <- as.character(SpecsOUT[[x]])
}
if (ForVersion == 1) {
SpecsOUT <- SpecsOUT[!names(SpecsOUT) %in% c("MaxMismatchOH", "MaxMismatchME")]
if (SpecsOUT$MaxSibIter <= 0) SpecsOUT$MaxSibIter <- 10
} else {
SpecsOUT$MaxSibIter <- 42
}
SpecsOUT <- SpecsOUT[!names(SpecsOUT) %in% "Module"] # run-time option for stand-alone sequoia.
# write text files ----
OPT <- options()
options(scipen = 10)
write(paste("These files were created by R package sequoia on ", date()),
file="README.txt")
if (!is.null(GenoM)) utils::write.table(GenoM, file="Geno.txt",
quote=FALSE, row.names=TRUE, col.names=FALSE)
utils::write.table(as.data.frame(t(SpecsOUT)), file="SequoiaSpecs.txt",
sep = "\t,\t", quote = FALSE, col.names = FALSE)
writeColumns(SeqList$AgePriors, "AgePriors.txt", row.names=FALSE)
if (ncol(SeqList[["LifeHist"]])>6) {
writeColumns(SeqList[["LifeHist"]][,1:6], "LifeHist.txt", row.names=FALSE)
} else {
writeColumns(SeqList[["LifeHist"]], "LifeHist.txt", row.names=FALSE)
}
if ("PedigreePar" %in% names(SeqList)) {
write.parents(ParentDF = SeqList$PedigreePar, LifeHistData = SeqList[["LifeHist"]],
GenoM = GenoM)
# compatable to be read in by stand-alone sequoia
}
if ("Pedigree" %in% names(SeqList)) {
writeColumns(SeqList[["Pedigree"]], "Pedigree.txt", row.names=FALSE)
}
if ("DummyIDs" %in% names(SeqList)) {
if ("DummyMatch" %in% names(PedComp)) {
Dummies <- merge(PedComp$DummyMatch, SeqList$DummyIDs)
} else {
Dummies <- SeqList$DummyIDs
}
writeColumns(Dummies, "DummyIDs.txt", row.names=FALSE)
}
if ("MaybeRel" %in% names(SeqList)) {
MaybePairs <- SeqList$MaybeRel
} else if ("MaybeParent" %in% names(SeqList)) {
MaybePairs <- SeqList$MaybeParent
} else {
MaybePairs <- NULL
}
if (!is.null(MaybePairs)) {
writeColumns(MaybePairs, "MaybePairs.txt", row.names=FALSE)
}
if (!is.null(SeqList$MaybeTrio)) {
writeColumns(SeqList$MaybeTrio, "MaybeTrios.txt", row.names=FALSE)
}
if (!is.null(PedComp)) {
Counts.out <- cbind(Parent = rep(c("dam", "sire"), each=7),
Cat = rep(dimnames(PedComp$Counts)[[1]], 2))
Counts.out <- cbind(Counts.out, rbind(PedComp$Counts[,,"dam"],
PedComp$Counts[,,"sire"]))
writeColumns(Counts.out, "PedCompare-Counts.txt", row.names=FALSE)
writeColumns(PedComp$MergedPed, "PedCompare-MergedPed.txt",
row.names=FALSE)
}
options(OPT)
setwd(curdir)
if(!quiet) message(paste("Output written to", normalizePath(folder, winslash="/")))
} else {
stop("OutFormat not supported.")
}
}
#==========================================================================
write.parents <- function(ParentDF, LifeHistData, GenoM=NULL, file="Parents.txt") {
names(ParentDF)[1:3] <- c("id", "dam", "sire")
names(LifeHistData) <- c("ID", "Sex", "BirthYear")
if (!is.null(GenoM)) {
gID <- rownames(GenoM)
} else {
gID <- ParentDF$id
}
Par <- MergeFill(ParentDF,
data.frame(id = gID,
LLRdam = NA, LLRsire = NA, LLRpair = NA,
OHdam = NA, OHsire = NA, MEpair = NA,
rowid = NA, rowdam = NA, rowsire = NA,
stringsAsFactors=FALSE),
by = "id", all = TRUE)
rownames(Par) <- as.character(Par$id)
Par <- Par[gID, ] # merge ignores sort=FALSE
for (x in c("dam", "sire")) Par[is.na(Par[, x]), x] <- "NA"
for (x in c("LLRdam", "LLRsire", "LLRpair")) {
Par[is.na(Par[, x]), x] <- 999
}
for (x in c("OHdam", "OHsire", "MEpair")) {
Par[is.na(Par[, x]), x] <- -9
}
for (x in c("id", "dam", "sire")) {
Par[, paste0("row", x)] <- as.numeric(factor(Par[,x], levels=gID))
Par[is.na(Par[, paste0("row", x)]), paste0("row", x)] <- 0
}
if (any(is.na(Par$id))) {
if (!is.null(GenoM)) {
stop("Some id's in PedigreePar do not occur in GenoM!")
} else {
stop("Some id's in PedigreePar are <NA>")
}
}
writeColumns(Par, file = file, row.names=FALSE)
}
#==========================================================================
write.seq.xls <- function(SeqList, file, PedComp=NULL, quiet) {
if (!requireNamespace("openxlsx", quietly = TRUE)) {
stop("library 'openxlsx' not found")
}
# check if file exists
if (file.exists(file) & interactive() & !quiet) {
ANS <- readline(prompt = paste("Writing data to '", file,
"' will overwrite existing file. Continue Y/N? "))
if (!substr(ANS, 1, 1) %in% c("Y", "y")) stop(call.=FALSE)
}
out_list <- list() # do in wb straight away (?)
if ("Pedigree" %in% names(SeqList)) {
out_list[['Pedigree']] <- SeqList[["Pedigree"]]
}
if ('PedigreePar' %in% names(SeqList)) {
out_list[['Parents']] <- SeqList[['PedigreePar']]
}
if (!any(c("Pedigree", "PedigreePar") %in% names(SeqList))) {
out_list[['Parents']] <- 'None'
}
out_list[['Run parameters']] <- t(SeqList$Specs)
Names <- c("LifeHist", "DupGenotype", "DupLifeHistID",
"AgePriors", "DummyIDs", "TotLikPar", "TotLikSib",
"MaybeRel", "MaybeParent", "MaybeTrio")
for (x in Names) {
if (!x %in% names(SeqList)) next
if (is.null(dim(SeqList[[x]]))) next
if (nrow(SeqList[[x]])==0) next
if (x == "DummyIDs") {
if ("DummyMatch" %in% names(PedComp)) {
SeqList$DummyIDs <- merge(PedComp$DummyMatch, SeqList$DummyIDs)
}
}
out_list[[x]] <- SeqList[[x]]
}
if (!is.null(PedComp)) { # pedcompare output
Counts.out <- cbind(Parent = rep(c("dam", "sire"), each=7),
Cat = rep(dimnames(PedComp$Counts)[[1]], 2))
Counts.out <- cbind(Counts.out, rbind(PedComp$Counts[,,"dam"],
PedComp$Counts[,,"sire"]))
rownames(Counts.out) <- 1:nrow(Counts.out) # not used, but otherwise warning
out_list[['PedCompare-Counts']] <- Counts.out
out_list[['PedCompare-MergedPed']] <- PedComp$MergedPed
}
ShowRowName <- rep(FALSE, length(out_list))
names(ShowRowName) <- names(out_list)
ShowRowName[c('Run parameters', 'AgePriors')] <- TRUE
# openxlsx::write.xlsx does not give error if xlsx exists & is open
# --> turn into workbook & use saveWorkbook instead
wb <- openxlsx::buildWorkbook(out_list,
colNames = TRUE, na.string = '',
rowNames = ShowRowName,
headerStyle = openxlsx::createStyle(textDecoration = 'bold'))
WriteSuccess <- openxlsx::saveWorkbook(wb, file,
overwrite=TRUE, # overwrite warning generated by writeSeq()
returnValue=TRUE)
if (isTRUE(WriteSuccess)) {
if(!quiet) message(paste("Output written to", file))
} else {
stop(WriteSuccess$message, call.=FALSE)
}
}
#======================================================================
#' @title Write Data to a File Column-wise
#'
#' @description Write data.frame or matrix to a text file, using white
#' space padding to keep columns aligned as in \code{print}.
#'
#' @param x the object to be written, preferably a matrix or data frame.
#' If not, it is attempted to coerce x to a matrix.
#' @param file a character string naming a file.
#' @param row.names a logical value indicating whether the row names of x are
#' to be written along with x.
#' @param col.names a logical value indicating whether the column names of x
#' are to be written along with x.
#'
#' @export
writeColumns <- function(x, file="", row.names=TRUE,
                         col.names=TRUE) {
  # Work on a character matrix so format() pads every cell to equal width.
  out <- as.matrix(x)
  if (col.names) {
    out <- rbind(colnames(x), out)
  }
  if (row.names) {
    # Leading column of row labels; blank corner cell when a header row exists.
    first_col <- if (col.names) c("", rownames(x)) else rownames(x)
    out <- cbind(first_col, out, deparse.level = 0)
  }
  # format() pads to a common width; transpose so write() emits row by row.
  write(t(format(out)), ncolumns = ncol(out), file = file)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motif_plot.R
\name{ggmotif_scale}
\alias{ggmotif_scale}
\title{ggmotif_scale}
\usage{
ggmotif_scale()
}
\description{
Removes redundant legends and applies a fixed coloring scheme to the motif letters.
}
\examples{
ggplot() + ggmotif("ACG") + ggmotif_scale()
}
| /man/ggmotif_scale.Rd | no_license | guoyanb/ggmotif | R | false | true | 322 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/motif_plot.R
\name{ggmotif_scale}
\alias{ggmotif_scale}
\title{ggmotif_scale}
\usage{
ggmotif_scale()
}
\description{
Removes redundant legends and applies a fixed coloring scheme to the motif letters.
}
\examples{
ggplot() + ggmotif("ACG") + ggmotif_scale()
}
|
# Dependencies: openxlsx (.xlsx import), magrittr (%>%), data.table
# (by-reference tables used throughout this script).
library(openxlsx)
library(magrittr)
library(data.table)
# import the data (put correct path of data)
customer_invoices <- read.csv('customer_invoices_2 (1).csv',sep = ",", header= TRUE, stringsAsFactors = FALSE)
# change into datatable (in place, by reference)
setDT(customer_invoices)
# convert the three date columns from character to Date
# (as.Date drops the time-of-day part of the "%d/%m/%Y %H:%M" strings)
customer_invoices[, c("status_datetime", "invoice_date", "payment_due_date") := lapply(.SD, function(x) as.Date(x, format = "%d/%m/%Y %H:%M")),
                  .SDcols = c("status_datetime", "invoice_date", "payment_due_date")]
# import client customer map data
Client_Customer_map <- read.csv('client_customer_map_2.csv',sep = ",", header= TRUE, stringsAsFactors = FALSE)
# change into datatable
setDT(Client_Customer_map)
# rename columns: the client-level id becomes the join key 'customer_id'
setnames(Client_Customer_map, old = c("customer_id", "client_customer_id"), new = c("customer_id_small", "customer_id"))
# sort by / key on customer_id (enables fast keyed joins below)
setkey(Client_Customer_map, customer_id)
# NOTE(review): "PATH" is a placeholder -- replace with the real directory.
Dictionary <- read.xlsx("PATH/Dictionary.xlsx")
# Step 1, part a: invoices with more than one "entered" event (double entries).
Double_Entered_Event <- customer_invoices[, .(gardenia_invoice_id, invoice_status)] %>%
  .[invoice_status == "entered", .N, by = gardenia_invoice_id, ] %>%
  .[N > 1]
# Step 1, part b: per-invoice chronology check, excluding the
# double-entered invoices found above.
Cust_inv_date_check <- customer_invoices[
  !gardenia_invoice_id %in% Double_Entered_Event$gardenia_invoice_id,
  .(gardenia_invoice_id, invoice_version, status_datetime, invoice_status)
]
# Sort so the differences below are computed in event order per invoice.
Cust_inv_date_check <- Cust_inv_date_check[
  order(gardenia_invoice_id, invoice_version, status_datetime)
]
# Numeric dates so diff() applies uniformly to both checked columns.
Cust_inv_date_check <- Cust_inv_date_check[
  , status_datetime := as.numeric(status_datetime)
]
# First-difference of version number and status date within each invoice
# (NA marks the first event of an invoice).
Cust_inv_date_check <- Cust_inv_date_check[
  , c("invoice_version_check", "status_datetime_check") := lapply(.SD, function(x) c(NA, diff(x))),
  by = .(gardenia_invoice_id), .SDcols = c("invoice_version", "status_datetime")
]
# "P"(ass) when the version strictly increases and the date never goes
# backwards; "F"(ail) otherwise.
Cust_inv_date_check <- Cust_inv_date_check[!is.na(invoice_version_check),
  comp_invoice_version_dete := ifelse(invoice_version_check > 0 & status_datetime_check >= 0, "P", "F")
]
# Step 1, part c: drop the double-entered invoices from the working data.
customer_invoices_clean<-customer_invoices[!gardenia_invoice_id %in% Double_Entered_Event$gardenia_invoice_id,]
# Step 1, part d: sanity checks -- first/last row and min/max of each
# date column, for manual inspection of the date ranges.
Sanity_check_dates_1st_row<-customer_invoices_clean[1,.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_last_row<-customer_invoices_clean[.N,.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_status_datetime<-customer_invoices_clean[which.min(status_datetime),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_invoice_date<-customer_invoices_clean[which.min(invoice_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_payment_due_date<-customer_invoices_clean[which.min(payment_due_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_status_datetime<-customer_invoices_clean[which.max(status_datetime),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_invoice_date<-customer_invoices_clean[which.max(invoice_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_payment_due_date<-customer_invoices_clean[which.max(payment_due_date),.(status_datetime,invoice_date,payment_due_date)]
###### Step 2: part a and b
###### The function dcast converts the data from long to wide format. In the modified data, invoice_status (entered, paid, adjusted)
###### will be column names and status_datetime will be present under respective columns (entered, paid, adjusted). Since, there are
###### more than one date in the adjusted column per invoice, the maximum date will be taken as adjustment reflects
###### the invoice adjustmet (return of product) by customer
customer_invoices_normalized <- dcast(customer_invoices_clean, gardenia_invoice_id + customer_id + related_document + invoice_currency + invoice_date
                                      + payment_due_date + client_region + input_source + input_user + input_doc_type + client_accounting_currency + payment_terms
                                      ~ invoice_status,
                                      value.var = "status_datetime", fun.aggregate = max
                                      ) %>%
  .[, c("entered", "paid") := lapply(.SD, function(x) as.Date(x)),
    .SDcols = c("entered", "paid")
  ] %>%
  setkey(., gardenia_invoice_id)
###### Since there are more than one entries in Payment method and invoice amount per invoice_status, these fields were separted
###### from customer_invoices_normalized and will be merged using tmp varialbe below and by taking minimum amount from
###### the invoice as required
tmp <- customer_invoices_clean[, .(gardenia_invoice_id, payment_method, invoice_amount)][
  , lapply(.SD, function(x) min(x, na.rm = TRUE)),
  by = gardenia_invoice_id, .SDcols = c("payment_method", "invoice_amount")
] %>%
  setkey(., gardenia_invoice_id)
##### part c & d. First merge Payment method and invoice amount and then calculate the part c & d features:
##### Is_Late = 1 if paid after the due date, and Early_late_days = paid - due (negative = early).
customer_invoices_normalized_2 <- tmp[customer_invoices_normalized][, Is_Late := ifelse(paid > payment_due_date, 1, 0)][
  , Early_late_days := paid - payment_due_date
] %>% setkey(., customer_id)
# Attach the client-customer mapping via a keyed data.table join, then
# re-key by customer and entry date for downstream use.
customer_invoices_normalized_2 <- Client_Customer_map[customer_invoices_normalized_2]
setkey(customer_invoices_normalized_2, customer_id, entered)
| /Code.R | no_license | javeriaomar/Fast-and-efficient-data-manipulation-example-using-data.table-logic | R | false | false | 5,522 | r | library(openxlsx)
# Setup for the customer-invoice cleaning pipeline: load packages, import the
# raw CSV extracts, convert them to data.tables, and key the client/customer map.
library(magrittr)
library(data.table)
# import the data (put correct path of data)
customer_invoices <- read.csv('customer_invoices_2 (1).csv',sep = ",", header= TRUE, stringsAsFactors = FALSE)
# convert to data.table in place (by reference)
setDT(customer_invoices)
# convert date columns from character to Date; the "%d/%m/%Y %H:%M" format is
# parsed but as.Date() drops the time-of-day component
customer_invoices[, c("status_datetime", "invoice_date", "payment_due_date") := lapply(.SD, function(x) as.Date(x, format = "%d/%m/%Y %H:%M")),
.SDcols = c("status_datetime", "invoice_date", "payment_due_date")]
# import client customer map data
Client_Customer_map <- read.csv('client_customer_map_2.csv',sep = ",", header= TRUE, stringsAsFactors = FALSE)
# convert to data.table in place
setDT(Client_Customer_map)
# rename so that client_customer_id becomes the join key "customer_id"
setnames(Client_Customer_map, old = c("customer_id", "client_customer_id"), new = c("customer_id_small", "customer_id"))
# set the key (sorts the table and enables keyed joins on customer_id)
setkey(Client_Customer_map, customer_id)
Dictionary <- read.xlsx("PATH/Dictionary.xlsx")  # column dictionary; replace PATH with the real location
# Step1: part a -- invoices that have more than one "entered" event
Double_Entered_Event <- customer_invoices[, .(gardenia_invoice_id, invoice_status)] %>%
.[invoice_status == "entered", .N, by = gardenia_invoice_id, ] %>%
.[N > 1]
# Step1: part b -- verify that, within each invoice, version numbers increase
# and status timestamps do not go backwards
Cust_inv_date_check <- customer_invoices[
!gardenia_invoice_id %in% Double_Entered_Event$gardenia_invoice_id,
.(gardenia_invoice_id, invoice_version, status_datetime, invoice_status)
]
Cust_inv_date_check <- Cust_inv_date_check[
order(gardenia_invoice_id, invoice_version, status_datetime)
]
Cust_inv_date_check <- Cust_inv_date_check[
, status_datetime := as.numeric(status_datetime)
]
# first differences of version and timestamp within each invoice (NA on the first row)
Cust_inv_date_check <- Cust_inv_date_check[
, c("invoice_version_check", "status_datetime_check") := lapply(.SD, function(x) c(NA, diff(x))),
by = .(gardenia_invoice_id), .SDcols = c("invoice_version", "status_datetime")
]
# "P" (pass) when the version strictly increases and the timestamp does not decrease, else "F"
Cust_inv_date_check <- Cust_inv_date_check[!is.na(invoice_version_check),
comp_invoice_version_dete := ifelse(invoice_version_check > 0 & status_datetime_check >= 0, "P", "F")
]
# Step1: part c -- drop invoices that had duplicated "entered" events
customer_invoices_clean<-customer_invoices[!gardenia_invoice_id %in% Double_Entered_Event$gardenia_invoice_id,]
# Step1: part d -- spot checks of the date columns (first/last row and the rows
# holding each column's minimum/maximum)
Sanity_check_dates_1st_row<-customer_invoices_clean[1,.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_last_row<-customer_invoices_clean[.N,.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_status_datetime<-customer_invoices_clean[which.min(status_datetime),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_invoice_date<-customer_invoices_clean[which.min(invoice_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_min_payment_due_date<-customer_invoices_clean[which.min(payment_due_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_status_datetime<-customer_invoices_clean[which.max(status_datetime),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_invoice_date<-customer_invoices_clean[which.max(invoice_date),.(status_datetime,invoice_date,payment_due_date)]
Sanity_check_dates_max_payment_due_date<-customer_invoices_clean[which.max(payment_due_date),.(status_datetime,invoice_date,payment_due_date)]
###### Step 2: part a and b
###### The function dcast converts the data from long to wide format. In the modified data, invoice_status (entered, paid, adjusted)
###### will be column names and status_datetime will be present under respective columns (entered, paid, adjusted). Since there are
###### more than one date in the adjusted column per invoice, the maximum date will be taken, as the adjustment reflects
###### the invoice adjustment (return of product) by the customer
customer_invoices_normalized <- dcast(customer_invoices_clean, gardenia_invoice_id + customer_id + related_document + invoice_currency + invoice_date
+ payment_due_date + client_region + input_source + input_user + input_doc_type + client_accounting_currency + payment_terms
~ invoice_status,
value.var = "status_datetime", fun.aggregate = max
) %>%
.[, c("entered", "paid") := lapply(.SD, function(x) as.Date(x)),
.SDcols = c("entered", "paid")
] %>%
setkey(., gardenia_invoice_id)
###### Since there is more than one entry in payment method and invoice amount per invoice_status, these fields were separated
###### from customer_invoices_normalized and will be merged using the tmp variable below, taking the minimum amount from
###### the invoice as required
tmp <- customer_invoices_clean[, .(gardenia_invoice_id, payment_method, invoice_amount)][
, lapply(.SD, function(x) min(x, na.rm = TRUE)),
by = gardenia_invoice_id, .SDcols = c("payment_method", "invoice_amount")
] %>%
setkey(., gardenia_invoice_id)
##### part c & d. First merge payment method and invoice amount, then compute the
##### lateness features: Is_Late flag and signed Early_late_days (paid - due date)
customer_invoices_normalized_2 <- tmp[customer_invoices_normalized][, Is_Late := ifelse(paid > payment_due_date, 1, 0)][
, Early_late_days := paid - payment_due_date
] %>% setkey(., customer_id)
customer_invoices_normalized_2 <- Client_Customer_map[customer_invoices_normalized_2]
setkey(customer_invoices_normalized_2, customer_id, entered)
|
## Permutation test to assess for enrichment of GPCRs in genes with monoallelic expression
## Draws random gene sets of the same size as the observed GPCR set from the background
## and compares the observed overlap with monoallelically expressed (MAE) genes against
## the resulting permutation null distribution.
# setwd("~/Desktop/Permutation_testing/Allelic_expression/") # Please set appropriate working directory
n_perm <- 100000L  # number of random gene sets to draw
aexp = read.table('../data/allelicexp_genes.txt',header=TRUE)
background<-as.vector(unlist(aexp$Ensemble_ID))
# Genes with mono-allelic expression
mae<-as.vector(unlist(aexp[aexp$MAE.1_BAE.0 == 1, 1]))
# GPCR drug targets with mono-allelic expression
recep<-as.vector(unlist(read.table('GPCR_drugtargets.txt')))
recep.aexp = intersect(recep,background) # GPCR drug targets with allele-specific expression data
recep.mae<-intersect(recep.aexp,mae)
# Randomizations: overlap of a random same-size gene set with the MAE genes.
# Computing the overlap inside one pass avoids materialising the previous
# length(recep.aexp) x 100000 character matrix of sampled gene IDs.
result <- vapply(seq_len(n_perm), function(i) {
  sam <- sample(background, size = length(recep.aexp), replace = FALSE)
  length(intersect(sam, mae))
}, numeric(1))
l<-length(recep.mae)            # observed overlap
m<-mean(result)                 # null mean
s<-sd(result)                   # null SD
z<-(l - m) / s                  # standardised enrichment score
p<-mean(result >= l)            # empirical one-sided P-value
res<-matrix(ncol=2, nrow=5)
res[,1]<-c("GPCRs with monoallelic expression","Mean of random expectation", "SD of random expectation", "Z-score","P-value")
res[,2]<-c(l,m,s,z,p)
write.table(res,'GPCRaexp_permut.txt',sep="\t",quote=FALSE,row.names=FALSE,col.names=FALSE)
# if P-value is 0, then it should be reported as P<1E-5
| /scripts/allelicexp.R | no_license | AlexanderHauser/GPCR-Pharmacogenomics | R | false | false | 1,410 | r | ## Permutation test to assess for enrichment of GPCRs in genes with monoallelic expression
# setwd("~/Desktop/Permutation_testing/Allelic_expression/") # Please set appropriate working directory
aexp = read.table('../data/allelicexp_genes.txt',header=T)
background<-as.vector(unlist(aexp$Ensemble_ID))
#Genes with mono-allelic expression
mae<-as.vector(unlist(aexp[which(aexp$MAE.1_BAE.0 == 1),1]))
#GPCR drug targets with mono-allelic expression
recep<-as.vector(unlist(read.table('GPCR_drugtargets.txt')))
recep.aexp = intersect(recep,background) # GPCR drug targets with allele-specific expression data
recep.mae<-intersect(recep.aexp,mae)
# Randomizations
random<-matrix(ncol=100000,nrow=length(recep.aexp))
for (i in 1:100000){
sam<-sample(background, size=length(recep.aexp),replace=FALSE,prob=NULL)
random[,i]<-sam
}
result<-vector(length=100000)
for(i in 1:ncol(random)){
result[i]<-length(intersect(random[,i],mae))
}
l<-length(recep.mae)
m<-mean(result)
s<-sd(result)
z<-(length(recep.mae)-mean(result))/sd(result)
p<-length(which(result>=length(recep.mae)))/length(result)
res<-matrix(ncol=2, nrow=5)
res[,1]<-c("GPCRs with monoallelic expression","Mean of random expectation", "SD of random expectation", "Z-score","P-value")
res[,2]<-c(l,m,s,z,p)
write.table(res,'GPCRaexp_permut.txt',sep="\t",quote=F,row.names=F,col.names=F)
# if P-value is 0, then it should be reported as P<1E-5
|
# Split UK Biobank samples into European / non-European groups using two columns
# of the sample-QC file (V26, V27 -- presumably principal components; TODO confirm),
# plot the split, and write one sample-ID list per group.
library(data.table)
library(dplyr)
library(ggplot2)
a <- fread("../data/ukb_sqc_v2.txt", header = FALSE)
b <- fread("../data/ukb878_imp_chr10_v2_s487406.sample", header = FALSE, skip = 2)
# keep samples with flag V66 == 1 (QC inclusion flag -- verify against UKB docs)
a <- subset(a, V66 == 1)
# non-European if the point lies above the line V26 = 0.5 * V27 + 45
index <- a$V26 > (a$V27 * 0.5 + 45)
table(index)
p1 <- ggplot(a, aes(V27, V26)) +
  geom_point(aes(colour = index)) +
  geom_abline(intercept = 45, slope = 0.5)
# Pass the plot explicitly: ggsave() defaults to the last *displayed* plot,
# which is undefined when this script runs non-interactively (p1 is never printed).
ggsave("../images/eur-vs-non_eur.png", plot = p1)
# Assume that the order of b is the same as a after exclusions removed
write.table(b$V1[index], file="../data/non_europeans.txt", row.names = FALSE, col.names = FALSE, quote = FALSE)
write.table(b$V1[!index], file="../data/europeans.txt", row.names = FALSE, col.names = FALSE, quote = FALSE)
| /scripts/ancestry.r | no_license | explodecomputer/add-chip-design | R | false | false | 625 | r | library(data.table)
library(dplyr)
library(ggplot2)
a <- fread("../data/ukb_sqc_v2.txt", he=FALSE)
b <- fread("../data/ukb878_imp_chr10_v2_s487406.sample", he=FALSE, skip=2)
a <- subset(a, V66 == 1)
index <- a$V26 > (a$V27 * 0.5 + 45)
table(index)
p1 <- ggplot(a, aes(V27, V26)) +
geom_point(aes(colour=index)) +
geom_abline(intercept=45, slope=0.5)
ggsave("../images/eur-vs-non_eur.png")
# Assume that the order of b is the same as a after exclusions removed
write.table(b$V1[index], file="../data/non_europeans.txt", row=F, col=F, qu=F)
write.table(b$V1[!index], file="../data/europeans.txt", row=F, col=F, qu=F)
|
library(OPDOE)
### Name: heights
### Title: male / female heights data
### Aliases: heights
### Keywords: datasets
### ** Examples
# Sequential triangular test comparing female vs male mean heights
# (auto-extracted OPDOE package example; attach() is used only for brevity here).
data(heights)
attach(heights)
# Start the sequential test with the first three observations per group.
tt <- triangular.test.norm(x=female[1:3],
y=male[1:3], mu1=170,mu2=176,mu0=164,
alpha=0.05, beta=0.2,sigma=7)
# Test is yet unfinished, add the remaining values:
tt <- update(tt,x=female[4:7], y=male[4:7])
# Test is finished now
| /data/genthat_extracted_code/OPDOE/examples/heights.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 403 | r | library(OPDOE)
### Name: heights
### Title: male / female heights data
### Aliases: heights
### Keywords: datasets
### ** Examples
data(heights)
attach(heights)
tt <- triangular.test.norm(x=female[1:3],
y=male[1:3], mu1=170,mu2=176,mu0=164,
alpha=0.05, beta=0.2,sigma=7)
# Test is yet unfinished, add the remaining values:
tt <- update(tt,x=female[4:7], y=male[4:7])
# Test is finished now
|
# Parallel backend setup for the simulation study (appears to evaluate a
# doubly-robust estimator under a misspecified propensity-score model;
# cf. the output file name "dr_sat_wrongPS.csv").
library(doParallel)
library(foreach)
# Calculate the number of cores
getDoParWorkers()
detectCores()
# register 5 workers; each replicate of myfunc() runs on one worker
cl=makeCluster(5)
registerDoParallel(cl)
getDoParWorkers()
#for(m in 1:sim)
# One simulation replicate, indexed by m (which selects a pre-drawn random seed).
# Generates a longitudinal cohort via datagen(), fits a propensity-score model,
# and computes an iterated-regression estimate of the counterfactual outcome
# mean at time 5 under a stochastic treatment regime parameterised by delta.
# Returns a 1x1 matrix containing the estimated mean (meany5).
myfunc = function(m)
{
library(geepack);library(MASS);library(ResourceSelection);library(ltmle); library(SuperLearner)
library(dplyr); library(glm2);
library(data.table)
#library(reshape2) #do not use for data frame only
setDTthreads(1)
# NA-safe link helpers
logit <- function(term) {
return( ifelse(!is.na(term),log(term/(1-term)),NA) )
}
EXPIT <- function(term) {
return( ifelse(!is.na(term),exp(term)/(1+exp(term)),NA) )
}
source("datagen.R")
# reproducibility: replicate m uses the m-th of 1000 pre-generated seeds
set.seed(1129)
seeds = floor(runif(1000)*10^8);
set.seed(seeds[m])
n <- 2500
K <- 5
delta=0.25
# data-generating parameters passed to datagen()
alpha0=-1; alpha1=-2; alpha2=-1; alpha3=1; alpha4=2;
beta1_0=-1; beta1_1=0; beta1_2=-1; beta1_3=1; beta1_4=-1; beta1_5=1; beta1_6=0; #rectal STI indicator
beta2_0=-1; beta2_1=0; beta2_2=0; beta2_3=1; beta2_4=-1; beta2_5=1; beta2_6=0; #cd4 count
beta3_0=1; beta3_1=0; beta3_2=1; beta3_3=1; beta3_4=0; beta3_5=1; beta3_6=0; #unprotected sexual activity (H/L)
theta0=1; theta1=0; theta2=3; theta3=-2; theta4=1; theta5=-1; theta6=0; #theta1 always 0
cens0=-2; cens1=0; cens2=1; cens3=-1
# simulate n subjects over K time points
df <- lapply(as.list(1:n), FUN=function(ind){
datagen(ind, K=K, sigma=1,
alpha0=alpha0, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4 = alpha4,
beta1_0=beta1_0, beta1_1=beta1_1, beta1_2=beta1_2, beta1_3=beta1_3, beta1_4=beta1_4, beta1_5=beta1_5, beta1_6=beta1_6,
beta2_0=beta2_0, beta2_1=beta2_1, beta2_2=beta2_2, beta2_3=beta2_3, beta2_4=beta2_4, beta2_5=beta2_5, beta2_6=beta2_6,
beta3_0=beta3_0, beta3_1=beta3_1, beta3_2=beta3_2, beta3_3=beta3_3, beta3_4=beta3_4, beta3_5=beta3_5, beta3_6=beta3_6,
theta0=theta0, theta1=theta1, theta2=theta2, theta3=theta3, theta4=theta4, theta5=theta5, theta6=theta6,
cens0=cens0, cens1=cens1, cens2=cens2, cens3=cens3)
})
dffull <- rbindlist(df)
# propensity-score model for treatment A. lag_A is computed but deliberately
# excluded from the model formula (misspecified PS; cf. file name "wrongPS").
dffull[, paste("lag_A") := shift(A, 1, NA, type='lag'), by=id]
dffull$lag_A = ifelse(dffull$t0==0, 0, dffull$lag_A)
afit = glm2(A ~ L1 + L2 + L3, family = binomial(), data = dffull)
# probability of the treatment actually received
dffull$pred_obs = predict(afit, newdata = dffull, type="response")
dffull$pred_obs = ifelse(dffull$A==1, dffull$pred_obs, 1-dffull$pred_obs)
dffull$lag_A=NULL
#cfit = glm2(Cen~L2+L3,family=binomial,data=dffull)
dffull$pred_obsc = 1#predict(cfit, newdata = dffull, type="response")
#dffull$pred_obsc = ifelse(dffull$Cen==1, dffull$pred_obsc, 1-dffull$pred_obsc)
# per-row intervention density under the stochastic regime (depends on L1, A, delta)
dffull$fint = (dffull$L1*delta + 1 - dffull$L1)*dffull$pred_obs + dffull$L1*dffull$A*(1-delta)
# reshape long -> wide: one row per subject, columns suffixed by time point
dffullwide = dcast(dffull, id ~ t0, value.var = c("L1","L2","L3","A","Cen","Y","pred_obs","pred_obsc","fint","U"))
tmpdata = dffullwide
# carry the absorbing outcome forward, then shift Y_k so Y_t is the outcome at time t
tmpdata$Y_1 = ifelse(tmpdata$Y_0==0,0,tmpdata$Y_1)
tmpdata$Y_2 = ifelse(!is.na(tmpdata$Y_1) & tmpdata$Y_1==0,0,tmpdata$Y_2)
tmpdata$Y_3 = ifelse(!is.na(tmpdata$Y_2) & tmpdata$Y_2==0,0,tmpdata$Y_3)
tmpdata$Y_4 = ifelse(!is.na(tmpdata$Y_3) & tmpdata$Y_3==0,0,tmpdata$Y_4)
tmpdata$Y_5 = tmpdata$Y_4
tmpdata$Y_4 = tmpdata$Y_3
tmpdata$Y_3 = tmpdata$Y_2
tmpdata$Y_2 = tmpdata$Y_1
tmpdata$Y_1 = tmpdata$Y_0
tmpdata$Y_0 = NULL
tmpdata$id = seq(1,n,by=1)
# cumulative products of treatment (pi*) and censoring (pi*c) probabilities,
# and of the intervention densities (fintc*), up to each time point
tmpdata$pi4 <- tmpdata$pi3 <- tmpdata$pi2 <- tmpdata$pi1 <- tmpdata$pi0 <- NA
tmpdata$pi4c <- tmpdata$pi3c <- tmpdata$pi2c <- tmpdata$pi1c <- tmpdata$pi0c <- NA
tmpdata$pi0 = tmpdata$pred_obs_0
tmpdata$pi1 = tmpdata$pi0*tmpdata$pred_obs_1
tmpdata$pi2 = tmpdata$pi1*tmpdata$pred_obs_2
tmpdata$pi3 = tmpdata$pi2*tmpdata$pred_obs_3
tmpdata$pi4 = tmpdata$pi3*tmpdata$pred_obs_4
tmpdata$fintc0 = tmpdata$fint_0
tmpdata$fintc1 = tmpdata$fintc0*tmpdata$fint_1
tmpdata$fintc2 = tmpdata$fintc1*tmpdata$fint_2
tmpdata$fintc3 = tmpdata$fintc2*tmpdata$fint_3
tmpdata$fintc4 = tmpdata$fintc3*tmpdata$fint_4
tmpdata$pi0c = tmpdata$pred_obsc_0
tmpdata$pi1c = tmpdata$pi0c*tmpdata$pred_obsc_1
tmpdata$pi2c = tmpdata$pi1c*tmpdata$pred_obsc_2
tmpdata$pi3c = tmpdata$pi2c*tmpdata$pred_obsc_3
tmpdata$pi4c = tmpdata$pi3c*tmpdata$pred_obsc_4
##################
######time 5######
##################
# Iterated (sequential) weighted regressions, working backwards from time 5 to
# baseline. At each step: fit a weighted outcome regression among subjects still
# at risk and uncensored, then form the prediction under the stochastic regime
# (mixture of observed-treatment and A=1 predictions, weighted by delta and L1),
# zeroing predictions for subjects whose outcome already occurred.
y4dat = tmpdata[tmpdata$Y_5<2 & tmpdata$Cen_4==0,];
y4fit = glm2(Y_5 ~ A_4 + L1_4 + L2_4 + L3_4, family=binomial(), weight = fintc4/(pi4*pi4c), data = y4dat) ;
y4dat = tmpdata[tmpdata$Y_4<2 & tmpdata$Cen_3==0,]; #Cen_3 is really Cen_4
tmp1= y4dat; tmp1$A_4=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_4*delta + 1-y4dat$L1_4) + predicttmp1*(1-delta)*(y4dat$L1_4);
y4dat$y4pred = ifelse(y4dat$Y_4==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_3*L1_3*L2_3*L3_3, family=binomial(), weight = fintc3/(pi3*pi3c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_3<2 & tmpdata$Cen_2==0,];
tmp1= y4dat; tmp1$A_3=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_3*delta + 1-y4dat$L1_3) + predicttmp1*(1-delta)*(y4dat$L1_3);
y4dat$y4pred = ifelse(y4dat$Y_3==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_2*L1_2*L2_2*L3_2, family=binomial(), weight = fintc2/(pi2*pi2c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_2<2 & tmpdata$Cen_1==0,];
tmp1= y4dat; tmp1$A_2=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_2*delta + 1-y4dat$L1_2) + predicttmp1*(1-delta)*(y4dat$L1_2);
y4dat$y4pred = ifelse(y4dat$Y_2==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_1*L1_1*L2_1*L3_1, family=binomial(), weight = fintc1/(pi1*pi1c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_1<2 & tmpdata$Cen_0==0,];
tmp1= y4dat; tmp1$A_1=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_1*delta + 1-y4dat$L1_1) + predicttmp1*(1-delta)*(y4dat$L1_1);
y4dat$y4pred = ifelse(y4dat$Y_1==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_0*L1_0*L2_0*L3_0, family=binomial(), weight = fintc0/(pi0*pi0c), data = y4dat)
y4dat = tmpdata
tmp1= y4dat; tmp1$A_0=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_0*delta + 1-y4dat$L1_0) + predicttmp1*(1-delta)*(y4dat$L1_0);
# final estimate: mean of the baseline predictions over all subjects
meany4tmp = c(mean(y4dat$y4pred))
meany5 = (meany4tmp)
#meany5
myparam = cbind(meany5)
return(myparam)
}
# Run 1000 simulation replicates in parallel, stack the 1x1 results row-wise,
# and write the estimates to disk.
test = foreach(m=1:1000) %dopar% myfunc(m)
test2 = do.call("rbind", test)
write.csv(test2,"dr_sat_wrongPS.csv")
stopCluster(cl) | /parametric_models/n2500/delta_0.25/dr_wrongPS.R | no_license | lw499/Stochastic_treatment_paper | R | false | false | 6,994 | r | library(doParallel)
library(foreach)
# Calculate the number of cores
getDoParWorkers()
detectCores()
cl=makeCluster(5)
registerDoParallel(cl)
getDoParWorkers()
#for(m in 1:sim)
myfunc = function(m)
{
library(geepack);library(MASS);library(ResourceSelection);library(ltmle); library(SuperLearner)
library(dplyr); library(glm2);
library(data.table)
#library(reshape2) #do not use for data frame only
setDTthreads(1)
logit <- function(term) {
return( ifelse(!is.na(term),log(term/(1-term)),NA) )
}
EXPIT <- function(term) {
return( ifelse(!is.na(term),exp(term)/(1+exp(term)),NA) )
}
source("datagen.R")
set.seed(1129)
seeds = floor(runif(1000)*10^8);
set.seed(seeds[m])
n <- 2500
K <- 5
delta=0.25
alpha0=-1; alpha1=-2; alpha2=-1; alpha3=1; alpha4=2;
beta1_0=-1; beta1_1=0; beta1_2=-1; beta1_3=1; beta1_4=-1; beta1_5=1; beta1_6=0; #rectal STI indicator
beta2_0=-1; beta2_1=0; beta2_2=0; beta2_3=1; beta2_4=-1; beta2_5=1; beta2_6=0; #cd4 count
beta3_0=1; beta3_1=0; beta3_2=1; beta3_3=1; beta3_4=0; beta3_5=1; beta3_6=0; #unprotected sexual activity (H/L)
theta0=1; theta1=0; theta2=3; theta3=-2; theta4=1; theta5=-1; theta6=0; #theta1 always 0
cens0=-2; cens1=0; cens2=1; cens3=-1
df <- lapply(as.list(1:n), FUN=function(ind){
datagen(ind, K=K, sigma=1,
alpha0=alpha0, alpha1=alpha1, alpha2=alpha2, alpha3=alpha3, alpha4 = alpha4,
beta1_0=beta1_0, beta1_1=beta1_1, beta1_2=beta1_2, beta1_3=beta1_3, beta1_4=beta1_4, beta1_5=beta1_5, beta1_6=beta1_6,
beta2_0=beta2_0, beta2_1=beta2_1, beta2_2=beta2_2, beta2_3=beta2_3, beta2_4=beta2_4, beta2_5=beta2_5, beta2_6=beta2_6,
beta3_0=beta3_0, beta3_1=beta3_1, beta3_2=beta3_2, beta3_3=beta3_3, beta3_4=beta3_4, beta3_5=beta3_5, beta3_6=beta3_6,
theta0=theta0, theta1=theta1, theta2=theta2, theta3=theta3, theta4=theta4, theta5=theta5, theta6=theta6,
cens0=cens0, cens1=cens1, cens2=cens2, cens3=cens3)
})
dffull <- rbindlist(df)
dffull[, paste("lag_A") := shift(A, 1, NA, type='lag'), by=id]
dffull$lag_A = ifelse(dffull$t0==0, 0, dffull$lag_A)
afit = glm2(A ~ L1 + L2 + L3, family = binomial(), data = dffull)
dffull$pred_obs = predict(afit, newdata = dffull, type="response")
dffull$pred_obs = ifelse(dffull$A==1, dffull$pred_obs, 1-dffull$pred_obs)
dffull$lag_A=NULL
#cfit = glm2(Cen~L2+L3,family=binomial,data=dffull)
dffull$pred_obsc = 1#predict(cfit, newdata = dffull, type="response")
#dffull$pred_obsc = ifelse(dffull$Cen==1, dffull$pred_obsc, 1-dffull$pred_obsc)
dffull$fint = (dffull$L1*delta + 1 - dffull$L1)*dffull$pred_obs + dffull$L1*dffull$A*(1-delta)
dffullwide = dcast(dffull, id ~ t0, value.var = c("L1","L2","L3","A","Cen","Y","pred_obs","pred_obsc","fint","U"))
tmpdata = dffullwide
#subset data
tmpdata$Y_1 = ifelse(tmpdata$Y_0==0,0,tmpdata$Y_1)
tmpdata$Y_2 = ifelse(!is.na(tmpdata$Y_1) & tmpdata$Y_1==0,0,tmpdata$Y_2)
tmpdata$Y_3 = ifelse(!is.na(tmpdata$Y_2) & tmpdata$Y_2==0,0,tmpdata$Y_3)
tmpdata$Y_4 = ifelse(!is.na(tmpdata$Y_3) & tmpdata$Y_3==0,0,tmpdata$Y_4)
tmpdata$Y_5 = tmpdata$Y_4
tmpdata$Y_4 = tmpdata$Y_3
tmpdata$Y_3 = tmpdata$Y_2
tmpdata$Y_2 = tmpdata$Y_1
tmpdata$Y_1 = tmpdata$Y_0
tmpdata$Y_0 = NULL
tmpdata$id = seq(1,n,by=1)
tmpdata$pi4 <- tmpdata$pi3 <- tmpdata$pi2 <- tmpdata$pi1 <- tmpdata$pi0 <- NA
tmpdata$pi4c <- tmpdata$pi3c <- tmpdata$pi2c <- tmpdata$pi1c <- tmpdata$pi0c <- NA
tmpdata$pi0 = tmpdata$pred_obs_0
tmpdata$pi1 = tmpdata$pi0*tmpdata$pred_obs_1
tmpdata$pi2 = tmpdata$pi1*tmpdata$pred_obs_2
tmpdata$pi3 = tmpdata$pi2*tmpdata$pred_obs_3
tmpdata$pi4 = tmpdata$pi3*tmpdata$pred_obs_4
tmpdata$fintc0 = tmpdata$fint_0
tmpdata$fintc1 = tmpdata$fintc0*tmpdata$fint_1
tmpdata$fintc2 = tmpdata$fintc1*tmpdata$fint_2
tmpdata$fintc3 = tmpdata$fintc2*tmpdata$fint_3
tmpdata$fintc4 = tmpdata$fintc3*tmpdata$fint_4
tmpdata$pi0c = tmpdata$pred_obsc_0
tmpdata$pi1c = tmpdata$pi0c*tmpdata$pred_obsc_1
tmpdata$pi2c = tmpdata$pi1c*tmpdata$pred_obsc_2
tmpdata$pi3c = tmpdata$pi2c*tmpdata$pred_obsc_3
tmpdata$pi4c = tmpdata$pi3c*tmpdata$pred_obsc_4
##################
######time 5######
##################
y4dat = tmpdata[tmpdata$Y_5<2 & tmpdata$Cen_4==0,];
y4fit = glm2(Y_5 ~ A_4 + L1_4 + L2_4 + L3_4, family=binomial(), weight = fintc4/(pi4*pi4c), data = y4dat) ;
y4dat = tmpdata[tmpdata$Y_4<2 & tmpdata$Cen_3==0,]; #Cen_3 is really Cen_4
tmp1= y4dat; tmp1$A_4=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_4*delta + 1-y4dat$L1_4) + predicttmp1*(1-delta)*(y4dat$L1_4);
y4dat$y4pred = ifelse(y4dat$Y_4==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_3*L1_3*L2_3*L3_3, family=binomial(), weight = fintc3/(pi3*pi3c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_3<2 & tmpdata$Cen_2==0,];
tmp1= y4dat; tmp1$A_3=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_3*delta + 1-y4dat$L1_3) + predicttmp1*(1-delta)*(y4dat$L1_3);
y4dat$y4pred = ifelse(y4dat$Y_3==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_2*L1_2*L2_2*L3_2, family=binomial(), weight = fintc2/(pi2*pi2c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_2<2 & tmpdata$Cen_1==0,];
tmp1= y4dat; tmp1$A_2=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_2*delta + 1-y4dat$L1_2) + predicttmp1*(1-delta)*(y4dat$L1_2);
y4dat$y4pred = ifelse(y4dat$Y_2==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_1*L1_1*L2_1*L3_1, family=binomial(), weight = fintc1/(pi1*pi1c), data = y4dat)
y4dat = tmpdata[tmpdata$Y_1<2 & tmpdata$Cen_0==0,];
tmp1= y4dat; tmp1$A_1=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_1*delta + 1-y4dat$L1_1) + predicttmp1*(1-delta)*(y4dat$L1_1);
y4dat$y4pred = ifelse(y4dat$Y_1==0,0,y4dat$y4pred);
y4fit = glm2(y4pred ~ A_0*L1_0*L2_0*L3_0, family=binomial(), weight = fintc0/(pi0*pi0c), data = y4dat)
y4dat = tmpdata
tmp1= y4dat; tmp1$A_0=1;
predicttmp1 = predict(y4fit, newdata = tmp1, type="response")
y4dat$y4pred = predict(y4fit, newdata = y4dat, type="response")*(y4dat$L1_0*delta + 1-y4dat$L1_0) + predicttmp1*(1-delta)*(y4dat$L1_0);
meany4tmp = c(mean(y4dat$y4pred))
meany5 = (meany4tmp)
#meany5
myparam = cbind(meany5)
return(myparam)
}
test = foreach(m=1:1000) %dopar% myfunc(m)
test2 = do.call("rbind", test)
write.csv(test2,"dr_sat_wrongPS.csv")
stopCluster(cl) |
setwd('C:\\Users\\Andy\\Documents\\GitHub\\ExData_Plotting1') | /wd.R | no_license | jagzuk/ExData_Plotting1 | R | false | false | 61 | r | setwd('C:\\Users\\Andy\\Documents\\GitHub\\ExData_Plotting1') |
# testthat unit tests for the package's FARS helper functions
# (make_filename and fars_read_years are defined elsewhere in the package).
# test make_filename: a year should map to the bz2-compressed accident file name
test_that("make_filename generates file name", {
expect_equal(make_filename(1992), "accident_1992.csv.bz2")
})
# test fars_read_years: a year whose data file is absent yields list(NULL)
test_that("fars_read_years return NULL if the file is missing", {
expect_equal(fars_read_years(2014), list(NULL))
})
| /tests/testthat/test_far.R | no_license | addyag93/farsReport | R | false | false | 280 | r | # test make_filename
test_that("make_filename generates file name", {
expect_equal(make_filename(1992), "accident_1992.csv.bz2")
})
# test fars_read_years
test_that("fars_read_years return NULL if the file is missing", {
expect_equal(fars_read_years(2014), list(NULL))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{positionsOfMutationsOfType}
\alias{positionsOfMutationsOfType}
\alias{Genome$positionsOfMutationsOfType}
\alias{.G$positionsOfMutationsOfType}
\title{SLiM method positionsOfMutationsOfType}
\usage{
positionsOfMutationsOfType(mutType)
}
\arguments{
\item{mutType}{An object of type integer or MutationType object. Must be of
length 1 (a singleton). See details for description.}
}
\value{
An object of type integer.
}
\description{
Documentation for SLiM function \code{positionsOfMutationsOfType}, which is a
method of the SLiM class \code{\link{Genome}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=645}{SLiM manual: page
645}.
Returns the positions of mutations that are of the type specified by
mutType, out of all of the mutations in the genome. If you need a vector of the
matching Mutation objects, rather than just positions, use -mutationsOfType().
This method is provided for speed; it is much faster than the corresponding
Eidos code.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other Genome:
\code{\link{G}},
\code{\link{addMutations}()},
\code{\link{addNewDrawnMutation}()},
\code{\link{addNewMutation}()},
\code{\link{containsMarkerMutation}()},
\code{\link{containsMutations}()},
\code{\link{countOfMutationsOfType}()},
\code{\link{mutationCountsInGenomes}()},
\code{\link{mutationFrequenciesInGenomes}()},
\code{\link{mutationsOfType}()},
\code{\link{nucleotides}()},
\code{\link{outputMS}()},
\code{\link{outputVCF}()},
\code{\link{output}()},
\code{\link{readFromMS}()},
\code{\link{readFromVCF}()},
\code{\link{removeMutations}()},
\code{\link{sumOfMutationsOfType}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{Genome}
| /man/positionsOfMutationsOfType.Rd | permissive | rdinnager/slimr | R | false | true | 2,584 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{positionsOfMutationsOfType}
\alias{positionsOfMutationsOfType}
\alias{Genome$positionsOfMutationsOfType}
\alias{.G$positionsOfMutationsOfType}
\title{SLiM method positionsOfMutationsOfType}
\usage{
positionsOfMutationsOfType(mutType)
}
\arguments{
\item{mutType}{An object of type integer or MutationType object. Must be of
length 1 (a singleton). See details for description.}
}
\value{
An object of type integer.
}
\description{
Documentation for SLiM function \code{positionsOfMutationsOfType}, which is a
method of the SLiM class \code{\link{Genome}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=645}{SLiM manual: page
645}.
Returns the positions of mutations that are of the type specified by
mutType, out of all of the mutations in the genome. If you need a vector of the
matching Mutation objects, rather than just positions, use -mutationsOfType().
This method is provided for speed; it is much faster than the corresponding
Eidos code.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other Genome:
\code{\link{G}},
\code{\link{addMutations}()},
\code{\link{addNewDrawnMutation}()},
\code{\link{addNewMutation}()},
\code{\link{containsMarkerMutation}()},
\code{\link{containsMutations}()},
\code{\link{countOfMutationsOfType}()},
\code{\link{mutationCountsInGenomes}()},
\code{\link{mutationFrequenciesInGenomes}()},
\code{\link{mutationsOfType}()},
\code{\link{nucleotides}()},
\code{\link{outputMS}()},
\code{\link{outputVCF}()},
\code{\link{output}()},
\code{\link{readFromMS}()},
\code{\link{readFromVCF}()},
\code{\link{removeMutations}()},
\code{\link{sumOfMutationsOfType}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{Genome}
|
best <- function(state, outcome) {
  # Return the name of the hospital with the lowest 30-day death rate for
  # `outcome` among hospitals in `state` (reads "outcome-of-care-measures.csv"
  # from the working directory).
  #
  # state:   two-letter state abbreviation, validated against column 7 of the data
  # outcome: one of "heart attack", "heart failure", "pneumonia"
  data_set <- read.csv("outcome-of-care-measures.csv",
                       colClasses = "character",
                       na.strings = "Not Available")
  good_data <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% good_data) {
    stop("disease is invalid")
  }
  validState <- unique(data_set[, 7])
  if (!state %in% validState) {
    stop("state is invalid")  # fixed typo: message previously read "state us invalid"
  }
  col_names <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                 "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                 "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  colName <- col_names[match(outcome, good_data)]
  data_set.state <- data_set[data_set$State == state, ]
  # which.min ignores NA rates and returns the first index of the minimum
  idx <- which.min(as.double(data_set.state[, colName]))
  data_set.state[idx, "Hospital.Name"]
}
| /best.R | no_license | canichet/ProgrammingAssignment2 | R | false | false | 854 | r | best <- function(state, outcome) {
data_set <- read.csv("outcome-of-care-measures.csv",
colClasses = "character",
na.strings="Not Available")
good_data = c("heart attack","heart failure","pneumonia")
if (!outcome %in% good_data) { stop("disease is invalid")}
validState = unique(data_set[,7])
if (!state %in% validState) stop("state us invalid")
col_names <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack", "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure", "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
colName <- col_names[match(outcome,good_data)]
data_set.state <- data_set[data_set$State==state,]
idx <- which.min(as.double(data_set.state[,colName]))
data_set.state[idx,"Hospital.Name"]
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/analysis.r
\name{methylaction}
\alias{methylaction}
\title{Detect differentially methylated regions (DMRs) from windowed read counts from MBD-isolated genome sequencing (MiGS/MBD-seq) and similar techniques}
\usage{
methylaction(samp, counts, reads, poifdr = 0.1, stageone.p = 0.05,
anodev.p = 0.05, post.p = 0.05, freq = 2/3, minsize = 150,
joindist = 200, adjust.var = NULL, nperms = 0, perm.boot = F,
ncore = 1)
}
\arguments{
\item{samp}{Sample data.frame from readSampleInfo()}
\item{counts}{Preprocessed count data from getCounts()}
\item{reads}{Preprocessed reads/fragments data from getReads()}
\item{poifdr}{False discovery rate to use during initial filtering and frequency calling. Changing this value will change the threshold used for calling the presence of methylation.}
\item{stageone.p}{P-value cutoff for the pairwise testing in stage one.}
\item{anodev.p}{P-value cutoff for the analysis of deviance (ANODEV) in stage two testing.}
\item{post.p}{P-value cutoff for post-tests in stage two testing.}
\item{freq}{Fraction of samples within groups that must agree with respect to methylation status in order for "frequent" to be "TRUE" in the output DMR list.}
\item{minsize}{Minimum size of DMRs to report (in bp)}
\item{joindist}{Extend significant windows into DMRs over non-significant stage one windows between them up to this distance large (in bp)}
\item{adjust.var}{Name of a column present in "samp" that will be used as a covariate adjustment in the stage two ANODEV generalized linear model (GLM)}
\item{nperms}{Optional, perform this number of permutations after calling DMRs. Will create a data.table called "FDR" in the output list. See also maPerm(), maPermMerge(), and maPermFdr() for manual permutation running and FDR calculation.}
\item{perm.boot}{If nperms > 0 and if TRUE, perform bootstrapping (sampling with replacement). Otherwise, perform permutations (sampling without permutations)}
\item{ncore}{Number of parallel processes to use}
}
\value{
A list containing detailed results from each stage of the analysis.
}
\description{
After the counts have been pre-processed, this function performs all the analysis. Detailed results from intermediate steps are stored in the output list object to analyze method performance and provide input for the summary, export, and plotting functions.
}
| /man/methylaction.Rd | permissive | jeffbhasin/methylaction | R | false | false | 2,435 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/analysis.r
\name{methylaction}
\alias{methylaction}
\title{Detect differentially methylated regions (DMRs) from windowed read counts from MBD-isolated genome sequencing (MiGS/MBD-seq) and similar techniques}
\usage{
methylaction(samp, counts, reads, poifdr = 0.1, stageone.p = 0.05,
anodev.p = 0.05, post.p = 0.05, freq = 2/3, minsize = 150,
joindist = 200, adjust.var = NULL, nperms = 0, perm.boot = F,
ncore = 1)
}
\arguments{
\item{samp}{Sample data.frame from readSampleInfo()}
\item{counts}{Preprocessed count data from getCounts()}
\item{reads}{Preprocessed reads/fragments data from getReads()}
\item{poifdr}{False discovery rate to use during initial filtering and frequency calling. Changing this value will change the threshold used for calling the presence of methylation.}
\item{stageone.p}{P-value cutoff for the pairwise testing in stage one.}
\item{anodev.p}{P-value cutoff for the analysis of deviance (ANODEV) in stage two testing.}
\item{post.p}{P-value cutoff for post-tests in stage two testing.}
\item{freq}{Fraction of samples within groups that must agree with respect to methylation status in order for "frequent" to be "TRUE" in the output DMR list.}
\item{minsize}{Minimum size of DMRs to report (in bp)}
\item{joindist}{Extend significant windows into DMRs over non-significant stage one windows between them up to this distance large (in bp)}
\item{adjust.var}{Name of a column present in "samp" that will be used as a covariate adjustment in the stage two ANODEV generalized linear model (GLM)}
\item{nperms}{Optional, perform this number of permutations after calling DMRs. Will create a data.table called "FDR" in the output list. See also maPerm(), maPermMerge(), and maPermFdr() for manual permutation running and FDR calculation.}
\item{perm.boot}{If nperms > 0 and if TRUE, perform bootstrapping (sampling with replacement). Otherwise, perform permutations (sampling without permutations)}
\item{ncore}{Number of parallel processes to use}
}
\value{
A list containing detailed results from each stage of the analysis.
}
\description{
After the counts have been pre-processed, this function performs all the analysis. Detailed results from intermediate steps are stored in the output list object to analyze method performance and provide input for the summary, export, and plotting functions.
}
|
# Demonstration of logical values and logical operators in R.
#logical
# The two logical constants (T and F are reassignable shorthands; prefer the full names):
#TRUE T
#FALSE F

# Comparison operators return logical values when evaluated:
2<5
10>100
4==5

# Operator reference:
#==   equal to
#!=   not equal to
#<    less than
#<=   less than or equal to
#>=   greater than or equal to
#!    logical negation
#|    elementwise OR
#&    elementwise AND
# isTRUE(x)  tests whether x is exactly TRUE

# A comparison result can be stored in a variable; its type is "logical".
result <- 4<5
result
typeof(result)

# Negate a parenthesized expression with !
result2 <- !(5>1)
result2

# Combine logical values:
result | result2   # TRUE if at least one operand is TRUE
result & result2   # TRUE only if both operands are TRUE
isTRUE(result)
| /logicalVarAndOperators.R | no_license | shubhamgse/Learning-R | R | false | false | 200 | r | #logical
#TRUE T
#FALSE F
2<5
10>100
4==5
#==
#!=
#<
#<=
#>=
#!
#|
#&
# isTRUE(x)
result <- 4<5
result
typeof(result)
result2 <- !(5>1)
result2
result | result2
result & result2
isTRUE(result)
|
plink.score <- function(coefs,
                        score.options="",
                        q.score.range=NULL,
                        q.score.range.data=NULL,
                        q.score.range.options="",
                        p.vals=TRUE,
                        noscale=FALSE,
                        ...) {
    ### Call plink --score using 'coefs' as the per-SNP weights.
    ###
    ### coefs: one of
    ###   - a data.frame (written out directly as the plink score file),
    ###   - a numeric vector aligned with the .bim of the target bfile,
    ###   - the path of an existing score file (length-1 character).
    ### q.score.range: optionally apply cut-offs via plink's --q-score-range.
    ###   If p.vals is TRUE, a vector of p-value cut-offs in [0, 1],
    ###   e.g. c(0, 0.00001, 0.0001, 0.01, 0.1, 1). Otherwise a two- or
    ###   three-column matrix of ranges; only the last two columns are used
    ###   (see the plink manual).
    ### q.score.range.data: a file name, or a vector aligned with 'coefs',
    ###   giving the per-SNP values (e.g. p-values) the ranges apply to.
    ### noscale: no longer supported. plink rescales scores internally and the
    ###   actual number of SNPs used cannot be recovered reliably here, so the
    ###   function refuses rather than silently mis-scale.
    ### ...: further options passed on to plink().
    ###
    ### Returns a data frame of profile scores (one SCORE<i> column per range
    ### when q.score.range is used); plink's run notes are attached as the
    ### "notes" attribute.
    if(noscale) stop("noscale not supported any more. It's too dangerous, as I can't detect the actual number of SNPs used in plink.score.")
    other.options <- list(...)

    ## Resolve 'coefs' into a score file on disk.
    if(is.data.frame(coefs)) {
        bim <- coefs
        write.table2(bim, file=coefsfile <- tempfile(pattern="coefs"))
    } else if(is.vector(coefs) && is.numeric(coefs)) {
        if(is.null(other.options$bfile))
            bfile <- get(".bfile", envir=.GlobalEnv, inherits=FALSE) else
        bfile <- other.options$bfile
        bim <- read.bim(bfile)
        stopifnot(nrow(bim) == length(coefs))
        bim <- cbind(bim, coefs)
        write.table2(bim, file=coefsfile <- tempfile(pattern="coefs"))
    } else if(is.character(coefs) && length(coefs) == 1) {
        coefsfile <- coefs
    } else {
        ## Previously this fell through and left 'coefsfile' undefined.
        stop("'coefs' must be a data.frame, a numeric vector, or a file name.")
    }

    ## Default score-file columns "2 5 7" (ID, allele, coefficient) unless the
    ## user already supplied column numbers at the start of score.options.
    columns <- if(grepl("^[0-9]+\\s[0-9]*\\s*[0-9]*\\s*", score.options)) "" else "2 5 7"
    options <- paste("--score", coefsfile, columns, score.options)
    other.options$cmd <- paste(other.options$cmd, options)

    ## q-score-range
    if(!is.null(q.score.range)) {
        if(p.vals) {
            cutoffs <- q.score.range
            stopifnot(all(cutoffs >= 0 & cutoffs <= 1))
            cutoffs <- cutoffs[cutoffs > 0]  # a cut-off of 0 selects nothing
            n.ranges <- length(cutoffs)
            range.mat <- matrix(0, nrow=n.ranges, ncol=3)  # renamed: 'matrix' shadowed base::matrix
            range.mat[,1] <- seq_len(n.ranges)  # range labels 1..n
            range.mat[,2] <- 0                  # lower bound
            range.mat[,3] <- cutoffs            # upper bound
        }
        else {
            stopifnot(length(dim(q.score.range)) == 2)
            if(ncol(q.score.range) == 2) {
                range.mat <- cbind(seq_len(nrow(q.score.range)), q.score.range)
            }
            else {
                range.mat <- q.score.range
                range.mat[,1] <- seq_len(nrow(q.score.range))
            }
            ## BUG FIX: 'n.ranges' (formerly 'cutoffs') was undefined on this
            ## branch, so reading the per-range results below failed.
            n.ranges <- nrow(range.mat)
        }
        write.table2(range.mat, file=qfile <- tempfile(pattern="q"))
        ## Data file with the per-SNP values the ranges refer to.
        if(is.character(q.score.range.data) && length(q.score.range.data) == 1) {
            ## A file is given
            qdata <- q.score.range.data
        }
        else if(is.vector(q.score.range.data)) {
            if(is.character(coefs)) stop("Cannot combine with a file input for coefs")
            stopifnot(nrow(bim) == length(q.score.range.data))
            write.table2(cbind(bim$ID, q.score.range.data),
                         file=qdata <- tempfile(pattern="qdata"))
        }
        else {
            stop("I don't know what to do with this q.score.range.data input.")
        }
        options <- paste("--q-score-range", qfile, qdata, q.score.range.options)
        other.options$cmd <- paste(other.options$cmd, options)
    }

    #### run plink ####
    if(!is.null(other.options$test)) return(do.call(plink, other.options))
    outfile <- do.call(plink, other.options)

    ### Read results back in. (noscale is rejected up-front, so the old dead
    ### rescaling lines have been removed.)
    if(is.null(q.score.range)) {
        table <- read.table.plink(outfile, ".profile")
        if(attributes(table)$table.exists) {
            attr(table, "notes") <- attributes(outfile)
            return(table)
        }
        else {
            warning(attributes(outfile)$log)
            return(outfile)
        }
    }
    else {
        new.table <- TRUE
        toreturn <- NULL
        for(i in seq_len(n.ranges)) {
            table <- read.table.plink(outfile, paste0(".", i, ".profile"))
            if(attributes(table)$table.exists) {
                if(new.table) {
                    ## First readable range table seeds the result...
                    toreturn <- table
                    new.table <- FALSE
                }
                else {
                    ## ...subsequent ones contribute only their score column.
                    toreturn <- cbind(toreturn, table[,ncol(table)])
                }
                colnames(toreturn)[ncol(toreturn)] <- paste0("SCORE", i)
            }
            else warning(attributes(outfile)$log)
        }
        attr(toreturn, "notes") <- attributes(outfile)
        return(toreturn)
    }
}
| /Rplink/R/plink.score.R | no_license | unfated/cross-population-PRS | R | false | false | 4,621 | r | plink.score <- function(coefs,
score.options="",
q.score.range=NULL,
q.score.range.data=NULL,
q.score.range.options="",
p.vals=T,
noscale=F,
...) {
### FUnction to call plink --score with coefs as weights
### Optionally can apply p-values cut-offs using Plink's
### --q-score-range option
### Specify q.score.range as a range of cut-offs,
### e.g.: c(0, 0.00001, 0.0001, 0.01, 0.1, 1)
### p.vals: Indicate that the cut-offs are p-values
### If they're not, then specify q.score.range in
### in two or three columns. (Only last two columns will
### be used.) See plink manual.
### noscale: By default, plink automatically scales the score so that
### it is proportional to the number of variants included. Use this
### option to disable this.
### ...: options passed to plink()
if(noscale) stop("noscale not supported any more. It's too dangerous, as I can't detect the actual number of SNPs used in plink.score.")
other.options <- list(...)
if(is.data.frame(coefs)) {
bim <- coefs
write.table2(bim, file=coefsfile <- tempfile(pattern="coefs"))
} else if(is.vector(coefs) && is.numeric(coefs)) {
if(is.null(other.options$bfile))
bfile <- get(".bfile", envir=.GlobalEnv, inherits=F) else
bfile <- other.options$bfile
bim <- read.bim(bfile)
stopifnot(nrow(bim) == length(coefs))
bim <- cbind(bim, coefs)
write.table2(bim, file=coefsfile <- tempfile(pattern="coefs"))
} else if(is.character(coefs) && length(coefs) == 1) {
coefsfile <- coefs
}
columns <- ifelse(grepl("^[0-9]+\\s[0-9]*\\s*[0-9]*\\s*",score.options),
"", "2 5 7")
options <- paste("--score", coefsfile, columns, score.options)
other.options$cmd <- paste(other.options$cmd, options)
## q-score-range
if(!is.null(q.score.range)) {
if(p.vals) {
cutoffs <- q.score.range
stopifnot(all(cutoffs >= 0 & cutoffs <= 1))
cutoffs <- cutoffs[cutoffs > 0]
n.cutoffs <- length(cutoffs)
matrix <- matrix(0, nrow=n.cutoffs, ncol=3)
matrix[,1] <- 1:n.cutoffs
matrix[,2] <- 0
matrix[,3] <- cutoffs
}
else {
stopifnot(length(dim(q.score.range)) == 2)
if(ncol(q.score.range) == 2) {
matrix <- cbind(1:nrow(q.score.range), q.score.range)
}
else {
matrix <- q.score.range
matrix[,1] <- 1:nrow(q.score.range)
}
}
write.table2(matrix, file=qfile <- tempfile(pattern="q"))
## Data file
if(is.character(q.score.range.data) && length(q.score.range.data) == 1) {
## A file is given
qdata <- q.score.range.data
}
else if(is.vector(q.score.range.data)) {
if(is.character(coefs)) stop("Cannot combine with a file input for coefs")
stopifnot(nrow(bim) == length(q.score.range.data))
write.table2(cbind(bim$ID, q.score.range.data),
file=qdata <- tempfile(pattern="qdata"))
}
else {
stop("I don''t know what to do with this q.score.range.data input.")
}
options <- paste("--q-score-range", qfile, qdata, q.score.range.options)
other.options$cmd <- paste(other.options$cmd, options)
}
#### run plink ####
if(!is.null(other.options$test)) return(do.call(plink, other.options))
outfile <- do.call(plink, other.options)
### Read in table ###
if(is.null(q.score.range)) {
table <- read.table.plink(outfile, ".profile")
if(noscale) table$SCORE <- table$SCORE * 2 * nrow(bim)
if(attributes(table)$table.exists) {
attr(table, "notes") <- attributes(outfile)
return(table)
}
else {
warning(attributes(outfile)$log)
return(outfile)
}
}
else {
new.table <- T
toreturn <- NULL
for(i in 1:length(cutoffs)) {
table <- read.table.plink(outfile, paste0(".", i, ".profile"))
if(noscale) table$SCORE <- table$SCORE * 2 * nrow(bim)
if(attributes(table)$table.exists) {
if(new.table) {
toreturn <- table
new.table <- F
}
else {
toreturn <- cbind(toreturn, table[,ncol(table)])
}
colnames(toreturn)[ncol(toreturn)] <- paste0("SCORE", i)
}
else warning(attributes(outfile)$log)
}
attr(toreturn, "notes") <- attributes(outfile)
return(toreturn)
}
}
|
# Title : AWS cost checkup
# Objective : Plot AWS cost/usage summaries over several date windows
# Created by: fdrennan
# Created on: 5/14/20

# Kept for reference: ad-hoc security-group maintenance that used to live here.
# library(biggr)
# library(ipify)
#
# sgd <- security_group_data()
#
# ips <- sgd %>%
#   filter(group_name == 'Router', from_port == 22)
#
# current_ip <-
#   ips %>%
#   filter(str_detect(ip_ranges, my_ip))
#
# if(nrow(current_ip) == 0) {
#   security_group_revoke(sg_name = 'Router', ports = 22, ips = str_remove_all(ips$ip_ranges, "/32"))
#   my_ip = get_ip()
#   security_group_envoke(sg_name = 'Router', ports = 22, ips = my_ip)
# }

library(biggr)
library(ggplot2)
library(tidyverse)

# AWS credentials are read from the environment; never hard-code them here.
configure_aws(
  aws_access_key_id = Sys.getenv('AWS_ACCESS'),
  aws_secret_access_key = Sys.getenv('AWS_SECRET'),
  default.region = Sys.getenv('AWS_REGION')
)

days_ago <- 300
response <- cost_get(from = Sys.Date() - days_ago, to = Sys.Date())

# Build the standard cost-checkup facet plot, optionally restricted to dates
# on or after `from` (NULL = use the full history in `data`). The cumulative
# total is computed AFTER filtering, matching the original per-window plots.
plot_aws_checkup <- function(data, from = NULL) {
  data <- mutate(data, start = as.Date(start))
  if (!is.null(from)) {
    data <- filter(data, between(start, from, Sys.Date()))
  }
  data %>%
    mutate(total_cost = cumsum(unblended_cost)) %>%
    pivot_longer(cols = c(unblended_cost, blended_cost, usage_quantity, total_cost)) %>%
    ggplot() +
    aes(x = as.Date(start), y = value) +
    geom_col() +
    facet_wrap(name ~ ., scales = 'free') +
    xlab(label = 'Month to Date') +
    ylab('Amount') +
    ggtitle('AWS Checkup')
}

# Full 300-day history.
plot_aws_checkup(response)

# From the start of the month three months before the current one.
plot_aws_checkup(response, floor_date(floor_date(Sys.Date(), 'month') - 3, 'month'))

# Month to date.
plot_aws_checkup(response, floor_date(Sys.Date(), 'month'))

# Daily cost, cumulative cost, and log usage overlaid on one panel.
ggplot(response) +
  geom_line(aes(x = as.Date(start), y = unblended_cost, colour='Cost')) +
  geom_line(aes(x = as.Date(start), y = cumsum(unblended_cost), colour='Cost')) +
  geom_line(aes(x = as.Date(start), y = log(usage_quantity), colour = 'Log of Usage Quantity'))
| /biggr/scratch.R | no_license | cjayrans/ndexr-platform | R | false | false | 2,478 | r | # Title : TODO
# Objective : TODO
# Created by: fdrennan
# Created on: 5/14/20
# library(biggr)
# library(ipify)
#
# sgd <- security_group_data()
#
# ips <- sgd %>%
# filter(group_name == 'Router', from_port == 22)
#
# current_ip <-
# ips %>%
# filter(str_detect(ip_ranges, my_ip))
#
# if(nrow(current_ip) == 0) {
# security_group_revoke(sg_name = 'Router', ports = 22, ips = str_remove_all(ips$ip_ranges, "/32"))
# my_ip = get_ip()
# security_group_envoke(sg_name = 'Router', ports = 22, ips = my_ip)
# }
library(biggr)
library(ggplot2)
library(tidyverse)
configure_aws(
aws_access_key_id = Sys.getenv('AWS_ACCESS'),
aws_secret_access_key = Sys.getenv('AWS_SECRET'),
default.region = Sys.getenv('AWS_REGION')
)
days_ago <- 300
response <- cost_get(from = Sys.Date() - days_ago, to = Sys.Date())
response %>%
mutate(start = as.Date(start)) %>%
# filter(between(start, floor_date(Sys.Date(), 'month'), Sys.Date())) %>%
mutate(total_cost = cumsum(unblended_cost)) %>%
pivot_longer(cols = c(unblended_cost, blended_cost, usage_quantity, total_cost)) %>%
ggplot() +
aes(x = as.Date(start), y = value) +
geom_col() +
facet_wrap(name ~ ., scales = 'free') +
xlab(label = 'Month to Date') +
ylab('Amount') +
ggtitle('AWS Checkup')
response %>%
mutate(start = as.Date(start)) %>%
filter(between(start, floor_date(floor_date(Sys.Date(), 'month')-3, 'month'), Sys.Date())) %>%
mutate(total_cost = cumsum(unblended_cost)) %>%
pivot_longer(cols = c(unblended_cost, blended_cost, usage_quantity, total_cost)) %>%
ggplot() +
aes(x = as.Date(start), y = value) +
geom_col() +
facet_wrap(name ~ ., scales = 'free') +
xlab(label = 'Month to Date') +
ylab('Amount') +
ggtitle('AWS Checkup')
response %>%
mutate(start = as.Date(start)) %>%
filter(between(start, floor_date(Sys.Date(), 'month'), Sys.Date())) %>%
mutate(total_cost = cumsum(unblended_cost)) %>%
pivot_longer(cols = c(unblended_cost, blended_cost, usage_quantity, total_cost)) %>%
ggplot() +
aes(x = as.Date(start), y = value) +
geom_col() +
facet_wrap(name ~ ., scales = 'free') +
xlab(label = 'Month to Date') +
ylab('Amount') +
ggtitle('AWS Checkup')
ggplot(response) +
geom_line(aes(x = as.Date(start), y = unblended_cost, colour='Cost')) +
geom_line(aes(x = as.Date(start), y = cumsum(unblended_cost), colour='Cost')) +
geom_line(aes(x = as.Date(start), y = log(usage_quantity), colour = 'Log of Usage Quantity'))
|
library(cobs)

### Name: globtemp
### Title: Annual Average Global Surface Temperature
### Aliases: globtemp
### Keywords: datasets

### ** Examples

data(globtemp)
plot(globtemp, main = "Annual Global Temperature Deviations")
str(globtemp)
## forget about time-series, just use numeric vectors:
year <- as.vector(time(globtemp))
temp <- as.vector(globtemp)

##---- Code for Figure 1a of He and Ng (1999) ----------
## Median (tau defaults to 0.5) fit with degree-1 splines, constrained to be
## monotone increasing, letting cobs add knots automatically.
a50 <- cobs(year, temp, knots.add = TRUE, degree = 1, constraint = "increase")
summary(a50)
## As suggested in the warning message, we increase the number of knots to 9
a50 <- cobs(year, temp, nknots = 9, knots.add = TRUE, degree = 1,
            constraint = "increase")
summary(a50)
## Here, we use the same knots sequence chosen for the 50th percentile,
## so the 10th and 90th percentile fits are directly comparable.
a10 <- cobs(year, temp, nknots = length(a50$knots), knots = a50$knot,
            degree = 1, tau = 0.1, constraint = "increase")
summary(a10)
a90 <- cobs(year, temp, nknots = length(a50$knots), knots = a50$knot,
            degree = 1, tau = 0.9, constraint = "increase")
summary(a90)

## Flag years at or above the 90th-percentile fit ("hot") and at or below
## the 10th-percentile fit ("cold"); everything else is "normal".
which(hot.idx <- temp >= a90$fit)
which(cold.idx <- temp <= a10$fit)
normal.idx <- !hot.idx & !cold.idx

## Empty frame first, then add the three quantile curves with their intervals.
plot(year, temp, type = "n", ylab = "Temperature (C)", ylim = c(-.7,.6))
lines(predict(a50, year, interval = "both"), col = 2)
lines(predict(a10, year, interval = "both"), col = 3)
lines(predict(a90, year, interval = "both"), col = 3)
## pch 1 (circle) for normal years, pch 3 (plus) for hot/cold years.
points(year, temp, pch = c(1,3)[2 - normal.idx])
## label the "hot" and "cold" days
text(year[hot.idx], temp[hot.idx] + .03, labels = year[hot.idx])
text(year[cold.idx],temp[cold.idx]- .03, labels = year[cold.idx])
| /data/genthat_extracted_code/cobs/examples/globtemp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,621 | r | library(cobs)
### Name: globtemp
### Title: Annual Average Global Surface Temperature
### Aliases: globtemp
### Keywords: datasets
### ** Examples
data(globtemp)
plot(globtemp, main = "Annual Global Temperature Deviations")
str(globtemp)
## forget about time-series, just use numeric vectors:
year <- as.vector(time(globtemp))
temp <- as.vector(globtemp)
##---- Code for Figure 1a of He and Ng (1999) ----------
a50 <- cobs(year, temp, knots.add = TRUE, degree = 1, constraint = "increase")
summary(a50)
## As suggested in the warning message, we increase the number of knots to 9
a50 <- cobs(year, temp, nknots = 9, knots.add = TRUE, degree = 1,
constraint = "increase")
summary(a50)
## Here, we use the same knots sequence chosen for the 50th percentile
a10 <- cobs(year, temp, nknots = length(a50$knots), knots = a50$knot,
degree = 1, tau = 0.1, constraint = "increase")
summary(a10)
a90 <- cobs(year, temp, nknots = length(a50$knots), knots = a50$knot,
degree = 1, tau = 0.9, constraint = "increase")
summary(a90)
which(hot.idx <- temp >= a90$fit)
which(cold.idx <- temp <= a10$fit)
normal.idx <- !hot.idx & !cold.idx
plot(year, temp, type = "n", ylab = "Temperature (C)", ylim = c(-.7,.6))
lines(predict(a50, year, interval = "both"), col = 2)
lines(predict(a10, year, interval = "both"), col = 3)
lines(predict(a90, year, interval = "both"), col = 3)
points(year, temp, pch = c(1,3)[2 - normal.idx])
## label the "hot" and "cold" days
text(year[hot.idx], temp[hot.idx] + .03, labels = year[hot.idx])
text(year[cold.idx],temp[cold.idx]- .03, labels = year[cold.idx])
|
\name{MaternCorr}
\alias{Matern}
\alias{MaternCorr}
\alias{MaternCorr.default}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Matern correlation function and Matern formula term.
}
\description{
The Matérn correlation function describes realizations of Gaussian spatial processes with different smoothnesses
(i.e. either smooth or rugged surfaces). It also includes a scaling and a 'nugget' parameter. It can be invoked in two ways. First, the \code{MaternCorr} function evaluates these correlations, using distances as input. Second, a term of the form \code{Matern(1|}\emph{<...>}\code{)} in a \code{formula} specifies a random effect with Matérn correlation function, using coordinates found in a data frame as input. In the latter case, the correlations between realizations of the random effect for any two observations in the data will be the value of the Matérn function at the scaled Euclidean distance between coordinates specified in \emph{<...>}, using \dQuote{+} as separator (e.g., \code{Matern(1|latitude + longitude)}).
}
\usage{
\method{MaternCorr}{default}(d, rho = 1, smoothness, nu = smoothness, Nugget = 0L)
# Matern(1|...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{d}{A distance, typically an Euclidean distance}
\item{rho}{A scaling factor for distance. The 'range' considered in some
formulations is the reciprocal of this scaling factor}
  \item{smoothness}{The smoothness parameter, >0. \eqn{\nu=0.5} corresponds to the exponential correlation function,
   and the limit function when \eqn{\nu} goes to \eqn{\infty} is the squared exponential function (as in a Gaussian).}
\item{nu}{Same as smoothness}
\item{Nugget}{(Following the jargon of Kriging) a parameter describing a discontinuous decrease in
correlation at zero distance. Correlation will always be 1 at \eqn{d=0}, and from which it immediately drops to
(1-Nugget)}
\item{...}{Names of coordinates, using \dQuote{+} as separator (e.g., \code{Matern(1|latitude + longitude)}}
}
\details{
The correlation at distance \eqn{d>0} is
\deqn{(1-\textrm{Nugget}) \frac{(\rho d)^\nu K_\nu(\rho d)}{2^{(\nu - 1)} \Gamma(\nu)}}{%
(1-Nugget) 2^(1-\nu) (\rho d)^\nu K_\nu(\rho d) / \Gamma(\nu)}
where
\eqn{K_\nu} is the \code{\link{besselK}} function of order \eqn{\nu}.
}
\value{Scalar/vector/matrix depending on input.}
\references{
Stein, M.L. (1999) Statistical Interpolation of Spatial Data: Some Theory for Kriging. Springer, New York.
}
\seealso{
See \code{\link{corMatern}} for an implementation of this correlation function as a \code{corSpatial} object for use with \code{lme} or \code{glmmPQL}.
By default the Nugget is set to 0. See one of the examples on data set \code{\link{Loaloa}}
for a fit including the estimation of the Nugget.
}
\examples{
## See examples in help("spaMM"), help("HLCor"), help("Loaloa"), etc.
## The Matérn function can be used in Euclidean spaces of any dimension:
set.seed(123)
randpts <- matrix(rnorm(20),nrow=5)
distMatrix <- as.matrix(proxy::dist(randpts))
MaternCorr(distMatrix,nu=2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
| /CRAN/contrib/spaMM/man/Matern.corr.Rd | no_license | PRL-PRG/dyntrace-instrumented-packages | R | false | false | 3,290 | rd | \name{MaternCorr}
\alias{Matern}
\alias{MaternCorr}
\alias{MaternCorr.default}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Matern correlation function and Matern formula term.
}
\description{
The Matérn correlation function describes realizations of Gaussian spatial processes with different smoothnesses
(i.e. either smooth or rugged surfaces). It also includes a scaling and a 'nugget' parameter. It can be invoked in two ways. First, the \code{MaternCorr} function evaluates these correlations, using distances as input. Second, a term of the form \code{Matern(1|}\emph{<...>}\code{)} in a \code{formula} specifies a random effect with Matérn correlation function, using coordinates found in a data frame as input. In the latter case, the correlations between realizations of the random effect for any two observations in the data will be the value of the Matérn function at the scaled Euclidean distance between coordinates specified in \emph{<...>}, using \dQuote{+} as separator (e.g., \code{Matern(1|latitude + longitude)}).
}
\usage{
\method{MaternCorr}{default}(d, rho = 1, smoothness, nu = smoothness, Nugget = 0L)
# Matern(1|...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{d}{A distance, typically an Euclidean distance}
\item{rho}{A scaling factor for distance. The 'range' considered in some
formulations is the reciprocal of this scaling factor}
\item{smoothness}{The smoothness parameter, >0. \eqn{\nu=0.5} corresponds to the exponential correlation function,
and the limit function when \eqn{\mu} goes to \eqn{\infty} is the squared exponential function (as in a Gaussian).}
\item{nu}{Same as smoothness}
\item{Nugget}{(Following the jargon of Kriging) a parameter describing a discontinuous decrease in
correlation at zero distance. Correlation will always be 1 at \eqn{d=0}, and from which it immediately drops to
(1-Nugget)}
\item{...}{Names of coordinates, using \dQuote{+} as separator (e.g., \code{Matern(1|latitude + longitude)}}
}
\details{
The correlation at distance \eqn{d>0} is
\deqn{(1-\textrm{Nugget}) \frac{(\rho d)^\nu K_\nu(\rho d)}{2^{(\nu - 1)} \Gamma(\nu)}}{%
(1-Nugget) 2^(1-\nu) (\rho d)^\nu K_\nu(\rho d) / \Gamma(\nu)}
where
\eqn{K_\nu} is the \code{\link{besselK}} function of order \eqn{\nu}.
}
\value{Scalar/vector/matrix depending on input.}
\references{
Stein, M.L. (1999) Statistical Interpolation of Spatial Data: Some Theory for Kriging. Springer, New York.
}
\seealso{
See \code{\link{corMatern}} for an implementation of this correlation function as a \code{corSpatial} object for use with \code{lme} or \code{glmmPQL}.
By default the Nugget is set to 0. See one of the examples on data set \code{\link{Loaloa}}
for a fit including the estimation of the Nugget.
}
\examples{
## See examples in help("spaMM"), help("HLCor"), help("Loaloa"), etc.
## The Matérn function can be used in Euclidean spaces of any dimension:
set.seed(123)
randpts <- matrix(rnorm(20),nrow=5)
distMatrix <- as.matrix(proxy::dist(randpts))
MaternCorr(distMatrix,nu=2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dendextend_options.R
\name{assign_dendextend_options}
\alias{assign_dendextend_options}
\title{Populates dendextend functions into dendextend_options}
\usage{
assign_dendextend_options()
}
\description{
Populates dendextend functions into dendextend_options
}
| /man/assign_dendextend_options.Rd | no_license | timelyportfolio/dendextend | R | false | false | 347 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dendextend_options.R
\name{assign_dendextend_options}
\alias{assign_dendextend_options}
\title{Populates dendextend functions into dendextend_options}
\usage{
assign_dendextend_options()
}
\description{
Populates dendextend functions into dendextend_options
}
|
# Plot 3: the three sub-metering energy series for 2007-02-01 and 2007-02-02
# from the UCI "Individual household electric power consumption" data set.
# library() errors loudly if the package is missing; require() only warns.
library(sqldf)

# The plot shows abbreviated day names on the time axis; force an English
# locale so it renders the same on non-English systems.
Sys.setlocale("LC_TIME", "English")

# Path to the ';'-separated data file.
hpc <- "./household_power_consumption.txt"

# Select only the two dates of interest. Filtering in SQL means the large
# source file is never read fully into memory.
Sql <- "SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
subData <- read.csv2.sql(hpc, Sql, filter = NULL)

# Convert the date/time text columns into proper Date and POSIXct values.
subData$Date <- as.Date(subData$Date, format = "%d/%m/%Y")
subData$DateTime <- as.POSIXct(paste(subData$Date, subData$Time))

# Draw the three sub-metering series and close the PNG device.
png("plot3.png", width = 480, height = 480)
plot(subData$DateTime, subData$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(subData$DateTime, subData$Sub_metering_2, type = "l", col = "red")
lines(subData$DateTime, subData$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
| /Plot3.R | no_license | ranachamsi/ExData_Plotting1 | R | false | false | 1,030 | r | require ("sqldf")
# I have a french computer so I'm setting the system local to english
# in order to display day names in english
Sys.setlocale("LC_TIME","English" )
# Reading data file
hpc = "./household_power_consumption.txt"
# the SQL statement for selecting these two dates only
Sql <- "SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
# Read the only the needed data
subData <- read.csv2.sql(hpc, Sql, filter=NULL)
# Get the data in the proper format
subData$Date <- as.Date(subData$Date, format = "%d/%m/%Y")
subData$DateTime <- as.POSIXct(paste(subData$Date, subData$Time))
# Creating the plot3:
png("plot3.png", width = 480, height = 480)
plot(subData$DateTime, subData$Sub_metering_1, type="l", ylab= "Energy sub metering", xlab="")
lines(subData$DateTime, subData$Sub_metering_2, type="l", col="red")
lines(subData$DateTime, subData$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_vennLasso.R
\name{predict.cv.vennLasso}
\alias{predict.cv.vennLasso}
\title{Prediction for Cross Validation Hierarchical Lasso Object}
\usage{
\method{predict}{cv.vennLasso}(object, newx, group.mat, s = c("lambda.min"), use.refit = FALSE, ...)
}
\arguments{
\item{object}{fitted cv.vennLasso object}
\item{newx}{new matrix for predictions}
\item{group.mat}{A matrix of the group memberships for now. Ignore the rest:
A list of length equal to the number of groups containing vectors of integers
indicating the variable IDs for each group. For example, groups=list(c(1,2), c(2,3), c(3,4,5)) specifies
that Group 1 contains variables 1 and 2, Group 2 contains variables 2 and 3, and Group 3 contains
variables 3, 4, and 5. Can also be a matrix of 0s and 1s with the number of columns equal to the
number of groups and the number of rows equal to the number of variables. A value of 1 in row i and
column j indicates that variable i is in group j and 0 indicates that variable i is not in group j.}
\item{s}{lambda value for the predictions. defaults to all values computed in the vennLasso object}
\item{use.refit}{Should the refitted beta estimates be used for prediction? Defaults to FALSE. If TRUE
then the beta estimates from the model refit on just the selected covariates are used}
\item{...}{parameters to be passed to predict.vennLasso}
}
\value{
predictions or coefficients
}
\description{
Prediction for Cross Validation Hierarchical Lasso Object
}
| /fuzzedpackages/vennLasso/man/predict.cv.vennLasso.Rd | no_license | akhikolla/testpackages | R | false | true | 1,548 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_vennLasso.R
\name{predict.cv.vennLasso}
\alias{predict.cv.vennLasso}
\title{Prediction for Cross Validation Hierarchical Lasso Object}
\usage{
\method{predict}{cv.vennLasso}(object, newx, group.mat, s = c("lambda.min"), use.refit = FALSE, ...)
}
\arguments{
\item{object}{fitted cv.vennLasso object}
\item{newx}{new matrix for predictions}
\item{group.mat}{A matrix of the group memberships for now. Ignore the rest:
A list of length equal to the number of groups containing vectors of integers
indicating the variable IDs for each group. For example, groups=list(c(1,2), c(2,3), c(3,4,5)) specifies
that Group 1 contains variables 1 and 2, Group 2 contains variables 2 and 3, and Group 3 contains
variables 3, 4, and 5. Can also be a matrix of 0s and 1s with the number of columns equal to the
number of groups and the number of rows equal to the number of variables. A value of 1 in row i and
column j indicates that variable i is in group j and 0 indicates that variable i is not in group j.}
\item{s}{lambda value for the predictions. defaults to all values computed in the vennLasso object}
\item{use.refit}{Should the refitted beta estimates be used for prediction? Defaults to FALSE. If TRUE
then the beta estimates from the model refit on just the selected covariates are used}
\item{...}{parameters to be passed to predict.vennLasso}
}
\value{
predictions or coefficients
}
\description{
Prediction for Cross Validation Hierarchical Lasso Object
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predictor.R
\name{predict.Predictor}
\alias{predict.Predictor}
\title{S3 method that wraps Predictor Class}
\usage{
\method{predict}{Predictor}(
object,
newdata,
serializer = csv_serializer,
deserializer = csv_deserializer,
...
)
}
\arguments{
\item{object}{a sagemaker model}
\item{newdata}{data for model to predict}
\item{serializer}{method class used to serialize data sent to the sagemaker model. Must be
a class inherited from \link{BaseSerializer}. (Default: \link{csv_serializer})}

\item{deserializer}{method class used to deserialize data streams returned from the sagemaker model.
Must be a class inherited from \link{BaseDeserializer}.
(Default: \link{csv_deserializer})}
\item{...}{arguments passed to ``Predictor$predict``}
}
\description{
Predicted values returned from endpoint
}
| /man/predict.Predictor.Rd | permissive | OwenGarrity/sagemaker-r-sdk | R | false | true | 878 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predictor.R
\name{predict.Predictor}
\alias{predict.Predictor}
\title{S3 method that wraps Predictor Class}
\usage{
\method{predict}{Predictor}(
object,
newdata,
serializer = csv_serializer,
deserializer = csv_deserializer,
...
)
}
\arguments{
\item{object}{a sagemaker model}
\item{newdata}{data for model to predict}
\item{serializer}{method class to serializer data to sagemaker model. Requires to be
a class inherited from \link{BaseSerializer}. (Default: \link{csv_serializer})}
\item{deserializer}{method class to deserializer return data streams from sagemaker model.
Requires to be a class inherited from \link{BaseDeserializer}.
(Default: \link{csv_deserializer})}
\item{...}{arguments passed to ``Predictor$predict``}
}
\description{
Predicted values returned from endpoint
}
|
source("dataHelper.R")

# Load the PM2.5 emissions records and the source classification codes.
NEI <- LoadData("summarySCC_PM25")
SCC <- LoadData("Source_Classification_Code")

# Render to a 480x480 pixel PNG device.
png(filename = "plot1.png",
    width = 480, height = 480,
    units = "px")

# Total PM2.5 emissions per year across all US sources: columns (year, total).
yearlyEmissions <- NEI %>%
  group_by(year) %>%
  summarise(total = sum(Emissions))

# A two-column data frame plots as x = first column, y = second column.
plot(yearlyEmissions, type = "l", xlab = "Year",
     ylab = expression("PM"[2.5]*" Emissions"),
     main = "Total Emissions in the United States (1999 to 2008)")

dev.off()
| /plot1.R | permissive | vnaidu/ExData_Plotting2 | R | false | false | 471 | r | source("dataHelper.R")
NEI <- LoadData("summarySCC_PM25")
SCC <- LoadData("Source_Classification_Code")
png(filename = "plot1.png",
width = 480, height = 480,
units = "px")
yearlyEmissions <- NEI %>%
select(Emissions, year) %>%
group_by(year) %>%
summarise(total = sum(Emissions))
plot(yearlyEmissions, type = "l", xlab = "Year", ylab = expression("PM"[2.5]*" Emissions"),
main = "Total Emissions in the United States (1999 to 2008)")
dev.off()
|
# Clusivity enhancements
#
# This file is an AUTOTYP aggregation
#
# For questions, open an issue
#
# Copyright 2022 Taras Zakharko (CC BY 4.0).

# ███████╗███████╗████████╗██╗ ██╗██████╗
# ██╔════╝██╔════╝╚══██╔══╝██║ ██║██╔══██╗
# ███████╗█████╗ ██║ ██║ ██║██████╔╝
# ╚════██║██╔══╝ ██║ ██║ ██║██╔═══╝
# ███████║███████╗ ██║ ╚██████╔╝██║
# ╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝
#

source("R/plugin-support.R")
source("R/expand_na.R")

Clusivity <- Clusivity %>%
  # Clusivity distinction type
  # NOTE: case_when() returns the value of the FIRST condition that is TRUE,
  # so the most specific types must be tested before the generic HasClusivity.
  mutate(
    ClusivityType = case_when(
      HasClusivityWithMinimalNumberSystem ~ "min/aug type",
      HasClusivityAsPerson ~ "excl as person type",
      HasClusivity ~ "plain i/e type",
      !HasClusivity ~ "no i/e"
    ) %>%
      # Factor levels ordered from no distinction to the most elaborate type.
      factor(levels = c("no i/e", "plain i/e type", "excl as person type", "min/aug type")),
    .after = HasClusivity
  )

# ██████╗ ██╗ ██╗████████╗██████╗ ██╗ ██╗████████╗
# ██╔═══██╗██║ ██║╚══██╔══╝██╔══██╗██║ ██║╚══██╔══╝
# ██║ ██║██║ ██║ ██║ ██████╔╝██║ ██║ ██║
# ██║ ██║██║ ██║ ██║ ██╔═══╝ ██║ ██║ ██║
# ╚██████╔╝╚██████╔╝ ██║ ██║ ╚██████╔╝ ██║
# ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝

# Extend the stored metadata with a description of the computed column so the
# exported descriptor documents ClusivityType alongside the source fields.
descriptor <- .metadata$Clusivity

descriptor$fields$ClusivityType <- describe_data(
  computed = "enhancements/Clusivity.R",
  ptype = factor(),
  levels = tribble(
    ~ level, ~ description,
    "no i/e", "no clusivity distinction",
    "plain i/e type", "there is clusivity distinction",
    "excl as person type", "clusivity is a person category",
    "min/aug type", "clusivity is present with a minimal/augmented number system"
  ),
  description = glue::trim("
    Type of inclusive/exclusive distinction, as defined in Bickel & Nichols
    2005 in Filimonova ed. Clusivity, Amsterdam: Benjamins, 47-70"
  )
)

# Write the aggregated dataset plus its descriptor into the "Categories" group.
export_dataset(
  "Clusivity",
  Clusivity,
  descriptor,
  "Categories"
)
| /aggregation-scripts/enhancements/Clusivity.R | permissive | autotyp/autotyp-data | R | false | false | 2,741 | r | # Alienability enhancements
#
# This file is an AUTOTYP aggregation (duplicate of the chunk above;
# comments only -- code unchanged)
#
# For questions, open an issue
#
# Copyright 2022 Taras Zakharko (CC BY 4.0).

# ---- Setup -------------------------------------------------------------
source("R/plugin-support.R")
source("R/expand_na.R")

# Derive ClusivityType: a single factor summarising the clusivity flags.
# case_when() is evaluated top-down, so the most specific system wins.
Clusivity <- Clusivity %>%
  # Clusivity distinction type (factor with explicit level order,
  # inserted right after HasClusivity)
  mutate(
    ClusivityType = case_when(
      HasClusivityWithMinimalNumberSystem ~ "min/aug type",
      HasClusivityAsPerson ~ "excl as person type",
      HasClusivity ~ "plain i/e type",
      !HasClusivity ~ "no i/e"
    ) %>%
    factor(levels = c("no i/e", "plain i/e type", "excl as person type", "min/aug type")),
    .after = HasClusivity
  )

# ---- Output ------------------------------------------------------------
# Register the new field in the dataset metadata, then export.
descriptor <- .metadata$Clusivity
descriptor$fields$ClusivityType <- describe_data(
  computed = "enhancements/Clusivity.R",
  ptype = factor(),
  levels = tribble(
    ~ level, ~ description,
    "no i/e", "no clusivity distinction",
    "plain i/e type", "there is clusivity distinction",
    "excl as person type", "clusivity is a person category",
    "min/aug type", "clusivity is present with a minimal/augmented number system"
  ),
  description = glue::trim("
    Type of inclusive/exclusive distinction, as defined in Bickel & Nichols
    2005 in Filimonova ed. Clusivity, Amsterdam: Benjamins, 47-70"
  )
)

export_dataset(
  "Clusivity",
  Clusivity,
  descriptor,
  "Categories"
)
|
## Load relevant libraries
library(ggplot2)

## Read in data
bat_data <- read.csv("./Data/2020-01_Swanson-et-al_Bats-Forest-Str_Wing-Loading-Aspect-Ratio.csv")

## Aspect ratio vs. wing loading, one labelled point per species
wing_plot <- ggplot(bat_data, aes(x = AspectRatio, y = Wingloading, label = Species)) +
  geom_point() +
  geom_text(hjust = 0.01, nudge_x = 0.005) +
  labs(x = "Aspect Ratio", y = "Wing Loading") +
  theme(
    axis.title = element_text(size = 16),
    panel.background = element_rect(fill = "white", color = "black", size = 2)
  )
wing_plot
| /wingloading_aspect.R | no_license | thatsciencegal/flying_under_lidar | R | false | false | 513 | r | ##Load relevant libraries
library(ggplot2)
## Read in data (bat wing morphology measurements)
bat_data <- read.csv("./Data/2020-01_Swanson-et-al_Bats-Forest-Str_Wing-Loading-Aspect-Ratio.csv")
## Scatter plot of aspect ratio vs. wing loading, labelled by species.
## NOTE(review): `size` in element_rect() is deprecated in ggplot2 >= 3.4
## (use `linewidth`) -- left unchanged pending a ggplot2 version check.
ggplot(bat_data, aes(x = AspectRatio, y = Wingloading, label = Species)) +
  geom_point() +
  labs(x = "Aspect Ratio", y = "Wing Loading") +
  geom_text(hjust = 0.01, nudge_x = 0.005) +
  theme(axis.title = element_text(size = 16), panel.background = element_rect(fill = "white", color = "black", size = 2))
|
# Functions for the spaghetti tutorial
# Created by Kaija Gahm
# 31 January 2020
# NOTE(review): these helpers use dplyr/magrittr verbs (%>%, mutate, filter,
# recode, full_join, case_when) but no library() call appears in this chunk;
# presumably the caller attaches dplyr -- TODO confirm.

# Starting state: a box holding pasta and a pot holding only air.
# `cooked` tracks doneness; cook() below caps it at 10.
box <- data.frame(ingredient = as.character("pasta"), cooked = 0)
pot <- data.frame(ingredient = as.character("air"), cooked = 0)
# fill(): fill the pot -- turn the pot's "air" row into "water".
# @param df a container data frame with an `ingredient` column.
# @return `df` with "air" recoded to "water"; all other rows untouched.
fill <- function(df){
  # The last expression is the return value; the original `%>% return()`
  # inside the pipe was a magrittr anti-pattern and has been dropped.
  df %>% mutate(ingredient = recode(ingredient, "air" = "water"))
}
# add(): move an ingredient from one container into another.
# @param what ingredient name to move (default "pasta").
# @param from source data frame (default the global `box`).
# @param to   destination data frame (default the global `pot`).
# @return rows of `from` matching `what`, full-joined onto `to`.
add <- function(what = "pasta", from = box, to = pot){
  # full_join() with no `by` performs a natural join on all shared columns
  # (and prints a "Joining, by = ..." message); kept implicit so arbitrary
  # container data frames keep working. `%>% return()` anti-pattern dropped.
  from %>% filter(ingredient == what) %>% full_join(to)
}
# add <- function(from = box, to = pot){
# full_join(box, pot) %>% return()
# box <<- data.frame(ingredient = "air",
# cooked = 0)
# }
# cook(): heat one ingredient in the pot, advancing its doneness.
# @param df      pot data frame with `ingredient` and `cooked` columns.
# @param what    which ingredient to heat ("pasta" or "water"); required.
# @param minutes how long to cook for (default 1).
# @return `df` with `cooked` increased by `minutes` (capped at 10) on the
#   rows matching `what`; other rows are unchanged.
# Errors: missing `what`, cooking pasta without water, cooking something
#   not yet in the pot, or a non-pasta/water ingredient.
cook <- function(df, what = NULL, minutes = 1){
  if (is.null(what)) {
    stop("Which ingredient would you like to cook?")
  } else if (what == "pasta" && !("water" %in% df$ingredient)) {
    # `&&` (scalar, short-circuiting) replaces `&` inside if() conditions
    stop("If you try to cook pasta without water, it'll burn! Don't do that.")
  } else if (what %in% c("pasta", "water") && !(what %in% df$ingredient)) {
    stop("That ingredient isn't in the pot yet.")
  } else if (!(what %in% c("pasta", "water"))) {
    stop("Ew, why are you putting THAT in your pasta water? Try again.")
  } else {
    # Vectorised update: only rows matching `what` advance; doneness caps
    # at 10. (`&` is correct here -- case_when conditions are vectors.)
    df %>%
      mutate(cooked = case_when(
        cooked + minutes < 10 & ingredient == what ~ cooked + minutes,
        cooked + minutes >= 10 & ingredient == what ~ 10,
        TRUE ~ cooked
      ))
  }
}
# drain(): strain the pasta -- drop every "water" row from the container.
drain <- function(df){
  filter(df, ingredient != "water")
}
| /spaghetti_functions.R | no_license | kaijagahm/tutorials | R | false | false | 1,629 | r | # Functions for the spaghetti tutorial
# Created by Kaija Gahm
# 31 January 2020
# NOTE(review): duplicate copy of the spaghetti helpers above; comments
# only -- code left exactly as written.

# You are given a box of pasta and an empty pot; `cooked` is capped at 10.
box <- data.frame(ingredient = as.character("pasta"), cooked = 0)
pot <- data.frame(ingredient = as.character("air"), cooked = 0)
# Fill the pot: recode the pot's "air" row to "water".
fill <- function(df){
  df %>% mutate(ingredient = recode(ingredient, "air" = "water")) %>% return()
}
# Add an ingredient: rows of `from` matching `what`, full-joined onto `to`.
add <- function(what = "pasta", from = box, to = pot){
  from %>% filter(ingredient == what) %>% full_join(to) %>% return()
}
# Earlier variant, kept for reference (moved everything and reset the box):
# add <- function(from = box, to = pot){
#   full_join(box, pot) %>% return()
#   box <<- data.frame(ingredient = "air",
#                      cooked = 0)
# }
# Cook (heat) `what` for `minutes`: advances `cooked`, capped at 10.
# NOTE(review): `&` inside if() works because the operands are scalar here,
# but `&&` would be the idiomatic (short-circuiting) choice.
cook <- function(df, what = NULL, minutes = 1){
  if(is.null(what)){
    stop("Which ingredient would you like to cook?")
  } else if(what == "pasta" & !("water" %in% df$ingredient)){
    stop("If you try to cook pasta without water, it'll burn! Don't do that.")
  } else if(what %in% c("pasta", "water") & !(what %in% df$ingredient)){
    stop("That ingredient isn't in the pot yet.")
  } else if(!(what %in% c("pasta", "water"))){
    stop("Ew, why are you putting THAT in your pasta water? Try again.")
  } else{
    df <- df %>% mutate(cooked = case_when(cooked + minutes < 10 & ingredient == what ~ cooked + minutes,
                                           cooked + minutes >= 10 & ingredient == what ~ 10,
                                           TRUE ~ cooked)) %>% return()
  }
}
# Strain the pasta from the water: drop all "water" rows.
drain <- function(df){
  df %>% filter(ingredient != "water") %>% return()
}
|
# Example usage of the DES package (auto-extracted from newsim.Rd).
library(DES)
### Name: newsim,schedevnt,getnextevnt,mainloop,newqueue,appendfcfs,delfcfs,cancelevnt,exparrivals
### Title: Discrete-event simulation routines.
### Aliases: newsim cancelevnt schedevnt getnextevnt mainloop newqueue
###   appendfcfs delfcfs exparrivals
### ** Examples
# from MachRep.R in examples/
# create a sim list that will run for 100000 simulated time, with 3
# rows allocated for the event set, and application-specific columns
# named 'startqtime' and 'startuptime'
simlist <- newsim(100000,3,appcols=c('startqtime','startuptime'))
# create a queue (stored on the sim list so event handlers can reach it)
simlist$queue <- newqueue(simlist)
| /data/genthat_extracted_code/DES/examples/newsim.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 618 | r | library(DES)
### Name: newsim,schedevnt,getnextevnt,mainloop,newqueue,appendfcfs,delfcfs,cancelevnt,exparrivals
### Title: Discrete-event simulation routines.
### Aliases: newsim cancelevnt schedevnt getnextevnt mainloop newqueue
###   appendfcfs delfcfs exparrivals
### ** Examples
# (duplicate of the example above; comments only -- code unchanged)
# from MachRep.R in examples/
# create a sim list that will run for 100000 simulated time, with 3
# rows allocated for the event set, and application-specific columns
# named 'startqtime' and 'startuptime'
simlist <- newsim(100000,3,appcols=c('startqtime','startuptime'))
# create a queue (stored on the sim list so event handlers can reach it)
simlist$queue <- newqueue(simlist)
|
# run_analysis.R -- builds a tidy summary of the UCI HAR dataset.
# Steps follow the course assignment: merge train/test, keep mean/std
# features, label activities, name variables, and write per-subject,
# per-activity averages to Tidy_Data_Summary.txt.
library(dplyr)

# Read Training Data
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")

# Read Test Data
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")

# Read feature (column) names and the activity id -> name lookup
variable_names <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")

# 1. Merges the training and the test sets to create one data set (row-wise).
x_total <- rbind(x_train, x_test)
y_total <- rbind(y_train, y_test)
subject_total <- rbind(subject_train, subject_test)

# 2. Extracts only the measurements on the mean and standard deviation for
#    each measurement (literal "mean()"/"std()", so meanFreq is excluded).
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
x_total <- x_total[,selected_var[,1]]

# 3. Uses descriptive activity names to name the activities in the data set
colnames(y_total) <- "activity"
y_total$activitylabel <- factor(y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- y_total[,-1]

# 4. Appropriately labels the data set with descriptive variable names.
colnames(x_total) <- variable_names[selected_var[,1],2]

# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
colnames(subject_total) <- "subject"
total <- cbind(x_total, activitylabel, subject_total)
# summarize_each() is deprecated; across() with list(mean = mean) yields the
# same "<col>_mean" names via the default "{.col}_{.fn}" naming scheme.
total_mean <- total %>%
  group_by(activitylabel, subject) %>%
  summarise(across(everything(), list(mean = mean)), .groups = "drop_last")
write.table(total_mean, file = "./Tidy_Data_Summary.txt", row.names = FALSE, col.names = TRUE)

# Fixed: the script name previously lacked its closing apostrophe.
message("The script 'run_analysis.R' was executed successfully.\n",
        "As a result, a new tidy data set was created with name \n",
        "'Tidy_Data_Summary.txt' in the working directory.")
| /run_analysis.R | no_license | smeah25/Getting-and-Cleaning-Data_Assignment | R | false | false | 2,010 | r | library(dplyr)
# Read Training Data
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Read Test Data
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Read Data Description (feature/column names)
variable_names <- read.table("./UCI HAR Dataset/features.txt")
# Read Activity Labels (activity id -> name lookup)
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# 1. Merges the training and the test sets to create one data set.
x_total <- rbind(x_train, x_test)
y_total <- rbind(y_train, y_test)
subject_total <- rbind(subject_train, subject_test)
# 2. Extracts only the measurements on the mean and standard deviation for
#    each measurement (literal "mean()"/"std()", so meanFreq is excluded).
selected_var <- variable_names[grep("mean\\(\\)|std\\(\\)",variable_names[,2]),]
x_total <- x_total[,selected_var[,1]]
# 3. Uses descriptive activity names to name the activities in the data set
colnames(y_total) <- "activity"
y_total$activitylabel <- factor(y_total$activity, labels = as.character(activity_labels[,2]))
activitylabel <- y_total[,-1]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(x_total) <- variable_names[selected_var[,1],2]
# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
# NOTE(review): summarize_each() is deprecated in current dplyr; the
# supported equivalent is summarise(across(everything(), list(mean = mean))).
colnames(subject_total) <- "subject"
total <- cbind(x_total, activitylabel, subject_total)
total_mean <- total %>% group_by(activitylabel, subject) %>% summarize_each(list(mean=mean))
write.table(total_mean, file = "./Tidy_Data_Summary.txt", row.names = FALSE, col.names = TRUE)
# NOTE(review): the first message string is missing the closing quote after
# run_analysis.R; left unchanged here (runtime string).
message("The script 'run_analysis.R was executed successfully.\n",
        "As a result, a new tidy data set was created with name \n",
        "'Tidy_Data_Summary.txt' in the working directory.")
|
# feilsok.R -- ad-hoc troubleshooting of 2016 NNRR registration counts.
# NOTE(review): rm(list=ls()) wipes the user's workspace; generally avoided
# in scripts, left as-is for this interactive troubleshooting file.
rm(list=ls())
library(nnrr)
# Raw data dumps: clinician form (1b), patient pre-treatment form (1a, two
# dump dates kept), and patient post-treatment form (2).
legeskjema <- read.table('P:/MinData/nnrr/DataDump_1b%3aRegistreringsskjema+poliklinikk_2017-09-27.csv', sep=';',
                         header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
pasientsvar_pre <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-22.csv', sep=';',
                              header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
pasientsvar_pre2 <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-27.csv', sep=';',
                               header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
# # pasientsvar_pre3 <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-27_2.csv', sep=';',
#                             header=T, stringsAsFactors = F)
pasientsvar_post <- read.table('P:/MinData/nnrr/DataDump_2%3aSpørreskjema+etter+behandling_2017-09-27.csv', sep=';',
                               header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
# Codebooks describe each form's fields; checkbox ("Avkrysning") columns are
# converted from text to logical.
varnavn_1b <- read.table('P:/MinData/nnrr/Kodebok1b.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var1b <- as.character(varnavn_1b$DataDumpnavn)[which(as.character(varnavn_1b$Felttype) == 'Avkrysning')]
legeskjema[, boolske_var1b] <- apply(legeskjema[, boolske_var1b], 2, as.logical)
varnavn_1a <- read.table('P:/MinData/nnrr/Kodebok1a.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var1a <- as.character(varnavn_1a$DataDumpnavn)[which(as.character(varnavn_1a$Felttype) == 'Avkrysning')]
pasientsvar_pre[, boolske_var1a] <- apply(pasientsvar_pre[, boolske_var1a], 2, as.logical)
varnavn_2 <- read.table('P:/MinData/nnrr/Kodebok2.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var2 <- as.character(varnavn_2$DataDumpnavn)[which(as.character(varnavn_2$Felttype) == 'Avkrysning')]
pasientsvar_post[, boolske_var2] <- apply(pasientsvar_post[, boolske_var2], 2, as.logical)
# Mark every row as registered and harmonise key/date column names before
# merging the three forms.
legeskjema$regstatus <- 1
pasientsvar_pre$regstatus <- 1
pasientsvar_post$regstatus <- 1
names(pasientsvar_pre)[names(pasientsvar_pre)=='SkjemaGUID'] <- 'SkjemaGUID_pre'
names(pasientsvar_post)[names(pasientsvar_post)=='SkjemaGUID'] <- 'SkjemaGUID_post'
names(legeskjema)[names(legeskjema)=='S1b_DateOfCompletion'] <- 'Besoksdato'
names(pasientsvar_pre)[names(pasientsvar_pre)=='DateOfCompletion'] <- 'Besoksdato'
names(pasientsvar_post)[names(pasientsvar_post)=='DateOfCompletion'] <- 'Besoksdato'
# Outer-join patient pre/post forms onto the clinician form, then run the
# registry preprocessing (which presumably parses the Besoksdato columns and
# adds Aar/SykehusNavn used below -- defined in the nnrr package).
RegData <- merge(legeskjema, pasientsvar_pre, by.x = 'SkjemaGUID', by.y = 'HovedskjemaGUID', suffixes = c('', '_pre'), all = TRUE)
RegData <- merge(RegData, pasientsvar_post, by.x = 'SkjemaGUID', by.y = 'HovedskjemaGUID', suffixes = c('', '_post'), all = TRUE)
RegData <- nnrrPreprosess(RegData = RegData)
RegData$Aar_pre <- RegData$Besoksdato_pre$year+1900
RegData$Aar_post <- RegData$Besoksdato_post$year+1900
# 2016 subsets per form (row registered and visit year 2016)
legeskjema2016 <- RegData[which(RegData$Aar == 2016 & RegData$regstatus==1), ]
pasientsvar_pre2016 <- RegData[which(RegData$Aar_pre == 2016 & RegData$regstatus_pre==1), ]
pasientsvar_post2016 <- RegData[which(RegData$Aar_post == 2016 & RegData$regstatus_post==1), ]
# Counts based on the 2016 visit date in the clinician form
# (month x hospital tables with margins; POSIXlt $mon is 0-based, hence +1)
addmargins(table(legeskjema2016$Besoksdato$mon+1, legeskjema2016$SykehusNavn, useNA = 'ifany'))
addmargins(table(legeskjema2016$Besoksdato$mon[legeskjema2016$regstatus_pre==1]+1,
                 legeskjema2016$SykehusNavn[legeskjema2016$regstatus_pre==1], useNA = 'ifany'))
addmargins(table(legeskjema2016$Besoksdato$mon[legeskjema2016$regstatus_post==1]+1,
                 legeskjema2016$SykehusNavn[legeskjema2016$regstatus_post==1], useNA = 'ifany'))
# legeskjema2016$guid
# Patient pre-treatment forms with a 2016 visit date
addmargins(table(pasientsvar_pre2016$Besoksdato_pre$mon+1, pasientsvar_pre2016$SykehusNavn, useNA = 'ifany'))
# Patient post-treatment forms with a 2016 visit date
addmargins(table(pasientsvar_post2016$Besoksdato_post$mon+1, pasientsvar_post2016$SykehusNavn, useNA = 'ifany'))
# Cross-check directly on the raw clinician file: parse dates (POSIXlt so
# $year/$mon are available), derive year and hospital name from ReshId.
legeskjema$Besoksdato <- as.POSIXlt(legeskjema$Besoksdato, format="%d.%m.%Y")
pasientsvar_pre$Besoksdato <- as.POSIXlt(pasientsvar_pre$Besoksdato, format="%d.%m.%Y")
pasientsvar_post$Besoksdato <- as.POSIXlt(pasientsvar_post$Besoksdato, format="%d.%m.%Y")
legeskjema$Aar <- legeskjema$Besoksdato$year+1900
pasientsvar_pre$Aar <- pasientsvar_pre$Besoksdato$year+1900
pasientsvar_post$Aar <- pasientsvar_post$Besoksdato$year+1900
legeskjema$SykehusNavn <- NA
legeskjema$SykehusNavn[legeskjema$ReshId == 102959] <- 'Haukeland'
legeskjema$SykehusNavn[legeskjema$ReshId == 104293] <- 'St. Olavs'
legeskjema$SykehusNavn[legeskjema$ReshId == 109834] <- 'OUS'
legeskjema$SykehusNavn[legeskjema$ReshId == 601032] <- 'UNN'
legeskjema2016 <- legeskjema[legeskjema$Aar==2016, ]
table(legeskjema2016$SykehusNavn)
| /doc/feilsok.R | no_license | Rapporteket/nnrr | R | false | false | 4,769 | r | rm(list=ls())
library(nnrr)
legeskjema <- read.table('P:/MinData/nnrr/DataDump_1b%3aRegistreringsskjema+poliklinikk_2017-09-27.csv', sep=';',
header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
pasientsvar_pre <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-22.csv', sep=';',
header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
pasientsvar_pre2 <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-27.csv', sep=';',
header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
# # pasientsvar_pre3 <- read.table('P:/MinData/nnrr/DataDump_1a%3aSpørreskjema+før+behandling_2017-09-27_2.csv', sep=';',
# header=T, stringsAsFactors = F)
pasientsvar_post <- read.table('P:/MinData/nnrr/DataDump_2%3aSpørreskjema+etter+behandling_2017-09-27.csv', sep=';',
header=T, fileEncoding = 'UTF-8-BOM', stringsAsFactors = F)
varnavn_1b <- read.table('P:/MinData/nnrr/Kodebok1b.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var1b <- as.character(varnavn_1b$DataDumpnavn)[which(as.character(varnavn_1b$Felttype) == 'Avkrysning')]
legeskjema[, boolske_var1b] <- apply(legeskjema[, boolske_var1b], 2, as.logical)
varnavn_1a <- read.table('P:/MinData/nnrr/Kodebok1a.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var1a <- as.character(varnavn_1a$DataDumpnavn)[which(as.character(varnavn_1a$Felttype) == 'Avkrysning')]
pasientsvar_pre[, boolske_var1a] <- apply(pasientsvar_pre[, boolske_var1a], 2, as.logical)
varnavn_2 <- read.table('P:/MinData/nnrr/Kodebok2.csv', sep=';', header=T, stringsAsFactors = F)
boolske_var2 <- as.character(varnavn_2$DataDumpnavn)[which(as.character(varnavn_2$Felttype) == 'Avkrysning')]
pasientsvar_post[, boolske_var2] <- apply(pasientsvar_post[, boolske_var2], 2, as.logical)
legeskjema$regstatus <- 1
pasientsvar_pre$regstatus <- 1
pasientsvar_post$regstatus <- 1
names(pasientsvar_pre)[names(pasientsvar_pre)=='SkjemaGUID'] <- 'SkjemaGUID_pre'
names(pasientsvar_post)[names(pasientsvar_post)=='SkjemaGUID'] <- 'SkjemaGUID_post'
names(legeskjema)[names(legeskjema)=='S1b_DateOfCompletion'] <- 'Besoksdato'
names(pasientsvar_pre)[names(pasientsvar_pre)=='DateOfCompletion'] <- 'Besoksdato'
names(pasientsvar_post)[names(pasientsvar_post)=='DateOfCompletion'] <- 'Besoksdato'
RegData <- merge(legeskjema, pasientsvar_pre, by.x = 'SkjemaGUID', by.y = 'HovedskjemaGUID', suffixes = c('', '_pre'), all = TRUE)
RegData <- merge(RegData, pasientsvar_post, by.x = 'SkjemaGUID', by.y = 'HovedskjemaGUID', suffixes = c('', '_post'), all = TRUE)
RegData <- nnrrPreprosess(RegData = RegData)
RegData$Aar_pre <- RegData$Besoksdato_pre$year+1900
RegData$Aar_post <- RegData$Besoksdato_post$year+1900
legeskjema2016 <- RegData[which(RegData$Aar == 2016 & RegData$regstatus==1), ]
pasientsvar_pre2016 <- RegData[which(RegData$Aar_pre == 2016 & RegData$regstatus_pre==1), ]
pasientsvar_post2016 <- RegData[which(RegData$Aar_post == 2016 & RegData$regstatus_post==1), ]
# Antall basert på besøksdato 2016 i klinikerskjema
addmargins(table(legeskjema2016$Besoksdato$mon+1, legeskjema2016$SykehusNavn, useNA = 'ifany'))
addmargins(table(legeskjema2016$Besoksdato$mon[legeskjema2016$regstatus_pre==1]+1,
legeskjema2016$SykehusNavn[legeskjema2016$regstatus_pre==1], useNA = 'ifany'))
addmargins(table(legeskjema2016$Besoksdato$mon[legeskjema2016$regstatus_post==1]+1,
legeskjema2016$SykehusNavn[legeskjema2016$regstatus_post==1], useNA = 'ifany'))
# legeskjema2016$guid
# Pasient pre besøksdato 2016
addmargins(table(pasientsvar_pre2016$Besoksdato_pre$mon+1, pasientsvar_pre2016$SykehusNavn, useNA = 'ifany'))
# Pasient post besøksdato 2016
addmargins(table(pasientsvar_post2016$Besoksdato_post$mon+1, pasientsvar_post2016$SykehusNavn, useNA = 'ifany'))
legeskjema$Besoksdato <- as.POSIXlt(legeskjema$Besoksdato, format="%d.%m.%Y")
pasientsvar_pre$Besoksdato <- as.POSIXlt(pasientsvar_pre$Besoksdato, format="%d.%m.%Y")
pasientsvar_post$Besoksdato <- as.POSIXlt(pasientsvar_post$Besoksdato, format="%d.%m.%Y")
legeskjema$Aar <- legeskjema$Besoksdato$year+1900
pasientsvar_pre$Aar <- pasientsvar_pre$Besoksdato$year+1900
pasientsvar_post$Aar <- pasientsvar_post$Besoksdato$year+1900
legeskjema$SykehusNavn <- NA
legeskjema$SykehusNavn[legeskjema$ReshId == 102959] <- 'Haukeland'
legeskjema$SykehusNavn[legeskjema$ReshId == 104293] <- 'St. Olavs'
legeskjema$SykehusNavn[legeskjema$ReshId == 109834] <- 'OUS'
legeskjema$SykehusNavn[legeskjema$ReshId == 601032] <- 'UNN'
legeskjema2016 <- legeskjema[legeskjema$Aar==2016, ]
table(legeskjema2016$SykehusNavn)
|
# UI definition for the Contagion Analysis shinydashboard app.
library(shinydashboard)
# NOTE(review): RStudio-only working-directory hack, intentionally disabled.
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("GenerateRGraphs.R")
library(shiny)
dashboardPage(
dashboardHeader(title = "Contagion Analysis"),
dashboardSidebar(sidebarMenu(
menuItem(
"Graph Analysis",
tabName = "graph",
icon = icon("signal")
),
menuItem("Contagion Simulation", tabName = "network", icon = icon("book"))
)),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "MyStyle.css"),
tags$link(rel="shortcut icon", href="/www/favicon.ico")
),
tabItems(
# First tab content
tabItem(
tabName = "graph",
fluidRow(
box(
numericInput("assets", label = "Total Value of Assets", value = 100000),
sliderInput(
"N",
"No. Of Banks",
min = 0,
max = 100,
value = 25,
step = 1
),
sliderInput(
"prob",
"Interconnectedness:",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
sliderInput(
"gamma",
"Capitalization",
min = 0,
max = 0.2,
value = 0.05,
step = 0.01
),
sliderInput(
"theta",
"Interbank Assets to Total Assets Ratio",
min = 0,
max = 1,
value = 0.2,
step = 0.01
), radioButtons(
"variable",
label = strong("Variable"),
choices = list(
"Capitalization" = "G",
"Interconnectedness" = "P",
"Interbank Asset to Asset Ratio" = "T",
"No. of Banks" = "N"
),
selected = "N"
),
sliderInput(
"simulations",
"MC Runs",
min = 0,
max = 1000,
value = 100,
step = 10
),
actionButton("update", "Run MC Simulation", class="action-button2"), width=3),
fluidRow(
box(plotOutput("plot",width = 75), class="plotbox"),
box(plotOutput("plot2",width = 75), class="plotbox"),
box(plotOutput("entropyPlot",width = 75), class="plotbox")
)
)
),
# Second tab content
tabItem(
tabName = "network",
fluidRow(
box(
numericInput("assets2", label = "Total Value of Assets", value = 10000),
sliderInput(
"N2",
"No. Of Banks",
min = 0,
max = 100,
value = 25,
step = 1
),
sliderInput(
"prob2",
"Interconnectedness:",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
sliderInput(
"gamma2",
"Capitalization",
min = 0,
max = 0.2,
value = 0.03,
step = 0.01
),
sliderInput(
"theta2",
"Interbank Assets to Total Assets Ratio",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
actionButton("update2", "Run Simulation", class="action-button2"), width = 3
),
box(numericInput("snapshot", label="Snapshot", value = 0, min = 0, max=100, width = 60),
plotOutput("plot3", width = 100, height=800),
downloadButton('downloadData', 'Download Graph', class="download-button"), class="box-body-custom"
)
)
)
)
)
) | /ui.R | no_license | Allisterh/NetworkAnalysis | R | false | false | 3,669 | r | library(shinydashboard)
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("GenerateRGraphs.R")
library(shiny)
dashboardPage(
dashboardHeader(title = "Contagion Analysis"),
dashboardSidebar(sidebarMenu(
menuItem(
"Graph Analysis",
tabName = "graph",
icon = icon("signal")
),
menuItem("Contagion Simulation", tabName = "network", icon = icon("book"))
)),
dashboardBody(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "MyStyle.css"),
tags$link(rel="shortcut icon", href="/www/favicon.ico")
),
tabItems(
# First tab content
tabItem(
tabName = "graph",
fluidRow(
box(
numericInput("assets", label = "Total Value of Assets", value = 100000),
sliderInput(
"N",
"No. Of Banks",
min = 0,
max = 100,
value = 25,
step = 1
),
sliderInput(
"prob",
"Interconnectedness:",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
sliderInput(
"gamma",
"Capitalization",
min = 0,
max = 0.2,
value = 0.05,
step = 0.01
),
sliderInput(
"theta",
"Interbank Assets to Total Assets Ratio",
min = 0,
max = 1,
value = 0.2,
step = 0.01
), radioButtons(
"variable",
label = strong("Variable"),
choices = list(
"Capitalization" = "G",
"Interconnectedness" = "P",
"Interbank Asset to Asset Ratio" = "T",
"No. of Banks" = "N"
),
selected = "N"
),
sliderInput(
"simulations",
"MC Runs",
min = 0,
max = 1000,
value = 100,
step = 10
),
actionButton("update", "Run MC Simulation", class="action-button2"), width=3),
fluidRow(
box(plotOutput("plot",width = 75), class="plotbox"),
box(plotOutput("plot2",width = 75), class="plotbox"),
box(plotOutput("entropyPlot",width = 75), class="plotbox")
)
)
),
# Second tab content
tabItem(
tabName = "network",
fluidRow(
box(
numericInput("assets2", label = "Total Value of Assets", value = 10000),
sliderInput(
"N2",
"No. Of Banks",
min = 0,
max = 100,
value = 25,
step = 1
),
sliderInput(
"prob2",
"Interconnectedness:",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
sliderInput(
"gamma2",
"Capitalization",
min = 0,
max = 0.2,
value = 0.03,
step = 0.01
),
sliderInput(
"theta2",
"Interbank Assets to Total Assets Ratio",
min = 0,
max = 1,
value = 0.2,
step = 0.01
),
actionButton("update2", "Run Simulation", class="action-button2"), width = 3
),
box(numericInput("snapshot", label="Snapshot", value = 0, min = 0, max=100, width = 60),
plotOutput("plot3", width = 100, height=800),
downloadButton('downloadData', 'Download Graph', class="download-button"), class="box-body-custom"
)
)
)
)
)
) |
# Simulation study setup: grid of scenario parameters for the eclust
# simulated-data vignette.
library(eclust)
library(magrittr)
# One row per scenario combination.
parametersDf <- expand.grid(rho = c(0.90),
                            p = c(500),
                            # SNR = c(0.2,1,2),
                            SNR = 1,
                            n = c(100), # this is the total train + test sample size
                            # nActive = c(300), # must be even because its being split among two modules
                            #n0 = 200,
                            cluster_distance = c("tom","corr"),
                            Ecluster_distance = c("difftom", "diffcorr"),
                            rhoOther = 0.6,
                            betaMean = c(2),
                            alphaMean = c(1),
                            betaE = 3,
                            includeInteraction = FALSE,
                            includeStability = TRUE,
                            distanceMethod = "euclidean",
                            clustMethod = "hclust",
                            #cutMethod = "gap",
                            cutMethod = "dynamic",
                            agglomerationMethod = "average",
                            # agglomerationMethod = "ward.D",
                            K.max = 10, B = 10, stringsAsFactors = FALSE)
# Derived columns: half the sample is unexposed; 10% of genes are active.
parametersDf <- transform(parametersDf, n0 = n/2, nActive = p*0.10)
# Keep only the matched distance pairs: tom/difftom and corr/diffcorr.
parametersDf <- parametersDf[which(parametersDf$cluster_distance=="tom" & parametersDf$Ecluster_distance=="difftom" |
                                     parametersDf$cluster_distance=="corr" & parametersDf$Ecluster_distance=="diffcorr"),]
# Select the scenario to run (after filtering, row 2 = corr/diffcorr).
parameterIndex = 2
simulationParameters <- parametersDf[parameterIndex,, drop = F]
print(simulationParameters)
## ---- generate-data ----
message("generating data")
# Unpack the selected scenario row into plain variables used below.
p <- simulationParameters[,"p"];
n <- simulationParameters[,"n"];
n0 <- simulationParameters[,"n0"];
SNR <- simulationParameters[,"SNR"]
n1 <- n - n0  # number of exposed subjects (the rest are unexposed)
cluster_distance <- simulationParameters[,"cluster_distance"]
Ecluster_distance <- simulationParameters[,"Ecluster_distance"]
rhoOther <- simulationParameters[,"rhoOther"];
betaMean <- simulationParameters[,"betaMean"];
betaE <- simulationParameters[,"betaE"]
alphaMean <- simulationParameters[,"alphaMean"];
rho <- simulationParameters[,"rho"];
nActive <- simulationParameters[,"nActive"];
includeInteraction <- simulationParameters[,"includeInteraction"]
includeStability <- simulationParameters[,"includeStability"]
distanceMethod <- simulationParameters[,"distanceMethod"]
clustMethod <- simulationParameters[,"clustMethod"]
cutMethod <- simulationParameters[,"cutMethod"]
agglomerationMethod <- simulationParameters[,"agglomerationMethod"]
K.max <- simulationParameters[,"K.max"]
B <- simulationParameters[,"B"]
# In this simulation it is blocks 3 and 4 that are important.
# leaveOut: optional specification of modules that should be left out
# of the simulation, that is their genes will be simulated as unrelated
# ("grey"). This can be useful when simulating several sets, in some of
# which a module is present while in others it is absent.
# d0: unexposed subjects -- modules 1-4 left out, zero module correlation.
d0 <- s_modules(n = n0, p = p, rho = c(0,0), exposed = FALSE,
                modProportions = c(0.15,0.15,0.15,0.15,0.15,0.25),
                minCor = 0.01,
                maxCor = 1,
                corPower = 1,
                propNegativeCor = 0.3,
                backgroundNoise = 0.5,
                signed = FALSE,
                leaveOut = 1:4)
# d1: exposed subjects -- all modules present, correlation rho within modules.
d1 <- s_modules(n = n1, p = p, rho = c(rho, rho), exposed = TRUE,
                modProportions = c(0.15,0.15,0.15,0.15,0.15,0.25),
                minCor = 0.4,
                maxCor = 1,
                corPower = 0.3,
                propNegativeCor = 0.3,
                backgroundNoise = 0.5,
                signed = FALSE)
# These should be the same; if they aren't, it is because the red and
# green modules were removed from the E=0 group.
truemodule0 <- d0$setLabels
t0 <- table(truemodule0)
truemodule1 <- d1$setLabels
t1 <- table(truemodule1)
table(truemodule0,truemodule1)
# Convert labels to colors for plotting
moduleColors <- WGCNA::labels2colors(truemodule1)
table(moduleColors, truemodule1)
# X does not contain E; unexposed rows (d0) must be bound first.
X <- rbind(d0$datExpr, d1$datExpr) %>%
  magrittr::set_colnames(paste0("Gene", 1:p)) %>%
  magrittr::set_rownames(paste0("Subject",1:n))
dim(X)
# pheatmap::pheatmap(cor(X))
# pheatmap::pheatmap(cor(d1$datExpr))
# pheatmap::pheatmap(cor(d0$datExpr))
# pheatmap(cor(d1$datExpr)-cor(d0$datExpr))
# pheatmap(WGCNA::TOMsimilarityFromExpr(X))
# pheatmap(WGCNA::TOMsimilarityFromExpr(d1$datExpr))
# pheatmap(WGCNA::TOMsimilarityFromExpr(d1$datExpr)-WGCNA::TOMsimilarityFromExpr(d0$datExpr))
# betaMainEffect <- vector("double", length = p)
# betaMainInteractions <- vector("double", length = p)
# # first assign random uniform to every gene in cluster 3 and 4,
# # then randomly remove so that thers only nActive left
# betaMainEffect[which(truemodule1 %in% 3:4)] <- runif(sum(truemodule1 %in% 3:4),
# betaMean - 0.1, betaMean + 0.1)
#
# # randomly set some coefficients to 0 so that there are only nActive non zero
# betaMainEffect <- replace(betaMainEffect,
# sample(which(truemodule1 %in% 3:4), sum(truemodule1 %in% 3:4) - nActive,
# replace = FALSE), 0)
#
# betaMainInteractions[which(betaMainEffect!=0)] <- runif(nActive, alphaMean - 0.1, alphaMean + 0.1)
#
# beta <- c(betaMainEffect,
# betaE,
# betaMainInteractions)
# plot(beta)
# Effect sizes: only the first nActive/2 genes of modules 3 and 4 are active.
betaMainEffect <- vector("double", length = p)
# betaMainInteractions <- vector("double", length = p)
# the first nActive/2 genes in the 3rd module get effects near betaMean
betaMainEffect[which(truemodule1 %in% 3)[1:(nActive/2)]] <- runif(
  nActive/2, betaMean - 0.1, betaMean + 0.1)
# the first nActive/2 genes in the 4th module get larger effects (betaMean+2)
betaMainEffect[which(truemodule1 %in% 4)[1:(nActive/2)]] <- runif(
  nActive/2, betaMean+2 - 0.1, betaMean+2 + 0.1)
# betaMainInteractions[which(betaMainEffect!=0)] <- runif(nActive, alphaMean - 0.1, alphaMean + 0.1)
# Must be in this order: main effects, then E, then interactions -- this
# ordering is relied upon by the data-generating function below.
beta <- c(betaMainEffect,
          betaE)#,betaMainInteractions)
plot(beta)
# Simulate the response and clustering summaries for the chosen scenario.
simulated_data <- s_generate_data(p = p,
                                  X = X,
                                  beta = beta,
                                  include_interaction = includeInteraction,
                                  cluster_distance = cluster_distance,
                                  n = n,
                                  n0 = n0,
                                  eclust_distance = Ecluster_distance,
                                  signal_to_noise_ratio = SNR,
                                  distance_method = distanceMethod,
                                  cluster_method = clustMethod,
                                  cut_method = cutMethod,
                                  agglomeration_method = agglomerationMethod,
                                  K.max = K.max, B = B, nPC = 1)
# devtools::use_data(simulated_data)
# Quick inspection of the simulated object
simulated_data$similarity %>% dim
simulated_data$DT %>% dim
simulated_data$DT %>% str
pryr::object_size(simulated_data$DT)
# pryr::object_size(simulated_data$DT[,1:1002])
#
# pryr::object_size(simulated_data$DT)
# Reorder columns (keeps column 1, moves column 502 forward, then 2:501).
# NOTE(review): 502 and 2:501 are hard-coded for p = 500 -- confirm before
# changing p in the parameter grid above.
simdata <- simulated_data$DT[,c(1,502,2:501)]
simdata[1:5,1:5]
simdata %>% dim
devtools::use_data(simdata, overwrite = TRUE)
devtools::check()
| /data-raw/simulated-data-processing.R | no_license | sahirbhatnagar/eclust | R | false | false | 7,384 | r | library(eclust)
library(magrittr)
parametersDf <- expand.grid(rho = c(0.90),
p = c(500),
# SNR = c(0.2,1,2),
SNR = 1,
n = c(100), # this is the total train + test sample size
# nActive = c(300), # must be even because its being split among two modules
#n0 = 200,
cluster_distance = c("tom","corr"),
Ecluster_distance = c("difftom", "diffcorr"),
rhoOther = 0.6,
betaMean = c(2),
alphaMean = c(1),
betaE = 3,
includeInteraction = FALSE,
includeStability = TRUE,
distanceMethod = "euclidean",
clustMethod = "hclust",
#cutMethod = "gap",
cutMethod = "dynamic",
agglomerationMethod = "average",
# agglomerationMethod = "ward.D",
K.max = 10, B = 10, stringsAsFactors = FALSE)
parametersDf <- transform(parametersDf, n0 = n/2, nActive = p*0.10)
parametersDf <- parametersDf[which(parametersDf$cluster_distance=="tom" & parametersDf$Ecluster_distance=="difftom" |
parametersDf$cluster_distance=="corr" & parametersDf$Ecluster_distance=="diffcorr"),]
parameterIndex = 2
simulationParameters <- parametersDf[parameterIndex,, drop = F]
print(simulationParameters)
## ---- generate-data ----
message("generating data")
p <- simulationParameters[,"p"];
n <- simulationParameters[,"n"];
n0 <- simulationParameters[,"n0"];
SNR <- simulationParameters[,"SNR"]
n1 <- n - n0
cluster_distance <- simulationParameters[,"cluster_distance"]
Ecluster_distance <- simulationParameters[,"Ecluster_distance"]
rhoOther <- simulationParameters[,"rhoOther"];
betaMean <- simulationParameters[,"betaMean"];
betaE <- simulationParameters[,"betaE"]
alphaMean <- simulationParameters[,"alphaMean"];
rho <- simulationParameters[,"rho"];
nActive <- simulationParameters[,"nActive"];
includeInteraction <- simulationParameters[,"includeInteraction"]
includeStability <- simulationParameters[,"includeStability"]
distanceMethod <- simulationParameters[,"distanceMethod"]
clustMethod <- simulationParameters[,"clustMethod"]
cutMethod <- simulationParameters[,"cutMethod"]
agglomerationMethod <- simulationParameters[,"agglomerationMethod"]
K.max <- simulationParameters[,"K.max"]
B <- simulationParameters[,"B"]
# in this simulation its blocks 3 and 4 that are important
# leaveOut: optional specification of modules that should be left out
# of the simulation, that is their genes will be simulated as unrelated
# ("grey"). This can be useful when simulating several sets, in some which a module
# is present while in others it is absent.
d0 <- s_modules(n = n0, p = p, rho = c(0,0), exposed = FALSE,
modProportions = c(0.15,0.15,0.15,0.15,0.15,0.25),
minCor = 0.01,
maxCor = 1,
corPower = 1,
propNegativeCor = 0.3,
backgroundNoise = 0.5,
signed = FALSE,
leaveOut = 1:4)
d1 <- s_modules(n = n1, p = p, rho = c(rho, rho), exposed = TRUE,
modProportions = c(0.15,0.15,0.15,0.15,0.15,0.25),
minCor = 0.4,
maxCor = 1,
corPower = 0.3,
propNegativeCor = 0.3,
backgroundNoise = 0.5,
signed = FALSE)
# these should be the same. if they arent, its because I removed the red and
# green modules from the E=0 group
truemodule0 <- d0$setLabels
t0 <- table(truemodule0)
truemodule1 <- d1$setLabels
t1 <- table(truemodule1)
table(truemodule0,truemodule1)
# Convert labels to colors for plotting
moduleColors <- WGCNA::labels2colors(truemodule1)
table(moduleColors, truemodule1)
# does not contain E, needs to bind unexposed first
X <- rbind(d0$datExpr, d1$datExpr) %>%
magrittr::set_colnames(paste0("Gene", 1:p)) %>%
magrittr::set_rownames(paste0("Subject",1:n))
dim(X)
# pheatmap::pheatmap(cor(X))
# pheatmap::pheatmap(cor(d1$datExpr))
# pheatmap::pheatmap(cor(d0$datExpr))
# pheatmap(cor(d1$datExpr)-cor(d0$datExpr))
# pheatmap(WGCNA::TOMsimilarityFromExpr(X))
# pheatmap(WGCNA::TOMsimilarityFromExpr(d1$datExpr))
# pheatmap(WGCNA::TOMsimilarityFromExpr(d1$datExpr)-WGCNA::TOMsimilarityFromExpr(d0$datExpr))
# betaMainEffect <- vector("double", length = p)
# betaMainInteractions <- vector("double", length = p)
# # first assign random uniform to every gene in cluster 3 and 4,
# # then randomly remove so that thers only nActive left
# betaMainEffect[which(truemodule1 %in% 3:4)] <- runif(sum(truemodule1 %in% 3:4),
# betaMean - 0.1, betaMean + 0.1)
#
# # randomly set some coefficients to 0 so that there are only nActive non zero
# betaMainEffect <- replace(betaMainEffect,
# sample(which(truemodule1 %in% 3:4), sum(truemodule1 %in% 3:4) - nActive,
# replace = FALSE), 0)
#
# betaMainInteractions[which(betaMainEffect!=0)] <- runif(nActive, alphaMean - 0.1, alphaMean + 0.1)
#
# beta <- c(betaMainEffect,
# betaE,
# betaMainInteractions)
# plot(beta)
betaMainEffect <- vector("double", length = p)
# betaMainInteractions <- vector("double", length = p)
# the first nActive/2 in the 3rd block are active
betaMainEffect[which(truemodule1 %in% 3)[1:(nActive/2)]] <- runif(
nActive/2, betaMean - 0.1, betaMean + 0.1)
# the first nActive/2 in the 4th block are active
betaMainEffect[which(truemodule1 %in% 4)[1:(nActive/2)]] <- runif(
nActive/2, betaMean+2 - 0.1, betaMean+2 + 0.1)
# betaMainInteractions[which(betaMainEffect!=0)] <- runif(nActive, alphaMean - 0.1, alphaMean + 0.1)
# must be in this order!!!! main effects, E, and then interactions... this order is being used
# by the generate_data function
beta <- c(betaMainEffect,
betaE)#,betaMainInteractions)
plot(beta)
simulated_data <- s_generate_data(p = p,
X = X,
beta = beta,
include_interaction = includeInteraction,
cluster_distance = cluster_distance,
n = n,
n0 = n0,
eclust_distance = Ecluster_distance,
signal_to_noise_ratio = SNR,
distance_method = distanceMethod,
cluster_method = clustMethod,
cut_method = cutMethod,
agglomeration_method = agglomerationMethod,
K.max = K.max, B = B, nPC = 1)
# devtools::use_data(simulated_data)
simulated_data$similarity %>% dim
simulated_data$DT %>% dim
simulated_data$DT %>% str
pryr::object_size(simulated_data$DT)
# pryr::object_size(simulated_data$DT[,1:1002])
#
# pryr::object_size(simulated_data$DT)
simdata <- simulated_data$DT[,c(1,502,2:501)]
simdata[1:5,1:5]
simdata %>% dim
devtools::use_data(simdata, overwrite = TRUE)
devtools::check()
|
## Load modelling helpers (elastic net) and cBioPortal data-access utilities,
## then log into Synapse for the pNF cell-line data used below.
source("../../bin/elasticNetPred.R")
library(pheatmap)
require(parallel)
source("../../bin/cBioPortalData.R")
library(synapseClient)
synapseLogin()
## Dataset identifiers; assigned into the global environment so they can be
## exported to the parallel workers created later in this script.
tcga.list<<-c("allTcga",tcga.cancer.types)
ccle.list<<-c("allCcle","BREAST","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE","LUNG","SKIN","CENTRAL_NERVOUS_SYSTEM","LARGE_INTESTINE","OVARY")
#' Get mutation-like score for each cell line/gene put into function
#' @param genelist list of genes to compare across
#' @param cancerType TCGA cancer abbreviation
#' @param minPat number of patients to require in predictor
scoreNFforGene <- function(gene, datasetList, testExpr, mut.vec2, dataset,
                           minPat = 3, fullMut = NA, fullExpr = NA) {
  ## For each training dataset in `datasetList`, build an elastic-net model
  ## of `gene` mutation status from expression, then score it (AUC) on the
  ## test expression matrix `testExpr` whose true WT/MUTANT labels are
  ## `mut.vec2`. A per-gene AUC dot plot is written to a PDF as a side effect.
  ##
  ## gene        gene symbol whose mutation status is modelled
  ## datasetList disease codes; 'allTcga' pools every available sample
  ## testExpr    genes-by-samples expression matrix to predict on
  ## mut.vec2    named factor (WT/MUTANT) for the test samples
  ## dataset     label used only in output file names and messages
  ## minPat      minimum number of mutant training samples required
  ## fullMut     optional pre-loaded mutation matrix (fetched when NA)
  ## fullExpr    optional pre-loaded expression z-score matrix (fetched when NA)
  ##
  ## Returns a named numeric vector of AUCs, one per training dataset
  ## (0 when too few mutants were available to fit a model).

  # Bug fix: the original bare is.na(fullMut) produced a non-scalar if()
  # condition whenever a matrix was supplied (as the main script does);
  # the length-1 guard keeps the check scalar.
  if (length(fullMut) == 1 && is.na(fullMut))
    fullMut <- getDisMutationData('') #getCcleMutationData('')
  if (length(fullExpr) == 1 && is.na(fullExpr))
    fullExpr <- getDisExpressionData('', getZscores = TRUE) #getCcleExpressionData('',getZscores=T)

  dlist <- lapply(datasetList, function(ds) {
    print(paste('Creating predictive model for', ds, 'across for gene', gene, ' to run against', dataset))
    ## restrict training mutation/expression data to this disease's samples
    if (ds == 'allTcga') {
      mutMatrix <- fullMut
      exprMatrix <- fullExpr
    } else {
      # sample ids use '.' in the matrix colnames, '-' in the API result
      samps <- sapply(getSamplesForDisease(ds), function(x) gsub('-', '.', x))
      mutMatrix <- fullMut[, intersect(samps, colnames(fullMut))]
      exprMatrix <- fullExpr[, intersect(samps, colnames(fullExpr))]
    }
    ## training samples that carry a mutation in `gene`
    ## NOTE(review): which() needs a logical vector, so mutMatrix rows are
    ## presumably TRUE/FALSE mutation calls -- confirm upstream format.
    gr <- which(rownames(mutMatrix) == gene)
    if (length(gr) > 0) {
      gmuts <- which(mutMatrix[gr, ])
    } else {
      gmuts <- c()
    }
    print(paste('Found', length(gmuts), 'samples with mutated', gene))
    # WT/MUTANT factor over the training samples, fed to the model builder
    mut.vec <- rep('WT', ncol(exprMatrix))
    mut.vec[gmuts] <- 'MUTANT'
    mut.vec <- factor(mut.vec, levels = c("WT", "MUTANT"))
    if (length(which(mut.vec == 'MUTANT')) <= minPat) {
      print("Not enough mutants here, returning predictions of 0")
      return(0)
    }
    # align the test samples with their labels, then train/test only on the
    # genes common to both expression matrices
    comm.pats <- intersect(colnames(testExpr), names(mut.vec2))
    testExpr <- testExpr[, comm.pats]
    mut.vec2 <- mut.vec2[comm.pats]
    comm.genes <- intersect(rownames(exprMatrix), rownames(testExpr))
    fit <- model.build(exprMatrix[comm.genes, ], mut.vec, pref = gene, doPlot = FALSE)
    res <- model.pred(fit, testExpr[comm.genes, ], mut.vec2,
                      pref = paste(ds, 'to', dataset, 'forGene', gene, sep = '_'), doPlot = TRUE)
    return(res$AUC)
  })
  dlist <- unlist(dlist)
  names(dlist) <- datasetList
  df <- data.frame(Sample = datasetList, AUC = as.numeric(dlist))
  print(df)
  require(ggplot2)
  # one AUC dot per training dataset, saved to <gene>mutationsPredictedIn<dataset>.pdf
  pdf(paste(gene, 'mutationsPredictedIn', dataset, '.pdf', sep = ''))
  g <- ggplot(df) + geom_point(aes(x = Sample, y = AUC)) +
    ggtitle(paste('Predicting NF1 status in dataset from', gene, 'in cell lines')) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
  print(g)
  dev.off()
  return(dlist)
}
## Candidate RAS-pathway genes whose mutation status will each be modelled.
## NOTE(review): "MAP2K" is likely a typo (MAP2K2?) -- confirm.
genelist=c("RASA1","SPRED1","NF1","TP53","NRAS","KRAS","BRAF","EGFR","SHC1","GRB2","MAP2K1","MAP2K","CDK4","RB1","PAK1","SOS1","PTEN","AKT1","PDK1","MTOR")
#genelist=c("RASA1","NF1","TP53","NRAS","KRAS","EGFR","SHC1","CDK4","RB1","PAK1","SOS1","PTEN","AKT1","PDK1","MTOR")
#genelist<-c("KRAS","SPRED1","NF1")
gene='NF1'
#res<-crossDataScoresPerGene(datasetList=ccle.list,gene=gene)
## pNF cell-line expression from Synapse; tidy up the "..clone." column names.
pnfData<-read.table(synGet('syn7124098')@filePath,sep='\t',header=T)
colnames(pnfData)<- gsub('..',' (',gsub('.clone.',' clone)',colnames(pnfData),fixed=T),fixed=T)
phenoData<-read.table(synGet('syn7139168')@filePath,sep='\t',header=T)
mutVec=phenoData$Genotype
names(mutVec)<-rownames(phenoData)
## Two labellings of the NF1 genotype column:
## HetsAsNeg: only '++' counts as WT, so heterozygotes are labelled MUTANT.
mutVecHetsAsNeg<-rep('MUTANT',length(mutVec))
mutVecHetsAsNeg[which(mutVec=='++')]<-'WT'
mutVecHetsAsNeg<-factor(mutVecHetsAsNeg,levels=c("WT","MUTANT"))
names(mutVecHetsAsNeg)<-rownames(phenoData)
## HetsAsPos: only '--' counts as MUTANT, so heterozygotes are labelled WT.
mutVecHetsAsPos<-rep('MUTANT',length(mutVec))
mutVecHetsAsPos[which(mutVec!='--')]<-'WT'
mutVecHetsAsPos<-factor(mutVecHetsAsPos,levels=c("WT","MUTANT"))
names(mutVecHetsAsPos)<-rownames(phenoData)
## One worker per gene (capped at 5); worker logs go to pnf_cluster.txt.
cl<-makeCluster(min(5,length(genelist)),outfile='pnf_cluster.txt')
## Pre-computed TCGA matrices, loaded from disk instead of the API.
load('exprData.Rdata')
fullExpr<-exprData
load('mutData.Rdata')
fullMut<-mutData
# fullMut<-getDisMutationData('') #getCcleMutationData('')
# fullExpr<-getDisExpressionData('',getZscores=TRUE)#getCcleExpressionData('',getZscores=T)
## Workers need the scoring function, both labellings, and the shared data.
clusterExport(cl,c("scoreNFforGene","mutVecHetsAsNeg","mutVecHetsAsPos","pnfData",'tcga.list','fullMut','fullExpr'))
clusterEvalQ(cl,source("../../bin/elasticNetPred.R"))
clusterEvalQ(cl,source("../../bin/cBioPortalData.R"))
## Score every gene under both labellings; the useful output is the PDFs
## written inside scoreNFforGene (dlist itself only keeps res2 per worker).
dlist<-parLapply(cl,as.list(genelist),function(g){
datasetList<-tcga.list
#for(g in genelist){
##sample all combinations of datasets - ccle, tcga, to see how each predicts the other.
res<-scoreNFforGene(g,datasetList,pnfData,mutVecHetsAsNeg,'pnfCellsHetsareMuts',minPat=3,fullMut,fullExpr)
res2<-scoreNFforGene(g,datasetList,pnfData,mutVecHetsAsPos,'pnfCellsHetsAreWT',minPat=3,fullMut,fullExpr)
})
stopCluster(cl)
| /analysis/2016-08-23/applyModelsToPnfCellsFromTCGA.R | no_license | sgosline/RASPathwaySig | R | false | false | 5,881 | r | source("../../bin/elasticNetPred.R")
library(pheatmap)
require(parallel)
source("../../bin/cBioPortalData.R")
library(synapseClient)
synapseLogin()
tcga.list<<-c("allTcga",tcga.cancer.types)
ccle.list<<-c("allCcle","BREAST","HAEMATOPOIETIC_AND_LYMPHOID_TISSUE","LUNG","SKIN","CENTRAL_NERVOUS_SYSTEM","LARGE_INTESTINE","OVARY")
#'Get mutation-like score for each cell line/gene put into function
#' @param genelist list of genes to compare across
#' @param cancerType TCGA cancer abbreviation
#' @param minPat number of patients to require in predictor
## Build an elastic-net predictor of `gene` mutation status in each training
## dataset of `datasetList` and report the AUC obtained on `testExpr` with
## true labels `mut.vec2`. Returns a named vector of AUCs (0 when fewer than
## `minPat` mutant training samples exist); also writes an AUC dot-plot PDF.
scoreNFforGene<-function(gene,datasetList,testExpr,mut.vec2,dataset,minPat=3,fullMut=NA,fullExpr=NA){
## NOTE(review): is.na() on a matrix yields a matrix, so these if() guards
## only behave when fullMut/fullExpr are left as scalar NA or both supplied;
## prefer length(x) == 1 && is.na(x).
if(is.na(fullMut))
fullMut<-getDisMutationData('') #getCcleMutationData('')
if(is.na(fullExpr))
fullExpr<-getDisExpressionData('',getZscores=TRUE)#getCcleExpressionData('',getZscores=T)
dlist<-lapply(datasetList,function(ds){
# dlist<-lapply(genelist,function(g){
print(paste('Creating predictive model for',ds,'across for gene',gene,' to run against',dataset))
## get training dataset - expression and mutation - restricted to this
## disease's samples ('-' is rewritten to '.' to match matrix colnames)
if(ds=='allTcga'){
mutMatrix=fullMut
exprMatrix=fullExpr
}else{
samps<-sapply(getSamplesForDisease(ds),function(x) gsub('-','.',x))
mutMatrix<-fullMut[,intersect(samps,colnames(fullMut))]
exprMatrix<-fullExpr[,intersect(samps,colnames(fullExpr))]
}
gr<-which(rownames(mutMatrix)==gene)
## NOTE(review): genevals is computed but never used below -- dead code.
genevals=rep(0,length(datasetList))
names(genevals)<-datasetList
if(length(gr)>0){
gmuts<-which(mutMatrix[gr,])
}else{
gmuts<-c()
}
print(paste('Found',length(gmuts),'samples with mutated',gene))
mut.vec=rep('WT',ncol(exprMatrix))
# WT/MUTANT factor over the training samples, fed to the model builder
mut.vec[gmuts]<-'MUTANT'
mut.vec=factor(mut.vec,levels=c("WT","MUTANT"))
if(length(which(mut.vec=='MUTANT'))<=minPat){
print("Not enough mutants here, returning predictions of 0")
return(0)
}
# build model, currently only elastic net
# testMatrix<-testMut
# align test samples with their labels
comm.pats<-intersect(colnames(testExpr),names(mut.vec2))
testExpr<-testExpr[,comm.pats]
mut.vec2<-mut.vec2[comm.pats]
## train/test only on genes common to both expression matrices
comm.genes<-intersect(rownames(exprMatrix),rownames(testExpr))
fit=model.build(exprMatrix[comm.genes,],mut.vec,pref=gene,doPlot=FALSE)
#testExpr<-testExpr[comm.genes,]
res=model.pred(fit,testExpr[comm.genes,],mut.vec2,pref=paste(ds,'to',dataset,'forGene',gene,sep='_'),doPlot=T)
return(res$AUC)
})
dlist<-unlist(dlist)
names(dlist)<-datasetList
df<-data.frame(Sample=datasetList,AUC=as.numeric(unlist(dlist)))
print(df)
require(ggplot2)
## one AUC dot per training dataset, saved to <gene>mutationsPredictedIn<dataset>.pdf
pdf(paste(gene,'mutationsPredictedIn',dataset,'.pdf',sep=''))
g<-ggplot(df)+geom_point(aes(x=Sample,y=AUC))+ggtitle(paste('Predicting NF1 status in dataset from',gene,'in cell lines'))+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(g)
dev.off()
# if(mean(dmat,na.rm=T)!=0){
# pheatmap(dmat,cellheight=10,cellwidth=10,main=paste(ifelse(cancerType=='','All',cancerType),prefix,'predictor AUC values'),filename=paste(cancerType,'min',minPat,'patientPredictorAUCvals.pdf',sep=''))
#}
#write.table(df,quote=F,file=paste(ifelse(cancerType=='','All',cancerType),prefix,'min',minPat,'patientPredictorAUCvals.txt',sep=''),sep='\t')
#return(df)
return(dlist)
}
genelist=c("RASA1","SPRED1","NF1","TP53","NRAS","KRAS","BRAF","EGFR","SHC1","GRB2","MAP2K1","MAP2K","CDK4","RB1","PAK1","SOS1","PTEN","AKT1","PDK1","MTOR")
#genelist=c("RASA1","NF1","TP53","NRAS","KRAS","EGFR","SHC1","CDK4","RB1","PAK1","SOS1","PTEN","AKT1","PDK1","MTOR")
#genelist<-c("KRAS","SPRED1","NF1")
gene='NF1'
#res<-crossDataScoresPerGene(datasetList=ccle.list,gene=gene)
pnfData<-read.table(synGet('syn7124098')@filePath,sep='\t',header=T)
colnames(pnfData)<- gsub('..',' (',gsub('.clone.',' clone)',colnames(pnfData),fixed=T),fixed=T)
phenoData<-read.table(synGet('syn7139168')@filePath,sep='\t',header=T)
mutVec=phenoData$Genotype
names(mutVec)<-rownames(phenoData)
mutVecHetsAsNeg<-rep('MUTANT',length(mutVec))
mutVecHetsAsNeg[which(mutVec=='++')]<-'WT'
mutVecHetsAsNeg<-factor(mutVecHetsAsNeg,levels=c("WT","MUTANT"))
names(mutVecHetsAsNeg)<-rownames(phenoData)
mutVecHetsAsPos<-rep('MUTANT',length(mutVec))
mutVecHetsAsPos[which(mutVec!='--')]<-'WT'
mutVecHetsAsPos<-factor(mutVecHetsAsPos,levels=c("WT","MUTANT"))
names(mutVecHetsAsPos)<-rownames(phenoData)
cl<-makeCluster(min(5,length(genelist)),outfile='pnf_cluster.txt')
##get all data
load('exprData.Rdata')
fullExpr<-exprData
load('mutData.Rdata')
fullMut<-mutData
# fullMut<-getDisMutationData('') #getCcleMutationData('')
# fullExpr<-getDisExpressionData('',getZscores=TRUE)#getCcleExpressionData('',getZscores=T)
clusterExport(cl,c("scoreNFforGene","mutVecHetsAsNeg","mutVecHetsAsPos","pnfData",'tcga.list','fullMut','fullExpr'))
clusterEvalQ(cl,source("../../bin/elasticNetPred.R"))
clusterEvalQ(cl,source("../../bin/cBioPortalData.R"))
dlist<-parLapply(cl,as.list(genelist),function(g){
datasetList<-tcga.list
#for(g in genelist){
##sample all combinations of datasets - ccle, tcga, to see how each predicts the other.
res<-scoreNFforGene(g,datasetList,pnfData,mutVecHetsAsNeg,'pnfCellsHetsareMuts',minPat=3,fullMut,fullExpr)
res2<-scoreNFforGene(g,datasetList,pnfData,mutVecHetsAsPos,'pnfCellsHetsAreWT',minPat=3,fullMut,fullExpr)
})
stopCluster(cl)
|
## Exploratory plotting of 2xov folding/annealing simulations. This is an
## interactive scratch script: blocks were run piecemeal, not top to bottom.
library(tidyverse)
library(gridExtra)
library(stringr)
pre <- "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_go_model_folding_temperature/simulation/"
target <- str_c(pre, "data")
data <- read_csv(target)
## Qw vs step, coloured by run, with a smoothed trend.
ggplot(data)+
aes(x=step, qw) +
geom_point(aes(color = run))+
geom_smooth()+
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# alternative input datasets kept for reference:
# data <- read_csv("../../T0766/simulation_v2/data")
# data <- read_csv("../../T0766/simulation_iteration_1/data")
# data <- read_csv("../../T0766_single/simulation_iteration_1/data")
data <- read_csv("../../T0766_single/simulation/data")
# data <- read_csv("../../T0778_single/simulation/data")
# data <- read_csv("../../T0778/simulation_iteration_1/data")
# data <- read_csv("../../T0778/simulation/data")
## Working directories tried in different sessions (only the last takes effect).
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v2/simulation/350")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v3/simulation/350")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v3/simulation/350/0")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.2/simulation")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.1/simulation")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.3/simulation")
data1 <- read_csv("data")
data2 <- read_csv("data")
data3 <- read_csv("data")
data <- as_tibble( read.table("energy.dat", header = TRUE) )
## NOTE(review): `Shake` is not defined anywhere in this script; this line
## errors unless Shake exists in the interactive session.
data <- data[-Shake]
data
ggsave("~/Desktop/pulling_data/frag0.01_qw_run.png")
ggsave("~/Desktop/pulling_data/frag0.01_qw_temp.png")
## Paths for the fragment-strength comparison below.
var = c(0.2, 0.1, 0.01)
pre = "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/"
dir = str_c(pre, var, "/simulation/data")
dat <- function(var){
  # Read the fragment-memory run at strength `var` and tag each row with the
  # fragment strength and the temperature implied by the step counter
  # (temp = 200 + step / 2e4, per the annealing schedule encoded here).
  base_path <- "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag"
  data_file <- str_c(base_path, var, "/simulation/data")
  run_data <- read_csv(data_file)
  run_data %>%
    mutate(frag = var, temp = 200 + step/(2*10^4))
}
set <- function(var){
  # Load the fragment dataset for strength `var` and bind it to a variable
  # named "frag<var>" in the caller's environment.
  # Bug fix: assign() previously used its default environment (this
  # function's own local frame), so the binding silently disappeared as soon
  # as set() returned; envir = parent.frame() makes the assignment stick.
  name <- str_c("frag", var)
  assign(name, value = dat(var), envir = parent.frame())
}
var <- list("0.2","0.1","0.01")
## NOTE(review): this walk() call is malformed (arguments do not match
## walk(.x, .f, ...)), and set()/assign() would not persist outside the
## function anyway; the explicit assignments below do the actual loading.
var %>%
walk(str_c("frag",.),value = dat(.), assign)
frag0.2 <- dat(0.2)
frag0.1 <- dat(0.1)
frag0.01 <- dat(0.01)
data <- rbind(frag0.2,frag0.1,frag0.01)
## Qw vs temperature, one panel per fragment-memory strength.
ggplot(data)+
aes(x=temp, qw) +
geom_point()+
geom_smooth()+
ylim(0.2, 1) +
facet_wrap(~frag) +
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
## Same panels, points coloured by run.
ggplot(data)+
aes(x=temp, qw, color=run) +
geom_point()+
geom_smooth()+
ylim(0.2, 1) +
facet_wrap(~frag)+
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
ggsave("~/Desktop/pulling_data/frag_folding_color.png", width = 14, height =4)
## One Qw-vs-temperature plot per fragment strength, saved individually.
plots <- data %>%
split(.$frag) %>%
map(~ggplot(., aes(temp, qw)) + geom_point()+ geom_smooth())
folder <- "~/Desktop/pulling_data/frag"
mylist = c(0.2, 0.1, 0.01)
paths <- stringr::str_c(folder, mylist, ".png")
pwalk(list(paths, plots), ggsave)
## Qw per run, runs ordered by median Qw over the whole trajectory.
ggplot(data) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
coord_flip() +
theme_bw()
## Keep only the late (equilibrated) portion of each trajectory.
data_end <- data %>% filter(step > 7000)
ggplot(data) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# qw per run, late steps only
ggplot(data_end) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
coord_flip()
ggplot(data_end) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
ylim(0.35, 0.55) +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# energy per run (most negative median first), late steps only
data_energy <- data %>% filter(step > 7000)
ggplot(data_energy) +
aes(reorder(run, -energy, FUN = median), energy) +
geom_boxplot() +
xlab("") +
ylab("Energy") +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
## NOTE(review): data_last is never defined in this script; this block only
## works if it was created interactively.
ggplot(data_last) +
aes(qw, energy, color=run) +
geom_point()
## Per-run energy histograms on the equilibrated tail.
ggplot(data %>% filter(step > 7000)) +
aes(energy) +
geom_histogram() +
facet_wrap(~run, scales = "free") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
## NOTE(review): p1 uses r0 before r0 is defined two lines below, and p2 is
## never created, so grid.arrange(p1, p2) fails when run top to bottom.
p1 <- ggplot(r0) +
aes(qw) +
geom_histogram()
summary(r0)
r0 <- data %>% filter(run == "run_11", step > 7000)
ggplot(r0) +
aes(qw) +
geom_histogram()
summary(r0)
grid.arrange(p1,p2)
ggsave("~/Desktop/T0766_round3_run_qw.png")
ggsave("~/Desktop/T0766_round3_run_qw.png") | /R/qw_run.R | permissive | luwei0917/awsemmd_script | R | false | false | 5,049 | r | library(tidyverse)
library(gridExtra)
library(stringr)
pre <- "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_go_model_folding_temperature/simulation/"
target <- str_c(pre, "data")
data <- read_csv(target)
ggplot(data)+
aes(x=step, qw) +
geom_point(aes(color = run))+
geom_smooth()+
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# data <- read_csv("../../T0766/simulation_v2/data")
# data <- read_csv("../../T0766/simulation_iteration_1/data")
# data <- read_csv("../../T0766_single/simulation_iteration_1/data")
data <- read_csv("../../T0766_single/simulation/data")
# data <- read_csv("../../T0778_single/simulation/data")
# data <- read_csv("../../T0778/simulation_iteration_1/data")
# data <- read_csv("../../T0778/simulation/data")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v2/simulation/350")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v3/simulation/350")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/qValue_v3/simulation/350/0")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.2/simulation")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.1/simulation")
setwd("/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag0.3/simulation")
data1 <- read_csv("data")
data2 <- read_csv("data")
data3 <- read_csv("data")
data <- as_tibble( read.table("energy.dat", header = TRUE) )
data <- data[-Shake]
data
ggsave("~/Desktop/pulling_data/frag0.01_qw_run.png")
ggsave("~/Desktop/pulling_data/frag0.01_qw_temp.png")
var = c(0.2, 0.1, 0.01)
pre = "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/"
dir = str_c(pre, var, "/simulation/data")
dat <- function(var){
# Read the simulation output for fragment-memory strength `var` and add the
# strength plus the temperature implied by the step counter
# (temp = 200 + step / 2e4, per the annealing schedule encoded here).
pre = "/Users/weilu/Research/server/project/freeEnergy_2xov/2xov_folding_temperature/frag"
dir = str_c(pre, var, "/simulation/data")
data <- read_csv(dir)
data %>% mutate(frag = var) %>% mutate(temp = 200 + step/(2*10^4))
}
set <- function(var){
# Intended to create a variable "frag<var>" holding dat(var).
# NOTE(review): assign() defaults to this function's own local environment,
# so the binding is lost when set() returns -- pass envir = parent.frame()
# (or globalenv()) for this function to have any observable effect.
name <- str_c("frag", var)
assign(name, value = dat(var))
}
var <- list("0.2","0.1","0.01")
var %>%
walk(str_c("frag",.),value = dat(.), assign)
frag0.2 <- dat(0.2)
frag0.1 <- dat(0.1)
frag0.01 <- dat(0.01)
data <- rbind(frag0.2,frag0.1,frag0.01)
ggplot(data)+
aes(x=temp, qw) +
geom_point()+
geom_smooth()+
ylim(0.2, 1) +
facet_wrap(~frag) +
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
ggplot(data)+
aes(x=temp, qw, color=run) +
geom_point()+
geom_smooth()+
ylim(0.2, 1) +
facet_wrap(~frag)+
theme_bw() +
theme(axis.text=element_text(size=20)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
ggsave("~/Desktop/pulling_data/frag_folding_color.png", width = 14, height =4)
plots <- data %>%
split(.$frag) %>%
map(~ggplot(., aes(temp, qw)) + geom_point()+ geom_smooth())
folder <- "~/Desktop/pulling_data/frag"
mylist = c(0.2, 0.1, 0.01)
paths <- stringr::str_c(folder, mylist, ".png")
pwalk(list(paths, plots), ggsave)
ggplot(data) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
coord_flip() +
theme_bw()
data_end <- data %>% filter(step > 7000)
ggplot(data) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# qw run
ggplot(data_end) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
coord_flip()
ggplot(data_end) +
aes(reorder(run, qw, FUN = median), qw) +
geom_boxplot() +
xlab("") +
ylab("Qw") +
ylim(0.35, 0.55) +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
# energy run
data_energy <- data %>% filter(step > 7000)
ggplot(data_energy) +
aes(reorder(run, -energy, FUN = median), energy) +
geom_boxplot() +
xlab("") +
ylab("Energy") +
coord_flip() +
theme_bw() +
theme(axis.text=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.title.y=element_text(size=30))
ggplot(data_last) +
aes(qw, energy, color=run) +
geom_point()
ggplot(data %>% filter(step > 7000)) +
aes(energy) +
geom_histogram() +
facet_wrap(~run, scales = "free") +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
p1 <- ggplot(r0) +
aes(qw) +
geom_histogram()
summary(r0)
r0 <- data %>% filter(run == "run_11", step > 7000)
ggplot(r0) +
aes(qw) +
geom_histogram()
summary(r0)
grid.arrange(p1,p2)
ggsave("~/Desktop/T0766_round3_run_qw.png")
ggsave("~/Desktop/T0766_round3_run_qw.png") |
#' Simulate a mutation dataset with survival annotation
#'
#' To simulate a dataset we need a real pathway: uniform background
#' mutations are drawn for every pathway gene, optional "clique" genes get a
#' lower rate, and the impacted genes receive class-dependent (bimodal)
#' mutation profiles.
#'
#' @param pathway a Pathway
#' @param impacted_genes the genes of a module with the aberrant signal chain
#' @param patients_fractions the fraction of patients with n mutated genes
#' @param ann survival annotations
#' @param omicName the name of the omics to consider
#' @param mutation_rate the basal mutation rate
#' @param mut_cliques_genes genes that have low mutation rate
#'
#' @return list
#'   \item{exprs}{expression}
#'   \item{annotation}{the survival annotation}
#'   \item{graph}{a graphNEL}
#'   \item{chain}{the chain of selectP}
#'
#' @importFrom checkmate assertClass
#' @importFrom graphite pathwayGraph
#' @importFrom graph nodes randomNodeGraph
#' @importFrom simPATHy simPATHy
#' @rdname simulate_mutation_dataset
#' @export
#'
makeTheMutationDataset <- function(pathway, impacted_genes, patients_fractions, ann, omicName="x",
                                   mutation_rate=0.001, mut_cliques_genes=NULL) {
  checkmate::assertClass(pathway, "Pathway")
  if (!(omicName %in% names(ann))) {
    stop(paste0(omicName, " does not appear in ann data.frame."))
  }
  nd = graphite::nodes(pathway)
  # every impacted gene must belong to the pathway
  missing_genes <- setdiff(impacted_genes, nd)
  if (length(missing_genes) != 0) {
    stop(paste0("some of impactThisGenes are missing: ", paste(missing_genes, collapse = ', ')))
  }
  n_patients = nrow(ann)
  # background: uniform random mutations across all pathway genes
  mut <- create_random_uniform_mutatios(nd, n_patients, mutation_rate=mutation_rate, col_names=row.names(ann))
  if (!is.null(mut_cliques_genes)) {
    not_found_genes <- setdiff(mut_cliques_genes, nd)
    if (length(not_found_genes) > 0 )
      stop(paste0("some clique genes were not found: ", paste(not_found_genes, collapse = ", ")))
    # overwrite the clique genes' rows with a lower mutation rate
    low_rate_mut <- create_random_uniform_mutatios(mut_cliques_genes, n_patients, mutation_rate=0.0005, col_names=row.names(ann))
    mut[row.names(low_rate_mut), ] <- low_rate_mut
  }
  # impacted genes get class-dependent (bimodal) mutation profiles
  mut_binomial <- create_bimodal_mutations(ann[[omicName]], impacted_genes,
                                           patients_fractions, mutation_rate=0.0005)
  mut[row.names(mut_binomial), ] <- mut_binomial
  # internal consistency check between mutation columns and annotation rows
  if (!identical(colnames(mut), row.names(ann)))
    stop("mutation matrix columns do not match 'ann' row names")
  annotations <- data.frame(status=ann$status, days=ann$stop,
                            class=ann[[omicName]], row.names=row.names(ann), stringsAsFactors=FALSE)
  return(list(exprs=mut, annotation=annotations, graph=graphite::pathwayGraph(pathway), chain=NULL))
}
#' Simulate a flat mutation dataset with survival annotation
#'
#' To simulate a dataset we need a real pathway. Every pathway gene receives
#' uniform background mutations only, with no class-dependent signal.
#'
#' @inheritParams makeTheMutationDataset
#'
#' @importFrom checkmate assertClass
#' @importFrom graphite pathwayGraph
#' @importFrom graph nodes randomNodeGraph
#' @importFrom simPATHy simPATHy
#'
#' @rdname simulate_mutation_dataset
#' @export
#'
makeUniformMutationsDataset <- function(pathway, ann, omicName="x", mutation_rate=0.001) {
  checkmate::assertClass(pathway, "Pathway")
  if (!(omicName %in% names(ann))) {
    stop(paste0(omicName, " does not appear in ann data.frame."))
  }
  nd = graphite::nodes(pathway)
  n_patients = nrow(ann)
  # uniform background mutations for every pathway gene
  mut <- create_random_uniform_mutatios(nd, n_patients, mutation_rate=mutation_rate, col_names=row.names(ann))
  # internal consistency check between mutation columns and annotation rows
  if (!identical(colnames(mut), row.names(ann)))
    stop("mutation matrix columns do not match 'ann' row names")
  annotations <- data.frame(status=ann$status, days=ann$stop,
                            class=ann[[omicName]], row.names=row.names(ann), stringsAsFactors=FALSE)
  return(list(exprs=mut, annotation=annotations, graph=graphite::pathwayGraph(pathway), chain=NULL))
}
## Build a genes-by-patients mutation matrix whose signal depends on the
## patient class: class-1 patients get deterministic 0/1 profiles with a
## decreasing number of mutated genes per fraction block (gene positions
## shuffled per patient), class-0 patients get only uniform background noise.
## Columns are returned in the original order of `classes`, named
## "<index>_<class>".
create_bimodal_mutations <- function(classes, genes=letters[1:5], patients_fractions = c(0.05,0.1,0.1,0.2),
mutation_rate=0.0005) {
# splitPatients() creates length(patients_fractions)+1 blocks, which must
# match the length(genes) distinct mutation counts used below
if (!(length(genes)-1 == length(patients_fractions))) {
stop("something wrong 322")
}
cl1 <- sum(classes==1)
# class-1 patients first, then class-0 (matches the cbind order below)
numeric_order <- order(classes, decreasing = T)
patients_fake_names = paste(seq_along(classes), sep="_", classes)
# NOTE(review): "patinets" is a typo in this local name
ordered_patinets_fake_names <- patients_fake_names[numeric_order]
patientsPerCategory <- splitPatients(cl1, patients_fractions)
maxRunningSum <- length(genes)
# per class-1 patient: how many of the impacted genes are mutated
# (length(genes) for the first block, down to 1 for the remainder block)
maxSum=rep(rev(seq_len(maxRunningSum)), times=patientsPerCategory)
# one column per class-1 patient: x ones placed at random gene positions
mutated_data <- sapply(maxSum, function(x) {
patient_mutation_profile <- rep(0, length(genes))
patient_mutation_profile[seq_len(x)] <- 1
patient_mutation_profile <- sample(patient_mutation_profile)
})
# class-0 patients: background-rate mutations only
non_mutated_data <- create_random_uniform_mutatios(genes, n_patients = sum(classes==0))
data <- cbind(mutated_data, non_mutated_data)
colnames(data) <- ordered_patinets_fake_names
# restore the original patient order of `classes`
data <- data[, patients_fake_names]
data
}
splitPatients <- function(n, fractions = c(0.05,0.1,0.1,0.2)) {
  # Partition `n` patients into length(fractions) blocks (each rounded up
  # with ceiling) plus a final remainder block holding everyone left over.
  # Returns a numeric vector of length(fractions) + 1 that sums to `n`.
  blocks <- ceiling(n * fractions)
  last_block <- n - sum(blocks)
  # Guard: rounding up can over-allocate; a negative remainder would later
  # break rep(..., times = patientsPerCategory) in create_bimodal_mutations.
  if (last_block < 0) {
    stop("'fractions' allocate more than 'n' patients after rounding up")
  }
  c(blocks, last_block)
}
create_random_uniform_mutatios <- function(genes, n_patients, mutation_rate=0.0005, col_names=NULL) {
  # Background mutation matrix: one row per gene, one column per patient.
  # Each entry is an independent Binomial(2, mutation_rate) draw, i.e. the
  # number of mutated alleles (0, 1 or 2) for that gene/patient pair.
  # Rows are drawn gene by gene so the RNG stream matches the historical
  # implementation exactly.
  per_gene_draws <- lapply(genes, function(gene) {
    rbinom(n_patients, 2, mutation_rate)
  })
  mut_matrix <- do.call(rbind, per_gene_draws)
  dimnames(mut_matrix) <- list(genes, col_names)
  mut_matrix
}
# sample(size = 100,x = seq(-2,2,1),prob = c(0.05,0.15,0.5,0.20,0.1), replace = T)
| /R/mutation-cnv_dataset_generation.R | no_license | cavei/houseOfClipUtility | R | false | false | 5,317 | r | #' Simulate a mutation dataset with survival annotation
#'
#' To simulate a dataset we need a real pathway.
#'
#' @param pathway a Pathway
#' @param impacted_genes the genes of a module with the aberrant signal chain
#' @param patients_fractions the fraction of patients with n mutated genes
#' @param ann survival annotations
#' @param omicName the name of the omics to consider
#' @param mutation_rate the basel mutation rate
#' @param mut_cliques_genes genes that have low mutation rate
#'
#' @return list
#' \item{exprs}{expression}
#' \item{annotation}{the survival annotation}
#' \item{graph}{a graphNEL}
#' \item{chain}{the chain of selectP}
#'
#' @importFrom checkmate assertClass
#' @importFrom graphite pathwayGraph
#' @importFrom graph nodes randomNodeGraph
#' @importFrom simPATHy simPATHy
#' @rdname simulate_mutation_dataset
#' @export
#'
makeTheMutationDataset <- function(pathway, impacted_genes, patients_fractions, ann, omicName="x",
mutation_rate=0.001, mut_cliques_genes=NULL) {
checkmate::assertClass(pathway, "Pathway")
if (!(omicName %in% names(ann))) {
stop(paste0(omicName, " does not apper in ann data.frame."))
}
nd = graphite::nodes(pathway)
missing_genes <- setdiff(impacted_genes, nd)
if (length(missing_genes) != 0) {
stop(paste0("some of impactThisGenes are missing: ", paste(missing_genes, collapse = ', ')))
}
n_patients = nrow(ann)
mut <- create_random_uniform_mutatios(nd, n_patients, mutation_rate=mutation_rate, col_names=row.names(ann))
if (!is.null(mut_cliques_genes)) {
not_found_genes <- setdiff(mut_cliques_genes, nd)
if (length(not_found_genes) > 0 )
stop(paste0("some clique genes were not found: ", paste(not_found_genes, collapse = ", ")))
low_rate_mut <- create_random_uniform_mutatios(mut_cliques_genes, n_patients, mutation_rate=0.0005, col_names=row.names(ann))
mut[row.names(low_rate_mut), ] <- low_rate_mut
}
mut_binomial <- create_bimodal_mutations(ann[[omicName]], impacted_genes,
patients_fractions, mutation_rate=0.0005)
mut[row.names(mut_binomial), ] <- mut_binomial
if (!identical(colnames(mut), row.names(ann)))
stop("something wrong 340")
annotations <- data.frame(status=ann$status, days=ann$stop,
class=ann[[omicName]], row.names=row.names(ann), stringsAsFactors=FALSE)
return(list(exprs=mut, annotation=annotations, graph=graphite::pathwayGraph(pathway), chain=NULL))
}
#' Simulate a flat mutation dataset with survival annotation
#'
#' To simulate a dataset we need a real pathway. All pathway genes receive
#' uniform background mutations only (no class-dependent signal).
#'
#' @inheritParams makeTheMutationDataset
#'
#' @importFrom checkmate assertClass
#' @importFrom graphite pathwayGraph
#' @importFrom graph nodes randomNodeGraph
#' @importFrom simPATHy simPATHy
#'
#' @rdname simulate_mutation_dataset
#' @export
#'
makeUniformMutationsDataset <- function(pathway, ann, omicName="x", mutation_rate=0.001) {
  checkmate::assertClass(pathway, "Pathway")
  if (!(omicName %in% names(ann))) {
    # BUGFIX: typo "apper" -> "appear" in the error message.
    stop(paste0(omicName, " does not appear in ann data.frame."))
  }
  nd <- graphite::nodes(pathway)
  n_patients <- nrow(ann)
  mut <- create_random_uniform_mutatios(nd, n_patients, mutation_rate=mutation_rate, col_names=row.names(ann))
  if (!identical(colnames(mut), row.names(ann)))
    # Replaces the uninformative "something wrong 340" internal-check message.
    stop("internal error: mutation matrix columns do not match annotation rows")
  annotations <- data.frame(status=ann$status, days=ann$stop,
                            class=ann[[omicName]], row.names=row.names(ann), stringsAsFactors=FALSE)
  return(list(exprs=mut, annotation=annotations, graph=graphite::pathwayGraph(pathway), chain=NULL))
}
# Build the class-dependent ("bimodal") part of the simulated mutation matrix.
#
# Class-1 patients are split into categories via splitPatients(); a patient in
# category k carries exactly k of the `genes` mutated (which genes is
# randomised per patient).  Class-0 patients only get uniform background
# mutations.  Columns are returned in the original patient order and named
# "<index>_<class>".
#
# @param classes 0/1 class label per patient.
# @param genes genes carrying the class-dependent signal (become row names).
# @param patients_fractions fractions of class-1 patients per category; must
#   have length(genes) - 1 entries (the remainder forms the last category).
# @param mutation_rate background rate used for the class-0 patients.
# @return matrix with length(genes) rows and length(classes) columns.
create_bimodal_mutations <- function(classes, genes=letters[1:5], patients_fractions = c(0.05,0.1,0.1,0.2),
                                     mutation_rate=0.0005) {
  if (!(length(genes)-1 == length(patients_fractions))) {
    # Replaces the uninformative "something wrong 322" message.
    stop("patients_fractions must have length(genes) - 1 entries")
  }
  cl1 <- sum(classes==1)
  # Order patients so that all class-1 patients come first.
  numeric_order <- order(classes, decreasing = TRUE)
  patients_fake_names <- paste(seq_along(classes), sep="_", classes)
  ordered_patients_fake_names <- patients_fake_names[numeric_order]
  patientsPerCategory <- splitPatients(cl1, patients_fractions)
  # Category k gets k mutated genes: counts run from length(genes) down to 1.
  maxRunningSum <- length(genes)
  maxSum <- rep(rev(seq_len(maxRunningSum)), times=patientsPerCategory)
  mutated_data <- sapply(maxSum, function(x) {
    patient_mutation_profile <- rep(0, length(genes))
    patient_mutation_profile[seq_len(x)] <- 1
    # Shuffle so the set of mutated genes differs between patients.
    sample(patient_mutation_profile)
  })
  # BUGFIX: mutation_rate was previously ignored (the default of
  # create_random_uniform_mutatios was always used); forward it explicitly.
  # Current callers pass 0.0005, which equals that default, so behaviour for
  # existing call sites is unchanged.
  non_mutated_data <- create_random_uniform_mutatios(genes, n_patients = sum(classes==0),
                                                     mutation_rate = mutation_rate)
  data <- cbind(mutated_data, non_mutated_data)
  colnames(data) <- ordered_patients_fake_names
  # Restore the original patient order by name.
  data[, patients_fake_names]
}
# Partition `n` patients into category sizes.
#
# Each entry of `fractions` is rounded up to a whole patient count; whatever
# remains of `n` becomes the final block.
#
# @param n total number of patients (scalar).
# @param fractions per-category proportions of `n` (need not sum to 1).
# @return numeric vector of length `length(fractions) + 1`.
splitPatients <- function(n, fractions = c(0.05,0.1,0.1,0.2)) {
  sized_blocks <- ceiling(fractions * n)
  c(sized_blocks, n - sum(sized_blocks))
}
# Simulate uniform background mutations, one row per gene.
#
# Every genotype is an independent Binomial(2, mutation_rate) draw, i.e. the
# number of mutated alleles (0, 1 or 2) for that gene and patient.
#
# @param genes character vector of gene identifiers (become the row names).
# @param n_patients number of patients (columns) to simulate.
# @param mutation_rate per-allele mutation probability.
# @param col_names optional patient names for the columns (NULL = unnamed).
# @return integer matrix with length(genes) rows and n_patients columns.
create_random_uniform_mutatios <- function(genes, n_patients, mutation_rate=0.0005, col_names=NULL) {
  # Draw n_patients genotypes per gene, gene by gene, so seeded results
  # match the historical row-wise construction exactly.
  draw_gene <- function(i) rbinom(n_patients, 2, mutation_rate)
  mut <- do.call(rbind, lapply(seq_along(genes), draw_gene))
  dimnames(mut) <- list(genes, col_names)
  mut
}
# sample(size = 100,x = seq(-2,2,1),prob = c(0.05,0.15,0.5,0.20,0.1), replace = T)
|
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802977892e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125220-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 323 | r | testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802977892e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
require(shinyBS)
# HTML <- function(s1, s2=NULL)
# {
# # browser()
# out1 <- as.character( s1 )
# out2 <- s2
# out <- shiny::HTML(out1, out2)
# return(out)
# }
# List available data quarters, newest first.
#
# Scans the "quarters" subdirectory of the global DATADIR (which is
# concatenated without a separator, so DATADIR must end in "/" --
# NOTE(review): confirm), takes the first 8 characters of every file name as
# a YYYYMMDD stamp, and returns a named list suitable for a shiny
# selectInput: names look like "Q1 2020", values are the raw stamps.
getquarters <- function() {
  quarter_dir <- paste0(DATADIR, 'quarters')
  stamp_files <- list.files(quarter_dir, all.files = TRUE,
                            ignore.case = TRUE, full.names = FALSE, no.. = TRUE)
  stamps <- sort(substr(stamp_files, 1, 8), decreasing = TRUE)
  names(stamps) <- paste(quarters(as.Date(stamps, '%Y%m%d')), substr(stamps, 1, 4))
  as.list(stamps)
}
# Field names usable as drug variables in openFDA adverse-event queries.
#
# @return character vector of fully-qualified openFDA field paths.
getdrugvarchoices <- function(){
  openfda_fields <- paste0('patient.drug.openfda.',
                           c('generic_name',
                             'substance_name',
                             'brand_name',
                             'pharm_class_moa',
                             'pharm_class_cs',
                             'pharm_class_pe',
                             'pharm_class_epc'))
  c(openfda_fields,
    'patient.drug.medicinalproduct',
    'patient.drug.drugindication',
    'patient.drug.activesubstance.activesubstancename')
}
# Field names usable as device variables in openFDA queries.
#
# @return character vector (currently just the device name field).
getdevicevarchoices <- function(){
  paste0('device.openfda.', 'device_name')
}
# Two-tab shiny panel: a frequency table and the matching word cloud.
#
# @param cloud shiny output id rendered on the word-cloud tab.
# @param table shiny output id rendered on the table tab.
# @param types render type per tab: tab 1 is 'table' (html) or 'datatable';
#   tab 2 is 'table' (html) or anything else for a plot.
# @param names tab labels; the first one is selected initially.
# @param popheads,poptext popover titles and bodies for the two tabs
#   (defaults use the tt() translation helper, evaluated lazily).
wordcloudtabset <- function(cloud, table,
                            types= c('table', 'plot'),
                            names=c( "Tables","Word Cloud" ),
                            popheads = c('Frequency Table',tt('word1') ),
                            poptext = c('Counts', tt('word2') ) ) {
  table_tab <- tabPanel(
    names[1],
    wellPanel(
      if (types[1] == 'table') {
        htmlOutput_p(table, HTML(popheads[1]), HTML(poptext[1]), placement = 'top')
      } else if (types[1] == 'datatable') {
        dataTableOutput(table)
      }
    )
  )
  cloud_tab <- tabPanel(
    names[2],
    if (types[2] == 'table') {
      htmlOutput_p(cloud, HTML(popheads[2]), HTML(poptext[2]), placement = 'top')
    } else {
      plotOutput_p(cloud, HTML(popheads[2]), poptext[2], placement = 'top')
    }
  )
  tabsetPanel(table_tab, cloud_tab, selected = names[1])
}
# Three-tab shiny panel: a table tab, a word-cloud/plot tab, and a third tab
# surrounded by hover ("hoverinfo") and brush ("info") tables.
#
# Note: the popheads/poptext defaults of c(NULL, NULL, NULL) evaluate to
# NULL; the *_p helpers then fall back to tt()-based lookups.
#
# @param outputs character vector of three shiny output ids.
# @param types render type per tab: 'html', 'datatable' (tab 1 only),
#   anything else = plot.
# @param names tab labels; the first tab is selected initially.
# @param popheads,poptext popover titles/bodies forwarded to the *_p helpers.
maketabset <- function( outputs, types=c('html', 'plot'),
                        names=c( "Table2","Word Cloud2" ),
                        popheads = c(NULL, NULL, NULL),
                        poptext = c(NULL, NULL, NULL )) {
  tabsetPanel(
    tabPanel(names[1],
             wellPanel(
               if (types[1] == 'html'){
                 htmlOutput_p(outputs[1],
                              HTML( popheads[1] ), HTML(poptext[1]),
                              placement='top' )
               } else if ( types[1] == 'datatable' ) {
                 dataTableOutput( outputs[1] )
               } else {
                 plotOutput(outputs[1])
               }
             )
    ),
    tabPanel( names[2],
              if (types[2] == 'html'){
                htmlOutput_p(outputs[2],
                             HTML( popheads[2] ), HTML(poptext[2]),
                             placement='top')
              } else {
                plotOutput_p(outputs[2],
                             HTML( popheads[2] ), HTML(poptext[2]),
                             placement='top')
              }
    ),
    tabPanel( names[3],
              wellPanel(
                tableOutput("hoverinfo")
              ),
              if (types[3] == 'html'){
                htmlOutput_p(outputs[3],
                             HTML( popheads[3] ), HTML(poptext[3]),
                             placement='top')
              } else {
                # BUGFIX: removed a second, identical `types[3] == 'html'`
                # branch that was unreachable dead code.
                plotOutput_p(outputs[3],
                             HTML( popheads[3] ), HTML(poptext[3]),
                             placement='left')
              },
              wellPanel(
                tableOutput("info")
              )
    ), selected = names[1]
  )
}
# Two-tab variant of maketabset() (no hover/brush tab).
#
# Note: the popheads/poptext defaults of c(NULL, NULL, NULL) evaluate to
# NULL; the *_p helpers then fall back to tt()-based lookups.
#
# @param outputs character vector of two shiny output ids.
# @param types render type per tab: 'html', 'datatable' (tab 1 only),
#   anything else = plot.
# @param names tab labels; the first tab is selected initially.
# @param popheads,poptext popover titles/bodies forwarded to the *_p helpers.
maketabset2 <- function( outputs, types=c('html', 'plot'),
                         names=c( "Table2","Word Cloud2" ),
                         popheads = c(NULL, NULL, NULL),
                         poptext = c(NULL, NULL, NULL )) {
  first_tab <- tabPanel(
    names[1],
    wellPanel(
      if (types[1] == 'html') {
        htmlOutput_p(outputs[1], HTML(popheads[1]), HTML(poptext[1]), placement = 'top')
      } else if (types[1] == 'datatable') {
        dataTableOutput(outputs[1])
      } else {
        plotOutput(outputs[1])
      }
    )
  )
  second_tab <- tabPanel(
    names[2],
    if (types[2] == 'html') {
      htmlOutput_p(outputs[2], HTML(popheads[2]), HTML(poptext[2]), placement = 'top')
    } else {
      plotOutput_p(outputs[2], HTML(popheads[2]), HTML(poptext[2]), placement = 'top')
    }
  )
  tabsetPanel(first_tab, second_tab, selected = names[1])
}
# Resolve popover title ("head") and body ("text") strings for a UI element.
#
# Lookup order: if a function named pop<myname> exists it wins and must
# return a vector with elements 'head' and 'text'.  Otherwise explicitly
# supplied pophead/poptext are used, and any that are NULL are filled from
# the translation helper via tt("<myname>head") / tt("<myname>text").
#
# @param myname base name of the UI element.
# @param pophead,poptext explicit popover title/body, or NULL.
# @return named vector c(pophead = ..., poptext = ...).
getpopstrings <- function( myname, pophead, poptext )
{
  override_fun_name <- paste0('pop', myname)
  if (exists(override_fun_name)) {
    override <- get(override_fun_name)()
    pophead <- override['head']
    poptext <- override['text']
  } else {
    if (is.null(pophead)) {
      pophead <- tt(paste0(myname, 'head'))
    }
    if (is.null(poptext)) {
      poptext <- tt(paste0(myname, 'text'))
    }
  }
  c(pophead = pophead[[1]], poptext = poptext[[1]])
}
# htmlOutput wrapped in an optional popover (title/text via getpopstrings()).
#
# @param table shiny output id (named for historical reasons; it holds html).
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
htmlOutput_p <- function(table, pophead=NULL, poptext=NULL, placement='top')
{
  s <- getpopstrings( table, pophead, poptext)
  pophead <- s['pophead']
  poptext <- s['poptext']
  if( !is.null(pophead) )
  {
    popify(
      htmlOutput(table),
      HTML( paste('<b>', pophead,'</b>') ), poptext,
      placement=placement)
  }
  else
  {
    # BUGFIX: the fallback previously rendered tableOutput(table); for an
    # html output id the un-popified fallback must be htmlOutput() as well
    # (compare dataTableOutput_p, which is consistent in both branches).
    htmlOutput(table)
  }
}
# dataTableOutput wrapped in an optional popover (title/text via getpopstrings()).
#
# @param table shiny output id for the data table.
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
dataTableOutput_p <- function(table, pophead=NULL, poptext=NULL, placement='top')
{
  resolved <- getpopstrings(table, pophead, poptext)
  pophead <- resolved['pophead']
  poptext <- resolved['poptext']
  widget <- dataTableOutput(table)
  if (is.null(pophead)) {
    widget
  } else {
    popify(widget,
           HTML(paste('<b>', pophead, '</b>')), poptext,
           placement = placement)
  }
}
# plotOutput (with brush/hover wired to "plot_brush"/"plot_hover") wrapped in
# an optional popover.  Extra arguments are forwarded to plotOutput().
#
# @param plot shiny output id for the plot.
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
plotOutput_p <- function(plot, pophead=NULL, poptext=NULL, placement='top', ...)
{
  resolved <- getpopstrings(plot, pophead, poptext)
  pophead <- resolved['pophead']
  poptext <- resolved['poptext']
  widget <- plotOutput(plot, brush = "plot_brush",
                       hover = hoverOpts(id = 'plot_hover'), ...)
  if (is.null(pophead)) {
    widget
  } else {
    popify(widget, HTML(pophead), HTML(poptext), placement = placement)
  }
}
# selectInput wrapped in an optional popover; `usepop = FALSE` disables it.
#
# @param name,label,values,selected forwarded to shiny selectInput().
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
# @param usepop set to FALSE to suppress the popover entirely.
selectInput_p <- function( name, label, values, pophead=NULL, poptext=NULL,
                           placement='bottom', usepop=TRUE, selected=NULL, ...)
{
  resolved <- getpopstrings(name, pophead, poptext)
  pophead <- resolved['pophead']
  poptext <- resolved['poptext']
  widget <- selectInput(name, label, values, selected)
  if (!is.null(pophead) && usepop) {
    popify(widget, HTML(pophead), poptext, placement = placement)
  } else {
    widget
  }
}
# textInput wrapped in an optional popover (title/text via getpopstrings()).
#
# @param name,label,value forwarded to shiny textInput().
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
textInput_p <- function( name, label, value, pophead=NULL, poptext=NULL,
                         placement='bottom', ...)
{
  resolved <- getpopstrings(name, pophead, poptext)
  pophead <- resolved['pophead']
  poptext <- resolved['poptext']
  widget <- textInput(name, label, value)
  if (is.null(pophead)) {
    widget
  } else {
    popify(widget, HTML(pophead), poptext, placement = placement)
  }
}
# numericInput wrapped in an optional popover (title/text via getpopstrings()).
#
# @param name,label,value,min,max,step forwarded to shiny numericInput().
# @param pophead,poptext optional popover title/body; resolved by getpopstrings().
# @param placement popover placement passed to shinyBS popify().
numericInput_p <- function( name, label, value, min=NA, max=NA, step=NA, pophead=NULL, poptext=NULL,
                            placement='bottom', ...)
{
  resolved <- getpopstrings(name, pophead, poptext)
  pophead <- resolved['pophead']
  poptext <- resolved['poptext']
  widget <- numericInput(name, label, value, min, max, step)
  if (is.null(pophead)) {
    widget
  } else {
    popify(widget, HTML(pophead), poptext, placement = placement)
  }
}
# Debug/prototype helper.
# NOTE(review): uiOutput('usepop') returns a shiny tag object, never NULL,
# so the is.null() fallback below never fires and the function returns the
# tag pasted together with 'Hello'.  Looks like leftover scaffolding --
# confirm the intent before relying on it.
usepopup <- function()
{
  usepop <- uiOutput('usepop')
  # print(usepop)
  if (is.null(usepop))
  (
    usepop <- TRUE
  )
  return( paste(usepop, 'Hello' ) )
}
# UI placeholder for the 'date1' html output slot.
renderDates <- function() {
  htmlOutput('date1')
}
# Build an HTML <iframe> embedding `s`, full width and 600px tall.
#
# @param s url placed in the iframe's src attribute (not escaped).
# @return a single character string of raw HTML.
renderiframe <- function( s )
{
  paste('<iframe src="', s, '" width=100% height=600 ></iframe>', sep = "")
}
# HTML <- function(s1, s2=NULL)
# {
# # browser()
# out1 <- as.character( s1 )
# out2 <- s2
# out <- shiny::HTML(out1, out2)
# return(out)
# }
getquarters <- function() {
# browser()
curdir <- paste0( DATADIR, 'quarters')
flist <- list.files( curdir, all.files = TRUE,
ignore.case = TRUE, full.names = FALSE, no..=TRUE)
myquarters <- substr( flist, 1, 8)
myquarters <- sort( myquarters, decreasing = TRUE)
mydates <- as.Date(myquarters, '%Y%m%d')
s <- quarters(mydates)
names(myquarters) <- paste( s, substr(myquarters,1,4 ) )
return( as.list( myquarters ) )
}
getdrugvarchoices <- function(){
openfdavars <- c(
'generic_name',
'substance_name',
'brand_name',
'pharm_class_moa',
'pharm_class_cs',
'pharm_class_pe',
'pharm_class_epc')
openfdavars <- paste0( 'patient.drug.openfda.', openfdavars )
s <- c( openfdavars, 'patient.drug.medicinalproduct',
'patient.drug.drugindication',
'patient.drug.activesubstance.activesubstancename')
return(s)
}
getdevicevarchoices <- function(){
openfdavars <- c(
'device_name')
openfdavars <- paste0( 'device.openfda.', openfdavars )
s <- c( openfdavars)
return(s)
}
wordcloudtabset <- function(cloud, table,
types= c('table', 'plot'),
names=c( "Tables","Word Cloud" ),
popheads = c('Frequency Table',tt('word1') ),
poptext = c('Counts', tt('word2') ) ) {
tabsetPanel(
tabPanel(names[1],
wellPanel(
if (types[1] == 'table'){
htmlOutput_p(table,
HTML( popheads[1] ), HTML(poptext[1]),
placement='top')
} else if (types[1] == 'datatable') {
dataTableOutput( table )
}
)
),
tabPanel( names[2],
if (types[2] == 'table'){
htmlOutput_p(cloud,
HTML( popheads[2] ), HTML(poptext[2]),
placement='top')
} else {
plotOutput_p(cloud,
HTML( popheads[2] ), poptext[2],
placement='top')
}
), selected = names[1]
)
}
maketabset <- function( outputs, types=c('html', 'plot'),
names=c( "Table2","Word Cloud2" )
,
popheads = c(NULL, NULL, NULL) ,
poptext = c(NULL, NULL, NULL )
) {
tabsetPanel(
tabPanel(names[1],
wellPanel(
if (types[1] == 'html'){
htmlOutput_p(outputs[1],
HTML( popheads[1] ), HTML(poptext[1]),
placement='top' )
} else if ( types[1] == 'datatable' )
{
dataTableOutput( outputs[1] )
}
else {
plotOutput(outputs[1])
}
)
),
tabPanel( names[2],
if (types[2] == 'html'){
htmlOutput_p(outputs[2],
HTML( popheads[2] ), HTML(poptext[2]),
placement='top')
} else {
plotOutput_p(outputs[2],
HTML( popheads[2] ), HTML(poptext[2]),
placement='top')
}
),
tabPanel( names[3],
wellPanel(
tableOutput("hoverinfo")
),
if (types[3] == 'html'){
htmlOutput_p(outputs[3],
HTML( popheads[3] ), HTML(poptext[3]),
placement='top')
} else if (types[3] == 'html'){
htmlOutput_p(outputs[3],
HTML( popheads[3] ), HTML(poptext[3]),
placement='top')
} else {
plotOutput_p(outputs[3],
HTML( popheads[3] ), HTML(poptext[3]),
placement='left')
},
wellPanel(
tableOutput("info")
)
), selected = names[1]
)
}
maketabset2 <- function( outputs, types=c('html', 'plot'),
names=c( "Table2","Word Cloud2" )
,
popheads = c(NULL, NULL, NULL) ,
poptext = c(NULL, NULL, NULL )
) {
tabsetPanel(
tabPanel(names[1],
wellPanel(
if (types[1] == 'html'){
htmlOutput_p(outputs[1],
HTML( popheads[1] ), HTML(poptext[1]),
placement='top' )
} else if ( types[1] == 'datatable' )
{
dataTableOutput( outputs[1] )
}
else {
plotOutput(outputs[1])
}
)
),
tabPanel( names[2],
if (types[2] == 'html'){
htmlOutput_p(outputs[2],
HTML( popheads[2] ), HTML(poptext[2]),
placement='top')
} else {
plotOutput_p(outputs[2],
HTML( popheads[2] ), HTML(poptext[2]),
placement='top')
}
)
, selected = names[1]
)
}
getpopstrings <- function( myname, pophead, poptext )
{
helpfunname <- paste0('pop', myname )
# browser()
# if function called popmyname exists, call it to get pop heads
# otherwise if pophead or poptext are null get tt(mynametext) or tt(mynamehead)
if ( exists( helpfunname ) )
{
helpfun <- get( helpfunname)
s <- helpfun()
pophead <- s['head']
poptext <- s['text']
} else {
if (is.null(pophead))
{
pophead <- tt( paste0(myname, 'head' ) )
# print(pophead)
}
if (is.null(poptext))
{
poptext <- tt( paste0(myname, 'text' ) )
}
}
return ( c( pophead=pophead[[1]], poptext=poptext[[1]] ) )
}
htmlOutput_p <- function(table, pophead=NULL, poptext=NULL, placement='top')
{
s <- getpopstrings( table, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null(pophead) )
{
popify(
htmlOutput(table),
HTML( paste('<b>', pophead,'</b>') ), poptext,
placement=placement)
}
else
{
tableOutput(table)
}
}
dataTableOutput_p <- function(table, pophead=NULL, poptext=NULL, placement='top')
{
s <- getpopstrings( table, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null(pophead) )
{
popify(
dataTableOutput(table),
HTML( paste('<b>', pophead,'</b>') ), poptext,
placement=placement)
}
else
{
dataTableOutput(table)
}
}
plotOutput_p <- function(plot, pophead=NULL, poptext=NULL, placement='top', ...)
{
s <- getpopstrings( plot, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null(pophead) )
{
popify(
plotOutput(plot, brush = "plot_brush", hover=hoverOpts(id='plot_hover'), ...),
HTML( pophead ), HTML(poptext),
placement=placement)
}
else
{
plotOutput(plot, brush = "plot_brush", hover=hoverOpts(id='plot_hover'), ...)
}
}
selectInput_p <- function( name, label, values, pophead=NULL, poptext=NULL,
placement='bottom', usepop=TRUE, selected=NULL, ...)
{
# browser()
s <- getpopstrings( name, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null( pophead ) & usepop )
{
popify(
selectInput(name, label , values, selected),
HTML( pophead ), poptext,
placement=placement)
}
else
{
selectInput(name, label , values, selected)
}
}
textInput_p <- function( name, label, value, pophead=NULL, poptext=NULL,
placement='bottom', ...)
{
s <- getpopstrings( name, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null(pophead) )
{
popify(
textInput(name, label , value),
HTML( pophead ), poptext,
placement=placement)
}
else
{
textInput(name, label , value)
}
}
numericInput_p <- function( name, label, value, min=NA, max=NA, step=NA, pophead=NULL, poptext=NULL,
placement='bottom', ...)
{
s <- getpopstrings( name, pophead, poptext)
pophead <- s['pophead']
poptext <- s['poptext']
if( !is.null(pophead) )
{
popify(
numericInput(name, label , value, min, max, step),
HTML( pophead ), poptext,
placement=placement)
}
else
{
numericInput(name, label , value, min, max, step)
}
}
usepopup <- function()
{
usepop <- uiOutput('usepop')
# print(usepop)
if (is.null(usepop))
(
usepop <- TRUE
)
return( paste(usepop, 'Hello' ) )
}
renderDates <- function() {
( htmlOutput('date1') )
}
renderiframe <- function( s )
{
out <- paste0('<iframe src="', s, '" width=100% height=600 ></iframe>')
return(out)
} |
# Exploratory scratch script: simulate a standard-normal sample, eyeball it
# with a few base/ggplot2 plots, and poke at str()/dput() output.
library(tidyverse)
n <- 100000
#df <- data.frame(1:n, runif(n, 0, 6))
df <- data.frame(1:n, rnorm(n, 0, 1))
names(df) <- c('x', 'y')
ggplot(data=df, aes(x=x,y=y)) + geom_point()
hist(df$y)
mean(df$y)
# Compactly Display the Structure of an Arbitrary R Object
str(df)
# Write an Object to a File or Recreate it
# NOTE(review): dput() on a 100000-row data frame floods the console.
dput(df)
df2x = list(x=c(1,2,3), y=c(2,4,6))
df2x_str = structure(df2x)
#scatter.smooth(x=df2x$x, y=df2x$y, main="X ~ Y")
| /stats.R | no_license | lmartinho/r-playground | R | false | false | 437 | r | library(tidyverse)
n <- 100000
#df <- data.frame(1:n, runif(n, 0, 6))
df <- data.frame(1:n, rnorm(n, 0, 1))
names(df) <- c('x', 'y')
ggplot(data=df, aes(x=x,y=y)) + geom_point()
hist(df$y)
mean(df$y)
# Compactly Display the Structure of an Arbitrary R Object
str(df)
# Write an Object to a File or Recreate it
dput(df)
df2x = list(x=c(1,2,3), y=c(2,4,6))
df2x_str = structure(df2x)
#scatter.smooth(x=df2x$x, y=df2x$y, main="X ~ Y")
|
#' Get database connection
#'
#'
#' @return #' A DBI dataconnect object
#' @export
#' @import here
#' @import RPostgres
#'
#' @examples
#' conn <- rdb_connect()
#'
rdb_connect <- function() {
# configdir <- Sys.getenv("RDCONFIG")
cnfg <- yaml::yaml.load_file(here("rconfig.yml"))
conSuper <- dbConnect( RPostgres::Postgres(),
dbname = cnfg$dbname,
host = cnfg$host,
port = cnfg$port,
password = cnfg$password,
user = cnfg$user )
conSuper
} | /R/rbd_connect.R | permissive | joeheywood/resdata | R | false | false | 603 | r |
#' Get database connection
#'
#'
#' @return #' A DBI dataconnect object
#' @export
#' @import here
#' @import RPostgres
#'
#' @examples
#' conn <- rdb_connect()
#'
rdb_connect <- function() {
# configdir <- Sys.getenv("RDCONFIG")
cnfg <- yaml::yaml.load_file(here("rconfig.yml"))
conSuper <- dbConnect( RPostgres::Postgres(),
dbname = cnfg$dbname,
host = cnfg$host,
port = cnfg$port,
password = cnfg$password,
user = cnfg$user )
conSuper
} |
# Auto-generated fuzzer regression input for eDMA:::PowerSet (valgrind run);
# replays the recorded argument list and prints the result structure.
testlist <- list(iK = 200411660L)
result <- do.call(eDMA:::PowerSet,testlist)
str(result)
result <- do.call(eDMA:::PowerSet,testlist)
str(result) |
# Radar (spider) chart of IDM indicators for one axis ("Eixo").
#
# Compares three reference levels on a 0-10 scale, using the most recent
# year in the data:
#   1. the reference municipality (global LocRef$Localidade),
#   2. the mean over its planning region (LocRef$RPSEGPLAN, via RegioesGoias),
#   3. the state-wide mean (Goiás).
# Relies on the globals IDM, LocRef and RegioesGoias; indicator names are
# first shortened with fct_recode so they fit around the radar.
# Returns a ggplot/ggradar object.
RadarIDM <- function(Eixo) {
  # Shorten the (Portuguese) indicator labels for display.
  dados <- IDM %>%
    mutate(Variável = fct_recode(Variável,
                                 "Equilíbrio Orçamentário" = "Equilíbrio Orçamentário do Município",
                                 "Saúde Bucal" = "Cobertura ESF Saúde Bucal",
                                 "Morte causas ext." = "Morte por causas externas",
                                 "Var. dos empregos" = "Variação do numero de empregos formais",
                                 "Escolaridade" = "Nível de escolaridade dos trabalhadores",
                                 "Adeq. ens. infantil" = "Adequação dos professores do ensino infantil",
                                 "Adeq. ens. fundamental" = "Adequação dos professores do ensino fundamental",
                                 "Adeq. ens. médio" = "Adequação dos professores do ensino médio",
                                 "Água tratada" = "Cobertura da rede de água tratada",
                                 "Esgoto" = "Cobertura da rede de esgoto",
                                 "Energia elétrica" = "Cobertura da rede de energia elétrica",
                                 "Tel. fixa e internet" = "Cobertura da rede de telefonia fixa e internet",
                                 "Atend. educ. 4 a 5 anos" = "Atendimento educacional da população de 4 a 5 anos",
                                 "Atend. educ. 6 a 14 anos" = "Atendimento educacional da população de 6 a 14 anos",
                                 "Atend. educ. 15 a 17 anos" = "Atendimento educacional da população de 15 a 17 anos",
                                 "Infraestrutura das esc. públicas" = "Infraestrutura dos prédios das escolas publicas urbanas"
    ))
  # Level 1: the reference municipality itself, latest year only.
  level1 <- paste("Mun. de",LocRef$Localidade)
  tabela1 <- dados %>%
    filter(!is.na(Valor) & Localidade == LocRef$Localidade & Ano == max(Ano) & IDM == Eixo) %>%
    mutate(Referencia = level1) %>%
    select(Variável,Valor,Referencia)
  # Level 2: mean over the municipality's planning region (per-municipality
  # means first, then averaged across municipalities).
  level2 <- paste("RP",LocRef$RPSEGPLAN)
  tabela2 <- dados %>%
    merge(RegioesGoias) %>%
    filter(!is.na(Valor) & RPSEGPLAN == LocRef$RPSEGPLAN & Ano == max(Ano) & IDM == Eixo) %>%
    group_by(Localidade,Variável) %>%
    summarise(Valor = mean(Valor)) %>%
    group_by(Variável) %>%
    summarise(Valor = mean(Valor)) %>%
    mutate(Referencia = level2)
  # Level 3: state-wide mean, same two-stage averaging.
  level3 <- "Estado de Goiás"
  tabela3 <- dados %>%
    filter(!is.na(Valor) & Ano == max(Ano) & IDM == Eixo) %>%
    group_by(Localidade,Variável) %>%
    summarise(Valor = mean(Valor)) %>%
    group_by(Variável) %>%
    summarise(Valor = mean(Valor)) %>%
    mutate(Referencia = level3)
  # Stack the three levels, widen to one column per indicator (reshape2::dcast)
  # and draw the radar on a fixed 0-10 scale.
  rbind(tabela1,tabela2,tabela3) %>%
    mutate(Referencia = factor(Referencia,ordered = T,levels = c(level1,level2,level3))) %>%
    dcast(Referencia~Variável,value.var = "Valor") %>%
    ggradar(grid.max = 10, grid.mid = 5, grid.min = 0,
            grid.label.size = 5,
            values.radar = c(0,5,10),
            legend.position = "bottom",
            legend.text.size = 6,
            axis.label.offset = 1.1,
            group.line.width = 0.5,
            group.point.size = 2,
            axis.label.size = 2.5) +
    theme(plot.caption = element_text(size = 8)) +
    labs(caption = "Fonte: Elaborado pelo núcleo de base do OMT/GYN a partir de dados do BDE/IMB, com acesso em 19/03/2020.")
}
dados <- IDM %>%
mutate(Variável = fct_recode(Variável,
"Equilíbrio Orçamentário" = "Equilíbrio Orçamentário do Município",
"Saúde Bucal" = "Cobertura ESF Saúde Bucal",
"Morte causas ext." = "Morte por causas externas",
"Var. dos empregos" = "Variação do numero de empregos formais",
"Escolaridade" = "Nível de escolaridade dos trabalhadores",
"Adeq. ens. infantil" = "Adequação dos professores do ensino infantil",
"Adeq. ens. fundamental" = "Adequação dos professores do ensino fundamental",
"Adeq. ens. médio" = "Adequação dos professores do ensino médio",
"Água tratada" = "Cobertura da rede de água tratada",
"Esgoto" = "Cobertura da rede de esgoto",
"Energia elétrica" = "Cobertura da rede de energia elétrica",
"Tel. fixa e internet" = "Cobertura da rede de telefonia fixa e internet",
"Atend. educ. 4 a 5 anos" = "Atendimento educacional da população de 4 a 5 anos",
"Atend. educ. 6 a 14 anos" = "Atendimento educacional da população de 6 a 14 anos",
"Atend. educ. 15 a 17 anos" = "Atendimento educacional da população de 15 a 17 anos",
"Infraestrutura das esc. públicas" = "Infraestrutura dos prédios das escolas publicas urbanas"
))
level1 <- paste("Mun. de",LocRef$Localidade)
tabela1 <- dados %>%
filter(!is.na(Valor) & Localidade == LocRef$Localidade & Ano == max(Ano) & IDM == Eixo) %>%
mutate(Referencia = level1) %>%
select(Variável,Valor,Referencia)
level2 <- paste("RP",LocRef$RPSEGPLAN)
tabela2 <- dados %>%
merge(RegioesGoias) %>%
filter(!is.na(Valor) & RPSEGPLAN == LocRef$RPSEGPLAN & Ano == max(Ano) & IDM == Eixo) %>%
group_by(Localidade,Variável) %>%
summarise(Valor = mean(Valor)) %>%
group_by(Variável) %>%
summarise(Valor = mean(Valor)) %>%
mutate(Referencia = level2)
level3 <- "Estado de Goiás"
tabela3 <- dados %>%
filter(!is.na(Valor) & Ano == max(Ano) & IDM == Eixo) %>%
group_by(Localidade,Variável) %>%
summarise(Valor = mean(Valor)) %>%
group_by(Variável) %>%
summarise(Valor = mean(Valor)) %>%
mutate(Referencia = level3)
rbind(tabela1,tabela2,tabela3) %>%
mutate(Referencia = factor(Referencia,ordered = T,levels = c(level1,level2,level3))) %>%
dcast(Referencia~Variável,value.var = "Valor") %>%
ggradar(grid.max = 10, grid.mid = 5, grid.min = 0,
grid.label.size = 5,
values.radar = c(0,5,10),
legend.position = "bottom",
legend.text.size = 6,
axis.label.offset = 1.1,
group.line.width = 0.5,
group.point.size = 2,
axis.label.size = 2.5) +
theme(plot.caption = element_text(size = 8)) +
labs(caption = "Fonte: Elaborado pelo núcleo de base do OMT/GYN a partir de dados do BDE/IMB, com acesso em 19/03/2020.")
} |
# Plot end-activity frequencies computed by end_activities().
#
# Dispatches on the analysis level stored in attr(x, "level"):
#   - "log" / "case":        no plot available (errors)
#   - "activity":            bar chart of end-activity frequency
#   - "resource":            bar chart of end-resource frequency
#   - "resource-activity":   heat map of resource x activity counts
# If the metric was computed on a grouped log (mapping$groups set), the plot
# is facetted by the grouping variables.  Returns the ggplot object.
plot_end_activities <- function(x, ...) {
  mapping <- attr(x, "mapping")
  level <- attr(x, "level")
  # Dummy binding -- presumably to satisfy R CMD check for the non-standard
  # evaluation below; confirm.
  absolute <- NULL
  if(level == "log") {
    stop("Plot not available for this level of analysis")
  }
  else if(level == "case") {
    stop("Plot not available for this level of analysis")
  }
  else if(level == "activity") {
    # Bars ordered by frequency; flipped so activity labels read horizontally.
    x %>%
      ggplot(aes_string(glue("reorder({mapping$activity_id}, absolute)"), "absolute")) +
      geom_col(aes(fill = absolute)) +
      scale_fill_continuous_tableau(name = "End Activity Frequency", palette = "Blue")+
      theme_light() +
      coord_flip() +
      labs(x = "Activity", y = "End Activity Frequency") -> p
  }
  else if(level == "resource") {
    x %>%
      ggplot(aes_string(glue("reorder({mapping$resource_id}, absolute)"), "absolute")) +
      geom_col(aes(fill = absolute)) +
      scale_fill_continuous_tableau(name = "End Activity Resource Frequency", palette = "Blue")+
      theme_light() +
      coord_flip() +
      labs(x = "Resource", y = "End Activity Resource Frequency") -> p
  }
  else if(level == "resource-activity") {
    # Tile heat map with the count printed inside each cell.
    x %>%
      ggplot(aes_string(mapping$resource_id, mapping$activity_id)) +
      geom_tile(aes(fill = absolute)) +
      geom_text(aes(label = absolute), fontface = "bold", color = "white") +
      scale_fill_continuous_tableau(name = "End Resource-Activity Frequency", palette = "Blue")+
      theme_light() +
      coord_flip() +
      labs(x = "Resource", y = "Activity") +
      theme(axis.text.x = element_text(angle = 45, hjust = 1)) -> p
  }
  if(!is.null(mapping$groups)) {
    # Facet by the grouping variables of the original grouped event log.
    p <- p + facet_grid(as.formula(paste(c(paste(mapping$groups, collapse = "+"), "~." ), collapse = "")), scales = "free_y")
  }
  return(p)
}
| /R/plot.end_activity.R | no_license | cran/edeaR | R | false | false | 1,693 | r |
plot_end_activities <- function(x, ...) {
mapping <- attr(x, "mapping")
level <- attr(x, "level")
absolute <- NULL
if(level == "log") {
stop("Plot not available for this level of analysis")
}
else if(level == "case") {
stop("Plot not available for this level of analysis")
}
else if(level == "activity") {
x %>%
ggplot(aes_string(glue("reorder({mapping$activity_id}, absolute)"), "absolute")) +
geom_col(aes(fill = absolute)) +
scale_fill_continuous_tableau(name = "End Activity Frequency", palette = "Blue")+
theme_light() +
coord_flip() +
labs(x = "Activity", y = "End Activity Frequency") -> p
}
else if(level == "resource") {
x %>%
ggplot(aes_string(glue("reorder({mapping$resource_id}, absolute)"), "absolute")) +
geom_col(aes(fill = absolute)) +
scale_fill_continuous_tableau(name = "End Activity Resource Frequency", palette = "Blue")+
theme_light() +
coord_flip() +
labs(x = "Resource", y = "End Activity Resource Frequency") -> p
}
else if(level == "resource-activity") {
x %>%
ggplot(aes_string(mapping$resource_id, mapping$activity_id)) +
geom_tile(aes(fill = absolute)) +
geom_text(aes(label = absolute), fontface = "bold", color = "white") +
scale_fill_continuous_tableau(name = "End Resource-Activity Frequency", palette = "Blue")+
theme_light() +
coord_flip() +
labs(x = "Resource", y = "Activity") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) -> p
}
if(!is.null(mapping$groups)) {
p <- p + facet_grid(as.formula(paste(c(paste(mapping$groups, collapse = "+"), "~." ), collapse = "")), scales = "free_y")
}
return(p)
}
|
# Auto-generated fuzzer regression input for meteor:::ET0_Makkink (valgrind
# run); replays the recorded argument list and prints the result structure.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809718668e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Generate and persist the package's simulated example datasets.
# Run manually from the package root; writes data/ and the internal sysdata.
source("data-raw/generation-helpers.R")
# Fixed seed so the shipped datasets are reproducible.
set.seed(42L)
# generate indices for subsignals
ind_tbl <- gen_sim_ind(nrow = 100L, ncol = 42L, max_subsig = 3L)
# generate signals - needed mainly for testing purposes
# (e.g. for mbte_extract_subsignals())
sig_tbl <- gen_sim_sig_default(ind_tbl)
# save internal signal-dataset (meant for testing only)
# NOTE(review): devtools::use_data() is deprecated in favour of
# usethis::use_data() -- consider switching.
devtools::use_data(sig_tbl, internal = TRUE, overwrite = TRUE)
# add 0-padding, convert dataset to long form and rename time column
raw_signals <- gen_sim_raw(sig_tbl) %>%
  rename(t = time)
# dataset for examples (to avoid having to redo nesting and subsignal extraction
# over and over again)
filtered_signals <- raw_signals %>%
  group_by(mv) %>%
  new_tbl_mbte(t, value) %>%
  mbte_nest_signals() %>%
  mbte_extract_subsignals()
# keep only signals with more than 20 observations, then restore the
# tbl_mbte attributes dropped by filter()
filtered_signals <- filtered_signals %>%
  filter(map_int(signal, nrow) > 20) %>%
  mbte_reconstruct(filtered_signals)
# save datasets for users
devtools::use_data(raw_signals, filtered_signals, overwrite = TRUE)
| /data-raw/gen_sample_datasets.R | no_license | mkerschbaumer/mbte | R | false | false | 1,028 | r | source("data-raw/generation-helpers.R")
set.seed(42L)
# generate indices for subsignals
ind_tbl <- gen_sim_ind(nrow = 100L, ncol = 42L, max_subsig = 3L)
# generate signals - needed mainly for testing purposes
# (e.g. for mbte_extract_subsignals())
sig_tbl <- gen_sim_sig_default(ind_tbl)
# save internal signal-dataset (meant for testing only)
devtools::use_data(sig_tbl, internal = TRUE, overwrite = TRUE)
# add 0-padding, convert dataset to long form and rename time column
raw_signals <- gen_sim_raw(sig_tbl) %>%
rename(t = time)
# dataset for examples (to avoid having to redo nesting and subsignal extraction
# over and over again)
filtered_signals <- raw_signals %>%
group_by(mv) %>%
new_tbl_mbte(t, value) %>%
mbte_nest_signals() %>%
mbte_extract_subsignals()
filtered_signals <- filtered_signals %>%
filter(map_int(signal, nrow) > 20) %>%
mbte_reconstruct(filtered_signals)
# save datasets for users
devtools::use_data(raw_signals, filtered_signals, overwrite = TRUE)
|
# TODO: move this to data_raw & use usethis::use_data
#' tidy dataframe of the USArrests data
#'
#' Reshapes the built-in \code{USArrests} dataset into long form, with one
#' row per state/feature combination.
#'
#' @return A tibble with columns \code{sample}, \code{feature}, \code{value}.
#' @import dplyr
#' @export
tidyUSArrests = function() {
  wide <- mutate(USArrests, sample = rownames(USArrests))
  tidyr::pivot_longer(wide, -sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' Self-joins the long-form USArrests data on \code{sample}, producing every
#' ordered pair of features observed for the same state.
#'
#' @import dplyr
#' @export
tidyUSArrestsCooccurrence = function() {
  long <- tidyUSArrests()
  left <- rename(long, feature1 = feature, value1 = value)
  right <- rename(long, feature2 = feature, value2 = value)
  return(inner_join(left, right, by = "sample"))
}
#' tidy dataframe of the USArrests data
#'
#' Like \code{tidyUSArrests()}, but the numeric features are first binned
#' with \code{infotheo::discretize()} before pivoting to long form.
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrests = function() {
  binned <- infotheo::discretize(USArrests)
  binned <- mutate(binned, sample = rownames(USArrests))
  tidyr::pivot_longer(binned, -sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' Self-joins the discretized long-form USArrests data on \code{sample},
#' producing every ordered pair of discretized features per state.
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrestsCooccurrence = function() {
  long <- tidyDiscreteUSArrests()
  left <- rename(long, feature1 = feature, value1 = value)
  right <- rename(long, feature2 = feature, value2 = value)
  return(inner_join(left, right, by = "sample"))
}
#' tidy dataframe of the Iris data with features & outcomes
#'
#' Converts the built-in \code{iris} dataset to long form: measurement
#' columns become \code{feature}/\code{value} pairs and \code{Species} is
#' renamed to \code{outcome}.
#'
#' @import dplyr
#' @export
tidyIris = function() {
  flowers <- iris %>% mutate(sample = row_number())
  flowers <- flowers %>%
    rename(
      Sepal_Length = Sepal.Length,
      Sepal_Width = Sepal.Width,
      Petal_Length = Petal.Length,
      Petal_Width = Petal.Width
    )
  flowers %>%
    tidyr::pivot_longer(
      cols = c(Sepal_Length, Sepal_Width, Petal_Length, Petal_Width),
      names_to = "feature"
    ) %>%
    rename(outcome = Species)
}
#
# ```{r}
# # devtools::load_all("..")
# testData = bloodResultsSimulation(1000)$data
#
# #### Continuous probability estimation ----
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="SGolay"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# # debug(probabilitiesFromContinuous_SGolay)
# # debug(applySGolayFilter)
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="Kernel"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# ```
missingData = function() {
# start with a defintion for our test data
# feature A is present in 80% of outcome 1; 20% of outcome 2 - there is information in missingness
# feature B is present in 10% of outcome 1; 10% of outcome 2 - there is no information in missingness
# feature C is present in 40% of outcome 1; 20% of outcome 2 - there is information but less than in A
# feature D is present in 100% of outcome 1; 100% of outcome 2 - not missing / no information
missingness = tibble(
feature = c("A","A","B","B","C","C","D","D"),
outcome = c(1,2,1,2,1,2,1,2),
presence = c(0.8,0.2,0.1,0.1,0.4,0.2,1,1)
)
# outcome 1 seen in 60% of cases outcome 2 in 40%
expectedness = tibble(
outcome = c(1,2),
expected = c(60,40)
)
# generate a complete data set with a random value and missingness flag
equivData = expectedness %>% left_join(missingness, by="outcome") %>% group_by(feature,outcome,expected,presence) %>% group_modify(function(d,g,..) {
return(tibble(
value = sample.int(4,size = g$expected, replace = TRUE),
status = c(rep("present",round(g$presence*g$expected)),rep("absent",round((1-g$presence)*g$expected)))
))
}) %>% group_by(feature) %>% arrange(outcome) %>% mutate(sample = c(1:100))
# create test data set with missing values
data = equivData %>% filter(status != "absent")
return(list(missingness= missingness, expectedness = expectedness, data=data,equivData=equivData))
} | /R/testData.R | permissive | terminological/tidy-info-stats | R | false | false | 3,838 | r | # TODO: move this to data_raw & use usethis::use_data
#' tidy dataframe of the USArrests data
#'
#' @import dplyr
#' @export
tidyUSArrests = function() {
USArrests %>%
mutate(sample = rownames(USArrests)) %>%
tidyr::pivot_longer(-sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' @import dplyr
#' @export
tidyUSArrestsCooccurrence = function() {
lhs = tidyUSArrests() %>% rename(feature1=feature, value1=value)
rhs = tidyUSArrests() %>% rename(feature2=feature, value2=value)
return(lhs %>% inner_join(rhs, by="sample"))
}
#' tidy dataframe of the USArrests data
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrests = function() {
infotheo::discretize(USArrests) %>%
mutate(sample = rownames(USArrests)) %>%
tidyr::pivot_longer(-sample, names_to = "feature", values_to = "value")
}
#' tidy dataframe of the USArrests data with co-occurence of features
#'
#' @import dplyr
#' @export
tidyDiscreteUSArrestsCooccurrence = function() {
lhs = tidyDiscreteUSArrests() %>% rename(feature1=feature, value1=value)
rhs = tidyDiscreteUSArrests() %>% rename(feature2=feature, value2=value)
return(lhs %>% inner_join(rhs, by="sample"))
}
#' tidy dataframe of the Iris data with features & outcomes
#'
#' @import dplyr
#' @export
tidyIris = function() {
iris %>%
mutate(sample = row_number()) %>%
rename(
Sepal_Length = Sepal.Length,
Sepal_Width = Sepal.Width,
Petal_Length = Petal.Length,
Petal_Width = Petal.Width
) %>%
tidyr::pivot_longer(cols=c(Sepal_Length,Sepal_Width,Petal_Length,Petal_Width), names_to = "feature") %>% rename(outcome = Species)
}
#
# ```{r}
# # devtools::load_all("..")
# testData = bloodResultsSimulation(1000)$data
#
# #### Continuous probability estimation ----
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="SGolay"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# # debug(probabilitiesFromContinuous_SGolay)
# # debug(applySGolayFilter)
#
# ggplot(
# testData %>% group_by(feature,outcome) %>% tidyinfostats::probabilitiesFromContinuous(value, method="Kernel"),
# aes(x=value,y=p_x, colour=outcome)) + geom_point() + facet_wrap(vars(feature))
#
# ```
# Simulate a dataset with informative (and uninformative) missingness.
# Returns a list with:
#   missingness  - per feature/outcome presence probabilities
#   expectedness - marginal outcome frequencies (60/40 over 100 cases)
#   equivData    - the complete simulated table, with a present/absent flag
#   data         - equivData with the "absent" rows removed
# NOTE(review): uses sample.int(), so results depend on the RNG state --
# callers should set.seed() for reproducibility. Requires dplyr/tidyr.
missingData = function() {
# start with a definition for our test data
# feature A is present in 80% of outcome 1; 20% of outcome 2 - there is information in missingness
# feature B is present in 10% of outcome 1; 10% of outcome 2 - there is no information in missingness
# feature C is present in 40% of outcome 1; 20% of outcome 2 - there is information but less than in A
# feature D is present in 100% of outcome 1; 100% of outcome 2 - not missing / no information
missingness = tibble(
feature = c("A","A","B","B","C","C","D","D"),
outcome = c(1,2,1,2,1,2,1,2),
presence = c(0.8,0.2,0.1,0.1,0.4,0.2,1,1)
)
# outcome 1 seen in 60% of cases outcome 2 in 40%
expectedness = tibble(
outcome = c(1,2),
expected = c(60,40)
)
# generate a complete data set with a random value and missingness flag
# NOTE(review): the third formal `..` looks like a typo for `...`;
# group_modify() supplies only two arguments, so it is never bound.
equivData = expectedness %>% left_join(missingness, by="outcome") %>% group_by(feature,outcome,expected,presence) %>% group_modify(function(d,g,..) {
return(tibble(
value = sample.int(4,size = g$expected, replace = TRUE),
status = c(rep("present",round(g$presence*g$expected)),rep("absent",round((1-g$presence)*g$expected)))
))
# sample = 1:100 assumes exactly 100 rows per feature (60 + 40 expected)
}) %>% group_by(feature) %>% arrange(outcome) %>% mutate(sample = c(1:100))
# create test data set with missing values
data = equivData %>% filter(status != "absent")
return(list(missingness= missingness, expectedness = expectedness, data=data,equivData=equivData))
}
# Description:
#' Apply Wilcoxon rank-sum test for the differential abundance analysis of microbiome count data
# Parameters:
#' @param count.matrix A sample-by-taxon original count matrix. All the entry should be nonnegative counts
#' @param phenotype The phenotype indicator vector for all samples in \code{count.matrix}. Should only have 2 groups.
# Outputs:
#' @return A list of detailed wilcoxon rank-sum test output and the p-values
WilcoxonRankSum = function(count.matrix, phenotype){
  # check the phenotype: the rank-sum test compares exactly two groups.
  # (The original `>= 3` guard let a single-level phenotype fall through
  # and fail later inside wilcox.test with a cryptic error.)
  pheno.level = levels(factor(phenotype))
  if(length(pheno.level) != 2){
    stop("Wilcoxon rank sum can handle only two phenotype levels")
  }
  # convert counts to compositional data (relative abundances per sample);
  # a zero-count sample yields NaN abundances, flagged with a warning
  RA.matrix = apply(count.matrix, 1, function(x) {
    s = sum(x); if (s == 0) {warning("Sample has no counts")}; x / sum(x)} )
  # apply() over rows returns taxa-by-sample, so transpose back
  RA.matrix = t(RA.matrix)
  # one two-sided rank-sum test per taxon, normal approximation (exact = FALSE)
  WC.details = apply(RA.matrix, 2, function(x){wilcox.test(x[phenotype == pheno.level[1]], x[phenotype == pheno.level[2]], exact = FALSE)})
  WC.pval = unlist(lapply(WC.details, function(x){x$p.value} ))
  return(list(Wilcoxon.pval = WC.pval,
              Wilcoxon.details = WC.details))
}
| /GymFunctions/WilcoxonRankSum.R | no_license | shuangj00/MicrobiomeGym | R | false | false | 1,180 | r | # Description:
#' Apply Wilcoxon rank-sum test for the differential abundance analysis of microbiome count data
# Parameters:
#' @param count.matrix A sample-by-taxon original count matrix. All the entry should be nonnegative counts
#' @param phenotype The phenotype indicator vector for all samples in \code{count.matrix}. Should only have 2 groups.
# Outputs:
#' @return A list of detailed wilcoxon rank-sum test output and the p-values
WilcoxonRankSum = function(count.matrix, phenotype){
# check the phenotype
pheno.level = levels(factor(phenotype))
if(length(pheno.level) >= 3){
stop("Wilcoxon rank sum can handle only two phenotype levels")
}
# covert to compositional data
RA.matrix = apply(count.matrix, 1, function(x) {
s = sum(x); if (s == 0) {warning("Sample has no counts")}; x / sum(x)} )
# perform the test
RA.matrix = t(RA.matrix)
WC.details = apply(RA.matrix, 2, function(x){wilcox.test(x[phenotype == pheno.level[1]], x[phenotype == pheno.level[2]], exact = FALSE)})
WC.pval = unlist(lapply(WC.details, function(x){x$p.value} ))
return(list(Wilcoxon.pval = WC.pval,
Wilcoxon.details = WC.details))
}
|
# Scrape an exported Telegram chat (HTML export) into a data frame.
library(tidyverse)
library(httr)
library(xml2)
library(rvest)
# fn_scraper_telegram() is defined in src.R, which must sit next to this
# script (relative path) -- run from the project root.
source("src.R")
# path to the exported Telegram HTML chat log
path <- "chat/messages.html"
df_messages <- fn_scraper_telegram(path)
| /script.R | no_license | rodrigoqaz/telegram_scraper | R | false | false | 150 | r | library(tidyverse)
library(httr)
library(xml2)
library(rvest)
source("src.R")
path <- "chat/messages.html"
df_messages <- fn_scraper_telegram(path)
|
# Poll TrueFX once a minute for ~30 days, keeping only EUR/USD quotes and
# rewriting data.csv after every poll so a crash loses at most one minute.
# install-on-demand idiom: require() returns FALSE if TFX is missing
if (!require("TFX")) {
install.packages("TFX")
library(TFX)
}
getwd()
# all output lands under ./data/ (side effect: changes the working dir)
setwd("./data/")
# seed the frame with one snapshot so rbind() below has matching columns
data = data.frame(QueryTrueFX())
# 60 polls/hour * 24 hours * 30 days
for(i in c(1:((60*24)*30))) {
Sys.sleep(60)
data = rbind(data, data.frame(QueryTrueFX()))
data = subset(data, data$Symbol == "EUR/USD")
# NOTE(review): rbind-in-loop plus a full rewrite of data.csv each minute
# grows quadratically over a 30-day run; consider appending instead.
write.csv(data, 'data.csv')
}
| /tfx.query.r | permissive | MindMimicLabs/data-forex | R | false | false | 306 | r | if (!require("TFX")) {
install.packages("TFX")
library(TFX)
}
getwd()
setwd("./data/")
data = data.frame(QueryTrueFX())
for(i in c(1:((60*24)*30))) {
Sys.sleep(60)
data = rbind(data, data.frame(QueryTrueFX()))
data = subset(data, data$Symbol == "EUR/USD")
write.csv(data, 'data.csv')
}
|
cutoff = function(x, L){
return(cor(x[1:(length(x) - L)], x[(L+1):length(x)]))
} | /Batch Par. Codes/cutoff.R | no_license | aj-williamz/Atlas-Database-Project | R | false | false | 84 | r | cutoff = function(x, L){
return(cor(x[1:(length(x) - L)], x[(L+1):length(x)]))
} |
suppressPackageStartupMessages(library(Hmisc))
suppressPackageStartupMessages(library(lattice))
suppressPackageStartupMessages(require(grid))
library(Hmisc)#,warn.conflicts = FALSE,verbose = FALSE, quietly=TRUE)
library(lattice)
# Compute a point estimate plus lower/upper interval bounds for a vector of
# metric values, returned as c(center, lower, upper).
#
# For fewer than 10 observations the interval is mean +/- one standard
# deviation, with a tiny fallback spread of 1e-5 for a single observation
# (sd() of a length-1 vector is NA); with 10 or more observations it is the
# 95% confidence interval of a one-sample t-test.
calculateIC <- function(metrics) {
  ic <- numeric()
  if (length(metrics) < 10) {
    m <- mean(metrics)
    # plain if/else instead of the original scalar misuse of ifelse()
    s <- if (length(metrics) == 1) 0.00001 else sd(metrics)
    ic[1] <- m
    ic[2] <- m - s
    ic[3] <- m + s
  } else {
    t <- t.test(metrics)
    ic[1] <- t$estimate[[1]]
    ic[2] <- t$conf.int[1]
    ic[3] <- t$conf.int[2]
  }
  ic
}
# Short instance name: the third and fourth dot-separated tokens of the
# full instance identifier, rejoined with a dot (e.g. "a.b.c.d.e" -> "c.d").
# Returned invisibly, matching the original's assignment-as-last-expression.
toShortInstanceName <- function(instance) {
  parts <- strsplit(instance, split = ".", fixed = TRUE)[[1]]
  invisible(paste(parts[3], parts[4], sep = "."))
}
# Family name: the third dot-separated token of the full instance
# identifier (e.g. "a.b.c.d" -> "c").
toFamilyName <- function(instance) {
  strsplit(instance, split = ".", fixed = TRUE)[[1]][3]
}
# Basename of a "/"-separated path: the last path component.
getSimpleName <- function(path) {
  parts <- strsplit(path, split = "/", fixed = TRUE)[[1]]
  parts[length(parts)]
}
# Continental-style number formatting (comma decimal mark, dot thousands
# separator), 2 decimal places. The `...` and `format="d"` arguments are
# accepted but appear unused by format.default() -- kept for call-site
# compatibility (TODO confirm).
myformat <- function(arg, ...){
format( arg, format="d",digits=4, nsmall=2, justify = "right", decimal.mark = ",", big.mark = ".")
}
# As myformat(), but scales a fraction to a percentage first; the "\\%"
# suffix is commented out. Value returned invisibly (last expression is
# an assignment).
myformat2 <- function(arg, ...){
valuef <- format( arg * 100, format="d",digits=2, nsmall=2, justify = "right", decimal.mark = ",", big.mark = ".")
#paste(valuef, "\\%")
}
# As myformat(), with 6 decimal places; value returned invisibly.
myformat3 <- function(arg, ...){
valuef <- format( arg , format="d",digits=4, nsmall=6, justify = "right", decimal.mark = ",", big.mark = ".")
}
# Enumerate every (rodada, instance, nPeers, nMachines, upp) experiment
# combination for which BOTH the grid and the cloud result files exist,
# returning one data.frame row per combination.
#
# NOTE(review): reads many globals (rodadas, instances, numOfPeers,
# numOfMachinesByPeer, usersPerPeer, oursimDir, spotsimDir, scheduler,
# spotLimit, groupedbypeer) and the helpers setUpGName()/setUpCName();
# all must be defined before calling.
# NOTE(review): growing `files` with rbind() inside nested loops is O(n^2);
# consider collecting rows in a list and binding once.
getDatasetsNames <- function(){
files <- data.frame(
nPeers = numeric(),
nMachines = numeric(),
upp = numeric(),
gname = character(),
cname = character(),
instance = character(),
type = character(),
rodada = numeric()
)
# collect the names of all files with cloud and grid results
for (rodada in rodadas) {
for (instance in instances) {
for (nPeers in numOfPeers) {
for (nMachines in numOfMachinesByPeer) {
for (upp in usersPerPeer) {
gname <- setUpGName(nPeers, nMachines, upp, oursimDir, scheduler, rodada)
cname <- setUpCName(nPeers, nMachines, upp, spotsimDir, scheduler, rodada, instance, spotLimit, groupedbypeer)
if ( file.exists(gname) && file.exists(cname) ) {
# short type = third and fourth dot-separated tokens of the instance name
splitName <- unlist(strsplit(instance, split=".", fixed=T))
type <- paste(splitName[3], splitName[4], sep=".");
files <- rbind(
files,
data.frame(
nPeers = nPeers,
nMachines = nMachines,
upp = upp,
gname = gname,
cname = cname,
instance = instance,
type = type,
rodada = rodada
)
)
}
}
}
}
}
}
files
}
# Enumerate the grid (OurSim) result files that exist for every
# (rodada, nPeers, nMachines, upp) combination, one data.frame row each.
#
# NOTE(review): reads globals (rodadas, numOfPeers, numOfMachinesByPeer,
# usersPerPeer, oursimDirO, scheduler) and the helper setUpGName().
# NOTE(review): the preallocated frame declares 4 columns but appended
# rows carry 5 (upp included); the try() below likely masks the resulting
# rbind error on the first append -- confirm the intended column set.
# NOTE(review): `class(files) == "try-error"` is fragile; prefer
# inherits(files, "try-error").
getGridDatasetsNames <- function(){
files <- data.frame( nPeers = numeric(),
nMachines = numeric(),
rodada = numeric(),
name = character()
)
# collect the names of all files with grid results
for (rodada in rodadas) {
for (nPeers in numOfPeers) {
for (nMachines in numOfMachinesByPeer) {
for (upp in usersPerPeer) {
gname <- setUpGName(nPeers, nMachines, upp, oursimDirO, scheduler, rodada)
#print(paste(rodada,nPeers,nMachines,gname,class(files)))
if ( file.exists(gname) ) {
files <- try(
rbind( files,
data.frame(
nPeers = nPeers,
nMachines = nMachines,
upp = upp,
rodada = rodada,
name = gname
)
)
)
if (class(files) == "try-error") {
print(paste("try-error",rodada,nPeers,nMachines,gname))
#exit
}
} else {
#print(paste(rodada,nPeers,nMachines,gname))
}
}
}
}
}
files
}
# Collect the cloud (SpotSim) result-file names for a given machine count,
# one data.frame row per existing file, mirroring getGridDatasetsNames().
#
# Iterates the globally defined experiment dimensions (rodadas, instances,
# numOfPeers, usersPerPeer) and uses the helper setUpCName() plus the
# globals spotsimDirO, scheduler, spotLimit, groupedbypeer.
#
# Bug fixes relative to the original:
#  * `cname` was tested with file.exists() BEFORE it was computed, so the
#    first iteration referenced an undefined (or stale global) variable;
#    the path is now built first, as in getDatasetsNames().
#  * strsplit() was called as strsplit(instance, upp, split=".", fixed=T);
#    the stray `upp` was silently matched to an unrelated parameter. It now
#    splits on a literal "." only, like the sibling functions.
getCloudDatasetsNames <- function(nMachines){
  files <- data.frame(
    nPeers = numeric(),
    instance = character(),
    type = character(),
    rodada = numeric(),
    name = character()
  )
  # collect the names of all files with cloud results
  for (rodada in rodadas) {
    for (instance in instances) {
      for (nPeers in numOfPeers) {
        for (upp in usersPerPeer) {
          cname <- setUpCName(nPeers, nMachines, upp, spotsimDirO, scheduler, rodada, instance, spotLimit, groupedbypeer)
          if ( file.exists(cname) ) {
            # short type = third and fourth dot-separated tokens
            splitName <- unlist(strsplit(instance, split = ".", fixed = TRUE))
            type <- paste(splitName[3], splitName[4], sep = ".")
            files <- rbind( files,
              data.frame(
                nPeers = nPeers,
                upp = upp,
                instance = instance,
                type = type,
                rodada = rodada,
                name = cname
              )
            )
          }
        }
      }
    }
  }
  files
}
# Persist a dataset as whitespace-separated text with high numeric
# precision, no row names and no quoting.
saveDataset <- function( ds, outputFile ){
  formatted <- format(ds, format = "d", digits = 22)
  write.table(formatted, outputFile, row.names = FALSE, quote = FALSE)
}
| /r_scripts/utils.R | no_license | edigley/communal-cloud | R | false | false | 5,176 | r | suppressPackageStartupMessages(library(Hmisc))
suppressPackageStartupMessages(library(lattice))
suppressPackageStartupMessages(require(grid))
library(Hmisc)#,warn.conflicts = FALSE,verbose = FALSE, quietly=TRUE)
library(lattice)
calculateIC <- function(metrics) {
ic <- numeric()
if (length(metrics) < 10) {
m <- mean(metrics)
#s <- sd(metrics)
s <- ifelse( length(metrics)==1, 0.00001, sd(metrics) )
ic[1] <- m
ic[2] <- m - s
ic[3] <- m + s
} else {
t <- t.test(metrics)
ic[1] <- t$estimate[[1]]
ic[2] <- t$conf.int[1]
ic[3] <- t$conf.int[2]
}
ic
}
toShortInstanceName <- function(instance) {
splitName <- unlist(strsplit(instance, split=".", fixed=T))
type <- paste(splitName[3], splitName[4], sep=".");
}
toFamilyName <- function(instance) {
splitName <- unlist(strsplit(instance, split=".", fixed=T))
#type <- paste(splitName[3], splitName[4], sep=".");
splitName[3]
}
getSimpleName <- function(path) {
splitName <- unlist(strsplit(path, split="/", fixed=T))
splitName[length(splitName)]
}
myformat <- function(arg, ...){
format( arg, format="d",digits=4, nsmall=2, justify = "right", decimal.mark = ",", big.mark = ".")
}
myformat2 <- function(arg, ...){
valuef <- format( arg * 100, format="d",digits=2, nsmall=2, justify = "right", decimal.mark = ",", big.mark = ".")
#paste(valuef, "\\%")
}
myformat3 <- function(arg, ...){
valuef <- format( arg , format="d",digits=4, nsmall=6, justify = "right", decimal.mark = ",", big.mark = ".")
}
getDatasetsNames <- function(){
files <- data.frame(
nPeers = numeric(),
nMachines = numeric(),
upp = numeric(),
gname = character(),
cname = character(),
instance = character(),
type = character(),
rodada = numeric()
)
#pega os nomes de todos os arquivos com resultados da cloud e da grade
for (rodada in rodadas) {
for (instance in instances) {
for (nPeers in numOfPeers) {
for (nMachines in numOfMachinesByPeer) {
for (upp in usersPerPeer) {
gname <- setUpGName(nPeers, nMachines, upp, oursimDir, scheduler, rodada)
cname <- setUpCName(nPeers, nMachines, upp, spotsimDir, scheduler, rodada, instance, spotLimit, groupedbypeer)
if ( file.exists(gname) && file.exists(cname) ) {
splitName <- unlist(strsplit(instance, split=".", fixed=T))
type <- paste(splitName[3], splitName[4], sep=".");
files <- rbind(
files,
data.frame(
nPeers = nPeers,
nMachines = nMachines,
upp = upp,
gname = gname,
cname = cname,
instance = instance,
type = type,
rodada = rodada
)
)
}
}
}
}
}
}
files
}
getGridDatasetsNames <- function(){
files <- data.frame( nPeers = numeric(),
nMachines = numeric(),
rodada = numeric(),
name = character()
)
#pega os nomes de todos os arquivos com resultados da grade
for (rodada in rodadas) {
for (nPeers in numOfPeers) {
for (nMachines in numOfMachinesByPeer) {
for (upp in usersPerPeer) {
gname <- setUpGName(nPeers, nMachines, upp, oursimDirO, scheduler, rodada)
#print(paste(rodada,nPeers,nMachines,gname,class(files)))
if ( file.exists(gname) ) {
files <- try(
rbind( files,
data.frame(
nPeers = nPeers,
nMachines = nMachines,
upp = upp,
rodada = rodada,
name = gname
)
)
)
if (class(files) == "try-error") {
print(paste("try-error",rodada,nPeers,nMachines,gname))
#exit
}
} else {
#print(paste(rodada,nPeers,nMachines,gname))
}
}
}
}
}
files
}
getCloudDatasetsNames <- function(nMachines){
files <- data.frame(
nPeers = numeric(),
instance = character(),
type = character(),
rodada = numeric(),
name = character()
)
#pega os nomes de todos os arquivos com resultados da cloud
for (rodada in rodadas) {
for (instance in instances) {
for (nPeers in numOfPeers) {
for (upp in usersPerPeer) {
if ( file.exists(cname) ) {
#nMachines <- 50
cname <- setUpCName(nPeers, nMachines, upp, spotsimDirO, scheduler, rodada, instance,spotLimit,groupedbypeer)
splitName <- unlist(strsplit(instance, upp, split=".", fixed=T))
type <- paste(splitName[3], splitName[4], sep=".");
files <- rbind( files,
data.frame(
nPeers = nPeers,
upp = upp,
instance = instance,
type = type,
rodada = rodada,
name = cname
)
)
}
}
}
}
}
files
}
saveDataset <- function( ds, outputFile ){
write.table(
format( ds, format="d", digits=22),
outputFile,
row.names=F,
quote=F
)
}
|
## Build storm-occurrence indicators from binary hourly rain-occurrence
## data, bridging dry gaps shorter than the configured break length
## (7 hours) so rain-dry-rain sequences count as one storm. An hour is
## flagged "in storm" when every one of the eight 8-hour windows covering
## it contains at least one wet hour.
##
## Side effects: iterates over the global data frame Met_data_raw2 and
## assigns the augmented table to the global `RainHours` (original design,
## preserved).
## NOTE(review): lead()/lag() are dplyr's shift verbs here -- dplyr must be
## attached. Assumes length(PPT) == nrow(Met_data_raw2); confirm upstream.
StormChunk_7hr <- function(PPT) {
  # one indicator vector per window alignment, preallocated to full length
  rain_sum1 <- vector(mode = "numeric", length = length(PPT))
  rain_sum2 <- vector(mode = "numeric", length = length(PPT))
  rain_sum3 <- vector(mode = "numeric", length = length(PPT))
  rain_sum4 <- vector(mode = "numeric", length = length(PPT))
  rain_sum5 <- vector(mode = "numeric", length = length(PPT))
  rain_sum6 <- vector(mode = "numeric", length = length(PPT))
  rain_sum7 <- vector(mode = "numeric", length = length(PPT))
  rain_sum8 <- vector(mode = "numeric", length = length(PPT))
  storm_occurrence <- vector(mode = "numeric", length = length(PPT))
  # forward-shifted copies of the occurrence series (factor-safe conversion)
  PPT0 <- as.numeric(as.character(PPT))
  PPT1 <- as.numeric(as.character(lead(PPT, 1)))
  PPT2 <- as.numeric(as.character(lead(PPT1, 1)))
  PPT3 <- as.numeric(as.character(lead(PPT2, 1)))
  PPT4 <- as.numeric(as.character(lead(PPT3, 1)))
  PPT5 <- as.numeric(as.character(lead(PPT4, 1)))
  PPT6 <- as.numeric(as.character(lead(PPT5, 1)))
  PPT7 <- as.numeric(as.character(lead(PPT6, 1)))
  # wet-hour count of the 8-hour window starting at each hour, then the
  # same window lagged by 1..7 hours
  rainhour1 <- PPT0 + PPT1 + PPT2 + PPT3 + PPT4 + PPT5 + PPT6 + PPT7
  rainhour2 <- lag(rainhour1, 1)
  rainhour3 <- lag(rainhour2, 1)
  rainhour4 <- lag(rainhour3, 1)
  rainhour5 <- lag(rainhour4, 1)
  rainhour6 <- lag(rainhour5, 1)
  rainhour7 <- lag(rainhour6, 1)
  rainhour8 <- lag(rainhour7, 1)
  for (i in seq_len(nrow(Met_data_raw2))) {
    rs1 <- as.numeric(rainhour1[i] > 0)
    rs2 <- as.numeric(rainhour2[i] > 0)
    rs3 <- as.numeric(rainhour3[i] > 0)
    rs4 <- as.numeric(rainhour4[i] > 0)
    rs5 <- as.numeric(rainhour5[i] > 0)
    rs6 <- as.numeric(rainhour6[i] > 0)
    rs7 <- as.numeric(rainhour7[i] > 0)
    rs8 <- as.numeric(rainhour8[i] > 0)
    RS <- rs1 + rs2 + rs3 + rs4 + rs5 + rs6 + rs7 + rs8
    # storm hour only if all eight covering windows contain rain
    RS_1 <- as.numeric(RS == 8)
    rain_sum1[i] <- rs1
    rain_sum2[i] <- rs2
    rain_sum3[i] <- rs3
    rain_sum4[i] <- rs4
    rain_sum5[i] <- rs5
    rain_sum6[i] <- rs6
    rain_sum7[i] <- rs7
    rain_sum8[i] <- rs8
    storm_occurrence[i] <- RS_1
  }
  # build the output table and publish it ONCE; the original recreated the
  # full data.frame and re-assigned the global on every loop iteration,
  # turning an O(n) pass into O(n^2) work with an identical final state
  R_Sum <- data.frame(Met_data_raw2, rain_sum1, rain_sum2, rain_sum3,
                      rain_sum4, rain_sum5, rain_sum6, rain_sum7, rain_sum8,
                      storm_occurrence)
  assign("RainHours", R_Sum, envir = .GlobalEnv)
}
| /functions/StormChunk_7hr.R | no_license | tuleypa/Storm-Identification | R | false | false | 2,591 | r | ## this function fills holes (0s) less than 7-hours within each storms The data provided is the
# binary rain occurance data. The resulting data from this function represent the storm occurance
# data; this includes occurences of rain-dry-rain. The temporal length of the break (dry period),
# is defined within this function as 7
StormChunk_7hr <- function(PPT) {
rain_sum1 <-
vector(mode = "numeric", length = length(PPT))
rain_sum2 <-
vector(mode = "numeric", length = length(PPT))
rain_sum3 <-
vector(mode = "numeric", length = length(PPT))
rain_sum4 <-
vector(mode = "numeric", length = length(PPT))
rain_sum5 <-
vector(mode = "numeric", length = length(PPT))
rain_sum6 <-
vector(mode = "numeric", length = length(PPT))
rain_sum7 <-
vector(mode = "numeric", length = length(PPT))
rain_sum8 <-
vector(mode = "numeric", length = length(PPT))
storm_occurrence <-
vector(mode = "numeric", length = length(PPT))
PPT0 <- as.numeric(as.character(PPT))
PPT1 <- as.numeric(as.character(lead(PPT, 1)))
PPT2 <- as.numeric(as.character(lead(PPT1, 1)))
PPT3 <- as.numeric(as.character(lead(PPT2, 1)))
PPT4 <- as.numeric(as.character(lead(PPT3, 1)))
PPT5 <- as.numeric(as.character(lead(PPT4, 1)))
PPT6 <- as.numeric(as.character(lead(PPT5, 1)))
PPT7 <- as.numeric(as.character(lead(PPT6, 1)))
rainhour1 <- PPT0 + PPT1 + PPT2 + PPT3 + PPT4 + PPT5 + PPT6 + PPT7
rainhour2 <- lag(rainhour1, 1)
rainhour3 <- lag(rainhour2, 1)
rainhour4 <- lag(rainhour3, 1)
rainhour5 <- lag(rainhour4, 1)
rainhour6 <- lag(rainhour5, 1)
rainhour7 <- lag(rainhour6, 1)
rainhour8 <- lag(rainhour7, 1)
#return(rainhour1)
for(i in 1:nrow(Met_data_raw2)) {
rs1 <- as.numeric(rainhour1[i] > 0)
rs2 <- as.numeric(rainhour2[i] > 0)
rs3 <- as.numeric(rainhour3[i] > 0)
rs4 <- as.numeric(rainhour4[i] > 0)
rs5 <- as.numeric(rainhour5[i] > 0)
rs6 <- as.numeric(rainhour6[i] > 0)
rs7 <- as.numeric(rainhour7[i] > 0)
rs8 <- as.numeric(rainhour8[i] > 0)
RS <- rs1 + rs2 + rs3 + rs4 + rs5 + rs6 + rs7 + rs8
#print(RS)
RS_1 <- as.numeric(RS == 8)
#print(RS_1)
rain_sum1[i] <- rs1
rain_sum2[i] <- rs2
rain_sum3[i] <- rs3
rain_sum4[i] <- rs4
rain_sum5[i] <- rs5
rain_sum6[i] <- rs6
rain_sum7[i] <- rs7
rain_sum8[i] <- rs8
storm_occurrence[i] <- RS_1
R_Sum <- data.frame(Met_data_raw2, rain_sum1, rain_sum2, rain_sum3, rain_sum4, rain_sum5, rain_sum6, rain_sum7, rain_sum8, storm_occurrence)
assign("RainHours", R_Sum, envir = .GlobalEnv)
}
}
|
# Convert PDFs to editable data using the ABBYY Cloud OCR SDK (abbyyR).
# library() rather than require(): a missing package should fail loudly
# here instead of returning FALSE and crashing later on setapp().
library(abbyyR)
# NOTE(review/security): the application credential below is hard-coded in
# version control -- rotate this key and read it from an environment
# variable (e.g. Sys.getenv("ABBYY_KEY")) instead.
setapp(c("Tunisia Firms", "m+w3xiZFDw0D/4BMQSORs0E6"))
getAppInfo()
# OCR the single 2008 ranking document
processImage(file_path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\Classement 2008.pdf',
             imageSource = 'scanner',description = 'Classement2008')
# all 2010s individual ranking PDFs (names like "2012-3.pdf")
# NOTE(review): the unescaped "." matches any character; the stricter
# pattern would be '[0-9]+-[0-9]\\.pdf$'.
all_2010s <- list.files(path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\',pattern='[0-9]+-[0-9].pdf')
lapply(all_2010s, function(x)
  processImage(file_path = paste0('C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\',x),
               imageSource = 'scanner',description = x)
)
results <- getResults(output='C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\')
# Load the OCR'd text files, keep only digit-bearing lines, and trim
class2014 <- readLines('data/classement2014.txt',encoding='UTF-8')
class2014 <- class2014[grepl('\\h?[0-9]+',class2014,perl=TRUE)]
class2014 <- trimws(class2014)
writeLines(class2014,'data/class2014_firststep.txt')
#class2014 <- class2014[grepl('[0-9]+\\h+[0-9]+',class2014,perl=TRUE)]
| /pdf_convert.R | no_license | saudiwin/tunisia_firms | R | false | false | 1,148 | r | # convert PDFs to editable data using ABBY
require(abbyyR)
setapp(c("Tunisia Firms", "m+w3xiZFDw0D/4BMQSORs0E6"))
getAppInfo()
processImage(file_path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\Classement 2008.pdf',
imageSource = 'scanner',description = 'Classement2008')
# all 2010 individuals
all_2010s <- list.files(path = 'C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\',pattern='[0-9]+-[0-9].pdf')
lapply(all_2010s, function(x)
processImage(file_path = paste0('C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\',x),
imageSource = 'scanner',description = x)
)
results <- getResults(output='C:\\Users\\bobku\\Box Sync\\Dissertation\\Quantitative\\archieve classement\\')
# Load text files and process them with regexp
class2014 <- readLines('data/classement2014.txt',encoding='UTF-8')
class2014 <- class2014[grepl('\\h?[0-9]+',class2014,perl=TRUE)]
class2014 <- trimws(class2014)
writeLines(class2014,'data/class2014_firststep.txt')
#class2014 <- class2014[grepl('[0-9]+\\h+[0-9]+',class2014,perl=TRUE)]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_hydroMet_compact.R
\docType{class}
\name{hydroMet_compact-class}
\alias{hydroMet_compact-class}
\alias{hydroMet_compact}
\title{\code{hydroMet} subclass for compact data}
\value{
A hydroMet_compact class object.
}
\description{
This subclass is useful for storing in a single data frame ready to use hydro-meteorological series or many variables of the same kind (e.g. lets say precipitacion series).
}
\section{Slots}{
\describe{
\item{\code{compact}}{data.frame with Date as first column (class 'Date' or 'POSIXct'). All other columns are the numeric hydro-meteorological variables (double). This subclass was designed to gather ready-to-use data (e.g. for modelling) in a single table. You can also use it to put together variables of the same kind (e.g. precipitation records) for regional analyses.}
}}
| /man/hydroMet_compact-class.Rd | no_license | cran/hydroToolkit | R | false | true | 895 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_hydroMet_compact.R
\docType{class}
\name{hydroMet_compact-class}
\alias{hydroMet_compact-class}
\alias{hydroMet_compact}
\title{\code{hydroMet} subclass for compact data}
\value{
A hydroMet_compact class object.
}
\description{
This subclass is useful for storing in a single data frame ready to use hydro-meteorological series or many variables of the same kind (e.g. lets say precipitacion series).
}
\section{Slots}{
\describe{
\item{\code{compact}}{data.frame with Date as first column (class 'Date' or 'POSIXct'). All other columns are the numeric hydro-meteorological variables (double). This subclass was designed to gather ready-to-use data (e.g. for modelling) in a single table. You can also use it to put together variables of the same kind (e.g. precipitation records) for regional analyses.}
}}
|
/Ejemplo01.R | no_license | azchr4ds/Clase4R4DS | R | false | false | 5,245 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_indicators.R
\name{get_informal}
\alias{get_informal}
\title{Calcula el número de trabajadores informales}
\usage{
get_informal(data)
}
\arguments{
\item{data}{\code{dataframe} de la ENE}
}
\value{
número de trabajadores informales
}
\description{
Calcula el número de trabajadores informales
}
| /man/get_informal.Rd | no_license | Klauslehmann/ene | R | false | true | 381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_indicators.R
\name{get_informal}
\alias{get_informal}
\title{Calcula el número de trabajadores informales}
\usage{
get_informal(data)
}
\arguments{
\item{data}{\code{dataframe} de la ENE}
}
\value{
número de trabajadores informales
}
\description{
Calcula el número de trabajadores informales
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_ma_df.R
\name{make_ma_df}
\alias{make_ma_df}
\title{Preprocessing for meta-analysis}
\usage{
make_ma_df(outcomes_df = outcomes_df, rob = rob, study_df = study_df, ...)
}
\arguments{
\item{outcomes_df}{Filtered outcome_df with selection of studies}
\item{rob}{Data frame of risk of bias assessments}
\item{study_df}{Data frame of study characteristics}
}
\description{
Make a data frame for analyses
}
| /man/make_ma_df.Rd | permissive | nkamboj06/cnap-review | R | false | true | 486 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_ma_df.R
\name{make_ma_df}
\alias{make_ma_df}
\title{Preprocessing for meta-analysis}
\usage{
make_ma_df(outcomes_df = outcomes_df, rob = rob, study_df = study_df, ...)
}
\arguments{
\item{outcomes_df}{Filtered outcome_df with selection of studies}
\item{rob}{Data frame of risk of bias assessments}
\item{study_df}{Data frame of study characteristics}
}
\description{
Make a data frame for analyses
}
|
/군집화, 회귀분석 코드/군집화와 분류 코드/12-4.R | no_license | frics/R_project | R | false | false | 927 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{palettes}
\alias{palettes}
\title{Palette repair data}
\format{A data frame with 20 observations on the following 3 variables.
\itemize{ \item{palettes}{ number of palettes repaired}
\item{employee}{ a factor with levels \code{A} \code{B} \code{C}
\code{D}} \item{day}{ a factor with levels \code{day1} \code{day2}
\code{day3} \code{day4} \code{day5}} }}
\source{
Michael Stob, Calvin College
}
\description{
The palettes data set contains data from a firm that recycles palettes.
Palettes from warehouses are bought, repaired, and resold. (Repairing a
palette typically involves replacing one or two boards.) The company has
four employees who do the repairs. The employer sampled five days for each
employee and recorded the number of palettes repaired.
}
\examples{
data(palettes)
# Do the employees differ in the rate at which they repair palettes?
pal.lm1 <- lm(palettes~employee,palettes)
anova(pal.lm1)
# Now using day as a blocking variable
pal.lm2 <- lm(palettes~employee+day,palettes)
anova(pal.lm2)
xyplot(palettes~day, data=palettes,
groups=employee,
main="Productivity by day and employee",
type='b',auto.key=list(columns=4,points=FALSE,lines=TRUE))
}
\keyword{datasets}
| /man/palettes.Rd | no_license | cran/fastR | R | false | true | 1,304 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{palettes}
\alias{palettes}
\title{Palette repair data}
\format{A data frame with 20 observations on the following 3 variables.
\itemize{ \item{palettes}{ number of palettes repaired}
\item{employee}{ a factor with levels \code{A} \code{B} \code{C}
\code{D}} \item{day}{ a factor with levels \code{day1} \code{day2}
\code{day3} \code{day4} \code{day5}} }}
\source{
Michael Stob, Calvin College
}
\description{
The palettes data set contains data from a firm that recycles palettes.
Palettes from warehouses are bought, repaired, and resold. (Repairing a
palette typically involves replacing one or two boards.) The company has
four employees who do the repairs. The employer sampled five days for each
employee and recorded the number of palettes repaired.
}
\examples{
data(palettes)
# Do the employees differ in the rate at which they repair palettes?
pal.lm1 <- lm(palettes~employee,palettes)
anova(pal.lm1)
# Now using day as a blocking variable
pal.lm2 <- lm(palettes~employee+day,palettes)
anova(pal.lm2)
xyplot(palettes~day, data=palettes,
groups=employee,
main="Productivity by day and employee",
type='b',auto.key=list(columns=4,points=FALSE,lines=TRUE))
}
\keyword{datasets}
|
source("/data2/3to5/I35/scripts/analysisfunctions.R")
library(ncdf4)
library(maps)
library(mapdata)
library(maptools)
library(fields)
library(sp)
library(raster)
library(rasterVis)
library(ggplot2)
library(modi)
#weighted.var <- function(x, w, na.rm = FALSE) {
# if (na.rm) {
# w <- w[i <- !is.na(x)]
# x <- x[i]
# }
# sum.w <- sum(w)
# sum.w2 <- sum(w^2)
# mean.w <- sum(x * w) / sum(w)
# (sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2, na.rm =
# na.rm)
#}
# Working directory for all relative input/output paths.
# NOTE(review): setwd() in a script is fragile -- absolute paths would be safer.
setwd("/home/woot0002/DS_ind/")

# Analysis configuration.
var <- varin <- "tmax"   # variable analysed
type <- "ann"            # annual climatology
weightingused <- "full"  # which weighting scheme's files to load
stateapplied <- "full"   # "full" = whole domain; otherwise a regional mask is applied

# Load the Sanderson ensemble weights (the .Rdata supplies GCMhdat/LOCAhdat)
# together with the BMA posterior weight draws for both ensembles.
if (weightingused == "full") {
  load(file = paste0("Sanderson_EnsembleWeights_", var, "_", type, ".Rdata"))
  BMAweightsGCM <- read.table("posterior_BMA_combo.txt")
  BMAweightsLOCA <- read.table("posterior_BMA_combo_LOCA.txt")
} else {
  load(file = paste0("Sanderson_EnsembleWeights_", var, "_", type, "_", weightingused, ".Rdata"))
  BMAweightsGCM <- read.table(paste0("posterior_BMA_combo_", var, "_", weightingused, ".txt"))
  BMAweightsLOCA <- read.table(paste0("posterior_BMA_combo_LOCA_", var, "_", weightingused, ".txt"))
}

# Append one BMA weight draw per column to each metadata table, then keep the
# weighted tables under the names the rest of the script uses.
BMAweightst <- t(BMAweightsGCM)
GCMhdat <- cbind(GCMhdat, BMAweightst)
BMAweightst <- t(BMAweightsLOCA)
LOCAhdat <- cbind(LOCAhdat, BMAweightst)
GCMweights <- GCMhdat
LOCAweights <- LOCAhdat
# File inventories: historical and projected climatology files for each
# downscaling group, plus the Livneh observational climatology.
# Sys.glob() replaces the former system("ls ...", intern = TRUE) calls: it is
# portable base R, does not depend on a shell, is quiet when nothing matches,
# and returns the same sorted paths.
# precip files
GCMfiles_pr <- Sys.glob("/home/woot0002/GCMs/regrid/pr_*histclimo*.nc")
LOCAfiles_pr <- Sys.glob("/home/woot0002/LOCA/regrid/pr_*histclimo*.nc")
GCMprojfiles_pr <- Sys.glob("/home/woot0002/GCMs/regrid/pr_*projclimo*.nc")
LOCAprojfiles_pr <- Sys.glob("/home/woot0002/LOCA/regrid/pr_*projclimo*.nc")
LIVNEHfile_pr <- Sys.glob("/home/woot0002/monthlyclimo/pr_day*livneh*.nc")
# tasmax files
GCMfiles_tmax <- Sys.glob("/home/woot0002/GCMs/regrid/tasmax_*histclimo*.nc")
LOCAfiles_tmax <- Sys.glob("/home/woot0002/LOCA/regrid/tasmax_*histclimo*.nc")
GCMprojfiles_tmax <- Sys.glob("/home/woot0002/GCMs/regrid/tasmax_*projclimo*.nc")
LOCAprojfiles_tmax <- Sys.glob("/home/woot0002/LOCA/regrid/tasmax_*projclimo*.nc")
LIVNEHfile_tmax <- Sys.glob("/home/woot0002/monthlyclimo/tasmax_day*livneh*.nc")
# Subset the inventories to the models named in GCMlist (the common
# GCM/LOCA model set used in manuscript 1).
load("/home/woot0002/DS_ind/manuscript1/GCMlist.Rdata")
n_mod <- length(GCMlist)
# Preallocate one slot per model instead of growing from c(); seq_len()
# replaces the unsafe 1:length() pattern (which misbehaves on empty input).
GCM_hfiles_pr <- GCM_pfiles_pr <- LOCA_hfiles_pr <- LOCA_pfiles_pr <- character(n_mod)
GCM_hfiles_tmax <- GCM_pfiles_tmax <- LOCA_hfiles_tmax <- LOCA_pfiles_tmax <- character(n_mod)
for (i in seq_len(n_mod)) {
  # Trailing "_" keeps model names that are prefixes of other names from
  # matching each other. NOTE(review): a zero/multiple match still errors at
  # assignment time, exactly as before.
  pat <- paste0(GCMlist[i], "_")
  # pr
  GCM_hfiles_pr[i] <- GCMfiles_pr[grep(pat, GCMfiles_pr)]
  GCM_pfiles_pr[i] <- GCMprojfiles_pr[grep(pat, GCMprojfiles_pr)]
  LOCA_hfiles_pr[i] <- LOCAfiles_pr[grep(pat, LOCAfiles_pr)]
  LOCA_pfiles_pr[i] <- LOCAprojfiles_pr[grep(pat, LOCAprojfiles_pr)]
  # tmax
  GCM_hfiles_tmax[i] <- GCMfiles_tmax[grep(pat, GCMfiles_tmax)]
  GCM_pfiles_tmax[i] <- GCMprojfiles_tmax[grep(pat, GCMprojfiles_tmax)]
  LOCA_hfiles_tmax[i] <- LOCAfiles_tmax[grep(pat, LOCAfiles_tmax)]
  LOCA_pfiles_tmax[i] <- LOCAprojfiles_tmax[grep(pat, LOCAprojfiles_tmax)]
}
###
# create full filelist + metadata table - historical
# NOTE(review): this overwrites the GCMhdat/LOCAhdat loaded from the weights
# .Rdata earlier; GCMweights/LOCAweights already captured the weighted copies
# above, so the order of these sections must not change.
#GCMs
# Split each path on "/": element 6 is the file name; its "_"-separated
# fields 2:4 appear to be (GCM, experiment, downscaling method) -- TODO
# confirm against the actual file naming convention.
filelist1 = do.call("rbind",strsplit(GCM_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
# Literal string "NA" (not the missing value NA) -- presumably intentional so
# the value survives as an ordinary level; verify downstream use.
filelist2$training = "NA"
GCMhdat = filelist2[,c(2,3,4,6)]
names(GCMhdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCAhdat = filelist2[,c(2,3,4,6)]
names(LOCAhdat) = names(GCMhdat)
#All metadata
# One extra row representing the Livneh observations, appended to each table.
GCM = rep(NA,1)
exp = rep(NA,1)
DS = rep(NA,1)
training = "LIVNEH"
obsdat = data.frame(GCM,exp,DS,training)
GCMhdat = rbind(GCMhdat,obsdat)
LOCAhdat= rbind(LOCAhdat,obsdat)
# all files
# Historical model files plus the Livneh climo as the final list element.
GCMgroup_pr = c(GCM_hfiles_pr,LIVNEHfile_pr)
LOCAgroup_pr = c(LOCA_hfiles_pr,LIVNEHfile_pr)
GCMgroup_tmax = c(GCM_hfiles_tmax,LIVNEHfile_tmax)
LOCAgroup_tmax = c(LOCA_hfiles_tmax,LIVNEHfile_tmax)
###
# create full filelist + metadata table - projected
#GCMs
# Same filename parsing as the historical tables: path element 6 is the file
# name and its "_"-separated fields give model / experiment / method.
filelist1 = do.call("rbind",strsplit(GCM_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMpdat = filelist2[,c(2,3,4,6)]
names(GCMpdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCApdat = filelist2[,c(2,3,4,6)]
names(LOCApdat) = names(GCMpdat)
# all files
# The projected groups contain models only (no observations to append).
GCMpgroup_pr = GCM_pfiles_pr
LOCApgroup_pr = LOCA_pfiles_pr
GCMpgroup_tmax = GCM_pfiles_tmax
LOCApgroup_tmax = LOCA_pfiles_tmax
######
# Gather data: per-file 2-D annual climatology fields.
#
# Each input NetCDF holds a monthly climatology (lon x lat x month). For
# precip the months are totalled (annual sum); for tmax they are averaged.
# apply(..., na.rm = TRUE) turns all-NA cells (e.g. ocean) into 0 (sum) or
# NaN (mean), so those cells are restored to NA afterwards using the NA mask
# of the first month.
#
# This replaces eight copy-pasted loops. It also fixes a latent bug in the
# originals: `if(i==1) lat = ncvar_get(...); lon=ncvar_get(...)` put only the
# `lat` read inside the if -- `lon` was re-read from *every* file while `lat`
# came only from the first, which was clearly unintended. All inputs are
# assumed to share the common regridded lat/lon grid (TODO confirm the Livneh
# climo files are on the same grid), so the grid is now read once, up front.
read_climo_list <- function(files, ncvarname, agg_fun) {
  lapply(files, function(f) {
    nc <- nc_open(f)
    on.exit(nc_close(nc), add = TRUE)  # close the file even if a read fails
    idx <- which(names(nc$var) == ncvarname)
    tmp <- ncvar_get(nc, nc$var[[idx]]$name)
    clim <- apply(tmp, c(1, 2), agg_fun, na.rm = TRUE)
    if (stateapplied == "full") {
      # Keep only cells with data in month 1; everything else back to NA.
      ifelse(!is.na(tmp[, , 1]), clim, NA)
    } else {
      # NOTE(review): `regionmask` is not created anywhere in this script;
      # this branch only works if it is supplied externally -- verify.
      ifelse(regionmask == 1, clim, NA)
    }
  })
}

# Common grid, read once from the first historical GCM file.
nc <- nc_open(GCMgroup_pr[1])
lat <- ncvar_get(nc, "lat")
lon <- ncvar_get(nc, "lon")
nc_close(nc)

### precip (annual totals); the historical lists end with the Livneh obs
ncvarname <- "prclimo"
GCMhvardatalist_pr <- read_climo_list(GCMgroup_pr, ncvarname, sum)
sapply(GCMhvardatalist_pr, mean, na.rm = TRUE)
GCMpvardatalist_pr <- read_climo_list(GCMpgroup_pr, ncvarname, sum)
sapply(GCMpvardatalist_pr, mean, na.rm = TRUE)
LOCAhvardatalist_pr <- read_climo_list(LOCAgroup_pr, ncvarname, sum)
sapply(LOCAhvardatalist_pr, mean, na.rm = TRUE)
LOCApvardatalist_pr <- read_climo_list(LOCApgroup_pr, ncvarname, sum)
sapply(LOCApvardatalist_pr, mean, na.rm = TRUE)

### tmax (annual means)
ncvarname <- "tmaxclimo"
GCMhvardatalist_tmax <- read_climo_list(GCMgroup_tmax, ncvarname, mean)
sapply(GCMhvardatalist_tmax, mean, na.rm = TRUE)
GCMpvardatalist_tmax <- read_climo_list(GCMpgroup_tmax, ncvarname, mean)
sapply(GCMpvardatalist_tmax, mean, na.rm = TRUE)
LOCAhvardatalist_tmax <- read_climo_list(LOCAgroup_tmax, ncvarname, mean)
sapply(LOCAhvardatalist_tmax, mean, na.rm = TRUE)
LOCApvardatalist_tmax <- read_climo_list(LOCApgroup_tmax, ncvarname, mean)
sapply(LOCApvardatalist_tmax, mean, na.rm = TRUE)
#######
# Stack the per-model fields into lon x lat x model arrays and form the
# projected-change (projected minus historical) fields.
# The model count comes from GCMlist instead of a hard-coded 26, and the
# observational (Livneh) field is the element one past the models in the
# historical lists. (The original `dim = c(length(lon), ncol = length(lat),
# 26)` sneaked a stray `ncol =` name into the dim vector -- harmless to
# array(), but a mistake.)
nmod <- length(GCMlist)
# pr
GCMchange_pr <- LOCAchange_pr <- GCMproj_pr <- LOCAproj_pr <- GCMhist_pr <- LOCAhist_pr <-
  array(NA, dim = c(length(lon), length(lat), nmod))
OBS_pr <- LOCAhvardatalist_pr[[nmod + 1]]
for (i in seq_len(nmod)) {
  GCMchange_pr[, , i] <- GCMpvardatalist_pr[[i]] - GCMhvardatalist_pr[[i]]
  LOCAchange_pr[, , i] <- LOCApvardatalist_pr[[i]] - LOCAhvardatalist_pr[[i]]
  GCMproj_pr[, , i] <- GCMpvardatalist_pr[[i]]
  LOCAproj_pr[, , i] <- LOCApvardatalist_pr[[i]]
  GCMhist_pr[, , i] <- GCMhvardatalist_pr[[i]]
  LOCAhist_pr[, , i] <- LOCAhvardatalist_pr[[i]]
}
# tmax
GCMchange_tmax <- LOCAchange_tmax <- GCMproj_tmax <- LOCAproj_tmax <- GCMhist_tmax <- LOCAhist_tmax <-
  array(NA, dim = c(length(lon), length(lat), nmod))
OBS_tmax <- LOCAhvardatalist_tmax[[nmod + 1]]
for (i in seq_len(nmod)) {
  GCMchange_tmax[, , i] <- GCMpvardatalist_tmax[[i]] - GCMhvardatalist_tmax[[i]]
  LOCAchange_tmax[, , i] <- LOCApvardatalist_tmax[[i]] - LOCAhvardatalist_tmax[[i]]
  GCMproj_tmax[, , i] <- GCMpvardatalist_tmax[[i]]
  LOCAproj_tmax[, , i] <- LOCApvardatalist_tmax[[i]]
  GCMhist_tmax[, , i] <- GCMhvardatalist_tmax[[i]]
  LOCAhist_tmax[, , i] <- LOCAhvardatalist_tmax[[i]]
}
######
# Weighted (BMA) historical ensemble means.
# The loop indexes weight columns 10:109 -- presumably the 100 BMA posterior
# draws appended after the metadata/weight columns of the loaded table (TODO
# confirm that column offset if the .Rdata layout ever changes). Draw b is
# therefore stored at output slice b - 9.
GCMBMAhistmean_pr <- LOCABMAhistmean_pr <- array(NA, dim = c(length(lon), length(lat), 100))
GCMBMAhistmean_tmax <- LOCABMAhistmean_tmax <- array(NA, dim = c(length(lon), length(lat), 100))
# pr
for (b in 10:109) {
  gcm_acc <- loca_acc <- 0
  for (i in 1:26) {
    gcm_acc <- gcm_acc + GCMhist_pr[, , i] * GCMweights[i, b]
    loca_acc <- loca_acc + LOCAhist_pr[, , i] * LOCAweights[i, b]
  }
  GCMBMAhistmean_pr[, , b - 9] <- gcm_acc
  LOCABMAhistmean_pr[, , b - 9] <- loca_acc
}
# tmax
for (b in 10:109) {
  gcm_acc <- loca_acc <- 0
  for (i in 1:26) {
    gcm_acc <- gcm_acc + GCMhist_tmax[, , i] * GCMweights[i, b]
    loca_acc <- loca_acc + LOCAhist_tmax[, , i] * LOCAweights[i, b]
  }
  GCMBMAhistmean_tmax[, , b - 9] <- gcm_acc
  LOCABMAhistmean_tmax[, , b - 9] <- loca_acc
}
######
# Weighted (BMA) ensemble means of the projected changes.
# Weight columns 10:109 hold the 100 BMA posterior draws (see the weight
# table construction near the top of the script); draw b maps to output
# slice b - 9.
GCMBMAchangemean_pr <- LOCABMAchangemean_pr <- array(NA, dim = c(length(lon), length(lat), 100))
GCMBMAchangemean_tmax <- LOCABMAchangemean_tmax <- array(NA, dim = c(length(lon), length(lat), 100))
# pr
for (b in 10:109) {
  gcm_acc <- loca_acc <- 0
  for (i in 1:26) {
    gcm_acc <- gcm_acc + GCMchange_pr[, , i] * GCMweights[i, b]
    loca_acc <- loca_acc + LOCAchange_pr[, , i] * LOCAweights[i, b]
  }
  GCMBMAchangemean_pr[, , b - 9] <- gcm_acc
  LOCABMAchangemean_pr[, , b - 9] <- loca_acc
}
# tmax
for (b in 10:109) {
  gcm_acc <- loca_acc <- 0
  for (i in 1:26) {
    gcm_acc <- gcm_acc + GCMchange_tmax[, , i] * GCMweights[i, b]
    loca_acc <- loca_acc + LOCAchange_tmax[, , i] * LOCAweights[i, b]
  }
  GCMBMAchangemean_tmax[, , b - 9] <- gcm_acc
  LOCABMAchangemean_tmax[, , b - 9] <- loca_acc
}
######
# Weighted ensemble variance of the projected changes, one field per BMA
# draw. modi::weighted.var is applied cell-by-cell over the model dimension.
#
# Fixes vs the original:
#  * `if (all(is.na(...)) == TRUE) == FALSE` untangled to `!all(is.na(...))`.
#  * the per-grid-cell message() (one line per lon/lat cell per draw -- a
#    huge I/O cost inside the hot loop) is reduced to one progress message
#    per BMA draw.
# NOTE(review): LOCA cells are skipped based on the *GCM* NA mask; if the two
# ensembles ever mask differently, LOCA variance would be silently dropped
# where only the GCM field is all-NA -- confirm the masks always agree.
GCMBMAchangevar_pr <- LOCABMAchangevar_pr <- array(NA, dim = c(length(lon), length(lat), 100))
GCMBMAchangevar_tmax <- LOCABMAchangevar_tmax <- array(NA, dim = c(length(lon), length(lat), 100))
for (b in 10:109) {
  GCMBMAvar_change <- LOCABMAvar_change <- matrix(NA, nrow = length(lon), ncol = length(lat))
  for (R in seq_along(lon)) {
    for (C in seq_along(lat)) {
      if (!all(is.na(GCMchange_pr[R, C, ]))) {
        GCMBMAvar_change[R, C] <- weighted.var(GCMchange_pr[R, C, ], w = GCMweights[, b], na.rm = TRUE)
        LOCABMAvar_change[R, C] <- weighted.var(LOCAchange_pr[R, C, ], w = LOCAweights[, b], na.rm = TRUE)
      }
    }
  }
  GCMBMAchangevar_pr[, , b - 9] <- GCMBMAvar_change
  LOCABMAchangevar_pr[, , b - 9] <- LOCABMAvar_change
  message("Finished pr variance calcs for BMA member: ", b - 9, " / ", 100)
}
for (b in 10:109) {
  GCMBMAvar_change <- LOCABMAvar_change <- matrix(NA, nrow = length(lon), ncol = length(lat))
  for (R in seq_along(lon)) {
    for (C in seq_along(lat)) {
      if (!all(is.na(GCMchange_tmax[R, C, ]))) {
        GCMBMAvar_change[R, C] <- weighted.var(GCMchange_tmax[R, C, ], w = GCMweights[, b], na.rm = TRUE)
        LOCABMAvar_change[R, C] <- weighted.var(LOCAchange_tmax[R, C, ], w = LOCAweights[, b], na.rm = TRUE)
      }
    }
  }
  GCMBMAchangevar_tmax[, , b - 9] <- GCMBMAvar_change
  LOCABMAchangevar_tmax[, , b - 9] <- LOCABMAvar_change
  message("Finished tmax variance calcs for BMA member: ", b - 9, " / ", 100)
}

# Persist all posterior summaries for downstream plotting/analysis.
save(list = c("GCMBMAhistmean_pr", "GCMBMAchangemean_pr", "GCMBMAchangevar_pr",
              "LOCABMAhistmean_pr", "LOCABMAchangemean_pr", "LOCABMAchangevar_pr",
              "GCMBMAhistmean_tmax", "GCMBMAchangemean_tmax", "GCMBMAchangevar_tmax",
              "LOCABMAhistmean_tmax", "LOCABMAchangemean_tmax", "LOCABMAchangevar_tmax"),
     file = paste0("/home/woot0002/DS_ind/BMAposterior_meansandvars_", var, "_WU", weightingused, ".Rdata"))
| /Sanderson/Sanderson_BMA_posteriorcalcs_domainMV_tmaxfull.R | no_license | amwootte/analysisscripts | R | false | false | 15,736 | r | source("/data2/3to5/I35/scripts/analysisfunctions.R")
library(ncdf4)
library(maps)
library(mapdata)
library(maptools)
library(fields)
library(sp)
library(raster)
library(rasterVis)
library(ggplot2)
library(modi)
#weighted.var <- function(x, w, na.rm = FALSE) {
# if (na.rm) {
# w <- w[i <- !is.na(x)]
# x <- x[i]
# }
# sum.w <- sum(w)
# sum.w2 <- sum(w^2)
# mean.w <- sum(x * w) / sum(w)
# (sum.w / (sum.w^2 - sum.w2)) * sum(w * (x - mean.w)^2, na.rm =
# na.rm)
#}
setwd("/home/woot0002/DS_ind/")
var = varin = "tmax"
type="ann"
weightingused = "full"
stateapplied = "full"
if(weightingused=="full"){
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,".Rdata",sep=""))
BMAweightsGCM = read.table("posterior_BMA_combo.txt")
BMAweightsLOCA = read.table("posterior_BMA_combo_LOCA.txt")
} else {
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,"_",weightingused,".Rdata",sep=""))
BMAweightsGCM = read.table(paste("posterior_BMA_combo_",var,"_",weightingused,".txt",sep=""))
BMAweightsLOCA = read.table(paste("posterior_BMA_combo_LOCA_",var,"_",weightingused,".txt",sep=""))
}
BMAweightst = t(BMAweightsGCM)
GCMhdat = cbind(GCMhdat,BMAweightst)
BMAweightst = t(BMAweightsLOCA)
LOCAhdat = cbind(LOCAhdat,BMAweightst)
GCMweights= GCMhdat
LOCAweights = LOCAhdat
# precip files
GCMfiles_pr = system("ls /home/woot0002/GCMs/regrid/pr_*histclimo*.nc",intern=TRUE)
LOCAfiles_pr = system("ls /home/woot0002/LOCA/regrid/pr_*histclimo*.nc",intern=TRUE)
GCMprojfiles_pr = system("ls /home/woot0002/GCMs/regrid/pr_*projclimo*.nc",intern=TRUE)
LOCAprojfiles_pr = system("ls /home/woot0002/LOCA/regrid/pr_*projclimo*.nc",intern=TRUE)
LIVNEHfile_pr = system("ls /home/woot0002/monthlyclimo/pr_day*livneh*.nc",intern=TRUE)
# tasmax files
GCMfiles_tmax = system("ls /home/woot0002/GCMs/regrid/tasmax_*histclimo*.nc",intern=TRUE)
LOCAfiles_tmax = system("ls /home/woot0002/LOCA/regrid/tasmax_*histclimo*.nc",intern=TRUE)
GCMprojfiles_tmax = system("ls /home/woot0002/GCMs/regrid/tasmax_*projclimo*.nc",intern=TRUE)
LOCAprojfiles_tmax = system("ls /home/woot0002/LOCA/regrid/tasmax_*projclimo*.nc",intern=TRUE)
LIVNEHfile_tmax = system("ls /home/woot0002/monthlyclimo/tasmax_day*livneh*.nc",intern=TRUE)
# subset files down
load("/home/woot0002/DS_ind/manuscript1/GCMlist.Rdata")
GCM_hfiles_pr = GCM_pfiles_pr = LOCA_hfiles_pr = LOCA_pfiles_pr = c()
GCM_hfiles_tmax = GCM_pfiles_tmax = LOCA_hfiles_tmax = LOCA_pfiles_tmax = c()
for(i in 1:length(GCMlist)){
#pr
GCM_hfiles_pr[i] = GCMfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_pr)]
GCM_pfiles_pr[i] = GCMprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_pr)]
LOCA_hfiles_pr[i] = LOCAfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_pr)]
LOCA_pfiles_pr[i] = LOCAprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_pr)]
#tmax
GCM_hfiles_tmax[i] = GCMfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_tmax)]
GCM_pfiles_tmax[i] = GCMprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_tmax)]
LOCA_hfiles_tmax[i] = LOCAfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_tmax)]
LOCA_pfiles_tmax[i] = LOCAprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_tmax)]
}
###
# create full filelist + metadata table - historical
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMhdat = filelist2[,c(2,3,4,6)]
names(GCMhdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCAhdat = filelist2[,c(2,3,4,6)]
names(LOCAhdat) = names(GCMhdat)
#All metadata
GCM = rep(NA,1)
exp = rep(NA,1)
DS = rep(NA,1)
training = "LIVNEH"
obsdat = data.frame(GCM,exp,DS,training)
GCMhdat = rbind(GCMhdat,obsdat)
LOCAhdat= rbind(LOCAhdat,obsdat)
# all files
GCMgroup_pr = c(GCM_hfiles_pr,LIVNEHfile_pr)
LOCAgroup_pr = c(LOCA_hfiles_pr,LIVNEHfile_pr)
GCMgroup_tmax = c(GCM_hfiles_tmax,LIVNEHfile_tmax)
LOCAgroup_tmax = c(LOCA_hfiles_tmax,LIVNEHfile_tmax)
###
# create full filelist + metadata table - projected
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMpdat = filelist2[,c(2,3,4,6)]
names(GCMpdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCApdat = filelist2[,c(2,3,4,6)]
names(LOCApdat) = names(GCMpdat)
# all files
GCMpgroup_pr = GCM_pfiles_pr
LOCApgroup_pr = LOCA_pfiles_pr
GCMpgroup_tmax = GCM_pfiles_tmax
LOCApgroup_tmax = LOCA_pfiles_tmax
######
# Gather data
ncvarname = "prclimo"
### GCM hist + Livneh - pr
GCMhvardatalist_pr = list()
for(i in 1:length(GCMgroup_pr)){
nctest = nc_open(GCMgroup_pr[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
GCMhvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
if(stateapplied=="full"){
GCMhvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMhvardatalist_pr[[i]],NA)
} else {
GCMhvardatalist_pr[[i]] = ifelse(regionmask==1,GCMhvardatalist_pr[[i]],NA)
}
#vardatalist[[i]] = ncvar_get(nctest,ncvarname)
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon");
nc_close(nctest)
}
sapply(GCMhvardatalist_pr,mean,na.rm=TRUE)
### GCM projected change - pr
GCMpvardatalist_pr = list()
for(i in 1:length(GCMpgroup_pr)){
nctest = nc_open(GCMpgroup_pr[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
GCMpvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
if(stateapplied=="full"){
GCMpvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMpvardatalist_pr[[i]],NA)
} else {
GCMpvardatalist_pr[[i]] = ifelse(regionmask==1,GCMpvardatalist_pr[[i]],NA)
}
#vardatalist[[i]] = ncvar_get(nctest,ncvarname)
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(GCMpvardatalist_pr,mean,na.rm=TRUE)
### LOCA historical + Livneh - pr
LOCAhvardatalist_pr = list()
for(i in 1:length(LOCAgroup_pr)){
nctest = nc_open(LOCAgroup_pr[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCAhvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
if(stateapplied=="full"){
LOCAhvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCAhvardatalist_pr[[i]],NA)
} else{
LOCAhvardatalist_pr[[i]] = ifelse(regionmask==1,LOCAhvardatalist_pr[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCAhvardatalist_pr,mean,na.rm=TRUE)
### LOCA projected change - pr
LOCApvardatalist_pr = list()
for(i in 1:length(LOCApgroup_pr)){
nctest = nc_open(LOCApgroup_pr[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCApvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
if(stateapplied=="full"){
LOCApvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCApvardatalist_pr[[i]],NA)
} else{
LOCApvardatalist_pr[[i]] = ifelse(regionmask==1,LOCApvardatalist_pr[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCApvardatalist_pr,mean,na.rm=TRUE)
######
# Gather Data 2
ncvarname = "tmaxclimo"
### GCM hist + Livneh - tmax
GCMhvardatalist_tmax = list()
for(i in 1:length(GCMgroup_tmax)){
nctest = nc_open(GCMgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
GCMhvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
GCMhvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMhvardatalist_tmax[[i]],NA)
} else{
GCMhvardatalist_tmax[[i]] = ifelse(regionmask==1,GCMhvardatalist_tmax[[i]],NA)
}
#vardatalist[[i]] = ncvar_get(nctest,ncvarname)
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon");
nc_close(nctest)
}
sapply(GCMhvardatalist_tmax,mean,na.rm=TRUE)
### GCM projected change - tmax
GCMpvardatalist_tmax = list()
for(i in 1:length(GCMpgroup_tmax)){
nctest = nc_open(GCMpgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
GCMpvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
GCMpvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMpvardatalist_tmax[[i]],NA)
} else{
GCMpvardatalist_tmax[[i]] = ifelse(regionmask==1,GCMpvardatalist_tmax[[i]],NA)
}
#vardatalist[[i]] = ncvar_get(nctest,ncvarname)
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(GCMpvardatalist_tmax,mean,na.rm=TRUE)
### LOCA historical + Livneh - tmax
LOCAhvardatalist_tmax = list()
for(i in 1:length(LOCAgroup_tmax)){
nctest = nc_open(LOCAgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCAhvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
LOCAhvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCAhvardatalist_tmax[[i]],NA)
} else{
LOCAhvardatalist_tmax[[i]] = ifelse(regionmask==1,LOCAhvardatalist_tmax[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCAhvardatalist_tmax,mean,na.rm=TRUE)
### LOCA projected change - tmax
LOCApvardatalist_tmax = list()
for(i in 1:length(LOCApgroup_tmax)){
nctest = nc_open(LOCApgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCApvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
LOCApvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCApvardatalist_tmax[[i]],NA)
} else{
LOCApvardatalist_tmax[[i]] = ifelse(regionmask==1,LOCApvardatalist_tmax[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCApvardatalist_tmax,mean,na.rm=TRUE)
#######
# projected changes - _pr
GCMchange_pr = LOCAchange_pr = GCMproj_pr = LOCAproj_pr = GCMhist_pr = LOCAhist_pr = array(NA,dim=c(length(lon),ncol=length(lat),26))
OBS_pr = LOCAhvardatalist_pr[[27]]
for(i in 1:26){
GCMchange_pr[,,i] = GCMpvardatalist_pr[[i]]-GCMhvardatalist_pr[[i]]
LOCAchange_pr[,,i] = LOCApvardatalist_pr[[i]]-LOCAhvardatalist_pr[[i]]
GCMproj_pr[,,i] = GCMpvardatalist_pr[[i]]
LOCAproj_pr[,,i] = LOCApvardatalist_pr[[i]]
GCMhist_pr[,,i] = GCMhvardatalist_pr[[i]]
LOCAhist_pr[,,i] = LOCAhvardatalist_pr[[i]]
}
#######
# projected changes - _tmax
GCMchange_tmax = LOCAchange_tmax = GCMproj_tmax = LOCAproj_tmax = GCMhist_tmax = LOCAhist_tmax = array(NA,dim=c(length(lon),ncol=length(lat),26))
OBS_tmax = LOCAhvardatalist_tmax[[27]]
for(i in 1:26){
GCMchange_tmax[,,i] = GCMpvardatalist_tmax[[i]]-GCMhvardatalist_tmax[[i]]
LOCAchange_tmax[,,i] = LOCApvardatalist_tmax[[i]]-LOCAhvardatalist_tmax[[i]]
GCMproj_tmax[,,i] = GCMpvardatalist_tmax[[i]]
LOCAproj_tmax[,,i] = LOCApvardatalist_tmax[[i]]
GCMhist_tmax[,,i] = GCMhvardatalist_tmax[[i]]
LOCAhist_tmax[,,i] = LOCAhvardatalist_tmax[[i]]
}
######
# Calculate historical means (weighted and unweighted)
GCMBMAhistmean_pr = LOCABMAhistmean_pr = array(NA,dim=c(length(lon),length(lat),100))
GCMBMAhistmean_tmax = LOCABMAhistmean_tmax = array(NA,dim=c(length(lon),length(lat),100))
for(b in 10:109){
for(i in 1:26){
## BMA
tmpG = GCMhist_pr[,,i]*GCMweights[i,b]
tmpL = LOCAhist_pr[,,i]*LOCAweights[i,b]
if(i==1){
GCMBMAmean_hist = tmpG
LOCABMAmean_hist = tmpL
} else {
GCMBMAmean_hist = GCMBMAmean_hist+tmpG
LOCABMAmean_hist = LOCABMAmean_hist+tmpL
}
}
GCMBMAhistmean_pr[,,(b-9)]=GCMBMAmean_hist
LOCABMAhistmean_pr[,,(b-9)]=LOCABMAmean_hist
}
for(b in 10:109){
for(i in 1:26){
## BMA
tmpG = GCMhist_tmax[,,i]*GCMweights[i,b]
tmpL = LOCAhist_tmax[,,i]*LOCAweights[i,b]
if(i==1){
GCMBMAmean_hist = tmpG
LOCABMAmean_hist = tmpL
} else {
GCMBMAmean_hist = GCMBMAmean_hist+tmpG
LOCABMAmean_hist = LOCABMAmean_hist+tmpL
}
}
GCMBMAhistmean_tmax[,,(b-9)]=GCMBMAmean_hist
LOCABMAhistmean_tmax[,,(b-9)]=LOCABMAmean_hist
}
######
# Calculate change means (weighted and unweighted)
GCMBMAchangemean_pr = LOCABMAchangemean_pr = array(NA,dim=c(length(lon),length(lat),100))
GCMBMAchangemean_tmax = LOCABMAchangemean_tmax = array(NA,dim=c(length(lon),length(lat),100))
for(b in 10:109){
for(i in 1:26){
## BMA
tmpG = GCMchange_pr[,,i]*GCMweights[i,b]
tmpL = LOCAchange_pr[,,i]*LOCAweights[i,b]
if(i==1){
GCMBMAmean_change = tmpG
LOCABMAmean_change = tmpL
} else {
GCMBMAmean_change = GCMBMAmean_change+tmpG
LOCABMAmean_change = LOCABMAmean_change+tmpL
}
}
GCMBMAchangemean_pr[,,(b-9)]=GCMBMAmean_change
LOCABMAchangemean_pr[,,(b-9)]=LOCABMAmean_change
}
for(b in 10:109){
for(i in 1:26){
## BMA
tmpG = GCMchange_tmax[,,i]*GCMweights[i,b]
tmpL = LOCAchange_tmax[,,i]*LOCAweights[i,b]
if(i==1){
GCMBMAmean_change = tmpG
LOCABMAmean_change = tmpL
} else {
GCMBMAmean_change = GCMBMAmean_change+tmpG
LOCABMAmean_change = LOCABMAmean_change+tmpL
}
}
GCMBMAchangemean_tmax[,,(b-9)]=GCMBMAmean_change
LOCABMAchangemean_tmax[,,(b-9)]=LOCABMAmean_change
}
######
# ensemble variance
GCMBMAchangevar_pr = LOCABMAchangevar_pr = array(NA,dim=c(length(lon),length(lat),100))
GCMBMAchangevar_tmax = LOCABMAchangevar_tmax = array(NA,dim=c(length(lon),length(lat),100))
for(b in 10:109){
GCMBMAvar_change = LOCABMAvar_change = matrix(NA,nrow=length(lon),ncol=length(lat))
for(R in 1:length(lon)){
for(C in 1:length(lat)){
if(all(is.na(GCMchange_pr[R,C,])==TRUE)==FALSE){
GCMBMAvar_change[R,C] = weighted.var(GCMchange_pr[R,C,],w=GCMweights[,b],na.rm=TRUE)
LOCABMAvar_change[R,C] = weighted.var(LOCAchange_pr[R,C,],w=LOCAweights[,b],na.rm=TRUE)
message("Finished calcs for R: ",R," and C: ",C)
}
}
}
GCMBMAchangevar_pr[,,(b-9)]=GCMBMAvar_change
LOCABMAchangevar_pr[,,(b-9)]=LOCABMAvar_change
message("Finished calcs for BMA member: ",b-9," / ",100)
}
for(b in 10:109){
GCMBMAvar_change = LOCABMAvar_change = matrix(NA,nrow=length(lon),ncol=length(lat))
for(R in 1:length(lon)){
for(C in 1:length(lat)){
if(all(is.na(GCMchange_tmax[R,C,])==TRUE)==FALSE){
GCMBMAvar_change[R,C] = weighted.var(GCMchange_tmax[R,C,],w=GCMweights[,b],na.rm=TRUE)
LOCABMAvar_change[R,C] = weighted.var(LOCAchange_tmax[R,C,],w=LOCAweights[,b],na.rm=TRUE)
message("Finished calcs for R: ",R," and C: ",C)
}
}
}
GCMBMAchangevar_tmax[,,(b-9)]=GCMBMAvar_change
LOCABMAchangevar_tmax[,,(b-9)]=LOCABMAvar_change
message("Finished calcs for BMA member: ",b-9," / ",100)
}
save(list=c("GCMBMAhistmean_pr","GCMBMAchangemean_pr","GCMBMAchangevar_pr","LOCABMAhistmean_pr","LOCABMAchangemean_pr","LOCABMAchangevar_pr",
"GCMBMAhistmean_tmax","GCMBMAchangemean_tmax","GCMBMAchangevar_tmax","LOCABMAhistmean_tmax","LOCABMAchangemean_tmax","LOCABMAchangevar_tmax"),file=paste("/home/woot0002/DS_ind/BMAposterior_meansandvars_",var,"_WU",weightingused,".Rdata",sep=""))
|
#' Calculates the repayment for a loan
#'
#' Computes the fixed instalment that fully amortises a loan over its term,
#' given the interest rate per period, the number of periods, and the amount
#' advanced. Designed to be equivalent to the Excel PMT function with a
#' fixed rate, FV = 0 and payments at the end of each period; the result is
#' rounded to 2dp.
#'
#' @param rate The nominal interest rate per period (should be positive)
#' @param nper Number of periods
#' @param pv Present value i.e. loan advance (should be positive)
#' @return pmt Instalment per period (should be negative)
#'
#' @keywords financial pv pmt
#' @seealso \code{\link{PV}} \code{\link{RATE}}
#' @family finance
#' @export
#'
#' @examples
#' PMT(0.1,12,3000) # =-440.29 taken from excel
#'
#' df<-data.frame(rate=c(.1,.2),nper=c(12,24),pv=c(3000,1000))
#' PMT(df$rate,df$nper,df$pv) # =-440.29,-202.55 taken from excel
PMT <- function(rate, nper, pv) {
  # Input sanity checks; vectorised inputs recycle as usual.
  stopifnot(rate > 0, rate < 1, nper >= 1, pv > 0)
  # Discount factor over the life of the loan (annuity denominator).
  discount <- 1 - 1/(1 + rate)^nper
  # Negative sign: the instalment is a cash outflow for the borrower.
  round(-pv * rate/discount, 2)
}
| /optiRum/R/PMT.R | no_license | ingted/R-Examples | R | false | false | 1,150 | r | #' Calculates the repayment for a loan
#'
#' Computes the fixed instalment that fully amortises a loan over its term,
#' given the interest rate per period, the number of periods, and the amount
#' advanced. Designed to be equivalent to the Excel PMT function with a
#' fixed rate, FV = 0 and payments at the end of each period; the result is
#' rounded to 2dp.
#'
#' @param rate The nominal interest rate per period (should be positive)
#' @param nper Number of periods
#' @param pv Present value i.e. loan advance (should be positive)
#' @return pmt Instalment per period (should be negative)
#'
#' @keywords financial pv pmt
#' @seealso \code{\link{PV}} \code{\link{RATE}}
#' @family finance
#' @export
#'
#' @examples
#' PMT(0.1,12,3000) # =-440.29 taken from excel
#'
#' df<-data.frame(rate=c(.1,.2),nper=c(12,24),pv=c(3000,1000))
#' PMT(df$rate,df$nper,df$pv) # =-440.29,-202.55 taken from excel
PMT <- function(rate, nper, pv) {
  # Input sanity checks; vectorised inputs recycle as usual.
  stopifnot(rate > 0, rate < 1, nper >= 1, pv > 0)
  # Discount factor over the life of the loan (annuity denominator).
  denom <- 1 - 1/(1 + rate)^nper
  # Negative sign: the instalment is a cash outflow for the borrower.
  round(-pv * rate/denom, 2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funs-ggplot.R
\name{ebl}
\alias{ebl}
\title{ebl}
\usage{
ebl()
}
\description{
See \code{ggplot2::\link[ggplot2:element_blank]{element_blank}} for details.
}
\keyword{internal}
| /man/ebl.Rd | permissive | mkearney/lop | R | false | true | 255 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funs-ggplot.R
\name{ebl}
\alias{ebl}
\title{ebl}
\usage{
ebl()
}
\description{
See \code{ggplot2::\link[ggplot2:element_blank]{element_blank}} for details.
}
\keyword{internal}
|
## taxonomic disparity for big tree
# Summarises a per-species taxonomic disparity index (TDI) for Quercus on the
# all-tips tree `tr.big2` (created elsewhere in this project), then writes the
# full matrix and a human-readable stats summary to the OUT directory.
library(ape)
# Pull the helper functions from GitHub only if they are not already loaded.
if(!exists('summary.by.elements')) {
  source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/label.elements.R')
  source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/summary.by.elements.R')
  source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/tips.expected.R')
}
# Keep only Quercus tips (drop everything whose label lacks "Quercus"), then
# summarise by the first two label fields (split on space, underscore or "|").
tr.big2.summary <-
  summary.by.elements(drop.tip(tr.big2, grep('Quercus', tr.big2$tip.label, invert = T)),
                      delim= '[ _|]', fixed = F, returnNum=1:2)
# Exclude undetermined ("Quercus sp.") and undescribed ("Quercus new") taxa.
tr.big2.summary$disparity.mat <-
  tr.big2.summary$disparity.mat[grep('Quercus sp.|Quercus new', row.names(tr.big2.summary$disparity.mat), invert = T), ]
write.csv(tr.big2.summary$disparity.mat,
          '../OUT/SUPPLEMENTS/TABLE.S.tr.big2.TDI.matrix.csv')
tr.big2.summary$TDI.clean <- as.data.frame(
  tr.big2.summary$disparity.mat
  )
# Drop rows whose names still contain a literal "|" (multi-element leftovers),
# leaving one clean row per species.
tr.big2.summary$TDI.clean <- tr.big2.summary$TDI.clean[
  grep('|', row.names(tr.big2.summary$TDI.clean), fixed = T, invert = T),
  ]
# Plain-text summary block written alongside the matrix.
tr.big2.summary$stats <- c(
  'All-tips tree stats',
  '-------------------',
  paste('Total Quercus spp:', dim(tr.big2.summary$TDI.clean)[1]),
  paste('Total Quercus with 1 sample:', sum(tr.big2.summary$TDI.clean$count == 1)),
  paste('Total Quercus with >1 sample:', sum(tr.big2.summary$TDI.clean$count > 1)),
  paste('Mean number of samples for Quercus with > 1 sample:',
    round(mean(tr.big2.summary$TDI.clean$count[which(tr.big2.summary$TDI.clean$count > 1)]), 2),
    '+/-',
    round(sd(tr.big2.summary$TDI.clean$count[which(tr.big2.summary$TDI.clean$count > 1)]), 2),
    '(sd)'),
  paste('Total Quercus with TDI > 0:', sum(tr.big2.summary$TDI.clean$disparity > 0)),
  paste('Total Quercus with TDI 10 or more:', sum(tr.big2.summary$TDI.clean$disparity >= 10))
)
writeLines(tr.big2.summary$stats, '../OUT/ANALYSIS.PRODUCTS/treeSummaryStats.tr.big2.txt')
| /ANALYSES/2019-06_globalOaks-gitUpdate/SCRIPTS/00-8.taxonomicDisparityIndex.R | no_license | kieranalthaus/global-oaks-2019 | R | false | false | 1,919 | r | ## taxonomic disparity for big tree
library(ape)
if(!exists('summary.by.elements')) {
source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/label.elements.R')
source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/summary.by.elements.R')
source('https://raw.githubusercontent.com/andrew-hipp/morton/master/R/tips.expected.R')
}
tr.big2.summary <-
summary.by.elements(drop.tip(tr.big2, grep('Quercus', tr.big2$tip.label, invert = T)),
delim= '[ _|]', fixed = F, returnNum=1:2)
tr.big2.summary$disparity.mat <-
tr.big2.summary$disparity.mat[grep('Quercus sp.|Quercus new', row.names(tr.big2.summary$disparity.mat), invert = T), ]
write.csv(tr.big2.summary$disparity.mat,
'../OUT/SUPPLEMENTS/TABLE.S.tr.big2.TDI.matrix.csv')
tr.big2.summary$TDI.clean <- as.data.frame(
tr.big2.summary$disparity.mat
)
tr.big2.summary$TDI.clean <- tr.big2.summary$TDI.clean[
grep('|', row.names(tr.big2.summary$TDI.clean), fixed = T, invert = T),
]
tr.big2.summary$stats <- c(
'All-tips tree stats',
'-------------------',
paste('Total Quercus spp:', dim(tr.big2.summary$TDI.clean)[1]),
paste('Total Quercus with 1 sample:', sum(tr.big2.summary$TDI.clean$count == 1)),
paste('Total Quercus with >1 sample:', sum(tr.big2.summary$TDI.clean$count > 1)),
paste('Mean number of samples for Quercus with > 1 sample:',
round(mean(tr.big2.summary$TDI.clean$count[which(tr.big2.summary$TDI.clean$count > 1)]), 2),
'+/-',
round(sd(tr.big2.summary$TDI.clean$count[which(tr.big2.summary$TDI.clean$count > 1)]), 2),
'(sd)'),
paste('Total Quercus with TDI > 0:', sum(tr.big2.summary$TDI.clean$disparity > 0)),
paste('Total Quercus with TDI 10 or more:', sum(tr.big2.summary$TDI.clean$disparity >= 10))
)
writeLines(tr.big2.summary$stats, '../OUT/ANALYSIS.PRODUCTS/treeSummaryStats.tr.big2.txt')
|
# Minimal smoke-test script: prints a greeting and the sum of two numbers.
print("hello")
y <- 5  # use `<-` for assignment per R convention (was `=`)
z <- 7
print(y + z)  # prints 12, exactly as before
| /test.R | no_license | abdoraven/Temporary_add_to_version_control | R | false | false | 34 | r | print("hello")
y=5
z=7
print(y+z)
|
complexity =c(4,14,26)
library(splines)
Q1_fun_01 <- function(x) {
  # True regression curve #1 (vectorised): two cosine terms plus a sharp
  # negative bump centred at the origin.
  0.3 * cos(3 * pi * x) - 0.4 * cos(4 * pi * x) - 10 / (x^2 + 1)
}
Q1_fun_02 <- function(x) {
  # True regression curve #2 (vectorised). Only defined for x > 0.7, since
  # log(x - 0.7) must have a positive argument.
  0.4 * log(x^4 + log(x - 0.7) + exp(3 * x))
}
generate_y <- function(x, func, sig) {
  # One noisy observation of the chosen target function at scalar `x`:
  # func == 1 selects Q1_fun_01, anything else selects Q1_fun_02.
  # `sig` is the standard deviation of the additive Gaussian noise.
  signal <- if (func == 1) Q1_fun_01(x) else Q1_fun_02(x)
  signal + rnorm(1, 0, sig)
}
# x_sample = sort(runif(number_of_sample, min=-10, max=10))
generate_samples <- function(x_sample, func, sig) {
  # Draw one noisy observation per element of x_sample via generate_y().
  # Returns a 1-d array (same shape the original produced with array()).
  obs <- array(0, dim = length(x_sample))
  for (idx in seq_along(x_sample)) {
    obs[idx] <- generate_y(x_sample[idx], func, sig)
  }
  obs
}
# ---- Experiment setup ----
# 1000 test points per function, 100 Monte-Carlo training replicates below,
# noise standard deviation 2.
number_of_sample = 1000
number_of_simulations = 100
sig = 2 #sigma value for noise
# Fixed test grids: y_real* is the noiseless truth, y_test* a noisy draw.
x_test1 <-sort(runif(number_of_sample, min=-10, max=10))
y_real1 <-Q1_fun_01(x_test1)
y_test1 <- generate_samples(x_test1, 1,2)
x_test2 <-sort(runif(number_of_sample, min=1, max=10))
y_real2 <-Q1_fun_02(x_test2)
y_test2 <- generate_samples(x_test2, 2,2)
##############plot for model fitting function 1
# Overlay polynomial fits of degree 4/14/26 on the noisy sample.
plot(x_test1, y_test1,
     main="Q1_fun_01",col ='yellow',
     ylab="hat(y)", cex.lab = 1.5)
model11 <- lm(y_test1 ~ poly(x_test1,4))
model12 <- lm(y_test1 ~ poly(x_test1,14))
model13 <- lm(y_test1 ~ poly(x_test1,26))
# NOTE(review): the newdata column is named `x`, which does not match the
# model predictor `x_test1`, so predict() silently ignores newdata and
# returns in-sample fitted values. Harmless here because newdata equals the
# training grid, but the same pattern in the simulation loops below is not.
lines(x_test1, predict(model11, data.frame(x=x_test1)), col='black')
lines(x_test1, predict(model12, data.frame(x=x_test1)), col='green')
lines(x_test1, predict(model13, data.frame(x=x_test1)), col='blue')
legend("bottomright",c("degree 4","degree 14", "degree 26"), col=c("black","green","blue"),lwd=3)
###################plot for model fitting function 2
# Same fit visualisation for the second target function.
plot(x_test2, y_test2,
     main="Q1_fun_02",col ='yellow',
     ylab="hat(y)", cex.lab = 1.5)
model21 <- lm(y_test2 ~ poly(x_test2,4))
model22 <- lm(y_test2 ~ poly(x_test2,14))
model23 <- lm(y_test2 ~ poly(x_test2,26))
lines(x_test2, predict(model21, data.frame(x=x_test2)), col='black')
lines(x_test2, predict(model22, data.frame(x=x_test2)), col='green')
lines(x_test2, predict(model23, data.frame(x=x_test2)), col='blue')
legend("bottomright",c("degree 4","degree 14", "degree 26"), col=c("black","green","blue"),lwd=3)
######################################################################Plots for bias variance tradeoff with complexity for function1
# Monte-Carlo estimate of bias and variance of polynomial fits (degrees in
# `complexity`) to target function 1, averaged over the fixed test grid.
allbias1 = matrix(0,1,length(complexity))
allvariance1 = matrix(0,1,length(complexity))
for(com in 1:length(complexity)){
  # predictions[s, ]: fit from training replicate s evaluated point-by-point
  # (one row per simulation, one column per test point).
  predictions = matrix(0,number_of_simulations,number_of_sample)
  for (simulation in 1:number_of_simulations) {
      x_train <-sort(runif(number_of_sample, min=-10, max=10));
      y_train <- generate_samples(x_train, 1,sig);
      model <- lm(y_train ~ poly(x_train,complexity[com]));
      # NOTE(review): the newdata column is named `x` but the model predictor
      # is `x_train`, so predict() silently ignores newdata and returns the
      # fitted values at x_train, NOT predictions at x_test1. To evaluate on
      # the test grid this should be data.frame(x_train = x_test1).
      pred = predict(model, data.frame(x=x_test1));
      #print(typeof(p))
      #print(class(pred));
      predictions[simulation,]<-pred;
  }
  # l: pointwise mean prediction across the simulations.
  l = colMeans(predictions);
  # "Bias" here is the mean ABSOLUTE deviation of the average fit from the
  # true curve (not squared bias).
  biasums = 0
  for(j in 1:number_of_sample){
    biasums = biasums + abs(l[j]-y_real1[j]);
  }
  bias = (biasums/number_of_sample);
  #finding variance
  # Variance: spread of replicate predictions around their pointwise mean,
  # averaged over simulations and then over test points.
  varsum = 0;
  for(n in 1:number_of_sample){
    sums = 0;
    for(k in 1:number_of_simulations){
      sums = sums + (l[n]-predictions[k,n])^2
    }
    varsum =varsum + sums/number_of_simulations
  }
  variance = varsum/number_of_sample
  allbias1[1,com]<-bias
  allvariance1[1,com]<-variance
}
# Bias (black) and variance (blue) versus model complexity.
plot(complexity, allvariance1[1,], ylim =c(0,max(max(allbias1),max(allvariance1))),
     main="plot for Q1_fun_01",
     ylab="model behaviour",
     type = "l",
     col="blue", cex.lab = 1.5)
lines(complexity,allbias1[1,],col='black',lwd=1)
legend("bottomright",c("bias","variance"), col=c("black","blue"), lwd=3)
######################################################################Plots for bias variance tradeoff with complexity for function2
# Same bias/variance experiment for target function 2 (x drawn from [1, 10]).
allbias2 = matrix(0,1,length(complexity))
allvariance2 = matrix(0,1,length(complexity))
for(com in 1:length(complexity)){
  predictions = matrix(0,number_of_simulations,number_of_sample)
  for (simulation in 1:number_of_simulations) {
      x_train <-sort(runif(number_of_sample, min=1, max=10));
      y_train <- generate_samples(x_train, 2,sig);
      model <- lm(y_train ~ poly(x_train,complexity[com]));
      # NOTE(review): same newdata mismatch as for function 1 -- predict()
      # ignores the `x` column and returns fitted values at x_train rather
      # than predictions at x_test2.
      pred = predict(model, data.frame(x=x_test2));
      #print(typeof(p))
      #print(class(pred));
      predictions[simulation,]<-pred;
  }
  l = colMeans(predictions);
  # Mean absolute deviation of the average fit from the true curve.
  biasums = 0
  for(j in 1:number_of_sample){
    biasums = biasums + abs(l[j]-y_real2[j]);
  }
  bias = (biasums/number_of_sample);
  # bias = abs(l[1]-y_real2[1])
  #finding variance
  varsum = 0
  for(n in 1:number_of_sample){
    sums = 0
    for(k in 1:number_of_simulations){
      sums = sums + (l[n]-predictions[k,n])^2
    }
    varsum =varsum + sums/number_of_simulations
  }
  print(varsum)
  variance = varsum/number_of_sample
  # sums = 0
  # for(k in 1:number_of_simulations){
  #   sums = sums + (l[1]-predictions[k,1])^2
  # }
  # variance = sums/number_of_simulations
  allbias2[1,com]<-bias
  allvariance2[1,com]<-variance
}
plot(complexity, allvariance2[1,], ylim =c(0,max(max(allbias2),max(allvariance2))),
     main="plot for Q1_fun_02",
     ylab="model behaviour",
     type = "l",
     col="blue", cex.lab = 1.5)
lines(complexity,allbias2[1,],col='black',lwd=1)
legend("bottomright",c("bias","variance"), col=c("black","blue"), lwd=3)
print(max(allvariance2))
print(l)
# NOTE(review): the block below looks like leftover debugging -- it
# recomputes the variance from `l` and `predictions` of the LAST complexity
# value only, and the result is never used.
varsum = 0;
for(n in 1:number_of_sample){
  sums = 0;
  for(k in 1:number_of_simulations){
    sums = sums + (l[n]-predictions[k,n])^2
  }
  varsum =varsum + sums/number_of_simulations
}
variance = varsum/number_of_sample
print(variance) | /Assignment_1/question1.R | no_license | rajanskumarsoni/Concepts-in-Statistical-Learning-Theory | R | false | false | 5,768 | r |
complexity =c(4,14,26)
library(splines)
Q1_fun_01 <- function(x) {
y = 0.3*cos(3*pi*x) - 0.4* cos(4*pi*x) - 10/(x**2 + 1)
return(y)
}
Q1_fun_02<- function(x){
y = 0.4*log(x**4+log(x-0.7)+exp(3*x))
return(y)
}
generate_y <-function(x, func,sig){
if(func == 1){
y = Q1_fun_01(x) + rnorm(1, 0, sig)
}else{
y = Q1_fun_02(x) + rnorm(1, 0, sig)
}
return(y)
}
# x_sample = sort(runif(number_of_sample, min=-10, max=10))
generate_samples <- function(x_sample,func,sig){
my.array <- array(0, dim=c(length(x_sample)))
for (i in 1 : length(x_sample)) {
my.array[i] = generate_y(x_sample[i], func, sig)
}
return(my.array)
}
number_of_sample = 1000
number_of_simulations = 100
sig = 2 #sigma value for noise
x_test1 <-sort(runif(number_of_sample, min=-10, max=10))
y_real1 <-Q1_fun_01(x_test1)
y_test1 <- generate_samples(x_test1, 1,2)
x_test2 <-sort(runif(number_of_sample, min=1, max=10))
y_real2 <-Q1_fun_02(x_test2)
y_test2 <- generate_samples(x_test2, 2,2)
##############plot for model fitting function 1
plot(x_test1, y_test1,
main="Q1_fun_01",col ='yellow',
ylab="hat(y)", cex.lab = 1.5)
model11 <- lm(y_test1 ~ poly(x_test1,4))
model12 <- lm(y_test1 ~ poly(x_test1,14))
model13 <- lm(y_test1 ~ poly(x_test1,26))
lines(x_test1, predict(model11, data.frame(x=x_test1)), col='black')
lines(x_test1, predict(model12, data.frame(x=x_test1)), col='green')
lines(x_test1, predict(model13, data.frame(x=x_test1)), col='blue')
legend("bottomright",c("degree 4","degree 14", "degree 26"), col=c("black","green","blue"),lwd=3)
###################plot for model fitting function 2
plot(x_test2, y_test2,
main="Q1_fun_02",col ='yellow',
ylab="hat(y)", cex.lab = 1.5)
model21 <- lm(y_test2 ~ poly(x_test2,4))
model22 <- lm(y_test2 ~ poly(x_test2,14))
model23 <- lm(y_test2 ~ poly(x_test2,26))
lines(x_test2, predict(model21, data.frame(x=x_test2)), col='black')
lines(x_test2, predict(model22, data.frame(x=x_test2)), col='green')
lines(x_test2, predict(model23, data.frame(x=x_test2)), col='blue')
legend("bottomright",c("degree 4","degree 14", "degree 26"), col=c("black","green","blue"),lwd=3)
######################################################################Plots for bias variance tradeoff with complexity for function1
allbias1 = matrix(0,1,length(complexity))
allvariance1 = matrix(0,1,length(complexity))
for(com in 1:length(complexity)){
predictions = matrix(0,number_of_simulations,number_of_sample)
for (simulation in 1:number_of_simulations) {
x_train <-sort(runif(number_of_sample, min=-10, max=10));
y_train <- generate_samples(x_train, 1,sig);
model <- lm(y_train ~ poly(x_train,complexity[com]));
pred = predict(model, data.frame(x=x_test1));
#print(typeof(p))
#print(class(pred));
predictions[simulation,]<-pred;
}
l = colMeans(predictions);
biasums = 0
for(j in 1:number_of_sample){
biasums = biasums + abs(l[j]-y_real1[j]);
}
bias = (biasums/number_of_sample);
#finding variance
varsum = 0;
for(n in 1:number_of_sample){
sums = 0;
for(k in 1:number_of_simulations){
sums = sums + (l[n]-predictions[k,n])^2
}
varsum =varsum + sums/number_of_simulations
}
variance = varsum/number_of_sample
allbias1[1,com]<-bias
allvariance1[1,com]<-variance
}
plot(complexity, allvariance1[1,], ylim =c(0,max(max(allbias1),max(allvariance1))),
main="plot for Q1_fun_01",
ylab="model behaviour",
type = "l",
col="blue", cex.lab = 1.5)
lines(complexity,allbias1[1,],col='black',lwd=1)
legend("bottomright",c("bias","variance"), col=c("black","blue"), lwd=3)
######################################################################Plots for bias variance tradeoff with complexity for function2
allbias2 = matrix(0,1,length(complexity))
allvariance2 = matrix(0,1,length(complexity))
for(com in 1:length(complexity)){
predictions = matrix(0,number_of_simulations,number_of_sample)
for (simulation in 1:number_of_simulations) {
x_train <-sort(runif(number_of_sample, min=1, max=10));
y_train <- generate_samples(x_train, 2,sig);
model <- lm(y_train ~ poly(x_train,complexity[com]));
pred = predict(model, data.frame(x=x_test2));
#print(typeof(p))
#print(class(pred));
predictions[simulation,]<-pred;
}
l = colMeans(predictions);
biasums = 0
for(j in 1:number_of_sample){
biasums = biasums + abs(l[j]-y_real2[j]);
}
bias = (biasums/number_of_sample);
# bias = abs(l[1]-y_real2[1])
#finding variance
varsum = 0
for(n in 1:number_of_sample){
sums = 0
for(k in 1:number_of_simulations){
sums = sums + (l[n]-predictions[k,n])^2
}
varsum =varsum + sums/number_of_simulations
}
print(varsum)
variance = varsum/number_of_sample
# sums = 0
# for(k in 1:number_of_simulations){
# sums = sums + (l[1]-predictions[k,1])^2
# }
# variance = sums/number_of_simulations
allbias2[1,com]<-bias
allvariance2[1,com]<-variance
}
plot(complexity, allvariance2[1,], ylim =c(0,max(max(allbias2),max(allvariance2))),
main="plot for Q1_fun_02",
ylab="model behaviour",
type = "l",
col="blue", cex.lab = 1.5)
lines(complexity,allbias2[1,],col='black',lwd=1)
legend("bottomright",c("bias","variance"), col=c("black","blue"), lwd=3)
print(max(allvariance2))
print(l)
varsum = 0;
for(n in 1:number_of_sample){
sums = 0;
for(k in 1:number_of_simulations){
sums = sums + (l[n]-predictions[k,n])^2
}
varsum =varsum + sums/number_of_simulations
}
variance = varsum/number_of_sample
print(variance) |
#' @title Information Matrix-Based Information Criterion
#'
#' @description Calculates Information Matrix-Based Information Criterion (IBIC) for "lm" and "glm" objects.
#'
#' @param model a "lm" or "glm" object
#'
#' @details
#' IBIC (Bollen et al., 2012) is calculated as
#'
#' \deqn{-2LL(theta) + klog(n/(2pi)) + log(|F|)}
#'
#' \eqn{F} is the fisher information matrix.
#'
#' While calculating the Fisher information matrix (\eqn{F}), we used
#' the joint parameters (\eqn{beta,sigma^2}) of the models.
#'
#' @return IBIC measurement of the model
#'
#' @importFrom stats logLik
#' @examples
#' x1 <- rnorm(100, 3, 2)
#' x2 <- rnorm(100, 5, 3)
#' x3 <- rnorm(100, 67, 5)
#' err <- rnorm(100, 0, 4)
#'
#' ## round so we can use it for Poisson regression
#' y <- round(3 + 2*x1 - 5*x2 + 8*x3 + err)
#'
#' m1 <- lm(y~x1 + x2 + x3)
#' m2 <- glm(y~x1 + x2 + x3, family = "gaussian")
#' m3 <- glm(y~x1 + x2 + x3, family = "poisson")
#'
#'IBIC(m1)
#'IBIC(m2)
#'IBIC(m3)
#'
#' @references
#' Bollen, K. A., Ray, S., Zavisca, J., & Harden, J. J. (2012). A comparison of Bayes factor approximation methods including two new methods. Sociological Methods & Research, 41(2), 294-324.
#'
#' @export
IBIC <- function(model) {
  # Information Matrix-Based Information Criterion (Bollen et al., 2012):
  #   IBIC = -2*logLik + k*log(n / (2*pi)) + log|F|
  # where k is the number of estimated parameters and F is the Fisher
  # information of the joint parameters (beta, sigma^2).
  LL <- logLik(object = model)
  df <- attr(LL, "df")          # k: parameter count reported by logLik()
  n <- length(model$residuals)  # sample size
  # reverse_fisher() supplies the inverse information, so solve() of it
  # recovers F itself.
  # FIX: the penalty was mis-parenthesised as df*log(n/(2*pi) + log|F|),
  # folding the log-determinant inside log(n/(2*pi)); the documented formula
  # adds the two terms separately.
  c(-2*LL + df*log(n/(2*pi)) + log(det(solve(reverse_fisher(model)))))
}
| /R/IBIC.R | no_license | cran/ICglm | R | false | false | 1,420 | r | #' @title Information Matrix-Based Information Criterion
#'
#' @description Calculates Information Matrix-Based Information Criterion (IBIC) for "lm" and "glm" objects.
#'
#' @param model a "lm" or "glm" object
#'
#' @details
#' IBIC (Bollen et al., 2012) is calculated as
#'
#' \deqn{-2LL(theta) + klog(n/(2pi)) + log(|F|)}
#'
#' \eqn{F} is the fisher information matrix.
#'
#' While calculating the Fisher information matrix (\eqn{F}), we used
#' the joint parameters (\eqn{beta,sigma^2}) of the models.
#'
#' @return IBIC measurement of the model
#'
#' @importFrom stats logLik
#' @examples
#' x1 <- rnorm(100, 3, 2)
#' x2 <- rnorm(100, 5, 3)
#' x3 <- rnorm(100, 67, 5)
#' err <- rnorm(100, 0, 4)
#'
#' ## round so we can use it for Poisson regression
#' y <- round(3 + 2*x1 - 5*x2 + 8*x3 + err)
#'
#' m1 <- lm(y~x1 + x2 + x3)
#' m2 <- glm(y~x1 + x2 + x3, family = "gaussian")
#' m3 <- glm(y~x1 + x2 + x3, family = "poisson")
#'
#'IBIC(m1)
#'IBIC(m2)
#'IBIC(m3)
#'
#' @references
#' Bollen, K. A., Ray, S., Zavisca, J., & Harden, J. J. (2012). A comparison of Bayes factor approximation methods including two new methods. Sociological Methods & Research, 41(2), 294-324.
#'
#' @export
IBIC <- function(model) {
LL <- logLik(object = model)
df <- attr(LL, "df")
n <- length(model$residuals)
c(-2*LL + df*log(n/(2*pi) + log(det(solve(reverse_fisher(model))))))
}
|
#
# Copyright 2007-2018 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created by Mike Cheung
# Random-effects meta-analysis fitted as a 1-variable RAM model with FIML,
# using the definition variable "data.vi" to fix each row's known sampling
# variance. Regression test: -2LL must reproduce the reference value 27.8.
require(OpenMx)
# yi: observed effect sizes; vi: known sampling variance of each yi.
yi <- c(-0.264,-0.230,0.166,0.173,0.225,0.291,0.309,0.435,0.476,0.617,0.651,0.718,0.740,0.745,0.758,0.922,0.938,0.962,1.522,1.844)
vi <- c(0.086,0.106,0.055,0.084,0.071,0.078,0.051,0.093,0.149,0.095,0.110,0.054,0.081,0.084,0.087,0.103,0.113,0.083,0.100,0.141)
my.df <- cbind(yi,vi)
test <- mxModel("test", type="default",
    # No structural paths: A is a fixed 1x1 zero matrix.
    mxMatrix("Zero", ncol=1, nrow=1, free=F, name="Amat"),
    mxAlgebra(Amat[1,1], name="A"), # just to test with A as an algebra
    # V is fixed per data row to that study's sampling variance (data.vi).
    mxMatrix("Full", ncol=1, nrow=1, free=F, values=0, labels="data.vi", name="V"),
    # Tau: free between-study variance, bounded away from zero.
    mxMatrix("Full", ncol=1, nrow=1, free=T, values=0.1, lbound=0.0000001, name="Tau"),
    # M: free pooled mean effect.
    mxMatrix("Full", ncol=1, nrow=1, free=T, values=0, name="M"),
    mxMatrix("Iden", ncol=1, nrow=1, name="F"),
    # Total variance of each observation = known vi + Tau.
    mxAlgebra(V+Tau, name="S"),
    mxFitFunctionML(),mxExpectationRAM("A", "S", "F", "M", dimnames=c("yi")),
    mxData(observed=my.df, type="raw")
)
out <- mxRun(test, suppressWarnings=TRUE)
# Check the optimised -2 log-likelihood against the known reference value.
omxCheckCloseEnough(mxEval(objective, out), 27.8, 0.01)
| /SilveR/R-3.5.1/library/OpenMx/models/passing/RAM-FIML-defvars.R | permissive | kevinmiles/SilveR | R | false | false | 1,623 | r | #
# Copyright 2007-2018 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Created by Mike Cheung
require(OpenMx)
yi <- c(-0.264,-0.230,0.166,0.173,0.225,0.291,0.309,0.435,0.476,0.617,0.651,0.718,0.740,0.745,0.758,0.922,0.938,0.962,1.522,1.844)
vi <- c(0.086,0.106,0.055,0.084,0.071,0.078,0.051,0.093,0.149,0.095,0.110,0.054,0.081,0.084,0.087,0.103,0.113,0.083,0.100,0.141)
my.df <- cbind(yi,vi)
test <- mxModel("test", type="default",
mxMatrix("Zero", ncol=1, nrow=1, free=F, name="Amat"),
mxAlgebra(Amat[1,1], name="A"), # just to test with A as an algebra
mxMatrix("Full", ncol=1, nrow=1, free=F, values=0, labels="data.vi", name="V"),
mxMatrix("Full", ncol=1, nrow=1, free=T, values=0.1, lbound=0.0000001, name="Tau"),
mxMatrix("Full", ncol=1, nrow=1, free=T, values=0, name="M"),
mxMatrix("Iden", ncol=1, nrow=1, name="F"),
mxAlgebra(V+Tau, name="S"),
mxFitFunctionML(),mxExpectationRAM("A", "S", "F", "M", dimnames=c("yi")),
mxData(observed=my.df, type="raw")
)
out <- mxRun(test, suppressWarnings=TRUE)
omxCheckCloseEnough(mxEval(objective, out), 27.8, 0.01)
|
#' Plot faceted violin plots for scenarios, showing performance metrics
#'
#' @param ps A plot setup object as output by [setup_mse_plot_objects()]
#' @param quants Quantile values as limits to remove tail data from plot
#'
#' @return A [ggplot2::ggplot()] object
#' @export
#' @importFrom forcats fct_relevel
#' @importFrom PNWColors pnw_palette
plot_violins <- function(ps = NULL,
quants = c(0.05, 0.95)){
inds <- c("SSB < 0.10 SSB0",
"0.10 < SSB < 0.4 SSB0",
"SSB > 0.4 SSB0",
"AAV",
"Short term catch",
"Long term catch")
d <- ps$df_ssb_catch_indicators
stopifnot("value" %in% names(d))
stopifnot("scenario" %in% names(d))
df <- d %>%
filter(indicator %in% inds) %>%
mutate(indicator = fct_relevel(indicator, inds))
# Remove tails of data
qs <- df %>%
group_by(scenario, indicator) %>%
summarize(qlow = quantile(value, quants[1]), qhigh = quantile(value, quants[2])) %>%
ungroup()
df <- df %>% left_join(qs, by = c("scenario", "indicator")) %>%
group_by(scenario, indicator) %>%
filter(value >= qlow & value <= qhigh) %>%
ungroup() %>%
select(-c(qlow, qhigh))
# Standardize short and long-term catch values
df_st <- df %>%
filter(indicator == "Short term catch") %>%
mutate(value = value / max(value))
df_lt <- df %>%
filter(indicator == "Long term catch") %>%
mutate(value = value / max(value))
df <- df %>%
filter(!indicator %in% c("Short term catch", "Long term catch")) %>%
bind_rows(df_st, df_lt)
cols <- ps$cols
g <- ggplot(df, aes(x = scenario, y = value, fill = scenario)) +
geom_violin() +
geom_boxplot(width = 0.15, col = "black", outlier.shape = NA) +
scale_fill_manual(values = cols) +
facet_wrap(~indicator, scales = "free_y", ncol = 3, dir = "h") +
theme(legend.position = "none",
axis.text.x = element_text(angle = 90, vjust = 0.5)) +
scale_x_discrete(name = "") +
scale_y_continuous(name = "")
g
} | /R/plot_violins.R | no_license | aaronmberger-nwfsc/pacifichakemse-1 | R | false | false | 2,043 | r | #' Plot faceted violin plots for scenarios, showing performance metrics
#'
#' @param ps A plot setup object as output by [setup_mse_plot_objects()]
#' @param quants Quantile values as limits to remove tail data from plot
#'
#' @return A [ggplot2::ggplot()] object
#' @export
#' @importFrom forcats fct_relevel
#' @importFrom PNWColors pnw_palette
plot_violins <- function(ps = NULL,
                         quants = c(0.05, 0.95)){
  # Indicators to show, in the fixed facet order (left-to-right, row-wise).
  inds <- c("SSB < 0.10 SSB0",
            "0.10 < SSB < 0.4 SSB0",
            "SSB > 0.4 SSB0",
            "AAV",
            "Short term catch",
            "Long term catch")
  d <- ps$df_ssb_catch_indicators
  # Minimal schema check on the pre-built indicator data frame.
  stopifnot("value" %in% names(d))
  stopifnot("scenario" %in% names(d))
  # Keep only the selected indicators and lock in the facet ordering.
  df <- d %>%
    filter(indicator %in% inds) %>%
    mutate(indicator = fct_relevel(indicator, inds))
  # Remove tails of data
  # (per scenario x indicator, keep values between the `quants` quantiles)
  qs <- df %>%
    group_by(scenario, indicator) %>%
    summarize(qlow = quantile(value, quants[1]), qhigh = quantile(value, quants[2])) %>%
    ungroup()
  df <- df %>% left_join(qs, by = c("scenario", "indicator")) %>%
    group_by(scenario, indicator) %>%
    filter(value >= qlow & value <= qhigh) %>%
    ungroup() %>%
    select(-c(qlow, qhigh))
  # Standardize short and long-term catch values
  # (divide each by its maximum across all scenarios so both are on [0, 1])
  df_st <- df %>%
    filter(indicator == "Short term catch") %>%
    mutate(value = value / max(value))
  df_lt <- df %>%
    filter(indicator == "Long term catch") %>%
    mutate(value = value / max(value))
  df <- df %>%
    filter(!indicator %in% c("Short term catch", "Long term catch")) %>%
    bind_rows(df_st, df_lt)
  # Scenario colours supplied by the plot-setup object.
  cols <- ps$cols
  # Violin per scenario with a narrow boxplot overlay, one facet per indicator.
  g <- ggplot(df, aes(x = scenario, y = value, fill = scenario)) +
    geom_violin() +
    geom_boxplot(width = 0.15, col = "black", outlier.shape = NA) +
    scale_fill_manual(values = cols) +
    facet_wrap(~indicator, scales = "free_y", ncol = 3, dir = "h") +
    theme(legend.position = "none",
          axis.text.x = element_text(angle = 90, vjust = 0.5)) +
    scale_x_discrete(name = "") +
    scale_y_continuous(name = "")
  g
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_summary.R
\name{plot_complete_pairs}
\alias{plot_complete_pairs}
\title{Plot a heat map of frequency of "complete" (both non-NA) pairs of variables}
\usage{
plot_complete_pairs(dat, subject = FALSE, width = 700, height = 700,
thresh = 0.95, ...)
}
\arguments{
\item{dat}{data frame}
\item{subject}{should subject-level (TRUE) or time-varying (FALSE) variables be plotted?}
\item{width}{width of plot in pixels}
\item{height}{height of plot in pixels}
\item{thresh}{percentage NA threshold above which variables will be ignored (to help deal with cases involving many variables)}
\item{\ldots}{additional parameters passed to \code{\link{figure}}}
}
\description{
Plot a heat map of frequency of "complete" (both non-NA) pairs of variables
}
\details{
Subject-level variables are treated differently than time-varying variables in that they are repeated for each subject. When plotting summaries of subject-level variables, the data is first subset to one record per subject.
}
\examples{
plot_complete_pairs(cpp)
plot_complete_pairs(cpp, subject = TRUE)
}
| /man/plot_complete_pairs.Rd | permissive | hafen/hbgd | R | false | true | 1,147 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_summary.R
\name{plot_complete_pairs}
\alias{plot_complete_pairs}
\title{Plot a heat map of frequency of "complete" (both non-NA) pairs of variables}
\usage{
plot_complete_pairs(dat, subject = FALSE, width = 700, height = 700,
thresh = 0.95, ...)
}
\arguments{
\item{dat}{data frame}
\item{subject}{should subject-level (TRUE) or time-varying (FALSE) variables be plotted?}
\item{width}{width of plot in pixels}
\item{height}{height of plot in pixels}
\item{thresh}{percentage NA threshold above which variables will be ignored (to help deal with cases involving many variables)}
\item{\ldots}{additional parameters passed to \code{\link{figure}}}
}
\description{
Plot a heat map of frequency of "complete" (both non-NA) pairs of variables
}
\details{
Subject-level variables are treated differently than time-varying variables in that they are repeated for each subject. When plotting summaries of subject-level variables, the data is first subset to one record per subject.
}
\examples{
plot_complete_pairs(cpp)
plot_complete_pairs(cpp, subject = TRUE)
}
|
# R code to replicate Figures S1 and S2 in Hsiang, Burke, and Miguel 2013
# First replace the following directory with the directory where you unzipped
setwd("/Documents/Dropbox/Marshall-Sol/drafts/Science_review_v4/replication/")
# get median effect size
data=read.csv("data/standardized_effects.csv")
eff=as.numeric(as.character(dta$effect_1sd[dta$indiv_grou==1]))
med <- median(eff)*100
###############################################
# Figure S1
###############################################
data=read.csv("data/Supplement_Data/FigS1_data.csv")
toplot=1:dim(data)[1] ##this is the plot order which we can change to whatever.
# right now it's just in the order of the spreadsheet, which we sorted by hand
eff=as.numeric(as.character(data$effect_1sd))
effsd=as.numeric(as.character(data$SE_1sd))
hi=(eff+1.96*effsd)*100
lo=(eff-1.96*effsd)*100
pdf(file="output/FigureS1.pdf",width=8,height=5,useDingbats=F)
par(mar=c(8,4,3,1))
plot(1,xlim=c(1,dim(data)[1]),ylim=c(-15,46),type="n",ylab=bquote(paste("% change per 1",sigma," change in climate",sep="")),xlab="",xaxt="n",yaxt="n")
#rect(numg+0.5,-30,dim(data)[1]+2,60,col="grey93",border=NA)
axis(2,at=seq(-100,100,10),las=1)
abline(h=seq(-100,100,10),col="grey85",lty=1)
abline(h=0,lty=1,lwd=1.5)
ll=1:length(toplot)
wd=0.08 #width of end whiskers
segments(ll,lo[toplot],ll,hi[toplot],lwd=3,lty=1)
segments(ll,lo[toplot],ll,hi[toplot],lwd=1.5,lty=1,col="red")
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=3)
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=0.5,col="red")
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=3)
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=0.5,col="red")
points(ll,eff[toplot]*100,cex=1,pch=21,bg="red",lwd=2)
mtext(paste(data$Study[toplot],data$study_year[toplot],sep=" "),side=1,at=ll-0.15,cex=0.7,las=2,line=1)
mtext(data$notes[toplot],side=1,at=ll+0.15,cex=0.7,las=2,line=1)
abline(h=med,lty=2,lwd=1.5)
box()
dev.off()
###############################################
# Figure S2
###############################################
data=read.csv("data/Supplement_Data/FigS2_data.csv")
toplot=1:dim(data)[1] ##this is the plot order which we can change to whatever.
eff=as.numeric(as.character(data$temp_stdeff))
effsd=as.numeric(as.character(data$temp_stdeff_se))
hi=(eff+1.96*effsd)
lo=(eff-1.96*effsd)
# make shape of point
type=as.character(unique(data$climvar))
pchs=c(1:2)
ptype=pchs[match(data$climvar,type)]
lags=paste("lags = ",data$lags[toplot],sep="")
lags[5]=paste(lags[5],"**",sep="")
pdf(file="output/FigureS2.pdf",width=7,height=4,useDingbats=F)
par(mar=c(5,4,3,1))
plot(1,xlim=c(1,dim(data)[1]),ylim=c(-90,170),type="n",ylab=bquote(paste("% change per 1",sigma," change in climate",sep="")),xlab="",xaxt="n",yaxt="n")
rect(6.5,-100,17,200,col="grey93",border=NA)
abline(v=6.5)
abline(h=med,lty=2,lwd=1.5) #median
axis(2,at=seq(-100,150,25),las=1)
abline(h=seq(-100,150,25),col="grey85",lty=1)
abline(h=0,lty=1,lwd=1.5)
ll=1:length(toplot)
wd=0.08 #width of end whiskers
segments(ll,lo[toplot],ll,hi[toplot],lwd=3,lty=1)
segments(ll,lo[toplot],ll,hi[toplot],lwd=1.5,lty=1,col="red")
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=3)
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=0.5,col="red")
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=3)
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=0.5,col="red")
points(ll,eff[toplot],cex=1,pch=ptype,bg="red",lwd=2)
mtext(lags,side=1,at=ll,cex=0.7,las=2,line=1)
mtext("Onset",side=3,at=3.5,cex=1.4,las=1,line=1)
mtext("Incidence",side=3,at=9.5,cex=1.4,las=1,line=1)
box()
dev.off()
| /code/Make_Fig_S1_S2.R | no_license | emilylaiken/climatereplicationcode | R | false | false | 3,554 | r |
# R code to replicate Figures S1 and S2 in Hsiang, Burke, and Miguel 2013
# First replace the following directory with the directory where you unzipped
setwd("/Documents/Dropbox/Marshall-Sol/drafts/Science_review_v4/replication/")
# get median effect size
data=read.csv("data/standardized_effects.csv")
eff=as.numeric(as.character(dta$effect_1sd[dta$indiv_grou==1]))
med <- median(eff)*100
###############################################
# Figure S1
###############################################
# Read the per-study standardized effects and draw each study's point
# estimate with a 95% confidence interval, in spreadsheet order.
data=read.csv("data/Supplement_Data/FigS1_data.csv")
toplot=1:dim(data)[1] ##this is the plot order which we can change to whatever.
# right now it's just in the order of the spreadsheet, which we sorted by hand
eff=as.numeric(as.character(data$effect_1sd))
effsd=as.numeric(as.character(data$SE_1sd))
# 95% confidence bounds, converted to percent (x100).
hi=(eff+1.96*effsd)*100
lo=(eff-1.96*effsd)*100
pdf(file="output/FigureS1.pdf",width=8,height=5,useDingbats=F)
par(mar=c(8,4,3,1))   # wide bottom margin for the rotated study labels
# Empty canvas; gridlines, whiskers and points are drawn manually below.
plot(1,xlim=c(1,dim(data)[1]),ylim=c(-15,46),type="n",ylab=bquote(paste("% change per 1",sigma," change in climate",sep="")),xlab="",xaxt="n",yaxt="n")
#rect(numg+0.5,-30,dim(data)[1]+2,60,col="grey93",border=NA)
axis(2,at=seq(-100,100,10),las=1)
abline(h=seq(-100,100,10),col="grey85",lty=1)   # horizontal gridlines
abline(h=0,lty=1,lwd=1.5)                       # zero-effect reference line
ll=1:length(toplot)   # x position of each study
wd=0.08 #width of end whiskers
# Confidence-interval whiskers: thick black under thin red (black outline effect).
segments(ll,lo[toplot],ll,hi[toplot],lwd=3,lty=1)
segments(ll,lo[toplot],ll,hi[toplot],lwd=1.5,lty=1,col="red")
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=3)
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=0.5,col="red")
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=3)
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=0.5,col="red")
# Point estimates (in percent) as filled red circles.
points(ll,eff[toplot]*100,cex=1,pch=21,bg="red",lwd=2)
# Rotated study labels (author + year) and notes along the bottom axis.
mtext(paste(data$Study[toplot],data$study_year[toplot],sep=" "),side=1,at=ll-0.15,cex=0.7,las=2,line=1)
mtext(data$notes[toplot],side=1,at=ll+0.15,cex=0.7,las=2,line=1)
abline(h=med,lty=2,lwd=1.5)   # dashed line: median effect size computed at the top of the script
box()
dev.off()
###############################################
# Figure S2
###############################################
data=read.csv("data/Supplement_Data/FigS2_data.csv")
toplot=1:dim(data)[1] ##this is the plot order which we can change to whatever.
eff=as.numeric(as.character(data$temp_stdeff))
effsd=as.numeric(as.character(data$temp_stdeff_se))
# 95% confidence bounds (no x100 here, unlike Figure S1).
hi=(eff+1.96*effsd)
lo=(eff-1.96*effsd)
# make shape of point
type=as.character(unique(data$climvar))
pchs=c(1:2)
ptype=pchs[match(data$climvar,type)]   # plotting symbol keyed to the climate variable
# Lag labels for the bottom axis; the 5th entry gets an extra "**" marker.
lags=paste("lags = ",data$lags[toplot],sep="")
lags[5]=paste(lags[5],"**",sep="")
pdf(file="output/FigureS2.pdf",width=7,height=4,useDingbats=F)
par(mar=c(5,4,3,1))
# Empty canvas; panels, gridlines, whiskers and points are drawn manually below.
plot(1,xlim=c(1,dim(data)[1]),ylim=c(-90,170),type="n",ylab=bquote(paste("% change per 1",sigma," change in climate",sep="")),xlab="",xaxt="n",yaxt="n")
rect(6.5,-100,17,200,col="grey93",border=NA)   # shaded background for the right-hand ("Incidence") panel
abline(v=6.5)                                  # divider between the "Onset" and "Incidence" panels
abline(h=med,lty=2,lwd=1.5) #median
axis(2,at=seq(-100,150,25),las=1)
abline(h=seq(-100,150,25),col="grey85",lty=1)  # horizontal gridlines
abline(h=0,lty=1,lwd=1.5)                      # zero-effect reference line
ll=1:length(toplot)   # x position of each estimate
wd=0.08 #width of end whiskers
# Confidence-interval whiskers: thick black under thin red (black outline effect).
segments(ll,lo[toplot],ll,hi[toplot],lwd=3,lty=1)
segments(ll,lo[toplot],ll,hi[toplot],lwd=1.5,lty=1,col="red")
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=3)
segments(ll-wd,lo[toplot],ll+wd,lo[toplot],lwd=0.5,col="red")
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=3)
segments(ll-wd,hi[toplot],ll+wd,hi[toplot],lwd=0.5,col="red")
points(ll,eff[toplot],cex=1,pch=ptype,bg="red",lwd=2)   # point estimates; symbol set by ptype
mtext(lags,side=1,at=ll,cex=0.7,las=2,line=1)           # rotated per-estimate lag labels
mtext("Onset",side=3,at=3.5,cex=1.4,las=1,line=1)       # panel titles
mtext("Incidence",side=3,at=9.5,cex=1.4,las=1,line=1)
box()
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/no2.R
\docType{data}
\name{no2}
\alias{no2}
\title{Tibble con i dati di no2 per 19 stazioni della regione UMBRIA}
\format{
Un tibble con 8 colonne e 13889 osservazioni
}
\usage{
no2
}
\description{
Tibble con i dati di no2 per 19 stazioni della regione UMBRIA
}
\keyword{datasets}
| /man/no2.Rd | permissive | progettopulvirus/umbria | R | false | true | 359 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/no2.R
\docType{data}
\name{no2}
\alias{no2}
\title{Tibble con i dati di no2 per 19 stazioni della regione UMBRIA}
\format{
Un tibble con 8 colonne e 13889 osservazioni
}
\usage{
no2
}
\description{
Tibble con i dati di no2 per 19 stazioni della regione UMBRIA
}
\keyword{datasets}
|
# Install necessary packages
# Cleanup before start.
# NOTE(review): wiping the global environment from a script is a known
# antipattern; kept because this script is meant to start from a clean slate.
# ls(all.names = TRUE) spells out what ls(all=T) only partially matched.
rm(list = ls(all.names = TRUE))
packages <- list('data.table', 'MASS') #Add all your packages here. Don't change the rest of the file, or file name
#Example: packages <- list('data.table','rpart','rgdal')

# Install `packageName` unless it is already present in a library.
# FIX: compare against rownames(installed.packages()) -- the package names --
# rather than the whole matrix, whose other columns (versions, library paths)
# could coincidentally match. Also TRUE instead of the reassignable T.
InstallAll <- function(packageName){
  if (!(packageName %in% rownames(installed.packages()))) {
    install.packages(packageName, dependencies = TRUE)
  }
}
# Side-effect-only iteration: lapply + invisible() instead of sapply, whose
# simplified return value was unused anyway.
invisible(lapply(packages, InstallAll))
| /Central Limit Theorem+Regression/R/installPackages.R | no_license | iankurgarg/Foundations-of-Data-Science | R | false | false | 431 | r |
# Install necessary packages
# Cleanup before start.
# NOTE(review): wiping the global environment from a script is a known
# antipattern; kept because this script is meant to start from a clean slate.
# ls(all.names = TRUE) spells out what ls(all=T) only partially matched.
rm(list = ls(all.names = TRUE))
packages <- list('data.table', 'MASS') #Add all your packages here. Don't change the rest of the file, or file name
#Example: packages <- list('data.table','rpart','rgdal')

# Install `packageName` unless it is already present in a library.
# FIX: compare against rownames(installed.packages()) -- the package names --
# rather than the whole matrix, whose other columns (versions, library paths)
# could coincidentally match. Also TRUE instead of the reassignable T.
InstallAll <- function(packageName){
  if (!(packageName %in% rownames(installed.packages()))) {
    install.packages(packageName, dependencies = TRUE)
  }
}
# Side-effect-only iteration: lapply + invisible() instead of sapply, whose
# simplified return value was unused anyway.
invisible(lapply(packages, InstallAll))
|
\name{predict.sprinter}
\alias{predict.sprinter}
\title{Predict method for objects of class \code{sprinter}}
\description{
Evaluates the linear predictor from a Cox proportional Hazards model fitted by \code{\link{sprinter}}.
}
\usage{
\method{predict}{sprinter}(object, newdata=NULL,\ldots)
}
\arguments{
\item{object}{Cox proportional Hazards model from a \code{\link{sprinter}} call.}
\item{newdata}{\code{n.new * p} matrix with new covariate values. If just prediction for the training data is wanted, it can be omitted.}
\item{\dots}{additional arguments.}
}
\value{
The linear predictor, a vector of length \code{n.new}, is returned.
}
\author{
Isabell Hoffmann \email{isabell.hoffmann@uni-mainz.de}
}
\examples{
simulation <- simul.int(287578,n = 200, p = 500,
beta.int = 1.0,
beta.main = 0.9,
censparam = 1/20,
lambda = 1/20)
data <- simulation$data
simulation$info
set.seed(123)
\dontrun{
testcb <- sprinter( x=data[,1:500],
time = data$obs.time,
status= data$obs.status,
repetitions = 10,
mandatory = c("ID1","ID2"),
n.inter.candidates = 1000,
screen.main = fit.CoxBoost,
fit.final = fit.CoxBoost,
args.screen.main = list(seed=123,stepno = 10, K = 10,
criterion ='pscore', nu = 0.05),
parallel = FALSE, trace=TRUE)
summary(testcb)
# true coefficients:
# Clin.cov1 Clin.cov2 ID5:ID6 ID7:ID8
# 0.9 -0.9 1 -1
# Simulate New Data:
newSimulation <- simul.int(12345,n = 200, p = 500,
beta.int = 1.0,
beta.main = 0.9,
censparam = 1/20,
lambda = 1/20)
newdata <- newSimulation$data
newSimulation$info
predict(testcb, newdata = newdata[,1:500])
}
}
| /man/predict.sprinter.Rd | no_license | cran/sprinter | R | false | false | 2,108 | rd | \name{predict.sprinter}
\alias{predict.sprinter}
\title{Predict method for objects of class \code{sprinter}}
\description{
Evaluates the linear predictor from a Cox proportional Hazards model fitted by \code{\link{sprinter}}.
}
\usage{
\method{predict}{sprinter}(object, newdata=NULL,\ldots)
}
\arguments{
\item{object}{Cox proportional Hazards model from a \code{\link{sprinter}} call.}
\item{newdata}{\code{n.new * p} matrix with new covariate values. If just prediction for the training data is wanted, it can be omitted.}
\item{\dots}{additional arguments.}
}
\value{
The linear predictor, a vector of length \code{n.new}, is returned.
}
\author{
Isabell Hoffmann \email{isabell.hoffmann@uni-mainz.de}
}
\examples{
simulation <- simul.int(287578,n = 200, p = 500,
beta.int = 1.0,
beta.main = 0.9,
censparam = 1/20,
lambda = 1/20)
data <- simulation$data
simulation$info
set.seed(123)
\dontrun{
testcb <- sprinter( x=data[,1:500],
time = data$obs.time,
status= data$obs.status,
repetitions = 10,
mandatory = c("ID1","ID2"),
n.inter.candidates = 1000,
screen.main = fit.CoxBoost,
fit.final = fit.CoxBoost,
args.screen.main = list(seed=123,stepno = 10, K = 10,
criterion ='pscore', nu = 0.05),
parallel = FALSE, trace=TRUE)
summary(testcb)
# true coefficients:
# Clin.cov1 Clin.cov2 ID5:ID6 ID7:ID8
# 0.9 -0.9 1 -1
# Simulate New Data:
newSimulation <- simul.int(12345,n = 200, p = 500,
beta.int = 1.0,
beta.main = 0.9,
censparam = 1/20,
lambda = 1/20)
newdata <- newSimulation$data
newSimulation$info
predict(testcb, newdata = newdata[,1:500])
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HobbyShop.R
\name{HobbyShop}
\alias{HobbyShop}
\title{HobbyShop}
\usage{
HobbyShop(id = NULL, priceRange = NULL, paymentAccepted = NULL,
openingHours = NULL, currenciesAccepted = NULL, branchOf = NULL,
telephone = NULL, specialOpeningHoursSpecification = NULL,
smokingAllowed = NULL, reviews = NULL, review = NULL,
publicAccess = NULL, photos = NULL, photo = NULL,
openingHoursSpecification = NULL, maximumAttendeeCapacity = NULL,
maps = NULL, map = NULL, logo = NULL, isicV4 = NULL,
isAccessibleForFree = NULL, hasMap = NULL, globalLocationNumber = NULL,
geo = NULL, faxNumber = NULL, events = NULL, event = NULL,
containsPlace = NULL, containedInPlace = NULL, containedIn = NULL,
branchCode = NULL, amenityFeature = NULL, aggregateRating = NULL,
address = NULL, additionalProperty = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{priceRange}{(Text type.) The price range of the business, for example ```$$$```.}
\item{paymentAccepted}{(Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.}
\item{openingHours}{(Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.}
\item{currenciesAccepted}{(Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".}
\item{branchOf}{(Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].}
\item{telephone}{(Text or Text or Text or Text type.) The telephone number.}
\item{specialOpeningHoursSpecification}{(OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].}
\item{smokingAllowed}{(Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.}
\item{reviews}{(Review or Review or Review or Review or Review type.) Review of the item.}
\item{review}{(Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.}
\item{publicAccess}{(Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value}
\item{photos}{(Photograph or ImageObject type.) Photographs of this place.}
\item{photo}{(Photograph or ImageObject type.) A photograph of this place.}
\item{openingHoursSpecification}{(OpeningHoursSpecification type.) The opening hours of a certain place.}
\item{maximumAttendeeCapacity}{(Integer or Integer type.) The total number of individuals that may attend an event or venue.}
\item{maps}{(URL type.) A URL to a map of the place.}
\item{map}{(URL type.) A URL to a map of the place.}
\item{logo}{(URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.}
\item{isicV4}{(Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.}
\item{isAccessibleForFree}{(Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.}
\item{hasMap}{(URL or Map type.) A URL to a map of the place.}
\item{globalLocationNumber}{(Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.}
\item{geo}{(GeoShape or GeoCoordinates type.) The geo coordinates of the place.}
\item{faxNumber}{(Text or Text or Text or Text type.) The fax number.}
\item{events}{(Event or Event type.) Upcoming or past events associated with this place or organization.}
\item{event}{(Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.}
\item{containsPlace}{(Place type.) The basic containment relation between a place and another that it contains.}
\item{containedInPlace}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{containedIn}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{branchCode}{(Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.}
\item{amenityFeature}{(LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.}
\item{aggregateRating}{(AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.}
\item{address}{(Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.}
\item{additionalProperty}{(PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristics of the entitity, e.g. a product feature or another characteristic for which there is no matching property in schema.org.Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:HobbyShop
}
\description{
A store that sells materials useful or necessary for various hobbies.
}
| /man/HobbyShop.Rd | no_license | cboettig/schemar | R | false | true | 9,739 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HobbyShop.R
\name{HobbyShop}
\alias{HobbyShop}
\title{HobbyShop}
\usage{
HobbyShop(id = NULL, priceRange = NULL, paymentAccepted = NULL,
openingHours = NULL, currenciesAccepted = NULL, branchOf = NULL,
telephone = NULL, specialOpeningHoursSpecification = NULL,
smokingAllowed = NULL, reviews = NULL, review = NULL,
publicAccess = NULL, photos = NULL, photo = NULL,
openingHoursSpecification = NULL, maximumAttendeeCapacity = NULL,
maps = NULL, map = NULL, logo = NULL, isicV4 = NULL,
isAccessibleForFree = NULL, hasMap = NULL, globalLocationNumber = NULL,
geo = NULL, faxNumber = NULL, events = NULL, event = NULL,
containsPlace = NULL, containedInPlace = NULL, containedIn = NULL,
branchCode = NULL, amenityFeature = NULL, aggregateRating = NULL,
address = NULL, additionalProperty = NULL, url = NULL, sameAs = NULL,
potentialAction = NULL, name = NULL, mainEntityOfPage = NULL,
image = NULL, identifier = NULL, disambiguatingDescription = NULL,
description = NULL, alternateName = NULL, additionalType = NULL)
}
\arguments{
\item{id}{identifier for the object (URI)}
\item{priceRange}{(Text type.) The price range of the business, for example ```$$$```.}
\item{paymentAccepted}{(Text type.) Cash, Credit Card, Cryptocurrency, Local Exchange Tradings System, etc.}
\item{openingHours}{(Text or Text type.) The general opening hours for a business. Opening hours can be specified as a weekly time range, starting with days, then times per day. Multiple days can be listed with commas ',' separating each day. Day or time ranges are specified using a hyphen '-'.* Days are specified using the following two-letter combinations: ```Mo```, ```Tu```, ```We```, ```Th```, ```Fr```, ```Sa```, ```Su```.* Times are specified using 24:00 time. For example, 3pm is specified as ```15:00```. * Here is an example: <code><time itemprop="openingHours" datetime="Tu,Th 16:00-20:00">Tuesdays and Thursdays 4-8pm</time></code>.* If a business is open 7 days a week, then it can be specified as <code><time itemprop="openingHours" datetime="Mo-Su">Monday through Sunday, all day</time></code>.}
\item{currenciesAccepted}{(Text type.) The currency accepted.Use standard formats: [ISO 4217 currency format](http://en.wikipedia.org/wiki/ISO_4217) e.g. "USD"; [Ticker symbol](https://en.wikipedia.org/wiki/List_of_cryptocurrencies) for cryptocurrencies e.g. "BTC"; well known names for [Local Exchange Tradings Systems](https://en.wikipedia.org/wiki/Local_exchange_trading_system) (LETS) and other currency types e.g. "Ithaca HOUR".}
\item{branchOf}{(Organization type.) The larger organization that this local business is a branch of, if any. Not to be confused with (anatomical)[[branch]].}
\item{telephone}{(Text or Text or Text or Text type.) The telephone number.}
\item{specialOpeningHoursSpecification}{(OpeningHoursSpecification type.) The special opening hours of a certain place.Use this to explicitly override general opening hours brought in scope by [[openingHoursSpecification]] or [[openingHours]].}
\item{smokingAllowed}{(Boolean type.) Indicates whether it is allowed to smoke in the place, e.g. in the restaurant, hotel or hotel room.}
\item{reviews}{(Review or Review or Review or Review or Review type.) Review of the item.}
\item{review}{(Review or Review or Review or Review or Review or Review or Review or Review type.) A review of the item.}
\item{publicAccess}{(Boolean type.) A flag to signal that the [[Place]] is open to public visitors. If this property is omitted there is no assumed default boolean value}
\item{photos}{(Photograph or ImageObject type.) Photographs of this place.}
\item{photo}{(Photograph or ImageObject type.) A photograph of this place.}
\item{openingHoursSpecification}{(OpeningHoursSpecification type.) The opening hours of a certain place.}
\item{maximumAttendeeCapacity}{(Integer or Integer type.) The total number of individuals that may attend an event or venue.}
\item{maps}{(URL type.) A URL to a map of the place.}
\item{map}{(URL type.) A URL to a map of the place.}
\item{logo}{(URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject or URL or ImageObject type.) An associated logo.}
\item{isicV4}{(Text or Text or Text type.) The International Standard of Industrial Classification of All Economic Activities (ISIC), Revision 4 code for a particular organization, business person, or place.}
\item{isAccessibleForFree}{(Boolean or Boolean or Boolean or Boolean type.) A flag to signal that the item, event, or place is accessible for free.}
\item{hasMap}{(URL or Map type.) A URL to a map of the place.}
\item{globalLocationNumber}{(Text or Text or Text type.) The [Global Location Number](http://www.gs1.org/gln) (GLN, sometimes also referred to as International Location Number or ILN) of the respective organization, person, or place. The GLN is a 13-digit number used to identify parties and physical locations.}
\item{geo}{(GeoShape or GeoCoordinates type.) The geo coordinates of the place.}
\item{faxNumber}{(Text or Text or Text or Text type.) The fax number.}
\item{events}{(Event or Event type.) Upcoming or past events associated with this place or organization.}
\item{event}{(Event or Event or Event or Event or Event or Event or Event type.) Upcoming or past event associated with this place, organization, or action.}
\item{containsPlace}{(Place type.) The basic containment relation between a place and another that it contains.}
\item{containedInPlace}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{containedIn}{(Place type.) The basic containment relation between a place and one that contains it.}
\item{branchCode}{(Text type.) A short textual code (also called "store code") that uniquely identifies a place of business. The code is typically assigned by the parentOrganization and used in structured URLs.For example, in the URL http://www.starbucks.co.uk/store-locator/etc/detail/3047 the code "3047" is a branchCode for a particular branch.}
\item{amenityFeature}{(LocationFeatureSpecification or LocationFeatureSpecification or LocationFeatureSpecification type.) An amenity feature (e.g. a characteristic or service) of the Accommodation. This generic property does not make a statement about whether the feature is included in an offer for the main accommodation or available at extra costs.}
\item{aggregateRating}{(AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating or AggregateRating type.) The overall rating, based on a collection of reviews or ratings, of the item.}
\item{address}{(Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress or Text or PostalAddress type.) Physical address of the item.}
\item{additionalProperty}{(PropertyValue or PropertyValue or PropertyValue or PropertyValue type.) A property-value pair representing an additional characteristics of the entitity, e.g. a product feature or another characteristic for which there is no matching property in schema.org.Note: Publishers should be aware that applications designed to use specific schema.org properties (e.g. http://schema.org/width, http://schema.org/color, http://schema.org/gtin13, ...) will typically expect such data to be provided using those properties, rather than using the generic property/value mechanism.}
\item{url}{(URL type.) URL of the item.}
\item{sameAs}{(URL type.) URL of a reference Web page that unambiguously indicates the item's identity. E.g. the URL of the item's Wikipedia page, Wikidata entry, or official website.}
\item{potentialAction}{(Action type.) Indicates a potential Action, which describes an idealized action in which this thing would play an 'object' role.}
\item{name}{(Text type.) The name of the item.}
\item{mainEntityOfPage}{(URL or CreativeWork type.) Indicates a page (or other CreativeWork) for which this thing is the main entity being described. See [background notes](/docs/datamodel.html#mainEntityBackground) for details.}
\item{image}{(URL or ImageObject type.) An image of the item. This can be a [[URL]] or a fully described [[ImageObject]].}
\item{identifier}{(URL or Text or PropertyValue type.) The identifier property represents any kind of identifier for any kind of [[Thing]], such as ISBNs, GTIN codes, UUIDs etc. Schema.org provides dedicated properties for representing many of these, either as textual strings or as URL (URI) links. See [background notes](/docs/datamodel.html#identifierBg) for more details.}
\item{disambiguatingDescription}{(Text type.) A sub property of description. A short description of the item used to disambiguate from other, similar items. Information from other properties (in particular, name) may be necessary for the description to be useful for disambiguation.}
\item{description}{(Text type.) A description of the item.}
\item{alternateName}{(Text type.) An alias for the item.}
\item{additionalType}{(URL type.) An additional type for the item, typically used for adding more specific types from external vocabularies in microdata syntax. This is a relationship between something and a class that the thing is in. In RDFa syntax, it is better to use the native RDFa syntax - the 'typeof' attribute - for multiple types. Schema.org tools may have only weaker understanding of extra types, in particular those defined externally.}
}
\value{
a list object corresponding to a schema:HobbyShop
}
\description{
A store that sells materials useful or necessary for various hobbies.
}
|
##########
#UI
##########
# Dynamically rendered input widgets: the choices depend on the loaded data
# (taxonomy(), envdata()) and so cannot be built statically in the UI.
output$selectorTaxlevelOrd <- renderUI({
  radioButtons("taxlevel_ord", "Choose taxonomic level:", as.list(colnames(taxonomy())))
})
# Group choices come from the taxgroups_ord() reactive (levels at the chosen level).
output$selectorTaxgroupsOrd <- renderUI({
  checkboxGroupInput("taxgroups_ord", "Choose taxonomic group:", taxgroups_ord()$taxvec)
})
output$selectorColor <- renderUI({
  radioButtons("color_aes", "Choose env. variable to color samples:", colnames(envdata()))
})
output$selectorShape <- renderUI({
  radioButtons("shape_aes", "Choose env. variable to shape samples:", colnames(envdata()))
})
# Debug table showing the filtered OTU table returned by ordination()
# ("sjekk" is presumably Norwegian for "check").
output$sjekk <- renderTable({
  ordination()$ord_table
})
# The arranged NMDS plots produced by ordination().
output$nmdsplot1 <- renderPlot({
  ordination()$nmdsplot
})
#
# ##########
# #Server
# #########
# Reactive: the taxonomic groups (factor levels) available at the currently
# selected taxonomic level, returned as list(taxvec = <character vector>).
taxgroups_ord <- reactive({
  selected_level <- input$taxlevel_ord
  group_levels <- levels(pull(taxonomy(), selected_level))
  list(taxvec = group_levels)
})
#
# ord_aes <- reactive({
# color_aes <- envdata() %>% pull(input$color_aes)
# shape_aes <- envdata() %>% pull(input$shape_aes)
# list(color_aes = color_aes, shape_aes = shape_aes)
# })
# Runs an NMDS ordination per selected taxonomic group when the action button
# is pressed. Returns list(ord_table = <first group's filtered OTU table>,
#                          nmdsplot = <ggarrange of one NMDS plot per group>).
# (The closing "})" of this eventReactive follows immediately below.)
ordination <- eventReactive(input$actionb_ord1, {
  # Prepare one numeric OTU table per selected taxonomic group.
  n_groups <- length(input$taxgroups_ord)
  otutab_tax_filter_list <- vector("list", n_groups)
  otuid_list <- vector("list", n_groups)
  # FIX: seq_len() instead of 1:length() -- with no groups selected, 1:0
  # would iterate over c(1, 0); seq_len(0) correctly iterates zero times.
  # (Unused debug locals sjekk/sjekk1/sjekk2 from the original were removed.)
  for (i in seq_len(n_groups)) {
    otutab_tax_filter_list[[i]] <- otutab_prop() %>%
      filter(.data[[input$taxlevel_ord]] %in% .env$input$taxgroups_ord[i]) %>%
      select_if(is.numeric)
    otuid_list[[i]] <- otutab_prop() %>%
      filter(.data[[input$taxlevel_ord]] %in% .env$input$taxgroups_ord[i]) %>%
      pull(otuid) # TODO: do not hard-code 'otuid'
    rownames(otutab_tax_filter_list[[i]]) <- otuid_list[[i]]
  }
  # Run the NMDS ordination for each group and keep the first two axes.
  ordination_taxgroups_list <- vector("list", n_groups)
  for (i in seq_len(n_groups)) {
    tmp <- mp_nmds(otutab_tax_filter_list[[i]])
    ordination_taxgroups_list[[i]] <- cbind.data.frame(tmp$points[, 1], tmp$points[, 2])
    names(ordination_taxgroups_list[[i]])[c(1, 2)] <- c("nmds_axis1", "nmds_axis2")
  }
  # Plot each NMDS, colouring/shaping samples by the chosen env. variables,
  # with a small margin added around the data range on both axes.
  color_aes <- envdata() %>% pull(input$color_aes)
  shape_aes <- envdata() %>% pull(input$shape_aes)
  plot_nmds_list <- vector("list", n_groups)
  for (i in seq_len(n_groups)) {
    plot_nmds_list[[i]] <- nmdsplot(ordination_taxgroups_list[[i]], color_aes, shape_aes) +
      xlim(min(ordination_taxgroups_list[[i]]$nmds_axis1) - .1, max(ordination_taxgroups_list[[i]]$nmds_axis1) + .1) +
      ylim(min(ordination_taxgroups_list[[i]]$nmds_axis2) - .1, max(ordination_taxgroups_list[[i]]$nmds_axis2) + .1)
  }
  nmdsplots <- ggarrange(plotlist = plot_nmds_list, common.legend = TRUE, labels = input$taxgroups_ord)
  # NOTE(review): only the first group's OTU table is returned as ord_table,
  # mirroring the original behaviour (it feeds the debug table output).
  list(ord_table = otutab_tax_filter_list[[1]], nmdsplot = nmdsplots)
}) | /panels/panel_server_ordinationCompareTaxgroups.R | no_license | EEgge/ShinyProtists | R | false | false | 3,659 | r | ##########
#UI
##########
# Dynamically rendered input widgets: the choices depend on the loaded data
# (taxonomy(), envdata()) and so cannot be built statically in the UI.
output$selectorTaxlevelOrd <- renderUI({
  radioButtons("taxlevel_ord", "Choose taxonomic level:", as.list(colnames(taxonomy())))
})
# Group choices come from the taxgroups_ord() reactive (levels at the chosen level).
output$selectorTaxgroupsOrd <- renderUI({
  checkboxGroupInput("taxgroups_ord", "Choose taxonomic group:", taxgroups_ord()$taxvec)
})
output$selectorColor <- renderUI({
  radioButtons("color_aes", "Choose env. variable to color samples:", colnames(envdata()))
})
output$selectorShape <- renderUI({
  radioButtons("shape_aes", "Choose env. variable to shape samples:", colnames(envdata()))
})
# Debug table showing the filtered OTU table returned by ordination()
# ("sjekk" is presumably Norwegian for "check").
output$sjekk <- renderTable({
  ordination()$ord_table
})
# The arranged NMDS plots produced by ordination().
output$nmdsplot1 <- renderPlot({
  ordination()$nmdsplot
})
#
# ##########
# #Server
# #########
# Reactive: the taxonomic groups (factor levels) available at the currently
# selected taxonomic level, returned as list(taxvec = <character vector>).
taxgroups_ord <- reactive({
  selected_level <- input$taxlevel_ord
  group_levels <- levels(pull(taxonomy(), selected_level))
  list(taxvec = group_levels)
})
#
# ord_aes <- reactive({
# color_aes <- envdata() %>% pull(input$color_aes)
# shape_aes <- envdata() %>% pull(input$shape_aes)
# list(color_aes = color_aes, shape_aes = shape_aes)
# })
# Runs an NMDS ordination per selected taxonomic group when the action button
# is pressed. Returns list(ord_table = <first group's filtered OTU table>,
#                          nmdsplot = <ggarrange of one NMDS plot per group>).
# (The closing "})" of this eventReactive follows immediately below.)
ordination <- eventReactive(input$actionb_ord1, {
  # Prepare one numeric OTU table per selected taxonomic group.
  n_groups <- length(input$taxgroups_ord)
  otutab_tax_filter_list <- vector("list", n_groups)
  otuid_list <- vector("list", n_groups)
  # FIX: seq_len() instead of 1:length() -- with no groups selected, 1:0
  # would iterate over c(1, 0); seq_len(0) correctly iterates zero times.
  # (Unused debug locals sjekk/sjekk1/sjekk2 from the original were removed.)
  for (i in seq_len(n_groups)) {
    otutab_tax_filter_list[[i]] <- otutab_prop() %>%
      filter(.data[[input$taxlevel_ord]] %in% .env$input$taxgroups_ord[i]) %>%
      select_if(is.numeric)
    otuid_list[[i]] <- otutab_prop() %>%
      filter(.data[[input$taxlevel_ord]] %in% .env$input$taxgroups_ord[i]) %>%
      pull(otuid) # TODO: do not hard-code 'otuid'
    rownames(otutab_tax_filter_list[[i]]) <- otuid_list[[i]]
  }
  # Run the NMDS ordination for each group and keep the first two axes.
  ordination_taxgroups_list <- vector("list", n_groups)
  for (i in seq_len(n_groups)) {
    tmp <- mp_nmds(otutab_tax_filter_list[[i]])
    ordination_taxgroups_list[[i]] <- cbind.data.frame(tmp$points[, 1], tmp$points[, 2])
    names(ordination_taxgroups_list[[i]])[c(1, 2)] <- c("nmds_axis1", "nmds_axis2")
  }
  # Plot each NMDS, colouring/shaping samples by the chosen env. variables,
  # with a small margin added around the data range on both axes.
  color_aes <- envdata() %>% pull(input$color_aes)
  shape_aes <- envdata() %>% pull(input$shape_aes)
  plot_nmds_list <- vector("list", n_groups)
  for (i in seq_len(n_groups)) {
    plot_nmds_list[[i]] <- nmdsplot(ordination_taxgroups_list[[i]], color_aes, shape_aes) +
      xlim(min(ordination_taxgroups_list[[i]]$nmds_axis1) - .1, max(ordination_taxgroups_list[[i]]$nmds_axis1) + .1) +
      ylim(min(ordination_taxgroups_list[[i]]$nmds_axis2) - .1, max(ordination_taxgroups_list[[i]]$nmds_axis2) + .1)
  }
  nmdsplots <- ggarrange(plotlist = plot_nmds_list, common.legend = TRUE, labels = input$taxgroups_ord)
  # NOTE(review): only the first group's OTU table is returned as ord_table,
  # mirroring the original behaviour (it feeds the debug table output).
  list(ord_table = otutab_tax_filter_list[[1]], nmdsplot = nmdsplots)
}) |
test_that("format.timespan properly indicates entirely abscent timespans", {
expect_silent(tmspn <- as.timespan(""))
expect_silent(fmt <- crayon::strip_style(format(tmspn)))
expect_equal(fmt, "NA")
})
| /tests/testthat/test_timespan_format.R | permissive | dgkf/parttime | R | false | false | 207 | r | test_that("format.timespan properly indicates entirely abscent timespans", {
expect_silent(tmspn <- as.timespan(""))
expect_silent(fmt <- crayon::strip_style(format(tmspn)))
expect_equal(fmt, "NA")
})
|
library("MCMCpack")
design <- read.csv("D:/univ/2014-2015/thesis/KERMIT/results february/design_results_febr_counts.csv")
counts <- design$LMG7866
evenness <- design$evenness
cellcount <- log10(design$cellcount)
treatment
posterior <- MCMCpoisson(counts ~ evenness + cellcount, b0 = -30, thin = 5)
plot(posterior)
m1 <- glm.nb(counts ~ evenness + cellcount,-10)
summary(m1)
summary(posterior)
out<-posterior
xyplot(posterior)
xyplot(out) # traceplots
gelman.plot(out) # should be below 1.05 or 1.1
geweke.plot(out) # should all be between -2 and 2
acfplot(out) # should wander around 0
# Results
plot(out)
summary(out)
HPDinterval(out)
### -----------------------------------------
## Using JAGGS
### -----------------------------------------
#-- Example zero-inflated negative binomial regression
zinb <- read.csv("http://www.ats.ucla.edu/stat/data/fish.csv")
zinb
zinb <- within(zinb, {
nofish <- factor(nofish)
livebait <- factor(livebait)
camper <- factor(camper)
})
head(zinb)
ggplot(zinb, aes(count, fill = camper)) +
geom_histogram() +
scale_x_log10() +
facet_grid(camper ~ ., margins=TRUE, scales="free_y")
library(pscl)
m1 <- zeroinfl(count ~ child + camper | persons,
data = zinb, dist = "negbin", EM = TRUE)
summary(m1)
summary(model <- zeroinfl(counts ~ evenness + cellcount), dist = "negbin", EM = TRUE)
m0 <- update(m1, . ~ 1)
pchisq(2 * (logLik(m1) - logLik(m0)), df = 3, lower.tail=FALSE)
summary(m2 <- glm.nb(count ~ child + camper, data = zinb))
newdata1 <- expand.grid(0:3, factor(0:1), 1:4)
colnames(newdata1) <- c("child", "camper", "persons")
newdata1$phat <- predict(m1, newdata1)
ggplot(newdata1, aes(x = child, y = phat, colour = factor(persons))) +
geom_point() +
geom_line() +
facet_wrap(~camper) +
labs(x = "Number of Children", y = "Predicted Fish Caught")
####-----------------------------------------
# Example nr. 2-
#########------------------------------------
## data
data("bioChemists", package = "pscl")
## without inflation
## ("art ~ ." is "art ~ fem + mar + kid5 + phd + ment")
fm_pois <- glm(art ~ ., data = bioChemists, family = poisson)
fm_qpois <- glm(art ~ ., data = bioChemists, family = quasipoisson)
fm_nb <- glm.nb(art ~ ., data = bioChemists)
## with simple inflation (no regressors for zero component)
fm_zip <- zeroinfl(art ~ . | 1, data = bioChemists)
fm_zinb <- zeroinfl(art ~ . | 1, data = bioChemists, dist = "negbin")
## inflation with regressors
## ("art ~ . | ." is "art ~ fem + mar + kid5 + phd + ment | fem + mar + kid5 + phd + ment")
fm_zip2 <- zeroinfl(art ~ . | ., data = bioChemists)
fm_zinb2 <- zeroinfl(art ~ . | ., data = bioChemists, dist = "negbin")
head(bioChemists)
bioChemists$
###########################
## Self experiments
#############################
# Source: http://www.inside-r.org/packages/cran/pscl/docs/zeroinfl
# Changed example
setwd("D:/univ/2014-2015/thesis/KERMIT/results february")
bacteria <- read.csv("design_results_febr_counts.csv")
evenness <- bacteria$evenness
cellcount <- bacteria$cellcount
pathogen <- bacteria$LMG7866
bacteria
df <- data.frame(evenness=bacteria$evenness, cellcount=log10(bacteria$cellcount), LMG7866=pathogen)
m1 <- zeroinfl(LMG7866~.|cellcount, data=df, dist = "negbin")
m1 <- hurdle(LMG7866~.|., data=df, dist = "negbin")
summary(m1)
sum(bacteria$LMG7866 == 0)/length(bacteria$LMG7866)
plot(cellcount, pathogen)
x <- order(evenness)
points(cellcount[x],predict(m1)[x], col = "red")
predict1 <- predict(m1, se=T)
predict1$fit
m1 <- zeroinfl(counts ~ evenness + cellcount,
dist = "negbin", EM = TRUE)
summary(m1)
mean(counts)
n <- length(pathogen)
modelstring <- "
model
{
# priors
beta0 ~ dnorm(0,0.001)
beta1 ~ dnorm(0,0.001)
beta2 ~ dnorm(0,0.001)
# likelihood
for(i in 1:n)
{
n50[i] ~ dpois(lambda[i])
log(lambda[i]) <- beta0 + beta1*elev50[i] + beta2*pow(elev50[i],2)
# this part is here in order to make nice prediction curves:
prediction[i] ~ dpois(lambda[i])
}
}"
| /Labwork/Statistical Analysis/results february/statistics/poissonMCMC.R | no_license | watsang/masterthesis-2014-2015 | R | false | false | 4,006 | r | library("MCMCpack")
design <- read.csv("D:/univ/2014-2015/thesis/KERMIT/results february/design_results_febr_counts.csv")
counts <- design$LMG7866
evenness <- design$evenness
cellcount <- log10(design$cellcount)
treatment
posterior <- MCMCpoisson(counts ~ evenness + cellcount, b0 = -30, thin = 5)
plot(posterior)
m1 <- glm.nb(counts ~ evenness + cellcount,-10)
summary(m1)
summary(posterior)
out<-posterior
xyplot(posterior)
xyplot(out) # traceplots
gelman.plot(out) # should be below 1.05 or 1.1
geweke.plot(out) # should all be between -2 and 2
acfplot(out) # should wander around 0
# Results
plot(out)
summary(out)
HPDinterval(out)
### -----------------------------------------
## Using JAGGS
### -----------------------------------------
#-- Example zero-inflated negative binomial regression
zinb <- read.csv("http://www.ats.ucla.edu/stat/data/fish.csv")
zinb
zinb <- within(zinb, {
nofish <- factor(nofish)
livebait <- factor(livebait)
camper <- factor(camper)
})
head(zinb)
ggplot(zinb, aes(count, fill = camper)) +
geom_histogram() +
scale_x_log10() +
facet_grid(camper ~ ., margins=TRUE, scales="free_y")
library(pscl)
m1 <- zeroinfl(count ~ child + camper | persons,
data = zinb, dist = "negbin", EM = TRUE)
summary(m1)
summary(model <- zeroinfl(counts ~ evenness + cellcount), dist = "negbin", EM = TRUE)
m0 <- update(m1, . ~ 1)
pchisq(2 * (logLik(m1) - logLik(m0)), df = 3, lower.tail=FALSE)
summary(m2 <- glm.nb(count ~ child + camper, data = zinb))
newdata1 <- expand.grid(0:3, factor(0:1), 1:4)
colnames(newdata1) <- c("child", "camper", "persons")
newdata1$phat <- predict(m1, newdata1)
ggplot(newdata1, aes(x = child, y = phat, colour = factor(persons))) +
geom_point() +
geom_line() +
facet_wrap(~camper) +
labs(x = "Number of Children", y = "Predicted Fish Caught")
####-----------------------------------------
# Example nr. 2-
#########------------------------------------
## data
data("bioChemists", package = "pscl")
## without inflation
## ("art ~ ." is "art ~ fem + mar + kid5 + phd + ment")
fm_pois <- glm(art ~ ., data = bioChemists, family = poisson)
fm_qpois <- glm(art ~ ., data = bioChemists, family = quasipoisson)
fm_nb <- glm.nb(art ~ ., data = bioChemists)
## with simple inflation (no regressors for zero component)
fm_zip <- zeroinfl(art ~ . | 1, data = bioChemists)
fm_zinb <- zeroinfl(art ~ . | 1, data = bioChemists, dist = "negbin")
## inflation with regressors
## ("art ~ . | ." is "art ~ fem + mar + kid5 + phd + ment | fem + mar + kid5 + phd + ment")
fm_zip2 <- zeroinfl(art ~ . | ., data = bioChemists)
fm_zinb2 <- zeroinfl(art ~ . | ., data = bioChemists, dist = "negbin")
head(bioChemists)
bioChemists$
###########################
## Self experiments
#############################
# Source: http://www.inside-r.org/packages/cran/pscl/docs/zeroinfl
# Changed example
setwd("D:/univ/2014-2015/thesis/KERMIT/results february")
bacteria <- read.csv("design_results_febr_counts.csv")
evenness <- bacteria$evenness
cellcount <- bacteria$cellcount
pathogen <- bacteria$LMG7866
bacteria
df <- data.frame(evenness=bacteria$evenness, cellcount=log10(bacteria$cellcount), LMG7866=pathogen)
m1 <- zeroinfl(LMG7866~.|cellcount, data=df, dist = "negbin")
m1 <- hurdle(LMG7866~.|., data=df, dist = "negbin")
summary(m1)
sum(bacteria$LMG7866 == 0)/length(bacteria$LMG7866)
plot(cellcount, pathogen)
x <- order(evenness)
points(cellcount[x],predict(m1)[x], col = "red")
predict1 <- predict(m1, se=T)
predict1$fit
m1 <- zeroinfl(counts ~ evenness + cellcount,
dist = "negbin", EM = TRUE)
summary(m1)
mean(counts)
n <- length(pathogen)
modelstring <- "
model
{
# priors
beta0 ~ dnorm(0,0.001)
beta1 ~ dnorm(0,0.001)
beta2 ~ dnorm(0,0.001)
# likelihood
for(i in 1:n)
{
n50[i] ~ dpois(lambda[i])
log(lambda[i]) <- beta0 + beta1*elev50[i] + beta2*pow(elev50[i],2)
# this part is here in order to make nice prediction curves:
prediction[i] ~ dpois(lambda[i])
}
}"
|
#' Length of Left Middle Finger and Height for an Unequal-Probability Sample of Size 200
#'
#' Length of left middle finger and height for an unequal-probability sample
#' of criminals of size 200 from the anthrop dataset. The probability of selection,
#' psi[i], was proportional to 24 for y < 65, 12 for y = 65, 2 for y = 66 or 67,
#' and 1 for y > 67.
#' @name anthuneq
#' @docType data
#' @format Data frame with the following 3 variables:
#' \describe{
#' \item{finger}{length of left middle finger (cm)}
#' \item{height}{height (inches)}
#' \item{prob}{probability of selection}
#' }
#' @source Macdonell, W. R. (1901). On criminal anthropometry and the
#' identification of criminals, \emph{Biometrika}, 1: 177--227.
#' @references Lohr (1999). Sampling: Design and Analysis, Duxbury, p. TODO and
#' 438.
#' @export
NULL
| /SDaA/R/anthuneq.R | no_license | ingted/R-Examples | R | false | false | 844 | r | #' Length of Left Middle Finger and Height for an Unequal-Probability Sample of Size 200
#'
#' Length of left middle finger and height for an unequal-probability sample
#' of criminals of size 200 from the anthrop dataset. The probability of selection,
#' psi[i], was proportional to 24 for y < 65, 12 for y = 65, 2 for y = 66 or 67,
#' and 1 for y > 67.
#' @name anthuneq
#' @docType data
#' @format Data frame with the following 3 variables:
#' \describe{
#' \item{finger}{length of left middle finger (cm)}
#' \item{height}{height (inches)}
#' \item{prob}{probability of selection}
#' }
#' @source Macdonell, W. R. (1901). On criminal anthropometry and the
#' identification of criminals, \emph{Biometrika}, 1: 177--227.
#' @references Lohr (1999). Sampling: Design and Analysis, Duxbury, p. TODO and
#' 438.
#' @export
NULL
|
## R Programming Assignment 2
##
## The makeCacheMatrix function creates a special "matrix" object that
## can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## makeVector <- function(x = numeric())
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setMatrix <- function(solve) m <<- solve
getMatrix <- function() m
list(set = set, get = get,
setMatrix = setMatrix,
getMatrix = getMatrix)
}
## The cacheSolve function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should
## retrieve the inverse from the cache.
cacheSolve <- function(x = matrix(), ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getMatrix()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
matrix <- x$get()
m <- solve(matrix, ...)
x$setMatrix(m)
m
}
| /cachematrix.R | no_license | tcpsoft/ProgrammingAssignment2 | R | false | false | 1,035 | r | ## R Programming Assignment 2
##
## The makeCacheMatrix function creates a special "matrix" object that
## can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## makeVector <- function(x = numeric())
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setMatrix <- function(solve) m <<- solve
getMatrix <- function() m
list(set = set, get = get,
setMatrix = setMatrix,
getMatrix = getMatrix)
}
## The cacheSolve function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should
## retrieve the inverse from the cache.
cacheSolve <- function(x = matrix(), ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getMatrix()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
matrix <- x$get()
m <- solve(matrix, ...)
x$setMatrix(m)
m
}
|
#Top Ten Words in an Episode of Star Trek: The Next Generation, Curated Stopwords Removed
#Call libraries used in the script
#Set the working directory
setwd("~/Text-Analysis/")
options(mc.cores = 1)
#Call libraries used in the script
library(tm)
#Creat a corpus -- this creates a corpus of the entire STNG series
#corpus <- VCorpus(DirSource("data/StarTrekNextGenClean/series"))
#Read in the text of a single episode
text_raw<-scan("data/StarTrekNextGenClean/series/277.txt", what="character", sep="\n")
#Create a corpus from single episode
corpus <- VCorpus(VectorSource(text_raw))
#Clean the corpus
corpus <- tm_map(corpus, content_transformer(tolower))
#To change the stopword list, use other dictionaries available with the tm package
#Add early modern stopwords by u adding "myStopWords
myStopWords <- scan("data/earlyModernStopword.txt", what="character", sep="\n")
corpus <- tm_map(corpus, removeWords, c(stopwords("english"), myStopWords))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
#corpus <- tm_map(corpus, PlainTextDocument)
dtm <- DocumentTermMatrix(corpus)
freq <- sort(colSums(as.matrix(dtm)), decreasing = TRUE)
#Remix ideas: 1) Play around with the number of word on the chart by changing it from "10"
# 2) Don't forget the adjust your plot's title if you have changed the input text
# 3) Look the frequency of all the words in the corpus by typing "freq" into the console
par(mar=c(15,4,4,0))
barplot(head(freq, 10), ylim=c(0,100), col=c("red3", "orange3","yellow3","green3","blue3","darkorchid3","darkred", "darkorange", "gold", "darkgreen"), col.main="Gold", col.lab="red", col.axis="gray28", las=2,
main="Star Trek: The Next Generation, Final Episode", xlab="Top Ten Words", ylab="Number of Occurences", xaxt="s")
| /RScripts/topTenPlainText.R | no_license | fiore969/Text-Analysis | R | false | false | 1,796 | r | #Top Ten Words in an Episode of Star Trek: The Next Generation, Curated Stopwords Removed
#Call libraries used in the script
#Set the working directory
setwd("~/Text-Analysis/")
options(mc.cores = 1)
#Call libraries used in the script
library(tm)
#Creat a corpus -- this creates a corpus of the entire STNG series
#corpus <- VCorpus(DirSource("data/StarTrekNextGenClean/series"))
#Read in the text of a single episode
text_raw<-scan("data/StarTrekNextGenClean/series/277.txt", what="character", sep="\n")
#Create a corpus from single episode
corpus <- VCorpus(VectorSource(text_raw))
#Clean the corpus
corpus <- tm_map(corpus, content_transformer(tolower))
#To change the stopword list, use other dictionaries available with the tm package
#Add early modern stopwords by u adding "myStopWords
myStopWords <- scan("data/earlyModernStopword.txt", what="character", sep="\n")
corpus <- tm_map(corpus, removeWords, c(stopwords("english"), myStopWords))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
#corpus <- tm_map(corpus, PlainTextDocument)
dtm <- DocumentTermMatrix(corpus)
freq <- sort(colSums(as.matrix(dtm)), decreasing = TRUE)
#Remix ideas: 1) Play around with the number of word on the chart by changing it from "10"
# 2) Don't forget the adjust your plot's title if you have changed the input text
# 3) Look the frequency of all the words in the corpus by typing "freq" into the console
par(mar=c(15,4,4,0))
barplot(head(freq, 10), ylim=c(0,100), col=c("red3", "orange3","yellow3","green3","blue3","darkorchid3","darkred", "darkorange", "gold", "darkgreen"), col.main="Gold", col.lab="red", col.axis="gray28", las=2,
main="Star Trek: The Next Generation, Final Episode", xlab="Top Ten Words", ylab="Number of Occurences", xaxt="s")
|
# Copyright 2019 Robert Carnell
# Internal function to take means across time
#
# dat = a data.frame with columns pre_post, trt, and val at least
# pre_post_name = The value of pre_post to use to subset the data
# trt_name = The value of trt to use to subset the data
#
# return the mean vector
.mean_across_time <- function(dat, pre_post_name, trt_name)
{
mean(with(dat, val[pre_post == pre_post_name & trt == trt_name]), na.rm = TRUE)
}
# Internal function used in the group means bootstrap
#
# boot_dat = a data.frame of measurement unit id's and strata to boostrap
# i = the row numbers of the bootstrap sample
# full_dat = the full data.frame containing the raw data
# method = diff or ratio
# val_name = name of the val column
# pre_name = the name of the pre level
# post_name = the name of the post level
# test_name = the name of the test treatment
# ctrl_name = the name of the ctrl treatment
#
# @return the test statistic
.boot_func_group_mean <- function(boot_dat, i, full_dat, method, val_name,
pre_name, post_name, test_name,
ctrl_name)
{
int_dat <- NULL
for (j in i)
{
# this is highly memory intensive - would be better to do with sequential means
# The time steps makes it hard
# would normally use subset here but for R CMD check
ind <- with(full_dat, id == boot_dat$id[j] & trt == boot_dat$strata[j])
int_dat <- rbind(int_dat, full_dat[ind,])
}
#temp <- plyr::ddply(int_dat, .(pre_post, time, trt), summarize, val = mean(val, na.rm = TRUE))
temp <- plyr::ddply(int_dat, c("pre_post", "time", "trt"), function(x) {
data.frame(pre_post = x$pre_post[1],
time = x$time[1],
trt = x$trt[1],
val = mean(x[[val_name]], na.rm = TRUE))
})
ctrl_avg_pre <- .mean_across_time(temp, pre_name, ctrl_name)
test_avg_pre <- .mean_across_time(temp, pre_name, test_name)
ctrl_avg_post <- .mean_across_time(temp, post_name, ctrl_name)
test_avg_post <- .mean_across_time(temp, post_name, test_name)
if (method == "diff")
{
test_stat <- (test_avg_post - ctrl_avg_post) - (test_avg_pre - ctrl_avg_pre)
} else
{
test_stat <- test_avg_post - test_avg_pre / ctrl_avg_pre * ctrl_avg_post
}
return(test_stat)
}
# Bootstrap function for the 1-1 type
#
# boot_dat data.frame of ids of measurement units (with an associated matched pair)
# i the rows of the bootstrap sample
# full_dat the full data.frame where the matched pairs are merged
# method ratio or diff
# pre_name = the name of the pre level
# post_name = the name of the post level
#
# @return the boostrap test statistic
.boot_func_1_1 <- function(boot_dat, i, full_dat, method,
pre_name = pre_name, post_name = post_name)
{
int_dat <- NULL
for (j in i)
{
# this is highly memory intensive - would be better to do with sequential means
# The time steps makes it hard
# These are the test group ids in full_dat
#int_dat <- rbind(int_dat, subset(full_dat, id == boot_dat$id[j]))
int_dat <- rbind(int_dat, full_dat[full_dat$id == boot_dat$id[j],])
}
#boottemp <- plyr::ddply(int_dat, .(time, pre_post), val = mean(val, na.rm = TRUE))
boottemp <- plyr::ddply(int_dat, c("time", "pre_post"), function(x){
data.frame(time = x$time[1],
pre_post = x$pre_post[1],
val = mean(x$val, na.rm = TRUE))
})
pre <- mean(boottemp$val[boottemp$pre_post == pre_name], na.rm = TRUE)
post <- mean(boottemp$val[boottemp$pre_post == post_name], na.rm = TRUE)
test_stat <- post - pre
return(test_stat)
}
#' Test Level Shift in Test-Control Experiment
#'
#' @param dat a dataframe with columns including pre_post, time, id, trt, val
#' @param type \code{group} for testing two groups of size m and n, \code{1-1} for testing matched pairs, \code{1-m} for testing matches between one test and m ctrl
#' @param method \code{diff} for normalizing by the difference in test=control and \code{ratio} for normalizing by the ratio of test / control
#' @param val_name the name the value column in the data.frame dat
#' @param test_name the name of the test group in the trt column
#' @param ctrl_name the name of the control group in the trt column
#' @param pre_name the name of the pre time period in the pre_post column
#' @param post_name the name of the post time period in the pre_post column
#' @param R the number of boostrap replicates
#'
#' @return impactResult object
#' @export
#'
#' @importFrom assertthat assert_that
#' @importFrom boot boot
#' @importFrom plyr ddply
#' @importFrom stats quantile
#'
#' @examples
#' dat <- data.frame(trt = rep(c("test","ctrl"), each = 6),
#' pre_post = rep(c("pre","post"), each = 3, times = 2),
#' id = as.character(c(1,2,3,1,2,3,4,5,6,4,5,6)),
#' time = as.character(c(1,1,1,2,2,2,1,1,1,2,2,2)),
#' val = c(10,11,12,13,14,15,10,11,10,12,10,10.5))
#' test_result <- test_level_shift(dat, type = "group", method = "diff", R = 100)
test_level_shift <- function(dat, type = "group", method = "diff",
val_name = "val",
test_name = "test", ctrl_name = "ctrl",
pre_name = "pre", post_name = "post",
R = 1000)
{
# type = "group"
# method = "diff"
# test_name = "test"
# ctrl_name = "ctrl"
# pre_name = "pre"
# post_name = "post"
# R = 100
validate_impact_data(dat)
assertthat::assert_that(type %in% c("group", "1-1", "1-m"),
msg = "The type variable must be one of group, 1-1, or 1-m")
assertthat::assert_that(method %in% c("diff", "ratio"),
msg = "The method variable must be either diff or ratio")
if (type == "group")
{
# mean across units, within time
# would normally use "summarize" here, but using an alternate to stop R CMD check notes
#temp <- plyr::ddply(dat, .(pre_post, time, trt), summarize, val = mean(val, na.rm = TRUE))
temp <- plyr::ddply(dat, c("pre_post", "time", "trt"), function(x) {
data.frame(pre_post = x$pre_post[1],
time = x$time[1],
trt = x$trt[1],
val = mean(x[[val_name]], na.rm = TRUE))
})
ctrl_avg_pre <- .mean_across_time(temp, pre_name, ctrl_name)
test_avg_pre <- .mean_across_time(temp, pre_name, test_name)
ctrl_avg_post <- .mean_across_time(temp, post_name, ctrl_name)
test_avg_post <- .mean_across_time(temp, post_name, test_name)
if (method == "diff")
{
test_stat <- (test_avg_post - ctrl_avg_post) - (test_avg_pre - ctrl_avg_pre)
} else
{
test_stat <- test_avg_post - test_avg_pre / ctrl_avg_pre * ctrl_avg_post
}
# bootstrap measurement units
# If there is more than one unique treatment per id, then this will fail
#boot_dat <- plyr::ddply(dat, c("id"), summarize, strata = unique(trt))
boot_dat <- plyr::ddply(dat, c("id"), function(x){
data.frame(id = x$id[1],
strata = x$trt[1])
})
b1 <- boot::boot(boot_dat, .boot_func_group_mean, R = R,
stype = "i", strata = boot_dat$strata,
full_dat = dat, method = method, val_name = val_name,
pre_name = pre_name, post_name = post_name,
test_name = test_name, ctrl_name = ctrl_name)
} else if (type == "1-1")
{
assertthat::assert_that("matchid" %in% names(dat),
msg = "the input data.frame must contain a matchid column for the 1-1 type")
ind_ctrl <- which(dat$trt == ctrl_name)
ind_test <- which(dat$trt == test_name)
temp <- merge(dat[ind_ctrl,], dat[ind_test,],
by.x = c("pre_post","time","matchid"),
by.y = c("pre_post","time","id"))
if (method == "diff")
{
temp$val <- temp[[paste0(val_name, ".y")]] - temp[[paste0(val_name, ".x")]]
} else
{
temp$val <- temp[[paste0(val_name, ".y")]] / temp[[paste0(val_name, ".x")]]
}
pre <- mean(temp$val[temp$pre_post == pre_name], na.rm = TRUE)
post <- mean(temp$val[temp$pre_post == post_name], na.rm = TRUE)
test_stat <- post - pre
# bootstrap pairs together
boot_dat <- data.frame(id = unique(temp$id))
b1 <- boot::boot(boot_dat, .boot_func_1_1, R = R, stype = "i",
full_dat = temp, method = method, pre_name = pre_name,
post_name = post_name)
}
ret <- list(result = test_stat,
bootstrap_mean = b1$t0,
bootstrap_results = b1$t,
bootstrap_interval = stats::quantile(b1$t, probs = c(0.025, 0.975)),
pvalue = length(which(b1$t < 0)) / length(b1$t),
type = type,
method = method
)
class(ret) <- "impactResult"
return(ret)
}
| /R/test_level_shift.R | permissive | bertcarnell/impact | R | false | false | 8,921 | r | # Copyright 2019 Robert Carnell
# Internal function to take means across time
#
# dat = a data.frame with columns pre_post, trt, and val at least
# pre_post_name = The value of pre_post to use to subset the data
# trt_name = The value of trt to use to subset the data
#
# return the mean vector
.mean_across_time <- function(dat, pre_post_name, trt_name)
{
mean(with(dat, val[pre_post == pre_post_name & trt == trt_name]), na.rm = TRUE)
}
# Internal function used in the group means bootstrap
#
# boot_dat = a data.frame of measurement unit id's and strata to boostrap
# i = the row numbers of the bootstrap sample
# full_dat = the full data.frame containing the raw data
# method = diff or ratio
# val_name = name of the val column
# pre_name = the name of the pre level
# post_name = the name of the post level
# test_name = the name of the test treatment
# ctrl_name = the name of the ctrl treatment
#
# @return the test statistic
.boot_func_group_mean <- function(boot_dat, i, full_dat, method, val_name,
pre_name, post_name, test_name,
ctrl_name)
{
int_dat <- NULL
for (j in i)
{
# this is highly memory intensive - would be better to do with sequential means
# The time steps makes it hard
# would normally use subset here but for R CMD check
ind <- with(full_dat, id == boot_dat$id[j] & trt == boot_dat$strata[j])
int_dat <- rbind(int_dat, full_dat[ind,])
}
#temp <- plyr::ddply(int_dat, .(pre_post, time, trt), summarize, val = mean(val, na.rm = TRUE))
temp <- plyr::ddply(int_dat, c("pre_post", "time", "trt"), function(x) {
data.frame(pre_post = x$pre_post[1],
time = x$time[1],
trt = x$trt[1],
val = mean(x[[val_name]], na.rm = TRUE))
})
ctrl_avg_pre <- .mean_across_time(temp, pre_name, ctrl_name)
test_avg_pre <- .mean_across_time(temp, pre_name, test_name)
ctrl_avg_post <- .mean_across_time(temp, post_name, ctrl_name)
test_avg_post <- .mean_across_time(temp, post_name, test_name)
if (method == "diff")
{
test_stat <- (test_avg_post - ctrl_avg_post) - (test_avg_pre - ctrl_avg_pre)
} else
{
test_stat <- test_avg_post - test_avg_pre / ctrl_avg_pre * ctrl_avg_post
}
return(test_stat)
}
# Bootstrap function for the 1-1 type
#
# boot_dat data.frame of ids of measurement units (with an associated matched pair)
# i the rows of the bootstrap sample
# full_dat the full data.frame where the matched pairs are merged
# method ratio or diff
# pre_name = the name of the pre level
# post_name = the name of the post level
#
# @return the boostrap test statistic
.boot_func_1_1 <- function(boot_dat, i, full_dat, method,
pre_name = pre_name, post_name = post_name)
{
int_dat <- NULL
for (j in i)
{
# this is highly memory intensive - would be better to do with sequential means
# The time steps makes it hard
# These are the test group ids in full_dat
#int_dat <- rbind(int_dat, subset(full_dat, id == boot_dat$id[j]))
int_dat <- rbind(int_dat, full_dat[full_dat$id == boot_dat$id[j],])
}
#boottemp <- plyr::ddply(int_dat, .(time, pre_post), val = mean(val, na.rm = TRUE))
boottemp <- plyr::ddply(int_dat, c("time", "pre_post"), function(x){
data.frame(time = x$time[1],
pre_post = x$pre_post[1],
val = mean(x$val, na.rm = TRUE))
})
pre <- mean(boottemp$val[boottemp$pre_post == pre_name], na.rm = TRUE)
post <- mean(boottemp$val[boottemp$pre_post == post_name], na.rm = TRUE)
test_stat <- post - pre
return(test_stat)
}
#' Test Level Shift in Test-Control Experiment
#'
#' @param dat a dataframe with columns including pre_post, time, id, trt, val
#' @param type \code{group} for testing two groups of size m and n, \code{1-1} for testing matched pairs, \code{1-m} for testing matches between one test and m ctrl
#' @param method \code{diff} for normalizing by the difference in test=control and \code{ratio} for normalizing by the ratio of test / control
#' @param val_name the name the value column in the data.frame dat
#' @param test_name the name of the test group in the trt column
#' @param ctrl_name the name of the control group in the trt column
#' @param pre_name the name of the pre time period in the pre_post column
#' @param post_name the name of the post time period in the pre_post column
#' @param R the number of boostrap replicates
#'
#' @return impactResult object
#' @export
#'
#' @importFrom assertthat assert_that
#' @importFrom boot boot
#' @importFrom plyr ddply
#' @importFrom stats quantile
#'
#' @examples
#' dat <- data.frame(trt = rep(c("test","ctrl"), each = 6),
#' pre_post = rep(c("pre","post"), each = 3, times = 2),
#' id = as.character(c(1,2,3,1,2,3,4,5,6,4,5,6)),
#' time = as.character(c(1,1,1,2,2,2,1,1,1,2,2,2)),
#' val = c(10,11,12,13,14,15,10,11,10,12,10,10.5))
#' test_result <- test_level_shift(dat, type = "group", method = "diff", R = 100)
test_level_shift <- function(dat, type = "group", method = "diff",
val_name = "val",
test_name = "test", ctrl_name = "ctrl",
pre_name = "pre", post_name = "post",
R = 1000)
{
# type = "group"
# method = "diff"
# test_name = "test"
# ctrl_name = "ctrl"
# pre_name = "pre"
# post_name = "post"
# R = 100
validate_impact_data(dat)
assertthat::assert_that(type %in% c("group", "1-1", "1-m"),
msg = "The type variable must be one of group, 1-1, or 1-m")
assertthat::assert_that(method %in% c("diff", "ratio"),
msg = "The method variable must be either diff or ratio")
if (type == "group")
{
# mean across units, within time
# would normally use "summarize" here, but using an alternate to stop R CMD check notes
#temp <- plyr::ddply(dat, .(pre_post, time, trt), summarize, val = mean(val, na.rm = TRUE))
temp <- plyr::ddply(dat, c("pre_post", "time", "trt"), function(x) {
data.frame(pre_post = x$pre_post[1],
time = x$time[1],
trt = x$trt[1],
val = mean(x[[val_name]], na.rm = TRUE))
})
ctrl_avg_pre <- .mean_across_time(temp, pre_name, ctrl_name)
test_avg_pre <- .mean_across_time(temp, pre_name, test_name)
ctrl_avg_post <- .mean_across_time(temp, post_name, ctrl_name)
test_avg_post <- .mean_across_time(temp, post_name, test_name)
if (method == "diff")
{
test_stat <- (test_avg_post - ctrl_avg_post) - (test_avg_pre - ctrl_avg_pre)
} else
{
test_stat <- test_avg_post - test_avg_pre / ctrl_avg_pre * ctrl_avg_post
}
# bootstrap measurement units
# If there is more than one unique treatment per id, then this will fail
#boot_dat <- plyr::ddply(dat, c("id"), summarize, strata = unique(trt))
boot_dat <- plyr::ddply(dat, c("id"), function(x){
data.frame(id = x$id[1],
strata = x$trt[1])
})
b1 <- boot::boot(boot_dat, .boot_func_group_mean, R = R,
stype = "i", strata = boot_dat$strata,
full_dat = dat, method = method, val_name = val_name,
pre_name = pre_name, post_name = post_name,
test_name = test_name, ctrl_name = ctrl_name)
} else if (type == "1-1")
{
assertthat::assert_that("matchid" %in% names(dat),
msg = "the input data.frame must contain a matchid column for the 1-1 type")
ind_ctrl <- which(dat$trt == ctrl_name)
ind_test <- which(dat$trt == test_name)
temp <- merge(dat[ind_ctrl,], dat[ind_test,],
by.x = c("pre_post","time","matchid"),
by.y = c("pre_post","time","id"))
if (method == "diff")
{
temp$val <- temp[[paste0(val_name, ".y")]] - temp[[paste0(val_name, ".x")]]
} else
{
temp$val <- temp[[paste0(val_name, ".y")]] / temp[[paste0(val_name, ".x")]]
}
pre <- mean(temp$val[temp$pre_post == pre_name], na.rm = TRUE)
post <- mean(temp$val[temp$pre_post == post_name], na.rm = TRUE)
test_stat <- post - pre
# bootstrap pairs together
boot_dat <- data.frame(id = unique(temp$id))
b1 <- boot::boot(boot_dat, .boot_func_1_1, R = R, stype = "i",
full_dat = temp, method = method, pre_name = pre_name,
post_name = post_name)
}
ret <- list(result = test_stat,
bootstrap_mean = b1$t0,
bootstrap_results = b1$t,
bootstrap_interval = stats::quantile(b1$t, probs = c(0.025, 0.975)),
pvalue = length(which(b1$t < 0)) / length(b1$t),
type = type,
method = method
)
class(ret) <- "impactResult"
return(ret)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.