blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a90b29d8afb835be152e774ecf5aa23066f97d45
|
b1fcc6ac0a4f2c8021f40c4e93a5a18efae678f1
|
/loss_reserving_glm/Script_Loss_reserving_GLM.R
|
ccec0f2ed07c444780d4ea0a72a376f912891f89
|
[] |
no_license
|
augustod-prieto/projects
|
9bede5f3fdece5729abf8f96b97dd50e06b6f0aa
|
53292a29df57b42b4ce711cf455e5ebb74bedceb
|
refs/heads/main
| 2023-07-01T22:14:47.777558
| 2021-08-14T06:35:59
| 2021-08-14T06:35:59
| 389,704,287
| 0
| 0
| null | 2021-08-11T23:59:10
| 2021-07-26T16:47:18
|
R
|
UTF-8
|
R
| false
| false
| 2,526
|
r
|
Script_Loss_reserving_GLM.R
|
## Application of GLM to Loss Reserving. ##
library(tidyverse)
library(ChainLadder)
## Data preparation.
# Each sheet holds one accident year; row 15 and columns c(7,11,15,19,23)
# pick the claim amounts at development periods 1..5 -- assumes the SSN
# workbook layout, TODO confirm against the spreadsheet.
data_2011 <- readxl::read_excel("ssn_20162017_desarrollo_siniestros_automotores.xlsx", sheet = 8)[15,c(7,11,15,19,23)]
data_2010 <- readxl::read_excel("ssn_20162017_desarrollo_siniestros_automotores.xlsx", sheet = 9)[15,c(7,11,15,19,23)]
data_2009 <- readxl::read_excel("ssn_20162017_desarrollo_siniestros_automotores.xlsx", sheet = 10)[15,c(7,11,15,19,23)]
data_2008 <- readxl::read_excel("ssn_20162017_desarrollo_siniestros_automotores.xlsx", sheet = 11)[15,c(7,11,15,19,23)]
data_2007 <- readxl::read_excel("ssn_20162017_desarrollo_siniestros_automotores.xlsx", sheet = 12)[15,c(7,11,15,19,23)]
# Assemble a 5x5 run-off triangle: rows = accident years 2007-2011,
# columns = development periods 1-5.
matrix_data <- matrix(c(data_2007,data_2008,data_2009,data_2010,data_2011), 5, 5, byrow = TRUE)
rownames(matrix_data) <- c(2007:2011)
colnames(matrix_data) <- c(1:5)
true_data <- as.triangle(matrix_data)
model_data <- true_data
# Blank out the lower-right corner so model_data looks like an incomplete
# triangle (the usual reserving setting); true_data keeps the fully observed
# values for out-of-sample validation below.
for(i in 5:2){
for(j in (7-i):5){
model_data[i,j] <- NA
}
}
## Chain-Ladder method.
mack_reserves <- MackChainLadder(model_data)
## Chain-Ladder as a particular case of GLM. Over-dispersed Poisson, i.e, tweedie p = 1.
od_poisson_reserves <- glmReserve(model_data)
## GLM - Tweedie distribution with estimated "p", where 1 < p < 2, i.e. a compound Poisson/Gamma distribution.
poisson_gamma_reserves <- glmReserve(model_data, var.power = NULL)
## GLM - Normal distribution, i.e, tweedie p = 0.
gaussian_reserves <- glmReserve(model_data, var.power = 0)
## Validation via out-of-sample validation in the Loss Reserving context.
## Out-of-sample RMSE of a completed run-off triangle.
##
## Compares the last development column (column 5) of the observed triangle
## against the completed triangle stored inside the prediction object.
##
## Args:
##   true_data:      5x5 triangle/matrix of fully observed values.
##   predicted_data: either a 'MackChainLadder' object (completed triangle in
##                   its third element, $FullTriangle) or a 'glmReserve'
##                   object (completed triangle in its fourth element).
## Returns:
##   list(pointwise_rmse = numeric(5), total_rmse = sum of pointwise values).
## Raises an error when predicted_data is neither supported class.
triangle_rmse <- function(true_data, predicted_data){
  result <- list("pointwise_rmse" = numeric(5), "total_rmse" = "")
  # BUGFIX: the original tested `class(predicted_data) == "glmReserve"`;
  # glmReserve objects carry a class vector of length > 1, and a length > 1
  # condition inside if() is an error in R >= 4.2. inherits() handles both.
  if (inherits(predicted_data, "MackChainLadder")) {
    full_triangle <- predicted_data[[3]]
  } else if (inherits(predicted_data, "glmReserve")) {
    full_triangle <- predicted_data[[4]]
  } else {
    # Fail loudly instead of printing a message and returning a bogus result.
    stop("predicted data is not 'MackChainLadder' nor 'glmReserve'")
  }
  for (l in seq_len(5)) {
    result[[1]][l] <- sqrt((true_data[l, 5] - full_triangle[l, 5])^2 / 5)
  }
  result[[2]] <- sum(result[[1]])
  result
}
# Out-of-sample RMSE of each fitted model against the observed triangle.
mack_rmse <- triangle_rmse(true_data, mack_reserves)
odp_rmse <- triangle_rmse(true_data, od_poisson_reserves)
pg_rmse <- triangle_rmse(true_data, poisson_gamma_reserves)
gaussian_rmse <- triangle_rmse(true_data, gaussian_reserves)
|
3a73e16cdaf0128108beddd8276733204cb8d847
|
cd545db34cc4e68b1b5ff7f726d0f4039e9792f8
|
/R/module.compare.R
|
b346f85af5c39d8f7c6e64ccfea6bdaa63271d45
|
[] |
no_license
|
menggf/DEComplexDisease
|
3c3598cf7403fda0c0a2fbc4c988196fe229c6cb
|
c7ad81ba12b3274c4d2d36a77cc22baea16caf60
|
refs/heads/master
| 2022-07-09T00:53:35.770287
| 2022-06-25T06:13:24
| 2022-06-25T06:13:24
| 95,849,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,223
|
r
|
module.compare.R
|
#' Compare and plot the overlap among predicted modules
#'
#' Plot the overlap among predicted DEG modules
#'
#' @docType methods
#' @import ComplexHeatmap
#' @import grid
#'
#' @name module.compare
#' @param res.module1 a 'seed.module' or 'cluster.module' object returned by \code{\link{seed.module}} or \code{\link{cluster.module}}
#' @param res.module2 a 'seed.module' or 'cluster.module' object returned by \code{\link{seed.module}} or \code{\link{cluster.module}}
#' @param used.mods1 the modules to display
#' @param used.mods2 the modules to display
#' @param type the module type to display
#' @param max.n1 the maximum number of modules to display. If 'used.mods1' is set, this option will be ignored.
#' @param max.n2 the maximum number of modules to display. If 'used.mods2' is set, this option will be ignored.
#' @param show.overlap boolean, display the overlap number
#' @param cex the font cex to display the overlap number
#'
#' @author Guofeng Meng
#'
#'
#'
#' @details This function is to compare the modules from different studies, e.g. the different diseases or the different data for the same disease.
#'
#' @return The heatmap plot for gene overlaps.
#'
#' @examples
#' module.compare(res.mod1,res.mod2, type='model', max.n1=5)
#' @export
module.compare <- function(res.module1, res.module2, used.mods1 = NULL, used.mods2 = NULL,
    type = c("model", "max.patients", "max.genes")[1], max.n1 = 30, max.n2 = max.n1,
    show.overlap = TRUE, cex = 10) {
    # Validate inputs: both objects must come from seed.module/cluster.module
    # and must carry the requested module type.
    if (!(is(res.module1, "seed.module") | is(res.module1, "cluster.module")))
        stop("Error: reg.module1: must the output of 'seed.module' or 'cluster.module'!")
    if (!(is(res.module2, "seed.module") | is(res.module2, "cluster.module")))
        stop("Error: reg.module2: must the output of 'seed.module' or 'cluster.module'!")
    if (!type %in% c("model", "max.patients", "max.genes"))
        stop("Error: type: should one of model, max.patients and max.genes!")
    if (is.null(res.module1[[1]][[type]]))
        stop("Error: res.module1 has no selected type")
    if (is.null(res.module2[[1]][[type]]))
        stop("Error: res.module2 has no selected type")
    # Drop the bookkeeping entries; what remains are module IDs.
    bookkeeping <- c("decd.specific", "decd.input", "decd.clustering")
    ids1 <- names(res.module1)
    ids1 <- ids1[!ids1 %in% bookkeeping]
    ids2 <- names(res.module2)
    ids2 <- ids2[!ids2 %in% bookkeeping]
    # Choose the displayed modules: user-supplied IDs win over the automatic
    # top-N selection done by .select.mod().
    if (!is.null(used.mods1)) {
        mod1 <- used.mods1[used.mods1 %in% ids1]
        if (length(mod1) == 0)
            stop("Error: used.mods1: no ID is recognized!")
    } else {
        mod1 <- .select.mod(res.module1, max.n1, type = type)
    }
    if (!is.null(used.mods2)) {
        mod2 <- used.mods2[used.mods2 %in% ids2]
        if (length(mod2) == 0)
            stop("Error: used.mods2: no ID is recognized!")
    } else {
        mod2 <- .select.mod(res.module2, max.n2, type = type)
    }
    n1 <- length(mod1)
    n2 <- length(mod2)
    # Gene-set size of every displayed module.
    size1 <- vapply(seq_len(n1), function(k) length(res.module1[[mod1[k]]][[type]][["genes"]]), numeric(1))
    size2 <- vapply(seq_len(n2), function(k) length(res.module2[[mod2[k]]][[type]][["genes"]]), numeric(1))
    # counts: raw gene overlap; frac: overlap scaled by the larger set size.
    counts <- matrix(nrow = n1, ncol = n2)
    frac <- matrix(nrow = n1, ncol = n2)
    for (i in seq_len(n1)) {
        genes1 <- res.module1[[mod1[i]]][[type]][["genes"]]
        for (j in seq_len(n2)) {
            genes2 <- res.module2[[mod2[j]]][[type]][["genes"]]
            counts[i, j] <- length(intersect(genes1, genes2))
            frac[i, j] <- counts[i, j] / max(size1[i], size2[j])
        }
    }
    # Labels show the module ID and its gene count, e.g. "mod.1(120)".
    dimnames(counts) <- list(paste0(mod1, "(", size1, ")"),
                             paste0(mod2, "(", size2, ")"))
    dimnames(frac) <- dimnames(counts)
    if (show.overlap) {
        # Overlay the raw overlap count on each heatmap cell.
        Heatmap(frac, cluster_rows = FALSE, cluster_columns = FALSE, name = "Genes",
            column_title = "genes", cell_fun = function(j, i, x, y, w, h, col) {
                grid.text(counts[i, j], x, y, gp = gpar(fontsize = cex))
            })
    } else {
        Heatmap(frac, cluster_rows = FALSE, cluster_columns = FALSE, name = "Genes",
            column_title = "genes")
    }
}
|
b0cc18d7c5c6ade9c2385af03640cf050c80e60e
|
eb46f2f05ac23d8fa7d3b3b88c3d582cdaccdb41
|
/rprog-032/quiz4.R
|
9a94fcc59fc5d3f1545370adf893277780304b3e
|
[] |
no_license
|
wangjiezhe/cousera
|
9a041c6eb612cbfc39d708e4455a34ca2c1c7b3f
|
0db137134508bf3942bad5c1841ae24e602b124a
|
refs/heads/master
| 2021-01-01T15:51:22.663852
| 2015-10-24T01:53:12
| 2015-10-24T01:53:12
| 42,147,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
quiz4.R
|
# Coursera "R Programming" quiz 4 scratch work.
# NOTE: statement order matters -- every random draw consumes the seeded RNG
# stream, so reordering lines changes the results.
set.seed(1)
rpois(5, 2)
set.seed(10)
x <- rep(0:1, each = 5)
e <- rnorm(10, 0, 20)
y <- 0.5 + 2 * x + e
# plot(x, y)
library(datasets)
# Profile a simple linear-model fit with Rprof()/summaryRprof().
x1 <- rnorm(100, 1)
x2 <- rnorm(100, 2)
y <- rnorm(100, 3)
Rprof(tmp <- tempfile())
fit <- lm(y ~ x1 + x2)
Rprof(NULL)
summaryRprof(tmp)
unlink(tmp)
|
1a7045b3b57893209c801cdfdf36d95dbe02fdfa
|
110731767c52d468a6eb958fe2a7bd1d76b405fb
|
/vignettes/VLP_Brown_oilwell-01.R
|
71fa995c654fd286553354a0badc0aaaf8e52339
|
[] |
no_license
|
JFernandez696/rNodal
|
1a7e2fc3ab9930bbc208701688e31bac9fe84ce6
|
09bc0fedbc0f5517233b8a2d2cbe94f659e5473a
|
refs/heads/master
| 2021-10-20T00:09:33.150787
| 2017-10-27T19:35:22
| 2017-10-27T19:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,279
|
r
|
VLP_Brown_oilwell-01.R
|
## ----rows.print=30-------------------------------------------------------
library(rNodal)
library(tibble)
# Example from Prosper oil well 01. Dry version
# roughness = 0.0006
# Well geometry, rates and fluid properties for the VLP run -- units follow
# rNodal conventions (presumably ft / in / stb/d / psia / degF; TODO confirm).
input_example <- setWellInput(field.name = "HAGBR.MOD",
well.name = "Oilwell_01_Dry",
depth.wh = 0, depth.bh = 9275,
diam.in = 4.052,
GLR = 800, liq.rt = 983, wcut = 0.0,
thp = 100, tht = 60, bht = 210,
API = 37, oil.visc = 5.0,
gas.sg = 0.76, wat.sg = 1.07, if.tens = 30,
salinity = 23000
)
# Modified Hagedorn-Brown correlation, 29 tubing segments.
well_model <- setVLPmodel(vlp.model = "hagbr.mod", segments = 29, tol = 0.00001)
as.tibble(runVLP(well.input = input_example, well_model))
## ------------------------------------------------------------------------
# in Prosper the angle is measured againt the vertical
library(rNodal)
# Measured depth (MD) vs true vertical depth (TVD) pairs for the well.
md_tvd_01 <- "
MD TVD
0 0
600 600
1005 1000
4075 4000
7700 7500
9275 9000
"
md_tvd <- set_deviation_survey(md_tvd_01)
md_tvd
# Deviation angle for every survey branch, measured against the vertical.
deviation_survey <- compute_angle_deviation_survey(md_tvd, reference = "vertical")
dataFrame <- deviation_survey
dataFrame
## ------------------------------------------------------------------------
# split deviated well in two ways: by and length.out
library(rNodal)
md <- deviation_survey[["MD"]] # get MD vector
# Partition the MD into deltas of size 50.
add_md_by <- rNodal:::split_well_in_deltas(md, by = 50)
add_md_by
## ------------------------------------------------------------------------
# split deviated well in two ways: by and length.out
library(rNodal)
md <- deviation_survey[["MD"]] # get MD vector
# Partition the MD into a fixed number (40) of points.
add_md_lo <- rNodal:::split_well_in_deltas(md, length.out = 40)
add_md_lo
## ------------------------------------------------------------------------
rNodal:::build_survey_with_deltas(deviation_survey, add_md_by)
rNodal:::build_survey_with_deltas(deviation_survey, add_md_lo)
## ------------------------------------------------------------------------
# split the MD of the well in equal parts but a total of "n" segments
split <- seq.int(deviation_survey[1, "MD"], deviation_survey[nrow(deviation_survey), "MD"],
length.out = 100)
# add the known MD values to the sequence. Now the length is little bit longer
md <- deviation_survey[["MD"]]
add_md <- sort(unique(c(md, split)))
add_md
# reconstruct MD v TVD but for the partitioned well in delta-x
# Walk the refined MD sequence carrying the survey branch (`index`) whose
# angle applies, integrating delta.tvd = cos(angle) * delta.md.
# NOTE(review): growing `df` row by row is O(n^2); acceptable at ~100 rows,
# and `1:length(add_md)` would misbehave on an empty sequence (cannot happen
# here since add_md always contains the survey MDs).
df <- data.frame() # new long dataframe
index <- 1 # index the small dataframe
tvd <- 0
for (j in 1:length(add_md)) { # iterate through the sequence
row = dataFrame[index, ] # get a row of the deviation survey
# cat(index)
df[j, "md"] <- add_md[j] # assign MD in sequence to md in long dataframe
df[j, "seg"] <- index # assign
if (j == 1) # if it is the first row
df[j, "delta.md"] <- add_md[j]
else
df[j, "delta.md"] <- add_md[j] - df[j-1, "md"]
df[j, "radians"] <- row[["radians"]]
df[j, "degrees"] <- row[["degrees"]]
df[j, "delta.tvd"] <- cos(row[["radians"]]) * df[j, "delta.md"] # calculate delta TVD
tvd <- tvd + df[j, "delta.tvd"] # add delta.tvd
df[j, "tvd"] <- tvd # tvd column
if (add_md[j] >= row[["MD"]]) { # switch to next deviation branch
index <- index + 1
}
}
df
## ------------------------------------------------------------------------
# iterate through dataframe
# Example: row-wise iteration over the deviation survey, printing each MD.
for (index in 1:nrow(dataFrame)) {
row = dataFrame[index, ]
# do stuff with the row
# print(row[["MD"]])
cat(row[["MD"]], "\n")
}
## ------------------------------------------------------------------------
# Example: for every survey branch, print the refined MD points up to it.
for (index in 1:nrow(dataFrame)) {
row = dataFrame[index, ]
# cat(row, "\n")
for (j in add_md) {
if (j <= row[["MD"]]) {
cat(sprintf("%12f %12f \n", j, row[["MD"]]))
# print(row[["MD"]][index] * sin(row[["radians"]][index]))
}
}
}
## ------------------------------------------------------------------------
# split the tubing in dx pieces
# NOTE(review): this apply() only extracts the MD column; the dx split the
# comment mentions is not implemented here.
apply(deviation_survey, 1, function(x) x["MD"]
)
|
a594914aea7bf42bbffdc107d4099682467fb0c7
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/apollo/man/apollo_writeF12.Rd
|
7dbb712b3d8664ac530add74d4bd88b2a3625215
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 583
|
rd
|
apollo_writeF12.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apollo_writeF12.R
\name{apollo_writeF12}
\alias{apollo_writeF12}
\title{Writes an F12 file with the results of a model estimation.}
\usage{
apollo_writeF12(model, truncateCoeffNames = TRUE)
}
\arguments{
\item{model}{Model object. Estimated model object as returned by function \link{apollo_estimate}.}
\item{truncateCoeffNames}{Boolean. TRUE to truncate parameter names to 10 characters.
TRUE by default.}
}
\value{
Nothing.
}
\description{
Writes an F12 file with the results of a model estimation.
}
|
c0994633bffc5f70b5becd104f403df0107251e8
|
1b872282a8fcfa99273958a7f95fab6a813d7d09
|
/R/calculateIndividualLogFC.R
|
09fff41e5b32e81be7efe2f0971d91cc994d93d9
|
[
"MIT"
] |
permissive
|
mirax87/multiGSEA
|
cecc8e1b6ebedbe92a87ecb7a91034635b3b69c3
|
b8747abc1815ab4fa08ef024e77eee2a864ac6ed
|
refs/heads/master
| 2020-04-02T13:49:21.057653
| 2018-06-01T22:07:22
| 2018-06-01T22:07:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,825
|
r
|
calculateIndividualLogFC.R
|
##' Utility function to run limma differential expression analysis
##'
##' @details
##' This function fits linear modles (or glms) to perform differential
##' expression analyses. If the \code{x} object is a \code{DGEList} the
##' analysis will be performed using edgeR's quasi-likelihood framework,
##' otherwise limma will be used for all other scenarios.
##'
##' If \code{x} is a \code{DGEList} we require that \code{estimateDisp} has
##' already been called. If you prefer to analyze rnaseq data using voom,
##' be sure that \code{x} is the object that has been returned from a
##' call to \code{\link[limma]{voom}} (or
##' \code{\link[limma]{voomWithQualityWeights}}.
##'
##' The documentation here is speaking the language of a "limma" analysis,
##' however for each parameter, there is an analagous function/parameter that
##' will be delegated to.
##'
##' Lastly, if \code{x} is simply a single column matrix, we assume that we are
##' just passing a single pre-ranked vector of statistics through multiGSEA's
##' analysis pipelines (for use in methods like "fgsea", "cameraPR", etc.), and
##' a logFC-like data.frame is constructed with these statistics in the
##' \code{logFC} and \code{t} columns.
##'
##' @export
##'
##' @param x The expression object. This can be 1 column matrix if you are not
##' running any analysis, and this function essentially is just a "pass through"
##' @param design The design matrix for the experiment
##' @param contrast The contrast you want to test and provide stats for. By
##' default this tests the last column of the \code{design} matrix. If you
##' want to test a custom contrast, this can be a contrast vector, which
##' means that it should be as long as \code{ncol(design)} and it most-often
##' sum to one. In the future, the user will be able to specify a range of
##' coefficients over \code{design} to perform an ANOVA analysis.
##' @param robust.fit The value of the \code{robust} parameter to pass down to
##' the \code{\link[limma]{lmFit}} function. Defaults to \code{FALSE}.
##' @param robust.eBayes The value of the \code{robust} parameter to pass down
##' to the \code{\link[limma]{eBayes}} function.
##' @param trend.eBayes The value of the \code{trend} parameter to pass down to
##' the \code{\link[limma]{eBayes}} function.
##' @param treat.lfc If this is numeric, this activates limma's "treat"
##' functionality and tests for differential expression against this
##' specified log fold change threshold. This defaults to \code{NULL}.
##' @param confint add confidence intervals to \code{topTable} output (default
##' \code{TRUE})? Ignored if \code{x} is a \code{DGEList}.
##' @param with.fit If \code{TRUE}, this function returns a list object with
##' both the fit and the table of logFC statistics, otherwise just the
##' logFC statistics table is returned.
##' @param use.qlf If \code{TRUE} (default), will use edgeR's quasilikelihood
##' framework for analysis, otherwise uses glmFit/glmTest.
##' @param ... parameters passed down into the relevant limma/edgeR based
##' functions.
##' @param as.dt Return the result as a \code{data.table}? Defaults to
##' \code{FALSE}.
##' @return If \code{with.fit == FALSE} (the default) a \code{data.table} of
##' logFC statistics for the contrast under test. Otherwise, a list is
##' returned with \code{$result} containing the logFC statistics, and
##' \code{$fit} has the limma fit for the data/design/contrast under test.
calculateIndividualLogFC <- function(x, design, contrast=ncol(design),
                                     robust.fit=FALSE, robust.eBayes=FALSE,
                                     trend.eBayes=FALSE, treat.lfc=NULL,
                                     confint=TRUE, with.fit=FALSE,
                                     use.qlf = TRUE, ..., as.dt=FALSE) {
  ## `contrast` is treated as a contrast *vector* only when x is matrix-like
  ## with several columns and a design is supplied; otherwise it is a single
  ## coefficient (column index or name) of the design.
  do.contrast <- !is.vector(x) &&
    ncol(x) > 1L &&
    !is.null(design) &&
    length(contrast) > 1L
  if (do.contrast) {
    if (length(contrast) != ncol(design)) {
      stop("Invalid contrast vector, must be as long as columns in design")
    }
  } else if (!is.null(design) && !is.null(contrast) &&
             (length(contrast) != 1L ||
              (is.numeric(contrast) &&
               (contrast < 1 || contrast > ncol(design))))) {
    ## BUGFIX: the original condition (`length(contrast) != 1 && contrast > 0
    ## && contrast <= ncol(design)`) was inverted -- it never fired for an
    ## out-of-range scalar coefficient and could put a length > 1 vector into
    ## if(). A coefficient must be a single value, and a numeric one must
    ## index a design column; character coefficient names pass through.
    stop("Illegal coefficient to test in design")
  }
  ## "treat" mode tests against a logFC threshold instead of zero.
  use.treat <- FALSE
  if (is.numeric(treat.lfc)) {
    stopifnot(length(treat.lfc) == 1L, treat.lfc > 0)
    use.treat <- TRUE
  }
  if (is(x, 'DGEList')) {
    ## We default to using the quasi-likelihood pipeline with edgeR with
    ## robust fitting. Setting robust=TRUE here should be roughly equivalent
    ## to eBayes(..., robust=TRUE) in the limma world.
    if (!disp.estimated(x)) {
      stop("Dispersions not estimated, need to run estimateDisp first")
    }
    if (use.qlf) {
      fit <- glmQLFit(x, design, robust=TRUE)
    } else {
      fit <- glmFit(x, design)
    }
    if (use.treat) {
      if (do.contrast) {
        tt <- glmTreat(fit, contrast=contrast, lfc=treat.lfc)
      } else {
        tt <- glmTreat(fit, coef=contrast, lfc=treat.lfc)
      }
    } else {
      etest <- if (use.qlf) glmQLFTest else glmLRT
      if (do.contrast) {
        tt <- etest(fit, contrast = contrast)
      } else {
        tt <- etest(fit, coef=contrast)
      }
    }
    ## Normalize edgeR's column names to the limma-style names used
    ## downstream; edgeR has no moderated t statistic, hence t = NA.
    tt <- as.data.frame(topTags(tt, Inf, sort.by='none'))
    tt <- transform(setDT(tt), t=NA_real_, featureId=rownames(x))
    setnames(tt, c('logCPM', 'PValue', 'FDR'), c('AveExpr', 'pval', 'padj'))
    out <- tt
  } else if (ncol(x) > 1L) {
    ## If x is matrix-like but not a DGEList, we assume you are OK to run the
    ## limma pipeline.
    fit <- lmFit(x, design, method=if (robust.fit) 'robust' else 'ls', ...)
    if (do.contrast) {
      fit <- contrasts.fit(fit, contrast)
      contrast <- 1L  # after contrasts.fit the tested coefficient is column 1
    }
    if (use.treat) {
      fit <- treat(fit, lfc=treat.lfc, robust=robust.eBayes, trend=trend.eBayes)
      tt <- topTreat(fit, contrast, number=Inf, sort.by='none', confint=confint)
    } else {
      fit <- eBayes(fit, robust=robust.eBayes, trend=trend.eBayes)
      tt <- topTable(fit, contrast, number=Inf, sort.by='none', confint=confint)
    }
    tt <- transform(setDT(tt), featureId=rownames(x))
    setnames(tt, c('P.Value', 'adj.P.Val'), c('pval', 'padj'))
    out <- tt
  } else {
    ## The user passed in a vector of statistics. Only a very few number of
    ## GSEA methods support this, but ...
    out <- data.table(logFC=x[, 1L], AveExpr=NA_real_, t=x[, 1L],
                      pval=NA_real_, padj=NA_real_, confint=NA_real_,
                      featureId=rownames(x))
    fit <- NULL
  }
  # x.idx <- ID <- NULL # silence R CMD check NOTEs
  out[, x.idx := seq_len(nrow(x))]  # keep original row order for joining back
  if ('ID' %in% names(out)) {
    out[, ID := NULL]
  }
  if (!as.dt) setDF(out)
  if (with.fit) list(result=out, fit=fit) else out
}
##' Checks that a provided table is "similar enough" the the result generated
##' from calculateIndividualLogFC
##'
##' @param logFC the table to check
##' @param x An expression-like object to further test against.
is.logFC.like <- function(logFC, x, as.error=FALSE) {
  ## Prototype of the columns (and types) a logFC table must contain.
  proto <- data.table(logFC=numeric(), t=numeric(), pval=numeric(),
                      padj=numeric(), featureId=character())
  ## `status` is TRUE when valid, otherwise a character description of the
  ## first problem found (the contract of check.dt).
  status <- check.dt(logFC, proto)
  ## When a reference expression object is supplied, the table must also
  ## cover every one of its rows/features.
  if (isTRUE(status) && !missing(x)) {
    if (nrow(logFC) != nrow(x)) {
      status <- "nrow(logFC) != nrow(x)"
    } else {
      absent <- setdiff(rownames(x), logFC$featureId)
      if (length(absent)) {
        status <- sprintf("%d features missing from featureId",
                          length(absent))
      }
    }
  }
  if (as.error && is.character(status)) {
    stop("Provided logFC is not a valid table:\n ",
         paste(status, collapse="\n "))
  }
  isTRUE(status)
}
|
50271d242ada8ffb398fd19fe6bb6697a9c100fb
|
4593482fd92e47efa639320dd362c4342d6fde13
|
/tests/testthat/test-lets_midpoint.R
|
96d98fea69ef3d317634f79269dc91dc38274828
|
[] |
no_license
|
BrunoVilela/letsR
|
65db89bd7e67c7d705871edb0ada0200292a06bb
|
85f82d3a4f697cac1f1d75f9d55f9fae00c05c8f
|
refs/heads/master
| 2023-08-18T15:30:38.529793
| 2023-08-07T20:19:06
| 2023-08-07T20:19:06
| 17,603,336
| 7
| 3
| null | 2015-04-27T20:25:36
| 2014-03-10T18:19:38
|
R
|
UTF-8
|
R
| false
| false
| 1,018
|
r
|
test-lets_midpoint.R
|
# Unit tests for letsR::lets.midpoint(). Each test only checks the output
# container (a data.frame with 3 columns); the NA checks are commented out --
# presumably some methods can return NA midpoints, TODO confirm.
context("Test for lets.midpoint")
data(PAM)
test_that("lets.midpoint works fine", {
resu_test <- lets.midpoint(PAM)
expect_equal(class(resu_test)[1], "data.frame")
expect_true(ncol(resu_test) == 3)
#expect_true(!any(is.na(resu_test)))
})
test_that("lets.midpoint works fine, method = GM and planar = TRUE", {
resu_test <- lets.midpoint(PAM, method = "GM", planar = TRUE)
expect_equal(class(resu_test)[1], "data.frame")
expect_true(ncol(resu_test) == 3)
# expect_true(!any(is.na(resu_test)))
})
test_that("lets.midpoint works fine, method = GM", {
resu_test <- lets.midpoint(PAM, method = "GM")
expect_equal(class(resu_test)[1], "data.frame")
expect_true(ncol(resu_test) == 3)
# expect_true(!any(is.na(resu_test)))
})
test_that("lets.midpoint works fine, method = CMD", {
resu_test <- lets.midpoint(PAM, method = "CMD")
expect_equal(class(resu_test)[1], "data.frame")
expect_true(ncol(resu_test) == 3)
# expect_true(!any(is.na(resu_test)))
})
|
20a3331b2adb0864125e30e6a3999f9682653506
|
c294c7c8acc3fdc86b726fb2b4d94f073ad153ef
|
/log/feb-26.r
|
f4e2b5e32cb8a02d54ae59e8f9e79aacb7cc0fa0
|
[] |
no_license
|
alexjgriffith/r-workspace
|
347a1d764cce86de86f01a0d41a4942998d14a3a
|
229ab551ffeed1b5c60b51123e5d065cde065a7b
|
refs/heads/master
| 2020-04-06T07:09:58.395966
| 2016-09-10T07:25:31
| 2016-09-10T07:25:31
| 52,470,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,953
|
r
|
feb-26.r
|
# Daily analysis log (Feb 26): motif logo generation and preferred-distance
# analysis of E-box motifs. Depends on helper scripts in ~/r-workspace and
# the CCCA package; all paths are machine-specific.
source("~/r-workspace/nov-functions.r")
source("~/r-workspace/dec-functions.r")
source("~/r-workspace/jan-functions.r")
source("~/r-workspace/feb-functions.r")
source("~/Masters/CCCA/inst/scipts/eboxFrequency.r")
library(seqLogo)
# Motif annotation table (presumably from a HOMER run -- TODO confirm).
db<-read.table("~/feb-homer/motifAnnotations_20_2-alt.table",header=T)
# Load the PWM of every motif file from the "NONE" datasets, keyed by filename.
fm<-sapply(
as.character(unique(db$filename[ grep("NONE",db$dataset)]))
, function(x) loadPWM(x))
# Plot a sequence logo without axes, probability-scaled (ic.scale = FALSE).
psl<-function(data){
seqLogo(makePWM(data),xaxis=FALSE,yaxis=FALSE,ic.scale=FALSE)
}
# Example argument vectors for makeLogos() below.
l=c(6,6,6)
motif<-c("ACCACA","CATCSG","CACCTG")
name=c("Leukemia","Leukemia","Leukemia")
# For each (motif length, motif, dataset name) triple, look the motif up in
# the corresponding .motif file (via the fm PWM cache) and write its sequence
# logo to a PNG under ~/Dropbox/UTX-Alex/br-data/logos/.
makeLogos<-function(l,motif,name)
mapply(function(l,motif,name){
fi<-paste0("~/thesis-feb/",name,"_NONE_pvalue=20_len=",l,"_sd=2_alt.motif")
loc<-grep(motif,fm[[fi]][,1])
png(paste0("~/Dropbox/UTX-Alex/br-data/logos/",name,"_",l,"_",motif,".png"))
psl(as.data.frame(fm[[fi]][loc,3]))
dev.off()
}, l,motif,name)
makeLogos(c(7,7,7),c("CGGAARC","TGTTTTC","GATAACA"),c("ECFC","ECFC","ECFC"))
makeLogos(c(6,6,6,7,8),c("CCACAG","GGGGGC","KGCGTG","CTGGCTG","CSCYCCTC"),rep("HSC",5))
makeLogos(c(7,7,8,8),c("CTTATCT","CCAGCTG","GCCTTGTC","GGCTGTCA"),rep("Erythroid",4))
### search for motif with prefered distances
source("~/Masters/CCCA/inst/scipts/eboxFrequency.r")
source("~/r-workspace/preferedDistance.r")
# JASPAR PWMs plus their consensus IUPAC strings.
jaspar<-CCCA::loadPWM("~/Masters/mulcal/inst/data/jaspar_motifs.txt",version="jaspar")
jasparMotifs<-cbind(unlist(jaspar[,2]),unlist(lapply(jaspar[,3],function(x) PWMtoCons(x))))
env<-addFasta(getPRC20(2))
ebox<-IUPACtoBase("CANNTG")
mList<-sapply(jasparMotifs[,2],IUPACtoBase)
# All JASPAR motifs plus the generic E-box as the last element.
motifs<-c(mList,ebox)
cList<-sapply(motifs,compliment)
# Peak indices matching each motif on the forward / reverse strand.
locationsM<-lapply(motifs,grep,env$fasta)
locationsC<-lapply(cList,grep,env$fasta)
## h is backwards need to flip ebox
# Distance distribution between every JASPAR motif and the E-box (the last
# of the 129 motifs), over all peaks.
h<-lapply(1:129,function(i){
list(num=i,mota=motifs[i],motb=motifs[length(motifs)],distribution=motifHist(env$fasta,motifs,cList,locationsM,locationsC,i,length(motifs),env$reg[,"ALL"]))})
stats<-do.call(rbind,lapply(h,heightStat))
write.table(stats,"~/thesis-feb/jaspar_ebox_stats.txt")
stats[28:dim(stats)[1],]
# NOTE(review): the bare `33,82` below is a scratch note (motif indices of
# interest); it is not valid R and errors if this log is sourced end to end.
33,82
n<-6
qstem(h[[n]][[4]],xlim=c(-128,128))
stats[n,]
jasparMotifs[n,]
plotFastMot(env,motifs,cList,locationsM,locationsC,129,length(motifs),xlim=c(-64,64))
plotFastMot(env,motifs,mList,locationsM,locationsC,82,length(motifs),xlim=c(-128,128))
# Distance histograms between `motif` and each of the 10 concrete E-box
# variants (expansions of CANNTG), restricted to peaks in region `reg`.
# Unless q(uiet) is TRUE, also draws the histograms as a 5x2 stem-plot panel.
# Returns the list of 10 histograms.
comEboxs<-function(env,motif,xlim=c(-32,32),reg="ALL",q=FALSE){
motifs2<-c(genEboxCombs(),motif)
cList2<-sapply(motifs2,compliment)
lM2<-lapply(motifs2,grep,env$fasta)
lC2<-lapply(cList2,grep,env$fasta)
p<-lapply(1:10,function(i)
motifHist(env$fasta,motifs2,cList2,lM2,lC2,i,11,env$reg[,reg]))
if(! q){
par(mfrow=c(5,2))
mapply(function(k,n) qstem(k,title=paste0(consenusIUPAC(motifs2[11]),"_",n),xlim),p,as.list(motifs2[1:10]))
}
p
}
## Distance histograms between motifa and motifb, stratified into 6
## equal-width bins of the normalized principal-component (PC) score.
##
## Args:
##   env:    analysis environment with $fasta and $prc$eigenVectors
##           (built by the sourced CCCA helpers).
##   motifa, motifb: IUPAC motif strings.
##   PC:     index of the principal component to stratify on.
##   xlim:   x range for the stem plots.
## Returns the list of 6 per-bin histograms; also draws them as a 3x2 panel.
PCSpread <- function(env, motifa, motifb, PC, xlim = c(-32, 32)) {
  prc <- env$prc$eigenVectors[, PC]
  norm <- normalize(prc)
  motifs2 <- c(IUPACtoBase(motifa), IUPACtoBase(motifb))
  cList2 <- sapply(motifs2, compliment)
  lM2 <- lapply(motifs2, grep, env$fasta)
  lC2 <- lapply(cList2, grep, env$fasta)
  # 6 equal-width bins over the normalized score range [-1, 1].
  x <- seq(-1, 1, length = 7)
  p <- lapply(seq(6), function(i) {
    reg <- norm > x[i] & norm < x[i + 1]
    motifHist(env$fasta, motifs2, cList2, lM2, lC2, 1, 2, reg)
  })
  par(mfrow = c(3, 2))
  ## BUGFIX: was `if (!is.na(k))` -- motifHist() can return a multi-element
  ## result, and is.na() on it yields a vector, which is an error inside
  ## if(). Test only the first element, as PCSpreadE() already does.
  mapply(function(k, n) if (!is.na(k[[1]])) qstem(k, title = paste0(motifa, "_", motifb, "_", n), xlim), p, seq(6))
  p
}
# Distance histograms between motifa and motifb across 10 equal-count deciles
# of the normalized PC score (sorted score split into 10 windows). Unless
# q(uiet) is TRUE, also draws the histograms as a 5x2 stem-plot panel titled
# with each decile's score range. Returns the list of 10 histograms.
PCSpreadE<-function(env,motifa,motifb,PC,xlim=c(-32,32),q=FALSE){
prc<-env$prc$eigenVectors[,PC]
norm<-normalize(prc)
snorm<-sort(norm)
step<-floor(length(prc)/10)
motifs2<-c(IUPACtoBase(motifa),IUPACtoBase(motifb))
cList2<-sapply(motifs2,compliment)
lM2<-lapply(motifs2,grep,env$fasta)
lC2<-lapply(cList2,grep,env$fasta)
x<-seq(10)
p<-lapply(x,function(i) {
# Peaks whose normalized score falls inside the i-th decile window.
mrange<-c(snorm[step*(i-1)+1], snorm[step*i])
reg<-norm>mrange[1] & norm < mrange[2]
motifHist(env$fasta,motifs2,cList2,lM2,lC2,1,2,reg)
})
if(! q){
par(mfrow=c(5,2))
mapply(function(k,i) if(! is.na(k[[1]]))qstem(k,title=paste0(motifa,"_",motifb,"_",round(snorm[(step*(i-1)+1)],2) ,":",round(snorm[(step*i)],2)),xlim),p,x)
}
p
}
#png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG_H1F1A_ARNT.png")
p<-PCSpreadE(env,consenusIUPAC(motifs[129]),"CANNTG",2,xlim=c(-62,62))
#dev.off()
# Render a list of histograms as an image: one row per histogram, columns are
# distances across mrange.
imBody<-function(p,mrange){
par(mar=c(0,0,0,0))
image(t(as.matrix(do.call(rbind,lapply(lapply(p,qstem,xlim=mrange,q=TRUE),getHeights,mrange)))), xaxt='n', yaxt='n', ann=FALSE)
}
# Heatmap of motifa-vs-motifb distance histograms across the 10 PC-score
# deciles computed by PCSpreadE().
makeImage<-function(env,motifa,motifb,pc,mrange=c(-32,32)){
p<-PCSpreadE(env,motifa,motifb,pc,xlim=mrange,q=TRUE)
imBody(p,mrange)
}
# Batch figure generation: distance heatmaps of CANNTG (and specific E-box
# variants) against GATAA and selected JASPAR motifs, across PCs 1, 2 and 4.
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CANNTG-GATAA.png")
makeImage(env,"CANNTG","GATAA",1)
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG-GATAA.png")
makeImage(env,"CANNTG","GATAA",2)
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CANNTG-GATAA.png")
makeImage(env,"CANNTG","GATAA",4)
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CANNTG-ARNT.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[129]),1,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG-ARNT.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[129]),2,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CANNTG-ARNT.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[129]),4,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CANNTG-TEB.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[69]),1,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG-TEB.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[69]),2,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CANNTG-TEB.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[69]),4,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CANNTG-PAX5.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[6]),1,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG-PAX5.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[6]),2,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CANNTG-PAX5.png")
makeImage(env,"CANNTG",consenusIUPAC(motifs[6]),4,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CANNTG-AGGCCG.png")
makeImage(env,"CANNTG","AGGCCG",1,c(-32,32))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CANNTG-AGGCCG.png")
makeImage(env,"CANNTG","AGGCCG",2,c(-32,32))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CANNTG-AGGCCG.png")
makeImage(env,"CANNTG","AGGCCG",4,c(-32,32))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CACCTG-TEB.png")
makeImage(env,"CACCTG",consenusIUPAC(motifs[69]),1,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CACCTG-TEB.png")
makeImage(env,"CACCTG",consenusIUPAC(motifs[69]),2,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CACCTG-TEB.png")
makeImage(env,"CACCTG",consenusIUPAC(motifs[69]),4,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_CAGCTG-TEB.png")
makeImage(env,"CAGCTG",consenusIUPAC(motifs[69]),1,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_CAGCTG-TEB.png")
makeImage(env,"CAGCTG",consenusIUPAC(motifs[69]),2,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_CAGCTG-TEB.png")
makeImage(env,"CAGCTG",consenusIUPAC(motifs[69]),4,c(-62,62))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/CANNTG_H1F1A_ARNT.png",height=240,width=240)
# NOTE(review): relies on the global `n` set earlier in this log (n <- 6);
# the *-1 flips the distance axis (see the "h is backwards" note above).
qstem(h[[n]][[4]]*-1,xlim=c(-64,64))
dev.off()
## Distance histograms between motifa and motifb for the two extremes of the
## PC score (prc < -0.5 vs prc > 0.5), drawn side by side.
##
## Args mirror PCSpread(); returns the list of the two histograms.
PCSpread2 <- function(env, motifa, motifb, PC, xlim = c(-32, 32)) {
  prc <- env$prc$eigenVectors[, PC]
  ## NOTE: the original also computed normalize(prc) but never used it
  ## (thresholds are applied to the raw score); the dead local is dropped.
  motifs2 <- c(IUPACtoBase(motifa), IUPACtoBase(motifb))
  cList2 <- sapply(motifs2, compliment)
  lM2 <- lapply(motifs2, grep, env$fasta)
  lC2 <- lapply(cList2, grep, env$fasta)
  p <- list(motifHist(env$fasta, motifs2, cList2, lM2, lC2, 1, 2, prc < (-0.5)),
            motifHist(env$fasta, motifs2, cList2, lM2, lC2, 1, 2, prc > (0.5)))
  par(mfrow = c(1, 2))
  ## BUGFIX: was `if (!is.na(k))`, which puts a vector into if() when the
  ## histogram has several elements; check only the first element, consistent
  ## with PCSpreadE().
  mapply(function(k, n) if (!is.na(k[[1]])) qstem(k, title = paste0(motifa, "_", motifb, "_", n), xlim), p, seq(2))
  p
}
qstem(hist2Motifs(env,"CANNTG",consenusIUPAC( motifs[129]),"ALL"),xlim=c(-62,62))
plotFastMot(env,motifs,cList,locationsM,locationsC,129,length(motifs),xlim=c(-64,64))
### Fix the motifHist function
# Histograms between each E-box variant and GATAA / generic E-box / AGGCCG.
motifs2<-c(genEboxCombs(),"GATAA",ebox,"AGGCCG")
cList2<-sapply(motifs2,compliment)
lM2<-lapply(motifs2,grep,env$fasta)
lC2<-lapply(cList2,grep,env$fasta)
par(mfrow=c(1,2))
# NOTE(review): the next two qstem() calls are identical -- presumably one
# was meant to use a different region or motif index.
qstem(cbind(0,motifHist(env$fasta,motifs2,cList2,lM2,lC2,12,11,env$reg[,"Erythroid"]))
,"ALL")
qstem(cbind(0,motifHist(env$fasta,motifs2,cList2,lM2,lC2,12,11,env$reg[,"Erythroid"]))
,"ALL")
plotFastMot(env,motifs2,cList2,lM2,lC2,12,13)
mplotFastMot(env,motifs2,cList2,lM2,lC2,13,1:10,"Leukemia")
reg<-"ALL"
# Distance of each E-box variant (1..10) to AGGCCG (index 13).
p<-lapply(1:10,function(i)
motifHist(env$fasta,motifs2,cList2,lM2,lC2,i,13,env$reg[,reg]))
#png("~/Dropbox/UTX-Alex/br-data/ebox/Eboxs-AGGCCG.png",width=240,height=240)
par(mfrow=c(5,2))
mapply(function(k,n) qstem(k,title=paste0(motifs2[13],"_",n),xlim=c(-32,32)),p,as.list(motifs2[1:10]))
#dev.off()
#png("~/Dropbox/UTX-Alex/br-data/ebox/Leukemic_CANNTG-AGGCCG.png",width=240)
par(mfrow=c(2,1))
qstem(motifHist(env$fasta,motifs2,cList2,lM2,lC2,6,13,env$reg[,"Leukemia"]),"Leukemic")
qstem(motifHist(env$fasta,motifs2,cList2,lM2,lC2,6,13,!env$reg[,"Leukemia"]),"Non Leukemic")
#dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/CACCTG-TBP.png",height=240,width=240)
qstem(hist2Motifs(env,"CACCTG",consenusIUPAC(motifs[69]),"ALL"),xlim=c(-64,64))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/CAGCTG-TBP.png",height=240,width=240)
p<-qstem(hist2Motifs(env,"CAGCTG",consenusIUPAC(motifs[69]),"ALL"),xlim=c(-64,64))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/CANNTG-MIZF.png",height=240,width=240)
qstem(hist2Motifs(env,"CANNTG",consenusIUPAC(motifs[82]),"ALL"),xlim=c(-32,32))
dev.off()
# NOTE(review): this repeats the previous plot and overwrites the same PNG.
png("~/Dropbox/UTX-Alex/br-data/ebox/CANNTG-MIZF.png",height=240,width=240)
qstem(hist2Motifs(env,"CANNTG",consenusIUPAC(motifs[82]),"ALL"),xlim=c(-32,32))
dev.off()
munit<-function(x) (x+min(x))/(max(x)-min(x))
# Heat-map image of E-box spacing histograms for `motif`: one row per E-box
# variant, columns spanning `mrange`, optionally restricted to peaks in
# regulatory class `reg`, with each row transformed by `norm` (default
# `pass` -- presumably the identity, defined elsewhere in this file).
eboxImage<-function(env,motif,mrange=c(-32,32),reg="ALL",norm=pass){
    # Convert each histogram into a normalized height vector and draw the
    # stacked rows as an axis-free image.
    imBody<-function(p,mrange,norm=pass){
        par(mar=c(0,0,0,0))
        makeData<-function(x)
            norm(getHeights(qstem(x,xlim=mrange,q=TRUE),mrange))
        data<-lapply(p, makeData)
        image(t(as.matrix(do.call(rbind,data))), xaxt='n', yaxt='n', ann=FALSE)
    }
    ## FIX: the original passed `xlim`, which is not defined in this function
    ## and would silently resolve to a global; the parameter is `mrange`.
    p<-comEboxs(env,motif,mrange,reg=reg,q=TRUE)
    imBody(p,mrange,norm)
}
# Write one CANNTG-vs-GATAA spacing heat map per regulatory class
# (Leukemia, Erythroid, ECFC, HSC), log10(x+1)-normalized.
# Rewritten as a loop: the original repeated the png/eboxImage/dev.off
# triple verbatim four times. Output file paths are unchanged
# (HM_leukemia_..., HM_erythroid_..., HM_ecfc_..., HM_hsc_...).
generateHML<-function(){
    for (reg in c("Leukemia", "Erythroid", "ECFC", "HSC")) {
        png(paste0("~/Dropbox/UTX-Alex/br-data/ebox/HM_", tolower(reg),
                   "_CANNTG-GATA.png"))
        eboxImage(env, "GATAA", norm = function(x) log(x + 1, 10), reg = reg)
        dev.off()
    }
}
# Interactive exploration of eboxImage/comEboxs output plus final figure
# exports for the AGGCGG / SCACTG pair.
eboxImage(env,motifs[69],norm=function(x){log(x+1,10)},reg="ALL",mrange=c(-62,62))
eboxImage(env,motifs[69],norm=pass,mrange=c(-62,62))
png("~/Dropbox/UTX-Alex/br-data/ebox/eboxs-tbp.png")
comEboxs(env,motifs[69],c(-62,62))
dev.off()
comEboxs(env,"TAGTTA",c(-32,32))
png("~/Dropbox/UTX-Alex/br-data/ebox/AGGCGG-SCACTG.png",height=240,width=240)
qstem(hist2Motifs(env,"AGGCGG","SCACTG","ALL"),xlim=c(-64,64))
dev.off()
# Per-principal-component images (PC1, PC2, PC4) for the same motif pair.
png("~/Dropbox/UTX-Alex/br-data/ebox/PC1_AGGCGG-SCACTG.png")
makeImage(env,"AGGCGG","SCACTG",1,c(-64,64))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC2_AGGCGG-SCACTG.png")
makeImage(env,"AGGCGG","SCACTG",2,c(-64,64))
dev.off()
png("~/Dropbox/UTX-Alex/br-data/ebox/PC4_AGGCGG-SCACTG.png")
makeImage(env,"AGGCGG","SCACTG",4,c(-64,64))
dev.off()
fastMot2(env,"AGGCGG","SCACTG")
|
61a81e7e1bdcde33cd1bac9a107c09c2eb1853e1
|
d55ad9b51ee605f03e0a9ee5b2430a899c135c8a
|
/Assignment dplyr.R
|
f43b67ab939dddc46d5bff0b7ae194b95bba9b49
|
[] |
no_license
|
A-Sanjay/R-Programming
|
a004c2b2c65bed8573dd1d9956476c2c37db662a
|
0f5bdf53f396598ab85abc8c204031abc80d1c0f
|
refs/heads/master
| 2023-02-23T05:10:41.985014
| 2021-01-20T12:06:34
| 2021-01-20T12:06:34
| 331,287,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,538
|
r
|
Assignment dplyr.R
|
# dplyr practice assignment on the `murders` dataset.
# NOTE(review): `murders` is not loaded here -- presumably it comes from the
# dslabs package attached elsewhere; verify before running.
library(dplyr)
# NOTE(review): base sample() on a data.frame samples COLUMNS, not rows --
# confirm intent (sample_n() below is the row-wise version).
sample(murders,3)
#1. Take a random sample of 5 rows from the data
sample_murder=sample_n(murders,5)
sample_murder
#2. Take a random sample of about 10% of the rows
# NOTE(review): this name shadows dplyr::sample_frac.
sample_frac<-sample_frac(murders,0.1)
View(sample_frac)
#3. Select specific columns
select_murder<-select(sample_murder,state,population)
View(select_murder)
#4. Select variables starting with a given letter
letterstart = select(sample_murder, starts_with("p"))
View(letterstart)
letterremove = select(sample_murder, -starts_with("p"))
View(letterremove)
#5. Rename a variable (result is printed, not saved)
rename(sample_murder, people=population)
# 6. Filter rows with murder totals greater than 100:
#filtermurder<-filter(sample_murder,state=='Colorado')
#View(filtermurder)
filtermurder2<-filter(sample_murder,total>100)
View(filtermurder2)
filter3 = filter(murders, state%in% c("Colorado", "Texas"))
filter3
#7. summarize
# NOTE(review): `summary` shadows base::summary.
summary<-summarise(murders, total_mean = mean(population))
View(summary)
#8. Arrange (sort) rows
# NOTE(review): `arrange` shadows dplyr::arrange; the piped call two lines
# down still works only because dplyr resolves it as a function call.
arrange<-arrange(sample_murder,total,population)
View(arrange)
filter3%>%arrange(total,population)
#9. group_by
group_by(murders, south)
#10. Slice: index 0 is silently dropped, so this returns rows 1-3
slice(murders,0:3)
#11. Mutate: add a derived column
mutat<-mutate(murders,new=population/total)
View(mutat)
#12. Intersect two overlapping row subsets:
# NOTE(review): this overwrites the existing `region` column with row names.
murders$region <- rownames(murders)
first <- murders[1:20, ]
second <- murders[10:32, ]
intersect(first,second)
#13. Multiply variables by 1000:
# NOTE(review): funs() is deprecated; modern dplyr uses across()/list().
multiplemurder =mutate_all(letterstart, funs("new_population" = .* 1000))
View(multiplemurder)
#14.
# NOTE(review): `murders` has no Y2008:Y2010 columns -- this line appears to
# be copied from a different dataset and will error.
rankmurder= mutate_at(murders, vars(Y2008:Y2010), funs(Rank=min_rank(.)))
|
b0814c10c322689b94586552aea688e6ee52bb92
|
9af49f996a65835e7a729077c1704f146153c026
|
/TanAssig2.R
|
c072e4e5ea3bd1b08f0e0dec6db054d6a90a8915
|
[
"MIT"
] |
permissive
|
jtan189/NaiveBayesClassifier
|
85cac985cabc4a78451633880944408e8ec28ba0
|
3630acf3a75ba96ca6cb37e49be0cf622614b73b
|
refs/heads/master
| 2016-09-05T20:41:20.406905
| 2013-11-06T04:02:50
| 2013-11-06T04:02:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,764
|
r
|
TanAssig2.R
|
## Implementation of the Naive Bayes Classifier in R (for numeric data)
## Josh Tan
## CSCI 479
## 11/5/13
##
## Trains a Gaussian naive Bayes model on a two-class ({1, -1}) training
## file, classifies a test file, and prints confusion counts plus
## accuracy/precision/recall. Each row of both files holds `ncols` numeric
## attributes followed by the class label in the last column.
## input parameters
train.file = "ExampleDatasets/irisTraining.txt"
test.file = "ExampleDatasets/irisTesting.txt"
## TRAINING
## read in training data
train.data = as.matrix(read.table(train.file))
train.nrows = nrow(train.data)
ncols = ncol(train.data) - 1 # last attribute is the class label
## separate data based on class
train.pos = which(train.data[, (ncols + 1)] == 1)
train.neg = which(train.data[, (ncols + 1)] == -1)
## for each class, calculate means for each attribute
train.pos.mean = colMeans(train.data[train.pos, 1:ncols])
train.neg.mean = colMeans(train.data[train.neg, 1:ncols])
## for each class, calculate standard deviations for each attribute
train.pos.sd = apply(train.data[train.pos, 1:ncols], 2, sd)
train.neg.sd = apply(train.data[train.neg, 1:ncols], 2, sd)
## for each class, calculate priors
prior.pos = length(train.pos) / train.nrows
prior.neg = length(train.neg) / train.nrows
## TESTING
## read in testing data
test.data = as.matrix(read.table(test.file))
test.nrows = nrow(test.data)
## initialize class predictions for test data
test.pred = vector()
## initialize posterior numerators for each class
## (FIX: these were previously assigned element-wise inside the loop without
## ever being created, which stops with "object 'post.pos' not found" on the
## first iteration)
post.pos = vector()
post.neg = vector()
## initialize assessment variables
true.pos = 0;
true.neg = 0;
false.pos = 0;
false.neg = 0;
## seq_len() instead of 1:test.nrows guards against an empty test file
for (i in seq_len(test.nrows)) {
    ## for each class, calculate likelihoods (attribute-wise Gaussian density)
    likelihood.pos = dnorm(test.data[i,1:ncols], train.pos.mean, train.pos.sd)
    likelihood.neg = dnorm(test.data[i,1:ncols], train.neg.mean, train.neg.sd)
    ## posterior numerator = likelihood * prior (denominator cancels when
    ## comparing classes, so it is never computed)
    post.pos[i] = prod(likelihood.pos) * prior.pos
    post.neg[i] = prod(likelihood.neg) * prior.neg
    ## compare posterior numerators to determine which is greater; corresponding class will be used
    if (post.pos[i] >= post.neg[i]) { # if equal, just choose the positive class
        test.pred[i] = 1
    } else {
        test.pred[i] = -1
    }
    ## increment appropriate assessment variables
    if (test.pred[i] == 1) {
        if (test.data[i, ncols + 1] == 1) {
            true.pos = true.pos + 1
        } else {
            false.pos = false.pos + 1
        }
    } else {
        if (test.data[i, ncols + 1] == -1) {
            true.neg = true.neg + 1
        } else {
            false.neg = false.neg + 1
        }
    }
}
## calculate performance metrics
## NOTE(review): precision/recall divide by zero (yielding NaN) when there
## are no predicted or actual positives -- acceptable for this assignment.
accuracy = (true.pos + true.neg) / test.nrows
precision = true.pos / (true.pos + false.pos)
recall = true.pos / (true.pos + false.neg)
cat("True Positives: ", true.pos, "\n")
cat("False Positives: ", false.pos, "\n")
cat("True Negatives: ", true.neg, "\n")
cat("False Negatives: ", false.neg, "\n")
cat("Accuracy: ", accuracy, "\n")
cat("Precision: ", precision, "\n")
cat("Recall: ", recall, "\n")
|
0e32fac7f79ad9d9ad08823bc5d3109b00ad6824
|
b8b761c788ccc47ae7f94e227d372ca960f991c0
|
/data/2017_09_01_SubFeature.R
|
7a0cbe716f1bfa7e1b88e2c6fe7fdf0994ad24f9
|
[] |
no_license
|
yonghuang526/Facial_dysmorphism
|
0b8d51173d397910ab75eb0a1f7c50d4f6d95fb5
|
d87dfc99d5714f9a4ca087f29a1e72fccd8c9712
|
refs/heads/master
| 2021-09-06T13:11:17.199253
| 2018-02-06T22:11:23
| 2018-02-06T22:11:23
| 112,124,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,536
|
r
|
2017_09_01_SubFeature.R
|
# Joins devGenes phenotype records to facial-feature extractions, derives an
# affected-status flag plus merged sub-feature columns, and writes scaled
# feature CSVs. Paths are absolute server paths; run on the original host.
data = read.csv('/wdata/devGenes/Kevin - Gait Analyses/DevGenesDatabases_2017-08-29_1147.csv')
feature = read.csv('/wdata/devGenes/CURRENT DATA /kevin_facial_point_extraction/features.csv')
Gait_Model= read.csv('/wdata/devGenes/Kevin - Gait Analyses/9.19.16 Data to Kevin for Gait Model.csv')
# Align phenotype rows to the feature table (by row name, then by ID column).
matchdata = data[match(row.names(feature),row.names(data)),]
subdata = data[match(feature[,1],data[,1]),]
# NOTE(review): `subdata[-NA, ]` indexes with NA and yields all-NA rows -- it
# does NOT mean "rows without NA". The next line then drops the IDs matched
# from that NA frame. Confirm the intended filter (complete.cases?).
na_data = subdata[-NA,]
subdata_new = subdata[-match(na_data[,1],subdata[,1]),]
number_of_affected <- c(0);
number_of_unaffected <- c(0);
# Mark a subject affected (column 4) if ANY phenotype column (5..ncol) holds
# the string 'TRUE'/'True'.
for(i in 1:nrow(subdata_new)){
    for(j in 5:ncol(subdata_new)){
        if(!is.na(subdata_new[i,j])){
            if(subdata_new[i,j] == 'TRUE'|subdata_new[i,j] == 'True'){
                subdata_new[i,4] <-TRUE;
            }
        }
    }
}
# Remaining NA statuses are treated as unaffected; count both groups.
for(k in 1:nrow(subdata_new)){
    if(is.na(subdata_new[k,4])){
        subdata_new[k,4] <-FALSE;
        number_of_unaffected <-number_of_unaffected+1;
    }
    else{
        number_of_affected <- number_of_affected+1;
    }
}
#################################################
# Blank out NAs in columns 5..12 so the paste() merges below do not emit the
# literal string "NA".
for(m in 1:nrow(subdata_new)){
    if(is.na(subdata_new[m,5])){
        subdata_new[m,5]<-""
    }
    if(is.na(subdata_new[m,6])){
        subdata_new[m,6]<-""
    }
    if(is.na(subdata_new[m,7])){
        subdata_new[m,7]<-""
    }
    if(is.na(subdata_new[m,8])){
        subdata_new[m,8]<-""
    }
    if(is.na(subdata_new[m,9])){
        subdata_new[m,9]<-""
    }
    if(is.na(subdata_new[m,10])){
        subdata_new[m,10]<-""
    }
    if(is.na(subdata_new[m,11])){
        subdata_new[m,11]<-""
    }
    if(is.na(subdata_new[m,12])){
        subdata_new[m,12]<-""
    }
}
# Merge paired report columns (5&9, 6&10, 7&11, 8&12) into four sub-features,
# keyed by subject ID, then persist.
subfeature<-cbind(data.frame(paste(subdata_new[,5],subdata_new[,9],sep = "",collapse = NULL)),data.frame(paste(subdata_new[,6],subdata_new[,10],sep = "",collapse = NULL)))
subfeature<-cbind(subfeature,data.frame(paste(subdata_new[,7],subdata_new[,11],sep = "",collapse = NULL)))
subfeature<-cbind(subfeature,data.frame(paste(subdata_new[,8],subdata_new[,12],sep = "",collapse = NULL)))
subfeature <- cbind(subdata_new[,1],subfeature)
rownames(subfeature) <-subfeature[,1]
subfeature[,1] <-NULL
names(subfeature) <- c("Autism(reported)","Epilepsy status","Language-impair","ID" )
write.csv(subfeature,file = "/wdata/rotating_students/yonghuang/data/subfeatures(merged).csv")
################################################
# Basic demographic/status table; columns 1-4 are ID, Sex, Age, Affected.
new_features <- data.frame(subdata_new[,1],subdata_new[,2],subdata_new[,3],subdata_new[,4],stringsAsFactors=FALSE)
names(new_features) <- c("ID","Sex","Age","Affected status")
new_features[,3] <- as.character(new_features[,3])
# NOTE(review): hard-coded row indices -- presumably manually identified bad
# birthdates; these will silently hit different subjects if the input changes.
new_features[152,3] <-NA
new_features[222,3] <-NA
new_features[498,3] <-NA
library("lubridate")
# Convert birth date to age in years relative to 2017.
for(m in 1:nrow(new_features)){
    if(!is.na(new_features[m,3])){
        new_features[m,3]<-2017-year(new_features[m,3])
    }
}
write.csv(new_features, file = "/wdata/rotating_students/yonghuang/data/sub_features.csv",row.names=FALSE)
# Scale facial features by the per-subject dimension pair (first two columns).
update_feature <-feature[match(subdata_new[,1],feature[,1]),]
rownames(update_feature) <-update_feature[,1]
update_feature <- update_feature[,-1]
dimension <- update_feature[,1:2]
scale = as.vector(t(dimension[match(row.names(update_feature),row.names(dimension)),]))
update_feature <- update_feature[,-1]
update_feature <- update_feature[,-1]
# NOTE(review): scale[(2*n):(2*(n-1) + 1)] selects the pair (2n, 2n-1) in
# REVERSED order -- confirm the intended width/height pairing.
update_feature.scale = sapply(1:nrow(update_feature), function(n) update_feature[n,] / scale[(2*n):(2*(n-1) + 1)])
# transpose back to initial format
update_feature.scale= t(update_feature.scale)
rownames(update_feature.scale) = rownames(update_feature)
write.csv(update_feature.scale, file = "/wdata/rotating_students/yonghuang/data/scaled_features.csv")
|
44c6f20765bcf30d3de6cd333869a5d2b962f91b
|
bc6e80b41dd479133651b42f995a5107c70df71e
|
/tests/testthat.R
|
3e0b54163401f9655de45435af35e255099ac2ac
|
[] |
no_license
|
baumer-lab/fec20
|
ccb5c1c1882796be6684f1f04ab5bc84b91f48cd
|
de87e69c6ef6fe066c3f2ea647c3a286cbeeb4b2
|
refs/heads/main
| 2023-07-09T22:02:47.431564
| 2021-08-10T09:04:50
| 2021-08-10T09:04:50
| 312,653,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Entry point for running the fec20 package test suite via testthat.
library(testthat)
library(fec20)
test_check("fec20")
|
a931ff73e2195aea0215c4ccca31dfa8bb23937c
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/qqline-fig.R
|
5f526444ab683a8d3fec2567878af19528248a07
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
qqline-fig.R
|
# QQ plot of age faceted by substance (HELPrct data), overlaid with two
# reference lines: the default quartile-based qq line (red) and one fit
# through the 10% tails (skyblue). Uses ggformula (gf_*) helpers.
gf_qq( ~ age | substance, data = HELPrct, alpha = 0.4) %>%
    gf_qqline(color = "red") %>%
    gf_qqline(color = "skyblue", tail = 0.10)
|
fc6acaa5fddf6bc67ef8a44deabecba54f79b78d
|
b1b1a838bd401163e0cb256b713e19293e5d9ad8
|
/APFE18800685_Geemly_Assignment.R
|
012783a23afe364cc152efd82341979f5c4ee846
|
[] |
no_license
|
kabhilas/Geely-automobile-linear-regression-problem
|
3b02b3cc9c3142f8aafd70dcf8fbdfc79ba77e34
|
f77bfc5f4c3ff013bff8becba6aea5596105dbac
|
refs/heads/main
| 2020-03-31T23:01:30.052283
| 2018-10-11T18:50:29
| 2018-10-11T18:50:29
| 152,639,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,625
|
r
|
APFE18800685_Geemly_Assignment.R
|
# Geely automobile price prediction -- linear regression assignment.
# Load required libraries.
# NOTE(review): plyr is attached before dplyr (the recommended order).
library(tidyr)
library(plyr)
library(dplyr)
library(stringr)
library(ggplot2)
library(MASS)
library(car)
# DATA DICTIONARY
# 1 Car_ID Unique id of each observation (Interger)
# 2 Symboling Its assigned insurance risk rating, A value of +3 indicates that the auto is risky, -3 that it is probably pretty safe.(Categorical)
# 3 carCompany Name of car company (Categorical)
# 4 fueltype Car fuel type i.e gas or diesel (Categorical) 2 levels
# 5 aspiration Aspiration used in a car (Categorical) 2 levels
# 6 doornumber Number of doors in a car (Categorical) 2 levels
# 7 carbody body of car (Categorical) 5 levels
# 8 drivewheel type of drive wheel (Categorical) 3 levels
# 9 enginelocation Location of car engine (Categorical) 2 levels
# 10 wheelbase Weelbase of car (Numeric)
# 11 carlength Length of car (Numeric)
# 12 carwidth Width of car (Numeric)
# 13 carheight height of car (Numeric)
# 14 curbweight The weight of a car without occupants or baggage. (Numeric)
# 15 enginetype Type of engine. (Categorical) 7 levels
# 16 cylindernumber cylinder placed in the car (Categorical) 7 levels
# 17 enginesize Size of car (Numeric)
# 18 fuelsystem Fuel system of car (Categorical) 8 levels
# 19 boreratio Boreratio of car (Numeric)
# 20 stroke Stroke or volume inside the engine (Numeric)
# 21 compressionratio compression ratio of car (Numeric)
# 22 horsepower Horsepower (Numeric)
# 23 peakrpm car peak rpm (Numeric)
# 24 citympg Mileage in city (Numeric)
# 25 highwaympg Mileage on highway (Numeric)
# 26 price(Dependent variable) Price of car (Numeric)
#*********************************** Data Sourcing ***************************************************
# Read the dataset using read.csv function
# NOTE(review): `geely.cars` keeps an untouched copy of the raw data; only
# `geely` is mutated below.
geely <- geely.cars <- read.csv("CarPrice_Assignment.csv")
str(geely)
head(geely)
summary(geely)
# There are 205 obs. of 26 variables
################################################ EDA ##############################################
# Count missing values in the data set.
# FIX: the original used sum(is.null(geely)), which is always 0 because
# is.null() on a data.frame returns a single FALSE; is.na() performs the
# element-wise missing-value check the comment intends.
sum(is.na(geely))
colnames(geely)
# There are no column with value as ""
sapply(colnames(geely), function(x) length(which(geely[,x] == "")))
# Check if car_ID is unique or not
sum(duplicated(geely$car_ID)) # The car_IDs are unique
### Univariate Analysis
# Check each numeric column for outliers via boxplot, quantiles and summary.
# DRY rewrite: the original repeated the same three calls verbatim for all
# 13 numeric columns; print() preserves the console output a top-level
# expression would have produced.
numeric_cols <- c("wheelbase", "carlength", "carwidth", "carheight",
                  "curbweight", "enginesize", "boreratio", "stroke",
                  "compressionratio", "horsepower", "peakrpm", "citympg",
                  "highwaympg")
for (col in numeric_cols) {
    boxplot(geely[[col]], main = col)
    print(quantile(geely[[col]]))
    print(summary(geely[[col]]))
}
# As per above univariate analysis on continuous data, there did not seem to be any issue for outliers.
# Also, as the number of observations are less i.e. 205 only, there is no critical outliers in the continuous variables.
# Converting 2 levels of categorical data as numeric
# 3. carName : Name of car company (Categorical)
# Separate the car names into carCompany and carModel
geely$CarName = as.character(geely$CarName)
CarName_split = str_split_fixed(geely$CarName, "[ ]", 2)
CarName_split = as.data.frame(CarName_split)
#View(CarName_split)
geely$CarCompany = CarName_split$V1
geely$CarModel = CarName_split$V2
geely[,c("CarModel","CarName","car_ID")] <- list(NULL)
#View(geely)
# Correcting the misspelled car company names
levels(as.factor(geely$CarCompany))
company_name <- mapvalues(geely$CarCompany, from = c("maxda", "porcshce", "vokswagen",
                                                     "vw", "Nissan", "toyouta"), to = c("mazda",
                                                     "porsche", "volkswagen", "volkswagen", "nissan", "toyota"))
# NOTE(review): column 25 is assumed to be CarCompany here -- a hard-coded
# positional drop that breaks silently if the column order changes.
geely <- cbind(geely[,-25],company_name)
geely$company_name <- as.factor(geely$company_name)
summary(geely$company_name)
# company_name : company names of cars (Categorical) 28 levels
summary(geely$company_name)
dummy_companyName <- data.frame(model.matrix( ~company_name, data = geely))
#View(dummy_companyName)
dummy_companyName <- dummy_companyName[,-1]
# 4. fueltype : Car fuel type i.e gas or diesel (Categorical)
summary(geely$fueltype)
levels(geely$fueltype) <- c(0,1) # diesel (0), gas(1)
geely$fueltype <- as.numeric(levels(geely$fueltype))[geely$fueltype]
table(geely$fueltype)
# 5. aspiration : Aspiration used in a car (Categorical)
summary(geely$aspiration)
levels(geely$aspiration) <- c(0,1) # std(0), turbo(1)  (levels are alphabetical)
geely$aspiration <- as.numeric(levels(geely$aspiration))[geely$aspiration]
table(geely$aspiration)
# 6. doornumber : Number of doors in a car (Categorical)
summary(geely$doornumber)
levels(geely$doornumber) <- c(0,1) # four(0), two(1)
geely$doornumber <- as.numeric(levels(geely$doornumber))[geely$doornumber]
table(geely$doornumber)
# 9. enginelocation : Location of car engine (Categorical)
summary(geely$enginelocation)
levels(geely$enginelocation) <- c(0,1) # front(0), rear(1)
geely$enginelocation <- as.numeric(levels(geely$enginelocation))[geely$enginelocation]
table(geely$enginelocation)
### Converting multi level categorical data to dummy variables
# (model.matrix drops the intercept column each time to avoid collinearity)
# 2. symboling : converting it to factor
levels(as.factor(geely$symboling))
geely$symboling <- as.factor(geely$symboling)
summary(geely$symboling)
dummy_symboling <- data.frame(model.matrix( ~symboling, data = geely))
#View(dummy_symboling)
dummy_symboling <- dummy_symboling[,-1]
# 7. carbody : body of car (Categorical) 5 levels
summary(geely$carbody)
dummy_carBody <- data.frame(model.matrix( ~carbody, data = geely))
#View(dummy_carBody)
dummy_carBody <- dummy_carBody[,-1]
# 8. drivewheel : type of drive wheel (Categorical) 3 levels
summary(geely$drivewheel)
dummy_driveWheel <- data.frame(model.matrix( ~drivewheel, data = geely))
#View(dummy_driveWheel)
dummy_driveWheel <- dummy_driveWheel[,-1]
# 15. enginetype : Type of engine. (Categorical) 7 levels
summary(geely$enginetype)
dummy_engType <- data.frame(model.matrix( ~enginetype, data = geely))
#View(dummy_engType)
dummy_engType <- dummy_engType[,-1]
# 16. cylindernumber : cylinder placed in the car (Categorical) 7 levels
summary(geely$cylindernumber)
dummy_cylNumber <- data.frame(model.matrix( ~cylindernumber, data = geely))
#View(dummy_cylNumber)
dummy_cylNumber <- dummy_cylNumber[,-1]
# 18. fuelsystem : Fuel system of car (Categorical) 8 levels
summary(geely$fuelsystem)
dummy_fuelSys <- data.frame(model.matrix( ~fuelsystem, data = geely))
#View(dummy_fuelSys)
dummy_fuelSys <- dummy_fuelSys[,-1]
# Replacing multi level categorical columns with dummy variables
dummy_geely <- data.frame(dummy_carBody, dummy_driveWheel, dummy_engType, dummy_cylNumber, dummy_fuelSys, dummy_companyName, dummy_symboling)
geely[,c("carbody","drivewheel","enginetype","cylindernumber","fuelsystem", "company_name", "symboling")] <- list(NULL)
geely <- cbind(geely, dummy_geely)
## Derived metrics
# Average mpg
# NOTE(review): mean() collapses this to ONE scalar recycled to every row;
# a per-car average would be (citympg + highwaympg) / 2 -- confirm intent.
geely$Totalmpg <- round(mean(geely$citympg + geely$highwaympg),2)
# Following guideline has been used for model building :
#1. Build a model containing all variables
#2. Run stepAIC on a model containing all variables
#3. Take the last model call from the step function after the variables were reduced, and take the remaining variables in another model - model_2
#4. Proceed as you did in backward selection
#5. Remove variables with high VIF (>2 generally) and which are insignificant (p>0.05), one by one
#6. If the model has variables which have high VIF and are significant, check and remove other insignificant variables
#7. After removing the insignificant variables, the VIFs should decline
#8. If some variables still have a high VIF, remove the variable which is relatively less significant
#9. Now variables must be significant. If the number of variables is still high, remove them in order of insignificance until you arrive at a limited number of variables, that explain the model well.
## Setting seed to achieve reproducibility
set.seed(100)
## Separating training (70%) and test (30%) datasets
trainindices= sample(1:nrow(geely), 0.7*nrow(geely))
train = geely[trainindices,]
test = geely[-trainindices,]
# Build model 1 containing all variables
model_1 <-lm(price~.,data=train)
summary(model_1)
## Use stepAIC (both directions) to prune the full model automatically
step <- stepAIC(model_1, direction = "both")
step
## Manual backward elimination starting from the stepAIC-selected variable
## set: at each step one variable is dropped (high VIF and/or insignificant
## p-value per the summary/vif pair below it) and the model is refit.
## NOTE(review): the sequence continues past this excerpt (model_25's
## summary/vif precede at least one further removal).
## using last step of AIC for finalisation of our model
model_2 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd + enginetypedohcv +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree + fuelsystem2bbl +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemercury + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen + symboling.1 +
    symboling0 + symboling3, data = train)
summary(model_2)
vif(model_2)
# remove enginetypedohcv
model_3 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree + fuelsystem2bbl +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemercury + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen + symboling.1 +
    symboling0 + symboling3, data = train)
summary(model_3)
vif(model_3)
# remove fuelsystem2bbl
model_4 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemercury + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen + symboling.1 +
    symboling0 + symboling3, data = train)
summary(model_4)
vif(model_4)
# remove symboling0
model_5 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemercury + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen + symboling.1 +
    symboling3, data = train)
summary(model_5)
vif(model_5)
# remove symboling.1
model_6 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemercury + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen +
    symboling3, data = train)
summary(model_6)
vif(model_6)
#remove company_namemercury
model_7 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen +
    symboling3, data = train)
summary(model_7)
vif(model_7)
#remove symboling3
model_8 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhardtop + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_8)
vif(model_8)
#remove carbodyhardtop
model_9 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhatchback +
    carbodysedan + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_9)
vif(model_9)
#remove carbodysedan
model_10 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodyhatchback +
    carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_10)
vif(model_10)
# remove carbodyhatchback
model_11 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + carbodywagon + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_11)
vif(model_11)
# remove carbodywagon
model_12 <- lm(price ~ aspiration + enginelocation + carwidth + curbweight +
    enginesize + stroke + peakrpm + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_12)
vif(model_12)
#remove curbweight
model_13 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + stroke + peakrpm + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + cylindernumberthree +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_13)
vif(model_13)
# remove cylindernumberthree
model_14 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + stroke + peakrpm + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    cylindernumberfive + company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_14)
vif(model_14)
# remove cylindernumberfive
model_15 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + stroke + peakrpm + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_15)
vif(model_15)
# remove peakrpm
model_16 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + stroke + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault + company_namesaab +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_16)
vif(model_16)
# remove company_namesaab
model_17 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + stroke + drivewheelrwd +
    enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_17)
vif(model_17)
# remove stroke
model_18 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + drivewheelrwd + enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_18)
vif(model_18)
# remove drivewheelrwd
model_19 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf + enginetyperotor +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_19)
vif(model_19)
# remove enginetyperotor
model_20 <- lm(price ~ aspiration + enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_20)
vif(model_20)
#remove aspiration
model_21 <- lm(price ~ enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namejaguar + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_21)
vif(model_21)
# remove company_namejaguar
model_22 <- lm(price ~ enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick + company_namedodge +
    company_namehonda + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_22)
vif(model_22)
# remove company_namedodge
model_23 <- lm(price ~ enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick +
    company_namehonda + company_namemazda +
    company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_23)
vif(model_23)
# remove company_namehonda
model_24 <- lm(price ~ enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick +
    company_namemazda + company_namemitsubishi + company_namenissan +
    company_nameplymouth + company_namerenault +
    company_nametoyota + company_namevolkswagen, data = train)
summary(model_24)
vif(model_24)
# remove company_nameplymouth
model_25 <- lm(price ~ enginelocation + carwidth +
    enginesize + enginetypel + enginetypeohc + enginetypeohcf +
    company_namebmw + company_namebuick +
    company_namemazda + company_namemitsubishi + company_namenissan +
    company_namerenault + company_nametoyota + company_namevolkswagen, data = train)
summary(model_25)
vif(model_25)
# remove company_namevolkswagen
model_26 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick +
company_namemazda + company_namemitsubishi + company_namenissan +
company_namerenault + company_nametoyota, data = train)
summary(model_26)
vif(model_26)
# remove company_namemazda
model_27 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick +
company_namemitsubishi + company_namenissan +
company_namerenault + company_nametoyota, data = train)
summary(model_27)
vif(model_27)
# remove company_namenissan
model_27 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick +
company_namemitsubishi + company_namerenault + company_nametoyota, data = train)
summary(model_27)
vif(model_27)
# remove company_namemitsubishi
model_28 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick +
company_namerenault + company_nametoyota, data = train)
summary(model_28)
vif(model_28)
# remove company_namerenault
model_29 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick +
company_nametoyota, data = train)
summary(model_29)
vif(model_29)
# remove company_nametoyota
model_30 <- lm(price ~ enginelocation + carwidth +
enginesize + enginetypel + enginetypeohc + enginetypeohcf +
company_namebmw + company_namebuick, data = train)
summary(model_30)
vif(model_30)
### The below variables are considered a the most significant for the model
#enginelocation
#carwidth
#enginesize
#enginetypel
#enginetypeohc
#enginetypeohcf
#company_namebmw
#company_namebuick
### Let's predict the model on test data
# Predict the car prices in the testing dataset (column 1 is the response).
Predict_1 <- predict(model_30, test[, -1])
test$test_price <- Predict_1
# Accuracy of the predictions: correlation between actual and predicted price.
r <- cor(test$price, test$test_price)
# R-squared on the test set is the squared correlation; reuse the value of r
# instead of calling cor() a second time as the original did.
rsquared <- r^2
# check R-squared
rsquared
# Train data r squared : 0.929 Adjusted r squared : 0.9248
# Test data r squared : 0.8454
# difference : 0.07
|
817cc8552f0ea98baf98decc8a11c0dce8304802
|
90381544a6ed821f57a06d20b6f90ac6590b6a53
|
/testing-party-list.R
|
f023703fabf93d9f7f01fbca46343e9f244cbadb
|
[] |
no_license
|
gilmore-lab/databrary-r-api
|
b8d102fa217df3a96b418dfb749b34d6fecd5fad
|
189f7cea6161311604cf6a711b91c5320d995b42
|
refs/heads/master
| 2021-05-02T06:57:34.524410
| 2018-03-15T19:27:29
| 2018-03-15T19:27:29
| 32,077,201
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,522
|
r
|
testing-party-list.R
|
# Query the Databrary search API for non-institution party records.
# NOTE(review): GET() comes from the 'httr' package -- this script assumes
# it is already attached; there is no library(httr) call visible here.
# This gets authorized investigators, but only the first page of names
r <- GET(url = "https://nyu.databrary.org/api/search?volume=false&f.party_authorization=4&f.party_is_institution=false")
# This gets affiliates? f.party_authorization=3
r <- GET(url = "https://nyu.databrary.org/api/search?volume=false&f.party_authorization=3&f.party_is_institution=false")
# Staff are f.party_authorization=5
# f.party_authorization=2, 6, 7 returns no one
# f.party_authorization=1 returns
# $response$docs
# id sortname prename affiliation institution
# 1 162 de Jong Marjanneke Utrecht University FALSE
# 2 155 Libertus Klaus University of Pittsburgh FALSE
# 3 280 Richards John E. University of South Carolina FALSE
# f.party_authorization=0 returns
# $response$docs
# id sortname prename affiliation institution
# 1 606 Lederberg Amy Georgia State University FALSE
# 2 827 Kohler Peter Stanford University FALSE
# 3 877 Ahn Soo George Mason University FALSE
# 4 829 Monroy Claire Donders Institute for Brain, Cognition and Behaviour FALSE
# 5 783 Holt Nicholas University of Louisville FALSE
# 6 819 Spencer Hannah Utrecht University FALSE
# 7 830 Stubblefield Edward michigan state FALSE
# 8 847 Murphy P. Karen The Pennsylvania State University FALSE
# 9 600 West Kelsey Louise University of Pittsburgh FALSE
# 10 831 Miller Kevin Francis University of Michigan FALSE
# 11 906 Hajal Nastassia UCLA Semel Institute for Neuroscience & Human Behavior FALSE
# 12 848 Wong Sissy University of Houston FALSE
# 13 862 Escobar Kelly New York University FALSE
# 14 898 Smith Craig Elliott University of Michigan FALSE
# 15 861 Kuchirko Yana New York University FALSE
# 16 1041 JARLAN Pierre umps FALSE
# 17 790 Kim Kaeun University of Massachusetts Amherst FALSE
# 18 1023 Fernandes Sara New York University FALSE
# 19 1032 Dodkins Cindy Kaye Swinburne BabyLab, Swinburne University of Technology FALSE
# 20 109 Zimmermann Laura Georgetown University FALSE
# 21 132 Flom Ross Brigham Young University FALSE
# 22 151 Balas Benjamin North Dakota State University FALSE
# 23 283 Ulrich Beverly University of Michigan FALSE
# 24 267 Fivush Robyn Emory University FALSE
# 25 270 Margulis Katherine Temple University FALSE
|
a6b8f03db85f212cb777e9f66dad3514de03fada
|
ebd6f21764e2c16c1cc8695985b0eec96ad676cd
|
/man/format_data_cl_json.Rd
|
d75eaec5c4283566ee89be953c4146cb5ce52e77
|
[
"MIT"
] |
permissive
|
worldbank/povcalnetR
|
88dbf13fc1073c0b0adebde4cd7aad0fb14688a8
|
d3e3bb66df359a441585390e1a552d237ac6474d
|
refs/heads/master
| 2023-01-20T14:19:23.658455
| 2023-01-10T09:10:24
| 2023-01-10T09:10:24
| 149,777,997
| 8
| 7
|
NOASSERTION
| 2020-09-15T09:34:45
| 2018-09-21T14:54:24
|
R
|
UTF-8
|
R
| false
| true
| 308
|
rd
|
format_data_cl_json.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{format_data_cl_json}
\alias{format_data_cl_json}
\title{format_data_cl_json}
\usage{
format_data_cl_json(x)
}
\arguments{
\item{x}{data.frame: API response}
}
\value{
data.frame
}
\description{
format_data_cl_json
}
|
07e29602bf514fc0cb535c3eb2800efbf2481569
|
6706ae648844d45fadf23c3486b0aa27304d5d4b
|
/TextMining.r
|
02436c6b6f4db7906abfa4ad8426eab92b98cdbc
|
[] |
no_license
|
4deepanshu/BigData-Machine-Learning-with-R
|
368bb394467522b41bdf820c0775fc302a60ecb0
|
8b58e87110b24ebd8fab24117476601bbb3d5a03
|
refs/heads/master
| 2020-03-24T22:49:10.939568
| 2019-08-11T09:46:51
| 2019-08-11T09:46:51
| 143,104,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
r
|
TextMining.r
|
## Build a word cloud from a text source using the 'tm' text-mining pipeline.
require(tm)
# NOTE(review): the next assignment is immediately overwritten below -- the
# URL fetch is dead code; comment out one of the two lines as needed.
data <- readLines("https://www.stanford.edu/")
data <- readLines("D:/Python3/google.txt")
corpus = Corpus(VectorSource(data)) # for preprocessing the data
corpus
# Normalise the text: lower-case, then strip numbers, punctuation and
# English stop words.
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus,removeNumbers)
corpus <- tm_map(corpus,removePunctuation)
corpus <- tm_map(corpus,removeWords, c(stopwords("english")))
dtm <- TermDocumentMatrix(corpus) # Takes out words from the lines and separates them
dtm
m <- as.matrix(dtm) # Explicit coercion to a plain matrix
View(m)
# Per-term frequency = row sums of the term-document matrix.
wordfreq <- rowSums(m)
d <- data.frame(word = names(wordfreq),freq=wordfreq)
View(d)
require(wordcloud)
set.seed(1234)  # reproducible word-cloud layout
wordcloud(words= d$word, freq =d$freq, min.freq = 15,max.words = Inf,colors=brewer.pal(n=8,name="Dark2"))
|
b386afd11ccbf47e4e39738edf7c21857d25ccec
|
12961aa3efe519b058b44f88b76d15eaa9bc60c9
|
/end_to_end_Model.R
|
8facb8865db2e8cd4bba9e95fc079ed6bada0d9c
|
[] |
no_license
|
Bwarule/Data-Mining
|
7f6642bc9acc86890714d47db106453e2e54a36a
|
a04d4e51c70acb8956f54f9db989fc24a6a9556c
|
refs/heads/master
| 2020-12-24T05:41:53.665142
| 2020-04-04T21:58:15
| 2020-04-04T21:58:15
| 9,514,358
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,185
|
r
|
end_to_end_Model.R
|
## Read the modelling data set from disk.
modeling_data <- read.csv("C:\\Papers\\training\\shar\\4\\ds1.10.csv")
## We assume missing-value imputation is already done.
## Outlier capping is already done.
## Split the data into train and test samples (80/20).
set.seed(1)
dev_sample <- 0.8
x3 <- nrow(modeling_data)
x2 <- as.integer(x3*(dev_sample))
sub <- c(sample(1:x3, x2))
train_sample <- modeling_data[sub,]
test_sample <- modeling_data[-sub,]
## Run the Data_redconstvar code for further analysis.
source("C:\\Papers\\training\\shar\\4\\Data_redconstvar.R")
modeling_data$uniq_var <- "IND"
modeling_data$income <- 0
# Data-reduction step (constant-variable removal, category capping).
# NOTE(review): Datareduc_Subroutine_withConstvar is defined in the sourced
# script above -- verify the path before running.
output <-
Datareduc_Subroutine_withConstvar (train_sample,
constVAR = TRUE, max_cat = 10, idvars = NULL, RemovconstVAR = TRUE)
## Use of library FSelector for various feature-importance measures.
library(FSelector)
# Measure 1: information gain of each feature w.r.t. the target;
# keep the top 3 and assemble them into a formula.
weights <- information.gain(target~., train_sample)
print(weights)
subset <- cutoff.k(weights, 3)
f <- as.simple.formula(subset, "target")
print(f)
# Measure 2: gain ratio (information gain normalised by feature entropy).
weights <- gain.ratio(target~., train_sample)
print(weights)
subset <- cutoff.k(weights, 3)
f <- as.simple.formula(subset, "target")
print(f)
# Measure 3: symmetrical uncertainty; keep features up to the biggest
# drop in the ranking instead of a fixed k.
weights <- symmetrical.uncertainty(target~., train_sample)
print(weights)
subset <- cutoff.biggest.diff(weights)
f <- as.simple.formula(subset, "target")
print(f)
## Variable selection process via best-subset / stepwise search.
library(leaps)
library(MASS)
# NOTE(review): 'model' is assigned three times below; the first two fits
# (best-subset with nvmax = 4, then backward) are discarded -- only the last
# call's result survives. Also the second and third calls are identical.
model <-
regsubsets(target ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 , data = train_sample, nvmax = 4)
model <-
regsubsets(target ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 , data = train_sample, method = "backward")
## Available methods: ("exhaustive", "backward", "forward", "seqrep")
model <-
regsubsets(target ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 , data = train_sample, method = "backward")
library(Hmisc)
# Single-predictor logistic fit used below to demonstrate the IV function.
lr <- glm(target~x3,data=train_sample,family = binomial)
predit <- fitted.values(lr);
n <- length(train_sample$target);
target <- train_sample$target
## target<-rep(0,times=n);
## for (i in 1:n) {if (treedata$y[i]=='yes') target[i]=1};
iv <- function(predit, target) {
  # Information Value (IV) of a predicted score against a binary target.
  #
  # Observations are ranked by the score and cut into 10 equal-size bins
  # (the last bin absorbs the remainder), then the IV is accumulated as
  # sum((bad% - good%) * log(bad% / good%)) over the bins, where 'good'
  # counts target == 1 and 'bad' the rest.
  sorted_target <- target[order(predit)]
  total <- length(sorted_target)
  n_bins <- 10
  per_bin <- total %/% n_bins

  good <- rep(0, times = n_bins)
  bad <- rep(0, times = n_bins)
  for (b in 1:n_bins) {
    lo <- (b - 1) * per_bin + 1
    hi <- if (b < n_bins) b * per_bin else total  # last bin takes the tail
    good[b] <- sum(sorted_target[lo:hi])
    bad[b] <- (hi - lo + 1) - good[b]
  }

  good_pct <- good / sum(good)
  bad_pct <- bad / sum(bad)
  sum((bad_pct - good_pct) * log(bad_pct / good_pct))
}
iv(predit,target)
## Fit the logistic
library(forward)
input_vars <- c("x2","x3","x4")
formulas <- paste("target", sep="~",
fwd.combn(input_vars, length(input_vars), fun=function(x){paste(x,collapse="+")}))
Ffit_LR <- glm(eval(parse(text=formulas)),
data=train_sample,family=binomial(link="logit"))
## Model validation: score train and test samples with the fitted model.
library(ROCR)
library(doBy)
library(gdata)
train_sample$LR_pred <- predict(Ffit_LR, type='response', train_sample)
test_sample$LR_pred <- predict(Ffit_LR, type='response', test_sample)
test_sample_out <- test_sample
# NOTE(review): this re-runs predict() on test_sample even though the same
# scores were just stored in test_sample$LR_pred above.
test_sample_out$predicted_val <-
predict(Ffit_LR, type='response', test_sample)
# Keep only the actual target and the predicted probability.
retain_vars_out <- c("target","predicted_val")
test_sample_out <- test_sample_out[retain_vars_out]
## Adding Model Code ##
test_sample_out$model_code <- "cs"
## Decile ##
test_sample_out$predicted_val <-
as.numeric(test_sample_out$predicted_val)
test_sample_out <-
test_sample_out[order(-test_sample_out$predicted_val),]
test_sample_out$rank <- rep(1:dim(test_sample_out)[1],each=1)
test_sample_out$decile1 <-
test_sample_out$rank/dim(test_sample_out)[1] * 10
test_sample_out$decile <- floor(test_sample_out$decile1)
test_sample_out$decile[test_sample_out$decile > 9] <- 9
test_sample_out$decile1 <- NULL
test_sample_out$rank <- NULL
test_sample_out$model_code <- as.factor(test_sample_out$model_code)
myfun1 <- function(x){
c(mean=mean(x, na.rm=TRUE))
}
attach(test_sample_out)
summaryBy_Decile <-
summaryBy(predicted_val ~decile, data=test_sample_out,
FUN=myfun1,keep.names=TRUE)
detach(test_sample_out)
names(summaryBy_Decile)[names(summaryBy_Decile)=="predicted_val"] <-
"avg_decileby"
##summaryBy_Decile$mdl_cde <- args_2
## COMPUTING ROC CURVE (x-axis: fpr, y-axis: tpr)
pred_LR1 <- prediction(train_sample$LR_pred, train_sample[,"target"])
perf_LR11 <- performance(pred_LR1,"tpr","fpr")
pred_LR2 <- prediction(test_sample$LR_pred, test_sample[,"target"])
perf_LR21 <- performance(pred_LR2,"tpr","fpr")
## PRECISION/RECALL CURVE (x-axis: recall, y-axis: precision)
perf_LR12 <- performance(pred_LR1, "prec", "rec")
perf_LR22 <- performance(pred_LR2, "prec", "rec")
## SENSITIVITY/SPECIFICITY CURVE
## (x-axis: specificity, y-axis: sensitivity)
perf_LR13 <- performance(pred_LR1, "sens", "spec")
perf_LR23 <- performance(pred_LR2, "sens", "spec")
## LIFT CHART
perf_LR14 <- performance(pred_LR1,"lift","rpp")
perf_LR24 <- performance(pred_LR2,"lift","rpp")
## CALCULATING KS STATISTICS
KS_LR1 <-
max(attr(perf_LR11,'y.values')[[1]]-attr(perf_LR11,'x.values')[[1]])
KS_LR2 <-
max(attr(perf_LR21,'y.values')[[1]]-attr(perf_LR21,'x.values')[[1]])
## CALCULATING AREA UNDER ROC & Gini(Accuracy Ratio)
ROC_LR1 <- attr(performance(pred_LR1,"auc"),'y.values')
ROC_LR2 <- attr(performance(pred_LR2,"auc"),'y.values')
ROC_LR1 <- ROC_LR1[[1]]
ROC_LR2 <- ROC_LR2[[1]]
Gini_LR1 <-(ROC_LR1-0.5)*2
Gini_LR2 <-(ROC_LR2-0.5)*2
## LR MODEL SUMMARY
cexmain <- 1.2
#pdf(file= file.path(op_dir_path,
#paste(model_version_string ,"Result.pdf", sep="_" )))
textplot(c("Logistic Regression Result",
paste("Date:",as.character(Sys.Date()))),
col='purple',cex=cexmain,
valign="top")
textplot(capture.output(summary(Ffit_LR )),valign="top")
title(main = list("Parameter Estimates", cex=cexmain,col="purple", font=3))
textplot(capture.output(t(t(data.frame(VIF=vif(Ffit_LR))))),
valign="top",halign="center",fixed.width=TRUE,cex= 0.5)
title(main = list("Collinearity diagnostics:VIF",
cex=cexmain, col="purple", font=3))
plot(perf_LR11,col='blue',lty=1);
plot(perf_LR21, col='green',add=TRUE,lty=2);
legend("bottomright", c('Train','Test'),inset=.05,
fill=c('blue','green'),horiz=FALSE)
title(main = list("ROC Curve",
cex=cexmain, col="purple", font=3))
plot(perf_LR12,col='blue',lty=1);
plot(perf_LR22, col='green',add=TRUE,lty=2);
legend("topright", c('Train','Test'),inset=.05,
fill=c('blue','green'),horiz=FALSE)
title(main = list("Precision/Recall Curve",
cex=cexmain, col="purple", font=3))
plot(perf_LR13,col='blue',lty=1);
plot(perf_LR23, col='green',add=TRUE,lty=2);
legend("topright", c('Train','Test'),inset=.05,
fill=c('blue','green'),horiz=FALSE)
title(main = list("Sensitivity/Specificity",
cex=cexmain, col="purple", font=3))
plot(perf_LR14,col='blue',lty=1);
plot(perf_LR24, col='green',add=TRUE,lty=2);
legend("topright", c('Train','Test'),inset=.05,
fill=c('blue','green'),horiz=FALSE)
title(main = list("Lift Chart", cex=cexmain ,col="purple", font=3))
Outtime_chart <- file.path(param_path_code,"Integrated_Outtime_chart.Rc")
loadcmp(file=Outtime_chart, envir = .GlobalEnv, chdir = FALSE)
DataOut <-
Outtimechart(test_sample_out, plotTrue=TRUE,outputpathGraph=op_dir_path)
##.......... Coefficient Plot for Logistic Regression ............###
FfitData <- summary(Ffit_LR)
coefficients.Data <- FfitData$coefficients
colinsert <- c(as.numeric(coefficients.Data[,"Pr(>|z|)"]))
colinsert[colinsert < 0.05] <- 'green'
colinsert[colinsert!='green'] <- 'red'
colinsert <- colinsert[-1] ##..remove of Intercept
coefplot(Ffit_LR,cex.var=0.65, cex.pts=1.6,
mar=c(1,14,5.1,2), col.pts=colinsert, col='blue')
legend("bottomright", c('Significant variables','UnSignificant variables'),
fill=c("green","red"), horiz=FALSE, cex=0.55)
dev.off()
## GET MODEL SIGNIFICANT VARIABLES
LR_mdl_sig_vars <- all.vars(as.formula(Ffit_LR))
del_list <- list(LR_mdl_sig_vars[1])
LR_mdl_sig_vars <- LR_mdl_sig_vars [!LR_mdl_sig_vars %in% del_list]
Sig_vars <-
data.frame(model_type="LR",significant_variable_name=LR_mdl_sig_vars)
assign("Sig_vars", Sig_vars, envir = .GlobalEnv)
assign("ROC_LR1", ROC_LR1, envir = .GlobalEnv)
assign("ROC_LR2", ROC_LR2, envir = .GlobalEnv)
assign("Gini_LR1", Gini_LR1, envir = .GlobalEnv)
assign("Gini_LR2", Gini_LR2, envir = .GlobalEnv)
assign("KS_LR1", KS_LR1, envir = .GlobalEnv)
assign("KS_LR2", KS_LR2, envir = .GlobalEnv)
## SAVE MODEL TO REPOSITORY
save(Ffit_LR,
file = file.path(mdl_repo,paste(model_version_string,"RData", sep=".")))
write.csv(test_sample_out , file = file.path(op_dir_path,
gsub(" ","",paste(model_version_string,".csv"))),row.names = FALSE)
Dname_out <- paste(model_version_string,"Decile",sep="_")
write.csv(summaryBy_Decile, file = file.path(mdl_result_path,
gsub(" ","",paste(Dname_out,".csv"))),row.names = FALSE)
|
d47129254b2bbcda868b51181ecafbb7e8c04d46
|
a4dded77031478686d3e0d522743ec951b10a3a0
|
/lcyR/R/lcy.is.matrix.in.R
|
39d0fa9b4d44b8132c5d900d35bf134eee492654
|
[] |
no_license
|
farscape2012/rpkg
|
5008bc4a8ad1e1f8a97ab23a9d23710f98507abb
|
7701bce8e07f3785ac9ab48c3e52f0bb1f9e8d7f
|
refs/heads/master
| 2020-06-13T21:31:08.237563
| 2016-12-04T21:27:14
| 2016-12-04T21:28:06
| 75,551,451
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
lcy.is.matrix.in.R
|
lcy.is.matrix.in <- function(m1, m2, order=TRUE,byrow=TRUE){
  # For every row (byrow = TRUE) or column (byrow = FALSE) of m1, report
  # whether it also occurs as a row/column of m2. With order = TRUE the
  # element order must match exactly; with order = FALSE only set equality
  # of the elements is required. Returns a logical vector with one entry
  # per row/column of m1.
  margin <- if (byrow) 1 else 2
  num <- dim(m1)[margin]
  if (order) {
    # Serialise each slice to a single key string and look it up in m2.
    keys1 <- apply(m1, margin, paste, collapse = '->')
    keys2 <- apply(m2, margin, paste, collapse = '->')
    hit <- !is.na(match(keys1, keys2))
  } else {
    # Order-insensitive: compare each slice of m1 against every slice of
    # m2 with setequal(), then flag slices that matched at least once.
    slices <- lapply(seq_len(num), function(i) if (byrow) m1[i, ] else m1[, i])
    match_rows <- lapply(slices, function(s) {
      apply(m2, margin, function(other) setequal(s, other))
    })
    hit <- rowSums(do.call(rbind, match_rows)) != 0
  }
  return(hit)
}
|
3f7d5dd64af4fdfaa5018a620448c92f8bb3a9ad
|
5500300830c24d899cf019450a4576b98f0fa95c
|
/R/rgl.Map.R
|
d4d12cbf1874ea1998a6c9bbb87cfc4905e18497
|
[] |
no_license
|
svats2k/TeachingDemos
|
4af2ce6f3b6c2c82c8ccba2f38a6901a6b2eb1b7
|
936e6088484f7c4e43e5ce835b50d7b0a161c8cf
|
refs/heads/master
| 2020-04-06T05:23:05.076622
| 2013-01-21T00:00:00
| 2013-01-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
r
|
rgl.Map.R
|
"rgl.Map" <-
function(Map,which,...) {
  ## Draw the outlines of map shapes on a unit sphere with the 'rgl' device.
  ##
  ## Map:   a map object whose $Shapes element is a list; each shape carries
  ##        a $verts matrix with longitude (deg) in column 1 and latitude
  ##        (deg) in column 2.
  ## which: logical/integer index selecting shapes to draw (default: all).
  ## ...:   further arguments passed through to rgl.lines().
  if (missing(which)) which <- TRUE  # TRUE, not T: T can be reassigned
  if(!require(rgl)) stop("This function depends on the 'rgl' package which is not available")
  lapply(Map$Shapes[which], function(shape) {
    # Degrees -> radians; lat becomes colatitude (angle from the pole) so
    # the spherical-coordinate formulas below apply directly.
    long <- shape$verts[,1] * pi/180
    lat <- pi/2 - shape$verts[,2] * pi/180
    # Alternative axis convention, kept from the original for reference:
    # x <- cos(lat)*sin(long)
    # y <- -sin(lat)*sin(long)
    # z <- cos(lat)*cos(long)
    z <- cos(long)*sin(lat)
    y <- cos(lat)
    x <- sin(long)*sin(lat)
    # Duplicate each vertex index so consecutive vertices form segment
    # pairs for rgl.lines, closing the polygon back to the first vertex.
    tmp.i <- rep( seq(along=x), each=2)
    tmp.i <- c(tmp.i[-1],1)
    rgl.lines(x[tmp.i], y[tmp.i], z[tmp.i],...)
  })
  invisible()
}
|
0fd30587ebb3905f91bc88d1a388efdfe7961973
|
437be4a0471c6f4dddd48ebf54e72dcc675b5307
|
/sbfc-package/man/logposterior_plot.Rd
|
35b8b04927f01650e5d35170e7d4e2566436ea27
|
[] |
no_license
|
vkrakovna/sbfc
|
c49d2429d5ad93d98388cbda0d04ec76d05a0651
|
97feb1795b512608b65d7d57d2cc9faf2bbdaf76
|
refs/heads/master
| 2022-02-05T14:11:31.326338
| 2022-01-15T16:40:21
| 2022-01-15T16:40:21
| 38,595,118
| 17
| 4
| null | 2021-11-25T21:02:15
| 2015-07-06T03:16:41
|
C++
|
UTF-8
|
R
| false
| true
| 743
|
rd
|
logposterior_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sbfc.R
\name{logposterior_plot}
\alias{logposterior_plot}
\title{Log posterior plot}
\usage{
logposterior_plot(sbfc_result, start = 0, end = 1, type = "trace")
}
\arguments{
\item{sbfc_result}{An object of class \code{sbfc}.}
\item{start}{The start of the included range of MCMC iterations (default=0, i.e. starting with the first iteration).}
\item{end}{The end of the included range of MCMC iterations (default=1, i.e. ending with the last iteration).}
\item{type}{Type of plot (either \code{trace} or \code{acf}, default=\code{trace}).}
}
\description{
Plots the log posterior for a range of the MCMC iterations (indicated by \code{start} and \code{end}).
}
|
f40b18dd22a9b5052b05445e2eedbeef493cc73b
|
650ad2ff77a641ccf274adb0a8efabf140f37392
|
/TelechargementDataMeteo_20160212.r
|
82f9f50b374c127d3eff8bb132dc085366347c07
|
[] |
no_license
|
PinaudDavid/cebcscripts
|
0725f938848b21fbacedeeeb1097f642c9a35fbe
|
bfe8d1826431dc6a7e54fc4ee36fb5b36ff6a3e4
|
refs/heads/main
| 2023-07-30T10:02:03.110135
| 2021-09-16T14:47:49
| 2021-09-16T14:47:49
| 402,428,777
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 10,222
|
r
|
TelechargementDataMeteo_20160212.r
|
####################################################################################
###### Téléchargement de données météo sur ftp://ftp.ncdc.noaa.gov/pub/data/gsod avec R
### David Pinaud CEBC-CNRS/ULR
### pinaud@cebc.cnrs.fr, 12/02/2016
# les lignes suivantes sont à modifier (marqué **A DEFINIR**) et copier-coller sour R
# Marche à suivre :
# - repérer le code USAF des stations d'intérêt
# - renseigner ce code et l'année dans le script (création d'un fichier de référence)
# - récupérer les fichiers disponibles pour les stations et les années (zippés) sur le serveur et les copier en local
# - décompresser et donne un fichier texte par an et station (.txt, lisible sous Excel)
#
######### Les codes des stations ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt :
# Integrated Surface Hourly Database Station History, March 2007
#
#USAF = Air Force Datsav3 station number
#WBAN = NCDC WBAN number
#CTRY = WMO historical country ID, followed by FIPS country ID
#ST = State for US stations
#CALL = ICAO call sign
#LAT = Latitude in thousandths of decimal degrees
#LON = Longitude in thousandths of decimal degrees
#ELEV = Elevation in tenths of meters
#BEGIN = Beginning Period Of Record (YYYYMMDD). There may be reporting gaps within the P.O.R.
#END = Ending Period Of Record (YYYYMMDD). There may be reporting gaps within the P.O.R.
#
#Notes:
#- Missing station name, etc indicate the metadata are not currently available.
#- The term "bogus" indicates that the station name, etc are not available.
#- For a small % of the station entries in this list, climatic data are not
# available. These issues will be addressed. To determine data availability
# for each location, see the 'ish-inventory.txt' or 'ish-inventory.csv' file.
#
# Quelques stations d'intérêt :
#USAF WBAN STATION NAME CTRY ST CALL LAT LON ELEV(.1M) BEGIN END
#619960 99999 MARTIN DE VIVIES /I FR FS -37800 +077500 +00290 19730101 20120618
#619970 99999 ALFRED FAURE /ILES FR FS -46433 +051850 +01420 19740903 20120618
#619980 99999 PORT-AUX-FRANCAIS FR FS -49300 +070200 +00300 19730101 20120618
#619800 99999 SAINT-DENIS/GILLOT RE RE FMEE -20900 +055533 +00250 19730101 20120618
#896420 99999 DUMONT DURVILLE AY AY -66667 +140017 +00430 19850701 20120618
#896430 99999 PORT MARTIN AY AY -66817 +141383 +00390 19910924 20090131
#073300 99999 NIORT FR FR LFBN +46316 -000400 +00610 19860101 20130705
# le listing des inventaires disponibles : ftp://ftp.ncdc.noaa.gov/pub/data/inventories/ISH-INVENTORY.TXT
## The daily elements included in the dataset (as available from each station) are:
#Mean temperature (.1 Fahrenheit)
#Mean dew point (.1 Fahrenheit)
#Mean sea level pressure (.1 mb)
#Mean station pressure (.1 mb)
#Mean visibility (.1 miles)
#Mean wind speed (.1 knots)
#Maximum sustained wind speed (.1 knots)
#Maximum wind gust (.1 knots)
#Maximum temperature (.1 Fahrenheit)
#Minimum temperature (.1 Fahrenheit)
#Precipitation amount (.01 inches)
#Snow depth (.1 inches)
#Indicator for occurrence of: Fog
# Rain or Drizzle
# Snow or Ice Pellets
# Hail
# Thunder
# Tornado/Funnel Cloud
#
# plus de détails sur ftp://ftp.ncdc.noaa.gov/pub/data/gsod/GSOD_DESC.txt
#### début du script :
# définir le répertoire existant où seront placés les fichiers **A DEFINIR**
setwd("D:/Documents/Outils/R/Scripts R/ExtractionDataInternet")
## D'abord, tu dois renseigner les années et stations d'intérêt (dans un dataframe "dat2")
# exemple avec 2 stations et 5 années: **A DEFINIR**
year <- c(1999, 2000, 2001, 2002, 2003) # *liste des annees qui t'interessent*
USAF <- c("017452", "562488") # *code USAF des stations qui t'interessent*
WBAN <- c(99999, 99999) # *code WBAN*
# copier les lignes suivantes jusqu'à la fin :
dat2 <- expand.grid(year=year, USAF=USAF, WBAN=WBAN) # objet qui liste les stations d'intérêt
# liste des chemins d'accès :
meteopaths <- paste("ftp://ftp.ncdc.noaa.gov/pub/data/gsod/", dat2$year, "/", dat2$USAF, "-", dat2$WBAN, "-", dat2$year, ".op.gz", sep= "")
#tu trouves ces codes dans le metafichier ISH-inventory.txt recuperable sur ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt
## downloadNCDC downloads one GSOD weather file given the row index i into
## the reference table dat2 (year, USAF and WBAN station codes).
## NOTE(review): relies on the globals 'meteopaths' and 'dat2' built above,
## and writes the .op.gz file into the current working directory.
downloadNCDC <- function(i)
{
download.file(zz <- meteopaths[i], destfile= paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".op.gz", sep= ""), method= "internal", mode= "wb")
}
## boucle pour télécharger tous les fichiers (même si pas dispo), test pour chaque fichier et ajout du résultat (disponible ou pas) dans "dat2$Metavail"
# un fichier est créé sur le disque dur pour chaque station (dispo ou pas)
test <- vector("list", nrow(dat2)) # objet d'accueil
# boucle d'importation et de test
for(i in 1:nrow(dat2)){
test[[i]] <- try(downloadNCDC(i))
}
dat2$Metavail<- sapply(test, function(x) !inherits(x, "try-error")) # répond T ou F si pas d'erreur
## boucle pour lire et transformer les fichiers (seulement les disponibles) sous R à partir du disque dur
# header :
heade <- c("STN---", "WBAN", "YEARMODA", "TEMP", "count.temp", "DEWP", "count.dewp", "SLP", "count.slp", "STP", "count.stp", "VISIB", "count.visib", "WDSP", "count.wdsp", "MXSPD", "GUST", "MAX", "flag.max", "MIN", "flag.min", "PRCP", "flag.prcp", "SNDP", "fog", "rain", "snow", "hail", "thunder", "tornado")
# boucle pour chaque fichier :
for(i in 1:nrow(dat2))
{
if(dat2$Metavail[i]) # pour les fichiers dispo uniquement
{
# connexion
dd <- readLines(zz <- gzfile(paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".op.gz", sep= ""), "r"))
close(zz)
# écriture du fichier brut
write.table(dd, "dd.txt", sep="\t", quote=F, row.names=F)
# importation avec les colonnes délimitées
a <- read.fwf("dd.txt", skip=2, widths=c(6, -1, 5, -2, 8, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 5, -1, 2, -2, 5, -1, 2, -2, 5, -2, 5, -2, 6, 1, -1, 6, 1, -1, 5, 1, -1, 5, -2, 1, 1, 1, 1, 1, 1))
# remplacement du header
colnames(a) <- heade
a[, paste(heade[1])] <- as.character(rep(dat2$USAF[i], nrow(a)))
# exportation en .txt, nom du fichier = nom de la station (codeUSAF puis codeWBAN) et année
write.table(a, paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".txt", sep = ""), sep="\t", quote=F, row.names=F)
rm(a, dd)
}
}
# Variant: build one single combined output file.
# NOTE(review): `dat2` (station table with USAF/WBAN ids, year, and a
# `Metavail` availability flag) and `heade` (prepared column names) are
# defined earlier in the file, outside this excerpt.
i <- min(which(dat2$Metavail))
# Read one gzipped GSOD .op file to bootstrap the column structure.
dd <- readLines(zz <- gzfile(paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".op.gz", sep= ""), "r"))
close(zz)
# Dump the raw lines so they can be re-read with fixed-width parsing.
write.table(dd, "dd.txt", sep="\t", quote=F, row.names=F)
# Import with fixed-width columns (negative widths skip filler characters).
a <- read.fwf("dd.txt", skip=2, widths=c(6, -1, 5, -2, 8, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 5, -1, 2, -2, 5, -1, 2, -2, 5, -2, 5, -2, 6, 1, -1, 6, 1, -1, 5, 1, -1, 5, -2, 1, 1, 1, 1, 1, 1))
# Replace the header with the prepared column names.
colnames(a) <- heade
a[, paste(heade[1])] <- as.character(rep(dat2$USAF[i], nrow(a)))
# Zero-row template with the right columns; rows are appended below.
mm <- a[FALSE,]
for(i in 1:nrow(dat2))
{
if(dat2$Metavail[i]) # only for the stations whose file is available
{
# connection: read the gzipped .op file for this station/year
dd <- readLines(zz <- gzfile(paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".op.gz", sep= ""), "r"))
close(zz)
# write the raw file
write.table(dd, "dd.txt", sep="\t", quote=F, row.names=F)
# import with fixed-width columns
a <- read.fwf("dd.txt", skip=2, widths=c(6, -1, 5, -2, 8, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 6, -1, 2, -2, 5, -1, 2, -2, 5, -1, 2, -2, 5, -2, 5, -2, 6, 1, -1, 6, 1, -1, 5, 1, -1, 5, -2, 1, 1, 1, 1, 1, 1))
# replace the header
colnames(a) <- heade
a[, paste(heade[1])] <- as.character(rep(dat2$USAF[i], nrow(a)))
# append everything to the accumulator
# NOTE(review): rbind in a loop is O(n^2); acceptable for small station counts.
mm <- rbind(mm, a)
rm(a, dd)
}
}
write.table(mm, paste("donnéesMétéo_", Sys.Date(), ".txt", sep=""), sep="\t", quote=F, row.names=F)
### end of the combined-file variant
# NOTE(review): the next line looks like a leftover from the per-file
# variant; `a` is immediately overwritten with `mm` on the following line.
a <- read.table(paste(dat2$USAF[i], "-", dat2$WBAN[i], "-", dat2$year[i], ".txt", sep = ""), sep="\t", h=T)
a <- mm
# GSOD column meanings (imperial units):
#Mean temperature (.1 Fahrenheit)
#Mean dew point (.1 Fahrenheit)
#Mean sea level pressure (.1 mb)
#Mean station pressure (.1 mb)
#Mean visibility (.1 miles)
#Mean wind speed (.1 knots)
#Maximum sustained wind speed (.1 knots)
#Maximum wind gust (.1 knots)
#Maximum temperature (.1 Fahrenheit)
#Minimum temperature (.1 Fahrenheit)
#Precipitation amount (.01 inches)
#Snow depth (.1 inches)
#Indicator for occurrence of: Fog, Rain or Drizzle, Snow or Ice Pellets, Hail, Thunder, Tornado/Funnel Cloud
# Various conversions to metric units (°C, km/h, mm):
b <- data.frame(Date=strptime(a$YEARMODA, "%Y%m%d"),
MeanTemp=(a$TEMP-32)/1.8,
MinTemp=(a$MIN-32)/1.8,
MaxTemp=(a$MAX-32)/1.8,
MeanSeaLevelPres=a$SLP,
MeanWindSpeed=a$WDSP/1.852,
PrecTot=a$PRCP*25.4)
# Plot the daily mean temperature with a month axis and weekly gridlines.
plot(MeanTemp~Date, b, t="l", xaxt="n")
r <- range(b$Date)
axis.POSIXct(1, at = seq(r[1], r[2], by = "months"), format = "%b")
abline(v=seq(r[1], r[2], by = "weeks"), col="grey")
points(MeanTemp~Date, b, t="l", col="orange", lwd=2)
# Bin each day into its calendar month for the aggregations below.
b$months <- cut(b$Date, seq(r[1], r[2], by = "months"))
write.table(b, paste("donnéesMétéo_", Sys.Date(), ".txt", sep=""), sep="\t", quote=F, row.names=F)
# Monthly totals/means.
# NOTE(review): `t` shadows base::t() from here on.
t <- tapply(b$PrecTot, b$months, sum)
mt <- tapply(b$MeanTemp, b$months, mean)
dimnames(t)[[1]]
sum(t)
barplot(t, xaxt="n")
axis.POSIXct(1, at = seq(r[1], r[2], by = "months"), "months")
|
0911d545a1b2200fafa2cfd7d3409950c70e08ef
|
4abd7b3daeb680eec449ca3785e3a71b5b0d4356
|
/R/example_LFL.R
|
f4f1df16ef21004794aefe246436972a8371dd90
|
[] |
no_license
|
matheusbarroso/dboot
|
24b6da9447cbd393bb5785eee39fa495da02e0c0
|
e54dcfe0310fb24f977fc00ff40d645abab35a42
|
refs/heads/master
| 2021-09-14T20:44:33.796439
| 2018-03-06T19:16:47
| 2018-03-06T19:16:47
| 117,690,628
| 0
| 0
| null | 2018-01-24T20:53:24
| 2018-01-16T14:00:40
|
R
|
UTF-8
|
R
| false
| false
| 758
|
r
|
example_LFL.R
|
#'A realization of a \emph{Gamma-GARMA} model
#'
#'A dataset containing the realization of a \emph{Gamma-GARMA
#'(0,1)} model, with \eqn{\phi=0.1,\theta=0.1, \beta=2,
#'\mu_{0}=10,\sigma^2=2 }
#'
#'@format A data frame with 1000 rows and 4 variables:
#'\describe{
#' \item{indext}{indext, time index of the series}
#' \item{mu.t}{mu.t, the observed \eqn{\mu_{t}} value}
#' \item{yt}{\eqn{y_{t}}, the realization of the process}
#' \item{x}{x, an intercept variable}
#' }
#'
#'@references For the \emph{GARMA} model:
#'
#' Benjamin, Michael A., Rigby, Robert A. and Stasinopoulos, D. Mikis. 2003. Generalized Autoregressive Moving Average Models. Journal of the American Statistical Association. Mar, 2003, Vol. 98, 461, pp. 214-223.
#'
#'
"example_LFL"
|
099f9957fd1783143c3014d3d61127c8c1f6b2ae
|
c774252b7929cdd9da0acbfaea6375078d1bbbb3
|
/R/roxygen.R
|
5b56c645fc1eb86b5382a82605f88bb0dbf9bcc0
|
[] |
no_license
|
lmw40/mpMap2
|
35032fef23cf204f62eb7c8c7107a1034ba23d5e
|
0ac4569b251ccc041cfe965dfe41091aff18e3bc
|
refs/heads/master
| 2021-01-22T19:22:11.104705
| 2016-08-23T00:52:51
| 2016-08-23T00:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
roxygen.R
|
#' @import qtl
#' @import igraph
#' @import methods
#' @importFrom pryr address
#' @importFrom nnls nnls
#' @exportClass pedigreeGraph
#' @exportMethod subset
#' @exportMethod plot
#' @importClassesFrom Matrix index dspMatrix dppMatrix
#' @importFrom methods setClass
#' @useDynLib mpMap2
NULL
|
a46a18515c8c5008622976f0550399202ce023d3
|
6816c9572c45d5dd4772b710b7e331dc418f4208
|
/R/PubMedMiner.R
|
bef2f45428fcf57a8df73c484c53708eefb5b587
|
[
"MIT"
] |
permissive
|
vijayr1912/proteomicsliteratureminer
|
7a57dd7e0953b1f9d3cc174b195bdb99f5e607af
|
f14d13e2bf204d6812bd7f9a9a9449bc7d2cab14
|
refs/heads/master
| 2020-06-07T10:44:22.962535
| 2019-06-21T00:39:42
| 2019-06-21T00:39:42
| 193,003,972
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,777
|
r
|
PubMedMiner.R
|
#####################################################################################################
# Literature Miner Entry Functions
# Jemma Wu jemma.wu@mq.edu.au
# Last modified 11 July 2018
#####################################################################################################
#' Generates a list of Pubmed query results.
#'
#' Reads one query per row from \code{query.file} (columns: UniProtID, IDType,
#' TaxID, Keyword, plus a fifth column restricting the keyword to titles only),
#' runs \code{pubmed_miner()} for each row, and writes a summary sheet plus
#' one result sheet per query (with any generated plot images) to an Excel
#' workbook.
#'
#' @param query.file Path to the input file having the UniProtID.
#' @param output.file Path to the output file with the results of the Pubmed query.
#' @return A list with elements \code{all.datquery}, \code{list.datpubmed}
#'   and \code{list.datquery}; writes \code{output.file} as a side effect.
#' @examples
#' pubmedMiner_entry(query.file="potential_marker.xlsx", output.file = "potential_marker_pubmed_results.xlsx")
pubmedMiner_entry <- function(query.file, output.file = "pubmed_results.xlsx") {
  # These libraries will need to be commented out when packaged
  # (a package should declare them in DESCRIPTION instead).
  library(RISmed)
  library(wordcloud)
  library(openxlsx)
  library(cluster)
  library(httr)
  library(tm)

  dat.input = openxlsx::readWorkbook(query.file)
  list.datquery = list()
  list.datpubmed = list()

  for (query.idx in seq_len(nrow(dat.input))) {
    cat(paste('query.idx', query.idx))
    UniProtID = dat.input[query.idx, 'UniProtID']
    IDType = dat.input[query.idx, "IDType"]
    taxid = dat.input[query.idx, 'TaxID']
    keyword = dat.input[query.idx, 'Keyword']
    # NOTE(review): the title-only flag is addressed by position; confirm
    # the input sheet always has it in column 5.
    ti.only = dat.input[query.idx, 5]

    pd.res <- try(pubmed_miner(UniProtID, IDType, taxid, keyword, ti.only, query.idx = query.idx))
    if (!inherits(pd.res, 'try-error')) {
      list.datquery[[query.idx]] = pd.res$dat.query
      list.datpubmed[[query.idx]] = pd.res$dat.pubmed
    } else {
      # Fallback row for a failed query.
      # BUG FIX: the original referenced an undefined `synonyms` variable
      # here, which itself raised an error and aborted the whole loop;
      # record NA for the synonyms instead.
      list.datquery[[query.idx]] = data.frame('UniProtID' = UniProtID, 'GeneID' = NA, 'TaxID' = taxid,
                                              'Synonyms' = NA_character_,
                                              'Keywords' = keyword, 'KeywordInTitleOnly' = ti.only, 'TotalResults' = 0,
                                              'Category' = 3, 'False' = 0, 'PubmedQuery' = NA)
      list.datpubmed[[query.idx]] = 'No result'
    }
  }

  # Summary table: one row per query, with a leading NQuery index column.
  all.datquery = do.call(rbind, list.datquery)
  all.datquery$NQuery = seq_len(nrow(all.datquery))
  all.datquery = all.datquery[, c(ncol(all.datquery), 1:(ncol(all.datquery) - 1))]
  all.datquery[is.na(all.datquery)] = ''

  # Output workbook: "query" summary sheet plus one sheet per query, each
  # embedding the word-cloud / MeSH-distance plots when the PNGs exist.
  wb = openxlsx::createWorkbook()
  openxlsx::addWorksheet(wb, sheetName = "query")
  writeData(wb, "query", all.datquery)
  for (ii in seq_along(list.datpubmed)) {
    list.datquery[[ii]][is.na(list.datquery[[ii]])] = ''
    openxlsx::addWorksheet(wb, sheetName = paste("pubmed result", ii))
    writeData(wb, paste("pubmed result", ii), list.datquery[[ii]])
    writeData(wb, paste("pubmed result", ii), list.datpubmed[[ii]], startRow = 4)
    if (file.exists(paste('barplotNwordcloud', ii, '.png', sep = '')))
      openxlsx::insertImage(wb, paste("pubmed result", ii), paste('barplotNwordcloud', ii, '.png', sep = ''),
                            width = 5, height = 8, startRow = 3, startCol = 12)
    if (file.exists(paste('plot_dist_mesh', ii, '.png', sep = '')))
      openxlsx::insertImage(wb, paste("pubmed result", ii), paste('plot_dist_mesh', ii, '.png', sep = ''),
                            width = 5, height = 5, startRow = 3, startCol = 20)
  }
  openxlsx::saveWorkbook(wb, output.file, overwrite = TRUE)

  list(all.datquery = all.datquery, list.datpubmed = list.datpubmed, list.datquery = list.datquery)
}
|
85a56f718bffc1551913340d4244240a97f3ef71
|
2d58f8634a26d46b6a8b4cb967698c5629ab7a02
|
/extract_keywords.r
|
3a9a7d5faf76713f49a7ab37e0b8298bcb3bd436
|
[] |
no_license
|
mcandocia/website_keywords
|
3bcb36e7efe135426c2e6c9377d380273b0d9360
|
cd231c5fc668c505c54df96075b455ff0cf82f39
|
refs/heads/master
| 2021-01-10T07:28:58.888625
| 2016-09-19T02:49:03
| 2016-09-19T02:49:03
| 51,411,024
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
extract_keywords.r
|
# Score candidate keyword n-grams for one block of web pages and write the
# rows that clear the ROC-selected threshold to a per-block CSV.
#
# Usage: Rscript extract_keywords.r <block_id>
args <- commandArgs(TRUE)
library(stringr)
library(pROC)

# Zero-padded 3-digit block id from the first command-line argument.
block_id <- as.character(args[1])
bs <- str_pad(block_id, 3, pad = "0")

# NOTE(review): hard-coded working directory ties this script to one machine.
setwd('/home/max/workspace/webclass')

# Loads `sm` (scoring model) and `selected_threshold` from the saved
# ROC-training environment.
load('keyword_training/roc_environment.rdata')
test_data <- read.csv('webpages/keyword_data.csv', encoding = 'utf-8')

new_odds_ratios <- predict(sm, test_data)
# Compute the threshold mask once (the original evaluated it twice).
keep <- new_odds_ratios >= selected_threshold
keydata <- test_data[keep, c('file_id', 'gram')]
keydata$preds <- new_odds_ratios[keep]

# Build the output path once and reuse it for both writing and logging
# (the original recomputed it after the write).
filename <- paste0('webpages/keyword_file_', bs, '.csv')
write.csv(keydata, file = filename, row.names = FALSE, fileEncoding = 'utf-8')
print(paste0("wrote keywords to ", filename))
|
f188a81f948bf403b2ed285bbc47439c8464bade
|
f06d08ebb0e44fa531b3bc119a4417548e4b1c74
|
/man/lpba40.Rd
|
4a68c80166e56e29ae8457c1f36b416d40c1ee4c
|
[] |
no_license
|
luojiahuli/brainGraph
|
f2a17eda5146831f9445ce9be61080e379a116ff
|
3abd4be7c3fe88dc8f165e21053865ba265a7240
|
refs/heads/master
| 2021-01-17T08:44:14.966576
| 2015-08-27T10:36:19
| 2015-08-27T10:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,798
|
rd
|
lpba40.Rd
|
\name{lpba40}
\alias{lpba40}
\docType{data}
\title{Coordinates for data from the LONI probabilistic brain atlas}
\description{
This is a list of spatial coordinates for the LPBA40 atlas, along with
indices for the major lobes of the brain. The coordinates were obtained
from some colleagues.
}
\usage{data("lpba40")}
\format{
A data frame with 56 observations on the following 10 variables.
\describe{
\item{\code{name}}{a character vector of region names}
\item{\code{x}}{a numeric vector of x-coordinates (internal to \code{brainGraph})}
\item{\code{y}}{a numeric vector of y-coordinates (internal to \code{brainGraph})}
\item{\code{z}}{a numeric vector of z-coordinates (internal to \code{brainGraph})}
\item{\code{x.mni}}{a numeric vector of x-coordinates (in MNI space)}
\item{\code{y.mni}}{a numeric vector of y-coordinates (in MNI space)}
\item{\code{z.mni}}{a numeric vector of z-coordinates (in MNI space)}
\item{\code{lobe}}{a factor with levels \code{Frontal} \code{Parietal} \code{Temporal} \code{Occipital} \code{Insula} \code{Cingulate} \code{SCGM}}
\item{\code{hemi}}{a factor with levels \code{} \code{L} \code{R}}
\item{\code{index}}{a numeric vector}
}
}
\source{
Shattuck DW, Mirza M, Adisetiyo V, Hojatkashani C, Salamon G, Narr KL,
Poldrack RA, Bilder RM, Toga AW (2007) \emph{Construction of a 3D probabilistic
atlas of human cortical structures}. NeuroImage,
doi:10.1016/j.neuroimage.2007.09.031
}
\references{
Shattuck DW, Mirza M, Adisetiyo V, Hojatkashani C, Salamon G, Narr KL,
Poldrack RA, Bilder RM, Toga AW (2007) \emph{Construction of a 3D probabilistic
atlas of human cortical structures}. NeuroImage,
doi:10.1016/j.neuroimage.2007.09.031
}
\examples{
data(lpba40)
str(lpba40)
}
\keyword{datasets}
|
0f925b28b4e05764b751072aaa8f0de5f5586cda
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTukThUnITPerSecTOwatt.Rd.R
|
d2043622e32d175a62eadabf443d745e434f6a0a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
NISTukThUnITPerSecTOwatt.Rd.R
|
# Auto-extracted example script for the NISTunits package:
# converts 10 British thermal units (IT) per second to watts.
library(NISTunits)
### Name: NISTukThUnITPerSecTOwatt
### Title: Convert British thermal unitIT per second to watt
### Aliases: NISTukThUnITPerSecTOwatt
### Keywords: programming
### ** Examples
NISTukThUnITPerSecTOwatt(10)
|
97370fb4b807b0fee0d6f53c45ef8fe6865f7765
|
7bdfa5ebc03385d99604a39a362df4a7a329d591
|
/validation_test.r
|
f76b0942f79046f446831728c6fa1a78b5ec60de
|
[] |
no_license
|
julmue/rlang_libmnl_validation
|
e1937c327064edc476a872bfeea48d727bd76660
|
2f3b76a87a4bd9536c4d7e0819b5a18c0e38addb
|
refs/heads/master
| 2020-04-14T20:24:25.161686
| 2019-01-04T10:17:42
| 2019-01-04T10:17:42
| 164,092,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,776
|
r
|
validation_test.r
|
library(validate)
library(magrittr)
library(tibble)
library(dplyr)
library(purrr)
# -----------------------------------------------------------------------------
# helpers
# Combine a list of `validate` validator objects into a single validator
# (the validate package overloads `+` for validator concatenation).
test_group <- function(.validator_list) {
.validator_list %>% purrr::reduce(`+`)
}
# Confront `.test_data` with a validator, keyed on the INDEX column added
# by add_index(), and return the confrontation results as a data frame.
test_run <- function(.test_data, .validator) {
confront(.test_data, .validator, key="INDEX", raise = "all") %>%
as.data.frame
}
# Prepend an integer row-index column named `.index` to `.data`.
# Errors if a column with that name already exists.
add_index <- function(.data, .index = "INDEX") {
  stopifnot(!(.index %in% names(.data)))
  .data_names <- names(.data)
  # BUG FIX: the original hard-coded `.data$INDEX` here, ignoring the
  # `.index` argument, so any non-default name failed at the column
  # reordering step. seq_len() also handles zero-row frames safely.
  .data[[.index]] <- seq_len(nrow(.data))
  # Put the index column first; drop = FALSE keeps a data frame even in
  # degenerate single-column cases.
  .data[, c(.index, .data_names), drop = FALSE]
}
# validation helpers
# Keep only the rows of a confrontation result whose check failed.
filter_failing <- function(.data) {
filter(.data, value == FALSE)
}
# -----------------------------------------------------------------------------
# test data
# NOTE: "vemale" and "donkey" are deliberately invalid values, included to
# exercise the category checks below.
df <- tibble(
gender = c("male", "female", "male", "vemale","male", "female"),
animal = c("dog", "cat", "cat", "dog", "donkey", "dog")
) %>% add_index
# reference data: the allowed category levels
cats_gender = c("male", "female")
cats_animal = c("dog", "cat")
# -----------------------------------------------------------------------------
# collector
results <- list()
# Missing-value checks: neither column may contain NA.
results$test_na <- list(
validator(
!is.na(gender)) %>%
set_names("test_na_gender"),
validator(
!is.na(animal)) %>%
set_names("test_na_animal")
)%>%
test_group %>%
test_run(.test_data = df)
# Category checks: values must come from the reference levels above.
results$test_cats <- list(
validator(
gender %in% cats_gender) %>%
set_names("test_gender"),
validator(
animal %in% cats_animal) %>%
set_names("test_animals")
) %>%
test_group %>%
test_run(.test_data = df)
# collector
# NOTE(review): purrr::flatten() on a list of data frames yields a plain
# list of their columns, not a combined data frame; verify filter() below
# accepts that — bind_rows() may have been intended.
results %<>% flatten
# -----------------------------------------------------------------------------
# collector: rows that failed any check
failing <- filter_failing(results)
|
cef8e0464409c396a594a6a18426954c87063f90
|
9e51c85c1ff0ac0fd82467c0463ea238f7c8fc0a
|
/classyfire/Rscript_ChemicalClass_Heatmap.R
|
c418bae52e11e524a0915aea1be8bbe5f776e544
|
[
"BSD-3-Clause"
] |
permissive
|
IvanV87/amazon-urbanization
|
87004125f673c4eb95e4cf7ced1d357ee9d11412
|
70478517407cc430cd8186a9d31aa990b418a47b
|
refs/heads/master
| 2023-03-21T01:06:16.432568
| 2019-08-14T18:01:57
| 2019-08-14T18:01:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,233
|
r
|
Rscript_ChemicalClass_Heatmap.R
|
# #############################################################
# #
# Create heatmap of in silico predicted chemical classes #
# #
###############################################################
# load libraries
library(dendextend)
library(vegan)
library(gplots)
library(RColorBrewer)
library(plyr)
library(plotrix)
################################
# Superclass Heatmap #
################################
# Sample metadata and the superclass-level feature table
# (features in rows of the file; transposed so samples become rows).
metadata <- read.csv("full_norm_Apr25metadata_noPeruvianSwabs_housing_metadata.csv",sep=",")
ft <- read.csv("featuretable_superclass.tsv",sep="\t",row.names = 1)
ft <- t(ft)
my_palette <- colorRampPalette(c("blue", "white", "red"))(n = 15)
# Map feature-table row names to sample IDs; unmatched rows are blanks.
snames <- as.character(metadata$X.SampleID[match(rownames(ft),as.character(metadata$Sample.name))])
snames[is.na(snames)] <- "blank"
## change according to metadata category
Rowside_species <- as.character(metadata$village_socio[match(rownames(ft),as.character(metadata$Sample.name))])
Rowside_species[is.na(Rowside_species)] <- "blank"
rownames(ft) <- snames
# order featuretable according to village_socio category
# (numeric prefixes force the desired display order when sorting)
Rowside_species[which(Rowside_species == "blank")] <- "1_blank"
Rowside_species[which(Rowside_species == "Checherta")] <- "2_Checherta"
Rowside_species[which(Rowside_species == "Puerto Almendras")] <- "3_Puerto Almendras"
Rowside_species[which(Rowside_species == "Iquitos")] <- "4_Iquitos"
Rowside_species[which(Rowside_species == "Manaus low")] <- "5_Manaus low"
Rowside_species[which(Rowside_species == "Manaus middle")] <- "6_Manaus middle"
ft <- ft[order(Rowside_species),]
Rowside_species <- Rowside_species[order(Rowside_species)]
# One row-annotation colour per village/socioeconomic group.
colpal <- c("#1b9e77","#FF0000", "#008000", "#0000FF","#FFFF00","#f27304")
Rowside_cols <- colpal[as.numeric(as.factor(Rowside_species))]
## import ClassyFire scores and map them on the heatmap (calculate average per chemical class)
cl <- read.table("ClassyFire_InputforCytoscape_Amazon.csv",sep="\t",header=T,comment.char = "",stringsAsFactors = F,quote="")
cl$CF_superclass_score <- as.numeric(cl$CF_superclass_score)
scores <- ddply(cl, .(CF_superclass), summarize, score=mean(CF_superclass_score))
# Column annotation: darker grey = higher mean ClassyFire score
# (gray(0) is black, gray(1) is white).
Colside_cols <-gray(1-scores$score[match(colnames(ft),scores$CF_superclass)])
# Prefix each superclass label with its node count, e.g. "12 / Lipids".
Nnodes <- table(cl$CF_superclass)
NnodesNam <- names(Nnodes)
Nnodes <- as.vector(Nnodes)
names(Nnodes) <- NnodesNam
colnames(ft) <- paste(Nnodes[match(colnames(ft),names(Nnodes))],colnames(ft), sep = " / ")
# Render the heatmap with group legend and ClassyFire-score colour bar.
pdf(file="Superclass_HeatMapDendrogram.pdf", width=9, height=10)
par(mar=c(0, 0, 0, 0))
heatmap.2(ft,Rowv=FALSE, Colv=TRUE,cexCol = 1,cexRow = 0.3,scale="col",col = my_palette, RowSideColors=Rowside_cols, ColSideColors=Colside_cols, keysize = 0.8, key.ylab = NA, key.title = NA,density.info="none",tracecol=NA,margin=c(5,15)) #margin=c(5,15)
legend(y=0.9, x=0.9, xpd=TRUE,
legend = sort(unique(Rowside_species)),
col = colpal,
lty= 1,
lwd = 5,
cex=.7)
color.legend(0.9,0.93,1,0.96,c(0,0.5,1),gray.colors(100, start = 1, end = 0),cex = 0.7,align="rb")
text(x=0.95, y = 0.98,labels = "ClassyFire Score",cex=0.8, font =2)
dev.off()
|
f0ebf9bece5c09975ab5f6959c3da70767de0296
|
7c243482c07f0a2363c6723c1a90ceea595f4bd0
|
/cachematrix.R
|
4626d684c63258262bafabcbffd26429e8dd0dd0
|
[] |
no_license
|
oobii65/ProgrammingAssignment2
|
2fe09863b3df896ad06e3daec0865180404c1c3a
|
d7ea78f37c4744a2ed36f304e1af286d0eea1ba9
|
refs/heads/master
| 2021-01-14T14:35:07.600898
| 2014-11-16T19:19:56
| 2014-11-16T19:19:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,214
|
r
|
cachematrix.R
|
## makeCacheMatrix: create a special "matrix" object that can cache its
## inverse. Uses lexical scoping and the `<<-` operator so the cached
## inverse lives in the environment shared by the accessor functions.
##
## Returns a list of four functions:
##   set(y)          - replace the stored matrix and invalidate the cache
##   get()           - return the stored matrix
##   setinv(inverse) - store a computed inverse in the cache
##   getinv()        - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # BUG FIX: the original assigned `minv <<- NULL`, which created a
    # stray variable in the parent environment and left the stale inverse
    # in `inv`; clear `inv` so the cache is invalidated when the matrix
    # changes.
    inv <<- NULL
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of a cache-matrix object created by
## makeCacheMatrix().
##
## On a cache miss the inverse is computed with solve() and stored via
## setinv(); on a hit the cached value is returned directly and a
## "getting cached data" message is emitted.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, store, and fall through to return.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
b577d9e44ce70dcb5439c7b56c73f23b9e31896d
|
3875910f927debbe78f30387404e2309faae00c9
|
/R/api_response.R
|
2ea88d2af9dc3252f573dbba420db8d6950f9e5e
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/client-library-r
|
0fd44372e19196ab6b90c9c0ea5a86fd87c57a1e
|
e752cadf58135e2ebe9a4632a112acb95eec846f
|
refs/heads/main
| 2023-06-24T06:03:04.769727
| 2021-07-20T07:42:05
| 2021-07-20T07:42:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
api_response.R
|
#' @docType class
#' @title ApiResponse
#' @description ApiResponse Class: thin wrapper holding a raw httr response.
#' @format An \code{R6Class} generator object
#' @field response The raw response from the endpoint.
#' @export
ApiResponse <- R6::R6Class(
'ApiResponse',
public = list(
response = NULL,
# Store the raw httr response object.
initialize = function(response){
self$response <- response
},
# Response body decoded as UTF-8 text.
getContent = function () {
httr::content(self$response, "text", encoding = "UTF-8")
}
)
)
#' @docType class
#' @title ApiFileResponse
#' @description ApiFileResponse Class, for endpoints that return a file.
#' @format An \code{R6Class} generator object
#' @field response The raw response from the endpoint.
#' @export
ApiFileResponse <- R6::R6Class(
  'ApiFileResponse',
  public = list(
    response = NULL,

    # Store the raw httr response object.
    initialize = function(response) {
      self$response <- response
    },

    # Parsed response body (httr chooses the parser from the content type).
    getContent = function() {
      httr::content(self$response)
    },

    # File name extracted from the Content-Disposition header.
    # NOTE(review): assumes the header has exactly the form
    # 'attachment; filename="<name>"' — the name is sliced out by fixed
    # offsets (23 .. length-1). Confirm against the server's header format.
    getFileName = function() {
      content_disposition <- httr::headers(self$response)[['content-disposition']]
      file_name <- stringr::str_trim(substring(content_disposition, 23, stringr::str_length(content_disposition) - 1))
      # BUG FIX: the original body ended on the assignment above, so the
      # computed name was returned invisibly; return it explicitly.
      file_name
    }
  ))
|
8343122f58c2939e49089c46444723eee7ce4c8d
|
3f06f78873e8cfabceff52eb2287f1e963b1f682
|
/tests/testthat/test-get_truth.R
|
0126bc21e50365a72e224b68d60118f92181ff62
|
[] |
no_license
|
reichlab/covidHubUtils
|
449fdd47b4cd20fb035b520088ff2dbc21464f81
|
7258bc1b146906b31e9d31d19fd13cf73259b5a0
|
refs/heads/master
| 2023-05-25T02:19:54.033459
| 2023-05-22T17:06:49
| 2023-05-22T17:06:49
| 289,015,121
| 18
| 17
| null | 2023-05-22T16:53:28
| 2020-08-20T13:34:43
|
HTML
|
UTF-8
|
R
| false
| false
| 32,115
|
r
|
test-get_truth.R
|
context("get_truth")
library(covidHubUtils)
library(covidData)
library(zoltr)
library(dplyr)
library(mockery)
# Column-name contract: each of the four truth tables produced by
# preprocess_jhu() must have exactly date/location/location_name/value.
test_that("preprocess_jhu files has expected columns", {
actual <- covidHubUtils::preprocess_jhu(
save_location = "."
)
actual_cumulative_deaths_column_names <- colnames(actual$cumulative_deaths)
actual_incident_deaths_column_names <- colnames(actual$incident_deaths)
actual_cumulative_cases_column_names <- colnames(actual$cumulative_cases)
actual_incident_cases_column_names <- colnames(actual$incident_cases)
expected_column_names <- c("date", "location", "location_name", "value")
expect_equal(actual_cumulative_deaths_column_names, expected_column_names)
expect_equal(actual_incident_deaths_column_names, expected_column_names)
expect_equal(actual_cumulative_cases_column_names, expected_column_names)
expect_equal(actual_incident_cases_column_names, expected_column_names)
})
# Key contract: the (date, location) combinations in the preprocess_jhu()
# output must match what covidData::load_jhu_data() yields after joining
# location names and dropping NA rows.
test_that("preprocess_jhu files has expected combinations of location, week", {
# Location name
location_names <- covidData::fips_codes[, c("location", "location_name")]
# Spatial resolutions
spatial_resolutions <- c("national", "state", "county")
actual <- covidHubUtils::preprocess_jhu(
save_location = "."
)
actual_cumulative_deaths <- actual$cumulative_deaths %>%
dplyr::select(date, location)
actual_incident_deaths <- actual$incident_deaths %>%
dplyr::select(date, location)
# NOTE(review): location 11001 is excluded — presumably the DC county
# FIPS, which duplicates the district-level series; confirm.
expected_deaths <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "daily",
measure = "deaths"
) %>%
dplyr::left_join(location_names, by = "location") %>%
dplyr::filter(location != "11001")
expected_cumulative_deaths <- expected_deaths[, c("date", "location", "location_name", "cum")] %>%
tidyr::drop_na(any_of(c("location_name", "cum"))) %>%
dplyr::select(date, location)
expected_incident_deaths <- expected_deaths[, c("date", "location", "location_name", "inc")] %>%
tidyr::drop_na(any_of(c("location_name", "inc"))) %>%
dplyr::select(date, location)
expect_equal(actual_cumulative_deaths, expected_cumulative_deaths)
expect_equal(actual_incident_deaths, expected_incident_deaths)
# Same check for the cases tables.
actual_cumulative_cases <- actual$cumulative_cases %>%
dplyr::select(date, location)
actual_incident_cases <- actual$incident_cases %>%
dplyr::select(date, location)
expected_cases <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "daily",
measure = "cases"
) %>%
dplyr::left_join(location_names, by = "location") %>%
dplyr::filter(location != "11001")
expected_cumulative_cases <- expected_cases[, c("date", "location", "location_name", "cum")] %>%
tidyr::drop_na(any_of(c("location_name", "cum"))) %>%
dplyr::select(date, location)
expected_incident_cases <- expected_cases[, c("date", "location", "location_name", "inc")] %>%
tidyr::drop_na(any_of(c("location_name", "inc"))) %>%
dplyr::select(date, location)
expect_equal(actual_cumulative_cases, expected_cumulative_cases)
expect_equal(actual_incident_cases, expected_incident_cases)
})
# Value contract: complements the key test above by comparing only the
# `value` columns of the preprocess_jhu() output against covidData's
# cum/inc columns.
test_that("preprocess_jhu files has the same cumulative and incident values as output from covidData", {
# Location name
location_names <- covidData::fips_codes[, c("location", "location_name")]
# Spatial resolutions
spatial_resolutions <- c("national", "state", "county")
actual <- covidHubUtils::preprocess_jhu(
save_location = "."
)
actual_cumulative_deaths <- actual$cumulative_deaths %>%
dplyr::select(value)
actual_incident_deaths <- actual$incident_deaths %>%
dplyr::select(value)
# NOTE(review): 11001 excluded as in the key test — presumably the DC
# county duplicate of the district-level series.
expected_deaths <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "daily",
measure = "deaths"
) %>%
dplyr::left_join(location_names, by = "location") %>%
dplyr::filter(location != "11001")
expected_cumulative_deaths <- expected_deaths[, c("date", "location", "location_name", "cum")] %>%
tidyr::drop_na(any_of(c("location_name", "cum"))) %>%
dplyr::rename(value = cum) %>%
dplyr::select(value)
expected_incident_deaths <- expected_deaths[, c("date", "location", "location_name", "inc")] %>%
tidyr::drop_na(any_of(c("location_name", "inc"))) %>%
dplyr::rename(value = inc) %>%
dplyr::select(value)
expect_equal(actual_cumulative_deaths, expected_cumulative_deaths)
expect_equal(actual_incident_deaths, expected_incident_deaths)
# Same check for the cases tables.
actual_cumulative_cases <- actual$cumulative_cases %>%
dplyr::select(value)
actual_incident_cases <- actual$incident_cases %>%
dplyr::select(value)
expected_cases <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "daily",
measure = "cases"
) %>%
dplyr::left_join(location_names, by = "location") %>%
dplyr::filter(location != "11001")
expected_cumulative_cases <- expected_cases[, c("date", "location", "location_name", "cum")] %>%
tidyr::drop_na(any_of(c("location_name", "cum"))) %>%
dplyr::rename(value = cum) %>%
dplyr::select(value)
expected_incident_cases <- expected_cases[, c("date", "location", "location_name", "inc")] %>%
tidyr::drop_na(any_of(c("location_name", "inc"))) %>%
dplyr::rename(value = inc) %>%
dplyr::select(value)
expect_equal(actual_cumulative_cases, expected_cumulative_cases)
expect_equal(actual_incident_cases, expected_incident_cases)
})
# Column-name contract for the visualization truth files:
# location/epiweek/value (note: different schema from preprocess_jhu).
test_that("preprocess_visualization_truth files has expected columns", {
actual <- covidHubUtils::preprocess_visualization_truth(
save_location = "."
)
actual_cumulative_deaths_column_names <- colnames(actual$cumulative_deaths)
actual_incident_deaths_column_names <- colnames(actual$incident_deaths)
actual_cumulative_cases_column_names <- colnames(actual$cumulative_cases)
actual_incident_cases_column_names <- colnames(actual$incident_cases)
expected_column_names <- c("location", "epiweek", "value")
expect_equal(actual_cumulative_deaths_column_names, expected_column_names)
expect_equal(actual_incident_deaths_column_names, expected_column_names)
expect_equal(actual_cumulative_cases_column_names, expected_column_names)
expect_equal(actual_incident_cases_column_names, expected_column_names)
})
# Incident visualization values must be strictly positive — the
# preprocessing floors non-positive incident values at 0.1 for display
# (see the values test below), so every row should satisfy value > 0.
test_that("preprocess_visualization_truth files has incident values greater than 0", {
actual <- covidHubUtils::preprocess_visualization_truth(
save_location = "."
)
# Test incident deaths
actual_incident_deaths <- actual$incident_deaths %>%
dplyr::select(value)
actual_incident_deaths_ge_0 <- data.frame(actual_incident_deaths > 0)
expected_incident_deaths_ge_0 <- rep(c(TRUE), times = nrow(actual_incident_deaths))
expected_incident_deaths_ge_0 <- data.frame("value" = expected_incident_deaths_ge_0)
expect_equal(actual_incident_deaths_ge_0, expected_incident_deaths_ge_0)
# Test incident cases
actual_incident_cases <- actual$incident_cases %>%
dplyr::select(value)
actual_incident_cases_ge_0 <- data.frame(actual_incident_cases > 0)
expected_incident_cases_ge_0 <- rep(c(TRUE), times = nrow(actual_incident_cases))
expected_incident_cases_ge_0 <- data.frame("value" = expected_incident_cases_ge_0)
expect_equal(actual_incident_cases_ge_0, expected_incident_cases_ge_0)
})
# Location contract for the visualization files: state abbreviations,
# with the national series relabelled from "US" to "nat".
test_that("preprocess_visualization_truth files has state and national location from covidData", {
actual <- covidHubUtils::preprocess_visualization_truth(
save_location = "."
)
# Actual locations
actual_incident_deaths_locations <- actual$incident_deaths %>%
dplyr::select(location)
actual_incident_cases_locations <- actual$incident_cases %>%
dplyr::select(location)
actual_cumulative_deaths_locations <- actual$cumulative_deaths %>%
dplyr::select(location)
actual_cumulative_cases_locations <- actual$cumulative_cases %>%
dplyr::select(location)
# Expected locations
# Location name
location_names <- covidData::fips_codes[, c("location", "location_name", "abbreviation")]
# Spatial resolutions
spatial_resolutions <- c("national", "state")
# Expected data
cases_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "cases"
)
# Translate FIPS codes to abbreviations; the US total becomes "nat".
cases_dframes_locations <- dplyr::left_join(cases_dframes, location_names, by = "location") %>%
dplyr::select(abbreviation) %>%
dplyr::mutate(abbreviation = replace(abbreviation, abbreviation == "US", "nat")) %>%
dplyr::rename(location = abbreviation)
deaths_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "deaths"
)
deaths_dframes_locations <- dplyr::left_join(deaths_dframes, location_names, by = "location") %>%
dplyr::select(abbreviation) %>%
dplyr::mutate(abbreviation = replace(abbreviation, abbreviation == "US", "nat")) %>%
dplyr::rename(location = abbreviation)
# Test equality
expect_equal(actual_incident_deaths_locations, deaths_dframes_locations)
expect_equal(actual_cumulative_deaths_locations, deaths_dframes_locations)
expect_equal(actual_incident_cases_locations, cases_dframes_locations)
expect_equal(actual_cumulative_cases_locations, cases_dframes_locations)
})
# Epiweek formatting contract: the visualization files label each week as
# "{epiyear}{epiweek}" (e.g. "202005"), with the MMWR week shifted forward
# by one and rolled over into the next year past week 53.
test_that("preprocess_visualization_truth files has correct date formats", {
actual <- covidHubUtils::preprocess_visualization_truth(
save_location = "."
)
# Actual dates
actual_incident_deaths_dates <- actual$incident_deaths$epiweek
actual_incident_cases_dates <- actual$incident_cases$epiweek
actual_cumulative_deaths_dates <- actual$cumulative_deaths$epiweek
actual_cumulative_cases_dates <- actual$cumulative_cases$epiweek
# Expected dates
# Spatial resolutions
spatial_resolutions <- c("national", "state")
cases_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "cases"
)
cases_mmwr <- lapply(cases_dframes["date"], MMWRweek::MMWRweek)$date
# shift epiweek on axis
cases_mmwr_week <- cases_mmwr["MMWRweek"] + 1
cases_mmwr_year <- cases_mmwr["MMWRyear"]
cases_mmwr_year[which(cases_mmwr_week > 53), ] <- cases_mmwr_year[which(cases_mmwr_week > 53), ] + 1
cases_mmwr_week[which(cases_mmwr_week > 53), ] <- 1
# format date as "{epiyear}{epiweek}". Exp: "202005"
cases_mmwr_week <- data.frame(lapply(cases_mmwr_week, sprintf, fmt = "%02d"))
cases_dates <- paste(cases_mmwr_year$MMWRyear, cases_mmwr_week$MMWRweek, sep = "")
deaths_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "deaths"
)
deaths_mmwr <- lapply(deaths_dframes["date"], MMWRweek::MMWRweek)$date
# shift epiweek on axis
deaths_mmwr_week <- deaths_mmwr["MMWRweek"] + 1
deaths_mmwr_year <- deaths_mmwr["MMWRyear"]
deaths_mmwr_year[which(deaths_mmwr_week > 53), ] <- deaths_mmwr_year[which(deaths_mmwr_week > 53), ] + 1
deaths_mmwr_week[which(deaths_mmwr_week > 53), ] <- 1
# format date as "{epiyear}{epiweek}". Exp: "202005"
deaths_mmwr_week <- data.frame(lapply(deaths_mmwr_week, sprintf, fmt = "%02d"))
deaths_dates <- paste(deaths_mmwr_year$MMWRyear, deaths_mmwr_week$MMWRweek, sep = "")
# Test equality
expect_equal(actual_incident_deaths_dates, deaths_dates)
expect_equal(actual_cumulative_deaths_dates, deaths_dates)
expect_equal(actual_incident_cases_dates, cases_dates)
expect_equal(actual_cumulative_cases_dates, cases_dates)
})
# Value contract for the visualization files: values equal covidData's
# weekly cum/inc series after flooring non-positive entries at 0.1
# (done so the log-scale visualization has no zero/negative points).
test_that("preprocess_visualization_truth files has correct values from covidData", {
actual <- covidHubUtils::preprocess_visualization_truth(
save_location = "."
)
# Actual values
actual_incident_deaths_values <- actual$incident_deaths$value
actual_incident_cases_values <- actual$incident_cases$value
actual_cumulative_deaths_values <- actual$cumulative_deaths$value
actual_cumulative_cases_values <- actual$cumulative_cases$value
# Expected values
# Spatial resolutions
spatial_resolutions <- c("national", "state")
cases_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "cases"
)
# Threshold to 0 for incident values < 0, for visualization purpose only
cases_dframes$inc[cases_dframes$inc <= 0] <- 0.1
cases_dframes$cum[cases_dframes$cum == 0] <- 0.1
deaths_dframes <- covidData::load_jhu_data(
spatial_resolution = spatial_resolutions,
temporal_resolution = "weekly",
measure = "deaths"
)
# Threshold to 0 for incident values < 0, for visualization purpose only
deaths_dframes$inc[deaths_dframes$inc <= 0] <- 0.1
deaths_dframes$cum[deaths_dframes$cum == 0] <- 0.1
# Test equality
expect_equal(actual_incident_deaths_values, deaths_dframes$inc)
expect_equal(actual_cumulative_deaths_values, deaths_dframes$cum)
expect_equal(actual_incident_cases_values, cases_dframes$inc)
expect_equal(actual_cumulative_cases_values, cases_dframes$cum)
})
test_that("preprocess_hospitalization files has expected combinations of location, week", {
  # Location name
  location_names <- covidData::fips_codes[, c("location", "location_name")]
  actual <- covidHubUtils::preprocess_hospitalization(
    save_location = "."
  )
  # Only the (date, location) combinations matter for this test
  actual_incident_hosp <- actual$incident_hosp %>%
    dplyr::select(date, location)
  actual_cumulative_hosp <- actual$cumulative_hosp %>%
    dplyr::select(date, location)
  expected <- covidData::load_healthdata_data(
    spatial_resolution = c("national", "state"),
    temporal_resolution = "daily",
    measure = "hospitalizations"
  )
  # NOTE(review): after the join the name column is "location_name";
  # "location_names" never matches, so drop_na() effectively only checks "inc".
  # Confirm whether this intentionally mirrors the function under test.
  expected_incident_hosp <- expected[, c("date", "location", "inc")] %>%
    dplyr::left_join(location_names, by = "location") %>%
    tidyr::drop_na(any_of(c("location_names", "inc"))) %>%
    dplyr::select(date, location)
  expected_cumulative_hosp <- expected[, c("date", "location", "cum")] %>%
    dplyr::left_join(location_names, by = "location") %>%
    tidyr::drop_na(any_of(c("location_names", "cum"))) %>%
    dplyr::select(date, location)
  expect_equal(actual_incident_hosp, expected_incident_hosp)
  expect_equal(actual_cumulative_hosp, expected_cumulative_hosp)
})
test_that("preprocess_hospitalization can correctly reconstruct cumulative and incident values", {
  # Location name
  location_names <- covidData::fips_codes[, c("location", "location_name")]
  actual <- covidHubUtils::preprocess_hospitalization(
    save_location = "."
  )
  # Only the value columns matter for this test
  actual_incident_hosp <- actual$incident_hosp %>%
    dplyr::select(value)
  actual_cumulative_hosp <- actual$cumulative_hosp %>%
    dplyr::select(value)
  expected <- covidData::load_healthdata_data(
    spatial_resolution = c("national", "state"),
    temporal_resolution = "daily",
    measure = "hospitalizations"
  )
  # NOTE(review): "location_names" in any_of() never matches (the joined column
  # is "location_name"), so drop_na() effectively only checks the count column.
  expected_incident_hosp <- expected[, c("date", "location", "inc")] %>%
    dplyr::left_join(location_names, by = "location") %>%
    tidyr::drop_na(any_of(c("location_names", "inc"))) %>%
    dplyr::rename(value = inc) %>%
    dplyr::select(value)
  expected_cumulative_hosp <- expected[, c("date", "location", "cum")] %>%
    dplyr::left_join(location_names, by = "location") %>%
    tidyr::drop_na(any_of(c("location_names", "cum"))) %>%
    dplyr::rename(value = cum) %>%
    dplyr::select(value)
  expect_equal(actual_incident_hosp, expected_incident_hosp)
  expect_equal(actual_cumulative_hosp, expected_cumulative_hosp)
})
test_that("preprocess_truth_for_zoltar works: get exactly all combinations of locations,
targets and timezeros", {
  # Reduce both truth data frames to their (timezero, unit, horizon) keys
  cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Cumulative Deaths"
  ) %>%
    # get horizon number
    tidyr::separate(target, into = c("horizon", "other"), remove = FALSE, extra = "merge") %>%
    dplyr::mutate(horizon = as.integer(horizon)) %>%
    dplyr::select(timezero, unit, horizon)
  inc_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Incident Deaths"
  ) %>%
    # get horizon number
    tidyr::separate(target, into = c("horizon", "other"), remove = FALSE, extra = "merge") %>%
    dplyr::mutate(horizon = as.integer(horizon)) %>%
    dplyr::select(timezero, unit, horizon)
  # set up Zoltar connection
  zoltar_connection <- zoltr::new_connection()
  # Scalar condition: use short-circuit || rather than elementwise |.
  # NOTE(review): demo credentials are hard-coded in source; keep them out of
  # the repository if they are ever anything but public demo values.
  if (Sys.getenv("Z_USERNAME") == "" || Sys.getenv("Z_PASSWORD") == "") {
    zoltr::zoltar_authenticate(zoltar_connection, "zoltar_demo", "Dq65&aP0nIlG")
  } else {
    zoltr::zoltar_authenticate(
      zoltar_connection,
      Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD")
    )
  }
  # construct Zoltar project url
  the_projects <- zoltr::projects(zoltar_connection)
  project_url <- the_projects[the_projects$name == "COVID-19 Forecasts", "url"]
  # get all valid timezeros from zoltar
  zoltar_timezeros <- zoltr::timezeros(zoltar_connection, project_url)$timezero_date
  all_valid_fips <- covidHubUtils::hub_locations %>%
    dplyr::filter(geo_type == "state", fips != 74) %>%
    dplyr::pull(fips)
  # create a data frame with all combination of timezeros, units and horizons
  expected <- expand.grid(
    timezero = zoltar_timezeros,
    unit = all_valid_fips, horizon = 1:20
  ) %>%
    # calculate corresponding target end date
    dplyr::mutate(
      unit = as.character(unit),
      timezero = as.Date(timezero),
      target_end_date =
        covidHubUtils::calc_target_week_end_date(timezero, horizon)
    ) %>%
    # filter dates: only weeks that have already ended and fall within the
    # truth data's range are expected to be present
    dplyr::filter(
      timezero <= Sys.Date(),
      target_end_date <= Sys.Date(),
      target_end_date >= as.Date("2020-01-25")
    ) %>%
    dplyr::select(-target_end_date)
  expect_true(dplyr::all_equal(cum_deaths, expected))
  expect_true(dplyr::all_equal(inc_deaths, expected))
})
test_that("preprocess_truth_for_zoltar works: truth values for all duplicated locations
and targets are identical for all timezeros in the same week span", {
  cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Cumulative Deaths"
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(
      target_end_date =
        covidHubUtils::calc_target_week_end_date(
          timezero, as.numeric(horizon)
        )
    ) %>%
    # get number of possible unique values
    # Note: conditioning on a forecast horizon, timezeros in the same week span
    # will have the same target end date
    dplyr::group_by(target_end_date, unit, target) %>%
    dplyr::summarise(num_unique_values = length(unique(value)))
  inc_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Incident Deaths"
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(
      target_end_date =
        covidHubUtils::calc_target_week_end_date(
          timezero, as.numeric(horizon)
        )
    ) %>%
    # get number of possible unique values
    # Note: conditioning on a forecast horizon, timezeros in the same week span
    # will have the same target end date
    dplyr::group_by(target_end_date, unit, target) %>%
    dplyr::summarise(num_unique_values = length(unique(value)))
  # expect only one unique value for each combination of target_end_date,
  # unit and target, i.e. duplicated rows never disagree on value
  expect_true(all(cum_deaths$num_unique_values == 1))
  expect_true(all(inc_deaths$num_unique_values == 1))
})
test_that("preprocess_truth_for_zoltar works: could construct cumulative values in JHU time
series data from cumulative values in function output
with a date in an earlier week as issue date", {
  issue_date <- as.Date("2020-12-12")
  # load function output from configure zoltar_truth
  # and calculate target end date
  cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Cumulative Deaths",
    issue_date = issue_date
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(target_end_date = covidHubUtils::calc_target_week_end_date(
      timezero, as.numeric(horizon)
    )) %>%
    dplyr::select(timezero, target_end_date, unit, horizon, value)
  # read in the JHU time series snapshot for this issue date;
  # aggregate county-level counts to get cumulative deaths for each state
  expected_state_cum_deaths <- readr::read_csv(
    paste0(
      "test-data/test-preprocess_truth_for_zoltar/",
      as.character(issue_date), "/", as.character(issue_date),
      "_time_series_covid19_deaths_US.csv"
    )
  ) %>%
    # the wide date columns are named like "12/12/20"
    tidyr::pivot_longer(
      matches("^\\d{1,2}\\/\\d{1,2}\\/\\d{2,4}$"),
      names_to = "date",
      values_to = "cum"
    ) %>%
    dplyr::mutate(
      date = as.character(lubridate::mdy(date))
    ) %>%
    dplyr::group_by(Province_State, date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::left_join(covidHubUtils::hub_locations,
      by = c("Province_State" = "location_name")
    ) %>%
    dplyr::filter(geo_type == "state", fips != 74) %>%
    dplyr::select(-population, -geo_type, -geo_value, -abbreviation)
  # aggregate state-level counts to get national cumulative deaths
  expected_national_cum_deaths <- expected_state_cum_deaths %>%
    dplyr::group_by(date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fips = "US")
  # merge JHU cumulative counts with function output and calculate count difference
  cum_deaths <- cum_deaths %>%
    dplyr::left_join(
      dplyr::bind_rows(expected_state_cum_deaths, expected_national_cum_deaths),
      by = c("unit" = "fips", "target_end_date" = "date")
    ) %>%
    # Note from covidData: we are off by a total of 3 deaths attributed to Diamond Princess.
    dplyr::mutate(diff = value - cum)
  # the only possible values for difference should be 0, 1, 3.
  # %in% is robust to the order and number of distinct values, unlike the
  # previous elementwise `==` against unique(), which silently depended on
  # unique() returning exactly c(0, 1, 3) in that order.
  expect_true(all(cum_deaths$diff %in% c(0, 1, 3)))
  # differences should only occur at national level
  expect_true(all(cum_deaths[cum_deaths$diff == 3, ]$unit == "US"))
  expect_true(all(cum_deaths[cum_deaths$diff == 1, ]$unit == "US"))
})
test_that("preprocess_truth_for_zoltar works: could construct cumulative values in JHU time
series data from cumulative values in function output
with a date in a later week as issue date", {
  issue_date <- as.Date("2020-12-14")
  # load function output from configure zoltar_truth
  # and calculate target end date
  cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Cumulative Deaths",
    issue_date = issue_date
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(target_end_date = covidHubUtils::calc_target_week_end_date(
      timezero, as.numeric(horizon)
    )) %>%
    dplyr::select(timezero, target_end_date, unit, horizon, value)
  # read in the JHU time series snapshot for this issue date;
  # aggregate county-level counts to get cumulative deaths for each state
  expected_state_cum_deaths <- readr::read_csv(
    paste0(
      "test-data/test-preprocess_truth_for_zoltar/",
      as.character(issue_date), "/", as.character(issue_date),
      "_time_series_covid19_deaths_US.csv"
    )
  ) %>%
    # the wide date columns are named like "12/14/20"
    tidyr::pivot_longer(
      matches("^\\d{1,2}\\/\\d{1,2}\\/\\d{2,4}$"),
      names_to = "date",
      values_to = "cum"
    ) %>%
    dplyr::mutate(
      date = as.character(lubridate::mdy(date))
    ) %>%
    dplyr::group_by(Province_State, date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::left_join(covidHubUtils::hub_locations,
      by = c("Province_State" = "location_name")
    ) %>%
    dplyr::filter(geo_type == "state", fips != 74) %>%
    dplyr::select(-population, -geo_type, -geo_value, -abbreviation)
  # aggregate state-level counts to get national cumulative deaths
  expected_national_cum_deaths <- expected_state_cum_deaths %>%
    dplyr::group_by(date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fips = "US")
  # merge JHU cumulative counts with function output and calculate count difference
  cum_deaths <- cum_deaths %>%
    dplyr::left_join(
      dplyr::bind_rows(expected_state_cum_deaths, expected_national_cum_deaths),
      by = c("unit" = "fips", "target_end_date" = "date")
    ) %>%
    # Note from covidData: we are off by a total of 3 deaths attributed to Diamond Princess.
    dplyr::mutate(diff = value - cum)
  # the only possible values for difference should be 0, 1, 3.
  # %in% is robust to the order and number of distinct values, unlike the
  # previous elementwise `==` against unique().
  expect_true(all(cum_deaths$diff %in% c(0, 1, 3)))
  # differences should only occur at national level
  expect_true(all(cum_deaths[cum_deaths$diff == 3, ]$unit == "US"))
  expect_true(all(cum_deaths[cum_deaths$diff == 1, ]$unit == "US"))
})
test_that("preprocess_truth_for_zoltar works: could construct cumulative values in JHU time
series data from incident values in function output
with a date in an earlier week as issue date", {
  issue_date <- as.Date("2020-12-12")
  inc_to_cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Incident Deaths",
    issue_date = issue_date
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(target_end_date = covidHubUtils::calc_target_week_end_date(
      timezero, as.numeric(horizon)
    )) %>%
    dplyr::select(target_end_date, unit, value) %>%
    # rows with timezero in the same week span are duplicate
    dplyr::distinct(target_end_date, unit, .keep_all = TRUE) %>%
    dplyr::mutate(inc = value) %>%
    # calculate cumulative counts from incident counts
    dplyr::group_by(unit) %>%
    dplyr::mutate(tentative_cum = cumsum(inc)) %>%
    dplyr::ungroup()
  # read in the JHU time series snapshot for this issue date;
  # aggregate county-level counts to get cumulative deaths for each state
  expected_state_cum_deaths <- readr::read_csv(
    paste0(
      "test-data/test-preprocess_truth_for_zoltar/",
      as.character(issue_date), "/", as.character(issue_date),
      "_time_series_covid19_deaths_US.csv"
    )
  ) %>%
    tidyr::pivot_longer(
      matches("^\\d{1,2}\\/\\d{1,2}\\/\\d{2,4}$"),
      names_to = "date",
      values_to = "cum"
    ) %>%
    dplyr::mutate(
      date = as.character(lubridate::mdy(date))
    ) %>%
    dplyr::group_by(Province_State, date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::left_join(covidHubUtils::hub_locations,
      by = c("Province_State" = "location_name")
    ) %>%
    dplyr::filter(geo_type == "state", fips != 74) %>%
    dplyr::select(-population, -geo_type, -geo_value, -abbreviation)
  # aggregate state-level counts to get national cumulative deaths
  expected_national_cum_deaths <- expected_state_cum_deaths %>%
    dplyr::group_by(date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fips = "US")
  # compare the reconstructed cumulative counts against JHU's
  inc_to_cum_deaths <- inc_to_cum_deaths %>%
    dplyr::left_join(
      dplyr::bind_rows(expected_state_cum_deaths, expected_national_cum_deaths),
      by = c("unit" = "fips", "target_end_date" = "date")
    ) %>%
    # Note from covidData: we are off by a total of 3 deaths attributed to Diamond Princess.
    dplyr::mutate(diff = tentative_cum - cum)
  # the only possible values for difference should be 0, 1, 3.
  # %in% is robust to the order and number of distinct values, unlike the
  # previous elementwise `==` against unique().
  expect_true(all(inc_to_cum_deaths$diff %in% c(0, 1, 3)))
  # differences should only occur at national level
  expect_true(all(inc_to_cum_deaths[inc_to_cum_deaths$diff == 3, ]$unit == "US"))
  expect_true(all(inc_to_cum_deaths[inc_to_cum_deaths$diff == 1, ]$unit == "US"))
})
test_that("preprocess_truth_for_zoltar works: could construct cumulative values in JHU time
series data from incident values in function output
with a date in an later week as issue date", {
  issue_date <- as.Date("2020-12-14")
  inc_to_cum_deaths <- covidHubUtils::preprocess_truth_for_zoltar(
    target = "Incident Deaths",
    issue_date = issue_date
  ) %>%
    # get horizon number
    tidyr::separate(target,
      into = c("horizon", "other"),
      remove = FALSE, extra = "merge"
    ) %>%
    # calculate target_end_date
    dplyr::mutate(target_end_date = covidHubUtils::calc_target_week_end_date(
      timezero, as.numeric(horizon)
    )) %>%
    dplyr::select(target_end_date, unit, value) %>%
    # rows with timezero in the same week span are duplicate
    dplyr::distinct(target_end_date, unit, .keep_all = TRUE) %>%
    dplyr::mutate(inc = value) %>%
    # calculate cumulative counts from incident counts
    dplyr::group_by(unit) %>%
    dplyr::mutate(tentative_cum = cumsum(inc)) %>%
    dplyr::ungroup()
  # read in the JHU time series snapshot for this issue date;
  # aggregate county-level counts to get cumulative deaths for each state
  expected_state_cum_deaths <- readr::read_csv(
    paste0(
      "test-data/test-preprocess_truth_for_zoltar/",
      as.character(issue_date), "/", as.character(issue_date),
      "_time_series_covid19_deaths_US.csv"
    )
  ) %>%
    tidyr::pivot_longer(
      matches("^\\d{1,2}\\/\\d{1,2}\\/\\d{2,4}$"),
      names_to = "date",
      values_to = "cum"
    ) %>%
    dplyr::mutate(
      date = as.character(lubridate::mdy(date))
    ) %>%
    dplyr::group_by(Province_State, date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::left_join(covidHubUtils::hub_locations,
      by = c("Province_State" = "location_name")
    ) %>%
    dplyr::filter(geo_type == "state", fips != 74) %>%
    dplyr::select(-population, -geo_type, -geo_value, -abbreviation)
  # aggregate state-level counts to get national cumulative deaths
  expected_national_cum_deaths <- expected_state_cum_deaths %>%
    dplyr::group_by(date) %>%
    dplyr::summarize(cum = sum(cum)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(fips = "US")
  # compare the reconstructed cumulative counts against JHU's
  inc_to_cum_deaths <- inc_to_cum_deaths %>%
    dplyr::left_join(
      dplyr::bind_rows(expected_state_cum_deaths, expected_national_cum_deaths),
      by = c("unit" = "fips", "target_end_date" = "date")
    ) %>%
    # Note from covidData: we are off by a total of 3 deaths attributed to Diamond Princess.
    dplyr::mutate(diff = tentative_cum - cum)
  # the only possible values for difference should be 0, 1, 3.
  # %in% is robust to the order and number of distinct values, unlike the
  # previous elementwise `==` against unique().
  expect_true(all(inc_to_cum_deaths$diff %in% c(0, 1, 3)))
  # differences should only occur at national level
  expect_true(all(inc_to_cum_deaths[inc_to_cum_deaths$diff == 3, ]$unit == "US"))
  expect_true(all(inc_to_cum_deaths[inc_to_cum_deaths$diff == 1, ]$unit == "US"))
})
test_that("save_truth_for_zoltar works: could calculate and save
differences between new version and old version", {
  # Stub out preprocessing and all Zoltar I/O so the test is hermetic.
  # mock() returns its canned values in call order: cumulative first, then incident.
  mock_preprocess <- mock(
    readr::read_csv("test-data/test-get_truth/test_cum_death.csv"),
    readr::read_csv("test-data/test-get_truth/test_inc_death.csv")
  )
  mock_connection <- mock(zoltr::new_connection("http://example.com"))
  mock_url <- mock("http://example.com/api/project/1/")
  mock_zoltar_query <- mock(readr::read_csv("test-data/test-get_truth/test_old_zoltar_truth.csv"))
  # NOTE(review): testthat::with_mock is deprecated in testthat 3e; consider
  # mockery::stub or local_mocked_bindings when upgrading.
  testthat::with_mock("preprocess_truth_for_zoltar" = mock_preprocess, {
    testthat::with_mock("setup_zoltar_connection" = mock_connection, {
      testthat::with_mock("get_zoltar_project_url" = mock_url, {
        testthat::with_mock("zoltr::do_zoltar_query" = mock_zoltar_query, {
          save_truth_for_zoltar(save_location = "test-data/test-get_truth") # uses mock value zoltar_truth_old, saves to zoltar-truth.csv
          # the saved file should contain exactly the expected diff rows
          zoltar_truth_diff <- readr::read_csv("test-data/test-get_truth/zoltar-truth.csv")
          exp_zoltar_truth_diff <- readr::read_csv("test-data/test-get_truth/test_expected_diff.csv")
          expect_equal(zoltar_truth_diff, exp_zoltar_truth_diff)
        })
      })
    })
  })
})
|
d801278c87b7102f456252b9cfbd6570186291cf
|
d5c75d40b898f9ca0ceda13fe05f2a33a8a94512
|
/R/convHull.R
|
8a80102ddf46f982b3eabd5cfb45e2ddc2242fc4
|
[] |
no_license
|
antoine-182/dismo
|
2c5e73bc2db03182d9010b2d0cce554f238c46c3
|
21f6983f618f0cdbe285f0031129c624594bd2bd
|
refs/heads/master
| 2021-06-09T18:05:55.739415
| 2017-01-09T08:49:36
| 2017-01-09T08:49:36
| 109,248,869
| 1
| 0
| null | 2017-11-02T10:15:26
| 2017-11-02T10:15:26
| null |
UTF-8
|
R
| false
| false
| 2,139
|
r
|
convHull.R
|
# Author: Robert J. Hijmans
# Date : February 2010
# Version 0.1
# Licence GPL v3

# S4 class for a convex-hull "distribution model": the presence points
# (inherited from DistModel) plus the hull polygon(s) fitted around them.
setClass('ConvexHull',
  contains = 'DistModel',
  representation (
    # one or more convex-hull polygons; the attached data frame holds an id per hull
    polygons='SpatialPolygonsDataFrame'
  ),
  prototype (
  ),
  validity = function(object) {
    return(TRUE)
  }
)
# accessor: return the hull polygons stored in the model
setMethod("polygons", "ConvexHull",
  function(obj) {
    obj@polygons
  }
)
# accessor: return only the geometry of the hull polygons (drops the data frame)
setMethod("geometry", "ConvexHull",
  function(obj) {
    geometry(obj@polygons)
  }
)
# plot the hull polygons; extra arguments are passed through to sp::plot
setMethod("plot", signature(x='ConvexHull', y='missing'),
  function(x, ...) {
    sp::plot(x@polygons, ...)
  }
)
# define the 'convHull' generic unless another loaded package already provides it
if (!isGeneric("convHull")) {
  setGeneric("convHull", function(p, ...)
    standardGeneric("convHull"))
}
# core method: fit convex hull(s) around a two-column coordinate matrix.
# n = number of k-means clusters to hull separately (1 = one hull over all points);
# crs = coordinate reference system assigned to the resulting polygons
setMethod('convHull', signature(p='matrix'),
  function(p, n=1, crs=NA, ...) {
    ch <- new('ConvexHull')
    ch@presence <- data.frame(p)
    ch@polygons <- .generateConvexHulls(p, n, dissolve=FALSE)
    crs(ch@polygons) <- crs
    return(ch)
  }
)
# data.frame input: delegate to the matrix method
setMethod('convHull', signature(p='data.frame'),
  function(p, ...) {
    convHull(as.matrix(p), ...)
  }
)
# SpatialPoints input: use the point coordinates and carry over the points' CRS
setMethod('convHull', signature(p='SpatialPoints'),
  function(p, ...) {
    convHull(coordinates(p), crs=p@proj4string, ...)
  }
)
# Partition xy into k groups with k-means and return one convex hull per group,
# combined into a single (optionally dissolved) SpatialPolygons object.
.generate_k_ConvexHulls <- function(xy, k, dissolve=FALSE) {
  membership <- kmeans(xy, k, 100)$cluster
  hulls <- lapply(unique(membership), function(g) {
    pts <- xy[membership == g, ]
    # chull() returns the indices of the hull vertices in order
    spPolygons(pts[chull(pts), ])
  })
  aggregate(do.call(bind, hulls), dissolve=dissolve)
}
# Build convex hulls around coordinates xy for one or more cluster counts n.
# Returns a SpatialPolygonsDataFrame with an 'id' column (one row per polygon).
.generateConvexHulls <- function(xy, n=1, dissolve=FALSE) {
  # keep only unique, complete coordinate pairs
  xy <- unique( stats::na.omit(xy[, 1:2]) )
  if (nrow(xy) < 3) { stop ('Insufficient number of points to make a Convex Hull; you need at least 3 unique points' ) }
  # clamp each requested cluster count to [1, nrow/3] so that every cluster
  # can hold at least 3 points, then drop duplicate counts
  n <- pmax(1, round(n))
  n <- pmin(n, floor(nrow(xy) / 3))
  n <- unique(n)
  if (length(n) == 1) {
    if (n == 1) {
      # single hull around all points
      h <- xy[chull(xy), ]
      r <- spPolygons(h)
    } else {
      r <- .generate_k_ConvexHulls(xy, n, dissolve=dissolve)
    }
  } else { # multiple number of clusters
    pols <- list()
    for (k in n) {
      # NOTE(review): c() over a list and a Spatial* object relies on c()'s
      # coercion behavior; verify it yields a flat list suitable for bind()
      pols <- c(pols, .generate_k_ConvexHulls(xy, k, dissolve=dissolve))
    }
    r <- do.call(bind, pols)
  }
  SpatialPolygonsDataFrame(r, data.frame(id=1:length(r)))
}
|
0984b489772661f3bf959820c64b055ac491aeef
|
993a981d6cc20f97e68d5fb4bbac24b2bb9b1995
|
/server.R
|
e6c03e30bcd690d20270f0be593f70b82ad19809
|
[] |
no_license
|
foolszany/dataProductsCourseProject1
|
11685fa26c4d115e56138e2d461e4559e4229dcd
|
ac04f2ce82fd3c4d50d268edcbb3d2bd3e9374ad
|
refs/heads/master
| 2016-09-05T19:53:01.986859
| 2015-02-23T18:27:13
| 2015-02-23T18:27:13
| 31,048,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,319
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(caret)
library(ggvis)
# raw per-player World Cup statistics table (one row per player)
WCraw <- read.csv("data/worldCup.csv")
## eliminate all players less than 50 minutes
WCraw <- WCraw[WCraw$MINUTES_PLAYED>=50,]
# drop the first five (identifier) columns, keeping the numeric stats
WCtrim <- WCraw[,-c(1:5)]
## calculate normalized values of minutes played
# scale a stat column to a per-90-minutes rate
# NOTE(review): depends on the global WCtrim$MINUTES_PLAYED vector, so it is
# only valid for columns row-aligned with WCtrim
ninetyMinutes <- function (x){
  return((x/WCtrim$MINUTES_PLAYED)*90)
}
WCstats <- as.data.frame(lapply(WCtrim, ninetyMinutes))
## replace variables that should not have been divided by minutes played
WCstats$TOP_SPEED <- WCtrim$TOP_SPEED
WCstats$PASSES_COMPLETED_PERCENT <- WCtrim$PASSES_COMPLETED_PERCENT
WCstats$CROSSES_COMPLETED_PERCENT <- WCtrim$CROSSES_COMPLETED_PERCENT
# combine with player name and add back column name
WCstats <- cbind(WCraw[,1:4],WCstats)
colnames(WCstats)[1:4] <- c("PLAYER", "LEAGUE", "POSITION", "TEAM")
playerData <- WCstats
playerData <- WCstats
## training set
training <- playerData[,c("PLAYER", "POSITION", "TOP_SPEED", "TOTAL_GOALS_SCORED", "DISTANCE_COVERED", "TOTAL_PASSES", "CROSSES", "SOLO_RUNS_INTO_AREA")]
training1 <- lapply(training[,-c(1,2)], scale)
training2 <- (as.data.frame(training1))
nrow(training2) ## 523
training2 <- replace(training2, is.na(training2), 0)
km <- kmeans(training2, 50, iter.max=100, nstart=1)
plot(training2$TOP_SPEED, training2$CROSSES)
points(km$centers, col="red", pch=20)
myResults <- cbind(training, km$cluster)
colnames(myResults) <- c("PLAYER", "POSITION", "TOP_SPEED", "TOTAL_GOALS_SCORED", "DISTANCE_COVERED", "TOTAL_PASSES", "CROSSES", "SOLO_RUNS_INTO_AREA", "CLUSTER")
compResults <- function(x){
myResults[myResults$CLUSTER==x,c(1,2)]
}
yy <- myResults[myResults$CLUSTER==3, c(1,2)]
# Index of the k-means centroid nearest (Euclidean distance) to point x.
# Relies on the globally fitted `km` object's centers matrix.
closest.cluster <- function(x) {
  deltas <- sweep(km$centers, 2, x)
  dists <- sqrt(rowSums(deltas^2))
  which.min(dists)[1]
}
shinyServer(
  function(input, output){
    # the two user-selected columns of the player table, for the scatter plot
    selectedData <- reactive({
      playerData[, c(input$xcol, input$ycol)]
    })
    ##selectedData <- as.data.frame(playerData[,c(input$xcol, input$ycol, playerData$POSITION)])
    output$oPosition <- renderPrint({input$Position})
    output$graph1 <- renderPlot ({
      plot(selectedData(), col="red")
    })
    ## data for kmeans
    # the six user-entered stat values describing a hypothetical player
    df <- reactive({c(input$topSpeed, input$goals, input$distance, input$passes, input$crosses, input$soloRuns) })
    # echo the raw input vector back to the UI
    output$view <- renderText({
      ## aa <- c(input$topSpeed, input$goals, input$distance, input$passes, input$crosses, input$soloRuns)
      df()
      ##cluster2 <- apply(aa, 1, closest.cluster)
    })
    # index of the cluster closest to the standardized input
    output$oresults <- renderText({
      df1 <- df()
      # standardize the input with the training columns' means/sds so it is
      # comparable to the scaled data the clusters were fit on
      means <- apply(training[,-c(1,2)], 2, mean, na.rm=TRUE)
      sds <- apply(training[,-c(1,2)], 2, sd, na.rm=TRUE)
      df1 <- (df1-means)/sds
      #df1 <- c(((df1[1]-mean(training[,3]))/sd(training[,3])), ((df1[2]-mean(training[,4]))/sd(training[,4])), ((df1[3]-mean(training[,5]))/sd(training[,5])), ((df1[4]-mean(training[,6]))/sd(training[,6])), ((df1[5]-mean(training[,7]))/sd(training[,7])), ((df1[6]-mean(training[,8]))/sd(training[,8])) )
      closest.cluster(df1)
    })
    # table of real players falling in the same cluster as the input
    output$oresults2 <- renderTable({
      df1 <- df()
      means <- apply(training[,-c(1,2)], 2, mean, na.rm=TRUE)
      sds <- apply(training[,-c(1,2)], 2, sd, na.rm=TRUE)
      df1 <- (df1-means)/sds
      compResults(closest.cluster(df1))
    })
  })
|
bb426de1412ea3caa4b6d343fefd64766bb11999
|
9326d857c238ff56f993437fb44a5c90961d0753
|
/tests/testthat/test-input_field.R
|
d39143ef2993d15146cf340fe317c9bdf39655fe
|
[] |
no_license
|
moj-analytical-services/shinyGovstyle
|
e1e9b4062710b229f269f9b0bb58c1398383f7e1
|
a033342e971b9f090c06b6e17b82b20d27dce50c
|
refs/heads/master
| 2023-07-11T05:45:21.430131
| 2022-02-22T10:36:38
| 2022-02-22T10:36:38
| 192,864,104
| 34
| 4
| null | 2022-02-07T12:41:32
| 2019-06-20T06:41:58
|
CSS
|
UTF-8
|
R
| false
| false
| 781
|
r
|
test-input_field.R
|
test_that("field works", {
  # build a field of three text inputs with explicit widths and error support
  field_check <- input_field(
    legend ="List of three text boxes in a field",
    labels = c("Field 1", "Field 2", "Field 3"),
    inputIds = c("field1", "field2", "field3"),
    widths = c(30,20,10),
    error = TRUE)
  # component is a tag structure with three top-level parts
  expect_equal(length(field_check), 3)
  # with error = TRUE, each input carries a (hidden) govuk error message element
  expect_identical(
    paste(field_check$children[[2]]$`Field 1`$children[[2]]$attribs$class,
      field_check$children[[2]]$`Field 1`$children[[2]]$attribs[3]$class),
    "govuk-error-message shinyjs-hide"
  )
})
test_that("field works with null width", {
  # widths argument omitted: the component should still build with default widths
  field_check <- input_field(
    legend ="List of three text boxes in a field",
    labels = c("Field 1", "Field 2", "Field 3"),
    inputIds = c("field1", "field2", "field3")
  )
  expect_equal(length(field_check), 3)
})
|
90a1347376dd52286a9cb41ea00fb49f100dbca7
|
216af4351d41dfa6796d87a497498d57d0cb60a9
|
/cachematrix.R
|
af4f53e4cc11732ff70ef92407ceefc9bfc79860
|
[] |
no_license
|
Gtole/ProgrammingAssignment2
|
d37d39d4ba43b5d2fc0b834cdf7b5cfc37139504
|
7925275857eca3b7114b75425c8dfa87306fab58
|
refs/heads/master
| 2022-12-07T15:01:47.424398
| 2020-09-01T04:54:17
| 2020-09-01T04:54:17
| 291,897,501
| 0
| 0
| null | 2020-09-01T04:40:37
| 2020-09-01T04:40:36
| null |
UTF-8
|
R
| false
| false
| 865
|
r
|
cachematrix.R
|
## makeCacheMatrix() wraps a matrix with accessors that cache its inverse;
## cachesolve() returns the inverse, reusing the cached value when available.
## once the matrix is created, I call the function with the matrix and compute the inverse
A<-matrix(c(0,1,0,2,1,1,2,1,2),3,3)
## Create a cache-aware matrix wrapper: a list of closures that share a stored
## matrix `x` and its (lazily computed) inverse `m` via the enclosing
## environment.
##
## Accessors in the returned list:
##   set(y)          replace the stored matrix and invalidate the cached inverse
##   get()           return the stored matrix
##   setinverse(inv) store a computed inverse
##   getinverse()    return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    # a new matrix invalidates any previously cached inverse
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  # fix: the getter previously declared a spurious `inverse` argument that was
  # never used; it is a plain accessor and takes no arguments
  getinverse <- function() m
  list(set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse)
}
## recover the inverse from cache and check if is not null
## Return the inverse of the special "matrix" object `x`, computing it with
## solve() only when no cached value is available. Extra arguments are passed
## through to solve(). Emits a message on a cache hit.
cachesolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
## demo: wrap A and compute (or fetch) its inverse
# NOTE(review): the result autoprints only when run at an interactive prompt or
# via Rscript; it is not printed when this file is source()d without echo
A1<-makeCacheMatrix(A)
cachesolve(A1)
|
0ee6d452f7b3d98c4788e409bddc3dfd2aa93f21
|
9463190790003d3cf888a8f5cff0f5a4c0383530
|
/Biostrings1/man/gapletterReplace.Rd
|
9433a61b611be32903001cadbbca41a3739f0be0
|
[] |
no_license
|
nw328/Biostrings
|
06a9f2d73466c28bdbf7ba90cbaa6002cabbcd75
|
ff7cfd71cd2b388b68234b17577a2ae919ee62f0
|
refs/heads/master
| 2020-12-25T21:13:20.681088
| 2015-08-26T08:55:38
| 2015-08-26T08:55:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
rd
|
gapletterReplace.Rd
|
\name{gapletter<-}
\alias{gapletter<-}
\title{ function to modify the gap letter in an alphabet }
\description{
This function modifies the gap letter in a "BioAlphabet" object.
}
\usage{
gapletter(x) <- value
}
\arguments{
\item{x}{ An object of class "BioAlphabet" }
\item{value}{ A single letter. }
}
\value{
The modified object of class "BioAlphabet"
}
\author{ Saikat DebRoy }
\seealso{\code{\link{BioAlphabet-class}} }
\examples{
d <- DNAAlphabet()
gapletter(d) <- 'N'
d
r <- RNAPatternAlphabet()
gapletter(r) <- '*'
r
}
\keyword{classes}
|
867d39d36c86499cd95304a50d057a69c909c65b
|
6e363f94fc8e336f9cd55a8ddd0b6fdf9b682214
|
/man/gene.remove.Rd
|
f6c571c2c6ff6b52bf4720e3c1a6d9010161ad86
|
[] |
no_license
|
cran/caRpools
|
52ac70eb3c8523b09dbe543fe594c09b904f2420
|
8162263c8aab9aa14bd7ea54b2d4af4cda0dfda4
|
refs/heads/master
| 2016-09-06T16:14:22.489946
| 2015-12-06T13:31:59
| 2015-12-06T13:31:59
| 40,494,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,711
|
rd
|
gene.remove.Rd
|
\name{gene.remove}
\alias{gene.remove}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Remove gene information from sgRNA data.frame
}
\description{
This function is used to remove genes/gene information from a data.frame containing pooled CRISPR screen data.
It is meant to exclude genes from the analysis and removes all entries belonging to a gene from the sgRNA data.frame.
}
\usage{
gene.remove(data, namecolumn = 1, toremove = NULL,
extractpattern = expression("^(.+?)_.+") )
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
data.frame with sgRNA readcounts. Must have one column with sgRNA names and one column with readcounts. Please note that the data must be formatted in a way, that gene names are included within the sgRNA name and can be extracted using the extractpattern expression.
e.g. GENE_sgRNA1 -> GENE as gene name, _ as the separator and sgRNA1 as the sgRNA identifier.
}
\item{namecolumn}{
%% ~~Describe \code{namecolumn} here~~
integer, indicates in which column the names are stored
}
\item{toremove}{
Vector of gene names that will be removed from sgRNA dataset. The gene name must be included in the sgRNA names in order to be extracted using the pattern defined in extractpattern. e.g. c=("gene1","gene2")
}
\item{extractpattern}{
Regular Expression, used to extract the gene name from the sgRNA name. Please make sure that the extracted gene name is accessible by putting its regular expression in brackets (). The default value expression("^(.+?)_.+") will look for the gene name (.+?) in front of the separator _ and any character afterwards .+ e.g. gene1_anything .
}
}
\details{
In a table with
\tabular{rr}{
DesignID \tab fullmatch \cr
AAK1_104_0 \tab 0 \cr
AAK1_105_0 \tab 197 \cr
AAK1_106_0 \tab 271 \cr
AAK1_107_0 \tab 1 \cr
AAK1_108_0 \tab 0 \cr
}
calling gene.remove(data.frame, toremove="AAK1", extractpattern = expression("^(.+?)_.+")) will remove all entries shown above, since AAK1 is the gene name, separated by an underscore _ from the sgRNA identifier.
}
\value{
gene.remove returns a data.frame that has the same column dimensions as the input data.frame, however all rows in which toremove=gene is present, are deleted.
}
\author{
Jan Winter
}
\note{
%% ~~further notes~~
none
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
data(caRpools)
gene.remove(CONTROL1, toremove="AAK1", extractpattern = expression("^(.+?)_.+"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~Gene }% __ONLY ONE__ keyword per line
|
3f67fa99807ec13b4101eea9052885ab988e596a
|
8299490a74006b76949315b2f704dc55b6842ac5
|
/scripts/6a.R
|
04dee7ebb1b45d6755545b50db000ff7b42c6b41
|
[] |
no_license
|
yuzhenpeng/hnscc_nsd1
|
ae52384d80a56774e68d0e46661a23af2c54d11a
|
44da0e4034bcd4382d899feb7c749a41f6f6c9f5
|
refs/heads/master
| 2022-12-06T17:39:25.358341
| 2020-09-03T22:10:01
| 2020-09-03T22:10:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,701
|
r
|
6a.R
|
library(data.table)
library(tidyverse)
library(readxl)
library(SummarizedExperiment)
library(DESeq2)
library(tximport)
library(limma)
library(patchwork)
library(pals)
# lookup tables and processed objects saved by earlier pipeline steps
load("misc/tx2gene.rda")
load('misc/g33.rda')
# named vector: Gencode gene ID -> gene symbol (gene-level records only)
e2g <- g33[g33$type == 'gene'] %>%
  {setNames(.$gene_name, .$ID)}
load('tcga/DNA.rda')
# RNA.rda and DNAme.rda each load an object named `data`; rename immediately
# after each load so the second load does not clobber the first
load('tcga/RNA.rda')
RNA <- data
load('tcga/DNAme.rda')
DNAme <- data
load('data/mCG.450k.rda')
load('misc/barcodeData.rda')
# salmon quantification files in data/, named by sample
samps <- list.files('data', pattern = '.sf.txt.gz$', full.names = T) %>%
  setNames(., sub('.*/(.*).sf.*', '\\1', .))
# sample metadata: type parsed from the sample name (presumably NSD1
# KO / mutant / wild-type status — confirm), cell line from the name prefix;
# three samples are explicitly excluded
mdat <- data.frame(samp = names(samps)) %>%
  mutate(type = case_when(
    grepl('KO',samp) ~ 'KO',
    grepl('^S|^B', samp) ~ 'MT',
    TRUE ~ 'WT'
  )) %>%
  dplyr::filter(!(samp %in% c('Cal27_KO17', 'Det562_KO1', 'Det562_KO4'))) %>%
  mutate(line = factor(sub('_.*', '', samp)),
    type = factor(type)) %>%
  column_to_rownames("samp")
m <- mdat[mdat$type %in% c('WT', 'KO'),] %>% droplevels()
s <- samps[names(samps) %in% rownames(m)] %>%
tximport(type = "salmon", tx2gene = tx2gene)
s$length[s$length == 0] <- 1
dds <- DESeqDataSetFromTximport(s, m, ~type + line)
dds2 <- DESeqDataSet(RNA, design = ~patient)
d2 <- counts(dds2)
d1 <- counts(dds) %>%
`rownames<-`(sub('\\..*', '', rownames(.))) %>%
{.[rownames(.) %in% rownames(d2),]}
mm <- rownames(d1) %>%
sort() %>%
{cbind(d1[.,], d2[.,])}
md <- tibble(samp = colnames(mm)) %>%
mutate(src = case_when(grepl('^TCGA', samp) ~ 'TCGA',
T ~ 'cell')) %>%
column_to_rownames('samp')
dds3 <- DESeqDataSetFromMatrix(mm, md, ~src)
vsd <- vst(dds3, blind = T)
v <- assay(vsd)
cp <- cor(v, method = 'pearson')
cs <- cor(v, method = 'spearman')
# TCGA patients with a non-silent NSD1 mutation reported by any caller,
# reduced to patient-level barcodes (TCGA-XX-XXXX).
muts <- lapply(DNA, function(x) {
  x[,c('Tumor_Sample_Barcode', 'Hugo_Symbol', 'Variant_Classification')] %>%
    dplyr::filter(Hugo_Symbol == 'NSD1')
}) %>% bind_rows(.id = 'Caller') %>%
  dplyr::filter(Variant_Classification != 'Silent') %>%
  pull(Tumor_Sample_Barcode) %>%
  sub('^(TCGA-[A-Z0-9]*-[A-Za-z0-9]*).*', '\\1', .) %>%
  unique()
# Patient -> methylation-group assignments from the published supplement.
# NOTE(review): `clus` is not referenced again in this script — confirm
# whether it is still needed.
clus <- read_xlsx('tcga/papillon2017.xlsx', skip = 1) %>%
  dplyr::select(bc = bcr_patient_barcode, grp = methylation_group) %>%
  dplyr::filter(grp != 'NA') %>%
  {setNames(.$grp, .$bc)}
# Cell-line sample names split into WT and KO groups.
smps <- mdat[mdat$type %in% c('WT', 'KO'),] %>%
  rownames_to_column('samp') %>%
  mutate(samp = as.character(samp)) %>%
  split(., .$type, drop = T) %>%
  lapply(`[[`, 'samp')
# TCGA (non-cell-line) sample names from the joint matrix metadata.
tsamps <- rownames(md)[md$src != 'cell']
# For each correlation flavor: per TCGA patient, mean correlation to the WT
# cell lines minus mean correlation to the KO lines (positive = more WT-like).
cor.rna <- list(pearson = cp, spearman = cs) %>%
  lapply(function(x) {
    lapply(smps, function(y) {
      x[rownames(x) %in% tsamps, y] %>%
        rowMeans() %>%
        data.frame(v = .) %>%
        rownames_to_column('samp')
    }) %>%
      bind_rows(.id = 'cond') %>%
      mutate(samp = sub('^(TCGA-[A-Z0-9]*-[A-Za-z0-9]*).*', '\\1', samp)) %>%
      group_by(samp, cond) %>%
      summarise(v = mean(v)) %>%
      summarise(v = v[cond == 'WT'] - v[cond == 'KO']) %>%
      deframe()
  })
# 450k probe coordinates keyed by probe ID.
# NOTE(review): `r` is not used below in this script — confirm it is needed.
r <- rowRanges(DNAme) %>%
  as.data.frame() %>%
  dplyr::rename(ref = Composite.Element.REF) %>%
  dplyr::select(chr = seqnames, start, end, ref) %>%
  as.data.table()
# NOTE(review): `data` here is whatever object the most recent load() left
# behind; it appears intended to be the DNAme assay container — confirm.
d <- as.data.table(assays(data)[[1]])
# Restrict the cell-line methylation matrix to the three lines of interest.
mat <- mat[, .SD, .SDcols = grep('Cal27|Det|FaDu', names(mat), value = T)]
# Probe rows with at least one finite value in BOTH matrices.
ok <- lapply(list(mat, d), function(x) {
  which(apply(x, 1, function(y) any(is.finite(y))))
}) %>% Reduce(intersect, .)
# Same WT-minus-KO similarity contrast as cor.rna, but on CpG methylation.
cor.5mc <- c('spearman', 'pearson') %>%
  setNames(., .) %>%
  lapply(function(meth) {
    lapply(mat[ok,], function(x) {
      cor(x, d[ok,], use = 'complete.obs', method = meth) %>%
        t() %>%
        data.frame(v = .) %>%
        rownames_to_column('samp')
    }) %>%
      bind_rows(.id = 'ref') %>%
      dplyr::filter(grepl('Cal27|Detroit|FaDu', ref)) %>%
      mutate(samp = sub('^(TCGA-[A-Z0-9]*-[A-Za-z0-9]*).*', '\\1', samp),
             cond = case_when(grepl('NSD1KO', ref) ~ 'KO',
                              T ~ 'WT')) %>%
      group_by(samp, cond) %>%
      summarise(v = mean(v)) %>%
      summarise(v = v[cond == 'WT'] - v[cond == 'KO']) %>%
      deframe()
  })
# Build one figure column per data type (gene expression, CpG methylation):
# p1 = ranked WT-minus-KO similarity curve, p2 = where the NSD1-mutant
# patients fall along that ranking, p3 = enrichment traces for both groups.
ps <- mapply(function(dd, nm) {
  # Enrichment traces for the NSD1-mutant and non-mutant patient sets.
  # NOTE(review): the second label reads 'NDS1+' — almost certainly a typo
  # for 'NSD1+' and it appears in the plot legend; fix the string if so.
  bd <- list('NSD1-' = names(dd) %in% muts) %>%
    {c(., list('NDS1+' = !.[[1]]))} %>%
    lapply(function(x) {
      barcodeData(dd, which(x))$worm %>%
        tibble(y = .) %>%
        mutate(x = 1:n())
    }) %>%
    bind_rows(.id = 'kind')
  # Diverging color ramp keyed by value.
  # NOTE(review): `eclrs` is never used below — dead code?
  eclrs <- range(bd$y) %>%
    {setNames(c(seq(.[1], 0, length.out = 51),
                seq(0, .[2], length.out = 51)[2:51]),
              pals::coolwarm(101))}
  # Rank positions (in ascending similarity order) of the NSD1-mutant cases.
  xs <- sort(dd) %>%
    names() %>%
    {. %in% muts} %>%
    which()
  # p1: patients sorted by WT-minus-KO correlation difference.
  p1 <- tibble(y = sort(dd),
               nm = nm) %>%
    mutate(x = 1:n()) %>%
    ggplot(aes(x, y)) +
    geom_line() +
    labs(y = expression(rho('WT') - rho('KO'))) +
    coord_cartesian(clip = 'off', xlim = c(0,525)) +
    theme(panel.background = element_rect(fill = 'grey90'),
          plot.background = element_blank(),
          panel.grid = element_blank(),
          panel.grid.major.y = element_line(color = 'grey70',
                                            linetype = 'dashed'),
          axis.text.x = element_blank(),
          axis.text = element_text(color = 'black'),
          axis.ticks = element_blank(),
          legend.justification = c(0, 0),
          legend.position = c(0, 0),
          legend.title = element_blank(),
          legend.key = element_blank(),
          axis.title.x = element_blank(),
          legend.background = element_blank(),
          strip.background = element_rect(fill = 'black'),
          strip.text = element_text(color = 'white'),
          axis.line = element_blank())
  # p2: histogram + rug of where NSD1- patients sit in the ranking.
  p2 <- hist(xs, 10, plot = F) %>%
    {tibble(x = .$mids, y = .$counts)} %>%
    ggplot(aes(x, y)) +
    geom_col(width = 50, fill = pals::tableau20(11)[11]) +
    geom_rug(aes(x = x), data = tibble(x = xs),
             inherit.aes = F, sides = 'b',
             size = 0.1,
             length = unit(0.1, "npc"), color = 'black') +
    labs(y = '# of NSD1-') +
    coord_cartesian(xlim = c(0,525), clip = 'off') +
    theme(panel.background = element_blank(),
          plot.background = element_blank(),
          panel.grid = element_blank(),
          panel.grid.major.y = element_line(color = 'grey70',
                                            linetype = 'dashed'),
          axis.text = element_text(color = 'black'),
          axis.ticks.y = element_blank(),
          legend.justification = c(1, 1),
          legend.position = c(1, 1),
          legend.title = element_blank(),
          legend.key = element_blank(),
          axis.ticks.x = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          legend.background = element_blank(),
          strip.background = element_rect(fill = 'black'),
          strip.text = element_text(color = 'white'),
          axis.line = element_blank())
  # p3: enrichment of each group along the ranking (1 = no enrichment).
  p3 <- ggplot(bd, aes(x, y)) +
    geom_hline(yintercept = 1) +
    geom_line(aes(color = kind), size = 1) +
    scale_color_manual(values = pals::tableau20(3)[c(1,3)]) +
    labs(y = 'Enrichment') +
    scale_x_continuous(breaks = c(1, 500)) +
    coord_cartesian(xlim = c(0,525), clip = 'off') +
    theme(panel.background = element_rect(fill = 'grey90'),
          plot.background = element_blank(),
          panel.grid = element_blank(),
          panel.grid.major.y = element_line(color = 'grey70',
                                            linetype = 'dashed'),
          axis.text = element_text(color = 'black'),
          axis.ticks.y = element_blank(),
          legend.justification = c(1, 1),
          legend.position = c(1, 1),
          legend.title = element_blank(),
          legend.key = element_blank(),
          axis.line.x = element_line(color = 'black'),
          axis.title.x = element_blank(),
          legend.background = element_blank(),
          strip.background = element_rect(fill = 'black'),
          strip.text = element_text(color = 'white'),
          plot.caption = element_text(size = 13.1, vjust = 0),
          axis.line = element_blank())
  # Only the left column keeps y-axis titles and the legend is dropped there.
  if (nm == 'Gene expression') {
    p1 <- p1 + facet_grid(. ~ nm)
    p3 <- p3 + theme(legend.position = 'none')
  } else {
    p1 <- p1 + facet_grid('Relative similarity' ~ nm) +
      theme(axis.title.y = element_blank())
    p2 <- p2 + facet_grid('Similarity Ranking' ~ .) +
      theme(axis.title.y = element_blank())
    p3 <- p3 + facet_grid('Overrepresentation' ~ .) +
      theme(axis.title.y = element_blank())
  }
  wrap_plots(p1, p2, p3, ncol = 1)
}, list(rna = cor.rna$spearman, dna = cor.5mc$spearman),
c('Gene expression', 'CpG methylation'), SIMPLIFY = F)
# Assemble the two columns side by side and save the final figure.
wrap_plots(ps, ncol = 2) %>%
  ggsave('figs/6a.pdf', ., height = 4.8, width = 7.8)
|
a44e0d3fb3c2c9367ffd1d462a6245bae4a38a13
|
31c5558a50749d1c4bc9be7d6a86c59e3bbb9db7
|
/man/cut_markers.Rd
|
808874a90516bbb68cb97f2e43da2cd63da0a0e6
|
[
"MIT"
] |
permissive
|
crsky1023/matchSCore2
|
735b06f9451cbb030cd5821ffe99e1036459813b
|
9f838d43073d3043774d2032c2a07aad173e5075
|
refs/heads/master
| 2023-03-18T03:37:06.895410
| 2020-04-05T11:01:43
| 2020-04-05T11:01:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 592
|
rd
|
cut_markers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cut_markers.R
\name{cut_markers}
\alias{cut_markers}
\title{Select the top n cluster markers}
\usage{
cut_markers(clusters, markers, ntop)
}
\arguments{
\item{clusters}{A vector of cluster labels.}
\item{markers}{A \code{data.frame} as in the output of the \code{FindAllMarkers} Seurat
function.}
\item{ntop}{The number of top markers you want.}
}
\value{
A list of ntop markers per cluster.
}
\description{
This function selects the top \code{ntop} markers for each cluster from the output of Seurat's \code{FindAllMarkers}.
}
\examples{
# TODO
}
|
11acfb7e78ee07e3de0f0e4049f76199e1561b9a
|
7665057b28fb224108b09ce4231981c472de38e3
|
/tests/testthat/test-rc.calibrate.ri.R
|
ca14f42a45def5a29e2feebecc2185a159e31332
|
[] |
no_license
|
cbroeckl/RAMClustR
|
e60e01a0764d9cb1b690a5f5f73f3ecc35c40a32
|
e0ca67df1f993a89be825fcbf3bfa6eaabbadfef
|
refs/heads/master
| 2023-08-03T10:26:16.494752
| 2023-06-21T14:24:06
| 2023-06-21T14:24:06
| 13,795,865
| 10
| 13
| null | 2023-08-01T01:42:17
| 2013-10-23T07:04:07
|
R
|
UTF-8
|
R
| false
| false
| 387
|
r
|
test-rc.calibrate.ri.R
|
# Golden-file test: rc.calibrate.ri() applied to the stored input object must
# reproduce the stored reference output exactly.
test_that("RAMClustR rc.calibrate.ri", {
  input_obj <- readRDS(file.path("testdata", "input.rc.calibrate.ri.rds"))
  reference <- readRDS(file.path("testdata", "rc.calibrate.ri.rds"))

  result <- rc.calibrate.ri(
    ramclustObj = input_obj,
    calibrant.data = file.path("testdata", "calibrant.data.csv")
  )

  expect_equal(result, reference)
})
|
b09a9903ee6f10d66395e7e3f61c8753db34ba39
|
da7ad84936816cbf9388df36ec18aa2c2d789325
|
/src/DumpToDB
|
c99a971ce9c7d394eacb61ded103c44bf1b2fd28
|
[] |
no_license
|
yaesoubilab/TBABM
|
14fbf673c9b4fc8b8b1462d30dc3782ae6c7aa18
|
7c8165db1a86f2f011a10601602c46b85a7bdc90
|
refs/heads/master
| 2023-08-09T02:27:45.410474
| 2023-07-26T01:26:40
| 2023-07-26T01:26:40
| 114,150,710
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,310
|
DumpToDB
|
#!/usr/bin/Rscript
library(docopt)
library(DBI)
library(RSQLite)
library(tibble)
library(stringr)
library(readr)
library(purrr)
library(dplyr)
# docopt usage string: the exact layout below is parsed at runtime to build
# the CLI, so its text must not be reflowed.
'usage:
DumpToDB.R -o <sqlite> [--exclude <excludes>] [--excludeCols <cols>]
DumpToDB.R -h | --help
options:
-o <sqlite> Where the SQLite db is. If it doesn\'t exist, it will be created.
--exclude <excludes> Files that should not be dumped to the database, separate w/ commas
--excludeCols <cols> Always exclude cols in `cols`. Separate w/ commas
-h --help Show this screen' -> doc
opts <- docopt(doc)
# Empty schemas, keyed on basename
schemas <- list()
# Buffer of rows accumulated per table before each flush to SQLite.
tables <- list()
# Number of input lines processed so far.
n <- 0
# Work in an in-memory database for speed; copy to the on-disk file at exit.
conn <- dbConnect(RSQLite::SQLite(), dbname = ":memory:")
conn2 <- dbConnect(RSQLite::SQLite(), dbname = opts$o)
# Comma-separated CLI options parsed into character vectors (empty if unset).
excludes <-
if (is.null(opts$exclude)) character(0) else stringr::str_split(opts$exclude, ',')[[1]];
excludeCols <-
if (is.null(opts$excludeCols)) character(0) else stringr::str_split(opts$excludeCols, ',')[[1]];
#' Derive an SQLite table name from a file name by stripping the final
#' extension (e.g. "flows.csv" -> "flows").
#'
#' Improvements over the previous stringr::str_match() version:
#' * pure base R for this hot helper (no stringr round-trip);
#' * a name without an extension is returned unchanged instead of NA, so
#'   dbExistsTable()/dbCreateTable() can never receive an NA table name.
#'
#' @param paths Character vector of file names (typically basenames).
#' @return Character vector of table names, same length as `paths`.
sanitizeTableName <- function(paths) {
  sub("\\.[a-zA-Z0-9]+$", "", paths)
}
# Read only the column specification (not the data) of the CSV at `path`
# and store it in `schemas` under the file's basename; returns the updated
# list so it composes with purrr::reduce().
updateSchema <- function(schemas, path) {
  schemas[[basename(path)]] <- readr::spec_csv(path)
  schemas
}
# Read one CSV with the pre-computed column spec, tag every row with the run
# `id`, drop excluded columns, and accumulate the rows into `lst` under the
# file's basename (appending when that key already exists).
loadTableIntoList <- function(lst, path, schemas, id) {
  basename_ <- basename(path)
  rows <- readr::read_csv(path, col_types = schemas[[basename_]])
  # rows <- readr::read_csv(path, col_types = schemas[[basename_]], lazy = FALSE)
  rows$id <- id
  # NOTE(review): relies on the global `excludeCols` defined at script top.
  rows[,excludeCols] <- NULL
  if (!basename_ %in% names(lst))
    lst[[basename_]] <- rows
  else
    lst[[basename_]] <- bind_rows(lst[[basename_]], rows)
  lst
}
# Append `rows` to the SQLite table named after `basename_` (extension
# stripped), creating the table on first use with columns inferred from
# `rows`.
loadRowsIntoTable <- function(rows, basename_, conn) {
  tableName <- sanitizeTableName(basename_)
  if (!dbExistsTable(conn, tableName))
    dbCreateTable(conn, name = tableName, fields = rows)
  dbAppendTable(conn, tableName, rows)
}
# Evaluate `code`; if it errors, re-raise the same condition with `message`
# appended as a parenthesized context line so the user can tell which step
# of the script failed.
tryInform <- function(code, message) {
  tryCatch(
    code,
    error = function(cond) {
      cond$message <- paste0(cond$message, "\n", "(", message, ")")
      stop(cond)
    }
  )
}
# Open 'stdin' as a file descriptor
f <- tryInform({f <- file('stdin'); open(f); f},
  "Could not open 'stdin' for reading")
# Read one line at a time until EOF
while(length(lineStr <- readLines(f, n = 1)) > 0) {
  # Each line should begin with a number, which is the folder name
  # containing the files to be calibrated
  dirname_ <- stringr::str_extract(lineStr, "^\\d+")
  runID <- dirname_
  # All CSVs in that folder, minus the globbed exclusion patterns.
  paths <- normalizePath(
    Sys.glob(file.path(dirname_, '*.csv')),
    mustWork = TRUE
  )
  paths <- setdiff(paths, normalizePath(
    Sys.glob(file.path(dirname_, excludes)))
  )
  # If this is the first line read, generate a schema for the data in that
  # directory
  # NOTE(review): at top level `<<-` behaves like `<-` (global env); kept
  # as written to stay byte-identical.
  if (n == 0)
    schemas <<- purrr::reduce(paths, updateSchema, .init = schemas)
  # Accumulate this run's rows into the in-memory buffer.
  tables <<- purrr::reduce(
    paths, loadTableIntoList,
    schemas = schemas,
    id = runID,
    .init = tables
  )
  # `n` increments after this check, so the buffer is flushed to SQLite on
  # the 1st, 21st, 41st, ... input line.
  if (n %% 20 == 0) {
    iwalk(tables, loadRowsIntoTable, conn = conn)
    # Reset the tables-buffer
    tables <<- list()
  }
  # Echo the line so this script can sit in the middle of a shell pipeline.
  write(lineStr, stdout())
  n <- n + 1
}
# Flush whatever is still buffered, then persist the in-memory database to
# the on-disk file given by -o and release both connections.
if (length(tables) != 0)
  iwalk(tables, loadRowsIntoTable, conn = conn)
sqliteCopyDatabase(conn, conn2)
dbDisconnect(conn)
dbDisconnect(conn2)
|
|
60916358e24a398263369a04384daf00f927f37d
|
3ae7a3665d4a6e975c6b0970fa96dd1d48ee4a78
|
/data-raw/airport_flowmap.R
|
f1c2503e57575e7bba42e148e37ea4f747e910ca
|
[] |
no_license
|
espinielli/ifrflows
|
cc3793e01ecd608d82b7c540d982a75b2b73ede9
|
51fc5f85e12019575588675f5fea9a2937167715
|
refs/heads/master
| 2021-06-24T12:01:03.925492
| 2020-10-14T07:08:33
| 2020-10-14T07:08:33
| 134,963,683
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,338
|
r
|
airport_flowmap.R
|
library(readr)
library(dplyr)
library(stringr)
library(tidyr)
library(lubridate)
# from (adapted to include country ISO_A2) Muriel's query
# see first skip line in the file
# Analysis window and the cached extract of the city-pair query.
wef <- "2020-01-01"
til <- "2020-09-13"
filename <- str_glue("city-pairs_{wef}_{til}.csv.gz")
# Flip to TRUE manually to re-run the database query; by default the cached
# CSV extract in data-raw/ is read instead.
if (FALSE) {
  source("R/retrieve_daily_airport_flows.R")
  cp <- retrieve_daily_airport_flows(wef, til) %>%
    as_tibble() %>%
    mutate(ENTRY_DAY = as_date(ENTRY_DAY)) %>%
    janitor::clean_names()
  cp %>% write_csv(here::here("data-raw", filename), na = "")
} else {
  cp <- read_csv(here::here("data-raw", filename), na = c(""))
}
# fix adep KVPC: force its country code to US both as origin and destination.
cp <- cp %>%
  mutate(
    adep_country_iso_code = if_else(adep == "KVPC", "US", adep_country_iso_code),
    ades_country_iso_code = if_else(ades == "KVPC", "US", ades_country_iso_code))
# Daily flight counts per ordered country pair ("XX>YY"), keeping the first
# seen country names/codes for labeling.
country_pair <- cp %>%
  mutate(airport_country_pair = paste0(adep_country_iso_code, ">", ades_country_iso_code)) %>%
  group_by(entry_day, airport_country_pair) %>%
  summarize(
    count = sum(flight),
    source = adep_country_iso_name %>% first() %>% str_to_title(),
    target = ades_country_iso_name %>% first() %>% str_to_title(),
    source_iso2c = adep_country_iso_code %>% first(),
    target_iso2c = ades_country_iso_code %>% first()
  )
# %>%
#   arrange(desc(count))
# %>%
#   # remove BOGUS: c("##", "AN", "XF" -> French Antilles in EUROCONTROL)
#   filter(!((source_iso2c %in% c("##")) | (target_iso2c %in% c("##"))))
# Distinct ISO-2 country codes appearing as either origin or destination.
countries <- country_pair %>%
  ungroup() %>%
  select(entry_day, source_iso2c, target_iso2c) %>%
  pivot_longer(
    -entry_day,
    names_to = "col",
    values_to = "iso_a2",
    values_ptypes = list(iso_a2 = character())) %>%
  select(iso_a2) %>%
  distinct() %>%
  filter(!is.na(iso_a2))
# adep_countries <- country_pair %>%
#   ungroup() %>%
#   select(source_iso2c) %>%
#   rename(iso_2c = source_iso2c)
# ades_countries <- country_pair %>%
#   ungroup() %>%
#   select(target_iso2c) %>%
#   rename(iso_2c = target_iso2c)
# countries <- bind_rows(adep_countries, ades_countries) %>% distinct()
# country centroids from https://worldmap.harvard.edu/data/geonode:country_centroids_az8
filename <- "country_centroids_az8.csv"
file <- here::here("data-raw", filename)
# NOTE(review): the result of fs::file_exists() is discarded — wrap it in
# stopifnot() if this was meant as a guard.
fs::file_exists(file)
# Do not interpret "NA" as missing: it is the iso_a2 code for Namibia.
centroids <- read_csv(file, na = c("")) %>%
  select(name, iso_a3, iso_a2, Longitude, Latitude) %>%
  janitor::clean_names()
# # extra centroids
# Hand-maintained centroids for codes missing from the centroid file.
extra_centroids <- tribble(
  ~name, ~iso_a3, ~iso_a2, ~latitude, ~longitude,
  # "Guadeloupe", NA, "GP",16.1488972,-61.957529,
  # "Martinique", NA, "MQ", 14.6345865,-61.2939723,
  "French Antilles", NA, "XF", 14.6345865,-61.2939723,
  "Mayotte", NA, "YT", -12.843056, 45.138333,
  "Netherland Antilles", NA, "AN", 14.6339221,-61.153894, # Martinique
  "UNKNOWN", NA, "##", 0.0, 0.0
  # "Barbados", "BRB", "BB", 13.17, -59.5525,
  # "Uruguay", "URY", "UY", -32.6005596,-58.0283107,
  # "Philippines", "PHL", "PH", 13, 122,
  # "Saint Lucia", "LCA", "LC", 13.883333, -60.966667,
  # "Guinea", "GIN", "GN", 11, -10,
  # "Puerto Rico", "PRI", "PR",18.2, -66.5,
  # "Rwanda", "RWA", "RW", -1.95, 29.866667,
)
centroids <- centroids %>%
  bind_rows(extra_centroids)
# Attach a centroid to every country code, then manually override positions
# that would plot badly (overseas territories skewing the centroid, etc.).
d <- countries %>%
  left_join(centroids, by = c("iso_a2" = "iso_a2")) %>%
  mutate(
    # Canary Islands
    latitude = ifelse(iso_a2 == "ES-CN", 28.4398708, latitude),
    longitude = ifelse(iso_a2 == "ES-CN", -16.9743268, longitude),
    # France (avoid French Guinea effect)
    latitude = ifelse(iso_a2 == "FR", 47.5, latitude),
    longitude = ifelse(iso_a2 == "FR", 3.2, longitude),
    # Norway: way too North
    latitude = ifelse(iso_a2 == "NO", 60.980820, latitude),
    longitude = ifelse(iso_a2 == "NO", 8.855597, longitude),
    # Serbia and Montenegro
    latitude = ifelse(iso_a2 == "CS", 44.2215031993, latitude),
    longitude = ifelse(iso_a2 == "CS", 20.7895833363, longitude),
    # Gibraltar
    latitude = ifelse(iso_a2 == "GI", 36.131667, latitude),
    longitude = ifelse(iso_a2 == "GI", -5.351667, longitude),
    # Reunion
    latitude = ifelse(iso_a2 == "RE", -21.114444, latitude),
    longitude = ifelse(iso_a2 == "RE", 55.5325, longitude),
    # Slovenia
    latitude = ifelse(iso_a2 == "SI", 46.1155477207, latitude),
    longitude = ifelse(iso_a2 == "SI", 14.8044423776, longitude),
    # USA center it (Raleigh 35.843965,-78.7851414)
    latitude = ifelse(iso_a2 == "US", 35.843965, latitude),
    longitude = ifelse(iso_a2 == "US", -78.7851414, longitude),
    # Russia (center at Nizhny Novgorod: 56.292129,43.6460081)
    latitude = ifelse(iso_a2 == "RU", 56.292129, latitude),
    longitude = ifelse(iso_a2 == "RU", 43.6460081, longitude),
    NULL
  )
# %>%
#   # exclude BOGUS
#   filter(!(iso_a2 %in% c("##", "AN", "XF", "YT")))
# arrange for flowmap.blue/in-browser
# locations: one row per country with a numeric id and lat/lon, plus a few
# display-name and coordinate fixes.
locations <- d %>%
  mutate(id = row_number(), lat = latitude, lon = longitude) %>%
  select(id, name, iso_a2, lat, lon) %>%
  mutate(
    name = ifelse(iso_a2 == "CS", "Serbia and Montenegro", name),
    name = ifelse(iso_a2 == "GF", "French Guiana", name),
    # fix French Guiana
    lat = ifelse(iso_a2 == "GF", 4.100192, lat),
    lon = ifelse(iso_a2 == "GF", -53.165735, lon),
    # ---
    name = ifelse(iso_a2 == "GI", "Gibraltar", name),
    name = ifelse(iso_a2 == "RE", "Réunion", name),
    # fix Svalbard et al.
    name = ifelse(iso_a2 == "SJ", "Svalbard and Jan Mayen", name),
    lat = ifelse(iso_a2 == "SJ", 75.5300298, lat),
    lon = ifelse(iso_a2 == "SJ", 3.2220993, lon),
    # ---
    # fix
    NULL
  )
# flows: origin/destination expressed as location ids; pairs whose country
# could not be matched to a location are dropped.
flows <- country_pair %>%
  # select(source_iso2c, target_iso2c, count) %>%
  left_join(locations, by = c("source_iso2c" = "iso_a2")) %>%
  rename(origin = id) %>%
  left_join(locations, by = c("target_iso2c" = "iso_a2")) %>%
  rename(dest = id) %>%
  select(origin, dest, count) %>%
  # filter NAs
  filter(!(is.na(origin) | is.na(dest)))
library(googlesheets4)
# # with dropdown menu for date
# sheet_id <- gs4_find("daily_country_flows")
#
# locations %>%
#   select(-iso_a2) %>%
#   sheet_write(sheet_id, sheet = "locations")
#
# threshold <- 1
#
# flows %>%
#   # filter on relevant counts
#   filter(count >= threshold) %>%
#   mutate(sheet_name = entry_day %>% format("%Y-%m-%d")) %>%
#   group_walk(~sheet_write(.x %>% select(-sheet_name), sheet_id, sheet = first(.$sheet_name)))
#
# # sheets
# flows %>%
#   # filter on relevant counts
#   filter(count >= threshold) %>%
#   mutate(sheet_name = entry_day) %>%
#   distinct(entry_day, sheet_name) %>%
#   arrange(desc(entry_day)) %>%
#   pull(sheet_name) %>% paste(collapse = ",") %>%
#   as_tibble() %>%
#   range_write(data = ., sheet_id, sheet = "properties", range = "B15:B15", col_names = FALSE)
#
########################################
#-----Single timeline
# Publish the locations, flows (as a single timeline), and flowmap.blue
# configuration to the Google Sheet backing the visualization.
sheet_id <- gs4_find("daily_country_flows_timeline")
threshold <- 1
locations %>%
  select(-iso_a2) %>%
  sheet_write(sheet_id, sheet = "locations")
flows %>%
  ungroup() %>%
  # filter on relevant counts
  filter(count >= threshold) %>%
  rename(time = entry_day) %>%
  select(origin, dest, count, time) %>%
  arrange(time, origin, dest, count) %>%
  # distinct(time, origin, dest) %>%
  mutate(time = format(time, "%Y-%m-%d")) %>%
  sheet_write(sheet_id, sheet = "flows")
# flowmap.blue "properties" sheet: key/value configuration of the map.
# NOTE(review): a Mapbox access token is committed here in plain text —
# rotate it / move it to an environment variable if it is meant to be secret.
my_properties <- c(
  "title" = "Daily country flight flows",
  "description" = "Daily flight flows from/to countries in the EUROCONTROL area",
  "source.name" = "EUROCONTROL",
  "source.url" = "https://eurocontrol.int",
  "createdBy.name" = "Aviation Intelligence Unit",
  "createdBy.email" = "PRU-Support@eurocontrol.int",
  "createdBy.url" = "https://ansperformance.eu",
  "mapbox.accessToken" = "pk.eyJ1IjoicGVyZm9ybWFuY2VyZXZpZXd1bml0IiwiYSI6ImNrZXlnNzhmYzA1aWIyc3Bybmo3eWpmb3AifQ.NQVbySxKjPGpF64_KamtPQ",
  "mapbox.mapStyle" = NA,
  "map.bbox" = NA, # west, south, east, north
  "colors.scheme" = "Default",
  "colors.darkMode" = "no",
  "animate.flows" = "no",
  "clustering" = "yes",
  "flows.sheets" = "flows",
  "msg.locationTooltip.incoming" = "Incoming flights",
  "msg.locationTooltip.outgoing" = "Outgoing flights",
  "msg.locationTooltip.internal" = "Internal flights",
  "msg.flowTooltip.numOfTrips" = "Number of flights",
  "msg.totalCount.allTrips" = "{0} flights",
  "msg.totalCount.countOfTrips" = "{0} of {1} flights"
)
tibble(property = names(my_properties)) %>%
  mutate(value = my_properties[property]) %>%
  write_sheet(sheet_id,"properties")
# # upload flows.csv and locations.csv in a (secret or public) GitHub gist
# # then get the location of the files:
# flows_file <- "https://gist.githubusercontent.com/espinielli/8581282046fb0cf364a1874bd1429801/raw/8184275c555f6d789817f5a159208d1037c59855/flows.csv"
# locations_file <- "https://gist.githubusercontent.com/espinielli/8581282046fb0cf364a1874bd1429801/raw/695eb88e6884d9ab88b35933391681685f0eb6c3/locations.csv"
#
# # the URL where to see the flowmap.blue
# str_glue("http://flowmap.blue/from-url?flows={flows_file}&&locations={locations_file}")
|
492cc5cfbe632fd8438b8fc4e970f50f8bef9393
|
590142f535831def89b5b2d0f6ac1d47b8306850
|
/R/reduce.R
|
0de377662192575f88e825daf8883441024debc6
|
[] |
no_license
|
jfontestad/makeParallel
|
2b62704c9e26477bc89d505de313ea07aaebdcca
|
6e43f34f51a23692907ec1563d3d47a8e189d7bf
|
refs/heads/master
| 2023-01-13T03:27:16.260825
| 2020-11-17T16:41:04
| 2020-11-17T16:41:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
reduce.R
|
#' Construct ReduceFun Objects
#'
#' Dispatches on how the reduction is specified: when `summary`, `combine`
#' and `query` are all given as function *names* (plain character objects),
#' a SimpleReduce is built; otherwise a UserDefinedReduce.
#'
#' @export
reduceFun <- function(reduce, summary = reduce, combine = "c", query = summary, predicate = function(...) TRUE)
{
  if (!is.character(reduce)) {
    stop("Expected the name of a reducible function for reduce argument.")
  }
  by_name <- vapply(
    list(summary, combine, query),
    function(arg) identical(class(arg), "character"),
    logical(1)
  )
  if (all(by_name)) {
    SimpleReduce(reduce = reduce, summary = summary,
                 combine = combine, query = query,
                 predicate = predicate)
  } else {
    UserDefinedReduce(reduce = reduce, summary = summary,
                      combine = combine, query = query,
                      predicate = predicate)
  }
}
# Merge two named count vectors/tables, summing counts for shared names.
# Not every name need appear in both inputs; missing names count as 0.
#
# Bug fix: the previous version added `y` positionally into the slots of
# `levels` that matched names(y). Because those slots follow the order of
# union(names(x), names(y)) while `y` follows its own order, counts were
# attributed to the wrong names whenever the two orders differed
# (e.g. x = c(b = 1), y = c(c = 2, b = 3) yielded b = 3, c = 3 instead of
# b = 4, c = 2). Indexing by name makes the alignment explicit and correct.
combine_two_tables <- function(x, y)
{
  levels <- union(names(x), names(y))
  out <- rep(0L, length(levels))
  names(out) <- levels
  out[names(x)] <- out[names(x)] + x
  out[names(y)] <- out[names(y)] + y
  as.table(out)
}
#' Combine any number of count tables
#'
#' Folds all arguments pairwise through combine_two_tables(), starting from
#' an empty table so that calling with zero arguments yields an empty table.
#' @export
combine_tables <- function(...) {
  Reduce(combine_two_tables, list(...), init = table(logical()))
}
|
b51923d54ede7c2b236f89d1594fcc9f3c8b5b7e
|
1fd23ecab6b0ddb920ac1bc160414c6263c228ff
|
/man/tidy_paf_tags.Rd
|
bef004daa9233cc1fc525da114346b61b87f9882
|
[
"MIT"
] |
permissive
|
pgpmartin/NanoBAC
|
c2c476c3f4555d19c9f6ecf0fcee4a3b2a6a96e6
|
822a57969c3876c17ea4fd6be9b4c752007f15d0
|
refs/heads/master
| 2023-01-30T15:33:38.718472
| 2020-12-09T16:12:26
| 2020-12-09T16:12:26
| 260,939,543
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 514
|
rd
|
tidy_paf_tags.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_paf.R
\name{tidy_paf_tags}
\alias{tidy_paf_tags}
\title{Tidy the tag columns imported from a PAF file}
\usage{
tidy_paf_tags(Somedata)
}
\arguments{
\item{Somedata}{A \code{tibble} as imported during the [NanoBAC::read_paf()] function}
}
\value{
tibble
}
\description{
Tidy the tag columns imported from a PAF file
}
\details{
This function is not intended to be used directly but is used in the [NanoBAC::read_paf()] function
}
|
42ac7810572910d8c4d1ca6f7c0ba07518af0674
|
922dbeda7fda82321819d68252c73356968b6ba4
|
/parm_cvs.R
|
e8e3942109ae22e05a374149c890bd0cbcc3a100
|
[
"Apache-2.0"
] |
permissive
|
megaa/TKDD2019
|
b22e6d362cdb0b524c5061de47383d3897523358
|
7d33d41b14d57597e6edcb258277dc854b3a5fb9
|
refs/heads/master
| 2020-08-24T04:07:16.652750
| 2019-12-11T03:17:15
| 2019-12-11T03:17:15
| 216,759,474
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
parm_cvs.R
|
# Command-line utility: reads (optionally a path prefix, then) an ad ID and
# a budget divisor from the command line, and prints the cross-validated
# benchmark parameters via show_benchmark_parm_CV().
# NOTE(review): setwd() to a hard-coded home path makes the script
# non-portable; consider deriving paths from the script location instead.
setwd("~/TKDD2019")
source("globalSetting.R")
# Refuse to run interactively: the script depends on commandArgs().
if (interactive()) {
  stopifnot(FALSE)
} else {
  ori_parm_start = 0
  FPATH = ""
  args <- commandArgs(trailingOnly = TRUE)
  ARG1 <- as.character(args[1])
  # A first argument ending in "/" is treated as an output-path prefix and
  # shifts all remaining positional arguments by one.
  if (nchar(ARG1) >= 2) {
    if (substr(ARG1, nchar(ARG1), nchar(ARG1)) == "/") {
      ori_parm_start = 1
      FPATH <- ARG1
    }
  }
  ADID <- AD.parm$ADID[AD.parm$ADID == args[ori_parm_start + 1]]
  BDGT_DIV <- as.integer(args[ori_parm_start + 2])
  #name <- as.character(args[ori_parm_start + 3])
  #parm1 <- if (!is.na(args[ori_parm_start + 4])) as.numeric(args[ori_parm_start + 4]) else NA
  parm1 <- NA
  #parm2 <- if (!is.na(args[ori_parm_start + 5])) as.numeric(args[ori_parm_start + 5]) else NA
  #parm3 <- if (!is.na(args[ori_parm_start + 6])) as.numeric(args[ori_parm_start + 6]) else NA
}
# NOTE(review): with `parm1` hard-coded to NA above this branch is dead
# code; if re-enabled, `name`, `parm2` and `parm3` must also be restored
# from the commented-out argument parsing or the branch will fail.
if (!is.na(parm1)) {
  switch(name,
    "BF" = parm <- list(BF = c(parm1, parm2)),
    "BF.C" = parm <- list(BF.C = as.integer(parm1)),
    "LIFT" = parm <- list(LIFT = parm1),
    "LAM" = parm <- list(LAM = c(parm1, parm2)),
    "LAM.C" = parm <- list(LAM.C = parm1),
    "C" = parm <- list(C = parm1),
    "RES.P" = parm <- list(RES.P = list(total.cost = parm2, achieved.num.click = as.integer(parm1), budget.left = parm3)),
    "RES.O" = parm <- list(RES.O = list(total.cost = parm2, achieved.num.click = as.integer(parm1), budget.left = parm3)))
  update_benchmark_parm(ADID, BDGT_DIV, parm, name, FPATH)
}
show_benchmark_parm_CV(ADID, BDGT_DIV, FPATH)
|
7c38f27b07dc0f58a1213fe31afd93dcb040cc36
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sjmisc/examples/dicho.Rd.R
|
344a346cb0ae54e3d9c9946257a27e0701cdfc54
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,344
|
r
|
dicho.Rd.R
|
library(sjmisc)
### Name: dicho
### Title: Dichotomize variables
### Aliases: dicho dicho_if
### ** Examples
### (auto-extracted example script mirroring the examples in ?sjmisc::dicho)
data(efc)
summary(efc$c12hour)
# split at median
table(dicho(efc$c12hour))
# split at mean
table(dicho(efc$c12hour, dich.by = "mean"))
# split between value lowest to 30, and above 30
table(dicho(efc$c12hour, dich.by = 30))
# sample data frame, values from 1-4
head(efc[, 6:10])
# dichtomized values (1 to 2 = 0, 3 to 4 = 1)
library(dplyr)
efc %>%
  select(6:10) %>%
  dicho(dich.by = 2) %>%
  head()
# dichtomize several variables in a data frame
dicho(efc, c12hour, e17age, c160age, append = FALSE)
# dichotomize and set labels
frq(dicho(
  efc, e42dep,
  var.label = "Dependency (dichotomized)",
  val.labels = c("lower", "higher"),
  append = FALSE
))
# works also with gouped data frames
mtcars %>%
  dicho(disp, append = FALSE) %>%
  table()
mtcars %>%
  group_by(cyl) %>%
  dicho(disp, append = FALSE) %>%
  table()
# dichotomizing grouped data frames leads to different
# results for a dichotomized variable, because the split
# value is different for each group.
# compare:
mtcars %>%
  group_by(cyl) %>%
  summarise(median = median(disp))
median(mtcars$disp)
# dichotomize only variables with more than 10 unique values
p <- function(x) dplyr::n_distinct(x) > 10
dicho_if(efc, predicate = p, append = FALSE)
|
2d33c4ff1773f19b60d080bdee2f1ba15eb7e1b2
|
5b6b52a0097af0a1c014d766a6edfe667604addf
|
/plot2.R
|
3bec122abb7b364acfcc75542d41334069752f6c
|
[] |
no_license
|
FelixDavidMejia/ExData_Plotting1
|
a2be5d7346031c1283ddfe3e23b5c5148cdb3acb
|
2004a103f7bdd17bda51bfc1d44ec6457cb31507
|
refs/heads/master
| 2021-01-16T22:35:46.824808
| 2015-09-12T06:15:56
| 2015-09-12T06:15:56
| 42,286,203
| 0
| 0
| null | 2015-09-11T03:47:33
| 2015-09-11T03:47:33
| null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
plot2.R
|
# Data Science Specialization
# Exploratory Data Analysis: Course Project #1 — plot2.R
# Student: Félix D. Mejía
#
# Reads the "Individual household electric power consumption" measurements
# for 2007-02-01 and 2007-02-02 only (filtered in SQL while reading, to
# avoid loading the full 4-year file) and draws Global Active Power (kW)
# against time as a line chart, saved to plot2.png (480x480).

# Work from the local data directory so the raw file resolves.
setwd("~/OneDrive/usr/docs/Data Science Specialization/04. Exploratory Data Analysis/prj/01/data")

library(sqldf)

power <- read.csv.sql(
  "household_power_consumption.txt",
  sql = "select * from file where Date in ('1/2/2007', '2/2/2007')",
  sep = ";"
)
power$datetime <- strptime(paste(power$Date, power$Time), "%e/%m/%Y %H:%M:%S")

png(file = "~/GitHub/ExData_Plotting1/plot2.png", width = 480, height = 480)
plot(
  power$datetime,
  power$Global_active_power,
  type = "l",
  ylab = "Global Active Power (kilowatts)",
  xlab = ""
)
dev.off()
|
cf712146333617d2fcd20bbffd4b5b1747de1bff
|
30cad48b8b1798d8e9bdb7ecbd527f15a5c58ad7
|
/GeneSetOverRepresentation.R
|
6b050504465a9b2106ff4fecdb42263364d5f6fe
|
[] |
no_license
|
ZHANGQiaosheng/IaPathway
|
9009d385f386dbc9c96483351f54b546ea83e576
|
a2cdaa0ad16448794505a607afbafbf941af0cb4
|
refs/heads/master
| 2020-05-25T20:34:17.831082
| 2019-05-22T06:53:46
| 2019-05-22T06:53:46
| 187,978,931
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,626
|
r
|
GeneSetOverRepresentation.R
|
# Gene-set over-representation analysis for BRCA expression data:
#   1) limma differential expression between 37 "Nor" and 33 "Tur" samples,
#   2) load 281 pathway gene lists from disk,
#   3) hypergeometric over-representation test (HTSanalyzeR), write Result.txt.
# NOTE(review): rm(list = ls()) and installing packages at run time
# (biocLite) are side effects best removed from analysis scripts.
rm(list = ls())
gc()
source("https://bioconductor.org/biocLite.R")
biocLite("HTSanalyzeR")
library("HTSanalyzeR")
library("limma")
eSet <- read.delim("E:/PhD/Manuscript/DATA/BRCA/NEW/scale/brca.exp.txt", row.names=1, stringsAsFactors=FALSE)
# Group labels: assumes the first 37 columns are normal, next 33 tumour.
normals <- c(rep("Nor", 37), rep("Tur", 33))
design <- model.matrix(~0 + factor(normals))
colnames(design) <- c("Nor","Tur")
contrast.matrix <- makeContrasts(Nor-Tur, levels=design)
##step1
fit <- lmFit(eSet, design)
##step2
fit2 <- contrasts.fit(fit, contrast.matrix) ## this step is important; inspect its effect yourself
fit2 <- eBayes(fit2) ## default no trend !!!
##eBayes() with trend=TRUE
##step3: differentially expressed genes at adjusted p < 0.01
#tempOutput = topTable(fit2, coef=1, n=Inf)
tempOutput = topTable(fit2, coef=1, p.value=0.01, n=Inf)
# Load the 281 pathway gene lists (one file per pathway, one gene per row).
PathwayList <- list()
for (i in 1:281) {
  t1 <-
    paste(
      "E:/PhD/Manuscript/DATA/BRCA/Result/OldPathway/Original/pathwayinformation/pathwayNo/",
      i,
      sep = ''
    )
  path <-
    read.table(
      paste(t1, ".txt", sep = ''),
      quote = "\"",
      comment.char = "",
      as.is = TRUE
    )
  PathwayList[[i]] <- path$V1
}
# Pathway names (assumed to be in the same order as files 1..281).
pathwayID <- read.delim("E:/PhD/Manuscript/DATA/BRCA/Result/OldPathway/pathwayinformation/pathwayID.txt", stringsAsFactors=FALSE)
# pathwaynames <- list()
# for (i in 1:281){
#   pathwaynames[[i]]<-pathwayID$Symbel[i]
# }
names(PathwayList) <- pathwayID$Symbel
# Universe = all measured genes; hits = the DE genes from topTable above.
Result <- multiHyperGeoTest(PathwayList, row.names(eSet), row.names(tempOutput), minGeneSetSize = 1, pAdjustMethod = "BH", verbose = TRUE)
write.table(Result, "Result.txt", row.names = T, col.names = T, sep = "\t", quote = F)
|
0169a1b7543dbdfefe857ee0531206d6c2c57943
|
bf1f9fc104b167e7754dff893720d5723eec23fe
|
/R/''BubbleChart.R
|
eb90547aa0471741d55119a6ac98cd154c37b91a
|
[] |
no_license
|
aboyher/react-bubble-chart
|
b5a4754d2f9430b7c9c79ccec742a36d52963022
|
6b6172ea58fef0fc8e91138fdbd262ba7237abbb
|
refs/heads/main
| 2023-08-07T10:44:16.918287
| 2021-09-27T19:30:33
| 2021-09-27T19:30:33
| 404,752,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 873
|
r
|
''BubbleChart.R
|
# AUTO GENERATED FILE - DO NOT EDIT

# Dash wrapper for the BubbleChart React component: collects the component
# properties, drops those left as NULL, and returns a dash_component
# structure for the Dash framework to render.
# NOTE(review): the function name `''BubbleChart` (an empty string literal
# glued to the identifier) looks like a code-generation artifact and will
# not parse as a regular R identifier — confirm against the dash generator.
''BubbleChart <- function(id=NULL, data=NULL, height=NULL, labelFont=NULL, overflow=NULL, padding=NULL, selectedNode=NULL, showLegend=NULL, valueFont=NULL, width=NULL) {
    props <- list(id=id, data=data, height=height, labelFont=labelFont, overflow=overflow, padding=padding, selectedNode=selectedNode, showLegend=showLegend, valueFont=valueFont, width=width)
    # Keep only the properties the caller actually supplied.
    if (length(props) > 0) {
        props <- props[!vapply(props, is.null, logical(1))]
    }
    component <- list(
        props = props,
        type = 'BubbleChart',
        namespace = 'dash_react_bubble_chart',
        propNames = c('id', 'data', 'height', 'labelFont', 'overflow', 'padding', 'selectedNode', 'showLegend', 'valueFont', 'width'),
        package = 'dashReactBubbleChart'
        )
    structure(component, class = c('dash_component', 'list'))
}
|
3f8ad305f25b87d799326fa8d47eb08faeced28b
|
3f0d799a02e246022d18f93b1a0c6391f24a0743
|
/man/build_geography_minCompAreas1970to2010_1970.Rd
|
34a857a9bd69e42741c7fb92ddd45c72426eebf8
|
[] |
no_license
|
claudiacerqn/harmonizeIBGE
|
3b28eb7cab87ec4a9d56c741438ccddafe6917be
|
da81837a052cb64d15b21e23c33630fc577aac40
|
refs/heads/master
| 2020-04-03T00:50:38.767549
| 2018-08-28T20:41:39
| 2018-08-28T20:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 460
|
rd
|
build_geography_minCompAreas1970to2010_1970.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_geography_minCompAreas1970to2010_1970.R
\name{build_geography_minCompAreas1970to2010_1970}
\alias{build_geography_minCompAreas1970to2010_1970}
\title{Builds minimum comparable geographic areas (1970 to 2010) for the 1970 Census}
\usage{
build_geography_minCompAreas1970to2010_1970(CensusData, state_var_name = "uf")
}
\description{
Builds minimum comparable geographic areas (1970 to 2010) for the 1970 Census
}
|
3b7aaeb7fecafce792e063513804020dc74ee4ed
|
dc8f251e6f270bf1aba1220b773cf65101962a30
|
/man/dt_dates.Rd
|
69699fe88e49f878ee9abc966cf2da2af65fa51c
|
[] |
no_license
|
jienagu/dtverse
|
2e3c8926947b17675e9fd6cb534123f53157b4d9
|
eda8c8b9d1c0d2bd3d1e5420ef8b6f809368701e
|
refs/heads/master
| 2022-11-28T15:29:14.009154
| 2020-08-14T16:08:04
| 2020-08-14T16:08:04
| 287,570,036
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 358
|
rd
|
dt_dates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-dt_dates.R
\docType{data}
\name{dt_dates}
\alias{dt_dates}
\title{Anonymized sample data}
\format{a data table with dates}
\usage{
data(dt_dates)
}
\description{
Anonymized sample data
}
\examples{
data(dt_dates)
}
\author{
Jiena Gu McLellan, 2020-05-26
}
\keyword{datasets}
|
6d4d8c20c4f00dabbe2a41b3e001fd27ce9d04b1
|
a22b29926712651fe32cb6d5731abca8db998f9d
|
/lecture-09/scratch.R
|
0a237edfa9016f9efc0b8d80905b5b4903b2b4bf
|
[] |
no_license
|
pyiguan/datasci611
|
349acb1f0dfdf7d013d1d5613d9c6508b63e8a45
|
e98acc982ff1f869613e18a4ed55eaedd226e30f
|
refs/heads/master
| 2022-12-31T03:45:10.087863
| 2020-10-27T19:25:41
| 2020-10-27T19:25:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
scratch.R
|
library(tidyverse);
source("utils.R");

# Character stats, flagged by whether the alignment is "neutral"; rows with
# any missing value are dropped before resampling.
d <- read_csv("./source_data/datasets_38396_60978_charcters_stats.csv") %>%
  tidy_up_names() %>%
  mutate(neutral=alignment == "neutral") %>%
  drop_na();
d_neutral <- d %>% filter(neutral);
d_nn <- d %>% filter(!neutral);

# Stat columns to bootstrap. Sampling each column independently deliberately
# breaks within-character correlations; the column order matches the original
# script so the RNG stream is consumed identically.
stat_cols <- c("intelligence", "strength", "speed",
               "durability", "power", "combat")

# Draw n independent with-replacement resamples of every stat column from
# src, tagged with the given neutral flag. Replaces six copy-pasted
# sample() calls per group in the original.
resample_stats <- function(src, n, neutral_flag) {
  draws <- lapply(stat_cols, function(cl) sample(src[[cl]], n, replace = TRUE))
  names(draws) <- stat_cols
  bind_cols(tibble(neutral = rep(neutral_flag, n)), as_tibble(draws))
}

# 500 synthetic neutral characters, 1500 synthetic non-neutral ones.
fd <- bind_rows(resample_stats(d_neutral, 500, TRUE),
                resample_stats(d_nn, 1500, FALSE))
write.csv(fd, "source_data/faux-powers.csv");
|
78c7af3834ca62a5a1bb1a65f331cfcd180a8126
|
0686d74b327e626362239710f30bef2644c83cfc
|
/round2_metareview/spatial_EDA/temporal_EDA_with_spatial_scale.R
|
695dee1ac710e639a220893096fdc64bf7ecb498
|
[] |
no_license
|
caitlintwhite/kremeny_analyses
|
1f5bf691c2b2811941f06ca0ebd55e76132bd695
|
0ed7b19b7caffe8c80528767d07170f502f314cd
|
refs/heads/master
| 2022-07-03T17:08:57.467914
| 2022-06-27T16:15:40
| 2022-06-27T16:15:40
| 229,632,534
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,246
|
r
|
temporal_EDA_with_spatial_scale.R
|
library(tidyverse)
library(reshape2)
library(plotly)

# Single-reviewer screening answers, long format: one row per (Title, qnum,
# abbr, answer) -- schema inferred from the filters below; confirm.
data_path = "round2_metareview/data/cleaned/prelim_singlereview.csv"
df = read.csv(data_path)

# Papers NOT excluded at Q3 ("Yes" answers to Q3 mean exclusion here).
non_excl_titles = df %>%
  filter(qnum =='Q3', !answer=='Yes') %>%
  select(Title)

# Spatial-scale questions (Q8-Q11) for the retained papers only.
spat = df %>%
  filter(Title %in% non_excl_titles$Title) %>%
  filter(qnum %in% c("Q8","Q9","Q10","Q11"))

# Bins for the raw spatial-scale answer codes.
smalls = c('25m','50m','100m','500m', '1km')
mediums = c('10km', '100km', '1000km')
larges = c('100Mgm','100Gm','101Gm')

# Papers that report temporal trends (exact answer string match from the form).
time_trends_titles = df %>%
  filter(qnum == "Q7", abbr=='TimeTrends', answer=="Yes (e.g., a site sampled multiple points in time, a model with dynamics)") %>%
  pull(Title)

# Years of data, only for the time-trend papers.
numyears_df = df %>%
  filter(qnum == "Q7", abbr == 'YrsData', Title %in% time_trends_titles) %>%
  select(Title, answer) %>%
  rename(num_years = answer)

# One comma-joined methods string per paper; the two ifelse() calls repair an
# answer option that itself contained a comma and was split by separate_rows().
methods_used_df = df %>%
  filter(qnum=="Q6", !abbr %in% c('GenInfo','MethodsNotes')) %>%
  separate_rows(answer, sep = ",") %>%
  mutate(answer = ifelse(answer=="Observational (Includes data observed in situ OR via remote sensing", "Observational", as.character(answer))) %>%
  mutate(answer = ifelse(answer==" if used directly)", NA, as.character(answer))) %>%
  group_by(Title) %>%
  summarise(methods_used = paste0(unique(answer), collapse = ","))

# Per-paper table combining binned spatial scale, temporal extent, and
# methods. Scales are binned via case_when, re-joined per Title, and "unk"
# entries stripped from multi-scale strings.
spat_temp_methods_df = spat %>%
  filter(qnum == 'Q9', !is.na(answer), !answer==0, !is.na(Group)) %>%
  group_by(Title) %>%
  summarise(scales = paste0(unique(Group), collapse = ',')) %>%
  separate_rows(scales, sep = ",") %>%
  mutate(scales_binned = case_when(
    scales %in% smalls ~ 'Small',
    scales %in% mediums ~ 'Medium',
    scales %in% larges ~ 'Large',
    scales == 'unk' ~ 'unk'
  )) %>%
  group_by(Title) %>%
  summarise(cross_grps = paste0(unique(scales_binned), collapse = ',')) %>%
  ungroup() %>%
  mutate(cross_grps = gsub("unk,","",cross_grps)) %>%
  mutate(cross_grps = gsub(",unk","",cross_grps)) %>%
  left_join(numyears_df, by = 'Title') %>%
  left_join(methods_used_df, by = 'Title') %>%
  rename(spat_scale = cross_grps) %>%
  mutate(methods_used = gsub(",NA","",methods_used)) %>%
  # NOTE(review): gsub() with an NA replacement may not yield a true NA for
  # pure-"unk" entries -- confirm the intended behavior (na_if() would be
  # unambiguous).
  mutate(spat_scale = gsub("unk", NA, spat_scale)) %>%
  mutate(num_years = factor(num_years, levels = c('1 year or less','2–5 years','6–10 years','10+ years'))) %>%
  mutate(spat_scale = factor(spat_scale, levels = c("Small", "Small,Medium","Medium","Medium,Large","Large","Small,Large","Small,Medium,Large","unk")))
# The three plots below share one recipe: stacked proportion bars of one
# variable within another, with the total study count labelled at the base of
# each bar. Factored into a helper to remove the original triplication.

# Study counts per (methods, temporal scale, spatial scale) combination,
# computed once and reused by every plot.
scale_counts <- spat_temp_methods_df %>%
  group_by(methods_used, num_years, spat_scale) %>%
  summarise(count = n())

# Proportion-bar plot: x_var on the x axis, fill_var stacked to proportions;
# the per-x_var study total is printed at y = 0. Column arguments are passed
# unquoted and embraced with {{ }}.
plot_prop_by <- function(x_var, fill_var) {
  totals <- spat_temp_methods_df %>%
    group_by({{ x_var }}) %>%
    summarise(count = n())
  ggplot(scale_counts, aes(x = {{ x_var }}, y = count)) +
    geom_bar(position = 'fill', stat = 'identity', aes(fill = {{ fill_var }})) +
    geom_label(data = totals, aes(label = count, y = 0)) +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ylab('Proportion of studies')
}

# for each temp scale, what is spatial scale?
plot_prop_by(num_years, spat_scale)
#long term studies have higher proporiton of larger scale studies, fewer small scale studies, more unk

# for each spatial scale, what is temp scale?
plot_prop_by(spat_scale, num_years)
#long term studies have higher proporiton of larger scale studies, fewer small scale studies, more unk

# for each type of study (methods), what is temp scale?
plot_prop_by(methods_used, num_years)
# might be nice to look at relative counts between scales, types
|
b2d9a8e807922d0d246e97e988143e5bfa18d0f3
|
125e43f58d98268b67421a3ace1bf10308792cf2
|
/bayes_net_test.R
|
253a1df5b9878d71e7c9df1e6064a4f514a6fea1
|
[] |
no_license
|
rbstern/bayesNet
|
58184b6ad91c7405860d51704ba3165f04fc6257
|
2ac4adae5b2782870ef6a1435099bfa3126abe2d
|
refs/heads/master
| 2018-11-11T18:51:10.572830
| 2018-08-22T21:34:03
| 2018-08-22T21:34:03
| 118,653,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,979
|
r
|
bayes_net_test.R
|
source("bayes_net.R")
############################################
## Example 1: Bayes net with no NA's,     ##
## 2 labels and no covariates.            ##
############################################
# Two categorical labels, fully observed: label2 depends on label1
# (P(label2=1) is 0.2 under "a" and 0.9 under "b").
nn = 10^4
label1 = c(rep("a", nn/2), rep("b", nn/2))
label2 = c(rbinom(nn/2, 1, 0.2), rbinom(nn/2, 1, 0.9))
this_data = tibble::tibble(label1 = label1,
                           label2 = label2)
# Shared data wrapper consumed by the node classes (defined in bayes_net.R).
this_data_ref = Data_Ref$new(this_data)
#this_data_ref$get_df()
# Root node: label1 has no parents.
label1_node = Cat_Node$new(this_data_ref, "label1", NULL)
#label1_node$get_labels()
#label1_node$do_update_params()
#label1_node$get_param()
# Child node: label2 conditioned on label1.
label2_node = Cat_Node$new(this_data_ref, "label2", c(label1_node))
#label2_node$get_param()
#label2_node$get_labels()
#label2_node$do_update_params()
#label2_node$get_param()
############################################
## Example 2: Bayes net with NA's,       ##
## 2 labels and no covariates.           ##
############################################
# Same two-label net as Example 1, but ~10% of each label is masked to NA so
# the Gibbs sweep below has missing values to work with.
nn = 10^4
label1 = c(rep("a", nn/2), rep("b", nn/2))
label2 = c(rbinom(nn/2, 1, 0.2), rbinom(nn/2, 1, 0.9))
label1 = ifelse(rbinom(nn,1,0.1), NA, label1)
label2 = ifelse(rbinom(nn,1,0.1), NA, label2)
this_data = tibble::tibble(label1 = label1,
                           label2 = label2)
this_data_ref = Data_Ref$new(this_data)
#this_data_ref$get_df()
#this_data_ref$get_data_na()
label1_node = Cat_Node$new(this_data_ref, "label1", NULL)
#label1_node$get_labels()
#label1_node$do_update_params()
#label1_node$get_param()
label2_node = Cat_Node$new(this_data_ref, "label2", c(label1_node))
#label2_node$get_param()
#label2_node$get_labels()
#label2_node$do_update_params()
#label2_node$get_param()
#Gibbs: update a randomly chosen node on each iteration.
nodes = c(label1_node, label2_node)
for(ii in 1:10) {
  # BUG FIX: sample() on a list returns a length-1 list, so the original
  # random_node$do_update_params() dereferenced a list (yielding NULL) and
  # failed. Extract the node with [[1]], as Example 3 already does.
  random_node = sample(nodes, 1)[[1]]
  random_node$do_update_params()
}
############################################
## Example 3: Bayes net with NA's,       ##
## 2 labels and 1 covariate.             ##
############################################
# As Example 2, plus a continuous covariate whose variance differs by group
# (sd 1 under "a", sd 2 under "b"); ~10% of every column is masked to NA.
nn = 10^4
label1 = c(rep("a", nn/2), rep("b", nn/2))
label2 = c(rbinom(nn/2, 1, 0.2), rbinom(nn/2, 1, 0.9))
label1 = ifelse(rbinom(nn, 1, 0.1), NA, label1)
label2 = ifelse(rbinom(nn, 1, 0.1), NA, label2)
covariate1 = c(rnorm(nn/2, 0, 1), rnorm(nn/2, 0, 2))
covariate1 = ifelse(rbinom(nn, 1, 0.1), NA, covariate1)
this_data = tibble::tibble(label1 = label1,
                           label2 = label2,
                           covariate1 = covariate1)
this_data_ref = Data_Ref$new(this_data)
label1_node = Cat_Node$new(this_data_ref, "label1", NULL)
label2_node = Cat_Node$new(this_data_ref, "label2", c(label1_node))
# Continuous node conditioned on both categorical parents.
cov1_node = Double_Node$new(this_data_ref,
                            "covariate1",
                            c(label1_node, label2_node))
#cov1_node$get_node_name()
#cov1_node$get_param()
cov1_node$do_update_params()
cov1_node$get_param()
# Gibbs sweep over all three nodes; [[1]] unwraps sample()'s length-1 list.
nodes = c(label1_node, label2_node, cov1_node)
for(ii in 1:10) {
  random_node = sample(nodes, 1)[[1]]
  random_node$do_update_params()
}
|
cf0dd7e6d3ed8d0ef5ddac4bc79c2380d53216cb
|
d1aca01f1aa7e658d6f4d5586f9a22bf5572cb15
|
/man/image_modify_hsv.Rd
|
fb261bd25cc6a005d498174c8d88c0a30503f440
|
[] |
no_license
|
cran/plothelper
|
cc65cf63b02c1b27e2c483f60cb223f078f131e5
|
c073fd0b0351023e8906ad1f15f5b130f0aadfff
|
refs/heads/master
| 2020-12-22T02:12:30.035018
| 2020-05-08T07:40:10
| 2020-05-08T07:40:10
| 236,640,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,777
|
rd
|
image_modify_hsv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_modify_hsv.R
\name{image_modify_hsv}
\alias{image_modify_hsv}
\title{Modify the H, S, V Values of a
Color Vector or an Image}
\usage{
image_modify_hsv(
x,
set_h = NULL,
add_h = NULL,
mult_h = NULL,
rescale_h = NULL,
fun_h = NULL,
set_s = NULL,
add_s = NULL,
mult_s = NULL,
rescale_s = NULL,
fun_s = NULL,
set_v = NULL,
add_v = NULL,
mult_v = NULL,
rescale_v = NULL,
fun_v = NULL,
result = "magick",
res = 144
)
}
\arguments{
\item{x}{an image created by \code{image_read} or
other functions in package magick. Alternatively, it
can be a vector of colors.}
\item{set_h}{set H values with specific values.}
\item{add_h}{add specific
values to current H values.}
\item{mult_h}{multiply the current values
with specific values.}
\item{rescale_h}{a length 2 numeric vector
specifying the desired range of H values,
e. g., \code{rescale_h = c(0.6, 0.95)} which
will make the smallest original value to
be 0.6, and the largest, 0.95. Alternatively,
it can be your own scaling function.}
\item{fun_h}{your own modifying function
(e. g., \code{fun_h = sqrt}). Alternatively, it can
be a list that designates how to use internal
curves. See Details.}
\item{set_s, add_s, mult_s, rescale_s, fun_s}{parameters
to change S values. Used in the same way as those
for H. See above.}
\item{set_v, add_v, mult_v, rescale_v, fun_v}{parameters
to change V values. Used in the same way as those
for H. See above.}
\item{result}{the default is "magick", the output is
a magick picture. When it is "raster", a matrix is created
which can be use as a raster
for \code{ggplot2::annotation_raster}.}
\item{res}{when the result is a magick picture, the
\code{res} parameter used by \code{magick::image_graph}.
Default is 144.}
}
\description{
The function modifies the H (0 - 1), S, V values
of a vector of colors or an image. The
three channels can be modified separately.
However, the most frequently used is
only the V modification.
The ways to modify include: setting values
to some specified values (set_*), adding (add_*),
multiplying the original values (mult_*),
rescaling the original values (rescale_*),
using a function to recompute values (fun_*).
The most useful way is to use some internal
curves that mimic those PS-like apps.
DO see Details.
}
\details{
\code{fun_*} can be a function or
a named list which tells the
function which internal function is to be used.
You must ensure values used by the function
specified by you to be in the range [0, 1] for
H, S, V modification and [0, 255] for R, G, B
modification. Also, you'd better make sure
the output values of the function are in the same range.
When \code{fun_*} is a list, it should be written in the
following way:
\itemize{
\item (1) \code{fun_* = list(fun = "s", c1 = -2,
c2 = 2, domain = c(0, 1))} An "s" curve will be
used. c1 points out how to deal with values below
0.5, c2 with values above 0.5. For c1 and c2, a value
larger than 0 means a curvature towards y = 1, and
a value smaller than 0 means a curvature towards
y = 0. So, c1 < 0 and c2 > 0 will make an s shape
curve. c1 and c2 can be any number, though
those with absolute values below 4 are quite
good (default is -2 and 2). 0 means no change.
domain specifies the
value domain to put the result. The default is
c(0, 1) which means not to rescale, thus
0.1 is 0.1. However, if you
set \code{domain = c(0.5, 1)},
then 0.1 will be 0.55. If you do not know how
to set domain, just ignore it.
\item (2) \code{fun_* = list(fun = "circle",
value = 0.5)} When the fun
is "circle" or "c", an arc will be used. value must
be a number between -1 and 1 (default is 0.5).
A number larger than 0 means the curvature is
towards y = 1, and a number smaller than 0 means
it is towards y = 0. value should not be 0.
\item (3) \code{fun_* = list(fun = "linear", x0 = 0.4,
y0 = 0.6)} This makes a linear modification except
that there is a breakpoint. The default point is
(0.4, 0.6) which means: suppose all the original numbers
and output numbers are in the [0, 1] range and
the points with their x position smaller than 0.4 will
be put along the line that links (0, 0) and (0.4, 0.6),
and, those with x position larger than 0.4 will be put
along the line that links (0.4, 0.6) and (1, 1).
}
}
\examples{
\donttest{
# First create an image
library(magick)
mycolor=grDevices::hsv(0, s=seq(0.1, 0.9, 0.1),
v=seq(0.1, 0.9, 0.1))
img=image_graph(width=400, height=400)
print(showcolor(mycolor)+theme_void())
dev.off()
# Now increase S values with
# an internal circle curve and
# set V values between [0.5, 1].
res=image_modify_hsv(img,
fun_s=list("circle", value=1),
rescale_v=c(0.5, 1))
}
}
|
8b7202a20a4627334d62abf0c652fa16005babd7
|
98262aae95f8b1966972b1521411d44936fcc09d
|
/plots_code.R
|
286354048b37a7d7d62cdfecef5c37433c65267d
|
[] |
no_license
|
pmatarrese/twitter_bot_detection
|
f1bb53fadacf89ac9f5da9fd7eb96546bceef89c
|
106e99b1920b5b00d6867ce83b1761000252e59c
|
refs/heads/master
| 2023-06-01T04:34:50.633902
| 2021-06-04T03:56:18
| 2021-06-04T03:56:18
| 368,020,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,448
|
r
|
plots_code.R
|
# Show unlabeled results with only "verified" users
# (human_bot == 0 marks known humans); render the table and export it as PNG.
results_tbl %>%
  filter(human_bot == 0) %>%
  kable(caption = "The three classifiers at least predicted the known humans (verified accounts) fairly accurately.") %>%
  kable_styling() %>%
  save_kable("unlabeled_results_humans.png")
# Show account age difference in labeled set
# (free y scale because the two classes have very different counts).
tweets %>%
  ggplot(aes(x = account_age, fill = human_bot)) +
  geom_histogram() +
  facet_wrap(~ human_bot, scales = "free_y") +
  scale_fill_manual(labels = c("Human", "Bot"),
                    values = c("#1DA1F2", "red"),
                    name = "Human or Bot?") +
  labs(title = "Account Ages (in days) of Training Dataset",
       x = "Account Age",
       y = "Count")
# NOTE(review): every ggsave() below uses plot = last_plot(), which requires
# the preceding pipeline to have been displayed; when the script is sourced
# non-interactively that may not hold -- confirm, or pass the plot explicitly.
ggsave(filename = "plot_labeled_account_ages.png",
       plot = last_plot(),
       device = "png",
       path = "plots/",
       width = 8,
       height = 6,
       units = "in",
       dpi = 320)
# Fav vs Status count in training data (faceted by class; log-log axes).
tweets %>%
  ggplot(aes(x = favourites_count, y = statuses_count, color = human_bot)) +
  geom_point() +
  facet_wrap(~ human_bot) +
  scale_y_log10() +
  scale_x_log10() +
  scale_color_manual(labels = c("Human", "Bot"),
                     values = c("#1DA1F2", "red"),
                     name = "Human or Bot?") +
  labs(title = "Favorite count vs Status Count in Training Data",
       x = "Favorites count (log scale)",
       y = "Status count (log scale)")
ggsave(filename = "plot_labeled_fav_vs_status.png",
       plot = last_plot(),
       device = "png",
       path = "plots/",
       width = 8,
       height = 6,
       units = "in",
       dpi = 320)
# Same scatter, classes overlaid instead of faceted.
tweets %>%
  ggplot(aes(x = favourites_count, y = statuses_count, color = human_bot)) +
  geom_point() +
  scale_y_log10() +
  scale_x_log10() +
  scale_color_manual(labels = c("Human", "Bot"),
                     values = c("#1DA1F2", "red"),
                     name = "Human or Bot?") +
  labs(title = "Favorite count vs Status Count in Training Data",
       x = "Favorites count (log scale)",
       y = "Status count (log scale)")
ggsave(filename = "plot_labeled_fav_vs_status2.png",
       plot = last_plot(),
       device = "png",
       path = "plots/",
       width = 8,
       height = 6,
       units = "in",
       dpi = 320)
# Fav/Status Ratio vs Ave Statuses per Day in training data (faceted).
tweets %>%
  ggplot(aes(y = ave_status_per_day, x = fav_status_ratio, color = human_bot)) +
  geom_point() +
  scale_color_manual(labels = c("Human", "Bot"),
                     values = c("#1DA1F2", "red"),
                     name = "Human or Bot?") +
  scale_x_log10() +
  facet_wrap(~ human_bot) +
  labs(title = "Favorite/Status ratio vs Ave. Statuses per Day in Training\nData",
       x = "Favorite/Status Ratio (Log scale)",
       y = "Ave. Status per Day")
ggsave(filename = "plot_labeled_ratio_vs_status_per_day.png",
       plot = last_plot(),
       device = "png",
       path = "plots/",
       width = 8,
       height = 6,
       units = "in",
       dpi = 320)
# Same ratio scatter, classes overlaid.
tweets %>%
  ggplot(aes(y = ave_status_per_day, x = fav_status_ratio, color = human_bot)) +
  geom_point() +
  scale_color_manual(labels = c("Human", "Bot"),
                     values = c("#1DA1F2", "red"),
                     name = "Human or Bot?") +
  scale_x_log10() +
  labs(title = "Favorite/Status ratio vs Ave. Statuses per Day in Training\nData",
       x = "Favorite/Status Ratio (Log scale)",
       y = "Ave. Status per Day")
ggsave(filename = "plot_labeled_ratio_vs_status_per_day2.png",
       plot = last_plot(),
       device = "png",
       path = "plots/",
       width = 8,
       height = 6,
       units = "in",
       dpi = 320)
# ---- Unlabeled data: prediction scatter plots ------------------------------
# The original repeated the same four plots verbatim for each of the three
# classifiers (logistic regression, KNN, classification tree). The helpers
# below build each plot family once; only the prediction column and the
# output file name vary. Passing the plot explicitly to ggsave() (instead of
# last_plot()) also works when the script is sourced non-interactively.

# Predictions joined onto the unlabeled accounts, done once rather than
# before every plot.
unlabeled_pred <- unlabeled_tweets %>%
  left_join(results_tbl)

# Save a plot with the project's standard export settings.
save_unlabeled_plot <- function(p, file_name) {
  ggsave(filename = file_name,
         plot = p,
         device = "png",
         path = "plots/",
         width = 8,
         height = 6,
         units = "in",
         dpi = 320)
}

# Favourites vs statuses scatter (log-log), coloured by the prediction column
# named by pred_col, optionally faceted by it. as.factor() is a no-op when
# the column is already a factor, so applying it uniformly is safe.
plot_fav_vs_status <- function(pred_col, faceted) {
  p <- ggplot(unlabeled_pred,
              aes(x = favourites_count, y = statuses_count,
                  color = as.factor(.data[[pred_col]]))) +
    geom_point()
  if (faceted) {
    p <- p + facet_wrap(vars(as.factor(.data[[pred_col]])))
  }
  p +
    scale_y_log10() +
    scale_x_log10() +
    scale_color_manual(labels = c("Human", "Bot"),
                       values = c("#1DA1F2", "red"),
                       name = "Human or Bot?") +
    labs(title = "Favorite count vs Status Count in Unlabeled Data",
         x = "Favorites count (log scale)",
         y = "Status count (log scale)")
}

# Favourite/status ratio vs average statuses per day, same colouring scheme.
plot_ratio_vs_rate <- function(pred_col, faceted) {
  p <- ggplot(unlabeled_pred,
              aes(y = ave_status_per_day, x = fav_status_ratio,
                  color = as.factor(.data[[pred_col]]))) +
    geom_point() +
    scale_color_manual(labels = c("Human", "Bot"),
                       values = c("#1DA1F2", "red"),
                       name = "Human or Bot?") +
    scale_x_log10()
  if (faceted) {
    p <- p + facet_wrap(vars(as.factor(.data[[pred_col]])))
  }
  p +
    labs(title = "Favorite/Status ratio vs Ave. Statuses per Day in Unlabeled\nData",
         x = "Favorite/Status Ratio (Log scale)",
         y = "Ave. Status per Day")
}

# Prediction column and file-name tag for each classifier.
classifiers <- list(c("logistic_reg", "log"),
                    c("knn_results", "knn"),
                    c("class_tree_results", "tree"))

# Four plots per classifier, file names identical to the original script.
for (cls in classifiers) {
  pred_col <- cls[1]
  tag <- cls[2]
  save_unlabeled_plot(plot_fav_vs_status(pred_col, faceted = TRUE),
                      paste0("plot_unlabeled_", tag, "_fav_vs_status.png"))
  save_unlabeled_plot(plot_fav_vs_status(pred_col, faceted = FALSE),
                      paste0("plot_unlabeled_", tag, "_fav_vs_status2.png"))
  save_unlabeled_plot(plot_ratio_vs_rate(pred_col, faceted = TRUE),
                      paste0("plot_unlabeled_", tag, "_ratio_vs_status_per_day.png"))
  save_unlabeled_plot(plot_ratio_vs_rate(pred_col, faceted = FALSE),
                      paste0("plot_unlabeled_", tag, "_ratio_vs_status_per_day2.png"))
}
|
62ee409429b2805eddf232d741e4b850d771926d
|
36445f7373f3ff510d01f577b9d1ed7ad76b365c
|
/Experiment II/Engagement/engagement_script.R
|
888fb954f8adb244cd96800751b0c5eb1d44247c
|
[] |
no_license
|
datastatistics/VR2018
|
710e9248349c71152951e763ce27989645395b7d
|
a6d030b103e8e39c000997e31a7999aae2b67bbe
|
refs/heads/master
| 2021-06-24T23:51:04.101515
| 2017-09-11T16:10:54
| 2017-09-11T16:10:54
| 103,141,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,829
|
r
|
engagement_script.R
|
source("Common_files/load_packages.R")
source("Common_files/statistics_analysis.R")
source("Common_files/utils.R")
# Engagement questionnaire answers, wide -> long, then one mean score per
# user x setup.
eng_data.df <- read.csv("Experiment II/Engagement/Data/engagement_data.csv", sep = ";")
eng_data.df <- melt(eng_data.df, id.vars = c("user", "setup"))
melted.df <- ddply(eng_data.df, .(user, setup), summarise, value = mean(value))

# Result table: one row per pairwise setup comparison (p-value + star code).
engagement <- data.frame(c("int_cross", "int_hand", "loc_joy", "loc_wip"),
                         matrix(nrow = 4, ncol = 2))
colnames(engagement) <- c("setup", "value", "star")

# Normality check backing the parametric tests below.
shapiro.test(melted.df$value)

# Pairwise comparisons, replacing four copy-pasted blocks. Row order matches
# the labels in `engagement$setup` above:
#   row 1: setups 1 vs 2, row 2: 3 vs 4, row 3: 1 vs 3, row 4: 2 vs 4.
setup_pairs <- list(c("1", "2"), c("3", "4"), c("1", "3"), c("2", "4"))
for (i in seq_along(setup_pairs)) {
  setup.df <- eng_data.df[eng_data.df$setup %in% setup_pairs[[i]], ]
  engagement[i, 2] <- parametric_independent_analysis(setup.df$value, setup.df$setup,
                                                      NULL, 2)
  engagement[i, 3] <- significance_level_star(engagement[i, 2])
}
#######################################################################################
# Mean engagement per setup with standard errors, then relabel the numeric
# setup codes for plotting (1/3 = Joystick, 2/4 = WIP; 1-2 used the
# crosshair interaction, 3-4 the pointing finger).
melted.df <- ddply(melted.df, .(setup), summarise, sd = sd(value),
                   se = sd/sqrt(length(value)), value = mean(value))
melted.df[melted.df == "1"] = "Joystick"
melted.df[melted.df == "2"] = "WIP"
melted.df[melted.df == "3"] = "Joystick"
melted.df[melted.df == "4"] = "WIP"
melted.df$int <- c("Crosshair", "Crosshair", "Pointing Finger", "Pointing Finger")
# Interaction plot: engagement score by locomotion, one line per interaction
# technique, error bars = +/- 1 SE.
# NOTE(review): chaining `+ ggsave(...)` onto a ggplot relies on ggsave's
# evaluation-time side effect and is fragile across ggplot2 versions --
# confirm it still saves, or call ggsave() as a separate statement.
ggplot(melted.df, aes(x = setup, y = value)) +
  geom_point() +
  geom_errorbar(aes(ymin=value-se, ymax=value+se),
                stat = "identity", width = .1, position=position_dodge(width=0.9)) +
  geom_line(aes(group = int, color = factor(int)), size = 1) +
  scale_color_hue(guide = guide_legend(title = "Interaction"),
                  labels = c("Head orientation", "Ray-casting")) +
  theme(legend.position = "top") + g_theme() +
  scale_x_discrete(labels = c("Joystick", "WIP")) +
  labs(y = "Score", x = "Locomotion", title = "Game Engagement") +
  ggsave("Experiment II/Engagement/Charts/engagement.eps", width = 6, height = 5)
rm(melted.df, eng_data.df, setup.df)
|
5608e865b3f82b8c5fba4029c445b246334432cf
|
0603cd2c6798c62b21769e970d02951b15b6628b
|
/run_analysis.R
|
ee90e1753f198d1f80f04be7bbeea0732ae85885
|
[] |
no_license
|
vonphoebe/Getting-and-Cleaning-Data-Course-Project
|
20c5e6564f2e6d236df9f76763373848a98225c6
|
11634b436033a9fd522a542e6d03ef5c41cbefd4
|
refs/heads/master
| 2021-01-20T15:36:51.942890
| 2016-07-24T06:11:54
| 2016-07-24T06:11:54
| 64,040,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,922
|
r
|
run_analysis.R
|
## Create one R script called run_analysis.R that does the following
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(data.table)
library(plyr)
#load all the data
## NOTE(review): all paths are relative -- this assumes the working directory
## is the root of the UCI HAR Dataset folder; confirm before running.
featureNames <- read.table("./features.txt")
activity_labels <-read.table("./activity_labels.txt", header = FALSE)[,2]
subjectTest <- read.table("./test/subject_test.txt", header = FALSE)
activityTest <- read.table("./test/y_test.txt", header = FALSE)
featuresTest <- read.table("./test/X_test.txt", header = FALSE)
subjectTrain <- read.table("./train/subject_train.txt", header = FALSE)
activityTrain <- read.table("./train/y_train.txt", header = FALSE)
featuresTrain <- read.table("./train/X_train.txt", header = FALSE)
#process test data sets, subjectTest, activityTest, featuresTest
table(subjectTest$V1) ## 9 subjects in subjectTest
names(subjectTest) <- "Subject"
names(activityTest) <- "Activity"
##3. Use descriptive activity names to name the activities in the activityTest data set
## Adds a second column (V2) holding the activity label looked up from the
## numeric activity code in column 1.
activityTest[,2] <- activity_labels[activityTest[,1]] ## activityTest has 2 variables now
##label columns of featuresTest with featureNames
## featureNames column 2 holds the measurement names; transpose to a vector.
names(featuresTest) <- t(featureNames[2])
TestData <- cbind(activityTest, subjectTest, featuresTest)
#process train data sets , subjectTrain, activityTrain, featuresTrain
table(subjectTrain$V1) ## 21 subjects in subjectTrain
names(subjectTrain) <- "Subject"
names(activityTrain) <- "Activity"
##3. Use descriptive activity names to name the activities in the activityTrain data set
activityTrain[,2] <- activity_labels[activityTrain[,1]] ## activityTrain has 2 variables now
##label columns of featureTrain with featureNames
names(featuresTrain) <- t(featureNames[2])
TrainData <- cbind(activityTrain, subjectTrain, featuresTrain)
##1. Merge the traing and the test sets to create one data set
CompleteData <- rbind(TestData, TrainData)
##rename column V2 to Activity_label
setnames(CompleteData,"V2","Activity_label")
##2. Extract only the measurements on the mean and SD for each measurement.
## Case-insensitive so both "mean()" / "Mean" style feature names are caught.
columnsWithMeanSTD <- grep("mean|std", names(CompleteData), ignore.case=TRUE) ##86
## Columns 1-3 are Activity, Activity_label, Subject -- always kept.
requiredColumns <- c(1, 2, 3, columnsWithMeanSTD) #89
dim(CompleteData)
## [1] 10299 564
meanstd <- CompleteData[,requiredColumns]
dim(meanstd)
##[1] 10299 89
##4. Appropriately labels the data set with descriptive variable names
## Expand the terse feature-name abbreviations into readable words.
names(meanstd)
names(meanstd)<-gsub("Acc", "Accelerometer", names(meanstd))
names(meanstd)<-gsub("Gyro", "Gyroscope", names(meanstd))
names(meanstd)<-gsub("BodyBody", "Body", names(meanstd))
names(meanstd)<-gsub("Mag", "Magnitude", names(meanstd))
names(meanstd)<-gsub("^t", "Time", names(meanstd))
names(meanstd)<-gsub("^f", "Frequency", names(meanstd))
names(meanstd)<-gsub("tBody", "TimeBody", names(meanstd))
names(meanstd)<-gsub("angle", "Angle", names(meanstd))
names(meanstd)<-gsub("-mean()", "Mean", names(meanstd), ignore.case = TRUE)
names(meanstd)<-gsub("-std()", "STD", names(meanstd), ignore.case = TRUE)
names(meanstd)
##From the data set in step 4, created a second, independent tidy data set with the average
##of each variable for each activity and each subject
## After aggregate(), column 1 is Subject, column 2 is Activity (numeric code),
## column 3 is Activity_label (mean() over characters yields NA/warnings here).
tidyData <- aggregate(. ~Subject + Activity, meanstd, mean)##180 obs, 89 var
tidyData <- tidyData[order(tidyData$Subject,tidyData$Activity),]
##3. Use descriptive activity names to name the activities in the data set again
## Re-fill column 3 (Activity_label) from the numeric code in column 2, since
## the aggregate step above destroyed the character labels.
tidyData[,3] <- activity_labels[tidyData[,2]]
write.table(tidyData, file = "Tidy.txt", row.names = FALSE)
|
c846c52badc901a96e50149e218cd3ae62aac7ac
|
3e30053945a0b0012901e4b33fd95c39bd8f154b
|
/DAISIE/man/get_ana_rate.Rd
|
f2b5eb0bae9472752f2a240704acc091b6edb9c0
|
[] |
no_license
|
richelbilderbeek/DAISIE
|
e0442c1dcf278642ee1c4a57bb817f1e27ec7e5a
|
f476f5bcda00909e86beb73869ca23a6fdeaf52f
|
refs/heads/master
| 2020-03-19T11:10:12.881461
| 2018-12-12T17:40:36
| 2018-12-12T17:40:36
| 136,436,234
| 0
| 1
| null | 2018-09-19T09:02:56
| 2018-06-07T07:05:37
|
R
|
UTF-8
|
R
| false
| true
| 743
|
rd
|
get_ana_rate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DAISIE_rates.R
\name{get_ana_rate}
\alias{get_ana_rate}
\title{Calculate anagenesis rate}
\usage{
get_ana_rate(laa, island_spec)
}
\arguments{
\item{laa}{per capita anagenesis rate}
\item{island_spec}{matrix with current state of system}
}
\description{
Internal function.
Calculates the anagenesis rate given the current number of
immigrant species and the per capita rate.
}
\seealso{
Does the same as \link{DAISIE_calc_clade_ana_rate}
Other rates calculation: \code{\link{DAISIE_plot_area}},
\code{\link{get_ext_rate}}, \code{\link{get_immig_rate}},
\code{\link{get_thor}}, \code{\link{island_area}},
\code{\link{update_rates}}
}
\author{
Pedro Neves
}
|
ebf03ebc3c10b33dc2c9d4773785321e3da7471b
|
538a7e7069ea7efa2386b552cdca126f230a38f4
|
/R/ranking.R
|
931f7b60ddbf0dffe7d768a1efb5690d91be671b
|
[] |
no_license
|
jakobbossek/evoprob
|
b803729f358e509b0f1d33fa0ffeb67e3ce5feaa
|
d925adb303fe603603625a7affec6c61e635b9d8
|
refs/heads/main
| 2023-05-31T19:35:22.322363
| 2021-06-29T19:12:58
| 2021-06-29T19:12:58
| 351,442,361
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
ranking.R
|
#' @title Get algorithm ranking.
#'
#' @description Given a named vector of performance values the function returns
#' the names in sorted order of performance.
#'
#' @param x [\code{numeric} | \code{list of numeric vectors}]\cr
#'   Named vector of at least two performance values or list of such.
#' @param maximize [\code{logical(1)}]\cr
#'   Is the goal to maximize performance values?
#'   Defaults to \code{FALSE}.
#' @param as.string [\code{logical(1L)}]\cr
#'   Convert to string representation, i.e. \code{c("A", "B", "C")} to
#'   \code{"A---B---C"}.
#' @param sep [\code{character(1)}]\cr
#'   Separator used to \dQuote{glue together} if \code{as.string = TRUE}.
#'   Default is \dQuote{---}.
#' @return [\code{character}] \code{names(x)} in order of performance.
#'   If \code{x} is a list, a list of such rankings (or a character vector of
#'   strings when \code{as.string = TRUE}).
#' @export
get_algorithm_ranking = function(x, maximize = FALSE, as.string = FALSE, sep = "---") {
  # Rank a single named vector: order names by their performance values.
  rank.fun = function(x, maximize, as.string) {
    ns = names(x)
    ord = order(x, decreasing = maximize)
    res = ns[ord]
    if (as.string)
      # Bug fix: previously called re::collapse(), an undeclared non-CRAN
      # dependency; base::paste() with collapse does the same join.
      res = paste(res, collapse = sep)
    return(res)
  }
  if (is.list(x)) {
    res = lapply(x, rank.fun, maximize, as.string)
    if (as.string)
      return(unlist(res))
    else
      return(res)
  }
  rank.fun(x, maximize, as.string)
}
|
d7fb58f314fc1408f01ea2368d466650214e835b
|
2b837f06f5b756dd9da76f613c5d58308a01f828
|
/man/lagInfluDataFHI.Rd
|
bba3e94523b8c5e861662409a9c7129d7b2de802
|
[
"MIT"
] |
permissive
|
Rapporteket/korona
|
f9f495af8a9aeeb092c71ff5276c384cf6b8e719
|
b54d69883d30405a98ba133fd585a47fb6cfae96
|
refs/heads/rel
| 2023-07-07T18:41:47.818867
| 2023-06-27T10:21:20
| 2023-06-27T10:21:20
| 250,873,426
| 0
| 0
|
NOASSERTION
| 2022-03-09T15:52:29
| 2020-03-28T19:09:35
|
R
|
UTF-8
|
R
| false
| true
| 392
|
rd
|
lagInfluDataFHI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FunksjonerDatafilerFHI.R
\name{lagInfluDataFHI}
\alias{lagInfluDataFHI}
\title{Henter data og velger variabler for overføring til FHI}
\usage{
lagInfluDataFHI(personIDvar = "PersonIdBC19Hash")
}
\value{
#Influensadata tilrettelagt for FHI
}
\description{
Henter data og velger variabler for overføring til FHI
}
|
d283a48b543f1dc177672de3804bca47cab15af2
|
b73ba9d91f872931cbf88d50999411c0bb7c211e
|
/R/process_casts.R
|
371a431221614ab52b3f550340f9b99bb71ba3c7
|
[
"MIT"
] |
permissive
|
weecology/portalcasting
|
73347ce66f8c1e5c080a1f1029ec17026c912588
|
a35a77214d41dbdaa50bb39452b5fe49c3763a83
|
refs/heads/main
| 2023-08-20T12:48:59.392495
| 2023-05-23T01:16:33
| 2023-05-23T01:16:33
| 129,144,321
| 8
| 12
|
NOASSERTION
| 2023-05-23T01:16:34
| 2018-04-11T19:34:03
|
R
|
UTF-8
|
R
| false
| false
| 21,875
|
r
|
process_casts.R
|
#' @title Process and Save Forecast Output to Files
#'
#' @description Take the model fit and forecast output, process them into savable objects, and save them to the output folders. \cr
#' The forecast metadata file is updated accordingly to track the saved output. \cr
#' `add_observations_to_forecast_table` appends a column of observations to a forecast's forecast tab. If a model interpolated a data set, it adds the true (non-interpolated) observations so that model predictions are all compared to the same data. \cr
#' `select_forecasts` determines the forecasts that match user specifications. Functionally, it is a wrapper on [`read_forecasts_metadata`] with filtering for specifications that provides a simple user interface to the large set of available forecasts via the metadata.
#'
#' @param model_fit,model_forecast Output from a model's fit and forecast functions.
#'
#' @param forecast_id,forecasts_ids `integer` (or integer `numeric`) value(s) representing the forecast(s) of interest, as indexed within the directory in the `casts` sub folder. See the forecasts metadata file (`forecasts_metadata.csv`) for summary information. If `NULL` (the default), the most recently generated forecast's output is read in. \cr
#' `forecasts_ids` can be NULL, one value, or more than one values, `forecast_id` can only be NULL or one value.
#'
#' @param model,models `character` values of the name(s) of the model(s) of interest, as indexed within the directory in the `forecasts` sub folder. See the forecasts metadata file (`forecasts_metadata.csv`) for summary information. If `NULL` (the default), the most recently generated forecast's output is read in. \cr
#' `models` can be NULL, one value, or more than one values, `model` can only be NULL or one value.
#'
#' @param dataset,datasets `character` vector of the rodent dataset name(s) to include.
#' `datasets` can be NULL, one value, or more than one values, `dataset` can only be NULL or one value.
#'
#' @param species `character` value of the species codes (or `"total"` for the total across species) to include. Default value is `NULL`, which equates to no selection with respect to `species`.
#'
#' @param forecast_table A `data.frame` of a cast's output. See [`read_forecast_table`].
#'
#' @param historic_end_newmoonnumbers `integer` (or integer `numeric`) newmoon numbers of the forecast origin. Default value is `NULL`, which equates to no selection.
#'
#' @param forecasts_groups `integer` (or integer `numeric`) value of the forecast groups to include. Default value is `NULL`, which equates to no selection with respect to `forecast_group`.
#'
#' @param main `character` value of the name of the main component of the directory tree.
#'
#' @param forecasts_metadata `data.frame` of forecast metadata. If `NULL` (default), will try to read via [`read_forecasts_metadata`].
#'
#' @details Four model-specific output components are saved and returned:
#' * `forecast_metadata`: saved out with [`write_yaml`][yaml::write_yaml].
#' * `forecast_tab`: saved using [`write_csv_arrow`].
#' * `model_fit`: saved out as a serialized `JSON` file via [`serializeJSON`][jsonlite::serializeJSON] and [`read_json`][jsonlite::read_json], so quite flexible with respect to specific object structure.
#' * `model_forecast`: saved out as a serialized `JSON` file via [`serializeJSON`][jsonlite::serializeJSON] and [`read_json`][jsonlite::read_json], so quite flexible with respect to specific object structure.
#'
#' @return `process_model_output`: relevant elements are saved to external files, and returned as a `list`. \cr
#' `read_forecast_table`: forecast table `data.frame`. \cr
#' `read_forecast_tables`: `data.frame` of combined forecast tables. \cr
#' `add_observations_to_forecast_table`: forecast table `data.frame` with an observation column added. \cr
#' `read_forecast_metadata`: `list` of `forecast_metadata`. \cr
#' `read_model_fit`: forecast output (typically as a `list`). \cr
#' `read_model_forecast`: forecast output (typically as a `list`). \cr
#' `select_forecasts`: `data.frame` of selected forecasts' metadata. \cr
#' `read_forecasts_metadata`: `data.frame` of forecasts' metadata.
#'
#' @name process forecast output
#'
#' @aliases process-forecasts
#'
#' @family core
#'
#' @examples
#' \dontrun{
#' main1 <- file.path(tempdir(), "forecast_output")
#'
#' setup_dir(main = main1)
#' dataset <- "all"
#' species <- "DM"
#' model <- "AutoArima"
#'
#' abundance <- prepare_abundance(main = main1,
#' dataset = dataset,
#' species = species,
#' model = model)
#' model_controls <- models_controls(main = main1,
#' models = model)[[model]]
#' metadata <- read_metadata(main = main1)
#' newmoons <- read_newmoons(main = main1)
#' covariates <- read_covariates(main = main1)
#'
#' fit_args <- named_null_list(element_names = names(model_controls$fit$args))
#' for (i in 1:length(fit_args)) {
#' fit_args[[i]] <- eval(parse(text = model_controls$fit$args[i]))
#' }
#' model_fit <- do.call(what = model_controls$fit$fun,
#' args = fit_args)
#'
#'
#' forecast_args <- named_null_list(element_names = names(model_controls$forecast$args))
#' for (i in 1:length(forecast_args)) {
#' forecast_args[[i]] <- eval(parse(text = model_controls$forecast$args[i]))
#' }
#'
#' model_forecast <- do.call(what = model_controls$forecast$fun,
#' args = forecast_args)
#'
#' process_model_output(main = main1,
#' model_fit = model_fit,
#' model_forecast = model_forecast,
#' model = model,
#' dataset = dataset,
#' species = species)
#'
#' cast_table <- read_forecast_table(main = main1)
#' cast_table2 <- add_observations_to_forecast_table(main = main1,
#' forecast_table = cast_table)
#' cast_tables <- read_forecast_tables(main = main1)
#' cast_metadata <- read_forecast_metadata(main = main1)
#' cast_forecast <- read_model_forecast(main = main1)
#'
#' casts_metadata <- read_forecasts_metadata(main = main1)
#' select_forecasts(main = main1)
#'
#' unlink(main1, recursive = TRUE)
#' }
#'
NULL
#' @rdname process-forecast-output
#'
#' @export
#'
process_model_output <- function (main = ".",
                                  model_fit = NULL,
                                  model_forecast,
                                  model,
                                  dataset,
                                  species) {
  settings <- read_directory_settings(main = main)
  forecasts_metadata <- read_forecasts_metadata(main = main)
  metadata <- read_metadata(main = main)
  # Next forecast id: one greater than the largest id recorded so far (0 when
  # the metadata file is empty); coerced to numeric to tolerate legacy ids.
  ids <- forecasts_metadata$forecast_id
  ids <- as.numeric(ids)
  next_id <- ceiling(max(c(0, ids), na.rm = TRUE)) + 1
  model_controls <- read_models_controls(main = main)[[model]]
  # Forecast-specific metadata: the directory metadata specialized to this
  # model / dataset / species combination.
  # NOTE(review): `model_controls` was already subset by [[model]] above, so
  # `model_controls[[model]]` below subsets a second time and looks like it
  # would be NULL -- confirm whether this should be just `model_controls`.
  forecast_metadata <- update_list(metadata,
                                   forecast_id = next_id,
                                   model = model,
                                   dataset = dataset,
                                   species = species,
                                   model_controls = model_controls[[model]],
                                   dataset_controls = metadata$datasets_controls[[dataset]])
  # One row per forecast newmoon: point estimate plus prediction interval,
  # tagged with the full provenance (model, dataset, species, time window).
  forecast_table <- data.frame(lead_time_newmoons = 1:metadata$time$lead_time_newmoons,
                               max_lag = metadata$time$max_lag,
                               lag_buffer = metadata$time$lag_buffer,
                               origin = metadata$time$origin,
                               forecast_date = metadata$time$forecast_date,
                               forecast_month = metadata$time$forecast_months,
                               forecast_year = metadata$time$forecast_years,
                               newmoonnumber = metadata$time$forecast_newmoonnumbers,
                               currency = metadata$datasets_controls[[dataset]]$args$output,
                               model = model,
                               dataset = dataset,
                               species = species,
                               estimate = as.numeric(model_forecast$mean),
                               lower_pi = as.numeric(model_forecast$lower[ , 1]),
                               upper_pi = as.numeric(model_forecast$upper[ , 1]),
                               historic_start_newmoonnumber = metadata$time$historic_start_newmoonnumber,
                               historic_end_newmoonnumber = metadata$time$historic_end_newmoonnumber,
                               forecast_start_newmoonnumber = metadata$time$forecast_start_newmoonnumber,
                               forecast_end_newmoonnumber = metadata$time$forecast_end_newmoonnumber,
                               confidence_level = metadata$confidence_level,
                               forecast_group = metadata$forecast_group,
                               old_cast_id = NA,
                               forecast_id = forecast_metadata$forecast_id)
  pkg_version <- metadata$directory_configuration$setup$core_package_version
  # New row for the directory-wide forecasts metadata file, tracking this
  # forecast; QAQC = TRUE so it is visible to select_forecasts().
  new_forecast_metadata <- data.frame(forecast_id = forecast_metadata$forecast_id,
                                      old_cast_id = NA,
                                      forecast_group = forecast_metadata$forecast_group,
                                      forecast_date = forecast_metadata$time$forecast_date,
                                      origin = forecast_metadata$time$origin,
                                      historic_start_newmoonnumber = forecast_metadata$time$historic_start_newmoonnumber,
                                      historic_end_newmoonnumber = forecast_metadata$time$historic_end_newmoonnumber,
                                      forecast_start_newmoonnumber = forecast_metadata$time$forecast_start_newmoonnumber,
                                      forecast_end_newmoonnumber = forecast_metadata$time$forecast_end_newmoonnumber,
                                      lead_time_newmoons = forecast_metadata$time$lead_time_newmoons,
                                      model = model,
                                      dataset = dataset,
                                      species = species,
                                      portalcasting_version = pkg_version,
                                      QAQC = TRUE,
                                      notes = NA)
  forecasts_metadata <- rbind(forecasts_metadata, new_forecast_metadata)
  # Persist the four artifacts only when the directory is configured to save.
  # Filenames are keyed by forecast_id; metadata is YAML, the table is CSV,
  # and the fit/forecast objects are serialized JSON.
  if (settings$save) {
    # update these to be write_data calls
    forecast_metadata_filename <- paste0("forecast_id_", forecast_metadata$forecast_id, "_metadata.yaml")
    forecast_metadata_path <- file.path(main, settings$subdirectories$forecasts, forecast_metadata_filename)
    write_yaml(x = forecast_metadata,
               file = forecast_metadata_path)
    forecast_table_filename <- paste0("forecast_id_", forecast_metadata$forecast_id, "_forecast_table.csv")
    forecast_table_path <- file.path(main, settings$subdirectories$forecasts, forecast_table_filename)
    row.names(forecast_table) <- NULL
    write_csv_arrow(x = forecast_table,
                    file = forecast_table_path)
    row.names(forecasts_metadata) <- NULL
    write_csv_arrow(x = forecasts_metadata,
                    file = forecasts_metadata_path(main = main))
    # NOTE(review): the fit is written to the `fits` subdirectory, but
    # read_model_fit() reads from the `forecasts` subdirectory -- verify.
    model_fit_filename <- paste0("forecast_id_", forecast_metadata$forecast_id, "_model_fit.json")
    model_fit_path <- file.path(main, settings$subdirectories$fits, model_fit_filename)
    model_fit_json <- serializeJSON(x = model_fit)
    write_json(x = model_fit_json,
               path = model_fit_path)
    model_forecast_filename <- paste0("forecast_id_", forecast_metadata$forecast_id, "_model_forecast.json")
    model_forecast_path <- file.path(main, settings$subdirectories$forecasts, model_forecast_filename)
    model_forecast_json <- serializeJSON(x = model_forecast)
    write_json(x = model_forecast_json,
               path = model_forecast_path)
  }
  # Return the processed components invisibly-visible as a list for callers.
  list(forecast_metadata = forecast_metadata,
       forecast_table = forecast_table,
       model_fit = model_fit,
       model_forecast = model_forecast)
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_forecast_table <- function (main = ".",
                                 forecast_id = NULL) {
  settings <- read_directory_settings(main = main)
  # Default to the most recently generated forecast.
  if (is.null(forecast_id)) {
    forecast_id <- max(select_forecasts(main = main)$forecast_id)
  }
  table_file <- paste0("forecast_id_", forecast_id, "_forecast_table.csv")
  table_path <- file.path(main, settings$subdirectories$forecasts, table_file)
  if (!file.exists(table_path)) {
    stop("forecast_id does not have a forecast_table")
  }
  forecast_table <- na_conformer(as.data.frame(read_csv_arrow(file = table_path)))
  # Ensure species codes are character (e.g. codes that look like NA).
  class(forecast_table$species) <- "character"
  forecast_table
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_forecasts_tables <- function (main = ".",
                                   forecasts_ids = NULL) {
  settings <- read_directory_settings(main = main)
  # Default to the most recently generated forecast.
  if (is.null(forecasts_ids)) {
    forecasts_meta <- select_forecasts(main = main)
    forecasts_ids <- max(forecasts_meta$forecast_id)
  }
  # Read each table once and bind in a single pass rather than growing the
  # data.frame with rbind() inside a loop (which copies on every iteration).
  forecast_tables <- lapply(forecasts_ids,
                            function (id) read_forecast_table(main = main,
                                                              forecast_id = id))
  do.call(rbind, forecast_tables)
}
#' @rdname process-forecast-output
#'
#' @export
#'
add_observations_to_forecast_table <- function (main = ".",
                                                forecast_table = NULL) {
  return_if_null(forecast_table)
  # Strip the "dm_" prefix so interpolated datasets are compared against the
  # true (non-interpolated) observations.
  dataset_name <- gsub("dm_", "", forecast_table$dataset[1])
  species_code <- forecast_table$species[1]
  forecast_table$observation <- NA
  rodents <- read_rodents_dataset(main = main,
                                  dataset = dataset_name)
  # Align observations to the forecast rows by newmoon number.
  moon_rows <- match(forecast_table$newmoonnumber, rodents$newmoonnumber)
  forecast_table$observation <- rodents[moon_rows, species_code]
  forecast_table
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_forecast_metadata <- function (main = ".",
                                    forecast_id = NULL) {
  settings <- read_directory_settings(main = main)
  # Default to the most recently generated forecast.
  if (is.null(forecast_id)) {
    forecast_id <- max(select_forecasts(main = main)$forecast_id)
  }
  metadata_file <- paste0("forecast_id_", forecast_id, "_metadata.yaml")
  metadata_path <- file.path(main, settings$subdirectories$forecasts, metadata_file)
  if (!file.exists(metadata_path)) {
    stop("forecast_id does not have a forecast_metadata file")
  }
  read_yaml(metadata_path, eval.expr = TRUE)
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_model_fit <- function (main = ".",
                            forecast_id = NULL) {
  settings <- read_directory_settings(main = main)
  # Default to the most recently generated forecast.
  if (is.null(forecast_id)) {
    forecasts_meta <- select_forecasts(main = main)
    forecast_id <- max(forecasts_meta$forecast_id)
  }
  fit_filename <- paste0("forecast_id_", forecast_id, "_model_fit.json")
  # Bug fix: process_model_output() saves fits into the `fits` subdirectory,
  # but this reader only looked in `forecasts`, so newly written fits could
  # never be read. Check `fits` first, then fall back to `forecasts` for
  # files written by older versions.
  cpath <- file.path(main, settings$subdirectories$fits, fit_filename)
  if (!file.exists(cpath)) {
    cpath <- file.path(main, settings$subdirectories$forecasts, fit_filename)
  }
  if (file.exists(cpath)) {
    # Fits are stored as serialized JSON; unserialize back to the R object.
    read_in_json <- fromJSON(readLines(cpath))
    unserializeJSON(read_in_json)
  } else {
    stop("forecast_id does not have a model_fit file")
  }
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_model_forecast <- function (main = ".",
                                 forecast_id = NULL) {
  settings <- read_directory_settings(main = main)
  # Default to the most recently generated forecast. Ids are normalized by
  # mapping "-" to "." before max() -- presumably a legacy id format; verify.
  if (is.null(forecast_id)) {
    forecasts_meta <- select_forecasts(main = main)
    forecast_id <- max(as.numeric(gsub("-", ".", forecasts_meta$forecast_id)))
  }
  forecast_dir <- file.path(main, settings$subdirectories$forecasts)
  json_path <- file.path(forecast_dir, paste0("forecast_id_", forecast_id, "_model_forecast.json"))
  rdata_path <- file.path(forecast_dir, paste0("forecast_id_", forecast_id, "_model_forecast.RData"))
  if (file.exists(json_path)) {
    # Current format: serialized JSON.
    unserializeJSON(fromJSON(readLines(json_path)))
  } else if (file.exists(rdata_path)) {
    # Legacy format: an .RData file that loads a `model_forecasts` object.
    model_forecasts <- NULL
    load(rdata_path)
    model_forecasts
  } else {
    stop("forecast_id does not have a model_forecast file")
  }
}
#' @rdname process-forecast-output
#'
#' @export
#'
select_forecasts <- function (main = ".",
                              forecasts_metadata = NULL,
                              forecasts_ids = NULL,
                              forecasts_groups = NULL,
                              models = NULL,
                              datasets = NULL,
                              species = NULL,
                              historic_end_newmoonnumbers = NULL) {
  settings <- read_directory_settings(main = main)
  forecasts_metadata <- ifnull(forecasts_metadata, read_forecasts_metadata(main = main))
  # For every selector, NULL means "no filtering": default to the full set of
  # values present among QAQC-passing records.
  qaqc <- forecasts_metadata$QAQC
  forecasts_ids <- ifnull(forecasts_ids, unique(forecasts_metadata$forecast_id[qaqc]))
  match_id <- forecasts_metadata$forecast_id %in% forecasts_ids
  forecasts_groups <- ifnull(forecasts_groups, unique(forecasts_metadata$forecast_group[qaqc]))
  match_group <- forecasts_metadata$forecast_group %in% forecasts_groups
  end_moons <- ifnull(historic_end_newmoonnumbers, unique(forecasts_metadata$historic_end_newmoonnumber[qaqc]))
  match_end_moon <- forecasts_metadata$historic_end_newmoonnumber %in% end_moons
  models <- ifnull(models, unique(forecasts_metadata$model[qaqc]))
  match_model <- forecasts_metadata$model %in% models
  datasets <- ifnull(datasets, unique(forecasts_metadata$dataset[qaqc]))
  match_dataset <- forecasts_metadata$dataset %in% datasets
  # Older metadata files may predate the species column.
  if ("species" %in% colnames(forecasts_metadata)) {
    species <- ifnull(species, unique(forecasts_metadata$species[qaqc]))
    match_species <- forecasts_metadata$species %in% species
  } else {
    match_species <- rep(TRUE, length(match_id))
  }
  # Bug fix: match_group was previously computed but omitted from the final
  # filter, so the forecasts_groups argument had no effect on the selection.
  forecasts_metadata[match_id & match_group & match_end_moon & match_model &
                       match_dataset & match_species & qaqc, ]
}
#' @rdname process-forecast-output
#'
#' @export
#'
read_forecasts_metadata <- function (main = ".") {
  settings <- read_directory_settings(main = main)
  meta_path <- forecasts_metadata_path(main = main)
  # Bootstrap an empty metadata file on first use: a single placeholder row
  # with forecast_group = 0 that is filtered back out on every read.
  if (!file.exists(meta_path)) {
    messageq(" **creating forecast metadata file**", quiet = settings$quiet)
    placeholder <- data.frame(forecast_id = NA,
                              old_cast_id = NA,
                              forecast_group = 0,
                              forecast_date = NA,
                              origin = NA,
                              historic_start_newmoonnumber = NA,
                              historic_end_newmoonnumber = NA,
                              forecast_start_newmoonnumber = NA,
                              forecast_end_newmoonnumber = NA,
                              lead_time_newmoons = NA,
                              model = NA,
                              dataset = NA,
                              species = NA,
                              portalcasting_version = NA,
                              QAQC = NA,
                              notes = NA)
    row.names(placeholder) <- NULL
    write_csv_arrow(x = placeholder,
                    file = meta_path)
  }
  metadata <- as.data.frame(read_csv_arrow(file = meta_path))
  # Older files lack the species column; na_conformer only applies when present.
  if ("species" %in% colnames(metadata)) {
    metadata <- na_conformer(metadata)
  }
  metadata[metadata$forecast_group != 0, ]
}
|
e24cf0a71e9d7101cf631b468004e0580c2eef9d
|
0ff06478c18026955ebf512cd3dcaef7293e1c30
|
/R/erccSpikeConcentration.R
|
3f770edf1ac47d660cf7a69766ce15a997798b37
|
[
"CC0-1.0"
] |
permissive
|
charles-plessy/smallCAGEqc
|
83d19b21890eed9455eaca13c87455bd53f45950
|
e3642f25b43314779c33388129b5d47a5a1538ec
|
refs/heads/master
| 2021-03-13T01:36:47.956099
| 2018-01-25T04:27:20
| 2018-01-25T04:27:20
| 34,089,765
| 1
| 1
| null | 2017-03-22T05:47:31
| 2015-04-17T01:24:16
|
R
|
UTF-8
|
R
| false
| false
| 2,232
|
r
|
erccSpikeConcentration.R
|
#' erccSpikeConcentration
#'
#' Various data related to ERCC spikes
#'
#' In quantitative transcriptome analysis, we often add synthetic RNA to the reaction for
#' quality control and normalisation, and the External RNA Controls Consortium (ERCC)
#' spikes are a popular choice, available commercially from Invitrogen (now Thermo Fisher).
#' In the commercial product, the spikes have different concentrations, covering six
#' orders of magnitude. These concentrations are given in a text file on the vendor's
#' webiste, and I do not know if the file is freely redistributable, hence this function
#' to retreive the data from the Internet or a local file.
#'
#' @param file File name or URL where to find the \sQuote{cms_095046.txt} text file.
#'   Defaults to the current URL on the Thermo Fisher website.
#'
#' @return A data frame representing the file \sQuote{cms_095046.txt} from the vendors
#' website, with rows named by ERCC spike id.
#'
#' The original column names are \sQuote{Re-sort ID}, \sQuote{ERCC ID}, \sQuote{subgroup},
#' \sQuote{concentration in Mix 1 (attomoles/ul)},
#' \sQuote{concentration in Mix 2 (attomoles/ul)}, \sQuote{expected fold-change ratio},
#' \sQuote{log2(Mix 1/Mix 2)}, but this not fit well for a R data frame. Therefore,
#' they are renamed as: \sQuote{sortID}, \sQuote{erccID}, \sQuote{subgroup},
#' \sQuote{concMix1}, \sQuote{concMix2}, \sQuote{FC}, \sQuote{log2FC}.
#'
#' @seealso loadMoiraiStats
#'
#' @export erccSpikeConcentration
erccSpikeConcentration <- function (file="http://tools.thermofisher.com/content/sfs/manuals/cms_095046.txt") {
  # Fix: `head=T` relied on partial argument matching (to `header`) and the
  # reassignable shortcut T; spell out `header = TRUE` explicitly.
  ercc <- read.table( file
                    , header = TRUE
                    , sep = "\t"
                    , colClasses = c( "numeric"
                                    , "character"
                                    , "factor"
                                    , rep("numeric", 4))
                    , col.names  = c( "sortID"
                                    , "erccID"
                                    , "subgroup"
                                    , "concMix1"
                                    , "concMix2"
                                    , "FC"
                                    , "log2FC"))
  # Index rows by ERCC spike id for convenient lookup.
  rownames(ercc) <- ercc$erccID
  ercc
}
|
14da6fa0746c8a9cbac95f09f672397d18aebbda
|
188cccf91562a5b65e060ffda807a0aa636436f4
|
/999_nate_data_check.R
|
eae400e050f680d3611493868c4f072ca688e33d
|
[] |
no_license
|
kristopherdelane/NASDAE_pums
|
90dfaf6168777bca2d556e7ca3d775efab04d949
|
d24d83e425abd94804c9816af8855e711b2abecf
|
refs/heads/main
| 2023-03-09T17:27:04.394978
| 2021-01-22T13:07:30
| 2021-01-22T13:07:30
| 331,948,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,486
|
r
|
999_nate_data_check.R
|
library(tidyverse)
library(tidycensus)
# Read in PUMS file for Kentucky ------------------------------------------
# NOTE: get_pums() hits the Census API over the network; timing is captured
# below to see how long the pull takes.
x <- Sys.time()
ky_pums <- get_pums(
  variables = c("PUMA", "AGEP", "SCHL", "ESR", "FPAP", "NOC", "ENG", "SCH", "RELP"),
  state = "KY",
  survey = "acs5",
  recode = TRUE
)
# Create an output of chidren living in a household -----------------------
#' with a Head of Household who doesn't have a High School Diploma
#' and those living in a household
#' with a Head of Household who does have a High School Diploma
#' then finding the percentage.
ky_pums%>%
  #create a flag for all records where Person 1 (head of household)
  #has or does not have a high school diploma
  # NOTE(review): left_join() has no `by =`, so the key is inferred from
  # shared columns (SERIALNO here) -- consider making it explicit.
  left_join(ky_pums %>%
              select(SERIALNO, RELP, SCHL) %>%
              filter(RELP == "00") %>%
              # SCHL < 16: presumably codes below a regular HS diploma --
              # confirm against the ACS data dictionary.
              filter(SCHL < 16) %>%
              mutate(hoh_less_hs = 1) %>%
              select(SERIALNO, hoh_less_hs)) %>%
  #filter to people under the age of 18
  filter(AGEP < 18) %>%
  group_by(ST_label, hoh_less_hs) %>%
  #get counds based around head of household education
  # PWGTP is the person-level survey weight, so sums are weighted counts.
  summarize(count = sum(PWGTP))%>%
  ungroup() %>%
  #clean up for descriptive column names
  mutate(hoh_less_hs = ifelse(is.na(hoh_less_hs), "more_than_high_school_diploma", "less_than_high_school_diploma")) %>%
  pivot_wider(id_col = ST_label ,names_prefix = "head_of_household_", names_from = hoh_less_hs, values_from = count) %>%
  #calculate %
  mutate(pct_children_living_with_head_of_household_with_less_than_high_school_diploma = head_of_household_less_than_high_school_diploma/(head_of_household_more_than_high_school_diploma+head_of_household_less_than_high_school_diploma)) %>%
  #make long for viewing below
  pivot_longer(2:4)
# Elapsed time for the API pull plus the summary pipeline.
Sys.time()-x
# The block below repeats the analysis against a locally downloaded raw PUMS
# csv (psam_p21.csv) as a cross-check; kept commented out for reference.
# ky_check <- fread("data/psam_p21.csv")
#
# ky_check_agg <- ky_check %>% aggregate_output()
# ky_pums_agg <- ky_pums %>% aggregate_output()
#
# ky_check%>%
#   #create a flag for all records where Person 1 (head of household)
#   #has or does not have a high school diploma
#   left_join(ky_check %>%
#               select(SERIALNO, RELP, SCHL) %>%
#               filter(RELP == 00) %>%
#               filter(SCHL < 16) %>%
#               mutate(hoh_less_hs = 1) %>%
#               select(SERIALNO, hoh_less_hs)) %>%
#   #filter to people under the age of 18
#   mutate(hoh_less_hs = ifelse(is.na(hoh_less_hs), "more_than_high_school_diploma", "less_than_high_school_diploma")) %>%
#   filter(AGEP < 18) %>%
#   group_by(ST, hoh_less_hs) %>%
#   #get counds based around head of household education
#   summarize(count = sum(PWGTP))%>%
#   ungroup() %>%
#   #clean up for descriptive column names
#   pivot_wider(id_col = ST ,names_prefix = "head_of_household_", names_from = hoh_less_hs, values_from = count)
#   #calculate %
#   #mutate(pct_children_living_with_head_of_household_with_less_than_high_school_diploma = head_of_household_less_than_high_school_diploma/(head_of_household_more_than_high_school_diploma+head_of_household_less_than_high_school_diploma)) %>%
#   #make long for viewing below
#   #pivot_longer(2:4)
#
# ky_check %>%
#   group_by(ST) %>%
#   filter(AGEP > 17)%>%
#   filter(AGEP < 65) %>%
#   filter(FPAP == 1) %>%
#   filter(SCHL < 16) %>%
#   summarize(FPAP = sum(PWGTP)) %>%
#   left_join(ky_check %>%
#               group_by(ST) %>%
#               filter(AGEP > 17)%>%
#               filter(AGEP < 65) %>%
#               filter(SCHL < 16) %>%
#               summarize(TOTAL = sum(PWGTP)))
|
c683e1fed13b075ffcf5816980e999d330758d67
|
c6fe884e6fe918425ea56489107a2933790478c9
|
/plots/functions/altplot.R
|
46bc806e385ba1c437c20a5552c0677f35ea0a69
|
[] |
no_license
|
skgallagher/r0
|
2cd7a2a591982c1c7955333bc78194c60d8db71c
|
e84546793665876116501987d51feac0e74a7aae
|
refs/heads/master
| 2022-04-12T09:47:21.075378
| 2020-03-20T19:15:30
| 2020-03-20T19:15:30
| 139,631,262
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,259
|
r
|
altplot.R
|
## Section 1: "Error Robustness" figure -----------------------------------
## Load saved simulation results and extract three estimators per run.
res_path <- "C:/Users/Andersen/Documents/R0/DataSim/PaperResults/3"
res_list <- paste0(res_path, "/",
list.files(res_path))[-3]
alt_list <- vector("list", length = 3)
alt_list2 <- vector("list", length = 3)
for(kk in 1:length(alt_list)){
  # One accumulator matrix per estimator; the NA seed row is dropped later.
  alt_list[[kk]] <- matrix(NA, nrow = 1, ncol = 2)
  colnames(alt_list[[kk]]) <- c("Estimate", "Std. Dev")
}
# Rows of each results matrix to extract; mapped to "RE", "rRE", "LL"
# below via mod_names.
blah <- c(1, 2, 7)
for(ii in 1:length(res_list)){
  # load() brings `results_matlist` (a list of result matrices) into scope.
  load(res_list[ii])
  for(jj in 1:length(results_matlist)){
    for(kk in 1:3) {
      alt_list[[kk]] <- rbind(alt_list[[kk]], results_matlist[[jj]][blah[kk], 2:3])
    }
  }
}
# Scenario labels in the order the analyses were run, and in the order
# used in the paper figure.
dat_names_anorder <- c("Base", "Base3", "Base4", "Base5",
"SD = (10000, 500)", "SD = (2500, 100)", "SD = (50, 2)", "SD = (500, 20)",
"R0 = 60", "R0 = 1.5", "R0 = 1", "R0 = 0.25",
"N = 100", "N = 1000", "N = 10000", "N = 1000000",
"Y(0) = 1000", "Y(0) = 100", "Y(0) = 10", "Y(0) = 1",
"T = 100", "T = 20", "T = 200", "T = 50")
dat_names_paperorder <- c("Base",
"R0 = 60", "R0 = 1.5", "R0 = 1", "R0 = 0.25",
"T = 200", "T = 100", "T = 50", "T = 20",
"Y(0) = 1000", "Y(0) = 100", "Y(0) = 10", "Y(0) = 1",
"N = 1000000", "N = 10000", "N = 1000", "N = 100",
"SD = (50, 2)", "SD = (500, 20)", "SD = (2500, 100)", "SD = (10000, 500)")
mod_names <- c("RE", "rRE", "LL")
for(kk in 1:length(alt_list2)){
  # Drop the NA seed row; standardize estimates around the true value 2.
  alt_list2[[kk]] <- data.frame(Estimate = alt_list[[kk]][-1, 1],
                                SE = alt_list[[kk]][-1, 2])
  alt_list2[[kk]]$Error <- rep(c("AR", "AR-M", "Norm", "Norm-M"), times = 96/4)
  alt_list2[[kk]]$Names <- rep(dat_names_anorder, each = 4)
  alt_list2[[kk]]$Model <- mod_names[kk]
  alt_list2[[kk]]$Standard <- (alt_list2[[kk]]$Estimate - 2) / alt_list2[[kk]]$SE
}
# Hand-patched entry for the rRE estimator -- presumably a re-run value;
# TODO(review) confirm the provenance of these numbers.
alt_list2[[2]][85, 1] <- -0.1539
alt_list2[[2]][85, 2] <- 2.7692
alt_list2[[2]][85, 6] <- (-0.1539 - 2) / 2.7692
results_matlist <- rbind(alt_list2[[1]][-c(5:16), ], alt_list2[[2]][-c(5:16), ], alt_list2[[3]][-c(5:16), ])
for(ii in c(1)){
  # Clip standardized values into the plotting window and fix factor order.
  results_matlist$Standard <- pmax(pmin(results_matlist$Standard, 10.5), -7.5)
  results_matlist$Names <- factor(results_matlist$Names, levels = rev(dat_names_paperorder))
  results_matlist$Error <- factor(results_matlist$Error, levels = c("AR", "AR-M", "Norm", "Norm-M"))
  results_matlist$Model <- factor(results_matlist$Model, levels = c("RE", "rRE", "LL"))
}
# NOTE(review): TeX() comes from latex2exp and ggplot() from ggplot2; no
# library() calls are visible in this file -- confirm they are attached
# by the calling environment.
ggplot(data = results_matlist, aes(y = Names, x = Standard)) +
  geom_vline(xintercept = 0, col = "black", size = 1, linetype = "dashed",
             alpha = 0.6) +
  geom_point(size = 1) +
  facet_wrap(Model~Error, nrow = 3) +
  labs(x = TeX('Standardized $\\textbf{R}_0$'), y = "", title = "Error Robustness") +
  xlim(-8, 11) + theme_bw() + theme(title = element_text(size = 18),
                                    axis.text = element_text(size = 4, angle = 20),
                                    legend.text = element_text(size = 12))
### Other Model
## Section 2: "Model Robustness" figure -- same extraction as Section 1
## but over a different subset of result files and scenario names.
res_path <- "C:/Users/Andersen/Documents/R0/DataSim/PaperResults/3"
res_list <- paste0(res_path, "/",
list.files(res_path))[c(1, 3)]
alt_list <- vector("list", length = 3)
alt_list2 <- vector("list", length = 3)
for(kk in 1:length(alt_list)){
  alt_list[[kk]] <- matrix(NA, nrow = 1, ncol = 2)
  colnames(alt_list[[kk]]) <- c("Estimate", "Std. Dev")
}
# Estimator rows to pull (RE, rRE, LL).
blah <- c(1, 2, 7)
for(ii in 1:length(res_list)){
  load(res_list[ii])
  for(jj in 1:length(results_matlist)){
    for(kk in 1:3) {
      alt_list[[kk]] <- rbind(alt_list[[kk]], results_matlist[[jj]][blah[kk], 2:3])
    }
  }
}
dat_names_order <- c("Baseline","Baseline3","Baseline4","Baseline5", "Linear", "Quartic", "Linear SIR")
mod_names <- c("RE", "rRE", "LL")
for(kk in 1:length(alt_list2)){
  alt_list2[[kk]] <- data.frame(Estimate = alt_list[[kk]][-1, 1],
                                SE = alt_list[[kk]][-1, 2])
  alt_list2[[kk]]$Error <- rep(c("AR", "AR-M", "Norm", "Norm-M"), times = 28/4)
  alt_list2[[kk]]$Names <- rep(dat_names_order, each = 4)
  alt_list2[[kk]]$Model <- mod_names[kk]
  alt_list2[[kk]]$Standard <- (alt_list2[[kk]]$Estimate - 2) / alt_list2[[kk]]$SE
}
results_matlist <- rbind(alt_list2[[1]][-c(5:16), ], alt_list2[[2]][-c(5:16), ], alt_list2[[3]][-c(5:16), ])
for(ii in c(1)){
  results_matlist$Standard <- pmax(pmin(results_matlist$Standard, 10.5), -7.5)
  results_matlist$Names <- factor(results_matlist$Names, levels = dat_names_order)
  results_matlist$Error <- factor(results_matlist$Error, levels = c("AR", "AR-M", "Norm", "Norm-M"))
  results_matlist$Model <- factor(results_matlist$Model, levels = c("RE", "rRE", "LL"))
}
ggplot(data = results_matlist, aes(y = Error, x = Standard)) +
  geom_vline(xintercept = 0, col = "black", size = 1, linetype = "dashed",
             alpha = 0.6) +
  geom_point(size = 1.5) +
  facet_wrap(Model~Names, nrow = 3) +
  labs(x = TeX('Standardized $\\textbf{R}_0$'), y = "", title = "Model Robustness") +
  xlim(-8, 11) + theme_bw() + theme(title = element_text(size = 18),
                                    axis.text = element_text(size = 6, angle = 20),
                                    legend.text = element_text(size = 12))
### Time1
## Section 3: confidence-interval plots for different final observation
## times T*.
## NOTE(review): this section indexes `results_matlist` as a list of data
## frames, but the sections above overwrite it with a single data frame --
## these sections appear to be run independently per session; confirm.
model_names <- c("RE", "rRE", "LMA", "LMAT", "IPR", "SIPR", "LL", "MC", "SB", "a", "b")
for(ii in c(3, 7, 11, 15)){
  results_matlist[[ii]]$Model <- model_names
  results_matlist2 <- results_matlist[[ii]]
  results_matlist2$Estimate <- round(results_matlist2$Estimate, 4)
  results_matlist2[["Std. Dev"]] <- round(results_matlist2[["Std. Dev"]], 4)
  # 2-SD intervals with the SD floored at 0.025, clipped to the plotting
  # window [-8, 11]; point estimates clipped slightly tighter.
  results_matlist[[ii]]$Lower <- pmax(results_matlist[[ii]]$Estimate - 2 * pmax(results_matlist[[ii]][["Std. Dev"]], 0.025), -8)
  results_matlist[[ii]]$Upper <- pmin(results_matlist[[ii]]$Estimate + 2 * pmax(results_matlist[[ii]][["Std. Dev"]], 0.025), 11)
  results_matlist[[ii]]$Estimate <- pmax(pmin(results_matlist[[ii]]$Estimate, 10.5), -7.5)
  results_matlist[[ii]]$Model <- factor(results_matlist[[ii]]$Model, levels = results_matlist[[ii]]$Model)
}
xx <- rbind(results_matlist[[3]][1:9, ], results_matlist[[7]][1:9, ], results_matlist[[11]][1:9, ], results_matlist[[15]][1:9, ])
# Flag whether the interval covers the true value 2.
xx$inc <- factor(data.table::between(2, xx$Lower, xx$Upper))
xx$diff <- c(rep("T = 100", 9), rep("T = 20", 9), rep("T = 200", 9), rep("T = 50", 9))
xx$diff <- factor(xx$diff, levels = rev(c("T = 200", "T = 100", "T = 50", "T = 20")))
ggplot(data = xx, aes(y = diff, x = Estimate, xmin = Lower, xmax = Upper)) +
  geom_errorbarh(aes(xmin = Lower, xmax = Upper),
                 height = .3, size = 1.5, col = "black",
                 alpha = 0.6) +
  geom_vline(xintercept = 2, col = "black", size = 1, linetype = "dashed",
             alpha = 0.6) +
  geom_point(size = 2) +
  facet_wrap(~Model, nrow = 3) +
  labs(x = TeX('$\\textbf{R}_0$'), y = "T*", title = "Time Point Differences") +
  xlim(-8, 11) + theme_bw() + theme(title = element_text(size = 22),
                                    axis.text = element_text(size = 16),
                                    legend.text = element_text(size = 12))
### Time2
## Same interval plot, comparing observation frequency (daily ... quarterly).
model_names <- c("RE", "rRE", "LMA", "LMAT", "IPR", "SIPR", "LL", "MC", "SB", "a", "b")
results_matlist <- list(results_matlist_daily[[1]],
results_matlist_weekly[[1]],
results_matlist_monthly[[1]],
results_matlist_quarterly[[1]])
for(ii in c(1, 2, 3, 4)){
  results_matlist[[ii]]$Model <- model_names[1:9]
  results_matlist2 <- results_matlist[[ii]]
  results_matlist2$Estimate <- round(results_matlist2$Estimate, 4)
  results_matlist2[["Std. Dev"]] <- round(results_matlist2[["Std. Dev"]], 4)
  results_matlist[[ii]]$Lower <- pmax(results_matlist[[ii]]$Estimate - 2 * pmax(results_matlist[[ii]][["Std. Dev"]], 0.025), -8)
  results_matlist[[ii]]$Upper <- pmin(results_matlist[[ii]]$Estimate + 2 * pmax(results_matlist[[ii]][["Std. Dev"]], 0.025), 11)
  results_matlist[[ii]]$Estimate <- pmax(pmin(results_matlist[[ii]]$Estimate, 10.5), -7.5)
  results_matlist[[ii]]$Model <- factor(results_matlist[[ii]]$Model, levels = results_matlist[[ii]]$Model)
}
xx <- rbind(results_matlist[[1]][1:9, ], results_matlist[[2]][1:9, ], results_matlist[[3]][1:9, ], results_matlist[[4]][1:9, ])
xx$inc <- factor(data.table::between(2, xx$Lower, xx$Upper))
xx$diff <- c(rep("Daily", 9), rep("Weekly", 9), rep("Monthly", 9), rep("Quarterly", 9))
xx$diff <- factor(xx$diff, levels = rev(c("Daily", "Weekly", "Monthly", "Quarterly")))
ggplot(data = xx, aes(y = diff, x = Estimate, xmin = Lower, xmax = Upper)) +
  geom_errorbarh(aes(xmin = Lower, xmax = Upper),
                 height = .3, size = 1.5, col = "black",
                 alpha = 0.6) +
  geom_vline(xintercept = 2, col = "black", size = 1, linetype = "dashed",
             alpha = 0.6) +
  geom_point(size = 2) +
  facet_wrap(~Model, nrow = 3) +
  labs(x = TeX('$\\textbf{R}_0$'), y = "Times", title = "Observation Times") +
  xlim(-8, 11) + theme_bw() + theme(title = element_text(size = 22),
                                    axis.text = element_text(size = 16),
                                    legend.text = element_text(size = 12))
|
27f60e73654c51b217e133eed6dfe3f4ed66ecc3
|
b2bb84a70d1e8121612048ff54a664f14be7cf1a
|
/man/simulate_e.Rd
|
a01aa9ca50a1c91ff53bffc6f4d3f0e28d86a53e
|
[
"MIT"
] |
permissive
|
fboehm/mitra2013
|
1edacda8e5b324d458a1d925ba5309584d014f19
|
6914988c363a7a0a75d18b460bbd90324b3517dc
|
refs/heads/master
| 2021-01-10T12:13:05.724289
| 2016-02-29T04:43:57
| 2016-02-29T04:43:57
| 52,041,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
simulate_e.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_emat.R
\name{simulate_e}
\alias{simulate_e}
\title{Simulate emat}
\usage{
simulate_e(G, betamat, niter = 8000)
}
\description{
Simulate emat
}
|
357543dc9331503c6464edac755dd75637cfffad
|
ba903baa4f3b3b594e8fd6b9ada8b1e675a8afc6
|
/R/unhcr_paged.R
|
0e4967c3e152e56802da11634a50f875a757c987
|
[] |
no_license
|
weisisheng/unhcr_pagedown
|
482eba671af9a9494d60dc455085f0cf836c216e
|
0ce5b6e9c1b2e1d8196d38d827e4d71dcac35ba5
|
refs/heads/main
| 2022-12-29T22:08:08.265812
| 2020-10-14T03:46:40
| 2020-10-14T03:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
unhcr_paged.R
|
#' Create a paged HTML document suitable for printing with UNHCR Brand
#'
#' Wraps \code{pagedown::html_paged()} with the UNHCR font, page and
#' theme stylesheets bundled in this package.
#'
#' @param ... Arguments passed to
#'  \code{pagedown::\link[pagedown]{html_paged}}.
#' @references \url{https://pagedown.rbind.io}
#' @return An R Markdown output format.
#' @import stats utils pagedown
#' @export
#'
#'
unhcr_paged <- function(...) {
  # Resolve one of the CSS files shipped in unhcRstyle's resources/css/.
  css_path <- function(stem) {
    system.file("resources", "css", paste0(stem, ".css"), package = "unhcRstyle")
  }
  styles <- c(css_path("unhcr-fonts"), css_path("unhcr-page"), css_path("unhcr"))
  pagedown::html_paged(css = styles, ...)
}
|
c77532702a0fab2684b323f8b8180bb39673e486
|
e119a33081c8aa7bf698a3084288b18e0c2b9612
|
/man/validate_qra_fit.Rd
|
7290f6bb5e694e6f27cf6bfe655fd155afd86cd5
|
[] |
no_license
|
reichlab/covidEnsembles
|
6bc7f7a5a7f4d75fdb5f3161fdedc7d79841a95d
|
8c5d2191f055e15a11c5a1f6ccbae4862799ff3d
|
refs/heads/master
| 2023-04-17T01:46:14.579225
| 2023-03-08T17:25:57
| 2023-03-08T17:25:57
| 263,742,360
| 5
| 5
| null | 2023-03-08T17:25:59
| 2020-05-13T21:01:59
|
HTML
|
UTF-8
|
R
| false
| true
| 353
|
rd
|
validate_qra_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qra_fit.R
\name{validate_qra_fit}
\alias{validate_qra_fit}
\title{Validate qra_fit object}
\usage{
validate_qra_fit(qra_fit)
}
\arguments{
\item{qra_fit}{}
}
\value{
invisible(TRUE) if qra_fit is valid;
otherwise, an error is thrown
}
\description{
Validate qra_fit object
}
|
20842bfd01618eb4f2a4f8c2e2256d8a4f31213d
|
327641fcdabd1e3e17eb931004eda0e7da26f953
|
/download/server-pairs.R
|
aa653a9609faf7756d13abe18d841fc73570f578
|
[] |
no_license
|
stranda/unpak-shiny
|
d496ed5e44b97efaebf63d7ea693738db7f6a930
|
e1a8d2874745655f55df95fda1423f0886f9c997
|
refs/heads/master
| 2020-12-25T22:36:15.444895
| 2017-08-18T12:06:07
| 2017-08-18T12:06:07
| 39,632,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,293
|
r
|
server-pairs.R
|
source("global.R")
dbInfo = read.table('../../dbInfo.txt')
makeHTMLtable <- function (df)
{
  # Render a data frame as an HTML table, turning the Accession column into
  # links to the find-line Shiny app.
  #
  # Fixes relative to the previous version:
  #  * header/data cells are properly closed (</th>, </td>) before </tr>
  #  * a zero-row data frame no longer emits a bogus row (old 1:rows loop)
  #  * the stray `3` argument to as.character() on factor columns is gone
  #  * the table string is assembled with paste(collapse=) instead of being
  #    grown row by row
  #
  # Alternative link roots kept for reference:
  #  TAIR:  http://arabidopsis.org/servlets/Search?type=germplasm&...
  #  cloud: http://172.245.61.151:3838/findline/?line=
  url.root <- "http://107.170.89.221:3838/findline/?line="
  df$Accession <- paste0("<a href='", url.root, df$Accession,
                         "' target='_blank'>", df$Accession, "</a>")
  # Convert every column to character: numerics rounded to 3 decimals,
  # factors via their labels.
  for (j in seq_len(ncol(df)))
  {
    if (is.numeric(df[, j])) {
      df[, j] <- as.character(round(df[, j], 3))
    } else if (is.factor(df[, j])) {
      df[, j] <- as.character(df[, j])
    }
  }
  header <- paste0("<tr><th>", paste(names(df), collapse = "</th><th>"),
                   "</th></tr>\n")
  body <- vapply(seq_len(nrow(df)), function(i) {
    paste0("<tr><td>", paste(df[i, ], collapse = "</td><td>"), "</td></tr>\n")
  }, character(1))
  paste0("<table border = 1>", header, paste(body, collapse = ""))
}
#### Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output) {
  # Reactive data source: pull observations (optionally restricted to one
  # experiment) from MySQL, then optionally aggregate to accession means
  # and/or reshape to wide (one column per phenotype).
  values <- reactive({
    # "All" -> no WHERE clause; otherwise restrict to the chosen experiment.
    # NOTE(review): input$expt is pasted straight into the SQL string; OK if
    # it only ever comes from a fixed selectInput, unsafe for free text.
    if (input$expt=="All"){expt=" "} else {expt=paste0(" WHERE E.name = '",input$expt,"'")}
    con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
    query <- paste("SELECT Pl.idIndividualPlant, Pl.Accession_idAccession, T.name, E.name, F.Name,",
                   " Ph.name, O.value",
                   " FROM Observation O",
                   " JOIN IndividualPlant Pl ON O.IndividualPlant_idIndividualPlant = Pl.idIndividualPlant",
                   " JOIN Phenotype Ph ON O.Phenotype_idPhenotype = Ph.idPhenotype",
                   " JOIN Experiment E ON Pl.Experiment_idExperiment = E.idExperiment",
                   " JOIN Treatment T ON O.Treatment_idTreatment = T.idTreatment",
                   " JOIN Facility F ON Pl.Facility_idFacility = F.idFacility",
                   expt,
                   sep="")
    obstbl <- dbGetQuery(con,query)
    dbDisconnect(con)
    names(obstbl) <- c("PlantID","Accession","treatment","experiment","facility","Phenotype","value")
    if (input$linemeans=="yes")
    {
      # Mean value per accession x experiment x facility x phenotype x
      # treatment.
      ret <- with(obstbl,aggregate(cbind(value),by=list(Accession=Accession,
                                                        Experiment=experiment,
                                                        Facility=facility,
                                                        Phenotype=Phenotype,
                                                        Treatment=treatment),mean))
    } else {
      # Keep raw rows but duplicate the grouping columns under the
      # capitalized names the rest of the app expects.
      ret <- obstbl
      ret$Treatment=ret$treatment
      ret$Experiment=ret$experiment
      ret$Facility=ret$facility
    }
    if (input$wide=="yes")
    {
      # reshape::cast() pivots on the column named "variable".
      names(ret)[grep("Phenotype",names(ret))] <- "variable"
      tmp <- cast(ret)
      ret <- tmp
    }
    # Drop the lower-case duplicates before returning.
    ret <- ret[,!(names(ret)%in%c("treatment","experiment","facility"))]
    ret
  })
  # Status line shown above the download button.
  output$msg <- renderText(
  {
    df <- values()
    if (dim(df)[1]<1)
    {
      paste("No data present")
    } else {
      paste(dim(df)[1],"rows and",dim(df)[2],"columns of data ready to download")
    }
  })
  # CSV download of the current selection.
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("phenotypes",Sys.Date(),".csv",sep="")
    },
    content = function(file) {
      df <- values()
      write.csv(file=file,row.names=F,df)
    }
  )
  # Scatterplot matrix of the numeric phenotype columns.
  output$pairplot <- renderPlot(
  {
    df <- values()
    if ("Phenotype" %in% names(df))
    {
      names(df)[grep("Phenotype",names(df))] <- "variable"
      df <- cast(df)
    }
    pairs(df[,!(names(df) %in% c("PlantID","Experiment","Facility","Treatment","Accession"))])
  })
})
|
f7bd92825c623d17001a3d5c4614807569745bc0
|
d709ea9a4b89c4b30ab1db4449ffbb237c8018eb
|
/app3/test.R
|
ff9fe1b3689b970283504163c5b62b0cdc27025e
|
[] |
no_license
|
lillamy2012/glans
|
98e3a0e8c3d066e4750e6878298cb83d01a53d2a
|
c9bb24b3469189562ff351c9d67a7af246f00363
|
refs/heads/master
| 2021-01-21T13:44:15.581004
| 2016-04-29T11:39:56
| 2016-04-29T11:39:56
| 36,927,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,284
|
r
|
test.R
|
library(dplyr)
# Mass-spec modification table; the first two lines of the CSV are headers
# (skip=2), fields are semicolon-separated with comma decimals.
indata = read.csv("/Users/elin.axelsson/Desktop/MS_H31_H33_allmodifications.csv",skip=2,sep=";",dec=",")
# NOTE(review): tbl_df() is deprecated in current dplyr; as_tibble() is the
# modern equivalent.
tab_data = tbl_df(indata)
# Distinct key combinations at three levels of detail (see table_function).
groups_l3 = distinct(select(tab_data,Accession,Sequence,Modifications))
groups_l2 = distinct(select(tab_data,Accession,Sequence))
groups_l1 = distinct(select(tab_data,Accession))
table_function = function(data,groups,level){
  # For each group key in `groups`, count matching rows of `data` and the
  # per-sample "X" detections (columns 4:7 of `data`), then sum detections
  # into two replicate groups (samples 1-2 vs 3-4) and their difference.
  #
  # `level` selects how many key columns of `groups` are matched:
  #   3 = Accession + Sequence + Modifications
  #   2 = Accession + Sequence
  #   1 = Accession only
  #
  # FIX(review): the previous body filtered the global `tab_data` instead of
  # the `data` argument; it now uses `data` (callers passing tab_data see no
  # change). perGr is also preallocated and the 1:nrow loop made 0-row safe.
  perGr = vector("list", nrow(groups))
  groups$numb=NA
  groups$sample1=NA
  groups$sample2=NA
  groups$sample3=NA
  groups$sample4=NA
  groups$gr1=NA
  groups$gr2=NA
  for (i in seq_len(nrow(groups))){
    if (level==3){
      perGr[[i]]=filter(data,Accession==as.character(groups[i,1]) & Sequence==as.character(groups[i,2]) & Modifications==as.character(groups[i,3]))
    }
    if (level==2){
      perGr[[i]]=filter(data,Accession==as.character(groups[i,1]) & Sequence==as.character(groups[i,2]))
    }
    if (level==1){
      perGr[[i]]=filter(data,Accession==as.character(groups[i,1]))
    }
    groups[i,"numb"]=nrow(perGr[[i]])
    # Columns 4:7 hold the four sample indicator columns ("X" = detected).
    groups[i,paste("sample",1:4,sep="")]=apply(perGr[[i]][,4:7],2,function(x) sum(x=="X"))
  }
  groups$gr1 = rowSums(groups[,paste("sample",1:2,sep="")])
  groups$gr2 = rowSums(groups[,paste("sample",3:4,sep="")])
  groups$diff = abs(groups$gr1 -groups$gr2)
  return(groups)
}
|
861ba9eb25ddec048d3fe8101937b1cda902a15c
|
0b594e08270fe5aa4bde5d1b400e9baf3d3f94e4
|
/R/main.R
|
6b4b69f6308f5fb679c66c8a2310df39c4e625bc
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/SkeletonPredictionValidationStudy
|
25e6ddd47693ddde19da8e53fa39c936c1c971fb
|
a509b70dde8a9bb6c59cbfddbff334521f84cc93
|
refs/heads/main
| 2022-05-20T23:41:31.549562
| 2022-03-17T21:01:13
| 2022-03-17T21:01:13
| 249,766,382
| 0
| 0
| null | 2022-02-23T19:00:36
| 2020-03-24T16:57:25
|
R
|
UTF-8
|
R
| false
| false
| 3,709
|
r
|
main.R
|
#' Execute the validation study
#'
#' @details
#' This function will execute the sepcified parts of the study
#'
#' @param databaseDetails Database details for the validation created using \code{PatientLevelPrediction::createDatabaseDetails()}
#' @param restrictPlpDataSettings Extras data settings such as sampling created using \code{PatientLevelPrediction::createRestrictPlpDataSettings()}
#' @param validationSettings Settings for the validation such as whether to recalibrate
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#' @param createCohorts Whether to create the cohorts for the study
#' @param runValidation Whether to run the valdiation models
#' @param packageResults Whether to package the results (after removing sensitive details)
#' @param minCellCount The min count for the result to be included in the package results
#' @param logSettings Settings for the logging created using \code{PatientLevelPrediction::createLogSettings()}
#' @export
execute <- function(
  databaseDetails,
  restrictPlpDataSettings = PatientLevelPrediction::createRestrictPlpDataSettings(
    sampleSize = NULL
  ),
  validationSettings = PatientLevelPrediction::createValidationSettings(
    recalibrate = NULL
  ),
  outputFolder,
  createCohorts = T,
  runValidation = T,
  packageResults = T,
  minCellCount = 5,
  logSettings = PatientLevelPrediction::createLogSettings(verbosity = 'INFO', logName = 'validatePLP')
){
  # NOTE(review): T/F defaults would be safer written as TRUE/FALSE.
  # All results and the run log are written under outputFolder/<databaseName>.
  databaseName <- databaseDetails$cdmDatabaseName
  if (!file.exists(file.path(outputFolder,databaseName))){
    dir.create(file.path(outputFolder,databaseName), recursive = TRUE)
  }
  ParallelLogger::addDefaultFileLogger(file.path(outputFolder,databaseName, "log.txt"))
  # Step 1 (optional): instantiate the study cohorts in the database.
  if(createCohorts){
    ParallelLogger::logInfo("Creating Cohorts")
    createCohorts(
      databaseDetails = databaseDetails,
      outputFolder = file.path(outputFolder,databaseName)
    )
  }
  # Step 2 (optional): externally validate every model shipped in this
  # package's inst/models folder against the target database.
  if(runValidation){
    ParallelLogger::logInfo("Validating Models")
    # for each model externally validate
    ParallelLogger::logInfo("Applying Models in models folder")
    analysesLocation <- system.file(
      "models",
      package = "SkeletonPredictionValidationStudy"
    )
    models <- dir(analysesLocation)
    for(model in models){
      plpModel <- PatientLevelPrediction::loadPlpModel(file.path(analysesLocation, model))
      #update cohort schema and table in covariate settings
      ParallelLogger::logInfo('Updating cohort covariate settings is being used')
      plpModel$settings$covariateSettings <- addCohortSettings(
        covariateSettings = plpModel$settings$covariateSettings,
        cohortDatabaseSchema = databaseDetails$cohortDatabaseSchema,
        cohortTable = databaseDetails$cohortTable
      )
      PatientLevelPrediction::externalValidateDbPlp(
        plpModel = plpModel,
        validationDatabaseDetails = databaseDetails,
        validationRestrictPlpDataSettings = restrictPlpDataSettings,
        settings = validationSettings,
        logSettings = logSettings,
        outputFolder = outputFolder
      )
    }
  }
  # package the results: this creates a compressed file with sensitive details removed - ready to be reviewed and then
  # submitted to the network study manager
  # results saved to outputFolder/databaseName
  if (packageResults) {
    ParallelLogger::logInfo("Packaging results")
    packageResults(
      outputFolder = outputFolder,
      databaseName = databaseName,
      minCellCount = minCellCount
    )
  }
  invisible(NULL)
}
|
7f1764aff8313f7aeed2c04176ec194c3cb97680
|
12830b5570e4acaf8bc44e670d0aecfe27f8a0b5
|
/tests/testthat.R
|
6f9a2b1908f50844c99b77fa22f7bd2d85cff163
|
[
"MIT"
] |
permissive
|
favstats/wtmapi
|
695488445d4242979b7f469e9607f19848abbb72
|
ccc99f4332d80a7a179e0050e17e3ecce1e00fe4
|
refs/heads/master
| 2023-07-17T16:08:01.103668
| 2021-09-06T14:44:43
| 2021-09-06T14:44:43
| 388,148,022
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# for the wtmapi package.
library(testthat)
library(wtmapi)
test_check("wtmapi")
|
1810a2343d83598b0d39ff4296b80478ae5a0e6d
|
219715b27eec77fb87304b99e0066c6fcbc7438e
|
/04_8.R
|
ef58ef9a9a5658cf0c5365954946f7314e8cd467
|
[] |
no_license
|
jsta/practical_ecomod
|
047de080e243a988f88adfb49e5dfa9590dd809f
|
6e3a876908b51c268bb978b8b45310da93dff375
|
refs/heads/master
| 2020-03-15T08:11:34.981593
| 2018-05-03T20:43:00
| 2018-05-03T20:43:00
| 132,045,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,937
|
r
|
04_8.R
|
#####################################################
# Figure 4.8. Sediment mineralization model
#####################################################
#General settings
library(deSolve)
# Function to calculate model cost
# Sum-of-squared-residuals of the mineralization model against the observed
# oxygen consumption (`oxcon`), for a parameter vector c(k, mult).
# NOTE(review): relies on the script globals `meanDepo`, `oxcon`, `minmod`
# and deSolve::ode() being defined before it is called.
costf <- function(params){
  with(as.list(params), {
    # Steady-state carbon stock implied by mean deposition and decay rate k.
    Carbon <- meanDepo * mult / k
    outtimes <- as.vector(oxcon$time)
    outmin <- ode(Carbon, outtimes, minmod, params)
    # Column 3 of the ode() output is the mineralization rate returned as
    # minmod()'s auxiliary output; compare it with observed consumption.
    sqdiff <- (outmin[,3] - oxcon$cons)^2
    sum(sqdiff)
  })
}
# Function to calculate derivative of state variable
# deSolve derivative function: the carbon pool gains deposition (global
# `Flux`, linearly interpolated at time t and scaled by `mult`) and loses
# first-order mineralization at rate k.
minmod <- function(t,Carbon,parameters){
  with (as.list(c(Carbon,parameters)),{
    minrate <- k*Carbon
    # Interpolate the measured deposition flux at time t.
    Depo <- approx(Flux[,1],Flux[,2], xout=t)$y
    dCarbon <- mult*Depo - minrate
    # Return the derivative plus minrate as an auxiliary output
    # (column 3 of the ode() result, used by costf()).
    list(dCarbon,minrate)
  })
}
# Price's controlled random search.
#
# Arguments:
#   par      initial parameter estimates (named vector)
#   minpar   minimal parameter values
#   maxpar   maximal parameter values
#   func     cost function to minimise
#   npop     number of members in the population
#   numiter  maximal number of iterations
#   centroid number of population members averaged into the centroid
#   varleft  relative cost variation in the population upon stopping
#   ...      extra arguments forwarded to func
#
# A population of candidate parameter vectors is kept; each iteration a
# mirrored centroid of randomly chosen members replaces the worst member
# whenever it improves on it.
pricefit <- function (par, minpar=rep(-1e8,length(par)),
     maxpar=rep(1e8,length(par)), func, npop=max(5*length(par),50),
     numiter=10000, centroid = 3, varleft = 1e-8, ...){

  eval_cost <- function(p) func(p, ...)
  n_par <- length(par)
  varleft <- max(1e-8, varleft)

  # Initial population: uniform draws inside [minpar, maxpar]; the first
  # member is the user-supplied starting point.
  pop_par <- matrix(nrow = npop, ncol = n_par, byrow = TRUE,
                    data = minpar + runif(n_par * npop) * rep(maxpar - minpar, npop))
  colnames(pop_par) <- names(par)
  pop_par[1, ] <- par
  pop_cost <- apply(pop_par, MARGIN = 1, FUN = eval_cost)
  i_worst <- which.max(pop_cost)
  cost_worst <- pop_cost[i_worst]

  # Hybridization phase: stop when the cost spread collapses or the
  # iteration budget is spent.
  it <- 0
  while (it < numiter &&
         (max(pop_cost) - min(pop_cost)) > (min(pop_cost) * varleft)) {
    it <- it + 1
    idx_centroid <- sample(1:npop, size = centroid)  # members to average
    idx_mirror <- sample(1:npop, size = 1)           # member to reflect through
    candidate <- colMeans(pop_par[idx_centroid, ])
    candidate <- 2 * candidate - pop_par[idx_mirror, ]
    candidate <- pmin(pmax(candidate, minpar), maxpar)
    cand_cost <- eval_cost(candidate)
    if (cand_cost < cost_worst) {
      pop_cost[i_worst] <- cand_cost
      pop_par[i_worst, ] <- candidate
      i_worst <- which.max(pop_cost)  # new worst member
      cost_worst <- pop_cost[i_worst]
    }
  }

  i_best <- which.min(pop_cost)
  list(par = pop_par[i_best, ], cost = pop_cost[i_best],
       poppar = pop_par, popcost = pop_cost)
}
# Define problem and data
Flux <- matrix(ncol=2,byrow=TRUE,data=c(
1,
0.654, 11, 0.167, 21, 0.060, 41, 0.070,
73,
0.277, 83, 0.186, 93, 0.140,103, 0.255,
113,
0.231,123, 0.309,133, 1.127,143, 1.923,
153,
1.091,163, 1.001,173, 1.691,183, 1.404,
194,
1.226,204, 0.767,214, 0.893,224, 0.737,
234,
0.772,244, 0.726,254, 0.624,264, 0.439,
274,
0.168,284, 0.280,294, 0.202,304, 0.193,
315,
0.286,325, 0.599,335, 1.889,345, 0.996,
355,
0.681,365, 1.135))
meanDepo <- mean(approx(Flux[,1],Flux[,2], xout=seq(1,365,by=1))$y)
oxcon<-as.data.frame(matrix(ncol=2,byrow=TRUE,data=c(
68, 0.387, 69, 0.447, 71, 0.473, 72, 0.515,
189, 1.210,190, 1.056,192, 0.953,193, 1.133,
220, 1.259,221, 1.291,222, 1.204,230, 1.272,
231, 1.168,232, 1.168,311, 0.963,312, 1.075,
313, 1.023)))
names(oxcon)<-c("time","cons")
# Quick look at the observed oxygen-consumption data.
plot(oxcon)
# Coarse grid search over the deposition multiplier (mult) and decay
# rate (k) to find a starting point for the Price algorithm.
multser <- seq(1,1.5,by=.05)
numms <- length(multser)
kseries <- seq(0.001,0.05,by=0.002)
numks <- length(kseries)
outcost <- matrix(nrow = numms, ncol = numks)
for (m in 1:numms){
  for (i in 1:numks){
    pars <- c(k = kseries[i], mult = multser[m])
    outcost[m,i] <- costf(pars)
  }
}
# Grid cell with the lowest cost -> initial parameter guesses.
minpos <- which(outcost==min(outcost),arr.ind=TRUE)
multm <- multser[minpos[1]]
ki <- kseries[minpos[2]]
# Refine with the Price random search at three stopping tolerances
# (varleft); the looser fits are kept for comparison.
optpar <- pricefit(par=c(k=ki,mult=multm),minpar=c(0.001,1),
                   maxpar=c(0.05,1.5),func=costf,npop=50,numiter=500,
                   centroid=3,varleft=1e-8)
optpar20 <- pricefit(par=optpar$par,minpar=c(0.001,1),
                     maxpar=c(0.05,1.5),func=costf,npop=50,numiter=500,
                     centroid=3,varleft=0.2)
optpar25 <- pricefit(par=optpar$par,minpar=c(0.001,1),
                     maxpar=c(0.05,1.5),func=costf,npop=50,numiter=500,
                     centroid=3,varleft=0.025)
# Run the model over a full year at the best-fit parameters.
outtimes <- seq(1,365,by=1)
Carbon <- meanDepo*optpar$par[2]/optpar$par[1]
names(Carbon) <-"Carbon"
out <- as.data.frame(ode(Carbon,outtimes,minmod, optpar$par))
names(out) <- c("time","Carbon","minrate")
# Figure 4.8: deposition flux, fitted mineralization and observations on
# the left axis; modelled carbon stock on a second (right) axis.
par (oma=c(0,0,0,2))
plot(Flux,type="l",xlab="daynr",ylab="mmol/m2/d",
     main="Sediment-detritus model",lwd=2)
lines(out$time,out$minrate,lwd=2,col="darkgrey")
points(oxcon$time,oxcon$cons,pch=25,col="black", bg="darkgray",cex=2)
par(new=TRUE)
plot(out$time,out$Carbon,axes=FALSE,xlab="",ylab="",
     type="l",lty=2)
axis(4)
mtext(side=4,"mmolC/m2",outer=TRUE)
legend("topleft",col=c("black","darkgrey","black"),
       leg=c("C flux","C mineralization","C concentration"),
       lwd=c(2,2,1),lty=c(1,1,2))
|
ed90c7ad7b0cc9b7aad327708fbff7c454e9f27a
|
6614da007b351ae3c3335983a392062f08e91fd4
|
/examples/multiR/Rcross.R
|
b766ce08aedbfb43b75e038f363280e8f7a310e8
|
[] |
no_license
|
SchlossLab/Great_Lakes_SLURM
|
aa5e83f9f9c6d6287305d11fac823c69526e2b30
|
c745e06d2a5121262b14d3972c7b4f02910df1d7
|
refs/heads/master
| 2022-02-28T10:11:06.071015
| 2022-02-15T14:59:15
| 2022-02-15T14:59:15
| 189,630,304
| 12
| 6
| null | 2020-04-03T14:03:52
| 2019-05-31T17:04:42
| null |
UTF-8
|
R
| false
| false
| 746
|
r
|
Rcross.R
|
library(parallel)
# Toy dataset: y is a noisy linear function of x.
n <- 100
set.seed(123)
x <- rnorm(n)
y <- x + rnorm(n)
rand.data <- data.frame(x, y)
# Cross-validation folds. rep(1:K, length = n) with K >> n produces fold
# labels 1..n, so split() yields one observation per fold -- this is
# effectively leave-one-out CV despite the large K.
K <- 10000000
samples <- split(sample(1:n), rep(1:K, length = n))
# Fit on everything except fold `index`; return squared prediction errors
# on the held-out fold.
cv.fold.fun <- function(index) {
  fit <- lm(y~x, data = rand.data[-samples[[index]],])
  pred <- predict(fit, newdata = rand.data[samples[[index]],])
  return((pred - rand.data$y[samples[[index]]])^2)
}
# Sequential version
start <- proc.time()
res.fun <- lapply(seq(along = samples), cv.fold.fun)
proc.time() - start
mean(unlist(res.fun))
# Parallel version
# NOTE(review): mclapply() forks; on Windows it falls back to serial.
start <- proc.time()
options(cores = 4)
mcres.fun <- mclapply(seq(along = samples), cv.fold.fun)
proc.time() - start
mean(unlist(mcres.fun))
# All the elements are identical
all.equal(res.fun, mcres.fun)
|
dd6e4606ca77a803a413fce8751651075fe636f2
|
8b8ada35adca2b9636c585a130ca58708deca697
|
/R/grob.R
|
486b47e7566de6ab20ce4953760145d61ef8a1a6
|
[] |
no_license
|
pmur002/gridgeometry
|
bb79e5c9905cf5ea19096b17475e9aaa04ffcdb2
|
0d414bfa0924c5aca460303b3c497cce9a068c89
|
refs/heads/master
| 2022-12-23T05:27:11.859355
| 2022-12-06T21:42:28
| 2022-12-06T21:42:28
| 168,773,351
| 11
| 3
| null | 2022-10-10T00:41:27
| 2019-02-01T23:28:58
|
R
|
UTF-8
|
R
| false
| false
| 6,628
|
r
|
grob.R
|
################################################################################
## Convert polyclip() results to grobs
## Convert (closed) 'polyclip' polygon result to 'grid' path
## Build a single 'grid' path grob from a 'polyclip'-style list of closed
## coordinate sets (each element a list with $x and $y, in inches).
## The fill rule defaults to the list's "rule" attribute, else "winding".
xyListPath <- function(x, rule, name=NULL, gp=gpar()) {
    if (missing(rule)) {
        rule <- attr(x, "rule")
        if (is.null(rule))
            rule <- "winding"
    }
    ## Drop degenerate pieces with fewer than two points.
    x <- x[sapply(x, function(p) length(p$x) > 1)]
    if (length(x) == 0) {
        return(nullGrob(name=name))
    }
    pathGrob(unlist(lapply(x, `[[`, "x")),
             unlist(lapply(x, `[[`, "y")),
             default.units="in",
             id.lengths=sapply(x, function(p) length(p$x)),
             rule=rule, name=name, gp=gp)
}
## Backwards-compatible alias.
xyListToPath <- xyListPath
## Convert (closed) 'polyclip' polygon result to 'grid' polygons
## Build a single 'grid' polygon grob from a 'polyclip'-style list of
## closed coordinate sets (each element a list with $x and $y, in inches).
xyListPolygon <- function(x, name=NULL, gp=gpar()) {
    ## Drop degenerate pieces with fewer than two points.
    x <- x[sapply(x, function(p) length(p$x) > 1)]
    if (length(x) == 0) {
        return(nullGrob(name=name))
    }
    polygonGrob(unlist(lapply(x, `[[`, "x")),
                unlist(lapply(x, `[[`, "y")),
                default.units="in",
                id.lengths=sapply(x, function(p) length(p$x)),
                name=name, gp=gp)
}
xyListToPolygon <- xyListPolygon
## Convert (open) 'polyclip' polygon result to 'grid' polyline
## Build a single 'grid' polyline grob from a 'polyclip'-style list of
## open coordinate sets (each element a list with $x and $y, in inches).
xyListLine <- function(x, name=NULL, gp=gpar()) {
    if (length(x) == 0) {
        return(nullGrob(name=name))
    }
    polylineGrob(unlist(lapply(x, `[[`, "x")),
                 unlist(lapply(x, `[[`, "y")),
                 default.units="in",
                 id.lengths=sapply(x, function(p) length(p$x)),
                 name=name, gp=gp)
}
xyListToLine <- xyListLine
################################################################################
## Convert grobs to valid input for polyclip()
## Count distinct shapes in a coordinate list. Coords generated by 'grid'
## carry names (one name per shape); an unnamed list is treated as one
## shape per element.
numShapes <- function(coords) {
    nms <- names(coords)
    if (is.null(nms)) length(coords) else length(unique(nms))
}
## Generic: convert grobCoords() output to a plain 'polyclip' xy-list,
## combining multiple shapes with the boolean operation `op`
## ("flatten" simply concatenates).
xyListFromCoords <- function(x, op, closed, rule, ...) {
    UseMethod("xyListFromCoords")
}
## Placeholder substituted when polyclip() returns an empty list()
## (i.e. the boolean operation produced no geometry).
emptyXYlist <- list(list(x = 0, y = 0))
## Coordinates of a single grob. "flatten" just strips the grid
## attributes; otherwise shapes sharing a name are combined pairwise
## with polyclip::polyclip() using the boolean operation `op`.
xyListFromCoords.GridGrobCoords <- function(x, op, closed, ...) {
    if (op == "flatten") {
        attr(x, "name") <- NULL
        attr(x, "rule") <- NULL
        unclass(unname(x))
    } else {
        if (numShapes(x) == 1) {
            attr(x, "name") <- NULL
            ## Keep rule because, e.g., polyclipGridGrob() will use it
            unclass(unname(x))
        } else {
            ## Combine the first two named shapes, then fold in the rest.
            names <- names(x)
            unames <- sort(unique(names))
            n <- length(unames)
            A <- x[names == unames[1]]
            B <- x[names == unames[2]]
            fillrule <- convertRule(attr(x, "rule"))
            coords <- polyclip::polyclip(A, B, op, closed,
                                         fillA = fillrule,
                                         fillB = fillrule,
                                         ...)
            ## Convert polyclip::polyclip() list() result to "emptyCoords".
            ## We try not to feed polyclip::polyclip() a list() as input.
            if (!length(coords))
                coords <- emptyXYlist
            if (n > 2) {
                for (i in 3:n) {
                    A <- coords
                    B <- x[names == unames[i]]
                    coords <- polyclip::polyclip(A, B, op, closed,
                                                 fillA = fillrule,
                                                 fillB = fillrule,
                                                 ...)
                    if (!length(coords))
                        coords <- emptyXYlist
                }
            }
            coords
        }
    }
}
## Method for gTree coordinates: recursively convert each child's
## coordinates, then combine the children's results.
xyListFromCoords.GridGTreeCoords <- function(x, op, closed, ...) {
    if (op == "flatten") {
        ## Concatenate all children's coordinate sets without combining
        childCoords <- lapply(x, xyListFromCoords, op, closed, ...)
        coords <- do.call(c, childCoords)
        attr(coords, "rule") <- NULL
    } else {
        ## Fold the boolean op pairwise across the children;
        ## each child may carry its own fill "rule" attribute
        childCoords <- lapply(x, xyListFromCoords, op, closed, ...)
        coords <- Reduce(function(A, B) {
            fillA <- convertRule(attr(A, "rule"))
            fillB <- convertRule(attr(B, "rule"))
            coords <- polyclip::polyclip(A, B, op, closed,
                                         fillA = fillA,
                                         fillB = fillB,
                                         ...)
            ## An empty polyclip result means "no shape"
            if (!length(coords))
                emptyXYlist
            else
                coords
        },
        childCoords)
    }
    coords
}
## Convert a grob to a 'polyclip' xy-list.
## Default op is "union" for closed shapes, "flatten" for open ones.
## Handles the change in grobCoords() return structure in R 4.2.0.
xyListFromGrob <- function(x,
                           op = if (closed) "union" else "flatten",
                           closed = isClosedShape(x), ...) {
    if (getRversion() < "4.2.0") {
        ## grobCoords() result became more complex in R 4.2.0
        grobCoords(x, closed)
    } else {
        coords <- grobCoords(x, closed)
        xyListFromCoords(coords, op, closed, ...)
    }
}
################################################################################
## Determine default 'closed' value
##
## This is implemented in 'grid' in R >= 4.3.0, but the code
## here allows 'gridGeometry' to work with earlier versions of R
isClosedShape <- function(x, ...) {
    if (getRversion() >= "4.3.0") {
        ## Delegate to grid's own isClosed() when it is available;
        ## looked up at run time so this parses on older R
        isClosed <- get("isClosed", "package:grid")
        isClosed(x, ...)
    } else {
        ## Fall back to this package's own S3 dispatch
        UseMethod("isClosedShape")
    }
}
## Constant-valued helpers used to build the isClosedShape() dispatch table
isClosedTRUE <- function(x, ...) TRUE
isClosedFALSE <- function(x, ...) FALSE
## Shapes are closed by default; line-like grobs are open
isClosedShape.default <- isClosedTRUE
isClosedShape.move.to <- isClosedFALSE
isClosedShape.line.to <- isClosedFALSE
isClosedShape.lines <- isClosedFALSE
isClosedShape.polyline <- isClosedFALSE
isClosedShape.segments <- isClosedFALSE
isClosedShape.beziergrob <- isClosedFALSE
## An xspline is closed exactly when it is not "open"
isClosedShape.xspline <- function(x, ...) {
    !x$open
}
## Point symbols are closed except for the stroke-only symbols:
## pch 3 (plus), 4 (times), and 8 (plus-times)
isClosedShape.points <- function(x, ...) {
    !(as.character(x$pch) %in% c("3", "4", "8"))
}
|
1cbf10832bc98cea097bc1ce5f8dee972096e6df
|
4b7e8e2e40adab24179197e069408740dcb309dd
|
/plot2.R
|
066f56c787c93113e86ea410c4ff0e5df6326ec1
|
[] |
no_license
|
SimoneSinkovec/ExData_Plotting1
|
0c8a3e1057c5e8b9511b70b20b45ce448c8a4569
|
3f226a723470fad7cc5d248d3875005135500416
|
refs/heads/master
| 2022-04-08T12:00:03.915305
| 2020-03-25T20:31:56
| 2020-03-25T20:31:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
plot2.R
|
## plot2.R: plot Global Active Power over 1-2 Feb 2007 from the UCI
## household power consumption data set, saved as plot2.png.
library(dplyr)
library(datasets)  # fixed typo: 'datesets' is not a package and aborted the script
library(tidyr)

## Read the raw ';'-separated data (values use '.' as decimal mark)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", dec = ".")
## Character class lets us subset by literal date strings without as.Date()/strptime()
data$Date <- as.character(data$Date)
## Keep only the two target days
feb <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
## Paste Date + Time into a single "datetime" column
feb <- unite(feb, datetime, c(Date, Time), sep = "_", remove = TRUE, na.rm = FALSE)
## Parse the combined string into POSIXlt for a time axis
feb$datetime <- strptime(as.character(feb$datetime), format = "%d/%m/%Y_%H:%M:%S")
## Power must be numeric for plotting (goes via character to avoid factor-level
## coercion; invalid entries such as "?" become NA with a warning)
feb$Global_active_power <- as.numeric(as.character(feb$Global_active_power))
## Open the PNG device with the requested dimensions
png("plot2.png", width = 480, height = 480)
plot(feb$datetime, feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()  # close the file device
|
f0af7a9204e66fcba3001009451452b1d43a0168
|
0f54bd5a4fd9c51f30896cd1c070ba1fe5709095
|
/man/splendid-package.Rd
|
9aac8e8dcceec8d18ad1ca023d94e4f89a578af1
|
[
"MIT"
] |
permissive
|
AlineTalhouk/splendid
|
643905b988fa48fe5ed1e889bd745654c9f7a31f
|
c1062d167227afb3b9f8f70d924e81b8bd7843dd
|
refs/heads/master
| 2021-12-10T13:40:24.727988
| 2021-12-03T02:22:38
| 2021-12-03T02:22:38
| 86,625,708
| 1
| 0
|
NOASSERTION
| 2021-02-10T22:34:00
| 2017-03-29T20:19:30
|
R
|
UTF-8
|
R
| false
| true
| 1,140
|
rd
|
splendid-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splendid-package.r
\docType{package}
\name{splendid-package}
\alias{splendid-package}
\alias{_PACKAGE}
\title{splendid: SuPervised Learning ENsemble for Diagnostic IDentification}
\description{
Provides a bootstrapping and ensemble framework for supervised learning analyses using multiclass classification algorithms for modelling, prediction, and evaluation. Predicted classes are evaluated under metrics such as log loss, AUC, F1-score, Matthews correlation coefficient, and accuracy. Discrimination and reliability plots visualize the classifier performances. The .632+ estimator is implemented for the log loss error rate.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/AlineTalhouk/splendid}
\item \url{https://alinetalhouk.github.io/splendid/}
\item Report bugs at \url{https://github.com/AlineTalhouk/splendid/issues}
}
}
\author{
\strong{Maintainer}: Derek Chiu \email{dchiu@bccrc.ca}
Authors:
\itemize{
\item Aline Talhouk \email{atalhouk@bccrc.ca}
\item Dustin Johnson \email{djohnson@bccrc.ca}
}
}
\keyword{internal}
|
5abad6fedc8a604a32b08d219a29bb533274be74
|
0d00184a3503f14305ca7e98aa3298a69de33e39
|
/9Dec2104.R
|
cdde613d8f3247f006dfa1ae720ebcc13b71c28d
|
[] |
no_license
|
StrangeTcy/Statistics
|
e5e1f879cc0362e9c71c45ed90b549ecf64c8bc5
|
cd1301f0e031781d54ae3cb7eb42ae68112ab6e5
|
refs/heads/master
| 2021-01-23T02:28:55.264697
| 2015-03-28T10:12:27
| 2015-03-28T10:12:27
| 33,028,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
9Dec2104.R
|
## Fit a quadratic regression y = b0 + b1*x + b2*x^2 by hand
## (normal equations) and compute 95% confidence intervals for the
## coefficients.
data_x <-c(-3,-2,-1,0,1,2,3)
data_y <- c(-10,0,4,5,4,2,-2)
plot(data_x, data_y)
## d is the X'X matrix for the design [1, x, x^2] at the points above
d<-matrix(c(7,0,28,0,28,0,28,0,196),nrow=3)
d_inv <- solve(d)
X_transp <- matrix(c(1,1,1,1,1,1,1,-3,-2,-1,0,1,2,3,9,4,1,0,1,4,9),nrow=3,byrow=T)
X_transp
## a%*%b --matrix multiplication for matrices a and b (in specified order)
temp <- d_inv%*%X_transp
Y <- matrix (c(data_y), nrow = 7)
## OLS estimate: beta_hat = (X'X)^{-1} X' y
beta_hat <- temp %*%Y
beta_hat
## Residual sum of squares Q_e = sum (y_i - y_hat_i)^2
Q_e <-0
for (i in 1:7)
{
Q_e <- Q_e+ (data_y[i]-beta_hat[1,1]-beta_hat[2,1]*data_x[i]-beta_hat[3,1]*data_x[i]*data_x[i])* (data_y[i]-beta_hat[1,1]-beta_hat[2,1]*data_x[i]-beta_hat[3,1]*data_x[i]*data_x[i])
}
## Fitted values at the observed x's
y_pred <- c()
for (i in 1:7)
{
y_pred[i] <- beta_hat[1,1]+beta_hat[2,1]*data_x[i]+beta_hat[3,1]*data_x[i]*data_x[i]
}
y_pred
s <-Q_e/(7-2-1) # s^2 - see lecture notes (residual variance, n - p - 1 df)
alpha <- 0.05
## 95% t-confidence intervals for each coefficient:
## beta_j +/- t_{1-alpha/2, n-p-1} * s * sqrt((X'X)^{-1}_jj)
beta_hat[1,1]-qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[1,1])
beta_hat[1,1]+qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[1,1])
beta_hat[2,1]-qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[2,2])
beta_hat[2,1]+qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[2,2])
beta_hat[3,1]-qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[3,3])
beta_hat[3,1]+qt(1-alpha/2,7-2-1)*sqrt(s)*sqrt(d_inv[3,3])
|
c4197909bc34b8a267c33784f25ffa62ce0bdab7
|
e654e66d43ccaf9658408dce89c675b98612fdb2
|
/code/simulate_kmt.R
|
2392e637bc9ff0811143aa55b9b01c057b342de9
|
[] |
no_license
|
hannahbus/gmvp
|
76b43f125ab543064007816dc706ef0fd805ab04
|
0a28e4361f3095831cdc91e5a5df5571e6fc828f
|
refs/heads/master
| 2022-11-24T03:41:27.261348
| 2020-07-29T21:40:59
| 2020-07-29T21:40:59
| 277,495,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,128
|
r
|
simulate_kmt.R
|
# Simulation of data in accordance with Taylor ====

## Simulate one data set from a time-varying-parameter model with
## stochastic volatility:
##   y_t = x_t' beta_t + h_t * epsilon_t,   epsilon_t ~ N(0, 1),
## where h_t comes from generate_h() and beta_t from generate_beta().
## Returns a list: (y series, volatility series h, coefficient paths beta).
generate_data <- function(n_T, sigma_vartheta, logh_0, gamma,
                          phi, sigma_eta, X_series,
                          beta_0){
  h_series <- generate_h(n_T, sigma_vartheta, logh_0, gamma, phi)
  beta_series <- generate_beta(n_T, sigma_eta, beta_0)
  epsilon <- matrix(rnorm(n_T, mean = 0, sd = 1), nrow = n_T)
  ## Row-wise dot product x_t' beta_t plus heteroskedastic noise
  y_series <- apply(X_series * beta_series, 1, sum) + h_series * epsilon
  return(list(y_series, h_series, beta_series))
}
## Simulate an n_T x 1 volatility series whose log follows an AR(1):
##   log h_t = gamma + phi * log h_{t-1} + vartheta_t,
##   vartheta_t ~ N(0, sigma_vartheta),  log h_0 = logh_0.
## Returns sqrt(exp(log h_t)) for t = 1..n_T (initial value dropped).
generate_h <- function(n_T, sigma_vartheta, logh_0, gamma, phi){
  logh <- matrix(NA, ncol = 1, nrow = (n_T + 1))
  shocks <- matrix(rnorm(n_T, mean = 0, sd = sqrt(sigma_vartheta)),
                   nrow = n_T)
  logh[1, 1] <- logh_0
  for (t in 1:n_T){
    logh[t + 1, 1] <- gamma + phi * logh[t, 1] + shocks[t, 1]
  }
  sqrt(exp(logh))[-1, , drop = FALSE]
}
## Simulate random-walk regression coefficients:
##   beta_t = beta_{t-1} + eta_t,  eta_t ~ N(0, sigma_eta),
##   beta_1 = beta_0 + eta_1.
## Returns an n_T x k matrix of coefficient paths (cumulative sums).
## BUG FIX: the coefficient dimension is now derived from sigma_eta;
## the original read a global 'k', which silently broke whenever the
## global disagreed with dim(sigma_eta) (or was undefined).
generate_beta <- function(n_T, sigma_eta, beta_0){
  k <- nrow(as.matrix(sigma_eta))
  ## matrix() coercion keeps this working for k == 1, where mvrnorm
  ## returns a plain vector rather than an n_T x 1 matrix
  eta_series <- matrix(mvrnorm(n_T, mu = rep(0, k), Sigma = sigma_eta),
                       nrow = n_T)
  eta_series[1, ] <- eta_series[1, ] + beta_0
  apply(eta_series, 2, cumsum)
}
|
0834a0ae962551118709e4299209574c9e54a61c
|
72234b64f6903685a3dcb757ec68fae8c0c0f832
|
/cachematrix.R
|
6d1a98322397747d55091afaca3b104ae90b8d5f
|
[] |
no_license
|
ebloo/ProgrammingAssignment2
|
cceae189a3cddce7f3b64c26ef0c6c1fc5cd28c7
|
421a3b499ff342683cd31177e45022b427879497
|
refs/heads/master
| 2021-01-19T07:54:42.267585
| 2015-09-24T20:09:10
| 2015-09-24T20:09:10
| 43,083,554
| 0
| 0
| null | 2015-09-24T17:57:37
| 2015-09-24T17:57:36
| null |
UTF-8
|
R
| false
| false
| 2,152
|
r
|
cachematrix.R
|
####################################################################################
##
## cacheMatrix.R
##
## Contains a framework for computing and caching the inverse of a matrix,
## leveraging scoping rules of R for efficiency.
##
## Can be invoked as follows:
## mymatrix <- matrix(rnorm(4), 2, 2)
## m <- makeCacheMatrix(x = mymatrix)
## cacheSolve(m) # note that subsequent calls to cacheSolve hit the cache
##
####################################################################################
####################################################################################
## This function creates a special "matrix", which essentially consists of a list
## containing 4 functions:
## 1. set, which sets the value of the matrix
## 2. get, which simply returns the matrix
## 3. setinverse, which sets the inverse
## 4. getinverse, which returns the inverse
makeCacheMatrix <- function(x = matrix()) {
    ## Cached inverse; reset to NULL whenever the underlying matrix changes
    cached <- NULL
    ## Return the 4-function interface: set/get the matrix,
    ## setinverse/getinverse for the cached inverse
    list(
        set = function(y) {
            x <<- y
            cached <<- NULL
        },
        get = function() x,
        setinverse = function(i) cached <<- i,
        getinverse = function() cached
    )
}
####################################################################################
## This function returns the inverse of the special "matrix" passed as a parameter
## First, it checks whether the inverse has already been cached. If so, it returns
## from cache. Otherwise, it computes the inverse, caches it for future use, and
## returns it.
cacheSolve <- function(x, ...) {
    ## Serve the inverse from cache when it has already been computed
    cached <- x$getinverse()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    ## Cache miss: compute the inverse, store it, and return it
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
}
|
672fbc1c3420e978a53287dee1a129fa80988e56
|
9277e549802eb213f90c7d61624aace84003e820
|
/man/get_cmapplot_globals.Rd
|
4e82bc79cdc45e334e8708780baa55d100ac7e9e
|
[
"MIT"
] |
permissive
|
CMAP-REPOS/cmapplot
|
5f479a7e217666e03c86132054915b23475ceb5a
|
13563f06e2fdb226500ee3f28dc0ab2c61d76360
|
refs/heads/master
| 2023-03-16T03:25:59.202177
| 2023-03-08T04:28:04
| 2023-03-08T04:28:04
| 227,153,492
| 9
| 1
|
NOASSERTION
| 2023-03-08T04:28:06
| 2019-12-10T15:25:18
|
R
|
UTF-8
|
R
| false
| true
| 4,727
|
rd
|
get_cmapplot_globals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cmapplot_globals.R
\name{get_cmapplot_globals}
\alias{get_cmapplot_globals}
\alias{cmapplot_globals}
\alias{get_cmapplot_global}
\alias{set_cmapplot_global}
\title{The cmapplot_globals environment}
\usage{
get_cmapplot_globals()
get_cmapplot_global(...)
set_cmapplot_global(value, ..., quietly = FALSE)
}
\arguments{
\item{...}{The path to the variable within \code{cmapplot_globals} to be
get/set. The function willparse \code{$}, or recursive list elements can be
split over multiple arguments (e.g. \code{"font$strong$family"} is
equivalent to \code{"font", "strong", "family"}).}
\item{value}{the value to be set}
\item{quietly}{suppress confirmatory messages}
}
\description{
The \code{cmapplot_globals} environment contains a list of predefined
variables for use by the cmapplot package and its users. It includes commonly
used colors, font and font size specifications, and a list of constants which
aid in drawing cmap-themed plots. It cannot be accessed directly, but the
helper functions described here provide the user access if needed.
}
\section{Functions}{
\itemize{
\item \code{get_cmapplot_globals()}: Get the entire environment as a list.
\item \code{get_cmapplot_global()}: Get a specific global value
\item \code{set_cmapplot_global()}: Set a specific global value
}}
\section{Plot Constants}{
The primary portion of these global variables of
interest to the user is \code{cmapplot_globals$consts}, a list of default
constants that set certain plot aesthetics. Units of all plot constants are
"bigpts": 1/72 of an inch. Most plot constants are invoked (and can be
overridden) in \code{\link{finalize_plot}}: these are marked below with an
\strong{F}. Some are used/can be overridden in \code{\link{theme_cmap}}:
these are marked with \strong{T}.
\itemize{ \item \code{lwd_strongline}: This stronger-width line is drawn
vertically or horizontally with the \code{hline, vline} args of
\code{theme_cmap()}. \strong{(T)} \item \code{lwd_gridline}: This
thinner-width line is drawn vertically or horizontally with the
\code{gridlines, axislines} args of \code{theme_cmap()}. \strong{(T)} \item
\code{lwd_plotline}: The width of any lines drawn by geoms in the plot (e.g.
\code{geom_line}) but not explicitly sized by the geom's aesthetic.
Implemented by \code{finalize_plot} or by \code{apply_cmap_default_aes} but
not overridable in either context. (Modify by setting the size explicitly in
the geom, but see \code{gg_lwd_convert} first.) \item \code{lwd_topline}:
The width of the line above the plot. \strong{(F)} \item
\code{length_ticks}: The length of the axis ticks (if shown). \strong{(T)}
\item \code{margin_topline_t}: The margin between the top edge of the image
and the top line. \strong{(F)} \item \code{margin_title_t}: The margin
between the top line and the title. \strong{(F)} \item
\code{margin_title_b}: The margin between the title and the caption when
both are drawn in the sidebar. \strong{(F)} \item \code{margin_caption_b}:
The margin between the bottom of the caption and the bottom edge of the
image. \strong{(F)} \item \code{margin_legend_t}: The margin between the top
line and the plot box (i.e., the top of the legend). \strong{(F)} \item
\code{margin_legend_i}: The margin between legends (this only applies in
plots with two or more legends and does not affect legend spacing on plots
with single legends that have multiple rows). \strong{(T, F)} \item
\code{margin_legend_b}: The margin between the bottom of the legend and the
rest of the plot. \strong{(T, F)} \item \code{margin_plot_b}: The margin
between the bottom of the plot and the bottom edge of the image (or top of
caption). \strong{(F)} \item \code{margin_sidebar_l}: The margin between the
left edge of the image and the title and caption, when the sidebar exists.
Deducted from \code{title_width}. \strong{(F)} \item \code{margin_plot_l}:
The margin between the left edge of the plot and the sidebar. \strong{(F)}
\item \code{margin_plot_r}: The margin between the right edge of the plot
and the edge of the image. \strong{(F)} \item \code{margin_panel_r}: Padding
between the plot and its right-hand drawing extent. Override this based on
space needed for x axis labels. \strong{(T)} \item \code{leading_title}:
Text leading for Title text. \strong{(F)} \item \code{leading_caption}: Text
leading for Caption text. \strong{(F)} }
}
\examples{
# These are the same:
get_cmapplot_global("consts$lwd_gridline")
get_cmapplot_global("consts", "lwd_gridline")
# Globals can be modified if needed
set_cmapplot_global(5, "consts$lwd_gridline")
get_cmapplot_global("consts$lwd_gridline")
}
|
f9620725516c88bd895f216d76f4e774a21df1b8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rcolombos/examples/quick_search.Rd.R
|
62e204ec16a34d45de755fcb32626f0ce6c695fe
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
quick_search.Rd.R
|
## Auto-generated example script for Rcolombos::quick_search();
## the actual call is wrapped in "Not run" because it requires
## network access to the Colombos web service.
library(Rcolombos)

### Name: quick_search
### Title: This method mimics the quick_search functionality of Colombos.
###   It takes a string containg the nickname for the selected organism and
###   a vector of string representing the genes of interest for the
###   specified organism and returns a list containing the locustags
###   (gene_names), contrasts and M-values for the current selection.
### Aliases: quick_search

### ** Examples

## Not run: 
##D library("Rcolombos")
##D my_module <- quick_search(organism="ecoli",
##D                 genes=c("b0400","b2805","b0567"),
##D                 geneNames=FALSE)
##D heatmap(as.matrix(my_module), col=terrain.colors(15))
## End(Not run)
|
2e8af700975fd55ed5bcede70b63530fe48642e1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rmonad/examples/extract_metadata.Rd.R
|
b7885a91c3f1c5286dacd51d84a4b06ed42ef822
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
extract_metadata.Rd.R
|
## Auto-generated example script for rmonad::extract_metadata();
## demonstrates pulling the docstring and metadata list out of a code
## block, a function name, and a partially applied call.
library(rmonad)

### Name: extract_metadata
### Title: Extract docstring and meta data from an anonymous function
### Aliases: extract_metadata
### Keywords: internal

### ** Examples


## extract metadata from a block
expr <- substitute(
  {
    "this is the docstring"
    list(foo="this is meta data")
    5 * 32
  }
)

extract_metadata(expr)

foo <- function(x,y){
  "docstring"
  list(meta="data")
  x + y
}

## extract metadata from a function name
extract_metadata(substitute(foo), skip_name=FALSE)

## extract from a partially applied function
extract_metadata(substitute(foo(y=2)))
|
667a28779876f7f319a5bf55820ca490cdef2598
|
67beb7f17ae6a66b5865c0828592e8c13097da46
|
/man/pmf_list.Rd
|
3207845d4379bb2863e009b3037acab26b5c28c9
|
[] |
no_license
|
SamirRachidZaim/binomialRF
|
7604c2991119a34210332a1274852c27ac3eb411
|
1c715007809f116f1261063bcb2c5263de070344
|
refs/heads/master
| 2021-08-08T09:36:56.897143
| 2021-07-17T01:06:17
| 2021-07-17T01:06:17
| 187,258,451
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
rd
|
pmf_list.Rd
|
\name{pmf_list}
\docType{data}
\alias{pmf_list}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{A prebuilt distribution for correlated binary data
%% ~~function to do ... ~~
}
\description{
This data contains probability mass functions (pmf's) for correlated binary data for various parameters. The sum of correlated exchangeable binary data is a generalization of the binomial distribution that deals with correlated trials. The correlation in decision trees occurs as the subsampling and bootstrapping step in random forests touch the same data, creating a co-dependency. This data contains some pre-calculated distributions for random forests with 500, 1000, and 2000 trees with 10, 100, and 1000 features. For more distributions, they can be calculated via the correlbinom R package.
}
\usage{
pmf_list
}
\format{A list of lists}
\references{
Witt, Gary. "A Simple Distribution for the Sum of Correlated, Exchangeable Binary Data." Communications in Statistics-Theory and Methods 43, no. 20 (2014): 4265-4280.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{datasets}% use one of RShowDoc("KEYWORDS")
|
e0df196ad7b1706bb9bc21b707195a8932a5fecf
|
38ee7ac7164940545b868d3113ffb1d8ee350baf
|
/R/StatFactor.Cov.R
|
48d5174eaf4c66e58078356e077491f5156ab538
|
[] |
no_license
|
cran/FinCovRegularization
|
15d0e30d3a115fa589a8b12d2280edf37e437aac
|
a9f293028dd5a93166689f6fd93fdcbbe3607eb0
|
refs/heads/master
| 2021-01-18T22:36:36.791318
| 2016-04-25T15:32:07
| 2016-04-25T15:32:07
| 31,490,191
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,188
|
r
|
StatFactor.Cov.R
|
#' @title Covariance Matrix Estimation by Statistical Factor Model
#'
#' @description
#' Estimate covariance matrix by fitting a statistical factor model
#' using principle components analysis
#'
#' @param assets a matrix of asset returns
#' @param k numbers of factors, if k = 0,
#'   automatically estimating by Kaiser method
#' @return an estimated p*p covariance matrix
#' @examples
#' data(m.excess.c10sp9003)
#' assets <- m.excess.c10sp9003[,1:10]
#' StatFactor.Cov(assets, 3)
#' @export
StatFactor.Cov <- function(assets, k = 0) {
  # Sample covariance of the demeaned returns
  N <- dim(assets)[1]
  assets <- scale(assets, center = TRUE, scale = FALSE)
  cov.SAM <- (N-1)^(-1) * t(assets) %*% assets
  # SVD of the centered return matrix
  decomp <- svd(assets)
  if (k == 0) {
    # Kaiser method: keep components whose singular value exceeds 1
    eigenvalue <- decomp$d
    k <- sum(eigenvalue > 1)
  }
  # Factor loadings.  BUG FIX: drop = FALSE and diag(..., nrow = k)
  # keep this correct when k == 1 -- with a length-1 vector, diag()
  # would otherwise build an identity matrix of that (rounded) size
  # instead of a 1x1 matrix holding the singular value.
  beta <- (N-1)^(-1/2) * decomp$v[, 1:k, drop = FALSE] %*%
    diag(decomp$d[1:k], nrow = k)
  # Specific (idiosyncratic) variances on the diagonal
  VarE <- diag(diag(cov.SAM - beta %*% t(beta)))
  # Factor-model covariance: systematic part + specific variances
  COV <- beta %*% t(beta) + VarE
  dimnames(COV) <- list(colnames(assets), colnames(assets))
  return(COV)
}
|
b8c38f550a375a6736f0cba1a2f0eced70658e8b
|
15a13b7cb3504bc94c922554caab22618503bd98
|
/man/getRatingResult.Rd
|
0b5f5d72341331ab6048456015972d5ceaf35b27
|
[] |
no_license
|
DataShrimp/rating
|
875156265f69e57b3bb71c41cec883eab5a6d5d4
|
66934a513cca367f9d5eda2a332f566ed149af20
|
refs/heads/master
| 2020-12-25T18:52:02.660671
| 2017-06-11T09:59:38
| 2017-06-11T09:59:38
| 93,995,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
rd
|
getRatingResult.Rd
|
\name{getRatingResult}
\alias{getRatingResult}
\title{get rating results from the index data}
\usage{
dt.index <- getRatingResult(dt, rate_range, category_colnum=NA, bins=NA)
}
\description{
This function is used to rate the specific indexes defined by the parameter 'rate_range'.
The parameter 'bins' is used to assign the specific ruler field using the function 'getRulerBins'.
Alternatively, the parameter 'bins' can be supplied directly for absolute evaluation.
The result assigns ratings 'A, B, C, D, E' according to the confidence intervals of the normal distribution.
}
\examples{
NA
}
|
a3e8b9f6c807d2e4cf721411c1e57c469f34d496
|
97b43d02ae8395bb6b45f6756e96bbb9b6078191
|
/R/glue-bowtie.R
|
a9a197874fdf4c5097ccf40485db80e78acb4643
|
[
"MIT"
] |
permissive
|
t-arae/ngscmdr
|
03637f7fa4f7835f65e1be734a3bd1dbedb207a7
|
5912ca6a0c7c6f6471d070bf2ad6535e9e99da4b
|
refs/heads/master
| 2020-03-09T00:57:13.322949
| 2019-09-18T00:56:28
| 2019-09-18T00:56:28
| 128,501,154
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
r
|
glue-bowtie.R
|
#' Generate genome index for bowtie
#'
#' Builds (but does not execute) the shell command
#' \code{bowtie-build -f <fasta_path> <index_name>}.
#' @importFrom glue glue
#' @param fasta_path fasta file containing reference sequences to be aligned for
#' @param index_name Name of the index
#' @return a glue/character string holding the bowtie-build command
#' @export
glue_bowtie_genome_generate <-
  function(fasta_path, index_name){
    glue("
    bowtie-build -f {fasta_path} {index_name}
    ")
  }
#' Map with bowtie and output a sorted bam file
#'
#' Builds (but does not execute) a shell pipeline that maps a fastq file
#' with bowtie, sorts the alignments with samtools, and indexes the
#' resulting bam file.
#' @importFrom glue glue
#' @param head_label basename of the input fastq file (without extension)
#' @param index_name name of the bowtie index
#' @param in_dir directory containing the input fastq files
#' @param out_dir directory the sorted/indexed bam files are written to
#' @param core_num number of threads to use
#' @param fq_ext extension of the input fastq files (default "fastq")
#' @export
glue_bowtie_bamsort <-
  function(
    head_label,
    index_name,
    in_dir = "./fastq",
    out_dir = "./mapped_by_bowtie",
    core_num = 2,
    fq_ext = "fastq"
  ){
    ## BUG FIXES vs. original:
    ##  * fq_ext is now a parameter (was "fq_ext <- fq_ext", which errored
    ##    unless a global of that name happened to exist)
    ##  * {line_end} corrected to {lineend} (glue would abort on the
    ##    undefined name)
    ##  * samtools index now targets the file that was actually written
    ##    ({head_label}.sort.bam, not {head_label}_bowtie.sort.bam)
    lineend <- "\\"
    glue(
      "
      mkdir {out_dir}
      bowtie {lineend}
      -p {core_num} {lineend}
      -S {lineend}
      -a --best --strata {lineend}
      {index_name} {lineend}
      {in_dir}/{head_label}.{fq_ext} {lineend}
      | {lineend}
      samtools view -@ {core_num} -bS {lineend}
      | {lineend}
      samtools sort -@ {core_num} > {out_dir}/{head_label}.sort.bam
      samtools index {out_dir}/{head_label}.sort.bam
      "
    )
  }
|
c8dba850112fc7f80c240cc7f6e53d566bd7f52f
|
3c8b7c89dd898021de2e5c4b6e67197e554d5b14
|
/src/eda - s550/eda.R
|
7c13a79078a48e4b4e8d5e37032cc39680ef3a6d
|
[] |
no_license
|
hxman027/RealEstate
|
c8b80f6aa29927beb0a022ba38304a6f985d9990
|
e1626c3e29d838c57b39e89441ba39800a419d9f
|
refs/heads/master
| 2022-05-15T08:38:12.438640
| 2020-04-29T19:53:02
| 2020-04-29T19:53:02
| 236,350,582
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,151
|
r
|
eda.R
|
# Gian Carlo EDA
# preamble ####
library(tidyverse)
library(readr)
library(readxl)
library(ggplot2)
ggplot2::theme_set(theme_classic())
library(viridis)
# data wrangling ####
# data import
columnnames <- readxl::read_xlsx("colnames.xlsx") %>%
colnames()
real.estate_full <- readr::read_csv("2016 - 2020 Raw.csv",
na = c("", "NA", "NULL", "NULL_1"),
col_names = columnnames,
col_types = "ciccccccccccccccdddddiicccddddc")
# data wrangling
# assessments by PIC and year
assessments <- real.estate_full %>%
dplyr::select(PIC, Year, # relevant variables
AssessedValueAmt, AssetTypeDesc) %>%
dplyr::rename(year = Year,
assessment = AssessedValueAmt,
assessment.type = AssetTypeDesc) %>%
dplyr::group_by(PIC, year, assessment.type) %>%
dplyr::summarise(assessment = sum(assessment)) %>%
dplyr::ungroup() %>%
tidyr::spread(assessment.type, assessment) %>%
dplyr::rename(improvement.assessment = Improvement,
land.assessment = Land) %>%
dplyr::select(PIC, year, improvement.assessment, land.assessment) %>%
dplyr::mutate(total.assessment = improvement.assessment + land.assessment)
re <- real.estate_full %>%
dplyr::select(PIC, Year, AddressAssessorMunicipalityDesc, # relevant variables
TaxClassCode, TaxOwingAmountTotalCalculated, TaxClassTaxRate) %>%
dplyr::rename(year = Year,
municipality = AddressAssessorMunicipalityDesc, # human-readable names
tax.class = TaxClassCode,
tax = TaxOwingAmountTotalCalculated,
mill.rate = TaxClassTaxRate) %>%
dplyr::filter(tax.class %in% c("01", "05", "06")) %>% # relevant values for tax class
dplyr::distinct() %>%
dplyr::left_join(assessments, by = c("PIC" = "PIC", "year" = "year")) # add assessment
# properties by municipality 2020
re %>%
dplyr::select(municipality, PIC) %>%
#dplyr::filter(year == 2020) %>%
dplyr::group_by(municipality) %>%
dplyr::summarise(n.properties = n()) %>%
dplyr::arrange(desc(n.properties)) %>%
dplyr::mutate(perc.properties = n.properties / sum(n.properties),
acum.properties = cumsum(perc.properties)) %>%
View()
# data viz ####
# tax classes dictionary
tax.classes <- as_labeller(c(
`01` = "01 - Residential",
`05` = "05 - Industrial",
`06` = "06 - Commercial"
))
# facet scatter plots with year
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(year, municipality, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = log(total.assessment), y = log(mill.rate), color = factor(year))) +
geom_point() +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "mean log assessment value",
y = "mean log mill rate",
color = "year") +
scale_color_viridis_d() +
theme(text = element_text(size = 18))
ggsave("RealEstate/src/eda - s550/plots/1. scatter with year.pdf")
ggsave("RealEstate/src/eda - s550/plots/1. scatter with year.png")
# facet scatter plots for 2020 by municipality
re %>%
dplyr::filter(year == 2020) %>%
dplyr::select(-PIC, -year) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(municipality, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = log(total.assessment), y = log(mill.rate))) +
geom_point(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "mean log assessment value",
y = "mean log mill rate") +
theme(text = element_text(size = 18))
ggsave("RealEstate/src/eda - s550/plots/2. scatter 2020 by municipality.pdf")
ggsave("RealEstate/src/eda - s550/plots/2. scatter 2020 by municipality.png")
# facet scatter plots for 2020 by property
re %>%
dplyr::filter(year == 2020) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(PIC, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = log(total.assessment), y = log(mill.rate))) +
geom_point(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "log assessment",
y = "log mill rate",
title = "Assessment vs mill rates by property for 2020")
ggsave("RealEstate/src/eda - s550/plots/3. scatter 2020 by property.pdf")
ggsave("RealEstate/src/eda - s550/plots/3. scatter 2020 by property.png")
# repeat for each year
for(i in 2016:2019){
p <- re %>%
dplyr::filter(year == i) %>%
dplyr::select(-PIC, -year) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(municipality, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = log(total.assessment), y = log(mill.rate))) +
geom_point(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "log assessment",
y = "log mill rate",
title = paste0("Assessment vs mill rates by municipality for ", as.character(i)))
no <- i - 2015
title <- paste0("RealEstate/src/eda - s550/plots/2.", as.character(no), " scatter ", as.character(i), " by municipality")
ggsave(filename = paste0(title, ".pdf"), plot = p)
ggsave(filename = paste0(title, ".png"), plot = p)
}
# ---- Facet scatter: assessment vs mill rate, averaged over years ----
# One point per (municipality, tax.class); values are the 2016-2020 means.
# NOTE(review): ggsave() with no plot argument saves the last *displayed* plot,
# so each chunk below depends on the ggplot expression auto-printing just
# before the ggsave() calls — do not reorder these statements.
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(municipality, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = log(total.assessment), y = log(mill.rate))) +
geom_point(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "log assessment",
y = "log mill rate",
title = "Assessment vs mill rates yearly average by municipality, 2016-2020")
ggsave("RealEstate/src/eda - s550/plots/4. scatter average year.pdf")
ggsave("RealEstate/src/eda - s550/plots/4. scatter average year.png")
# ---- Facet line trends: mill rate over time, one line per municipality ----
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(year, municipality, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = year, y = log(mill.rate), group = municipality, color = municipality)) +
geom_line() +
facet_wrap(.~tax.class, labeller = tax.classes) +
labs(x = "year",
y = "log mill rate",
title = "Mill rates over time for all municipalities") +
scale_color_viridis_d() +
theme(legend.position = "none")
ggsave("RealEstate/src/eda - s550/plots/5. mill rate evolution full.pdf")
ggsave("RealEstate/src/eda - s550/plots/5. mill rate evolution full.png")
# ---- Same trend plot for a random sample of 10 rows per (year, tax.class) ----
# NOTE(review): sample_n() here is random; reproducibility depends on the seed
# being set earlier in the session (set.seed(2303) appears only further below).
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(year, tax.class) %>%
dplyr::sample_n(size = 10) %>%
dplyr::ungroup() %>%
dplyr::group_by(year, tax.class, municipality) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = year, y = log(mill.rate), group = municipality)) +
geom_line(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
#scale_color_viridis_d() +
#theme(legend.position = "none") +
labs(x = "year",
y = "log mill rate") +
theme(text = element_text(size = 18),
axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("RealEstate/src/eda - s550/plots/6. mill rate evolution sample.pdf")
ggsave("RealEstate/src/eda - s550/plots/6. mill rate evolution sample.png")
# ---- Same sampled trend plot, but for log assessment value ----
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate)) %>%
dplyr::group_by(year, tax.class) %>%
dplyr::sample_n(size = 10) %>%
dplyr::ungroup() %>%
dplyr::group_by(year, tax.class, municipality) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = year, y = log(total.assessment), group = municipality)) +
geom_line(color = viridis(20)[3]) +
facet_wrap(.~tax.class, labeller = tax.classes) +
#scale_color_viridis_d() +
#theme(legend.position = "none") +
labs(x = "year",
y = "log assessment value") +
theme(text = element_text(size = 18),
axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("RealEstate/src/eda - s550/plots/6.1 assessment evolution sample.pdf")
ggsave("RealEstate/src/eda - s550/plots/6.1 assessment evolution sample.png")
# ---- Violin + boxplot of mill rates across tax classes, 2020 only ----
# Values are per-property (PIC) means; fill colours are hand-picked viridis hex
# codes so classes keep stable colours across figures.
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020) %>%
dplyr::group_by(PIC, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = tax.class, y = log(mill.rate), fill = tax.class)) +
geom_violin(alpha = 0.5, width = 1) +
geom_boxplot(alpha = 0.75, width = 0.1) +
labs(x = "tax class",
y = "log mill rate") +
#scale_fill_viridis_d(begin=0, end=1) +
scale_fill_manual(values = c("#3E4A89FF", "#26828EFF","#B4DE2CFF" )) +
theme(legend.position = "none") +
scale_x_discrete(labels = c("01 - Residential", "05 - Industrial", "06 - Commercial")) +
theme(text = element_text(size = 18))
ggsave("RealEstate/src/eda - s550/plots/7. violin mill rates.pdf")
ggsave("RealEstate/src/eda - s550/plots/7. violin mill rates.png")
# ---- Same violin plot for log assessment values, 2020 only ----
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020) %>%
dplyr::group_by(PIC, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = tax.class, y = log(total.assessment), fill = tax.class)) +
geom_violin(alpha = 0.5, width = 1) +
geom_boxplot(alpha = 0.75, width = 0.1) +
labs(x = "tax class",
y = "log assessment value") +
#scale_fill_viridis_d(begin=0, end=1) +
scale_fill_manual(values = c("#3E4A89FF", "#26828EFF","#B4DE2CFF" )) +
theme(legend.position = "none") +
scale_x_discrete(labels = c("01 - Residential", "05 - Industrial", "06 - Commercial")) +
theme(text = element_text(size = 18))
ggsave("RealEstate/src/eda - s550/plots/7.1 violin assessment values.pdf")
ggsave("RealEstate/src/eda - s550/plots/7.1 violin assessment values.png")
# ---- Plain boxplots of mill rates across tax classes, 2020 only ----
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020) %>%
dplyr::group_by(PIC, tax.class) %>%
dplyr::summarise(total.assessment = mean(total.assessment), mill.rate = mean(mill.rate)) %>%
ggplot(aes(x = tax.class, y = log(mill.rate))) +
geom_boxplot() +
labs(x = "tax class",
y = "log mill rate",
title = "Mill rates accross tax classes")
ggsave("RealEstate/src/eda - s550/plots/8. boxplot mill rates.pdf")
ggsave("RealEstate/src/eda - s550/plots/8. boxplot mill rates.png")
# ---- Correlation plots of 2020 assessment values ----
# NOTE(review): after summarising to a single mean per municipality, spread()
# leaves exactly ONE row, so cor() has a single observation per column and
# returns an all-NA matrix — verify whether this chunk ever produced a useful
# plot or whether the summarise() step should be dropped.
re %>%
dplyr::select(-PIC) %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020) %>%
dplyr::select(municipality, total.assessment) %>%
dplyr::group_by(municipality) %>%
dplyr::summarise(total.assessment = mean(total.assessment)) %>%
tidyr::spread(key = municipality, value = total.assessment) %>%
cor() %>%
corrplot::corrplot(method = "color",
type = "upper",
tl.col = "black",
tl.pos="n")
# Same construction at the property (PIC) level for residential (class 01);
# the one-row-after-spread caveat above applies here as well.
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020, tax.class == "01") %>%
dplyr::select(PIC, total.assessment) %>%
dplyr::group_by(PIC) %>%
dplyr::summarise(total.assessment = mean(total.assessment)) %>%
tidyr::spread(key = PIC, value = total.assessment) %>%
cor() %>%
corrplot::corrplot(method = "color",
type = "upper",
tl.col = "black",
tl.pos="n")
# Pairs plot of 2020 residential assessments by municipality.
# NOTE(review): cardinality_threshold = 162 presumably matches the number of
# distinct municipalities — confirm against the data.
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020, tax.class == "01") %>%
dplyr::select(municipality, total.assessment) %>%
GGally::ggpairs(cardinality_threshold = 162)
# ---- ACF of 2020 residential assessment values ----
# NOTE(review): the rows fed to acf() are not an ordered time series — the
# autocorrelation shown depends entirely on the incidental row order of `re`.
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020, tax.class == "01") %>%
dplyr::select(total.assessment) %>%
acf(plot = FALSE) %>%
with(data.frame(lag, acf)) %>%
ggplot(aes(lag, acf)) +
geom_hline(aes(yintercept = 0)) +
geom_segment(mapping = aes(xend = lag, yend = 0)) +
geom_hline(aes(yintercept = 1.96/sqrt(nrow(re))),
linetype = 2, color = 'darkblue') +
geom_hline(aes(yintercept = -1.96/sqrt(nrow(re))),
linetype = 2, color = 'darkblue') +
labs(x = "Lag",
y = "Autocorrelation function") +
theme(text = element_text(size = 18))
# Same ACF after collapsing to one mean per municipality; the row-order caveat
# above still applies (municipality order is not a meaningful sequence).
re %>%
dplyr::filter(!is.na(total.assessment), !is.na(mill.rate), year == 2020, tax.class == "01") %>%
dplyr::select(municipality, total.assessment) %>%
dplyr::group_by(municipality) %>%
dplyr::summarise(total.assessment = mean(total.assessment)) %>%
dplyr::select(total.assessment) %>%
acf(plot = FALSE) %>%
with(data.frame(lag, acf)) %>%
ggplot(aes(lag, acf)) +
geom_hline(aes(yintercept = 0)) +
geom_segment(mapping = aes(xend = lag, yend = 0)) +
geom_hline(aes(yintercept = 1.96/sqrt(nrow(re))),
linetype = 2, color = 'darkblue') +
geom_hline(aes(yintercept = -1.96/sqrt(nrow(re))),
linetype = 2, color = 'darkblue') +
labs(x = "Lag",
y = "Autocorrelation function") +
theme(text = element_text(size = 18))
# export data set ####
# NOTE(review): the seed is set here although the two filter() calls below are
# deterministic — presumably a leftover from (or intended for) the sample_n()
# chunks above; confirm placement.
set.seed(2303)
# Train = 2016-2019, test = 2017-2020.
# NOTE(review): the two sets overlap in 2017-2019 — confirm this rolling-style
# split is intentional and not a typo for `year == 2020` in the test set.
train <- re %>%
dplyr::filter(year != 2020)
test <- re %>%
dplyr::filter(year != 2016)
readr::write_delim(train, "train_data.txt", delim = ",")
readr::write_delim(test, "test_data.txt", delim = ",")
# read for test
# col_types "ciccddddd": 9 columns — char, int, char, char, then five doubles.
train_read <- readr::read_delim("train_data.txt", delim = ",",
col_types = "ciccddddd")
test_read <- readr::read_delim("test_data.txt", delim = ",",
col_types = "ciccddddd")
## S450 data ####
# Quick look at the aggregated assessment data set: mill rate vs log assessment,
# coloured by year and faceted by tax class code.
re.450 <- read_csv("RealEstate/data/assessment_aggregate.csv")
re.450 %>%
ggplot(aes(x = log(assessTotal), y = rate, color = factor(Year))) +
geom_point() +
facet_wrap(.~TaxClassCode) +
labs(x = "log assessment",
y = "mill rate",
color = "year") +
scale_color_viridis_d()
|
842b71a57985bcdda13f6965bcb0161409d341cf
|
0709b186b4ddca2a5413e36d1a84753c8b90d41a
|
/Fig5.R
|
0d6784f55eabf8d2e7b5b964e1f211c44be3290a
|
[] |
no_license
|
RyoMogi/CCF_Decomposition
|
6d7637921240742620cc1dc6f8d4bb51e48a2256
|
154f9b4ac4fb45769b01c62b6ab4e67cbabe1134
|
refs/heads/master
| 2023-03-04T04:25:56.741095
| 2021-02-14T14:36:14
| 2021-02-14T14:36:14
| 338,822,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,669
|
r
|
Fig5.R
|
library(ggrepel)
# Colour palette used for highlighting countries in the final scatter plot.
Mycol <- c("#08306B", "#238B45", "#FD8D3C", "#D4B9DA", "#FFEDA0")
# Leftover from an interactive (Shiny) version of this figure, where the birth
# cohort window was chosen via an input widget; kept for reference.
#sel_bc_Fig5 <- reactive({
# switch(input$dataset_Fig5,
# "1940-1970" = 1,
# "1945-1970" = 2,
# "1950-1970" = 3,
# "1955-1970" = 4,
# "1960-1970" = 5,
# "1965-1970" = 6
# )
#})
# Fixed choice of birth cohort window: index 6 = 1965-1970 (1963-1968 for Italy).
sel_bc_Fig5 <- 6
# Country codes; each must have a matching "Data/<code>raw.csv" input file.
Country <- c("AUS", "CR", "FIN", "GR", "HUN", "ITA", "IR", "RUS", "SER", "SK", "ESP")
# Accumulators filled by the per-country loop below.
CCF_all <- c()
Prop_F_all <- c()
# Per-country loop: read raw parity counts by education and birth cohort,
# compute the completed cohort fertility rate (CCF), and decompose changes in
# CCF into a fertility-rate component (F) and an educational-composition
# component (E).
#
# Bug fix vs the original: inside decomp_CCF the NA-replacement loop used `i`
# as its index, shadowing the outer country index, so `Country[i]` in the
# rownames branch never matched "ITA" and Italian cohort labels were never
# applied. The vectorized NA replacement below removes the shadowing.
for(i in seq_along(Country)){
  data_Fig5 <- read.csv(paste("Data/", Country[i], "raw.csv", sep = ""), header = TRUE)

  # Cumulative parity counts: Bk = women with at least k births; We = all women.
  data_edu_Fig5 <- data_Fig5 %>%
    as.data.frame() %>%
    mutate(We = parity_0 + parity_1 + parity_2 + parity_3 + parity_4 + parity_5 + parity_6 + parity_7 + parity_8p,
           B0 = We,
           B1 = parity_1 + parity_2 + parity_3 + parity_4 + parity_5 + parity_6 + parity_7 + parity_8p,
           B2 = parity_2 + parity_3 + parity_4 + parity_5 + parity_6 + parity_7 + parity_8p,
           B3 = parity_3 + parity_4 + parity_5 + parity_6 + parity_7 + parity_8p,
           B4 = parity_4 + parity_5 + parity_6 + parity_7 + parity_8p,
           B5 = parity_5 + parity_6 + parity_7 + parity_8p,
           B6 = parity_6 + parity_7 + parity_8p,
           B7 = parity_7 + parity_8p,
           B8 = parity_8p,
           B3p = B3 + B4 + B5 + B6 + B7 + B8,
           B = B1 + B2 + B3 + B4 + B5 + B6 + B7 + B8) %>%
    select(cohort, edu, We, B0, B1, B2, B3, B4, B5, B6, B7, B8, B3p, B)

  # Italian cohorts are shifted by ~2 years relative to the other countries.
  if(Country[i] == "ITA"){
    bc_min <- case_when(sel_bc_Fig5 == 1 ~ 1938,
                        sel_bc_Fig5 == 2 ~ 1942,
                        sel_bc_Fig5 == 3 ~ 1948,
                        sel_bc_Fig5 == 4 ~ 1953,
                        sel_bc_Fig5 == 5 ~ 1958,
                        sel_bc_Fig5 == 6 ~ 1963)
    bc_max <- bc_min + 5
  } else {
    bc_min <- case_when(sel_bc_Fig5 == 1 ~ 1940,
                        sel_bc_Fig5 == 2 ~ 1945,
                        sel_bc_Fig5 == 3 ~ 1950,
                        sel_bc_Fig5 == 4 ~ 1955,
                        sel_bc_Fig5 == 5 ~ 1960,
                        sel_bc_Fig5 == 6 ~ 1965)
    bc_max <- bc_min + 5
  }

  ## Completed cohort fertility = total births / total women in the window.
  # NOTE(review): the window is hard-coded to 1965-1970 (the commented filter
  # used bc_min/bc_max, which are now unused) — confirm this is intended.
  CCF <- data_edu_Fig5 %>%
    #filter(cohort >= bc_min & cohort <= bc_max) %>%
    filter(cohort >= 1965 & cohort <= 1970) %>%
    summarise(W = sum(We),
              B = sum(B)) %>%
    mutate(CCF = B / W)
  CCF <- CCF$CCF

  ## Per-cohort shares: E = education-group share of women, Fke = parity rates.
  if(Country[i] == "ITA"){
    data_edu_all <- data_edu_Fig5 %>%
      group_by(cohort) %>%
      summarise(W = sum(We)) %>%
      left_join(data_edu_Fig5, by = "cohort") %>%
      filter(cohort != 1933) %>%
      mutate(E = We / W,
             F1e = B1 / We,
             F2e = B2 / We,
             F3e = B3 / We,
             F4e = B4 / We,
             F5e = B5 / We,
             F6e = B6 / We,
             F7e = B7 / We,
             F8e = B8 / We)
  } else {
    data_edu_all <- data_edu_Fig5 %>%
      group_by(cohort) %>%
      summarise(W = sum(We)) %>%
      left_join(data_edu_Fig5, by = "cohort") %>%
      filter(cohort %in% seq(1940, 1970, 5)) %>%
      mutate(E = We / W,
             F1e = B1 / We,
             F2e = B2 / We,
             F3e = B3 / We,
             F4e = B4 / We,
             F5e = B5 / We,
             F6e = B6 / We,
             F7e = B7 / We,
             F8e = B8 / We)
  }

  # Decompose CCF change between consecutive 5-year cohorts for one education
  # group into fertility (F*) and composition (E*) contributions.
  decomp_CCF <- function(Edu){
    D <- data_edu_all %>%
      filter(edu == Edu) %>%
      mutate_if(is.integer, as.numeric) %>%
      select(B0, B1, B2, B3, B4, B5, B6, B7, B8, B, We, W, E,
             F1e, F2e, F3e, F4e, F5e, F6e, F7e, F8e)
    cohort_t1 <- D[1:6, ]
    cohort_t2 <- D[2:7, ]
    # Geometric midpoint of each factor between consecutive cohorts.
    mid <- (cohort_t2 * cohort_t1) ^ 0.5
    h <- 5
    # Log-derivative of each rate over the 5-year step, scaled to the midpoint.
    D_deriv <- D %>%
      select(F1e, F2e, F3e, F4e, F5e, F6e, F7e, F8e, E)
    cohort_t1_deriv <- D_deriv[1:6, ]
    cohort_t2_deriv <- D_deriv[2:7, ]
    deriv <- (log(cohort_t2_deriv / cohort_t1_deriv) / h) * mid[, c("F1e", "F2e", "F3e", "F4e", "F5e", "F6e", "F7e", "F8e", "E")]
    # Zero rates produce NaN/NA derivatives; treat them as no contribution.
    # (Vectorized on purpose: the previous index loop reused `i` and shadowed
    # the outer country index used in the rownames branch below.)
    deriv[is.na(deriv)] <- 0
    # Contributions: fertility change weighted by composition, and vice versa.
    F_part <- deriv[, 1:8] * mid[, "E"]
    E_part <- mid[, 14:21] * deriv[, "E"]
    cont <- cbind(F_part, E_part)
    colnames(cont) <- c("F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8",
                        "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8")
    # `i` here is the outer country index (resolved via the enclosing
    # environment), so Italy now gets its shifted cohort labels.
    if(Country[i] == "ITA"){
      rownames(cont) <- c("1938-1942", "1942-1948", "1948-1953", "1953-1958", "1958-1963", "1963-1968")
    } else {
      rownames(cont) <- c("1940-45", "1945-50", "1950-55", "1955-60", "1960-65", "1965-70")
    }
    F_cont <- rowSums(cont[, 1:8])
    E_cont <- rowSums(cont[, 9:16])
    T_cont <- cbind(F_cont, E_cont, cont)
    return(T_cont)
  }

  comp_low <- decomp_CCF("Low")
  comp_medium <- decomp_CCF("Medium")
  comp_high <- decomp_CCF("High")

  # Total F and E contributions summed over the three education groups.
  # (Named to avoid shadowing base R's F/FALSE and E.)
  F_comp <- comp_low$F_cont + comp_medium$F_cont + comp_high$F_cont
  E_comp <- comp_low$E_cont + comp_medium$E_cont + comp_high$E_cont

  if(Country[i] == "ITA"){
    bc <- c("1938-1942", "1942-1948", "1948-1953", "1953-1958", "1958-1963", "1963-1968")
    bc5_sel <- bc[sel_bc_Fig5]
  } else {
    bc <- c("1940-1945", "1945-1950", "1950-1955", "1955-1960", "1960-1965", "1965-1970")
    bc5_sel <- bc[sel_bc_Fig5]
  }

  # cbind with the character `bc` yields a character matrix, hence the
  # as.numeric(as.character(...)) conversions below. Column names are pinned
  # to "F"/"E" to keep downstream references unchanged.
  D_cont <- cbind(bc, F = F_comp, E = E_comp)
  Prop_F <- D_cont %>%
    as.data.frame() %>%
    mutate(F = as.numeric(as.character(F)),
           E = as.numeric(as.character(E)),
           Abs_F = abs(F),
           Abs_E = abs(E),
           Abs_Total = Abs_F + Abs_E,
           Prop_F = Abs_F / Abs_Total,
           # Reverse cumulative sums: contribution accumulated from the
           # selected cohort window onwards.
           cumsum_Abs_F = rev(cumsum(rev(Abs_F))),
           cumsum_Abs_E = rev(cumsum(rev(Abs_E))),
           cumsum_Abs_Total = cumsum_Abs_F + cumsum_Abs_E,
           cumsum_Prop_F = cumsum_Abs_F / cumsum_Abs_Total) %>%
    filter(bc == bc5_sel)
  Prop_F <- Prop_F$cumsum_Prop_F

  CCF_all <- c(CCF_all, CCF)
  Prop_F_all <- c(Prop_F_all, Prop_F)
}
# Assemble the per-country results and draw Figure 5: CCF (x) against the
# average contribution of the fertility component F (y), one labelled point
# per country, with a subset of countries highlighted in a second colour.
# Note: cbind() with the character `Country` vector produces a character
# matrix, which is why both numeric columns are rebuilt with
# as.numeric(as.character(...)) below.
D_Fig5 <- cbind(Country, CCF_all, Prop_F_all)
Fig5 <- D_Fig5 %>%
as.data.frame() %>%
mutate(CCF_all = as.numeric(as.character(CCF_all)),
Prop_F_all = as.numeric(as.character(Prop_F_all)),
Country = case_when(Country == "AUS" ~ "Australia",
Country == "CR" ~ "Croatia",
Country == "FIN" ~ "Finland",
Country == "GR" ~ "Greece",
Country == "HUN" ~ "Hungary",
Country == "ITA" ~ "Italy",
Country == "IR" ~ "Ireland",
Country == "RUS" ~ "Russia",
Country == "SER" ~ "Serbia",
Country == "SK" ~ "South Korea",
Country == "ESP" ~ "Spain"),
Country_sel = ifelse(Country %in% c("Australia", "Finland", "Hungary", "Ireland", "South Korea", "Spain"), "1", "0")) %>%
ggplot(aes(x = CCF_all, y = Prop_F_all, group = Country, colour = Country_sel)) +
geom_point(size = 4) +
geom_label_repel(aes(label = Country)) +
theme_bw(base_family = "Times New Roman") +
scale_colour_manual(values = c(Mycol[1], Mycol[3]), name = "") +
labs(x = "Completed cohort fertility rate",
y = "Average contribution of the component F to changes in completed cohort fertility") +
theme(legend.position = "none")
# Export as EPS via cairo (needed for the embedded Times New Roman font).
ggsave("Output/Fig5_dot.eps", Fig5, device = cairo_ps, width = 8.5, height = 6)
|
7b74327e735f6149ba3fcc8d67f320c7f0c30432
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dataMaid/examples/exampleData.Rd.R
|
e144f375045d04a7090cba2bea2b6a252b66078c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,164
|
r
|
exampleData.Rd.R
|
# Auto-extracted example code from the dataMaid package's exampleData.Rd help
# page. Everything after the "## Not run:" marker below is commented out
# ("##D" prefix) and is not executed when this file is sourced.
library(dataMaid)
### Name: exampleData
### Title: Example data with zero-inflated variables
### Aliases: exampleData
### Keywords: datasets
### ** Examples
## Not run:
##D isID <- function(v, nMax = NULL, ...) {
##D out <- list(problem = FALSE, message = "")
##D if (class(v) %in% c("character", "factor", "labelled", "numeric", "integer")) {
##D v <- as.character(v)
##D lengths <- nchar(v)
##D if (all(lengths > 10) & length(unique(lengths)) == 1) {
##D out$problem <- TRUE
##D out$message <- "Warning: This variable seems to contain ID codes!"
##D }
##D }
##D out
##D }
##D
##D
##D countZeros <- function(v, ...) {
##D res <- length(which(v == 0))
##D summaryResult(list(feature = "No. zeros", result = res, value = res))
##D }
##D countZeros <- summaryFunction(countZeros, description = "Count number of zeros",
##D classes = allClasses())
##D summarize(toyData, numericSummaries = c(defaultNumericSummaries()))
##D
##D
##D mosaicVisual <- function(v, vnam, doEval) {
##D thisCall <- call("mosaicplot", table(v), main = vnam, xlab = "")
##D if (doEval) {
##D return(eval(thisCall))
##D } else return(deparse(thisCall))
##D }
##D mosaicVisual <- visualFunction(mosaicVisual,
##D description = "Mosaic plots using graphics",
##D classes = allClasses())
##D
##D identifyColons <- function(v, nMax = Inf, ... ) {
##D v <- unique(na.omit(v))
##D problemMessage <- "Note: The following values include colons:"
##D problem <- FALSE
##D problemValues <- NULL
##D
##D problemValues <- v[sapply(gregexpr("[[:xdigit:]]:[[:xdigit:]]", v),
##D function(x) all(x != -1))]
##D
##D if (length(problemValues) > 0) {
##D problem <- TRUE
##D }
##D
##D problemStatus <- list(problem = problem,
##D problemValues = problemValues)
##D outMessage <- messageGenerator(problemStatus, problemMessage, nMax)
##D
##D checkResult(list(problem = problem,
##D message = outMessage,
##D problemValues = problemValues))
##D }
##D
##D identifyColons <- checkFunction(identifyColons,
##D description = "Identify non-suffixed nor -prefixed colons",
##D classes = c("character", "factor", "labelled"))
##D makeDataReport(exampleData, replace = TRUE,
##D preChecks = c("isKey", "isEmpty", "isID"),
##D allVisuals = "mosaicVisual",
##D characterSummaries = c(defaultCharacterSummaries(), "countZeros"),
##D factorSummaries = c(defaultFactorSummaries(), "countZeros"),
##D labelledSummaries = c(defaultLabelledSummaries(), "countZeros"),
##D numericSummaries = c(defaultNumericSummaries(), "countZeros"),
##D integerSummaries = c(defaultIntegerSummaries(), "countZeros"),
##D characterChecks = c(defaultCharacterChecks(), "identifyColons"),
##D factorChecks = c(defaultFactorChecks(), "identifyColons"),
##D labelledCheck = c(defaultLabelledChecks(), "identifyColons"))
##D
##D
##D
## End(Not run)
|
6139f0ce8f0d3ee6523a92146d73307b3b1d8b9c
|
cae21dd8f17f9d284ec9635128b62cc7c0149633
|
/R/power.R
|
b0d757d158ef898909f960d4f8d81e0ec3d34ff1
|
[] |
no_license
|
heiniglab/scPower
|
5cf321753d0da67573bde205e0092a7981b44c64
|
108bf5691b853496290d60e63ffe230e0dc886fe
|
refs/heads/master
| 2023-08-08T23:35:35.626748
| 2023-07-19T14:01:39
| 2023-07-19T14:01:39
| 249,658,551
| 31
| 4
| null | 2023-07-10T09:32:12
| 2020-03-24T08:58:19
|
R
|
UTF-8
|
R
| false
| false
| 115,996
|
r
|
power.R
|
######################################
# Functions for power calculation
#####################################
#' Power calculation for cell type identification
#'
#' Computes the probability of observing at least \code{min.num.cells} cells of
#' a cell type with population frequency \code{cell.type.frac} in every one of
#' \code{nSamples} individuals, when \code{nCells} cells are measured per
#' individual. Detection within one individual is modelled via the negative
#' binomial distribution.
#'
#' @param nCells Number of cells measured per individual
#' @param min.num.cells Minimal number of cells of the target cell type that must be detected
#' @param cell.type.frac Population frequency of the target cell type
#' @param nSamples Sample size (number of individuals)
#'
#' @return Probability to detect the cell type in all individuals
#'
#' @export
power.detect.celltype <- function(nCells, min.num.cells, cell.type.frac, nSamples){
  # P(at least min.num.cells target cells among nCells draws): no more than
  # nCells - min.num.cells non-target cells before the min.num.cells-th
  # target cell (negative binomial CDF).
  single.sample.power <- pnbinom(nCells - min.num.cells, min.num.cells, cell.type.frac)
  # Detection must succeed independently in every one of the nSamples individuals.
  single.sample.power^nSamples
}
#' Cell sample size calculation for cell type identification
#'
#' Inverts \code{power.detect.celltype}: returns how many cells must be
#' measured per individual so that at least \code{min.num.cells} cells of a
#' cell type with frequency \code{cell.type.frac} are observed in each of
#' \code{nSamples} individuals with overall probability \code{prob.cutoff}.
#'
#' @param prob.cutoff Target power to detect the cell type in all individuals
#' @param min.num.cells Minimal number of cells of the target cell type that must be detected
#' @param cell.type.frac Population frequency of the target cell type
#' @param nSamples Sample size (number of individuals)
#'
#' @return Required number of cells per individual to reach the target power
#'
#' @export
number.cells.detect.celltype <- function(prob.cutoff, min.num.cells, cell.type.frac, nSamples){
  # Per-individual success probability needed so that the joint probability
  # over nSamples independent individuals reaches prob.cutoff.
  per.sample.cutoff <- prob.cutoff^(1 / nSamples)
  # Negative binomial quantile gives the tolerated number of non-target cells
  # before the min.num.cells-th target cell; add the target cells themselves.
  extra.cells <- qnbinom(per.sample.cutoff, min.num.cells, cell.type.frac)
  extra.cells + min.num.cells
}
#' Power calculation for a DE/eQTL study with 10X design (with a restricted number of individuals per lane)
#'
#' This function to calculate the detection power for a DE or eQTL study, given DE/eQTL genes from a reference study,
#' in a single cell 10X RNAseq study. The power depends on the cost determining parameter of sample size, number of cells
#' per individual and read depth.
#'
#' @param nCells Number of cells per individual
#' @param readDepth Target read depth per cell
#' @param ct.freq Frequency of the cell type of interest
#' @param samplesPerLane Maximal number of individuals per 10X lane
#' @param read.umi.fit Data frame for fitting the mean UMI counts per cell
#' depending on the mean readds per cell (required columns: intercept, reads (slope))
#' @param gamma.mixed.fits Data frame with gamma mixed fit parameters for each cell type
#' (required columns: parameter, ct (cell type), intercept, meanUMI (slope))
#' @param ct Cell type of interest (name from the gamma mixed models)
#' @param disp.fun.param Function to fit the dispersion parameter dependent on the mean
#' (required columns: ct (cell type), asymptDisp, extraPois (both from taken from DEseq))
#' @param mappingEfficiency Fraction of reads successfully mapped to the transcriptome
#' in the end (need to be between 1-0)
#' @param multipletRate Expected increase in multiplets for additional cell in the lane
#' @param multipletFactor Expected read proportion of multiplet cells vs singlet cells
#' @param multipletRateGrowth Development of multiplet rate with increasing number of cells per lane,
#' "linear" if overloading should be modeled explicitly, otherwise "constant".
#' The default value for the parameter multipletRate is matching the option "linear".
#' @param returnResultsDetailed If true, return not only summary data frame,
#' but additional list with exact probability vectors
#' @inheritParams calculate.probabilities
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
#' @export
#'
#' @examples
#' power.general.withDoublets(83,1000,1000,0.2,"de",de.ref.study, "Blueprint (CLL) iCLL-mCLL",
#' 8,read.umi.fit,gamma.mixed.fits,"CD4 T cells",disp.fun.param)
#'
power.general.withDoublets<-function(nSamples,nCells,readDepth,ct.freq,
type,ref.study,ref.study.name,
samplesPerLane,read.umi.fit,
gamma.mixed.fits,ct,
disp.fun.param,
mappingEfficiency=0.8,
multipletRate=7.67e-06,multipletFactor=1.82,
min.UMI.counts=3,perc.indiv.expr=0.5,
cutoffVersion="absolute",
nGenes=21000,samplingMethod="quantiles",
multipletRateGrowth="linear",
sign.threshold=0.05,
MTmethod="Bonferroni",
useSimulatedPower=TRUE,
simThreshold=4,
speedPowerCalc=FALSE,
indepSNPs=10,
ssize.ratio.de=1,
returnResultsDetailed=FALSE){
#Estimate the multiplet fraction: either growing linearly with the total
#number of cells loaded on the lane (overloading) or a fixed constant.
if(multipletRateGrowth=="linear"){
multipletFraction<-multipletRate*nCells*samplesPerLane
} else if (multipletRateGrowth == "constant") {
multipletFraction<-multipletRate
} else {
stop("No known option for multipletRateGrowth. Use the values 'linear' or 'constant'.")
}
#Check that the number of cells entered does not produce a multiplet rate of >100%
if(multipletFraction>=1){
stop("Too many cells per individual entered! Multiplet rate of more than 100%!")
}
#Cells remaining after discarding multiplets.
usableCells<-round((1-multipletFraction)*nCells)
#Effective read depth for singlets: multiplets consume multipletFactor times
#the reads of a singlet, so the sequencing budget is reallocated accordingly.
readDepthSinglet<-readDepth*nCells/(usableCells+multipletFactor*(nCells-usableCells))
#Reads that actually map to the transcriptome.
mappedReadDepth<-readDepthSinglet*mappingEfficiency
#Number of usable cells belonging to the cell type of interest.
ctCells<-round(usableCells*ct.freq)
#Check that really only one row is given for read.umi.fit
if(nrow(read.umi.fit)>1){
stop("Please only enter data frame with one row for read.umi.fit,
only one fit can be evaluated in one run!")
}
#Mean UMI count per cell, modeled as linear in log(mapped read depth).
umiCounts<-read.umi.fit$intercept+read.umi.fit$reads*log(mappedReadDepth)
if(umiCounts<=0){
stop("Read depth too small! UMI model estimates a mean UMI count per cell smaller than 0!")
}
#Check if gamma fit data for the cell type exists
if(! any(gamma.mixed.fits$ct==ct)){
stop(paste("No gene curve fitting data in the data frame gamma.mixed.fits fits to the specified cell type",
ct,". Check that the cell type is correctly spelled and the right gamma.mixed.fits object used."))
}
#Each gamma mixture parameter is modeled as linear in the mean UMI count.
gamma.fits.ct<-gamma.mixed.fits[gamma.mixed.fits$ct==ct,]
gamma.fits.ct$fitted.value<-gamma.fits.ct$intercept+gamma.fits.ct$meanUMI*umiCounts
gamma.parameters<-data.frame(p1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="p1"],
p3=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="p3"],
mean1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="mean1"],
mean2=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="mean2"],
sd1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="sd1"],
sd2=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="sd2"])
#Convert the mean/sd parameterization to rate/shape (external helper).
gamma.parameters<-convert.gamma.parameters(gamma.parameters,type="rateshape")
#Check if dispersion data for the cell type exists
if(! any(disp.fun.param$ct==ct)){
stop(paste("No dispersion fitting data in the data frame disp.fun.param fits to the specified cell type",
ct,". Check that the cell type is correctly spelled and the right disp.fun.param object used."))
}
#Dispersion fit parameters for the selected cell type.
disp.fun<-disp.fun.param[disp.fun.param$ct==ct,]
#Combined expression and DE/eQTL detection probabilities (external helper).
probs<-calculate.probabilities(nSamples,ctCells,type,
ref.study,ref.study.name,
gamma.parameters,disp.fun,
min.UMI.counts,perc.indiv.expr,
cutoffVersion,
nGenes,samplingMethod,
sign.threshold,MTmethod,
useSimulatedPower,
simThreshold,
speedPowerCalc,
indepSNPs,
ssize.ratio.de,
returnResultsDetailed)
#Return either detailed probabilities for each DE/eQTL gene (early return of a
#list) or only the summary data frame (falls through to the final return).
if(returnResultsDetailed){
power.study<-data.frame(name=ref.study.name,
powerDetect=probs$overview.df$powerDetect,
exp.probs=probs$overview.df$exp.probs,
power=probs$overview.df$power,
sampleSize=nSamples,
totalCells=nCells,
usableCells=usableCells,
multipletFraction=multipletFraction,
ctCells=ctCells,
readDepth=readDepth,
readDepthSinglet=readDepthSinglet,
mappedReadDepth=mappedReadDepth,
expressedGenes=probs$overview.df$expressedGenes)
return(list(overview.df=power.study,probs.df=probs$probs.df))
} else {
power.study<-data.frame(name=ref.study.name,
powerDetect=probs$powerDetect,
exp.probs=probs$exp.probs,
power=probs$power,
sampleSize=nSamples,
totalCells=nCells,
usableCells=usableCells,
multipletFraction=multipletFraction,
ctCells=ctCells,
readDepth=readDepth,
readDepthSinglet=readDepthSinglet,
mappedReadDepth=mappedReadDepth,
expressedGenes=probs$expressedGenes)
}
return(power.study)
}
#' Power calculation for a DE/eQTL study with 10X design (with a restricted number of cells per lane)
#'
#' Variant of \code{power.general.withDoublets} that caps the total number of
#' cells per 10X lane instead of fixing the number of individuals per lane.
#' Individuals are packed onto lanes as densely as the cap allows, which also
#' bounds the resulting doublet rate.
#'
#' @param cellsPerLane Maximal number of cells per 10X lane
#' @inheritParams power.general.withDoublets
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
#' @export
power.general.restrictedDoublets<-function(nSamples,nCells,readDepth,ct.freq,
                                           type,ref.study,ref.study.name,
                                           cellsPerLane,read.umi.fit,
                                           gamma.mixed.fits,ct,
                                           disp.fun.param,
                                           mappingEfficiency=0.8,
                                           multipletRate=7.67e-06,multipletFactor=1.82,
                                           min.UMI.counts=3,perc.indiv.expr=0.5,
                                           cutoffVersion="absolute",
                                           nGenes=21000,samplingMethod="quantiles",
                                           multipletRateGrowth="linear",
                                           sign.threshold=0.05,
                                           MTmethod="Bonferroni",
                                           useSimulatedPower=TRUE,
                                           simThreshold=4,
                                           speedPowerCalc=FALSE,
                                           indepSNPs=10,
                                           ssize.ratio.de=1,
                                           returnResultsDetailed=FALSE){
  # How many individuals fit on one lane without exceeding the cell cap.
  samplesPerLane <- floor(cellsPerLane / nCells)
  # Guard clause: the cap must admit at least one individual per lane.
  if(samplesPerLane == 0){
    stop("Allowed number of cells per lane is too low to fit so many cells per individual!")
  }
  # Delegate to the general model with the derived lane occupancy; named
  # arguments make the mapping between the two signatures explicit.
  power.general.withDoublets(nSamples = nSamples,
                             nCells = nCells,
                             readDepth = readDepth,
                             ct.freq = ct.freq,
                             type = type,
                             ref.study = ref.study,
                             ref.study.name = ref.study.name,
                             samplesPerLane = samplesPerLane,
                             read.umi.fit = read.umi.fit,
                             gamma.mixed.fits = gamma.mixed.fits,
                             ct = ct,
                             disp.fun.param = disp.fun.param,
                             mappingEfficiency = mappingEfficiency,
                             multipletRate = multipletRate,
                             multipletFactor = multipletFactor,
                             min.UMI.counts = min.UMI.counts,
                             perc.indiv.expr = perc.indiv.expr,
                             cutoffVersion = cutoffVersion,
                             nGenes = nGenes,
                             samplingMethod = samplingMethod,
                             multipletRateGrowth = multipletRateGrowth,
                             sign.threshold = sign.threshold,
                             MTmethod = MTmethod,
                             useSimulatedPower = useSimulatedPower,
                             simThreshold = simThreshold,
                             speedPowerCalc = speedPowerCalc,
                             indepSNPs = indepSNPs,
                             ssize.ratio.de = ssize.ratio.de,
                             returnResultsDetailed = returnResultsDetailed)
}
#' Power calculation for a DE/eQTL study with Smart-seq design
#'
#' This function to calculate the detection power for a DE or eQTL study, given DE/eQTL genes from a reference study,
#' in a single cell Smart-seq RNAseq study. The power depends on the cost determining parameter of sample size, number of cells
#' per individual and read depth.
#'
#' @param nSamples Sample size
#' @param nCells Number of cells per individual
#' @param readDepth Target read depth per cell
#' @param ct.freq Frequency of the cell type of interest
#' @param type (eqtl/de) study
#' @param ref.study Data frame with reference studies to be used for expression ranks and effect sizes
#' (required columns: name (study name), rank (expression rank), FoldChange (DE study) /Rsq (eQTL study))
#' @param ref.study.name Name of the reference study. Will be checked in the ref.study data frame for it (as column name).
#' @param gamma.mixed.fits Data frame with gamma mixed fit parameters for each cell type
#' (required columns: parameter, ct (cell type), intercept, meanReads (slope))
#' @param ct Cell type of interest (name from the gamma mixed models)
#' @param disp.linear.fit Function to fit the dispersion parameter dependent on the mean (parameter linear dependent on read depth)
#' (required columns: parameter, ct (cell type), intercept, meanReads (slope))
#' @param mappingEfficiency Fraction of reads successfully mapped to the transcriptome
#' in the end (need to be between 1-0)
#' @param multipletFraction Multiplet fraction in the experiment as a constant factor
#' @param multipletFactor Expected read proportion of multiplet cells vs singlet cells
#' @param nGenes Number of genes to simulate (should match the number of genes used for the fitting)
#' @param min.norm.count Expression cutoff in one individual: if cutoffVersion=absolute,
#' more than this number of counts per kilobase transcript for each gene per individual and
#' cell type is required; if cutoffVersion=percentage, more than this percentage
#' of cells need to have a count value large than 0
#' @param samplingMethod Approach to sample the gene mean values (either taking quantiles
#' or random sampling)
#' @param sign.threshold Significance threshold
#' @param MTmethod Multiple testing correction method (possible options: "Bonferroni","FDR","none")
#' @param useSimulatedPower Option to simulate eQTL power for small mean values to increase accuracy
#' (only possible for eQTL analysis)
#' @param simThreshold Threshold until which the simulated power is taken instead of the analytic
#' @param speedPowerCalc Option to speed power calculation by skipping all genes with
#' an expression probability less than 0.01 (as overall power is anyway close to 0)
#' @param indepSNPs Number of independent SNPs assumed for each loci (for eQTL
#' Bonferroni multiple testing correction the number of tests are estimated
#' as number expressed genes * indepSNPs)
#' @param ssize.ratio.de In the DE case, ratio between sample size of group 0
#' (control group) and group 1 (1=balanced design)
#' @param returnResultsDetailed If true, return not only summary data frame, but additional list with exact probability vectors
#' @inheritParams estimate.exp.prob.values
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
#' @export
power.smartseq<-function(nSamples,nCells,readDepth,ct.freq,
                         type,ref.study,ref.study.name,
                         gamma.mixed.fits,ct,
                         disp.linear.fit,
                         mappingEfficiency=0.8,
                         multipletFraction=0,multipletFactor=1.82,
                         min.norm.count=3,perc.indiv.expr=0.5,
                         cutoffVersion="absolute",
                         nGenes=21000,samplingMethod="quantiles",
                         sign.threshold=0.05,MTmethod="Bonferroni",
                         useSimulatedPower=TRUE,
                         simThreshold=4,
                         speedPowerCalc=FALSE,
                         indepSNPs=10,
                         ssize.ratio.de=1,
                         returnResultsDetailed=FALSE){

  #Number of singlet cells per individual after removing multiplets
  usableCells<-round((1-multipletFraction)*nCells)

  #Estimate multiplet rate and "real read depth": a multiplet consumes
  #multipletFactor times the reads of a singlet cell
  readDepthSinglet<-readDepth*nCells/(usableCells+multipletFactor*(nCells-usableCells))

  #Estimate fraction of correctly mapped reads
  #NOTE(review): derived from the raw readDepth, not from readDepthSinglet
  #computed above - confirm this is intended
  mappedReadDepth<-readDepth*mappingEfficiency

  #Number of cells of the target cell type per individual
  ctCells<-usableCells*ct.freq

  #Check if gamma fit data for the cell type exists
  if(! any(gamma.mixed.fits$ct==ct)){
    stop(paste("No gene curve fitting data in the data frame gamma.mixed.fits fits to the specified cell type",
               ct,". Check that the cell type is correctly spelled and the right gamma.mixed.fits object used."))
  }

  #Get gamma values dependent on mean reads (linear fit per mixture parameter)
  gamma.fits.ct<-gamma.mixed.fits[gamma.mixed.fits$ct==ct,]
  gamma.fits.ct$fitted.value<-gamma.fits.ct$intercept+gamma.fits.ct$meanReads*mappedReadDepth

  #Gamma mean and sd parameters must stay positive for a valid distribution
  if(any(gamma.fits.ct$fitted.value[gamma.fits.ct$parameter %in%
                                    c("mean1","mean2","sd1","sd2")]<=0)){
    stop("At least one of the gamma parameter got negative for this read depth.",
         "Choose a higher read depth or a different gamma - read fit.")
  }

  gamma.parameters<-data.frame(p1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="p1"],
                               p3=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="p3"],
                               mean1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="mean1"],
                               mean2=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="mean2"],
                               sd1=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="sd1"],
                               sd2=gamma.fits.ct$fitted.value[gamma.fits.ct$parameter=="sd2"])

  #Convert them to the rateshape variant
  gamma.parameters<-convert.gamma.parameters(gamma.parameters,type="rateshape")

  #Sample mean values from the fitted gamma mixture
  if(samplingMethod=="random"){
    gene.means<-sample.mean.values.random(gamma.parameters,nGenes)
  } else if (samplingMethod=="quantiles"){
    gene.means<-sample.mean.values.quantiles(gamma.parameters,nGenes)
  } else {
    stop("No known sampling method. Use the options 'random' or 'quantiles'.")
  }

  sim.genes<-data.frame(mean=gene.means)

  #Calculate the mean per cell type for each individuum
  sim.genes$mean.sum<-sim.genes$mean*ctCells

  #Sort simulated genes by mean expression
  sim.genes<-sim.genes[order(sim.genes$mean, decreasing = TRUE),]

  #Check if the study reference name exists in the data frame
  if(! any(ref.study$name==ref.study.name)){
    stop(paste("No study name in the data frame ref.study fits to the specified reference study name",
               ref.study.name,". Check that the name is correctly spelled and the right ref.study.name object used."))
  }

  #Assign a gene length for each gene (default 5000), for known DE genes the real value
  #NOTE(review): assumes ref.study contains a geneLength column for this branch
  sim.genes$geneLength<-5000
  temp.ranks<-ref.study[ref.study$rank<nGenes & ref.study$name==ref.study.name,]
  sim.genes$geneLength[temp.ranks$rank]<-temp.ranks$geneLength

  #Check if dispersion data for the cell type exists
  if(! any(disp.linear.fit$ct==ct)){
    stop(paste("No dispersion fitting data in the data frame disp.linear.fit fits to the specified cell type",
               ct,". Check that the cell type is correctly spelled and the right disp.linear.fit object used."))
  }

  #Get dispersion values dependent on mean reads
  disp.fun.ct<-disp.linear.fit[disp.linear.fit$ct==ct,]
  disp.fun.ct$fitted.value<-disp.fun.ct$intercept+disp.fun.ct$meanReads*mappedReadDepth

  #Fit dispersion parameter dependent on mean parameter
  disp.fun<-data.frame(asymptDisp=disp.fun.ct$fitted.value[disp.fun.ct$parameter=="asymptDisp"],
                       extraPois=disp.fun.ct$fitted.value[disp.fun.ct$parameter=="extraPois"],
                       ct=ct)

  #Length-transform the means before sampling dispersion values
  #(Smart-seq counts scale with gene length; reference unit is 1 kb)
  sim.genes$mean.length.transformed<-sim.genes$mean*sim.genes$geneLength/1000
  sim.genes$disp<-sample.disp.values(sim.genes$mean.length.transformed,disp.fun)
  sim.genes$disp.sum<-sim.genes$disp/ctCells

  #Fit also length transformed sum
  sim.genes$mean.length.sum<-sim.genes$mean.length.transformed*ctCells

  #Calculate for each gene the expression probability
  sim.genes$exp.probs<-estimate.exp.prob.values(sim.genes$mean,1/sim.genes$disp,ctCells,
                                                nSamples=nSamples,min.counts=min.norm.count,
                                                perc.indiv.expr=perc.indiv.expr,
                                                cutoffVersion=cutoffVersion)

  #Calculate the expected number of expressed genes
  exp.genes<-round(sum(sim.genes$exp.probs))

  #Set the simulated DE genes as the genes at the same rank position as the original DE genes
  ranks<-ref.study$rank[ref.study$name==ref.study.name]

  #Set all DE with rank > nGenes to nGenes (expression anyway nearly 0)
  ranks[ranks>nGenes]<-nGenes

  #Choose the DE genes according to the rank
  foundSignGenes<-sim.genes[ranks,]

  #Calculate alpha parameter corrected for multiple testing
  if(MTmethod=="Bonferroni"){
    alpha<-sign.threshold/exp.genes
  } else if (MTmethod=="none"){
    alpha<-sign.threshold
  #For FDR correction, optimization is done dependent on eqtl/de power later
  #Only first parameters are calculated here
  } else if(MTmethod=="FDR"){
    lowerBound<-sign.threshold/exp.genes
    m0<-exp.genes-round(sum(foundSignGenes$exp.probs))
    #Check that there are expressed non-DE genes, otherwise the FDR
    #optimization is degenerate (same guard as in calculate.probabilities)
    if(m0==0){
      stop(paste0("With the current parameter, all genes that are predicted to be ",
                  "expressed (in total ",exp.genes,") are defined as DE genes. ",
                  "FDR correction not possible in this case. Please verify your input ",
                  "data.frame ref.study contains only rows with DE genes!"))
    }
  } else {
    stop(paste("MTmethod",MTmethod,"is unknown! Please choose between",
               "Bonferroni, FDR and none!"))
  }

  #Calculate power
  if(type=="eqtl"){
    #Check that the required column Rsq exists
    if(! any(colnames(ref.study)=="Rsq")){
      stop(paste("Column name Rsq missing in the ref.study data frame.",
                 "Please make sure to provide this column for eQTL power analysis."))
    }
    #Set the Rsq respectively
    foundSignGenes$Rsq<-ref.study$Rsq[ref.study$name==ref.study.name]

    if(MTmethod=="FDR"){
      #In the extreme case that also with Bonferroni cut-off less than one TP
      #can be found, the optimization is not working, use here the Bonferroni
      #cutoff instead
      if(fdr.optimization(x=lowerBound,
                          fdr=sign.threshold,m0=m0,type=type,
                          exp.vector=foundSignGenes$exp.probs,
                          es.vector=foundSignGenes$Rsq,
                          nSamples=nSamples,
                          mean.vector=foundSignGenes$mean.length.sum,
                          useSimulatedPower=useSimulatedPower,
                          simThreshold=simThreshold)>0){
        alpha<-lowerBound
      } else {
        root<-uniroot(f=fdr.optimization,
                      interval=c(lowerBound,sign.threshold),
                      fdr=sign.threshold,m0=m0,type=type,
                      exp.vector=foundSignGenes$exp.probs,
                      es.vector=foundSignGenes$Rsq,
                      nSamples=nSamples,
                      mean.vector=foundSignGenes$mean.length.sum,
                      useSimulatedPower=useSimulatedPower,
                      simThreshold=simThreshold)
        alpha<-root$root
      }
    } else if (MTmethod=="Bonferroni"){
      #Restrict the Bonferroni for eQTLs cut-off further,
      #assuming x independent SNPs per gene
      alpha<-alpha/indepSNPs
    }

    #Skip power calculation for not expressed genes (if this option is chosen)
    if(speedPowerCalc){
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes), function(i)
        if(foundSignGenes$exp.probs[i]<0.01) {
          return(0)
        }else{
          power.eqtl(foundSignGenes$mean.length.sum[i],
                     foundSignGenes$Rsq[i],
                     alpha,nSamples,
                     useSimulatedPower=useSimulatedPower,
                     simThreshold=simThreshold)})
    } else {
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),
                                   function(i) power.eqtl(foundSignGenes$mean.length.sum[i],
                                                          foundSignGenes$Rsq[i],
                                                          alpha,nSamples,
                                                          useSimulatedPower=useSimulatedPower,
                                                          simThreshold=simThreshold))
    }

  } else if (type=="de") {
    #Check that the required column FoldChange exists
    if(! any(colnames(ref.study)=="FoldChange")){
      stop(paste("Column name FoldChange missing in the ref.study data frame.",
                 "Please make sure to provide this column for DE power analysis."))
    }
    #Set the fold change respectively
    foundSignGenes$FoldChange<-ref.study$FoldChange[ref.study$name==ref.study.name]

    if(MTmethod=="FDR"){
      #In the extreme case that also with Bonferroni cut-off less than one TP
      #can be found, the optimization is not working, use here the Bonferroni
      #cutoff instead
      if(fdr.optimization(x=lowerBound,
                          fdr=sign.threshold,m0=m0,type=type,
                          exp.vector=foundSignGenes$exp.probs,
                          es.vector=foundSignGenes$FoldChange,
                          nSamples=nSamples,
                          mean.vector=foundSignGenes$mean.length.sum,
                          disp.vector = foundSignGenes$disp.sum,
                          ssize.ratio.de=ssize.ratio.de)>0){
        alpha<-lowerBound
      } else {
        root<-uniroot(f=fdr.optimization,
                      interval=c(lowerBound,sign.threshold),
                      fdr=sign.threshold,m0=m0,type=type,
                      exp.vector=foundSignGenes$exp.probs,
                      es.vector=foundSignGenes$FoldChange,
                      nSamples=nSamples,
                      mean.vector=foundSignGenes$mean.length.sum,
                      disp.vector = foundSignGenes$disp.sum,
                      ssize.ratio.de=ssize.ratio.de)
        alpha<-root$root
      }
    }

    #Skip power calculation for not expressed genes (if this option is chosen)
    if(speedPowerCalc){
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),function(i)
        if(foundSignGenes$exp.probs[i]<0.01){
          return(0)
        } else {
          power.de(
            nSamples,
            foundSignGenes$mean.length.sum[i],
            foundSignGenes$FoldChange[i],
            1/foundSignGenes$disp.sum[i],
            alpha,3,ssize.ratio=ssize.ratio.de)
        })
    } else {
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),function(i) power.de(
        nSamples,
        foundSignGenes$mean.length.sum[i],
        foundSignGenes$FoldChange[i],
        1/foundSignGenes$disp.sum[i],
        alpha,3,ssize.ratio=ssize.ratio.de))
    }

  } else {
    stop('For type parameter only "eqtl" or "de" possible!')
  }

  #Calculate total probability as the DE power times the expression probability
  foundSignGenes$combined.prob<-foundSignGenes$power*foundSignGenes$exp.probs

  power.study<-data.frame(name=ref.study.name,
                          powerDetect=mean(foundSignGenes$combined.prob),
                          exp.probs=mean(foundSignGenes$exp.probs),
                          power=mean(foundSignGenes$power),
                          sampleSize=nSamples,
                          totalCells=nCells,
                          usableCells=usableCells,
                          multipletFraction=multipletFraction,
                          ctCells=ctCells,
                          readDepth=readDepth,
                          readDepthSinglet=readDepthSinglet,
                          mappedReadDepth=mappedReadDepth,
                          expressedGenes=exp.genes)

  #Return either detailed probabilities for each DE/eQTL gene or only overview
  if(returnResultsDetailed){
    return(list(overview.df=power.study,probs.df=foundSignGenes))
  } else {
    return(power.study)
  }
}
#' Power calculation for a DE/eQTL study with same read depth as the fitted gamma distribution
#'
#' This is a simplified version of the function power.general.withDoublets to be used on a gamma
#' fit not parameterized for UMI/read counts. It evaluates the effect of different samples sizes
#' and cells per person, keeping the same read depth as in the experiment used for fitting.
#'
#' @param nCells Number of cells per individual
#' @param ct.freq Frequency of the cell type of interest
#' @param samplesPerLane Maximal number of individuals per 10X lane
#' @param gamma.parameters Data frame with gamma parameters for each cell type
#' (required columns: ct (cell type), s1, r1, s2, r2, p1, p2/p3 (gamma parameters for both components))
#' @param ct Cell type of interest (name from the gamma mixed models)
#' @param disp.fun.param Function to fit the dispersion parameter dependent on the mean
#' (required columns: ct (cell type), asymptDisp, extraPois (both taken from DESeq))
#' @param mappingEfficiency Fraction of reads successfully mapped to the transcriptome in the end (must be between 0 and 1)
#' @param multipletRate Expected increase in multiplets for additional cell in the lane
#' @param multipletFactor Expected read proportion of multiplet cells vs singlet cells
#' @param multipletRateGrowth Development of multiplet rate with increasing number of cells per lane, "linear" if overloading should be
#' modeled explicitly, otherwise "constant". The default value for the parameter multipletRate is matching the option "linear".
#' @inheritParams calculate.probabilities
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
#' @export
#'
power.sameReadDepth.withDoublets<-function(nSamples,nCells,ct.freq,
                                           type,ref.study,ref.study.name,
                                           samplesPerLane,
                                           gamma.parameters,ct,
                                           disp.fun.param,
                                           mappingEfficiency=0.8,
                                           multipletRate=7.67e-06,multipletFactor=1.82,
                                           min.UMI.counts=3,perc.indiv.expr=0.5,
                                           cutoffVersion="absolute",
                                           nGenes=21000,samplingMethod="quantiles",
                                           multipletRateGrowth="linear",
                                           sign.threshold=0.05, MTmethod="Bonferroni",
                                           useSimulatedPower=TRUE,
                                           simThreshold=4,
                                           speedPowerCalc=FALSE,
                                           indepSNPs=10,
                                           ssize.ratio.de=1,
                                           returnResultsDetailed=FALSE){
  #NOTE(review): mappingEfficiency and multipletFactor are accepted for
  #interface compatibility but are not used in the body below (the read depth
  #is fixed to the one of the fitted gamma distribution) - confirm intended.
  #Estimate multiplet fraction dependent on cells per lane
  #("linear": overloading modeled explicitly, "constant": fixed rate)
  if(multipletRateGrowth=="linear"){
    multipletFraction<-multipletRate*nCells*samplesPerLane
  } else if (multipletRateGrowth == "constant") {
    multipletFraction<-multipletRate
  } else {
    stop(paste("Input", multipletRateGrowth, "No known option for multipletRateGrowth.",
               "Use the values 'linear' or 'constant'."))
  }
  #Check that the number of cells entered does not provide a multiplet rate of >100%
  if(multipletFraction>=1){
    stop("Too many cells per individual entered! Multiplet rate of more than 100%!")
  }
  #Number of singlet cells per individual after removing multiplets
  usableCells<-round((1-multipletFraction)*nCells)
  #Get the fraction of cell type cells
  ctCells<-round(usableCells*ct.freq)
  #Check if gamma fit data for the cell type exists
  if(! any(gamma.parameters$ct==ct)){
    stop(paste("No gene curve fitting data in the data frame gamma.mixed.fits fits to the specified cell type",
               ct,". Check that the cell type is correctly spelled and the right gamma.mixed.fits object used."))
  }
  #Restrict gamma parameters to the target cell type
  gamma.parameters<-gamma.parameters[gamma.parameters$ct==ct,]
  #Check if dispersion data for the cell type exists
  if(! any(disp.fun.param$ct==ct)){
    stop(paste("No dispersion fitting data in the data frame disp.fun.param fits to the specified cell type",
               ct,". Check that the cell type is correctly spelled and the right disp.fun.param object used."))
  }
  #Fit dispersion parameter dependent on mean parameter
  disp.fun<-disp.fun.param[disp.fun.param$ct==ct,]
  #Calculate expression power depending on gamma parameters
  #(delegates expression probability, power and detection power calculation)
  probs<-calculate.probabilities(nSamples,ctCells,type,
                                 ref.study,ref.study.name,
                                 gamma.parameters,disp.fun,
                                 min.UMI.counts,perc.indiv.expr,
                                 cutoffVersion,
                                 nGenes,samplingMethod,
                                 sign.threshold,MTmethod,
                                 useSimulatedPower,
                                 simThreshold,
                                 speedPowerCalc,
                                 indepSNPs,
                                 ssize.ratio.de,
                                 returnResultsDetailed)
  #Return either detailed probabilities for each DE/eQTL gene or only overview
  #(both variants annotate the result with the experimental design parameters)
  if(returnResultsDetailed){
    power.study<-data.frame(name=ref.study.name,
                            powerDetect=probs$overview.df$powerDetect,
                            exp.probs=probs$overview.df$exp.probs,
                            power=probs$overview.df$power,
                            sampleSize=nSamples,
                            totalCells=nCells,
                            usableCells=usableCells,
                            multipletFraction=multipletFraction,
                            ctCells=ctCells,
                            expressedGenes=probs$overview.df$expressedGenes)
    return(list(overview.df=power.study,probs.df=probs$probs.df))
  } else {
    power.study<-data.frame(name=ref.study.name,
                            powerDetect=probs$powerDetect,
                            exp.probs=probs$exp.probs,
                            power=probs$power,
                            sampleSize=nSamples,
                            totalCells=nCells,
                            usableCells=usableCells,
                            multipletFraction=multipletFraction,
                            ctCells=ctCells,
                            expressedGenes=probs$expressedGenes)
  }
  return(power.study)
}
#' Power calculation for a DE/eQTL study with same read depth as the fitted gamma distribution
#'
#' This function is a variant of power.sameReadDepth.withDoublets, where the number of samplesPerLane is not
#' given as a parameter; instead the individuals are distributed over the lanes in a way that restricts the
#' total number of cells per lane. This also gives an upper bound for the doublet rate.
#' @param cellsPerLane Maximal number of cells per 10X lane
#' @inheritParams power.sameReadDepth.withDoublets
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
#' @export
#'
power.sameReadDepth.restrictedDoublets<-function(nSamples,nCells,ct.freq,
                                                 type,ref.study,ref.study.name,
                                                 cellsPerLane,
                                                 gamma.parameters,ct,
                                                 disp.fun.param,
                                                 mappingEfficiency=0.8,
                                                 multipletRate=7.67e-06,multipletFactor=1.82,
                                                 min.UMI.counts=3,perc.indiv.expr=0.5,
                                                 cutoffVersion="absolute",
                                                 nGenes=21000,samplingMethod="quantiles",
                                                 multipletRateGrowth="linear",
                                                 sign.threshold=0.05, MTmethod="Bonferroni",
                                                 useSimulatedPower=TRUE,
                                                 simThreshold=4,
                                                 speedPowerCalc=FALSE,
                                                 indepSNPs=10,
                                                 ssize.ratio.de=1,
                                                 returnResultsDetailed=FALSE){
  #Pack as many individuals as possible onto one lane without exceeding
  #the allowed total number of cells per lane
  indivPerLane<-floor(cellsPerLane/nCells)
  #With zero individuals per lane the requested design is infeasible
  if(indivPerLane==0){
    stop("Allowed number of cells per lane is too low to fit so many cells per individual!")
  }
  #Delegate to the general function; all parameters are forwarded by name
  #to keep the mapping explicit and robust against signature changes
  return(power.sameReadDepth.withDoublets(nSamples=nSamples,nCells=nCells,ct.freq=ct.freq,
                                          type=type,ref.study=ref.study,
                                          ref.study.name=ref.study.name,
                                          samplesPerLane=indivPerLane,
                                          gamma.parameters=gamma.parameters,ct=ct,
                                          disp.fun.param=disp.fun.param,
                                          mappingEfficiency=mappingEfficiency,
                                          multipletRate=multipletRate,
                                          multipletFactor=multipletFactor,
                                          min.UMI.counts=min.UMI.counts,
                                          perc.indiv.expr=perc.indiv.expr,
                                          cutoffVersion=cutoffVersion,
                                          nGenes=nGenes,samplingMethod=samplingMethod,
                                          multipletRateGrowth=multipletRateGrowth,
                                          sign.threshold=sign.threshold,
                                          MTmethod=MTmethod,
                                          useSimulatedPower=useSimulatedPower,
                                          simThreshold=simThreshold,
                                          speedPowerCalc=speedPowerCalc,
                                          indepSNPs=indepSNPs,
                                          ssize.ratio.de=ssize.ratio.de,
                                          returnResultsDetailed=returnResultsDetailed))
}
#' Help function to calculate expression probability, power and detection power
#' for a given gamma distribution plus additional parameters
#'
#' @param nSamples Sample size
#' @param ctCells Number of cells of the target cell type
#' @param type (eqtl/de) study
#' @param ref.study Data frame with reference studies to be used for expression ranks and effect sizes
#' (required columns: name (study name), rank (expression rank), FoldChange (DE study) /Rsq (eQTL study))
#' @param ref.study.name Name of the reference study. Will be checked in the ref.study data frame for it (as column name).
#' @param gamma.parameters Data frame with gamma parameters, filtered for the correct cell type
#' (required columns: ct (cell type), s1, r1, s2, r2, p1, p2/p3
#' (gamma parameters for both components))
#' @param disp.fun Function to fit the dispersion parameter dependent on the mean,
#' filtered for the correct cell type (required columns: ct (cell type),
#' asymptDisp, extraPois (both taken from DESeq))
#' @param nGenes Number of genes to simulate (should match the number of genes used for the fitting)
#' @param min.UMI.counts Expression cutoff in one individual: if cutoffVersion=absolute,
#' more than this number of UMI counts for each gene per individual and
#' cell type is required; if cutoffVersion=percentage, more than this percentage
#' of cells need to have a count value large than 0
#' @param samplingMethod Approach to sample the gene mean values (either taking
#' quantiles or random sampling)
#' @param sign.threshold Significance threshold
#' @param MTmethod Multiple testing correction method
#' (possible options: "Bonferroni","FDR","none")
#' @param useSimulatedPower Option to simulate eQTL power for small mean values
#' to increase accuracy (only possible for eQTL analysis)
#' @param simThreshold Threshold until which the simulated power is taken instead
#' of the analytic (only for the eQTL analysis)
#' @param speedPowerCalc Option to speed power calculation by skipping all genes with
#' an expression probability less than 0.01 (as overall power is anyway close to 0)
#' @param indepSNPs Number of independent SNPs assumed for each loci (for eQTL
#' Bonferroni multiple testing correction the number of tests are estimated
#' as number expressed genes * indepSNPs)
#' @param ssize.ratio.de In the DE case, ratio between sample size of group 0
#' (control group) and group 1 (1=balanced design)
#' @param returnResultsDetailed If true, return not only summary data frame,
#' but additional list with exact probability vectors
#' @inheritParams estimate.exp.prob.values
#'
#' @return Power to detect the DE/eQTL genes from the reference study in a single cell experiment with these parameters
#'
calculate.probabilities<-function(nSamples,ctCells,type,
                                  ref.study,ref.study.name,
                                  gamma.parameters,disp.fun,
                                  min.UMI.counts,perc.indiv.expr,
                                  cutoffVersion,
                                  nGenes,samplingMethod,
                                  sign.threshold,MTmethod,
                                  useSimulatedPower,
                                  simThreshold,
                                  speedPowerCalc,
                                  indepSNPs,
                                  ssize.ratio.de,
                                  returnResultsDetailed){
  #Sample mean values from the fitted gamma mixture
  if(samplingMethod=="random"){
    gene.means<-sample.mean.values.random(gamma.parameters,nGenes)
  } else if (samplingMethod=="quantiles"){
    gene.means<-sample.mean.values.quantiles(gamma.parameters,nGenes)
  } else {
    stop("No known sampling method. Use the options 'random' or 'quantiles'.")
  }
  gene.disps<-sample.disp.values(gene.means,disp.fun)
  sim.genes<-data.frame(mean=gene.means, disp=gene.disps)
  #Sort simulated genes by mean expression
  sim.genes<-sim.genes[order(sim.genes$mean, decreasing = TRUE),]
  #Calculate the mean per cell type for each individuum
  sim.genes$mean.sum<-sim.genes$mean*ctCells
  sim.genes$disp.sum<-sim.genes$disp/ctCells
  #Calculate for each gene the expression probability
  sim.genes$exp.probs<-estimate.exp.prob.values(sim.genes$mean,1/sim.genes$disp,ctCells,
                                                nSamples=nSamples,min.counts=min.UMI.counts,
                                                perc.indiv.expr=perc.indiv.expr,
                                                cutoffVersion=cutoffVersion)
  #Calculate the expected number of expressed genes
  exp.genes<-round(sum(sim.genes$exp.probs))
  #Check if the study reference name exists in the data frame
  if(! any(ref.study$name==ref.study.name)){
    stop(paste("No study name in the data frame ref.study fits to the specified reference study name",
               ref.study.name,". Check that the name is correctly spelled and the right ref.study.name object used."))
  }
  #Set the simulated DE genes as the genes at the same rank position as the original DE genes
  ranks<-ref.study$rank[ref.study$name==ref.study.name]
  #Set all DE with rank > nGenes to nGenes (expression anyway nearly 0)
  ranks[ranks>nGenes]<-nGenes
  #Choose the DE genes according to the rank
  foundSignGenes<-sim.genes[ranks,]
  #Calculate alpha parameter corrected for multiple testing
  if(MTmethod=="Bonferroni"){
    alpha<-sign.threshold/exp.genes
  } else if (MTmethod=="none"){
    alpha<-sign.threshold
  #For FDR correction, optimization is done dependent on eqtl/de power later
  #Only first parameters are calculated here
  } else if(MTmethod=="FDR"){
    lowerBound<-sign.threshold/exp.genes
    m0<-exp.genes-round(sum(foundSignGenes$exp.probs))
    #Check that there are expressed non-DE genes (otherwise probably input issue)
    if(m0==0){
      stop(paste0("With the current parameter, all genes that are predicted to be ",
                  "expressed (in total ",exp.genes,") are defined as DE genes. ",
                  "FDR correction not possible in this case. Please verify your input ",
                  "data.frame ref.study contains only rows with DE genes!"))
    }
  } else {
    stop(paste("MTmethod",MTmethod,"is unknown! Please choose between",
               "Bonferroni, FDR and none!"))
  }
  #Calculate power
  if(type=="eqtl"){
    #Check that the required column Rsq exists
    if(! any(colnames(ref.study)=="Rsq")){
      stop(paste("Column name Rsq missing in the ref.study data frame.",
                 "Please make sure to provide this column for eQTL power analysis."))
    }
    #Set the Rsq respectively
    foundSignGenes$Rsq<-ref.study$Rsq[ref.study$name==ref.study.name]
    if(MTmethod=="FDR"){
      #In the extreme case that also with Bonferroni cut-off less than one TP
      #can be found, the optimization is not working, use here the Bonferroni
      #cutoff instead
      if(fdr.optimization(x=lowerBound,
                          fdr=sign.threshold,m0=m0,type=type,
                          exp.vector=foundSignGenes$exp.probs,
                          es.vector=foundSignGenes$Rsq,
                          nSamples=nSamples,
                          mean.vector=foundSignGenes$mean.sum,
                          useSimulatedPower=useSimulatedPower,
                          simThreshold=simThreshold)>0){
        alpha<-lowerBound
      } else {
        root<-uniroot(f=fdr.optimization,
                      interval=c(lowerBound,sign.threshold),
                      fdr=sign.threshold,m0=m0,type=type,
                      exp.vector=foundSignGenes$exp.probs,
                      es.vector=foundSignGenes$Rsq,
                      nSamples=nSamples,
                      mean.vector=foundSignGenes$mean.sum,
                      useSimulatedPower=useSimulatedPower,
                      simThreshold=simThreshold)
        alpha<-root$root
      }
    } else if (MTmethod=="Bonferroni"){
      #Restrict the Bonferroni for eQTLs cut-off further,
      #assuming x independent SNPs per gene
      alpha<-alpha/indepSNPs
    }
    #Skip power calculation for not expressed genes (if this option is chosen)
    if(speedPowerCalc){
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes), function(i)
        if(foundSignGenes$exp.probs[i]<0.01) {
          return(0)
        }else{
          power.eqtl(foundSignGenes$mean.sum[i],
                     foundSignGenes$Rsq[i],
                     alpha,nSamples,
                     useSimulatedPower,
                     simThreshold)})
    } else {
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),
                                   function(i) power.eqtl(foundSignGenes$mean.sum[i],
                                                          foundSignGenes$Rsq[i],
                                                          alpha,nSamples,
                                                          useSimulatedPower,
                                                          simThreshold))
    }
  } else if (type=="de") {
    #Check that the required column FoldChange exists
    if(! any(colnames(ref.study)=="FoldChange")){
      stop(paste("Column name FoldChange missing in the ref.study data frame.",
                 "Please make sure to provide this column for DE power analysis."))
    }
    #Set the fold change respectively
    foundSignGenes$FoldChange<-ref.study$FoldChange[ref.study$name==ref.study.name]
    if(MTmethod=="FDR"){
      #In the extreme case that also with Bonferroni cut-off less than one TP
      #can be found, the optimization is not working, use here the Bonferroni
      #cutoff instead
      if(fdr.optimization(x=lowerBound,
                          fdr=sign.threshold,m0=m0,type=type,
                          exp.vector=foundSignGenes$exp.probs,
                          es.vector=foundSignGenes$FoldChange,
                          nSamples=nSamples,
                          mean.vector=foundSignGenes$mean.sum,
                          disp.vector = foundSignGenes$disp.sum,
                          ssize.ratio.de=ssize.ratio.de)>0){
        alpha<-lowerBound
      } else {
        #Call fdr.optimization directly (consistent with the other branches;
        #the previous scPower::: qualification on the package's own internal
        #function was unnecessary)
        root<-uniroot(f=fdr.optimization,
                      interval=c(lowerBound,sign.threshold),
                      fdr=sign.threshold,m0=m0,type=type,
                      exp.vector=foundSignGenes$exp.probs,
                      es.vector=foundSignGenes$FoldChange,
                      nSamples=nSamples,
                      mean.vector=foundSignGenes$mean.sum,
                      disp.vector = foundSignGenes$disp.sum,
                      ssize.ratio.de=ssize.ratio.de)
        alpha<-root$root
      }
    }
    #Skip power calculation for not expressed genes (if this option is chosen)
    if(speedPowerCalc){
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),function(i)
        if(foundSignGenes$exp.probs[i]<0.01){
          return(0)
        } else {
          power.de(
            nSamples,
            foundSignGenes$mean.sum[i],
            foundSignGenes$FoldChange[i],
            1/foundSignGenes$disp.sum[i],
            alpha,3,ssize.ratio=ssize.ratio.de)
        })
    } else {
      foundSignGenes$power<-sapply(1:nrow(foundSignGenes),function(i) power.de(
        nSamples,
        foundSignGenes$mean.sum[i],
        foundSignGenes$FoldChange[i],
        1/foundSignGenes$disp.sum[i],
        alpha,3,ssize.ratio=ssize.ratio.de))
    }
  } else {
    stop('For type parameter only "eqtl" or "de" possible!')
  }
  #Calculate total probability as the DE power times the expression probability
  foundSignGenes$combined.prob<-foundSignGenes$power*foundSignGenes$exp.probs
  #Return probabilities and expected number of expressed genes
  results<-data.frame(powerDetect=mean(foundSignGenes$combined.prob),
                      exp.probs=mean(foundSignGenes$exp.probs),
                      power=mean(foundSignGenes$power),
                      expressedGenes=exp.genes)
  #Return either detailed probabilities for each DE/eQTL gene or only overview
  if(returnResultsDetailed){
    return(list(overview.df=results,probs.df=foundSignGenes))
  } else {
    return(results)
  }
}
#' Optimizing cost parameters to maximize detection power for a given budget and 10X design
#'
#' This function determines the optimal parameter combination for a given budget.
#' The optimal combination is thereby the one with the highest detection power.
#' Of the three parameters sample size, cells per sample and read depth, two need to be set and
#' the third one is uniquely defined given the other two parameters and the overall budget.
#'
#' @param totalBudget Overall experimental budget
#' @param costKit Cost for one 10X kit
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param nSamplesRange Range of sample sizes that should be tested (vector)
#' @param nCellsRange Range of cells per individual that should be tested (vector)
#' @param readDepthRange Range of read depth values that should be tested (vector)
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#' @inheritParams power.general.withDoublets
#'
#' @return Data frame with overall detection power, power and expression power for
#' each possible parameter combination given the budget and the parameter ranges
#'
#' @export
#'
#' @examples
#' optimize.constant.budget(totalBudget=10000,type="de",ct="CD4 T cells",ct.freq=0.2,
#' costKit=5600,costFlowCell=14032,readsPerFlowcell=4100*10^6,
#' ref.study=de.ref.study,ref.study.name="Blueprint (CLL) iCLL-mCLL",samplesPerLane=8,
#' read.umi.fit=read.umi.fit,gamma.mixed.fits=gamma.mixed.fits,disp.fun.param=disp.fun.param,
#' nCellsRange=seq(1000,10000,by=1000),readDepthRange=seq(1000,10000,by=1000))
#'
optimize.constant.budget<-function(totalBudget,type,
                                   ct,ct.freq,
                                   costKit,costFlowCell,readsPerFlowcell,
                                   ref.study,ref.study.name,
                                   samplesPerLane,
                                   read.umi.fit,gamma.mixed.fits,
                                   disp.fun.param,
                                   nSamplesRange=NULL,
                                   nCellsRange=NULL, readDepthRange=NULL,
                                   mappingEfficiency=0.8,
                                   multipletRate=7.67e-06,multipletFactor=1.82,
                                   min.UMI.counts=3,perc.indiv.expr=0.5,
                                   cutoffVersion="absolute",
                                   nGenes=21000,samplingMethod="quantiles",
                                   multipletRateGrowth="linear",
                                   sign.threshold=0.05,MTmethod="Bonferroni",
                                   useSimulatedPower=FALSE,
                                   simThreshold=4,
                                   speedPowerCalc=FALSE,
                                   indepSNPs=10,
                                   ssize.ratio.de=1,
                                   reactionsPerKit=6){
  #Check that exactly two of the parameters are set and the third one is not defined
  if(sum(is.null(nSamplesRange),is.null(nCellsRange),is.null(readDepthRange))!=1){
    stop(paste("To optimize the experimental design for a given budget,",
               "always exactly one of the parameters nSamplesRange, nCellsRange",
               "and readDepthRange should be set to NULL."))
  }
  #Case 1: estimate the sample size from the budget
  if(is.null(nSamplesRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nCellsRange,readDepthRange)
    colnames(param.combis)<-c("nCells","readDepth")
    #Sample size dependent on the budget
    param.combis$nSamples<-sapply(1:nrow(param.combis),
                                  function(i)floor(sampleSizeBudgetCalculation(param.combis$nCells[i],
                                                                               param.combis$readDepth[i],
                                                                               totalBudget,
                                                                               costKit,samplesPerLane,
                                                                               costFlowCell,readsPerFlowcell,
                                                                               reactionsPerKit=reactionsPerKit)))
  #Case 2: estimate the number of cells per individual from the budget
  } else if (is.null(nCellsRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,readDepthRange)
    colnames(param.combis)<-c("nSamples","readDepth")
    param.combis$nCells<-sapply(1:nrow(param.combis),
                                function(i)cellsBudgetCalculation(param.combis$nSamples[i],
                                                                  param.combis$readDepth[i],
                                                                  totalBudget,
                                                                  costKit,samplesPerLane,
                                                                  costFlowCell,readsPerFlowcell,
                                                                  reactionsPerKit=reactionsPerKit))
  #Case 3: estimate the read depth from the budget
  } else {
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,nCellsRange)
    colnames(param.combis)<-c("nSamples","nCells")
    param.combis$readDepth<-sapply(1:nrow(param.combis),
                                   function(i)readDepthBudgetCalculation(param.combis$nSamples[i],
                                                                         param.combis$nCells[i],
                                                                         totalBudget,
                                                                         costKit,samplesPerLane,
                                                                         costFlowCell,readsPerFlowcell,
                                                                         reactionsPerKit=reactionsPerKit))
  }
  #Remove all combinations where one of the parameters is <=0
  #(the budget formulas can return negative values for too expensive designs;
  #the guard uses <=0 so that negative sample sizes are filtered as well)
  if(any(param.combis$nSamples<=0) | any(param.combis$nCells<=0) | any(param.combis$readDepth<=0)){
    warning("Some of the parameter combinations are too expensive and removed from the parameter grid.")
    param.combis<-param.combis[param.combis$nSamples>0 & param.combis$nCells>0 & param.combis$readDepth>0,]
  }
  #Check if at least one parameter combination remains
  if(nrow(param.combis)==0){
    stop("The total budget is too low for parameters in the given range!")
  }
  #Evaluate the detection power for every remaining parameter combination
  power.study<-mapply(power.general.withDoublets,
                      param.combis$nSamples,
                      param.combis$nCells,
                      param.combis$readDepth,
                      MoreArgs=list(ct.freq=ct.freq,
                                    multipletRate=multipletRate,
                                    multipletFactor=multipletFactor,
                                    type=type,
                                    ref.study=ref.study,
                                    ref.study.name=ref.study.name,
                                    samplesPerLane=samplesPerLane,
                                    read.umi.fit=read.umi.fit,
                                    gamma.mixed.fits=gamma.mixed.fits,
                                    ct=ct,
                                    disp.fun.param=disp.fun.param,
                                    mappingEfficiency=mappingEfficiency,
                                    min.UMI.counts=min.UMI.counts,
                                    perc.indiv.expr=perc.indiv.expr,
                                    cutoffVersion=cutoffVersion,
                                    nGenes=nGenes,
                                    samplingMethod=samplingMethod,
                                    multipletRateGrowth=multipletRateGrowth,
                                    sign.threshold=sign.threshold,
                                    MTmethod=MTmethod,
                                    useSimulatedPower=useSimulatedPower,
                                    simThreshold=simThreshold,
                                    speedPowerCalc=speedPowerCalc,
                                    indepSNPs=indepSNPs,
                                    ssize.ratio.de=ssize.ratio.de))
  #Reshape the mapply result matrix into a data frame with numeric columns
  #(first column is the study name, all others are numeric values)
  power.study<-data.frame(apply(power.study,1,unlist),stringsAsFactors = FALSE)
  power.study[,2:ncol(power.study)]<-apply(power.study[,2:ncol(power.study)],2,as.numeric)
  return(power.study)
}
#' Optimizing cost parameters to maximize detection power for a given budget with
#' library preparation costs per cell
#'
#' This function determines the optimal parameter combination for a given budget.
#' The optimal combination is thereby the one with the highest detection power.
#' Of the three parameters sample size, cells per sample and read depth, two need to be set and
#' the third one is uniquely defined given the other two parameters and the overall budget.
#'
#' @param totalBudget Overall experimental budget
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param nSamplesRange Range of sample sizes that should be tested (vector)
#' @param nCellsRange Range of cells per individual that should be tested (vector)
#' @param readDepthRange Range of read depth values that should be tested (vector)
#' @inheritParams power.general.withDoublets
#'
#' @return Data frame with overall detection power, power and expression power for
#' each possible parameter combination given the budget and the parameter ranges
#'
#' @export
#'
optimize.constant.budget.libPrepCell<-function(totalBudget, type,
                                               ct,ct.freq,
                                               prepCostCell,costFlowCell,readsPerFlowcell,
                                               ref.study,ref.study.name,
                                               samplesPerLane,
                                               read.umi.fit,gamma.mixed.fits,
                                               disp.fun.param,
                                               nSamplesRange=NULL,
                                               nCellsRange=NULL, readDepthRange=NULL,
                                               mappingEfficiency=0.8,
                                               multipletRate=7.67e-06,multipletFactor=1.82,
                                               min.UMI.counts=3,perc.indiv.expr=0.5,
                                               cutoffVersion="absolute",
                                               nGenes=21000,samplingMethod="quantiles",
                                               multipletRateGrowth="linear",
                                               sign.threshold=0.05,MTmethod="Bonferroni",
                                               useSimulatedPower=FALSE,
                                               simThreshold=4,
                                               speedPowerCalc=FALSE,
                                               indepSNPs=10,
                                               ssize.ratio.de=1){
  #Check that exactly two of the parameters are set and the third one is not defined
  #(the budget uniquely determines the remaining third parameter)
  if(sum(is.null(nSamplesRange),is.null(nCellsRange),is.null(readDepthRange))!=1){
    stop(paste("To optimize the experimental design for a given budget,",
               "always exactly one of the parameters nSamplesRange, nCellsRange",
               "and readDepthRange should be set to NULL."))
  }
  #Case 1: estimate the sample size
  if(is.null(nSamplesRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nCellsRange,readDepthRange)
    colnames(param.combis)<-c("nCells","readDepth")
    #Sample size dependent on the budget
    #(per-cell library prep cost model; floor to get a whole number of samples)
    param.combis$nSamples<-sapply(1:nrow(param.combis),
                                  function(i)floor(sampleSizeBudgetCalculation.libPrepCell(param.combis$nCells[i],
                                                                                 param.combis$readDepth[i],
                                                                                 totalBudget,prepCostCell,
                                                                                 costFlowCell,readsPerFlowcell)))
  #Case 2: estimate the number of cells per individuals
  } else if (is.null(nCellsRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,readDepthRange)
    colnames(param.combis)<-c("nSamples","readDepth")
    #Cells per individual dependent on the budget
    param.combis$nCells<-sapply(1:nrow(param.combis),
                                function(i)cellsBudgetCalculation.libPrepCell(param.combis$nSamples[i],
                                                                   param.combis$readDepth[i],
                                                                   totalBudget, prepCostCell,
                                                                   costFlowCell,readsPerFlowcell))
  # Case 3: estimate the read depth
  } else {
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,nCellsRange)
    colnames(param.combis)<-c("nSamples","nCells")
    #Read depth dependent on the budget
    param.combis$readDepth<-sapply(1:nrow(param.combis),
                                   function(i)readDepthBudgetCalculation.libPrepCell(param.combis$nSamples[i],
                                                                          param.combis$nCells[i],
                                                                          totalBudget,prepCostCell,
                                                                          costFlowCell,readsPerFlowcell))
  }
  #Remove all combinations where one of the parameters is <=0
  #NOTE(review): the nSamples condition uses ==0 while nCells/readDepth use <=0;
  #negative nSamples values would be filtered below without triggering the
  #warning — consider nSamples<=0 for consistency.
  if(any(param.combis$nSamples==0) | any(param.combis$nCells<=0) | any(param.combis$readDepth<=0)){
    warning("Some of the parameter combinations are too expensive and removed from the parameter grid.")
    param.combis<-param.combis[param.combis$nSamples>0 & param.combis$nCells>0 & param.combis$readDepth>0,]
  }
  #Check if at least one parameter combination remains
  if(nrow(param.combis)==0){
    stop("The total budget is too low for parameters in the given range!")
  }
  #Evaluate the detection power for every remaining parameter combination;
  #all study-specific settings are passed unchanged via MoreArgs
  power.study<-mapply(power.general.withDoublets,
                      param.combis$nSamples,
                      param.combis$nCells,
                      param.combis$readDepth,
                      MoreArgs=list(ct.freq=ct.freq,
                                    multipletRate=multipletRate,
                                    multipletFactor=multipletFactor,
                                    type=type,
                                    ref.study=ref.study,
                                    ref.study.name=ref.study.name,
                                    samplesPerLane=samplesPerLane,
                                    read.umi.fit=read.umi.fit,
                                    gamma.mixed.fits=gamma.mixed.fits,
                                    ct=ct,
                                    disp.fun.param=disp.fun.param,
                                    mappingEfficiency=mappingEfficiency,
                                    min.UMI.counts=min.UMI.counts,
                                    perc.indiv.expr=perc.indiv.expr,
                                    cutoffVersion=cutoffVersion,
                                    nGenes=nGenes,
                                    samplingMethod=samplingMethod,
                                    multipletRateGrowth=multipletRateGrowth,
                                    sign.threshold=sign.threshold,
                                    MTmethod=MTmethod,
                                    useSimulatedPower=useSimulatedPower,
                                    simThreshold=simThreshold,
                                    speedPowerCalc=speedPowerCalc,
                                    indepSNPs=indepSNPs,
                                    ssize.ratio.de=ssize.ratio.de))
  #Reshape the mapply result (one column per combination) into a data frame;
  #all columns except the first are coerced to numeric (the first column is
  #presumably a non-numeric label column — confirm against power.general.withDoublets)
  power.study<-data.frame(apply(power.study,1,unlist),stringsAsFactors = FALSE)
  power.study[,2:ncol(power.study)]<-apply(power.study[,2:ncol(power.study)],2,as.numeric)
  return(power.study)
}
#' Optimizing cost parameters to maximize detection power for a given budget and 10X design
#'
#' This function determines the optimal parameter combination for a given budget.
#' The optimal combination is thereby the one with the highest detection power.
#' Of the three parameters sample size, cells per sample and read depth, two need to be set and
#' the third one is uniquely defined given the other two parameters and the overall budget.
#'
#' @param totalBudget Overall experimental budget
#' @param costKit Cost for one 10X kit
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param nSamplesRange Range of sample sizes that should be tested (vector)
#' @param nCellsRange Range of cells per individual that should be tested (vector)
#' @param readDepthRange Range of read depth values that should be tested (vector)
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#' @inheritParams power.general.restrictedDoublets
#'
#' @return Data frame with overall detection power, power and expression power for
#' each possible parameter combination given the budget and the parameter ranges
#'
#' @export
#'
optimize.constant.budget.restrictedDoublets<-function(totalBudget,type,
                                          ct,ct.freq,
                                          costKit,costFlowCell,readsPerFlowcell,
                                          ref.study,ref.study.name,
                                          cellsPerLane,
                                          read.umi.fit,gamma.mixed.fits,
                                          disp.fun.param,
                                          nSamplesRange=NULL,
                                          nCellsRange=NULL, readDepthRange=NULL,
                                          mappingEfficiency=0.8,
                                          multipletRate=7.67e-06,multipletFactor=1.82,
                                          min.UMI.counts=3,perc.indiv.expr=0.5,
                                          cutoffVersion="absolute",
                                          nGenes=21000,samplingMethod="quantiles",
                                          multipletRateGrowth="linear",
                                          sign.threshold=0.05,MTmethod="Bonferroni",
                                          useSimulatedPower=FALSE,
                                          simThreshold=4,
                                          speedPowerCalc=FALSE,
                                          indepSNPs=10,
                                          ssize.ratio.de=1,
                                          reactionsPerKit=6){
  #Check that exactly two of the parameters are set and the third one is not defined
  #(the budget uniquely determines the remaining third parameter)
  if(sum(is.null(nSamplesRange),is.null(nCellsRange),is.null(readDepthRange))!=1){
    stop(paste("To optimize the experimental design for a given budget,",
               "always exactly one of the parameters nSamplesRange, nCellsRange",
               "and readDepthRange should be set to NULL."))
  }
  #Case 1: estimate the sample size
  if(is.null(nSamplesRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nCellsRange,readDepthRange)
    colnames(param.combis)<-c("nCells","readDepth")
    #Sample size dependent on the budget
    #(cost model restricts cells per lane rather than individuals per lane)
    param.combis$nSamples<-sapply(1:nrow(param.combis),
                                  function(i)floor(sampleSizeBudgetCalculation.restrictedDoublets(param.combis$nCells[i],
                                                                                 param.combis$readDepth[i],
                                                                                 totalBudget,
                                                                                 costKit,cellsPerLane,
                                                                                 costFlowCell,readsPerFlowcell,
                                                                                 reactionsPerKit=reactionsPerKit)))
  #Case 2: estimate the number of cells per individuals
  } else if (is.null(nCellsRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,readDepthRange)
    colnames(param.combis)<-c("nSamples","readDepth")
    #Cells per individual dependent on the budget
    param.combis$nCells<-sapply(1:nrow(param.combis),
                                function(i)cellsBudgetCalculation.restrictedDoublets(param.combis$nSamples[i],
                                                                   param.combis$readDepth[i],
                                                                   totalBudget,
                                                                   costKit,cellsPerLane,
                                                                   costFlowCell,readsPerFlowcell,
                                                                   reactionsPerKit=reactionsPerKit))
  # Case 3: estimate the read depth
  } else {
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,nCellsRange)
    colnames(param.combis)<-c("nSamples","nCells")
    #Read depth dependent on the budget
    param.combis$readDepth<-sapply(1:nrow(param.combis),
                                   function(i)readDepthBudgetCalculation.restrictedDoublets(param.combis$nSamples[i],
                                                                          param.combis$nCells[i],
                                                                          totalBudget,
                                                                          costKit,cellsPerLane,
                                                                          costFlowCell,readsPerFlowcell,
                                                                          reactionsPerKit=reactionsPerKit))
  }
  #Remove all combinations where one of the parameters is <=0
  #NOTE(review): the nSamples condition uses ==0 while nCells/readDepth use <=0;
  #negative nSamples values would be filtered below without triggering the
  #warning — consider nSamples<=0 for consistency.
  if(any(param.combis$nSamples==0) | any(param.combis$nCells<=0) | any(param.combis$readDepth<=0)){
    warning("Some of the parameter combinations are too expensive and removed from the parameter grid.")
    param.combis<-param.combis[param.combis$nSamples>0 & param.combis$nCells>0 & param.combis$readDepth>0,]
  }
  #Check if at least one parameter combination remains
  if(nrow(param.combis)==0){
    stop("The total budget is too low for parameters in the given range!")
  }
  #Evaluate the detection power for every remaining parameter combination;
  #all study-specific settings are passed unchanged via MoreArgs
  power.study<-mapply(power.general.restrictedDoublets,
                      param.combis$nSamples,
                      param.combis$nCells,
                      param.combis$readDepth,
                      MoreArgs=list(ct.freq=ct.freq,
                                    multipletRate=multipletRate,
                                    multipletFactor=multipletFactor,
                                    type=type,
                                    ref.study=ref.study,
                                    ref.study.name=ref.study.name,
                                    cellsPerLane=cellsPerLane,
                                    read.umi.fit=read.umi.fit,
                                    gamma.mixed.fits=gamma.mixed.fits,
                                    ct=ct,
                                    disp.fun.param=disp.fun.param,
                                    mappingEfficiency=mappingEfficiency,
                                    min.UMI.counts=min.UMI.counts,
                                    perc.indiv.expr=perc.indiv.expr,
                                    cutoffVersion=cutoffVersion,
                                    nGenes=nGenes,
                                    samplingMethod=samplingMethod,
                                    multipletRateGrowth=multipletRateGrowth,
                                    sign.threshold=sign.threshold,
                                    MTmethod=MTmethod,
                                    useSimulatedPower=useSimulatedPower,
                                    simThreshold=simThreshold,
                                    speedPowerCalc=speedPowerCalc,
                                    indepSNPs=indepSNPs,
                                    ssize.ratio.de=ssize.ratio.de))
  #Reshape the mapply result (one column per combination) into a data frame;
  #all columns except the first are coerced to numeric (the first column is
  #presumably a non-numeric label column — confirm against power.general.restrictedDoublets)
  power.study<-data.frame(apply(power.study,1,unlist),stringsAsFactors = FALSE)
  power.study[,2:ncol(power.study)]<-apply(power.study[,2:ncol(power.study)],2,as.numeric)
  return(power.study)
}
#' Optimizing cost parameters to maximize detection power for a given budget and Smart-seq design
#'
#' This function determines the optimal parameter combination for a given budget.
#' The optimal combination is thereby the one with the highest detection power.
#' Of the three parameters sample size, cells per sample and read depth, two need to be set and
#' the third one is uniquely defined given the other two parameters and the overall budget.
#'
#' @param totalBudget Overall experimental budget
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param nSamplesRange Range of sample sizes that should be tested (vector)
#' @param nCellsRange Range of cells per individual that should be tested (vector)
#' @param readDepthRange Range of read depth values that should be tested (vector)
#' @inheritParams power.smartseq
#'
#' @return Data frame with overall detection power, power and expression power for
#' each possible parameter combination given the budget and the parameter ranges
#'
#' @export
#'
optimize.constant.budget.smartseq<-function(totalBudget, type,
                                  ct,ct.freq,
                                  prepCostCell,costFlowCell,readsPerFlowcell,
                                  ref.study,ref.study.name,
                                  gamma.mixed.fits,
                                  disp.linear.fit,
                                  nSamplesRange=NULL,
                                  nCellsRange=NULL, readDepthRange=NULL,
                                  mappingEfficiency=0.8,
                                  multipletFraction=0,multipletFactor=1.82,
                                  min.norm.count=3,perc.indiv.expr=0.5,
                                  cutoffVersion="absolute",
                                  nGenes=21000,samplingMethod="quantiles",
                                  sign.threshold=0.05,MTmethod="Bonferroni",
                                  useSimulatedPower=FALSE,
                                  simThreshold=4,
                                  speedPowerCalc=FALSE,
                                  indepSNPs=10,
                                  ssize.ratio.de=1){
  #Check that exactly two of the parameters are set and the third one is not defined
  #(the budget uniquely determines the remaining third parameter)
  if(sum(is.null(nSamplesRange),is.null(nCellsRange),is.null(readDepthRange))!=1){
    stop(paste("To optimize the experimental design for a given budget,",
               "always exactly one of the parameters nSamplesRange, nCellsRange",
               "and readDepthRange should be set to NULL."))
  }
  #Case 1: estimate the sample size
  if(is.null(nSamplesRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nCellsRange,readDepthRange)
    colnames(param.combis)<-c("nCells","readDepth")
    #Sample size dependent on the budget
    #(Smart-seq uses the same per-cell library prep cost model as libPrepCell)
    param.combis$nSamples<-sapply(1:nrow(param.combis),
                                  function(i)floor(sampleSizeBudgetCalculation.libPrepCell(param.combis$nCells[i],
                                                                                 param.combis$readDepth[i],
                                                                                 totalBudget,prepCostCell,
                                                                                 costFlowCell,readsPerFlowcell)))
  #Case 2: estimate the number of cells per individuals
  } else if (is.null(nCellsRange)){
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,readDepthRange)
    colnames(param.combis)<-c("nSamples","readDepth")
    #Cells per individual dependent on the budget
    param.combis$nCells<-sapply(1:nrow(param.combis),
                                function(i)cellsBudgetCalculation.libPrepCell(param.combis$nSamples[i],
                                                                   param.combis$readDepth[i],
                                                                   totalBudget, prepCostCell,
                                                                   costFlowCell,readsPerFlowcell))
  # Case 3: estimate the read depth
  } else {
    #Build a frame of all possible combinations
    param.combis<-expand.grid(nSamplesRange,nCellsRange)
    colnames(param.combis)<-c("nSamples","nCells")
    #Read depth dependent on the budget
    param.combis$readDepth<-sapply(1:nrow(param.combis),
                                   function(i)readDepthBudgetCalculation.libPrepCell(param.combis$nSamples[i],
                                                                          param.combis$nCells[i],
                                                                          totalBudget,prepCostCell,
                                                                          costFlowCell,readsPerFlowcell))
  }
  #Remove all combinations where one of the parameters is <=0
  #NOTE(review): the nSamples condition uses ==0 while nCells/readDepth use <=0;
  #negative nSamples values would be filtered below without triggering the
  #warning — consider nSamples<=0 for consistency.
  if(any(param.combis$nSamples==0) | any(param.combis$nCells<=0) | any(param.combis$readDepth<=0)){
    warning("Some of the parameter combinations are too expensive and removed from the parameter grid.")
    param.combis<-param.combis[param.combis$nSamples>0 & param.combis$nCells>0 & param.combis$readDepth>0,]
  }
  #Check if at least one parameter combination remains
  if(nrow(param.combis)==0){
    stop("The total budget is too low for parameters in the given range!")
  }
  #Evaluate the detection power for every remaining parameter combination;
  #all study-specific settings are passed unchanged via MoreArgs
  power.study<-mapply(power.smartseq,
                      param.combis$nSamples,
                      param.combis$nCells,
                      param.combis$readDepth,
                      MoreArgs=list(ct.freq=ct.freq,
                                    multipletFraction=multipletFraction,
                                    multipletFactor=multipletFactor,
                                    type=type,
                                    ref.study=ref.study,
                                    ref.study.name=ref.study.name,
                                    gamma.mixed.fits=gamma.mixed.fits,
                                    ct=ct,
                                    disp.linear.fit=disp.linear.fit,
                                    mappingEfficiency=mappingEfficiency,
                                    min.norm.count=min.norm.count,
                                    perc.indiv.expr=perc.indiv.expr,
                                    cutoffVersion=cutoffVersion,
                                    nGenes=nGenes,
                                    samplingMethod=samplingMethod,
                                    sign.threshold=sign.threshold,
                                    MTmethod=MTmethod,
                                    useSimulatedPower=useSimulatedPower,
                                    simThreshold=simThreshold,
                                    speedPowerCalc=speedPowerCalc,
                                    indepSNPs=indepSNPs,
                                    ssize.ratio.de=ssize.ratio.de))
  #Reshape the mapply result (one column per combination) into a data frame;
  #all columns except the first are coerced to numeric (the first column is
  #presumably a non-numeric label column — confirm against power.smartseq)
  power.study<-data.frame(apply(power.study,1,unlist),stringsAsFactors = FALSE)
  power.study[,2:ncol(power.study)]<-apply(power.study[,2:ncol(power.study)],2,as.numeric)
  return(power.study)
}
#' Wrapper function to use either simulated power or power based on F-test
#' (dependent on pseudobulk mean and used parameters)
#'
#' @param count.mean Expression mean in the pseudobulk
#' @param heritability Heritability of the trait
#' @param sig.level Significance threshold
#' @param nSamples Sample size
#' @param useSimulatedPower Option to simulate eQTL power for small mean values
#' to increase accuracy
#' @param simThreshold Threshold until which the simulated power is taken
#' instead of the analytic
#'
#' @return Power to detect the eQTL gene
#'
power.eqtl<-function(count.mean,heritability, sig.level, nSamples,
                     useSimulatedPower=TRUE, simThreshold=4){
  # Dispatch between the analytic F-test power and the simulation-based power.
  # The analytic result is used when simulation is disabled, or when the
  # pseudobulk mean is large enough that discreteness no longer matters.
  rounded.mean <- round(count.mean)
  if (!useSimulatedPower || rounded.mean > simThreshold) {
    return(power.eqtl.ftest(heritability, sig.level, nSamples))
  }
  # Simulation branch: a mean that rounds to zero yields zero power.
  if (rounded.mean == 0) {
    return(0)
  }
  # Try the precomputed p-value table shipped with the package first;
  # rows are keyed by "mean_heritability_nSamples".
  lookup.key <- paste(rounded.mean, round(heritability, 2), nSamples,
                      sep = "_")
  precomputed <- scPower::sim.eqtl.pvals
  if (lookup.key %in% rownames(precomputed)) {
    # Empirical power = fraction of stored p-values below the threshold
    mean(precomputed[lookup.key, ] < sig.level)
  } else {
    # No precomputed entry: simulate from scratch (can be slow)
    power.eqtl.simulated(count.mean, heritability, sig.level, nSamples)
  }
}
#' Power calculation for an eQTL gene using the F-test
#'
#' This function calculates the power to detect an eQTL gene.
#' @param heritability Heritability of the trait
#' @param sig.level Significance threshold
#' @param nSamples Sample size
#'
#' @return Power to detect the eQTL gene
#'
power.eqtl.ftest<-function(heritability, sig.level, nSamples) {
  # Analytic power for a single-SNP eQTL test via the F-test for a linear
  # model with one predictor (the genotype); heritability plays the role of
  # the model Rsq.
  #
  # A sample size larger than 2 is required: df.denom = nSamples - 2 must be
  # positive for the F-test to be defined.
  if(nSamples<3){
    return(NA)
  }
  # Guard the dependency without attaching it to the search path
  # (fix: the previous require(pwr) attached the whole package and only
  # returned FALSE on failure instead of erroring)
  if(!requireNamespace("pwr", quietly = TRUE)){
    stop("Package 'pwr' is required for the analytic eQTL power calculation.",
         call. = FALSE)
  }
  # Cohen's f2 effect size corresponding to the heritability (Rsq)
  f2 <- heritability / (1 - heritability)
  df.num <- 1 ## dfs of the full model (genotype term)
  df.denom <- nSamples - df.num - 1 ## error dfs
  power<-pwr::pwr.f2.test(u=df.num, v=df.denom, f2=f2, sig.level=sig.level)$power
  return(power)
}
#' Power calculation for an eQTL gene using simulations
#'
#' The eQTL power is independent of the mean except for very small mean values,
#' where small effect sizes might not be detectable due to the discrete counts
#' In these cases, the power can instead be simulated.
#'
#' @param count.mean Expression mean in the pseudobulk
#' @param heritability Heritability of the trait
#' @param sig.level Significance threshold
#' @param nSamples Sample size
#' @param rep.times Number of repetitions used for the simulation
#'
#' @return Power to detect the eQTL gene
#'
power.eqtl.simulated<-function(count.mean, heritability, sig.level, nSamples,
                               rep.times=100){
  # A gene whose pseudobulk mean rounds to zero cannot be simulated;
  # its power is defined as 0.
  rounded.mean <- round(count.mean)
  if (rounded.mean == 0) {
    return(0)
  }
  # Precomputed size (1/dispersion) look-up table shipped with the package
  lookup.table <- scPower::size.estimates
  # Run the single-draw simulation rep.times times, collecting one p-value
  # per repetition
  sim.pvals <- vapply(seq_len(rep.times),
                      function(rep) power.eqtl.simulated.help(rounded.mean,
                                                              heritability,
                                                              nSamples,
                                                              lookup.table),
                      numeric(1))
  # Empirical power: fraction of simulated p-values below the threshold
  mean(sim.pvals < sig.level)
}
#' Helper function for eQTL simulation power calculation
#'
#' @param count.mean Expression mean in the pseudobulk
#' @param Rsq Heritability of the trait
#' @param nSamples Sample size
#' @param size.estimates Data frame with precalculated size values to speed calculation
#' @param af Allele frequency (sampled if not explicity given)
#'
power.eqtl.simulated.help<-function(count.mean,Rsq,nSamples,
                                    size.estimates,af=NULL){
  #Randomly sample AF if not given
  #(uniform over 0.1..0.9 in steps of 0.1, matching the look-up table grid)
  if(is.null(af)){
    af<-sample(seq(0.1,0.9,by=0.1),1)
  }
  #Genotype distribution in the population:
  #Hardy-Weinberg counts for homozygous-alternative (bb), heterozygous (ab)
  #and homozygous-reference (aa); aa absorbs the rounding remainder
  bb<-round(nSamples*af^2)
  ab<-round(nSamples*2*af*(1-af))
  aa<-nSamples-ab-bb
  genotypes<-c(rep(0,aa),rep(1,ab),rep(2,bb))
  #Calculate beta value and standard error
  #(per-allele effect implied by the targeted Rsq and allele frequency)
  beta<-sqrt(Rsq/(2*af*(1-af)))
  #Get dispersion parameter from look-up table if available
  #Look-up dispersion parameter from numerical optimization;
  #columns 4:6 hold one size (1/dispersion) value per genotype (0/1/2)
  size.vector<-unlist(size.estimates[size.estimates$Rsq==round(Rsq,2) &
                                       size.estimates$af==round(af,1) &
                                       size.estimates$mean==round(count.mean),4:6])
  #If the look-up value could not be found, calculate it by hand
  if(length(size.vector)==0){
    # warning(paste("Look-up value not found for Rsq",round(Rsq,2),
    #               "af",round(af,1),"mean",round(count.mean),".",
    #               "Optimization will take time."))
    size.vector<-scPower:::estimate.size.simulation(round(count.mean),
                                                    round(Rsq,2),
                                                    round(af,1))
    #Replace size factors with NA with voom approximation!
    #(Taylor-style approximation 1/(sd^2 - 1/mu) per genotype group)
    sd.error<-sqrt(1-Rsq)
    for(g in 1:3){
      size.vector[g]<-ifelse(is.na(size.vector[g]),
                             1/(sd.error^2-1/(exp(log(count.mean) + beta * (g-1)))),
                             size.vector[g])
    }
  }
  #Sample counts from a negative binomial whose mean depends on the genotype
  mean.vector<-exp(log(count.mean) + beta * genotypes)
  size.vector<-size.vector[genotypes+1]
  #Suppress warning messages if NA values are generated
  suppressWarnings(counts<-rnbinom(nSamples,mu=mean.vector,size=size.vector))
  simData<-data.frame(count=counts,
                      log.count=log(counts+1),
                      g=genotypes)
  #Check if the counts are all the same:
  #a degenerate (constant) draw carries no signal, so return p-value 1
  if(length(setdiff(unique(simData$count),NA))<=1){
    return(1)
  }
  #Calculate p-values via linear regression of log counts on genotype dosage
  model<-lm(log.count~g,data=simData)
  #Check if there were enough data points to fit the model
  #(the genotype coefficient can be dropped when too many counts are NA)
  if("g" %in% row.names(summary(model)$coefficients)){
    return(summary(model)$coefficients["g","Pr(>|t|)"])
  } else{
    return(1)
  }
}
#' Estimation of size parameter (1/dsp) for eQTL power simulation
#'
#' @param count.mean Mean of negative binomial distribution for the counts
#' @param Rsq Effect size
#' @param af Allele frequency
#'
#' @return Numerical optimized size parameter (1/dispersion) to model the standard error
#' as good as possible in the simulation
#'
estimate.size.simulation<-function(count.mean,Rsq,af){
  # Numerically optimize the negative-binomial size parameter (1/dispersion)
  # so that the simulated standard error of log counts matches the one
  # implied by the targeted Rsq; one estimate per genotype (0, 1, 2).
  #
  # Effect size on the log scale and residual standard error implied by Rsq
  beta<-sqrt(Rsq/(2*af*(1-af)))
  sd.error<-sqrt(1-Rsq)
  # Preallocate one slot per genotype (fix: previously grown with c() in the loop)
  disp.estimates<-numeric(3)
  for(g in c(0,1,2)){
    # Mean of the count distribution for this genotype dosage
    beta.mean<-exp(log(count.mean) + beta * g)
    # Taylor estimate of size as approximation, used to bound the search interval
    size<-1/(sd.error^2-1/beta.mean)
    root<-try(uniroot(f=optimizeSizeFactor, interval=c(0.01,max(size*2,1)),
                      sd.error=sd.error,
                      beta.mean=beta.mean),silent=TRUE)
    # For some very small size factors the optimization does not work;
    # flag those with NA (fix: use inherits() instead of class()=="try-error",
    # which breaks when an object carries more than one class)
    if(inherits(root,"try-error")){
      disp.estimates[g+1]<-NA
    } else {
      disp.estimates[g+1]<-root$root
    }
  }
  return(disp.estimates)
}
#' Function for numeric optimization of size parameter
#'
#' @param x Tested size factor
#' @param sd.error The targeted standard error
#' @param beta.mean The mean of the counts
#'
#' @return A value close to 0 shows a good accordance of targeted and simulated
#' standard error
optimizeSizeFactor<-function(x,sd.error,beta.mean){
  # Objective for uniroot: difference between the targeted standard error and
  # the standard deviation of simulated log counts under size factor x.
  # A value close to 0 means the candidate size factor reproduces the
  # targeted standard error well.
  #
  # Fixed seed so the objective is deterministic across uniroot iterations
  set.seed(1)
  # Draw counts under the candidate size factor and log-transform them
  simulated.counts <- rnbinom(10000, mu = beta.mean, size = x)
  observed.sd <- sd(log(simulated.counts + 1))
  sd.error - observed.sd
}
# Alternative calculation of power function
# power.eqtl <- function(heritability, sig.level, nSamples) {
# ## determine the rejection area under the null model (standard normal)
# reject <- qnorm(1 - sig.level)
# ## determine the non-centrality paramter
# z <- sqrt((nSamples * heritability) / (1 - heritability))
# ## get the probability to be in the rejection area given that alternative is true
# ## P(reject H0 | H1 true) = P(Z > reject | H1 true)
# power <- pnorm(reject, lower.tail=FALSE, mean=z)
# return(power)
# }
#' Power calculation for a DE gene
#'
#' This function calculates the power to detect a DE gene (comparison of two groups 0 and 1)
#' by using the function power.nb.test of the package MKmisc. The power for a gene with mean of 0
#' is defined as 0 (the gene will have an expression probability of 0, so the overall detection power is also 0).
#'
#' @param nSamples Total samples size (n0 + n1), group balancing defined by size.ratio
#' @param mu.grou0 Mean value of group 0
#' @param RR effect size of group 1 vs group 0 (fold change mu1/mu0)
#' @param theta 1/dispersion parameter of the negative binomial fit
#' @param sig.level Significance threshold
#' @param approach Choose between three different methods implemented in the package for the power calculation (1,2,3)
#' @param ssize.ratio Sample size ratio between group 1 and 0 (n1/n0)
#' (Remark: there is a mistake in the package documentation that the ratio is n0/n1,
#' but I checked the code and the associated package)
#'
#' @return Power to detect the DE gene
#'
power.de<-function(nSamples,mu.group0,RR,theta,sig.level,approach=3,ssize.ratio=1){
  # Power to detect a DE gene between two groups via MKmisc::power.nb.test.
  # A gene with mean 0 in group 0 has power 0 by definition (its expression
  # probability is 0, so the overall detection power is also 0).
  #
  # Guard the dependency without attaching it to the search path
  # (fix: the previous require(MKmisc) attached the whole package and only
  # returned FALSE on failure instead of erroring)
  if(!requireNamespace("MKmisc", quietly = TRUE)){
    stop("Package 'MKmisc' is required for the DE power calculation.",
         call. = FALSE)
  }
  #Calculate the sample size of group 0 based on total sample size and ssize ratio
  nSamples.group0<-round(nSamples/(ssize.ratio+1))
  #Break code if the chosen sample size ratio is too extreme
  #(an extreme ratio empties one of the two groups)
  if(nSamples.group0==0 || nSamples.group0==nSamples){
    warning(paste("Chosen sample size ratio of",ssize.ratio,
                  "is too extreme and produces a group size of 0!",
                  "Result power is always 0."))
    return(0)
  }
  if(mu.group0 == 0){
    return(0)
  }
  calc<-MKmisc::power.nb.test(n=nSamples.group0,mu0=mu.group0,RR=RR, duration=1,
                              theta=theta, ssize.ratio=ssize.ratio,
                              sig.level=sig.level,alternative="two.sided",
                              approach=approach)
  return(calc$power)
}
#' Function for numeric optimization to get an FDR corrected significance threshold
#'
#' @param x Adjusted significance threshold to be found during optimization
#' @param fdr Chosen FDR threshold
#' @param m0 Number of null hypothesis
#' @param type Either DE or eQTL power
#' @param exp.vector Vector of expression probabilities for each DEG/eQTL
#' @param es.vector Effect size vector (Fold Change for DEGs, Rsq for eQTLs)
#' @param nSamples Sample size
#' @param mean.vector Mean value for each DEG / eQTL gene
#' @param disp.vector Dispersion value for each DEG (only required for DE power)
#' @param useSimulatedPower Option to simulate eQTL power for small mean values
#' to increase accuracy (only required for the eQTL power)
#' @param simThreshold Threshold until which the simulated power is taken
#' instead of the analytic (only required with eQTL power)
#' @param ssize.ratio.de In the DE case, ratio between sample size of group 0
#' (control group) and group 1 (1=balanced design)
#'
#' @return Optimizing this function to 0 will lead to the correct adjusted
#' significance threshold x
#'
fdr.optimization<-function(x,fdr,m0,type,
                           exp.vector,
                           es.vector,
                           nSamples,
                           mean.vector,
                           disp.vector=NULL,
                           useSimulatedPower=TRUE,
                           simThreshold=4,
                           ssize.ratio.de=1){
  # Root function for numerically finding the FDR-adjusted significance
  # threshold x: it is zero when x equals fdr * r1 / (m0 * (1 - fdr)),
  # where r1 is the expected number of true rejections at threshold x.
  #
  # Fix: the default for ssize.ratio.de was the self-referential
  # "ssize.ratio.de=ssize.ratio.de", which raises a
  # "promise already under evaluation" error whenever the argument is
  # omitted; a balanced design (1) is the sensible default.
  #
  # Per-gene detection power at the candidate threshold x
  if(type=="de"){
    power<-sapply(seq_along(es.vector), function(i)
      power.de(nSamples,mean.vector[i],
               es.vector[i],1/disp.vector[i],x,
               ssize.ratio=ssize.ratio.de))
  } else if (type=="eqtl"){
    power<-sapply(seq_along(es.vector), function(i)
      power.eqtl(mean.vector[i],es.vector[i],x,nSamples,
                 useSimulatedPower,simThreshold))
  } else {
    stop("Type unknown!")
  }
  # Expected number of true rejections: power weighted by the probability
  # that each gene is expressed (= detectable) at all
  r1<-sum(power*exp.vector, na.rm=TRUE)
  return(x-(fdr*r1)/(m0*(1-fdr)))
}
#' Calculate total cost dependent on parameters for 10X design
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param costKit Cost for one 10X kit
#' @param samplesPerLane Number of individuals sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param rounding Rounds up the number of used kits and flow cells
#' (which might give a more realistic estimation of costs)
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Total experimental cost dependent on the parameters
#'
#' @export
budgetCalculation<-function(nSamples,nCells,readDepth,
                            costKit,samplesPerLane,
                            costFlowCell,readsPerFlowcell,
                            rounding=FALSE,
                            reactionsPerKit=6){
  # Total experimental cost = library preparation (10X kits) + sequencing
  # (flow cells). With rounding=TRUE, partially used kits/flow cells are
  # billed in full, which is usually the more realistic estimate.
  kits.needed <- nSamples / (reactionsPerKit * samplesPerLane)
  flowcells.needed <- nSamples * nCells * readDepth / readsPerFlowcell
  if (rounding) {
    kits.needed <- ceiling(kits.needed)
    flowcells.needed <- ceiling(flowcells.needed)
  }
  kits.needed * costKit + flowcells.needed * costFlowCell
}
#' Estimate possible sample size depending on the total cost and the other parameters for 10X design
#'
#' A balanced design with two classes is assumed for the sample size calculation.
#'
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param samplesPerLane Number of individuals sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Number of samples that can be measured with this budget and other parameters
#'
#' @export
sampleSizeBudgetCalculation<-function(nCells,readDepth,totalCost,
                                      costKit,samplesPerLane,
                                      costFlowCell,readsPerFlowcell,
                                      reactionsPerKit=6){
  # Per-sample cost: kit share for library preparation plus sequencing share
  cost.per.sample <- costKit / (reactionsPerKit * samplesPerLane) +
    nCells * readDepth / readsPerFlowcell * costFlowCell
  # Maximal (fractional) sample size affordable with the budget
  max.samples <- totalCost / cost.per.sample
  # Round down to an even number of samples (balanced two-group design)
  floor(max.samples / 2) * 2
}
#' Estimate possible number of cells per individual depending on the total cost
#' and the other parameters for 10X design
#'
#' @param nSamples Number of samples
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param samplesPerLane Number of individuals sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Cells per individual that can be measured with this budget and other parameters
#'
#' @export
cellsBudgetCalculation<-function(nSamples,readDepth,totalCost,
                                 costKit,samplesPerLane,
                                 costFlowCell,readsPerFlowcell,
                                 reactionsPerKit=6){
  # Budget left for sequencing after subtracting the library preparation
  # (kit) costs for the fixed number of samples
  seq.budget <- totalCost - nSamples / (reactionsPerKit * samplesPerLane) * costKit
  # Sequencing cost per cell at the given sample size and read depth
  cost.per.cell <- nSamples * readDepth / readsPerFlowcell * costFlowCell
  floor(seq.budget / cost.per.cell)
}
#' Estimate possible read depth depending on the total cost
#' and the other parameters for 10X design
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param samplesPerLane Number of individuals sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Read depth that can be measured with this budget and other parameters
#'
#' @export
readDepthBudgetCalculation<-function(nSamples,nCells,totalCost,
                                     costKit,samplesPerLane,
                                     costFlowCell,readsPerFlowcell,
                                     reactionsPerKit=6){
  # Budget left for sequencing after subtracting the library preparation
  # (kit) costs for the fixed number of samples
  seq.budget <- totalCost - nSamples / (reactionsPerKit * samplesPerLane) * costKit
  # Sequencing cost per read-depth unit at the given samples and cells
  cost.per.read <- nSamples * nCells / readsPerFlowcell * costFlowCell
  floor(seq.budget / cost.per.read)
}
#' Calculate total cost dependent on parameters (adaptation with cells per lane instead of samples)
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param costKit Cost for one 10X kit
#' @param cellsPerLane Number of cells sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param rounding Rounds up the number of used kits and flow cells
#' (which might give a more realistic estimation of costs)
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Total experimental cost dependent on the parameters
#'
#' @export
budgetCalculation.restrictedDoublets<-function(nSamples,nCells,readDepth,
                                               costKit,cellsPerLane,
                                               costFlowCell,readsPerFlowcell,
                                               rounding=FALSE, reactionsPerKit=6){
  # Variant of budgetCalculation that fixes the number of cells per lane:
  # derive how many individuals fit on one lane, then delegate.
  fitting.samples <- floor(cellsPerLane / nCells)
  budgetCalculation(nSamples, nCells, readDepth,
                    costKit, fitting.samples, costFlowCell,
                    readsPerFlowcell, rounding,
                    reactionsPerKit = reactionsPerKit)
}
#' Estimate possible sample size depending on the total cost and the other parameters
#' (with a restricted number of cells per lane)
#'
#' Variant of sampleSizeBudgetCalculation, which restricts the cells per lane instead the individuals
#' per lane.
#'
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param cellsPerLane Number of cells sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Number of samples that can be measured with this budget and other parameters
#'
#' @export
sampleSizeBudgetCalculation.restrictedDoublets<-function(nCells,readDepth,totalCost,
                                                         costKit,cellsPerLane,
                                                         costFlowCell,readsPerFlowcell,
                                                         reactionsPerKit=6){
  # Variant of sampleSizeBudgetCalculation that fixes the number of cells per
  # lane: derive how many individuals fit on one lane, then delegate.
  fitting.samples <- floor(cellsPerLane / nCells)
  sampleSizeBudgetCalculation(nCells, readDepth, totalCost, costKit,
                              fitting.samples, costFlowCell, readsPerFlowcell,
                              reactionsPerKit = reactionsPerKit)
}
#' Estimate possible number of cells per individual depending on the total cost
#' and the other parameters (with a restricted number of cells per lane)
#'
#' Approximation without rounding
#'
#' @param nSamples Number of samples
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param cellsPerLane Number of cells sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Cells per individual that can be measured with this budget and other parameters
#'
#' @export
cellsBudgetCalculation.restrictedDoublets<-function(nSamples,readDepth,totalCost,
                                                    costKit,cellsPerLane,
                                                    costFlowCell,readsPerFlowcell,
                                                    reactionsPerKit=6){
  # Per-cell cost: kit share (a lane holds cellsPerLane cells across all
  # samples) plus the sequencing share for the given read depth
  cost.per.cell <- nSamples * costKit / (reactionsPerKit * cellsPerLane) +
    nSamples * readDepth / readsPerFlowcell * costFlowCell
  floor(totalCost / cost.per.cell)
}
#' Estimate possible read depth depending on the total cost
#' and the other parameters (with a restricted number of cells per lane)
#'
#' The number of multiplexed individuals per lane is derived from the lane's
#' cell capacity before delegating to the standard read-depth calculation.
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param totalCost Experimental budget
#' @param costKit Cost for one 10X kit
#' @param cellsPerLane Number of cells sequenced per lane
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Read depth that can be measured with this budget and other parameters
#'
#' @export
readDepthBudgetCalculation.restrictedDoublets <- function(nSamples, nCells, totalCost,
                                                          costKit, cellsPerLane,
                                                          costFlowCell, readsPerFlowcell,
                                                          reactionsPerKit = 6) {
  # Individuals that fit into one lane given the lane's cell capacity
  indivsPerLane <- floor(cellsPerLane / nCells)
  # Delegate to the standard calculation with the derived multiplexing level
  readDepthBudgetCalculation(nSamples, nCells, totalCost, costKit,
                             indivsPerLane, costFlowCell, readsPerFlowcell,
                             reactionsPerKit = reactionsPerKit)
}
#' Calculate total cost dependent on parameters for 10X design
#' using library preparation costs per cell
#'
#' The total cost is the per-cell library preparation cost plus the sequencing
#' cost for the requested read depth.
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param rounding Rounds up the number of used kits and flow cells
#' (which might give a more realistic estimation of costs)
#'
#' @return Total experimental cost dependent on the parameters
#'
#' @export
budgetCalculation.libPrepCell <- function(nSamples, nCells, readDepth,
                                          prepCostCell,
                                          costFlowCell, readsPerFlowcell,
                                          rounding = FALSE) {
  # Library preparation: fixed cost for every sequenced cell
  libraryCost <- prepCostCell * nSamples * nCells
  # Flow cells needed for the requested read depth (fractional by default)
  flowCellsUsed <- nSamples * nCells * readDepth / readsPerFlowcell
  if (rounding) {
    # Only whole flow cells can be purchased
    flowCellsUsed <- ceiling(flowCellsUsed)
  }
  libraryCost + flowCellsUsed * costFlowCell
}
#' Estimate possible sample size depending on the total cost,
#' using library preparation costs per cell
#'
#' A balanced design with two classes is assumed, so the result is always
#' rounded down to an even number.
#'
#' @param nCells Cells per individual
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#'
#' @return Number of samples that can be measured with this budget and other parameters
#'
#' @export
sampleSizeBudgetCalculation.libPrepCell <- function(nCells, readDepth, totalCost,
                                                    prepCostCell, costFlowCell,
                                                    readsPerFlowcell) {
  # Total cost attributable to a single individual
  # (library preparation plus sequencing)
  costPerSample <- prepCostCell * nCells +
    nCells * readDepth / readsPerFlowcell * costFlowCell
  rawSamples <- totalCost / costPerSample
  # Even numbers only, due to the balanced two-class design
  floor(rawSamples / 2) * 2
}
#' Estimate possible number of cells per individual depending on the total cost,
#' using library preparation costs per cell
#'
#' @param nSamples Number of samples
#' @param readDepth Read depth per cell
#' @param totalCost Experimental budget
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#'
#' @return Cells per individual that can be measured with this budget and other parameters
#'
#' @export
cellsBudgetCalculation.libPrepCell <- function(nSamples, readDepth, totalCost,
                                               prepCostCell, costFlowCell,
                                               readsPerFlowcell) {
  # Cost attributable to one cell per individual, summed over all samples
  # (library preparation plus sequencing)
  costPerCell <- prepCostCell * nSamples +
    nSamples * readDepth / readsPerFlowcell * costFlowCell
  # Only whole cells are meaningful
  floor(totalCost / costPerCell)
}
#' Estimate possible read depth depending on the total cost,
#' using library preparation costs per cell
#'
#' The library preparation cost for all cells is subtracted from the budget
#' first; the remainder is converted into read depth via the flow cell price.
#'
#' @param nSamples Number of samples
#' @param nCells Cells per individual
#' @param totalCost Experimental budget
#' @param prepCostCell Library preparation costs per cell
#' @param costFlowCell Cost of one flow cells for sequencing
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#'
#' @return Read depth that can be measured with this budget and other parameters
#'
#' @export
readDepthBudgetCalculation.libPrepCell <- function(nSamples, nCells, totalCost,
                                                   prepCostCell,
                                                   costFlowCell, readsPerFlowcell) {
  # Budget remaining for sequencing after library preparation of all cells
  seqBudget <- totalCost - prepCostCell * nSamples * nCells
  # Sequencing cost per unit of read depth across all cells
  costPerDepthUnit <- nSamples * nCells / readsPerFlowcell * costFlowCell
  floor(seqBudget / costPerDepthUnit)
}
#' Identifying the expression threshold combination that maximizes the detection power
#'
#' Performs an exhaustive grid search over all combinations of UMI count and
#' population thresholds and returns the power result of the best combination.
#'
#' @param umi_range Vector with UMI counts to test
#' @param pop_range Vector with population thresholds to test
#' @param ... additional arguments that can be passed to
#' \code{\link[scPower:power.general.restrictedDoublets]{power.general.restrictedDoublets()}}
#' (excluding min.UMI.counts
#' and perc.indiv.expr that will be evaluated in the function)
#' @inheritParams power.general.restrictedDoublets
#'
#' @return Results for threshold combination with the maximal detection power
#'
#' @export
#'
select.cutoffs <- function(umi_range, pop_range,
                           nSamples,
                           nCells,
                           readDepth,
                           ct.freq,
                           type,
                           ref.study,
                           ref.study.name,
                           cellsPerLane,
                           read.umi.fit,
                           gamma.mixed.fits,
                           ct,
                           disp.fun.param, ...) {
  extraArgs <- list(...)
  # The two thresholds are exactly what this function optimizes over,
  # so the caller must not fix them via ...
  if (any(c("min.UMI.counts", "perc.indiv.expr") %in% names(extraArgs))) {
    stop("Specifying min.UMI.counts or perc.indiv.expr not allowed!")
  }
  bestPower <- 0
  bestResult <- NULL
  # Exhaustive grid search over all threshold combinations
  for (umiThreshold in umi_range) {
    for (popThreshold in pop_range) {
      powerRes <- power.general.restrictedDoublets(nSamples,
                                                   nCells,
                                                   readDepth,
                                                   ct.freq,
                                                   type,
                                                   ref.study,
                                                   ref.study.name,
                                                   cellsPerLane,
                                                   read.umi.fit,
                                                   gamma.mixed.fits,
                                                   ct,
                                                   disp.fun.param,
                                                   min.UMI.counts = umiThreshold,
                                                   perc.indiv.expr = popThreshold,
                                                   ...)
      # Strict '>' keeps the first combination in case of ties
      if (powerRes$powerDetect > bestPower) {
        bestPower <- powerRes$powerDetect
        bestResult <- powerRes
        bestResult$umiThreshold <- umiThreshold
        bestResult$popThreshold <- popThreshold
      }
    }
  }
  bestResult
}
#' Print design parameters of optimal design
#'
#' Convenience function to read the output of the budget optimization functions
#' more easily: selects the row with the optimal detection power and prints
#' sample size, cells and read depth as well as the number of 10X kits and
#' flow cells for this specific design.
#'
#' @param optim_output Result data frame of the optimization functions
#' (optimize.constant.budget and variants of the function)
#' @param samplesPerLane Number of individuals sequenced per lane
#' @param readsPerFlowcell Number reads that can be sequenced with one flow cell
#' @param reactionsPerKit Reactions (=lanes) per kit, defines the total
#' number of tested individuals per kit
#'
#' @return Nothing (print output instead)
#'
#' @export
#'
print_optimalDesign_10X <- function(optim_output,
                                    samplesPerLane,
                                    readsPerFlowcell,
                                    reactionsPerKit = 6) {
  # Row with the highest detection power (first one in case of ties)
  optimal <- optim_output[which.max(optim_output$powerDetect), ]
  # Kits: each kit provides reactionsPerKit lanes holding samplesPerLane
  # individuals each
  numKits <- ceiling(optimal$sampleSize / (samplesPerLane * reactionsPerKit))
  # Flow cells: total reads of the design divided by the flow cell capacity
  numFlowcells <- ceiling(optimal$sampleSize * optimal$totalCells *
                            optimal$readDepth / readsPerFlowcell)
  print(paste("Optimal design with power", optimal$powerDetect))
  print(paste("Sample size:", optimal$sampleSize))
  print(paste("Cells per sample:", optimal$totalCells))
  print(paste("Read depth:", optimal$readDepth))
  print(paste("Number 10X kits:", numKits))
  print(paste("Number sequencing flowcells:", numFlowcells))
}
|
2db6251b0d3b011d4fbdc396543bfa1e41744be3
|
0079d55b6d1178f390b333cc6afc36e39c87ac87
|
/man/InitialProcessing.Rd
|
3f6048f66a25d8ba3f015d502170dc75485a81f5
|
[] |
no_license
|
cran/rWMBAT
|
08e88ec256fd6125b11ff16e9b510f888568c96a
|
1b5961730491fa9cea2bd4625b7fc0da6911fbf1
|
refs/heads/master
| 2018-12-29T21:26:46.553640
| 2009-10-28T00:00:00
| 2009-10-28T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,115
|
rd
|
InitialProcessing.Rd
|
\name{InitialProcessing}
\alias{InitialProcessing}
\title{INITIALPROCESSING Initial Prep of Data from Signal Processing}
\description{Takes a peak list which has been imported into R and prepares it for Bayesian analysis}
\usage{
InitialProcessing(StructIn)
}
\arguments{
\item{StructIn}{List with the following double-typed arrays:
Intensities:double n x m real-valued array with variables (peaks) in columns, cases (samples) in rows.
MZ:double list of the labels (m/z value) for each of the variables.Must be the same size as the number of variables in Intensities.
Class:integer vector, classification of each sample (disease state)-- 1 or 2--must be the same size as the number of cases in Intensities.
ID:double column array, case or patient ID number, same size as class. May have second column, so each row is [ID1 ID2] where ID2 is replicate number.
Options:logical Array of processing options with elements
1. Normalize.
2. Clip Data (remove negatives).
3. Replicate Average.
4. Auto threshold MI.
5. Use Log of Data.
6. Remove Low Signal cases.
NOT DONE 3 Bin (2 Bin if False).}
}
\value{
\item{RawData}{Intensities as input}
\item{ClipData}{RawData where all values less than 1 are set to 1}
\item{NormData}{ClipData normalized by total ion count, i.e. divided by the sum of all variables for each case}
\item{LogData}{Natural logarithm of NormData}
\item{Class}{Same as input}
\item{MZ}{Same as input}
\item{ID}{Single column. If replicates are not averaged, the entries are now ID1.ID2. If replicates averaged, then just ID1}
\item{DeltaMZ}{difference in peak m/z values to look for adducts}
\item{RatioMZ}{ratios of m/z values to look for satellites}
}
\references{http://kwkusc.people.wm.edu/dissertation/dissertation.htm }
\author{Karl Kuschner, Qian Si and William Cooke, College of William and Mary, Dept. of Physics, 2009}
\examples{
data(In) #load input example data from the package
StructOut <- InitialProcessing(In)
}
|
1a9c1c6b02a9e5e8e144b90d77a7b5b6e20386e8
|
a68fcf7bad70e91af4b398df8bee04b9b0bda82e
|
/S34_S38_phylogenetic_comparative_methods/scripts/resources/slouch/man/slouchtree.plot.Rd
|
ddf218462ad4018769ed6afdaa497c0fa9a8e19a
|
[] |
no_license
|
hj1994412/teleost_genomes_immune
|
44aac06190125b4dea9533823b33e28fc34d6b67
|
50f1552ebb5f19703b388ba7d5517a3ba800c872
|
refs/heads/master
| 2021-03-06T18:24:10.316076
| 2016-08-27T10:58:39
| 2016-08-27T10:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,767
|
rd
|
slouchtree.plot.Rd
|
% --- Source file: slouchtree.plot.R ---
\name{slouchtree.plot}
\alias{slouchtree.plot}
\title{Plot a tree and niches encoded in slouch format as a phylogram}
\description{
Allows for plotting the tree and visualizing reconstructed ancestral states on the internal branches.
}
\usage{
slouchtree.plot(topology, times, names = NULL, regimes = NULL, cex = NULL, lwd = NULL, reg.col = NULL)
}
\arguments{
\item{topology}{A column in the \pkg{slouch} tree data.frame named ancestor that specifies which node a current species or internal node is connected to. The ancestral node for the node at the root of the tree gets a numeric value of 0.}
\item{times}{A column in the slouch tree data frame specifying the cumulative time of the current node in relation to the root node, which occurs at time 0.}
\item{names}{An optional vector of species names, internal nodes are encoded as NA}
\item{regimes}{An optional vector of codes (numeric or character) that specify different categorical predictor variables (regimes). Usually these would also include a reconstruction of the states on the phylogeny so that the internal nodes are also coded according to which regime the branch leading back from the current node belongs to (using e.g. the \code{fitch} function).
}
\item{cex}{As in the R base package, \code{cex} controls text size.}
\item{lwd}{As in the R base package, \code{lwd} controls line width.}
\item{reg.col}{An optional vector of color names (as characters) that allows user-specified coloring of the different regimes.}
}
\details{
\code{plot.tree} was borrowed and modified from the \pkg{ouch} package written by Marguerite Butler and Aaron King (2004), and is therefore similar to the plotting functions in that package.
}
\references{
Butler, M. A. & King. A. A. (2004) Phylogenetic comparative analysis: a modeling approach for adaptive evolution. \emph{The american Naturalist} \bold{164}: 683--695.
}
\author{Jason Pienaar \email{jasonpienaar@gmail.com}}
\seealso{ \code{\link{fitch}},\code{\link{make.tree}},\code{\link{ouch2slouch}},\code{\link{slouch2ouch}}}
\examples{
data(Lio)
attach(Lio)
## plot tree topology
slouchtree.plot(ancestor, time)
## add species names, adjust text size and line width
slouchtree.plot(ancestor, time, species, cex=0.8, lwd=2)
## plot niche reconstructions
pars.climate<-fitch(Lio, Lio$climate, root="Tropical")
slouchtree.plot(ancestor, time, species, cex=0.8, lwd=2, regimes=pars.climate)
pars.del.temp<-fitch(Lio, Lio$temp, root="Warm", deltran=TRUE)
slouchtree.plot(ancestor, time, species, cex=0.8, lwd=2, regimes=pars.del.temp)
## choose own regime colors
slouchtree.plot(ancestor, time, species, cex=0.8, lwd=2, regimes=pars.del.temp, reg.col=c("blue", "red"))
detach(Lio)
}
\keyword{aplot}
|
0160adb111b2d120ecbfc70e257170b4f43edade
|
b43be2e9461c7b2bce2688cff25b6ed1b1baddeb
|
/convert_shpfiles.R
|
8c90c513f81637c021047d362fd3fded88fd38f9
|
[] |
no_license
|
BobbieSueDevRecruiter/rbg_analysis_code
|
21d74c11c787b8082b8a821bbf96547e2e814bfb
|
47a599fd831d45012002bd56261b2cf50c3bad1e
|
refs/heads/master
| 2020-03-29T06:15:13.885298
| 2018-09-20T13:07:38
| 2018-09-20T13:07:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,103
|
r
|
convert_shpfiles.R
|
### Reproject shapefiles to match the CRS of a reference raster.
### Each reprojected shapefile is written to a "projected" subfolder
### next to the original.
library(raster)
library(sp)
library(rgdal)

dirpath <- "/home/s1326314/RBGdata"
# Shapefiles to reproject
shapefiles <- list.files(paste0(dirpath, "/shpfiles"),
                         pattern = "bt_15_utm_esri.shp", full.names = TRUE)

# Open a layer (e.g. a raster) carrying the coordinate system to convert to
temp <- raster("/home/s1326314/RBGdata/LandSat/LC08_L1TP_182058_20180523_20180605_01_T1.tar/LC08_L1TP_182058_20180523_20180605_01_T1_B10.TIF")
newProj <- as.character(crs(temp))

# seq_along() is safe when no shapefile matched; 1:length() would
# produce c(1, 0) for an empty vector and crash inside the loop
for (i in seq_along(shapefiles)) {
  shp <- shapefiles[i]  # path of the current shapefile
  print(shp)
  # sp::spTransform() — explicit namespace avoids clashes with other packages
  toProject <- shapefile(shp)                    # load the shapefile
  newShp <- sp::spTransform(toProject, newProj)  # then reproject it
  # Export the shapefile to a "projected" subfolder in the shapefile folder
  writeOGR(newShp, paste0(dirname(shp), "/projected/", basename(shp)),
           layer = basename(shp), driver = "ESRI Shapefile",
           overwrite_layer = TRUE)
}
# convert lines to polygons
e29a8e50d679d91d68a936b04a4fc00b8b1f0ced
|
be9d60bc856095989a7eabb0b629f432cd062a67
|
/3 сем/ВКИАД/Lab5v27/Lab5.R
|
a1aa92e89ad308411541d69788b1253119a6fa96
|
[] |
no_license
|
Viiktorias/2_course
|
01682c9d172c61008ef8b3c49e97e378aa5eb192
|
26fd5fab667e37569436221765edfe49e1d6bd34
|
refs/heads/master
| 2022-01-29T12:12:40.682971
| 2019-08-07T13:36:56
| 2019-08-07T13:36:56
| null | 0
| 0
| null | null | null | null |
WINDOWS-1251
|
R
| false
| false
| 519
|
r
|
Lab5.R
|
# k-means clustering lab: read a two-column data set, normalize both
# columns by their means, cluster with k = 2 and k = 3, and visualize
# both partitions in a single plot.
# (The plot title "Диаграмма рассеяния" is Russian for "scatter diagram".)
dat=read.table("input.txt", dec='.')
# Scatter plot of the raw data
plot(dat,type="p",main="Диаграмма рассеяния",xlab="X", ylab="Y")
dat
# Scale each column by its mean so both variables contribute comparably
# to the Euclidean distances used by k-means
dat[,1]<-dat[,1]/mean(dat[,1])
dat[,2]<-dat[,2]/mean(dat[,2])
# k-means with 2 clusters; inspect cluster sizes and centers
cl1<-kmeans(dat,2)
table(cl1$cluster)
cl1$centers
# k-means with 3 clusters for comparison
cl2<-kmeans(dat,3)
table(cl2$cluster)
cl2$centers
# Color encodes the 2-cluster solution, plotting symbol the 3-cluster one
plot(dat,col=ifelse(cl1$cluster==1,"blue","green"), pch=ifelse(cl2$cluster==1,1, ifelse(cl2$cluster==2, 2, 3)))
legend("topleft",legend=c("1","2"),fill=c("blue","green"))
legend("bottomright",legend=c("1","2","3"),pch=c(1,2,3))
|
3bfd27bcc74a501c0c40c5fcf9d317443f40a69b
|
d87241791be8df80425a8afcce0ef252d6d1ff38
|
/man/compute_pd2_scale_nll_gpd.Rd
|
45ac0bcfe29cc0f372d8f08d4cc310490b61c3ad
|
[] |
no_license
|
MartinRoth/gpdIcm
|
551dd3228d806dabd3658c522f693fd0a5b66e99
|
5929123e94ae4e6269e319c8c9bd352c51e8cc42
|
refs/heads/master
| 2021-01-19T01:03:00.879843
| 2016-10-29T17:39:10
| 2016-10-29T17:39:10
| 46,576,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 544
|
rd
|
compute_pd2_scale_nll_gpd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_pd2_scale_nll_gpd}
\alias{compute_pd2_scale_nll_gpd}
\title{Computes the second partial derivative (scale) of the negative
log likelihood for the GPD}
\usage{
compute_pd2_scale_nll_gpd(y, scale, shape)
}
\arguments{
\item{y}{numeric vector of the data}
\item{scale}{numeric vector of the scale parameters}
\item{shape}{double}
}
\description{
Computes the second partial derivative (scale) of the negative
log likelihood for the GPD
}
|
1099e25ed6495525b4e09bd988ada34bfee9a3f4
|
2096d0405ce8368cd8d207d359b46e659434bd55
|
/plot2.R
|
99f15cd98404eb4cd3b3efc80bc68dab303e90a0
|
[] |
no_license
|
Andy7475/ExData_Plotting1
|
c639930bf4dd55dc2be493137ba97daba3ceaaab
|
fe0f1d3356cb4c089ffcf17d4a24556d425f2de9
|
refs/heads/master
| 2020-04-02T19:39:03.645685
| 2015-08-09T21:24:25
| 2015-08-09T21:24:25
| 40,448,851
| 0
| 0
| null | 2015-08-09T20:05:32
| 2015-08-09T20:05:31
| null |
UTF-8
|
R
| false
| false
| 890
|
r
|
plot2.R
|
# Plot 2: line chart of Global Active Power for 1-2 Feb 2007,
# written to Plot2.png (480 x 480 px).
library(dplyr)
# Read the full data set; "?" marks missing values
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    colClasses = c("character", "character", rep("numeric", 7)),
                    na.strings = "?")
# Keep only the two days of interest
feb <- filter(power, Date == "1/2/2007" | Date == "2/2/2007")
# Combine date and time into one timestamp column (overwriting Time,
# as otherwise a default date would be implied)
feb$Time <- paste(feb$Date, feb$Time, sep = " ")
feb$Time <- as.POSIXct(feb$Time, format = "%d/%m/%Y %H:%M:%S")
# Open the PNG device with the required dimensions
png(filename = "Plot2.png", width = 480, height = 480)
# Set up empty axes first, then draw the line
plot(feb$Time, feb$Global_active_power, type = "n",
     ylab = "Global Active Power (kilowatts)", xlab = "")
lines(feb$Time, feb$Global_active_power)
# Close the device
dev.off()
|
7d562e7f8436d2baa2af4eb598319c7bd46b5db4
|
a8f14c758c14d7799d6f532e7c7b277e37711c15
|
/02418/carrots/src/loaddata.R
|
0b733768ed6e0e5322247987897296c55a16f959
|
[] |
no_license
|
zhucer2003/dtu-fall-2011
|
61f4e25724ee8919f87481dc94efd9b5a7b4f93a
|
176b2a0c4ad47a956749dc60835ce0e41a5a35b6
|
refs/heads/master
| 2021-05-05T14:03:39.291852
| 2011-12-08T09:31:56
| 2011-12-08T09:31:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 227
|
r
|
loaddata.R
|
# Load the carrots data set and convert the grouping variables to factors.
carrots <- read.table("../data/carrots.txt", header = TRUE, sep = ",")
factor_cols <- c("Consumer", "Age", "Gender", "product")
carrots[factor_cols] <- lapply(carrots[factor_cols], factor)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.