blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0818e2d9c5b0df8c3243eb971f117d74b7f2fb6b
|
78e656557b5cc6b77f8a30a3792e41b6f79f2f69
|
/aslib/man/writeASScenario.Rd
|
99c6048e2feb464e118c83081edf018dc1ea7598
|
[] |
no_license
|
coseal/aslib-r
|
f7833aa6d9750f00c6955bade2b8dba6b452c9e1
|
2363baf4607971cd2ed1d784d323ecef898b2ea3
|
refs/heads/master
| 2022-09-12T15:19:20.609668
| 2022-09-02T17:48:51
| 2022-09-02T17:48:51
| 27,724,280
| 6
| 7
| null | 2021-10-17T17:34:54
| 2014-12-08T16:38:21
|
R
|
UTF-8
|
R
| false
| true
| 734
|
rd
|
writeASScenario.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeASScenario.R
\name{writeASScenario}
\alias{writeASScenario}
\title{Writes an algorithm selection scenario to a directory.}
\usage{
writeASScenario(asscenario, path = asscenario$desc$scenario_id)
}
\arguments{
\item{asscenario}{[\code{\link{ASScenario}}]\cr
Algorithm selection scenario.}
\item{path}{[\code{character(1)}]\cr
Path to write scenario to. Default is the name of the scenario.}
}
\description{
Splits an algorithm selection scenario into description, feature
values / runstatus / costs, algorithm performance and cv splits and
saves those data sets as single ARFF files in the given directory.
}
\seealso{
\code{\link{parseASScenario}}
}
|
31f8050a3061fbb7ccb592167191b02e6eadf599
|
83f845cf9d3987c8d816ca9b0d168c9c90e6cdf4
|
/R/helpers.R
|
ce95b63c1bcd32e5c3a26d02a6fa3fcaae88de55
|
[] |
no_license
|
jslefche/piecewiseSEM
|
e5a572eef4538b6fb2cb0df00fa3e49f7c3c3872
|
aac65aafd979b8dbce6c725b11b85123097f6fe7
|
refs/heads/main
| 2023-06-07T12:30:47.849089
| 2023-06-02T18:45:45
| 2023-06-02T18:45:45
| 22,606,015
| 145
| 53
| null | 2023-03-28T21:10:38
| 2014-08-04T13:55:07
|
R
|
UTF-8
|
R
| false
| false
| 16,684
|
r
|
helpers.R
|
#' Remove random effects from all.vars
#'
#' Extracts every variable name from a model or formula, dropping lme4-style
#' random-effect terms (any term containing "|"). A correlated-error
#' specification (class `formula.cerror`, i.e. "lhs ~~ rhs") is split on
#' "~~" and returned with whitespace removed.
#'
#' @param formula. A formula, a `formula.cerror`, or a fitted model whose
#'   formula can be recovered with `formula()`.
#'
#' @return A character vector of variable names.
#'
#' @keywords internal
#'
all.vars.merMod <- function(formula.) {
# Coerce fitted models to their formula first
if(!any(class(formula.) %in% c("formula", "formula.cerror"))) formula. <- formula(formula.)
if(inherits(formula., "formula.cerror"))
# "a ~~ b" -> c("a", "b"), stripping all spaces
gsub(" " , "", unlist(strsplit(formula., "~~"))) else {
n <- rownames(attr(terms(formula.), "factors"))
# A "|" in any term flags a random-effect component; remove via lme4::nobars
if(any(grepl("\\|", n)))
all.vars(lme4::nobars(formula.)) else
all.vars(formula.)
}
}
#' Get vector of untransformed variables
#'
#' Returns the variable names of a formula with any transformations (e.g.
#' `log(x)`) stripped, so `log(x) + scale(y)` yields `c("x", "y")`.
#' Interaction terms keep their ":" separator but each component is
#' stripped individually. A `formula.cerror` is split on " ~~ " instead.
#'
#' @param formula. A formula, a `formula.cerror`, or a fitted model.
#'
#' @return A character vector of bare (untransformed) variable names.
#'
#' @keywords internal
#'
all.vars_notrans <- function(formula.) {
if(!all(class(formula.) %in% c("formula", "formula.cerror"))) formula. <- formula(formula.)
if(inherits(formula., "formula")) {
# Drop random-effect terms before reading the fixed-effect variables
if(any(grepl("\\|", formula.))) formula. <- lme4::nobars(formula.)
formula. <- all.vars_trans(formula.)
if(any(grepl(":", formula.))) {
# Interactions: strip transformations from each component separately
idx <- which(grepl(":", formula.))
for(i in idx) formula.[i] <- paste(sapply(strsplit(formula.[i], ":"), stripTransformations), collapse = ":")
for(j in (1:length(formula.))[-idx]) formula.[j] <- stripTransformations(formula.[j])
} else {
formula. <- sapply(formula., stripTransformations)
}
} else formula. <- unlist(strsplit(formula., " ~~ "))
# Drop anything after a comma (e.g. extra arguments left over from s(x, k = 3))
ret <- gsub("(,.*)", "", formula.)
return(ret)
}
#' Get vector of transformed variables
#'
#' Returns the terms of a formula as written (transformations intact),
#' response first, e.g. `y ~ log(x) + z` yields `c("y", "log(x)", "z")`.
#' Smoother wrappers `s(...)` are unwrapped unless `smoothed = TRUE`.
#' An intercept-only formula returns just the response; a `formula.cerror`
#' is split on " ~~ ".
#'
#' @param formula. A formula, a `formula.cerror`, or a fitted model.
#' @param smoothed Keep `s(...)` wrappers? Default `FALSE` unwraps them.
#'
#' @return A character vector: response followed by term labels.
#'
#' @keywords internal
#'
all.vars_trans <- function(formula., smoothed = FALSE) {
if(!all(class(formula.) %in% c("formula", "formula.cerror"))) formula. <- formula(formula.)
if(inherits(formula., "formula")) {
# Intercept-only model: just the response
if(formula.[[3]] == 1) ret <- deparse(formula.[[2]]) else {
if(any(grepl("\\|", formula.))) formula. <- lme4::nobars(formula.)
ret <- c(rownames(attr(terms(formula.), "factors"))[1], labels(terms(formula.)))
# Unwrap s(x, ...) -> x and drop trailing ", ..." arguments
if(smoothed == FALSE) ret <- gsub("(.*)\\,.*", "\\1", gsub("s\\((.*)\\).*", "\\1", ret))
# else {
# ret <- gsub("(s\\(.*),.*", "\\1", ret)
#
# if(any(grepl("s\\(", ret))) ret <- sapply(ret, function(x)
# ifelse(grepl("s\\(", x) & !grepl("\\)", x), paste0(x, ")"), x))
# }
# ret <- gsub("(,.*)", "", ret)
}
return(ret)
} else unlist(strsplit(formula., " ~~ "))
}
#' Capture the printed form of a table as text
#'
#' Renders `g` with `print()` and captures the console lines, appending a
#' newline to each. An empty data frame is replaced by an explanatory
#' message about missing independence claims.
#'
#' @param g Object to print (typically a data frame).
#' @param row.names Passed through to `print()`; print row names?
#'
#' @return A character vector of printed lines, each ending in "\n".
#'
#' @keywords internal
#'
captureTable <- function(g, row.names = FALSE) {
  printed <- capture.output(print(g, row.names = row.names))
  empty_df_output <- "data frame with 0 columns and 0 rows"
  if (all(printed == empty_df_output)) {
    printed <- "No independence claims present. Tests of directed separation not possible."
  }
  paste0(printed, "\n")
}
#' Bind data.frames of differing dimensions
#'
#' From: https://stackoverflow.com/a/31678079
#'
#' Column-binds data frames / matrices with differing row counts, padding
#' shorter ones with NA rows up to the maximum row count. Non-data-frame
#' arguments (scalars/vectors) are recycled to the maximum row count and
#' appended as extra columns.
#'
#' @param ... data.frames to be bound, separated by commas
#'
#' @return A single data.frame with max(nrow) rows.
#'
#' @keywords internal
#'
cbind_fill <- function(...) {
nm <- list(...)
# Which arguments are tabular (data.frame or matrix, incl. subclasses)?
dfdetect <- grepl("data.frame|matrix", unlist(lapply(nm, function(cl) paste(class(cl), collapse = " ") )))
# Remaining arguments become a data.frame of recycled columns
vec <- data.frame(nm[!dfdetect])
n <- max(sapply(nm[dfdetect], nrow))
vec <- data.frame(lapply(vec, function(x) rep(x, n)))
if (nrow(vec) > 0) nm <- c(nm[dfdetect], list(vec))
nm <- lapply(nm, as.data.frame)
# Pad each table with NA rows so all have n rows, then cbind
do.call(cbind, lapply(nm, function (df1)
rbind(df1, as.data.frame(matrix(NA, ncol = ncol(df1), nrow = n-nrow(df1), dimnames = list(NULL, names(df1))))) ))
}
#' Transform variables based on model formula and store in new data frame
#'
#' Applies the transformations written in the formula (e.g. `log(x)`) to
#' the corresponding raw columns of `data`, and renames the columns to the
#' untransformed variable names.
#'
#' NOTE(review): the loop indexes both `notrans[k]` and `trans[k]` with the
#' same k — this assumes the two vectors are equal-length and aligned,
#' which is not obviously guaranteed after the ":" split; verify upstream.
#'
#' @param formula. A formula, `formula.cerror`, or fitted model.
#' @param data Data frame holding the raw (untransformed) columns.
#'
#' @return The data frame with transformed values and bare column names.
#'
#' @keywords internal
#'
dataTrans <- function(formula., data) {
notrans <- all.vars.merMod(formula.)
# For correlated errors, unwrap any transformation left in the names
if(inherits(formula., "formula.cerror")) notrans <- gsub(".*\\((.*)\\)", "\\1", notrans)
trans <- all.vars_trans(formula.)
# Break interactions into their components and de-duplicate
trans <- unlist(strsplit(trans, "\\:"))
trans <- trans[!duplicated(trans)]
if(any(grepl("scale\\(.*\\)", trans))) {
#
# trans[which(grepl("scale(.*)", trans))] <- notrans[which(grepl("scale(.*)", trans))]
#
warning("`scale` applied directly to variable. Use argument `standardize = TRUE` instead.", call. = FALSE)
}
# Only rebuild columns when some variable is actually transformed
if(any(!notrans %in% trans)) {
for(k in 1:length(notrans)) {
# Factors cannot be numerically transformed; skip them
if(is.factor(data[, notrans[k]])) next else
if(grepl("scale(.*)", trans[k])) data[, notrans[k]] <- scale(data[, notrans[k]]) else
# Evaluate the transformation text with the raw value substituted in
data[, notrans[k]] <-
sapply(data[, notrans[k]], function(x) eval(parse(text = gsub(notrans[k], x, trans[k]))))
}
}
colnames(data) <- notrans
return(data)
}
#' Get ANOVA results from `merMod`
#'
#' Combines `car::Anova()` test statistics with the model's coefficient
#' table into one data frame with columns Estimate, Std.Error, DF,
#' Crit.Value and Pr(>|t|). Terms present in the Anova table but absent
#' from the coefficient table (e.g. multi-df factors) get NA estimates.
#'
#' NOTE(review): membership is tested by name (`rownames(krp)[i] %in%
#' rownames(ct)`) but the coefficient row is then taken positionally
#' (`ct[i, ]`) — this assumes identical row ordering of the two tables;
#' confirm for models with factors or an intercept row.
#'
#' @param model A fitted (mixed) model accepted by `car::Anova()`.
#' @param test.statistic Statistic passed to `car::Anova()` ("F" default;
#'   forced to "Chisq" for glmmTMB fits).
#' @param test.type Type of sums of squares ("III" default).
#'
#' @return A data frame, one row per Anova term.
#'
#' @keywords internal
#'
getAnova <- function(model, test.statistic = "F", test.type = "III") {
# glmmTMB has no F statistic; fall back to Wald chi-square
if(inherits(model, "glmmTMB")) test.statistic = "Chisq"
krp <- as.data.frame(car::Anova(model, test.statistic = test.statistic, type = test.type))
ct <- summary(model)$coefficients
colnames(ct)[2] <- "Std.Error"
ret <- do.call(rbind, lapply(1:nrow(krp), function(i) {
if(rownames(krp)[i] %in% rownames(ct)) {
cbind.data.frame(
ct[i, 1:2, drop = FALSE],
DF = krp[i, 3],
Crit.Value = krp[i, 1],
P = krp[i, ncol(krp)],
row.names = NULL
)
} else {
# Term has no single coefficient (e.g. multi-level factor)
data.frame(
Estimate = NA,
Std.Error = NA,
DF = krp[i, 3],
Crit.Value = krp[i, 1],
P = krp[i, ncol(krp)]
)
}
} ) )
# ret <- cbind.data.frame(
# ct[, 1:2],
# DF = krp[, 3],
# Crit.Value = krp[, 1],
# P = krp[, ncol(krp)]
# )
names(ret)[ncol(ret)] <- "Pr(>|t|)"
rownames(ret) <- rownames(krp)
return(ret)
}
#' Get random effects from lme
#'
#' Reads the `random =` argument out of the model call and returns the
#' grouping-factor names, splitting nested groupings "a/b" into their
#' components.
#'
#' NOTE(review): assumes `model$call$random` holds formula-like elements
#' whose second component deparses to "... | group"; confirm against how
#' the lme models in this package are fit.
#'
#' @param model A fitted `nlme::lme` model.
#'
#' @return Character grouping-factor names (shape depends on `sapply`).
#'
#' @keywords internal
#'
findbars.lme <- function(model) {
rand <- model$call$random
sapply(rand, function(i) {
# Keep the text after "|" (the grouping factor expression)
i = gsub(".*\\|(.*)", "\\1", as.character(i)[2])
# Split nested groupings like "site/plot"
strsplit(gsub(" " , "", i), "\\/")[[1]]
} )
}
#' Get data from model list
#'
#' Extracts the data behind every model in a psem/list, column-binds them
#' (padding unequal row counts with NA via `cbind_fill`), and drops
#' duplicate column names, keeping the last occurrence. A
#' `comparative.data` object is returned as-is (first element).
#'
#' @param modelList A `psem` object, a list of models, or a single model.
#'
#' @return A data frame combining all model data.
#'
#' @keywords internal
#'
GetData <- function(modelList) {
if(!all(class(modelList) %in% c("psem", "list"))) modelList <- list(modelList)
# Remove raw formulas / data objects so only fitted models remain
modelList <- removeData(modelList, formulas = 1)
data.list <- lapply(modelList, GetSingleData)
data.list <- data.list[!sapply(data.list, is.null)]
data.list <- unname(data.list)
if(all(sapply(data.list, class) == "comparative.data"))
data <- data.list[[1]] else
data <- do.call(cbind_fill, data.list)
# Last occurrence wins for columns shared by several models
data <- data[, !duplicated(colnames(data), fromLast = TRUE)]
# colnames(data) <- gsub(".*\\((.*)\\).*", "\\1", colnames(data))
data <- as.data.frame(data)
rownames(data) <- 1:nrow(data)
return(data)
}
#' Get data from one model
#'
#' Recovers the data frame used to fit a single model, dispatching on the
#' model's (first) class. Phylogenetic models (`phylolm`/`phyloglm`) do
#' not store their data and therefore error with a request to supply
#' `data =` to `psem`. Unrecognized classes fall through and return the
#' initial empty data frame.
#'
#' @param model A fitted model object.
#'
#' @return The model's data (a data frame), or an empty data frame for
#'   unrecognized model classes.
#'
#' @keywords internal
#'
GetSingleData <- function(model) {
dat <- data.frame()
switch(class(model)[1],
# Phylogenetic fits keep no data reference; must be supplied by the user
"phylolm" = {
stop("Please provide `data =` argument to `psem`.", call. = FALSE)
},
"phyloglm" = {
stop("Please provide `data =` argument to `psem`.", call. = FALSE)
},
# Classes below re-evaluate the `data` argument of the original call
"lm" ={
dat <- eval(getCall(model)$data, environment(formula(model)))
},
"negbin" = {
dat <- eval(getCall(model)$data, environment(formula(model)))
},
"Sarlm" = {
dat <- eval(getCall(model)$data, environment(formula(model)))
},
# Classes below store the data directly on the fitted object
"glm" = {
dat <- model$data
},
"glmmPQL" = {
dat <- model$data
},
"pgls" = {
dat <- model$data
},
"lmerMod" = {
dat <- lme4::getData(model) #model@frame
},
"glmerMod" = {
dat <- lme4::getData(model) #model@frame
},
"lmerModLmerTest" = {
dat <- lme4::getData(model) #model@frame
},
"glmmTMB" = {
dat <- model$frame
},
"gls" = {
dat <- nlme::getData(model)
},
"lme" = {
dat <- nlme::getData(model)
},
"gam" = {
dat <- eval(getCall(model)$data, environment(formula(model)))
}
)
return(dat)
}
#' Obtain (observation-level) random effects from a generalized linear mixed model
#'
#' Computes per-random-term variance contributions from a list of
#' variance-covariance matrices `sigma`. An "observation-level" random
#' effect (OLRE) is a grouping factor with as many levels as rows in the
#' data.
#'
#' RE = "all" all random effects are reported
#' RE = "RE" just group effects are reported
#' RE = "OLRE" just observation-level effects are reported
#'
#' NOTE(review): `rand` is only assigned inside the lmerMod/glmerMod
#' branch — for any other model class the function would fail at the
#' `strsplit(rand, ...)` line. Also `if(class(model) %in% ...)` and
#' `if(RE == ...)` rely on first-element comparison when given vectors;
#' callers presumably pass a single RE string. Confirm both assumptions.
#'
#' @param sigma Named list of VarCorr matrices, one per random term.
#' @param model The fitted mixed model (lmerMod/glmerMod supported).
#' @param X Fixed-effects model matrix, or NULL to derive it from `model`.
#' @param data The model data (used to detect observation-level factors).
#' @param RE Which effects to report: "all", "RE", or "OLRE".
#'
#' @return Numeric vector of variance contributions (0 when none apply).
#'
#' @keywords internal
#'
GetOLRE <- function(sigma, model, X, data, RE = c("all", "RE", "OLRE")) {
if(class(model) %in% c("lmerMod", "glmerMod")) {
if(is.null(X)) X <- model.matrix(model)
# Grouping factor name of each (.|g) term
rand <- sapply(lme4::findbars(formula(model)), function(x) as.character(x)[3])
rand <- rand[!duplicated(rand)]
}
# else if(class(model) %in% c("lme", "glmmPQL")) { }
# TRUE where a grouping factor has one level per row (observation-level)
idx <- sapply(sapply(strsplit(rand, "\\:"), function(x) gsub("\\(|\\)", "", x)), function(x) {
length(unique(data[, x])) == nrow(data)
} )
sigma.names <- unlist(names(sigma)) # unlist(strsplit(names(sigma), "\\."))
# Variance terms that do NOT belong to an observation-level factor
idx. <- sapply(sigma.names, function(x) !any(x %in% rand[idx]))
if(RE == "RE")
out <- sapply(sigma[idx.], function(i) {
# Fall back to the raw random-effects design matrix when needed
if(all(rownames(i) %in% colnames(X))) X. <- X else
X. <- do.call(cbind, model.matrix(model, type = "randomListRaw"))
Z <- as.matrix(X.[, rownames(i), drop = FALSE])
sum(rowSums(Z %*% i) * Z) / nrow(X.)
} ) else if(RE == "OLRE") {
if(all(idx == FALSE)) out <- 0 else {
out <- sapply(sigma[idx], function(i) {
Z <- as.matrix(X[, rownames(i), drop = FALSE])
sum(rowSums(Z %*% i) * Z) / nrow(X)
} ) } } else if(RE == "all")
out <- sapply(sigma, function(i) {
Z <- as.matrix(X[, rownames(i), drop = FALSE])
sum(rowSums(Z %*% i) * Z) / nrow(X)
} )
if(length(out) == 0) out <- 0
return(out)
}
#' Get random effects variance-covariance from lme
#'
#' Tries `getVarCov()` first; when that fails (as it does for some lme
#' fits, e.g. multiple/nested groupings), falls back to parsing
#' `nlme::VarCorr()` output into a list of 1x1 named variance matrices,
#' one per random term, named by grouping level header.
#'
#' @param model A fitted `nlme::lme` model.
#'
#' @return A list of variance-covariance matrices.
#'
#' @keywords internal
#'
GetVarCov <- function(model) {
vc <- try(getVarCov(model), silent = TRUE)
if(any(class(vc) == "try-error")) {
vc <- nlme::VarCorr(model)
# First column holds variances; headers coerce to NA and mark group names
v <- suppressWarnings(as.numeric(vc[, 1]))
names(v) <- gsub(" =", "", rownames(vc))
# Drop the residual (last) entry and the NA header rows
vm <- as.list(na.omit(v[-length(v)]))
vl <- lapply(1:length(vm), function(i) matrix(vm[[i]], dimnames = list(names(vm)[i], names(vm)[i])))
names(vl) <- names(which(is.na(v)))
vl
} else list(vc)
}
#' Assess significance
#'
#' Maps P-values to significance stars: "***" for p <= 0.001,
#' "**" for 0.001 < p <= 0.01, "*" for 0.01 < p < 0.05, "" otherwise.
#' Vectorized; NA inputs yield NA.
#'
#' @param p Numeric vector of P-values.
#'
#' @return Character vector of significance markers.
#'
#' @keywords internal
#'
isSig <- function(p) {
  one_star <- p > 0.01 & p < 0.05
  two_stars <- p > 0.001 & p <= 0.01
  three_stars <- p <= 0.001
  ifelse(one_star, "*",
         ifelse(two_stars, "**",
                ifelse(three_stars, "***", "")))
}
#' Recompute P-values using Kenward-Rogers approximation
#'
#' @keywords internal
#'
# KRp <- function(model, vars, data, intercepts = FALSE) {
#
# # if(any(grepl("\\*", all.vars_notrans(formula(model)))) & !all(grepl("\\*", vars))) {
#
# f <- all.vars_trans(formula(model))
#
# model <- update(model, as.formula(paste(f[1], " ~ ", paste(f[-1], collapse = " + "), " + ", paste(onlyBars(formula(model)), collapse = " + "))))
#
# # }
#
# out <- data.frame()
#
# for(x in vars) { #sapply(vars, function(x) {
#
# reduceModel <- update(model, as.formula(paste(". ~ . -", x)))
#
# if(nobs(model) != nobs(reduceModel)) stop("Different sample sizes for `KRmodcomp`. Remove all NAs and re-run")
#
# kr <- try(pbkrtest::KRmodcomp(model, reduceModel), silent = TRUE)
#
# if(class(kr) == "try-error")
#
# stop("Cannot obtain P-values from `lmerMod` using `pbkrtest::KRmodcopm`. Consider fitting using `nlme::lme`") else {
#
# d <- round(kr$stats$ddf, 2)
#
# p <- kr$stats$p.valueU
#
# out <- rbind(out, data.frame(d, p))
#
# }
#
# } # )
#
# if(intercepts == TRUE) {
#
# reduceModelI <- update(model, as.formula(paste("~ . - 1")), data = data)
#
# krI <- try(pbkrtest::KRmodcomp(model, reduceModelI), silent = TRUE)
#
# if(class(krI) == "try-error")
#
# stop("Cannot obtain P-values from `lmerMod` using `pbkrtest::KRmodcomp`. Consider re-fitting using `nlme::lme`")else {
#
# dI <- krI$stats$ddf
#
# pI <- krI$stats$p.valueU
#
# out <- rbind(data.frame(d = dI, p = pI), out)
#
# }
#
# }
#
# return(out)
#
# }
#' Get list of formula from a `psem` object
#'
#' Extracts the formula of each component of a psem/list, passing
#' `formula.cerror` entries through unchanged, and strips random-effect
#' bars from each result with `lme4::nobars`.
#'
#' NOTE(review): `nobars` is also applied to `formula.cerror` entries —
#' presumably a pass-through since they contain no "|"; confirm.
#'
#' @param modelList A `psem` object, list of models, or single model.
#' @param formulas Passed to `removeData()`: 0 keeps everything, 1 drops
#'   all formulas, 2 drops plain formulas, 3 drops correlated errors.
#'
#' @return A list of formulas (and any retained `formula.cerror` objects).
#'
#' @keywords internal
#'
listFormula <- function(modelList, formulas = 0) {
modelList <- removeData(modelList, formulas)
if(!all(class(modelList) %in% c("psem", "list"))) modelList <- list(modelList)
fList <- lapply(modelList, function(i) if(any(class(i) %in% c("formula.cerror"))) i else formula(i) )
# Drop random-effect terms so only fixed structure remains
fList <- lapply(fList, lme4::nobars)
return(fList)
}
#' Get number of observations from a model
#'
#' Uses `nobs()` where available; for model classes without a `nobs`
#' method (phylolm, phyloglm, Sarlm) counts the fitted values instead.
#'
#' @param object A fitted model.
#' @param ... Passed through to `nobs()`.
#'
#' @return Number of observations.
#'
#' @keywords internal
#'
nObs <- function(object, ...) {
  lacks_nobs_method <- c("phylolm", "phyloglm", "Sarlm")
  if (inherits(object, lacks_nobs_method)) {
    length(fitted(object))
  } else {
    nobs(object, ...)
  }
}
#' Get random effects from merMod
#'
#' Returns the random-effect portion of a formula as a single string,
#' e.g. "(1 | group) + (x | site)". With `slopes = FALSE`, only
#' intercept-only terms ("(1 | g)") are kept.
#'
#' @param formula. A formula containing lme4-style bar terms.
#' @param slopes Keep random-slope terms? Default TRUE.
#'
#' @return A single character string of "+"-joined bar terms (empty
#'   string when none match).
#'
#' @keywords internal
#'
onlyBars <- function(formula., slopes = TRUE) {
f <- lme4::findbars(formula.)
if(slopes == TRUE) paste(sapply(f, function(x) paste0("(", deparse(x), ")")), collapse = " + ") else {
# paste(sapply(f, function(x) paste0("(1 ", gsub(".*(\\|.*)", "\\1", f), ")")), collapse = "+")
# Keep only random-intercept terms such as "1 | group"
f <- f[sapply(f, function(x) grepl("1\\||1 \\|", deparse(x)))]
paste(sapply(f, function(x) paste0("(", deparse(x), ")")), collapse = " + ")
}
}
#' Do not print attributes with custom functions
#'
#' Print method for objects of class "attr": strips all attributes
#' (including names and class) and returns the bare value wrapped in
#' `noquote()` so it prints without quotation marks.
#'
#' @param x Object to print.
#' @param ... Ignored (required by the print generic).
#'
#' @return A `noquote` copy of `x` with attributes removed.
#'
#' @keywords internal
#'
#' @method print attr
#'
print.attr <- function(x, ...) {
attributes(x) <- NULL
noquote(x)
}
#' Remove data from the model list
#'
#' Filters out non-model entries (character vectors, matrices, data
#' frames, SpatialPointsDataFrame, comparative.data) and, depending on
#' `formulas`, formula objects as well.
#'
#' formulas = 0, keep everything
#' formulas = 1, remove all formulas including correlated errors
#' formulas = 2, remove only formula but keep correlated errors
#' formulas = 3, remove correlated errors but keep formula
#'
#' @param modelList A list of models and possibly data/formula entries.
#' @param formulas Integer flag controlling formula removal (see above).
#'
#' @return The list with matching entries removed.
#'
#' @keywords internal
#'
removeData <- function(modelList, formulas = 0) {
  drop_classes <- c("character", "matrix", "data.frame",
                    "SpatialPointsDataFrame", "comparative.data")
  extra <- switch(as.character(formulas),
                  "1" = c("formula", "formula.cerror"),
                  "2" = "formula",
                  "3" = "formula.cerror",
                  character(0))
  drop_classes <- c(drop_classes, extra)
  is_droppable <- vapply(modelList, function(x) inherits(x, drop_classes), logical(1))
  modelList[!is_droppable]
}
#' Strip transformations
#'
#' Reduces a term string to its bare variable name: unwraps a function
#' call (e.g. "log(x)" -> "x"), drops anything after a "+" (e.g.
#' "x + 1" -> "x"), and removes all spaces.
#'
#' @param x Character vector of term strings.
#'
#' @return Character vector of bare variable names.
#'
#' @keywords internal
#'
stripTransformations <- function(x) {
  unwrapped <- gsub(".*\\((.*)\\).*", "\\1", x)
  no_offset <- gsub("(.*)\\+.*", "\\1", unwrapped)
  gsub(" ", "", no_offset)
}
#' Get Response Name as a Character
#'
#' Removes data/formula entries from the model list, then returns the
#' deparsed left-hand side of each remaining model's formula.
#'
#' @param mod A list of fitted models (psem components).
#'
#' @return Character vector of response names.
#'
#' @keywords internal
#'
get_response <- function(mod) {
  models <- removeData(mod)
  lhs <- lapply(models, function(m) formula(m)[[2]])
  as.character(lhs)
}
#' Get Left-hand side of formulae
#'
#' Deparses the response (left-hand side) of each formula in a list.
#'
#' @param formulaList A list of two-sided formulas.
#'
#' @return Character vector of responses (a list/matrix if any response
#'   deparses to multiple strings, per `sapply` simplification).
#'
#' @keywords internal
#'
getLHS <- function(formulaList){
  extract_lhs <- function(f) as.character(f[[2]])
  sapply(formulaList, extract_lhs)
}
#' Get Right-hand side of formulae
#'
#' Collects the unique predictor variable names appearing on the
#' right-hand side of any formula in the list.
#'
#' @param formulaList A list of two-sided formulas.
#'
#' @return Character vector of unique predictor names.
#'
#' @keywords internal
#'
getRHS <- function(formulaList){
  predictor_sets <- lapply(formulaList, function(f) all.vars(f)[-1])
  unique(unlist(predictor_sets, use.names = FALSE))
}
#' Operator for non-overlap in sets
#'
#' Returns the elements of `x` that do not occur in `y`, preserving
#' order and duplicates (unlike `setdiff`).
#'
#' @param x Vector to filter.
#' @param y Vector of values to exclude.
#'
#' @return The elements of `x` absent from `y`.
#'
#' @keywords internal
#'
"%not_in%" <- function(x, y) {
  absent <- is.na(match(x, y))
  x[absent]
}
#' Get a sorted psem object in DAG order
#'
#' @description Takes a [psem] object, pulls out the
#' DAG, and then sorts the psem object into the order
#' of the DAG (from exogenous to terminal endogenous
#' variable) for use by other functions. Note: removes
#' correlated errors.
#'
#' @param object A fit [psem] object
#' @param keepdata Defaults to TRUE. Should the
#' data with the psem be included in the returned
#' object?
#'
#' @return A new [psem] object, without the data.
#' @export
getSortedPsem <- function(object, keepdata = TRUE){
# Stash the data and strip it (plus all formulas) from the model list
dat <- object$data
object <- removeData(object, formulas = 1)
# Extract each component model's formula and name models by their response
formulaList <- listFormula(object)
lhs <- getLHS(formulaList)
names(object)<- lhs
# Build the DAG and topologically sort it (exogenous -> endogenous)
object_dag <- getDAG(formulaList)
sorted_dag <- sortDag(object_dag, formulaList)
lhs_sorted <- colnames(sorted_dag)
# Keep only DAG nodes that correspond to actual component models
lhs_sorted <- lhs_sorted[which(lhs_sorted %in% lhs)]
# Reorder the psem components to DAG order
object <- object[lhs_sorted]
# Re-attach the stashed data if requested
if(keepdata) object$data <- dat
return(object)
}
|
f4e24bb74545ba6aeebc969ce1c9b844a40b57b5
|
6a8a10228612c00e09ea2a33cf0e62c37e7eabb2
|
/R/writeVarianceExplained.R
|
8cb8205f12fad52f4e48a3a2045ce49a76366c8a
|
[
"MIT"
] |
permissive
|
avcarr2/MetaNetworkDownloadable
|
b69b22e56b062aff3225d4399fd894863a713abc
|
e94d406f676b48dd14d508360bd0093562e224df
|
refs/heads/main
| 2023-01-27T14:15:53.268785
| 2020-12-02T22:38:24
| 2020-12-02T22:38:24
| 316,055,812
| 0
| 0
|
MIT
| 2020-12-02T22:38:25
| 2020-11-25T21:19:03
|
R
|
UTF-8
|
R
| false
| false
| 436
|
r
|
writeVarianceExplained.R
|
## Variance explained by eigenproteins
# Computes, for each module, the proportion of expression variance
# explained by its eigenprotein (via propVarExplained, defined elsewhere,
# using Pearson correlation with pairwise-complete observations) and
# writes the result to Results/varianceExplained.csv.
#
# datExpr: expression data matrix (samples x proteins).
# colors:  module color assignment per protein.
# MEs:     module eigenprotein matrix.
#
# Side effects: writes a CSV (assumes Results/ exists) and emits a
# status message. Returns message()'s invisible NULL.
writeVarianceExplained <- function(datExpr,
colors,
MEs){
  var_explained <- propVarExplained(datExpr, colors, MEs,
                                    corFnc = "cor",
                                    corOptions = "use = 'p'")
  write.csv(x = var_explained, file = "Results/varianceExplained.csv")
  message("Variance explained file successfully written")
}
|
22970a85458b0dba43c5f2ae9277197cfec8e105
|
4a8188222cdb0935f963f4e2b5ef95905586e28d
|
/dataLoading.R
|
38e752371fb55a29fd8e709892dcd0fb4851b079
|
[] |
no_license
|
Nevethan/SM-Exercises
|
dbbddca2521d78deea3258271798db1700b15a23
|
7a472c0f4d6ed53cca4a509c67e3539f69b441aa
|
refs/heads/master
| 2018-05-31T11:36:28.908667
| 2018-05-31T05:45:39
| 2018-05-31T05:45:39
| 120,762,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,350
|
r
|
dataLoading.R
|
#KNN Assignment
library("gmodels")
#source('C:/Users/Bruger/Desktop/SM-Exercises/loadImage.R', echo=TRUE) #This contains Smoothing override method
#source('C:/Users/Bruger/Desktop/Statistical Mashine Learning/BaseFolder/loadImage.R')
source('loadImage.R')
source('Methods.R')
#directory <- 'C:/Users/Bruger/Desktop/Statistical Mashine Learning/2018/group';
#directory <- 'C:/Users/Anna/svn/new/trunk/preProcessed/2018/group';
# Load up to `persons` individual datasets described by `dataList`.
#
# dataList: list indexed by group (1-based); element i holds the member
#   ids available for group i - 1 (group numbers are 0-based downstream).
# folder:  directory passed through to loadSinglePersonsData().
# persons: maximum number of datasets to load; loading stops as soon as
#   this many have been collected.
#
# Returns a list of loaded datasets (possibly shorter than `persons` when
# dataList does not contain enough entries).
#
# Fixes vs. previous version: removed the unused `id` data frame; replaced
# `1:length(...)` with seq_along() so an empty dataList no longer iterates
# over index 0 and errors; termination uses >= so a non-positive `persons`
# cannot loop past the requested count.
getAllData <- function(dataList, folder, persons){
  loaded <- 0
  idList <- list()
  for (i in seq_along(dataList)) {
    members <- dataList[[i]]
    if (length(members) == 0) next
    for (j in seq_along(members)) {
      # Group numbers are 0-based in loadSinglePersonsData, hence i - 1
      idTemp <- loadSinglePersonsData(100, i - 1, members[j], folder)
      idList <- append(idList, list(idTemp))
      loaded <- loaded + 1
      if (loaded >= persons) {
        return(idList)
      }
    }
  }
  return(idList)
}
# Example: getDisjunctData('C:/Users/Anna/svn/new/trunk/preProcessed/2018/group', 2, 70)
#
# Loads `persons` datasets, stacks them, shuffles the combined data, and
# splits `split`% into training and the rest into test (each half is
# shuffled again). Column 1 is treated as the class label.
#
# NOTE(review): despite the name, the full dataset is shuffled BEFORE the
# train/test split, so samples from the same person can appear in both
# sets — confirm this is the intended "disjunct" protocol.
#
# Returns list(train, test, train.labels, test.labels).
getDisjunctData <- function(folder, persons, split) {
# Available member ids per group (group i-1), hard-coded for the 2018 cohort
datalist <- list( list( 1) ,list( 1, 2 ), list( 1, 2, 3 ), list( 1, 2, 3 ), list( 1, 0, 4, 2, 3 ),
list( 1, 5, 4, 2, 3 ), list( 0, 2, 3 ), list( 1 ), list( 1, 2, 3 ), list( 1, 2, 3 ),
list( 1, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ), list( 1, 2 ), list( 1, 2, 3 ),
list( 1, 2 ), list( 1, 4, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ))
idList <- getAllData(datalist, folder, persons)
# Stack all loaded persons' data into one frame
id <- data.frame()
for(i in 1:length(idList)){
idTemp <- idList[i]
idTemp <- data.frame(idTemp)
id <- as.data.frame(rbind(id,idTemp))
}
dataresult <- as.data.frame(id)
#normalize(dataset)
dataset <- datasetShuffle(dataresult)
# Split: assumes 4000 rows per person; split is a percentage (e.g. 70)
total_persons <- persons*4000
split_training <- total_persons*split/100
train <- 1:split_training
test <- (split_training+1):total_persons
data.train <- dataset[train,]
data.test <- dataset[test,]
# Shuffle each partition once more
tempTrain <- datasetShuffle(data.train)
tempTest <- datasetShuffle(data.test)
# Column 1 holds the class label; remaining columns are features
dataset.train.labels <- factor(tempTrain[,1])
dataset.test.labels <- factor(tempTest[,1])
dataset.train <- tempTrain[,-1]
dataset.test <- tempTest[,-1]
return(list("train" = dataset.train, "test" = dataset.test, "train.labels" = dataset.train.labels, "test.labels" = dataset.test.labels))
}
# Example: getAllPersonsInData('C:/Users/Anna/svn/new/trunk/preProcessed/2018/group', 2, 70)
#
# Loads `persons` datasets, stacks them, shuffles the combined data once,
# and splits `split`% into training / rest into test — every person's
# samples are mixed across both partitions. Column 1 is the class label.
#
# Returns list(train, test, train.labels, test.labels).
getAllPersonsInData <- function(folder, persons, split) {
# Available member ids per group (group i-1), hard-coded for the 2018 cohort
datalist <- list( list( 1) ,list( 1, 2 ), list( 1, 2, 3 ), list( 1, 2, 3 ), list( 1, 0, 4, 2, 3 ),
list( 1, 5, 4, 2, 3 ), list( 0, 2, 3 ), list( 1 ), list( 1, 2, 3 ), list( 1, 2, 3 ),
list( 1, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ), list( 1, 2 ), list( 1, 2, 3 ),
list( 1, 2 ), list( 1, 4, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ))
idList <- getAllData(datalist, folder, persons)
# Stack all loaded persons' data into one frame
id <- data.frame()
for(i in 1:length(idList)){
idTemp <- idList[i]
idTemp <- data.frame(idTemp)
id <- as.data.frame(rbind(id,idTemp))
}
dataresult <- as.data.frame(id)
#normalize(dataset)
data.shuffle <- datasetShuffle(dataresult)
# Split: assumes 4000 rows per person; split is a percentage (e.g. 70)
total_persons <- persons*4000
split_training <- total_persons*split/100
train <- 1:split_training
test <- (split_training+1):total_persons
temp.train <- data.shuffle[train,]
temp.test <- data.shuffle[test,]
# Column 1 holds the class label; remaining columns are features
dataset.train.labels <- factor(temp.train[,1])
dataset.test.labels <- factor(temp.test[,1])
dataset.train <- temp.train[,-1]
dataset.test <- temp.test[,-1]
return(list("train" = dataset.train, "test" = dataset.test, "train.labels" = dataset.train.labels, "test.labels" = dataset.test.labels))
}
# Example: getIndividualData('C:/Users/Anna/svn/new/trunk/preProcessed/2018/group', 2, 70)
#
# Loads `persons` datasets and splits each person's data individually:
# `split`% of EVERY person's (shuffled) samples go to training and the
# remainder to test, so both partitions contain every person.
# Column 1 is the class label.
#
# Returns list(train, test, train.labels, test.labels).
getIndividualData <- function(folder, persons, split) {
# Available member ids per group (group i-1), hard-coded for the 2018 cohort
datalist <- list( list( 1) ,list( 1, 2 ), list( 1, 2, 3 ), list( 1, 2, 3 ), list( 1, 0, 4, 2, 3 ),
list( 1, 5, 4, 2, 3 ), list( 0, 2, 3 ), list( 1 ), list( 1, 2, 3 ), list( 1, 2, 3 ),
list( 1, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ), list( 1, 2 ), list( 1, 2, 3 ),
list( 1, 2 ), list( 1, 4, 2, 3 ), list( 1, 4, 2, 3 ), list( 1, 2, 3 ))
# Per-person split indices: assumes 4000 rows per person; split is a percentage
data_pr_person <- 4000
split_training <- data_pr_person*split/100
train <- 1:split_training
test <- (split_training+1):data_pr_person
idList <- getAllData(datalist, folder, persons)
id.train <- data.frame()
id.test <- data.frame()
for(i in 1:length(idList)){
idTemp <- idList[i]
idTemp <- data.frame(idTemp)
# Shuffle one person's data
temp.shuffle <- datasetShuffle(idTemp)
# Split one person's data
temp.train <- temp.shuffle[train,]
temp.test <- temp.shuffle[test,]
id.train <- as.data.frame(rbind(id.train,temp.train))
id.test <- as.data.frame(rbind(id.test,temp.test))
}
temp.train <- as.data.frame(id.train)
temp.test <- as.data.frame(id.test)
# Column 1 holds the class label; remaining columns are features
dataset.train.labels <- factor(temp.train[,1])
dataset.test.labels <- factor(temp.test[,1])
dataset.train <- temp.train[,-1]
dataset.test <- temp.test[,-1]
return(list("train" = dataset.train, "test" = dataset.test, "train.labels" = dataset.train.labels, "test.labels" = dataset.test.labels))
}
|
8dec3e49ce586b9e4aaa4434b79aed51a2e9bc28
|
0dc7121ee1e033ffca6575849e7ce9bed2c7d0c2
|
/R/sharpe.R
|
cf8c8500daeac3f077602ab6010341fc3464cb1c
|
[] |
no_license
|
gmahjub/steal-basis-r
|
156341e2812eaf721890ad25c2dc2fc304130242
|
67d3db66adb1b3aeb91a167b00220a02afe50502
|
refs/heads/master
| 2021-03-31T01:11:27.351523
| 2018-08-05T03:13:19
| 2018-08-05T03:13:19
| 125,122,687
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,143
|
r
|
sharpe.R
|
#' get_period_open_to_close_sharpe
#'
#' Rolling Sharpe ratio of open-to-close returns, computed per symbol over
#' a moving window; optionally annualized by sqrt(scale).
#'
#' @param op2cl_returns_tibble tibble with a `symbol` column and an
#'   `op.cl.Ret` open-to-close return column.
#' @param window_size size of the rolling window.
#' @param annualize defaults to FALSE, if annualize sharpe desired, set to TRUE
#' @param scale 252 for daily periodicity, 52 for weekly, 12 for monthly.
#'
#' @return tibble object with period open to close sharpe.
#' @export
#'
#' @examples
get_period_open_to_close_sharpe<-function(op2cl_returns_tibble, window_size = 5, annualize = FALSE, scale = 252){
  # BUGFIX: the previous version piped the grouped tibble into if_else(),
  # which receives the data as its first (condition) argument and eagerly
  # evaluates both branches — the tq_mutate calls could never run
  # correctly. Select the rolling summary function with a plain if/else.
  sharpe_fun <- if (annualize) {
    function(x) mean(x$op.cl.Ret) / sd(x$op.cl.Ret) * sqrt(scale)
  } else {
    function(x) mean(x$op.cl.Ret) / sd(x$op.cl.Ret)
  }
  period_open_to_close_sharpe_tibble <- op2cl_returns_tibble %>%
    group_by(symbol) %>%
    tq_mutate(mutate_fun = rollapply,
              width = window_size,
              FUN = sharpe_fun,
              by.column = FALSE,
              col_rename = "op.cl.Sharpe")
  return(period_open_to_close_sharpe_tibble)
}
#' get_rolling_Sharpe_AdCl_to_AdCl
#'
#' Calculates a rolling sharpe ratio of returns calculated using adjusted close
#' prices. The returns are calculated day over day, week over week, month over
#' month, etc..., depending on the value of periodicity. Finally, we use
#' tq_mutate to calculate a rolling non-annualized sharpe ratio, where the size
#' of the rolling window is specified in the parameter num_periods.
#'
#' @param tibble_obj tibble of prices with a `symbol` column.
#' @param num_periods size of rolling window for calc of sharpe ratio
#' @param periodicity daily, weekly, monthly, quarterly, yearly
#' @param annualize defaults to FALSE, set to TRUE if annualzied sharpe desired
#' @param scale 252 for daily periodicity, 52 for weekly, 12 for monthly, etc...
#'
#' @return a tibble, all columns from tibble obj input and a new column named "Ad2Ad.Sharpe..."
#' @export
#'
#' @examples
get_rolling_Sharpe_AdCl_to_AdCl<-function(tibble_obj, num_periods, periodicity="daily", annualize = FALSE, scale = 252){
  # Append log returns at the requested periodicity (helpers defined elsewhere).
  # An unrecognized periodicity leaves period_returns_tibble_obj undefined and
  # errors below, as before.
  if (periodicity == "daily"){period_returns_tibble_obj<-get_append_daily_log_returns(tibble_obj, fill_in_between_dates = FALSE)}
  if (periodicity == "weekly"){period_returns_tibble_obj<-get_append_weekly_log_returns(tibble_obj, fill_in_between_dates = FALSE)}
  if (periodicity == "monthly"){period_returns_tibble_obj<-get_append_monthly_log_returns(tibble_obj, fill_in_between_dates = FALSE)}
  if (periodicity == "quarterly"){period_returns_tibble_obj<-get_append_quarterly_log_returns(tibble_obj, fill_in_between_dates = FALSE)}
  if (periodicity == "yearly"){period_returns_tibble_obj<-get_append_yearly_log_returns(tibble_obj, fill_in_between_dates = FALSE)}
  # The helper appends the returns as the LAST column; name the output after it
  num_cols<-length(names(period_returns_tibble_obj))
  rets_col_nm <- names(period_returns_tibble_obj)[num_cols]
  new_col_nm<-paste("Ad2Ad.Sharpe", ".", rets_col_nm, sep='')
  if (annualize){
    sharpe_tibble<-period_returns_tibble_obj %>% group_by(symbol) %>% tq_mutate(select = rets_col_nm,
                                                                                mutate_fun = rollapply,
                                                                                width = num_periods,
                                                                                align="right",
                                                                                by.column=FALSE,
                                                                                FUN = function(x) mean(x)/sd(x)*sqrt(scale),
                                                                                col_rename = c(new_col_nm))
  } else {
    # BUGFIX: this branch previously passed `alight = "right"` (typo), so the
    # rolling window was not right-aligned like the annualized branch.
    sharpe_tibble<-period_returns_tibble_obj %>% group_by(symbol) %>% tq_mutate(select = rets_col_nm,
                                                                                mutate_fun = rollapply,
                                                                                width = num_periods,
                                                                                align = "right",
                                                                                by.column = FALSE,
                                                                                FUN = function(x) mean(x)/sd(x),
                                                                                col_rename = c(new_col_nm))
  }
  return(sharpe_tibble)
}
#' get_period_low_to_next_period_high_sharpe
#'
#' Rolling Sharpe ratio of the return between period t low and period t+1
#' high, computed per symbol over a moving window.
#'
#' @param period_low_to_next_period_high_returns_tibble tibble with a
#'   `symbol` column and a `lo.nxtHi.Ret` return column.
#' @param window_size size of the rolling window.
#'
#' @return the input tibble with a new `lo.nxtHi.Sharpe` column.
#' @export
#'
#' @examples
get_period_low_to_next_period_high_sharpe<-function(period_low_to_next_period_high_returns_tibble, window_size = 5){
  rolling_sharpe <- function(x) mean(x$lo.nxtHi.Ret) / sd(x$lo.nxtHi.Ret)
  grouped <- group_by(period_low_to_next_period_high_returns_tibble, symbol)
  period_low_to_next_period_high_sharpe_tibble <- tq_mutate(grouped,
                                                            mutate_fun = rollapply,
                                                            width = window_size,
                                                            FUN = rolling_sharpe,
                                                            by.column = FALSE,
                                                            col_rename = "lo.nxtHi.Sharpe")
  return(period_low_to_next_period_high_sharpe_tibble)
}
#' get_period_close_to_next_period_open_sharpe
#'
#' Rolling Sharpe ratio of the return between the close at period t and
#' the open at period t+1 — i.e. the sharpe of overnight returns —
#' computed per symbol over a moving window.
#'
#' @param period_close_to_next_period_open_returns_tibble tibble with a
#'   `symbol` column and an `adjCl.nxtOp.Ret` return column.
#' @param window_size size of the rolling window.
#'
#' @return the input tibble with a new `adjCl.nxtOp.Sharpe` column.
#' @export
#'
#' @examples
get_period_close_to_next_period_open_sharpe<-function(period_close_to_next_period_open_returns_tibble, window_size = 5){
  rolling_sharpe <- function(x) mean(x$adjCl.nxtOp.Ret) / sd(x$adjCl.nxtOp.Ret)
  grouped <- group_by(period_close_to_next_period_open_returns_tibble, symbol)
  period_close_to_next_period_open_sharpe_tibble <- tq_mutate(grouped,
                                                              mutate_fun = rollapply,
                                                              width = window_size,
                                                              FUN = rolling_sharpe,
                                                              by.column = FALSE,
                                                              col_rename = "adjCl.nxtOp.Sharpe")
  return(period_close_to_next_period_open_sharpe_tibble)
}
#' get_period_high_to_next_period_low_sharpe
#'
#' Rolling Sharpe ratio of returns between the high at period t and the
#' low at period t+1, computed per symbol over a window of length
#' window_size.
#'
#' @param period_high_to_next_period_low_returns_tibble tibble with a
#'   `symbol` column and a `hi.nxtLo.Ret` return column.
#' @param window_size size of the rolling window.
#'
#' @return the input tibble with a new `hi.nxtLo.Sharpe` column.
#' @export
#'
#' @examples
get_period_high_to_next_period_low_rolling_sharpe<-function(period_high_to_next_period_low_returns_tibble, window_size=5){
  # BUGFIX: the pipeline result was previously not assigned, and the
  # function returned the undefined name `period_high_to_next_low_sharpe_tibble`,
  # which raised "object not found" on every call.
  period_high_to_next_low_sharpe_tibble <- period_high_to_next_period_low_returns_tibble %>%
    group_by(symbol) %>%
    tq_mutate(mutate_fun = rollapply,
              width = window_size,
              FUN = function(x) mean(x$hi.nxtLo.Ret)/sd(x$hi.nxtLo.Ret),
              by.column = FALSE,
              col_rename = "hi.nxtLo.Sharpe")
  return(period_high_to_next_low_sharpe_tibble)
}
#' get_period_high_to_next_period_low_sharpe
#'
#' Rolling Sharpe ratio of returns between the high at period t and the
#' low at period t+1, computed per symbol over a window of length
#' window_size.
#'
#' NOTE(review): this is a byte-for-byte duplicate of the definition
#' above; when sourced, this second copy silently overrides the first.
#' Consider deleting one of them.
#'
#' @param period_high_to_next_period_low_returns_tibble tibble with a
#'   `symbol` column and a `hi.nxtLo.Ret` return column.
#' @param window_size size of the rolling window.
#'
#' @return the input tibble with a new `hi.nxtLo.Sharpe` column.
#' @export
#'
#' @examples
get_period_high_to_next_period_low_rolling_sharpe<-function(period_high_to_next_period_low_returns_tibble, window_size=5){
  # BUGFIX: the pipeline result was previously not assigned, and the
  # function returned the undefined name `period_high_to_next_low_sharpe_tibble`,
  # which raised "object not found" on every call.
  period_high_to_next_low_sharpe_tibble <- period_high_to_next_period_low_returns_tibble %>%
    group_by(symbol) %>%
    tq_mutate(mutate_fun = rollapply,
              width = window_size,
              FUN = function(x) mean(x$hi.nxtLo.Ret)/sd(x$hi.nxtLo.Ret),
              by.column = FALSE,
              col_rename = "hi.nxtLo.Sharpe")
  return(period_high_to_next_low_sharpe_tibble)
}
|
528164cd8215694371e77f4e7d6f52453e43efc9
|
c38a1efc9e7f53b6a9754b55a770f505ddacda8c
|
/qNewtonNLS/Analysis/analysis_fn.R
|
0cd0b123aaea74e55990a7e3e9db4335b1bcc1ba
|
[] |
no_license
|
bsh2/Experiments
|
0b15cf996304b89386690c58acadb106054ab11e
|
a7434426db486542d3155e4b1bea0c3bf05913dc
|
refs/heads/main
| 2023-04-20T12:21:09.515896
| 2021-05-20T12:05:20
| 2021-05-20T12:05:20
| 369,193,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,264
|
r
|
analysis_fn.R
|
# Sum-of-squared-residuals loss for the standard fitness-fatigue model.
#
# pars: c(p0, k1, tau1, k2, tau2) -- baseline performance, fitness
#       gain/time-constant, fatigue gain/time-constant.
# standardCriterion: observed performance values.
# loads: daily training loads for days 1..length(loads).
# standardIndex: day index of each criterion observation.
#
# Returns the scalar sum of squared residuals between modelled and
# observed performance.
standardLoss <- function(pars, standardCriterion, loads, standardIndex){
  days <- seq_along(loads)
  predict_day <- function(t) {
    past <- seq_len(t)
    fitness <- pars[2] * sum(loads[past] * exp(-(t - days[past]) / pars[3]))
    fatigue <- pars[4] * sum(loads[past] * exp(-(t - days[past]) / pars[5]))
    pars[1] + fitness - fatigue
  }
  predicted <- vapply(standardIndex, predict_day, numeric(1))
  sum((predicted - standardCriterion)^2)
}
# Evaluate the standard fitness-fatigue model over a load series.
#
# pars: c(p0, k1, tau1, k2, tau2) -- baseline, fitness gain/decay,
#       fatigue gain/decay.
# loads: daily training loads for days 1..length(loads).
# returnObject: "performance", "fitness", "fatigue", or "all"
#   (a data frame with all three); anything else returns NULL.
standardCompute <- function(pars, loads, returnObject = "all"){
  pars <- as.numeric(pars)
  n_days <- length(loads)
  day <- seq_len(n_days)
  fitness <- numeric(n_days)
  fatigue <- numeric(n_days)
  for (t in day) {
    past <- seq_len(t)
    # Exponentially-decayed impulse responses of all loads up to day t
    fitness[t] <- pars[2] * sum(loads[past] * exp(-(t - day[past]) / pars[3]))
    fatigue[t] <- pars[4] * sum(loads[past] * exp(-(t - day[past]) / pars[5]))
  }
  p <- pars[1] + fitness - fatigue
  switch(returnObject,
         performance = p,
         fitness = fitness,
         fatigue = fatigue,
         all = data.frame("fitness" = fitness, "fatigue" = fatigue,
                          "performance" = p))
}
# Ground-truth parameters for the standard model: baseline 100,
# fitness gain/decay 0.72/28.5 days, fatigue gain/decay 1.2/8.6 days.
standard_true_pars <- c(100, 0.72, 28.5, 1.2, 8.6)
# Training loads come from loads.csv (column `load`) in the working directory
loads <- read.csv("loads.csv")
loads <- loads$load
# Synthetic criterion: model performance at every day...
standardCriterion1 <- standardCompute(standard_true_pars, loads, returnObject = "performance")
standardIndex1 <- seq(1, length(standardCriterion1), by = 1)
# ...and thinned versions sampled every 2nd and every 3rd day
standardCriterion2 <- standardCriterion1[seq(1,length(standardCriterion1),by=2)]
standardIndex2 <- seq(1, length(standardCriterion1), by = 2)
standardCriterion3 <- standardCriterion1[seq(1,length(standardCriterion1),by=3)]
standardIndex3 <- seq(1, length(standardCriterion1), by = 3)
# Sum-of-squared-residuals loss for the Calvert (delayed-fitness) model.
#
# pars: c(p0, k1, tau1, tau3, k2, tau2) -- baseline, fitness gain with
#       two time constants (impulse response exp(-t/tau1) - exp(-t/tau3)),
#       fatigue gain/decay.
# calvertCriterion: observed performance values.
# loads: daily training loads for days 1..length(loads).
# calvertIndex: day index of each criterion observation.
#
# Returns the scalar sum of squared residuals.
calvertLoss <- function(pars, calvertCriterion, loads, calvertIndex){
  days <- seq_along(loads)
  predict_day <- function(t) {
    lag <- t - days[seq_len(t)]
    w <- loads[seq_len(t)]
    fitness <- pars[2] * sum(w * (exp(-lag / pars[3]) - exp(-lag / pars[4])))
    fatigue <- pars[5] * sum(w * exp(-lag / pars[6]))
    pars[1] + fitness - fatigue
  }
  predicted <- vapply(calvertIndex, predict_day, numeric(1))
  sum((predicted - calvertCriterion)^2)
}
# Evaluate the Calvert (delayed-fitness) model over a load series.
#
# pars: c(p0, k1, tau1, tau3, k2, tau2); fitness uses the two-exponential
#       impulse response exp(-t/tau1) - exp(-t/tau3), fatigue a single
#       exponential with time constant tau2.
# loads: daily training loads for days 1..length(loads).
# returnObject: "performance", "fitness", "fatigue", or "all"
#   (a data frame with all three); anything else returns NULL.
calvertCompute <- function(pars, loads, returnObject = "all"){
  pars <- as.numeric(pars)
  n_days <- length(loads)
  day <- seq_len(n_days)
  fitness <- numeric(n_days)
  fatigue <- numeric(n_days)
  for (t in day) {
    lag <- t - day[seq_len(t)]
    w <- loads[seq_len(t)]
    fitness[t] <- pars[2] * sum(w * (exp(-lag / pars[3]) - exp(-lag / pars[4])))
    fatigue[t] <- pars[5] * sum(w * exp(-lag / pars[6]))
  }
  p <- pars[1] + fitness - fatigue
  switch(returnObject,
         performance = p,
         fitness = fitness,
         fatigue = fatigue,
         all = data.frame("fitness" = fitness, "fatigue" = fatigue,
                          "performance" = p))
}
# Ground-truth parameters for the Calvert model (uses `loads` read above)
calvert_true_pars <- c(100, 0.72, 32.5, 4.3, 1.05, 8.6)
# Synthetic criterion at every day, plus versions thinned to every 2nd/3rd day
calvertCriterion1 <- calvertCompute(calvert_true_pars, loads, returnObject = "performance")
calvertIndex1 <- seq(1, length(calvertCriterion1), by = 1)
calvertCriterion2 <- calvertCriterion1[seq(1,length(calvertCriterion1),by=2)]
calvertIndex2 <- seq(1, length(calvertCriterion1), by = 2)
calvertCriterion3 <- calvertCriterion1[seq(1,length(calvertCriterion1),by=3)]
calvertIndex3 <- seq(1, length(calvertCriterion1), by = 3)
|
d38d1fa392a7df78c6396d49815ea5718fdd87d2
|
7e79dc0c40e45872ce00e7921dc110c2891fadea
|
/clase 02.R
|
4ede99738f7f425681d03295aeaaa783e018e253
|
[] |
no_license
|
pmtempone/DM_Ec_Fin
|
4f29675de38d2c0727abc47c721a9434d6949141
|
13ca54e7e96b1947221067daa0e669067fad8eeb
|
refs/heads/master
| 2020-04-17T22:20:32.818267
| 2016-12-04T16:29:10
| 2016-12-04T16:29:10
| 66,108,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
clase 02.R
|
# ---- Load libraries (Carga de librerias) ----
# BUGFIX: the previous header "-----#...#-----" left a chain of unary
# minus operators dangling; the parser attached them to the following
# library() call, which errored at run time.
library(RPostgreSQL)
library(dplyr)
# ---- Class 2 (clase 2) ----
# BUGFIX: the previous header "----#clase 2-----" left dangling unary
# minus operators that attached to the following assignment and broke it.

# Lift of a rule/segment: P(positive | segment) / P(positive overall),
# with the overall base rate fixed at 617 positives out of 194289 cases.
#
# pos: number of positives captured by the segment.
# tot: total number of cases in the segment.
#
# Returns the lift (1 = no better than the base rate).
lift_dm <- function (pos,tot) {
  (pos / tot) / (617 / 194289)
}
# P1: tenure (antiguedad) < 12
(52/9529)/(617/194289) # high lift
# profit
52*8000-250*9529
# P not 1
(565/(670+183525))/(617/194289)
# visa account status > 10
(79/558)/(617/194289) # lift ~44
lift_dm(79,558)
# profits
79*8000-250*558
# age > 35
# ---- Basic tree ----
# (The original "----#arbol basico----" marker left dangling unary minuses
# that joined with the assignment below and errored at runtime; fixed as a
# plain comment.)
visa_cuenta_estado <- dbGetQuery(con,"select visa_cuenta_estado,clase,count(*)
from fct_prod_premium_201604
group by visa_cuenta_estado,clase
order by 1,2;")
# Replace April dataset (file had been uploaded incorrectly)
producto_premium_201604 <-dbGetQuery(con,"SELECT *
FROM public.fct_prod_premium_201604;")
|
1e70521974b8a7d4bc00a2df61cf7d8f4a5be330
|
57399b29b38f1d72bca228495f4da6d3dab0b0ae
|
/data/geoht/ncep_rf2_geoht_data_process.R
|
a8d316bc6fcf7bc24072e2357d04cfa3e77c226c
|
[] |
no_license
|
zpb4/ms_project1
|
db1b25f6c09c0b3e7a627c8585168d42a85fc30c
|
4ebaad0da991a3e4fc0febbb546fc04d486a00b0
|
refs/heads/master
| 2020-06-22T19:01:52.968077
| 2019-08-01T16:56:38
| 2019-08-01T16:56:38
| 197,782,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
ncep_rf2_geoht_data_process.R
|
#2) Process .nc files into RDS
#NOTE: Downloaded files from 1 Dec 84 to 31 Mar 19, 12539 days
# NOTE(review): this chunk calls nc_open/ncvar_get/nc_close (ncdf4) without a
# visible library(ncdf4) — confirm it is attached earlier in the file.
ht<-500
ef<-'mean'#c('cf','mean')
# Extract one slice per selected forecast hour from the high- and
# low-resolution NCEP reforecast files and cache each slice as an .rds file.
for(k in 1:length(ef)){
nc1<-nc_open(paste("data/geoht/raw/hgt",ht,"_ncep_",ef[k],"_19841201_20190331_hres.nc",sep=""))
nc2<-nc_open(paste("data/geoht/raw/hgt",ht,"_ncep_",ef[k],"_19841201_20190331_lres.nc",sep=""))
fhr1<-ncvar_get(nc1,varid="fhour")
fhr2<-ncvar_get(nc2,varid="fhour")
# Index offsets differ between the 'mean' and 'cf' products for the first
# few entries — presumably due to file layout; verify against the raw files.
if(ef[k]=='mean') idx1<-c(2,6,10,seq(14,length(fhr1),4)) else
idx1<-c(2,6,11,seq(15,length(fhr1),4))
idx2<-seq(3,length(fhr2),4)
for(i in 1:length(idx1)){
dat1<-ncvar_get(nc1,nc1$var[[3]],start=c(1,1,1,idx1[i],1),count = c(-1,-1,1,1,-1))
dat2<-ncvar_get(nc2,nc2$var[[3]],start=c(1,1,1,idx2[i],1),count = c(-1,-1,1,1,-1))
lab1<-paste(ht,ef[k],fhr1[idx1[i]],sep="_")
lab2<-paste(ht,ef[k],fhr2[idx2[i]],sep="_")
saveRDS(dat1,paste('data/geoht/raw/ncep_rf2_',lab1,'.rds',sep=""))
saveRDS(dat2,paste('data/geoht/raw/ncep_rf2_',lab2,'.rds',sep=""))
rm(dat1,dat2)
}
nc_close(nc1);nc_close(nc2)
rm(nc1,nc2)
}
#b. Create combined arrays
# NOTE(review): the files written above carry an 'ncep_rf2_' prefix, but the
# reads below ('500_cf_6.rds' and paste('data/geoht/raw/',ht,...)) omit it —
# confirm whether the un-prefixed files come from another step or this is a bug.
source('output/index/array_rotate.r')
n<-16
fhr<-seq(6,(n*24),24)
ht<-500
ef<-c('cf','mean')
d<-readRDS('data/geoht/raw/500_cf_6.rds')
dims<-c(dim(d)[1],dim(d)[2],n,dim(d)[3])
rm(d)
# Stack the per-forecast-hour slices into a 4-D array (two spatial dims x
# forecast hour x day — axis meaning presumed; TODO confirm), rotate for
# georeferencing, and save one array per ensemble product.
for(k in 1:length(ef)){
hgt_array<-array(NA,dims)
for (j in 1:length(fhr)){
hgt<-readRDS(paste('data/geoht/raw/',ht,'_',ef[k],'_',fhr[j],'.rds',sep=""))
hgt_array[,,j,]<-hgt
rm(hgt)
}
hgt_array<-array_rotate(hgt_array,1,2) #rot type 1, 90 left for correct georef
saveRDS(hgt_array,paste('data/geoht/ncep_rf2_',ef[k],'_geoht_',ht,'.rds',sep=""))
}
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace as a side
# effect — avoid in shared scripts.
rm(list=ls())
|
4ba49cb1970372b91b45f8252bc35f8cfc25b357
|
d175703f8d1de8846380ae92af020ae70ed78843
|
/global.R
|
6d45cb13843e75e16f2b8d5b9a05bdbbc6e07c53
|
[] |
no_license
|
jordaoalves/Analisar-Gratificacoes---IPERN
|
3cc54846144f571c5e207bbaf800f32f8b4e0f7a
|
298f85b85e7df2e73233e4c19ba11c5f2b5fe1ff
|
refs/heads/master
| 2022-04-24T14:22:16.713522
| 2020-04-27T03:33:45
| 2020-04-27T03:33:45
| 259,187,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
global.R
|
# Shiny app globals: attach packages and source every helper script once at
# startup so they are shared across the app. Load order is kept as-is because
# library() attach order determines which package masks which.
library(shinydashboard)
library("shinyjs",warn.conflicts = FALSE)
library("stats",warn.conflicts = FALSE)
library("shiny",warn.conflicts = FALSE)
library("shinyWidgets",warn.conflicts = FALSE)
library(dplyr)
library(pdftools)
library(tabulizer)
library(tidyverse)
library(stringi)
library(stringr)
library(rmarkdown)
library(lubridate)
library(DT)
# Project helpers (Portuguese names suggest PDF readers and table builders —
# confirm against each sourced file).
source('leitorPDF9501.R')
source('leitorPDF.R')
source('lerPdf9500.R')
source('lerPdf01.R')
source('lerPdf.R')
source('tabelaValoresIncidentes9501.R')
source('tabelaContribuicaoTotalCompleta9501.R')
source('tabelaValoresIncidentes.R')
source('tabelaContribuicaoTotalCompleta.R')
source('escreveDataPorExtenso.R')
source('contabilizaDatas.R')
source('mensagemRubrica.R')
# Leftover local-machine configuration, kept commented out:
#diretorio <- "C:/Users/Jordão Alves/Desktop/analisarAtestadoGratificacao"
#anosFolha9501 <- c(1995,1996,1997,1998,1999,2000)
|
c9dc7e2ee4f44bcdfc89684dedeb87c901abb242
|
d17028a361bd8af0e1b30b0450d8eed299f5fa82
|
/man/expr.dev.test.Rd
|
8b5931840eccff45576e90418887d7c539b69684
|
[] |
no_license
|
cran/GlobalDeviance
|
85518cc4350920dd25b425ab19f1758653fef96e
|
c198e13686ea5a0c4ee81f955430f0bf0e79266a
|
refs/heads/master
| 2016-09-06T01:38:12.749061
| 2013-09-20T00:00:00
| 2013-09-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,073
|
rd
|
expr.dev.test.Rd
|
\name{expr.dev.test}
\alias{expr.dev.test}
\title{Deviance Test}
\description{
Deviance permuation test.
}
\usage{
expr.dev.test(xx, formula.full, formula.red = NULL, D.red = NULL, model.dat,
test.vars, glm.family, perm = 100, method = c("chisqstat", "permutation"),
cf="fisher", adjust=FALSE, snowfall.args=list(parallel=FALSE),
snowfall.seed, save.stat.data = FALSE,
file.save = "GlobalDeviance_statistic_Tperm_and_Tobs.Rdata",
use.save.stat.data = FALSE,
file.use = "GlobalDeviance_statistic_Tperm_and_Tobs.Rdata")
}
\arguments{
\item{xx}{Dataset (variables x observations/patients).}
\item{formula.full}{Formula from the full model.}
\item{formula.red}{Formula from the reduced model.}
\item{D.red}{Design matrix of the reduced model.}
\item{model.dat}{Dataset with covariables (observations/patients x covariables).}
\item{test.vars}{Variables or groups of variables to test.}
\item{glm.family}{Family for the regression.}
\item{perm}{Number of permutations.}
\item{method}{Method for the calculation of the p-value.}
\item{cf}{Combining function for the partial tests.}
\item{adjust}{Method for p-value adjustment. Not implemented.}
\item{snowfall.args}{A list of parameters used to initialise parallel execution with package \code{snowfall}.}
\item{snowfall.seed}{Start seed.}
\item{save.stat.data}{Logical value, default is \code{FALSE}. Save the permutation and original test statistic.}
\item{file.save}{File name for saving.}
\item{use.save.stat.data}{Logical value, default is \code{FALSE}. Use the saved permutation and original test statistic.}
\item{file.use}{File name for data loading.}
}
\value{
A list
\item{\code{method}}{Method for the calculation of the p-value.}
\item{\code{number.of.variables}}{Number of variables.}
\item{\code{number.of.permutations}}{Number of permutations.}
\item{\code{formula.full}}{Formula from the full model.}
\item{\code{formula.red}}{Formula from the reduced model.}
\item{\code{test}}{Test results.}
\item{\code{data.perm}}{Permutation test statistic.}
\item{\code{data.original}}{Original test statistic.}
\item{\code{test.vars}}{Variables or groups of variables to test.}
}
\author{Frederike Fuhlbrueck \email{f.fuhlbrueck@googlemail.com}}
\seealso{
See \code{\link{PermTest}}, \code{\link{varwiselogLik}} and \code{\link{Rossi}}.
}
\examples{
\dontrun{
### Example 1: poisson random data
set.seed(6666)
n<-100
Y1<-c(rpois(n, 1))
Y2<-c(rpois(n/2, 1), rpois(n/2, 10))
A<-rnorm(n)
B<-c(rep(1, n/2), rep(0, n/2)) # group variable
C<-rpois(n, 1)
test.variables<-list("Y1", "Y2", c("Y1", "Y2"))
names(test.variables)<-c("Y1", "Y2", "Y1, Y2")
t.random<-expr.dev.test(xx=t(data.frame(Y1, Y2)), formula.full=~ A + B + C,
formula.red=~ A + C, model.dat=data.frame(A, B, C), test.vars=test.variables,
glm.family=poisson(link="log"), perm=1000, method="permutation", cf="fisher",
snowfall.args=list(parallel=TRUE), snowfall.seed=54321)
summary(t.random, digits=3)
### Example 2: data set Rossi
data(Rossi)
# Covariables (patients x covariables)
model.dat<-Rossi[, c("arrest", "fin", "wexp")]
str(model.dat)
# data (variables/genes x patients)
xx<-rbind(t(t(t(Rossi[, c("prio", "n.work.weeks")]))), rpois(432, 1))
rownames(xx)<-c("prio", "n.work.weeks", "random")
formula.full<- ~ arrest + fin + wexp
formula.red<- ~ arrest + fin
test.vars<-list("prio", "n.work.weeks", "random", c("prio", "n.work.weeks"),
c("prio", "n.work.weeks", "random"))
names(test.vars)<-c("prio", "n.work.weeks", "random", "prio+n.work.weeks",
"prio+n.work.weeks+random")
set.seed(54321)
t.rossi1<-expr.dev.test(xx=xx, formula.full=formula.full, formula.red=formula.red,
D.red=NULL, model.dat, test.vars=test.vars, glm.family=poisson(link="log"),
perm=100, method="permutation", cf="fisher")
t.rossi2<-expr.dev.test(xx=xx, formula.full=formula.full, formula.red=formula.red,
D.red=NULL, model.dat, test.vars=test.vars, glm.family=poisson(link="log"),
perm=100, method="chisqstat", cf="fisher")
summary(t.rossi1, digits=2)
summary(t.rossi2, digits=3)
}
}
|
8a61bba2527ba2ab634a00ccc38922a1d8f87c6b
|
c6e8ce84341637f680872634e3c87bece43cef22
|
/dmel_comparison.R
|
99407c9a43f1b354bfb10627f804a803471f3547
|
[] |
no_license
|
Cy1614/GIA
|
bb2c8664a0b04ff1e31e00b0ef4c77c835c8bb07
|
74e9a0d5099f24fdc3e674c2cda91927518b968b
|
refs/heads/master
| 2021-08-14T22:52:24.252108
| 2017-11-16T23:14:01
| 2017-11-16T23:14:01
| 109,289,065
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,349
|
r
|
dmel_comparison.R
|
library(rtracklayer)
# Accumulators declared but never filled anywhere in this chunk — the
# colnames() calls on them later in the file error at runtime.
plot_data<-c()
gtf_data<-c()
# One row per species: GENSCAN output file, FlyBase GTF, species tag.
file_mat<-matrix(c("genscan_dmoj_clean.txt","genscan_dgri_clean.txt","genscan_dvir_clean.txt","dmoj-all-r1.04.gtf","dgri-all-r1.05.gtf","dvir-all-r1.06.gtf","dmoj","dgri","dvir"),,3)
# For each species: clean the GENSCAN table, collapse exon rows into per-gene
# regions, and write the coordinates to <species>_original_.txt.
# NOTE(review): relies on fread (data.table), mclapply (parallel) and the
# project helper genscan_gene_region_finder(), none of which are attached in
# this chunk — confirm they are loaded earlier in the file.
for( i in 1:3){
#i<-1
pure_gene_out<-fread(file_mat[i,1])
# Rows with strand "-" have Begin/End swapped relative to "+" rows; swap so
# Begin <= End throughout.
neg_begin<-pure_gene_out[pure_gene_out$S=="-"]$Begin
neg_end<-pure_gene_out[pure_gene_out$S=="-"]$End
pure_gene_out[pure_gene_out$S=="-"]$End<-neg_begin
pure_gene_out[pure_gene_out$S=="-"]$Begin<-neg_end
# Drop promoter features; keep only exon-level rows.
promoter_loc<-grep("Prom",pure_gene_out$Type)
pure_gene_out<-pure_gene_out[-promoter_loc]
# Gn.Ex encodes gene.exon as a decimal; split on '.' to get the gene index.
gene_Ex_tab<-read.table(text=sprintf("%.4f", pure_gene_out$Gn.Ex),sep=".")
pure_gene_out$unique_ID<-paste(pure_gene_out$Scaffold_ID,gene_Ex_tab[,1],sep="_")
# Keep only confident predictions (P > 0.5).
passed_genscan_out<-pure_gene_out[pure_gene_out$P>0.5]
#passed_genscan_out<-pure_gene_out
gene_ex_list <- split(passed_genscan_out, passed_genscan_out$unique_ID)
gene_region_data<-mclapply(gene_ex_list,mc.cores = getOption("mc.cores", 4L),function(x)genscan_gene_region_finder(x))
gene_region_mat<-do.call(rbind,gene_region_data)
rownames(gene_region_mat)<-names(gene_region_data)
gene_region_df<-as.data.frame(as.matrix(gene_region_mat),stringsAsFactors=F)
colnames(gene_region_df)<-c("scafold_ID", "strand","start","end","unique_ID")
#gtf_file<-import(file_mat[i,2])
#gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
#cm_gene_region_df<-gene_region_df[which(gene_region_df$scafold_ID%in%gene_gtf_file@seqnames),]
#genscan_gene_region_gr<-GRanges(cm_gene_region_df$scafold_ID,strand=cm_gene_region_df$strand,IRanges(as.numeric(cm_gene_region_df$start),as.numeric(cm_gene_region_df$end)),cm_gene_region_df$unique_ID)
#genscan_gene_overlap<-findOverlaps(genscan_gene_region_gr,gene_gtf_file)
#write.table(as.data.frame(genscan_gene_region_gr[genscan_gene_overlap@from]),paste(file_mat[i,3],"overlap",".txt",sep="_"),quote=F,col.names = T,row.names=F,sep="\t")
write.table(gene_region_df,paste(file_mat[i,3],"original",".txt",sep="_"),quote=F,col.names = T,row.names=F,sep="\t")
}
# NOTE(review): plot_data and gtf_data are initialised as c() and never
# filled, so the original head()/colnames() calls here failed at runtime
# ("attempt to set 'colnames' on an object with less than two dimensions").
# Disabled until the accumulators are actually populated.
#head(plot_data)
#colnames(plot_data)<-c("genscan","spec")
#colnames(gtf_data)<-c("gtf","spec")
# Quick look at the gene-length distribution of the last species processed.
plot(density(log10(as.numeric(gene_region_df$end)-as.numeric(gene_region_df$start))))
dev.list()
# Figure: length distributions, FlyBase annotations vs GENSCAN calls.
# (The 'distirbution' typo in the output file name is preserved on purpose —
# downstream steps may reference it.)
pdf("fb_genscan_original_length_distirbution.pdf",height=6,width=8)
dev.list()
gtf_file<-import("dmel-all-r6.18.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
plot(density(log10(as.data.frame(gene_gtf_file)$width)),col="blue",ylim=c(0,0.9),xlim=c(1.5,5.5),main="Distribution of gene length",xlab="log10(gene length)")
gtf_file<-import("dmoj-all-r1.04.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="turquoise1")
gtf_file<-import("dgri-all-r1.05.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="steelblue1")
gtf_file<-import("dvir-all-r1.06.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="navy")
#dmoj<-read.delim("dmoj_original_.txt",stringsAsFactors = F)
#lines(density(log10(dmoj$end-dmoj$start)),col="magenta1")
#dgri<-read.delim("dgri_original_.txt",stringsAsFactors = F)
#lines(density(log10(dgri$end-dgri$start)),col="mediumvioletred")
#dvir<-read.delim("dvir_original_.txt",stringsAsFactors = F)
#lines(density(log10(dvir$end-dvir$start)),col="red")
# NOTE(review): this second panel selects "exon" features and plots GENSCAN
# per-row (exon-level) lengths, yet the title/xlab say "gene length" — the
# labels look mislabelled; confirm intent before renaming.
gtf_file<-import("dmel-all-r6.18.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
plot(density(log10(as.data.frame(gene_gtf_file)$width)),col="blue",ylim=c(0,2),xlim=c(0.5,4.5),main="Distribution of gene length",xlab="log10(gene length)")
gtf_file<-import("dmoj-all-r1.04.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="turquoise1")
gtf_file<-import("dgri-all-r1.05.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="steelblue1")
gtf_file<-import("dvir-all-r1.06.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="navy")
dmoj<-read.delim("genscan_dmoj_clean.txt",stringsAsFactors = F)
dmoj<-dmoj[dmoj$P>0.5&!is.na(dmoj$P),]
lines(density(log10(abs(dmoj$End-dmoj$Begin))),col="magenta1")
dgri<-read.delim("genscan_dgri_clean.txt",stringsAsFactors = F)
dgri<-dgri[dgri$P>0.5&!is.na(dgri$P),]
lines(density(log10(abs(dgri$End-dgri$Begin))),col="mediumvioletred")
dvir<-read.delim("genscan_dvir_clean.txt",stringsAsFactors = F)
dvir<-dvir[dvir$P>0.5&!is.na(dvir$P),]
lines(density(log10(abs(dvir$End-dvir$Begin))),col="red")
legend("topright", legend=c("fb dmel","fb dmoj","fb dgri","fb dvir", "genscan dmoj","genscan dgri", "genscan dvir"),col=c("blue", "turquoise1","steelblue1","navy","magenta1","mediumvioletred","red"), lty=1, cex=0.8)
dev.off()
#-----------------------exon--------------------
# For each species: rebuild the cleaned GENSCAN gene regions, keep only genes
# overlapping a FlyBase gene, then intersect their exon rows with deduplicated
# FlyBase exons and write <species>_exon_overlap_.txt.
file_mat<-matrix(c("genscan_dmoj_clean.txt","genscan_dgri_clean.txt","genscan_dvir_clean.txt","dmoj-all-r1.04.gtf","dgri-all-r1.05.gtf","dvir-all-r1.06.gtf","dmoj","dgri","dvir"),,3)
i<-1
for(i in 1:3){
pure_gene_out<-fread(file_mat[i,1])
# Swap Begin/End on minus-strand rows so Begin <= End (same cleanup as above).
neg_begin<-pure_gene_out[pure_gene_out$S=="-"]$Begin
neg_end<-pure_gene_out[pure_gene_out$S=="-"]$End
pure_gene_out[pure_gene_out$S=="-"]$End<-neg_begin
pure_gene_out[pure_gene_out$S=="-"]$Begin<-neg_end
promoter_loc<-grep("Prom",pure_gene_out$Type)
pure_gene_out<-pure_gene_out[-promoter_loc]
gene_Ex_tab<-read.table(text=sprintf("%.4f", pure_gene_out$Gn.Ex),sep=".")
pure_gene_out$unique_ID<-paste(pure_gene_out$Scaffold_ID,gene_Ex_tab[,1],sep="_")
passed_genscan_out<-pure_gene_out[pure_gene_out$P>0.5]
gene_ex_list <- split(passed_genscan_out, passed_genscan_out$unique_ID)
gene_region_data<-mclapply(gene_ex_list,mc.cores = getOption("mc.cores", 4L),function(x)genscan_gene_region_finder(x))
gene_region_mat<-do.call(rbind,gene_region_data)
rownames(gene_region_mat)<-names(gene_region_data)
gene_region_df<-as.data.frame(as.matrix(gene_region_mat),stringsAsFactors=F)
colnames(gene_region_df)<-c("scafold_ID", "strand","start","end","unique_ID")
gtf_file<-import(file_mat[i,2])
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
# Restrict to scaffolds present in the annotation before building ranges.
cm_gene_region_df<-gene_region_df[which(gene_region_df$scafold_ID%in%gene_gtf_file@seqnames),]
genscan_gene_region_gr<-GRanges(cm_gene_region_df$scafold_ID,strand=cm_gene_region_df$strand,IRanges(as.numeric(cm_gene_region_df$start),as.numeric(cm_gene_region_df$end)),cm_gene_region_df$unique_ID)
genscan_gene_overlap<-findOverlaps(genscan_gene_region_gr,gene_gtf_file)
overlap_gene_ID<-genscan_gene_region_gr[genscan_gene_overlap@from]$`cm_gene_region_df$unique_ID`
overlap_gene_loc<-which(passed_genscan_out$unique_ID%in%overlap_gene_ID)
overlap_gene_data<-passed_genscan_out[overlap_gene_loc,]
overlap_gene_ex_gr<-GRanges(overlap_gene_data$Scaffold_ID,strand=overlap_gene_data$S,IRanges(overlap_gene_data$Begin,overlap_gene_data$End),overlap_gene_data$unique_ID)
exon_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
exon_gtf_df<-as.data.frame(exon_gtf_file)
# Deduplicate exons shared between transcripts (key: first three columns,
# presumably seqnames/start/end — verify against the GTF layout).
unique_chr_loc<-apply(exon_gtf_df,1,function(x)paste(x[1:3],collapse = "_"))
dup_loc<-which(duplicated(unique_chr_loc))
no_dup_exon_gtf_file<-exon_gtf_file[-dup_loc]
exon_overlap<-findOverlaps(overlap_gene_ex_gr,no_dup_exon_gtf_file)
write.table(as.data.frame(overlap_gene_ex_gr[exon_overlap@from]),paste(file_mat[i,3],"exon","overlap",".txt",sep="_"),quote=F,col.names = T,row.names=F,sep="\t")
}
# Figure: exon-length distributions, FlyBase vs the GENSCAN exon-overlap
# tables written above. NOTE(review): 'Distirbution'/'distirbution' typos in
# the titles and file names below are as in the original.
dev.list()
pdf("fb_genscan_exon_length_distirbution.pdf",height=6,width=8)
dev.list()
gtf_file<-import("dmel-all-r6.18.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
plot(density(log10(as.data.frame(gene_gtf_file)$width)),col="blue",ylim=c(0,1.3),xlim=c(1,4),main="Distirbution of exon length",xlab="log10(exon length)")
gtf_file<-import("dmoj-all-r1.04.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="turquoise1")
gtf_file<-import("dgri-all-r1.05.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="steelblue1")
gtf_file<-import("dvir-all-r1.06.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="navy")
dmoj<-read.delim("dmoj_exon_overlap_.txt",stringsAsFactors = F)
lines(density(log10(dmoj$width)),col="magenta1")
dgri<-read.delim("dgri_exon_overlap_.txt",stringsAsFactors = F)
lines(density(log10(dgri$width)),col="mediumvioletred")
dvir<-read.delim("dvir_exon_overlap_.txt",stringsAsFactors = F)
lines(density(log10(dvir$width)),col="red")
legend("topright", legend=c("fb dmel","fb dmoj","fb dgri","fb dvir", "genscan dmoj","genscan dgri", "genscan dvir"),
col=c("blue", "turquoise1","steelblue1","navy","magenta1","mediumvioletred","red"), lty=1, cex=0.8)
dev.off()
#--------------------blastn_length--------------
# Figure: gene-length distributions, FlyBase vs BLASTN-derived GTFs.
# NOTE(review): reads from an absolute local path (/Users/...) — not portable.
dev.list()
pdf("fb_blastn_gene_length_distirbution.pdf",height=6,width=8)
dev.list()
gtf_file<-import("dmel-all-r6.18.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
plot(density(log10(as.data.frame(gene_gtf_file)$width)),col="blue",ylim=c(0,0.9),xlim=c(1.5,5.5),main="Distirbution of gene length",xlab="log10(gene length)")
gtf_file<-import("dmoj-all-r1.04.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="turquoise1")
gtf_file<-import("dgri-all-r1.05.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="steelblue1")
gtf_file<-import("dvir-all-r1.06.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="gene"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="navy")
dmoj<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dmoj_gene.gtf")
lines(density(log10(dmoj$V5-dmoj$V4)),col="magenta1")
dgri<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dgri_gene.gtf")
lines(density(log10(dgri$V5-dgri$V4)),col="mediumvioletred")
dvir<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dvir_gene.gtf")
lines(density(log10(dvir$V5-dvir$V4)),col="red")
legend("topright", legend=c("fb dmel","fb dmoj","fb dgri","fb dvir", "blastn dmoj","blastn dgri", "blastn dvir"),
col=c("blue", "turquoise1","steelblue1","navy","magenta1","mediumvioletred","red"), lty=1, cex=0.8)
dev.off()
#--------------------blastn_exon_length--------------
# Figure: exon-length distributions, FlyBase vs BLASTN-derived exon GTFs.
dev.list()
pdf("fb_blastn_exon_length_distirbution.pdf",height=6,width=8)
dev.list()
gtf_file<-import("dmel-all-r6.18.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
plot(density(log10(as.data.frame(gene_gtf_file)$width)),col="blue",ylim=c(0,1.3),xlim=c(1,4),main="Distirbution of exon length",xlab="log10(exon length)")
gtf_file<-import("dmoj-all-r1.04.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="turquoise1")
gtf_file<-import("dgri-all-r1.05.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="steelblue1")
gtf_file<-import("dvir-all-r1.06.gtf")
gene_gtf_file<-gtf_file[gtf_file@elementMetadata[,"type"]=="exon"]
lines(density(log10(as.data.frame(gene_gtf_file)$width)),col="navy")
dmoj<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dmoj_exons.gtf")
lines(density(log10(dmoj$V5-dmoj$V4)),col="magenta1")
dgri<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dgri_exons.gtf")
lines(density(log10(dgri$V5-dgri$V4)),col="mediumvioletred")
dvir<-fread("/Users/LawCheukTing/Desktop/gia2_result/blastn_dvir_exons.gtf")
lines(density(log10(dvir$V5-dvir$V4)),col="red")
legend("topright", legend=c("fb dmel","fb dmoj","fb dgri","fb dvir", "blastn dmoj","blastn dgri", "blastn dvir"),
col=c("blue", "turquoise1","steelblue1","navy","magenta1","mediumvioletred","red"), lty=1, cex=0.8)
dev.off()
|
3bcd5ebe419f77311fbcd786fb7669865f6ed3ab
|
6603cf711b61df3ca298c727894d88f759ab7a63
|
/plot1.r
|
1c0e2bcb8252e354e14964f156cf019f0fe5c87e
|
[] |
no_license
|
ThiDur/-Coursera_hopkins_exploratory_data_analysis_wk1
|
b62b43784cb511fe9147ceb8f75a32a14f65d845
|
797a5ee50ef1f77eea9fd6518b51414d83ccc019
|
refs/heads/master
| 2020-05-22T16:52:00.314647
| 2019-05-13T17:45:57
| 2019-05-13T17:45:57
| 186,438,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
plot1.r
|
library(tidyverse)
tbl_power_consumption <- read_csv2(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), na=c("", "NA", "?"), col_types=list(col_date(format='%d/%m/%Y'), col_time(), col_number(), col_number(), col_number(), col_number(), col_number(), col_number(), col_number()))
tbl_power_consumption <- filter(tbl_power_consumption, Date=='2007-02-01' | Date=='2007-02-02')
tbl_power_consumption <- mutate(tbl_power_consumption, datetime=as.POSIXct(paste(Date, " ", Time)))
hist(tbl_power_consumption$Global_active_power / 1000, col='red', main='Global Active Power', xlab='Global Active Power (kilowatts)')
dev.copy(png, file='plot1.png', width=480, height=480)
dev.off()
|
1760f7b55fba575147cbc512cdac720dffd37904
|
ecfac3a7b04856a1b5a57e71210cd5ab0fb83787
|
/Scripts/LiuEtAl-Reanalysis_final_MJ.R
|
a817ca4629acde1101b952b1a4cb99abbceb0937
|
[] |
no_license
|
nemochina2008/Liu_reanalysis
|
5d2f4f6150d02afcdeb50520ad3c3fc8bbe508b8
|
ecb3e708619df1f8ed96ef2a8fae1351a9f2ed07
|
refs/heads/master
| 2021-01-23T23:13:26.630115
| 2015-10-20T13:42:33
| 2015-10-20T13:42:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,925
|
r
|
LiuEtAl-Reanalysis_final_MJ.R
|
#### Preparation and standard Package Loading ####
# NOTE(review): the original script began with rm(list=ls()), which silently
# wipes the caller's entire workspace as a side effect; it has been removed —
# run the script in a fresh R session instead.
library(ggplot2)
library(nlme)
library(MuMIn)
library(mgcv)
library(AICcmodavg)
library(scales)
library(lattice)
library(ncf)
library(plyr)
library(raster)
library(sp)
library(maptools)
#### Data loading and format ####
# Load in the supplementary data from LIU et al. DOI: 10.1111/geb.12113
Liu <- read.csv("Data/Liu_aged.csv")
# Climate rasters; temp is later divided by 10, which suggests bio1 stores
# temperature x10 (WorldClim convention) — TODO confirm.
temp<-raster("Data/bio1.bil")
precip<-raster("Data/bio12.bil")
forest<-raster("Data/Forest.tif")
#do spatial analysis to look at representativeness
#of forests used in our study
# Columns 5/4 are presumably Long/Lat — verify against the CSV header.
Liu_coords<-SpatialPoints(cbind(Liu[,5],Liu[,4]))
Liu_clim<-data.frame(precip=extract(precip,Liu_coords),temp=extract(temp/10,Liu_coords),data="Our data")
#create grid with 0.5 degree resolution
Grid<-expand.grid(x=seq(-180,180,by = 0.5),y=seq(-90,90,by=0.5))
coordinates(Grid)<-c("x", "y")
gridded(Grid) <- TRUE
Forest_climate<-data.frame(precip=extract(precip,Grid),temp=extract(temp,Grid),forest=extract(forest,Grid))
Forest_climate2<-subset(Forest_climate,forest==1)
all_data<-data.frame(precip=as.numeric(Forest_climate2$precip),
temp=as.numeric(Forest_climate2$temp/10),
data="Global data")
#stick these data from sites and all forests together
Climate<-rbind(Liu_clim,all_data)
Climate<-subset(Climate,!is.na(precip)&!is.na(temp))
# Bin climate values into 200 mm precipitation / 1 degree temperature classes.
Climate$Climate_precip_bin<-as.numeric(cut(Climate$precip,
breaks=(seq(min(Climate$precip),max(Climate$precip),by=200)),
labels=seq(min(Climate$precip),max(Climate$precip),by=200)[-1]))
Climate$Climate_temp_bin<-as.numeric(cut(Climate$temp,
breaks=(seq(min(Climate$temp),max(Climate$temp),by=1)),
labels=seq(min(Climate$temp),max(Climate$temp),by=1)[-1]))
ddply(Climate,.(data),summarise,
Temp_perc=(Climate_temp_bin/length(Climate_temp_bin))*100,
Clim_perc=(Climate_precip_bin/length(Climate_precip_bin))*100)
#find convex hull for these data
find_hull <- function(Climate) Climate[chull(Climate$precip,Climate$temp), ]
hulls <- ddply(Climate, "data", find_hull)
# NOTE(review): 'plot' shadows the base generic by name; plot(precip2) below
# still dispatches to the function because R skips non-function bindings, but
# a different name would be clearer.
plot <- ggplot(data = Climate, aes(x = precip, y = temp, colour=data)) +
geom_point(alpha=0.5)+geom_polygon(data=hulls,fill=NA)+
labs(x = "Precipitation", y = "Temperature")+facet_wrap(~data)
plot
bb <- extent(-180, 180, -90, 90)
precip2 <- setExtent(precip, bb, keepres=TRUE)
forest2 <- setExtent(forest, bb, keepres=TRUE)
plot(precip2)
plot(forest)
Precip_mask<-mask(precip2,forest2)
#look at biases in the data that may influence results
#age
theme_set(theme_bw(base_size=12))
# NOTE(review): geom_hline(y=0) uses a long-removed argument form; modern
# ggplot2 requires geom_hline(yintercept=0).
Geom_hist<-ggplot(Liu,aes(x=Age))+geom_histogram()+ylab("number of sites")+xlab("Estimate age (Years)")+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(size=1.5,colour="black",fill=NA))+geom_hline(y=0)
#location
theme_set(theme_bw(base_size=12))
world_map <- map_data("world")#Get world map info
p <- ggplot() + coord_fixed()#Create a base plot
base_world <- p + geom_polygon(data=world_map,aes(x=long,y=lat,group=group),fill="light grey")#Add map to base plot
Location<-base_world + geom_point(data=Liu,aes(x=Long,y=Lat,size=Age),colour="black",alpha=0.2)+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(size=1.5,colour="black",fill=NA),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank())
#### ReAnalysis - Liu original Models ####
# We start modeling and build models similar to those of liu et al (OLS) mentioned in the Appendix
# We considering everything independently to each other but using our random variable structure to account for
# spatial autocorrelation and any systematic differences amongst studies
# The Following part is to show that models with included random factor and taking SAC
# into account the models outperform models without
# Precipitation - a model with a squared term for mean_precip
Precip_model<-lme(AGB~Mean_precip+I(Mean_precip^2),data=Liu,random=~1|dummy,method="ML")
Precip_model_sac<-lme(AGB~Mean_precip+I(Mean_precip^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
# Temperature
Temp_model<-lme(AGB~Mean_T+I(Mean_T^2),data=Liu,random=~1|dummy,method="ML")
Temp_model_sac<-lme(AGB~Mean_T+I(Mean_T^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
# Age
Age_model<-lme(AGB~Age+I(Age^2),data=Liu,random=~1|dummy,method="ML")
Age_model_sac<-lme(AGB~Age+I(Age^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
# AIC comparison of each predictor with/without random structure + SAC.
(liuold_out <- aictab(list(Precip_model,Precip_model_sac,Temp_model,Temp_model_sac,Age_model,Age_model_sac),sort=T,second.ord=F,
modnames=c("AGB - Precip","AGB - Precip + SAC","AGB ~ Temp","AGB - Temp + SAC","AGB - Age","AGB - Age + SAC")) )
write.csv(as.data.frame(liuold_out),"Results/LIUmodels_SAC.csv")
mean(liuold_out$Delta_AIC[3:6])
# Look at spatial autocorrelation in the model residuals for both models
sar.df <- data.frame()
# Precip
ncf.cor_precip<- correlog(Liu$Long, Liu$Lat, resid(Precip_model),increment=50, resamp=50,latlon = T)
ncf.cor_precip_ourmodel<- correlog(Liu$Long, Liu$Lat, resid(Precip_model_sac),increment=50, resamp=50,latlon = T)
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_precip$mean.of.class,correlation=ncf.cor_precip$correlation,type="Precipitation model (with SAR)"))
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_precip_ourmodel$mean.of.class,correlation=ncf.cor_precip_ourmodel$correlation,type="Precipitation model (corrected)") )
# Temperature
ncf.cor_temp<- correlog(Liu$Long, Liu$Lat, resid(Temp_model),increment=50, resamp=50,latlon = T)
ncf.cor_temp_ourmodel<- correlog(Liu$Long, Liu$Lat, resid(Temp_model_sac),increment=50, resamp=50,latlon = T)
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_temp$mean.of.class,correlation=ncf.cor_temp$correlation,type="Temperature model (with SAR)"))
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_temp_ourmodel$mean.of.class,correlation=ncf.cor_temp_ourmodel$correlation,type="Temperature model (corrected)") )
# Age
ncf.cor_age<- correlog(Liu$Long, Liu$Lat, resid(Age_model),increment=50, resamp=50,latlon = T)
ncf.cor_age_ourmodel<- correlog(Liu$Long, Liu$Lat, resid(Age_model_sac),increment=50, resamp=50,latlon = T)
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_age$mean.of.class,correlation=ncf.cor_age$correlation,type="Age model (with SAR)"))
sar.df <- rbind(sar.df,data.frame(mean.of.class=ncf.cor_age_ourmodel$mean.of.class,correlation=ncf.cor_age_ourmodel$correlation,type="Age model (corrected)") )
# Plotting
g <- ggplot(sar.df,aes(x=mean.of.class,y=correlation))
g <- g + geom_line() + facet_wrap(~type,nrow = 3,as.table = T)
g <- g + labs(x="Distance class",y="Correlation",title="Correcting for spatial autocorrelation in models")
# NOTE(review): fig_h/fig_w/fig_dpi/fig_units/fig_scale are not defined
# anywhere in this visible chunk — ggsave will fail unless they are set
# elsewhere; confirm.
ggsave("Figures/SpatialAutocorrelationOfOriginalModelResiduals.png",plot=g,height=fig_h,width=fig_w,dpi=fig_dpi,units=fig_units,scale=fig_scale)
# All models with SAC and random structure perform better than Liu et al. original models
# and reduce the spatial autocorrelation especially at larger scales!
# Furthermore due to colinearity between Predictors we need to consider interactions between
# predictors
#### ReAnalysis - Model with Interactions ####
# First we build a global model for use in model averaging that contains all varibles that are needed
# Squared and cubed terms temperature and precipitation are not included due to missing biological sense
# Mean_T2 shifts temperature by +17, presumably so the variance covariate in
# varFunc(~I(Mean_T2)) stays positive — TODO confirm rationale.
Liu$Mean_T2<-Liu$Mean_T+17
Liu$logAge<-log(Liu$Age)
Liu$Age_sq<-Liu$Age^2
mymodel<-lme(log(AGB)~Age*Mean_precip+Age*Mean_T2+Mean_T2*Mean_precip+Age_sq+logAge*Mean_precip+logAge*Mean_T2,
data=Liu,
weights= varFunc(~I(Mean_T2)), # To approach homoscedasticity
random=~1|Ref/Site,
correlation = corExp(1, form = ~ Lat_J + Long),
method="ML")
# Check for heteroskedasticity
plot(mymodel, which=2) # Somewhat greater spread at higher AGB
plot(ranef(mymodel)) # Random effects seem okay
qplot(Liu$Age,resid(mymodel))+geom_smooth()
qplot(Liu$Mean_T2,resid(mymodel))+geom_smooth()
qplot(Liu$Mean_precip,resid(mymodel))+geom_smooth()
qqnorm(mymodel,abline = c(0, 1))
# Heteroskedasticity is present in some cases,
# likely due to small sample sizes with high variances in extreme regions (few samples in tropics)
# Now we dredge the model so that the value of each variable in predicting biomass
# can be assessed rather than using them in isolation
# Use second-order Information Criterion and keep Age as explanatory variable
# The subset rule forbids Age and logAge in the same model and ties Age_sq to Age.
MS1 <- dredge(mymodel,evaluate=T,rank=AICc,trace=T,subset = !(Age&&logAge) && dc(Age,Age_sq) ,extra=c("R^2","adjR^2"))
poss_mod <- get.models(MS1,subset=delta<7)
modsumm <- model.sel(poss_mod, rank = "AICc",fit=T) # Rank and select the best models
modsumm2 <- subset(modsumm,modsumm$delta<7)
modsumm2
averaged <- model.avg(modsumm2,fit=T,subset=delta<7)
# since the model without log terms comes out best, rerun the model averaging
# routine without this the log term
mymodel2<-lme(log(AGB)~Age*Mean_precip+Age*Mean_T2+Mean_T2*Mean_precip,
data=Liu,
weights= varFunc(~I(Mean_T2)), # To approach homoscedasticity
random=~1|Ref/Site,
correlation = corExp(1, form = ~ Lat_J + Long),
method="ML")
plot(mymodel2)
anova(mymodel,mymodel2)
MS2 <- dredge(mymodel2,evaluate=T,rank=AICc,trace=T,REML=F)
poss_mod <- get.models(MS2,subset=delta<7)
modsumm <- model.sel(poss_mod, rank = "AICc",fit=T) # Rank and select the best models
modsumm2 <- subset(modsumm,modsumm$delta<7)
modsumm2
averaged <- model.avg(modsumm2,fit=T,subset=delta<7)
averaged$formula
#### ReAnalysis - Model predictions ####
# Check the value distribution per rounded data level
Liu$Agebin <- ( round_any(Liu$Age,100) )
Liu$Precipbin <-( round_any(Liu$Mean_precip,1000) )
Liu$Tempbin <- ( round_any(Liu$Mean_T,10) )
# Finally run the best performing model including interactions from the model averaging process
# Use Liu et al. original models in comparison which consider those factors in isolation,
# but control for random structure and spatial autocorrelation
top_model<-lme(averaged$formula,data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),weights= varFunc(~I(Mean_T2)),method="ML")
Precip_model<-lme(log(AGB)~Mean_precip+I(Mean_precip^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
Temp_model<-lme(log(AGB)~Mean_T+I(Mean_T^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
Age_model<-lme(log(AGB)~Age+I(Age^2),data=Liu,random=~1|Ref/Site,correlation = corExp(1, form = ~ Lat_J + Long),method="ML")
(out <- aictab(list(top_model,Precip_model,Temp_model,Age_model),sort=T,second.ord=T,
modnames=c("Model including Interactions","Precip. only","Temp. only","Age only")) )
write.csv(as.data.frame(out),"Results/FinalModelComparison.csv")
# Our Best performing model includes several interactions between all used Predictors
# outperforms all other of Liu et al.s Models even if accounted for Spatial Autocorrelation
# and random Structure. Thus indicates that Interactions are indeed important to determine Above Ground Biomass
#subset dataset to give only sites where precipitation bin is 1000-3000
Liu_precip<-subset(Liu,Precipbin>0&Precipbin<=3000)
Liu_precip$Mean_precip<-Liu_precip$Precipbin
ddply(Liu,.(Precipbin),summarize,minp=min(Age),max=max(Age),no=length(Age))
# Figure for interaction between age, precipitation and AGB
# Age ranges per precipitation band appear chosen to match observed data.
AP_pred<-data.frame(rbind(data.frame(Age=seq(80,795,1),Mean_precip=1000,Mean_T2=mean(Liu$Mean_T2)),
data.frame(Age=seq(80,1200,1),Mean_precip=2000,Mean_T2=mean(Liu$Mean_T2)),
data.frame(Age=seq(80,750,1),Mean_precip=3000,Mean_T2=mean(Liu$Mean_T2))))
AP_pred$Age_sq<-AP_pred$Age^2
# Population-level (level=0) predictions with +/- 2 SE confidence ribbons.
AP_pred$Pred<-predict(top_model,AP_pred,level=0,se.fit=T,backtransform=T)$fit
AP_pred$UCI<-AP_pred$Pred+(predict(top_model,AP_pred,level=0,se.fit=T)$se.fit*2)
AP_pred$LCI<-AP_pred$Pred-(predict(top_model,AP_pred,level=0,se.fit=T)$se.fit*2)
# Plot predictions
theme_set(theme_bw(base_size=12))
Age_precip1 <- ggplot(AP_pred,aes(Age,exp(Pred),ymax=exp(UCI),ymin=exp(LCI),group=as.factor(Mean_precip),fill=as.factor(Mean_precip)))+geom_line()+geom_ribbon(alpha=0.2)
Age_precip2 <- Age_precip1+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))+ theme(legend.position="none")+facet_wrap(~Mean_precip)
Age_precip3 <- Age_precip2+scale_fill_brewer(palette = "Set1")+geom_rug(data=Liu_precip,aes(x=Age,y=AGB,ymax=NULL,ymin=NULL,fill=NULL))+geom_point(data=Liu_precip,aes(x=Age,y=AGB,ymax=NULL,ymin=NULL,fill=NULL),shape=1,alpha=0.5)
Age_precip3 <- Age_precip3+labs(y=expression(paste("Aboveground biomass (Mg ",ha^-1,")",sep="")),
x="Estimated forest age")
# NOTE(review): fig_h/fig_w/fig_dpi/fig_units/fig_scale are not defined in
# this visible chunk — confirm they are set elsewhere before ggsave runs.
ggsave("Figures/Age_Precip.png",plot = Age_precip3,height=fig_h,width=fig_w,dpi=fig_dpi,units=fig_units,scale=fig_scale)
# Now age and temperature
Liu_Temp <- subset(Liu,Tempbin>=0&Tempbin<=20)
Liu_Temp$Mean_T <- Liu_Temp$Tempbin
ddply(Liu,.(Tempbin),summarize,minp=min(Age),maxp=max(Age),no=length(Age))
AT_pred <- data.frame(rbind(data.frame(Age=seq(80,1200,1),Mean_precip=mean(Liu$Mean_precip),Mean_T=0),
data.frame(Age=seq(80,1000,1),Mean_precip=mean(Liu$Mean_precip),Mean_T=10),
data.frame(Age=seq(80,200,1),Mean_precip=mean(Liu$Mean_precip),Mean_T=20)))
AT_pred$Age_sq<-AT_pred$Age^2
AT_pred$Mean_T2<-AT_pred$Mean_T+17
AT_pred$Pred<-predict(top_model,AT_pred,level=0,se.fit=T)$fit
AT_pred$UCI<-AT_pred$Pred+(predict(top_model,AT_pred,level=0,se.fit=T)$se.fit*2)
AT_pred$LCI<-AT_pred$Pred-(predict(top_model,AT_pred,level=0,se.fit=T)$se.fit*2)
Temp_Age1 <- ggplot(AT_pred,aes(Age,exp(Pred),ymax=exp(UCI),ymin=exp(LCI),group=as.factor(Mean_T),fill=as.factor(Mean_T)))+geom_line()+geom_ribbon(alpha=0.5)
Temp_Age2 <- Temp_Age1+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))+ theme(legend.position="none")+facet_wrap(~Mean_T)
Temp_Age3 <- Temp_Age2+scale_fill_brewer(palette = "Set1")+geom_rug(data=Liu_Temp,aes(x=Age,y=AGB,ymax=NULL,ymin=NULL,fill=NULL))+geom_point(data=Liu_Temp,aes(x=Age,y=AGB,ymax=NULL,ymin=NULL,fill=NULL),shape=1,alpha=0.2)
Temp_Age3 <- Temp_Age3 + labs(y = expression(paste("Aboveground biomass (Mg ",ha^-1,")",sep="")),
x = "Estimated forest age")
ggsave("Figures/Age_Temp.png",plot = Temp_Age3,height=fig_h,width=fig_w,dpi=fig_dpi,units=fig_units,scale=fig_scale)
#now temperature and precipitation
Liu_Temp<-subset(Liu,Tempbin>=0&Tempbin<=20)
Liu_Temp$Mean_T <- Liu_Temp$Tempbin
ddply(Liu,.(Tempbin),summarize,minp=min(Mean_precip),maxp=max(Mean_precip),no=length(Mean_precip))
AP_pred<-data.frame(rbind(data.frame(Age=mean(Liu$Age),Mean_precip=seq(0,3000,1),Mean_T=0),
data.frame(Age=mean(Liu$Age),Mean_precip=seq(0,3700,1),Mean_T=10),
data.frame(Age=mean(Liu$Age),Mean_precip=seq(0,5800,1),Mean_T=20)))
AP_pred$Age_sq<-AP_pred$Age^2
AP_pred$Mean_T2<-AP_pred$Mean_T+17
AP_pred$Pred<-predict(top_model,AP_pred,level=0,se.fit=T)$fit
AP_pred$UCI<-AP_pred$Pred+(predict(top_model,AP_pred,level=0,se.fit=T)$se.fit*2)
AP_pred$LCI<-AP_pred$Pred-(predict(top_model,AP_pred,level=0,se.fit=T)$se.fit*2)
#now plot this
theme_set(theme_bw(base_size=12))
Temp_precip1 <- ggplot(AP_pred,aes(Mean_precip,exp(Pred),ymax=exp(UCI),ymin=exp(LCI),group=as.factor(Mean_T),fill=as.factor(Mean_T)))+geom_line()+geom_ribbon(alpha=0.5)
Temp_precip2 <- Temp_precip1 + theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),panel.border = element_rect(size=1.5,colour="black",fill=NA))+ theme(legend.position="none")+facet_wrap(~Mean_T)
Temp_precip3 <- Temp_precip2 + scale_fill_brewer(palette = "Set1")+geom_rug(data=Liu_Temp,aes(x=Mean_precip,y=AGB,ymax=NULL,ymin=NULL,fill=NULL))+geom_point(data=Liu_Temp,aes(x=Mean_precip,y=AGB,ymax=NULL,ymin=NULL,fill=NULL),shape=1,alpha=0.2)
Temp_precip3 <- Temp_precip3 + labs(y=expression(paste("Aboveground biomass (Mg ",ha^-1,")",sep="")),
x= "Mean annual precipitation (mm)")
ggsave("Figures/Temp_Precip.png",plot=Temp_precip3,height=fig_h,width=fig_w,dpi=fig_dpi,units=fig_units,scale=fig_scale)
# Spatial look at the residuals
#r <- residuals(top_model)
#base_world+geom_point(data=Liu,aes(x=Long,y=Lat,size=sqrt(r^2)),color="blue")
|
048060fe8440cc2453626c4ee9f5daee1e5e5d15
|
11ba9630777d42a9e92b5aea962a8f75ef5a6cde
|
/man/get_species.Rd
|
ca33859d8b47991d2cb6492afd421102bc4a3c3d
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
ramiromagno/ensemblr
|
afb3e03b283ed02bb35dd9cb1ad3d4ab3b912a62
|
23d56975c4be7d6646636d9ff12a44c593a240a0
|
refs/heads/master
| 2021-11-23T23:18:15.179804
| 2021-11-08T00:23:18
| 2021-11-08T00:23:18
| 198,703,945
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,212
|
rd
|
get_species.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/species.R
\name{get_species}
\alias{get_species}
\title{Get Ensembl species}
\usage{
get_species(
division = get_divisions(),
verbose = FALSE,
warnings = TRUE,
progress_bar = TRUE
)
}
\arguments{
\item{division}{Ensembl division, e.g., \code{"EnsemblVertebrates"} or
\code{"EnsemblBacteria"}, or a combination of several divisions. Check
function \code{\link[ensemblr]{get_divisions}} to get available Ensembl
divisions.}
\item{verbose}{Whether to be verbose about the http requests and respective
responses' status.}
\item{warnings}{Whether to show warnings.}
\item{progress_bar}{Whether to show a progress bar.}
}
\value{
A \code{\link[tibble]{tibble}} of 12 variables:
\describe{
\item{division}{Ensembl division: \code{"EnsemblVertebrates"},
\code{"EnsemblMetazoa"}, \code{"EnsemblPlants"}, \code{"EnsemblProtists"},
\code{"EnsemblFungi"} or \code{"EnsemblBacteria"}.}
\item{taxon_id}{NCBI taxon identifier.}
\item{species_name}{Ensembl species name: this is the name used internally
by Ensembl to uniquely identify a species by name. It is the scientific
name but formatted without capitalisation and spacing converted with an
underscore, e.g., \code{'homo_sapiens'}.}
\item{species_display_name}{Species display name: the name used for display
on Ensembl website.}
\item{species_common_name}{Species common name.}
\item{release}{Ensembl release version.}
\item{genome_assembly_name}{Code name of the genome assembly.}
\item{genbank_assembly_accession}{Genbank assembly accession identifier.}
\item{strain}{Species strain.}
\item{strain_collection}{Species strain collection.}
\item{species_aliases}{Other names or acronyms used to refer to the
species. Note that this column is of the list type.}
\item{groups}{Ensembl databases for which data exists for this species.
Note that this column is of the list type.}
}
}
\description{
This function retrieves species-level information. The data is returned as a
\code{\link[tibble]{tibble}} where each row is a species and the columns are
metadata about each species. See below under section Value for details about
each column.
}
|
4d1f41a70a7fde0b8498e8aa70790c4386868554
|
32d6491e5f646c7110f63ced853c4e125e359af9
|
/data scrape.R
|
d4838944ea121c98c372a4f900cc0c7d70532fa2
|
[] |
no_license
|
NoStaples/Twitter-Sraping
|
59bc5740c07e90d126bdafe058e643103726e6a4
|
9c826da517fe4503ade6c4caf7f51aa783acc43f
|
refs/heads/master
| 2020-03-30T06:35:22.454945
| 2018-09-29T14:39:48
| 2018-09-29T14:39:48
| 150,872,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,548
|
r
|
data scrape.R
|
# Scrape recent tweets from a Twitter profile with RSelenium (to trigger the
# infinite-scroll loading), then tokenise the tweet text with tidytext and
# count word frequencies.
library(rvest)
library(tidytext)
library(dplyr)
library(RSelenium)
# Start a Selenium-driven browser session and open the target profile.
rd <- rsDriver()
remDr <- rd[["client"]]
remDr$navigate("https://twitter.com/PattyMurray")
#scraping_twitter <- read_html("https://twitter.com/realDonaldTrump")
#scroll down 5 times, waiting for the page to load at each time
# NOTE(review): the loop actually scrolls 15 times, not 5 as the comment
# above says; each iteration waits 3 s for lazy-loaded tweets to render.
for(i in 1:15){
  remDr$executeScript(paste("scroll(0,",i*10000,");"))
  Sys.sleep(3)
}
#get the page html
page_source<-remDr$getPageSource()
scraping_twitter <- read_html(page_source[[1]])
# Scrape all of the tweet content
tweets <- scraping_twitter %>% html_nodes(".tweet-text") %>% html_text()
tweets_df <- data_frame(lines = 1:length(tweets), text = tweets )
# Scrape all of the headers so we can limit the tweets to just a specific individual
headers <- scraping_twitter %>% html_nodes(".stream-item-header") %>% html_text()
headers_df <- data_frame(lines = 1:length(headers), text = headers)
# Combine it all together
# NOTE(review): joining on row position assumes headers and tweets were
# extracted in lockstep — verify counts match before trusting the join.
content <- headers_df %>% inner_join(tweets_df, by = "lines")
colnames(content) <- c("lines", "header", "text")
# Subset data to filter out retweets
originals <- content %>% filter(stringr::str_detect(header, "@PattyMurray"))
# At this point, there are still pictures and links to videos (including titles) that will need to be filtered out
# Start the actual text analysis stuff
tokens <- originals[,-2] %>% unnest_tokens(word, text)
# Lets remove some stop words
data("stop_words")
tokens <- tokens %>% anti_join(stop_words)
# Lets count the most often used words
counts <- tokens %>% count(word, sort = TRUE)
|
f53d4038fff640a4f1436431171fc002b991e353
|
ab219b48b72c851385a30aaed62b37707e25c009
|
/man/plot_curve.Rd
|
1a2a99fa7993640b98170ee5fc15394016d1aac8
|
[] |
no_license
|
morphr/morphr
|
6526e3f93bedea134e11438a0957bf097071d597
|
0cc8f0ebfe82810f2d6d22ed136aaba0def4e8c0
|
refs/heads/master
| 2021-04-26T23:33:11.646234
| 2020-09-26T18:42:28
| 2020-09-26T18:42:28
| 124,013,754
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 397
|
rd
|
plot_curve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{plot_curve}
\alias{plot_curve}
\title{Plot the curve p with color value}
\usage{
plot_curve(p, colorstr, l = FALSE)
}
\arguments{
\item{p}{coordinates of curve}
\item{colorstr}{color value}
\item{l}{Logical; defaults to \code{FALSE}, in which case a plot is returned}
}
\description{
Plot the curve p with color value
}
|
08268939014270bcacf15d6014f09855b1d51eb4
|
c0a85d0ac12178099174f7afbfc09b8eaac54a7d
|
/R/ApiClientStateGet.r
|
30ecdc2a8d2a30052feee10befb887b83f0423bd
|
[] |
no_license
|
voigtstefan/lykke
|
a2b7d4f5517c9e2a729c7bb9ea80208a0bfb00e3
|
d9739ef6871acf6d0770c586ca779576a0bf1341
|
refs/heads/master
| 2021-06-15T11:38:32.620773
| 2017-04-10T11:25:01
| 2017-04-10T11:25:01
| 83,903,639
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 252
|
r
|
ApiClientStateGet.r
|
#' Get client state from the Lykke API
#'
#' Performs a GET request against the Lykke \code{ClientState} endpoint for
#' the account identified by the given e-mail address.
#'
#' @param email Character scalar: the e-mail address identifying the client.
#' @return The parsed response body, as returned by \code{httr::content()}.
#' @export
#' @importFrom httr GET content
ApiClientStateGet <- function(email) {
  stopifnot(is.character(email), length(email) == 1L)
  base_url <- "https://api.lykkex.com/api/"
  # Bug fix: `param` was an undefined global (the function always errored);
  # the endpoint expects a query parameter literally named "email".
  get_url <- paste0(base_url, "ClientState", "?email=", email)
  res <- GET(get_url)
  return(content(res))
}
|
70db918894838b4d32d693e8ec78d985b5b61fcc
|
3735a4dd8ad40ca28472adb79f7b24b882c2aedf
|
/R/dim.R
|
628350e6361af712215596d43b745b432697c09d
|
[] |
no_license
|
kar-agg-gen/DGEobj
|
efbd8aa9f086a8db97cb159a89a00f51beef51ce
|
5382e6de0dbc4289312a58eccbfac657334fe18b
|
refs/heads/master
| 2023-03-01T16:42:28.156818
| 2020-08-17T16:19:52
| 2020-08-17T16:19:52
| 304,116,160
| 0
| 2
| null | 2020-11-20T21:37:50
| 2020-10-14T19:31:31
| null |
UTF-8
|
R
| false
| false
| 687
|
r
|
dim.R
|
### Function dim ###
#' Function dim
#'
#' Reports the dimensions of the assay slot (row = genes; col = samples).
#' Looks up the items of the DGEobj whose "basetype" attribute equals
#' "assay" and returns the dim() of the first one; if no assay items are
#' present, returns c(0, 0).
#'
#' @author John Thompson, \email{john.thompson@@bms.com}
#' @keywords RNA-Seq, DGEobj
#'
#' @param dgeObj A class dgeObj created by function initDGEobj
#'
#' @return An integer vector [r,c] with a length of 2.
#'
#' @examples
#' dim(MyDgeObj)
#'
#' @export
dim.DGEobj <- function(dgeObj){
    # Fallback when the object holds no assay items.
    empty_dim <- c(0, 0)
    # Which stored items are assays? (basetype is a per-item attribute vector.)
    assay_flags <- attr(dgeObj, "basetype") == "assay"
    assay_items <- unclass(dgeObj)[assay_flags]
    if (length(assay_items) == 0) {
        return(empty_dim)
    }
    # All assays share dimensions, so the first one is representative.
    dim(assay_items[[1]])
}
|
10782d63c11485004544a47febae687850a4e044
|
532038c73c749dcccf9281a9b2800b2949ae040b
|
/R/GNARXdesign.R
|
84a5841519596bb41944bc46af0f06d57314da36
|
[] |
no_license
|
cran/GNAR
|
5c83dd0e0515a7694dc2e72ffaafe34787b84ca9
|
abfb7ee822930710f64fc9e2be3b06b1eec403dc
|
refs/heads/master
| 2023-05-13T16:52:47.056363
| 2023-04-27T19:10:05
| 2023-04-27T19:10:05
| 150,179,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,571
|
r
|
GNARXdesign.R
|
# Build the regression design matrix for a GNARX model: one column per
# autoregressive (alpha) parameter, per neighbourhood-regression (beta)
# parameter and per exogenous-regressor (lambda) parameter, with one row per
# node-by-time observation. Rows are ordered node-major: all predt usable
# time points for node 1, then node 2, etc.
#) CHANGES START HERE
GNARXdesign <- function (vts = GNAR::fiveVTS, net = GNAR::fiveNet, alphaOrder = 2,
                        betaOrder = c(1,1), fact.var = NULL, globalalpha=TRUE,
                        tvnets=NULL, netsstart=NULL, lambdaOrder=NULL, xvts=NULL)
#) CHANGES END HERE
#) lambdaOrder is a length H vector containing the number of lags of the
#) H exogenous regressors included in the GNARX equation.
#) xvts is a length H list, where each element of the list is of same dimension
#) as vts. If lambdaOrder remains null while xvts is not, xvts is ignored.
{
#) CHANGES START HERE
  if(!is.null(lambdaOrder)){
    stopifnot(is.list(xvts))
    H <- length(lambdaOrder)
    stopifnot(H == length(xvts))
    stopifnot(floor(lambdaOrder) == lambdaOrder)
    stopifnot(min(lambdaOrder) >= 0)
    for(h in 1:H){
      stopifnot(all(dim(xvts[[h]]) == dim(vts)))
    }
  }
#) CHANGES END HERE
#) This ensures that each exogenous regressor is contained in a list,
#) has a corresponding lag order and
#) all of the exogenous regressor data matrices have the same dimension as vts.
#) Also ensure lag orders are nonnegative integers.
  # Validate the network, the series matrix and the model orders.
  stopifnot(is.GNARnet(net))
  stopifnot(ncol(vts) == length(net$edges))
  stopifnot(alphaOrder > 0)
  stopifnot(floor(alphaOrder) == alphaOrder)
  stopifnot(length(betaOrder) == alphaOrder)
  stopifnot(floor(betaOrder) == betaOrder)
  if(!is.null(fact.var)){
    stopifnot(length(fact.var) == length(net$edges))
    if(!globalalpha){
      stop("Use factors OR individual alphas")
    }
    # if(sum(fact.var %in% c(0,1))!=length(fact.var)){
    #   cat("More than two (0/1) factor variables not yet supported")
    # }
    # stopifnot(sum(fact.var %in% c(0,1))==length(fact.var))
  }
  stopifnot(is.matrix(vts))
  # if(!globalalpha){
  #   cat("Individual alphas not yet supported")
  # }
  # stopifnot(globalalpha)
  stopifnot(is.logical(globalalpha))
  if(!is.null(tvnets)){
    cat("Time-varying networks not yet supported")
  }
  stopifnot(is.null(tvnets))
  #cat("Note: input net should contain distances (not weights)")
  #flip network so that NofNeighbours gives into node information
  netmat <- as.matrix(net, normalise=FALSE)
  if(!isSymmetric(netmat)){
    net <- as.GNARnet(t(netmat))
  }
  # Assemble the column names (parNames) and a parallel tag vector (parLoc:
  # "a" = alpha, "b" = beta, "l<j>" = lambda) used later to locate columns.
  parNames <- parLoc <- NULL
  for (jj in 1:alphaOrder) {
    if(globalalpha){
      parNames <- c(parNames, paste("alpha", jj, sep = ""))
      parLoc <- c(parLoc, "a")
    }else{
      for(kk in 1:ncol(vts)){
        parNames <- c(parNames, paste("alpha", jj, "node", kk, sep=""))
        parLoc <- c(parLoc, "a")
      }
    }
    if (betaOrder[jj] > 0) {
      for (kk in 1:betaOrder[jj]) {
        parNames <- c(parNames, paste("beta", jj, ".",
                                      kk, sep = ""))
        parLoc <- c(parLoc, "b")
      }
    }
  }
  maxOrder <- alphaOrder
#) CHANGES START HERE
  if(!is.null(lambdaOrder)){
    for(jj in 1:H){
      for(ii in 0:lambdaOrder[jj]){
        parNames <- c(parNames, paste("lambda", jj,".", ii, sep = ""))
        parLoc <- c(parLoc, paste("l", jj, sep = ""))
      }
    }
    maxOrder <- max(maxOrder, max(lambdaOrder))
  }
#) CHANGES END HERE
#) Added a loop to assign names to the lambda coefficients of the exogenous
#) regressors. Also changed the maxOrder just in case the lambda order
#) exceeds the alpha order.
  # predt: number of usable time points once the deepest lag is dropped.
  predt <- nrow(vts) - maxOrder
  nnodes <- ncol(vts)
#) CHANGES START HERE
  dmat <- matrix(0, nrow = predt * nnodes, ncol = length(parLoc),
                 dimnames = list(NULL, parNames))
#) CHANGES END HERE
#) Changed the ncol argument to allow for exogenous regressor data.
  # Fill the alpha columns with each node's own lagged values.
  for (ii in 1:nnodes) {
    for (aa in 1:alphaOrder) {
      if(globalalpha){
        alphaLoc <- which(parLoc == "a")[aa]
      }else{
        alphaLoc <- which(parLoc=="a")[nnodes*(aa-1)+ii]
      }
      dmat[((predt * (ii - 1) + 1):(predt * ii)), alphaLoc] <- vts[((maxOrder +
        1 - aa):(predt + (maxOrder - aa))), ii]
    }
  }
  # Fill the beta columns with distance-weighted averages of each node's
  # r-stage neighbours at the appropriate lag, handling missing values.
  if (sum(betaOrder) > 0) {
    betaN <- NULL
    betaTimes <- rep(1:alphaOrder, betaOrder)
    for (jj in 1:alphaOrder) {
      #betaTimes <- c(betaTimes, rep(jj, betaOrder[jj]))
      if (betaOrder[jj] > 0) {
        betaN <- c(betaN, 1:betaOrder[jj])
      }
    }
    for (ii in 1:nnodes) {
      NofNei <- NofNeighbours(node = ii, stage = max(betaOrder),
                              net = net)
      Nei <- NofNei$edges
      Wei <- NofNei$dist
      if ((!is.null(Nei)) & (length(Nei) > 0)) {
        if (!is.null(Nei[[1]])&!is.na(Nei[[1]][1])) {
          # Convert distances into inverse-distance weights normalised to 1.
          Wei <- lapply(Wei, function(x){1/(x*sum(1/x))})
          # for (jj in 1:length(Wei)) {
          #   inv.dist <- 1/Wei[[jj]]
          #   Wei[[jj]] <- inv.dist/sum(inv.dist)
          # }
          for (bb in 1:sum(betaOrder)) {
            betaLoc <- which(parLoc == "b")[bb]
            if (length(Nei[[betaN[bb]]]) > 1) {
              # print(paste("node", ii, "betaN[bb]", betaN[bb]))
              # print("In length(Nei[[betaN[bb]]]) > 1")
              vts.cut <- vts[((maxOrder + 1 - betaTimes[bb]):(predt +
                (maxOrder - betaTimes[bb]))), Nei[[betaN[bb]]]]
              # Re-weight over the non-missing neighbours at each time point.
              for (kk in 1:nrow(vts.cut)) {
                if (any(is.na(vts.cut[kk, ]))) {
                  if (all(is.na(vts.cut[kk, ]))) {
                    #if there are no neighbours left at any time point, set to zero
                    vts.cut[kk, ] <- 0
                  }
                  else {
                    new.wei <- Wei[[betaN[bb]]][which(!is.na(vts.cut[kk,
                      ]))]
                    new.wei <- new.wei/sum(new.wei)
                    sub.val <- vts.cut[kk, which(!is.na(vts.cut[kk,
                      ]))] %*% new.wei
                    vts.cut[kk, which(is.na(vts.cut[kk,
                      ]))] <- sub.val
                  }
                }
              }
              dmat[((predt * (ii - 1) + 1):(predt *
                ii)), betaLoc] <- vts.cut %*% Wei[[betaN[bb]]]
            }
            else {
              # print(paste("node", ii, "betaN[bb]", betaN[bb]))
              # print("In length(Nei[[betaN[bb]]]) > 1 else")
              if ((length(Nei[[betaN[bb]]]) == 1) &
                  (!is.na(Nei[[betaN[bb]]]))) {
                # print("In (length(Nei[[betaN[bb]]]) == 1) & (!is.na(Nei[[betaN[bb]]]))")
                vts.cut <- vts[((maxOrder +
                  1 - betaTimes[bb]):(predt + (maxOrder -
                  betaTimes[bb]))), Nei[[betaN[bb]]]]
                #and if this is missing at any time point, set to zero
                vts.cut[is.na(vts.cut)] <- 0
                dmat[((predt * (ii - 1) + 1):(predt *
                  ii)), betaLoc] <- vts.cut * Wei[[betaN[bb]]]
              }
              else {
                dmat[((predt * (ii - 1) + 1):(predt *
                  ii)), betaLoc] <- 0
              }
            }
          }
        }
        else {
          # Node has no valid neighbours: its beta columns stay zero.
          for (bb in 1:sum(betaOrder)) {
            betaLoc <- which(parLoc == "b")[bb]
            dmat[((predt * (ii - 1) + 1):(predt * ii)),
                 betaLoc] <- 0
          }
        }
      }
      else {
        for (bb in 1:sum(betaOrder)) {
          betaLoc <- which(parLoc == "b")[bb]
          dmat[((predt * (ii - 1) + 1):(predt * ii)),
               betaLoc] <- 0
        }
      }
    }
  }
#) CHANGES START HERE
  if(!is.null(lambdaOrder)){
    # Lambda columns occupy the tail of dmat; walk them left to right.
    assignCol <- ncol(dmat) - sum(lambdaOrder) - H + 1
    for(ii in 1:H){
      for(jj in 0:lambdaOrder[ii]){
        if(is.null(dim(xvts[[ii]]))){
          #) This if statement ensures that, if xvts[[ii]] is only a vector rather
          #) than a matrix, we don't use the vec() function, which throws up an
          #) error when applied to a vector.
          dmat[, assignCol] <- matrix(xvts[[ii]][(maxOrder + 1 - jj):(nrow(xvts[[ii]]) - jj), ])
        }else{
          dmat[, assignCol] <- vec(xvts[[ii]][(maxOrder + 1 - jj):(nrow(xvts[[ii]]) - jj), ])
        }
        assignCol <- assignCol + 1
      }
    }
  }
#) CHANGES END HERE
#) This block of code incorporates the exogenous regressor data into the
#) design matrix.
  # Optionally expand the design matrix per factor level: each level gets a
  # full copy of the columns, zeroed out for rows belonging to other levels.
  if (is.null(fact.var)) {
    return(dmat)
  }else {
    #allow more than just two factors
    facun <- unique(fact.var)
    if(length(facun)==1){
      return(dmat)
    }else{
      dmcol <- ncol(dmat)
      dmatex <- dmat
      exnames <- paste(colnames(dmat), " '",facun[1],"'", sep="")
      for(ii in 2:length(facun)){ #duplicate matrix columns
        dmatex <- cbind(dmatex, dmat)
        #change names to reflect factors
        exnames <- c(exnames, paste(colnames(dmat), " '",facun[ii], "'", sep=""))
      }
      #for each unique factor, set other entries to 0
      for(ii in 1:length(facun)){
        dmatex[fact.var != facun[ii], ((ii-1)* dmcol + (1:dmcol))] <- 0
      }
      colnames(dmatex) <- exnames
      return(dmatex)
    }
  }
}
|
25763fea582be4a52685f503b167d9b077b8eeb6
|
4cabda4635edd3226a371403d608b6622fb7fd71
|
/R/cake_filling_round.R
|
673fcfa84dafc6cd9358c975d67572813030ff21
|
[] |
no_license
|
randallhelms/cakeR
|
a67fad8e563ec9dc267c6f2aaa79b3e2fc78ae44
|
923a718c6bc8c2a9354c83373ab94587da8df815
|
refs/heads/master
| 2021-06-04T16:58:28.637311
| 2020-03-19T21:58:16
| 2020-03-19T21:58:16
| 145,759,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,638
|
r
|
cake_filling_round.R
|
#' Calculate filling amounts and ingredient prices for a round cake
#'
#' Scales the amount of filling from the cake's total surface area and costs
#' the ingredients from a fixed price list.
#'
#' @param d Cake diameter.
#' @param h Cake height (same length unit as \code{d}).
#' @param filling Filling name, case-insensitive, spaces allowed
#'   (e.g. "buttercream", "chocolate ganache").
#' @return A tibble with columns ingredient, amount, unit and price.
cake_filling_round <- function(d,h,filling) {
  # library() rather than require(): fail loudly if a dependency is missing.
  library(stringr)
  library(tidyr)
  library(dplyr)
  library(tibble)
  # (The original defined unused d_range/h_range/vol locals; removed.)
  # Price list: purchase-unit size, purchase price, and derived unit price.
  price_list <- tibble(ingredient = c('icing_sugar','butter','double_cream','chocolate','rum'),
                       units = c(250,250,200,2500,700),
                       price = c(.49,1.75,.65,21.9,13.49),
                       price_per_unit = price / units
  )
  # Normalise user input: lower-case, spaces -> underscores.
  filling <- str_to_lower(gsub(' ','_',filling))
  possibles <- c('ganache','buttercream','chocolate_buttercream','coconut_rum_buttercream','vanilla_buttercream')
  # Bug fix: the alias check runs *after* spaces were turned into
  # underscores, so the original literal 'chocolate ganache' could never
  # match; compare against the normalised form instead.
  filling <- ifelse(filling %in% c('chocolate_ganache','chocolate'),'ganache',filling)
  # Guard clause: reject unknown fillings up front.
  if (!(filling %in% possibles)) {
    stop('Error: Bad input! Please try again')
  }
  mult <- 5.5  # grams of filling per unit of surface area
  surf_area <- (2 * pi * (d/2) * h) + (2 * pi * (d/2)^2)
  if (filling=='buttercream') {
    # Buttercream: 2/3 icing sugar by weight, butter is half the sugar.
    buttercream <- round(mult*surf_area,-1)
    icing_sugar <- round(buttercream * (2/3),-1)
    butter <- icing_sugar/2
    butter_calcs <- tibble(ingredient = c('buttercream','icing_sugar','butter'),
                           amount = c(buttercream,icing_sugar,butter))
    butter_prices <- butter_calcs %>%
      inner_join(price_list,by='ingredient') %>%
      mutate(price = round(price_per_unit * amount,digits=2)) %>%
      select(ingredient,price)
    butter_sum <- sum(butter_prices$price)
    butter_prices <- butter_prices %>%
      add_row(ingredient = 'buttercream',price=butter_sum)
    butter_calcs <- butter_calcs %>%
      inner_join(butter_prices,by='ingredient') %>%
      mutate(unit = 'g') %>%
      select(ingredient,amount,unit,price)
    return(butter_calcs)
  }
  # All other supported fillings are costed as a ganache.
  # NOTE(review): 'vanilla_buttercream' etc. therefore receive ganache
  # pricing, matching the original behaviour — confirm this is intended.
  ganache <- round(mult * surf_area,-1)
  # NOTE(review): digits = -.5 is an unusual argument to round(); kept
  # as-is to preserve output — confirm the intended rounding.
  chocolate <- round((ganache / 2) * 1.1,-.5)
  double_cream <- chocolate
  ganache_calcs <- tibble(ingredient = c('ganache','chocolate','double_cream'),
                          amount = c(ganache,chocolate,double_cream),
                          unit = c('g','g','ml')
  )
  ganache_prices <- ganache_calcs %>%
    inner_join(price_list,by='ingredient') %>%
    mutate(price = round(price_per_unit * amount,digits=2)) %>%
    select(ingredient,price,unit)
  ganache_sum <- sum(ganache_prices$price)
  ganache_prices <- ganache_prices %>%
    add_row(ingredient = 'ganache',price=ganache_sum) %>%
    select(ingredient,price)
  ganache_calcs <- ganache_calcs %>%
    inner_join(ganache_prices,by='ingredient')
  return(ganache_calcs)
}
|
07fd5e20c4219974424afb6cbd8743e93c333e05
|
970b53258a6b4b54e539ed34c792adde373495d5
|
/src/ExploratoryDataPlot.R
|
faa4357880a641f9e614a14016292d275489f19e
|
[] |
no_license
|
sfmb-mx/ExData_Plotting1
|
7f9338b298a70c3548243d05243a7412f6d3a2d0
|
f7127855ccc9cfbc10ff124bec8bad496c099d9d
|
refs/heads/master
| 2022-09-17T03:55:26.868635
| 2014-08-10T04:28:11
| 2014-08-10T04:28:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,849
|
r
|
ExploratoryDataPlot.R
|
### ExploratoryDataPlot.R ---
##
## Filename: ExploratoryDataPlot.R
## Description:
## Author: Sergio-Feliciano Mendoza-Barrera
## Maintainer:
## Created: Sat Aug 9 09:14:48 2014 (-0500)
## Version:
## Package-Requires: ()
## Last-Updated: Sat Aug 9 21:57:18 2014 (-0500)
## By: Sergio-Feliciano Mendoza-Barrera
## Update #: 142
## URL:
## Doc URL:
## Keywords:
## Compatibility:
##
######################################################################
##
### Commentary:
##
## Exploratory Data Analysis
## Project 1
##
######################################################################
##
### Change Log:
##
##
######################################################################
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at
## your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
##
######################################################################
##
### Code:
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace as a
# side effect; avoid in shared scripts — prefer running in a fresh session.
rm(list = ls())                         # Remove all workspace data
library(parallel)
library(data.table)
library(lubridate)
######################################################################
## Download the required files
## source("dwDataFile.R") # Download the dataset
## fileName <- "household_power_consumption.zip"
## source <-
## "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## dwDataFile(fileName, source) # Download the dataset
######################################################################
## Read dataset
dataFN <- "./data/household_power_consumption.txt"
ColNames <- c("Date", "Time", "Global_active_power" ,
              "Global_reactive_power", "Voltage", "Global_intensity",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## Because data and time of the first item is "12/16/2006 17:24:00",
## so I count the number of items before "01/02/2007 00:00:00" that is
## skipRows = (47 * 60 * 24) - (17 * 60) - 23
## And there have 2 * 24 * 60 items in two days (nrows = 2880).
EPConsumption <- fread(dataFN, sep = ";", header = FALSE, na.strings =
                       "?", colClasses = "character",
                       skip = 66637, nrows = 2880)
setnames(EPConsumption, ColNames)
######################################################################
## Converting the data to easy handle
## Combined date-time stamp parsed from the Date and Time columns.
EPConsumption$myDate <- dmy_hms(paste(EPConsumption$Date,
                                      EPConsumption$Time, sep = " "))
## Convert all measurement columns to numeric. as.numeric() is already
## vectorised, so the per-element sapply() calls of the original are
## unnecessary. This also fixes a copy-paste bug: Sub_metering_3 was
## previously filled from Sub_metering_2.
numericCols <- c("Global_active_power", "Global_reactive_power",
                 "Voltage", "Global_intensity",
                 "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
for (col in numericCols) {
    set(EPConsumption, j = col, value = as.numeric(EPConsumption[[col]]))
}
## sapply(EPConsumption, mode) # Testing NA values existence
## sapply(EPConsumption, class)
## anyNA(EPConsumption)
## dim(EPConsumption)
## Test!
## I seem to have the right subset, but the data seems to be mismatched.
## I have 2880 rows. I created this by using the following code.
## filename="data/household_power_consumption.txt"
## hpcdata<-read.csv.sql(filename,sep=";",sql='select * from file where Date="2/1/2007" OR Date="2/2/2007"')
## hpcdata$Date<-strptime(paste(hpcdata$Date,hpcdata$Time),format)
## The summary looks like this.
## > summary(hpcdata)
## Date Time Global_active_power Global_reactive_power
## Min. :2007-02-01 00:00:00 Length:2880 Min. :0.220 Min. :0.0000
## 1st Qu.:2007-02-01 11:59:45 Class :character 1st Qu.:0.380 1st Qu.:0.0000
## Median :2007-02-01 23:59:30 Mode :character Median :0.532 Median :0.1100
## Mean :2007-02-01 23:59:30 Mean :1.020 Mean :0.1145
## 3rd Qu.:2007-02-02 11:59:15 3rd Qu.:1.466 3rd Qu.:0.1620
## Max. :2007-02-02 23:59:00 Max. :5.448 Max. :0.8620
## Voltage Global_intensity Sub_metering_1 Sub_metering_2 Sub_metering_3
## Min. :233.9 Min. : 1.000 Min. :0 Min. :0.0000 Min. : 0.000
## 1st Qu.:239.0 1st Qu.: 1.600 1st Qu.:0 1st Qu.:0.0000 1st Qu.: 0.000
## Median :241.4 Median : 2.200 Median :0 Median :0.0000 Median : 0.000
## Mean :241.1 Mean : 4.292 Mean :0 Mean :0.2444 Mean : 6.215
## 3rd Qu.:243.0 3rd Qu.: 6.000 3rd Qu.:0 3rd Qu.:0.0000 3rd Qu.:17.000
## Max. :250.0 Max. :23.000 Max. :0 Max. :2.0000 Max. :19.000
######################################################################
### ExploratoryDataPlot.R ends here
|
57620d59d5323da3cd005c6d9f2e9e1c718acbac
|
7965c2d3c932f5de20e5431571beba251453654d
|
/tests/testthat/test_pmh.R
|
0b9eabef0c9fd2fc49d6fd651b9ccddf4ee44329
|
[
"CC0-1.0"
] |
permissive
|
ZhangAngus/mimicfilters
|
d8e6419846680e7ff13c4c816f79dccc918b5734
|
b6903be8cf89ceb53f15d70919db6874d9a0befe
|
refs/heads/master
| 2020-05-14T19:22:09.134537
| 2016-11-18T21:58:28
| 2016-11-18T21:58:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,011
|
r
|
test_pmh.R
|
# testthat unit tests for the past-medical-history (PMH) extraction helpers
# (extract_pmh_string, extract_pmhflag_*), which are defined elsewhere in
# the package.
context("testing functions for the past medical history extraction function")
# Fixture: free text containing a PMH section terminated by the next
# "Social history:" heading.
temp_str = "The quick brown fox jumps over the lazy dog.\nPast medical history: pt history of smoking related emphysema. Social history: test"
# Fixture data frame: row 1 has a PMH section with a smoking term, row 2 a
# PMH section with an etoh term, row 3 no PMH section at all (HPI only).
temp_df = data.frame(id = c(1,2,3),
                     text = c("blah, blah\nPast medical history: pt history of smoking related emphysema. Social history: test",
                              "blah, blah PRIOR MEDICAL HISTORY: excessive etoh, compromised liver function. blah: test",
                              "blah, blah history of present illness: obesity, hypertension, HF etc blah: test"))
# Terms whose presence in the PMH section should set the flag.
exclusion = c('etoh', 'smok')
# The extracted PMH substring should be a single string no longer than the
# input, ending where the next section heading begins.
test_that("extract_pmh_string", {
  expect_equal(length(extract_pmh_string(temp_str)), 1)
  expect_true(nchar(extract_pmh_string(temp_str)) <= nchar(temp_str))
  expect_equal(extract_pmh_string(temp_str), "Past medical history: pt history of smoking related emphysema. Social h")
})
# Flag is TRUE only when one of the exclusion terms appears in the PMH text.
test_that("extract_pmhflag_string", {
  expect_equal(length(extract_pmhflag_string(temp_str, exclusion)), 1)
  expect_true(extract_pmhflag_string(temp_str, exclusion))
  expect_true(extract_pmhflag_string(temp_df$text[1], exclusion))
  expect_true(extract_pmhflag_string(temp_df$text[2], exclusion))
  expect_false(extract_pmhflag_string(temp_df$text[3], exclusion))
})
# Heart-failure variant: only row 3 (which mentions HF) should flag.
test_that("extract_pmhflag_string_for_hf", {
  expect_equal(length(extract_pmhflag_string_for_hf(temp_str)), 1)
  expect_false(extract_pmhflag_string_for_hf(temp_str))
  expect_false(extract_pmhflag_string_for_hf(temp_df$text[1]))
  expect_false(extract_pmhflag_string_for_hf(temp_df$text[2]))
  expect_true(extract_pmhflag_string_for_hf(temp_df$text[3]))
})
# Data-frame wrappers: bad column name errors; otherwise a pmhflag column
# (list type) is appended.
test_that("extract_pmhflag_df", {
  expect_error(extract_pmhflag_df(temp_df, 'test', exclusion), "The supplied diagnosis_column argument does not match a column name in the supplied data frame.")
  expect_equal(dim(extract_pmhflag_df(temp_df, 'text', exclusion)), c(3,3))
  expect_equal(extract_pmhflag_df(temp_df, 'text', exclusion)$pmhflag, list(TRUE, TRUE, FALSE))
})
test_that("extract_pmhflag_df_for_hf", {
  expect_error(extract_pmhflag_df_for_hf(temp_df, 'test'), "The supplied diagnosis_column argument does not match a column name in the supplied data frame.")
  expect_equal(dim(extract_pmhflag_df_for_hf(temp_df, 'text')), c(3,3))
  expect_equal(extract_pmhflag_df_for_hf(temp_df, 'text')$pmhflag, list(FALSE, FALSE, TRUE))
})
# temp_str = "blah past medical history: smoking related emphysema. blah: test"
# extract_pastmedicalhistory_string(temp_str)
# extract_pmhflag_string(temp_str, 'smok') # TRUE
# extract_pmhflag_string(temp_str, 'etoh') # FALSE
#
# temp_df = data.frame(id = c(1,2,3),
#                      text = c("blah\nPast medical history: smoking related emphysema. blah: test",
#                               "blah PRIOR MEDICAL HISTORY: excessive etoh. blah: test",
#                               "blah history of present illness: obesity, hypertension, HF etc blah: test"))
#
# extract_pmhflag_df(temp_df, 'text', c('smok', 'etoh'))
# extract_pmhflag_df_for_hf(temp_df, 'text')
|
f09417fc75edfd545ce9194c62204fe1be974786
|
5c8d345990b7c849d2842633f0d98d992ea90b01
|
/man/graph.diffuseP1.Rd
|
2c40b4068afe18f68f068301a8f260b1a22a8f6f
|
[
"MIT"
] |
permissive
|
BRL-BCM/CTD
|
0aaf0fc12efd45b32d397d40e9e62743e7e59bf9
|
67e65f42f329f8c089b7ee35e1621a81f9fd8bad
|
refs/heads/master
| 2023-08-17T16:06:43.246761
| 2023-08-10T06:53:12
| 2023-08-10T06:53:12
| 161,826,954
| 6
| 8
|
NOASSERTION
| 2023-08-10T06:53:13
| 2018-12-14T18:44:37
|
Jupyter Notebook
|
UTF-8
|
R
| false
| true
| 3,059
|
rd
|
graph.diffuseP1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph.diffuseP1.r
\name{graph.diffuseP1}
\alias{graph.diffuseP1}
\title{Diffuse Probability P1 from a starting node}
\usage{
graph.diffuseP1(p1,sn,G,vNodes,thresholdDiff,adj_mat,verbose=FALSE,
out_dir="",r_level=1,coords=NULL)
}
\arguments{
\item{p1}{- The probability being dispersed from the starting node,
sn, which is preferentially distributed between
network nodes by the probability diffusion algorithm
based solely on network connectivity.}
\item{sn}{- "Start node", or the node most recently visited by the
network walker, from which p1 gets dispersed.}
\item{G}{- A list of probabilities, with names of the list being the
node names in the network.}
\item{vNodes}{- "Visited nodes", or the history of previous draws
in the node ranking sequence.}
\item{thresholdDiff}{- When the probability diffusion algorithm exchanges
this amount (thresholdDiff) or less between nodes,
the algorithm returns up the call stack.}
\item{adj_mat}{- The adjacency matrix that encodes the edge weights for
the network, G.}
\item{verbose}{- If debugging or tracking a diffusion event, verbose=TRUE
will activate print statements. Default is FALSE.}
\item{out_dir}{- If specified, an image sequence will be generated in
                 the specified output directory.}
\item{r_level}{- "Recursion level", or the current depth in the call stack
caused by a recursive algorithm. Only relevant if out_dir
is specified.}
\item{coords}{- The x and y coordinates for each node in the network, to
remain static between images. Only relevant if out_dir
is specified.}
}
\value{
G - A list of returned probabilities after the diffusion of
probability has truncated, with names of the list being the node names
in the network.
}
\description{
Recursively diffuse probability from a starting node based on the
connectivity of the network, representing the likelihood that a
variable is most influenced by a perturbation in the starting node.
}
\examples{
# Read in any network via its adjacency matrix
adj_mat=rbind(c(0,1,2,0,0,0,0,0,0), #A's neighbors
c(1,0,3,0,0,0,0,0,0), #B's neighbors
c(2,3,0,0,1,0,0,0,0), #C's neighbors
c(0,0,0,0,0,0,1,1,0), #D's neighbors
c(0,0,1,0,0,1,0,0,0), #E's neighbors
c(0,0,0,0,1,0,0,0,0), #F's neighbors
c(0,0,0,1,0,0,0,1,0), #G's neighbors
c(0,0,0,1,0,0,1,0,0), #H's neighbors
c(0,0,0,0,0,0,0,0,0) #I's neighbors
)
rownames(adj_mat)=c("A","B","C","D","E","F","G","H","I")
colnames(adj_mat)=c("A","B","C","D","E","F","G","H","I")
G=vector(mode="list", length=ncol(adj_mat))
names(G)=colnames(adj_mat)
G=lapply(G, function(i) i[[1]]=0)
probs_afterCurrDraw=graph.diffuseP1(p1=1.0, sn=names(G)[1], G=G,
vNodes=names(G)[1],
thresholdDiff=0.01, adj_mat, TRUE)
}
\keyword{diffusion}
\keyword{network}
\keyword{probability}
\keyword{walker}
|
f7d67e1fa353ed46a7ce1229c5c08b7c1ed2431a
|
eab2d831fb2d60ca810d4a82f00c431f7fd2a74f
|
/R/make_time_windows.R
|
fef5b36fde534262ff8d84d7d447e78a03e2b6d5
|
[] |
no_license
|
fossabot/circadian-dynamics
|
51236723f853713081129502cfb1cb0ffb308530
|
959eebf225045767cee0905792ed87bc43d0b8ca
|
refs/heads/master
| 2022-09-17T02:44:50.481183
| 2020-06-03T16:35:59
| 2020-06-03T16:35:59
| 269,140,681
| 0
| 0
| null | 2020-06-03T16:35:58
| 2020-06-03T16:35:58
| null |
UTF-8
|
R
| false
| false
| 1,666
|
r
|
make_time_windows.R
|
# Takes a time series data.frame and returns iterable time windows of any size
#' Create iterable time windows
#' @description Creates iterable time windows for a data.frame
#' @usage make_time_windows(data = NULL, window_size_in_days = 3, window_step_in_days = 1)
#' @param data a data.frame with 2 columns. Column 1 must be a POSIXct object and column 2 must be the measurement values.
#'   NOTE(review): the filtering step below references the time column by the
#'   literal name `datetime`, so column 1 must also be named `datetime` --
#'   confirm with callers.
#' @param window_size_in_days a numeric indicating the width of the window size in day units.
#' @param window_step_in_days a numeric indicating the amount of days by which to move the window in day units.
#'
#' @return
#' A data.frame containing:
#'
#' window iterator by which to subset the data.frame
#' values raw measurement data
#' @export
#'
#' @examples
#' windowed_data <- make_time_windows(data = df)
#'
make_time_windows <- function(data = NULL, window_size_in_days = 3, window_step_in_days = 1){
  # Set parameters
  window_size <- lubridate::days(window_size_in_days) #Width of the window
  window_step <- lubridate::days(window_step_in_days) #Days to move the window
  # Finding dates where the window does not exceed the last time point in the data.
  # Candidate start dates run in 1-day steps from one day before the first
  # observation to one day after the last observation.
  days_in_data <- seq(from = min(data[[1]]) - lubridate::days(1), to = max(data[[1]] + lubridate::days(1)), by = "1 day")
  usable_dates <- days_in_data[!(days_in_data + window_step + window_size >= max(data[[1]]))]
  # Creating a new data.frame where data is partitioned by window; the `window`
  # column (from .id) is the window's index as a character string, and rows may
  # appear in several windows when windows overlap.
  windowed_data <- purrr::map_df(usable_dates,
                                 ~ dplyr::filter(data, datetime >= . + window_step & datetime <= . + window_step + window_size),
                                 .id = "window")
  return(windowed_data)
}
|
afa9f4188bf41a7919c65095850587db6dee0896
|
3a0190e0fd8786e7975de831d4dd1c24619aea25
|
/man/abilene.Rd
|
702e7a74ca910316a343c6119b5e96b172e3670b
|
[] |
no_license
|
sguo28/networkTomography
|
9c0e5315be379bd2e6078cd48fd1ec15ea115b67
|
4ad68e48b029beb17b617847f5dd7ea85ec7d06f
|
refs/heads/master
| 2021-05-27T03:51:29.681158
| 2014-01-10T03:47:10
| 2014-01-10T03:47:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,915
|
rd
|
abilene.Rd
|
\docType{data}
\name{abilene}
\alias{abilene}
\title{Abilene data from Fang et al. (2007)}
\usage{
abilene
}
\description{
Data from the 12 node Abilene network from Fang et al.
(2007). Both the OD flows and the topology correspond to
the actual network. This is the X1 dataset from the given
paper.
}
\section{Objects}{
The list abilene, which contains several objects:
\itemize{ \item \code{A}, the routing matrix for this
network (truncated for full row rank) \item \code{X}, a
matrix of origin-destination flows formatted for analysis
\item \code{Y}, a matrix of link loads formatted for
analysis \item \code{A.full}, the routing matrix for this
network (without truncation for full row rank) \item
\code{Y.full}, a matrix of link loads corresponding to
\code{A.full} } In this data, we have \code{A \%*\% t(X)
== t(Y)} and \code{A.full \%*\% t(X) == t(Y.full)}
}
\section{Variables}{
The list abilene contains the following: \itemize{ \item
The routing matrix \code{A}. The columns of this matrix
correspond to individual OD flows (the columns of X), and
its rows correspond to individual link loads (the columns
of Y). \item The OD matrix X. Columns correspond to
individual OD flows, and the rows correspond to
observations. \item The link load matrix Y. Columns of
the Y matrix correspond to individual link loads, and the
rows correspond to observations. \item The routing
matrix \code{A.full}. This is the complete routing matrix
before reduction for full row-rank. \item The link load
matrix Y.full, corresponding to A.full. }
}
\references{
J. Fang, Y. Vardi, and C.-H. Zhang. An iterative
tomogravity algorithm for the estimation of network
traffic. In R. Liu, W. Strawderman, and C.-H. Zhang,
editors, Complex Datasets and Inverse Problems: Tomography,
Networks and Beyond, volume 54 of Lecture Notes-Monograph
Series. IMS, 2007.
}
\keyword{datasets}
|
e9e33b1af0c1809cf0d8c7d945227a6681563d31
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/encode/examples/encode.Rd.R
|
9f89a2587a6321a735544af5ff92661dbc057675
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 933
|
r
|
encode.Rd.R
|
# Extracted example code for encode::encode -- demonstrates building encoding
# strings, testing candidate strings with encoded(), and recovering codes and
# labels with codes()/decodes()/decode().
library(encode)
### Name: encode
### Title: Encode Factor-like Levels and Labels as a Simple String
### Aliases: encode
### ** Examples
# Encode two sets of levels with their labels at once.
a <- encode(
  x = list(
    c('M','F'),
    c(1:4)
  ),
  labels = list(
    c('male','female'),
    c('caucasian','asian','african',NA)
  )
)
# A single encoding of codes 1:2 with two labels.
b <- encode(c(1:2),c('pediatric','adult'))
a
b
c <- c('a',NA,'##b##')
# encoded() tests whether strings have the delimiter structure of an encoding.
encoded(a)
encoded(b)
encoded(c)
encoded(' //4// ')
# codes() and decodes() recover the code and label halves of an encoding.
codes(a)
codes(b)
codes(b,simplify=FALSE)
codes(c)
codes('..1..')
decodes(a)
decodes(b)
decodes(c)
# decode() maps values through an encoding string to their labels.
decode(1:4,'//1/a//2/b//3/c//')
decode(1:4,'//1/a//1/b//3/c//') # duplicate code: ignored
decode(1:4,'//1/a//2/a//3/c//') # duplicate decode: collapsed
# encode(c(1,1,2,3),c('a','b','c','d')) Warning: duplicate codes
## Don't show:
stopifnot(encoded('////'))
stopifnot(encoded('//a///'))
stopifnot(!encoded('///a//'))
stopifnot(!encoded('//a/a//b/b///'))
stopifnot(identical(decode(1:4),factor(1:4)))
## End(Don't show)
|
d0767a1dd3373bed7a67d63824a7d92a6589166a
|
06aca8bcd3aa514e69740b022333fe1502947e82
|
/syntax/00_start.R
|
192b5c81a99c3229ccebf2f208a4d9cddf4cc2da
|
[] |
no_license
|
m-sudmann-day/Kaggle-online-news-popularity
|
c673f0e337fb37aac08c1d42050fd6ae62afdd55
|
f81110ab56fc2aaabba734e5a930dc12c35e9fd2
|
refs/heads/master
| 2016-09-13T18:18:31.986435
| 2016-04-19T22:00:06
| 2016-04-19T22:00:06
| 56,633,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,019
|
r
|
00_start.R
|
################################################################################
# Barcelona Graduate School of Economics
# Master's Degree in Data Science
################################################################################
# Course   : Advanced Computational Methods
# Project  : Kaggle Competition
# Script   : 00_start.R
################################################################################
# Author   : Miquel Torrens, 2016.01.24
# Modified : Miquel Torrens, 2016.02.11
################################################################################
# source('/Users/miquel/Desktop/bgse/projects/kaggle/syntax/00_start.R')
################################################################################
################################################################################
# Optional first command-line argument: command printing the project root path
args <- commandArgs(trailingOnly = TRUE)
# Resolve the project root
if (is.na(args[1]) || .Platform$OS.type != 'unix') {
  PATH <- '/Users/miquel/Desktop/bgse/projects/kaggle/'  # Miquel
  if (.Platform$OS.type == 'windows') {
    PATH <- 'C:/OneDrive/BGSE/GitHub/kaggle/'  # Matthew
  }
  #PATH <- '/home/ubuntu/project/'
} else {
  PATH <- paste0(system(toString(args[1]), intern = TRUE), '/')
}
# Project name
PROJECT <- 'DS16T2.ACM_KAGGLE_COMP'
################################################################################
################################################################################
# Relative paths derived from the project root
DOCDIR     <- paste0(PATH, 'doc/')
DATADIR    <- paste0(PATH, 'data/')
TEMPDIR    <- paste0(PATH, 'temp/')
INPUTDIR   <- paste0(PATH, 'input/')
OUTPUTDIR  <- paste0(PATH, 'output/')
SYNTAXDIR  <- paste0(PATH, 'syntax/')
SCRIPTSDIR <- paste0(SYNTAXDIR, 'scripts/')
# Create the folder tree (existing folders are skipped silently)
for (folder in c(PATH, DOCDIR, DATADIR, TEMPDIR, INPUTDIR, OUTPUTDIR,
                 SYNTAXDIR, SCRIPTSDIR)) {
  try(dir.create(folder, showWarnings = FALSE))
}
# Project index
source(paste0(SYNTAXDIR, '00_index.R'))
################################################################################
################################################################################
# Settings
# Check R version
check.version(dev.R = '3.2.2 x86_64')
# Print starting time
bs <- begin.script(script = paste0('[', PROJECT, '] 00_start.R'))
# Packages needed
load.packages(pkgs = c('class', 'randomForest', 'MASS', 'nnet', 'ggplot2',
                       'foreach', 'doMC'))
# Stone parameters
today <- format(Sys.time(), '%Y%m%d')
# # CRS
# CRS_GOOGLE <- CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# Record timing
end.script(begin = bs, end = Sys.time()); rm(bs)
################################################################################
# END OF SCRIPT
|
f3ac58bc652c8e4698898ff010432608d47b6cb9
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/risk_factors_code/air/paf/save_results_parent.R
|
967beb1d90f7f0369d34e326c5f83402b5ec7b5c
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,863
|
r
|
save_results_parent.R
|
#----HEADER----------------------------------------------------------------------------------------------------------------------
# Author: NAME
# Date: 4/21/2018
# Purpose: Launches save_results for air_pm, air_hap and air (new proportional PAFs method)
#          source("FILEPATH.R", echo=T)
#          qsub -N save_air -pe multi_slot 1 -P ADDRESS -o FILEPATH -e FILEPATH FILEPATH.sh FILEPATH.R
#*********************************************************************************************************************************
#----CONFIG----------------------------------------------------------------------------------------------------------------------
# clear memory (kept from the original standalone script; avoid in reusable code)
rm(list=ls())
user <- 'USERNAME'
# runtime configuration: resolve cluster vs local filesystem roots
if (Sys.info()["sysname"] == "Linux") {
  j_root <- "ADDRESS"
  h_root <- "ADDRESS"
  central_lib <- "ADDRESS"
} else {
  j_root <- "ADDRESS"
  h_root <- "ADDRESS"
  central_lib <- "ADDRESS"
}
pacman::p_load(data.table, magrittr)
project <- "-P ADDRESS"
sge.output.dir <- paste0(" -o ADDRESS", user, "ADDRESS -e ADDRESS", user, "ADDRESS")
#sge.output.dir <- "" # toggle to run with no output files
save.script <- "-s FILEPATH.R"
r.shell <- "FILEPATH.sh"
paf.version <- 51
decomp <- "step4"
# one row per save_results job: risk name plus its modelable-entity and REI ids
risks <- data.table(risk=c("air_pmhap","air_pm","air_hap"), me_id=c(20260,8746,8747), rei_id=c(380,86,87))
# Submit one qsub per row of `risks`.
# Renamed from `save` so it no longer shadows base::save().
# @param i row index into `risks`
# @return the exit status returned by system() for the qsub call
submit_save_job <- function(i){
  # positional arguments forwarded to the save script
  args <- paste(risks[i,risk],
                risks[i,me_id],
                risks[i,rei_id],
                paf.version,
                decomp)
  # SGE resource requests
  mem <- "-l m_mem_free=75G"
  fthread <- "-l fthread=10"
  runtime <- "-l h_rt=06:00:00"
  archive <- "-l archive=TRUE"
  jname <- paste0("-N ","save_results_",risks[i,risk])
  system(paste("qsub",jname,mem,fthread,runtime,archive,project,"-q long.q",sge.output.dir,r.shell,save.script,args))
}
complete <- lapply(seq_len(nrow(risks)), submit_save_job)
|
192491c197e3f8ba83a84cdc634be9a6e529de4e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/cloudfront_get_continuous_deployment_policy_config.Rd
|
cc0e6499c7e9c08e913bd7f9149ac4bbf2fcd312
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 707
|
rd
|
cloudfront_get_continuous_deployment_policy_config.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudfront_operations.R
\name{cloudfront_get_continuous_deployment_policy_config}
\alias{cloudfront_get_continuous_deployment_policy_config}
\title{Gets configuration information about a continuous deployment policy}
\usage{
cloudfront_get_continuous_deployment_policy_config(Id)
}
\arguments{
\item{Id}{[required] The identifier of the continuous deployment policy whose configuration
you are getting.}
}
\description{
Gets configuration information about a continuous deployment policy.
See \url{https://www.paws-r-sdk.com/docs/cloudfront_get_continuous_deployment_policy_config/} for full documentation.
}
\keyword{internal}
|
3c4ab216c425711829325b99008b475e7c78c46c
|
8d34ff6dee9fcf523ca3202b971e1b39d07fc749
|
/man/Tosls.formula.Rd
|
0ad29cbf923f23bb6cb7d1cad1ff0245e357ba81
|
[] |
no_license
|
cran/tosls
|
224bca64ae9d83a7db2e9e4ffab5197868796b11
|
189ebad66e70baa8258e2cb8920a422abc7554b5
|
refs/heads/master
| 2020-06-05T10:50:31.564993
| 2014-03-31T00:00:00
| 2014-03-31T00:00:00
| 18,368,877
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 270
|
rd
|
Tosls.formula.Rd
|
\name{Tosls.formula}
\alias{Tosls.formula}
\title{formula}
\usage{
\method{Tosls}{formula}(formula, data = list(), ...)
}
\arguments{
\item{formula}{PIB~INF+TIR|Cap+m2r}
\item{data}{the dataframe}
\item{...}{not used}
}
\description{
formula
}
|
31764ca3aab04c801d87b7d0dd48b23165fc9882
|
975bbef69335a0f5a05c69adee0a61141311c669
|
/bin/generateScreenBatchJobs.R
|
de513ad6acf3003cb9ab2ce04ab2baa6857a5868
|
[] |
no_license
|
mzager/crispr-screen-nf
|
49386dc7aa15cbf6c706e40852d56476d0bfc12c
|
d02789306d7e3d6bbfc375b526505b37874e44e7
|
refs/heads/main
| 2023-07-06T18:03:33.149593
| 2021-08-11T21:15:11
| 2021-08-11T21:15:11
| 394,996,191
| 0
| 0
| null | 2021-08-11T13:20:29
| 2021-08-11T13:20:29
| null |
UTF-8
|
R
| false
| false
| 3,361
|
r
|
generateScreenBatchJobs.R
|
options(stringsAsFactors=F)
# Read the Illumina sample sheet for this flow cell; column 3 holds sample names
sampleSheet<-read.csv("/fh/fast/_SR/Genomics/ngs/illumina/solexa/SampleSheets/190812_D00300_0802_BH3FV2BCX3_lcarter.csv")
names(sampleSheet)[3] <- "Sample"
#TO DO: add some sanity checks
#fastx toolkit requires that the sample names be alphanumeric
sampleSheet$Sample<-gsub("-","_",sampleSheet$Sample)
sampleSheet$Sample<-gsub(" ","_",sampleSheet$Sample)
#moabAcct<-"paddison_p"
sampleSheet <- data.frame(Sample = sampleSheet$Sample)
sampleSheet$User <- "lcarter"
sampleSheet$FlowCell <- "190812_D00300_0802_BH3FV2BCX3"
sampleSheet <- unique(sampleSheet); nrow(sampleSheet) #31
# associate each sample with the correct sgRNA library reference
# (default Kinetochore; overridden when the sample name contains a library name)
sampleSheet$Reference <- "Kinetochore"
sampleSheet$Reference[grep("CPD", sampleSheet$Sample)] <- "CPD"
sampleSheet$Reference[grep("Brunello", sampleSheet$Sample)] <- "Brunello"
sampleSheet$Reference[grep("Spliceosome", sampleSheet$Sample)] <- "Spliceosome"
# Write one <Sample>.sbatch job script per sample. All four references share
# the same directory layout, so the bowtie genome path is derived from the
# reference name instead of four copy-pasted branches.
for (i in seq_len(nrow(sampleSheet))) {
  sbatch_file <- paste0(sampleSheet$Sample[i], ".sbatch")
  # helper: write one line to the current sbatch file
  emit <- function(line, append = TRUE) {
    cat(line, file = sbatch_file, sep = "\n", append = append)
  }
  emit("#!/bin/bash", append = FALSE)  # first line truncates any existing file
  emit("#SBATCH -N1 -n1 -t 0-4 -p campus --mail-type=END --mail-user=pchanana@fhcrc.org ")
  emit("PATH=/home/solexa/apps/fastx_toolkit_0.0.13:/app/cutadapt/1.1/bin:/home/solexa/apps/bowtie/bowtie-1.0.0:$PATH")
  #demultiplex
  emit("runDir=/fh/fast/_SR/Genomics/user/pchanana/2019.08.15.lcarter/align")
  emit("fastqDir=$runDir/fastq")
  emit("cd $fastqDir")
  emit(paste0("sampleName=", sampleSheet$Sample[i]))
  #align: pick the bowtie index matching this sample's reference
  ref <- sampleSheet$Reference[i]
  if (ref %in% c("Brunello", "CPD", "Kinetochore", "Spliceosome")) {
    emit(paste0("bowtieGenome=/shared/solexa/solexa/Genomes/genomes/sgRNA/", ref, "/", ref, ".fa"))
  }
  emit("bowtieDir=$runDir/bowtie")
  emit("mkdir -p $bowtieDir")
  emit("zcat $sampleName.fastq.gz | bowtie -p 1 --trim5 30 --trim3 0 -n 0 $bowtieGenome - $bowtieDir/$sampleName.bt")
  emit("exit 0")
}
#system("mv *.sbatch ~/Desktop/ngs/ngs/illumina/lcarter/190812_D00300_0802_BH3FV2BCX3/qsub_files")
|
8ae0bff341c354d461eb2b69536e847cb4e7e142
|
f58d73bb5d624a78c329e79a60d5fb06b4c36837
|
/inst/NRM/global.R
|
3270fe2b6e3272eeac24687d849a09e426cb645c
|
[] |
no_license
|
cran/irtDemo
|
be108fc0c36aa0328f1ed23b5d2c153ed3c0b701
|
3b36e362d74563f404374c8333f11cda023abc70
|
refs/heads/master
| 2020-04-06T07:01:12.326558
| 2018-04-05T19:29:46
| 2018-04-05T19:29:46
| 57,357,089
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
global.R
|
# Parameter grids for the NRM shiny app (person ability theta, item
# location delta, item slope alpha), each on [-6, 6] in steps of 0.1.
thetas <- seq(from=-6, to=6, by=0.1)
deltas <- seq(from=-6, to=6, by=0.1)
alphas <- seq(from=-6, to=6, by=0.1)
N <- length(thetas)
# Pre-allocated numerator holders, kept for backward compatibility.
# NOTE(review): the p*fun functions below return their own values and never
# fill these matrices -- confirm nothing else in the app reads them.
p2num <- matrix(NA,nrow=N,ncol=1)
p3num <- matrix(NA,nrow=N,ncol=1)
p4num <- matrix(NA,nrow=N,ncol=1)
p5num <- matrix(NA,nrow=N,ncol=1)
# Shared kernel: exp(D * alpha * (theta - delta)), the category numerator.
# All four p*fun wrappers (previously verbatim copies of this formula)
# delegate here; their signatures are unchanged.
category_numerator <- function(alpha, theta, delta, D) {
  exp(D * alpha * (theta - delta))
}
# Numerator for response category 2.
p2fun <- function(alpha1, theta, delta1, D){
  category_numerator(alpha1, theta, delta1, D)
}
# Numerator for response category 3.
p3fun <- function(alpha2, theta, delta2, D){
  category_numerator(alpha2, theta, delta2, D)
}
# Numerator for response category 4.
p4fun <- function(alpha3, theta, delta3, D){
  category_numerator(alpha3, theta, delta3, D)
}
# Numerator for response category 5.
p5fun <- function(alpha4, theta, delta4, D){
  category_numerator(alpha4, theta, delta4, D)
}
|
cb27b78d71e673e12890c33c6721ade4e9d2371d
|
af239a6dd830bf1c2d0096796384bcea303a113b
|
/PSet5/multicontrast.R
|
0fa1b0bfe9fc01915d160f22c3616b0b2fcbaa8c
|
[] |
no_license
|
davidlieberman/SDS363
|
701611bf972b992f63667d56122d0a0574edda67
|
0724a9de3456e8f8c1648f2e035506ea4423e78b
|
refs/heads/master
| 2021-07-06T11:53:04.404758
| 2020-09-12T00:40:48
| 2020-09-12T00:40:48
| 169,842,894
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
multicontrast.R
|
# Multivariate contrast test across groups using Wilks' lambda.
# Builds the hypothesis (between) and error (within) SSCP matrices for a
# single contrast over the group mean vectors, then an approximate F test.
# NOTE(review): `contrast` is presumably a vector of contrast weights, one per
# group (typically summing to zero) -- confirm with callers. `data` is treated
# as all-numeric columns; `grouping` assigns a group label to each row.
# Returns a named vector: Wilks, approx.F, df1, df2, p.value.
multicontrast <- function(contrast, data, grouping){
  # Groups
  groups <- as.vector(as.matrix(unique(grouping)))
  # Multivariate Means for Each Variable in Each Group
  M <- apply(data, 2, function(y) tapply(y, grouping, mean))
  # reorder rows of M so they align with the order of `groups` (and `contrast`)
  M <- M[match(groups, row.names(M)),]
  # Counts for Each Group
  N <- table(grouping)
  N <- N[match(groups, row.names(N))]
  # Calculate Weighted Sum of Squared Weights
  SW <- sum(contrast^2 / N)
  # Calculate SSCP Between (Hypothesis Matrix): outer product of the estimated
  # contrast of mean vectors, scaled by 1/SW
  C_hat <- colSums(contrast*M)
  SSCP_between <- (cbind(C_hat) %*% rbind(C_hat)) / SW
  # Calculate SSCP Within (Error Matrix): pooled cross-products of the
  # within-group deviations from each group's mean vector
  SSCP_within_each_group <- list()
  for (i in seq_along(groups)){
    X <- subset(data, grouping == groups[i])
    deviations <- matrix(0, nrow(X), ncol(X))
    for (j in 1:ncol(X)){
      deviations[,j] <- X[,j] - M[i,j]
    }
    SSCP_within_each_group[[i]] <- t(deviations) %*% deviations
  }
  SSCP_within <- Reduce("+", SSCP_within_each_group)
  # Calculate Wilks' Lambda = |E| / |H + E|
  lambda <- det(SSCP_within) / det(SSCP_between + SSCP_within)
  # Calculate Degrees of Freedom
  p <- ncol(data) # no.variables
  m <- nrow(data) - length(groups) # no.observations - no.groups
  df1 <- p
  df2 <- m-p+1
  # Calculate approx. F from lambda
  # NOTE(review): this is the standard transformation for a 1-df hypothesis;
  # verify the df formula against the intended reference before reuse.
  F.stat <- (1 - lambda) / lambda * df2 / df1
  # Calculate p-value from F distribution (upper tail)
  p.value <- 1 - pf(F.stat, df1, df2)
  # Output: named vector of the test results
  out <- c(lambda, F.stat, df1, df2, p.value)
  names(out) <- c("Wilks", "approx.F", "df1", "df2", "p.value")
  return(out)
}
|
1b22c227b5e808a4f44416caff8a56b8fe40f089
|
ffbb81116f468b997ce33496367f01acc667b392
|
/man/plotM.Rd
|
a38152a1af042b2af7cc3323925cda3ccbf2cd7b
|
[] |
no_license
|
cran/ctrlGene
|
2fb9e27eeaf24c5be29d8ee8495cf4a215fbe75c
|
735bf747ffc6bd5346df1898a1577b7831d55ce4
|
refs/heads/master
| 2020-03-30T07:57:35.694817
| 2019-07-04T08:40:28
| 2019-07-04T08:40:28
| 150,977,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
plotM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geNorm.R
\name{plotM}
\alias{plotM}
\title{Plots average M of remaining genes}
\usage{
plotM(Mrem)
}
\arguments{
\item{Mrem}{the result returned by the geNorm() function}
}
\description{
This function plots the average expression stability values of remaining control genes.
}
\examples{
FIB
x=geNorm(FIB,ctVal=FALSE)
plotM(x)
FIBct
y=geNorm(FIBct)
plotM(y)
}
\references{
Vandesompele J, De Preter K, Pattyn F, Poppe B, Van Roy N, De Paepe A, Speleman F (2002) <doi: 10.1186/gb-2002-3-7-research0034>.
}
|
a83537741471047ae31e611d4cf9ecf4d696521f
|
257bd63361aa846ffdacdc15edaecf84c6364e78
|
/rsou/pro2/ex16cor.R
|
88e835dd9cf3f0e8dcb7da85c814264307d2d761
|
[] |
no_license
|
gom4851/hcjeon
|
86dcfd05ce47a13d066f13fe187d6a63142fb9fe
|
59a00ca9499f30e50127bb16eb510553e88ace43
|
refs/heads/master
| 2020-06-04T23:16:08.632278
| 2019-01-15T09:54:08
| 2019-01-15T09:54:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,463
|
r
|
ex16cor.R
|
# Correlation analysis exercise on a drinking-water survey dataset.
# Column names are Korean: 친밀도 = familiarity, 적절성 = appropriateness,
# 만족도 = satisfaction. (Comments below translated from Korean.)
result <- read.csv("testdata/drinking_water.csv", header = T)
head(result)
summary(result)
# Standard deviations of the three survey variables
sd(result$친밀도) # 0.9703446
sd(result$적절성) # 0.8596574
sd(result$만족도) # 0.8287436
# Check normality visually via histograms
hist(result$친밀도)
hist(result$적절성)
hist(result$만족도)
# cov: inspect covariance, as groundwork for the correlation coefficient.
cov(1:5, 2:6) # 2.5
cov(1:5, c(3, 3, 3, 3, 3)) # 0; a covariance of 0 means no linear relationship -- checks whether one variable moves with the other.
cov(1:5, 5:1) # -2.5
# Covariance of the survey variables
cov(result$친밀도, result$적절성) # 0.4164218
cov(result)
# Correlation coefficient
?cor
cor(result$친밀도, result$적절성) # 0.4992086
cor(result$친밀도, result$만족도) # 0.467145
cor(result$적절성, result$만족도) # 0.7668527
cor(result$적절성, result$만족도, result$친밀도) # X -- invalid: cor() does not accept three data vectors
cor(result$적절성 + result$만족도, result$친밀도) # OK / 0.5143693
cor(result$적절성 + result$친밀도, result$만족도) # 0.7017394
cor(result)
cor(result, method = "pearson") # for interval/ratio-scale variables
#cor(result, method = "spearman") # for ordinal-scale variables
symnum(cor(result)) # show correlations as symbols
install.packages("corrgram")
library(corrgram)
corrgram(result)
corrgram(result, upper.panel = panel.conf) # confidence panel on the upper triangle
corrgram(result, lower.panel = panel.conf) # confidence panel on the lower triangle
install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
chart.Correlation(result, histogram = , pch = "+")
|
4b13f907b842d179090e469677795e99af01e0d1
|
398f4240620627d11768107f84da044f160aa065
|
/text analysis II.R
|
6e4c73beb062a95040288acaaa8010e6d18cd8fb
|
[] |
no_license
|
Yu-study/R_language_script
|
b70e4351bb60ccf79e2dda7067da7bfd5e5fbc99
|
528083b3d4ab24f26513dc2a908f9760001e3a52
|
refs/heads/master
| 2020-07-03T11:18:21.626531
| 2019-08-14T07:47:02
| 2019-08-14T07:47:02
| 201,889,768
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 2,481
|
r
|
text analysis II.R
|
# Chinese text-mining workflow: word segmentation (Rwordseg), document-term
# matrix construction (tm/tmcn), frequency and association analysis,
# clustering, and topic modelling. (Comments translated from Chinese.)
# Set the document storage location (working directory)
# setwd("C:/Users/apple/Desktop/Textasdata")
library(rJava);
library(Rwordseg);
library(tm);
# Install the Chinese tm companion package (tmcn)
#install.packages("C:\\SogouDownload\\tmcn_0.1-4.tar", repos=NULL, type="source")
library(tmcn)
library(tm)
library(Rwordseg)
# lecture<-scan(file.choose(),sep="\n",what="",encoding="UTF-8")
# names(lecture)
# nchar(lecture)
# == Text preprocessing
# res=lecture[lecture!=" "];
# ls()
fix(res)
# Strip URLs
# res=gsub(pattern="http:[a-zA-Z\\/\\.0-9]+","",res);
# res=gsub(pattern="[n|t]","",res);
# Strip specific words (Chinese function words and case-specific terms)
# res=gsub(pattern="[我|你|的|了|一下|一个|没有|这样|现在|原告|被告|北京|法院|简称]","",res);
# Strip digits
# res=gsub(pattern="/^[^0-9]*$/","",res);
# Install additional dictionaries (Sogou .scel lexicons)
#installDict("C:\\SogouDownload\\diming.scel","diming")
#installDict("C:\\SogouDownload\\xinli.scel","xinli")
#installDict("C:\\SogouDownload\\zzx.scel","zzx")
#listDict()
# uninstallDict()
# d.vec <- segmentCN("samgov.csv", returnType = "tm")
# Read the file after word segmentation. R assumes Chinese text is GBK-encoded
# by default; if the file is UTF-8, pass encoding="UTF-8".
samgov.segment <- read.table("samgov.segment.csv", header = TRUE, fill = TRUE, stringsAsFactors = F,
                             sep = ",")
fix(samgov.segment)
# Build the document-term matrix (DTM)
d.corpus <- Corpus(VectorSource(samgov.segment$content))
# fix(d.corpus )
inspect(d.corpus[1:20])
d.corpus <- tm_map(d.corpus, removeWords, stopwordsCN())
ctrl <- list(removePunctuation = TRUE, removeNumbers= TRUE,stopwords = stopwordsCN(), wordLengths = c(2, Inf))
d.dtm <- DocumentTermMatrix(d.corpus, control = ctrl)
inspect(d.dtm[1:10, 110:112])
fix(d.dtm)
# Term-frequency analysis: terms occurring at least 100 / 50 times
findFreqTerms(d.dtm,100)
findFreqTerms(d.dtm,50)
# Term association (co-occurrence) analysis
findAssocs(d.dtm, "社会", 0.5)
findAssocs(d.dtm, "征用", 0.5)
# Drop sparse terms from the matrix
d.dtm.sub <- removeSparseTerms(d.dtm, 0.99)
dim(d.dtm.sub)
dim(d.dtm)
findAssocs(d.dtm.sub, "农民", 0.5)
# Cluster analysis
library(proxy) # proxy's dist() computes cosine similarity between documents, used as the basis for clustering
d.dist <- proxy:: dist(as.matrix(d.dtm),method='cosine')
heatmap(as.matrix(d.dist),labRow=FALSE, labCol=FALSE)
d.clust <- hclust(d.dist) # hierarchical clustering
result<-cutree(d.clust,k=5)
summary(result)
result
plot(d.clust)
# Topic modelling (correlated topic models)
library(topicmodels)
ctm<-CTM(d.dtm,k=5, control=list(seed=111))
Terms <- terms(ctm, 10)
Terms[,1:5]
ctm<-CTM(d.dtm,k=10, control=list(seed=1234))
Terms <- terms(ctm, 20)
Terms[,1:10]
|
d82a1af7e7c3cd92d9cfb9d101f73f7c1d14e439
|
a06941ba61a0ee4482c2cee1f80119b40b955b9f
|
/man/getFolds.Rd
|
e69fba51332d4652a84c2277bfc930c8ab87e385
|
[
"MIT"
] |
permissive
|
lwaldron/LeviRmisc
|
d44d2ff5a925e85ee359dd13d6ccf144b2547e0c
|
410cae0cb9dfb73373b3837333952caece64e3e7
|
refs/heads/master
| 2021-01-19T01:44:15.971949
| 2016-10-06T18:51:16
| 2016-10-06T18:51:16
| 12,511,219
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
rd
|
getFolds.Rd
|
\name{getFolds}
\alias{getFolds}
\title{split N samples into nfolds folds.}
\description{Convenience function for cross-validation: assigns each of N
samples to one of nfolds folds. Function by Levi Waldron.}
\usage{getFolds(N, nfolds)}
\arguments{
\item{N}{number of samples}
\item{nfolds}{number of folds}
}
\value{integer vector indicating to which fold each sample is assigned.}
\author{Levi Waldron and Markus Riester}
\examples{
getFolds(5, 3)
}
|
202e77b2b2907b5ca671dfcd44077d28ca498bbe
|
7be3d7253ab53bc69097d97aca5f380486f97763
|
/plot4.R
|
53ce5f13a5eb50eb95ff5f6d14f0e6cff5e6871a
|
[] |
no_license
|
ubuntukeeper/ExData_Plotting1
|
331b3c9a0de30e5e42510f1da5763fecfa314de1
|
a72776e48a2572c135961001c11b1578ac6c70a7
|
refs/heads/master
| 2020-05-29T09:52:04.205395
| 2015-11-08T19:40:53
| 2015-11-08T19:40:53
| 45,789,316
| 0
| 0
| null | 2015-11-08T16:50:39
| 2015-11-08T16:50:38
| null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
plot4.R
|
# plot4.R -- 2x2 panel figure from the UCI "Individual household electric power
# consumption" dataset, restricted to 2007-02-01 and 2007-02-02.
library(dplyr)

# Read the raw data: semicolon-separated, "?" marks missing values.
consmp <- read.csv("../supplemental/household_power_consumption.txt", sep=";", header=TRUE,
                   stringsAsFactors=FALSE, na.strings="?",
                   colClasses=c("character", "character", rep("numeric", 7)))
consmp_tab <- tbl_df(consmp)
# Keep only the two target days and build a POSIXct timestamp column.
consmp_tab <- consmp_tab %>%
  filter(Date == "1/2/2007" | Date == "2/2/2007") %>%
  mutate(DateTime=paste(Date, Time)) %>%
  mutate(DateTime=as.POSIXct(DateTime, format="%d/%m/%Y %H:%M:%S"))

png("plot4.png", width=480, height=480)
par(mfcol=c(2,2), mar=c(4,2,2,2))  # mfcol: panels fill column-wise (left column first)
with(consmp_tab, {
  # Top-left: global active power over time.
  plot(Global_active_power ~ DateTime, data=consmp_tab, type="l",
       xlab="", ylab="Global Active Power (kilowatts)")
  # Bottom-left: the three sub-metering series overlaid with a legend.
  plot(consmp_tab$DateTime, consmp_tab$Sub_metering_1, type="l",
       xlab="", ylab="Energy sub metering")
  lines(x=consmp_tab$DateTime, y = consmp_tab$Sub_metering_2, col="red")
  lines(x=consmp_tab$DateTime, y = consmp_tab$Sub_metering_3, col="blue")
  legend("topright", lty=c(1,1), col=c("black", "red", "blue"),
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # Top-right: voltage over time.
  plot(Voltage ~ DateTime, data=consmp_tab, type="l",
       xlab="datetime", ylab="Voltage")
  # Bottom-right: global reactive power over time.
  # Bug fix: the y-axis label previously said "Voltage" (copy-paste error).
  plot(Global_reactive_power ~ DateTime, data=consmp_tab, type="l",
       xlab="datetime", ylab="Global_reactive_power")
})
dev.off()
|
5af9ce057bcc43a345f807357b6d58a4eb45aa08
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/osqp/examples/osqp.Rd.R
|
6a1ac9626d09a88bbc5f2d4d631a8c76c1f422d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
osqp.Rd.R
|
library(osqp)
### Name: osqp
### Title: OSQP Solver object
### Aliases: osqp
### ** Examples
## example, adapted from OSQP documentation
library(Matrix)
# Quadratic program:  minimize 0.5 * x'Px + q'x   subject to   l <= Ax <= u
# P is the sparse quadratic cost matrix, q the linear cost term.
P <- Matrix(c(11., 0.,
0., 0.), 2, 2, sparse = TRUE)
q <- c(3., 4.)
# A: sparse linear constraint matrix (5 constraints on 2 variables).
A <- Matrix(c(-1., 0., -1., 2., 3.,
0., -1., -3., 5., 4.)
, 5, 2, sparse = TRUE)
# Constraint bounds: upper bounds u; lower bounds l (unbounded below).
u <- c(0., 0., -15., 100., 80)
l <- rep_len(-Inf, 5)
settings <- osqpSettings(verbose = FALSE)
# Build the solver object for this problem.
model <- osqp(P, q, A, l, u, settings)
# Solve
res <- model$Solve()
# Define new vector
q_new <- c(10., 20.)
# Update model and solve again
# (only the linear cost changes; the model object is reused rather than rebuilt)
model$Update(q = q_new)
res <- model$Solve()
|
347ddd95218ad94cdd510635719058e446450abd
|
ce208aa19eb9c9068d0ac206df4a16e25e4e11b5
|
/8章/8_1_DM購買促進問題/data.R
|
dcaff0b915d1f993b436b6022dbfa88e77a5c3b8
|
[
"MIT"
] |
permissive
|
yukirin/Bayesianstatistics
|
9f30cb349f1caed5c761fa342eef088a9c6dedad
|
9bc67fb15386392cf0918d7e1bd716d3c56d83a6
|
refs/heads/master
| 2020-03-10T17:17:26.466509
| 2018-04-14T09:34:45
| 2018-04-14T09:34:45
| 129,496,676
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
data.R
|
# Trial counts per group (model data for a BUGS/JAGS-style analysis).
N <- c(200, 200)

# Observed counts arranged as a 2 x 2 array (values filled column-major).
n <- array(c(128, 72, 97, 103), dim = c(2, 2))
|
ee6085389f17a66aa20ac166d5e7d5d33bff2793
|
d771ff12fe4ede6e33699704efa371a2f33cdfaa
|
/R/package.load.R
|
d25ba9f843215d788dcefe292f4d12c653307a8e
|
[
"MIT"
] |
permissive
|
ImmuneDynamics/Spectre
|
aee033979ca6a032b49ede718792c72bc6491db5
|
250fe9ca3050a4d09b42d687fe3f8f9514a9b3bf
|
refs/heads/master
| 2023-08-23T14:06:40.859152
| 2023-04-27T00:31:30
| 2023-04-27T00:31:30
| 306,186,694
| 52
| 17
|
MIT
| 2023-08-06T01:26:31
| 2020-10-22T01:07:51
|
HTML
|
UTF-8
|
R
| false
| false
| 1,656
|
r
|
package.load.R
|
#' package.load - a function to load (library) all required packages.
#'
#' This function allows you to load all of the common use packages dependencies for Spectre.
#'
#' @return Called for its side effect of attaching package libraries (via
#' require()); the return value is not meaningful.
#'
#' @param type DEFAULT = "general". If "general", then loads packages required for general Spectre usage. If "spatial", then loads additional packages required for spatial analysis.
#' NOTE(review): earlier documentation also advertised type = "ML", but no
#' "ML" branch exists in the function body -- any value other than "spatial"
#' loads only the general set. Confirm intended behavior.
#'
#'
#' @author Thomas M Ashhurst, \email{thomas.ashhurst@@sydney.edu.au}
#'
#' @references \url{https://sydneycytometry.org.au/spectre}
#'
#' @usage See \url{https://sydneycytometry.org.au/spectre} for usage instructions and vignettes.
#'
#' @examples
#' package.load()
#'
#' @export
package.load <- function(type = "general"){
  # Core packages attached on every call, regardless of `type`.
  require('devtools')
  require('data.table')
  require('plyr')
  require('dplyr')
  require('tidyr')
  require('rstudioapi')
  require('Rtsne')
  require('umap')
  require('reticulate')
  require('ggplot2')
  require('ggthemes')
  require('scales')
  require('colorRamps')
  require('RColorBrewer')
  require('gridExtra')
  require('ggpointdensity')
  require('pheatmap')
  require('ggpubr')
  require('caret')
  require('class')
  require('flowCore')
  require('Biobase')
  require('flowViz')
  require('FlowSOM')
  # Extra packages for spatial (imaging mass cytometry) analysis only.
  if(type == "spatial"){
    require('raster')
    require('tiff')
    require('rgeos')
    require('exactextractr')
    require('sp')
    require('sf')
    require('stars')
    require('qs')
    require('s2')
    require('rhdf5')
    require('HDF5Array')
  }
}
|
ddefd4e16dd1038d2eb3386b1e9125c0e5d50709
|
9fff0c4ea727dc15f59c70fcf58d112c27f9181f
|
/scri_cran/relative_expression_vs_osteo.R
|
4a1a61b2bc106cdf5e03b3af7dff9b09994297df
|
[
"MIT"
] |
permissive
|
kkdang/sage-data-analysis
|
d58a84daeffad6d3489f9ab00c6d967448c20109
|
b78ed29bd74afd7ee1bff27f19b88211b7a012d4
|
refs/heads/master
| 2020-04-12T09:04:47.222396
| 2018-10-25T22:38:27
| 2018-10-25T22:38:27
| 17,002,799
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,642
|
r
|
relative_expression_vs_osteo.R
|
#! /usr/bin/env Rscript
# Jan. 30, 2017
# KKD for Sage Bionetworks
# Compares relative expression of an osteogenesis gene panel against a set of
# candidate genes, using transcript-length-corrected residual expression and RPKM.
library(synapseClient)
synapseLogin()
library('githubr')
# Pull shared helper code from the Sage GitHub repository.
# NOTE(review): depends on a machine-local auth script and on `sageCode`
# defined by it -- not portable outside the original author's environment.
source('/Users/kkdang/Computing/rgithubclient_authenticate.R')
sourceRepoFile(sageCode, "scri_cran/process_metadata_validation.R")
setwd('~/Computing/cranio/')
library(biomaRt)
library(gplots)
library(edgeR)
# Connect to the Ensembl BioMart, human gene annotation dataset.
Hs = useMart('ENSEMBL_MART_ENSEMBL')
Hs = useDataset(dataset = "hsapiens_gene_ensembl",mart = Hs)
# get residual expression data from Synapse
resid = read.delim(getFileLocation(synGet("syn5873539")))
# get transcript lengths for protein-coding transcripts
transLengths = getBM(attributes = c("ensembl_gene_id", "transcript_length", "ensembl_transcript_id"),filters = "transcript_biotype",values = "protein_coding",mart = Hs)
head(transLengths)
# Return the length of the longest transcript annotated for the given Ensembl
# gene ID(s) in a biomaRt result table.
#
# ensg:           one Ensembl gene ID (a vector is accepted; the overall
#                 maximum across all matching rows is returned).
# transLengthsBM: data frame with columns 'ensembl_gene_id' and
#                 'transcript_length', as returned by getBM().
getLongestTranscript <- function(ensg, transLengthsBM) {
  is_target <- transLengthsBM$ensembl_gene_id %in% ensg
  max(transLengthsBM$transcript_length[is_target])
}
# Longest transcript length per gene, indexed by Ensembl gene ID.
longLengths = lapply(as.list(unique(transLengths$ensembl_gene_id)),function(s){getLongestTranscript(ensg = s,transLengths)})
names(longLengths) = unique(transLengths$ensembl_gene_id)
# get gene lists: osteogenesis panel (from file) and candidate genes (hard-coded)
osteoAll = read.delim('~/Downloads/GeneList-from QiagenRT2-profiler_for Kristin to check expression in candidates.txt',header = FALSE)
osteo = data.frame(geneNames = osteoAll[-which(duplicated(osteoAll[,1])),])
# Map HGNC symbols to Ensembl gene IDs.
symb = getBM(attributes = c("ensembl_gene_id", "hgnc_symbol"),filters = "hgnc_symbol",values = osteo[,1],mart = Hs)
osteo$ENSG = symb$ensembl_gene_id[match(osteo[,1], symb$hgnc_symbol)]
head(osteo)
ofInterest = data.frame(geneNames = c("AXL","PDGFRA", "PIEZO1", "FLNA", "FLNB", "FLNC"))
symb = getBM(attributes = c("ensembl_gene_id", "hgnc_symbol"),filters = "hgnc_symbol",values = ofInterest$geneNames,mart = Hs)
ofInterest$ENSG = symb$ensembl_gene_id[match(ofInterest$geneNames, symb$hgnc_symbol)]
head(ofInterest)
# calculate median transcript length and get correction factor for each gene
# (adj scales every gene to the median transcript length across both lists)
ofInterest$transLength = longLengths[match(as.character(ofInterest$ENSG), names(longLengths))]
osteo$transLength = longLengths[match(as.character(osteo$ENSG), names(longLengths))]
medOfAll = median(c(as.numeric(ofInterest$transLength), as.numeric(osteo$transLength)))
ofInterest$adj = medOfAll/as.numeric(ofInterest$transLength)
osteo$adj = medOfAll/as.numeric(osteo$transLength)
head(osteo)
head(ofInterest)
# apply the length-correction factor to the residual expression values
ofInterestExp = resid[which(rownames(resid) %in% ofInterest$ENSG),]
# Reorder the gene table to match the expression matrix row order.
ofInterest = ofInterest[match(rownames(ofInterestExp),ofInterest$ENSG),]
rownames(ofInterestExp)
ofInterest$ENSG
ofInterestExp = ofInterestExp * ofInterest$adj
osteoExp = resid[which(rownames(resid) %in% osteo$ENSG),]
# Drop osteo genes that are absent from the residual expression matrix.
toRemove = setdiff(osteo$ENSG, rownames(osteoExp))
osteoTrimmed = osteo[-which(osteo$ENSG %in% toRemove),]
rm(osteo)
osteoTrimmed = osteoTrimmed[match(rownames(osteoExp),osteoTrimmed$ENSG),]
head(osteoTrimmed$ENSG)
head(rownames(osteoExp))
osteoExp = osteoExp * osteoTrimmed$adj
head(osteoExp)
# heatmap (non-scaled) of candidate genes vs osteo genes across all patients;
# the row side bar marks candidate genes in orange
combined = rbind(osteoExp,ofInterestExp)
heatmap.2(data.matrix(combined),scale = NULL,col = bluered(9),trace = "none",RowSideColors = c(rep("white",nrow(osteoExp)),rep("orange", nrow(ofInterestExp))),labRow = "")
# boxplot of corrected gene values for gene list and osteo genes.
boxplot(t(combined), las = 2)
## Another look, using CPM/RPKM from raw counts instead of residuals.
counts = read.csv(getFileLocation(synGet("syn2820309")),row.names = 1)
head(counts)
counts.dge =DGEList(counts = counts,remove.zeros = TRUE)
counts.dge = calcNormFactors((counts.dge))
# Restrict the DGEList to the genes analysed above, in the same order.
shortCounts.dge = counts.dge[which(rownames(counts.dge) %in% rownames(combined)),]
shortCounts.dge = shortCounts.dge[match(rownames(combined),rownames(shortCounts.dge)),]
head(rownames(shortCounts.dge))
head(rownames(combined))
head(osteoTrimmed$ENSG)
# log2 RPKM using the longest-transcript lengths computed earlier.
counts.rpkm = rpkm(shortCounts.dge,gene.length = as.numeric(as.vector(c(osteoTrimmed$transLength,ofInterest$transLength))),normalized.lib.sizes = TRUE,log = TRUE)
heatmap.2(counts.rpkm,scale = NULL,col = bluered(9),trace = "none",RowSideColors = c(rep("white",nrow(osteoExp)),rep("orange", nrow(ofInterestExp))),labRow = "")
boxplot(counts.rpkm, las = 2,use.cols = FALSE,col = c(rep("white",nrow(osteoExp)),rep("orange", nrow(ofInterestExp))),names = c(as.character(osteoTrimmed$geneNames), as.character(ofInterest$geneNames)),cex.axis = 0.8, ylab = "RPKM")
head(counts.rpkm)
# Export the per-gene median RPKM, named by HGNC symbol.
rpkm.meds = apply(counts.rpkm,MARGIN = 1,function(x){median(x,na.rm = TRUE)})
names(rpkm.meds) = c(as.character(osteoTrimmed$geneNames), as.character(ofInterest$geneNames))
write.csv(rpkm.meds,file = "osteo_gene_medians.csv",quote = FALSE,row.names = TRUE)
|
539a9e15f9442f8502092153c11dd0673b8b8180
|
da7a9bee3e4aec666571e05b38eab56f0bafdc35
|
/man/mp32wav.Rd
|
b0c3caa68974bea71229784123e4e62a6e452bd6
|
[] |
no_license
|
fburkitt/warbleR
|
d7c48b468f4f7c1461ba05d6f28962b1aaac7d97
|
9dea327f39905436067ca83284688764e7cdcddc
|
refs/heads/master
| 2020-03-11T00:35:08.729072
| 2018-04-10T15:01:32
| 2018-04-10T15:01:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,876
|
rd
|
mp32wav.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mp32wav.R
\name{mp32wav}
\alias{mp32wav}
\title{Convert .mp3 files to .wav}
\usage{
mp32wav(samp.rate = 44.1, parallel = 1, from = NULL, to = NULL,
normalize = NULL, pb = TRUE)
}
\arguments{
\item{samp.rate}{Sampling rate at which the .wav files should be written. The maximum permitted is 44.1 kHz (default). Units should be kHz.}
\item{parallel}{Numeric. Controls whether parallel computing is applied.
It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).}
\item{from}{Character string containing the directory path where the .mp3 files are located.
If \code{NULL} (default) then the current working directory is used.}
\item{to}{Character string containing the directory path where the .wav files will be saved.
If \code{NULL} (default) then the current working directory is used.}
\item{normalize}{Character string containing the units to be used for amplitude normalization. Check
(\code{\link[tuneR]{normalize}}) for details. If NULL (default) no normalization is carried out.}
\item{pb}{Logical argument to control progress bar. Default is \code{TRUE}.}
}
\value{
.wav files saved in the working directory with same name as original mp3 files.
}
\description{
\code{mp32wav} converts several .mp3 files in working directory to .wav format
}
\details{
Converts all .mp3 files in the working directory to .wav format. Note that the function used internally to read .mp3 files (\code{\link[tuneR]{readMP3}}) sometimes crashes.
}
\examples{
\dontrun{
# First set temporary folder
# setwd(tempdir())
#Then download mp3 files from xeno-canto
querxc(qword = "Phaethornis aethopygus", download = TRUE)
# Convert all files to .wav format
mp32wav()
#check this folder!!
getwd()
}
}
\author{
Marcelo Araya-Salas (\email{araya-salas@cornell.edu}) and Grace Smith Vidaurre
}
|
78db908096b5dbd51318b11b1cd717f0aadf0a41
|
cae8ea79126f2dd6c62be26fc5791599302b5d70
|
/paper_submission.R
|
a8222c87ab1502ba672a5d98f025e72208ee7f8a
|
[] |
no_license
|
ggruenhagen3/tooth_scripts
|
d00e55768435faca5fbfa56fc78a9a1d06ac9ebf
|
c07200114dc9524c087d13cea15754002bab324d
|
refs/heads/master
| 2023-08-30T21:30:35.373870
| 2023-08-28T14:02:53
| 2023-08-28T14:02:53
| 240,586,917
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,692
|
r
|
paper_submission.R
|
# Load Packages
library("edgeR")
library("Seurat")
library("Matrix")
library("reticulate")
library("cowplot")
library("biomaRt")
library("stringr")
library("dplyr")
library("CytoTRACE")
library("ggplot2")
library('RColorBrewer')
# Load Seurat Objects (mouse mesenchyme/epithelium, cichlid tooth/jaw)
combined <- readRDS("C:/Users/miles/Downloads/rna/data/combined.rds")
tj <- readRDS("C:/Users/miles/Downloads/d_tooth/tooth_scripts/tj_shiny/data/tj.rds")
jpool <- readRDS("C:/Users/miles/Downloads/d_tooth/tooth_scripts/pool_shiny/data/jpool.rds")
# NOTE(review): `mes` is read here from the same file as `combined` and then
# overwritten by `mes <- combined` below -- this read is redundant.
mes <- readRDS("C:/Users/miles/Downloads/rna/data/combined.rds")
epi <- readRDS("C:/Users/miles/Downloads/d_tooth/data/epi_full.rds")
# Fill missing condition labels with "INJR" (injured).
epi$cond[is.na(epi$cond)] <- "INJR"
jaw <- jpool
mes <- combined
# Load CytoTRACE Data (precomputed stemness scores per cell)
mes_cyto <- readRDS("C:/Users/miles/Downloads/d_tooth/data/mesenchyme_cyto.rds")
epi_cyto <- readRDS("C:/Users/miles/Downloads/d_tooth/data/epithelium_cyto.rds")
tj_cyto <- readRDS("C:/Users/miles/Downloads/d_tooth/data/tooth_cyto.rds")
jaw_cyto <- readRDS("C:/Users/miles/Downloads/d_tooth/data/jaw_cyto.rds")
# Save CytoTRACE Data As Metadata in the Seurat Object
mes$cyto <- mes_cyto$CytoTRACE
epi$cyto <- epi_cyto$CytoTRACE
tj$cyto <- tj_cyto$CytoTRACE
jaw$cyto <- jaw_cyto$CytoTRACE
# Figure Scratch Folder
fig_path = "C:/Users/miles/Downloads/rna/paper_submission/figures/"
scratch = paste0(fig_path, "scratch/")
############
# Figure 1 #
############
# 4 columns: mouse mesenchyme, mouse epithelium, cichlid tooth, cichlid jaw.
# 4 rows: clusters, Yap1 paint, Piezo1 paint, CytoTRACE.
# Each panel is written to its own png in the scratch folder and collected in
# all_p (row-major order) for the combined 4 x 4 grid figure.
# Refactor: the original repeated the png/plot/print/dev.off pattern 16 times
# (plus large blocks of dead commented-out per-condition code); the panels are
# now built by small helpers with identical plot parameters.
png_w = 1000
png_h = 600
png_res = 100
lbl_size = 9
text_size = 20

# Theme fragment used by rows 1-3: hide the x axis and enlarge all text.
no_x_axis <- theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
                   axis.ticks.x = element_blank(), text = element_text(size = text_size))

# Render plot_obj to <scratch>/<file_name> and return it so the caller can
# also collect it for the combined grid.
save_panel <- function(plot_obj, file_name) {
  png(paste0(scratch, file_name), width = png_w, height = png_h, res = png_res)
  print(plot_obj)
  dev.off()
  plot_obj
}

# Row 1: UMAP cluster plot with a centered plain-face title.
cluster_panel <- function(obj, title, pt) {
  DimPlot(obj, label = TRUE, pt.size = pt, label.size = lbl_size) +
    labs(title = title) +
    theme(plot.title = element_text(hjust = 0.5, face = 'plain')) +
    no_x_axis
}

# Rows 2-3: expression paint for one gene, title removed.
gene_panel <- function(obj, gene, pt) {
  FeaturePlot(obj, features = gene, label = TRUE, pt.size = pt, order = TRUE,
              label.size = lbl_size) +
    theme(plot.title = element_blank()) +
    no_x_axis
}

# Row 4: CytoTRACE score on a fixed 0-1 spectral color scale; x axis kept.
cyto_panel <- function(obj, pt) {
  FeaturePlot(obj, features = "cyto", reduction = "umap", pt.size = pt, label = TRUE,
              order = TRUE, label.size = lbl_size) +
    scale_color_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(0, 1)) +
    theme(plot.title = element_blank(), text = element_text(size = text_size))
}

# Build all 16 panels in grid order. Mouse datasets use pt.size 1.5 and
# capitalized gene symbols; cichlid datasets use pt.size 2.5 and lowercase.
all_p <- list(
  # Row 1 - Clusters
  save_panel(cluster_panel(mes, "Mouse Mesenchyme", 1.5), "mes_clusters.png"),
  save_panel(cluster_panel(epi, "Mouse Epithelium", 1.5), "epi_clusters.png"),
  save_panel(cluster_panel(tj,  "Cichlid Tooth",    2.5), "tj_clusters.png"),
  save_panel(cluster_panel(jaw, "Cichlid Jaw",      2.5), "jaw_clusters.png"),
  # Row 2 - Yap
  save_panel(gene_panel(mes, "Yap1", 1.5), "mes_yap.png"),
  save_panel(gene_panel(epi, "Yap1", 1.5), "epi_yap.png"),
  save_panel(gene_panel(tj,  "yap1", 2.5), "tj_yap.png"),
  save_panel(gene_panel(jaw, "yap1", 2.5), "jaw_yap.png"),
  # Row 3 - Piezo
  save_panel(gene_panel(mes, "Piezo1", 1.5), "mes_piezo.png"),
  save_panel(gene_panel(epi, "Piezo1", 1.5), "epi_piezo.png"),
  save_panel(gene_panel(tj,  "piezo1", 2.5), "tj_piezo.png"),
  save_panel(gene_panel(jaw, "piezo1", 2.5), "jaw_piezo.png"),
  # Row 4 - CytoTRACE
  save_panel(cyto_panel(mes, 1.5), "mes_cyto.png"),
  save_panel(cyto_panel(epi, 1.5), "epi_cyto.png"),
  save_panel(cyto_panel(tj,  2.5), "tj_cyto.png"),
  save_panel(cyto_panel(jaw, 2.5), "jaw_cyto.png")
)

# Combined 4 x 4 figure.
png(file = paste0(fig_path, "fig_1_2.png"), width = png_w*4, height = png_h*4, res = png_res)
p = plot_grid(plotlist = all_p, ncol = 4)
print(p)
dev.off()
##################
# Figure 1 - Alt #
##################
# Using a merged tj_jaw object (CCA integration of tooth and jaw) instead of
# plotting the two cichlid datasets separately.
test <- RunCCA(tj, jaw, renormalize = TRUE, rescale = TRUE)
tj_jaw <- FindVariableFeatures(test)
tj_jaw <- RunPCA(tj_jaw, npcs = 30, verbose = FALSE)
tj_jaw <- RunUMAP(tj_jaw, reduction = "pca", dims = 1:12)
# NOTE(review): neighbors/clusters are computed on the 2D UMAP embedding
# rather than on PCA space -- presumably intentional, but worth confirming.
tj_jaw <- FindNeighbors(tj_jaw, reduction = "umap", dims = 1:2)
tj_jaw <- FindClusters(tj_jaw, resolution = 0.25)
DimPlot(tj_jaw, reduction = "umap", split.by = "cond", label = TRUE)
# Attach precomputed CytoTRACE scores for the merged object.
tj_jaw_cyto <- readRDS("C:/Users/miles/Downloads/d_tooth/data/tj_jaw_cyto.rds")
tj_jaw$cyto <- tj_jaw_cyto$CytoTRACE
text_size = 25
# Row 1 - Clusters
all_p <- list()
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Clipped"
# ob.list <- SplitObject(mes, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "mes_clusters_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p = DimPlot(ob.list[[x]], label = TRUE, pt.size = 1.5) + labs(title = paste("Mouse Mesenchyme -", cond.list[[x]])) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'))
# all_p[[length(all_p)+1]] <- p
# print(p)
# dev.off()
# }
png(paste0(scratch, "mes_clusters.png"), width = png_w, height = png_h, res = png_res)
p <- DimPlot(mes, label = TRUE, pt.size = 1.5, label.size = lbl_size) + labs(title = paste("Mouse Mesenchyme")) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), text=element_text(size=text_size))
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Injury"
# ob.list <- SplitObject(epi, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "epi_clusters_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p <- DimPlot(ob.list[[x]], label = TRUE, pt.size = 1.5) + labs(title = paste("Mouse Epithelium -", cond.list[[x]])) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'))
# print(p)
# all_p[[length(all_p)+1]] <- p
# dev.off()
# }
png(paste0(scratch, "epi_clusters.png"), width = png_w, height = png_h, res = png_res)
p <- DimPlot(epi, label = TRUE, pt.size = 1.5, label.size = lbl_size) + labs(title = paste("Mouse Epithelium")) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), text=element_text(size=text_size))
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
png(paste0(scratch, "tj_jaw_clusters.png"), width = png_w, height = png_h, res = png_res)
p <- DimPlot(tj_jaw, label = TRUE, pt.size = 2.5, label.size = lbl_size) + labs(title = paste("Cichlid Tooth")) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), text=element_text(size=text_size))
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
# Row 2 - Yap
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Clipped"
# ob.list <- SplitObject(mes, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "mes_yap_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p = FeaturePlot(ob.list[[x]], feature = "Yap1", label = TRUE, pt.size = 1.5, order = T) + theme(plot.title = element_blank())
# all_p[[length(all_p)+1]] <- p
# print(p)
# dev.off()
# }
png(paste0(scratch, "mes_yap.png"), width = png_w, height = png_h, res = png_res)
p = FeaturePlot(mes, feature = "Yap1", label = TRUE, pt.size = 1.5, order = T, label.size = lbl_size) + theme(plot.title = element_blank(), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), text=element_text(size=text_size))
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Injury"
# ob.list <- SplitObject(epi, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "epi_yap_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p <- FeaturePlot(ob.list[[x]], features = "Yap1", label = TRUE, pt.size = 1.5, order = T) + theme(plot.title = element_blank())
# print(p)
# all_p[[length(all_p)+1]] <- p
# dev.off()
# }
# Figure 1, row 2 (continued): Yap1/yap1 expression for mouse epithelium and
# cichlid tooth (gene symbol case differs by species; cichlid points are larger).
yap_panels <- list(
  list(obj = epi,    gene = "Yap1", file = "epi_yap.png",    pt = 1.5),
  list(obj = tj_jaw, gene = "yap1", file = "tj_jaw_yap.png", pt = 2.5)
)
for (pn in yap_panels) {
  png(paste0(scratch, pn$file), width = png_w, height = png_h, res = png_res)
  plt <- FeaturePlot(pn$obj, features = pn$gene, label = TRUE, pt.size = pn$pt,
                     order = TRUE, label.size = lbl_size) +
    theme(plot.title = element_blank(), axis.title.x = element_blank(),
          axis.text.x = element_blank(), axis.ticks.x = element_blank(),
          text = element_text(size = text_size))
  print(plt)
  all_p[[length(all_p) + 1]] <- plt
  dev.off()
}
# Row 3 - Piezo
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Clipped"
# ob.list <- SplitObject(mes, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "mes_piezo_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p = FeaturePlot(ob.list[[x]], feature = "Piezo1", label = TRUE, pt.size = 1.5, order = T) + theme(plot.title = element_blank())
# all_p[[length(all_p)+1]] <- p
# print(p)
# dev.off()
# }
# Figure 1, row 3 (mesenchyme): Piezo1 expression on the full mes object.
png(paste0(scratch, "mes_piezo.png"), width = png_w, height = png_h, res = png_res)
plt <- FeaturePlot(mes, features = "Piezo1", label = TRUE, pt.size = 1.5, order = TRUE,
                   label.size = lbl_size) +
  theme(plot.title = element_blank(), axis.title.x = element_blank(),
        axis.text.x = element_blank(), axis.ticks.x = element_blank(),
        text = element_text(size = text_size))
print(plt)
all_p[[length(all_p) + 1]] <- plt
dev.off()
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Injury"
# ob.list <- SplitObject(epi, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "epi_piezo_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p <- FeaturePlot(ob.list[[x]], features = "Piezo1", label = TRUE, pt.size = 1.5, order = T) + theme(plot.title = element_blank())
# print(p)
# all_p[[length(all_p)+1]] <- p
# dev.off()
# }
# Figure 1, row 3 (continued): Piezo1/piezo1 expression for mouse epithelium
# and cichlid tooth.
piezo_panels <- list(
  list(obj = epi,    gene = "Piezo1", file = "epi_piezo.png",    pt = 1.5),
  list(obj = tj_jaw, gene = "piezo1", file = "tj_jaw_piezo.png", pt = 2.5)
)
for (pn in piezo_panels) {
  png(paste0(scratch, pn$file), width = png_w, height = png_h, res = png_res)
  plt <- FeaturePlot(pn$obj, features = pn$gene, label = TRUE, pt.size = pn$pt,
                     order = TRUE, label.size = lbl_size) +
    theme(plot.title = element_blank(), axis.title.x = element_blank(),
          axis.text.x = element_blank(), axis.ticks.x = element_blank(),
          text = element_text(size = text_size))
  print(plt)
  all_p[[length(all_p) + 1]] <- plt
  dev.off()
}
# Row 4 - CytoTRACE
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Clipped"
# ob.list <- SplitObject(mes, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "mes_cyto_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p = FeaturePlot(ob.list[[x]], features = "cyto", reduction = "umap", pt.size = 1.5, label=T, order = T) + scale_color_gradientn( colors = rev(brewer.pal(11,"Spectral")), limits = c(0,1)) + theme(plot.title = element_blank())
# all_p[[length(all_p)+1]] <- p
# print(p)
# dev.off()
# }
# Figure 1, row 4 (mesenchyme): CytoTRACE score painted on the UMAP,
# on a fixed 0-1 Spectral colour scale so panels are comparable.
png(paste0(scratch, "mes_cyto.png"), width = png_w, height = png_h, res = png_res)
plt <- FeaturePlot(mes, features = "cyto", reduction = "umap", pt.size = 1.5,
                   label = TRUE, order = TRUE, label.size = lbl_size) +
  scale_color_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(0, 1)) +
  theme(plot.title = element_blank(), text = element_text(size = text_size))
print(plt)
all_p[[length(all_p) + 1]] <- plt
dev.off()
# cond.list <- list()
# cond.list[[1]] <- "Control"
# cond.list[[2]] <- "Injury"
# ob.list <- SplitObject(epi, split.by = "orig.ident")
# for (x in 1:length(ob.list)) {
# png(paste0(scratch, "epi_cyto_", cond.list[[x]], ".png"), width = png_w, height = png_h, res = png_res)
# p = FeaturePlot(ob.list[[x]], features = "cyto", reduction = "umap", pt.size = 1.5, label=T, order = T) + scale_color_gradientn( colors = rev(brewer.pal(11,"Spectral")), limits = c(0,1)) + theme(plot.title = element_blank())
# print(p)
# all_p[[length(all_p)+1]] <- p
# dev.off()
# }
# Figure 1, row 4 (continued): CytoTRACE for mouse epithelium and cichlid tooth,
# then assemble every collected panel into the combined figure.
cyto_panels <- list(
  list(obj = epi,    file = "epi_cyto.png",    pt = 1.5),
  list(obj = tj_jaw, file = "tj_jaw_cyto.png", pt = 2.5)
)
for (pn in cyto_panels) {
  png(paste0(scratch, pn$file), width = png_w, height = png_h, res = png_res)
  plt <- FeaturePlot(pn$obj, features = "cyto", reduction = "umap", pt.size = pn$pt,
                     label = TRUE, order = TRUE, label.size = lbl_size) +
    scale_color_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(0, 1)) +
    theme(plot.title = element_blank(), text = element_text(size = text_size))
  print(plt)
  all_p[[length(all_p) + 1]] <- plt
  dev.off()
}
# Combined grid: 3 columns of panels.
# NOTE(review): canvas width is png_w * 4 although the grid has 3 columns — confirm intended.
png(file = paste0(fig_path, "fig_1_3.png"), width = png_w * 4, height = png_h * 4, res = png_res)
print(plot_grid(plotlist = all_p, ncol = 3))
dev.off()
############
# Figure 2 #
############
# maybe try a figure for YAP like you made for celsr 1,
# using only YAP1 cells and looking at expression and
# cytotrace side by side across the data sets
# Figure 2 setup: restrict each dataset to Yap-expressing cells and recluster.
# The original code repeated the same 7-step pipeline four times; it is
# factored into two helpers with identical settings and call order.
#
# pos_cells(): barcodes of cells with nonzero expression of `gene` in `obj`.
pos_cells <- function(obj, gene) {
  names(which(obj@assays$RNA@data[gene, ] > 0))
}
# recluster_subset(): subset `obj` to `cells`, remember the parent clustering
# in `orig.clust`, then rerun variable features / scaling / PCA / UMAP /
# neighbors / clustering with the same settings used previously
# (2000 features, 30 PCs, UMAP on PCA dims 1:12, neighbors on the 2 UMAP dims,
# resolution 0.30).
recluster_subset <- function(obj, cells) {
  sub <- subset(obj, cells = cells)
  sub$orig.clust <- obj$seurat_clusters[cells]
  sub <- FindVariableFeatures(object = sub, mean.function = ExpMean,
                              dispersion.function = LogVMR, nfeatures = 2000)
  sub <- ScaleData(object = sub, vars.to.regress = NULL)
  sub <- RunPCA(sub, npcs = 30, verbose = FALSE)
  sub <- RunUMAP(sub, reduction = "pca", dims = 1:12)
  sub <- FindNeighbors(sub, reduction = "umap", dims = 1:2)
  sub <- FindClusters(sub, resolution = 0.30)
  sub
}
# Yap-positive cell barcodes per dataset (gene symbol case differs by species).
mes_yap_pos_cells <- pos_cells(mes, "Yap1")
epi_yap_pos_cells <- pos_cells(epi, "Yap1")
tj_yap_pos_cells  <- pos_cells(tj, "yap1")
jaw_yap_pos_cells <- pos_cells(jaw, "yap1")
# Subset to Yap-positive cells and recluster each dataset.
mes_yap <- recluster_subset(mes, mes_yap_pos_cells)
epi_yap <- recluster_subset(epi, epi_yap_pos_cells)
tj_yap  <- recluster_subset(tj, tj_yap_pos_cells)
jaw_yap <- recluster_subset(jaw, jaw_yap_pos_cells)
# Make the plots
# Figure 2: Yap expression (row 1) and CytoTRACE score (row 2) across the four
# Yap-positive subsets, assembled into one 4-column grid.
all_p <- list()
text_size <- 18
fig2_panels <- list(
  list(obj = mes_yap, gene = "Yap1"),
  list(obj = epi_yap, gene = "Yap1"),
  list(obj = tj_yap,  gene = "yap1"),
  list(obj = jaw_yap, gene = "yap1")
)
# Row 1: Yap expression, x-axis annotation suppressed.
for (pn in fig2_panels) {
  plt <- FeaturePlot(pn$obj, features = pn$gene, label = TRUE, pt.size = 1.5,
                     order = TRUE, label.size = lbl_size) +
    theme(plot.title = element_blank(), axis.title.x = element_blank(),
          axis.text.x = element_blank(), axis.ticks.x = element_blank(),
          text = element_text(size = text_size))
  all_p[[length(all_p) + 1]] <- plt
}
# Row 2: CytoTRACE score on a fixed 0-1 Spectral scale.
for (pn in fig2_panels) {
  plt <- FeaturePlot(pn$obj, features = "cyto", reduction = "umap", pt.size = 1.5,
                     label = TRUE, order = TRUE, label.size = lbl_size) +
    scale_color_gradientn(colors = rev(brewer.pal(11, "Spectral")), limits = c(0, 1)) +
    theme(plot.title = element_blank(), text = element_text(size = text_size))
  all_p[[length(all_p) + 1]] <- plt
}
# NOTE(review): width uses a hard-coded 600 rather than png_w — confirm intended.
png(filename = paste0(fig_path, "fig_2_1.png"), width = 600 * 4, height = png_h * 2, res = png_res)
print(plot_grid(plotlist = all_p, ncol = 4))
dev.off()
############
# Figure 3 #
############
# cartoon of cichlid jaw vs tooth samples, clusters for each dissection,
# then paint keratin 5+15 in both samples
# Figure 3 panel settings and a combined krt5/krt15 co-expression score.
fig_3_w <- 700
fig_3_h <- 400
fig_3_res <- 100
lbl_size <- 6
# Barcodes of cells expressing krt5 / krt15 in each cichlid dissection.
tj_krt_5   <- names(which(tj@assays$RNA@data["krt5", ] > 0))
jaw_krt_5  <- names(which(jaw@assays$RNA@data["krt5", ] > 0))
tj_krt_15  <- names(which(tj@assays$RNA@data["krt15", ] > 0))
jaw_krt_15 <- names(which(jaw@assays$RNA@data["krt15", ] > 0))
# Double-positive cells (krt5 AND krt15); barcodes are unique so intersect()
# is equivalent to the subset-by-%in% form.
tj_krt5_15  <- intersect(tj_krt_5, tj_krt_15)
jaw_krt5_15 <- intersect(jaw_krt_5, jaw_krt_15)
# Score = krt5 + krt15 expression, zeroed outside the double-positive set.
tj_score <- tj@assays$RNA@data["krt5", ] + tj@assays$RNA@data["krt15", ]
tj_score[!(names(tj_score) %in% tj_krt5_15)] <- 0
tj$krt5_15 <- tj_score
jaw_score <- jaw@assays$RNA@data["krt5", ] + jaw@assays$RNA@data["krt15", ]
jaw_score[!(names(jaw_score) %in% jaw_krt5_15)] <- 0
jaw$krt5_15 <- jaw_score
# Figure 3: cluster maps and krt5/krt15 co-expression for the cichlid tooth (tj)
# and jaw dissections. Individual panels go to `scratch`; the 2x2 grid to fig_path.
all_p <- list()
png(paste0(scratch, "tj_clusters_fig_3.png"), width = fig_3_w, height = fig_3_h, res = fig_3_res)
p <- DimPlot(tj, label = TRUE, pt.size = 1.5, label.size = lbl_size) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
png(paste0(scratch, "jaw_clusters_fig_3.png"), width = fig_3_w, height = fig_3_h, res = fig_3_res)
p <- DimPlot(jaw, label = TRUE, pt.size = 1.5, label.size = lbl_size) + theme(plot.title = element_text(hjust = 0.5, face = 'plain'), axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
png(paste0(scratch, "tj_krt5_krt15.png"), width = fig_3_w, height = fig_3_h, res = fig_3_res)
p <- FeaturePlot(tj, features = "krt5_15", label = TRUE, pt.size = 1.5, order = T, label.size = lbl_size) + theme(plot.title = element_blank())
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
# FIX: this panel previously used png_w/png_h/png_res while its tj counterpart
# (and the other figure-3 panels) used the fig_3_* settings; use fig_3_* here
# too so all figure-3 previews share the same dimensions.
png(paste0(scratch, "jaw_krt5_krt15.png"), width = fig_3_w, height = fig_3_h, res = fig_3_res)
p <- FeaturePlot(jaw, features = "krt5_15", label = TRUE, pt.size = 1.5, order = T, label.size = lbl_size) + theme(plot.title = element_blank())
print(p)
all_p[[length(all_p)+1]] <- p
dev.off()
# Combined 2x2 grid (default ncol of plot_grid on 4 panels).
png(file = paste0(fig_path, "fig_3_1.png"), width = fig_3_w*2, height = fig_3_h*2, res = 120)
p = plot_grid(plotlist=all_p)
print(p)
dev.off()
|
e7100b30f8451f37ebe9357a30a886d596297496
|
1f4fb6044e39e1c632c13487fb79f7e5bc836175
|
/tests/testthat/test_colours.R
|
70c53ce34611814f876708e6d418a265cbb92a1f
|
[] |
no_license
|
ComputationalProteomicsUnit/cputools
|
777a034e14f938ded2fee6672d3da64fb171488a
|
17e4584cf1cb3f874dcc60202680e7dcdf306188
|
refs/heads/master
| 2020-12-24T15:14:22.000702
| 2017-01-10T17:33:21
| 2017-01-10T17:33:21
| 19,713,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
test_colours.R
|
# NOTE(review): context() is deprecated in testthat 3e; kept for compatibility
# with the package's current testthat setup.
context("colours")

# darken()/lighten() round-trip on "red". The original test had an empty
# description and buried an assignment inside expect_identical(); both fixed.
test_that("darken and lighten shift red as expected", {
  x <- darken("red")
  expect_identical(x, "#B60000")
  expect_identical(lighten(x), "#FE0000")
})
|
b09b165c8067243dc05882f5633929b404372655
|
0939caa7d9fb7dc60da87df12a8daf856fd79551
|
/R/RcppExports.R
|
fc6379a34243fb27c219f595d2e4b8cf25dc1953
|
[] |
no_license
|
sjkdfsjkdf/screenmill
|
7c2a6ed3388f78cbfe67e49159e1a4e56759299b
|
0e0fafc19c0f0d3cf1dd985fc188aa0e4151e653
|
refs/heads/master
| 2021-01-11T03:39:30.369678
| 2016-10-19T22:29:14
| 2016-10-19T22:29:14
| 71,404,523
| 0
| 0
| null | 2016-10-19T22:27:25
| 2016-10-19T22:27:25
| null |
UTF-8
|
R
| false
| false
| 390
|
r
|
RcppExports.R
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin wrapper around the compiled routine 'screenmill_measureColonies'.
# All arguments are forwarded untouched: img (image data), l/r/t/b
# (presumably left/right/top/bottom colony bounds -- TODO confirm),
# background, thresh. This file is generated by Rcpp::compileAttributes;
# hand edits will be overwritten on regeneration.
measureColonies <- function(img, l, r, t, b, background, thresh) {
    .Call('screenmill_measureColonies', PACKAGE = 'screenmill', img, l, r, t, b, background, thresh)
}
# Thin wrapper around the compiled routine 'screenmill_nearestNeighbor';
# x and y are forwarded untouched. This file is generated by
# Rcpp::compileAttributes; hand edits will be overwritten on regeneration.
nearestNeighbor <- function(x, y) {
    .Call('screenmill_nearestNeighbor', PACKAGE = 'screenmill', x, y)
}
|
0d68552cf6cefae4cc8d10776edfde1d7a4ce4bd
|
16d00df419d17a6e222e53342344fe89e67b096d
|
/code/priceImpacts.R
|
c15debde783c5642a70590d3dd16081367de1c57
|
[] |
no_license
|
mattia-cai/SICOMA_2020
|
f35b3d9a2677bc583db7b7646489dda8e374842b
|
de31feb7da697ab6c40e84345ae4a224542bd927
|
refs/heads/master
| 2023-05-04T19:55:06.828982
| 2021-05-26T13:54:45
| 2021-05-26T13:54:45
| 292,814,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,110
|
r
|
priceImpacts.R
|
rm( list = ls( ) )  # NOTE(review): clearing the workspace in a script is discouraged; kept for backward compatibility.
# Compute the price impacts using the Leontief price model.
# Compared to the 1903 version, this also includes a second version of the
# analysis that accounts for case duration.
# author: mattia cai
##############################################
### Warning: XLConnect conflicts with xlsx ###
##############################################
##################
### Input data ###
##################
# Dependencies: library() (not require()) so a missing package fails loudly --
# require() only returns FALSE and lets the script continue.
library( "data.table" )
library( "stats" )
library( "openxlsx" )
source( "code/funlib/leontiefPriceModel.R" )
source( "code/funlib/write_list.R" ) # Revised so that rJava is not needed
# Competition-case dataset (single NACE 2 assignment per case); loads dta.cases.
load( file = paste0( "outputData/compCases.RData"), verbose = TRUE )
str( dta.cases )
# EU28 supply and use system (2014): expected to provide Vt and Ad used below.
load( "intmData/IO_EU28_2014.RData" )
# Results Codebook
# codebook <- read.csv( "inputData/resultCodebooks/priceImpacts_codebook.csv", header = T, stringsAsFactors = F )
##########################
### Familiar L68 issue ###
##########################
# In the IO table L68 is split into L68A and L68B. L68A is imputed rents
# (in practice all zeros); only L68B matters here. The case data carry a
# single L68 industry, so map it to L68B.
setdiff( dta.cases[ , unique( nace2_2d_a64 ) ], colnames( Vt ) )
dta.cases[ nace2_2d_a64 == "L68", nace2_2d_a64 := "L68B" ]
# Check: every industry in the case data must now exist in the IO table.
all( dta.cases[ , unique( nace2_2d_a64 ) ] %in% colnames( Vt ) ) # Should be TRUE
################################################
### Collapse the case data to 2-digit NACE 2 ###
################################################
# No attempt to account for case duration here: each case counts only in the
# year it occurs. Market sizes are summed and price overcharges (delta_p)
# are averaged weighted by market size, once without deterrence (mkt) and
# once with logistic deterrence (mktT).
dta.cases[ , case.count := 1 ]
dta <- dta.cases[ , .( case.count = sum( case.count ),
                       mkt_none = sum( mkt ),
                       mkt_logistic = sum( mktT ),
                       dp_none = weighted.mean( x = delta_p, w = mkt ),
                       dp_logistic = weighted.mean( x = delta_p, w = mktT ),
                       go2_a64 = mean( go2_a64 ) ),
                   by = .( year, nace2_2d_a64 ) ]
# Sanity check: yearly market-size totals should match before/after collapsing.
dta.cases[ , .( mkt_none = sum( mkt ), mkt_logistic = sum( mktT ) ) , by = year ]
dta[ , .( mkt_none = sum( mkt_none ), mkt_logistic = sum( mkt_logistic ) ), by = year ]
# Reshape from wide to long: the *_none / *_logistic column pairs become a
# single (mkt, dp) pair plus a "deterrence" indicator with values none/logistic.
dta <- copy( reshape( dta, direction = "long",
                      varying = list(
                        grep( "mkt", colnames( dta ), value = T ),
                        grep( "dp", colnames( dta ), value = T) ),
                      v.names = c( "mkt", "dp" ) ,
                      times = c( "none", "logistic" ),
                      timevar = "deterrence" ) )
#################################
### Duration (2012-2019 data) ###
#################################
# Second version of the analysis: a case produces effects from its start year
# through `through` = year + duration - 1, so each year aggregates every case
# active in that year (not just cases opened then).
dta.cases[ , through := year + duration - 1 ]
dta.cases[ , .( case_id, type, year, duration, through ) ]
# Years for which data are available
min.year <- dta.cases[ , min( year ) ]
max.year <- dta.cases[ , max( year ) ]
# Aggregate the cases active in year t to 2-digit NACE 2 (same statistics as
# the no-duration collapse above).
fun <- function( t ){
  dt <- dta.cases[ t >= year & t <= through, ] # cases producing effects in year t
  dt <- dt[ , .( case.count = sum( case.count ),
                 mkt_none = sum( mkt ),
                 mkt_logistic = sum( mktT ),
                 dp_none = weighted.mean( x = delta_p, w = mkt ),
                 dp_logistic = weighted.mean( x = delta_p, w = mktT ),
                 go2_a64 = mean( go2_a64 ) ),
             by = .( nace2_2d_a64 ) ]
  dt <- cbind( year = t, dt )
  return( dt )
}
lst <- lapply( min.year : max.year, fun )
# Stack the per-year tables
dta1 <- do.call( "rbind", lst )
# From wide to long (same reshape as the no-duration version)
dta1 <- copy( reshape( dta1, direction = "long",
                       varying = list(
                         grep( "mkt", colnames( dta1 ), value = T ),
                         grep( "dp", colnames( dta1 ), value = T) ),
                       v.names = c( "mkt", "dp" ) ,
                       times = c( "none", "logistic" ),
                       timevar = "deterrence" ) )
##############################################
### Combine analysis with/without duration ###
##############################################
# Tag each version and stack them into a single table; "duration" takes the
# values woDur (ignores duration) / wDur (accounts for it).
dta <- cbind( duration = "woDur" , dta )
dta1 <- cbind( duration = "wDur" , dta1 )
dta <- rbind( dta, dta1 )
# Drop the row id created by reshape()
dta[ , id := NULL ]
##############################
### w and rho calculations ###
##############################
# Weight w = affected market size as a share of industry gross output.
dta[ , w := mkt / go2_a64 ]
# Any weights in excess of 1?
dta[ , any( w > 1 ) ] # Yep...
# w > 1 only occurs when duration is taken into account; it is relatively
# rare and only affects industry J61 in years 2016 and 2018.
dta[ w > 1 ]
# Cap the weight at 1 in those cases (a market cannot exceed its industry).
dta[ w > 1, w := 1 ]
# Working columns no longer needed once w is computed.
dta[ , mkt := NULL ]
dta[ , go2_a64 := NULL ]
###############################
### expand Nace 2 structure ###
###############################
# Fill in the industries with no cases: build the full
# year x duration x deterrence x industry grid (industries taken from the IO
# table) and right-join dta onto it via the data.table keyed join below.
setkeyv( dta, c( "year" , "duration", "deterrence", "nace2_2d_a64" ) )
dta.grid <- expand.grid( year = dta[ , unique( year ) ],
                         duration = dta[ , unique( duration ) ],
                         deterrence = dta[ , unique( deterrence ) ],
                         nace2_2d_a64 = colnames( Vt ), stringsAsFactors = F )
dta.grid <- as.data.table( dta.grid )
setkeyv( dta.grid, c( "year" , "duration", "deterrence", "nace2_2d_a64" ) )
dta <- dta[ dta.grid ]
# Replace NA entries with 0 (cells of the expanded grid with no cases).
fun <- function(z) {
  z[is.na(z)] <- 0
  z
}
# Zero-fill the count/weight/price columns introduced as NA by the grid join.
cols <- c( "case.count", "w", "dp" )
dta[ , ( cols ) := lapply( .SD, fun ), .SDcols = cols ]
# The annoying L68A-B rows: check how the grid's industry codes line up with
# the IO-table column names (relies on the keyed ordering above -- the second
# line prints any rows that do not align).
table( dta[ , nace2_2d_a64 ] == colnames( Vt ) )
dta[ which( dta[ , nace2_2d_a64 ] != colnames( Vt ) ) , ]
# Eventually, store a copy of the input data (with codebook relating that to the report)
# ###############################
# ### Average shock over time ###
# ###############################
#
# # Can I market this as equivalent to the permanent shock in QUEST?
# dta.avg <- dta[ ,.( case.count = mean( case.count ), dp = mean( dp ), w = mean( w ) ) , by = .( nace2_2d_a64, deterrence, duration ) ]
# dta.avg[ , year := 9999 ]
# dta <- rbind( dta, dta.avg )
#########################################
### Recover GO2 (aggregation weights) ###
#########################################
# Re-attach industry gross output (column sums of the supply table Vt),
# now defined for every industry of the expanded grid.
dta[, go2_a64 := colSums( Vt )[ dta[ , nace2_2d_a64 ] ] ]
###############################
### within-industry effects ###
###############################
# Within-industry percentage price change; the sign is flipped, presumably
# because dp is an overcharge removed by the intervention -- confirm.
dta[ , within := - ( w * dp ) * 100 ]
##########################################
### Arrange for spillover calculations ###
##########################################
# One data.table per year/deterrence/duration cell, rows reordered to follow
# the industry ordering of the coefficient matrix Ad.
lst <- split( dta, f = dta[ , .( year, deterrence, duration ) ] )
# FIX: reorder rows into Ad's industry order. The original used
# match( z$industry, rownames( Ad ) ), which applies the inverse permutation
# and only coincides with the intended order when the rows are already
# aligned; match( rownames( Ad ), z$industry ) is the canonical reorder.
lst <- lapply( lst, function( z ) z[ match( rownames( Ad ), z[ , nace2_2d_a64 ] ), ] )
# Check: every split must now be exactly in Ad's row order.
all( sapply( lst, function( z ) { all( z[ , nace2_2d_a64 ] == rownames( Ad ) ) } ) ) # Should be TRUE
##############################################################
### Model one year-deterrence-duration combination at time ###
##############################################################
# One spillover matrix per year x deterrence x duration cell.
mat.lst <- lapply( lst, function( dt ) IOpriceSpilloverMatrix( A = Ad, w = dt[ , w ], rho = 1 - dt[ , dp ] ) )
# Averages over time: for each deterrence/duration pair, average the annual
# spillover matrices element-wise; the averages are labelled with the
# pseudo-year 9999 and appended to mat.lst.
det.labs <- dta[ , unique( deterrence ) ]
dur.labs <- dta[ , unique( duration ) ]
tmpfun <- function( i, j ){
  # fixed = TRUE: the "." separating the deterrence and duration labels in the
  # list names must match literally (previously it was a regex wildcard).
  x <- mat.lst[ grepl( names( mat.lst ), pattern = paste0( i, ".", j ), fixed = TRUE ) ]
  Reduce( `+`, x ) / length( x )
}
index <- expand.grid( det.labs, dur.labs, stringsAsFactors = FALSE )
index <- split( index, f = seq_len( nrow( index ) ) )
avg.lst <- lapply( index, function( x ) tmpfun( x[[ 1 ]], x[[ 2 ]] ) )
# Labels: pseudo-year 9999 marks the period average.
names( avg.lst ) <- sapply( index, function( x ) paste0( "9999.", x[[ 1 ]], ".", x[[ 2 ]] ) )
mat.lst <- c( mat.lst, avg.lst )
##############################################################
### Save the annual spillover matrices (to separate files) ###
##############################################################
# format_spill(): express a spillover matrix in percent, append two summary
# rows ("Contribution to spillover" = gross-output-weighted column aggregate,
# "Percent of spillover" = its percentage share), and add a gross-output
# column plus a row-total column. Hoisted out of the nested loop below (it
# does not depend on the loop indices).
format_spill <- function( X ){
  X <- X * 100
  go <- colSums( Vt )[ rownames( X ) ]
  contr <- drop( ( go / sum( go ) ) %*% X )
  share <- ( contr / sum( contr ) ) * 100
  X <- rbind( X, "Contribution to spillover" = contr, "Percent of spillover" = share )
  X <- as.data.frame( X )
  X <- cbind( "gross output" = c( go, NA, NA ), X, "total" = rowSums( X ) )
  return( X )
}
# Excel files for COMP: one workbook per deterrence/duration combination,
# one sheet per year (9999 = average over the whole period).
for ( i in dta[ , unique( deterrence ) ] ){
  for ( j in dta[ , unique( duration ) ] ){
    # fixed = TRUE so the "." label separator is matched literally, not as a regex wildcard.
    spill.mats <- mat.lst[ grepl( names( mat.lst ), pattern = paste0( i, ".", j ), fixed = TRUE ) ]
    # Sheet names: the 4-digit (pseudo-)year prefix of each list name.
    names( spill.mats ) <- substr( names( spill.mats ), 1, 4 )
    spill.mats <- lapply( spill.mats, format_spill )
    spill.mats <- lapply( spill.mats, as.data.frame )
    fpath <- paste0( "results/spillMat_", i, "_", j, ".xlsx" )
    write_list( my_list = spill.mats, wb_name = fpath )
  }
}
# Save in R format as well, just in case
spillover.matrices <- mat.lst
save( spillover.matrices, file = paste0( "results/spillMatrices.RData" ) )
#############################################
### Industries as receivers of spillovers ###
#############################################
# Spillover received by each industry = row sum of its row of the spillover
# matrix, in percent. The unlisted vector is aligned back to dta by the
# composite year.deterrence.duration.industry key built from the list names.
vec <- unlist( lapply( mat.lst, rowSums ) )
vec <- vec[ dta[ , paste( year, deterrence, duration, nace2_2d_a64, sep ="." ) ] ]
dta[ , spillover := vec * 100 ]
# Total effect = within-industry effect + spillover received.
dta[ , total := within + spillover ]
# Drop working columns no longer needed.
dta[ , c( "case.count", "dp", "w" ) := NULL ]
# Average over the entire period, labelled with pseudo-year 9999.
dta.avg <- dta[ , .( go2_a64 = mean( go2_a64 ),
                     within = mean( within ),
                     spillover = mean( spillover ),
                     total = mean( total ) ), by = .( nace2_2d_a64, deterrence, duration )]
dta.avg[ , year := 9999 ]
dta <- rbind( dta, dta.avg )
# For work in R
save( dta, file = paste0( "results/industryOfDestination.RData" ) )
write.csv( dta, file = paste0( "results/industryOfDestination.csv" ), row.names = F, na = "" )
# Excel files for COMP: one workbook per deterrence/duration pair, one sheet per year.
dt.lst <- split( dta, dta[, .( deterrence, duration ) ] )
dt.lst <- lapply( dt.lst , function( x ) setkeyv( x, c( "year", "nace2_2d_a64" ) ) )
for ( i in names( dt.lst ) ){
  dt.lst.i <- dt.lst[[ i ]]
  dt.lst.i[ , deterrence := NULL ]
  fpath <- paste0( "results/industryOfDestination_", gsub( x = i, pattern = ".", replacement = "_", fixed = T ) , ".xlsx" )
  write_list( split( dt.lst.i, dt.lst.i[, year ] ), fpath )
}
#############################################
### Industries as producers of spillovers ###
#############################################
# Gross-output aggregation weights within each year/deterrence/duration cell.
dta[ , wgt := sum( go2_a64 ), by = .( year, deterrence, duration ) ]
dta[ , wgt := go2_a64 / wgt ]
# Economy-wide within effect (shock): gross-output-weighted sum of the
# industry effects; `within` is overwritten with its weighted contribution.
dta[ , within := ( within * wgt ) ]
dta[ , within.ew := sum( within ), by = .( year, deterrence, duration ) ]
# Industry contribution to the economy-wide shock (percent; sums to 100 per cell).
dta[ , within.decomposition := ( within / within.ew ) * 100 ]
#dta[ , sum( within.decomposition ), by = .( year, deterrence, duration ) ]
# Contribution to the economy-wide spillover: gross-output shares times each
# spillover matrix, aligned back to dta via the composite key.
vec <- unlist( lapply( mat.lst, function( X ) drop( ( colSums( Vt )[ rownames( X ) ] / sum( Vt ) ) %*% X ) ) )
vec <- vec[ dta[ , paste( year, deterrence, duration, nace2_2d_a64, sep ="." ) ] ]
dta[ , spillover := vec * 100 ]
# Economy-wide spillover effect (shock)
dta[ , spillover.ew := sum( spillover ), by = .( year, deterrence, duration ) ]
# Industry contributions (percent)
dta[ , spillover.decomposition := spillover / spillover.ew * 100 ]
#dta[ , sum( within.ew.share ), by = .( year, deterrence ) ]
# Sanity check: decompositions should sum to 100 within each cell.
dta[ , sum( spillover.decomposition ), by = .( year, deterrence, duration ) ]
# Total, economy-wide
dta[ , total := within + spillover ]
dta[ , total.ew := sum( total ), by = .( year, deterrence, duration ) ]
# Industry contributions to total
dta[ , total.decomposition := total / total.ew * 100 ]
# Rename to shorter output column names ("in retrospect I should have picked
# better names"); the working columns are dropped first.
dta[ , c( "go2_a64", "wgt", "within", "spillover", "total" ) :=NULL]
old <- c( "within.ew", "within.decomposition", "spillover.ew", "spillover.decomposition", "total.ew", "total.decomposition" )
new <- c( "within", "pctOfWithin", "spillover", "pctOfSpillover", "total", "pctOfTotal" )
setnames( dta, old, new )
# Excel files for COMP: one workbook per deterrence/duration pair, one sheet per year.
dt.lst <- split( dta, dta[, .( deterrence, duration ) ] )
dt.lst <- lapply( dt.lst , function( x ) setkeyv( x, c( "year", "nace2_2d_a64" ) ) )
for ( i in names( dt.lst ) ){
  dt.lst.i <- dt.lst[[ i ]]
  dt.lst.i[ , deterrence := NULL ]
  fpath <- paste0( "results/industryOfOrigin_", gsub( x = i, pattern = ".", replacement = "_", fixed = T ), ".xlsx" )
  write_list( split( dt.lst.i, dt.lst.i[, year ] ), fpath )
}
# For work in R
save( dta, file = paste0( "results/industryOfOrigin.RData" ) )
write.csv( dta, file = paste0( "results/industryOfOrigin.csv" ), row.names = F, na = "" )
|
77a29c222820315dcab66bfdaef79b31d441171f
|
708f1b2117c87a9d82694718a95e90988181853f
|
/01 - R Clustering.r
|
1c8e27b41687edeb102bfb67a39053b6c5a0e3f8
|
[] |
no_license
|
EugenioGrant/AprendizajeconR
|
f1494f0ad0cf8b8bd2469c5fe62f4d44a9d18ee0
|
79b0561fd76a5c25189540631222767f4c341c38
|
refs/heads/master
| 2020-04-26T21:09:29.582319
| 2019-03-04T23:00:37
| 2019-03-04T23:00:37
| 173,833,900
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,138
|
r
|
01 - R Clustering.r
|
#...............................................................
# Code: Machine Learning with R
# Obj: Unsupervised Learning: Clustering (k-means on activity survey data)
# author: E. Grant
#...............................................................
#...............................................................
# 1. Working directory and libraries ----
#...............................................................
# Define Path
# NOTE(review): hard-coded absolute path; consider a relative path or here::here()
setwd("C:/Users/e.grant.SKIM/Google Drive/SKIM/05. Tools/40. R Kmeans") # Change it!
# Needed Packages - Note: Missing libraries can be installed with command install.packages("packagename")
library(tidyverse) # Tidyverse: Packages for data manipulation, exploration and visualization.
library(NbClust) # NbClust: A Package providing 30 indices for determining the best number of clusters.
library(factoextra) # factoextra: extract and visualize the output of multivariate data analyses.
library(cluster) # Methods: for Cluster analysis
library(gmodels) # Crosstabulations modeled after PROC FREQ in SAS or CROSSTABS in SPSS
#...............................................................
# 2. Data Pre Processing ----
#...............................................................
# Read our data file (empty strings treated as NA)
actividades_tbl <- read.csv("activities_db.csv", na = "")
# Remove any missing value (listwise deletion of incomplete rows)
actividades_tbl <- na.omit(actividades_tbl)
# Glimpse the data set
glimpse(actividades_tbl)
# View the first rows
head(actividades_tbl)
# Explore our dataset key variables (sexo = sex, edad = age, nse = socioeconomic level)
table(actividades_tbl$sexo, useNA = "ifany")
prop.table(table(actividades_tbl$sexo, useNA = "ifany"))
prop.table(table(actividades_tbl$edad, useNA = "ifany"))
prop.table(table(actividades_tbl$nse, useNA = "ifany"))
summary(actividades_tbl$nse)
#...............................................................
# 3. Training a model on the data ----
#...............................................................
# Keep vector of activities (drop the id and demographic columns)
vector <- actividades_tbl %>% select(-id, -sexo, -edad, -nse) # set rownames
vector <- as_tibble(vector)
# For K-means values matter, so we scale variables (z-scores: also getting positives & negatives)
vector_z <- as.data.frame(lapply(vector, scale))
# Compute and visualize k-means clustering
#? kmeans
# Fixed seed so kmeans restarts are reproducible
set.seed(86)
# Let's test 2 groups (nstart = 500 random initializations, best solution kept)
activities_clusters <- kmeans(vector_z, 2, nstart = 500)
fviz_cluster(activities_clusters, data = vector_z, ellipse.type = "convex")+ theme_minimal()
activities_clusters$size
# Let's test 3 groups
activities_clusters <- kmeans(vector_z, 3, nstart = 500)
fviz_cluster(activities_clusters, data = vector_z, ellipse.type = "convex")+ theme_minimal()
activities_clusters$size
#...............................................................
# 4. We need to define the k number of clusters ----
#...............................................................
# Elbow method
# the basic idea behind cluster partitioning methods, such as k-means clustering,
# is to define clusters such that the total intra-cluster variation
# (known as total within-cluster variation or total within-cluster sum of square) is minimized
fviz_nbclust(vector_z, kmeans, method = "wss") +
    geom_vline(xintercept = 5, linetype = 5)+
    labs(subtitle = "Elbow method")
# Silhouette method
# In short, the average silhouette approach measures the quality of a clustering.
# That is, it determines how well each object lies within its cluster.
# A high average silhouette width indicates a good clustering.
fviz_nbclust(vector_z, kmeans, method = "silhouette")+
    labs(subtitle = "Silhouette method")
# This can take a while
# 30 indices for determining the number of clusters and proposes to user the best clustering scheme
# from the different results obtained by varying all combinations of number of clusters, distance measures,
# and clustering methods. https://www.rdocumentation.org/packages/NbClust/versions/3.0/topics/NbClust
nc <- NbClust(vector_z, distance="euclidean", min.nc=2, max.nc=5, method="kmeans")
# We now have a k; let's cluster again with k = 4
activities_clusters <- kmeans(vector_z, 4, nstart = 500)
fviz_cluster(activities_clusters, data = vector_z, ellipse.type = "convex") + theme_minimal()
activities_clusters$size
# Export Centers (cluster centroids in z-score space)
activities_clusters$centers
centers_tbl <- as.data.frame(activities_clusters$centers)
write_csv(centers_tbl,"centers_tbl.csv",na = "")
# Put the cluster into our df and save the labeled data set
actividades_tbl$cluster <- activities_clusters$cluster
write.csv(actividades_tbl,"actividades_tbl_clust.csv", na ="")
table(actividades_tbl$cluster)
# Evaluate Results: cross-tabulate cluster membership against demographics
#?CrossTable
CrossTable(actividades_tbl$sexo, actividades_tbl$cluster,digits = 1,
           prop.t = FALSE, prop.r = FALSE, prop.chisq = FALSE, format="SPSS")
CrossTable(actividades_tbl$edad, actividades_tbl$cluster,digits = 1,
           prop.t = FALSE, prop.r = FALSE, prop.chisq = FALSE, format="SPSS")
CrossTable(actividades_tbl$nse, actividades_tbl$cluster,digits = 1,
           prop.t = FALSE, prop.r = FALSE, prop.chisq = FALSE, format="SPSS")
|
24f8efd04fb60c9f9c33ae884cd8e6898ef40cfb
|
57f780626d36e07c5a824b9e24e092d6110a12c9
|
/style.R
|
ccb0087f0be578f33f900b81f76d5f3123fdfc06
|
[] |
no_license
|
phanhung2/rps10_barcode
|
d173676fd7aed616bdbd3aca1c9bc280c17e759a
|
76a5cb1a251df62363202f9593cd731515a5d7a4
|
refs/heads/main
| 2023-08-18T21:10:26.027377
| 2021-09-23T18:23:06
| 2021-09-23T18:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 90
|
r
|
style.R
|
# Shared knitr chunk defaults for this project's R Markdown documents:
# echo the code in the rendered output and use 10 x 10 inch figures.
knitr::opts_chunk$set(echo = TRUE, fig.width = 10, fig.height = 10)
# options(width = 100)
|
636653b1f845efe6f7f963f3c6e4e47b244f0b1f
|
9b0dcef4d9e58c1feb0a4e80e80c9331cfd36b64
|
/man/geom_flat_violin.Rd
|
79bd4634aaffaac84f1916d6516201e759602ef0
|
[
"MIT"
] |
permissive
|
letaylor/scclusteval
|
89b0df0cfa044852c5767cc231a326490fe4b488
|
c7ed17bf781b64a3f1822641fad08dc60b2a5909
|
refs/heads/master
| 2020-09-14T03:56:25.103488
| 2019-09-18T15:13:33
| 2019-09-18T15:13:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 882
|
rd
|
geom_flat_violin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geomflatviolin.R
\name{geom_flat_violin}
\alias{geom_flat_violin}
\title{A Flat Violin plot}
\usage{
geom_flat_violin(mapping = NULL, data = NULL, stat = "ydensity",
position = "dodge", trim = TRUE, scale = "area",
show.legend = NA, inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{See \code{\link[ggplot2]{geom_violin}}}
\item{data}{See \code{\link[ggplot2]{geom_violin}}}
\item{position}{See \code{\link[ggplot2]{geom_violin}}}
\item{trim}{See \code{\link[ggplot2]{geom_violin}}}
\item{scale}{See \code{\link[ggplot2]{geom_violin}}}
\item{show.legend}{See \code{\link[ggplot2]{geom_violin}}}
\item{inherit.aes}{See \code{\link[ggplot2]{geom_violin}}}
\item{...}{See \code{\link[ggplot2]{geom_violin}}}
}
\description{
A Flat Violin plot
}
\examples{
ggplot(diamonds, aes(cut, carat)) +
geom_flat_violin() +
coord_flip()
}
|
c5211079c83e1f40a49254da379051b527c00b0d
|
3dd1b3bff4d96c8af5bf003b0998e7e91e25943e
|
/bin/rawSigNBModel.R
|
93081943d73749b7c272d1a38627e1a97f91bb22
|
[] |
no_license
|
ParkerLab/chromatin_information
|
87c6ceaedf4c47df4cdf8a242b41451ab572d483
|
643ea042984096416afd88174410e7c6acc5d72b
|
refs/heads/master
| 2022-03-10T11:19:05.906325
| 2022-03-02T15:27:46
| 2022-03-02T15:27:46
| 206,384,812
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,799
|
r
|
rawSigNBModel.R
|
#!/usr/bin/env Rscript
# Raw Signal NB binomial modeling
# This will take a raw signal distribution and fit it to NB to
# calculate probabilities scores.
# INPUT:
# 1) Motifs files
# 2) Factor name
# 3) Output file handle
# 4) Column where values are
# (mu) Manual NB mean
# (size) Manual NB overdispersion
# (TP) ChIP-seq true positives
library(optparse)
library(ggplot2)
library(scales)
library(MASS)
options(stringsAsFactors = F)
option_list = list(
# Mandatory
make_option(c("-f", "--file"), type="character", default=NULL,
help="raw signal file", metavar="character"),
make_option(c("-o", "--output"), type="character", default=NULL,
help="output file handle", metavar="character"),
make_option(c("-c", "--colnum"), type="integer", default=NULL,
help="raw signal column on the first file", metavar="integer"),
# Optional arguments
make_option(c("-n", "--name"), type="character", default=NULL,
help="motif name", metavar="character"),
make_option(c("--dir1"), type="character", default='motifsWithBigwigCoverage/',
help="directory with all read counts (include trailing slash)",
metavar="character"),
make_option(c("--dir2"), type="character", default='motifsWithBigwigCoverage_outsidePeaks/',
help="directory with all read counts outside peaks (include trailing slash)",
metavar="character"),
make_option(c("--colnum2"), type="integer", default=NULL,
help="raw signal column on the second file", metavar="integer"),
make_option(c("-m", "--mu"), type="numeric", default=NULL,
help="manual NB mean", metavar="numeric"),
make_option(c("-s", "--size"), type="numeric", default=NULL,
help="manual NB overdispersion", metavar="numeric"),
make_option(c("-t", "--tp"), type="character", default=NULL,
help="true positives", metavar="character"),
make_option(c("--plotFitted"), type="logical", default=T,
help="make fitted distribution plots", metavar="logical"),
# make_option(c("--qvalcutoff"), type="numeric", default=7.957526e-09,
# help="manual q-value cutoff", metavar="numeric"),
make_option(c("--qvalcutoff"), type="numeric", default=1e-7,
help="manual q-value cutoff", metavar="numeric"),
make_option(c("--writeBed"), action = "store_true", default = FALSE,
help="Write bed file with adjusted p-values"),
# Comparison to other method (optional)
make_option(c("--otherMethod"), type="character", default=NULL,
help="bed file with other method scores", metavar="character"),
make_option(c("--threshold"), type="character", default=NULL,
help="other method threshold", metavar="character"),
make_option(c("--otherName"), type="character", default=NULL,
help="name of other method", metavar="character"),
make_option(c("--otherCol"), type="numeric", default=NULL,
help="other method's column on bed file", metavar="numeric"),
# Look at ChIP-seq signal and motif distances (optional)
make_option(c("--distanceChip"), type="character", default=NULL,
help="name of ChIP coverage and distances file", metavar="character"),
make_option(c("--distanceCol"), type="numeric", default=9,
help="ChIP coverage column on bed file", metavar="numeric"),
make_option(c("--chipCol"), type="numeric", default=10,
help="Summit disctance column on bed file", metavar="numeric"),
# Graphical parameters (optional)
make_option(c("--xlim"), type="numeric", default = 300,
help="histograms xlim", metavar="numeric"),
make_option(c("--ylim"), type="numeric", default = NULL,
help="histograms ylim", metavar="numeric"),
make_option(c("--xlim_p1"), type="numeric", default = NULL,
help="p1 histogram xlim", metavar="numeric")
)
opt_parser <- OptionParser(option_list = option_list);
args <- parse_args(opt_parser)
opt <- list()
# source('/home/albanus/scripts/source/rawSigNBModel/testing.R')
if (is.null(args$file)){
print_help(opt_parser)
stop("At least the first three arguments must be supplied (input, output, column).", call. = FALSE)
}
# Read data:
# d  = read counts at motifs (args$dir1);
# d2 = read counts from args$dir2 (by default, motifs outside peaks) used as
#      the background distribution for NB fitting.
d <- read.table(paste(args$dir1, args$file, sep = ''), sep = "\t")
d2 <- read.table(paste(args$dir2, args$file, sep = ''), sep = "\t")
outfile <- args$output
column <- args$colnum
# Signal column in d2 defaults to the same column index used for d
if(!is.null(args$colnum2)){
    column2 <- args$colnum2
} else{
    column2 <- args$colnum
}
colnames(d)[column] <- 'readCounts'
colnames(d2)[column2] <- 'readCounts'
## Parse optional arguments
# Name: label used in plot titles and output tables (defaults to the file name)
if(!is.null(args$name)){
    factor <- args$name
} else {factor <- args$file}
# Evaluate performance only when a ChIP-seq true-positive set is supplied
if(!is.null(args$tp)){
    tp <- read.table(args$tp)
    opt$evalPerf <- TRUE
} else {opt$evalPerf <- FALSE}
# Manual NB parameters: when both mu and size are given, skip distribution fitting
if(!is.null(args$mu) & !is.null(args$size)){
    mu <- as.numeric(args$mu)
    size <- as.numeric(args$size)
    opt$manual <- TRUE
} else {opt$manual <- FALSE}
# Compare to another method only when all four related arguments are supplied
if(!is.null(args$otherMethod) & !is.null(args$threshold) &
   !is.null(args$otherName) & !is.null(args$otherCol)){
    opt$compare <- TRUE
} else {opt$compare <- FALSE}
# Set NAs to 0 and convert to integer.
# Fixed: the original condition tested d[,column] twice (the second operand
# should be d2[,column2]) and coerced d2 at the wrong column index (column
# instead of column2), so non-numeric data in d2 went undetected/unconverted.
# NOTE(review): if a column was read in as a factor, as.numeric() yields level
# codes rather than the printed values; as.numeric(as.character(...)) may be
# intended — confirm against the input files.
if (!is.numeric(d[, column]) || !is.numeric(d2[, column2])) {
    warning(paste(factor,
                  ": One or more columns are not numbers. Forcefully converting and proceeding.",
                  sep = ''))
    d[, column] <- as.numeric(d[, column])
    d2[, column2] <- as.numeric(d2[, column2])
}
# Missing signal means no observed reads -> treat as zero counts
d[is.na(d[, column]), column] <- 0
# NB fitting and pnbinom() require integer counts
d[, column] <- as.integer(round(d[, column], digits = 0))
d2[is.na(d2[, column2]), column2] <- 0
d2[, column2] <- as.integer(round(d2[, column2], digits = 0))
# Fit data into a Negative Binomial (complains a bit, but does its job)
n <- 15000
if(!opt$manual){
dist <- list(mus = NULL, sizes = NULL, logliks = NULL)
for(i in 1:100){
sampled <- sample(d2[,column2], n)
fitted <- suppressWarnings(fitdistr(sampled,"negative binomial"))
dist$sizes[i] <- fitted$estimate[1]
dist$mus[i] <- fitted$estimate[2]
dist$logliks[i] <- fitted$loglik
}; rm(i, fitted)
mu <- mean(dist$mus)
muSD <- sd(dist$mus)
size <- mean(dist$sizes)
sizeSD <- sd(dist$sizes)
loglik <- mean(dist$logliks)
loglikSD <- sd(dist$logliks)
negbin <- rnbinom(n = n, size = size, mu = mu)
write.table(data.frame(factor = factor, size = size, mu = mu, loglik = loglik,
sizeSD = sizeSD, muSD = muSD, loglikSD = loglikSD),
file = paste(outfile,'.nb',sep=''), quote = F, row.names = F,
sep = '\t', col.names = T)
} else {
negbin <- rnbinom(n = n, size = size, mu = mu)
sampled <- sample(d2[,column2], n)
}
# Use fitted or manual data to calculate signal probabilities
d$negbin <- pnbinom(q = d[,column], size = size, mu = mu, lower.tail = F)
d$log10negbin <- -log10(d$negbin)
d$qval <- p.adjust(d$negbin, 'BH')
d$log10qval <- -log10(d$qval)
d$p_by <- p.adjust(d$negbin, 'BY')
d$log10by <- -log10(d$p_by)
if(args$writeBed){
out_data <- subset(d, select = -c(negbin,qval,log10qval,p_by))
out_data <- out_data[,c(1:7,9,8)] # Keep column for compatibility
outBed = paste(outfile,'.bed.gz',sep='')
write.table(out_data, file = gzfile(outBed),
quote = F, row.names = F, sep = '\t', col.names = F)
}
# Evaluate performance
if(opt$evalPerf){
# Define TPs
d$tp <- paste(d$V1,d$V2,d$V3,sep="_") %in% paste(tp$V1,tp$V2,tp$V3,sep="_")
# Sort by -log10 p-value, add index and cumulative fraction of TPs (Recall)
d <- d[order(d$log10negbin, decreasing = T),]
ntp <- sum(d$tp)
ntn <- nrow(d) - ntp
d$index <- 1:nrow(d)
d$recall <- cumsum(d$tp)/ntp
# Calculate Precision for each of the positions (TP / (TP + FP))
d$ntp <- cumsum(d$tp)
d$nfp <- cumsum(d$tp == F)
d$precision <- d$ntp/(d$nfp+d$ntp)
# Calculate FPR (FP / (TN + FP))
d$tn <- ntn - d$nfp
# Fixed: the numerator was d$ntp (true positives), which contradicts the
# FPR definition stated above; FPR = FP / (TN + FP). Note d$tn + d$nfp is
# constant and equal to ntn (total ChIP-negative motifs).
d$fpr <- d$nfp/(d$tn + d$nfp)
# Calculate F1 scores
d$f1 <- 2*(d$precision * d$recall)/(d$precision + d$recall)
d$f1[is.na(d$f1)] <- 0
maxf1qval <- d$qval[d$f1 == max(d$f1)]
idxFdr <- c(sum(d$qval <= 1e-13),
sum(d$qval <= 1e-12),
sum(d$qval <= 1e-11),
sum(d$qval <= 1e-10),
sum(d$qval <= 1e-9),
sum(d$qval <= 1e-8),
sum(d$qval <= 1e-7),
sum(d$qval <= 1e-6),
sum(d$qval <= 1e-5),
sum(d$qval <= 1e-4),
sum(d$qval <= 0.001),
sum(d$qval <= 0.01),
sum(d$qval <= 0.05),
sum(d$qval <= 0.1),
sum(d$qval <= 0.2),
sum(d$qval <= maxf1qval),
sum(d$qval <= args$qvalcutoff),
sum(d$p_by < 0.05))
idxFdr[idxFdr == 0] <- 1
idxFdr <- data.frame(index = idxFdr,
nReads = d[idxFdr, column],
log10negbin = d$log10negbin[idxFdr],
percentile = 1 - (d$index[idxFdr]/nrow(d)),
precision = d$precision[idxFdr],
recall = d$recall[idxFdr],
fpr = d$fpr[idxFdr],
fdr = factor(c('1e-13','1e-12','1e-11','1e-10','1e-9','1e-8','1e-7',
'1e-6','1e-5','0.01%','0.1%','1%', '5%','10%','20%',
'F1 max','Custom', 'Bonferroni'),
levels = c('1e-13','1e-12','1e-11','1e-10','1e-9','1e-8',
'1e-7','1e-6','1e-5','0.01%','0.1%','1%',
'5%','10%','20%','F1 max','Custom','Bonferroni'),
ordered = T),
qval = c(1e-13,1e-12,1e-11,1e-10,1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,
0.001,0.01,0.05,0.1,0.2,maxf1qval,args$qvalcutoff,NA)
)
write.table(idxFdr, file = paste(outfile,'.out',sep=''), quote = F, row.names = F,
sep = '\t', col.names = T)
# Calculate peak statistics (assuming d2 is the subset of d outside peaks - might not be depending
# on which dataset is used for d2)
d$peaks <- paste(d$V1,d$V2,d$V3,sep="_") %in% paste(d2$V1,d2$V2,d2$V3,sep="_") == F
d$categories <- ifelse(d$tp & d$peak, 'ChIP + Atac +',
ifelse(d$tp & !d$peak, 'ChIP + Atac -',
ifelse(!d$tp & d$peak, 'ChIP - Atac +',
ifelse(!d$tp & !d$peak, 'ChIP - Atac -',
'error'))))
categories <- as.data.frame(as.matrix(table(d$categories)))
categories <- rbind(
as.data.frame(as.matrix(c(
'All' = nrow(d),
'ChIP -' = sum(!d$tp),
'ChIP +' = sum(d$tp),
'Atac -' = sum(!d$peaks),
'Atac +' = sum(d$peaks)))),
categories)
categories <- data.frame(factor = factor,
category = rownames(categories),
counts = categories$V1,
percentage = categories$V1 / nrow(d))
# Get the percentages of the predictions that were correct
categories2 <- as.data.frame(t(as.matrix(table(d$qval <= 0.01))), row.names = 'All')
categories2 <- rbind(categories2,
as.data.frame.matrix(table(d$tp, d$qval <= 0.01), row.names = c('ChIP -', 'ChIP +'))
)
categories2 <- rbind(categories2,
as.data.frame.matrix(table(d$peaks, d$qval <= 0.01), row.names = c('Atac -', 'Atac +'))
)
categories2 <- rbind(categories2,
as.data.frame.matrix(table(d$categories, d$qval <= 0.01)))
categories2 <- data.frame(factor = factor,
category = rownames(categories2),
calledTrue = categories2[,'TRUE'],
calledFalse = categories2[,'FALSE'],
percentTrue = categories2[,'TRUE'] /apply(categories2,1,sum)
)
categories <- merge(categories,categories2)
# Output
write.table(categories, file = paste(outfile,'.categories',sep=''), quote = F, row.names = F,
sep = '\t', col.names = T)
# Set categories to factor for plotting later
d$categories <- factor(d$categories, levels = c('ChIP + Atac +', 'ChIP + Atac -',
'ChIP - Atac +', 'ChIP - Atac -'), ordered = T)
}
## Plot ###
# Fitted distribution to sampled data
nbins <- ifelse(max(sampled) >= max(negbin), max(sampled), max(negbin))
if(args$plotFitted){
p1 <- ggplot(NULL, aes(sampled)) +
geom_histogram(aes(fill = 'Sampled data', color = NULL), alpha = .5, bins = nbins) +
geom_histogram(aes(negbin, fill = 'Negative binomial', color = NULL), alpha = .5, bins = nbins) +
scale_fill_manual(name='Distributions', values = c('Sampled data' = 'black',
'Negative binomial' = 'red')
) +
labs(x = 'Tags in motif vicinity', y = 'Number of motifs', title = factor) +
theme_bw()
if(!is.null(args$xlim_p1)){
p1 <- p1 + coord_cartesian(xlim = c(0, args$xlim_p1))
}
ggsave(p1,filename=paste(outfile,'.fitted.pdf',sep=''), height=5, width=6)
# Histogram of p-values
p1.2 <- ggplot(d, aes(negbin)) +
geom_histogram(bins = 10, fill = 'white', color = 'black') +
theme_bw() +
labs(x = 'p-value', y = 'Counts',
title = paste(args$name, ' p-values distribution', sep ='')) +
scale_y_continuous(labels = comma) +
theme(plot.title = element_text(face = 'bold', margin = margin(b=20,t=10)),
axis.title.x = element_text(margin = margin(b=10,t=20)),
axis.title.y = element_text(margin = margin(l=10,r=20)))
ggsave(p1.2, filename=paste(outfile,'.p-histogram.pdf',sep=''), height=5.5, width=5.5)
}
if(opt$evalPerf){
# Make PR plots of the relationships
p2 <- ggplot(d, aes(x = recall, y = precision)) +
geom_line() +
geom_vline(aes(xintercept = recall, color = fdr, lty = fdr), idxFdr) +
geom_hline(aes(yintercept = precision, color = fdr, lty = fdr), idxFdr) +
geom_point(aes(x = recall, y = precision, color = fdr), idxFdr, size = 3) +
scale_x_continuous(labels = comma) +
ylim(c(0,1)) +
labs(x = 'Recall', y = 'Precision', color = 'FDR Threshold', lty = 'FDR Threshold', title = factor) +
theme_bw()
ggsave(p2,filename=paste(outfile,'.pr.pdf',sep=''), height=5, width=6.5)
p2.1 <- ggplot(d, aes(x = index, y = f1)) +
geom_line() +
scale_x_continuous(labels = comma) +
labs(x = 'Motif index', y = 'F1 score', title = factor) +
theme_bw()
ggsave(p2.1,filename=paste(outfile,'.f1.pdf',sep=''), height=5, width=6.5)
# Plot overall distributions
p3 <- ggplot(d, aes(x = index, y = log10negbin)) +
geom_line() +
geom_vline(aes(xintercept = index, color = fdr, lty = fdr), idxFdr, lwd = 1) +
geom_point(aes(x = index, y = log10negbin, color = fdr), idxFdr, size = 3) +
scale_x_continuous(labels = comma) +
labs(x = 'Ordered Motif Rank', y = expression('-log'[10]*'(p-value)'),
color = 'FDR Threshold', lty = 'FDR Threshold', title = factor) +
theme_bw()
ggsave(p3, file = paste(outfile, '.pdf', sep = ''), width = 5.5, height = 4)
# Make categories plots
source('/home/albanus/scripts/source/rawSigNBModel/plotCategories.R')
# Compare to other method?
if(opt$compare){
source('/home/albanus/scripts/source/rawSigNBModel/compareMethods.R')
}
# Look at ChIP-seq signal and summit distances?
if(!is.null(args$distanceChip)){
source('/home/albanus/scripts/source/rawSigNBModel/summitDistances.R')
}
}
|
b653462335877b12f02160a9df452e1cedea51fc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ibmdbR/examples/idadf.Rd.R
|
6156fae14bfe8ad8b7ad5873db3061638f25031e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
idadf.Rd.R
|
# Auto-extracted example code for the ibmdbR help topic idadf / idaSave /
# idaUpdate. The database-dependent example is wrapped in a "Not run" block
# (##D lines), so sourcing this file only attaches the package.
library(ibmdbR)
### Name: idadf,idaSave,idaUpdate
### Title: Query, store and update data in the database.
### Aliases: idadf idaSave idaUpdate
### ** Examples
## Not run:
##D # create connection to DB
##D con <- idaConnect("BLUDB", "", "")
##D
##D # create data.frame from table
##D df <- idadf(con, "SELECT * FROM IRIS")
##D
##D # close the connection again
##D idaClose(con)
## End(Not run)
|
e73dbbc7def4e79ddd15494d5b72bbb07d394abd
|
9948bca9f36c48dbe4329f6e030184c25d56ded4
|
/man/nmis.Rd
|
40c5fa80fd1f76ea376549afa6ffeb296c4f624e
|
[] |
no_license
|
Aulide81/estadisticos
|
0b75160fec317349fd0a4d20b820f7e538a4a491
|
cf79860219eba96fc404eed83516a2852a506fc6
|
refs/heads/master
| 2020-05-21T04:50:10.973647
| 2019-10-10T14:33:31
| 2019-10-10T14:33:31
| 38,392,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
rd
|
nmis.Rd
|
\name{nmis}
\alias{nmis}
\title{
Campos no validos
}
\description{
Devuelve el numero de campos en los que hay valor ausente, missing
}
\usage{
nmis(x)
}
\arguments{
\item{x}{
Matriz o data.frame
}
}
\value{
Vector numerico
}
\author{
Emilio Arenas
}
|
3192f3da051c92174dbfbd764281aa0ee74e7e2c
|
e04b360ce5307d44c775cde976a39ce71e93e89f
|
/R/globals.R
|
66f0e01fcfd237ea3c3e2360e3be096201b9837b
|
[] |
no_license
|
haukelicht/AnnotationModelsR
|
efe73983e0f0171c7cf75aa1688f1994aa6626fd
|
b507e0d5f581adcaea93fc214cbb05cd665afa21
|
refs/heads/master
| 2020-09-14T11:32:10.682643
| 2020-07-07T07:57:47
| 2020-07-07T07:57:47
| 223,117,022
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
globals.R
|
# Register names used via non-standard evaluation (inside dplyr/tidyr
# pipelines) so that R CMD check does not flag them as undefined global
# variables. Duplicated entries in the original list ("est_prob" x3,
# "labeled" x2) have been removed; registering a name once is sufficient,
# and globalVariables() appends by default, so behavior is unchanged.
utils::globalVariables(
  c(
    "."
    , "_item"
    , "votes"
    , "tie_breaker"
    , "majority_vote"
    , "_label"
    , "est_prob"
    , "name"
    , "value"
    , "_annotator"
    , "labeled"
    , "iter"
    , "ll"
    , "_prob_"
    , "_label_no_"
  )
)
|
078e99e1bd1ff6694bc66589cd0e6539386bdbef
|
e50a4d0be5dba098791f9e33482c88ad822689c9
|
/R/readjustWindow.R
|
7c2c36d8494aa998f8c27b0a91de32f312ebde80
|
[] |
no_license
|
protViz/prozor
|
10e2349928f51de53acad1f0cdb49b44cf126846
|
c2ec70174d4c6abe555ad5bdcb0615ad5c866387
|
refs/heads/master
| 2023-07-12T05:05:43.347932
| 2023-06-26T08:29:25
| 2023-06-26T08:29:25
| 41,495,885
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,184
|
r
|
readjustWindow.R
|
# Moves window boundaries to positions where no/few peaks are observed.
#
# windfrom : numeric vector of window boundaries (starts when isfrom = TRUE,
#            ends when isfrom = FALSE).
# empty    : numeric vector of candidate positions (histogram bin midpoints
#            with few observed masses), assumed sorted ascending.
# isfrom   : TRUE  -> snap each boundary to the nearest empty position that
#                     does not lie to its right beyond the next bin
#                     (window starts prefer moving down);
#            FALSE -> mirror logic for window ends (prefer moving up).
# Returns a numeric vector of adjusted boundaries, one per input boundary.
# Boundaries whose nearest empty position is at the extreme of `empty` are
# left unchanged. (For a zero-length input this returns numeric(0); the
# original returned NULL — equivalent for all numeric uses.)
#
# Rewritten with vapply instead of growing the result with c() in a loop
# (O(n^2) copying); the per-element logic is unchanged.
.makenewfromto <- function(windfrom, empty, isfrom = TRUE) {
    vapply(windfrom, function(boundary) {
        nearest <- which.min(abs(boundary - empty))
        if (isfrom) {
            if (nearest > 1) {
                # Prefer the nearest empty position strictly below the start;
                # otherwise fall back one bin to the left.
                if (empty[nearest] < boundary) empty[nearest] else empty[nearest - 1]
            } else {
                boundary
            }
        } else {
            if (nearest < length(empty)) {
                # Prefer the nearest empty position strictly above the end;
                # otherwise advance one bin to the right.
                if (empty[nearest] > boundary) empty[nearest] else empty[nearest + 1]
            } else {
                boundary
            }
        }
    }, numeric(1))
}
#' Readjust windows so that boundaries lie in regions of few peaks.
#'
#' Builds a histogram of \code{ms1data} with bin width \code{1/10^digits},
#' marks bins with fewer than \code{maxbin} observations as "empty", and
#' snaps each window boundary to a nearby empty bin midpoint via
#' \code{.makenewfromto} (starts prefer moving down, ends prefer moving up).
#'
#' @param wind a data frame with columns from and to
#' @param ms1data masses
#' @param digits mass accuracy (bin width is 1/10^digits)
#' @param maxbin histogram bins with fewer than this many counts are treated
#'   as empty and become candidate boundary positions
#' @param plot diagnostic plots (default FALSE)
#' @return data.frame of same format as \code{wind} but with improved start and end masses.
#' @export
#' @examples
#' data(masses)
#' cdsw <- Cdsw(masses)
#' breaks <- cdsw$sampling_breaks(maxwindow=100,plot=TRUE)
#' table <- cdsw$asTable()
#' dim(table)
#' head(table)
#'
#' tmp <- readjustWindows(table, masses,maxbin=10)
#' data.frame(tmp)
#'
readjustWindows <-
    function(wind,
             ms1data,
             digits = 1,
             maxbin = 15,
             plot = FALSE) {
        binwidth <- 1 / 10 ^ digits
        # Breaks span slightly beyond the observed mass range.
        # Fixed: endpoints are rounded to `digits` decimals; the original
        # hard-coded digits = 1, which could misalign (or truncate) the break
        # grid when digits != 1. Behavior for the default digits = 1 is
        # unchanged.
        breaks <- seq(
            round(min(ms1data) - binwidth, digits = digits),
            round(max(ms1data) + binwidth, digits = digits),
            by = binwidth
        )
        reshist <- graphics::hist(ms1data, breaks = breaks, plot = plot)
        if (plot) {
            graphics::abline(v = wind$from, col = 2, lty = 2)
            graphics::abline(v = wind$to, col = 3, lty = 2)
        }
        # Midpoints of sparsely populated bins: candidate boundary positions
        empty <- reshist$mids[which(reshist$counts < maxbin)]
        newfrom <- .makenewfromto(wind$from, empty)
        newto <- .makenewfromto(wind$to, empty, isfrom = FALSE)
        if (plot) {
            # Diagnostic zoom-ins around the middle and the first quarter of
            # the adjusted window starts.
            center1 <- newfrom[round(length(newfrom) / 2)]
            graphics::plot(reshist, xlim = c(center1 - 3, center1 + 3))
            graphics::abline(v = empty, col = "gray")
            graphics::abline(v = newfrom, lwd = 0.5, col = "red")
            graphics::abline(v = newto, lwd = 0.5, col = "green")
            center2 <- newfrom[round(length(newfrom) / 4)]
            graphics::plot(reshist, xlim = c(center2 - 3, center2 + 3))
            graphics::abline(v = newfrom, lwd = 0.5, col = "red")
            graphics::abline(v = newto, lwd = 0.5, col = "green")
        }
        width <- (newto - newfrom)
        mid <- (newfrom + newto) * 0.5
        # Number of masses inside each adjusted window (inclusive bounds);
        # vapply replaces the original grow-in-loop c() accumulation.
        newCounts <- vapply(
            seq_along(newfrom),
            function(i) sum(ms1data >= newfrom[i] & ms1data <= newto[i]),
            integer(1)
        )
        list(
            from = newfrom,
            to = newto,
            mid = mid,
            width = width,
            counts = newCounts
        )
    }
|
e13be05be1c60fc01e1d8015f3f3d554c5a64d57
|
b60f4edb84e136ae67e2ad78bd69953d8d25f0e3
|
/src/02_variant_filtering/03_qc_filtering.R
|
daab34ccdd509cd8890b7ea88b77f9cc879696b4
|
[] |
no_license
|
rivas-lab/sex-diff-biomarker-genetics
|
1c7a40f023b8d62ec3dbb6378c78a73047c0036e
|
43f7386787ee2ed35717d1c00af444db76d61698
|
refs/heads/master
| 2022-12-09T22:22:08.380682
| 2020-09-09T21:53:00
| 2020-09-09T21:53:00
| 103,570,042
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,921
|
r
|
03_qc_filtering.R
|
# qc_filtering.R
# E Flynn
# 11/9/17
# Code for running initial QC filtering for X, Y, XY (PAR), MT chromosomes.
# Filters by LD, missingness <= 0.1, MAF >= 0.01. We do not have HWE data for X chromosome - this is tricky to get.
DATA.FOLDER <- "data/"
QC.DIR <- sprintf('%s/chr_qc/', DATA.FOLDER)
LD.DIR <- QC.DIR
# Build a per-chromosome QC table from three PLINK outputs:
#   - the LD-pruning pass list (variants surviving LD filtering),
#   - allele frequencies (chr<chr>.f.afreq),
#   - per-variant missingness (chr<chr>.m.vmiss),
# then flag variants passing all filters (in LD-pruned set, MAF >= 0.01,
# missingness <= 0.1).
#
# chr: chromosome label used in input/output file names (e.g. "X", "XY").
# Side effects: prints row counts, writes <QC.DIR>/chr<chr>_qc_table.txt.
# Returns the combined QC data.frame.
generateQCFile <- function(chr){
  # updated to more stringent LD filters to match other data
  ld <- read.table(sprintf("%s/ld_out%s.txt.prune.in", LD.DIR, chr), colClasses='character') # list of variants that pass
  #ld <- read.table(sprintf("%s/chr%s_ld.prune.in", QC.DIR, chr), colClasses='character') # list of variants that pass
  colnames(ld) <- c("SNP")
  f <- read.table(sprintf("%s/chr%s.f.afreq", QC.DIR, chr))
  colnames(f) <- c("CHR", "SNP", "A1", "A2", "MAF", "NOBS") # number of allele observations
  m <- read.table(sprintf("%s/chr%s.m.vmiss", QC.DIR, chr))
  colnames(m) <- c("CHR", "SNP", "N_MISS", "N_GENO", "F_MISS") # missing call rate
  combined <- merge(f[,c("CHR", "SNP", "A1", "A2", "MAF")], m[,c("SNP", "F_MISS")], by="SNP")
  # Fix: the original looped over every SNP with sapply() and re-ran %in%
  # per element (O(n^2)); a single vectorised %in% gives the same 0/1 flags.
  combined$LD <- as.integer(as.character(combined$SNP) %in% ld$SNP)
  print(nrow(combined))
  #nrow(combined[combined$LD == 1 & combined$MAF >= 0.01,]) # 10297
  print(nrow(combined[combined$LD == 1 & combined$MAF >= 0.01 & combined$F_MISS <= 0.1,])) # 9849
  #vars.to.keep <- combined[combined$LD == 1 & combined$MAF >= 0.01,]$SNP
  vars.to.keep2 <- combined[combined$LD == 1 & combined$MAF >= 0.01 & combined$F_MISS <= 0.1,]$SNP
  # Same vectorisation for the final keep flag.
  combined$keep <- as.integer(as.character(combined$SNP) %in% vars.to.keep2)
  write.table(combined, sprintf("%s/chr%s_qc_table.txt", QC.DIR, chr), row.names=FALSE, quote=FALSE)
  return(combined)
}
# Run QC for the pseudo-autosomal (XY) and X chromosomes, then stack the
# per-chromosome tables and write one combined QC table to the working dir.
chr.tables <- lapply(c("XY", "X"), generateQCFile)
full.tab <- do.call(rbind, chr.tables)
write.table(full.tab, file="alt_chr_qc_table.txt", row.names=FALSE, quote=FALSE)
|
329d41c76aa00314813cdf4d472d8f63d0941f93
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/get_populations.R
|
e2f24a7ee7692c5dad2514ed031aafcafc728c50
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,053
|
r
|
get_populations.R
|
#' @title Pull scaled population estimates for a demographic frame
#' @description Builds a demographics frame via \code{get_demographics()} and
#'   then queries the mortality database for \code{pop_scaled} for every
#'   unique location/year/age-group/sex combination in that frame.
#' @param location_set_version_id Location set version, forwarded to
#'   \code{get_demographics()}.
#' @param year_start First year of the frame (forwarded to \code{get_demographics()}).
#' @param year_end Last year of the frame (forwarded to \code{get_demographics()}).
#' @param by_sex Forwarded to \code{get_demographics()}, Default: 1
#' @param by_age Forwarded to \code{get_demographics()}, Default: 1
#' @param custom_sex_id Forwarded to \code{get_demographics()}, Default: NULL
#' @param custom_age_group_id Forwarded to \code{get_demographics()}, Default: NULL
#' @return The result of \code{run_query()} on the population query --
#'   presumably one row per age-group/year/location/sex with a
#'   \code{pop_scaled} column (verify against \code{run_query()}'s return type).
#' @details Requires access to the database host hard-coded below. Only the
#'   "best" mortality output version (\code{ov.is_best = 1}) is queried.
#' @examples
#' \dontrun{
#' if (interactive()) {
#'   # EXAMPLE1
#' }
#' }
#' @rdname get_populations
#' @export
get_populations <- function(location_set_version_id,
                            year_start, year_end,
                            by_sex = 1, by_age = 1,
                            custom_sex_id = NULL, custom_age_group_id = NULL) {
  ## Make Frame
  df <- get_demographics(location_set_version_id, year_start, year_end, by_sex, by_age, custom_sex_id, custom_age_group_id)
  # Collapse the unique ids of each demographic dimension into a
  # comma-separated string, bound to a local variable of the same name
  # (location_id, year_id, ...) for splicing into the SQL IN-lists below.
  for (ids in c("location_id", "year_id", "age_group_id", "sex_id")) {
    assign(ids, paste0(unique(df[[ids]]), collapse = ","))
  }
  ## Pull
  dbname <- "shared"
  host <- "modeling-cod-db.ihme.washington.edu"
  query <- paste0("SELECT
o.age_group_id,
year_id,
o.location_id,
o.sex_id,
pop_scaled
FROM
mortality.output o
LEFT JOIN
mortality.output_version ov using (output_version_id)
LEFT JOIN
shared.age_group a using (age_group_id)
LEFT JOIN
shared.location l using (location_id)
LEFT JOIN
shared.sex s using (sex_id)
WHERE
ov.is_best = 1
and year_id in (", year_id, ")
and o.location_id in (", location_id, ")
and o.sex_id in (", sex_id, ")
and o.age_group_id in (", age_group_id, ")")
  run_query(dbname, host, query)
}
|
8709e7b8946880d6ca74f790cd1b3ef59a79394f
|
f23c29c28a3aa386372d6f0e0e9faae74cf10296
|
/man/journals.Rd
|
3e3649cd01c5b1a9be39a8e737659bb2854e50b4
|
[] |
no_license
|
xiaoningwang/rticles
|
1d9318ea9f95d3ff2078a2dec5d95db09a091cc7
|
5e81f6aa7ccfbdfb657b3c5786b56bb5ccb4cf88
|
refs/heads/master
| 2023-05-11T19:28:08.593412
| 2023-04-25T19:14:14
| 2023-04-25T19:14:14
| 97,591,351
| 1
| 0
| null | 2017-07-18T11:42:17
| 2017-07-18T11:42:17
| null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
journals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{journals}
\alias{journals}
\title{List available journals}
\usage{
journals()
}
\value{
A character vector of the journal names.
}
\description{
List available journal names in this package.
}
\details{
These names can be useful in two ways:
\itemize{
\item You can add \verb{_article} suffix to get the name of the output format (e.g.,
\code{\link[=rjournal_article]{rjournal_article()}}).
\item You can use the name directly in the \code{template} argument of
\code{\link[rmarkdown:draft]{rmarkdown::draft()}}.
}
}
\examples{
rticles::journals()
}
|
7abfc77816c97c778ed6df24ca975dda65a39851
|
5e2016422948ec45305bd96c72ff0690f1f8fe0e
|
/R/setup.R
|
cebac0313511676e86ef6bb576f87efa1c2a1276
|
[] |
no_license
|
nm-training/rsetup
|
d45102fbadea50671c3953205afcb067b8951289
|
f8c65c455dd80f3791b6e7a9b6cfe8654f9b7d36
|
refs/heads/main
| 2023-06-24T10:31:08.276688
| 2021-07-25T07:32:39
| 2021-07-25T07:32:39
| 355,864,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
setup.R
|
# Download a course archive from `url_path` and unpack it into `set_path`
# via usethis::use_course().
# NOTE(review): options(Ncpus = 4) mutates a global option and is never
# restored -- confirm this side effect is intended.
get_course <- function(url_path, set_path) {
  options(Ncpus = 4)
  # if (require(rsetup)) { remove.packages("rsetup") }
  usethis::use_course(
    url = url_path,
    destdir = set_path
  )
}
# Download a course into `set_path` (via the namespaced rsetup::get_course,
# i.e. the packaged version of get_course() above) and then activate the
# resulting RStudio project.
set_project <- function(url_path, set_path) {
  # usethis::create_project(dir)
  rsetup::get_course(url_path, set_path)
  usethis::proj_activate(set_path)
}
# Ask the user to pick a project directory: the native chooser on Windows,
# the RStudio directory picker everywhere else. Returns the selected path.
set_path <- function() {
  on_windows <- .Platform$OS.type == "windows"
  if (!on_windows) {
    rstudioapi::selectDirectory(
      caption = "Select Directory",
      label = "Select",
      path = ""
    )
  } else {
    utils::choose.dir("", caption = "Choose a Suitable Folder")
    # choose.files()
  }
}
# Install the latest TinyTeX distribution so R Markdown can render PDFs.
install_pdf <- function() {
  tinytex::install_tinytex(version = "latest")
}
# Interactively pick an R script and run it as an RStudio background job,
# importing the current global environment into the job.
run_jobs <- function(){
  path <- file.choose()
  # path <- "C:\Users\m1s1n\Documents\R\r-course-2021\00_participants\00_getting_started\install_packages.R"
  rstudioapi::jobRunScript(path, importEnv = TRUE)
}
|
4e962c2a12d8422eb98538d342fb581d8540ded0
|
014aef7c521f0fc8e8fee29789095fc2ccdc10ef
|
/lab3/q3.R
|
bbfb5577bf72ccec075a136898b2bdc68a8df114
|
[] |
no_license
|
listerys/R-Lab-Kiit
|
d68096ff6058e1516add5adb0b2bb519cf7f3a32
|
bb4ad6ab3bc9e9ae5d3b231cc7a19fcec51acc13
|
refs/heads/master
| 2022-12-01T06:34:54.990834
| 2020-08-18T04:27:50
| 2020-08-18T04:27:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
q3.R
|
# Title     : Fibonacci printer
# Objective : Print the first `nterms` Fibonacci numbers.
# Created by: KIIT
# Created on: 8/18/2020

# Return the first n Fibonacci terms (0, 1, 1, 2, 3, ...) as a numeric
# vector. Returns an empty vector for n < 1 or NA (e.g. non-numeric input).
fibonacci_terms <- function(n) {
  if (is.na(n) || n < 1) {
    return(numeric(0))
  }
  terms <- numeric(n)
  terms[1] <- 0
  if (n >= 2) {
    terms[2] <- 1
  }
  if (n >= 3) {
    for (i in 3:n) {
      terms[i] <- terms[i - 1] + terms[i - 2]
    }
  }
  terms
}

nterms <- as.integer(readline("Enter the Number of terms "))
# Fix: the original loop started at count = 2 and never printed the first two
# terms (0 and 1), emitting only nterms - 2 values; it also errored on
# non-numeric input (while-condition on NA). Print the full sequence instead.
for (term in fibonacci_terms(nterms)) {
  print(term)
}
|
98070c8e975d3445a0356b0fee5be28e94f76dad
|
08c557f4e442c3238634fd57f09d966ffa114fc9
|
/run_analysis.R
|
8a6e494c54578c6bedb449740c4a2b9b1c89d4dd
|
[] |
no_license
|
vericone4/GettingAndCleaningData
|
f7a35a4804f5e64920d5eb2ba0cfdfcfc1788f8c
|
12e4ed697f521c0fd4adf85e1247ef47185378ac
|
refs/heads/master
| 2021-01-20T02:02:03.459500
| 2014-09-20T06:59:23
| 2014-09-20T06:59:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,864
|
r
|
run_analysis.R
|
# run_analysis(): Getting & Cleaning Data course project pipeline.
# Reads the UCI HAR train/test data, attaches activity labels and subject
# ids, keeps only the mean/std measurements, and writes a tidy table of
# per-subject/per-activity averages to "dataset.txt".
# NOTE(review): the setwd() below is a hard-coded, machine-specific path and
# will fail on any other machine; consider parameterising it.
# NOTE(review): the require(dplyr)/require(reshape2) guards mean the whole
# pipeline silently does nothing when a package is missing.
run_analysis<-function(){
    #**************I/O for all the given data files**********************#
    # set the wd
    setwd("~/Data/Venkata/Personal/Coursera/Getting_and _Cleaning_Data/Working_Directory")
    # identify the data directory
    data_dir<-"Data/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset"
    #identify the features file
    features_file<-paste(data_dir,"features.txt", sep="/")
    #identify the activities file
    activity_labels_file<-paste(data_dir,"activity_labels.txt", sep="/")
    # identify the test data directory
    test_dir<- paste(data_dir,"test", sep="/")
    # identify the training test directory
    training_dir<-paste(data_dir,"train", sep = "/")
    # identify the test data file
    test_file<-paste(test_dir,"X_test.txt", sep ="/")
    # identify the file which has the activity for the test data
    test_y_file<-paste(test_dir,"y_test.txt", sep ="/")
    # identify the file which has the subjects for the test data
    test_subject_file<-paste(test_dir,"subject_test.txt", sep ="/")
    # identify the training data file
    train_file<-paste(training_dir,"X_train.txt", sep ="/")
    # identify the file which has the activity for the train data
    train_y_file<-paste(training_dir,"y_train.txt", sep ="/")
    # identify the file which has the subjects for the train data
    train_subject_file<-paste(training_dir,"subject_train.txt", sep ="/")
    # data table for the test data
    test_dt<-read.table(file=test_file,sep="",header=FALSE)
    # data table for the train data
    train_dt<-read.table(file=train_file, sep ="", header=FALSE)
    # data table for the factors vector (feature names)
    factors_dt<-read.table(file=features_file, sep = "", header=FALSE)
    # assign the factors as the columns for the test data
    names(test_dt)<-factors_dt[ ,2]
    # assign the factors as the columns for the train data
    names(train_dt)<-factors_dt[ ,2]
    # read the test activity data into a data table
    test_y_dt<-read.table(file=test_y_file, sep = "", header=FALSE)
    # read the train activity data into a data table
    train_y_dt<-read.table(file=train_y_file, sep = "", header=FALSE)
    # read the test subject data into a data table
    test_subject_dt<-read.table(test_subject_file, sep = "", header= FALSE)
    # read the train subject data into a data table
    train_subject_dt<-read.table(train_subject_file, sep = "",
                                 header= FALSE)
    #read the activity file as a data table
    activity_dt<-read.table(activity_labels_file, sep = "",header=FALSE)
    #assign column names for activity data table and test_y_dt
    names(test_y_dt)<-c("activity_id")
    names(train_y_dt)<-c("activity_id")
    names(activity_dt)<-c("activity_id", "activity")
    #make sure the key column is numeric before joining
    test_y_dt$activity_id<-as.numeric(test_y_dt$activity_id)
    train_y_dt$activity_id<-as.numeric(train_y_dt$activity_id)
    activity_dt$activity_id<-as.numeric(activity_dt$activity_id)
    #******************** DATA PROCESSING ******************************#
    #Merge the activity labels data table and test_y_dt by activity id
    if(require(dplyr)){
        #Using dplyr package for inner join by activity_id
        test_y_dt<-inner_join(activity_dt,test_y_dt ,by ="activity_id")
        train_y_dt<-inner_join(activity_dt,train_y_dt,by ="activity_id")
        #convert the test_y_dt into tbl_df and also store the result
        # a final test_df
        final_test_df<-tbl_df(test_y_dt)
        #convert the train_y_dt into tbl_df and also store the result
        # a final train_df
        final_train_df<-tbl_df(train_y_dt)
        #use mutate function to add the new column into the
        #final test based
        #on values of subject test first column and also naming
        #the resulting column as subject id
        final_test_df<-mutate(final_test_df,
                              subject=test_subject_dt[,1])
        # select only the columns of interest
        final_test_df <-select(final_test_df,activity:subject)
        #use mutate function to add the new column into the
        #final train based
        #on values of subject train first column and also naming
        #the resulting column as subject id
        final_train_df <- mutate(final_train_df,
                                 subject=train_subject_dt[,1])
        # select only the columns of interest
        final_train_df <-select(final_train_df,activity:subject)
        #factors_dt<-tbl_df(factors_dt)
        #temp<-factors_dt[grepl("mean|std",factors_dt$V2),]
        # convert the test_dt into tbl_df
        test_dt<-tbl_df(test_dt)
        # select only columns having mean or std in the names
        test_dt <- select(test_dt,matches("mean|std"))
        #from the above result further select columns
        #which does not have angle in the name
        test_dt <- select(test_dt,-contains("angle"))
        # convert the train_dt into tbl_df
        train_dt<-tbl_df(train_dt)
        # select only columns having mean or std in the names
        train_dt <- select(train_dt,matches("mean|std"))
        #from the above result further select columns
        #which does not have angle in the name
        train_dt <- select(train_dt,-contains("angle"))
        #assembling all the cleaned and processed data
        # first step is to column bind the test_dt & final_test
        final_test_df<-cbind(final_test_df,test_dt)
        #do the same above for train_dt and final_train_dt
        final_train_df<-cbind(final_train_df,train_dt)
        #step2 row bind both test and training data
        final_df<-rbind(final_test_df,final_train_df)
        # clean up the names of the final data frame
        # remove the "-" (hyphen) in the column names using the gsub
        names(final_df)<-gsub("-","",names(final_df))
        # remove the "()" using the gsub
        names(final_df)<-gsub("\\(\\)","",names(final_df))
        #finally making all the column names as lower case
        names(final_df)<-tolower(names(final_df))
        #use the arrange function to order the final data frame by
        #$subject column
        final_df<-arrange(final_df,subject)
    }
    #************************ Part 5 *************************************
    # To accomplish the results for part 5 of the problem , the
    # final_df from the above is taken and melted using the
    # functions available in reshape2 package
    #**********************************************************************
    if(require (reshape2)){
        # first use melt function to make the final_df long and
        #skinny. We do this by melting the data frame so that the
        # resulting data frame captures the each measurement of mean
        #and std as a unique row for each combination of subject
        #and activity
        result_df<-melt(final_df,id=c("subject","activity"),
                        measure.vars=3:81)
        # cast the result_df so that we get the average of each variable
        # for the unique combination of subject and activity
        result_df<-dcast(result_df,subject+activity ~variable, mean)
        # Write the result to an output file.
        # NOTE(review): "row.name" relies on partial argument matching of
        # write.table's row.names argument -- works, but spell it out.
        write.table(result_df, file = "dataset.txt", sep = " ",
                    row.name = FALSE)
    }
}
|
5f6264ccbb6f1b5a070ff0fa807834ecf1745f5b
|
288b4b6998906714ab368e0ee14c70a4059be4ab
|
/data-raw/dat.lee2004.r
|
e5afa0b546435b8f95f268d298ec33035a51436b
|
[] |
no_license
|
qsh7950/metadat
|
f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018
|
5c70fa63d7acfa1f315534fb292950513cb2281e
|
refs/heads/master
| 2021-02-26T06:42:18.937872
| 2019-10-21T21:58:33
| 2019-10-21T21:58:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
dat.lee2004.r
|
# Read the raw Lee (2004) dataset (whitespace-delimited, with header) and
# serialize it as an .rda file into the package's data/ directory.
dat.lee2004 <- read.table("data-raw/dat.lee2004.txt", header=TRUE, stringsAsFactors=FALSE)
save(dat.lee2004, file="data/dat.lee2004.rda")
|
759950d0a3d53d6da20c54c774c002aaa1c4ac30
|
5b871b8f9db99ab2c88a60bb3a1edbf0fd8aec2f
|
/R/finish-methods.R
|
cd258f23e2b0fcdfc4b34ee16a702f11b27042b7
|
[] |
no_license
|
JasonHackney/ReportingTools
|
f461f158ff7813a0e35e54a3201037e0cae89e1c
|
2ead9868b44cf9f6829f4aad4d545946ca96250f
|
refs/heads/master
| 2021-08-01T15:05:39.198739
| 2021-07-30T15:38:45
| 2021-07-30T15:38:45
| 100,539,925
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,032
|
r
|
finish-methods.R
|
# finish() for HTMLReport: close the report's underlying HTML page,
# terminating the document.
setMethod("finish",
    signature = signature(
        publicationType = "HTMLReport"
    ),
    definition = function(publicationType, ...){
        closePage(page(publicationType))
    }
)
# finish() for DataPackage: assemble the package's DESCRIPTION file from the
# publication object's metadata (name, version, title, author, maintainer,
# dependencies, license, description) and write it into the package directory.
setMethod("finish",
    signature = signature(
        publicationType = "DataPackage"
    ),
    definition = function(publicationType, ...){
        pkg.dir <- path(publicationType)
        description.fn <- file.path(pkg.dir, 'DESCRIPTION')
        # Build the DESCRIPTION contents as one newline-separated string,
        # following the standard R package DESCRIPTION field format.
        description.str <- paste(
            "Package: ", name(publicationType),
            "\nVersion: ", publicationType@version,
            "\nTitle: ", title(publicationType),
            "\nAuthor: ", publicationType@author,
            "\nMaintainer: ", publicationType@maintainer,
            "\nDepends: ", paste(dependencies(publicationType), collapse=','),
            "\nLicense: ", publicationType@license,
            "\nType: Package",
            "\nLazyLoad: yes\nDescription: ", publicationType@description, sep="")
        cat(description.str, file=description.fn)
    }
)
|
cd62128b104c26a3e4121de3f938c83d6059121d
|
d835f0602ed9c05fc66f94b8a43e825b9b76d919
|
/archetypes/01_extract_covariates/03_bound_covs_by_transmission.r
|
ba45e9da04b7d8930df49e530352a89cc6819656
|
[] |
no_license
|
InstituteforDiseaseModeling/archetypes-intervention-impact
|
a41e15ad3b1d5ff00ca52dada12c553fa6fc847e
|
e7be7be6a689d15da4a8eb658cf80d65a65cbf19
|
refs/heads/master
| 2023-08-18T09:44:54.709338
| 2021-09-09T04:00:06
| 2021-09-09T04:00:06
| 128,471,001
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,513
|
r
|
03_bound_covs_by_transmission.r
|
## -----------------------------------------------------------------------------------------------------------------
# Seasonality Classification
# 00_bound_by_transmission.r
#
# Amelia Bertozzi-Villa, Institute for Disease Modeling, University of Oxford
# May 2018
#
# For a given covariate and continent of interest, this script extracts covariate-specific
# data from global rasters, transforms it into a panel dataset, and saves it for further clustering analyses.
#
# For a detailed project write-up see
# https://paper.dropbox.com/doc/Cluster-MAP-pixels-by-seasonality-zga4UM1DnBx8pc11rStOS
## -----------------------------------------------------------------------------------------------------------------------
library(gdistance)
library(data.table)
library(stringr)
library(stats)
library(rasterVis)
# NOTE(review): rm(list=ls()) wipes the workspace of whoever sources this
# script -- generally discouraged; kept as-is.
rm(list=ls())
# Provides crop_raster() used below.
source("extraction_functions.r")
root_dir <- Sys.getenv("HOME")
# map_root_dir <- "/Volumes/map_data/mastergrids"
#
# if (!dir.exists(map_root_dir)){
#   stop("Root map directory does not exist-- have you remembered to map the appropriate drives to your machine?")
# }
# rewrite if there's already a saved covariate extraction?
overwrite_extraction <- T
base_dir <- file.path(root_dir,
                      "Dropbox (IDM)/Malaria Team Folder/projects/map_intervention_impact/archetypes/covariates")
unbounded_cov_dir <- file.path(base_dir, "no_transmission_limits")
bounded_cov_dir <- file.path(base_dir, "with_transmission_limits")
# transmission_limit_dir <- file.path(map_root_dir, "../GBD2017/Processing/Spatial_Data/Static_Limits", "Pf_Limits_EnvironmentalOnly_Endemic2017Only_5k.tif")
transmission_limit_dir <- file.path(bounded_cov_dir, "from_GBD2017_Pf_Limits_EnvironmentalOnly_Endemic2017Only_5k.tif")
# Raster of Pf transmission limits; value 0 marks pixels outside the limits.
transmission_limits <- raster(transmission_limit_dir)
continents <- list.dirs(unbounded_cov_dir, recursive = F, full.names = F)
# For every continent and covariate: restrict the previously-extracted
# (unbounded) covariate values to pixels inside the transmission limits, and
# save both the clipped rasters and the filtered value table.
for (continent in continents){
    unbounded_continent_dir <- file.path(unbounded_cov_dir, continent)
    covs_to_bound <- list.dirs(unbounded_continent_dir, recursive = F, full.names = F)
    bounded_continent_dir <- file.path(bounded_cov_dir, continent)
    dir.create(bounded_continent_dir, showWarnings=F, recursive=T)
    for (this_cov in covs_to_bound){
        print(paste("running extraction for", this_cov, "in", continent))
        this_unbounded_cov_dir <- file.path(unbounded_continent_dir, this_cov)
        this_bounded_cov_dir <- file.path(bounded_continent_dir, this_cov)
        dir.create(this_bounded_cov_dir, showWarnings=F, recursive=T)
        unbounded_extraction_vals <- fread(file.path(this_unbounded_cov_dir, paste0(this_cov, "_vals.csv")))
        bounded_extraction_fname <- file.path(this_bounded_cov_dir, paste0(this_cov, "_vals.csv"))
        ## find values
        if (file.exists(bounded_extraction_fname) & overwrite_extraction==F){
            print("values already extracted")
        }else{
            # check for mask: reuse a previously-clipped continent mask if present
            clipped_mask_fname <- file.path(bounded_continent_dir, "bounded_mask.tif")
            if (file.exists(clipped_mask_fname)){
                print("loading saved mask")
                bounded_mask <- raster(clipped_mask_fname)
            }else{
                print("finding mask")
                orig_mask <- raster(file.path(unbounded_continent_dir, "mask.tif"))
                clipped_transmission <- extend(crop(transmission_limits, orig_mask), orig_mask)
                # Zero out mask pixels lying outside the transmission limits.
                bounded_mask <- raster::mask(orig_mask, clipped_transmission, maskvalue=0, updatevalue=0)
                writeRaster(bounded_mask, clipped_mask_fname, overwrite=T)
                plot(bounded_mask)
            }
            print("extracting from raster")
            all_cov_fnames <- list.files(this_unbounded_cov_dir)
            these_cov_fnames <- all_cov_fnames[all_cov_fnames %like% this_cov & all_cov_fnames %like% "tif"]
            this_cov_stack <- stack(file.path(this_unbounded_cov_dir, these_cov_fnames))
            # crop_raster() comes from extraction_functions.r (sourced above).
            cropped_layers <- crop_raster(this_cov_stack, mask = bounded_mask,
                                          out_fname = file.path(this_bounded_cov_dir, paste0(names(this_cov_stack), ".tif"))
            )
            # convert to data table, find pixels to keep from originally extracted dataset.
            vals <- as.matrix(cropped_layers)
            vals <- data.table(vals,
                               id=1:nrow(vals))
            vals <- vals[complete.cases(vals)]
            vals <- unbounded_extraction_vals[id %in% unique(vals$id)]
            print("saving extracted values")
            write.csv(vals, file=bounded_extraction_fname, row.names=F)
        }
    }
}
|
ca34b804b317eab70584a7011bc9015711ea399a
|
fbaaee512a18759486d3333f71a407e07b2ba105
|
/8_machineLearning/2_SP/PrintHeatMap.R
|
cd25d0cc21fc3928338c3ec0fbfc5c092af6c4cc
|
[] |
no_license
|
LeonardoMorenoG/ViPhOGs
|
3b3597073552f8fe7933b7b7c749b0ce729c0041
|
63cd5f6ec140a499f3316a2f0528a4ae2e87c1a3
|
refs/heads/main
| 2023-01-28T08:28:07.466080
| 2020-12-10T14:00:55
| 2020-12-10T14:00:55
| 320,289,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 935
|
r
|
PrintHeatMap.R
|
#!/usr/bin/Rscript
# Render a tab-delimited numeric matrix (first row = header, first column =
# row names) as a heatmap PDF.
# Usage: PrintHeatMap.R <matrix_file> <output_pdf>
# NOTE(review): the commented example Inputs below has 6 elements (working
# dir first, then colours) but the live code reads only Inputs[1] = matrix
# file and Inputs[2] = output PDF -- confirm the intended CLI contract.
rm(list=ls());
library(gplots);
Inputs<-commandArgs(trailingOnly=TRUE)
#Inputs=c("~/Dropbox/Work/MalCheck/", "ToHeatMap_Consensus.txt", "HeatMap_consenus.pdf", "white", "blue", "red")
##set working directory
# NOTE(review): setwd(getwd()) is a no-op; setwd(Inputs[1]) is commented out.
setwd(getwd())
#setwd(Inputs[1])
#Load file in R, assumes first row is header and first column is row names.
MatrixFile<-as.matrix(read.table(file=Inputs[1], header=T, sep="\t", row.names=1))
#Mt<-log((MatrixFile*100000)+1)
#Mt2<-log(Mt)
# Opens the output file
pdf(file=Inputs[2])
# Makes the color gradient, uniformly distributed
#colfun<-colorRampPalette(c(Inputs[3], Inputs[4],Inputs[5]))
colfun<-colorRampPalette(c('white', 'blue', 'green', 'yellow', 'orange','red'))
#Matrix_norm<-sqrt(MatrixFile)
# Make and print the heatmap, interpolating 1000 colours; no clustering,
# dendrograms, trace, or density legend.
heatmap.2(MatrixFile, Rowv=F, Colv=F, scale="none", col=colfun(1000), dendrogram="none", trace="none", density.info="none")
# Close the output PDF
dev.off()
|
e900388d917185aa267f81ea5fd76b874133db16
|
60ed8fb3dbd7199f2efd95857d0544f255024c51
|
/redfin.R
|
c06ef316c210f04b65992df6c414b6d943089f30
|
[] |
no_license
|
hack-r/yang
|
33e990e592a79160bf73375959802570bf071a81
|
c9e9bab0c0d9b8de766a1d304e1ba2a64c46ac2a
|
refs/heads/master
| 2021-01-10T11:04:52.090264
| 2016-04-20T18:41:26
| 2016-04-20T18:41:26
| 54,159,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
redfin.R
|
# Options -----------------------------------------------------------------
# NOTE(review): hard-coded, machine-specific working directory.
setwd("C://users//jmiller//Desktop//yang")

# Functions and Libraries -------------------------------------------------
# pacman::p_load() installs (if missing) and attaches the listed packages.
pacman::p_load(data.table, rvest, stringi, sqldf, XML)
# Strip leading and trailing whitespace from each element of a character
# vector (regex-based, so it also trims \f and \v, unlike trimws()).
trim <- function (x) sub("\\s+$", "", sub("^\\s+", "", x))
# Extract -----------------------------------------------------------------
# Load the Redfin listings export for ZIP 90210 as a data.table.
rf90210 <- fread("redfin_90210.csv")
|
7ccb870422ac04d102707d5868d9e636a1c1f710
|
2bd971cc829a8639792f615d48fe143bd898a821
|
/modules/Init/init_tree.R
|
fef1dab1e7c82350bd74662e6597130b970ce811
|
[] |
no_license
|
DavidBarke/shinyplyr
|
e2acaf11585c3510df38982401fd83c834932e3d
|
ddc30c2c2361cec74d524f2000a07f3304a5b15f
|
refs/heads/master
| 2023-04-20T07:43:47.992755
| 2021-05-11T10:56:49
| 2021-05-11T10:56:49
| 250,501,858
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,240
|
r
|
init_tree.R
|
# Populate the dataset-explorer tree: one group node holding the data.frames
# bundled with the datasets/ggplot2/dplyr packages, and one group node that
# will receive user-imported datasets (stored on .values$import$node).
init_tree <- function(tree, .values) {
  root <- tree$get_root_node()

  # Group node for datasets shipped with packages.
  pkg_group <- root$add_child(
    explorer_class_id = "__group__",
    object = Object$new("Package datasets"),
    removable = FALSE,
    return = "child"
  )

  # Fill the group with every data.frame exported by these packages.
  for (pkg in c("datasets", "ggplot2", "dplyr")) {
    add_pkg_datasets(pkg, pkg_group)
  }

  # Group node for datasets the user imports at runtime.
  .values$import$node <- root$add_child(
    explorer_class_id = "__group__",
    object = Object$new("Imported datasets"),
    removable = FALSE,
    return = "child"
  )
}
# Add a child group node for `pkg` under `node` and attach one dataset node
# for every data.frame found on the attached package's search-path entry.
add_pkg_datasets <- function(pkg, node) {
  pkg_env <- paste("package", pkg, sep = ":")
  candidate_names <- ls(pkg_env)

  group <- node$add_child(
    explorer_class_id = "__group__",
    object = Object$new(pkg),
    removable = FALSE,
    return = "child"
  )

  # Keep only the objects that are data.frames; other exports are skipped.
  for (nm in candidate_names) {
    candidate <- get(nm, pkg_env)
    if (inherits(candidate, "data.frame")) {
      group$add_child(
        explorer_class_id = "dataset",
        object = DatasetObject$new(
          name = nm,
          dataset = candidate
        ),
        removable = FALSE
      )
    }
  }
}
|
bcead90976b0d2d4688f276950ddcf20ba4dfb9d
|
599d35b03b589f634433653629cdc33f7bdc2f17
|
/R/computing_priors.R
|
5aee5bfd9c7c34c74159ea0def30ed07d3258424
|
[] |
no_license
|
francescodc87/IPA
|
28f111a7b5866c58f93024623c0cce34ea312f57
|
1325ecfbf52ff277c6fa5c5fc099213ca89a509f
|
refs/heads/master
| 2023-04-09T20:47:37.132055
| 2023-03-28T14:41:20
| 2023-03-28T14:41:20
| 175,849,532
| 7
| 3
| null | 2019-07-29T10:12:02
| 2019-03-15T15:50:50
|
R
|
UTF-8
|
R
| false
| false
| 5,225
|
r
|
computing_priors.R
|
#' @title Computing prior probabilities associated with putative annotations
#'
#' @description
#' This function takes as input the output of the find.Hits() function and computes the prior
#' probabilities for the putative annotations
#'
#'
#' @param Hits The output of find.Hits() function
#' @param dataset A matrix containing the measured data, organized in 3 columns: mz, RT and Int
#' @param pk A vector of length nrow(Hits$all.formulas). Values between 0 and 1 expressing initial confidence of the presence of each formula
#' @param ppm A number indicating the instrument accuracy to be considered
#' @param unknown.ppm The ppm number to be assigned to unknown (default NA - No unknown is considered)
#' @param RT.pen A numerical value indicating the multiplicative factor used when the measured retention time is outside of the range reported in the database
#' @param v A logical value indicating if the progress will be shown (default TRUE)
#' @param IT A number indicating after how many iterations an update should be shown (default 120)
#' @param pr.lim A number indicating the lowest probability value allowed (default 1e-05)
#'
#' @return A list containing the matrix of the prior probabilities and the id.masses and all.formulas objects
#'
#' @author Francesco Del Carratore \email{francescodc87@@gmail.com}
#'
#' @seealso find.Hits IPAposteriors
#'
#' @import Matrix
#' @import enviPat
#' @import stringr
#' @importFrom utils combn
#' @export

### the function needs enviPat
"compute.Priors" <- function(Hits, dataset, pk = rep(1, nrow(Hits$all.formulas)), ppm, unknown.ppm = NA, RT.pen=0.5, v = T, IT = 120, pr.lim = 1e-05) {
    cat("Computing Priors... \n")
    # defining the number of masses and the number of the compounds
    compounds.mass <- as.numeric(Hits$all.formulas[, 5])
    Nc <- nrow(Hits$all.formulas)
    mass <- as.numeric(dataset[Hits$id.masses, 1])
    M <- length(mass)
    ### evaluating precision: sigma is half the ppm-derived mass tolerance
    deltaMs <- ppm * mass * (1e-06)
    sigma <- deltaMs/2
    precision <- 1/(sigma^2)
    rm(sigma, deltaMs)
    # evaluation of prior probabilities (likelihood based only on mass) initialize some variables
    #### here I create a matrix of the same dimension as pr (MxC) to multiply into pr:
    #### RT penalty factors, 1 where the measured RT falls in the database range
    #### (or no range is given), RT.pen otherwise.
    pr.RT <- matrix(1,M,length(pk))
    idxRTs <- which(!is.na(Hits$all.formulas[,3]))
    pr.RT[,idxRTs] <- RT.pen
    for(k in idxRTs){
        # RT range encoded as "low;high" in column 3 of all.formulas
        RTrange <- as.numeric(unlist(strsplit(Hits$all.formulas[k,3], split=";")))
        idxM <- which(dataset[Hits$id.masses,2]>=RTrange[1] & dataset[Hits$id.masses,2]<=RTrange[2])
        pr.RT[idxM,k] <- 1
    }
    # Gaussian mass-error likelihood per (mass, formula) pair, scaled by the
    # initial confidence pk and the RT penalty; optionally with one extra
    # "unknown" column using the unknown.ppm tolerance.
    if (!is.na(unknown.ppm)) {
        pr <- Matrix(0, M, (Nc + 1))
        for (k in 1:M) {
            pr[k, 1:Nc] <- ((exp((-0.5 * precision[k]) * ((compounds.mass - mass[k])^2))) * pk * pr.RT[k,])
            delta.unknown <- unknown.ppm * mass[k] * 1e-06
            pr[k, Nc + 1] <- ((exp((-0.5 * precision[k]) * ((delta.unknown)^2))))
            if (v) {
                if (k%%IT == 0) {
                    # Print on the screen some progress message
                    cat(paste0(round((k * 100)/M, 1), "%", "\n"))
                }
            }
        }
        all.formulas1 = rbind(Hits$all.formulas, c("unknown", rep(NA, 7)))
    } else {
        pr <- Matrix(0, M, Nc)
        for (k in 1:M) {
            pr[k, 1:Nc] <- ((exp((-0.5 * precision[k]) * ((compounds.mass - mass[k])^2))) * pk * pr.RT[k,])
            if (v) {
                if (k%%IT == 0) {
                    # Print on the screen some progress message
                    cat(paste0(round((k * 100)/M, 1), "%", "\n"))
                }
            }
        }
        all.formulas1 = Hits$all.formulas
    }
    # Row-normalise so each mass's annotations sum to 1, then zero out
    # probabilities below pr.lim.
    for (k in 1:nrow(pr)) {
        pr[k, ] <- pr[k, ]/sum(pr[k, ])
    }
    idx.lim <- which(as.matrix(pr)<pr.lim)
    pr[idx.lim] <- 0
    # Keep only formulas (columns) with some remaining probability mass.
    ind.C <- which(apply(pr, 2, sum, na.rm = T) > 0)
    all.formulas = all.formulas1[ind.C, ] ### of course it is considering the last column even if there isn't the unknown entry in Hits!!
    orphans <- NULL
    if (length(ind.C) < ncol(pr)) {
        ## I have to check for orphan isotopes: isotopologues whose
        ## monoisotopic ("mono") peak was dropped must be removed too.
        cat("Checking for orphan isotopes... \n")
        IDs <- unique(all.formulas[, 1:2])
        for (k in 1:nrow(IDs)) {
            ind <- which(all.formulas[, 1] == IDs[k, 1] & all.formulas[, 7] == "mono" & all.formulas[, 2] == IDs[k, 2])
            if (length(ind) == 0) {
                orphans <- c(orphans, which(all.formulas1[, 1] == IDs[k, 1] & all.formulas1[, 2] == IDs[k, 2]))
            }
            if (v) {
                if (k%%IT == 0) {
                    # Print on the screen some progress message
                    cat(paste0(round((k * 100)/nrow(IDs), 1), "%", "\n"))
                }
            }
        }
    }
    if (!is.null(orphans)) {
        ind.C <- ind.C[which(!ind.C %in% orphans)]
    }
    pr <- pr[, ind.C]
    # Keep only masses (rows) that still have probability mass outside the
    # last column.
    # NOTE(review): excluding the last column here assumes it is the unknown
    # column; when unknown.ppm is NA the last column is a regular formula --
    # verify this is intended.
    ind.M <- which(apply(pr[,1:(ncol(pr)-1)], 1, sum, na.rm = T) > 0)
    pr <- pr[ind.M, ]
    all.formulas = all.formulas1[ind.C, ]
    id.masses = Hits$id.masses[ind.M]
    # Final row-renormalisation after filtering.
    for (k in 1:nrow(pr)) {
        pr[k, ] <- pr[k, ]/sum(pr[k, ])
    }
    out <- list(Priors = pr, id.masses = id.masses, all.formulas = all.formulas)
    ## filter masses and formulas
    return(out)
}
|
a5f8250cc3bb48d6919a9849328d26ea7bb1a4ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/futile.matrix/examples/RandomMatrixModel.Rd.R
|
ebbe4bb78d7287dd41fe727f361b4512eb208d47
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
RandomMatrixModel.Rd.R
|
# Example script extracted from the futile.matrix help page for
# RandomMatrixModel and friends.
library(futile.matrix)
### Name: RandomMatrixModel
### Title: Type constructors for random matrices and ensembles of random
###   matrices
### Aliases: Ensemble JacobiMatrix JacobiModel RandomMatrixModel
###   WignerMatrix WignerModel WishartMatrix WishartModel

### ** Examples

# Build a 10x10 Wigner model, draw one random matrix from it, then draw an
# ensemble of 20 matrices from the same model.
model <- WignerModel(10)
m <- rmatrix(model)
e <- Ensemble(20, model)
|
74fc88d1d2798c429473d1bbfe63c9724b795c8c
|
7a91b0eec2b3ab87ef6c868d1203063fa97b43d4
|
/R/glmrobMqle-DQD.R
|
e69fa113740deceb11adb67ac9cbfd64c0017525
|
[] |
no_license
|
cran/robustbase
|
a40f49c769a17af095660947616d9fbbbc3cf1e4
|
335b69f2310bd21ca4cdfc17a2a99ebbcad84017
|
refs/heads/master
| 2023-06-30T09:52:16.026413
| 2023-06-16T12:30:02
| 2023-06-16T12:30:02
| 17,699,299
| 7
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,620
|
r
|
glmrobMqle-DQD.R
|
#### Quasi-Deviance Differences --- for Model Selection
#### --------------------------------------------------- -> ./anova-glmrob.R
## MM: These function names are really too long
## but then, they are hidden in the name space ...
## (Maybe it would be nice to do this as one function with "family" .. )
## Robust quasi-deviance difference for the binomial family.
##
## mu, mu0 : fitted means under the two nested models (one value per obs.)
## y       : observed proportions
## ni      : binomial denominators
## w.x     : robustness weights on the design points
## phi     : dispersion (appears unused in this body; kept for a uniform
##           signature across families)
## tcc     : Huber tuning constant c
##
## For each observation, integrates the centred psi-function between
## mu*ni and mu0*ni and returns -2 * sum of the w.x-weighted integrals.
glmrobMqleDiffQuasiDevB <- function(mu, mu0, y, ni, w.x, phi, tcc)
{
    ## Integrand: centred Huber psi of the Pearson residual at mean u/ni.
    f.cnui <- function(u, y, ni, tcc)
    {
        pr <- u/ni
        Vmu <- pr * (1 - pr) ## = binomial()$variance
        residP <- (y-pr)*sqrt(ni/Vmu)
        ## First part: nui -- Huber-clipped Pearson residual
        nui <- pmax.int(-tcc, pmin.int(tcc, residP))
        ## Second part: Enui -- its expectation under the binomial model
        H <- floor(u - tcc*sqrt(ni*Vmu))
        K <- floor(u + tcc*sqrt(ni*Vmu))
        ## Actually, floor is not needed because pbinom() can cope
        ## with noninteger values in the argument q!
        ## what follows is similar to glmrob.Mqle.EpsiB except a
        ## different vectorisation
        ## NOTE(review): the size argument `1` in pbinom(K-1,1,pr) differs
        ## from the `ni-1` used in the adjacent term -- verify against the
        ## EpsiB implementation.
        h1 <- (if(ni == 1) as.numeric(- (H < 0) + (K >= 1) ) * sqrt(Vmu)
               else
               (pbinom(K-1,1,pr) - pbinom(H-1,ni-1,pr)
                - pbinom(K,ni,pr) + pbinom(H,ni,pr)) * pr * sqrt(ni/Vmu))
        ## pmax was needed to get numeric returns from pbinom
        Enui <- (tcc*(1 - pbinom(K,ni,pr) - pbinom(H,ni,pr)) + h1)
        return((nui - Enui) / sqrt(ni*Vmu))
    } ## f.cnui()

    nobs <- length(mu)
    stopifnot(nobs > 0)
    QMi <- numeric(nobs)
    ## Numerical integrations, one per observation, on the count scale
    for(i in 1:nobs)
        QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc,
                            subdivisions = 200,
                            lower = mu[i]*ni[i], upper = mu0[i]*ni[i])$value
    ## robust quasi-deviance
    ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5)
    ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5)
    ## According to Andreas' interpretation
    -2*sum(QMi*w.x)
} ## glmrobMqleDiffQuasiDevB
## Robust quasi-deviance difference for the Poisson family.
## Same contract as glmrobMqleDiffQuasiDevB: integrates the centred Huber
## psi-function between mu and mu0 per observation and returns
## -2 * sum of the w.x-weighted integrals. (ni and phi appear unused in
## this body; kept for a uniform signature across families.)
glmrobMqleDiffQuasiDevPois <- function(mu, mu0, y, ni, w.x, phi, tcc)
{
    ## Integrand: centred Huber psi of the Pearson residual at mean u.
    f.cnui <- function(u, y, ni, tcc)
    {
        Vmu <- u ## = poisson()$variance
        residP <- (y-u)/sqrt(Vmu)
        ## First part: nui -- Huber-clipped Pearson residual
        nui <- pmax.int(-tcc, pmin.int(tcc, residP))
        ## Second part: Enui -- its expectation under the Poisson model
        H <- floor(u - tcc*sqrt(Vmu))
        K <- floor(u + tcc*sqrt(Vmu))
        ## what follows is similar to Epsipois except a
        ## different vectorisation
        h1 <- u/sqrt(Vmu)*(dpois(H,u)- dpois(K,u))
        Enui <- tcc*(1 - ppois(K,u) - ppois(H,u)) + h1
        return((nui - Enui) / sqrt(Vmu))
    }
    nobs <- length(mu)
    stopifnot(nobs > 0)
    QMi <- numeric(nobs)
    ## Numerical integrations, one per observation
    for(i in 1:nobs)
        QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc,
                            lower = mu[i], upper = mu0[i])$value
    ## robust quasi-deviance
    ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5)
    ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5)
    ## According to Andreas' interpretation
    -2*sum(QMi*w.x)
}## glmrobMqleDiffQuasiDevPois
##' Difference of robust quasi-deviances for the Gamma family
##' (Cantoni & Ronchetti, 2001).  Three computational variants are
##' offered; "V1" integrates the centered psi in one pass, while
##' "Eva1"/"Andreas1" integrate the two terms separately and combine them
##' according to the respective authors' reading of equations (4)-(5).
##'
##' @param mu,mu0  fitted means under the full and reduced model
##' @param y       observed responses
##' @param ni      kept for a uniform signature across families (unused)
##' @param w.x     robustness weights on the design space ("V1" only)
##' @param phi     dispersion parameter (1/shape)
##' @param tcc     Huber tuning constant
##' @param variant which interpretation to use; see Details above
##' @return scalar robust quasi-deviance difference
##' Note: relies on the helper Gmn() defined elsewhere in this file.
glmrobMqleDiffQuasiDevGamma <- function(mu, mu0, y, ni, w.x, phi, tcc,
                                        variant = c("V1", "Eva1", "Andreas1"))
{
    ## Notation similar to the discrete case (Cantoni & Ronchetti, 2001)
    ## Integrand for "V1": centered psi contribution (nui - E[nui]).
    f.cnui <- function(u, y, ni, phi, tcc)
    {
        s.ph <- sqrt(phi)
        ## First part: nui
        sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance)
        residP <- (y-u)/sV
        nui <- pmax.int(-tcc, pmin.int(tcc, residP))
        ## Second part: Enui (cf. EpsiGamma, different vectorisation)
        nu <- 1/phi    ## shape parameter nu
        snu <- 1/s.ph  ## sqrt(nu)
        pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu)
        pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu)
        Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu)
        ( nui/sV - Enui/u*s.ph )
    }
    ## First term ("Eva1"/"Andreas1"): psi part only.
    f.cnui1 <- function(u, y, ni, phi, tcc)
    {
        sV <- sqrt(phi) * u ## = sqrt(dispersion * Gamma()$variance)
        residP <- (y-u)/sV
        nui <- pmax.int(-tcc, pmin.int(tcc, residP))
        (nui / sV)
    }
    ## Second term ("Eva1"/"Andreas1"): expected psi part only.
    f.cnui2 <- function(u, y, ni, phi, tcc)
    {
        s.ph <- sqrt(phi)
        sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance)
        snu <- 1/s.ph  ## sqrt(nu)
        nu <- 1/phi    ## shape parameter nu
        pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu)
        pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu)
        Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu)
        return(Enui/u * s.ph)
    }

    nobs <- length(mu)
    stopifnot(nobs > 0)
    variant <- match.arg(variant)
    ## robust quasi-deviance
    if(variant == "V1") {
        QMi <- numeric(nobs)
        ## Numerical integrations of the centered integrand
        for(i in 1:nobs)
            QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], phi=phi, tcc = tcc,
                                lower = mu[i], upper = mu0[i])$value
        -2*sum(QMi*w.x)
    } else { ## "Eva1" or "Andreas1"; using two separate terms
        QMi1 <- QMi2 <- numeric(nobs)
        for(i in 1:nobs)
            QMi1[i] <- integrate(f.cnui1, y = y[i], ni = ni[i], phi=phi, tcc = tcc,
                                 lower = mu[i], upper = mu0[i])$value
        for(i in 1:nobs)
            ## BUG FIX: this previously assigned to 'QM2i[i]' -- an
            ## undefined object -- instead of filling QMi2, which is the
            ## vector summed below; both non-"V1" variants would error.
            QMi2[i] <- integrate(f.cnui2, y = y[i], ni = ni[i], phi=phi, tcc = tcc,
                                 lower = mu[i], upper = mu0[i])$value
        if(variant == "Eva1") { ## Eva Cantoni's interpretation of (4) and (5)
            -2*(sum(QMi1)-sum(QMi2)/nobs)
        } else if (variant == "Andreas1") { ## Andreas' interpretation of (4) and (5)
            -2*(sum(QMi1)-sum(QMi2))
        } else stop("invalid 'variant': ", variant)
    }
}
|
e25cdb1602b57d5ef216a60334fb814d2e3ba4f3
|
e0ce0f89a6ae408f4dbce9fe784ff0fe3e7c8344
|
/R/plotPath.R
|
64843e49a4dc1d504d87dc6f99f72636b4e40f84
|
[] |
no_license
|
rtlemos/scurvy
|
6cc932df8c4e454e5fe0a279828acdfd8af92831
|
5fbcd7ddb69283ecc25c4afbcbf18d315ca6c953
|
refs/heads/master
| 2020-03-29T14:45:36.104275
| 2019-11-30T23:24:17
| 2019-11-30T23:24:17
| 150,031,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,279
|
r
|
plotPath.R
|
#' Plot a path
#'
#' @param dataset List with data, path, lat, lon, etc.
#' @param plot_data Plot the data?
#' @param lat_bounds Latitude bounds (for zooming in)
#' @param lon_bounds Longitude bounds (for zooming in)
#' @param colored_line Color the path: NA=>no, "group"=> by group ID, "value"=> by group value
#' @param fixed_aspect_ratio Use a 1:1 aspect ratio in the plot?
#' @param background Background color
#' @param show_line_scale Display colorbar for line?
#' @param show_fill_scale Display colorbar for fill?
#'
#' @return ggplot of path
#' @export
#'
plotPath = function(dataset,
                    plot_data = FALSE,
                    lat_bounds = NULL,
                    lon_bounds = NULL,
                    colored_line = 'value',
                    fixed_aspect_ratio = TRUE,
                    background = NA,
                    show_line_scale = FALSE,
                    show_fill_scale = FALSE) {
  # Unpack the dataset components used below.
  path = dataset$path
  lat = dataset$lat
  lon = dataset$lon
  data = dataset$data
  # Zoom in only when BOTH latitude and longitude bounds are supplied.
  restricted_bounds = !is.null(lat_bounds) & !is.null(lon_bounds)
  n = length(path)
  nr = length(lat)
  nc = length(lon)
  # 'path' holds 1-based column-major linear indices into the nr x nc grid;
  # recover the row (latitude) and column (longitude) index of each cell.
  path_col = floor((path - 1) / nr) + 1
  path_row = (path - 1) %% nr + 1
  # Consecutive path cells form segments; *i = segment start, *j = segment end
  # (rows, columns, group ids and group values respectively).
  ri = path_row[1:(n - 1)]
  rj = path_row[2:n]
  ci = path_col[1:(n - 1)]
  cj = path_col[2:n]
  gi = dataset$group$id[1:(n - 1)]
  gj = dataset$group$id[2:n]
  vi = dataset$group$value[1:(n - 1)]
  vj = dataset$group$value[2:n]
  if (plot_data) {
    # Background raster of the data matrix; as.numeric(data) flattens
    # column-major, matching expand.grid(lat=lat, lon=lon) row order.
    rst = cbind(expand.grid(lat=lat,lon=lon), z=as.numeric(data))
    if (restricted_bounds) {
      # Drop raster cells outside the requested window before plotting.
      crit = rst$lat >= lat_bounds[1] & rst$lat <= lat_bounds[2] &
        rst$lon >= lon_bounds[1] & rst$lon <= lon_bounds[2]
      rst = rst[crit, ]
    }
    p = ggplot() + geom_raster(rst, mapping = aes(x=lon, y=lat, fill=z)) +
      scale_fill_gradient(low=gray(0.01), high=gray(0.99))
  } else {
    p = ggplot()
  }
  # Build one data frame with two rows per segment: c(rbind(a, b))
  # interleaves start/end values as a1,b1,a2,b2,...; 'idx' groups the two
  # endpoints of each segment so geom_line draws them as separate segments.
  df = data.frame(lat = lat[c(rbind(ri, rj))],
                  lon = lon[c(rbind(ci, cj))],
                  id = c(rbind(gi, gj)),
                  val = c(rbind(vi, vj)),
                  idx = c(rbind(1:(n - 1), 1:(n - 1))))
  if (restricted_bounds) {
    # Keep only segment endpoints inside the window.
    crit = df$lat >= lat_bounds[1] & df$lat <= lat_bounds[2] &
      df$lon >= lon_bounds[1] & df$lon <= lon_bounds[2]
    df = df[crit,]
  }
  if (fixed_aspect_ratio) {
    # 1:1 aspect ratio (degrees of latitude vs longitude).
    p = p + coord_equal()
  }
  # Colour the path by group id, by group value, or not at all (any other
  # value of colored_line, including NA, falls through to the plain line).
  if (colored_line %in% c("group", "group_id", "id")) {
    p = p + geom_line(data = df, mapping=aes(x=lon, y=lat, group=idx, color = id)) +
      scale_color_gradientn(colours=rainbow(100))
  } else if (colored_line %in% c("val", "value", "v")) {
    p = p + geom_line(data = df, mapping=aes(x=lon, y=lat, group=idx, color = val)) +
      scale_color_viridis_c(option='D')
  } else {
    p = p + geom_line(data=df, mapping=aes(x=lon, y=lat, group=idx))
  }
  if (restricted_bounds) {
    # Clamp the axes exactly to the requested window (no padding).
    p = p + scale_x_continuous(limits = lon_bounds, expand = c(0, 0)) +
      scale_y_continuous(limits = lat_bounds, expand = c(0, 0))
  }
  # Show or hide the colorbars for the line colour and raster fill scales.
  clr <- if (show_line_scale) 'colorbar' else 'none'
  fll <- if (show_fill_scale) 'colorbar' else 'none'
  p = p + guides(color=clr, fill=fll) +
    theme(panel.background = element_rect(fill = background),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank())
  p
}
|
712e084d78d88fd2966bbeda0d788b30e3e61064
|
a593d96a7f0912d8dca587d7fd54ad96764ca058
|
/man/ml_evaluator.Rd
|
b4f92afbe7badef5468175be263f1f3eb00ef46b
|
[
"Apache-2.0"
] |
permissive
|
sparklyr/sparklyr
|
98f3da2c0dae2a82768e321c9af4224355af8a15
|
501d5cac9c067c22ad7a9857e7411707f7ea64ba
|
refs/heads/main
| 2023-08-30T23:22:38.912488
| 2023-08-30T15:59:51
| 2023-08-30T15:59:51
| 59,305,491
| 257
| 68
|
Apache-2.0
| 2023-09-11T15:02:52
| 2016-05-20T15:28:53
|
R
|
UTF-8
|
R
| false
| true
| 4,013
|
rd
|
ml_evaluator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_evaluation_prediction.R
\name{ml_evaluator}
\alias{ml_evaluator}
\alias{ml_binary_classification_evaluator}
\alias{ml_binary_classification_eval}
\alias{ml_multiclass_classification_evaluator}
\alias{ml_classification_eval}
\alias{ml_regression_evaluator}
\title{Spark ML - Evaluators}
\usage{
ml_binary_classification_evaluator(
x,
label_col = "label",
raw_prediction_col = "rawPrediction",
metric_name = "areaUnderROC",
uid = random_string("binary_classification_evaluator_"),
...
)
ml_binary_classification_eval(
x,
label_col = "label",
prediction_col = "prediction",
metric_name = "areaUnderROC"
)
ml_multiclass_classification_evaluator(
x,
label_col = "label",
prediction_col = "prediction",
metric_name = "f1",
uid = random_string("multiclass_classification_evaluator_"),
...
)
ml_classification_eval(
x,
label_col = "label",
prediction_col = "prediction",
metric_name = "f1"
)
ml_regression_evaluator(
x,
label_col = "label",
prediction_col = "prediction",
metric_name = "rmse",
uid = random_string("regression_evaluator_"),
...
)
}
\arguments{
\item{x}{A \code{spark_connection} object or a \code{tbl_spark} containing label and prediction columns. The latter should be the output of \code{\link{sdf_predict}}.}
\item{label_col}{Name of column string specifying which column contains the true labels or values.}
\item{raw_prediction_col}{Raw prediction (a.k.a. confidence) column name.}
\item{metric_name}{The performance metric. See details.}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments; currently unused.}
\item{prediction_col}{Name of the column that contains the predicted
label or value NOT the scored probability. Column should be of type
\code{Double}.}
}
\value{
The calculated performance metric
}
\description{
A set of functions to calculate performance metrics for prediction models. Also see the Spark ML Documentation \href{https://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.ml.evaluation.package}{https://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.ml.evaluation.package}
}
\details{
The following metrics are supported
\itemize{
\item Binary Classification: \code{areaUnderROC} (default) or \code{areaUnderPR} (not available in Spark 2.X.)
\item Multiclass Classification: \code{f1} (default), \code{precision}, \code{recall}, \code{weightedPrecision}, \code{weightedRecall} or \code{accuracy}; for Spark 2.X: \code{f1} (default), \code{weightedPrecision}, \code{weightedRecall} or \code{accuracy}.
\item Regression: \code{rmse} (root mean squared error, default),
\code{mse} (mean squared error), \code{r2}, or \code{mae} (mean absolute error.)
}
\code{ml_binary_classification_eval()} is an alias for \code{ml_binary_classification_evaluator()} for backwards compatibility.
\code{ml_classification_eval()} is an alias for \code{ml_multiclass_classification_evaluator()} for backwards compatibility.
}
\examples{
\dontrun{
sc <- spark_connect(master = "local")
mtcars_tbl <- sdf_copy_to(sc, mtcars, name = "mtcars_tbl", overwrite = TRUE)
partitions <- mtcars_tbl \%>\%
sdf_random_split(training = 0.7, test = 0.3, seed = 1111)
mtcars_training <- partitions$training
mtcars_test <- partitions$test
# for multiclass classification
rf_model <- mtcars_training \%>\%
ml_random_forest(cyl ~ ., type = "classification")
pred <- ml_predict(rf_model, mtcars_test)
ml_multiclass_classification_evaluator(pred)
# for regression
rf_model <- mtcars_training \%>\%
ml_random_forest(cyl ~ ., type = "regression")
pred <- ml_predict(rf_model, mtcars_test)
ml_regression_evaluator(pred, label_col = "cyl")
# for binary classification
rf_model <- mtcars_training \%>\%
ml_random_forest(am ~ gear + carb, type = "classification")
pred <- ml_predict(rf_model, mtcars_test)
ml_binary_classification_evaluator(pred)
}
}
|
08ef682b822f82024273fc25f220927bdf69069c
|
0da2bf00dd26e37b37ede0492b0c1b30ea57085d
|
/analysis/experiment4.R
|
2d7acba8a3f41fb6f7b79976039693bcb779275f
|
[] |
no_license
|
Srcd-managing-ed/aliens
|
b1d1ab8b13db818f674a94943c6ab7e2813f2019
|
b1deacc7e38e1c5b80bf847db71fb9c364851d7d
|
refs/heads/master
| 2021-01-22T12:49:49.592963
| 2015-06-28T13:20:03
| 2015-06-28T13:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,531
|
r
|
experiment4.R
|
## Experiment 4 analysis: setup and bootstrap CI helpers.
## NOTE(review): rm(list=ls()) is a script-style workspace reset; kept for
## backward compatibility, though it wipes any objects in the session.
rm(list=ls())
library(plyr)
library(reshape2)
library(ggplot2)
library(lme4)
library(bootstrap)
raw.data <- read.csv("../data/experiment4.csv")
## Helpers for bootstrapping 95% confidence intervals on a mean.
## theta(): the statistic handed to bootstrap() -- mean of the resampled
## values ('x' holds resampled indices into 'xdata').
## (Fixed: spell out TRUE instead of T; seq_along() instead of 1:length().)
theta <- function(x, xdata, na.rm = TRUE) { mean(xdata[x], na.rm = na.rm) }
## Lower error-bar half-width: distance from the mean down to the 2.5%
## bootstrap quantile (not the bound itself).
ci.low <- function(x, na.rm = TRUE) {
  mean(x, na.rm = na.rm) -
    quantile(bootstrap(seq_along(x), 1000, theta, x, na.rm = na.rm)$thetastar,
             .025, na.rm = na.rm)
}
## Upper error-bar half-width: distance from the 97.5% bootstrap quantile
## up from the mean.
ci.high <- function(x, na.rm = TRUE) {
  quantile(bootstrap(seq_along(x), 1000, theta, x, na.rm = na.rm)$thetastar,
           .975, na.rm = na.rm) - mean(x, na.rm = na.rm)
}
##########################TEST TRIALS#########################
# Melt the wide per-subject data into long format.  The measure columns come
# in three parallel groups of 8 trials each: correctness, alien name,
# contrast type, and adjective -- so the melted value column stacks them in
# blocks of 392 rows (49 subjects x 8 trials).
md = melt(raw.data,c("Subj_ID","age", "agegroup", "gender", "list"), c("trial1_correct", "trial2_correct", "trial3_correct", "trial4_correct", "trial5_correct", "trial6_correct", "trial7_correct", "trial8_correct", "trial1", "trial2", "trial3","trial4","trial5","trial6","trial7","trial8",
                                                                      "trial1_type", "trial2_type", "trial3_type", "trial4_type", "trial5_type", "trial6_type", "trial7_type", "trial8_type",
                                                                      "trial1_adj", "trial2_adj", "trial3_adj", "trial4_adj", "trial5_adj", "trial6_adj", "trial7_adj", "trial8_adj"))
# First block (rows 1:392) = correctness; the other blocks are re-attached
# as separate columns.  NOTE(review): these offsets assume exactly 49
# subjects -- confirm against the data file if it changes.
data <- md[1: 392,]
data$alienName <- md$value[393:784]
data$contrastType <- md$value[785:1176]
data$adj <- md$value[1177:1568]
# Column 7 is the melted correctness value; recode 1/0 as logical.
names(data)[7] <- "correct"
data$correct <- data$correct==1
# Aggregate correct counts and trial counts by contrast type x age group.
agg.data <- aggregate(data$correct, list(data$contrastType, data$agegroup), FUN=sum)
agg.data.len <- aggregate(data$correct, list(data$contrastType, data$agegroup), FUN=length)
agg.data$x <- agg.data$x
agg.data.len$x <- agg.data.len$x
names(agg.data) <- c("contrasts", "agegroup","count")
agg.data$total <- agg.data.len$x
# Proportion correct with a binomial standard error sqrt(p*q/n).
agg.data$prop.corr <- agg.data$count / agg.data$total
agg.data$q <- 1 - agg.data$prop.corr
agg.data$err <- sqrt((agg.data$prop.corr * agg.data$q) / agg.data$total)
#### PLOT ####
# Shared ggplot theme tweaks for the bar plot below.
plot.style <- theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(), axis.line = element_line(colour="black",size=.5), axis.ticks = element_line(size=.5),legend.justification=c(1,0), legend.position=c(0.25,.75),legend.title=element_blank(), axis.title.x = element_text(vjust=-.5),
                    axis.title.y = element_text(angle=90,vjust=0.25))
dodge <- position_dodge(width=0.9)
limits <- aes(ymax = prop.corr + err, ymin=prop.corr - err)
# Bars of proportion correct by age group, split by contrast type, with
# +/- 1 SE error bars and a chance line at 0.5.
qplot(data = agg.data,
      x = agegroup,
      y = prop.corr,
      geom="bar",
      stat="identity",
      fill=contrasts,
      #main="Preschooler results",
      ylab="Proportion Correct Contrast Judgement",
      xlab="Age",
      position=dodge,
      ylim=c(0,1)) +
  geom_abline(intercept=.5,slope=0,lty=2) +
  geom_errorbar(limits,position=dodge,width=0.25) +
  theme_bw() +
  plot.style +
  scale_fill_manual(values=c("orange", "red"))
#### STATS ###
# Mixed logistic regression: contrast type x age with random slopes by
# subject and by alien item.
gl <- glmer(correct ~ contrastType * age +
              (contrastType | Subj_ID) +
              (contrastType | alienName),
            data=data,
            family=binomial)
summary(gl)
# Same, but with adjective as a fixed effect instead of the interaction.
gl <- glmer(correct ~ adj + age +
              (contrastType | Subj_ID),
            data=data, family=binomial)
summary(gl)
# Per-subject means, tested against chance (0.5) within each age group...
mss <- ddply(data,
             .(agegroup, Subj_ID),
             summarise, m=mean(correct))
t.test(mss$m[mss$agegroup=="4.0--4.5"] - .5)
t.test(mss$m[mss$agegroup=="4.5--5.0"] - .5)
# ...and within each age group x contrast type cell.
mss <- ddply(data, .(agegroup, Subj_ID, contrastType), summarise, m=mean(correct))
t.test(mss$m[mss$agegroup=="4.0--4.5" & mss$contrastType=="size"] - .5)
t.test(mss$m[mss$agegroup=="4.0--4.5" & mss$contrastType=="feature"] - .5)
t.test(mss$m[mss$agegroup=="4.5--5.0" & mss$contrastType=="size"] - .5)
t.test(mss$m[mss$agegroup=="4.5--5.0" & mss$contrastType=="feature"] - .5)
#### ITEM EFFECTS ####
# Flag adjectives that are the "marked" member of their antonym pair.
data$marked <- data$adj %in% c("closed","dark","dirty","empty","pointy",
                               "short","skinny","small","soft","wet")
# Per-adjective accuracy with bootstrap CIs (ci.high/ci.low defined above).
iss <- ddply(data,
             .(contrastType, marked, adj),
             summarise,
             m = mean(correct),
             n = length(correct),
             cih = ci.high(correct),
             cil = ci.low(correct))
# Order adjectives by decreasing accuracy for plotting.
iss$adj <- factor(iss$adj,
                  levels = iss$adj[order(iss$m, decreasing=TRUE)])
ggplot(iss, aes(x = adj, y = m, col = marked)) +
  geom_linerange(aes(ymin = m - cil, ymax = m + cih)) +
  geom_point(aes(size = n)) +
  geom_hline(yintercept=.5, lty=2) +
  ylim(c(0,1)) +
  facet_grid(.~contrastType, scales= "free_x") +
  theme_bw() +
  theme(axis.text.x = element_text(angle=90,vjust=0.25)) +
  ylab("Mean Proportion Correct") +
  xlab("Adjective")
|
4f69423739181e62db3e80c474ffe4bcccadac6e
|
0de7dffc4ca5b21247e7017c97943be054f0cfc1
|
/Lib/scan_ssms/correlated_from_pvals.R
|
6771c4a808208c340ac02ca09ad2ee9df9b735d3
|
[] |
no_license
|
morrislab/RNAcompete-S
|
f29b97d2cc8f4204c43fec0c15dff6d3fc516603
|
433756dd865ef5413989826158d6b1affbb05400
|
refs/heads/master
| 2021-01-19T01:03:12.935318
| 2017-03-15T22:45:13
| 2017-03-15T22:45:13
| 84,678,329
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 815
|
r
|
correlated_from_pvals.R
|
#!/usr/bin/Rscript
## Build a motif-by-motif 0/1 matrix marking pairs whose scores are
## significantly positively correlated (one-sided p < 0.01).
## Usage: correlated_from_pvals.R <infile> <outfile>
args = commandArgs(TRUE)
infile = args[1]
outfile = args[2]
library(Matrix)
df = read.table(infile, header=T)
## BUG FIX: since R 4.0, read.table() no longer converts strings to
## factors, so levels(df$motif_ID_1) would be NULL.  levels(factor(.))
## yields the sorted unique motif IDs under both old and new defaults.
motifs = levels(factor(df$motif_ID_1))
combos = combn(motifs, 2)
pvals = matrix(NA, length(motifs), length(motifs))
for (i in seq_len(ncol(combos))) {
  df_subset = subset(df, motif_ID_1==combos[1,i] & motif_ID_2==combos[2,i])
  ## One-sided test for positive correlation between the paired scores.
  test = cor.test(df_subset$score_1, df_subset$score_2, alternative="greater")
  pval = test[["p.value"]]
  ## Motif IDs are assumed to look like "<prefix>_<index>"; the numeric
  ## index gives the row/column position in the p-value matrix.
  c1 = as.integer(unlist(strsplit(combos[1,i],'_',fixed=TRUE))[2])
  c2 = as.integer(unlist(strsplit(combos[2,i],'_',fixed=TRUE))[2])
  pvals[c1,c2] = pval
}
## Mirror the upper triangle; the diagonal (no self-tests) stays NA.
pvalsSym = forceSymmetric(pvals)
correlated = as.matrix(pvalsSym < 0.01)
## BUG FIX: the original 'correlated2[correlated] <- 1' errors because the
## logical index contains NA on the diagonal ("NAs are not allowed in
## subscripted assignments").  ifelse() codes TRUE/FALSE as 1/0 while
## leaving the NA diagonal intact.
correlated2 = ifelse(correlated, 1, 0)
write.table(correlated2,outfile,quote=FALSE,sep="\t",row.names=FALSE,col.names=FALSE)
|
f79fb335a7a4afccc890775e376dbccb1a61ed69
|
7044839eae96eaec4642a9fd4be56ceca84ca657
|
/tests/testthat/test-clustering.R
|
98c84c59a165ccdb2d16a2c79205654801520a4c
|
[
"MIT"
] |
permissive
|
cugliari/iecclust
|
bb58e11c35bece256d613f0824ad98967e4e441c
|
1b6e97a0c317a8f1959b5d927a0787a36726a4de
|
refs/heads/master
| 2021-08-19T19:50:24.825273
| 2017-11-27T09:01:53
| 2017-11-27T09:01:53
| 110,530,261
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,621
|
r
|
test-clustering.R
|
context("clustering")

test_that("clusteringTask1 behave as expected",
{
  # Generate 60 reference sinusoïdal series (medoids to be found),
  # and sample 900 series around them (add a small noise)
  n <- 900
  x <- seq(0,9.5,0.1)
  L <- length(x) #96 1/4h
  K1 <- 60
  s <- lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
  series <- matrix(nrow=L, ncol=n)
  # Each sampled series is its reference curve plus N(0, 0.01) noise;
  # I(i, K1) (project helper) maps series index to its reference curve.
  for (i in seq_len(n))
    series[,i] <- s[[I(i,K1)]] + rnorm(L,sd=0.01)
  # Accessor in the package's expected form: indices -> series matrix
  # (NULL when no valid index remains).
  getSeries <- function(indices) {
    indices <- indices[indices <= n]
    if (length(indices)>0) as.matrix(series[,indices]) else NULL
  }
  # Contributions are wavelet (haar) coefficients, absolute scaling.
  wf <- "haar"
  ctype <- "absolute"
  getContribs <- function(indices) curvesToContribs(as.matrix(series[,indices]),wf,ctype)
  require("cluster", quietly=TRUE)
  # Stage-1 clustering algorithm: PAM medoid indices on the contributions.
  algoClust1 <- function(contribs,K) cluster::pam(t(contribs),K,diss=FALSE)$id.med
  indices1 <- clusteringTask1(1:n, getContribs, K1, algoClust1, 140, verbose=TRUE)
  medoids_K1 <- getSeries(indices1)
  expect_equal(dim(medoids_K1), c(L,K1))
  # Not easy to evaluate result: at least we expect it to be better than random selection of
  # medoids within initial series
  distor_good <- computeDistortion(series, medoids_K1)
  for (i in 1:3)
    expect_lte( distor_good, computeDistortion(series,series[,sample(1:n, K1)]) )
})
test_that("clusteringTask2 behave as expected",
{
  # Same 60 reference sinusoïdal series than in clusteringTask1 test,
  # but this time we consider them as medoids - skipping stage 1
  # Here also we sample 900 series around the 60 "medoids"
  n <- 900
  x <- seq(0,9.5,0.1)
  L <- length(x) #96 1/4h
  K1 <- 60
  K2 <- 3
  #for (i in 1:60) {plot(x^(1+i/30)*cos(x+i),type="l",col=i,ylim=c(-50,50)); par(new=TRUE)}
  s <- lapply( seq_len(K1), function(i) x^(1+i/30)*cos(x+i) )
  series <- matrix(nrow=L, ncol=n)
  # Noisy samples around each reference curve, as in the previous test.
  for (i in seq_len(n))
    series[,i] <- s[[I(i,K1)]] + rnorm(L,sd=0.01)
  # Accessor: indices -> series matrix (NULL when no valid index remains).
  getSeries <- function(indices) {
    indices <- indices[indices <= n]
    if (length(indices)>0) as.matrix(series[,indices]) else NULL
  }
  # Perfect situation: all medoids "after stage 1" are ~good
  # Stage-2 algorithm: PAM on a precomputed distance matrix.
  algoClust2 <- function(dists,K) cluster::pam(dists,K,diss=TRUE)$id.med
  indices2 <- clusteringTask2(1:K1, getSeries, K2, algoClust2, 210, 3, 4, 8, "little",
                              verbose=TRUE)
  medoids_K2 <- getSeries(indices2)
  expect_equal(dim(medoids_K2), c(L,K2))
  # Not easy to evaluate result: at least we expect it to be better than random selection of
  # synchrones within 1...K1 (from where distances computations + clustering was run)
  distor_good <- computeDistortion(series, medoids_K2)
  #TODO: This fails; why?
  # for (i in 1:3)
  #   expect_lte( distor_good, computeDistortion(series, series[,sample(1:K1,3)]) )
})
|
afb9bcd337eab5ef4e418c6c974959149d120ac5
|
0ff9cab1383b811353c8120eba24c629ef05c51c
|
/code/00_PlotSelection.R
|
327d2e4f71622da7bcef34932b98a0d39a670b54
|
[] |
no_license
|
miquelcaceres/INFORMED_CaseStudy
|
4d0a01abfdebb2b33c766cf2b70e2948b866b07d
|
5f1e17f8c28eecc9016849e097de37d6eb557730
|
refs/heads/master
| 2020-09-19T16:56:12.803211
| 2019-11-26T16:11:19
| 2019-11-26T16:11:19
| 81,959,619
| 0
| 1
| null | 2017-05-17T12:06:41
| 2017-02-14T15:30:36
|
R
|
UTF-8
|
R
| false
| false
| 4,827
|
r
|
00_PlotSelection.R
|
# Script to select the plots with pure Pinus nigra stands (>80% BA) within Solsonès
library(medfate)
#setwd("D:/Recerca/Lab/CaseStudy_INFORMED/")

# Load coordinates: first column holds the plot IDs, used as row names.
ifn3_xy <- read.delim("D:/Recerca/Datasets/IFN/IFN3/ifn3_xy_cat_unique.txt", row.names=1, header=TRUE)
coords = ifn3_xy[,-1]
rownames(coords) = ifn3_xy[,1]

# Coordinates are in datum ED50; reproject to WGS84 UTM and long/lat.
ifn3_sp = SpatialPoints(coords, CRS("+proj=utm +zone=31 +ellps=intl +units=m +no_defs"))
ifn3_sp_wgs84 = spTransform(ifn3_sp,CRS("+proj=utm +zone=31 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"))
ifn3_sp_longlat = spTransform(ifn3_sp,CRS("+proj=longlat +datum=WGS84"))

# Read topography (TO DO: check the projection of topographic data!)
ifn3_topo = read.table("D:/Recerca/Datasets/IFN/IFN3/ifn3_topo.txt", sep="\t", header=TRUE)

# Load county (comarques) and Catalonia contour polygons; reproject to the
# same WGS84 UTM CRS as the plot coordinates.
load("D:/Recerca/Datasets/Limits/comarques.rdata")
load("D:/Recerca/Datasets/Limits/catalonia.rdata")
comarques = spTransform(comarques, CRS("+proj=utm +zone=31 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"))
cat.contour = spTransform(cat.contour, CRS("+proj=utm +zone=31 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"))
# NOTE(review): Solsonès is assumed to be polygon 11 of 'comarques' --
# verify if the limits dataset changes.
solsones = comarques[11,]
# Quick map: all counties, Catalonia contour, Solsonès highlighted.
par(mar=c(0,0,0,0))
plot(comarques)
plot(cat.contour, col="red", lwd=2, add=TRUE)
plot(solsones, col="gray", add=TRUE)

# select IFN plots falling inside Solsonès (point-in-polygon overlay)
sel = over(ifn3_sp_wgs84, as(solsones,"SpatialPolygons"))
sel[is.na(sel)] = 0
sel = (sel==1)
# Subset all parallel objects to the selected plots.
ifn3_sp = ifn3_sp[sel]
ifn3_sp_wgs84 = ifn3_sp_wgs84[sel]
ifn3_sp_longlat = ifn3_sp_longlat[sel]
ifn3_topo =ifn3_topo[sel,]
codes = row.names(ifn3_topo)

# Load IFN forest/soil data and keep only the Solsonès plot codes.
load("./Rdata/IFN3_exist_SpatialForestPoints.Rdata")
solsones_forestlist = y_ifn3_exist@forestlist[codes]
solsones_soillist = y_ifn3_exist@soillist[codes]
#Ancillary functions
# Total basal area (m2/ha) of adult trees (DBH >= minDBH) in a forest plot.
# plant.BasalArea() values with NA are dropped first; the remaining entries
# are then filtered against the tree table's DBH -- TODO confirm the
# NA-dropped vector aligns with x$treeData$DBH (i.e. NAs only occur for
# non-tree cohorts).
adult_ba <- function(x, minDBH) {
  ba <- plant.BasalArea(x)
  ba <- ba[!is.na(ba)]
  ba <- ba[x$treeData$DBH >= minDBH]
  sum(ba, na.rm = TRUE)
}

# Basal area of adult trees (DBH >= minDBH) belonging to species code 'sp'.
# Same NA-then-DBH filtering as adult_ba(), applied in lockstep to the
# species mask and the basal-area vector.
sp_ba <- function(x, sp, minDBH) {
  ba <- plant.BasalArea(x)
  is_sp <- plant.Species(x) == sp
  keep <- !is.na(ba)
  is_sp <- is_sp[keep]
  ba <- ba[keep]
  adult <- x$treeData$DBH >= minDBH
  is_sp <- is_sp[adult]
  ba <- ba[adult]
  sum(ba[is_sp])
}

# Stem density (trees/ha) of trees with DBH >= 2.5 whose species code is
# in 'spp'.
density_spp <- function(x, spp) {
  dens <- x$treeData$N
  sp_codes <- plant.Species(x)
  keep <- !is.na(dens)
  sp_codes <- sp_codes[keep]
  dens <- dens[keep]
  adult <- x$treeData$DBH >= 2.5
  sp_codes <- sp_codes[adult]
  dens <- dens[adult]
  sum(dens[sp_codes %in% spp], na.rm = TRUE)
}
#Selection by plot characteristics
# Basal areas (trees with DBH >= 7.5) per plot: total, P. nigra (code 55)
# and P. sylvestris (code 59).
ab_total = unlist(lapply(solsones_forestlist, adult_ba, 7.5))
ab_pn = unlist(lapply(solsones_forestlist, sp_ba, 55, 7.5)) #BA P. nigra
ab_ps = unlist(lapply(solsones_forestlist, sp_ba, 59, 7.5)) #BA P. sylvestris
# "Pure" stands: species holds > 80% of the plot basal area.
pn_pbas = ab_pn/ab_total *100
sel_pn = pn_pbas>80
sel_pn[is.na(pn_pbas)] = FALSE
ps_pbas = ab_ps/ab_total *100
sel_ps = ps_pbas>80
sel_ps[is.na(ps_pbas)] = FALSE
# Mixed stands: the two pines jointly exceed 80% but neither is pure.
sel_mx = ((pn_pbas+ps_pbas)>80)
sel_mx[is.na(sel_mx)] = FALSE
sel_mx = sel_mx & (!(sel_pn | sel_ps))

# Map the three stand classes (black = P. nigra, red = P. sylvestris,
# blue = mixed).
plot(solsones)
points(ifn3_sp_wgs84[sel_pn,], col="black", pch=20)
points(ifn3_sp_wgs84[sel_ps,], col="red", pch=20)
points(ifn3_sp_wgs84[sel_mx,], col="blue", pch=20)

# Broadleaf density filter: discard plots with many broadleaf stems.
# NOTE(review): 'spp[-c(0, 54:60)]' removes POSITIONS 54:60 of the vector
# 0:88, i.e. species CODES 53:59 (the 0 in the index is ignored).  If the
# intent was to exclude pine codes 54:60, this is off by one -- confirm
# against the IFN species code table before changing.
spp = 0:88
spp = spp[-c(0, 54:60)]
dens_broadleaf = unlist(lapply(solsones_forestlist, density_spp, spp))
sel_pn = sel_pn & dens_broadleaf<=500
sel_ps = sel_ps & dens_broadleaf<=500
sel_mx = sel_mx & dens_broadleaf<=500

# Re-plot the filtered selections and report class sizes.
plot(solsones)
points(ifn3_sp_wgs84[sel_pn,], col="black", pch=20)
points(ifn3_sp_wgs84[sel_ps,], col="red", pch=20)
points(ifn3_sp_wgs84[sel_mx,], col="blue", pch=20)
sum(sel_pn)
sum(sel_ps)
sum(sel_mx)

#Subset data P. nigra
pn_forestlist = solsones_forestlist[sel_pn]
pn_sp_wgs84 = ifn3_sp_wgs84[sel_pn,]
pn_sp_longlat = ifn3_sp_longlat[sel_pn,]
pn_soillist = solsones_soillist[sel_pn]
pn_topo = ifn3_topo[sel_pn,]
pn_codes = codes[sel_pn]
#Save
save(pn_codes, pn_topo, pn_forestlist, pn_soillist, pn_sp_wgs84, pn_sp_longlat, solsones, cat.contour, comarques, file="Rdata/pn.rdata")

#Subset data P. sylvestris
ps_forestlist = solsones_forestlist[sel_ps]
ps_sp_wgs84 = ifn3_sp_wgs84[sel_ps,]
ps_sp_longlat = ifn3_sp_longlat[sel_ps,]
ps_soillist = solsones_soillist[sel_ps]
ps_topo = ifn3_topo[sel_ps,]
ps_codes = codes[sel_ps]
#Save
save(ps_codes, ps_topo, ps_forestlist, ps_soillist, ps_sp_wgs84, ps_sp_longlat, solsones, cat.contour, comarques, file="Rdata/ps.rdata")

#Subset data Mixed plots
mx_forestlist = solsones_forestlist[sel_mx]
mx_sp_wgs84 = ifn3_sp_wgs84[sel_mx,]
mx_sp_longlat = ifn3_sp_longlat[sel_mx,]
mx_soillist = solsones_soillist[sel_mx]
mx_topo = ifn3_topo[sel_mx,]
mx_codes = codes[sel_mx]
#Save
save(mx_codes, mx_topo, mx_forestlist, mx_soillist, mx_sp_wgs84, mx_sp_longlat, solsones, cat.contour, comarques, file="Rdata/mx.rdata")
|
e174b7f04e63d582e15829c146a4ff36c01d01fe
|
4cf827146404badf6c4ffcc3237187ece23b6084
|
/man/Prostate2000Raw.Rd
|
25c3a581f28fc099c2b848b5e4ddff3a1ffe13f4
|
[] |
no_license
|
Git294/ChemometricsWithR
|
1f883099604bfd375a54350ebdc067420e6037fe
|
9d15f50972ffa7fe254567c097eab7cbced586c6
|
refs/heads/master
| 2022-12-06T09:17:15.609101
| 2020-09-02T14:28:44
| 2020-09-02T14:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,894
|
rd
|
Prostate2000Raw.Rd
|
\name{Prostate2000Raw}
\alias{Prostate2000Raw}
\title{Prostate Cancer 2000 Raw Spectra}
\concept{prostate cancer}
\description{A data object of class \code{msSet},
consisting of 654 mass spectra (327 spectra in duplicate) from 2000 to
20000 Da, which were generated from patients with prostate cancer,
benign prostatic hypertrophy, and normal controls. These spectra are
already baseline corrected and normalized. Please see the references for
more details.
Since the original package msProstate is orphaned at the end of 2012,
the data are included in the ChemometricsWithR package so that the
examples in the book are still executable. This manual page has been
adapted to reflect this.
}
\references{
B.L. Adam, Y. Qu, J.W. Davis, M.D. Ward, M.A. Clements, L.H. Cazares,
O.J. Semmes, P.F. Schellhammer, Y. Yasui, Z. Feng, and G.L. Wright, Jr.,
"Serum protein fingerprinting coupled with a pattern-matching algorithm
distinguishes prostate cancer from benign prostate hyperplasia and
healthy men," \emph{Cancer Research}, 62(13):3609--14, 2002.
Y. Qu, B.L. Adam, Y. Yasui, M.D. Ward, L.H. Cazares,
P.F. Schellhammer, Z. Feng, O.J. Semmes, and G.L. Wright Jr.,
"Boosted decision tree analysis of surface-enhanced laser
desorption/ionization mass spectral serum profiles discriminates
prostate cancer from noncancer patients",
\emph{Clinical Chemistry}, 48(10):1835--43, 2002.
R. Wehrens, "Chemometrics with R - Multivariate Data Analysis in the
Natural Sciences and Life Sciences". 2nd edition, Springer, Heidelberg, 2019.
}
\examples{
## Examples have been changed from the original man page upon inclusion
## in the ChemometricsWithRData package
data("Prostate2000Raw")
## plot a few spectra, partially
matplot(Prostate2000Raw$mz[1:8000],
Prostate2000Raw$intensity[1:8000, 1:5], type = "l",
lty = 1, col = 1:5, xlab = "m/z", ylab = "response")
}
\keyword{datasets}
|
692944faa6455e0b9935615a3edcce5992ff6e08
|
4cee6dec70875ca85f20dd738932be86f361a63e
|
/pkg/man/findDeseqFactorsForFractions.Rd
|
73169680d09e3a52d214294ff6bd855563d2028f
|
[] |
no_license
|
dieterich-lab/pulseR
|
9b7114769b48a305ba0a11357226e8f774b73a20
|
1323b378e95b483c8bda99d6c71befccd45c810f
|
refs/heads/master
| 2021-01-18T20:40:00.474158
| 2018-10-26T10:45:32
| 2018-10-26T10:45:32
| 72,013,067
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 612
|
rd
|
findDeseqFactorsForFractions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pulseData.R
\name{findDeseqFactorsForFractions}
\alias{findDeseqFactorsForFractions}
\title{Calculate normalisation factors}
\usage{
findDeseqFactorsForFractions(count_data, conditions)
}
\arguments{
\item{count_data}{integer matrix, colnames correspond to samples
(rownames in \code{conditions})}
\item{conditions}{factors to split samples for normalisation}
}
\value{
vector of double; normalisation factors in the same order as
columns in the \code{count_data}
}
\description{
Calculate normalisation factors
}
\keyword{internal}
|
68e550f90d7671f6d0a22d73c9bb658c358cf88c
|
9072500aa28ba2f603688fa9fba69d6b7f9fd6b7
|
/ProjectTraining/NormalDistributions.R
|
e5c9f1d5b5f5939b5fe4e2f176114f7699ea29d6
|
[] |
no_license
|
lchi91/r-training
|
df4f827cc29096c3063cc6c4a23617a770e75951
|
9c9f7b7d18f0523d4a7165061f2abfd5c7d54b7c
|
refs/heads/master
| 2021-05-22T18:23:14.819224
| 2020-04-04T16:08:25
| 2020-04-04T16:08:25
| 253,038,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 642
|
r
|
NormalDistributions.R
|
## Sampling-distribution demo: compare the distribution of sample means
## for samples of size 5 vs 50 drawn from the female mice control
## population (genomicsclass course data).
library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename <- basename(url)
download(url, destfile=filename)
x <- unlist( read.csv(filename) )

# make averages5: 1000 means of samples of size 5
set.seed(1)
n <- 1000
averages5 <- vector("numeric", n)
for (i in seq_len(n)) {
  X <- sample(x, 5)
  averages5[i] <- mean(X)
}

# make averages50: 1000 means of samples of size 50
set.seed(1)
n <- 1000
averages50 <- vector("numeric", n)
for (i in seq_len(n)) {
  X <- sample(x, 50)
  averages50[i] <- mean(X)
}

# X still holds the last sample of size 50.
# FIX: 'hist(X,)' passed a stray empty argument into hist()'s '...',
# which is error-prone (errors outright for dot-evaluating functions).
hist(X)
plot(density(x))
# Empirical proportion of sample means in (23, 25]...
mean(averages50<=25) - mean(averages50<=23)
# ...versus the normal approximation with mean 23.9 and sd 0.43.
pnorm(25, 23.9, 0.43) - pnorm(23, 23.9, 0.43)
# Normality check of the raw population values.
qqnorm(x)
|
c81d43f9d8c920164a897e65a10d7c5b12712a9f
|
9150e6280152559008d7cebd796b739f29b1622a
|
/Assemble_Source_Data.R
|
b5bc33e17de220e27025977aa47a4e3053c3e58f
|
[] |
no_license
|
ImprovementPathSystems/Measuring_Abnormality_in_High_Dimensional_Spaces
|
30604aea6d1a5c896bcbfba93e47ebc3f3c104c1
|
ab4eeaccf1d975ffcad3b1c8bb57bd299ee6ed2b
|
refs/heads/master
| 2021-04-27T08:04:38.126016
| 2018-02-23T16:45:07
| 2018-02-23T16:45:07
| 122,647,240
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,164
|
r
|
Assemble_Source_Data.R
|
#' create a list of all the packages needed to run this R File.
all_packages_needed <- c("reshape", "dplyr","proxy", "matrixStats","R.methodsS3", "zoo","gtools", "Hmisc","knitr","plyr")
#'
#'if an R package that is needed to run this code is not installed, install it.
packages_to_install <- all_packages_needed[!(all_packages_needed %in% installed.packages()[,"Package"])]
if(length(packages_to_install)) try(install.packages(packages_to_install),silent=TRUE)
#'
#'load all the packages needed. A FALSE value returned indicates the package failed to load.
# require() is used deliberately here: it returns TRUE/FALSE instead of
# erroring, so the sapply() below reports per-package load status.
Load_Package_Silently <- function(Package_Name){suppressMessages(require(Package_Name,character.only = TRUE))}
sapply(all_packages_needed,Load_Package_Silently)#load all packages needed and don't print messages
#'
#'
#'Set variables needed to run this code
#Set the percent variance explained cutoff for TSQI, KmQI and KnQI. Value between 0 and 1.
# PCA_PercentVarExplainedCutoff <- .92
# 32 normal subjects; each subject contributes a 3500-element mean vector.
NormalSampleSize <- 32
Initial_Vector_Length <- 3500
# File directory CSV lists one per-subject data file per row.
NormalVectorFileDirectory <- read.csv('Source_File_Directory.csv', header=T, sep=',')
NormalVectorMatrix <- matrix(NA, nrow = Initial_Vector_Length, NormalSampleSize)

###############################################################################################################
#'Assemble all the normal vectors in a 3500 x NormalSampleSize matrix
# Column 4 of each subject file holds the mean value for every variable.
for(n in 1:NormalSampleSize){
  CurrentNormVectorFilename <- NormalVectorFileDirectory$NormalVectorFileNames[n]
  CurrentNormData <- read.csv(paste('Source_Data/', CurrentNormVectorFilename,sep=''), header=T, sep=',')
  CurrentNormMean_AllVars <- subset(CurrentNormData, select=c(4))
  NormalVectorMatrix[,n] <- as.matrix(CurrentNormMean_AllVars)
}
#' #'**Begin the kinematic and kinetic calculations on the normal data**
#' ###############################################################################################################
#' #Calculate normal mean and std dev for all 3500 rows.
#' NormalMean <- as.matrix(rowMeans(NormalVectorMatrix))
#' NormalStdDev <- as.matrix(rowSds(NormalVectorMatrix))
#'
#'
#' ##Calculate z-scores on entire matrix
#' Z_Normal_AllVars <- apply(NormalVectorMatrix,2, function(x) (x-NormalMean)/NormalStdDev)
###############################################################################################################
#'###GDI Prep Calculations
##We need to calculate the previous methodologies, the GDI and GPS, to compare to the GQI
##This GDI section was created by Norman Dotson##

#######Setting Typically Developing Control Group Data###########
##################################
#Create list of measurements to be used
# The 15 kinematic angle curves that enter the GDI/GPS calculations
# (pelvis, hip, knee, ankle and foot, left and right sides).
MeasurementList = c("L_Pelvis_Rotation"
                    ,"R_Pelvis_Fwd_Tilt"
                    ,"R_Pelvis_Lat_Tilt"
                    ,"L_HIP_Abd_ANG"
                    ,"L_HIP_Flex_ANG"
                    ,"L_HIP_Rot_ANG"
                    ,"L_KNEE_Flex_ANG"
                    ,"L_ANK_Flex_ANG"
                    ,"L_Foot_Orientation"
                    ,"R_HIP_Abd_ANG"
                    ,"R_HIP_Flex_ANG"
                    ,"R_HIP_Rot_ANG"
                    ,"R_KNEE_Flex_ANG"
                    ,"R_ANK_Flex_ANG"
                    ,"R_Foot_Orientation"
)
###########################
#######Variables###########
###########################
# 100 samples (% gait cycle) per measurement curve.
Vector_nrow = length(MeasurementList) * 100
#create an empty matrix with 100 * length(MeasurementAngleList)
#This will change according to vector variables selected
#Multiple Sample Size by 2 to account for left and right sides
GDI_GPS_NormalVectorMatrix <- matrix(NA, nrow = Vector_nrow, ncol=NormalSampleSize)

#Assemble all the normal vectors in a 3500 x NormalSampleSize matrix and assembler the GDI and GPS based on measurement list
for(n in 1:NormalSampleSize){
  CurrentNormVectorFilename <- NormalVectorFileDirectory$NormalVectorFileNames[n]
  CurrentNormData <- read.csv(paste('Source_Data/', CurrentNormVectorFilename,sep=''), header=T, sep=',')
  CurrentNormMean_AllVars <- subset(CurrentNormData, select=c(4))
  NormalVectorMatrix[,n] <- as.matrix(CurrentNormMean_AllVars)
  ###GDI and GPS Matrix##
  # Keep only rows whose measurement name (column 2) is in MeasurementList.
  CurrentNormMean_ReqVars <- CurrentNormData[ which(CurrentNormData[,2] %in% MeasurementList),]
  ##Must include column 1 and 2 in 'order' to preserve a correct structure order
  #CurrentNormMean_ReqVars[,2] = as.character(CurrentNormMean_ReqVars[,2])
  CurrentNormMean_ReqVars <- CurrentNormMean_ReqVars[order(CurrentNormMean_ReqVars[,2],CurrentNormMean_ReqVars[,1]),]
  ##Pull the Mean values for each patient and append to NormalVectorMatrix
  GDI_GPS_NormalVectorMatrix[,n] <- as.matrix(CurrentNormMean_ReqVars[,4])
}
# Bookkeeping columns (index, name, side) of the last subject's rows --
# identical across subjects because of the shared ordering above.
GDI_GPS_Var_Details <- CurrentNormMean_ReqVars[,1:3]
#' ##GDI Calculations for Normals###
#' NormMeans_ForGDIandGPS <- as.matrix(rowMeans(GDI_GPS_NormalVectorMatrix))
#'
#'
#' #'Able-Bodied Temporal Spatial Quality Index prep calculations
#' ###############################################################################################################
#' #Limit Temporal Spatial data based on number of normals in sample.
#' refpop <- refpop[which(refpop$NormID <= NormalSampleSize),]
#' refpopheights <- refpopheights[which(refpopheights$NormID <= NormalSampleSize),]
#'
#' refpopheights <- subset(refpopheights, select=c(3,4,5,8,10,13,14))
#'
#' refpop <- subset(refpop, select=c(3,4,5,8,10,13,14))
#'
#' ##convert to a matrix
#' m_refpopheights <- data.matrix(refpopheights)
#' m_refpop <- data.matrix(refpop)
#' ##Normalize to height
#' refpopHeightNormalized <- m_refpopheights * m_refpop
#'
#'
#'
#'
#'
#'
#'
#' ###### Assemble Patient Data. ######
#' PatientVectorMatrix <- matrix(NA, nrow = Vector_nrow, ncol = length(InterlabDataFiles$VectorFileName))
#' PatientTemporalSpatialData <- matrix(NA, ncol= 7, nrow = length(InterlabDataFiles$VectorFileName))
#'
#' #assemble interlab data into a single dataframe
#' #' ###Loop through all of the interlab data and calculate the GQI, GDI and GPS for each study.
#' #'These 10 subjects have been studied at 3 different time points over two days
#' #'This data is used to calculate the minimum detectable change (MDC) for the different methodologies
#' ###############################################################################################################
#' for(i in 1:length(InterlabDataFiles$VectorFileName)){
#'
#' #Read in interlab data
#' CurrentVectorFilename <- InterlabDataFiles$VectorFileName[i]
#' PatientData <- read.csv(paste('Interlab_Files/',CurrentVectorFilename,sep=''), header=T, sep=',')
#'
#' #Subset the data we need
#' PatientMean_AllVars <- subset(PatientData, select=c(4))
#'
#' ###GDI and GPS Matrix##
#' CurrentPatientMean_ReqVars <- PatientData[ which(PatientData[,2] %in% MeasurementList),]
#' ##Must include column 1 and 2 in 'order' to preserve a correct structure order
#' #CurrentNormMean_ReqVars[,2] = as.character(CurrentNormMean_ReqVars[,2])
#' CurrentPatientMean_ReqVars <- CurrentPatientMean_ReqVars[order(CurrentPatientMean_ReqVars[,2],CurrentPatientMean_ReqVars[,1]),]
#' PatientVectorMatrix[,i] <- CurrentPatientMean_ReqVars$Mean
#'
#' #Read in the patient's temporal Spatial data
#' CurrentTemporalSpatialFilename <- InterlabDataFiles$TempSpatialFileName[i]
#' CurrentTemporalSpatialData <- read.csv(paste('Interlab_Files/',CurrentTemporalSpatialFilename,sep=''), header=T, sep=',')
#'
#' #Read in the current height multiplier
#' CurrentHeightMultiplier <- InterlabDataFiles$HeightMultiplier[i]
#'
#' CurrentHeightMultiplier <- as.numeric(as.character(CurrentHeightMultiplier))
#'
#' #Subset the data we need.
#' CurrentTemporalSpatialData <- subset(CurrentTemporalSpatialData, select=c(1,2,3,6,8,11,12))
#'
#' ##Normalize to height
#' CurrentTemporalSpatialData$L_Step_Length <- CurrentTemporalSpatialData$L_Step_Length * CurrentHeightMultiplier
#' CurrentTemporalSpatialData$R_Step_Length <- CurrentTemporalSpatialData$R_Step_Length * CurrentHeightMultiplier
#' CurrentTemporalSpatialData$L_Stride_Length <- CurrentTemporalSpatialData$L_Stride_Length * CurrentHeightMultiplier
#' CurrentTemporalSpatialData$R_Stride_Length <- CurrentTemporalSpatialData$R_Stride_Length * CurrentHeightMultiplier
#' CurrentTemporalSpatialData$L_Velocity <- CurrentTemporalSpatialData$L_Velocity * CurrentHeightMultiplier
#'
#' ##Per KK Edits, DO NOT NORMALIZE CADENCE TO HEIGHT.
#' #CurrentTemporalSpatialData$L_Cadence <- CurrentTemporalSpatialData$L_Cadence * CurrentHeightMultiplier
#' CurrentTemporalSpatialData$R_Step_Width <- CurrentTemporalSpatialData$R_Step_Width * CurrentHeightMultiplier
#'
#' PatientTemporalSpatialData[i,] <- as.matrix(CurrentTemporalSpatialData)
#'
#' }
|
8a31590f7b64c91de922387ab25b6d07e0f18b38
|
4a40b201dc14a357e7f5377a3492652230c5d7eb
|
/R/00_importing_global-fishing-watch.R
|
07cc264e63a98b712be37f7eecdd91893ec11f34
|
[] |
no_license
|
fishvice/VMS-training-2018
|
dd3e2d06fe1cf35f9b210da11315ac2b2a72d37d
|
f6063177b5a4d7db8c4a571ea348b4265c59dc3b
|
refs/heads/master
| 2020-03-17T17:41:19.535339
| 2018-05-29T19:07:22
| 2018-05-29T19:07:22
| 133,798,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
00_importing_global-fishing-watch.R
|
library(tidyverse)

# Import Global Fishing Watch daily-effort csv files and write one combined
# .rds file per year. Improvements over the original:
#  - `tibble()` replaces the deprecated `data_frame()`
#  - `seq_along()` replaces `1:length()` (safe on empty input)
#  - the per-year result list is preallocated instead of grown
#  - the directory is listed once instead of twice
effort_dir <- "data-raw/global-fishing-watch/fishing_effort/daily_csvs"
effort_files <- dir(effort_dir)

# One row per daily csv; the year is the first four characters of the name.
files <-
  tibble(path = file.path(effort_dir, effort_files),
         year = str_sub(effort_files, 1, 4))

years <- unique(files$year)

for (y in seq_along(years)) {
  print(years[y])
  files.tmp <-
    files %>%
    filter(year == years[y]) %>%
    pull(path)
  # Preallocate one slot per daily file
  res <- vector("list", length(files.tmp))
  for (i in seq_along(files.tmp)) {
    res[[i]] <-
      read_csv(files.tmp[i]) %>%
      rename(lon = lon_bin,
             lat = lat_bin) %>%
      # Coordinates are stored as integer 1/100th degrees
      mutate(lon = lon / 100,
             lat = lat / 100)
  }
  bind_rows(res) %>%
    write_rds(path = paste0("data/global-fishing-watch_", years[y], ".rds"))
}
|
35c9c9bd75ac8821826d61f1b06b14941be541fe
|
e8c958880769f1d47fcea3fe3a4d47d2417b034b
|
/MeanPiAnalysis.R
|
7da7b8a49db52584b2c4100cbb80c25195181e60
|
[] |
no_license
|
melbourne-lab/StochasticGenomicEvolution
|
c72c623e1fbd0b92362145414271355b38168150
|
b0a69c60cf3a7d97c21e0f54151d6b2d2955005a
|
refs/heads/master
| 2023-08-25T19:04:42.812065
| 2018-11-12T18:13:19
| 2018-11-12T18:13:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,444
|
r
|
MeanPiAnalysis.R
|
# Test the effect of spatial structure on the average reduction in pi values
# for both autosomes and the X chromosome and generate the relevant confidence
# intervals.
# NOTE(review): hardcoded absolute path makes the script machine-specific;
# consider relative paths or here::here().
setwd("/home/topher/RangeExpansionGenetics/FinalAnalyses/MeanPi")
library(lme4)      # mixed models (lmer, bootMer)
library(boot)      # boot.ci for percentile intervals
library(pbkrtest)  # PBmodcomp parametric-bootstrap model comparison
# Per-landscape mean pi values across generations, blocks and treatments.
PopPi <- read.csv("PiMeans.csv")
PopPi$block <- factor(PopPi$block)
# Subset and format the data
FounderData <- subset(PopPi, gen == 0)
Gen8Data <- subset(PopPi, gen == 8)
# Missing location means "shuffled" treatment; code it as "S".
Gen8Data$location <- ifelse(is.na(Gen8Data$location), "S", Gen8Data$location)
Gen8Data$location <- as.factor(Gen8Data$location)
# Locations: C = core, E = edge, S = shuffled.
levels(Gen8Data$location) <- c("C", "E", "S")
# Check how many landscapes in each block and treatment we have
NumLandscapes <- matrix(NA, nrow = 3, ncol = 2)
colnames(NumLandscapes) <- c("struct", "shuf")
rownames(NumLandscapes) <- c("B2", "B3", "B4")
NumLandscapes[1,1] <- nrow(subset(FounderData, (block == 2) & (treat == "struct")))
NumLandscapes[1,2] <- nrow(subset(FounderData, (block == 2) & (treat == "shuff")))
NumLandscapes[2,1] <- nrow(subset(FounderData, (block == 3) & (treat == "struct")))
NumLandscapes[2,2] <- nrow(subset(FounderData, (block == 3) & (treat == "shuff")))
NumLandscapes[3,1] <- nrow(subset(FounderData, (block == 4) & (treat == "struct")))
NumLandscapes[3,2] <- nrow(subset(FounderData, (block == 4) & (treat == "shuff")))
NumLandscapes
# Create the data frames to hold all the results from the statistical analyses.
# All four tables share an identical 18-column layout, so build them with one
# helper instead of repeating the data.frame literal four times.
WinSizes <- c(5000, 7500, 10000, 12500, 15000)

# Build one empty results table: a 'window' column plus NA placeholders for
# p-values (P, FoundP), edge-vs-core / edge-vs-shuffled proportional effects
# (EC, ES) and the per-group estimates (E, C, S), each with lwr/upr CI bounds.
make_results_df <- function(windows) {
  stat_cols <- c("P", "FoundP",
                 "EC", "EClwr", "ECupr",
                 "ES", "ESlwr", "ESupr",
                 "E", "Elwr", "Eupr",
                 "C", "Clwr", "Cupr",
                 "S", "Slwr", "Supr")
  out <- data.frame(window = windows)
  out[stat_cols] <- NA  # each column becomes an all-NA vector of nrow(out)
  out
}

MeanBlockAuto <- make_results_df(WinSizes)
MeanBlockSex <- make_results_df(WinSizes)
MeanNoBlockAuto <- make_results_df(WinSizes)
MeanNoBlockSex <- make_results_df(WinSizes)
# Create a function to use for bootstrapping in the models with block effects.
# Given a fitted mixed model with intercept = core and offsets for edge and
# shuffled, return c(core, edge, shuffled, edge-vs-core reduction,
# edge-vs-shuffled reduction).
ModEffects <- function(fitobj){
  # Fixed-effect estimates: intercept, then the edge and shuffled offsets.
  est <- summary(fitobj)$coefficients[, 1]
  core_mean <- est[1]
  edge_mean <- est[1] + est[2]
  shuf_mean <- est[1] + est[3]
  # Proportional reduction at the edge relative to each comparison group.
  edge_vs_core <- (core_mean - edge_mean) / core_mean
  edge_vs_shuf <- (shuf_mean - edge_mean) / shuf_mean
  c(core_mean, edge_mean, shuf_mean, edge_vs_core, edge_vs_shuf)
}
# Now loop through the different window sizes to calculate all the necessary
# quantities. For each window size, compute the gen-8 minus founder change in
# mean pi per landscape, then test for a location effect (core/edge/shuffled)
# with and without a block random effect, for autosomes and the X chromosome.
#
# Column layout: mean-pi columns come in groups of four per window size --
# autosomes at column 6 + 4*(i-1), X chromosome at column 7 + 4*(i-1).
#
# BUGFIX vs. original: all CI columns are now assigned element-wise with [i].
# The original assigned whole columns (e.g. MeanNoBlockAuto$EClwr <- ...,
# MeanBlockAuto$Clwr <- ...), so every iteration silently overwrote all rows
# with the current window's scalar value.
for (i in seq_along(WinSizes)) {
  # Per-landscape difference in mean pi between generation 8 and its founders
  Gen8Data$AutoDiff <- rep(NA, nrow(Gen8Data))
  Gen8Data$SexDiff <- rep(NA, nrow(Gen8Data))
  for (j in seq_len(nrow(Gen8Data))) {
    cur_landscape <- Gen8Data$landscape[j]
    founder <- subset(FounderData, landscape == cur_landscape)
    Gen8Data$AutoDiff[j] <- Gen8Data[j, 6 + 4*(i-1)] - founder[1, 6 + 4*(i-1)]
    Gen8Data$SexDiff[j] <- Gen8Data[j, 7 + 4*(i-1)] - founder[1, 7 + 4*(i-1)]
  }
  ################# Now calculate the statistics without using a block effect
  ########## Autosomal results
  FullMod <- lm(AutoDiff ~ location, data = Gen8Data)
  NullMod <- lm(AutoDiff ~ 1, data = Gen8Data)
  MeanNoBlockAuto$P[i] <- anova(FullMod, NullMod)$"Pr(>F)"[2]
  MeanNoBlockAuto$EC[i] <- (-1 * FullMod$coefficients[2]) / FullMod$coefficients[1]
  MeanNoBlockAuto$ES[i] <- (FullMod$coefficients[3] - FullMod$coefficients[2]) /
    (FullMod$coefficients[1] + FullMod$coefficients[3])
  ##### CIs from model predictions for each location level
  CIDat <- expand.grid(location = factor(c("C", "E", "S")))
  temp <- predict(FullMod, CIDat, interval = "confidence")
  MeanNoBlockAuto$C[i] <- temp[1, 1]
  MeanNoBlockAuto$Clwr[i] <- temp[1, 2]
  MeanNoBlockAuto$Cupr[i] <- temp[1, 3]
  MeanNoBlockAuto$E[i] <- temp[2, 1]
  MeanNoBlockAuto$Elwr[i] <- temp[2, 2]
  MeanNoBlockAuto$Eupr[i] <- temp[2, 3]
  MeanNoBlockAuto$S[i] <- temp[3, 1]
  MeanNoBlockAuto$Slwr[i] <- temp[3, 2]
  MeanNoBlockAuto$Supr[i] <- temp[3, 3]
  # Bounds for the proportional reductions (element-wise; was whole-column)
  MeanNoBlockAuto$EClwr[i] <- (MeanNoBlockAuto$Cupr[i] - MeanNoBlockAuto$Elwr[i]) / MeanNoBlockAuto$Cupr[i]
  MeanNoBlockAuto$ECupr[i] <- (MeanNoBlockAuto$Clwr[i] - MeanNoBlockAuto$Eupr[i]) / MeanNoBlockAuto$Clwr[i]
  MeanNoBlockAuto$ESlwr[i] <- (MeanNoBlockAuto$Supr[i] - MeanNoBlockAuto$Elwr[i]) / MeanNoBlockAuto$Supr[i]
  MeanNoBlockAuto$ESupr[i] <- (MeanNoBlockAuto$Slwr[i] - MeanNoBlockAuto$Eupr[i]) / MeanNoBlockAuto$Slwr[i]
  ##### Founders: treatment effect on the raw founder pi values
  FullMod <- lm(FounderData[, 6 + 4*(i-1)] ~ FounderData$treat)
  NullMod <- lm(FounderData[, 6 + 4*(i-1)] ~ 1)
  MeanNoBlockAuto$FoundP[i] <- anova(FullMod, NullMod)$"Pr(>F)"[2]
  ########## Sex chromosome results
  FullMod <- lm(SexDiff ~ location, data = Gen8Data)
  NullMod <- lm(SexDiff ~ 1, data = Gen8Data)
  MeanNoBlockSex$P[i] <- anova(FullMod, NullMod)$"Pr(>F)"[2]
  MeanNoBlockSex$EC[i] <- (-1 * FullMod$coefficients[2]) / FullMod$coefficients[1]
  MeanNoBlockSex$ES[i] <- (FullMod$coefficients[3] - FullMod$coefficients[2]) /
    (FullMod$coefficients[1] + FullMod$coefficients[3])
  ##### CIs
  CIDat <- expand.grid(location = factor(c("C", "E", "S")))
  temp <- predict(FullMod, CIDat, interval = "confidence")
  MeanNoBlockSex$C[i] <- temp[1, 1]
  MeanNoBlockSex$Clwr[i] <- temp[1, 2]
  MeanNoBlockSex$Cupr[i] <- temp[1, 3]
  MeanNoBlockSex$E[i] <- temp[2, 1]
  MeanNoBlockSex$Elwr[i] <- temp[2, 2]
  MeanNoBlockSex$Eupr[i] <- temp[2, 3]
  MeanNoBlockSex$S[i] <- temp[3, 1]
  MeanNoBlockSex$Slwr[i] <- temp[3, 2]
  MeanNoBlockSex$Supr[i] <- temp[3, 3]
  MeanNoBlockSex$EClwr[i] <- (MeanNoBlockSex$Cupr[i] - MeanNoBlockSex$Elwr[i]) / MeanNoBlockSex$Cupr[i]
  MeanNoBlockSex$ECupr[i] <- (MeanNoBlockSex$Clwr[i] - MeanNoBlockSex$Eupr[i]) / MeanNoBlockSex$Clwr[i]
  MeanNoBlockSex$ESlwr[i] <- (MeanNoBlockSex$Supr[i] - MeanNoBlockSex$Elwr[i]) / MeanNoBlockSex$Supr[i]
  MeanNoBlockSex$ESupr[i] <- (MeanNoBlockSex$Slwr[i] - MeanNoBlockSex$Eupr[i]) / MeanNoBlockSex$Slwr[i]
  ##### Founders
  FullMod <- lm(FounderData[, 7 + 4*(i-1)] ~ FounderData$treat)
  NullMod <- lm(FounderData[, 7 + 4*(i-1)] ~ 1)
  MeanNoBlockSex$FoundP[i] <- anova(FullMod, NullMod)$"Pr(>F)"[2]
  ########################## Now calculate the statistics with a block effect
  ########## Autosomal results
  FullMod <- lmer(AutoDiff ~ location + (1|block), data = Gen8Data)
  NullMod <- lmer(AutoDiff ~ 1 + (1|block), data = Gen8Data)
  # Parametric-bootstrap comparison of the two mixed models (pbkrtest)
  PermTest <- PBmodcomp(FullMod, NullMod, nsim = 10000)
  MeanBlockAuto$P[i] <- PermTest$test$p.value[2]
  PointEsts <- ModEffects(FullMod)
  MeanBlockAuto$C[i] <- PointEsts[1]
  MeanBlockAuto$E[i] <- PointEsts[2]
  MeanBlockAuto$S[i] <- PointEsts[3]
  MeanBlockAuto$EC[i] <- PointEsts[4]
  MeanBlockAuto$ES[i] <- PointEsts[5]
  ##### CIs: percentile bootstrap over the mixed-model fit
  bootpreds <- bootMer(FullMod, ModEffects, nsim = 10000)
  ParamCIs <- vector("list", 5)
  for (j in seq_len(5)) {
    ParamCIs[[j]] <- boot.ci(bootpreds, type = "perc", index = j)
  }
  # Element-wise assignment (was missing [i] in the original)
  MeanBlockAuto$Clwr[i] <- ParamCIs[[1]]$percent[1, 4]
  MeanBlockAuto$Cupr[i] <- ParamCIs[[1]]$percent[1, 5]
  MeanBlockAuto$Elwr[i] <- ParamCIs[[2]]$percent[1, 4]
  MeanBlockAuto$Eupr[i] <- ParamCIs[[2]]$percent[1, 5]
  MeanBlockAuto$Slwr[i] <- ParamCIs[[3]]$percent[1, 4]
  MeanBlockAuto$Supr[i] <- ParamCIs[[3]]$percent[1, 5]
  MeanBlockAuto$EClwr[i] <- ParamCIs[[4]]$percent[1, 4]
  MeanBlockAuto$ECupr[i] <- ParamCIs[[4]]$percent[1, 5]
  MeanBlockAuto$ESlwr[i] <- ParamCIs[[5]]$percent[1, 4]
  MeanBlockAuto$ESupr[i] <- ParamCIs[[5]]$percent[1, 5]
  ##### Founders
  FullMod <- lmer(FounderData[, 6 + 4*(i-1)] ~ FounderData$treat + (1|FounderData$block))
  NullMod <- lmer(FounderData[, 6 + 4*(i-1)] ~ 1 + (1|FounderData$block))
  PermTest <- PBmodcomp(FullMod, NullMod, nsim = 10000)
  MeanBlockAuto$FoundP[i] <- PermTest$test$p.value[2]
  ########## Sex chromosome results
  FullMod <- lmer(SexDiff ~ location + (1|block), data = Gen8Data)
  NullMod <- lmer(SexDiff ~ 1 + (1|block), data = Gen8Data)
  PermTest <- PBmodcomp(FullMod, NullMod, nsim = 10000)
  MeanBlockSex$P[i] <- PermTest$test$p.value[2]
  PointEsts <- ModEffects(FullMod)
  MeanBlockSex$C[i] <- PointEsts[1]
  MeanBlockSex$E[i] <- PointEsts[2]
  MeanBlockSex$S[i] <- PointEsts[3]
  MeanBlockSex$EC[i] <- PointEsts[4]
  MeanBlockSex$ES[i] <- PointEsts[5]
  ##### CIs
  bootpreds <- bootMer(FullMod, ModEffects, nsim = 10000)
  ParamCIs <- vector("list", 5)
  for (j in seq_len(5)) {
    ParamCIs[[j]] <- boot.ci(bootpreds, type = "perc", index = j)
  }
  MeanBlockSex$Clwr[i] <- ParamCIs[[1]]$percent[1, 4]
  MeanBlockSex$Cupr[i] <- ParamCIs[[1]]$percent[1, 5]
  MeanBlockSex$Elwr[i] <- ParamCIs[[2]]$percent[1, 4]
  MeanBlockSex$Eupr[i] <- ParamCIs[[2]]$percent[1, 5]
  MeanBlockSex$Slwr[i] <- ParamCIs[[3]]$percent[1, 4]
  MeanBlockSex$Supr[i] <- ParamCIs[[3]]$percent[1, 5]
  MeanBlockSex$EClwr[i] <- ParamCIs[[4]]$percent[1, 4]
  MeanBlockSex$ECupr[i] <- ParamCIs[[4]]$percent[1, 5]
  MeanBlockSex$ESlwr[i] <- ParamCIs[[5]]$percent[1, 4]
  MeanBlockSex$ESupr[i] <- ParamCIs[[5]]$percent[1, 5]
  ##### Founders
  FullMod <- lmer(FounderData[, 7 + 4*(i-1)] ~ FounderData$treat + (1|FounderData$block))
  NullMod <- lmer(FounderData[, 7 + 4*(i-1)] ~ 1 + (1|FounderData$block))
  PermTest <- PBmodcomp(FullMod, NullMod, nsim = 10000)
  MeanBlockSex$FoundP[i] <- PermTest$test$p.value[2]
}
save(MeanNoBlockAuto, MeanNoBlockSex, MeanBlockAuto, MeanBlockSex,
     file = "MeanPiResults.rdata")
|
85e403f2b35708b1a001c280931d3e64c9b16e1f
|
ff91ae3a2b38914424363e4e04d2770577167fb2
|
/mapping/archive/sqtl.seeker.modified.R
|
bee804eb964ecf070aadf21406fc06b49c26523f
|
[] |
no_license
|
ErikSchutte/QTL-Mapping
|
6ce9029fcb6f6fe9b40e7484fe49bc220201caf0
|
8bc66e1cba31bc7c00fb3dea161c588bfde2cf99
|
refs/heads/master
| 2021-01-11T20:54:29.009462
| 2017-01-30T16:14:04
| 2017-01-30T16:14:04
| 79,209,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,612
|
r
|
sqtl.seeker.modified.R
|
# Modified copy of sQTLseekeR's sqtl.seeker().
#
# For each gene in 'tre.df' (transcript relative expression; one row per
# transcript, sample columns plus a geneId column), read genotypes from the
# indexed file 'genotype.f' within the gene body extended by 'genic.window'
# bp on each side ('gene.loc' gives chr/start/end per geneId), filter SNPs,
# compute an association score per SNP (sQTLseekeR:::compFscore on Hellinger
# distances) and permutation p-values (sQTLseekeR:::compPvalue); optionally
# also test for svQTLs.
#
# Returns a data.frame of per-SNP results across all genes, or NULL when no
# gene produced results.
# NOTE(review): the commented-out print() calls are leftover debugging aids.
sqtl.seeker.modified <- function (tre.df, genotype.f, gene.loc, genic.window = 5000,
    min.nb.ext.scores = 1000, nb.perm.max = 1e+06, nb.perm.max.svQTL = 10000,
    svQTL = FALSE, approx = TRUE, verbose = TRUE)
{
    # Silence R CMD check notes for non-standard-evaluation symbols.
    . = nb.groups = snpId = NULL
    # Per-SNP quality filter: returns "PASS" or a reason string for each row
    # of geno.df. Requires <=2 missing genotypes, at least 5 samples in some
    # genotype group, and >=5 groups with >=2 distinct splicing points.
    check.genotype <- function(geno.df, tre.df) {
        apply(geno.df, 1, function(geno.snp) {
            if (sum(as.numeric(geno.snp) == -1) > 2) {
                return("Missing genotype")
            }
            geno.snp.t = table(geno.snp[geno.snp > -1])
            if (sum(geno.snp.t >= 2) < 5) {
                return("One group of >5 samples")
            }
            nb.diff.pts = sapply(names(geno.snp.t)[geno.snp.t >
                1], function(geno.i) {
                sQTLseekeR:::nbDiffPt(tre.df[, which(geno.snp == geno.i)])
            })
            if (sum(nb.diff.pts >= 2) < 5) {
                return("One group of >5 different splicing")
            }
            return("PASS")
        })
    }
    # Run the full sQTL scan for one gene's transcript-expression sub-frame.
    analyze.gene.f <- function(tre.gene) {
        if (verbose)
            message(tre.gene$geneId[1])
        # Gene body as a GRanges, extended by genic.window bp on each side.
        gr.gene = with(gene.loc[which(gene.loc$geneId == tre.gene$geneId[1]),
            ], GenomicRanges::GRanges(chr, IRanges::IRanges(start,
            end)))
        if (genic.window > 0) {
            gr.gene = GenomicRanges::resize(gr.gene, GenomicRanges::width(gr.gene) +
                2 * genic.window, fix = "center")
        }
        if (length(gr.gene) > 0) {
            # Drop samples with missing expression for this gene.
            tre.gene = tre.gene[, !is.na(tre.gene[1, ])]
            if ( dim(tre.gene)[2] < 3 ) {
                message("Not enough samples")
            } else {
                # Samples present in both the expression and genotype files.
                genotype.headers = as.character(utils::read.table(genotype.f,
                    as.is = TRUE, nrows = 1))
                com.samples = intersect(colnames(tre.gene), genotype.headers)
                # Pairwise Hellinger distances between samples' splicing.
                tre.dist = sQTLseekeR:::hellingerDist(tre.gene[, com.samples])
                res.df = data.frame()
                gr.gene.spl = gr.gene
                # Split ranges wider than 20 kb into ~10 kb sub-ranges so the
                # genotype lookups stay small.
                if (any(GenomicRanges::width(gr.gene) > 20000)) {
                    gr.gene.spl = gr.gene[which(GenomicRanges::width(gr.gene) <=
                        20000)]
                    for (ii in unique(which(GenomicRanges::width(gr.gene) >
                        20000))) {
                        pos.breaks = unique(round(seq(GenomicRanges::start(gr.gene[ii]),
                            GenomicRanges::end(gr.gene[ii]), length.out = floor(GenomicRanges::width(gr.gene[ii])/10000) +
                                1)))
                        gr.gene.spl.ii = rep(gr.gene[ii], length(pos.breaks) -
                            1)
                        GenomicRanges::start(gr.gene.spl.ii) = pos.breaks[-length(pos.breaks)]
                        pos.breaks[length(pos.breaks)] = pos.breaks[length(pos.breaks)] +
                            1
                        GenomicRanges::end(gr.gene.spl.ii) = pos.breaks[-1] -
                            1
                        gr.gene.spl = c(gr.gene.spl, gr.gene.spl.ii)
                    }
                }
                # Score every SNP in every sub-range.
                res.df = lapply(1:length(gr.gene.spl), function(ii) {
                    res.range = data.frame()
                    if (verbose) {
                        message("      Sub-range ", ii)
                    }
                    # Guard: resize() can push a start below 0; skip those
                    # (returns NULL implicitly when the condition is false).
                    if ( GenomicRanges::start(gr.gene.spl[ii]) >= 0 ) {
                        genotype.gene = read.bedix(genotype.f, gr.gene.spl[ii])
                        if (verbose & is.null(genotype.gene)) {
                            message("        No SNPs in the genomic range.")
                        }
                        if (!is.null(genotype.gene)) {
                            snps.to.keep = check.genotype(genotype.gene[,
                                com.samples], tre.gene[, com.samples])
                            if (verbose) {
                                snps.to.keep.t = table(snps.to.keep)
                                message("        ", paste(names(snps.to.keep.t),
                                    snps.to.keep.t, sep = ":", collapse = ", "))
                            }
                            if (any(snps.to.keep == "PASS")) {
                                genotype.gene = genotype.gene[snps.to.keep ==
                                    "PASS", ]
                                # Association score per SNP.
                                res.range = dplyr::do(dplyr::group_by(genotype.gene,
                                    snpId), sQTLseekeR:::compFscore(., tre.dist, tre.gene,
                                    svQTL = svQTL))
                            }
                        }
                    }
                    return(res.range)
                })
                # Keep only sub-ranges that produced results, then compute
                # permutation p-values (grouped by number of genotype groups).
                range.done = which(unlist(lapply(res.df, nrow)) >
                    0)
                if (length(range.done) > 0) {
                    res.df = res.df[range.done]
                    res.df = do.call(rbind, res.df)
                    res.df = dplyr::do(dplyr::group_by(res.df, nb.groups),
                        sQTLseekeR:::compPvalue(., tre.dist, approx = approx, min.nb.ext.scores = min.nb.ext.scores,
                            nb.perm.max = nb.perm.max))
                    if (svQTL) {
                        res.df = dplyr::do(dplyr::group_by(res.df,
                            nb.groups), sQTLseekeR:::compPvalue(., tre.dist, svQTL = TRUE,
                            min.nb.ext.scores = min.nb.ext.scores, nb.perm.max = nb.perm.max.svQTL))
                    }
                    return(data.frame(done = TRUE, res.df))
                }
            }
        }
        else {
            if (verbose) {
                warning("Issue with the gene location.")
            }
        }
        # Fall-through marker row: gene produced no results.
        return(data.frame(done = FALSE))
    }
    # Analyze each gene and collect the per-gene result frames.
    ret.df = lapply(unique(tre.df$geneId), function(gene.i) {
        df = tre.df[which(tre.df$geneId == gene.i), ]
        data.frame(geneId = gene.i, analyze.gene.f(df))
    })
    # Keep genes that returned real results (>2 columns, i.e. not just the
    # geneId + done marker), drop the marker column and stack them.
    done = which(unlist(lapply(ret.df, ncol)) > 2)
    if (length(done) > 0) {
        ret.df = ret.df[done]
        ret.df = do.call(rbind, ret.df)
        ret.df$done = NULL
        return(ret.df)
    }
    else {
        return(NULL)
    }
}
|
3c28f65c2adbe987d52902226f91badfe5af707c
|
a42d0875157fc896ed059812e58f729095ee459b
|
/cachematrix.R
|
43ff3047b9931136bdce2c6823fb723b6dbd5e20
|
[] |
no_license
|
cmacchambers/ProgrammingAssignment2
|
df86edbdd98fb3b66a2b8a7072ba8e9487a63f11
|
aef72c149f334ecf62539ba5e618cf98437ba663
|
refs/heads/master
| 2021-01-18T00:41:31.575833
| 2015-12-22T02:12:23
| 2015-12-22T02:12:23
| 48,401,342
| 0
| 0
| null | 2015-12-22T00:26:28
| 2015-12-22T00:26:27
| null |
UTF-8
|
R
| false
| false
| 989
|
r
|
cachematrix.R
|
##R Programming - Assignment 2
##Two functions that cache and return the inverse of a matrix.
## makeCacheMatrix
## Inputs: 1 Matrix
## Returns: 1 List of 4 functions: set, get, setinverse and getinverse
## Create a cache-aware wrapper around a matrix: a list of four closures that
## share the matrix 'x' and its (lazily computed) inverse via their enclosing
## environment. Setting a new matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until cacheSolve() stores it
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix -> stale cache, reset it
    },
    get = function() x,
    setinverse = function(value) inv <<- value,
    getinverse = function() inv
  )
}
## CacheSolve
## Works in conjunction with makeCacheMatrix to solve and set the matrix inverse.
## Inputs: the initialized list from makeCacheMatrix
## Returns: the solved matrix inverse.
## Return the inverse of the matrix wrapped by a makeCacheMatrix object 'x'.
## On a cache hit the stored inverse is returned (with a status message);
## otherwise the inverse is computed with solve(), cached, and returned.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
ec5feaa26f9e098b7a0bb907c160a4486f8d90f8
|
e07447d03d156edfee5f3b0b899460b10e7d4dc2
|
/score_old.R
|
47a4b7cd20f90b2719fd78c8fdebae6c19ee8fbe
|
[] |
no_license
|
rbauer2000/HandsOnR
|
887e40fa409d71d3eb67f3adfebe620d2b73dace
|
55748893c3b426f7ed25c38f1527f79873149bfe
|
refs/heads/master
| 2021-04-12T16:55:51.540133
| 2020-04-04T14:49:19
| 2020-04-04T14:49:19
| 249,094,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 943
|
r
|
score_old.R
|
## Compute the payout for one pull of the slot machine.
##
## symbols: character vector of length 3 with values among
##   "DD", "7", "BBB", "BB", "B", "C", "0".
## Returns the numeric prize. "DD" is wild (counts toward any combination)
## and each diamond doubles the final prize.
##
## BUGFIX vs. original:
##  - the function body was closed after computing all_bars, leaving the whole
##    payout chain (and the DD multiplier) as top-level code outside score()
##  - the "7" branch lacked the "at least one real 7" check, so c("DD","DD","DD")
##    matched it and paid 80 instead of reaching the 100 jackpot branch
##  - the diamond doubling was only applied inside the cherry branch; it now
##    applies to every prize, so three diamonds pay 100 * 2^3 = 800
score <- function(symbols) {
  bars <- c("B", "BB", "BBB", "DD")
  all_bars <- sum(symbols %in% bars) == 3

  # Three-of-a-kind checks, with "DD" acting as a wild; each branch requires
  # at least one non-wild symbol except the pure-diamond jackpot.
  if (sum(symbols %in% c("B", "DD")) == 3 && sum(symbols %in% c("B")) >= 1) {
    prize <- 10
  } else if (sum(symbols %in% c("C", "DD")) == 3 && sum(symbols %in% c("C")) >= 1) {
    prize <- 10
  } else if (sum(symbols %in% c("BB", "DD")) == 3 && sum(symbols %in% c("BB")) >= 1) {
    prize <- 25
  } else if (sum(symbols %in% c("BBB", "DD")) == 3 && sum(symbols %in% c("BBB")) >= 1) {
    prize <- 40
  } else if (sum(symbols %in% c("7", "DD")) == 3 && sum(symbols %in% c("7")) >= 1) {
    prize <- 80
  } else if (sum(symbols %in% "DD") == 3) {
    prize <- 100
  } else if (all_bars) {
    # Any mix of bar symbols (B/BB/BBB, diamonds count as bars)
    prize <- 5
  } else {
    # Cherry payouts: 2 for one cherry, 5 for two
    num_of_Cs <- sum(symbols %in% "C")
    if (num_of_Cs == 1) {
      prize <- 2
    } else if (num_of_Cs == 2) {
      prize <- 5
    } else {
      prize <- 0
    }
  }

  # Each diamond ("DD") doubles the prize.
  mult <- sum(symbols %in% "DD")
  prize * 2 ^ mult
}
|
e9915e55236a86428b15500d452dd0839ff24752
|
943dd151022cd99b23eda1a23b136226dd6f60ad
|
/project.R
|
2ee90494c2beb31289b6cfda63129fb700aa45eb
|
[] |
no_license
|
AleksAllav/PracticalMLproject
|
7bdcc01ae3453e49bc2b85cae1f98bbb8cce4552
|
96c44bd55e516d41af4cc42e1107bdf99f7a73bf
|
refs/heads/master
| 2020-07-26T06:08:36.235362
| 2019-09-16T14:39:04
| 2019-09-16T14:39:04
| 208,559,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,866
|
r
|
project.R
|
# Practical Machine Learning course project: predict the exercise-quality
# class ('classe') from accelerometer data, comparing Random Forest,
# Decision Tree and GBM models plus a stacked combination.
library(caret); library(knitr);library(randomForest)
library(rattle); library(rpart.plot);library(rpart)
library(gbm); library(corrplot); library(ggplot2)
library(colorspace); library(e1071)
#load("pml-testing.csv")
#load("pml-training.csv")
#load data
trainData <- read.csv("pml-training.csv"); dim(trainData)
testData <- read.csv("pml-testing.csv"); dim(testData)
set.seed(100)
#create sets: 70% training, 30% cross-validation
inTrain<-createDataPartition(y=trainData$classe, p=0.7, list=FALSE)
trainSet<- trainData[inTrain, ]
cvSet<- trainData[-inTrain, ]
#find variables near zero (near-zero variance predictors carry no signal)
NZV <- nearZeroVar(trainSet)
trainSet <- trainSet[, -NZV]
cvSet <- cvSet[, -NZV]
dim(trainSet)
#remove variables that are mostly NA (mNA)
mostlyNA <- sapply(trainSet, function(x) mean(is.na(x))) > 0.95
trainSet <- trainSet[, mostlyNA==FALSE]
cvSet <- cvSet[, mostlyNA==FALSE]
#remove identification only variables (columns 1 to 5)
trainSet <- trainSet[, -(1:5)]
cvSet <- cvSet[, -(1:5)]
# Correlation matrix of the 53 predictors (column 54 is the outcome).
corMatrix <- cor(trainSet[, -54])
corrplot(corMatrix, order = "FPC", method = "color", type = "lower",
tl.cex = 0.8, tl.col = rgb(0, 0, 0))
#Random Forest model fit (3-fold CV)
trControlRF <-trainControl(method = "cv", number = 3, verbose = FALSE)
modFitRF <-train(classe ~ ., data = trainSet, method = "rf", trControl = trControlRF)
modFitRF$finalModel
# Evaluate on the held-out cross-validation set.
predictRF <- predict(modFitRF, newdata = cvSet)
confMatrixRF <- confusionMatrix(predictRF, cvSet$classe)
confMatrixRF
plot(confMatrixRF$table, col = confMatrixRF$byClass,
main = paste("Random Forest - Accuracy =",
round(confMatrixRF$overall['Accuracy'], 4)))
#Decision Trees model fit
set.seed(4000)
modFitDT <- rpart(classe ~ ., data=trainSet, method="class")
fancyRpartPlot(modFitDT)
predictDT <- predict(modFitDT, newdata=cvSet, type="class")
confMatrixDT <- confusionMatrix(predictDT, cvSet$classe)
confMatrixDT
plot(confMatrixDT$table, col = confMatrixDT$byClass,
main = paste("Decision Tree - Accuracy =",
round(confMatrixDT$overall['Accuracy'], 4)))
#Generalized Boosted Model model fit (5-fold repeated CV, 1 repeat)
set.seed(4000)
trControlGBM <- trainControl(method = "repeatedcv", number = 5, repeats = 1)
modFitGBM <- train(classe ~ ., data=trainSet, method = "gbm",
trControl = trControlGBM, verbose = FALSE)
modFitGBM$finalModel
predictGBM <- predict(modFitGBM, newdata=cvSet)
confMatrixGBM <- confusionMatrix(predictGBM, cvSet$classe)
confMatrixGBM
plot(confMatrixGBM$table, col = confMatrixGBM$byClass,
main = paste("GBM - Accuracy =", round(confMatrixGBM$overall['Accuracy'], 4)))
# Stacked model: RF trained on the three base models' CV-set predictions.
# NOTE(review): stacking on the same cvSet used to evaluate the base models
# gives an optimistic accuracy estimate; a separate validation split would
# be cleaner.
combo<-data.frame(predictRF,predictGBM,predictDT, classe = cvSet$classe)
modCombo<-train(classe ~ ., data = combo, method = "rf")
prCombo<-predict(modCombo, cvSet)
confMatrixCmb <- confusionMatrix(prCombo, cvSet$classe)
plot(confMatrixCmb$table, col = confMatrixCmb$byClass,
main = paste("Generalized Boosted Model Accuracy =",
round(confMatrixCmb$overall['Accuracy'], 4)))
randomForest <- print(paste("Random Forest - Accuracy =",
round(confMatrixRF$overall['Accuracy'], 4)))
decisionTree <- print(paste("Decision Tree - Accuracy =",
round(confMatrixDT$overall['Accuracy'], 4)))
GBM <- print(paste("GBM - Accuracy =", round(confMatrixGBM$overall['Accuracy'], 4)))
print(paste0("Combo accuracy = ", confusionMatrix(prCombo, cvSet$classe)$overall['Accuracy']))
# Final predictions on the 20-row quiz test set using the RF model.
predictTEST <- predict(modFitRF, newdata=testData)
predictTEST
summary(predictTEST)
predictTEST <- c(as.character(predictTEST))
#Length of the predicted vector
length(predictTEST)
length(cvSet$classe)
# NOTE(review): predictTEST has 20 elements (quiz set) while cvSet$classe has
# thousands; comparing them element-wise recycles predictTEST, so this "out of
# sample error" is not meaningful. It should compare predictRF to cvSet$classe.
outOfSampleError.accuracy <- sum(predictTEST == cvSet$classe)/length(predictTEST)
outOfSampleError.accuracy
outOfSampleError <- 1 - outOfSampleError.accuracy
outOfSampleError
e <- outOfSampleError * 100
paste0("Out of sample error estimation: ", round(e, digits = 2), "%")
|
b2becd77f33788e183b911c503fb14be4d496a13
|
cc2d7f64376bdb1acbedae4c00e78cf357761958
|
/man/plantGrowth.Rd
|
40ddd3eb3a99f2b121f2f90458b5b96325912657
|
[] |
no_license
|
jg44/JGTools
|
89ea46b0e77da6532ea2b0867a03166d0700931d
|
b2643a574076d915889afb3615c278483221225f
|
refs/heads/master
| 2023-02-06T07:48:59.976457
| 2023-01-24T15:50:52
| 2023-01-24T15:50:52
| 187,842,688
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 776
|
rd
|
plantGrowth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plantGrowth.r
\docType{data}
\name{plantGrowth}
\alias{plantGrowth}
\title{Aboveground biomass of imaginary seedlings treated with two levels of N and P in a crossed design.}
\format{
A data frame with 24 rows and 4 variables
}
\usage{
data(plantGrowth)
}
\description{
A sample (fake) dataset showing aboveground biomass of six seedlings per treatment:
}
\details{
\itemize{
\item N Nitrogen level in watering solution (L = 0.1 g/L; H = 1g/L)
\item P Phosphorus level in watering solution (L = 0.04; H = 0.4 g/L)
\item seedSource Blocking factor (Maternal seed source in open pollinated system).
\item drymass Dry aboveground biomass of each seedling after x weeks.
}
}
\keyword{datasets}
|
3b578533c4edb2949fe0191c3062e5a67e5ed8df
|
a5be19737a57491c0dfbe41d068558542b2d5e10
|
/R/type_5.R
|
138c2dd28ef464c5b93c438e36bc5f28e4a5709b
|
[] |
no_license
|
cran/jordan
|
02ed2dfc77ae1c2d23b7b37016a24d019ce6ee87
|
576f96a2d484e0f60d1a451a465ea6a7984c4380
|
refs/heads/master
| 2023-03-29T00:43:24.948503
| 2021-04-08T10:00:02
| 2021-04-08T10:00:02
| 355,965,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,529
|
r
|
type_5.R
|
`spin` <- function(a,V){
  # Construct a vector of spin elements from a numeric vector `a` of scalar
  # parts and a matrix `V` whose columns are the matching vector parts.
  # Internal storage is the single matrix rbind(a, V): row 1 = scalar parts,
  # remaining rows = vector parts.
  stopifnot(is.numeric(a), is.matrix(V))
  new("spin", x = rbind(a, V)) # this is the only place new("spin",...) is called
}
# Accessors for the internal representation (see spin()): row 1 of slot @x
# holds the scalar part of each element; the remaining rows hold the vector
# part, one column per element.
`r1` <- function(x){x@x[1,,drop=TRUE]}   # scalar parts, as a plain numeric vector
`rn` <- function(x){x@x[-1,,drop=FALSE]} # vector parts, kept as a matrix
`quadraticform` <- function(M){ # modelled on lorentz::sol()
  # Get or set the quadratic form used by spin multiplication (stored in
  # options("quadraticform")).
  #
  # Called with no argument: return the currently set form, or print
  # "identity matrix" and return invisible(NULL) when none is set.
  # Called with a matrix M: validate that it is square (notionally symmetric,
  # not checked), store it in the option, and return it.
  if(missing(M)){
    current <- getOption("quadraticform")
    if(!is.null(current)){
      return(current)
    }
    cat("identity matrix\n")
    return(invisible(NULL))
  }
  stopifnot(is.matrix(M))
  stopifnot(nrow(M) == ncol(M))
  options("quadraticform" = M)
  M
}
`is.spin` <- function(x){inherits(x,"spin")}
`as.spin` <- function(x,d){
  # Coerce x to a "spin" object.
  #
  # Accepted inputs:
  #   * a spin object           -- returned unchanged;
  #   * a matrix                -- row 1 is the scalar part, remaining rows
  #                                the vector parts (inverse of spin_show's
  #                                layout);
  #   * a numeric vector        -- elements with zero vector parts; `d` (the
  #                                vector-part dimension) must be supplied;
  #   * a two-element list      -- list(scalar parts, vector-part matrix).
  # Anything else is an error.
  #
  # Fix: the numeric branch used elementwise `&` inside a scalar `if`
  # condition; `&&` is the correct scalar conjunction here.
  if(is.spin(x)){
    return(x)
  } else if(is.matrix(x)){
    return(spin(a=x[1,,drop=TRUE],V=x[-1,,drop=FALSE]))
  } else if(is.numeric(x) && is.vector(x)){
    return(spin(a=x,V=matrix(0,d,length(x))))
  } else if(is.list(x)){
    return(spin(a=x[[1]],V=x[[2]]))
  } else {
    stop("not recognised")
  }
}
# Make base::dim generic and define it for spin vectors: the "dimension" is
# the size of the vector part (number of rows of rn(x)).
setGeneric("dim")
setMethod("dim","spin",function(x){ nrow(rn(x)) })
# names() defined for jordan objects
# Random spin vector: n elements with d-dimensional vector parts, entries
# N(0,1) rounded to 2 decimals (convenient for examples and tests).
`rspin` <- function(n=3,d=5){spin(round(rnorm(n),2),matrix(round(rnorm(n*d),2),d,n))}
# n copies of the identity-like element: scalar part 1, zero vector part of size d.
`spin_id` <- function(n=3,d=5){as.spin(rbind(1,matrix(0,d,n)))}
# Printing: show() delegates to spin_show().
setMethod("show", "spin", function(object){spin_show(object)})
# Pretty-printer for spin vectors.  Converts to the internal matrix layout
# (row [0] = scalar part, rows [1].. = vector part, one column per element),
# optionally truncates long vectors using option "head_and_tail" =
# c(leading rows, trailing rows), and prints.  Returns the matrix (visibly;
# the value is discarded when called via show()).
`spin_show` <- function(x){
  cat("Vector of",description(x,plural=TRUE), "with entries\n")  # description() defined elsewhere in the package
  x <- as(x,"matrix")
  rownames(x) <- paste("[",seq_len(nrow(x))-1,"]",sep="")  # labels start at [0] for the scalar row
  if(is.null(colnames(x))){
    colnames(x) <- paste("[",seq_len(ncol(x)),"]",sep="")
  }
  o <- getOption("head_and_tail")
  if(is.null(o)){o <- c(5,3)}   # default: show 5 leading and 3 trailing rows
  if(length(o)==1){o <- c(o,o)} # a single number applies to both ends
  jj <- capture.output(x)       # jj[1] = column header line, jj[2..n+1] = data rows
  n <- nrow(x)
  # Relabel the first data row (the "[0]" scalar row) with " r " -- presumably
  # marking the real/scalar component; confirm against package docs.
  substr(jj[2],1,3) <- " r "
  if(sum(o) < (n-1)){
    # Too many rows: keep header + scalar row, first o[1] and last o[2]
    # vector-part rows, with rules marking the elision.
    jj <- c(
      jj[1:2],
      paste(rep("-",nchar(jj[1])),collapse=""),
      jj[3:(o[1]+2)],
      paste(rep(".",nchar(jj[1])),collapse=""),
      jj[(n-o[2]+2):(n+1)]
    )
  }
  for(i in jj){
    cat(paste(i,"\n"))
  }
  return(x)
}
`harmonize_spin_spin` <- function(e1,e2){ # e1,e2: spin objects
  # Recycle two spin vectors to a common length, rbind()-style (the shorter
  # index sequence is recycled, with the usual warning when the lengths are
  # not multiples).  Returns the recycled scalar parts (s1, s2) and vector
  # parts (v1, v2) ready for elementwise arithmetic.
  idx <- rbind(seq_along(e1), seq_along(e2))
  i1 <- idx[1, ]
  i2 <- idx[2, ]
  list(
    s1 = r1(e1)[i1],
    s2 = r1(e2)[i2],
    v1 = rn(e1)[, i1, drop = FALSE],
    v2 = rn(e2)[, i2, drop = FALSE]
  )
}
`harmonize_spin_numeric` <- function(e1,e2){ # e1: spin, e2: numeric
  # Recycle a spin vector and a plain numeric vector to a common length
  # (same rbind()-based recycling as harmonize_spin_spin()).  Returns the
  # recycled spin scalar parts (s1), numeric values (s2) and spin vector
  # parts (v1).
  idx <- rbind(seq_along(e1), seq_along(e2))
  i1 <- idx[1, ]
  list(
    s1 = r1(e1)[i1],
    s2 = e2[idx[2, ]],
    v1 = rn(e1)[, i1, drop = FALSE]
  )
}
# Product of two spin elements:
#   (a, v) * (b, w) = (a*b + <v,w>, a*w + b*v)
# where <.,.> is the plain Euclidean inner product, or -- when option
# "quadraticform" is set -- the bilinear form induced by that matrix
# (computed column-pairwise via emulator::quad.3diag).
`spin_prod_spin` <- function(e1,e2){
  if(is.null(getOption("quadraticform"))){
    innerprod <- function(v1,v2){colSums(v1*v2)}
  } else {
    innerprod <- function(v1,v2){emulator::quad.3diag(quadraticform(),v1,v2)}
  }
  with(harmonize_spin_spin(e1,e2),{
    return(spin(a=s1*s2 + innerprod(v1,v2), V=sweep(v2,2,s1,"*")+sweep(v1,2,s2,"*")))})
}
# spin * numeric: scale both the scalar and vector parts elementwise.
`spin_prod_numeric` <- function(e1,e2){with(harmonize_spin_numeric(e1,e2),{return(spin(a=s1*s2,V=sweep(v1,2,s2,"*")))})}
# spin + numeric: deliberately not implemented (the Arith methods below route here).
`spin_plus_numeric` <- function(e1,e2){stop("not implemented")}
# Additive inverse: negate both parts.
`spin_negative` <- function(e1){spin(-r1(e1),-rn(e1))}
# Elementwise sum of two spin vectors (with recycling).
`spin_plus_spin` <- function(e1,e2){with(harmonize_spin_spin(e1,e2),{return(spin(s1+s2,v1+v2))})}
`spin_equal_spin` <- function(e1,e2){with(harmonize_spin_spin(e1,e2),{return(spin(s1+s2,v1+v2))})}
# Spin factors are not a division algebra, so no multiplicative inverse exists.
`spin_inverse` <- function(...){ stop("not a division algebra") }
# Raising to a spin-valued power is not defined.
`spin_power_spin` <- function(...){ stop("spin^spin not defined") }
`spin_power_single_n` <- function(e1,n){ # n a length-one nonnegative integer
  # Elementwise e1^n for a single whole-number exponent n >= 0, computed by
  # repeated left-multiplication (dispatches to spin_prod_spin via `*`).
  # n == 0 yields the identity element of matching shape (scalar 1, zero
  # vector part).
  stopifnot(n==round(n))
  stopifnot(n>=0)
  stopifnot(length(n)==1)
  if(n==0){
    return(spin(a=1+0*r1(e1),V=rn(e1)*0)) # identity, same shape as e1
  }
  if(n==1){
    return(e1)
  }
  # Iterate rather than recurse: left fold ((e1*e1)*e1)*... with n-1 products.
  acc <- e1
  k <- n - 1
  while(k > 0){
    acc <- acc * e1
    k <- k - 1
  }
  acc
}
`spin_power_numeric` <- function(e1,e2){
  # Placeholder for elementwise spin^numeric: the function bails out
  # immediately, so everything after the stop() is unreachable draft code
  # (kept as-is; note the error message is missing its closing parenthesis).
  stop("not yet implemented (it makes sense but I have not got round to implementing it yet")
  n <- e2 # yes it's redundant but using e2 for n drives me nuts
  if(length(n)==1){
    return(spin_power_single_n(e1,n))
  } else {
    jj <- harmonize_spin_numeric(e1,n)
  }
  return(as.spin(e1))
}
# Unary + and - on spin objects; every other unary Op is an error.
setMethod("Arith",signature(e1 = "spin", e2="missing"),
          function(e1,e2){
            switch(.Generic,
                   "+" = e1,
                   "-" = spin_negative(e1),
                   stop(paste("Unary operator", .Generic,
                              "not allowed on spin objects"))
                   )
          } )
## binary operators:
# spin o spin: +, - and the spin product; division and powers are errors.
setMethod("Arith",signature(e1 = "spin", e2="spin"),
          function(e1,e2){
            switch(.Generic,
                   "+" = spin_plus_spin(e1, e2),
                   "-" = spin_plus_spin(e1, spin_negative(e2)),
                   "*" = spin_prod_spin(e1, e2),
                   "/" = stop("1/spin not defined"),
                   "^" = stop("x^spin not defined"),
                   stop(paste("binary operator \"", .Generic, "\" not defined for spin objects"))
                   )})
# spin o numeric.  Note that "+" and "-" route to spin_plus_numeric(), which
# currently stop()s; only * and / (scalar scaling) actually work.
# NOTE(review): the fallthrough messages below say "onions" -- apparently
# copy-pasted from the onion package; harmless but misleading.
setMethod("Arith",signature(e1 = "spin", e2="numeric"),
          function(e1,e2){
            switch(.Generic,
                   "+" = spin_plus_numeric(e1,e2),
                   "-" = spin_plus_numeric(e1,-e2),
                   "*" = spin_prod_numeric(e1,e2),
                   "/" = spin_prod_numeric(e1,1/e2),
                   "^" = spin_power_numeric(e1, e2),
                   stop(paste("binary operator \"", .Generic, "\" not defined for onions"))
                   )})
# numeric o spin (commuted forms); numeric/spin and numeric^spin are errors.
setMethod("Arith",signature(e1 = "numeric", e2="spin"),
          function(e1,e2){
            switch(.Generic,
                   "+" = spin_plus_numeric(e2,e1),
                   "-" = spin_plus_numeric(spin_negative(e2),e1),
                   "*" = spin_prod_numeric(e2,e1),
                   "/" = stop("1/spin not defined"),
                   "^" = stop("x^spin not defined"),
                   stop(paste("binary operator \"", .Generic, "\" not defined for onions"))
                   )})
# Extraction: x[i] selects elements (columns of the internal matrix);
# matrix-style x[,j] is disallowed; x[] returns x unchanged.
setMethod("[", signature("spin",i="index",j="missing"),function(x,i,j,drop){spin(a=r1(x)[i],V=rn(x)[,i,drop=FALSE])})
setMethod("[", signature("spin",i="missing",j="index"),function(x,i,j,drop){stop()})
setMethod("[", signature("spin",i="missing",j="missing"),function(x,i,j,drop){x})
# Replacement x[i] <- value with a spin value: overwrite both the scalar
# and vector parts at positions i.
setReplaceMethod("[",signature(x="spin",i="index",j="missing",value="spin"),
                 function(x,i,j,value){
                   outa <- r1(x)
                   outa[i] <- r1(value)
                   outV <- rn(x)
                   outV[,i] <- rn(value)
                   return(spin(a=outa,V=outV))
                 } )
# Replacement with a numeric: only a single 0 is accepted (zeroes out the
# selected elements entirely -- scalar AND vector parts).
setReplaceMethod("[",signature(x="spin",i="index",j="missing",value="numeric"),
                 function(x,i,j,value){
                   stopifnot(length(value)==1)
                   stopifnot(value==0)
                   outa <- r1(x)
                   outa[i] <- value
                   outV <- rn(x)
                   outV[,i] <- 0 # the meat
                   return(spin(a=outa,V=outV))
                 } )
# All remaining replacement signatures are disallowed.
setReplaceMethod("[",signature(x="spin",i="ANY" ,j="missing",value="ANY"),function(x,i,j,value){stop()})
setReplaceMethod("[",signature(x="spin",i="index" ,j="index" ,value="ANY"),function(x,i,j,value){stop()})
setReplaceMethod("[",signature(x="spin",i="missing",j="ANY" ,value="numeric"),function(x,i,j,value){stop()})
# NOTE(review): the two methods below have EMPTY bodies, so x[] <- value
# would silently replace x with NULL.  They presumably should stop() like
# the others, or perform whole-object replacement -- confirm intent.
setReplaceMethod("[",signature(x="spin",i="missing",j="missing",value="spin"),function(x,i,j,value){
})
setReplaceMethod("[",signature(x="spin",i="missing",j="missing",value="numeric"),function(x,i,j,value){
})
|
aa4f7613ad963c9d49022e6401022b565b8319de
|
590c6ae4469f9741ecd2a6392ea545900a5f78a7
|
/exploratory_data_analysis/Project2/loadData.R
|
e4dbf3fbb6a904fb8cdf6d51116c17a24db0f52a
|
[] |
no_license
|
grace828822/datasciencecoursera
|
e7b64f99ddc081c517a0f95de3df5590bbcc3460
|
dc013d658d06aa9883cf80ef3fa032113db0fbab
|
refs/heads/master
| 2021-01-23T12:10:23.360915
| 2015-08-10T13:58:20
| 2015-08-10T13:58:20
| 32,438,117
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 128
|
r
|
loadData.R
|
# Loading the .rds data
# NEIdata: PM2.5 emissions summary; SCCdata: source classification code
# lookup table.  Both files must exist under ./data relative to the
# current working directory.
NEIdata <- readRDS("data/summarySCC_PM25.rds")
SCCdata <- readRDS("data/Source_Classification_Code.rds")
|
9727001daaa63ac966b7982b7cb925c9f082abd2
|
e10d735501e104bf4c2ce339c4ab61ad02d88bd7
|
/R/package.R
|
1268bb1d0a1051a43006c0796e67f94978a3682d
|
[] |
no_license
|
dots26/ccoevolution
|
36933b9bbb76083d80407c54769a5c438d5327e2
|
7c972482727500619d8ed72d6688f8838fc3cef4
|
refs/heads/master
| 2021-07-08T11:15:06.569323
| 2020-10-07T09:32:50
| 2020-10-07T09:32:50
| 197,369,313
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,299
|
r
|
package.R
|
# Package Description for Roxygen:
#' This package is an implementation of cooperative coevolution for large scale global optimization.
#' To use this package, several Python modules must be installed beforehand, namely: numpy and PyGMO.
#'
#' Note: This package uses column-major ordering, i.e. an individual should be contained in a single column, each row represents different variable.
#'
#' \tabular{ll}{
#' Package: \tab ccoevolution\cr
#' Type: \tab Package\cr
#' Version: \tab 0.0.1\cr
#' Date: \tab 2019-07-16\cr
#' License: \tab GPL (>= 2)\cr
#' LazyLoad: \tab yes\cr
#' }
#'
#' @name ccoevolution-package
#' @aliases ccoevolution
#' @docType package
#' @title Cooperative Coevolution for LSGO
#' @author Dani Irawan \email{irawan_dani@@yahoo.com}
#' @keywords package
#' @seealso Main interface function is \code{\link{ccoev}}.
#' @import reticulate
#' @import nsga2R
#' @import lhs
#' @import nnet
#' @import stringr
#' @import randtoolbox
#' @import e1071
#'
#' @section Acknowledgments:
#' This work is funded by the European Commission's H2020 programme through
#' the UTOPIAE Marie Curie Innovative Training Network, H2020-MSCA-ITN-2016,
#' under Grant Agreement No. 722734.
#'
#' @section Maintainer:
#' Dani Irawan \email{irawan_dani@@yahoo.com}
#End of Package Description
NA
|
cf75ac44b84554143ed4646242d129449a477c0c
|
4b588c08a9eb7d236e147ac93cff657c82c09e91
|
/man/nhanes_data.Rd
|
79b99405a5114b7ad75f6aff027210d3d6139044
|
[
"MIT"
] |
permissive
|
monicagerber/mchtoolbox
|
131ef106a018c2d2a6b48283ac289b227eff5fb7
|
44086ca5f21bfc9885a3360be9ff5164e8a13655
|
refs/heads/master
| 2020-03-18T04:51:59.665932
| 2018-05-22T20:19:50
| 2018-05-22T20:19:50
| 134,310,492
| 0
| 0
| null | 2018-05-21T18:42:07
| 2018-05-21T18:42:06
| null |
UTF-8
|
R
| false
| true
| 541
|
rd
|
nhanes_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nhanes_data.R
\docType{data}
\name{nhanes_data}
\alias{nhanes_data}
\title{National Health and Nutrition Examination Survey (NHANES) example dataset}
\format{A dataframe.
\describe{
\item{cid}{Patient identifier.}
\item{sex}{1 = male, 2 = female}
\item{agemons}{Age in months.}
\item{height}{Height in cm.}
\item{weight}{Weight in kg.}
\item{headcir}{Head circumference in cm.}
}}
\usage{
nhanes_data
}
\description{
NHANES data.
}
\keyword{datasets}
|
9fe13f9cc8389c2784d85ae0b7036fa0fd6157fb
|
ae30b705f1c0618c118a8f33052c21d5d442d69d
|
/man/createJSON.Rd
|
c749a9a0e717b0feb68194e1b11c104a26b14d11
|
[] |
no_license
|
dselivanov/LDAvis
|
aa9b8398069a2a7e0583903ecf65fc12eb890fd9
|
176f87e1d89d86aaa7fe29079cb62d217c4d165a
|
refs/heads/master
| 2020-12-25T22:29:52.709582
| 2015-12-22T11:57:50
| 2015-12-22T11:57:50
| 48,425,203
| 1
| 1
| null | 2015-12-22T10:15:37
| 2015-12-22T10:15:37
| null |
UTF-8
|
R
| false
| true
| 5,625
|
rd
|
createJSON.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createJSON.R
\name{createJSON}
\alias{createJSON}
\title{Create the JSON object to read into the javascript visualization}
\usage{
createJSON(phi = matrix(), theta = matrix(), doc.length = integer(),
vocab = character(), term.frequency = integer(), R = 30,
lambda.step = 0.01, mds.method = jsPCA, cluster, plot.opts = list(xlab =
"PC1", ylab = "PC2"), reorder.topics = TRUE, ...)
}
\arguments{
\item{phi}{matrix, with each row containing the distribution over terms
for a topic, with as many rows as there are topics in the model, and as
many columns as there are terms in the vocabulary.}
\item{theta}{matrix, with each row containing the probability distribution
over topics for a document, with as many rows as there are documents in the
corpus, and as many columns as there are topics in the model.}
\item{doc.length}{integer vector containing the number of tokens in each
document of the corpus.}
\item{vocab}{character vector of the terms in the vocabulary (in the same
order as the columns of \code{phi}). Each term must have at least one
character.}
\item{term.frequency}{integer vector containing the frequency of each term
in the vocabulary.}
\item{R}{integer, the number of terms to display in the barcharts
of the interactive viz. Default is 30. Recommended to be roughly
between 10 and 50.}
\item{lambda.step}{a value between 0 and 1.
Determines the interstep distance in the grid of lambda
values over which to iterate when computing relevance.
Default is 0.01. Recommended to be between 0.01 and 0.1.}
\item{mds.method}{a function that takes \code{phi} as an input and outputs
a K by 2 data.frame (or matrix). The output approximates the distance
between topics. See \link{jsPCA} for details on the default method.}
\item{cluster}{a cluster object created from the \link{parallel} package.
If supplied, computations are performed using \link{parLapply} instead
of \link{lapply}.}
\item{plot.opts}{a named list used to customize various plot elements.
By default, the x and y axes are labeled "PC1" and "PC2"
(principal components 1 and 2), since \link{jsPCA} is the default
scaling method.}
\item{reorder.topics}{whether to re-order the K topics in order
of decreasing proportion.}
\item{...}{not currently used.}
}
\value{
A string containing JSON content which can be written to a file
or feed into \link{serVis} for easy viewing/sharing. One element of this
string is the new ordering of the topics.
}
\description{
This function creates the JSON object that feeds the visualization template.
For a more detailed overview,
see \code{vignette("details", package = "LDAvis")}
}
\details{
The function first computes the topic frequencies (across the whole
corpus), and then it reorders the topics in decreasing order of
frequency. The main computation is to loop through the topics and through the
grid of lambda values (determined by \code{lambda.step})
to compute the \code{R} most
\emph{relevant} terms for each topic and value of lambda.
}
\examples{
\dontrun{
data(TwentyNewsgroups, package="LDAvis")
# create the json object, start a local file server, open in default browser
json <- with(TwentyNewsgroups,
createJSON(phi, theta, doc.length, vocab, term.frequency))
serVis(json) # press ESC or Ctrl-C to kill
# createJSON() reorders topics in decreasing order of term frequency
RJSONIO::fromJSON(json)$topic.order
# You may want to just write the JSON and other dependency files
# to a folder named TwentyNewsgroups under the working directory
serVis(json, out.dir = 'TwentyNewsgroups', open.browser = FALSE)
# then you could use a server of your choice; for example,
# open your terminal, type `cd TwentyNewsgroups && python -m SimpleHTTPServer`
# then open http://localhost:8000 in your web browser
# A different data set: the Jeopardy Questions+Answers data:
# Install LDAvisData (the associated data package) if not already installed:
# devtools::install_github("cpsievert/LDAvisData")
library(LDAvisData)
data(Jeopardy, package="LDAvisData")
json <- with(Jeopardy,
createJSON(phi, theta, doc.length, vocab, term.frequency))
serVis(json) # Check out Topic 22 (bodies of water!)
# If you have a GitHub account, you can even publish as a gist
# which allows you to easily share with others!
serVis(json, as.gist = TRUE)
# Run createJSON on a cluster of machines to speed it up
system.time(
json <- with(TwentyNewsgroups,
createJSON(phi, theta, doc.length, vocab, term.frequency))
)
# user system elapsed
# 14.415 0.800 15.066
library("parallel")
cl <- makeCluster(detectCores() - 1)
cl # socket cluster with 3 nodes on host 'localhost'
system.time(
json <- with(TwentyNewsgroups,
createJSON(phi, theta, doc.length, vocab, term.frequency,
cluster = cl))
)
# user system elapsed
# 2.006 0.361 8.822
# another scaling method (svd + tsne)
library("tsne")
svd_tsne <- function(x) tsne(svd(x)$u)
json <- with(TwentyNewsgroups,
createJSON(phi, theta, doc.length, vocab, term.frequency,
mds.method = svd_tsne,
plot.opts = list(xlab="", ylab="")
)
)
serVis(json) # Results in a different topic layout in the left panel
}
}
\references{
Sievert, C. and Shirley, K. (2014) \emph{LDAvis: A Method for
Visualizing and Interpreting Topics}, ACL Workshop on Interactive
Language Learning, Visualization, and Interfaces.
\url{http://nlp.stanford.edu/events/illvi2014/papers/sievert-illvi2014.pdf}
}
\seealso{
\link{serVis}
}
|
59f367b7de903c24bb579840477eb8be95579912
|
7a1990f2026969dbc9e826b33faf8ed1754f83c0
|
/ai/datacamp/dswr_visual_ggplot/part1/ggplot_pract.R
|
68690060628193d91f2e11074b52d03a65b524dd
|
[] |
no_license
|
slzdevsnp/learn
|
5ba8ebf35f134bb1a0c8acbd9054b103e5ab8ac5
|
12731e276af7351a93f1495292889ff7e0c96c9a
|
refs/heads/master
| 2021-06-02T12:47:53.072823
| 2020-01-16T08:48:56
| 2020-01-16T08:48:56
| 150,734,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
r
|
ggplot_pract.R
|
# setwd("~/Dropbox/cs/bigdata/datacamp/dswr_visual_ggplot/part1"); source("ggplot_pract.R")
require(ggplot2)
require(tidyverse)
require(data.table)
library(repr)
options(repr.plot.width=4, repr.plot.height=3)
testplot <- function(meansdf)
{
  # Dodged bar chart of `means` by `condition`, for a data frame with
  # columns `condition` and `means`.  Returns the ggplot object (print it
  # to render).
  #
  # Fix: the original evaluated `p + geom_bar(...)` but discarded the
  # result and returned the layer-less `p`; ggplot objects are values, so
  # the added layer must be captured.
  p <- ggplot(meansdf,
              aes(fill = condition,
                  y = means,
                  x = condition)) +
    geom_bar(position = "dodge", stat = "identity")
  return(p)
}
# Parameterised variant of testplot(): the mapped columns are given as
# strings.  Returns the ggplot object.
# NOTE(review): aes_string() is soft-deprecated in recent ggplot2; consider
# aes(.data[[xvar]], ...) once the package floor allows -- confirm version.
testplot1 <- function(meansdf, xvar = "condition", yvar = "means",
                      fillvar = "condition") {
  p <- ggplot(meansdf,
              aes_string(x = xvar, y= yvar, fill = fillvar)) +
    geom_bar(position="dodge", stat="identity") +
    labs(x="variable condition", y="variable means")
  return(p)
}
##main
# Toy data: three condition/mean pairs, plotted via testplot1().
# NOTE(review): `condition` is numeric, so fill maps to a continuous scale;
# wrap in factor() if discrete colours are wanted -- confirm intent.
m <- c(13.8, 16.9, 14.8 )
cond <- c(1,3, 2)
means <- data.frame(means=m, condition=cond)
p<-testplot1(means)
print(p)
## from leslie
# years<-c('1980','1990','2000','2010')
# id <- 1:4
# temper <-c(rep('cold',2),rep('hot',2))
# df<-data.table(years,id,temper)
# p<-ggplot(data=df, mapping=aes(x=years,y=id) ) +
#   geom_point()
# print(p)
|
963c13b995373a56d565a043d2f86d1bebb15b64
|
da4cadc389b67840e84bdc8d2f744ba1f472ea8b
|
/RsurveyGroup_1.R
|
97570ef1a39cfc19157e0a6a0eb89c405c6dd443
|
[] |
no_license
|
Lewis-Barnett-NOAA/STageCompsEstimation
|
d2ba224d8f9963ee3bcd9df457883c31a40350ca
|
8f46353575f51e955b71a36d4df66bfceedaa543
|
refs/heads/master
| 2022-12-16T11:14:16.365280
| 2020-09-17T22:55:19
| 2020-09-17T22:55:19
| 296,452,520
| 0
| 0
| null | 2020-09-17T22:08:05
| 2020-09-17T22:08:04
| null |
UTF-8
|
R
| false
| false
| 5,225
|
r
|
RsurveyGroup_1.R
|
# SET YOUR WORKING DIRECTORY !!
# SCRIPT TO COMPARE AGE ABUNDANCE ESTIMATES BY ALK AND CRL
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; a
# footgun in a sourced script -- consider removing.
rm(list = ls())
# Libraries
require(mgcv)        # gam() for the CRL fits
require(fishmethods) # alkprop() for age-length-key proportions
require(reshape2)    # melt()
require(ggplot2)     # comparison plot
# Read data:
cdata = read.csv('data/POLLOCK_CATCH_2015_2019.csv')   # catch per station
ldata = read.csv('data/POLLOCK_LENGTH_2015_2019.csv')  # length subsamples
adata = read.csv('data/POLLOCK_AGE_2015_2019.csv')     # age subsamples
# Some parameters
sel_year = 2017
min_age = 1
age_plus = 12   # plus-group: ages above this are pooled into it
sel_station = c('A-05', 'G-15', 'H-15', 'J-15', 'M-30', 'J-10', 'E-11', 'K-08', 'H-07')
# --------------------------------------------------------------
# subset data:
cdata = cdata[cdata$YEAR == sel_year, ]
ldata = ldata[ldata$YEAR == sel_year, ]
adata = adata[adata$YEAR == sel_year, ]
# Analyze age subsamples: IMPORTANT STEP TO DECIDE THE RIGHT AGE PLUS GROUP:
# This might vary across years
#write.csv(table(adata$LENGTH, adata$AGE), 'Examine.csv')
# Estimate C_L per station in length subsample data:
# LAMBDA = fraction of the catch that was length-sampled at each station;
# C_l_i = FREQUENCY / LAMBDA expands subsample counts to catch-at-length.
data5 = aggregate(ldata$FREQUENCY, list(STATIONID = ldata$STATIONID), sum)
ldata$N_CATCH = cdata$NUMBER_FISH[match(ldata$STATIONID, cdata$STATIONID)]
ldata$N_SUBSAMPLE = data5$x[match(ldata$STATIONID, data5$STATIONID)]
ldata$LAMBDA = ldata$N_SUBSAMPLE/ldata$N_CATCH
ldata$C_l_i = ldata$FREQUENCY/ldata$LAMBDA
# Apply age plus group:
adata$AGE = ifelse(test = adata$AGE > age_plus, age_plus, adata$AGE)
# character STATIONID:
cdata$STATIONID = as.character(cdata$STATIONID)
ldata$STATIONID = as.character(ldata$STATIONID)
adata$STATIONID = as.character(adata$STATIONID)
# 1) ALK approach --------------------------------------------------
# Create length vs age freq (entire age subsample data):
ALK_tmp = table(adata$LENGTH, adata$AGE)
len = as.numeric(rownames(ALK_tmp))
nl = numeric(length(len))   # zero template for per-station length frequencies
# Select a station
tmp_ldata = ldata[ldata$STATIONID %in% sel_station, ]
tmp_station = aggregate(tmp_ldata$FREQUENCY, list(STATIONID = tmp_ldata$STATIONID, LENGTH = tmp_ldata$LENGTH), sum)
# Make a function to calculate Proportions by station
# station_data: one station's aggregated length frequencies (LENGTH, x).
# Relies on the script-level globals len / nl / ALK_tmp built above; `nl`
# is modified only in the function's local copy.  Returns a data frame of
# age proportions over min_age:age_plus from fishmethods::alkprop().
alk_calc = function(station_data){
  find_pos = match(station_data$LENGTH, len)
  find_pos_2 = find_pos[!is.na(find_pos)] # if length bin is not present in age subsample, remove it
  nl[find_pos_2] = station_data$x[!is.na(find_pos)]
  alkData = cbind(len, nl, ALK_tmp)
  alkData = as.data.frame(alkData)
  prop_station = alkprop(alkData) # use function in fishmethods (explain briefly)
  return(data.frame(AGE = min_age:age_plus, PROP = prop_station$results$prop))
}
# Find propotions by station
prop_by_station = by(data = tmp_station, INDICES = tmp_station$STATIONID, FUN = alk_calc)
# Make data.frame from list
prop_data = do.call(rbind, prop_by_station)
prop_data$STATIONID = gsub("\\..*","",rownames(prop_data))   # strip the ".k" suffix rbind added to row names
# Calculate C_a_i:
prop_data$CATCH = cdata$NUMBER_FISH[ match(prop_data$STATIONID, cdata$STATIONID) ]
prop_data$C_a_i = prop_data$CATCH * prop_data$PROP   # catch-at-age = station catch x age proportion
# Order data frame:
alk_data = prop_data[,c('STATIONID', 'AGE', 'C_a_i')]
alk_data$METHOD = 'ALK'
# 2) CRL approach ------------------------------------------------------------
# Continuation-ratio logit: for each age class, fit a binomial GAM of length
# and location for "age <= current class" among the fish not yet assigned,
# then chain the conditional probabilities into unconditional proportions.
# Create new data frame:
tmp_adata = adata
# Create matrix to save results
all_ages = sort(unique(tmp_adata$AGE))
PropAgeMat = matrix(NA, ncol = length(all_ages), nrow = 1)   # NOTE(review): appears unused below
colnames(PropAgeMat) = all_ages
# run the model GAM:
matPreds = matrix(NA, ncol = length(all_ages), nrow = nrow(ldata))
for(ii in seq_along(all_ages)){
  if(ii == length(all_ages)){
    # Last class: conditional probability is 1 by construction.
    predvals = rep(1, times = nrow(ldata))
  } else {
    # AGEFAC = 1 if the fish belongs to the current (or a younger) class.
    tmp_adata$AGEFAC = ifelse(test = tmp_adata$AGE > all_ages[ii], 0, 1)
    modtmp = gam(AGEFAC ~ s(LENGTH) + s(LON, LAT, k = 10), family = binomial,
                 data = tmp_adata) # here is the model
    predtmp = predict(modtmp, newdata = ldata, type = 'response')
    predvals = as.vector(predtmp)
    # Remove fish of the current class before fitting the next ratio.
    elimina = which(tmp_adata$AGEFAC == 1)
    if(length(elimina) > 0) {
      tmp_adata = tmp_adata[-which(tmp_adata$AGEFAC == 1), ]
    } else {
      tmp_adata = tmp_adata
    }
  }
  matPreds[,ii] = predvals
}
# conditional probability:
# Unconditional proportions: P(age k) = p_k * prod_{j<k} (1 - p_j).
matPreds2 = matrix(NA, ncol = length(all_ages), nrow = nrow(ldata))
for(kk in seq_along(all_ages)){
  if(kk == 1){
    matPreds2[,kk] = matPreds[,kk]
  } else {
    mattmp = 1 - as.matrix(matPreds[,(kk-1):1])
    matPreds2[,kk] = matPreds[,kk]*apply(X = mattmp, MARGIN = 1, FUN = prod)
  }
}
# Here, we already have proportions by age
# Calculate C_a_i
tempo2 = sweep(matPreds2, MARGIN = 1, ldata$C_l_i, `*`)   # scale proportions by expanded catch-at-length
rownames(tempo2) = ldata$STATIONID
colnames(tempo2) = min_age:age_plus
# Order data frame:
tmp_station = tempo2[rownames(tempo2) %in% sel_station, ]
tmp_station = reshape2::melt(tmp_station)
crl_data = aggregate(tmp_station$value, list(STATIONID = tmp_station$Var1, AGE = tmp_station$Var2), sum)
colnames(crl_data)[3] = 'C_a_i'
crl_data$METHOD = 'CRL'
# MERGE ALK AND CRL DATASETS: ------------------------------------------
plot_data = rbind(alk_data, crl_data)
# Plot: ----------------------------------------------------------------
# One panel per station; lines compare catch-at-age from the two estimators.
ggplot(data = plot_data, aes(AGE, C_a_i, color = METHOD)) +
  geom_line() +
  theme_bw() +
  xlab('Age') +
  labs(color = "Method") +
  scale_x_continuous(breaks = min_age:age_plus) +
  facet_wrap(~ STATIONID, scales = 'free_y')
|
e501edcd1913ae0895405765d88e1a2a4a31bd6e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/quicksight_describe_template_definition.Rd
|
f2e2bc00e0e07718b5e7a375b35fd42b7fb57311
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,268
|
rd
|
quicksight_describe_template_definition.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_describe_template_definition}
\alias{quicksight_describe_template_definition}
\title{Provides a detailed description of the definition of a template}
\usage{
quicksight_describe_template_definition(
AwsAccountId,
TemplateId,
VersionNumber = NULL,
AliasName = NULL
)
}
\arguments{
\item{AwsAccountId}{[required] The ID of the Amazon Web Services account that contains the template.
You must be using the Amazon Web Services account that the template is
in.}
\item{TemplateId}{[required] The ID of the template that you're describing.}
\item{VersionNumber}{The version number of the template.}
\item{AliasName}{The alias of the template that you want to describe. If you name a
specific alias, you describe the version that the alias points to. You
can specify the latest version of the template by providing the keyword
\verb{$LATEST} in the \code{AliasName} parameter. The keyword \verb{$PUBLISHED} doesn't
apply to templates.}
}
\description{
Provides a detailed description of the definition of a template.
See \url{https://www.paws-r-sdk.com/docs/quicksight_describe_template_definition/} for full documentation.
}
\keyword{internal}
|
85a0de11ac73cd3a32d7f308b227bccdb22125f3
|
8e20060c5475f00e9a513f76725bcf6e54f2068a
|
/man/edge_attr_names.Rd
|
1b5b1c2670525f020f90fc4947a64473921ef0bc
|
[] |
no_license
|
DavisVaughan/rigraph
|
8cc1b6c694ec03c1716d8b471d8f910e08c80751
|
a28ac7fe7b45323a38ffe1f13843bb83bdb4278f
|
refs/heads/master
| 2023-07-18T20:34:16.631540
| 2021-09-20T22:55:53
| 2021-09-20T22:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,070
|
rd
|
edge_attr_names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attributes.R
\name{edge_attr_names}
\alias{edge_attr_names}
\alias{list.edge.attributes}
\title{List names of edge attributes}
\usage{
edge_attr_names(graph)
}
\arguments{
\item{graph}{The graph.}
}
\value{
Character vector, the names of the edge attributes.
}
\description{
List names of edge attributes
}
\examples{
g <- make_ring(10) \%>\%
set_edge_attr("label", value = letters[1:10])
edge_attr_names(g)
plot(g)
}
\seealso{
Other graph attributes:
\code{\link{delete_edge_attr}()},
\code{\link{delete_graph_attr}()},
\code{\link{delete_vertex_attr}()},
\code{\link{edge_attr<-}()},
\code{\link{edge_attr}()},
\code{\link{graph_attr<-}()},
\code{\link{graph_attr_names}()},
\code{\link{graph_attr}()},
\code{\link{igraph-dollar}},
\code{\link{igraph-vs-attributes}},
\code{\link{set_edge_attr}()},
\code{\link{set_graph_attr}()},
\code{\link{set_vertex_attr}()},
\code{\link{vertex_attr<-}()},
\code{\link{vertex_attr_names}()},
\code{\link{vertex_attr}()}
}
\concept{graph attributes}
|
750c3523a0d71bde0ea6b32b91c77e2f670d54b5
|
278499567ef34de194ccec599f3843e59ddf1ce2
|
/R/eps_CC_loglik.R
|
9c1a4c9c3a50a8d6cf3a7291fdf39b864bb345ea
|
[] |
no_license
|
theabjorn/extremesampling
|
3a5850a913a416b0f2d3bf302e3c01e639a29f15
|
29b2846e6fb3d6285745c0b384f47fca64ede967
|
refs/heads/master
| 2020-05-21T20:29:23.768030
| 2018-12-03T09:34:15
| 2018-12-03T09:34:15
| 62,583,578
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,044
|
r
|
eps_CC_loglik.R
|
# Log-likelihood function for EPS-CC
# Extreme-phenotype-sampling case-control log-likelihood for a normal linear
# model with covariates, where the response is observed only when it falls
# OUTSIDE the interval (l, u).
#
#   parameters : c(alpha, beta..., tau), tau = log(sigma^2)
#   data       : matrix, first column = response y, remaining = covariates
#   l, u       : lower / upper sampling cut-offs
#
# Returns sum over observations of
#   log[ phi(z)/sigma ] - log[ 1 - Phi(zu) + Phi(zl) ],
# with z, zl, zu the standardised residual and cut-offs.
epsCC.loglik_ex <- function(parameters,data,l,u){
  npar <- length(parameters)
  alpha <- parameters[1]
  beta <- parameters[2:(npar - 1)]
  sigma <- sqrt(exp(parameters[npar]))   # tau -> sigma
  y <- data[, 1]
  x <- as.matrix(data[, 2:ncol(data)])
  mu <- alpha + x %*% beta               # linear predictor, n x 1
  z <- (y - mu) / sigma
  zl <- (l - mu) / sigma
  zu <- (u - mu) / sigma
  sum(log(dnorm(z) / sigma) - log(1 - pnorm(zu) + pnorm(zl)))
}
# Intercept-only variant of epsCC.loglik_ex(): no covariates.
#   parameters : c(alpha, tau), tau = log(sigma^2)
#   data       : matrix whose first column is the response y
#   l, u       : lower / upper sampling cut-offs
epsCC.loglik_e <- function(parameters,data,l,u){
  npar <- length(parameters)
  alpha <- parameters[1]
  sigma <- sqrt(exp(parameters[npar]))
  y <- data[, 1]
  z <- (y - alpha) / sigma
  zl <- (l - alpha) / sigma
  zu <- (u - alpha) / sigma
  sum(log(dnorm(z) / sigma) - log(1 - pnorm(zu) + pnorm(zl)))
}
# epsCC.loglik_rand = function(parameters,data,l,u,randomindex){
# param = parameters
# len = dim(data)[2]
#
# y_e = data[randomindex==0,1]
# x_e = as.matrix(data[randomindex==0,2:len])
# ne = length(y_e)
# y_r = data[,1][randomindex==1]
# x_r = as.matrix(data[randomindex==1,2:len])
# nr = length(y_r)
#
# lenp = length(param)
# alpha = param[1]
# beta = param[2:(lenp-1)]
# tau = param[lenp]; sigma2 = exp(tau); sigma = sqrt(sigma2)
#
# z = (y_e-alpha - x_e%*%beta)/sigma
# zl = (l-alpha - x_e%*%beta)/sigma
# zu = (u-alpha - x_e%*%beta)/sigma
#
# z_r = (y_r-alpha - x_r%*%beta)/sigma
#
# ll = sum(log(dnorm(z_r)/sigma)) + sum(log(dnorm(z)/sigma)-log(1-pnorm(zu)+pnorm(zl)))
# return(ll)
# }
#
# epsCC.loglik_z_rand = function(parameters,data,cutoffs,randomindex){
# param = parameters
# l = min(cutoffs)
# u = max(cutoffs)
#
# y_e = data[,1][randomindex==0]
# ne = length(y_e)
# y_r = data[,1][randomindex==1]
# nr = length(y_r)
#
# lenp = length(param)
# alpha = param[1]
# tau = param[lenp]; sigma2 = exp(tau); sigma = sqrt(sigma2)
#
# z = (y_e-alpha)/sigma
# zl = (l-alpha)/sigma
# zu = (u-alpha)/sigma
#
# z_r = (y_r-alpha)/sigma
#
# ll = sum(log(dnorm(z_r)/sigma)) + sum(log(dnorm(z)/sigma)-log(1-pnorm(zu)+pnorm(zl)))
# return(ll)
# }
#
#
# #######################################################
# # EPS-CC loglik for secondary phenotype W
# #######################################################
#
# epsCC.loglik.W = function(parameters,data){
# param = parameters
# len = dim(data)[2]
#
# w = data[,1]
# x = as.matrix(data[,2:(len-1)])
# gamma = data[,len]
# n = length(w)
#
# lenp = length(param)
# alpha = param[1]
# beta = param[2:(lenp-2)]
# tau = param[(lenp)-1]; sigma2 = exp(tau); sigma = sqrt(sigma2)
# tmp = param[(lenp)]; rho = exp(tmp)/(1+exp(tmp))
#
# z = (w-alpha - x%*%beta - sigma*rho*gamma)
#
# ll = -(n/2)*log(sigma2) - (n/2)*log(1-rho^2) - 0.5*(1/sigma2)*(1/(1-rho^2))*sum(z^2)
# return(ll)
# }
|
4668dc3771ee38b8344f4d02fbcce69e9cdc4541
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.management/man/managedgrafana_describe_workspace.Rd
|
f29af6519b2a1262e703d7ff982239d4aae39565
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 619
|
rd
|
managedgrafana_describe_workspace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/managedgrafana_operations.R
\name{managedgrafana_describe_workspace}
\alias{managedgrafana_describe_workspace}
\title{Displays information about one Amazon Managed Grafana workspace}
\usage{
managedgrafana_describe_workspace(workspaceId)
}
\arguments{
\item{workspaceId}{[required] The ID of the workspace to display information about.}
}
\description{
Displays information about one Amazon Managed Grafana workspace.
See \url{https://www.paws-r-sdk.com/docs/managedgrafana_describe_workspace/} for full documentation.
}
\keyword{internal}
|
d3c763fc1c9dc2881855ba5aed6be54fea6add3e
|
596e526d5a2bc3cc8b833096c0f37108ab242b64
|
/analysis/shir/shir_hwep.R
|
953778f31fa5869f6e4a1987fb4e3497fd76ad68
|
[] |
no_license
|
dcgerard/hwesims
|
f967c92549f9ddcc690376f0374097da73be74a6
|
99aa7963e638509279d78a40ea2b2f09ec305cdf
|
refs/heads/main
| 2023-04-13T15:33:15.612702
| 2022-07-25T18:08:05
| 2022-07-25T18:08:05
| 338,132,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,277
|
r
|
shir_hwep.R
|
######################
## Fit hwep on Shirasawa data
######################
# Number of threads to use for multithreaded computing. This must be
# specified in the command-line shell; e.g., to use 8 threads, run
# command
#
# R CMD BATCH '--args nc=8' shir_hwep.R
#
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  nc <- 1
} else {
  # Evaluates the shell-supplied expression (e.g. "nc=8"). Kept as-is for
  # backward compatibility with the documented invocation above.
  eval(parse(text = args[[1]]))
}
cat(nc, "\n")

library(hwep)
library(updog)
library(future)
library(tidyverse)

# Genotype count matrix: rows are loci, columns are dosage counts.
nmat <- as.matrix(read.csv("./output/shir/shir_nmat.csv", row.names = 1))

# Run hwefit() under a multisession plan with `nc` workers, restoring the
# sequential plan afterwards even if the fit errors (the original repeated
# plan()/plan("sequential") pairs left a multisession plan behind on error).
hwefit_parallel <- function(...) {
  plan(multisession, workers = nc)
  on.exit(plan("sequential"), add = TRUE)
  hwefit(...)
}

hout <- hwefit_parallel(nmat = nmat, type = "ustat", thresh = 0)
lout <- hwefit_parallel(nmat = nmat, type = "mle", thresh = 0)
dout <- hwefit_parallel(nmat = nmat, type = "nodr")
rout <- hwefit_parallel(nmat = round(nmat), type = "rm")

## Boxplot of -log10 p-values across methods ----
sp <- seq(0, 40, by = 3)
pl <- tibble(MLE = lout$p_hwe, `U-stat` = hout$p_hwe, `No DR` = dout$p_hwe) %>%
  pivot_longer(cols = everything(),
               names_to = "Method",
               values_to = "P-value") %>%
  mutate(`P-value` = -log10(`P-value`),
         Method = parse_factor(Method,
                               levels = c("No DR", "MLE", "U-stat"))) %>%
  ggplot(aes(x = Method, y = `P-value`)) +
  geom_boxplot(outlier.size = 0.5) +
  scale_y_continuous(name = "Observed p-values", breaks = sp, labels = 10^(-sp), minor_breaks = NULL) +
  theme_bw()

ggsave(filename = "./output/shir/shir_pbox.pdf",
       plot = pl,
       height = 2,
       width = 3,
       family = "Times")

## Compare random mating genotype estimates to hypergeometric ----
rout$locus <- rownames(nmat)
mout <- readRDS("./output/shir/shir_updog.RDS")
snpdf <- mout$snpdf

# Hypergeometric gamete-dosage probabilities for each parental genotype
# 1, ..., ploidy - 1 (originally hard-coded as five dhyper() rows for
# ploidy = 6).
ploidy <- 6
gamdf <- data.frame(
  t(vapply(seq_len(ploidy - 1), function(g) {
    stats::dhyper(x = 0:(ploidy / 2), m = g, n = ploidy - g, k = ploidy / 2)
  }, FUN.VALUE = numeric(ploidy / 2 + 1)))
)
names(gamdf) <- paste0("hg_", 0:(ploidy / 2))
gamdf$pgeno <- seq_len(ploidy - 1)

# Long/wide reshape so each row pairs the hypergeometric (hg) and the
# estimated (p) gamete probability for one SNP at one dosage.
rmdf <- snpdf %>%
  select(snp, pgeno) %>%
  left_join(gamdf, by = "pgeno") %>%
  filter(snp %in% rownames(nmat)) %>%
  left_join(rout, by = c("snp" = "locus")) %>%
  select(-chisq_rm, -df_rm, -p_rm) %>%
  pivot_longer(cols = c(starts_with("hg_"), starts_with("p.")),
               names_to = "method_dosage",
               values_to = "prop") %>%
  separate(col = "method_dosage", into = c("method", "dosage")) %>%
  pivot_wider(names_from = method, values_from = prop) %>%
  mutate(dosage = paste0("dosage = ", dosage))

pl <- ggplot(rmdf, aes(x = hg, y = p)) +
  facet_wrap(. ~ dosage) +
  geom_point() +
  theme_bw() +
  geom_abline(slope = 1, intercept = 0, lty = 2, col = 2) +
  xlab("Hypergeometric Gamete Probability") +
  ylab("Estimated Gamete Probability") +
  theme(strip.background = element_rect(fill = "white"))

ggsave(filename = "./output/shir/shir_gamprob.pdf",
       plot = pl,
       height = 4,
       width = 4,
       family = "Times")

## Histogram of random-mating p-values ----
pl <- ggplot(rout, aes(x = p_rm)) +
  geom_histogram(bins = 30, color = "black", fill = "white") +
  theme_bw() +
  xlab("P-value")

ggsave(filename = "./output/shir/shir_rmhist.pdf",
       plot = pl,
       height = 2,
       width = 3,
       family = "Times")
|
490eec9375945dbc1268219e8f220f438c67ecf7
|
3ee48064cf4a49718e56368aab3518da43335df5
|
/inst/NEWS.Rd
|
46297ba169273a3f33b4a46ff9b507daa7decb04
|
[] |
no_license
|
Shians/SingleCellExperiment
|
23b4a476bc6880be98f0bfa0893f50d40c8d2171
|
f46c5345d01529371635100839c22cb714476083
|
refs/heads/master
| 2021-07-06T22:43:12.669217
| 2017-09-28T04:35:35
| 2017-09-28T04:35:35
| 105,102,435
| 0
| 0
| null | 2017-09-28T04:30:18
| 2017-09-28T04:30:17
| null |
UTF-8
|
R
| false
| false
| 194
|
rd
|
NEWS.Rd
|
\name{SCEnews}
\title{SingleCellExperiment News}
\encoding{UTF-8}
\section{Version 0.99.4}{\itemize{
\item
New package SingleCellExperiment, for representation of single-cell genomics data.
}}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.