id stringlengths 40 40 | repo_name stringlengths 5 110 | path stringlengths 2 233 | content stringlengths 0 1.03M ⌀ | size int32 0 60M ⌀ | license stringclasses 15 values |
|---|---|---|---|---|---|
487659c7cd21b61fd28f2f9b7a0874e662f14ca3 | CancerInSilico/CancerInSilico | R/class-OffLatticeModel.R | #' @include class-CellModel.R
NULL
library(methods)
################ Class Definition ################

#' @title OffLatticeModel
#' @description General description of an off-lattice cell-based model.
#' not quite a full implementation, but contains much of the necessary
#' structure for models of this type
#'
#' @slot maxTranslation the largest distance the center of a cell can move
#' @slot maxRotation the largest angle a cell can rotate
#' @export
setClass('OffLatticeModel', contains = c('CellModel', 'VIRTUAL'), slots = c(
    maxTranslation = 'numeric',
    maxRotation = 'numeric'
))
#' Off-Lattice Model Constructor
#' @param .Object OffLatticeModel object
#' @param maxTranslation maximum movement of cell
#' @param maxRotation maximum rotation of mitosis cell
#' @param ... model specific parameters passed on to the parent initializer
#' @return initialized cell model
setMethod('initialize', 'OffLatticeModel',
    function(.Object, maxTranslation = 0.1, maxRotation = 0.3, ...)
    {
        # store parameters, don't overwrite a value already present in a slot
        if (!length(.Object@maxTranslation))
            .Object@maxTranslation <- maxTranslation
        if (!length(.Object@maxRotation))
            .Object@maxRotation <- maxRotation

        # finish initialization in the parent class (CellModel), return object
        .Object <- callNextMethod(.Object, ...)
        return(.Object)
    }
)
setValidity('OffLatticeModel',
    function(object)
    {
        # A validity method must return TRUE when the object is valid and a
        # character description of the problem otherwise. The original fell
        # through to an implicit NULL for the valid case; TRUE is now
        # returned explicitly as the methods package documents.
        if (length(object@maxTranslation) == 0)
            "missing 'maxTranslation'"
        else if (length(object@maxRotation) == 0)
            "missing 'maxRotation'"
        else if (object@maxTranslation <= 0)
            "'maxTranslation' must be greater than zero"
        else if (object@maxRotation <= 0)
            "'maxRotation' must be greater than zero"
        else
            TRUE
    }
)
##################### Generics ###################

# Generic accessors into the per-cell simulation record; implementations
# for 'OffLatticeModel' are registered below in the Methods section.

#' get coordinates of a cell at a given time
#' @export
#' @docType methods
#' @rdname getCoordinates-methods
#'
#' @param model cell model object
#' @param time hour of the model to query
#' @param cell id of cell to query
#' @return pair of (x,y) coordinates
#' @examples
#' data(SampleModels)
#' getCoordinates(modDefault, modDefault@runTime, 1)
setGeneric('getCoordinates', function(model, time, cell)
    {standardGeneric('getCoordinates')})

#' get cell radius at a given time
#' @export
#' @docType methods
#' @rdname getRadius-methods
#'
#' @param model cell model object
#' @param time hour of the model to query
#' @param cell id of cell to query
#' @return radius of cell
#' @examples
#' data(SampleModels)
#' getRadius(modDefault, modDefault@runTime, 1)
setGeneric('getRadius', function(model, time, cell)
    {standardGeneric('getRadius')})

#' get cell axis length at a given time
#' @export
#' @docType methods
#' @rdname getAxisLength-methods
#'
#' @param model cell model object
#' @param time hour of the model to query
#' @param cell id of cell to query
#' @return axis length
#' @examples
#' data(SampleModels)
#' getAxisLength(modDefault, modDefault@runTime, 1)
setGeneric('getAxisLength', function(model, time, cell)
    {standardGeneric('getAxisLength')})

#' get cell axis angle at a given time
#' @export
#' @docType methods
#' @rdname getAxisAngle-methods
#'
#' @param model cell model object
#' @param time hour of the model to query
#' @param cell id of cell to query
#' @return axis angle
#' @examples
#' data(SampleModels)
#' getAxisAngle(modDefault, modDefault@runTime, 1)
setGeneric('getAxisAngle', function(model, time, cell)
    {standardGeneric('getAxisAngle')})
##################### Methods ####################

# Look up a single value from the recorded cell state.
#
# Each recorded time step is a flat vector in which every cell occupies 9
# consecutive entries (x, y, radius, axis length, axis angle, cycle length,
# phase code, type code, trial accept rate -- see the accessors below).
#
# model: cell model object; time: hour of the model to query;
# cell: 1-based cell id; col: which of the 9 per-cell fields to return.
getEntry <- function(model, time, cell, col)
{
    # scalar condition, so short-circuiting '||' is the correct operator here
    if (time > model@runTime || time < 0) stop('invalid time')
    row <- floor(time / model@recordIncrement) + 1
    col <- col + 9 * (cell - 1)
    return(model@cells[[row]][col])
}
#' @rdname getCoordinates-methods
#' @aliases getCoordinates
setMethod('getCoordinates', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record columns 1-2 hold the cell's (x, y) center
        return(c(getEntry(model,time,cell,1), getEntry(model,time,cell,2)))
    }
)

#' @rdname getRadius-methods
#' @aliases getRadius
setMethod('getRadius', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 3: cell radius
        return(getEntry(model, time, cell, 3))
    }
)

#' @rdname getAxisLength-methods
#' @aliases getAxisLength
setMethod('getAxisLength', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 4: length of the cell's axis
        return(getEntry(model, time, cell, 4))
    }
)

#' @rdname getAxisAngle-methods
#' @aliases getAxisAngle
setMethod('getAxisAngle', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 5: orientation angle of the cell's axis
        return(getEntry(model, time, cell, 5))
    }
)

#' @rdname getCycleLength-methods
#' @aliases getCycleLength
setMethod('getCycleLength', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 6: cell cycle length
        return(getEntry(model, time, cell, 6))
    }
)

#' @rdname getCellPhase-methods
#' @aliases getCellPhase
setMethod('getCellPhase', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 7 stores the phase as a 0-based code; map it to the
        # corresponding label
        phases <- c('I', 'M', 'G0', 'G1', 'S', 'G2')
        return(phases[getEntry(model, time, cell, 7)+1])
    }
)

#' @rdname getCellType-methods
#' @aliases getCellType
setMethod('getCellType', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 8 is a 0-based type index; convert to 1-based for R
        return(getEntry(model, time, cell, 8) + 1)
    }
)

#' @rdname getTrialAcceptRate-methods
#' @aliases getTrialAcceptRate
setMethod('getTrialAcceptRate', signature(model='OffLatticeModel'),
    function(model, time, cell)
    {
        # record column 9: acceptance rate of the cell's trial moves
        return(getEntry(model, time, cell, 9))
    }
)
#' @rdname getNumberOfCells-methods
#' @aliases getNumberOfCells
setMethod('getNumberOfCells', signature('OffLatticeModel'),
    function(model, time)
    {
        # validate the (scalar) time before translating it to a row index;
        # '||' replaces the elementwise '|' used previously
        if (time > model@runTime || time < 0) stop('invalid time')
        row <- floor(time / model@recordIncrement) + 1
        # each cell occupies 9 consecutive entries in the record row
        return(length(model@cells[[row]]) / 9)
    }
)
#' @rdname getDensity-methods
#' @aliases getDensity
setMethod('getDensity', signature('OffLatticeModel'),
    function(model, time)
    {
        # Density is the ratio of summed squared cell radii to the squared
        # radius of the containing region (the factor of pi cancels from
        # numerator and denominator). '^' is used consistently; the original
        # mixed '**' and '^', which are synonymous.
        nCells <- getNumberOfCells(model, time)
        radii <- sapply(seq_len(nCells), getRadius, model=model, time=time)
        if (model@boundary > 0)
        {
            # fixed circular boundary: use its radius directly
            return(sum(radii ^ 2) / (model@boundary ^ 2))
        }
        else
        {
            # unbounded simulation: use the farthest cell edge from origin
            coords <- sapply(seq_len(nCells), getCoordinates, model=model,
                time=time)
            d <- max(sqrt(coords[1,] ^ 2 + coords[2,] ^ 2) + radii)
            return(sum(radii ^ 2) / (d ^ 2))
        }
    }
)
#' @rdname getCellDistance-methods
#' @aliases getCellDistance
setMethod('getCellDistance', signature(model='OffLatticeModel'),
    function(model, time, cellA, cellB)
    {
        # Return the two (x,y) circle centers of a (possibly elongated)
        # cell as a 2x2 matrix: row i = center i, columns = (x, y).
        centers <- function(model, time, cell)
        {
            crds <- getCoordinates(model, time, cell)
            rad <- getRadius(model, time, cell)
            axisLen <- getAxisLength(model, time, cell)
            axisAng <- getAxisAngle(model, time, cell)
            offset <- 0.5 * axisLen - rad
            x1 <- crds[1] + offset * cos(axisAng)
            y1 <- crds[2] + offset * sin(axisAng)
            x2 <- crds[1] - offset * cos(axisAng)
            y2 <- crds[2] - offset * sin(axisAng)
            return(matrix(c(x1,x2,y1,y2), ncol=2))
        }
        cA <- centers(model, time, cellA)
        cB <- centers(model, time, cellB)
        # minimum squared distance over all four center pairings
        minDist <- (cA[1,1]-cB[1,1])^2 + (cA[1,2]-cB[1,2])^2
        minDist <- min(minDist, (cA[1,1]-cB[2,1])^2 + (cA[1,2]-cB[2,2])^2)
        minDist <- min(minDist, (cA[2,1]-cB[1,1])^2 + (cA[2,2]-cB[1,2])^2)
        # BUG FIX: this line previously duplicated the (A2, B1) pairing so
        # the (A2, B2) pairing was never considered
        minDist <- min(minDist, (cA[2,1]-cB[2,1])^2 + (cA[2,2]-cB[2,2])^2)
        # edge-to-edge distance: center distance minus both radii
        return(sqrt(minDist) - getRadius(model, time, cellA) -
            getRadius(model, time, cellB))
    }
)
#' @rdname getLocalDensity-methods
#' @aliases getLocalDensity
setMethod('getLocalDensity', signature('OffLatticeModel'),
    function(model, time, cell, radius)
    {
        # euclidean distance between two (x,y) points
        dis <- function(a,b) sqrt((a[1]-b[1])^2 + (a[2]-b[2])^2)
        # generate a grid of points covering the disc of radius `rad`
        # around p1; if p2 is given, keep only points closer to p1 than to
        # p2 (one half of an elongated, dumbbell-shaped cell)
        genGrid <- function(p1, rad, p2=NULL)
        {
            width <- seq(-rad, rad, length.out=10)
            grid <- as.matrix(unname(expand.grid(width, width)))
            grid <- grid[apply(grid, 1, dis, b=c(0,0)) < rad,]
            grid <- t(t(grid) + p1)
            if (!is.null(p2))
                grid <- grid[apply(grid,1,dis,b=p1) < apply(grid,1,dis,b=p2),]
            return(grid)
        }
        # find cells whose surface comes within `radius` of the query cell
        cellRad <- getRadius(model, time, cell)
        cells <- setdiff(seq_len(getNumberOfCells(model, time)), cell)
        cells <- cells[sapply(cells, function(c) cellRad +
            getCellDistance(model, time, cell, c) < radius)]
        if (!length(cells)) return(0)
        # get cell info; vapply guarantees a 2-row matrix even when only a
        # single neighboring cell remains (sapply would return a bare vector)
        coords <- vapply(cells, getCoordinates, numeric(2), model=model,
            time=time)
        rad <- sapply(cells, getRadius, model=model, time=time)
        axisLen <- sapply(cells, getAxisLength, model=model, time=time)
        axisAng <- sapply(cells, getAxisAngle, model=model, time=time)
        type <- sapply(cells, getCellType, model=model, time=time)
        sz <- sapply(type, function(t) model@cellTypes[[t]]@size)
        # find cell center coordinates
        # BUG FIX: the original indexed coords[1]/coords[2] (i.e. only the
        # first neighbor's x and y) for every neighbor; full coordinate rows
        # are used now, matching the plotCells implementation
        term <- 0.5 * axisLen - rad
        p1 <- cbind(coords[1,]+term*cos(axisAng), coords[2,]+term*sin(axisAng))
        p2 <- cbind(coords[1,]-term*cos(axisAng), coords[2,]-term*sin(axisAng))
        grid <- matrix(nrow=0, ncol=2)
        for (c in seq_along(cells))
        {
            # round cells (axis length ~ diameter, within tolerance) get one
            # grid; elongated cells get a half-grid around each center
            if (isTRUE(all.equal(2 * rad[c], axisLen[c], tolerance=1e-3)))
                grid <- rbind(grid, genGrid(coords[,c], rad[c]))
            else
                grid <- rbind(grid, rbind(genGrid(p1[c,], rad[c], p2[c,]),
                    genGrid(p2[c,], rad[c], p1[c,])))
        }
        # fraction of neighbor grid points inside the query radius, scaled
        # by neighbor areas over the annulus around the query cell
        cellCoords <- getCoordinates(model, time, cell)
        numPoints <- apply(grid, 1, function(p) dis(p, cellCoords) < radius)
        prop <- sum(numPoints) / nrow(grid)
        area <- sapply(seq_along(cells), function(c)
            ifelse(isTRUE(all.equal(2*rad[c], axisLen[c], tolerance=1e-3)),
                rad[c]^2, 2*sz[c]))
        return(prop * sum(area) / (radius^2 - cellRad^2))
    }
)
#' @rdname plotCells-methods
#' @aliases plotCells
#' @importFrom graphics plot symbols
setMethod('plotCells', signature('OffLatticeModel'),
    function(model, time)
    {
        # get all the cell information at the requested time
        nCells <- getNumberOfCells(model, time)
        coords <- sapply(1:nCells, getCoordinates, model=model, time=time)
        radii <- sapply(1:nCells, getRadius, model=model, time=time)
        axisLen <- sapply(1:nCells, getAxisLength, model=model, time=time)
        axisAng <- sapply(1:nCells, getAxisAngle, model=model, time=time)
        phases <- sapply(1:nCells, getCellPhase, model=model, time=time)
        # each cell is drawn as two circles, so the phase flags are doubled
        mitNdx <- rep(phases, 2) == 'M'

        # calculate plot bounds: fixed boundary when present, otherwise the
        # extent of the data (padded by 2)
        mn <- ifelse(model@boundary > 0, -model@boundary-2, min(coords)-2)
        mx <- ifelse(model@boundary > 0, model@boundary+2, max(coords)+2)

        # create the (empty) plot template with 1:1 aspect ratio
        plot(c(mn, mx), c(mn, mx), main=paste("Plot of CellModel At Time",
            time), xlab="", ylab="", type="n", asp=1)

        # get all (x,y) pairs for each of the two cell centers
        x1 <- coords[1,] + (0.5 * axisLen - radii) * cos(axisAng)
        x2 <- coords[1,] - (0.5 * axisLen - radii) * cos(axisAng)
        y1 <- coords[2,] + (0.5 * axisLen - radii) * sin(axisAng)
        y2 <- coords[2,] - (0.5 * axisLen - radii) * sin(axisAng)

        # combine all coordinate pairs along with the radii
        x <- c(x1,x2)
        y <- c(y1,y2)
        rad <- c(radii, radii)

        # plot the cells: mitotic ('M') cells in black, all others in bisque
        if (sum(mitNdx))
            symbols(x[mitNdx], y[mitNdx], circles=rad[mitNdx],
                inches=FALSE, add=TRUE, bg="black", fg="black")
        if (sum(!mitNdx))
            symbols(x[!mitNdx], y[!mitNdx], circles=rad[!mitNdx],
                inches=FALSE, add=TRUE, bg="bisque4", fg="bisque4")

        # draw boundary
        # NOTE(review): drawn even when model@boundary <= 0 (the unbounded
        # case) -- confirm whether this call should be conditional
        symbols(0, 0, circles = model@boundary, inches = FALSE, add = TRUE,
            lwd = 2)
    }
)
| 12,312 | gpl-3.0 |
687073ffd78a13a798d6fb57794917aa4bb64d9c | naokazumizuta/RecSys2013YelpBusinessRatingPrediction | r/init.R | # initial settings
# NOTE(review): machine-specific absolute path; a relative path or an
# environment variable would make the script portable.
root <- "C:/Users/nao/Documents/GitHub/RecSys2013YelpBusinessRatingPrediction"

# Build a named list of project sub-directory paths and create any that are
# missing (existing directories are left untouched; warnings suppressed).
folder <- list()
folder_name <- c(
    "data",
    "docs",
    "log",
    "py",
    "r",
    "raw",
    "rdata",
    "submit")
for(name in folder_name) {
    folder[[name]] <- file.path(root, name)
    dir.create(folder[[name]], showWarnings = FALSE)
}
# metric: root mean squared error between predictions and ground truth
RMSE <- function(predicted, actual) {
    residuals <- predicted - actual
    sqrt(mean(residuals^2))
}
| 434 | mit |
# ba490b7f4c43b7f6faccdfb56b4283561b3c4fbf | duhi23/CouchDB | classify_emotion.R
# Classify the emotion expressed in each document of `textColumns`.
#
# Each document is scored against the six-emotion lexicon shipped with the
# 'sentiment' package (anger, disgust, fear, joy, sadness, surprise); the
# highest-scoring emotion is reported as BEST_FIT.
#
# @param textColumns documents to classify; passed to create_matrix() to
#        build a document-term matrix
# @param algorithm "bayes" for log-prior weighted scoring, any other value
#        for simple vote counting
# @param prior prior weight used by the "bayes" algorithm
# @param verbose print per-document / per-word scoring details
# @param ... forwarded to create_matrix()
# @return character matrix, one row per document, with columns ANGER,
#         DISGUST, FEAR, JOY, SADNESS, SURPRISE and BEST_FIT
classify_emotion <- function(textColumns,algorithm="bayes",prior=1.0,verbose=FALSE,...) {
    matrix <- create_matrix(textColumns,...)
    lexicon <- read.csv(system.file("data/emotions.csv.gz",package="sentiment"),header=FALSE)

    emotion_names <- c("anger","disgust","fear","joy","sadness","surprise")

    # lexicon entries per emotion (naive-Bayes denominators) plus the total
    counts <- lapply(emotion_names, function(e) length(which(lexicon[,2]==e)))
    names(counts) <- emotion_names
    counts[["total"]] <- nrow(lexicon)

    # per-emotion sub-lexicons, hoisted out of the loops below (previously
    # recomputed for every word of every document)
    sublex <- lapply(emotion_names, function(e) lexicon[which(lexicon[,2]==e),])
    names(sublex) <- emotion_names

    # preallocate one result row per document instead of rbind-ing in a loop
    rows <- vector("list", nrow(matrix))
    for (i in seq_len(nrow(matrix))) {
        if (verbose) print(paste("DOCUMENT",i))
        scores <- list(anger=0,disgust=0,fear=0,joy=0,sadness=0,surprise=0)
        doc <- matrix[i,]
        words <- findFreqTerms(doc,lowfreq=1)

        for (word in words) {
            for (key in names(scores)) {
                emotions <- sublex[[key]]
                # pmatch: unique (possibly partial) match against the lexicon
                index <- pmatch(word,emotions[,1],nomatch=0)
                if (index > 0) {
                    entry <- emotions[index,]
                    category <- as.character(entry[[2]])
                    count <- counts[[category]]

                    score <- 1.0
                    if (algorithm=="bayes") score <- abs(log(score*prior/count))

                    if (verbose) {
                        print(paste("WORD:",word,"CAT:",category,"SCORE:",score))
                    }

                    scores[[category]] <- scores[[category]]+score
                }
            }
        }

        if (algorithm=="bayes") {
            # add each emotion's absolute log base rate in the lexicon
            for (key in names(scores)) {
                count <- counts[[key]]
                total <- counts[["total"]]
                score <- abs(log(count/total))
                scores[[key]] <- scores[[key]]+score
            }
        } else {
            # tiny offset so an all-zero score vector is not exactly zero
            for (key in names(scores)) {
                scores[[key]] <- scores[[key]]+0.000001
            }
        }

        best_fit <- names(scores)[which.max(unlist(scores))]
        # NOTE(review): 3.09234 appears to be the empty-document "bayes"
        # baseline for disgust; documents at that baseline are left NA --
        # confirm before relying on this magic threshold.
        if (best_fit == "disgust" && as.numeric(unlist(scores[2]))-3.09234 < .01) best_fit <- NA
        rows[[i]] <- c(scores$anger,scores$disgust,scores$fear,scores$joy,scores$sadness,scores$surprise,best_fit)
    }

    documents <- do.call(rbind, rows)
    colnames(documents) <- c("ANGER","DISGUST","FEAR","JOY","SADNESS","SURPRISE","BEST_FIT")
    return(documents)
}
# | 2,787 | gpl-3.0 |
91fe2aab3520f8a0f17f86fdc24cc250f82c1742 | kakaba2009/MachineLearning | r/learn/times/stft.R | library(e1071)
library(xts)
# local helpers; presumably provide loadSymbol() below -- TODO confirm
source('./mylib/mcalc.R')
source('./mylib/mtool.R')
# raise the print limit so long series can be displayed in full
options(max.print=5.5E5)
# NOTE(review): `df` and `ts` mask stats::df and stats::ts in this script;
# renaming the locals would avoid confusion.
# load the JPY/USD series; the result must have a Close column (used below)
df <- loadSymbol('JPY=X')
df <- df$Close
ts <- as.ts(df)
# keep only the most recent 1000 observations
x <- tail(ts, n=1000)
# short-time Fourier transform (e1071): window 6, step 1, 64 coefficients
y <- stft(x, win=6, inc=1, coef=64)
plot(y) | 231 | apache-2.0 |
1fd36dbce6955314812dfa1ddc1934bb59eebafc | rstudio/reticulate | tests/testthat/resources/venv-activate.R |
# Activate the virtualenv named on the command line, then print Python's
# module search path (one entry per line) for the test harness to inspect.
venv <- commandArgs(TRUE)[[1L]]

# Drop environment overrides so use_virtualenv() alone selects the Python.
Sys.unsetenv("RETICULATE_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON_ENV")

reticulate::use_virtualenv(venv, required = TRUE)
writeLines(reticulate::import("sys")$path)
| 223 | apache-2.0 |
92e8032a1f2328d0d10c16745c567570796222f6 | kapsitis/ddgatve-stat | nms-reports/topResults.R |
# listInit <- function(tensBySch, tensByLang, tensByMun, tensByGend,
# sch, lang, mun, gend) {
# if (!sch %in% names(tensBySch)) {
# tensBySch[[sch]] <- 0
# }
# if (!lang %in% names(tensByLang)) {
# tensByLang[[lang]] <- 0
# }
# if (!mun %in% names(tensByMun)) {
# tensByMun[[mun]] <- 0
# }
# if (!gend %in% names(tensByGend)) {
# tensByGend[[gend]] <- 0
# }
#
# }
# Count, for the given grades, how many maximal 10-point problem scores
# occur, tallied by school, language, municipality and gender.
# NOTE(review): reads the global data frame `results` rather than taking it
# as a parameter -- consider passing it in explicitly.
# @param grades vector of grade levels to include
# @return list with tallies by school (tbs), language (tbl),
#         municipality (tbm) and gender (tbg)
getMaxPointLists <- function(grades) {
    tensBySch <- list()
    tensByLang <- list()
    tensByMun <- list()
    tensByGend <- list()

    # increment lst[[key]], creating the entry at zero on first sight
    # (replaces the repeated init boilerplate sketched in listInit above)
    bump <- function(lst, key) {
        if (!key %in% names(lst)) lst[[key]] <- 0
        lst[[key]] <- lst[[key]] + 1
        lst
    }

    for (i in seq_len(nrow(results))) {
        # the grade filter is row-wide, so hoist it out of the problem loop
        if (!results[i, "Grade"] %in% grades) next
        sch <- as.character(results[i, "Skola"])
        lang <- as.character(results[i, "Language"])
        mun <- as.character(results[i, "Municipality"])
        gend <- as.character(results[i, "Dzimums"])
        # one tally per problem (Uzd1..Uzd5) that has the maximal score 10
        for (j in 1:5) {
            if (results[i, paste0("Uzd", j)] == 10) {
                tensBySch <- bump(tensBySch, sch)
                tensByLang <- bump(tensByLang, lang)
                tensByMun <- bump(tensByMun, mun)
                tensByGend <- bump(tensByGend, gend)
            }
        }
    }

    return(list(tbs = tensBySch,
                tbl = tensByLang,
                tbm = tensByMun,
                tbg = tensByGend))
}
# All grades pooled: rate of 10-point scores per participant, by gender
grades <- c(5:12)
maxPointLists <- getMaxPointLists(grades)
tensByGend = maxPointLists$tbg
# participant counts by gender for the selected grades
gendParticip <- table(results$Dzimums[results$Grade %in% grades])[c("Male","Female")]
gendTens <- sapply(c("Male","Female"), function(arg) {tensByGend[[arg]]})
# bar widths encode participation, bar heights the max-score rate
barplot(
    height=gendTens/gendParticip,
    width=gendParticip,
    col=c("darkblue","darkred"),
    names.arg=sprintf(c("Males\n%s","Females\n%s"),gendParticip),
    ylab="Max-scores/participants",
    space=0, main="10-point Scores in a Single Paper (by Gender)")
# Per-grade breakdown: share of 10-point scores among participants, by
# gender. Vectors are preallocated instead of being grown with c() inside
# the loop.
gradeSeq <- 5:12
maleMaxShare <- numeric(length(gradeSeq))
femaleMaxShare <- numeric(length(gradeSeq))
for (k in seq_along(gradeSeq)) {
    grades <- gradeSeq[k]
    maxPointLists <- getMaxPointLists(grades)
    tensByGend <- maxPointLists$tbg
    gendParticip <-
        table(results$Dzimums[results$Grade %in% grades])[c("Male","Female")]
    gendTens <- sapply(c("Male","Female"), function(arg) {tensByGend[[arg]]})
    shares <- gendTens / gendParticip
    maleMaxShare[k] <- shares[1]
    femaleMaxShare[k] <- shares[2]
}
# NOTE(review): the y-axis range only considers the male series; female
# points above max(maleMaxShare) would fall outside the plotted range.
plot(gradeSeq,
     maleMaxShare,
     ylab="Max-scores/Participants",
     xlab="Grade",
     main="Max-scores per Grade and Gender",
     type="o",
     col="darkblue",
     lwd=2,
     ylim=c(0,max(maleMaxShare)))
# ylim is dropped here: points() ignores it and previously emitted a
# "not a graphical parameter" warning
points(gradeSeq,
       femaleMaxShare,
       type="o",
       col="darkred",
       lwd=2)
grid(col="black")
| 3,115 | apache-2.0 |
1fd36dbce6955314812dfa1ddc1934bb59eebafc | terrytangyuan/reticulate | tests/testthat/resources/venv-activate.R |
# Test helper: activate the virtualenv given as the first command-line
# argument and print the resulting Python sys.path, one entry per line.
args <- commandArgs(TRUE)
venv <- args[[1]]

# clear any overrides so use_virtualenv() alone determines the Python used
Sys.unsetenv("RETICULATE_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON_ENV")

reticulate::use_virtualenv(venv, required = TRUE)

sys <- reticulate::import("sys")
writeLines(sys$path)
| 223 | apache-2.0 |
# b36a145f0df82bfc348c3e54046568f16d0e000c | arcolombo/sleuthData | R/zzz.R
cat("Results from GSE37704 are available in",
    system.file("extdata", "results", package="sleuthData"))
# NOTE(review): top-level code in R/zzz.R runs whenever the package is
# loaded; the conventional mechanism for a load-time message is
# packageStartupMessage() inside .onAttach(), which users can suppress --
# confirm before changing.
| 108 | artistic-2.0 |
cb79b016a986ebbc62d38bad2d5781681d177182 | rstudio/reticulate | tests/testthat/test-python-objects.R | context("objects")
test_that("the length of a Python object can be computed", {
skip_if_no_python()
m <- py_eval("[1, 2, 3]", convert = FALSE)
expect_equal(length(m), 3L)
x <- py_eval("None", convert = FALSE)
expect_identical(length(x), 0L)
expect_identical(py_bool(x), FALSE)
expect_error(py_len(x), "'NoneType' has no len()")
x <- py_eval("object()", convert = FALSE)
expect_identical(length(x), 1L)
expect_identical(py_bool(x), TRUE)
expect_error(py_len(x), "'object' has no len()")
})
test_that("python objects with a __setitem__ method can be used", {
skip_if_no_python()
library(reticulate)
py_run_string('
class M:
def __getitem__(self, k):
return "M"
')
m <- py_eval('M()', convert = TRUE)
expect_equal(m[1], "M")
m <- py_eval('M()', convert = FALSE)
expect_equal(m[1], r_to_py("M"))
})
test_that("py_id() returns unique strings; #1216", {
skip_if_no_python()
pypy_id <- py_eval("lambda x: str(id(x))")
o <- py_eval("object()")
id <- pypy_id(o)
expect_identical(py_id(o), pypy_id(o))
expect_identical(py_id(o), id)
expect_false(py_id(py_eval("object()")) == py_id(py_eval("object()")))
expect_true(py_id(py_eval("object")) == py_id(py_eval("object")))
})
| 1,236 | apache-2.0 |
5430aa0337d5f5f0393413efc8a523835afcc6d3 | MulletLab/leafangle_supplement | h2_and_qtl/rqtl_mqm_scripts/scantwo_perm_R07018xR07020.R | ################################################################################
# Calculate Penalties for curated Multiple QTL Mapping in R\qtl #
# Written by Sandra Truong 10/14/2014 #
# Much of the code originates from http://www.rqtl.org/tutorials #
################################################################################
# Rscript this_script.R ${SCANTWOPERMPATH} ${PERMUTATIONS_PER_JOB} ${JOBNUMBER} ${RQTLCROSSPATH} ${CROSS}
# takes in arguement --args
args <- commandArgs(TRUE)
operm_scantwo_filepath <- args[1]
setwd(file.path(operm_scantwo_filepath))
perms_per_job = args[2]
operm_scantwo_iteration <- paste(args[3], "operm_scantwo", sep="_")
operm_scantwo_name <- paste(operm_scantwo_iteration, "RDS", sep=".")
input_file_directory <- file.path(args[4])
input_file_name <- paste("./", args[5], ".csv", sep="")
input_file_name_cross <- file.path(input_file_name)
generation_interval = 5
phenotype_list=c("angle_leaf_3_avg_gh204A_2013_normalized",
"angle_leaf_4_avg_gh204A_2013_normalized",
"angle_leaf_3_avg_csfield_2014_rep1_normalized",
"angle_leaf_4_avg_csfield_2014_rep1_normalized",
"angle_leaf_5_avg_csfield_2014_rep1_normalized",
"angle_leaf_3_avg_csfield_2014_rep2_normalized",
"angle_leaf_4_avg_csfield_2014_rep2_normalized",
"angle_leaf_5_avg_csfield_2014_rep2_normalized")
################################################################################
# Direct path to R libraries in wsgi-hpc
# install R libraries
# > install.packages("qtl", lib=c("/data/thkhavi/r_lib")
.libPaths("/data/thkhavi/r_lib")
# load R/qtl library
library(qtl)
# load snow library
library(snow)
################################################################################
# Read in cross
cross_inputcross <- read.cross(format="csvr",
    dir=input_file_directory,
    file=input_file_name_cross,
    BC.gen=0,
    F.gen=generation_interval,
    genotypes=c("AA","AB","BB","D","C"))
# If the cross type is considered a RIL keep the next line, if not comment out with "#"
# cross_inputcross <- convert2riself(cross_inputcross)

# scantwo as currently implemented (10/2014) is unable to handle > 1400 markers
# If your genetic map is made of more than 1400 markers, you may need to thin out markers:
# Choose the distance (in cM) to thin out
marker_distance = 2

# Check and drop markers, if appropriate
# NOTE(review): the comment above says the limit is 1400 markers but the
# threshold tested here is 100 -- confirm which value is intended.
if (totmar(cross_inputcross) > 100)
{
    cross_inputcross_map <- pull.map(cross_inputcross)
    markers2keep <- lapply(cross_inputcross_map, pickMarkerSubset, min.distance=marker_distance)
    cross_sub <- pull.markers(cross_inputcross, unlist(markers2keep))
    cross_inputcross <- cross_sub
}

cross_inputcross <- calc.genoprob(cross_inputcross, map.function="haldane")
cross_inputcross <- sim.geno(cross_inputcross, map.function="haldane")
# report the marker count after thinning
print(totmar(cross_inputcross))

# scantwo permutations: base seed offset by the job number so that parallel
# jobs draw distinct random streams
seed_number_base = 85842518
seed_number = seed_number_base + as.integer(args[3])
set.seed(seed_number)
operm_scantwo <- scantwo(cross=cross_inputcross, n.perm = as.integer(perms_per_job), pheno.col = phenotype_list, n.cluster = 8)
saveRDS(operm_scantwo, file = operm_scantwo_name)
| 3,398 | gpl-2.0 |
1c89d8e03f966a4b6e584e30de574532513b7b1c | zlskidmore/GenVisR | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list of data frame and color vector
covBars_qual <- function(x)
{
    # Coerce x to a matrix if needed, warning the user of the attempt
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }

    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Rownames encode coverage values and must be interpretable as integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) <- as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind) == nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) <- as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # BUG FIX: `memo` was previously never assigned in this branch,
            # so message(memo) emitted a stale (or undefined) message
            memo <- paste0("some rownames of x cannot be interpreted as ",
                           "integers, they will be removed")
            message(memo)
            # drop=FALSE keeps x a matrix even if only one row survives
            x <- x[-naind, , drop=FALSE]
        }
    }

    return(list(x))
}
| 1,621 | cc0-1.0 |
# 7787765a9d1cccd54b1f2b1f52e4d8da778dcae5 | richelbilderbeek/R | old_notes/Phylogenies/get_test_fasta_filename.R
# Locate the test FASTA file, checking the working directory first and the
# repository checkout location second.
# @return the first existing candidate path; stops with an error otherwise
get_test_fasta_filename <- function() {
    fasta_filename <- "convert_alignment_to_fasta.fasta"
    #fasta_filename <- "convert_fasta_file_to_sequences.fasta"
    if (file.exists(fasta_filename)) { return (fasta_filename) }
    # fall back to the absolute checkout location (paste0 over paste(sep=""))
    fasta_filename <- paste0("~/GitHubs/R/Phylogenies/", fasta_filename)
    if (file.exists(fasta_filename)) { return (fasta_filename) }
    # stop() with a message replaces the previous print()-then-bare-stop()
    # pair so callers receive a proper error condition
    stop("get_test_fasta_filename: cannot find file", call. = FALSE)
}
#get_test_fasta_filename() | 451 | gpl-3.0 |
a8fac03dddeb188cf33d668bfa70d4a3aeb36cc8 | fernandojunior/online-players-behavior | src/R/evaluation_measures.R | # Functions to evaluate a predictive model
# http://journals.plos.org/plosone/article/figure/image?size=large&id=info:doi/10.1371/journal.pone.0118432.t001
# Cross-tabulate predicted outcomes (rows) against true targets (columns):
#
#             target
#   outcome   0    1
#         0   TN   FN
#         1   FP   TP
confusion_matrix <- function(outcomes, targets) {
    table(outcomes, targets)
}
# Fraction of all predictions that were correct (matrix trace over total).
accuracy <- function(confusion_matrix) {
    correct <- sum(diag(confusion_matrix))
    correct / sum(confusion_matrix)
}
# Precision (positive predictive value): of everything predicted positive,
# the fraction that truly is positive -- TP / (TP + FP). In the winning-team
# example: correct winner predictions over all winner predictions.
precision <- function(tp, fp) {
    tp / (tp + fp)
}
# Recall (sensitivity, true positive rate, probability of detection): of all
# actual positives, the fraction correctly identified -- TP / (TP + FN).
# In the winning-team example: correctly predicted winners over all winners.
recall <- function(tp, fn) {
    tp / (tp + fn)
}
# Specificity (true negative rate): of all actual negatives, the fraction
# correctly identified -- TN / (TN + FP).
specificity <- function(tn, fp) {
    tn / (tn + fp)
}
# False positive rate (fall-out, probability of false alarm):
# FPR = FP / (FP + TN).
# BUG FIX: the original body referenced an undefined free variable `fp`;
# it is now an explicit second parameter (`tn` stays first so any existing
# positional caller keeps its meaning).
false_positive_rate <- function (tn, fp) {
    fp / (tn + fp)
}
# F1 score (harmonic mean of precision and recall) computed from a 2x2
# confusion matrix laid out as outcomes x targets.
f_measure <- function(confusion_matrix) {
    tp <- confusion_matrix[2, 2]
    fp <- confusion_matrix[2, 1]
    fn <- confusion_matrix[1, 2]
    p <- precision(tp, fp)
    r <- recall(tp, fn)
    2 * (p * r) / (p + r)
}
# Full evaluation of a set of predictions: confusion matrix, accuracy
# and F1 score, bundled in a named list.
evaluate_outcomes = function (targets, outcomes) {
    cm <- confusion_matrix(outcomes, targets)
    list(
        confusion_matrix = cm,
        accuracy = accuracy(cm),
        f_measure = f_measure(cm)
    )
}
# NOTE(review): installing a package every time this file is sourced is a
# heavy side effect; consider moving the install to a one-time setup script.
install.packages('ROCR', dependencies=TRUE)
# NOTE(review): import_package() is not base R (it appears to come from the
# 'modules' package, which must already be attached here) -- confirm.
import_package('ROCR', attach=TRUE)
# Plot an ROC curve (true positive rate vs false positive rate).
#
# outcomes: numeric prediction scores (higher = more likely positive)
# targets:  true class labels
# Side effect: draws a dotted black ROC curve on the active graphics device.
roc_curve = function (outcomes, targets) {
    # Namespace-qualify 'prediction' as well as 'performance' (the original
    # qualified only the latter), so the function works even when ROCR is
    # installed but not attached to the search path.
    pred <- ROCR::prediction(predictions = outcomes, labels = targets)
    perf <- ROCR::performance(pred, "tpr", "fpr")
    # plotting the ROC curve
    plot(perf, col = "black", lty = 3, lwd = 3)
}
| 2,712 | mit |
1c89d8e03f966a4b6e584e30de574532513b7b1c | jkunisak/GenVisR | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list whose single element is the coerced/filtered matrix
covBars_qual <- function(x)
{
    # Check that x is a matrix with at least 1 row
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }
    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Check that rownames of x can be converted to integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) = as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind)==nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) = as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # Bug fix: the paste0() result was previously discarded, so the
            # following message(memo) raised "object 'memo' not found" (or
            # emitted a stale message from an earlier branch).
            memo <- paste0("some rownames of x cannot be interpreted as integers, ",
                           "they will be removed")
            message(memo)
            # drop = FALSE keeps x a matrix even when only one row survives
            x <- x[-naind, , drop = FALSE]
        }
    }
    return(list(x))
}
| 1,621 | cc0-1.0 |
cb6dfce429bc9ced5678b9dd40ecb7e791747e2e | wch/r-source | src/library/datasets/data/Harman74.cor.R | "Harman74.cor" <-
structure(list(cov = structure(c(1, 0.318, 0.403, 0.468, 0.321,
0.335, 0.304, 0.332, 0.326, 0.116, 0.308, 0.314, 0.489, 0.125,
0.238, 0.414, 0.176, 0.368, 0.27, 0.365, 0.369, 0.413, 0.474,
0.282, 0.318, 1, 0.317, 0.23, 0.285, 0.234, 0.157, 0.157, 0.195,
0.057, 0.15, 0.145, 0.239, 0.103, 0.131, 0.272, 0.005, 0.255,
0.112, 0.292, 0.306, 0.232, 0.348, 0.211, 0.403, 0.317, 1, 0.305,
0.247, 0.268, 0.223, 0.382, 0.184, -0.075, 0.091, 0.14, 0.321,
0.177, 0.065, 0.263, 0.177, 0.211, 0.312, 0.297, 0.165, 0.25,
0.383, 0.203, 0.468, 0.23, 0.305, 1, 0.227, 0.327, 0.335, 0.391,
0.325, 0.099, 0.11, 0.16, 0.327, 0.066, 0.127, 0.322, 0.187,
0.251, 0.137, 0.339, 0.349, 0.38, 0.335, 0.248, 0.321, 0.285,
0.247, 0.227, 1, 0.622, 0.656, 0.578, 0.723, 0.311, 0.344, 0.215,
0.344, 0.28, 0.229, 0.187, 0.208, 0.263, 0.19, 0.398, 0.318,
0.441, 0.435, 0.42, 0.335, 0.234, 0.268, 0.327, 0.622, 1, 0.722,
0.527, 0.714, 0.203, 0.353, 0.095, 0.309, 0.292, 0.251, 0.291,
0.273, 0.167, 0.251, 0.435, 0.263, 0.386, 0.431, 0.433, 0.304,
0.157, 0.223, 0.335, 0.656, 0.722, 1, 0.619, 0.685, 0.246, 0.232,
0.181, 0.345, 0.236, 0.172, 0.18, 0.228, 0.159, 0.226, 0.451,
0.314, 0.396, 0.405, 0.437, 0.332, 0.157, 0.382, 0.391, 0.578,
0.527, 0.619, 1, 0.532, 0.285, 0.3, 0.271, 0.395, 0.252, 0.175,
0.296, 0.255, 0.25, 0.274, 0.427, 0.362, 0.357, 0.501, 0.388,
0.326, 0.195, 0.184, 0.325, 0.723, 0.714, 0.685, 0.532, 1, 0.17,
0.28, 0.113, 0.28, 0.26, 0.248, 0.242, 0.274, 0.208, 0.274, 0.446,
0.266, 0.483, 0.504, 0.424, 0.116, 0.057, -0.075, 0.099, 0.311,
0.203, 0.246, 0.285, 0.17, 1, 0.484, 0.585, 0.408, 0.172, 0.154,
0.124, 0.289, 0.317, 0.19, 0.173, 0.405, 0.16, 0.262, 0.531,
0.308, 0.15, 0.091, 0.11, 0.344, 0.353, 0.232, 0.3, 0.28, 0.484,
1, 0.428, 0.535, 0.35, 0.24, 0.314, 0.362, 0.35, 0.29, 0.202,
0.399, 0.304, 0.251, 0.412, 0.314, 0.145, 0.14, 0.16, 0.215,
0.095, 0.181, 0.271, 0.113, 0.585, 0.428, 1, 0.512, 0.131, 0.173,
0.119, 0.278, 0.349, 0.11, 0.246, 0.355, 0.193, 0.35, 0.414,
0.489, 0.239, 0.321, 0.327, 0.344, 0.309, 0.345, 0.395, 0.28,
0.408, 0.535, 0.512, 1, 0.195, 0.139, 0.281, 0.194, 0.323, 0.263,
0.241, 0.425, 0.279, 0.382, 0.358, 0.125, 0.103, 0.177, 0.066,
0.28, 0.292, 0.236, 0.252, 0.26, 0.172, 0.35, 0.131, 0.195, 1,
0.37, 0.412, 0.341, 0.201, 0.206, 0.302, 0.183, 0.243, 0.242,
0.304, 0.238, 0.131, 0.065, 0.127, 0.229, 0.251, 0.172, 0.175,
0.248, 0.154, 0.24, 0.173, 0.139, 0.37, 1, 0.325, 0.345, 0.334,
0.192, 0.272, 0.232, 0.246, 0.256, 0.165, 0.414, 0.272, 0.263,
0.322, 0.187, 0.291, 0.18, 0.296, 0.242, 0.124, 0.314, 0.119,
0.281, 0.412, 0.325, 1, 0.324, 0.344, 0.258, 0.388, 0.348, 0.283,
0.36, 0.262, 0.176, 0.005, 0.177, 0.187, 0.208, 0.273, 0.228,
0.255, 0.274, 0.289, 0.362, 0.278, 0.194, 0.341, 0.345, 0.324,
1, 0.448, 0.324, 0.262, 0.173, 0.273, 0.287, 0.326, 0.368, 0.255,
0.211, 0.251, 0.263, 0.167, 0.159, 0.25, 0.208, 0.317, 0.35,
0.349, 0.323, 0.201, 0.334, 0.344, 0.448, 1, 0.358, 0.301, 0.357,
0.317, 0.272, 0.405, 0.27, 0.112, 0.312, 0.137, 0.19, 0.251,
0.226, 0.274, 0.274, 0.19, 0.29, 0.11, 0.263, 0.206, 0.192, 0.258,
0.324, 0.358, 1, 0.167, 0.331, 0.342, 0.303, 0.374, 0.365, 0.292,
0.297, 0.339, 0.398, 0.435, 0.451, 0.427, 0.446, 0.173, 0.202,
0.246, 0.241, 0.302, 0.272, 0.388, 0.262, 0.301, 0.167, 1, 0.413,
0.463, 0.509, 0.366, 0.369, 0.306, 0.165, 0.349, 0.318, 0.263,
0.314, 0.362, 0.266, 0.405, 0.399, 0.355, 0.425, 0.183, 0.232,
0.348, 0.173, 0.357, 0.331, 0.413, 1, 0.374, 0.451, 0.448, 0.413,
0.232, 0.25, 0.38, 0.441, 0.386, 0.396, 0.357, 0.483, 0.16, 0.304,
0.193, 0.279, 0.243, 0.246, 0.283, 0.273, 0.317, 0.342, 0.463,
0.374, 1, 0.503, 0.375, 0.474, 0.348, 0.383, 0.335, 0.435, 0.431,
0.405, 0.501, 0.504, 0.262, 0.251, 0.35, 0.382, 0.242, 0.256,
0.36, 0.287, 0.272, 0.303, 0.509, 0.451, 0.503, 1, 0.434, 0.282,
0.211, 0.203, 0.248, 0.42, 0.433, 0.437, 0.388, 0.424, 0.531,
0.412, 0.414, 0.358, 0.304, 0.165, 0.262, 0.326, 0.405, 0.374,
0.366, 0.448, 0.375, 0.434, 1), dim = c(24, 24), dimnames = list(
c("VisualPerception", "Cubes", "PaperFormBoard", "Flags",
"GeneralInformation", "PargraphComprehension", "SentenceCompletion",
"WordClassification", "WordMeaning", "Addition", "Code",
"CountingDots", "StraightCurvedCapitals", "WordRecognition",
"NumberRecognition", "FigureRecognition", "ObjectNumber",
"NumberFigure", "FigureWord", "Deduction", "NumericalPuzzles",
"ProblemReasoning", "SeriesCompletion", "ArithmeticProblems"
), c("VisualPerception", "Cubes", "PaperFormBoard", "Flags",
"GeneralInformation", "PargraphComprehension", "SentenceCompletion",
"WordClassification", "WordMeaning", "Addition", "Code",
"CountingDots", "StraightCurvedCapitals", "WordRecognition",
"NumberRecognition", "FigureRecognition", "ObjectNumber",
"NumberFigure", "FigureWord", "Deduction", "NumericalPuzzles",
"ProblemReasoning", "SeriesCompletion", "ArithmeticProblems"
))), center = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0), n.obs = 145), names = c("cov",
"center", "n.obs"))
| 5,023 | gpl-2.0 |
32b5814be04ef6ed07b5b884809385407005a98f | jyfeather/LASSO-BN | R/auc_real.R | rm(list = ls())
# --- Setup: packages, experiment constants and pre-computed inputs ---------
require("genlasso") # Lasso solver
require("ROCR") # ROC
require("Matrix")
set.seed(2015)
# number of Monte Carlo samples per setting
kIteration <- 200
# number of variables (nodes) in the network
node.num <- 22
sig.set <- c(0.1, 0.3, 0.5, 0.7, 1, 1.5) # Mean shift magnitude
var.df <- 1 # 1, 2, 3, 4, 5 guessed amount of mean shift vars
ns <- 1 # 1, 2, 5, 10
# loads 'W' (weight matrix) and 'shifts.pos' (true shifted-node positions)
# -- presumably; the object names come from the saved workspaces, confirm
load("./dat/real/weighM")
load("./dat/real/shifts")
## LASSO-BN
# For every candidate shift position and every shift magnitude: load the
# simulated data, whiten each observation through the network weights W,
# flag the var.df largest-magnitude coefficients as detected shifts, and
# score detection frequency per node against the truth via ROC AUC.
auc <- matrix(data = NA, nrow = length(sig.set), ncol = length(shifts.pos))
for (pos in 1:length(shifts.pos)) {
  shift.real <- rep(0, node.num)
  shift.real[shifts.pos[pos]] = 1
  for (i in 1:length(sig.set)) {
    # loads 'dat' for this (magnitude, position) pair -- confirm object name
    load(paste("./dat/real/dat",sig.set[i], shifts.pos[pos], sep = "_"))
    dat <- dat[1:(kIteration*ns),]
    size <- nrow(dat)
    tmp.coef <- matrix(data=0, nrow=node.num, ncol=size)
    # identity design, so tmp.least reduces to the identity matrix
    tmp.x <- diag(node.num)
    tmp.least <- solve(t(tmp.x) %*% tmp.x) %*% tmp.x
    for (m in 1:size) {
      # W comes from the loaded 'weighM' workspace
      tmp.y <- solve(W) %*% dat[m,]
      tmp.least2 <- tmp.least %*% tmp.y
      # mark the var.df entries with the largest absolute value
      tmp.coef[sort(abs(tmp.least2), decreasing=T, index.return=T)$ix[1:var.df], m] = 1
    }
    # per-node detection frequency across all observations
    shift.l1 <- rep(0, node.num)
    for (k in 1:node.num) {
      shift.l1[k] <- nnzero(tmp.coef[k,]) / size
    }
    roc.pred <- prediction(shift.l1, shift.real)
    roc.perf <- performance(roc.pred, "auc")
    auc[i, pos] = as.numeric(roc.perf@y.values)
  }
}
print(auc)
write.csv(auc, file="./dat/auc.csv")
## VS-MSPC
# Same experiment with the VS-MSPC method: estimate W from the data
# covariance (Cholesky factor), solve a generalized lasso per observation
# and flag the var.df largest coefficients at the (var.df+1)-th solution.
# NOTE(review): this reassigns W (clobbering the loaded weight matrix) and
# overwrites the ./dat/auc.csv written by the LASSO-BN section above.
auc <- matrix(data = NA, nrow = length(sig.set), ncol = length(shifts.pos))
for (pos in 1:length(shifts.pos)) {
  shift.real <- rep(0, node.num)
  shift.real[shifts.pos[pos]] = 1
  for (i in 1:length(sig.set)) {
    load(paste("./dat/real/dat",sig.set[i], shifts.pos[pos], sep = "_"))
    dat <- dat[1:(kIteration*ns),]
    size <- nrow(dat)
    # lower-triangular Cholesky factor of the sample covariance
    W <- t(chol(cov(dat)))
    tmp.coef <- matrix(data=0, nrow=node.num, ncol=size)
    tmp.x <- solve(W)
    for (m in 1:size) {
      tmp.y <- tmp.x %*% dat[m,]
      fit <- genlasso(tmp.y, tmp.x, diag(node.num))
      tmp.coef[sort(fit$beta[,var.df+1], decreasing = T, index.return = T)$ix[1:var.df], m] <- 1
    }
    # per-node detection frequency across all observations
    shift.l1 <- rep(0, node.num)
    for (k in 1:node.num) {
      shift.l1[k] <- nnzero(tmp.coef[k,]) / size
    }
    roc.pred <- prediction(shift.l1, shift.real)
    roc.perf <- performance(roc.pred, "auc")
    auc[i, pos] = as.numeric(roc.perf@y.values)
  }
}
print(auc)
write.csv(auc, file="./dat/auc.csv")
| 2,364 | mit |
5430aa0337d5f5f0393413efc8a523835afcc6d3 | thkhavi/leafangle_supplement | h2_and_qtl/rqtl_mqm_scripts/scantwo_perm_R07018xR07020.R | ################################################################################
# Calculate Penalties for curated Multiple QTL Mapping in R\qtl #
# Written by Sandra Truong 10/14/2014 #
# Much of the code originates from http://www.rqtl.org/tutorials #
################################################################################
# Rscript this_script.R ${SCANTWOPERMPATH} ${PERMUTATIONS_PER_JOB} ${JOBNUMBER} ${RQTLCROSSPATH} ${CROSS}
# takes in arguement --args
# Run one array-job's worth of scantwo permutations: read the cross file,
# thin markers, run n.perm two-QTL permutations over the phenotype list and
# save the result as <jobnumber>_operm_scantwo.RDS in the output directory.
args <- commandArgs(TRUE)
operm_scantwo_filepath <- args[1]
setwd(file.path(operm_scantwo_filepath))
perms_per_job = args[2]
operm_scantwo_iteration <- paste(args[3], "operm_scantwo", sep="_")
operm_scantwo_name <- paste(operm_scantwo_iteration, "RDS", sep=".")
input_file_directory <- file.path(args[4])
input_file_name <- paste("./", args[5], ".csv", sep="")
input_file_name_cross <- file.path(input_file_name)
generation_interval = 5
# normalized leaf-angle phenotypes scanned jointly
phenotype_list=c("angle_leaf_3_avg_gh204A_2013_normalized",
	"angle_leaf_4_avg_gh204A_2013_normalized",
	"angle_leaf_3_avg_csfield_2014_rep1_normalized",
	"angle_leaf_4_avg_csfield_2014_rep1_normalized",
	"angle_leaf_5_avg_csfield_2014_rep1_normalized",
	"angle_leaf_3_avg_csfield_2014_rep2_normalized",
	"angle_leaf_4_avg_csfield_2014_rep2_normalized",
	"angle_leaf_5_avg_csfield_2014_rep2_normalized")
################################################################################
# Direct path to R libraries in wsgi-hpc
# install R libraries
# > install.packages("qtl", lib=c("/data/thkhavi/r_lib")
.libPaths("/data/thkhavi/r_lib")
# load R/qtl library
library(qtl)
# load snow library
library(snow)
################################################################################
# Read in cross
cross_inputcross <- read.cross(format="csvr",
	dir=input_file_directory,
	file=input_file_name_cross,
	BC.gen=0,
	F.gen=generation_interval,
	genotypes=c("AA","AB","BB","D","C"))
# If the cross type is considered a RIL keep the next line, if not comment out with "#"
# cross_inputcross <- convert2riself(cross_inputcross)
# scantwo as currently implemented (10/2014) is unable to handle > 1400 markers
# If your genetic map is made of more than 1400 markers, you may need to thin out markers:
# Choose the distance (in cM) to thin out
marker_distance = 2
# Check and drop markers, if appropriate
# NOTE(review): the comment above talks about 1400 markers but the condition
# below thins whenever there are more than 100 -- confirm which is intended.
if (totmar(cross_inputcross) > 100)
	{
	cross_inputcross_map <- pull.map(cross_inputcross)
	markers2keep <- lapply(cross_inputcross_map, pickMarkerSubset, min.distance=marker_distance)
	cross_sub <- pull.markers(cross_inputcross, unlist(markers2keep))
	cross_inputcross <- cross_sub
	}
cross_inputcross <- calc.genoprob(cross_inputcross, map.function="haldane")
cross_inputcross <- sim.geno(cross_inputcross, map.function="haldane")
print(totmar(cross_inputcross))
# scantwo permutations
# per-job seed: base seed offset by the job number so array jobs draw
# independent permutation streams
seed_number_base = 85842518
seed_number = seed_number_base + as.integer(args[3])
set.seed(seed_number)
operm_scantwo <- scantwo(cross=cross_inputcross, n.perm = as.integer(perms_per_job), pheno.col = phenotype_list, n.cluster = 8)
saveRDS(operm_scantwo, file = operm_scantwo_name)
| 3,398 | gpl-2.0 |
1c89d8e03f966a4b6e584e30de574532513b7b1c | zskidmor/GenVisR | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list whose single element is the coerced/filtered matrix
covBars_qual <- function(x)
{
    # Check that x is a matrix with at least 1 row
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }
    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Check that rownames of x can be converted to integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) = as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind)==nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) = as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # Bug fix: the paste0() result was previously discarded, so the
            # following message(memo) raised "object 'memo' not found" (or
            # emitted a stale message from an earlier branch).
            memo <- paste0("some rownames of x cannot be interpreted as integers, ",
                           "they will be removed")
            message(memo)
            # drop = FALSE keeps x a matrix even when only one row survives
            x <- x[-naind, , drop = FALSE]
        }
    }
    return(list(x))
}
| 1,621 | cc0-1.0 |
1c89d8e03f966a4b6e584e30de574532513b7b1c | zskidmor/GGgenome | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list whose single element is the coerced/filtered matrix
covBars_qual <- function(x)
{
    # Check that x is a matrix with at least 1 row
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }
    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Check that rownames of x can be converted to integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) = as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind)==nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) = as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # Bug fix: the paste0() result was previously discarded, so the
            # following message(memo) raised "object 'memo' not found" (or
            # emitted a stale message from an earlier branch).
            memo <- paste0("some rownames of x cannot be interpreted as integers, ",
                           "they will be removed")
            message(memo)
            # drop = FALSE keeps x a matrix even when only one row survives
            x <- x[-naind, , drop = FALSE]
        }
    }
    return(list(x))
}
| 1,621 | cc0-1.0 |
1c89d8e03f966a4b6e584e30de574532513b7b1c | Alanocallaghan/GenVisR | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list whose single element is the coerced/filtered matrix
covBars_qual <- function(x)
{
    # Check that x is a matrix with at least 1 row
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }
    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Check that rownames of x can be converted to integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) = as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind)==nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) = as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # Bug fix: the paste0() result was previously discarded, so the
            # following message(memo) raised "object 'memo' not found" (or
            # emitted a stale message from an earlier branch).
            memo <- paste0("some rownames of x cannot be interpreted as integers, ",
                           "they will be removed")
            message(memo)
            # drop = FALSE keeps x a matrix even when only one row survives
            x <- x[-naind, , drop = FALSE]
        }
    }
    return(list(x))
}
| 1,621 | cc0-1.0 |
1c89d8e03f966a4b6e584e30de574532513b7b1c | ahwagner/GenVisR | R/covBars_qual.R | #' Construct coverage cohort plot
#'
#' given a matrix construct a plot to display coverage as percentage bars for a
#' group of samples
#' @name covBars_qual
#' @param x object of class matrix containing rows for the coverage and columns
#' the sample names
#' @return a list whose single element is the coerced/filtered matrix
covBars_qual <- function(x)
{
    # Check that x is a matrix with at least 1 row
    if(!is.matrix(x))
    {
        memo <- paste0("Argument supplied to x is not a matrix... ",
                       "attempting to coerce")
        message(memo)
        x <- as.matrix(x)
    }
    if(nrow(x) < 1)
    {
        memo <- paste0("argument supplied to x needs at least one row")
        stop(memo)
    }

    # Check that rownames of x can be converted to integers
    if(is.null(rownames(x)))
    {
        memo <- paste0("all rownames of x are missing, they will be converted",
                       " to integers starting at 0")
        message(memo)
        rownames(x) = as.character(0:(nrow(x)-1))
    } else {
        naind <- which(is.na(as.integer(rownames(x))))
        if(length(naind)==nrow(x))
        {
            memo <- paste0("no rownames of x can be interpreted as integers, ",
                           "they will be converted to integers starting at 0")
            message(memo)
            rownames(x) = as.character(0:(nrow(x)-1))
        } else if(length(naind) > 0) {
            # Bug fix: the paste0() result was previously discarded, so the
            # following message(memo) raised "object 'memo' not found" (or
            # emitted a stale message from an earlier branch).
            memo <- paste0("some rownames of x cannot be interpreted as integers, ",
                           "they will be removed")
            message(memo)
            # drop = FALSE keeps x a matrix even when only one row survives
            x <- x[-naind, , drop = FALSE]
        }
    }
    return(list(x))
}
| 1,621 | cc0-1.0 |
7192f17c13810bf0cad9a5333c8338350063c9f2 | athyuttamre/accessible-facebook-ui | public/conversejs/components/otr/test/plot.R | #!/usr/bin/env Rscript
# most from ry
# https://github.com/joyent/node/blob/master/benchmark/plot.R
# Render a histogram of key-generation times from ./data.csv to hist.png.
library(ggplot2)
hist_png_filename <- "hist.png"
# open the PNG device; all subsequent plotting is written to this file
png(filename = hist_png_filename, width = 480, height = 380, units = "px")
# single tab-separated column of timings, named 'time'
da = read.csv(
  "./data.csv",
  sep="\t",
  header=F,
  col.names = c("time")
)
# NOTE(review): the device is never closed with dev.off(), so hist.png is
# only finalized when the R session exits normally -- consider adding it.
qplot(
  time,
  data=da,
  geom="histogram",
  #binwidth=10,
  main="xxx",
  xlab="key generation time (ms)"
)
print(hist_png_filename) | 448 | mit |
aa969a55ec80e4b042ac0eab07551f6b56e46a0d | alonzi/fundamentals | coding_tips/R/pipes.R | # stolen from Hadley Wickam
# Packages in the tidyverse load %>% for you automatically, so you don’t usually load magrittr explicitly.
f(x,y) # is pretty easy to read
f(g(x,y),z) # is a little harder to read
# R lets you pipe objects into arguments of functions with %>%
f(g(x,y),z)
# becomes
x %>%
g(y) %>%
f(z)
# here's another example from http://kbroman.org/hipsteR/
round(exp(diff(log(x))), 1)
#becomes
x %>%
log() %>%
diff() %>%
exp() %>%
round(1)
# if you don't want to pipe into the first argument use a .
2 %>% log(5, base=.)
# is equivalent to
log(5,base=2)
# even better example that will replace the previous example (stolen from Hadley Wickham)
foo_foo <- little_bunny()
bop_on(
scoop_up(
hop_through(foo_foo, forest),
field_mouse
),
head
)
# becomes
foo_foo %>%
hop_through(forest) %>%
scoop_up(field_mouse) %>%
bop_on(head)
| 899 | gpl-2.0 |
5c3530738d96bd0fde56a906a9211259bbd5236f | JackyCode/Data_Science | KMeans/self_kmeans.R | ############################################################
# self_kmeans.R:
# -------------------
# tells how to use custom function to achieve the k-means
#
############################################################
# license:
# --------
# Copyright (c) 2014 JackyCode
# Distributed under the [MIT License][MIT].
# [MIT]: http://www.opensource.org/licenses/mit-license.php
#
############################################################
se_kmeans <- function(x, k) {
    # Simple k-means clustering with MacQueen-style online updates.
    #
    # x: numeric matrix (or object coercible to one), one sample per row
    # k: number of clusters
    # Returns a list with:
    #   cluster - integer vector of labels in 1..k, one per sample
    #   center  - k x ncol(x) matrix of cluster centers (NA rows for
    #             clusters that never received a member)
    if (!is.matrix(x)) {
        x <- as.matrix(x)
    }
    n <- dim(x)[1]
    ## Randomly assign the samples to k clusters and compute initial centers
    cluster <- sample(1:k, n, replace = TRUE)
    center <- matrix(, nrow = k, ncol = dim(x)[2])
    for (i in 1:k) {
        members <- which(cluster == i)
        ## drop = FALSE keeps single-member clusters as one-row matrices;
        ## the original x[members, ] collapsed to a vector and broke apply()
        if (length(members) > 0) {
            center[i, ] <- colMeans(x[members, , drop = FALSE])
        }
    }
    ## Reassign each sample to its nearest center until nothing moves
    change_cluster <- rep(0, n)
    while (!all(cluster == change_cluster)) {
        change_cluster <- cluster
        for (i in 1:n) {
            ## Squared Euclidean distance from sample i to every center
            ## (sqrt is unnecessary for the argmin).
            ## Bug fix: the original computed center - x[i, ], which recycles
            ## the vector down the columns (column-major), so coordinate j was
            ## not subtracted from column j unless k == ncol(x); sweep()
            ## subtracts along the correct (column) margin.
            diffs <- sweep(center, 2, x[i, ])
            dis <- rowSums(diffs^2)
            ## which.min skips NA rows (empty clusters) and breaks ties by
            ## taking the first minimum, so it always yields a single index;
            ## which(dis == min(dis)) could return several and corrupt cluster[i]
            position <- which.min(dis)
            if (!(cluster[i] == position)) {
                ori_cluster_i <- cluster[i]
                cluster[i] <- position
                ## Refresh only the two affected centers; a cluster that
                ## becomes empty keeps its previous center.
                for (j in c(ori_cluster_i, position)) {
                    members <- which(cluster == j)
                    if (length(members) > 0) {
                        center[j, ] <- colMeans(x[members, , drop = FALSE])
                    }
                }
            }
        }
    }
    return(list(cluster = cluster, center = center))
}
# Demo: two 100x5 Gaussian samples with means 1 and 2, clustered into k = 2
# groups; a scatter plot of the samples (first two coordinates -- presumably,
# via xy.coords on the matrix) colored/labelled by cluster.
x1 <- matrix(rnorm(500, 1, 0.5), 100, 5)
x2 <- matrix(rnorm(500, 2, 0.5), 100, 5)
x <- rbind(x1, x2)
clusters <- se_kmeans(x, 2)
plot(x, col=clusters$cluster, pch=as.character(clusters$cluster), cex=0.5)
points(clusters$center, col='green', pch='o', cex = 2) | 1,741 | mit |
1aa19e527012b03d87257233e0b7e0f058ab8b8f | Zhiwu-Zhang-Lab/GAPIT | GAPIT.Create.Indicator.R | `GAPIT.Create.Indicator` <-
`GAPIT.Create.Indicator` <- function(xs, SNP.impute = "Major" ){
    #Object: Turn a vector of genotype calls into a 0/1 indicator matrix with
    #        one column per observed genotype (columns ordered from the most
    #        to the least frequent call), after recoding unknown calls to "N"
    #        and imputing them.
    #Input:  xs - character vector of genotype calls (1- or 2-character codes)
    #        SNP.impute - "Major" (most frequent), "Minor" (least frequent) or
    #                     "Middle" (second most frequent) genotype used to
    #                     replace missing calls
    #Output: numeric matrix with length(xs) rows and one column per genotype
    #Authors: Alex Lipka and Zhiwu Zhang
    ##############################################################################################
    #Determine the number of bits (characters) of the genotype codes
    bit=nchar(as.character(xs[1]))

    #Recode the calls classified as missing to "N"
    if(bit==1) {
        xs[xs=="X"]="N" #fix: the original line indexed an undefined object 'xss'
        xs[xs=="-"]="N"
        xs[xs=="+"]="N"
        xs[xs=="/"]="N"
        xs[xs=="K"]="Z" #K (for GT genotype) is replaced by Z to ensure heterozygose has the largest value
    }
    if(bit==2) {
        xs[xs=="XX"]="N" #fix: the original compared against "xsxs", a rename artifact
        xs[xs=="--"]="N"
        xs[xs=="++"]="N"
        xs[xs=="//"]="N"
        xs[xs=="NN"]="N"
    }

    #Sort the observed (non-missing) genotypes by how often they occur
    #fix: xs[-which(xs == "N")] emptied the whole vector when no "N" existed
    miss <- which(xs == "N")
    xs.temp <- if (length(miss) > 0) xs[-miss] else xs
    genos <- unique(xs.temp)
    frequ <- NULL
    #fix: the original indexed unique(xs) while looping over unique(xs.temp)
    for(i in seq_along(genos)) frequ <- c(frequ, length(which(xs == genos[i])))
    unique.sorted <- cbind(genos, frequ)
    print("unique.sorted is") # diagnostic output, kept from the original
    print(unique.sorted)
    #fix: sort the counts numerically and in decreasing order so element 1 is
    #really the major (most frequent) genotype; the original sorted the counts
    #as character strings and in increasing order
    unique.sorted <- unique.sorted[order(as.numeric(unique.sorted[,2]), decreasing = TRUE), , drop = FALSE]
    unique.sorted <- unique.sorted[,1]

    #Impute based on the major and minor allele frequencies
    #fix: missing calls were recoded to the string "N" above, so test for "N";
    #the original tested is.na() and therefore never imputed anything
    if(SNP.impute == "Major")  xs[which(xs == "N")] = unique.sorted[1]
    if(SNP.impute == "Minor")  xs[which(xs == "N")] = unique.sorted[length(unique.sorted)]
    if(SNP.impute == "Middle") xs[which(xs == "N")] = unique.sorted[2]

    #Build one 0/1 indicator column per genotype, in frequency order
    x.ind <- NULL
    for(i in unique.sorted){
        x.col <- rep(NA, length(xs))
        x.col[which(xs==i)] <- 1
        x.col[which(xs!=i)] <- 0
        x.ind <- cbind(x.ind,x.col)
    }
    #(removed an unreachable print() that followed this return in the original)
    return(x.ind)
}#end of GAPIT.Create.Indicator function
| 1,859 | gpl-2.0 |
3677ee16c4611fc4e61535857c8b36459c80167a | miceli/BMR | tests/dsge/gensys/nkm_dsgevar.R |
#
# Test script: estimate a small New Keynesian DSGE-VAR (BMR package, gensys
# solver) on simulated data, then produce plots, IRFs, a forecast and states.
# NOTE(review): rm(list=ls()) wipes the caller's workspace when sourced.
rm(list=ls())
library(BMR)
source("nkm_model.R")
#
# Data: two demeaned US macro series (drop columns 1 and 3 of USMacroData)
data(BMRVARData)
dsgedata <- USMacroData[24:211,-c(1,3)]
dsgedata <- as.matrix(dsgedata)
for(i in 1:2){
    dsgedata[,i] <- dsgedata[,i] - mean(dsgedata[,i])
}
#
# Build the DSGE-VAR object around the model function; x holds the single
# free parameter (eta)
obj <- new(dsgevar_gensys)
obj$set_model_fn(nkm_model_simple)
x <- c(1)
obj$eval_model(x)
#
# Solve the linear rational-expectations model and simulate artificial data
# (columns 3 and 5 of the simulated series are kept as observables)
lrem_obj = obj$lrem
lrem_obj$solve()
lrem_obj$shocks_cov <- matrix(c(1,0,0,0.125),2,2,byrow=TRUE)
sim_data <- lrem_obj$simulate(200,800)$sim_vals
sim_data <- cbind(sim_data[,3],sim_data[,5])
#
# Prior for eta: form 1 with parameters (1.0, 0.05) -- presumably a normal
# prior; confirm against the BMR documentation
prior_pars <- cbind(c(1.0),
                    c(0.05))
prior_form <- c(1)
obj$set_prior(prior_form,prior_pars)
#
# Parameter bounds and initial ranges for the mode optimizer
par_bounds <- cbind(c(-Inf),
                    c( Inf))
opt_bounds <- cbind(c(0.7),
                    c(3.0))
obj$set_bounds(opt_bounds[,1],opt_bounds[,2])
obj$opt_initial_lb <- opt_bounds[,1]
obj$opt_initial_ub <- opt_bounds[,2]
#
# VAR(1) with a constant; lambda weights the DSGE prior; find and check the
# posterior mode
cons_term <- TRUE
p <- 1
lambda <- 1.0
obj$build(sim_data,cons_term,p,lambda)
mode_res <- obj$estim_mode(x,TRUE)
mode_check(obj,mode_res$mode_vals,25,1,"eta")
#
# Short MCMC run (smoke-test sized), then diagnostics and outputs
# NOTE(review): var_names is defined but not used below (IRF uses
# colnames(dsgedata) instead)
obj$mcmc_initial_lb <- opt_bounds[,1]
obj$mcmc_initial_ub <- opt_bounds[,2]
obj$estim_mcmc(x,50,100,100)
var_names <- c("Output Gap","Output","Inflation","Natural Int","Nominal Int","Labour Supply",
               "Technology","MonetaryPolicy")
plot(obj,par_names="eta",save=FALSE)
IRF(obj,20,var_names=colnames(dsgedata),save=FALSE)
forecast(obj,10,back_data=10)
states(obj)
| 1,423 | gpl-2.0 |
9f4cc88e4218578049e3eb7813be4a5aa1cffe75 | lulab/PI | Rscript/machine_learning/plot_result.R | library('e1071')
# Plot MDS projections of a trained random-forest model for the 5-class
# training data, writing one PDF per choice of MDS dimensionality.
require(randomForest)
require(RColorBrewer)
# Features are columns 8-12, 15-16 and 18-19; labels in 'X1.Annotation'
input=read.csv("bins.training-5classes.sampled.csv")
model_file="5classes.rf.model"
dataall=input[,c(8:12,15:16,18:19)]
classesall=subset(input,select=X1.Annotation)
#Generate training and testing sets
# first two thirds of the rows are the training portion
nall=nrow(input)
ntrain=2*floor(nall/3)
datatrain <- dataall[1:ntrain,]
classestrain <- classesall[1:ntrain,]
ntrain=ntrain+1
datatest <- dataall[ntrain:nall,]
classestest <- classesall[ntrain:nall,]
#load model
# NOTE(review): load() restores the object under whatever name it was saved
# with -- the MDSplot() calls below assume it is called 'rf'; confirm.
load(model_file)
# MDS plots of the forest proximities at k = 2, 3 and 9 dimensions
pdf("mdsplot2.pdf")
MDSplot(rf,classestrain,k=2)
dev.off()
pdf("mdsplot3.pdf")
MDSplot(rf,classestrain,k=3)
dev.off()
pdf("mdsplot9.pdf")
MDSplot(rf,classestrain,k=9)
dev.off()
| 675 | gpl-2.0 |
3677ee16c4611fc4e61535857c8b36459c80167a | kthohr/BMR | tests/dsge/gensys/nkm_dsgevar.R |
#
# Test script: estimate a small New Keynesian DSGE-VAR (BMR package, gensys
# solver) on simulated data, then produce plots, IRFs, a forecast and states.
# NOTE(review): rm(list=ls()) wipes the caller's workspace when sourced.
rm(list=ls())
library(BMR)
source("nkm_model.R")
#
# Data: two demeaned US macro series (drop columns 1 and 3 of USMacroData)
data(BMRVARData)
dsgedata <- USMacroData[24:211,-c(1,3)]
dsgedata <- as.matrix(dsgedata)
for(i in 1:2){
    dsgedata[,i] <- dsgedata[,i] - mean(dsgedata[,i])
}
#
# Build the DSGE-VAR object around the model function; x holds the single
# free parameter (eta)
obj <- new(dsgevar_gensys)
obj$set_model_fn(nkm_model_simple)
x <- c(1)
obj$eval_model(x)
#
# Solve the linear rational-expectations model and simulate artificial data
# (columns 3 and 5 of the simulated series are kept as observables)
lrem_obj = obj$lrem
lrem_obj$solve()
lrem_obj$shocks_cov <- matrix(c(1,0,0,0.125),2,2,byrow=TRUE)
sim_data <- lrem_obj$simulate(200,800)$sim_vals
sim_data <- cbind(sim_data[,3],sim_data[,5])
#
# Prior for eta: form 1 with parameters (1.0, 0.05) -- presumably a normal
# prior; confirm against the BMR documentation
prior_pars <- cbind(c(1.0),
                    c(0.05))
prior_form <- c(1)
obj$set_prior(prior_form,prior_pars)
#
# Parameter bounds and initial ranges for the mode optimizer
par_bounds <- cbind(c(-Inf),
                    c( Inf))
opt_bounds <- cbind(c(0.7),
                    c(3.0))
obj$set_bounds(opt_bounds[,1],opt_bounds[,2])
obj$opt_initial_lb <- opt_bounds[,1]
obj$opt_initial_ub <- opt_bounds[,2]
#
# VAR(1) with a constant; lambda weights the DSGE prior; find and check the
# posterior mode
cons_term <- TRUE
p <- 1
lambda <- 1.0
obj$build(sim_data,cons_term,p,lambda)
mode_res <- obj$estim_mode(x,TRUE)
mode_check(obj,mode_res$mode_vals,25,1,"eta")
#
# Short MCMC run (smoke-test sized), then diagnostics and outputs
# NOTE(review): var_names is defined but not used below (IRF uses
# colnames(dsgedata) instead)
obj$mcmc_initial_lb <- opt_bounds[,1]
obj$mcmc_initial_ub <- opt_bounds[,2]
obj$estim_mcmc(x,50,100,100)
var_names <- c("Output Gap","Output","Inflation","Natural Int","Nominal Int","Labour Supply",
               "Technology","MonetaryPolicy")
plot(obj,par_names="eta",save=FALSE)
IRF(obj,20,var_names=colnames(dsgedata),save=FALSE)
forecast(obj,10,back_data=10)
states(obj)
| 1,423 | gpl-2.0 |
3d93551eb71de137b7cd59515f53c669cdb0b83f | glycerine/bigbird | r-3.0.2/src/gnuwin32/installer/JRins.R | # File src/gnuwin32/installer/JRins.R
#
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### JRins.R Rversion srcdir MDISDI HelpStyle Internet Producer ISDIR
## Generate the Inno Setup script (R.iss) used to build the Windows
## installer for R.
##   RW        - base name of the build (used for the installer file name
##               and the default install directory)
##   srcdir    - directory containing the built R tree to package
##   MDISDI    - default MDI/SDI setting substituted into code.iss
##   HelpStyle - default help style substituted into code.iss
##   Internet  - default internet option substituted into code.iss
##   Producer  - publisher string written into the installer
##   ISDIR     - Inno Setup installation directory (for its language files)
## Side effects: writes R.iss in the current directory, reads header1.iss /
## reg*.iss / types*.iss / code.iss from it, and calls setwd(srcdir)
## part-way through without restoring the previous working directory.
.make_R.iss <- function(RW, srcdir, MDISDI=0, HelpStyle=1, Internet=0,
                        Producer = "R-core", ISDIR)
{
    have32bit <- file_test("-d", file.path(srcdir, "bin", "i386"))
    have64bit <- file_test("-d", file.path(srcdir, "bin", "x64"))
    ## need DOS-style paths
    srcdir = gsub("/", "\\", srcdir, fixed = TRUE)
    ## version strings come from the build tree's VERSION/SVN-REVISION files
    Rver <- readLines("../../../VERSION")[1L]
    Rver <- sub("Under .*$", "Pre-release", Rver)
    SVN <- sub("Revision: ", "", readLines("../../../SVN-REVISION"))[1L]
    Rver0 <- paste(sub(" .*$", "", Rver), SVN, sep = ".")
    con <- file("R.iss", "w")
    cat("[Setup]\n", file = con)
    ## pick the registry/types fragments for the architectures present
    if (have64bit) {
        regfile <- "reg3264.iss"
        types <- "types3264.iss"
        cat("ArchitecturesInstallIn64BitMode=x64\n", file = con)
    } else { # 32-bit only
        regfile <- "reg.iss"
        types <- "types32.iss"
    }
    suffix <- "win"
    cat(paste("OutputBaseFilename=", RW, "-", suffix, sep = ""),
        paste("AppName=R for Windows ", Rver, sep = ""),
        paste("AppVerName=R for Windows ", Rver, sep = ""),
        paste("AppVersion=", Rver, sep = ""),
        paste("VersionInfoVersion=", Rver0, sep = ""),
        paste("DefaultDirName={code:UserPF}\\R\\", RW, sep = ""),
        paste("InfoBeforeFile=", srcdir, "\\COPYING", sep = ""),
        if(Producer == "R-core") "AppPublisher=R Core Team"
        else paste("AppPublisher=", Producer, sep = ""),
        file = con, sep = "\n")
    ## different versions of the installer have different translation files
    ## keep only the header1.iss language lines whose .isl file is available
    lines <- readLines("header1.iss")
    check <- grepl("Languages\\", lines, fixed = TRUE)
    langs <- sub(".*\\\\", "", lines[check])
    langs <- sub('"$', "", langs)
    avail <- dir(file.path(ISDIR, "Languages"), pattern = "[.]isl$")
    drop <- !(langs %in% avail)
    if(any(drop))
        lines <- grep(paste0("(", paste(langs[drop], collapse = "|"), ")"),
                      lines, value = TRUE, invert = TRUE)
    writeLines(lines, con)
    ## substitute version/producer placeholders in the registry fragment
    lines <- readLines(regfile)
    lines <- gsub("@RVER@", Rver, lines)
    lines <- gsub("@Producer@", Producer, lines)
    writeLines(lines, con)
    lines <- readLines(types)
    ## 64-bit-only builds drop the 32-bit install types
    if(have64bit && !have32bit) {
        lines <- lines[-c(3,4,10)]
        lines <- gsub("user(32)* ", "", lines)
        lines <- gsub("compact ", "", lines)
    }
    writeLines(lines, con)
    ## substitute the default-option placeholders in the [Code] fragment
    lines <- readLines("code.iss")
    lines <- gsub("@MDISDI@", MDISDI, lines)
    lines <- gsub("@HelpStyle@", HelpStyle, lines)
    lines <- gsub("@Internet@", Internet, lines)
    writeLines(lines, con)
    writeLines(c("", "", "[Files]"), con)
    setwd(srcdir)
    files <- sub("^./", "",
                 list.files(".", full.names = TRUE, recursive = TRUE))
    ## one [Files] entry per file; assign each to an installer component
    for (f in files) {
        dir <- sub("[^/]+$", "", f)
        dir <- paste("\\", gsub("/", "\\", dir, fixed = TRUE), sep = "")
        dir <- sub("\\\\$", "", dir)
        component <- if (grepl("^Tcl/(bin|lib)64", f)) "x64"
        else if (have64bit &&
                 (grepl("^Tcl/bin", f) ||
                  grepl("^Tcl/lib/(dde1.3|reg1.2|Tktable)", f))) "i386"
        else if (grepl("/i386/", f)) "i386"
        else if (grepl("/x64/", f)) "x64"
        else if (grepl("(/po$|/po/|/msgs$|/msgs/|^library/translations)", f))
            "translations"
        else "main"
        if (component == "x64" && !have64bit) next
        f <- gsub("/", "\\", f, fixed = TRUE)
        cat('Source: "', srcdir, '\\', f, '"; ',
            'DestDir: "{app}', dir, '"; ',
            'Flags: ignoreversion; ',
            'Components: ', component,
            file = con, sep = "")
        ## these two files get a post-install customization hook
        if(f %in% c("etc\\Rprofile.site", "etc\\Rconsole"))
            cat("; AfterInstall: EditOptions()", file = con)
        cat("\n", file = con)
    }
    close(con)
}
# Entry point: forwards the command-line arguments, in order, to
# .make_R.iss (see the usage comment in the file header).
args <- commandArgs(TRUE)
do.call(".make_R.iss", as.list(args))
| 4,565 | bsd-2-clause |
f580b38b5bef5d2c920c8abbe61b73e8a8e09dda | Prateek2690/APP_ | highcharter/ui-orig.R | #library("shiny")
# Shiny dashboard UI for the highcharter demo app: a sidebar with three
# tabs (Examples, Time Series, Plugins) and a body of highchart widgets.
#library("shinydashboard")
library("highcharter")
#library("dplyr")
#library("viridisLite")
library("markdown")
library("quantmod")
library("tidyr")
#library("ggplot2")
library("treemap")
library("forecast")
library("DT")
#rm(list = ls())
# The last expression of a shiny ui.R file is the page definition itself.
dashboardPage(
  skin = "black",
  dashboardHeader(title = "highcharter", disable = FALSE),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Examples", tabName = "examples", icon = icon("bar-chart")),
      menuItem("Time Series", tabName = "ts", icon = icon("line-chart")),
      menuItem("Plugins", tabName = "plugins", icon = icon("line-chart"))
    ),
    div(includeMarkdown("hcterinfo.md"), style = "padding:10px")
  ),
  dashboardBody(
    # analytics script and custom CSS fixes
    tags$head(tags$script(src = "js/ga.js")),
    tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "css/custom_fixs.css")),
    tabItems(
      # gallery of chart types with theme/credits/exporting selectors
      tabItem(tabName = "examples",
              fluidRow(
                column(4, selectInput("theme", label = "Theme",
                                      choices = c(FALSE, "fivethirtyeight", "economist", "dotabuff",
                                                  "darkunica", "gridlight",
                                                  "sandsignika", "null", "handdrwran",
                                                  "chalk"))),
                column(4, selectInput("credits", label = "Credits enabled", choices = c(FALSE, TRUE))),
                column(4, selectInput("exporting", label = "Exporting enabled", choices = c(FALSE, TRUE)))
              ),
              box(width = 6, highchartOutput("highchart")),
              box(width = 6, highchartOutput("highmap")),
              box(width = 6, highchartOutput("highohlc")),
              box(width = 6, highchartOutput("highscatter")),
              box(width = 6, highchartOutput("highstreemap")),
              box(width = 6, highchartOutput("highheatmap")),
              box(width = 12, highchartOutput("highstock"))
      ),
      # time-series explorer: chart, forecast, forecast table, ACF/PACF
      tabItem(tabName = "ts",
              fluidRow(
                column(4, selectInput("ts", label = "Time series",
                                      choices = c("WWWusage", "AirPassengers",
                                                  "ldeaths", "USAccDeaths")))
              ),
              box(width = 12, highchartOutput("tschart")),
              box(width = 6, highchartOutput("tsforecast")),
              box(width = 6, dataTableOutput("dfforecast")),
              box(width = 6, highchartOutput("tsacf")),
              box(width = 6, highchartOutput("tspacf"))
      ),
      tabItem(tabName = "plugins",
              box(width = 12, highchartOutput("pluginsfa"))
      )
    )
  )
)
| 2,782 | mit |
3d93551eb71de137b7cd59515f53c669cdb0b83f | lajus/customr | src/gnuwin32/installer/JRins.R | # File src/gnuwin32/installer/JRins.R
#
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### JRins.R Rversion srcdir MDISDI HelpStyle Internet Producer ISDIR
.make_R.iss <- function(RW, srcdir, MDISDI=0, HelpStyle=1, Internet=0,
                        Producer = "R-core", ISDIR)
{
    ## Generate the Inno Setup script "R.iss" used to build the Windows
    ## installer of R from the built tree in `srcdir`.
    ##
    ## RW        installer base name (e.g. "R-3.0.0")
    ## srcdir    root of the built R tree to package
    ## MDISDI, HelpStyle, Internet  defaults substituted into code.iss
    ## Producer  publisher name recorded in the installer
    ## ISDIR     Inno Setup installation directory (to locate Languages/)

    ## Which architectures were built?
    have32bit <- file_test("-d", file.path(srcdir, "bin", "i386"))
    have64bit <- file_test("-d", file.path(srcdir, "bin", "x64"))
    ## Inno Setup needs DOS-style (backslash) paths.
    srcdir <- gsub("/", "\\", srcdir, fixed = TRUE)
    Rver <- readLines("../../../VERSION")[1L]
    Rver <- sub("Under .*$", "Pre-release", Rver)
    SVN <- sub("Revision: ", "", readLines("../../../SVN-REVISION"))[1L]
    ## Numeric version (x.y.z.rev) for VersionInfoVersion.
    Rver0 <- paste(sub(" .*$", "", Rver), SVN, sep = ".")
    con <- file("R.iss", "w")
    ## Close the output even on error (previously leaked on failure).
    on.exit(close(con), add = TRUE)
    cat("[Setup]\n", file = con)
    if (have64bit) {
        regfile <- "reg3264.iss"
        types <- "types3264.iss"
        cat("ArchitecturesInstallIn64BitMode=x64\n", file = con)
    } else { # 32-bit only
        regfile <- "reg.iss"
        types <- "types32.iss"
    }
    suffix <- "win"
    cat(paste0("OutputBaseFilename=", RW, "-", suffix),
        paste0("AppName=R for Windows ", Rver),
        paste0("AppVerName=R for Windows ", Rver),
        paste0("AppVersion=", Rver),
        paste0("VersionInfoVersion=", Rver0),
        paste0("DefaultDirName={code:UserPF}\\R\\", RW),
        paste0("InfoBeforeFile=", srcdir, "\\COPYING"),
        if(Producer == "R-core") "AppPublisher=R Core Team"
        else paste0("AppPublisher=", Producer),
        file = con, sep = "\n")
    ## Different versions of Inno Setup ship different translation files:
    ## keep only the [Languages] entries actually present under ISDIR.
    lines <- readLines("header1.iss")
    check <- grepl("Languages\\", lines, fixed = TRUE)
    langs <- sub(".*\\\\", "", lines[check])
    langs <- sub('"$', "", langs)
    avail <- dir(file.path(ISDIR, "Languages"), pattern = "[.]isl$")
    drop <- !(langs %in% avail)
    if(any(drop))
        lines <- grep(paste0("(", paste(langs[drop], collapse = "|"), ")"),
                      lines, value = TRUE, invert = TRUE)
    writeLines(lines, con)
    ## Registry section, with version/publisher placeholders filled in.
    lines <- readLines(regfile)
    lines <- gsub("@RVER@", Rver, lines)
    lines <- gsub("@Producer@", Producer, lines)
    writeLines(lines, con)
    ## Installation types/components; prune 32-bit-only entries when the
    ## build is 64-bit only.
    lines <- readLines(types)
    if(have64bit && !have32bit) {
        lines <- lines[-c(3,4,10)]
        lines <- gsub("user(32)* ", "", lines)
        lines <- gsub("compact ", "", lines)
    }
    writeLines(lines, con)
    ## Pascal [Code] section with start-up option defaults substituted.
    lines <- readLines("code.iss")
    lines <- gsub("@MDISDI@", MDISDI, lines)
    lines <- gsub("@HelpStyle@", HelpStyle, lines)
    lines <- gsub("@Internet@", Internet, lines)
    writeLines(lines, con)
    writeLines(c("", "", "[Files]"), con)
    ## Walk the build tree and emit one [Files] entry per file.  Restore the
    ## caller's working directory on exit (previously left changed).
    owd <- setwd(srcdir)
    on.exit(setwd(owd), add = TRUE)
    files <- sub("^./", "",
                 list.files(".", full.names = TRUE, recursive = TRUE))
    for (f in files) {
        dir <- sub("[^/]+$", "", f)
        dir <- paste0("\\", gsub("/", "\\", dir, fixed = TRUE))
        dir <- sub("\\\\$", "", dir)
        ## Assign each file to an installer component.
        component <- if (grepl("^Tcl/(bin|lib)64", f)) "x64"
        else if (have64bit &&
                 (grepl("^Tcl/bin", f) ||
                  grepl("^Tcl/lib/(dde1.3|reg1.2|Tktable)", f))) "i386"
        else if (grepl("/i386/", f)) "i386"
        else if (grepl("/x64/", f)) "x64"
        else if (grepl("(/po$|/po/|/msgs$|/msgs/|^library/translations)", f))
            "translations"
        else "main"
        if (component == "x64" && !have64bit) next
        f <- gsub("/", "\\", f, fixed = TRUE)
        cat('Source: "', srcdir, '\\', f, '"; ',
            'DestDir: "{app}', dir, '"; ',
            'Flags: ignoreversion; ',
            'Components: ', component,
            file = con, sep = "")
        ## These files may be customized during installation.
        if(f %in% c("etc\\Rprofile.site", "etc\\Rconsole"))
            cat("; AfterInstall: EditOptions()", file = con)
        cat("\n", file = con)
    }
}
## Script entry point: forward all trailing command-line arguments
## (see the usage comment at the top of this file) to .make_R.iss.
args <- commandArgs(TRUE)
do.call(".make_R.iss", as.list(args))
| 4,565 | gpl-2.0 |
f580b38b5bef5d2c920c8abbe61b73e8a8e09dda | Prateek2690/APP_ | highcharter/highcharter/ui-orig.R | #library("shiny")
#library("shinydashboard")
library("highcharter")
#library("dplyr")
#library("viridisLite")
library("markdown")
library("quantmod")
library("tidyr")
#library("ggplot2")
library("treemap")
library("forecast")
library("DT")
#rm(list = ls())
dashboardPage(
skin = "black",
dashboardHeader(title = "highcharter", disable = FALSE),
dashboardSidebar(
sidebarMenu(
menuItem("Examples", tabName = "examples", icon = icon("bar-chart")),
menuItem("Time Series", tabName = "ts", icon = icon("line-chart")),
menuItem("Plugins", tabName = "plugins", icon = icon("line-chart"))
),
div(includeMarkdown("hcterinfo.md"), style = "padding:10px")
),
dashboardBody(
tags$head(tags$script(src = "js/ga.js")),
tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "css/custom_fixs.css")),
tabItems(
tabItem(tabName = "examples",
fluidRow(
column(4, selectInput("theme", label = "Theme",
choices = c(FALSE, "fivethirtyeight", "economist", "dotabuff",
"darkunica", "gridlight",
"sandsignika", "null", "handdrwran",
"chalk"))),
column(4, selectInput("credits", label = "Credits enabled", choices = c(FALSE, TRUE))),
column(4, selectInput("exporting", label = "Exporting enabled", choices = c(FALSE, TRUE)))
),
box(width = 6, highchartOutput("highchart")),
box(width = 6, highchartOutput("highmap")),
box(width = 6, highchartOutput("highohlc")),
box(width = 6, highchartOutput("highscatter")),
box(width = 6, highchartOutput("highstreemap")),
box(width = 6, highchartOutput("highheatmap")),
box(width = 12, highchartOutput("highstock"))
),
tabItem(tabName = "ts",
fluidRow(
column(4, selectInput("ts", label = "Time series",
choices = c("WWWusage", "AirPassengers",
"ldeaths", "USAccDeaths")))
),
box(width = 12, highchartOutput("tschart")),
box(width = 6, highchartOutput("tsforecast")),
box(width = 6, dataTableOutput("dfforecast")),
box(width = 6, highchartOutput("tsacf")),
box(width = 6, highchartOutput("tspacf"))
),
tabItem(tabName = "plugins",
box(width = 12, highchartOutput("pluginsfa"))
)
)
)
)
| 2,782 | mit |
3d93551eb71de137b7cd59515f53c669cdb0b83f | cxxr-devel/cxxr-svn-mirror | src/gnuwin32/installer/JRins.R | # File src/gnuwin32/installer/JRins.R
#
# Part of the R package, http://www.R-project.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
### JRins.R Rversion srcdir MDISDI HelpStyle Internet Producer ISDIR
.make_R.iss <- function(RW, srcdir, MDISDI=0, HelpStyle=1, Internet=0,
                        Producer = "R-core", ISDIR)
{
    ## Generates the Inno Setup script "R.iss" for building the Windows
    ## installer of R from the built tree in `srcdir`.
    ##
    ## RW        installer base name (e.g. "R-3.0.0")
    ## srcdir    root of the built R tree to package
    ## MDISDI, HelpStyle, Internet  defaults substituted into code.iss
    ## Producer  publisher name recorded in the installer
    ## ISDIR     Inno Setup installation directory (to locate Languages/)
    ##
    ## NOTE(review): the function leaves the working directory changed
    ## (setwd below) and leaks `con` if an error occurs before close();
    ## harmless for the one-shot script use at the bottom of this file.

    ## Which architectures were built?
    have32bit <- file_test("-d", file.path(srcdir, "bin", "i386"))
    have64bit <- file_test("-d", file.path(srcdir, "bin", "x64"))
    ## need DOS-style paths
    srcdir = gsub("/", "\\", srcdir, fixed = TRUE)
    Rver <- readLines("../../../VERSION")[1L]
    Rver <- sub("Under .*$", "Pre-release", Rver)
    SVN <- sub("Revision: ", "", readLines("../../../SVN-REVISION"))[1L]
    ## Numeric version (x.y.z.rev) for VersionInfoVersion.
    Rver0 <- paste(sub(" .*$", "", Rver), SVN, sep = ".")
    con <- file("R.iss", "w")
    cat("[Setup]\n", file = con)
    if (have64bit) {
        regfile <- "reg3264.iss"
        types <- "types3264.iss"
        cat("ArchitecturesInstallIn64BitMode=x64\n", file = con)
    } else { # 32-bit only
        regfile <- "reg.iss"
        types <- "types32.iss"
    }
    suffix <- "win"
    cat(paste("OutputBaseFilename=", RW, "-", suffix, sep = ""),
        paste("AppName=R for Windows ", Rver, sep = ""),
        paste("AppVerName=R for Windows ", Rver, sep = ""),
        paste("AppVersion=", Rver, sep = ""),
        paste("VersionInfoVersion=", Rver0, sep = ""),
        paste("DefaultDirName={code:UserPF}\\R\\", RW, sep = ""),
        paste("InfoBeforeFile=", srcdir, "\\COPYING", sep = ""),
        if(Producer == "R-core") "AppPublisher=R Core Team"
        else paste("AppPublisher=", Producer, sep = ""),
        file = con, sep = "\n")
    ## different versions of the installer have different translation files:
    ## keep only the [Languages] entries actually present under ISDIR.
    lines <- readLines("header1.iss")
    check <- grepl("Languages\\", lines, fixed = TRUE)
    langs <- sub(".*\\\\", "", lines[check])
    langs <- sub('"$', "", langs)
    avail <- dir(file.path(ISDIR, "Languages"), pattern = "[.]isl$")
    drop <- !(langs %in% avail)
    if(any(drop))
        lines <- grep(paste0("(", paste(langs[drop], collapse = "|"), ")"),
                      lines, value = TRUE, invert = TRUE)
    writeLines(lines, con)
    ## Registry section, with version/publisher placeholders filled in.
    lines <- readLines(regfile)
    lines <- gsub("@RVER@", Rver, lines)
    lines <- gsub("@Producer@", Producer, lines)
    writeLines(lines, con)
    ## Installation types/components; prune 32-bit-only entries for a
    ## 64-bit-only build.
    lines <- readLines(types)
    if(have64bit && !have32bit) {
        lines <- lines[-c(3,4,10)]
        lines <- gsub("user(32)* ", "", lines)
        lines <- gsub("compact ", "", lines)
    }
    writeLines(lines, con)
    ## Pascal [Code] section with start-up option defaults substituted.
    lines <- readLines("code.iss")
    lines <- gsub("@MDISDI@", MDISDI, lines)
    lines <- gsub("@HelpStyle@", HelpStyle, lines)
    lines <- gsub("@Internet@", Internet, lines)
    writeLines(lines, con)
    writeLines(c("", "", "[Files]"), con)
    ## Walk the build tree and emit one [Files] entry per file.
    setwd(srcdir)
    files <- sub("^./", "",
                 list.files(".", full.names = TRUE, recursive = TRUE))
    for (f in files) {
        dir <- sub("[^/]+$", "", f)
        dir <- paste("\\", gsub("/", "\\", dir, fixed = TRUE), sep = "")
        dir <- sub("\\\\$", "", dir)
        ## Assign each file to an installer component.
        component <- if (grepl("^Tcl/(bin|lib)64", f)) "x64"
        else if (have64bit &&
                 (grepl("^Tcl/bin", f) ||
                  grepl("^Tcl/lib/(dde1.3|reg1.2|Tktable)", f))) "i386"
        else if (grepl("/i386/", f)) "i386"
        else if (grepl("/x64/", f)) "x64"
        else if (grepl("(/po$|/po/|/msgs$|/msgs/|^library/translations)", f))
            "translations"
        else "main"
        if (component == "x64" && !have64bit) next
        f <- gsub("/", "\\", f, fixed = TRUE)
        cat('Source: "', srcdir, '\\', f, '"; ',
            'DestDir: "{app}', dir, '"; ',
            'Flags: ignoreversion; ',
            'Components: ', component,
            file = con, sep = "")
        ## These files may be customized during installation.
        if(f %in% c("etc\\Rprofile.site", "etc\\Rconsole"))
            cat("; AfterInstall: EditOptions()", file = con)
        cat("\n", file = con)
    }
    close(con)
}
## Script entry point: forward all trailing command-line arguments
## (see the usage comment at the top of this file) to .make_R.iss.
args <- commandArgs(TRUE)
do.call(".make_R.iss", as.list(args))
| 4,565 | gpl-2.0 |
3a12b5e5e12ccbd88942655110ed42ad854f3a08 | thomasvangurp/epiGBS | RnBeads/RnBeads/R/assemblies.R | ########################################################################################################################
## annotations.R
## created: 2012-08-16
## creator: Yassen Assenov
## ---------------------------------------------------------------------------------------------------------------------
## Collection of helper constants and functions related to the management of probe and region annotations.
########################################################################################################################
#' RnBeads Annotation Tables
#'
#' RnBeads uses sets of annotation tables and mappings (from regions to sites) for each of the supported genomes. The
#' structures for one assembly are stored in a separate dedicated data package. Currently, the following assemblies are
#' supported:
#' \describe{
#' \item{\code{"hg19"}}{through the package \pkg{RnBeads.hg19}}
#' \item{\code{"mm10"}}{through the package \pkg{RnBeads.mm10}}
#' \item{\code{"mm9"}}{through the package \pkg{RnBeads.mm9}}
#' \item{\code{"rn5"}}{through the package \pkg{RnBeads.rn5}}
#' }
#'
#' @details
#' The assembly-specific structures are automatically loaded upon initialization of the annotation, that is, by the
#' first valid call to any of the following functions: \code{\link{rnb.get.chromosomes}},
#' \code{\link{rnb.get.annotation}}, \code{\link{rnb.set.annotation}}, \code{\link{rnb.get.mapping}},
#' \code{\link{rnb.annotation.size}}. Adding an annotation amounts to attaching its table(s) and mapping structures to
#' the scaffold.
#'
#' @docType data
#' @keywords datasets
#' @name RnBeads.data
#' @aliases hg19 mm10 mm9 rn5
#' @format \code{list} of four elements - \code{"regions"}, \code{"sites"}, \code{"controls"} and \code{"mappings"}.
#' These elements are described below.
#' \describe{
#' \item{\code{"regions"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the built-in
#' region annotation tables. Once the default annotations are loaded, the attribute \code{"builtin"} is
#' a \code{logical} vector storing, for each region annotation, whether it is the default (built-in) or
#' custom.}
#' \item{\code{"sites"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the site and
#' probe annotation tables.}
#' \item{\code{"controls"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the control
#' probe annotation tables. The attribute \code{"sites"} is a \code{character} vector pointing to the
#' site annotation that encompasses the respective control probes.}
#' \item{\code{"mappings"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the built-in
#' region annotation tables.}
#' }
#' @author Yassen Assenov
NULL
## G L O B A L S #######################################################################################################
## Environment to contain all probe, site and region annotation tables.
##
## hg19
## $regions
## $tiling GRangesList
## $genes GRangesList
## $promoters GRangesList
## $cpgislands GRangesList
## $sites
## $CpG GRangesList
## $probes450 GRangesList
## $controls
## $controls450 data.frame
## $mappings
## $tiling
## $CpG list of IRanges
## $probes450 list of IRanges
## $genes
## $CpG list of IRanges
## $probes450 list of IRanges
## $promoters
## $CpG list of IRanges
## $probes450 list of IRanges
## $cpgislands
## $CpG list of IRanges
## $probes450 list of IRanges
## $lengths int[ <chromosomes> , <annotations> ]
## Environment populated with all probe, site and region annotation tables
## (structure sketched in the comment block above).
.rnb.annotations <- new.env()
## Chromosomes supported by the annotation packages
##%(chromosomes)s
## Long-to-short chromosome identifiers per assembly, e.g. "hg19" -> 1:22, X, Y.
## The placeholder comment inside the list is a template substitution point
## used by the package generator; keep it intact.
CHROMOSOMES.L2S <- list("hg19" = c(1:22, "X", "Y"), "mm9" = c(1:19, "X", "Y"), "mm10" = c(1:19, "X", "Y"),
	"rn5" = c(1:20, "X")
	##%(assembly_table)s
)
## Short-to-long form adds the "chr" prefix ("1" -> "chr1", "X" -> "chrX").
CHROMOSOMES.S2L <- lapply(CHROMOSOMES.L2S, function(x) { paste0("chr", x) })
CHROMOSOMES <- CHROMOSOMES.S2L
## Name the vectors so they can be used as lookup tables in both directions:
## CHROMOSOMES.S2L is named by the short ids, CHROMOSOMES by the prefixed form.
for (assembly.name in names(CHROMOSOMES)) {
	names(CHROMOSOMES.S2L[[assembly.name]]) <- CHROMOSOMES.L2S[[assembly.name]]
	names(CHROMOSOMES[[assembly.name]]) <- names(CHROMOSOMES.L2S[[assembly.name]]) <- CHROMOSOMES[[assembly.name]]
}
## Drop the loop variable so it does not linger at top level.
rm(assembly.name)
## Control probe types
## Human-readable control-type label -> target identifier used in the
## HumanMethylation450 control probe annotation.
HM450.CONTROL.TARGETS <- c(
	"bisulfite conversion I" = "BISULFITE CONVERSION I",
	"bisulfite conversion II" = "BISULFITE CONVERSION II",
	"extension" = "EXTENSION",
	"hybridization" = "HYBRIDIZATION",
	"negative control" = "NEGATIVE",
	"non-polymorphic" = "NON-POLYMORPHIC",
	"norm A" = "NORM_A",
	"norm C" = "NORM_C",
	"norm G" = "NORM_G",
	"norm T" = "NORM_T",
	"specificity I" = "SPECIFICITY I",
	"specificity II" = "SPECIFICITY II",
	"staining" = "STAINING",
	"target removal" = "TARGET REMOVAL")
## Same mapping for the HumanMethylation27 platform; note the values use a
## different capitalization scheme than the 450k table.
HM27.CONTROL.TARGETS<-c(
	"bisulfite conversion" = "Bisulfite conversion",
	"extension" = "Extension",
	"hybridization" = "Hybridization",
	"negative control" = "Negative",
	"SNP" = "Genotyping",
	"non-polymorphic" = "Non-Polymorphic",
	"norm Grn" = "Normalization-Green",
	"norm Red" = "Normalization-Red",
	"specificity" = "Specificity",
	"staining" = "Staining",
	"target removal" = "Target Removal",
	"pACYC174" = "pACYC174",
	"pUC19" = "pUC19",
	"phiX174" = "phiX174"
)
## Sample-independent control probe types (subset of CONTROL.TARGETS)
CONTROL.TARGETS.SAMPLE.INDEPENDENT <- c("STAINING", "HYBRIDIZATION", "TARGET REMOVAL", "EXTENSION")
## Genotyping probes on the 27k microarray
## rs identifiers of SNP probes; CY3/CY5 presumably refer to the detection
## channel (green/red) -- confirm against the platform manifest.
HM27.CY3.SNP.PROBES<-c(
	"rs798149",
	"rs2959823",
	"rs2235751",
	"rs2125573",
	"rs2804694"
)
HM27.CY5.SNP.PROBES<-c(
	"rs1941955",
	"rs845016",
	"rs866884",
	"rs739259",
	"rs1416770",
	"rs1019916",
	"rs2521373",
	"rs10457834",
	"rs6546473",
	"rs5931272",
	"rs264581"
)
## F U N C T I O N S ###################################################################################################
#' get.genome.data
#'
#' Gets the specified genome.
#'
#' @param assembly Genome assembly of interest. Currently the only supported genomes are \code{"hg19"}, \code{"mm9"},
#' \code{"mm10"} and \code{"rn5"}.
#' @return Sequence data object for the specified assembly.
#'
#' @author Yassen Assenov
#' @noRd
get.genome.data <- function(assembly) {
	## Attaches (via require) the BSgenome data package matching `assembly`
	## and returns the genome sequence object it exports.  The if/else
	## chain below is extended by the package generator through the
	## placeholder comment near the bottom; keep its shape intact.
	## NOTE(review): the return value of require() is not checked -- if the
	## data package is missing, the following symbol lookup fails with an
	## unrelated "object not found" error; consider testing the result.
	if (assembly == "hg19") {
		suppressPackageStartupMessages(require(BSgenome.Hsapiens.UCSC.hg19))
		genome.data <- Hsapiens
	} else if (assembly == "mm9") {
		suppressPackageStartupMessages(require(BSgenome.Mmusculus.UCSC.mm9))
		genome.data <- Mmusculus
	} else if (assembly == "mm10") {
		suppressPackageStartupMessages(require(BSgenome.Mmusculus.UCSC.mm10))
		genome.data <- Mmusculus
	} else if (assembly == "rn5") {
		suppressPackageStartupMessages(require(BSgenome.Rnorvegicus.UCSC.rn5))
		genome.data <- Rnorvegicus
	}
	##
	##%(assembly_package)s
	else {
		stop("unsupported assembly")
	}
	return(genome.data)
}
| 7,310 | mit |
051cf708912da9d7c4b23cef72c206df58bae18e | mul118/shinyMCE | R/shinyMCE.R | #' tinyMCE editor element
#'
#' Display a tinyMCE editor within an application page.
#' @param inputId id associated with the editor
#' @param content editor content. May be a string or HTML embedded in an \code{\link{HTML}} function
#' @param options string containing tinyMCE initialization options. See demos or source code on the tinyMCE website http://www.tinymce.com/tryit/basic.php for more information.
#' @return a tinyMCE editor element that can be included in a panel
#' @examples
#' # Basic editors
#' tinyMCE('editor1', 'Click to edit text')
#' tinyMCE('editor1', HTML('<p><strong>Click</strong> to edit text</p>'))
#'
#' # With options
#' tinyMCE('editor1', 'This is an inline tinyMCE editor', 'inline: true')
#' @import shiny
#' @export
tinyMCE <- function(inputId, content, options = NULL){
  # Script tag loading the TinyMCE library from its CDN; singleton() ensures
  # it is emitted only once even with several editors on the page.
  cdn_loader <- singleton(tags$head(tags$script(
    src = "//cdn.tiny.cloud/1/5a5deew2z9gml5pwn95iosioop446qny3vyfh994kujzkwu6/tinymce/5/tinymce.min.js",
    referrerpolicy = "origin"
  )))
  # The editable region itself, initialized with the supplied content.
  editor_div <- tags$div(
    id = inputId,
    class = "shinytinymce",
    content,
    style = "resize: none; width: 100%; height: 100%; border-style: none; background: gainsboro;"
  )
  # Initialization call; `options` (if any) is spliced verbatim into the
  # tinymce.init() configuration object.
  init_call <- tags$script(paste0('tinymce.init({selector:".shinytinymce", ', options, '});'))
  # Shiny input/output bindings for the editor (also emitted only once).
  shiny_bindings <- singleton(tags$head(tags$script(src = 'shinyMCE/shiny-tinymce-bindings.js')))
  tagList(cdn_loader, editor_div, init_call, shiny_bindings)
}
#' Update tinyMCE editor
#'
#' Update tinyMCE editor object to display new content.
#' @param session the \code{session} object passed to function given to \code{shinyServer}
#' @param inputId id associated with the tinyMCE editor
#' @param content new content to place withing the editor
#' @import shiny
#' @export
updateTinyMCE <- function(session, inputId, content){
  # Bundle the editor id and replacement content into the payload expected
  # by shiny-tinymce-bindings.js, then push it to the client over the
  # "shinyMCE.update" custom-message channel.
  payload <- list(id = inputId, content = content)
  session$sendCustomMessage(type = "shinyMCE.update", payload)
}
| 1,808 | mit |
3a12b5e5e12ccbd88942655110ed42ad854f3a08 | thomasvangurp/epiGBS | RnBeads/templates/assemblies.R | ########################################################################################################################
## annotations.R
## created: 2012-08-16
## creator: Yassen Assenov
## ---------------------------------------------------------------------------------------------------------------------
## Collection of helper constants and functions related to the management of probe and region annotations.
########################################################################################################################
#' RnBeads Annotation Tables
#'
#' RnBeads uses sets of annotation tables and mappings (from regions to sites) for each of the supported genomes. The
#' structures for one assembly are stored in a separate dedicated data package. Currently, the following assemblies are
#' supported:
#' \describe{
#' \item{\code{"hg19"}}{through the package \pkg{RnBeads.hg19}}
#' \item{\code{"mm10"}}{through the package \pkg{RnBeads.mm10}}
#' \item{\code{"mm9"}}{through the package \pkg{RnBeads.mm9}}
#' \item{\code{"rn5"}}{through the package \pkg{RnBeads.rn5}}
#' }
#'
#' @details
#' The assembly-specific structures are automatically loaded upon initialization of the annotation, that is, by the
#' first valid call to any of the following functions: \code{\link{rnb.get.chromosomes}},
#' \code{\link{rnb.get.annotation}}, \code{\link{rnb.set.annotation}}, \code{\link{rnb.get.mapping}},
#' \code{\link{rnb.annotation.size}}. Adding an annotation amounts to attaching its table(s) and mapping structures to
#' the scaffold.
#'
#' @docType data
#' @keywords datasets
#' @name RnBeads.data
#' @aliases hg19 mm10 mm9 rn5
#' @format \code{list} of four elements - \code{"regions"}, \code{"sites"}, \code{"controls"} and \code{"mappings"}.
#' These elements are described below.
#' \describe{
#' \item{\code{"regions"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the built-in
#' region annotation tables. Once the default annotations are loaded, the attribute \code{"builtin"} is
#' a \code{logical} vector storing, for each region annotation, whether it is the default (built-in) or
#' custom.}
#' \item{\code{"sites"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the site and
#' probe annotation tables.}
#' \item{\code{"controls"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the control
#' probe annotation tables. The attribute \code{"sites"} is a \code{character} vector pointing to the
#' site annotation that encompasses the respective control probes.}
#' \item{\code{"mappings"}}{\code{list} of \code{NULL}s; the names of the elements correspond to the built-in
#' region annotation tables.}
#' }
#' @author Yassen Assenov
NULL
## G L O B A L S #######################################################################################################
## Environment to contain all probe, site and region annotation tables.
##
## hg19
## $regions
## $tiling GRangesList
## $genes GRangesList
## $promoters GRangesList
## $cpgislands GRangesList
## $sites
## $CpG GRangesList
## $probes450 GRangesList
## $controls
## $controls450 data.frame
## $mappings
## $tiling
## $CpG list of IRanges
## $probes450 list of IRanges
## $genes
## $CpG list of IRanges
## $probes450 list of IRanges
## $promoters
## $CpG list of IRanges
## $probes450 list of IRanges
## $cpgislands
## $CpG list of IRanges
## $probes450 list of IRanges
## $lengths int[ <chromosomes> , <annotations> ]
## Environment populated with all probe, site and region annotation tables
## (structure sketched in the comment block above).
.rnb.annotations <- new.env()
## Chromosomes supported by the annotation packages
##%(chromosomes)s
## Long-to-short chromosome identifiers per assembly, e.g. "hg19" -> 1:22, X, Y.
## The placeholder comment inside the list is a template substitution point
## used by the package generator; keep it intact.
CHROMOSOMES.L2S <- list("hg19" = c(1:22, "X", "Y"), "mm9" = c(1:19, "X", "Y"), "mm10" = c(1:19, "X", "Y"),
	"rn5" = c(1:20, "X")
	##%(assembly_table)s
)
## Short-to-long form adds the "chr" prefix ("1" -> "chr1", "X" -> "chrX").
CHROMOSOMES.S2L <- lapply(CHROMOSOMES.L2S, function(x) { paste0("chr", x) })
CHROMOSOMES <- CHROMOSOMES.S2L
## Name the vectors so they can be used as lookup tables in both directions:
## CHROMOSOMES.S2L is named by the short ids, CHROMOSOMES by the prefixed form.
for (assembly.name in names(CHROMOSOMES)) {
	names(CHROMOSOMES.S2L[[assembly.name]]) <- CHROMOSOMES.L2S[[assembly.name]]
	names(CHROMOSOMES[[assembly.name]]) <- names(CHROMOSOMES.L2S[[assembly.name]]) <- CHROMOSOMES[[assembly.name]]
}
## Drop the loop variable so it does not linger at top level.
rm(assembly.name)
## Control probe types
## Human-readable control-type label -> target identifier used in the
## HumanMethylation450 control probe annotation.
HM450.CONTROL.TARGETS <- c(
	"bisulfite conversion I" = "BISULFITE CONVERSION I",
	"bisulfite conversion II" = "BISULFITE CONVERSION II",
	"extension" = "EXTENSION",
	"hybridization" = "HYBRIDIZATION",
	"negative control" = "NEGATIVE",
	"non-polymorphic" = "NON-POLYMORPHIC",
	"norm A" = "NORM_A",
	"norm C" = "NORM_C",
	"norm G" = "NORM_G",
	"norm T" = "NORM_T",
	"specificity I" = "SPECIFICITY I",
	"specificity II" = "SPECIFICITY II",
	"staining" = "STAINING",
	"target removal" = "TARGET REMOVAL")
## Same mapping for the HumanMethylation27 platform; note the values use a
## different capitalization scheme than the 450k table.
HM27.CONTROL.TARGETS<-c(
	"bisulfite conversion" = "Bisulfite conversion",
	"extension" = "Extension",
	"hybridization" = "Hybridization",
	"negative control" = "Negative",
	"SNP" = "Genotyping",
	"non-polymorphic" = "Non-Polymorphic",
	"norm Grn" = "Normalization-Green",
	"norm Red" = "Normalization-Red",
	"specificity" = "Specificity",
	"staining" = "Staining",
	"target removal" = "Target Removal",
	"pACYC174" = "pACYC174",
	"pUC19" = "pUC19",
	"phiX174" = "phiX174"
)
## Sample-independent control probe types (subset of CONTROL.TARGETS)
CONTROL.TARGETS.SAMPLE.INDEPENDENT <- c("STAINING", "HYBRIDIZATION", "TARGET REMOVAL", "EXTENSION")
## Genotyping probes on the 27k microarray
## rs identifiers of SNP probes; CY3/CY5 presumably refer to the detection
## channel (green/red) -- confirm against the platform manifest.
HM27.CY3.SNP.PROBES<-c(
	"rs798149",
	"rs2959823",
	"rs2235751",
	"rs2125573",
	"rs2804694"
)
HM27.CY5.SNP.PROBES<-c(
	"rs1941955",
	"rs845016",
	"rs866884",
	"rs739259",
	"rs1416770",
	"rs1019916",
	"rs2521373",
	"rs10457834",
	"rs6546473",
	"rs5931272",
	"rs264581"
)
## F U N C T I O N S ###################################################################################################
#' get.genome.data
#'
#' Gets the specified genome.
#'
#' @param assembly Genome assembly of interest. Currently the only supported genomes are \code{"hg19"}, \code{"mm9"},
#' \code{"mm10"} and \code{"rn5"}.
#' @return Sequence data object for the specified assembly.
#'
#' @author Yassen Assenov
#' @noRd
get.genome.data <- function(assembly) {
	## Attaches (via require) the BSgenome data package matching `assembly`
	## and returns the genome sequence object it exports.  The if/else
	## chain below is extended by the package generator through the
	## placeholder comment near the bottom; keep its shape intact.
	## NOTE(review): the return value of require() is not checked -- if the
	## data package is missing, the following symbol lookup fails with an
	## unrelated "object not found" error; consider testing the result.
	if (assembly == "hg19") {
		suppressPackageStartupMessages(require(BSgenome.Hsapiens.UCSC.hg19))
		genome.data <- Hsapiens
	} else if (assembly == "mm9") {
		suppressPackageStartupMessages(require(BSgenome.Mmusculus.UCSC.mm9))
		genome.data <- Mmusculus
	} else if (assembly == "mm10") {
		suppressPackageStartupMessages(require(BSgenome.Mmusculus.UCSC.mm10))
		genome.data <- Mmusculus
	} else if (assembly == "rn5") {
		suppressPackageStartupMessages(require(BSgenome.Rnorvegicus.UCSC.rn5))
		genome.data <- Rnorvegicus
	}
	##
	##%(assembly_package)s
	else {
		stop("unsupported assembly")
	}
	return(genome.data)
}
| 7,310 | mit |
fe7f2d65484526e64128e22478eab82c02cfba4c | natematias/reddit-data-reanalysis | analysis/gaps_summaries.R | library(ggplot2)
library(lubridate)
rm(list=ls())
#### PLOT MISSING DATA PER DAY (COMMENTS)
missing_data_comments <- read.csv("../data/aggregate_data/Missing Data Timeline - Comment Timeline.csv")
missing_data_comments$day <- as.Date(missing_data_comments$Date, format="%m/%d/%Y")
ggplot(missing_data_comments, aes(day, log1p(Count))) +
geom_line(color="cornflowerblue") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("ln Missing Comments Per Day (Calculated by Checking Missing Reply Parents)")
ggplot(missing_data_comments, aes(day, Count)) +
geom_line(color="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Comments Per Day (Calculated by Checking Missing Reply Parents)")
ggplot(missing_data_comments, aes(day, Cumulative)) +
geom_area(fill="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Cumulative Missing Comments Over Time (Calculated by Checking Missing Reply Parents)")
#### PLOT MISSING DATA PER ID RANGE (COMMENTS)
missing_data_comments_ids <- read.csv("../data/aggregate_data/Missing Data Timeline - Spectral Scan Comments.csv")
missing_data_comments_ids$ID.Partition.Base.10
ggplot(missing_data_comments_ids, aes(ID.Partition.Base.10, log1p(Missing.Count))) +
geom_line(color="cornflowerblue") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("ln Missing Comments Per 1,000,000 (Calculated by Checking Missing IDs)")
ggplot(missing_data_comments_ids, aes(ID.Partition.Base.10, Missing.Count)) +
geom_line(color="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Submission Per 1,000,000 (Calculated by Checking Missing IDs)")
ggplot(missing_data_comments_ids, aes(ID.Partition.Base.10, Cumulative)) +
geom_area(fill="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Comments Per 1,000,000 (Calculated by Checking Missing IDs)")
#### PLOT MISSING DATA PER DAY (POSTS)
missing_data_posts <- read.csv("../data/aggregate_data/Missing Data Timeline - Submission Timeline.csv")
missing_data_posts$day <- as.Date(missing_data_posts$Date, format="%m/%d/%Y")
ggplot(missing_data_posts, aes(day, log1p(Count))) +
geom_line(color="cornflowerblue") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("ln Missing Submissions Per Day (Calculated by Checking Missing Reply Parents)")
ggplot(missing_data_posts, aes(day, Count)) +
geom_line(color="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Submission Per Day (Calculated by Checking Missing Reply Parents)")
ggplot(missing_data_posts, aes(day, Cumulative)) +
geom_area(fill="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Cumulative Missing Submission Per Day (Calculated by Checking Missing Reply Parents)")
#### PLOT MISSING DATA PER ID RANGE (POSTS)
missing_data_posts_ids <- read.csv("../data/aggregate_data/Missing Data Timeline - Spectral Scan Submissions.csv")
missing_data_posts_ids$ID.Partition.Base.10
ggplot(missing_data_posts_ids, aes(ID.Partition.Base.10, log1p(Missing.Count))) +
geom_line(color="cornflowerblue") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("ln Missing Submissions Per 100,000 (Calculated by Checking Missing IDs)")
ggplot(missing_data_posts_ids, aes(ID.Partition.Base.10, Missing.Count)) +
geom_line(color="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Submission Per 100,000 (Calculated by Checking Missing IDs)")
ggplot(missing_data_posts_ids, aes(ID.Partition.Base.10, Cumulative)) +
geom_area(fill="orangered4") +
theme(axis.text.x = element_text(hjust=0, vjust=1, size=14),
axis.title=element_text(size=14),
panel.background = element_rect(fill = "white"),
plot.title = element_text(size = 16, colour = "black", vjust = -1)) +
ggtitle("Missing Submission Per 100,000 (Calculated by Checking Missing IDs)")
#### LANGUAGE ITEMS
sum(missing_data_comments$Count)
sum(missing_data_comments_ids$Missing.Count)
min(missing_data_comments$day)
max(missing_data_comments$day)
sum(missing_data_posts$Count)
sum(missing_data_posts_ids$Missing.Count)
min(missing_data_posts$day)
max(missing_data_posts$day)
sum(missing_data_comments_ids$Missing.Count)
signif(100*96.67*sum(missing_data_comments_ids$Missing.Count)/2182699117, 3)
signif(100*6.835*sum(missing_data_comments_ids$Missing.Count)/236132592, 3)
#signif(
| 6,579 | mit |
e55c7febdb970038126ceb12b470fbf5a83d8659 | graalvm/fastr | com.oracle.truffle.r.test.native/packages/testrffi/testrffi/tests/simpleTests.R | # Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 3 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 3 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
stopifnot(require(testrffi))
rffi.addInt(2L, 3L)
rffi.addDouble(2, 3)
rffi.populateIntVector(5)
rffi.populateLogicalVector(5)
rffi.mkStringFromChar()
rffi.mkStringFromBytes()
rffi.null()
try(rffi.null.E())
rffi.null.C()
rffi.isRString(character(0))
a <- c(1L,2L,3L); rffi.iterate_iarray(a)
a <- c(1L,2L,3L); rffi.iterate_iptr(a)
rffi.dotCModifiedArguments(c(0,1,2,3))
rffi.dotExternalAccessArgs(1L, 3, c(1,2,3), c('a', 'b'), 'b', TRUE, as.raw(12))
rffi.dotExternalAccessArgs(x=1L, 3, c(1,2,3), y=c('a', 'b'), 'b', TRUE, as.raw(12))
rffi.invoke12()
rffi.TYPEOF(3L)
rffi.TYPEOF(1:3)
rffi.TYPEOF(1.1:3.1)
rffi.isRString("hello")
rffi.isRString(NULL)
rffi.interactive()
x <- 1; rffi.findvar("x", globalenv())
# See issue GR-9928
# x <- "12345"; rffi.char_length(x)
rffi.test_duplicate(quote(a[,3])[[3]], 1L) # try duplicating empty symbol
result <- rffi.invokeFun(c(1,2,4), function(i) 42)
print(result[[1]])
strVec <- rffi.getStringNA();
stopifnot(anyNA(strVec))
stopifnot(rffi.isNAString(strVec))
rffi.LENGTH(strVec)
# See issue GR-9928
# this will call CHAR(x) on the NA string, which materializes it to native pointer...
# rffi.char_length(strVec)
strVec <- rffi.setStringElt(c('hello'), as.character(NA))
stopifnot(anyNA(strVec))
stopifnot(rffi.isNAString(as.character(NA)))
# See issue GR-9928
# Encoding tests
# rffi.getBytes('\u1F602\n')
# ignored: FastR does not support explicit encoding yet
# latinEncStr <- '\xFD\xDD\xD6\xF0\n'
# Encoding(latinEncStr) <- "latin1"
# rffi.getBytes(latinEncStr)
#rffi.getBytes('hello ascii')
x <- list(1)
attr(x, 'myattr') <- 'hello';
attrs <- rffi.ATTRIB(x)
stopifnot(attrs[[1]] == 'hello')
attr <- rffi.getAttrib(x, 'myattr')
stopifnot(attr == 'hello')
# Enable when GR-9876 is fixed
if (Sys.getenv("FASTR_RFFI") != "llvm") {
# loess invokes loess_raw native function passing in string value as argument and that is what we test here.
loess(dist ~ speed, cars);
}
# code snippet that simulates work with promises ala rlang package
tmp <- c(1,2,4)
some_unique_name <- TRUE
foo <- function(...) { tmp <- 'this is not the right tmp'; bar(); }
bar <- function() rffi.captureDotsWithSingleElement(parent.frame())
promiseInfo <- foo(tmp)
stopifnot('some_unique_name' %in% ls(promiseInfo[[2]]))
eval(promiseInfo[[1]], promiseInfo[[2]])
# parent.frame call in Rf_eval. Simulates pattern from rlang package
getCurrEnv <- function(r = parent.frame()) r
fn <- function(eval_fn) {
list(middle(eval_fn), getCurrEnv())
}
middle <- function(eval_fn) {
deep(eval_fn, getCurrEnv())
}
deep <- function(eval_fn, eval_env) {
# the result value of rffi.tryEval is list, first element is the actual result
eval_fn(quote(parent.frame()), eval_env)[[1]]
}
res <- fn(rffi.tryEval)
stopifnot(identical(res[[1]], res[[2]]))
# fiddling the pointers to the native arrays: we get data pointer to the first SEXP argument (vec),
# then put value 42/TRUE directly into it at index 0,
# value of symbol 'myvar' through Rf_eval at index 1,
# value of Rf_eval('max(vec)') at the last index (note that the upcall now should take max from the updated vector!)
env <- new.env()
env$myvar <- 44L;
rffi.evalAndNativeArrays(c(1L, 2L, 3L, 4L, 5L), as.symbol('myvar'), env);
env$myvar <- 3.14
rffi.evalAndNativeArrays(c(1.1, 2.2, 3), as.symbol('myvar'), env);
env$myvar <- T
rffi.evalAndNativeArrays(c(F, F, F, F), as.symbol('myvar'), env);
env$myvar <- 20L
rffi.evalAndNativeArrays(as.raw(c(1, 3, 2)), as.symbol('myvar'), env);
# Stack introspection after Rf_eval
# Apparently parent.frame does not always give what sys.frame(sys.parent()) if the Rf_eval gets explicit environment != global env
# Test fixture: print a snapshot of R's call-stack introspection functions
# (sys.parents, sys.frame, parent.frame, sys.nframe) as observed from inside
# a function that is invoked via Rf_eval / eval, then return the sentinel
# value 4242 so callers can check the call completed.
# doSysParents: when TRUE also print sys.parents(); the FALSE cases exist
#   because FastR and GNU-R are known to differ there (see the TODO comments
#   at the call sites below this definition).
testStackIntro <- function(doSysParents) {
  if (doSysParents) {
    cat("sys.parents(): ", paste0(sys.parents(), collapse=","), "\n")
  }
  # sys.frame(2) assumes this function sits at least two frames deep on the
  # call stack; it errors when called directly from the top level.
  cat("sys.frame(2):", paste0(ls(sys.frame(2)), collapse=","), "\n")
  cat("parent.frame():", paste0(ls(parent.frame()), collapse=","), "\n")
  cat("sys.nframe():", sys.nframe(), "\n")
  4242
}
rfEval <- function(expr, env, evalWrapperVar = 4422) .Call(testrffi:::C_api_Rf_eval, expr, env)
rfEval(quote(testStackIntro(T)), list2env(list(myenv=42)))
rfEval(quote(testStackIntro(T)), .GlobalEnv)
# TODO: sys.parents() give 0,1,3 in FastR instead of 0,1,2 in GNUR
eval(quote(testStackIntro(F)), list2env(list(myenv=42)))
# TODO: sys.parents() give 0,1,2 in FastR instead of 0,1,0 in GNUR, but parent.frame works
eval(quote(testStackIntro(F)), .GlobalEnv)
# TODO: fix do.call in the same way
# testStackIntro <- function(doSysParents) {
# cat("sys.parents(): ", paste0(sys.parents(), collapse=","), "\n")
# cat("sys.frame(2):", paste0(ls(sys.frame(2)), collapse=","), "\n")
# cat("parent.frame():", paste0(ls(parent.frame()), collapse=","), "\n")
# cat("sys.nframe():", sys.nframe(), "\n")
# 4242
# }
#
# do.call(testStackIntro, list(T))
# do.call(testStackIntro, list(T), envir = list2env(list(myenv=42)))
# length tests
env <- new.env(); env$a <- 42; env$b <- 44;
rffi.inlined_length(env)
rffi.inlined_length(c(1,2,3))
rffi.inlined_length(list(a = 1, b = 42))
rffi.inlined_length(as.pairlist(c(1,2,3,4,5)))
expr <- expression(x + y, 3)
rffi.inlined_length(expr)
rffi.inlined_length(expr[[1]])
# fails in FastR because DotCall class cannot recognize that the RArgsValuesAndNames
# are not meant to be extracted into individual arguments, but instead send as is
# to the native function as SEXP
#
# foo <-function(...) rffi.inlined_length(get('...'))
# foo(a = 1, b = 2, c = 3, d = 42)
# Enable when GR-10914 is fixed
if (Sys.getenv("FASTR_RFFI") != "llvm") {
# Exercise the length-related R C API wrappers exposed by the testrffi
# package: allocate a vector of the given SEXP type, then print the values
# reported by LENGTH/TRUELENGTH before and after SETLENGTH and
# SET_TRUELENGTH calls.
# type: integer SEXP type code forwarded to Rf_allocVector
#   (e.g. 10 = LGLSXP, 13 = INTSXP, 14 = REALSXP -- see the call sites
#   directly below this definition).
testLength <- function(type) {
  s <- api.Rf_allocVector(type, 1000)
  print(api.LENGTH(s))
  print(api.TRUELENGTH(s))
  api.SETLENGTH(s, 10)
  print(api.LENGTH(s))
  print(api.TRUELENGTH(s))
  api.SET_TRUELENGTH(s, 1000)
  print(api.LENGTH(s))
  print(api.TRUELENGTH(s))
}
testLength(10) # LGLSXP
testLength(13) # INTSXP
testLength(14) # REALSXP
testLength(15) # CPLXSXP
testLength(16) # STRSXP
testLength(19) # VECSXP
svec <- c("a")
charsxp <- api.STRING_ELT(svec, 0)
api.LENGTH(charsxp)
# gnur returns different value
# api.TRUELENGTH(charsxp)
api.SET_TRUELENGTH(charsxp, 1000)
api.LENGTH(charsxp)
api.TRUELENGTH(charsxp)
# gnur returns different value
# api.LEVELS(charsxp)
identical(charsxp, api.STRING_ELT(c("a"), 0))
}
rffi.parseVector('1+2')
rffi.parseVector('.*/-')
rffi.parseVector('1+')
# preserve and release object
# using loop to trigger compilation
preserved_objects <- list()
for(i in seq(5000)) {
preserved_objects[[i]] <- rffi.preserve_object(i)
}
for(i in seq(5000)) {
obj <- preserved_objects[[i]]
stopifnot(obj == i)
rffi.release_object(obj)
}
# Note: runif must not be used before this test so that it is still a promise!!!
# Following code calls Rf_eval with a language object that contains a promise instead of the expected function
set.seed(42)
rffi.RfEvalWithPromiseInPairList()
# CAR/CDR tests
rffi.CAR(NULL)
rffi.CDR(NULL)
invisible(rffi.CAR(as.symbol('a'))) # TODO: printing CHARSEXP not implemented in FastR
set.seed(42)
rffi.RfRandomFunctions()
rffi.RfRMultinom()
rffi.RfFunctions()
setAttrTarget <- c(1,2,3)
attr(setAttrTarget, 'myattr2') <- 'some value';
api.SET_ATTRIB(setAttrTarget, as.pairlist(list(myattr=42)))
setAttrTarget
setAttrTarget <- new.env()
attr(setAttrTarget, 'myattr2') <- 'some value';
api.SET_ATTRIB(setAttrTarget, as.pairlist(list(myattr=42)))
setAttrTarget
typeof(api.ATTRIB(mtcars))
api.ATTRIB(structure(c(1,2,3), myattr3 = 33))
api.ATTRIB(data.frame(1, 2, 3))
invisible(rffi.testDATAPTR('hello', testSingleString = T));
# See issue GR-9928
# rffi.testDATAPTR(c('hello', 'world'), testSingleString = F);
# SET_OBJECT
# FastR does not fully support the SET_OBJECT fully,
# the test is left here in case there is a need to actually implement it.
x <- structure(3, class='abc')
# just to make sure tirivial SET_OBJECT examples work
api.SET_OBJECT(x, 1)
api.SET_OBJECT(c(1,2,3), 0)
## before SET_OBJECT(x,0), S3 dispatching works as expected:
# foo <- function(x) UseMethod('foo')
# foo.default <- function(x) cat("foo.default\n")
# foo.abc <- function(x) cat("foo.abc\n")
# as.character.abc <- function(...) "42"
# paste(x) # "42"
# foo(x) # "foo.abc"
# api.SET_OBJECT(x, 0) # FastR throws error saying that this is not implemented
## after SET_OBJECT(x,0), S3 dispatching does not work for internals
# paste(x) # "3" -- as.character.abc not called
# inherits(x, 'abc') # TRUE
# foo(x) # "foo.abc"
## The following set/get semantics does not work in FastR as the scalar value is
## always transformed into a NEW string vector before passing it to the native function.
#svec <- "a"
#api.SETLEVELS(svec, 1)
#api.LEVELS(svec)
svec <- c("a", "b")
api.SETLEVELS(svec, 1)
api.LEVELS(svec)
env <- new.env()
env2 <- new.env()
env2$id <- "enclosing"
api.SET_ENCLOS(env, env2)
api.ENCLOS(env)$id == "enclosing"
rffi.test_R_nchar("ffff")
f1 <- function(x,y) { print("f1"); x^y }
f2 <- function(z) { print("f2"); z }
ll <- quote(f1(2, f2(3)))
rffi.test_forceAndCall(ll, 0, .GlobalEnv)
rffi.test_forceAndCall(ll, 2, .GlobalEnv)
f1 <- function(x, y, ...) { print("f1"); vars <- list(...); print(vars); x^y }
f2 <- function(z) { print("f2"); z }
f3 <- function(s) { print("f3"); s }
ll <- quote(f1(2, f2(3), ...))
testForceAndCallWithVarArgs <- function (n, ...) {
rffi.test_forceAndCall(ll, n, environment())
}
testForceAndCallWithVarArgs(0, f3("aaa"))
testForceAndCallWithVarArgs(3, f3("aaa"))
x <- c(1)
api.Rf_isObject(x)
class(x) <- "c1"
api.Rf_isObject(x)
# prints R types of C constants like R_NamesSymbol
rffi.test_constantTypes()
# findVarInFrame for "..." that is empty gives symbol for missing, i.e. ""
foo <- function(...) rffi.findvar('...', environment())
typeof(foo())
foo()
# findVarInFrame for empty argument gives symbol for missing, i.e. ""
foo <- function(x) rffi.findvar('x', environment())
typeof(foo())
foo()
# active bindings
f <- local( {
x <- 1
function(v) {
if (missing(v))
cat("get\n")
else {
cat("set\n")
x <<- v
}
x
}
})
api.R_MakeActiveBinding(as.symbol("fred"), f, .GlobalEnv)
bindingIsActive("fred", .GlobalEnv)
fred
fred <- 2
# sharing elements in native data
x <- c("abc")
y <- c("xyz")
# x[0] = y[0]
rffi.shareStringElement(x, 1L, y, 1L)
l1 <- list(1:2, c("a", "b"))
l2 <- list(3:4, c("c", "d"))
rffi.shareListElement(l1, 1L, l2, 1L)
rffi.shareListElement(l1, 1L, l2, 2L)
i1 <- c(1L, 2L)
i2 <- c(3L, 4L)
rffi.shareIntElement(i1, 1L, i2, 2L)
d1 <- c(1, 2)
d2 <- c(3, 4)
rffi.shareDoubleElement(d1, 1L, d2, 2L)
# setVar
e <- new.env()
e$x <- 1
rffi.test_setVar(as.symbol('x'), 42, e)
stopifnot(identical(e$x, 42))
rffi.test_setVar(as.symbol('y'), 42, e)
stopifnot(identical(e$y, NULL))
stopifnot(identical(globalenv()$y, 42))
v <- c(1:6)
d <- c(2.0, 3.0)
rffi.test_setAttribDimDoubleVec(v, d)
print(dim(v))
# Complex vectors
x <- c(4+3i,2+1i)
rffi.test_sort_complex(x)
# allocate large vector: checks integer overflow bug in allocation via Unsafe
# we need to force the materialization to native memory via rffi.get_dataptr,
# which returns NULL in case of an error
stopifnot(!is.null(rffi.get_dataptr(api.Rf_allocVector(14, 268435457))))
if (Sys.getenv("TESTRFFI_IGNORE_4GB_VECTOR_TEST") != "") {
vec <- double(268435457)
vec[[268435457]] <- 4.2
stopifnot(!is.null(rffi.get_dataptr(vec)))
stopifnot(vec[[268435457]] == 4.2)
}
is.null(rffi.testMissingArgWithATTRIB())
# Compact representations and RFFI:
# sequences get materialized on write, but should not get materialized on read
rffi.shareIntElement(1:2,1:3,1:4,1:5)
e <- new.env()
s <- rffi.testInstallTrChar(c('hello', 'world'), e)
stopifnot(is.symbol(s))
stopifnot(e$hello == 2L)
| 12,765 | gpl-2.0 |
a87eace14716a46a5d8be1cde654b55eb68e7512 | peter19852001/decomp | sim.R | #
# Randomly generate a (synthetic) gene network in the form of matrices,
# where each link a_ij holds a number representing the
# effect of gene i on gene j: +ve for activation, -ve for inhibition.
# Associated with each a_ij != 0 is t_ij > 0, which represents the time delay
# of the effect of gene i on gene j.
# Randomly generate a synthetic gene regulatory network.
# n is the number of genes
# is_acyclic is TRUE iff only an acyclic network is to be generated
# is_self is TRUE iff, when is_acyclic is FALSE, self loops are kept
# max.parents caps the number of non-zero entries per column, i.e. the
#   number of candidate regulators sampled per target gene; exactly
#   max.parents parents are drawn without replacement, so max.parents <= n
# Returns a list of two n-by-n matrices: $links holds the signed link
# strengths (+ve activation, -ve inhibition, 0 for no link) and $delays
# the corresponding time delays (0 where there is no link).
gen_grn <- function(n,is_acyclic,is_self, max.parents) {
  L <- n*n;
  r <- rep(0,L);
  # limits the number of non-zero entries in each column: flat index si
  # corresponds to row ps[i], column j because the matrices below are
  # built with byrow=TRUE
  for(j in 1:n) {
    ps <- sample(1:n,max.parents);
    for(i in 1:length(ps)) {
      si <- (ps[i]-1)*n + j;
      r[si] <- 1;
    }
  }
  #
  if(is_acyclic) {
    # make it upper triangular (zero out the strict lower triangle)
    for(i in 2:n) {
      for(j in 1:(i-1)) {
        r[(i-1)*n + j] <- 0;
      }
    }
  }
  if(!is_self) {
    # make the diagonal zero (remove self loops)
    for(i in 1:n) {r[(i-1)*n + i] <- 0;}
  }
  delay <- r*runif(L,0,1); # simulate less delays
  # random sign (prob 0.5 each) and magnitude in [0.5, 1.5] per link
  r <- r*(1-2*rbinom(L,1,0.5))*runif(L,0.5,1.5);
  # returns r and delay
  list(links=matrix(data=r, nrow=n, ncol=n, byrow=TRUE),
       delays=matrix(data=delay, nrow=n, ncol=n, byrow=TRUE))
}
# Apply one random permutation to the rows and columns of both network
# matrices, so that the position of a gene carries no information.
# links, delays: square matrices over the same set of genes.
# Returns a list with the consistently permuted $links and $delays.
permute_grn <- function(links, delays) {
  n_genes <- nrow(links)
  # a single shared permutation keeps links and delays aligned
  perm <- sample(seq_len(n_genes))
  list(links = links[perm, perm], delays = delays[perm, perm])
}
# Simulate an expression time series from a delayed linear gene network.
# links is a matrix encoding the links of the gene network (direction and magnitude)
# delays contains the (continuous) time delay for each link
# dt is the time step to simulate; N steps are simulated
# Returns an N-by-n matrix, where n is the number of genes in the network.
# The network is assumed to start with zero expression for each gene
# (contributions from before the simulated window are treated as zero).
sim_grn <- function(links, delays, dt, N) {
  T <- ceiling(delays/dt); # turn the continuous delays into step counts
  ng <- nrow(links);
  r <- matrix(data=rep(0,ng*N),nrow=N,ncol=ng);
  for(i in 1:N) {
    for(j in 1:ng) {
      # small Gaussian noise term, drawn per gene per time step
      x <- rnorm(1,0,0.01);
      for(k in 1:ng) {
        if(T[k,j] != 0) {
          # add gene k's delayed contribution to gene j; expression before
          # the start of the simulation (T[k,j] >= i) counts as zero
          x <- x + (if(T[k,j] < i) r[i-T[k,j],k] else 0)*links[k,j];
        }
      }
      r[i,j] <- x;
    }
  }
  r
}
# Plot every gene's expression trajectory on a single set of axes.
# r: a time-by-gene matrix (rows = time points, columns = genes); when
# column names are present they are appended to the legend labels.
plot_exp <- function(r) {
  n_time <- nrow(r)
  n_gene <- ncol(r)
  # legend labels look like "i= <index> <colname>" (colname empty when unnamed)
  base_names <- colnames(r)
  if (is.null(base_names)) {
    base_names <- rep("", n_gene)
  }
  labels <- paste("i=", seq_len(n_gene), base_names)
  # empty frame spanning the full time range and the full value range
  plot(x = c(1, n_time), y = range(r), type = "n", main = "Expressions",
       xlab = "Time", ylab = "Expression")
  for (g in seq_len(n_gene)) {
    lines(x = 1:n_time, y = r[, g], type = "b", col = g, pch = g)
  }
  legend(x = "topright", legend = labels, pch = 1:n_gene, col = 1:n_gene)
}
# Report every cell where two equal-sized matrices a and b differ.
# Prints one line per differing cell (row i, column j, both values) in
# row-major order, matching the original loop's output ordering, and
# returns the differing positions invisibly.
# NA handling: a cell counts as different when exactly one of the two
# values is NA (the previous loop version errored on NA cells).
compare_matrices <- function(a,b) {
  # guard against silently scanning mismatched shapes
  stopifnot(dim(a) == dim(b))
  # NA-safe inequality: values differ, or exactly one side is NA
  diff_mask <- (a != b) | (is.na(a) != is.na(b))
  diff_mask[is.na(diff_mask)] <- FALSE   # both-NA cells are equal
  idx <- which(diff_mask, arr.ind = TRUE)
  # which() returns column-major order; re-sort to row-major for output
  idx <- idx[order(idx[, 1], idx[, 2]), , drop = FALSE]
  for (k in seq_len(nrow(idx))) {
    i <- idx[k, 1]; j <- idx[k, 2]
    cat("i: ",i, "\tj: ",j,"\ta: ",a[i,j], "\tb: ",b[i,j],"\n")
  }
  invisible(idx)
}
# Legacy comparison of a true network against an estimated one.
# tlinks and tdelays are the true links and delays, respectively
# elinks and edelays are the estimated links and delays
# all four are square n-by-n matrices, where n is the number of genes
# Returns a list of:
#   delays.right: among the positives (in EITHER the true or predicted
#     network), the number of delays correctly estimated
#   delays.wrong: among those positives, the number of delays incorrectly
#     estimated
#   delays.sse: sum of squared errors of the delays over those positives
#   links: contingency table of link presence/absence, direction considered
#   effects: contingency table of link signs (1 for +, 0 for no link,
#     -1 for -), direction considered; rows are true signs, columns are
#     predicted signs
compare_grn_old <- function(tlinks,tdelays, elinks,edelays) {
  # delays: TPs marks cells with a link in either network (union of
  # true and predicted positives)
  TPs <- (sign(tlinks) != 0) | (sign(elinks) != 0);
  dr <- sum(TPs & (tdelays == edelays));
  dw <- sum(TPs & (tdelays != edelays));
  dsse <- (tdelays - edelays)[TPs];
  dsse <- sum(dsse*dsse);
  # links
  links.tab <- table(sign(tlinks)!=0,sign(elinks)!=0, dnn=c("True Links","Predicted Links"));
  # effects
  effects.tab <- table(sign(tlinks),sign(elinks), dnn=c("True Effect","Predicted Effect"));
  #
  list(delays.right=dr,delays.wrong=dw, delays.sse=dsse, links=links.tab, effects=effects.tab)
}
# Compare a true network against the edge list produced by infer_grn().
# tlinks and tdelays are the true links and delays, respectively,
# both square n-by-n matrices, where n is the number of genes.
# grn is the result as returned by infer_grn(); its rows are predicted
# edges with (at least) columns $from, $to, $delay and $test.value, where
# the sign of $test.value encodes the predicted effect (+/-).
# Returns a list of:
#   links.recall: recall of links, considering direction (x -> y vs y -> x)
#     but not the effect sign, and disregarding the delay
#   links.precision: precision of the links on the same basis
#   links.specificity: fraction of true non-links with no predicted edge
#   effects.recall / effects.precision: direction AND sign considered
#   delays.recall / delays.precision: among true/predicted delays, how many
#     have exactly the same value
compare_grn <- function(tlinks,tdelays, grn) {
  ng <- nrow(tlinks);
  n.links <- sum(sign(tlinks)!=0); # same as number of effects, and number of delays
  n.nlinks <- sum(sign(tlinks)==0); # number of non-links
  n.p.links <- nrow(grn); # number of predicted links, same as number of predicted effects
  n.r.links <- 0; # number of true links correctly predicted
  n.c.links <- 0; # number of correct prediction in the links; multiple predictions for the same link (possibly with different delays) are all counted
  n.n.links <- 0; # number of true non-links with no predicted edge
  #
  n.r.effects <- 0; # number of true effects correctly predicted
  n.c.effects <- 0; # number of correct prediction of the effects, with proper multiple counts
  n.r.delays <- 0; # number of true delays correctly predicted
  n.c.delays <- 0; # number of correct prediction of the delays, with proper multiple counts
  # go through the true links
  # NOTE(review): when nrow(grn) == 0 this loop is skipped, so n.n.links
  # stays 0 and links.specificity is reported as 0 even though every
  # non-link is trivially "correct" -- confirm that is intended.
  if(nrow(grn) > 0) {
    for(i in 1:ng) {
      for(j in 1:ng) {
        s <- sign(tlinks[i,j]);
        if(s != 0) {
          # d selects all predicted edges for the (i -> j) pair
          d <- (grn$from==i) & (grn$to==j);
          if(sum(d) > 0) {n.r.links <- n.r.links + 1;}
          if(sum(d & (grn$delay == tdelays[i,j])) > 0) {n.r.delays <- n.r.delays + 1;}
          if(s < 0) {
            if(sum(d & (grn$test.value < 0)) > 0) {n.r.effects <- n.r.effects + 1;}
          } else {
            if(sum(d & (grn$test.value > 0)) > 0) {n.r.effects <- n.r.effects + 1;}
          }
        } else {
          # true non-link: correct only when nothing was predicted for it
          d <- (grn$from==i) & (grn$to==j);
          if(sum(d) == 0) {n.n.links <- n.n.links + 1;}
        }
      }
    }
  }
  # go through the predictions
  for(i in 1:nrow(grn)) {
    if(nrow(grn) > 0) {
      x <- grn$from[i];
      y <- grn$to[i];
      if(tlinks[x,y] != 0) {n.c.links <- n.c.links + 1;}
      if(grn$test.value[i] < 0) {
        if(tlinks[x,y] < 0) {n.c.effects <- n.c.effects + 1;}
      } else {
        if(tlinks[x,y] > 0) {n.c.effects <- n.c.effects + 1;}
      }
      if(tdelays[x,y] == grn$delay[i]) {n.c.delays <- n.c.delays + 1;}
    }
  }
  # done; precisions fall back to 0 when there are no predictions
  list(links.recall=n.r.links/n.links, links.precision=(if(n.p.links<=0) 0 else (n.c.links/n.p.links)),
       links.specificity=n.n.links/n.nlinks,
       effects.recall=n.r.effects/n.links, effects.precision=(if(n.p.links<=0) 0 else (n.c.effects/n.p.links)),
       delays.recall=n.r.delays/n.links, delays.precision=(if(n.p.links<=0) 0 else (n.c.delays/n.p.links)))
}
### test
#tmp <- gen_grn(20,TRUE,FALSE,0.2);
#tmp$delays <- ceiling(tmp$delays*10);
#tmpr <- sim_grn(tmp$links,tmp$delays,0.1,1000);
#plot_exp(tmpr);
#tmpz1 <- infer_grn1(tmpr,0.0001,100);
#tmpz2 <- infer_grn(tmpr,0.001,100);
#tmpz3 <- infer_grn(tmpr,0.01,100);
#tmpz <- infer_grn(tmpr,0.0001,100);
#compare_matrices(ceiling(tmp$delays*10), tmpz1$delays);
#compare_matrices(ceiling(tmp$delays*10), tmpz2$delays);
#compare_matrices(ceiling(tmp$delays*10), tmpz3$delays);
#compare_matrices(ceiling(tmp$delays*10), tmpz$delays);
#r1 <- compare_grn(tmp$links,tmp$delays, tmpz1$links,tmpz1$delays);
#r1
#r2 <- compare_grn(tmp$links,tmp$delays, tmpz2$links,tmpz2$delays);
#r2
#r3 <- compare_grn(tmp$links,tmp$delays, tmpz3$links,tmpz3$delays);
#r3
#r4 <- compare_grn(tmp$links,tmp$delays, tmpz$links,tmpz$delays);
#r4
#tmp
#tmpz
#ceiling(tmp$delays*10)
#tmpz1$delays
#ceiling(tmp$delays*10)
#tmpz2$delays
#ceiling(tmp$delays*10)
#tmpz3$delays
#ceiling(tmp$delays*10)
#tmpz$delays
##
| 8,813 | gpl-2.0 |
dd75f6967819f8cde072ad067cb78c5505d42e9c | longphin/Bayesian---STA250 | HW2/BLB/BLB_lin_reg_process.R |
# Read in and process BLB (Bag of Little Bootstraps) results:
# `mini` toggles between the small test dataset (40 coefficients) and the
# full dataset (1000 coefficients); d is the coefficient dimension.
mini <- FALSE
if (mini){
  d <- 40
} else {
  d <- 1000
}
# BLB specs:
s <- 5 # 50 -- number of subsamples
r <- 50 # 100 -- number of bootstrap replicates per subsample
outpath <- "output" # directory holding per-replicate coefficient files
respath <- "final"  # directory for the aggregated results
if (mini){
  rootfilename <- "blb_lin_reg_mini"
} else {
  rootfilename <- "blb_lin_reg_data"
}
# result file names encode the (s, r) configuration
results.se.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_SE.txt")
results.est.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_est.txt")
# Build the path of one per-replicate coefficient file:
# <outpath>/coef_SS_RR.txt, where SS is the zero-padded subsample index
# and RR is the zero-padded bootstrap-replicate index.
outfile <- function(outpath,r_index,s_index){
  fname <- sprintf("coef_%02d_%02d.txt", s_index, r_index)
  paste0(outpath, "/", fname)
}
# coefs[[s_index]] will hold the r-by-d matrix of bootstrap coefficient
# estimates for subsample s_index.
coefs <- vector("list",s)
blb_est <- blb_se <- matrix(NA,nrow=s,ncol=d)
# Compute BLB SE's:
for (s_index in 1:s){
  coefs[[s_index]] <- matrix(NA,nrow=r,ncol=d)
  for (r_index in 1:r){
    tmp.filename <- outfile(outpath,r_index,s_index)
    tryread <- try({tmp <- read.table(tmp.filename,header=TRUE)},silent=TRUE)
    # NOTE(review): `class(x) == "try-error"` is fragile for objects with
    # multiple classes; inherits(tryread, "try-error") is the robust idiom.
    if (class(tryread)=="try-error"){
      errmsg <- paste0("Failed to read file: ",tmp.filename)
      stop(errmsg)
    }
    if (nrow(tmp) != d){
      stop(paste0("Incorrect number of rows in: ",tmp.filename))
    }
    # the first column of each file is the coefficient vector for one replicate
    coefs[[s_index]][r_index,] <- as.numeric(tmp[,1])
  }
  # per-subsample point estimate: mean over the r bootstrap replicates
  blb_est[s_index,] <- apply(coefs[[s_index]],2,mean)
  # SD for each subsample:
  blb_se[s_index,] <- apply(coefs[[s_index]],2,sd)
}
# Average over subsamples:
blb_final_est <- apply(blb_est,2,mean)
blb_final_se <- apply(blb_se,2,mean)
cat("Experimental Final BLB Estimates's (Note: These are biased in general):\n")
print(blb_final_est)
cat("Final BLB SE's:\n")
print(blb_final_se)
cat("Writing to file...\n")
# only the SE file is written; the estimate file is intentionally disabled
write.table(file=results.se.filename,blb_final_se,row.names=F,quote=F)
#write.table(file=results.est.filename,blb_final_est,row.names=F,quote=F)
cat("done. :)\n")
1cd49fccbb37e19521b27591b91df512d913de9e | everdark/rbasic | samplecodes/src.R |
# Report whether a binding named "VAR" is visible from this function's
# evaluation environment (its own frame, then the enclosing environments
# up through the search path).
findVAR <- function() {
  exists("VAR")
}
| 40 | cc0-1.0 |
dd75f6967819f8cde072ad067cb78c5505d42e9c | longphin/Stuff | HW2/BLB/BLB_lin_reg_process.R |
# Read in and process BLB results:
mini <- FALSE
if (mini){
d <- 40
} else {
d <- 1000
}
# BLB specs:
s <- 5 # 50
r <- 50 # 100
outpath <- "output"
respath <- "final"
if (mini){
rootfilename <- "blb_lin_reg_mini"
} else {
rootfilename <- "blb_lin_reg_data"
}
results.se.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_SE.txt")
results.est.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_est.txt")
outfile <- function(outpath,r_index,s_index){
return(paste0(outpath,"/","coef_",sprintf("%02d",s_index),"_",sprintf("%02d",r_index),".txt"))
}
coefs <- vector("list",s)
blb_est <- blb_se <- matrix(NA,nrow=s,ncol=d)
# Compute BLB SE's:
for (s_index in 1:s){
coefs[[s_index]] <- matrix(NA,nrow=r,ncol=d)
for (r_index in 1:r){
tmp.filename <- outfile(outpath,r_index,s_index)
tryread <- try({tmp <- read.table(tmp.filename,header=TRUE)},silent=TRUE)
if (class(tryread)=="try-error"){
errmsg <- paste0("Failed to read file: ",tmp.filename)
stop(errmsg)
}
if (nrow(tmp) != d){
stop(paste0("Incorrect number of rows in: ",tmp.filename))
}
coefs[[s_index]][r_index,] <- as.numeric(tmp[,1])
}
blb_est[s_index,] <- apply(coefs[[s_index]],2,mean)
# SD for each subsample:
blb_se[s_index,] <- apply(coefs[[s_index]],2,sd)
}
# Average over subsamples:
blb_final_est <- apply(blb_est,2,mean)
blb_final_se <- apply(blb_se,2,mean)
cat("Experimental Final BLB Estimates's (Note: These are biased in general):\n")
print(blb_final_est)
cat("Final BLB SE's:\n")
print(blb_final_se)
cat("Writing to file...\n")
write.table(file=results.se.filename,blb_final_se,row.names=F,quote=F)
#write.table(file=results.est.filename,blb_final_est,row.names=F,quote=F)
cat("done. :)\n")
| 1,727 | mit |
dd75f6967819f8cde072ad067cb78c5505d42e9c | STA250/Stuff | HW2/BLB/BLB_lin_reg_process.R |
# Read in and process BLB results:
mini <- FALSE
if (mini){
d <- 40
} else {
d <- 1000
}
# BLB specs:
s <- 5 # 50
r <- 50 # 100
outpath <- "output"
respath <- "final"
if (mini){
rootfilename <- "blb_lin_reg_mini"
} else {
rootfilename <- "blb_lin_reg_data"
}
results.se.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_SE.txt")
results.est.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_est.txt")
outfile <- function(outpath,r_index,s_index){
return(paste0(outpath,"/","coef_",sprintf("%02d",s_index),"_",sprintf("%02d",r_index),".txt"))
}
coefs <- vector("list",s)
blb_est <- blb_se <- matrix(NA,nrow=s,ncol=d)
# Compute BLB SE's:
for (s_index in 1:s){
coefs[[s_index]] <- matrix(NA,nrow=r,ncol=d)
for (r_index in 1:r){
tmp.filename <- outfile(outpath,r_index,s_index)
tryread <- try({tmp <- read.table(tmp.filename,header=TRUE)},silent=TRUE)
if (class(tryread)=="try-error"){
errmsg <- paste0("Failed to read file: ",tmp.filename)
stop(errmsg)
}
if (nrow(tmp) != d){
stop(paste0("Incorrect number of rows in: ",tmp.filename))
}
coefs[[s_index]][r_index,] <- as.numeric(tmp[,1])
}
blb_est[s_index,] <- apply(coefs[[s_index]],2,mean)
# SD for each subsample:
blb_se[s_index,] <- apply(coefs[[s_index]],2,sd)
}
# Average over subsamples:
blb_final_est <- apply(blb_est,2,mean)
blb_final_se <- apply(blb_se,2,mean)
cat("Experimental Final BLB Estimates's (Note: These are biased in general):\n")
print(blb_final_est)
cat("Final BLB SE's:\n")
print(blb_final_se)
cat("Writing to file...\n")
write.table(file=results.se.filename,blb_final_se,row.names=F,quote=F)
#write.table(file=results.est.filename,blb_final_est,row.names=F,quote=F)
cat("done. :)\n")
| 1,727 | mit |
dd75f6967819f8cde072ad067cb78c5505d42e9c | dmtryshmtv/STA250Stuff | HW2/BLB/BLB_lin_reg_process.R |
# Read in and process BLB results:
mini <- FALSE
if (mini){
d <- 40
} else {
d <- 1000
}
# BLB specs:
s <- 5 # 50
r <- 50 # 100
outpath <- "output"
respath <- "final"
if (mini){
rootfilename <- "blb_lin_reg_mini"
} else {
rootfilename <- "blb_lin_reg_data"
}
results.se.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_SE.txt")
results.est.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_est.txt")
outfile <- function(outpath,r_index,s_index){
return(paste0(outpath,"/","coef_",sprintf("%02d",s_index),"_",sprintf("%02d",r_index),".txt"))
}
coefs <- vector("list",s)
blb_est <- blb_se <- matrix(NA,nrow=s,ncol=d)
# Compute BLB SE's:
for (s_index in 1:s){
coefs[[s_index]] <- matrix(NA,nrow=r,ncol=d)
for (r_index in 1:r){
tmp.filename <- outfile(outpath,r_index,s_index)
tryread <- try({tmp <- read.table(tmp.filename,header=TRUE)},silent=TRUE)
if (class(tryread)=="try-error"){
errmsg <- paste0("Failed to read file: ",tmp.filename)
stop(errmsg)
}
if (nrow(tmp) != d){
stop(paste0("Incorrect number of rows in: ",tmp.filename))
}
coefs[[s_index]][r_index,] <- as.numeric(tmp[,1])
}
blb_est[s_index,] <- apply(coefs[[s_index]],2,mean)
# SD for each subsample:
blb_se[s_index,] <- apply(coefs[[s_index]],2,sd)
}
# Average over subsamples:
blb_final_est <- apply(blb_est,2,mean)
blb_final_se <- apply(blb_se,2,mean)
cat("Experimental Final BLB Estimates's (Note: These are biased in general):\n")
print(blb_final_est)
cat("Final BLB SE's:\n")
print(blb_final_se)
cat("Writing to file...\n")
write.table(file=results.se.filename,blb_final_se,row.names=F,quote=F)
#write.table(file=results.est.filename,blb_final_est,row.names=F,quote=F)
cat("done. :)\n")
| 1,727 | mit |
dd75f6967819f8cde072ad067cb78c5505d42e9c | minjay/Stuff | HW2/BLB/BLB_lin_reg_process.R |
# Read in and process BLB results:
mini <- FALSE
if (mini){
d <- 40
} else {
d <- 1000
}
# BLB specs:
s <- 5 # 50
r <- 50 # 100
outpath <- "output"
respath <- "final"
if (mini){
rootfilename <- "blb_lin_reg_mini"
} else {
rootfilename <- "blb_lin_reg_data"
}
results.se.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_SE.txt")
results.est.filename <- paste0(respath,"/",rootfilename,"_s",s,"_r",r,"_est.txt")
outfile <- function(outpath,r_index,s_index){
return(paste0(outpath,"/","coef_",sprintf("%02d",s_index),"_",sprintf("%02d",r_index),".txt"))
}
coefs <- vector("list",s)
blb_est <- blb_se <- matrix(NA,nrow=s,ncol=d)
# Compute BLB SE's:
for (s_index in 1:s){
coefs[[s_index]] <- matrix(NA,nrow=r,ncol=d)
for (r_index in 1:r){
tmp.filename <- outfile(outpath,r_index,s_index)
tryread <- try({tmp <- read.table(tmp.filename,header=TRUE)},silent=TRUE)
if (class(tryread)=="try-error"){
errmsg <- paste0("Failed to read file: ",tmp.filename)
stop(errmsg)
}
if (nrow(tmp) != d){
stop(paste0("Incorrect number of rows in: ",tmp.filename))
}
coefs[[s_index]][r_index,] <- as.numeric(tmp[,1])
}
blb_est[s_index,] <- apply(coefs[[s_index]],2,mean)
# SD for each subsample:
blb_se[s_index,] <- apply(coefs[[s_index]],2,sd)
}
# Average over subsamples:
blb_final_est <- apply(blb_est,2,mean)
blb_final_se <- apply(blb_se,2,mean)
cat("Experimental Final BLB Estimates's (Note: These are biased in general):\n")
print(blb_final_est)
cat("Final BLB SE's:\n")
print(blb_final_se)
cat("Writing to file...\n")
write.table(file=results.se.filename,blb_final_se,row.names=F,quote=F)
#write.table(file=results.est.filename,blb_final_est,row.names=F,quote=F)
cat("done. :)\n")
| 1,727 | mit |
39c0de72b42d7a611d7ffafbe2ee8bfda651ae75 | SaraVarela/Bucanetes | script_bucanetes.R | ##### load libraries
library (raster)
library (rgdal)
library (dismo)
##### download climatic variables for the last glacial maximum (LGM) and for the present from worldclim.org
## ** = write the directory of the variables
setwd ("**")
LGM_CCSM<- stack (raster ("bio1.bil"), raster ("bio2.bil"),raster ("bio3.bil"),
raster ("bio4.bil"),raster ("bio5.bil"),raster ("bio6.bil"),
raster ("bio7.bil"),raster ("bio8.bil"),raster ("bio9.bil"),
raster ("bio10.bil"),raster ("bio11.bil"),raster ("bio12.bil"),
raster ("bio13.bil"),raster ("bio14.bil"),raster ("bio15.bil"),
raster ("bio16.bil"),raster ("bio17.bil"),raster ("bio18.bil"),
raster ("bio19.bil"), overwrite=TRUE)
setwd ("**")
LGM_MIROC<- stack (raster ("bio1.bil"), raster ("bio2.bil"),raster ("bio3.bil"),
raster ("bio4.bil"),raster ("bio5.bil"),raster ("bio6.bil"),
raster ("bio7.bil"),raster ("bio8.bil"),raster ("bio9.bil"),
raster ("bio10.bil"),raster ("bio11.bil"),raster ("bio12.bil"),
raster ("bio13.bil"),raster ("bio14.bil"),raster ("bio15.bil"),
raster ("bio16.bil"),raster ("bio17.bil"),raster ("bio18.bil"),
raster ("bio19.bil"), overwrite=TRUE)
setwd ("**")
WC<- stack (raster ("bio1.bil"), raster ("bio2.bil"),raster ("bio3.bil"),
raster ("bio4.bil"),raster ("bio5.bil"),raster ("bio6.bil"),
raster ("bio7.bil"),raster ("bio8.bil"),raster ("bio9.bil"),
raster ("bio10.bil"),raster ("bio11.bil"),raster ("bio12.bil"),
raster ("bio13.bil"),raster ("bio14.bil"),raster ("bio15.bil"),
raster ("bio16.bil"),raster ("bio17.bil"),raster ("bio18.bil"),
raster ("bio19.bil"), overwrite=TRUE)
##### load the points of the species
##### load the species occurrence points (CSV, no header row)
bucanetes<- read.table ("bucanetes.csv", sep=",", header=F)
##### run the model
# withholding a 20% sample for testing (5-fold split: fold 1 is the test set)
fold <- kfold(bucanetes, k=5)
occtest <- bucanetes[fold == 1, ]
occtrain <- bucanetes[fold != 1, ]
# select 1000 background (pseudo-absence) points from the present-day layers
bg <- randomPoints(WC, 1000)
# run the MaxEnt model using the training sample
model_bucanetes <- maxent(WC, occtrain)
# evaluate model results against the held-out test points and the background
model_evaluation <- evaluate(model_bucanetes, p=occtest, a=bg, x=WC)
model_evaluation
# training AUC
model_bucanetes@results [5]
# plot variable contribution to the model
plot(model_bucanetes)
# response curves
response(model_bucanetes)
# project the model in the present interglacial scenario
present <- predict (model_bucanetes, WC, progress="window")
plot (present, main="Interglacial")
# project the model in the Last Glacial Maximum
CCSM <- predict(model_bucanetes, LGM_CCSM, progress="window")
MIROC <- predict(model_bucanetes, LGM_MIROC, progress="window")
# multiply both predictions to contruct a consensus map
LGM<- CCSM*MIROC
# plot the maps
e <- extent(-20, 80, 10, 60)
map_LGM<- crop (LGM, e)
map_WC<- crop (present, e)
plot (map_LGM, main="Glacial")
plot (map_WC, main="Interglacial")
### contact: Sara Varela, email: svarela@paleobiogeography.org
| 3,276 | unlicense |
b01fb5fc09f468868cf321d25b1084c28e59ec79 | osofr/gridisl | R/ModelPredictionStack.R | #' S3 methods for printing model fit summary for PredictionModel R6 class object
#'
#' Prints the modeling summaries
#' @param x The model fit object produced by functions \code{make_PredictionStack}.
#' @param ... Additional options passed on to \code{print.PredictionModel}.
#' @export
print.PredictionStack <- function(x, ...) {
  # Delegate all printing to the R6 object's own show() method.
  x$show(...)
  invisible(NULL)
}
#' Combine models into ensemble
#'
#' Combine several fitted models into a single ensemble model of class 'PredictionStack'.
#' @param ... Objects of class "PredictionModel" or "PredictionStack", separated by commas.
#' @return A new \code{PredictionStack} R6 object wrapping the supplied models.
#' @export
make_PredictionStack <- function(...) {
  PredictionModels <- list(...)
  # Validate each element individually. The previous `!all(A) && !all(B)`
  # condition rejected any *mix* of the two (valid) classes while the error
  # message promised they were acceptable; check element-by-element instead.
  valid <- vapply(PredictionModels,
                  function(m) is.PredictionModel(m) || is.PredictionStack(m),
                  logical(1))
  if (!all(valid)) {
    stop("All arguments must be of class 'PredictionModel' or 'PredictionStack'")
  }
  # Tag the list so PredictionStack$new()'s assertion recognizes it.
  class(PredictionModels) <- c(class(PredictionModels), "PredictionStack")
  PredictionStack$new(PredictionModels)
}
## *******************************************************************************************
## Needs to be renamed to ReportStack -- because is the only actual purpose of this class
## *******************************************************************************************
## @export
## PredictionStack: wraps a list of fitted PredictionModel / PredictionStack
## objects and delegates scoring (MSE), prediction and reporting to its
## members. Aggregate queries pick the "best" member via the active binding
## best_Model_idx (lowest MSE across all members).
PredictionStack <- R6Class(classname = "PredictionStack",
  cloneable = TRUE,
  portable = TRUE,
  class = TRUE,
  # inherit = PredictionModel,
  public = list(
    PredictionModels = NULL,  # list of PredictionModel / PredictionStack objects
    runCV = NULL,             # read by predict_within_sample() -- internal-CV mode is unsupported there
    useH2Oframe = NULL,
    nodes = NULL,             # node definitions, copied from the first member model
    OData_train = NULL, # object of class DataStorageClass used for training
    OData_valid = NULL, # object of class DataStorageClass used for scoring models (contains validation data)
    SL_method = NULL,
    SL_coefs = NULL,    # SuperLearner risks/coefficients; printed by show() when set

    ## Validate the member list and copy 'nodes' from the first member.
    ## Expects the list to carry the "PredictionStack" class attribute
    ## (as set by make_PredictionStack()).
    initialize = function(PredictionModels) {
      if (!all(unlist(lapply(PredictionModels, is.PredictionModel))) && !all(unlist(lapply(PredictionModels, is.PredictionStack)))) {
        stop("All arguments must be of class 'PredictionModel' or 'PredictionStack'")
      }
      assert_that("PredictionStack" %in% class(PredictionModels))
      self$PredictionModels <- PredictionModels
      self$nodes <- PredictionModels[[1]]$nodes
      return(self)
    },

    ## Fitting is not supported on a stack; members must already be fitted.
    fit = function(overwrite = FALSE, data, predict = FALSE, validation_data = NULL, ...) {
      stop("...not implemented...")
      return(invisible(self))
    },

    ## Re-fit only the single best model (by MSE), then drop cached data to
    ## free memory. Returns the re-fitted model fit (invisibly).
    refit_best_model = function(...) {
      ## 1. Out of all model objects in self$PredictionModels, first find the object idx that contains the best model
      # min_by_predmodel <- lapply(lapply(self$getMSE, unlist), min)
      # best_Model_idx <- which.min(unlist(min_by_predmodel))
      best_Model_idx <- self$best_Model_idx
      ## 2. Refit the best model for that PredictionModel object only
      model.fit <- self$PredictionModels[[best_Model_idx]]$refit_best_model(...) # data, subset_exprs,
      ## 3. Clean up all data in PredictionModel, OData pointers
      ## (wipe.alldat is an active binding returning self, so this chains into wipe.allOData)
      self$wipe.alldat$wipe.allOData
      ## Remove all modeling obj stored in daughter classes (don't need these if only going to do the best re-trained model predictions)
      ## self$wipe.allmodels
      return(invisible(model.fit))
    },

    # Predict the response E[Y|newdata];
    # , best_refit_only
    ## best_only=TRUE: predictions from the re-fitted best member only.
    ## best_only=FALSE: a list of predictions, one per member (non-refitted models).
    predict = function(best_only, ...) {
      ## obtain prediction from the best refitted model only
      if (best_only) {
        best_Model_idx <- self$best_Model_idx
        best_pred_model <- self$PredictionModels[[best_Model_idx]]
        # newdata, subset_exprs, predict_model_names = NULL, , convertResToDT,
        if (gvars$verbose) { print("obtaining predictions for the best model..."); print(best_pred_model) }
        preds <- best_pred_model$predict(..., best_refit_only = TRUE)
        return(preds)
      ## try to obtain predictions from all models, non-refitted (i.e., trained on non-holdout observations only)
      } else {
        preds <- lapply(self$PredictionModels, function(model_obj) {
          # newdata, subset_exprs, predict_model_names = NULL, best_refit_only = FALSE, convertResToDT,
          model_obj$predict(..., best_refit_only = FALSE)
        })
        return(preds)
      }
    },

    # Predict the response E[Y|newdata] for out of sample observations (validation set / holdouts);
    ## best_only=TRUE restricts to the single best (non-refitted) model inside
    ## the best member's grid / ensemble.
    predict_out_of_sample = function(best_only, ...) {
      ## obtain out-of-sample prediction from the best non-refitted model
      if (best_only) {
        best_Model_idx <- self$best_Model_idx
        ## NEED TO KNOW WHAT WAS THE NAME OF THE BEST MODEL WITHIN THE SAME GRID / ENSEMBLE:
        best_pred_model <- self$PredictionModels[[best_Model_idx]]
        predict_model_names <- best_pred_model$get_best_model_names(K = 1)
        preds <- best_pred_model$predict_out_of_sample(..., predict_model_names = predict_model_names)
        return(preds)
      ## try to obtain out-of-sample predictions from all models, non-refitted (i.e., trained on non-holdout observations only)
      } else {
        preds <- lapply(self$PredictionModels, function(model_obj) {
          model_obj$predict_out_of_sample(...)
        })
        return(preds)
      }
    },

    ## Predict the response E[Y|newdata] for within sample observations from models trained on NON-HOLDOUT OBS ONLY;
    ## This should be usefull for split-specific SL.
    ## For holdout SL this is fairly straightfoward, just call predict with best_refit_only set to FALSE.
    ## When running internal CV SL this requires manually accessing each CV model and calling predict on each.
    predict_within_sample = function(best_only, ...) {
      MSE_tab <- self$getMSEtab
      if (self$runCV) stop("...not implemented...")
      ## obtain prediction from the best non-refitted model only
      if (best_only) {
        best_Model_idx <- self$best_Model_idx
        best_pred_model <- self$PredictionModels[[best_Model_idx]]
        predict_model_names <- best_pred_model$get_best_model_names(K = 1)
        preds <- best_pred_model$predict(..., predict_model_names = predict_model_names, best_refit_only = FALSE)
        return(preds)
      ## try to obtain predictions from all models, non-refitted (i.e., trained on non-holdout observations only)
      } else {
        preds <- lapply(self$PredictionModels, function(model_obj) {
          # newdata, subset_exprs, predict_model_names = NULL, best_refit_only = FALSE, convertResToDT,
          model_obj$predict(..., best_refit_only = FALSE)
        })
        return(preds)
      }
    },

    # Score models (so far only MSE) based on either out of sample CV model preds or validation data preds;
    ## Forwards this stack's train / validation data stores to each member.
    score_models = function(...) {
      scored_m <- lapply(self$PredictionModels, function(PredictionModel)
        PredictionModel$score_models(...,
          OData_train = self$OData_train,
          OData_valid = self$OData_valid))
      return(invisible(self))
    },

    # ------------------------------------------------------------------------------
    # return top K models based on smallest validation / test MSE for each PredictionModel in a stack
    # ------------------------------------------------------------------------------
    get_best_MSEs = function(K = 1) {
      return(sort(unlist(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$get_best_MSEs(K = K)))))
    },

    # ------------------------------------------------------------------------------
    # return top overall model across *ALL* models in self$PredictionModels
    # ------------------------------------------------------------------------------
    get_overall_best_model = function() { return(self$PredictionModels[[self$best_Model_idx]]$get_best_models(K = 1)) },

    # ------------------------------------------------------------------------------
    # return top K model fits from **FOR EVERY MODEL** in list self$PredictionModels
    # ------------------------------------------------------------------------------
    get_best_models = function(K = 1) {
      best_models <- NULL
      for (idx in seq_along(self$PredictionModels))
        best_models <- c(best_models, self$PredictionModels[[idx]]$get_best_models(K = K))
      # best_models <- unlist(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$get_best_models(K = K)))
      # best_models <- best_models[names(self$get_best_MSEs(K))]
      return(best_models)
    },

    ## Push externally computed squared residuals into every member model.
    reassignMSEs = function(sqresid_preds) {
      lapply(self$PredictionModels, function(PredictionModel) PredictionModel$reassignMSEs(sqresid_preds))
      return(invisible(NULL))
    },

    # ------------------------------------------------------------------------------
    # return the parameters of the top K models **FOR EVERY MODEL** in a list self$PredictionModels
    # ------------------------------------------------------------------------------
    get_best_model_params = function(K = 1) {
      best_model_params <- NULL
      for (idx in seq_along(self$PredictionModels))
        best_model_params <- c(best_model_params, self$PredictionModels[[idx]]$get_best_model_params(K = K))
      # best_models <- unlist(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$get_best_model_params(K = K)))
      # best_models <- best_models[names(self$get_best_MSEs(K))]
      return(best_model_params)
    },

    # ------------------------------------------------------------------------------
    # return a data.frame with best mean MSEs, including SDs & corresponding model names
    # ------------------------------------------------------------------------------
    ## Combined across members; keyed (i.e. sorted) by the MSE column.
    get_best_MSE_table = function(K = 1) {
      res_tab_list <- lapply(self$PredictionModels, function(PredictionModel) PredictionModel$get_best_MSE_table(K = K))
      # res_tab <- do.call("rbind", res_tab_list)
      # res_tab <- res_tab[order(res_tab[["MSE"]], decreasing = FALSE), ]
      res_tab <- data.table::rbindlist(res_tab_list)
      data.table::setkeyv(res_tab, cols = "MSE")
      return(res_tab)
    },

    ## One model-grid table per member; nested stacks are unwrapped one level
    ## (only their first member's grid is returned).
    get_modelfits_grid = function() {
      res_DT_list <- lapply(self$PredictionModels, function(PredictionModel) {
        if (is.PredictionStack(PredictionModel)) PredictionModel <- PredictionModel$PredictionModels[[1]]
        PredictionModel$get_modelfits_grid()
      })
      return(res_DT_list)
    },

    # Output info on the general type of regression being fitted:
    ## NOTE(review): the three arguments are currently ignored -- hard-coded
    ## values are forwarded to each member's show() instead.
    show = function(print_format = TRUE, model_stats = FALSE, all_fits = FALSE) {
      out_res <- lapply(self$PredictionModels, function(PredictionModel) PredictionModel$show(print_format = TRUE, model_stats = FALSE, all_fits = FALSE))
      cat("\n", fill = getOption("width"))
      if (!is.null(self$SL_coefs)) {
        print(cbind(Risk = self$SL_coefs$cvRisk, Coef = self$SL_coefs$coef))
      }
      return(invisible(NULL))
    },

    ## NOTE(review): 'all_fits' is likewise ignored; FALSE is always forwarded.
    summary = function(all_fits = FALSE) {
      return(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$summary(all_fits = FALSE)))
    },

    ## The remaining per-model API is intentionally unsupported on a stack:
    evalMSE = function(test_values) { stop("...not implemented...") },
    evalMSE_byID = function(test_values) { stop("...not implemented...") },
    getmodel_byname = function(model_names, model_IDs) { stop("...not implemented...") },
    define.subset.idx = function(data) { stop("not applicable to this class") }
  ),

  active = list(
    ## wipe out all data stored by daughter model classes
    ## (returns self so these bindings can be chained)
    wipe.alldat = function() {
      lapply(self$PredictionModels, function(PredictionModel) PredictionModel$wipe.alldat)
      return(self)
    },

    ## drop the DataStorageClass pointers held by each member
    wipe.allOData = function() {
      lapply(self$PredictionModels, function(PredictionModel) PredictionModel$wipe.allOData)
      return(self)
    },

    ## wipe out all the model objects stored by daughter model classes
    wipe.allmodels = function() {
      lapply(self$PredictionModels, function(PredictionModel) PredictionModel$wipe.allmodels)
      return(self)
    },

    getMSE = function() { return(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$getMSE)) },

    ## per-subject MSEs of the best member only
    getMSE_bysubj = function() {
      best_Model_idx <- self$best_Model_idx
      return(self$PredictionModels[[best_Model_idx]]$getMSE_bysubj)
    },

    getRMSE = function() { return(lapply(self$PredictionModels, function(PredictionModel) PredictionModel$getRMSE)) },

    ## index (into self$PredictionModels) of the member that contains the
    ## lowest-MSE model; short-circuits for a single-member stack
    best_Model_idx = function() {
      if (length(self$PredictionModels) == 1L) return(1L)
      MSE_tab <- self$getMSEtab
      metric_name <- "MSE"
      top_model_info <- MSE_tab[which.min(MSE_tab[[metric_name]]), ]
      best_Model_idx <- top_model_info[["Model_idx"]]
      return(best_Model_idx)
    },

    ## combined MSE table of all members, keyed (sorted) by MSE
    getMSEtab = function() {
      MSE_tab <- data.table::rbindlist(lapply(self$PredictionModels, '[[', "getMSEtab"))
      data.table::setkeyv(MSE_tab, cols = "MSE")
      return(MSE_tab)
    },

    # OData_train = function() { return(self$PredictionModels[[1]]$OData_train) },
    # OData_valid = function() { return(self$PredictionModels[[1]]$OData_valid) },

    ## out-of-sample predictions of the best member only
    get_out_of_sample_preds = function() {
      best_Model_idx <- self$best_Model_idx
      return(self$PredictionModels[[best_Model_idx]]$get_out_of_sample_preds)
    }
  )
)
| 13,309 | mit |
# Attach 'pkg', installing it first when it is not available.
# requireNamespace() is the documented way to test availability;
# scanning rownames(installed.packages()) is slow and unreliable.
checkpkg <- function(pkg) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
  suppressPackageStartupMessages(library(pkg, character.only = TRUE))
}
# Attach (installing first if necessary) the packages this script depends on.
checkpkg("zoo")
checkpkg("tibble")
checkpkg("readr")
checkpkg("dplyr")
# Water depth (m) from gauge pressure: water pressure (kPa), air pressure
# (kPa), air temperature (C), and a fixed sensor depth offset (m).
# Any argument left NULL is taken from the corresponding column of 'df'.
kPa2depth <- function(df, depth_offset = NULL, WaterPres_kPa = NULL,
                      AirPres_kPa = NULL, AirTemp_C = NULL) {
  if (is.null(depth_offset)) depth_offset <- df$depth_offset
  if (is.null(WaterPres_kPa)) WaterPres_kPa <- df$WaterPres_kPa
  if (is.null(AirPres_kPa)) AirPres_kPa <- df$AirPres_kPa
  if (is.null(AirTemp_C)) AirTemp_C <- df$AirTemp_C

  gauge_kPa <- WaterPres_kPa - AirPres_kPa  # pressure of the water column, g/(m*s^2)
  t <- AirTemp_C
  # Empirical polynomial for water density (kg/m^3) as a function of temperature.
  density <- (999.83952 + 16.945176 * t -
              7.9870401e-03 * t^2 - 46.170461e-06 * t^3 +
              105.56302e-09 * t^4 - 280.54253e-12 * t^5) /
    (1 + 16.879850e-03 * t)
  gravity <- 9.80655  # m/s^2
  depth_offset + gauge_kPa * 1000 / (density * gravity)  # depth in m
}
# FROM STREAM METABOLIZER
# Photosynthetically active radiation (PAR) for the given mean-solar
# timestamps and site coordinates, derived from modeled solar insolation.
calc_light <- function(solar.time, latitude, longitude, max.PAR=2326,
                       coef.SW.to.PAR=formals(convert_SW_to_PAR)$coef) {
  # Mean solar time -> UTC -> apparent solar time.
  utc_time <- convert_solartime_to_UTC(solar.time, longitude=longitude, time.type='mean solar')
  app.solar.time <- convert_UTC_to_solartime(utc_time, longitude=longitude, time.type='apparent solar')
  # Shortwave insolation, capped at the SW equivalent of max.PAR.
  sw <- calc_solar_insolation(
    app.solar.time, latitude=latitude,
    max.insolation=convert_PAR_to_SW(max.PAR, coef=1/coef.SW.to.PAR),
    format=c("degrees", "radians"))
  convert_SW_to_PAR(sw, coef=coef.SW.to.PAR)
}
#### GET CLIMATE DATA FROM CHAPEL HILL
# Hourly air pressure (kPa) and air temperature (C), interpolated to a
# 15-minute grid and clipped to the requested period.
# NOTE(review): the download URL is hard-coded to NOAA ISD-lite station
# 746939-93785 for year 2016 -- requests for dates outside 2016 will come
# back empty. Consider parameterizing station and year.
NCAirPressure = function(dates, start_datetime=NULL, end_datetime=NULL){
  tf = tempfile()
  download.file("ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-lite/2016/746939-93785-2016.gz",tf,mode="wb")
  x = read.table(tf)
  # -9999 is the ISD-lite missing-value sentinel.
  x[x==-9999] = NA
  colnames(x) = c("y","m","d","h","air_temp","dewtemp","air_kPa","winddir","sindspeed","skycover","precip1h","precip6h")
  # Scale the raw integer fields to kPa / degrees C (per the ISD-lite format).
  x$air_kPa = x$air_kPa/100
  x$air_temp = x$air_temp/10
  # Assemble a UTC timestamp from the year/month/day/hour columns.
  x$DateTime_UTC = parse_datetime(paste0(x$y,"-",sprintf("%02d",x$m),"-",sprintf("%02d",x$d)," ",sprintf("%02d",x$h),":00:00 0"), "%F %T %Z")
  x = as_tibble(x) %>% select(DateTime_UTC,air_temp,air_kPa)
  # Regular 15-minute (900 s) grid spanning the downloaded record; hourly
  # observations are joined onto it and gaps filled by linear interpolation.
  ss = tibble(DateTime_UTC=seq(x$DateTime_UTC[1], x$DateTime_UTC[nrow(x)], by=900))
  xx = left_join(ss, x)
  xx = mutate(xx, air_temp=na.approx(air_temp), air_kPa=na.approx(air_kPa))
  # Period of interest: either the range of 'dates' or the explicit bounds.
  if(is.null(start_datetime)){
    daterng = range(dates)
  }else{
    daterng = parse_datetime(c(start_datetime,end_datetime),"%Y-%m-%d %T %Z")
  }
  # Strictly inside the range (endpoints excluded).
  xtmp = xx %>% filter(DateTime_UTC>daterng[1] & DateTime_UTC<daterng[2])
  select(xtmp, DateTime_UTC, air_kPa, air_temp)
}
| 2,777 | gpl-3.0 |
fd7bdca89ac5633b512700721a0c91a763f34dcd | IQSS/Zelig4 | tests/NO-CRAN-bootstrap.R | library(Zelig)
# Example dataset shipped with Zelig.
data(coalition)
# Gamma model of coalition duration on the first 100 rows.
z.out <- zelig(duration ~ fract + numst2 + crisis, model = "gamma", data = coalition[1:100, ])
# Two counterfactual covariate profiles that differ only in numst2 (0 vs 1).
x.low <- setx(z.out, fract=300, numst2 = 0, crisis=200)
x.high <- setx(z.out, fract=300, numst2 = 1, crisis=200)
# Simulate the contrast with bootstrapping; num = 10 keeps this test fast.
s.out <- sim(z.out, x = x.low, x1 = x.high, num = 10, bootstrap=TRUE)
| 313 | gpl-2.0 |
89e9e2b61f8e062034630403197c77f8261b3f21 | MazamaScience/PWFSLSmoke | R/addWindBarbs.R | #' @keywords plotting
#' @export
#' @title Add wind barbs to a map
#' @param x vector of longitudes
#' @param y vector of latitudes
#' @param speed vector of wind speeds in knots
#' @param dir wind directions in degrees clockwise from north
#' @param circleSize size of the circle
#' @param circleFill circle fill color
#' @param lineCol line color (currently not supported)
#' @param extraBarbLength add length to barbs
#' @param barbSize size of the barb
#' @param ... additional arguments to be passed to \code{lines}
#' @description Draws one wind barb per location. Vector arguments are
#' recycled to the length of the longest argument supplied.
#' @references https://commons.wikimedia.org/wiki/Wind_speed
#' @examples
#' maps::map('state', "washington")
#' x <- c(-121, -122)
#' y <- c(47.676057, 47)
#' addWindBarbs(x, y, speed = c(45,65), dir = c(45, 67),
#'              circleSize = 1.8, circleFill = c('orange', 'blue'))
addWindBarbs <- function(x,
                         y,
                         speed,
                         dir,
                         circleSize = 1,
                         circleFill = 'transparent',
                         lineCol = 1,
                         extraBarbLength = 0,
                         barbSize = 1,
                         ...) {

  # Recycle every per-barb argument to the longest length supplied.
  lengths <- c(length(x),
               length(y),
               length(speed),
               length(dir),
               length(circleFill),
               length(circleSize),
               length(lineCol))

  vectorLength <- max(lengths)

  # TODO: check to make sure lengths are all multiples
  x <- rep_len(x, length.out = vectorLength)
  y <- rep_len(y, length.out = vectorLength)
  speed <- rep_len(speed, length.out = vectorLength)
  dir <- rep_len(dir, length.out = vectorLength)
  circleFill <- rep_len(circleFill, length.out = vectorLength)
  circleSize <- rep_len(circleSize, length.out = vectorLength)
  lineCol <- rep_len(lineCol, length.out = vectorLength)

  # seq_len() (not 1:vectorLength) so that all-empty inputs draw nothing
  # instead of iterating over c(1, 0) and failing on NA subscripts.
  for (i in seq_len(vectorLength)) {
    addWindBarb(x[i], y[i], speed[i], dir[i],
                circleSize[i], circleFill[i], lineCol[i],
                extraBarbLength, barbSize, ...)
  }
}
| 2,148 | gpl-3.0 |
00baa6ae9043b11a0021a9efd8d286a492001b9c | franticspider/q2e | tests/isotest2.R |
# NOTE(review): library() is preferred over require() for hard dependencies,
# since require() only warns (returns FALSE) when the package is missing.
require(q2e)

# Interactive walk-through: pause before each case, then print the result of
# q2e_isodists() (presumably the isotope distribution) for each sequence.
readline("Testing IGQPGAVGPAGIR")
q2e_isodists("IGQPGAVGPAGIR")

readline("Testing GPPGPQGAR")
q2e_isodists("GPPGPQGAR")

# The 20 standard amino-acid letters.
readline("Testing ACDEFGHIKLMNPQRSTVWY")
q2e_isodists("ACDEFGHIKLMNPQRSTVWY")

# Full alphabet, including letters that are not amino-acid codes.
readline("Testing ABCDEFGHIJKLMNOPQRSTUVWXYZ")
q2e_isodists("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
| 314 | lgpl-3.0 |
c7ac0e37f79ea727c182c0bf6e4411a980250649 | kannix68/advent_of_code_2015 | adventcode2015_01a.R | ## R (R-language)
# advent of code 2015. kannix68 (@github).
# Day 1: Not Quite Lisp.
# sorry, please currently set your directory
# NOTE(review): setwd() in a script is fragile -- every reader must edit this
# path; taking the input path from commandArgs() would be more portable.
setwd('~/devel/advent_of_code_2015')
# Puzzle input: one long string of '(' and ')' characters.
inFileName = 'adventcode2015_in01.txt'
#** our algorithm
# Final floor after following the instructions: each '(' goes up one floor,
# each ')' goes down one. Returns (#up - #down).
algo <- function(s) {
  total <- nchar(s)
  n_up <- nchar(gsub('\\)', '', s))  # characters remaining after deleting every ')'
  n_down <- total - n_up
  n_up - n_down
}
#** TESTING
# The ten worked examples from the puzzle statement, as input/expected pairs.
# (Replaces ten copy-pasted assign/compute/assert triples with one loop.)
test_cases <- list(
  list(input = '(',       expected = 1),
  list(input = '(())',    expected = 0),
  list(input = '()()',    expected = 0),
  list(input = '(((',     expected = 3),
  list(input = '(()(()(', expected = 3),
  list(input = '))(((((', expected = 3),
  list(input = '())',     expected = -1),
  list(input = '))(',     expected = -1),
  list(input = ')))',     expected = -3),
  list(input = ')())())', expected = -3)
)
for (tc in test_cases) {
  stopifnot(tc$expected == algo(tc$input))
}
#** "MAIN"
# Echo the working directory so a wrong setwd() above is easy to spot.
print(getwd())
# Read the whole puzzle input as a single string and strip any line breaks.
ins = gsub("[\r\n]", "", readChar(inFileName, file.info(inFileName)$size) )
print('input string was read')
res = algo(ins)
print(paste("result=", res, sep=''))
| 1,053 | mit |
923b2efcf3dc138e1d35a2277079e1bb37bda019 | tangelo-hub/romanescoTools | R/getDocData.R | #' Get Documentation Information for a Function
#'
#' Get documentation information for a function, including package, title,
#' description, examples, and argument names and descriptions.
#'
#' @param functionName name of the function
#'
#' @return a named list with elements \code{functionName}, \code{package},
#'   \code{title}, \code{desc}, \code{args} (data.frame with one row per
#'   argument name) and \code{examples} (a single string, "" when absent)
#'
#' @examples
#' a <- getDocData("glm")
#' toJSON(a)
#' @export
getDocData <- function(functionName) {
  # Locate the package whose help database documents this function.
  target <- gsub(".*/(.+)/help.+$", "\\1", utils:::index.search(functionName, find.package()))
  if (length(target) == 0)
    stop("Function ", functionName, " not found - make sure the package that has this function is loaded.", call. = FALSE)

  # Parsed Rd object; each element carries its Rd section in the "Rd_tag" attr.
  docText <- pkgTopic(target, functionName)
  classes <- sapply(docText, function(x) attr(x, "Rd_tag"))

  title <- docText[[which(grepl("\\\\title", classes))]]
  desc <- docText[[which(grepl("\\\\description", classes))]]
  args <- docText[[which(grepl("\\\\arguments", classes))]]

  title <- as.character(title[[1]])
  desc <- stripJunkAndPaste(desc)

  # Each \item inside \arguments is a (names, description) pair.
  argClasses <- sapply(args, function(x) attr(x, "Rd_tag"))
  argItems <- args[which(grepl("\\\\item", argClasses))]
  argNames <- sapply(argItems, function(x) {
    tmp <- as.character(x[[1]])
    if (attr(x[[1]][[1]], "Rd_tag") == "\\dots")
      tmp <- "..."
    tmp
  })
  argDescs <- sapply(argItems, function(x) {
    tmp <- stripJunkAndPaste(x[[2]])
    paste(tmp, collapse = "\n")
  })
  # Split comma-separated alias lists ("x, y") into one row per name.
  args <- do.call(rbind, lapply(seq_along(argNames), function(i) {
    tmp <- strsplit(argNames[i], ",")[[1]]
    tmp <- gsub(" +", "", tmp)
    data.frame(name = tmp, desc = argDescs[i], stringsAsFactors = FALSE)
  }))

  # The \examples section is optional; return "" instead of erroring when absent.
  exIdx <- which(grepl("\\\\examples", classes))
  if (length(exIdx) == 0) {
    examples <- ""
  } else {
    examples <- docText[[exIdx[1]]]
    examples$sep <- ""
    examples <- do.call(paste, examples)
  }

  list(
    functionName = functionName,
    package = target,
    title = title,
    desc = desc,
    # BUG FIX: previously rebuilt from the *unsplit* argNames here, discarding
    # the comma-splitting computed above; return the split table instead.
    args = args,
    examples = examples
  )
}
# Flatten an Rd text fragment (a list of strings) into one cleaned string:
# concatenate the pieces, drop newlines, collapse runs of spaces, and trim
# leading whitespace. An empty fragment yields "".
stripJunkAndPaste <- function(x) {
  if (length(x) == 0)
    x <- list("")
  x$sep <- ""
  out <- do.call(paste, x)
  out <- gsub("\n", "", out)
  out <- gsub(" +", " ", out)
  gsub("^ +", "", out)
}
# reference:
# http://stackoverflow.com/questions/8379570/get-functions-title-from-documentation
# Fetch the parsed Rd object for 'topic' (or Rd file 'file') in 'package'
# from the package's stored Rd database.
# NOTE(review): relies on the unexported tools:::fetchRdDB(), which may
# change between R versions.
pkgTopic <- function(package, topic, file = NULL) {
  # Find "file" name given topic name/alias
  if (is.null(file)) {
    topics <- pkgTopicsIndex(package)
    # subset() evaluates 'alias'/'file' non-standardly against the columns of
    # 'topics' (not against this function's 'file' argument).
    topic_page <- subset(topics, alias == topic, select = file)$file
    # Fall back to matching the Rd file name directly.
    if(length(topic_page) < 1)
      topic_page <- subset(topics, file == topic, select = file)$file
    stopifnot(length(topic_page) >= 1)
    file <- topic_page[1]
  }
  rdb_path <- file.path(system.file("help", package = package), package)
  tools:::fetchRdDB(rdb_path, file)
}
# Read a package's help index ("AnIndex") as a data frame with columns
# 'alias' (topic alias) and 'file' (Rd file name). Returns NULL when the
# index file is empty; rows with missing values are dropped.
pkgTopicsIndex <- function(package) {
  file_path <- file.path(system.file("help", package = package), "AnIndex")
  if (length(readLines(file_path, n = 1)) < 1) {
    return(NULL)
  }
  topics <- read.table(file_path, sep = "\t", header = FALSE,
                       quote = "", comment.char = "",
                       stringsAsFactors = FALSE)
  names(topics) <- c("alias", "file")
  topics[complete.cases(topics), ]
}
| 3,288 | bsd-3-clause |
f6efff0bf8e07d76d2f070ca46d1285e59f0cdb5 | gustavobio/plumber | R/processor-image.R | #' @include processor.R
#' @include plumber.R
PlumberProcessor$new(
  "jpeg",
  # Pre-hook: route R graphics output into a temporary JPEG file for this request.
  function(req, res, data){
    t <- tempfile()
    data$file <- t
    jpeg(t)
  },
  # Post-hook: close the device, then return the rendered image as the body.
  function(val, req, res, data){
    dev.off()
    # readBin() accepts a file name directly; this avoids leaking an open
    # connection when readBin() errors (the old file()/close() pair did).
    img <- readBin(data$file, "raw", file.info(data$file)$size)
    res$body <- img
    res$setHeader("Content-type", "image/jpeg")
    res
  }
)
PlumberProcessor$new(
  "png",
  # Pre-hook: route R graphics output into a temporary PNG file for this request.
  function(req, res, data){
    t <- tempfile()
    data$file <- t
    png(t)
  },
  # Post-hook: close the device, then return the rendered image as the body.
  function(val, req, res, data){
    dev.off()
    # readBin() accepts a file name directly; this avoids leaking an open
    # connection when readBin() errors (the old file()/close() pair did).
    img <- readBin(data$file, "raw", file.info(data$file)$size)
    res$body <- img
    res$setHeader("Content-type", "image/png")
    res
  }
)
| 748 | mit |
923b2efcf3dc138e1d35a2277079e1bb37bda019 | hafen/cardoonTools | R/getDocData.R | #' Get Documentation Information for a Function
#'
#' Get documentation information for a function, including package, title,
#' description, examples, and argument names and descriptions.
#'
#' @param functionName name of the function
#'
#' @return a named list with elements \code{functionName}, \code{package},
#'   \code{title}, \code{desc}, \code{args} (data.frame with one row per
#'   argument name) and \code{examples} (a single string, "" when absent)
#'
#' @examples
#' a <- getDocData("glm")
#' toJSON(a)
#' @export
getDocData <- function(functionName) {
  # Locate the package whose help database documents this function.
  target <- gsub(".*/(.+)/help.+$", "\\1", utils:::index.search(functionName, find.package()))
  if (length(target) == 0)
    stop("Function ", functionName, " not found - make sure the package that has this function is loaded.", call. = FALSE)

  # Parsed Rd object; each element carries its Rd section in the "Rd_tag" attr.
  docText <- pkgTopic(target, functionName)
  classes <- sapply(docText, function(x) attr(x, "Rd_tag"))

  title <- docText[[which(grepl("\\\\title", classes))]]
  desc <- docText[[which(grepl("\\\\description", classes))]]
  args <- docText[[which(grepl("\\\\arguments", classes))]]

  title <- as.character(title[[1]])
  desc <- stripJunkAndPaste(desc)

  # Each \item inside \arguments is a (names, description) pair.
  argClasses <- sapply(args, function(x) attr(x, "Rd_tag"))
  argItems <- args[which(grepl("\\\\item", argClasses))]
  argNames <- sapply(argItems, function(x) {
    tmp <- as.character(x[[1]])
    if (attr(x[[1]][[1]], "Rd_tag") == "\\dots")
      tmp <- "..."
    tmp
  })
  argDescs <- sapply(argItems, function(x) {
    tmp <- stripJunkAndPaste(x[[2]])
    paste(tmp, collapse = "\n")
  })
  # Split comma-separated alias lists ("x, y") into one row per name.
  args <- do.call(rbind, lapply(seq_along(argNames), function(i) {
    tmp <- strsplit(argNames[i], ",")[[1]]
    tmp <- gsub(" +", "", tmp)
    data.frame(name = tmp, desc = argDescs[i], stringsAsFactors = FALSE)
  }))

  # The \examples section is optional; return "" instead of erroring when absent.
  exIdx <- which(grepl("\\\\examples", classes))
  if (length(exIdx) == 0) {
    examples <- ""
  } else {
    examples <- docText[[exIdx[1]]]
    examples$sep <- ""
    examples <- do.call(paste, examples)
  }

  list(
    functionName = functionName,
    package = target,
    title = title,
    desc = desc,
    # BUG FIX: previously rebuilt from the *unsplit* argNames here, discarding
    # the comma-splitting computed above; return the split table instead.
    args = args,
    examples = examples
  )
}
# Flatten an Rd text fragment (a list of strings) into one cleaned string:
# concatenate the pieces, drop newlines, collapse runs of spaces, and trim
# leading whitespace. An empty fragment yields "".
stripJunkAndPaste <- function(x) {
  if (length(x) == 0)
    x <- list("")
  x$sep <- ""
  out <- do.call(paste, x)
  out <- gsub("\n", "", out)
  out <- gsub(" +", " ", out)
  gsub("^ +", "", out)
}
# reference:
# http://stackoverflow.com/questions/8379570/get-functions-title-from-documentation
# Fetch the parsed Rd object for 'topic' (or Rd file 'file') in 'package'
# from the package's stored Rd database.
# NOTE(review): relies on the unexported tools:::fetchRdDB(), which may
# change between R versions.
pkgTopic <- function(package, topic, file = NULL) {
  # Find "file" name given topic name/alias
  if (is.null(file)) {
    topics <- pkgTopicsIndex(package)
    # subset() evaluates 'alias'/'file' non-standardly against the columns of
    # 'topics' (not against this function's 'file' argument).
    topic_page <- subset(topics, alias == topic, select = file)$file
    # Fall back to matching the Rd file name directly.
    if(length(topic_page) < 1)
      topic_page <- subset(topics, file == topic, select = file)$file
    stopifnot(length(topic_page) >= 1)
    file <- topic_page[1]
  }
  rdb_path <- file.path(system.file("help", package = package), package)
  tools:::fetchRdDB(rdb_path, file)
}
# Read a package's help index ("AnIndex") as a data frame with columns
# 'alias' (topic alias) and 'file' (Rd file name). Returns NULL when the
# index file is empty; rows with missing values are dropped.
pkgTopicsIndex <- function(package) {
  file_path <- file.path(system.file("help", package = package), "AnIndex")
  if (length(readLines(file_path, n = 1)) < 1) {
    return(NULL)
  }
  topics <- read.table(file_path, sep = "\t", header = FALSE,
                       quote = "", comment.char = "",
                       stringsAsFactors = FALSE)
  names(topics) <- c("alias", "file")
  topics[complete.cases(topics), ]
}
| 3,288 | bsd-3-clause |
f6efff0bf8e07d76d2f070ca46d1285e59f0cdb5 | paulhendricks/plumber | R/processor-image.R | #' @include processor.R
#' @include plumber.R
PlumberProcessor$new(
  "jpeg",
  # Pre-hook: route R graphics output into a temporary JPEG file for this request.
  function(req, res, data){
    t <- tempfile()
    data$file <- t
    jpeg(t)
  },
  # Post-hook: close the device, then return the rendered image as the body.
  function(val, req, res, data){
    dev.off()
    # readBin() accepts a file name directly; this avoids leaking an open
    # connection when readBin() errors (the old file()/close() pair did).
    img <- readBin(data$file, "raw", file.info(data$file)$size)
    res$body <- img
    res$setHeader("Content-type", "image/jpeg")
    res
  }
)
PlumberProcessor$new(
  "png",
  # Pre-hook: route R graphics output into a temporary PNG file for this request.
  function(req, res, data){
    t <- tempfile()
    data$file <- t
    png(t)
  },
  # Post-hook: close the device, then return the rendered image as the body.
  function(val, req, res, data){
    dev.off()
    # readBin() accepts a file name directly; this avoids leaking an open
    # connection when readBin() errors (the old file()/close() pair did).
    img <- readBin(data$file, "raw", file.info(data$file)$size)
    res$body <- img
    res$setHeader("Content-type", "image/png")
    res
  }
)
| 748 | mit |
7ce7532b667abf6d9df8412eb1aa6e27f7252a07 | ChristosChristofidis/h2o-3 | h2o-r/tests/Utils/shared_javapredict_GBM.R |
heading("BEGIN TEST")
# Connection details (myIP/myPort), file paths (train/test), the feature/
# response names (x/y), GBM hyper-parameters and the heading()/safeSystem()
# helpers are expected to be defined by the calling test before this shared
# file is sourced.
conn <- new("H2OConnection", ip=myIP, port=myPort)

heading("Uploading train data to H2O")
iris_train.hex <- h2o.importFile(conn, train)

heading("Creating GBM model in H2O")
# Fall back to defaults when the caller did not set these optional knobs.
distribution <- if (exists("distribution")) distribution else "AUTO"
balance_classes <- if (exists("balance_classes")) balance_classes else FALSE
iris.gbm.h2o <- h2o.gbm(x = x, y = y, training_frame = iris_train.hex, distribution = distribution, ntrees = n.trees, max_depth = interaction.depth, min_rows = n.minobsinnode, learn_rate = shrinkage, balance_classes = balance_classes)
print(iris.gbm.h2o)

heading("Downloading Java prediction model code from H2O")
model_key <- iris.gbm.h2o@model_id
# Scratch directory unique to this process; recreated from scratch each run.
tmpdir_name <- sprintf("%s/results/tmp_model_%s", TEST_ROOT_DIR, as.character(Sys.getpid()))
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("mkdir -p %s", tmpdir_name)
safeSystem(cmd)
h2o.download_pojo(iris.gbm.h2o, tmpdir_name)

heading("Uploading test data to H2O")
iris_test.hex <- h2o.importFile(conn, test)

heading("Predicting in H2O")
iris.gbm.pred <- h2o.predict(iris.gbm.h2o, iris_test.hex)
summary(iris.gbm.pred)
head(iris.gbm.pred)
# Keep the in-H2O predictions for the comparison against the POJO output below.
prediction1 <- as.data.frame(iris.gbm.pred)
cmd <- sprintf( "%s/out_h2o.csv", tmpdir_name)
write.csv(prediction1, cmd, quote=FALSE, row.names=FALSE)

heading("Setting up for Java POJO")
# The POJO scores feature columns only, so drop the response.
iris_test_with_response <- read.csv(test, header=T)
iris_test_without_response <- iris_test_with_response[,x]
# A single-column selection collapses to a vector; restore a data.frame.
if(is.null(ncol(iris_test_without_response))) {
  iris_test_without_response <- data.frame(iris_test_without_response)
  colnames(iris_test_without_response) <- x
}
write.csv(iris_test_without_response, file = sprintf("%s/in.csv", tmpdir_name), row.names=F, quote=F)
# Fetch the gen-model runtime jar from the running H2O node, then compile the
# downloaded POJO together with the PredictCSV driver.
cmd <- sprintf("curl http://%s:%s/3/h2o-genmodel.jar > %s/h2o-genmodel.jar", myIP, myPort, tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("cp PredictCSV.java %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("javac -cp %s/h2o-genmodel.jar -J-Xmx4g -J-XX:MaxPermSize=256m %s/PredictCSV.java %s/%s.java", tmpdir_name, tmpdir_name, tmpdir_name, model_key)
safeSystem(cmd)

heading("Predicting with Java POJO")
cmd <- sprintf("java -ea -cp %s/h2o-genmodel.jar:%s -Xmx4g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m PredictCSV --header --model %s --input %s/in.csv --output %s/out_pojo.csv", tmpdir_name, tmpdir_name, model_key, tmpdir_name, tmpdir_name)
safeSystem(cmd)
prediction2 <- read.csv(sprintf("%s/out_pojo.csv", tmpdir_name), header=T)
if (nrow(prediction1) != nrow(prediction2)) {
warning("Prediction mismatch")
print(paste("Rows from H2O", nrow(prediction1)))
print(paste("Rows from Java POJO", nrow(prediction2)))
stop("Number of rows mismatch")
}
match <- all.equal(prediction1, prediction2, tolerance = 1e-8)
if (! match) {
for (i in 1:nrow(prediction1)) {
rowmatches <- all(prediction1[i,] == prediction2[i,])
if (! rowmatches) {
print("----------------------------------------------------------------------")
print("")
print(paste("Prediction mismatch on data row", i, "of test file", test))
print("")
print( "(Note: That is the 1-based data row number, not the file line number.")
print( " If you have a header row, then the file line number is off by one.)")
print("")
print("----------------------------------------------------------------------")
print("")
print("Data from failing row")
print("")
print(iris_test_without_response[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from H2O")
print("")
print(prediction1[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from Java POJO")
print("")
print(prediction2[i,])
print("")
print("----------------------------------------------------------------------")
print("")
stop("Prediction mismatch")
}
}
stop("Paranoid; should not reach here")
}
heading("Cleaning up tmp files")
# Remove the per-process scratch directory created above.
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)

PASS_BANNER()
| 4,264 | apache-2.0 |
0774423aa79824fd9215129bbf74930bfc11aabc | google/rappor | pipeline/metric_status.R | #!/usr/bin/Rscript
#
# Write an overview of task status, per-metric task status, task histograms.
library(data.table)
library(ggplot2)
options(stringsAsFactors = FALSE) # get rid of annoying behavior
# Printf-style logging helper: format 'fmt' with '...' and write the result
# to stdout, followed by a newline.
Log <- function(fmt, ...) {
  msg <- sprintf(fmt, ...)
  cat(msg, "\n", sep = "")
}
# max of non-NA values; NA if there are none
MaybeMax <- function(values) {
  observed <- values[!is.na(values)]
  result <- if (length(observed) == 0) NA else max(observed)
  # data.table requires a numeric, not a logical NA
  as.numeric(result)
}
# mean of non-NA values; NA if there are none
MaybeMean <- function(values) {
  observed <- values[!is.na(values)]
  result <- if (length(observed) == 0) NA else mean(observed)
  # data.table requires a numeric, not a logical NA
  as.numeric(result)
}
# One overview row per metric (task counts by status, report maxima, mean
# timings/mass), written to <output_dir>/overview.csv. Returns the summary
# table (a data.table) invisibly-ish (as the last expression).
WriteDistOverview <- function(summary, output_dir) {
  s <- data.table(summary) # data.table syntax is easier here

  # NB: the column names below (params_file, num_reports, status, ...) are
  # resolved non-standardly against the columns of 's'.
  by_metric <- s[ , list(
    params_file = unique(params_file),
    map_file = unique(map_file),
    days = length(date),
    max_num_reports = MaybeMax(num_reports),

    # summarize status
    ok = sum(status == 'OK'),
    fail = sum(status == 'FAIL'),
    timeout = sum(status == 'TIMEOUT'),
    skipped = sum(status == 'SKIPPED'),

    # TODO: Need to document the meaning of these metrics.
    # All could be NA
    # KiB -> MB
    #max_vm5_peak_mb = MaybeMax(vm5_peak_kib * 1024 / 1e6),
    #mean_vm5_mean_mb = MaybeMean(vm5_mean_kib * 1024 / 1e6),
    mean_secs = MaybeMean(seconds),
    mean_allocated_mass = MaybeMean(allocated_mass)

    # unique failure reasons
    # This can be used when there are different call stacks.
    #fail_reasons = length(unique(fail_reason[fail_reason != ""]))
  ), by=metric]

  # Case insensitive sort by metric name
  by_metric <- by_metric[order(tolower(by_metric$metric)), ]

  overview_path <- file.path(output_dir, 'overview.csv')
  write.csv(by_metric, file = overview_path, row.names = FALSE)
  Log("Wrote %s", overview_path)

  by_metric
}
# Write per-metric CSV files for the 'dist' report.
#
# For each metric under output_dir/<metric>/ this writes:
#   status.csv      - one row per task, sorted by descending date.
#   num_reports.csv - (date, num_reports) pairs for dygraphs plotting.
#   mass.csv        - (date, unallocated_mass) pairs for dygraphs plotting.
#
# Args:
#   summary: data frame of per-task results.
#   output_dir: root output directory; per-metric subdirectories must exist.
WriteDistMetricStatus <- function(summary, output_dir) {
  # Write status.csv, num_reports.csv, and mass.csv for each metric.
  s <- data.table(summary)
  # loop over unique metrics, and write a CSV for each one
  for (m in unique(s$metric)) {
    # Select cols, and convert units. Don't need params / map / metric.
    subframe <- s[s$metric == m,
                  list(job_id, date, status,
                       #vm5_peak_mb = vm5_peak_kib * 1024 / 1e6,
                       #vm5_mean_mb = vm5_mean_kib * 1024 / 1e6,
                       num_reports,
                       seconds,
                       allocated_mass, num_rappor)]
    # Sort by descending date. Alphabetical sort works fine for YYYY-MM-DD.
    subframe <- subframe[order(subframe$date, decreasing = TRUE), ]
    out_path = file.path(output_dir, m, 'status.csv')
    write.csv(subframe, file = out_path, row.names = FALSE)
    Log("Wrote %s", out_path)
  }
  # This one is just for plotting with dygraphs. TODO: can dygraphs do
  # something smarter? Maybe you need to select the column in JavaScript, and
  # pass it an array, rather than CSV text.
  for (m in unique(s$metric)) {
    f1 <- s[s$metric == m, list(date, num_reports)]
    path1 <- file.path(output_dir, m, 'num_reports.csv')
    # NOTE: dygraphs (only in Firefox?) doesn't like the quotes around
    # "2015-04-03". In general, we can't turn off quotes, because strings with
    # double quotes will be invalid CSV files. But in this case, we only have
    # date and number columns, so we can. dygraphs is mistaken here.
    write.csv(f1, file = path1, row.names = FALSE, quote = FALSE)
    Log("Wrote %s", path1)
    # Write unallocated mass. TODO: Write the other 2 vars too?
    f2 <- s[s$metric == m,
            list(date,
                 unallocated_mass = 1.0 - allocated_mass)]
    path2 <- file.path(output_dir, m, 'mass.csv')
    write.csv(f2, file = path2, row.names = FALSE, quote = FALSE)
    Log("Wrote %s", path2)
  }
}
# Render a plot object to a PNG file under outdir.
#
# Args:
#   p: plot object accepted by plot() (e.g. a ggplot).
#   outdir: destination directory.
#   filename: file name (relative to outdir).
#   width, height: image size in pixels.
WritePlot <- function(p, outdir, filename, width = 800, height = 600) {
  filename <- file.path(outdir, filename)
  png(filename, width = width, height = height)
  # Close the device even if plot() fails; otherwise a leaked device would
  # silently capture every subsequent plot in this process.
  tryCatch(plot(p), finally = dev.off())
  Log('Wrote %s', filename)
}
# Make sure the histogram has some valid input. If we don't do this, ggplot
# blows up with an unintuitive error message.
# NOTE: quit() terminates the whole R process (exit code 1), not just the
# caller -- intentional, since this script runs non-interactively.
CheckHistogramInput <- function(v) {
  if (all(is.na(v))) {
    # deparse(substitute(v)) recovers the caller's expression for v, so the
    # error message can name the offending column.
    arg_name <- deparse(substitute(v)) # R idiom to get name
    Log('FATAL: All values in %s are NA (no successful runs?)', arg_name)
    quit(status = 1)
  }
}
# Write one PNG histogram per task-level statistic (allocated mass, detected
# strings, report counts, duration, and -- when present -- peak memory).
#
# Args:
#   s: data frame of per-task results.
#   output_dir: directory the PNG files are written into.
WriteDistHistograms <- function(s, output_dir) {
  CheckHistogramInput(s$allocated_mass)
  p <- qplot(s$allocated_mass, geom = "histogram")
  t <- ggtitle("Allocated Mass by Task")
  x <- xlab("allocated mass")
  y <- ylab("number of tasks")
  WritePlot(p + t + x + y, output_dir, 'allocated_mass.png')
  CheckHistogramInput(s$num_rappor)
  p <- qplot(s$num_rappor, geom = "histogram")
  t <- ggtitle("Detected Strings by Task")
  x <- xlab("detected strings")
  y <- ylab("number of tasks")
  WritePlot(p + t + x + y, output_dir, 'num_rappor.png')
  CheckHistogramInput(s$num_reports)
  # Scale to millions of reports for a readable x axis.
  p <- qplot(s$num_reports / 1e6, geom = "histogram")
  t <- ggtitle("Raw Reports by Task")
  x <- xlab("millions of reports")
  y <- ylab("number of tasks")
  WritePlot(p + t + x + y, output_dir, 'num_reports.png')
  CheckHistogramInput(s$seconds)
  p <- qplot(s$seconds, geom = "histogram")
  t <- ggtitle("Analysis Duration by Task")
  x <- xlab("seconds")
  y <- ylab("number of tasks")
  WritePlot(p + t + x + y, output_dir, 'seconds.png')
  # NOTE: Skipping this for 'series' jobs.
  # Memory stats may be entirely NA; only plot when at least one is present.
  if (sum(!is.na(s$vm5_peak_kib)) > 0) {
    # KiB -> MB (1e6 bytes).
    p <- qplot(s$vm5_peak_kib * 1024 / 1e6, geom = "histogram")
    t <- ggtitle("Peak Memory Usage by Task")
    x <- xlab("Peak megabytes (1e6 bytes) of memory")
    y <- ylab("number of tasks")
    WritePlot(p + t + x + y, output_dir, 'memory.png')
  }
}
# Run the full 'dist' report pipeline: per-metric CSVs, histograms, and the
# aggregated overview, all written under output_dir.
ProcessAllDist <- function(s, output_dir) {
  Log('dist: Writing per-metric status.csv')
  WriteDistMetricStatus(s, output_dir)
  Log('dist: Writing histograms')
  WriteDistHistograms(s, output_dir)
  Log('dist: Writing aggregated overview.csv')
  WriteDistOverview(s, output_dir)
}
# Write the single CSV file loaded by assoc-overview.html.
#
# Args:
#   summary: data frame of per-task association results.
#   output_dir: directory assoc-overview.csv is written into.
# Returns: the aggregated data.table, one row per metric.
WriteAssocOverview <- function(summary, output_dir) {
  s <- data.table(summary)  # data.table syntax is easier here
  # Collapse all tasks of each metric into a single summary row.
  by_metric <- s[ , list(
      #params_file = unique(params_file),
      #map_file = unique(map_file),
      days = length(date),
      max_num_reports = MaybeMax(num_reports),
      # summarize status
      ok = sum(status == 'OK'),
      fail = sum(status == 'FAIL'),
      timeout = sum(status == 'TIMEOUT'),
      skipped = sum(status == 'SKIPPED'),
      mean_total_secs = MaybeMean(total_elapsed_seconds),
      mean_em_secs = MaybeMean(em_elapsed_seconds)
      ), by=list(metric)]
  # Case insensitive sort by metric name
  by_metric <- by_metric[order(tolower(by_metric$metric)), ]
  overview_path <- file.path(output_dir, 'assoc-overview.csv')
  write.csv(by_metric, file = overview_path, row.names = FALSE)
  Log("Wrote %s", overview_path)
  by_metric
}
# Write the CSV files loaded by assoc-metric.html -- that is, one
# metric-status.csv for each metric name, summarizing tasks per
# (var1, var2) variable pair.
#
# Args:
#   summary: data frame of per-task association results.
#   output_dir: root directory; files go under <output_dir>/<metric>/.
WriteAssocMetricStatus <- function(summary, output_dir) {
  s <- data.table(summary)
  csv_list <- unique(s[, list(metric)])
  # seq_len() (rather than 1:nrow) iterates zero times on an empty table,
  # whereas 1:0 would yield c(1, 0) and index out of bounds.
  for (i in seq_len(nrow(csv_list))) {
    u <- csv_list[i, ]
    # Aggregate one row per (var1, var2) pair for this metric.
    by_pair <- s[s$metric == u$metric,
                 list(days = length(date),
                      max_num_reports = MaybeMax(num_reports),
                      # summarize status
                      ok = sum(status == 'OK'),
                      fail = sum(status == 'FAIL'),
                      timeout = sum(status == 'TIMEOUT'),
                      skipped = sum(status == 'SKIPPED'),
                      mean_total_secs = MaybeMean(total_elapsed_seconds),
                      mean_em_secs = MaybeMean(em_elapsed_seconds)
                      ),
                 by=list(var1, var2)]
    # Case insensitive sort by var1 name
    by_pair <- by_pair[order(tolower(by_pair$var1)), ]
    csv_path <- file.path(output_dir, u$metric, 'metric-status.csv')
    write.csv(by_pair, file = csv_path, row.names = FALSE)
    Log("Wrote %s", csv_path)
  }
}
# This naming convention is in task_spec.py AssocTaskSpec: the pair directory
# is "<var1>_X_<var2>" with '..' in var2 flattened to '_'.
FormatAssocRelPath <- function(metric, var1, var2) {
  sanitized_var2 <- gsub('..', '_', var2, fixed = TRUE)
  pair_dir <- sprintf('%s_X_%s', var1, sanitized_var2)
  file.path(metric, pair_dir)
}
# Write the CSV files loaded by assoc-pair.html -- that is, one pair-status.csv
# for each (metric, var1, var2) pair, plus a pair-metadata.txt with the raw
# variable names.
#
# Args:
#   summary: data frame of per-task association results.
#   output_dir: root directory; files go under the pair's relative path.
WriteAssocPairStatus <- function(summary, output_dir) {
  s <- data.table(summary)
  csv_list <- unique(s[, list(metric, var1, var2)])
  Log('CSV list:')
  print(csv_list)
  # Loop over unique (metric, var1, var2) triples and write a CSV for each.
  # seq_len() (rather than 1:nrow) is safe when there are zero triples.
  for (i in seq_len(nrow(csv_list))) {
    u <- csv_list[i, ]
    # Select cols, and convert units. Don't need params / map / metric.
    subframe <- s[s$metric == u$metric & s$var1 == u$var1 & s$var2 == u$var2,
                  list(job_id, date, status,
                       num_reports, d1, d2,
                       total_elapsed_seconds,
                       em_elapsed_seconds)]
    # Sort by descending date. Alphabetical sort works fine for YYYY-MM-DD.
    subframe <- subframe[order(subframe$date, decreasing = TRUE), ]
    pair_rel_path <- FormatAssocRelPath(u$metric, u$var1, u$var2)
    csv_path <- file.path(output_dir, pair_rel_path, 'pair-status.csv')
    write.csv(subframe, file = csv_path, row.names = FALSE)
    Log("Wrote %s", csv_path)
    # Write a file with the raw variable names. Parsed by ui.sh, to pass to
    # csv_to_html.py.
    meta_path <- file.path(output_dir, pair_rel_path, 'pair-metadata.txt')
    # NOTE: The conversion from data.table to character vector requires
    # stringsAsFactors to work correctly!
    lines <- as.character(u)
    writeLines(lines, con = meta_path)
    Log("Wrote %s", meta_path)
  }
}
# Run the full 'assoc' report pipeline: per-pair CSVs, per-metric summaries,
# and the aggregated overview, all written under output_dir.
ProcessAllAssoc <- function(s, output_dir) {
  Log('assoc: Writing pair-status.csv for each variable pair in each metric')
  WriteAssocPairStatus(s, output_dir)
  Log('assoc: Writing metric-status.csv for each metric')
  WriteAssocMetricStatus(s, output_dir)
  Log('assoc: Writing aggregated overview.csv')
  WriteAssocOverview(s, output_dir)
}
# Entry point. argv = c(action, input_csv, output_dir), where action is
# either 'dist' or 'assoc'.
main <- function(argv) {
  # Fail fast with a clear usage message instead of an obscure subscript
  # error when arguments are missing.
  if (length(argv) < 3) {
    stop("Usage: metric_status.R <dist|assoc> <input csv> <output dir>",
         call. = FALSE)
  }
  # increase ggplot font size globally
  theme_set(theme_grey(base_size = 16))
  action <- argv[[1]]
  input <- argv[[2]]
  output_dir <- argv[[3]]
  if (action == 'dist') {
    summary <- read.csv(input)
    ProcessAllDist(summary, output_dir)
  } else if (action == 'assoc') {
    summary <- read.csv(input)
    ProcessAllAssoc(summary, output_dir)
  } else {
    stop(sprintf('Invalid action %s', action))
  }
  Log('Done')
}
# Run main() only when this file is executed as a script (e.g. via Rscript):
# at top level sys.frames() is empty, whereas under source() the call stack
# is non-empty, so main() is not invoked when the file is sourced.
if (length(sys.frames()) == 0) {
  main(commandArgs(TRUE))
}
| 11,041 | apache-2.0 |
e039d22db6eb2c4e07fa62c1c16c027c995ae275 | andrewdefries/andrewdefries.github.io | FDA_Pesticide_Glossary/OPUS.R | library("knitr")
library("rgl")
#knit("OPUS.Rmd")
#markdownToHTML('OPUS.md', 'OPUS.html', options=c("use_xhml"))
#system("pandoc -s OPUS.html -o OPUS.pdf")
knit2html('OPUS.Rmd')
| 180 | mit |
40b8b6603e58170e7eca658faf4fd9b9b15d5c2c | b0rxa/scmamp | R/data_manipulation.R | #' @title Expression based row filtering
#'
#' @description This is a simple function to filter data based on an expression defined using the colum names
#' @param data A NAMED matrix or data frame to be filtered (column names are required).
#' @param condition A string indicating the condition that the row have to fulfill to be retained. The column names are used as variables in the condition (see examples bellow).
#' @param remove.cols Either a vector of column names or a vector of column indices to be removed from the result
#' @return The original data where the rows for which the condition is \code{FALSE} and the columns in the vector \code{remove.cols} have been removed
#' @seealso \code{\link{summarizeData}}, \code{\link{writeTabular}} and the vignette \code{vignette(topic="Data_loading_and_manipulation",
#' package="scmamp")}
#' @examples
#' data(data_gh_2008)
#' names(data.gh.2008)
#' filterData(data.gh.2008, condition="CN2 > 0.7 & Kernel < 0.7", remove.cols=1:2)
#'
filterData <- function (data, condition="TRUE", remove.cols=NULL) {
  # Returns TRUE iff 'row' satisfies 'condition'. Each value in the row is
  # bound to a local variable named after its column, so the condition string
  # can reference columns by name.
  checkRow <- function (row) {
    # Extract columns as variables
    for (i in seq(along.with=row)) {
      assign(names(row)[i], row[i])
    }
    # Evaluate the condition. NOTE: the condition string is eval()'d as R
    # code in this function's environment; it is trusted caller input.
    cond <- eval(parse(text=condition))
    return(cond)
  }
  # Generate the subset of rows.
  # NOTE(review): apply() coerces 'data' to a matrix, so with mixed-type
  # columns the row values seen by 'condition' may all be character --
  # confirm numeric comparisons behave as intended for such inputs.
  sub <- apply(data, MARGIN=1, FUN=checkRow)
  ## Generate the colums to select: accept either column names or indices.
  if (is.character(remove.cols)) {
    id.retain <- which(!(colnames(data) %in% remove.cols))
  } else {
    id.retain <- which(!(1:ncol(data) %in% remove.cols))
  }
  # In case there are indices out of range, remove them
  id.retain <- subset(id.retain,
                      subset=id.retain > 0 & id.retain <= ncol(data))
  # Get the subset of rows passing the condition, restricted to the retained
  # columns.
  sbst <- subset(data, subset=sub, select=id.retain)
  return(sbst)
}
#' @title Summarization of data
#'
#' @description This is a simple function to apply a summarization function to a matrix or data frame.
#' @param data A matrix or data frame to be summarized.
#' @param fun Function to be used in the summarization. It can be any function that, taking as first argument a numeric vector, otuputs a numeric value. Typical examples are \code{\link{mean}}, \code{\link{median}}, \code{\link{min}}, \code{\link{max}} or \code{\link{sd}}.
#' @param group.by A vector of either column names or column indices according to which the data will be grouped to be summarized.
#' @param ignore A vector of either column names or column indices of the columns that have to be removed from the output.
#' @param ... Additional parameters to the summarization function (\code{fun}). For example, \code{na.rm=TRUE} to indicate that the missing values should be ignored.
#' @return A data frame where, for each combination of the values in the columns indicated by \code{group.by}, each column (except those in \code{ignore}) contains the summarization of the values in the original matrix that have that combination of values.
#' #' @seealso \code{\link{filterData}}, \code{\link{writeTabular}} and the vignette \code{vignette(topic="Data_loading_and_manipulation",
#' package="scmamp")}
#' @examples
#' data(data_blum_2015)
#' # Group by size and radius. Get the mean and variance of only the last two
#' # columns.
#' summarizeData (data.blum.2015, group.by=c("Radius","Size"), ignore=3:8,
#' fun=mean, na.rm=TRUE)
#' summarizeData (data.blum.2015, group.by=c("Radius","Size"), ignore=3:8,
#' fun=sd, na.rm=TRUE)
#'
summarizeData <- function (data, fun=mean, group.by=NULL, ignore=NULL, ... ) {
  # Coerce matrices (and other tabular inputs) to a data frame up front.
  if (!is.data.frame(data)) {
    data <- data.frame(data)
  }
  # Convert character definitions to colum id
  if (is.character(group.by)) {
    group.by <- which(colnames(data) %in% group.by)
  }
  if (is.character(ignore)) {
    ignore <- which(colnames(data) %in% ignore)
  }
  ## Only numeric columns can be summarized; warn about and ignore the rest.
  non.numeric <- which(!unlist(lapply(data, is.numeric)))
  if (!all(non.numeric %in% c(group.by, ignore))) {
    warning ("Only numeric columns can be summarized. Character and factor ",
             "columns should be either in the 'group.by' or the 'ignore' list. ",
             "Non numeric columns will be ignored")
    ignore <- unique(c(ignore, non.numeric[!(non.numeric %in% group.by)]))
  }
  # Remove any index out of bounds
  group.by <- subset(group.by, subset=group.by > 0 & group.by <= ncol(data))
  ignore <- subset(ignore, subset=ignore > 0 & ignore <= ncol(data))
  if (length(intersect(group.by,ignore)) > 0) {
    stop("The same column cannot be simultaneously in the 'group.by' and the ",
         "'ignore' list")
  }
  if (is.null(group.by)) {
    # No grouping: summarize each remaining column over all rows.
    if (!is.null(ignore)) {
      data <- data[, -ignore]
    }
    summ <- apply(data, MARGIN=2,
                  FUN=function(x) {
                    fun(x, ...)
                  })
  }else{
    # One summary row per unique combination of the group.by columns.
    groups <- unique(data[, group.by])
    # NOTE(review): group.by is non-NULL in this branch, so this condition is
    # always TRUE; the call exists to coerce the vector returned by unique()
    # on a single column into a data frame. Confirm 'length(group.by) == 1'
    # was not the intent.
    if(length(group.by)) groups <- data.frame(groups)
    to.summarize <- (1:ncol(data))[-c(ignore, group.by)]
    # Summarize the rows matching the i-th group combination.
    summGroup <- function (i) {
      sub <- rep(TRUE, nrow(data))
      for (j in seq(along.with=group.by)) {
        sub <- sub & data[, group.by[j]] == groups[i,j]
      }
      m <- subset(data, subset=sub)
      m <- m[, to.summarize]
      # A single selected column collapses to a vector; restore matrix shape
      # so the apply() below sees one column.
      if (length(to.summarize) == 1) {
        m <- matrix(m, ncol=1)
      }
      apply(m, MARGIN=2,
            FUN=function(x) {
              fun(x , ...)
            })
    }
    aux <- lapply(1:nrow(groups), FUN=summGroup)
    summ <- cbind(groups, do.call(rbind, aux))
  }
  return(summ)
}
#' @title Creation of boolean matrices for highlighting results
#'
#' @description A simple function to create boolean matrices to be used when constructing LaTeX tables.
#' @param data It can be a data frame, a matrix or a vector.
#' @param find A string indicating what has to be detected. Possible values are:
#' \itemize{
#' \item{\code{'eq'}}{ All values equal to the value passed in \code{th}}
#' \item{\code{'le'}}{ All values lower or equal to the value passed in \code{th}}
#' \item{\code{'ge'}}{ All values greater or equal to the value passed in \code{th}}
#' \item{\code{'lw'}}{ All values lower than the value passed in \code{th}}
#' \item{\code{'gt'}}{ All values greater than the value passed in \code{th}}
#' \item{\code{'min'}}{ Minimum value in each row / column / matrix}
#' \item{\code{'max'}}{ Maximum value in each row / column / matrix}
#' }
#' @param th Thershold used when \code{find} is set to \code{'eq'}, \code{'ge'}, \code{'le'}, \code{'gt'} or \code{'lw'}.
#' @param by A string or string vector indicating where the min/max values have to be find. It can be \code{'row'}, \code{'col'} or \code{'mat'} for the row, column and matrix min/max respectively.
#' @return A boolean matrix that matches in dimension the output data and where the identified elements are marked as TRUE.
#' @examples
#' data('data_gh_2008')
#' booleanMatrix(data.gh.2008, find='min', by='row')
#' booleanMatrix(data.gh.2008, find='ge', th=0.5)
#'
booleanMatrix <- function (data, find='max', th=0, by='row') {
  # Check whether all the values are numeric or not
  if (is.data.frame(data)) {
    numeric.data <- all(apply(data, FUN="is.numeric", MARGIN=c(1,2)))
  } else if (is.matrix(data) | is.vector(data)) {
    numeric.data <- is.numeric(data)
  } else {
    stop("The 'data' argument has to be either a data frame, a matrix or a vector")
  }
  if (!numeric.data && find!='eq') {
    stop("For non-numeric matrices the only posible comparison is find='eq'")
  }
  # Map 'by' onto the apply() margin. For by='mat' no margin is defined,
  # because the whole structure is searched at once.
  if (by=='col') {
    margin <- 2
  } else if (by == 'row') {
    margin <- 1
  } else if (by != 'mat') {
    stop("The 'by' argument can only take values 'col', 'row' and 'mat'")
  }
  # Shared helper for 'min'/'max': mark the extreme value(s) per row, per
  # column, or in the whole structure. This also fixes a bug in the original
  # 'max' branch, which tested length(margin) > 1 -- but 'margin' is never
  # assigned when by == 'mat', so find='max', by='mat' raised an
  # "object 'margin' not found" error.
  markExtreme <- function(extreme.fun) {
    if (is.vector(data) || by == 'mat') {
      # Vectors and whole-structure search: compare with the global extreme.
      return(data == extreme.fun(data))
    }
    res <- apply(data, MARGIN=margin,
                 FUN=function(x) {
                   return (x==extreme.fun(x))
                 })
    # apply() over rows returns the per-row results as columns; transpose to
    # recover the input orientation.
    if (margin == 1) {
      res <- t(res)
    }
    res
  }
  # 'result' (not 'matrix') avoids shadowing base::matrix.
  result <- switch(find,
                   'eq'={
                     data == th
                   },
                   'ge'={
                     data >= th
                   },
                   'le'={
                     data <= th
                   },
                   'gt'={
                     data > th
                   },
                   'lw'={
                     data < th
                   },
                   'min'=markExtreme(min),
                   'max'=markExtreme(max))
  return(result)
}
a674fc588a01d17087562dbbad63d0b84985a3e8 | seacode/rsimGmacs | R/midpoints.R | #'
#'@title Calculate the midpoints of a vector.
#'
#'@description Function to calculate the midpoints of a vector.
#'
#'@param x - the vector to calculate the midpoints for
#'
#'@return the vector of midpoints
#'
#'@export
#'
midpoints<-function(x){
  # Number of midpoints is one fewer than the number of grid points.
  n <- length(x) - 1
  # seq_len(n) is empty when x has a single element, so this returns
  # numeric(0) instead of the garbage produced by 1:0 indexing.
  # Averaging upper + lower endpoints; any names carry over from the upper
  # endpoints (first operand), matching the original indexing order.
  0.5 * (x[1 + seq_len(n)] + x[seq_len(n)])
}
a674fc588a01d17087562dbbad63d0b84985a3e8 | wStockhausen/rsimTCSAM | R/midpoints.R | #'
#'@title Calculate the midpoints of a vector.
#'
#'@description Function to calculate the midpoints of a vector.
#'
#'@param x - the vector to calculate the midpoints for
#'
#'@return the vector of midpoints
#'
#'@export
#'
midpoints<-function(x){
  # One midpoint between each pair of consecutive grid points.
  n <- length(x) - 1
  upper <- x[1 + (1:n)]
  lower <- x[1:n]
  d <- (upper + lower) / 2
  return(d)
}
79e7d6656dae041bb5c4f9af4f1f2af9f8c7f950 | molgenis/NIPTeR | R/regression_result.R | regression_template <- function(result_set, chromo_focus, correction_status, samplenames,
potential_predictors, models, sample_names_train_set = NULL,
train_set_statistics = NULL, train_set_Zscores = NULL, type){
if (is.null(train_set_statistics)){
new_regression_template <- list(prediction_statistics = data.frame(result_set[[1]]), control_group_Zscores = result_set[[2]],
focus_chromosome = chromo_focus, correction_status = correction_status,
control_group_sample_names = samplenames, models = models,
potential_predictors = potential_predictors,
all_control_group_Z_scores = result_set$All_control_group_Z_scores,
additional_statistics = result_set$Additional_statistics)
}
else{
new_regression_template <- list(prediction_statistics = data.frame(result_set[[1]]), control_group_Zscores = result_set[[2]],
focus_chromosome = chromo_focus, correction_status = correction_status,
control_group_sample_names = samplenames, models = models,
potential_predictors = potential_predictors,
sample_names_train_set = sample_names_train_set,
train_set_statistics = train_set_statistics,
train_set_Zscores = train_set_Zscores)
}
class(new_regression_template) <- c(Regressions_result_class, type)
return(new_regression_template)
}
# Pull a single named element out of a result list.
collapse_result <- function(result, value){
  result[[value]]
}
# Join the predictor names into one string; toString() inserts ", " and the
# comma is then replaced by a space, leaving names space-separated.
collapse_prediction_sets <- function(result){
  joined <- toString(result$predictors)
  gsub(pattern = ",", replacement = " ", x = joined, fixed = TRUE)
}
# Interleave the primary and additional control-group score columns so every
# model contributes a (theoretical, practical) column pair, in that order,
# regardless of which CV type was the model's own.
#
# Args:
#   control_group_scores / additional_control_group_scores: matrices with one
#     column per model.
#   n_models: number of models (columns).
#   cv_types: per-model CV type, compared against the package constant
#     'theoretical'.
#   setnames: per-model base names used to build the column labels.
# Returns: matrix with 2 * n_models labelled columns.
collapse_prac_cv_control_scores <- function(control_group_scores, additional_control_group_scores, n_models, cv_types, setnames){
  endmatrix <- NULL
  # seq_len() is safe when n_models == 0 (no iterations, result stays NULL).
  for (i in seq_len(n_models)){
    if (cv_types[i] == theoretical){
      endmatrix <- cbind(endmatrix, control_group_scores[,i], additional_control_group_scores[,i])
    }
    else{
      endmatrix <- cbind(endmatrix, additional_control_group_scores[,i], control_group_scores[,i])
    }
  }
  colnames(endmatrix) <- as.vector(rbind(paste(setnames, theoretical, sep="_"), paste(setnames, practical, sep="_")))
  return(endmatrix)
}
# Merge the per-prediction-set results into a single result list keyed by
# "Prediction_set_<i>" column names.
# NOTE(review): 'n_predictors' is never used in the body -- confirm whether
# it can be dropped from the signature.
collapse_results <- function(result_set, n_models, n_predictors){
  setnames <- paste("Prediction_set", 1:n_models, sep="_")
  # Column-bind the control-group Z-scores of every prediction set.
  control_group_Z_scores <- Reduce(cbind, lapply(result_set, collapse_result, value = "control_group_Z_scores"))
  additional_control_group_Z_scores <- Reduce(cbind, lapply(result_set, collapse_result, value = "additional_control_group_Z_scores"))
  cv_types <- sapply(result_set, collapse_result, value = "cv_type")
  # Interleave primary/additional scores per model (theoretical first).
  all_control_group_Z_scores <- collapse_prac_cv_control_scores(control_group_scores = control_group_Z_scores,
                                                                additional_control_group_scores = additional_control_group_Z_scores,
                                                                n_models = n_models, cv_types = cv_types, setnames = setnames)
  # One column of summary statistics per prediction set.
  prediction_statistics <- rbind("Z_score_sample" = as.numeric(sapply(result_set, collapse_result, value = "sample_Z_score")),
                                 "CV" = as.numeric(sapply(result_set, collapse_result, value = "cv")),
                                 cv_types,
                                 "P_value_shapiro" = as.numeric(sapply(result_set, collapse_result, value = "shapiro_P_value")),
                                 "Predictor_chromosomes" = sapply(result_set, collapse_prediction_sets),
                                 "Mean_test_set" = sapply(result_set, collapse_result, value = "mean_test_set"),
                                 "CV_train_set" = sapply(result_set, collapse_result, value = "cv_train_set"))
  colnames(control_group_Z_scores) <- setnames
  colnames(prediction_statistics) <- setnames
  # Six extra statistics per set, labelled theoretical-block then
  # practical-block (see collapse_all_stats / rownames_additional_stats).
  additional_statistics <- sapply(result_set, collapse_all_stats)
  dimnames(additional_statistics) <- list(c(rownames_additional_stats(theoretical), rownames_additional_stats(practical)),
                                          setnames)
  nipt_result <- list("PredictionStatistics" = prediction_statistics, "ControlZScores" = control_group_Z_scores,
                      "All_control_group_Z_scores" = all_control_group_Z_scores,
                      "Additional_statistics" = additional_statistics)
  return(nipt_result)
}
# Accessor for the fitted-model summary of one prediction set.
listmodels <- function(prediction_set){
  prediction_set[["summary_model"]]
}
# Gather the six summary statistics of one result set: Z-score, CV and
# Shapiro p-value for both CV types, ordered so the theoretical block always
# precedes the practical block, regardless of the set's own cv_type.
collapse_all_stats <- function(result_set){
  result <- NULL
  CV_type = collapse_result(result = result_set, value = "cv_type")
  # Statistics computed under the set's own CV type.
  result_stats <- c(collapse_result(result = result_set, value = "sample_Z_score"),
                    collapse_result(result = result_set, value = "cv"),
                    collapse_result(result = result_set, value = "shapiro_P_value"))
  # Statistics computed under the complementary CV type.
  additional_result_stats <- c(collapse_result(result = result_set, value = "additional_sample_Z_score"),
                               collapse_result(result = result_set, value = "additional_cv"),
                               collapse_result(result = result_set, value = "additional_shapiro"))
  # Put whichever block corresponds to 'theoretical' first.
  if(CV_type == theoretical){
    result <- c(result_stats, additional_result_stats)
  }
  else{
    result <- c(additional_result_stats, result_stats)
  }
  return(result)
}
# Build the three row labels for one CV type: the Z-score label, the bare
# type, and the Shapiro label ('zscore' and 'shapiro' are package constants).
rownames_additional_stats <- function(type){
  z_label <- paste(type, zscore, sep="_")
  shapiro_label <- paste(type, shapiro, sep="_")
  c(z_label, type, shapiro_label)
}
79e7d6656dae041bb5c4f9af4f1f2af9f8c7f950 | ljohansson/NIPTeR | R/regression_result.R | regression_template <- function(result_set, chromo_focus, correction_status, samplenames,
potential_predictors, models, sample_names_train_set = NULL,
train_set_statistics = NULL, train_set_Zscores = NULL, type){
if (is.null(train_set_statistics)){
new_regression_template <- list(prediction_statistics = data.frame(result_set[[1]]), control_group_Zscores = result_set[[2]],
focus_chromosome = chromo_focus, correction_status = correction_status,
control_group_sample_names = samplenames, models = models,
potential_predictors = potential_predictors,
all_control_group_Z_scores = result_set$All_control_group_Z_scores,
additional_statistics = result_set$Additional_statistics)
}
else{
new_regression_template <- list(prediction_statistics = data.frame(result_set[[1]]), control_group_Zscores = result_set[[2]],
focus_chromosome = chromo_focus, correction_status = correction_status,
control_group_sample_names = samplenames, models = models,
potential_predictors = potential_predictors,
sample_names_train_set = sample_names_train_set,
train_set_statistics = train_set_statistics,
train_set_Zscores = train_set_Zscores)
}
class(new_regression_template) <- c(Regressions_result_class, type)
return(new_regression_template)
}
# Extract the element named 'value' from a result list.
collapse_result <- function(result, value){
  extracted <- result[[value]]
  extracted
}
# Space-separated predictor names: toString() inserts ", ", and each comma is
# then turned into a space (leaving a double space between names).
collapse_prediction_sets <- function(result){
  gsub(",", " ", toString(result$predictors))
}
# Interleave primary and additional control-group score columns so that each
# model contributes a (theoretical, practical) pair of columns, in that order.
# 'theoretical' and 'practical' are package-level constants.
collapse_prac_cv_control_scores <- function(control_group_scores, additional_control_group_scores, n_models, cv_types, setnames){
  endmatrix <- NULL
  # seq_len() iterates zero times when n_models == 0 (1:0 would misbehave).
  for (i in seq_len(n_models)){
    if (cv_types[i] == theoretical){
      endmatrix <- cbind(endmatrix, control_group_scores[,i], additional_control_group_scores[,i])
    }
    else{
      endmatrix <- cbind(endmatrix, additional_control_group_scores[,i], control_group_scores[,i])
    }
  }
  colnames(endmatrix) <- as.vector(rbind(paste(setnames, theoretical, sep="_"), paste(setnames, practical, sep="_")))
  return(endmatrix)
}
# Combine the list of per-prediction-set results into the matrices/frames of
# the final result, with one "Prediction_set_<i>" column per set.
# NOTE(review): 'n_predictors' is unused in this function -- confirm it can
# be removed from the signature.
collapse_results <- function(result_set, n_models, n_predictors){
  setnames <- paste("Prediction_set", 1:n_models, sep="_")
  # One column of control-group Z-scores per prediction set.
  control_group_Z_scores <- Reduce(cbind, lapply(result_set, collapse_result, value = "control_group_Z_scores"))
  additional_control_group_Z_scores <- Reduce(cbind, lapply(result_set, collapse_result, value = "additional_control_group_Z_scores"))
  cv_types <- sapply(result_set, collapse_result, value = "cv_type")
  all_control_group_Z_scores <- collapse_prac_cv_control_scores(control_group_scores = control_group_Z_scores,
                                                                additional_control_group_scores = additional_control_group_Z_scores,
                                                                n_models = n_models, cv_types = cv_types, setnames = setnames)
  # Stack the headline statistics into one labelled row per statistic.
  prediction_statistics <- rbind("Z_score_sample" = as.numeric(sapply(result_set, collapse_result, value = "sample_Z_score")),
                                 "CV" = as.numeric(sapply(result_set, collapse_result, value = "cv")),
                                 cv_types,
                                 "P_value_shapiro" = as.numeric(sapply(result_set, collapse_result, value = "shapiro_P_value")),
                                 "Predictor_chromosomes" = sapply(result_set, collapse_prediction_sets),
                                 "Mean_test_set" = sapply(result_set, collapse_result, value = "mean_test_set"),
                                 "CV_train_set" = sapply(result_set, collapse_result, value = "cv_train_set"))
  colnames(control_group_Z_scores) <- setnames
  colnames(prediction_statistics) <- setnames
  # Six additional statistics per set, theoretical block before practical.
  additional_statistics <- sapply(result_set, collapse_all_stats)
  dimnames(additional_statistics) <- list(c(rownames_additional_stats(theoretical), rownames_additional_stats(practical)),
                                          setnames)
  nipt_result <- list("PredictionStatistics" = prediction_statistics, "ControlZScores" = control_group_Z_scores,
                      "All_control_group_Z_scores" = all_control_group_Z_scores,
                      "Additional_statistics" = additional_statistics)
  return(nipt_result)
}
# Return the model summary stored in a prediction set.
listmodels <- function(prediction_set){
  summary_model <- prediction_set$summary_model
  summary_model
}
# Collect the six statistics of one result set (Z-score, CV, Shapiro p-value
# for both CV types), ordering the blocks so that the theoretical block is
# always first, whatever the set's own cv_type happens to be.
collapse_all_stats <- function(result_set){
  result <- NULL
  CV_type = collapse_result(result = result_set, value = "cv_type")
  # Statistics under the set's own CV type.
  result_stats <- c(collapse_result(result = result_set, value = "sample_Z_score"),
                    collapse_result(result = result_set, value = "cv"),
                    collapse_result(result = result_set, value = "shapiro_P_value"))
  # Statistics under the complementary CV type.
  additional_result_stats <- c(collapse_result(result = result_set, value = "additional_sample_Z_score"),
                               collapse_result(result = result_set, value = "additional_cv"),
                               collapse_result(result = result_set, value = "additional_shapiro"))
  if(CV_type == theoretical){
    result <- c(result_stats, additional_result_stats)
  }
  else{
    result <- c(additional_result_stats, result_stats)
  }
  return(result)
}
}
rownames_additional_stats <- function(type){
c(paste(type, zscore, sep="_"), type, paste(type, shapiro, sep="_"))
} | 5,725 | lgpl-3.0 |
9fed949cb47107181f6dc549b48bf5a89525033d | alsotoes/compstat2016 | tarea3IntegracionMonteCarlo/realMonteCarlo_example.R | fun <- function(x){
aux <- num*sqrt(10*x-x^2-24);
aux[is.nan(aux)] <- 0;
return(aux)
}
# Upper half of the circle x^2 + y^2 = 4; returns 0 where |x| > 2, where the
# square root would otherwise be NaN.
fun2 <- function(x){
  inside <- 4 - x^2
  vals <- sqrt(inside)
  vals[is.nan(vals)] <- 0
  return(vals)
}
# Plain Monte Carlo integration over [from, to] with n uniform samples.
from <- -2
to <- 2
n <- 1000
x <- runif(n, from, to)
# Width of the integration interval, (b - a).
to1 <- to-from
# NOTE(review): fun() references an undefined 'num', so this line errors as
# written; it also omits the (b - a) factor the second estimate uses.
(monteCarlo <- mean(fun(x)))
# Estimate of the integral of sqrt(4 - x^2) over [-2, 2]:
# (b - a) * E[f(X)], whose exact value is 2*pi (half the area of a
# radius-2 circle).
(monteCarlo <- to1*mean(fun2(x)))
| 331 | gpl-3.0 |
8167ed1e8e2be63ed2a65a72fa55930c9ffe49fc | pdcarr/KIC8462852 | astro_funcs.R | ###########################################################
AirMass <- function(JD,locObs,starLoc) {
# JD is the vector of Julian dates
# locObs is the decimal location = c(lat,long) of the observatory
# starLoc is a vector of declination degrees, minutes, seconds and right ascension in h,m,s of the star
# calculates Airmass from the time, the observer's location, and the declination of the star
# uses astroFns package
library(astroFns)
# modified Julian Date
mJD <- JD - 2400000.5
# calculate UT times
utStrings <- dmjd2ut(mJD,tz="UTC")
#break out the elements of the time as a POSIXlt class object
ltTimes <- as.POSIXlt(utStrings)
# calculate hour angles at the observatory
myHAs <- ut2ha(yr=ltTimes$year,mo=ltTimes$mon + 1,dy=ltTimes$mday,hr=ltTimes$hour,mi=ltTimes$min,se=ltTimes$sec,ra.sou =starLoc[2],lon.obs=rad2hms(locObs[2]*pi/180))
# print(myHAs)
#calculate elevation angles from hour angles and observatory latitude
myEls = elev(dec.sou=starLoc[1],ha=myHAs,lat.obs=rad2dms(locObs[1]*pi/180))
# print(myEls)
return(1/abs(sin(myEls*pi/180)))
}
| 1,076 | mit |
27e1fe154ceff3eddd0c8c6c6d4aeb4a57239019 | psobczyk/pesel_simulations | MiceAnalysis.R |
library(FactoMineR)
library(pesel)
# FactoMineR mice dataset; columns 24+ hold the expression measurements
# analysed below, columns 1-2 are used later as qualitative supplementaries.
mouse = read.table("http://factominer.free.fr/docs/souris.csv", header = T, sep = ";",
                   row.names = 1)
expressions = mouse[, 24: ncol(mouse)]
dim(expressions)
# 40 120
## Pesel analysis
# npc.max is capped at min(p, n) - 2 as required by pesel
res <- pesel(expressions, npc.min = 0, npc.max = min(ncol(expressions) -2, nrow(expressions)-2), scale = T)
res # 5
plot(res)
plot(res, posterior = FALSE)
## Pesel with exponential prior
# geometric prior (p = 0.5) over the number of principal components
pc_prior <- dgeom(0:min(ncol(expressions) -2, nrow(expressions)-2), 0.5)
res <- pesel(expressions, npc.min = 0, npc.max = min(ncol(expressions) -2, nrow(expressions)-2),
             prior = pc_prior, scale = T)
res
plot(res)
plot(res, posterior = FALSE)
## GCV
# generalized cross-validation estimate of the number of components
res.gcv <- estim_ncp(expressions)
plot(res.gcv$crit)
res.gcv$ncp # 12
## PCA plots for mice data
res.pca <- PCA(cbind.data.frame(mouse[,1:2],expressions), quali.sup= 1:2, graph = F)
plot.PCA(res.pca, habillage = 2, invisible = "quali")
plotellipses(res.pca, keepvar = "Regime", axes= c(3,4))
plot.PCA(res.pca, choix = "var", axes= c(1,2), select = "contrib 20", cex = 0.7)
plot.PCA(res.pca, choix = "var", axes= c(3,4), select = "contrib 20", cex = 0.7)
| 1,157 | gpl-3.0 |
4da92e531459fe220e48102aacc78d70b1a6c05b | RGLab/preprocessData | R/skeleton.R | #' @importFrom assertthat assert_that
#' @importFrom purrr map
#' @importFrom usethis create_package
.codefile_validate <- function(code_files) {
  # Validate the processing-script paths passed to datapackage_skeleton():
  # every path must exist and must name an R or R Markdown file.
  # Errors (with the original messages) otherwise; invisibly TRUE on success.
  #
  # file.exists() is already vectorized, so the original
  # unlist(purrr::map(code_files, file.exists)) was an unnecessary detour.
  if (!all(file.exists(code_files))) {
    stop("code_files do not all exist!", call. = FALSE)
  }
  # Case-insensitive extension check: only .R and .Rmd sources are supported.
  lowered <- tolower(code_files)
  if (!all(grepl(".*\\.r$", lowered) | grepl(".*\\.rmd$", lowered))) {
    stop("code files are not Rmd or R files!", call. = FALSE)
  }
  invisible(TRUE)
}
#' Create a Data Package skeleton for use with DataPackageR.
#'
#' Creates a package skeleton directory structure for use with DataPackageR.
#' Adds the DataVersion string to DESCRIPTION, creates the DATADIGEST file, and the data-raw directory.
#' Updates the Read-and-delete-me file to reflect the additional necessary steps.
#' @name datapackage_skeleton
#' @param name \code{character} name of the package to create.
#' @rdname datapackage_skeleton
#' @param path A \code{character} path where the package is located. See \code{\link[utils]{package.skeleton}}
#' @param force \code{logical} Force the package skeleton to be recreated even if it exists. see \code{\link[utils]{package.skeleton}}
#' @param code_files Optional \code{character} vector of paths to Rmd files that process raw data
#' into R objects.
#' @param r_object_names \code{vector} of quoted r object names , tables, etc. created when the files in \code{code_files} are run.
#' @param raw_data_dir \code{character} pointing to a raw data directory. Will be moved with all its subdirectories to "inst/extdata"
#' @param dependencies \code{vector} of \code{character}, paths to R files that will be moved to "data-raw" but not included in the yaml config file. e.g., dependency scripts.
#' @note renamed \code{datapackage.skeleton()} to \code{datapackage_skeleton()}.
#' @importFrom crayon bold green
#' @export
datapackage_skeleton <-
  function(name = NULL,
           path = ".",
           force = FALSE,
           code_files = character(),
           r_object_names = character(),
           raw_data_dir = character(),
           dependencies = character()) {
    # A package name is the only strictly required argument.
    if (is.null(name)) {
      stop("Must supply a package name", call. = FALSE)
    }
    # if (length(r_object_names) == 0) {
    #   stop("You must specify r_object_names", call. = FALSE)
    # }
    # if (length(code_files) == 0) {
    #   stop("You must specify code_files", call. = FALSE)
    # }
    # 'force' wipes any existing skeleton before recreating it.
    if (force) {
      unlink(file.path(path, name), recursive = TRUE, force = TRUE)
    }
    package_path <- usethis::create_package(
      path = file.path(path, name),
      rstudio = FALSE, open = FALSE
    )
    # compatibility between usethis 1.4 and 1.5.
    # (1.4 returned the created path; 1.5 activates the project itself)
    if(is.character(package_path)){
      usethis::proj_set(package_path)
    }else{
      # create the rest of the necessary elements in the package
      package_path <- file.path(path, name)
    }
    # Seed DESCRIPTION with the DataVersion field that DataPackageR tracks.
    description <-
      desc::desc(file = file.path(package_path, "DESCRIPTION"))
    description$set("DataVersion" = "0.1.0")
    description$set("Version" = "1.0")
    description$set("Package" = name)
    description$set("Roxygen" = "list(markdown = TRUE)")
    description$write()
    .done(paste0("Added DataVersion string to ", crayon::blue("'DESCRIPTION'")))
    usethis::use_directory("data-raw")
    usethis::use_directory("data")
    usethis::use_directory("inst/extdata")
    # .done("Created data and data-raw directories")
    # Overwrite the stock Read-and-delete-me with DataPackageR instructions.
    con <-
      file(file.path(package_path, "Read-and-delete-me"), open = "w")
    writeLines(
      c(
        "Edit the DESCRIPTION file to reflect",
        "the contents of your package.",
        "Optionally put your raw data under",
        "'inst/extdata/'. If the datasets are large,",
        "they may reside elsewhere outside the package",
        "source tree. If you passed R and Rmd files to",
        "datapackage.skeleton, they should now appear in 'data-raw'.",
        "When you call package_build(), your datasets will",
        "be automatically documented. Edit datapackager.yml to",
        "add additional files / data objects to the package.",
        "After building, you should edit dat-raw/documentation.R",
        "to fill in dataset documentation details and rebuild.",
        "",
        "NOTES",
        "If your code relies on other packages,",
        "add those to the @import tag of the roxygen markup.",
        "The R object names you wish to make available",
        "(and document) in the package must match",
        "the roxygen @name tags and must be listed",
        "in the yml file."
      ),
      con
    )
    close(con)
    # Rather than copy, read in, modify (as needed), and write.
    # process the string
    # Helper: validate and copy processing scripts / dependency scripts
    # into the package's data-raw directory.
    .copy_files_to_data_raw <- function(x, obj = c("code", "dependencies")) {
      if (length(x) != 0) {
        .codefile_validate(x)
        # copy them over
        obj <- match.arg(obj, c("code", "dependencies"))
        for (y in x) {
          file.copy(y, file.path(package_path, "data-raw"), overwrite = TRUE)
          .done(paste0("Copied ", basename(y),
                       " into ", crayon::blue("'data-raw'")))
        }
      }
    }
    # Helper: recursively copy a raw-data directory tree into inst/extdata.
    .copy_data_to_inst_extdata <- function(x) {
      if (length(x) != 0) {
        # copy them over
        file.copy(x, file.path(package_path, "inst/extdata"),
          recursive = TRUE, overwrite = TRUE
        )
        .done(paste0("Moved data into ", crayon::blue("'inst/extdata'")))
      }
    }
    .copy_files_to_data_raw(code_files, obj = "code")
    .copy_files_to_data_raw(dependencies, obj = "dependencies")
    .copy_data_to_inst_extdata(raw_data_dir)
    # Write the datapackager.yml config naming the scripts and data objects.
    yml <- construct_yml_config(code = code_files, data = r_object_names)
    yaml::write_yaml(yml, file = file.path(package_path, "datapackager.yml"))
    .done(paste0("configured ", crayon::blue("'datapackager.yml'"), " file"))
    # Remove boilerplate generated by create_package(); it is regenerated
    # when the data package is built.
    oldrdfiles <-
      list.files(
        path = file.path(package_path, "man"),
        pattern = "Rd",
        full.names = TRUE
      )
    file.remove(file.path(package_path, "NAMESPACE"))
    oldrdafiles <-
      list.files(
        path = file.path(package_path, "data"),
        pattern = "rda",
        full.names = TRUE
      )
    oldrfiles <-
      list.files(
        path = file.path(package_path, "R"),
        pattern = "R",
        full.names = TRUE
      )
    file.remove(oldrdafiles)
    file.remove(oldrfiles)
    file.remove(oldrdfiles)
    invisible(NULL)
  }
#' @rdname datapackage_skeleton
#' @name datapackage.skeleton
#' @param list Not used.
#' @param environment Not used.
#' @aliases datapackage_skeleton
#' @export
#' @examples
#' if(rmarkdown::pandoc_available()){
#' f <- tempdir()
#' f <- file.path(f,"foo.Rmd")
#' con <- file(f)
#' writeLines("```{r}\n tbl = table(sample(1:10,1000,replace=TRUE)) \n```\n",con=con)
#' close(con)
#' pname <- basename(tempfile())
#' datapackage_skeleton(name = pname,
#' path = tempdir(),
#' force = TRUE,
#' r_object_names = "tbl",
#' code_files = f)
#' }
datapackage.skeleton <- function(name = NULL,
                                 list = character(),
                                 environment = .GlobalEnv,
                                 path = ".",
                                 force = FALSE,
                                 code_files = character(),
                                 r_object_names = character()) {
  # Deprecated shim kept for backward compatibility; 'list' and
  # 'environment' are accepted but unused. Forwards everything else to the
  # snake_case implementation.
  warning("Please use datapackage_skeleton() instead of datapackage.skeleton()")
  result_path <- datapackage_skeleton(name = name,
                                      path = path,
                                      force = force,
                                      code_files = code_files,
                                      r_object_names = r_object_names)
  # usethis < 1.5 returned the package path; activate it as the project.
  if (is.character(result_path)) {
    usethis::proj_set(result_path)
  }
}
.done <- function(...) {
  # Emit a status line prefixed with a green check mark (U+2714).
  msg <- paste0(...)
  .bullet(msg, bullet = crayon::green("\u2714"))
}
.bullet <- function(lines, bullet) {
  # Prefix each line with the bullet glyph and print it.
  .cat_line(paste0(bullet, " ", lines))
}
.cat_line <- function(...) {
  # Concatenate all fragments without separators, then terminate the line.
  cat(..., sep = "")
  cat("\n")
}
| 7,916 | artistic-2.0 |
462f63f876ad9a99364a37cdda1e904559c9e15b | NovaInstitute/Rpackages | novaUtils/R/splitMobenzi2.R |
#' Decodes Mobenzi data with extraction of repeating sections
#'
#' @param filePaths A character argument containing the paths to .csv files (one per
#' section) as downloaded from Mobenzi.
#' @param formatOtions If true, formats the question options of code book variables
#' with 'format_char'.
#' @param tidy Should sections with the same number of rows be combined into one
#' section?
#' @param twoLists If TRUE, returns two lists - the first containing the data
#' frame(s) with the actual data and the second containing the metadata,
#' question book and code book. If FALSE, returns only one list with the different
#' data frames (those from the data as well as the dfs for the metadata, code book
#' and question book) simply as separate items in the list.
#' @return One or two lists of data frames. See params 'tidy' and 'twoLists' for
#' more information.
#' @export
splitMobenzi2 <- function(filePaths,
                          tidy = FALSE,
                          twoLists = FALSE,
                          formatOptions = FALSE) {
  require(novaUtils)
  # check for empty files
  nLines <- sapply(X = filePaths, FUN = R.utils::countLines)
  idxx <- which(nLines == 0)
  if (length(idxx) > 0) {
    warning(sprintf("Ignoring %d empty files...", length(idxx)))
    filePaths <- filePaths[which(nLines > 0)]
  }
  if (length(filePaths) == 0) {
    warning("No non-empty files found. Returning NULL.")
    return(NULL)
  }
  # read all the files
  ls_dfDataBySection <- lapply(X = filePaths, FUN = function(fp) {
    df <- read.csv(file = fp, header = TRUE, stringsAsFactors = FALSE)
    return(df)
  })
  # give the list some names
  if (!is.null(names(filePaths))) {
    names(ls_dfDataBySection) <- names(filePaths)
  } else {
    # derive section names from the file names: strip a leading digit
    # prefix (e.g. "01_") and the .csv extension
    sectionNames <- fixname(basename(filePaths))
    sectionNames <- gsub(pattern = "(^[[:digit:]]{1,}_)|(.csv$)",
                         replacement = "",
                         x = sectionNames)
    names(ls_dfDataBySection) <- sectionNames
  }
  # extract the code book, questions and metadata from the list
  dfCodeBook <- ls_dfDataBySection$code_book
  names(dfCodeBook) <- fixname(names(dfCodeBook))
  dfQuestions <- ls_dfDataBySection$questions
  names(dfQuestions) <- fixname(names(dfQuestions))
  dfMetadata <- ls_dfDataBySection$submissions
  ls_dfDataBySection <- ls_dfDataBySection[which(!(names(ls_dfDataBySection) %in% c("code_book",
                                                                                   "questions",
                                                                                   "submissions")))]
  # remove the weird ï_ that Mobenzi adds to the first variable of all the dfs
  names(dfCodeBook) <- gsub(pattern = "^[[:print:]]{0,}question$",
                            replacement = "question",
                            x = fixname(names(dfCodeBook)))
  names(dfQuestions) <- gsub(pattern = "^[[:print:]]{0,}question_name$",
                             replacement = "question_name",
                             x = fixname(names(dfQuestions)))
  names(dfMetadata) <- gsub(pattern = "^[[:print:]]{0,}submission_id$",
                            replacement = "submission_id",
                            x = fixname(names(dfMetadata)))
  # first round of formatting to dfQuestions
  dfQuestions$question_name <- fixname(dfQuestions$question_name)
  # decodeMobenzi, fixname and remove unnecessary/empty fields
  colsToIgnore <- c("fieldworker_name",
                    "fieldworker_id",
                    "repeats_on_question",
                    "repeat_question_value",
                    "received")
  ls_dfDataBySection <- lapply(X = ls_dfDataBySection, FUN = function(df) {
    names(df) <- fixname(names(df))
    # try to work around the problem of '_other' as option, followed by an explanatory
    # text field also named '_other' that causes duplicate fields in the end
    idxx <- which(duplicated(names(df)) &
                    (names(df) %in% dfQuestions[["question_name"]]))
    #idxx2 <- grep(pattern = "[[:print:]]{1,}_other$", x = names(df))
    #idxx <- intersect(idxx, idxx2)
    idxxQB <- which(dfQuestions[["question_name"]] %in% names(df)[idxx])
    names(df)[idxx] <- paste(names(df)[idxx], "_txt", sep = "")
    # NOTE: <<- deliberately updates dfQuestions in the enclosing
    # splitMobenzi2() environment so the rename is reflected everywhere
    dfQuestions[["question_name"]][idxxQB] <<- paste(dfQuestions[["question_name"]][idxxQB],
                                                     "_txt", sep = "")
    df <- decodeMobenzi(dfSurvey = df,
                        dfCodeBook = dfCodeBook,
                        fldnmVariable = "variable",
                        fldnmValue = "value",
                        fldnmLabel = "label",
                        formatOpsies = formatOptions)
    names(df) <- gsub(pattern = "^[[:print:]]{0,}submission_id$",
                      replacement = "submission_id",
                      x = names(df))
    df <- df[, which(!(names(df) %in% colsToIgnore)), drop = FALSE]
    # drop columns that contain only "N/A"
    idxx <- which(sapply(X = df, FUN = function(v) {return(all(v == "N/A"))}))
    if (length(idxx) > 0) {
      df <- df[, -idxx, drop = FALSE]
    }
    return(df)
  })
  # remove the '.1' etc that is sometimes added at the end of variable names
  ls_dfDataBySection <- lapply(X = ls_dfDataBySection, FUN = function(df) {
    names(df) <- gsub(pattern = "\\.[[:digit:]]{1,}$",
                      replacement = "",
                      x = names(df))
    return(df)
  })
  # format fields and names of dfCodeBook
  names(dfCodeBook) <- fixname(names(dfCodeBook))
  dfCodeBook$question <- fixname(dfCodeBook$question)
  dfCodeBook$variable <- fixname(dfCodeBook$variable)
  dfCodeBook$label <- fixname(dfCodeBook$label)
  # format fields and names of dfQuestions
  dfQuestions$section <- format_char(dfQuestions$section)
  dfQuestions$question_type <- format_char(dfQuestions$question_type)
  # format names of dfMetadata
  names(dfMetadata) <- fixname(names(dfMetadata))
  # put the data sections back into their original order
  ls_dfDataBySection <- ls_dfDataBySection[intersect(unique(dfQuestions$section),
                                                     names(ls_dfDataBySection))]
  # if 'tidy', combine sections of equal nrows
  if (tidy) {
    ls_dfDataBySection$metadata <- dfMetadata
    fldnmsBySect <- sapply(X = ls_dfDataBySection, FUN = names)
    ## reorder - metadata df should be first in the list
    ls_dfDataBySection <- ls_dfDataBySection[c("metadata",
                                               setdiff(names(ls_dfDataBySection),
                                                       "metadata"))]
    ## combine
    nrows <- unlist(sapply(X = ls_dfDataBySection, FUN = nrow))
    isRpt <- unlist(sapply(X = ls_dfDataBySection, FUN = function(df) {
      "repeating_index" %in% names(df)
    }))
    lsdfData <- list()
    # merge sections that share both a row count and a repeating /
    # non-repeating structure; repeating sections also merge on the
    # repeating_index key
    for (nrw in unique(nrows)) {
      for (isrpt in unique(isRpt)) {
        idxx <- which(nrows == nrw & isRpt == isrpt)
        if (length(idxx) == 0) {next}
        if (length(idxx) == 1) {
          lsdfData[[length(lsdfData)+1]] <- ls_dfDataBySection[[idxx]]
          next
        }
        if (isrpt) {
          nmsMergeFlds <- c("submission_id", "repeating_index")
        } else {
          nmsMergeFlds <- c("submission_id")
        }
        df <- ls_dfDataBySection[[idxx[1]]]
        for (idx in idxx[2:length(idxx)]) {
          df <- merge.data.frame(x = df, y = ls_dfDataBySection[[idx]],
                                 by = nmsMergeFlds, all = TRUE)
        }
        lsdfData[[length(lsdfData)+1]] <- df; rm(df)
      }
    }
    ## return
    if (length(lsdfData) == 1) {
      names(lsdfData) <- "data"
    } else {
      names(lsdfData) <- paste("data", 1:length(lsdfData), sep = "")
    }
    if (twoLists) {
      return(list(lsData = lsdfData,
                  lsExtra = list(code_book = dfCodeBook,
                                 questions = dfQuestions,
                                 fldnms_by_sect = fldnmsBySect)))
    } else {
      lsdfData$questions <- dfQuestions
      lsdfData$code_book <- dfCodeBook
      lsdfData$fldnms_by_sect <- fldnmsBySect
      return(lsdfData)
    }
  }
  # reaching this point means 'tidy' is FALSE, so return the data in lsdf format
  if (!twoLists) {
    ls_dfDataBySection[[length(ls_dfDataBySection) +1]] <- dfCodeBook
    ls_dfDataBySection[[length(ls_dfDataBySection) +1]] <- dfQuestions
    ls_dfDataBySection[[length(ls_dfDataBySection) +1]] <- dfMetadata
    names(ls_dfDataBySection)[(length(ls_dfDataBySection) - 2):(length(ls_dfDataBySection))] <- fixname(c("Code Book", "Questions", "Metadata"))
    return(ls_dfDataBySection)
  } else {
    lsExtra <- list(dfCodeBook, dfQuestions, dfMetadata)
    names(lsExtra) <- fixname(c("Code Book", "Questions", "Metadata"))
    return(list(lsData = ls_dfDataBySection, lsExtra = lsExtra))
  }
}
| 9,276 | mit |
555c77f84714bdc169ef537de964e76f0ce6b785 | gmaubach/R-Project-Utilities | Development/t_frequencies.R | t_frequencies <- function(variable,
                          sort = FALSE, # sort freq
                          decimals = 1, # round to decimals
                          useNA = "always",
                          max_print = 100)
{
  # Absolute frequencies; sort() orders the table ascending by count.
  if (sort)
  {
    v_abs <- sort(table(variable, useNA = useNA))
  } else
  {
    v_abs <- table(variable, useNA = useNA)
  }
  # Relative frequencies in percent, plus cumulative absolute/relative.
  v_rel <- round(100 * prop.table(v_abs),
                 decimals)
  v_abs_kum <- cumsum(v_abs)
  v_rel_kum <- cumsum(v_rel)
  v_table <-
    cbind(v_abs, v_rel, v_abs_kum, v_rel_kum)
  # With useNA = "always" the NA level has an NA row name; make it printable.
  # NOTE(review): with sort = TRUE the NA row may not be last, in which case
  # its row name stays NA -- confirm whether that matters for callers.
  if (is.na(rownames(v_table)[nrow(v_table)]))
  {
    rownames(v_table)[nrow(v_table)] <- "NA"
  }
  # Append a "Sum" margin row over the rows (margin 1).
  c_row = 1
  v_sum <- addmargins(v_table, c_row)
  v_table <- cbind(v_sum)
  v_result_table <- v_table
  # Cumulative totals are meaningless on the margin row.
  v_result_table["Sum", "v_abs_kum"] <- NA
  v_result_table["Sum", "v_rel_kum"] <- NA
  colnames(v_result_table) <-
    c("abs", "rel", "abs_kum", "rel_kum")
  cat("\n")
  # Print at most max_print rows; warn about how many were omitted.
  if (nrow(v_result_table) > max_print)
  {
    v_omitted_values <- nrow(v_result_table) - max_print
    v_result_table <- v_result_table[1:max_print , ]
    print(v_result_table)
    warning(paste("Printed only",
                  max_print,
                  "values, omitted",
                  v_omitted_values,
                  "values!"),
            call. = FALSE)
  } else
  {
    print(v_result_table)
  }
  invisible(v_result_table)
}
| 1,380 | gpl-2.0 |
3ec554a9c20bb56792d95cb979bacf693dae2e71 | wStockhausen/rCompTCMs | R/modelComparisons.ModelFits.ZCsByYear.Fisheries.R | #'
#' @title Render a document of comparison plots for model fits to fishery size composition data by year
#'
#' @description Function to render a document of comparison plots for model fits to
#' fishery size composition data by year.
#'
#' @param models - named list of model results (as resLst objects) to compare
#' @param fleets - names of fleets to include (or "all")
#' @param years - years to plot, as numerical vector (or "all" to plot all years)
#' @param plot1stObs - flag (T/F) to plot observations only from first case, or character vector cases cases from which to plot observations
#' @param plotRetained - flag to plot retained catch size comps
#' @param plotTotal - flag to plot total catch size comps
#' @param nrow - number of rows per page for output plots
#' @param ncol - number of columns per page for output plots
#' @param useBars - flag to use bars for observations
#' @param usePins - flag to use pins for observations
#' @param usePinsAndPts - flag to add pts to observations when pins are used
#' @param useLines - flag to use lines for predictions
#' @param usePoints - flag to use points for predictions
#' @param pinSize - width of pin line
#' @param lineSize - prediction line size
#' @param pointSize - prediction point size
#' @param alpha - prediction transparency
#' @param stripText - [ggplot2::element_text()] object describing font and margin to use for panel strips
#' @param output_format - "word_document" or "pdf_document"
#' @param output_dir - path to folder to use for output
#' @param rmd_dir - folder enclosing rmd file
#' @param rmd - Rmd file to process (defalut="rmd/modelComparisons.ModelFits.ZCsByYear.Fisheries.Rmd")
#' @param docx_styles - full path to Word (docx) style template for Word documents
#' @param pdf_styles - full path to style template for pdf documents
#' @param clean - T/F to delete intermediate files
#'
#' @details Resulting document title will be of the form "ModelComparisons.ModelFits.ZCsByYear.Fisheries.mmm.ext",
#' where "ext" is the appropriate file extension and "mmm" is a dash-separated string of model names.
#'
#' @export
#'
modelComparisons.ModelFits.ZCsByYear.Fisheries<-function(
    models,
    fleets="all",
    years='all',
    plotRetained=TRUE,
    plotTotal=TRUE,
    plot1stObs=TRUE,
    nrow=5,
    ncol=4,
    useBars=TRUE,
    usePins=FALSE,
    usePinsAndPts=FALSE,
    useLines=TRUE,
    usePoints=TRUE,
    pinSize=0.2,
    lineSize=1,
    pointSize=1,
    alpha=0.5,
    stripText=ggplot2::element_text(),
    output_format=c("word_document","pdf_document"),
    output_dir=getwd(),
    rmd=system.file("rmd/modelComparisons.ModelFits.ZCsByYear.Fisheries.Rmd",package="rCompTCMs"),
    docx_styles=system.file("rmd/StylesForRmdDocs.docx",package="wtsUtilities"),
    pdf_styles=system.file("rmd/StylesForRmdPDFs.sty",package="wtsUtilities"),
    clean=FALSE
  ){
  # Model names drive both the output file name ("m1-m2") and the
  # document title ("m1 vs m2").
  nms<-names(models);
  mmm<-paste0(nms,collapse="-");
  mmv<-paste0(nms,collapse=" vs ");
  output_format<-output_format[1];
  output_options<-NULL;
  #get base folder enclosing rmd file
  rmd<-normalizePath(rmd);
  bsf<-dirname(rmd);
  # Choose output extension and pandoc styling options per requested format.
  # NOTE(review): if output_format matches neither branch, 'ext' and
  # 'doc_type' remain undefined and render() below will fail -- consider
  # match.arg().
  if(output_format=="word_document") {
    doc_type<-"word";
    ext<-"docx";
    output_options<-list(reference_docx=docx_styles);
  } else if(output_format=="pdf_document") {
    doc_type<-"pdf";
    ext<-"pdf";
    output_options<-list(includes=list(in_header=pdf_styles));
  }
  output_file<-paste0("ModelComparisons.ModelFits.ZCsByYear.Fisheries.",mmm,".",ext);
  title<-paste0("Model Comparisons: Fits to Fishery Size Composition Data -- ",mmv);
  cat("Rendering to '",file.path(output_dir,output_file),"'\n",sep="")
  cat("Title: '",title,"'\n",sep='')
  cat("Base RMD folder \n\t'",bsf,"'\n",sep="");
  # Knit the parameterized Rmd; all plotting options are forwarded via params.
  rmarkdown::render(
    rmd,
    output_format=output_format,
    output_file=output_file,
    output_dir=output_dir,
    intermediates_dir=output_dir,
    output_options=output_options,
    params=list(title=title,
                Models=models,
                fleets=fleets,
                years=years,
                plotRetained=plotRetained,
                plotTotal=plotTotal,
                plot1stObs=plot1stObs,
                nrow=nrow,
                ncol=ncol,
                useBars=useBars,
                usePins=usePins,
                usePinsAndPts=usePinsAndPts,
                useLines=useLines,
                usePoints=usePoints,
                pinSize=pinSize,
                lineSize=lineSize,
                pointSize=pointSize,
                alpha=alpha,
                stripText=stripText,
                doc_type=doc_type),
    clean=clean);
}
| 4,922 | mit |
91b18c85b2727f90361628f6a968d5d3d45a066f | USGS-R/mda.streams | R/list_metab_models.R | #' List the available metab_model objects
#'
#' @param text if specified, the query only returns metab_models whose text (or
#' description, if available) matches the word[s] in \code{text}. Note that
#' partial words are not matched -- e.g., text='nwis_0138' will not match
#' models whose title includes 'nwis_01388000'
#' @param order_by character vector of aspects of the model names to sort on.
#' Options are the same as those in the \code{out} argument to
#' \code{\link{parse_metab_model_name}}
#' @return a character vector of titles of the metab_model .RData files posted
#' on SB
#' @import sbtools
#' @import dplyr
#' @export
#' @examples
#' \dontrun{
#' mms <- list_metab_models('0.0.18')
#' }
list_metab_models = function(text, order_by=c("date","tag","row","site","strategy","title")) {
  # Validate the requested sort keys (several may be combined).
  order_by <- match.arg(order_by, several.ok = TRUE)
  sb_require_login("stop")
  # Query ScienceBase for metab_model items, text-filtered when 'text' is given.
  if (missing(text)) {
    model_items <- query_item_identifier(scheme = get_scheme(), type = 'metab_model', limit = 10000)
  } else {
    model_items <- query_item_in_folder(text = text, folder = locate_folder('metab_models'), limit = 10000)
  }
  model_titles <- sapply(model_items, function(item) item$title)
  if (length(model_titles) == 0) {
    return(character())
  }
  # Guard against an old SB paging bug that duplicated & omitted items
  # when paging through many results.
  unique_titles <- unique(model_titles)
  if (length(unique_titles) != length(model_titles)) {
    warning("failed to retrieve all metab models; a retry might work")
  }
  # Order titles by the parsed name components named in order_by.
  sort_keys <- as.list(parse_metab_model_name(unique_titles))[order_by]
  unique_titles[do.call(order, sort_keys)]
}
| 1,759 | cc0-1.0 |
91b18c85b2727f90361628f6a968d5d3d45a066f | aappling-usgs/mda.streams | R/list_metab_models.R | #' List the available metab_model objects
#'
#' @param text if specified, the query only returns metab_models whose text (or
#' description, if available) matches the word[s] in \code{text}. Note that
#' partial words are not matched -- e.g., text='nwis_0138' will not match
#' models whose title includes 'nwis_01388000'
#' @param order_by character vector of aspects of the model names to sort on.
#' Options are the same as those in the \code{out} argument to
#' \code{\link{parse_metab_model_name}}
#' @return a character vector of titles of the metab_model .RData files posted
#' on SB
#' @import sbtools
#' @import dplyr
#' @export
#' @examples
#' \dontrun{
#' mms <- list_metab_models('0.0.18')
#' }
list_metab_models = function(text, order_by=c("date","tag","row","site","strategy","title")) {
  # several sort keys may be combined; all are validated against the options
  order_by <- match.arg(order_by, several.ok = TRUE)
  sb_require_login("stop")
  # get list of model items (text-filtered query when 'text' is supplied)
  model_items <-
    if(missing(text)) {
      query_item_identifier(scheme = get_scheme(), type = 'metab_model', limit=10000)
    } else {
      query_item_in_folder(text=text, folder=locate_folder('metab_models'), limit=10000)
    }
  model_titles <- sapply(model_items, function(item) item$title)
  if(length(model_titles) > 0) {
    # check unique vs total in case an old SB bug comes back (there was
    # duplication & omission of items when paging through many results)
    unique_model_titles <- unique(model_titles)
    if(length(unique_model_titles) != length(model_titles)) warning("failed to retrieve all metab models; a retry might work")
    # return titles ordered by the parsed name components named in order_by
    return(unique_model_titles[do.call(order, as.list(parse_metab_model_name(unique_model_titles))[order_by])])
  } else {
    return(character())
  }
}
| 1,759 | cc0-1.0 |
cc9f81b1d65bbe0e7b7b9e6899378221be2a9537 | tweed1e/networkasymmetry | R/solve_gamma.R | ########################################################################
# solve_lambda_gamma.R
# Function to solve for unobserved \Lambda and \Gamma, given observed A and G.
# License: MIT
# ""
# Jesse Tweedle
# , 2016
########################################################################
solve_gamma <- function(R,N,args) {
  # Fixed-point solver for the unobserved demand-share matrices Gamma
  # (plant-plant) and Lambda (region-plant) plus the supporting price
  # vectors, given observed A and G (see file header).
  #   R, N -- number of regions and plants
  #   args -- list of model primitives, unpacked below
  # Relies on Matrix-package sparse objects (@x slot access), magrittr's
  # %>% and a to_sdiag() helper from the surrounding package.
  beta <- args$beta
  C <- args$C
  A <- args$A
  G <- args$G
  ir <- args$ir
  eta <- args$eta
  epsilon <- args$epsilon
  Ti <- args$Ti
  Tr <- args$Tr
  s <- args$s
  z <- args$z
  # convergence tolerance on the price-change objective
  tol <- 1e-5
  # plant prices
  p_i0 <- p_i1 <- .sparseDiagonal(n=N,x=1)
  GAM0 <- GAM1 <- G
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    # bail out after >3 consecutive iterations of divergence or stalling
    # (relative log-improvement below 0.005)
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_i0 <- p_i1
    GAM0 <- GAM1
    # calculate new p_mi ( = unit intermediate cost)
    m2 <- Ti %*% p_i0
    m2@x <- m2@x^(1-eta)
    mxx <- rowSums(GAM0 * m2)
    p_mi <- mxx^(1/(1-eta)) #^((1-beta)/(1-sigma))
    # calculate new p_i1
    p_i1 <- (C * p_mi^(1-beta) / z) %>% to_sdiag()
    temp.3 <- (mxx / (1-beta)) %>% to_sdiag()
    temp.4 <- Ti %*% p_i1
    temp.4@x <- temp.4@x^(eta-1)
    GAM1 <- temp.3 %*% (G * temp.4)
    # solve for w, normalize p and p?
    # objective: L2 distance between successive plant price vectors
    obj <- (diag(p_i1) - diag(p_i0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # Second fixed point: region prices p_r and region-plant shares Lambda,
  # taking the converged plant prices as given.
  p_r0 <- p_r1 <- .sparseDiagonal(n=R,x=1)
  LAM0 <- LAM1 <- A
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_r0 <- p_r1
    LAM0 <- LAM1
    # NOTE(review): m1 uses p_i0 while LAM1 below uses p_i1; after the first
    # loop converged these differ by at most tol -- confirm this is intended.
    m1 <- Tr %*% p_i0
    m1@x <- m1@x^(1-epsilon)
    p_r1 <- rowSums(LAM0 * m1)^(1/(1-epsilon)) %>% to_sdiag()
    temp.1 <- p_r1
    temp.1@x <- temp.1@x^(1-epsilon)
    temp.2 <- Tr %*% p_i1
    temp.2@x <- temp.2@x^(epsilon-1)
    LAM1 <- temp.1 %*% (A * temp.2)
    # objective: L2 distance between successive region price vectors
    obj <- (diag(p_r1) - diag(p_r0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # return the region-plant and plant-plant demand shares matrices
  return(list(lambda=LAM1,gamma=GAM1,p_r=p_r1,p_i=p_i1))
}
| 2,534 | mit |
00f255b3d71c0236ff12fe84ff0768980eb99e13 | stharrold/demo | demo/app_intro/examples/2016_RMachineLearningByExample/Ch6_PredictCredit/dt_classifier.R | library(rpart)# tree models
library(caret) # feature selection
library(rpart.plot) # plot dtree
library(ROCR) # model evaluation
library(e1071) # tuning model
source("performance_plot_utils.R") # plotting curves
## separate feature and class variables
# NOTE(review): assumes 'train.data' and 'test.data' already exist in the
# session with the class label (credit.rating) in column 1 -- confirm.
test.feature.vars <- test.data[,-1]
test.class.var <- test.data[,1]
## build initial model with training data
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
dt.model <- rpart(formula=formula.init, method="class",data=train.data,
                  control = rpart.control(minsplit=20, cp=0.05))
## predict and evaluate results
dt.predictions <- predict(dt.model, test.feature.vars, type="class")
confusionMatrix(data=dt.predictions, reference=test.class.var, positive="1")
## dt specific feature selection
formula.init <- "credit.rating ~ ."
formula.init <- as.formula(formula.init)
# rank predictors via caret's repeated 10-fold cross-validation wrapper
control <- trainControl(method="repeatedcv", number=10, repeats=2)
model <- train(formula.init, data=train.data, method="rpart",
               trControl=control)
importance <- varImp(model, scale=FALSE)
plot(importance, cex.lab=0.5)
## build new model with selected features
formula.new <- "credit.rating ~ account.balance + savings +
                  credit.amount + credit.duration.months +
                  previous.credit.payment.status"
formula.new <- as.formula(formula.new)
# class priors set to 0.7/0.3 via rpart's parms argument
dt.model.new <- rpart(formula=formula.new, method="class",data=train.data,
                      control = rpart.control(minsplit=20, cp=0.05),
                      parms = list(prior = c(0.7, 0.3)))
## predict and evaluate results
dt.predictions.new <- predict(dt.model.new, test.feature.vars, type="class")
confusionMatrix(data=dt.predictions.new, reference=test.class.var, positive="1")
# view model details
dt.model.best <- dt.model.new
print(dt.model.best)
par(mfrow=c(1,1))
prp(dt.model.best, type=1, extra=3, varlen=0, faclen=0)
## plot model evaluation metric curves
# class probabilities (column 2 = positive class) feed the ROCR curves
dt.predictions.best <- predict(dt.model.best, test.feature.vars, type="prob")
dt.prediction.values <- dt.predictions.best[,2]
predictions <- prediction(dt.prediction.values, test.class.var)
par(mfrow=c(1,2))
plot.roc.curve(predictions, title.text="DT ROC Curve")
plot.pr.curve(predictions, title.text="DT Precision/Recall Curve") | 2,349 | mit |
ca53789a149e73b184ef94b33778106fe03e73bf | chipster/chipster-tools | tools/ngs/R/test-mothur.R | # TOOL test-mothur.R: "Test-Mothur"
# INPUT file.fasta: "FASTA file" TYPE GENERIC
# INPUT final.count_table: "Mothur count file" TYPE MOTHUR_COUNT
# INPUT sequences-taxonomy-assignment.txt: "Sequences taxonomy assignment file" TYPE GENERIC
# OUTPUT OPTIONAL final.unique.list
# OUTPUT OPTIONAL final.asv.shared
# OUTPUT OPTIONAL final.asv.list
# OUTPUT META phenodata.tsv
# OUTPUT OPTIONAL final.unique.shared
# OUTPUT OPTIONAL log_cluster.txt
# OUTPUT OPTIONAL log_distseqs.txt
# OUTPUT OPTIONAL log_makeshared.txt
# OUTPUT OPTIONAL log_classifyotu.txt
# OUTPUT OPTIONAL final.unique.0.03.cons.taxonomy
# OUTPUT OPTIONAL final.asv.asv.cons.taxonomy
# OUTPUT OPTIONAL final.asv.asv.cons.tax.summary
# check out if the file is compressed and if so unzip it
source(file.path(chipster.common.path,"tool-utils.R"))
source(file.path(chipster.common.path,"zip-utils.R"))
unzipIfGZipFile("file.fasta")
# binary
binary <- c(file.path(chipster.tools.path,"mothur","mothur"))
version <- system(paste(binary,"--version"),intern = TRUE)
documentVersion("Mothur",version)
library(reshape2)
#distseqs.options <- paste("dist.seqs(fasta=file.fasta)") # dist.seqs produces file.dist
#distseqs.options <- paste(distseqs.options,", processors=",chipster.threads.max,sep = "")
#distseqs.options <- paste(distseqs.options,", cutoff=",cutoff,")",sep = "")
#documentCommand(distseqs.options)
#write(distseqs.options,"distseqs.mth",append = FALSE)
#command <- paste(binary,"distseqs.mth","> log_distseqs.txt")
#system(command)
#runExternal(command, checkexit = TRUE)
# Write a mothur batch file for cluster() and run it.
cluster.options <- paste("cluster(fasta=file.fasta, count=final.count_table, method=unique)") #column=file.dist
documentCommand(cluster.options)
write(cluster.options,"cluster.mth",append = FALSE)
command <- paste(binary,"cluster.mth","> log_cluster.txt")
system(command)
# NOTE(review): the same cluster command is executed twice -- once via
# system() above and once via runExternal() below; confirm which is intended.
runExternal(command)
#makeshared.options <- paste("make.shared(list=final.unique.list, count=final.count_table)")
#makeshared.options <- paste(makeshared.options,", label=asv)",sep = "")
# Batch file for make.shared() at ASV level.
makeshared.options <- paste("make.shared(count=final.count_table, label=asv)")
documentCommand(makeshared.options)
write(makeshared.options,"makeshared.mth",append = FALSE)
command <- paste(binary,"makeshared.mth","> log_makeshared.txt")
system(command)
# Batch file for classify.otu() using the supplied taxonomy assignments.
classifyotu.options <- paste("classify.otu(list=final.asv.list, count=final.count_table, taxonomy=sequences-taxonomy-assignment.txt, label=asv)")
documentCommand(classifyotu.options)
write(classifyotu.options,"classifyotu.mth",append = FALSE)
command <- paste(binary,"classifyotu.mth","> log_classifyotu.txt")
system(command)
# read the data and tabulate it
pick <- read.table("final.count_table",header = T,sep = "\t")
tax <- read.table("sequences-taxonomy-assignment.txt",header = F,sep = "\t")
dat <- merge(pick,tax,by.x = "Representative_Sequence",by.y = "V1")
# strip trailing parenthesised numeric annotations (e.g. "(100)") from the
# taxonomy strings
dat$V2 <- gsub(".[[:digit:]]{1,}.?[[:digit:]]?)","",as.character(dat$V2))
# cut taxonomic names
# based on default assumption of mothur-classify-counttable.R
# (i.e. that cutlevel = 0)
dat$newnames <- dat$V2
# set up the final result: sample columns lie between "total" and "V2"
data_start <- which(colnames(dat) == "total") + 1
data_end <- which(colnames(dat) == "V2") - 1
names_col <- which(colnames(dat) == "newnames")
# same manipulations here
dat <- dat[,c(names_col,data_start:data_end)]
datm <- melt(dat)
# sum counts per taxonomy name per sample, then pivot to samples x taxa
a <- aggregate(datm$value,list(datm$newnames,datm$variable),function(x) sum(x,na.rm = T))
b <- dcast(a,Group.2 ~ Group.1)
rownames(b) <- b$Group.2
b <- b[,-1]
tab <- b
# write phenodata.tsv
write.table(data.frame(sample = rownames(tab),
                       chiptype = "NGS",
                       group = rep("",length(rownames(tab)))),
            "phenodata.tsv",
            col.names = T,
            row.names = F,
            sep = "\t",
            quote = F)
| 3,697 | mit |
b5a179cc122c58b014bd96c0b1a99707ffaa190d | ElCep/bazaRd | coop_viti/scrape_caves_particulieres.R | ##script pour parser les pages du site http://www.si-vitifrance.com/ pour les caves particulière
library(rgdal) ## spatial data handling via gdal
library(XML)
library(RCurl)
library(stringr) ## string manipulation
rm(list=ls())
setwd("~/github/bazaRd/coop_viti/")
#####################################################################
## HERE the vintage year can be set, between 07 and 13
annee<-11
#####################################################################
# Communes layer; its CODE_DEPT field provides the department codes
# used to build the scraping URLs.
communes<-readOGR(dsn = "./geofla",layer="commune_s")
## the CODE_DEPT field is the one to use to build the scraping URLs
# Expected column names of the scraped tables (first row of the CSV,
# first cell dropped).
nam_col<-t(read.csv("name_col_particulieres.csv",sep = ",",header = F))
nam_col<-nam_col[1,]
nam_col<-nam_col[-1]
code_insee<-as.character(unique(communes@data$CODE_DEPT))
## build one URL per department and scrape the XML table, if it exists
for(h in 1: length(code_insee)){
  doc<-NULL
  url<-paste("http://www.observatoire-viti-france.com/docs/cvi/cvi",annee,"/cartes_inter/c_vin02_cpart_com",code_insee[h],"/embfiles/th0.xml",sep="")
  verif<-sapply(url, url.exists)
  if (verif){
    doc = htmlTreeParse(url, useInternalNodes = T)
    nam<-paste("dep",code_insee[h],sep="")
    ## rebuild the table column by column (nodes //f0 .. //f8)
    table<-NULL
    for(i in 0:8){
      myNode<-paste("//f",i,sep="")
      tps <- xpathSApply(doc, myNode, fun=xmlValue)
      table<-cbind(table, tps)
    }
    colnames(table)<-nam_col
    table<-as.data.frame(table)
    table<-table[-1,] ## drop the header row; keeps one-row results in tabular form
    code<-rep(code_insee[h],length(table[,1]))
    table<-cbind(table,code)
#    table[,1]<-str_sub(table[,1],start=5)
    # One data.frame per department, named "dep<code>" in the global env.
    assign(nam,table)
  }
}
# NOTE(review): grep("dep", ls()) matches ANY object whose name contains
# "dep" -- fragile if other such objects exist in the workspace.
ordre.var<-grep("dep",ls())
list.var<-ls()
# NOTE(review): growing with rbind() in a loop is O(n^2); fine for ~100
# departments, but do.call(rbind, ...) would be preferable.
df.particulier<-NULL
for(o in ordre.var){
  var.tps<-get(list.var[o])
  df.particulier<-rbind(df.particulier,var.tps)
}
# df.particulier is the final, usable data.frame
write.csv(df.particulier,paste("volume_caves_particulieres_commune",annee,".csv",sep=""),row.names=F)
| 1,969 | gpl-2.0 |
dc1c8e3884bdfe4f2cc5cc8d1b2c8bc0d562e9f0 | v2south/spatial_bgsmtr | R_file/Create_W_true.R | #load pacakages
library(mvtnorm)
library(MCMCpack)
library(miscTools)
library(PottsUtils)
library(matrixcalc)
set.seed(12)
# NOTE(review): ls() omits dot-names, so rm(list=ls()) leaves .Random.seed
# intact and the seed set above still applies after this line.
rm(list=ls())
setwd(dir = "~/spatial_bgsmtr/")
# create a W_true matrix
# W_true matrix is simulated from the hierarchical model
# groups, rows, or entries on rows are set to zero (make W_true sparse)
#load('../common/FreeSurfer_Data.RData')
# load FreeSurfer_Data.RData
load('./R_file/FreeSurfer_Data.RData')
trans_X_unnormalized = FreeSurfer_list_Data[, 2:487]
X = t(trans_X_unnormalized)
d = dim(X)[1] #number of SNPs
n = dim(X)[2] #number of observations
# We will look at all 56 phenotypes.(In our model set-up, we use c for notation.)
p = 56 #number of phenotypes
lam_1_true = 50 #lambda_1^2
lam_2_true = 50 #lambda_2^2
# Now, instead of a single scalar for variance component sigma.
# We need a 2X2 Sigma matrix for variance and covariance.
# As well as the rho.
## set up of genes/groups
group = SNP_gene_member_reduced$GENE # gives gene belonging of each SNP
group_set = unique(group) #group_set is a vector of group names
K = length(group_set)
m=rep(NA, K) #m is a vector with the number of SNPs in each group, order is the same as group_set
for (k in 1:K){
  m[k]=length(which(group==group_set[k]))
}
# m=rep(NA, K) #m is a vector with the number of SNPs in each group, order is the same as group_set
# for (k in 1:K){
#   m[k]=length(which(group==group_set[k]))
# }
group_size = m
omega_true=rgamma(d, (p/2+1)/2, lam_2_true/2 ) #simulating values for omega^2
tau_true=rep(NA, K) #simulating values for tau^2
for (k in 1:K){
  tau_true[k]=rgamma(1, (m[k]*p/2+1)/2, lam_1_true/2)
}
# 2x2 covariance for each (left, right) pair of brain measures, with
# correlation 0.85 between hemispheres.
Sigma_true_11 <- 1
Sigma_true_22 <- 1
Sigma_true_corr <- 0.85
Sigma_true <- matrix(c(Sigma_true_11, Sigma_true_corr * sqrt(Sigma_true_11*Sigma_true_22),
                       Sigma_true_corr * sqrt(Sigma_true_11*Sigma_true_22) ,Sigma_true_22) , nrow = 2, byrow = TRUE)
# Spatial dependence
rho_true <- 0.95
# For W_true, the W_ij* = (W_{ij}, W_{ij+1}) where j* = 1,2, .... c/2.
# Each pair of W_ij* is following a bivariate normal distribution.
# NOTE(review): mvrnorm() is MASS::mvrnorm -- presumably available because
# MCMCpack attaches MASS; confirm, since only mvtnorm is loaded explicitly.
W_true=matrix(NA, d, p)
j_star = seq(1, p, by=2)
for (k in 1:K){ #loop that simulates W.true from its distribution
  idx=which(group==group_set[k])
  for (i in 1:m[k]){
    for(j in j_star){
      # W_true[idx[i],] = rnorm(p, 0, sd=sqrt(sig_true*(1/tau_true[k]+ 1/omega_true[idx[i]])^(-1)))
      W_true[idx[i], j:(j+1)] = mvrnorm(1, mu = c(0,0), Sigma = Sigma_true * (1/tau_true[k]+ 1/omega_true[idx[i]])^(-1) )
    }
  }
}
# For Adjacency matrix, we create a sysmetric matrix for now.
# len_c = p*(p/2+1)/4
# n_c = p/2
# vec = rbinom(len_c, 1, prob=0.2)
# A = symMatrix(data=vec, nrow=n_c, byrow = FALSE )
# Create A based on the 2D grid with first order neighborhood struture.
# 4x7 grid = 28 sites = p/2 paired measures.
mask <-matrix(1,4,7)
n_grid <- 28
# Define the neighborhood structure(First order) and get the neighbor matrix.
neiStruc <- c(2,2,0,0)
neighbors <- getNeighbors(mask, neiStruc)
# getNeighbors() encodes "no neighbor" as n_grid + 1; drop those entries.
A <- matrix(0,p/2,p/2)
for ( i in 1:28)
{
  ndx <- neighbors[i, neighbors[i,]!=(n_grid + 1)]
  A[i, ndx] <- 1
}
D_A = diag(colSums(A))
inv_DA_rA = solve(D_A - rho_true*A)
W_temp = matrix(0, d, p)
# insert values of W_true into W_zeros
# select 3 genes that will have all coefficients from W_true as simulated
# APOE (1), MTHFR (10), PRNP (4), CR1 (14), TFAM (6)
import_genes = c('APOE', 'CR1', 'MTHFR', 'PRNP', 'TFAM')
import_genes_location = c(which(group == import_genes[1]), which(group == import_genes[2] ),
                          which(group == import_genes[3]), which(group == import_genes[4]),
                          which(group == import_genes[5]))
W_temp[import_genes_location, ] <- W_true[import_genes_location, ]
# Sample from other SNPs to insert their true values
import_lone_SNPs_location = sample((1:d)[-import_genes_location], 80, replace = FALSE)
import_lone_SNPs = row.names(X)[import_lone_SNPs_location]
W_temp[import_lone_SNPs_location, ] <- W_true[import_lone_SNPs_location, ]
# All other rows of W_true remain exactly zero (sparsity).
W_true <- W_temp
row.names(W_true) <- row.names(X)
W_colname <- rep(0,p)
for ( v in 1:p)
{
  W_colname[v] <- paste("BrainMeasure_", v, sep = "")
}
colnames(W_true) <- W_colname
number_true_nonzero_SNPs <- sum(as.numeric(rowSums(abs(W_true)) != 0))
# Now, by using the X and W, we can get the Y.
# Y_l ~ N( t(W) X_l , (D_A - rho A)^{-1} (x) Sigma ), one draw per subject.
Y_true <- matrix(0, nrow = p ,ncol = n)
tW_X <- t(W_true) %*% X
Y_Sigma <- inv_DA_rA %x% Sigma_true
for ( l in 1:n)
{
  Y_true[,l] <- mvrnorm(1, mu = tW_X[,l], Sigma = Y_Sigma)
}
# Create the scatter plot showing signal-to-noise.
plot(c(tW_X), c(Y_true))
# Create the side by side map for illustrating the two hemispher for now.
# NOTE(review): column 156 hard-codes one subject for the illustration.
par(mfrow=c(1,2))
odd_idx <- seq(1,p,2)
even_idx <- seq(2,p,2)
image(1:4,1:7, z = matrix(Y_true[odd_idx,156], nrow = 4,ncol = 7))
image(1:4,1:7, z = matrix(Y_true[even_idx,156], nrow = 4,ncol = 7))
save( W_true, lam_1_true, lam_2_true, Sigma_true, omega_true, tau_true,
      group, K, group_set, group_size,
      import_genes, import_genes_location, import_lone_SNPs, import_lone_SNPs_location, Y_true,
      file = 'Y_W_true.RData')
| 5,866 | gpl-3.0 |
901488a995c8c514fb67b955aec87112d3f518fd | AlejandroRuete/IgnoranceMaps | SLWapp/server.R | require(raster)
require(rgdal)
library(maptools)
# Sweden outline in SWEREF99 (UTM zone 33) for overlay on all maps.
# NOTE(review): readShapePoly() is deprecated in current maptools -- confirm
# the package version this app is pinned to.
Swe<-readShapePoly("data/Sweden Simple Sweref.shp", proj4string=CRS("+proj=utm +zone=33 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
# Colour palettes used by the map panels.
GreyColors<-colorRampPalette(c("white", "black"),interpolate="spline", space="Lab")( 16 )
RedBlue<-colorRampPalette(c("blue","white", "red"),interpolate="spline", space="Lab")( 11 )
Topo<-terrain.colors(16)
Topo[16]<-"#FFFFFFFF"
# Per-taxon rasters: total observations (e.g. Amp), species richness (e.g.
# AmpR), and two example species per taxon (common and rare).
Amp <- raster("data/Amp.tif")
AmpR <- raster("data/Amp richness.tif")
Buf<-raster("data/Buf.tif")
Pel<-raster("data/Pel.tif")
Bir <- raster("data/Bir.tif")
BirR <- raster("data/Bir richness.tif")
Par<-raster("data/Par.tif")
Poe<-raster("data/Poe.tif")
Pae <- raster("data/Pae.tif")
PaeR <- raster("data/Pae richness.tif")
Pap<-raster("data/Pap.tif")
Col<-raster("data/Col.tif")
Mam <- raster("data/MamLnB.tif")
MamR <- raster("data/MamLnB richness.tif")
Alc<-raster("data/Alc.tif")
Eri<-raster("data/Eri.tif")
Opi <- raster("data/Opi.tif")
OpiR <- raster("data/Opi richness.tif")
Opca<-raster("data/Opc.tif")
Lac<-raster("data/Lac.tif")
Odo <- raster("data/Odo.tif")
OdoR <- raster("data/Odo richness.tif")
Lib<-raster("data/Lib.tif")
Neh<-raster("data/Neh.tif")
Vas <- raster("data/Vas.tif")
VasR <- raster("data/Vas richness.tif")
Pan<-raster("data/Pan.tif")
Eup<-raster("data/Eup.tif")
# Indices of non-NA cells (taken from the Amp raster) used by TransPlot.
cellwdata<-which(!is.na(Amp[]))
##################
# Shiny server function
shinyServer(function(input, output) {
# Return the requested dataset
  # Observation-count raster for the taxon selected in the UI.
  datasetInput <- reactive({
    switch(input$dataset,
           "Amphibia" = Amp,
           "Aves" = Bir,
           "Papilionoidea" = Pae,
           "Mammals" = Mam,
           "Odonata" = Odo,
           "Opilions" = Opi,
           "Tracheophyta" = Vas)
  })
  # Species-richness raster for the same taxon selection.
  richnessInput <- reactive({
    switch(input$dataset,
           "Amphibia" = AmpR,
           "Aves" = BirR,
           "Papilionoidea" = PaeR,
           "Mammals" = MamR,
           "Odonata" = OdoR,
           "Opilions" = OpiR,
           "Tracheophyta" = VasR)
  })
  # Ignorance raster for the selected taxon: 1 = no knowledge, 0 = well
  # sampled. Three alternative scores (labelled "Normalized",
  # "Log-Normalized" and "Half-ignorance" in TransPlot's legend) are
  # selected via input$trans.
  ignorInput <- reactive({
    dataset <- datasetInput()
    rich <- richnessInput()
    # Optionally divide observations by richness (observation index),
    # keeping cells with zero observations at zero.
    if(input$index==TRUE){
      o<-dataset
      o<-dataset/rich
      o[which(dataset[]==0)]<-0
      dataset<-o
    }
    # 1: normalized -- 1 minus obs scaled by the raster maximum.
    if(input$trans==1){
      dataset.norm<-calc(dataset, fun=function(x){return(x/dataset@data@max)})
      CI<-1-dataset.norm
    }
    # 2: log-normalized -- same, on log(obs + 1).
    if(input$trans==2){
      dataset.log<- calc(dataset, fun=function(x){return(log(x+1))})
      dataset.norm<- dataset.log/dataset.log@data@max
      CI<-1-dataset.norm
    }
    # 3: half-ignorance -- O_0.5/(obs + O_0.5), where input$obs50 is the
    # observation count giving a score of 0.5.
    if(input$trans==3){
      obs50<-input$obs50
      CI<-calc(dataset, fun=function(x){return(obs50/(x+obs50))})
    }
    return(CI)
  }) # end ignorInput
spptargetInput<-reactive({
#############################
if(input$dataset=="Amphibia"){
if(input$target=="Common"){
sppname<-"Bufo bufo"
spp<-Buf
} #en Common
if(input$target=="Rare"){
sppname<-"Pelophylax lessonae"
spp<-Pel
} #end Rare
} #end Amphibians
#############################
if(input$dataset=="Aves"){
if(input$target=="Common"){
sppname<-"Parus major"
spp<-Par
} #end Common
if(input$target=="Rare"){
sppname<-"Poecile cinctus"
spp<-Poe
} # end Rare
} #end Birds
##############################
if(input$dataset=="Papilionoidea"){
if(input$target=="Common"){
sppname<-"Papilio machaon"
spp<-Pap
} #end Common
if(input$target=="Rare"){
sppname<-"Colias hecla"
spp<-Col
} #end rare
} #end Mammals
##############################
if(input$dataset=="Mammals"){
if(input$target=="Common"){
sppname<-"Alces alces"
spp<-Alc
} #end Common
if(input$target=="Rare"){
sppname<-"Erinaceus europaeus"
spp<-Eri
} #end rare
} #end Mammals
#############################
if(input$dataset=="Opilions"){
if(input$target=="Common"){
sppname<-"Opilio canestrinii"
spp<-Opca
} #en Common
if(input$target=="Rare"){
sppname<-"Lacinius horridus"
spp<-Lac
} #end Rare
} #end Opilions
#############################
if(input$dataset=="Odonata"){
if(input$target=="Common"){
sppname<-"Libellula quadrimaculata"
spp<-Lib
} #en Common
if(input$target=="Rare"){
sppname<-"Nehalennia speciosa"
spp<-Neh
} #end Rare
} #end Opilions
#############################
if(input$dataset=="Tracheophyta"){
if(input$target=="Common"){
sppname<-"Parnassia palustris"
spp<-Pan
} #en Common
if(input$target=="Rare"){
sppname<-"Euphrasia officinalis officinalis"
spp<-Eup
} #end Rare
} #end Vascular Plants
return(list(sppname,spp))
}) # end sppTarget
  # Pseudo-absence and normalized-observation rasters for the target
  # species: returns list(spp.psabs, spp.norm). input$trans2 selects the
  # transformation; input$obs502 is the half-ignorance parameter.
  sppPAInput<-reactive({
    spp<-spptargetInput()[[2]]
    # NOTE(review): sppname is assigned but never used in this reactive.
    sppname<-spptargetInput()[[1]]
    obs50<-input$obs502
    # 1: normalized -- pseudo-absence is 1 minus scaled observations.
    if(input$trans2==1){
      spp.norm<- calc(spp, fun=function(x){return(x/spp@data@max)})
      spp.psabs<- 1- spp.norm
    }
    # 2: log-normalized.
    if(input$trans2==2){
      spp.log<- calc(spp, fun=function(x){return(log(x+1))})
      spp.norm<- spp.log/spp.log@data@max
      spp.psabs<- 1-spp.norm
    }
    # 3: half-ignorance on the raw counts.
    if(input$trans2==3){
      spp.norm<- calc(spp, fun=function(x){return(x/spp@data@max)})
      spp.psabs<- calc(spp, fun=function(x){return(obs50/(x+obs50))})
    }
    # 4: half-ignorance, truncated to 1 below the obs50 threshold.
    if(input$trans2==4){
      spp.norm<- calc(spp, fun=function(x){return(x/spp@data@max)})
      spp.psabs<- calc(spp, fun=function(x){
        return(ifelse(x<obs50, 1, obs50/(x+obs50)))
      })
    }
    return(list(spp.psabs,spp.norm))
  }) # end reactive sppPA
  # Per-cell ratio of target-species observations to the average
  # observations per species (total obs / richness) for the taxon --
  # used as a population size index in ObsPlot.
  sppOddsInput<-reactive({
    spp<-spptargetInput()[[2]]
    obs <- datasetInput()
    rich <- richnessInput()
    spp.odd<- overlay(spp, obs, rich, fun=function(x,y,z){return(x/(y/z))})
    return(spp.odd)
  }) # end reactive sppOdds
  # Four side-by-side maps: (1) observations, (2) ignorance, (3) target
  # species pseudo-absence, (4) population size index overlaid with cells
  # of certain presence/absence.
  output$ObsPlot <- renderPlot(height = 800, expr = {
    par(mfrow=c(1,4), oma=c(0,0,1,1))
    dataset <- datasetInput()
    rich <- richnessInput()
    # Same index/log transforms as in ignorInput, applied for display.
    if(input$index==TRUE){
      o<-dataset
      o<-dataset/rich
      o[which(dataset[]==0)]<-0
      dataset<-o
    }
    if(input$trans==2) {
      #dataset<- calc(datasetInput(), fun=function(x){return(log(x+1))})
      dataset<- calc(dataset, fun=function(x){return(log(x+1))})
    }
    CI<-ignorInput()
    ######## Panel 1: observations (or observation index).
    par(mar=c(0,0,0,3),cex=1,las=0, tck=.5, bty="n")
    plot(dataset, zlim=c(0,dataset@data@max), bty="n", legend=FALSE, axes=FALSE, col=rev(Topo))
    # Legend tick positions, rounded coarser for large ranges.
    r.range <- c(dataset@data@min, dataset@data@max)
    r.rangeseq<-seq(r.range[1], r.range[2],
                    by=round((r.range[2]-r.range[1])/10,ifelse(r.range[2]>1000,-2,ifelse(r.range[2]>100,-1,0))))
    #    par(mar=c(0,0,8,3))
    plot(dataset, legend.only=TRUE, zlim=c(0,dataset@data@max), col=rev(Topo),
         legend.width=3, legend.shrink=0.5,
         axis.args=list(at=r.rangeseq,
                        labels=r.rangeseq,
                        cex.axis=1.5),
         legend.args=list(text=ifelse(input$index==TRUE,paste(ifelse(input$trans!=2,"Obs Index","Log(Obs Index)")," for", as.character(input$dataset)),paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Obs for", as.character(input$dataset))),
                          side=2, font=2, line=1.5, cex=1))
    #    par(mar=c(0,0,0,3))
    plot(Swe, lwd=1.5, border="grey50", add=TRUE)
    # 100 km scale bar in the lower-right corner.
    scale.lng<-100000 #(m)
    segments(max(coordinates(dataset)[,1]),min(coordinates(dataset)[,2]),max(coordinates(dataset)[,1])-scale.lng,min(coordinates(dataset)[,2]),lwd=2)
    text(max(coordinates(dataset)[,1])-scale.lng/2,min(coordinates(dataset)[,2])+50000, labels=paste(scale.lng/1000, "km"),cex=1.5)
    ####### Panel 2: ignorance score (0-1).
    par(mar=c(0,0,0,3),cex=1,las=0, tck=.05, bty="n")
    plot(CI, zlim=c(0,1), bty="n", legend=FALSE, axes=FALSE, col=RedBlue)
    plot(CI, legend.only=TRUE, zlim=c(0,1),col=RedBlue,
         legend.width=3, legend.shrink=0.5,
         axis.args=list(at=seq(0, 1, .2),
                        labels=seq(0, 1, .2),
                        cex.axis=1.5),
         legend.args=list(text=paste("Ignorance for", as.character(input$dataset)),
                          side=2, font=2, line=1.5, cex=1))
    plot(Swe, lwd=1.5, add=TRUE)
    scale.lng<-100000 #(m)
    segments(max(coordinates(dataset)[,1]),min(coordinates(dataset)[,2]),max(coordinates(dataset)[,1])-scale.lng,min(coordinates(dataset)[,2]),lwd=2)
    text(max(coordinates(dataset)[,1])-scale.lng/2,min(coordinates(dataset)[,2])+50000, labels=paste(scale.lng/1000, "km"),cex=1.5)
    ######## Panel 3: target species pseudo-absence (0-1).
    spp.psabs<-sppPAInput()[[1]]
    spp.norm<-sppPAInput()[[2]]
    par(mar=c(0,0,0,3),cex=1,las=0, tck=.05, bty="n")
    plot(spp.psabs, zlim=c(0,1), bty="n", legend=FALSE, axes=FALSE,col=RedBlue)
    plot(spp.psabs, legend.only=TRUE, zlim=c(0,1),col=RedBlue,
         legend.width=3, legend.shrink=0.5,
         axis.args=list(at=seq(0, 1, .2),
                        labels=seq(0, 1, .2),
                        cex.axis=1.5),
         legend.args=list(text=paste("Ps. absence of",spptargetInput()[[1]]),
                          side=2, font=2, line=1.5, cex=1))
    plot(Swe, lwd=1.5, add=TRUE)
    scale.lng<-100000 #(m)
    segments(max(coordinates(dataset)[,1]),min(coordinates(dataset)[,2]),max(coordinates(dataset)[,1])-scale.lng,min(coordinates(dataset)[,2]),lwd=2)
    text(max(coordinates(dataset)[,1])-scale.lng/2,min(coordinates(dataset)[,2])+50000, labels=paste(scale.lng/1000, "km"),cex=1.5)
    ####### Panel 4: population size index with certainty overlays
    # (pseudo-absence/presence combined with 1 - ignorance via "prod").
    fun="prod" #alt "geomean"
    sppOdds<-sppOddsInput()
    maxOdds<-ceiling(max(sppOdds[], na.rm = TRUE))
    oddstep<-ifelse(maxOdds/5 < 1, round(maxOdds/5, 1), round(maxOdds/5))
    par(mar=c(0,0,0,3),cex=1,las=0, tck=.05, bty="n")
    plot(sppOdds, zlim=c(0,maxOdds), bty="n", legend=FALSE, axes=FALSE,col=GreyColors)
    plot(sppOdds, legend.only=TRUE, zlim=c(0,maxOdds), col=GreyColors,
         legend.width=3, legend.shrink=0.5,
         axis.args=list(at=seq(0, maxOdds, oddstep),
                        labels=seq(0, maxOdds, oddstep),
                        cex.axis=1.5),
         legend.args=list(text=paste("Population Size Index of",spptargetInput()[[1]]),
                          side=2, font=2, line=1.5, cex=1))
    # Red: certain pseudo-absence; green: certain presence, each above the
    # user-chosen thresholds (input$minAbs / input$minPres).
    plot(overlay(spp.psabs,1-CI,fun=fun),
         zlim=c(input$minAbs,1),col="#FF0000",alpha=input$alpha, legend=FALSE, add=T)
    plot(overlay(1-spp.psabs,1-CI,fun=fun), #1-spp.psabs,
         zlim=c(input$minPres,1),col="#00FF00",alpha=input$alpha,legend=FALSE, add=T)
    plot(Swe, lwd=1.5, border="grey50", add=TRUE)
    scale.lng<-100000 #(m)
    segments(max(coordinates(dataset)[,1]),min(coordinates(dataset)[,2]),max(coordinates(dataset)[,1])-scale.lng,min(coordinates(dataset)[,2]),lwd=2)
    text(max(coordinates(dataset)[,1])-scale.lng/2,min(coordinates(dataset)[,2])+50000, labels=paste(scale.lng/1000, "km"),cex=1.5)
    legend("topleft", c(paste0("Certain ps.absence (", input$minAbs," - 1)"), paste0("Certain presence (", input$minPres," - 1)")),
           col=c(paste0(c("#FF0000","#00FF00"),input$alpha * 100)),
           bty="n", pch= 15, cex=1.5)
  }) #end outputPlot
  # Diagnostic panels for the chosen transformation: histogram of records
  # per cell, richness vs. observations, and the three candidate ignorance
  # score curves.
  output$TransPlot <- renderPlot({
    par(mfrow=c(1,3), oma=c(1,0,1,0))
    # Cell-wise vectors over the non-NA cells (cellwdata, defined above).
    richV <- as.numeric(richnessInput()[cellwdata])
    datasetV<-as.numeric(datasetInput()[cellwdata])
    if(input$index==TRUE){datasetI<-ifelse(datasetV==0, 0, datasetV/richV) }
    if(input$index==FALSE){datasetI<-datasetV}
    if(input$trans!=2) {dataset.D<-datasetI}
    if(input$trans==2) {
      dataset.log<- log(datasetI+1)
      dataset.D<- dataset.log
    }
    ## Density plot
    # NOTE(review): "from" is not a hist() argument; it is passed through
    # "..." -- confirm it has any effect here.
    par(mar=c(4,4,3,2),cex=1)
    #plot(density(dataset.D, from=0), #na.rm=T,
    hist(dataset.D, from=0, col="lightblue", #na.rm=T,
         xlab=ifelse(input$index==TRUE,paste(ifelse(input$trans!=2,"Obs Index","Log(Obs Index)")," for", as.character(input$dataset)),paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Obs for", as.character(input$dataset))),
         #paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Observations for", as.character(input$dataset)),
         ylab="No. cells",
         main=paste("No. records for", as.character(input$dataset)))
    ## Species Discovery plot
    # NOTE(review): "Richnes" typo in the title string below is user-facing
    # text; left untouched here.
    plot(dataset.D, richV,
         pch=19,
         xlab=ifelse(input$index==TRUE,paste(ifelse(input$trans!=2,"Obs Index","Log(Obs Index)")," for", as.character(input$dataset)),paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Obs for", as.character(input$dataset))),
         #paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Observations for", as.character(input$dataset)),
         ylab="Richness",
         main=paste("Richnes vs. Observations for", as.character(input$dataset)))
    #abline(a=0,b=1)
    ## Algorithms plot
    # Normalized score: 1 - x/max(x).
    maxX<-max(datasetI)
    transnorm<-function(x, maxX){
      norm<-x/maxX
      norm<- 1- norm
      return(norm)
    }
    par(mar=c(4,4,3,2),cex=1)
    curve(transnorm(x,maxX), from=0,to=maxX, n = 1001, ylim=c(0,1), lwd=2,
          xlab=ifelse(input$index==TRUE,paste("Obs Index for", as.character(input$dataset)),paste("No. of Obs for", as.character(input$dataset))),
          #paste("No. of Observations for", as.character(input$dataset)), #paste(ifelse(input$trans!=2,"No.","Log(No.)"),"of Observations for", as.character(input$dataset)),
          ylab="Ignorance score",
          main="Ignorance scores")
    # Log-normalized score: 1 - log(x+1)/max(log(x+1)).
    translog<-function(x,dec){
      logx<-log(x+dec)#+abs(min(log(x+dec))) ## second term not needed if dec = 1
      logx.norm<-logx/max(logx)
      logCI<-1 -(logx.norm)
      return(logCI)
    }
    curve(translog(x,1), col=4, lwd=2,add=T)
    # Half-ignorance score: O_0.5/(x + O_0.5); crosses 0.5 at x = obs50.
    obs50<-input$obs50
    par(mar=c(4,4,3,2),cex=1)
    curve(obs50/(x+obs50), lwd=2, add=T, col=2)
    abline(v=1, lty=3)
    abline(v=obs50, lty=3, col=2)
    abline(h=0.5, lty=3, col=2)
    # exp1<-expression(Normalized = 1 - x/ max(x),
    #                  LogNormalized = 1 - log(x+1)/max( log(x+1) ),
    #                  Inversed = O[0.5]/(x+O[0.5]))
    legend("topright", legend=c("Normalized","Log-Normalized","Half-ignorance"),
           lty=1, lwd=2, col=c("black","blue","red"),bty="n")
  }) #end outputPlot
}) #end server
| 16,996 | gpl-3.0 |
81d8e554ba916dcfeaa7cffd2e1d939939e379b7 | MicroPasts/MicroPasts-Scripts | crowdSourcingAdmin/contributorLists.R | ## Creation of contributors to project text file.
# Builds a comma-separated "thank you" list of contributor full names for
# one crowdsourcing project, from the project's task-run export and the
# exported user table.
# Set working directory (for example as below)
setwd("~/Documents/research/micropasts/analysis/contributions/") #MacOSX
#setwd("C:\\micropasts\\analysis") #Windows
#setwd("micropasts/analysis") #Linux
# Create CSV directory if it does not exist
if (!file.exists('csv')){
  dir.create('csv')
}
# Create archives directory if it does not exist
if (!file.exists('archives')){
  dir.create('archives')
}
# Create JSON folder if it does not exist
if (!file.exists('json')){
  dir.create('json')
}
# Load library
library(jsonlite)
# Set the project name
project <- 'wgs'
# Set the base url of the application
baseUrl <- 'http://crowdsourced.micropasts.org/app/'
# Set the task runs api path
taskruns <- '/tasks/export?type=task_run&format=json'
# Form the export url
url <- paste(baseUrl,project,taskruns, sep='')
# Create the archive path
archive <- paste('archives/', project, 'TasksRun.zip', sep='')
# Create the task run file name
taskruns <- paste(project, '_task_run.json', sep= '' )
# Create the task run file path
taskrunsPath <- paste('json/', project, '_task_run.json', sep= '' )
# Import tasks from json, this method has changed due to coding changes by SciFabric to their code
download.file(url, archive)
# Unzip the archive
unzip(archive)
# Rename the archive
file.rename(taskruns, taskrunsPath)
# Get the user id from the task run data
data <- fromJSON(paste(readLines(taskrunsPath), collapse=""))
data <- as.data.frame(data)
user_id <- data$user_id
# NOTE(review): right assignment (->) is unidiomatic; kept as-is.
as.data.frame(user_id) -> user_id
# Load user data
# http://crowdsourced.micropasts.org/admin/users/export?format=csv (when logged in as admin)
# This saves as all_users.csv and put this in the csv folder
users <- read.csv('csv/all_users.csv', sep=",", header=TRUE)
userList <- users[,c("id","fullname")]
# Rename column id to user_id for merging
names(userList) <- c("user_id", "fullname")
# Merge the data
contributors <- merge(user_id, userList, by="user_id")
# NOTE(review): "names" shadows base::names() for the rest of the script.
as.vector(contributors$fullname) -> names
#Extract and print unique names
unique(names) -> names
thanks <- paste(as.character(names), collapse=", ")
# Write the thank you list to a text file.
fileConn<-file(paste(project, '.txt', sep=''))
writeLines(c(thanks), fileConn)
close(fileConn) | 2,279 | apache-2.0 |
531c5900fb4f7b992f5b9c96dc9b66cee686f5bb | polde-live/sgh-labs | 20161015_przetw/20161015_setup.R | # Laboratorium z przetwarzania danych
# 2016-10-15
# Libraries for the individual tasks
library(sas7bdat)
library(dplyr)
library(lubridate)
library(stringr)
# Note - only on a university computer!
# NOTE(review): "d" is not defined anywhere in this file -- presumably set
# interactively before sourcing; confirm, otherwise setwd(d) errors.
setwd(d);
# Read a SAS dataset into a data frame
# sas7bdat library
# Function sas7bdat::read.sas7bdat
readsas <- function(filename) {
  # Read the SAS dataset "<filename>.sas7bdat" from the local ./sas_data
  # folder.
  #
  # filename: dataset name without the .sas7bdat extension.
  # Returns the data.frame produced by sas7bdat::read.sas7bdat().
  #
  # Use file.path() instead of string concatenation for portable path
  # construction; explicit return() dropped (last expression is returned).
  path <- file.path("./sas_data", paste0(filename, ".sas7bdat"))
  sas7bdat::read.sas7bdat(path)
}
| 453 | unlicense |
40185032b62834a5b0d82a34f06f44037e84e304 | DFITC/fts | c1/1-1.R | #
# Copyright (c) 2015-2016 by Yuchao Zhao, Xiaoye Meng.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
require(fBasics)
# Daily simple returns for three stocks are in columns 2-4 of the file.
stock_data <- read.table("data/d-3stocks9908.txt", header = TRUE)
simple_rtn <- stock_data[, 2:4]
# (a) summary statistics of the percentage simple returns
apply(simple_rtn * 100, 2, basicStats)
# (b) transform to log returns
log_rtn <- log(1 + simple_rtn)
# (c) summary statistics of the percentage log returns
apply(log_rtn * 100, 2, basicStats)
# (d) t-test that the mean log return is zero:
# \frac{\sqrt{T}\hat{\mu}_x}{\hat{\sigma}_x}
apply(log_rtn, 2, t.test)
| 962 | gpl-3.0 |
40185032b62834a5b0d82a34f06f44037e84e304 | xiaoyem/fts | c1/1-1.R | #
# Copyright (c) 2015-2016 by Yuchao Zhao, Xiaoye Meng.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
require(fBasics)
# Daily simple returns for three stocks are in columns 2-4 of the file.
stock_data <- read.table("data/d-3stocks9908.txt", header = TRUE)
simple_rtn <- stock_data[, 2:4]
# (a) summary statistics of the percentage simple returns
apply(simple_rtn * 100, 2, basicStats)
# (b) transform to log returns
log_rtn <- log(1 + simple_rtn)
# (c) summary statistics of the percentage log returns
apply(log_rtn * 100, 2, basicStats)
# (d) t-test that the mean log return is zero:
# \frac{\sqrt{T}\hat{\mu}_x}{\hat{\sigma}_x}
apply(log_rtn, 2, t.test)
| 962 | gpl-3.0 |
986619e724f5fb856711673e172f75b1db02cd78 | keboola/application-sample | gettingStarted.R | #' this script will get you going
#'
#'
installedPackages <- rownames(installed.packages())
cranPackages <- c("devtools", "shiny", "DT", "ggplot2", "plotly")
new.packages <- cranPackages[!(cranPackages %in% installedPackages)]
if(length(new.packages)) install.packages(new.packages)
library(devtools)
if (("aws.signature" %in% installedPackages) == FALSE) {
devtools::install_github("cloudyr/aws.signature")
}
if (("keboola.sapi.r.client" %in% installedPackages) == FALSE) {
devtools::install_github("keboola/sapi-r-client")
}
if (("keboola.shiny.lib" %in% installedPackages) == FALSE) {
devtools::install_github("keboola/shiny-lib")
}
library(keboola.sapi.r.client)
library(shiny)
token <- readline(prompt="Please enter your KBC token:")
client <- SapiClient$new(token)
print(unlist(lapply(client$listBuckets(),function(x){
x$id
})))
bucket <- readline(prompt="Please enter the bucket to explore:")
launchKeboolaApp <- function(appUrl) {
browseURL(paste0(appUrl,"?bucket=",bucket,"&token=",token))
}
runKeboolaApp <- function(){
runApp(launch.browser=launchKeboolaApp)
}
| 1,124 | mit |
3174222824dc7bdad8b99df7621dabff92790827 | mexicoevalua/app_municipios | server.R | library(shiny)
# Load the ggplot2 package which provides
# the 'mpg' dataset.
data <- read.csv("data/data_table.csv", encoding="utf8")
# Define a server for the Shiny app
shinyServer(function(input, output) {
# Filter data based on selections
output$table <- renderDataTable({
if (input$estado != "Todos"){
data <- data[data$Estado == input$estado,]
}
if (input$year != "Todos"){
data <- data[data$Año == input$year,]
}
if (input$crimen != "Todos"){
data <- data[data$Crimen == input$crimen,]
}
data
})
}) | 569 | mit |
412cbe6ae82169b0f817a624bd0a398246dc1d46 | ttriche/dma | R/model.update3.R | model.update3 <-
function (piold, gamma, eps, y, yhat, predvar) {
# Revised June 29, 2009:
# Modified to regularize the posterior model probabilities away from zero
# by adding eps to each one and renormalizing.
# August 23, 2007. Update model posterior probabilities using
# flattening. See C8338-9.
# This will be used in makf3.
# Inputs:
# piold K-vector of input model probabilities
# gamma flattening parameter
# eps minimum threshold for model probabilities
# y observed value of y_t
# yhat K-vector of predicted values of y_t | y_{t-1} from rm.Kalman
# predvar K-vector of predicted variances of y_t | y_{t-1} from rm.Kalman
# Output:
# pinew K-vector of updated model probabilities
# Form predicted pi values
pipred <- piold^gamma / sum(piold^gamma)
# Update pi values
logpyt <- -0.5*log(predvar) - 0.5*(y-yhat)^2/predvar
logpyt <- logpyt - max(logpyt)
pyt <- exp (logpyt)
pinew <- pipred * pyt
pinew <- pinew/sum(pinew)
pinew <- pinew + eps
pinew <- pinew/sum(pinew)
# Output
list (pinew=as.vector(pinew))
}
| 1,040 | gpl-2.0 |
c4091591db74fc2d986696c1fe146ea20ebe3ba0 | SCAR/solong | data-raw/equations_krill.R | refs$Goeb2007 <- bibentry(bibtype="Article",key="Goeb2007",
author=c(person(c("M","E"),"Goebel"),person(c("J","D"),"Lipsky"),person(c("C","S"),"Reiss"),person(c("V","J"),"Loeb")),
year=2007,
title="Using carapace measurements to determine the sex of Antarctic krill, Euphausia superba",
journal="Polar Biology",volume=30,pages="307-315",
doi="10.1007/s00300-006-0184-8")
refs$Morr1988 <- bibentry(bibtype="Article",key="Morr1988",
author=c(person(c("D","J"),"Morris"),person(c("J","L"),"Watkins"),person("C","Ricketts"),
person("F","Buchholz"),person("J","Priddle")),
year=1988,
title="An assessmant of the merits of length and weight measurements of Antarctic krill Euphausia superba",
journal="British Antarctic Survey Bulletin",
volume=79,pages="27-50")
refs$Hewi2004 <- bibentry(bibtype="Article",key="Hewi2004",
author=c(person(c("R","P"),"Hewitt"),person("J","Watkins"),person("M","Naganobu"),
person("V","Sushin"),person(c("A","S"),"Brierley"),person("D","Demer"),
person("S","Kasatkina"),person("Y","Takao"),person("C","Goss"),
person("A","Malyshko"),person("M","Brandon")),
year=2004,
title="Biomass of Antarctic krill in the Scotia Sea in January/February 2000 and its use in revising an estimate of precautionary yield",
journal="Deep Sea Research Part II: Topical Studies in Oceanography",
volume=51,pages="1215-1236",doi="10.1016/j.dsr2.2004.06.011")
refs$Mayz2003 <- bibentry(bibtype = "Article", key = "Mayz2003",
author = c(person("P", "Mayzaud"),
person("M", "Boutoute"),
person("F", "Alonzo")),
year = 2003,
title = "Lipid composition of the euphausiids Euphausia vallentini and Thysanoessa macrura during summer in the Southern Indian Ocean",
journal = "Antarctic Science",
volume = 15, pages = "463-475", doi = "10.1017/S0954102003001573")
##refs$Mayz1998 <- bibentry(bibtype = "Article", key = "Mayz1998",
## author = c(person("P", "Mayzaud"),
## person("E", "Albessard"),
## person("J", "Cuzin-Roudy")),
## year = 1998,
## title = "Changes in lipid composition of the Antarctic krill Euphausia superba in the Indian sector of the Antarctic Ocean: influence of geographical location, sexual maturity stage and distribution among organs",
## journal = "Marine Ecology Progress Series",
## volume = 173, pages = "149-162", doi = "10.3354/meps173149")
refs$Farb1994 <- bibentry(bibtype = "Article", key = "Farb1994",
author = person("J", "F\ue4rber-Lorda"),
year = 1994,
title = "Length-weight relationships and coefficient of condition of Euphausia superba and Thysanoessa macrura (Crustacea: Euphausiacea) in southwest Indian Ocean during summer",
journal = "Marine Biology",
volume = 118, pages = "645-650", doi = "10.1007/BF00347512")
refs$FaMa2010 <- bibentry(bibtype = "Article", key = "FaMa2010",
author = c(person("J", "F\ue4rber-Lorda"),
person("P", "Mayzaud")),
year = 2010,
title = "Morphology and total lipids in Thysanoessa macura from the southern part of the Indian Ocean during summer. Spatial and sex differences",
journal = "Deep-Sea Research II",
volume = 57, pages = "565-571", doi = "10.1016/j.dsr2.2009.11.001")
refs$Melv2018 <- bibentry(bibtype = "Article", key = "Melv2018",
author = c(person(c("J", "E"), "Melvin"),
person("S", "Kawaguchi"),
person("R", "King"),
person(c("K", "M"), "Swadling")),
year = 2018,
title = "The carapace matters: refinement of the instantaneous growth rate method for Antarctic krill Euphausia superba Dana, 1850 (Euphausiacea)",
journal = "Journal of Crustacean Biology",
pages = "1-8", doi = "10.1093/jcbiol/ruy069")
refs$PuJo1988 <- bibentry(bibtype = "Article", key = "PuJo1988",
author = c(person(c("R", "A"), "Puddicombe"),
person(c("G", "W"), "Johnstone")),
year = 1988,
title = "The breeding season diet of Adelie penguins at the Vestfold Hills, East Antarctica",
journal = "Hydrobiologia",
volume = 165, pages = "239-253", doi = "10.1007/bf00025593")
refs$Farb1990 <- bibentry(bibtype = "Article", key = "Farb1990",
author = person("J", "F\ue4rber-Lorda"),
year = 1990,
title = "Somatic length relationships and ontogenetic morphometric differentiation of Euphausia superba and Thysanoessa macrura of the southwest Indian Ocean during summer (February 1981)",
journal = "Deep Sea Research Part A Oceanographic Research Papers",
volume = 37, pages = "1135-1143", doi = "10.1016/0198-0149(90)90055-Z")
alleq_krill <- function(id) {
switch(id,
"236217J_TL_Goeb2007"=list(taxon_name="Euphausia superba",
taxon_aphia_id=236217,
equation=function(RCL)tibble(allometric_value=10.43+2.26*RCL),
inputs=tibble(property="removed carapace length",units="mm",sample_minimum=9,sample_maximum=12),
return_property="total length",
return_units="mm",
reliability=tribble(~type,~value,
"N",154,
"R^2",0.40),
notes="Applies to juvenile animals",
reference=refs$Goeb2007),
"236217F_TL_Goeb2007"=list(taxon_name="Euphausia superba",
taxon_aphia_id=236217,
equation=function(RCL)tibble(allometric_value=11.6+2.13*RCL),
inputs=tibble(property="removed carapace length",units="mm",sample_minimum=9,sample_maximum=21),
return_property="total length",
return_units="mm",
reliability=tribble(~type,~value,
"N",463,
"R^2",0.883),
notes="Applies to adult female animals",
reference=refs$Goeb2007),
"236217M_TL_Goeb2007"=list(taxon_name="Euphausia superba",
taxon_aphia_id=236217,
equation=function(RCL)tibble(allometric_value=0.62+3.13*RCL),
inputs=tibble(property="removed carapace length",units="mm",sample_minimum=9,sample_maximum=18),
return_property="total length",
return_units="mm",
reliability=tribble(~type,~value,
"N",514,
"R^2",0.777),
notes="Applies to adult male animals",
reference=refs$Goeb2007),
"236217_WW_Morr1988"=list(taxon_name="Euphausia superba",
taxon_aphia_id=236217,
equation=function(AT){ a <- 3.85; expon <- 3.20;
out <- a*1e-06*(AT^expon)
tibble(allometric_value=replace(out,AT<22 | AT>48,NA))},
inputs=tibble(property="total length",units="mm",sample_minimum=22,sample_maximum=48),
return_property="wet weight",
return_units="g",
reliability=tribble(~type,~value,
"N",4217),
notes="Parameters from Morris et al. (1988) Table IV. Equation may not be valid outside of the range of data used to fit the equation; such values set to NA here",
reference=refs$Morr1988),
"236217_WW_Hewi2004"=list(taxon_name="Euphausia superba",
taxon_aphia_id=236217,
equation=function(SL){ a <- 2.236; expon <- 3.314;
out <- a*1e-06*(SL^expon)
tibble(allometric_value=out)},
inputs=tibble(property="standard length",units="mm"),
return_property="wet weight",
return_units="g",
notes="Parameters from Hewitt et al. (2004) equ. 3",
reference=refs$Hewi2004),
## Mayz2003
## Thysanoessa macrura 236219
"236219A_WW~TL_Mayz2003" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 10^(4.38 * log10(TL) - 3.64)),
inputs = tibble(property = "total length", units = "mm"),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 23,
"R^2", 0.814),
notes = "Applies to adult animals",
reference = refs$Mayz2003),
"236219J_WW~TL_Mayz2003" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 10^(2.83 * log10(TL) - 1.72)),
inputs = tibble(property = "total length", units = "mm"),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 37,
"R^2", 0.859),
notes = "Applies to juvenile animals",
reference = refs$Mayz2003),
## Euphausia vallentini 221054
"221054M_WW~TL_Mayz2003" = list(taxon_name = "Euphausia vallentini",
taxon_aphia_id = 221054,
equation = function(TL) tibble(allometric_value = 10^(2.60 * log10(TL) - 1.53)),
inputs = tibble(property = "total length", units = "mm"),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 57,
"R^2", 0.804),
notes = "Applies to male animals",
reference = refs$Mayz2003),
"221054F_WW~TL_Mayz2003" = list(taxon_name = "Euphausia vallentini",
taxon_aphia_id = 221054,
equation = function(TL) tibble(allometric_value = 10^(1.87 * log10(TL) - 0.52)),
inputs = tibble(property = "total length", units = "mm"),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 71,
"R^2", 0.575),
notes = "Applies to female animals",
reference = refs$Mayz2003),
## lipid weights to wet weights
"221054_LpW~WW_Mayz2003" = list(taxon_name = "Euphausia vallentini",
taxon_aphia_id = 221054,
equation = function(WW) tibble(allometric_value = 10^(2.39 * log10(WW) - 5.01)),
inputs = tibble(property = "wet weight", units = "mg"),
return_property = "lipid weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 26,
"R^2", 0.757),
notes = "No difference between sexes observed, so equation was derived from data from males and females combined",
reference = refs$Mayz2003),
"236219F_LpW~WW_Mayz2003" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(WW) tibble(allometric_value = 10^(2.86 * log10(WW) - 4.97)),
inputs = tibble(property = "wet weight", units = "mg"),
return_property = "lipid weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 17,
"R^2", 0.792),
notes = "Applies to female animals",
reference = refs$Mayz2003),
"236219M_LpW~WW_Mayz2003" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(WW) tibble(allometric_value = 10^(1.53 * log10(WW) - 2.80)),
inputs = tibble(property = "wet weight", units = "mg"),
return_property = "lipid weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 6,
"R^2", 0.835),
notes = "Applies to male animals",
reference = refs$Mayz2003),
"236219J_LpW~WW_Mayz2003" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(WW) tibble(allometric_value = 10^(1.04 * log10(WW) - 1.65)),
inputs = tibble(property = "wet weight", units = "mg"),
return_property = "lipid weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 37,
"R^2", 0.300),
notes = "Applies to juvenile animals",
reference = refs$Mayz2003),
## ## Mayzaud et al 1998
## ## Euphausia superba 236217,
## "236217_WW~TL_Mayz1998" = list(taxon_name="Euphausia superba",
## taxon_aphia_id=236217,
## equation = function(TL) tibble(allometric_value = 10^(-0.08 + 3.12 * log10(TL))),
## inputs = tibble(property = "total length", units = "mm"),
## return_property = "wet weight",
## return_units = "mg",
## reliability = tribble(~type, ~value,
## "N", 121,
## "R^2", 0.967),
## notes = "Applies to males, females, and subadult animals",
## reference = refs$Mayz1998),
## this looks wrong, it doesn't match the figure in the paper. Not including
## Farber-Lorda 1994
"236219_WW~TL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 0.00157 * (TL ^ 3.721)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 8.87, sample_maximum = 21.82),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 106,
"R^2", 0.911),
notes = "Applies to males, females, and subadult animals",
reference = refs$Farb1994),
"236219SA_WW~TL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 1.65e-03 * (TL ^ 3.705)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 8.87, sample_maximum = 16.92),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 70),
notes = "Applies to subadult animals",
reference = refs$Farb1994),
"236219A_WW~TL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 0.13e-03 * (TL ^ 4.564)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 17.2, sample_maximum = 21.82),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 36),
notes = "Applies to adult animals of both sexes",
reference = refs$Farb1994),
"236219M_WW~TL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 0.20e-03 * (TL ^ 4.382)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 17.22, sample_maximum = 21.05),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 15),
notes = "Applies to adult male animals",
reference = refs$Farb1994),
"236219F_WW~TL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 1.25e-03 * (TL ^ 3.824)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 17.2, sample_maximum = 21.82),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 21),
notes = "Applies to adult female animals",
reference = refs$Farb1994),
"236219SA_WW~CL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.215 * (CL ^ 3.330)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 2.86, sample_maximum = 5.73),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 70),
notes = "Applies to subadult animals",
reference = refs$Farb1994),
"236219A_WW~CL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.279 * (CL ^ 3.120)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.27, sample_maximum = 7.80),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 36),
notes = "Applies to adult animals of both sexes",
reference = refs$Farb1994),
"236219M_WW~CL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.076 * (CL ^ 3.826)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.29, sample_maximum = 6.70),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 14),
notes = "Applies to adult male animals",
reference = refs$Farb1994),
"236219F_WW~CL_Farb1994" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.833 * (CL ^ 2.559)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.27, sample_maximum = 7.80),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 22),
notes = "Applies to adult female animals",
reference = refs$Farb1994),
"236217_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 0.00503 * (TL ^ 3.283)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 22.8, sample_maximum = 49.1),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 343,
"R^2", 0.946),
notes = "Applies to males, females (mature and spawned), and subadult animals",
reference = refs$Farb1994),
"236217SA_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 7.16e-03 * (TL ^ 3.183)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 22.8, sample_maximum = 35.0),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 161),
notes = "Applies to subadult animals",
reference = refs$Farb1994),
"236217MI_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 2.32e-03 * (TL ^ 3.490)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 33.3, sample_maximum = 41.2),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 45),
notes = "Applies to male stage I animals",
reference = refs$Farb1994),
"236217MII_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 2.72e-03 * (TL ^ 3.463)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 36.0, sample_maximum = 43.0),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 24),
notes = "Applies to male stage II animals",
reference = refs$Farb1994),
"236217F_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 5.87e-03 * (TL ^ 3.247)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 33.4, sample_maximum = 49.1),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 62),
notes = "Applies to mature female animals",
reference = refs$Farb1994),
"236217SF_WW~TL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(TL) tibble(allometric_value = 1.70e-03 * (TL ^ 3.562)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 33.9, sample_maximum = 43.7),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 53),
notes = "Applies to spawned female animals",
reference = refs$Farb1994),
"236217SA_WW~CL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 35.87e-03 * (CL ^ 3.701)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 8.7, sample_maximum = 14.1),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 86),
notes = "Applies to subadult animals",
reference = refs$Farb1994),
"236217MI_WW~CL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 24.79e-03 * (CL ^ 3.841)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 12.1, sample_maximum = 15.5),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 45),
notes = "Applies to male stage I animals",
reference = refs$Farb1994),
"236217MII_WW~CL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 29.04e-03 * (CL ^ 3.955)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 12.4, sample_maximum = 15.4),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 24),
notes = "Applies to male stage II animals",
reference = refs$Farb1994),
"236217F_WW~CL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 25.77e-03 * (CL ^ 3.714)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 12.8, sample_maximum = 20.3),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 43),
notes = "Applies to mature female animals",
reference = refs$Farb1994),
"236217SF_WW~CL_Farb1994" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 26.68e-03 * (CL ^ 3.687)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 13.8, sample_maximum = 18.8),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 32),
notes = "Applies to spawned female animals",
reference = refs$Farb1994),
## NOTE(review): for both Melv2018 entries below, the stated carapace-length
## input range (25-52 mm) and the small slopes (0.422, 0.33) yield predicted
## total lengths SMALLER than the carapace-length input, which cannot be
## right for E. superba (TL > CL). The regression direction may be reversed
## (i.e. the paper's model may be CL ~ TL, making the input total length).
## Verify against Melvin et al. (2018) before using these equations.
"236217F_TL~CL_Melv2018" = list(taxon_name = "Euphausia superba",
                                taxon_aphia_id = 236217,
                                equation = function(CL) tibble(allometric_value = -3.23 + 0.422*CL),
                                inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 26, sample_maximum = 52),
                                return_property = "total length",
                                return_units = "mm",
                                reliability = tribble(~type, ~value,
                                                      "N", 93,
                                                      "R^2", 0.89),
                                notes = "Applies to post-moult female animals",
                                reference = refs$Melv2018),
"236217M_TL~CL_Melv2018" = list(taxon_name = "Euphausia superba",
                                taxon_aphia_id = 236217,
                                equation = function(CL) tibble(allometric_value = -0.304 + 0.33*CL),
                                inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 25, sample_maximum = 51),
                                return_property = "total length",
                                return_units = "mm",
                                reliability = tribble(~type, ~value,
                                                      "N", 46,
                                                      "R^2", 0.89),
                                notes = "Applies to post-moult male animals",
                                reference = refs$Melv2018),
"236216_TL~CL_PuJo1988" = list(taxon_name = "Euphausia crystallorophias",
taxon_aphia_id = 236216,
equation = function(CL) tibble(allometric_value = 1.512*CL + 13.28),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.2, sample_maximum = 13.8),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 343),
reference = refs$PuJo1988),
"236217_TL~CL_PuJo1988" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 2.857*CL + 2.63),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 6.2, sample_maximum = 19.0),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 262),
reference = refs$PuJo1988),
"236219_ED~CL_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.9807*(CL^0.4187)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 3.5, sample_maximum = 7.8),
return_property = "eye diameter",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 41,
"R^2", 0.764),
notes = "Sample minimum and maximum are approximate",
reference = refs$FaMa2010),
"236219_ED~TL_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 0.4361*(TL^0.5411)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 12, sample_maximum = 21.5),
return_property = "eye diameter",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 41,
"R^2", 0.818),
notes = "Sample minimum and maximum are approximate",
reference = refs$FaMa2010),
"236219_ED~WW_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(WW) tibble(allometric_value = 1.04*(WW^0.1557)),
inputs = tibble(property = "wet weight", units = "mg", sample_minimum = 15, sample_maximum = 160),
return_property = "eye diameter",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 41,
"R^2", 0.8198),
notes = "Sample minimum and maximum are approximate",
reference = refs$FaMa2010),
"236219_CL~TL_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(TL) tibble(allometric_value = 0.1699*(TL^1.232)),
inputs = tibble(property = "total length", units = "mm", sample_minimum = 12, sample_maximum = 21.5),
return_property = "carapace length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 60,
"R^2", 0.975),
notes = "Sample minimum and maximum are approximate",
reference = refs$FaMa2010),
## Wet weight (mg) from total length (mm) for T. macrura.
## FIX: return_property/return_units previously said "carapace length"/"mm"
## (copy-paste from the CL~TL entry above), contradicting the equation id
## (WW~TL), the weight-length power-law form of the equation, and the
## companion "236219_WW~CL_FaMa2010" entry; corrected to wet weight in mg.
"236219_WW~TL_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
                               taxon_aphia_id = 236219,
                               equation = function(TL) tibble(allometric_value = 0.002098*(TL^3.6446)),
                               inputs = tibble(property = "total length", units = "mm", sample_minimum = 12, sample_maximum = 21.5),
                               return_property = "wet weight",
                               return_units = "mg",
                               reliability = tribble(~type, ~value,
                                                     "N", 60,
                                                     "R^2", 0.9529),
                               notes = "Sample minimum and maximum are approximate",
                               reference = refs$FaMa2010),
"236219_WW~CL_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 0.5335*(CL^2.793)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 3.5, sample_maximum = 7.8),
return_property = "wet weight",
return_units = "mg",
reliability = tribble(~type, ~value,
"N", 60,
"R^2", 0.960),
notes = "Sample minimum and maximum are approximate",
reference = refs$FaMa2010),
"236219_LpWW~WW_FaMa2010" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(WW) tibble(allometric_value = 0.0003721 * (WW ^ 1.098) * 100),
inputs = tibble(property = "wet weight", units = "mg", sample_minimum = 15, sample_maximum = 160),
return_property = "lipid content wet weight",
return_units = "%",
reliability = tribble(~type, ~value,
"N", 60,
"R^2", 0.770),
notes = "Sample minimum and maximum are approximate. Adapted from F\ue4rber-Lorda & Mayzaud (2010) figure 9 and its corresponding equation",
reference = refs$FaMa2010),
"236217SAJ_TL~CL_Farb1990" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 2.376 * CL + 2.182),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 8, sample_maximum = 14),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 147,
"R^2", 0.94),
notes = "Applies to subadult and juvenile animals. Sample minimum and maximum are approximate.",
reference = refs$Farb1990),
"236217M1_TL~CL_Farb1990" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 2.131 * CL + 6.753),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 12, sample_maximum = 16),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 120,
"R^2", 0.74),
notes = "Applies to male animals from Group 1 (as defined by F\ue4rber-Lorda 1990). Sample minimum and maximum are approximate.",
reference = refs$Farb1990),
"236217M2_TL~CL_Farb1990" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 2.748 * CL + 1.869),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 12, sample_maximum = 15.5),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 60,
"R^2", 0.73),
notes = "Applies to male animals from Group 2 (as defined by F\ue4rber-Lorda 1990). Sample minimum and maximum are approximate.",
reference = refs$Farb1990),
"236217F_TL~CL_Farb1990" = list(taxon_name = "Euphausia superba",
taxon_aphia_id = 236217,
equation = function(CL) tibble(allometric_value = 1.836 * CL + 9.139),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 13, sample_maximum = 21),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 112,
"R^2", 0.88),
notes = "Applies to female animals. Sample minimum and maximum are approximate.",
reference = refs$Farb1990),
"236219J_TL~CL_Farb1990" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 2.719 * CL + 2.112),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 2.5, sample_maximum = 5.5),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 173,
"R^2", 0.92),
notes = "Applies to juvenile animals. Sample minimum and maximum are approximate",
reference = refs$Farb1990),
"236219M_TL~CL_Farb1990" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 2.060 * CL + 5.602),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5, sample_maximum = 7),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 33,
"R^2", 0.85),
notes = "Applies to adult male animals. Sample minimum and maximum are approximate",
reference = refs$Farb1990),
"236219F_TL~CL_Farb1990" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 2.132 * CL + 4.607),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.5, sample_maximum = 8),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 41,
"R^2", 0.89),
notes = "Applies to adult female animals. Sample minimum and maximum are approximate",
reference = refs$Farb1990),
"236219A_TL~CL_Farb1990" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 1.857 * CL + 7.142),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 5.5, sample_maximum = 8),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 75,
"R^2", 0.87),
notes = "Applies to adult animals. Sample minimum and maximum are approximate",
reference = refs$Farb1990),
"236219_TL~CL_Farb1990" = list(taxon_name = "Thysanoessa macrura",
taxon_aphia_id = 236219,
equation = function(CL) tibble(allometric_value = 4.221 * (CL ^ 0.812)),
inputs = tibble(property = "carapace length", units = "mm", sample_minimum = 2.5, sample_maximum = 8),
return_property = "total length",
return_units = "mm",
reliability = tribble(~type, ~value,
"N", 249),
notes = "Derived from a combined sample of juvenile and adult male and female animals. Sample minimum and maximum are approximate",
reference = refs$Farb1990),
stop("unrecognized equation ID: ",id))
}
| 55,797 | mit |
cf98983afb6d24fc0dec9d44cbcdd2b0114126e1 | isidiomartins/TESTE | ui.R | # ui.R
library(shiny)
# Snapshot of MAPA ordinances ("portarias") loaded at app start-up.
# NOTE(review): `portarias` is not referenced in this UI definition --
# presumably server.R uses it; confirm, since ui.R globals are not shared
# with server.R unless loaded from global.R.
portarias <- readRDS("portarias_MAPA_2016-09-08.RDS")
# UI: centred title, a sidebar with a publication-date filter, a free-text
# search box and a "Buscar" (search) button, and a main panel where the
# user picks an ordinance number and reads its text.
shinyUI(fluidPage(
  HTML('<div align = "center">'),
  # Application title
  titlePanel("Portarias do Ministério da Agricultura, Pecuária e Abastecimento"),
  HTML('</div>'),
  # Sidebar: search controls (defaults to ordinances from the last 3 days)
  sidebarLayout(
    sidebarPanel(
      dateInput(inputId = "data", label = "Portarias publicadas a partir da data:",
                value = Sys.Date() - 3, format = "dd/mm/yyyy"),
      br(),
      br(),
      textInput(inputId = "termo",label = "Digite aqui os termos da busca",
                value = ""),
      br(),
      actionButton(inputId = "buscar", label = "Buscar")),
    # Main panel: ordinance selector (choices filled in by the server)
    # and the selected ordinance's text.
    mainPanel(
      selectInput(inputId = "numero_portaria",
                  label = "Escolha o número da portaria que deseja ver:",
                  choices = "", selected = ""),
      br(),
      verbatimTextOutput("resultado")
    )
  )
))
| 1,057 | gpl-3.0 |
dc48243b883e97fb8159dd7431e529afe5047c80 | rgorman/syntacto_stylistics | R_files/code/open_ling/chunkByCountRelPos_July28.R | library(XML)
## Exploratory stylometry script: parses XML treebank files into 100-word
## "sWord" chunks, cross-tabulates feature frequencies per chunk, then
## reduces the feature set. Several lines are ALTERNATIVES meant to be run
## selectively in an interactive session, not top to bottom -- see NOTEs.
source("code/corpusFunctions.R")
input.dir <- "../rel_pos_prose"
files.v <- dir(path=input.dir, pattern=".*xml")
#the following script calls the user-defined function "getSwordChunkMaster).
#this function will return a list of lists of tables, each table with a maximum of words = the second variable
book.freqs.l <- list()
# NOTE(review): prefer seq_along(files.v) -- 1:length(files.v) is c(1, 0)
# when the directory is empty.
for(i in 1:length(files.v)){
  doc.object <- xmlTreeParse(file.path(input.dir, files.v[i]), useInternalNodes=TRUE)
  chunk.data.l <- getSwordChunkMaster(doc.object, 100)
  book.freqs.l[[files.v[i]]] <-chunk.data.l
}
summary(book.freqs.l)
freqs.l <- list()
#convert list into matrix object
#this code requires the user defined function "my.apply"
freqs.l <- lapply(book.freqs.l, my.apply)
summary(freqs.l)
freqs.df <- do.call(rbind, freqs.l)
#the result is a long form data frame
dim(freqs.df)
# create .csv file for inspection (very optional!!)
write.csv(freqs.df, file="sWord_output/inspect1.csv")
#make name labels for the file (strip ".xml" plus the per-chunk suffix)
bookids.v <- gsub(".xml.\\d+", "", rownames(freqs.df))
#make book-with-chunk id labes
book.chunk.ids <- paste(bookids.v, freqs.df$ID, sep="_")
#replace the ID column in freqs.df
freqs.df$ID <- book.chunk.ids
#cross tabulate data: one row per chunk, one column per feature
result.t <- xtabs(Freq ~ ID+Var1, data=freqs.df)
dim(result.t)
#convert to a data frame
final.df <- as.data.frame.matrix(result.t)
#make author vector and strip work name and book numbers from it
author.v <- gsub("_.+", "", rownames(final.df))
head(author.v)
unique(author.v)
length(author.v)
author.v
#reduce the feature set
freq.means.v <- colMeans(final.df[, ])
#collect column means of a given magnitude
keepers.v <- which(freq.means.v >=.00008)
#collect column means of a given magnitude for NaiveBayes
# NOTE(review): this ALTERNATIVE threshold overwrites the previous
# keepers.v; run one or the other, not both.
keepers.v <- which(freq.means.v >=.0009)
#use keepers.v to make a smaller data frame object for analysis
smaller.df <- final.df[, keepers.v]
# NOTE(review): the next line uses ordered.df BEFORE it is created below --
# it errors if the script is run top to bottom; it is an alternative
# selection intended for a later interactive pass.
smaller.df <- ordered.df[, 1:107]
dim(smaller.df)
# order columns by column mean, largest to smallest and create object with results
ordered.df <- smaller.df[, order(colMeans(smaller.df), decreasing=TRUE)]
View(ordered.df)
# reseve full ordered.df and smaller.df for backup
ordered.df.backup <- ordered.df
smaller.df.backup <- smaller.df
# reduce variables from ordered.df (165 for rel-pos files is the sweet spot)
smaller.df <- ordered.df[, 1:165]
View(smaller.df)
| 2,362 | cc0-1.0 |
dd8c175a13cfe3e4e443f51822c8f520f2e69bfc | pchmieli/h2o-3 | h2o-r/h2o-package/R/export.R | #`
#` Data Export
#`
#` Export data to local disk or HDFS.
#` Save models to local disk or HDFS.
#' Export an H2O Data Frame to a File
#'
#' Exports an H2O Frame (which can be either VA or FV) to a file.
#' This file may be on the H2O instace's local filesystem, or to HDFS (preface
#' the path with hdfs://) or to S3N (preface the path with s3n://).
#'
#' In the case of existing files \code{forse = TRUE} will overwrite the file.
#' Otherwise, the operation will fail.
#'
#' @param data An H2O Frame data frame.
#' @param path The path to write the file to. Must include the directory and
#' filename. May be prefaced with hdfs:// or s3n://. Each row of data
#' appears as line of the file.
#' @param force logical, indicates how to deal with files that already exist.
#' @examples
#'\dontrun{
#' library(h2o)
#' h2o.init()
#' irisPath <- system.file("extdata", "iris.csv", package = "h2o")
#' iris.hex <- h2o.uploadFile(path = irisPath)
#'
#' # These aren't real paths
#' # h2o.exportFile(iris.hex, path = "/path/on/h2o/server/filesystem/iris.csv")
#' # h2o.exportFile(iris.hex, path = "hdfs://path/in/hdfs/iris.csv")
#' # h2o.exportFile(iris.hex, path = "s3n://path/in/s3/iris.csv")
#' }
#' @export
h2o.exportFile <- function(data, path, force = FALSE) {
if (!is.Frame(data))
stop("`data` must be an H2O Frame object")
if(!is.character(path) || length(path) != 1L || is.na(path) || !nzchar(path))
stop("`path` must be a non-empty character string")
if(!is.logical(force) || length(force) != 1L || is.na(force))
stop("`force` must be TRUE or FALSE")
.h2o.__remoteSend(.h2o.__EXPORT_FILES(data,path,force))
}
#'
#' Export a Model to HDFS
#'
#' Exports an \linkS4class{H2OModel} to HDFS.
#'
#' @param object an \linkS4class{H2OModel} class object.
#' @param path The path to write the model to. Must include the directory and
#'        filename.
#' @param force logical, indicates how to deal with files that already exist.
#' @export
# Thin wrapper: the HDFS destination is selected purely by the hdfs:// prefix
# of `path`, so this simply delegates to h2o.exportFile().
h2o.exportHDFS <- function(object, path, force=FALSE) { h2o.exportFile(object,path,force) }
#' Download H2O Data to Disk
#'
#' Download an H2O data set to a CSV file on the local disk, by fetching the
#' server's DownloadDataset endpoint with an external tool (wget or curl).
#'
#' @section Warning: Files located on the H2O server may be very large! Make
#'   sure you have enough hard drive space to accomodate the entire file.
#' @param data an H2O Frame object to be downloaded.
#' @param filename A string indicating the name that the CSV file should be
#'   saved to.
#' @examples
#' \donttest{
#' library(h2o)
#' h2o.init()
#' irisPath <- system.file("extdata", "iris_wheader.csv", package = "h2o")
#' iris.hex <- h2o.uploadFile(path = irisPath)
#'
#' myFile <- paste(getwd(), "my_iris_file.csv", sep = .Platform$file.sep)
#' h2o.downloadCSV(iris.hex, myFile)
#' file.info(myFile)
#' file.remove(myFile)
#' }
#' @export
h2o.downloadCSV <- function(data, filename) {
  if (!is.Frame(data))
    stop("`data` must be an H2O Frame object")
  conn <- h2o.getConnection()
  # URL of the server-side CSV export for this frame.
  url <- paste0("http://", conn@ip, ":", conn@port,
                "/3/DownloadDataset?frame_id=", attr(.eval.frame(data), "id"))
  # Prefer wget, fall back to curl; fail early when neither is installed.
  if (nzchar(Sys.which("wget"))) {
    cmd <- "wget"
    args <- paste("-O", filename, url)
  } else if (nzchar(Sys.which("curl"))) {
    cmd <- "curl"
    args <- paste("-o", filename, url)
  } else {
    stop("could not find wget or curl in system environment")
  }
  cat("cmd:", cmd, "\n")
  cat("args:", args, "\n")
  status <- system2(cmd, args, wait = TRUE)
  # Non-zero exit status is reported but deliberately not raised as an error.
  if (status != 0L)
    cat("Bad return val", status, "\n")
}
# ------------------- Save H2O Model to Disk ----------------------------------------------------
#'
#' Save an H2O Model Object to Disk
#'
#' Save an \linkS4class{H2OModel} to disk.
#'
#' In the case of existing files \code{force = TRUE} will overwrite the file.
#' Otherwise, the operation will fail.
#'
#' @param object an \linkS4class{H2OModel} object.
#' @param path string indicating the directory the model will be written to.
#' @param force logical, indicates how to deal with files that already exist.
#' @seealso \code{\link{h2o.loadModel}} for loading a model to H2O from disk
#' @examples
#' \dontrun{
#' # library(h2o)
#' # h2o.init()
#' # prostate.glm <- h2o.glm(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"),
#' #   training_frame = prostate.hex, family = "binomial", alpha = 0.5)
#' # h2o.saveModel(object = prostate.glm, path = "/Users/UserName/Desktop", force=TRUE)
#' }
#' @export
h2o.saveModel <- function(object, path="", force=FALSE) {
  # Validate inputs before contacting the cluster.
  if (!is(object, "H2OModel"))
    stop("`object` must be an H2OModel object")
  if (!is.character(path) || length(path) != 1L || is.na(path))
    stop("`path` must be a character string")
  if (!is.logical(force) || length(force) != 1L || is.na(force))
    stop("`force` must be TRUE or FALSE")
  # The REST endpoint expects force as 0/1 and the full destination path,
  # which always ends in the model id.
  target <- file.path(path, object@model_id)
  res <- .h2o.__remoteSend(paste0("Models.bin/", object@model_id),
                           dir = target, force = as.integer(force),
                           h2oRestApiVersion = 99)
  # Return the directory reported back by the server.
  res$dir
}
| 5,183 | apache-2.0 |
90ef48829092b8d1cc8b2a2c11e17638477cc566 | yukoga/useful-r | chap3 preprocessing and transform/3-2 how to deal with missing data.R | employee.IQ.JP <- data.frame(
IQ = c(78, 84, 84, 85, 87, 91, 92, 94, 94, 96, 99, 105, 105, 106, 108, 112, 113, 115, 118, 134),
JobPerformance = c(9, 13, 10, 8, 7, 7, 9, 9, 11, 7, 7, 10, 11, 15, 10, 10, 12, 14, 16, 12)
)
# Show the complete (no missing values) toy data set before any deletion.
employee.IQ.JP
# Create missing-data flags for the three standard missingness mechanisms
# (the new columns are literally named MCAR / MAR / MNAR) and visualize
# which points are missing in each scenario.
library(ggplot2)
# MCAR: values at fixed positions 1, 3, 10, 20 are blanked out.
employee.IQ.JP$MCAR <- employee.IQ.JP$JobPerformance
employee.IQ.JP$MCAR[c(1, 3, 10, 20)] <- NA
# 0/1 factor marking which rows are missing (used for color/shape below)
employee.IQ.JP$MCAR.is.missing <- as.factor(as.integer(is.na(employee.IQ.JP$MCAR)))
p <- ggplot(data = employee.IQ.JP, aes(x=IQ, y=JobPerformance, colour = MCAR.is.missing)) + geom_point(aes(shape = MCAR.is.missing), size = 5) + theme_bw() %+replace% theme(legend.position = "bottom")
print(p)
# MAR: the first five rows are blanked; the data are ordered by increasing
# IQ, so missingness here depends on the observed IQ values.
employee.IQ.JP$MAR <- employee.IQ.JP$JobPerformance
employee.IQ.JP$MAR[1:5] <- NA
employee.IQ.JP$MAR.is.missing <- as.factor(as.integer(is.na(employee.IQ.JP$MAR)))
p2 <- ggplot(data = employee.IQ.JP, aes(x=IQ, y=JobPerformance, colour = MAR.is.missing)) + geom_point(aes(shape = MAR.is.missing), size = 5) + theme_bw() %+replace% theme(legend.position = "bottom")
print(p2)
# MNAR: rows 4:6 and 10:11 are blanked — presumably chosen to depend on the
# unobserved JobPerformance values themselves (TODO confirm intent).
employee.IQ.JP$MNAR <- employee.IQ.JP$JobPerformance
employee.IQ.JP$MNAR[c(4:6, 10:11)] <- NA
employee.IQ.JP$MNAR.is.missing <- as.factor(as.integer(is.na(employee.IQ.JP$MNAR)))
p3 <- ggplot(data = employee.IQ.JP, aes(x=IQ, y=JobPerformance, colour = MNAR.is.missing)) + geom_point(aes(shape = MNAR.is.missing), size = 5) + theme_bw() %+replace% theme(legend.position = "bottom")
print(p3)
959d065853c0f18a409f8862008af865b8fe2389 | nickreich/hospital-surv-data | code/read-data.R | #########################################
#########################################
#########################################
### Research project: ###
### characterizing seasonal epidemics ###
### Emily Ramos, Nick Reich ###
### Modified 11/17/14 ###
#########################################
#########################################
############################
### The data and setting ###
############################
#We have data from a children’s hospital in the U.S. reporting weekly counts of laboratory-confirmed viral infections. Specifically, we have data on the following viruses from 2001-2012 (unless other time range specified): flu A, flu B, RSV, Adenovirus, Parainfluenza, HMPV (2006-), Rhinovirus, Pertussis, Enterovirus, Diarrhea Viruses, Coronavirus (2009-).
## data should be stored in hospital-surv-data/data
# NOTE(review): prefer library() over require() for hard dependencies —
# require() only warns (returns FALSE) when the package is missing.
require(reshape2)
require(ggplot2)
## load data: one row per week, one column per pathogen
dat <- read.csv("chco.csv")
## fix dates: parse the "Date..Month.Year." column, then drop it
dat$Date <- as.Date(dat$Date..Month.Year., "%m/%d/%y")
dat <- dat[,-which(colnames(dat)=="Date..Month.Year.")]
## melt to long format (Date, variable = pathogen, value = weekly count)
## -- maybe should change to dplyr syntax!
dat_melted <- melt(dat, id="Date")
## nice all-pathogen plot: one facet per pathogen, free y scales
qplot(x=Date, ymin=0, ymax=value, geom="linerange", data=dat_melted, color=variable) + facet_grid(variable~., scales="free_y") + theme(legend.position="none")
#####################
### The questions ###
#####################
#The goal of this project is to, for a given pathogen, describe the characteristics of the annual epidemic curves. Some follow clear seasonal patterns others less so. Starting with some of the ones that do have strongly seasonal characteristics (e.g. RSV, flu A and B, HMPV, Enterovirus, Coronavirus), we want to create a few metrics to evaluate the average behavior and the variability in average behavior across years. Some example metrics we could use:
#• the “season” for each of these infections may not coincide with the calendar year. Is there a more appropriate “start” time to use than January 1 if we want to evaluate each “year” differently?
#• average and sd of week with peak incidence
#• average and sd number of weeks it takes to observe X% (80? 90? 100?) of the annual cases
#• avg and sd number of weeks pre and post peak.
#• avg and sd total number of cases
## Per-pathogen summary statistics (mean and sd of weekly counts).
## This replaces eleven near-identical copy-pasted blocks with one helper.
##
## NAs are dropped before computing the statistics: HMPV (reported from
## 2006) and Coronavirus (from 2009) have structurally missing early years,
## and the previous code returned NA for them ("too many missing").
## A single pathogen's raw series is still available via
## dat_melted[dat_melted$variable == "RSV", ], etc.

# Summarise the weekly counts of one pathogen from the long-format data.
# Returns a named numeric vector c(mean = ..., sd = ...).
pathogen_summary <- function(pathogen, melted) {
  counts <- melted$value[melted$variable == pathogen]
  c(mean = mean(counts, na.rm = TRUE), sd = sd(counts, na.rm = TRUE))
}

# Column names as they appear in dat_melted$variable.
pathogens <- c("RSV", "Total.Flu.A", "Flu.B", "HMPV", "Paraflu",
               "Adenovirus", "Rhinovirus", "Coronavirus", "B..Pertussis",
               "Enterovirus", "Diarrhea.Viruses")

# One row per pathogen, columns mean and sd.
virus_stats <- t(vapply(pathogens, pathogen_summary, numeric(2),
                        melted = dat_melted))
print(round(virus_stats, 2))
##################
### The Goals ###
##################
# The goals of this effort are to create a write-up with definitions and justifications of the different metrics chosen, along with tables and figures to display the results.
| 4,334 | gpl-2.0 |
dc48243b883e97fb8159dd7431e529afe5047c80 | rgorman/SyntaxMetrics | R_files/code/open_ling/chunkByCountRelPos_July28.R | library(XML)
source("code/corpusFunctions.R")
input.dir <- "../rel_pos_prose"
files.v <- dir(path=input.dir, pattern=".*xml")
# The following loop calls the user-defined function getSwordChunkMaster()
# (from code/corpusFunctions.R); per file it returns a list of lists of
# tables, each table covering at most 100 words (the second argument).
book.freqs.l <- list()
for(i in 1:length(files.v)){
doc.object <- xmlTreeParse(file.path(input.dir, files.v[i]), useInternalNodes=TRUE)
chunk.data.l <- getSwordChunkMaster(doc.object, 100)
book.freqs.l[[files.v[i]]] <-chunk.data.l
}
summary(book.freqs.l)
freqs.l <- list()
# convert list into matrix object;
# this code requires the user-defined function my.apply()
freqs.l <- lapply(book.freqs.l, my.apply)
summary(freqs.l)
freqs.df <- do.call(rbind, freqs.l)
# the result is a long-form data frame
dim(freqs.df)
# create .csv file for inspection (very optional!!)
write.csv(freqs.df, file="sWord_output/inspect1.csv")
# make name labels for the file (strip ".xml.<chunk>" from the row names)
bookids.v <- gsub(".xml.\\d+", "", rownames(freqs.df))
# make book-with-chunk id labels
book.chunk.ids <- paste(bookids.v, freqs.df$ID, sep="_")
# replace the ID column in freqs.df
freqs.df$ID <- book.chunk.ids
# cross-tabulate: one row per chunk ID, one column per feature
result.t <- xtabs(Freq ~ ID+Var1, data=freqs.df)
dim(result.t)
# convert to a data frame
final.df <- as.data.frame.matrix(result.t)
# make author vector and strip work name and book numbers from it
author.v <- gsub("_.+", "", rownames(final.df))
head(author.v)
unique(author.v)
length(author.v)
author.v
# reduce the feature set
freq.means.v <- colMeans(final.df[, ])
# collect column means of a given magnitude
keepers.v <- which(freq.means.v >=.00008)
# collect column means of a given magnitude for NaiveBayes
# NOTE(review): this immediately overwrites the previous keepers.v, so only
# one of the two thresholds can be in effect per run.
keepers.v <- which(freq.means.v >=.0009)
# use keepers.v to make a smaller data frame object for analysis
smaller.df <- final.df[, keepers.v]
# NOTE(review): ordered.df is not defined until two lines below, so this line
# fails when the script is run top-to-bottom; it only works interactively
# when ordered.df already exists from a previous pass.
smaller.df <- ordered.df[, 1:107]
dim(smaller.df)
# order columns by column mean, largest to smallest, and create object with results
ordered.df <- smaller.df[, order(colMeans(smaller.df), decreasing=TRUE)]
View(ordered.df)
# reserve full ordered.df and smaller.df for backup
ordered.df.backup <- ordered.df
smaller.df.backup <- smaller.df
# reduce variables from ordered.df (165 for rel-pos files is the sweet spot)
smaller.df <- ordered.df[, 1:165]
View(smaller.df)
| 2,362 | gpl-2.0 |
6777b724cbc1a265ab6b5bd8818244ab3415e177 | ArdiaD/PeerPerformance | R/sharpeTesting.R | ## Set of R functions for Sharpe ratio testing
# #' @name .sharpeTesting
# #' @import compiler
# Core implementation behind sharpeTesting(): tests the difference of the
# Sharpe ratios of two funds, either via the asymptotic distribution or a
# (circular block) bootstrap. Only observations where both funds are
# non-missing are used.
.sharpeTesting <- function(x, y, control = list()) {
  x <- as.matrix(x)
  y <- as.matrix(y)
  # process control parameters (type, ttype, hac, nBoot, bBoot, pBoot, minObs)
  ctr <- processControl(control)
  # check if enough data are available for testing;
  # keep only positions where both series are defined (difference not NA/NaN)
  dxy <- x - y
  idx <- (!is.nan(dxy) & !is.na(dxy))
  rets <- cbind(x[idx], y[idx])
  T <- sum(idx)
  if (T < ctr$minObs) {
    stop("intersection of 'x' and 'y' is shorter than 'minObs'")
  }
  # sharpe testing
  if (ctr$type == 1) {
    # ==> asymptotic approach
    tmp <- sharpeTestAsymptotic(rets, ctr$hac, ctr$ttype)
  } else {
    # ==> bootstrap approach (iid and circular block bootstrap)
    if (ctr$bBoot == 0) {
      # bBoot == 0 means: choose the block length automatically
      ctr$bBoot <- sharpeBlockSize(x, y, ctr)
    }
    bsids <- bootIndices(T, ctr$nBoot, ctr$bBoot)
    tmp <- sharpeTestBootstrap(rets, bsids, ctr$bBoot, ctr$ttype, ctr$pBoot)
  }
  # info on the funds (unconditional Sharpe ratios)
  info <- infoFund(rets)
  ## form output: n, the two Sharpe ratios, their difference, t-stat, p-value
  out <- list(n = T, sharpe = info$sharpe, dsharpe = -diff(info$sharpe),
    tstat = as.vector(tmp$tstat), pval = as.vector(tmp$pval))
  return(out)
}
#' @name sharpeTesting
#' @title Testing the difference of Sharpe ratios
#' @description Function which performs the testing of the difference of Sharpe ratios.
#' @details The Sharpe ratio (Sharpe 1992) is one industry standard for measuring the
#' absolute risk adjusted performance of hedge funds. This function performs
#' the testing of Sharpe ratio difference for two funds using the approach by
#' Ledoit and Wolf (2002).
#'
#' For the testing, only the intersection of non-\code{NA} observations for the
#' two funds are used.
#'
#' The argument \code{control} is a list that can supply any of the following
#' components:
#' \itemize{
#' \item \code{'type'} Asymptotic approach (\code{type = 1}) or
#' studentized circular bootstrap approach (\code{type = 2}). Default:
#' \code{type = 1}.
#' \item \code{'ttype'} Test based on ratio (\code{type = 1})
#' or product (\code{type = 2}). Default: \code{type = 2}.
#' \item \code{'hac'} Heteroscedastic-autocorrelation consistent standard
#' errors. Default: \code{hac = FALSE}.
#' \item \code{'nBoot'} Number of boostrap replications for computing the p-value. Default: \code{nBoot =
#' 499}.
#' \item \code{'bBoot'} Block length in the circular bootstrap. Default:
#' \code{bBoot = 1}, i.e. iid bootstrap. \code{bBoot = 0} uses optimal
#' block-length.
#' \item \code{'pBoot'} Symmetric p-value (\code{pBoot = 1}) or
#' asymmetric p-value (\code{pBoot = 2}). Default: \code{pBoot = 1}.
#' }
#' @param x Vector (of length \eqn{T}) of returns for the first fund. \code{NA}
#' values are allowed.
#' @param y Vector (of length \eqn{T}) of returns for the second fund. \code{NA}
#' values are allowed.
#' @param control Control parameters (see *Details*).
#' @return A list with the following components:\cr
#'
#' \code{n}: Number of non-\code{NA} concordant observations.\cr
#'
#' \code{sharpe}: Vector (of length 2) of unconditional Sharpe ratios.\cr
#'
#' \code{dsharpe}: Sharpe ratios difference.\cr
#'
#' \code{tstat}: t-stat of Sharpe ratios differences.\cr
#'
#' \code{pval}: pvalues of test of Sharpe ratios differences.
#' @note Further details on the methodology, with an application to the hedge
#' fund industry, are given in Ardia and Boudt (2018).
#'
#' Some internal functions where adapted from Michael Wolf MATLAB code.
#' @author David Ardia and Kris Boudt.
#' @seealso \code{\link{sharpe}}, \code{\link{sharpeScreening}} and
#' \code{\link{msharpeTesting}}.
#' @references
#' Ardia, D., Boudt, K. (2015).
#' Testing equality of modified Sharpe ratios.
#' \emph{Finance Research Letters} \bold{13}, pp.97--104.
#' \doi{10.1016/j.frl.2015.02.008}
#'
#' Ardia, D., Boudt, K. (2018).
#' The peer performance ratios of hedge funds.
#' \emph{Journal of Banking and Finance} \bold{87}, pp.351-.368.
#' \doi{10.1016/j.jbankfin.2017.10.014}
#'
#' Barras, L., Scaillet, O., Wermers, R. (2010).
#' False discoveries in mutual fund performance: Measuring luck in estimated alphas.
#' \emph{Journal of Finance} \bold{65}(1), pp.179--216.
#'
#' Sharpe, W.F. (1994).
#' The Sharpe ratio.
#' \emph{Journal of Portfolio Management} \bold{21}(1), pp.49--58.
#'
#' Ledoit, O., Wolf, M. (2008).
#' Robust performance hypothesis testing with the Sharpe ratio.
#' \emph{Journal of Empirical Finance} \bold{15}(5), pp.850--859.
#'
#' Storey, J. (2002).
#' A direct approach to false discovery rates.
#' \emph{Journal of the Royal Statistical Society B} \bold{64}(3), pp.479--498.
#' @keywords htest
#' @examples
#' ## Load the data (randomized data of monthly hedge fund returns)
#' data("hfdata")
#' x = hfdata[,1]
#' y = hfdata[,2]
#'
#' ## Run Sharpe testing (asymptotic)
#' ctr = list(type = 1)
#' out = sharpeTesting(x, y, control = ctr)
#' print(out)
#'
#' ## Run Sharpe testing (asymptotic hac)
#' ctr = list(type = 1, hac = TRUE)
#' out = sharpeTesting(x, y, control = ctr)
#' print(out)
#'
#' ## Run Sharpe testing (iid bootstrap)
#' set.seed(1234)
#' ctr = list(type = 2, nBoot = 250)
#' out = sharpeTesting(x, y, control = ctr)
#' print(out)
#'
#' ## Run Sharpe testing (circular bootstrap)
#' set.seed(1234)
#' ctr = list(type = 2, nBoot = 250, bBoot = 5)
#' out = sharpeTesting(x, y, control = ctr)
#' print(out)
#' @export
#' @import compiler
sharpeTesting <- compiler::cmpfun(.sharpeTesting)
#@name .sharpe.ratio.diff
#@title Difference of Sharpe ratios
# Computes, column by column, the difference between the Sharpe ratios of two
# return series. When Y is NULL, the two columns of X are the two funds.
# ttype == 1 uses the ratio form (mu / sigma); any other value uses the
# product form (mu1 * sigma2 vs mu2 * sigma1), which avoids the division.
# Standard deviations use the n - 1 denominator.
.sharpe.ratio.diff <- function(X, Y, ttype) {
  if (is.null(Y)) {
    # single two-column input: split it into the two funds
    Y <- X[, 2, drop = FALSE]
    X <- X[, 1, drop = FALSE]
  }
  nobs <- nrow(X)
  m1 <- colMeans(X)
  m2 <- colMeans(Y)
  # column-wise sample standard deviations
  s1 <- sqrt(colSums(sweep(X, 2, m1, "-")^2) / (nobs - 1))
  s2 <- sqrt(colSums(sweep(Y, 2, m2, "-")^2) / (nobs - 1))
  if (ttype == 1) {
    sr1 <- m1 / s1
    sr2 <- m2 / s2
  } else {
    sr1 <- m1 * s2
    sr2 <- m2 * s1
  }
  sr1 - sr2
}
sharpe.ratio.diff <- compiler::cmpfun(.sharpe.ratio.diff)
# #' @name .sharpeTestAsymptotic
# #' @title Asymptotic Sharpe test
# #' @importFrom stats pnorm
# #' @import compiler
# Asymptotic test of the Sharpe-ratio difference: point estimate,
# delta-method standard error, and a two-sided normal p-value.
.sharpeTestAsymptotic <- function(rets, hac, ttype) {
  delta <- sharpe.ratio.diff(rets, Y = NULL, ttype)
  se.hat <- se.sharpe.asymptotic(rets, hac, ttype)
  stat <- delta / se.hat
  # two-sided p-value under the standard normal limit
  pv <- 2 * stats::pnorm(-abs(stat))
  list(dsharpe = delta, tstat = stat, se = se.hat, pval = pv)
}
sharpeTestAsymptotic <- compiler::cmpfun(.sharpeTestAsymptotic)
# #' @name .se.sharpe.asymptotic
# #' @title Asymptotic standard error
# #' @importFrom stats cov ar
# #' @import compiler
# Delta-method standard error of the Sharpe-ratio difference, following
# Ledoit & Wolf (2008). V.hat collects the demeaned first and second moments
# of both funds; its (possibly HAC) covariance Psi.hat is combined with the
# gradient of the Sharpe-ratio difference.
.se.sharpe.asymptotic <- function(X, hac, ttype) {
  # estimation of (robust) Psi function; see Ledoit & Wolf (2008).
  # hac = TRUE uses a Parzen-kernel HAC estimator with automatic
  # bandwidth S.star; hac = FALSE uses the plain sample covariance.
  compute.Psi.hat <- function(V.hat, hac) {
    if (hac) {
      T <- length(V.hat[, 1])
      alpha.hat <- compute.alpha.hat(V.hat)
      # data-driven bandwidth
      S.star <- 2.6614 * (alpha.hat * T)^0.2
      Psi.hat <- compute.Gamma.hat(V.hat, 0)
      j <- 1
      while (j < S.star) {
        Gamma.hat <- compute.Gamma.hat(V.hat, j)
        Psi.hat <- Psi.hat + kernel.Parzen(j/S.star) * (Gamma.hat +
          t(Gamma.hat))
        j <- j + 1
      }
      # small-sample adjustment
      Psi.hat <- (T/(T - 4)) * Psi.hat
    } else {
      Psi.hat <- stats::cov(V.hat)
    }
    return(Psi.hat)
  }
  # Parzen kernel weights used by the HAC estimator
  kernel.Parzen <- function(x) {
    if (abs(x) <= 0.5)
      result <- 1 - 6 * x^2 + 6 * abs(x)^3 else if (abs(x) <= 1)
      result <- 2 * (1 - abs(x))^3 else result <- 0
    return(result)
  }
  # AR(1)-based plug-in constant for the automatic bandwidth rule
  compute.alpha.hat <- function(V.hat) {
    p <- ncol(V.hat)
    num <- den <- 0
    for (i in 1:p) {
      fit <- stats::ar(V.hat[, i], 0, 1, method = "ols")
      rho.hat <- as.numeric(fit[2])
      sig.hat <- sqrt(as.numeric(fit[3]))
      num <- num + 4 * rho.hat^2 * sig.hat^4/(1 - rho.hat)^8
      den <- den + sig.hat^4/(1 - rho.hat)^4
    }
    return(num/den)
  }
  # lag-j sample autocovariance matrix of V.hat
  compute.Gamma.hat <- function(V.hat, j) {
    T <- nrow(V.hat)
    p <- ncol(V.hat)
    Gamma.hat <- matrix(0, p, p)
    if (j >= T)
      stop("j must be smaller than the row dimension!")
    for (i in ((j + 1):T)) {
      Gamma.hat <- Gamma.hat + tcrossprod(V.hat[i, ], V.hat[i - j,
        ])
    }
    Gamma.hat <- Gamma.hat/T
    return(Gamma.hat)
  }
  T <- nrow(X)
  if (ttype == 1) {
    # ratio form: gradient of mu/sigma w.r.t. (mu1, mu2, gamma1, gamma2),
    # where gamma denotes the uncentered second moment
    mu.hat <- colMeans(X)
    gamma.hat <- colMeans(X^2)
    gradient <- vector("double", 4)
    gradient[1] <- gamma.hat[1]/(gamma.hat[1] - mu.hat[1]^2)^1.5
    gradient[2] <- -gamma.hat[2]/(gamma.hat[2] - mu.hat[2]^2)^1.5
    gradient[3] <- -0.5 * mu.hat[1]/(gamma.hat[1] - mu.hat[1]^2)^1.5
    gradient[4] <- 0.5 * mu.hat[2]/(gamma.hat[2] - mu.hat[2]^2)^1.5
    V.hat <- matrix(NA, T, 4)
    V.hat[, 1:2] <- sweep(x = X, MARGIN = 2, STATS = mu.hat, FUN = "-")
    V.hat[, 3:4] <- sweep(x = X^2, MARGIN = 2, STATS = gamma.hat, FUN = "-")
  } else {
    # product form: gradient of mu1*sigma2 - mu2*sigma1
    m1 <- colMeans(X)
    X_ <- sweep(x = X, MARGIN = 2, STATS = m1, FUN = "-")
    m2 <- colMeans(X_^2)
    g2 <- m2 + m1^2
    dm1i <- c(1, 0, 0, 0)
    dm1j <- c(0, 0, 1, 0)
    dsigi <- 1/(2 * sqrt(m2[1])) * c(-2 * m1[1], 1, 0, 0)
    dsigj <- 1/(2 * sqrt(m2[2])) * c(0, 0, -2 * m1[2], 1)
    tmp1 <- dm1i * sqrt(m2[2]) + dsigj * m1[1]
    tmp2 <- dm1j * sqrt(m2[1]) + dsigi * m1[2]
    gradient <- tmp1 - tmp2
    V.hat <- matrix(NA, T, 4)
    V.hat[, c(1, 3)] <- sweep(x = X, MARGIN = 2, STATS = m1, FUN = "-")
    V.hat[, c(2, 4)] <- sweep(x = X^2, MARGIN = 2, STATS = g2, FUN = "-")
  }
  Psi.hat <- compute.Psi.hat(V.hat, hac)
  # delta method: se = sqrt(g' Psi g / T)
  se <- as.numeric(sqrt(crossprod(gradient, Psi.hat %*% gradient)/T))
  return(se)
}
se.sharpe.asymptotic <- compiler::cmpfun(.se.sharpe.asymptotic)
# #' @name .sharpeTestBootstrap
# #' @import compiler
# Studentized (circular block) bootstrap test of the Sharpe-ratio difference.
# `bsids` holds pre-drawn bootstrap index columns (one column per
# replication), `b` is the block length, `d` the difference under the null.
.sharpeTestBootstrap <- function(rets, bsids, b, ttype, pBoot, d = 0) {
  T <- nrow(rets)
  x <- rets[, 1, drop = FALSE]
  y <- rets[, 2, drop = FALSE]
  # observed (null-centered) difference and its bootstrap-consistent se
  dsharpe <- as.numeric(sharpe.ratio.diff(x, y, ttype) - d)
  se <- se.sharpe.bootstrap(x, y, b, ttype)
  # se = se.sharpe.asymptotic(X = cbind(x, y), hac = TRUE, ttype = ttype)
  # bootstrap indices
  nBoot <- ncol(bsids)
  bsidx <- 1 + bsids%%T # ensure that the bootstrap indices match the length of the time series
  # one resampled series per column
  bsX <- matrix(x[bsidx], T, nBoot)
  bsY <- matrix(y[bsidx], T, nBoot)
  bsdsharpe <- sharpe.ratio.diff(bsX, bsY, ttype)
  bsse <- se.sharpe.bootstrap(bsX, bsY, b, ttype)
  tstat <- dsharpe/se
  if (pBoot == 1) {
    # first type p-value calculation (symmetric two-sided);
    # the +1 terms avoid a zero p-value with finite nBoot
    bststat <- abs(bsdsharpe - dsharpe)/bsse
    pval <- (sum(bststat > abs(tstat)) + 1)/(nBoot + 1)
    # pval = sum(bststat > abs(tstat)) / nBoot
  } else {
    # second type p-value calculation, asymmetric (as in Barras et al.)
    bststat <- (bsdsharpe - dsharpe)/bsse
    pval <- 2 * min(sum(bststat > tstat) + 1, sum(bststat < tstat) +
      1)/(nBoot + 1)
    # pval = 2 * min(sum(bststat > tstat), sum(bststat < tstat)) / nBoot
  }
  out <- list(dsharpe = dsharpe, tstat = tstat, se = se, bststat = bststat,
    pval = pval)
  return(out)
}
sharpeTestBootstrap <- compiler::cmpfun(.sharpeTestBootstrap)
# #' @name .se.sharpe.bootstrap
# #' @title Bootstrap standard error
# #' @importFrom stats cov
# #' @import compiler
# Standard error of the Sharpe-ratio difference used inside the bootstrap.
# X and Y hold one column per bootstrap replication (N columns in total);
# `b` is the block length (b = 1 reduces to the iid case). Returns a vector
# of N standard errors.
.se.sharpe.bootstrap <- function(X, Y, b, ttype) {
  ## Compute Psi with two approaches: 1) iid bootstrap, 2) circular block
  ## bootstrap
  compute.Psi.hat <- function(V.hat, b) {
    T <- length(V.hat[, 1])
    if (b == 1) {
      # ==> standard estimation
      Psi.hat <- stats::cov(V.hat)
    } else {
      # ==> block estimation: average the outer products of the scaled
      # within-block means over the l = floor(T/b) blocks
      l <- floor(T/b)
      Psi.hat <- matrix(0, 4, 4)
      for (j in (1:l)) {
        zeta <- b^0.5 * colMeans(V.hat[((j - 1) * b + 1):(j * b),
          , drop = FALSE])
        Psi.hat <- Psi.hat + tcrossprod(zeta)
      }
      Psi.hat <- Psi.hat/l
    }
    return(Psi.hat)
  }
  T <- nrow(X)
  N <- ncol(Y)
  if (ttype == 1) {
    # ratio form: gradient of mu/sigma, one 4-vector per replication
    mu1.hat <- colMeans(X)
    mu2.hat <- colMeans(Y)
    gamma1.hat <- colMeans(X^2)
    gamma2.hat <- colMeans(Y^2)
    gradient <- array(NA, c(4, 1, N))
    gradient[1, 1, ] <- gamma1.hat/(gamma1.hat - mu1.hat^2)^1.5
    gradient[2, 1, ] <- -gamma2.hat/(gamma2.hat - mu2.hat^2)^1.5
    gradient[3, 1, ] <- -0.5 * mu1.hat/(gamma1.hat - mu1.hat^2)^1.5
    gradient[4, 1, ] <- 0.5 * mu2.hat/(gamma2.hat - mu2.hat^2)^1.5
    V.hat <- array(NA, c(T, 4, N))
    V.hat[, 1, ] <- sweep(x = X, MARGIN = 2, STATS = mu1.hat, FUN = "-")
    V.hat[, 2, ] <- sweep(x = Y, MARGIN = 2, STATS = mu2.hat, FUN = "-")
    V.hat[, 3, ] <- sweep(x = X^2, MARGIN = 2, STATS = gamma1.hat,
      FUN = "-")
    V.hat[, 4, ] <- sweep(x = Y^2, MARGIN = 2, STATS = gamma2.hat,
      FUN = "-")
  } else {
    # product form: gradient of mu1*sigma2 - mu2*sigma1, built column-wise
    m1X <- colMeans(X)
    m1Y <- colMeans(Y)
    X_ <- sweep(x = X, MARGIN = 2, STATS = m1X, FUN = "-")
    Y_ <- sweep(x = Y, MARGIN = 2, STATS = m1Y, FUN = "-")
    m2X <- colMeans(X_^2)
    m2Y <- colMeans(Y_^2)
    g2X <- m2X + m1X^2
    g2Y <- m2Y + m1Y^2
    cst1X <- 1/(2 * sqrt(m2X))
    cst1Y <- 1/(2 * sqrt(m2Y))
    dm1X <- matrix(rep(c(1, 0, 0, 0), N), 4, N, FALSE)
    dm1Y <- matrix(rep(c(0, 0, 1, 0), N), 4, N, FALSE)
    dsigX <- rbind(-2 * cst1X * m1X, cst1X, 0, 0)
    dsigY <- rbind(0, 0, -2 * cst1Y * m1Y, cst1Y)
    # matrix form (replication-wise constants expanded to 4 x N)
    m1X_ <- matrix(m1X, nrow = 4, ncol = N, byrow = TRUE)
    m1Y_ <- matrix(m1Y, nrow = 4, ncol = N, byrow = TRUE)
    m2X_ <- matrix(m2X, nrow = 4, ncol = N, byrow = TRUE)
    m2Y_ <- matrix(m2Y, nrow = 4, ncol = N, byrow = TRUE)
    dm1X_ <- matrix(dm1X, nrow = 4, ncol = N, byrow = FALSE)
    dm1Y_ <- matrix(dm1Y, nrow = 4, ncol = N, byrow = FALSE)
    dsigX_ <- matrix(dsigX, nrow = 4, ncol = N, byrow = FALSE)
    dsigY_ <- matrix(dsigY, nrow = 4, ncol = N, byrow = FALSE)
    cst2X_ <- sqrt(m2X_)
    cst2Y_ <- sqrt(m2Y_)
    # gradient
    tmp1 <- dm1X_ * cst2Y_ + dsigY_ * m1X_
    tmp2 <- dm1Y_ * cst2X_ + dsigX_ * m1Y_
    # =======
    gradient <- array(NA, c(4, 1, N))
    gradient[1:4, 1, ] <- tmp1 - tmp2
    V.hat <- array(NA, c(T, 4, N))
    V.hat[, 1, ] <- sweep(x = X, MARGIN = 2, STATS = m1X, FUN = "-")
    V.hat[, 3, ] <- sweep(x = Y, MARGIN = 2, STATS = m1Y, FUN = "-")
    V.hat[, 2, ] <- sweep(x = X^2, MARGIN = 2, STATS = g2X, FUN = "-")
    V.hat[, 4, ] <- sweep(x = Y^2, MARGIN = 2, STATS = g2Y, FUN = "-")
  }
  # per-replication covariance of the moment conditions
  Psi.hat <- array(apply(X = V.hat, MARGIN = 3, FUN = compute.Psi.hat,
    b = b), c(4, 4, N))
  se <- vector("double", N)
  for (i in 1:N) {
    # delta method per replication: se = sqrt(g' Psi g / T)
    se[i] <- sqrt(crossprod(gradient[, , i], Psi.hat[, , i] %*% gradient[,
      , i])/T)
  }
  return(se)
}
se.sharpe.bootstrap <- compiler::cmpfun(.se.sharpe.bootstrap)
| 15,101 | gpl-2.0 |
75913ce251243f34d4183b4df3a0db93f13e9178 | cran/dcemri | demo/avg152T1_RL.R | avg152T1.RL <- read.img("avg152T1_RL_nifti")
# Slice-by-slice display of the avg152T1_RL template volume.
# avg152T1.RL is read by read.img() on the preceding line (dcemri package).
X <- nrow(avg152T1.RL)
Y <- ncol(avg152T1.RL)
# nsli(): number of slices in the image volume (dcemri helper)
Z <- nsli(avg152T1.RL)
# common intensity range so every slice shares one grey scale
zrange <- range(avg152T1.RL)
# 10 x 10 grid of slices with no margins
par(mfrow=c(10,10), mar=rep(0,4))
for (z in 1:Z) {
# X axis is reversed (X:1) — presumably to flip left/right orientation
# as the "_RL" filename suggests; TODO confirm.
image(1:X, 1:Y, avg152T1.RL[X:1,,z], zlim=zrange, col=grey(0:64/64),
xlab="", ylab="", axes=FALSE)
}
| 308 | bsd-3-clause |
8230bd705262a1b8dcb3b9e5b7b88e99ee20b16e | Gargonslipfisk/NLP | Topic_Modeling_2.R | #Ruta del directorio de trabajo
# Working-directory path.
# NOTE(review): setwd() in a script makes it machine-specific; consider
# relative paths instead.
setwd("~/Erreria/Topic Modelling")
# Import the corpus as a character vector (one review per element)
Hoteles_raw = readLines("Hotels.csv", encoding = "ANSI")
# Convert the corpus to a one-column matrix
Hoteles = as.matrix(Hoteles_raw)
library(textcat)
# Detect the language of each review. Do not name the result `c`:
# that shadows base::c() and invites subtle bugs.
detected_lang <- textcat(Hoteles)
# Keep only reviews detected as English. textcat() returns NA when it
# cannot classify a text; the previous `ifelse(c == "english", TRUE, FALSE)`
# let those NAs through as NA subscripts, which injected all-NA rows into
# the filtered matrix. Excluding NAs explicitly fixes that.
row_to_keep <- !is.na(detected_lang) & detected_lang == "english"
Hoteles_2 <- Hoteles[row_to_keep, ]
# Logical subsetting of a one-column matrix drops to a vector; restore matrix.
Hoteles <- as.matrix(Hoteles_2)
# Dead exploratory code kept for reference: NA cleanup loop
# a <- character()
# for (i in 1:nrow(Hoteles))
# if (!is.na(textcat(Hoteles[i]))) a2=(c(Hoteles[i],a))
# a3 <- as.matrix(a2)
#
# Dead exploratory code: drop reviews that are not English
# a <- character()
# for (i in 1:nrow(a3))
# if (textcat(a3[i]) == "english") a4 = (c(a3[i],a))
# a5 <- as.data.frame(a4)
# Tutorial this section follows:
#https://rpubs.com/joseposada/topicModeling
# install.packages("corpus.JSS.papers",repos = "http://datacube.wu.ac.at/", type = "source")
# data("JSS_papers", package = "corpus.JSS.papers") # list-type matrix 636x15
# JSS_papers <- JSS_papers[JSS_papers[,"date"] < "2010-08-05",] #361x15
# JSS_papers <- JSS_papers[sapply(JSS_papers[, "description"],Encoding) == "unknown",] #348x15
library("tm")
# library("XML")
# #closure function
# remove_HTML_markup =function(s) tryCatch({
# doc = htmlTreeParse(paste("<!DOCTYPE html>", s),
# asText = TRUE, trim = FALSE)
# xmlValue(xmlRoot(doc))},
# error = function(s) s)
# Hotels_raw <- readLines("Hotels", encoding = "ANSI") # encoding UTF-8 raised a Utf8tolowr-type error
# Hotels <- as.matrix(Hotels_raw)
# Build a tm corpus: one document per (English) review
Corpus <- Corpus(VectorSource(Hoteles)) # VCorpus; documents: 26672
# Sys.setlocale("LC_COLLATE", "C")
# Document-term matrix with stemming, stopword removal, number and
# punctuation removal. NOTE(review): prefer TRUE over the abbreviation T,
# which can be reassigned.
DTM <- DocumentTermMatrix(Corpus,control = list(stemming = T, stopwords = T, minWordLength = 3,removeNumbers = T, removePunctuation = T))
# dim(DTM)
# NOTE(review): this names every DTM row with the full review text itself —
# verify that is intended (row names appear in any exported output).
rownames(DTM) <- Hoteles[,1]
###############################################################################################################################################
library("slam")
# summary(col_sums(DTM)) # converts sparse matrix to simple triplet matrix
# Mean tf-idf per term: mean relative frequency times log2(N / doc freq)
TFIDF <- tapply(DTM$v/row_sums(DTM)[DTM$i], DTM$j, mean) * log2(nDocs(DTM)/col_sums(DTM > 0))
# term frequency–inverse document frequency weighting
# summary(TFIDF)
DTM <- DTM[,TFIDF >= 0.1] # keep terms with mean tf-idf >= 0.1
DTM <- DTM[row_sums(DTM) > 0,] # drop documents left empty by the term filter
# summary(col_sums(DTM))
# dim(DTM)
library("topicmodels")
#Set parameters for Gibbs sampling
burnin <- 4000
iter <- 2000
thin <- 500
seed <-list(2003,5,63,100001,765)
nstart <- 5
best <- TRUE
#Number of topics
k <- 5
#Run LDA using Gibbs sampling
ldaOut <- LDA(DTM,k = k, method = "Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))
#write out results
#docs to topics
ldaOut.topics <- as.matrix(topics(ldaOut))
write.csv(ldaOut.topics,file=paste("LDAGibbs",k,"DocsToTopics.csv"))
#top 6 terms in each topic
ldaOut.terms <- as.matrix(terms(ldaOut,6))
write.csv(ldaOut.terms,file=paste("LDAGibbs",k,"TopicsToTerms.csv"))
#probabilities associated with each topic assignment
topicProbabilities <- as.data.frame(ldaOut@gamma)
write.csv(topicProbabilities,file=paste("LDAGibbs",k,"TopicProbabilities.csv"))
#Find relative importance of top 2 topics
topic1ToTopic2 <- lapply(1:nrow(DTM),function(x)
sort(topicProbabilities[x,])[k]/sort(topicProbabilities[x,])[k-1])
#Find relative importance of second and third most important topics
topic2ToTopic3 <- lapply(1:nrow(DTM),function(x)
sort(topicProbabilities[x,])[k-1]/sort(topicProbabilities[x,])[k-2])
#write to file
write.csv(topic1ToTopic2,file=paste("LDAGibbs",k,"Topic1ToTopic2.csv"))
write.csv(topic2ToTopic3,file=paste("LDAGibbs",k,"Topic2ToTopic3.csv"))
# Fit the same DTM with four estimators for comparison: VEM (alpha
# estimated), VEM with fixed alpha, Gibbs sampling, and a correlated topic
# model (CTM).
# NOTE(review): `seed` here is the 5-element list from the Gibbs setup while
# nstart defaults to 1 for these fits -- confirm that is intended.
TopicModel <- list(VEM = LDA(DTM, k = k, control = list(seed = seed)),
VEM_fixed = LDA(DTM, k = k,control = list(estimate.alpha = FALSE, seed = seed)),
Gibbs = LDA(DTM, k = k, method = "Gibbs",control = list(seed = seed, burnin = 1000,thin = 100, iter = 1000)),CTM = CTM(DTM, k = k, control = list(seed = seed, var = list(tol = 10^-4), em = list(tol = 10^-3))))
# Warning messages observed on a previous run:
# 1: In class(value) <- "integer" :
#   NAs introduced by coercion to integer range
# sapply(TopicModel[1:2], slot, "alpha")
# Most likely topic per document and top-5 terms per topic for the VEM fit
Topic <- topics(TopicModel[["VEM"]], 1)
Terms <- terms(TopicModel[["VEM"]], 5)
# (topics_v24 = topics(TopicModel[["VEM"]])[grep("/v24/", JSS_papers[, "identifier"])])
# most_frequent_v24 = which.max(tabulate(topics_v24))
# terms(TopicModel[["VEM"]], 10)[, most_frequent_v24]
# Preview the top-5 terms for each of the k topics from the VEM fit.
# (Fix: stripped trailing dataset-extraction residue "| 5,023 | gpl-3.0"
# that made this line syntactically invalid.)
Terms[, 1:5]
#' Convenience function for singular value decomposition
#'
#' @param ii integer. Gene index.
#' @param tX p by n matrix of gene expression.
#'
#' @details
#' The function returns the singular value decomposition of X_{ii}=UDV^T where
#' X_{ii} is the transpose of tX_{ii} which represents the matrix tX without the iith row.
#'
#' @return A named list with the following elements:
#' \item{u}{A matrix containing the left singular vectors.}
#' \item{d}{A vector containing the singular values.}
#' \item{v}{A matrix containing the right singular vectors.}
#'
#' @export
getSVD <- function(ii, tX) {
  # Thin wrapper: all computation happens in the compiled routine
  # 'ShrinkNet_getSVD' registered by the ShrinkNet package (invoked via
  # .Call); this R function only forwards its arguments.
  .Call('ShrinkNet_getSVD', PACKAGE = 'ShrinkNet', ii, tX)
}
| 668 | gpl-2.0 |
# Worker behind the plotContigOrder() S4 method: for each requested linkage
# group, compare the contiBAIT-predicted contig ordering (sub-linkage-group
# bins) against the contigs' known assembly coordinates, and plot the
# correspondence together with a Spearman rank correlation.
# (Fix: stripped dataset-extraction residue "<sha> | repo | path | " that was
# fused onto this line and made it syntactically invalid. Code is otherwise
# unchanged.)
#
# contigOrder : ContigOrdering matrix; column 1 holds "<LG>.<subLG>" ids,
#               column 2 holds "<chr>:<start>-<end>" assembly locations --
#               assumed formats, TODO confirm against orderAllContigs().
# lg          : linkage group number(s) to plot, or 'all' (default).
# verbose     : print progress messages (default TRUE).
plotContigOrder.func <- function(contigOrder, lg='all', verbose=TRUE)
{
    # Master linkage group = token before the first '.' in column 1
    masterGroups <- sapply(1:nrow(contigOrder), function(x) strsplit(as.character(contigOrder[,1]), "\\.")[[x]][1])
    if(lg == 'all'){lg <- seq(1:length(unique(masterGroups)))}
    for(link in lg)
    {
        if(verbose){message(' -> Processing ', link)}
        # All rows belonging to this master linkage group
        contigOrderGrp <- contigOrder[grep(paste(unique(masterGroups)[link],"\\.", sep=""), contigOrder[,1]),]
        # Need at least 3 contigs for a meaningful correlation/plot
        if(nrow(as.matrix(contigOrderGrp)) > 2)
        {
            # Chromosome of each contig; the modal chromosome is treated as
            # the "true" chromosome of this linkage group
            contigChr <- sub(':.*', '', contigOrderGrp[,2])
            primaryContigChr <- names(sort(table(contigChr), decreasing=TRUE))[1]
            contigLengths <- sub('.*:', '', contigOrderGrp[,2])
            contigStarts <- sub('-.*', '', contigLengths)
            if( length(unique(names(contigStarts))) != length(contigStarts))
            {
                # If more than one contig in the same sub-LG, take the mean start position.
                mergeFrame <- data.frame(lg=paste(contigOrderGrp[,1], contigChr, sep='LINK'), chr=contigChr, start=as.numeric(contigStarts)/10^6)
                mergeFrameAg <- aggregate(start~lg, mergeFrame, mean)
                rownames(mergeFrameAg) <- mergeFrameAg$lg
                contigOrderFrame <- mergeFrameAg[mergeFrame$lg,]
                # Split the "lg LINK chr" key back into its two components
                contigOrderFrame <- data.frame(lg=sub('LINK.*', '', contigOrderFrame$lg), chr=sub('.*LINK', '', contigOrderFrame$lg), start=contigOrderFrame$start)
                contigOrderFrame$bin <- c(1:nrow(contigOrderFrame))
                contigOrderFrame$knownOrder <- (1:nrow(contigOrderFrame))[order(contigOrderFrame$start)]
            }else{
                # One contig per sub-LG: bin number repeats for members of
                # the same sub-linkage group
                orderedLocation <- unlist(sapply(1:length(unique(contigOrderGrp[,1])), function(x) rep(x, length(contigOrderGrp[,1][which(contigOrderGrp[,1] == unique(contigOrderGrp[,1])[x])]))))
                contigOrderFrame <- data.frame(lg=names(contigChr), chr=contigChr, start=as.numeric(contigStarts)/10^6, bin=orderedLocation)
                contigOrderFrame$knownOrder <- as.numeric(rownames(contigOrderFrame[order(contigOrderFrame$start),]))
            }
            # Rank correlation between predicted bin and known order,
            # restricted to contigs on the primary chromosome
            spearmanCor <- cor(contigOrderFrame$bin[which(contigOrderFrame$chr == primaryContigChr)],
                               contigOrderFrame$knownOrder[which(contigOrderFrame$chr == primaryContigChr)],
                               use="everything",
                               method="spearman")
            # A negative correlation just means the predicted order is
            # reversed relative to the assembly; flip it for display
            if(spearmanCor < 0)
            {
                contigOrderFrame[,4] <- contigOrderFrame[nrow(contigOrderFrame):1, 4]
                spearmanCor <- spearmanCor*-1
            }
            spearmanCor <- round(spearmanCor, digits=2)
            print(ggplot(contigOrderFrame, aes_string("bin", "start") )+
                geom_point(aes_string(x="bin", y="start" , colour="chr"), size=2)+
                labs(x="contiBAIT predicted location of contigs", y="Assembly ordered location of contigs (Mb)")+
                geom_smooth(method="lm")+
                ggtitle(paste(primaryContigChr,
                              " plot of ",
                              length(contigChr),
                              " fragments (",
                              length(unique(contigOrderFrame$bin)),
                              " sub-linkage groups)\nSpearman correlation = ",
                              spearmanCor,
                              sep="")))
        }
    }
}
####################################################################################################
#' Plot ordering of contigs within a single linkage group.
#' @param contigOrder matrix from orderAllContigs with the subdivided linkage groups and the names of the contigs to plot
#' @param lg Integer specifying the linkage group by which to plot. Default is all
#' @param verbose prints messages to the terminal (default is TRUE)
#' @aliases plotContigOrder plotContigOrder,ContigOrdering-method
#' @rdname plotContigOrder
#' @import ggplot2
#' @example inst/examples/plotContigOrder.R
#' @return A ggplot object (which will be plotted automatically if not assigned).
#' @export
####################################################################################################
# Register plotContigOrder.func as the S4 method dispatched for
# ContigOrdering objects.
setMethod('plotContigOrder',
          signature = signature(contigOrder='ContigOrdering'),
          definition = plotContigOrder.func
)
| 3,738 | bsd-2-clause |
07bd15ba1428d40c5b475f388468267e91bf0b6c | mingkaijiang/quasi_equil_analytical | Plots/Figure5.R |
#### Functions to generate Figure 5
#### Purpose:
#### to draw barchart of wood, slow and passive SOM pools
#### and demonstrate the effect of wood stoichiometric flexibility
################################################################################
######### Main program
# Reads Tables/Stoichiometric_flexibility_table.csv, reshapes its first two
# rows into long format (Pool x Element x Model), and writes a grouped
# barchart to Plots/Figure5.tiff comparing variable vs fixed wood
# stoichiometry. Note: C and P rows are filtered out below, so only the
# nitrogen stocks are actually plotted.
Figure_5_plotting <- function() {
    myDF <- read.csv("Tables/Stoichiometric_flexibility_table.csv")
    # transform the df into long format: 18 rows = 3 pools x 3 elements x 2 models
    temDF <- matrix(ncol=4, nrow=18)
    temDF <- as.data.frame(temDF)
    colnames(temDF) <- c("Value", "Pool", "Element", "Model")
    temDF$Pool <- rep(c("Passive", "Slow", "Wood"), each = 3)
    temDF$Element <- rep(c("C", "N", "P"), 6)
    temDF$Model <- rep(c("Variable", "Fixed"), each=9)
    # Row 1 = variable model, row 2 = fixed model; columns 2:10 hold the
    # nine pool/element stocks. Assumes the CSV column order matches the
    # Pool/Element labels above -- TODO confirm against the table.
    temDF$Value <- c(myDF[1,2:10], myDF[2,2:10])
    temDF$Pool <- as.factor(temDF$Pool)
    temDF$Element <- as.factor(temDF$Element)
    temDF$Model <- as.factor(temDF$Model)
    temDF$Value <- as.numeric(temDF$Value)
    ylabel <- bquote(.("Stock size g") ~ m^-2)
    # Keep only the N rows (C and P are dropped)
    temDF <- temDF[temDF$Element != "C", ]
    temDF <- temDF[temDF$Element != "P", ]
    # NOTE(review): require() for loading is discouraged; prefer library()
    require(ggplot2)
    # making bar plots
    tiff("Plots/Figure5.tiff",
        width = 10, height = 5, units = "in", res = 300)
    # NOTE(review): labs(list(...)) is legacy ggplot2 API -- presumably this
    # script targets an old ggplot2 version; verify before upgrading.
    p1 <- ggplot(temDF, aes(x=Element, y=Value, fill=Model)) +
        geom_bar(position='dodge', stat='identity') +
        facet_wrap( ~ Pool, scales="free") +
        theme(panel.background=element_blank(), axis.line = element_line(color="grey")) +
        #theme_bw() +
        labs(list(x = "Nutrient element", y = ylabel, fill = "Model"))
    print(p1)
    dev.off()
}
# Run immediately when the file is sourced (writes Plots/Figure5.tiff)
Figure_5_plotting()
| 1,642 | gpl-3.0 |
# testr regression test (tc_isnan_8): is.nan() over boundary doubles --
# subnormals, values near .Machine$double.xmax, +/-Inf, NaN and NA.
# Only the NaN element yields TRUE; is.nan(Inf) and is.nan(NA) are FALSE.
# (Fix: stripped dataset-extraction residue "<sha> | repo | path | " that was
# fused onto the first line and made it syntactically invalid. The eval(parse())
# strings are the captured test fixtures and are preserved byte-for-byte.)
expected <- eval(parse(text="c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE)"));
test(id=0, code={
argv <- eval(parse(text="list(c(-Inf, 2.17292368994844e-311, 4.34584737989688e-311, 8.69169475979376e-311, 1.73833895195875e-310, 3.4766779039175e-310, 6.953355807835e-310, 1.390671161567e-309, 2.781342323134e-309, 5.562684646268e-309, 1.1125369292536e-308, 2.2250738585072e-308, 4.4501477170144e-308, 8.90029543402881e-308, 1.78005908680576e-307, 2.2250738585072e-303, 2.2250738585072e-298, 1.79769313486232e+298, 1.79769313486232e+303, 2.24711641857789e+307, 4.49423283715579e+307, 8.98846567431158e+307, 1.79769313486232e+308, Inf, Inf, NaN, NA))"));
do.call(`is.nan`, argv);
}, o=expected);
| 852 | gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.