content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Choptank_eList and Arkansas_eList ship with model results already
# attached, but the model hasn't been run since they were originally
# saved. This helper strips the modeled columns from the packaged
# .RData objects, recovering their original, pre-modeled form. This is
# especially necessary for testing any modeling function.
#
# The first modeled column in each data frame marks the cut point:
# "bottomLogQ" in INFO, "Q30" in Daily and "yHat" in Sample; every
# column before it is original, pre-model data. seq_len() is used
# instead of 1:(k - 1) to avoid the 1:0 pitfall of the colon operator.
eList_Ch <- Choptank_eList
info_stale_Ch <- getInfo(eList_Ch)
daily_stale_Ch <- getDaily(eList_Ch)
sample_stale_Ch <- getSample(eList_Ch)
surfaces_stale_Ch <- getSurfaces(eList_Ch)
info_orig_Ch <- info_stale_Ch[, seq_len(which(names(info_stale_Ch) == "bottomLogQ") - 1)]
daily_orig_Ch <- daily_stale_Ch[, seq_len(which(names(daily_stale_Ch) == "Q30") - 1)]
sample_orig_Ch <- sample_stale_Ch[, seq_len(which(names(sample_stale_Ch) == "yHat") - 1)]
surfaces_orig_Ch <- NA  # surfaces are entirely model output; nothing original to keep
eList_orig_Ch <- mergeReport(info_orig_Ch, daily_orig_Ch, sample_orig_Ch, surfaces_orig_Ch, verbose = FALSE)

# Arkansas_eList: identical stripping, same marker columns.
eList_Ar <- Arkansas_eList
info_stale_Ar <- getInfo(eList_Ar)
daily_stale_Ar <- getDaily(eList_Ar)
sample_stale_Ar <- getSample(eList_Ar)
surfaces_stale_Ar <- getSurfaces(eList_Ar)
info_orig_Ar <- info_stale_Ar[, seq_len(which(names(info_stale_Ar) == "bottomLogQ") - 1)]
daily_orig_Ar <- daily_stale_Ar[, seq_len(which(names(daily_stale_Ar) == "Q30") - 1)]
sample_orig_Ar <- sample_stale_Ar[, seq_len(which(names(sample_stale_Ar) == "yHat") - 1)]
surfaces_orig_Ar <- NA
eList_orig_Ar <- mergeReport(info_orig_Ar, daily_orig_Ar, sample_orig_Ar, surfaces_orig_Ar, verbose = FALSE)
| /tests/testthat/helper-originaldata.R | permissive | limnoliver/EGRET | R | false | false | 1,720 | r | # Choptank_eList has model results already, but the model
# hasn't been run since it was originally saved. This takes
# the .RData files available in this package and converts
# them into their original, pre-modeled form. This is
# especially necessary for testing any modeling function.
eList_Ch <- Choptank_eList
info_stale_Ch <- getInfo(eList_Ch)
daily_stale_Ch <- getDaily(eList_Ch)
sample_stale_Ch <- getSample(eList_Ch)
surfaces_stale_Ch <- getSurfaces(eList_Ch)
info_orig_Ch <- info_stale_Ch[, 1:(which(names(info_stale_Ch) == "bottomLogQ") - 1)]
daily_orig_Ch <- daily_stale_Ch[, 1:(which(names(daily_stale_Ch) == "Q30") - 1)]
sample_orig_Ch <- sample_stale_Ch[, 1:(which(names(sample_stale_Ch) == "yHat") - 1)]
surfaces_orig_Ch <- NA
eList_orig_Ch <- mergeReport(info_orig_Ch, daily_orig_Ch, sample_orig_Ch, surfaces_orig_Ch, verbose = FALSE)
# Arkansas_eList has model results already, but the model
# hasn't been run since it was originally saved. This takes
# the .RData files available in this package and converts
# them into their original, pre-modeled form. This is
# especially necessary for testing any modeling function.
eList_Ar <- Arkansas_eList
info_stale_Ar <- getInfo(eList_Ar)
daily_stale_Ar <- getDaily(eList_Ar)
sample_stale_Ar <- getSample(eList_Ar)
surfaces_stale_Ar <- getSurfaces(eList_Ar)
info_orig_Ar <- info_stale_Ar[, 1:(which(names(info_stale_Ar) == "bottomLogQ") - 1)]
daily_orig_Ar <- daily_stale_Ar[, 1:(which(names(daily_stale_Ar) == "Q30") - 1)]
sample_orig_Ar <- sample_stale_Ar[, 1:(which(names(sample_stale_Ar) == "yHat") - 1)]
surfaces_orig_Ar <- NA
eList_orig_Ar <- mergeReport(info_orig_Ar, daily_orig_Ar, sample_orig_Ar, surfaces_orig_Ar, verbose = FALSE)
|
# ------------------------------------------------------------------------------
# pslide_*()
test_that("pslide_*() works", {
expect_equivalent(pslide_vec(list(1L, 1L), ~.x + .y), 2L)
expect_equivalent(pslide_int(list(1L, 1L), ~.x + .y), 2L)
})
test_that("pslide_*() retains names of first input", {
expect_equivalent(pslide_vec(list(c(x = 1L), c(y = 1L)), ~.x + .y), c(x = 2L))
expect_equivalent(pslide_int(list(c(x = 1L), c(y = 1L)), ~.x + .y), c(x = 2L))
})
test_that("pslide_vec() can simplify automatically", {
expect_equivalent(pslide_vec(list(1, 2), ~.x + .y, .ptype = NULL), 3)
})
test_that("pslide_vec() errors if it can't simplify", {
fn <- function(x, y) if (x == 1L) {1} else {"hi"}
expect_error(
pslide_vec(list(1:2, 1:2), fn, .ptype = NULL),
class = "vctrs_error_incompatible_type"
)
})
test_that("pslide_*() errors if it can't cast", {
fn <- function(x, y) if (x == 1L) {1} else {"hi"}
expect_error(
pslide_int(list(1:2, 1:2), fn),
class = "vctrs_error_incompatible_type"
)
})
# ------------------------------------------------------------------------------
# suffix tests
test_that("pslide_int() works", {
expect_equivalent(pslide_int(list(1L, 1L), ~.x + .y), 2L)
})
test_that("pslide_int() can coerce", {
expect_equivalent(pslide_int(list(1, 1), ~.x + .y), 2L)
})
test_that("pslide_dbl() works", {
expect_equivalent(pslide_dbl(list(1, 1), ~.x), 1)
})
test_that("pslide_dbl() can coerce", {
expect_equivalent(pslide_dbl(list(1L, 1L), ~.x + .y), 2)
})
test_that("pslide_chr() works", {
expect_equivalent(pslide_chr(list("x", 1), ~.x), "x")
})
test_that("pslide_chr() cannot coerce", {
expect_error(pslide_chr(list(1, 1), ~.x + .y), class = "vctrs_error_incompatible_type")
})
test_that("pslide_lgl() works", {
expect_equivalent(pslide_lgl(list(TRUE, 1), ~.x), TRUE)
})
test_that("pslide_lgl() can coerce", {
expect_equivalent(pslide_lgl(list(1, 0), ~.x + .y), TRUE)
})
# ------------------------------------------------------------------------------
# data frame suffix tests
test_that("pslide_dfr() works", {
expect_equal(
pslide_dfr(list(1:2, 1:2), ~c(.x, .y), .before = 1),
data.frame(
...1 = c(1, 1),
...2 = c(1, 2),
...3 = c(NA, 1),
...4 = c(NA, 2)
)
)
x <- 1:2
expect_equal(
pslide_dfr(list(x, x), ~data.frame(x = .x, y = .y), .before = 1),
data.frame(x = c(1, 1, 2), y = c(1, 1, 2))
)
})
test_that("pslide_dfc() works", {
x <- 1:2
expect_equal(
pslide_dfc(list(x, x), ~data.frame(x = .x, y = .y), .before = 1),
data.frame(
x...1 = c(1, 1),
y...2 = c(1, 1),
x...3 = c(1, 2),
y...4 = c(1, 2)
)
)
})
# ------------------------------------------------------------------------------
# .ptype
test_that("`.ptype = NULL` is size stable (#78)", {
expect_length(pslide_vec(list(1:4, 1:4), ~.x, .step = 2), 4)
expect_length(pslide_vec(list(1:4, 1:4), ~1, .before = 1, .complete = TRUE), 4)
})
test_that("`pslide_vec()` falls back to `c()` method as required", {
local_c_foobar()
expect_identical(pslide_vec(list(1:3, 1:3), ~foobar(.x), .ptype = foobar(integer())), foobar(1:3))
expect_condition(pslide_vec(list(1:3, 1:3), ~foobar(.x), .ptype = foobar(integer())), class = "slider_c_foobar")
expect_identical(pslide_vec(list(1:3, 1:3), ~foobar(.x)), foobar(1:3))
expect_condition(pslide_vec(list(1:3, 1:3), ~foobar(.x)), class = "slider_c_foobar")
})
# ------------------------------------------------------------------------------
# .step
# With `.step = 2` the sliding function is evaluated only at every second
# position; the skipped slots must be filled with an `NA` of the output's own
# type (NA_integer_, NA_real_, NA_character_), which `expect_identical()`
# distinguishes from a plain logical NA.
test_that(".step produces typed `NA` values", {
expect_identical(pslide_int(list(1:3, 1:3), ~.x, .step = 2), c(1L, NA, 3L))
expect_identical(pslide_dbl(list(1:3, 1:3), ~.x, .step = 2), c(1, NA, 3))
expect_identical(pslide_chr(list(c("a", "b", "c"), 1:3), ~.x, .step = 2), c("a", NA, "c"))
# The last two cover pslide_vec() without .ptype (inferred type) and with an
# explicit .ptype = integer() cast; both must yield the same typed result.
expect_identical(pslide_vec(list(1:3, 1:3), ~.x, .step = 2), c(1L, NA, 3L))
expect_identical(pslide_vec(list(1:3, 1:3), ~.x, .step = 2, .ptype = integer()), c(1L, NA, 3L))
})
# ------------------------------------------------------------------------------
# .complete
# With `.before = 1, .complete = TRUE` the first window lacks a full history,
# so its slot is skipped; as with .step, the placeholder must be an `NA` of
# the result's own type rather than the default logical NA.
test_that(".complete produces typed `NA` values", {
expect_identical(pslide_int(list(1:3, 1:3), ~1L, .before = 1, .complete = TRUE), c(NA, 1L, 1L))
expect_identical(pslide_dbl(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE), c(NA, 1, 1))
expect_identical(pslide_chr(list(1:3, 1:3), ~"1", .before = 1, .complete = TRUE), c(NA, "1", "1"))
# pslide_vec() without .ptype infers double here; with .ptype = integer()
# the same result is cast to integer, including the leading NA.
expect_identical(pslide_vec(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE), c(NA, 1, 1))
expect_identical(pslide_vec(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE, .ptype = integer()), c(NA, 1L, 1L))
})
| /tests/testthat/test-pslide-vec.R | permissive | jimsforks/slider | R | false | false | 4,670 | r | # ------------------------------------------------------------------------------
# pslide_*()
test_that("pslide_*() works", {
expect_equivalent(pslide_vec(list(1L, 1L), ~.x + .y), 2L)
expect_equivalent(pslide_int(list(1L, 1L), ~.x + .y), 2L)
})
test_that("pslide_*() retains names of first input", {
expect_equivalent(pslide_vec(list(c(x = 1L), c(y = 1L)), ~.x + .y), c(x = 2L))
expect_equivalent(pslide_int(list(c(x = 1L), c(y = 1L)), ~.x + .y), c(x = 2L))
})
test_that("pslide_vec() can simplify automatically", {
expect_equivalent(pslide_vec(list(1, 2), ~.x + .y, .ptype = NULL), 3)
})
test_that("pslide_vec() errors if it can't simplify", {
fn <- function(x, y) if (x == 1L) {1} else {"hi"}
expect_error(
pslide_vec(list(1:2, 1:2), fn, .ptype = NULL),
class = "vctrs_error_incompatible_type"
)
})
test_that("pslide_*() errors if it can't cast", {
fn <- function(x, y) if (x == 1L) {1} else {"hi"}
expect_error(
pslide_int(list(1:2, 1:2), fn),
class = "vctrs_error_incompatible_type"
)
})
# ------------------------------------------------------------------------------
# suffix tests
test_that("pslide_int() works", {
expect_equivalent(pslide_int(list(1L, 1L), ~.x + .y), 2L)
})
test_that("pslide_int() can coerce", {
expect_equivalent(pslide_int(list(1, 1), ~.x + .y), 2L)
})
test_that("pslide_dbl() works", {
expect_equivalent(pslide_dbl(list(1, 1), ~.x), 1)
})
test_that("pslide_dbl() can coerce", {
expect_equivalent(pslide_dbl(list(1L, 1L), ~.x + .y), 2)
})
test_that("pslide_chr() works", {
expect_equivalent(pslide_chr(list("x", 1), ~.x), "x")
})
test_that("pslide_chr() cannot coerce", {
expect_error(pslide_chr(list(1, 1), ~.x + .y), class = "vctrs_error_incompatible_type")
})
test_that("pslide_lgl() works", {
expect_equivalent(pslide_lgl(list(TRUE, 1), ~.x), TRUE)
})
test_that("pslide_lgl() can coerce", {
expect_equivalent(pslide_lgl(list(1, 0), ~.x + .y), TRUE)
})
# ------------------------------------------------------------------------------
# data frame suffix tests
test_that("pslide_dfr() works", {
expect_equal(
pslide_dfr(list(1:2, 1:2), ~c(.x, .y), .before = 1),
data.frame(
...1 = c(1, 1),
...2 = c(1, 2),
...3 = c(NA, 1),
...4 = c(NA, 2)
)
)
x <- 1:2
expect_equal(
pslide_dfr(list(x, x), ~data.frame(x = .x, y = .y), .before = 1),
data.frame(x = c(1, 1, 2), y = c(1, 1, 2))
)
})
test_that("pslide_dfc() works", {
x <- 1:2
expect_equal(
pslide_dfc(list(x, x), ~data.frame(x = .x, y = .y), .before = 1),
data.frame(
x...1 = c(1, 1),
y...2 = c(1, 1),
x...3 = c(1, 2),
y...4 = c(1, 2)
)
)
})
# ------------------------------------------------------------------------------
# .ptype
test_that("`.ptype = NULL` is size stable (#78)", {
expect_length(pslide_vec(list(1:4, 1:4), ~.x, .step = 2), 4)
expect_length(pslide_vec(list(1:4, 1:4), ~1, .before = 1, .complete = TRUE), 4)
})
test_that("`pslide_vec()` falls back to `c()` method as required", {
local_c_foobar()
expect_identical(pslide_vec(list(1:3, 1:3), ~foobar(.x), .ptype = foobar(integer())), foobar(1:3))
expect_condition(pslide_vec(list(1:3, 1:3), ~foobar(.x), .ptype = foobar(integer())), class = "slider_c_foobar")
expect_identical(pslide_vec(list(1:3, 1:3), ~foobar(.x)), foobar(1:3))
expect_condition(pslide_vec(list(1:3, 1:3), ~foobar(.x)), class = "slider_c_foobar")
})
# ------------------------------------------------------------------------------
# .step
test_that(".step produces typed `NA` values", {
expect_identical(pslide_int(list(1:3, 1:3), ~.x, .step = 2), c(1L, NA, 3L))
expect_identical(pslide_dbl(list(1:3, 1:3), ~.x, .step = 2), c(1, NA, 3))
expect_identical(pslide_chr(list(c("a", "b", "c"), 1:3), ~.x, .step = 2), c("a", NA, "c"))
expect_identical(pslide_vec(list(1:3, 1:3), ~.x, .step = 2), c(1L, NA, 3L))
expect_identical(pslide_vec(list(1:3, 1:3), ~.x, .step = 2, .ptype = integer()), c(1L, NA, 3L))
})
# ------------------------------------------------------------------------------
# .complete
test_that(".complete produces typed `NA` values", {
expect_identical(pslide_int(list(1:3, 1:3), ~1L, .before = 1, .complete = TRUE), c(NA, 1L, 1L))
expect_identical(pslide_dbl(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE), c(NA, 1, 1))
expect_identical(pslide_chr(list(1:3, 1:3), ~"1", .before = 1, .complete = TRUE), c(NA, "1", "1"))
expect_identical(pslide_vec(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE), c(NA, 1, 1))
expect_identical(pslide_vec(list(1:3, 1:3), ~1, .before = 1, .complete = TRUE, .ptype = integer()), c(NA, 1L, 1L))
})
|
## Construct a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor closures. They share this call's
## enclosing environment (not the global environment), which is where
## the matrix `x` and the cached inverse live:
##   set(y)            -- store a new matrix and drop any cached inverse
##   get()             -- return the stored matrix
##   setCacheMatrix(m) -- record a computed inverse in the cache
##   getCacheMatrix()  -- return the cached inverse, or NULL if unset
## Sample usage:
##   m <- makeCacheMatrix()
##   m$set(matrix(c(0, 2, 2, 0), 2, 2))
##   m$get()
##   m$setCacheMatrix(a)
##   m$getCacheMatrix()
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      # Replacing the stored matrix invalidates any cached inverse.
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setCacheMatrix = function(solve) cached <<- solve,
    getCacheMatrix = function() cached
  )
}
## Compute the inverse of the special "matrix" returned by
## makeCacheMatrix().
##
## If the inverse has already been calculated (the cached value is not
## NULL), the cached result is returned and a message is printed;
## otherwise the inverse is computed with solve(), stored in the cache,
## and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getCacheMatrix()
  if (is.null(inverse)) {
    # Cache miss: invert the stored matrix and remember the answer.
    inverse <- solve(x$get(), ...)
    x$setCacheMatrix(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
## The function has 4 methods:
## set: sets the value of an identity matrix in the global environment - used to initialize global veraibles
## get: gets the value of an identity matrix from the global environment
## setCacheMatrix: set the value of a solved identity matrix in the global environment
## getCacheMatrix: get the value of a solved identity matrix from the global environment
## Sample usage:
## m <- makeCacheMatrix()
## m$set(matrix(c(0,2,2,0),2,2))
## m$get()
## m$setCacheMatrix(a)
## m$getCacheMatrix()
makeCacheMatrix <- function(x = matrix()) {
m <- NULL ##initialize the value of 'm' within local scope
set <- function(y) {
x <<- y ##initialize the value of the global variable 'x'
m <<- NULL ##initialize the value of the global variable 'm'
}
get <- function() x ##gets the matrix from the global variable 'x'
setCacheMatrix <- function(solve) m <<- solve ##sets the value of the global variable 'm'
getCacheMatrix <- function() m ##gets the value of the global variable 'm'
list(set = set, get = get, ##lists methods for function
setCacheMatrix = setCacheMatrix,
getCacheMatrix = getCacheMatrix)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix().
##If the inverse has already been calculated (i.e. the cached value is not null),
## then cacheSolve will retrieve the inverse from the cache.
##
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getCacheMatrix() ##get the cached value of the solved matrix
if(!is.null(m)) { ##if the cached value is not null, return the cached value
message("getting cached data")
return(m)
}
data <- x$get() ##if the cached value is null, get the original matrix
m <- solve(data, ...) ##solve the identity matrix
x$setCacheMatrix(m) ##call a function to set the cached value to the solved identity matrix
m ##print the value of 'm' to the screen
} |
library(pksensi)
### Name: install_mcsim
### Title: Install MCSim
### Aliases: install_mcsim
### ** Examples
## Not run: install_mcsim(version = 6.0.1, mxstep = 10000)
| /data/genthat_extracted_code/pksensi/examples/install_mcsim.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 176 | r | library(pksensi)
### Name: install_mcsim
### Title: Install MCSim
### Aliases: install_mcsim
### ** Examples
## Not run: install_mcsim(version = 6.0.1, mxstep = 10000)
|
#' Plot LCc Values
#'
#' Draws one panel per row of `LC`, showing the monthly LCc fractions as a
#' point-and-line series titled with that row's `description`.
#'
#' @param LC (data.frame) LC values from getLCc; metadata columns are those
#'   whose names appear in `names(col_lc)`, the rest are monthly values.
#'
#' @return a plot of LCc value time series (drawn as a side effect)
#'
#' @export
#'
plotLCc <- function(LC = NULL) {
  # Separate the metadata columns (named in col_lc) from the monthly
  # land-cover values, which are plotted one panel per row.
  is_meta <- names(LC) %in% names(col_lc)
  series <- as.matrix(LC[, !is_meta])
  meta <- LC[, is_meta]
  # Grid layout: pick the column count so the panels span at most 4 rows.
  n_col <- ceiling(nrow(series) / 4)
  n_row <- ceiling(nrow(series) / n_col)
  par(mfrow = c(n_row, n_col), mar = c(2, 2, 2, 2))
  for (row in seq_len(nrow(series))) {
    plot(series[row, ],
         type = "o",
         pch = 16,
         main = meta$description[row],
         col.main = "red",
         cex.main = 0.75,
         axes = FALSE)
    # Custom axes: months 1-12 on x, fractions 0-1 in steps of 0.1 on y.
    axis(1, at = 1:12, labels = 1:12)
    axis(2, las = 1, at = seq(0, 1, 0.1), labels = seq(0, 1, 0.1))
  }
}
| /R/plotLCc.R | permissive | yangxhcaf/ET | R | false | false | 723 | r | #' Plot LCc Values
#'
#' @param LC (data.frame) plot LC values from getLCc
#'
#' @return a plot of LCc value time series
#'
#' @export
#'
plotLCc = function(LC = NULL) {
lc = as.matrix(LC[, !(names(LC) %in% names(col_lc))])
info = LC[, (names(LC) %in% names(col_lc))]
ncol = ceiling(nrow(lc) / 4)
nrow = ceiling(nrow(lc) / ncol)
par(mfrow = c(nrow, ncol), mar = c(2, 2, 2, 2))
for (i in 1:nrow(lc)) {
plot(
lc[i, ],
type = 'o',
pch = 16,
main = info$description[i],
col.main = 'red',
cex.main = .75,
axes = F
)
axis(1, at = c(1:12), labels = c(1:12))
axis(2,
las = 1,
at = seq(0, 1, .1),
labels = seq(0, 1, .1))
}
}
|
# Remove all objects - usually start with this
# NOTE(review): rm(list = ls()) wipes the user's entire workspace as a side
# effect; running the script in a fresh R session is the safer habit.
rm(list=ls())
# Strategy characteristics:
# Load in strategies.
# Each sourced AmyStrategyN.R file is expected to define the scalars `b`
# (birth rate) and `d` (death rate) used in the rbind() below -- TODO
# confirm against the strategy files. The absolute paths are
# machine-specific and will need editing on another computer.
Strategy = NULL
source('/Users/amyhurford/Desktop/BIOL-3295/Labs/Lab 6/AmyStrategy1.R')
Strategy = rbind(Strategy,c(Name=1, b=b, d=d))
source('/Users/amyhurford/Desktop/BIOL-3295/Labs/Lab 6/AmyStrategy2.R')
Strategy = rbind(Strategy, c(Name=2, b=b, d=d))
Strategy = as.data.frame(Strategy)
Total.Strategies = length(Strategy[,1])  # number of competing strategies (rows)
total.time = 200  # number of Moran steps to simulate
# Number[t, j] holds the count of individuals playing strategy j at time t.
Number = matrix(0,total.time,Total.Strategies)
# Split a fixed population of 1000 evenly across the strategies.
# NOTE(review): the hard-coded 1:2 assumes exactly two strategies; this
# should be 1:Total.Strategies if more strategy files are ever added.
Number[1,1:2] = 1000/Total.Strategies
# Death-Birth (DB) Moran process: at each step one individual dies and then
# one is born, so the total population size stays constant.
for(t in seq(1,total.time-1)){
# Choose an individual to die
# Weighted sampling by inverse CDF: draw r1 uniformly on [0, total death
# score] and take the first strategy whose cumulative score exceeds it.
dscore = Strategy$d*Number[t,]
cum.dscore = cumsum(dscore)
r1 = runif(1, 0, cum.dscore[Total.Strategies])
strategy.d = min(which(cum.dscore>r1))
Number[t+1,] = Number[t,]
Number[t+1,strategy.d] = Number[t,strategy.d]-1
# Choose an individual to give birth
# Birth weights use the post-death counts (Number[t+1, ]), which is what
# makes this a DB (death-then-birth) update.
bscore = Strategy$b*Number[t+1,]
cum.bscore = cumsum(bscore)
r1 = runif(1, 0, cum.bscore[Total.Strategies])
strategy.b = min(which(cum.bscore>r1))
Number[t+1,strategy.b] = Number[t+1,strategy.b]+1
}
# Plot both strategies' trajectories: strategy 1 in red, strategy 2 in blue.
plot(seq(1,total.time), Number[,1], typ="l", col = "red", ylim = c(min(Number),max(Number)), xlab = "time", ylab = "Popn Size", main = "Moran model - DB process")
lines(seq(1,total.time), Number[,2], typ="l", col = "blue")
rm(list=ls())
# Strategy characteristics:
# Load in strategies.
Strategy = NULL
source('/Users/amyhurford/Desktop/BIOL-3295/Labs/Lab 6/AmyStrategy1.R')
Strategy = rbind(Strategy,c(Name=1, b=b, d=d))
source('/Users/amyhurford/Desktop/BIOL-3295/Labs/Lab 6/AmyStrategy2.R')
Strategy = rbind(Strategy, c(Name=2, b=b, d=d))
Strategy = as.data.frame(Strategy)
Total.Strategies = length(Strategy[,1])
total.time = 200
Number = matrix(0,total.time,Total.Strategies)
Number[1,1:2] = 1000/Total.Strategies
for(t in seq(1,total.time-1)){
# Choose an individual to die
dscore = Strategy$d*Number[t,]
cum.dscore = cumsum(dscore)
r1 = runif(1, 0, cum.dscore[Total.Strategies])
strategy.d = min(which(cum.dscore>r1))
Number[t+1,] = Number[t,]
Number[t+1,strategy.d] = Number[t,strategy.d]-1
# Choose an individual to give birth
bscore = Strategy$b*Number[t+1,]
cum.bscore = cumsum(bscore)
r1 = runif(1, 0, cum.bscore[Total.Strategies])
strategy.b = min(which(cum.bscore>r1))
Number[t+1,strategy.b] = Number[t+1,strategy.b]+1
}
plot(seq(1,total.time), Number[,1], typ="l", col = "red", ylim = c(min(Number),max(Number)), xlab = "time", ylab = "Popn Size", main = "Moran model - DB process")
lines(seq(1,total.time), Number[,2], typ="l", col = "blue") |
\name{response_trt_graph}
\alias{response_trt_graph}
\title{
Response by Treatment Graphs
}
\description{
Read in an xls file outputted by MTA for FieldPro and give a graph of response variable by treatment number. The type of graph returned can be selected by the user.
}
\usage{
response_trt_graph(filename, trialname, graphtype = "p", sheetnumber = 3, ...)
}
\arguments{
\item{filename}{
The XLS file from MTA that will be read in.
}
\item{trialname}{
The name of the trial that the graph will be made from
}
\item{graphtype}{
The type of plot that will be drawn:
"p" for a scatterplot with an R squared value,
"l" for a plot with just lines,
"b" for both lines and points,
"c" for lines with blank spaces where the points would be,
"o" for lines going through the points,
"h" for a histogram of the response variable,
"s" for a step plot,
"n" for no plot.
}
\item{sheetnumber}{
The sheet number for the EVALUATION MEANS data. The standard output from MTA is the third sheet.
}
\item{\dots}{
Arguments to be passed to methods in plot or hist functions, such as colors or size.
}
}
\value{
A graph of the selected type given by the user is displayed, but no value is returned. If no plot type is selected, a scatterplot will be returned.
}
\author{
Christopher Landau
}
\examples{
## Scatterplot of response variable by treatments
path1<-system.file("extdata", "YIELDALL_MTA_FLATFILE.xls", package = "fieldproanalysis")
response_trt_graph(path1,"US 101/10/01 002 01")
## Histogram of the response variables
response_trt_graph(path1,"US 101/10/01 002 01", graphtype = "h")
}
| /man/response_trt_graph.Rd | no_license | brentpm2/FIELD_PRO_ANALYSIS | R | false | false | 1,632 | rd | \name{response_trt_graph}
\alias{response_trt_graph}
\title{
Response by Treatment Graphs
}
\description{
Read in an xls file outputted by MTA for FieldPro and give a graph of response variable by treatment number. The type of graph returned can be selected by the user.
}
\usage{
response_trt_graph(filename, trialname, graphtype = "p", sheetnumber = 3, ...)
}
\arguments{
\item{filename}{
The XLS file from MTA that will be read in.
}
\item{trialname}{
The name of the trial that the graph will be made from
}
\item{graphtype}{
The type of plot that will be drawn:
"p" for a scatterplot with an R squared value,
"l" for a plot with just lines,
"b" for both lines and points,
"c" for lines with blank spaces where the points would be,
"o" for lines going through the points,
"h" for a histogram of the response variable,
"s" for a step plot,
"n" for no plot.
}
\item{sheetnumber}{
The sheet number for the EVALUATION MEANS data. The standard output from MTA is the third sheet.
}
\item{\dots}{
Arguments to be passed to methods in plot or hist functions, such as colors or size.
}
}
\value{
A graph of the selected type given by the user is displayed, but no value is returned. If no plot type is selected, a scatterplot will be returned.
}
\author{
Christopher Landau
}
\examples{
## Scatterplot of response variable by treatments
path1<-system.file("extdata", "YIELDALL_MTA_FLATFILE.xls", package = "fieldproanalysis")
response_trt_graph(path1,"US 101/10/01 002 01")
## Histogram of the response variables
response_trt_graph(path1,"US 101/10/01 002 01", graphtype = "h")
}
|
# NOTE(review): setwd() to a machine-specific absolute path makes the script
# non-portable; prefer a project-relative path or file.path() to the data.
setwd("C:\\Users\\achatterjee\\Downloads")
dat<-read.csv('Prostate.csv')
#Split data into a training and validation set
# Rows flagged train == TRUE form the training set; the rest are held out.
tdat<-dat[dat$train==TRUE,]
vdat<-dat[dat$train==FALSE,]
#center and scale all predictors - necessary for shrinkage methods
tdats<-data.frame(
lcavol =(tdat$lcavol -mean(tdat$lcavol ))/sd(tdat$lcavol ),
lweight =(tdat$lweight -mean(tdat$lweight))/sd(tdat$lweight),
age =(tdat$age -mean(tdat$age ))/sd(tdat$age ),
lbph =(tdat$lbph -mean(tdat$lbph ))/sd(tdat$lbph ),
svi =(tdat$svi -mean(tdat$svi ))/sd(tdat$svi ),
lcp =(tdat$lcp -mean(tdat$lcp ))/sd(tdat$lcp ),
gleason =(tdat$gleason -mean(tdat$gleason))/sd(tdat$gleason),
pgg45 =(tdat$pgg45 -mean(tdat$pgg45 ))/sd(tdat$pgg45 )
)
lpsa =tdat$lpsa
#By default, refer to objects in tdat
attach(tdats)
#examine the correlation among the values in the data
pairs(tdats)
cor(tdats)
#1. Some basic regressions
### scatter plot
#relationship between lpsa (the outcome) and each predictor
par(mfrow=c(2,4))
plot(age,lpsa,pch=16,cex.lab=1.5)
agefit<-summary(lm(lpsa~age))
abline(agefit$coeff[1,1],agefit$coeff[2,1],col='red',lwd=3)
plot(gleason,lpsa,pch=16,cex.lab=1.5)
gleasonfit<-summary(lm(lpsa~gleason))
abline(gleasonfit$coeff[1,1],gleasonfit$coeff[2,1],col='red',lwd=3)
plot(lbph,lpsa,pch=16,cex.lab=1.5)
lbphfit<-summary(lm(lpsa~lbph))
abline(lbphfit$coeff[1,1],lbphfit$coeff[2,1],col='red',lwd=3)
plot(lcavol,lpsa,pch=16,cex.lab=1.5)
lcavolfit<-summary(lm(lpsa~lcavol))
abline(lcavolfit$coeff[1,1],lcavolfit$coeff[2,1],col='red',lwd=3)
plot(lweight,lpsa,pch=16,cex.lab=1.5)
lweightfit<-summary(lm(lpsa~lweight))
abline(lweightfit$coeff[1,1],lweightfit$coeff[2,1],col='red',lwd=3)
plot(svi,lpsa,pch=16,cex.lab=1.5)
svifit<-summary(lm(lpsa~svi))
abline(svifit$coeff[1,1],svifit$coeff[2,1],col='red',lwd=3)
plot(lcp,lpsa,pch=16,cex.lab=1.5)
lcpfit<-summary(lm(lpsa~lcp))
abline(lcpfit$coeff[1,1],lcpfit$coeff[2,1],col='red',lwd=3)
plot(pgg45,lpsa,pch=16,cex.lab=1.5)
pgg45fit<-summary(lm(lpsa~pgg45))
abline(pgg45fit$coeff[1,1],pgg45fit$coeff[2,1],col='red',lwd=3)
#Examine the ordinary least squares fit of the full model using multiple regression
model<-summary(lm(lpsa~.,data=tdats))
model
##### 2. implementing lasso ###############
# Load the lars package, which can fit the lasso.
# Fix: install.packages() takes a quoted package name; the original
# install.packages(lars) tried to evaluate a non-existent object named
# `lars` and errored. Also only install when the package is actually
# missing, so rerunning the script does not reinstall every time.
if (!requireNamespace("lars", quietly = TRUE)) {
  install.packages("lars")
}
library(lars)
# Fit the full lasso regularization path on the standardized predictors.
lasso <- lars(x = as.matrix(tdats), y = lpsa, type = "lasso",
              trace = TRUE, normalize = TRUE, intercept = TRUE)
lasso
coef(lasso)  # look at lasso coefficients at each step of the path
#predict.lars(object=lasso,s=.375,mode='fraction',type='coefficients')
absum<-sum(abs(model$coeff[-1,1]))
#Build the lasso plot from the ground up
lassum<-apply(abs(coef(lasso)),1,sum) #Sum of absolute value of OLS coefficients
s<-lassum/absum
plot( s,coef(lasso)[,1],ylim=c(-.3,0.7),type='l',lwd=2,xlab='Shrinkage factor s',
main='Lasso path - coefficients as a function of shrinkage factor s',
xlim=c(0,1.2),axes=FALSE,ylab='Coefficient',cex.lab=1.5,cex.axis=1.4)
axis(1,at=seq(0,1,.2),cex.axis=1.4)
axis(2,at=seq(-.3,.7,.2),cex.axis=1.4)
lines(s,coef(lasso)[,2],lwd=2)
lines(s,coef(lasso)[,3],lwd=2)
lines(s,coef(lasso)[,4],lwd=2)
lines(s,coef(lasso)[,5],lwd=2)
lines(s,coef(lasso)[,6],lwd=2)
lines(s,coef(lasso)[,7],lwd=2)
lines(s,coef(lasso)[,8],lwd=2)
text(1.05,0.72,'lcavol')
text(1.03,0.34,'svi')
text(1.05,0.30,'lweight')
text(1.05,0.26,'ppg45')
text(1.04,0.20,'lbph')
text(1.06,-.02,'gleason')
text(1.03,-.15,'age')
text(1.03,-.29,'lcp')
abline(v=s,col='lightgray',lty=3)
####3. Cross validation and choosing s #####
##10 fold Cross validation to choose a value of s
set.seed(389173367) #sets 'starting point' for list of random numbers
#Genreate a vector of holdout labels
cvlab<-sample(1:10,67,replace=TRUE)
#How many of each label are there?
table(cvlab)
#Create a vector of candidate s values
#Try each s value on all cross validated sets
svec<-seq(0,1,.05)
J<-length(svec)
#Initialize a list to store lasso objects from k fold cross validation
lassolist<-list()
#Initialize a list to store predictions from each lasso set
pred<-list()
#Initialize a matrix to store MSE
#Rows correspond to the J values of s, columns correspond to the ten holdout sets
MSEstore<-matrix(NA,J,10)
#Use a for loop to get each lasso fit holding out the ith set
#Then predict the ith set using the holdout model
for(i in 1:10){
lassolist[[i]]<-lars(x=as.matrix(tdats)[cvlab!=i,],y=lpsa[cvlab!=i],type='lasso',trace=FALSE,normalize=TRUE,intercept=TRUE)
pred[[i]]<-predict.lars(object=lassolist[[i]],newx=tdats[cvlab==i,],s=svec,mode='fraction',type='fit')$fit
#Start a new loop to get MSE for each combination of ith holdout set and jth value of s
for(j in 1:J){
MSEstore[j,i]<-mean((pred[[i]][,j]-lpsa[cvlab==i])^2) #This computes MSE
}
}
#These apply statements compute mean and standard error of the observed MSEs at J values of s across the 10 holdout sets
meanMSE<-apply(MSEstore,1,mean)
stdMSE<-apply(MSEstore,1,sd)/sqrt(10)
plot(svec,meanMSE,ylim=c(0.5,1.75),pch=16,col=colors()[258],axes=FALSE,cex=1.2,
xlab='Shrinkage factor s',ylab='Mean square error',cex.lab=1.7,main='Average CV prediction error as a function of s')
axis(1,cex.axis=1.4,cex.axis=1.2)
axis(2,las=1,at=seq(0.5,1.75,.25),cex.axis=1.2)
lines(svec,meanMSE,lty=1,col=colors()[258])
for(i in 1:J)segments(svec[i],(meanMSE[i]-stdMSE[i]),svec[i],(meanMSE[i]+stdMSE[i]))
abline(h=(meanMSE+stdMSE)[18],lty=2)
points(svec[9],meanMSE[9],col='red',pch=15,cex=1.3)
legend(.35,1.5,legend=c('mean MSE','standard error (SE)','1 SE above lowest mean','chosen value of s'),
pch=c(16,NA,NA,15),col=c(colors()[258],1,1,'red'),cex=1.1,lty=c(1,1,2,NA))
#These are the coefficients for the chosen model
predict.lars(lasso,s=.4,mode='fraction',type='coefficients')
| /In class activities/lasso.R | no_license | mathmodeler2718/Regression | R | false | false | 5,993 | r | setwd("C:\\Users\\achatterjee\\Downloads")
dat<-read.csv('Prostate.csv')
#Split data into a training and validation set
tdat<-dat[dat$train==TRUE,]
vdat<-dat[dat$train==FALSE,]
#center and scale all predictors - necessary for shrinkage methods
tdats<-data.frame(
lcavol =(tdat$lcavol -mean(tdat$lcavol ))/sd(tdat$lcavol ),
lweight =(tdat$lweight -mean(tdat$lweight))/sd(tdat$lweight),
age =(tdat$age -mean(tdat$age ))/sd(tdat$age ),
lbph =(tdat$lbph -mean(tdat$lbph ))/sd(tdat$lbph ),
svi =(tdat$svi -mean(tdat$svi ))/sd(tdat$svi ),
lcp =(tdat$lcp -mean(tdat$lcp ))/sd(tdat$lcp ),
gleason =(tdat$gleason -mean(tdat$gleason))/sd(tdat$gleason),
pgg45 =(tdat$pgg45 -mean(tdat$pgg45 ))/sd(tdat$pgg45 )
)
lpsa =tdat$lpsa
#By default, refer to objects in tdat
attach(tdats)
#examine the correlation among the values in the data
pairs(tdats)
cor(tdats)
#1. Some basic regressions
### scatter plot
#relationship between lpsa (the outcome) and each predictor
par(mfrow=c(2,4))
plot(age,lpsa,pch=16,cex.lab=1.5)
agefit<-summary(lm(lpsa~age))
abline(agefit$coeff[1,1],agefit$coeff[2,1],col='red',lwd=3)
plot(gleason,lpsa,pch=16,cex.lab=1.5)
gleasonfit<-summary(lm(lpsa~gleason))
abline(gleasonfit$coeff[1,1],gleasonfit$coeff[2,1],col='red',lwd=3)
plot(lbph,lpsa,pch=16,cex.lab=1.5)
lbphfit<-summary(lm(lpsa~lbph))
abline(lbphfit$coeff[1,1],lbphfit$coeff[2,1],col='red',lwd=3)
plot(lcavol,lpsa,pch=16,cex.lab=1.5)
lcavolfit<-summary(lm(lpsa~lcavol))
abline(lcavolfit$coeff[1,1],lcavolfit$coeff[2,1],col='red',lwd=3)
plot(lweight,lpsa,pch=16,cex.lab=1.5)
lweightfit<-summary(lm(lpsa~lweight))
abline(lweightfit$coeff[1,1],lweightfit$coeff[2,1],col='red',lwd=3)
plot(svi,lpsa,pch=16,cex.lab=1.5)
svifit<-summary(lm(lpsa~svi))
abline(svifit$coeff[1,1],svifit$coeff[2,1],col='red',lwd=3)
plot(lcp,lpsa,pch=16,cex.lab=1.5)
lcpfit<-summary(lm(lpsa~lcp))
abline(lcpfit$coeff[1,1],lcpfit$coeff[2,1],col='red',lwd=3)
plot(pgg45,lpsa,pch=16,cex.lab=1.5)
pgg45fit<-summary(lm(lpsa~pgg45))
abline(pgg45fit$coeff[1,1],pgg45fit$coeff[2,1],col='red',lwd=3)
#Examine the ordinary least squares fit of the full model using multiple regression
# (lpsa regressed on all eight standardized predictors; `model$coeff` is
# reused below to compute the shrinkage factor s for the lasso path plot).
model<-summary(lm(lpsa~.,data=tdats))
model
##### 2. implementing lasso ###############
# Load the lars package, which can fit the lasso.
# BUG FIX: install.packages() requires a quoted package name; the original
# install.packages(lars) errors because no object `lars` exists yet.
# Also only install when the package is not already available.
if (!requireNamespace("lars", quietly = TRUE)) {
  install.packages("lars")
}
library(lars)
# Define a lasso object: the full regularization path of lpsa on the
# standardized predictors.
lasso <- lars(x = as.matrix(tdats), y = lpsa, type = 'lasso',
              trace = TRUE, normalize = TRUE, intercept = TRUE)
lasso
coef(lasso) # look at lasso coefficients at each step
# predict.lars(object = lasso, s = .375, mode = 'fraction', type = 'coefficients')
# Sum of the absolute values of the OLS coefficients (denominator of the
# shrinkage factor s used for the path plot below).
absum <- sum(abs(model$coeff[-1, 1]))
#Build the lasso plot from the ground up
lassum <- apply(abs(coef(lasso)), 1, sum)  # sum of |lasso coefficients| at each step
s <- lassum / absum  # shrinkage factor: ||beta_lasso||_1 / ||beta_OLS||_1
plot(s, coef(lasso)[, 1], ylim = c(-.3, 0.7), type = 'l', lwd = 2,
     xlab = 'Shrinkage factor s',
     main = 'Lasso path - coefficients as a function of shrinkage factor s',
     xlim = c(0, 1.2), axes = FALSE, ylab = 'Coefficient',
     cex.lab = 1.5, cex.axis = 1.4)
axis(1, at = seq(0, 1, .2), cex.axis = 1.4)
axis(2, at = seq(-.3, .7, .2), cex.axis = 1.4)
# One path per remaining predictor (column 1 was drawn by plot() above);
# the loop replaces seven duplicated lines() calls.
for (k in 2:ncol(coef(lasso))) {
  lines(s, coef(lasso)[, k], lwd = 2)
}
# Label each path at the right-hand edge (typo 'ppg45' corrected to 'pgg45')
text(1.05, 0.72, 'lcavol')
text(1.03, 0.34, 'svi')
text(1.05, 0.30, 'lweight')
text(1.05, 0.26, 'pgg45')
text(1.04, 0.20, 'lbph')
text(1.06, -.02, 'gleason')
text(1.03, -.15, 'age')
text(1.03, -.29, 'lcp')
abline(v = s, col = 'lightgray', lty = 3)
####3. Cross validation and choosing s #####
##10 fold Cross validation to choose a value of s
set.seed(389173367) #sets 'starting point' for list of random numbers
#Generate a vector of holdout labels
# NOTE(review): 67 is hard-coded as the number of training observations;
# presumably 67 == nrow(tdats) -- confirm if the data change.
cvlab<-sample(1:10,67,replace=TRUE)
#How many of each label are there?
table(cvlab)
#Create a vector of candidate s values (shrinkage fractions 0, 0.05, ..., 1)
#Try each s value on all cross validated sets
svec<-seq(0,1,.05)
J<-length(svec)
#Initialize a list to store lasso objects from k fold cross validation
lassolist<-list()
#Initialize a list to store predictions from each lasso set
pred<-list()
#Initialize a matrix to store MSE
#Rows correspond to the J values of s, columns correspond to the ten holdout sets
MSEstore<-matrix(NA,J,10)
#Use a for loop to get each lasso fit holding out the ith set
#Then predict the ith set using the holdout model
for(i in 1:10){
lassolist[[i]]<-lars(x=as.matrix(tdats)[cvlab!=i,],y=lpsa[cvlab!=i],type='lasso',trace=FALSE,normalize=TRUE,intercept=TRUE)
# Predict the held-out rows at every candidate s (columns of $fit align with svec)
pred[[i]]<-predict.lars(object=lassolist[[i]],newx=tdats[cvlab==i,],s=svec,mode='fraction',type='fit')$fit
#Start a new loop to get MSE for each combination of ith holdout set and jth value of s
for(j in 1:J){
MSEstore[j,i]<-mean((pred[[i]][,j]-lpsa[cvlab==i])^2) #This computes MSE
}
}
# Mean and standard error of the CV MSE across the 10 holdout sets,
# at each of the J candidate values of s.
meanMSE <- apply(MSEstore, 1, mean)
stdMSE <- apply(MSEstore, 1, sd) / sqrt(10)
# One-standard-error rule: choose the smallest s whose mean MSE is within
# one SE of the overall minimum.  Previously the minimizing index (18) and
# chosen index (9, i.e. s = .4) were hard-coded for this seed/data; they
# are now derived so the plot stays correct if either changes.
i.min <- which.min(meanMSE)
one.se <- meanMSE[i.min] + stdMSE[i.min]
i.chosen <- min(which(meanMSE <= one.se))
plot(svec, meanMSE, ylim = c(0.5, 1.75), pch = 16, col = colors()[258],
     axes = FALSE, cex = 1.2,
     xlab = 'Shrinkage factor s', ylab = 'Mean square error', cex.lab = 1.7,
     main = 'Average CV prediction error as a function of s')
# BUG FIX: the original passed cex.axis twice to axis(1, ...); keep one.
axis(1, cex.axis = 1.2)
axis(2, las = 1, at = seq(0.5, 1.75, .25), cex.axis = 1.2)
lines(svec, meanMSE, lty = 1, col = colors()[258])
# Vertical +/- 1 SE bars at each candidate s
for (i in seq_len(J)) {
  segments(svec[i], meanMSE[i] - stdMSE[i], svec[i], meanMSE[i] + stdMSE[i])
}
abline(h = one.se, lty = 2)
points(svec[i.chosen], meanMSE[i.chosen], col = 'red', pch = 15, cex = 1.3)
legend(.35, 1.5,
       legend = c('mean MSE', 'standard error (SE)', '1 SE above lowest mean',
                  'chosen value of s'),
       pch = c(16, NA, NA, 15), col = c(colors()[258], 1, 1, 'red'),
       cex = 1.1, lty = c(1, 1, 2, NA))
#These are the coefficients for the chosen model (s = .4 under the original
# seed and data; now follows the 1-SE rule automatically)
predict.lars(lasso, s = svec[i.chosen], mode = 'fraction', type = 'coefficients')
|
# Recreates the "Performance overview" faceted bar chart (Figure 3.16 of
# Storytelling with Data) with ggplot2: one row of bars per category,
# "Our business" highlighted in blue with its relative rank annotated.
rm(list = ls())
library(tidyverse)
library(ggtext)
library(lemon)
source("helper_functions.R")
source("theme/theme_swd.R")
# Strip nearly all axis decoration; the facet strip labels act as row labels
# and the legend doubles as the bar labels.
theme_set(theme_swd() + theme(axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
plot.margin = unit(c(1,1,1,4),"cm"),
plot.subtitle = element_markdown(),
strip.text.y.left = element_text(angle = 0,hjust=1,color = GRAY2, size = 10),
legend.position = c(-.35,.9),
legend.key.size = unit(1,"lines"),
legend.title = element_blank(),
legend.text = element_markdown(size = 9,color = GRAY6),
legend.spacing = unit(1, "cm"),
plot.title.position = "panel",
plot.title = element_text(hjust = -.82, margin = margin(b = .5, unit = "cm"), size = 14, color = GRAY2)
))
# Load and reshape: wide scores (one column per business) -> long
# (Category, Business, Result) rows for plotting.
df <- read_csv(file.path("data","FIG0315-16.csv")) %>%
select(-Rank) %>% pivot_longer(cols = !Category, names_to = "Business", values_to = "Result") %>%
# Reorder factors to match the original plot
mutate(Category = forcats::fct_relevel(Category,"Price","Convenience", "Relationship", "Service", "Selection")) %>%
mutate(Business = forcats::fct_rev(forcats::fct_relevel(Business, "Our business", "Competitor A", "Competitor B", "Competitor C", "Competitor D", "Competitor E"))) %>%
# Calculate ranks
# (7 - rank) converts ascending rank to "1 = highest Result" among the 6
# businesses; the label is then blanked for every row except "Our business".
group_by(Category) %>% mutate(rank = paste((7 - rank(Result)),"of 6")) %>% mutate(rank = if_else(Business == "Our business",rank,""))
# Horizontal bars (coord_flip) faceted by category; only "Our business"
# gets the blue fill, all competitors share the same gray.
pt <- ggplot(df) + geom_col(aes(x = Business, y = Result, fill = Business), width = 1) +
scale_fill_manual(values = c(GRAY9,GRAY9,GRAY9,GRAY9,GRAY9, BLUE2), labels = c("Competitor E","Competitor D","Competitor C","Competitor B","Competitor A",sprintf("<span style='color:%s'>**Our business**</span>",BLUE2)),
guide = guide_legend(reverse = T)) +
facet_grid(rows=vars(Category),switch = "y") +
geom_text(aes(x = Business, y = Result, label = rank),nudge_y = .15,nudge_x = .2,color = BLUE2,size = 3) +
coord_flip(clip = "off") +
labs(title = "Performance overview", subtitle = sprintf("Weighted performance index | <span style='color:%s'>relative rank</span>",BLUE2))
# NOTE(review): output filename "FIG03016.png" has an extra zero compared to
# the figure number (FIG0316) -- presumably intentional to match existing
# repo assets; confirm before renaming.
pt %>% save_and_show_plot(width = 7.5, height = 5, "FIG03016.png")
| /FIG0316.R | no_license | wal/storytelling-with-data-ggplot | R | false | false | 2,716 | r | rm(list = ls())
library(tidyverse)
library(ggtext)
library(lemon)
source("helper_functions.R")
source("theme/theme_swd.R")
theme_set(theme_swd() + theme(axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.line = element_blank(),
axis.text = element_blank(),
plot.margin = unit(c(1,1,1,4),"cm"),
plot.subtitle = element_markdown(),
strip.text.y.left = element_text(angle = 0,hjust=1,color = GRAY2, size = 10),
legend.position = c(-.35,.9),
legend.key.size = unit(1,"lines"),
legend.title = element_blank(),
legend.text = element_markdown(size = 9,color = GRAY6),
legend.spacing = unit(1, "cm"),
plot.title.position = "panel",
plot.title = element_text(hjust = -.82, margin = margin(b = .5, unit = "cm"), size = 14, color = GRAY2)
))
df <- read_csv(file.path("data","FIG0315-16.csv")) %>%
select(-Rank) %>% pivot_longer(cols = !Category, names_to = "Business", values_to = "Result") %>%
# Reorder factors to match the original plot
mutate(Category = forcats::fct_relevel(Category,"Price","Convenience", "Relationship", "Service", "Selection")) %>%
mutate(Business = forcats::fct_rev(forcats::fct_relevel(Business, "Our business", "Competitor A", "Competitor B", "Competitor C", "Competitor D", "Competitor E"))) %>%
# Calculate ranks
group_by(Category) %>% mutate(rank = paste((7 - rank(Result)),"of 6")) %>% mutate(rank = if_else(Business == "Our business",rank,""))
pt <- ggplot(df) + geom_col(aes(x = Business, y = Result, fill = Business), width = 1) +
scale_fill_manual(values = c(GRAY9,GRAY9,GRAY9,GRAY9,GRAY9, BLUE2), labels = c("Competitor E","Competitor D","Competitor C","Competitor B","Competitor A",sprintf("<span style='color:%s'>**Our business**</span>",BLUE2)),
guide = guide_legend(reverse = T)) +
facet_grid(rows=vars(Category),switch = "y") +
geom_text(aes(x = Business, y = Result, label = rank),nudge_y = .15,nudge_x = .2,color = BLUE2,size = 3) +
coord_flip(clip = "off") +
labs(title = "Performance overview", subtitle = sprintf("Weighted performance index | <span style='color:%s'>relative rank</span>",BLUE2))
pt %>% save_and_show_plot(width = 7.5, height = 5, "FIG03016.png")
|
library(curl)
library(plyr)
library(readr)
library(dplyr)
library(tidyr)
library(readxl)
WDI_bulk_url <- "http://databank.worldbank.org/data/download/WDI_csv.zip"
WDI_local_filename <- "WDI_csv.zip"
WDI_last_modified <- "WDI_csv.lastmodified"
WDI_local_path <- "WDI_csv"
# Download the WDI bulk zip only if the server copy is newer than our cache.
#
# Sends an If-Modified-Since header built from the timestamp saved on the
# previous successful download.  curl_download() errors on a 304 response,
# which is caught and treated as "cache is current"; any other error is now
# surfaced as a warning instead of being silently swallowed.
#
# Returns the curl handle (useful for inspecting the response).
download_if_modified <- function() {
  h <- new_handle()
  if (file.exists(WDI_last_modified)) {
    last_modified <- read_file(WDI_last_modified)
    handle_setheaders(h, .list = list("If-Modified-Since" = last_modified))
  }
  tryCatch({
    message("Downloading WDI bulk file if changed...")
    curl_download(WDI_bulk_url, WDI_local_filename, quiet = T, handle = h)
    unzip(WDI_local_filename, exdir = WDI_local_path)
    message("WDI bulk file has been updated")
    # Record the server's Last-Modified time for the next run's
    # If-Modified-Since header (HTTP date format, GMT).
    last_modified <- handle_data(h)$modified
    last_modified <- strftime(
      last_modified,
      format = "%a, %d %b %Y %H:%M:%S",
      tz = "GMT", usetz = T
    )
    write_file(last_modified, WDI_last_modified)
  }, error = function(e) {
    if (handle_data(h)$status_code == 304) {
      message("WDI bulk file has not been updated, using cached copy.")
    } else {
      # BUG FIX: any non-304 failure (network down, bad zip, ...) was
      # previously ignored silently, so a real problem looked like a cache hit.
      warning("WDI download failed: ", conditionMessage(e), call. = FALSE)
    }
  }
  )
  h
}
h <- download_if_modified()
wdi_data <- read_csv(file.path(WDI_local_path, "WDIData.csv"))
wdi_series <- read_csv(file.path(WDI_local_path, "WDISeries.csv"))
wdi_countries <- read_csv(file.path(WDI_local_path, "WDICountry.csv"))
wdi_data <- wdi_data %>%
select(-starts_with("X")) %>% # Remove column read_csv created from trailing , in file
select(-`Indicator Name`, -`Country Name`)
wdi_long <- wdi_data %>% gather("year", "value", `1960`:`2016`, convert=T)
summ_country_indicator <- wdi_long %>%
group_by(`Indicator Code`, `Country Code`) %>%
summarise(
value.count = sum(!is.na(value)),
latest.year = max(year[!is.na(value)]),
latest.year.finite = ifelse(is.finite(latest.year), latest.year, NA)
)
# Per-indicator summary across countries: distribution of how many yearly
# values each country has, and of the latest year with data (both the raw
# version, where "no data" is -Inf, and the finite-only version).
summ_indicator <- summ_country_indicator %>%
  group_by(`Indicator Code`) %>%
  summarise(
    years.min = min(value.count),
    years.max = max(value.count),
    years.p10 = quantile(value.count, 0.10),
    years.p25 = quantile(value.count, 0.25),
    years.median = quantile(value.count, 0.50),
    years.p75 = quantile(value.count, 0.75),
    countries.maxyears = sum(value.count == max(value.count)),
    latest.year.min = min(latest.year),
    latest.year.max = max(latest.year),
    latest.year.p10 = quantile(latest.year, 0.10),
    latest.year.p25 = quantile(latest.year, 0.25),
    latest.year.median = quantile(latest.year, 0.50),
    latest.year.p75 = quantile(latest.year, 0.75),
    latest.year.noninf.p10 = quantile(latest.year.finite, 0.10, na.rm = T),
    latest.year.noninf.p25 = quantile(latest.year.finite, 0.25, na.rm = T),
    latest.year.noninf.median = quantile(latest.year.finite, 0.50, na.rm = T),
    # BUG FIX: removed the trailing comma after this argument, which dplyr
    # versions that reject empty arguments treat as an error.
    latest.year.noninf.p75 = quantile(latest.year.finite, 0.75, na.rm = T)
  )
wdi_series_codes <- wdi_series %>%
select(`Series Code`) %>%
mutate(
code_parts = strsplit(`Series Code`, ".", fixed=T),
code_part_1 = sapply(code_parts, first),
code_part_2 = sapply(code_parts, function(x) x[2])
)
# Treemap ####
library(ggplot2)
library(treemapify)
ggplot(wdi_series_codes, aes(area = 1, fill = code_part_1, subgroup = code_part_1)) +
geom_treemap(color="white") +
geom_treemap_subgroup_text(place = "centre", colour = "white", size = 16, min.size = 14, fontface="bold") +
theme(legend.position = "none")
# Time & Region coverage ####
country_count <- wdi_long %>% pull(`Country Code`) %>% unique %>% length
indicator_decade <- wdi_long %>%
# left_join(wdi_countries %>% select(`Country Code`, Region)) %>%
# filter(!is.na(Region)) %>%
mutate(decade = floor(year/10)*10) %>%
group_by(decade) %>%
group_by(`Indicator Code`, add = TRUE) %>%
filter(!is.na(value)) %>%
summarise(countries = length(unique(`Country Code`))) %>%
ungroup %>%
complete(decade, `Indicator Code`, fill = list(countries = 0)) %>%
spread(decade, countries)
regions <- list(
"LCN" = "Latin America & Caribbean",
"SAS" = "South Asia",
"SSF" = "Sub-Saharan Africa",
"ECS" = "Europe & Central Asia",
"MEA" = "Middle East & North Africa",
"EAS" = "East Asia & Pacific",
"NAC" = "North America"
)
indicator_region <- wdi_long %>%
filter(year >= 2010) %>%
left_join(wdi_countries %>% select(`Country Code`, Region)) %>%
filter(!is.na(Region)) %>%
group_by(Region) %>%
group_by(`Indicator Code`, add = TRUE) %>%
filter(!is.na(value)) %>%
summarise(countries = length(unique(`Country Code`))) %>%
ungroup %>%
complete(Region, `Indicator Code`, fill = list(countries = 0)) %>%
spread(Region, countries)
names(indicator_region)[match(regions, names(indicator_region))] <- names(regions)
region_country_count <- indicator_region %>%
select(-`Indicator Code`) %>%
summarise_all(max)
indicator_meta <- indicator_decade %>%
full_join(indicator_region) %>%
mutate_all(funs(coalesce(., 0))) %>%
left_join(wdi_series %>% select(`Series Code`, `Indicator Name`), by = c("Indicator Code" = "Series Code"))
library(htmltools)
# Coverage glyph for one decade: pick the zero/low/medium/high icon from the
# share of countries with data, and wrap it in an <img> tag whose tooltip
# reports the country count.
make_decade <- function(decade, countries, country_count) {
  share <- countries / country_count
  level <- if (share == 0.0) {
    "zero"
  } else if (share < 0.2) {
    "low"
  } else if (share > 0.8) {
    "high"
  } else {
    "medium"
  }
  icon_src <- paste0("images/", level, "_time.png")
  tooltip <- paste0(decade, "s: ", countries, " countries available")
  tags$img(src = icon_src, title = tooltip, width = 16, height = 16)
}
# Coverage glyph for one region: icon choice mirrors make_decade, but the
# image is region-specific and the share is computed against that region's
# own country total.
make_region <- function(region_code, region, countries, region_country_count) {
  share <- countries / region_country_count
  level <- if (share == 0.0) {
    "zero"
  } else if (share < 0.2) {
    "low"
  } else if (share > 0.8) {
    "high"
  } else {
    "medium"
  }
  icon_src <- paste0("images/", level, "_", region_code, ".png")
  tooltip <- paste0(region, ": ", countries, " countries available (out of ",
                    region_country_count, ")")
  tags$img(src = icon_src, title = tooltip, width = 16, height = 16)
}
# Render one indicator as an HTML table row: indicator name, a link to its
# page on data.worldbank.org, six decade-coverage icons, and one icon per
# region.  `df` is a single-row data frame (one row of indicator_meta).
# Relies on the globals `country_count`, `regions` and
# `region_country_count` defined earlier in this script.
make_indicator <- function(df) {
tags$tr(
tags$td(df$`Indicator Name`),
tags$td(tags$a(href=paste0("//data.worldbank.org/indicator/", df$`Indicator Code`), df$`Indicator Code`)),
# One coverage icon per decade, scaled against the global country count
tags$td(
make_decade(1960, df$`1960`, country_count),
make_decade(1970, df$`1970`, country_count),
make_decade(1980, df$`1980`, country_count),
make_decade(1990, df$`1990`, country_count),
make_decade(2000, df$`2000`, country_count),
make_decade(2010, df$`2010`, country_count)
),
# One coverage icon per region, scaled against that region's own total
tags$td(
lapply(names(regions), function(code) { make_region(code, regions[[code]], df[[code]], region_country_count[[code]]) })
)
)
}
# Build the rows for one indicator group: a group-title row spanning all
# four columns, a column-header row, then one row per indicator
# (make_indicator applied to each data-frame row via plyr::alply).
make_indicator_group <- function(df) {
  title_row <- tags$tr(tags$th(colspan = 4, df$`Indicator Group`[1]),
                       class = "group")
  header_row <- tags$tr(tags$th("Indicator"), tags$th("Code"),
                        tags$th("Time coverage"), tags$th("Region coverage"),
                        class = "headers")
  indicator_rows <- alply(df, 1, make_indicator)
  c(list(title_row, header_row), indicator_rows)
}
# Assemble the full HTML <table>: plyr::dlply splits `meta` by
# `Indicator Group` and builds each group's rows via make_indicator_group;
# unlist(..., recursive = FALSE) flattens the per-group row lists into one
# flat list of <tr> children for tags$table.
make_indicators_table <- function(meta) {
groups <- dlply(meta, .(`Indicator Group`), make_indicator_group)
tags$table(unlist(groups, recursive = FALSE), class="indicators")
}
bytopic <- indicator_meta %>%
left_join(wdi_series %>% select(`Series Code`, `Topic`), by = c("Indicator Code"="Series Code")) %>%
rename(`Indicator Group` = Topic) %>%
arrange(`Indicator Group`, `Indicator Name`)
ind_table <- make_indicators_table(bytopic)
page <- tags$html(
tags$link(rel="stylesheet", type="text/css", href="theme.css"),
tags$body(ind_table)
)
write_file(as.character(page), "docs/index.html")
poverty <- read_xlsx("poverty_indicators.xlsx") %>% select(`Indicator Group`,`Indicator Code`)
poverty <- poverty %>%
left_join(indicator_meta) %>%
mutate_all(function(x) ifelse(is.na(x), 0, x)) %>%
mutate(`Indicator Group` = factor(`Indicator Group`, unique(`Indicator Group`)))
ind_table <- make_indicators_table(poverty)
page <- tags$html(
tags$link(rel="stylesheet", type="text/css", href="theme.css"),
tags$body(ind_table)
)
write_file(as.character(page), "test.html")
| /WDI_coverage.R | no_license | econandrew/WDI_coverage | R | false | false | 7,938 | r | library(curl)
library(plyr)
library(readr)
library(dplyr)
library(tidyr)
library(readxl)
WDI_bulk_url <- "http://databank.worldbank.org/data/download/WDI_csv.zip"
WDI_local_filename <- "WDI_csv.zip"
WDI_last_modified <- "WDI_csv.lastmodified"
WDI_local_path <- "WDI_csv"
# Download the WDI bulk zip only if the server copy is newer than our cache.
#
# Sends an If-Modified-Since header built from the timestamp saved on the
# previous successful download.  curl_download() errors on a 304 response,
# which is caught and treated as "cache is current"; any other error is now
# surfaced as a warning instead of being silently swallowed.
#
# Returns the curl handle (useful for inspecting the response).
download_if_modified <- function() {
  h <- new_handle()
  if (file.exists(WDI_last_modified)) {
    last_modified <- read_file(WDI_last_modified)
    handle_setheaders(h, .list = list("If-Modified-Since" = last_modified))
  }
  tryCatch({
    message("Downloading WDI bulk file if changed...")
    curl_download(WDI_bulk_url, WDI_local_filename, quiet = T, handle = h)
    unzip(WDI_local_filename, exdir = WDI_local_path)
    message("WDI bulk file has been updated")
    # Record the server's Last-Modified time for the next run's
    # If-Modified-Since header (HTTP date format, GMT).
    last_modified <- handle_data(h)$modified
    last_modified <- strftime(
      last_modified,
      format = "%a, %d %b %Y %H:%M:%S",
      tz = "GMT", usetz = T
    )
    write_file(last_modified, WDI_last_modified)
  }, error = function(e) {
    if (handle_data(h)$status_code == 304) {
      message("WDI bulk file has not been updated, using cached copy.")
    } else {
      # BUG FIX: any non-304 failure (network down, bad zip, ...) was
      # previously ignored silently, so a real problem looked like a cache hit.
      warning("WDI download failed: ", conditionMessage(e), call. = FALSE)
    }
  }
  )
  h
}
h <- download_if_modified()
wdi_data <- read_csv(file.path(WDI_local_path, "WDIData.csv"))
wdi_series <- read_csv(file.path(WDI_local_path, "WDISeries.csv"))
wdi_countries <- read_csv(file.path(WDI_local_path, "WDICountry.csv"))
wdi_data <- wdi_data %>%
select(-starts_with("X")) %>% # Remove column read_csv created from trailing , in file
select(-`Indicator Name`, -`Country Name`)
wdi_long <- wdi_data %>% gather("year", "value", `1960`:`2016`, convert=T)
summ_country_indicator <- wdi_long %>%
group_by(`Indicator Code`, `Country Code`) %>%
summarise(
value.count = sum(!is.na(value)),
latest.year = max(year[!is.na(value)]),
latest.year.finite = ifelse(is.finite(latest.year), latest.year, NA)
)
summ_indicator <- summ_country_indicator %>%
group_by(`Indicator Code`) %>%
summarise(
years.min = min(value.count),
years.max = max(value.count),
years.p10 = quantile(value.count, 0.10),
years.p25 = quantile(value.count, 0.25),
years.median = quantile(value.count, 0.50),
years.p75 = quantile(value.count, 0.75),
countries.maxyears = sum(value.count == max(value.count)),
latest.year.min = min(latest.year),
latest.year.max = max(latest.year),
latest.year.p10 = quantile(latest.year, 0.10),
latest.year.p25 = quantile(latest.year, 0.25),
latest.year.median = quantile(latest.year, 0.50),
latest.year.p75 = quantile(latest.year, 0.75),
latest.year.noninf.p10 = quantile(latest.year.finite, 0.10, na.rm=T),
latest.year.noninf.p25 = quantile(latest.year.finite, 0.25, na.rm=T),
latest.year.noninf.median = quantile(latest.year.finite, 0.50, na.rm=T),
latest.year.noninf.p75 = quantile(latest.year.finite, 0.75, na.rm=T),
)
wdi_series_codes <- wdi_series %>%
select(`Series Code`) %>%
mutate(
code_parts = strsplit(`Series Code`, ".", fixed=T),
code_part_1 = sapply(code_parts, first),
code_part_2 = sapply(code_parts, function(x) x[2])
)
# Treemap ####
library(ggplot2)
library(treemapify)
ggplot(wdi_series_codes, aes(area = 1, fill = code_part_1, subgroup = code_part_1)) +
geom_treemap(color="white") +
geom_treemap_subgroup_text(place = "centre", colour = "white", size = 16, min.size = 14, fontface="bold") +
theme(legend.position = "none")
# Time & Region coverage ####
country_count <- wdi_long %>% pull(`Country Code`) %>% unique %>% length
indicator_decade <- wdi_long %>%
# left_join(wdi_countries %>% select(`Country Code`, Region)) %>%
# filter(!is.na(Region)) %>%
mutate(decade = floor(year/10)*10) %>%
group_by(decade) %>%
group_by(`Indicator Code`, add = TRUE) %>%
filter(!is.na(value)) %>%
summarise(countries = length(unique(`Country Code`))) %>%
ungroup %>%
complete(decade, `Indicator Code`, fill = list(countries = 0)) %>%
spread(decade, countries)
regions <- list(
"LCN" = "Latin America & Caribbean",
"SAS" = "South Asia",
"SSF" = "Sub-Saharan Africa",
"ECS" = "Europe & Central Asia",
"MEA" = "Middle East & North Africa",
"EAS" = "East Asia & Pacific",
"NAC" = "North America"
)
indicator_region <- wdi_long %>%
filter(year >= 2010) %>%
left_join(wdi_countries %>% select(`Country Code`, Region)) %>%
filter(!is.na(Region)) %>%
group_by(Region) %>%
group_by(`Indicator Code`, add = TRUE) %>%
filter(!is.na(value)) %>%
summarise(countries = length(unique(`Country Code`))) %>%
ungroup %>%
complete(Region, `Indicator Code`, fill = list(countries = 0)) %>%
spread(Region, countries)
names(indicator_region)[match(regions, names(indicator_region))] <- names(regions)
region_country_count <- indicator_region %>%
select(-`Indicator Code`) %>%
summarise_all(max)
indicator_meta <- indicator_decade %>%
full_join(indicator_region) %>%
mutate_all(funs(coalesce(., 0))) %>%
left_join(wdi_series %>% select(`Series Code`, `Indicator Name`), by = c("Indicator Code" = "Series Code"))
library(htmltools)
# Coverage glyph for one decade: pick the zero/low/medium/high icon from the
# share of countries with data, and wrap it in an <img> tag whose tooltip
# reports the country count.
make_decade <- function(decade, countries, country_count) {
  share <- countries / country_count
  level <- if (share == 0.0) {
    "zero"
  } else if (share < 0.2) {
    "low"
  } else if (share > 0.8) {
    "high"
  } else {
    "medium"
  }
  icon_src <- paste0("images/", level, "_time.png")
  tooltip <- paste0(decade, "s: ", countries, " countries available")
  tags$img(src = icon_src, title = tooltip, width = 16, height = 16)
}
# Coverage glyph for one region: icon choice mirrors make_decade, but the
# image is region-specific and the share is computed against that region's
# own country total.
make_region <- function(region_code, region, countries, region_country_count) {
  share <- countries / region_country_count
  level <- if (share == 0.0) {
    "zero"
  } else if (share < 0.2) {
    "low"
  } else if (share > 0.8) {
    "high"
  } else {
    "medium"
  }
  icon_src <- paste0("images/", level, "_", region_code, ".png")
  tooltip <- paste0(region, ": ", countries, " countries available (out of ",
                    region_country_count, ")")
  tags$img(src = icon_src, title = tooltip, width = 16, height = 16)
}
make_indicator <- function(df) {
tags$tr(
tags$td(df$`Indicator Name`),
tags$td(tags$a(href=paste0("//data.worldbank.org/indicator/", df$`Indicator Code`), df$`Indicator Code`)),
tags$td(
make_decade(1960, df$`1960`, country_count),
make_decade(1970, df$`1970`, country_count),
make_decade(1980, df$`1980`, country_count),
make_decade(1990, df$`1990`, country_count),
make_decade(2000, df$`2000`, country_count),
make_decade(2010, df$`2010`, country_count)
),
tags$td(
lapply(names(regions), function(code) { make_region(code, regions[[code]], df[[code]], region_country_count[[code]]) })
)
)
}
make_indicator_group <- function(df) {
group <- tags$tr(tags$th(colspan = 4, df$`Indicator Group`[1]), class="group")
head <- tags$tr(tags$th("Indicator"),tags$th("Code"),tags$th("Time coverage"),tags$th("Region coverage"), class="headers")
rows <- alply(df, 1, make_indicator)
c(list(group), list(head), rows)
}
make_indicators_table <- function(meta) {
groups <- dlply(meta, .(`Indicator Group`), make_indicator_group)
tags$table(unlist(groups, recursive = FALSE), class="indicators")
}
bytopic <- indicator_meta %>%
left_join(wdi_series %>% select(`Series Code`, `Topic`), by = c("Indicator Code"="Series Code")) %>%
rename(`Indicator Group` = Topic) %>%
arrange(`Indicator Group`, `Indicator Name`)
ind_table <- make_indicators_table(bytopic)
page <- tags$html(
tags$link(rel="stylesheet", type="text/css", href="theme.css"),
tags$body(ind_table)
)
write_file(as.character(page), "docs/index.html")
poverty <- read_xlsx("poverty_indicators.xlsx") %>% select(`Indicator Group`,`Indicator Code`)
poverty <- poverty %>%
left_join(indicator_meta) %>%
mutate_all(function(x) ifelse(is.na(x), 0, x)) %>%
mutate(`Indicator Group` = factor(`Indicator Group`, unique(`Indicator Group`)))
ind_table <- make_indicators_table(poverty)
page <- tags$html(
tags$link(rel="stylesheet", type="text/css", href="theme.css"),
tags$body(ind_table)
)
write_file(as.character(page), "test.html")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SmoothedPG.R
\name{getCoherencySdNaive-SmoothedPG}
\alias{getCoherencySdNaive-SmoothedPG}
\alias{getCoherencySdNaive,SmoothedPG-method}
\title{Get estimates for the standard deviation of the coherency computed from
smoothed quantile periodogram.}
\usage{
\S4method{getCoherencySdNaive}{SmoothedPG}(
object,
frequencies = 2 * pi * (0:(lenTS(object@qPG@freqRep@Y) -
1))/lenTS(object@qPG@freqRep@Y),
levels.1 = getLevels(object, 1),
levels.2 = getLevels(object, 2),
d1 = 1:(dim(object@values)[2]),
d2 = 1:(dim(object@values)[4]),
type = c("1", "2"),
impl = c("R", "C")
)
}
\arguments{
\item{object}{\code{\link{SmoothedPG}} of which to get the estimates for the
standard deviation.}
\item{frequencies}{a vector of frequencies for which to get the result}
\item{levels.1}{the first vector of levels for which to get the result}
\item{levels.2}{the second vector of levels for which to get the result}
\item{d1}{optional parameter that determine for which j1 to return the
data; may be a vector of elements 1, ..., D}
\item{d2}{same as d1, but for j2}
\item{type}{can be "1", where cov(Z, Conj(Z)) is subtracted, or "2", where
it's not}
\item{impl}{choose "R" or "C" for one of the two implementations available}
}
\value{
Returns the estimate described above.
}
\description{
Determines and returns an array of dimension \code{[J,K1,K2]},
where \code{J=length(frequencies)}, \code{K1=length(levels.1)}, and
\code{K2=length(levels.2))}. Whether
available or not, bootstrap repetitions are ignored by this procedure.
At position \code{(j,k1,k2)}
the returned value is the standard deviation estimated corresponding to
\code{frequencies[j]}, \code{levels.1[k1]} and \code{levels.2[k2]} that are
closest to the
\code{frequencies}, \code{levels.1} and \code{levels.2}
available in \code{object}; \code{\link{closest.pos}} is used to determine
what closest to means.
}
\details{
If not only one, but multiple time series are under study, the dimension of
the returned vector is of dimension \code{[J,P,K1,P,K2]}, where \code{P}
denotes the dimension of the time series.
Requires that the \code{\link{SmoothedPG}} is available at all Fourier
frequencies from \eqn{(0,\pi]}{(0,pi]}. If this is not the case the missing
values are imputed by taking one that is available and has a frequency
that is closest to the missing Fourier frequency; \code{closest.pos} is used
to determine which one this is.
A precise definition on how the standard deviations of the smoothed quantile
periodogram are estimated is given in Barunik and Kley (2015). The estimate
returned is denoted by
\eqn{\sigma(\tau_1, \tau_2; \omega)}{sigma(tau1, tau2; omega)} on p. 26 of
the arXiv preprint.
Note that the ``standard deviation'' estimated here is not the square root of the
complex-valued variance. Its real part is the square root of the variance
of the real part of the estimator and the imaginary part is the square root
of the imaginary part of the variance of the estimator.
}
\references{
Kley, T., Volgushev, S., Dette, H. & Hallin, M. (2016).
Quantile Spectral Processes: Asymptotic Analysis and Inference.
\emph{Bernoulli}, \bold{22}(3), 1770--1807.
[cf. \url{http://arxiv.org/abs/1401.8104}]
Barunik, J. & Kley, T. (2015).
Quantile Cross-Spectral Measures of Dependence between Economic Variables.
[preprint available from the authors]
}
\keyword{Access-functions}
| /man/getCoherencySdNaive-SmoothedPG.Rd | no_license | cran/quantspec | R | false | true | 3,540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SmoothedPG.R
\name{getCoherencySdNaive-SmoothedPG}
\alias{getCoherencySdNaive-SmoothedPG}
\alias{getCoherencySdNaive,SmoothedPG-method}
\title{Get estimates for the standard deviation of the coherency computed from
smoothed quantile periodogram.}
\usage{
\S4method{getCoherencySdNaive}{SmoothedPG}(
object,
frequencies = 2 * pi * (0:(lenTS(object@qPG@freqRep@Y) -
1))/lenTS(object@qPG@freqRep@Y),
levels.1 = getLevels(object, 1),
levels.2 = getLevels(object, 2),
d1 = 1:(dim(object@values)[2]),
d2 = 1:(dim(object@values)[4]),
type = c("1", "2"),
impl = c("R", "C")
)
}
\arguments{
\item{object}{\code{\link{SmoothedPG}} of which to get the estimates for the
standard deviation.}
\item{frequencies}{a vector of frequencies for which to get the result}
\item{levels.1}{the first vector of levels for which to get the result}
\item{levels.2}{the second vector of levels for which to get the result}
\item{d1}{optional parameter that determine for which j1 to return the
data; may be a vector of elements 1, ..., D}
\item{d2}{same as d1, but for j2}
\item{type}{can be "1", where cov(Z, Conj(Z)) is subtracted, or "2", where
it's not}
\item{impl}{choose "R" or "C" for one of the two implementations available}
}
\value{
Returns the estimate described above.
}
\description{
Determines and returns an array of dimension \code{[J,K1,K2]},
where \code{J=length(frequencies)}, \code{K1=length(levels.1)}, and
\code{K2=length(levels.2))}. Whether
available or not, bootstrap repetitions are ignored by this procedure.
At position \code{(j,k1,k2)}
the returned value is the standard deviation estimated corresponding to
\code{frequencies[j]}, \code{levels.1[k1]} and \code{levels.2[k2]} that are
closest to the
\code{frequencies}, \code{levels.1} and \code{levels.2}
available in \code{object}; \code{\link{closest.pos}} is used to determine
what closest to means.
}
\details{
If not only one, but multiple time series are under study, the dimension of
the returned vector is of dimension \code{[J,P,K1,P,K2]}, where \code{P}
denotes the dimension of the time series.
Requires that the \code{\link{SmoothedPG}} is available at all Fourier
frequencies from \eqn{(0,\pi]}{(0,pi]}. If this is not the case the missing
values are imputed by taking one that is available and has a frequency
that is closest to the missing Fourier frequency; \code{closest.pos} is used
to determine which one this is.
A precise definition on how the standard deviations of the smoothed quantile
periodogram are estimated is given in Barunik and Kley (2015). The estimate
returned is denoted by
\eqn{\sigma(\tau_1, \tau_2; \omega)}{sigma(tau1, tau2; omega)} on p. 26 of
the arXiv preprint.
Note that the ``standard deviation'' estimated here is not the square root of the
complex-valued variance. Its real part is the square root of the variance
of the real part of the estimator and the imaginary part is the square root
of the imaginary part of the variance of the estimator.
}
\references{
Kley, T., Volgushev, S., Dette, H. & Hallin, M. (2016).
Quantile Spectral Processes: Asymptotic Analysis and Inference.
\emph{Bernoulli}, \bold{22}(3), 1770--1807.
[cf. \url{http://arxiv.org/abs/1401.8104}]
Barunik, J. & Kley, T. (2015).
Quantile Cross-Spectral Measures of Dependence between Economic Variables.
[preprint available from the authors]
}
\keyword{Access-functions}
|
library(tidyhydat)
### Name: hy_daily
### Title: Extract all daily water level and flow measurements
### Aliases: hy_daily
### ** Examples
## Not run:
##D hy_daily(station_number = c("02JE013","08MF005"))
## End(Not run)
| /data/genthat_extracted_code/tidyhydat/examples/hy_daily.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 229 | r | library(tidyhydat)
### Name: hy_daily
### Title: Extract all daily water level and flow measurements
### Aliases: hy_daily
### ** Examples
## Not run:
##D hy_daily(station_number = c("02JE013","08MF005"))
## End(Not run)
|
# Shiny UI definition: sidebar layout with a region selector, a report
# download button, and a barplot of the AT&T WorldPhones data rendered by
# the matching server.R ("phonePlot" / "report" output IDs).
# Rely on the 'WorldPhones' dataset in the datasets
# package (which generally comes preloaded).
library(datasets)
# Use a fluid Bootstrap layout
fluidPage(
tags$head(
# Custom favicon.
# NOTE(review): hard-coded localhost URL only resolves on the developer
# machine -- confirm before deploying.
tags$link(
rel = "icon",
type = "image/x-icon",
href = "http://localhost:1984/default.ico")
),
# Give the page a title
titlePanel("Telephones by region"),
# Generate a row with a sidebar
sidebarLayout(
# Define the sidebar with one input
sidebarPanel(
# Region choices come straight from the dataset's column names
selectInput("region", "Region:",
choices=colnames(WorldPhones)),
hr(),
helpText("Data from AT&T (1961) The World's Telephones."),
hr(),
# Served by a downloadHandler named "report" in server.R
downloadButton("report", "Generate report")
),
# Create a spot for the barplot
mainPanel(
plotOutput("phonePlot")
)
)
)
| /ui.R | no_license | makrez/shiny_deployment_example | R | false | false | 797 | r | # Rely on the 'WorldPhones' dataset in the datasets
# package (which generally comes preloaded).
library(datasets)
# Use a fluid Bootstrap layout.
# NOTE: the fluidPage() call below is this file's last expression and is
# therefore the UI object shiny picks up -- it must remain the final value.
fluidPage(
  tags$head(
    tags$link(
      rel = "icon",
      type = "image/x-icon",
      # Favicon fetched from a local helper on port 1984 --
      # NOTE(review): confirm that service is started elsewhere in the app.
      href = "http://localhost:1984/default.ico")
  ),
  # Give the page a title
  titlePanel("Telephones by region"),
  # Generate a row with a sidebar
  sidebarLayout(
    # Define the sidebar with one input
    sidebarPanel(
      # Dropdown over the dataset's regions (column names of WorldPhones)
      selectInput("region", "Region:",
                  choices=colnames(WorldPhones)),
      hr(),
      helpText("Data from AT&T (1961) The World's Telephones."),
      hr(),
      # Expects a matching 'report' downloadHandler on the server side --
      # NOTE(review): verify against server.R.
      downloadButton("report", "Generate report")
    ),
    # Create a spot for the barplot
    mainPanel(
      plotOutput("phonePlot")
    )
  )
)
|
#' @title Generate Genotyping Error Matrix
#'
#' @description Generate a matrix with the probabilities of observed genotypes
#' (columns) conditional on actual genotypes (rows), or return a function to
#' generate such matrices (using a single value Err as input to that
#' function).
#'
#' @details By default (\code{flavour} = "version2.0"), \code{Err} is
#' interpreted as a locus-level error rate (rather than allele-level), and
#' equals the probability that an actual heterozygote is observed as either
#' homozygote (i.e., the probability that it is observed as AA = probability
#' that observed as aa = \code{Err}/2). The probability that one homozygote is
#' observed as the other is (\code{Err}/2\eqn{)^2}.
#'
#' The inbuilt 'flavours' correspond to the presumed and simulated error
#' structures, which have changed with sequoia versions. The most appropriate
#' error structure will depend on the genotyping platform; 'version0.9' and
#' 'version1.1' were inspired by SNP array genotyping while 'version1.3' and
#' 'version2.0' are intended to be more general.
#'
#' Pr(observed genotype (columns) | actual genotype (rows)):
#'
#' \emph{version2.0:}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{(1-E/2)^2} \tab \eqn{E(1-E/2)} \tab \eqn{(E/2)^2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{(E/2)^2} \tab \eqn{E(1-E/2)} \tab \eqn{(1-E/2)^2} \cr
#' }
#'
#' \emph{version1.3}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E-(E/2)^2} \tab \eqn{E} \tab \eqn{(E/2)^2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{(E/2)^2} \tab \eqn{E} \tab \eqn{1-E-(E/2)^2} \cr
#' }
#'
#' \emph{version1.1}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E} \tab \eqn{E/2} \tab \eqn{E/2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{E/2} \tab \eqn{E/2} \tab \eqn{1-E} \cr
#' }
#'
#' \emph{version0.9} (not recommended)
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E} \tab \eqn{E} \tab \eqn{0} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{0} \tab \eqn{E} \tab \eqn{1-E} \cr
#' }
#'
#' When \code{Err} is a length 3 vector, or if \code{Return = 'vector'} these
#' are the following probabilities:
#' \itemize{
#' \item hom|hom: an actual homozygote is observed as the other homozygote
#' \item het|hom: an actual homozygote is observed as heterozygote
#' \item hom|het: an actual heterozygote is observed as homozygote
#' }
#'
#' and Pr(observed genotype (columns) | actual genotype (rows)) is then:
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E_1-E_2} \tab \eqn{E_2} \tab \eqn{E_1} \cr
#' \strong{1} \tab \eqn{E_3} \tab \eqn{1-2E_3} \tab \eqn{E_3} \cr
#' \strong{2} \tab \eqn{E_1} \tab \eqn{E_2} \tab \eqn{1-E_1-E_2} \cr
#' }
#'
#' The only assumption made is that the two alleles can be treated equally,
#' i.e. observing actual allele $A$ as $a$ is as likely as observing actual $a$
#' as $A$, and so e.g. P(obs=1|act=0) = P(obs=1|act=2).
#'
#' When the SNPs are scored via sequencing (e.g. RADseq or DArTseq), the 3rd
#' error rate (hom|het) is typically considerably higher than the other two,
#' while for SNP arrays it tends to be similar to P(het|hom).
#'
#'
#' @param Err estimated genotyping error rate, as a single number, or 3x3 or 4x4
#' matrix, or length 3 vector. If a single number, an error model is used that
#' aims to deal with scoring errors typical for SNP arrays. If a matrix, this
#' should be the probability of observed genotype (columns) conditional on
#' actual genotype (rows). Each row must therefore sum to 1. If
#' \code{Return='function'}, this may be \code{NA}. If a vector, these are the
#' probabilities (observed given actual) hom|other hom, het|hom, and hom|het.
#' @param flavour matrix-generating function, or one of 'version2.0',
#' 'version1.3' (='SNPchip'), 'version1.1' (='version111'), referring to the
#' sequoia version in which it was used as default. Only used if \code{Err} is
#' a single number.
#' @param Return output, 'matrix' (default), 'function', or 'vector'
#'
#' @return Depending on \code{Return}, either:
#' \itemize{
#' \item \code{'matrix'}: a 3x3 matrix, with probabilities of observed genotypes
#' (columns) conditional on actual (rows)
#' \item \code{'function'}: a function taking a single value \code{Err} as input, and
#' generating a 3x3 matrix
#' \item \code{'vector'}: a length 3 vector, with the probabilities (observed given
#' actual) hom|other hom, het|hom, and hom|het.
#' }
#'
#' @seealso \code{\link{EstEr}} to estimate genotyping error rate as a length 3
#' vector.
#'
#' @examples
#' ErM <- ErrToM(Err = 0.05)
#' ErM
#' ErrToM(ErM, Return = 'vector')
#'
#' ErrToM(Err = 0.05, flavour = function(E) { # Whalen, Gorjanc & Hickey 2018
#' matrix(c(1-E*3/4, E/2, E/4,
#' E/4, 1-2*E/4, E/4,
#' E/4, E/2, 1-E*3/4),
#' 3,3, byrow=TRUE) })
#' ErrToM(Err = c(0.05/4, 0.05/2, 0.05/4))
#'
#' @export
ErrToM <- function(Err = NA,
                   flavour = "version2.0",
                   Return = "matrix")
{
  ## The default Err = NA is only meaningful when a matrix-generating function
  ## is requested; a placeholder rate is then used to probe/validate 'flavour'.
  if (length(Err) == 1 && is.na(Err) && Return == "function") Err <- 0.1 # only used for testing
  if (!is.atomic(Err) || !length(Err) %in% c(1, 3, 9, 16)) stop("'Err' must be a single number, length 3 vector, or 3x3 matrix")
  ## Fail early & clearly on missing values; an NA inside a vector/matrix
  ## would otherwise surface further down as the cryptic error
  ## "missing value where TRUE/FALSE needed".
  if (anyNA(Err)) stop("'Err' may not contain missing values")
  ## is.numeric() (rather than is.double()) also accepts integer input, e.g. 0L.
  if (any(Err < 0 | Err > 1) || !is.numeric(Err)) stop("'Err' must be (a) number(s) between 0 and 1")
  # ErrM: observed (columns) conditional on actual (rows)
  ErrM <- NULL
  ErFunc <- NULL
  if (is.matrix(Err)) {
    if (nrow(Err) == 3 && ncol(Err) == 3) {      # '&&': scalar comparisons
      ErrM <- Err
    } else if (nrow(Err) == 4 && ncol(Err) == 4) {
      ErrM <- shrinkEM(Err)   # collapse (aa, aA, Aa, AA) to dosage 0/1/2
    } else {
      stop("Error matrix should be a 3x3 or 4x4 matrix")
    }
  } else if (length(Err) == 3) {
    ErrM <- ErV2M(Err)   # (hom|other hom, het|hom, hom|het) probabilities
  } else {
    ## Single error rate: build the matrix via the requested 'flavour'.
    if (is.function(flavour)) {
      ErrM <- flavour(Err)
      if (!is.matrix(ErrM)) stop("ErFunc(E) should return a 3x3 or 4x4 matrix")
      if (!(all(dim(ErrM) == 4) | all(dim(ErrM) == 3))) stop("ErFunc(E) should return a 4x4 or 3x3 matrix")
      ## sanity check: the supplied function must actually depend on E
      ErrM.B <- flavour(Err + 0.1)
      if (all(ErrM.B == ErrM)) stop("ErFunc(E) is not a function of error rate E")
      ErFunc <- flavour
    } else if (flavour %in% c("version2.0", "2.0")) {
      ErFunc <- function(E) {
        matrix(c((1 - E/2)^2, E * (1 - E/2), (E/2)^2,
                 E/2, 1 - E, E/2,
                 (E/2)^2, E * (1 - E/2), (1 - E/2)^2),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version1.3", "SNPchip", "1.3")) {
      ErFunc <- function(E) {
        matrix(c(1 - E - (E/2)^2, E, (E/2)^2,
                 E/2, 1 - E, E/2,
                 (E/2)^2, E, 1 - E - (E/2)^2),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version111", "version1.1", "1.1")) {
      ErFunc <- function(E) {
        matrix(c(1 - E, E/2, E/2,
                 E/2, 1 - E, E/2,
                 E/2, E/2, 1 - E),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version0.9", "version0.7", "0.7", "0.9")) {
      ErFunc <- function(E) {
        matrix(c(1 - E, E, 0,
                 E/2, 1 - E, E/2,
                 0, E, 1 - E),
               3, 3, byrow = TRUE)
      }
    } else {
      stop("Unknown ErrFlavour, choose 'version2.0', 'version1.3', 'version1.1', \n",
           "or specify vector or matrix(-generating function) via 'Err'")
    }
    ErrM <- ErFunc(Err)
    ## BUG FIX: a user-supplied flavour may (per the check above) legitimately
    ## return a 4x4 matrix, which previously slipped through and broke the
    ## dimnames / row-sum handling below; collapse it to 3x3 here.
    if (all(dim(ErrM) == 4)) ErrM <- shrinkEM(ErrM)
  }
  if (!is.numeric(ErrM) || any(ErrM < 0 | ErrM > 1)) {
    stop("Error matrix values must be between 0 and 1")
  }
  ## Each row is a conditional distribution, so it must sum to 1.
  if (!all(abs(rowSums(ErrM) - 1) < sqrt(.Machine$double.eps))) {
    stop("Error matrix rows must sum to 1")
  }
  if (Return == "matrix") {
    dimnames(ErrM) <- list(paste0("act-", 0:2), paste0("obs-", 0:2, "|act"))
    return( ErrM )
  } else if (Return == 'vector') {
    ErrV <- ErM2V(ErrM)   # (hom|hom, het|hom, hom|het) summary
    return( ErrV )
  } else if (Return == "function") {
    if (!is.null(ErFunc)) {
      return( ErFunc )
    } else {
      stop("Don't know how to make error function from error matrix")
    }
  } else {
    stop("Unknown Return format")
  }
}
#===============================================================================
#===============================================================================
# 4x4 matrix (aa, aA, Aa, AA) to 3x3 matrix
# Collapse a 4x4 error matrix over ordered genotypes (aa, aA, Aa, AA) into a
# 3x3 matrix over allele dosage (0, 1, 2). The two observed heterozygote
# columns (aA, Aa) are summed, since both are scored as dosage 1; the two
# actual-heterozygote rows are averaged, assuming aA and Aa are equally
# likely. With input rows summing to 1, output rows then also sum to 1, as
# required by the row-sum validation in ErrToM().
shrinkEM <- function(EM4) {
  EM3 <- matrix(NA, 3, 3)
  EM3[c(1, 3), c(1, 3)] <- EM4[c(1, 4), c(1, 4)]
  EM3[2, c(1, 3)] <- (EM4[2, c(1, 4)] + EM4[3, c(1, 4)]) / 2
  ## BUG FIX: was 'EM4[c(1,4), 2] + EM4[c(1,4), 2]' (column 2 added twice);
  ## the observed-het probability is the sum of columns 2 (aA) and 3 (Aa).
  EM3[c(1, 3), 2] <- EM4[c(1, 4), 2] + EM4[c(1, 4), 3]
  EM3[2, 2] <- sum(EM4[2:3, 2:3]) / 2
  return( EM3 )
}
# vector -> matrix Pr(observed genotype (columns) | actual genotype (rows)):
# Expand a length-3 error-probability vector (hom|other hom, het|hom,
# hom|het) into the 3x3 matrix Pr(observed genotype (columns) | actual
# genotype (rows)), with genotypes labelled "0", "1", "2".
ErV2M <- function(ErV)
{
  ErrM <- rbind(
    c(1 - ErV[1] - ErV[2], ErV[2], ErV[1]),    # actual 0
    c(ErV[3], 1 - 2 * ErV[3], ErV[3]),         # actual 1 (het)
    c(ErV[1], ErV[2], 1 - ErV[1] - ErV[2])     # actual 2
  )
  dimnames(ErrM) <- list(act = 0:2, obs = 0:2)
  ErrM
}
# matrix --> vector: hom -> other hom, hom -> het, het -> hom
# Summarise a 3x3 genotyping-error matrix into three probabilities:
# hom observed as the other hom, hom observed as het, and het observed as
# hom. Symmetric cells are averaged over the two homozygote/het directions.
ErM2V <- function(ErrM) {
  c('hom|hom' = mean(c(ErrM[1, 3], ErrM[3, 1])),
    'het|hom' = mean(c(ErrM[1, 2], ErrM[3, 2])),
    'hom|het' = mean(c(ErrM[2, 1], ErrM[2, 3])))
}
| /R/ErrToM.R | no_license | JiscaH/sequoia | R | false | false | 9,674 | r | #' @title Generate Genotyping Error Matrix
#'
#' @description Generate a matrix with the probabilities of observed genotypes
#' (columns) conditional on actual genotypes (rows), or return a function to
#' generate such matrices (using a single value Err as input to that
#' function).
#'
#' @details By default (\code{flavour} = "version2.0"), \code{Err} is
#' interpreted as a locus-level error rate (rather than allele-level), and
#' equals the probability that an actual heterozygote is observed as either
#' homozygote (i.e., the probability that it is observed as AA = probability
#' that observed as aa = \code{Err}/2). The probability that one homozygote is
#' observed as the other is (\code{Err}/2\eqn{)^2}.
#'
#' The inbuilt 'flavours' correspond to the presumed and simulated error
#' structures, which have changed with sequoia versions. The most appropriate
#' error structure will depend on the genotyping platform; 'version0.9' and
#' 'version1.1' were inspired by SNP array genotyping while 'version1.3' and
#' 'version2.0' are intended to be more general.
#'
#' Pr(observed genotype (columns) | actual genotype (rows)):
#'
#' \emph{version2.0:}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{(1-E/2)^2} \tab \eqn{E(1-E/2)} \tab \eqn{(E/2)^2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{(E/2)^2} \tab \eqn{E(1-E/2)} \tab \eqn{(1-E/2)^2} \cr
#' }
#'
#' \emph{version1.3}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E-(E/2)^2} \tab \eqn{E} \tab \eqn{(E/2)^2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{(E/2)^2} \tab \eqn{E} \tab \eqn{1-E-(E/2)^2} \cr
#' }
#'
#' \emph{version1.1}
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E} \tab \eqn{E/2} \tab \eqn{E/2} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{E/2} \tab \eqn{E/2} \tab \eqn{1-E} \cr
#' }
#'
#' \emph{version0.9} (not recommended)
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E} \tab \eqn{E} \tab \eqn{0} \cr
#' \strong{1} \tab \eqn{E/2} \tab \eqn{1-E} \tab \eqn{E/2} \cr
#' \strong{2} \tab \eqn{0} \tab \eqn{E} \tab \eqn{1-E} \cr
#' }
#'
#' When \code{Err} is a length 3 vector, or if \code{Return = 'vector'} these
#' are the following probabilities:
#' \itemize{
#' \item hom|hom: an actual homozygote is observed as the other homozygote
#' \item het|hom: an actual homozygote is observed as heterozygote
#' \item hom|het: an actual heterozygote is observed as homozygote
#' }
#'
#' and Pr(observed genotype (columns) | actual genotype (rows)) is then:
#' \tabular{lccc}{
#' \tab \strong{0} \tab \strong{1} \tab \strong{2} \cr
#' \strong{0} \tab \eqn{1-E_1-E_2} \tab \eqn{E_2} \tab \eqn{E_1} \cr
#' \strong{1} \tab \eqn{E_3} \tab \eqn{1-2E_3} \tab \eqn{E_3} \cr
#' \strong{2} \tab \eqn{E_1} \tab \eqn{E_2} \tab \eqn{1-E_1-E_2} \cr
#' }
#'
#' The only assumption made is that the two alleles can be treated equally,
#' i.e. observing actual allele $A$ as $a$ is as likely as observing actual $a$
#' as $A$, and so e.g. P(obs=1|act=0) = P(obs=1|act=2).
#'
#' When the SNPs are scored via sequencing (e.g. RADseq or DArTseq), the 3rd
#' error rate (hom|het) is typically considerably higher than the other two,
#' while for SNP arrays it tends to be similar to P(het|hom).
#'
#'
#' @param Err estimated genotyping error rate, as a single number, or 3x3 or 4x4
#' matrix, or length 3 vector. If a single number, an error model is used that
#' aims to deal with scoring errors typical for SNP arrays. If a matrix, this
#' should be the probability of observed genotype (columns) conditional on
#' actual genotype (rows). Each row must therefore sum to 1. If
#' \code{Return='function'}, this may be \code{NA}. If a vector, these are the
#' probabilities (observed given actual) hom|other hom, het|hom, and hom|het.
#' @param flavour matrix-generating function, or one of 'version2.0',
#' 'version1.3' (='SNPchip'), 'version1.1' (='version111'), referring to the
#' sequoia version in which it was used as default. Only used if \code{Err} is
#' a single number.
#' @param Return output, 'matrix' (default), 'function', or 'vector'
#'
#' @return Depending on \code{Return}, either:
#' \itemize{
#' \item \code{'matrix'}: a 3x3 matrix, with probabilities of observed genotypes
#' (columns) conditional on actual (rows)
#' \item \code{'function'}: a function taking a single value \code{Err} as input, and
#' generating a 3x3 matrix
#' \item \code{'vector'}: a length 3 vector, with the probabilities (observed given
#' actual) hom|other hom, het|hom, and hom|het.
#' }
#'
#' @seealso \code{\link{EstEr}} to estimate genotyping error rate as a length 3
#' vector.
#'
#' @examples
#' ErM <- ErrToM(Err = 0.05)
#' ErM
#' ErrToM(ErM, Return = 'vector')
#'
#' ErrToM(Err = 0.05, flavour = function(E) { # Whalen, Gorjanc & Hickey 2018
#' matrix(c(1-E*3/4, E/2, E/4,
#' E/4, 1-2*E/4, E/4,
#' E/4, E/2, 1-E*3/4),
#' 3,3, byrow=TRUE) })
#' ErrToM(Err = c(0.05/4, 0.05/2, 0.05/4))
#'
#' @export
ErrToM <- function(Err = NA,
                   flavour = "version2.0",
                   Return = "matrix")
{
  ## The default Err = NA is only meaningful when a matrix-generating function
  ## is requested; a placeholder rate is then used to probe/validate 'flavour'.
  if (length(Err) == 1 && is.na(Err) && Return == "function") Err <- 0.1 # only used for testing
  if (!is.atomic(Err) || !length(Err) %in% c(1, 3, 9, 16)) stop("'Err' must be a single number, length 3 vector, or 3x3 matrix")
  ## Fail early & clearly on missing values; an NA inside a vector/matrix
  ## would otherwise surface further down as the cryptic error
  ## "missing value where TRUE/FALSE needed".
  if (anyNA(Err)) stop("'Err' may not contain missing values")
  ## is.numeric() (rather than is.double()) also accepts integer input, e.g. 0L.
  if (any(Err < 0 | Err > 1) || !is.numeric(Err)) stop("'Err' must be (a) number(s) between 0 and 1")
  # ErrM: observed (columns) conditional on actual (rows)
  ErrM <- NULL
  ErFunc <- NULL
  if (is.matrix(Err)) {
    if (nrow(Err) == 3 && ncol(Err) == 3) {      # '&&': scalar comparisons
      ErrM <- Err
    } else if (nrow(Err) == 4 && ncol(Err) == 4) {
      ErrM <- shrinkEM(Err)   # collapse (aa, aA, Aa, AA) to dosage 0/1/2
    } else {
      stop("Error matrix should be a 3x3 or 4x4 matrix")
    }
  } else if (length(Err) == 3) {
    ErrM <- ErV2M(Err)   # (hom|other hom, het|hom, hom|het) probabilities
  } else {
    ## Single error rate: build the matrix via the requested 'flavour'.
    if (is.function(flavour)) {
      ErrM <- flavour(Err)
      if (!is.matrix(ErrM)) stop("ErFunc(E) should return a 3x3 or 4x4 matrix")
      if (!(all(dim(ErrM) == 4) | all(dim(ErrM) == 3))) stop("ErFunc(E) should return a 4x4 or 3x3 matrix")
      ## sanity check: the supplied function must actually depend on E
      ErrM.B <- flavour(Err + 0.1)
      if (all(ErrM.B == ErrM)) stop("ErFunc(E) is not a function of error rate E")
      ErFunc <- flavour
    } else if (flavour %in% c("version2.0", "2.0")) {
      ErFunc <- function(E) {
        matrix(c((1 - E/2)^2, E * (1 - E/2), (E/2)^2,
                 E/2, 1 - E, E/2,
                 (E/2)^2, E * (1 - E/2), (1 - E/2)^2),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version1.3", "SNPchip", "1.3")) {
      ErFunc <- function(E) {
        matrix(c(1 - E - (E/2)^2, E, (E/2)^2,
                 E/2, 1 - E, E/2,
                 (E/2)^2, E, 1 - E - (E/2)^2),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version111", "version1.1", "1.1")) {
      ErFunc <- function(E) {
        matrix(c(1 - E, E/2, E/2,
                 E/2, 1 - E, E/2,
                 E/2, E/2, 1 - E),
               3, 3, byrow = TRUE)
      }
    } else if (flavour %in% c("version0.9", "version0.7", "0.7", "0.9")) {
      ErFunc <- function(E) {
        matrix(c(1 - E, E, 0,
                 E/2, 1 - E, E/2,
                 0, E, 1 - E),
               3, 3, byrow = TRUE)
      }
    } else {
      stop("Unknown ErrFlavour, choose 'version2.0', 'version1.3', 'version1.1', \n",
           "or specify vector or matrix(-generating function) via 'Err'")
    }
    ErrM <- ErFunc(Err)
    ## BUG FIX: a user-supplied flavour may (per the check above) legitimately
    ## return a 4x4 matrix, which previously slipped through and broke the
    ## dimnames / row-sum handling below; collapse it to 3x3 here.
    if (all(dim(ErrM) == 4)) ErrM <- shrinkEM(ErrM)
  }
  if (!is.numeric(ErrM) || any(ErrM < 0 | ErrM > 1)) {
    stop("Error matrix values must be between 0 and 1")
  }
  ## Each row is a conditional distribution, so it must sum to 1.
  if (!all(abs(rowSums(ErrM) - 1) < sqrt(.Machine$double.eps))) {
    stop("Error matrix rows must sum to 1")
  }
  if (Return == "matrix") {
    dimnames(ErrM) <- list(paste0("act-", 0:2), paste0("obs-", 0:2, "|act"))
    return( ErrM )
  } else if (Return == 'vector') {
    ErrV <- ErM2V(ErrM)   # (hom|hom, het|hom, hom|het) summary
    return( ErrV )
  } else if (Return == "function") {
    if (!is.null(ErFunc)) {
      return( ErFunc )
    } else {
      stop("Don't know how to make error function from error matrix")
    }
  } else {
    stop("Unknown Return format")
  }
}
#===============================================================================
#===============================================================================
# 4x4 matrix (aa, aA, Aa, AA) to 3x3 matrix
# Collapse a 4x4 error matrix over ordered genotypes (aa, aA, Aa, AA) into a
# 3x3 matrix over allele dosage (0, 1, 2). The two observed heterozygote
# columns (aA, Aa) are summed, since both are scored as dosage 1; the two
# actual-heterozygote rows are averaged, assuming aA and Aa are equally
# likely. With input rows summing to 1, output rows then also sum to 1, as
# required by the row-sum validation in ErrToM().
shrinkEM <- function(EM4) {
  EM3 <- matrix(NA, 3, 3)
  EM3[c(1, 3), c(1, 3)] <- EM4[c(1, 4), c(1, 4)]
  EM3[2, c(1, 3)] <- (EM4[2, c(1, 4)] + EM4[3, c(1, 4)]) / 2
  ## BUG FIX: was 'EM4[c(1,4), 2] + EM4[c(1,4), 2]' (column 2 added twice);
  ## the observed-het probability is the sum of columns 2 (aA) and 3 (Aa).
  EM3[c(1, 3), 2] <- EM4[c(1, 4), 2] + EM4[c(1, 4), 3]
  EM3[2, 2] <- sum(EM4[2:3, 2:3]) / 2
  return( EM3 )
}
# vector -> matrix Pr(observed genotype (columns) | actual genotype (rows)):
# Expand a length-3 error-probability vector (hom|other hom, het|hom,
# hom|het) into the 3x3 matrix Pr(observed genotype (columns) | actual
# genotype (rows)), with genotypes labelled "0", "1", "2".
ErV2M <- function(ErV)
{
  ErrM <- rbind(
    c(1 - ErV[1] - ErV[2], ErV[2], ErV[1]),    # actual 0
    c(ErV[3], 1 - 2 * ErV[3], ErV[3]),         # actual 1 (het)
    c(ErV[1], ErV[2], 1 - ErV[1] - ErV[2])     # actual 2
  )
  dimnames(ErrM) <- list(act = 0:2, obs = 0:2)
  ErrM
}
# matrix --> vector: hom -> other hom, hom -> het, het -> hom
# Summarise a 3x3 genotyping-error matrix into three probabilities:
# hom observed as the other hom, hom observed as het, and het observed as
# hom. Symmetric cells are averaged over the two homozygote/het directions.
ErM2V <- function(ErrM) {
  c('hom|hom' = mean(c(ErrM[1, 3], ErrM[3, 1])),
    'het|hom' = mean(c(ErrM[1, 2], ErrM[3, 2])),
    'hom|het' = mean(c(ErrM[2, 1], ErrM[2, 3])))
}
|
# Install devtools (needed to install packages straight from GitHub)
install.packages("devtools")
# Load devtools
library("devtools")
# Install caiomsouzarpackage
# https://github.com/caiomsouza/caiomsouzarpackage
devtools::install_github("caiomsouza/caiomsouzarpackage")
# Load caiomsouzarpackage
library("caiomsouzarpackage")
# cat_function doc
?cat_function
# suma.dos.numeros doc
?suma.dos.numeros
# Try suma.dos.numeros ("add two numbers"): it takes two arguments.
suma.dos.numeros(2, 2)
suma.dos.numeros(2, 8)
# NOTE(review): these two calls previously passed a single decimal value
# (2.5 and 2.8), which fails for a two-argument adder; the decimal point
# was almost certainly a typo for the argument separator -- confirm intent.
suma.dos.numeros(2, 5)
suma.dos.numeros(2, 8)
# Remove caiomsouzarpackage
remove.packages("caiomsouzarpackage")
| /mod7/R/src/06_ejercicio_extra/06_ejercicio_extra.R | no_license | caiomsouza/u-tad | R | false | false | 539 | r | # Install devtools
# Install devtools (needed to install packages straight from GitHub)
install.packages("devtools")
# Load devtools
library("devtools")
# Install caiomsouzarpackage
# https://github.com/caiomsouza/caiomsouzarpackage
devtools::install_github("caiomsouza/caiomsouzarpackage")
# Load caiomsouzarpackage
library("caiomsouzarpackage")
# cat_function doc
?cat_function
# suma.dos.numeros doc
?suma.dos.numeros
# Try suma.dos.numeros ("add two numbers"): it takes two arguments.
suma.dos.numeros(2, 2)
suma.dos.numeros(2, 8)
# NOTE(review): these two calls previously passed a single decimal value
# (2.5 and 2.8), which fails for a two-argument adder; the decimal point
# was almost certainly a typo for the argument separator -- confirm intent.
suma.dos.numeros(2, 5)
suma.dos.numeros(2, 8)
# Remove caiomsouzarpackage
remove.packages("caiomsouzarpackage")
|
#' @export
#' Plot the Percent of Normal (PN) index for one or more precintcon
#' datasets, either on screen or exported as image files (one per dataset).
#'
#' @param ... one or more precintcon.daily / precintcon.monthly /
#'   precintcon.pn series.
#' @param interval interval (in days) used when computing PN from daily data.
#' @param scale analysis scale: "a" (annual), "s" (seasonal), "m" (monthly)
#'   or "d".
#' @param xlab,ylab axis labels; xlab defaults to a scale-appropriate label.
#' @param fontsize,axis.text.color plot text size and axis text colour.
#' @param legend optional vector of panel names, one per input dataset.
#' @param export if TRUE, plots are saved to files instead of displayed.
#' @param export.name,width,height,units output file name and dimensions,
#'   passed to ggsave(); the dataset name is prefixed to export.name.
precintcon.plot.pn <- function(
  ...,
  interval = 30,
  scale = "a",
  xlab = NA,
  ylab = "PN",
  fontsize = 10,
  axis.text.color = "black",
  legend = NULL,
  export = FALSE,
  export.name = "pn_plot.png",
  width = 10,
  height = 10,
  units = "cm"
) {
  l <- list(...)
  ## Names of the input datasets, taken from the call itself
  ## (match.call()[1] is the function name, hence the +1 offset).
  varl <- as.list(match.call()[1:length(l) + 1])
  ## With several on-screen plots, prompt before advancing to the next one.
  ## (was par(ask=T): spell out TRUE/FALSE, T/F are reassignable)
  if (length(l) > 1 && !export)
    par(ask = TRUE)
  if (!is.null(legend) && length(varl) != length(legend)) {
    ## fixed grammar of the user-facing message ("should has length equals")
    stop(paste("legend should have length equal to the number of input data. legend parameter length",
               length(legend), ": number of input data", length(varl)))
  } else if (!is.null(legend))
    varl <- as.vector(legend)
  mapply(function(d, n, interval, scale, axis.text.color, fontsize,
                  xlab, ylab, export, export.name, width, height, units) {
    if (is.element("precintcon.pn", class(d)) ||
        is.element("precintcon.monthly", class(d)) ||
        is.element("precintcon.daily", class(d))) {
      ## Compute the PN index and tag each row with the dataset name,
      ## which drives the facet strip label.
      d <- precintcon.pn.analysis(d, interval, scale)
      d <- cbind(d, data.frame(dataset = paste(n)))
      graph <- NA
      if ((scale == "a") || (scale == "d")) {
        if (is.na(xlab))
          xlab <- "Year"
        graph <- ggplot(d, aes_string("year", "pn")) + geom_point(size = 2) +
          scale_x_continuous(expand = c(.02, .02), breaks = seq(min(d$year), max(d$year), by = 2))
      } else {
        ## Seasonal/monthly scales are plotted on a date axis; build the
        ## month number first.
        ddd <- NA
        if (scale == "s") {
          if (is.na(xlab))
            xlab = "Season"
          ## First month of season s: 2*(s-1) + s == 3*(s-1) + 1.
          ddd <- 2 * (d$season - 1) + d$season
        } else if (scale == "m") {
          if (is.na(xlab))
            xlab = "Month"
          ddd <- d$month
        }
        d <-
          cbind(
            d, data.frame(
              date = as.Date(paste("01", ddd, d$year, sep = "/"), "%d/%m/%Y")));
        graph <-
          ggplot(d, aes_string("date", "pn")) + geom_point(size = 1.1) +
          scale_x_date(labels = date_format("%b %y"))
      }
      graph <-
        graph +
        geom_line(size = .5) +
        xlab(xlab) +
        ylab(ylab) +
        theme(text = element_text(size = fontsize),
              axis.text = element_text(color = axis.text.color),
              axis.text.x = element_text(angle = 25),
              axis.title.x = element_text(vjust = .1)) +
        facet_grid(. ~ dataset)
      if (!export) {
        print(graph)
      } else {
        ## One file per dataset: prefix the dataset name to export.name.
        export.name <- paste(n, export.name, sep = "_")
        ggsave(export.name, plot = graph, height = height, width = width, units = units)
      }
    }
  }, l, varl, interval = interval, scale = scale,
  axis.text.color = axis.text.color, MoreArgs = list(fontsize = fontsize,
      width = width, height = height, units = units, xlab = xlab, ylab = ylab,
      export = export, export.name = export.name), SIMPLIFY = FALSE)
  par(ask = FALSE)
}
| /R/precintcon.plot.pn.r | no_license | lucasvenez/precintcon | R | false | false | 2,817 | r | #' @export
#' Plot the Percent of Normal (PN) index for one or more precintcon
#' datasets, either on screen or exported as image files (one per dataset).
#'
#' @param ... one or more precintcon.daily / precintcon.monthly /
#'   precintcon.pn series.
#' @param interval interval (in days) used when computing PN from daily data.
#' @param scale analysis scale: "a" (annual), "s" (seasonal), "m" (monthly)
#'   or "d".
#' @param xlab,ylab axis labels; xlab defaults to a scale-appropriate label.
#' @param fontsize,axis.text.color plot text size and axis text colour.
#' @param legend optional vector of panel names, one per input dataset.
#' @param export if TRUE, plots are saved to files instead of displayed.
#' @param export.name,width,height,units output file name and dimensions,
#'   passed to ggsave(); the dataset name is prefixed to export.name.
precintcon.plot.pn <- function(
  ...,
  interval = 30,
  scale = "a",
  xlab = NA,
  ylab = "PN",
  fontsize = 10,
  axis.text.color = "black",
  legend = NULL,
  export = FALSE,
  export.name = "pn_plot.png",
  width = 10,
  height = 10,
  units = "cm"
) {
  l <- list(...)
  ## Names of the input datasets, taken from the call itself
  ## (match.call()[1] is the function name, hence the +1 offset).
  varl <- as.list(match.call()[1:length(l) + 1])
  ## With several on-screen plots, prompt before advancing to the next one.
  ## (was par(ask=T): spell out TRUE/FALSE, T/F are reassignable)
  if (length(l) > 1 && !export)
    par(ask = TRUE)
  if (!is.null(legend) && length(varl) != length(legend)) {
    ## fixed grammar of the user-facing message ("should has length equals")
    stop(paste("legend should have length equal to the number of input data. legend parameter length",
               length(legend), ": number of input data", length(varl)))
  } else if (!is.null(legend))
    varl <- as.vector(legend)
  mapply(function(d, n, interval, scale, axis.text.color, fontsize,
                  xlab, ylab, export, export.name, width, height, units) {
    if (is.element("precintcon.pn", class(d)) ||
        is.element("precintcon.monthly", class(d)) ||
        is.element("precintcon.daily", class(d))) {
      ## Compute the PN index and tag each row with the dataset name,
      ## which drives the facet strip label.
      d <- precintcon.pn.analysis(d, interval, scale)
      d <- cbind(d, data.frame(dataset = paste(n)))
      graph <- NA
      if ((scale == "a") || (scale == "d")) {
        if (is.na(xlab))
          xlab <- "Year"
        graph <- ggplot(d, aes_string("year", "pn")) + geom_point(size = 2) +
          scale_x_continuous(expand = c(.02, .02), breaks = seq(min(d$year), max(d$year), by = 2))
      } else {
        ## Seasonal/monthly scales are plotted on a date axis; build the
        ## month number first.
        ddd <- NA
        if (scale == "s") {
          if (is.na(xlab))
            xlab = "Season"
          ## First month of season s: 2*(s-1) + s == 3*(s-1) + 1.
          ddd <- 2 * (d$season - 1) + d$season
        } else if (scale == "m") {
          if (is.na(xlab))
            xlab = "Month"
          ddd <- d$month
        }
        d <-
          cbind(
            d, data.frame(
              date = as.Date(paste("01", ddd, d$year, sep = "/"), "%d/%m/%Y")));
        graph <-
          ggplot(d, aes_string("date", "pn")) + geom_point(size = 1.1) +
          scale_x_date(labels = date_format("%b %y"))
      }
      graph <-
        graph +
        geom_line(size = .5) +
        xlab(xlab) +
        ylab(ylab) +
        theme(text = element_text(size = fontsize),
              axis.text = element_text(color = axis.text.color),
              axis.text.x = element_text(angle = 25),
              axis.title.x = element_text(vjust = .1)) +
        facet_grid(. ~ dataset)
      if (!export) {
        print(graph)
      } else {
        ## One file per dataset: prefix the dataset name to export.name.
        export.name <- paste(n, export.name, sep = "_")
        ggsave(export.name, plot = graph, height = height, width = width, units = units)
      }
    }
  }, l, varl, interval = interval, scale = scale,
  axis.text.color = axis.text.color, MoreArgs = list(fontsize = fontsize,
      width = width, height = height, units = units, xlab = xlab, ylab = ylab,
      export = export, export.name = export.name), SIMPLIFY = FALSE)
  par(ask = FALSE)
}
|
#install.packages("gdm")
library(gdm)
library(parallel)
library(iterators)
## BUG FIX: was library(foParallel) -- no such package exists on CRAN; the
## foreach parallel backend used by gdm.varImp(parallel = TRUE) is doParallel.
library(doParallel)
library(foreach)
library(sp)
library(raster)
## ---- Build input tables for the GDM ------------------------------------
## NOTE(review): relies on an 'inputdata' object that must already exist in
## the workspace -- confirm it is created/loaded by an upstream script.
## just gonna try with bellii for now
## using lucas' names to make sure i understand how works -- will need to update
#data <- read.csv("Icterus_ingroup_environmentaldata.csv",header=T)
data = unique(inputdata)
#rownames(data) <- data$Sample
rownames(data) = data$CATALOG.NUMBER
# Keep only the Bell's Vireo records.
data = data[data$SPP=="BELLII",]
# Genetic data (response variable)
#gene <- data[,c("Sample","Latitude","Longitude")]
gene = data[,c("CATALOG.NUMBER","LAT","LONG")]
colnames(gene) = c("Sample","Latitude","Longitude")
rownames(gene) <- gene$Sample
#samples = c("A1","A10","A3","A4","A5","A6","A8","A9","B10","B4","B5","B7","B9","C1","C10","C2","C3","C4","C5","C6","C7","C9","D1","D10","D3","D4","D6","D7","D8","D9","E1","E10","E2","E3","E4","E5","E6","E7","E8","E9","F1","F2","F3","F5","F6","F7","F8","F9","G1","G2","G3","G4","G6","G7","G8","G9","H1","H2","H3","H4","H5","H6","H8","H9")
samples = gene$Sample
#gene <- gene[samples,]
#remove = c(1,3,7,14,16,17,23,25,31,33,34,41,43,45,49,50,52,53,57,58,62)
#remove = which(is.na(morphdf$BILL.HEIGHT))
#gene <- gene[-remove,]
#GENE <- read.csv("Icterus_ingroup_IBS_dissimilarity.csv",header=T)[-remove,-remove]
# Response dissimilarity matrix -- despite the leftover name 'GENE', this
# file holds morphological distances. NOTE(review): absolute local path is
# not portable; consider a relative path or here::here().
GENE = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_morph.csv",
header=T)#[-remove,-remove]
colnames(GENE)[1] = "Sample"
#GENE <- cbind(gene$Sample,GENE)
#colnames(GENE) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# Environmental data
#bioclim <- data[samples,c(1,3,4,9:28)][-remove,]
# Columns 40-58 are assumed to hold the 19 bioclim variables --
# TODO confirm against the structure of 'inputdata'.
bioclim = data[,c(40:58)]
colnames(bioclim) = c("BIO1","BIO2","BIO3","BIO4","BIO5",
"BIO6","BIO7","BIO8","BIO9","BIO10",
"BIO11","BIO12","BIO13","BIO14","BIO15",
"BIO16","BIO17","BIO18","BIO19")
# bioclim <- cbind(data[,c(2,6,7)][-remove,],rep(1,55))
# colnames(bioclim) <- c("siteCol","Latitude","Longitude","dummy")
# PCA on the temperature variables (BIO1-BIO11); keep the first 4 components.
temp_pca <- prcomp(bioclim[,c("BIO1","BIO2","BIO3","BIO4","BIO5","BIO6","BIO7","BIO8","BIO9","BIO10","BIO11")],center=TRUE,scale.=TRUE)
summary(temp_pca)
biplot(temp_pca,cex=0.5)
temp_scores <- predict(temp_pca)[,1:4]
colnames(temp_scores) <- c("PC1T","PC2T","PC3T","PC4T")
# PCA on the precipitation variables (BIO12-BIO19); keep the first 4 components.
prec_pca <- prcomp(bioclim[,c("BIO12","BIO13","BIO14","BIO15","BIO16","BIO17","BIO18","BIO19")],center=TRUE,scale.=TRUE)
summary(prec_pca)
biplot(prec_pca,cex=0.5)
prec_scores <- predict(prec_pca)[,1:4]
colnames(prec_scores) <- c("PC1P","PC2P","PC3P","PC4P")
# Combine the first three raw bioclim columns (BIO1-BIO3) with the PC scores.
bioclim_final <- cbind(bioclim[,1:3],temp_scores,prec_scores)
# Predictor dissimilarity matrices
# ENV <- read.csv("environmental_distance.csv")[-remove,-remove]
# ENV <- cbind(pheno$siteCol,ENV)
# colnames(ENV) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
# PREC <- read.csv("precipitation.dist.csv")[-remove,-remove]
# PREC <- cbind(pheno$siteCol,PREC)
# colnames(PREC) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
#
# TEMP <- read.csv("temperature.dist.csv")[-remove,-remove]
# TEMP <- cbind(pheno$siteCol,TEMP)
# colnames(TEMP) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
#LGM <- read.csv("LGM_resistance.csv",header=T)[-remove,-remove]
# Predictor distance matrix from the "_str" file -- NOTE(review): the name
# 'LGM' is a leftover from an earlier script; confirm what this matrix
# represents. Absolute local paths throughout are not portable.
LGM = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_str.csv",
header=T)
colnames(LGM)[1] = "Sample"
#LGM <- cbind(gene$Sample,LGM)
#colnames(LGM) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
#IBD <- read.csv("Present_plain_resistance.csv",header=T)[-remove,-remove]
# Geographic distance matrix (isolation by distance).
IBD = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_geog.csv",
header=T)
colnames(IBD)[1] = "Sample"
#IBD <- cbind(gene$Sample,IBD)
#colnames(IBD) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# Environmental distance matrix.
PRES = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_env.csv",
header=T)
colnames(PRES)[1] = "Sample"
#PRES <- read.csv("Present_resistance.csv",header=T)[-remove,-remove]
#PRES <- cbind(gene$Sample,PRES)
#colnames(PRES) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# GDM
#GENE = merge(GENE,gene) ## not sure if here is where it goes
# Attach coordinates to the predictor table, then strip non-alphanumeric
# characters from every identifier so the row/column names of all
# matrices/tables line up exactly.
bioclim_final$Sample = rownames(bioclim_final)
bioclim_final = merge(bioclim_final,gene)
rownames(bioclim_final) = bioclim_final$Sample
#bioclim_final = bioclim_final[,c(-1)]
rownames(bioclim_final) = stringr::str_replace_all(rownames(bioclim_final), "[^[:alnum:]]", "")
colnames(bioclim_final) = stringr::str_replace_all(colnames(bioclim_final), "[^[:alnum:]]", "")
rownames(GENE) = stringr::str_replace_all(rownames(GENE), "[^[:alnum:]]", "")
colnames(GENE) = stringr::str_replace_all(colnames(GENE), "[^[:alnum:]]", "")
rownames(IBD) = stringr::str_replace_all(rownames(IBD), "[^[:alnum:]]", "")
colnames(IBD) = stringr::str_replace_all(colnames(IBD), "[^[:alnum:]]", "")
rownames(LGM) = stringr::str_replace_all(rownames(LGM), "[^[:alnum:]]", "")
colnames(LGM) = stringr::str_replace_all(colnames(LGM), "[^[:alnum:]]", "")
rownames(PRES) = stringr::str_replace_all(rownames(PRES), "[^[:alnum:]]", "")
colnames(PRES) = stringr::str_replace_all(colnames(PRES), "[^[:alnum:]]", "")
bioclim_final$Sample = stringr::str_replace_all(bioclim_final$Sample, "[^[:alnum:]]", "")
GENE$Sample = stringr::str_replace_all(GENE$Sample, "[^[:alnum:]]", "")
LGM$Sample = stringr::str_replace_all(LGM$Sample, "[^[:alnum:]]", "")
IBD$Sample = stringr::str_replace_all(IBD$Sample, "[^[:alnum:]]", "")
PRES$Sample = stringr::str_replace_all(PRES$Sample, "[^[:alnum:]]", "")
# Build the gdm site-pair table: response is a distance matrix (bioFormat 3),
# with three additional distance-matrix predictors (IBD, LGM, PRES).
formatted <- formatsitepair(GENE, 3, siteColumn="Sample",
XColumn="Longitude", YColumn="Latitude",
predData = bioclim_final,
distPreds=list(as.matrix(IBD),as.matrix(LGM),as.matrix(PRES)))
names(formatted)
# Rescale the raw response distances to [0, 1].
formatted$distance = scales::rescale(formatted$distance)
# Fit the GDM and inspect deviance explained / fitted splines.
model <- gdm(formatted, geo=FALSE, splines=NULL, knots=NULL)
summary(model)
str(model)
model$explained
model$predictors
model$coefficients
length(model$predictors)
plot(model)
plot(model, plot.layout=c(4,3))
# Permutation test of variable importance (100 permutations on 4 cores).
modTest <- gdm.varImp(formatted, geo=F, nPerm=100, parallel=T, cores=4)
#Deviance
modTest[[1]]
write.csv(modTest[[1]],"variable_importance_test.csv")
#Importance
#Significance
modTest[[3]]
write.csv(modTest[[3]],"variable_significance_test.csv")
# Same test, also fitting all reduced models (full model minus k variables).
modTest_b <- gdm.varImp(formatted,fullModelOnly = FALSE, geo=F, nPerm=100, parallel=T, cores=4)
barplot(modTest[[2]][,1],main = "full Model")
barplot(modTest[[2]][,2],main = "full Model - 1")
barplot(modTest[[2]][,3],main = "full Model - 2")
barplot(modTest[[2]][,4],main = "full Model - 3")
barplot(modTest[[2]][,5],main = "full Model - 4")
barplot(modTest[[2]][,6],main = "full Model - 5")
barplot(modTest[[2]][,7],main = "full Model - 6")
barplot(modTest[[2]][,8],main = "full Model - 7")
barplot(modTest[[2]][,9],main = "full Model - 8")
barplot(modTest[[2]][,10],main = "full Model - 9")
# Snapshot the whole workspace for later sessions.
save.image("PTO.RData")
| /scripts/GDM Analyses/GDM_fromlucas.R | no_license | kaiyaprovost/GDM_pipeline | R | false | false | 8,785 | r | #install.packages("gdm")
library(gdm)
library(parallel)
library(iterators)
# Fixed typo: "foParallel" is not a package. doParallel is the foreach
# parallel backend required by gdm.varImp(..., parallel = TRUE) below.
library(doParallel)
library(foreach)
library(sp)
library(raster)
## just gonna try with bellii for now
## using lucas' names to make sure i understand how works -- will need to update
# 'inputdata' must already exist in the workspace before this script is run
# (it is never read from disk here); duplicate rows are dropped and specimens
# are keyed by catalog number.
#data <- read.csv("Icterus_ingroup_environmentaldata.csv",header=T)
data = unique(inputdata)
#rownames(data) <- data$Sample
rownames(data) = data$CATALOG.NUMBER
# Keep only the BELLII specimens.
data = data[data$SPP=="BELLII",]
# Genetic data (response variable)
# Sample IDs plus coordinates, renamed to the Sample/Latitude/Longitude
# columns that gdm::formatsitepair() expects later on.
#gene <- data[,c("Sample","Latitude","Longitude")]
gene = data[,c("CATALOG.NUMBER","LAT","LONG")]
colnames(gene) = c("Sample","Latitude","Longitude")
rownames(gene) <- gene$Sample
#samples = c("A1","A10","A3","A4","A5","A6","A8","A9","B10","B4","B5","B7","B9","C1","C10","C2","C3","C4","C5","C6","C7","C9","D1","D10","D3","D4","D6","D7","D8","D9","E1","E10","E2","E3","E4","E5","E6","E7","E8","E9","F1","F2","F3","F5","F6","F7","F8","F9","G1","G2","G3","G4","G6","G7","G8","G9","H1","H2","H3","H4","H5","H6","H8","H9")
samples = gene$Sample
#gene <- gene[samples,]
#remove = c(1,3,7,14,16,17,23,25,31,33,34,41,43,45,49,50,52,53,57,58,62)
#remove = which(is.na(morphdf$BILL.HEIGHT))
#gene <- gene[-remove,]
# Response dissimilarity matrix (morphological distances); column 1 holds the
# sample IDs. NOTE(review): absolute path -- runs only on the author's machine.
#GENE <- read.csv("Icterus_ingroup_IBS_dissimilarity.csv",header=T)[-remove,-remove]
GENE = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_morph.csv",
header=T)#[-remove,-remove]
colnames(GENE)[1] = "Sample"
#GENE <- cbind(gene$Sample,GENE)
#colnames(GENE) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# Environmental data
# Columns 40:58 are assumed to hold the 19 bioclim variables BIO1-BIO19 --
# TODO confirm against the structure of 'inputdata'.
#bioclim <- data[samples,c(1,3,4,9:28)][-remove,]
bioclim = data[,c(40:58)]
colnames(bioclim) = c("BIO1","BIO2","BIO3","BIO4","BIO5",
"BIO6","BIO7","BIO8","BIO9","BIO10",
"BIO11","BIO12","BIO13","BIO14","BIO15",
"BIO16","BIO17","BIO18","BIO19")
# bioclim <- cbind(data[,c(2,6,7)][-remove,],rep(1,55))
# colnames(bioclim) <- c("siteCol","Latitude","Longitude","dummy")
# Collapse the temperature variables (BIO1-BIO11) into their first four
# principal components (centered and scaled PCA).
temp_pca <- prcomp(bioclim[,c("BIO1","BIO2","BIO3","BIO4","BIO5","BIO6","BIO7","BIO8","BIO9","BIO10","BIO11")],center=TRUE,scale.=TRUE)
summary(temp_pca)
biplot(temp_pca,cex=0.5)
temp_scores <- predict(temp_pca)[,1:4]
colnames(temp_scores) <- c("PC1T","PC2T","PC3T","PC4T")
# Same reduction for the precipitation variables (BIO12-BIO19).
prec_pca <- prcomp(bioclim[,c("BIO12","BIO13","BIO14","BIO15","BIO16","BIO17","BIO18","BIO19")],center=TRUE,scale.=TRUE)
summary(prec_pca)
biplot(prec_pca,cex=0.5)
prec_scores <- predict(prec_pca)[,1:4]
colnames(prec_scores) <- c("PC1P","PC2P","PC3P","PC4P")
# Final predictor table: the first three bioclim columns plus the 4
# temperature and 4 precipitation PC scores.
bioclim_final <- cbind(bioclim[,1:3],temp_scores,prec_scores)
# Predictor dissimilarity matrices
# ENV <- read.csv("environmental_distance.csv")[-remove,-remove]
# ENV <- cbind(pheno$siteCol,ENV)
# colnames(ENV) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
# PREC <- read.csv("precipitation.dist.csv")[-remove,-remove]
# PREC <- cbind(pheno$siteCol,PREC)
# colnames(PREC) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
#
# TEMP <- read.csv("temperature.dist.csv")[-remove,-remove]
# TEMP <- cbind(pheno$siteCol,TEMP)
# colnames(TEMP) <- c("siteCol",82717,82718,82719,82720,82721,82722,106308,144750,156482,254754,328897,328900,388899,398723,398724,398730,398733,398737,398738,398748,398750,398753,398754,398757,398758,398760,398761,398763,398764,398765,399356,399359,521922,776163,776164,776165,776166,778430,778431,787600,813830,26638,26641,26653,26656,26666,26667,26673,26675,26677,26687,26697,26699,26705,1822)
# Three predictor distance matrices; column 1 of each holds the sample IDs.
# LGM: distances from the *_str.csv file (presumably structure-based --
# verify against the file's provenance).
#LGM <- read.csv("LGM_resistance.csv",header=T)[-remove,-remove]
LGM = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_str.csv",
header=T)
colnames(LGM)[1] = "Sample"
#LGM <- cbind(gene$Sample,LGM)
#colnames(LGM) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# IBD: geographic distances (*_geog.csv, isolation by distance).
#IBD <- read.csv("Present_plain_resistance.csv",header=T)[-remove,-remove]
IBD = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_geog.csv",
header=T)
colnames(IBD)[1] = "Sample"
#IBD <- cbind(gene$Sample,IBD)
#colnames(IBD) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# PRES: present-day environmental distances (*_env.csv).
PRES = read.csv("/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/Distances/BELLII_distancematrix_env.csv",
header=T)
colnames(PRES)[1] = "Sample"
#PRES <- read.csv("Present_resistance.csv",header=T)[-remove,-remove]
#PRES <- cbind(gene$Sample,PRES)
#colnames(PRES) <- c("Sample","A10","A4","A5","A6","A9","B10","B4","B5","B7","B9","C10","C4","C5","C6","C7","C9","D10","D4","D6","D7","D8","D9","E10","E4","E5","E6","E7","E8","E9","F2","F5","F7","F8","F9","G3","G7","G8","G9","H3","H4","H5","H8","H9")
# GDM
#GENE = merge(GENE,gene) ## not sure if here is where it goes
# Attach sample IDs and geographic coordinates to the environmental predictor
# table so formatsitepair() can match sites across response and predictors.
bioclim_final$Sample <- rownames(bioclim_final)
bioclim_final <- merge(bioclim_final, gene)
rownames(bioclim_final) <- bioclim_final$Sample
#bioclim_final = bioclim_final[,c(-1)]
# Strip every non-alphanumeric character from the row names, column names and
# Sample column of each table so sample IDs match exactly between the response
# matrix, the predictor table and the three distance matrices
# (replaces fifteen copy-pasted str_replace_all() calls).
for (nm in c("bioclim_final", "GENE", "IBD", "LGM", "PRES")) {
  obj <- get(nm)
  rownames(obj) <- stringr::str_replace_all(rownames(obj), "[^[:alnum:]]", "")
  colnames(obj) <- stringr::str_replace_all(colnames(obj), "[^[:alnum:]]", "")
  obj$Sample <- stringr::str_replace_all(obj$Sample, "[^[:alnum:]]", "")
  assign(nm, obj)
}
# Build the site-pair table: the response (GENE) is a dissimilarity matrix
# (bioFormat = 3); predictors are the PCA scores plus three custom distance
# matrices supplied through distPreds.
formatted <- formatsitepair(GENE, 3, siteColumn = "Sample",
                            XColumn = "Longitude", YColumn = "Latitude",
                            predData = bioclim_final,
                            distPreds = list(as.matrix(IBD), as.matrix(LGM), as.matrix(PRES)))
names(formatted)
# gdm() expects the response dissimilarity to lie in [0, 1].
formatted$distance <- scales::rescale(formatted$distance)
# Fit the generalized dissimilarity model and inspect the fit interactively.
model <- gdm(formatted, geo = FALSE, splines = NULL, knots = NULL)
summary(model)
str(model)
model$explained
model$predictors
model$coefficients
length(model$predictors)
plot(model)
plot(model, plot.layout = c(4, 3))
# Permutation test of variable importance/significance
# (TRUE/FALSE spelled out instead of the reassignable T/F shorthand).
modTest <- gdm.varImp(formatted, geo = FALSE, nPerm = 100, parallel = TRUE, cores = 4)
#Deviance
modTest[[1]]
write.csv(modTest[[1]], "variable_importance_test.csv")
#Importance
#Significance
modTest[[3]]
write.csv(modTest[[3]], "variable_significance_test.csv")
# Repeat, also fitting all reduced (drop-one-predictor) models.
modTest_b <- gdm.varImp(formatted, fullModelOnly = FALSE, geo = FALSE, nPerm = 100, parallel = TRUE, cores = 4)
# One importance barplot per model: column 1 is the full model, column i + 1
# is the full model with predictor i removed
# (replaces ten copy-pasted barplot() calls).
for (i in seq_len(ncol(modTest[[2]]))) {
  barplot(modTest[[2]][, i],
          main = if (i == 1) "full Model" else paste("full Model -", i - 1))
}
save.image("PTO.RData")
|
#' Collect False Negative and False Positive Values.
#'
#' Function to collect the false negative (FN) and false positive (FP) rates
#' from every dataset in the global list \code{data}. Each element of
#' \code{data} must be a data frame with 0/1 columns \code{Type_1} (actual
#' label) and \code{Type_3} (predicted label). For each dataset two rows are
#' appended to the result: the fraction of rows with \code{Type_1 == 0} but
#' \code{Type_3 == 1} ("Mislabelled Non-defective Modules") and the fraction
#' with \code{Type_1 == 1} but \code{Type_3 == 0} ("Actual Defective Modules
#' Mislabelled").
#' @param MSD ignored: the accumulator is re-initialised inside the function
#'   (argument kept for backward compatibility with existing callers).
#' @return A data frame with columns \code{Labels} and \code{Percentage},
#'   two rows per dataset in \code{data}.
#' @keywords MSD, FN, FP
#' @export
#' @examples
#' CollectFNFP()
CollectFNFP <- function(MSD)
{
  # Pre-allocate the per-dataset rate vectors instead of growing NULLs.
  FN <- numeric(length(data))
  FP <- numeric(length(data))
  MSD <- data.frame()  # the MSD argument is intentionally discarded
  # seq_along() is safe when 'data' is empty, unlike 1:length(data),
  # which would evaluate to c(1, 0) and index out of bounds.
  for (i in seq_along(data))
  {
    class <- data[[i]]$Type_1
    scored.class <- data[[i]]$Type_3
    FN[i] <- sum(class == 0 & scored.class == 1) / nrow(data[[i]])
    FP[i] <- sum(class == 1 & scored.class == 0) / nrow(data[[i]])
    MSDx <- data.frame(Labels = "Mislabelled Non-defective Modules", Percentage = FN[i])
    MSD <- rbind(MSD, MSDx)
    MSDx <- data.frame(Labels = "Actual Defective Modules Mislabelled", Percentage = FP[i])
    MSD <- rbind(MSD, MSDx)
  }
  return(MSD)
}
| /R/CollectFNFP.R | no_license | suraj-yathish/MCSlibrary | R | false | false | 1,016 | r | #' Collect False Negative and False Positive Values.
#' Collect False Negative and False Positive Values.
#'
#' Function to collect the false negative (FN) and false positive (FP) rates
#' from every dataset in the global list \code{data}. Each element of
#' \code{data} must be a data frame with 0/1 columns \code{Type_1} (actual
#' label) and \code{Type_3} (predicted label). For each dataset two rows are
#' appended to the result: the fraction of rows with \code{Type_1 == 0} but
#' \code{Type_3 == 1} ("Mislabelled Non-defective Modules") and the fraction
#' with \code{Type_1 == 1} but \code{Type_3 == 0} ("Actual Defective Modules
#' Mislabelled").
#' @param MSD ignored: the accumulator is re-initialised inside the function
#'   (argument kept for backward compatibility with existing callers).
#' @return A data frame with columns \code{Labels} and \code{Percentage},
#'   two rows per dataset in \code{data}.
#' @keywords MSD, FN, FP
#' @export
#' @examples
#' CollectFNFP()
CollectFNFP <- function(MSD)
{
  # Pre-allocate the per-dataset rate vectors instead of growing NULLs.
  FN <- numeric(length(data))
  FP <- numeric(length(data))
  MSD <- data.frame()  # the MSD argument is intentionally discarded
  # seq_along() is safe when 'data' is empty, unlike 1:length(data),
  # which would evaluate to c(1, 0) and index out of bounds.
  for (i in seq_along(data))
  {
    class <- data[[i]]$Type_1
    scored.class <- data[[i]]$Type_3
    FN[i] <- sum(class == 0 & scored.class == 1) / nrow(data[[i]])
    FP[i] <- sum(class == 1 & scored.class == 0) / nrow(data[[i]])
    MSDx <- data.frame(Labels = "Mislabelled Non-defective Modules", Percentage = FN[i])
    MSD <- rbind(MSD, MSDx)
    MSDx <- data.frame(Labels = "Actual Defective Modules Mislabelled", Percentage = FP[i])
    MSD <- rbind(MSD, MSDx)
  }
  return(MSD)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/get_recent_personal_explanations_votes_specific_member_by_category.R
\name{get_recent_personal_explanations_votes_specific_member_by_category}
\alias{get_recent_personal_explanations_votes_specific_member_by_category}
\title{Get Recent Personal Explanation Votes by a Specific Member by Category}
\usage{
get_recent_personal_explanations_votes_specific_member_by_category(member_id,
congress, category, myAPI_Key)
}
\arguments{
\item{member_id}{The ID of the member to retrieve; it is assigned by the Biographical Directory of the United States Congress or can be retrieved from a member list request.}
\item{congress}{110-115}
\item{category}{options: voted-incorrectly ( Voted yes or no by mistake), official-business (Away on official congressional business), ambiguous (No reason given), travel-difficulties ( Travel delays and issues), personal (Personal or family reason), claims-voted (Vote made but not recorded), medical ( Medical issue for lawmaker, not family), weather, memorial, misunderstanding, leave-of-absence, prior-commitment, election-related, military-service, other}
\item{myAPI_Key}{use the congress API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
}
\value{
List of returned JSON from endpoint that retrieves the 20 most recent personal explanations by a specific member in the specified Congress, and supports paginated requests using multiples of 20.
}
\description{
Lawmakers, mostly in the House but also in the Senate, can make personal explanations for missed or mistaken votes in the Congressional Record. To get recent personal explanations by a specific member that are parsed to individual votes and have an additional category attribute describing the general reason for the absence or incorrect vote, use the following function. HTTP Request: GET https://api.propublica.org/congress/v1/members/{member_id}/explanations/{congress}/votes.json
}
\examples{
\dontrun{
get_recent_personal_explanations_votes_specific_member_by_category('S001193', 115, 'personal')
}
}
| /man/get_recent_personal_explanations_votes_specific_member_by_category.Rd | no_license | DavytJ/ProPublicaR | R | false | true | 2,208 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in
% R/get_recent_personal_explanations_votes_specific_member_by_category.R
\name{get_recent_personal_explanations_votes_specific_member_by_category}
\alias{get_recent_personal_explanations_votes_specific_member_by_category}
\title{Get Recent Personal Explanation Votes by a Specific Member by Category}
\usage{
get_recent_personal_explanations_votes_specific_member_by_category(member_id,
congress, category, myAPI_Key)
}
\arguments{
\item{member_id}{The ID of the member to retrieve; it is assigned by the Biographical Directory of the United States Congress or can be retrieved from a member list request.}
\item{congress}{110-115}
\item{category}{options: voted-incorrectly ( Voted yes or no by mistake), official-business (Away on official congressional business), ambiguous (No reason given), travel-difficulties ( Travel delays and issues), personal (Personal or family reason), claims-voted (Vote made but not recorded), medical ( Medical issue for lawmaker, not family), weather, memorial, misunderstanding, leave-of-absence, prior-commitment, election-related, military-service, other}
\item{myAPI_Key}{use the congress API, you must sign up for an API key. The API key must be included in all API requests to the server, set as a header.}
}
\value{
List of returned JSON from endpoint that retrieves the 20 most recent personal explanations by a specific member in the specified Congress, and supports paginated requests using multiples of 20.
}
\description{
Lawmakers, mostly in the House but also in the Senate, can make personal explanations for missed or mistaken votes in the Congressional Record. To get recent personal explanations by a specific member that are parsed to individual votes and have an additional category attribute describing the general reason for the absence or incorrect vote, use the following function. HTTP Request: GET https://api.propublica.org/congress/v1/members/{member_id}/explanations/{congress}/votes.json
}
\examples{
\dontrun{
get_recent_personal_explanations_votes_specific_member_by_category('S001193', 115, 'personal')
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disp.R
\name{disp}
\alias{disp}
\title{Generate a table with identified bubble/crisis periods}
\usage{
disp(OT, obs)
}
\arguments{
\item{OT}{A date vector. Bubbles/crisis periods identified by the
\code{psymonitor::locate} function.}
\item{obs}{A positive integer. The number of observations.}
}
\value{
A vector of strings with bubble/crisis periods.
}
\description{
\code{disp} generates a data frame with bubble/crisis
periods identified by the PSY procedure
}
\examples{
data(spread)
y <- spread$value[150:200]
obs <- length(y)
swindow0 <- floor(obs*(0.01 + 1.8/sqrt(obs)))
dim <- obs - swindow0 + 1
Tb <- 24 + swindow0 - 1
# Estimate PSY statistics and CVs
bsadf <- PSY(y, swindow0)
quantilesBsadf <- cvPSYwmboot(y, swindow0, Tb=Tb, nboot = 49, nCores = 2)
quantile95 <- quantilesBsadf \%*\% matrix(1, nrow = 1, ncol = dim)
# locate bubble/crisis dates
ind95 <- (bsadf > t(quantile95[2, ])) * 1
monitorDates <- spread$date[swindow0:obs]
OT <- locate(ind95, monitorDates)
# Show bubble/crisis periods
disp(OT, obs)
}
\references{
Phillips, P. C. B., Shi, S., & Yu, J. (2015a). Testing for
multiple bubbles: Historical episodes of exuberance and collapse in the S&P
500. \emph{International Economic Review}, 56(4), 1034--1078.
Phillips, P. C. B., Shi, S., & Yu, J. (2015b). Testing for
multiple bubbles: Limit Theory for Real-Time Detectors. \emph{International
Economic Review}, 56(4), 1079--1134.
* Phillips, P. C. B., & Shi, S.(forthcoming). Real time
monitoring of asset markets: Bubbles and crisis. In Hrishikesh D. Vinod and
C.R. Rao (Eds.), \emph{Handbook of Statistics Volume 41 - Econometrics
Using R}.
}
| /man/disp.Rd | no_license | cran/psymonitor | R | false | true | 1,831 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/disp.R
\name{disp}
\alias{disp}
\title{Generate a table with identified bubble/crisis periods}
\usage{
disp(OT, obs)
}
\arguments{
\item{OT}{A date vector. Bubbles/crisis periods identified by the
\code{psymonitor::locate} function.}
\item{obs}{A positive integer. The number of observations.}
}
\value{
A vector of strings with bubble/crisis periods.
}
\description{
\code{disp} generates a data frame with bubble/crisis
periods identified by the PSY procedure
}
\examples{
data(spread)
y <- spread$value[150:200]
obs <- length(y)
swindow0 <- floor(obs*(0.01 + 1.8/sqrt(obs)))
dim <- obs - swindow0 + 1
Tb <- 24 + swindow0 - 1
# Estimate PSY statistics and CVs
bsadf <- PSY(y, swindow0)
quantilesBsadf <- cvPSYwmboot(y, swindow0, Tb=Tb, nboot = 49, nCores = 2)
quantile95 <- quantilesBsadf \%*\% matrix(1, nrow = 1, ncol = dim)
# locate bubble/crisis dates
ind95 <- (bsadf > t(quantile95[2, ])) * 1
monitorDates <- spread$date[swindow0:obs]
OT <- locate(ind95, monitorDates)
# Show bubble/crisis periods
disp(OT, obs)
}
\references{
Phillips, P. C. B., Shi, S., & Yu, J. (2015a). Testing for
multiple bubbles: Historical episodes of exuberance and collapse in the S&P
500. \emph{International Economic Review}, 56(4), 1034--1078.
Phillips, P. C. B., Shi, S., & Yu, J. (2015b). Testing for
multiple bubbles: Limit Theory for Real-Time Detectors. \emph{International
Economic Review}, 56(4), 1079--1134.
* Phillips, P. C. B., & Shi, S.(forthcoming). Real time
monitoring of asset markets: Bubbles and crisis. In Hrishikesh D. Vinod and
C.R. Rao (Eds.), \emph{Handbook of Statistics Volume 41 - Econometrics
Using R}.
}
|
\name{summary.warn}
\alias{summary.warn}
\title{
Summaries of posterior weaning parameters
}
\description{
\code{summary.warn} calculates simple summaries of posterior samples for the four weaning parameters, which is the maximum density estimators and its marginal probabilities, a joint probability for the combination of the maximum density weaning ages, mean squared distance between the measured and modeled nitrogen isotope ratio (d15N), number of non-adult individuals used, and number of particles used in sequential Monte Carlo (SMC) sampling.
}
\usage{
\method{summary}{warn}(object, \dots)
}
\arguments{
\item{object}{an object of class \code{"warn"} generated by \code{\link{warn}}.}
\item{\dots}{additional arguments affecting the summary produced.}
}
\value{
\code{summary.warn} returns a list containing following components and those succeeded from \code{"warn"}:\cr
\item{call}{the matched call.}
\item{mde}{the maximum density estimators and the marginal probabilities.}
\item{prob.2d.age}{the joint probability for the combination of the maximum density weaning ages.}
\item{dist.mde}{the mean squared distance between the measured and modeled d15Ns.}
\item{individual}{the number of non-adult individuals used.}
\item{particle}{the number of particles used in SMC sampling.}
}
\references{
Tsutaya, T., and Yoneda, M. (2013). Quantitative reconstruction of weaning ages in archaeological human populations using bone collagen nitrogen isotope ratios and approximate Bayesian computation. \emph{PLoS ONE} \bold{8}, e72327.
}
\seealso{
\code{\link{WARN}}, \code{\link{warn}}, \code{\link{warnCI}}, \code{\link{warnProb}}, \code{\link{plot.warn}}
}
\examples{
## See ?warn for examples.
}
\keyword{math}
| /man/summary.warn.Rd | no_license | cran/WARN | R | false | false | 1,748 | rd | \name{summary.warn}
\alias{summary.warn}
\title{
Summaries of posterior weaning parameters
}
\description{
\code{summary.warn} calculates simple summaries of posterior samples for the four weaning parameters, which is the maximum density estimators and its marginal probabilities, a joint probability for the combination of the maximum density weaning ages, mean squared distance between the measured and modeled nitrogen isotope ratio (d15N), number of non-adult individuals used, and number of particles used in sequential Monte Carlo (SMC) sampling.
}
\usage{
\method{summary}{warn}(object, \dots)
}
\arguments{
\item{object}{an object of class \code{"warn"} generated by \code{\link{warn}}.}
\item{\dots}{additional arguments affecting the summary produced.}
}
\value{
\code{summary.warn} returns a list containing following components and those succeeded from \code{"warn"}:\cr
\item{call}{the matched call.}
\item{mde}{the maximum density estimators and the marginal probabilities.}
\item{prob.2d.age}{the joint probability for the combination of the maximum density weaning ages.}
\item{dist.mde}{the mean squared distance between the measured and modeled d15Ns.}
\item{individual}{the number of non-adult individuals used.}
\item{particle}{the number of particles used in SMC sampling.}
}
\references{
Tsutaya, T., and Yoneda, M. (2013). Quantitative reconstruction of weaning ages in archaeological human populations using bone collagen nitrogen isotope ratios and approximate Bayesian computation. \emph{PLoS ONE} \bold{8}, e72327.
}
\seealso{
\code{\link{WARN}}, \code{\link{warn}}, \code{\link{warnCI}}, \code{\link{warnProb}}, \code{\link{plot.warn}}
}
\examples{
## See ?warn for examples.
}
\keyword{math}
|
\name{overfittingMFA}
\alias{overfittingMFA}
\title{
Basic MCMC sampler
}
\description{
Gibbs sampling for fitting a mixture model of factor analyzers.
}
\usage{
overfittingMFA(x_data, originalX, outputDirectory, Kmax, m, thinning, burn,
g, h, alpha_prior, alpha_sigma, beta_sigma,
start_values, q, zStart, gibbs_z)
}
\arguments{
\item{x_data}{
normalized data
}
\item{originalX}{
observed raw data (only for plotting purpose)
}
\item{outputDirectory}{
Name of the output folder
}
\item{Kmax}{
Number of mixture components
}
\item{m}{
Number of iterations
}
\item{thinning}{
Thinning of chain
}
\item{burn}{
Burn-in period
}
\item{g}{
Prior parameter \eqn{g}. Default value: \eqn{g = 2}.
}
\item{h}{
Prior parameter \eqn{h}. Default value: \eqn{h = 1}.
}
\item{alpha_prior}{
Parameters of the Dirichlet prior distribution of mixture weights.
}
\item{alpha_sigma}{
Prior parameter \eqn{\alpha}. Default value: \eqn{\alpha = 2}.
}
\item{beta_sigma}{
Prior parameter \eqn{\beta}. Default value: \eqn{\beta = 1}.
}
\item{start_values}{
Optional (not used)
}
\item{q}{
Number of factors.
}
\item{zStart}{
Optional (not used)
}
\item{gibbs_z}{
Optional
}
}
\value{
List of files}
\author{
Panagiotis Papastamoulis
}
| /fabMixPackage/version_4.2/fabMix/man/overfittingMFA.Rd | no_license | mqbssppe/overfittingFABMix | R | false | false | 1,268 | rd | \name{overfittingMFA}
\alias{overfittingMFA}
\title{
Basic MCMC sampler
}
\description{
Gibbs sampling for fitting a mixture model of factor analyzers.
}
\usage{
overfittingMFA(x_data, originalX, outputDirectory, Kmax, m, thinning, burn,
g, h, alpha_prior, alpha_sigma, beta_sigma,
start_values, q, zStart, gibbs_z)
}
\arguments{
\item{x_data}{
normalized data
}
\item{originalX}{
observed raw data (only for plotting purpose)
}
\item{outputDirectory}{
Name of the output folder
}
\item{Kmax}{
Number of mixture components
}
\item{m}{
Number of iterations
}
\item{thinning}{
Thinning of chain
}
\item{burn}{
Burn-in period
}
\item{g}{
Prior parameter \eqn{g}. Default value: \eqn{g = 2}.
}
\item{h}{
Prior parameter \eqn{h}. Default value: \eqn{h = 1}.
}
\item{alpha_prior}{
Parameters of the Dirichlet prior distribution of mixture weights.
}
\item{alpha_sigma}{
Prior parameter \eqn{\alpha}. Default value: \eqn{\alpha = 2}.
}
\item{beta_sigma}{
Prior parameter \eqn{\beta}. Default value: \eqn{\beta = 1}.
}
\item{start_values}{
Optional (not used)
}
\item{q}{
Number of factors.
}
\item{zStart}{
Optional (not used)
}
\item{gibbs_z}{
Optional
}
}
\value{
List of files}
\author{
Panagiotis Papastamoulis
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Reads a CAPE field (variable SBCAPE) from the NetCDF file SBCAPE.nc once at
# startup, then serves one tmap raster plot per requested time step
# (selected through input$bins).
library(ncdf4)
library(chron)
library(shiny)
library(sp)
library(rgdal)
library(raster)
library(tmap)
data(World)
#library(maptools)
# Define server logic required to draw a histogram
#setwd("D:/Moje dokumenty/Praca_magisterska/MagisterkaNetCDF")
#
ncname <- "SBCAPE" # file name (without the .nc extension)
ncfname <- paste0(ncname, ".nc")
dname <- "SBCAPE" # name of the NetCDF variable to plot
# Open the file and pull out the coordinate axes and the time vector.
ncin <- nc_open(ncfname)
lon <- ncvar_get(ncin, "lon")
nlon <- dim(lon)
lat <- ncvar_get(ncin, "lat", verbose = FALSE)
nlat <- dim(lat)
t <- ncvar_get(ncin, "time") # NOTE(review): shadows base::t()
tunits <- ncatt_get(ncin, "time", "units")
nt <- dim(t)
######################################################
# Full lon x lat x time data cube plus variable metadata.
tmp.array <- ncvar_get(ncin, dname)
dlname <- ncatt_get(ncin, dname, "long_name")
dunits <- ncatt_get(ncin, dname, "units")
fillvalue <- ncatt_get(ncin, dname, "_FillValue")
# Parse the reference date out of the time-units string
# (expected form: "<unit> since YYYY-MM-DD ...").
tustr <- strsplit(tunits$value, " ")
tdstr <- strsplit(unlist(tustr)[3], "-")
tmonth <- as.integer(unlist(tdstr)[2])
tday <- as.integer(unlist(tdstr)[3])
tyear <- as.integer(unlist(tdstr)[1])
#chron(t, origin = c(tmonth, tday, tyear))
#tmp.array[tmp.array == fillvalue$value] <- NA
#tunits$value # information about how time is stored in the file
# Time axis as POSIXct; assumes the values are seconds since the Unix epoch
# (the tmonth/tday/tyear parsed above are not used here) -- TODO confirm.
czas <- as.POSIXct(t, origin="1970-01-01", tz='GMT')
#m <- 1
shinyServer(function(input, output) {
  # Header showing the timestamp of the currently selected time step.
  output$czas <- renderUI({
    m <- input$bins
    titlePanel(paste(format(czas[m],"%Y-%m-%d %H:%M")))
  })
  # Map of the selected time slice rendered with tmap.
  output$distPlot <- renderPlot({
    colory <- c("white","cyan","green","yellow","orange", "red", "#600000")
    #colfunc <- colorRampPalette(c("white","cyan","green","yellow","orange", "red", "#600000"))
    m <- input$bins
    # Flatten the selected time slice into a lon/lat/value data frame.
    tmp.slice <- tmp.array[, , m]
    lonlat <- expand.grid(lon, lat)
    tmp.vec <- as.vector(tmp.slice)
    tmp.df01 <- data.frame(cbind(lonlat, tmp.vec))
    names(tmp.df01) <- c("lon", "lat", paste(dname, as.character(m), sep = "_"))
    # Convert the points into a gridded Spatial object and then a raster.
    pts <- tmp.df01
    colnames(pts) <- c("x","y","z")
    coordinates(pts) <- ~x+y
    proj4string(pts) <- CRS("+init=epsg:4326") # set it to lat-long
    pts <- spTransform(pts,CRS("+init=epsg:4326"))
    gridded(pts) <- TRUE
    r <- raster(pts)
    projection(r) <- CRS("+init=epsg:4326")
    #writeRaster(r, filename=paste(format(czas[m],"%Y-%m-%d %H:%M"),".tif", sep=""), options=c('TFW=YES'))
    # NOTE(review): tmap_mode() is chained onto the tmap object with '+',
    # which is unusual -- verify this is intended.
    tm_shape(r, n.x = 5) +
      tm_raster(n=50,palette = colory, auto.palette.mapping = FALSE, interpolate = TRUE, breaks = seq(0,4500, 250),
                title="CAPE \n[J/Kg^-2]")+
      tm_format_Europe(title = NA, title.position = c("left", "top"),attr.outside=TRUE,legend.outside=TRUE,
                       legend.text.size = 1.5,legend.outside.position=c("left"),
                       attr.position = c("right", "bottom"), legend.frame = TRUE,
                       inner.margins = c(.0, .0, 0, 0))+tm_scale_bar(size = 1)+
      tm_shape(World, unit = "km") +
      tm_polygons(alpha = 0)+
      tm_xlab("latitude", size = 1, rotation = 0)+
      tm_ylab("longitude", size = 1, rotation = 90)+
      tm_grid(x = c(-20,0,20,40,60),y=c(40,50,60,70,80), labels.inside.frame=TRUE)+
      tmap_mode("plot")
  }, height = 700, width = 700)
})
| /server.R | no_license | bczernecki/shiny | R | false | false | 3,399 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Reads a CAPE field (variable SBCAPE) from the NetCDF file SBCAPE.nc once at
# startup, then serves one tmap raster plot per requested time step
# (selected through input$bins).
library(ncdf4)
library(chron)
library(shiny)
library(sp)
library(rgdal)
library(raster)
library(tmap)
data(World)
#library(maptools)
# Define server logic required to draw a histogram
#setwd("D:/Moje dokumenty/Praca_magisterska/MagisterkaNetCDF")
#
ncname <- "SBCAPE" # file name (without the .nc extension)
ncfname <- paste0(ncname, ".nc")
dname <- "SBCAPE" # name of the NetCDF variable to plot
# Open the file and pull out the coordinate axes and the time vector.
ncin <- nc_open(ncfname)
lon <- ncvar_get(ncin, "lon")
nlon <- dim(lon)
lat <- ncvar_get(ncin, "lat", verbose = FALSE)
nlat <- dim(lat)
t <- ncvar_get(ncin, "time") # NOTE(review): shadows base::t()
tunits <- ncatt_get(ncin, "time", "units")
nt <- dim(t)
######################################################
# Full lon x lat x time data cube plus variable metadata.
tmp.array <- ncvar_get(ncin, dname)
dlname <- ncatt_get(ncin, dname, "long_name")
dunits <- ncatt_get(ncin, dname, "units")
fillvalue <- ncatt_get(ncin, dname, "_FillValue")
# Parse the reference date out of the time-units string
# (expected form: "<unit> since YYYY-MM-DD ...").
tustr <- strsplit(tunits$value, " ")
tdstr <- strsplit(unlist(tustr)[3], "-")
tmonth <- as.integer(unlist(tdstr)[2])
tday <- as.integer(unlist(tdstr)[3])
tyear <- as.integer(unlist(tdstr)[1])
#chron(t, origin = c(tmonth, tday, tyear))
#tmp.array[tmp.array == fillvalue$value] <- NA
#tunits$value # information about how time is stored in the file
# Time axis as POSIXct; assumes the values are seconds since the Unix epoch
# (the tmonth/tday/tyear parsed above are not used here) -- TODO confirm.
czas <- as.POSIXct(t, origin="1970-01-01", tz='GMT')
#m <- 1
shinyServer(function(input, output) {
  # Header showing the timestamp of the currently selected time step.
  output$czas <- renderUI({
    m <- input$bins
    titlePanel(paste(format(czas[m],"%Y-%m-%d %H:%M")))
  })
  # Map of the selected time slice rendered with tmap.
  output$distPlot <- renderPlot({
    colory <- c("white","cyan","green","yellow","orange", "red", "#600000")
    #colfunc <- colorRampPalette(c("white","cyan","green","yellow","orange", "red", "#600000"))
    m <- input$bins
    # Flatten the selected time slice into a lon/lat/value data frame.
    tmp.slice <- tmp.array[, , m]
    lonlat <- expand.grid(lon, lat)
    tmp.vec <- as.vector(tmp.slice)
    tmp.df01 <- data.frame(cbind(lonlat, tmp.vec))
    names(tmp.df01) <- c("lon", "lat", paste(dname, as.character(m), sep = "_"))
    # Convert the points into a gridded Spatial object and then a raster.
    pts <- tmp.df01
    colnames(pts) <- c("x","y","z")
    coordinates(pts) <- ~x+y
    proj4string(pts) <- CRS("+init=epsg:4326") # set it to lat-long
    pts <- spTransform(pts,CRS("+init=epsg:4326"))
    gridded(pts) <- TRUE
    r <- raster(pts)
    projection(r) <- CRS("+init=epsg:4326")
    #writeRaster(r, filename=paste(format(czas[m],"%Y-%m-%d %H:%M"),".tif", sep=""), options=c('TFW=YES'))
    # NOTE(review): tmap_mode() is chained onto the tmap object with '+',
    # which is unusual -- verify this is intended.
    tm_shape(r, n.x = 5) +
      tm_raster(n=50,palette = colory, auto.palette.mapping = FALSE, interpolate = TRUE, breaks = seq(0,4500, 250),
                title="CAPE \n[J/Kg^-2]")+
      tm_format_Europe(title = NA, title.position = c("left", "top"),attr.outside=TRUE,legend.outside=TRUE,
                       legend.text.size = 1.5,legend.outside.position=c("left"),
                       attr.position = c("right", "bottom"), legend.frame = TRUE,
                       inner.margins = c(.0, .0, 0, 0))+tm_scale_bar(size = 1)+
      tm_shape(World, unit = "km") +
      tm_polygons(alpha = 0)+
      tm_xlab("latitude", size = 1, rotation = 0)+
      tm_ylab("longitude", size = 1, rotation = 90)+
      tm_grid(x = c(-20,0,20,40,60),y=c(40,50,60,70,80), labels.inside.frame=TRUE)+
      tmap_mode("plot")
  }, height = 700, width = 700)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.shield_operations.R
\name{list_attacks}
\alias{list_attacks}
\title{Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period}
\usage{
list_attacks(ResourceArns = NULL, StartTime = NULL, EndTime = NULL,
NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{ResourceArns}{The ARN (Amazon Resource Name) of the resource that was attacked. If this is left blank, all applicable resources for this account will be included.}
\item{StartTime}{The start of the time period for the attacks. This is a \code{timestamp} type. The sample request above indicates a \code{number} type because the default used by WAF is Unix time in seconds. However any valid \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types}{timestamp format} is allowed.}
\item{EndTime}{The end of the time period for the attacks. This is a \code{timestamp} type. The sample request above indicates a \code{number} type because the default used by WAF is Unix time in seconds. However any valid \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types}{timestamp format} is allowed.}
\item{NextToken}{The \code{ListAttacksRequest.NextMarker} value from a previous call to \code{ListAttacksRequest}. Pass null if this is the first call.}
\item{MaxResults}{The maximum number of AttackSummary objects to be returned. If this is left blank, the first 20 results will be returned.
This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of AttackSummary objects returned could be less than \code{MaxResults}, even if there are still more AttackSummary objects yet to return. If there are more AttackSummary objects to return, AWS WAF will always also return a \code{NextToken}.}
}
\description{
Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period.
}
\section{Accepted Parameters}{
\preformatted{list_attacks(
ResourceArns = list(
"string"
),
StartTime = list(
FromInclusive = as.POSIXct("2015-01-01"),
ToExclusive = as.POSIXct("2015-01-01")
),
EndTime = list(
FromInclusive = as.POSIXct("2015-01-01"),
ToExclusive = as.POSIXct("2015-01-01")
),
NextToken = "string",
MaxResults = 123
)
}
}
| /service/paws.shield/man/list_attacks.Rd | permissive | CR-Mercado/paws | R | false | true | 2,367 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.shield_operations.R
\name{list_attacks}
\alias{list_attacks}
\title{Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period}
\usage{
list_attacks(ResourceArns = NULL, StartTime = NULL, EndTime = NULL,
NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{ResourceArns}{The ARN (Amazon Resource Name) of the resource that was attacked. If this is left blank, all applicable resources for this account will be included.}
\item{StartTime}{The start of the time period for the attacks. This is a \code{timestamp} type. The sample request above indicates a \code{number} type because the default used by WAF is Unix time in seconds. However any valid \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types}{timestamp format} is allowed.}
\item{EndTime}{The end of the time period for the attacks. This is a \code{timestamp} type. The sample request above indicates a \code{number} type because the default used by WAF is Unix time in seconds. However any valid \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types}{timestamp format} is allowed.}
\item{NextToken}{The \code{ListAttacksRequest.NextMarker} value from a previous call to \code{ListAttacksRequest}. Pass null if this is the first call.}
\item{MaxResults}{The maximum number of AttackSummary objects to be returned. If this is left blank, the first 20 results will be returned.
This is a maximum value; it is possible that AWS WAF will return the results in smaller batches. That is, the number of AttackSummary objects returned could be less than \code{MaxResults}, even if there are still more AttackSummary objects yet to return. If there are more AttackSummary objects to return, AWS WAF will always also return a \code{NextToken}.}
}
\description{
Returns all ongoing DDoS attacks or all DDoS attacks during a specified time period.
}
\section{Accepted Parameters}{
\preformatted{list_attacks(
ResourceArns = list(
"string"
),
StartTime = list(
FromInclusive = as.POSIXct("2015-01-01"),
ToExclusive = as.POSIXct("2015-01-01")
),
EndTime = list(
FromInclusive = as.POSIXct("2015-01-01"),
ToExclusive = as.POSIXct("2015-01-01")
),
NextToken = "string",
MaxResults = 123
)
}
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5735
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5735
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query60_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1669
c no.of clauses 5735
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5735
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query60_1344n.qdimacs 1669 5735 E1 [] 0 104 1562 5735 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query60_1344n/trivial_query60_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 714 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5735
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5735
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query60_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1669
c no.of clauses 5735
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5735
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/trivial_query60_1344n.qdimacs 1669 5735 E1 [] 0 104 1562 5735 NONE
|
## Transaction Summarization Metrics: Per Day, Month, & Year
##
## tx is transaction
## columns to keep: transaction count (txCount)
## daily blockchain transaction volume (by count), monthly, & yearly
## (some coins don't have but one year.. take this into account)
## ONLY BY COUNT IS DONE, SINCE SOME CSVs DO NOT CONTAIN adjustedTransaction Amount
## FUTURE WORK
## daily blockchain database growth in MB
## daily crypto traffic bandwidth
## ************************* INGEST DATA FROM MULTIPLE CSV FILES *************************
setwd("/Users/aaronfl1p/Desktop/boxy/coinMetrics")
## Obtain strings of the full file path for each .csv in the 'coinData08122018' folder
coinDataFiles <- list.files(path="/Users/aaronfl1p/Desktop/boxy/coinMetrics/coinData08122018", pattern="*.csv", full.names=TRUE, recursive=FALSE)
## Read each CSV into one list entry, then bind the list into 'allCoinData' once.
## FIX: the original grew allCoinData with rbind() inside the loop (O(n^2)
## copying) and seeded it with columns (file, date, txCount) that do not match
## the per-file columns (date, txCount, fileIndex) -- rbind.data.frame rejects
## that with a "names do not match previous names" error. Collecting into a
## pre-allocated list and binding once avoids both problems.
coinDataList <- vector("list", length(coinDataFiles))
for (i in seq_along(coinDataFiles)) {
  currentCoinDF <- read.csv(coinDataFiles[i], header = FALSE, stringsAsFactors = FALSE)  # file i as a df
  colnames(currentCoinDF) <- currentCoinDF[1, ]           # first data row holds the real header
  currentCoinDF <- currentCoinDF[-1, ]                    # drop it now that it is the header
  currentCoinDF <- currentCoinDF[, c("date", "txCount")]  # keep only these two columns
  currentCoinDF$fileIndex <- i                            # which source file (1..N) each row came from
  coinDataList[[i]] <- currentCoinDF
}
allCoinData <- do.call(rbind, coinDataList)
allCoinData$date <- as.factor(allCoinData$date)
allCoinData$txCount <- as.numeric(allCoinData$txCount)
## Create MonthYear Column ("YYYY-MM"), for Per Month Analysis
allCoinData$MonthYear <- substr(allCoinData$date, 1, 7)
## Create Year Column ("YYYY"), for Per Year Analysis
allCoinData$Year <- substr(allCoinData$date, 1, 4)
## ************************* CALCULATE DAILY/MONTHLY/YEARLY TRANSACTION SUMS *************************
library(plyr)
## Keep only fully observed rows before aggregating
allCoinData <- allCoinData[complete.cases(allCoinData), ]
## Total transaction count per calendar date (summed over all coins)
dailySums <- ddply(allCoinData, .(date), function(rows) {
  data.frame(dailySum = sum(rows$txCount, na.rm = TRUE))
})
## Total transaction count per "YYYY-MM" month
monthlySums <- ddply(allCoinData, .(MonthYear), function(rows) {
  data.frame(monthlySum = sum(rows$txCount, na.rm = TRUE))
})
## Total transaction count per year
yearlySums <- ddply(allCoinData, .(Year), function(rows) {
  data.frame(yearlySum = sum(rows$txCount, na.rm = TRUE))
})
## Persist the merged per-coin table for later reuse
write.csv(allCoinData, "allCoinData.csv", row.names = FALSE)
## ************************* PLOT DAILY/MONTHLY/YEARLY TRANSACTION SUMS *************************
library(ggplot2)
library(scales)
## Quick base-graphics sanity check of the yearly totals
plot(x = yearlySums$Year, y = yearlySums$yearlySum) ## works
## Plot of Yearly Transaction Count
yearlyPlot <- ggplot(data = yearlySums, aes(x = Year, y = yearlySum)) +
  geom_point() +
  ylab("Transaction Count") +
  scale_y_continuous(labels = comma, breaks = seq(0, 500000000, by = 50000000))
yearlyPlot
## Plot of Monthly Transaction Count (x labels rotated so months stay legible)
monthlyPlot <- ggplot(data = monthlySums, aes(x = MonthYear, y = monthlySum)) +
  geom_point() +
  ylab("Transaction Count") +
  xlab("Month") +
  scale_y_continuous(labels = comma, breaks = seq(0, 150000000, by = 10000000)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 6))
monthlyPlot
## Plot of Daily Transaction Count (scatter; dense x axis, tiny labels)
dailyPlot <- ggplot(data = dailySums, aes(x = date, y = dailySum)) +
  geom_point() +
  ylab("Transaction Count") +
  xlab("Day") +
  scale_y_continuous(labels = comma, breaks = seq(0, 10000000, by = 500000)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 3))
dailyPlot
# Daily again as a line chart over real dates... this one looks better
dailySums$date <- as.Date(dailySums$date)
dailyLinePlot <- ggplot(dailySums, aes(date, dailySum)) +
  geom_line() +
  scale_y_continuous(labels = comma, breaks = seq(0, 10000000, by = 500000)) +
  xlab("") +
  ylab("Daily Transaction Count")
dailyLinePlot
| /coinMetrics.R | no_license | aarondevlin/blockchain_transaction_analysis | R | false | false | 4,665 | r |
## Transaction Summarization Metrics: Per Day, Month, & Year
##
## tx is transaction
## columns to keep: transaction count (txCount), and use adjustedTransaction Amount
## daily blockchain transaction volume (by count & by amount), monthly, & yearly (some coins dont have but one year.. take this into account)
## this is daily tx count & tx amount, for all of the coins in one bucket
## ONLY BY COUNT IS DONE, SINCE SOME CSVs DO NOT CONTAIN adjustedTransaction Amount
## FUTURE WORK
## daily blockchain database growth in MB
## daily crypto traffic bandwidth
## ************************* INGEST DATA FROM MULTIPLE CSV FILES *************************
setwd("/Users/aaronfl1p/Desktop/boxy/coinMetrics")
## Obtain strings of the full file path for each .csv in the 'coinData08122018' folder
coinDataFiles <- list.files(path="/Users/aaronfl1p/Desktop/boxy/coinMetrics/coinData08122018", pattern="*.csv", full.names=TRUE, recursive=FALSE)
# Initialize the allcoindata df
#allCoinData <- data.frame(file = "", date = "", txVolume.USD. = "", txCount = "")
allCoinData <- data.frame(file = "", date = "", txCount = "")
allCoinData <- allCoinData[-1,]
## Loop through files in the 'coinData08122018' folder and put all data into one df called 'allCoinData'
## Open each file, make a df, then only choose the 'date', 'txCount'
for(i in 1:length(coinDataFiles)){ # For each csv filename
currentFileName <- coinDataFiles[i] # Grab the ith filename
currentCoinDF <- read.csv(currentFileName, header = F, stringsAsFactors = F) # Turn the ith file into a df
colnames(currentCoinDF) <- currentCoinDF[1,] # Add column names as the first row of the .csv
currentCoinDF <- currentCoinDF[-1,] # Remove the first row, since they are colnames now
#currentCoinDF <- currentCoinDF[, c("date", "txVolume(USD)", "txCount")] # Choose only these three columns
currentCoinDF <- currentCoinDF[, c("date", "txCount")] # Choose only these two columns
currentCoinDF$fileIndex <- i # Add the file number (1 through 75) to the df, for future reference
allCoinData <- rbind(allCoinData, currentCoinDF) # Concatenate the df with the df of the previous loop iteration
}
allCoinData$date <- as.factor(allCoinData$date)
allCoinData$txCount <- as.numeric(allCoinData$txCount)
## Create MonthYear Column, for Per Month Analysis
allCoinData$MonthYear <- substr(allCoinData$date, 1, 7)
## Create Year Column, for Per Year Analysis
allCoinData$Year <- substr(allCoinData$date, 1, 4)
## ************************* CALCULATE DAILY/MONTHLY/YEARLY TRANSACTION SUMS *************************
library(plyr)
allCoinData <- allCoinData[complete.cases(allCoinData),] # Remove rows with NA's
## Transaction Count Per Day
dailySums <- ddply(allCoinData, .(date), summarize, dailySum = sum(txCount, na.rm=T))
## Transaction Count Per MonthYear
monthlySums <- ddply(allCoinData, .(MonthYear), summarize, monthlySum = sum(txCount, na.rm=T))
## Transaction Count Per Year
yearlySums <- ddply(allCoinData, .(Year), summarize, yearlySum = sum(txCount, na.rm=T))
write.csv(allCoinData, "allCoinData.csv", row.names = F)
## ************************* PLOT DAILY/MONTHLY/YEARLY TRANSACTION SUMS *************************
library(ggplot2)
library(scales)
plot(x=yearlySums$Year, y=yearlySums$yearlySum) ## works
## Plot of Yearly Transaction Count
ggplot(data=yearlySums, aes(x=Year, y=yearlySum)) +
geom_point() +
ylab("Transaction Count") +
scale_y_continuous(labels = comma, breaks = seq(0, 500000000, by=50000000))
## Plot of Monthly Transaction Count
ggplot(data=monthlySums, aes(x=MonthYear, y=monthlySum)) +
geom_point() +
ylab("Transaction Count") + xlab("Month") +
scale_y_continuous(labels = comma, breaks = seq(0, 150000000, by=10000000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 6))
## Plot of Daily Transaction Count
ggplot(data=dailySums, aes(x=date, y=dailySum)) +
geom_point() +
ylab("Transaction Count") + xlab("Day") +
scale_y_continuous(labels = comma, breaks = seq(0, 10000000, by=500000)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5, size = 3))
# daily again... this one looks better
dailySums$date <- as.Date(dailySums$date)
ggplot(dailySums, aes(date, dailySum)) + geom_line() +
scale_y_continuous(labels = comma, breaks = seq(0, 10000000, by=500000)) +
xlab("") + ylab("Daily Transaction Count")
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.16615020770332e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615843876-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,232 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.16615020770332e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/safebrowsing_objects.R
\name{ClientInfo}
\alias{ClientInfo}
\title{ClientInfo Object}
\usage{
ClientInfo(clientId = NULL, clientVersion = NULL)
}
\arguments{
\item{clientId}{A client ID that (hopefully) uniquely identifies the client implementation of the Safe Browsing API}
\item{clientVersion}{The version of the client implementation}
}
\value{
ClientInfo object
}
\description{
ClientInfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The client metadata associated with Safe Browsing API requests.
}
| /googlesafebrowsingv4.auto/man/ClientInfo.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 626 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/safebrowsing_objects.R
\name{ClientInfo}
\alias{ClientInfo}
\title{ClientInfo Object}
\usage{
ClientInfo(clientId = NULL, clientVersion = NULL)
}
\arguments{
\item{clientId}{A client ID that (hopefully) uniquely identifies the client implementation of the Safe Browsing API}
\item{clientVersion}{The version of the client implementation}
}
\value{
ClientInfo object
}
\description{
ClientInfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The client metadata associated with Safe Browsing API requests.
}
|
#Elian Gonzalez Hernandez
#--------------------------------------------------------------------------------------------
#Question 1: estimate I = integral_0^2 (x^2 + cos(x)) dx by hit-and-miss Monte Carlo
#--------------------------------------------------------------------------------------------
#This sequence draws the integrand. Mind the integration interval [0, 2].
#NOTE(review): ggplot/aes come from ggplot2, which is not attached in this
#chunk -- presumably loaded earlier in the session; confirm.
x = seq(0,2,by=0.001)
f = function(x){((x^2)+cos(x))}
y = f(x)
df <- data.frame(x,y)
g <- ggplot(df, aes(x))
g <- g + geom_line(aes(y=y), colour="red")
g
#Uniform sample of 100000 proposal points over the integration interval [0, 2]
sample.x = runif(100000,0,2)
accept = c()
sample.accept = c()
#First find the maximum of f(x) = x^2 + cos(x) on [0, 2]; it is used as the
#envelope height c inside the loop.
#Warning: wrap each sub-expression in its own parentheses or it is computed wrong.
#max[(x^2)+cos(x)] on [0, 2] gives c = 4 + cos(2).
for(i in 1:length(sample.x)){
#NOTE(review): U is drawn on [0, 2], but since dunif(., 0, 2) = 1/2 the product
#dunif(x, 0, 2)*U below is effectively Uniform(0, 1); the comparison is
#therefore a valid hit-and-miss test c*U' <= f(x), just written in a
#roundabout way.
U = runif(1, 0, 2)
#Hit: the scaled uniform height falls under the curve at sample.x[i]
if(dunif(sample.x[i], 0,2)*(4+cos(2))*U <= f(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = 1
}
#Miss: the scaled uniform height falls above the curve
else if(dunif(sample.x[i],0,2)*(4+cos(2))*U > f(sample.x[i])) {
accept[i] = 'No'
sample.accept[i] = 0
}
}
#Hit-and-miss estimator I = c*(b-a)*p, where:
#phat is p, the number of hits over the total sample size;
#c is the maximum of f found above, here (4 + cos(2));
#(b-a) is the width of the integration interval, here (2 - 0).
phat = sum(sample.accept)/100000
phat
#Apply the formula
I = (4+cos(2))*(2)*phat
I ##The resulting estimate of the integral is 3.567224
#Compare against the integral computed numerically by R
Ireal = integrate(f,0,2)
Ireal ##3.575964 with absolute error < 4e-14
#--------------------------------------------------------------------------------------------
#Question 2, part (a): estimate I = integral_0^1 2*pi*sqrt(2 - 2x^4) dx by
#importance sampling. Draw X from the density g(x) = (5/8)*(2 - 2x^4) on [0, 1]
#via acceptance-rejection with a Uniform(0, 1) proposal, then average
#G(x) = f(x)/g(x) over the accepted draws.
#--------------------------------------------------------------------------------------------
#The integrand f(x) of the target integral
integrand <- function(x) {(2*pi*sqrt(2-(2*(x^4))))}
#The target density g(x) we must sample from (given in the assignment)
target.density <- function(x) {(5/8)*(2-(2*(x^4)))}
#Plot the integrand over the integration interval
plot(integrand, 0, 1)
#Uniform proposals over the integration interval; sample size 100000
sample.x = runif(100000,0,1)
accept = c()
sample.accept = c()
#Envelope constant for acceptance-rejection:
#M = max over [0, 1] of target.density(x)/dunif(x, 0, 1) = (5/8)*2 = 5/4,
#attained at x = 0.
for(i in 1:length(sample.x)){
U = runif(1, 0, 1)
#BUG FIX: the acceptance test must compare against the TARGET DENSITY g(x),
#not the integrand f(x). The original compared against f (values up to
#2*pi*sqrt(2) ~ 8.9), so almost every proposal was accepted and the final
#estimate was biased upward (~8.06 instead of ~7.77).
if(dunif(sample.x[i], 0, 1)*(5/4)*U <= target.density(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = sample.x[i]
}
else {
accept[i] = 'No'
sample.accept[i] = 0
}
}
#Keep the accepted/rejected flags together with the proposal points
T = data.frame(sample.x, accept = factor(accept, levels= c('Yes','No')),sample.accept)
#Only the accepted draws: an approximate sample from target.density
Taccept = T[accept == 'Yes',]
#Importance-sampling ratio G(x) = integrand(x) / target.density(x)
G = function(x){(2*pi*sqrt(2-(2*(x^4))))/((5/8)*(2-(2*(x^4))))}
Naccept = length(which(T$accept == 'Yes'))
#Monte Carlo estimate: I = mean of G over the accepted sample
I = sum(G(Taccept$sample.accept))/Naccept
I ##Approximate value of the integral; with the fix it is close to 7.77
#Check against R's numerical integration
F = function(x){(2*pi*sqrt(2-(2*(x^4))))}
Ireal = integrate(F,0,1)
Ireal #7.76633 with absolute error < 0.00049
#--------------------------------------------------------------------------------------------
#Question 2, part (b): estimate the same integral by hit-and-miss Monte Carlo,
#to compare with the result from part (a), as requested.
#--------------------------------------------------------------------------------------------
#This sequence draws the integrand
x = seq(0, 1, len = 100000)
f = function(x){2*pi*sqrt(2-(2*(x^4)))}
y = f(x)
df <- data.frame(x,y)
g <- ggplot(df, aes(x))
g <- g + geom_line(aes(y=y), colour="red")
g
#Uniform sample of 100000 proposal points over the integration interval [0, 1]
sample.x = runif(100000,0,1)
accept = c()
sample.accept = c()
#First find the maximum of f(x) = 2*pi*sqrt(2 - 2x^4) on [0, 1] for use inside
#the loop as the envelope height c.
#Warning: wrap each sub-expression in its own parentheses or it is computed wrong.
#max[2*pi*sqrt(2-(2*(x^4)))] on [0, 1] is attained at x = 0 and equals 2*pi*sqrt(2).
for(i in 1:length(sample.x)){
U = runif(1, 0, 1)
#Hit: the scaled uniform height falls under the curve at sample.x[i]
if(dunif(sample.x[i], 0, 1)*(2*pi*sqrt(2))*U <= f(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = 1
}
#Miss: the scaled uniform height falls above the curve
else if(dunif(sample.x[i],0,1)*(2*pi*sqrt(2))*U > f(sample.x[i])) {
accept[i] = 'No'
sample.accept[i] = 0
}
}
#Hit-and-miss estimator I = c*(b-a)*p, where phat is p, the number of hits
#over the total sample size, and c = 2*pi*sqrt(2) is the envelope height.
phat = sum(sample.accept)/100000
phat
#Apply the formula to estimate the integral
I = (2*pi*sqrt(2))*(1-(0))*phat
I ## This is the resulting estimate of the integral: 7.762694
#Compare against the integral computed numerically by R
Ireal = integrate(f,0,1)
Ireal ##R's result for the integral is: 7.76633 with absolute error < 0.00049
| /R Script/Examen Elian Gonzalez.R | no_license | elian880530/elian880530.github.io | R | false | false | 6,744 | r | #Elian Gonzalez Hernandez
#--------------------------------------------------------------------------------------------
#Pregunta 1
#--------------------------------------------------------------------------------------------
#Esta secuencia es para dibujar la funcion.Alerta debo fijarme en el intervalo de la integral
x = seq(0,2,by=0.001)
f = function(x){((x^2)+cos(x))}
y = f(x)
df <- data.frame(x,y)
g <- ggplot(df, aes(x))
g <- g + geom_line(aes(y=y), colour="red")
g
#Creo una uniforme en el intervalo de la integral y escogi por que quiero una muestra de 100000
sample.x = runif(100000,0,2)
accept = c()
sample.accept = c()
#primero hayar el maximo de la funcion f = function(x){((x^2)+cos(x))} para utilizarlo dentro del for
#Alertaaaa hay que encerrar cada funcion entre parentesis por separado pq sino no lo calcula bien
#Se escribiria de la sgte forma---- max[((x^2)+cos(x))] in [0,2]
#Se obtiene ---- (4+(cos(2))
for(i in 1:length(sample.x)){
#Genero la uniforme siempre en el intervalo[0,1] dentro del for
U = runif(1, 0, 2)
#Tomo los valores del sample en el mismo intervalo de la integral [0,2] y sustituyo el valor del max = (4+(cos(2))
if(dunif(sample.x[i], 0,2)*(4+cos(2))*U <= f(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = 1
}
#Tomo los valores del sample en el mismo intervalo de la integral [0,2] y sustituyo el valor del max = (4+(cos(2))
else if(dunif(sample.x[i],0,2)*(4+cos(2))*U > f(sample.x[i])) {
accept[i] = 'No'
sample.accept[i] = 0
}
}
#Esta es la formula que me va a dar el valor de la integral I=c(b-a)p en hit and miss
#phat es mi p es todos los aceptados entre el tamano total de mi muestra
#c es el maximo de la funcion que se obtiene arriba en este caso es (4+cos(2))
#(b-a) es la resta de los intervalos de la integral en este caso es (2-(0))
phat = sum(sample.accept)/100000
phat
#Aqui se aplica la formula
I = (4+cos(2))*(2)*phat
I ##El resultado de la integral es 3.567224
#Aqui se compara con la integral que calcula R
Ireal = integrate(f,0,2)
Ireal ##3.575964 with absolute error < 4e-14
#--------------------------------------------------------------------------------------------
# Question 2, part (a): acceptance-rejection sampling + Monte Carlo integration
#--------------------------------------------------------------------------------------------
# grid over the integration interval, used only for plotting
x <- seq(0, 1, len = 100000)
# integrand to be estimated on [0, 1]
f = function(x){(2*pi*sqrt(2-(2*(x^4))))}
plot(f,0,1)
# uniform proposal sample on the integration interval; 100000 draws
sample.x = runif(100000,0,1)
accept = c()
sample.accept = c()
# General acceptance-rejection: draw from a target DENSITY using a proposal
# density. Here the target density is f(x) = (5/8)*(2 - 2*x^4) on (0, 1) and
# the proposal is Uniform(0, 1) with density 1.
# The rejection constant is the maximum of target/proposal:
# max[ ((5/8)*(2-(2*(x^4))))/1 ] on [0,1] = 5/4, used inside the loop below.
# NOTE(review): the acceptance test below compares (5/4)*U against f(x), the
# INTEGRAND (max ~ 8.89), not the density (5/8)*(2-2*x^4) described above, so
# nearly every draw is accepted -- this likely explains the biased estimate
# (8.06 vs. 7.77). Verify the intended comparison function.
for(i in 1:length(sample.x)){
U = runif(1, 0, 1)
if(dunif(sample.x[i], 0, 1)*(5/4)*U <= f(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = sample.x[i]
}
else if(dunif(sample.x[i],0,1)*(5/4)*U > f(sample.x[i])) {
accept[i] = 'No'
sample.accept[i] = 0
}
}
# store accepted and rejected draws in a data frame
# (note: `T` shadows the TRUE shorthand -- prefer another name)
T = data.frame(sample.x, accept = factor(accept, levels= c('Yes','No')),sample.accept)
# keep only the accepted draws
Taccept = T[accept == 'Yes',]
# ratio G = integrand / target density, averaged over accepted draws
G = function(x){(2*pi*sqrt(2-(2*(x^4))))/((5/8)*(2-(2*(x^4))))}
Naccept = length(which(T$accept == 'Yes'))
# Monte Carlo estimate of the integral: I = sum(G(x_i)) / n over the
# accepted sample (x_i = Taccept$sample.accept, n = Naccept)
I = sum(G(Taccept$sample.accept))/Naccept
I ## approximate value of the integral: 8.064748
# compare against R's adaptive quadrature
F = function(x){(2*pi*sqrt(2-(2*(x^4))))}
Ireal = integrate(F,0,1)
Ireal #7.76633 with absolute error < 0.00049
#--------------------------------------------------------------------------------------------
# Question 2, part (b): hit-and-miss estimate of the same integral, for comparison
#--------------------------------------------------------------------------------------------
# plot the integrand with ggplot2 to visualize the region being sampled
x = seq(0, 1, len = 100000)
f = function(x){2*pi*sqrt(2-(2*(x^4)))}
y = f(x)
df <- data.frame(x,y)
g <- ggplot(df, aes(x))
g <- g + geom_line(aes(y=y), colour="red")
g
# uniform proposal sample on the integration interval; 100000 draws
sample.x = runif(100000,0,1)
accept = c()
sample.accept = c()
# hit-and-miss needs the maximum of f(x) = 2*pi*sqrt(2 - 2*x^4) on [0, 1],
# which is attained at x = 0: c = 2*pi*sqrt(2). That constant scales the
# vertical uniform draw inside the loop below.
for(i in 1:length(sample.x)){
U = runif(1, 0, 1)
if(dunif(sample.x[i], 0, 1)*(2*pi*sqrt(2))*U <= f(sample.x[i])) {
accept[i] = 'Yes'
sample.accept[i] = 1
}
else if(dunif(sample.x[i],0,1)*(2*pi*sqrt(2))*U > f(sample.x[i])) {
accept[i] = 'No'
sample.accept[i] = 0
}
}
# hit-and-miss estimator: I = c * (b - a) * p
# phat: estimated p = accepted draws / total sample size
phat = sum(sample.accept)/100000
phat
# apply the formula: c = 2*pi*sqrt(2), (b - a) = (1 - 0)
I = (2*pi*sqrt(2))*(1-(0))*phat
I ## resulting estimate of the integral: 7.762694
# compare against R's adaptive quadrature
Ireal = integrate(f,0,1)
Ireal ## reference value computed by R: 7.76633 with absolute error < 0.00049
|
## init
"SoilProfileCollection" <- function(
idcol='id',
depthcols=c('top','bottom'),
metadata=data.frame(stringsAsFactors=FALSE),
horizons,
site=data.frame(stringsAsFactors=FALSE),
sp=new('SpatialPoints'), # this is a bogus place-holder
diagnostic=data.frame(stringsAsFactors=FALSE)
){
# creation of the object (includes a validity check)
new("SoilProfileCollection", idcol=idcol, depthcols=depthcols, metadata=metadata, horizons=horizons, site=site, sp=sp, diagnostic=diagnostic)
}
## show: compact console rendering of a SoilProfileCollection.
# Prints the profile count, overall depth range, the heads of the horizon and
# site tables, and (when present) spatial bounding box + CRS.
setMethod(
f='show',
signature='SoilProfileCollection',
definition=function(object) {
n.profiles <- length(object)
cat("Object of class ", class(object), "\n", sep = "")
cat("Number of profiles: ", n.profiles, "\n", sep="")
# depth range is only meaningful with more than one profile
if(n.profiles > 1)
cat("Depth range: ", min(object), "-", max(object), " ", depth_units(object), "\n", sep="")
cat("\nHorizon attributes:\n")
print(head(horizons(object)))
# in the presence of site data
if (nrow(site(object)) > 0) {
cat("\nSampling site attributes:\n")
print(head(site(object)))
}
# spatial data are considered "real" when there is one coordinate per profile
if(nrow(coordinates(object)) == n.profiles) {
cat('\nSpatial Data:\n')
show(object@sp@bbox)
show(proj4string(object))
}
}
)
## summary
##
## accessors
##
## ID column name
# idname(): character scalar -- name of the column holding the profile ID
if (!isGeneric("idname"))
setGeneric("idname", function(object, ...) standardGeneric("idname"))
setMethod("idname", "SoilProfileCollection",
function(object)
return(object@idcol)
)
## distinct profile IDs
# profile_id(): character vector of unique IDs, in order of first
# appearance within the horizon data
if (!isGeneric("profile_id"))
setGeneric("profile_id", function(object, ...) standardGeneric("profile_id"))
setMethod("profile_id", "SoilProfileCollection",
function(object)
unique(as.character(horizons(object)[[idname(object)]]))
)
## horizon depth column names
# horizonDepths(): length-2 character vector c(top, bottom)
if (!isGeneric("horizonDepths"))
setGeneric("horizonDepths", function(object, ...) standardGeneric("horizonDepths"))
setMethod("horizonDepths", "SoilProfileCollection",
function(object)
return(object@depthcols)
)
## spatial data: coordinates
# delegates to the SpatialPoints object stored in @sp
setMethod("coordinates", "SoilProfileCollection",
function(obj) {
return(coordinates(obj@sp))
}
)
## site data
if (!isGeneric("site"))
setGeneric("site", function(object, ...) standardGeneric("site"))
# retrieves the site data frame (one row per profile)
setMethod("site", "SoilProfileCollection",
function(object) {
return(object@site)
}
)
## diagnostic horizons: stored as a DF, must be join()-ed to other data via ID
## note: ordering may or may not be the same as in site data
if (!isGeneric("diagnostic_hz"))
setGeneric("diagnostic_hz", function(object, ...) standardGeneric("diagnostic_hz"))
setMethod(f='diagnostic_hz', signature='SoilProfileCollection',
function(object){
return(object@diagnostic)
}
)
## horizon data
# returns a data.frame with horizons data (one row per horizon)
if (!isGeneric("horizons"))
setGeneric("horizons", function(object, ...) standardGeneric("horizons"))
setMethod(f='horizons', signature='SoilProfileCollection',
function(object){
return(object@horizons)
}
)
## metadata
# returns the metadata data.frame (e.g. depth_units)
if (!isGeneric("metadata"))
setGeneric("metadata", function(object, ...) standardGeneric("metadata"))
setMethod(f='metadata', signature='SoilProfileCollection',
function(object){
return(object@metadata)
}
)
## depth_units
# Accessor for the depth units stored in @metadata (e.g. 'cm').
# Returns a character scalar (possibly empty / zero-length when undefined);
# emits a message rather than failing when units are missing.
if (!isGeneric("depth_units"))
setGeneric("depth_units", function(object, ...) standardGeneric("depth_units"))
setMethod(f='depth_units', signature='SoilProfileCollection',
function(object){
u <- as.character(aqp::metadata(object)[['depth_units']])
# length(u) == 0 happens when metadata has no 'depth_units' entry at all;
# the previous `if(u == '')` errored in that case
if(length(u) == 0 || u == '')
message('Note: depth_units have not yet been defined.')
return(u)
}
)
## TODO: strip-out idname
## get site column names
# siteNames(): all column names of @site, currently INCLUDING the ID column
if (!isGeneric("siteNames"))
setGeneric("siteNames", function(object, ...) standardGeneric("siteNames"))
setMethod("siteNames", "SoilProfileCollection",
function(object) {
res <- names(object@site)
return(res)
}
)
## TODO: strip-out idname
## get horizon column names
# horizonNames(): all column names of @horizons, currently INCLUDING the ID column
if (!isGeneric("horizonNames"))
setGeneric("horizonNames", function(object, ...) standardGeneric("horizonNames"))
setMethod("horizonNames", "SoilProfileCollection",
function(object) {
res <- names(object@horizons)
return(res)
}
)
##
## overloads
##
### This will be greatly improved with new class structure
## concatenation
## TODO: concatenation of data with duplicated IDs in @site, but unique data in other @site fields, will result in corrupt SPC
## TODO: duplicates in @sp will cause errors
## TODO: duplicates are removed in all other slots... does this make sense?
#
# rbind.SoilProfileCollection(...): combine any number of SPC objects into one.
# All inputs must agree on ID column, depth columns, depth units, metadata,
# CRS, and presence/absence of spatial data. Horizon, site, and diagnostic
# slots are row-bound (duplicate rows removed).
rbind.SoilProfileCollection <- function(...) {
  # keep character columns as character while assembling the result;
  # restore the caller's option on exit rather than clobbering it globally
  o.opts <- options(stringsAsFactors=FALSE)
  on.exit(options(o.opts), add=TRUE)
  # parse dots
  objects <- list(...)
  names(objects) <- NULL
  # short-circuits
  if(length(objects) == 0)
    return(NULL)
  if(length(objects) == 1)
    return(objects[[1]]) # return the SPC itself, not a length-1 list
  # collection-level attributes: after unique() each should have length 1
  o.idname <- unique(lapply(objects, idname))
  o.depth.units <- unique(lapply(objects, depth_units))
  o.hz.depths <- unique(lapply(objects, horizonDepths))
  o.m <- unique(lapply(objects, aqp::metadata))
  o.coords <- unique(lapply(objects, function(i) ncol(coordinates(i))))
  o.p4s <- unique(lapply(objects, proj4string))
  # per-object data: length > 1
  o.h <- lapply(objects, horizons)
  o.s <- lapply(objects, site)
  o.d <- lapply(objects, diagnostic_hz)
  o.sp <- lapply(objects, slot, 'sp')
  # sanity checks: refuse to combine incompatible collections
  if(length(o.idname) > 1)
    stop('inconsistent ID names', call.=FALSE)
  if(length(o.depth.units) > 1)
    stop('inconsistent depth units', call.=FALSE)
  if(length(o.hz.depths) > 1)
    stop('inconsistent depth columns', call.=FALSE)
  if(length(o.m) > 1)
    stop('inconsistent metadata', call.=FALSE)
  # spatial data may be missing...
  if(length(o.coords) > 1)
    stop('inconsistent spatial data', call.=FALSE)
  if(length(o.p4s) > 1)
    stop('inconsistent CRS', call.=FALSE)
  # generate new SPC components
  o.h <- unique(do.call('rbind', o.h)) # horizon data
  o.s <- unique(do.call('rbind', o.s)) # site data
  o.d <- unique(do.call('rbind', o.d)) # diagnostic data, leave as-is
  ## 2015-12-18: removed re-ordering, was creating corrupt SPC objects
  ## site and horizon data
  # spatial points require more effort when spatial data are missing:
  # the bogus place-holder SpatialPoints object has a single coordinate column
  o.1.sp <- objects[[1]]@sp
  if(ncol(coordinates(o.1.sp)) == 1) {
    # missing spatial data: copy the first filler
    o.sp <- o.1.sp
  } else {
    ## 2015-12-18: added call to specific function: "sp::rbind.SpatialPoints"
    o.sp <- do.call("rbind.SpatialPoints", o.sp) # rbind properly
  }
  # make SPC and return
  res <- SoilProfileCollection(idcol=o.idname[[1]], depthcols=o.hz.depths[[1]], metadata=o.m[[1]], horizons=o.h, site=o.s, sp=o.sp, diagnostic=o.d)
  # final integrity checks: profile IDs must line up 1:1 with site rows
  if(length(profile_id(res)) != length(site(res)[[idname(res)]]))
    stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
  # all.equal() returns a character description on mismatch, so it must be
  # wrapped in isTRUE(); the previous `!all.equal(...)` errored on mismatch
  if(!isTRUE(all.equal(profile_id(res), site(res)[[idname(res)]])))
    stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
  return(res)
}
## TODO: this doesn't work as expected ... fix in 2.0
## overload rbind
#setMethod("rbind", "SoilProfileCollection", .rbind.SoilProfileCollection)
# names(): concatenated vector of horizon + site column names;
# the first site column (the profile ID) is dropped to avoid duplication
setMethod("names", "SoilProfileCollection",
  function(x) {
    c(
      horizons = horizonNames(x),
      site = siteNames(x)[-1]
    )
  }
)
# overload min() to give us the min depth within a collection
# i.e. the total (bottom) depth of the SHALLOWEST profile; `v` optionally
# restricts the calculation to horizons where variable `v` is not missing
setMethod(f='min', signature='SoilProfileCollection',
definition=function(x, v=NULL) {
# get bottom depth column name
hz_bottom_depths <- horizonDepths(x)[2]
# optionally use a horizon-level property to refine the calculation
if(!missing(v)) {
# combine bottom depths with IDs and variable
h <- horizons(x)[, c(hz_bottom_depths, idname(x), v)]
} else {
# combine bottom depths with IDs
h <- horizons(x)[, c(hz_bottom_depths, idname(x))]
}
# filter out missing data
h <- h[complete.cases(h), ]
# deepest bottom depth within each profile (max by ID)
d <- tapply(h[, 1], h[, 2], max, na.rm=TRUE)
# return the shallowest of the per-profile depths
return(min(d, na.rm=TRUE))
}
)
# overload max() to give us the max depth within a collection
# i.e. the total (bottom) depth of the DEEPEST profile; `v` optionally
# restricts the calculation to horizons where variable `v` is not missing
setMethod(f='max', signature='SoilProfileCollection',
definition=function(x, v=NULL){
# get bottom depth column name
hz_bottom_depths <- horizonDepths(x)[2]
# optionally use a horizon-level property to refine the calculation
if(!missing(v)) {
# combine bottom depths with IDs and variable
h <- horizons(x)[, c(hz_bottom_depths, idname(x), v)]
} else {
# combine bottom depths with IDs
h <- horizons(x)[, c(hz_bottom_depths, idname(x))]
}
# filter out missing data
h <- h[complete.cases(h), ]
# deepest bottom depth within each profile (max by ID)
d <- tapply(h[, 1], h[, 2], max, na.rm=TRUE)
# return the deepest of the per-profile depths
return(max(d, na.rm=TRUE))
}
)
# length(): number of profiles in the collection
# (the number of distinct profile IDs)
setMethod(f='length', signature='SoilProfileCollection',
  definition=function(x){
    length(profile_id(x))
  }
)
# nrow(): total number of horizon records across all profiles
setMethod(f='nrow', signature='SoilProfileCollection',
  definition=function(x){
    nrow(x@horizons)
  }
)
# overload unique() via hash comparison of per-profile data
#
# uniqueSPC(x, vars): integer indices of the FIRST occurrence of each unique
# profile, where uniqueness is judged by an MD5 digest of the selected
# variables (`vars`) from each profile's data.frame representation.
uniqueSPC <- function(x, vars){
  # compute hash by profile, for selected variables
  md5 <- profileApply(x, function(i) {
    # unlist in order to drop row names
    digest(unlist(as(i, 'data.frame')[, vars]))
  })
  # unique hashes, in order of first appearance
  u.md5 <- unique(md5)
  # index of the first profile carrying each unique hash;
  # match() is type-stable and replaces the previous sapply()-based scan
  u.profiles <- match(u.md5, md5)
  # return an un-named integer vector of indices
  return(as.vector(u.profiles))
}
setMethod(f='unique', signature='SoilProfileCollection', definition=uniqueSPC)
## standard column access: search horizons first, then site
# NOTE: when a name exists in BOTH tables, the horizon column shadows the
# site column (this can happen when site data come from an external DF)
setMethod("$", "SoilProfileCollection",
  function(x, name) {
    # horizon data take precedence
    if (name %in% horizonNames(x))
      return(horizons(x)[[name]])
    # fall back to site data
    if (name %in% siteNames(x))
      return(site(x)[[name]])
    # unknown column: mirror data.frame semantics and return NULL
    NULL
  }
)
## problem: when making new columns how can the function determine where to insert the replacement?
# `$<-`: assign into an existing horizon or site column; for a NEW column the
# destination is inferred from the replacement length (see below)
setReplaceMethod("$", "SoilProfileCollection",
function(x, name, value) {
# extract hz and site data
h <- horizons(x)
s <- site(x)
# existing horizon column: replace in place
if (name %in% names(h)) {
h[[name]] <- value
horizons(x) <- h
return(x)
}
# existing site column: replace in place
if(name %in% names(s)) {
s[[name]] <- value
# TODO: use site(x) <- s
x@site <- s
return(x)
}
# new column: use length of replacement to determine horizon vs. site
# NOTE: when nrow(horizons) == nrow(site) the horizon table wins
else {
n.site <- nrow(s)
n.hz <- nrow(h)
l <- length(value)
if(l == n.hz) {
h[[name]] <- value
horizons(x) <- h
return(x)
}
if(l == n.site) {
s[[name]] <- value
# TODO: use site(x) <- s
x@site <- s
return(x)
}
else
stop('length of replacement must equal number of sites or number of horizons')
}
# done
}
)
## subset method for SoilProfileCollection objects
## s: site-level subsetting criteria (properly quoted character expression)
## h: horizon-level subsetting criteria (properly quoted character expression)
## result: SoilProfileCollection with profiles matching BOTH criteria when both
## are supplied (intersection), or either one when only one is supplied
if (!isGeneric("subsetProfiles"))
setGeneric("subsetProfiles", function(object, s, h, ...) standardGeneric("subsetProfiles"))
setMethod("subsetProfiles", "SoilProfileCollection",
function(object, s, h, ...) {
# sanity checks
if(missing(s) & missing(h))
stop('must provide either, site or horizon level subsetting criteria', call.=FALSE)
# extract parts
s.d <- site(object)
h.d <- horizons(object)
id.col <- idname(object)
object.ids <- profile_id(object)
# subset using conventional data.frame methods
# NOTE(review): eval(parse(text=...)) executes arbitrary code from the
# caller-supplied string -- only use with trusted input
if(!missing(s))
s.d.sub.IDs <- subset(s.d, select=id.col, subset=eval(parse(text=s)))[, 1] # convert to vector
else
s.d.sub.IDs <- NA
if(!missing(h))
h.d.sub.IDs <- subset(h.d, select=id.col, subset=eval(parse(text=h)))[, 1] # convert to vector
else
h.d.sub.IDs <- NA
# intersect IDs if s and h were used
if(!missing(h) & !missing(s))
matching.IDs <- intersect(s.d.sub.IDs, h.d.sub.IDs)
# if only h, or only s were used, then
else
matching.IDs <- unique(na.omit(c(s.d.sub.IDs, h.d.sub.IDs)))
# convert IDs into a numerical profile index
# note: when nothing matches, matching.IDs is empty and idx has length 0
idx <- match(matching.IDs, object.ids)
# subset SoilProfileCollection
return(object[idx, ])
}
)
### NOTE: this DOES NOT re-order data, only subsets!
##
## matrix / DF style access: only to horizon data
##
## i = profile index
## j = horizon / slice index
##
# Returns either a SoilProfileCollection subset, or (when spatial data are
# present, j is given, and exactly one horizon/slice remains per profile)
# a SpatialPointsDataFrame.
setMethod("[", signature=c("SoilProfileCollection", i="ANY", j="ANY"),
function(x, i, j) {
# check for missing i and j
if(missing(i) & missing(j))
stop('must provide either a profile index or horizon/slice index, or both', call.=FALSE)
# convert to integer
if(!missing(i)) {
if(any(is.na(i)))
stop('NA not permitted in profile index', call.=FALSE)
# convert logical to integer per standard vector/list indexing rules (thanks Jose Padarian for the suggestion!)
if(is.logical(i)) i <- (1:length(x))[i]
i <- as.integer(i)
}
else # if no index is provided, the user wants all profiles
i <- 1:length(x)
# sanity check
if(!missing(j)) {
j <- as.integer(j)
if(any(is.na(j)))
stop('NA not permitted in horizon/slice index', call.=FALSE)
}
# extract requested profile IDs
p.ids <- profile_id(x)[i]
# extract all horizon data
h <- horizons(x)
# keep only the requested horizon data (filtered by profile ID)
h <- h[h[[idname(x)]] %in% p.ids, ]
# keep only the requested site data, (filtered by profile ID)
s.all <- site(x)
s.i <- which(s.all[[idname(x)]] %in% p.ids)
s <- s.all[s.i, , drop=FALSE] # need to use drop=FALSE when @site contains only a single column
# subset spatial data if exists
# (real spatial data implies one coordinate per profile)
if(nrow(coordinates(x)) == length(x))
sp <- x@sp[i]
else
sp <- x@sp
# subset diagnostic data, but only if it exists
# note that not all profiles have diagnostic hz data
d <- diagnostic_hz(x)
if(length(d) > 0) # some data
d <- d[which(d[[idname(x)]] %in% p.ids), ]
# subset horizons/slices based on j --> only when j is given
# (applied per-profile via plyr::ddply)
if(!missing(j))
h <- ddply(h, idname(x), .fun=function(y) y[j, ])
# if there is REAL data in @sp, and we only have 1 row of hz per coordinate- return SPDF
# for now test for our custom dummy SP obj: number of coordinates == number of profiles
# also need to test that there is only 1 horizon/slice per location
# only produces a SPDF when j index is present
if(nrow(coordinates(x)) == length(x) & length(p.ids) == nrow(h) & !missing(j)) {
# combine with coordinates
cat('result is a SpatialPointsDataFrame object\n')
# note that we are filtering based on 'i' - an index of selected profiles
# since the order of our slices and coordinates are the same
# it is safe to use 'match.ID=FALSE'
# this gets around a potential problem when dimnames(x)[[1]] aren't consecutive
# values-- often the case when subsetting has been performed
# if site data, join hz+site
if(nrow(s) > 0) {
return(SpatialPointsDataFrame(as(x, 'SpatialPoints')[i, ], data=join(h, s, by=idname(x)), match.ID=FALSE))
}
# no site data
else {
return(SpatialPointsDataFrame(as(x, 'SpatialPoints')[i, ], data=h, match.ID=FALSE))
}
}
# in this case there may be missing coordinates, or we have more than 1 slice of hz data
else {
res <- SoilProfileCollection(idcol=idname(x), depthcols=horizonDepths(x), metadata=aqp::metadata(x), horizons=h, site=s, sp=sp, diagnostic=d)
# one more final check:
if(length(profile_id(res)) != length(site(res)[[idname(res)]]))
stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
if(! all(profile_id(res) == site(res)[[idname(res)]]))
stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
return(res)
}
# done
}
)
| /R/SoilProfileCollection-methods.R | no_license | IKWENZI/aqp | R | false | false | 18,251 | r | ## init
"SoilProfileCollection" <- function(
idcol='id',
depthcols=c('top','bottom'),
metadata=data.frame(stringsAsFactors=FALSE),
horizons,
site=data.frame(stringsAsFactors=FALSE),
sp=new('SpatialPoints'), # this is a bogus place-holder
diagnostic=data.frame(stringsAsFactors=FALSE)
){
# creation of the object (includes a validity check)
new("SoilProfileCollection", idcol=idcol, depthcols=depthcols, metadata=metadata, horizons=horizons, site=site, sp=sp, diagnostic=diagnostic)
}
## show: compact console rendering (profile count, depth range, table heads,
## and spatial metadata when coordinates are present)
setMethod(
f='show',
signature='SoilProfileCollection',
definition=function(object) {
n.profiles <- length(object)
cat("Object of class ", class(object), "\n", sep = "")
cat("Number of profiles: ", n.profiles, "\n", sep="")
# depth range only meaningful with more than one profile
if(n.profiles > 1)
cat("Depth range: ", min(object), "-", max(object), " ", depth_units(object), "\n", sep="")
cat("\nHorizon attributes:\n")
print(head(horizons(object)))
# in the presence of site data
if (nrow(site(object)) > 0) {
cat("\nSampling site attributes:\n")
print(head(site(object)))
}
# spatial data are "real" when there is one coordinate per profile
if(nrow(coordinates(object)) == n.profiles) {
cat('\nSpatial Data:\n')
show(object@sp@bbox)
show(proj4string(object))
}
}
)
## summary
##
## accessors
##
## ID column name
# idname(): name of the profile ID column (character scalar)
if (!isGeneric("idname"))
setGeneric("idname", function(object, ...) standardGeneric("idname"))
setMethod("idname", "SoilProfileCollection",
function(object)
return(object@idcol)
)
## distinct profile IDs
# profile_id(): unique IDs in order of first appearance in horizon data
if (!isGeneric("profile_id"))
setGeneric("profile_id", function(object, ...) standardGeneric("profile_id"))
setMethod("profile_id", "SoilProfileCollection",
function(object)
unique(as.character(horizons(object)[[idname(object)]]))
)
## horizon depth column names
# horizonDepths(): length-2 character vector c(top, bottom)
if (!isGeneric("horizonDepths"))
setGeneric("horizonDepths", function(object, ...) standardGeneric("horizonDepths"))
setMethod("horizonDepths", "SoilProfileCollection",
function(object)
return(object@depthcols)
)
## spatial data: coordinates (delegates to the SpatialPoints in @sp)
setMethod("coordinates", "SoilProfileCollection",
function(obj) {
return(coordinates(obj@sp))
}
)
## site data
if (!isGeneric("site"))
setGeneric("site", function(object, ...) standardGeneric("site"))
# retrieves the site data frame (one row per profile)
setMethod("site", "SoilProfileCollection",
function(object) {
return(object@site)
}
)
## diagnostic horizons: stored as a DF, must be join()-ed to other data via ID
## note: ordering may or may not be the same as in site data
if (!isGeneric("diagnostic_hz"))
setGeneric("diagnostic_hz", function(object, ...) standardGeneric("diagnostic_hz"))
setMethod(f='diagnostic_hz', signature='SoilProfileCollection',
function(object){
return(object@diagnostic)
}
)
## horizon data
# returns a data.frame with horizons data (one row per horizon)
if (!isGeneric("horizons"))
setGeneric("horizons", function(object, ...) standardGeneric("horizons"))
setMethod(f='horizons', signature='SoilProfileCollection',
function(object){
return(object@horizons)
}
)
## metadata
# returns the metadata data.frame (e.g. depth_units)
if (!isGeneric("metadata"))
setGeneric("metadata", function(object, ...) standardGeneric("metadata"))
setMethod(f='metadata', signature='SoilProfileCollection',
function(object){
return(object@metadata)
}
)
## depth_units
# Accessor for the depth units stored in @metadata (e.g. 'cm').
# Returns a character scalar (possibly empty / zero-length when undefined);
# emits a message rather than failing when units are missing.
if (!isGeneric("depth_units"))
setGeneric("depth_units", function(object, ...) standardGeneric("depth_units"))
setMethod(f='depth_units', signature='SoilProfileCollection',
function(object){
u <- as.character(aqp::metadata(object)[['depth_units']])
# length(u) == 0 happens when metadata has no 'depth_units' entry at all;
# the previous `if(u == '')` errored in that case
if(length(u) == 0 || u == '')
message('Note: depth_units have not yet been defined.')
return(u)
}
)
## TODO: strip-out idname
## get site column names (currently INCLUDING the ID column)
if (!isGeneric("siteNames"))
setGeneric("siteNames", function(object, ...) standardGeneric("siteNames"))
setMethod("siteNames", "SoilProfileCollection",
function(object) {
res <- names(object@site)
return(res)
}
)
## TODO: strip-out idname
## get horizon column names (currently INCLUDING the ID column)
if (!isGeneric("horizonNames"))
setGeneric("horizonNames", function(object, ...) standardGeneric("horizonNames"))
setMethod("horizonNames", "SoilProfileCollection",
function(object) {
res <- names(object@horizons)
return(res)
}
)
##
## overloads
##
### This will be greatly improved with new class structure
## concatenation
## TODO: concatenation of data with duplicated IDs in @site, but unique data in other @site fields, will result in corrupt SPC
## TODO: duplicates in @sp will cause errors
## TODO: duplicates are removed in all other slots... does this make sense?
#
# rbind.SoilProfileCollection(...): combine any number of SPC objects into one.
# All inputs must agree on ID column, depth columns, depth units, metadata,
# CRS, and presence/absence of spatial data. Horizon, site, and diagnostic
# slots are row-bound (duplicate rows removed).
rbind.SoilProfileCollection <- function(...) {
  # keep character columns as character while assembling the result;
  # restore the caller's option on exit rather than clobbering it globally
  o.opts <- options(stringsAsFactors=FALSE)
  on.exit(options(o.opts), add=TRUE)
  # parse dots
  objects <- list(...)
  names(objects) <- NULL
  # short-circuits
  if(length(objects) == 0)
    return(NULL)
  if(length(objects) == 1)
    return(objects[[1]]) # return the SPC itself, not a length-1 list
  # collection-level attributes: after unique() each should have length 1
  o.idname <- unique(lapply(objects, idname))
  o.depth.units <- unique(lapply(objects, depth_units))
  o.hz.depths <- unique(lapply(objects, horizonDepths))
  o.m <- unique(lapply(objects, aqp::metadata))
  o.coords <- unique(lapply(objects, function(i) ncol(coordinates(i))))
  o.p4s <- unique(lapply(objects, proj4string))
  # per-object data: length > 1
  o.h <- lapply(objects, horizons)
  o.s <- lapply(objects, site)
  o.d <- lapply(objects, diagnostic_hz)
  o.sp <- lapply(objects, slot, 'sp')
  # sanity checks: refuse to combine incompatible collections
  if(length(o.idname) > 1)
    stop('inconsistent ID names', call.=FALSE)
  if(length(o.depth.units) > 1)
    stop('inconsistent depth units', call.=FALSE)
  if(length(o.hz.depths) > 1)
    stop('inconsistent depth columns', call.=FALSE)
  if(length(o.m) > 1)
    stop('inconsistent metadata', call.=FALSE)
  # spatial data may be missing...
  if(length(o.coords) > 1)
    stop('inconsistent spatial data', call.=FALSE)
  if(length(o.p4s) > 1)
    stop('inconsistent CRS', call.=FALSE)
  # generate new SPC components
  o.h <- unique(do.call('rbind', o.h)) # horizon data
  o.s <- unique(do.call('rbind', o.s)) # site data
  o.d <- unique(do.call('rbind', o.d)) # diagnostic data, leave as-is
  ## 2015-12-18: removed re-ordering, was creating corrupt SPC objects
  ## site and horizon data
  # spatial points require more effort when spatial data are missing:
  # the bogus place-holder SpatialPoints object has a single coordinate column
  o.1.sp <- objects[[1]]@sp
  if(ncol(coordinates(o.1.sp)) == 1) {
    # missing spatial data: copy the first filler
    o.sp <- o.1.sp
  } else {
    ## 2015-12-18: added call to specific function: "sp::rbind.SpatialPoints"
    o.sp <- do.call("rbind.SpatialPoints", o.sp) # rbind properly
  }
  # make SPC and return
  res <- SoilProfileCollection(idcol=o.idname[[1]], depthcols=o.hz.depths[[1]], metadata=o.m[[1]], horizons=o.h, site=o.s, sp=o.sp, diagnostic=o.d)
  # final integrity checks: profile IDs must line up 1:1 with site rows
  if(length(profile_id(res)) != length(site(res)[[idname(res)]]))
    stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
  # all.equal() returns a character description on mismatch, so it must be
  # wrapped in isTRUE(); the previous `!all.equal(...)` errored on mismatch
  if(!isTRUE(all.equal(profile_id(res), site(res)[[idname(res)]])))
    stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
  return(res)
}
## TODO: this doesn't work as expected ... fix in 2.0
## overload rbind
#setMethod("rbind", "SoilProfileCollection", .rbind.SoilProfileCollection)
# names(): concatenated horizon + site column names; the first site column
# (the profile ID) is dropped to avoid duplication
setMethod("names", "SoilProfileCollection",
function(x) {
res <- c(horizons=horizonNames(x), site=siteNames(x)[-1])
return(res)
}
)
# overload min(): total (bottom) depth of the SHALLOWEST profile;
# `v` optionally restricts the calculation to horizons where `v` is not missing
setMethod(f='min', signature='SoilProfileCollection',
definition=function(x, v=NULL) {
# get bottom depth column name
hz_bottom_depths <- horizonDepths(x)[2]
# optionally use a horizon-level property to refine the calculation
if(!missing(v)) {
# combine bottom depths with IDs and variable
h <- horizons(x)[, c(hz_bottom_depths, idname(x), v)]
} else {
# combine bottom depths with IDs
h <- horizons(x)[, c(hz_bottom_depths, idname(x))]
}
# filter out missing data
h <- h[complete.cases(h), ]
# deepest bottom depth within each profile (max by ID)
d <- tapply(h[, 1], h[, 2], max, na.rm=TRUE)
# return the shallowest of the per-profile depths
return(min(d, na.rm=TRUE))
}
)
# overload max(): total (bottom) depth of the DEEPEST profile;
# `v` optionally restricts the calculation to horizons where `v` is not missing
setMethod(f='max', signature='SoilProfileCollection',
definition=function(x, v=NULL){
# get bottom depth column name
hz_bottom_depths <- horizonDepths(x)[2]
# optionally use a horizon-level property to refine the calculation
if(!missing(v)) {
# combine bottom depths with IDs and variable
h <- horizons(x)[, c(hz_bottom_depths, idname(x), v)]
} else {
# combine bottom depths with IDs
h <- horizons(x)[, c(hz_bottom_depths, idname(x))]
}
# filter out missing data
h <- h[complete.cases(h), ]
# deepest bottom depth within each profile (max by ID)
d <- tapply(h[, 1], h[, 2], max, na.rm=TRUE)
# return the deepest of the per-profile depths
return(max(d, na.rm=TRUE))
}
)
# overload length(): number of profiles (distinct profile IDs)
setMethod(f='length', signature='SoilProfileCollection',
definition=function(x){
l <- length(profile_id(x))
return(l)
}
)
# overload nrow(): total number of horizon records across all profiles
setMethod(f='nrow', signature='SoilProfileCollection',
definition=function(x){
nrow(x@horizons)
}
)
# overload unique() via hash comparison of per-profile data
#
# uniqueSPC(x, vars): integer indices of the FIRST occurrence of each unique
# profile, where uniqueness is judged by an MD5 digest of the selected
# variables (`vars`) from each profile's data.frame representation.
uniqueSPC <- function(x, vars){
  # compute hash by profile, for selected variables
  md5 <- profileApply(x, function(i) {
    # unlist in order to drop row names
    digest(unlist(as(i, 'data.frame')[, vars]))
  })
  # unique hashes, in order of first appearance
  u.md5 <- unique(md5)
  # index of the first profile carrying each unique hash;
  # match() is type-stable and replaces the previous sapply()-based scan
  u.profiles <- match(u.md5, md5)
  # return an un-named integer vector of indices
  return(as.vector(u.profiles))
}
setMethod(f='unique', signature='SoilProfileCollection', definition=uniqueSPC)
## standard column access: search horizons, then site
# (a horizon column shadows a same-named site column)
setMethod("$", "SoilProfileCollection",
function(x, name) {
# get names from site and hz data
s.names <- siteNames(x)
h.names <- horizonNames(x)
# ## note: warnings may be issued when using auto-complete feature in RStudio
# # when site data are initialized from an external DF, it is possible that
# # there will be duplicate column names
# if((name %in% h.names) && (name %in% s.names)) {
# warning('column name is present in horizon and site data, extracting from horizon data only', call.=FALSE)
# }
# get column from horizon data
if (name %in% h.names) {
res <- horizons(x)[[name]]
} else {
# otherwise check site data
if (name %in% s.names) {
res <- site(x)[[name]]
} else {
# unknown column: mirror data.frame semantics and return NULL
res <- NULL
}
}
return(res)
}
)
## problem: when making new columns how can the function determine where to insert the replacement?
# `$<-`: assign into an existing horizon or site column; for a NEW column the
# destination is inferred from the replacement length
setReplaceMethod("$", "SoilProfileCollection",
function(x, name, value) {
# extract hz and site data
h <- horizons(x)
s <- site(x)
# existing horizon column: replace in place
if (name %in% names(h)) {
h[[name]] <- value
horizons(x) <- h
return(x)
}
# existing site column: replace in place
if(name %in% names(s)) {
s[[name]] <- value
# TODO: use site(x) <- s
x@site <- s
return(x)
}
# new column: use length of replacement to determine horizon vs. site
# NOTE: when nrow(horizons) == nrow(site) the horizon table wins
else {
n.site <- nrow(s)
n.hz <- nrow(h)
l <- length(value)
if(l == n.hz) {
h[[name]] <- value
horizons(x) <- h
return(x)
}
if(l == n.site) {
s[[name]] <- value
# TODO: use site(x) <- s
x@site <- s
return(x)
}
else
stop('length of replacement must equal number of sites or number of horizons')
}
# done
}
)
## subset method for SoilProfileCollection objects
## s: site-level subsetting criteria (properly quoted character expression)
## h: horizon-level subsetting criteria (properly quoted character expression)
## result: SoilProfileCollection with profiles matching BOTH criteria when both
## are supplied (intersection), or either one when only one is supplied
if (!isGeneric("subsetProfiles"))
setGeneric("subsetProfiles", function(object, s, h, ...) standardGeneric("subsetProfiles"))
setMethod("subsetProfiles", "SoilProfileCollection",
function(object, s, h, ...) {
# sanity checks
if(missing(s) & missing(h))
stop('must provide either, site or horizon level subsetting criteria', call.=FALSE)
# extract parts
s.d <- site(object)
h.d <- horizons(object)
id.col <- idname(object)
object.ids <- profile_id(object)
# subset using conventional data.frame methods
# NOTE(review): eval(parse(text=...)) executes arbitrary code from the
# caller-supplied string -- only use with trusted input
if(!missing(s))
s.d.sub.IDs <- subset(s.d, select=id.col, subset=eval(parse(text=s)))[, 1] # convert to vector
else
s.d.sub.IDs <- NA
if(!missing(h))
h.d.sub.IDs <- subset(h.d, select=id.col, subset=eval(parse(text=h)))[, 1] # convert to vector
else
h.d.sub.IDs <- NA
# intersect IDs if s and h were used
if(!missing(h) & !missing(s))
matching.IDs <- intersect(s.d.sub.IDs, h.d.sub.IDs)
# if only h, or only s were used, then
else
matching.IDs <- unique(na.omit(c(s.d.sub.IDs, h.d.sub.IDs)))
# convert IDs into a numerical profile index
# note: when nothing matches, matching.IDs is empty and idx has length 0
idx <- match(matching.IDs, object.ids)
# subset SoilProfileCollection
return(object[idx, ])
}
)
### NOTE: this DOES NOT re-order data, only subsets!
##
## matrix / DF style access: only to horizon data
##
## i = profile index
## j = horizon / slice index
##
## Returns a subset SoilProfileCollection; or, when a full set of
## coordinates is present and `j` leaves exactly one horizon/slice per
## profile, a SpatialPointsDataFrame.
setMethod("[", signature=c("SoilProfileCollection", i="ANY", j="ANY"),
function(x, i, j) {
# check for missing i and j
if(missing(i) & missing(j))
stop('must provide either a profile index or horizon/slice index, or both', call.=FALSE)
# convert to integer
if(!missing(i)) {
if(any(is.na(i)))
stop('NA not permitted in profile index', call.=FALSE)
# convert logical to integer per standard vector/list indexing rules (thanks Jos? Padarian for the suggestion!)
if(is.logical(i)) i <- (1:length(x))[i]
i <- as.integer(i)
}
else # if no index is provided, the user wants all profiles
i <- 1:length(x)
# sanity check
if(!missing(j)) {
j <- as.integer(j)
if(any(is.na(j)))
stop('NA not permitted in horizon/slice index', call.=FALSE)
}
# extract requested profile IDs
p.ids <- profile_id(x)[i]
# extract all horizon data
h <- horizons(x)
# keep only the requested horizon data (filtered by profile ID)
h <- h[h[[idname(x)]] %in% p.ids, ]
# keep only the requested site data, (filtered by profile ID)
s.all <- site(x)
s.i <- which(s.all[[idname(x)]] %in% p.ids)
s <- s.all[s.i, , drop=FALSE] # need to use drop=FALSE when @site contains only a single column
# subset spatial data if exists
# (real coordinates are signalled by nrow(coordinates) == number of profiles)
if(nrow(coordinates(x)) == length(x))
sp <- x@sp[i]
else
sp <- x@sp
# subset diagnostic data, but only if it exists
# note that not all profiles have diagnostic hz data
d <- diagnostic_hz(x)
if(length(d) > 0) # some data
d <- d[which(d[[idname(x)]] %in% p.ids), ]
# subset horizons/slices based on j --> only when j is given
# (j is applied per profile via ddply, not to the stacked horizon table)
if(!missing(j))
h <- ddply(h, idname(x), .fun=function(y) y[j, ])
# if there is REAL data in @sp, and we only have 1 row of hz per coordinate- return SPDF
# for now test for our custom dummy SP obj: number of coordinates == number of profiles
# also need to test that there is only 1 horizon/slice per location
# only produces a SPDF when j index is present
if(nrow(coordinates(x)) == length(x) & length(p.ids) == nrow(h) & !missing(j)) {
# combine with coordinates
cat('result is a SpatialPointsDataFrame object\n')
# note that we are filtering based on 'i' - an index of selected profiles
# since the order of our slices and coordinates are the same
# it is safe to use 'match.ID=FALSE'
# this gets around a potential problem when dimnames(x)[[1]] aren't consecutive
# values-- often the case when subsetting has been performed
# if site data, join hz+site
if(nrow(s) > 0) {
return(SpatialPointsDataFrame(as(x, 'SpatialPoints')[i, ], data=join(h, s, by=idname(x)), match.ID=FALSE))
}
# no site data
else {
return(SpatialPointsDataFrame(as(x, 'SpatialPoints')[i, ], data=h, match.ID=FALSE))
}
}
# in this case there may be missing coordinates, or we have more than 1 slice of hz data
else {
res <- SoilProfileCollection(idcol=idname(x), depthcols=horizonDepths(x), metadata=aqp::metadata(x), horizons=h, site=s, sp=sp, diagnostic=d)
# one more final check: site rows must line up 1:1 with profile IDs
if(length(profile_id(res)) != length(site(res)[[idname(res)]]))
stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
if(! all(profile_id(res) == site(res)[[idname(res)]]))
stop("SPC object corruption. This shouldn't happen and will be fixed in aqp 2.0", call. = FALSE)
return(res)
}
# done
}
)
|
#' Mean of a pollutant over a set of monitor data files.
#'
#' @param directory name of the data directory (relative to getwd()),
#'   containing files named "001.csv", "002.csv", ...
#' @param pollutant name of the column to average (e.g. "sulfate", "nitrate")
#' @param id integer vector of monitor ids to include
#' @return mean of the pollutant column across all selected files,
#'   with NA values removed
pollutantmean <- function(directory, pollutant, id = 1:332){
  # build the full path of the data directory
  path <- file.path(getwd(), directory)
  # monitor files use zero-padded 3-digit names (e.g. "007.csv");
  # sprintf() replaces the previous manual if/else zero-padding
  files <- file.path(path, sprintf("%03d.csv", id))
  # read every requested file and stack once; avoids the O(n^2)
  # rbind-inside-a-loop growth pattern of the original
  df <- do.call(rbind, lapply(files, read.csv, header = TRUE))
  # return the mean of the overall data 'df' excluding the NAs
  mean(df[, pollutant], na.rm = TRUE)
}
# Example usage
pollutantmean("specdata", "sulfate", 1:10)
pollutantmean("specdata", "nitrate", 70:72)
pollutantmean("specdata", "nitrate", 23) | /2- R Programming/Week 2/pollutantmean.R | no_license | jrreda/JHU-Data-Science | R | false | false | 930 | r | pollutantmean <- function(directory, pollutant, id = 1:332){
# create the path of the directory
path <- paste0(getwd(),'/',directory)
# create data frame df the will contain all the data according to the given id-s
df <- data.frame()
for (i in id) {
if (i < 10) {
data <- read.csv(paste0(path,'/00',as.character(i),".csv"), header = T)
df <- rbind(df, data)
}
else if (i < 100) {
data <- read.csv(paste0(path,'/0',as.character(i),".csv"), header = T)
df <- rbind(df, data)
}
else{
data <- read.csv(paste0(path,'/',as.character(i),".csv"), header = T)
df <- rbind(df, data)
}
}
# retun the mean of the overall data 'df' excloding the NAs
return(mean(df[, pollutant], na.rm = T))
}
# Example usage
pollutantmean("specdata", "sulfate", 1:10)
pollutantmean("specdata", "nitrate", 70:72)
pollutantmean("specdata", "nitrate", 23) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postgresDBConnector.R
\name{disconnectFromPostgresDB}
\alias{disconnectFromPostgresDB}
\title{Disconnects from postgres database}
\usage{
disconnectFromPostgresDB(connection)
}
\arguments{
\item{connection}{Postgres connection object}
}
\description{
Disconnects from postgres database
}
| /Plumber/man/disconnectFromPostgresDB.Rd | permissive | pbelai/hackTheCrisis | R | false | true | 366 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postgresDBConnector.R
\name{disconnectFromPostgresDB}
\alias{disconnectFromPostgresDB}
\title{Disconnects from postgres database}
\usage{
disconnectFromPostgresDB(connection)
}
\arguments{
\item{connection}{Postgres connection object}
}
\description{
Disconnects from postgres database
}
|
##################################################################################################################################
##################################################################################################################################
## Build TSS- and TTS-centered coverage matrices for protein-coding genes.
## usage: Rscript coverage2matrix.R <annotation.gtf> <norm_cov.rds> <TSS_out.rds> <TTS_out.rds>
library(ShortRead)
library(rtracklayer)
library(tsTools)
args = commandArgs(trailingOnly=TRUE)
##################################################################################################################################
##################################################################################################################################
# args[1]: GTF annotation; keep only gene records flagged as protein-coding
my_gtf <- import(args[1])
my_genes <- my_gtf[my_gtf$type == "gene" & my_gtf$gene_biotype == "protein_coding"]
# restrict to the first 21 sequence levels
# (presumably the main chromosomes, dropping scaffolds -- confirm for this genome)
my_chromosomes <- seqlevels(my_genes)[1:21]
my_genes <- my_genes[seqnames(my_genes) %in% my_chromosomes]
# transcription start sites: gene start on "+" strand, gene end on "-" strand
my_TSS <- data.frame(chr = seqnames(my_genes),
center = ifelse(strand(my_genes) == "+", start(my_genes), end(my_genes)),
strand = strand(my_genes),
row.names = my_genes$gene_id)
# transcription termination sites: the opposite gene boundary per strand
my_TTS <- data.frame(chr = seqnames(my_genes),
center = ifelse(strand(my_genes) == "-", start(my_genes), end(my_genes)),
strand = strand(my_genes),
row.names = my_genes$gene_id)
##################################################################################################################################
##################################################################################################################################
# args[2]: normalized coverage object saved as .rds
# (assumed to be per-chromosome coverage as expected by tsTools -- verify against the producer script)
norm_cov <- readRDS(args[2])
# strand-aware 10 kb windows centered on each TSS; result saved to args[3]
mat.TSS <- coverageWindowsCenteredStranded(centers = my_TSS,
coverage = norm_cov,
window.size = 10000)
saveRDS(mat.TSS, file=args[3])
# same windows centered on each TTS; result saved to args[4]
mat.TTS <- coverageWindowsCenteredStranded(centers = my_TTS,
coverage = norm_cov,
window.size = 10000)
saveRDS(mat.TTS, file=args[4])
##################################################################################################################################
##################################################################################################################################
| /ChILseq_ESC/scripts/coverage2matrix.R | permissive | tschauer/Abe_etal_2022 | R | false | false | 2,352 | r |
##################################################################################################################################
##################################################################################################################################
library(ShortRead)
library(rtracklayer)
library(tsTools)
args = commandArgs(trailingOnly=TRUE)
##################################################################################################################################
##################################################################################################################################
my_gtf <- import(args[1])
my_genes <- my_gtf[my_gtf$type == "gene" & my_gtf$gene_biotype == "protein_coding"]
my_chromosomes <- seqlevels(my_genes)[1:21]
my_genes <- my_genes[seqnames(my_genes) %in% my_chromosomes]
my_TSS <- data.frame(chr = seqnames(my_genes),
center = ifelse(strand(my_genes) == "+", start(my_genes), end(my_genes)),
strand = strand(my_genes),
row.names = my_genes$gene_id)
my_TTS <- data.frame(chr = seqnames(my_genes),
center = ifelse(strand(my_genes) == "-", start(my_genes), end(my_genes)),
strand = strand(my_genes),
row.names = my_genes$gene_id)
##################################################################################################################################
##################################################################################################################################
norm_cov <- readRDS(args[2])
mat.TSS <- coverageWindowsCenteredStranded(centers = my_TSS,
coverage = norm_cov,
window.size = 10000)
saveRDS(mat.TSS, file=args[3])
mat.TTS <- coverageWindowsCenteredStranded(centers = my_TTS,
coverage = norm_cov,
window.size = 10000)
saveRDS(mat.TTS, file=args[4])
##################################################################################################################################
##################################################################################################################################
|
library(shiny)
library(shinydashboard)
library(plotly)
library(dplyr)
library(lubridate)
library(leaflet)
library(htmltools)
library(htmlwidgets)
library(dplyr)
library(lubridate)
library(ggplot2)
#setwd("c:/Users/ujwala/Documents/Cousera_Assignment_3")
seattle <- read.csv("seattle.csv",header = TRUE,sep = ",")
# Shiny server for the Seattle crime explorer.
# Outputs:
#   text : static description of the app
#   hist : plotly bar chart of crime frequency, grouped by a user-chosen column
#   map1 : leaflet map of crime locations, filtered by frequency threshold
#          and (optionally) a single offense type
server <- function(input,output) {
  # parse occurrence timestamps and derive day-of-week / time-of-day
  seattle$Occurred.Date.or.Date.Range.Start <-
    mdy_hms(seattle$Occurred.Date.or.Date.Range.Start)
  seattle$DOW <- wday(seattle$Occurred.Date.or.Date.Range.Start)
  seattle$TOD <- hour(seattle$Occurred.Date.or.Date.Range.Start)
  # crime counts per (day of week, hour) -- computed but not rendered by any
  # output below
  time_day <-
    seattle %>% select(DOW, TOD) %>% group_by(DOW, TOD) %>% summarise(crime_count = n())
  # reduced column set offered as grouping choices for the bar chart
  seattle_small <- seattle[,c(7,9,12,13,16:21)]
  seattle_small <- seattle_small[,-c(2,5,6)]
  # crime frequency per (longitude, latitude, offense type), most frequent first
  location <-
    seattle %>% select(Longitude, Latitude,Summarized.Offense.Description) %>% group_by(Longitude, Latitude,Summarized.Offense.Description) %>% summarise(crime_Frequency = n()) %>% arrange(desc(crime_Frequency))
  # drop the top row -- presumably a degenerate/missing-coordinate group; confirm
  location <- location[-1,]
  location
  output$text <- renderText({"What does this app do?
This R shiny App shows how crime frequency is varying with factors like Offense type,district sector,zone,month and year.
It also shows the locations of crimes on the map according to the crime frequency and offense type."})
  # bar chart of crime frequency by the selected x-axis column
  output$hist <-
    renderPlotly({
      plot_ly(
        data = dat(),
        type = "bar",
        #y = ~ get(input$x_axis),
        #x = ~ dat()$crime_Frequency,orientation = 'h'
        x = ~ get(input$x_axis),
        y = ~ dat()$crime_Frequency,orientation = 'v'
      ) %>% layout(
        xaxis = list(title = ""),
        yaxis = list(title = 'Crime Frequency')
      )
    })
  # frequencies for the user-selected grouping column, above the chosen threshold.
  # NOTE: group_by_() is deprecated; the .data pronoun handles the column
  # name held as a string in input$x_axis.
  dat <- reactive({a <- input$crime_Frequency
  seattle_small %>% group_by(.data[[input$x_axis]]) %>% summarize(crime_Frequency = n()) %>% arrange(desc(crime_Frequency)) %>% filter(crime_Frequency >= a)
  })
  # locations above the threshold, restricted to one offense type
  dat2 <- reactive({b <- input$loc
  location %>% filter(crime_Frequency >= b) %>% filter(Summarized.Offense.Description == input$crime_type)})
  # locations above the threshold only (used when crime_type == "All")
  dat3 <- reactive({b <- input$loc
  location %>% filter(crime_Frequency >= b)})
  output$map1 <-
    renderLeaflet({
      if(input$crime_type == "All"){
        dat3() %>% leaflet() %>% addTiles() %>% addCircleMarkers(
          lng = ~ Longitude,
          lat = ~ Latitude,
          radius = ~ crime_Frequency,
          clusterOptions = markerOptions(clickable = TRUE, draggable = TRUE),
          label = ~ htmlEscape(as.character(crime_Frequency))
        )
      }else
        dat2() %>% leaflet() %>% addTiles() %>% addCircleMarkers(
          lng = ~ Longitude,
          lat = ~ Latitude,
          radius = ~ crime_Frequency,
          clusterOptions = markerOptions(clickable = TRUE, draggable = TRUE),
          label = ~ htmlEscape(as.character(crime_Frequency))
        )})
}
| /server.R | no_license | ujwalamusku/courserassignment3 | R | false | false | 2,949 | r | library(shiny)
library(shinydashboard)
library(plotly)
library(dplyr)
library(lubridate)
library(leaflet)
library(htmltools)
library(htmlwidgets)
library(dplyr)
library(lubridate)
library(ggplot2)
#setwd("c:/Users/ujwala/Documents/Cousera_Assignment_3")
seattle <- read.csv("seattle.csv",header = TRUE,sep = ",")
server <- function(input,output) {
seattle$Occurred.Date.or.Date.Range.Start <-
mdy_hms(seattle$Occurred.Date.or.Date.Range.Start)
seattle$DOW <- wday(seattle$Occurred.Date.or.Date.Range.Start)
seattle$TOD <- hour(seattle$Occurred.Date.or.Date.Range.Start)
time_day <-
seattle %>% select(DOW, TOD) %>% group_by(DOW, TOD) %>% summarise(crime_count = n())
seattle_small <- seattle[,c(7,9,12,13,16:21)]
seattle_small <- seattle_small[,-c(2,5,6)]
location <-
seattle %>% select(Longitude, Latitude,Summarized.Offense.Description) %>% group_by(Longitude, Latitude,Summarized.Offense.Description) %>% summarise(crime_Frequency = n()) %>% arrange(desc(crime_Frequency))
location <- location[-1,]
location
output$text <- renderText({"What does this app do?
This R shiny App shows how crime frequency is varying with factors like Offense type,district sector,zone,month and year.
It also shows the locations of crimes on the map according to the crime frequency and offense type."})
output$hist <-
renderPlotly({
plot_ly(
data = dat(),
type = "bar",
#y = ~ get(input$x_axis),
#x = ~ dat()$crime_Frequency,orientation = 'h'
x = ~ get(input$x_axis),
y = ~ dat()$crime_Frequency,orientation = 'v'
) %>% layout(
xaxis = list(title = ""),
yaxis = list(title = 'Crime Frequency')
)
})
dat <- reactive({a <- input$crime_Frequency
seattle_small %>% group_by_(input$x_axis) %>% summarize(crime_Frequency = n()) %>% arrange(desc(crime_Frequency)) %>% filter(crime_Frequency >= a)
})
dat2 <- reactive({b <- input$loc
location %>% filter(crime_Frequency >= b) %>% filter(Summarized.Offense.Description == input$crime_type)})
dat3 <- reactive({b <- input$loc
location %>% filter(crime_Frequency >= b)})
output$map1 <-
renderLeaflet({
if(input$crime_type == "All"){
dat3() %>% leaflet() %>% addTiles() %>% addCircleMarkers(
lng = ~ Longitude,
lat = ~ Latitude,
radius = ~ crime_Frequency,
clusterOptions = markerOptions(clickable = TRUE, draggable = TRUE),
label = ~ htmlEscape(as.character(crime_Frequency))
)
}else
dat2() %>% leaflet() %>% addTiles() %>% addCircleMarkers(
lng = ~ Longitude,
lat = ~ Latitude,
radius = ~ crime_Frequency,
clusterOptions = markerOptions(clickable = TRUE, draggable = TRUE),
label = ~ htmlEscape(as.character(crime_Frequency))
)})
}
|
#' Returns corrected metadata if code has been changed by external force, forcing us to do
#' recalculation. NULL otherwise
code_has_been_changed <- function(metadata)
{
  current_digest <- calculate_code_digest(metadata)
  # Code unchanged: a stored digest exists and matches the fresh one.
  if (!is.null(metadata$codeCRC) && metadata$codeCRC == current_digest) {
    return(NULL)
  }
  # Missing or stale digest: refresh it and hand back the corrected metadata.
  metadata$codeCRC <- current_digest
  metadata
}
| /R/is_object_dirty.R | no_license | adamryczkowski/depwalker | R | false | false | 373 | r |
#' Returns corrected metadata if code has been changed by external force, forcing us to do
#' recalculation. NULL otherwise
code_has_been_changed<-function(metadata)
{
digests<-calculate_code_digest(metadata)
if (is.null(metadata$codeCRC) || metadata$codeCRC != digests)
{
metadata$codeCRC <- digests
return(metadata)
} else {
return(NULL)
}
}
|
# Register a sidebar-link observer for an iAtlas module, then start the
# module's server logic.
#
# Clicking the `link_to_<name>` input switches the dashboard tab set
# identified by `tab_id` to the tab whose id equals `name`. Extra
# arguments in `...` are forwarded to `server_function`, whose result
# is returned to the caller.
call_iatlas_module <- function(
  name,
  server_function,
  input,
  session,
  ...,
  tab_id = "explorertabs"
){
  shiny::observeEvent(input[[paste0("link_to_", name)]], {
    shinydashboard::updateTabItems(
      session,
      inputId = tab_id,
      selected = name
    )
  })
  # Launch the module server and pass its return value through.
  server_function(name, ...)
}
| /R/call_iatlas_module.R | permissive | CRI-iAtlas/iatlas-app | R | false | false | 363 | r | call_iatlas_module <- function(
name,
server_function,
input,
session,
...,
tab_id = "explorertabs"
){
link_string <- paste0("link_to_", name)
shiny::observeEvent(input[[link_string]], {
shinydashboard::updateTabItems(
session,
inputId = tab_id,
selected = name
)
})
x <-server_function(name, ...)
return(x)
}
|
# Simulate dichotomous item responses under the PARELLA unfolding model,
# P(X = 1 | theta, beta) = (1 + (theta - beta)^2)^(-1).
#
# N          : number of items
# n          : number of respondents (size of the returned sample)
# zeros      : if FALSE, all-zero response patterns are avoided by
#              oversampling 10*n persons and keeping n with total score > 0
# parameters : person-parameter distribution, "normal" or "uniform"
#              (uniform spans the range of the item parameters)
# seed       : optional RNG seed
#
# Returns a list with the observed (shuffled) item order, the true item
# order, the simulated data, the response probabilities, and the item /
# person parameters.
parella_sim <- function(N, n, zeros=FALSE, parameters="normal", seed=NULL){
if (!is.null(seed)){
set.seed(seed)
}
if (is.null(N)) stop("The argument N (number of items) must be provided")
if (is.null(n)) stop("The argument n (number of persons) must be provided")
if (is.null(zeros)) zeros <- FALSE
if (is.null(parameters)) parameters <- "normal"
if(zeros==TRUE){
# item parameters: standard normal, sorted so the true order is increasing
beta <- rnorm(N);beta <-sort(beta)
if (parameters=="normal") theta <- rnorm(n)
if (parameters=="uniform") theta <- runif(n,min = min(beta),max = max(beta))
theta <- sort(theta)
# Pma: model probabilities; X: Bernoulli draws per person/item
X <- Pma <- matrix(NA,n,N)
for(i in 1:n){
Pma[i,] <- (1+ (theta[i] - beta)^2)^{-1}
X[i,] <- rbinom(N,1,Pma[i,])
}
# label items by true order (letters when N <= 26)
rnk <- 1:N
dim2 <- NA
if (N<= 26) dim2 <- LETTERS[rnk] else dim2 <- as.character(rnk)
dimnames(X)[[2]] <- dim2
dimnames(Pma)[[2]] <- dim2
names(rnk) <- dim2
# shuffle the columns to produce the "observed" item order
nam <- sample(dim2)
X <- X[,nam]
Prob <- Pma[,nam]
sim.data <-list(obs_ord=nam,true_ord=dim2,items=N, sample=n,seed=seed,
dat=as.data.frame(X),probs=Prob,item.patameters=beta[as.numeric(rnk[nam])],subject.parameters=theta)
return(sim.data)
}
if(zeros==FALSE){
# oversample 10x, then keep only respondents with at least one positive response
n1 <- n
n <- 10*n
beta <- rnorm(N);beta <-sort(beta)
if (parameters=="normal") theta <- rnorm(n)
if (parameters=="uniform") theta <- runif(n,min = min(beta),max = max(beta))
theta <- sort(theta)
X <- Pma <- matrix(NA,n,N)
for(i in 1:n){
Pma[i,] <- (1+ (theta[i] - beta)^2)^{-1}
X[i,] <- rbinom(N,1,Pma[i,])
}
# append theta as a final column so person parameters follow row subsetting
Amat<-cbind(X,theta)
Prob <- Pma
TotalScore<-apply(X,1,sum)
indx0 <- which(TotalScore!=0)
Emat <- Amat[indx0,]
Prob <- Pma[indx0,]
# draw the requested n1 respondents, then re-sort rows by theta
sampling <- sample(nrow(Emat), n1)
Kmat <- Emat[sampling, ]
Prob <- Prob[sampling,]
or<-order(Kmat[,N+1],decreasing = FALSE)
rnk <- 1:N
X<-Kmat[or,rnk]
Prob <- Prob[or,]
# label items by true order ("Item<k>" names when N > 26 in this branch)
dim2 <- NA
if (N<= 26) dim2 <- LETTERS[rnk] else dim2 <- paste("Item",as.character(rnk), sep = "")
dimnames(X)[[2]] <- dim2
dimnames(Prob)[[2]] <- dim2
names(rnk) <- dim2
# shuffle the columns to produce the "observed" item order
nam <- sample(dim2)
X <- X[,nam]
sim.data <-list(obs_ord=nam,true_ord=dim2,items=N, sample=n1,seed=seed,
dat=as.data.frame(X),probs=Prob[,nam], item.patameters=beta[as.numeric(rnk[nam])],
subject.parameters=Kmat[or,N+1])
return(sim.data)
}
}
| /R/parella_sim.R | no_license | cran/mudfold | R | false | false | 2,480 | r | parella_sim <- function(N, n, zeros=FALSE, parameters="normal", seed=NULL){
if (!is.null(seed)){
set.seed(seed)
}
if (is.null(N)) stop("The argument N (number of items) must be provided")
if (is.null(n)) stop("The argument n (number of persons) must be provided")
if (is.null(zeros)) zeros <- FALSE
if (is.null(parameters)) parameters <- "normal"
if(zeros==TRUE){
beta <- rnorm(N);beta <-sort(beta)
if (parameters=="normal") theta <- rnorm(n)
if (parameters=="uniform") theta <- runif(n,min = min(beta),max = max(beta))
theta <- sort(theta)
X <- Pma <- matrix(NA,n,N)
for(i in 1:n){
Pma[i,] <- (1+ (theta[i] - beta)^2)^{-1}
X[i,] <- rbinom(N,1,Pma[i,])
}
rnk <- 1:N
dim2 <- NA
if (N<= 26) dim2 <- LETTERS[rnk] else dim2 <- as.character(rnk)
dimnames(X)[[2]] <- dim2
dimnames(Pma)[[2]] <- dim2
names(rnk) <- dim2
nam <- sample(dim2)
X <- X[,nam]
Prob <- Pma[,nam]
sim.data <-list(obs_ord=nam,true_ord=dim2,items=N, sample=n,seed=seed,
dat=as.data.frame(X),probs=Prob,item.patameters=beta[as.numeric(rnk[nam])],subject.parameters=theta)
return(sim.data)
}
if(zeros==FALSE){
n1 <- n
n <- 10*n
beta <- rnorm(N);beta <-sort(beta)
if (parameters=="normal") theta <- rnorm(n)
if (parameters=="uniform") theta <- runif(n,min = min(beta),max = max(beta))
theta <- sort(theta)
X <- Pma <- matrix(NA,n,N)
for(i in 1:n){
Pma[i,] <- (1+ (theta[i] - beta)^2)^{-1}
X[i,] <- rbinom(N,1,Pma[i,])
}
Amat<-cbind(X,theta)
Prob <- Pma
TotalScore<-apply(X,1,sum)
indx0 <- which(TotalScore!=0)
Emat <- Amat[indx0,]
Prob <- Pma[indx0,]
sampling <- sample(nrow(Emat), n1)
Kmat <- Emat[sampling, ]
Prob <- Prob[sampling,]
or<-order(Kmat[,N+1],decreasing = FALSE)
rnk <- 1:N
X<-Kmat[or,rnk]
Prob <- Prob[or,]
dim2 <- NA
if (N<= 26) dim2 <- LETTERS[rnk] else dim2 <- paste("Item",as.character(rnk), sep = "")
dimnames(X)[[2]] <- dim2
dimnames(Prob)[[2]] <- dim2
names(rnk) <- dim2
nam <- sample(dim2)
X <- X[,nam]
sim.data <-list(obs_ord=nam,true_ord=dim2,items=N, sample=n1,seed=seed,
dat=as.data.frame(X),probs=Prob[,nam], item.patameters=beta[as.numeric(rnk[nam])],
subject.parameters=Kmat[or,N+1])
return(sim.data)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tenxSummarizedExperiment.R
\name{tenxSummarizedExperiment}
\alias{tenxSummarizedExperiment}
\alias{as.tenxSummarizedExperiment}
\alias{matrixSummarizedExperiment}
\alias{as.matrixSummarizedExperiment}
\alias{dgCMatrixSummarizedExperiment}
\alias{as.dgCMatrixSummarizedExperiment}
\title{Create a 'SummarizedExperiment' from an 10xGenomics hdf5 file.}
\usage{
tenxSummarizedExperiment(h5path, i, j, rowData, colData)
as.tenxSummarizedExperiment(x, rowData, colData)
matrixSummarizedExperiment(h5path, i, j, rowData, colData)
as.matrixSummarizedExperiment(x, rowData, colData)
dgCMatrixSummarizedExperiment(h5path, i, j, rowData, colData)
as.dgCMatrixSummarizedExperiment(x, rowData, colData)
}
\arguments{
\item{h5path}{character(1) file path to a 1M_neurons_*.h5 file.}
\item{i}{Optional integer(), character(), or logical() index used to subset
rows (genes) of the \code{TENxGenomics} object.}
\item{j}{Optional integer(), character(), or logical() index used to subset
columns (samples) of the \code{TENxGenomics} object.}
\item{rowData}{Optional \code{DataFrame()} with as many rows as
there are genes in the 10xGenomics file or object. If missing,
an object is created with 'gene' and 'genename' fields from the
hdf5 file.}
\item{colData}{Optional \code{DataFrame()} with as many rows as
there are samples in the 10xGenomics file or object. If
missing, and object is constructed from the barcodes of the
hdf5 file. The sequence and library portions are separated, and
the mouse is inferred (libraries <= 69 are from mouse "A",
others are from mouse "B").}
\item{x}{A \code{TENxGenomics-class} instance.}
}
\value{
\code{tenxSummarizedExperiment()} and
\code{as.tenxSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{TENxGenomics} object. Down-stream
analysis will typically extract this object from (a subset of)
the SummarizedExperiment, and coerce it to a, e.g,. matrix,
\code{as.matrix(assay(se[, 1:100]))}.
\code{matrixSummarizedExperiment()} and
\code{as.matrixSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{Matrix::dgCMatrix} object. There are
practical limits to the size of this object (e.g., 20k
samples); the code is most efficient when consecutive samples
are selected.
\code{dgCMatrixSummarizedExperiment()} and
\code{as.dgCMatrixSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{Matrix::dgCMatrix} object. There are
practical limits to the size of this object; the code is most
efficient when consecutive samples are selected.
}
\description{
The SummarizedExperiment \code{assay()} contains the
\code{TENxGenomics} object corresponding to the underlying hdf5
file. It also contains row and column annotations provided by
the user or inferred from the hdf5 file. Inferred data requires
a simple match between the file name prefix and
\dQuote{1M_neurons_}; if the file name prefix does not match,
row and column annotations are not created.
}
| /man/tenxSummarizedExperiment.Rd | no_license | IMB-Computational-Genomics-Lab/TENxGenomics | R | false | true | 3,253 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tenxSummarizedExperiment.R
\name{tenxSummarizedExperiment}
\alias{tenxSummarizedExperiment}
\alias{as.tenxSummarizedExperiment}
\alias{matrixSummarizedExperiment}
\alias{as.matrixSummarizedExperiment}
\alias{dgCMatrixSummarizedExperiment}
\alias{as.dgCMatrixSummarizedExperiment}
\title{Create a 'SummarizedExperiment' from an 10xGenomics hdf5 file.}
\usage{
tenxSummarizedExperiment(h5path, i, j, rowData, colData)
as.tenxSummarizedExperiment(x, rowData, colData)
matrixSummarizedExperiment(h5path, i, j, rowData, colData)
as.matrixSummarizedExperiment(x, rowData, colData)
dgCMatrixSummarizedExperiment(h5path, i, j, rowData, colData)
as.dgCMatrixSummarizedExperiment(x, rowData, colData)
}
\arguments{
\item{h5path}{character(1) file path to a 1M_neurons_*.h5 file.}
\item{i}{Optional integer(), character(), or logical() index used to subset
rows (genes) of the \code{TENxGenomics} object.}
\item{j}{Optional integer(), character(), or logical() index used to subset
columns (samples) of the \code{TENxGenomics} object.}
\item{rowData}{Optional \code{DataFrame()} with as many rows as
there are genes in the 10xGenomics file or object. If missing,
an object is created with 'gene' and 'genename' fields from the
hdf5 file.}
\item{colData}{Optional \code{DataFrame()} with as many rows as
there are samples in the 10xGenomics file or object. If
missing, and object is constructed from the barcodes of the
hdf5 file. The sequence and library portions are separated, and
the mouse is inferred (libraries <= 69 are from mouse "A",
others are from mouse "B").}
\item{x}{A \code{TENxGenomics-class} instance.}
}
\value{
\code{tenxSummarizedExperiment()} and
\code{as.tenxSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{TENxGenomics} object. Down-stream
analysis will typically extract this object from (a subset of)
the SummarizedExperiment, and coerce it to a, e.g,. matrix,
\code{as.matrix(assay(se[, 1:100]))}.
\code{matrixSummarizedExperiment()} and
\code{as.matrixSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{Matrix::dgCMatrix} object. There are
practical limits to the size of this object (e.g., 20k
samples); the code is most efficient when consecutive samples
are selected.
\code{dgCMatrixSummarizedExperiment()} and
\code{as.dgCMatrixSummarizedExperiment()} return a
\code{SummarizedExperiment} instance where the assay() data are
represented as a \code{Matrix::dgCMatrix} object. There are
practical limits to the size of this object; the code is most
efficient when consecutive samples are selected.
}
\description{
The SummarizedExperiment \code{assay()} contains the
\code{TENxGenomics} object corresponding to the underlying hdf5
file. It also contains row and column annotations provided by
the user or inferred from the hdf5 file. Inferred data requires
a simple match between the file name prefix and
\dQuote{1M_neurons_}; if the file name prefix does not match,
row and column annotations are not created.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_heterogeneous_dataset.R
\name{make_heterogeneous_dataset}
\alias{make_heterogeneous_dataset}
\title{Build a heterogeneou dataset of predefined size}
\usage{
make_heterogeneous_dataset(
epitopes,
proteins,
taxonomy_list,
nPos,
nNeg,
removeIDs = NULL,
hostIDs = NULL,
min_epit = 8,
max_epit = 25,
only_exact = FALSE,
pos.mismatch.rm = "all",
set.positive = "mode",
window_size = 2 * min_epit - 1,
max.N = 2,
save_folder = "./",
rnd.seed = NULL,
ncpus = 1
)
}
\arguments{
\item{epitopes}{data frame of epitope data (returned by \code{\link[=get_LBCE]{get_LBCE()}}).}
\item{proteins}{data frame of protein data (returned by \code{\link[=get_proteins]{get_proteins()}}).}
\item{taxonomy_list}{list containing taxonomy information
(generated by \code{\link[=get_taxonomy]{get_taxonomy()}})}
\item{nPos}{number of positive examples to extract. \strong{NOTE}: this refers to
the number of unique positive examples extracted from \code{epitopes}, not to the
size of the data frame returned (which is obtained after windowing using
\code{\link[=make_window_df]{make_window_df()}}).}
\item{nNeg}{number of negative examples to extract}
\item{removeIDs}{vector of organism IDs to remove (see \code{\link[=filter_epitopes]{filter_epitopes()}}).
Useful for, e.g., using a Class-level \code{orgIDs} and removing some species
or genera. If \code{NULL} then no removal is performed.}
\item{hostIDs}{vector of host IDs to retain (see \code{\link[=filter_epitopes]{filter_epitopes()}}).
If \code{NULL} then no host filtering is performed.}
\item{min_epit}{positive integer, shortest epitope to be considered}
\item{max_epit}{positive integer, longest epitope to be considered}
\item{only_exact}{logical, should only sequences labelled as "Exact Epitope"
in variable \emph{epit_struc_def} (within \code{epitopes}) be considered?}
\item{pos.mismatch.rm}{should epitopes with position mismatches be removed?
Use "all" (default) for removing any position mismatch or "align"
if the routine should attempt to
search the epitope sequence in the protein sequence.}
\item{set.positive}{how to decide whether an observation should be of the
"Positive" (+1) class? Use "any" to set a sequence as positive if
$n_positive > 0$, "mode" to set it if $n_positive >= n_negative$,
or "all" to set it if $n_negative == 0$. Defaults to "mode".}
\item{window_size}{positive integer, size of window to use.}
\item{max.N}{maximum length of N-peptide frequency features to be calculated.}
\item{save_folder}{path to folder for saving the results.}
\item{rnd.seed}{seed for random number generator}
\item{ncpus}{number of cores to use for data windowing and feature
calculation.}
}
\value{
Data frame containing the resulting dataset.
}
\description{
This function extracts observations related to heterogeneous organisms from
IEDB data and returns a data set that can be used to train machine learning
models.
}
\details{
The heterogeneous data set is assembled by sampling entries from
\code{epitopes} by organism taxID (after filtering using \code{removeIDs} and
\code{hostIDs}) until the desired number of positive and negative observations is
reached. Random subsampling is performed if required to return the exact
number of unique epitope examples.
}
\author{
Felipe Campelo (\email{f.campelo@aston.ac.uk})
}
| /man/make_heterogeneous_dataset.Rd | no_license | richelbilderbeek/epitopes | R | false | true | 3,407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_heterogeneous_dataset.R
\name{make_heterogeneous_dataset}
\alias{make_heterogeneous_dataset}
\title{Build a heterogeneou dataset of predefined size}
\usage{
make_heterogeneous_dataset(
epitopes,
proteins,
taxonomy_list,
nPos,
nNeg,
removeIDs = NULL,
hostIDs = NULL,
min_epit = 8,
max_epit = 25,
only_exact = FALSE,
pos.mismatch.rm = "all",
set.positive = "mode",
window_size = 2 * min_epit - 1,
max.N = 2,
save_folder = "./",
rnd.seed = NULL,
ncpus = 1
)
}
\arguments{
\item{epitopes}{data frame of epitope data (returned by \code{\link[=get_LBCE]{get_LBCE()}}).}
\item{proteins}{data frame of protein data (returned by \code{\link[=get_proteins]{get_proteins()}}).}
\item{taxonomy_list}{list containing taxonomy information
(generated by \code{\link[=get_taxonomy]{get_taxonomy()}})}
\item{nPos}{number of positive examples to extract. \strong{NOTE}: this refers to
the number of unique positive examples extracted from \code{epitopes}, not to the
size of the data frame returned (which is obtained after windowing using
\code{\link[=make_window_df]{make_window_df()}}).}
\item{nNeg}{number of negative examples to extract}
\item{removeIDs}{vector of organism IDs to remove (see \code{\link[=filter_epitopes]{filter_epitopes()}}).
Useful for, e.g., using a Class-level \code{orgIDs} and removing some species
or genera. If \code{NULL} then no removal is performed.}
\item{hostIDs}{vector of host IDs to retain (see \code{\link[=filter_epitopes]{filter_epitopes()}}).
If \code{NULL} then no host filtering is performed.}
\item{min_epit}{positive integer, shortest epitope to be considered}
\item{max_epit}{positive integer, longest epitope to be considered}
\item{only_exact}{logical, should only sequences labelled as "Exact Epitope"
in variable \emph{epit_struc_def} (within \code{epitopes}) be considered?}
\item{pos.mismatch.rm}{should epitopes with position mismatches be removed?
Use "all" (default) for removing any position mismatch or "align"
if the routine should attempt to
search the epitope sequence in the protein sequence.}
\item{set.positive}{how to decide whether an observation should be of the
"Positive" (+1) class? Use "any" to set a sequence as positive if
\eqn{n_positive > 0}, "mode" to set it if \eqn{n_positive >= n_negative},
or "all" to set it if \eqn{n_negative == 0}. Defaults to "mode".}
\item{window_size}{positive integer, size of window to use.}
\item{max.N}{maximum length of N-peptide frequency features to be calculated.}
\item{save_folder}{path to folder for saving the results.}
\item{rnd.seed}{seed for random number generator}
\item{ncpus}{number of cores to use for data windowing and feature
calculation.}
}
\value{
Data frame containing the resulting dataset.
}
\description{
This function extracts observations related to heterogeneous organisms from
IEDB data and returns a data set that can be used to train machine learning
models.
}
\details{
The heterogeneous data set is assembled by sampling entries from
\code{epitopes} by organism taxID (after filtering using \code{removeIDs} and
\code{hostIDs}) until the desired number of positive and negative observations is
reached. Random subsampling is performed if required to return the exact
number of unique epitope examples.
}
\author{
Felipe Campelo (\email{f.campelo@aston.ac.uk})
}
|
# testthat suite for coalesce(): filling missing values with the first
# non-missing value found across the supplied vectors.
test_that("non-missing scalar replaces all missing values", {
  # A scalar replacement is recycled over every NA position.
  x <- c(NA, 1)
  expect_equal(coalesce(x, 1), c(1, 1))
})
test_that("coerces to common type", {
  # Logical NA is promoted to the type of the replacement value.
  expect_identical(coalesce(NA, 1), 1)
  # Factor levels and attributes survive the coercion.
  f <- factor("x", levels = c("x", "y"))
  expect_identical(coalesce(NA, f), f)
})
test_that("finds non-missing values in multiple positions", {
  # Each position takes its value from the first input that is
  # non-missing there.
  x1 <- c(1L, NA, NA)
  x2 <- c(NA, 2L, NA)
  x3 <- c(NA, NA, 3L)
  expect_equal(coalesce(x1, x2, x3), 1:3)
})
test_that("can specify ptype", {
  # .ptype forces the output type (here: integer).
  x <- NA
  expect_equal(coalesce(x, 1, .ptype = integer()), 1L)
})
test_that("can specify output size", {
  # .size recycles the result to the requested length.
  x <- NA
  expect_equal(coalesce(x, 1, .size = 2), c(1, 1))
})
test_that("coalesce. works", {
  # Dot-suffixed alias still works; it emits a deprecation warning,
  # hence the suppressWarnings().
  x <- c(NA, 1)
  expect_equal(suppressWarnings(coalesce.(x, 1)), c(1, 1))
})
| /tests/testthat/test-coalesce.R | no_license | cran/tidytable | R | false | false | 774 | r | test_that("non-missing scalar replaces all missing values", {
x <- c(NA, 1)
expect_equal(coalesce(x, 1), c(1, 1))
})
test_that("coerces to common type", {
expect_identical(coalesce(NA, 1), 1)
f <- factor("x", levels = c("x", "y"))
expect_identical(coalesce(NA, f), f)
})
test_that("finds non-missing values in multiple positions", {
x1 <- c(1L, NA, NA)
x2 <- c(NA, 2L, NA)
x3 <- c(NA, NA, 3L)
expect_equal(coalesce(x1, x2, x3), 1:3)
})
test_that("can specify ptype", {
x <- NA
expect_equal(coalesce(x, 1, .ptype = integer()), 1L)
})
test_that("can specify output size", {
x <- NA
expect_equal(coalesce(x, 1, .size = 2), c(1, 1))
})
test_that("coalesce. works", {
x <- c(NA, 1)
expect_equal(suppressWarnings(coalesce.(x, 1)), c(1, 1))
})
|
# Render the point-mass (degenerate) distribution as a plotly bar chart
# for the SOCR distribution calculator.
#
# Args:
#   plotrange: length-2 numeric, x-axis plotting range.
#   input: Shiny input object; reads input$FunctionType ("PDF/PMF" or
#          "CDF/CMF") and input$PMD_Location (the point-mass location).
#   distType: label used as the plotly trace name.
#   probrange: length-2 numeric; bars inside this range are highlighted.
#
# Returns the plotly figure, or nothing when FunctionType is neither of
# the two recognized values.
# NOTE(review): relies on dpoint()/ppoint() and the global
# `distributions` vector defined elsewhere in the app — not visible here.
plotlyPointMassDistribution <- function(plotrange, input, distType, probrange) {
    # Integer grid covering at least [0, 10] and the requested range.
    xseq <- seq(round(min(0, as.numeric(plotrange[1])), 0), round(max(as.numeric(plotrange[2]),
        10), 0), 1)
    f60 <- 0
    graphtype <- ""
    if (input$FunctionType == "PDF/PMF") {
        f60 <- dpoint(xseq, as.numeric(input$PMD_Location))
        # Force probability 1 at the point-mass location. The index
        # arithmetic assumes xseq starts at round(plotrange[1]) —
        # TODO confirm the offset is correct when plotrange[1] > 0.
        f60[as.numeric(input$PMD_Location) + abs(round(plotrange[1])) + 1] <- 1
        graphtype <- "PMF"
    } else if (input$FunctionType == "CDF/CMF") {
        f60 <- ppoint(xseq, as.numeric(input$PMD_Location))
        graphtype <- "CMF"
    } else {
        # Unknown function type: leave graphtype empty so nothing is drawn.
        graphtype <- ""
    }
    if (graphtype != "") {
        xsize = length(xseq)
        # Default (blue) bar color; bars inside probrange turn orange.
        colors = c(rep("rgb(31, 119, 180)", xsize))
        for (index in 1:xsize) {
            if (xseq[index] >= round(probrange[1], 0) && xseq[index] <= round(probrange[2],
                0)) {
                colors[index] = "rgb(255, 127, 14)"
            }
        }
        fig <- plot_ly(x = xseq, y = f60, name = distType, type = "bar", marker = list(color = colors),
            text = f60, hovertemplate = paste("<br><b>Prob. </b>: %{y}</br>", "<b>X</b>: %{x}",
                "<b>Y</b>: %{y}"), )
        fig <- fig %>%
            plotly::layout(title = paste(distributions[60], " - ", graphtype, sep = ""),
                hovermode = "x", hoverlabel = list(namelength = 100), yaxis = list(fixedrange = TRUE,
                  zeroline = TRUE, range = c(min(f60), max(f60)), type = "linear"),
                xaxis = list(showticklabels = TRUE, title = "* All x values rounded to nearest integers",
                  zeroline = TRUE, showline = TRUE, showgrid = TRUE, linecolor = "rgb(204, 204, 204)",
                  linewidth = 2, mirror = TRUE, fixedrange = TRUE, range = c(plotrange[1],
                    plotrange[2])), showlegend = FALSE)
        # Disable in-plot editing of titles/annotations.
        fig <- fig %>%
            config(editable = FALSE)
        fig
    }
}
| /plotlyFunctions/PointMass.R | no_license | SOCR/ProbDistCalc_RShiny | R | false | false | 1,922 | r | plotlyPointMassDistribution <- function(plotrange, input, distType, probrange) {
xseq <- seq(round(min(0, as.numeric(plotrange[1])), 0), round(max(as.numeric(plotrange[2]),
10), 0), 1)
f60 <- 0
graphtype <- ""
if (input$FunctionType == "PDF/PMF") {
f60 <- dpoint(xseq, as.numeric(input$PMD_Location))
f60[as.numeric(input$PMD_Location) + abs(round(plotrange[1])) + 1] <- 1
graphtype <- "PMF"
} else if (input$FunctionType == "CDF/CMF") {
f60 <- ppoint(xseq, as.numeric(input$PMD_Location))
graphtype <- "CMF"
} else {
graphtype <- ""
}
if (graphtype != "") {
xsize = length(xseq)
colors = c(rep("rgb(31, 119, 180)", xsize))
for (index in 1:xsize) {
if (xseq[index] >= round(probrange[1], 0) && xseq[index] <= round(probrange[2],
0)) {
colors[index] = "rgb(255, 127, 14)"
}
}
fig <- plot_ly(x = xseq, y = f60, name = distType, type = "bar", marker = list(color = colors),
text = f60, hovertemplate = paste("<br><b>Prob. </b>: %{y}</br>", "<b>X</b>: %{x}",
"<b>Y</b>: %{y}"), )
fig <- fig %>%
plotly::layout(title = paste(distributions[60], " - ", graphtype, sep = ""),
hovermode = "x", hoverlabel = list(namelength = 100), yaxis = list(fixedrange = TRUE,
zeroline = TRUE, range = c(min(f60), max(f60)), type = "linear"),
xaxis = list(showticklabels = TRUE, title = "* All x values rounded to nearest integers",
zeroline = TRUE, showline = TRUE, showgrid = TRUE, linecolor = "rgb(204, 204, 204)",
linewidth = 2, mirror = TRUE, fixedrange = TRUE, range = c(plotrange[1],
plotrange[2])), showlegend = FALSE)
fig <- fig %>%
config(editable = FALSE)
fig
}
}
|
# testthat suite for addCountGenos(): adding per-variant genotype counts
# to the INFO slot of an ExpandedVCF (TVTB / Bioconductor).
context("addCountGenos")
# Settings ----
# VCF file shipped with the TVTB package.
vcfFile <- system.file("extdata", "moderate.vcf", package = "TVTB")
# TVTB parameters: genotype codes for hom-ref, het, and hom-alt calls.
tparam <- TVTBparam(Genotypes("0|0", c("0|1", "1|0"), "1|1"))
# Pre-process variants: read, then expand to one row per ALT allele.
vcf <- VariantAnnotation::readVcf(vcfFile, param = tparam)
vcf <- VariantAnnotation::expand(vcf, row.names = TRUE)
# Arguments ----
test_that("addCountGenos supports all signatures",{
    ## Implicitly tested by higher-level methods
    # samples = "missing": count over all samples.
    expect_s4_class(
        addCountGenos(
            vcf, het(tparam), "NHET", "Number of heterozygous genotypes"),
        "ExpandedVCF"
    )
    # samples = "numeric": select samples by column index.
    expect_s4_class(
        addCountGenos(
            vcf, c("0|1", "1|0"),
            "NHET", "Number of heterozygous genotypes", samples = 1:ncol(vcf)),
        "ExpandedVCF"
    )
    # samples = "character": select samples by name.
    expect_s4_class(
        addCountGenos(
            vcf, c("0|1", "1|0"),
            "NHET", "Number of heterozygous genotypes",
            colnames(geno(vcf)[["GT"]])),
        "ExpandedVCF"
    )
})
# Fixture with the NHET field already present, for the force= tests below.
vcf_NHET <- addCountGenos(
    vcf, c("0|1", "1|0"), "NHET", "Number of heterozygous genotypes")
# Argument: force ----
test_that("force=FALSE throws an error if the field exists",{
    expect_error(
        addCountGenos(
            vcf_NHET, c("0|1", "1|0"),
            "NHET", "Number of heterozygous genotypes")
    )
})
test_that("force=TRUE messages that the field will be updated",{
    expect_message(
        addCountGenos(
            vcf_NHET, c("0|1", "1|0"),
            "NHET", "Number of heterozygous genotypes",
            colnames(geno(vcf)[["GT"]]), TRUE)
    )
})
| /tests/testthat/test_addCountGenos-methods.R | permissive | kevinrue/TVTB | R | false | false | 1,681 | r | context("addCountGenos")
# Settings ----
# VCF file
vcfFile <- system.file("extdata", "moderate.vcf", package = "TVTB")
# TVTB parameters
tparam <- TVTBparam(Genotypes("0|0", c("0|1", "1|0"), "1|1"))
# Pre-process variants
vcf <- VariantAnnotation::readVcf(vcfFile, param = tparam)
vcf <- VariantAnnotation::expand(vcf, row.names = TRUE)
# Arguments ----
test_that("addCountGenos supports all signatures",{
## Implicitely tested by higher-level methods
# samples = "missing"
expect_s4_class(
addCountGenos(
vcf, het(tparam), "NHET", "Number of heterozygous genotypes"),
"ExpandedVCF"
)
# samples = "numeric"
expect_s4_class(
addCountGenos(
vcf, c("0|1", "1|0"),
"NHET", "Number of heterozygous genotypes", samples = 1:ncol(vcf)),
"ExpandedVCF"
)
# samples = "character"
expect_s4_class(
addCountGenos(
vcf, c("0|1", "1|0"),
"NHET", "Number of heterozygous genotypes",
colnames(geno(vcf)[["GT"]])),
"ExpandedVCF"
)
})
vcf_NHET <- addCountGenos(
vcf, c("0|1", "1|0"), "NHET", "Number of heterozygous genotypes")
# Argument: force ----
test_that("force=FALSE throws an error if the field exists",{
expect_error(
addCountGenos(
vcf_NHET, c("0|1", "1|0"),
"NHET", "Number of heterozygous genotypes")
)
})
test_that("force=TRUE messages that the field will be updated",{
expect_message(
addCountGenos(
vcf_NHET, c("0|1", "1|0"),
"NHET", "Number of heterozygous genotypes",
colnames(geno(vcf)[["GT"]]), TRUE)
)
})
|
# Example usage of EGRET::readUserDaily (extracted from the package help
# page): import a user-supplied daily streamflow file for EGRET analysis.
library(EGRET)
### Name: readUserDaily
### Title: Import user daily data for EGRET analysis
### Aliases: readUserDaily
### Keywords: USGS WRTDS data file import
### ** Examples
# Locate the tab-separated example file shipped with the EGRET package.
filePath <- system.file("extdata", package="EGRET")
fileName <- "ChoptankRiverFlow.txt"
Daily <- readUserDaily(filePath,fileName,separator="\t")
| /data/genthat_extracted_code/EGRET/examples/readUserDaily.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 329 | r | library(EGRET)
### Name: readUserDaily
### Title: Import user daily data for EGRET analysis
### Aliases: readUserDaily
### Keywords: USGS WRTDS data file import
### ** Examples
filePath <- system.file("extdata", package="EGRET")
fileName <- "ChoptankRiverFlow.txt"
Daily <- readUserDaily(filePath,fileName,separator="\t")
|
library(foreach)
source('./dataSourcer.R')
BASE_DAT_FNAME <- '/tic-tac-toe-'
BASE_DAT_DIR <- './data/tic-tac-toe/'
# Run 1-nearest-neighbour classification on the tic-tac-toe data set:
# 6-fold cross-validation on the training data plus a held-out test
# evaluation, printing timing and accuracy to the console.
# Relies on featurizer.TicTac()/getTicTacTrainData()/getTicTacTestData()
# from dataSourcer.R (sourced at the top of this file).
runKNN <- function(){
  trainingDat <- featurizer.TicTac(getTicTacTrainData())
  testDat <- featurizer.TicTac(getTicTacTestData())
  # Time the cross-validation; xvalNums becomes the overall fraction
  # of correctly classified held-out rows.
  xvalRunTime <- system.time(xvalNums <- sum(xval(trainingDat)[,'numGood'])/
                               nrow(trainingDat))['elapsed']
  # Fraction correct on the independent test set.
  testGood <- sum(validate(testDat,trainingDat ))/nrow(testDat)
  print(paste0('6-fold x-validation clock time: ',round(xvalRunTime,digits = 3),' seconds'))
  print(paste0('6-fold x-val % correct: ',round(xvalNums,digits=4)*100))
  print(paste0('test data % correct: ',round(testGood,digits=4)*100))
}
# Similarity score between two featurized observations: the number of
# feature positions where the two vectors agree. The first element of
# each vector (the class label) is excluded from the comparison.
# NOTE: despite the name, this is a match count (higher = more similar),
# not a Hamming distance; callers rank candidates by it in decreasing
# order.
hamDist <- function(newDat, oldDat){
  new_feats <- newDat[-1]
  old_feats <- oldDat[-1]
  agreements <- !xor(new_feats, old_feats)
  sum(agreements)
}
# 1-nearest-neighbour classification: score the observation against
# every training row with hamDist() and return the label (column 1) of
# the most similar row. Ties go to the earliest training row, matching
# the stable ordering of the original implementation. The `n` argument
# is retained for interface compatibility but is currently unused.
classify <- function(obs, trainingDat, n=1){
  sims <- apply(trainingDat, 1, function(candidate) hamDist(obs, candidate))
  best_row <- which.max(sims)
  trainingDat[best_row, 1]
}
# Score each row of `obs` against the training data: classify the row
# and compare the prediction to the row's true label (element 1).
# Returns a one-column logical matrix, TRUE where the prediction agrees
# with the label (!xor == logical equality).
validate <- function(obs, trainingDat){
  check_row <- function(row_vals){
    pred <- classify(matrix(row_vals, ncol = ncol(obs)), trainingDat)
    !xor(pred, row_vals[1])
  }
  results <- apply(obs, 1, check_row)
  matrix(results, ncol = 1)
}
# cross-validation
# 6-fold CV over the training rows: rows are randomly assigned to folds
# 1..6; each fold is held out in turn and scored with validate() against
# the remaining rows. Returns a data frame with one row per fold
# (columns: i, numGood).
# NOTE(review): no RNG seed is set here, so fold assignment (and results)
# vary between runs. %do% executes sequentially, so the .export list is
# not strictly required — presumably kept for a %dopar% variant.
xval <- function(trainingDat){
  # Balanced fold labels, truncated to the number of rows, then shuffled.
  nums <- rep(1:6,ceiling(nrow(trainingDat)/6))[1:nrow(trainingDat)]
  sets <- sample(nums,nrow(trainingDat),replace = F)
  foreach(i=1:6,.combine=rbind,
          .export = c('validate','classify','hamDist')
          ) %do% {
    holdout <- which(sets==i)
    train <- setdiff(1:nrow(trainingDat),holdout)
    # drop=F keeps single-row subsets as matrices.
    data.frame(i=i,numGood=sum(validate(trainingDat[holdout,,drop=F],
                                        trainingDat[train,,drop=F]))
               )
  }
}
| /k-nearest.R | no_license | ngg123/badges | R | false | false | 1,635 | r | library(foreach)
source('./dataSourcer.R')
BASE_DAT_FNAME <- '/tic-tac-toe-'
BASE_DAT_DIR <- './data/tic-tac-toe/'
runKNN <- function(){
trainingDat <- featurizer.TicTac(getTicTacTrainData())
testDat <- featurizer.TicTac(getTicTacTestData())
xvalRunTime <- system.time(xvalNums <- sum(xval(trainingDat)[,'numGood'])/
nrow(trainingDat))['elapsed']
testGood <- sum(validate(testDat,trainingDat ))/nrow(testDat)
print(paste0('6-fold x-validation clock time: ',round(xvalRunTime,digits = 3),' seconds'))
print(paste0('6-fold x-val % correct: ',round(xvalNums,digits=4)*100))
print(paste0('test data % correct: ',round(testGood,digits=4)*100))
}
hamDist <- function(newDat,oldDat){
newDat <- newDat[-1]
oldDat <- oldDat[-1]
sum(!(xor(oldDat,newDat)))
}
classify <- function(obs,trainingDat,n=1){
dist <- apply(trainingDat,1,function(tr)hamDist(obs,tr))
topSamp <- order(dist,decreasing = T)
trainingDat[topSamp[1],1]
}
validate <- function(obs,trainingDat){
f <- function(obsp){!xor(classify(matrix(obsp,ncol=ncol(obs)),trainingDat),(obsp[1]))}
matrix(apply(obs,1,f),ncol=1)
}
# cross-validation
xval <- function(trainingDat){
nums <- rep(1:6,ceiling(nrow(trainingDat)/6))[1:nrow(trainingDat)]
sets <- sample(nums,nrow(trainingDat),replace = F)
foreach(i=1:6,.combine=rbind,
.export = c('validate','classify','hamDist')
) %do% {
holdout <- which(sets==i)
train <- setdiff(1:nrow(trainingDat),holdout)
data.frame(i=i,numGood=sum(validate(trainingDat[holdout,,drop=F],
trainingDat[train,,drop=F]))
)
}
}
|
#
# This Shiny web application demonstrates the use of custom image files
# in place of icons for value boxes in Shiny Dashboard by overriding two
# functions:
#
# 'icon' from the shiny package and 'valueBox' from the shinydashboard package.
#
# Each function adds minimal, specific additional handling of image files.
# Note: A custom css file must also be included so that value boxes can
# display the icons. For that reason, do not expect images in place of icons to
# work elsewhere in shiny or shinydashboard.
# Motivation: libraries like font awesome and glyphicon cannot be expected to
# provide a substantial suite of icons tailored to probability and statistics
# or many other subjects. Examples here use seven custom icons for inspiration,
# which are simply tiny png files of native R plots. These png files must be
# placed in the app's www/ directory.
#
library(shiny)
library(shinydashboard)
library(purrr)
post <- "https://blog.snap.uaf.edu/2017/01/11/custom-images-for-shiny-dashboard-valuebox-icons/"
gist <- "https://gist.github.com/leonawicz/0fab3796b02a62b7f3bd0c02a171f0b7"
ui <- dashboardPage(
dashboardHeader(title="Custom Icons"),
dashboardSidebar(
sidebarMenu(
menuItem("Light icons", tabName = "light"),
menuItem("Dark icons", tabName = "dark")
),
div(a(href=post, "Related blog post"), style="width: 80%; padding: 15px"),
div(a(href=gist, "Github gist"), style="width: 80%; padding: 15px")
),
dashboardBody(
tags$head( # must include css
tags$style(HTML("
.img-local {
}
.small-box .img-local {
position: absolute;
top: auto;
bottom: 5px;
right: 5px;
z-index: 0;
font-size: 70px;
color: rgba(0, 0, 0, 0.15);
}"
))
),
tabItems(
tabItem(tabName = "light",
fluidRow(valueBoxOutput("distLight", width=3)),
fluidRow(
box(plotOutput("hist1"),
br(),
h4("Some random values for the bottom six value boxes showing delta change:"),
verbatimTextOutput("vals1"), status="primary", width=6),
box(uiOutput("vBoxesLight"), status="primary", width=6)
)
),
tabItem(tabName = "dark",
fluidRow(valueBoxOutput("distDark", width=3)),
fluidRow(
box(plotOutput("hist2"),
br(),
h4("Some random values for the bottom six value boxes\nshowing delta change:"),
verbatimTextOutput("vals2"), status="primary", width=6),
box(uiOutput("vBoxesDark"), status="primary", width=6)
)
)
)
),
title="Custom icons"
)
server <- function(input, output) {
source("override.R", local = TRUE) # override 'icon' and 'valueBox'
clrs <- c("yellow", "orange", "purple", "red", "blue", "navy",
"light-blue", "teal", "olive", "green", "fuchsia", "maroon")
pTextSize <- function(x, value) tags$p(x, style=paste0("font-size: ", value, "%;"))
vbox <- function(vb){ # taglist around all 12 value boxes
tagList(
fluidRow(
tags$head(tags$style(HTML(".small-box {height: 100px}"))),
column(6, vb[[1]], vb[[5]], vb[[3]]),
column(6, vb[[2]], vb[[6]], vb[[4]])
),
fluidRow(
column(6, vb[[7]], vb[[8]], vb[[9]]),
column(6, vb[[10]], vb[[11]], vb[[12]])
)
)
}
# image files
fileparts1 <- c(paste0("normal_", c("mean", "sd", "min", "max", "median"), "_"), "boxplot_iqr_")
files_white <- paste0("stat_icon_", fileparts1, "white.png")
files_black <- paste0("stat_icon_", fileparts1, "black.png")
fileparts2 <- c(
paste0("ts_", c("deltaDec_", "deltaInc_")), "bar_deltaNeg_",
paste0("ts_", c("deltaPctDec_", "deltaPctInc_")), "bar_deltaPos_")
files_white <- c(files_white, paste0("stat_icon_", fileparts2, "white.png"))
files_black <- c(files_black, paste0("stat_icon_", fileparts2, "black.png"))
# data
set.seed(1)
x <- rnorm(1000, 100, 10)
del <- c(-154, 47, -81, "-12%", "114%", 60) # values for delta change example icons
del.lab <- c("Total change", "Total change", "Max loss", "% change", "% change", "Max growth")
val <- round(c(mean(x), sd(x), min(x), max(x), median(x)))
val <- c(val, paste(round(quantile(x, probs = c(0.25, 0.75))), collapse=" - "), del)
val <- map2(val, c(rep(100, 5), 75, rep(100, 6)), ~pTextSize(.x, .y))
text <- map(c("Mean", "Std Dev", "Min", "Max", "Median", "IQR", del.lab), ~pTextSize(.x, 150))
output$vBoxesLight <- renderUI({
vb <- map(1:12, ~valueBox(
val[[.x]], text[[.x]],
icon=icon(list(src=files_white[.x], width="80px"), lib="local"),
color=clrs[.x], width=NULL)
)
vbox(vb)
})
output$vBoxesDark <- renderUI({
vb <- map(1:12, ~valueBox(
val[[.x]], text[[.x]],
icon=icon(list(src=files_black[.x], width="80px"), lib="local"),
color=clrs[.x], width=NULL)
)
vbox(vb)
})
output$distLight <- renderValueBox({
x <- "stat_icon_normal_dist_white.png"
valueBox("Data", "light image icon color",
icon=icon(list(src=x, width="80px"), lib="local"),
color="black", width=NULL)
})
output$distDark <- renderValueBox({
x <- "stat_icon_normal_dist_black.png"
valueBox("Data", "dark image icon color",
icon=icon(list(src=x, width="80px"), lib="local"),
color="aqua", width=NULL)
})
output$hist1 <- renderPlot({ hist(x) })
output$hist2 <- renderPlot({ hist(x) })
output$vals1 <- renderText({ del })
output$vals2 <- renderText({ del })
}
# Run the application
shinyApp(ui = ui, server = server)
| /customIconsDemo/app.R | no_license | eefermat/jfsp | R | false | false | 5,759 | r | #
# This Shiny web application demonstrates the use of custom image files
# in place of icons for value boxes in Shiny Dashboard by overriding two
# functions:
#
# 'icon' from the shiny package and 'valueBox' from the shinydashboard package.
#
# Each function adds minimal, specific additional handling of image files.
# Note: A custom css file must also be included so that value boxes can
# display the icons. For that reason, do not expect images in place of icons to
# work elsewhere in shiny or shinydashboard.
# Motivation: libraries like font awesome and glyphicon cannot be expected to
# provide a substantial suite of icons tailored to probability and statistics
# or many other subjects. Examples here use seven custom icons for inspiration,
# which are simply tiny png files of native R plots. These png files must be
# placed in the app's www/ directory.
#
library(shiny)
library(shinydashboard)
library(purrr)
post <- "https://blog.snap.uaf.edu/2017/01/11/custom-images-for-shiny-dashboard-valuebox-icons/"
gist <- "https://gist.github.com/leonawicz/0fab3796b02a62b7f3bd0c02a171f0b7"
ui <- dashboardPage(
dashboardHeader(title="Custom Icons"),
dashboardSidebar(
sidebarMenu(
menuItem("Light icons", tabName = "light"),
menuItem("Dark icons", tabName = "dark")
),
div(a(href=post, "Related blog post"), style="width: 80%; padding: 15px"),
div(a(href=gist, "Github gist"), style="width: 80%; padding: 15px")
),
dashboardBody(
tags$head( # must include css
tags$style(HTML("
.img-local {
}
.small-box .img-local {
position: absolute;
top: auto;
bottom: 5px;
right: 5px;
z-index: 0;
font-size: 70px;
color: rgba(0, 0, 0, 0.15);
}"
))
),
tabItems(
tabItem(tabName = "light",
fluidRow(valueBoxOutput("distLight", width=3)),
fluidRow(
box(plotOutput("hist1"),
br(),
h4("Some random values for the bottom six value boxes showing delta change:"),
verbatimTextOutput("vals1"), status="primary", width=6),
box(uiOutput("vBoxesLight"), status="primary", width=6)
)
),
tabItem(tabName = "dark",
fluidRow(valueBoxOutput("distDark", width=3)),
fluidRow(
box(plotOutput("hist2"),
br(),
h4("Some random values for the bottom six value boxes\nshowing delta change:"),
verbatimTextOutput("vals2"), status="primary", width=6),
box(uiOutput("vBoxesDark"), status="primary", width=6)
)
)
)
),
title="Custom icons"
)
server <- function(input, output) {
source("override.R", local = TRUE) # override 'icon' and 'valueBox'
clrs <- c("yellow", "orange", "purple", "red", "blue", "navy",
"light-blue", "teal", "olive", "green", "fuchsia", "maroon")
pTextSize <- function(x, value) tags$p(x, style=paste0("font-size: ", value, "%;"))
vbox <- function(vb){ # taglist around all 12 value boxes
tagList(
fluidRow(
tags$head(tags$style(HTML(".small-box {height: 100px}"))),
column(6, vb[[1]], vb[[5]], vb[[3]]),
column(6, vb[[2]], vb[[6]], vb[[4]])
),
fluidRow(
column(6, vb[[7]], vb[[8]], vb[[9]]),
column(6, vb[[10]], vb[[11]], vb[[12]])
)
)
}
# image files
fileparts1 <- c(paste0("normal_", c("mean", "sd", "min", "max", "median"), "_"), "boxplot_iqr_")
files_white <- paste0("stat_icon_", fileparts1, "white.png")
files_black <- paste0("stat_icon_", fileparts1, "black.png")
fileparts2 <- c(
paste0("ts_", c("deltaDec_", "deltaInc_")), "bar_deltaNeg_",
paste0("ts_", c("deltaPctDec_", "deltaPctInc_")), "bar_deltaPos_")
files_white <- c(files_white, paste0("stat_icon_", fileparts2, "white.png"))
files_black <- c(files_black, paste0("stat_icon_", fileparts2, "black.png"))
# data
set.seed(1)
x <- rnorm(1000, 100, 10)
del <- c(-154, 47, -81, "-12%", "114%", 60) # values for delta change example icons
del.lab <- c("Total change", "Total change", "Max loss", "% change", "% change", "Max growth")
val <- round(c(mean(x), sd(x), min(x), max(x), median(x)))
val <- c(val, paste(round(quantile(x, probs = c(0.25, 0.75))), collapse=" - "), del)
val <- map2(val, c(rep(100, 5), 75, rep(100, 6)), ~pTextSize(.x, .y))
text <- map(c("Mean", "Std Dev", "Min", "Max", "Median", "IQR", del.lab), ~pTextSize(.x, 150))
output$vBoxesLight <- renderUI({
vb <- map(1:12, ~valueBox(
val[[.x]], text[[.x]],
icon=icon(list(src=files_white[.x], width="80px"), lib="local"),
color=clrs[.x], width=NULL)
)
vbox(vb)
})
output$vBoxesDark <- renderUI({
vb <- map(1:12, ~valueBox(
val[[.x]], text[[.x]],
icon=icon(list(src=files_black[.x], width="80px"), lib="local"),
color=clrs[.x], width=NULL)
)
vbox(vb)
})
output$distLight <- renderValueBox({
x <- "stat_icon_normal_dist_white.png"
valueBox("Data", "light image icon color",
icon=icon(list(src=x, width="80px"), lib="local"),
color="black", width=NULL)
})
output$distDark <- renderValueBox({
x <- "stat_icon_normal_dist_black.png"
valueBox("Data", "dark image icon color",
icon=icon(list(src=x, width="80px"), lib="local"),
color="aqua", width=NULL)
})
output$hist1 <- renderPlot({ hist(x) })
output$hist2 <- renderPlot({ hist(x) })
output$vals1 <- renderText({ del })
output$vals2 <- renderText({ del })
}
# Run the application
shinyApp(ui = ui, server = server)
|
# sentiment analysis
# Sentiment analysis of Vargas Llosa novels using tidytext and the Bing
# sentiment lexicon.
library(tidytext)
library(tidyr)
library(wordcloud)
library(ggplot2)
theme_set(theme_bw())
library(stringr)
library(forcats)
library(wordcloud)
# import data
# NOTE(review): mvll_tidy is assigned twice — the readRDS() assignment
# overwrites the vargas_llosa() one, making the first call dead code
# (and vargas_llosa() is not sourced at this point in the file);
# confirm which data source is intended.
mvll_tidy <- Choptank_eList <- NULL # placeholder removed; see original
mvll_tidy <- vargas_llosa() %>%
  unnest_tokens(word, text)
mvll_tidy <- readRDS('mvll_tidy.rds') %>%
  unnest_tokens(word, text)
# Net sentiment (positive minus negative word counts) per title, in
# chunks of 80 lines of text.
mvll_sentiment <- mvll_tidy %>%
  inner_join(get_sentiments("bing")) %>%
  count(title, index = line %/% 80 , sentiment) %>%
  spread(sentiment, n) %>%
  mutate(sentiment = positive - negative)
# Sentiment trajectory across each novel.
ggplot(mvll_sentiment, aes(index, sentiment, fill=title)) +
  geom_col() +
  facet_wrap(~title, scales = "free_x")
# Word cloud of the 40 most frequent words in one novel, after dropping
# stop words and contraction fragments (words containing U+2019).
s <- mvll_tidy %>%
  filter(title == "Notebooks of Don Rigoberto" ) %>%
  filter(!str_detect(word, "\u2019")) %>% # remove didn't, they're, etc.
  anti_join(stop_words) %>%
  count(word, sort = TRUE) %>%
  with(wordcloud(word, n, max.words = 40))
# Word-level sentiment counts: join each token to the Bing lexicon and
# count occurrences of every (word, sentiment) pair, most frequent first.
# BUG FIX: removed a stray unmatched '}' that followed this pipeline and
# made the whole script fail to parse.
word_counts <- mvll_tidy %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
# NOTE: this exploratory plot should probably not go in the final script
# Top-10 words per sentiment, plotted as horizontal bar charts faceted
# by sentiment.
# BUG FIX: geom_col() takes `show.legend`, not `show_legend`; ggplot2
# ignored the misspelled parameter (with a warning), so the legend was
# never actually suppressed.
word_counts %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill=sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free_y") +
  coord_flip()
# Word frequency
library(tidytext)
library(dplyr)
library(ggplot2)
source('./vargas_llosa.R')
tidy_mvll <- vargas_llosa() %>%
unnest_tokens(word, text)
book_words <- tidy_mvll %>%
count(title, word, sort = TRUE) %>%
ungroup %>%
bind_tf_idf(word, title, n)
plt <- book_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
grupo_1 <- libros$title[1:4]
plt %>%
filter(title %in% libros$title[10:13]) %>%
group_by(title) %>%
top_n(10) %>%
ungroup %>%
ggplot(aes(word, tf_idf, fill = title)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~title, ncol = 2, scales="free") +
coord_flip() | /src/sentiment.R | no_license | rlabuonora/mvll_nlp | R | false | false | 1,961 | r | # sentiment analysis
library(tidytext)
library(tidyr)
library(wordcloud)
library(ggplot2)
theme_set(theme_bw())
library(stringr)
library(forcats)
library(wordcloud)
# import data
mvll_tidy <- vargas_llosa() %>%
unnest_tokens(word, text)
mvll_tidy <- readRDS('mvll_tidy.rds') %>%
unnest_tokens(word, text)
mvll_sentiment <- mvll_tidy %>%
inner_join(get_sentiments("bing")) %>%
count(title, index = line %/% 80 , sentiment) %>%
spread(sentiment, n) %>%
mutate(sentiment = positive - negative)
ggplot(mvll_sentiment, aes(index, sentiment, fill=title)) +
geom_col() +
facet_wrap(~title, scales = "free_x")
s <- mvll_tidy %>%
filter(title == "Notebooks of Don Rigoberto" ) %>%
filter(!str_detect(word, "\u2019")) %>% # remove didn't, they're, etc.
anti_join(stop_words) %>%
count(word, sort = TRUE) %>%
with(wordcloud(word, n, max.words = 40))
word_counts <- mvll_tidy %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
ungroup
}
# Esto no iria
word_counts %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) +
geom_col(show_legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
coord_flip()
# Word frequency
library(tidytext)
library(dplyr)
library(ggplot2)
source('./vargas_llosa.R')
tidy_mvll <- vargas_llosa() %>%
unnest_tokens(word, text)
book_words <- tidy_mvll %>%
count(title, word, sort = TRUE) %>%
ungroup %>%
bind_tf_idf(word, title, n)
plt <- book_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word))))
grupo_1 <- libros$title[1:4]
plt %>%
filter(title %in% libros$title[10:13]) %>%
group_by(title) %>%
top_n(10) %>%
ungroup %>%
ggplot(aes(word, tf_idf, fill = title)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~title, ncol = 2, scales="free") +
coord_flip() |
# Read the UCI household power consumption data from a hard-coded
# project directory.
# NOTE(review): setwd() to an absolute user path is fragile and a side
# effect on the whole session; consider taking the directory as an
# argument instead.
load.data <- function(){
    setwd("/Users/North_Point/Dropbox/MOOC/Data_Science/Exploratory_Analysis/week1/New_Submission")
    # "?" marks missing values in this data set.
    data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
    data
}
# Subset the raw data to 2007-02-01 .. 2007-02-02 and build a POSIXlt
# 'time' column from the Date and Time fields. The original Date/Time
# columns (and the helper 'date') are dropped before returning.
clean.data <- function(){
    dat <- load.data()
    dat$date <- as.Date(dat$Date, "%d/%m/%Y")
    start_date = as.Date("2007-02-01", "%Y-%m-%d")
    end_date = as.Date("2007-02-02", "%Y-%m-%d")
    subdat <- subset(dat, date >= start_date & date <= end_date)
    # Combine date and time strings, then parse into POSIXlt.
    subdat$time <- paste(subdat$Date, subdat$Time)
    subdat$time <- strptime(subdat$time, "%d/%m/%Y %H:%M:%S")
    subdat <- subset(subdat, select = -c(Date, Time, date))
    subdat
}
# Plot 1: histogram of Global Active Power, copied to plot1.png.
plot.1 <- function(){
    dat <- clean.data()
    par(mfrow = c(1,1))
    with(dat, hist(Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
    # Copy the on-screen device to a PNG, then close the PNG device.
    dev.copy(png, "plot1.png")
    dev.off()
}
# Plot 2: Global Active Power over time as a line chart, copied to
# plot2.png.
plot.2 <- function(){
    dat <- clean.data()
    par(mfrow = c(1,1))
    with(dat, plot(time, Global_active_power, type = "l", ylab = "Global Active Power (killowatts)", xlab = " " ))
    dev.copy(png, "plot2.png")
    dev.off()
}
# Plot 3: the three sub-metering series over time on one set of axes
# (black/red/blue) with a legend, copied to plot3.png.
plot.3 <- function(){
    dat <- clean.data()
    par(mfrow = c(1,1))
    # type = "n" sets up the axes; lines are layered on afterwards.
    with(dat, plot(time, Sub_metering_1, type = "n", ylab = "Energy sub metering"))
    with(dat, lines(time, Sub_metering_1))
    with(dat, lines(time, Sub_metering_2, col = "red"))
    with(dat, lines(time, Sub_metering_3, col = "blue"))
    legend("topright", pch = "_", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    dev.copy(png, "plot3.png")
    dev.off()
}
# Plot 4: 2x2 panel — Global Active Power, Voltage, the three
# sub-metering series, and Global Reactive Power, copied to plot4.png.
plot.4 <- function(){
    dat <- clean.data()
    par(mfrow = c(2,2))
    with(dat, plot(time, Global_active_power, type = "l", ylab = "Global Active Power", xlab = " " ))
    with(dat, plot(time, Voltage, type = "l", ylab = "Voltage", xlab = "datetime" ))
    # Third panel: sub-metering lines layered onto empty axes.
    with(dat, plot(time, Sub_metering_1, type = "n", ylab = "Energy sub metering"))
    with(dat, lines(time, Sub_metering_1))
    with(dat, lines(time, Sub_metering_2, col = "red"))
    with(dat, lines(time, Sub_metering_3, col = "blue"))
    # bty = "n" drops the legend box; cex shrinks it to fit the panel.
    legend("topright", bty = "n", pch = "_", cex = 0.75, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    with(dat, plot(time, Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime" ))
    dev.copy(png, "plot4.png")
    dev.off()
}
plot.2()
| /plot2.R | no_license | North-Point/ExData_Plotting1 | R | false | false | 2,659 | r | load.data <- function(){
setwd("/Users/North_Point/Dropbox/MOOC/Data_Science/Exploratory_Analysis/week1/New_Submission")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
data
}
clean.data <- function(){
dat <- load.data()
dat$date <- as.Date(dat$Date, "%d/%m/%Y")
start_date = as.Date("2007-02-01", "%Y-%m-%d")
end_date = as.Date("2007-02-02", "%Y-%m-%d")
subdat <- subset(dat, date >= start_date & date <= end_date)
subdat$time <- paste(subdat$Date, subdat$Time)
subdat$time <- strptime(subdat$time, "%d/%m/%Y %H:%M:%S")
subdat <- subset(subdat, select = -c(Date, Time, date))
subdat
}
plot.1 <- function(){
dat <- clean.data()
par(mfrow = c(1,1))
with(dat, hist(Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
dev.copy(png, "plot1.png")
dev.off()
}
plot.2 <- function(){
dat <- clean.data()
par(mfrow = c(1,1))
with(dat, plot(time, Global_active_power, type = "l", ylab = "Global Active Power (killowatts)", xlab = " " ))
dev.copy(png, "plot2.png")
dev.off()
}
plot.3 <- function(){
dat <- clean.data()
par(mfrow = c(1,1))
with(dat, plot(time, Sub_metering_1, type = "n", ylab = "Energy sub metering"))
with(dat, lines(time, Sub_metering_1))
with(dat, lines(time, Sub_metering_2, col = "red"))
with(dat, lines(time, Sub_metering_3, col = "blue"))
legend("topright", pch = "_", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, "plot3.png")
dev.off()
}
plot.4 <- function(){
dat <- clean.data()
par(mfrow = c(2,2))
with(dat, plot(time, Global_active_power, type = "l", ylab = "Global Active Power", xlab = " " ))
with(dat, plot(time, Voltage, type = "l", ylab = "Voltage", xlab = "datetime" ))
with(dat, plot(time, Sub_metering_1, type = "n", ylab = "Energy sub metering"))
with(dat, lines(time, Sub_metering_1))
with(dat, lines(time, Sub_metering_2, col = "red"))
with(dat, lines(time, Sub_metering_3, col = "blue"))
legend("topright", bty = "n", pch = "_", cex = 0.75, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
with(dat, plot(time, Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime" ))
dev.copy(png, "plot4.png")
dev.off()
}
plot.2()
|
# Run the SBL segmentation step of GADA in parallel over a set of
# per-sample setup files.
#
# Args:
#   x: working directory; must contain (or will gain) an SBL/
#      subdirectory with setupGADA<i> files and gen.info.Rdata.
#   Samples: number of samples, or c(first, last) sample indices;
#            defaults to attr(x, "Samples").
#   estim.sigma2, aAlpha: parameters forwarded to SBL().
#   verbose: print progress messages.
#
# Side effects: changes the working directory (setwd, not restored),
# writes SBL/sbl<i> result files, and on any per-sample failure dumps
# the full result list into the GLOBAL variable `error` (via <<-) so
# the user can inspect what happened.
parSBL<-function(x, Samples, estim.sigma2, aAlpha, verbose=TRUE, ...)
{
setwd(x)
if (verbose)
cat("Creating SBL directory ...")
# Create the output directory only if it does not already exist.
if (!"SBL"%in%dir())
system("mkdir SBL")
if (verbose)
cat("done \n")
if (missing(Samples))
Samples<-attr(x,"Samples")
if (length(Samples)>2)
stop(" 'Samples' must be the number of samples or a vector indicating the first and last sample")
# A single number N means samples 1..N.
if (length(Samples)==1)
Samples<-c(1,Samples)
if (verbose)
cat("Retrieving annotation data ...")
# Loads `gen.info` into this function's environment.
load("SBL/gen.info.Rdata")
if (verbose)
cat("done \n")
# Segment one sample: load its setup object (`temp`), attach the
# annotation, run SBL, and save the result as SBL/sbl<i>.
analize.i<-function(i,estim.sigma2, aAlpha, gen.info, verbose)
{
if (verbose)
cat(" Array #",i,"...")
load(paste("SBL/setupGADA",i,sep=""))
attr(temp,"gen.info")<-gen.info
step1<-SBL(temp, estim.sigma2=estim.sigma2, aAlpha=aAlpha, saveInfo=FALSE)
save(step1,file=paste("SBL/sbl",i,sep="" ),compress=TRUE)
if (verbose)
cat(" Array #",i,"...done \n")
}
if (verbose)
cat("Segmentation procedure for",Samples[2]-Samples[1]+1,"samples ... \n")
# plapply runs the per-sample jobs; each is wrapped in try() so one
# failure does not abort the remaining samples.
res<-plapply(Samples[1]:Samples[2],function(i) try(analize.i(i, estim.sigma2=estim.sigma2, aAlpha=aAlpha, gen.info=gen.info, verbose=verbose), TRUE))
if (verbose)
cat("Segmentation procedure for",Samples[2]-Samples[1]+1,"samples ...done \n")
# Count how many jobs returned a try-error.
error<-sum(unlist(lapply(res, function(x) inherits(x, "try-error"))))
if (error>0)
{
cat("WARNING!!! \n")
cat(" Segmentation procedure failed for",sum(error),"samples \n")
cat(" (type error to see what happened) \n")
# Expose the raw results in the global environment for inspection;
# this intentionally shadows the local `error` count.
error <<- res
}
}
| /R/parSBL.R | no_license | tf2/CNsolidate | R | false | false | 1,646 | r | parSBL<-function(x, Samples, estim.sigma2, aAlpha, verbose=TRUE, ...)
{
setwd(x)
if (verbose)
cat("Creating SBL directory ...")
if (!"SBL"%in%dir())
system("mkdir SBL")
if (verbose)
cat("done \n")
if (missing(Samples))
Samples<-attr(x,"Samples")
if (length(Samples)>2)
stop(" 'Samples' must be the number of samples or a vector indicating the first and last sample")
if (length(Samples)==1)
Samples<-c(1,Samples)
if (verbose)
cat("Retrieving annotation data ...")
load("SBL/gen.info.Rdata")
if (verbose)
cat("done \n")
analize.i<-function(i,estim.sigma2, aAlpha, gen.info, verbose)
{
if (verbose)
cat(" Array #",i,"...")
load(paste("SBL/setupGADA",i,sep=""))
attr(temp,"gen.info")<-gen.info
step1<-SBL(temp, estim.sigma2=estim.sigma2, aAlpha=aAlpha, saveInfo=FALSE)
save(step1,file=paste("SBL/sbl",i,sep="" ),compress=TRUE)
if (verbose)
cat(" Array #",i,"...done \n")
}
if (verbose)
cat("Segmentation procedure for",Samples[2]-Samples[1]+1,"samples ... \n")
res<-plapply(Samples[1]:Samples[2],function(i) try(analize.i(i, estim.sigma2=estim.sigma2, aAlpha=aAlpha, gen.info=gen.info, verbose=verbose), TRUE))
if (verbose)
cat("Segmentation procedure for",Samples[2]-Samples[1]+1,"samples ...done \n")
error<-sum(unlist(lapply(res, function(x) inherits(x, "try-error"))))
if (error>0)
{
cat("WARNING!!! \n")
cat(" Segmentation procedure failed for",sum(error),"samples \n")
cat(" (type error to see what happened) \n")
error <<- res
}
}
|
# Upsert (insert-or-update) a Salesforce record via the REST API by
# sending an HTTP PATCH to the sObject external-ID endpoint.
#
# Args:
#   session: RForcecom session (named vector with 'apiVersion',
#            'instanceURL' and 'sessionID').
#   objectName: API name of the target sObject (e.g. "Account").
#   externalIdField: name of the external-ID field on the object.
#   externalId: value identifying the record to upsert.
#   fields: named vector/list of field names and values to set.
#
# Returns a data frame parsed from the <Result> nodes of the response
# body, or nothing when the server returns an empty body (the normal
# outcome for a successful update). Errors reported by Salesforce are
# raised with stop().
rforcecom.upsert <-
function(session, objectName, externalIdField, externalId, fields){
 # Build the XML payload from the field name/value pairs.
 # BUG FIX: the original converted each value to UTF-8 into fieldValue
 # but then interpolated the unconverted fields[i]; use the converted
 # value so non-ASCII field values are transmitted as UTF-8.
 # NOTE(review): values are not XML-escaped, so '<', '>' or '&' inside
 # a value still produces malformed XML — confirm callers sanitize input.
 xmlElem <- ""
 for(i in seq_along(fields)){
  fieldValue <- iconv(fields[i], from="", to="UTF-8")
  xmlElem <- paste(xmlElem, "<", names(fields[i]), ">", fieldValue, "</", names(fields[i]), ">", sep="")
 }
 xmlBody <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><root>", xmlElem, "</root>", sep="")
 # Send the record with an HTTP PATCH (RCurl); gather headers and body.
 h <- basicHeaderGatherer()
 t <- basicTextGatherer()
 endpointPath <- rforcecom.api.getExternalIdFieldEndpoint(session['apiVersion'], objectName, externalIdField, externalId)
 URL <- paste(session['instanceURL'], endpointPath, sep="")
 OAuthString <- paste("Bearer", session['sessionID'])
 httpHeader <- c("Authorization"=OAuthString, "Accept"="application/xml", 'Content-Type'="application/xml")
 resultSet <- curlPerform(url=URL, httpheader=httpHeader, headerfunction = h$update, writefunction = t$update, ssl.verifypeer=F, postfields=xmlBody, customrequest="PATCH")
 # BEGIN DEBUG
 if(exists("rforcecom.debug") && rforcecom.debug){ message(URL) }
 if(exists("rforcecom.debug") && rforcecom.debug){ message(t$value()) }
 # END DEBUG
 # Parse the response XML; an empty body means success with no content.
 if(t$value() != ""){
  x.root <- xmlRoot(xmlTreeParse(t$value(), asText=T))
  # Check whether Salesforce reported an error and, if so, raise it.
  errorcode <- NA
  errormessage <- NA
  try(errorcode <- iconv(xmlValue(x.root[['Error']][['errorCode']]), from="UTF-8", to=""), TRUE)
  try(errormessage <- iconv(xmlValue(x.root[['Error']][['message']]), from="UTF-8", to=""), TRUE)
  if(!is.na(errorcode) && !is.na(errormessage)){
   stop(paste(errorcode, errormessage, sep=": "))
  }
  # Collect the <Result> nodes into a data frame and return it.
  xdf <- xmlToDataFrame(getNodeSet(xmlParse(t$value()),'//Result'))
  return(xdf)
 }
}
| /OldVersions/RForcecom-0.7/RForcecom/R/rforcecom.upsert.R | permissive | oristides/RForcecom | R | false | false | 1,967 | r | rforcecom.upsert <-
function(session, objectName, externalIdField, externalId, fields){
# Upsert a single record on a Salesforce object via the REST API,
# matching on an external ID field. 'session' is expected to carry
# 'apiVersion', 'instanceURL' and 'sessionID'; 'fields' is a named
# vector/list of field -> value pairs.
# Load packages
#if(!require(XML)){ install.packages("XML"); stop(!require(XML)) }
#if(!require(RCurl)){ install.packages("RCurl"); stop(!require(RCurl)) }
#if(!require(plyr)){ install.packages("plyr"); stop(!require(plyr)) }
# Create XML
xmlElem <- ""
for(i in 1:length(fields)){
# NOTE(review): fieldValue (the UTF-8 conversion) is computed but never
# used -- the raw fields[i] is pasted below. Values are also not
# XML-escaped, so '&' or '<' in data yields an invalid payload.
fieldValue <- iconv(fields[i], from="", to="UTF-8")
xmlElem <- paste(xmlElem, "<", names(fields[i]), ">",fields[i] ,"</", names(fields[i]), ">",sep="")
}
xmlBody <- paste("<?xml version=\"1.0\" encoding=\"UTF-8\"?><root>", xmlElem, "</root>", sep="")
# Send the record with an HTTP PATCH to the external-ID endpoint.
h <- basicHeaderGatherer()
t <- basicTextGatherer()
endpointPath <- rforcecom.api.getExternalIdFieldEndpoint(session['apiVersion'], objectName, externalIdField, externalId)
URL <- paste(session['instanceURL'], endpointPath, sep="")
OAuthString <- paste("Bearer", session['sessionID'])
httpHeader <- c("Authorization"=OAuthString, "Accept"="application/xml", 'Content-Type'="application/xml")
resultSet <- curlPerform(url=URL, httpheader=httpHeader, headerfunction = h$update, writefunction = t$update, ssl.verifypeer=F, postfields=xmlBody, customrequest="PATCH")
# BEGIN DEBUG
if(exists("rforcecom.debug") && rforcecom.debug){ message(URL) }
if(exists("rforcecom.debug") && rforcecom.debug){ message(t$value()) }
# END DEBUG
# Parse the XML response; an empty body means success with no content.
if(t$value() != ""){
x.root <- xmlRoot(xmlTreeParse(t$value(), asText=T))
# Check whether the API reported an error; if so, raise it as an R
# error of the form "errorCode: message".
errorcode <- NA
errormessage <- NA
try(errorcode <- iconv(xmlValue(x.root[['Error']][['errorCode']]), from="UTF-8", to=""), TRUE)
try(errormessage <- iconv(xmlValue(x.root[['Error']][['message']]), from="UTF-8", to=""), TRUE)
if(!is.na(errorcode) && !is.na(errormessage)){
stop(paste(errorcode, errormessage, sep=": "))
}
# Collect the <Result> nodes into a data.frame.
xdf <- xmlToDataFrame(getNodeSet(xmlParse(t$value()),'//Result'))
return(xdf)
}
}
|
## Caching the inverse of a matrix: makeCacheMatrix() builds a special
## "matrix" object that can cache its inverse, and cacheSolve() computes
## the inverse, returning the cached copy when it is already available.
## makeCacheMatrix: wrap a matrix 'x' in a list of getters/setters with a
## cached inverse 'i' (invalidated whenever the matrix is replaced).
makeCacheMatrix <- function(x = matrix()) {
  # Build a list of closures sharing the matrix 'x' and a cached inverse.
  # Replacing the matrix via set() drops the stale cache.
  cached_inverse <- NULL
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" made by
## makeCacheMatrix, computing it with solve() only on the first call and
## serving the cached value afterwards.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix-style
  ## object), reusing the cached inverse when one has been stored.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | xiangyan0525/ProgrammingAssignment2 | R | false | false | 692 | r | ## Put comments here that give an overall description of what your
## functions do: makeCacheMatrix() wraps a matrix with a cached inverse;
## cacheSolve() computes/returns that inverse, reusing the cache.
## makeCacheMatrix: getters/setters around the matrix and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Closures over 'x' (the matrix) and a cached inverse; set() replaces
  # the matrix and invalidates the cache.
  inv <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) {
    inv <<- inverse
  }
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: compute (or fetch from cache) the inverse of the special
## "matrix" object created by makeCacheMatrix.
cacheSolve <- function(x, ...) {
  ## Return the inverse of 'x', computing it at most once: subsequent
  ## calls are served from the cache stored inside 'x'.
  existing <- x$getinverse()
  if (is.null(existing)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
    return(result)
  }
  message("getting cached data")
  existing
}
|
#' Creates a new audit_measure object
#'
#' This family of functions are used to create a new audit_measure
#' object.
#' Audit measure objects have two subclasses: audit_measure_continuous
#' and audit_measure_discrete. You can create both using the
#' audit_measure function (to allow us to create multiple objects at
#' once, for example from a script).
#'
#' Audit measure objects describe the rules which will be applied to
#' a cohort to derive a set of aggregated output measures.
#' The denominator for these measures will be the cohort that they
#' are applied over.
#'
#' Any exclusions from the cohort are specified in exclusions.
#' Each measure can have one or more numerators. This allows measures
#' to be described in groups or a breakdown of results into
#' categories.
#'
#' @param stem_name The name of the new audit measure
#' @param description A short textual description of what the audit
#' measure is intended to measure.
#' @param exclusions Any exclusions from the population expressed as
#' an expression.
#' @param numerators One or more numerators for the measure. This is
#' either the variable if we are looking at a continuous measure, or
#' the pass / fail criteria expressed as a boolean expression.
#' Results can be broken down by using a series of numerators.
#' @param numerator_descriptors A descriptor for each numerator. These
#' are optional but if you provide one you must provide one for every
#' numerator.
#' @param new_numerators Either a tibble with the columns \code{numerator},
#' \code{fun} and \code{descriptor}, or a single expression (which is
#' wrapped into such a tibble using \code{stem_name} and
#' \code{description}).
#' @param is_key_indicator Is this measure a key indicator?
#' @param reference_id A unique reference number for this audit
#' measure. Different audits use different reference schemes. We
#' don't use this reference for calculations, but it is included in
#' metadata and may be used for some of the outputs.
#' @param csv_columns a character vector of all the columns from the
#' raw CSV file needed to make this audit measure.
#' @param measure_type Describes the type of variable - whether it is
#' discrete (usually TRUE/FALSE), or continuous (where medians and
#' quartiles may be used to describe the output).
#'
#' @return The created audit_measure object.
#' @export
audit_measure <- function(stem_name,
description,
exclusions = NULL,
numerators,
numerator_descriptors = NULL,
new_numerators = NULL,
is_key_indicator = FALSE,
reference_id = NULL,
csv_columns = NULL,
measure_type = c("discrete",
"continuous")) {
# --- argument validation -----------------------------------------------
if (!is.character(stem_name)) {
stop("stem_name must be a string")
}
# A string exclusion is parsed into an expression so both forms are
# accepted downstream.
if (is.character(exclusions)) {
exclusions <- rlang::parse_expr(exclusions)
}
# NOTE(review): these scalar conditions use elementwise & / | where the
# short-circuiting && / || would be conventional; behavior is the same
# for length-1 inputs.
if (!is.null(exclusions) & !rlang::is_expression(exclusions)) {
stop("exclusions must be either NULL or an expression")
}
if (!rlang::is_expression(numerators) & !rlang::is_list(numerators)) {
stop(glue::glue("numerators must be either an expression or a
list of expressions"))
}
# If descriptors are supplied there must be exactly one per numerator.
if (!is.null(numerator_descriptors) &
( (rlang::is_expression(numerators) &
length(numerator_descriptors) != 1) |
(rlang::is_list(numerators) &
(length(numerators) != length(numerator_descriptors))))) {
stop(glue::glue("if numerator_descriptors are provided you must \\
provide one for each numerator: there are \\
{length(numerators)} numerators and \\
{length(numerator_descriptors)} numerator \\
descriptors."))
}
# Normalise new_numerators: a single expression is wrapped into a
# one-row tibble keyed by the stem name; a tibble must already carry
# the numerator/fun/descriptor columns.
# NOTE(review): with the default new_numerators = NULL this branch
# relies on rlang::is_expression(NULL) being TRUE (NULL is a syntactic
# literal), producing a tibble whose 'fun' entry is NULL -- confirm
# this is the intended default.
if (!tibble::is_tibble(new_numerators)) {
if (rlang::is_expression(new_numerators)) {
new_numerators = tibble::tribble(
~numerator, ~fun, ~descriptor,
stem_name, new_numerators, description)
} else {
stop("numerators must be either a tibble or an expression")
}
} else {
if (!all(c("numerator", "fun", "descriptor") %in%
names(new_numerators))) {
stop(glue::glue("numerators must contain the columns \\
numerator, fun and descriptor"))
}
}
if (!is.null(csv_columns) & !is.character(csv_columns)) {
stop("csv_columns must be either NULL or a character vector")
}
# Numerator names are prefixed with the stem name; a bare expression
# becomes a single-element list named by the stem alone.
if (is.list(numerators)) {
names(numerators) <- glue::glue("{stem_name}{names(numerators)}")
} else {
numerators <- list(numerators)
names(numerators) <- stem_name
}
if (!is.character(description)) {
stop("description must be a character string")
}
if (!is.logical(is_key_indicator)) {
stop("is_key_indicator must be logical (TRUE or FALSE)")
}
# NOTE(review): the default measure_type has length 2, so this check
# forces the caller to choose explicitly; match.arg() would be the
# idiomatic alternative.
if (length(measure_type) != 1) {
stop("measure_type must set to either 'discrete' or 'continuous'")
}
# Assemble the object and tag it with the subclass used for S3 dispatch.
audit_measure_output <- rlang::list2(
"stem_name" = stem_name,
"exclusions" = exclusions,
"numerators" = numerators,
"numerator_descriptors" = numerator_descriptors,
"new_numerators" = new_numerators,
"is_key_indicator" = is_key_indicator,
"reference_id" = reference_id,
"csv_columns" = csv_columns,
"measure_type" = measure_type,
"description" = description)
if (measure_type == "discrete") {
class(audit_measure_output) <-
c("audit_measure_discrete", "audit_measure")
}
if (measure_type == "continuous") {
class(audit_measure_output) <-
c("audit_measure_continuous", "audit_measure")
}
return(audit_measure_output)
}
#' Test if the object is an audit_measure
#'
#' This function returns `TRUE` for audit_measures
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `audit_measure`
#' class.
#' @export
is_audit_measure <- function(x) {
  # inherits() is the idiomatic equivalent of checking class membership.
  inherits(x, "audit_measure")
}
#' @rdname is_audit_measure
#' @usage NULL
#' @export
is.audit_measure <- is_audit_measure
#' @rdname audit_measure
#' @export
audit_measure_continuous <- function(stem_name,
                                     description,
                                     exclusions = NULL,
                                     numerators,
                                     numerator_descriptors = NULL,
                                     csv_columns = NULL) {
  # Convenience constructor: delegate to audit_measure() with the
  # measure_type fixed to "continuous".
  audit_measure(stem_name = stem_name,
                description = description,
                exclusions = exclusions,
                numerators = numerators,
                numerator_descriptors = numerator_descriptors,
                csv_columns = csv_columns,
                measure_type = "continuous")
}
#' @rdname audit_measure
#' @export
audit_measure_discrete <- function(stem_name,
                                   description,
                                   exclusions = NULL,
                                   numerators,
                                   numerator_descriptors = NULL,
                                   csv_columns = NULL) {
  # Convenience constructor: delegate to audit_measure() with the
  # measure_type fixed to "discrete".
  audit_measure(stem_name = stem_name,
                description = description,
                exclusions = exclusions,
                numerators = numerators,
                numerator_descriptors = numerator_descriptors,
                csv_columns = csv_columns,
                measure_type = "discrete")
}
#' Create outputs from an audit_measure object
#'
#' \code{create_output} is a method to produce a set of instructions
#' to pass to \code{dplyr::summarise}, which is then applied over a
#' cohort to produce our cohort results. The instructions are passed
#' as \code{rlang::exprs}, which can be appended to the summary step
#' using the \code{rlang::!!!} operator.
#' Output columns have a consistent naming convention.
#'
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data:
#' \itemize{
#' \item \strong{median} {Only applicable to
#' \code{audit_measure_continuous} types. It calculates the median
#' and returns it in a column named with the \code{audit_measure}'s
#' stem name followed by Q2, e.g. \strong{myMeasureQ2}.
#' Multiple numerators are not supported by the median output type;
#' only the first numerator is used. Rows matching the measure's
#' exclusion criteria are recoded to NA before the quantile is
#' computed.}
#' \item \strong{quartiles} {This option is similar to \code{median}
#' but in addition to generating Q2, it also creates the lower and
#' upper quartiles, suffixed by Q1 and Q3.}
#' }
#' @export
create_output <- function(x,
numerator = NULL,
output_type) {
# S3 generic: dispatches on the class of the audit_measure object 'x'
# (audit_measure_continuous / audit_measure_discrete).
UseMethod("create_output")
}
#' @export
create_output.default <- function(x,
                                  numerator = NULL,
                                  output_type) {
  # Report the offending class instead of a bare "Unknown class" so the
  # caller can tell which object failed S3 dispatch.
  stop("create_output: no method for object of class ",
       paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_output
#' @export
create_output.audit_measure_continuous <- function(x,
numerator = NULL,
output_type) {
# Build a named list of quoted expressions (for dplyr::summarise) that
# compute the median (and optionally quartiles) of a continuous measure.
available_output_types <- c("median", "quartiles")
if (!output_type %in% available_output_types) {
stop(glue::glue(
"output type for a continuous variable must be either median or
quartiles"))
}
measure <- x
# Only the first numerator is used (multiple numerators unsupported).
numerator_with_exclusions <- measure$numerators[[1]]
# Excluded rows are recoded to NA so quantile(na.rm = TRUE) skips them.
# NOTE(review): NA_integer_ assumes the measure evaluates to integers --
# confirm for double-valued measures (dplyr::if_else is type-strict).
if (!is.null(measure$exclusions)) {
numerator_with_exclusions <- rlang::expr(dplyr::if_else(
!! measure$exclusions,
NA_integer_,
!! measure$numerators[[1]]
))
}
# Q2 comes first so the single "median" output is just its prefix.
probs <- list("median" = 0.5,
"quartiles" = c(0.5, 0.25, 0.75))
indicator_name <- list("median" = "Q2",
"quartiles" = c("Q2", "Q1", "Q3"))
# One quantile expression per requested probability.
output <- purrr::map(probs[[output_type]], ~ rlang::expr(
stats::quantile(!! numerator_with_exclusions,
probs = !!.x,
na.rm = TRUE)))
# Name outputs "<stem>Q2", "<stem>Q1", "<stem>Q3".
names(output) <- glue::glue(
"{measure$stem_name}{indicator_name[[output_type]]}")
output
}
#' @rdname create_output
#' @export
create_output.audit_measure_discrete <- function(x,
numerator = NULL,
output_type) {
# Build a named list of quoted expressions (for dplyr::summarise) giving
# the percentage -- and optionally the denominator and raw counts -- for
# each numerator of a discrete (pass/fail) measure.
available_output_types <- c("d_n_pct",
"d_n_percentage",
"pct",
"percentage")
if (!output_type %in% available_output_types) {
stop(glue::glue(
"output type for a discrete variable must be either pct or
d_n_pct"))
}
measure <- x
# If we specified a numerator then select only that one;
# otherwise provide all numerators (multiple numerators on one
# go not yet supported)
if (!is.null(numerator)) {
numerator_output <- measure$numerators[
glue::glue("{measure$stem_name}{numerator}")]
} else {
numerator_output <- measure$numerators
}
# If there are exclusion criteria add these to the denominator
# and any numerators. The numerator sums all rows where the
# measure is TRUE and not excluded.
# NOTE(review): the denominator's sum(!!exclusions) has no na.rm, so an
# NA in the exclusion expression yields an NA denominator -- confirm
# whether that is intended.
if (is.null(measure$exclusions)) {
denom <- rlang::expr(dplyr::n())
nums <- purrr::map(numerator_output, ~ rlang::expr(
sum( (!!.x), na.rm = TRUE)))
} else {
denom <- rlang::expr(dplyr::n() - sum(!!measure$exclusions))
nums <- purrr::map(numerator_output, ~ rlang::expr(
sum( (!!.x) & !(!!measure$exclusions), na.rm = TRUE)))
}
# If we are outputting the denominator and numerator, set up
# the names for the output, and put the outputs into a vector.
# Set both these vectors as NULL initially so if we aren't
# using them we can just 'add null' without checking.
d_name <- NULL
n_names <- NULL
d_output <- NULL
n_outputs <- NULL
if (startsWith(output_type, "d_n_")) {
d_name <- glue::glue("{measure$stem_name}D")
n_names <- glue::glue("{names(numerator_output)}N")
d_output <- denom
n_outputs <- nums
d_sym <- rlang::sym(d_name)
# Percentages reference the named denominator column so they are
# computed from the already-summarised counts.
# NOTE(review): the anonymous-function parameter 'x' shadows the
# method argument 'x' here; harmless but worth renaming.
pct_outputs <- lapply(rlang::syms(n_names),
function (x) rlang::expr(round(!!x / !!d_sym * 100, 1)))
} else {
pct_outputs <- lapply(nums,
function (x) rlang::expr(round(!!x / !!denom * 100, 1)))
}
# Build the percentage functions from the denominator and any
# number of numerator sections. Then name the list items.
# rbind interleaves each numerator's count with its percentage.
output <- c(d_output, rbind(n_outputs, pct_outputs))
names(output) <- c(d_name,
rbind(n_names,
glue::glue("{names(numerator_output)}Pct")))
output
}
#' Create descriptions of an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_descriptors outputs a string vector of
#' descriptors for that measure. These are used in audit outputs. The
#' function inputs miror those of create_output.
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data:
#' @export
create_descriptors <- function(x,
numerator = NULL,
output_type) {
# S3 generic: dispatches on the class of the audit_measure object 'x'.
UseMethod("create_descriptors")
}
#' @export
create_descriptors.default <- function(x,
                                       numerator = NULL,
                                       output_type) {
  # Report the offending class instead of a bare "Unknown class" so the
  # caller can tell which object failed S3 dispatch.
  stop("create_descriptors: no method for object of class ",
       paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_descriptors
#' @export
create_descriptors.audit_measure_continuous <- function(x,
                                                        numerator = NULL,
                                                        output_type) {
  # Return one descriptor string per output row for a continuous measure
  # (1 row for "median", 3 for "quartiles"); only the first row carries a
  # label, the remainder stay "".
  measure <- x
  available_output_types <- c("median", "quartiles")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
quartiles"))
  }
  rows <- if (output_type == "quartiles") 3 else 1
  descriptors <- vector("character", rows)
  # Bug fix: the original indexed numerator_descriptors[[1]] before the
  # NULL check, which raised "subscript out of bounds" whenever no
  # numerator descriptors were supplied. Check the list itself instead,
  # falling back to the stem name when there are no descriptors.
  descriptors[[1]] <- if (!is.null(measure$numerator_descriptors)) {
    measure$description
  } else {
    measure$stem_name
  }
  descriptors
}
#' @rdname create_descriptors
#' @export
create_descriptors.audit_measure_discrete <- function(x,
numerator = NULL,
output_type) {
# Return one descriptor string per output row for a discrete measure:
# d_n_* outputs add a leading denominator row and interleave a blank
# after each numerator's descriptor (for the % row).
measure <- x
available_output_types <- c("d_n_pct",
"d_n_percentage",
"pct",
"percentage")
if (!output_type %in% available_output_types) {
stop(glue::glue(
"output type for a discrete variable must be either pct or
d_n_pct"))
}
# Row count mirrors create_output(): (count + %) per numerator plus one
# denominator row when the d_n_ form is requested.
d_n_rows <- if (startsWith(output_type, "d_n_")) 1 else 0
num_rows <- if (is.null(numerator)) {
length(measure$numerators)
} else 1
rows <- (1 + d_n_rows) * num_rows + d_n_rows
# With no descriptors supplied, every row is labelled by the stem name.
if (is.null(measure$numerator_descriptors)) {
return (rep(measure$stem_name, times = rows))
}
descriptors <- vector("character", rows)
if (is.null(numerator)) {
# All numerators: either the raw descriptor list (pct form), or the
# measure description followed by descriptor/"" pairs (d_n_ form).
descriptors <- if (d_n_rows == 0) {
measure$numerator_descriptors
} else {
c(measure$description,
rbind(measure$numerator_descriptors, ""))
}
} else {
# Single numerator: look up its descriptor by "<stem><numerator>".
if (d_n_rows == 1) {
descriptors[1] <- measure$description
}
descriptors[d_n_rows + 1] <-
measure$numerator_descriptors[[
glue::glue("{measure$stem_name}{numerator}")]]
}
descriptors
}
#' Create categories of an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_categories outputs a string vector of
#' the category type for that measure, allowing us to sort the
#' aggregated data once produced. Unlike the other class methods, an
#' extra parameter, category, must be supplied: this is the string
#' category name which will be assigned to all output rows.
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data.
#' @param category Name of the category as a string. This will be set
#' as a blank string if unspecified.
#' @export
create_categories <- function(x,
numerator = NULL,
output_type,
category = "") {
# S3 generic: dispatches on the class of the audit_measure object 'x'.
UseMethod("create_categories")
}
#' @export
create_categories.default <- function(x,
                                      numerator = NULL,
                                      output_type,
                                      category = "") {
  # Report the offending class instead of a bare "Unknown class" so the
  # caller can tell which object failed S3 dispatch.
  stop("create_categories: no method for object of class ",
       paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_categories
#' @export
create_categories.audit_measure_continuous <- function(x,
                                                       numerator = NULL,
                                                       output_type,
                                                       category = "") {
  # One category label per output row: "quartiles" yields three rows
  # (Q2/Q1/Q3), "median" a single row.
  if (!output_type %in% c("median", "quartiles")) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
quartiles"))
  }
  n_rows <- if (identical(output_type, "quartiles")) 3L else 1L
  rep(category, times = n_rows)
}
#' @rdname create_categories
#' @export
create_categories.audit_measure_discrete <- function(x,
                                                     numerator = NULL,
                                                     output_type,
                                                     category = "") {
  # One category label per output row. Each numerator contributes one
  # row ("%") or two ("n" + "%") when the d/n/% form is requested, which
  # also adds a single leading denominator row.
  if (!output_type %in% c("d_n_pct", "d_n_percentage", "pct", "percentage")) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
d_n_pct"))
  }
  has_d_n <- startsWith(output_type, "d_n_")
  n_numerators <- if (is.null(numerator)) length(x$numerators) else 1L
  per_numerator <- if (has_d_n) 2L else 1L
  total_rows <- per_numerator * n_numerators + if (has_d_n) 1L else 0L
  rep(category, times = total_rows)
}
#' Create data type label for an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_data_type_label creates a label which
#' describes what sort of data the row represents (ie. denominator,
#' numerator, median, percentage etc).
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data.
#' @export
create_data_type_label <- function(x,
numerator = NULL,
output_type) {
# S3 generic: dispatches on the class of the audit_measure object 'x'.
UseMethod("create_data_type_label")
}
#' @export
create_data_type_label.default <- function(x,
                                           numerator = NULL,
                                           output_type) {
  # Report the offending class instead of a bare "Unknown class" so the
  # caller can tell which object failed S3 dispatch.
  stop("create_data_type_label: no method for object of class ",
       paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_data_type_label
#' @export
create_data_type_label.audit_measure_continuous <- function(x,
                                                            numerator = NULL,
                                                            output_type) {
  # Return a label per output row for a continuous measure, ordered to
  # match create_output(): Q2 first, then Q1 and Q3.
  available_output_types <- c("median", "quartiles")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
quartiles"))
  }
  # The original assigned this if/return into an unused variable 'rows';
  # the dead assignment is removed.
  if (output_type == "quartiles") {
    return(c("Median", "Lower IQR", "Upper IQR"))
  }
  "Median"
}
#' @rdname create_data_type_label
#' @export
create_data_type_label.audit_measure_discrete <- function(x,
                                                          numerator = NULL,
                                                          output_type) {
  # Return a label per output row for a discrete measure: "%" per
  # numerator for percentage-only outputs; "n","%" per numerator plus a
  # single leading "d" when the d/n/% form is requested.
  if (!output_type %in% c("d_n_pct", "d_n_percentage", "pct", "percentage")) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
d_n_pct"))
  }
  wants_counts <- startsWith(output_type, "d_n_")
  n_numerators <- if (is.null(numerator)) length(x$numerators) else 1L
  labels <- rep(if (wants_counts) c("n", "%") else "%", times = n_numerators)
  if (wants_counts) {
    labels <- c("d", labels)
  }
  labels
}
#' Create a list of outputs from a table of audit_measure objects
#'
#' This function operates on a tibble containing the following
#' columns:
#' \itemize{
#' \item \strong{x} {The output measure as a quosure.}
#' \item \strong{numerator} {a numerator name if one is required.}
#' \item \strong{output_type} {The type of output required. For
#' continuous variables this should be \strong{median} or
#' \strong{quartiles}, for discrete ones \strong{pct} or
#' \strong{d_n_pct}.}
#' }
#' At present the tibble will need to be created in code because we
#' are referring to the quosure directly; in future though we will
#' use a list for all the measures and a name to refer to in the list
#' will be the column in the table.
#' @export
create_output_tbl <- function(outputs_table) {
# Expand a table of requested outputs into one row per aggregated output
# column: its category, descriptor, data-type label and the summarise
# expression. pmap() calls each helper once per input row and the
# results are flattened in parallel.
# NOTE(review): the first pmap passes ALL columns of outputs_table as
# named arguments to create_categories, so the table must contain
# exactly the columns x / numerator / output_type / category.
tibble::tibble(
categories = unlist(purrr::pmap(
.f = create_categories,
.l = outputs_table)),
descriptors = unlist(purrr::pmap(
.f = create_descriptors,
.l = dplyr::select(outputs_table,
"x",
"numerator", "output_type"))),
data_types = unlist(purrr::pmap(
.f = create_data_type_label,
.l = dplyr::select(outputs_table,
"x",
"numerator", "output_type"))),
exprs = unlist(purrr::pmap(
.f = create_output,
.l = dplyr::select(outputs_table,
"x",
"numerator", "output_type")))
)
}
# This is experimental work to replace creation of the outputs table
# With more efficient / easier to follow code.
# First we pick which numerators we want.
# Then we recode the output type (so far we just use that
# to produce the 'units' but we can use case to recode to a list of
# units with their functions).
# We then separate the vector (we will need to unnest the list if we
# opt for a list format.
# As part of the review we should look to merge numerators and
# numerator_descriptors in audit_measure as a tibble (ultimately as
# class objects; so a single operation extracts them and removes the
# danger of having mismatched labels.
#' Create a list of outputs from a table of audit_measure objects
#' @export
new_output_table <- function(outputs_table) {
# Experimental replacement for create_output_tbl(): expand each request
# row into one row per output column by recoding the output type into a
# comma/dash separated row plan, unnesting the per-measure numerator
# tibbles, and filtering to the requested numerator.
# First pass: "d-n,%" separates the denominator row ("d") from the
# per-numerator rows ("n,%").
outputs_table$output_type <-
dplyr::recode(outputs_table$output_type,
d_n_pct = "d-n,%",
pct = "%",
quartiles = "Q1,Q2,Q3",
median = "Q2")
outputs_table <-
tidyr::separate_rows(outputs_table,
"output_type",
sep = "-")
# Pull the per-measure numerator tibble out of the measure object.
outputs_table <-
tidyr::hoist(outputs_table, "x",
new_numerators = "new_numerators")
# Denominator rows carry no numerator.
# NOTE(review): dplyr::if_else with a bare NULL 'false' argument is
# expected to error when any output_type == "d" row exists -- confirm;
# list(NULL) or vctrs-compatible missing value may be needed.
outputs_table <-
dplyr::mutate(outputs_table,
new_numerators = dplyr::if_else(
output_type != "d",
new_numerators,
NULL))
outputs_table <- dplyr::select(outputs_table, -"x")
outputs_table <- dplyr::rename(outputs_table,
"desired_numerator" = "numerator")
# NOTE(review): tidyr::unnest's argument is 'cols'; 'col' relies on
# partial matching here.
outputs_table <-
tidyr::unnest(outputs_table,
col = "new_numerators",
keep_empty = TRUE)
# Keep only the requested numerator (or everything when none was asked
# for).
# NOTE(review): the comparisons against the string "NULL" look like they
# rely on NULL values having been stringified upstream -- verify.
outputs_table <- dplyr::filter(outputs_table,
(numerator == desired_numerator) | (desired_numerator == "NULL")
| (fun == "NULL"))
outputs_table <- dplyr::select(outputs_table, -"desired_numerator")
# outputs_table$output_type <-
# dplyr::recode(outputs_table$output_type,
# d_n_pct = "d,n,%",
# pct = "%",
# quartiles = "Q1,Q2,Q3",
# median = "Q2")
# Second pass: split the remaining comma-separated row plans.
outputs_table <-
tidyr::separate_rows(outputs_table,
"output_type",
sep = ",")
# Rename to the column layout produced by create_output_tbl().
outputs_table <- dplyr::select(outputs_table,
"categories" = category,
"descriptors" = descriptor,
"output_type",
"exprs"= fun)
}
#' Given an outputs_table containing a list of aggregated data
#' outputs, produce a list of all the data columns (raw and modified)
#' needed to create those outputs.
#' @param outputs_table A table of audit outputs (previously
#' created using create_output_tbl())
#' @return A character vector of names of the data columns needed
#' to create the
csv_columns_from_output_table <- function(outputs_table) {
  # Pull the csv_columns character vector out of every measure stored in
  # the table's "x" column, flatten them and drop duplicates.
  per_measure <- purrr::map(.x = outputs_table[["x"]],
                            .f = "csv_columns")
  unique(unlist(per_measure))
}
| /R/audit_measure.R | no_license | md0u80c9/SSNAPStats | R | false | false | 25,927 | r | #' Creates a new audit_measure object
#'
#' This family of functions are used to create a new audit_measure
#' object.
#' Audit measure objects have two subclasses: audit_measure_continuous
#' and audit_measure_discrete. You can create both using the
#' audit_measure function (to allow us to create multiple objects at
#' once, for example from a script).
#'
#' Audit measure objects describe the rules which will be applied to
#' a cohort to derive a set of aggregated output measures.
#' The denominator for these measures will be the cohort that they
#' are applied over.
#'
#' Any exclusions from the cohort are specified in exclusions.
#' Each measure can have one or more numerators. This allows measures
#' to be described in groups or a breakdown of results into
#' categories.
#'
#' @param stem_name The name of the new audit measure
#' @param description A short textual description of what the audit
#' measure is intended to measure.
#' @param exclusions Any exclusions from the population expressed as
#' an expression.
#' @param numerators One or more numerators for the measure. This is
#' either the variable if we are looking at a continuous measure, or
#' the pass / fail criteria expressed as a boolean expression.
#' Results can be broken down by using a series of numerators.
#' @param numerator_descriptors A descriptor for each numerator. These
#' are optional but if you provide one you must provide one for every
#' numerator.
#' @param is_key_indicator Is this measure a key indicator?
#' @param reference_id A unique reference number for this audit
#' measure. Different audits use different reference schemes. We
#' don't use this reference for calculations, but it is included in
#' metadata and may be used for some of the outputs.
#' @param csv_columns a character vector of all the columns from the
#' raw CSV file needed to make this audit measure.
#' @param measure_type Describes the type of variable - whether it is
#' discrete (usually TRUE/FALSE), or continuous (where medians and
#' quartiles may be used to describe the output).
#'
#' @return The created audit_measure object.
#' @export
audit_measure <- function(stem_name,
description,
exclusions = NULL,
numerators,
numerator_descriptors = NULL,
new_numerators = NULL,
is_key_indicator = FALSE,
reference_id = NULL,
csv_columns = NULL,
measure_type = c("discrete",
"continuous")) {
# Constructor: validate the inputs, normalise numerators, and tag the
# result with the subclass used for S3 dispatch.
if (!is.character(stem_name)) {
stop("stem_name must be a string")
}
# A string exclusion is parsed into an expression so both forms are
# accepted downstream.
if (is.character(exclusions)) {
exclusions <- rlang::parse_expr(exclusions)
}
if (!is.null(exclusions) & !rlang::is_expression(exclusions)) {
stop("exclusions must be either NULL or an expression")
}
if (!rlang::is_expression(numerators) & !rlang::is_list(numerators)) {
stop(glue::glue("numerators must be either an expression or a
list of expressions"))
}
# If descriptors are supplied there must be exactly one per numerator.
if (!is.null(numerator_descriptors) &
( (rlang::is_expression(numerators) &
length(numerator_descriptors) != 1) |
(rlang::is_list(numerators) &
(length(numerators) != length(numerator_descriptors))))) {
stop(glue::glue("if numerator_descriptors are provided you must \\
provide one for each numerator: there are \\
{length(numerators)} numerators and \\
{length(numerator_descriptors)} numerator \\
descriptors."))
}
# Normalise new_numerators: a single expression is wrapped into a
# one-row tibble keyed by the stem name.
# NOTE(review): with the default new_numerators = NULL this relies on
# rlang::is_expression(NULL) being TRUE -- confirm intended.
if (!tibble::is_tibble(new_numerators)) {
if (rlang::is_expression(new_numerators)) {
new_numerators = tibble::tribble(
~numerator, ~fun, ~descriptor,
stem_name, new_numerators, description)
} else {
stop("numerators must be either a tibble or an expression")
}
} else {
if (!all(c("numerator", "fun", "descriptor") %in%
names(new_numerators))) {
stop(glue::glue("numerators must contain the columns \\
numerator, fun and descriptor"))
}
}
if (!is.null(csv_columns) & !is.character(csv_columns)) {
stop("csv_columns must be either NULL or a character vector")
}
# Numerator names are prefixed with the stem name; a bare expression
# becomes a single-element list named by the stem alone.
if (is.list(numerators)) {
names(numerators) <- glue::glue("{stem_name}{names(numerators)}")
} else {
numerators <- list(numerators)
names(numerators) <- stem_name
}
if (!is.character(description)) {
stop("description must be a character string")
}
if (!is.logical(is_key_indicator)) {
stop("is_key_indicator must be logical (TRUE or FALSE)")
}
# The default measure_type has length 2, so callers must pick one.
if (length(measure_type) != 1) {
stop("measure_type must set to either 'discrete' or 'continuous'")
}
audit_measure_output <- rlang::list2(
"stem_name" = stem_name,
"exclusions" = exclusions,
"numerators" = numerators,
"numerator_descriptors" = numerator_descriptors,
"new_numerators" = new_numerators,
"is_key_indicator" = is_key_indicator,
"reference_id" = reference_id,
"csv_columns" = csv_columns,
"measure_type" = measure_type,
"description" = description)
if (measure_type == "discrete") {
class(audit_measure_output) <-
c("audit_measure_discrete", "audit_measure")
}
if (measure_type == "continuous") {
class(audit_measure_output) <-
c("audit_measure_continuous", "audit_measure")
}
return(audit_measure_output)
}
#' Test if the object is an audit_measure
#'
#' This function returns `TRUE` for audit_measures
#'
#' @param x An object
#' @return `TRUE` if the object inherits from the `audit_measure`
#' class.
#' @export
is_audit_measure <- function(x) {
  # `inherits()` is the idiomatic way to test S3 class membership and
  # behaves identically to `"audit_measure" %in% class(x)`.
  inherits(x, "audit_measure")
}
#' @rdname is_audit_measure
#' @usage NULL
#' @export
is.audit_measure <- is_audit_measure
#' @rdname audit_measure
#' @export
audit_measure_continuous <- function(stem_name,
                                     description,
                                     exclusions = NULL,
                                     numerators,
                                     numerator_descriptors = NULL,
                                     csv_columns = NULL) {
  # Convenience wrapper around audit_measure() that fixes the measure
  # type to "continuous"; all other arguments pass straight through.
  audit_measure(
    stem_name = stem_name,
    description = description,
    exclusions = exclusions,
    numerators = numerators,
    numerator_descriptors = numerator_descriptors,
    csv_columns = csv_columns,
    measure_type = "continuous")
}
#' @rdname audit_measure
#' @export
audit_measure_discrete <- function(stem_name,
                                   description,
                                   exclusions = NULL,
                                   numerators,
                                   numerator_descriptors = NULL,
                                   csv_columns = NULL) {
  # Convenience wrapper around audit_measure() that fixes the measure
  # type to "discrete"; all other arguments pass straight through.
  audit_measure(
    stem_name = stem_name,
    description = description,
    exclusions = exclusions,
    numerators = numerators,
    numerator_descriptors = numerator_descriptors,
    csv_columns = csv_columns,
    measure_type = "discrete")
}
#' Create outputs from an audit_measure object
#'
#' \code{create_output} is a method to produce a set of instructions
#' to pass to \code{dplyr::summarise}, which is then applied over a
#' cohort to produce our cohort results. The instructions are passed
#' as \code{rlang::exprs}, which can be appended to the summary step
#' using the \code{rlang::!!!} operator.
#' Output columns have a consistent naming convention.
#'
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data:
#' \itemize{
#' \item \strong{median} {Only applicable to
#' \code{audit_measure_continuous} types. It calculates the median
#' and returns it in a column named with the \code{audit_measure}'s
#' stem name followed by Q2, e.g. \strong{myMeasureQ2}.
#' Multiple numerators are not supported by the median output type;
#' only the first numerator is used. This may be extended if there is
#' a need for it in future. Exclusion criteria are applied by setting
#' excluded rows to NA before the quantile is computed.}
#' \item \strong{quartiles} {This option is similar to \code{median}
#' but in addition to generating Q2, it also creates the lower and
#' upper quartiles, suffixed by Q1 and Q3.}
#' }
#' @export
create_output <- function(x,
                          numerator = NULL,
                          output_type) {
  # S3 generic: dispatches on the class of `x` (an audit_measure).
  UseMethod("create_output")
}
#' @export
create_output.default <- function(x,
                                  numerator = NULL,
                                  output_type) {
  # Fallback for objects that are not audit_measures: include the
  # offending class in the message so the caller can diagnose it.
  stop("Unknown class: ", paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_output
#' @export
create_output.audit_measure_continuous <- function(x,
                                                   numerator = NULL,
                                                   output_type) {
  # Build dplyr::summarise() instructions for a continuous measure:
  # one quantile expression per requested statistic, named
  # "<stem>Q2" (plus Q1/Q3 for the quartiles output type).
  available_output_types <- c("median", "quartiles")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
      quartiles"))
  }
  measure <- x
  # Only the first numerator is used; any additional numerators are
  # silently ignored.
  numerator_with_exclusions <- measure$numerators[[1]]
  if (!is.null(measure$exclusions)) {
    # Excluded rows are blanked to NA so quantile(..., na.rm = TRUE)
    # below leaves them out of the calculation.
    # NOTE(review): NA_integer_ assumes an integer-typed numerator;
    # dplyr::if_else() is strict about types — confirm for doubles.
    numerator_with_exclusions <- rlang::expr(dplyr::if_else(
      !! measure$exclusions,
      NA_integer_,
      !! measure$numerators[[1]]
    ))
  }
  # Quantile probabilities and matching column suffixes; Q2 comes
  # first so the median always leads the output.
  probs <- list("median" = 0.5,
                "quartiles" = c(0.5, 0.25, 0.75))
  indicator_name <- list("median" = "Q2",
                         "quartiles" = c("Q2", "Q1", "Q3"))
  output <- purrr::map(probs[[output_type]], ~ rlang::expr(
    stats::quantile(!! numerator_with_exclusions,
                    probs = !!.x,
                    na.rm = TRUE)))
  names(output) <- glue::glue(
    "{measure$stem_name}{indicator_name[[output_type]]}")
  output
}
#' @rdname create_output
#' @export
create_output.audit_measure_discrete <- function(x,
                                                 numerator = NULL,
                                                 output_type) {
  # Build dplyr::summarise() instructions for a discrete measure:
  # numerator counts, an optional denominator column, and percentages.
  available_output_types <- c("d_n_pct",
                              "d_n_percentage",
                              "pct",
                              "percentage")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
      d_n_pct"))
  }
  measure <- x
  # If we specified a numerator then select only that one;
  # otherwise provide all numerators (multiple numerators on one
  # go not yet supported)
  if (!is.null(numerator)) {
    numerator_output <- measure$numerators[
      glue::glue("{measure$stem_name}{numerator}")]
  } else {
    numerator_output <- measure$numerators
  }
  # If there are exclusion criteria add these to the denominator
  # and any numerators. The numerator sums all rows where the
  # measure is TRUE and not excluded.
  if (is.null(measure$exclusions)) {
    denom <- rlang::expr(dplyr::n())
    nums <- purrr::map(numerator_output, ~ rlang::expr(
      sum( (!!.x), na.rm = TRUE)))
  } else {
    denom <- rlang::expr(dplyr::n() - sum(!!measure$exclusions))
    nums <- purrr::map(numerator_output, ~ rlang::expr(
      sum( (!!.x) & !(!!measure$exclusions), na.rm = TRUE)))
  }
  # If we are outputting the denominator and numerator, set up
  # the names for the output, and put the outputs into a vector.
  # Set both these vectors as NULL initially so if we aren't
  # using them we can just 'add null' without checking.
  d_name <- NULL
  n_names <- NULL
  d_output <- NULL
  n_outputs <- NULL
  if (startsWith(output_type, "d_n_")) {
    # Denominator column "<stem>D" and numerator columns "<name>N";
    # each percentage divides the named count column by the named
    # denominator column (as symbols, resolved by summarise()).
    d_name <- glue::glue("{measure$stem_name}D")
    n_names <- glue::glue("{names(numerator_output)}N")
    d_output <- denom
    n_outputs <- nums
    d_sym <- rlang::sym(d_name)
    pct_outputs <- lapply(rlang::syms(n_names),
      function (x) rlang::expr(round(!!x / !!d_sym * 100, 1)))
  } else {
    # Percentage-only output: inline the count and denominator
    # expressions directly inside each percentage expression.
    pct_outputs <- lapply(nums,
      function (x) rlang::expr(round(!!x / !!denom * 100, 1)))
  }
  # Build the percentage functions from the denominator and any
  # number of numerator sections. Then name the list items.
  # rbind() + c() interleaves column-major, giving N1, Pct1, N2, Pct2...
  output <- c(d_output, rbind(n_outputs, pct_outputs))
  names(output) <- c(d_name,
                     rbind(n_names,
                           glue::glue("{names(numerator_output)}Pct")))
  output
}
#' Create descriptions of an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_descriptors outputs a string vector of
#' descriptors for that measure. These are used in audit outputs. The
#' function inputs mirror those of create_output.
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data.
#' @export
create_descriptors <- function(x,
                               numerator = NULL,
                               output_type) {
  # S3 generic: dispatches on the class of `x` (an audit_measure).
  UseMethod("create_descriptors")
}
#' @export
create_descriptors.default <- function(x,
                                       numerator = NULL,
                                       output_type) {
  # Fallback for objects that are not audit_measures: include the
  # offending class in the message so the caller can diagnose it.
  stop("Unknown class: ", paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_descriptors
#' @export
create_descriptors.audit_measure_continuous <- function(x,
                                                        numerator = NULL,
                                                        output_type) {
  measure <- x
  available_output_types <- c("median", "quartiles")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
      quartiles"))
  }
  # One row for the median, three for median plus both quartiles;
  # only the first row carries a descriptor, the rest stay "".
  rows <- if (output_type == "quartiles") 3 else 1
  descriptors <- vector("character", rows)
  # Bug fix: the original tested measure$numerator_descriptors[[1]],
  # which errors with "subscript out of bounds" when
  # numerator_descriptors is NULL (its documented default). Test the
  # whole field instead, mirroring the discrete method.
  descriptors[[1]] <- if (!is.null(measure$numerator_descriptors)) {
    measure$description
  } else {
    measure$stem_name
  }
  descriptors
}
#' @rdname create_descriptors
#' @export
create_descriptors.audit_measure_discrete <- function(x,
                                                      numerator = NULL,
                                                      output_type) {
  # Human-readable row labels for a discrete measure's output block.
  available_output_types <- c("d_n_pct",
                              "d_n_percentage",
                              "pct",
                              "percentage")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
      d_n_pct"))
  }
  with_counts <- startsWith(output_type, "d_n_")
  count_rows <- if (with_counts) 1 else 0
  n_numerators <- if (is.null(numerator)) length(x$numerators) else 1
  total_rows <- (1 + count_rows) * n_numerators + count_rows
  # Without per-numerator descriptors, repeat the stem name per row.
  if (is.null(x$numerator_descriptors)) {
    return(rep(x$stem_name, times = total_rows))
  }
  labels <- vector("character", total_rows)
  if (is.null(numerator)) {
    # All numerators: either the bare descriptors (percentages only),
    # or the measure description followed by descriptor/blank pairs
    # (each blank aligns with a percentage row).
    labels <- if (with_counts) {
      c(x$description, rbind(x$numerator_descriptors, ""))
    } else {
      x$numerator_descriptors
    }
  } else {
    # A single named numerator: optional description row for the
    # denominator, then that numerator's own descriptor.
    if (with_counts) {
      labels[1] <- x$description
    }
    labels[count_rows + 1] <-
      x$numerator_descriptors[[
        glue::glue("{x$stem_name}{numerator}")]]
  }
  labels
}
#' Create categories of an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_categories outputs a string vector of
#' the category type for that measure, allowing us to sort the
#' aggregated data once produced. Unlike the other class methods, an
#' extra parameter, category, must be supplied: this is the string
#' category name which will be assigned to all output rows.
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data.
#' @param category Name of the category as a string. This will be set
#' as a blank string if unspecified.
#' @export
create_categories <- function(x,
                              numerator = NULL,
                              output_type,
                              category = "") {
  # S3 generic: dispatches on the class of `x` (an audit_measure).
  UseMethod("create_categories")
}
#' @export
create_categories.default <- function(x,
                                      numerator = NULL,
                                      output_type,
                                      category = "") {
  # Fallback for objects that are not audit_measures: include the
  # offending class in the message so the caller can diagnose it.
  stop("Unknown class: ", paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_categories
#' @export
create_categories.audit_measure_continuous <- function(x,
                                                       numerator = NULL,
                                                       output_type,
                                                       category = "") {
  # Category labels for a continuous measure: one row per summary
  # statistic (median only, or median plus both quartiles).
  if (!output_type %in% c("median", "quartiles")) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
      quartiles"))
  }
  n_rows <- switch(output_type, quartiles = 3, median = 1)
  rep(category, times = n_rows)
}
#' @rdname create_categories
#' @export
create_categories.audit_measure_discrete <- function(x,
                                                     numerator = NULL,
                                                     output_type,
                                                     category = "") {
  # Category labels for a discrete measure: one label per output row.
  if (!output_type %in% c("d_n_pct", "d_n_percentage", "pct", "percentage")) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
      d_n_pct"))
  }
  with_counts <- startsWith(output_type, "d_n_")
  # Each numerator contributes a percentage row, plus a count row when
  # counts are requested; the denominator then adds one further row.
  n_numerators <- if (is.null(numerator)) length(x$numerators) else 1
  per_numerator <- if (with_counts) 2 else 1
  extra <- if (with_counts) 1 else 0
  rep(category, times = per_numerator * n_numerators + extra)
}
#' Create data type label for an output from an audit_measure
#'
#' This function is a companion to create_output and is called by
#' create_output_tbl. Where create_output creates a set of expressions
#' for an audit_measure; create_data_type_label creates a label which
#' describes what sort of data the row represents (ie. denominator,
#' numerator, median, percentage etc).
#' @param x An \code{audit_measure} object.
#' @param numerator If the measure has more than one numerator and
#' only one of the numerators is required, this should be set to the
#' name of the numerator. If only one numerator is specified in the
#' measure, or all numerators are to be used, then this should be
#' omitted (or set to NULL).
#' @param output_type Determines which sort of aggregated results
#' are being requested from the data.
#' @export
create_data_type_label <- function(x,
                                   numerator = NULL,
                                   output_type) {
  # S3 generic: dispatches on the class of `x` (an audit_measure).
  UseMethod("create_data_type_label")
}
#' @export
create_data_type_label.default <- function(x,
                                           numerator = NULL,
                                           output_type) {
  # Fallback for objects that are not audit_measures: include the
  # offending class in the message so the caller can diagnose it.
  stop("Unknown class: ", paste(class(x), collapse = "/"), call. = FALSE)
}
#' @rdname create_data_type_label
#' @export
create_data_type_label.audit_measure_continuous <- function(x,
                                                            numerator = NULL,
                                                            output_type) {
  # Row-type labels for a continuous measure's output block.
  available_output_types <- c("median", "quartiles")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a continuous variable must be either median or
      quartiles"))
  }
  # Tidied: the original assigned the result of an `if` containing a
  # `return()` to an unused variable `rows`; the labels returned for
  # each output type are unchanged.
  if (output_type == "quartiles") {
    return(c("Median", "Lower IQR", "Upper IQR"))
  }
  "Median"
}
#' @rdname create_data_type_label
#' @export
create_data_type_label.audit_measure_discrete <- function(x,
                                                          numerator = NULL,
                                                          output_type) {
  # Short labels ("d", "n", "%") describing what each output row holds.
  available_output_types <- c("d_n_pct",
                              "d_n_percentage",
                              "pct",
                              "percentage")
  if (!output_type %in% available_output_types) {
    stop(glue::glue(
      "output type for a discrete variable must be either pct or
      d_n_pct"))
  }
  with_counts <- startsWith(output_type, "d_n_")
  # Each numerator yields a count/percentage pair, or just a
  # percentage when counts were not requested.
  per_numerator <- if (with_counts) c("n", "%") else "%"
  n_numerators <- if (is.null(numerator)) length(x$numerators) else 1
  labels <- rep(per_numerator, times = n_numerators)
  # The denominator row leads the block when counts are included.
  if (with_counts) {
    labels <- c("d", labels)
  }
  labels
}
#' Create a list of outputs from a table of audit_measure objects
#'
#' This function operates on a tibble containing the following
#' columns:
#' \itemize{
#' \item \strong{x} {The output measure as a quosure.}
#' \item \strong{numerator} {a numerator name if one is required.}
#' \item \strong{output_type} {The type of output required. For
#' continuous variables this should be \strong{median} or
#' \strong{quartiles}, for discrete ones \strong{pct} or
#' \strong{d_n_pct}.}
#' }
#' At present the tibble will need to be created in code because we
#' are referring to the quosure directly; in future though we will
#' use a list for all the measures and a name to refer to in the list
#' will be the column in the table.
#' @export
create_output_tbl <- function(outputs_table) {
  # Assemble one tibble of categories, descriptors, data-type labels
  # and summarise() expressions by mapping the four companion
  # generics over the rows of `outputs_table`.
  # The three label builders take only (x, numerator, output_type);
  # compute that projection once instead of repeating the select()
  # three times. create_categories also receives the `category`
  # column, so it gets the full table.
  measure_args <- dplyr::select(outputs_table,
                                "x",
                                "numerator", "output_type")
  tibble::tibble(
    categories = unlist(purrr::pmap(
      .f = create_categories,
      .l = outputs_table)),
    descriptors = unlist(purrr::pmap(
      .f = create_descriptors,
      .l = measure_args)),
    data_types = unlist(purrr::pmap(
      .f = create_data_type_label,
      .l = measure_args)),
    exprs = unlist(purrr::pmap(
      .f = create_output,
      .l = measure_args))
  )
}
# This is experimental work to replace creation of the outputs table
# With more efficient / easier to follow code.
# First we pick which numerators we want.
# Then we recode the output type (so far we just use that
# to produce the 'units' but we can use case to recode to a list of
# units with their functions).
# We then separate the vector (we will need to unnest the list if we
# opt for a list format.
# As part of the review we should look to merge numerators and
# numerator_descriptors in audit_measure as a tibble (ultimately as
# class objects; so a single operation extracts them and removes the
# danger of having mismatched labels.
#' Create a list of outputs from a table of audit_measure objects
#' @export
new_output_table <- function(outputs_table) {
  # Experimental replacement for create_output_tbl(): encode each
  # output_type as a comma/dash separated list of row kinds, then
  # split the table into one row per output and pick the matching
  # numerator rows.
  # "-" separates the denominator ("d") from per-numerator parts so
  # the two can be split in different passes below.
  outputs_table$output_type <-
    dplyr::recode(outputs_table$output_type,
                  d_n_pct = "d-n,%",
                  pct = "%",
                  quartiles = "Q1,Q2,Q3",
                  median = "Q2")
  outputs_table <-
    tidyr::separate_rows(outputs_table,
                         "output_type",
                         sep = "-")
  # Pull the new_numerators tibble out of each measure object.
  outputs_table <-
    tidyr::hoist(outputs_table, "x",
                 new_numerators = "new_numerators")
  # Denominator rows need no numerator information.
  # NOTE(review): dplyr::if_else() with NULL as the `false` argument
  # looks suspect — confirm it behaves as intended on list columns.
  outputs_table <-
    dplyr::mutate(outputs_table,
                  new_numerators = dplyr::if_else(
                    output_type != "d",
                    new_numerators,
                    NULL))
  outputs_table <- dplyr::select(outputs_table, -"x")
  outputs_table <- dplyr::rename(outputs_table,
                                 "desired_numerator" = "numerator")
  # One row per numerator; keep_empty retains denominator rows.
  outputs_table <-
    tidyr::unnest(outputs_table,
                  col = "new_numerators",
                  keep_empty = TRUE)
  # Keep the requested numerator, or everything when none was named.
  outputs_table <- dplyr::filter(outputs_table,
    (numerator == desired_numerator) | (desired_numerator == "NULL")
    | (fun == "NULL"))
  outputs_table <- dplyr::select(outputs_table, -"desired_numerator")
  # outputs_table$output_type <-
  #   dplyr::recode(outputs_table$output_type,
  #                 d_n_pct = "d,n,%",
  #                 pct = "%",
  #                 quartiles = "Q1,Q2,Q3",
  #                 median = "Q2")
  # Second pass: split the comma-separated per-numerator row kinds.
  outputs_table <-
    tidyr::separate_rows(outputs_table,
                         "output_type",
                         sep = ",")
  # NOTE(review): the last statement is an assignment, so the function
  # returns this value invisibly — callers must assign the result.
  outputs_table <- dplyr::select(outputs_table,
                                 "categories" = category,
                                 "descriptors" = descriptor,
                                 "output_type",
                                 "exprs"= fun)
}
#' Given an outputs_table containing a list of aggregated data
#' outputs, produce a list of all the data columns (raw and modified)
#' needed to create those outputs.
#' @param outputs_table A table of audit outputs (previously
#' created using create_output_tbl())
#' @return A character vector of names of the data columns needed
#' to create the outputs.
csv_columns_from_output_table <- function(outputs_table) {
  # Pull the csv_columns component from every measure in the `x`
  # column, flatten into one vector, and drop duplicates.
  per_measure <- purrr::map(outputs_table[["x"]], "csv_columns")
  unique(unlist(per_measure))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hash_sentiment_emojis.R
\docType{data}
\name{hash_emojis_identifier}
\alias{hash_emojis_identifier}
\title{Emoji Identifier Lookup Table}
\format{
A data frame with 734 rows and 2 variables
}
\usage{
data(hash_emojis_identifier)
}
\description{
A dataset containing ASCII byte code representation of emojis and their
accompanying identifier (for use in the \pkg{textclean} or \pkg{sentimentr}
packages).
}
\details{
\itemize{
\item x. Byte code representation of emojis
\item y. Emoji description
}
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1991-2018 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
}
\references{
\url{http://www.unicode.org/emoji/charts/full-emoji-list.html}
}
\keyword{datasets}
| /man/hash_emojis_identifier.Rd | no_license | Mpellet771/lexicon | R | false | true | 2,425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hash_sentiment_emojis.R
\docType{data}
\name{hash_emojis_identifier}
\alias{hash_emojis_identifier}
\title{Emoji Identifier Lookup Table}
\format{
A data frame with 734 rows and 2 variables
}
\usage{
data(hash_emojis_identifier)
}
\description{
A dataset containing ASCII byte code representation of emojis and their
accompanying identifier (for use in the \pkg{textclean} or \pkg{sentimentr}
packages).
}
\details{
\itemize{
\item x. Byte code representation of emojis
\item y. Emoji description
}
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1991-2018 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.
THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
}
\references{
\url{http://www.unicode.org/emoji/charts/full-emoji-list.html}
}
\keyword{datasets}
|
## download the raw data (skipped when the archive already exists, so
## re-running the script does not re-download the ~20 MB zip)
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
if (!file.exists('plotting_data.zip')) {
  download.file(url = url, 'plotting_data.zip')
}
## unzip and save (again, only if not already extracted)
if (!file.exists('household_power_consumption.txt')) {
  unzip('plotting_data.zip')
}
### read into a dataframe
### nrows (spelled out: `nrow=` relied on partial argument matching);
### na.strings = '?' because the raw file marks missing measurements
### with '?' - declaring it lets the measurement columns come in as
### numeric, so the as.numeric() calls below run without
### "NAs introduced by coercion" warnings
dat <- read.csv('household_power_consumption.txt', sep = ';',
                header = TRUE, nrows = 1E5, stringsAsFactors = FALSE,
                na.strings = '?')
dat$Datetime <- paste(dat$Date, dat$Time, sep = ' ')
dat$Datetime <- strptime(dat$Datetime, '%d/%m/%Y %H:%M:%S')
### convert time columns
dat$Date <- as.Date(dat$Date, '%d/%m/%Y')
## filter to only the relevant records for the assignment (2007-02-01/02)
#### otherwise the size of the data frame is too large
dat <- dat[dat$Date %in% c(as.Date('2007-02-01'), as.Date('2007-02-02')), ]
dat$Time <- format(strptime(dat$Time, '%H:%M:%S'), '%H:%M:%S')
dat$Global_active_power <- as.numeric(dat$Global_active_power)
dat$Global_reactive_power <- as.numeric(dat$Global_reactive_power)
dat$Voltage <- as.numeric(dat$Voltage)
dat$Global_intensity <- as.numeric(dat$Global_intensity)
dat$Sub_metering_1 <- as.numeric(dat$Sub_metering_1)
dat$Sub_metering_2 <- as.numeric(dat$Sub_metering_2)
dat$Sub_metering_3 <- as.numeric(dat$Sub_metering_3)
## Plot #1: histogram of global active power, saved as a 480x480 PNG
png('plot1.png', width = 480, height = 480)
hist(dat$Global_active_power, col = 'red',
     main = 'Global Active Power', xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | komoroka/ExData_Plotting1 | R | false | false | 1,386 | r | ## download the raw data
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
download.file(url = url, 'plotting_data.zip')
## unzip and save
unzip('plotting_data.zip')
### read into a dataframe
dat <- read.csv('household_power_consumption.txt', sep = ';',
header = TRUE, nrow= 1E5, stringsAsFactors = FALSE )
dat$Datetime <- paste(dat$Date, dat$Time, sep = ' ')
dat$Datetime <- strptime(dat$Datetime, '%d/%m/%Y %H:%M:%S')
### convert time columns
dat$Date <- as.Date(dat$Date, '%d/%m/%Y')
## filter to only the relevant records for the assignment
#### otherwise the size of the data frame is too large
dat <- dat[dat$Date %in% c(as.Date('2007-02-01'),as.Date('2007-02-02')),]
dat$Time <- format(strptime(dat$Time, '%H:%M:%S'),'%H:%M:%S')
dat$Global_active_power <- as.numeric(dat$Global_active_power)
dat$Global_reactive_power <- as.numeric(dat$Global_reactive_power)
dat$Voltage <- as.numeric(dat$Voltage)
dat$Global_intensity <- as.numeric(dat$Global_intensity)
dat$Sub_metering_1 <- as.numeric(dat$Sub_metering_1)
dat$Sub_metering_2 <- as.numeric(dat$Sub_metering_2)
dat$Sub_metering_3 <- as.numeric(dat$Sub_metering_3)
## Plot #1
png('plot1.png', width = 480, height = 480)
hist(dat$Global_active_power, col = 'red',
main = 'Global Active Power', xlab = "Global Active Power (kilowatts)")
dev.off()
|
#For PMCIDs w/o DOI (or for which doi did not yield date) in CORD19 dataset (note specific version)
#get publication data from EuropePMC
#create matching table to integrate in CORD19 instance used in ASReview
#info on package #europepmc:
#https://github.com/ropensci/europepmc
#https://cran.r-project.org/web/packages/europepmc/europepmc.pdf
#info on EuropePMC API
#https://europepmc.org/RestfulWebService
#install.packages("tidyverse")
#install.packages("europepmc")
library(tidyverse)
library(europepmc)
# define function to query EuropePMC API for PMCIDs
getEuropePMC <- function(pmcid){
  # Query the EuropePMC REST API for one PMCID and return the result
  # table from epmc_search().
  epmc_search(paste0("pmcid:", pmcid))
}
#add progress bar
getEuropePMC_progress <- function(pmcid){
  # Advance the progress bar, then run the query.
  # NOTE(review): relies on `pb` existing in the calling environment
  # (created further down in this script before map_dfr()).
  pb$tick()$print()
  getEuropePMC(pmcid)
}
#define function to format date
#NB include doi and PMID for potential future addition to dataset
extractData <- function(x){
  # Keep the identifier columns plus the first publication date, and
  # convert that date string to a Date column named `created`.
  # (doi and pmid are retained for potential future use.)
  kept <- dplyr::select(x, pmcid, pmid, doi, firstPublicationDate)
  kept <- dplyr::mutate(kept, created = as.Date(firstPublicationDate))
  dplyr::select(kept, -firstPublicationDate)
}
#define function to add date to id list
#remove doi, pmid columns b/c not needed here
joinDate <- function(x,y){
  # Attach the `created` date from y onto x, keyed by pmcid; the doi
  # and pmid columns of y are dropped because they are not needed here.
  lookup <- dplyr::select(y, pmcid, created)
  dplyr::left_join(x, lookup, by = "pmcid")
}
#define function to merge date columns (doi and pmcid results)
mergeDate <- function(x){
  # Prefer the DOI-derived `date`; fall back to the EuropePMC date
  # (`created`) when it is missing, then drop the helper column.
  filled <- dplyr::mutate(x, date = dplyr::case_when(
    is.na(date) ~ created,
    !is.na(date) ~ date))
  dplyr::select(filled, -created)
}
#----------------------------------------------------
# Main script: fill in missing publication dates via EuropePMC.
# read file with article IDs + dates (for dois) for CORD19
filename <- "CORD19v5_R/output/CORD19id_date.csv"
CORD19id_date <- read_csv(filename, col_types = cols(pmcid = col_character(),
                                                     pubmed_id = col_character()))
# extract PMCIDs for records without date, as character vector
# (counts from the run on this dataset version are noted below)
pmcid_list <- CORD19id_date %>%
  filter(is.na(date)) %>%
  filter(!is.na(pmcid)) %>%
  pull(pmcid) %>%
  unique()
#740 records without (proper) date
#255 with PMCID
#for testing, uncomment to query only the first 100 PMCIDs
#pmcid_list <- pmcid_list %>%
#  head(100)
# set up the progress bar read by getEuropePMC_progress() (global)
pb <- progress_estimated(length(pmcid_list))
# get data from EuropePMC, one query per PMCID, rows bound together
# approx. 1 min per 100 IDs, progress bar shown
pmcid_date <- map_dfr(pmcid_list, getEuropePMC_progress)
# NB this gives an message for each result - find a way to suppress them
# extract ids and format the publication date as a Date column
pmcid_date <- extractData(pmcid_date)
#255 records retrieved from EuropePMC
filename2 <- "CORD19v5_R/data/pmcid_date.csv"
# write to csv for later use in matching
write_csv(pmcid_date, filename2)
# read file for processing at later time
#pmcid_date2 <- read_csv(filename2, col_types = cols(pmcid = col_character()))
# join the EuropePMC dates onto the id list (new `created` column)
CORD19id_date <- joinDate(CORD19id_date, pmcid_date)
# merge dates: keep doi-derived date, fall back to pmcid-derived one
CORD19id_date <- mergeDate(CORD19id_date)
# check still missing dates
#count <- CORD19id_date %>%
#  filter(is.na(date))
filename3 <- "CORD19v5_R/output/CORD19id_date_v5.csv"
# write the final matching table to csv
write_csv(CORD19id_date, filename3)
# read file for processing at later time
#CORD19id_date <- read_csv(filename3, col_types = cols(pmcid = col_character(),
#                                                      pubmed_id = col_character()))
| /Cord19_dataset/CORD19v5_R/02_CORD19_match_PMCID.R | permissive | GerbrichFerdinands/asreview-covid19 | R | false | false | 3,474 | r | #For PMCIDs w/o DOI (or for which doi did not yield date) in CORD19 dataset (note specific version)
#get publication data from EuropePMC
#create matching table to integrate in CORD19 instance used in ASReview
#info on package #europepmc:
#https://github.com/ropensci/europepmc
#https://cran.r-project.org/web/packages/europepmc/europepmc.pdf
#info on EuropePMC API
#https://europepmc.org/RestfulWebService
#install.packages("tidyverse")
#install.packages("europepmc")
library(tidyverse)
library(europepmc)
# define function to query EuropePMC API for PMCIDs
getEuropePMC <- function(pmcid){
query <- paste0("pmcid:",pmcid)
res <- epmc_search(query)
return(res)
}
#add progress bar
getEuropePMC_progress <- function(pmcid){
pb$tick()$print()
result <- getEuropePMC(pmcid)
return(result)
}
#define function to format date
#NB include doi and PMID for potential future addition to dataset
extractData <- function(x){
res <- x %>%
select(pmcid, pmid, doi, firstPublicationDate) %>%
mutate(created = as.Date(firstPublicationDate)) %>%
select(-firstPublicationDate)
return(res)
}
#define function to add date to id list
#remove doi, pmid columns b/c not needed here
joinDate <- function(x,y){
y <- y %>%
select(pmcid, created)
res <- x %>%
left_join(y, by = "pmcid")
return(res)
}
#define function to merge date columns (doi and pmcid results)
mergeDate <- function(x){
res <- x %>%
mutate(date = case_when(
is.na(date) ~ created,
!is.na(date) ~ date)) %>%
select(-created)
return(res)
}
#----------------------------------------------------
# read file with article IDs + dates (for dois) for CORD19
filename <- "CORD19v5_R/output/CORD19id_date.csv"
CORD19id_date <- read_csv(filename, col_types = cols(pmcid = col_character(),
pubmed_id = col_character()))
#extract PMCIDs for records without date, as character vector
#can note down numbers in this step
pmcid_list <- CORD19id_date %>%
filter(is.na(date)) %>%
filter(!is.na(pmcid)) %>%
pull(pmcid) %>%
unique()
#740 records without (proper) date
#255 with PMCID
#for testing
#pmcid_list <- pmcid_list %>%
# head(100)
#set parameter for progress bar
pb <- progress_estimated(length(pmcid_list))
# get data from EuropePMC
# app 1 min/100 DOIS, progress bar shown
pmcid_date <- map_dfr(pmcid_list, getEuropePMC_progress)
# NB this gives an message for each result - find a way to suppress them
#extract data and format date
pmcid_date <- extractData(pmcid_date)
#255 records retrieved from EuropePMC
filename2 <- "CORD19v5_R/data/pmcid_date.csv"
# write to csv for later use in matching
write_csv(pmcid_date, filename2)
# read file for processing at later time
#pmcid_date2 <- read_csv(filename2, col_types = cols(pmcid = col_character()))
#join dates to list of ids
CORD19id_date <- joinDate(CORD19id_date, pmcid_date)
#merge dates for doi and pmcid
CORD19id_date <- mergeDate(CORD19id_date)
#check still missing dates
#count <- CORD19id_date %>%
# filter(is.na(date))
filename3 <- "CORD19v5_R/output/CORD19id_date_v5.csv"
# write to csv
write_csv(CORD19id_date, filename3)
# read file for processing at later time
#CORD19id_date <- read_csv(filename3, col_types = cols(pmcid = col_character(),
# pubmed_id = col_character()))
|
# Fit one of several model types using columns 10:18 of X1Y as
# predictors and column `ny` as the response.
# NOTE(review): the `model` argument arrives as a type string and is
# then overwritten with the fitted object; if an unrecognised string
# is passed, that string itself is returned unchanged - confirm
# whether an explicit error would be preferable. Also note `scale = T`
# uses the reassignable alias T rather than TRUE.
building <- function(X1Y, ny, model) {
  if (model == "logistic") {
    # Logistic regression (GLM, binomial family) on the raw columns.
    Xa <- as.data.frame(X1Y[,10:18])
    YY <- as.numeric(X1Y[,ny])
    #if (max(YY) != 1) {stop("response must be logistic")}
    model <- glm(YY ~ ., data = cbind.data.frame(YY, Xa), family = "binomial")
  } else if (model == "pls") {
    # Penalized PLS: cross-validate lambda and ncomp, then refit with
    # the selected values.
    pena <- ppls::penalized.pls.cv(as.matrix(X1Y[,10:18]), as.numeric(X1Y[,ny]), lambda = seq(-1000, 1000, 1), k=10, scale = T)
    lambda <- pena$lambda.opt
    ncomp <- pena$ncomp.opt
    model <- ppls::penalized.pls.cv(as.matrix(X1Y[,10:18]), as.numeric(X1Y[,ny]), lambda = lambda, ncomp = ncomp, k=10)
  } else if (model == "lasso") {
    # Lasso (alpha = 1) with lambda chosen by best.lambda().
    lambda <- best.lambda(X1Y, ny=ny, alpha = 1)
    model = glmnet::glmnet(data.matrix(X1Y[,10:18]), as.numeric(X1Y[,ny]), alpha = 1, lambda = lambda)
  } else if (model == "ridge") {
    # Ridge (alpha = 0) with lambda chosen by best.lambda().
    lambda <- best.lambda(X1Y, ny=ny, alpha = 0)
    model = glmnet::glmnet(data.matrix(X1Y[,10:18]), as.numeric(X1Y[,ny]), alpha = 0, lambda = lambda)
  }else if (model == "b_ppls") {
    # Spline-transformed penalized PLS: cross-validate on the spline
    # basis, then refit with the chosen penalty on that basis.
    pena <- ppls::ppls.splines.cv(as.matrix(X1Y[,10:18]), as.numeric(X1Y[,ny]), lambda = seq(-1000, 1000, 10), k=10, scale = T, reduce.knots= TRUE)
    lambda <- pena$lambda.opt
    ncomp <- pena$ncomp.opt
    dummy <- ppls::X2s(as.matrix(X1Y[,10:18]), reduce.knots = TRUE)
    P <- ppls::Penalty.matrix(m = ncol(dummy$Z))
    model <- ppls::penalized.pls.cv(as.matrix(dummy$Z), P = P, ncomp = ncomp, lambda = lambda, as.numeric(X1Y[,ny]), k=10)
  }
  return(model)
}
# (Removed dataset-export metadata that was fused into the line above:
#  /CCC_method/R/building.R | no_license | lucanard/CCC)

# Fit a predictive model on a fixed window of predictors.
#
# X1Y   : data frame; columns 10:18 are used as predictors (hard-coded —
#         assumed to match the upstream data layout).
# ny    : index of the response column in X1Y.
# model : one of "logistic", "pls", "lasso", "ridge", "b_ppls".
# Returns the fitted model object (glm, ppls CV result, or glmnet fit);
# an unknown selector now raises an error instead of silently returning
# the selector string.
building <- function(X1Y, ny, model) {
  X <- as.matrix(X1Y[, 10:18])
  Y <- as.numeric(X1Y[, ny])
  if (model == "logistic") {
    # Binomial GLM: response ~ all predictor columns.
    YY <- Y
    Xa <- as.data.frame(X1Y[, 10:18])
    fit <- glm(YY ~ ., data = cbind.data.frame(YY, Xa), family = "binomial")
  } else if (model == "pls") {
    # First CV pass selects lambda / ncomp; the second call refits with them.
    pena <- ppls::penalized.pls.cv(X, Y, lambda = seq(-1000, 1000, 1), k = 10, scale = TRUE)
    fit <- ppls::penalized.pls.cv(X, Y, lambda = pena$lambda.opt,
                                  ncomp = pena$ncomp.opt, k = 10)
  } else if (model == "lasso") {
    # best.lambda() is defined elsewhere in this package.
    fit <- glmnet::glmnet(data.matrix(X1Y[, 10:18]), Y, alpha = 1,
                          lambda = best.lambda(X1Y, ny = ny, alpha = 1))
  } else if (model == "ridge") {
    fit <- glmnet::glmnet(data.matrix(X1Y[, 10:18]), Y, alpha = 0,
                          lambda = best.lambda(X1Y, ny = ny, alpha = 0))
  } else if (model == "b_ppls") {
    # B-spline-transformed penalized PLS.
    pena <- ppls::ppls.splines.cv(X, Y, lambda = seq(-1000, 1000, 10), k = 10,
                                  scale = TRUE, reduce.knots = TRUE)
    dummy <- ppls::X2s(X, reduce.knots = TRUE)
    P <- ppls::Penalty.matrix(m = ncol(dummy$Z))
    fit <- ppls::penalized.pls.cv(as.matrix(dummy$Z), Y, P = P,
                                  ncomp = pena$ncomp.opt, lambda = pena$lambda.opt, k = 10)
  } else {
    # Previously an unknown selector fell through and returned the string.
    stop("unknown model type: ", model)
  }
  return(fit)
}
##
# johaGL 2021
# Get seu object with only Sat cells (for Dr. Brun)
# (note: myonuclei excluded only for graphics)
##
library(ggplot2)
library(tidyverse)
library(Seurat)
library(scales)
library(viridis)
library(Matrix)
library(reticulate)
library(monocle)
library(patchwork)
library(cowplot)
library(RColorBrewer)
library(inlmisc)
# Project paths and output file names ----
prloc = "~/INMG_SingleCell/"
datadir = "data/Oprescu/"
rdsdir= "rds/OprescuTimePoints/"
resu = "results/OprescuTimePoints/"
sat.opr = "sat_seu_2021.rds"
CELLTYPEcol = "celltype" # the name I give to this metadata column
REDUC = "umap" #if desired use "tsne" ==> is FIt-SNE indeed !!
setwd(prloc)
print(" markers-Clusters were verified by the boss")
# Load the pre-clustered Seurat object saved by an upstream script.
seu <- readRDS(paste0(rdsdir,"muscpostSEU.rds"))
print("visualizing initial DimPlots : calculated clusters, plus markers")
# One DimPlot per grouping variable (celltype, seurat_clusters).
plotsNums = list(); clusby=c(CELLTYPEcol, "seurat_clusters")
plotsNums = lapply(clusby, function(y){
  p = DimPlot(seu, reduction = REDUC, group.by = y,
              label=T, repel=T, label.size = 3 ) })
plot_grid(plotlist=plotsNums)
# Attach cluster marker labels: join the per-cluster marker table onto the
# per-cell cluster assignments, then store "clusternum markers" as a factor.
markerstab = read.table(paste0(resu,"tablesMarkersSubPops/musc_checkMarkersAndClusters.txt"),
                        header = T,sep='\t')
markerstab$cluster = as.factor(markerstab$cluster)
tmpdf = data.frame(numclus= seu@meta.data$seurat_clusters)
tmpdf = left_join(tmpdf, markerstab,by=c("numclus"="cluster"))
tmpdf <- tmpdf %>% mutate(nb_mark = paste0(numclus," ",concatmarkers))
seu@meta.data$nb_mark = as.factor(tmpdf$nb_mark)
# *
plotplusmarkers = DimPlot(seu, reduction = REDUC, group.by = "nb_mark", pt.size = 0.3,
                          label=T, repel=T, label.size = 3 ) + theme(legend.text = element_text(size=8))
# Map each "clusternum markers" label to a curated cell-type name.
tmpdf <- tmpdf %>% mutate(nb_newtype = case_when(
  nb_mark == "0 Mb_Csrp3" ~ "myonuclei",
  nb_mark == "1 Amd1_Myh4" ~ "myonuclei",
  nb_mark == "2 Top2a_Hmgb2" ~ "MuSCrenew",
  nb_mark == "3 Crip1_Spp1" ~ "MuSCprol",
  nb_mark == "4 Meg3_Fos" ~ "Asc",
  nb_mark == "5 Myog_Cdkn1c" ~ "Myocytes.early",
  nb_mark == "6 Myl1_Mylpf" ~ "Myocytes.late",
  nb_mark == "7 mt-Nd2_Myh1" ~ "myonuclei",
  nb_mark == "8 Lyz2_Apoe" ~ "Imb",
  nb_mark == "9 Mpz_Pmp22" ~ "Mpz_Pmp22", # 9: neuromuscular junction cells?
  TRUE ~ "Qsc"
) )
seu@meta.data$newtype = tmpdf$nb_newtype
# *
plotNEWtypes = DimPlot(seu, reduction = REDUC, group.by = "newtype", pt.size = 0.3,
                       label=T, repel=T, label.size = 3 )+ theme(legend.text = element_text(size=8))
# TODO : fix, it did not print
#pdf(paste0(resu,"cartMusc_subclustersPrep.pdf"),width=12)
plot_grid( plotNEWtypes,plotplusmarkers,
           plotsNums[[2]], plotsNums[[1]],
           nrow= 2 ) + plot_annotation(title="MuSC and SC clustering, steps (inversed order), random colors")
#dev.off()
print("PROBLEMATIC cluster 9, exclude it")
head(seu@active.ident)
# Drop cluster 9 (uncertain identity, see case_when above), then fix the
# factor ordering of cell types and time points for plotting.
seu.ok <- subset(seu, idents="9",invert=T)
seu.ok@meta.data$newtype <- factor(seu.ok@meta.data$newtype,
                                   levels=c("Asc","Imb","MuSCprol","MuSCrenew", "Myocytes.early",
                                            "Myocytes.late","myonuclei","Qsc"))
seu.ok@meta.data$orig.ident <- factor(x=seu.ok@meta.data$orig.ident,
                                      levels=c( "0.5 DPI", "2 DPI", "3.5 DPI", "5 DPI", "10 DPI", "21 DPI", "Noninjured"))
MANUALCOLORS=c("orange2","lightblue","gold2","deepskyblue4","peachpuff2","aquamarine3","cadetblue4","violetred")
a <- DimPlot(seu.ok, reduction = REDUC, group.by = "newtype", pt.size = 0.4,
             label=T, repel=T, label.size = 3 ) +
  theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
        axis.title=element_text(size=8)) +
  scale_color_manual(values = MANUALCOLORS) +
  labs(title="", subtitle="sub-populations")
b <- DimPlot(seu.ok, reduction = REDUC, group.by="orig.ident", pt.size = 0.4,
             label=T, repel=T, label.size = 3 )+
  theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
        axis.title=element_text(size=8)) +
  scale_color_manual(values=rev(viridis_pal()(7))) +
  labs(title="", subtitle="time-points")
abtitle <- ggdraw() + draw_label("Satellite cells and myonuclei")
pdf(paste0(resu,"sCs_myo_forDrBrun.pdf"),width=10, height = 5)
plot_grid(abtitle,plot_grid(a,b),nrow=2, rel_heights = c(1,15))
dev.off()
#df4plot <- as.data.frame(seu.ok@reductions[["umap"]]@cell.embeddings)
#ggplot(df4plot) + geom_point(aes(UMAP_1, UMAP_2)) + theme_classic()
# =============================================================================
# save this seurat object as new one , FILTER OUT MYONUCLEI
# =============================================================================
# NOTE(review): this drops clusters 0, 1, 6 and 7. Per the mapping above,
# 0/1/7 are myonuclei but 6 is "Myocytes.late" — confirm 6 is intentional.
seu.ok <- subset(seu.ok, idents=c(0,1,6,7), invert=T)
MANUALCOLORS2=c("orange2","lightblue","gold2","deepskyblue4","peachpuff2","violetred")
a2 <- DimPlot(seu.ok, reduction = REDUC, group.by = "newtype", pt.size = 0.4,
              label=T, repel=T, label.size = 3 ) +
  theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
        axis.title=element_text(size=8)) +
  scale_color_manual(values = MANUALCOLORS2) +
  labs(title="", subtitle="sub-populations")
b2 <- DimPlot(seu.ok, reduction = REDUC, group.by="orig.ident", pt.size = 0.4,
              label=T, repel=T, label.size = 3 )+
  theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
        axis.title=element_text(size=8)) +
  scale_color_manual(values=rev(viridis_pal()(7))) +
  labs(title="", subtitle="time-points")
abtitle2 <- ggdraw() + draw_label("Satellite cells")
pdf(paste0(resu,"SAT_forDrBrun_fitsne.pdf"),width=10, height = 5)
plot_grid(abtitle2,plot_grid(a2,b2),nrow=2, rel_heights = c(1,15))
dev.off()
# Persist the satellite-cell-only object for downstream analyses.
saveRDS(seu.ok, file=paste0(rdsdir, sat.opr))
| /scripts/review_sCs.R | no_license | LeGrand-Lab/INMG_SingleCell | R | false | false | 5,749 | r | ##
# johaGL 2021
# Get seu object with only Sat cells (for Dr. Brun)
# (note: myonuclei excluded only for graphics)
##
library(ggplot2)
library(tidyverse)
library(Seurat)
library(scales)
library(viridis)
library(Matrix)
library(reticulate)
library(monocle)
library(patchwork)
library(cowplot)
library(RColorBrewer)
library(inlmisc)
prloc = "~/INMG_SingleCell/"
datadir = "data/Oprescu/"
rdsdir= "rds/OprescuTimePoints/"
resu = "results/OprescuTimePoints/"
sat.opr = "sat_seu_2021.rds"
CELLTYPEcol = "celltype" # the name I give to this metadata column
REDUC = "umap" #if desired use "tsne" ==> is FIt-SNE indeed !!
setwd(prloc)
print(" markers-Clusters were verified by the boss")
seu <- readRDS(paste0(rdsdir,"muscpostSEU.rds"))
print("visualizing initial DimPlots : calculated clusters, plus markers")
plotsNums = list(); clusby=c(CELLTYPEcol, "seurat_clusters")
plotsNums = lapply(clusby, function(y){
p = DimPlot(seu, reduction = REDUC, group.by = y,
label=T, repel=T, label.size = 3 ) })
plot_grid(plotlist=plotsNums)
markerstab = read.table(paste0(resu,"tablesMarkersSubPops/musc_checkMarkersAndClusters.txt"),
header = T,sep='\t')
markerstab$cluster = as.factor(markerstab$cluster)
tmpdf = data.frame(numclus= seu@meta.data$seurat_clusters)
tmpdf = left_join(tmpdf, markerstab,by=c("numclus"="cluster"))
tmpdf <- tmpdf %>% mutate(nb_mark = paste0(numclus," ",concatmarkers))
seu@meta.data$nb_mark = as.factor(tmpdf$nb_mark)
# *
plotplusmarkers = DimPlot(seu, reduction = REDUC, group.by = "nb_mark", pt.size = 0.3,
label=T, repel=T, label.size = 3 ) + theme(legend.text = element_text(size=8))
tmpdf <- tmpdf %>% mutate(nb_newtype = case_when(
nb_mark == "0 Mb_Csrp3" ~ "myonuclei",
nb_mark == "1 Amd1_Myh4" ~ "myonuclei",
nb_mark == "2 Top2a_Hmgb2" ~ "MuSCrenew",
nb_mark == "3 Crip1_Spp1" ~ "MuSCprol",
nb_mark == "4 Meg3_Fos" ~ "Asc",
nb_mark == "5 Myog_Cdkn1c" ~ "Myocytes.early",
nb_mark == "6 Myl1_Mylpf" ~ "Myocytes.late",
nb_mark == "7 mt-Nd2_Myh1" ~ "myonuclei",
nb_mark == "8 Lyz2_Apoe" ~ "Imb",
nb_mark == "9 Mpz_Pmp22" ~ "Mpz_Pmp22", # 9: neuromuscular junction cells?
TRUE ~ "Qsc"
) )
seu@meta.data$newtype = tmpdf$nb_newtype
# *
plotNEWtypes = DimPlot(seu, reduction = REDUC, group.by = "newtype", pt.size = 0.3,
label=T, repel=T, label.size = 3 )+ theme(legend.text = element_text(size=8))
# TODO : fix, it did not print
#pdf(paste0(resu,"cartMusc_subclustersPrep.pdf"),width=12)
plot_grid( plotNEWtypes,plotplusmarkers,
plotsNums[[2]], plotsNums[[1]],
nrow= 2 ) + plot_annotation(title="MuSC and SC clustering, steps (inversed order), random colors")
#dev.off()
print("PROBLEMATIC cluster 9, exclude it")
head(seu@active.ident)
seu.ok <- subset(seu, idents="9",invert=T)
seu.ok@meta.data$newtype <- factor(seu.ok@meta.data$newtype,
levels=c("Asc","Imb","MuSCprol","MuSCrenew", "Myocytes.early",
"Myocytes.late","myonuclei","Qsc"))
seu.ok@meta.data$orig.ident <- factor(x=seu.ok@meta.data$orig.ident,
levels=c( "0.5 DPI", "2 DPI", "3.5 DPI", "5 DPI", "10 DPI", "21 DPI", "Noninjured"))
MANUALCOLORS=c("orange2","lightblue","gold2","deepskyblue4","peachpuff2","aquamarine3","cadetblue4","violetred")
a <- DimPlot(seu.ok, reduction = REDUC, group.by = "newtype", pt.size = 0.4,
label=T, repel=T, label.size = 3 ) +
theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
axis.title=element_text(size=8)) +
scale_color_manual(values = MANUALCOLORS) +
labs(title="", subtitle="sub-populations")
b <- DimPlot(seu.ok, reduction = REDUC, group.by="orig.ident", pt.size = 0.4,
label=T, repel=T, label.size = 3 )+
theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
axis.title=element_text(size=8)) +
scale_color_manual(values=rev(viridis_pal()(7))) +
labs(title="", subtitle="time-points")
abtitle <- ggdraw() + draw_label("Satellite cells and myonuclei")
pdf(paste0(resu,"sCs_myo_forDrBrun.pdf"),width=10, height = 5)
plot_grid(abtitle,plot_grid(a,b),nrow=2, rel_heights = c(1,15))
dev.off()
#df4plot <- as.data.frame(seu.ok@reductions[["umap"]]@cell.embeddings)
#ggplot(df4plot) + geom_point(aes(UMAP_1, UMAP_2)) + theme_classic()
# =============================================================================
# save this seurat object as new one , FILTER OUT MYONUCLEI
# =============================================================================
seu.ok <- subset(seu.ok, idents=c(0,1,6,7), invert=T)
MANUALCOLORS2=c("orange2","lightblue","gold2","deepskyblue4","peachpuff2","violetred")
a2 <- DimPlot(seu.ok, reduction = REDUC, group.by = "newtype", pt.size = 0.4,
label=T, repel=T, label.size = 3 ) +
theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
axis.title=element_text(size=8)) +
scale_color_manual(values = MANUALCOLORS2) +
labs(title="", subtitle="sub-populations")
b2 <- DimPlot(seu.ok, reduction = REDUC, group.by="orig.ident", pt.size = 0.4,
label=T, repel=T, label.size = 3 )+
theme(legend.text = element_text(size=8), axis.text=element_text(size=8),
axis.title=element_text(size=8)) +
scale_color_manual(values=rev(viridis_pal()(7))) +
labs(title="", subtitle="time-points")
abtitle2 <- ggdraw() + draw_label("Satellite cells")
pdf(paste0(resu,"SAT_forDrBrun_fitsne.pdf"),width=10, height = 5)
plot_grid(abtitle2,plot_grid(a2,b2),nrow=2, rel_heights = c(1,15))
dev.off()
saveRDS(seu.ok, file=paste0(rdsdir, sat.opr))
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of diana2nd02
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Submit the study results to the study coordinating center
#'
#' @details
#' Uploads the file \code{StudyResults.zip} found in \code{outputFolder} to the
#' study coordinating center using Amazon S3. An active internet connection is
#' required.
#'
#' @param outputFolder Name of local folder where the results were generated; make sure to use forward slashes
#'                     (/). Do not use a folder on a network drive since this greatly impacts
#'                     performance.
#' @param key          The key string as provided by the study coordinator
#' @param secret       The secret string as provided by the study coordinator
#'
#' @return
#' TRUE if the upload was successful.
#'
#' @export
submitResults <- function(outputFolder, key, secret) {
  zipPath <- file.path(outputFolder, "StudyResults.zip")
  if (!file.exists(zipPath)) {
    stop(paste("Cannot find file", zipPath))
  }
  writeLines(paste0("Uploading file '", zipPath, "' to study coordinating center"))
  # Upload to the study's fixed S3 bucket using the coordinator-supplied credentials.
  uploadOk <- OhdsiSharing::putS3File(file = zipPath,
                                      bucket = "ohdsi-study-skeleton",
                                      key = key,
                                      secret = secret)
  statusMsg <- if (uploadOk) {
    "Upload complete"
  } else {
    "Upload failed. Please contact the study coordinator"
  }
  writeLines(statusMsg)
  # Return the status invisibly so interactive calls stay quiet.
  invisible(uploadOk)
}
| /OhdsiDataThonKorea2019/Studies/diana2nd02/R/SubmitResults.R | permissive | ohdsi-korea/OhdsiKoreaTutorials | R | false | false | 2,005 | r | # Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of diana2nd02
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Submit the study results to the study coordinating center
#'
#' @details
#' This will upload the file \code{StudyResults.zip} to the study coordinating center using Amazon S3.
#' This requires an active internet connection.
#'
#' @param outputFolder Name of local folder where the results were generated; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param key The key string as provided by the study coordinator
#' @param secret The secret string as provided by the study coordinator
#'
#' @return
#' TRUE if the upload was successful.
#'
#' @export
# Uploads <outputFolder>/StudyResults.zip to the study coordinating center's
# S3 bucket via OhdsiSharing; see the roxygen block above for the contract.
submitResults <- function(outputFolder, key, secret) {
  zipName <- file.path(outputFolder, "StudyResults.zip")
  if (!file.exists(zipName)) {
    stop(paste("Cannot find file", zipName))
  }
  writeLines(paste0("Uploading file '", zipName, "' to study coordinating center"))
  # Fixed bucket; credentials come from the study coordinator.
  result <- OhdsiSharing::putS3File(file = zipName,
                                    bucket = "ohdsi-study-skeleton",
                                    key = key,
                                    secret = secret)
  if (result) {
    writeLines("Upload complete")
  } else {
    writeLines("Upload failed. Please contact the study coordinator")
  }
  # Return the status invisibly so interactive calls stay quiet.
  invisible(result)
}
|
library(Compositional)
### Name: Total variability
### Title: Total variability
### Aliases: totvar
### ** Examples

# Build compositional data: close the iris measurements so each row sums to 1.
x <- as.matrix(iris[, 1:4])
x <- x / rowSums(x)
totvar(x) # total variability of the composition
| /data/genthat_extracted_code/Compositional/examples/totvar.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 181 | r | library(Compositional)
### Name: Total variability
### Title: Total variability
### Aliases: totvar
### ** Examples
x <- as.matrix(iris[, 1:4])
x <- x / rowSums(x)
totvar(x)
|
#' @title Dewpoint Temperature
#' @docType data
#' @name tdew
#' @usage tdew
#' @format A rasterbrick (.nc)
#' @description Input dewpoint temperature dataset
#' @keywords datasets
NULL
| /R/tdew.r | no_license | cran/lue | R | false | false | 194 | r | #' @title Dewpoint Temperature
#' @docType data
#' @name tdew
#' @usage tdew
#' @format A rasterbrick (.nc)
#' @description Input dewpoint temperature dataset
#' @keywords datasets
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runCicero.R
\name{generate_cicero_models}
\alias{generate_cicero_models}
\title{Generate cicero models}
\usage{
generate_cicero_models(cds, distance_parameter, s = 0.75,
window = 5e+05, max_elements = 200,
genomic_coords = cicero::human.hg19.genome)
}
\arguments{
\item{cds}{A cicero CDS object generated using \code{\link{make_cicero_cds}}.}
\item{distance_parameter}{Distance based penalty parameter value. Generally
the mean of the calculated \code{distance_parameter} values from
\code{\link{estimate_distance_parameter}}.}
\item{s}{Power law value. See details.}
\item{window}{Size of the genomic window to query, in base pairs.}
\item{max_elements}{Maximum number of elements per window allowed. Prevents
very large models from slowing performance.}
\item{genomic_coords}{Either a data frame or a path (character) to a file
with chromosome lengths. The file should have two columns, the first is a
the chromosome name (ex. "chr1") and the second is the chromosome length
in base pairs. See \code{data(human.hg19.genome)} for an example. If a
file, should be tab-separated and without header.}
}
\value{
A list of results for each window. Either a \code{glasso} object, or
a character description of why the window was skipped. This list can be
directly input into \code{\link{assemble_connections}} to create a
reconciled list of cicero co-accessibility scores.
}
\description{
Function to generate graphical lasso models on all sites in a CDS object
within overlapping genomic windows.
}
\details{
The purpose of this function is to compute the raw covariances
between each pair of sites within overlapping windows of the genome.
Within each window, the function then estimates a regularized correlation
matrix using the graphical LASSO (Friedman et al., 2008), penalizing pairs
of distant sites more than proximal sites. The scaling parameter,
\code{distance_parameter}, in combination with the power law value \code{s}
determines the distance-based penalty.
The parameter \code{s} is a constant that captures the power-law
distribution of contact frequencies between different locations in the
genome as a function of their linear distance. For a complete discussion
of the various polymer models of DNA packed into the nucleus and of
justifiable values for s, we refer readers to (Dekker et al., 2013) for a
discussion of justifiable values for s. We use a value of 0.75 by default
in Cicero, which corresponds to the “tension globule” polymer model of DNA
(Sanborn et al., 2015). This parameter must be the same as the s parameter
for \code{\link{estimate_distance_parameter}}.
Further details are available in the publication that accompanies this
package. Run \code{citation("cicero")} for publication details.
}
\examples{
#data("cicero_data")
data("human.hg19.genome")
#sample_genome <- subset(human.hg19.genome, V1 == "chr18")
#sample_genome$V2[1] <- 100000
#input_cds <- make_atac_cds(cicero_data, binarize = TRUE)
#input_cds <- reduceDimension(input_cds, max_components = 2, num_dim=6,
# reduction_method = 'tSNE',
# norm_method = "none")
#tsne_coords <- t(reducedDimA(input_cds))
#row.names(tsne_coords) <- row.names(pData(input_cds))
#cicero_cds <- make_cicero_cds(input_cds, reduced_coordinates = tsne_coords)
#model_output <- generate_cicero_models(cicero_cds,
# distance_parameter = 0.3,
# genomic_coords = sample_genome)
}
\references{
\itemize{
\item Dekker, J., Marti-Renom, M.A., and Mirny, L.A. (2013). Exploring
the three-dimensional organization of genomes: interpreting chromatin
interaction data. Nat. Rev. Genet. 14, 390–403.
\item Friedman, J., Hastie, T., and Tibshirani, R. (2008). Sparse
inverse covariance estimation with the graphical lasso. Biostatistics 9,
432–441.
\item Sanborn, A.L., Rao, S.S.P., Huang, S.-C., Durand, N.C., Huntley,
M.H., Jewett, A.I., Bochkov, I.D., Chinnappan, D., Cutkosky, A., Li, J.,
et al. (2015). Chromatin extrusion explains key features of loop and
domain formation in wild-type and engineered genomes. Proc. Natl. Acad.
Sci. U. S. A. 112, E6456–E6465.
}
}
\seealso{
\code{\link{estimate_distance_parameter}}
}
| /man/generate_cicero_models.Rd | permissive | hypercompetent/cicero | R | false | true | 4,428 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runCicero.R
\name{generate_cicero_models}
\alias{generate_cicero_models}
\title{Generate cicero models}
\usage{
generate_cicero_models(cds, distance_parameter, s = 0.75,
window = 5e+05, max_elements = 200,
genomic_coords = cicero::human.hg19.genome)
}
\arguments{
\item{cds}{A cicero CDS object generated using \code{\link{make_cicero_cds}}.}
\item{distance_parameter}{Distance based penalty parameter value. Generally
the mean of the calculated \code{distance_parameter} values from
\code{\link{estimate_distance_parameter}}.}
\item{s}{Power law value. See details.}
\item{window}{Size of the genomic window to query, in base pairs.}
\item{max_elements}{Maximum number of elements per window allowed. Prevents
very large models from slowing performance.}
\item{genomic_coords}{Either a data frame or a path (character) to a file
with chromosome lengths. The file should have two columns, the first is a
the chromosome name (ex. "chr1") and the second is the chromosome length
in base pairs. See \code{data(human.hg19.genome)} for an example. If a
file, should be tab-separated and without header.}
}
\value{
A list of results for each window. Either a \code{glasso} object, or
a character description of why the window was skipped. This list can be
directly input into \code{\link{assemble_connections}} to create a
reconciled list of cicero co-accessibility scores.
}
\description{
Function to generate graphical lasso models on all sites in a CDS object
within overlapping genomic windows.
}
\details{
The purpose of this function is to compute the raw covariances
between each pair of sites within overlapping windows of the genome.
Within each window, the function then estimates a regularized correlation
matrix using the graphical LASSO (Friedman et al., 2008), penalizing pairs
of distant sites more than proximal sites. The scaling parameter,
\code{distance_parameter}, in combination with the power law value \code{s}
determines the distance-based penalty.
The parameter \code{s} is a constant that captures the power-law
distribution of contact frequencies between different locations in the
genome as a function of their linear distance. For a complete discussion
of the various polymer models of DNA packed into the nucleus and of
justifiable values for s, we refer readers to (Dekker et al., 2013) for a
discussion of justifiable values for s. We use a value of 0.75 by default
in Cicero, which corresponds to the “tension globule” polymer model of DNA
(Sanborn et al., 2015). This parameter must be the same as the s parameter
for \code{\link{estimate_distance_parameter}}.
Further details are available in the publication that accompanies this
package. Run \code{citation("cicero")} for publication details.
}
\examples{
#data("cicero_data")
data("human.hg19.genome")
#sample_genome <- subset(human.hg19.genome, V1 == "chr18")
#sample_genome$V2[1] <- 100000
#input_cds <- make_atac_cds(cicero_data, binarize = TRUE)
#input_cds <- reduceDimension(input_cds, max_components = 2, num_dim=6,
# reduction_method = 'tSNE',
# norm_method = "none")
#tsne_coords <- t(reducedDimA(input_cds))
#row.names(tsne_coords) <- row.names(pData(input_cds))
#cicero_cds <- make_cicero_cds(input_cds, reduced_coordinates = tsne_coords)
#model_output <- generate_cicero_models(cicero_cds,
# distance_parameter = 0.3,
# genomic_coords = sample_genome)
}
\references{
\itemize{
\item Dekker, J., Marti-Renom, M.A., and Mirny, L.A. (2013). Exploring
the three-dimensional organization of genomes: interpreting chromatin
interaction data. Nat. Rev. Genet. 14, 390–403.
\item Friedman, J., Hastie, T., and Tibshirani, R. (2008). Sparse
inverse covariance estimation with the graphical lasso. Biostatistics 9,
432–441.
\item Sanborn, A.L., Rao, S.S.P., Huang, S.-C., Durand, N.C., Huntley,
M.H., Jewett, A.I., Bochkov, I.D., Chinnappan, D., Cutkosky, A., Li, J.,
et al. (2015). Chromatin extrusion explains key features of loop and
domain formation in wild-type and engineered genomes. Proc. Natl. Acad.
Sci. U. S. A. 112, E6456–E6465.
}
}
\seealso{
\code{\link{estimate_distance_parameter}}
}
|
# No Remotes ----
# Attachments ----
# Install any of the attached CRAN dependencies that are not yet available.
to_install <- c("curl", "data.table", "dplyr", "DT", "ggplot2", "glue", "httr", "jsonlite", "lubridate", "magrittr", "plotly", "pracma", "readr", "rlang", "shiny", "shinycssloaders", "shinydashboard", "shinythemes", "stats", "utils")
for (i in to_install) {
  message(paste("looking for ", i))
  # quietly = TRUE: we only want the availability check, not a load error
  # printed to the console for every missing package.
  if (!requireNamespace(i, quietly = TRUE)) {
    message(paste(" installing", i))
    install.packages(i)
  }
}
| /CryptoShiny/inst/dependencies.R | permissive | fernandopf/ThinkRProject | R | false | false | 447 | r | # No Remotes ----
# Attachments ----
to_install <- c("curl", "data.table", "dplyr", "DT", "ggplot2", "glue", "httr", "jsonlite", "lubridate", "magrittr", "plotly", "pracma", "readr", "rlang", "shiny", "shinycssloaders", "shinydashboard", "shinythemes", "stats", "utils")
for (i in to_install) {
message(paste("looking for ", i))
if (!requireNamespace(i)) {
message(paste(" installing", i))
install.packages(i)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Annotations.R
\name{CTSStoGenes}
\alias{CTSStoGenes}
\alias{CTSStoGenes,CAGEexp-method}
\title{Make a gene expression table.}
\usage{
CTSStoGenes(object)
\S4method{CTSStoGenes}{CAGEexp}(object)
}
\arguments{
\item{object}{A \code{CAGEexp} object that was annotated with the \code{\link[=annotateCTSS]{annotateCTSS()}}
function.}
}
\value{
The input object with the following modifications:
\itemize{
\item A new \code{geneExpMatrix} experiment containing gene expression levels as
a \code{\link{SummarizedExperiment}} object with one assay called \code{counts}, which
is plain \code{matrix} of integers. (This plays better than \verb{Rle DataFrames}
when interfacing with downstream packages like DESeq2, and since the number of
genes is limited, a \code{matrix} will not cause problems of performance.)
\item New \code{genes} column data added, indicating total number of gene symbols
detected per library.
\item New \code{unannotated} column data added, indicating for each sample the
number of counts that did not overlap with a known gene.
}
}
\description{
Add a gene expression table in the \code{GeneExpSE} experiment slot of an
annotated \code{\link{CAGEexp}} object.
}
\examples{
CTSStoGenes(exampleCAGEexp)
all( librarySizes(exampleCAGEexp) -
colSums(SummarizedExperiment::assay(GeneExpSE(exampleCAGEexp))) ==
exampleCAGEexp$unannotated)
}
\seealso{
\code{\link[=annotateCTSS]{annotateCTSS()}}.
Other CAGEr object modifiers:
\code{\link{CustomConsensusClusters}()},
\code{\link{aggregateTagClusters}()},
\code{\link{annotateCTSS}()},
\code{\link{clusterCTSS}()},
\code{\link{cumulativeCTSSdistribution}()},
\code{\link{getCTSS}()},
\code{\link{normalizeTagCount}()},
\code{\link{quantilePositions}()},
\code{\link{summariseChrExpr}()}
Other CAGEr gene expression analysis functions:
\code{\link{GeneExpDESeq2}()},
\code{\link{ranges2genes}()}
}
\author{
Charles Plessy
}
\concept{CAGEr gene expression analysis functions}
\concept{CAGEr object modifiers}
| /man/CTSStoGenes.Rd | no_license | snikumbh/CAGEr | R | false | true | 2,060 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Annotations.R
\name{CTSStoGenes}
\alias{CTSStoGenes}
\alias{CTSStoGenes,CAGEexp-method}
\title{Make a gene expression table.}
\usage{
CTSStoGenes(object)
\S4method{CTSStoGenes}{CAGEexp}(object)
}
\arguments{
\item{object}{A \code{CAGEexp} object that was annotated with the \code{\link[=annotateCTSS]{annotateCTSS()}}
function.}
}
\value{
The input object with the following modifications:
\itemize{
\item A new \code{geneExpMatrix} experiment containing gene expression levels as
a \code{\link{SummarizedExperiment}} object with one assay called \code{counts}, which
is plain \code{matrix} of integers. (This plays better than \verb{Rle DataFrames}
when interfacing with downstream packages like DESeq2, and since the number of
genes is limited, a \code{matrix} will not cause problems of performance.)
\item New \code{genes} column data added, indicating total number of gene symbols
detected per library.
\item New \code{unannotated} column data added, indicating for each sample the
number of counts that did not overlap with a known gene.
}
}
\description{
Add a gene expression table in the \code{GeneExpSE} experiment slot of an
annotated \code{\link{CAGEexp}} object.
}
\examples{
CTSStoGenes(exampleCAGEexp)
all( librarySizes(exampleCAGEexp) -
colSums(SummarizedExperiment::assay(GeneExpSE(exampleCAGEexp))) ==
exampleCAGEexp$unannotated)
}
\seealso{
\code{\link[=annotateCTSS]{annotateCTSS()}}.
Other CAGEr object modifiers:
\code{\link{CustomConsensusClusters}()},
\code{\link{aggregateTagClusters}()},
\code{\link{annotateCTSS}()},
\code{\link{clusterCTSS}()},
\code{\link{cumulativeCTSSdistribution}()},
\code{\link{getCTSS}()},
\code{\link{normalizeTagCount}()},
\code{\link{quantilePositions}()},
\code{\link{summariseChrExpr}()}
Other CAGEr gene expression analysis functions:
\code{\link{GeneExpDESeq2}()},
\code{\link{ranges2genes}()}
}
\author{
Charles Plessy
}
\concept{CAGEr gene expression analysis functions}
\concept{CAGEr object modifiers}
|
# K-fold cross validation example code
# (comments translated to English from the original Korean)
################################################################
# First example
# iris data - 5 columns and 150 rows
# Modelling algorithm used: Random forest
# Predict iris Sepal.Length
library(plyr)
library(dplyr)
library(randomForest)
data <- iris
glimpse(data)# inspect the data; glimpse() ships with dplyr
# Cross validation, using a random forest to predict Sepal.Length.
k = 5
# Randomly assign each row to one of k folds.
data$id <- sample(1:k, nrow(data), replace = TRUE)
list <- 1:k
# Prediction and test-set data frames that we append to with each
# iteration over the folds (reset both here).
prediction <- testsetCopy <- data.frame()
# Progress bar showing CV status while the loop runs.
progress.bar <- create_progress_bar("text") # ships with plyr
progress.bar$init(k)
# k-fold loop: i runs from 1 to k = 5.
for(i in 1:k){
  # Remove rows with id i from the data frame to create the training set
  # (e.g. ids 2..5 train while id 1 is held out), and select rows with
  # id i to create the test set.
  trainset <- subset(data, id %in% list[-i])
  testset <- subset(data, id %in% c(i))
  # Fit a random forest model (the fold id column is excluded).
  model <- randomForest(trainset$Sepal.Length ~ .-id, data = trainset, ntree = 100)
  temp <- as.data.frame(predict(model, testset))
  # Append this iteration's predictions to the prediction data frame.
  prediction <- rbind(prediction, temp)
  # Append this iteration's actual values (testset) to testsetCopy.
  testsetCopy <- rbind(testsetCopy, as.data.frame(testset[,1]))
  progress.bar$step()
}
# Combine predicted and actual Sepal.Length values.
result <- cbind(prediction, testsetCopy[, 1])
names(result) <- c("Predicted", "Actual")
result$Difference <- abs(result$Actual - result$Predicted)
# As an example, use Mean Absolute Error for evaluation.
summary(result$Difference)
################################################################
# Second example
# iris Species classification
data <- iris
k = 5; list <- 1:k
data$id <- sample(1:k, nrow(data), replace = TRUE)
prediction <- testsetCopy <- data.frame()
# k-fold loop
for(i in 1:k){
  trainset <- subset(data, id %in% list[-i])
  testset <- subset(data, id %in% c(i))
  model <- randomForest(trainset$Species~.-id, data = trainset, ntree = 100)
  temp <- as.data.frame(predict(model, testset))
  prediction <- rbind(prediction, temp)
  testsetCopy <- rbind(testsetCopy, as.data.frame(testset[,5]))
}
result <- cbind(prediction, testsetCopy[, 1])
names(result) <- c("Predicted", "Actual")
library(e1071) ; library(caret) # confusionMatrix() lives in caret
confusionMatrix(result$Predicted, result$Actual)
| /K-fold cross validation example code.R | no_license | Karagul/KFold-Cross-Validation | R | false | false | 3,366 | r | # K-fold cross validation example code
# K-fold cross validation: several example code variants
################################################################
# First example:
# iris data - 5 columns, 150 rows
# Modelling algorithm: random forest
# Goal: predict iris Sepal.Length
library(plyr)
library(dplyr)
library(randomForest)

data <- iris
glimpse(data)  # inspect the data (glimpse comes from dplyr)

# Cross-validation, using a random forest to predict Sepal.Length.
k <- 5
# Assign each row at random to one of the k folds
# ("folds" rather than "list", which would shadow base::list).
data$id <- sample(seq_len(k), nrow(data), replace = TRUE)
folds <- seq_len(k)

# Prediction and test-set data frames that we append to with each
# iteration over the folds.
prediction <- testsetCopy <- data.frame()

# Progress bar showing the status of the CV loop (from plyr).
progress.bar <- create_progress_bar("text")
progress.bar$init(k)

for (i in folds) {
  # Rows whose fold id differs from i form the training set;
  # rows with fold id == i form the test set.
  trainset <- subset(data, id %in% folds[-i])
  testset  <- subset(data, id %in% c(i))

  # Fit a random forest. BUG FIX: the response must be referenced as the bare
  # column name (Sepal.Length), not trainset$Sepal.Length. With the latter as
  # the LHS, "." on the RHS expands to ALL columns of `data` -- including
  # Sepal.Length itself -- leaking the response into the predictors.
  model <- randomForest(Sepal.Length ~ . - id, data = trainset, ntree = 100)
  temp <- as.data.frame(predict(model, testset))

  # Append this iteration's predictions to the prediction data frame,
  # and the corresponding actual values to the test-set copy.
  prediction <- rbind(prediction, temp)
  testsetCopy <- rbind(testsetCopy, as.data.frame(testset[, 1]))
  progress.bar$step()
}

# Combine predictions with the actual Sepal.Length values.
result <- cbind(prediction, testsetCopy[, 1])
names(result) <- c("Predicted", "Actual")
result$Difference <- abs(result$Actual - result$Predicted)

# As an example, use Mean Absolute Error for evaluation.
summary(result$Difference)

################################################################
# Second example:
# iris Species classification
data <- iris
k <- 5
folds <- seq_len(k)
data$id <- sample(seq_len(k), nrow(data), replace = TRUE)
prediction <- testsetCopy <- data.frame()

for (i in folds) {
  trainset <- subset(data, id %in% folds[-i])
  testset  <- subset(data, id %in% c(i))
  # Same fix as above: use the bare column name for the response.
  model <- randomForest(Species ~ . - id, data = trainset, ntree = 100)
  temp <- as.data.frame(predict(model, testset))
  prediction <- rbind(prediction, temp)
  testsetCopy <- rbind(testsetCopy, as.data.frame(testset[, 5]))
}

result <- cbind(prediction, testsetCopy[, 1])
names(result) <- c("Predicted", "Actual")

library(e1071); library(caret)  # confusionMatrix lives in caret
confusionMatrix(result$Predicted, result$Actual)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.klmer.R
\name{simulate.klmer}
\alias{simulate.klmer}
\title{Semi-Parametric Bootstrap sample from a klmer object.}
\usage{
\method{simulate}{klmer}(object, nsim = 1, seed = NULL, resids, ...)
}
\arguments{
\item{object}{Models to be simulated from.}
\item{nsim}{Number of simulations (ignored)}
\item{seed}{Seed for simulations (currently ignored)}
\item{resids}{The residuals to be resampled.}
\item{...}{Additional parameters for simulate method.}
}
\value{
A list of simulated K functions for all distances in the klmer
object.
}
\description{
Semi-Parametric Bootstrap sample from a klmer object.
}
| /man/simulate.klmer.Rd | no_license | BagchiLab-Uconn/RSPPlme4 | R | false | true | 696 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.klmer.R
\name{simulate.klmer}
\alias{simulate.klmer}
\title{Semi-Parametric Bootstrap sample from a klmer object.}
\usage{
\method{simulate}{klmer}(object, nsim = 1, seed = NULL, resids, ...)
}
\arguments{
\item{object}{Models to be simulated from.}
\item{nsim}{Number of simulations (ignored)}
\item{seed}{Seed for simulations (currently ignored)}
\item{resids}{The residuals to be resampled.}
\item{...}{Additional parameters for simulate method.}
}
\value{
A list of simulated K functions for all distances in the klmer
object.
}
\description{
Semi-Parametric Bootstrap sample from a klmer object.
}
|
#' show_dmsg()
#' Plot heatmaps of the methylation level of sites in differentially
#' methylated genes, and write a per-comparison details table.
#'
#' @param mrobj A methyRaw object or a methyRawList object
#' @param dmsg A list containing GRanges objects dmsites and dmgenes returned by
#'   det_dmsg()
#' @param min.nsites Required minimal number of msites in a gene for the gene
#'   to be displayed in the heatmap pdf
#' @param max.nsites Required maximal number of msites in a gene for the gene
#'   to be displayed in the heatmap pdf
#' @param min.pdmsites Required minimal percent of dmsites among the msites in
#'   a gene for the gene to be displayed in the heatmap pdf
#' @param destrand methylKit::unite() parameter; default: FALSE.
#'   destrand=TRUE combines CpG methylation calls from both strands
#' @param mc.cores Integer denoting how many cores should be used for parallel
#'   differential methylation calculations
#' @param outflabel A string to identify the study in the output file
#'
#' @return A named list (one entry per comparison) of data frames that combine
#'   per-site percent methylation with the overlapping dmgene annotation
#'
#' @importFrom methylKit percMethylation reorganize unite
#' @importFrom GenomicRanges findOverlaps values
#' @importFrom gplots heatmap.2 greenred
# @importFrom utils write.table
#' @importFrom S4Vectors subjectHits queryHits
#' @importFrom dplyr group_by_ %>%
#'
#' @examples
#'   mydatf <- system.file("extdata","Am.dat",package="BWASPR")
#'   myparf <- system.file("extdata","Am.par",package="BWASPR")
#'   myfiles <- setup_BWASPR(datafile=mydatf,parfile=myparf)
#'   samplelist <- list("forager","nurse")
#'   AmHE <- mcalls2mkobj(myfiles$datafiles,species="Am",study="HE",
#'                        sample=samplelist,replicate=c(0),
#'                        type="CpGhsm",mincov=1,assembly="Amel-4.5")
#'   genome_ann <- get_genome_annotation(myfiles$parameters)
#'   dmsgList <- det_dmsg(AmHE,genome_ann,
#'                        threshold=25.0,qvalue=0.01,mc.cores=4,destrand=TRUE,
#'                        outfile1="AmHE-dmsites.txt",
#'                        outfile2="AmHE-dmgenes.txt")
#'   dmgprp <- show_dmsg(AmHE,dmsgList,destrand=TRUE,
#'                       min.nsites=2,max.nsites=60,min.pdmsites=10,
#'                       mc.cores=4,outflabel="Am_HE")
#'
#' @export
show_dmsg <- function(mrobj,dmsg,destrand=FALSE,min.nsites=2,max.nsites=60,
                      min.pdmsites=10,mc.cores=1,outflabel="") {
    message('... show_dmsg() ...')
    # Collapse the per-comparison lists of dmsites/dmgenes into single GRanges
    # objects and derive the set of pairwise comparisons to process.
    dmsites.gr <- do.call("c", dmsg$dmsites)
    dmgenes.gr <- do.call("c", dmsg$dmgenes)
    sample_match_list <- as.list(unique(as.character(dmgenes.gr$comparison)))
    # Analyze each comparison in turn; one details table + heatmap PDF each.
    dmgprp <- lapply(sample_match_list, function(sample_match) {
        # Comparison labels are '.'-separated with the two sample names in
        # positions 1 and 3 (e.g. "forager.vs.nurse" -- see det_dmsg()).
        sample1 <- unlist(strsplit(sample_match,'\\.'))[1]
        sample2 <- unlist(strsplit(sample_match,'\\.'))[3]
        message(paste('... comparing ',sample1,' vs. ',sample2,' ...',sep=''))
        # subset dmsites.gr & dmgenes.gr to this comparison only
        #
        pair_dmsites.gr <- dmsites.gr[GenomicRanges::values(dmsites.gr)$comparison%in%sample_match]
        pair_dmgenes.gr <- dmgenes.gr[GenomicRanges::values(dmgenes.gr)$comparison%in%sample_match]
        # subset the mrobj to the current sample pair and unite the calls
        #
        pair_mrobj <- reorganize(mrobj,sample.ids=list(sample1,sample2),
                                 treatment=c(0,1))
        pair_meth <- unite(pair_mrobj,destrand=destrand,mc.cores=mc.cores)
        # calculate percent methylation per site (rounded to 2 decimals)
        #
        p_meth <- round(percMethylation(pair_meth,rowids=FALSE,
                                        save.txt=FALSE),2)
        pair_p_meth <- cbind(pair_meth,p_meth)
        pair_p_meth.gr <- as(pair_p_meth,'GRanges')
        # identify the msites overlapping each dm gene ('scd' sites)
        #
        match <- suppressWarnings(findOverlaps(pair_dmgenes.gr,pair_p_meth.gr,ignore.strand=TRUE))
        sub_pair_p_meth.gr <- pair_p_meth.gr[subjectHits(match)]
        sub_pair_dmgenes.gr <- pair_dmgenes.gr[queryHits(match)]
        # identify which of those sites are themselves dmsites
        #
        match2 <- suppressWarnings(findOverlaps(sub_pair_p_meth.gr,pair_dmsites.gr,ignore.strand=TRUE))
        pair_dmsites_index <- queryHits(match2)
        # transform the GRanges objects to data frames and combine them
        # column-wise; gene columns are prefixed "gene_" to avoid clashes
        #
        sub_pair_p_meth <- as.data.frame(sub_pair_p_meth.gr)
        sub_pair_dmgenes <- as.data.frame(sub_pair_dmgenes.gr)
        colnames(sub_pair_dmgenes) <- lapply(colnames(sub_pair_dmgenes),
                                             function(i) paste('gene',i,sep='_'))
        meth_dmg_comb <- cbind(sub_pair_p_meth,
                               sub_pair_dmgenes)
        # flag each site: TRUE if it is a differentially methylated site
        #
        meth_dmg_comb['is.dm'] <- FALSE
        meth_dmg_comb[pair_dmsites_index,'is.dm'] <- TRUE
        # drop all-NA columns and write the per-comparison details table
        #
        meth_dmg_comb <- meth_dmg_comb[colSums(! is.na(meth_dmg_comb))>0]
        outfile <- paste("dmg",outflabel,sep="-")
        outfile <- paste(outfile,sample_match,sep="_")
        wtoutfile <- paste(outfile,"details.txt",sep="_")
        write.table(meth_dmg_comb, file=wtoutfile,
                    sep="\t", row.names=FALSE, quote=FALSE)
        # split the data frame by gene, using the first gene-id-like column
        # that is actually present in the table
        #
        splitter <- c('gene_ID','gene_Name','gene_gene')
        splitter <- splitter[splitter%in%names(meth_dmg_comb)][1]
        grouped <- meth_dmg_comb%>%group_by_(.dots=splitter)
        out <- split(grouped,grouped[splitter])
        # plot one heatmap per dm gene that passes the site-count filters
        #
        phoutfile <- paste(outfile,"heatmaps.pdf",sep="_")
        ##pdf(paste(sample_match,'.pdf',sep=''))
        pdf(phoutfile)
        lapply(out,function(g) {
            nsites <- dim(g)[1]
            pdmsites <- 100 * sum(g$is.dm,na.rm=TRUE) / nsites
            if (nsites >= min.nsites & nsites <= max.nsites & pdmsites >= min.pdmsites) {
                plot <- as.matrix(g[,c(sample1,sample2)])
                # make sure that there are differences to show in the heatmap:
                if (! all(plot[1] == plot)) {
                    heatmap.2(plot,
                              margins=c(10,10),
                              dendrogram='none',
                              Rowv=FALSE,
                              col=greenred(10),
                              trace='none',
                              main=paste("Common sites",unique(g[splitter]),sep=" "),
                              srtCol=45,
                              RowSideColors=as.character(as.numeric(g$is.dm)))
                }
            }
        })
        dev.off()
        return(meth_dmg_comb)
    })
    names(dmgprp) <- sample_match_list
    message('... show_dmsg() finished ...')
    return(dmgprp)
}
| /R/show_dmsg.R | no_license | littleblackfish/BWASPR | R | false | false | 6,889 | r | #' show_dmsg()
#' Plot heatmaps of the methylation level of sites in differentially
#' methylated genes, and write a per-comparison details table.
#'
#' @param mrobj A methyRaw object or a methyRawList object
#' @param dmsg A list containing GRanges objects dmsites and dmgenes returned by
#'   det_dmsg()
#' @param min.nsites Required minimal number of msites in a gene for the gene
#'   to be displayed in the heatmap pdf
#' @param max.nsites Required maximal number of msites in a gene for the gene
#'   to be displayed in the heatmap pdf
#' @param min.pdmsites Required minimal percent of dmsites among the msites in
#'   a gene for the gene to be displayed in the heatmap pdf
#' @param destrand methylKit::unite() parameter; default: FALSE.
#'   destrand=TRUE combines CpG methylation calls from both strands
#' @param mc.cores Integer denoting how many cores should be used for parallel
#'   differential methylation calculations
#' @param outflabel A string to identify the study in the output file
#'
#' @return A named list (one entry per comparison) of data frames that combine
#'   per-site percent methylation with the overlapping dmgene annotation
#'
#' @importFrom methylKit percMethylation reorganize unite
#' @importFrom GenomicRanges findOverlaps values
#' @importFrom gplots heatmap.2 greenred
# @importFrom utils write.table
#' @importFrom S4Vectors subjectHits queryHits
#' @importFrom dplyr group_by_ %>%
#'
#' @examples
#'   mydatf <- system.file("extdata","Am.dat",package="BWASPR")
#'   myparf <- system.file("extdata","Am.par",package="BWASPR")
#'   myfiles <- setup_BWASPR(datafile=mydatf,parfile=myparf)
#'   samplelist <- list("forager","nurse")
#'   AmHE <- mcalls2mkobj(myfiles$datafiles,species="Am",study="HE",
#'                        sample=samplelist,replicate=c(0),
#'                        type="CpGhsm",mincov=1,assembly="Amel-4.5")
#'   genome_ann <- get_genome_annotation(myfiles$parameters)
#'   dmsgList <- det_dmsg(AmHE,genome_ann,
#'                        threshold=25.0,qvalue=0.01,mc.cores=4,destrand=TRUE,
#'                        outfile1="AmHE-dmsites.txt",
#'                        outfile2="AmHE-dmgenes.txt")
#'   dmgprp <- show_dmsg(AmHE,dmsgList,destrand=TRUE,
#'                       min.nsites=2,max.nsites=60,min.pdmsites=10,
#'                       mc.cores=4,outflabel="Am_HE")
#'
#' @export
show_dmsg <- function(mrobj,dmsg,destrand=FALSE,min.nsites=2,max.nsites=60,
                      min.pdmsites=10,mc.cores=1,outflabel="") {
    message('... show_dmsg() ...')
    # Collapse the per-comparison lists of dmsites/dmgenes into single GRanges
    # objects and derive the set of pairwise comparisons to process.
    dmsites.gr <- do.call("c", dmsg$dmsites)
    dmgenes.gr <- do.call("c", dmsg$dmgenes)
    sample_match_list <- as.list(unique(as.character(dmgenes.gr$comparison)))
    # Analyze each comparison in turn; one details table + heatmap PDF each.
    dmgprp <- lapply(sample_match_list, function(sample_match) {
        # Comparison labels are '.'-separated with the two sample names in
        # positions 1 and 3 (e.g. "forager.vs.nurse" -- see det_dmsg()).
        sample1 <- unlist(strsplit(sample_match,'\\.'))[1]
        sample2 <- unlist(strsplit(sample_match,'\\.'))[3]
        message(paste('... comparing ',sample1,' vs. ',sample2,' ...',sep=''))
        # subset dmsites.gr & dmgenes.gr to this comparison only
        #
        pair_dmsites.gr <- dmsites.gr[GenomicRanges::values(dmsites.gr)$comparison%in%sample_match]
        pair_dmgenes.gr <- dmgenes.gr[GenomicRanges::values(dmgenes.gr)$comparison%in%sample_match]
        # subset the mrobj to the current sample pair and unite the calls
        #
        pair_mrobj <- reorganize(mrobj,sample.ids=list(sample1,sample2),
                                 treatment=c(0,1))
        pair_meth <- unite(pair_mrobj,destrand=destrand,mc.cores=mc.cores)
        # calculate percent methylation per site (rounded to 2 decimals)
        #
        p_meth <- round(percMethylation(pair_meth,rowids=FALSE,
                                        save.txt=FALSE),2)
        pair_p_meth <- cbind(pair_meth,p_meth)
        pair_p_meth.gr <- as(pair_p_meth,'GRanges')
        # identify the msites overlapping each dm gene ('scd' sites)
        #
        match <- suppressWarnings(findOverlaps(pair_dmgenes.gr,pair_p_meth.gr,ignore.strand=TRUE))
        sub_pair_p_meth.gr <- pair_p_meth.gr[subjectHits(match)]
        sub_pair_dmgenes.gr <- pair_dmgenes.gr[queryHits(match)]
        # identify which of those sites are themselves dmsites
        #
        match2 <- suppressWarnings(findOverlaps(sub_pair_p_meth.gr,pair_dmsites.gr,ignore.strand=TRUE))
        pair_dmsites_index <- queryHits(match2)
        # transform the GRanges objects to data frames and combine them
        # column-wise; gene columns are prefixed "gene_" to avoid clashes
        #
        sub_pair_p_meth <- as.data.frame(sub_pair_p_meth.gr)
        sub_pair_dmgenes <- as.data.frame(sub_pair_dmgenes.gr)
        colnames(sub_pair_dmgenes) <- lapply(colnames(sub_pair_dmgenes),
                                             function(i) paste('gene',i,sep='_'))
        meth_dmg_comb <- cbind(sub_pair_p_meth,
                               sub_pair_dmgenes)
        # flag each site: TRUE if it is a differentially methylated site
        #
        meth_dmg_comb['is.dm'] <- FALSE
        meth_dmg_comb[pair_dmsites_index,'is.dm'] <- TRUE
        # drop all-NA columns and write the per-comparison details table
        #
        meth_dmg_comb <- meth_dmg_comb[colSums(! is.na(meth_dmg_comb))>0]
        outfile <- paste("dmg",outflabel,sep="-")
        outfile <- paste(outfile,sample_match,sep="_")
        wtoutfile <- paste(outfile,"details.txt",sep="_")
        write.table(meth_dmg_comb, file=wtoutfile,
                    sep="\t", row.names=FALSE, quote=FALSE)
        # split the data frame by gene, using the first gene-id-like column
        # that is actually present in the table
        #
        splitter <- c('gene_ID','gene_Name','gene_gene')
        splitter <- splitter[splitter%in%names(meth_dmg_comb)][1]
        grouped <- meth_dmg_comb%>%group_by_(.dots=splitter)
        out <- split(grouped,grouped[splitter])
        # plot one heatmap per dm gene that passes the site-count filters
        #
        phoutfile <- paste(outfile,"heatmaps.pdf",sep="_")
        ##pdf(paste(sample_match,'.pdf',sep=''))
        pdf(phoutfile)
        lapply(out,function(g) {
            nsites <- dim(g)[1]
            pdmsites <- 100 * sum(g$is.dm,na.rm=TRUE) / nsites
            if (nsites >= min.nsites & nsites <= max.nsites & pdmsites >= min.pdmsites) {
                plot <- as.matrix(g[,c(sample1,sample2)])
                # make sure that there are differences to show in the heatmap:
                if (! all(plot[1] == plot)) {
                    heatmap.2(plot,
                              margins=c(10,10),
                              dendrogram='none',
                              Rowv=FALSE,
                              col=greenred(10),
                              trace='none',
                              main=paste("Common sites",unique(g[splitter]),sep=" "),
                              srtCol=45,
                              RowSideColors=as.character(as.numeric(g$is.dm)))
                }
            }
        })
        dev.off()
        return(meth_dmg_comb)
    })
    names(dmgprp) <- sample_match_list
    message('... show_dmsg() finished ...')
    return(dmgprp)
}
|
# Quick highcharter demo: interactive charts of the ggplot2 example data sets.
# Guard the install so the script does not reinstall the package on every run.
if (!requireNamespace("highcharter", quietly = TRUE)) {
  install.packages("highcharter")
}
library(highcharter)
# Fuel-economy data shipped with ggplot2.
data("mpg", package = "ggplot2")
head(mpg)
# Interactive scatter plot: engine displacement vs. city mpg.
hchart(mpg, "point", hcaes(x = displ, y = cty))
# Additional example data sets used below.
data(diamonds, economics_long, mpg, package = "ggplot2")
library(dplyr)
head(mpg)
hchart(mpg, "scatter", hcaes(x = displ, y = hwy, group = class)) | /highcharter.R | no_license | UrszulaCzerwinska/AppliedPredictiveModeling | R | false | false | 291 | r | install.packages("highcharter")
# highcharter demo: interactive charts of the ggplot2 example data sets.
library(highcharter)
# Fuel-economy data shipped with ggplot2.
data("mpg", package = "ggplot2")
head(mpg)
# Interactive scatter plot: engine displacement vs. city mpg.
hchart(mpg, "point", hcaes(x = displ, y = cty))
# Additional example data sets used below.
data(diamonds, economics_long, mpg, package = "ggplot2")
library(dplyr)
hchart(mpg, "scatter", hcaes(x = displ, y = hwy, group = class)) |
# Climatology analysis: compare recent (2018/2019) station observations
# against 1980-2010 climate normals, and plot SNOTEL snow-water-equivalent.
library(lubridate)
library(tidyverse)
library(cowplot)
library(ggrepel)  # FIX: geom_text_repel() is used below but was never attached

# Station observations for 2018-2019.
climate_recent_raw <- read_csv('data/climate_2018_2019.csv')
# Normals downloaded from https://www.ncdc.noaa.gov/cdo-web/search
climate_normals_raw <- read_csv('data/climate_normals.csv')

# Monthly means for the recent period: split the YEAR-MONTH date string.
climate_2018 <- climate_recent_raw %>%
  separate(DATE, into = c('YEAR','DATE')) %>%
  mutate(DATE = as.integer(DATE)) %>%
  dplyr::select(name = NAME, month = DATE, year = YEAR, temp = TAVG, precip = PRCP)

# 1980-2010 normals with spread statistics; Tower Falls is excluded.
climate_normals <- climate_normals_raw %>%
  dplyr::select(name = NAME, month = DATE, temp = `MLY-TAVG-NORMAL`, temp_sd = `MLY-TAVG-STDDEV`,
                precip = `MLY-PRCP-NORMAL`, precip_25 = `MLY-PRCP-25PCTL`, precip_75 = `MLY-PRCP-75PCTL`) %>%
  mutate(year = '1980-2010') %>%
  filter(name != 'Tower Falls')

climatology <- full_join(climate_2018, climate_normals)

# Temperature panel: recent vs. normal monthly means.
temp <-
  ggplot(climatology, aes(x = month, y = temp)) +
  geom_col(aes(fill = year), position = 'dodge') +
  # Error bars: +/- temp_sd/sqrt(26) (presumably a standard error over the
  # 26 stations/years -- TODO confirm the n).
  geom_errorbar(aes(ymin = temp - temp_sd/sqrt(26), ymax = temp + temp_sd/sqrt(26), group = year),
                position = position_dodge2(width = 0.2, padding = 0.8)) +
  scale_fill_brewer('Year', palette = 'Dark2', guide = FALSE) +
  scale_x_continuous(breaks = c(6,7,8,9), labels = c('June','July','Aug.','Sept.')) +
  theme_bw(base_size = 12) +
  facet_wrap(~name) +
  labs(y = 'Mean summer monthly temperature (deg. C)') +
  theme(axis.title.x = element_blank(),
        strip.background = element_blank())

# Precipitation panel: error bars span the 25th-75th percentile of normals.
precip <-
  ggplot(climatology, aes(x = month, y = precip)) +
  geom_col(aes(fill = year), position = 'dodge') +
  geom_errorbar(aes(ymin = precip_25, ymax = precip_75, group = year),
                position = position_dodge2(width = 0.2, padding = 0.8)) +
  scale_fill_brewer('Year', palette = 'Dark2') +
  scale_x_continuous(breaks = c(6,7,8,9), labels = c('June','July','Aug.','Sept.')) +
  theme_bw(base_size = 12) +
  facet_wrap(~name) +
  labs(y = 'Mean summer monthly precipitation (mm)') +
  theme(#legend.position = c(0.91,0.91),
        legend.background = element_blank(),
        axis.title.x = element_blank(),
        strip.background = element_blank())

plot_grid(temp, precip, rel_widths = c(1, 1.3))

# SNOTEL snow-water-equivalent record; SWE/10 converts mm to cm.
snotel <- read_csv('data/snotel.csv') %>%
  mutate(date = parse_date(date, format = '%b-%y')) %>%
  mutate(SWE = SWE/10) #%>%
  #filter(month(date) %in% c(2,3,4,5)) %>%
  #group_by(date = year(date)) %>%
  #summarise(SWE = mean(SWE)) %>%
  #mutate(date = parse_date(as.character(date), format = '%Y'))

# Long-term mean May SWE per station (dashed reference line below).
month_means <- snotel %>%
  group_by(station_name, month(date)) %>%
  summarise(mean = mean(SWE, na.rm = TRUE)) %>%
  filter(`month(date)` == 5)

ggplot(snotel, aes(x = date, y = SWE)) +
  geom_hline(data = month_means, aes(yintercept = mean), linetype = 'dashed') +
  geom_line(size = 1) +
  #geom_line(aes(x = date, y = med_SWE), color = 'blue', size = 1) +
  geom_point(data = filter(snotel, month(date) == 05), aes(x = date, y = SWE), shape = 21, fill = 'grey50', size = 2.5) +
  geom_text_repel(data = filter(snotel, month(date) == 05 & year(date) == 2018),
                  aes(x = date, y = SWE, label = 'May 2018'), nudge_y = 20, nudge_x = 10) +
  facet_wrap(~station_name) +
  theme_bw(base_size = 14) +
  labs(x = 'Year', y = 'Snow Water Equivalent (cm)')

# Greater Yellowstone inputs (exploratory section).
climate_2018_raw <- read_csv('data/climate_gy_2018.csv')
# BUG FIX: the pipeline below was only partially commented out, which left the
# separate()/mutate()/select() steps orphaned at top level and erroring at run
# time. The whole fragment is now commented out so the script runs end-to-end;
# restore the assignment if the GY monthly table is needed.
#climate_2018 <- climate_2018_raw %>%
#  separate(DATE, into = c('YEAR','DATE')) %>%
#  mutate(DATE = as.integer(DATE)) %>%
#  dplyr::select(name = NAME, date = DATE, temp = TAVG, precip = PRCP) %>%
#  mutate(version = '2018')

climate_normals_raw <- read_csv('data/climate_normals_gy.csv')

# Recompute the recent-period and normals tables (same steps as above).
climate_2018 <- climate_recent_raw %>%
  separate(DATE, into = c('YEAR','DATE')) %>%
  mutate(DATE = as.integer(DATE)) %>%
  dplyr::select(name = NAME, month = DATE, year = YEAR, temp = TAVG, precip = PRCP)
climate_normals <- climate_normals_raw %>%
  dplyr::select(name = NAME, month = DATE, temp = `MLY-TAVG-NORMAL`, temp_sd = `MLY-TAVG-STDDEV`,
                precip = `MLY-PRCP-NORMAL`, precip_25 = `MLY-PRCP-25PCTL`, precip_75 = `MLY-PRCP-75PCTL`) %>%
  mutate(year = '1980-2010') %>%
  filter(name != 'Tower Falls')
| /code/climatology_analysis.R | no_license | tylerhoecker/seed_addition_GYE | R | false | false | 4,700 | r | library(lubridate)
# Climatology analysis: compare recent (2018/2019) station observations
# against 1980-2010 climate normals, and plot SNOTEL snow-water-equivalent.
library(lubridate)  # FIX: month()/year() are used below but lubridate was not attached here
library(tidyverse)
library(cowplot)
library(ggrepel)  # FIX: geom_text_repel() is used below but was never attached

# Station observations for 2018-2019.
climate_recent_raw <- read_csv('data/climate_2018_2019.csv')
# Normals downloaded from https://www.ncdc.noaa.gov/cdo-web/search
climate_normals_raw <- read_csv('data/climate_normals.csv')

# Monthly means for the recent period: split the YEAR-MONTH date string.
climate_2018 <- climate_recent_raw %>%
  separate(DATE, into = c('YEAR','DATE')) %>%
  mutate(DATE = as.integer(DATE)) %>%
  dplyr::select(name = NAME, month = DATE, year = YEAR, temp = TAVG, precip = PRCP)

# 1980-2010 normals with spread statistics; Tower Falls is excluded.
climate_normals <- climate_normals_raw %>%
  dplyr::select(name = NAME, month = DATE, temp = `MLY-TAVG-NORMAL`, temp_sd = `MLY-TAVG-STDDEV`,
                precip = `MLY-PRCP-NORMAL`, precip_25 = `MLY-PRCP-25PCTL`, precip_75 = `MLY-PRCP-75PCTL`) %>%
  mutate(year = '1980-2010') %>%
  filter(name != 'Tower Falls')

climatology <- full_join(climate_2018, climate_normals)

# Temperature panel: recent vs. normal monthly means.
temp <-
  ggplot(climatology, aes(x = month, y = temp)) +
  geom_col(aes(fill = year), position = 'dodge') +
  # Error bars: +/- temp_sd/sqrt(26) (presumably a standard error over the
  # 26 stations/years -- TODO confirm the n).
  geom_errorbar(aes(ymin = temp - temp_sd/sqrt(26), ymax = temp + temp_sd/sqrt(26), group = year),
                position = position_dodge2(width = 0.2, padding = 0.8)) +
  scale_fill_brewer('Year', palette = 'Dark2', guide = FALSE) +
  scale_x_continuous(breaks = c(6,7,8,9), labels = c('June','July','Aug.','Sept.')) +
  theme_bw(base_size = 12) +
  facet_wrap(~name) +
  labs(y = 'Mean summer monthly temperature (deg. C)') +
  theme(axis.title.x = element_blank(),
        strip.background = element_blank())

# Precipitation panel: error bars span the 25th-75th percentile of normals.
precip <-
  ggplot(climatology, aes(x = month, y = precip)) +
  geom_col(aes(fill = year), position = 'dodge') +
  geom_errorbar(aes(ymin = precip_25, ymax = precip_75, group = year),
                position = position_dodge2(width = 0.2, padding = 0.8)) +
  scale_fill_brewer('Year', palette = 'Dark2') +
  scale_x_continuous(breaks = c(6,7,8,9), labels = c('June','July','Aug.','Sept.')) +
  theme_bw(base_size = 12) +
  facet_wrap(~name) +
  labs(y = 'Mean summer monthly precipitation (mm)') +
  theme(#legend.position = c(0.91,0.91),
        legend.background = element_blank(),
        axis.title.x = element_blank(),
        strip.background = element_blank())

plot_grid(temp, precip, rel_widths = c(1, 1.3))

# SNOTEL snow-water-equivalent record; SWE/10 converts mm to cm.
snotel <- read_csv('data/snotel.csv') %>%
  mutate(date = parse_date(date, format = '%b-%y')) %>%
  mutate(SWE = SWE/10) #%>%
  #filter(month(date) %in% c(2,3,4,5)) %>%
  #group_by(date = year(date)) %>%
  #summarise(SWE = mean(SWE)) %>%
  #mutate(date = parse_date(as.character(date), format = '%Y'))

# Long-term mean May SWE per station (dashed reference line below).
month_means <- snotel %>%
  group_by(station_name, month(date)) %>%
  summarise(mean = mean(SWE, na.rm = TRUE)) %>%
  filter(`month(date)` == 5)

ggplot(snotel, aes(x = date, y = SWE)) +
  geom_hline(data = month_means, aes(yintercept = mean), linetype = 'dashed') +
  geom_line(size = 1) +
  #geom_line(aes(x = date, y = med_SWE), color = 'blue', size = 1) +
  geom_point(data = filter(snotel, month(date) == 05), aes(x = date, y = SWE), shape = 21, fill = 'grey50', size = 2.5) +
  geom_text_repel(data = filter(snotel, month(date) == 05 & year(date) == 2018),
                  aes(x = date, y = SWE, label = 'May 2018'), nudge_y = 20, nudge_x = 10) +
  facet_wrap(~station_name) +
  theme_bw(base_size = 14) +
  labs(x = 'Year', y = 'Snow Water Equivalent (cm)')

# Greater Yellowstone inputs (exploratory section).
climate_2018_raw <- read_csv('data/climate_gy_2018.csv')
# BUG FIX: the pipeline below was only partially commented out, which left the
# separate()/mutate()/select() steps orphaned at top level and erroring at run
# time. The whole fragment is now commented out so the script runs end-to-end;
# restore the assignment if the GY monthly table is needed.
#climate_2018 <- climate_2018_raw %>%
#  separate(DATE, into = c('YEAR','DATE')) %>%
#  mutate(DATE = as.integer(DATE)) %>%
#  dplyr::select(name = NAME, date = DATE, temp = TAVG, precip = PRCP) %>%
#  mutate(version = '2018')

climate_normals_raw <- read_csv('data/climate_normals_gy.csv')

# Recompute the recent-period and normals tables (same steps as above).
climate_2018 <- climate_recent_raw %>%
  separate(DATE, into = c('YEAR','DATE')) %>%
  mutate(DATE = as.integer(DATE)) %>%
  dplyr::select(name = NAME, month = DATE, year = YEAR, temp = TAVG, precip = PRCP)
climate_normals <- climate_normals_raw %>%
  dplyr::select(name = NAME, month = DATE, temp = `MLY-TAVG-NORMAL`, temp_sd = `MLY-TAVG-STDDEV`,
                precip = `MLY-PRCP-NORMAL`, precip_25 = `MLY-PRCP-25PCTL`, precip_75 = `MLY-PRCP-75PCTL`) %>%
  mutate(year = '1980-2010') %>%
  filter(name != 'Tower Falls')
# ---------------------------------------------------------------------------
# Predefined analysis parameters. This block is normally injected by the
# pipeline ahead of the script body; everything up to
# "#predefined_condition_end" is configuration.
# ---------------------------------------------------------------------------
rootdir<-"/data/h_gelbard_lab/projects/20230606_9686_AG_RNAseq_iSGS_estrogen/deseq2_proteincoding_genetable/result"
inputfile<-"RNAseq_human.define"   # comparison definition table (tab separated)
pvalue<-0.05             # significance threshold (raw p or FDR; see useRawPvalue)
useRawPvalue<-0          # 0 = threshold on FDR, 1 = threshold on raw p-value
foldChange<-2            # presumably the fold-change cutoff for DE calls -- confirm downstream
minMedianInGroup<-5      # minimum median count required within a group
detectedInBothGroup<-0   # 1 = require detection in both compared groups
showLabelInPCA<-1        # presumably: label samples in the PCA plot
showDEGeneCluster<-0
addCountOne<-0           # 1 = add a pseudo-count of 1 to all counts
usePearsonInHCA<-1       # NOTE(review): overwritten by TRUE on the multi-assignment line below
top25only<-0
performWilcox<-0
textSize<-10
transformTable<-0
exportSignificantGeneName<-1
thread<-8                # presumably the BiocParallel worker count -- confirm downstream
outputPdf<-FALSE;outputPng<-TRUE;outputTIFF<-FALSE;showVolcanoLegend<-TRUE;usePearsonInHCA<-TRUE;showLabelInPCA<-TRUE;top25cvInHCA<-FALSE;
cooksCutoff<-0.99        # presumably passed to DESeq2 results(cooksCutoff=) -- confirm downstream
#predefined_condition_end
# Use the cairo bitmap backend so png() works on headless servers.
options(bitmapType='cairo')

# Build a file-name suffix that records the filtering/threshold options used.
suffix <- ""
if(top25only){
  suffix <- paste0(suffix, "_top25")
}
if(detectedInBothGroup){
  suffix <- paste0(suffix, "_detectedInBothGroup")
}
if(minMedianInGroup > 0){
  suffix <- paste0(suffix, "_min", minMedianInGroup)
}
if(useRawPvalue){
  alpha <- 0.1
  suffix <- paste0(suffix, "_pvalue", pvalue)
}else{
  alpha <- pvalue
  suffix <- paste0(suffix, "_fdr", pvalue)
}

# When addCountOne is set, a pseudo-count of 1 is added everywhere, so the
# median-count filter threshold has to shift by one as well.
zeroCount <- 0
if(addCountOne){
  zeroCount <- 1
  minMedianInGroup <- minMedianInGroup + 1
}

# Defaults for options the predefined-condition block may not have set.
if(!exists("idIndex")){
  idIndex <- 1
}
if(!exists("outputPdf")){
  outputPdf <- FALSE
}
# Fall back to PNG when PNG was not configured or PDF output is disabled
# (scalar condition, so use the short-circuit operator, not "|").
if(!exists("outputPng") || !outputPdf){
  outputPng <- TRUE
}
if(!exists("outputTIFF")){
  outputTIFF <- FALSE
}
if(!exists("filterBaseMean")){
  filterBaseMean <- 0
}
if(!exists("filterBaseMeanValue")){
  filterBaseMeanValue <- 30
}
outputFormat <- c()
if(outputPdf){
  outputFormat <- c("PDF")
}
if(outputPng){
  outputFormat <- c(outputFormat, "PNG")
}
if(outputTIFF){
  outputFormat <- c(outputFormat, "TIFF")
}
if(!exists("countSep")){
  countSep <- "\t"
}
if(!exists("usePearsonInHCA")){
  usePearsonInHCA <- 0
}
if(!exists("exportSignificantGeneName")){
  exportSignificantGeneName <- 1
}

# Optional library-size table: csv files hold one row per key,
# tab-separated files one column per key.
if(exists("libraryFile")){
  if (libraryKey != 'None'){
    # FIX: escape the dot so only real ".csv" names match (".csv$" would
    # also match e.g. "foo_csv" because "." matches any character).
    if (grepl("\\.csv$", libraryFile)){
      librarySize <- read.csv(libraryFile, row.names=1, check.names=FALSE)
      librarySize <- unlist(librarySize[libraryKey,,drop=TRUE])
      cat("Using ", libraryKey, " in " , libraryFile , " as library size. \n")
    }else{
      # FIX: spell out stringsAsFactors (the original "stringsAsFactor=F"
      # only worked through partial argument matching) and use TRUE/FALSE.
      librarySize <- read.table(libraryFile, row.names=1, check.names=FALSE,
                                header=TRUE, stringsAsFactors=FALSE)
      librarySize <- unlist(librarySize[,libraryKey,drop=TRUE])
      cat("Using ", libraryKey, " in " , libraryFile , " as library size. \n")
    }
  }
}
if(!exists("thread")){
  thread <- 1
}
if(!exists("showVolcanoLegend")){
  showVolcanoLegend <- 1
}
if(!exists("cooksCutoff")){
  cooksCutoff <- FALSE
}
library("DESeq2")
library("heatmap3")
library("lattice")
#library("reshape")
library("ggplot2")
library("grid")
library("scales")
library("reshape2")
library("VennDiagram")
library("RColorBrewer")
#library("preprocessCore")
library("BiocParallel")
library("ggrepel")
library("stringr")
library("data.table")
# Run everything from the configured result directory; outputs are relative.
setwd(rootdir)
# Comparison/design table driving the analysis (tab separated; presumably one
# row per sample-to-group assignment -- confirm against downstream usage).
comparisons_data<-read.table(inputfile, header=T, check.names=F , sep="\t", stringsAsFactors = F)
##Solving node stack overflow problem start###
#when there are too many genes, drawing dendrogram may failed due to node stack overflow,
#It could be solved by forcing stats:::plotNode to be run as interpreted code rather then byte-compiled code via a nasty hack.
#http://stackoverflow.com/questions/16559250/error-in-heatmap-2-gplots/25877485#25877485
#align two count table
# Outer-join two tables on their row names (merge by = 0), keeping the union
# of rows; a NULL input simply passes the other table through unchanged.
align<-function(data1,data2,by=0,suffixes=c(deparse(substitute(data1)),deparse(substitute(data2))),sort=T) {
  if (is.null(data1)) {
    return(data2)
  }
  if (is.null(data2)) {
    return(data1)
  }
  merged <- merge(data1, data2, by = by, all = T, suffixes = suffixes, sort = sort)
  # merge() puts the join key into the first column; move it back to row names.
  row.names(merged) <- merged[, 1]
  merged[, -1]
}
# Shared ggplot2 theme: theme_bw with black panel border/axis lines and a
# centered title; optionally rotates x-axis tick labels 90 degrees.
theme_bw3 <- function (axis.x.rotate=F) {
  # element_line's "size" argument was renamed "linewidth" in ggplot2 3.4.0.
  if (packageVersion("ggplot2") >= "3.4.0") {
    axis_line <- element_line(colour = "black", linewidth = 0.5)
  } else {
    axis_line <- element_line(colour = "black", size = 0.5)
  }
  out <- theme_bw() +
    theme(
      strip.background = element_rect(fill = NA, colour = 'black'),
      panel.border = element_rect(fill = NA, color = "black"),
      plot.title = element_text(hjust = 0.5),
      axis.line = axis_line
    )
  if (axis.x.rotate) {
    out <- out + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
  }
  out
}
# Convert a byte-compiled function to an interpreted-code function by
# round-tripping it through its deparsed source, keeping its environment.
unByteCode <- function(fun)
{
  plain <- eval(parse(text = deparse(fun)))
  environment(plain) <- environment(fun)
  plain
}
# Replace a binding inside a locked environment **HACK**: temporarily unlock
# the binding, overwrite it, then lock it again. Returns value invisibly.
assignEdgewise <- function(name, env, value)
{
  unlockBinding(name, env = env)
  assign(name, envir = env, value = value)
  lockBinding(name, env = env)
  invisible(value)
}
# Replace byte-compiled function in a locked environment with an interpreted-code
# function
# (Used below to patch stats:::plotNode, whose byte-compiled recursion
# overflows on very large dendrograms.)
unByteCodeAssign <- function(fun)
{
# Strip any namespace qualifier, e.g. "stats:::plotNode" -> "plotNode".
name <- gsub('^.*::+','', deparse(substitute(fun)))
FUN <- unByteCode(fun)
# Re-bind the interpreted version inside the function's own (locked)
# namespace environment.
retval <- assignEdgewise(name=name,
env=environment(FUN),
value=FUN
)
invisible(retval)
}
# Use the above functions to convert stats:::plotNode to interpreted-code:
unByteCodeAssign(stats:::plotNode)
# Now raise the interpreted code recursion limit (you may need to adjust this,
# decreasing if it uses too much memory, increasing if you get a recursion depth error ).
options(expressions=5e4)
##Solving node stack overflow problem end###
# Green-black-red palette shared by all expression heatmaps.
hmcols <- colorRampPalette(c("green", "black", "red"))(256)
# Open a graphics device named filePrefix.<ext> for the given format
# ("PDF"/"TIFF"/anything else -> PNG). The caller is responsible for dev.off().
# pdfWidth/pdfHeight are inches; otherwidth/otherHeight are pixels at 300 dpi.
openPlot<-function(filePrefix, format, pdfWidth, pdfHeight, otherWidth, otherHeight, figureName){
  fileName <- paste0(filePrefix, ".", tolower(format))
  switch(format,
    PDF = pdf(fileName, width=pdfWidth, height=pdfHeight, useDingbats=FALSE),
    TIFF = tiff(filename=fileName, width=otherWidth, height=otherHeight, res=300),
    png(filename=fileName, width=otherWidth, height=otherHeight, res=300))
  cat("saving", figureName, "to ", fileName, "\n")
}
# Render plot object p once per requested output format, opening and closing
# a device around each print (see openPlot for size semantics).
drawPlot<-function(filePrefix, outputFormat, pdfWidth, pdfHeight, otherWidth, otherHeight, p, figureName){
  for (fmt in outputFormat) {
    openPlot(filePrefix, fmt, pdfWidth, pdfHeight, otherWidth, otherHeight, figureName)
    print(p)
    dev.off()
  }
}
# Draw a hierarchical-clustering heatmap (heatmap3) of rldselect (genes x
# samples) to <prefix>_DESeq2-vsd-heatmap.<ext> for every requested format.
#   ispaired        TRUE adds a per-pair color sidebar from designData$Paired
#   conditionColors per-sample group colors (matrix column "Group")
#   gnames          the two group names shown in the legend
# Uses globals: usePearsonInHCA (correlation vs euclidean clustering), hmcols.
# Silently does nothing when fewer than 3 genes are supplied.
drawHCA<-function(prefix, rldselect, ispaired, designData, conditionColors, gnames, outputFormat){
genecount<-nrow(rldselect)
# A row dendrogram is unreadable beyond ~50 genes.
showRowDendro = genecount <= 50
if(genecount > 2){
# Enlarge column labels when there are few samples.
cexCol = max(1.0, 0.2 + 1/log10(ncol(rldselect)))
if(ispaired){
# Second sidebar encodes the pairing of samples.
htColors<-rainbow(length(unique(designData$Paired)))
gsColors<-as.matrix(data.frame(Group=conditionColors, Sample=htColors[designData$Paired]))
}else{
gsColors = conditionColors;
}
# Show row labels only when few enough genes to be legible.
if (genecount<=30) {
labRow=row.names(rldselect)
margins=c(12,8)
} else {
labRow=NA
margins=c(12,5)
}
filePrefix<-paste0(prefix, "_DESeq2-vsd-heatmap")
for(format in outputFormat){
openPlot(filePrefix, format, 10, 10, 3000, 3000, "HCA")
# Two branches differ only in the distance function: heatmap3's default
# (correlation-based) vs plain euclidean dist.
if(usePearsonInHCA){
heatmap3(rldselect,
col = hmcols,
ColSideColors = gsColors,
margins=margins,
scale="r",
labRow=labRow,
showRowDendro=showRowDendro,
main=paste0("Hierarchical Cluster Using ", genecount, " Genes"),
cexCol=cexCol,
useRaster=FALSE,
legendfun=function() showLegend(legend=paste0("Group ", gnames), col=c("red","blue"),cex=1.0,x="center"))
}else{
heatmap3(rldselect,
col = hmcols,
ColSideColors = gsColors,
margins=margins,
scale="r",
distfun=dist,
labRow=labRow,
showRowDendro=showRowDendro,
main=paste0("Hierarchical Cluster Using ", genecount, " Genes"),
cexCol=cexCol,
useRaster=FALSE,
legendfun=function() showLegend(legend=paste0("Group ", gnames), col=c("red","blue"),cex=1.0,x="center"))
}
dev.off()
}
}
}
# PCA scatter plot (PC1 vs PC2) of samples from rldmatrix (genes x samples),
# written to <prefix>_DESeq2-vsd-pca.<ext>.
#   showLabelInPCA  TRUE labels each point with its sample name (ggrepel)
#   condition       per-sample group factor used for point color
#   scalePCs        TRUE standardizes the PC scores before plotting
# Silently does nothing when fewer than 3 genes are supplied.
drawPCA<-function(prefix, rldmatrix, showLabelInPCA, designData, condition, outputFormat,scalePCs=TRUE){
genecount<-nrow(rldmatrix)
if(genecount > 2){
# prcomp on samples (transpose: rows become samples).
pca<-prcomp(t(rldmatrix))
supca<-summary(pca)$importance
pcadata<-data.frame(pca$x)
if (scalePCs) {
pcadata=as.data.frame(scale(pcadata))
}
# Axis labels carry the percent of variance explained (importance row 2).
pcalabs=paste0(colnames(pcadata), "(", round(supca[2,] * 100), "%)")
pcadata$sample<-row.names(pcadata)
pcadata$Group<-condition
if(showLabelInPCA){
g <- ggplot(pcadata, aes(x=PC1, y=PC2, label=sample)) +
geom_text_repel(size=4)
}else{
g <- ggplot(pcadata, aes(x=PC1, y=PC2)) +
labs(color = "Group")
}
# NOTE(review): the 1.2x limits assume min < 0 < max on each PC (true for
# centered PCA scores) — confirm if scalePCs=FALSE with shifted data.
g <- g + geom_point(aes(col=Group), size=4) +
scale_x_continuous(limits=c(min(pcadata$PC1) * 1.2,max(pcadata$PC1) * 1.2)) +
scale_y_continuous(limits=c(min(pcadata$PC2) * 1.2,max(pcadata$PC2) * 1.2)) +
geom_hline(aes(yintercept=0), size=.2) +
geom_vline(aes(xintercept=0), size=.2) +
xlab(pcalabs[1]) + ylab(pcalabs[2]) +
scale_color_manual(values=c("red", "blue")) +
theme_bw3() + theme(legend.position="top")
filePrefix<-paste0(prefix, "_DESeq2-vsd-pca")
drawPlot(filePrefix, outputFormat, 6, 5, 3000, 3000, g, "PCA")
}
}
# Set size factors on a DESeqDataSet.
# If a global "librarySize" vector exists (loaded from libraryFile above),
# size factors are the library sizes scaled to geometric mean 1 (per the
# DESeq2 documentation); otherwise DESeq2's median-of-ratios estimator is
# used, falling back to edgeR TMM normalization when that estimator fails
# (e.g. every gene contains a zero).
# Returns the dds with sizeFactors() populated.
myEstimateSizeFactors<-function(dds){
  if (exists("librarySize")) {
    cat("Estimate size factor based on library size\n")
    curLibrarySize <- librarySize[colnames(dds)]
    # size factors = library sizes divided by their geometric mean
    curSizeFactor <- curLibrarySize / exp(mean(log(curLibrarySize)))
    sizeFactors(dds) <- curSizeFactor
  } else {
    cat("Estimate size factor based on reads\n")
    sfres <- try(dds <- estimateSizeFactors(dds))
    # inherits() instead of class()== : robust when the result has >1 class
    if (inherits(sfres, "try-error")) {
      library(edgeR)
      countNum <- counts(dds)
      # BUG FIX: the argument was misspelled "methold", so it fell through to
      # "..." and calcNormFactors silently ran with its default method instead
      # of TMM.
      y <- calcNormFactors(countNum, method = "TMM")
      # TMM factors are relative; multiply by relative library depth to get
      # DESeq2-style size factors.
      cs <- colSums(countNum)
      cs <- cs / median(cs)
      sf <- y * cs
      sizeFactors(dds) <- sf
    }
  }
  return(dds)
}
# Reversed-log axis transformation for volcano plots: small p-values plot at
# the top. Built on scales::trans_new with log-spaced breaks.
reverselog_trans <- function(base = exp(1)) {
  forward <- function(x) -log(x, base)
  backward <- function(x) base^(-x)
  trans_new(paste0("reverselog-", format(base)), forward, backward,
            log_breaks(base = base),
            domain = c(1e-100, Inf))
}
###########################
#end function
###########################
#
# ###################################################################
# #change comparisons_data, need to be removed before adding to pipeline
# comparisons_data=rbind(comparisons_data,comparisons_data)
# comparisons_data[3:4,1]=c("Control_placenta_vs_Heart","Diabetic_placenta_vs_Heart")
# comparisons_data[3:4,6]=c("Control_placenta_vs_Heart","Diabetic_placenta_vs_Heart")
# comparisons_data[,1]=paste0("Test_",comparisons_data[,1])
# comparisons_data[,3]="/scratch/cqs/zhaos/RolandaLister/20200907_RolandaLister4363_4369_RnaSeq/pipeline/deseq2_proteincoding_genetable/result/test.design"
# comparisons_data[,6]=paste0("Test_",comparisons_data[,6])
# comparisons_data$designFormula="~Tissue + Condition+Tissue:Condition"
# comparisons_data$contrast=c("Condition_Diabetic_vs_Control",paste0("Condition_Diabetic_vs_Control",";","Tissueplacenta.ConditionDiabetic"),
# "Tissue_placenta_vs_Heart",paste0("Tissue_placenta_vs_Heart",";","Tissueplacenta.ConditionDiabetic"))
# #comparisons_data=comparisons_data[1,,drop=FALSE]
# #end change comparisons_data
# ###################################################################
countfiles <- unlist(unique(comparisons_data$CountFile))
allComparisons <- unlist(unique(comparisons_data$ComparisonName))
# BUG FIX: error() is not a base R function; use stop() so duplicated
# comparison names actually abort the run with a readable message.
if (length(allComparisons) != nrow(comparisons_data)) {
  stop(paste0("Comparison names cannot be repeated: ",
              paste(comparisons_data$ComparisonName, collapse = ", ")))
}
allTitles <- comparisons_data$ComparisonTitle
names(allTitles) <- comparisons_data$ComparisonName
# Accumulators filled across all comparisons and written out after the loop.
dataAllOut <- NULL              # union of annotation/count columns of all files
resultAllOut <- NULL            # per-comparison DESeq2 statistics, column-bound
allSigNameList <- list()        # significant gene names per comparison
allSigDirectionList <- list()   # sign of log2FC for each significant gene
sigTableAll <- NULL
sigTableAllGene <- NULL
sigTableAllVar <- c("baseMean","log2FoldChange","lfcSE","stat","pvalue","padj","FoldChange")
# Optional overrides from fileList1.txt (two columns: value <TAB> key).
n_first <- -1                   # -1 = read the entire count file
if (file.exists("fileList1.txt")) {
  options_table <- read.table("fileList1.txt", sep = "\t")
  myoptions <- split(options_table$V1, options_table$V2)
  feature_name_regex <- myoptions$feature_name_regex   # NULL when absent
  if ("n_first" %in% names(myoptions)) {
    n_first <- as.numeric(myoptions$n_first)
  }
} else {
  feature_name_regex <- NA
}
countfile_index <- 1
titles <- NULL
validComparisons <- c()
# Main loop: for each count file, run DESeq2 for every comparison defined on it.
for(countfile_index in c(1:length(countfiles))){
countfile = countfiles[countfile_index]
comparisons = comparisons_data[comparisons_data$CountFile == countfile,]
# Load the count table; n_first != -1 limits rows (fast test mode), otherwise
# read the whole file as csv or delimited text depending on its extension.
if(n_first != -1){
data<-data.frame(fread(countfile, nrows=n_first), row.names=idIndex,check.names=FALSE)
}else{
if (grepl(".csv$",countfile)) {
data<-read.csv(countfile,header=T,row.names=idIndex,as.is=T,check.names=FALSE)
} else {
data<-read.delim(countfile,header=T,row.names=idIndex,as.is=T,check.names=FALSE, sep=countSep)
}
}
# Optionally transpose so that features are rows and samples are columns.
if(transformTable){
data<-t(data)
}
# Optionally rewrite row names using capture group 2 of a configured regex.
if(!is.na(feature_name_regex)){
if(!is.null(feature_name_regex)){
if(feature_name_regex != ""){
rownames(data) = str_match(rownames(data), feature_name_regex)[,2]
}
}
}
data<-data[,colnames(data) != "Feature_length"]
# Leading non-numeric columns (and any "Gene_Id" column) are annotation;
# counts start after the last such column.
colClass<-sapply(data, class)
countNotNumIndex<-which((colClass!="numeric" & colClass!="integer") | grepl("Gene_Id", colnames(data)))
if (length(countNotNumIndex)==0) {
index<-1;
indecies<-c()
} else {
index<-max(countNotNumIndex)+1
indecies<-c(1:(index-1))
}
countData<-data[,c(index:ncol(data))]
countData[is.na(countData)] <- 0
countData<-round(countData)
if(addCountOne){
countData<-countData+1
}
comparisonNames=comparisons$ComparisonName
pairedspearman<-list()
# Merge this file's new columns into the cross-file accumulator.
newVarInData<-setdiff(colnames(data),colnames(dataAllOut))
if (length(newVarInData)>0) {
dataAllOut<-align(dataAllOut,data[,newVarInData,drop=FALSE])
}
resultAllOutVar<-c("baseMean","log2FoldChange","pvalue","padj")
comparison_index = 1
# Inner loop: one full DESeq2 analysis per comparison row.
for(comparison_index in c(1:nrow(comparisons))){
comparisonName=comparisons$ComparisonName[comparison_index]
comparisonTitle=comparisons$ComparisonTitle[comparison_index]
# Optional per-comparison settings; empty/NA cells mean "not set".
if ("pairOnlyCovariant" %in% colnames(comparisons)) {
pairOnlyCovariant=comparisons$pairOnlyCovariant[comparison_index]
if (is.na(pairOnlyCovariant) || (pairOnlyCovariant=="")) {
pairOnlyCovariant=NULL
}
}else{
pairOnlyCovariant=NULL
}
if ("designFormula" %in% colnames(comparisons)) {
designFormula=comparisons$designFormula[comparison_index]
print(paste0("designFormula = ", designFormula, "\n"))
if (is.na(designFormula) || (designFormula=="")) {
designFormula=NULL
} else {
designFormula=as.formula(designFormula)
}
} else {
designFormula=NULL
}
# contrast is a ";"-separated list of result names, wrapped in a list for
# DESeq2::results(contrast=).
if ("contrast" %in% colnames(comparisons)) {
contrast=comparisons$contrast[comparison_index]
if (is.na(contrast) || (contrast=="")) {
contrast=NULL
} else {
contrast=list(strsplit(contrast,";")[[1]])
}
} else {
contrast=NULL
}
if ("collapse_by" %in% colnames(comparisons)) {
collapse_by=comparisons$collapse_by[comparison_index]
if (is.na(collapse_by) || (collapse_by=="")) {
collapse_by=NULL
}
} else {
collapse_by=NULL
}
titles<-c(titles, comparisonTitle)
cat(comparisonName, " ", comparisonTitle, "\n")
designFile=comparisons$ConditionFile[comparison_index]
#comment here as has many group names
# Condition is releveled so the reference group comes first.
gnames=unlist(comparisons[comparison_index, c("ReferenceGroupName", "SampleGroupName")])
#gnames=as.character(unique(designData$Condition))
designData<-read.table(designFile, sep="\t", header=T)
designData$Condition<-factor(designData$Condition, levels=gnames)
# Keep only complete pairs (exactly 2 samples per level of the covariant).
if(!is.null(pairOnlyCovariant)){
if(!any(colnames(designData) == pairOnlyCovariant)){
stop(paste0("Cannot find pairOnlyCovariant ", pairOnlyCovariant, " in ",designFile))
}
tbl = table(designData[,pairOnlyCovariant])
tbl = tbl[tbl == 2]
designData=designData[designData[,pairOnlyCovariant] %in% names(tbl),,drop=F]
}
# Skip (with an .error file) when design samples are missing from the data.
missedSamples<-as.character(designData$Sample)[!(as.character(designData$Sample) %in% colnames(countData))]
if(length(missedSamples) > 0){
message=paste0("There are missed sample defined in design file but not in real data: ", missedSamples)
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next
}
comparisonData<-countData[,colnames(countData) %in% as.character(designData$Sample),drop=F]
if(ncol(comparisonData) != nrow(designData)){
message=paste0("Data not matched, there are ", nrow(designData), " samples in design file ", designFile, " but ", ncol(comparisonData), " samples in data ")
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next
}
comparisonData<-comparisonData[,as.character(designData$Sample)]
# Optionally collapse technical replicates by a design column, then rebuild
# designData/comparisonData with one entry per collapsed sample.
if(!is.null(collapse_by)){
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
dds=collapseReplicates(dds, designData[,collapse_by], designData$Sample)
designData<-designData[!duplicated(designData[,collapse_by]),]
designData$Sample<-designData[,collapse_by]
designData<-designData[,colnames(designData) != collapse_by]
comparisonData<-counts(dds)[,designData$Sample]
rm(dds)
}
if(ncol(designData) >= 3){
cat("Data with covariances!\n")
}else{
cat("Data without covariances!\n")
}
# A "Paired" column marks a paired design.
if (any(colnames(designData)=="Paired")) {
ispaired<-TRUE
cat("Paired Data!\n")
}else{
ispaired<-FALSE
cat("Not Paired Data!\n")
}
# Drop design factors with only one level; they cannot be fit.
temp<-apply(designData,2,function(x) length(unique(x)))
if (any(temp==1)) {
cat(paste0("Factors with only 1 level in design matrix: ",colnames(designData)[which(temp==1)],"\n"))
cat("They will be removed")
cat("\n")
designData<-designData[,which(temp!=1)]
}
# Crude full-rank check; fall back to a Condition-only design when covariates
# are confounded with Condition.
# NOTE(review): identical(temp[,1],temp[,-1]) only compares the first column's
# ranks against the remainder — confirm this catches all rank deficiencies.
temp<-apply(designData[,-1,drop=F],2,rank)
if (length(unique(rowSums(temp)))==1 | identical(temp[,1],temp[,-1])) {
cat(paste0("The model matrix is not full rank, so the model cannot be fit as specified"))
cat("\n")
cat("Only Condition variable will be kept.")
cat("\n")
designData<-designData[,which(colnames(designData)%in% c("Sample","Condition"))]
}
prefix<-paste0(comparisonName, suffix)
# Keep genes reaching the top quartile of nonzero expression in >=1 sample.
if(top25only){
ranks=apply(comparisonData, 2, function(x){
y=x[x > 0]
q=quantile(y)
return(x>=q[4])
})
select=apply(ranks, 1, function(x){
any(x)
})
comparisonData=comparisonData[select,]
}
# Require median count above zeroCount in BOTH groups.
if(detectedInBothGroup){
conds<-unique(designData$Condition)
data1<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
med1<-apply(data1, 1, median) > zeroCount
med2<-apply(data2, 1, median) > zeroCount
med<-med1 & med2
comparisonData<-comparisonData[med,]
}
# Optional quantile-normalization + per-gene Wilcoxon test side analysis.
# NOTE(review): normalize.quantiles needs preprocessCore (its library() call
# is commented out above) and 'conds' is only defined when detectedInBothGroup
# is TRUE — verify before enabling performWilcox.
if(performWilcox){
#quantile and wilcox
quantileData=normalize.quantiles(data.matrix(comparisonData))
colnames(quantileData)=colnames(comparisonData)
rownames(quantileData)=rownames(comparisonData)
write.csv(quantileData, file=paste0(prefix, "_quantile.csv"), row.names = T)
data1<-quantileData[, colnames(quantileData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-quantileData[, colnames(quantileData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
diffData=data.frame(quantileData)
diffData$pvalues=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
test=wilcox.test(d1,d2)
test$p.value
}))
diffData$log2MedianFoldChange=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
log2(median(d2) / median(d1))
}))
diffData$log2MeanFoldChange=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
log2(mean(d2) / mean(d1))
}))
diffData=diffData[order(diffData$pvalues),]
write.csv(diffData, file=paste0(prefix, "_quantile_wilcox.csv"), row.names = T)
filterData=diffData[diffData$pvalues<=pvalue & abs(diffData$log2MedianFoldChange) > log2(foldChange),]
write.csv(filterData, file=paste0(prefix, "_quantile_wilcox_sig.csv"), row.names = T)
}
# Keep genes whose median count reaches minMedianInGroup in EITHER group.
if(minMedianInGroup > 0){
conds<-unique(designData$Condition)
data1<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
med1<-apply(data1, 1, median) >= minMedianInGroup
med2<-apply(data2, 1, median) >= minMedianInGroup
med<-med1 | med2
geneNumBeforeFilter=nrow(comparisonData)
comparisonData<-comparisonData[med,]
cat(nrow(comparisonData), " genes with minimum median count in group larger or equals than ", minMedianInGroup, ". ",geneNumBeforeFilter-nrow(comparisonData)," genes removed\n")
}
# Not enough genes left: record an .error file and skip this comparison.
if (nrow(comparisonData)<=1) {
message=paste0("Error: Only ", nrow(comparisonData), " Genes can be used in DESeq2 analysis in comparison ",comparisonName,", ignored. \n")
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next;
}
validComparisons<-c(validComparisons, comparisonName)
# Paired design: per-pair Spearman correlation table and scatter plots.
if(ispaired){
pairedSamples = unique(designData$Paired)
spcorr<-unlist(lapply(c(1:length(pairedSamples)), function(x){
samples<-designData$Sample[designData$Paired==pairedSamples[x]]
cor(comparisonData[,samples[1]],comparisonData[,samples[2]],method="spearman")
}))
sptable<-data.frame(Name=pairedSamples, Spcorr=spcorr)
write.csv(sptable, file=paste0(prefix, "_Spearman.csv"), row.names=FALSE)
dir.create("details", showWarnings = FALSE)
lapply(c(1:length(pairedSamples)), function(x){
samples<-designData$Sample[designData$Paired==pairedSamples[x]]
log2c1<-log2(comparisonData[,samples[1]]+1)
log2c2<-log2(comparisonData[,samples[2]]+1)
png(paste0("details/", prefix, "_Spearman_", pairedSamples[x], ".png"), width=2000, height=2000, res=300)
plot(log2c1, log2c2, xlab=paste0(samples[1], " [log2(Count + 1)]"), ylab=paste0(samples[2], " [log2(Count + 1)]"))
text(3,15,paste0("SpearmanCorr=", sprintf("%0.3f", spcorr[x])))
dev.off()
})
pairedspearman[[comparisonName]]<-spcorr
}
# Drop all-zero genes; prefix column names with the pair id for paired data.
notEmptyData<-apply(comparisonData, 1, max) > 0
comparisonData<-comparisonData[notEmptyData,]
if(ispaired){
colnames(comparisonData)<-unlist(lapply(c(1:ncol(comparisonData)), function(i){paste0(designData$Paired[i], "_", colnames(comparisonData)[i])}))
}
rownames(designData)<-colnames(comparisonData)
conditionColors<-as.matrix(data.frame(Group=c("red", "blue")[designData$Condition]))
write.csv(comparisonData, file=paste0(prefix, ".csv"))
#some basic graph
# Intercept-only dds used just for size factors / normalized counts.
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
colnames(dds)<-colnames(comparisonData)
dds<-myEstimateSizeFactors(dds)
# Optionally drop genes below the normalized baseMean threshold.
if(filterBaseMean){
cat(paste0("filter by basemean: ", filterBaseMeanValue, "\n"))
baseMeans = rowMeans(counts(dds, normalized=TRUE))
write.csv(baseMeans, file=paste0(prefix, ".basemean.csv"))
dds<-dds[baseMeans > filterBaseMeanValue,]
comparisonData=comparisonData[baseMeans > filterBaseMeanValue,]
}
rld_normed<-log2(counts(dds,normalized=TRUE) + 1)
write.csv(format(rld_normed, digits=3), paste0(prefix, "_DESeq2-log2-normalized-counts.csv"))
#draw density graph
rldmatrix<-as.matrix(log2(counts(dds,normalized=FALSE) + 1))
rsdata<-melt(rldmatrix)
colnames(rsdata)<-c("Gene", "Sample", "log2Count")
png(filename=paste0(prefix, "_DESeq2-log2-density.png"), width=4000, height=3000, res=300)
g<-ggplot(rsdata) + geom_density(aes(x=log2Count, colour=Sample)) + xlab("DESeq2 log2 transformed count") + guides(color = FALSE)
print(g)
dev.off()
# Per-sample density panels; canvas grows with the number of samples.
width=max(4000, ncol(rldmatrix) * 40 + 1000)
height=max(3000, ncol(rldmatrix) * 40)
png(filename=paste0(prefix, "_DESeq2-log2-density-individual.png"), width=width, height=height, res=300)
g<-ggplot(rsdata) + geom_density(aes(x=log2Count, colour=Sample)) + facet_wrap(~Sample, scales = "free") + xlab("DESeq2 log2 transformed count") + guides(color = FALSE)
print(g)
dev.off()
# Dispersion fit: "parametric" needs enough genes, else "mean".
fitType<-"parametric"
if(nrow(comparisonData) < 5){
fitType<-"mean"
}
# vsd with retry: on "every gene contains at least one zero" drop the most
# zero-heavy samples and retry; on other failures fall back to fitType="mean".
# NOTE(review): 'removed', 'zeronumbers' and 'percent10' are not initialized
# anywhere in this chunk — confirm they are defined upstream or this retry
# branch will itself error.
while(1){
#varianceStabilizingTransformation
vsdres<-try(vsd <- varianceStabilizingTransformation(dds, blind=TRUE,fitType=fitType))
if(class(vsdres) == "try-error"){
if(grepl("every gene contains at least one zero", vsdres[1])){
removed<-removed+1
keptNumber<-length(zeronumbers) - percent10 * removed
keptSample<-zeronumbers[1:keptNumber]
excludedSample<-zeronumbers[(keptNumber+1):length(zeronumbers)]
comparisonData<-comparisonData[, colnames(comparisonData) %in% keptSample]
designData<-designData[rownames(designData) %in% keptSample,]
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
colnames(dds)<-colnames(comparisonData)
} else if (grepl("newsplit: out of vertex space", vsdres[1]) | fitType != "mean") {
message=paste0("Warning: varianceStabilizingTransformation function can't run. fitType was set to mean to try again")
warning(message)
fitType<-"mean"
writeLines(message,paste0(comparisonName,".error"))
} else {
message=paste0(paste0("Error: varianceStabilizingTransformation function can't run. ", vsdres))
writeLines(message,paste0(comparisonName,".error"))
stop(message)
}
}else if(all(is.na(assay(vsd)))){
fitType<-"mean"
} else{
conditionColors<-as.matrix(data.frame(Group=c("red", "blue")[designData$Condition]))
break
}
}
# QC plots on the variance-stabilized matrix: PCA and clustering heatmap.
if(nrow(comparisonData) > 1){
assayvsd<-assay(vsd)
write.csv(format(assayvsd, digits=3), file=paste0(prefix, "_DESeq2-vsd.csv"))
rldmatrix=as.matrix(assayvsd)
#draw pca graph
drawPCA(paste0(prefix,"_geneAll"), rldmatrix, showLabelInPCA, designData, designData$Condition, outputFormat)
if(exists("top25cvInHCA") && top25cvInHCA){
rv<-rowVars(rldmatrix)
countHT<-rldmatrix[rv>=quantile(rv)[4],]
drawHCA(paste0(prefix,"_geneTop25variance"), countHT, ispaired, designData, conditionColors, gnames, outputFormat)
}else{
#draw heatmap
drawHCA(paste0(prefix,"_geneAll"), rldmatrix, ispaired, designData, conditionColors, gnames, outputFormat)
}
}
#different expression analysis
# Default design: any covariates (columns after Sample/Condition) + Condition.
if (is.null(designFormula)) {
designFormula=as.formula(paste0("~",paste0(c(colnames(designData)[-c(1:2)],"Condition"),collapse="+")))
}
cat(paste0("", designFormula), "\n")
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = designFormula)
dds<-myEstimateSizeFactors(dds)
bpparam<-MulticoreParam(thread)
# parallel<-ifelse(thread <= 1, FALSE, TRUE)
parallel=FALSE
# DESeq with two fallbacks: gene-wise dispersion estimates, or fitType="mean".
ddsres<-try(dds <- DESeq(dds,fitType=fitType, parallel=parallel, BPPARAM=bpparam))
if(class(ddsres) == "try-error"){
if( grepl("One can instead use the gene-wise estimates as final estimates", ddsres[1])){
dds <- estimateDispersionsGeneEst(dds)
dispersions(dds) <- mcols(dds)$dispGeneEst
dds<-nbinomWaldTest(dds)
}else if(grepl("newsplit: out of vertex space", ddsres[1])){
dds <- DESeq(dds,fitType="mean", parallel=parallel, BPPARAM=bpparam)
}else{
stop(paste0("DESeq2 failed: ", ddsres[1]))
}
}
# Extract results, using the per-comparison contrast when one was configured.
if (!is.null(contrast)) {
res<-results(dds, cooksCutoff=cooksCutoff, alpha=alpha, parallel=parallel, BPPARAM=bpparam,contrast=contrast)
} else {
res<-results(dds, cooksCutoff=cooksCutoff, alpha=alpha, parallel=parallel, BPPARAM=bpparam)
}
res$FoldChange<-2^res$log2FoldChange
# Per-condition mean of normalized counts, appended as baseMean_<group>.
baseMeanPerLvl <- sapply( levels(dds$Condition), function(lvl) rowMeans( counts(dds,normalized=TRUE)[,dds$Condition == lvl,drop=FALSE] ) )
colnames(baseMeanPerLvl)<-paste0("baseMean_", colnames(baseMeanPerLvl))
res<-cbind(res, baseMeanPerLvl)
cat("DESeq2 finished.\n")
# Significance: raw p or adjusted p (by useRawPvalue) plus fold-change cutoff.
if (useRawPvalue==1) {
select<-(!is.na(res$pvalue)) & (res$pvalue<pvalue) & ((res$log2FoldChange >= log2(foldChange)) | (res$log2FoldChange <= -log2(foldChange)))
} else {
select<-(!is.na(res$padj)) & (res$padj<pvalue) & ((res$log2FoldChange >= log2(foldChange)) | (res$log2FoldChange <= -log2(foldChange)))
}
# Re-attach annotation columns (if any) and write full + significant tables.
if(length(indecies) > 0){
inddata<-data[rownames(comparisonData),indecies,drop=F]
tbb<-cbind(inddata, as.data.frame(comparisonData), res)
}else{
tbb<-cbind(as.data.frame(comparisonData), res)
}
tbbselect<-tbb[select,,drop=F]
tbbAllOut<-as.data.frame(tbb[,resultAllOutVar,drop=F])
tbbAllOut$Significant<-select
colnames(tbbAllOut)<-paste0(colnames(tbbAllOut)," (",comparisonName,")")
# Accumulate this comparison's stats, row-aligned to dataAllOut.
resultAllOut<-cbind(as.data.frame(resultAllOut)[row.names(dataAllOut),],as.matrix(tbbAllOut[row.names(dataAllOut),]))
row.names(resultAllOut)<-row.names(dataAllOut)
tbb<-tbb[order(tbb$pvalue),,drop=F]
write.csv(as.data.frame(tbb),paste0(prefix, "_DESeq2.csv"))
tbbselect<-tbbselect[order(tbbselect$pvalue),,drop=F]
sigFile=paste0(prefix, "_DESeq2_sig.csv")
sigTable<-as.data.frame(tbbselect)
write.csv(sigTable,sigFile)
allSigNameList[[comparisonName]]<-row.names(sigTable)
allSigDirectionList[[comparisonName]]<-sign(sigTable$log2FoldChange)
if(nrow(sigTable) > 0){
sigTable$comparisonName<-comparisonName
if (("Feature_gene_name" %in% colnames(sigTable)) & (!("Feature_gene_name" %in% sigTableAllVar))){
sigTableAllVar<-c("Feature_gene_name", sigTableAllVar)
}
sigTableAll<-rbind(sigTableAll,sigTable[,c("comparisonName",sigTableAllVar),drop=FALSE],make.row.names=FALSE)
sigTableAllGene<-c(sigTableAllGene,row.names(sigTable))
}
# Locate a gene-symbol column (case-insensitive) for GSEA/exports.
geneNameField = NULL
lowColNames = tolower(colnames(tbb))
for(name in c("Feature_gene_name", "Gene.Symbol", "Gene_Symbol", "Gene Symbol")){
lowName = tolower(name)
if(lowName %in% lowColNames){
geneNameField=colnames(tbb)[match(lowName, lowColNames)]
break
}
}
# GSEA .rnk (gene + Wald stat) and optional significant-gene-name list.
if(!is.null(geneNameField)){
write.table(tbb[,c(geneNameField, "stat"),drop=F],paste0(prefix, "_DESeq2_GSEA.rnk"),row.names=F,col.names=F,sep="\t", quote=F)
if(exportSignificantGeneName){
write.table(tbbselect[,c(geneNameField),drop=F], paste0(prefix, "_DESeq2_sig_genename.txt"),row.names=F,col.names=F,sep="\t", quote=F)
}
}else{
write.table(tbb[,c("stat"),drop=F],paste0(prefix, "_DESeq2_GSEA.rnk"),row.names=T,col.names=F,sep="\t", quote=F)
if(exportSignificantGeneName){
write.table(data.frame(name=rownames(tbbselect)), paste0(prefix, "_DESeq2_sig_genename.txt"),row.names=F,col.names=F,sep="\t", quote=F)
}
}
# PCA/HCA restricted to DE and non-DE gene subsets.
# NOTE(review): drawPCA is called here with conditionColors in the 'condition'
# parameter slot — drawPCA's signature takes a condition factor there; it looks
# like designData$Condition was intended (as in the call above). Confirm.
if(showDEGeneCluster){
siggenes<-rownames(rldmatrix) %in% rownames(tbbselect)
nonDEmatrix<-rldmatrix[!siggenes,,drop=F]
DEmatrix<-rldmatrix[siggenes,,drop=F]
drawPCA(paste0(prefix,"_geneDE"),DEmatrix , showLabelInPCA, designData, conditionColors, outputFormat)
drawHCA(paste0(prefix,"_geneDE"),DEmatrix , ispaired, designData, conditionColors, gnames, outputFormat)
drawPCA(paste0(prefix,"_geneNotDE"), nonDEmatrix, showLabelInPCA, designData, conditionColors, outputFormat)
drawHCA(paste0(prefix,"_geneNotDE"), nonDEmatrix, ispaired, designData, conditionColors, gnames, outputFormat)
}
#Top 25 Significant genes barplot
sigDiffNumber<-nrow(tbbselect)
if (sigDiffNumber>0) {
if (sigDiffNumber>25) {
print(paste0("More than 25 genes were significant. Only the top 25 genes will be used in barplot"))
diffResultSig<-tbbselect[order(tbbselect$pvalue)[1:25],]
} else {
diffResultSig<-tbbselect
}
if(!is.null(geneNameField)){
diffResultSig$Name<-as.character(diffResultSig[,geneNameField])
}else{
diffResultSig$Name<-sapply(strsplit(row.names(diffResultSig),";"),function(x) x[1])
}
# Disambiguate duplicate display names with the full row name.
if (any(duplicated(diffResultSig$Name))) {
whichIndex<-which(duplicated(diffResultSig$Name))
diffResultSig$Name[whichIndex]<-paste0(row.names(diffResultSig)[whichIndex], ":", diffResultSig$Name[whichIndex])
}
diffResultSig$Name <- factor(diffResultSig$Name, levels=diffResultSig$Name[order(diffResultSig$log2FoldChange)])
diffResultSig<-as.data.frame(diffResultSig)
p<-ggplot(diffResultSig,aes(x=Name,y=log2FoldChange,order=log2FoldChange))+geom_bar(stat="identity")+
coord_flip()+
# geom_abline(slope=0,intercept=1,colour="red",linetype = 2)+
scale_y_continuous(name=bquote(log[2]~Fold~Change))+
theme_bw3() +
theme(axis.text = element_text(colour = "black"))
filePrefix<-paste0(prefix,"_DESeq2_sig_barplot")
drawPlot(filePrefix, outputFormat, 7, 7, 3000, 3000, p, "PCA")
} else {
print(paste0("No gene with adjusted p value less than ",pvalue," and fold change larger than ",foldChange))
}
#volcano plot
# Points colored red (up) / blue (down) / grey (not significant).
changeColours<-c(grey="grey",blue="blue",red="red")
diffResult<-as.data.frame(tbb)
diffResult$log10BaseMean<-log10(diffResult$baseMean)
diffResult$colour<-"grey"
if (useRawPvalue==1) {
diffResult<-subset(diffResult, !is.na(pvalue))
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
} else {
diffResult<-subset(diffResult, !is.na(padj))
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
}
write.csv(diffResult, file=paste0(prefix, "_DESeq2_volcanoPlot.csv"))
yname=bquote(-log10(p~value))
xname=bquote(log[2]~Fold~Change)
p<-ggplot(diffResult,aes(x=log2FoldChange,y=pvalue))+
scale_y_continuous(trans=reverselog_trans(10),name=yname) +
geom_point(aes(size=log10BaseMean,colour=colour))+
scale_color_manual(values=changeColours,guide = FALSE)+
scale_x_continuous(name=xname)+
geom_hline(yintercept = 1,colour="grey",linetype = "dotted")+
geom_vline(xintercept = 0,colour="grey",linetype = "dotted")+
guides(size=guide_legend(title=bquote(log[10]~Base~Mean)))+
theme_bw()+
scale_size(range = c(3, 7))+
theme(axis.text = element_text(colour = "black",size=30),
axis.title = element_text(size=30),
legend.text= element_text(size=30),
legend.title= element_text(size=30))
if(!showVolcanoLegend){
p<-p+ theme(legend.position = "none")
pdfWidth=10
otherWidth=3000
}else{
pdfWidth=15
otherWidth=4500
}
filePrefix<-paste0(prefix,"_DESeq2_volcanoPlot")
drawPlot(filePrefix, outputFormat, pdfWidth, 10, otherWidth, 3000, p, "Volcano")
# Optional EnhancedVolcano rendering; API differs by package version.
if(require("EnhancedVolcano")){
if(!("Feature_gene_name" %in% colnames(diffResult))){
diffResult$Feature_gene_name=rownames(diffResult)
}
if(packageVersion("EnhancedVolcano") == '1.8.0'){
if(useRawPvalue == 1){
yname=bquote(-log10(p~value))
yvar="pvalue"
}else{
yname=bquote(-log10(adjusted~p~value))
yvar="padj"
}
p<-EnhancedVolcano(diffResult,
lab = diffResult$Feature_gene_name,
x = 'log2FoldChange',
y = yvar,
title = comparisonTitle,
pCutoff = pvalue,
FCcutoff = log2(foldChange),
pointSize = 3.0,
labSize = 6.0,
colAlpha = 1,
subtitle = NULL) + ylab(yname)
}else{
yname=bquote(-log10(p~value))
p<-EnhancedVolcano(diffResult,
lab = diffResult$Feature_gene_name,
x = 'log2FoldChange',
y = 'pvalue',
title = comparisonTitle,
pCutoff = pvalue,
pCutoffCol = "padj",
FCcutoff = log2(foldChange),
pointSize = 3.0,
labSize = 6.0,
colAlpha = 1,
subtitle = NULL) + ylab(yname)
}
filePrefix<-paste0(prefix,"_DESeq2_volcanoEnhanced")
drawPlot(filePrefix, outputFormat, 10, 10, 3000, 3000, p, "Volcano")
}
}
# Boxplot of per-pair Spearman correlations across all paired comparisons
# of this count file.
if(length(pairedspearman) > 0){
filePrefix<-paste0(prefix, "_", ifelse(minMedianInGroup > 0, paste0("spearman_min", minMedianInGroup), "spearman"))
fwidth<-max(2000, 1000 * length(pairedspearman))
for(format in outputFormat){
openPlot(filePrefix, format, 7, 7, fwidth, 2000, "Spearman correlation")
boxplot(pairedspearman)
dev.off()
}
}
}
# Prefix for files that aggregate results across all comparisons.
allprefix <- paste0(basename(inputfile), suffix)
#Venn for all significant genes
#Output all significant genes table
if(!is.null(sigTableAll)){
sigTableAll<-cbind(Gene=sigTableAllGene,sigTableAll)
write.csv(sigTableAll,paste0(allprefix, "_DESeq2_allSig.csv"),row.names=FALSE)
#Do venn if length between 2-5
if (length(allSigNameList)>=2 & length(allSigNameList)<=5) {
# Local fork of VennDiagram::venn.diagram that draws straight to the
# currently open graphics device (via grid) instead of writing its own
# file. The filename/height/width/resolution/units/compression arguments
# are kept only for signature compatibility and are ignored.
# x: named list of 1-5 character vectors (gene sets); category.names
# defaults to names(x). fill=NA selects a default color set by list size.
# Returns 1 after drawing (the grob list itself is not returned).
venn.diagram1<-function (x, filename, height = 3000, width = 3000, resolution = 500,
units = "px", compression = "lzw", na = "stop", main = NULL,
sub = NULL, main.pos = c(0.5, 1.05), main.fontface = "plain",
main.fontfamily = "serif", main.col = "black", main.cex = 1,
main.just = c(0.5, 1), sub.pos = c(0.5, 1.05), sub.fontface = "plain",
sub.fontfamily = "serif", sub.col = "black", sub.cex = 1,
sub.just = c(0.5, 1), category.names = names(x), force.unique = TRUE,
fill=NA,
...)
{
# Pick default fill colors matched to the number of sets when the caller
# did not supply any.
if (is.na(fill[1])) {
if (length(x)==5) {
fill = c("dodgerblue", "goldenrod1", "darkorange1", "seagreen3", "orchid3")
} else if (length(x)==4) {
fill = c("dodgerblue", "goldenrod1", "seagreen3", "orchid3")
} else if (length(x)==3) {
fill = c("dodgerblue", "goldenrod1", "seagreen3")
} else if (length(x)==2) {
fill = c("dodgerblue", "goldenrod1")
}
}
# Deduplicate each set so areas/intersections count distinct members.
if (force.unique) {
for (i in 1:length(x)) {
x[[i]] <- unique(x[[i]])
}
}
# NA policy: "none" keeps NAs, "stop" aborts on any NA, "remove" drops them.
if ("none" == na) {
x <- x
}
else if ("stop" == na) {
for (i in 1:length(x)) {
if (any(is.na(x[[i]]))) {
stop("NAs in dataset", call. = FALSE)
}
}
}
else if ("remove" == na) {
for (i in 1:length(x)) {
x[[i]] <- x[[i]][!is.na(x[[i]])]
}
}
else {
stop("Invalid na option: valid options are \"none\", \"stop\", and \"remove\"")
}
# Only 1-5 sets are supported by the VennDiagram draw.* functions.
if (0 == length(x) | length(x) > 5) {
stop("Incorrect number of elements.", call. = FALSE)
}
# Dispatch on set count: compute every pairwise/higher-order intersection
# size required by the corresponding VennDiagram draw.* routine.
if (1 == length(x)) {
list.names <- category.names
if (is.null(list.names)) {
list.names <- ""
}
grob.list <- VennDiagram::draw.single.venn(area = length(x[[1]]),
category = list.names, ind = FALSE,fill=fill, ...)
}
else if (2 == length(x)) {
grob.list <- VennDiagram::draw.pairwise.venn(area1 = length(x[[1]]),
area2 = length(x[[2]]), cross.area = length(intersect(x[[1]],
x[[2]])), category = category.names, ind = FALSE,
fill=fill,
...)
}
else if (3 == length(x)) {
A <- x[[1]]
B <- x[[2]]
C <- x[[3]]
list.names <- category.names
nab <- intersect(A, B)
nbc <- intersect(B, C)
nac <- intersect(A, C)
nabc <- intersect(nab, C)
grob.list <- VennDiagram::draw.triple.venn(area1 = length(A),
area2 = length(B), area3 = length(C), n12 = length(nab),
n23 = length(nbc), n13 = length(nac), n123 = length(nabc),
category = list.names, ind = FALSE, list.order = 1:3,
fill=fill,
...)
}
else if (4 == length(x)) {
A <- x[[1]]
B <- x[[2]]
C <- x[[3]]
D <- x[[4]]
list.names <- category.names
n12 <- intersect(A, B)
n13 <- intersect(A, C)
n14 <- intersect(A, D)
n23 <- intersect(B, C)
n24 <- intersect(B, D)
n34 <- intersect(C, D)
n123 <- intersect(n12, C)
n124 <- intersect(n12, D)
n134 <- intersect(n13, D)
n234 <- intersect(n23, D)
n1234 <- intersect(n123, D)
grob.list <- VennDiagram::draw.quad.venn(area1 = length(A),
area2 = length(B), area3 = length(C), area4 = length(D),
n12 = length(n12), n13 = length(n13), n14 = length(n14),
n23 = length(n23), n24 = length(n24), n34 = length(n34),
n123 = length(n123), n124 = length(n124), n134 = length(n134),
n234 = length(n234), n1234 = length(n1234), category = list.names,
ind = FALSE, fill=fill,...)
}
else if (5 == length(x)) {
A <- x[[1]]
B <- x[[2]]
C <- x[[3]]
D <- x[[4]]
E <- x[[5]]
list.names <- category.names
n12 <- intersect(A, B)
n13 <- intersect(A, C)
n14 <- intersect(A, D)
n15 <- intersect(A, E)
n23 <- intersect(B, C)
n24 <- intersect(B, D)
n25 <- intersect(B, E)
n34 <- intersect(C, D)
n35 <- intersect(C, E)
n45 <- intersect(D, E)
n123 <- intersect(n12, C)
n124 <- intersect(n12, D)
n125 <- intersect(n12, E)
n134 <- intersect(n13, D)
n135 <- intersect(n13, E)
n145 <- intersect(n14, E)
n234 <- intersect(n23, D)
n235 <- intersect(n23, E)
n245 <- intersect(n24, E)
n345 <- intersect(n34, E)
n1234 <- intersect(n123, D)
n1235 <- intersect(n123, E)
n1245 <- intersect(n124, E)
n1345 <- intersect(n134, E)
n2345 <- intersect(n234, E)
n12345 <- intersect(n1234, E)
grob.list <- VennDiagram::draw.quintuple.venn(area1 = length(A),
area2 = length(B), area3 = length(C), area4 = length(D),
area5 = length(E), n12 = length(n12), n13 = length(n13),
n14 = length(n14), n15 = length(n15), n23 = length(n23),
n24 = length(n24), n25 = length(n25), n34 = length(n34),
n35 = length(n35), n45 = length(n45), n123 = length(n123),
n124 = length(n124), n125 = length(n125), n134 = length(n134),
n135 = length(n135), n145 = length(n145), n234 = length(n234),
n235 = length(n235), n245 = length(n245), n345 = length(n345),
n1234 = length(n1234), n1235 = length(n1235), n1245 = length(n1245),
n1345 = length(n1345), n2345 = length(n2345), n12345 = length(n12345),
category = list.names, ind = FALSE,fill=fill, ...)
}
else {
stop("Invalid size of input object")
}
# Optional sub/main titles are layered onto the grob list before drawing.
# NOTE(review): add.title is called unqualified; assumes library(VennDiagram)
# has been attached (it is, at the top of this script).
if (!is.null(sub)) {
grob.list <- add.title(gList = grob.list, x = sub, pos = sub.pos,
fontface = sub.fontface, fontfamily = sub.fontfamily,
col = sub.col, cex = sub.cex)
}
if (!is.null(main)) {
grob.list <- add.title(gList = grob.list, x = main, pos = main.pos,
fontface = main.fontface, fontfamily = main.fontfamily,
col = main.col, cex = main.cex)
}
# Draw into the device the caller already opened (openPlot).
grid.newpage()
grid.draw(grob.list)
return(1)
# return(grob.list)
}
# Build a vector of n distinct colors from an RColorBrewer palette.
# When n exceeds the palette's maximum, interpolate with colorRampPalette;
# when n is below brewer.pal's minimum (3), trim the returned colors to n.
makeColors<-function(n,colorNames="Set1") {
  paletteMax <- brewer.pal.info[colorNames, "maxcolors"]
  if (n > paletteMax) {
    # More colors requested than the palette holds: interpolate.
    return(colorRampPalette(brewer.pal(paletteMax, colorNames))(n))
  }
  picked <- brewer.pal(n, colorNames)
  # brewer.pal never returns fewer than 3 colors; keep only the first n.
  picked[seq_len(min(length(picked), n))]
}
colors<-makeColors(length(allSigNameList))
for(format in outputFormat){
filePrefix<-paste0(allprefix,"_significantVenn")
openPlot(filePrefix, format, 7, 7, 2000, 2000, "Venn")
venn.diagram1(allSigNameList,cex=2,cat.cex=2,cat.col=colors,fill=colors)
dev.off()
}
}
#Do heatmap significant genes if length larger or equal than 2
if (length(allSigNameList)>=2) {
temp<-cbind(unlist(allSigNameList),unlist(allSigDirectionList))
colnames(temp)<-c("Gene","Direction")
temp<-cbind(temp,comparisonName=rep(names(allSigNameList),sapply(allSigNameList,length)))
temp<-data.frame(temp)
dataForFigure<-temp
#geting dataForFigure order in figure
temp$Direction<-as.integer(as.character(temp$Direction))
temp<-acast(temp, Gene~comparisonName ,value.var="Direction")
temp<-temp[do.call(order, data.frame(temp)),]
maxNameChr<-max(nchar(row.names(temp)))
if (maxNameChr>70) {
tmpNames<-substr(row.names(temp),0,70)
if(length(tmpNames) == length(unique(tmpNames))){
row.names(temp)<-tmpNames
dataForFigure$Gene<-substr(dataForFigure$Gene,0,70)
warning(paste0("The gene names were too long (",maxNameChr,"). Only first 70 letters were kept."))
}
}
dataForFigure$Gene<-factor(dataForFigure$Gene,levels=row.names(temp))
g<-ggplot(dataForFigure, aes(comparisonName, Gene))+
geom_tile(aes(fill=Direction), color="white") +
scale_fill_manual(values=c("light green", "red")) +
theme(axis.text.x = element_text(angle=90, vjust=0.5, size=11, hjust=0.5, face="bold"),
axis.text.y = element_text(size=textSize, face="bold")) +
coord_equal()
width=min(max(2500, 60 * length(unique(dataForFigure$comparisonName))),30000)
height=min(max(2000, 40 * length(unique(dataForFigure$Gene))),30000)
filePrefix<-paste0(allprefix,"_significantHeatmap")
drawPlot(filePrefix, outputFormat, 7, 7, width, height, g, "Significant Heatmap")
}
}
if (! is.null(resultAllOut)) {
#write a file with all information
resultAllOut<-cbind(dataAllOut,resultAllOut[row.names(dataAllOut),])
write.csv(resultAllOut,paste0(allprefix, "_DESeq2.csv"))
if(length(validComparisons) > 1 ){
#volcano plot for all comparisons
temp<-resultAllOut[,-(1:ncol(dataAllOut))]
diffResult<-NULL
diffResultVar<-unique(sapply(strsplit(colnames(temp)," "),function(x) x[1]))
for (i in 1:(length(validComparisons))) {
temp1<-temp[,(i*length(diffResultVar)-(length(diffResultVar)-1)):(i*length(diffResultVar))]
colnames(temp1)<-diffResultVar
temp1$Comparison<-validComparisons[i]
if (is.null(diffResult)) {
diffResult<-temp1
} else {
diffResult<-rbind(diffResult,temp1)
}
}
changeColours<-c(grey="grey",blue="blue",red="red")
diffResult$log10BaseMean<-log10(diffResult$baseMean)
diffResult$Comparison<-allTitles[diffResult$Comparison]
diffResult$Comparison<-factor(diffResult$Comparison,levels=unique(diffResult$Comparison))
diffResult$colour<-"grey"
if (useRawPvalue==1) {
diffResult<-subset(diffResult, !is.na(pvalue))
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
} else {
diffResult<-subset(diffResult, !is.na(padj))
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
}
if (useRawPvalue==1) {
p<-ggplot(diffResult,aes(x=log2FoldChange,y=pvalue))+
scale_y_continuous(trans=reverselog_trans(10),name=bquote(p~value))
} else {
p<-ggplot(diffResult,aes(x=log2FoldChange,y=padj))+
scale_y_continuous(trans=reverselog_trans(10),name=bquote(Adjusted~p~value))
}
p<-p+geom_point(aes(size=log10BaseMean,colour=colour))+
scale_color_manual(values=changeColours,guide = FALSE)+
scale_x_continuous(name=bquote(log[2]~Fold~Change))+
geom_hline(yintercept = 1,colour="grey",linetype = "dotted")+
geom_vline(xintercept = 0,colour="grey",linetype = "dotted")+
guides(size=guide_legend(title=bquote(log[10]~Base~Mean)))+
theme_bw()+
scale_size(range = c(3, 7))+
facet_grid(. ~ Comparison)+
theme(axis.text = element_text(colour = "black",size=25),
axis.title = element_text(size=25),
legend.text= element_text(size=25),
legend.title= element_text(size=25),
strip.text.x = element_text(size = 25),
strip.background=element_rect(fill="white"))
pwidth<-max(12,4*length(allComparisons)+4)
owidth<-max(4000, 1500*length(allComparisons)+1000)
filePrefix<-paste0(allprefix,"_DESeq2_volcanoPlot")
drawPlot(filePrefix, outputFormat, pwidth, 7, owidth, 2000, p, "Volcano")
#output a summary table with numbers of gisnificant changed genes
sigGeneSummaryTable<-t(table(diffResult[,"Significant"],diffResult[,"Comparison"]))
notSigIndex<-match("0", colnames(sigGeneSummaryTable))
if(is.na(notSigIndex)){
notSignificant=0
}else{
notSignificant=sigGeneSummaryTable[,notSigIndex]
}
sigIndex<-match("1", colnames(sigGeneSummaryTable))
if(is.na(sigIndex)){
significant=0
}else{
significant=sigGeneSummaryTable[,sigIndex]
}
dSigGeneSummaryTable<-data.frame(Comparison=row.names(sigGeneSummaryTable),GeneInComparison=rowSums(sigGeneSummaryTable),NotSignificant=notSignificant,Significant=significant)
write.csv(dSigGeneSummaryTable,paste0(allprefix, "_DESeq2_sigGeneSummary.csv"),row.names=FALSE)
}
}
#export session information
writeLines(capture.output(sessionInfo()), paste0(basename(inputfile),".DESeq2.SessionInfo.txt"))
deseq2version<-paste0("DESeq2,v", packageVersion("DESeq2"))
writeLines(deseq2version, paste0(basename(inputfile),".DESeq2.version"))
#save R Data
save.image(paste0(basename(inputfile),".DESeq2.RData"))
| /lib/Comparison/DESeq2.r | permissive | shengqh/ngsperl | R | false | false | 53,163 | r |
# ---- Predefined run parameters (normally injected by the pipeline) ----
# Working directory for all output files.
rootdir<-"/data/h_gelbard_lab/projects/20230606_9686_AG_RNAseq_iSGS_estrogen/deseq2_proteincoding_genetable/result"
# Comparison definition table (one row per comparison).
inputfile<-"RNAseq_human.define"
# Significance cutoff; applied to raw p or FDR depending on useRawPvalue.
pvalue<-0.05
useRawPvalue<-0
# Minimum fold change for calling a gene significant.
foldChange<-2
# Keep genes whose median count is >= this value in at least one group.
minMedianInGroup<-5
detectedInBothGroup<-0
showLabelInPCA<-1
showDEGeneCluster<-0
# When 1, add a pseudo-count of 1 to every cell before analysis.
addCountOne<-0
usePearsonInHCA<-1
top25only<-0
performWilcox<-0
textSize<-10
# When 1, the count table is transposed after reading.
transformTable<-0
exportSignificantGeneName<-1
thread<-8
# NOTE: this line overrides several of the 0/1 flags above with logicals.
outputPdf<-FALSE;outputPng<-TRUE;outputTIFF<-FALSE;showVolcanoLegend<-TRUE;usePearsonInHCA<-TRUE;showLabelInPCA<-TRUE;top25cvInHCA<-FALSE;
cooksCutoff<-0.99
#predefined_condition_end
# Use cairo for bitmap devices so png/tiff work on headless servers.
options(bitmapType='cairo')
# Build the output-file suffix from the filtering options in effect, so
# file names document how the data were filtered.
suffix<-"";
if(top25only){
suffix=paste0(suffix,"_top25")
}
if(detectedInBothGroup){
suffix=paste0(suffix, "_detectedInBothGroup")
}
if(minMedianInGroup > 0){
suffix=paste0(suffix, "_min", minMedianInGroup)
}
# alpha is the DESeq2 results() significance level; the suffix records
# whether raw p-values or FDR were used.
if(useRawPvalue){
alpha<-0.1
suffix=paste0(suffix, "_pvalue", pvalue)
}else{
alpha<-pvalue
suffix=paste0(suffix, "_fdr", pvalue)
}
# zeroCount is the "detected" threshold; shifts by 1 with the pseudo-count.
zeroCount=0
if(addCountOne){
zeroCount=1
minMedianInGroup=minMedianInGroup+1
}
# Fall-back defaults for options the caller may not have predefined.
if(!exists("idIndex")){
idIndex<-1
}
if(!exists("outputPdf")){
outputPdf<-FALSE
}
# Always produce PNG when PDF output is off, so at least one format exists.
if(!exists("outputPng") | !outputPdf ){
outputPng<-TRUE
}
if(!exists("outputTIFF")){
outputTIFF<-FALSE
}
if(!exists("filterBaseMean")){
filterBaseMean<-0
}
if(!exists("filterBaseMeanValue")){
filterBaseMeanValue<-30
}
# Collect the set of figure formats to emit (iterated over everywhere).
outputFormat<-c()
if(outputPdf){
outputFormat<-c("PDF")
}
if(outputPng){
outputFormat<-c(outputFormat, "PNG")
}
if(outputTIFF){
outputFormat<-c(outputFormat, "TIFF")
}
if(!exists("countSep")){
countSep="\t"
}
if(!exists("usePearsonInHCA")){
usePearsonInHCA=0
}
if(!exists("exportSignificantGeneName")){
exportSignificantGeneName<-1
}
# Optionally load per-sample library sizes from a file; when present,
# myEstimateSizeFactors() below uses them instead of DESeq2's estimator.
if(exists("libraryFile")){
if (libraryKey != 'None'){
if (grepl(".csv$", libraryFile)){
# CSV layout: samples in columns, library-size rows; pick row libraryKey.
librarySize<-read.csv(libraryFile, row.names=1,check.names=FALSE)
librarySize<-unlist(librarySize[libraryKey,,drop=T])
cat("Using ", libraryKey, " in " , libraryFile , " as library size. \n")
}else{
# Tab-delimited layout: samples in rows; pick column libraryKey.
librarySize<-read.table(libraryFile, row.names=1,check.names=FALSE,header=T,stringsAsFactor=F)
librarySize<-unlist(librarySize[,libraryKey,drop=T])
cat("Using ", libraryKey, " in " , libraryFile , " as library size. \n")
}
}
}
if(!exists("thread")){
thread<-1
}
if(!exists("showVolcanoLegend")){
showVolcanoLegend<-1
}
if(!exists("cooksCutoff")){
cooksCutoff<-FALSE
}
library("DESeq2")
library("heatmap3")
library("lattice")
#library("reshape")
library("ggplot2")
library("grid")
library("scales")
library("reshape2")
library("VennDiagram")
library("RColorBrewer")
#library("preprocessCore")
library("BiocParallel")
library("ggrepel")
library("stringr")
library("data.table")
# All outputs are written relative to the result directory.
setwd(rootdir)
comparisons_data<-read.table(inputfile, header=T, check.names=F , sep="\t", stringsAsFactors = F)
##Solving node stack overflow problem start###
# With very many genes, drawing the dendrogram may fail with a node stack
# overflow. It can be worked around by forcing stats:::plotNode to run as
# interpreted code rather than byte-compiled code via a nasty hack; see
# http://stackoverflow.com/questions/16559250/error-in-heatmap-2-gplots/25877485#25877485
# Align two count tables by row names (outer join).
# Outer-join two tables on their row names. If either input is NULL the
# other is returned unchanged, so the function can be used to accumulate
# tables in a loop. Column-name clashes get the deparsed argument names
# as suffixes by default; result row names are the union of both inputs'.
align<-function(data1,data2,by=0,suffixes=c(deparse(substitute(data1)),deparse(substitute(data2))),sort=T) {
  if (is.null(data1)) {
    return(data2)
  }
  if (is.null(data2)) {
    return(data1)
  }
  merged<-merge(data1,data2,by=by,all=T,suffixes=suffixes,sort=sort)
  # merge(by=0) emits the shared row names as the first column; move them
  # back into the row-name slot and drop that column.
  row.names(merged)<-merged[,1]
  merged[,-1]
}
# A house variant of ggplot2's theme_bw: boxed panels, centered titles,
# black axis lines, and (optionally) x-axis tick labels rotated 90 degrees.
theme_bw3 <- function (axis.x.rotate=F) {
  # ggplot2 3.4.0 renamed element_line(size=) to linewidth=; choose the
  # argument name matching the installed version to avoid warnings.
  axis_line <- if (packageVersion("ggplot2") >= "3.4.0") {
    element_line(colour = "black", linewidth = 0.5)
  } else {
    element_line(colour = "black", size = 0.5)
  }
  out <- theme_bw() +
    theme(
      strip.background = element_rect(fill = NA, colour = 'black'),
      panel.border = element_rect(fill = NA, color = "black"),
      plot.title = element_text(hjust = 0.5),
      axis.line = axis_line
    )
  if (axis.x.rotate) {
    out <- out + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
  }
  return(out)
}
# Convert a byte-compiled function to an interpreted-code function.
# Round-tripping through deparse/parse discards the compiled bytecode;
# the original closure environment is restored afterwards so free
# variables still resolve as before.
unByteCode <- function(fun)
{
FUN <- eval(parse(text=deparse(fun)))
environment(FUN) <- environment(fun)
FUN
}
# Replace a function definition inside of a locked environment **HACK**.
# Temporarily unlocks the binding (e.g. in a package namespace), assigns
# the replacement value, and re-locks it. Returns the value invisibly.
assignEdgewise <- function(name, env, value)
{
unlockBinding(name, env=env)
assign( name, envir=env, value=value)
lockBinding(name, env=env)
invisible(value)
}
# Replace a byte-compiled function in a locked environment with its
# interpreted-code equivalent (combines unByteCode + assignEdgewise).
# The function's own name is recovered from the call expression, with any
# pkg:: / pkg::: qualifier stripped, so e.g. unByteCodeAssign(stats:::plotNode)
# rewrites "plotNode" inside the stats namespace.
unByteCodeAssign <- function(fun)
{
name <- gsub('^.*::+','', deparse(substitute(fun)))
FUN <- unByteCode(fun)
retval <- assignEdgewise(name=name,
env=environment(FUN),
value=FUN
)
invisible(retval)
}
# Use the above functions to convert stats:::plotNode to interpreted code,
# avoiding "node stack overflow" when drawing deep dendrograms.
unByteCodeAssign(stats:::plotNode)
# Now raise the interpreted-code recursion limit (you may need to adjust
# this: decrease if it uses too much memory, increase if you get a
# recursion-depth error).
options(expressions=5e4)
##Solving node stack overflow problem end###
# Shared green-black-red gradient used by every heatmap in this script.
hmcols <- colorRampPalette(c("green", "black", "red"))(256)
# Open a graphics device for one figure.
# filePrefix gets ".pdf"/".tiff"/".png" appended (lower-cased format).
# PDF uses inch dimensions (pdfWidth x pdfHeight); TIFF and PNG use pixel
# dimensions (otherWidth x otherHeight) at 300 dpi. Any format other than
# "PDF"/"TIFF" falls through to PNG. The caller must call dev.off().
openPlot<-function(filePrefix, format, pdfWidth, pdfHeight, otherWidth, otherHeight, figureName){
  fileName<-paste0(filePrefix, ".", tolower(format))
  switch(format,
    PDF = pdf(fileName, width=pdfWidth, height=pdfHeight, useDingbats=FALSE),
    TIFF = tiff(filename=fileName, width=otherWidth, height=otherHeight, res=300),
    # default branch: everything else is written as PNG
    png(filename=fileName, width=otherWidth, height=otherHeight, res=300)
  )
  cat("saving", figureName, "to ", fileName, "\n")
}
# Render plot object p once for every requested output format, opening
# and closing a device per format via openPlot(). figureName is only used
# for the progress message.
drawPlot<-function(filePrefix, outputFormat, pdfWidth, pdfHeight, otherWidth, otherHeight, p, figureName){
  for (fmt in outputFormat) {
    openPlot(filePrefix, fmt, pdfWidth, pdfHeight, otherWidth, otherHeight, figureName)
    # explicit print() so ggplot objects actually render inside the device
    print(p)
    dev.off()
  }
}
# Draw a hierarchical-clustering heatmap (heatmap3) of the expression
# matrix rldselect (genes x samples) for every requested output format.
# Column side-colors show the condition (and pairing when ispaired); row
# labels and dendrogram are suppressed for large gene counts. Skips
# silently when fewer than 3 genes are available. Relies on the globals
# usePearsonInHCA and hmcols defined earlier in this script; showLegend
# is provided by the heatmap3 package.
drawHCA<-function(prefix, rldselect, ispaired, designData, conditionColors, gnames, outputFormat){
genecount<-nrow(rldselect)
showRowDendro = genecount <= 50
if(genecount > 2){
# Shrink column labels as the sample count grows, but never below 1.0.
cexCol = max(1.0, 0.2 + 1/log10(ncol(rldselect)))
if(ispaired){
# Two side-color bars: condition plus a rainbow color per pair.
htColors<-rainbow(length(unique(designData$Paired)))
gsColors<-as.matrix(data.frame(Group=conditionColors, Sample=htColors[designData$Paired]))
}else{
gsColors = conditionColors;
}
# Show gene names only when they fit; widen the margin to hold them.
if (genecount<=30) {
labRow=row.names(rldselect)
margins=c(12,8)
} else {
labRow=NA
margins=c(12,5)
}
filePrefix<-paste0(prefix, "_DESeq2-vsd-heatmap")
for(format in outputFormat){
openPlot(filePrefix, format, 10, 10, 3000, 3000, "HCA")
# The two branches differ only in the distance function: the default
# (Pearson-correlation-based) vs. Euclidean dist. scale="r" row-scales.
if(usePearsonInHCA){
heatmap3(rldselect,
col = hmcols,
ColSideColors = gsColors,
margins=margins,
scale="r",
labRow=labRow,
showRowDendro=showRowDendro,
main=paste0("Hierarchical Cluster Using ", genecount, " Genes"),
cexCol=cexCol,
useRaster=FALSE,
legendfun=function() showLegend(legend=paste0("Group ", gnames), col=c("red","blue"),cex=1.0,x="center"))
}else{
heatmap3(rldselect,
col = hmcols,
ColSideColors = gsColors,
margins=margins,
scale="r",
distfun=dist,
labRow=labRow,
showRowDendro=showRowDendro,
main=paste0("Hierarchical Cluster Using ", genecount, " Genes"),
cexCol=cexCol,
useRaster=FALSE,
legendfun=function() showLegend(legend=paste0("Group ", gnames), col=c("red","blue"),cex=1.0,x="center"))
}
dev.off()
}
}
}
# Draw a PC1-vs-PC2 PCA scatter plot of samples from the expression matrix
# rldmatrix (genes x samples). Points are colored red/blue by condition;
# sample names are shown via ggrepel when showLabelInPCA is true. Axis
# labels carry each PC's percentage of explained variance (computed before
# any rescaling). Skips silently with fewer than 3 genes. Writes
# <prefix>_DESeq2-vsd-pca.<fmt> via drawPlot().
drawPCA<-function(prefix, rldmatrix, showLabelInPCA, designData, condition, outputFormat,scalePCs=TRUE){
genecount<-nrow(rldmatrix)
if(genecount > 2){
# Samples must be rows for prcomp, hence the transpose.
pca<-prcomp(t(rldmatrix))
supca<-summary(pca)$importance
pcadata<-data.frame(pca$x)
if (scalePCs) {
# Standardize PC scores for display only; variance labels keep the
# pre-scaling proportions (row 2 of the importance matrix).
pcadata=as.data.frame(scale(pcadata))
}
pcalabs=paste0(colnames(pcadata), "(", round(supca[2,] * 100), "%)")
pcadata$sample<-row.names(pcadata)
pcadata$Group<-condition
if(showLabelInPCA){
g <- ggplot(pcadata, aes(x=PC1, y=PC2, label=sample)) +
geom_text_repel(size=4)
}else{
g <- ggplot(pcadata, aes(x=PC1, y=PC2)) +
labs(color = "Group")
}
# 20% padding on both axes so points/labels are not clipped at the edge.
g <- g + geom_point(aes(col=Group), size=4) +
scale_x_continuous(limits=c(min(pcadata$PC1) * 1.2,max(pcadata$PC1) * 1.2)) +
scale_y_continuous(limits=c(min(pcadata$PC2) * 1.2,max(pcadata$PC2) * 1.2)) +
geom_hline(aes(yintercept=0), size=.2) +
geom_vline(aes(xintercept=0), size=.2) +
xlab(pcalabs[1]) + ylab(pcalabs[2]) +
scale_color_manual(values=c("red", "blue")) +
theme_bw3() + theme(legend.position="top")
filePrefix<-paste0(prefix, "_DESeq2-vsd-pca")
drawPlot(filePrefix, outputFormat, 6, 5, 3000, 3000, g, "PCA")
}
}
# Populate sizeFactors() on a DESeqDataSet.
#
# If a global `librarySize` vector exists (loaded from libraryFile above),
# size factors come from it directly: each sample's library size divided
# by the geometric mean of all library sizes (per the DESeq2 vignette).
# Otherwise DESeq2's estimateSizeFactors() is used; if that errors (e.g.
# every gene contains at least one zero, so no geometric means exist),
# fall back to edgeR TMM normalization factors scaled by relative column
# sums.
#
# @param dds a DESeqDataSet
# @return dds with sizeFactors() set
myEstimateSizeFactors<-function(dds){
  if(exists("librarySize")){
    cat("Estimate size factor based on library size\n")
    curLibrarySize<-librarySize[colnames(dds)]
    # library size over its geometric mean, per the DESeq2 introduction
    curSizeFactor<- curLibrarySize / exp(mean(log(curLibrarySize)))
    sizeFactors(dds)<-curSizeFactor
  }else{
    cat("Estimate size factor based on reads\n")
    sfres<-try(dds<-estimateSizeFactors(dds))
    # inherits() is the robust failure check; class(x)=="try-error" breaks
    # when class() has length > 1.
    if (inherits(sfres, "try-error")) {
      library(edgeR)
      countNum<-counts(dds)
      # Fixed typo: the argument is `method`, not `methold` (the misspelled
      # name was silently absorbed by `...`, leaving the default in effect).
      y<-calcNormFactors(countNum, method="TMM")
      cs<-colSums(countNum)
      cs<-cs / median(cs)
      sf<-y * cs
      sizeFactors(dds)<-sf
    }
  }
  return(dds)
}
# Axis transformation for volcano plots: a log axis that runs in reverse,
# so the smallest p-values appear at the top. Built on scales::trans_new
# with log-spaced breaks; the domain floor avoids log(0).
reverselog_trans <- function(base = exp(1)) {
  forward <- function(x) -log(x, base)
  backward <- function(x) base^(-x)
  trans_new(
    paste0("reverselog-", format(base)),
    forward,
    backward,
    log_breaks(base = base),
    domain = c(1e-100, Inf)
  )
}
###########################
#end function
###########################
#
# ###################################################################
# #change comparisons_data, need to be removed before adding to pipeline
# comparisons_data=rbind(comparisons_data,comparisons_data)
# comparisons_data[3:4,1]=c("Control_placenta_vs_Heart","Diabetic_placenta_vs_Heart")
# comparisons_data[3:4,6]=c("Control_placenta_vs_Heart","Diabetic_placenta_vs_Heart")
# comparisons_data[,1]=paste0("Test_",comparisons_data[,1])
# comparisons_data[,3]="/scratch/cqs/zhaos/RolandaLister/20200907_RolandaLister4363_4369_RnaSeq/pipeline/deseq2_proteincoding_genetable/result/test.design"
# comparisons_data[,6]=paste0("Test_",comparisons_data[,6])
# comparisons_data$designFormula="~Tissue + Condition+Tissue:Condition"
# comparisons_data$contrast=c("Condition_Diabetic_vs_Control",paste0("Condition_Diabetic_vs_Control",";","Tissueplacenta.ConditionDiabetic"),
# "Tissue_placenta_vs_Heart",paste0("Tissue_placenta_vs_Heart",";","Tissueplacenta.ConditionDiabetic"))
# #comparisons_data=comparisons_data[1,,drop=FALSE]
# #end change comparisons_data
# ###################################################################
# Per-comparison bookkeeping: the distinct count files and comparison
# names, plus the accumulators filled inside the comparison loop below.
countfiles<-unlist(unique(comparisons_data$CountFile))
allComparisons<-unlist(unique(comparisons_data$ComparisonName))
if(length(allComparisons) != nrow(comparisons_data)){
  # Fixed: the original called error(), which is not a base R function and
  # would itself fail with "could not find function"; stop() is correct.
  stop(paste0("Comparison names cannot be repeated: ",
              paste(comparisons_data$ComparisonName, collapse = ", ")))
}
# Map comparison name -> human-readable title for figure captions.
allTitles<-comparisons_data$ComparisonTitle
names(allTitles)<-comparisons_data$ComparisonName
# Accumulators across all comparisons (merged/written after the loop).
dataAllOut<-NULL
resultAllOut<-NULL
allSigNameList<-list()
allSigDirectionList<-list()
sigTableAll<-NULL
sigTableAllGene<-NULL
sigTableAllVar<-c("baseMean","log2FoldChange","lfcSE","stat","pvalue","padj","FoldChange")
# -1 means "read the whole count file"; may be overridden by fileList1.txt.
n_first=-1
# Optional key/value options file (two columns: value, key). Currently
# supports feature_name_regex (regex with one capture group applied to
# row names) and n_first (row limit when reading the count file).
if(file.exists("fileList1.txt")){
options_table = read.table("fileList1.txt", sep="\t")
myoptions = split(options_table$V1, options_table$V2)
feature_name_regex = myoptions$feature_name_regex
if("n_first" %in% names(myoptions)){
n_first = as.numeric(myoptions$n_first)
}
}else{
feature_name_regex=NA
}
# Loop state for the per-count-file / per-comparison loop that follows.
countfile_index = 1
titles<-NULL
validComparisons<-c()
for(countfile_index in c(1:length(countfiles))){
countfile = countfiles[countfile_index]
comparisons = comparisons_data[comparisons_data$CountFile == countfile,]
if(n_first != -1){
data<-data.frame(fread(countfile, nrows=n_first), row.names=idIndex,check.names=FALSE)
}else{
if (grepl(".csv$",countfile)) {
data<-read.csv(countfile,header=T,row.names=idIndex,as.is=T,check.names=FALSE)
} else {
data<-read.delim(countfile,header=T,row.names=idIndex,as.is=T,check.names=FALSE, sep=countSep)
}
}
if(transformTable){
data<-t(data)
}
if(!is.na(feature_name_regex)){
if(!is.null(feature_name_regex)){
if(feature_name_regex != ""){
rownames(data) = str_match(rownames(data), feature_name_regex)[,2]
}
}
}
data<-data[,colnames(data) != "Feature_length"]
colClass<-sapply(data, class)
countNotNumIndex<-which((colClass!="numeric" & colClass!="integer") | grepl("Gene_Id", colnames(data)))
if (length(countNotNumIndex)==0) {
index<-1;
indecies<-c()
} else {
index<-max(countNotNumIndex)+1
indecies<-c(1:(index-1))
}
countData<-data[,c(index:ncol(data))]
countData[is.na(countData)] <- 0
countData<-round(countData)
if(addCountOne){
countData<-countData+1
}
comparisonNames=comparisons$ComparisonName
pairedspearman<-list()
newVarInData<-setdiff(colnames(data),colnames(dataAllOut))
if (length(newVarInData)>0) {
dataAllOut<-align(dataAllOut,data[,newVarInData,drop=FALSE])
}
resultAllOutVar<-c("baseMean","log2FoldChange","pvalue","padj")
comparison_index = 1
for(comparison_index in c(1:nrow(comparisons))){
comparisonName=comparisons$ComparisonName[comparison_index]
comparisonTitle=comparisons$ComparisonTitle[comparison_index]
if ("pairOnlyCovariant" %in% colnames(comparisons)) {
pairOnlyCovariant=comparisons$pairOnlyCovariant[comparison_index]
if (is.na(pairOnlyCovariant) || (pairOnlyCovariant=="")) {
pairOnlyCovariant=NULL
}
}else{
pairOnlyCovariant=NULL
}
if ("designFormula" %in% colnames(comparisons)) {
designFormula=comparisons$designFormula[comparison_index]
print(paste0("designFormula = ", designFormula, "\n"))
if (is.na(designFormula) || (designFormula=="")) {
designFormula=NULL
} else {
designFormula=as.formula(designFormula)
}
} else {
designFormula=NULL
}
if ("contrast" %in% colnames(comparisons)) {
contrast=comparisons$contrast[comparison_index]
if (is.na(contrast) || (contrast=="")) {
contrast=NULL
} else {
contrast=list(strsplit(contrast,";")[[1]])
}
} else {
contrast=NULL
}
if ("collapse_by" %in% colnames(comparisons)) {
collapse_by=comparisons$collapse_by[comparison_index]
if (is.na(collapse_by) || (collapse_by=="")) {
collapse_by=NULL
}
} else {
collapse_by=NULL
}
titles<-c(titles, comparisonTitle)
cat(comparisonName, " ", comparisonTitle, "\n")
designFile=comparisons$ConditionFile[comparison_index]
#comment here as has many group names
gnames=unlist(comparisons[comparison_index, c("ReferenceGroupName", "SampleGroupName")])
#gnames=as.character(unique(designData$Condition))
designData<-read.table(designFile, sep="\t", header=T)
designData$Condition<-factor(designData$Condition, levels=gnames)
if(!is.null(pairOnlyCovariant)){
if(!any(colnames(designData) == pairOnlyCovariant)){
stop(paste0("Cannot find pairOnlyCovariant ", pairOnlyCovariant, " in ",designFile))
}
tbl = table(designData[,pairOnlyCovariant])
tbl = tbl[tbl == 2]
designData=designData[designData[,pairOnlyCovariant] %in% names(tbl),,drop=F]
}
missedSamples<-as.character(designData$Sample)[!(as.character(designData$Sample) %in% colnames(countData))]
if(length(missedSamples) > 0){
message=paste0("There are missed sample defined in design file but not in real data: ", missedSamples)
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next
}
comparisonData<-countData[,colnames(countData) %in% as.character(designData$Sample),drop=F]
if(ncol(comparisonData) != nrow(designData)){
message=paste0("Data not matched, there are ", nrow(designData), " samples in design file ", designFile, " but ", ncol(comparisonData), " samples in data ")
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next
}
comparisonData<-comparisonData[,as.character(designData$Sample)]
if(!is.null(collapse_by)){
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
dds=collapseReplicates(dds, designData[,collapse_by], designData$Sample)
designData<-designData[!duplicated(designData[,collapse_by]),]
designData$Sample<-designData[,collapse_by]
designData<-designData[,colnames(designData) != collapse_by]
comparisonData<-counts(dds)[,designData$Sample]
rm(dds)
}
if(ncol(designData) >= 3){
cat("Data with covariances!\n")
}else{
cat("Data without covariances!\n")
}
if (any(colnames(designData)=="Paired")) {
ispaired<-TRUE
cat("Paired Data!\n")
}else{
ispaired<-FALSE
cat("Not Paired Data!\n")
}
temp<-apply(designData,2,function(x) length(unique(x)))
if (any(temp==1)) {
cat(paste0("Factors with only 1 level in design matrix: ",colnames(designData)[which(temp==1)],"\n"))
cat("They will be removed")
cat("\n")
designData<-designData[,which(temp!=1)]
}
temp<-apply(designData[,-1,drop=F],2,rank)
if (length(unique(rowSums(temp)))==1 | identical(temp[,1],temp[,-1])) {
cat(paste0("The model matrix is not full rank, so the model cannot be fit as specified"))
cat("\n")
cat("Only Condition variable will be kept.")
cat("\n")
designData<-designData[,which(colnames(designData)%in% c("Sample","Condition"))]
}
prefix<-paste0(comparisonName, suffix)
if(top25only){
ranks=apply(comparisonData, 2, function(x){
y=x[x > 0]
q=quantile(y)
return(x>=q[4])
})
select=apply(ranks, 1, function(x){
any(x)
})
comparisonData=comparisonData[select,]
}
if(detectedInBothGroup){
conds<-unique(designData$Condition)
data1<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
med1<-apply(data1, 1, median) > zeroCount
med2<-apply(data2, 1, median) > zeroCount
med<-med1 & med2
comparisonData<-comparisonData[med,]
}
if(performWilcox){
#quantile and wilcox
quantileData=normalize.quantiles(data.matrix(comparisonData))
colnames(quantileData)=colnames(comparisonData)
rownames(quantileData)=rownames(comparisonData)
write.csv(quantileData, file=paste0(prefix, "_quantile.csv"), row.names = T)
data1<-quantileData[, colnames(quantileData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-quantileData[, colnames(quantileData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
diffData=data.frame(quantileData)
diffData$pvalues=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
test=wilcox.test(d1,d2)
test$p.value
}))
diffData$log2MedianFoldChange=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
log2(median(d2) / median(d1))
}))
diffData$log2MeanFoldChange=unlist(lapply(c(1:nrow(data1)), function(index){
d1=data1[index,]
d2=data2[index,]
log2(mean(d2) / mean(d1))
}))
diffData=diffData[order(diffData$pvalues),]
write.csv(diffData, file=paste0(prefix, "_quantile_wilcox.csv"), row.names = T)
filterData=diffData[diffData$pvalues<=pvalue & abs(diffData$log2MedianFoldChange) > log2(foldChange),]
write.csv(filterData, file=paste0(prefix, "_quantile_wilcox_sig.csv"), row.names = T)
}
if(minMedianInGroup > 0){
conds<-unique(designData$Condition)
data1<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[1]],drop=FALSE]
data2<-comparisonData[, colnames(comparisonData) %in% designData$Sample[designData$Condition==conds[2]],drop=FALSE]
med1<-apply(data1, 1, median) >= minMedianInGroup
med2<-apply(data2, 1, median) >= minMedianInGroup
med<-med1 | med2
geneNumBeforeFilter=nrow(comparisonData)
comparisonData<-comparisonData[med,]
cat(nrow(comparisonData), " genes with minimum median count in group larger or equals than ", minMedianInGroup, ". ",geneNumBeforeFilter-nrow(comparisonData)," genes removed\n")
}
if (nrow(comparisonData)<=1) {
message=paste0("Error: Only ", nrow(comparisonData), " Genes can be used in DESeq2 analysis in comparison ",comparisonName,", ignored. \n")
warning(message)
writeLines(message,paste0(comparisonName,".error"))
next;
}
validComparisons<-c(validComparisons, comparisonName)
if(ispaired){
pairedSamples = unique(designData$Paired)
spcorr<-unlist(lapply(c(1:length(pairedSamples)), function(x){
samples<-designData$Sample[designData$Paired==pairedSamples[x]]
cor(comparisonData[,samples[1]],comparisonData[,samples[2]],method="spearman")
}))
sptable<-data.frame(Name=pairedSamples, Spcorr=spcorr)
write.csv(sptable, file=paste0(prefix, "_Spearman.csv"), row.names=FALSE)
dir.create("details", showWarnings = FALSE)
lapply(c(1:length(pairedSamples)), function(x){
samples<-designData$Sample[designData$Paired==pairedSamples[x]]
log2c1<-log2(comparisonData[,samples[1]]+1)
log2c2<-log2(comparisonData[,samples[2]]+1)
png(paste0("details/", prefix, "_Spearman_", pairedSamples[x], ".png"), width=2000, height=2000, res=300)
plot(log2c1, log2c2, xlab=paste0(samples[1], " [log2(Count + 1)]"), ylab=paste0(samples[2], " [log2(Count + 1)]"))
text(3,15,paste0("SpearmanCorr=", sprintf("%0.3f", spcorr[x])))
dev.off()
})
pairedspearman[[comparisonName]]<-spcorr
}
notEmptyData<-apply(comparisonData, 1, max) > 0
comparisonData<-comparisonData[notEmptyData,]
if(ispaired){
colnames(comparisonData)<-unlist(lapply(c(1:ncol(comparisonData)), function(i){paste0(designData$Paired[i], "_", colnames(comparisonData)[i])}))
}
rownames(designData)<-colnames(comparisonData)
conditionColors<-as.matrix(data.frame(Group=c("red", "blue")[designData$Condition]))
write.csv(comparisonData, file=paste0(prefix, ".csv"))
#some basic graph
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
colnames(dds)<-colnames(comparisonData)
dds<-myEstimateSizeFactors(dds)
if(filterBaseMean){
cat(paste0("filter by basemean: ", filterBaseMeanValue, "\n"))
baseMeans = rowMeans(counts(dds, normalized=TRUE))
write.csv(baseMeans, file=paste0(prefix, ".basemean.csv"))
dds<-dds[baseMeans > filterBaseMeanValue,]
comparisonData=comparisonData[baseMeans > filterBaseMeanValue,]
}
rld_normed<-log2(counts(dds,normalized=TRUE) + 1)
write.csv(format(rld_normed, digits=3), paste0(prefix, "_DESeq2-log2-normalized-counts.csv"))
#draw density graph
rldmatrix<-as.matrix(log2(counts(dds,normalized=FALSE) + 1))
rsdata<-melt(rldmatrix)
colnames(rsdata)<-c("Gene", "Sample", "log2Count")
png(filename=paste0(prefix, "_DESeq2-log2-density.png"), width=4000, height=3000, res=300)
g<-ggplot(rsdata) + geom_density(aes(x=log2Count, colour=Sample)) + xlab("DESeq2 log2 transformed count") + guides(color = FALSE)
print(g)
dev.off()
width=max(4000, ncol(rldmatrix) * 40 + 1000)
height=max(3000, ncol(rldmatrix) * 40)
png(filename=paste0(prefix, "_DESeq2-log2-density-individual.png"), width=width, height=height, res=300)
g<-ggplot(rsdata) + geom_density(aes(x=log2Count, colour=Sample)) + facet_wrap(~Sample, scales = "free") + xlab("DESeq2 log2 transformed count") + guides(color = FALSE)
print(g)
dev.off()
fitType<-"parametric"
if(nrow(comparisonData) < 5){
fitType<-"mean"
}
while(1){
#varianceStabilizingTransformation
vsdres<-try(vsd <- varianceStabilizingTransformation(dds, blind=TRUE,fitType=fitType))
if(class(vsdres) == "try-error"){
if(grepl("every gene contains at least one zero", vsdres[1])){
removed<-removed+1
keptNumber<-length(zeronumbers) - percent10 * removed
keptSample<-zeronumbers[1:keptNumber]
excludedSample<-zeronumbers[(keptNumber+1):length(zeronumbers)]
comparisonData<-comparisonData[, colnames(comparisonData) %in% keptSample]
designData<-designData[rownames(designData) %in% keptSample,]
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = ~1)
colnames(dds)<-colnames(comparisonData)
} else if (grepl("newsplit: out of vertex space", vsdres[1]) | fitType != "mean") {
message=paste0("Warning: varianceStabilizingTransformation function can't run. fitType was set to mean to try again")
warning(message)
fitType<-"mean"
writeLines(message,paste0(comparisonName,".error"))
} else {
message=paste0(paste0("Error: varianceStabilizingTransformation function can't run. ", vsdres))
writeLines(message,paste0(comparisonName,".error"))
stop(message)
}
}else if(all(is.na(assay(vsd)))){
fitType<-"mean"
} else{
conditionColors<-as.matrix(data.frame(Group=c("red", "blue")[designData$Condition]))
break
}
}
if(nrow(comparisonData) > 1){
assayvsd<-assay(vsd)
write.csv(format(assayvsd, digits=3), file=paste0(prefix, "_DESeq2-vsd.csv"))
rldmatrix=as.matrix(assayvsd)
#draw pca graph
drawPCA(paste0(prefix,"_geneAll"), rldmatrix, showLabelInPCA, designData, designData$Condition, outputFormat)
if(exists("top25cvInHCA") && top25cvInHCA){
rv<-rowVars(rldmatrix)
countHT<-rldmatrix[rv>=quantile(rv)[4],]
drawHCA(paste0(prefix,"_geneTop25variance"), countHT, ispaired, designData, conditionColors, gnames, outputFormat)
}else{
#draw heatmap
drawHCA(paste0(prefix,"_geneAll"), rldmatrix, ispaired, designData, conditionColors, gnames, outputFormat)
}
}
#different expression analysis
if (is.null(designFormula)) {
designFormula=as.formula(paste0("~",paste0(c(colnames(designData)[-c(1:2)],"Condition"),collapse="+")))
}
cat(paste0("", designFormula), "\n")
dds=DESeqDataSetFromMatrix(countData = comparisonData,
colData = designData,
design = designFormula)
dds<-myEstimateSizeFactors(dds)
bpparam<-MulticoreParam(thread)
# parallel<-ifelse(thread <= 1, FALSE, TRUE)
parallel=FALSE
ddsres<-try(dds <- DESeq(dds,fitType=fitType, parallel=parallel, BPPARAM=bpparam))
if(class(ddsres) == "try-error"){
if( grepl("One can instead use the gene-wise estimates as final estimates", ddsres[1])){
dds <- estimateDispersionsGeneEst(dds)
dispersions(dds) <- mcols(dds)$dispGeneEst
dds<-nbinomWaldTest(dds)
}else if(grepl("newsplit: out of vertex space", ddsres[1])){
dds <- DESeq(dds,fitType="mean", parallel=parallel, BPPARAM=bpparam)
}else{
stop(paste0("DESeq2 failed: ", ddsres[1]))
}
}
if (!is.null(contrast)) {
res<-results(dds, cooksCutoff=cooksCutoff, alpha=alpha, parallel=parallel, BPPARAM=bpparam,contrast=contrast)
} else {
res<-results(dds, cooksCutoff=cooksCutoff, alpha=alpha, parallel=parallel, BPPARAM=bpparam)
}
res$FoldChange<-2^res$log2FoldChange
baseMeanPerLvl <- sapply( levels(dds$Condition), function(lvl) rowMeans( counts(dds,normalized=TRUE)[,dds$Condition == lvl,drop=FALSE] ) )
colnames(baseMeanPerLvl)<-paste0("baseMean_", colnames(baseMeanPerLvl))
res<-cbind(res, baseMeanPerLvl)
cat("DESeq2 finished.\n")
if (useRawPvalue==1) {
select<-(!is.na(res$pvalue)) & (res$pvalue<pvalue) & ((res$log2FoldChange >= log2(foldChange)) | (res$log2FoldChange <= -log2(foldChange)))
} else {
select<-(!is.na(res$padj)) & (res$padj<pvalue) & ((res$log2FoldChange >= log2(foldChange)) | (res$log2FoldChange <= -log2(foldChange)))
}
if(length(indecies) > 0){
inddata<-data[rownames(comparisonData),indecies,drop=F]
tbb<-cbind(inddata, as.data.frame(comparisonData), res)
}else{
tbb<-cbind(as.data.frame(comparisonData), res)
}
tbbselect<-tbb[select,,drop=F]
tbbAllOut<-as.data.frame(tbb[,resultAllOutVar,drop=F])
tbbAllOut$Significant<-select
colnames(tbbAllOut)<-paste0(colnames(tbbAllOut)," (",comparisonName,")")
resultAllOut<-cbind(as.data.frame(resultAllOut)[row.names(dataAllOut),],as.matrix(tbbAllOut[row.names(dataAllOut),]))
row.names(resultAllOut)<-row.names(dataAllOut)
tbb<-tbb[order(tbb$pvalue),,drop=F]
write.csv(as.data.frame(tbb),paste0(prefix, "_DESeq2.csv"))
tbbselect<-tbbselect[order(tbbselect$pvalue),,drop=F]
sigFile=paste0(prefix, "_DESeq2_sig.csv")
sigTable<-as.data.frame(tbbselect)
write.csv(sigTable,sigFile)
allSigNameList[[comparisonName]]<-row.names(sigTable)
allSigDirectionList[[comparisonName]]<-sign(sigTable$log2FoldChange)
if(nrow(sigTable) > 0){
sigTable$comparisonName<-comparisonName
if (("Feature_gene_name" %in% colnames(sigTable)) & (!("Feature_gene_name" %in% sigTableAllVar))){
sigTableAllVar<-c("Feature_gene_name", sigTableAllVar)
}
sigTableAll<-rbind(sigTableAll,sigTable[,c("comparisonName",sigTableAllVar),drop=FALSE],make.row.names=FALSE)
sigTableAllGene<-c(sigTableAllGene,row.names(sigTable))
}
geneNameField = NULL
lowColNames = tolower(colnames(tbb))
for(name in c("Feature_gene_name", "Gene.Symbol", "Gene_Symbol", "Gene Symbol")){
lowName = tolower(name)
if(lowName %in% lowColNames){
geneNameField=colnames(tbb)[match(lowName, lowColNames)]
break
}
}
if(!is.null(geneNameField)){
write.table(tbb[,c(geneNameField, "stat"),drop=F],paste0(prefix, "_DESeq2_GSEA.rnk"),row.names=F,col.names=F,sep="\t", quote=F)
if(exportSignificantGeneName){
write.table(tbbselect[,c(geneNameField),drop=F], paste0(prefix, "_DESeq2_sig_genename.txt"),row.names=F,col.names=F,sep="\t", quote=F)
}
}else{
write.table(tbb[,c("stat"),drop=F],paste0(prefix, "_DESeq2_GSEA.rnk"),row.names=T,col.names=F,sep="\t", quote=F)
if(exportSignificantGeneName){
write.table(data.frame(name=rownames(tbbselect)), paste0(prefix, "_DESeq2_sig_genename.txt"),row.names=F,col.names=F,sep="\t", quote=F)
}
}
if(showDEGeneCluster){
siggenes<-rownames(rldmatrix) %in% rownames(tbbselect)
nonDEmatrix<-rldmatrix[!siggenes,,drop=F]
DEmatrix<-rldmatrix[siggenes,,drop=F]
drawPCA(paste0(prefix,"_geneDE"),DEmatrix , showLabelInPCA, designData, conditionColors, outputFormat)
drawHCA(paste0(prefix,"_geneDE"),DEmatrix , ispaired, designData, conditionColors, gnames, outputFormat)
drawPCA(paste0(prefix,"_geneNotDE"), nonDEmatrix, showLabelInPCA, designData, conditionColors, outputFormat)
drawHCA(paste0(prefix,"_geneNotDE"), nonDEmatrix, ispaired, designData, conditionColors, gnames, outputFormat)
}
#Top 25 Significant genes barplot
sigDiffNumber<-nrow(tbbselect)
if (sigDiffNumber>0) {
if (sigDiffNumber>25) {
print(paste0("More than 25 genes were significant. Only the top 25 genes will be used in barplot"))
diffResultSig<-tbbselect[order(tbbselect$pvalue)[1:25],]
} else {
diffResultSig<-tbbselect
}
if(!is.null(geneNameField)){
diffResultSig$Name<-as.character(diffResultSig[,geneNameField])
}else{
diffResultSig$Name<-sapply(strsplit(row.names(diffResultSig),";"),function(x) x[1])
}
if (any(duplicated(diffResultSig$Name))) {
whichIndex<-which(duplicated(diffResultSig$Name))
diffResultSig$Name[whichIndex]<-paste0(row.names(diffResultSig)[whichIndex], ":", diffResultSig$Name[whichIndex])
}
diffResultSig$Name <- factor(diffResultSig$Name, levels=diffResultSig$Name[order(diffResultSig$log2FoldChange)])
diffResultSig<-as.data.frame(diffResultSig)
p<-ggplot(diffResultSig,aes(x=Name,y=log2FoldChange,order=log2FoldChange))+geom_bar(stat="identity")+
coord_flip()+
# geom_abline(slope=0,intercept=1,colour="red",linetype = 2)+
scale_y_continuous(name=bquote(log[2]~Fold~Change))+
theme_bw3() +
theme(axis.text = element_text(colour = "black"))
filePrefix<-paste0(prefix,"_DESeq2_sig_barplot")
drawPlot(filePrefix, outputFormat, 7, 7, 3000, 3000, p, "PCA")
} else {
print(paste0("No gene with adjusted p value less than ",pvalue," and fold change larger than ",foldChange))
}
#volcano plot
changeColours<-c(grey="grey",blue="blue",red="red")
diffResult<-as.data.frame(tbb)
diffResult$log10BaseMean<-log10(diffResult$baseMean)
diffResult$colour<-"grey"
if (useRawPvalue==1) {
diffResult<-subset(diffResult, !is.na(pvalue))
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
} else {
diffResult<-subset(diffResult, !is.na(padj))
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
}
write.csv(diffResult, file=paste0(prefix, "_DESeq2_volcanoPlot.csv"))
yname=bquote(-log10(p~value))
xname=bquote(log[2]~Fold~Change)
p<-ggplot(diffResult,aes(x=log2FoldChange,y=pvalue))+
scale_y_continuous(trans=reverselog_trans(10),name=yname) +
geom_point(aes(size=log10BaseMean,colour=colour))+
scale_color_manual(values=changeColours,guide = FALSE)+
scale_x_continuous(name=xname)+
geom_hline(yintercept = 1,colour="grey",linetype = "dotted")+
geom_vline(xintercept = 0,colour="grey",linetype = "dotted")+
guides(size=guide_legend(title=bquote(log[10]~Base~Mean)))+
theme_bw()+
scale_size(range = c(3, 7))+
theme(axis.text = element_text(colour = "black",size=30),
axis.title = element_text(size=30),
legend.text= element_text(size=30),
legend.title= element_text(size=30))
if(!showVolcanoLegend){
p<-p+ theme(legend.position = "none")
pdfWidth=10
otherWidth=3000
}else{
pdfWidth=15
otherWidth=4500
}
filePrefix<-paste0(prefix,"_DESeq2_volcanoPlot")
drawPlot(filePrefix, outputFormat, pdfWidth, 10, otherWidth, 3000, p, "Volcano")
if(require("EnhancedVolcano")){
if(!("Feature_gene_name" %in% colnames(diffResult))){
diffResult$Feature_gene_name=rownames(diffResult)
}
if(packageVersion("EnhancedVolcano") == '1.8.0'){
if(useRawPvalue == 1){
yname=bquote(-log10(p~value))
yvar="pvalue"
}else{
yname=bquote(-log10(adjusted~p~value))
yvar="padj"
}
p<-EnhancedVolcano(diffResult,
lab = diffResult$Feature_gene_name,
x = 'log2FoldChange',
y = yvar,
title = comparisonTitle,
pCutoff = pvalue,
FCcutoff = log2(foldChange),
pointSize = 3.0,
labSize = 6.0,
colAlpha = 1,
subtitle = NULL) + ylab(yname)
}else{
yname=bquote(-log10(p~value))
p<-EnhancedVolcano(diffResult,
lab = diffResult$Feature_gene_name,
x = 'log2FoldChange',
y = 'pvalue',
title = comparisonTitle,
pCutoff = pvalue,
pCutoffCol = "padj",
FCcutoff = log2(foldChange),
pointSize = 3.0,
labSize = 6.0,
colAlpha = 1,
subtitle = NULL) + ylab(yname)
}
filePrefix<-paste0(prefix,"_DESeq2_volcanoEnhanced")
drawPlot(filePrefix, outputFormat, 10, 10, 3000, 3000, p, "Volcano")
}
}
if(length(pairedspearman) > 0){
filePrefix<-paste0(prefix, "_", ifelse(minMedianInGroup > 0, paste0("spearman_min", minMedianInGroup), "spearman"))
fwidth<-max(2000, 1000 * length(pairedspearman))
for(format in outputFormat){
openPlot(filePrefix, format, 7, 7, fwidth, 2000, "Spearman correlation")
boxplot(pairedspearman)
dev.off()
}
}
}
allprefix=paste0(basename(inputfile), suffix)
#Venn for all significant genes
#Output all significant genes table
if(!is.null(sigTableAll)){
sigTableAll<-cbind(Gene=sigTableAllGene,sigTableAll)
write.csv(sigTableAll,paste0(allprefix, "_DESeq2_allSig.csv"),row.names=FALSE)
#Do venn if length between 2-5
if (length(allSigNameList)>=2 & length(allSigNameList)<=5) {
# Customised inline copy of VennDiagram::venn.diagram: instead of writing an
# image file it draws on the graphics device already opened by the caller,
# and it auto-selects a default fill palette sized to the number of gene
# sets. Supports 1-5 sets; extra arguments in ... are forwarded to the
# matching VennDiagram::draw.*.venn routine. Returns 1 on success.
# NOTE(review): filename/height/width/resolution/units/compression are kept
# for signature compatibility with VennDiagram::venn.diagram but are unused.
venn.diagram1<-function (x, filename, height = 3000, width = 3000, resolution = 500,
    units = "px", compression = "lzw", na = "stop", main = NULL,
    sub = NULL, main.pos = c(0.5, 1.05), main.fontface = "plain",
    main.fontfamily = "serif", main.col = "black", main.cex = 1,
    main.just = c(0.5, 1), sub.pos = c(0.5, 1.05), sub.fontface = "plain",
    sub.fontfamily = "serif", sub.col = "black", sub.cex = 1,
    sub.just = c(0.5, 1), category.names = names(x), force.unique = TRUE,
    fill=NA,
    ...)
{
  # Default fill colours, chosen by how many sets are being compared.
  if (is.na(fill[1])) {
    if (length(x)==5) {
      fill = c("dodgerblue", "goldenrod1", "darkorange1", "seagreen3", "orchid3")
    } else if (length(x)==4) {
      fill = c("dodgerblue", "goldenrod1", "seagreen3", "orchid3")
    } else if (length(x)==3) {
      fill = c("dodgerblue", "goldenrod1", "seagreen3")
    } else if (length(x)==2) {
      fill = c("dodgerblue", "goldenrod1")
    }
  }
  # Drop duplicate members within each set so areas count unique genes.
  if (force.unique) {
    for (i in 1:length(x)) {
      x[[i]] <- unique(x[[i]])
    }
  }
  # NA policy: keep ("none"), abort ("stop"), or drop NAs ("remove").
  if ("none" == na) {
    x <- x
  }
  else if ("stop" == na) {
    for (i in 1:length(x)) {
      if (any(is.na(x[[i]]))) {
        stop("NAs in dataset", call. = FALSE)
      }
    }
  }
  else if ("remove" == na) {
    for (i in 1:length(x)) {
      x[[i]] <- x[[i]][!is.na(x[[i]])]
    }
  }
  else {
    stop("Invalid na option: valid options are \"none\", \"stop\", and \"remove\"")
  }
  if (0 == length(x) | length(x) > 5) {
    stop("Incorrect number of elements.", call. = FALSE)
  }
  # Dispatch on set count: compute every pairwise and higher-order
  # intersection size, then hand the counts to the matching VennDiagram
  # draw routine (ind = FALSE defers drawing until grid.draw below).
  if (1 == length(x)) {
    list.names <- category.names
    if (is.null(list.names)) {
      list.names <- ""
    }
    grob.list <- VennDiagram::draw.single.venn(area = length(x[[1]]),
        category = list.names, ind = FALSE,fill=fill, ...)
  }
  else if (2 == length(x)) {
    grob.list <- VennDiagram::draw.pairwise.venn(area1 = length(x[[1]]),
        area2 = length(x[[2]]), cross.area = length(intersect(x[[1]],
        x[[2]])), category = category.names, ind = FALSE,
        fill=fill,
        ...)
  }
  else if (3 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    list.names <- category.names
    nab <- intersect(A, B)
    nbc <- intersect(B, C)
    nac <- intersect(A, C)
    nabc <- intersect(nab, C)
    grob.list <- VennDiagram::draw.triple.venn(area1 = length(A),
        area2 = length(B), area3 = length(C), n12 = length(nab),
        n23 = length(nbc), n13 = length(nac), n123 = length(nabc),
        category = list.names, ind = FALSE, list.order = 1:3,
        fill=fill,
        ...)
  }
  else if (4 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    D <- x[[4]]
    list.names <- category.names
    n12 <- intersect(A, B)
    n13 <- intersect(A, C)
    n14 <- intersect(A, D)
    n23 <- intersect(B, C)
    n24 <- intersect(B, D)
    n34 <- intersect(C, D)
    n123 <- intersect(n12, C)
    n124 <- intersect(n12, D)
    n134 <- intersect(n13, D)
    n234 <- intersect(n23, D)
    n1234 <- intersect(n123, D)
    grob.list <- VennDiagram::draw.quad.venn(area1 = length(A),
        area2 = length(B), area3 = length(C), area4 = length(D),
        n12 = length(n12), n13 = length(n13), n14 = length(n14),
        n23 = length(n23), n24 = length(n24), n34 = length(n34),
        n123 = length(n123), n124 = length(n124), n134 = length(n134),
        n234 = length(n234), n1234 = length(n1234), category = list.names,
        ind = FALSE, fill=fill,...)
  }
  else if (5 == length(x)) {
    A <- x[[1]]
    B <- x[[2]]
    C <- x[[3]]
    D <- x[[4]]
    E <- x[[5]]
    list.names <- category.names
    n12 <- intersect(A, B)
    n13 <- intersect(A, C)
    n14 <- intersect(A, D)
    n15 <- intersect(A, E)
    n23 <- intersect(B, C)
    n24 <- intersect(B, D)
    n25 <- intersect(B, E)
    n34 <- intersect(C, D)
    n35 <- intersect(C, E)
    n45 <- intersect(D, E)
    n123 <- intersect(n12, C)
    n124 <- intersect(n12, D)
    n125 <- intersect(n12, E)
    n134 <- intersect(n13, D)
    n135 <- intersect(n13, E)
    n145 <- intersect(n14, E)
    n234 <- intersect(n23, D)
    n235 <- intersect(n23, E)
    n245 <- intersect(n24, E)
    n345 <- intersect(n34, E)
    n1234 <- intersect(n123, D)
    n1235 <- intersect(n123, E)
    n1245 <- intersect(n124, E)
    n1345 <- intersect(n134, E)
    n2345 <- intersect(n234, E)
    n12345 <- intersect(n1234, E)
    grob.list <- VennDiagram::draw.quintuple.venn(area1 = length(A),
        area2 = length(B), area3 = length(C), area4 = length(D),
        area5 = length(E), n12 = length(n12), n13 = length(n13),
        n14 = length(n14), n15 = length(n15), n23 = length(n23),
        n24 = length(n24), n25 = length(n25), n34 = length(n34),
        n35 = length(n35), n45 = length(n45), n123 = length(n123),
        n124 = length(n124), n125 = length(n125), n134 = length(n134),
        n135 = length(n135), n145 = length(n145), n234 = length(n234),
        n235 = length(n235), n245 = length(n245), n345 = length(n345),
        n1234 = length(n1234), n1235 = length(n1235), n1245 = length(n1245),
        n1345 = length(n1345), n2345 = length(n2345), n12345 = length(n12345),
        category = list.names, ind = FALSE,fill=fill, ...)
  }
  else {
    stop("Invalid size of input object")
  }
  # Optional sub-title then title grobs. NOTE(review): add.title is called
  # unqualified - presumably VennDiagram's helper; confirm it is in scope
  # (it is internal in recent VennDiagram releases).
  if (!is.null(sub)) {
    grob.list <- add.title(gList = grob.list, x = sub, pos = sub.pos,
        fontface = sub.fontface, fontfamily = sub.fontfamily,
        col = sub.col, cex = sub.cex)
  }
  if (!is.null(main)) {
    grob.list <- add.title(gList = grob.list, x = main, pos = main.pos,
        fontface = main.fontface, fontfamily = main.fontfamily,
        col = main.col, cex = main.cex)
  }
  # Render on the currently open device (opened by the caller via openPlot).
  grid.newpage()
  grid.draw(grob.list)
  return(1)
  # return(grob.list)
}
# Build a vector of n distinct colours from an RColorBrewer palette.
# If n exceeds the palette's capacity the full palette is interpolated up to
# n colours with colorRampPalette; if brewer.pal returns more colours than
# requested (it pads requests below the palette minimum of 3), the result is
# truncated to exactly n.
makeColors <- function(n, colorNames = "Set1") {
  paletteMax <- brewer.pal.info[colorNames, "maxcolors"]
  if (n > paletteMax) {
    # Palette too small for n: interpolate across its full range.
    result <- colorRampPalette(brewer.pal(paletteMax, colorNames))(n)
  } else {
    result <- brewer.pal(n, colorNames)
    if (length(result) > n) {
      # brewer.pal padded a tiny request up to the palette minimum.
      result <- head(result, n)
    }
  }
  return(result)
}
colors<-makeColors(length(allSigNameList))
for(format in outputFormat){
filePrefix<-paste0(allprefix,"_significantVenn")
openPlot(filePrefix, format, 7, 7, 2000, 2000, "Venn")
venn.diagram1(allSigNameList,cex=2,cat.cex=2,cat.col=colors,fill=colors)
dev.off()
}
}
#Do heatmap significant genes if length larger or equal than 2
# Cross-comparison heatmap of significant genes: one tile per
# gene x comparison, coloured by direction of change. Relies on script-level
# state from the comparison loop (allSigNameList, allSigDirectionList,
# allprefix, textSize, outputFormat).
if (length(allSigNameList)>=2) {
  # Long-format table: one row per (gene, comparison) significant hit,
  # with the sign of its log2 fold change.
  temp<-cbind(unlist(allSigNameList),unlist(allSigDirectionList))
  colnames(temp)<-c("Gene","Direction")
  temp<-cbind(temp,comparisonName=rep(names(allSigNameList),sapply(allSigNameList,length)))
  temp<-data.frame(temp)
  dataForFigure<-temp
  # Derive the gene ordering used in the figure: cast to a
  # gene x comparison direction matrix and sort rows lexicographically.
  temp$Direction<-as.integer(as.character(temp$Direction))
  temp<-acast(temp, Gene~comparisonName ,value.var="Direction")
  temp<-temp[do.call(order, data.frame(temp)),]
  maxNameChr<-max(nchar(row.names(temp)))
  if (maxNameChr>70) {
    # Truncate over-long gene names to 70 characters, but only if the
    # truncation keeps every name unique (otherwise rows would collide).
    tmpNames<-substr(row.names(temp),0,70)
    if(length(tmpNames) == length(unique(tmpNames))){
      row.names(temp)<-tmpNames
      dataForFigure$Gene<-substr(dataForFigure$Gene,0,70)
      warning(paste0("The gene names were too long (",maxNameChr,"). Only first 70 letters were kept."))
    }
  }
  # Lock the factor levels so ggplot lays genes out in the sorted order.
  dataForFigure$Gene<-factor(dataForFigure$Gene,levels=row.names(temp))
  g<-ggplot(dataForFigure, aes(comparisonName, Gene))+
    geom_tile(aes(fill=Direction), color="white") +
    scale_fill_manual(values=c("light green", "red")) +
    theme(axis.text.x = element_text(angle=90, vjust=0.5, size=11, hjust=0.5, face="bold"),
      axis.text.y = element_text(size=textSize, face="bold")) +
    coord_equal()
  # Scale the canvas with the number of comparisons/genes, capped at 30000px.
  width=min(max(2500, 60 * length(unique(dataForFigure$comparisonName))),30000)
  height=min(max(2000, 40 * length(unique(dataForFigure$Gene))),30000)
  filePrefix<-paste0(allprefix,"_significantHeatmap")
  drawPlot(filePrefix, outputFormat, 7, 7, width, height, g, "Significant Heatmap")
}
}
# Combined output across all comparisons: one wide CSV with every
# comparison's statistics, a faceted volcano plot, and a per-comparison
# significant-gene summary. Relies on script-level state built in the
# comparison loop (resultAllOut, dataAllOut, validComparisons, allTitles,
# allComparisons, useRawPvalue, pvalue, foldChange, outputFormat, allprefix).
if (! is.null(resultAllOut)) {
  #write a file with all information
  resultAllOut<-cbind(dataAllOut,resultAllOut[row.names(dataAllOut),])
  write.csv(resultAllOut,paste0(allprefix, "_DESeq2.csv"))
  if(length(validComparisons) > 1 ){
    #volcano plot for all comparisons
    temp<-resultAllOut[,-(1:ncol(dataAllOut))]
    diffResult<-NULL
    # Column names have the form "var (comparison)"; the first token
    # recovers the per-comparison variable set - assumed identical and
    # contiguous for every comparison (TODO confirm upstream guarantees).
    diffResultVar<-unique(sapply(strsplit(colnames(temp)," "),function(x) x[1]))
    for (i in 1:(length(validComparisons))) {
      # Slice comparison i's contiguous block of columns and stack the
      # blocks long-wise, tagged with the comparison name.
      temp1<-temp[,(i*length(diffResultVar)-(length(diffResultVar)-1)):(i*length(diffResultVar))]
      colnames(temp1)<-diffResultVar
      temp1$Comparison<-validComparisons[i]
      if (is.null(diffResult)) {
        diffResult<-temp1
      } else {
        diffResult<-rbind(diffResult,temp1)
      }
    }
    changeColours<-c(grey="grey",blue="blue",red="red")
    diffResult$log10BaseMean<-log10(diffResult$baseMean)
    # Map comparison IDs to display titles, preserving encounter order.
    diffResult$Comparison<-allTitles[diffResult$Comparison]
    diffResult$Comparison<-factor(diffResult$Comparison,levels=unique(diffResult$Comparison))
    # Colour points red (up) / blue (down) when they pass the p-value and
    # fold-change cutoffs; everything else stays grey.
    diffResult$colour<-"grey"
    if (useRawPvalue==1) {
      diffResult<-subset(diffResult, !is.na(pvalue))
      diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
      diffResult$colour[which(diffResult$pvalue<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
    } else {
      diffResult<-subset(diffResult, !is.na(padj))
      diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange>=log2(foldChange))]<-"red"
      diffResult$colour[which(diffResult$padj<=pvalue & diffResult$log2FoldChange<=-log2(foldChange))]<-"blue"
    }
    # y axis shows raw or adjusted p-values on a reversed log10 scale.
    if (useRawPvalue==1) {
      p<-ggplot(diffResult,aes(x=log2FoldChange,y=pvalue))+
        scale_y_continuous(trans=reverselog_trans(10),name=bquote(p~value))
    } else {
      p<-ggplot(diffResult,aes(x=log2FoldChange,y=padj))+
        scale_y_continuous(trans=reverselog_trans(10),name=bquote(Adjusted~p~value))
    }
    p<-p+geom_point(aes(size=log10BaseMean,colour=colour))+
      scale_color_manual(values=changeColours,guide = FALSE)+
      scale_x_continuous(name=bquote(log[2]~Fold~Change))+
      geom_hline(yintercept = 1,colour="grey",linetype = "dotted")+
      geom_vline(xintercept = 0,colour="grey",linetype = "dotted")+
      guides(size=guide_legend(title=bquote(log[10]~Base~Mean)))+
      theme_bw()+
      scale_size(range = c(3, 7))+
      facet_grid(. ~ Comparison)+
      theme(axis.text = element_text(colour = "black",size=25),
        axis.title = element_text(size=25),
        legend.text= element_text(size=25),
        legend.title= element_text(size=25),
        strip.text.x = element_text(size = 25),
        strip.background=element_rect(fill="white"))
    # Widen the output with the number of comparisons (one facet each).
    pwidth<-max(12,4*length(allComparisons)+4)
    owidth<-max(4000, 1500*length(allComparisons)+1000)
    filePrefix<-paste0(allprefix,"_DESeq2_volcanoPlot")
    drawPlot(filePrefix, outputFormat, pwidth, 7, owidth, 2000, p, "Volcano")
    #output a summary table with numbers of significant changed genes
    sigGeneSummaryTable<-t(table(diffResult[,"Significant"],diffResult[,"Comparison"]))
    # Either level of "Significant" may be absent (all or no genes
    # significant), so look the columns up defensively.
    notSigIndex<-match("0", colnames(sigGeneSummaryTable))
    if(is.na(notSigIndex)){
      notSignificant=0
    }else{
      notSignificant=sigGeneSummaryTable[,notSigIndex]
    }
    sigIndex<-match("1", colnames(sigGeneSummaryTable))
    if(is.na(sigIndex)){
      significant=0
    }else{
      significant=sigGeneSummaryTable[,sigIndex]
    }
    dSigGeneSummaryTable<-data.frame(Comparison=row.names(sigGeneSummaryTable),GeneInComparison=rowSums(sigGeneSummaryTable),NotSignificant=notSignificant,Significant=significant)
    write.csv(dSigGeneSummaryTable,paste0(allprefix, "_DESeq2_sigGeneSummary.csv"),row.names=FALSE)
  }
}
# Persist analysis provenance alongside the results: the full R session
# details, the DESeq2 version string, and the complete workspace image,
# each named after the input file.
writeLines(capture.output(sessionInfo()),
           paste0(basename(inputfile), ".DESeq2.SessionInfo.txt"))
deseq2version <- paste0("DESeq2,v", packageVersion("DESeq2"))
writeLines(deseq2version,
           paste0(basename(inputfile), ".DESeq2.version"))
save.image(paste0(basename(inputfile), ".DESeq2.RData"))
|
# Convert spherical coordinates (long, lat, radius) to Cartesian (x, y, z).
#
# 'long' may be a plain vector, or a 1/2/3-column matrix or data.frame
# holding (long [, lat [, radius]]); extra columns override the
# corresponding arguments. With deg = TRUE (default) long/lat are taken in
# degrees, otherwise radians. Returns a matrix with columns x, y, z.
.sph2car = function(long, lat, radius=1, deg=TRUE){
  # Unpack matrix/data.frame input; columns 2 and 3 supply lat and radius.
  if (is.matrix(long) || is.data.frame(long)) {
    if (ncol(long) == 1) {
      long = long[, 1]
    }
    else if (ncol(long) == 2) {
      lat = long[, 2]
      long = long[, 1]
    }
    else if (ncol(long) == 3) {
      radius = long[, 3]
      lat = long[, 2]
      long = long[, 1]
    }
  }
  # Checked after unpacking so a multi-column 'long' can satisfy 'lat'.
  # (Scalar condition, so use short-circuiting ||.)
  if (missing(long) || missing(lat)) {
    stop("Missing full spherical 3D input data.")
  }
  if (deg) {
    long = long * pi/180
    lat = lat * pi/180
  }
  # BUG FIX: the original assigned to a local variable literally named
  # 'return' ('return = cbind(...)'), which made the function yield its
  # result invisibly as the value of the assignment. Call return() properly
  # so the matrix is returned (and printed) as expected.
  return(cbind(x = radius * cos(long) * cos(lat),
               y = radius * sin(long) * cos(lat),
               z = radius * sin(lat)))
}
# Convert sky coordinates (RA, Dec) into image pixel coordinates under a
# FITS WCS. The WCS can come from a raw header string (handed to the C
# routine Cwcs_head_s2p), a parsed keyvalues list (handed to Cwcs_s2p), or -
# with inherit=TRUE - from session options cached by a previous Rwcs call.
# Supports sexagesimal input, multi-core chunked evaluation via
# foreach/doParallel, and nudge-retry passes for points the C routines fail
# to invert. Returns a 2-column matrix with columns x, y (NA rows where the
# conversion failed); with pixcen='R' the output is shifted by -0.5, and
# loc.diff is subtracted for cut-out images.
Rwcs_s2p = function(RA, Dec, keyvalues=NULL, pixcen='FITS', loc.diff=c(0,0), coord.type='deg',
                    sep=':', header=NULL, inherit=TRUE, WCSref=NULL, ctrl=2L, cores=1, ...){
  assertList(keyvalues, null.ok = TRUE)
  # A header given as a character vector (one card per element) is parsed
  # into a keyvalues list plus a single raw header string via the optional
  # Rfits package.
  if(is.character(header) & is.null(keyvalues)){
    if(length(header) > 1){
      if(requireNamespace("Rfits", quietly = TRUE)){
        keyvalues = Rfits::Rfits_hdr_to_keyvalues(header)
        header = Rfits::Rfits_header_to_raw(Rfits::Rfits_keyvalues_to_header(keyvalues))
      }else{
        stop("The Rfits package is need to process the header. Install from GitHub asgr/Rfits.")
      }
    }
  }
  assertChoice(pixcen, c('R','FITS'))
  assertNumeric(loc.diff, len=2)
  assertChoice(coord.type, c('deg','sex'))
  assertCharacter(sep, len=1)
  # A two-column matrix in RA packs both coordinates.
  if(length(dim(RA))==2){
    Dec = RA[,2]
    RA = RA[,1]
  }
  # Sexagesimal input is converted to decimal degrees first.
  if(coord.type=='sex'){
    RA = hms2deg(RA,sep=sep)
    Dec = dms2deg(Dec,sep=sep)
  }
  assertNumeric(RA)
  assertNumeric(Dec, len = length(RA))
  # Fall back on WCS state cached in options() by earlier calls, but only
  # when no explicit WCS (and no ... keyword overrides) was supplied.
  if(inherit){
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      header = options()$current_header
    }
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      keyvalues = options()$current_keyvalues
    }
    if(is.null(WCSref)){
      WCSref = options()$current_WCSref
    }
  }
  if(length(header)==1){
    # Raw-header path: header is one string of 80-character FITS cards.
    nkey = nchar(header)/80
    # Alternate WCS solutions are tagged a-z; map the letter to 1-26
    # (0 selects the base WCS).
    if(!is.null(WCSref)){
      WCSref = tolower(WCSref)
      reflet = 1:26
      names(reflet) = letters
      if(! WCSref %in% letters){
        stop('WCS ref must be 0 (base WCS) or a letter [a-z]!')
      }
      WCSref = reflet[WCSref]
    }else{
      WCSref = 0
    }
    if(cores == 1L){
      output = Cwcs_head_s2p(
        RA = RA,
        Dec = Dec,
        header = header,
        nkey = nkey,
        WCSref = WCSref,
        ctrl=ctrl
      )
      # A dimension-less return appears to be a per-point status vector
      # (0 = good): rebuild an NA matrix and re-convert only the good rows.
      # NOTE(review): confirm against the Cwcs_head_s2p C source.
      if(is.null(dim(output))){
        good = which(output == 0)
        output = matrix(NA, length(RA), 2)
        if(length(good) > 0){
          output[good,] = Cwcs_head_s2p(
            RA = RA[good],
            Dec = Dec[good],
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
      }
      if(anyInfinite(output)){ #catch for weird inversion problems
        # Nudge failing coordinates by a tiny offset and retry.
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_s2p(
          RA = RA[bad] + 1e-12,
          Dec = Dec[bad] + 1e-12,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
      if(anyInfinite(output)){ #catch for weird inversion problems
        # Second retry with a larger nudge for stubborn points.
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_s2p(
          RA = RA[bad] + 1e-8,
          Dec = Dec[bad] + 1e-8,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
    }else{
      # Multi-core: split RA/Dec into near-equal chunks and convert each in
      # a doParallel worker, row-binding results back in input order.
      registerDoParallel(cores=cores)
      maxlen = length(RA)
      chunk = ceiling(maxlen/cores)
      i = RAsub = Decsub = NULL
      RA = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        RA[lo:hi]
      }
      Dec = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        Dec[lo:hi]
      }
      output = foreach(RAsub = RA, Decsub = Dec, .combine='rbind')%dopar%{
        # Same convert / status-rebuild / nudge-retry sequence as the
        # single-core branch, applied per chunk.
        temp = Cwcs_head_s2p(
          RA = RAsub,
          Dec = Decsub,
          header = header,
          nkey = nkey,
          WCSref = WCSref,
          ctrl=ctrl
        )
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(RAsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_head_s2p(
              RA = RAsub[good],
              Dec = Decsub[good],
              header = header,
              nkey = nkey,
              WCSref = WCSref
            )
          }
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_s2p(
            RA = RAsub[bad] + 1e-12,
            Dec = Decsub[bad] + 1e-12,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_s2p(
            RA = RAsub[bad] + 1e-8,
            Dec = Decsub[bad] + 1e-8,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        return(temp)
      }
    }
  }else{
    # Keyvalues path: normalise/complete the WCS list via Rwcs_keypass
    # (folding in any ... overrides), then call the keyword-based C
    # converter with the full CTYPE/CRVAL/CRPIX/CD/PV term set.
    keyvalues = Rwcs_keypass(keyvalues, ...)
    if(cores == 1L){
      output = Cwcs_s2p(
        RA = RA,
        Dec = Dec,
        CTYPE1 = keyvalues$CTYPE1,
        CTYPE2 = keyvalues$CTYPE2,
        CRVAL1 = keyvalues$CRVAL1,
        CRVAL2 = keyvalues$CRVAL2,
        CRPIX1 = keyvalues$CRPIX1,
        CRPIX2 = keyvalues$CRPIX2,
        CD1_1 = keyvalues$CD1_1,
        CD1_2 = keyvalues$CD1_2,
        CD2_1 = keyvalues$CD2_1,
        CD2_2 = keyvalues$CD2_2,
        RADESYS = keyvalues$RADESYS,
        EQUINOX = keyvalues$EQUINOX,
        PV1_0 = keyvalues$PV1_0,
        PV1_1 = keyvalues$PV1_1,
        PV1_2 = keyvalues$PV1_2,
        PV1_3 = keyvalues$PV1_3,
        PV1_4 = keyvalues$PV1_4,
        PV2_0 = keyvalues$PV2_0,
        PV2_1 = keyvalues$PV2_1,
        PV2_2 = keyvalues$PV2_2,
        PV2_3 = keyvalues$PV2_3,
        PV2_4 = keyvalues$PV2_4,
        PV2_5 = keyvalues$PV2_5
      )
      # As above: a vector return is treated as a status vector; retry the
      # good rows to obtain actual pixel positions.
      if(is.null(dim(output))){
        good = which(output == 0)
        output = matrix(NA, length(RA), 2)
        if(length(good)>0){
          output[good,] = Cwcs_s2p(
            RA = RA[good],
            Dec = Dec[good],
            CTYPE1 = keyvalues$CTYPE1,
            CTYPE2 = keyvalues$CTYPE2,
            CRVAL1 = keyvalues$CRVAL1,
            CRVAL2 = keyvalues$CRVAL2,
            CRPIX1 = keyvalues$CRPIX1,
            CRPIX2 = keyvalues$CRPIX2,
            CD1_1 = keyvalues$CD1_1,
            CD1_2 = keyvalues$CD1_2,
            CD2_1 = keyvalues$CD2_1,
            CD2_2 = keyvalues$CD2_2,
            RADESYS = keyvalues$RADESYS,
            EQUINOX = keyvalues$EQUINOX,
            PV1_0 = keyvalues$PV1_0,
            PV1_1 = keyvalues$PV1_1,
            PV1_2 = keyvalues$PV1_2,
            PV1_3 = keyvalues$PV1_3,
            PV1_4 = keyvalues$PV1_4,
            PV2_0 = keyvalues$PV2_0,
            PV2_1 = keyvalues$PV2_1,
            PV2_2 = keyvalues$PV2_2,
            PV2_3 = keyvalues$PV2_3,
            PV2_4 = keyvalues$PV2_4,
            PV2_5 = keyvalues$PV2_5
          )
        }
      }
    }else{
      # Multi-core keyvalues path: identical chunking scheme to the
      # raw-header branch above.
      registerDoParallel(cores=cores)
      maxlen = length(RA)
      chunk = ceiling(maxlen/cores)
      i = RAsub = Decsub = NULL
      RA = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        RA[lo:hi]
      }
      Dec = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        Dec[lo:hi]
      }
      output = foreach(RAsub = RA, Decsub = Dec, .combine='rbind')%dopar%{
        temp = Cwcs_s2p(
          RA = RAsub,
          Dec = Decsub,
          CTYPE1 = keyvalues$CTYPE1,
          CTYPE2 = keyvalues$CTYPE2,
          CRVAL1 = keyvalues$CRVAL1,
          CRVAL2 = keyvalues$CRVAL2,
          CRPIX1 = keyvalues$CRPIX1,
          CRPIX2 = keyvalues$CRPIX2,
          CD1_1 = keyvalues$CD1_1,
          CD1_2 = keyvalues$CD1_2,
          CD2_1 = keyvalues$CD2_1,
          CD2_2 = keyvalues$CD2_2,
          RADESYS = keyvalues$RADESYS,
          EQUINOX = keyvalues$EQUINOX,
          PV1_0 = keyvalues$PV1_0,
          PV1_1 = keyvalues$PV1_1,
          PV1_2 = keyvalues$PV1_2,
          PV1_3 = keyvalues$PV1_3,
          PV1_4 = keyvalues$PV1_4,
          PV2_0 = keyvalues$PV2_0,
          PV2_1 = keyvalues$PV2_1,
          PV2_2 = keyvalues$PV2_2,
          PV2_3 = keyvalues$PV2_3,
          PV2_4 = keyvalues$PV2_4,
          PV2_5 = keyvalues$PV2_5
        )
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(RAsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_s2p(
              RA = RAsub[good],
              Dec = Decsub[good],
              CTYPE1 = keyvalues$CTYPE1,
              CTYPE2 = keyvalues$CTYPE2,
              CRVAL1 = keyvalues$CRVAL1,
              CRVAL2 = keyvalues$CRVAL2,
              CRPIX1 = keyvalues$CRPIX1,
              CRPIX2 = keyvalues$CRPIX2,
              CD1_1 = keyvalues$CD1_1,
              CD1_2 = keyvalues$CD1_2,
              CD2_1 = keyvalues$CD2_1,
              CD2_2 = keyvalues$CD2_2,
              RADESYS = keyvalues$RADESYS,
              EQUINOX = keyvalues$EQUINOX,
              PV1_0 = keyvalues$PV1_0,
              PV1_1 = keyvalues$PV1_1,
              PV1_2 = keyvalues$PV1_2,
              PV1_3 = keyvalues$PV1_3,
              PV1_4 = keyvalues$PV1_4,
              PV2_0 = keyvalues$PV2_0,
              PV2_1 = keyvalues$PV2_1,
              PV2_2 = keyvalues$PV2_2,
              PV2_3 = keyvalues$PV2_3,
              PV2_4 = keyvalues$PV2_4,
              PV2_5 = keyvalues$PV2_5
            )
          }
        }
        return(temp)
      }
    }
  }
  # Shift into cut-out coordinates, then (optionally) the R pixel-centre
  # convention, which sits 0.5 pixel below the FITS convention.
  if(loc.diff[1] != 0){
    output[,1] = output[,1] - loc.diff[1]
  }
  if(loc.diff[2] != 0){
    output[,2] = output[,2] - loc.diff[2]
  }
  if(pixcen == 'R'){
    output[,1] = output[,1] - 0.5
    output[,2] = output[,2] - 0.5
  }
  colnames(output) = c('x','y')
  return(output)
}
# Convert pixel positions (x, y) to sky coordinates (RA, Dec).
#
# The WCS can be supplied either as a raw FITS header string (handed straight
# to wcslib, so any distortion it understands is supported) or as a list of
# parsed header keyvalues (restricted to the keywords Rwcs_keypass knows).
#
# x, y: pixel coordinates. 'x' may instead be a two column matrix of [x, y]
#   pairs, or an image matrix (in which case every pixel centre is converted).
# keyvalues: list of WCS keywords, e.g. as made by Rwcs_keypass or Rfits.
# pixcen: 'FITS' (centre of the first pixel is [1,1]) or 'R' (centre [0.5,0.5]).
# loc.diff: pixel offset applied when the image is a cutout of a larger frame.
# coord.type: output decimal degrees ('deg') or sexagesimal strings ('sex').
# sep: separator used for sexagesimal output.
# header: raw FITS header (length-1 character); a multi-line character header
#   is parsed via the Rfits package.
# inherit: if TRUE, fall back on the last used header/keyvalues/WCSref cached
#   in options() by other Rwcs functions.
# WCSref: alternative WCS solution to use (NULL/0 for base, or a letter [a-z]).
# ctrl: wcslib control/verbosity integer passed to the C routine.
# cores: number of cores used to convert large inputs in parallel.
# ...: additional keyword arguments passed through to Rwcs_keypass.
#
# Returns a two column matrix with columns 'RA' and 'Dec'.
Rwcs_p2s = function(x, y, keyvalues=NULL, pixcen='FITS', loc.diff=c(0,0), coord.type='deg',
                    sep=':', header=NULL, inherit=TRUE, WCSref=NULL, ctrl=2L, cores=1, ...){
  assertList(keyvalues, null.ok = TRUE)
  # If a multi-line character header is supplied (and keyvalues are not),
  # parse it to keyvalues and re-serialise to a clean raw header for wcslib.
  if(is.character(header) & is.null(keyvalues)){
    if(length(header) > 1){
      if(requireNamespace("Rfits", quietly = TRUE)){
        keyvalues = Rfits::Rfits_hdr_to_keyvalues(header)
        header = Rfits::Rfits_header_to_raw(Rfits::Rfits_keyvalues_to_header(keyvalues))
      }else{
        # Fixed message typo: "is need" -> "is needed".
        stop("The Rfits package is needed to process the header. Install from GitHub asgr/Rfits.")
      }
    }
  }
  assertChoice(pixcen, c('R','FITS'))
  assertNumeric(loc.diff, len=2)
  assertChoice(coord.type, c('deg','sex'))
  assertCharacter(sep, len=1)
  if(length(dim(x))==2){
    if(dim(x)[2]==2){
      # Two column input: treat as [x, y] pairs.
      y = x[,2]
      x = x[,1]
    }else{
      # Image matrix input: convert the centre of every pixel (R convention).
      x = expand.grid(1:dim(x)[1], 1:dim(x)[2])
      y = x[,2] - 0.5
      x = x[,1] - 0.5
      pixcen = 'R'
    }
  }
  if(pixcen == 'R'){
    # Shift R style pixel centres onto the FITS convention used by wcslib.
    x = as.numeric(x) + 0.5
    y = as.numeric(y) + 0.5
  }
  if(loc.diff[1] != 0){
    x = x + loc.diff[1]
  }
  if(loc.diff[2] != 0){
    y = y + loc.diff[2]
  }
  assertNumeric(x)
  assertNumeric(y, len = length(x))
  if(inherit){
    # Fall back on the WCS cached by previous Rwcs calls when nothing explicit
    # was provided (and no ... keyword overrides were given either).
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      header = options()$current_header
    }
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      keyvalues = options()$current_keyvalues
    }
    if(is.null(WCSref)){
      WCSref = options()$current_WCSref
    }
  }
  if(length(header)==1){
    # Raw header path: hand the concatenated 80-character cards to wcslib.
    nkey = nchar(header)/80
    if(!is.null(WCSref)){
      # Map a WCS letter [a-z] onto the integer index wcslib expects.
      WCSref = tolower(WCSref)
      reflet = 1:26
      names(reflet) = letters
      if(! WCSref %in% letters){
        stop('WCS ref must be 0 (base WCS) or a letter [a-z]!')
      }
      WCSref = reflet[WCSref]
    }else{
      WCSref = 0
    }
    if(cores == 1L){
      output = Cwcs_head_p2s(
        x = x,
        y = y,
        header = header,
        nkey = nkey,
        WCSref = WCSref,
        ctrl=ctrl
      )
      if(is.null(dim(output))){
        # The C routine returned a per-element status vector instead of a
        # coordinate matrix: retry only the elements flagged good (status 0),
        # leaving the rest as NA.
        # NOTE(review): the retry calls drop the explicit ctrl argument and
        # fall back to the default -- confirm this is intentional.
        good = which(output == 0)
        output = matrix(NA, length(x), 2)
        if(length(good) > 0){
          output[good,] = Cwcs_head_p2s(
            x = x[good],
            y = y[good],
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
      }
      if(anyInfinite(output)){ #catch for weird inversion problems
        # Nudge failing positions by a tiny amount to step off a singularity.
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_p2s(
          x = x[bad] + 1e-6,
          y = y[bad] + 1e-6,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
      if(anyInfinite(output)){ #catch for weird inversion problems
        # Second pass with a larger nudge for stubborn positions.
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_p2s(
          x = x[bad] + 1e-2,
          y = y[bad] + 1e-2,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
    }else{
      # Parallel path: split x/y into one contiguous chunk per core and rbind
      # the per-chunk results back together in order.
      registerDoParallel(cores=cores)
      maxlen = length(x)
      chunk = ceiling(maxlen/cores)
      i = xsub = ysub = NULL # quieten R CMD check notes about foreach variables
      x = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        x[lo:hi]
      }
      y = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        y[lo:hi]
      }
      output = foreach(xsub = x, ysub = y, .combine='rbind')%dopar%{
        # Same convert / retry / nudge cascade as the serial path above.
        temp = Cwcs_head_p2s(
          x = xsub,
          y = ysub,
          header = header,
          nkey = nkey,
          WCSref = WCSref,
          ctrl=ctrl
        )
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(xsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_head_p2s(
              x = xsub[good],
              y = ysub[good],
              header = header,
              nkey = nkey,
              WCSref = WCSref
            )
          }
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_p2s(
            x = xsub[bad] + 1e-6,
            y = ysub[bad] + 1e-6,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_p2s(
            x = xsub[bad] + 1e-2,
            y = ysub[bad] + 1e-2,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        return(temp)
      }
    }
  }else{
    # Keyvalues path: sanitise/complete the keywords then call the keyword
    # based C converter.
    keyvalues = Rwcs_keypass(keyvalues, ...)
    if(cores == 1L){
      output = Cwcs_p2s(
        x = x,
        y = y,
        CTYPE1 = keyvalues$CTYPE1,
        CTYPE2 = keyvalues$CTYPE2,
        CRVAL1 = keyvalues$CRVAL1,
        CRVAL2 = keyvalues$CRVAL2,
        CRPIX1 = keyvalues$CRPIX1,
        CRPIX2 = keyvalues$CRPIX2,
        CD1_1 = keyvalues$CD1_1,
        CD1_2 = keyvalues$CD1_2,
        CD2_1 = keyvalues$CD2_1,
        CD2_2 = keyvalues$CD2_2,
        RADESYS = keyvalues$RADESYS,
        EQUINOX = keyvalues$EQUINOX,
        PV1_0 = keyvalues$PV1_0,
        PV1_1 = keyvalues$PV1_1,
        PV1_2 = keyvalues$PV1_2,
        PV1_3 = keyvalues$PV1_3,
        PV1_4 = keyvalues$PV1_4,
        PV2_0 = keyvalues$PV2_0,
        PV2_1 = keyvalues$PV2_1,
        PV2_2 = keyvalues$PV2_2,
        PV2_3 = keyvalues$PV2_3,
        PV2_4 = keyvalues$PV2_4,
        PV2_5 = keyvalues$PV2_5
      )
      if(is.null(dim(output))){
        # Status vector returned: retry only the good elements (see above).
        good = which(output == 0)
        output = matrix(NA, length(x), 2)
        if(length(good)>0){
          output[good,] = Cwcs_p2s(
            x = x[good],
            y = y[good],
            CTYPE1 = keyvalues$CTYPE1,
            CTYPE2 = keyvalues$CTYPE2,
            CRVAL1 = keyvalues$CRVAL1,
            CRVAL2 = keyvalues$CRVAL2,
            CRPIX1 = keyvalues$CRPIX1,
            CRPIX2 = keyvalues$CRPIX2,
            CD1_1 = keyvalues$CD1_1,
            CD1_2 = keyvalues$CD1_2,
            CD2_1 = keyvalues$CD2_1,
            CD2_2 = keyvalues$CD2_2,
            RADESYS = keyvalues$RADESYS,
            EQUINOX = keyvalues$EQUINOX,
            PV1_0 = keyvalues$PV1_0,
            PV1_1 = keyvalues$PV1_1,
            PV1_2 = keyvalues$PV1_2,
            PV1_3 = keyvalues$PV1_3,
            PV1_4 = keyvalues$PV1_4,
            PV2_0 = keyvalues$PV2_0,
            PV2_1 = keyvalues$PV2_1,
            PV2_2 = keyvalues$PV2_2,
            PV2_3 = keyvalues$PV2_3,
            PV2_4 = keyvalues$PV2_4,
            PV2_5 = keyvalues$PV2_5
          )
        }
      }
    }else{
      # Parallel keyvalues path: chunk per core, convert, rbind in order.
      registerDoParallel(cores=cores)
      maxlen = length(x)
      chunk = ceiling(maxlen/cores)
      i = xsub = ysub = NULL # quieten R CMD check notes about foreach variables
      x = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        x[lo:hi]
      }
      y = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        y[lo:hi]
      }
      output = foreach(xsub = x, ysub = y, .combine='rbind')%dopar%{
        temp = Cwcs_p2s(
          x = xsub,
          y = ysub,
          CTYPE1 = keyvalues$CTYPE1,
          CTYPE2 = keyvalues$CTYPE2,
          CRVAL1 = keyvalues$CRVAL1,
          CRVAL2 = keyvalues$CRVAL2,
          CRPIX1 = keyvalues$CRPIX1,
          CRPIX2 = keyvalues$CRPIX2,
          CD1_1 = keyvalues$CD1_1,
          CD1_2 = keyvalues$CD1_2,
          CD2_1 = keyvalues$CD2_1,
          CD2_2 = keyvalues$CD2_2,
          RADESYS = keyvalues$RADESYS,
          EQUINOX = keyvalues$EQUINOX,
          PV1_0 = keyvalues$PV1_0,
          PV1_1 = keyvalues$PV1_1,
          PV1_2 = keyvalues$PV1_2,
          PV1_3 = keyvalues$PV1_3,
          PV1_4 = keyvalues$PV1_4,
          PV2_0 = keyvalues$PV2_0,
          PV2_1 = keyvalues$PV2_1,
          PV2_2 = keyvalues$PV2_2,
          PV2_3 = keyvalues$PV2_3,
          PV2_4 = keyvalues$PV2_4,
          PV2_5 = keyvalues$PV2_5
        )
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(xsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_p2s(
              x = xsub[good],
              y = ysub[good],
              CTYPE1 = keyvalues$CTYPE1,
              CTYPE2 = keyvalues$CTYPE2,
              CRVAL1 = keyvalues$CRVAL1,
              CRVAL2 = keyvalues$CRVAL2,
              CRPIX1 = keyvalues$CRPIX1,
              CRPIX2 = keyvalues$CRPIX2,
              CD1_1 = keyvalues$CD1_1,
              CD1_2 = keyvalues$CD1_2,
              CD2_1 = keyvalues$CD2_1,
              CD2_2 = keyvalues$CD2_2,
              RADESYS = keyvalues$RADESYS,
              EQUINOX = keyvalues$EQUINOX,
              PV1_0 = keyvalues$PV1_0,
              PV1_1 = keyvalues$PV1_1,
              PV1_2 = keyvalues$PV1_2,
              PV1_3 = keyvalues$PV1_3,
              PV1_4 = keyvalues$PV1_4,
              PV2_0 = keyvalues$PV2_0,
              PV2_1 = keyvalues$PV2_1,
              PV2_2 = keyvalues$PV2_2,
              PV2_3 = keyvalues$PV2_3,
              PV2_4 = keyvalues$PV2_4,
              PV2_5 = keyvalues$PV2_5
            )
          }
        }
        return(temp)
      }
    }
  }
  if(coord.type=='sex'){
    # Convert decimal degrees to sexagesimal strings.
    RAsex = deg2hms(output[,1], type='cat', sep=sep)
    Decsex = deg2dms(output[,2], type='cat', sep=sep)
    output = cbind(RAsex, Decsex)
  }
  colnames(output)=c('RA','Dec')
  return(output)
}
# Sanitise and complete a set of WCS header keyvalues.
#
# Missing keywords are filled, in order of preference, from the explicit
# function arguments, from the supplied keyvalues list, or derived from legacy
# alternatives (CDELTn/PCi_j/CROTA2 for the CD matrix, RADECSYS for RADESYS,
# EPOCH for EQUINOX). The result is validated (axis/projection/frame names,
# numeric scalars) and returned with class 'Rfits_keylist'.
#
# keyvalues: existing keyvalues list to complete (NULL starts from scratch).
# CTYPE1/CTYPE2: 8-character axis type + projection, e.g. 'RA---TAN'.
# CRVAL1/CRVAL2: reference sky coordinates (deg).
# CRPIX1/CRPIX2: reference pixel position.
# CD1_1..CD2_2: CD matrix terms (deg/pixel).
# RADESYS: celestial reference frame.
# EQUINOX: equinox year; 'infer' picks 2000 for ICRS/FK5, otherwise 1950.
# CUNIT1/CUNIT2: axis units (a blank value is coerced to 'deg').
# PV1_*/PV2_*: projection distortion terms (set to NA when absent).
# ...: ignored extra arguments (allows ... pass-through from callers).
#
# Returns the completed keyvalues list (class 'Rfits_keylist').
Rwcs_keypass=function(keyvalues=NULL,
                      CTYPE1='RA---TAN', CTYPE2='DEC--TAN',
                      CRVAL1=0, CRVAL2=0,
                      CRPIX1=0, CRPIX2=0,
                      CD1_1=1, CD1_2=0,
                      CD2_1=0, CD2_2=1,
                      RADESYS='ICRS',
                      EQUINOX='infer',
                      CUNIT1='deg', CUNIT2='deg',
                      PV1_0=NULL, PV1_1=NULL, PV1_2=NULL, PV1_3=NULL, PV1_4=NULL,
                      #PV1_5=NULL, PV1_6=NULL, PV1_7=NULL, PV1_8=NULL, PV1_9=NULL, PV1_10=NULL,
                      PV2_0=NULL, PV2_1=NULL, PV2_2=NULL, PV2_3=NULL, PV2_4=NULL, PV2_5=NULL,
                      #PV2_6=NULL, PV2_7=NULL, PV2_8=NULL, PV2_9=NULL, PV2_10=NULL,
                      ...){
  if(!is.null(keyvalues)){
    # Arguments not explicitly passed by the user inherit from keyvalues.
    if(missing(CTYPE1)){if(!is.null(keyvalues$CTYPE1)){CTYPE1 = keyvalues$CTYPE1}else{message('CTYPE1 is not defined!')}}
    if(missing(CTYPE2)){if(!is.null(keyvalues$CTYPE2)){CTYPE2 = keyvalues$CTYPE2}else{message('CTYPE2 is not defined!')}}
    if(missing(CRVAL1)){if(!is.null(keyvalues$CRVAL1)){CRVAL1 = keyvalues$CRVAL1}else{message('CRVAL1 is not defined!')}}
    if(missing(CRVAL2)){if(!is.null(keyvalues$CRVAL2)){CRVAL2 = keyvalues$CRVAL2}else{message('CRVAL2 is not defined!')}}
    if(missing(CRPIX1)){if(!is.null(keyvalues$CRPIX1)){CRPIX1 = keyvalues$CRPIX1}else{message('CRPIX1 is not defined!')}}
    if(missing(CRPIX2)){if(!is.null(keyvalues$CRPIX2)){CRPIX2 = keyvalues$CRPIX2}else{message('CRPIX2 is not defined!')}}
    if(missing(CUNIT1)){if(!is.null(keyvalues$CUNIT1)){CUNIT1 = keyvalues$CUNIT1}else{message('CUNIT1 is not defined!')}}
    if(missing(CUNIT2)){if(!is.null(keyvalues$CUNIT2)){CUNIT2 = keyvalues$CUNIT2}else{message('CUNIT2 is not defined!')}}
    # CD matrix terms: prefer explicit CDi_j, else derive from the legacy
    # CDELTn + PCi_j or CDELTn + CROTA2 representations.
    if(missing(CD1_1)){
      if(!is.null(keyvalues$CD1_1)){
        CD1_1 = keyvalues$CD1_1
      }else{
        if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$PC1_1)) & (!is.null(keyvalues$PC2_1))){
          CD1_1 = keyvalues$CDELT1 * keyvalues$PC1_1
          CD2_1 = keyvalues$CDELT1 * keyvalues$PC2_1
        }else if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$PC1_1)) & (is.null(keyvalues$PC2_1))){
          CD1_1 = keyvalues$CDELT1 * keyvalues$PC1_1
          CD2_1 = 0
        }else if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$CROTA2))){
          CD1_1 = keyvalues$CDELT1 * cos(keyvalues$CROTA2*pi/180)
          CD2_1 = keyvalues$CDELT1 * sin(keyvalues$CROTA2*pi/180)
        }else if((!is.null(keyvalues$CDELT1)) & (is.null(keyvalues$CROTA2))){
          CD1_1 = keyvalues$CDELT1
          CD2_1 = 0 #for clarity
        }else{
          stop('CD1_1 and/or CD2_1 is not definable!')
        }
      }
    }
    if(missing(CD2_2)){
      if(!is.null(keyvalues$CD2_2)) {
        CD2_2 = keyvalues$CD2_2
      }else{
        if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$PC2_2)) & (!is.null(keyvalues$PC1_2))){
          CD2_2 = keyvalues$CDELT2 * keyvalues$PC2_2
          CD1_2 = keyvalues$CDELT2 * keyvalues$PC1_2
        }else if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$PC2_2)) & (is.null(keyvalues$PC1_2))){
          CD2_2 = keyvalues$CDELT2 * keyvalues$PC2_2
          CD1_2 = 0
        }else if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$CROTA2))){
          CD2_2 = keyvalues$CDELT2 * cos(keyvalues$CROTA2*pi/180)
          CD1_2 = -keyvalues$CDELT2 * sin(keyvalues$CROTA2*pi/180)
        }else if((!is.null(keyvalues$CDELT2)) & (is.null(keyvalues$CROTA2))){
          CD2_2 = keyvalues$CDELT2
          CD1_2 = 0 #for clarity
        }else{
          stop('CD2_2 and/or CD1_2 is not definable!')
        }
      }
    }
    if(missing(CD1_2)){
      if(!is.null(keyvalues$CD1_2)){
        CD1_2 = keyvalues$CD1_2
      }else{
        CD1_2 = 0
        message('CD1_2 is not definable, setting to 0!')
      }
    }
    if(missing(CD2_1)){
      if(!is.null(keyvalues$CD2_1)){
        CD2_1 = keyvalues$CD2_1
      }else{
        CD2_1 = 0
        message('CD2_1 is not definable, setting to 0!')
      }
    }
    # NOTE(review): EQUINOX is only inherited from keyvalues when RADESYS is
    # also missing (the EQUINOX block sits inside this branch) -- confirm the
    # nesting is intentional.
    if (missing(RADESYS)) {
      if (!is.null(keyvalues$RADESYS)) {
        RADESYS = keyvalues$RADESYS
      } else{
        # RADECSYS is the deprecated spelling of RADESYS.
        if (!is.null(keyvalues$RADECSYS)) {
          RADESYS = keyvalues$RADECSYS
        }else{
          message('RADESYS is not defined (also no RADECSYS)!')
        }
      }
      if (!is.null(keyvalues$EQUINOX)) {
        EQUINOX = keyvalues$EQUINOX
      } else{
        # EPOCH is the deprecated spelling of EQUINOX.
        if (!is.null(keyvalues$EPOCH)) {
          EQUINOX = keyvalues$EPOCH
        }else{
          message('EQUINOX is not defined (also no EPOCH)!')
        }
      }
    }
    if(CUNIT1==' '){
      CUNIT1 = 'deg'
      message('CUNIT1 is blank, setting to \'deg\'!')
    }
    if(CUNIT2==' '){
      CUNIT2 = 'deg'
      # Fixed: this message previously reported CUNIT1 instead of CUNIT2.
      message('CUNIT2 is blank, setting to \'deg\'!')
    }
    # Projection distortion terms: inherit where present, else NA placeholder.
    if(is.null(PV1_0)){if(!is.null(keyvalues$PV1_0)){PV1_0 = keyvalues$PV1_0}else{PV1_0 = NA}}
    if(is.null(PV1_1)){if(!is.null(keyvalues$PV1_1)){PV1_1 = keyvalues$PV1_1}else{PV1_1 = NA}}
    if(is.null(PV1_2)){if(!is.null(keyvalues$PV1_2)){PV1_2 = keyvalues$PV1_2}else{PV1_2 = NA}}
    if(is.null(PV1_3)){if(!is.null(keyvalues$PV1_3)){PV1_3 = keyvalues$PV1_3}else{PV1_3 = NA}}
    if(is.null(PV1_4)){if(!is.null(keyvalues$PV1_4)){PV1_4 = keyvalues$PV1_4}else{PV1_4 = NA}}
    # Beyond this appears to be non-standard
    # if(is.null(PV1_5)){if(!is.null(keyvalues$PV1_5)){PV1_5 = keyvalues$PV1_5}else{PV1_5 = NA}}
    # if(is.null(PV1_6)){if(!is.null(keyvalues$PV1_6)){PV1_6 = keyvalues$PV1_6}else{PV1_6 = NA}}
    # if(is.null(PV1_7)){if(!is.null(keyvalues$PV1_7)){PV1_7 = keyvalues$PV1_7}else{PV1_7 = NA}}
    # if(is.null(PV1_8)){if(!is.null(keyvalues$PV1_8)){PV1_8 = keyvalues$PV1_8}else{PV1_8 = NA}}
    # if(is.null(PV1_9)){if(!is.null(keyvalues$PV1_9)){PV1_9 = keyvalues$PV1_9}else{PV1_9 = NA}}
    # if(is.null(PV1_10)){if(!is.null(keyvalues$PV1_10)){PV1_10 = keyvalues$PV1_10}else{PV1_10 = NA}}
    if(is.null(PV2_0)){if(!is.null(keyvalues$PV2_0)){PV2_0 = keyvalues$PV2_0}else{PV2_0 = NA}}
    if(is.null(PV2_1)){if(!is.null(keyvalues$PV2_1)){PV2_1 = keyvalues$PV2_1}else{PV2_1 = NA}}
    if(is.null(PV2_2)){if(!is.null(keyvalues$PV2_2)){PV2_2 = keyvalues$PV2_2}else{PV2_2 = NA}}
    if(is.null(PV2_3)){if(!is.null(keyvalues$PV2_3)){PV2_3 = keyvalues$PV2_3}else{PV2_3 = NA}}
    if(is.null(PV2_4)){if(!is.null(keyvalues$PV2_4)){PV2_4 = keyvalues$PV2_4}else{PV2_4 = NA}}
    if(is.null(PV2_5)){if(!is.null(keyvalues$PV2_5)){PV2_5 = keyvalues$PV2_5}else{PV2_5 = NA}}
    # Beyond this appears to be non-standard
    # if(is.null(PV2_6)){if(!is.null(keyvalues$PV2_6)){PV2_6 = keyvalues$PV2_6}else{PV2_6 = NA}}
    # if(is.null(PV2_7)){if(!is.null(keyvalues$PV2_7)){PV2_7 = keyvalues$PV2_7}else{PV2_7 = NA}}
    # if(is.null(PV2_8)){if(!is.null(keyvalues$PV2_8)){PV2_8 = keyvalues$PV2_8}else{PV2_8 = NA}}
    # if(is.null(PV2_9)){if(!is.null(keyvalues$PV2_9)){PV2_9 = keyvalues$PV2_9}else{PV2_9 = NA}}
    # if(is.null(PV2_10)){if(!is.null(keyvalues$PV2_10)){PV2_10 = keyvalues$PV2_10}else{PV2_10 = NA}}
  }else{
    # No keyvalues supplied: start fresh and fill absent PV terms with NA.
    keyvalues=list()
    if(is.null(PV1_0)){PV1_0 = NA}
    if(is.null(PV1_1)){PV1_1 = NA}
    if(is.null(PV1_2)){PV1_2 = NA}
    if(is.null(PV1_3)){PV1_3 = NA}
    if(is.null(PV1_4)){PV1_4 = NA}
    if(is.null(PV2_0)){PV2_0 = NA}
    if(is.null(PV2_1)){PV2_1 = NA}
    if(is.null(PV2_2)){PV2_2 = NA}
    if(is.null(PV2_3)){PV2_3 = NA}
    if(is.null(PV2_4)){PV2_4 = NA}
    if(is.null(PV2_5)){PV2_5 = NA}
  }
  if(EQUINOX == 'infer'){
    # Modern frames (ICRS/FK5) imply J2000; older frames imply B1950.
    if(RADESYS %in% c('ICRS', 'FK5')){EQUINOX = 2000}else{EQUINOX = 1950}
  }
  allowed_proj = c(
    "AZP", #zenithal/azimuthal perspective
    "SZP", #slant zenithal perspective
    "TAN", #gnomonic
    "STG", #stereographic
    "SIN", #orthographic/synthesis
    "NCP", #unofficially supported SIN-like projection
    "ARC", #zenithal/azimuthal equidistant
    "ZPN", #zenithal/azimuthal polynomial
    "ZEA", #zenithal/azimuthal equal area
    "AIR", #Airy’s projection
    "CYP", #cylindrical perspective
    "CEA", #cylindrical equal area
    "CAR", #plate carrée
    "MER", #Mercator’s projection
    "COP", #conic perspective
    "COE", #conic equal area
    "COD", #conic equidistant
    "COO", #conic orthomorphic
    "SFL", #Sanson-Flamsteed (“global sinusoid”)
    "PAR", #parabolic
    "MOL", #Mollweide’s projection
    "AIT", #Hammer-Aitoff
    "BON", #Bonne’s projection
    "PCO", #polyconic
    "TSC", #tangential spherical cube
    "CSC", #COBE quadrilateralized spherical cube
    "QSC", #quadrilateralized spherical cube
    "HPX", #HEALPix
    "XPH" #HEALPix polar, aka “butterfly”
  )
  allowed_axes = c(
    "RA", #right ascension
    "DEC", #declination
    "GLON", #galactic longitude
    "GLAT", #galactic latitude
    "ELON", #ecliptic longitude
    "ELAT", #ecliptic latitude
    "HLON", #helioecliptic longitude
    "HLAT", #helioecliptic latitude
    "SLON", #supergalactic longitude
    "SLAT" #supergalactic latitude
  )
  allowed_rade = c(
    "ICRS",
    "FK5",
    "FK4",
    "FK4-NO-E",
    "GAPPT"
  )
  assertCharacter(CTYPE1, len=1)
  assertCharacter(CTYPE2, len=1)
  # Strip distortion suffixes that wcslib (as wrapped here) does not support.
  if(grepl('-SIP', CTYPE1)){message('SIP not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-SIP', '', CTYPE1)}
  if(grepl('-SIP', CTYPE2)){message('SIP not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-SIP', '', CTYPE2)}
  if(grepl('-TPV', CTYPE1)){message('TPV not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-TPV', '', CTYPE1)}
  if(grepl('-TPV', CTYPE2)){message('TPV not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-TPV', '', CTYPE2)}
  if(grepl('-DSS', CTYPE1)){message('DSS not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-DSS', '', CTYPE1)}
  if(grepl('-DSS', CTYPE2)){message('DSS not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-DSS', '', CTYPE2)}
  if(grepl('-WAT', CTYPE1)){message('WAT not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-WAT', '', CTYPE1)}
  if(grepl('-WAT', CTYPE2)){message('WAT not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-WAT', '', CTYPE2)}
  if(grepl('-TPD', CTYPE1)){message('TPD not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-TPD', '', CTYPE1)}
  if(grepl('-TPD', CTYPE2)){message('TPD not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-TPD', '', CTYPE2)}
  if(nchar(CTYPE1) != 8){stop('CTYPE1 must be 8 characters!')}
  if(nchar(CTYPE2) != 8){stop('CTYPE2 must be 8 characters!')}
  # Split 'AXIS---PRJ' style codes on runs of dashes and validate both parts.
  split1=strsplit(CTYPE1, '-+')[[1]]
  split2=strsplit(CTYPE2, '-+')[[1]]
  assertCharacter(split1, len = 2)
  assertCharacter(split2, len = 2)
  assertChoice(split1[1], allowed_axes)
  assertChoice(split2[1], allowed_axes)
  assertChoice(split1[2], allowed_proj)
  assertChoice(split2[2], allowed_proj)
  assertNumeric(CRVAL1, len=1)
  assertNumeric(CRVAL2, len=1)
  assertNumeric(CRPIX1, len=1)
  assertNumeric(CRPIX2, len=1)
  assertNumeric(CD1_1, len=1)
  assertNumeric(CD1_2, len=1)
  assertNumeric(CD2_1, len=1)
  assertNumeric(CD2_2, len=1)
  assertCharacter(RADESYS, len=1)
  assertChoice(RADESYS, choices = allowed_rade)
  assertChoice(EQUINOX, choices = c(1950, 2000))
  assertNumeric(PV1_0, len=1, null.ok = FALSE)
  assertNumeric(PV1_1, len=1, null.ok = FALSE)
  assertNumeric(PV1_2, len=1, null.ok = FALSE)
  assertNumeric(PV1_3, len=1, null.ok = FALSE)
  assertNumeric(PV1_4, len=1, null.ok = FALSE)
  # assertNumeric(PV1_5, len=1, null.ok = FALSE)
  # assertNumeric(PV1_6, len=1, null.ok = FALSE)
  # assertNumeric(PV1_7, len=1, null.ok = FALSE)
  # assertNumeric(PV1_8, len=1, null.ok = FALSE)
  # assertNumeric(PV1_9, len=1, null.ok = FALSE)
  # assertNumeric(PV1_10, len=1, null.ok = FALSE)
  assertNumeric(PV2_0, len=1, null.ok = FALSE)
  assertNumeric(PV2_1, len=1, null.ok = FALSE)
  assertNumeric(PV2_2, len=1, null.ok = FALSE)
  assertNumeric(PV2_3, len=1, null.ok = FALSE)
  assertNumeric(PV2_4, len=1, null.ok = FALSE)
  assertNumeric(PV2_5, len=1, null.ok = FALSE)
  # assertNumeric(PV2_6, len=1, null.ok = FALSE)
  # assertNumeric(PV2_7, len=1, null.ok = FALSE)
  # assertNumeric(PV2_8, len=1, null.ok = FALSE)
  # assertNumeric(PV2_9, len=1, null.ok = FALSE)
  # assertNumeric(PV2_10, len=1, null.ok = FALSE)
  # Write the validated values back into the keyvalues list.
  keyvalues$CTYPE1 = CTYPE1
  keyvalues$CTYPE2 = CTYPE2
  keyvalues$CRVAL1 = CRVAL1
  keyvalues$CRVAL2 = CRVAL2
  keyvalues$CRPIX1 = CRPIX1
  keyvalues$CRPIX2 = CRPIX2
  keyvalues$CD1_1 = CD1_1
  keyvalues$CD1_2 = CD1_2
  keyvalues$CD2_1 = CD2_1
  keyvalues$CD2_2 = CD2_2
  keyvalues$RADESYS = RADESYS
  keyvalues$EQUINOX = EQUINOX
  keyvalues$CUNIT1 = CUNIT1
  keyvalues$CUNIT2 = CUNIT2
  keyvalues$PV1_0 = PV1_0
  keyvalues$PV1_1 = PV1_1
  keyvalues$PV1_2 = PV1_2
  keyvalues$PV1_3 = PV1_3
  keyvalues$PV1_4 = PV1_4
  # keyvalues$PV1_5 = PV1_5
  # keyvalues$PV1_6 = PV1_6
  # keyvalues$PV1_7 = PV1_7
  # keyvalues$PV1_8 = PV1_8
  # keyvalues$PV1_9 = PV1_9
  # keyvalues$PV1_10 = PV1_10
  keyvalues$PV2_0 = PV2_0
  keyvalues$PV2_1 = PV2_1
  keyvalues$PV2_2 = PV2_2
  keyvalues$PV2_3 = PV2_3
  keyvalues$PV2_4 = PV2_4
  keyvalues$PV2_5 = PV2_5
  # keyvalues$PV2_6 = PV2_6
  # keyvalues$PV2_7 = PV2_7
  # keyvalues$PV2_8 = PV2_8
  # keyvalues$PV2_9 = PV2_9
  # keyvalues$PV2_10 = PV2_10
  class(keyvalues) = 'Rfits_keylist'
  return(keyvalues)
}
# Compute the image pixel scale in arcsec per pixel.
#
# keyvalues: WCS keyvalues list; if NULL the last used keyvalues cached in
#   options()$current_keyvalues are used.
# CD1_1..CD2_2: CD matrix terms, used directly when no keyvalues are given.
# type: 'old' derives the scale from the CD matrix alone; 'new' measures the
#   on-sky separation of two positions one pixel apart at the image centre
#   (more general, since it samples the actual projection).
# dim: image dimensions for type='new'; defaults to c(NAXIS1, NAXIS2).
#
# Returns a single numeric pixel scale in asec/pixel.
Rwcs_pixscale = function(keyvalues=NULL, CD1_1=1, CD1_2=0, CD2_1=0, CD2_2=1, type='old', dim=NULL){
  assertList(keyvalues, null.ok = TRUE)
  # Fail fast on an unknown type rather than silently returning NULL.
  assertChoice(type, c('old','new'))
  if(is.null(keyvalues)){
    keyvalues = options()$current_keyvalues
  }
  if(type=='old'){
    if(!is.null(keyvalues)){
      keyvalues = Rwcs_keypass(keyvalues)
      CD1_1 = keyvalues$CD1_1
      CD1_2 = keyvalues$CD1_2
      CD2_1 = keyvalues$CD2_1
      CD2_2 = keyvalues$CD2_2
    }
    assertNumeric(CD1_1, len=1)
    assertNumeric(CD1_2, len=1)
    assertNumeric(CD2_1, len=1)
    assertNumeric(CD2_2, len=1)
    # Mean of the two axis scales, converted from deg/pix to asec/pix.
    return(3600*(sqrt(CD1_1^2+CD1_2^2)+sqrt(CD2_1^2+CD2_2^2))/2)
  }
  if(type=='new'){
    if(is.null(dim)){
      dim = c(keyvalues$NAXIS1, keyvalues$NAXIS2)
    }
    # Sky separation of two positions one pixel apart (diagonally) across the
    # image centre, with RA compressed by cos(Dec) before measuring distance.
    output = Rwcs_p2s(dim[1]/2 + c(-0.5,0.5), dim[2]/2 + c(-0.5,0.5), keyvalues = keyvalues)
    output[,1] = output[,1] * cos(mean(output[,2])*pi/180)
    return(2545.584412*sqrt(diff(output[,1])^2 + diff(output[,2])^2)) # 2545.584412 = 3600/sqrt(2)
  }
}
# Test which sky positions fall inside an image's pixel limits.
#
# RA, Dec: sky coordinates (deg) to test.
# xlim, ylim: pixel limits; if missing they are inferred from the keyvalues
#   passed via ... (ZNAXIS1/2 for compressed images, else NAXIS1/2).
# buffer: extra pixel margin accepted around the limits.
# plot: if TRUE, show the positions over the image footprint.
# style: draw the positions as 'points' or as a closed 'polygon'.
# pad: extra plot-range padding; add: add to an existing plot.
# ...: passed through to Rwcs_s2p (and the plot annotation helpers).
#
# Returns a logical vector, TRUE where the position is inside the limits.
Rwcs_in_image = function(RA, Dec, xlim, ylim, buffer=0, plot=FALSE, style='points', pad=0, add=FALSE, ...){
  extra = list(...)
  kv = extra$keyvalues
  # Infer missing pixel limits from the WCS keyvalues. Compressed (fpacked)
  # images store their true dimensions in ZNAXIS1/2 rather than NAXIS1/2.
  if(missing(xlim) & !is.null(kv)){
    if(isTRUE(kv$ZIMAGE)){
      xlim = c(0, kv$ZNAXIS1)
    }else if(!is.null(kv$NAXIS1)){
      xlim = c(0, kv$NAXIS1)
    }else{
      stop('Missing NAXIS1 in keyvalues, please specify xlim manually!')
    }
  }
  if(missing(ylim) & !is.null(kv)){
    if(isTRUE(kv$ZIMAGE)){
      ylim = c(0, kv$ZNAXIS2)
    }else if(!is.null(kv$NAXIS2)){
      ylim = c(0, kv$NAXIS2)
    }else{
      stop('Missing NAXIS2 in keyvalues, please specify ylim manually!')
    }
  }
  # Project the sky positions onto the pixel grid (R pixel convention).
  suppressMessages({
    pix = Rwcs_s2p(RA=RA, Dec=Dec, pixcen='R', ...)
  })
  if(plot){
    if(!add){
      magplot(NA, NA, xlim=xlim + c(-pad,pad), ylim=ylim + c(-pad,pad), pch='.', asp=1, side=FALSE)
    }
    switch(style,
      points = points(pix, col='red'),
      polygon = polygon(pix, col=hsv(alpha=0.2), border='red'),
      stop('style must be points or polygon!')
    )
    if(!add){
      # Outline the image footprint and add WCS annotations.
      rect(xleft=xlim[1], ybottom=ylim[1], xright=xlim[2], ytop=ylim[2])
      suppressMessages({
        Rwcs_grid(...)
        Rwcs_labels(...)
        Rwcs_compass(...)
      })
    }
  }
  # TRUE where the projected position lands within the (buffered) limits.
  in_x = pix[,'x'] >= xlim[1] - buffer & pix[,'x'] <= xlim[2] + buffer
  in_y = pix[,'y'] >= ylim[1] - buffer & pix[,'y'] <= ylim[2] + buffer
  return(as.logical(in_x & in_y))
}
# Determine whether two WCS image footprints overlap on the sky.
#
# keyvalues_test: keyvalues of the image being tested.
# keyvalues_ref: keyvalues of the reference image; if NULL, the last supplied
#   reference (cached in options()$current_keyvalues_ref) is re-used.
# buffer: pixel buffer forwarded to Rwcs_in_image when testing edge positions.
# plot/pad/add: plotting options forwarded to Rwcs_in_image.
#
# Returns TRUE if the footprints overlap, FALSE otherwise.
Rwcs_overlap = function(keyvalues_test, keyvalues_ref=NULL, buffer=0, plot=FALSE, pad=0, add=FALSE){
  if(is.null(keyvalues_ref)){
    message('Using last provided keyvalues_ref!')
    keyvalues_ref = options()$current_keyvalues_ref
    if(is.null(keyvalues_ref)){
      stop('User must provide keyvalues_ref!')
    }
  }else{
    # Cache the reference so subsequent calls may omit it.
    options(current_keyvalues_ref = keyvalues_ref)
  }
  # Compressed (fpacked) images carry their true dimensions in ZNAXIS1/2.
  if(isTRUE(keyvalues_test$ZIMAGE)){
    NAXIS1_test = keyvalues_test$ZNAXIS1
    NAXIS2_test = keyvalues_test$ZNAXIS2
  }else{
    NAXIS1_test = keyvalues_test$NAXIS1
    NAXIS2_test = keyvalues_test$NAXIS2
  }
  if(isTRUE(keyvalues_ref$ZIMAGE)){
    NAXIS1_ref = keyvalues_ref$ZNAXIS1
    NAXIS2_ref = keyvalues_ref$ZNAXIS2
  }else{
    NAXIS1_ref = keyvalues_ref$NAXIS1
    NAXIS2_ref = keyvalues_ref$NAXIS2
  }
  # Cheap rejection first: compare the angular separation of the two image
  # centres against the mean of the image diagonals (10% safety margin below).
  suppressMessages({
    pixscale_test = Rwcs_pixscale(keyvalues_test) # in asec
    pixscale_ref = Rwcs_pixscale(keyvalues_ref) # in asec
    centre_test = Rwcs_p2s(NAXIS1_test/2, NAXIS2_test/2, keyvalues=keyvalues_test, pixcen='R')
    centre_ref = Rwcs_p2s(NAXIS1_ref/2, NAXIS2_ref/2, keyvalues=keyvalues_ref, pixcen='R')
  })
  # Angular separation via the dot product of the centre unit vectors, clamped
  # to [-1, 1] to guard against floating point overshoot before acos().
  sph_test = .sph2car(centre_test)[1,]
  sph_ref = .sph2car(centre_ref)[1,]
  dot_prod = sph_test[1]*sph_ref[1] + sph_test[2]*sph_ref[2] + sph_test[3]*sph_ref[3]
  dot_prod[dot_prod < -1] = -1
  dot_prod[dot_prod > 1] = 1
  ang_sep = acos(dot_prod)/((pi/180)/3600) # in asec
  #using a 10% buffer to be safe
  max_sep = 1.1*(sqrt(NAXIS1_test^2 + NAXIS2_test^2)*pixscale_test + sqrt(NAXIS1_ref^2 + NAXIS2_ref^2)*pixscale_ref)/2
  if(ang_sep > max_sep){
    return(FALSE)
  }
  # Trace the full perimeter of the test image (left/top/right/bottom edges)
  # on the sky and check whether any perimeter point lies inside the reference.
  suppressMessages({
    left = Rwcs_p2s(rep(0,NAXIS2_test + 1L), 0:NAXIS2_test, keyvalues=keyvalues_test, pixcen='R')
    top = Rwcs_p2s(0:NAXIS1_test, rep(NAXIS2_test,NAXIS1_test + 1L), keyvalues=keyvalues_test, pixcen='R')
    right = Rwcs_p2s(rep(NAXIS1_test,NAXIS2_test + 1L), NAXIS2_test:0 , keyvalues=keyvalues_test, pixcen='R')
    bottom = Rwcs_p2s(NAXIS1_test:0, rep(0,NAXIS1_test + 1L), keyvalues=keyvalues_test, pixcen='R')
  })
  test_in_ref = any(Rwcs_in_image(RA=c(left[,'RA'], top[,'RA'], right[,'RA'], bottom[,'RA']), Dec=c(left[,'Dec'], top[,'Dec'], right[,'Dec'], bottom[,'Dec']), buffer=buffer, plot=plot, style='polygon', pad=pad, add=add, keyvalues=keyvalues_ref))
  if(test_in_ref){
    return(test_in_ref)
  }else{
    # The reference may sit entirely inside the test image (so no perimeter
    # point of the test falls inside it); check the reverse containment too.
    suppressMessages({
      left = Rwcs_p2s(rep(0,NAXIS2_ref + 1L), 0:NAXIS2_ref, keyvalues=keyvalues_ref, pixcen='R')
      top = Rwcs_p2s(0:NAXIS1_ref, rep(NAXIS2_ref,NAXIS1_ref + 1L), keyvalues=keyvalues_ref, pixcen='R')
      right = Rwcs_p2s(rep(NAXIS1_ref,NAXIS2_ref + 1L), NAXIS2_ref:0 , keyvalues=keyvalues_ref, pixcen='R')
      bottom = Rwcs_p2s(NAXIS1_ref:0, rep(0,NAXIS1_ref + 1L), keyvalues=keyvalues_ref, pixcen='R')
    })
    ref_in_test = any(Rwcs_in_image(RA=c(left[,'RA'], top[,'RA'], right[,'RA'], bottom[,'RA']), Dec=c(left[,'Dec'], top[,'Dec'], right[,'Dec'], bottom[,'Dec']), buffer=buffer, keyvalues=keyvalues_test))
    return(ref_in_test)
  }
}
if (is.matrix(long) || is.data.frame(long)) {
if (ncol(long) == 1) {
long = long[, 1]
}
else if (ncol(long) == 2) {
lat = long[, 2]
long = long[, 1]
}
else if (ncol(long) == 3) {
radius = long[, 3]
lat = long[, 2]
long = long[, 1]
}
}
if (missing(long) | missing(lat)) {
stop("Missing full spherical 3D input data.")
}
if (deg) {
long = long * pi/180
lat = lat * pi/180
}
return = cbind(x = radius * cos(long) * cos(lat), y = radius *
sin(long) * cos(lat), z = radius * sin(lat))
}
Rwcs_s2p = function(RA, Dec, keyvalues=NULL, pixcen='FITS', loc.diff=c(0,0), coord.type='deg',
sep=':', header=NULL, inherit=TRUE, WCSref=NULL, ctrl=2L, cores=1, ...){
assertList(keyvalues, null.ok = TRUE)
if(is.character(header) & is.null(keyvalues)){
if(length(header) > 1){
if(requireNamespace("Rfits", quietly = TRUE)){
keyvalues = Rfits::Rfits_hdr_to_keyvalues(header)
header = Rfits::Rfits_header_to_raw(Rfits::Rfits_keyvalues_to_header(keyvalues))
}else{
stop("The Rfits package is need to process the header. Install from GitHub asgr/Rfits.")
}
}
}
assertChoice(pixcen, c('R','FITS'))
assertNumeric(loc.diff, len=2)
assertChoice(coord.type, c('deg','sex'))
assertCharacter(sep, len=1)
if(length(dim(RA))==2){
Dec = RA[,2]
RA = RA[,1]
}
if(coord.type=='sex'){
RA = hms2deg(RA,sep=sep)
Dec = dms2deg(Dec,sep=sep)
}
assertNumeric(RA)
assertNumeric(Dec, len = length(RA))
if(inherit){
if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
header = options()$current_header
}
if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
keyvalues = options()$current_keyvalues
}
if(is.null(WCSref)){
WCSref = options()$current_WCSref
}
}
if(length(header)==1){
nkey = nchar(header)/80
if(!is.null(WCSref)){
WCSref = tolower(WCSref)
reflet = 1:26
names(reflet) = letters
if(! WCSref %in% letters){
stop('WCS ref must be 0 (base WCS) or a letter [a-z]!')
}
WCSref = reflet[WCSref]
}else{
WCSref = 0
}
if(cores == 1L){
output = Cwcs_head_s2p(
RA = RA,
Dec = Dec,
header = header,
nkey = nkey,
WCSref = WCSref,
ctrl=ctrl
)
if(is.null(dim(output))){
good = which(output == 0)
output = matrix(NA, length(RA), 2)
if(length(good) > 0){
output[good,] = Cwcs_head_s2p(
RA = RA[good],
Dec = Dec[good],
header = header,
nkey = nkey,
WCSref = WCSref
)
}
}
if(anyInfinite(output)){ #catch for weird inversion problems
bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
output[bad,] = Cwcs_head_s2p(
RA = RA[bad] + 1e-12,
Dec = Dec[bad] + 1e-12,
header = header,
nkey = nkey,
WCSref = WCSref
)
}
if(anyInfinite(output)){ #catch for weird inversion problems
bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
output[bad,] = Cwcs_head_s2p(
RA = RA[bad] + 1e-8,
Dec = Dec[bad] + 1e-8,
header = header,
nkey = nkey,
WCSref = WCSref
)
}
}else{
registerDoParallel(cores=cores)
maxlen = length(RA)
chunk = ceiling(maxlen/cores)
i = RAsub = Decsub = NULL
RA = foreach(i = 1:cores)%do%{
lo = (i - 1L)*chunk + 1L
hi = min(lo + chunk - 1L, maxlen)
RA[lo:hi]
}
Dec = foreach(i = 1:cores)%do%{
lo = (i - 1L)*chunk + 1L
hi = min(lo + chunk - 1L, maxlen)
Dec[lo:hi]
}
output = foreach(RAsub = RA, Decsub = Dec, .combine='rbind')%dopar%{
temp = Cwcs_head_s2p(
RA = RAsub,
Dec = Decsub,
header = header,
nkey = nkey,
WCSref = WCSref,
ctrl=ctrl
)
if(is.null(dim(temp))){
good = which(temp == 0)
temp = matrix(NA, length(RAsub), 2)
if(length(good)>0){
temp[good,] = Cwcs_head_s2p(
RA = RAsub[good],
Dec = Decsub[good],
header = header,
nkey = nkey,
WCSref = WCSref
)
}
}
if(anyInfinite(temp)){ #catch for weird inversion problems
bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
temp[bad,] = Cwcs_head_s2p(
RA = RAsub[bad] + 1e-12,
Dec = Decsub[bad] + 1e-12,
header = header,
nkey = nkey,
WCSref = WCSref
)
}
if(anyInfinite(temp)){ #catch for weird inversion problems
bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
temp[bad,] = Cwcs_head_s2p(
RA = RAsub[bad] + 1e-8,
Dec = Decsub[bad] + 1e-8,
header = header,
nkey = nkey,
WCSref = WCSref
)
}
return(temp)
}
}
}else{
keyvalues = Rwcs_keypass(keyvalues, ...)
if(cores == 1L){
output = Cwcs_s2p(
RA = RA,
Dec = Dec,
CTYPE1 = keyvalues$CTYPE1,
CTYPE2 = keyvalues$CTYPE2,
CRVAL1 = keyvalues$CRVAL1,
CRVAL2 = keyvalues$CRVAL2,
CRPIX1 = keyvalues$CRPIX1,
CRPIX2 = keyvalues$CRPIX2,
CD1_1 = keyvalues$CD1_1,
CD1_2 = keyvalues$CD1_2,
CD2_1 = keyvalues$CD2_1,
CD2_2 = keyvalues$CD2_2,
RADESYS = keyvalues$RADESYS,
EQUINOX = keyvalues$EQUINOX,
PV1_0 = keyvalues$PV1_0,
PV1_1 = keyvalues$PV1_1,
PV1_2 = keyvalues$PV1_2,
PV1_3 = keyvalues$PV1_3,
PV1_4 = keyvalues$PV1_4,
PV2_0 = keyvalues$PV2_0,
PV2_1 = keyvalues$PV2_1,
PV2_2 = keyvalues$PV2_2,
PV2_3 = keyvalues$PV2_3,
PV2_4 = keyvalues$PV2_4,
PV2_5 = keyvalues$PV2_5
)
if(is.null(dim(output))){
good = which(output == 0)
output = matrix(NA, length(RA), 2)
if(length(good)>0){
output[good,] = Cwcs_s2p(
RA = RA[good],
Dec = Dec[good],
CTYPE1 = keyvalues$CTYPE1,
CTYPE2 = keyvalues$CTYPE2,
CRVAL1 = keyvalues$CRVAL1,
CRVAL2 = keyvalues$CRVAL2,
CRPIX1 = keyvalues$CRPIX1,
CRPIX2 = keyvalues$CRPIX2,
CD1_1 = keyvalues$CD1_1,
CD1_2 = keyvalues$CD1_2,
CD2_1 = keyvalues$CD2_1,
CD2_2 = keyvalues$CD2_2,
RADESYS = keyvalues$RADESYS,
EQUINOX = keyvalues$EQUINOX,
PV1_0 = keyvalues$PV1_0,
PV1_1 = keyvalues$PV1_1,
PV1_2 = keyvalues$PV1_2,
PV1_3 = keyvalues$PV1_3,
PV1_4 = keyvalues$PV1_4,
PV2_0 = keyvalues$PV2_0,
PV2_1 = keyvalues$PV2_1,
PV2_2 = keyvalues$PV2_2,
PV2_3 = keyvalues$PV2_3,
PV2_4 = keyvalues$PV2_4,
PV2_5 = keyvalues$PV2_5
)
}
}
}else{
registerDoParallel(cores=cores)
maxlen = length(RA)
chunk = ceiling(maxlen/cores)
i = RAsub = Decsub = NULL
RA = foreach(i = 1:cores)%do%{
lo = (i - 1L)*chunk + 1L
hi = min(lo + chunk - 1L, maxlen)
RA[lo:hi]
}
Dec = foreach(i = 1:cores)%do%{
lo = (i - 1L)*chunk + 1L
hi = min(lo + chunk - 1L, maxlen)
Dec[lo:hi]
}
output = foreach(RAsub = RA, Decsub = Dec, .combine='rbind')%dopar%{
temp = Cwcs_s2p(
RA = RAsub,
Dec = Decsub,
CTYPE1 = keyvalues$CTYPE1,
CTYPE2 = keyvalues$CTYPE2,
CRVAL1 = keyvalues$CRVAL1,
CRVAL2 = keyvalues$CRVAL2,
CRPIX1 = keyvalues$CRPIX1,
CRPIX2 = keyvalues$CRPIX2,
CD1_1 = keyvalues$CD1_1,
CD1_2 = keyvalues$CD1_2,
CD2_1 = keyvalues$CD2_1,
CD2_2 = keyvalues$CD2_2,
RADESYS = keyvalues$RADESYS,
EQUINOX = keyvalues$EQUINOX,
PV1_0 = keyvalues$PV1_0,
PV1_1 = keyvalues$PV1_1,
PV1_2 = keyvalues$PV1_2,
PV1_3 = keyvalues$PV1_3,
PV1_4 = keyvalues$PV1_4,
PV2_0 = keyvalues$PV2_0,
PV2_1 = keyvalues$PV2_1,
PV2_2 = keyvalues$PV2_2,
PV2_3 = keyvalues$PV2_3,
PV2_4 = keyvalues$PV2_4,
PV2_5 = keyvalues$PV2_5
)
if(is.null(dim(temp))){
good = which(temp == 0)
temp = matrix(NA, length(RAsub), 2)
if(length(good)>0){
temp[good,] = Cwcs_s2p(
RA = RAsub[good],
Dec = Decsub[good],
CTYPE1 = keyvalues$CTYPE1,
CTYPE2 = keyvalues$CTYPE2,
CRVAL1 = keyvalues$CRVAL1,
CRVAL2 = keyvalues$CRVAL2,
CRPIX1 = keyvalues$CRPIX1,
CRPIX2 = keyvalues$CRPIX2,
CD1_1 = keyvalues$CD1_1,
CD1_2 = keyvalues$CD1_2,
CD2_1 = keyvalues$CD2_1,
CD2_2 = keyvalues$CD2_2,
RADESYS = keyvalues$RADESYS,
EQUINOX = keyvalues$EQUINOX,
PV1_0 = keyvalues$PV1_0,
PV1_1 = keyvalues$PV1_1,
PV1_2 = keyvalues$PV1_2,
PV1_3 = keyvalues$PV1_3,
PV1_4 = keyvalues$PV1_4,
PV2_0 = keyvalues$PV2_0,
PV2_1 = keyvalues$PV2_1,
PV2_2 = keyvalues$PV2_2,
PV2_3 = keyvalues$PV2_3,
PV2_4 = keyvalues$PV2_4,
PV2_5 = keyvalues$PV2_5
)
}
}
return(temp)
}
}
}
if(loc.diff[1] != 0){
output[,1] = output[,1] - loc.diff[1]
}
if(loc.diff[2] != 0){
output[,2] = output[,2] - loc.diff[2]
}
if(pixcen == 'R'){
output[,1] = output[,1] - 0.5
output[,2] = output[,2] - 0.5
}
colnames(output) = c('x','y')
return(output)
}
# Convert image pixel positions (x, y) to sky coordinates (RA, Dec).
#
# Two back ends are used:
#   * if a single raw FITS header string is supplied, the full wcslib path
#     Cwcs_head_p2s() is called (supports alternative WCS solutions via WCSref);
#   * otherwise an explicit keyvalues list is normalised with Rwcs_keypass()
#     and passed to the simpler Cwcs_p2s() interface.
#
# Arguments:
#   x, y       pixel coordinates; x may also be an N x 2 matrix of (x, y) pairs,
#              or an image-like matrix (then every pixel centre is converted).
#   keyvalues  list of FITS WCS keywords (alternative to header).
#   pixcen     'FITS' (pixel centre at integer coords) or 'R' (R raster
#              convention; 0.5 is added to recover the FITS convention).
#   loc.diff   (x, y) offset of a cutout relative to the original image.
#   coord.type 'deg' for decimal degrees, 'sex' for sexagesimal strings.
#   sep        separator used for sexagesimal output.
#   header     raw FITS header (single string of 80-char cards) or a character
#              vector that is first converted via Rfits.
#   inherit    if TRUE, fall back on options()$current_header /
#              current_keyvalues / current_WCSref when nothing is supplied.
#   WCSref     alternative WCS letter [a-z] (NULL/0 = base WCS); header path only.
#   ctrl       verbosity/control flag forwarded to wcslib.
#   cores      number of parallel workers (foreach/doParallel) for large inputs.
#   ...        extra keywords forwarded to Rwcs_keypass().
#
# Returns: N x 2 matrix with columns 'RA' and 'Dec' (character if coord.type='sex').
Rwcs_p2s = function(x, y, keyvalues=NULL, pixcen='FITS', loc.diff=c(0,0), coord.type='deg',
                    sep=':', header=NULL, inherit=TRUE, WCSref=NULL, ctrl=2L, cores=1, ...){
  assertList(keyvalues, null.ok = TRUE)
  # A multi-element character header must first be converted to a raw
  # single-string header that wcslib can parse; this needs Rfits.
  if(is.character(header) & is.null(keyvalues)){
    if(length(header) > 1){
      if(requireNamespace("Rfits", quietly = TRUE)){
        keyvalues = Rfits::Rfits_hdr_to_keyvalues(header)
        header = Rfits::Rfits_header_to_raw(Rfits::Rfits_keyvalues_to_header(keyvalues))
      }else{
        # NOTE(review): typo in message ("is need" -> "is needed"); left
        # unchanged here since this is a documentation-only pass.
        stop("The Rfits package is need to process the header. Install from GitHub asgr/Rfits.")
      }
    }
  }
  assertChoice(pixcen, c('R','FITS'))
  assertNumeric(loc.diff, len=2)
  assertChoice(coord.type, c('deg','sex'))
  assertCharacter(sep, len=1)
  # Flexible x input: an N x 2 matrix is split into x/y columns; any other
  # matrix is treated as an image and every pixel centre is converted.
  if(length(dim(x))==2){
    if(dim(x)[2]==2){
      y = x[,2]
      x = x[,1]
    }else{
      x = expand.grid(1:dim(x)[1], 1:dim(x)[2])
      y = x[,2] - 0.5
      x = x[,1] - 0.5
      pixcen = 'R'
    }
  }
  # R raster convention -> FITS convention (pixel centres at integers).
  if(pixcen == 'R'){
    x = as.numeric(x) + 0.5
    y = as.numeric(y) + 0.5
  }
  # Undo any cutout offset so coordinates refer to the original image frame.
  if(loc.diff[1] != 0){
    x = x + loc.diff[1]
  }
  if(loc.diff[2] != 0){
    y = y + loc.diff[2]
  }
  assertNumeric(x)
  assertNumeric(y, len = length(x))
  # Inherit the last-used WCS from global options when nothing was passed.
  # The header check is done first so a stored header wins over stored keyvalues.
  if(inherit){
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      header = options()$current_header
    }
    if(is.null(keyvalues) & is.null(header) & length(list(...))==0){
      keyvalues = options()$current_keyvalues
    }
    if(is.null(WCSref)){
      WCSref = options()$current_WCSref
    }
  }
  if(length(header)==1){
    # Raw header path: header is one string of 80-character cards.
    nkey = nchar(header)/80
    # Map the alternative-WCS letter [a-z] onto the integer code wcslib expects.
    if(!is.null(WCSref)){
      WCSref = tolower(WCSref)
      reflet = 1:26
      names(reflet) = letters
      if(! WCSref %in% letters){
        stop('WCS ref must be 0 (base WCS) or a letter [a-z]!')
      }
      WCSref = reflet[WCSref]
    }else{
      WCSref = 0
    }
    if(cores == 1L){
      output = Cwcs_head_p2s(
        x = x,
        y = y,
        header = header,
        nkey = nkey,
        WCSref = WCSref,
        ctrl=ctrl
      )
      # A dimension-less return appears to be a per-point status vector
      # (0 = convertible); retry just the good points and NA the rest.
      # TODO confirm this convention against the C side.
      # NOTE(review): the retry calls below omit ctrl, so they run with
      # Cwcs_head_p2s's default -- confirm this is intentional.
      if(is.null(dim(output))){
        good = which(output == 0)
        output = matrix(NA, length(x), 2)
        if(length(good) > 0){
          output[good,] = Cwcs_head_p2s(
            x = x[good],
            y = y[good],
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
      }
      # Nudge failing points by a tiny offset to dodge singular inversions.
      if(anyInfinite(output)){ #catch for weird inversion problems
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_p2s(
          x = x[bad] + 1e-6,
          y = y[bad] + 1e-6,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
      # Second, coarser nudge if the first one was not enough.
      if(anyInfinite(output)){ #catch for weird inversion problems
        bad = unique(which(is.infinite(output), arr.ind = TRUE)[,1])
        output[bad,] = Cwcs_head_p2s(
          x = x[bad] + 1e-2,
          y = y[bad] + 1e-2,
          header = header,
          nkey = nkey,
          WCSref = WCSref
        )
      }
    }else{
      # Parallel path: split x/y into one contiguous chunk per core and
      # rbind the per-chunk results (row order is preserved by foreach).
      registerDoParallel(cores=cores)
      maxlen = length(x)
      chunk = ceiling(maxlen/cores)
      i = xsub = ysub = NULL
      x = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        x[lo:hi]
      }
      y = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        y[lo:hi]
      }
      output = foreach(xsub = x, ysub = y, .combine='rbind')%dopar%{
        temp = Cwcs_head_p2s(
          x = xsub,
          y = ysub,
          header = header,
          nkey = nkey,
          WCSref = WCSref,
          ctrl=ctrl
        )
        # Same status-vector / retry / nudge logic as the serial branch.
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(xsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_head_p2s(
              x = xsub[good],
              y = ysub[good],
              header = header,
              nkey = nkey,
              WCSref = WCSref
            )
          }
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_p2s(
            x = xsub[bad] + 1e-6,
            y = ysub[bad] + 1e-6,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        if(anyInfinite(temp)){ #catch for weird inversion problems
          bad = unique(which(is.infinite(temp), arr.ind = TRUE)[,1])
          temp[bad,] = Cwcs_head_p2s(
            x = xsub[bad] + 1e-2,
            y = ysub[bad] + 1e-2,
            header = header,
            nkey = nkey,
            WCSref = WCSref
          )
        }
        return(temp)
      }
    }
  }else{
    # keyvalues path: normalise/complete the keyword list, then call the
    # explicit-keyword C interface. Note this branch has no infinite-value
    # nudge retries, unlike the header path.
    keyvalues = Rwcs_keypass(keyvalues, ...)
    if(cores == 1L){
      output = Cwcs_p2s(
        x = x,
        y = y,
        CTYPE1 = keyvalues$CTYPE1,
        CTYPE2 = keyvalues$CTYPE2,
        CRVAL1 = keyvalues$CRVAL1,
        CRVAL2 = keyvalues$CRVAL2,
        CRPIX1 = keyvalues$CRPIX1,
        CRPIX2 = keyvalues$CRPIX2,
        CD1_1 = keyvalues$CD1_1,
        CD1_2 = keyvalues$CD1_2,
        CD2_1 = keyvalues$CD2_1,
        CD2_2 = keyvalues$CD2_2,
        RADESYS = keyvalues$RADESYS,
        EQUINOX = keyvalues$EQUINOX,
        PV1_0 = keyvalues$PV1_0,
        PV1_1 = keyvalues$PV1_1,
        PV1_2 = keyvalues$PV1_2,
        PV1_3 = keyvalues$PV1_3,
        PV1_4 = keyvalues$PV1_4,
        PV2_0 = keyvalues$PV2_0,
        PV2_1 = keyvalues$PV2_1,
        PV2_2 = keyvalues$PV2_2,
        PV2_3 = keyvalues$PV2_3,
        PV2_4 = keyvalues$PV2_4,
        PV2_5 = keyvalues$PV2_5
      )
      # Same dimension-less status-vector convention as the header path.
      if(is.null(dim(output))){
        good = which(output == 0)
        output = matrix(NA, length(x), 2)
        if(length(good)>0){
          output[good,] = Cwcs_p2s(
            x = x[good],
            y = y[good],
            CTYPE1 = keyvalues$CTYPE1,
            CTYPE2 = keyvalues$CTYPE2,
            CRVAL1 = keyvalues$CRVAL1,
            CRVAL2 = keyvalues$CRVAL2,
            CRPIX1 = keyvalues$CRPIX1,
            CRPIX2 = keyvalues$CRPIX2,
            CD1_1 = keyvalues$CD1_1,
            CD1_2 = keyvalues$CD1_2,
            CD2_1 = keyvalues$CD2_1,
            CD2_2 = keyvalues$CD2_2,
            RADESYS = keyvalues$RADESYS,
            EQUINOX = keyvalues$EQUINOX,
            PV1_0 = keyvalues$PV1_0,
            PV1_1 = keyvalues$PV1_1,
            PV1_2 = keyvalues$PV1_2,
            PV1_3 = keyvalues$PV1_3,
            PV1_4 = keyvalues$PV1_4,
            PV2_0 = keyvalues$PV2_0,
            PV2_1 = keyvalues$PV2_1,
            PV2_2 = keyvalues$PV2_2,
            PV2_3 = keyvalues$PV2_3,
            PV2_4 = keyvalues$PV2_4,
            PV2_5 = keyvalues$PV2_5
          )
        }
      }
    }else{
      # Parallel keyvalues path: identical chunking scheme to the header path.
      registerDoParallel(cores=cores)
      maxlen = length(x)
      chunk = ceiling(maxlen/cores)
      i = xsub = ysub = NULL
      x = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        x[lo:hi]
      }
      y = foreach(i = 1:cores)%do%{
        lo = (i - 1L)*chunk + 1L
        hi = min(lo + chunk - 1L, maxlen)
        y[lo:hi]
      }
      output = foreach(xsub = x, ysub = y, .combine='rbind')%dopar%{
        temp = Cwcs_p2s(
          x = xsub,
          y = ysub,
          CTYPE1 = keyvalues$CTYPE1,
          CTYPE2 = keyvalues$CTYPE2,
          CRVAL1 = keyvalues$CRVAL1,
          CRVAL2 = keyvalues$CRVAL2,
          CRPIX1 = keyvalues$CRPIX1,
          CRPIX2 = keyvalues$CRPIX2,
          CD1_1 = keyvalues$CD1_1,
          CD1_2 = keyvalues$CD1_2,
          CD2_1 = keyvalues$CD2_1,
          CD2_2 = keyvalues$CD2_2,
          RADESYS = keyvalues$RADESYS,
          EQUINOX = keyvalues$EQUINOX,
          PV1_0 = keyvalues$PV1_0,
          PV1_1 = keyvalues$PV1_1,
          PV1_2 = keyvalues$PV1_2,
          PV1_3 = keyvalues$PV1_3,
          PV1_4 = keyvalues$PV1_4,
          PV2_0 = keyvalues$PV2_0,
          PV2_1 = keyvalues$PV2_1,
          PV2_2 = keyvalues$PV2_2,
          PV2_3 = keyvalues$PV2_3,
          PV2_4 = keyvalues$PV2_4,
          PV2_5 = keyvalues$PV2_5
        )
        if(is.null(dim(temp))){
          good = which(temp == 0)
          temp = matrix(NA, length(xsub), 2)
          if(length(good)>0){
            temp[good,] = Cwcs_p2s(
              x = xsub[good],
              y = ysub[good],
              CTYPE1 = keyvalues$CTYPE1,
              CTYPE2 = keyvalues$CTYPE2,
              CRVAL1 = keyvalues$CRVAL1,
              CRVAL2 = keyvalues$CRVAL2,
              CRPIX1 = keyvalues$CRPIX1,
              CRPIX2 = keyvalues$CRPIX2,
              CD1_1 = keyvalues$CD1_1,
              CD1_2 = keyvalues$CD1_2,
              CD2_1 = keyvalues$CD2_1,
              CD2_2 = keyvalues$CD2_2,
              RADESYS = keyvalues$RADESYS,
              EQUINOX = keyvalues$EQUINOX,
              PV1_0 = keyvalues$PV1_0,
              PV1_1 = keyvalues$PV1_1,
              PV1_2 = keyvalues$PV1_2,
              PV1_3 = keyvalues$PV1_3,
              PV1_4 = keyvalues$PV1_4,
              PV2_0 = keyvalues$PV2_0,
              PV2_1 = keyvalues$PV2_1,
              PV2_2 = keyvalues$PV2_2,
              PV2_3 = keyvalues$PV2_3,
              PV2_4 = keyvalues$PV2_4,
              PV2_5 = keyvalues$PV2_5
            )
          }
        }
        return(temp)
      }
    }
  }
  # Optional conversion of decimal degrees to sexagesimal strings.
  if(coord.type=='sex'){
    RAsex = deg2hms(output[,1], type='cat', sep=sep)
    Decsex = deg2dms(output[,2], type='cat', sep=sep)
    output = cbind(RAsex, Decsex)
  }
  colnames(output)=c('RA','Dec')
  return(output)
}
# Normalise and validate a FITS WCS keyword set.
#
# Fills missing keywords from an existing keyvalues list (deriving the CD
# matrix from CDELT/PC/CROTA2 conventions where necessary), substitutes NA for
# absent PV distortion terms, validates the projection/axis/frame choices, and
# returns a complete 'Rfits_keylist'. Explicitly supplied arguments always win
# over entries in keyvalues.
#
# Arguments:
#   keyvalues  existing keyword list to inherit from (NULL starts fresh).
#   CTYPE1/2   8-character axis+projection codes, e.g. 'RA---TAN'.
#   CRVAL1/2   reference sky coordinates (deg).
#   CRPIX1/2   reference pixel.
#   CD*_*      linear transformation (CD) matrix elements.
#   RADESYS    celestial reference frame (see allowed_rade below).
#   EQUINOX    epoch; 'infer' picks 2000 for ICRS/FK5, else 1950.
#   CUNIT1/2   axis units (blank is coerced to 'deg').
#   PV1_*/PV2_* optional projection distortion parameters (NA when absent).
#   ...        ignored extras (allows blind forwarding of dots).
#
# Returns: list of class 'Rfits_keylist' with all of the above populated.
#
# Fix in this revision: the blank-CUNIT2 branch previously emitted the message
# 'CUNIT1 is blank...' (copy-paste error); it now correctly names CUNIT2.
Rwcs_keypass=function(keyvalues=NULL,
                      CTYPE1='RA---TAN', CTYPE2='DEC--TAN',
                      CRVAL1=0, CRVAL2=0,
                      CRPIX1=0, CRPIX2=0,
                      CD1_1=1, CD1_2=0,
                      CD2_1=0, CD2_2=1,
                      RADESYS='ICRS',
                      EQUINOX='infer',
                      CUNIT1='deg', CUNIT2='deg',
                      PV1_0=NULL, PV1_1=NULL, PV1_2=NULL, PV1_3=NULL, PV1_4=NULL,
                      #PV1_5=NULL, PV1_6=NULL, PV1_7=NULL, PV1_8=NULL, PV1_9=NULL, PV1_10=NULL,
                      PV2_0=NULL, PV2_1=NULL, PV2_2=NULL, PV2_3=NULL, PV2_4=NULL, PV2_5=NULL,
                      #PV2_6=NULL, PV2_7=NULL, PV2_8=NULL, PV2_9=NULL, PV2_10=NULL,
                      ...){
  if(!is.null(keyvalues)){
    # Simple keywords: only inherit from keyvalues when the caller did not
    # pass the argument explicitly (missing() check).
    if(missing(CTYPE1)){if(!is.null(keyvalues$CTYPE1)){CTYPE1 = keyvalues$CTYPE1}else{message('CTYPE1 is not defined!')}}
    if(missing(CTYPE2)){if(!is.null(keyvalues$CTYPE2)){CTYPE2 = keyvalues$CTYPE2}else{message('CTYPE2 is not defined!')}}
    if(missing(CRVAL1)){if(!is.null(keyvalues$CRVAL1)){CRVAL1 = keyvalues$CRVAL1}else{message('CRVAL1 is not defined!')}}
    if(missing(CRVAL2)){if(!is.null(keyvalues$CRVAL2)){CRVAL2 = keyvalues$CRVAL2}else{message('CRVAL2 is not defined!')}}
    if(missing(CRPIX1)){if(!is.null(keyvalues$CRPIX1)){CRPIX1 = keyvalues$CRPIX1}else{message('CRPIX1 is not defined!')}}
    if(missing(CRPIX2)){if(!is.null(keyvalues$CRPIX2)){CRPIX2 = keyvalues$CRPIX2}else{message('CRPIX2 is not defined!')}}
    if(missing(CUNIT1)){if(!is.null(keyvalues$CUNIT1)){CUNIT1 = keyvalues$CUNIT1}else{message('CUNIT1 is not defined!')}}
    if(missing(CUNIT2)){if(!is.null(keyvalues$CUNIT2)){CUNIT2 = keyvalues$CUNIT2}else{message('CUNIT2 is not defined!')}}
    # CD matrix column 1: prefer an explicit CD1_1; otherwise derive it from
    # the CDELT+PC convention, the CDELT+CROTA2 (rotation) convention, or a
    # plain CDELT scaling, in that order.
    if(missing(CD1_1)){
      if(!is.null(keyvalues$CD1_1)){
        CD1_1 = keyvalues$CD1_1
      }else{
        if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$PC1_1)) & (!is.null(keyvalues$PC2_1))){
          CD1_1 = keyvalues$CDELT1 * keyvalues$PC1_1
          CD2_1 = keyvalues$CDELT1 * keyvalues$PC2_1
        }else if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$PC1_1)) & (is.null(keyvalues$PC2_1))){
          CD1_1 = keyvalues$CDELT1 * keyvalues$PC1_1
          CD2_1 = 0
        }else if((!is.null(keyvalues$CDELT1)) & (!is.null(keyvalues$CROTA2))){
          CD1_1 = keyvalues$CDELT1 * cos(keyvalues$CROTA2*pi/180)
          CD2_1 = keyvalues$CDELT1 * sin(keyvalues$CROTA2*pi/180)
        }else if((!is.null(keyvalues$CDELT1)) & (is.null(keyvalues$CROTA2))){
          CD1_1 = keyvalues$CDELT1
          CD2_1 = 0 #for clarity
        }else{
          stop('CD1_1 and/or CD2_1 is not definable!')
        }
      }
    }
    # CD matrix column 2: same fall-back ladder as above for CD2_2/CD1_2.
    if(missing(CD2_2)){
      if(!is.null(keyvalues$CD2_2)) {
        CD2_2 = keyvalues$CD2_2
      }else{
        if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$PC2_2)) & (!is.null(keyvalues$PC1_2))){
          CD2_2 = keyvalues$CDELT2 * keyvalues$PC2_2
          CD1_2 = keyvalues$CDELT2 * keyvalues$PC1_2
        }else if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$PC2_2)) & (is.null(keyvalues$PC1_2))){
          CD2_2 = keyvalues$CDELT2 * keyvalues$PC2_2
          CD1_2 = 0
        }else if((!is.null(keyvalues$CDELT2)) & (!is.null(keyvalues$CROTA2))){
          CD2_2 = keyvalues$CDELT2 * cos(keyvalues$CROTA2*pi/180)
          CD1_2 = -keyvalues$CDELT2 * sin(keyvalues$CROTA2*pi/180)
        }else if((!is.null(keyvalues$CDELT2)) & (is.null(keyvalues$CROTA2))){
          CD2_2 = keyvalues$CDELT2
          CD1_2 = 0 #for clarity
        }else{
          stop('CD2_2 and/or CD1_2 is not definable!')
        }
      }
    }
    # Off-diagonal terms default to 0 when nothing above filled them in.
    if(missing(CD1_2)){
      if(!is.null(keyvalues$CD1_2)){
        CD1_2 = keyvalues$CD1_2
      }else{
        CD1_2 = 0
        message('CD1_2 is not definable, setting to 0!')
      }
    }
    if(missing(CD2_1)){
      if(!is.null(keyvalues$CD2_1)){
        CD2_1 = keyvalues$CD2_1
      }else{
        CD2_1 = 0
        message('CD2_1 is not definable, setting to 0!')
      }
    }
    # Frame/epoch: accept the deprecated RADECSYS/EPOCH spellings as fall-backs.
    # NOTE(review): the EQUINOX inheritance below only runs when RADESYS was
    # not supplied by the caller (it is nested inside this missing(RADESYS)
    # block) -- confirm that is intentional.
    if (missing(RADESYS)) {
      if (!is.null(keyvalues$RADESYS)) {
        RADESYS = keyvalues$RADESYS
      } else{
        if (!is.null(keyvalues$RADECSYS)) {
          RADESYS = keyvalues$RADECSYS
        }else{
          message('RADESYS is not defined (also no RADECSYS)!')
        }
      }
      if (!is.null(keyvalues$EQUINOX)) {
        EQUINOX = keyvalues$EQUINOX
      } else{
        if (!is.null(keyvalues$EPOCH)) {
          EQUINOX = keyvalues$EPOCH
        }else{
          message('EQUINOX is not defined (also no EPOCH)!')
        }
      }
    }
    # Blank units (a single space in the header card) are coerced to 'deg'.
    if(CUNIT1==' '){
      CUNIT1 = 'deg'
      message('CUNIT1 is blank, setting to \'deg\'!')
    }
    if(CUNIT2==' '){
      CUNIT2 = 'deg'
      # Fixed: previously this message incorrectly said CUNIT1.
      message('CUNIT2 is blank, setting to \'deg\'!')
    }
    # PV distortion terms: inherit where available, otherwise NA (the C layer
    # treats NA as "not present").
    if(is.null(PV1_0)){if(!is.null(keyvalues$PV1_0)){PV1_0 = keyvalues$PV1_0}else{PV1_0 = NA}}
    if(is.null(PV1_1)){if(!is.null(keyvalues$PV1_1)){PV1_1 = keyvalues$PV1_1}else{PV1_1 = NA}}
    if(is.null(PV1_2)){if(!is.null(keyvalues$PV1_2)){PV1_2 = keyvalues$PV1_2}else{PV1_2 = NA}}
    if(is.null(PV1_3)){if(!is.null(keyvalues$PV1_3)){PV1_3 = keyvalues$PV1_3}else{PV1_3 = NA}}
    if(is.null(PV1_4)){if(!is.null(keyvalues$PV1_4)){PV1_4 = keyvalues$PV1_4}else{PV1_4 = NA}}
    # Beyond this appears to be non-standard
    # if(is.null(PV1_5)){if(!is.null(keyvalues$PV1_5)){PV1_5 = keyvalues$PV1_5}else{PV1_5 = NA}}
    # if(is.null(PV1_6)){if(!is.null(keyvalues$PV1_6)){PV1_6 = keyvalues$PV1_6}else{PV1_6 = NA}}
    # if(is.null(PV1_7)){if(!is.null(keyvalues$PV1_7)){PV1_7 = keyvalues$PV1_7}else{PV1_7 = NA}}
    # if(is.null(PV1_8)){if(!is.null(keyvalues$PV1_8)){PV1_8 = keyvalues$PV1_8}else{PV1_8 = NA}}
    # if(is.null(PV1_9)){if(!is.null(keyvalues$PV1_9)){PV1_9 = keyvalues$PV1_9}else{PV1_9 = NA}}
    # if(is.null(PV1_10)){if(!is.null(keyvalues$PV1_10)){PV1_10 = keyvalues$PV1_10}else{PV1_10 = NA}}
    if(is.null(PV2_0)){if(!is.null(keyvalues$PV2_0)){PV2_0 = keyvalues$PV2_0}else{PV2_0 = NA}}
    if(is.null(PV2_1)){if(!is.null(keyvalues$PV2_1)){PV2_1 = keyvalues$PV2_1}else{PV2_1 = NA}}
    if(is.null(PV2_2)){if(!is.null(keyvalues$PV2_2)){PV2_2 = keyvalues$PV2_2}else{PV2_2 = NA}}
    if(is.null(PV2_3)){if(!is.null(keyvalues$PV2_3)){PV2_3 = keyvalues$PV2_3}else{PV2_3 = NA}}
    if(is.null(PV2_4)){if(!is.null(keyvalues$PV2_4)){PV2_4 = keyvalues$PV2_4}else{PV2_4 = NA}}
    if(is.null(PV2_5)){if(!is.null(keyvalues$PV2_5)){PV2_5 = keyvalues$PV2_5}else{PV2_5 = NA}}
    # Beyond this appears to be non-standard
    # if(is.null(PV2_6)){if(!is.null(keyvalues$PV2_6)){PV2_6 = keyvalues$PV2_6}else{PV2_6 = NA}}
    # if(is.null(PV2_7)){if(!is.null(keyvalues$PV2_7)){PV2_7 = keyvalues$PV2_7}else{PV2_7 = NA}}
    # if(is.null(PV2_8)){if(!is.null(keyvalues$PV2_8)){PV2_8 = keyvalues$PV2_8}else{PV2_8 = NA}}
    # if(is.null(PV2_9)){if(!is.null(keyvalues$PV2_9)){PV2_9 = keyvalues$PV2_9}else{PV2_9 = NA}}
    # if(is.null(PV2_10)){if(!is.null(keyvalues$PV2_10)){PV2_10 = keyvalues$PV2_10}else{PV2_10 = NA}}
  }else{
    # No keyvalues supplied: start from an empty list and NA out any PV terms
    # the caller left as NULL.
    keyvalues=list()
    if(is.null(PV1_0)){PV1_0 = NA}
    if(is.null(PV1_1)){PV1_1 = NA}
    if(is.null(PV1_2)){PV1_2 = NA}
    if(is.null(PV1_3)){PV1_3 = NA}
    if(is.null(PV1_4)){PV1_4 = NA}
    if(is.null(PV2_0)){PV2_0 = NA}
    if(is.null(PV2_1)){PV2_1 = NA}
    if(is.null(PV2_2)){PV2_2 = NA}
    if(is.null(PV2_3)){PV2_3 = NA}
    if(is.null(PV2_4)){PV2_4 = NA}
    if(is.null(PV2_5)){PV2_5 = NA}
  }
  # 'infer' epoch: modern frames imply J2000, older ones B1950.
  if(EQUINOX == 'infer'){
    if(RADESYS %in% c('ICRS', 'FK5')){EQUINOX = 2000}else{EQUINOX = 1950}
  }
  allowed_proj = c(
    "AZP", #zenithal/azimuthal perspective
    "SZP", #slant zenithal perspective
    "TAN", #gnomonic
    "STG", #stereographic
    "SIN", #orthographic/synthesis
    "NCP", #unofficially supported SIN-like projection
    "ARC", #zenithal/azimuthal equidistant
    "ZPN", #zenithal/azimuthal polynomial
    "ZEA", #zenithal/azimuthal equal area
    "AIR", #Airy's projection
    "CYP", #cylindrical perspective
    "CEA", #cylindrical equal area
    "CAR", #plate carree
    "MER", #Mercator's projection
    "COP", #conic perspective
    "COE", #conic equal area
    "COD", #conic equidistant
    "COO", #conic orthomorphic
    "SFL", #Sanson-Flamsteed ("global sinusoid")
    "PAR", #parabolic
    "MOL", #Mollweide's projection
    "AIT", #Hammer-Aitoff
    "BON", #Bonne's projection
    "PCO", #polyconic
    "TSC", #tangential spherical cube
    "CSC", #COBE quadrilateralized spherical cube
    "QSC", #quadrilateralized spherical cube
    "HPX", #HEALPix
    "XPH" #HEALPix polar, aka "butterfly"
  )
  allowed_axes = c(
    "RA", #right ascension
    "DEC", #declination
    "GLON", #galactic longitude
    "GLAT", #galactic latitude
    "ELON", #ecliptic longitude
    "ELAT", #ecliptic latitude
    "HLON", #helioecliptic longitude
    "HLAT", #helioecliptic latitude
    "SLON", #supergalactic longitude
    "SLAT" #supergalactic latitude
  )
  allowed_rade = c(
    "ICRS",
    "FK5",
    "FK4",
    "FK4-NO-E",
    "GAPPT"
  )
  # Validate the CTYPEs: strip unsupported distortion suffixes, then check the
  # 'AXIS---PRJ' structure against the allowed axis and projection codes.
  assertCharacter(CTYPE1, len=1)
  assertCharacter(CTYPE2, len=1)
  if(grepl('-SIP', CTYPE1)){message('SIP not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-SIP', '', CTYPE1)}
  if(grepl('-SIP', CTYPE2)){message('SIP not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-SIP', '', CTYPE2)}
  if(grepl('-TPV', CTYPE1)){message('TPV not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-TPV', '', CTYPE1)}
  if(grepl('-TPV', CTYPE2)){message('TPV not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-TPV', '', CTYPE2)}
  if(grepl('-DSS', CTYPE1)){message('DSS not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-DSS', '', CTYPE1)}
  if(grepl('-DSS', CTYPE2)){message('DSS not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-DSS', '', CTYPE2)}
  if(grepl('-WAT', CTYPE1)){message('WAT not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-WAT', '', CTYPE1)}
  if(grepl('-WAT', CTYPE2)){message('WAT not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-WAT', '', CTYPE2)}
  if(grepl('-TPD', CTYPE1)){message('TPD not supported for CTYPE1 and ignored!'); CTYPE1=gsub('-TPD', '', CTYPE1)}
  if(grepl('-TPD', CTYPE2)){message('TPD not supported for CTYPE2 and ignored!'); CTYPE2=gsub('-TPD', '', CTYPE2)}
  if(nchar(CTYPE1) != 8){stop('CTYPE1 must be 8 characters!')}
  if(nchar(CTYPE2) != 8){stop('CTYPE2 must be 8 characters!')}
  split1=strsplit(CTYPE1, '-+')[[1]]
  split2=strsplit(CTYPE2, '-+')[[1]]
  assertCharacter(split1, len = 2)
  assertCharacter(split2, len = 2)
  assertChoice(split1[1], allowed_axes)
  assertChoice(split2[1], allowed_axes)
  assertChoice(split1[2], allowed_proj)
  assertChoice(split2[2], allowed_proj)
  assertNumeric(CRVAL1, len=1)
  assertNumeric(CRVAL2, len=1)
  assertNumeric(CRPIX1, len=1)
  assertNumeric(CRPIX2, len=1)
  assertNumeric(CD1_1, len=1)
  assertNumeric(CD1_2, len=1)
  assertNumeric(CD2_1, len=1)
  assertNumeric(CD2_2, len=1)
  assertCharacter(RADESYS, len=1)
  assertChoice(RADESYS, choices = allowed_rade)
  assertChoice(EQUINOX, choices = c(1950, 2000))
  assertNumeric(PV1_0, len=1, null.ok = FALSE)
  assertNumeric(PV1_1, len=1, null.ok = FALSE)
  assertNumeric(PV1_2, len=1, null.ok = FALSE)
  assertNumeric(PV1_3, len=1, null.ok = FALSE)
  assertNumeric(PV1_4, len=1, null.ok = FALSE)
  # assertNumeric(PV1_5, len=1, null.ok = FALSE)
  # assertNumeric(PV1_6, len=1, null.ok = FALSE)
  # assertNumeric(PV1_7, len=1, null.ok = FALSE)
  # assertNumeric(PV1_8, len=1, null.ok = FALSE)
  # assertNumeric(PV1_9, len=1, null.ok = FALSE)
  # assertNumeric(PV1_10, len=1, null.ok = FALSE)
  assertNumeric(PV2_0, len=1, null.ok = FALSE)
  assertNumeric(PV2_1, len=1, null.ok = FALSE)
  assertNumeric(PV2_2, len=1, null.ok = FALSE)
  assertNumeric(PV2_3, len=1, null.ok = FALSE)
  assertNumeric(PV2_4, len=1, null.ok = FALSE)
  assertNumeric(PV2_5, len=1, null.ok = FALSE)
  # assertNumeric(PV2_6, len=1, null.ok = FALSE)
  # assertNumeric(PV2_7, len=1, null.ok = FALSE)
  # assertNumeric(PV2_8, len=1, null.ok = FALSE)
  # assertNumeric(PV2_9, len=1, null.ok = FALSE)
  # assertNumeric(PV2_10, len=1, null.ok = FALSE)
  # Write the fully resolved values back into the (possibly fresh) list.
  keyvalues$CTYPE1 = CTYPE1
  keyvalues$CTYPE2 = CTYPE2
  keyvalues$CRVAL1 = CRVAL1
  keyvalues$CRVAL2 = CRVAL2
  keyvalues$CRPIX1 = CRPIX1
  keyvalues$CRPIX2 = CRPIX2
  keyvalues$CD1_1 = CD1_1
  keyvalues$CD1_2 = CD1_2
  keyvalues$CD2_1 = CD2_1
  keyvalues$CD2_2 = CD2_2
  keyvalues$RADESYS = RADESYS
  keyvalues$EQUINOX = EQUINOX
  keyvalues$CUNIT1 = CUNIT1
  keyvalues$CUNIT2 = CUNIT2
  keyvalues$PV1_0 = PV1_0
  keyvalues$PV1_1 = PV1_1
  keyvalues$PV1_2 = PV1_2
  keyvalues$PV1_3 = PV1_3
  keyvalues$PV1_4 = PV1_4
  # keyvalues$PV1_5 = PV1_5
  # keyvalues$PV1_6 = PV1_6
  # keyvalues$PV1_7 = PV1_7
  # keyvalues$PV1_8 = PV1_8
  # keyvalues$PV1_9 = PV1_9
  # keyvalues$PV1_10 = PV1_10
  keyvalues$PV2_0 = PV2_0
  keyvalues$PV2_1 = PV2_1
  keyvalues$PV2_2 = PV2_2
  keyvalues$PV2_3 = PV2_3
  keyvalues$PV2_4 = PV2_4
  keyvalues$PV2_5 = PV2_5
  # keyvalues$PV2_6 = PV2_6
  # keyvalues$PV2_7 = PV2_7
  # keyvalues$PV2_8 = PV2_8
  # keyvalues$PV2_9 = PV2_9
  # keyvalues$PV2_10 = PV2_10
  class(keyvalues) = 'Rfits_keylist'
  return(keyvalues)
}
# Estimate the pixel scale (asec/pixel) of a WCS.
#
# type='old' works directly from the CD matrix (mean of the two column
# norms); type='new' converts two pixel positions one pixel apart through the
# full WCS near the image centre and measures their angular separation.
Rwcs_pixscale = function(keyvalues=NULL, CD1_1=1, CD1_2=0, CD2_1=0, CD2_2=1, type='old', dim=NULL){
  assertList(keyvalues, null.ok = TRUE)
  # Fall back on the most recently used WCS stored in options().
  if(is.null(keyvalues)){
    keyvalues = options()$current_keyvalues
  }
  if(type=='old'){
    # Pull the CD matrix out of the (normalised) keyvalues when available;
    # otherwise the CD arguments passed in directly are used.
    if(!is.null(keyvalues)){
      keyvalues = Rwcs_keypass(keyvalues)
      CD1_1 = keyvalues$CD1_1
      CD1_2 = keyvalues$CD1_2
      CD2_1 = keyvalues$CD2_1
      CD2_2 = keyvalues$CD2_2
    }
    assertNumeric(CD1_1, len=1)
    assertNumeric(CD1_2, len=1)
    assertNumeric(CD2_1, len=1)
    assertNumeric(CD2_2, len=1)
    # Average the per-axis scales (deg/pix) and convert to asec/pix.
    xscale = sqrt(CD1_1^2+CD1_2^2)
    yscale = sqrt(CD2_1^2+CD2_2^2)
    return(3600*(xscale + yscale)/2)
  }
  if(type=='new'){
    if(is.null(dim)){
      dim = c(keyvalues$NAXIS1, keyvalues$NAXIS2)
    }
    # Sky positions of two points one pixel apart (diagonally) at the centre.
    sky = Rwcs_p2s(dim[1]/2 + c(-0.5,0.5), dim[2]/2 + c(-0.5,0.5), keyvalues = keyvalues)
    # Correct the RA step for the cos(Dec) convergence of meridians.
    sky[,1] = sky[,1] * cos(mean(sky[,2])*pi/180)
    return(2545.584412*sqrt(diff(sky[,1])^2 + diff(sky[,2])^2)) # 2545.584412 = 3600/sqrt(2)
  }
}
# Test which (RA, Dec) positions fall inside an image footprint.
#
# xlim/ylim default to the image dimensions found in a keyvalues list passed
# via ..., honouring fpack'd images (ZIMAGE/ZNAXIS*). Optionally draws the
# positions as points or a polygon on a pixel-space plot. Returns a logical
# vector, TRUE where the projected position is within the (buffered) limits.
Rwcs_in_image = function(RA, Dec, xlim, ylim, buffer=0, plot=FALSE, style='points', pad=0, add=FALSE, ...){
  # Peek at an optional keyvalues argument hidden in the dots.
  extra = list(...)
  key = extra$keyvalues
  if(missing(xlim) & !is.null(key)){
    if(isTRUE(key$ZIMAGE)){
      xlim = c(0, key$ZNAXIS1)
    }else if(!is.null(key$NAXIS1)){
      xlim = c(0, key$NAXIS1)
    }else{
      stop('Missing NAXIS1 in keyvalues, please specify xlim manually!')
    }
  }
  if(missing(ylim) & !is.null(key)){
    if(isTRUE(key$ZIMAGE)){
      ylim = c(0, key$ZNAXIS2)
    }else if(!is.null(key$NAXIS2)){
      ylim = c(0, key$NAXIS2)
    }else{
      stop('Missing NAXIS2 in keyvalues, please specify ylim manually!')
    }
  }
  # Project the sky positions into pixel space (R raster convention).
  suppressMessages(
    pix <- Rwcs_s2p(RA=RA, Dec=Dec, pixcen='R', ...)
  )
  if(plot){
    base_plot = !add
    if(base_plot){
      magplot(NA, NA, xlim=xlim + c(-pad,pad), ylim=ylim + c(-pad,pad), pch='.', asp=1, side=FALSE)
    }
    if(style=='points'){
      points(pix, col='red')
    }else if(style=='polygon'){
      polygon(pix, col=hsv(alpha=0.2), border='red')
    }else{
      stop('style must be points or polygon!')
    }
    if(base_plot){
      # Outline the image footprint and decorate with the WCS grid/labels.
      rect(xleft=xlim[1], ybottom=ylim[1], xright=xlim[2], ytop=ylim[2])
      suppressMessages({
        Rwcs_grid(...)
        Rwcs_labels(...)
        Rwcs_compass(...)
      })
    }
  }
  in_x = pix[,'x'] >= xlim[1] - buffer & pix[,'x'] <= xlim[2] + buffer
  in_y = pix[,'y'] >= ylim[1] - buffer & pix[,'y'] <= ylim[2] + buffer
  return(as.logical(in_x & in_y))
}
# Test whether two WCS footprints overlap on the sky.
#
# Strategy: first a cheap great-circle separation test between the two image
# centres (with a 10% safety margin on the combined diagonals); if that cannot
# rule out overlap, sample every edge pixel of the test image and check whether
# any lands inside the reference image, and vice versa (the second direction
# catches the case where one image is entirely inside the other).
#
# Returns TRUE/FALSE. The last keyvalues_ref used is cached in
# options(current_keyvalues_ref) for convenience on repeat calls.
Rwcs_overlap = function(keyvalues_test, keyvalues_ref=NULL, buffer=0, plot=FALSE, pad=0, add=FALSE){
  if(is.null(keyvalues_ref)){
    message('Using last provided keyvalues_ref!')
    keyvalues_ref = options()$current_keyvalues_ref
    if(is.null(keyvalues_ref)){
      stop('User must provide keyvalues_ref!')
    }
  }else{
    options(current_keyvalues_ref = keyvalues_ref)
  }
  # Use the compressed-image dimensions (ZNAXIS*) for fpack'd inputs.
  if(isTRUE(keyvalues_test$ZIMAGE)){
    NAXIS1_test = keyvalues_test$ZNAXIS1
    NAXIS2_test = keyvalues_test$ZNAXIS2
  }else{
    NAXIS1_test = keyvalues_test$NAXIS1
    NAXIS2_test = keyvalues_test$NAXIS2
  }
  if(isTRUE(keyvalues_ref$ZIMAGE)){
    NAXIS1_ref = keyvalues_ref$ZNAXIS1
    NAXIS2_ref = keyvalues_ref$ZNAXIS2
  }else{
    NAXIS1_ref = keyvalues_ref$NAXIS1
    NAXIS2_ref = keyvalues_ref$NAXIS2
  }
  suppressMessages({
    pixscale_test = Rwcs_pixscale(keyvalues_test) # in asec
    pixscale_ref = Rwcs_pixscale(keyvalues_ref) # in asec
    centre_test = Rwcs_p2s(NAXIS1_test/2, NAXIS2_test/2, keyvalues=keyvalues_test, pixcen='R')
    centre_ref = Rwcs_p2s(NAXIS1_ref/2, NAXIS2_ref/2, keyvalues=keyvalues_ref, pixcen='R')
  })
  # Angular separation of the two centres via unit-vector dot product.
  # (.sph2car presumably maps (RA, Dec) to Cartesian unit vectors -- internal
  # helper, defined elsewhere in the package.)
  sph_test = .sph2car(centre_test)[1,]
  sph_ref = .sph2car(centre_ref)[1,]
  dot_prod = sph_test[1]*sph_ref[1] + sph_test[2]*sph_ref[2] + sph_test[3]*sph_ref[3]
  # Clamp to [-1, 1] to guard acos() against floating-point drift.
  dot_prod[dot_prod < -1] = -1
  dot_prod[dot_prod > 1] = 1
  ang_sep = acos(dot_prod)/((pi/180)/3600) # in asec
  #using a 10% buffer to be safe
  max_sep = 1.1*(sqrt(NAXIS1_test^2 + NAXIS2_test^2)*pixscale_test + sqrt(NAXIS1_ref^2 + NAXIS2_ref^2)*pixscale_ref)/2
  # Centres further apart than the combined half-diagonals: no overlap possible.
  if(ang_sep > max_sep){
    return(FALSE)
  }
  # Sky positions of every pixel along the four edges of the test image.
  suppressMessages({
    left = Rwcs_p2s(rep(0,NAXIS2_test + 1L), 0:NAXIS2_test, keyvalues=keyvalues_test, pixcen='R')
    top = Rwcs_p2s(0:NAXIS1_test, rep(NAXIS2_test,NAXIS1_test + 1L), keyvalues=keyvalues_test, pixcen='R')
    right = Rwcs_p2s(rep(NAXIS1_test,NAXIS2_test + 1L), NAXIS2_test:0 , keyvalues=keyvalues_test, pixcen='R')
    bottom = Rwcs_p2s(NAXIS1_test:0, rep(0,NAXIS1_test + 1L), keyvalues=keyvalues_test, pixcen='R')
  })
  test_in_ref = any(Rwcs_in_image(RA=c(left[,'RA'], top[,'RA'], right[,'RA'], bottom[,'RA']), Dec=c(left[,'Dec'], top[,'Dec'], right[,'Dec'], bottom[,'Dec']), buffer=buffer, plot=plot, style='polygon', pad=pad, add=add, keyvalues=keyvalues_ref))
  if(test_in_ref){
    return(test_in_ref)
  }else{
    # No test edge inside ref: check the reverse direction in case the ref
    # image is entirely contained within the test image.
    suppressMessages({
      left = Rwcs_p2s(rep(0,NAXIS2_ref + 1L), 0:NAXIS2_ref, keyvalues=keyvalues_ref, pixcen='R')
      top = Rwcs_p2s(0:NAXIS1_ref, rep(NAXIS2_ref,NAXIS1_ref + 1L), keyvalues=keyvalues_ref, pixcen='R')
      right = Rwcs_p2s(rep(NAXIS1_ref,NAXIS2_ref + 1L), NAXIS2_ref:0 , keyvalues=keyvalues_ref, pixcen='R')
      bottom = Rwcs_p2s(NAXIS1_ref:0, rep(0,NAXIS1_ref + 1L), keyvalues=keyvalues_ref, pixcen='R')
    })
    ref_in_test = any(Rwcs_in_image(RA=c(left[,'RA'], top[,'RA'], right[,'RA'], bottom[,'RA']), Dec=c(left[,'Dec'], top[,'Dec'], right[,'Dec'], bottom[,'Dec']), buffer=buffer, keyvalues=keyvalues_test))
    return(ref_in_test)
  }
} |
#' EDMeasure: A package for energy-based dependence measures
#'
#' The EDMeasure package provides measures of mutual dependence and tests of mutual independence,
#' independent component analysis methods based on mutual dependence measures,
#' and measures of conditional mean dependence and tests of conditional mean independence.
#'
#' The three main parts are:
#' \itemize{
#' \item mutual dependence measures via energy statistics
#' \itemize{
#' \item measuring mutual dependence
#' \item testing mutual independence
#' }
#' \item independent component analysis via mutual dependence measures
#' \itemize{
#' \item applying mutual dependence measures
#' \item initializing local optimization methods
#' }
#' \item conditional mean dependence measures via energy statistics
#' \itemize{
#' \item measuring conditional mean dependence
#' \item testing conditional mean independence
#' }
#' }
#'
#' @section Mutual Dependence Measures via Energy Statistics:
#' \strong{Measuring mutual dependence}
#'
#' The mutual dependence measures include:
#' \itemize{
#' \item asymmetric measure \eqn{\mathcal{R}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item symmetric measure \eqn{\mathcal{S}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item complete measure \eqn{\mathcal{Q}_n} based on complete V-statistics
#' \item simplified complete measure \eqn{\mathcal{Q}_n^\star} based on incomplete V-statistics
#' \item asymmetric measure \eqn{\mathcal{J}_n} based on complete measure \eqn{\mathcal{Q}_n}
#' \item simplified asymmetric measure \eqn{\mathcal{J}_n^\star} based on simplified complete measure
#' \eqn{\mathcal{Q}_n^\star}
#' \item symmetric measure \eqn{\mathcal{I}_n} based on complete measure \eqn{\mathcal{Q}_n}
#' \item simplified symmetric measure \eqn{\mathcal{I}_n^\star} based on simplified complete measure
#' \eqn{\mathcal{Q}_n^\star}
#' }
#'
#' \strong{Testing mutual independence}
#'
#' The mutual independence tests based on the mutual dependence measures are implemented as permutation
#' tests.
#'
#' @section Independent Component Analysis via Mutual Dependence Measures:
#' \strong{Applying mutual dependence measures}
#'
#' The mutual dependence measures include:
#' \itemize{
#' \item distance-based energy statistics
#' \itemize{
#' \item asymmetric measure \eqn{\mathcal{R}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item symmetric measure \eqn{\mathcal{S}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item simplified complete measure \eqn{\mathcal{Q}_n^\star} based on incomplete V-statistics
#' }
#' }
#' \itemize{
#' \item kernel-based maximum mean discrepancies
#' \itemize{
#' \item d-variable Hilbert--Schmidt independence criterion dHSIC\eqn{_n} based on
#' Hilbert--Schmidt independence criterion HSIC\eqn{_n}
#' }
#' }
#'
#' \strong{Initializing local optimization methods}
#'
#' The initialization methods include:
#' \itemize{
#' \item Latin hypercube sampling
#' \item Bayesian optimization
#' }
#'
#' @section Conditional Mean Dependence Measures via Energy Statistics:
#' \strong{Measuring conditional mean dependence}
#'
#' The conditional mean dependence measures include:
#' \itemize{
#' \item conditional mean dependence of \code{Y} given \code{X}
#' \itemize{
#' \item martingale difference divergence
#' \item martingale difference correlation
#' \item martingale difference divergence matrix
#' }
#' }
#' \itemize{
#' \item conditional mean dependence of \code{Y} given \code{X} adjusting for the dependence on \code{Z}
#' \itemize{
#' \item partial martingale difference divergence
#' \item partial martingale difference correlation
#' }
#' }
#'
#' \strong{Testing conditional mean independence}
#'
#' The conditional mean independence tests include:
#' \itemize{
#' \item conditional mean independence of \code{Y} given \code{X} conditioning on \code{Z}
#' \itemize{
#' \item martingale difference divergence under a linear assumption
#' \item partial martingale difference divergence
#' }
#' }
#' The conditional mean independence tests based on the conditional mean dependence measures are
#' implemented as permutation tests.
#'
#' @name EDMeasure-package
#'
#' @aliases EDMeasure
#'
#' @docType package
#'
#' @title Energy-Based Dependence Measures
#'
#' @author Ze Jin \email{zj58@cornell.edu},
#' Shun Yao \email{shunyao2@illinois.edu}, \cr
#' David S. Matteson \email{matteson@cornell.edu},
#' Xiaofeng Shao \email{xshao@illinois.edu}
NULL
| /R/EDMeasure-package.R | no_license | cran/EDMeasure | R | false | false | 4,748 | r | #' EDMeasure: A package for energy-based dependence measures
#'
#' The EDMeasure package provides measures of mutual dependence and tests of mutual independence,
#' independent component analysis methods based on mutual dependence measures,
#' and measures of conditional mean dependence and tests of conditional mean independence.
#'
#' The three main parts are:
#' \itemize{
#' \item mutual dependence measures via energy statistics
#' \itemize{
#' \item measuring mutual dependence
#' \item testing mutual independence
#' }
#' \item independent component analysis via mutual dependence measures
#' \itemize{
#' \item applying mutual dependence measures
#' \item initializing local optimization methods
#' }
#' \item conditional mean dependence measures via energy statistics
#' \itemize{
#' \item measuring conditional mean dependence
#' \item testing conditional mean independence
#' }
#' }
#'
#' @section Mutual Dependence Measures via Energy Statistics:
#' \strong{Measuring mutual dependence}
#'
#' The mutual dependence measures include:
#' \itemize{
#' \item asymmetric measure \eqn{\mathcal{R}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item symmetric measure \eqn{\mathcal{S}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item complete measure \eqn{\mathcal{Q}_n} based on complete V-statistics
#' \item simplified complete measure \eqn{\mathcal{Q}_n^\star} based on incomplete V-statistics
#' \item asymmetric measure \eqn{\mathcal{J}_n} based on complete measure \eqn{\mathcal{Q}_n}
#' \item simplified asymmetric measure \eqn{\mathcal{J}_n^\star} based on simplified complete measure
#' \eqn{\mathcal{Q}_n^\star}
#' \item symmetric measure \eqn{\mathcal{I}_n} based on complete measure \eqn{\mathcal{Q}_n}
#' \item simplified symmetric measure \eqn{\mathcal{I}_n^\star} based on simplified complete measure
#' \eqn{\mathcal{Q}_n^\star}
#' }
#'
#' \strong{Testing mutual independence}
#'
#' The mutual independence tests based on the mutual dependence measures are implemented as permutation
#' tests.
#'
#' @section Independent Component Analysis via Mutual Dependence Measures:
#' \strong{Applying mutual dependence measures}
#'
#' The mutual dependence measures include:
#' \itemize{
#' \item distance-based energy statistics
#' \itemize{
#' \item asymmetric measure \eqn{\mathcal{R}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item symmetric measure \eqn{\mathcal{S}_n} based on distance covariance \eqn{\mathcal{V}_n}
#' \item simplified complete measure \eqn{\mathcal{Q}_n^\star} based on incomplete V-statistics
#' }
#' }
#' \itemize{
#' \item kernel-based maximum mean discrepancies
#' \itemize{
#' \item d-variable Hilbert--Schmidt independence criterion dHSIC\eqn{_n} based on
#' Hilbert--Schmidt independence criterion HSIC\eqn{_n}
#' }
#' }
#'
#' \strong{Initializing local optimization methods}
#'
#' The initialization methods include:
#' \itemize{
#' \item Latin hypercube sampling
#' \item Bayesian optimization
#' }
#'
#' @section Conditional Mean Dependence Measures via Energy Statistics:
#' \strong{Measuring conditional mean dependence}
#'
#' The conditional mean dependence measures include:
#' \itemize{
#' \item conditional mean dependence of \code{Y} given \code{X}
#' \itemize{
#' \item martingale difference divergence
#' \item martingale difference correlation
#' \item martingale difference divergence matrix
#' }
#' }
#' \itemize{
#' \item conditional mean dependence of \code{Y} given \code{X} adjusting for the dependence on \code{Z}
#' \itemize{
#' \item partial martingale difference divergence
#' \item partial martingale difference correlation
#' }
#' }
#'
#' \strong{Testing conditional mean independence}
#'
#' The conditional mean independence tests include:
#' \itemize{
#' \item conditional mean independence of \code{Y} given \code{X} conditioning on \code{Z}
#' \itemize{
#' \item martingale difference divergence under a linear assumption
#' \item partial martingale difference divergence
#' }
#' }
#' The conditional mean independence tests based on the conditional mean dependence measures are
#' implemented as permutation tests.
#'
#' @name EDMeasure-package
#'
#' @aliases EDMeasure
#'
#' @docType package
#'
#' @title Energy-Based Dependence Measures
#'
#' @author Ze Jin \email{zj58@cornell.edu},
#' Shun Yao \email{shunyao2@illinois.edu}, \cr
#' David S. Matteson \email{matteson@cornell.edu},
#' Xiaofeng Shao \email{xshao@illinois.edu}
NULL
|
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## Wrap a matrix in a list of closures that can also memoise its inverse.
## The returned list exposes:
##   set(y)           replace the stored matrix (invalidates the cache)
##   get()            return the stored matrix
##   setInverso(inv)  store a computed inverse in the cache
##   getInverso()     return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(new_matrix) {
    # Replacing the matrix makes any previously cached inverse stale.
    x <<- new_matrix
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setInverso <- function(inverso) {
    inv_cache <<- inverso
  }
  getInverso <- function() {
    inv_cache
  }
  list(set = set,
       get = get,
       setInverso = setInverso,
       getInverso = getInverso)
}
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
# cacheSolve: computes the inverse of the special "matrix" returned by
# makeCacheMatrix above. If the inverse has already been calculated (and the
# matrix has not changed), it is retrieved from the cache instead.
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when one exists.
  m <- x$getInverso()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: the original called mean(data, ...); the documented contract is
  # to compute the matrix inverse, which solve() with a single argument does.
  m <- solve(data, ...)
  x$setInverso(m)
  m
}
# Constructor for a matrix wrapper that can cache its inverse. Returns a list
# of accessors: set/get for the matrix itself and setInverso/getInverso for
# the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
# m holds the cached inverse; NULL means "not computed yet"
m <- NULL
# Replacing the matrix invalidates any previously cached inverse.
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverso <- function(inverso) m <<- inverso
getInverso <- function() m
list(set = set, get = get,
setInverso = setInverso,
getInverso = getInverso)
}
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Return the cached inverse if it has already been computed.
  m <- x$getInverso()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: the original called mean(data, ...); the function's documented
  # purpose is to compute the matrix inverse, i.e. solve(data).
  m <- solve(data, ...)
  x$setInverso(m)
  m
}
# Fuzzer-generated regression input for the internal grattan:::anyOutside()
# (libFuzzer/valgrind harness); values are raw 32-bit integers produced by the
# fuzzer, not meaningful domain data.
testlist <- list(a = 1179010587L, b = 993737472L, x = c(1179010630L, 1179010630L, 1179010630L, 1179010630L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055988-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 170 | r | testlist <- list(a = 1179010587L, b = 993737472L, x = c(1179010630L, 1179010630L, 1179010630L, 1179010630L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
## My functions will take a matrix (makeCacheMatrix) and create special "matrix"
## capable of caching its inverse, then (cacheSolve) take "matrix" like this and
## compute its inverse (if it hasn't been done yet) or take the cached value.
## makeCacheMatrix takes a matrix and creates a list containing 4 functions:
## set, using which we can change the matrix we're working with (and sets the
## cached inversion to NULL whenever called);
## get, which allows us to use said matrix;
## setsolve setting the inversion we want to cache (in variable z which exists
## in function's closure);
## getsolve giving us acces to said inversion.
# Wrap a matrix in an object (a list of closures) that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # z caches the inverse of x; NULL until setsolve() stores it.
  z <- NULL
  set <- function(y) {
    z <<- NULL   # invalidate the cache before swapping in the new matrix
    x <<- y
  }
  # Consistency fix: use `<-` for the binding (as every other line here
  # does), not `=`.
  get <- function() x
  setsolve <- function(solve) z <<- solve
  getsolve <- function() z
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## cacheSolve takes the "matrix" created by above function and returns cached
## value (if it's not NULL) or calculates the inversion (otherwise).
cacheSolve <- function(x, ...) {
  # Serve the cached inverse when available; otherwise compute and cache it.
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | Stozek/ProgrammingAssignment2 | R | false | false | 1,256 | r | ## My functions will take a matrix (makeCacheMatrix) and create special "matrix"
## capable of caching its inverse, then (cacheSolve) take "matrix" like this and
## compute its inverse (if it hasn't been done yet) or take the cached value.
## makeCacheMatrix takes a matrix and creates a list containing 4 functions:
## set, using which we can change the matrix we're working with (and sets the
## cached inversion to NULL whenever called);
## get, which allows us to use said matrix;
## setsolve setting the inversion we want to cache (in variable z which exists
## in function's closure);
## getsolve giving us acces to said inversion.
# Create a "matrix" object (a list of closures) capable of caching its
# inverse; z holds the cached inverse, NULL while not yet computed.
makeCacheMatrix <- function(x = matrix()) {
z <- NULL
# set() replaces the matrix and clears the stale cached inverse.
set <- function(y) {
z <<- NULL
x <<- y
}
get = function() x
setsolve <- function(solve) z <<- solve
getsolve <- function() z
list (set=set, get=get, setsolve=setsolve, getsolve=getsolve)
}
## cacheSolve takes the "matrix" created by above function and returns cached
## value (if it's not NULL) or calculates the inversion (otherwise).
# Return the inverse of the special "matrix" x, reusing the cached value when
# one has been stored; otherwise compute it with solve() and cache it.
cacheSolve <- function(x, ...) {
z <- x$getsolve()
if(!is.null(z)) {
message("getting cached data")
return(z)
}
data <- x$get()
z <- solve(data, ...)
x$setsolve(z)
z
}
|
# Teaching script: basic assignment, vectors, rep(), package installation and
# a simple choropleth map. Values auto-print when a bare name is evaluated.
a <- 3
a
b <- c("bonjour", "merci", "au revoir")
b
rep(b, a)
# Explicit argument names: documented for rep.int() but hidden behind ... in rep()
rep(x = b, times = a)
# BUG FIX: the package name must be a quoted string -- install.packages(arsenal)
# fails with "object 'arsenal' not found".
install.packages("arsenal")
library(arsenal)
read.csv("2_dataviz_ggplot2/etalab_finess_geocoded.csv")
devtools::install_github("dmlc/xgboost")
library(cartography)
library(sp)
data("nuts2006")
# Unemployment rate (%) per NUTS2 region
nuts2.df$unemprate <- nuts2.df$unemp2008 / nuts2.df$act2008 * 100
choroLayer(spdf = nuts2.spdf,
           df = nuts2.df,
           var = "unemprate")
# Toy vectorised function: element-wise a + b^2
myfunc <- function(a, b) {
  a + b^2
}
# NOTE: `c` shadows base::c() from here on; kept for backward compatibility.
c <- myfunc(c(1, 2, 3, 5), rep(2, 4))
| /0_introduction/example.R | no_license | phileas-condemine/r_initiation_drees | R | false | false | 554 | r | a=3
a
b <- c("bonjour", "merci", "au revoir")
b
rep(b, a)
# Explicit argument names: documented for rep.int() but hidden behind ... in rep()
rep(x = b, times = a)
# BUG FIX: the package name must be a quoted string -- install.packages(arsenal)
# fails with "object 'arsenal' not found".
install.packages("arsenal")
library(arsenal)
read.csv("2_dataviz_ggplot2/etalab_finess_geocoded.csv")
devtools::install_github("dmlc/xgboost")
library(cartography)
library(sp)
data("nuts2006")
# Unemployment rate (%) per NUTS2 region
nuts2.df$unemprate <- nuts2.df$unemp2008 / nuts2.df$act2008 * 100
choroLayer(spdf = nuts2.spdf,
           df = nuts2.df,
           var = "unemprate")
# Toy vectorised function: element-wise a + b^2
myfunc <- function(a, b) {
  a + b^2
}
# NOTE: `c` shadows base::c() from here on; kept for backward compatibility.
c <- myfunc(c(1, 2, 3, 5), rep(2, 4))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{LUXLung6(2)_2A}
\alias{LUXLung6(2)_2A}
\title{LUXLung6(2), figure 2A}
\format{
A data frame of 364 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (afatinib, gemcitabine_cisplatin) \cr
}
}
\source{
Wu Y-L, Zhou C, Hu C-P, et al. Afatinib versus cisplatin plus
gemcitabine for first-line treatment of Asian patients with advanced
non-small-cell lung cancer harbouring EGFR mutations (LUX-Lung 6): an
open-label, randomised phase 3 trial. Lancet Oncol 2014; 15: 213-22.
}
\usage{
`LUXLung6(2)_2A`
}
\description{
Kaplan-Meier digitized data from LUXLung6(2), figure 2A (PMID 24439929). A reported sample size of 364 for a primary endpoint of PFS in lung cancer.
}
\examples{
summary(`LUXLung6(2)_2A`)
kmplot(`LUXLung6(2)_2A`)
}
\keyword{datasets}
| /man/LUXLung6-open-paren-2-close-paren-_2A.Rd | no_license | Owain-S/kmdata | R | false | true | 1,021 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{LUXLung6(2)_2A}
\alias{LUXLung6(2)_2A}
\title{LUXLung6(2), figure 2A}
\format{
A data frame of 364 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab PFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (afatinib, gemcitabine_cisplatin) \cr
}
}
\source{
Wu Y-L, Zhou C, Hu C-P, et al. Afatinib versus cisplatin plus
gemcitabine for first-line treatment of Asian patients with advanced
non-small-cell lung cancer harbouring EGFR mutations (LUX-Lung 6): an
open-label, randomised phase 3 trial. Lancet Oncol 2014; 15: 213-22.
}
\usage{
`LUXLung6(2)_2A`
}
\description{
Kaplan-Meier digitized data from LUXLung6(2), figure 2A (PMID 24439929). A reported sample size of 364 for a primary endpoint of PFS in lung cancer.
}
\examples{
summary(`LUXLung6(2)_2A`)
kmplot(`LUXLung6(2)_2A`)
}
\keyword{datasets}
|
##################################################################################################
# ARCHIPELAGO PLANTS: OUTPUT OF HISTORICAL OCCURRENCE MODEL
##################################################################################################
rm(list=ls())
gc()
memory.size()
memory.limit(64000)
#### - Read packages - ####
library(devtools)
library(BayesLogit)
library(Hmsc)
library(abind)
library(corrplot)
library(ggplot2)
library(ape)
library(phytools)
#### - Load the fitted model - ####
setwd("F:/HelsinkiData23102019/archipelago/hmsc/Rcode/histocc")
load("modelNS.RData")
# Extract posterior distribution
post = convertToCodaObject(m)
# Compute effective sample sizes and PSRFs
esBeta = effectiveSize(post$Beta)
esGamma = effectiveSize(post$Gamma)
esRho = effectiveSize(post$Rho)
esAlpha = effectiveSize(post$Alpha[[1]])
esEta = effectiveSize(post$Eta[[1]])
esLambda = effectiveSize(post$Lambda[[1]])
str(post$Omega[[1]])
set.seed(1)
ss = sample(1:248004, 1000, replace=F)
esOmega = effectiveSize(post$Omega[[1]][,ss])
psBeta = gelman.diag(post$Beta, multivariate=F)$psrf
psGamma = gelman.diag(post$Gamma, multivariate=F)$psrf
psRho = gelman.diag(post$Rho, multivariate=F)$psrf
psAlpha = gelman.diag(post$Alpha[[1]], multivariate=F)$psrf
psEta = gelman.diag(post$Eta[[1]], multivariate=F)$psrf
psLambda = gelman.diag(post$Lambda[[1]], multivariate=F)$psrf
psOmega = gelman.diag(post$Omega[[1]][,ss], multivariate=F)$psrf
mixing = list(esBeta, esGamma, esRho, esAlpha, esEta, esLambda, esOmega,
psBeta, psGamma, psRho, psAlpha, psEta, psLambda, psOmega)
#save(mixing, file="mixing.RData")
load(file="mixing.RData")
summary(mixing[[1]]) # Beta
summary(mixing[[2]]) # Gamma
summary(mixing[[3]]) # Rho
summary(mixing[[7]]) # Omega
summary(mixing[[8]]) # Beta
summary(mixing[[9]]) # Gamma
summary(mixing[[10]]) # Rho
summary(mixing[[14]]) # Omega
# Produce posterior trace plots
plot(post$Rho)
summary(post$Rho[[1]])
pdf("posteriorplots/gammapost.pdf") #Trait effects
plot(post$Gamma)
dev.off()
pdf("posteriorplots/betapost.pdf") #Regression parameters
plot(post$Beta[,1:200])
dev.off()
pdf("posteriorplots/omegapost.pdf") #Species associations
plot(post$Omega[[1]][,1:200])
dev.off()
# Trait effects Gamma
postGamma = getPostEstimate(m, "Gamma")
postGamma
plotGamma(m, post=postGamma, "Mean")
#### - Evaluate model fit - ####
#load("CrossVal5PredY.RData")
predY = computePredictedValues(m)
predYm = apply(simplify2array(predY), 1:2, mean) #Posterior mean
#save(predYm, file="predYm.RData")
MF = evaluateModelFit(m, predY)
#save(MF, file="MF.RData")
load(file="MF.RData")
AUC = MF$AUC
R2 = MF$TjurR2
mean(R2, na.rm=T) #0.29
range(R2, na.rm=T) #0 - 0.72
mean(AUC, na.rm=T) #0.92
range(AUC, na.rm=T) #.70 - 1.00
# Explanatory power at island level
load(file="predYm.RData")
tmp = (m$Y>(-Inf))*1
predYm2 = predYm*tmp
plot(rowSums(m$Y, na.rm=T), rowSums(predYm2, na.rm=T))
lines(0:100, 0:100)
cor(rowSums(m$Y, na.rm=T), rowSums(predYm2, na.rm=T))^2
# Plot Tjur r^2 vs species prevalence
prev = colSums(m$Y)/(m$ny)
pdf("tjur_vs_prev.pdf", height=4, width=4, family="Times")
par(mfrow=c(1,1), mar=c(4,5,2,1))
plot(prev, R2, las=1, pch=16, col="grey", cex=.8, main=paste("Repeated measures model: Mean = ", signif(mean(R2, na.rm=T),2),".", sep=""),
ylim = c(0,1), xlab = "", ylab=expression(paste("Coefficient of discrimination (Tjur's", r^2,")")))
mtext("Species prevalence", 1, line=2.5)
dev.off()
#### - Compute and plot variance partitioning - ####
m$covNames
group = c(1,1,2,3)
groupnames = c(m$covNames[-1])
groupnames
VP = computeVariancePartitioning(m, group=group, groupnames=groupnames)
#save(VP, file="VP.RData")
load(file="VP.RData")
str(VP)
pdf("plots/varpartOcc.pdf", height=5, width=60)
plotVariancePartitioning(m, VP=VP)
dev.off()
### - Plot association networks ordered by taxonomy - ####
OmegaCorOcc = computeAssociations(m)
#save(OmegaCorOcc, file="OmegaCorOcc.RData")
load(file="OmegaCorOcc.RData")
tree = m$phyloTree
tree = untangle(tree, "read.tree")
orderC = m$C[tree$tip.label, tree$tip.label]
orderC[1:5,1:5]
orderOmega = OmegaCorOcc[[1]]$mean[tree$tip.label, tree$tip.label]
orderOmega[1:5,1:5]
cor(c(orderC), c(orderOmega)) #0.032
plotOrderOcc = match(tree$tip.label, colnames(OmegaCorOcc[[1]]$mean))
# Four-panel figure: taxonomic correlation vs. posterior species-association
# matrices, masking associations whose posterior support is weaker than
# `supportLevel` in either direction.
pdf("plots/CorPhylo.pdf", width=12, height=3)
par(mfrow=c(1,4))
supportLevel = 0.75
# Panel 1: phylogenetic/taxonomic correlation matrix from the fitted model.
toPlot = m$C
corrplot(toPlot[plotOrderOcc,plotOrderOcc], type="lower", tl.pos="n", method = "color", main="Taxonomic correlation",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
# Panel 2: occurrence-model associations, zeroed where support < threshold.
toPlot = ((OmegaCorOcc[[1]]$support>supportLevel) + (OmegaCorOcc[[1]]$support<(1-supportLevel))>0)*OmegaCorOcc[[1]]$mean
corrplot(toPlot[plotOrderOcc,plotOrderOcc], type="lower", tl.pos="n", method = "color", main="Historical occurrence",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
# NOTE(review): OmegaCorCol/plotOrderCol and OmegaCorExt/plotOrderExt are
# never defined in this script (presumably produced by the colonisation and
# extinction model scripts); running this section standalone will fail with
# "object not found" -- confirm the intended workflow.
toPlot = ((OmegaCorCol[[1]]$support>supportLevel) + (OmegaCorCol[[1]]$support<(1-supportLevel))>0)*OmegaCorCol[[1]]$mean
corrplot(toPlot[plotOrderCol,plotOrderCol], type="lower", tl.pos="n", method = "color", main="Colonisation probability",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
toPlot = ((OmegaCorExt[[1]]$support>supportLevel) + (OmegaCorExt[[1]]$support<(1-supportLevel))>0)*OmegaCorExt[[1]]$mean
corrplot(toPlot[plotOrderExt,plotOrderExt], type="lower", tl.pos="n", method = "color", main="Extinction probability",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
dev.off()
#### - Cross-validation - ####
a = Sys.time()
partition = createPartition(m, nfolds=2, column=1)
predY_CV2 = computePredictedValues(m, partition=partition, updater=list(GammaEta=FALSE))
MF_CV2 = evaluateModelFit(m, predY_CV2)
Sys.time()-a
save(MF_CV2, file="MF_CV2.Rdata") #2fold
save(predY_CV2, file="crossvalY.Rdata") #2fold
load("crossvalY.Rdata") #5fold
R2 = MF_CV2$TjurR2
AUC = MF_CV2$AUC
mean(R2, na.rm=T)
mean(AUC, na.rm=T)
| /histocc_output_clean.R | no_license | oysteiop/ArchipelagoPlants | R | false | false | 5,990 | r | ##################################################################################################
# ARCHIPELAGO PLANTS: OUTPUT OF HISTORICAL OCCURRENCE MODEL
##################################################################################################
rm(list=ls())
gc()
memory.size()
memory.limit(64000)
#### - Read packages - ####
library(devtools)
library(BayesLogit)
library(Hmsc)
library(abind)
library(corrplot)
library(ggplot2)
library(ape)
library(phytools)
#### - Load the fitted model - ####
setwd("F:/HelsinkiData23102019/archipelago/hmsc/Rcode/histocc")
load("modelNS.RData")
# Extract posterior distribution
post = convertToCodaObject(m)
# Compute effective sample sizes and PSRFs
esBeta = effectiveSize(post$Beta)
esGamma = effectiveSize(post$Gamma)
esRho = effectiveSize(post$Rho)
esAlpha = effectiveSize(post$Alpha[[1]])
esEta = effectiveSize(post$Eta[[1]])
esLambda = effectiveSize(post$Lambda[[1]])
str(post$Omega[[1]])
set.seed(1)
ss = sample(1:248004, 1000, replace=F)
esOmega = effectiveSize(post$Omega[[1]][,ss])
psBeta = gelman.diag(post$Beta, multivariate=F)$psrf
psGamma = gelman.diag(post$Gamma, multivariate=F)$psrf
psRho = gelman.diag(post$Rho, multivariate=F)$psrf
psAlpha = gelman.diag(post$Alpha[[1]], multivariate=F)$psrf
psEta = gelman.diag(post$Eta[[1]], multivariate=F)$psrf
psLambda = gelman.diag(post$Lambda[[1]], multivariate=F)$psrf
psOmega = gelman.diag(post$Omega[[1]][,ss], multivariate=F)$psrf
mixing = list(esBeta, esGamma, esRho, esAlpha, esEta, esLambda, esOmega,
psBeta, psGamma, psRho, psAlpha, psEta, psLambda, psOmega)
#save(mixing, file="mixing.RData")
load(file="mixing.RData")
summary(mixing[[1]]) # Beta
summary(mixing[[2]]) # Gamma
summary(mixing[[3]]) # Rho
summary(mixing[[7]]) # Omega
summary(mixing[[8]]) # Beta
summary(mixing[[9]]) # Gamma
summary(mixing[[10]]) # Rho
summary(mixing[[14]]) # Omega
# Produce posterior trace plots
plot(post$Rho)
summary(post$Rho[[1]])
pdf("posteriorplots/gammapost.pdf") #Trait effects
plot(post$Gamma)
dev.off()
pdf("posteriorplots/betapost.pdf") #Regression parameters
plot(post$Beta[,1:200])
dev.off()
pdf("posteriorplots/omegapost.pdf") #Species associations
plot(post$Omega[[1]][,1:200])
dev.off()
# Trait effects Gamma
postGamma = getPostEstimate(m, "Gamma")
postGamma
plotGamma(m, post=postGamma, "Mean")
#### - Evaluate model fit - ####
#load("CrossVal5PredY.RData")
predY = computePredictedValues(m)
predYm = apply(simplify2array(predY), 1:2, mean) #Posterior mean
#save(predYm, file="predYm.RData")
MF = evaluateModelFit(m, predY)
#save(MF, file="MF.RData")
load(file="MF.RData")
AUC = MF$AUC
R2 = MF$TjurR2
mean(R2, na.rm=T) #0.29
range(R2, na.rm=T) #0 - 0.72
mean(AUC, na.rm=T) #0.92
range(AUC, na.rm=T) #.70 - 1.00
# Explanatory power at island level
load(file="predYm.RData")
tmp = (m$Y>(-Inf))*1
predYm2 = predYm*tmp
plot(rowSums(m$Y, na.rm=T), rowSums(predYm2, na.rm=T))
lines(0:100, 0:100)
cor(rowSums(m$Y, na.rm=T), rowSums(predYm2, na.rm=T))^2
# Plot Tjur r^2 vs species prevalence
prev = colSums(m$Y)/(m$ny)
pdf("tjur_vs_prev.pdf", height=4, width=4, family="Times")
par(mfrow=c(1,1), mar=c(4,5,2,1))
plot(prev, R2, las=1, pch=16, col="grey", cex=.8, main=paste("Repeated measures model: Mean = ", signif(mean(R2, na.rm=T),2),".", sep=""),
ylim = c(0,1), xlab = "", ylab=expression(paste("Coefficient of discrimination (Tjur's", r^2,")")))
mtext("Species prevalence", 1, line=2.5)
dev.off()
#### - Compute and plot variance partitioning - ####
m$covNames
group = c(1,1,2,3)
groupnames = c(m$covNames[-1])
groupnames
VP = computeVariancePartitioning(m, group=group, groupnames=groupnames)
#save(VP, file="VP.RData")
load(file="VP.RData")
str(VP)
pdf("plots/varpartOcc.pdf", height=5, width=60)
plotVariancePartitioning(m, VP=VP)
dev.off()
### - Plot association networks ordered by taxonomy - ####
OmegaCorOcc = computeAssociations(m)
#save(OmegaCorOcc, file="OmegaCorOcc.RData")
load(file="OmegaCorOcc.RData")
tree = m$phyloTree
tree = untangle(tree, "read.tree")
orderC = m$C[tree$tip.label, tree$tip.label]
orderC[1:5,1:5]
orderOmega = OmegaCorOcc[[1]]$mean[tree$tip.label, tree$tip.label]
orderOmega[1:5,1:5]
cor(c(orderC), c(orderOmega)) #0.032
plotOrderOcc = match(tree$tip.label, colnames(OmegaCorOcc[[1]]$mean))
pdf("plots/CorPhylo.pdf", width=12, height=3)
par(mfrow=c(1,4))
supportLevel = 0.75
toPlot = m$C
corrplot(toPlot[plotOrderOcc,plotOrderOcc], type="lower", tl.pos="n", method = "color", main="Taxonomic correlation",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
toPlot = ((OmegaCorOcc[[1]]$support>supportLevel) + (OmegaCorOcc[[1]]$support<(1-supportLevel))>0)*OmegaCorOcc[[1]]$mean
corrplot(toPlot[plotOrderOcc,plotOrderOcc], type="lower", tl.pos="n", method = "color", main="Historical occurrence",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
toPlot = ((OmegaCorCol[[1]]$support>supportLevel) + (OmegaCorCol[[1]]$support<(1-supportLevel))>0)*OmegaCorCol[[1]]$mean
corrplot(toPlot[plotOrderCol,plotOrderCol], type="lower", tl.pos="n", method = "color", main="Colonisation probability",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
toPlot = ((OmegaCorExt[[1]]$support>supportLevel) + (OmegaCorExt[[1]]$support<(1-supportLevel))>0)*OmegaCorExt[[1]]$mean
corrplot(toPlot[plotOrderExt,plotOrderExt], type="lower", tl.pos="n", method = "color", main="Extinction probability",
col=colorRampPalette(c("blue","white","red"))(200), mar=c(0,0,1,0))
dev.off()
#### - Cross-validation - ####
a = Sys.time()
partition = createPartition(m, nfolds=2, column=1)
predY_CV2 = computePredictedValues(m, partition=partition, updater=list(GammaEta=FALSE))
MF_CV2 = evaluateModelFit(m, predY_CV2)
Sys.time()-a
save(MF_CV2, file="MF_CV2.Rdata") #2fold
save(predY_CV2, file="crossvalY.Rdata") #2fold
load("crossvalY.Rdata") #5fold
R2 = MF_CV2$TjurR2
AUC = MF_CV2$AUC
mean(R2, na.rm=T)
mean(AUC, na.rm=T)
|
#' Account Management Functions
#'
#' Functions to enumerate and remove accounts on the local system. Prior to
#' deploying applications you need to register your account on the local system.
#'
#' You register an account using the [setAccountInfo()] function (for
#' ShinyApps) or [connectUser()] function (for other servers). You can
#' subsequently remove the account using the `removeAccount` function.
#'
#' The `accounts` and `accountInfo` functions are provided for viewing
#' previously registered accounts.
#'
#' @param name Name of account
#' @param server Name of the server on which the account is registered
#' (optional; see [servers()])
#'
#' @return `accounts` returns a data frame with the names of all accounts
#' registered on the system and the servers on which they reside.
#' `accountInfo` returns a list with account details.
#'
#' @rdname accounts
#' @export
accounts <- function(server = NULL) {
  # Scan the accounts config directory (optionally narrowed to one server's
  # subdirectory) for .dcf files; each file is one registered account.
  root <- accountsConfigDir()
  searchPath <- if (is.null(server)) root else file.path(root, server)
  found <- list.files(searchPath, pattern = glob2rx("*.dcf"),
                      recursive = TRUE)
  accountNames <- file_path_sans_ext(found)
  if (length(accountNames) == 0) {
    return(NULL)
  }
  # Accounts stored at the top level belong to shinyapps.io; otherwise the
  # subdirectory name is the server name.
  serverNames <- dirname(accountNames)
  serverNames[serverNames == "."] <- "shinyapps.io"
  data.frame(name = fileLeaf(accountNames),
             server = serverNames,
             stringsAsFactors = FALSE)
}
#' Connect User Account
#'
#' Connect a user account to the package so that it can be used to deploy and
#' manage applications on behalf of the account.
#'
#' @param account A name for the account to connect. Optional.
#' @param server The server to connect to. Optional if there is only one server
#' registered.
#' @param quiet Whether or not to show messages and prompts while connecting the
#' account.
#'
#' @details When this function is invoked, a web browser will be opened to a
#' page on the target server where you will be prompted to enter your
#' credentials. Upon successful authentication, your local installation of
#' \pkg{rsconnect} and your server account will be paired, and you'll
#' be able to deploy and manage applications using the package without further
#' prompts for credentials.
#'
#' @family Account functions
#' @export
connectUser <- function(account = NULL, server = NULL, quiet = FALSE) {
# Pairs this rsconnect installation with a server account: generates a token,
# opens the server's claim URL in a browser, polls until the user claims it,
# then persists the resulting credentials locally.
# if server isn't specified, look up the default
if (is.null(server)) {
target <- getDefaultServer(local = TRUE)
} else {
target <- serverInfo(server)
}
if (is.null(target)) {
stop("You must specify a server to connect to.")
}
# if account is specified and we already know about the account, get the User
# ID so we can prefill authentication fields
userId <- 0
userAccounts <- accounts(target$name)
if (!is.null(account) && !is.null(userAccounts)) {
if (account %in% userAccounts[,"name"]) {
accountDetails <- accountInfo(account, target$name)
userId <- accountDetails$accountId
if (!quiet) {
message("The account '", account, "' is already registered; ",
"attempting to reconnect it.")
}
}
}
# generate a token and send it to the server
token <- getAuthToken(target$name)
if (!quiet) {
message("A browser window should open; if it doesn't, you may authenticate ",
"manually by visiting ", token$claim_url, ".")
message("Waiting for authentication...")
}
utils::browseURL(token$claim_url)
# keep trying to authenticate until we're successful
# NOTE(review): this loop has no timeout -- it blocks indefinitely (polling
# once per second) if the user never claims the token; confirm intended.
repeat {
Sys.sleep(1)
user <- getUserFromRawToken(target$url, token$token, token$private_key,
target$certificate)
if (!is.null(user))
break
}
# populate the username if there wasn't one set on the server
if (nchar(user$username) == 0) {
if (!is.null(account))
user$username <- account
else
# default nickname: first initial + last name, lower-cased
user$username <- tolower(paste0(substr(user$first_name, 1, 1),
user$last_name))
# in interactive mode, prompt for a username before accepting defaults
if (!quiet && interactive() && is.null(account)) {
input <- readline(paste0("Choose a nickname for this account (default '",
user$username, "'): "))
if (nchar(input) > 0)
user$username <- input
}
}
# write the user info (persists the token and private key to the account DCF)
registerUserToken(serverName = target$name,
accountName = user$username,
userId = user$id,
token = token$token,
privateKey = token$private_key)
if (!quiet) {
message("Account registered successfully: ", user$first_name, " ",
user$last_name, " (", user$username, ")")
}
}
#' Set ShinyApps Account Info
#'
#' Configure a ShinyApps account for publishing from this system.
#'
#' @param name Name of account to save or remove
#' @param token User token for the account
#' @param secret User secret for the account
#'
#' @examples
#' \dontrun{
#'
#' # register an account
#' setAccountInfo("user", "token", "secret")
#'
#' # remove the same account
#' removeAccount("user")
#' }
#'
#' @family Account functions
#' @export
setAccountInfo <- function(name, token, secret) {
# Registers a ShinyApps account locally: verifies the token/secret against
# the service, resolves the numeric user and account ids, then writes the
# credentials to the account's DCF config file.
if (!isStringParam(name))
stop(stringParamErrorMessage("name"))
if (!isStringParam(token))
stop(stringParamErrorMessage("token"))
if (!isStringParam(secret))
stop(stringParamErrorMessage("secret"))
# create connect client
serverInfo <- shinyappsServerInfo()
authInfo <- list(token = token, secret = secret,
certificate = serverInfo$certificate)
lucid <- lucidClient(serverInfo$url, authInfo)
# get user Id (network round-trip; also validates the supplied credentials)
userId <- lucid$currentUser()$id
# get account id: the user may belong to several accounts, so find the one
# whose name matches exactly
accountId <- NULL
accounts <- lucid$accountsForUser(userId)
for (account in accounts) {
if (identical(account$name, name)) {
accountId <- account$id
break
}
}
if (is.null(accountId))
stop("Unable to determine account id for account named '", name, "'")
# get the path to the config file
configFile <- accountConfigFile(name, serverInfo$name)
dir.create(dirname(configFile), recursive = TRUE, showWarnings = FALSE)
# write the user info
write.dcf(list(name = name,
userId = userId,
accountId = accountId,
token = token,
secret = secret,
server = serverInfo$name),
configFile,
width = 100)
# set restrictive permissions on it if possible (the file holds secrets)
if (identical(.Platform$OS.type, "unix"))
Sys.chmod(configFile, mode="0600")
}
#' @rdname accounts
#' @family Account functions
#' @export
accountInfo <- function(name, server = NULL) {
  # Validate the name, then locate the account's DCF file, insisting on
  # exactly one existing match.
  if (!isStringParam(name))
    stop(stringParamErrorMessage("name"))
  dcfPath <- accountConfigFile(name, server)
  if (length(dcfPath) > 1)
    stopWithMultipleAccounts(name)
  if (length(dcfPath) == 0 || !file.exists(dcfPath))
    stop(missingAccountErrorMessage(name))
  # Read every field of the DCF into a named list.
  info <- as.list(readDcf(dcfPath, all = TRUE))
  # Private keys may have been line-wrapped when written; strip whitespace.
  if (!is.null(info$private_key))
    info$private_key <- gsub("[[:space:]]", "", info$private_key)
  info
}
#' @rdname accounts
#' @export
removeAccount <- function(name, server = NULL) {
  # Validate the name and resolve the account's config file, requiring a
  # single unambiguous match.
  if (!isStringParam(name))
    stop(stringParamErrorMessage("name"))
  dcfPath <- accountConfigFile(name, server)
  if (length(dcfPath) > 1)
    stopWithMultipleAccounts(name)
  if (length(dcfPath) == 0 || !file.exists(dcfPath))
    stop(missingAccountErrorMessage(name))
  # Deregistering an account is simply deleting its DCF file.
  file.remove(dcfPath)
  invisible(NULL)
}
# given the name of a registered server, does the following:
# 1) generates a public/private key pair and token ID
# 2) pushes the public side of the key pair to the server, and obtains
# from the server a URL at which the token can be claimed
# 3) returns the token ID, private key, and claim URL
# Generate a public/private key pair plus token id, push the public half to
# the given registered server, and return the token id, private key and the
# URL at which the user can claim the token.
getAuthToken <- function(server, userId = 0) {
if (missing(server) || is.null(server)) {
stop("You must specify a server to connect to.")
}
target <- serverInfo(server)
# generate a token and push it to the server
token <- generateToken()
connect <- connectClient(service = target$url,
authInfo = list(certificate = target$certificate))
response <- connect$addToken(list(token = token$token,
public_key = token$public_key,
user_id = as.integer(userId)))
# return the generated token and the information needed to claim it;
# the private key never leaves this machine
list(
token = token$token,
private_key = token$private_key,
claim_url = response$token_claim_url)
}
# given a server URL and raw information about an auth token, return the user
# who owns the token, if it's claimed, and NULL if the token is unclaimed.
# raises an error on any other HTTP error.
#
# this function is used by the RStudio IDE as part of the workflow which
# attaches a new Connect account.
getUserFromRawToken <- function(serverUrl, token, privateKey,
serverCertificate = NULL) {
# form a temporary client from the raw token
connect <- connectClient(service = serverUrl, authInfo =
list(token = token,
private_key = as.character(privateKey),
certificate = serverCertificate))
# attempt to fetch the user
user <- NULL
tryCatch({
user <- connect$currentUser()
}, error = function(e) {
# an HTTP 500 here is treated as "token not yet claimed" and leaves user
# NULL; any other error is re-raised to the caller
if (length(grep("HTTP 500", e$message)) == 0) {
stop(e)
}
})
# return the user we found (NULL when the token is still unclaimed)
user
}
# Persist a claimed token (and its private key) to the account's DCF config
# file so future API calls can authenticate without prompting.
registerUserToken <- function(serverName, accountName, userId, token,
privateKey) {
# write the user info
configFile <- accountConfigFile(accountName, serverName)
dir.create(dirname(configFile), recursive = TRUE, showWarnings = FALSE)
write.dcf(list(username = accountName,
accountId = userId,
token = token,
server = serverName,
private_key = as.character(privateKey)),
configFile)
# set restrictive permissions on it if possible (the file holds a secret key)
if (identical(.Platform$OS.type, "unix"))
Sys.chmod(configFile, mode="0600")
}
accountConfigFile <- function(name, server = NULL) {
  # When no server is given, search every server directory for an account
  # with this name.
  # BUG FIX: the old pattern paste0(name, ".dcf") was an unanchored regex in
  # which "." matches any character, so e.g. account "foo" also matched
  # "fooXdcf" or any filename merely containing the pattern. glob2rx()
  # escapes the dot and anchors the match, consistent with accounts().
  if (is.null(server)) {
    return(list.files(accountsConfigDir(),
                      pattern = glob2rx(paste0(name, ".dcf")),
                      recursive = TRUE, full.names = TRUE))
  }
  # Otherwise build the path directly under the server's directory.
  normalizePath(file.path(accountsConfigDir(), server,
                          paste0(name, ".dcf")),
                mustWork = FALSE)
}
# Root directory under which per-server account .dcf files are stored.
accountsConfigDir <- function() {
rsconnectConfigDir("accounts")
}
# Standard error message for a lookup of an unregistered account name.
missingAccountErrorMessage <- function(name) {
  # paste0() is the idiomatic form of paste(..., sep = "").
  paste0("account named '", name, "' does not exist")
}
resolveAccount <- function(account, server = NULL) {
  # Get existing registrations; accounts() returns NULL (not a zero-row data
  # frame) when none are registered.
  # BUG FIX: the original evaluated accounts(server)[,"name"] directly, which
  # indexes NULL when no accounts exist and raises an obscure "incorrect
  # number of dimensions" error instead of the intended stopWithNoAccount().
  registered <- accounts(server)
  if (is.null(registered) || nrow(registered) == 0)
    stopWithNoAccount()
  accountNames <- registered[, "name"]
  # If no account was specified, it can only default to a single registration.
  if (is.null(account)) {
    if (length(accountNames) == 1)
      return(accountNames[[1]])
    stopWithSpecifyAccount()
  }
  # Account explicitly specified: require exactly one matching registration.
  count <- sum(accountNames == account)
  if (count == 0)
    stopWithMissingAccount(account)
  else if (count > 1)
    stopWithMultipleAccounts(account)
  account
}
# TRUE when the account is registered against the hosted shinyapps.io service
# (as opposed to a self-hosted server).
isShinyapps <- function(accountInfo) {
identical(accountInfo$server, "shinyapps.io")
}
# The stopWith* helpers below raise user-facing errors; call. = FALSE keeps
# the internal call out of the message.
# No account has been registered on this system at all.
stopWithNoAccount <- function() {
stop("You must register an account using setAccountInfo prior to ",
"proceeding.", call. = FALSE)
}
# More than one account exists, so a default cannot be chosen.
stopWithSpecifyAccount <- function() {
stop("Please specify the account name (there is more than one ",
"account registered on this system)", call. = FALSE)
}
# The named account is not registered.
stopWithMissingAccount <- function(account) {
stop(missingAccountErrorMessage(account), call. = FALSE)
}
# The same account name is registered against multiple servers.
stopWithMultipleAccounts <- function(account) {
stop("Multiple accounts with the name '", account, "' exist. Please specify ",
"the server of the account you wish to use.", call. = FALSE)
}
# Resolve the full account details for the first account registered against
# the server whose URL is hostUrl; errors when no such server or account.
accountInfoFromHostUrl <- function(hostUrl) {
# get the list of all registered servers
servers <- servers()
# filter to just those matching the given host url
server <- servers[as.character(servers$url) == hostUrl,]
if (nrow(server) < 1) {
stop("No server with the URL ", hostUrl, " is registered.", call. = FALSE)
}
# extract server name (use the first match if several rows matched)
server <- as.character(server[1,"name"])
# now find accounts with the given server
account <- accounts(server = server)
if (is.null(account) || nrow(account) < 1) {
stop("No accounts registered with server ", server, call. = FALSE)
}
# return account info from the first one
return(accountInfo(name = as.character(account[1,"name"]),
server = server))
}
| /R/accounts.R | no_license | AlgoSkyNet/rsconnect | R | false | false | 13,083 | r | #' Account Management Functions
#'
#' Functions to enumerate and remove accounts on the local system. Prior to
#' deploying applications you need to register your account on the local system.
#'
#' You register an account using the [setAccountInfo()] function (for
#' ShinyApps) or [connectUser()] function (for other servers). You can
#' subsequently remove the account using the `removeAccount` function.
#'
#' The `accounts` and `accountInfo` functions are provided for viewing
#' previously registered accounts.
#'
#' @param name Name of account
#' @param server Name of the server on which the account is registered
#' (optional; see [servers()])
#'
#' @return `accounts` returns a data frame with the names of all accounts
#' registered on the system and the servers on which they reside.
#' `accountInfo` returns a list with account details.
#'
#' @rdname accounts
#' @export
#' @rdname accounts
#' @export
accounts <- function(server = NULL) {
  # Determine where account DCF files live, optionally scoped to one server.
  path <- accountsConfigDir()
  if (!is.null(server)) {
    path <- file.path(path, server)
  }

  # Enumerate account files (one .dcf per account), stripping the extension.
  accountnames <- file_path_sans_ext(
    list.files(path, pattern = glob2rx("*.dcf"), recursive = TRUE))
  if (length(accountnames) == 0) {
    return(NULL)
  }

  # The directory component names the server; files found at the top level
  # are attributed to "shinyapps.io" (the historical default location).
  servers <- dirname(accountnames)
  servers[servers == "."] <- "shinyapps.io"

  data.frame(name = fileLeaf(accountnames),
             server = servers,
             stringsAsFactors = FALSE)
}
#' Connect User Account
#'
#' Connect a user account to the package so that it can be used to deploy and
#' manage applications on behalf of the account.
#'
#' @param account A name for the account to connect. Optional.
#' @param server The server to connect to. Optional if there is only one server
#' registered.
#' @param quiet Whether or not to show messages and prompts while connecting the
#' account.
#'
#' @details When this function is invoked, a web browser will be opened to a
#' page on the target server where you will be prompted to enter your
#' credentials. Upon successful authentication, your local installation of
#' \pkg{rsconnect} and your server account will be paired, and you'll
#' be able to deploy and manage applications using the package without further
#' prompts for credentials.
#'
#' @family Account functions
#' @export
# Pair a user account on `server` with this rsconnect installation via the
# browser-based token-claim flow. Side effects: opens a web browser, blocks
# polling the server until the token is claimed, and writes the account's
# token/private key to the local accounts config directory.
connectUser <- function(account = NULL, server = NULL, quiet = FALSE) {
  # if server isn't specified, look up the default
  if (is.null(server)) {
    target <- getDefaultServer(local = TRUE)
  } else {
    target <- serverInfo(server)
  }
  if (is.null(target)) {
    stop("You must specify a server to connect to.")
  }

  # if account is specified and we already know about the account, get the User
  # ID so we can prefill authentication fields
  userId <- 0
  userAccounts <- accounts(target$name)
  if (!is.null(account) && !is.null(userAccounts)) {
    if (account %in% userAccounts[,"name"]) {
      accountDetails <- accountInfo(account, target$name)
      userId <- accountDetails$accountId
      if (!quiet) {
        message("The account '", account, "' is already registered; ",
                "attempting to reconnect it.")
      }
    }
  }

  # generate a token and send it to the server
  token <- getAuthToken(target$name)
  if (!quiet) {
    message("A browser window should open; if it doesn't, you may authenticate ",
            "manually by visiting ", token$claim_url, ".")
    message("Waiting for authentication...")
  }
  utils::browseURL(token$claim_url)

  # keep trying to authenticate until we're successful
  # NOTE(review): polls once per second with no timeout -- if the user never
  # claims the token this loop blocks indefinitely; confirm that is intended.
  repeat {
    Sys.sleep(1)
    user <- getUserFromRawToken(target$url, token$token, token$private_key,
                                target$certificate)
    if (!is.null(user))
      break
  }

  # populate the username if there wasn't one set on the server: prefer the
  # requested nickname, else first initial + last name, lowercased
  if (nchar(user$username) == 0) {
    if (!is.null(account))
      user$username <- account
    else
      user$username <- tolower(paste0(substr(user$first_name, 1, 1),
                                      user$last_name))

    # in interactive mode, prompt for a username before accepting defaults
    if (!quiet && interactive() && is.null(account)) {
      input <- readline(paste0("Choose a nickname for this account (default '",
                               user$username, "'): "))
      if (nchar(input) > 0)
        user$username <- input
    }
  }

  # persist the token/key pair for this server + account
  registerUserToken(serverName = target$name,
                    accountName = user$username,
                    userId = user$id,
                    token = token$token,
                    privateKey = token$private_key)

  if (!quiet) {
    message("Account registered successfully: ", user$first_name, " ",
            user$last_name, " (", user$username, ")")
  }
}
#' Set ShinyApps Account Info
#'
#' Configure a ShinyApps account for publishing from this system.
#'
#' @param name Name of account to save or remove
#' @param token User token for the account
#' @param secret User secret for the account
#'
#' @examples
#' \dontrun{
#'
#' # register an account
#' setAccountInfo("user", "token", "secret")
#'
#' # remove the same account
#' removeAccount("user")
#' }
#'
#' @family Account functions
#' @export
# Register a shinyapps.io account locally from its token/secret pair.
# Validates the credentials against the service (resolving the user and
# account IDs in the process) and persists everything to an account DCF file
# with owner-only permissions where the OS supports it.
setAccountInfo <- function(name, token, secret) {
  if (!isStringParam(name))
    stop(stringParamErrorMessage("name"))
  if (!isStringParam(token))
    stop(stringParamErrorMessage("token"))
  if (!isStringParam(secret))
    stop(stringParamErrorMessage("secret"))

  # create connect client for the shinyapps.io service
  serverInfo <- shinyappsServerInfo()
  authInfo <- list(token = token, secret = secret,
                   certificate = serverInfo$certificate)
  lucid <- lucidClient(serverInfo$url, authInfo)

  # get user Id (this round-trip also validates the supplied credentials)
  userId <- lucid$currentUser()$id

  # get account id: scan the user's accounts for the one matching `name`
  accountId <- NULL
  accounts <- lucid$accountsForUser(userId)
  for (account in accounts) {
    if (identical(account$name, name)) {
      accountId <- account$id
      break
    }
  }
  if (is.null(accountId))
    stop("Unable to determine account id for account named '", name, "'")

  # get the path to the config file, creating its directory if needed
  configFile <- accountConfigFile(name, serverInfo$name)
  dir.create(dirname(configFile), recursive = TRUE, showWarnings = FALSE)

  # write the user info
  write.dcf(list(name = name,
                 userId = userId,
                 accountId = accountId,
                 token = token,
                 secret = secret,
                 server = serverInfo$name),
            configFile,
            width = 100)

  # set restrictive permissions on it if possible (credentials on disk)
  if (identical(.Platform$OS.type, "unix"))
    Sys.chmod(configFile, mode="0600")
}
#' @rdname accounts
#' @family Account functions
#' @export
#' @rdname accounts
#' @family Account functions
#' @export
accountInfo <- function(name, server = NULL) {
  if (!isStringParam(name))
    stop(stringParamErrorMessage("name"))

  # Resolve the DCF file backing this account; ambiguity or absence is fatal.
  dcfPath <- accountConfigFile(name, server)
  if (length(dcfPath) > 1)
    stopWithMultipleAccounts(name)
  if (length(dcfPath) == 0 || !file.exists(dcfPath))
    stop(missingAccountErrorMessage(name))

  info <- as.list(readDcf(dcfPath, all = TRUE))

  # Strip all whitespace from the private key so it parses cleanly.
  if (!is.null(info$private_key)) {
    info$private_key <- gsub("[[:space:]]", "", info$private_key)
  }
  info
}
#' @rdname accounts
#' @export
#' @rdname accounts
#' @export
removeAccount <- function(name, server = NULL) {
  if (!isStringParam(name))
    stop(stringParamErrorMessage("name"))

  # Locate the account's DCF file; ambiguity or absence is fatal.
  dcfPath <- accountConfigFile(name, server)
  if (length(dcfPath) > 1)
    stopWithMultipleAccounts(name)
  if (length(dcfPath) == 0 || !file.exists(dcfPath))
    stop(missingAccountErrorMessage(name))

  file.remove(dcfPath)
  invisible(NULL)
}
# Given the name of a registered server:
#   1) generates a public/private key pair and a token ID
#   2) pushes the public half of the key pair to the server and obtains
#      a URL at which the token can be claimed
#   3) returns the token ID, private key, and claim URL
#
# userId is forwarded so the server can associate the token with an
# existing user (0 is the default passed by callers with no known user).
getAuthToken <- function(server, userId = 0) {
  if (missing(server) || is.null(server)) {
    stop("You must specify a server to connect to.")
  }
  target <- serverInfo(server)

  # generate a token and push its public key to the server
  token <- generateToken()
  connect <- connectClient(service = target$url,
                           authInfo = list(certificate = target$certificate))
  response <- connect$addToken(list(token = token$token,
                                    public_key = token$public_key,
                                    user_id = as.integer(userId)))

  # return the generated token and the information needed to claim it
  list(
    token = token$token,
    private_key = token$private_key,
    claim_url = response$token_claim_url)
}
# Given a server URL and raw information about an auth token, return the user
# who owns the token if it's claimed, and NULL if the token is unclaimed.
# Raises an error on any other HTTP error.
#
# This function is used by the RStudio IDE as part of the workflow which
# attaches a new Connect account.
getUserFromRawToken <- function(serverUrl, token, privateKey,
                                serverCertificate = NULL) {
  # form a temporary client from the raw token
  connect <- connectClient(service = serverUrl, authInfo =
                           list(token = token,
                                private_key = as.character(privateKey),
                                certificate = serverCertificate))

  # attempt to fetch the user; an "HTTP 500" error is treated as "token not
  # yet claimed" and leaves `user` as NULL, anything else is re-raised.
  # NOTE(review): matching on the message text "HTTP 500" is fragile --
  # confirm connectClient offers no structured error class to test instead.
  user <- NULL
  tryCatch({
    user <- connect$currentUser()
  }, error = function(e) {
    if (length(grep("HTTP 500", e$message)) == 0) {
      stop(e)
    }
  })

  # return the user we found (or NULL)
  user
}
# Persist a claimed token/private-key pair for `accountName` on `serverName`
# to the account's DCF file, creating the directory if needed.
registerUserToken <- function(serverName, accountName, userId, token,
                              privateKey) {
  configFile <- accountConfigFile(accountName, serverName)
  dir.create(dirname(configFile), recursive = TRUE, showWarnings = FALSE)

  fields <- list(username = accountName,
                 accountId = userId,
                 token = token,
                 server = serverName,
                 private_key = as.character(privateKey))
  write.dcf(fields, configFile)

  # Credentials on disk: restrict to owner where the OS supports it.
  if (identical(.Platform$OS.type, "unix"))
    Sys.chmod(configFile, mode = "0600")
}
# Path(s) to the DCF file for account `name`.
#
# With a `server`, returns the single (possibly nonexistent) path under that
# server's directory. With server = NULL, searches every server directory and
# returns all matches (zero, one, or several paths); callers treat length > 1
# as ambiguity.
accountConfigFile <- function(name, server = NULL) {
  # if no server is specified, try to find an account with the given name
  # associated with any server
  if (is.null(server)) {
    # Anchor and escape the file name: list.files() treats `pattern` as a
    # regex, so the previous bare paste0(name, ".dcf") let "." match any
    # character and matched anywhere in the name (e.g. "ann" also matched
    # "joann.dcf"). glob2rx produces "^name\\.dcf$".
    return(list.files(accountsConfigDir(),
                      pattern = utils::glob2rx(paste0(name, ".dcf")),
                      recursive = TRUE, full.names = TRUE))
  }
  normalizePath(file.path(accountsConfigDir(), server, paste0(name, ".dcf")),
                mustWork = FALSE)
}
# Directory under the rsconnect configuration root holding one DCF file per
# registered account (nested in per-server subdirectories).
accountsConfigDir <- function() {
  rsconnectConfigDir("accounts")
}
# Build the standard error message for a nonexistent account registration.
missingAccountErrorMessage <- function(name) {
  paste0("account named '", name, "' does not exist")
}
# Resolve `account` (possibly NULL) to the name of exactly one registered
# account, optionally restricted to `server`. Errors when there are no
# registered accounts, when NULL cannot be defaulted unambiguously, or when
# the given name matches zero or several registrations.
resolveAccount <- function(account, server = NULL) {
  registered <- accounts(server)[, "name"]
  if (length(registered) == 0)
    stopWithNoAccount()

  if (is.null(account)) {
    # No account given: usable only when exactly one is registered.
    if (length(registered) == 1)
      return(registered[[1]])
    stopWithSpecifyAccount()
  }

  # An explicit account must match exactly one registration.
  matches <- sum(registered == account)
  if (matches == 0)
    stopWithMissingAccount(account)
  else if (matches > 1)
    stopWithMultipleAccounts(account)
  account
}
# TRUE when the account's `server` field is exactly "shinyapps.io".
isShinyapps <- function(accountInfo) {
  server <- accountInfo$server
  identical(server, "shinyapps.io")
}
# Abort: no account is registered on this system at all.
stopWithNoAccount <- function() {
  stop("You must register an account using setAccountInfo prior to proceeding.",
       call. = FALSE)
}
# Abort: more than one account is registered and none was specified.
stopWithSpecifyAccount <- function() {
  stop("Please specify the account name (there is more than one ",
       "account registered on this system)", call. = FALSE)
}
# Abort with the standard "account does not exist" message for `account`.
stopWithMissingAccount <- function(account) {
  stop(missingAccountErrorMessage(account), call. = FALSE)
}
# Abort: `account` names more than one registration; a server is required.
stopWithMultipleAccounts <- function(account) {
  stop("Multiple accounts with the name '", account, "' exist. Please specify ",
       "the server of the account you wish to use.", call. = FALSE)
}
# Map a server host URL back to locally-stored account credentials.
# Finds the registered server whose URL equals `hostUrl`, then returns
# accountInfo() for the first account registered against that server.
# Errors (call. = FALSE) when no matching server or account exists.
accountInfoFromHostUrl <- function(hostUrl) {
  # get the list of all registered servers
  servers <- servers()

  # filter to just those matching the given host url (exact string match)
  server <- servers[as.character(servers$url) == hostUrl,]
  if (nrow(server) < 1) {
    stop("No server with the URL ", hostUrl, " is registered.", call. = FALSE)
  }

  # extract server name (first row wins if several entries share the URL)
  server <- as.character(server[1,"name"])

  # now find accounts with the given server
  account <- accounts(server = server)
  if (is.null(account) || nrow(account) < 1) {
    stop("No accounts registered with server ", server, call. = FALSE)
  }

  # return account info from the first one
  return(accountInfo(name = as.character(account[1,"name"]),
                     server = server))
}
|
### EXPLORATORY DATA ANALYSIS - Course Project 1: Plot 1
## Data Source: UC Irvine Machine Learning Repository
## Data Set: Individual household electric power consumption
##
## Downloads the raw data, subsets it to 1 and 2 February 2007, and writes
## a histogram of Global Active Power to plot1.png.
##
## Before running, set a working directory and copy this file into it; data
## files and plots are created in the working directory. Check plots against
## the reference ones in the "figures" directory of the github repo (the
## reference plots display with a transparent background, not white).

## Download file from website into a temp file
## (method = "curl" requires the curl binary on the PATH)
fileurl <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
temp <- tempfile()
download.file(fileurl,temp, method = "curl")

## Unzip temp file and read into dataframe
df <- read.table (unz(temp, "household_power_consumption.txt"),sep=";",header = TRUE)

## Subset the data for the two dates of interest and bind into a working
## dataframe (wkdf) to use for the plots. At this point the date column
## values are still character strings in d/m/Y form.
ss1 <- df[,1] == "1/2/2007"
ss2 <- df[,1] == "2/2/2007"
wkdf <- rbind(df[ss1,],df[ss2,])

## Create a combined date-time character vector from the Date and Time
## columns, then convert it to a POSIX time vector
dtv <- paste(wkdf[,1],wkdf[,2])
DateTime <- strptime(dtv, format = "%d/%m/%Y %H:%M:%S")

## Bind the POSIX date-time vector onto the working data frame
wkdf <- cbind(DateTime,wkdf)

## Set up PNG device and define the output file to write the plot to
png("plot1.png", bg = "transparent")

## Convert Global_active_power to numeric. The column was not read as
## numeric (the raw file's missing-value markers force it to a non-numeric
## type -- hence the conversion via character, which preserves the printed
## values rather than factor level codes).
gap <- as.numeric(as.character(wkdf$Global_active_power))

## Produce histogram plot of the Global_active_power variable
with(wkdf, hist(gap, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))

## Close the png() device, flushing the file to disk
dev.off()
| /Plot1.R | no_license | nickijc/ExData_Plotting1 | R | false | false | 1,979 | r | ### EXPLORATORY DATA ANALYSIS - Course Project 1: Plot 1
## Data Source: UC Irvine Machine Learning Repository
## Data Set: Individual household electric power consumption
#Before running, set a working directory and copy this file into it. Data files and plots will be created in the working dir
#Please ensure to check plots against the reference ones in the "figures" directory of the github repo
#The reference plots display with a transparent background, not white
## Download file from website into temp file
fileurl <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
temp <- tempfile()
download.file(fileurl,temp, method = "curl")
##Unzip temp file and read into dataframe
df <- read.table (unz(temp, "household_power_consumption.txt"),sep=";",header = TRUE)
##Subset the data for the two dates of interest and bind into a working dataframe (wkdf) to use for the plots.
##Note that at this point, the date column values are being treated as a charater string
ss1 <- df[,1] == "1/2/2007"
ss2 <- df[,1] == "2/2/2007"
wkdf <- rbind(df[ss1,],df[ss2,])
##Reformat the Date and Time columns to be of the correct type and format
##Create DateTime Character Vector from the Date and Time variables
dtv <- paste(wkdf[,1],wkdf[,2])
##Convert to Posix time vector
DateTime <- strptime(dtv, format = "%d/%m/%Y %H:%M:%S")
##Bind the Posix Date Time Vector to the working data frame
wkdf <- cbind(DateTime,wkdf)
## Set up PNG device and define the output file to write the plot to
png("plot1.png", bg = "transparent")
## Construct the histogram plot
## convert Global_active_power variable to numeric type (needs to convert via character type to maintain actual values)
gap <- as.numeric(as.character(wkdf$Global_active_power))
# produce histogram plot of Global_active_power variable
with(wkdf, hist(gap, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"))
##Close png() device
dev.off()
|
/Validacion.Rcheck Renny/00_pkg_src/Validacion/man/PlanktonR.Rd | no_license | mablan/Validacion | R | false | false | 913 | rd | ||
# Package test entry point: run the full testthat suite for psychTestRCAT
# (invoked from tests/ by R CMD check).
library(testthat)
library(psychTestRCAT)

test_check("psychTestRCAT")
| /tests/testthat.R | permissive | pmcharrison/psychTestRCAT | R | false | false | 70 | r | library(testthat)
library(psychTestRCAT)
test_check("psychTestRCAT")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stars.R, R/sf.R, R/raster.R, R/ncdf.R,
% R/proxy.R, R/rasterize.R, R/xts.R, R/OpenStreetMap.R
\name{st_as_stars}
\alias{st_as_stars}
\alias{st_as_stars.list}
\alias{st_as_stars.default}
\alias{st_as_stars.stars}
\alias{st_as_stars.bbox}
\alias{st_as_stars.sf}
\alias{st_as_stars.Raster}
\alias{st_as_stars.ncdfgeom}
\alias{st_as_stars.stars_proxy}
\alias{st_as_stars.data.frame}
\alias{st_as_stars.xts}
\alias{st_as_stars.OpenStreetMap}
\title{convert objects into a stars object}
\usage{
st_as_stars(.x, ...)
\method{st_as_stars}{list}(.x, ..., dimensions = NULL)
\method{st_as_stars}{default}(.x = NULL, ..., raster = NULL)
\method{st_as_stars}{stars}(.x, ..., curvilinear = NULL, crs = st_crs(4326))
\method{st_as_stars}{bbox}(
.x,
...,
nx,
ny,
dx = dy,
dy = dx,
xlim = .x[c("xmin", "xmax")],
ylim = .x[c("ymin", "ymax")],
values = 0,
n = 64800,
pretty = FALSE,
inside = FALSE,
nz
)
\method{st_as_stars}{sf}(.x, ..., name = attr(.x, "sf_column"))
\method{st_as_stars}{Raster}(.x, ..., att = 1, ignore_file = FALSE)
\method{st_as_stars}{ncdfgeom}(.x, ..., sf_geometry = NA)
\method{st_as_stars}{stars_proxy}(
.x,
...,
downsample = 0,
url = attr(.x, "url"),
envir = parent.frame()
)
\method{st_as_stars}{data.frame}(
.x,
...,
dims = coords,
xy = dims[1:2],
y_decreasing = TRUE,
coords = 1:2
)
\method{st_as_stars}{xts}(.x, ..., dimensions)
\method{st_as_stars}{OpenStreetMap}(.x, ..., as_col = FALSE)
}
\arguments{
\item{.x}{object to convert}
\item{...}{in case \code{.x} is of class \code{bbox}, arguments passed on to \link{pretty}}
\item{dimensions}{object of class dimensions}
\item{raster}{character; the names of the dimensions that denote raster dimensions}
\item{curvilinear}{only for creating curvilinear grids: named length 2 list holding longitude and latitude matrices; the names of this list should correspond to raster dimensions referred to}
\item{crs}{object of class \code{crs} with the coordinate reference system of the values in \code{curvilinear}; see details}
\item{nx}{integer; number of cells in x direction; see details}
\item{ny}{integer; number of cells in y direction; see details}
\item{dx}{numeric; cell size in x direction; see details}
\item{dy}{numeric; cell size in y direction; see details}
\item{xlim}{length 2 numeric vector with extent (min, max) in x direction}
\item{ylim}{length 2 numeric vector with extent (min, max) in y direction}
\item{values}{value(s) to populate the raster values with}
\item{n}{the (approximate) target number of grid cells}
\item{pretty}{logical; should cell coordinates have \link{pretty} values?}
\item{inside}{logical; should all cells entirely fall inside the bbox, potentially not covering it completely?}
\item{nz}{integer; number of cells in z direction; if missing no z-dimension is created.}
\item{name}{character; name for the geometry dimensions}
\item{att}{see \link[raster:factor]{factorValues}; column in the RasterLayer's attribute table}
\item{ignore_file}{logical; if \code{TRUE}, ignore the Raster object file name}
\item{sf_geometry}{sf data.frame with geometry and attributes to be added to stars object.
Must have same number of rows as timeseries instances.}
\item{downsample}{integer: if larger than 0, downsample with this rate (number of pixels to skip in every row/column); if length 2, specifies downsampling rate in x and y.}
\item{url}{character; URL of the stars endpoint where the data reside}
\item{envir}{environment to resolve objects in}
\item{dims}{the column names or indices that form the cube dimensions}
\item{xy}{the x and y raster dimension names or indices; only takes effect after dims has been specified}
\item{y_decreasing}{logical; if TRUE, (numeric) y values get a negative delta (decrease with increasing index)}
\item{coords}{same as dims, for symmetry with \link[sf]{st_as_sf}}
\item{as_col}{logical; return rgb numbers (FALSE) or (character) color values (TRUE)?}
}
\description{
convert objects into a stars object
}
\details{
if \code{curvilinear} is a \code{stars} object with longitude and latitude values, its coordinate reference system is typically not that of the latitude and longitude values.
For the \code{bbox} method: if \code{pretty} is \code{TRUE}, raster cells may extend the coordinate range of \code{.x} on all sides. If in addition to \code{nx} and \code{ny}, \code{dx} and \code{dy} are also missing, these are set to a single value computed as \code{sqrt(diff(xlim)*diff(ylim)/n)}. If \code{nx} and \code{ny} are missing, they are computed as the ceiling of the ratio of the (x or y) range divided by (dx or dy), unless \code{inside} is \code{TRUE}, in which case ceiling is replaced by floor. Positive \code{dy} will be made negative. Further named arguments (\code{...}) are passed on to \code{pretty}.
For the \code{ncdfgeom} method: objects are point-timeseries with optional line or polygon geometry for each timeseries specified with the \code{sf_geometry} parameter. See \pkg{ncdfgeom} for more about this NetCDF-based format for geometry and timeseries.
for the \code{xts} methods, if \code{dimensions} are provided, time has to be the first dimension.
}
\examples{
data(Produc, package = "plm")
st_as_stars(Produc, y_decreasing = FALSE)
}
| /man/st_as_stars.Rd | permissive | bafuentes/stars | R | false | true | 5,359 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stars.R, R/sf.R, R/raster.R, R/ncdf.R,
% R/proxy.R, R/rasterize.R, R/xts.R, R/OpenStreetMap.R
\name{st_as_stars}
\alias{st_as_stars}
\alias{st_as_stars.list}
\alias{st_as_stars.default}
\alias{st_as_stars.stars}
\alias{st_as_stars.bbox}
\alias{st_as_stars.sf}
\alias{st_as_stars.Raster}
\alias{st_as_stars.ncdfgeom}
\alias{st_as_stars.stars_proxy}
\alias{st_as_stars.data.frame}
\alias{st_as_stars.xts}
\alias{st_as_stars.OpenStreetMap}
\title{convert objects into a stars object}
\usage{
st_as_stars(.x, ...)
\method{st_as_stars}{list}(.x, ..., dimensions = NULL)
\method{st_as_stars}{default}(.x = NULL, ..., raster = NULL)
\method{st_as_stars}{stars}(.x, ..., curvilinear = NULL, crs = st_crs(4326))
\method{st_as_stars}{bbox}(
.x,
...,
nx,
ny,
dx = dy,
dy = dx,
xlim = .x[c("xmin", "xmax")],
ylim = .x[c("ymin", "ymax")],
values = 0,
n = 64800,
pretty = FALSE,
inside = FALSE,
nz
)
\method{st_as_stars}{sf}(.x, ..., name = attr(.x, "sf_column"))
\method{st_as_stars}{Raster}(.x, ..., att = 1, ignore_file = FALSE)
\method{st_as_stars}{ncdfgeom}(.x, ..., sf_geometry = NA)
\method{st_as_stars}{stars_proxy}(
.x,
...,
downsample = 0,
url = attr(.x, "url"),
envir = parent.frame()
)
\method{st_as_stars}{data.frame}(
.x,
...,
dims = coords,
xy = dims[1:2],
y_decreasing = TRUE,
coords = 1:2
)
\method{st_as_stars}{xts}(.x, ..., dimensions)
\method{st_as_stars}{OpenStreetMap}(.x, ..., as_col = FALSE)
}
\arguments{
\item{.x}{object to convert}
\item{...}{in case \code{.x} is of class \code{bbox}, arguments passed on to \link{pretty}}
\item{dimensions}{object of class dimensions}
\item{raster}{character; the names of the dimensions that denote raster dimensions}
\item{curvilinear}{only for creating curvilinear grids: named length 2 list holding longitude and latitude matrices; the names of this list should correspond to raster dimensions referred to}
\item{crs}{object of class \code{crs} with the coordinate reference system of the values in \code{curvilinear}; see details}
\item{nx}{integer; number of cells in x direction; see details}
\item{ny}{integer; number of cells in y direction; see details}
\item{dx}{numeric; cell size in x direction; see details}
\item{dy}{numeric; cell size in y direction; see details}
\item{xlim}{length 2 numeric vector with extent (min, max) in x direction}
\item{ylim}{length 2 numeric vector with extent (min, max) in y direction}
\item{values}{value(s) to populate the raster values with}
\item{n}{the (approximate) target number of grid cells}
\item{pretty}{logical; should cell coordinates have \link{pretty} values?}
\item{inside}{logical; should all cells entirely fall inside the bbox, potentially not covering it completely?}
\item{nz}{integer; number of cells in z direction; if missing no z-dimension is created.}
\item{name}{character; name for the geometry dimensions}
\item{att}{see \link[raster:factor]{factorValues}; column in the RasterLayer's attribute table}
\item{ignore_file}{logical; if \code{TRUE}, ignore the Raster object file name}
\item{sf_geometry}{sf data.frame with geometry and attributes to be added to stars object.
Must have same number of rows as timeseries instances.}
\item{downsample}{integer: if larger than 0, downsample with this rate (number of pixels to skip in every row/column); if length 2, specifies downsampling rate in x and y.}
\item{url}{character; URL of the stars endpoint where the data reside}
\item{envir}{environment to resolve objects in}
\item{dims}{the column names or indices that form the cube dimensions}
\item{xy}{the x and y raster dimension names or indices; only takes effect after dims has been specified}
\item{y_decreasing}{logical; if TRUE, (numeric) y values get a negative delta (decrease with increasing index)}
\item{coords}{same as dims, for symmetry with \link[sf]{st_as_sf}}
\item{as_col}{logical; return rgb numbers (FALSE) or (character) color values (TRUE)?}
}
\description{
convert objects into a stars object
}
\details{
if \code{curvilinear} is a \code{stars} object with longitude and latitude values, its coordinate reference system is typically not that of the latitude and longitude values.
For the \code{bbox} method: if \code{pretty} is \code{TRUE}, raster cells may extend the coordinate range of \code{.x} on all sides. If in addition to \code{nx} and \code{ny}, \code{dx} and \code{dy} are also missing, these are set to a single value computed as \code{sqrt(diff(xlim)*diff(ylim)/n)}. If \code{nx} and \code{ny} are missing, they are computed as the ceiling of the ratio of the (x or y) range divided by (dx or dy), unless \code{inside} is \code{TRUE}, in which case ceiling is replaced by floor. Positive \code{dy} will be made negative. Further named arguments (\code{...}) are passed on to \code{pretty}.
For the \code{ncdfgeom} method: objects are point-timeseries with optional line or polygon geometry for each timeseries specified with the \code{sf_geometry} parameter. See \pkg{ncdfgeom} for more about this NetCDF-based format for geometry and timeseries.
for the \code{xts} methods, if \code{dimensions} are provided, time has to be the first dimension.
}
\examples{
data(Produc, package = "plm")
st_as_stars(Produc, y_decreasing = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crawlers.R
\name{download_pdf_tjrs}
\alias{download_pdf_tjrs}
\title{Download de DJE do TJRS}
\usage{
download_pdf_tjrs(...)
}
\arguments{
\item{caderno}{Número inteiro que representa o caderno a ser baixado. Dependendo da época esse número tem interpretações diferentes. Veja as vignettes para mais informações.}
\item{edicao}{Número da edição. A primeira edição disponível é a de número 3424, de agosto de 2006. Aparentemente é contínuo, veja as vignettes para mais informações.}
\item{path}{caminho p/ salvar o pdf}
}
\description{
Faz a requisição para download do PDF de DJE do TJRS
}
| /man/download_pdf_tjrs.Rd | permissive | courtsbr/scraperTJRS | R | false | true | 692 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crawlers.R
\name{download_pdf_tjrs}
\alias{download_pdf_tjrs}
\title{Download de DJE do TJRS}
\usage{
download_pdf_tjrs(...)
}
\arguments{
\item{caderno}{Número inteiro que representa o caderno a ser baixado. Dependendo da época esse número tem interpretações diferentes. Veja as vignettes para mais informações.}
\item{edicao}{Número da edição. A primeira edição disponível é a de número 3424, de agosto de 2006. Aparentemente é contínuo, veja as vignettes para mais informações.}
\item{path}{caminho p/ salvar o pdf}
}
\description{
Faz a requisição para download do PDF de DJE do TJRS
}
|
# Extract downscaled CMIP5 (BCCA) daily temperature projections at building
# locations and summarise them into monthly temperature-bin counts.
library("dplyr")
library("ncdf4")
## library("ncdf4.helpers")
library("weathermetrics")
library("raster")

# Building coordinates and the set of buildings with usable energy data.
## load("~/Dropbox/thesis/code/pubPriCmp/data/buildingLatlng.rda")
load("../data/building_location.rda")
load("../data/energy_monthly_web_continental.rda")
load("../data/retrofit.avg.energy.enough.data.rda")

# One sub-folder per BCCA download batch.
# NOTE(review): list.files()'s second argument is a regex, so "bcca5*"
# matches "bcca" followed by any number of "5"s -- confirm a glob prefix
# match ("bcca5") was intended.
folders = list.files("cmip5/bcca_now/", "bcca5*")
## ## check downloads
## result = lapply(folders, function(f) {
## lines = readLines(paste0("cmip5/bcca_now/", f, "/MetaData.txt"))
## period.line = lines[[5]]
## period = gsub("Period: ", "", period.line)
## model.lines <- readLines(paste0("cmip5/bcca_now/", f, "/Projections5.txt"))
## model = sapply(model.lines, function (m) {substr(m, 1, nchar(m) - 6)})
## scenario = sapply(model.lines, function (m) {substr(m, nchar(m) - 4, nchar(m))})
## return(tibble::tibble("model"=model, "period"=period, "scenario"=scenario, "folder"=f))
## })
## result %>%
## dplyr::bind_rows() %>%
## dplyr::distinct(model, period, scenario) %>%
## dplyr::group_by(period, scenario) %>%
## dplyr::count() %>%
## dplyr::ungroup() %>%
## {.}
# Locations that already have extracted data (kept for the optional
# "skip existing" filter, currently commented out below).
exist.location = readr::read_csv("cmip5/bcca/bcca5/canesm2.1.rcp45.csv") %>%
  dplyr::distinct(lat.pts, lon.pts) %>%
  dplyr::mutate(has.data=T) %>%
  {.}

# Unique building locations to extract weather for, restricted to buildings
# present in the continental monthly energy data. Longitudes are shifted to
# the [0, 360) convention via `%% 360`.
uniqueLocation <- building_location %>%
  dplyr::select(BLDGNUM, Latitude, Longitude) %>%
  dplyr::filter(BLDGNUM %in% unique(energy_monthly_web_continental$BLDGNUM)) %>%
  dplyr::rename(latitude=Latitude, longitude=Longitude) %>%
  distinct(latitude, longitude) %>%
  dplyr::mutate(longitude360=longitude %% 360) %>%
  ## ## only process locations not already with data
  ## dplyr::left_join(exist.location, by=c("latitude"="lat.pts", "longitude360"="lon.pts")) %>%
  ## dplyr::filter(is.na(has.data)) %>%
  {.}

# Point matrix (lon, lat) handed to raster::extract below.
lon.pts <- uniqueLocation$longitude360
lat.pts <- uniqueLocation$latitude
extract.pts <- cbind(lon.pts,lat.pts)
# Re-list the download folders (same regex caveat as above) and process a
# hand-picked slice of them.
folders = list.files("cmip5/bcca_now/", "bcca5*")
## folders = list.files("cmip5/bcca/", "bcca5*")

# For each folder: read its metadata to find the period start date, open the
# NetCDF extractions, bilinearly interpolate daily average temperature at
# every location, and write per-projection CSVs of monthly counts of days in
# 10-degree-F temperature bins.
for (f in folders[10:34]) {
  # Line 5 of MetaData.txt carries "Period: <YYYYMon...>"; its first seven
  # characters give the period's start year+month.
  ## lines = readLines(paste0("cmip5/bcca/", f, "/MetaData.txt"))
  lines = readLines(paste0("cmip5/bcca_now/", f, "/MetaData.txt"))
  period.line = lines[[5]]
  period = gsub("Period: ", "", period.line)
  startdate = as.Date(sprintf("%s-01", substr(period, 1, 7)), format='%Y%b-%d')

  # Only folders whose period mentions 2014Feb are processed in this run.
  if (!stringr::str_detect(period, "2014Feb")) {
    print(startdate)
    next
  }
  print(startdate)

  # Read the projection names and the number of daily time steps, then close
  # the handle (raster::brick reopens the file per projection below).
  ## ncdfData = ncdf4::nc_open(paste0("cmip5/bcca/", f, "/Extraction_tasmax.nc"))
  ncdfData = ncdf4::nc_open(paste0("cmip5/bcca_now/", f, "/Extraction_tasmax.nc"))
  proj = ncdf4::ncatt_get(ncdfData, varid=0, attname="Projections")
  projections = unlist(strsplit(proj$value, split=", "))
  nsteps = dim(ncdf4::ncvar_get(ncdfData, varid = "time"))
  print(projections)
  ncdf4::nc_close(ncdfData)

  # Bin labels: 10-degree-F bands plus open-ended and missing bins.
  break_labels = c("<10", sprintf("[%s-%s)", seq(10, 80, by=10), seq(20, 90, by=10)), ">90", "Missing")

  for (i in 1:length(projections)) {
    proj = projections[[i]]

    # Daily average temperature = midpoint of the tasmax/tasmin bricks
    # (level = i selects the i-th projection within the NetCDF).
    ## tasmax = raster::brick(sprintf("cmip5/bcca/%s/Extraction_tasmax.nc", f), lvar=4, level=i)
    ## tasmin = raster::brick(sprintf("cmip5/bcca/%s/Extraction_tasmin.nc", f), lvar=4, level=i)
    tasmax = raster::brick(sprintf("cmip5/bcca_now/%s/Extraction_tasmax.nc", f), lvar=4, level=i)
    tasmin = raster::brick(sprintf("cmip5/bcca_now/%s/Extraction_tasmin.nc", f), lvar=4, level=i)
    tasavg = (tasmax + tasmin) / 2

    # Bilinear spatial interpolation at the target points, one day at a time.
    result = lapply(1:nsteps, function(j) {
      if (j %% 100 == 0) {
        print(j)
      }
      ext <- raster::extract(tasavg[[j]], extract.pts, method="bilinear")
      df <- data.frame(lat.pts, lon.pts, ext, step=j, proj=proj) %>%
        {.}
      return(df)
    })

    # Convert C -> F, recover calendar dates from the step offset, bin the
    # daily values, and count days per (projection, location, year, month,
    # bin), spreading bins into columns with zero fill.
    # NOTE(review): dplyr::funs() used here is deprecated in current dplyr.
    df.output = dplyr::bind_rows(result) %>%
      tibble::as_tibble() %>%
      dplyr::mutate(ext=weathermetrics::celsius.to.fahrenheit(ext)) %>%
      dplyr::mutate(date=startdate + step - 1) %>%
      dplyr::mutate(`value_label`=dplyr::case_when(`ext` < 10 ~ break_labels[1],
                                                   `ext` < 20 ~ break_labels[2],
                                                   `ext` < 30 ~ break_labels[3],
                                                   `ext` < 40 ~ break_labels[4],
                                                   `ext` < 50 ~ break_labels[5],
                                                   `ext` < 60 ~ break_labels[6],
                                                   `ext` < 70 ~ break_labels[7],
                                                   `ext` < 80 ~ break_labels[8],
                                                   `ext` < 90 ~ break_labels[9],
                                                   `ext` < 1000 ~ break_labels[10],
                                                   TRUE ~ break_labels[11])) %>%
      dplyr::mutate(`value_label`=factor(`value_label`, levels=break_labels)) %>%
      dplyr::mutate(`year`=format(date, "%Y")) %>%
      dplyr::mutate(`month`=format(date, "%m")) %>%
      dplyr::select(-date, -step) %>%
      dplyr::group_by(`proj`, lat.pts, lon.pts, `year`, `month`, `value_label`) %>%
      dplyr::summarise(`value_count`=n()) %>%
      dplyr::ungroup() %>%
      tidyr::spread(key=`value_label`, value=`value_count`, fill=0) %>%
      dplyr::mutate_at(vars(ends_with(")"), ends_with("0")), funs(ifelse(is.na(.), 0L, .))) %>%
      {.}

    ## output.filename = sprintf("cmip5/bcca/%s/%s.csv", f, proj)
    output.filename = sprintf("cmip5/bcca_now/%s/%s.csv", f, proj)

    # NOTE(review): the append branch is disabled by `if (FALSE)`, so the
    # output file is always overwritten -- confirm that is intended.
    if (FALSE) {
      ## if (file.exists(output.filename)) {
      readr::read_csv(output.filename) %>%
        dplyr::bind_rows(df.output) %>%
        readr::write_csv(output.filename)
      print(sprintf("append to %s", output.filename))
    } else {
      dplyr::bind_rows(df.output) %>%
        readr::write_csv(output.filename)
      print(sprintf("write to %s", output.filename))
    }
  }
}
| /data-raw/read_bcca.R | no_license | yujiex/retrofitClimateEffect | R | false | false | 6,009 | r | library("dplyr")
library("ncdf4")
## library("ncdf4.helpers")
library("weathermetrics")
library("raster")
## load("~/Dropbox/thesis/code/pubPriCmp/data/buildingLatlng.rda")
load("../data/building_location.rda")
load("../data/energy_monthly_web_continental.rda")
load("../data/retrofit.avg.energy.enough.data.rda")
folders = list.files("cmip5/bcca_now/", "bcca5*")
## ## check downloads
## result = lapply(folders, function(f) {
## lines = readLines(paste0("cmip5/bcca_now/", f, "/MetaData.txt"))
## period.line = lines[[5]]
## period = gsub("Period: ", "", period.line)
## model.lines <- readLines(paste0("cmip5/bcca_now/", f, "/Projections5.txt"))
## model = sapply(model.lines, function (m) {substr(m, 1, nchar(m) - 6)})
## scenario = sapply(model.lines, function (m) {substr(m, nchar(m) - 4, nchar(m))})
## return(tibble::tibble("model"=model, "period"=period, "scenario"=scenario, "folder"=f))
## })
## result %>%
## dplyr::bind_rows() %>%
## dplyr::distinct(model, period, scenario) %>%
## dplyr::group_by(period, scenario) %>%
## dplyr::count() %>%
## dplyr::ungroup() %>%
## {.}
# Locations that already appear in a previously produced output CSV; intended
# (when the commented-out join below is enabled) to skip re-processing.
exist.location = readr::read_csv("cmip5/bcca/bcca5/canesm2.1.rcp45.csv") %>%
  dplyr::distinct(lat.pts, lon.pts) %>%
  dplyr::mutate(has.data=T) %>%
  {.}
## locations to get weather for
# One row per unique building coordinate that has continental energy data;
# longitudes are mapped onto the [0, 360) convention used by the climate grids.
uniqueLocation <- building_location %>%
  dplyr::select(BLDGNUM, Latitude, Longitude) %>%
  dplyr::filter(BLDGNUM %in% unique(energy_monthly_web_continental$BLDGNUM)) %>%
  dplyr::rename(latitude=Latitude, longitude=Longitude) %>%
  distinct(latitude, longitude) %>%
  dplyr::mutate(longitude360=longitude %% 360) %>%
  ## ## only process locations not already with data
  ## dplyr::left_join(exist.location, by=c("latitude"="lat.pts", "longitude360"="lon.pts")) %>%
  ## dplyr::filter(is.na(has.data)) %>%
  {.}
## locations to get weather for
# Coordinate matrix (lon, lat) consumed by raster::extract() in the loop below.
lon.pts <- uniqueLocation$longitude360
lat.pts <- uniqueLocation$latitude
extract.pts <- cbind(lon.pts,lat.pts)
# For each downloaded BCCA extraction folder: read its metadata to get the
# period start date, then (only for the folder whose period starts Feb 2014)
# bilinearly interpolate daily average temperature at every building location
# and write monthly counts of days per 10-degree-F bin to one CSV per
# climate projection.
# NOTE(review): only folders[10:34] are processed — presumably earlier folders
# were handled in a prior run; confirm the index range before reusing.
folders = list.files("cmip5/bcca_now/", "bcca5*")
## folders = list.files("cmip5/bcca/", "bcca5*")
for (f in folders[10:34]) {
  ## lines = readLines(paste0("cmip5/bcca/", f, "/MetaData.txt"))
  # Assumes the period is on line 5 of MetaData.txt as "Period: YYYYMon..." —
  # TODO confirm the metadata layout is stable across folders.
  lines = readLines(paste0("cmip5/bcca_now/", f, "/MetaData.txt"))
  period.line = lines[[5]]
  period = gsub("Period: ", "", period.line)
  # First 7 characters are "YYYYMon"; append day 01 to get the start date.
  startdate = as.Date(sprintf("%s-01", substr(period, 1, 7)), format='%Y%b-%d')
  # Skip folders whose period does not start in Feb 2014.
  if (!stringr::str_detect(period, "2014Feb")) {
    print(startdate)
    next
  }
  print(startdate)
  ## ncdfData = ncdf4::nc_open(paste0("cmip5/bcca/", f, "/Extraction_tasmax.nc"))
  # Read the projection names (global attribute) and the number of time steps.
  ncdfData = ncdf4::nc_open(paste0("cmip5/bcca_now/", f, "/Extraction_tasmax.nc"))
  proj = ncdf4::ncatt_get(ncdfData, varid=0, attname="Projections")
  projections = unlist(strsplit(proj$value, split=", "))
  nsteps = dim(ncdf4::ncvar_get(ncdfData, varid = "time"))
  print(projections)
  ncdf4::nc_close(ncdfData)
  # Temperature bins: "<10", "[10-20)", ..., "[80-90)", ">90", plus "Missing".
  break_labels = c("<10", sprintf("[%s-%s)", seq(10, 80, by=10), seq(20, 90, by=10)), ">90", "Missing")
  # NOTE(review): 1:length(projections) misbehaves if projections is empty;
  # seq_along(projections) would be safer.
  for (i in 1:length(projections)) {
    proj = projections[[i]]
    ## tasmax = raster::brick(sprintf("cmip5/bcca/%s/Extraction_tasmax.nc", f), lvar=4, level=i)
    ## tasmin = raster::brick(sprintf("cmip5/bcca/%s/Extraction_tasmin.nc", f), lvar=4, level=i)
    # Load max/min temperature for projection i (level=i selects the
    # projection dimension) and average them into a daily-mean brick.
    tasmax = raster::brick(sprintf("cmip5/bcca_now/%s/Extraction_tasmax.nc", f), lvar=4, level=i)
    tasmin = raster::brick(sprintf("cmip5/bcca_now/%s/Extraction_tasmin.nc", f), lvar=4, level=i)
    tasavg = (tasmax + tasmin) / 2
    ## spatial interpolation at each step
    # One data frame of interpolated values per time step.
    result = lapply(1:nsteps, function(j) {
      # Progress marker every 100 steps.
      if (j %% 100 == 0) {
        print(j)
      }
      ext <- raster::extract(tasavg[[j]], extract.pts, method="bilinear")
      df <- data.frame(lat.pts, lon.pts, ext, step=j, proj=proj) %>%
        {.}
      return(df)
    })
    # Convert to Fahrenheit, label each day with its temperature bin, and
    # count days per (projection, location, year, month, bin).
    df.output = dplyr::bind_rows(result) %>%
      tibble::as_tibble() %>%
      dplyr::mutate(ext=weathermetrics::celsius.to.fahrenheit(ext)) %>%
      # Step j corresponds to calendar day startdate + (j - 1).
      dplyr::mutate(date=startdate + step - 1) %>%
      dplyr::mutate(`value_label`=dplyr::case_when(`ext` < 10 ~ break_labels[1],
                                                   `ext` < 20 ~ break_labels[2],
                                                   `ext` < 30 ~ break_labels[3],
                                                   `ext` < 40 ~ break_labels[4],
                                                   `ext` < 50 ~ break_labels[5],
                                                   `ext` < 60 ~ break_labels[6],
                                                   `ext` < 70 ~ break_labels[7],
                                                   `ext` < 80 ~ break_labels[8],
                                                   `ext` < 90 ~ break_labels[9],
                                                   `ext` < 1000 ~ break_labels[10],
                                                   TRUE ~ break_labels[11])) %>%
      dplyr::mutate(`value_label`=factor(`value_label`, levels=break_labels)) %>%
      dplyr::mutate(`year`=format(date, "%Y")) %>%
      dplyr::mutate(`month`=format(date, "%m")) %>%
      dplyr::select(-date, -step) %>%
      dplyr::group_by(`proj`, lat.pts, lon.pts, `year`, `month`, `value_label`) %>%
      dplyr::summarise(`value_count`=n()) %>%
      dplyr::ungroup() %>%
      # One column per temperature bin; absent bins are filled with 0.
      tidyr::spread(key=`value_label`, value=`value_count`, fill=0) %>%
      # NOTE(review): funs() is deprecated in current dplyr, and spread(fill=0)
      # should already have filled the NAs this step guards against.
      dplyr::mutate_at(vars(ends_with(")"), ends_with("0")), funs(ifelse(is.na(.), 0L, .))) %>%
      {.}
    ## output.filename = sprintf("cmip5/bcca/%s/%s.csv", f, proj)
    output.filename = sprintf("cmip5/bcca_now/%s/%s.csv", f, proj)
    # The append branch is disabled (if (FALSE)); every run overwrites the CSV.
    if (FALSE) {
      ## if (file.exists(output.filename)) {
      readr::read_csv(output.filename) %>%
        dplyr::bind_rows(df.output) %>%
        readr::write_csv(output.filename)
      print(sprintf("append to %s", output.filename))
    } else {
      # bind_rows() of a single data frame is a pass-through here.
      dplyr::bind_rows(df.output) %>%
        readr::write_csv(output.filename)
      print(sprintf("write to %s", output.filename))
    }
  }
}
|
#' @import stringr
#' @import GenomicRanges
#' @import IRanges
#' @title Makes a reference file for Salmon
#' @description This function creates decoys and a transcriptome that will be used by Salmon. It also creates a reference file to import the estimates after the Salmon run.
#' The user can enter a RepMask file without deleting co-transcribed or overlapping repeats with the RepMask argument, or enter a RepMask file without co-transcribed but overlapping repeats with the RepMask.clean argument, or a file free of co-transcribed or overlapping repeats with the RepMask.ovlp.clean argument. When the file contains co-transcribed repeats, it must indicate rm.cotrans = T and when the file contains overlaps it must indicate overlapping = T.
#' @param overlapping Indicates whether the RepMask file contains overlapping repetitions (TRUE) or not (FALSE). When the RepMask file contains overlapping repetitions, the ovlp.res() function will be used to solve them and the resolution criteria must be indicated (higher score (HS), longer length (LE) or lower Kimura distances (LD))
#' @param rule A numerical vector respectively indicating the minimum percentage of identity, length (in base pairs) of the repeat to be analyzed and the percentage of the length of class/family repeat with respect to the length of the transcript. Example: c(80, 80, 80). Default is c(0,0,0)
#' @param trme transcriptome in fasta format
#' @param RepMask RepeatMasker output file. If rm.cotrans = F it is assumed that the file does not contain cotranscribed repeats. If overlapping = F it is assumed that the file does not contain overlapping.
#' @param rm.cotrans logical vector indicating whether co-transcribed repeats should be removed
#' @param align .align file
#' @param over.res Indicates the method by which the repetition overlap will be resolved.
#' HS: higher score, bases are assigned to the element with the highest score
#' LS: longer element, bases are assigned to the longest element
#' LD: lower divergence, bases are assigned to the element with the least divergence.
#' in all cases both elements have the same characteristics, the bases are assigned to the first element.
#' @param anot annotation file in outfmt6 format. It is necessary when the option rm.cotrans = T
#' @param gff3 gff3 file. It is necessary when the option rm.cotrans = T
#' @param stranded logical vector indicating if the library is strand specific
#' @param cleanTEsProt logical vector indicating whether the search for TEs-related proteins should be carried out (e.g.
#' transposases, integrases, env, reverse transcriptase, etc.). We recommend that users use a curated annotations file,
#' in which these genes have been excluded; therefore the default option is F. When T is selected, a search is performed
#' against a database obtained from UniProt, so we recommend that the annotations file have this format for the subject
#' sequence id (e.g. "CO1A2_MOUSE"/"sp|Q01149|CO1A2_MOUSE"/"tr|H9GLU4|H9GLU4_ANOCA")
#' @param featureSum Returns statistics related to the characteristics of the transcripts. Requires a gff3 file. If TRUE, returns a list of the
#' @param outdir Output directory
#' @param by The column by which the repeats will be classified
#' @param annot_by A character vector indicating whether the annotations should be made by "transcripts" or by "fragments". When annot_by = "transcripts", the proportion of each transposon class/family in each transcript is calculated and the transcript is annotated with the class/family with the highest coverage.
#' @param ignore.aln.pos The RepeatMasker alignments file may have discrepancies in the repeats positions with respect to the output file. If you selected over.res = "LD", then you can choose whether to take into account the positions of the alignment file or to take the average per repeats class (default).
#' @param threads Number of cores to use in the processing. By default threads = 1
#' @param bedtools bedtools binary path
#' @export
mk.reference <- function(RepMask, overlapping = FALSE,
                         by = c("namRep", "classRep", "class", "supFam", "Fam"),
                         rule = c(0, 0, 0), trme, threads = 1,
                         annot_by = "transcripts", bedtools = "bedtools",
                         outdir, over.res = "HS", ...) {
  # Resolve overlapping repeats first if the RepeatMasker table still has them.
  if (isTRUE(overlapping)) {
    RM <- ovlp.res(RepMask = RepMask, threads = threads, outdir = outdir,
                   over.res = over.res, ...)
  } else {
    RM <- RepMask
  }
  # Keep only transposable-element classes: drop simple/low-complexity/RNA
  # repeats and unclassified entries.
  RM <- RM[RM$classRep %!in% c("Unknown", "rRNA", "Satellite", "Simple_repeat",
                               "Low_complexity", "RNA", "scRNA", "snRNA",
                               "srpRNA", "tRNA", "Other"), ]
  RM$width <- (RM$EPMQuer - RM$SPMQuer)
  if (sum(rule) == 0 && annot_by == "fragments") {
    message("making references to fragments annotations ...")
    message("rule is 0-0-0 ...")
    # Columns 5:7 hold the query sequence name, start and end positions.
    BED <- cbind(RM[, c(5, 6, 7)], RM[, by])
  } else {
    message("applying rule ...")
    # rule[1]: minimum % identity (expressed as maximum % substitution);
    # rule[2]: minimum repeat length in bp.
    refSeqs <- RM[RM$PersubM < (100 - rule[1]) & RM$width > rule[2], ]
    refSeqs$namSeqL <- paste0(refSeqs$namSeq, ".", refSeqs$seqLength)
    # Total repeat bases per (class/family, transcript); row names of `a`
    # end up as "<namSeq>.<seqLength>.<repNam>".
    suppressWarnings(
      a <- as.data.frame(
        unlist(
          apply(
            t(tapply(refSeqs$width, list(refSeqs[, by], refSeqs$namSeqL), sum)),
            1,
            function(x) {
              na.omit(x)
            }
          )
        )
      )
    )
    sm_df <- data.frame(
      namSeq = split.vec(row.names(a), "\\.", 1),
      repNam = split.vec(row.names(a), "\\.", 3),
      seqLength = as.numeric(split.vec(row.names(a), "\\.", 2)),
      Sum = a[, 1]
    )
    # rule[3]: minimum percentage of the transcript covered by the repeat.
    sm_df$perRepOvTran <- (sm_df$Sum / sm_df$seqLength * 100)
    if (annot_by == "transcripts") {
      # Whole-transcript annotation: keep, per transcript, the class/family
      # with the highest coverage, and span the full transcript length.
      BED <- sm_df[sm_df$perRepOvTran > rule[3], ]
      BED <- BED[order(BED$perRepOvTran, decreasing = TRUE), ]
      BED <- BED[!duplicated(BED$namSeq), ]
      BED <- data.frame(namSeq = BED$namSeq, start = 1,
                        end = as.numeric(BED$seqLength), repNam = BED$repNam)
      message("making references to transcripts annotations ...")
    } else {
      # Per-fragment annotation: keep every fragment belonging to a
      # transcript that passes the coverage threshold.
      refSeqs <- refSeqs[refSeqs$namSeq %in% sm_df$namSeq[sm_df$perRepOvTran > rule[3]], ]
      BED <- data.frame(namSeq = refSeqs$namSeq, start = refSeqs$SPMQuer,
                        end = refSeqs$EPMQuer, repNam = refSeqs[, by])
      message("making references to fragments annotations ...")
    }
  }
  # Temporary files are written to the working directory and removed below.
  write.table(BED, "decoys_head.bed", quote = FALSE, row.names = FALSE,
              col.names = FALSE, sep = "\t")
  allIDfasta <- system(paste("grep '>'", trme, "|sed 's/>//' | awk '{print $1}'"),
                       intern = TRUE)
  # BUG FIX: the original indexed BED$refSeqs.namSeq, a column name that is
  # never created in any branch above, so the expression was NULL and *every*
  # transcript ended up in the decoy set. The sequence name is always the
  # first column of BED.
  decoys <- allIDfasta[allIDfasta %!in% BED[[1]]]
  write.table(decoys, "decoys.txt", col.names = FALSE, row.names = FALSE, quote = FALSE)
  # Pull the decoy sequences out of the transcriptome fasta.
  system(paste0("awk", ' \'BEGIN{while((getline<"decoys.txt")>0)l[">"$1]=1}/^>/{f=l[$1]}f\' ', trme, " > decoy.fa"))
  message("making trmeSalmon.fasta file")
  # Sort, merge overlapping intervals, extract repeat sequences, and append
  # the decoys to build the Salmon reference fasta.
  system("cat decoys_head.bed | sort -k1,1 -k2,2n > RM_or.bed")
  system(paste(bedtools, "merge -i RM_or.bed -c 4 -o collapse > RM_or_merged.bed"))
  system(paste(bedtools, "getfasta -fi", trme, "-bed RM_or_merged.bed -fo Rep.fa"))
  system("cat Rep.fa decoy.fa > trmeSalmon.fasta")
  system(paste("rm decoys_head.bed Rep.fa RM_or_merged.bed RM_or.bed decoy.fa decoys.txt",
               paste0(trme, ".fai")))
  # Reference table mapping each extracted region id to its repeat id, used
  # later to import Salmon estimates.
  Ref.salmon <- data.frame(paste0(BED[, 1], ":", BED[, 2], "-", BED[, 3]), BED[, 4])
  names(Ref.salmon) <- c("seqID", "repID")
  message("writing files in the output directory...")
  system(paste("mv trmeSalmon.fasta", outdir))
  write.table(Ref.salmon, paste0(outdir, "/references.csv"), col.names = FALSE,
              row.names = FALSE, quote = FALSE, sep = ";")
  write.table(decoys, paste0(outdir, "/decoys.txt"), col.names = FALSE,
              row.names = FALSE, quote = FALSE)
  message(paste("The reference.csv, decoys.txt and trmeSalmon.fasta files are in",
                outdir, "directory"))
  Ref.salmon
}
| /R/mk.reference.r | no_license | Ravindra-Raut/ExplorATEproject | R | false | false | 7,451 | r | #' @import stringr
#' @import GenomicRanges
#' @import IRanges
#' @title Makes a reference file for Salmon
#' @description This function creates decoys and a transcriptome that will be used by Salmon. It also creates a reference file to import the estimates after the Salmon run.
#' The user can enter a RepMask file without deleting co-transcribed or overlapping repeats with the RepMask argument, or enter a RepMask file without co-transcribed but overlapping repeats with the RepMask.clean argument, or a file free of co-transcribed or overlapping repeats with the RepMask.ovlp.clean argument. When the file contains co-transcribed repeats, it must indicate rm.cotrans = T and when the file contains overlaps it must indicate overlapping = T.
#' @param overlapping Indicates whether the RepMask file contains overlapping repetitions (TRUE) or not (FALSE). When the RepMask file contains overlapping repetitions, the ovlp.res() function will be used to solve them and the resolution criteria must be indicated (higher score (HS), longer length (LE) or lower Kimura distances (LD))
#' @param rule A numerical vector respectively indicating the minimum percentage of identity, length (in base pairs) of the repeat to be analyzed and the percentage of the length of class/family repeat with respect to the length of the transcript. Example: c(80, 80, 80). Default is c(0,0,0)
#' @param trme transcriptome in fasta format
#' @param RepMask RepeatMasker output file. If rm.cotrans = F it is assumed that the file does not contain cotranscribed repeats. If overlapping = F it is assumed that the file does not contain overlapping.
#' @param rm.cotrnas logical vector indicating whether co-transcribed repeats should be removed
#' @param align .align file
#' @param over.res Indicates the method by which the repetition overlap will be resolved.
#' HS: higher score, bases are assigned to the element with the highest score
#' LS: longer element, bases are assigned to the longest element
#' LD: lower divergence, bases are assigned to the element with the least divergence.
#' in all cases both elements have the same characteristics, the bases are assigned to the first element.
#' @param anot annotation file in outfmt6 format. It is necessary when the option rm.cotrans = T
#' @param gff3 gff3 file. It is necessary when the option rm.cotrans = T
#' @param stranded logical vector indicating if the library is strand specific
#' @param cleanTEsProt logical vector indicating whether the search for TEs-related proteins should be carried out (e.g.
#' transposases, integrases, env, reverse transcriptase, etc.). We recommend that users use a curated annotations file,
#' in which these genes have been excluded; therefore the default option is F. When T is selected, a search is performed
#' against a database obtained from UniProt, so we recommend that the annotations file have this format for the subject
#' sequence id (e.g. "CO1A2_MOUSE"/"sp|Q01149|CO1A2_MOUSE"/"tr|H9GLU4|H9GLU4_ANOCA")
#' @param featureSum Returns statistics related to the characteristics of the transcripts. Requires a gff3 file. If TRUE, returns a list of the
#' @param outdir Output directory
#' @param by The column by which the repeats will be classified
#' @param annot_by A character vector indicating whether the annotations should be made by "transcripts" or by "fragments". When annot_by = "transcripts", the proportion of each transposon class/family in each transcript is calculated and the transcript is annotated with the class/family with the highest coverage.
#' @param ignore.aln.pos The RepeatMasker alignments file may have discrepancies in the repeats positions with respect to the output file. If you selected over.res = "LD", then you can choose whether to take into account the positions of the alignment file or to take the average per repeats class (default).
#' @param threads Number of cores to use in the processing. By default threads = 1
#' @param bedtools bedtools binary path
#' @export
mk.reference <- function(RepMask,overlapping=F, by=c("namRep","classRep", "class", "supFam", "Fam"), rule=c(0,0,0), trme, threads=1, annot_by="transcripts", bedtools="bedtools", outdir, over.res="HS", ...){
if(overlapping==T){
RM <- ovlp.res(RepMask=RepMask, threads=threads, outdir=outdir,over.res=over.res,...)
}else{
RM <- RepMask
}
RM <- RM[RM$classRep%!in%c("Unknown", "rRNA", "Satellite", "Simple_repeat","Low_complexity","RNA","scRNA","snRNA","srpRNA", "tRNA","Other"),]
RM$width <- (RM$EPMQuer - RM$SPMQuer)
if(sum(rule)==0 && annot_by=="fragments"){
message("making references to fragments annotations ...")
message("rule is 0-0-0 ...")
BED <- cbind(RM[,c(5,6,7)],RM[,by])
}else{
message("applying rule ...")
refSeqs <- RM[RM$PersubM<(100-rule[1]) & RM$width>rule[2],]
refSeqs$namSeqL <- paste0(refSeqs$namSeq,".",refSeqs$seqLength)
suppressWarnings(
a <- as.data.frame(
unlist(
apply(
t(
tapply(
refSeqs$width,
list(refSeqs[,by],
refSeqs$namSeqL),
sum)
),
1,
function(x){
return(
na.omit(x)
)
}
)
)
)
)
sm_df <- data.frame(
namSeq=split.vec(row.names(a),"\\.",1),
repNam=split.vec(row.names(a),"\\.",3),
seqLength=as.numeric(split.vec(row.names(a),"\\.",2)),
Sum=a[,1]
)
sm_df$perRepOvTran <- (sm_df$Sum/sm_df$seqLength*100)
if(annot_by=="transcripts"){
BED <- sm_df[sm_df$perRepOvTran>rule[3],]
BED <- BED[order(BED$perRepOvTran, decreasing = T),]
BED <- BED[!duplicated(BED$namSeq),]
BED <- data.frame(namSeq=BED$namSeq,start=1,end=as.numeric(BED$seqLength),repNam=BED$repNam)
message("making references to transcripts annotations ...")
}else{
refSeqs <- refSeqs[refSeqs$namSeq%in% sm_df$namSeq[sm_df$perRepOvTran>rule[3]],]
BED <- data.frame(namSeq=refSeqs$namSeq,start=refSeqs$SPMQuer,end=refSeqs$EPMQuer,repNam=refSeqs[,by])
message("making references to fragments annotations ...")
}
}
write.table(BED,"decoys_head.bed", quote = F, row.names = F, col.names=F, sep="\t")
allIDfasta <- system(paste("grep '>'",trme,"|sed 's/>//' | awk '{print $1}'"),intern = T)
decoys <- allIDfasta[allIDfasta%!in%BED$refSeqs.namSeq]
write.table(decoys,"decoys.txt", col.names = F, row.names = F, quote = F)
system(paste0("awk", ' \'BEGIN{while((getline<"decoys.txt")>0)l[">"$1]=1}/^>/{f=l[$1]}f\' ',trme," > decoy.fa"))
message("making trmeSalmon.fasta file")
system("cat decoys_head.bed | sort -k1,1 -k2,2n > RM_or.bed")
system(paste(bedtools,"merge -i RM_or.bed -c 4 -o collapse > RM_or_merged.bed"))
system(paste(bedtools,"getfasta -fi",trme,"-bed RM_or_merged.bed -fo Rep.fa"))
system("cat Rep.fa decoy.fa > trmeSalmon.fasta")
system(paste("rm decoys_head.bed Rep.fa RM_or_merged.bed RM_or.bed decoy.fa decoys.txt", paste0(trme,".fai")))
Ref.salmon <- data.frame(paste0(BED[,1],":",BED[,2],"-",BED[,3]),BED[,4])
names(Ref.salmon) <- c("seqID","repID")
message("writing files in the output directory...")
system(paste("mv trmeSalmon.fasta",outdir))
write.table(Ref.salmon,paste0(outdir,"/references.csv"), col.names = F, row.names = F, quote = F, sep = ";")
write.table(decoys,paste0(outdir,"/decoys.txt"), col.names = F, row.names = F, quote = F)
message(paste("The reference.csv, decoys.txt and trmeSalmon.fasta files are in", outdir, "directory"))
Ref.salmon
}
|
#' Combine vectors
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `combine()` is deprecated in favour of [vctrs::vec_c()]. `combine()`
#' attempted to automatically guess whether you wanted [c()] or [unlist()],
#' but could fail in surprising ways. We now believe it's better to be explicit.
#'
#' @param ... Vectors to combine.
#' @keywords internal
#' @export
#' @examples
#' f1 <- factor("a")
#' f2 <- factor("b")
#'
#' combine(f1, f2)
#' # ->
#' vctrs::vec_c(f1, f2)
#'
#' combine(list(f1, f2))
#' # ->
#' vctrs::vec_c(!!!list(f1, f2))
combine <- function(...) {
  lifecycle::deprecate_warn("1.0.0", "combine()", "vctrs::vec_c()", always = TRUE)

  # A single list argument is treated as the collection of vectors to combine.
  inputs <- list2(...)
  if (length(inputs) == 1 && is.list(inputs[[1]])) {
    inputs <- inputs[[1]]
  }

  # Drop NULL elements and discard names before splicing into vec_c().
  inputs <- unname(Filter(Negate(is.null), inputs))

  if (length(inputs) == 0) {
    return(logical())
  }
  vec_c(!!!inputs)
}
| /R/deprec-combine.R | permissive | tidyverse/dplyr | R | false | false | 910 | r | #' Combine vectors
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `combine()` is deprecated in favour of [vctrs::vec_c()]. `combine()`
#' attempted to automatically guess whether you wanted [c()] or [unlist()],
#' but could fail in surprising ways. We now believe it's better to be explicit.
#'
#' @param ... Vectors to combine.
#' @keywords internal
#' @export
#' @examples
#' f1 <- factor("a")
#' f2 <- factor("b")
#'
#' combine(f1, f2)
#' # ->
#' vctrs::vec_c(f1, f1)
#'
#' combine(list(f1, f2))
#' # ->
#' vctrs::vec_c(!!!list(f1, f2))
combine <- function(...) {
lifecycle::deprecate_warn("1.0.0", "combine()", "vctrs::vec_c()", always = TRUE)
args <- list2(...)
if (length(args) == 1 && is.list(args[[1]])) {
args <- args[[1]]
}
args <- keep(args, function(.x) !is.null(.x))
names(args) <- NULL
if (length(args) == 0) {
logical()
} else {
vec_c(!!!args)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ProjectData.R
\name{SetProjectPI}
\alias{SetProjectPI}
\title{Set PI Name}
\usage{
SetProjectPI(PI)
}
\arguments{
\item{PI}{A string containing the analyst name}
}
\value{
A message stating the name has been changed.
}
\description{
This function allows you to set the Project's PI. This will overwrite the
current value if exists.
}
\keyword{PI}
\keyword{ProjData}
\keyword{options}
| /man/SetProjectPI.Rd | permissive | akirosingh/Jagannathantools | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ProjectData.R
\name{SetProjectPI}
\alias{SetProjectPI}
\title{Set PI Name}
\usage{
SetProjectPI(PI)
}
\arguments{
\item{PI}{A string containing the analyst name}
}
\value{
A message stating the name has been changed.
}
\description{
This function allows you to set the Project's PI. This will overwrite the
current value if exists.
}
\keyword{PI}
\keyword{ProjData}
\keyword{options}
|
context("theme_genes")

# Smoke test: constructing a gene-arrow plot styled with theme_genes() (with
# a replaced legend position) should succeed. expect_error(expr, NA) asserts
# that *no* error is raised.
test_that("a simple geom_gene_arrow plot with theme_genes is drawn without errors", {
  expect_error( {
    ggplot(
      gggenes_example_genes,
      aes(xmin = start, xmax = end, y = molecule, fill = gene)
    ) +
      geom_gene_arrow(alpha = 0.5, linetype = 2, colour = "purple") +
      # %+replace% swaps in the new theme elements rather than merging them.
      theme_genes() %+replace% theme(legend.position = "bottom")
  } , NA)
})
| /tests/testthat/test-theme_genes.R | no_license | snashraf/gggenes | R | false | false | 389 | r | context("theme_genes")
test_that("a simple geom_gene_arrow plot with theme_genes is drawn without errors", {
expect_error( {
ggplot(
gggenes_example_genes,
aes(xmin = start, xmax = end, y = molecule, fill = gene)
) +
geom_gene_arrow(alpha = 0.5, linetype = 2, colour = "purple") +
theme_genes() %+replace% theme(legend.position = "bottom")
} , NA)
})
|
library(reshape2)
url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'

# Download the raw archive into data/ (created on first run).
if (!file.exists("data")) {
  dir.create('data')
}
download.file(url, destfile = 'data/sensor_data.zip', method = 'curl')
# BUG FIX: unzip(..., list = TRUE) only *lists* the archive contents and never
# extracts anything, so every read.table() below failed to find its file.
# Extract under data/ so the 'data/UCI HAR Dataset/...' paths exist.
files <- unzip('data/sensor_data.zip', exdir = 'data')

## 1. Merges the training and the test sets to create one data set.
# Feature names (column V2) and the activity-label lookup table.
features <- read.table('data/UCI HAR Dataset/features.txt', header = FALSE)
activity_labels <- read.table('data/UCI HAR Dataset/activity_labels.txt', header = FALSE)

# Training data: measurements, activity codes, and subject ids.
x_train <- read.table('data/UCI HAR Dataset/train/X_train.txt', header = FALSE)
y_train <- read.table('data/UCI HAR Dataset/train/y_train.txt', header = FALSE)
subject_train <- read.table('data/UCI HAR Dataset/train/subject_train.txt', header = FALSE)

# Test data: same layout as the training files.
x_test <- read.table('data/UCI HAR Dataset/test/X_test.txt', header = FALSE)
y_test <- read.table('data/UCI HAR Dataset/test/y_test.txt', header = FALSE)
subject_test <- read.table('data/UCI HAR Dataset/test/subject_test.txt', header = FALSE)

# Stack train on top of test and label the measurement columns.
x_data <- rbind(x_train, x_test)
names(x_data) <- features$V2

y_data <- rbind(y_train, y_test)
names(y_data) <- c('activity')

## 3. Uses descriptive activity names to name the activities in the data set
y_data_f <- factor(y_data$activity)
levels(y_data_f) <- activity_labels$V2

subject_data <- rbind(subject_train, subject_test)
names(subject_data) <- c('subject')

# Bind measurements, subject ids, and activity labels into one data set.
all_data <- cbind(x_data, subject_data, y_data_f)
names(all_data)[names(all_data) == 'y_data_f'] <- 'activity'
# Writing data for backup.
write.table(all_data, file = 'merged_dataset.txt', col.names = TRUE)

## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# FIX: dropped the leading '*' from the pattern (a quantifier with nothing to
# repeat); the matched column set is unchanged.
col_selection <- grep("[Mm]ean[()]|[Ss]td[()]|activity|subject", names(all_data), ignore.case = TRUE)
selected_data <- all_data[, col_selection]

## 5. Creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
temp_data <- melt(selected_data, id = c("activity", "subject"))
tidy_data <- dcast(temp_data, activity + subject ~ variable, mean)
head(tidy_data)

## Create a file with the new tidy dataset
write.table(tidy_data, "tidy_data.txt")
| /run_analysis.R | no_license | akarim78/getting-cleaning-data-project | R | false | false | 2,478 | r | library(reshape2)
url<- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
if(!file.exists("data")){
dir.create('data')
}
download.file(url, destfile='data/sensor_data.zip', method='curl')
files<-unzip('data/sensor_data.zip',list=TRUE)
## 1. Merges the training and the test sets to create one data set.
## Getting list of features
features <- read.table('data/UCI HAR Dataset/features.txt', header=FALSE)
## Getting list of activity label
activity_labels<- read.table('data/UCI HAR Dataset/activity_labels.txt', header=FALSE)
## Getting all training data
x_train <- read.table('data/UCI HAR Dataset/train/X_train.txt', header=FALSE)
y_train <- read.table('data/UCI HAR Dataset/train/y_train.txt', header=FALSE)
subject_train <- read.table('data/UCI HAR Dataset/train/subject_train.txt', header=FALSE)
## Getting all test data
x_test <- read.table('data/UCI HAR Dataset/test/X_test.txt', header=FALSE)
y_test <- read.table('data/UCI HAR Dataset/test/y_test.txt', header=FALSE)
subject_test <- read.table('data/UCI HAR Dataset/test/subject_test.txt', header=FALSE)
## Concatenating training and test X data
x_data<-rbind(x_train, x_test)
names(x_data)<-features$V2
## Concatenating training and test Y data along with replacing with activity labels
y_data<-rbind(y_train, y_test)
names(y_data)<-c('activity')
## 3. Uses descriptive activity names to name the activities in the data set
y_data_f <- factor(y_data$activity)
levels(y_data_f) = activity_labels$V2
## Concatenating training and test subject data
subject_data<-rbind(subject_train,subject_test)
names(subject_data)<-c('subject')
## Binding all the columns and merging is done
all_data<-cbind(x_data, subject_data, y_data_f)
names(all_data)[names(all_data) == 'y_data_f'] <- 'activity'
## Writing data for backup
write.table(all_data, file = 'merged_dataset.txt', col.names=TRUE)
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
col_selection <- grep("*[Mm]ean[()]|*[Ss]td[()]|activity|subject",names(all_data),ignore.case=TRUE)
selected_data <- all_data[, col_selection]
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
temp_data <- melt(selected_data,id=c("activity", "subject"))
tidy_data <- dcast(temp_data, activity + subject ~ variable,mean)
head(tidy_data)
## Create a file with the new tidy dataset
write.table(tidy_data,"tidy_data.txt")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subgroupAnalysis.R
\name{plot.subgroupAnalysis}
\alias{plot.subgroupAnalysis}
\title{Plot method for objects of class 'subgroupAnalysis'}
\usage{
\method{plot}{subgroupAnalysis}(x, which = NULL, ...)
}
\arguments{
\item{x}{An object of class \code{subgroupAnalysis}.}
\item{which}{\code{character}. Subgroup analysis to be plotted (variable name).}
\item{...}{Additional arguments.}
}
\description{
Plot S3 method for objects of class \code{subgroupAnalysis}.
}
\author{
Mathias Harrer \email{mathias.h.harrer@gmail.com},
Paula Kuper \email{paula.r.kuper@gmail.com}, Pim Cuijpers \email{p.cuijpers@vu.nl}
}
| /man/plot.subgroupAnalysis.Rd | no_license | metapsy-project/metapsyTools | R | false | true | 684 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subgroupAnalysis.R
\name{plot.subgroupAnalysis}
\alias{plot.subgroupAnalysis}
\title{Plot method for objects of class 'runMetaAnalysis'}
\usage{
\method{plot}{subgroupAnalysis}(x, which = NULL, ...)
}
\arguments{
\item{x}{An object of class \code{runMetaAnalysis}.}
\item{which}{\code{character}. Subgroup analysis to be plotted (variable name).}
\item{...}{Additional arguments.}
}
\description{
Plot S3 method for objects of class \code{runMetaAnalysis}.
}
\author{
Mathias Harrer \email{mathias.h.harrer@gmail.com},
Paula Kuper \email{paula.r.kuper@gmail.com}, Pim Cuijpers \email{p.cuijpers@vu.nl}
}
|
library(readr)
library(signal)
# These values need to be changed for each data set.
{
  fileFolder <- 'data/FinalAggregated_210204_122536.csv'
  Append <- '_210204_122536.csv'
  #StationaryAnem <- "data/Stationary_210202_122942.csv"
  StationaryAnem <- 'data/array0204.dat'
  # Fixed coordinates of the stationary anemometer.
  lat_anem <- 40.594926667
  long_anem <- -105.13984
}
# These values remain constant across data sets.
{
  #AerisFileName <- paste('Aeris',Append,sep='_')
  #LocationFileName <- paste("Location",Append,sep='_')
  VesselHeadingName <- paste("VesselHeading",Append,sep='_')
  WindFileName <- paste('Stationary',Append,sep='_')
  finalFolder <- fileFolder
  finalFilename <- paste('finalAggregated',Append,sep='_')
  combinedScriptLoc <- 'combineDataScript.R'
  # Load the processing functions used below.
  source("Scripts/WindCollection.R")
  source("Scripts/CarCorrection.R")
  source("Scripts/CarCorrectionAlt.R")
  #suppressMessages(source(combinedScriptLoc))
}
# Aggregate the stationary-anemometer data, write an intermediate copy, then
# apply both Airmar-based car corrections in place.
FinalAggregated2 <- WindCollection(StationaryAnem, finalFilename, finalFolder, lat_anem, long_anem)
# NOTE(review): this output path has no extension and its date stamp (210202)
# does not match Append (210204) — confirm this is intentional.
write_csv(FinalAggregated2,'data/appended_210202_121448')
FinalAggregated2 <- CarCorrection_airmar(FinalAggregated2,fileFolder, VesselHeadingName, lat_anem, long_anem)
FinalAggregated2 <- CarCorrection_airmar_Alt(FinalAggregated2,fileFolder, VesselHeadingName, lat_anem, long_anem)
# NOTE(review): four of the five assignments below (finalFilename, finalFolder,
# lat_anem, long_anem) are no-op self-assignments, likely leftovers from a
# rename; anem_data merely aliases StationaryAnem.
anem_data <- StationaryAnem
finalFilename <- finalFilename
finalFolder <- finalFolder
lat_anem <- lat_anem
long_anem <- long_anem
# Quality filter: close to the anemometer, small relative error, moving car,
# bounded magnitude ratio and turn rate.
test <- filter(FinalAggregated2, AnemDistance < 43, RelErrorMag > -15, RelErrorMag < 15, AnemDeriv < 0, Velocity > 0.3, CarMagRatio < 10, TurnRate < 0.3)
selectdata <- FinalAggregated2[1:1500,1:ncol(FinalAggregated2)]
CarVel <- ggplot() +
geom_point(data = selecdata, aes(x = nearest10hz, y = Velocity, color = "blue")) + # must include argument label "data"
labs(title = "Car Velocity", x = "Time (s)", y = "Velocity (m/s)") +
theme(legend.position="top", plot.title = element_text(hjust = 0.5),)
MagPlot <- ggplot() +
geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = Array.Station.4.Mag., color = "blue")) + # must include argument label "data"
geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = TruewindSpeed_Mag_am, color = "purple")) +
geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = Velocity, color = "red")) +
labs(title = "Car Wind Mag\n", x = "Time (s)", y = "Velocity (m/s)") +
scale_color_manual(labels=c("Windmaster Array","Car Airmar","Car Velocity"), values = c("blue", "purple","red")) +
ylim(-2,30) +
theme(legend.position="top", plot.title = element_text(hjust = 0.5),)
MagPlot
Dirlot <- ggplot() +
geom_point(data = selecdata, aes(x = nearest10hz, y = TrueWindDirection_am, color = "blue")) + # must include argument label "data"
geom_point(data = selecdata, aes(x = nearest10hz, y = Array.Station.4.Angle, color = "red")) +
labs(title = "Wind Direction\n", x = "Time (s)", y = "Direction (degrees)") +
scale_color_manual(labels=c("Car","Stationary"), values = c("blue", "red")) +
ylim(0,360) +
theme(legend.position="top", plot.title = element_text(hjust = 0.5),)
CarVStatVel <- ggplot() +
geom_point(data = selecdata, aes(x = Array.Station.4.Mag., y = TruewindSpeed_Mag)) +
labs(title = "Stationary vs Car Wind Speed", x = "Stationary Mag (m/s)", y = "Car Mag (m/s)") +
ylim(0,9) +
theme(plot.title = element_text(hjust = 0.5),)
CarArrayRatio <- ggplot(data = test, aes(x = Velocity, y = CarMagRatio, color = Accel)) +
geom_point() +
geom_smooth(method = "lm", se=FALSE, color="black", formula = y ~ x) +
labs(title = "Car-Array Mag Ratio", x = "Car Velocity (m/s)", y = "Ratio") +
ylim(0,10) +
xlim(0,10) +
theme(plot.title = element_text(hjust = 0.5),)
CarArrayRatio
Path <- ggplot() +
geom_point(data = FinalAggregated2, aes(x = Latitude, y = Longitude, color = Accel)) + xlim(40.5949,40.5956)
Path
| /Scripts/combineDataOptions.R | no_license | elimywilliams/windAnalysis | R | false | false | 3,855 | r | library(readr)
# Combine stationary anemometer data with the aggregated car data, apply the
# Airmar wind corrections, and produce diagnostic plots.
library(readr)    # write_csv() is used below (this copy had lost the readr load)
library(signal)
library(dplyr)    # filter() is used below but dplyr was never loaded
library(ggplot2)  # ggplot()/geom_point() are used below but ggplot2 was never loaded

# These values need to be changed for each data set
{
  fileFolder <- 'data/FinalAggregated_210204_122536.csv'
  Append <- '_210204_122536.csv'
  #StationaryAnem <- "data/Stationary_210202_122942.csv"
  StationaryAnem <- 'data/array0204.dat'
  lat_anem <- 40.594926667
  long_anem <- -105.13984
}

# These values remain constant
{
  #AerisFileName <- paste('Aeris',Append,sep='_')
  #LocationFileName <- paste("Location",Append,sep='_')
  VesselHeadingName <- paste("VesselHeading", Append, sep = '_')
  WindFileName <- paste('Stationary', Append, sep = '_')
  finalFolder <- fileFolder
  finalFilename <- paste('finalAggregated', Append, sep = '_')
  combinedScriptLoc <- 'combineDataScript.R'
  source("Scripts/WindCollection.R")
  source("Scripts/CarCorrection.R")
  source("Scripts/CarCorrectionAlt.R")
  #suppressMessages(source(combinedScriptLoc))
}

FinalAggregated2 <- WindCollection(StationaryAnem, finalFilename, finalFolder, lat_anem, long_anem)
# NOTE(review): the output path has no .csv extension and its date stamp does
# not match Append -- confirm this is intentional before relying on the file.
write_csv(FinalAggregated2, 'data/appended_210202_121448')
FinalAggregated2 <- CarCorrection_airmar(FinalAggregated2, fileFolder, VesselHeadingName, lat_anem, long_anem)
FinalAggregated2 <- CarCorrection_airmar_Alt(FinalAggregated2, fileFolder, VesselHeadingName, lat_anem, long_anem)
anem_data <- StationaryAnem

# Rows suitable for the ratio regression: close to the anemometer, small
# relative error, decelerating, actually moving, and not turning sharply.
test <- filter(FinalAggregated2, AnemDistance < 43, RelErrorMag > -15, RelErrorMag < 15,
               AnemDeriv < 0, Velocity > 0.3, CarMagRatio < 10, TurnRate < 0.3)
selectdata <- FinalAggregated2[1:1500, ]

# BUG FIX: the plots below referenced a non-existent object `selecdata`
# (typo for `selectdata`), so every plot using it errored out.
CarVel <- ggplot() +
  geom_point(data = selectdata, aes(x = nearest10hz, y = Velocity, color = "blue")) +
  labs(title = "Car Velocity", x = "Time (s)", y = "Velocity (m/s)") +
  theme(legend.position = "top", plot.title = element_text(hjust = 0.5))

MagPlot <- ggplot() +
  geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = Array.Station.4.Mag., color = "blue")) +
  geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = TruewindSpeed_Mag_am, color = "purple")) +
  geom_point(data = FinalAggregated2, aes(x = nearest10hz, y = Velocity, color = "red")) +
  labs(title = "Car Wind Mag\n", x = "Time (s)", y = "Velocity (m/s)") +
  scale_color_manual(labels = c("Windmaster Array", "Car Airmar", "Car Velocity"),
                     values = c("blue", "purple", "red")) +
  ylim(-2, 30) +
  theme(legend.position = "top", plot.title = element_text(hjust = 0.5))
MagPlot

# NOTE(review): "Dirlot" is presumably a typo for "DirPlot"; the name is kept
# so any downstream references still resolve.
Dirlot <- ggplot() +
  geom_point(data = selectdata, aes(x = nearest10hz, y = TrueWindDirection_am, color = "blue")) +
  geom_point(data = selectdata, aes(x = nearest10hz, y = Array.Station.4.Angle, color = "red")) +
  labs(title = "Wind Direction\n", x = "Time (s)", y = "Direction (degrees)") +
  scale_color_manual(labels = c("Car", "Stationary"), values = c("blue", "red")) +
  ylim(0, 360) +
  theme(legend.position = "top", plot.title = element_text(hjust = 0.5))

CarVStatVel <- ggplot() +
  geom_point(data = selectdata, aes(x = Array.Station.4.Mag., y = TruewindSpeed_Mag)) +
  labs(title = "Stationary vs Car Wind Speed", x = "Stationary Mag (m/s)", y = "Car Mag (m/s)") +
  ylim(0, 9) +
  theme(plot.title = element_text(hjust = 0.5))

CarArrayRatio <- ggplot(data = test, aes(x = Velocity, y = CarMagRatio, color = Accel)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "black", formula = y ~ x) +
  labs(title = "Car-Array Mag Ratio", x = "Car Velocity (m/s)", y = "Ratio") +
  ylim(0, 10) +
  xlim(0, 10) +
  theme(plot.title = element_text(hjust = 0.5))
CarArrayRatio

Path <- ggplot() +
  geom_point(data = FinalAggregated2, aes(x = Latitude, y = Longitude, color = Accel)) +
  xlim(40.5949, 40.5956)
Path
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title Connect to an OmniSci database
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @param host host
#' @param port port
#' @param username username
#' @param password password
#' @param dbname dbname
#'
#' @return List
#' @export
#' @examples
#' \dontrun{
#'
#' conn <- connect("localhost", 6274, "admin", "HyperInteractive", "omnisci")
#'
#' }
# NOTE(review): this file is generated by Rcpp::compileAttributes() ("do not
# edit by hand"); the placeholder "TBD" roxygen text above must be fixed in
# the roxygen block of the C++ source, or regeneration will discard the fix.
# Thin wrapper over the native routine '_ROmniSci_connect'; the connection
# handle it returns is produced entirely by the C++ layer (a List per @return).
connect <- function(host, port, username, password, dbname) {
    .Call('_ROmniSci_connect', PACKAGE = 'ROmniSci', host, port, username, password, dbname)
}
#' @title Disconnect from an OmniSci database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Nothing
#' @export
#' @examples
#' \dontrun{
#'
#' disconnect(conn)
#'
#' }
disconnect <- function(conn) {
invisible(.Call('_ROmniSci_disconnect', PACKAGE = 'ROmniSci', conn))
}
#' @title Get details of the specified database table
#'
#' @param conn conn
#' @param table_name table_name
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TTableDetails)
#' @export
#' @examples
#' \dontrun{
#'
#' tbl_details <- get_table_details(conn, "omnisci_states")
#'
#' }
get_table_details <- function(conn, table_name) {
.Call('_ROmniSci_get_table_details', PACKAGE = 'ROmniSci', conn, table_name)
}
#' @title Get status of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TServerStatus)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gss <- get_server_status(conn)
#'
#' }
get_server_status <- function(conn) {
.Call('_ROmniSci_get_server_status', PACKAGE = 'ROmniSci', conn)
}
#' @title Get status of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TServerStatus)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gs <- get_status(conn)
#'
#' }
get_status <- function(conn) {
.Call('_ROmniSci_get_status', PACKAGE = 'ROmniSci', conn)
}
#' @title Get version of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Character
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gv <- get_version(conn)
#'
#' }
get_version <- function(conn) {
.Call('_ROmniSci_get_version', PACKAGE = 'ROmniSci', conn)
}
#' @title Get tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gt <- get_tables(conn)
#'
#' }
get_tables <- function(conn) {
.Call('_ROmniSci_get_tables', PACKAGE = 'ROmniSci', conn)
}
#' @title Get users in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gu <- get_users(conn)
#'
#' }
get_users <- function(conn) {
.Call('_ROmniSci_get_users', PACKAGE = 'ROmniSci', conn)
}
#' @title Get physical tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gpt <- get_physical_tables(conn)
#'
#' }
get_physical_tables <- function(conn) {
.Call('_ROmniSci_get_physical_tables', PACKAGE = 'ROmniSci', conn)
}
#' @title Get views in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gvs <- get_views(conn)
#'
#' }
get_views <- function(conn) {
.Call('_ROmniSci_get_views', PACKAGE = 'ROmniSci', conn)
}
#' @title Get session information
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TSessionInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gsi <- get_session_info(conn)
#'
#' }
get_session_info <- function(conn) {
.Call('_ROmniSci_get_session_info', PACKAGE = 'ROmniSci', conn)
}
#' @title Get databases list
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TDBInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gdb <- get_databases(conn)
#'
#' }
get_databases <- function(conn) {
.Call('_ROmniSci_get_databases', PACKAGE = 'ROmniSci', conn)
}
#' @title Get hardware info
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TClusterHardwareInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' ghi <- get_hardware_info(conn)
#'
#' }
get_hardware_info <- function(conn) {
.Call('_ROmniSci_get_hardware_info', PACKAGE = 'ROmniSci', conn)
}
#' @title Get metadata of tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TTableMeta)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gtm <- get_tables_meta(conn)
#'
#' }
get_tables_meta <- function(conn) {
.Call('_ROmniSci_get_tables_meta', PACKAGE = 'ROmniSci', conn)
}
#' @title Switch current database
#'
#' @param conn conn
#' @param dbname dbname
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Nothing
#'
#' @export
#' @examples
#' \dontrun{
#'
#' switch_database(conn, "database2")
#'
#' }
switch_database <- function(conn, dbname) {
invisible(.Call('_ROmniSci_switch_database', PACKAGE = 'ROmniSci', conn, dbname))
}
#' @title Execute SQL statement
#'
#' @param conn conn
#' @param query query
#' @param first_n first_n
#' @param at_most_n at_most_n
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TQueryResult)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' result <- sql_execute(conn, "select * from omnisci_states")
#'
#' }
# NOTE(review): generated by Rcpp::compileAttributes() ("do not edit by
# hand") -- documentation fixes belong in the C++ roxygen source.
# first_n/at_most_n default to -1L; presumably -1 means "no row limit" --
# confirm against the OmniSci Thrift API before documenting it as fact.
sql_execute <- function(conn, query, first_n = -1L, at_most_n = -1L) {
    .Call('_ROmniSci_sql_execute', PACKAGE = 'ROmniSci', conn, query, first_n, at_most_n)
}
| /R/RcppExports.R | permissive | stjordanis/ROmniSci | R | false | false | 6,191 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title Connect to an OmniSci database
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @param host host
#' @param port port
#' @param username username
#' @param password password
#' @param dbname dbname
#'
#' @return List
#' @export
#' @examples
#' \dontrun{
#'
#' conn <- connect("localhost", 6274, "admin", "HyperInteractive", "omnisci")
#'
#' }
connect <- function(host, port, username, password, dbname) {
.Call('_ROmniSci_connect', PACKAGE = 'ROmniSci', host, port, username, password, dbname)
}
#' @title Disconnect from an OmniSci database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Nothing
#' @export
#' @examples
#' \dontrun{
#'
#' disconnect(conn)
#'
#' }
disconnect <- function(conn) {
invisible(.Call('_ROmniSci_disconnect', PACKAGE = 'ROmniSci', conn))
}
#' @title Get details of the specified database table
#'
#' @param conn conn
#' @param table_name table_name
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TTableDetails)
#' @export
#' @examples
#' \dontrun{
#'
#' tbl_details <- get_table_details(conn, "omnisci_states")
#'
#' }
get_table_details <- function(conn, table_name) {
.Call('_ROmniSci_get_table_details', PACKAGE = 'ROmniSci', conn, table_name)
}
#' @title Get status of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TServerStatus)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gss <- get_server_status(conn)
#'
#' }
get_server_status <- function(conn) {
.Call('_ROmniSci_get_server_status', PACKAGE = 'ROmniSci', conn)
}
#' @title Get status of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List (TServerStatus)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gs <- get_status(conn)
#'
#' }
get_status <- function(conn) {
.Call('_ROmniSci_get_status', PACKAGE = 'ROmniSci', conn)
}
#' @title Get version of OmniSci server
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Character
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gv <- get_version(conn)
#'
#' }
get_version <- function(conn) {
.Call('_ROmniSci_get_version', PACKAGE = 'ROmniSci', conn)
}
#' @title Get tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gt <- get_tables(conn)
#'
#' }
get_tables <- function(conn) {
.Call('_ROmniSci_get_tables', PACKAGE = 'ROmniSci', conn)
}
#' @title Get users in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gu <- get_users(conn)
#'
#' }
get_users <- function(conn) {
.Call('_ROmniSci_get_users', PACKAGE = 'ROmniSci', conn)
}
#' @title Get physical tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gpt <- get_physical_tables(conn)
#'
#' }
get_physical_tables <- function(conn) {
.Call('_ROmniSci_get_physical_tables', PACKAGE = 'ROmniSci', conn)
}
#' @title Get views in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(Character)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gvs <- get_views(conn)
#'
#' }
get_views <- function(conn) {
.Call('_ROmniSci_get_views', PACKAGE = 'ROmniSci', conn)
}
#' @title Get session information
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TSessionInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gsi <- get_session_info(conn)
#'
#' }
get_session_info <- function(conn) {
.Call('_ROmniSci_get_session_info', PACKAGE = 'ROmniSci', conn)
}
#' @title Get databases list
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TDBInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gdb <- get_databases(conn)
#'
#' }
get_databases <- function(conn) {
.Call('_ROmniSci_get_databases', PACKAGE = 'ROmniSci', conn)
}
#' @title Get hardware info
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TClusterHardwareInfo)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' ghi <- get_hardware_info(conn)
#'
#' }
get_hardware_info <- function(conn) {
.Call('_ROmniSci_get_hardware_info', PACKAGE = 'ROmniSci', conn)
}
#' @title Get metadata of tables in current database
#'
#' @param conn conn
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TTableMeta)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' gtm <- get_tables_meta(conn)
#'
#' }
get_tables_meta <- function(conn) {
.Call('_ROmniSci_get_tables_meta', PACKAGE = 'ROmniSci', conn)
}
#' @title Switch current database
#'
#' @param conn conn
#' @param dbname dbname
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return Nothing
#'
#' @export
#' @examples
#' \dontrun{
#'
#' switch_database(conn, "database2")
#'
#' }
switch_database <- function(conn, dbname) {
invisible(.Call('_ROmniSci_switch_database', PACKAGE = 'ROmniSci', conn, dbname))
}
#' @title Execute SQL statement
#'
#' @param conn conn
#' @param query query
#' @param first_n first_n
#' @param at_most_n at_most_n
#'
#' @details TBD Details
#'
#' @description TBD description
#'
#' @return List(TQueryResult)
#'
#' @export
#' @examples
#' \dontrun{
#'
#' result <- sql_execute(conn, "select * from omnisci_states")
#'
#' }
sql_execute <- function(conn, query, first_n = -1L, at_most_n = -1L) {
.Call('_ROmniSci_sql_execute', PACKAGE = 'ROmniSci', conn, query, first_n, at_most_n)
}
|
# Generate plausible patients (PPs) by searching parameter space so that model
# outputs fall within user-supplied state limits, then delegating to genPP_SA().
#
# Args:
#   model_fn       function returning a list with elements "SS" (named
#                  steady-state values) and/or "NSS" (a time-course table
#                  with a `time` column after mrgsolve::lctran()).
#   initGuess      initial parameter guess. NOTE(review): unused in this
#                  function -- presumably consumed downstream; confirm.
#   NP             number of plausible patients to generate.
#   paramLims      data.frame with columns Name, Lower, Upper (parameter bounds).
#   stateLims      data.frame with columns Name, Lower, Upper and optionally
#                  Time ("SS" or a numeric observation time).
#   method         optimization method; only "SA" (simulated annealing) supported.
#   scoreThreshold score cutoff forwarded to genPP_SA().
#   model_args     argument list passed to model_fn via do.call().
#   optim_control  control list forwarded to the optimizer.
#
# Returns: the data.frame produced by genPP_SA(NP).
# Assumes dplyr, purrr, mrgsolve and genPP_SA are available -- TODO confirm.
# NOTE(review): the default `model_fn=model_fn` is self-referential and will
# error if the argument is ever omitted; kept for interface compatibility.
generatePPs_internal <- function(model_fn=model_fn, initGuess, NP, paramLims, stateLims, method="SA", scoreThreshold=0.0, model_args=list(), optim_control=list()){
  paramLims <- as.data.frame(paramLims)
  stateLims <- as.data.frame(stateLims)

  # Flags: do we have steady-state limits and/or timed (non-steady-state) limits?
  ssFLAG <- 0
  nssFLAG <- 0
  nssTimes <- c()
  if (!("Time" %in% names(stateLims))) {
    # No Time column at all: treat every state limit as a steady-state limit.
    stateLims$Time <- "SS"
    ssFLAG <- 1
  } else if ("SS" %in% stateLims$Time) {
    ssFLAG <- 1
  }
  if (NROW(stateLims %>% dplyr::filter(Time != "SS")) > 0) {
    nssFLAG <- 1
    nssTimes <- unique(as.numeric(as.character((stateLims %>% dplyr::filter(Time != "SS"))$Time)))
  }

  if (method != "SA") {
    stop("Only Simulated Annealing supported at the moment")
  }

  # A given state may not carry two limits at the same time point.
  for (state in unique(stateLims$Name)) {
    UNIQUEDF <- stateLims %>% dplyr::filter(Name == state)
    if (NROW(unique(as.character(UNIQUEDF$Time))) < NROW(UNIQUEDF)) {
      stop(paste0(state, " has simultaneous values"))
    }
  }

  # Replace missing parameter bounds with +/-Inf (i.e. unbounded).
  row.names(paramLims) <- paramLims$Name
  for (p in paramLims$Name) {
    if (is.na(paramLims[p, ]$Lower)) {
      paramLims[p, ]$Lower <- -Inf
    }
    if (is.na(paramLims[p, ]$Upper)) {
      paramLims[p, ]$Upper <- Inf
    }
  }
  # Same for state bounds. BUG FIX: a missing Upper bound previously became
  # -Inf (mirroring the Lower line), which made the limit unsatisfiable; it
  # must be +Inf, consistent with the paramLims handling above.
  stateLims <- transform(stateLims, Lower = ifelse(is.na(Lower), -Inf, Lower))
  stateLims <- transform(stateLims, Upper = ifelse(is.na(Upper), Inf, Upper))
  row.names(paramLims) <- NULL

  # Split limits into steady-state and time-varying sets.
  steadystateLims <- stateLims %>% dplyr::filter(Time == "SS")
  steadystateLims$Time <- as.character(steadystateLims$Time)
  stateLims <- stateLims %>% dplyr::filter(Time != "SS")
  stateLims$Time <- as.numeric(as.character(stateLims$Time))

  # Run the model once to validate that its output can satisfy the limits.
  test_output <- do.call(model_fn, model_args)
  if (!is.list(test_output)) {
    stop("Model output must be a list containing some combination of steady state (SS) and non steady state (NSS) values")
  }
  # Steady-state limits require an SS element exposing every bounded state.
  if (!("SS" %in% names(test_output)) && ssFLAG == 1) {
    stop("Steady steady state outputs must be provided for steady state limits!")
  } else if (ssFLAG == 1) {
    for (state in steadystateLims$Name) {
      if (!(state %in% names(test_output$SS))) {
        stop(paste0("State ", state, " must be in model steady state output!"))
      }
    }
  }
  # Timed limits require an NSS table containing every bounded state and time.
  if (nssFLAG == 1) {
    if (!("NSS" %in% names(test_output))) {
      stop("Model output must contain non-steady state values!")
    } else {
      test_output$NSS <- mrgsolve::lctran(test_output$NSS)
      for (state in stateLims$Name) {
        if (!(state %in% names(test_output$NSS))) {
          stop(paste0("State ", state, " must be present in non-steady-state output"))
        }
      }
      # Output times must be unique (checked once; previously re-checked
      # redundantly inside the loop below).
      if (length(unique(test_output$NSS$time)) != NROW(test_output$NSS)) {
        stop("Duplicated times found in model output! Please ensure model output times are unique!")
      }
      for (t in unique(stateLims$Time)) {
        if (!(t %in% test_output$NSS$time)) {
          stop(paste0("Time ", t, " found in state limits but not in non-steady-state model outputs"))
        }
      }
    }
  }

  # Drop unused factor levels before handing off to the generator.
  stateLims <- droplevels(stateLims)
  steadystateLims <- droplevels(steadystateLims)

  if (method == "SA") {
    gen_pp_f <- purrr::partial(genPP_SA, model_fn = model_fn, paramLims = paramLims,
                               stateLims = stateLims, steadystateLims = steadystateLims,
                               model_args = model_args, scoreThreshold = scoreThreshold,
                               optim_control = optim_control)
  }
  out_df <- gen_pp_f(NP)
  return(out_df)
}
| /content/script/VPop/generatePPs_internal.R | no_license | metrumresearchgroup/ub-cdse-2019 | R | false | false | 3,871 | r | generatePPs_internal <- function(model_fn=model_fn, initGuess, NP, paramLims, stateLims, method="SA", scoreThreshold=0.0, model_args=list(),optim_control=list()){
# paramLims and stateLims are both dataframes with a column for the parameter/state name, upper and lower bounds
paramLims <- as.data.frame(paramLims)
stateLims <- as.data.frame(stateLims)
ssFLAG = 0
nssFLAG = 0
nssTimes = c()
if(!("Time" %in% names(stateLims))){
stateLims$Time <- "SS"
ssFLAG <- 1
}else if("SS" %in% stateLims$Time){
# print('here')
ssFLAG <- 1
}
if(NROW((stateLims %>% dplyr::filter(Time!="SS")))>0){
nssFLAG <- 1
nssTimes <- as.numeric(as.character((stateLims%>%dplyr::filter(Time!="SS"))$Time))
nssTimes <- unique(nssTimes)
}
if(method!="SA"){
stop("Only Simulated Annealing supported at the moment")
}
for(state in unique(stateLims$Name)){
UNIQUEDF <- stateLims %>% dplyr::filter(Name==state)
if(NROW(unique(as.character(UNIQUEDF$Time)))<NROW(UNIQUEDF)){
stop(paste0(state," has simultaneous values"))
}
}
# for(name in unique(paramLims$Name)){
# if(NROW(paramLims %>% dplyr::filter(Name==name))>1){
# stop(paste0("Parameter ",name," has multiply defined limits!"))
# }else if((!name %in% names(param(model)))){
# stop(paste0(name," is not a model parameter!"))
# }
# }
row.names(paramLims) <- paramLims$Name
for(p in paramLims$Name){
if(is.na(paramLims[p,]$Lower)){
paramLims[p,]$Lower <- -Inf
}
if(is.na(paramLims[p,]$Upper)){
paramLims[p,]$Upper <- Inf
}
}
stateLims <- transform(stateLims, Lower = ifelse(is.na(Lower), -Inf, Lower))
stateLims <- transform(stateLims, Upper = ifelse(is.na(Upper), -Inf, Upper))
row.names(paramLims) <- NULL
# Get Time varying and Steady-State Limits
steadystateLims <- stateLims %>% dplyr::filter(Time=="SS")
steadystateLims$Time <- as.character(steadystateLims$Time)
stateLims <- stateLims %>% dplyr::filter(Time!="SS")
stateLims$Time <- as.numeric(as.character(stateLims$Time))
# GENERATE TEST OUTPUT
test_output <- do.call(model_fn,model_args)
if(class(test_output)!="list"){
stop("Model output must be a list containing some combination of steady state (SS) and non steady state (NSS) values")
}
# Check for SS output
if(!("SS" %in% names(test_output)) & ssFLAG==1){
stop("Steady steady state outputs must be provided for steady state limits!")
}else if(ssFLAG==1){
for(state in steadystateLims$Name){
if(!(state %in% names(test_output$SS))){
stop(paste0("State ", state, " must be in model steady state output!"))
}
}
}
if(nssFLAG==1){
if(!("NSS" %in% names(test_output))){
stop("Model output must contain non-steady state values!")
}else{
test_output$NSS <- mrgsolve::lctran(test_output$NSS)
for(state in stateLims$Name){
if(!(state %in% names(test_output$NSS))){
stop(paste0("State ", state, " must be present in non-steady-state output"))
}
}
for(t in unique(stateLims$Time)){
if(!(t %in% test_output$NSS$time)){
stop(paste0("Time ",t," found in state limits but not in non-steady-state model outputs"))
}
tunique <- unique(test_output$NSS$time)
if(length(tunique)!=NROW(test_output$NSS)){
stop("Duplicated times found in model output! Please ensure model output times are unique!")
}
}
}
}
# Drop unused levels
stateLims <- droplevels(stateLims)
steadystateLims <- droplevels(steadystateLims)
if(method=="SA"){
gen_pp_f <- purrr::partial(genPP_SA, model_fn=model_fn,paramLims=paramLims,stateLims=stateLims,steadystateLims=steadystateLims, model_args=model_args, scoreThreshold=scoreThreshold,optim_control=optim_control)
}
out_df <- gen_pp_f(NP)
return(out_df)
}
|
---
title: "HW02 台大英語授課課程數變化"
author: "B04202016 物理二 朱文亞"
date: "2017年4月24日"
output: html_document
---
```{r setup, include=FALSE}
library("dplyr")
library("plotly")
library("RColorBrewer")
require(stats)
knitr::opts_chunk$set(echo = TRUE)
```
## 台大英語授課數量
> 我們專題研究的主題是在台大的國際學生,想藉由不同的數據來描繪出他們在台大的生活。其中的一項指標,就是台大開設以英語來授課的課程數。由於來台大的國際學生有增加的趨勢,假定兩者具有關聯,因此在未分析數據前的預測是英語授課的課程數也會有增加的趨勢。
## 96~104學年度總體/各學院英語授課數量
```{r eng_class, echo=TRUE}
# Read the semester-by-college counts of English-taught courses and reshape
# them from wide (one column per college) to long (semester, count, college).
raw_data <- read.csv("eng_class.csv", sep = ",", header = TRUE)
lastsemId <- length(raw_data$semester)
firstsem <- raw_data$semester[1]
lastsem <- raw_data$semester[lastsemId]
n <- 18            # number of semesters covered by the file
allType <- names(raw_data)
rownames(raw_data) <- seq_len(nrow(raw_data))
typeId <- 2:13     # columns holding the per-college course counts
newTable <- data.frame()
for (nid in seq_len(n)) {
  # One long-format slice per semester: semester id, count, college name.
  # (rbind-in-loop is O(n^2) but fine for n = 18 rows.)
  semester <- as.matrix(rep(raw_data$semester[nid], length(raw_data[nid, typeId])))
  class_num <- as.matrix(as.numeric(raw_data[nid, typeId]))
  type <- as.matrix(as.character(allType[typeId]))
  newTable <- rbind(newTable, cbind(semester, class_num, type))
}
names(newTable) <- c('semester', 'class_num', 'pos')
newTable <- newTable[with(newTable, order(pos)), ]
rownames(newTable) <- seq_len(nrow(newTable))
# NOTE(review): the palette size is derived from mtcars$hp -- an artifact of
# a copy-pasted example; it only sets how many colours are interpolated from
# the Set1 palette, so the plot is unaffected beyond colour granularity.
colourCount <- length(unique(mtcars$hp))
getPalette <- colorRampPalette(brewer.pal(9, "Set1"))
# Interactive chart: one coloured line per college across semesters.
p <- plot_ly(data = newTable, x = ~semester,
             y = ~class_num, color = ~pos, colors = getPalette(colourCount)) %>%
  add_lines(yaxis = list(range = c(0, 10)))
p
```
> y軸為英語授課的課程;x軸為學期 (ex:1041->104學年度第1學期)
> 由這張圖表來分析,可以看出個學院英語授課的課程總數有明顯增加的趨勢 (約從400增加到約550),大致上是符合原先的預測。但各學院的英語課程數量卻都沒有明顯的增加趨勢。
## 96~104學年度各學院英語授課數量
```{r echo= TRUE}
# Keep only the first 198 rows so the trailing series is dropped and the
# smaller per-college counts become visible.
# NOTE(review): 198 is hard-coded (11 series x 18 semesters?) -- confirm it
# still matches the data if eng_class.csv changes.
newTable<- newTable[0:198, ]
rownames(newTable) <- 1:nrow(newTable)
# NOTE(review): palette size is derived from mtcars$hp -- an artifact of a
# copy-pasted example; it only controls how many colours are interpolated.
colourCount = length(unique(mtcars$hp))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
# Interactive chart: one coloured line per remaining series across semesters.
p <- plot_ly(data = newTable, x = ~semester,
        y = ~class_num, color = ~pos, colors= getPalette(colourCount)) %>%
  add_lines( yaxis = list(range = c(0,10)))
p
```
#
> 去除總體英語課程總數後,放大檢視各學院間的差異。其中文學院的英語課程數量明顯多於其他學院,有很大的可能性是外文系所導致的結果,因為比起其他學院,文學院的英語課程數量反而有遞減的趨勢,外文系中英語授課的對象主要還是本地的學生,與國際學生較無關聯;而其他學院雖然上升的幅度並不大,但在這幾年中,英語授課的課程數有所增加。
> 雖然並不能以此就斷定以英語授課的課程數與國際學生人數有因果關係 (國際學生人數增加而必須開設更多英語課程,又或是英語課程數的增加吸引了國際學生),但這次的分析還是可以說明台大開設的英語課程的確有增加,如果能找出更多的數據支持 (例如國際學生修習英語課程與非英語課程的比例,或是在兩種課程中成績的分布),那這份數據也許就能夠做為一個指標,來略窺國際學生在台大的學習狀況。
| /HW/HW2/eng_class(test).R | no_license | whirllllll/2017_data_course | R | false | false | 3,514 | r | ---
title: "HW02 台大英語授課課程數變化"
author: "B04202016 物理二 朱文亞"
date: "2017年4月24日"
output: html_document
---
```{r setup, include=FALSE}
library("dplyr")
library("plotly")
library("RColorBrewer")
require(stats)
knitr::opts_chunk$set(echo = TRUE)
```
## 台大英語授課數量
> 我們專題研究的主題是在台大的國際學生,想藉由不同的數據來描繪出他們在台大的生活。其中的一項指標,就是台大開設以英語來授課的課程數。由於來台大的國際學生有增加的趨勢,假定兩者具有關聯,因此在未分析數據前的預測是英語授課的課程數也會有增加的趨勢。
## 96~104學年度總體/各學院英語授課數量
```{r eng_class, echo=TRUE}
raw_data<- read.csv("eng_class.csv", sep= ",", header= TRUE)
lastsemId = length(raw_data$semester)
firstsem = raw_data$semester[1]
lastsem = raw_data$semester[lastsemId]
n = 18
allType = names(raw_data)
rownames(raw_data) <- 1:nrow(raw_data)
typeId = c(2:13)
newTable = data.frame()
for( nid in c(1:n) )
{
semester = as.matrix(rep(raw_data$semester[nid], length(raw_data[nid,typeId])))
class_num = as.matrix(as.numeric(raw_data[nid,typeId]))
type = as.matrix(as.character(allType[typeId]))
temp = cbind(semester, class_num, type)
newTable = rbind(newTable, temp)
}
names(newTable) = c('semester', 'class_num', 'pos')
newTable = newTable[with(newTable, order(pos)),]
rownames(newTable) <- 1:nrow(newTable)
colourCount = length(unique(mtcars$hp))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
p <- plot_ly(data = newTable, x = ~semester,
y = ~class_num, color = ~pos, colors= getPalette(colourCount)) %>%
add_lines( yaxis = list(range = c(0,10)))
p
```
> y軸為英語授課的課程;x軸為學期 (ex:1041->104學年度第1學期)
> 由這張圖表來分析,可以看出個學院英語授課的課程總數有明顯增加的趨勢 (約從400增加到約550),大致上是符合原先的預測。但各學院的英語課程數量卻都沒有明顯的增加趨勢。
##96~104學年度各學院英語授課數量
```{r echo= TRUE}
newTable<- newTable[0:198, ]
rownames(newTable) <- 1:nrow(newTable)
colourCount = length(unique(mtcars$hp))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
p <- plot_ly(data = newTable, x = ~semester,
y = ~class_num, color = ~pos, colors= getPalette(colourCount)) %>%
add_lines( yaxis = list(range = c(0,10)))
p
```
#
> 去除個學院英語課程總數後,放大檢視各學院間的差異。其中文學院的英語課程數量明顯多於其他學院,有很大的可能性是外文系所導致的結果,因為比起其他學院,文學院的英語課程數量反而有遞減的趨勢,外文系中英語授課的對象主要還是本地的學生,與國際學生較無關聯;而其他學院雖然上升的幅度並不大,但在這幾年中,英語授課的課程數有所增加。
> 雖然並不能以此就斷定以英語授課的課程數與國際學生人數有因果關係 (國際學生人數增加而必須開設更多英語課程,又或是英語課程數的增加吸引了國際學生),但這次的分析還是可以說明台大開設的英語課程的確有增加,如果能找出更多的數據支持 (例如國際學生修習英語課程與非英語課程的比例,或是在兩種課程中成績的分布),那這份數據也許就能夠做為一個指標,來略窺國際學生在台大的學習狀況。
|
tdm.save <- function(input,output,n){
dfm <- dfm.generate(input, n)
dfm <- dfm_trim(dfm, min_docfreq = 3)
y <- data.frame(cbind(words=names(topfeatures(dfm, 1000000)),count=topfeatures(dfm, 1000000)), row.names = c())
#write.csv(y, output)
y
} | /scripts/dfm_save.R | no_license | markusams/WordPredictor | R | false | false | 251 | r | tdm.save <- function(input,output,n){
dfm <- dfm.generate(input, n)
dfm <- dfm_trim(dfm, min_docfreq = 3)
y <- data.frame(cbind(words=names(topfeatures(dfm, 1000000)),count=topfeatures(dfm, 1000000)), row.names = c())
#write.csv(y, output)
y
} |
# This file contains only package-level roxygen2 documentation.  Each roxygen
# block below is attached to a NULL placeholder -- the standard idiom for
# documenting the package itself and for emitting NAMESPACE directives.
#' Impute2toBD: A package for creating binary dosage files from Impute 2 files
#'
#' Impute2toBD: A package for creating binary dosage files from Impute 2 files
#'
#' @section Functions:
#' Currently none
#'
#' @docType package
#' @name Impute2toBD
NULL
# The block below imports Rcpp's evalCpp and declares the package's compiled
# library; it documents nothing and exists solely for NAMESPACE generation.
#' @importFrom Rcpp evalCpp
#' @useDynLib Impute2toBD
NULL
| /R/Impute2toBD.R | no_license | jimb3/Impute2toBD | R | false | false | 314 | r | #' Impute2toBD: A package for creating binary dosage files from Impute 2 files
#'
#' Impute2toBD: A package for creating binary dosage files from Impute 2 files
#'
#' @section Functions:
#' Currently none
#'
#' @docType package
#' @name Impute2toBD
NULL
#' @importFrom Rcpp evalCpp
#' @useDynLib Impute2toBD
NULL
|
######################################################################
## Decompose a vector into a matrix of "fourier" components
##
## Author: Aidan McDermott (AMcD), modified by Roger Peng <rpeng@jhsph.edu>
## Date : Dec 8, 2000
## Revised: 2005-09-29 by Roger Peng <rpeng@jhsph.edu>
######################################################################
#' Time Scale Decomposition
#'
#' Decompose a vector into frequency components
#'
#' @param x a numeric vector with no missing data
#' @param breaks a numeric constant or a vector of break points into which \code{x} should be broken. If breaks is a constant then \code{x} will be broken into that number of frequncies. This argument is passed directly to \code{cut} to determine the break points. See \code{cut} for more details.
#'
#' @returns A matrix with dimension n x m where n is the length of \code{x} and m is the number of break categories.
#'
#' @references Dominici FD, McDermott A, Zeger SL, Samet JM (2003). \dQuote{Airborne particulate matter and mortality: Timescale effects in four US cities}, American Journal of Epidemiology, 157 (12), 1055--1065.
#' @author Original by Aidan McDermott; revised by Roger Peng \email{rpeng@jhsph.edu}
#'
#' @examples
#' x <- rnorm(101)
#' freq.x <- tsdecomp(x, c(1, 10, 30, 80))
#'
#' ## decompose x into 3 frequency categories.
#' ## x[,1] represents from 1 to 9 cycles in 101 data points
#' ## x[,2] represents from 10 to 29 cycles in 101 data points
#' ## x[,3] represents from 30 to 50 cycles in 101 data points
#' ## you can only have up to 50 cycles in 101 data points.
#'
#' @export
#' @importFrom stats fft
tsdecomp <- function(x, breaks) {
    ## Record and strip missing values; they are re-inserted as NA rows at
    ## the end so the output keeps the input's length.
    nax <- is.na(x)
    if (nas <- any(nax))
        x <- x[!nax]
    ## The redundant (conjugate) half of the spectrum differs for even/odd n:
    ## an even-length series has a Nyquist bin that appears only once.
    is.even <- !length(x) %% 2
    xf <- fft(x) / length(x)    # normalized DFT so the inverse needs no rescaling
    xf1 <- xf[1]                # DC term (first bin is the sum of x / n)
    ## Non-redundant positive-frequency bins.
    xf.first <- xf[2:(1 + floor(length(xf) / 2))]
    ## Assign each frequency bin to a break category.
    cuts <- cut(seq_along(xf.first), breaks, include.lowest = TRUE)
    lcuts <- levels(cuts)
    ncuts <- length(lcuts)
    mat <- matrix(0, nrow = length(x), ncol = ncuts)
    for (i in seq_len(ncuts)) {
        ## Zero out all bins except those in category i, then invert the DFT.
        ## which() is used so that bins falling outside 'breaks' (NA in cuts)
        ## are simply left at zero instead of producing an NA-subscript error.
        xf.temp <- rep(0, length(xf.first))
        keep <- which(cuts == lcuts[i])
        xf.temp[keep] <- xf.first[keep]
        ## Rebuild the full conjugate-symmetric spectrum.  The DC term is
        ## split evenly across the components so that the columns sum to x.
        ## For even n the Nyquist bin (last of xf.temp) must not be mirrored.
        d <- if (is.even)
            c(xf1 / ncuts, xf.temp, rev(Conj(xf.temp[-length(xf.temp)])))
        else
            c(xf1 / ncuts, xf.temp, rev(Conj(xf.temp)))
        mat[, i] <- Re(fft(d, inverse = TRUE))
    }
    if (nas) {
        ## Re-insert NA rows at the positions of the original missing values.
        nmat <- matrix(NA, length(nax), NCOL(mat))
        nmat[!nax, ] <- mat
        mat <- nmat
    }
    structure(mat, breaks = breaks, class = c("tsdecomp", "matrix"))
}
#' @importFrom graphics par
#' @exportS3Method
plot.tsdecomp <- function(x, y, xlab = "", ylab = "", ...) {
    ## One stacked panel per frequency component; the break points recorded
    ## by tsdecomp() provide each panel's cycle-range title.
    brk <- attr(x, "breaks")
    idx <- seq(NROW(x))
    ## Save the full graphics state and restore it on exit.
    saved <- par(no.readonly = TRUE)
    on.exit(par(saved))
    par(mfrow = c(NCOL(x), 1), mar = c(3, 4, 2, 2))
    for (k in seq(NCOL(x))) {
        lo <- brk[k]
        hi <- brk[k + 1] - 1
        ## Singular/plural title when the band covers a single cycle count.
        title <- if (lo == hi) {
            paste(lo, if (lo == 1) "cycle" else "cycles")
        } else {
            paste(lo, "to", hi, "cycles")
        }
        plot(idx, x[, k], type = "l", xlab = xlab, ylab = ylab,
             main = title, ...)
    }
    invisible()
}
| /R/decompose.R | no_license | rdpeng/tsmodel | R | false | false | 3,866 | r | ######################################################################
## Decompose a vector into a matrix of "fourier" components
##
## Author: Aidan McDermott (AMcD), modified by Roger Peng <rpeng@jhsph.edu>
## Date : Dec 8, 2000
## Revised: 2005-09-29 by Roger Peng <rpeng@jhsph.edu>
######################################################################
#' Time Scale Decomposition
#'
#' Decompose a vector into frequency components
#'
#' @param x a numeric vector with no missing data
#' @param breaks a numeric constant or a vector of break points into which \code{x} should be broken. If breaks is a constant then \code{x} will be broken into that number of frequncies. This argument is passed directly to \code{cut} to determine the break points. See \code{cut} for more details.
#'
#' @returns A matrix with dimension n x m where n is the length of \code{x} and m is the number of break categories.
#'
#' @references Dominici FD, McDermott A, Zeger SL, Samet JM (2003). \dQuote{Airborne particulate matter and mortality: Timescale effects in four US cities}, American Journal of Epidemiology, 157 (12), 1055--1065.
#' @author Original by Aidan McDermott; revised by Roger Peng \email{rpeng@jhsph.edu}
#'
#' @examples
#' x <- rnorm(101)
#' freq.x <- tsdecomp(x, c(1, 10, 30, 80))
#'
#' ## decompose x into 3 frequency categories.
#' ## x[,1] represents from 1 to 9 cycles in 101 data points
#' ## x[,2] represents from 10 to 29 cycles in 101 data points
#' ## x[,3] represents from 30 to 50 cycles in 101 data points
#' ## you can only have up to 50 cycles in 101 data points.
#'
#' @export
#' @importFrom stats fft
tsdecomp <- function(x, breaks) {
## Check for missing values
nax <- is.na(x)
if(nas <- any(nax))
x <- x[!nax]
## Need to be careful if length(x) is even or odd
is.even <- !length(x) %% 2
xf <- fft(x) / length(x)
xf1 <- xf[1] # first bit is the sum of x
xf.first <- xf[2:(1 + floor(length(xf) / 2))]
## Break xf.first into various components
cuts <- cut(seq(length(xf.first)), breaks, include.lowest = TRUE)
lcuts <- levels(cuts)
ncuts <- length(lcuts)
mat <- matrix(0, nrow = length(x), ncol = ncuts)
for(i in 1:ncuts) {
xf.temp <- rep(0, length(xf.first))
xf.temp[cuts == lcuts[i]] <- xf.first[cuts == lcuts[i]]
d <- if(is.even)
c(xf1 / ncuts, xf.temp, rev(Conj(xf.temp[-length(xf.temp)])))
else
c(xf1 / ncuts, xf.temp, rev(Conj(xf.temp)))
mat[, i] <- Re(fft(d, inverse = TRUE))
}
if(nas) {
nmat <- matrix(NA, length(nax), NCOL(mat))
nmat[!nax, ] <- mat
mat <- nmat
}
structure(mat, breaks = breaks, class = c("tsdecomp", "matrix"))
}
#' @importFrom graphics par
#' @exportS3Method
plot.tsdecomp <- function(x, y, xlab = "", ylab = "", ...) {
breaks <- attr(x, "breaks")
xpts <- seq(NROW(x))
op <- par(no.readonly = TRUE)
on.exit(par(op))
par(mfrow = c(NCOL(x), 1), mar = c(3, 4, 2, 2))
for(i in seq(NCOL(x))) {
b1 <- breaks[i]
b2 <- breaks[i+1] - 1
main <- if(b1 == b2) {
if(b1 == 1)
paste(b1, "cycle")
else
paste(b1, "cycles")
}
else
paste(b1, "to", b2, "cycles")
plot(xpts, x[, i], type = "l", xlab = xlab, ylab = ylab,
main = main, ...)
}
invisible()
}
|
\name{repl.correl.heatmap}
\alias{repl.correl.heatmap}
\title{Adequation of replicates using a heatmap with hierarchical clustering}
\usage{
repl.correl.heatmap(XP.conditions, XP.names, pathout)
}
\arguments{
\item{XP.conditions}{Vector of experimental conditions for each sample}
\item{XP.names}{Vector of names for each sample}
\item{pathout}{Address where output files will be written}
}
\value{
This function returns a list containing the following :
\describe{
\item{plot}{Address of plot file in png format}
\item{value}{Spearman correlation (in absolute value) between clusters and groups of replicates}
\item{color}{Color white/orange/red corresponding to good/warning/poor level of quality}
\item{recommendation}{Description and recommendation based on value}
}
}
\description{
\code{repl.correl.heatmap} takes a list of samples and experimental conditions in input,
generates a heatmap with hierarchical clustering, and compares this clustering
with the groups of replicates.
}
\examples{
\donttest{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
periodicity(list.bam, refCDS, refFASTA, pathout, XP.names, versionStrip = FALSE)
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE)
#
## Replicates.
#
repl.correl.counts.Venn.res <- repl.correl.counts.Venn(XP.conditions, XP.names,
pathout)
repl.correl.counts.Venn.res
repl.correl.gene.res <- repl.correl.gene(XP.conditions, XP.names, pathout)
repl.correl.gene.res
repl.correl.codon.res <- repl.correl.codon(list.bam, refCDS, refFASTA,
mini, maxi,
XP.names, XP.conditions, pathout)
repl.correl.codon.res
repl.correl.heatmap.res <- repl.correl.heatmap(XP.conditions.i, XP.names, pathout)
repl.correl.heatmap.res
}
\dontshow{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
suppressMessages(periodicity(list.bam, refCDS, refFASTA, pathout, XP.names,
versionStrip = FALSE,
python.messages=FALSE))
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE,
r.messages=FALSE,
python.messages=FALSE)
#
## Replicates.
#
repl.correl.counts.Venn.res <- repl.correl.counts.Venn(XP.conditions, XP.names,
pathout, r.messages=FALSE)
repl.correl.gene.res <- repl.correl.gene(XP.conditions, XP.names, pathout)
repl.correl.codon.res <- repl.correl.codon(list.bam, refCDS, refFASTA,
mini, maxi,
XP.names, XP.conditions, pathout)
repl.correl.heatmap.res <- repl.correl.heatmap(XP.conditions.i, XP.names, pathout)
}
}
| /man/repl.correl.heatmap.Rd | no_license | carinelegrand/RiboVIEW | R | false | false | 8,721 | rd | \name{repl.correl.heatmap}
\alias{repl.correl.heatmap}
\title{Adequation of replicates using a heatmap with hierarchical clustering}
\usage{
repl.correl.heatmap(XP.conditions, XP.names, pathout)
}
\arguments{
\item{XP.conditions}{Vector of experimental conditions for each sample}
\item{XP.names}{Vector of names for each sample}
\item{pathout}{Address where output files will be written}
}
\value{
This function returns a list containing the following :
\describe{
\item{plot}{Address of plot file in png format}
\item{value}{Spearman correlation (in absolute value) between clusters and groups of replicates}
\item{color}{Color white/orange/red corresponding to good/warning/poor level of quality}
\item{recommendation}{Description and recommendation based on value}
}
}
\description{
\code{repl.correl.heatmap} This function takes a list of samples and experimental conditions in input,
# generates a heatmap with hierarchical clustering, and compares this clustering
# with the groups of replicates.
}
\examples{
\donttest{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
periodicity(list.bam, refCDS, refFASTA, pathout, XP.names, versionStrip = FALSE)
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE)
#
## Replicates.
#
repl.correl.counts.Venn.res <- repl.correl.counts.Venn(XP.conditions, XP.names,
pathout)
repl.correl.counts.Venn.res
repl.correl.gene.res <- repl.correl.gene(XP.conditions, XP.names, pathout)
repl.correl.gene.res
repl.correl.codon.res <- repl.correl.codon(list.bam, refCDS, refFASTA,
mini, maxi,
XP.names, XP.conditions, pathout)
repl.correl.codon.res
repl.correl.heatmap.res <- repl.correl.heatmap(XP.conditions.i, XP.names, pathout)
repl.correl.heatmap.res
}
\dontshow{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
suppressMessages(periodicity(list.bam, refCDS, refFASTA, pathout, XP.names,
versionStrip = FALSE,
python.messages=FALSE))
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE,
r.messages=FALSE,
python.messages=FALSE)
#
## Replicates.
#
repl.correl.counts.Venn.res <- repl.correl.counts.Venn(XP.conditions, XP.names,
pathout, r.messages=FALSE)
repl.correl.gene.res <- repl.correl.gene(XP.conditions, XP.names, pathout)
repl.correl.codon.res <- repl.correl.codon(list.bam, refCDS, refFASTA,
mini, maxi,
XP.names, XP.conditions, pathout)
repl.correl.heatmap.res <- repl.correl.heatmap(XP.conditions.i, XP.names, pathout)
}
}
|
# Functions for parsing and building SciDB schema strings.
# A SciDB schema string up to version 15.12 looks like:
# optional_array_name<attribute_1:type_1 NULL DEFAULT VALUE, attribute_2:type_2, ...>
# [dimension_1=start:end,chunksize,overlap, dimension_2=start:end,chunksize,overlap, ...]
#
# Starting with SciDB version 16.9, schema strings changed a lot. They look like:
# optional_array_name<v:double,a:int64 NOT NULL DEFAULT 5> [i=1:2:0:1000; j=1:3:0:1000]
# in particular, the dimensions are now start:end:overlap:chunksize
#
#' Internal function for processing SciDB dimension schema
#' @param x a scidb object or schema string
#' @return a data frame with parsed dimension data
#' @importFrom utils tail
.dimsplitter = function(x)
{
  if (inherits(x, "scidb")) x = schema(x)
  # Normalize whitespace so tokenizing only has to deal with spaces.
  x = gsub("\\t", " ", x)
  x = gsub("\\n", " ", x)
  # Split s on a single-character token while KEEPING the token between the
  # pieces, e.g. tokenize("a=b", "=") -> c("a", "=", "b").
  tokenize = function(s, token)
  {
    x = strsplit(s, token)[[1]]
    x = as.vector(rbind(x, rep(token, length(x))))
    x[- length(x)]
  }
  # Recursively label every token with its grammatical role ("name", "start",
  # "end", "chunk", "overlap", or a punctuation label).  A value token's role
  # depends on the two preceding labels because the two SciDB syntaxes order
  # fields differently (see the file header):
  #   old (<= 15.12): name=start:end,chunk,overlap   (comma-separated tail)
  #   new (>= 16.9):  name=start:end:overlap:chunk   (colon-separated)
  diagram = function(tokens, labels=c())
  {
    if(length(tokens) == 0) return(labels)
    last = tail(labels, 1)
    prev = tail(labels, 2)[1]
    if(is.null(last)) labels = c(labels, "name")
    else if(tokens[1] == "=") labels = c(labels, "equals")
    else if(tokens[1] == ";") labels = c(labels, "semicolon")
    else if(tokens[1] == ":") labels = c(labels, "colon")
    else if(tokens[1] == ",") labels = c(labels, "comma")
    else
    {
      if(last == "semicolon") labels = c(labels, "name")
      else if(last == "equals") labels = c(labels, "start")
      else if(last == "colon")
      {
        # Colon-separated fields: new start:end:overlap:chunk order.
        if(is.null(prev)) stop("invalid : character")
        else if(prev == "start") labels = c(labels, "end")
        else if(prev == "end") labels = c(labels, "overlap")
        else if(prev == "overlap") labels = c(labels, "chunk")
      }
      else if(last == "comma")
      {
        # Comma-separated fields: old start:end,chunk,overlap order; a comma
        # after "overlap" (or another name) begins the next dimension's name.
        if(is.null(prev)) stop("invalid , character")
        else if(prev == "name") labels = c(labels, "name")
        else if(prev == "start") labels = c(labels, "end")
        else if(prev == "end") labels = c(labels, "chunk")
        else if(prev == "chunk") labels = c(labels, "overlap")
        else if(prev == "overlap") labels = c(labels, "name")
      }
    }
    diagram(tokens[-1], labels)
  }
  # Arrange one dimension's labelled tokens into a fixed-order record.
  form = function(x)
  {
    c(name=x["name"], start=x["start"], end=x["end"], chunk=x["chunk"], overlap=x["overlap"])
  }
  # Extract the text between '[' and ']'; NULL when there is no dimension part.
  s = tryCatch(gsub("]", "", strsplit(x, "\\[")[[1]][[2]]), error=function(e) NULL)
  if(is.null(s) || nchar(s) == 0) return(NULL)
  # Tokenize successively on "=", ":", ";" and ",", flattening at each stage.
  tokens = Reduce(c, lapply(Reduce(c, lapply(Reduce(c, lapply(tokenize(s, "="), tokenize, ":")), tokenize, ";")), tokenize, ","))
  names(tokens) = diagram(tokens)
  # NOTE(review): the result of this filtering expression is discarded (no
  # assignment), so punctuation tokens remain in `tokens`.  The code still
  # works because form() extracts fields by name, but this line is effectively
  # dead -- confirm whether `tokens = tokens[...]` was intended.
  tokens[!(names(tokens) %in% c("equals", "colon", "semicolon", "comma"))]
  # Each "name" token starts a dimension; take the span up to the next name.
  i = which(names(tokens) %in% "name")
  j = c((i - 1)[-1], length(tokens))
  ans = Reduce(rbind, lapply(1:length(i), function(k) form(tokens[i[k]:j[k]])))
  # A single dimension yields a plain vector instead of a matrix, so build
  # its one-row data frame from a list.
  if(length(i) == 1) {
    ans = data.frame(as.list(ans), stringsAsFactors=FALSE, row.names=NULL)
  } else ans = data.frame(ans, stringsAsFactors=FALSE, row.names=c())
  names(ans) = c("name", "start", "end", "chunk", "overlap")
  ans$name = gsub(" ", "", ans$name)
  ans
}
#' Internal function for processing SciDB attribute schema
#' @param x a scidb object or schema string
#' @return a data frame with parsed attribute data
.attsplitter = function(x)
{
  # Accept either a raw schema string or a scidb object.
  if (is.character(x)) s = x
  else
  {
    if (!(inherits(x, "scidb"))) return(NULL)
    s = schema(x)
  }
  # Normalize whitespace and strip DEFAULT clauses before parsing.
  s = gsub("\\t", " ", s)
  s = gsub("\\n", " ", s)
  s = gsub("default[^,]*", "", s, ignore.case=TRUE)
  # Keep the text inside <...>, split the attribute list on ",", then split
  # each attribute into its name/type parts on ":".
  s = strsplit(strsplit(strsplit(strsplit(s, ">")[[1]][1], "<")[[1]][2], ",")[[1]], ":")
  # SciDB schema syntax changed in 15.12
  # NOTE(review): this version check dereferences x@meta even when x was a
  # plain character string (the is.character branch above), which would fail
  # for string input -- confirm that callers always pass scidb objects here.
  null = if (at_least(attr(x@meta$db, "connection")$scidb.version, "15.12"))
    ! grepl("NOT NULL", s)
  else grepl(" NULL", s)
  # Drop NOT NULL / NULL markers and COMPRESSION clauses, leaving the bare
  # type name for each attribute.
  type = gsub(" ", "", gsub("null", "", gsub("not null", "", gsub("compression '.*'", "", vapply(s, function(x) x[2], ""), ignore.case=TRUE), ignore.case=TRUE), ignore.case=TRUE))
  data.frame(name=gsub("[ \\\t\\\n]", "", vapply(s, function(x) x[1], "")),
             type=type,
             nullable=null, stringsAsFactors=FALSE)
}
#' SciDB array schema
#' @param x a \code{\link{scidb}} array object
#' @param what optional schema subset (subsets are returned in data frames; partial
#' argument matching is supported)
#' @return character-valued SciDB array schema
#' @examples
#' \dontrun{
#' s <- scidbconnect()
#' x <- scidb(s,"build(<v:double>[i=1:10,2,0,j=0:19,1,0],0)")
#' schema(x)
#' # [1] "<v:double> [i=1:10:0:2; j=0:19:0:1]"
#' schema(x, "attributes")
#' # name type nullable
#' #1 v double TRUE
#' schema(x, "dimensions")
#' #   name start end chunk overlap
#' # 1    i     1  10     2       0
#' # 2    j     0  19     1       0
#' }
#' @export
schema = function(x, what=c("schema", "attributes", "dimensions"))
{
  # Only scidb objects carry a schema.
  if (!inherits(x, "scidb")) return(NULL)
  choice = match.arg(what)
  if (choice == "schema") {
    # Strip any leading array name, keeping "<attributes> [dimensions]".
    return(gsub(".*<", "<", x@meta$schema))
  }
  if (choice == "attributes") return(.attsplitter(x))
  if (choice == "dimensions") return(.dimsplitter(x))
  invisible()
}
# Construct a one-dimensional SciDB schema string for a data frame upload:
# attributes "name:type" joined by commas, plus a synthetic integer dimension
# running 1:len with the given chunk size and zero overlap.
dfschema = function(names, types, len, chunk=10000)
{
  # Pick a dimension name guaranteed not to clash with an attribute name.
  dimname = make.unique_(names, "i")
  attrspec = paste(paste(names, types, sep=":"), collapse=",")
  sprintf("<%s>[%s=1:%d,%d,0]", attrspec, dimname, len, chunk)
}
| /R/schema-utils.R | no_license | vishalbelsare/SciDBR | R | false | false | 5,388 | r | # Functions for parsing and building SciDB schema strings.
# A SciDB schema string up to version 15.12 looks like:
# optional_array_name<attribute_1:type_1 NULL DEFAULT VALUE, attribute_2:type_2, ...>
# [dimension_1=start:end,chunksize,overlap, dimension_2=start:end,chunksize,overlap, ...]
#
# Starting with SciDB version 16.9, schema strings changed a lot. They look like:
# optional_array_name<v:double,a:int64 NOT NULL DEFAULT 5> [i=1:2:0:1000; j=1:3:0:1000]
# in particular, the dimensions are now start:end:overlap:chunksize
#
#' Internal function for processing SciDB dimension schema
#' @param x a scidb object or schema string
#' @return a data frame with parsed dimension data
#' @importFrom utils tail
.dimsplitter = function(x)
{
if (inherits(x, "scidb")) x = schema(x)
x = gsub("\\t", " ", x)
x = gsub("\\n", " ", x)
tokenize = function(s, token)
{
x = strsplit(s, token)[[1]]
x = as.vector(rbind(x, rep(token, length(x))))
x[- length(x)]
}
diagram = function(tokens, labels=c())
{
if(length(tokens) == 0) return(labels)
last = tail(labels, 1)
prev = tail(labels, 2)[1]
if(is.null(last)) labels = c(labels, "name")
else if(tokens[1] == "=") labels = c(labels, "equals")
else if(tokens[1] == ";") labels = c(labels, "semicolon")
else if(tokens[1] == ":") labels = c(labels, "colon")
else if(tokens[1] == ",") labels = c(labels, "comma")
else
{
if(last == "semicolon") labels = c(labels, "name")
else if(last == "equals") labels = c(labels, "start")
else if(last == "colon")
{
if(is.null(prev)) stop("invalid : character")
else if(prev == "start") labels = c(labels, "end")
else if(prev == "end") labels = c(labels, "overlap")
else if(prev == "overlap") labels = c(labels, "chunk")
}
else if(last == "comma")
{
if(is.null(prev)) stop("invalid , character")
else if(prev == "name") labels = c(labels, "name")
else if(prev == "start") labels = c(labels, "end")
else if(prev == "end") labels = c(labels, "chunk")
else if(prev == "chunk") labels = c(labels, "overlap")
else if(prev == "overlap") labels = c(labels, "name")
}
}
diagram(tokens[-1], labels)
}
form = function(x)
{
c(name=x["name"], start=x["start"], end=x["end"], chunk=x["chunk"], overlap=x["overlap"])
}
s = tryCatch(gsub("]", "", strsplit(x, "\\[")[[1]][[2]]), error=function(e) NULL)
if(is.null(s) || nchar(s) == 0) return(NULL)
tokens = Reduce(c, lapply(Reduce(c, lapply(Reduce(c, lapply(tokenize(s, "="), tokenize, ":")), tokenize, ";")), tokenize, ","))
names(tokens) = diagram(tokens)
tokens[!(names(tokens) %in% c("equals", "colon", "semicolon", "comma"))]
i = which(names(tokens) %in% "name")
j = c((i - 1)[-1], length(tokens))
ans = Reduce(rbind, lapply(1:length(i), function(k) form(tokens[i[k]:j[k]])))
if(length(i) == 1) {
ans = data.frame(as.list(ans), stringsAsFactors=FALSE, row.names=NULL)
} else ans = data.frame(ans, stringsAsFactors=FALSE, row.names=c())
names(ans) = c("name", "start", "end", "chunk", "overlap")
ans$name = gsub(" ", "", ans$name)
ans
}
#' Internal function for processing SciDB attribute schema
#'
#' Parses the attribute portion of a SciDB schema string (the part between
#' "<" and ">") into one row per attribute.
#' @param x a scidb object or schema string
#' @return a data frame with one row per attribute and columns
#'   \code{name}, \code{type}, and \code{nullable}
.attsplitter = function(x)
{
if (is.character(x)) s = x
else
{
if (!(inherits(x, "scidb"))) return(NULL)
s = schema(x)
}
# Normalize whitespace and strip DEFAULT clauses so they do not confuse
# the comma/colon splitting below.
s = gsub("\\t", " ", s)
s = gsub("\\n", " ", s)
s = gsub("default[^,]*", "", s, ignore.case=TRUE)
# Keep only the "<...>" attribute list, then split it into per-attribute
# name:type pairs.
s = strsplit(strsplit(strsplit(strsplit(s, ">")[[1]][1], "<")[[1]][2], ",")[[1]], ":")
# SciDB schema syntax changed in 15.12: newer servers flag non-nullable
# attributes with "NOT NULL", older servers flag nullable ones with " NULL".
# NOTE(review): x@meta$db is accessed even when x is a character string,
# where no @meta slot exists -- confirm string input is supported here.
null = if (at_least(attr(x@meta$db, "connection")$scidb.version, "15.12"))
! grepl("NOT NULL", s)
else grepl(" NULL", s)
# Second field of each name:type pair, with NULL/NOT NULL/compression
# annotations and spaces removed.
type = gsub(" ", "", gsub("null", "", gsub("not null", "", gsub("compression '.*'", "", vapply(s, function(x) x[2], ""), ignore.case=TRUE), ignore.case=TRUE), ignore.case=TRUE))
data.frame(name=gsub("[ \\\t\\\n]", "", vapply(s, function(x) x[1], "")),
type=type,
nullable=null, stringsAsFactors=FALSE)
}
#' SciDB array schema
#' @param x a \code{\link{scidb}} array object
#' @param what optional schema subset (subsets are returned in data frames; partial
#' argument matching is supported)
#' @return character-valued SciDB array schema
#' @examples
#' \dontrun{
#' s <- scidbconnect()
#' x <- scidb(s,"build(<v:double>[i=1:10,2,0,j=0:19,1,0],0)")
#' schema(x)
#' # [1] "<v:double> [i=1:10:0:2; j=0:19:0:1]"
#' schema(x, "attributes")
#' # name type nullable
#' #1 v double TRUE
#' schema(x, "dimensions")
#' #   name start end chunk overlap
#' # 1    i     1  10     2       0
#' # 2    j     0  19     1       0
#' }
#' @export
schema = function(x, what=c("schema", "attributes", "dimensions"))
{
  # Non-scidb input yields NULL rather than an error.
  if (!inherits(x, "scidb")) return(NULL)
  part = match.arg(what)
  if (part == "attributes") return(.attsplitter(x))
  if (part == "dimensions") return(.dimsplitter(x))
  # Default: the raw schema string with any leading array name removed.
  gsub(".*<", "<", x@meta$schema)
}
# Build a 1-d SciDB schema string for a data-frame-like array: one
# attribute per column and a single dimension 1:len with the given chunk
# size and zero overlap.
dfschema = function(names, types, len, chunk=10000)
{
  attrs = paste(paste(names, types, sep=":"), collapse=",")
  # Dimension name must not collide with any attribute name.
  dim_name = make.unique_(names, "i")
  sprintf("<%s>[%s=1:%d,%d,0]", attrs, dim_name, len, chunk)
}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix' stores a list of 4 functions associated with the stored
## matrix, including how to set its value, get its value, sets its solve value
## (i.e. inverse), and retrieve its solve value
## 'makeCacheMatrix' wraps a matrix in a list of four closures sharing
## state via their enclosing environment: set/get the matrix itself and
## set/get its cached inverse. Bug fix: the original had a duplicated
## assignment operator ("<- <-"), which is a syntax error.
makeCacheMatrix <- function(x = numeric()) {
  s <- NULL  # cached inverse; invalidated whenever x changes
  set <- function(y) {
    x <<- y
    s <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) s <<- solve
  getsolve <- function() s
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## 'cacheSolve' takes a cached matrix and uses its saved functions to check
## the solved inverse (if it exists); otherwise it solves for the inverse;
## inverse is then output either way
## 'cachesolve' returns the inverse of the matrix held in a cache object
## built by makeCacheMatrix: it reuses the stored inverse when one is
## present, otherwise it computes the inverse with solve() and stores it.
cachesolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, and return the inverse.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | Magagumo/RProgram | R | false | false | 964 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix' stores a list of 4 functions associated with the stored
## matrix, including how to set its value, get its value, sets its solve value
## (i.e. inverse), and retrieve its solve value
## 'makeCacheMatrix' wraps a matrix in a list of four closures sharing
## state via their enclosing environment: set/get the matrix itself and
## set/get its cached inverse. Bug fix: the original had a duplicated
## assignment operator ("<- <-"), which is a syntax error.
makeCacheMatrix <- function(x = numeric()) {
  s <- NULL  # cached inverse; invalidated whenever x changes
  set <- function(y) {
    x <<- y
    s <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) s <<- solve
  getsolve <- function() s
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## 'cacheSolve' takes a cached matrix and uses its saved functions to check
## the solved inverse (if it exists); otherwise it solves for the inverse;
## inverse is then output either way
## 'cachesolve' returns the inverse of the matrix held in a cache object
## built by makeCacheMatrix: it reuses the stored inverse when one is
## present, otherwise it computes the inverse with solve() and stores it.
cachesolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, and return the inverse.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
# path setup: locations of shared helper functions, simulation output,
# and the stored precision-matrix model for Model 2.
func.path <- "~/FuDGE/Functions_Def"
save.path <- "~/FuDGE/Model2/FFGL/result"
model.path <- "~/FuDGE/Model_Setting/Model2"
################## Part 0: Preparation #########################################
### Reads in arguments passed in from command line
### args is a vector of strings, each expected to be an R assignment
### such as "run.ind=3" (run.ind doubles as the RNG seed below).
args <- (commandArgs(TRUE))
for(i in 1:length(args)){
# run each element of the vector as if passed directly to the console
# have run.ind
# NOTE(review): eval(parse(...)) executes arbitrary code from the command
# line; acceptable for a trusted cluster submission script only.
eval(parse(text = args[[i]]))
}
timestart <- proc.time()
# Load the library and functions
source(paste(func.path,"local_sup_basis_func.R", sep="/"))
source(paste(func.path,"KFoldCVSplit2.R", sep="/"))
source(paste(func.path,"ROC_JGL2.R", sep="/"))
source(paste(func.path,"AUC_Func.R", sep="/"))
source(paste(func.path,"FFGL_ADMM.R", sep="/"))
library(Matrix)
library(MASS)
library(matrixcalc)
library(fda)
library(quadprog)
library(foreach)
library(doParallel)
library(JGL)
# Set the constants
m <- 5 ## number of function basis
p <- 60 ## dimension
n <- 100 ## sample size
obo <- 200 ## original observation number on each function
# The tuning parameter for the fused or group lasso penalty
# (coarse grid from 100 down to 20, then a finer grid from 18 to 0).
lambda2.v <- c(seq(from=100, to=20, by=-10), seq(from=18, to=0, by=-2))
M.paral <- length(lambda2.v)
################### Part 1: Generate Random Function #########################
# Decide Precision Matrix
load(paste(model.path, "/model2_p", p, ".RData", sep=""))
Theta.L <- g
Theta.X <- Theta.L$X
Theta.Y <- Theta.L$Y
Delta.true <- Theta.X - Theta.Y
# generate the covariance matrix
Covariance.X <- solve(Theta.X)
Covariance.Y <- solve(Theta.Y)
# generate Gaussian random vector with zero mean and Covariance matrix
set.seed(run.ind)
X <- mvrnorm(n, mu=rep(0, (m * p)), Sigma=Covariance.X)
Y <- mvrnorm(n, mu=rep(0, (m * p)), Sigma=Covariance.Y)
# generate the observations
u.X <- seq(1/obo, 1, 1/obo) ## a vector recording the obervation time points of X
u.Y <- seq(1/obo, 1, 1/obo) ## a vector recording the obervation time points of Y
basis.val.M.X <- local_sup_basis_func(u.X, m)
basis.val.M.Y <- local_sup_basis_func(u.Y, m)
fbasis <- create.fourier.basis(nbasis = m) ## set the fourier basis
observ.X <- array(rep(0, (n * p * obo)), c(n, p, obo)) ## array recording X observations
observ.Y <- array(rep(0, (n * p * obo)), c(n, p, obo)) ## array recording Y observations
for (i in c(1:n)){
for (j in c(1:p)){
coeff.X <- X[i, (1 + (j - 1) * m):(j * m)]
coeff.X <- matrix(coeff.X, ncol=1)
val.x <- basis.val.M.X %*% coeff.X
val.x <- as.vector(val.x)
observ.X[i, j, ] <- val.x + rnorm(obo, 0, 0.5)
coeff.Y <- Y[i, (1 + (j - 1) * m):(j * m)]
coeff.Y <- matrix(coeff.Y, ncol=1)
val.Y <- basis.val.M.Y %*% coeff.Y
val.Y <- as.vector(val.Y)
observ.Y[i, j, ] <- val.Y + rnorm(obo, 0, 0.5)
}
}
## As a conclude, what we have for analysis in the following steps are
## u.x, u.y, observ.X, observ.Y
################### Part 2: Basis Expansion and FPCA #########################
# choose hyperparameters of Graph X & Y
# Select the number of basis L and number of principle components M via Cross Validation
# First for Graph X
potential.L.X <- 5:10 ## candidates for L of X
potential.M.X <- 4:6 ## candidates for M of X
k.cv <- 5 ## number of folds for Cross Validation
RSS.record.X <- matrix(rep(Inf, (length(potential.L.X ) * length(potential.M.X))),
nrow=length(potential.L.X )) ## record the RSS via matrix
L.num <- 0
M.num <- 0
for (L in potential.L.X){
L.num <- L.num + 1
M.num <- 0
for (M in potential.M.X){
if (M > L){
M.num <- M.num + 1
}
else {
M.num <- M.num + 1
RSS.record.X[L.num, M.num] <- 0
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L) ## Use bspline basis
## to fit data
for (j in 1:p){
## For every dimension j, we make the observations as a matrix
obs.val.matrix <- matrix(rep(0, (n * obo)), nrow=obo)
for (i in c(1:n)){
obs.val.vec <- as.vector(observ.X[i, j, ])
obs.val.matrix[, i] <- obs.val.vec
}
## CV Split the observation matrix
g.temp <- KFoldCVSplit2(obs.val.matrix, u.X, k.cv)
obs.val.matrix.L <- g.temp$fval
time.pts.L <- g.temp$t
## Fit the model and do fpca seperately, then calculate the RSS in validation set
for (l in 1:k.cv){
valid.matrix <- obs.val.matrix.L[[l]]
valid.time <- time.pts.L[[l]]
nrow.valid <- length(valid.time)
if (l == 1){
train.matrix <- obs.val.matrix.L[[2]]
train.time <- time.pts.L[[2]]
for (temp.count in 3:k.cv){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else if (l == k.cv){
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in 2:(k.cv-1)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else {
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in c(1:(l-1), (l+1):k.cv)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
}
## As a result, we have train.matrix for trainning, valid.matrix for validation
## train.time records the time points in train set,
## valid.time records validation set
## Train the model using train.matrix
### Fit with basis expansion
fd.object.array <- Data2fd(argvals=train.time, y=train.matrix, basisobj=bbasis)
### FPCA
fd.pca.obj <- pca.fd(fd.object.array, nharm=M)
## Compute RSS with Validation matrix
predict.valid <- apply(matrix(eval.fd(fd.pca.obj$harmonics, valid.time), ncol=M),
1, sum)
predict.valid <- matrix(predict.valid, nrow=nrow.valid) %*% matrix(rep(1, n),
ncol=n)
RSS.temp <- valid.matrix - predict.valid
RSS.record.X[L.num, M.num] <- RSS.record.X[L.num, M.num] + sum(RSS.temp ^ 2) /
(dim(RSS.temp)[1] * dim(RSS.temp)[2])
}
}
}
}
}
L.selected.X <- potential.L.X[which(RSS.record.X == min(RSS.record.X),
arr.ind = TRUE)[1, 1]]
M.selected.X <- potential.M.X[which(RSS.record.X == min(RSS.record.X),
arr.ind = TRUE)[1, 2]]
# Then for Graph Y
potential.L.Y <- 5:10 ## candidates for L of Y
potential.M.Y <- 4:6 ## candidates for M of Y
k.cv <- 5 ## number of folds for Cross Validation
RSS.record.Y <- matrix(rep(Inf, (length(potential.L.Y ) * length(potential.M.Y))),
nrow=length(potential.L.Y )) ## record the RSS via matrix
L.num <- 0
M.num <- 0
for (L in potential.L.Y){
L.num <- L.num + 1
M.num <- 0
for (M in potential.M.Y){
if (M > L){
M.num <- M.num + 1
}
else {
M.num <- M.num + 1
RSS.record.Y[L.num, M.num] <- 0
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L) ## Use bspline basis
## to fit data
for (j in 1:p){
## For every dimension j, we make the observations as a matrix
obs.val.matrix <- matrix(rep(0, (n * obo)), nrow=obo)
for (i in c(1:n)){
obs.val.vec <- as.vector(observ.Y[i, j, ])
obs.val.matrix[, i] <- obs.val.vec
}
## CV Split the observation matrix
g.temp <- KFoldCVSplit2(obs.val.matrix, u.Y, k.cv)
obs.val.matrix.L <- g.temp$fval
time.pts.L <- g.temp$t
## Fit the model and do fpca seperately, then calculate the RSS in validation set
for (l in 1:k.cv){
valid.matrix <- obs.val.matrix.L[[l]]
valid.time <- time.pts.L[[l]]
nrow.valid <- length(valid.time)
if (l == 1){
train.matrix <- obs.val.matrix.L[[2]]
train.time <- time.pts.L[[2]]
for (temp.count in 3:k.cv){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else if (l == k.cv){
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in 2:(k.cv-1)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else {
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in c(1:(l-1), (l+1):k.cv)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
}
## As a result, we have train.matrix for trainning, valid.matrix for validation
## train.time records the time points in train set,
## valid.time records validation set
## Train the model using train.matrix
### Fit with basis expansion
fd.object.array <- Data2fd(argvals=train.time, y=train.matrix, basisobj=bbasis)
### FPCA
fd.pca.obj <- pca.fd(fd.object.array, nharm=M)
## Compute RSS with Validation matrix
predict.valid <- apply(matrix(eval.fd(fd.pca.obj$harmonics, valid.time), ncol=M),
1, sum)
predict.valid <- matrix(predict.valid, nrow=nrow.valid) %*% matrix(rep(1, n),
ncol=n)
RSS.temp <- valid.matrix - predict.valid
RSS.record.Y[L.num, M.num] <- RSS.record.Y[L.num, M.num] + sum(RSS.temp ^ 2) /
(dim(RSS.temp)[1] * dim(RSS.temp)[2])
}
}
}
}
}
L.selected.Y <- potential.L.Y[which(RSS.record.Y == min(RSS.record.Y),
arr.ind = TRUE)[1, 1]]
M.selected.Y <- potential.M.Y[which(RSS.record.Y == min(RSS.record.Y),
arr.ind = TRUE)[1, 2]]
# set the L and M for X and Y be the same
L.selected <- ceiling((L.selected.X + L.selected.Y) / 2)
M.selected <- ceiling((M.selected.X + M.selected.Y) / 2)
# Use selected L and M to estimate the principal component scores matrix
principle.score.X <- matrix(0, nrow=n, ncol=1)
principle.score.Y <- matrix(0, nrow=n, ncol=1)
# Use the selected L and M to compute principal component scores for each
# dimension j, projecting both samples onto the principal components of the
# pooled (X + Y) functional data so the scores are directly comparable.
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L.selected)  # loop-invariant, hoisted
for (j in 1:p){
  obs.val.matrix.X <- matrix(rep(0, (n * obo)), nrow=obo)
  obs.val.matrix.Y <- matrix(rep(0, (n * obo)), nrow=obo)
  for (i in c(1:n)){
    obs.val.matrix.X[, i] <- as.vector(observ.X[i, j, ])
    obs.val.matrix.Y[, i] <- as.vector(observ.Y[i, j, ])
  }
  fd.object.array.X <- Data2fd(argvals=u.X, y=obs.val.matrix.X, basisobj=bbasis)
  # Bug fix: the Y observations were fitted against u.X; use u.Y. The two
  # grids are identical in this simulation, so numerical results are
  # unchanged, but the code now matches the intent.
  fd.object.array.Y <- Data2fd(argvals=u.Y, y=obs.val.matrix.Y, basisobj=bbasis)
  # FPCA on the pooled functional data. (The separate per-sample pca.fd
  # calls previously computed here were never used and have been removed.)
  fd.pca.obj <- pca.fd(fd.object.array.X+fd.object.array.Y, nharm=M.selected)
  proj.score.X <- inprod(fd.object.array.X, fd.pca.obj$harmonics, rng=c(0,1))
  proj.score.Y <- inprod(fd.object.array.Y, fd.pca.obj$harmonics, rng=c(0,1))
  principle.score.X <- cbind(principle.score.X, proj.score.X)
  principle.score.Y <- cbind(principle.score.Y, proj.score.Y)
}
principle.score.X <- principle.score.X[,-1]
principle.score.Y <- principle.score.Y[,-1]
# Use estimated principle components scores matrix to estimate covariance matrix of
# principle components, both for X and Y
principle.score.X.cen <- scale(principle.score.X, center=T, scale=F)
principle.score.Y.cen <- scale(principle.score.Y, center=T, scale=F)
estimated.cov.pc.X <- (t(principle.score.X.cen) %*% principle.score.X.cen) / (n-1)
estimated.cov.pc.Y <- (t(principle.score.Y.cen) %*% principle.score.Y.cen) / (n-1)
# In the following steps, we use estimated.cov.pc.X and estimated.cov.pc.Y to
# estimated the differential matrix
################### Part 3: Estimate the Differential Graph #########################
# Run FFGL-ADMM over the lambda2 grid and collect one (TPR, FPR) point per
# penalty value against the true differential support.
cov.list <- list(X=estimated.cov.pc.X, Y=estimated.cov.pc.Y)
n.sam <- c(n, n)
lambda1.choose <- 0.1
ROC.result <- matrix(0, nrow=length(lambda2.v), ncol=2)
colnames(ROC.result) <- c("TPR", "FPR")
for (lambda2.ind in c(1:length(lambda2.v)) ){
lambda2 <- lambda2.v[lambda2.ind]
FFGL.result <- FFGL_ADMM(cov.list, n.sam, p, M.selected, lambda1.choose, lambda2)
ROC.res <- ROC_JGL2(FFGL.result, Theta.L$SupportDelta, p, M.selected)
ROC.result[lambda2.ind, ] <- ROC.res$ROC.point
}
# Anchor the curve at (0,0) and (1,1) so it spans the whole unit square.
ROC.result <- rbind(matrix(c(0,0), nrow=1), ROC.result)
ROC.result <- rbind(ROC.result, matrix(c(1,1), nrow=1))
# Repeatedly drop points that break monotonicity (TPR or FPR decreasing
# relative to the previous kept point) until the curve is non-decreasing
# in both coordinates; the while loop reruns the scan after each pruning
# pass because removals can expose new violations.
ind.v <- rep(FALSE, dim(ROC.result)[1])
while(sum(!ind.v)>0){
ind.v <- rep(TRUE, dim(ROC.result)[1])
for(i in 2:dim(ROC.result)[1]){
if (ROC.result[i, 1]<ROC.result[i-1, 1] | ROC.result[i, 2]<ROC.result[i-1, 2]){
ind.v[i] <- FALSE
}
}
ROC.result <- ROC.result[ind.v, ]
}
Paral.result <- ROC.result
save(Paral.result, file=paste(save.path, "/Para_result_p", p,
"_runind", run.ind, "_lam1_", lambda1.choose,
".RData", sep="")) | /Simulation/Simulation_Code/Model2/FFGL/script/FFGL_Model2_p60.R | no_license | boxinz17/FuDGE | R | false | false | 13,866 | r | # path setup
func.path <- "~/FuDGE/Functions_Def"
save.path <- "~/FuDGE/Model2/FFGL/result"
model.path <- "~/FuDGE/Model_Setting/Model2"
################## Part 0: Preparation #########################################
### Reads in arguments passed in from command line
### args is a vector of strings
args <- (commandArgs(TRUE))
for(i in 1:length(args)){
# run each element of the vector as if passed directly to the console
# have run.ind
eval(parse(text = args[[i]]))
}
timestart <- proc.time()
# Load the library and functions
source(paste(func.path,"local_sup_basis_func.R", sep="/"))
source(paste(func.path,"KFoldCVSplit2.R", sep="/"))
source(paste(func.path,"ROC_JGL2.R", sep="/"))
source(paste(func.path,"AUC_Func.R", sep="/"))
source(paste(func.path,"FFGL_ADMM.R", sep="/"))
library(Matrix)
library(MASS)
library(matrixcalc)
library(fda)
library(quadprog)
library(foreach)
library(doParallel)
library(JGL)
# Set the constants
m <- 5 ## number of function basis
p <- 60 ## dimension
n <- 100 ## sample size
obo <- 200 ## original observation number on each function
# The tuning parameter for the fused or group lasso penalty
lambda2.v <- c(seq(from=100, to=20, by=-10), seq(from=18, to=0, by=-2))
M.paral <- length(lambda2.v)
################### Part 1: Generate Random Function #########################
# Decide Precision Matrix
load(paste(model.path, "/model2_p", p, ".RData", sep=""))
Theta.L <- g
Theta.X <- Theta.L$X
Theta.Y <- Theta.L$Y
Delta.true <- Theta.X - Theta.Y
# generate the covariance matrix
Covariance.X <- solve(Theta.X)
Covariance.Y <- solve(Theta.Y)
# generate Gaussian random vector with zero mean and Covariance matrix
set.seed(run.ind)
X <- mvrnorm(n, mu=rep(0, (m * p)), Sigma=Covariance.X)
Y <- mvrnorm(n, mu=rep(0, (m * p)), Sigma=Covariance.Y)
# generate the observations
u.X <- seq(1/obo, 1, 1/obo) ## a vector recording the obervation time points of X
u.Y <- seq(1/obo, 1, 1/obo) ## a vector recording the obervation time points of Y
basis.val.M.X <- local_sup_basis_func(u.X, m)
basis.val.M.Y <- local_sup_basis_func(u.Y, m)
fbasis <- create.fourier.basis(nbasis = m) ## set the fourier basis
observ.X <- array(rep(0, (n * p * obo)), c(n, p, obo)) ## array recording X observations
observ.Y <- array(rep(0, (n * p * obo)), c(n, p, obo)) ## array recording Y observations
for (i in c(1:n)){
for (j in c(1:p)){
coeff.X <- X[i, (1 + (j - 1) * m):(j * m)]
coeff.X <- matrix(coeff.X, ncol=1)
val.x <- basis.val.M.X %*% coeff.X
val.x <- as.vector(val.x)
observ.X[i, j, ] <- val.x + rnorm(obo, 0, 0.5)
coeff.Y <- Y[i, (1 + (j - 1) * m):(j * m)]
coeff.Y <- matrix(coeff.Y, ncol=1)
val.Y <- basis.val.M.Y %*% coeff.Y
val.Y <- as.vector(val.Y)
observ.Y[i, j, ] <- val.Y + rnorm(obo, 0, 0.5)
}
}
## As a conclude, what we have for analysis in the following steps are
## u.x, u.y, observ.X, observ.Y
################### Part 2: Basis Expansion and FPCA #########################
# choose hyperparameters of Graph X & Y
# Select the number of basis L and number of principle components M via Cross Validation
# First for Graph X
potential.L.X <- 5:10 ## candidates for L of X
potential.M.X <- 4:6 ## candidates for M of X
k.cv <- 5 ## number of folds for Cross Validation
RSS.record.X <- matrix(rep(Inf, (length(potential.L.X ) * length(potential.M.X))),
nrow=length(potential.L.X )) ## record the RSS via matrix
L.num <- 0
M.num <- 0
for (L in potential.L.X){
L.num <- L.num + 1
M.num <- 0
for (M in potential.M.X){
if (M > L){
M.num <- M.num + 1
}
else {
M.num <- M.num + 1
RSS.record.X[L.num, M.num] <- 0
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L) ## Use bspline basis
## to fit data
for (j in 1:p){
## For every dimension j, we make the observations as a matrix
obs.val.matrix <- matrix(rep(0, (n * obo)), nrow=obo)
for (i in c(1:n)){
obs.val.vec <- as.vector(observ.X[i, j, ])
obs.val.matrix[, i] <- obs.val.vec
}
## CV Split the observation matrix
g.temp <- KFoldCVSplit2(obs.val.matrix, u.X, k.cv)
obs.val.matrix.L <- g.temp$fval
time.pts.L <- g.temp$t
## Fit the model and do fpca seperately, then calculate the RSS in validation set
for (l in 1:k.cv){
valid.matrix <- obs.val.matrix.L[[l]]
valid.time <- time.pts.L[[l]]
nrow.valid <- length(valid.time)
if (l == 1){
train.matrix <- obs.val.matrix.L[[2]]
train.time <- time.pts.L[[2]]
for (temp.count in 3:k.cv){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else if (l == k.cv){
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in 2:(k.cv-1)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else {
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in c(1:(l-1), (l+1):k.cv)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
}
## As a result, we have train.matrix for trainning, valid.matrix for validation
## train.time records the time points in train set,
## valid.time records validation set
## Train the model using train.matrix
### Fit with basis expansion
fd.object.array <- Data2fd(argvals=train.time, y=train.matrix, basisobj=bbasis)
### FPCA
fd.pca.obj <- pca.fd(fd.object.array, nharm=M)
## Compute RSS with Validation matrix
predict.valid <- apply(matrix(eval.fd(fd.pca.obj$harmonics, valid.time), ncol=M),
1, sum)
predict.valid <- matrix(predict.valid, nrow=nrow.valid) %*% matrix(rep(1, n),
ncol=n)
RSS.temp <- valid.matrix - predict.valid
RSS.record.X[L.num, M.num] <- RSS.record.X[L.num, M.num] + sum(RSS.temp ^ 2) /
(dim(RSS.temp)[1] * dim(RSS.temp)[2])
}
}
}
}
}
L.selected.X <- potential.L.X[which(RSS.record.X == min(RSS.record.X),
arr.ind = TRUE)[1, 1]]
M.selected.X <- potential.M.X[which(RSS.record.X == min(RSS.record.X),
arr.ind = TRUE)[1, 2]]
# Then for Graph Y
potential.L.Y <- 5:10 ## candidates for L of Y
potential.M.Y <- 4:6 ## candidates for M of Y
k.cv <- 5 ## number of folds for Cross Validation
RSS.record.Y <- matrix(rep(Inf, (length(potential.L.Y ) * length(potential.M.Y))),
nrow=length(potential.L.Y )) ## record the RSS via matrix
L.num <- 0
M.num <- 0
for (L in potential.L.Y){
L.num <- L.num + 1
M.num <- 0
for (M in potential.M.Y){
if (M > L){
M.num <- M.num + 1
}
else {
M.num <- M.num + 1
RSS.record.Y[L.num, M.num] <- 0
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L) ## Use bspline basis
## to fit data
for (j in 1:p){
## For every dimension j, we make the observations as a matrix
obs.val.matrix <- matrix(rep(0, (n * obo)), nrow=obo)
for (i in c(1:n)){
obs.val.vec <- as.vector(observ.Y[i, j, ])
obs.val.matrix[, i] <- obs.val.vec
}
## CV Split the observation matrix
g.temp <- KFoldCVSplit2(obs.val.matrix, u.Y, k.cv)
obs.val.matrix.L <- g.temp$fval
time.pts.L <- g.temp$t
## Fit the model and do fpca seperately, then calculate the RSS in validation set
for (l in 1:k.cv){
valid.matrix <- obs.val.matrix.L[[l]]
valid.time <- time.pts.L[[l]]
nrow.valid <- length(valid.time)
if (l == 1){
train.matrix <- obs.val.matrix.L[[2]]
train.time <- time.pts.L[[2]]
for (temp.count in 3:k.cv){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else if (l == k.cv){
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in 2:(k.cv-1)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
} else {
train.matrix <- obs.val.matrix.L[[1]]
train.time <- time.pts.L[[1]]
for (temp.count in c(1:(l-1), (l+1):k.cv)){
train.matrix <- rbind(train.matrix, obs.val.matrix.L[[temp.count]])
train.time <- c(train.time, time.pts.L[[temp.count]])
}
}
## As a result, we have train.matrix for trainning, valid.matrix for validation
## train.time records the time points in train set,
## valid.time records validation set
## Train the model using train.matrix
### Fit with basis expansion
fd.object.array <- Data2fd(argvals=train.time, y=train.matrix, basisobj=bbasis)
### FPCA
fd.pca.obj <- pca.fd(fd.object.array, nharm=M)
## Compute RSS with Validation matrix
predict.valid <- apply(matrix(eval.fd(fd.pca.obj$harmonics, valid.time), ncol=M),
1, sum)
predict.valid <- matrix(predict.valid, nrow=nrow.valid) %*% matrix(rep(1, n),
ncol=n)
RSS.temp <- valid.matrix - predict.valid
RSS.record.Y[L.num, M.num] <- RSS.record.Y[L.num, M.num] + sum(RSS.temp ^ 2) /
(dim(RSS.temp)[1] * dim(RSS.temp)[2])
}
}
}
}
}
L.selected.Y <- potential.L.Y[which(RSS.record.Y == min(RSS.record.Y),
arr.ind = TRUE)[1, 1]]
M.selected.Y <- potential.M.Y[which(RSS.record.Y == min(RSS.record.Y),
arr.ind = TRUE)[1, 2]]
# set the L and M for X and Y be the same
L.selected <- ceiling((L.selected.X + L.selected.Y) / 2)
M.selected <- ceiling((M.selected.X + M.selected.Y) / 2)
# Use selected L and M to estimate the principal component scores matrix
principle.score.X <- matrix(0, nrow=n, ncol=1)
principle.score.Y <- matrix(0, nrow=n, ncol=1)
# Use the selected L and M to compute principal component scores for each
# dimension j, projecting both samples onto the principal components of the
# pooled (X + Y) functional data so the scores are directly comparable.
bbasis <- create.bspline.basis(rangeval=c(0, 1), nbasis=L.selected)  # loop-invariant, hoisted
for (j in 1:p){
  obs.val.matrix.X <- matrix(rep(0, (n * obo)), nrow=obo)
  obs.val.matrix.Y <- matrix(rep(0, (n * obo)), nrow=obo)
  for (i in c(1:n)){
    obs.val.matrix.X[, i] <- as.vector(observ.X[i, j, ])
    obs.val.matrix.Y[, i] <- as.vector(observ.Y[i, j, ])
  }
  fd.object.array.X <- Data2fd(argvals=u.X, y=obs.val.matrix.X, basisobj=bbasis)
  # Bug fix: the Y observations were fitted against u.X; use u.Y. The two
  # grids are identical in this simulation, so numerical results are
  # unchanged, but the code now matches the intent.
  fd.object.array.Y <- Data2fd(argvals=u.Y, y=obs.val.matrix.Y, basisobj=bbasis)
  # FPCA on the pooled functional data. (The separate per-sample pca.fd
  # calls previously computed here were never used and have been removed.)
  fd.pca.obj <- pca.fd(fd.object.array.X+fd.object.array.Y, nharm=M.selected)
  proj.score.X <- inprod(fd.object.array.X, fd.pca.obj$harmonics, rng=c(0,1))
  proj.score.Y <- inprod(fd.object.array.Y, fd.pca.obj$harmonics, rng=c(0,1))
  principle.score.X <- cbind(principle.score.X, proj.score.X)
  principle.score.Y <- cbind(principle.score.Y, proj.score.Y)
}
principle.score.X <- principle.score.X[,-1]
principle.score.Y <- principle.score.Y[,-1]
# Use estimated principle components scores matrix to estimate covariance matrix of
# principle components, both for X and Y
principle.score.X.cen <- scale(principle.score.X, center=T, scale=F)
principle.score.Y.cen <- scale(principle.score.Y, center=T, scale=F)
estimated.cov.pc.X <- (t(principle.score.X.cen) %*% principle.score.X.cen) / (n-1)
estimated.cov.pc.Y <- (t(principle.score.Y.cen) %*% principle.score.Y.cen) / (n-1)
# In the following steps, we use estimated.cov.pc.X and estimated.cov.pc.Y to
# estimated the differential matrix
################### Part 3: Estimate the Differential Graph #########################
cov.list <- list(X=estimated.cov.pc.X, Y=estimated.cov.pc.Y)
n.sam <- c(n, n)
lambda1.choose <- 0.1
ROC.result <- matrix(0, nrow=length(lambda2.v), ncol=2)
colnames(ROC.result) <- c("TPR", "FPR")
for (lambda2.ind in c(1:length(lambda2.v)) ){
lambda2 <- lambda2.v[lambda2.ind]
FFGL.result <- FFGL_ADMM(cov.list, n.sam, p, M.selected, lambda1.choose, lambda2)
ROC.res <- ROC_JGL2(FFGL.result, Theta.L$SupportDelta, p, M.selected)
ROC.result[lambda2.ind, ] <- ROC.res$ROC.point
}
ROC.result <- rbind(matrix(c(0,0), nrow=1), ROC.result)
ROC.result <- rbind(ROC.result, matrix(c(1,1), nrow=1))
ind.v <- rep(FALSE, dim(ROC.result)[1])
while(sum(!ind.v)>0){
ind.v <- rep(TRUE, dim(ROC.result)[1])
for(i in 2:dim(ROC.result)[1]){
if (ROC.result[i, 1]<ROC.result[i-1, 1] | ROC.result[i, 2]<ROC.result[i-1, 2]){
ind.v[i] <- FALSE
}
}
ROC.result <- ROC.result[ind.v, ]
}
Paral.result <- ROC.result
save(Paral.result, file=paste(save.path, "/Para_result_p", p,
"_runind", run.ind, "_lam1_", lambda1.choose,
".RData", sep="")) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.